From 517f343aff1293225e39a4a64fb5d543482c9dc8 Mon Sep 17 00:00:00 2001
From: Grazvydas Ignotas
Date: Mon, 19 Apr 2010 19:08:33 +0300
Subject: [PATCH] compat-wireless-2010-03-10

---
 .gitignore | 14 + COPYRIGHT | 356 + Makefile | 356 + README | 248 + compat-release | 1 + compat/Makefile | 28 + compat/compat-2.6.14.c | 18 + compat/compat-2.6.18.c | 18 + compat/compat-2.6.19.c | 18 + compat/compat-2.6.21.c | 18 + compat/compat-2.6.22.c | 18 + compat/compat-2.6.23.c | 249 + compat/compat-2.6.24.c | 185 + compat/compat-2.6.25.c | 102 + compat/compat-2.6.26.c | 93 + compat/compat-2.6.27.c | 244 + compat/compat-2.6.28.c | 373 + compat/compat-2.6.29.c | 68 + compat/compat-2.6.30.c | 18 + compat/compat-2.6.31.c | 64 + compat/compat-2.6.32.c | 124 + compat/compat-2.6.33.c | 137 + compat/compat_firmware_class.c | 726 + compat/main.c | 25 + compat/pm_qos_params.c | 477 + compat/scripts/compat_firmware_install | 19 + config.mk | 468 + drivers/bluetooth/Makefile | 29 + drivers/bluetooth/ath3k.c | 189 + drivers/bluetooth/bcm203x.c | 301 + drivers/bluetooth/bfusb.c | 791 + drivers/bluetooth/bluecard_cs.c | 974 ++ drivers/bluetooth/bpa10x.c | 542 + drivers/bluetooth/bt3c_cs.c | 806 + drivers/bluetooth/btmrvl_debugfs.c | 431 + drivers/bluetooth/btmrvl_drv.h | 140 + drivers/bluetooth/btmrvl_main.c | 635 + drivers/bluetooth/btmrvl_sdio.c | 1008 ++ drivers/bluetooth/btmrvl_sdio.h | 108 + drivers/bluetooth/btsdio.c | 400 + drivers/bluetooth/btuart_cs.c | 734 + drivers/bluetooth/btusb.c | 1208 ++ drivers/bluetooth/dtl1_cs.c | 687 + drivers/bluetooth/hci_bcsp.c | 763 + drivers/bluetooth/hci_h4.c | 290 + drivers/bluetooth/hci_ldisc.c | 587 + drivers/bluetooth/hci_ll.c | 535 + drivers/bluetooth/hci_uart.h | 93 + drivers/bluetooth/hci_vhci.c | 311 + drivers/misc/eeprom/Makefile | 1 + drivers/misc/eeprom/eeprom_93cx6.c | 240 + drivers/net/Makefile | 6 + drivers/net/atl1c/Makefile | 2 + drivers/net/atl1c/atl1c.h | 635 + drivers/net/atl1c/atl1c_ethtool.c | 319 + drivers/net/atl1c/atl1c_hw.c | 586 + drivers/net/atl1c/atl1c_hw.h | 864 ++ drivers/net/atl1c/atl1c_main.c | 2891 ++++ drivers/net/atl1e/Makefile | 2 + drivers/net/atl1e/atl1e.h | 510 + drivers/net/atl1e/atl1e_ethtool.c | 394 + drivers/net/atl1e/atl1e_hw.c | 659 + drivers/net/atl1e/atl1e_hw.h | 793 + drivers/net/atl1e/atl1e_main.c | 2570 ++++ drivers/net/atl1e/atl1e_param.c | 268 + drivers/net/atlx/Makefile | 3 + drivers/net/atlx/atl1.c | 3697 +++++ drivers/net/atlx/atl1.h | 796 + drivers/net/atlx/atl2.c | 3126 ++++ drivers/net/atlx/atl2.h | 528 + drivers/net/atlx/atlx.c | 242 + drivers/net/atlx/atlx.h | 503 + drivers/net/b44.c | 2387 +++ drivers/net/b44.h | 401 + drivers/net/usb/Makefile | 8 + drivers/net/usb/cdc_ether.c | 653 + drivers/net/usb/rndis_host.c | 632 + drivers/net/usb/usbnet.c | 1523 ++ drivers/net/wireless/Makefile | 37 + drivers/net/wireless/adm8211.c | 2011 +++ drivers/net/wireless/adm8211.h | 602 + drivers/net/wireless/at76c50x-usb.c | 2512 ++++ drivers/net/wireless/at76c50x-usb.h | 463 + drivers/net/wireless/ath/Makefile | 11 + drivers/net/wireless/ath/ar9170/Makefile | 3 + drivers/net/wireless/ath/ar9170/ar9170.h | 306 + drivers/net/wireless/ath/ar9170/cmd.c | 128 + drivers/net/wireless/ath/ar9170/cmd.h | 92 + drivers/net/wireless/ath/ar9170/eeprom.h | 179 + drivers/net/wireless/ath/ar9170/hw.h | 429 + drivers/net/wireless/ath/ar9170/led.c | 181 + drivers/net/wireless/ath/ar9170/mac.c | 519 + drivers/net/wireless/ath/ar9170/main.c | 2726 ++++ drivers/net/wireless/ath/ar9170/phy.c | 1721 +++ drivers/net/wireless/ath/ar9170/usb.c | 976 ++ 
drivers/net/wireless/ath/ar9170/usb.h | 81 + drivers/net/wireless/ath/ath.h | 116 + drivers/net/wireless/ath/ath5k/Makefile | 16 + drivers/net/wireless/ath/ath5k/ath5k.h | 1378 ++ drivers/net/wireless/ath/ath5k/attach.c | 362 + drivers/net/wireless/ath/ath5k/base.c | 3340 +++++ drivers/net/wireless/ath/ath5k/base.h | 205 + drivers/net/wireless/ath/ath5k/caps.c | 195 + drivers/net/wireless/ath/ath5k/debug.c | 540 + drivers/net/wireless/ath/ath5k/debug.h | 203 + drivers/net/wireless/ath/ath5k/desc.c | 696 + drivers/net/wireless/ath/ath5k/desc.h | 332 + drivers/net/wireless/ath/ath5k/dma.c | 703 + drivers/net/wireless/ath/ath5k/eeprom.c | 1818 +++ drivers/net/wireless/ath/ath5k/eeprom.h | 479 + drivers/net/wireless/ath/ath5k/gpio.c | 176 + drivers/net/wireless/ath/ath5k/initvals.c | 1555 ++ drivers/net/wireless/ath/ath5k/led.c | 192 + drivers/net/wireless/ath/ath5k/pcu.c | 1153 ++ drivers/net/wireless/ath/ath5k/phy.c | 3146 ++++ drivers/net/wireless/ath/ath5k/qcu.c | 555 + drivers/net/wireless/ath/ath5k/reg.h | 2587 ++++ drivers/net/wireless/ath/ath5k/reset.c | 1390 ++ drivers/net/wireless/ath/ath5k/rfbuffer.h | 1181 ++ drivers/net/wireless/ath/ath5k/rfgain.h | 516 + drivers/net/wireless/ath/ath5k/rfkill.c | 121 + drivers/net/wireless/ath/ath9k/Makefile | 30 + drivers/net/wireless/ath/ath9k/ahb.c | 188 + drivers/net/wireless/ath/ath9k/ani.c | 828 ++ drivers/net/wireless/ath/ath9k/ani.h | 123 + drivers/net/wireless/ath/ath9k/ath9k.h | 606 + drivers/net/wireless/ath/ath9k/beacon.c | 788 + drivers/net/wireless/ath/ath9k/btcoex.c | 227 + drivers/net/wireless/ath/ath9k/btcoex.h | 62 + drivers/net/wireless/ath/ath9k/calib.c | 1255 ++ drivers/net/wireless/ath/ath9k/calib.h | 135 + drivers/net/wireless/ath/ath9k/common.c | 299 + drivers/net/wireless/ath/ath9k/common.h | 127 + drivers/net/wireless/ath/ath9k/debug.c | 795 + drivers/net/wireless/ath/ath9k/debug.h | 223 + drivers/net/wireless/ath/ath9k/eeprom.c | 276 + drivers/net/wireless/ath/ath9k/eeprom.h | 718 + drivers/net/wireless/ath/ath9k/eeprom_4k.c | 1198 ++ drivers/net/wireless/ath/ath9k/eeprom_9287.c | 1184 ++ drivers/net/wireless/ath/ath9k/eeprom_def.c | 1486 ++ drivers/net/wireless/ath/ath9k/gpio.c | 442 + drivers/net/wireless/ath/ath9k/hw.c | 3954 +++++ drivers/net/wireless/ath/ath9k/hw.c.orig | 3954 +++++ drivers/net/wireless/ath/ath9k/hw.h | 708 + drivers/net/wireless/ath/ath9k/init.c | 863 ++ drivers/net/wireless/ath/ath9k/initvals.h | 7052 +++++++++ drivers/net/wireless/ath/ath9k/mac.c | 1067 ++ drivers/net/wireless/ath/ath9k/mac.h | 720 + drivers/net/wireless/ath/ath9k/main.c | 2065 +++ drivers/net/wireless/ath/ath9k/pci.c | 324 + drivers/net/wireless/ath/ath9k/phy.c | 976 ++ drivers/net/wireless/ath/ath9k/phy.h | 625 + drivers/net/wireless/ath/ath9k/rc.c | 1422 ++ drivers/net/wireless/ath/ath9k/rc.h | 188 + drivers/net/wireless/ath/ath9k/recv.c | 652 + drivers/net/wireless/ath/ath9k/reg.h | 1719 +++ drivers/net/wireless/ath/ath9k/virtual.c | 746 + drivers/net/wireless/ath/ath9k/xmit.c | 2285 +++ drivers/net/wireless/ath/debug.c | 32 + drivers/net/wireless/ath/debug.h | 77 + drivers/net/wireless/ath/hw.c | 126 + drivers/net/wireless/ath/main.c | 58 + drivers/net/wireless/ath/reg.h | 27 + drivers/net/wireless/ath/regd.c | 590 + drivers/net/wireless/ath/regd.h | 263 + drivers/net/wireless/ath/regd_common.h | 475 + drivers/net/wireless/b43/Makefile | 22 + drivers/net/wireless/b43/b43.h | 927 ++ drivers/net/wireless/b43/b43.h.orig | 924 ++ drivers/net/wireless/b43/debugfs.c | 831 ++ drivers/net/wireless/b43/debugfs.h | 111 + 
drivers/net/wireless/b43/dma.c | 1671 +++ drivers/net/wireless/b43/dma.h | 289 + drivers/net/wireless/b43/leds.c | 360 + drivers/net/wireless/b43/leds.h | 94 + drivers/net/wireless/b43/lo.c | 1017 ++ drivers/net/wireless/b43/lo.h | 87 + drivers/net/wireless/b43/main.c | 5112 +++++++ drivers/net/wireless/b43/main.c.orig | 5095 +++++++ drivers/net/wireless/b43/main.h | 144 + drivers/net/wireless/b43/pcmcia.c | 157 + drivers/net/wireless/b43/pcmcia.h | 20 + drivers/net/wireless/b43/phy_a.c | 594 + drivers/net/wireless/b43/phy_a.h | 130 + drivers/net/wireless/b43/phy_common.c | 468 + drivers/net/wireless/b43/phy_common.h | 435 + drivers/net/wireless/b43/phy_g.c | 3056 ++++ drivers/net/wireless/b43/phy_g.h | 208 + drivers/net/wireless/b43/phy_lp.c | 2733 ++++ drivers/net/wireless/b43/phy_lp.h | 912 ++ drivers/net/wireless/b43/phy_n.c | 3292 +++++ drivers/net/wireless/b43/phy_n.h | 1026 ++ drivers/net/wireless/b43/pio.c | 815 ++ drivers/net/wireless/b43/pio.h | 165 + drivers/net/wireless/b43/rfkill.c | 86 + drivers/net/wireless/b43/rfkill.h | 11 + drivers/net/wireless/b43/sdio.c | 202 + drivers/net/wireless/b43/sdio.h | 45 + drivers/net/wireless/b43/sysfs.c | 155 + drivers/net/wireless/b43/sysfs.h | 9 + drivers/net/wireless/b43/tables.c | 466 + drivers/net/wireless/b43/tables.h | 34 + drivers/net/wireless/b43/tables_lpphy.c | 2457 ++++ drivers/net/wireless/b43/tables_lpphy.h | 44 + drivers/net/wireless/b43/tables_nphy.c | 3168 ++++ drivers/net/wireless/b43/tables_nphy.h | 203 + drivers/net/wireless/b43/wa.c | 636 + drivers/net/wireless/b43/wa.h | 7 + drivers/net/wireless/b43/xmit.c | 799 + drivers/net/wireless/b43/xmit.h | 354 + drivers/net/wireless/b43legacy/Makefile | 19 + drivers/net/wireless/b43legacy/b43legacy.h | 833 ++ drivers/net/wireless/b43legacy/debugfs.c | 505 + drivers/net/wireless/b43legacy/debugfs.h | 89 + drivers/net/wireless/b43legacy/dma.c | 1689 +++ drivers/net/wireless/b43legacy/dma.h | 337 + drivers/net/wireless/b43legacy/ilt.c | 336 + drivers/net/wireless/b43legacy/ilt.h | 34 + drivers/net/wireless/b43legacy/leds.c | 243 + drivers/net/wireless/b43legacy/leds.h | 63 + drivers/net/wireless/b43legacy/main.c | 3990 +++++ drivers/net/wireless/b43legacy/main.h | 127 + drivers/net/wireless/b43legacy/phy.c | 2257 +++ drivers/net/wireless/b43legacy/phy.h | 209 + drivers/net/wireless/b43legacy/pio.c | 694 + drivers/net/wireless/b43legacy/pio.h | 158 + drivers/net/wireless/b43legacy/radio.c | 2162 +++ drivers/net/wireless/b43legacy/radio.h | 98 + drivers/net/wireless/b43legacy/rfkill.c | 91 + drivers/net/wireless/b43legacy/rfkill.h | 11 + drivers/net/wireless/b43legacy/sysfs.c | 238 + drivers/net/wireless/b43legacy/sysfs.h | 9 + drivers/net/wireless/b43legacy/xmit.c | 680 + drivers/net/wireless/b43legacy/xmit.h | 261 + drivers/net/wireless/ipw2x00/Makefile | 14 + drivers/net/wireless/ipw2x00/ipw2100.c | 8769 +++++++++++ drivers/net/wireless/ipw2x00/ipw2100.h | 1166 ++ drivers/net/wireless/ipw2x00/ipw2200.c | 12137 ++++++++++++++++ drivers/net/wireless/ipw2x00/ipw2200.h | 2017 +++ drivers/net/wireless/ipw2x00/libipw.h | 1092 ++ drivers/net/wireless/ipw2x00/libipw_geo.c | 195 + drivers/net/wireless/ipw2x00/libipw_module.c | 343 + drivers/net/wireless/ipw2x00/libipw_rx.c | 1798 +++ drivers/net/wireless/ipw2x00/libipw_tx.c | 546 + drivers/net/wireless/ipw2x00/libipw_wx.c | 771 + drivers/net/wireless/iwlwifi/Makefile | 23 + drivers/net/wireless/iwlwifi/iwl-1000.c | 281 + drivers/net/wireless/iwlwifi/iwl-3945-fh.h | 188 + drivers/net/wireless/iwlwifi/iwl-3945-hw.h | 296 + 
drivers/net/wireless/iwlwifi/iwl-3945-led.c | 91 + drivers/net/wireless/iwlwifi/iwl-3945-led.h | 32 + drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 973 ++ drivers/net/wireless/iwlwifi/iwl-3945.c | 2859 ++++ drivers/net/wireless/iwlwifi/iwl-3945.h | 301 + drivers/net/wireless/iwlwifi/iwl-4965-hw.h | 816 ++ drivers/net/wireless/iwlwifi/iwl-4965.c | 2274 +++ drivers/net/wireless/iwlwifi/iwl-5000-hw.h | 121 + drivers/net/wireless/iwlwifi/iwl-5000.c | 1774 +++ drivers/net/wireless/iwlwifi/iwl-6000-hw.h | 81 + drivers/net/wireless/iwlwifi/iwl-6000.c | 548 + drivers/net/wireless/iwlwifi/iwl-agn-led.c | 85 + drivers/net/wireless/iwlwifi/iwl-agn-led.h | 32 + drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 3029 ++++ drivers/net/wireless/iwlwifi/iwl-agn-rs.h | 504 + drivers/net/wireless/iwlwifi/iwl-agn.c | 3943 +++++ drivers/net/wireless/iwlwifi/iwl-agn.c.orig | 3943 +++++ drivers/net/wireless/iwlwifi/iwl-calib.c | 901 ++ drivers/net/wireless/iwlwifi/iwl-calib.h | 84 + drivers/net/wireless/iwlwifi/iwl-commands.h | 3817 +++++ drivers/net/wireless/iwlwifi/iwl-core.c | 3443 +++++ drivers/net/wireless/iwlwifi/iwl-core.h | 706 + drivers/net/wireless/iwlwifi/iwl-csr.h | 451 + drivers/net/wireless/iwlwifi/iwl-debug.h | 194 + drivers/net/wireless/iwlwifi/iwl-debugfs.c | 2394 +++ drivers/net/wireless/iwlwifi/iwl-dev.h | 1446 ++ drivers/net/wireless/iwlwifi/iwl-devtrace.c | 42 + drivers/net/wireless/iwlwifi/iwl-devtrace.h | 271 + drivers/net/wireless/iwlwifi/iwl-eeprom.c | 1154 ++ drivers/net/wireless/iwlwifi/iwl-eeprom.h | 507 + drivers/net/wireless/iwlwifi/iwl-fh.h | 518 + drivers/net/wireless/iwlwifi/iwl-hcmd.c | 283 + drivers/net/wireless/iwlwifi/iwl-helpers.h | 170 + drivers/net/wireless/iwlwifi/iwl-io.h | 527 + drivers/net/wireless/iwlwifi/iwl-led.c | 225 + drivers/net/wireless/iwlwifi/iwl-led.h | 66 + drivers/net/wireless/iwlwifi/iwl-power.c | 970 ++ drivers/net/wireless/iwlwifi/iwl-power.h | 146 + drivers/net/wireless/iwlwifi/iwl-prph.h | 577 + drivers/net/wireless/iwlwifi/iwl-rx.c | 1214 ++ drivers/net/wireless/iwlwifi/iwl-scan.c | 987 ++ drivers/net/wireless/iwlwifi/iwl-spectrum.h | 92 + drivers/net/wireless/iwlwifi/iwl-sta.c | 1293 ++ drivers/net/wireless/iwlwifi/iwl-sta.h | 72 + drivers/net/wireless/iwlwifi/iwl-tx.c | 1626 +++ drivers/net/wireless/iwlwifi/iwl3945-base.c | 4313 ++++++ drivers/net/wireless/iwmc3200wifi/Makefile | 5 + drivers/net/wireless/iwmc3200wifi/bus.h | 57 + drivers/net/wireless/iwmc3200wifi/cfg80211.c | 856 ++ drivers/net/wireless/iwmc3200wifi/cfg80211.h | 31 + drivers/net/wireless/iwmc3200wifi/commands.c | 994 ++ drivers/net/wireless/iwmc3200wifi/commands.h | 508 + drivers/net/wireless/iwmc3200wifi/debug.h | 126 + drivers/net/wireless/iwmc3200wifi/debugfs.c | 573 + drivers/net/wireless/iwmc3200wifi/eeprom.c | 233 + drivers/net/wireless/iwmc3200wifi/eeprom.h | 127 + drivers/net/wireless/iwmc3200wifi/fw.c | 416 + drivers/net/wireless/iwmc3200wifi/fw.h | 100 + drivers/net/wireless/iwmc3200wifi/hal.c | 466 + drivers/net/wireless/iwmc3200wifi/hal.h | 236 + drivers/net/wireless/iwmc3200wifi/iwm.h | 364 + drivers/net/wireless/iwmc3200wifi/lmac.h | 484 + drivers/net/wireless/iwmc3200wifi/main.c | 842 ++ drivers/net/wireless/iwmc3200wifi/netdev.c | 188 + drivers/net/wireless/iwmc3200wifi/rx.c | 1666 +++ drivers/net/wireless/iwmc3200wifi/rx.h | 60 + drivers/net/wireless/iwmc3200wifi/sdio.c | 528 + drivers/net/wireless/iwmc3200wifi/sdio.h | 64 + drivers/net/wireless/iwmc3200wifi/tx.c | 528 + drivers/net/wireless/iwmc3200wifi/umac.h | 789 + drivers/net/wireless/libertas/Makefile | 23 + 
drivers/net/wireless/libertas/assoc.c | 2246 +++ drivers/net/wireless/libertas/assoc.h | 155 + drivers/net/wireless/libertas/cfg.c | 198 + drivers/net/wireless/libertas/cfg.h | 16 + drivers/net/wireless/libertas/cmd.c | 1608 ++ drivers/net/wireless/libertas/cmd.h | 130 + drivers/net/wireless/libertas/cmdresp.c | 494 + drivers/net/wireless/libertas/debugfs.c | 1006 ++ drivers/net/wireless/libertas/debugfs.h | 10 + drivers/net/wireless/libertas/decl.h | 53 + drivers/net/wireless/libertas/defs.h | 421 + drivers/net/wireless/libertas/dev.h | 208 + drivers/net/wireless/libertas/ethtool.c | 125 + drivers/net/wireless/libertas/host.h | 962 ++ drivers/net/wireless/libertas/if_cs.c | 1042 ++ drivers/net/wireless/libertas/if_sdio.c | 1230 ++ drivers/net/wireless/libertas/if_sdio.h | 56 + drivers/net/wireless/libertas/if_spi.c | 1109 ++ drivers/net/wireless/libertas/if_spi.h | 211 + drivers/net/wireless/libertas/if_usb.c | 1117 ++ drivers/net/wireless/libertas/if_usb.h | 105 + drivers/net/wireless/libertas/main.c | 1325 ++ drivers/net/wireless/libertas/main.c.orig | 1314 ++ drivers/net/wireless/libertas/mesh.c | 1164 ++ drivers/net/wireless/libertas/mesh.h | 110 + drivers/net/wireless/libertas/radiotap.h | 44 + drivers/net/wireless/libertas/rx.c | 376 + drivers/net/wireless/libertas/scan.c | 1353 ++ drivers/net/wireless/libertas/scan.h | 63 + drivers/net/wireless/libertas/tx.c | 204 + drivers/net/wireless/libertas/types.h | 266 + drivers/net/wireless/libertas/wext.c | 2348 +++ drivers/net/wireless/libertas/wext.h | 17 + drivers/net/wireless/libertas_tf/Makefile | 6 + drivers/net/wireless/libertas_tf/cmd.c | 668 + drivers/net/wireless/libertas_tf/if_usb.c | 767 + drivers/net/wireless/libertas_tf/if_usb.h | 98 + .../net/wireless/libertas_tf/libertas_tf.h | 514 + drivers/net/wireless/libertas_tf/main.c | 670 + drivers/net/wireless/mac80211_hwsim.c | 1472 ++ drivers/net/wireless/mwl8k.c | 4214 ++++++ drivers/net/wireless/p54/Makefile | 7 + drivers/net/wireless/p54/eeprom.c | 760 + drivers/net/wireless/p54/eeprom.h | 226 + drivers/net/wireless/p54/fwio.c | 716 + drivers/net/wireless/p54/led.c | 162 + drivers/net/wireless/p54/lmac.h | 558 + drivers/net/wireless/p54/main.c | 647 + drivers/net/wireless/p54/net2280.h | 452 + drivers/net/wireless/p54/p54.h | 257 + drivers/net/wireless/p54/p54pci.c | 675 + drivers/net/wireless/p54/p54pci.h | 110 + drivers/net/wireless/p54/p54spi.c | 734 + drivers/net/wireless/p54/p54spi.h | 125 + drivers/net/wireless/p54/p54spi_eeprom.h | 678 + drivers/net/wireless/p54/p54usb.c | 1073 ++ drivers/net/wireless/p54/p54usb.h | 148 + drivers/net/wireless/p54/txrx.c | 869 ++ drivers/net/wireless/rndis_wlan.c | 3118 ++++ drivers/net/wireless/rndis_wlan.c.orig | 3112 ++++ drivers/net/wireless/rt2x00/Makefile | 23 + drivers/net/wireless/rt2x00/rt2400pci.c | 1676 +++ drivers/net/wireless/rt2x00/rt2400pci.h | 953 ++ drivers/net/wireless/rt2x00/rt2500pci.c | 1974 +++ drivers/net/wireless/rt2x00/rt2500pci.h | 1235 ++ drivers/net/wireless/rt2x00/rt2500usb.c | 1924 +++ drivers/net/wireless/rt2x00/rt2500usb.h | 847 ++ drivers/net/wireless/rt2x00/rt2800.h | 1852 +++ drivers/net/wireless/rt2x00/rt2800lib.c | 2324 +++ drivers/net/wireless/rt2x00/rt2800lib.h | 150 + drivers/net/wireless/rt2x00/rt2800pci.c | 1288 ++ drivers/net/wireless/rt2x00/rt2800pci.h | 159 + drivers/net/wireless/rt2x00/rt2800usb.c | 1119 ++ drivers/net/wireless/rt2x00/rt2800usb.h | 189 + drivers/net/wireless/rt2x00/rt2x00.h | 1051 ++ drivers/net/wireless/rt2x00/rt2x00config.c | 212 + drivers/net/wireless/rt2x00/rt2x00crypto.c 
| 258 + drivers/net/wireless/rt2x00/rt2x00debug.c | 739 + drivers/net/wireless/rt2x00/rt2x00debug.h | 70 + drivers/net/wireless/rt2x00/rt2x00dev.c | 1014 ++ drivers/net/wireless/rt2x00/rt2x00dump.h | 121 + drivers/net/wireless/rt2x00/rt2x00firmware.c | 128 + drivers/net/wireless/rt2x00/rt2x00ht.c | 69 + drivers/net/wireless/rt2x00/rt2x00leds.c | 246 + drivers/net/wireless/rt2x00/rt2x00leds.h | 46 + drivers/net/wireless/rt2x00/rt2x00lib.h | 460 + drivers/net/wireless/rt2x00/rt2x00link.c | 394 + drivers/net/wireless/rt2x00/rt2x00mac.c | 672 + drivers/net/wireless/rt2x00/rt2x00pci.c | 407 + drivers/net/wireless/rt2x00/rt2x00pci.h | 135 + drivers/net/wireless/rt2x00/rt2x00queue.c | 921 ++ drivers/net/wireless/rt2x00/rt2x00queue.h | 639 + drivers/net/wireless/rt2x00/rt2x00reg.h | 272 + drivers/net/wireless/rt2x00/rt2x00soc.c | 159 + drivers/net/wireless/rt2x00/rt2x00soc.h | 44 + drivers/net/wireless/rt2x00/rt2x00usb.c | 741 + drivers/net/wireless/rt2x00/rt2x00usb.h | 451 + drivers/net/wireless/rt2x00/rt61pci.c | 2852 ++++ drivers/net/wireless/rt2x00/rt61pci.h | 1501 ++ drivers/net/wireless/rt2x00/rt73usb.c | 2461 ++++ drivers/net/wireless/rt2x00/rt73usb.h | 1076 ++ drivers/net/wireless/rtl818x/Makefile | 7 + drivers/net/wireless/rtl818x/rtl8180.h | 119 + drivers/net/wireless/rtl818x/rtl8180_dev.c | 1101 ++ .../net/wireless/rtl818x/rtl8180_grf5101.c | 180 + .../net/wireless/rtl818x/rtl8180_grf5101.h | 28 + .../net/wireless/rtl818x/rtl8180_max2820.c | 152 + .../net/wireless/rtl818x/rtl8180_max2820.h | 28 + .../net/wireless/rtl818x/rtl8180_rtl8225.c | 788 + .../net/wireless/rtl818x/rtl8180_rtl8225.h | 23 + drivers/net/wireless/rtl818x/rtl8180_sa2400.c | 202 + drivers/net/wireless/rtl818x/rtl8180_sa2400.h | 36 + drivers/net/wireless/rtl818x/rtl8187.h | 271 + drivers/net/wireless/rtl818x/rtl8187_dev.c | 1588 ++ drivers/net/wireless/rtl818x/rtl8187_leds.c | 245 + drivers/net/wireless/rtl818x/rtl8187_leds.h | 59 + drivers/net/wireless/rtl818x/rtl8187_rfkill.c | 64 + drivers/net/wireless/rtl818x/rtl8187_rfkill.h | 8 + .../net/wireless/rtl818x/rtl8187_rtl8225.c | 983 ++ .../net/wireless/rtl818x/rtl8187_rtl8225.h | 44 + drivers/net/wireless/rtl818x/rtl818x.h | 243 + drivers/net/wireless/wl12xx/Makefile | 16 + drivers/net/wireless/wl12xx/wl1251.h | 427 + drivers/net/wireless/wl12xx/wl1251_acx.c | 1047 ++ drivers/net/wireless/wl12xx/wl1251_acx.h | 1413 ++ drivers/net/wireless/wl12xx/wl1251_boot.c | 557 + drivers/net/wireless/wl12xx/wl1251_boot.h | 41 + drivers/net/wireless/wl12xx/wl1251_cmd.c | 495 + drivers/net/wireless/wl12xx/wl1251_cmd.h | 417 + drivers/net/wireless/wl12xx/wl1251_debugfs.c | 541 + drivers/net/wireless/wl12xx/wl1251_debugfs.h | 33 + drivers/net/wireless/wl12xx/wl1251_event.c | 143 + drivers/net/wireless/wl12xx/wl1251_event.h | 121 + drivers/net/wireless/wl12xx/wl1251_init.c | 424 + drivers/net/wireless/wl12xx/wl1251_init.h | 88 + drivers/net/wireless/wl12xx/wl1251_io.c | 196 + drivers/net/wireless/wl12xx/wl1251_io.h | 64 + drivers/net/wireless/wl12xx/wl1251_main.c | 1362 ++ drivers/net/wireless/wl12xx/wl1251_ps.c | 196 + drivers/net/wireless/wl12xx/wl1251_ps.h | 37 + drivers/net/wireless/wl12xx/wl1251_reg.h | 650 + drivers/net/wireless/wl12xx/wl1251_rx.c | 193 + drivers/net/wireless/wl12xx/wl1251_rx.h | 124 + drivers/net/wireless/wl12xx/wl1251_sdio.c | 205 + drivers/net/wireless/wl12xx/wl1251_spi.c | 349 + drivers/net/wireless/wl12xx/wl1251_spi.h | 61 + drivers/net/wireless/wl12xx/wl1251_tx.c | 560 + drivers/net/wireless/wl12xx/wl1251_tx.h | 233 + drivers/net/wireless/wl12xx/wl1271.h | 
497 + drivers/net/wireless/wl12xx/wl1271_acx.c | 1144 ++ drivers/net/wireless/wl12xx/wl1271_acx.h | 1089 ++ drivers/net/wireless/wl12xx/wl1271_boot.c | 551 + drivers/net/wireless/wl12xx/wl1271_boot.h | 66 + drivers/net/wireless/wl12xx/wl1271_cmd.c | 983 ++ drivers/net/wireless/wl12xx/wl1271_cmd.h | 521 + drivers/net/wireless/wl12xx/wl1271_conf.h | 795 + drivers/net/wireless/wl12xx/wl1271_debugfs.c | 580 + drivers/net/wireless/wl12xx/wl1271_debugfs.h | 33 + drivers/net/wireless/wl12xx/wl1271_event.c | 248 + drivers/net/wireless/wl12xx/wl1271_event.h | 117 + drivers/net/wireless/wl12xx/wl1271_init.c | 334 + drivers/net/wireless/wl12xx/wl1271_init.h | 36 + drivers/net/wireless/wl12xx/wl1271_io.c | 213 + drivers/net/wireless/wl12xx/wl1271_io.h | 68 + drivers/net/wireless/wl12xx/wl1271_main.c | 2239 +++ drivers/net/wireless/wl12xx/wl1271_ps.c | 165 + drivers/net/wireless/wl12xx/wl1271_ps.h | 36 + drivers/net/wireless/wl12xx/wl1271_reg.h | 614 + drivers/net/wireless/wl12xx/wl1271_rx.c | 223 + drivers/net/wireless/wl12xx/wl1271_rx.h | 121 + drivers/net/wireless/wl12xx/wl1271_spi.c | 257 + drivers/net/wireless/wl12xx/wl1271_spi.h | 96 + drivers/net/wireless/wl12xx/wl1271_testmode.c | 283 + drivers/net/wireless/wl12xx/wl1271_testmode.h | 31 + drivers/net/wireless/wl12xx/wl1271_tx.c | 439 + drivers/net/wireless/wl12xx/wl1271_tx.h | 166 + drivers/net/wireless/wl12xx/wl12xx_80211.h | 156 + drivers/net/wireless/zd1211rw/Makefile | 11 + drivers/net/wireless/zd1211rw/zd_chip.c | 1499 ++ drivers/net/wireless/zd1211rw/zd_chip.h | 968 ++ drivers/net/wireless/zd1211rw/zd_def.h | 64 + drivers/net/wireless/zd1211rw/zd_mac.c | 1214 ++ drivers/net/wireless/zd1211rw/zd_mac.h | 312 + drivers/net/wireless/zd1211rw/zd_rf.c | 182 + drivers/net/wireless/zd1211rw/zd_rf.h | 111 + drivers/net/wireless/zd1211rw/zd_rf_al2230.c | 442 + drivers/net/wireless/zd1211rw/zd_rf_al7230b.c | 495 + drivers/net/wireless/zd1211rw/zd_rf_rf2959.c | 282 + drivers/net/wireless/zd1211rw/zd_rf_uw2453.c | 537 + drivers/net/wireless/zd1211rw/zd_usb.c | 1578 ++ drivers/net/wireless/zd1211rw/zd_usb.h | 266 + drivers/ssb/Makefile | 23 + drivers/ssb/b43_pci_bridge.c | 53 + drivers/ssb/driver_chipcommon.c | 486 + drivers/ssb/driver_chipcommon_pmu.c | 609 + drivers/ssb/driver_extif.c | 146 + drivers/ssb/driver_gige.c | 294 + drivers/ssb/driver_mipscore.c | 294 + drivers/ssb/driver_pcicore.c | 629 + drivers/ssb/embedded.c | 223 + drivers/ssb/main.c | 1453 ++ drivers/ssb/pci.c | 926 ++ drivers/ssb/pcihost_wrapper.c | 114 + drivers/ssb/pcmcia.c | 864 ++ drivers/ssb/scan.c | 439 + drivers/ssb/sdio.c | 610 + drivers/ssb/sprom.c | 177 + drivers/ssb/ssb_private.h | 209 + enable-older-kernels/README | 12 + enable-older-kernels/enable-2.6.21.patch | 26 + enable-older-kernels/enable-2.6.22.patch | 26 + enable-older-kernels/enable-2.6.23.patch | 26 + enable-older-kernels/enable-2.6.24.patch | 26 + git-describe | 2 + include/linux/ath9k_platform.h | 28 + include/linux/bitops.h | 196 + include/linux/compat-2.6.14.h | 13 + include/linux/compat-2.6.18.h | 13 + include/linux/compat-2.6.19.h | 24 + include/linux/compat-2.6.21.h | 18 + include/linux/compat-2.6.22.h | 104 + include/linux/compat-2.6.23.h | 136 + include/linux/compat-2.6.24.h | 257 + include/linux/compat-2.6.25.h | 177 + include/linux/compat-2.6.26.h | 381 + include/linux/compat-2.6.27.h | 225 + include/linux/compat-2.6.28.h | 218 + include/linux/compat-2.6.29.h | 66 + include/linux/compat-2.6.30.h | 28 + include/linux/compat-2.6.31.h | 197 + include/linux/compat-2.6.32.h | 97 + 
include/linux/compat-2.6.33.h | 108 + include/linux/compat-2.6.34.h | 160 + include/linux/compat-2.6.h | 31 + include/linux/eeprom_93cx6.h | 73 + include/linux/ieee80211.h | 1597 ++ include/linux/nl80211.h | 1586 ++ include/linux/pci_ids.h | 2739 ++++ include/linux/pm_qos_params.h | 26 + include/linux/rfkill_backport.h | 391 + include/linux/spi/libertas_spi.h | 29 + include/linux/spi/wl12xx.h | 32 + include/linux/ssb/ssb.h | 683 + include/linux/ssb/ssb_driver_chipcommon.h | 643 + include/linux/ssb/ssb_driver_extif.h | 214 + include/linux/ssb/ssb_driver_gige.h | 174 + include/linux/ssb/ssb_driver_mips.h | 46 + include/linux/ssb/ssb_driver_pci.h | 130 + include/linux/ssb/ssb_embedded.h | 18 + include/linux/ssb/ssb_regs.h | 456 + include/linux/unaligned/access_ok.h | 67 + include/linux/unaligned/be_byteshift.h | 70 + include/linux/unaligned/be_memmove.h | 36 + include/linux/unaligned/be_struct.h | 36 + include/linux/unaligned/generic.h | 68 + include/linux/unaligned/le_byteshift.h | 70 + include/linux/unaligned/le_memmove.h | 36 + include/linux/unaligned/le_struct.h | 36 + include/linux/unaligned/memmove.h | 45 + include/linux/unaligned/packed_struct.h | 46 + include/linux/usb/rndis_host.h | 270 + include/linux/usb/usbnet.h | 217 + include/linux/wireless.h | 1160 ++ include/net/bluetooth/bluetooth.h | 181 + include/net/bluetooth/hci.h | 1036 ++ include/net/bluetooth/hci_core.h | 678 + include/net/bluetooth/l2cap.h | 405 + include/net/bluetooth/rfcomm.h | 373 + include/net/cfg80211.h | 2341 +++ include/net/ieee80211_radiotap.h | 265 + include/net/lib80211.h | 129 + include/net/mac80211.h | 2536 ++++ include/net/regulatory.h | 102 + include/net/wext.h | 60 + linux-next-cherry-picks/README | 16 + net/bluetooth/Makefile | 13 + net/bluetooth/af_bluetooth.c | 471 + net/bluetooth/bnep/Makefile | 7 + net/bluetooth/bnep/bnep.h | 179 + net/bluetooth/bnep/core.c | 747 + net/bluetooth/bnep/netdev.c | 260 + net/bluetooth/bnep/sock.c | 262 + net/bluetooth/cmtp/Makefile | 7 + net/bluetooth/cmtp/capi.c | 664 + net/bluetooth/cmtp/cmtp.h | 135 + net/bluetooth/cmtp/core.c | 493 + net/bluetooth/cmtp/sock.c | 257 + net/bluetooth/hci_conn.c | 705 + net/bluetooth/hci_core.c | 1656 +++ net/bluetooth/hci_event.c | 1994 +++ net/bluetooth/hci_sock.c | 748 + net/bluetooth/hci_sock.c.orig | 740 + net/bluetooth/hci_sysfs.c | 524 + net/bluetooth/hci_sysfs.c.orig | 512 + net/bluetooth/hidp/Makefile | 7 + net/bluetooth/hidp/core.c | 1186 ++ net/bluetooth/hidp/hidp.h | 175 + net/bluetooth/hidp/sock.c | 308 + net/bluetooth/l2cap.c | 4084 ++++++ net/bluetooth/lib.c | 152 + net/bluetooth/rfcomm/Makefile | 8 + net/bluetooth/rfcomm/core.c | 2204 +++ net/bluetooth/rfcomm/sock.c | 1159 ++ net/bluetooth/rfcomm/tty.c | 1200 ++ net/bluetooth/sco.c | 1079 ++ net/mac80211/Makefile | 56 + net/mac80211/aes_ccm.c | 154 + net/mac80211/aes_ccm.h | 26 + net/mac80211/aes_cmac.c | 135 + net/mac80211/aes_cmac.h | 19 + net/mac80211/agg-rx.c | 310 + net/mac80211/agg-tx.c | 696 + net/mac80211/cfg.c | 1511 ++ net/mac80211/cfg.h | 9 + net/mac80211/debugfs.c | 485 + net/mac80211/debugfs.h | 14 + net/mac80211/debugfs_key.c | 345 + net/mac80211/debugfs_key.h | 37 + net/mac80211/debugfs_netdev.c | 421 + net/mac80211/debugfs_netdev.h | 22 + net/mac80211/debugfs_sta.c | 301 + net/mac80211/debugfs_sta.h | 14 + net/mac80211/driver-ops.h | 366 + net/mac80211/driver-trace.c | 9 + net/mac80211/driver-trace.h | 779 + net/mac80211/event.c | 27 + net/mac80211/ht.c | 234 + net/mac80211/ibss.c | 964 ++ net/mac80211/ieee80211_i.h | 1219 ++ net/mac80211/iface.c | 1111 
++ net/mac80211/key.c | 629 + net/mac80211/key.h | 160 + net/mac80211/led.c | 161 + net/mac80211/led.h | 44 + net/mac80211/main.c | 756 + net/mac80211/mesh.c | 763 + net/mac80211/mesh.h | 347 + net/mac80211/mesh_hwmp.c | 1017 ++ net/mac80211/mesh_pathtbl.c | 742 + net/mac80211/mesh_plink.c | 754 + net/mac80211/michael.c | 86 + net/mac80211/michael.h | 24 + net/mac80211/mlme.c | 2134 +++ net/mac80211/offchannel.c | 170 + net/mac80211/pm.c | 119 + net/mac80211/rate.c | 399 + net/mac80211/rate.h | 151 + net/mac80211/rc80211_minstrel.c | 570 + net/mac80211/rc80211_minstrel.h | 86 + net/mac80211/rc80211_minstrel_debugfs.c | 164 + net/mac80211/rc80211_pid.h | 281 + net/mac80211/rc80211_pid_algo.c | 476 + net/mac80211/rc80211_pid_debugfs.c | 225 + net/mac80211/rx.c | 2587 ++++ net/mac80211/scan.c | 706 + net/mac80211/spectmgmt.c | 86 + net/mac80211/sta_info.c | 987 ++ net/mac80211/sta_info.h | 474 + net/mac80211/status.c | 388 + net/mac80211/tkip.c | 342 + net/mac80211/tkip.h | 33 + net/mac80211/tx.c | 2493 ++++ net/mac80211/util.c | 1394 ++ net/mac80211/wep.c | 340 + net/mac80211/wep.h | 35 + net/mac80211/wme.c | 159 + net/mac80211/wme.h | 30 + net/mac80211/work.c | 1100 ++ net/mac80211/wpa.c | 602 + net/mac80211/wpa.h | 36 + net/rfkill/Makefile | 7 + net/rfkill/core.c | 1252 ++ net/rfkill/input.c | 354 + net/rfkill/input.c.orig | 350 + net/rfkill/rfkill.h | 27 + net/wireless/Makefile | 18 + net/wireless/chan.c | 102 + net/wireless/core.c | 925 ++ net/wireless/core.h | 412 + net/wireless/debugfs.c | 118 + net/wireless/debugfs.h | 11 + net/wireless/ethtool.c | 45 + net/wireless/ethtool.h | 6 + net/wireless/ibss.c | 509 + net/wireless/lib80211.c | 284 + net/wireless/lib80211_crypt_ccmp.c | 494 + net/wireless/lib80211_crypt_tkip.c | 787 + net/wireless/lib80211_crypt_wep.c | 296 + net/wireless/mlme.c | 896 ++ net/wireless/nl80211.c | 5914 ++++++++ net/wireless/nl80211.h | 85 + net/wireless/radiotap.c | 350 + net/wireless/reg.c | 2729 ++++ net/wireless/reg.h | 84 + net/wireless/regdb.h | 7 + net/wireless/scan.c | 1111 ++ net/wireless/sme.c | 999 ++ net/wireless/sysfs.c | 133 + net/wireless/sysfs.h | 9 + net/wireless/util.c | 853 ++ net/wireless/wext-compat.c | 1508 ++ net/wireless/wext-compat.h | 49 + net/wireless/wext-core.c | 1103 ++ net/wireless/wext-priv.c | 248 + net/wireless/wext-proc.c | 159 + net/wireless/wext-sme.c | 402 + net/wireless/wext-spy.c | 231 + patches/01-netdev.patch | 802 + patches/02-ksize.patch | 44 + patches/03-rfkill.patch | 233 + patches/04-netns.patch | 142 + patches/05-usb.patch | 14 + patches/06-header-changes.patch | 126 + patches/07-change-default-rate-alg.patch | 34 + patches/08-rename-iwl4965-config.patch | 31 + patches/09-threaded-irq.patch | 63 + patches/10-add-wext-handlers-to-netdev.patch | 26 + patches/11-dev-pm-ops.patch | 58 + patches/12-iw_handler-changes.patch | 14 + patches/13-trace.patch | 44 + patches/14-device-type.patch | 56 + patches/15-symbol-export-conflicts.patch | 18 + patches/16-bluetooth.patch | 609 + patches/17-netdev-queue.patch | 47 + patches/18-rename-usb-net-symbols.patch | 52 + patches/19-kfifo.patch | 47 + patches/20-pcidev.patch | 18 + patches/21-capi-proc_fops.patch | 73 + patches/22-multiqueue.patch | 161 + patches/98-add-compat-wireless.patch | 30 + patches/99-change-makefiles.patch | 90 + patches/README | 16 + scripts/admin-clean.sh | 13 + scripts/admin-refresh.sh | 3 + scripts/admin-update.sh | 338 + scripts/athenable | 46 + scripts/athload | 57 + scripts/b43enable | 59 + scripts/b43load | 65 + scripts/btload.sh | 10 + 
scripts/btunload.sh | 13 + scripts/check_config.sh | 33 + scripts/check_depmod | 83 + scripts/compress_modules | 40 + scripts/driver-select | 289 + scripts/gen-compat-autoconf.sh | 199 + scripts/gen-stable-release.sh | 90 + scripts/iwl-enable | 47 + scripts/iwl-load | 58 + scripts/load.sh | 26 + scripts/madwifi-unload | 58 + scripts/modlib.sh | 87 + scripts/unload.sh | 58 + scripts/update-initramfs | 33 + scripts/wlload.sh | 22 + scripts/wlunload.sh | 54 + udev/50-compat_firmware.rules | 4 + udev/compat_firmware.sh | 35 + udev/ubuntu/50-compat_firmware.rules | 4 + udev/ubuntu/compat_firmware.sh | 29 + 780 files changed, 486531 insertions(+) create mode 100644 .gitignore create mode 100644 COPYRIGHT create mode 100644 Makefile create mode 100644 README create mode 100644 compat-release create mode 100644 compat/Makefile create mode 100644 compat/compat-2.6.14.c create mode 100644 compat/compat-2.6.18.c create mode 100644 compat/compat-2.6.19.c create mode 100644 compat/compat-2.6.21.c create mode 100644 compat/compat-2.6.22.c create mode 100644 compat/compat-2.6.23.c create mode 100644 compat/compat-2.6.24.c create mode 100644 compat/compat-2.6.25.c create mode 100644 compat/compat-2.6.26.c create mode 100644 compat/compat-2.6.27.c create mode 100644 compat/compat-2.6.28.c create mode 100644 compat/compat-2.6.29.c create mode 100644 compat/compat-2.6.30.c create mode 100644 compat/compat-2.6.31.c create mode 100644 compat/compat-2.6.32.c create mode 100644 compat/compat-2.6.33.c create mode 100644 compat/compat_firmware_class.c create mode 100644 compat/main.c create mode 100644 compat/pm_qos_params.c create mode 100755 compat/scripts/compat_firmware_install create mode 100644 config.mk create mode 100644 drivers/bluetooth/Makefile create mode 100644 drivers/bluetooth/ath3k.c create mode 100644 drivers/bluetooth/bcm203x.c create mode 100644 drivers/bluetooth/bfusb.c create mode 100644 drivers/bluetooth/bluecard_cs.c create mode 100644 drivers/bluetooth/bpa10x.c create mode 100644 drivers/bluetooth/bt3c_cs.c create mode 100644 drivers/bluetooth/btmrvl_debugfs.c create mode 100644 drivers/bluetooth/btmrvl_drv.h create mode 100644 drivers/bluetooth/btmrvl_main.c create mode 100644 drivers/bluetooth/btmrvl_sdio.c create mode 100644 drivers/bluetooth/btmrvl_sdio.h create mode 100644 drivers/bluetooth/btsdio.c create mode 100644 drivers/bluetooth/btuart_cs.c create mode 100644 drivers/bluetooth/btusb.c create mode 100644 drivers/bluetooth/dtl1_cs.c create mode 100644 drivers/bluetooth/hci_bcsp.c create mode 100644 drivers/bluetooth/hci_h4.c create mode 100644 drivers/bluetooth/hci_ldisc.c create mode 100644 drivers/bluetooth/hci_ll.c create mode 100644 drivers/bluetooth/hci_uart.h create mode 100644 drivers/bluetooth/hci_vhci.c create mode 100644 drivers/misc/eeprom/Makefile create mode 100644 drivers/misc/eeprom/eeprom_93cx6.c create mode 100644 drivers/net/Makefile create mode 100644 drivers/net/atl1c/Makefile create mode 100644 drivers/net/atl1c/atl1c.h create mode 100644 drivers/net/atl1c/atl1c_ethtool.c create mode 100644 drivers/net/atl1c/atl1c_hw.c create mode 100644 drivers/net/atl1c/atl1c_hw.h create mode 100644 drivers/net/atl1c/atl1c_main.c create mode 100644 drivers/net/atl1e/Makefile create mode 100644 drivers/net/atl1e/atl1e.h create mode 100644 drivers/net/atl1e/atl1e_ethtool.c create mode 100644 drivers/net/atl1e/atl1e_hw.c create mode 100644 drivers/net/atl1e/atl1e_hw.h create mode 100644 drivers/net/atl1e/atl1e_main.c create mode 100644 drivers/net/atl1e/atl1e_param.c create 
mode 100644 drivers/net/atlx/Makefile create mode 100644 drivers/net/atlx/atl1.c create mode 100644 drivers/net/atlx/atl1.h create mode 100644 drivers/net/atlx/atl2.c create mode 100644 drivers/net/atlx/atl2.h create mode 100644 drivers/net/atlx/atlx.c create mode 100644 drivers/net/atlx/atlx.h create mode 100644 drivers/net/b44.c create mode 100644 drivers/net/b44.h create mode 100644 drivers/net/usb/Makefile create mode 100644 drivers/net/usb/cdc_ether.c create mode 100644 drivers/net/usb/rndis_host.c create mode 100644 drivers/net/usb/usbnet.c create mode 100644 drivers/net/wireless/Makefile create mode 100644 drivers/net/wireless/adm8211.c create mode 100644 drivers/net/wireless/adm8211.h create mode 100644 drivers/net/wireless/at76c50x-usb.c create mode 100644 drivers/net/wireless/at76c50x-usb.h create mode 100644 drivers/net/wireless/ath/Makefile create mode 100644 drivers/net/wireless/ath/ar9170/Makefile create mode 100644 drivers/net/wireless/ath/ar9170/ar9170.h create mode 100644 drivers/net/wireless/ath/ar9170/cmd.c create mode 100644 drivers/net/wireless/ath/ar9170/cmd.h create mode 100644 drivers/net/wireless/ath/ar9170/eeprom.h create mode 100644 drivers/net/wireless/ath/ar9170/hw.h create mode 100644 drivers/net/wireless/ath/ar9170/led.c create mode 100644 drivers/net/wireless/ath/ar9170/mac.c create mode 100644 drivers/net/wireless/ath/ar9170/main.c create mode 100644 drivers/net/wireless/ath/ar9170/phy.c create mode 100644 drivers/net/wireless/ath/ar9170/usb.c create mode 100644 drivers/net/wireless/ath/ar9170/usb.h create mode 100644 drivers/net/wireless/ath/ath.h create mode 100644 drivers/net/wireless/ath/ath5k/Makefile create mode 100644 drivers/net/wireless/ath/ath5k/ath5k.h create mode 100644 drivers/net/wireless/ath/ath5k/attach.c create mode 100644 drivers/net/wireless/ath/ath5k/base.c create mode 100644 drivers/net/wireless/ath/ath5k/base.h create mode 100644 drivers/net/wireless/ath/ath5k/caps.c create mode 100644 drivers/net/wireless/ath/ath5k/debug.c create mode 100644 drivers/net/wireless/ath/ath5k/debug.h create mode 100644 drivers/net/wireless/ath/ath5k/desc.c create mode 100644 drivers/net/wireless/ath/ath5k/desc.h create mode 100644 drivers/net/wireless/ath/ath5k/dma.c create mode 100644 drivers/net/wireless/ath/ath5k/eeprom.c create mode 100644 drivers/net/wireless/ath/ath5k/eeprom.h create mode 100644 drivers/net/wireless/ath/ath5k/gpio.c create mode 100644 drivers/net/wireless/ath/ath5k/initvals.c create mode 100644 drivers/net/wireless/ath/ath5k/led.c create mode 100644 drivers/net/wireless/ath/ath5k/pcu.c create mode 100644 drivers/net/wireless/ath/ath5k/phy.c create mode 100644 drivers/net/wireless/ath/ath5k/qcu.c create mode 100644 drivers/net/wireless/ath/ath5k/reg.h create mode 100644 drivers/net/wireless/ath/ath5k/reset.c create mode 100644 drivers/net/wireless/ath/ath5k/rfbuffer.h create mode 100644 drivers/net/wireless/ath/ath5k/rfgain.h create mode 100644 drivers/net/wireless/ath/ath5k/rfkill.c create mode 100644 drivers/net/wireless/ath/ath9k/Makefile create mode 100644 drivers/net/wireless/ath/ath9k/ahb.c create mode 100644 drivers/net/wireless/ath/ath9k/ani.c create mode 100644 drivers/net/wireless/ath/ath9k/ani.h create mode 100644 drivers/net/wireless/ath/ath9k/ath9k.h create mode 100644 drivers/net/wireless/ath/ath9k/beacon.c create mode 100644 drivers/net/wireless/ath/ath9k/btcoex.c create mode 100644 drivers/net/wireless/ath/ath9k/btcoex.h create mode 100644 drivers/net/wireless/ath/ath9k/calib.c create mode 100644 
drivers/net/wireless/ath/ath9k/calib.h create mode 100644 drivers/net/wireless/ath/ath9k/common.c create mode 100644 drivers/net/wireless/ath/ath9k/common.h create mode 100644 drivers/net/wireless/ath/ath9k/debug.c create mode 100644 drivers/net/wireless/ath/ath9k/debug.h create mode 100644 drivers/net/wireless/ath/ath9k/eeprom.c create mode 100644 drivers/net/wireless/ath/ath9k/eeprom.h create mode 100644 drivers/net/wireless/ath/ath9k/eeprom_4k.c create mode 100644 drivers/net/wireless/ath/ath9k/eeprom_9287.c create mode 100644 drivers/net/wireless/ath/ath9k/eeprom_def.c create mode 100644 drivers/net/wireless/ath/ath9k/gpio.c create mode 100644 drivers/net/wireless/ath/ath9k/hw.c create mode 100644 drivers/net/wireless/ath/ath9k/hw.c.orig create mode 100644 drivers/net/wireless/ath/ath9k/hw.h create mode 100644 drivers/net/wireless/ath/ath9k/init.c create mode 100644 drivers/net/wireless/ath/ath9k/initvals.h create mode 100644 drivers/net/wireless/ath/ath9k/mac.c create mode 100644 drivers/net/wireless/ath/ath9k/mac.h create mode 100644 drivers/net/wireless/ath/ath9k/main.c create mode 100644 drivers/net/wireless/ath/ath9k/pci.c create mode 100644 drivers/net/wireless/ath/ath9k/phy.c create mode 100644 drivers/net/wireless/ath/ath9k/phy.h create mode 100644 drivers/net/wireless/ath/ath9k/rc.c create mode 100644 drivers/net/wireless/ath/ath9k/rc.h create mode 100644 drivers/net/wireless/ath/ath9k/recv.c create mode 100644 drivers/net/wireless/ath/ath9k/reg.h create mode 100644 drivers/net/wireless/ath/ath9k/virtual.c create mode 100644 drivers/net/wireless/ath/ath9k/xmit.c create mode 100644 drivers/net/wireless/ath/debug.c create mode 100644 drivers/net/wireless/ath/debug.h create mode 100644 drivers/net/wireless/ath/hw.c create mode 100644 drivers/net/wireless/ath/main.c create mode 100644 drivers/net/wireless/ath/reg.h create mode 100644 drivers/net/wireless/ath/regd.c create mode 100644 drivers/net/wireless/ath/regd.h create mode 100644 drivers/net/wireless/ath/regd_common.h create mode 100644 drivers/net/wireless/b43/Makefile create mode 100644 drivers/net/wireless/b43/b43.h create mode 100644 drivers/net/wireless/b43/b43.h.orig create mode 100644 drivers/net/wireless/b43/debugfs.c create mode 100644 drivers/net/wireless/b43/debugfs.h create mode 100644 drivers/net/wireless/b43/dma.c create mode 100644 drivers/net/wireless/b43/dma.h create mode 100644 drivers/net/wireless/b43/leds.c create mode 100644 drivers/net/wireless/b43/leds.h create mode 100644 drivers/net/wireless/b43/lo.c create mode 100644 drivers/net/wireless/b43/lo.h create mode 100644 drivers/net/wireless/b43/main.c create mode 100644 drivers/net/wireless/b43/main.c.orig create mode 100644 drivers/net/wireless/b43/main.h create mode 100644 drivers/net/wireless/b43/pcmcia.c create mode 100644 drivers/net/wireless/b43/pcmcia.h create mode 100644 drivers/net/wireless/b43/phy_a.c create mode 100644 drivers/net/wireless/b43/phy_a.h create mode 100644 drivers/net/wireless/b43/phy_common.c create mode 100644 drivers/net/wireless/b43/phy_common.h create mode 100644 drivers/net/wireless/b43/phy_g.c create mode 100644 drivers/net/wireless/b43/phy_g.h create mode 100644 drivers/net/wireless/b43/phy_lp.c create mode 100644 drivers/net/wireless/b43/phy_lp.h create mode 100644 drivers/net/wireless/b43/phy_n.c create mode 100644 drivers/net/wireless/b43/phy_n.h create mode 100644 drivers/net/wireless/b43/pio.c create mode 100644 drivers/net/wireless/b43/pio.h create mode 100644 drivers/net/wireless/b43/rfkill.c create mode 100644 
drivers/net/wireless/b43/rfkill.h create mode 100644 drivers/net/wireless/b43/sdio.c create mode 100644 drivers/net/wireless/b43/sdio.h create mode 100644 drivers/net/wireless/b43/sysfs.c create mode 100644 drivers/net/wireless/b43/sysfs.h create mode 100644 drivers/net/wireless/b43/tables.c create mode 100644 drivers/net/wireless/b43/tables.h create mode 100644 drivers/net/wireless/b43/tables_lpphy.c create mode 100644 drivers/net/wireless/b43/tables_lpphy.h create mode 100644 drivers/net/wireless/b43/tables_nphy.c create mode 100644 drivers/net/wireless/b43/tables_nphy.h create mode 100644 drivers/net/wireless/b43/wa.c create mode 100644 drivers/net/wireless/b43/wa.h create mode 100644 drivers/net/wireless/b43/xmit.c create mode 100644 drivers/net/wireless/b43/xmit.h create mode 100644 drivers/net/wireless/b43legacy/Makefile create mode 100644 drivers/net/wireless/b43legacy/b43legacy.h create mode 100644 drivers/net/wireless/b43legacy/debugfs.c create mode 100644 drivers/net/wireless/b43legacy/debugfs.h create mode 100644 drivers/net/wireless/b43legacy/dma.c create mode 100644 drivers/net/wireless/b43legacy/dma.h create mode 100644 drivers/net/wireless/b43legacy/ilt.c create mode 100644 drivers/net/wireless/b43legacy/ilt.h create mode 100644 drivers/net/wireless/b43legacy/leds.c create mode 100644 drivers/net/wireless/b43legacy/leds.h create mode 100644 drivers/net/wireless/b43legacy/main.c create mode 100644 drivers/net/wireless/b43legacy/main.h create mode 100644 drivers/net/wireless/b43legacy/phy.c create mode 100644 drivers/net/wireless/b43legacy/phy.h create mode 100644 drivers/net/wireless/b43legacy/pio.c create mode 100644 drivers/net/wireless/b43legacy/pio.h create mode 100644 drivers/net/wireless/b43legacy/radio.c create mode 100644 drivers/net/wireless/b43legacy/radio.h create mode 100644 drivers/net/wireless/b43legacy/rfkill.c create mode 100644 drivers/net/wireless/b43legacy/rfkill.h create mode 100644 drivers/net/wireless/b43legacy/sysfs.c create mode 100644 drivers/net/wireless/b43legacy/sysfs.h create mode 100644 drivers/net/wireless/b43legacy/xmit.c create mode 100644 drivers/net/wireless/b43legacy/xmit.h create mode 100644 drivers/net/wireless/ipw2x00/Makefile create mode 100644 drivers/net/wireless/ipw2x00/ipw2100.c create mode 100644 drivers/net/wireless/ipw2x00/ipw2100.h create mode 100644 drivers/net/wireless/ipw2x00/ipw2200.c create mode 100644 drivers/net/wireless/ipw2x00/ipw2200.h create mode 100644 drivers/net/wireless/ipw2x00/libipw.h create mode 100644 drivers/net/wireless/ipw2x00/libipw_geo.c create mode 100644 drivers/net/wireless/ipw2x00/libipw_module.c create mode 100644 drivers/net/wireless/ipw2x00/libipw_rx.c create mode 100644 drivers/net/wireless/ipw2x00/libipw_tx.c create mode 100644 drivers/net/wireless/ipw2x00/libipw_wx.c create mode 100644 drivers/net/wireless/iwlwifi/Makefile create mode 100644 drivers/net/wireless/iwlwifi/iwl-1000.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-3945-fh.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-3945-hw.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-3945-led.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-3945-led.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-3945-rs.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-3945.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-3945.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-4965-hw.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-4965.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-5000-hw.h create mode 
100644 drivers/net/wireless/iwlwifi/iwl-5000.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-6000-hw.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-6000.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-agn-led.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-agn-led.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-agn-rs.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-agn-rs.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-agn.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-agn.c.orig create mode 100644 drivers/net/wireless/iwlwifi/iwl-calib.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-calib.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-commands.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-core.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-core.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-csr.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-debug.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-debugfs.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-dev.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-devtrace.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-devtrace.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-eeprom.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-eeprom.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-fh.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-hcmd.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-helpers.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-io.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-led.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-led.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-power.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-power.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-prph.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-rx.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-scan.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-spectrum.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-sta.c create mode 100644 drivers/net/wireless/iwlwifi/iwl-sta.h create mode 100644 drivers/net/wireless/iwlwifi/iwl-tx.c create mode 100644 drivers/net/wireless/iwlwifi/iwl3945-base.c create mode 100644 drivers/net/wireless/iwmc3200wifi/Makefile create mode 100644 drivers/net/wireless/iwmc3200wifi/bus.h create mode 100644 drivers/net/wireless/iwmc3200wifi/cfg80211.c create mode 100644 drivers/net/wireless/iwmc3200wifi/cfg80211.h create mode 100644 drivers/net/wireless/iwmc3200wifi/commands.c create mode 100644 drivers/net/wireless/iwmc3200wifi/commands.h create mode 100644 drivers/net/wireless/iwmc3200wifi/debug.h create mode 100644 drivers/net/wireless/iwmc3200wifi/debugfs.c create mode 100644 drivers/net/wireless/iwmc3200wifi/eeprom.c create mode 100644 drivers/net/wireless/iwmc3200wifi/eeprom.h create mode 100644 drivers/net/wireless/iwmc3200wifi/fw.c create mode 100644 drivers/net/wireless/iwmc3200wifi/fw.h create mode 100644 drivers/net/wireless/iwmc3200wifi/hal.c create mode 100644 drivers/net/wireless/iwmc3200wifi/hal.h create mode 100644 drivers/net/wireless/iwmc3200wifi/iwm.h create mode 100644 drivers/net/wireless/iwmc3200wifi/lmac.h create mode 100644 drivers/net/wireless/iwmc3200wifi/main.c create mode 100644 drivers/net/wireless/iwmc3200wifi/netdev.c create mode 100644 drivers/net/wireless/iwmc3200wifi/rx.c create mode 100644 drivers/net/wireless/iwmc3200wifi/rx.h create mode 100644 drivers/net/wireless/iwmc3200wifi/sdio.c create mode 
100644 drivers/net/wireless/iwmc3200wifi/sdio.h create mode 100644 drivers/net/wireless/iwmc3200wifi/tx.c create mode 100644 drivers/net/wireless/iwmc3200wifi/umac.h create mode 100644 drivers/net/wireless/libertas/Makefile create mode 100644 drivers/net/wireless/libertas/assoc.c create mode 100644 drivers/net/wireless/libertas/assoc.h create mode 100644 drivers/net/wireless/libertas/cfg.c create mode 100644 drivers/net/wireless/libertas/cfg.h create mode 100644 drivers/net/wireless/libertas/cmd.c create mode 100644 drivers/net/wireless/libertas/cmd.h create mode 100644 drivers/net/wireless/libertas/cmdresp.c create mode 100644 drivers/net/wireless/libertas/debugfs.c create mode 100644 drivers/net/wireless/libertas/debugfs.h create mode 100644 drivers/net/wireless/libertas/decl.h create mode 100644 drivers/net/wireless/libertas/defs.h create mode 100644 drivers/net/wireless/libertas/dev.h create mode 100644 drivers/net/wireless/libertas/ethtool.c create mode 100644 drivers/net/wireless/libertas/host.h create mode 100644 drivers/net/wireless/libertas/if_cs.c create mode 100644 drivers/net/wireless/libertas/if_sdio.c create mode 100644 drivers/net/wireless/libertas/if_sdio.h create mode 100644 drivers/net/wireless/libertas/if_spi.c create mode 100644 drivers/net/wireless/libertas/if_spi.h create mode 100644 drivers/net/wireless/libertas/if_usb.c create mode 100644 drivers/net/wireless/libertas/if_usb.h create mode 100644 drivers/net/wireless/libertas/main.c create mode 100644 drivers/net/wireless/libertas/main.c.orig create mode 100644 drivers/net/wireless/libertas/mesh.c create mode 100644 drivers/net/wireless/libertas/mesh.h create mode 100644 drivers/net/wireless/libertas/radiotap.h create mode 100644 drivers/net/wireless/libertas/rx.c create mode 100644 drivers/net/wireless/libertas/scan.c create mode 100644 drivers/net/wireless/libertas/scan.h create mode 100644 drivers/net/wireless/libertas/tx.c create mode 100644 drivers/net/wireless/libertas/types.h create mode 100644 drivers/net/wireless/libertas/wext.c create mode 100644 drivers/net/wireless/libertas/wext.h create mode 100644 drivers/net/wireless/libertas_tf/Makefile create mode 100644 drivers/net/wireless/libertas_tf/cmd.c create mode 100644 drivers/net/wireless/libertas_tf/if_usb.c create mode 100644 drivers/net/wireless/libertas_tf/if_usb.h create mode 100644 drivers/net/wireless/libertas_tf/libertas_tf.h create mode 100644 drivers/net/wireless/libertas_tf/main.c create mode 100644 drivers/net/wireless/mac80211_hwsim.c create mode 100644 drivers/net/wireless/mwl8k.c create mode 100644 drivers/net/wireless/p54/Makefile create mode 100644 drivers/net/wireless/p54/eeprom.c create mode 100644 drivers/net/wireless/p54/eeprom.h create mode 100644 drivers/net/wireless/p54/fwio.c create mode 100644 drivers/net/wireless/p54/led.c create mode 100644 drivers/net/wireless/p54/lmac.h create mode 100644 drivers/net/wireless/p54/main.c create mode 100644 drivers/net/wireless/p54/net2280.h create mode 100644 drivers/net/wireless/p54/p54.h create mode 100644 drivers/net/wireless/p54/p54pci.c create mode 100644 drivers/net/wireless/p54/p54pci.h create mode 100644 drivers/net/wireless/p54/p54spi.c create mode 100644 drivers/net/wireless/p54/p54spi.h create mode 100644 drivers/net/wireless/p54/p54spi_eeprom.h create mode 100644 drivers/net/wireless/p54/p54usb.c create mode 100644 drivers/net/wireless/p54/p54usb.h create mode 100644 drivers/net/wireless/p54/txrx.c create mode 100644 drivers/net/wireless/rndis_wlan.c create mode 100644 
drivers/net/wireless/rndis_wlan.c.orig create mode 100644 drivers/net/wireless/rt2x00/Makefile create mode 100644 drivers/net/wireless/rt2x00/rt2400pci.c create mode 100644 drivers/net/wireless/rt2x00/rt2400pci.h create mode 100644 drivers/net/wireless/rt2x00/rt2500pci.c create mode 100644 drivers/net/wireless/rt2x00/rt2500pci.h create mode 100644 drivers/net/wireless/rt2x00/rt2500usb.c create mode 100644 drivers/net/wireless/rt2x00/rt2500usb.h create mode 100644 drivers/net/wireless/rt2x00/rt2800.h create mode 100644 drivers/net/wireless/rt2x00/rt2800lib.c create mode 100644 drivers/net/wireless/rt2x00/rt2800lib.h create mode 100644 drivers/net/wireless/rt2x00/rt2800pci.c create mode 100644 drivers/net/wireless/rt2x00/rt2800pci.h create mode 100644 drivers/net/wireless/rt2x00/rt2800usb.c create mode 100644 drivers/net/wireless/rt2x00/rt2800usb.h create mode 100644 drivers/net/wireless/rt2x00/rt2x00.h create mode 100644 drivers/net/wireless/rt2x00/rt2x00config.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00crypto.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00debug.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00debug.h create mode 100644 drivers/net/wireless/rt2x00/rt2x00dev.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00dump.h create mode 100644 drivers/net/wireless/rt2x00/rt2x00firmware.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00ht.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00leds.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00leds.h create mode 100644 drivers/net/wireless/rt2x00/rt2x00lib.h create mode 100644 drivers/net/wireless/rt2x00/rt2x00link.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00mac.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00pci.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00pci.h create mode 100644 drivers/net/wireless/rt2x00/rt2x00queue.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00queue.h create mode 100644 drivers/net/wireless/rt2x00/rt2x00reg.h create mode 100644 drivers/net/wireless/rt2x00/rt2x00soc.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00soc.h create mode 100644 drivers/net/wireless/rt2x00/rt2x00usb.c create mode 100644 drivers/net/wireless/rt2x00/rt2x00usb.h create mode 100644 drivers/net/wireless/rt2x00/rt61pci.c create mode 100644 drivers/net/wireless/rt2x00/rt61pci.h create mode 100644 drivers/net/wireless/rt2x00/rt73usb.c create mode 100644 drivers/net/wireless/rt2x00/rt73usb.h create mode 100644 drivers/net/wireless/rtl818x/Makefile create mode 100644 drivers/net/wireless/rtl818x/rtl8180.h create mode 100644 drivers/net/wireless/rtl818x/rtl8180_dev.c create mode 100644 drivers/net/wireless/rtl818x/rtl8180_grf5101.c create mode 100644 drivers/net/wireless/rtl818x/rtl8180_grf5101.h create mode 100644 drivers/net/wireless/rtl818x/rtl8180_max2820.c create mode 100644 drivers/net/wireless/rtl818x/rtl8180_max2820.h create mode 100644 drivers/net/wireless/rtl818x/rtl8180_rtl8225.c create mode 100644 drivers/net/wireless/rtl818x/rtl8180_rtl8225.h create mode 100644 drivers/net/wireless/rtl818x/rtl8180_sa2400.c create mode 100644 drivers/net/wireless/rtl818x/rtl8180_sa2400.h create mode 100644 drivers/net/wireless/rtl818x/rtl8187.h create mode 100644 drivers/net/wireless/rtl818x/rtl8187_dev.c create mode 100644 drivers/net/wireless/rtl818x/rtl8187_leds.c create mode 100644 drivers/net/wireless/rtl818x/rtl8187_leds.h create mode 100644 drivers/net/wireless/rtl818x/rtl8187_rfkill.c create mode 100644 drivers/net/wireless/rtl818x/rtl8187_rfkill.h create mode 
100644 drivers/net/wireless/rtl818x/rtl8187_rtl8225.c create mode 100644 drivers/net/wireless/rtl818x/rtl8187_rtl8225.h create mode 100644 drivers/net/wireless/rtl818x/rtl818x.h create mode 100644 drivers/net/wireless/wl12xx/Makefile create mode 100644 drivers/net/wireless/wl12xx/wl1251.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_acx.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_acx.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_boot.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_boot.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_cmd.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_cmd.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_debugfs.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_debugfs.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_event.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_event.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_init.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_init.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_io.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_io.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_main.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_ps.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_ps.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_reg.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_rx.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_rx.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_sdio.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_spi.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_spi.h create mode 100644 drivers/net/wireless/wl12xx/wl1251_tx.c create mode 100644 drivers/net/wireless/wl12xx/wl1251_tx.h create mode 100644 drivers/net/wireless/wl12xx/wl1271.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_acx.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_acx.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_boot.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_boot.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_cmd.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_cmd.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_conf.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_debugfs.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_debugfs.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_event.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_event.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_init.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_init.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_io.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_io.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_main.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_ps.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_ps.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_reg.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_rx.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_rx.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_spi.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_spi.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_testmode.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_testmode.h create mode 100644 drivers/net/wireless/wl12xx/wl1271_tx.c create mode 100644 drivers/net/wireless/wl12xx/wl1271_tx.h create mode 100644 
drivers/net/wireless/wl12xx/wl12xx_80211.h create mode 100644 drivers/net/wireless/zd1211rw/Makefile create mode 100644 drivers/net/wireless/zd1211rw/zd_chip.c create mode 100644 drivers/net/wireless/zd1211rw/zd_chip.h create mode 100644 drivers/net/wireless/zd1211rw/zd_def.h create mode 100644 drivers/net/wireless/zd1211rw/zd_mac.c create mode 100644 drivers/net/wireless/zd1211rw/zd_mac.h create mode 100644 drivers/net/wireless/zd1211rw/zd_rf.c create mode 100644 drivers/net/wireless/zd1211rw/zd_rf.h create mode 100644 drivers/net/wireless/zd1211rw/zd_rf_al2230.c create mode 100644 drivers/net/wireless/zd1211rw/zd_rf_al7230b.c create mode 100644 drivers/net/wireless/zd1211rw/zd_rf_rf2959.c create mode 100644 drivers/net/wireless/zd1211rw/zd_rf_uw2453.c create mode 100644 drivers/net/wireless/zd1211rw/zd_usb.c create mode 100644 drivers/net/wireless/zd1211rw/zd_usb.h create mode 100644 drivers/ssb/Makefile create mode 100644 drivers/ssb/b43_pci_bridge.c create mode 100644 drivers/ssb/driver_chipcommon.c create mode 100644 drivers/ssb/driver_chipcommon_pmu.c create mode 100644 drivers/ssb/driver_extif.c create mode 100644 drivers/ssb/driver_gige.c create mode 100644 drivers/ssb/driver_mipscore.c create mode 100644 drivers/ssb/driver_pcicore.c create mode 100644 drivers/ssb/embedded.c create mode 100644 drivers/ssb/main.c create mode 100644 drivers/ssb/pci.c create mode 100644 drivers/ssb/pcihost_wrapper.c create mode 100644 drivers/ssb/pcmcia.c create mode 100644 drivers/ssb/scan.c create mode 100644 drivers/ssb/sdio.c create mode 100644 drivers/ssb/sprom.c create mode 100644 drivers/ssb/ssb_private.h create mode 100644 enable-older-kernels/README create mode 100644 enable-older-kernels/enable-2.6.21.patch create mode 100644 enable-older-kernels/enable-2.6.22.patch create mode 100644 enable-older-kernels/enable-2.6.23.patch create mode 100644 enable-older-kernels/enable-2.6.24.patch create mode 100644 git-describe create mode 100644 include/linux/ath9k_platform.h create mode 100644 include/linux/bitops.h create mode 100644 include/linux/compat-2.6.14.h create mode 100644 include/linux/compat-2.6.18.h create mode 100644 include/linux/compat-2.6.19.h create mode 100644 include/linux/compat-2.6.21.h create mode 100644 include/linux/compat-2.6.22.h create mode 100644 include/linux/compat-2.6.23.h create mode 100644 include/linux/compat-2.6.24.h create mode 100644 include/linux/compat-2.6.25.h create mode 100644 include/linux/compat-2.6.26.h create mode 100644 include/linux/compat-2.6.27.h create mode 100644 include/linux/compat-2.6.28.h create mode 100644 include/linux/compat-2.6.29.h create mode 100644 include/linux/compat-2.6.30.h create mode 100644 include/linux/compat-2.6.31.h create mode 100644 include/linux/compat-2.6.32.h create mode 100644 include/linux/compat-2.6.33.h create mode 100644 include/linux/compat-2.6.34.h create mode 100644 include/linux/compat-2.6.h create mode 100644 include/linux/eeprom_93cx6.h create mode 100644 include/linux/ieee80211.h create mode 100644 include/linux/nl80211.h create mode 100644 include/linux/pci_ids.h create mode 100644 include/linux/pm_qos_params.h create mode 100644 include/linux/rfkill_backport.h create mode 100644 include/linux/spi/libertas_spi.h create mode 100644 include/linux/spi/wl12xx.h create mode 100644 include/linux/ssb/ssb.h create mode 100644 include/linux/ssb/ssb_driver_chipcommon.h create mode 100644 include/linux/ssb/ssb_driver_extif.h create mode 100644 include/linux/ssb/ssb_driver_gige.h create mode 100644 
include/linux/ssb/ssb_driver_mips.h create mode 100644 include/linux/ssb/ssb_driver_pci.h create mode 100644 include/linux/ssb/ssb_embedded.h create mode 100644 include/linux/ssb/ssb_regs.h create mode 100644 include/linux/unaligned/access_ok.h create mode 100644 include/linux/unaligned/be_byteshift.h create mode 100644 include/linux/unaligned/be_memmove.h create mode 100644 include/linux/unaligned/be_struct.h create mode 100644 include/linux/unaligned/generic.h create mode 100644 include/linux/unaligned/le_byteshift.h create mode 100644 include/linux/unaligned/le_memmove.h create mode 100644 include/linux/unaligned/le_struct.h create mode 100644 include/linux/unaligned/memmove.h create mode 100644 include/linux/unaligned/packed_struct.h create mode 100644 include/linux/usb/rndis_host.h create mode 100644 include/linux/usb/usbnet.h create mode 100644 include/linux/wireless.h create mode 100644 include/net/bluetooth/bluetooth.h create mode 100644 include/net/bluetooth/hci.h create mode 100644 include/net/bluetooth/hci_core.h create mode 100644 include/net/bluetooth/l2cap.h create mode 100644 include/net/bluetooth/rfcomm.h create mode 100644 include/net/cfg80211.h create mode 100644 include/net/ieee80211_radiotap.h create mode 100644 include/net/lib80211.h create mode 100644 include/net/mac80211.h create mode 100644 include/net/regulatory.h create mode 100644 include/net/wext.h create mode 100644 linux-next-cherry-picks/README create mode 100644 net/bluetooth/Makefile create mode 100644 net/bluetooth/af_bluetooth.c create mode 100644 net/bluetooth/bnep/Makefile create mode 100644 net/bluetooth/bnep/bnep.h create mode 100644 net/bluetooth/bnep/core.c create mode 100644 net/bluetooth/bnep/netdev.c create mode 100644 net/bluetooth/bnep/sock.c create mode 100644 net/bluetooth/cmtp/Makefile create mode 100644 net/bluetooth/cmtp/capi.c create mode 100644 net/bluetooth/cmtp/cmtp.h create mode 100644 net/bluetooth/cmtp/core.c create mode 100644 net/bluetooth/cmtp/sock.c create mode 100644 net/bluetooth/hci_conn.c create mode 100644 net/bluetooth/hci_core.c create mode 100644 net/bluetooth/hci_event.c create mode 100644 net/bluetooth/hci_sock.c create mode 100644 net/bluetooth/hci_sock.c.orig create mode 100644 net/bluetooth/hci_sysfs.c create mode 100644 net/bluetooth/hci_sysfs.c.orig create mode 100644 net/bluetooth/hidp/Makefile create mode 100644 net/bluetooth/hidp/core.c create mode 100644 net/bluetooth/hidp/hidp.h create mode 100644 net/bluetooth/hidp/sock.c create mode 100644 net/bluetooth/l2cap.c create mode 100644 net/bluetooth/lib.c create mode 100644 net/bluetooth/rfcomm/Makefile create mode 100644 net/bluetooth/rfcomm/core.c create mode 100644 net/bluetooth/rfcomm/sock.c create mode 100644 net/bluetooth/rfcomm/tty.c create mode 100644 net/bluetooth/sco.c create mode 100644 net/mac80211/Makefile create mode 100644 net/mac80211/aes_ccm.c create mode 100644 net/mac80211/aes_ccm.h create mode 100644 net/mac80211/aes_cmac.c create mode 100644 net/mac80211/aes_cmac.h create mode 100644 net/mac80211/agg-rx.c create mode 100644 net/mac80211/agg-tx.c create mode 100644 net/mac80211/cfg.c create mode 100644 net/mac80211/cfg.h create mode 100644 net/mac80211/debugfs.c create mode 100644 net/mac80211/debugfs.h create mode 100644 net/mac80211/debugfs_key.c create mode 100644 net/mac80211/debugfs_key.h create mode 100644 net/mac80211/debugfs_netdev.c create mode 100644 net/mac80211/debugfs_netdev.h create mode 100644 net/mac80211/debugfs_sta.c create mode 100644 net/mac80211/debugfs_sta.h create mode 
100644 net/mac80211/driver-ops.h create mode 100644 net/mac80211/driver-trace.c create mode 100644 net/mac80211/driver-trace.h create mode 100644 net/mac80211/event.c create mode 100644 net/mac80211/ht.c create mode 100644 net/mac80211/ibss.c create mode 100644 net/mac80211/ieee80211_i.h create mode 100644 net/mac80211/iface.c create mode 100644 net/mac80211/key.c create mode 100644 net/mac80211/key.h create mode 100644 net/mac80211/led.c create mode 100644 net/mac80211/led.h create mode 100644 net/mac80211/main.c create mode 100644 net/mac80211/mesh.c create mode 100644 net/mac80211/mesh.h create mode 100644 net/mac80211/mesh_hwmp.c create mode 100644 net/mac80211/mesh_pathtbl.c create mode 100644 net/mac80211/mesh_plink.c create mode 100644 net/mac80211/michael.c create mode 100644 net/mac80211/michael.h create mode 100644 net/mac80211/mlme.c create mode 100644 net/mac80211/offchannel.c create mode 100644 net/mac80211/pm.c create mode 100644 net/mac80211/rate.c create mode 100644 net/mac80211/rate.h create mode 100644 net/mac80211/rc80211_minstrel.c create mode 100644 net/mac80211/rc80211_minstrel.h create mode 100644 net/mac80211/rc80211_minstrel_debugfs.c create mode 100644 net/mac80211/rc80211_pid.h create mode 100644 net/mac80211/rc80211_pid_algo.c create mode 100644 net/mac80211/rc80211_pid_debugfs.c create mode 100644 net/mac80211/rx.c create mode 100644 net/mac80211/scan.c create mode 100644 net/mac80211/spectmgmt.c create mode 100644 net/mac80211/sta_info.c create mode 100644 net/mac80211/sta_info.h create mode 100644 net/mac80211/status.c create mode 100644 net/mac80211/tkip.c create mode 100644 net/mac80211/tkip.h create mode 100644 net/mac80211/tx.c create mode 100644 net/mac80211/util.c create mode 100644 net/mac80211/wep.c create mode 100644 net/mac80211/wep.h create mode 100644 net/mac80211/wme.c create mode 100644 net/mac80211/wme.h create mode 100644 net/mac80211/work.c create mode 100644 net/mac80211/wpa.c create mode 100644 net/mac80211/wpa.h create mode 100644 net/rfkill/Makefile create mode 100644 net/rfkill/core.c create mode 100644 net/rfkill/input.c create mode 100644 net/rfkill/input.c.orig create mode 100644 net/rfkill/rfkill.h create mode 100644 net/wireless/Makefile create mode 100644 net/wireless/chan.c create mode 100644 net/wireless/core.c create mode 100644 net/wireless/core.h create mode 100644 net/wireless/debugfs.c create mode 100644 net/wireless/debugfs.h create mode 100644 net/wireless/ethtool.c create mode 100644 net/wireless/ethtool.h create mode 100644 net/wireless/ibss.c create mode 100644 net/wireless/lib80211.c create mode 100644 net/wireless/lib80211_crypt_ccmp.c create mode 100644 net/wireless/lib80211_crypt_tkip.c create mode 100644 net/wireless/lib80211_crypt_wep.c create mode 100644 net/wireless/mlme.c create mode 100644 net/wireless/nl80211.c create mode 100644 net/wireless/nl80211.h create mode 100644 net/wireless/radiotap.c create mode 100644 net/wireless/reg.c create mode 100644 net/wireless/reg.h create mode 100644 net/wireless/regdb.h create mode 100644 net/wireless/scan.c create mode 100644 net/wireless/sme.c create mode 100644 net/wireless/sysfs.c create mode 100644 net/wireless/sysfs.h create mode 100644 net/wireless/util.c create mode 100644 net/wireless/wext-compat.c create mode 100644 net/wireless/wext-compat.h create mode 100644 net/wireless/wext-core.c create mode 100644 net/wireless/wext-priv.c create mode 100644 net/wireless/wext-proc.c create mode 100644 net/wireless/wext-sme.c create mode 100644 net/wireless/wext-spy.c 
create mode 100644 patches/01-netdev.patch create mode 100644 patches/02-ksize.patch create mode 100644 patches/03-rfkill.patch create mode 100644 patches/04-netns.patch create mode 100644 patches/05-usb.patch create mode 100644 patches/06-header-changes.patch create mode 100644 patches/07-change-default-rate-alg.patch create mode 100644 patches/08-rename-iwl4965-config.patch create mode 100644 patches/09-threaded-irq.patch create mode 100644 patches/10-add-wext-handlers-to-netdev.patch create mode 100644 patches/11-dev-pm-ops.patch create mode 100644 patches/12-iw_handler-changes.patch create mode 100644 patches/13-trace.patch create mode 100644 patches/14-device-type.patch create mode 100644 patches/15-symbol-export-conflicts.patch create mode 100644 patches/16-bluetooth.patch create mode 100644 patches/17-netdev-queue.patch create mode 100644 patches/18-rename-usb-net-symbols.patch create mode 100644 patches/19-kfifo.patch create mode 100644 patches/20-pcidev.patch create mode 100644 patches/21-capi-proc_fops.patch create mode 100644 patches/22-multiqueue.patch create mode 100644 patches/98-add-compat-wireless.patch create mode 100644 patches/99-change-makefiles.patch create mode 100644 patches/README create mode 100755 scripts/admin-clean.sh create mode 100755 scripts/admin-refresh.sh create mode 100755 scripts/admin-update.sh create mode 100755 scripts/athenable create mode 100755 scripts/athload create mode 100755 scripts/b43enable create mode 100755 scripts/b43load create mode 100755 scripts/btload.sh create mode 100755 scripts/btunload.sh create mode 100755 scripts/check_config.sh create mode 100755 scripts/check_depmod create mode 100755 scripts/compress_modules create mode 100755 scripts/driver-select create mode 100755 scripts/gen-compat-autoconf.sh create mode 100755 scripts/gen-stable-release.sh create mode 100755 scripts/iwl-enable create mode 100755 scripts/iwl-load create mode 100755 scripts/load.sh create mode 100755 scripts/madwifi-unload create mode 100755 scripts/modlib.sh create mode 100755 scripts/unload.sh create mode 100755 scripts/update-initramfs create mode 100755 scripts/wlload.sh create mode 100755 scripts/wlunload.sh create mode 100644 udev/50-compat_firmware.rules create mode 100755 udev/compat_firmware.sh create mode 100644 udev/ubuntu/50-compat_firmware.rules create mode 100755 udev/ubuntu/compat_firmware.sh diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..8babafb --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +*~ +master-tag +.compat_autoconf* +.config.md5 +.config.mk_md5sum.txt +.tmp_versions +Module.symvers +module.order +modules.order +.pc +compat_autoconf.h +*.o +*o.cmd +*.mod.c diff --git a/COPYRIGHT b/COPYRIGHT new file mode 100644 index 0000000..ca442d3 --- /dev/null +++ b/COPYRIGHT @@ -0,0 +1,356 @@ + + NOTE! This copyright does *not* cover user programs that use kernel + services by normal system calls - this is merely considered normal use + of the kernel, and does *not* fall under the heading of "derived work". + Also note that the GPL below is copyrighted by the Free Software + Foundation, but the instance of code that it refers to (the Linux + kernel) is copyrighted by me and others who actually wrote it. + + Also note that the only valid version of the GPL as far as the kernel + is concerned is _this_ particular version of the license (ie v2, not + v2.2 or v3.x or whatever), unless explicitly otherwise stated. 
+ + Linus Torvalds + +---------------------------------------- + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. 
(Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..cacd35b --- /dev/null +++ b/Makefile @@ -0,0 +1,356 @@ +export KMODDIR?= updates +KMODDIR_ARG:= "INSTALL_MOD_DIR=$(KMODDIR)" +ifneq ($(origin KLIB), undefined) +KMODPATH_ARG:= "INSTALL_MOD_PATH=$(KLIB)" +else +export KLIB:= /lib/modules/$(shell uname -r) +endif +export KLIB_BUILD ?= $(KLIB)/build +# Sometimes not available in the path +MODPROBE := /sbin/modprobe +MADWIFI=$(shell $(MODPROBE) -l ath_pci) +OLD_IWL=$(shell $(MODPROBE) -l iwl4965) + +DESTDIR?= + +ifneq ($(KERNELRELEASE),) + +NOSTDINC_FLAGS := -I$(M)/include/ -include $(M)/include/linux/compat-2.6.h $(CFLAGS) + +obj-y := compat/ + +obj-$(CONFIG_COMPAT_RFKILL) += net/rfkill/ + +ifeq ($(BT),) +obj-$(CONFIG_COMPAT_WIRELESS) += net/wireless/ net/mac80211/ +obj-$(CONFIG_COMPAT_WIRELESS_MODULES) += drivers/net/wireless/ + +obj-$(CONFIG_COMPAT_NET_USB_MODULES) += drivers/net/usb/ + +obj-$(CONFIG_COMPAT_NETWORK_MODULES) += drivers/net/ +obj-$(CONFIG_COMPAT_VAR_MODULES) += drivers/ssb/ drivers/misc/eeprom/ +endif + +obj-$(CONFIG_COMPAT_BLUETOOTH) += net/bluetooth/ +obj-$(CONFIG_COMPAT_BLUETOOTH_MODULES) += drivers/bluetooth/ + +else + +export PWD := $(shell pwd) + +# These exported as they are used by the scripts +# to check config and compat autoconf +export COMPAT_CONFIG=config.mk +export CONFIG_CHECK=.$(COMPAT_CONFIG)_md5sum.txt +export COMPAT_AUTOCONF=include/linux/compat_autoconf.h +export CREL=$(shell cat $(PWD)/compat-release) +export CREL_PRE:=.compat_autoconf_ +export CREL_CHECK:=$(CREL_PRE)$(CREL) + +include $(PWD)/$(COMPAT_CONFIG) + +all: modules + +modules: $(CREL_CHECK) + @./scripts/check_config.sh + $(MAKE) -C $(KLIB_BUILD) M=$(PWD) modules + @touch $@ + +bt: $(CREL_CHECK) + @./scripts/check_config.sh + $(MAKE) -C $(KLIB_BUILD) M=$(PWD) BT=TRUE modules + @touch $@ + +# With the above and this we make sure we generate a new compat autoconf per +# new relase of compat-wireless-2.6 OR when the user updates the +# $(COMPAT_CONFIG) file +$(CREL_CHECK): + @# Force to regenerate compat autoconf + @rm -f $(CONFIG_CHECK) + @./scripts/check_config.sh + @touch $@ + @md5sum $(COMPAT_CONFIG) > $(CONFIG_CHECK) + +btinstall: btuninstall bt-install-modules + +bt-install-modules: bt + $(MAKE) -C $(KLIB_BUILD) M=$(PWD) $(KMODDIR_ARG) $(KMODPATH_ARG) BT=TRUE \ + modules_install + @/sbin/depmod -ae + @echo + @echo "Currently detected bluetooth subsystem modules:" + @echo + @$(MODPROBE) -l sco + @$(MODPROBE) -l l2cap + @$(MODPROBE) -l hidp + @$(MODPROBE) -l rfcomm + @$(MODPROBE) -l bnep + @$(MODPROBE) -l btusb + @$(MODPROBE) -l bluetooth + @echo + @echo Now run: + @echo + @echo sudo make btunload: + @echo + @echo And the load the needed bluetooth modules. If unsure reboot. + @echo + +btuninstall: + @# New location, matches upstream + @rm -rf $(KLIB)/$(KMODDIR)/net/bluetooth/ + @rm -rf $(KLIB)/$(KMODDIR)/drivers/bluetooth/ + @# Lets only remove the stuff we are sure we are providing + @# on the misc directory. 
+ @/sbin/depmod -ae + @echo + @echo "Your old bluetooth subsystem modules were left intact:" + @echo + @$(MODPROBE) -l sco + @$(MODPROBE) -l l2cap + @$(MODPROBE) -l hidp + @$(MODPROBE) -l rfcomm + @$(MODPROBE) -l bnep + @$(MODPROBE) -l btusb + @$(MODPROBE) -l bluetooth + @ + @echo + +btclean: + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) BT=TRUE clean + @rm -f $(CREL_PRE)* + +install: uninstall install-modules install-scripts + +install-modules: modules + $(MAKE) -C $(KLIB_BUILD) M=$(PWD) $(KMODDIR_ARG) $(KMODPATH_ARG) \ + modules_install + @./scripts/update-initramfs + +install-scripts: + @# All the scripts we can use + @mkdir -p $(DESTDIR)/usr/lib/compat-wireless/ + @install scripts/modlib.sh $(DESTDIR)/usr/lib/compat-wireless/ + @install scripts/madwifi-unload $(DESTDIR)/usr/sbin/ + @# This is to allow switching between drivers without blacklisting + @install scripts/athenable $(DESTDIR)/usr/sbin/ + @install scripts/b43enable $(DESTDIR)/usr/sbin/ + @install scripts/iwl-enable $(DESTDIR)/usr/sbin/ + @install scripts/athload $(DESTDIR)/usr/sbin/ + @install scripts/b43load $(DESTDIR)/usr/sbin/ + @install scripts/iwl-load $(DESTDIR)/usr/sbin/ + @if [ ! -z "$(MADWIFI)" ] && [ -z "$(DESTDIR)" ]; then \ + echo ;\ + echo -n "Note: madwifi detected, we're going to disable it. " ;\ + echo "If you would like to enable it later you can run:" ;\ + echo " sudo athenable madwifi" ;\ + echo ;\ + echo Running athenable ath5k...;\ + /usr/sbin/athenable ath5k ;\ + fi + @if [ ! -z "$(OLD_IWL)" ] && [ -z "$(DESTDIR)" ]; then \ + echo ;\ + echo -n "Note: iwl4965 detected, we're going to disable it. " ;\ + echo "If you would like to enable it later you can run:" ;\ + echo " sudo iwl-load iwl4965" ;\ + echo ;\ + echo Running iwl-enable iwlagn...;\ + /usr/sbin/iwl-enable iwlagn ;\ + fi + @# If on distributions like Mandriva which like to + @# compress their modules this will find out and do + @# it for you. Reason is some old version of modutils + @# won't know mac80211.ko should be used instead of + @# mac80211.ko.gz + @./scripts/compress_modules + @# Mandrake doesn't have a depmod.d/ conf file to prefer + @# the updates/ dir which is what we use so we add one for it + @# (or any other distribution that doens't have this). + @./scripts/check_depmod + @# Udev stuff needed for the new compat_firmware_class. 
+ @./compat/scripts/compat_firmware_install + @/sbin/depmod -a + @echo + @echo "Currently detected wireless subsystem modules:" + @echo + @$(MODPROBE) -l mac80211 + @$(MODPROBE) -l cfg80211 + @$(MODPROBE) -l lib80211 + @$(MODPROBE) -l adm8211 + @$(MODPROBE) -l ar9170usb + @$(MODPROBE) -l at76c50x-usb + @$(MODPROBE) -l ath + @$(MODPROBE) -l ath5k + @$(MODPROBE) -l ath9k + @$(MODPROBE) -l b43 + @$(MODPROBE) -l b43legacy + @$(MODPROBE) -l b44 + @$(MODPROBE) -l cdc_ether + @$(MODPROBE) -l eeprom_93cx6 + @$(MODPROBE) -l ipw2100 + @$(MODPROBE) -l ipw2200 + @$(MODPROBE) -l iwl3945 + @$(MODPROBE) -l iwlagn + @$(MODPROBE) -l iwlcore + @$(MODPROBE) -l lib80211_crypt_ccmp + @$(MODPROBE) -l lib80211_crypt_tkip + @$(MODPROBE) -l lib80211_crypt_wep + @$(MODPROBE) -l libertas + @$(MODPROBE) -l libertas_cs + @$(MODPROBE) -l libertas_sdio + @$(MODPROBE) -l libertas_spi + @$(MODPROBE) -l libertas_tf + @$(MODPROBE) -l libertas_tf_usb + @$(MODPROBE) -l libipw + @$(MODPROBE) -l mac80211_hwsim + @$(MODPROBE) -l mwl8k + @$(MODPROBE) -l p54common + @$(MODPROBE) -l p54pci + @$(MODPROBE) -l p54spi + @$(MODPROBE) -l p54usb + @$(MODPROBE) -l rndis_host + @$(MODPROBE) -l rndis_wlan + @$(MODPROBE) -l rt2400pci + @$(MODPROBE) -l rt2500pci + @$(MODPROBE) -l rt2500usb + @$(MODPROBE) -l rt2x00lib + @$(MODPROBE) -l rt2x00pci + @$(MODPROBE) -l rt2x00usb + @$(MODPROBE) -l rt61pci + @$(MODPROBE) -l rt73usb + @$(MODPROBE) -l rtl8180 + @$(MODPROBE) -l rtl8187 + @$(MODPROBE) -l ssb + @$(MODPROBE) -l usb8xxx + @$(MODPROBE) -l usbnet + @$(MODPROBE) -l zd1211rw + @echo + @echo "Currently detected bluetooth subsystem modules:" + @echo + @$(MODPROBE) -l sco + @$(MODPROBE) -l l2cap + @$(MODPROBE) -l hidp + @$(MODPROBE) -l rfcomm + @$(MODPROBE) -l bnep + @$(MODPROBE) -l btusb + @$(MODPROBE) -l bluetooth + @echo + @echo Now run: + @echo + @echo sudo make unload to unload both wireless and bluetooth modules + @echo sudo make wlunload to unload wireless modules + @echo sudo make btunload to unload bluetooth modules + @echo + @echo And then load the wireless or bluetooth module you need. If unsure reboot. + @echo Alternatively use "sudo make load/wlload/btload" to load modules + @echo + +uninstall: + @# New location, matches upstream + @rm -rf $(KLIB)/$(KMODDIR)/net/mac80211/ + @rm -rf $(KLIB)/$(KMODDIR)/net/rfkill/ + @rm -rf $(KLIB)/$(KMODDIR)/net/wireless/ + @rm -rf $(KLIB)/$(KMODDIR)/drivers/ssb/ + @rm -rf $(KLIB)/$(KMODDIR)/drivers/net/usb/ + @rm -rf $(KLIB)/$(KMODDIR)/drivers/net/wireless/ + @# Lets only remove the stuff we are sure we are providing + @# on the misc directory. 
+ @rm -f $(KLIB)/$(KMODDIR)/drivers/misc/eeprom/eeprom_93cx6.ko* + @rm -f $(KLIB)/$(KMODDIR)/drivers/misc/eeprom_93cx6.ko* + @rm -f $(KLIB)/$(KMODDIR)/drivers/net/b44.ko* + @/sbin/depmod -a + @echo + @echo "Your old wireless subsystem modules were left intact:" + @echo + @$(MODPROBE) -l mac80211 + @$(MODPROBE) -l cfg80211 + @$(MODPROBE) -l lib80211 + @$(MODPROBE) -l adm8211 + @$(MODPROBE) -l ar9170usb + @$(MODPROBE) -l at76c50x-usb + @$(MODPROBE) -l ath + @$(MODPROBE) -l ath5k + @$(MODPROBE) -l ath9k + @$(MODPROBE) -l b43 + @$(MODPROBE) -l b43legacy + @$(MODPROBE) -l b44 + @$(MODPROBE) -l cdc_ether + @$(MODPROBE) -l eeprom_93cx6 + @$(MODPROBE) -l ipw2100 + @$(MODPROBE) -l ipw2200 + @$(MODPROBE) -l iwl3945 + @$(MODPROBE) -l iwlagn + @$(MODPROBE) -l iwlcore + @$(MODPROBE) -l lib80211_crypt_ccmp + @$(MODPROBE) -l lib80211_crypt_tkip + @$(MODPROBE) -l lib80211_crypt_wep + @$(MODPROBE) -l libertas + @$(MODPROBE) -l libertas_cs + @$(MODPROBE) -l libertas_sdio + @$(MODPROBE) -l libertas_spi + @$(MODPROBE) -l libertas_tf + @$(MODPROBE) -l libertas_tf_usb + @$(MODPROBE) -l libipw + @$(MODPROBE) -l mac80211_hwsim + @$(MODPROBE) -l mwl8k + @$(MODPROBE) -l p54common + @$(MODPROBE) -l p54pci + @$(MODPROBE) -l p54spi + @$(MODPROBE) -l p54usb + @$(MODPROBE) -l rndis_host + @$(MODPROBE) -l rndis_wlan + @$(MODPROBE) -l rt2400pci + @$(MODPROBE) -l rt2500pci + @$(MODPROBE) -l rt2500usb + @$(MODPROBE) -l rt2x00lib + @$(MODPROBE) -l rt2x00pci + @$(MODPROBE) -l rt2x00usb + @$(MODPROBE) -l rt61pci + @$(MODPROBE) -l rt73usb + @$(MODPROBE) -l rtl8180 + @$(MODPROBE) -l rtl8187 + @$(MODPROBE) -l ssb + @$(MODPROBE) -l usb8xxx + @$(MODPROBE) -l usbnet + @$(MODPROBE) -l zd1211rw + @echo + @echo "Your old bluetooth subsystem modules were left intact:" + @echo + @$(MODPROBE) -l sco + @$(MODPROBE) -l l2cap + @$(MODPROBE) -l hidp + @$(MODPROBE) -l rfcomm + @$(MODPROBE) -l bnep + @$(MODPROBE) -l btusb + @$(MODPROBE) -l bluetooth + @ + @echo + +clean: + @if [ -d net -a -d $(KLIB_BUILD) ]; then \ + $(MAKE) -C $(KLIB_BUILD) M=$(PWD) clean ;\ + fi + @rm -f $(CREL_PRE)* +unload: + @./scripts/unload.sh + +load: unload + @./scripts/load.sh + +btunload: + @./scripts/btunload.sh + +btload: btunload + @./scripts/btload.sh + +wlunload: + @./scripts/wlunload.sh + +wlload: wlunload + @./scripts/wlload.sh + +.PHONY: all clean install uninstall unload load btunload btload wlunload wlload modules bt + +endif + +clean-files += Module.symvers Module.markers modules modules.order +clean-files += $(CREL_CHECK) $(CONFIG_CHECK) diff --git a/README b/README new file mode 100644 index 0000000..88b16b4 --- /dev/null +++ b/README @@ -0,0 +1,248 @@ + +Linux Wireless compatibility package +===================================== + +This is a Linux wireless compatibility package which provides the latest +Linux wireless subsystem enhancements for kernels 2.6.25 and above. +It is technically possible to support kernels < 2.6.25 but more +work is required for that. +It also provides Linux bluetooth subsystem enhancements for kernels 2.6.27 and above. + +If you'd like to keep the wireless-testing git repository local as well, +please read out git-guide which explains how to achieve this: + +http://wireless.kernel.org/en/developers/git-guide + +With a local git repository you can update the compatibility package yourself. +For more information on how to do this please refer the Developers section below. + +Documentation +------------ + +This package is also documented online and has more-up-to date +information online than on this README file. 
You should read the wiki page +and not rely on this README: + +http://wireless.kernel.org/en/users/Download + +Subscribe to the wiki page to get updates on the documentation. + +Where to get the latest +----------------------- + +This package lets you build your own 'latest', all you need is a local git repository +checkout of wireless-testing.git. However since not many users are expected to keep +a local git repository of wireless-testing we provide daily snapshots of this +package + the wireless subsystem code. You can find the latest snapshot at: + +http://wireless.kernel.org/en/users/Download + +Selecting your driver +--------------------- + +If you know the driver you want you can select it with our +helper script: + +./scripts/driver-select + +Run that script to see more information. + +Building, and installing +------------------------ + +Build: build the latest linux wireless subsystem + + make + +Install: + +We use the updates/ directory so your distribution's drivers are left intact. + + sudo make install + +Uninstall: + +This nukes our changes to updates/ so you can go back to using your +distribution's supported drivers. + + sudo make uninstall + +Load: + +Reboot unless you know what you are doing. + + +Bluetooth modules can be seperately compiled and installed using below commands + +Build: + make bt + +Install: + sudo make btinstall + +Uninstall: + sudo make btuninstall + +Load: + sudo make btload + +Unload: + sudo make btunload + +Drivers +------- + +This is the list of drivers this package provides. It adds +all new drivers or drivers which keep being updated which you might +be interested in. + +Driver +adm8211 +ath5k +ath9h +ar9170 +b43 +b43legacy +iwl3945 +iwlagn +ipw2100 +ipw2200 +libertas_cs (Libertas) +ub8xxx (Libertas) +p54pci +p54usb +rt2400pci (rt2x00) +rt2500pci (rt2x00) +rt2500usb (rt2x00) +rt61pci (rt2x00) +rt73usb (rt2x00) +rtl8180 (Realtek) +rtl8187 (Realtek) +zd1211rw + +This package also provides more drivers which may be documented here + +For a complete list see: + +http://wireless.kernel.org/en/users/Download + +Non-wireless drivers +-------------------- + +To support b43 ssb is also provided, and since ssb is also provided +we provide b44 (the ethernet driverl). + +The new rfkill drivers also provided and backported. + +Bluetooth drivers: +bluetooth +btusb +hci_uart +btsdio +btuart_cs +bluecard_cs +bfusb + +Firmware: +--------- + +If your driver needs firmware please be sure to check the driver page +for that driver here: + +http://wireless.kernel.org/en/users/Drivers + +Why? +---- + +For users or developers stuck on older kernels that want to help test or +patch wireless work. Additionally if you're on a recent kernel this lets +you get the latest and greatest wireless-testing git work without much effort. +This may mean new drivers for some users. Last but not least we hope this +will encourage vendors and developers to post patches upstream first +rather than forking or maintaining their own mac80211 releases with +their own patches for their own drivers. + +Building for external kernels +---------------------------------- + +If you have a kernel you do not have installed but yet want to build the +compat-wireless-2.6 drivers for it you can use this syntax: + +make KLIB=/home/mcgrof/kernels/linux-2.6.23.9 KLIB_BUILD=/home/mcgrof/kernels/linux-2.6.23.9 + +If you have a kernel installed, which is not your currently running kernel (e.g. 
+Building, and installing
+------------------------
+
+Build: build the latest Linux wireless subsystem
+
+ make
+
+Install:
+
+We use the updates/ directory so your distribution's drivers are left intact.
+
+ sudo make install
+
+Uninstall:
+
+This nukes our changes to updates/ so you can go back to using your
+distribution's supported drivers.
+
+ sudo make uninstall
+
+Load:
+
+Reboot unless you know what you are doing.
+
+
+Bluetooth modules can be separately compiled and installed using the
+commands below:
+
+Build:
+ make bt
+
+Install:
+ sudo make btinstall
+
+Uninstall:
+ sudo make btuninstall
+
+Load:
+ sudo make btload
+
+Unload:
+ sudo make btunload
+
+Drivers
+-------
+
+This is the list of drivers this package provides. It includes all new
+drivers, and drivers which keep being updated, that you might be
+interested in.
+
+Driver
+adm8211
+ath5k
+ath9k
+ar9170
+b43
+b43legacy
+iwl3945
+iwlagn
+ipw2100
+ipw2200
+libertas_cs (Libertas)
+usb8xxx (Libertas)
+p54pci
+p54usb
+rt2400pci (rt2x00)
+rt2500pci (rt2x00)
+rt2500usb (rt2x00)
+rt61pci (rt2x00)
+rt73usb (rt2x00)
+rtl8180 (Realtek)
+rtl8187 (Realtek)
+zd1211rw
+
+This package also provides more drivers than those documented here.
+
+For a complete list see:
+
+http://wireless.kernel.org/en/users/Download
+
+Non-wireless drivers
+--------------------
+
+To support b43, ssb is also provided, and since ssb is provided we also
+provide b44 (the ethernet driver).
+
+The new rfkill drivers are also provided and backported.
+
+Bluetooth drivers:
+bluetooth
+btusb
+hci_uart
+btsdio
+btuart_cs
+bluecard_cs
+bfusb
+
+Firmware:
+---------
+
+If your driver needs firmware, please be sure to check the driver page
+for that driver here:
+
+http://wireless.kernel.org/en/users/Drivers
+
+Why?
+----
+
+For users or developers stuck on older kernels that want to help test or
+patch wireless work. Additionally, if you're on a recent kernel this lets
+you get the latest and greatest wireless-testing git work without much effort.
+This may mean new drivers for some users. Last but not least we hope this
+will encourage vendors and developers to post patches upstream first
+rather than forking or maintaining their own mac80211 releases with
+their own patches for their own drivers.
+
+Building for external kernels
+-----------------------------
+
+If you want to build the compat-wireless-2.6 drivers for a kernel you do not
+have installed, you can use this syntax:
+
+make KLIB=/home/mcgrof/kernels/linux-2.6.23.9 KLIB_BUILD=/home/mcgrof/kernels/linux-2.6.23.9
+
+If you have a kernel installed which is not your currently running kernel
+(e.g. via distro updates), plus its corresponding kernel-dev package, you
+can use this syntax:
+
+make KLIB=/lib/modules/2.6.30.6-53.fc11.x86_64
+
+and to install to your system's root path for the non-running kernel:
+
+make KLIB=/lib/modules/2.6.30.6-53.fc11.x86_64 KMODPATH_ARG='INSTALL_MOD_PATH=' install
+
+Bugs
+----
+
+If you've found a bug please report it to our linux-wireless mailing list:
+
+linux-wireless@vger.kernel.org
+
+Report the bug because you are working with the latest and greatest.
+If your bug is compatibility-related then we should still try to fix
+it within the compat.[ch] work.
+
+ChangeLog
+---------
+
+Here you see the list of changes to all wireless drivers, the wireless core
+and mac80211:
+
+http://git.kernel.org/?p=linux/kernel/git/linville/wireless-testing.git;a=log;
+
+This shows all the changes on the 'everything' branch of wireless-testing.git.
+
+License
+-------
+
+This work is a subset of the Linux kernel; as such we keep the kernel's
+copyright practice. Some files have their own copyright and in those
+cases the license is mentioned in the file. All additional work made
+in building this package is licensed under the GPLv2.
+
+Developers
+----------
+
+Compatibility work goes into compat/compat.[ch]. If those files do not
+suffice, additional code changes can go into compat/compat.diff.
+
+If you have your own wireless-testing git tree, before running admin-update.sh
+be sure to set your GIT_TREE variable. For example:
+
+export GIT_TREE=/home/mcgrof/wireless-testing/
+
+scripts/admin-clean.sh - Cleans the compat-wireless-2.6 tree
+scripts/admin-update.sh - Updates compat-wireless-2.6 with your git tree
+scripts/admin-refresh.sh - Does the above two
+
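+As a rough illustration of what a backport in the compat code typically looks
+like (the helper below is hypothetical, used only for illustration and not
+shipped by this package; real backports follow the same LINUX_VERSION_CODE
+guard and EXPORT_SYMBOL pattern used by the compat/compat-2.6.2x.c files in
+this tree):
+
+ #include <linux/version.h>
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
+ /* Hypothetical helper missing on an older kernel; exported so that
+  * backported drivers can call it unmodified. */
+ int compat_example_helper(struct net_device *dev)
+ {
+         return dev ? 0 : -EINVAL;
+ }
+ EXPORT_SYMBOL(compat_example_helper);
+ #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) */
+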
+TODO
+----
+
+* Compatibility work for 2.6.18 --> 2.6.21
+
+Patches for compatibility work
+------------------------------
+
+Please send patches against:
+
+git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/compat-wireless-2.6.git
+
+To: Luis R. Rodriguez
+CC: linux-wireless@vger.kernel.org
+Subject: [PATCH] compat-2.6: backport foo
+
+Patches for drivers
+-------------------
+
+If you'd like to send patches for a driver, you can send them using our
+Submitting Patches guideline:
+
+http://wireless.kernel.org/en/developers/SubmittingPatches
+
diff --git a/compat-release b/compat-release
new file mode 100644
index 0000000..ba03d07
--- /dev/null
+++ b/compat-release
@@ -0,0 +1 @@
+next-20100225
diff --git a/compat/Makefile b/compat/Makefile
new file mode 100644
index 0000000..2005ff3
--- /dev/null
+++ b/compat/Makefile
@@ -0,0 +1,28 @@
+obj-m += compat.o
+#compat-objs :=
+
+obj-$(CONFIG_COMPAT_FIRMWARE_CLASS) += compat_firmware_class.o
+
+compat-y += main.o
+
+# Compat kernel compatibility code
+compat-$(CONFIG_COMPAT_KERNEL_14) += compat-2.6.14.o
+compat-$(CONFIG_COMPAT_KERNEL_18) += compat-2.6.18.o
+compat-$(CONFIG_COMPAT_KERNEL_19) += compat-2.6.19.o
+compat-$(CONFIG_COMPAT_KERNEL_21) += compat-2.6.21.o
+compat-$(CONFIG_COMPAT_KERNEL_22) += compat-2.6.22.o
+compat-$(CONFIG_COMPAT_KERNEL_23) += compat-2.6.23.o
+compat-$(CONFIG_COMPAT_KERNEL_24) += compat-2.6.24.o
+
+compat-$(CONFIG_COMPAT_KERNEL_25) += \
+ compat-2.6.25.o \
+ pm_qos_params.o
+
+compat-$(CONFIG_COMPAT_KERNEL_26) += compat-2.6.26.o
+compat-$(CONFIG_COMPAT_KERNEL_27) += compat-2.6.27.o
+compat-$(CONFIG_COMPAT_KERNEL_28) += compat-2.6.28.o
+compat-$(CONFIG_COMPAT_KERNEL_29) += compat-2.6.29.o
+compat-$(CONFIG_COMPAT_KERNEL_30) += compat-2.6.30.o
+compat-$(CONFIG_COMPAT_KERNEL_31) += compat-2.6.31.o
+compat-$(CONFIG_COMPAT_KERNEL_32) += compat-2.6.32.o
+compat-$(CONFIG_COMPAT_KERNEL_33) += compat-2.6.33.o
diff --git a/compat/compat-2.6.14.c b/compat/compat-2.6.14.c
new file mode 100644
index 0000000..e0af181
--- /dev/null
+++ b/compat/compat-2.6.14.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2007 Luis R. Rodriguez
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Compatibility file for Linux wireless for kernels 2.6.14.
+ */
+
+#include
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14))
+
+/* 2.6.14 compat code goes here */
+
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) */
+
diff --git a/compat/compat-2.6.18.c b/compat/compat-2.6.18.c
new file mode 100644
index 0000000..c7fb2e4
--- /dev/null
+++ b/compat/compat-2.6.18.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2007 Luis R. Rodriguez
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Compatibility file for Linux wireless for kernels 2.6.18.
+ */
+
+#include
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+
+/* 2.6.18 compat code goes here */
+
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) */
+
diff --git a/compat/compat-2.6.19.c b/compat/compat-2.6.19.c
new file mode 100644
index 0000000..fc75b64
--- /dev/null
+++ b/compat/compat-2.6.19.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2007 Luis R. Rodriguez
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Compatibility file for Linux wireless for kernels 2.6.19.
+ */ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + +/* 2.6.19 compat code goes here */ + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */ + diff --git a/compat/compat-2.6.21.c b/compat/compat-2.6.21.c new file mode 100644 index 0000000..e2737fb --- /dev/null +++ b/compat/compat-2.6.21.c @@ -0,0 +1,18 @@ +/* + * Copyright 2007 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.21. + */ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) + +/* 2.6.21 compat code goes here */ + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) */ + diff --git a/compat/compat-2.6.22.c b/compat/compat-2.6.22.c new file mode 100644 index 0000000..b1ff1f3 --- /dev/null +++ b/compat/compat-2.6.22.c @@ -0,0 +1,18 @@ +/* + * Copyright 2007 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.22. + */ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)) + +/* 2.6.22 compat code goes here */ + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) */ + diff --git a/compat/compat-2.6.23.c b/compat/compat-2.6.23.c new file mode 100644 index 0000000..136d949 --- /dev/null +++ b/compat/compat-2.6.23.c @@ -0,0 +1,249 @@ +/* + * Copyright 2007 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.23. + */ + +#include + +/* All things not in 2.6.22 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + +/* On net/core/dev.c as of 2.6.24 */ +int __dev_addr_delete(struct dev_addr_list **list, int *count, + void *addr, int alen, int glbl) +{ + struct dev_addr_list *da; + + for (; (da = *list) != NULL; list = &da->next) { + if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && + alen == da->da_addrlen) { + if (glbl) { + int old_glbl = da->da_gusers; + da->da_gusers = 0; + if (old_glbl == 0) + break; + } + if (--da->da_users) + return 0; + + *list = da->next; + kfree(da); + (*count)--; + return 0; + } + } + return -ENOENT; +} +EXPORT_SYMBOL(__dev_addr_delete); + +/* On net/core/dev.c as of 2.6.24. This is not yet used by mac80211 but + * might as well add it */ +int __dev_addr_add(struct dev_addr_list **list, int *count, + void *addr, int alen, int glbl) +{ + struct dev_addr_list *da; + + for (da = *list; da != NULL; da = da->next) { + if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && + da->da_addrlen == alen) { + if (glbl) { + int old_glbl = da->da_gusers; + da->da_gusers = 1; + if (old_glbl) + return 0; + } + da->da_users++; + return 0; + } + } + + da = kmalloc(sizeof(*da), GFP_ATOMIC); + if (da == NULL) + return -ENOMEM; + memcpy(da->da_addr, addr, alen); + da->da_addrlen = alen; + da->da_users = 1; + da->da_gusers = glbl ? 1 : 0; + da->next = *list; + *list = da; + (*count)++; + return 0; +} +EXPORT_SYMBOL(__dev_addr_add); + + +/* Part of net/core/dev_mcast.c as of 2.6.23. This is a slightly different version. 
+ * Since da->da_synced is not part of 2.6.22 we need to take longer route when + * syncing */ + +/** + * dev_mc_sync - Synchronize device's multicast list to another device + * @to: destination device + * @from: source device + * + * Add newly added addresses to the destination device and release + * addresses that have no users left. The source device must be + * locked by netif_tx_lock_bh. + * + * This function is intended to be called from the dev->set_multicast_list + * function of layered software devices. + */ +int dev_mc_sync(struct net_device *to, struct net_device *from) +{ + struct dev_addr_list *da, *next, *da_to; + int err = 0; + + netif_tx_lock_bh(to); + da = from->mc_list; + while (da != NULL) { + int synced = 0; + next = da->next; + da_to = to->mc_list; + /* 2.6.22 does not have da->da_synced so lets take the long route */ + while (da_to != NULL) { + if (memcmp(da_to->da_addr, da->da_addr, da_to->da_addrlen) == 0 && + da->da_addrlen == da_to->da_addrlen) + synced = 1; + break; + } + if (!synced) { + err = __dev_addr_add(&to->mc_list, &to->mc_count, + da->da_addr, da->da_addrlen, 0); + if (err < 0) + break; + da->da_users++; + } else if (da->da_users == 1) { + __dev_addr_delete(&to->mc_list, &to->mc_count, + da->da_addr, da->da_addrlen, 0); + __dev_addr_delete(&from->mc_list, &from->mc_count, + da->da_addr, da->da_addrlen, 0); + } + da = next; + } + if (!err) + __dev_set_rx_mode(to); + netif_tx_unlock_bh(to); + + return err; +} +EXPORT_SYMBOL(dev_mc_sync); + + +/* Part of net/core/dev_mcast.c as of 2.6.23. This is a slighty different version. + * Since da->da_synced is not part of 2.6.22 we need to take longer route when + * unsyncing */ + +/** + * dev_mc_unsync - Remove synchronized addresses from the destination + * device + * @to: destination device + * @from: source device + * + * Remove all addresses that were added to the destination device by + * dev_mc_sync(). This function is intended to be called from the + * dev->stop function of layered software devices. + */ +void dev_mc_unsync(struct net_device *to, struct net_device *from) +{ + struct dev_addr_list *da, *next, *da_to; + + netif_tx_lock_bh(from); + netif_tx_lock_bh(to); + + da = from->mc_list; + while (da != NULL) { + bool synced = false; + next = da->next; + da_to = to->mc_list; + /* 2.6.22 does not have da->da_synced so lets take the long route */ + while (da_to != NULL) { + if (memcmp(da_to->da_addr, da->da_addr, da_to->da_addrlen) == 0 && + da->da_addrlen == da_to->da_addrlen) + synced = true; + break; + } + if (!synced) { + da = next; + continue; + } + __dev_addr_delete(&to->mc_list, &to->mc_count, + da->da_addr, da->da_addrlen, 0); + __dev_addr_delete(&from->mc_list, &from->mc_count, + da->da_addr, da->da_addrlen, 0); + da = next; + } + __dev_set_rx_mode(to); + + netif_tx_unlock_bh(to); + netif_tx_unlock_bh(from); +} +EXPORT_SYMBOL(dev_mc_unsync); + +/* Added as of 2.6.23 on net/core/dev.c. Slightly modifed, no dev->set_rx_mode on + * 2.6.22 so ignore that. */ + +/* + * Upload unicast and multicast address lists to device and + * configure RX filtering. When the device doesn't support unicast + * filtering it is put in promiscous mode while unicast addresses + * are present. + */ +void __dev_set_rx_mode(struct net_device *dev) +{ + /* dev_open will call this function so the list will stay sane. 
*/ + if (!(dev->flags&IFF_UP)) + return; + + if (!netif_device_present(dev)) + return; + +/* This needs to be ported to 2.6.22 framework */ +#if 0 + /* Unicast addresses changes may only happen under the rtnl, + * therefore calling __dev_set_promiscuity here is safe. + */ + if (dev->uc_count > 0 && !dev->uc_promisc) { + __dev_set_promiscuity(dev, 1); + dev->uc_promisc = 1; + } else if (dev->uc_count == 0 && dev->uc_promisc) { + __dev_set_promiscuity(dev, -1); + dev->uc_promisc = 0; + } +#endif + + if (dev->set_multicast_list) + dev->set_multicast_list(dev); +} + +#ifndef HAVE_PCI_SET_MWI +int pci_try_set_mwi(struct pci_dev *dev) +{ + return 0; +} +EXPORT_SYMBOL(pci_try_set_mwi); +#else + +/** + * pci_try_set_mwi - enables memory-write-invalidate PCI transaction + * @dev: the PCI device for which MWI is enabled + * + * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. + * Callers are not required to check the return value. + * + * RETURNS: An appropriate -ERRNO error value on error, or zero for success. + */ +int pci_try_set_mwi(struct pci_dev *dev) +{ + int rc = pci_set_mwi(dev); + return rc; +} +EXPORT_SYMBOL(pci_try_set_mwi); +#endif + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) */ + diff --git a/compat/compat-2.6.24.c b/compat/compat-2.6.24.c new file mode 100644 index 0000000..31a9212 --- /dev/null +++ b/compat/compat-2.6.24.c @@ -0,0 +1,185 @@ +/* + * Copyright 2007 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.24. + */ + +#include + +/* All things not in 2.6.22 and 2.6.23 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) + +#include + +/* + * We simply won't use it though, just declare it for our wrappers and + * for usage with tons of code that makes mention to it. + */ +struct net init_net; +EXPORT_SYMBOL(init_net); + +/* Part of net/ethernet/eth.c as of 2.6.24 */ +char *print_mac(char *buf, const u8 *addr) +{ + sprintf(buf, MAC_FMT, + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + return buf; +} +EXPORT_SYMBOL(print_mac); + +/* 2.6.22 and 2.6.23 have eth_header_cache_update defined as extern in include/linux/etherdevice.h + * and actually defined in net/ethernet/eth.c but 2.6.24 exports it. Lets export it here */ + +/** + * eth_header_cache_update - update cache entry + * @hh: destination cache entry + * @dev: network device + * @haddr: new hardware address + * + * Called by Address Resolution module to notify changes in address. + */ +void eth_header_cache_update(struct hh_cache *hh, + struct net_device *dev, + unsigned char *haddr) +{ + memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)), + haddr, ETH_ALEN); +} +EXPORT_SYMBOL(eth_header_cache_update); + +/* 2.6.22 and 2.6.23 have eth_header_cache defined as extern in include/linux/etherdevice.h + * and actually defined in net/ethernet/eth.c but 2.6.24 exports it. Lets export it here */ + +/** + * eth_header_cache - fill cache entry from neighbour + * @neigh: source neighbour + * @hh: destination cache entry + * Create an Ethernet header template from the neighbour. 
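As a usage note for the print_mac() helper exported above, a tiny sketch (illustrative only; an 18-byte buffer holds the six colon-separated octets plus the terminating NUL):

#include <linux/netdevice.h>

static void example_log_hwaddr(struct net_device *dev)
{
	char mac[18];	/* six "%02x" groups, five ':' separators, one '\0' */

	printk(KERN_DEBUG "%s: hw address %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));
}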
+ */ +int eth_header_cache(struct neighbour *neigh, struct hh_cache *hh) +{ + __be16 type = hh->hh_type; + struct ethhdr *eth; + const struct net_device *dev = neigh->dev; + + eth = (struct ethhdr *) + (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth)))); + + if (type == htons(ETH_P_802_3)) + return -1; + + eth->h_proto = type; + memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); + memcpy(eth->h_dest, neigh->ha, ETH_ALEN); + hh->hh_len = ETH_HLEN; + return 0; +} +EXPORT_SYMBOL(eth_header_cache); + +/* 2.6.22 and 2.6.23 have eth_header() defined as extern in include/linux/etherdevice.h + * and actually defined in net/ethernet/eth.c but 2.6.24 exports it. Lets export it here */ + +/** + * eth_header - create the Ethernet header + * @skb: buffer to alter + * @dev: source device + * @type: Ethernet type field + * @daddr: destination address (NULL leave destination address) + * @saddr: source address (NULL use device source address) + * @len: packet length (<= skb->len) + * + * + * Set the protocol type. For a packet of type ETH_P_802_3 we put the length + * in here instead. It is up to the 802.2 layer to carry protocol information. + */ +int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, + void *daddr, void *saddr, unsigned len) +{ + struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); + + if (type != ETH_P_802_3) + eth->h_proto = htons(type); + else + eth->h_proto = htons(len); + + /* + * Set the source hardware address. + */ + + if (!saddr) + saddr = dev->dev_addr; + memcpy(eth->h_source, saddr, dev->addr_len); + + if (daddr) { + memcpy(eth->h_dest, daddr, dev->addr_len); + return ETH_HLEN; + } + + /* + * Anyway, the loopback-device should never use this function... + */ + + if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) { + memset(eth->h_dest, 0, dev->addr_len); + return ETH_HLEN; + } + + return -ETH_HLEN; +} + +EXPORT_SYMBOL(eth_header); + +/* 2.6.22 and 2.6.23 have eth_rebuild_header defined as extern in include/linux/etherdevice.h + * and actually defined in net/ethernet/eth.c but 2.6.24 exports it. Lets export it here */ + +/** + * eth_rebuild_header- rebuild the Ethernet MAC header. + * @skb: socket buffer to update + * + * This is called after an ARP or IPV6 ndisc it's resolution on this + * sk_buff. We now let protocol (ARP) fill in the other fields. + * + * This routine CANNOT use cached dst->neigh! + * Really, it is used only when dst->neigh is wrong. + */ +int eth_rebuild_header(struct sk_buff *skb) +{ + struct ethhdr *eth = (struct ethhdr *)skb->data; + struct net_device *dev = skb->dev; + + switch (eth->h_proto) { +#ifdef CONFIG_INET + case __constant_htons(ETH_P_IP): + return arp_find(eth->h_dest, skb); +#endif + default: + printk(KERN_DEBUG + "%s: unable to resolve type %X addresses.\n", + dev->name, (int)eth->h_proto); + + memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); + break; + } + + return 0; +} +EXPORT_SYMBOL(eth_rebuild_header); + +/* 2.6.24 will introduce struct pci_dev is_pcie bit. To help + * with the compatibility code (compat.diff) being smaller, we provide a helper + * so in cases where that will be used we can simply slap ifdefs with this + * routine. Use compat_ prefex to not pollute namespace. */ +int compat_is_pcie(struct pci_dev *pdev) +{ + int cap; + cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); + return cap ? 
1 : 0; +} +EXPORT_SYMBOL(compat_is_pcie); + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) */ + diff --git a/compat/compat-2.6.25.c b/compat/compat-2.6.25.c new file mode 100644 index 0000000..7546821 --- /dev/null +++ b/compat/compat-2.6.25.c @@ -0,0 +1,102 @@ +/* + * Copyright 2007-2010 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.25. + */ + +/* All things not in 2.6.22, 2.6.23 and 2.6.24 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) + +#include + +/** + * The following things are out of ./lib/vsprintf.c + * The new iwlwifi driver is using them. + */ + +/** + * strict_strtoul - convert a string to an unsigned long strictly + * @cp: The string to be converted + * @base: The number base to use + * @res: The converted result value + * + * strict_strtoul converts a string to an unsigned long only if the + * string is really an unsigned long string, any string containing + * any invalid char at the tail will be rejected and -EINVAL is returned, + * only a newline char at the tail is acceptible because people generally + * change a module parameter in the following way: + * + * echo 1024 > /sys/module/e1000/parameters/copybreak + * + * echo will append a newline to the tail. + * + * It returns 0 if conversion is successful and *res is set to the converted + * value, otherwise it returns -EINVAL and *res is set to 0. + * + * simple_strtoul just ignores the successive invalid characters and + * return the converted value of prefix part of the string. + */ +int strict_strtoul(const char *cp, unsigned int base, unsigned long *res); + +/** + * strict_strtol - convert a string to a long strictly + * @cp: The string to be converted + * @base: The number base to use + * @res: The converted result value + * + * strict_strtol is similiar to strict_strtoul, but it allows the first + * character of a string is '-'. + * + * It returns 0 if conversion is successful and *res is set to the converted + * value, otherwise it returns -EINVAL and *res is set to 0. + */ +int strict_strtol(const char *cp, unsigned int base, long *res); + +#define define_strict_strtoux(type, valtype) \ +int strict_strtou##type(const char *cp, unsigned int base, valtype *res)\ +{ \ + char *tail; \ + valtype val; \ + size_t len; \ + \ + *res = 0; \ + len = strlen(cp); \ + if (len == 0) \ + return -EINVAL; \ + \ + val = simple_strtou##type(cp, &tail, base); \ + if ((*tail == '\0') || \ + ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\ + *res = val; \ + return 0; \ + } \ + \ + return -EINVAL; \ +} \ + +#define define_strict_strtox(type, valtype) \ +int strict_strto##type(const char *cp, unsigned int base, valtype *res) \ +{ \ + int ret; \ + if (*cp == '-') { \ + ret = strict_strtou##type(cp+1, base, res); \ + if (!ret) \ + *res = -(*res); \ + } else \ + ret = strict_strtou##type(cp, base, res); \ + \ + return ret; \ +} \ + +define_strict_strtoux(l, unsigned long) +define_strict_strtox(l, long) + +EXPORT_SYMBOL(strict_strtoul); +EXPORT_SYMBOL(strict_strtol); + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) */ + diff --git a/compat/compat-2.6.26.c b/compat/compat-2.6.26.c new file mode 100644 index 0000000..b9bf9e7 --- /dev/null +++ b/compat/compat-2.6.26.c @@ -0,0 +1,93 @@ +/* + * Copyright 2007-2010 Luis R. 
Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.26. + * + * Copyright holders from ported work: + * + * Copyright (c) 2002-2003 Patrick Mochel + * Copyright (c) 2006-2007 Greg Kroah-Hartman + * Copyright (c) 2006-2007 Novell Inc. + */ + +#include + +/* All things not in 2.6.25 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) + + +/* 2.6.24 does not have the struct kobject with a name */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)) + +/** + * kobject_set_name_vargs - Set the name of an kobject + * @kobj: struct kobject to set the name of + * @fmt: format string used to build the name + * @vargs: vargs to format the string. + */ +static +int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, + va_list vargs) +{ + const char *old_name = kobj->name; + char *s; + + if (kobj->name && !fmt) + return 0; + + kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); + if (!kobj->name) + return -ENOMEM; + + /* ewww... some of these buggers have '/' in the name ... */ + while ((s = strchr(kobj->name, '/'))) + s[0] = '!'; + + kfree(old_name); + return 0; +} +#else +static +int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, + va_list vargs) +{ + struct device *dev; + unsigned int len; + va_list aq; + + dev = container_of(kobj, struct device, kobj); + + va_copy(aq, vargs); + len = vsnprintf(NULL, 0, fmt, aq); + va_end(aq); + + len = len < BUS_ID_SIZE ? (len + 1) : BUS_ID_SIZE; + + vsnprintf(dev->bus_id, len, fmt, vargs); + return 0; +} +#endif + +/** + * dev_set_name - set a device name + * @dev: device + * @fmt: format string for the device's name + */ +int dev_set_name(struct device *dev, const char *fmt, ...) +{ + va_list vargs; + int err; + + va_start(vargs, fmt); + err = kobject_set_name_vargs(&dev->kobj, fmt, vargs); + va_end(vargs); + return err; +} +EXPORT_SYMBOL_GPL(dev_set_name); + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) */ + diff --git a/compat/compat-2.6.27.c b/compat/compat-2.6.27.c new file mode 100644 index 0000000..b78d465 --- /dev/null +++ b/compat/compat-2.6.27.c @@ -0,0 +1,244 @@ +/* + * Copyright 2007 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.27 + */ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) + +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +#include +#include +#include +#include +#endif + +/* rfkill notification chain */ +#define RFKILL_STATE_CHANGED 0x0001 /* state of a normal rfkill + switch has changed */ + +/* + * e5899e1b7d73e67de758a32174a859cc2586c0b9 made pci_pme_capable() external, + * it was defined internally, some drivers want access to this information. + * + * Unfortunately the old kernels do not have ->pm_cap or ->pme_support so + * we have to call the PCI routines directly. + */ + +/** + * pci_pme_capable - check the capability of PCI device to generate PME# + * @dev: PCI device to handle. + * @state: PCI state from which device will issue PME#. + * + * This is the backport code for older kernels for compat-wireless, we read stuff + * from the initialization stuff from pci_pm_init(). 
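The dev_set_name() backport above is called exactly like the 2.6.26+ original; a minimal sketch (hypothetical device names):

#include <linux/device.h>

static int example_register_child(struct device *parent,
				  struct device *child, int idx)
{
	int err;

	child->parent = parent;
	err = dev_set_name(child, "example%d", idx);	/* replaces direct bus_id fiddling */
	if (err)
		return err;

	return device_register(child);
}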
+ */ +bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) +{ + int pm; + u16 pmc = 0; + u16 pme_support; /* as from the pci dev */ + /* find PCI PM capability in list */ + pm = pci_find_capability(dev, PCI_CAP_ID_PM); + if (!pm) + return false; + + if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { + dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", + pmc & PCI_PM_CAP_VER_MASK); + return false; + } + + pmc &= PCI_PM_CAP_PME_MASK; + + if (!pmc) + return false; + + pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; + + /* Check device's ability to generate PME# */ + + return !!(pme_support & (1 << state)); +} +EXPORT_SYMBOL(pci_pme_capable); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +/** + * mmc_align_data_size - pads a transfer size to a more optimal value + * @card: the MMC card associated with the data transfer + * @sz: original transfer size + * + * Pads the original data size with a number of extra bytes in + * order to avoid controller bugs and/or performance hits + * (e.g. some controllers revert to PIO for certain sizes). + * + * Returns the improved size, which might be unmodified. + * + * Note that this function is only relevant when issuing a + * single scatter gather entry. + */ +unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) +{ + /* + * FIXME: We don't have a system for the controller to tell + * the core about its problems yet, so for now we just 32-bit + * align the size. + */ + sz = ((sz + 3) / 4) * 4; + + return sz; +} +EXPORT_SYMBOL(mmc_align_data_size); + +/* + * Calculate the maximum byte mode transfer size + */ +static inline unsigned int sdio_max_byte_size(struct sdio_func *func) +{ + unsigned int mval = (unsigned int) min(func->card->host->max_seg_size, + func->card->host->max_blk_size); + mval = min(mval, func->max_blksize); + return min(mval, 512u); /* maximum size for byte mode */ +} + +/** + * sdio_align_size - pads a transfer size to a more optimal value + * @func: SDIO function + * @sz: original transfer size + * + * Pads the original data size with a number of extra bytes in + * order to avoid controller bugs and/or performance hits + * (e.g. some controllers revert to PIO for certain sizes). + * + * If possible, it will also adjust the size so that it can be + * handled in just a single request. + * + * Returns the improved size, which might be unmodified. + */ +unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) +{ + unsigned int orig_sz; + unsigned int blk_sz, byte_sz; + unsigned chunk_sz; + + orig_sz = sz; + + /* + * Do a first check with the controller, in case it + * wants to increase the size up to a point where it + * might need more than one block. + */ + sz = mmc_align_data_size(func->card, sz); + + /* + * If we can still do this with just a byte transfer, then + * we're done. + */ + if (sz <= sdio_max_byte_size(func)) + return sz; + + if (func->card->cccr.multi_block) { + /* + * Check if the transfer is already block aligned + */ + if ((sz % func->cur_blksize) == 0) + return sz; + + /* + * Realign it so that it can be done with one request, + * and recheck if the controller still likes it. + */ + blk_sz = ((sz + func->cur_blksize - 1) / + func->cur_blksize) * func->cur_blksize; + blk_sz = mmc_align_data_size(func->card, blk_sz); + + /* + * This value is only good if it is still just + * one request. + */ + if ((blk_sz % func->cur_blksize) == 0) + return blk_sz; + + /* + * We failed to do one request, but at least try to + * pad the remainder properly. 
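Note that, in the pci_pme_capable() backport as shown above, pmc is only ever initialised to zero and never read back from the device's PM capability registers before it is tested, so the helper will always report no PME# support; presumably a pci_read_config_word() of the capability's PMC word was lost here. Usage itself mirrors the mainline helper; a short sketch:

#include <linux/pci.h>

/* Illustrative: decide whether a device could wake the system from D3. */
static bool example_can_wake_from_d3(struct pci_dev *pdev)
{
	return pci_pme_capable(pdev, PCI_D3hot) ||
	       pci_pme_capable(pdev, PCI_D3cold);
}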
+ */ + byte_sz = mmc_align_data_size(func->card, + sz % func->cur_blksize); + if (byte_sz <= sdio_max_byte_size(func)) { + blk_sz = sz / func->cur_blksize; + return blk_sz * func->cur_blksize + byte_sz; + } + } else { + /* + * We need multiple requests, so first check that the + * controller can handle the chunk size; + */ + chunk_sz = mmc_align_data_size(func->card, + sdio_max_byte_size(func)); + if (chunk_sz == sdio_max_byte_size(func)) { + /* + * Fix up the size of the remainder (if any) + */ + byte_sz = orig_sz % chunk_sz; + if (byte_sz) { + byte_sz = mmc_align_data_size(func->card, + byte_sz); + } + + return (orig_sz / chunk_sz) * chunk_sz + byte_sz; + } + } + + /* + * The controller is simply incapable of transferring the size + * we want in decent manner, so just return the original size. + */ + return orig_sz; +} +EXPORT_SYMBOL_GPL(sdio_align_size); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) */ + +#ifdef CONFIG_DEBUG_FS +/* + * Backport of debugfs_remove_recursive() without using the internals globals + * which are used by the kernel's version with: + * simple_release_fs(&debugfs_mount, &debugfs_mount_count); + */ +void debugfs_remove_recursive(struct dentry *dentry) +{ + struct dentry *last = NULL; + + /* Sanity checks */ + if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode) + return; + + while (dentry != last) { + struct dentry *child = dentry; + + /* Find a child without children */ + while (!list_empty(&child->d_subdirs)) + child = list_entry(child->d_subdirs.next, + struct dentry, + d_u.d_child); + + /* Bail out if we already tried to remove that entry */ + if (child == last) + return; + + last = child; + debugfs_remove(child); + } +} +EXPORT_SYMBOL_GPL(debugfs_remove_recursive); +#endif /* CONFIG_DEBUG_FS */ + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */ + diff --git a/compat/compat-2.6.28.c b/compat/compat-2.6.28.c new file mode 100644 index 0000000..115bd23 --- /dev/null +++ b/compat/compat-2.6.28.c @@ -0,0 +1,373 @@ +/* + * Copyright 2007 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.28. + */ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)) + +#include + +/* 2.6.28 compat code goes here */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) +#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE) +/* + * Compat-wireless notes for USB backport stuff: + * + * urb->reject exists on 2.6.27, the poison/unpoison helpers + * did not though. The anchor poison does not exist so we cannot use them. + * + * USB anchor poising seems to exist to prevent future driver sumbissions + * of usb_anchor_urb() to an anchor marked as poisoned. For older kernels + * we cannot use that, so new usb_anchor_urb()s will be anchored. The down + * side to this should be submission of URBs will continue being anchored + * on an anchor instead of having them being rejected immediately when the + * driver realized we needed to stop. For ar9170 we poison URBs upon the + * ar9170 mac80211 stop callback(), don't think this should be so bad. + * It mean there is period of time in older kernels for which we continue + * to anchor new URBs to a known stopped anchor. 
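For the debugfs_remove_recursive() backport above, typical usage is a one-call teardown of a driver's whole debugfs tree; a sketch with a hypothetical directory handle:

#include <linux/debugfs.h>

static struct dentry *example_debug_dir;	/* created earlier with debugfs_create_dir() */

static void example_debugfs_exit(void)
{
	/* Removes example_debug_dir and every file/dir created below it. */
	debugfs_remove_recursive(example_debug_dir);
	example_debug_dir = NULL;
}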
We have two anchors + * (TX, and RX) + */ + +#if 0 +/** + * usb_poison_urb - reliably kill a transfer and prevent further use of an URB + * @urb: pointer to URB describing a previously submitted request, + * may be NULL + * + * This routine cancels an in-progress request. It is guaranteed that + * upon return all completion handlers will have finished and the URB + * will be totally idle and cannot be reused. These features make + * this an ideal way to stop I/O in a disconnect() callback. + * If the request has not already finished or been unlinked + * the completion handler will see urb->status == -ENOENT. + * + * After and while the routine runs, attempts to resubmit the URB will fail + * with error -EPERM. Thus even if the URB's completion handler always + * tries to resubmit, it will not succeed and the URB will become idle. + * + * This routine may not be used in an interrupt context (such as a bottom + * half or a completion handler), or when holding a spinlock, or in other + * situations where the caller can't schedule(). + * + * This routine should not be called by a driver after its disconnect + * method has returned. + */ +void usb_poison_urb(struct urb *urb) +{ + might_sleep(); + if (!(urb && urb->dev && urb->ep)) + return; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)) + spin_lock_irq(&usb_reject_lock); +#endif + ++urb->reject; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)) + spin_unlock_irq(&usb_reject_lock); +#endif + /* + * XXX: usb_hcd_unlink_urb() needs backporting... this is defined + * on usb hcd.c but urb.c gets access to it. That is, older kernels + * have usb_hcd_unlink_urb() but its not exported, nor can we + * re-implement it exactly. This essentially dequeues the urb from + * hw, we need to figure out a way to backport this. + */ + //usb_hcd_unlink_urb(urb, -ENOENT); + + wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0); +} +EXPORT_SYMBOL_GPL(usb_poison_urb); +#endif +#endif /* CONFIG_USB */ + +#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) + +#include +struct pcmcia_cfg_mem { + tuple_t tuple; + cisparse_t parse; + u8 buf[256]; + cistpl_cftable_entry_t dflt; +}; +/** + * pcmcia_loop_config() - loop over configuration options + * @p_dev: the struct pcmcia_device which we need to loop for. + * @conf_check: function to call for each configuration option. + * It gets passed the struct pcmcia_device, the CIS data + * describing the configuration option, and private data + * being passed to pcmcia_loop_config() + * @priv_data: private data to be passed to the conf_check function. + * + * pcmcia_loop_config() loops over all configuration options, and calls + * the driver-specific conf_check() for each one, checking whether + * it is a valid one. Returns 0 on success or errorcode otherwise. 
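A sketch of the conf_check() callback shape that pcmcia_loop_config() expects (hypothetical driver logic; returning 0 accepts the entry and ends the loop):

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int example_conf_check(struct pcmcia_device *p_dev,
			      cistpl_cftable_entry_t *cfg,
			      cistpl_cftable_entry_t *dflt,
			      unsigned int vcc,
			      void *priv_data)
{
	/* Accept the first configuration entry that offers an I/O window. */
	return (cfg->io.nwin > 0) ? 0 : -ENODEV;
}

/* ...from the driver's config path:
 *	ret = pcmcia_loop_config(p_dev, example_conf_check, NULL);
 */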
+ */ +int pcmcia_loop_config(struct pcmcia_device *p_dev, + int (*conf_check) (struct pcmcia_device *p_dev, + cistpl_cftable_entry_t *cfg, + cistpl_cftable_entry_t *dflt, + unsigned int vcc, + void *priv_data), + void *priv_data) +{ + struct pcmcia_cfg_mem *cfg_mem; + + tuple_t *tuple; + int ret; + unsigned int vcc; + + cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL); + if (cfg_mem == NULL) + return -ENOMEM; + + /* get the current Vcc setting */ + vcc = p_dev->socket->socket.Vcc; + + tuple = &cfg_mem->tuple; + tuple->TupleData = cfg_mem->buf; + tuple->TupleDataMax = 255; + tuple->TupleOffset = 0; + tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY; + tuple->Attributes = 0; + + ret = pcmcia_get_first_tuple(p_dev, tuple); + while (!ret) { + cistpl_cftable_entry_t *cfg = &cfg_mem->parse.cftable_entry; + + if (pcmcia_get_tuple_data(p_dev, tuple)) + goto next_entry; + + if (pcmcia_parse_tuple(tuple, &cfg_mem->parse)) + goto next_entry; + + /* default values */ + p_dev->conf.ConfigIndex = cfg->index; + if (cfg->flags & CISTPL_CFTABLE_DEFAULT) + cfg_mem->dflt = *cfg; + + ret = conf_check(p_dev, cfg, &cfg_mem->dflt, vcc, priv_data); + if (!ret) + break; + +next_entry: + ret = pcmcia_get_next_tuple(p_dev, tuple); + } + + return ret; +} +EXPORT_SYMBOL(pcmcia_loop_config); + +#endif /* CONFIG_PCMCIA */ + +#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE) + +void usb_unpoison_urb(struct urb *urb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)) + unsigned long flags; +#endif + + if (!urb) + return; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)) + spin_lock_irqsave(&usb_reject_lock, flags); +#endif + --urb->reject; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)) + spin_unlock_irqrestore(&usb_reject_lock, flags); +#endif +} +EXPORT_SYMBOL_GPL(usb_unpoison_urb); + + +#if 0 +/** + * usb_poison_anchored_urbs - cease all traffic from an anchor + * @anchor: anchor the requests are bound to + * + * this allows all outstanding URBs to be poisoned starting + * from the back of the queue. Newly added URBs will also be + * poisoned + * + * This routine should not be called by a driver after its disconnect + * method has returned. 
+ */ +void usb_poison_anchored_urbs(struct usb_anchor *anchor) +{ + struct urb *victim; + + spin_lock_irq(&anchor->lock); + // anchor->poisoned = 1; /* XXX: Cannot backport */ + while (!list_empty(&anchor->urb_list)) { + victim = list_entry(anchor->urb_list.prev, struct urb, + anchor_list); + /* we must make sure the URB isn't freed before we kill it*/ + usb_get_urb(victim); + spin_unlock_irq(&anchor->lock); + /* this will unanchor the URB */ + usb_poison_urb(victim); + usb_put_urb(victim); + spin_lock_irq(&anchor->lock); + } + spin_unlock_irq(&anchor->lock); +} +EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs); +#endif + +/** + * usb_get_from_anchor - get an anchor's oldest urb + * @anchor: the anchor whose urb you want + * + * this will take the oldest urb from an anchor, + * unanchor and return it + */ +struct urb *usb_get_from_anchor(struct usb_anchor *anchor) +{ + struct urb *victim; + unsigned long flags; + + spin_lock_irqsave(&anchor->lock, flags); + if (!list_empty(&anchor->urb_list)) { + victim = list_entry(anchor->urb_list.next, struct urb, + anchor_list); + usb_get_urb(victim); + spin_unlock_irqrestore(&anchor->lock, flags); + usb_unanchor_urb(victim); + } else { + spin_unlock_irqrestore(&anchor->lock, flags); + victim = NULL; + } + + return victim; +} + +EXPORT_SYMBOL_GPL(usb_get_from_anchor); + +/** + * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs + * @anchor: the anchor whose urbs you want to unanchor + * + * use this to get rid of all an anchor's urbs + */ +void usb_scuttle_anchored_urbs(struct usb_anchor *anchor) +{ + struct urb *victim; + unsigned long flags; + + spin_lock_irqsave(&anchor->lock, flags); + while (!list_empty(&anchor->urb_list)) { + victim = list_entry(anchor->urb_list.prev, struct urb, + anchor_list); + usb_get_urb(victim); + spin_unlock_irqrestore(&anchor->lock, flags); + /* this may free the URB */ + usb_unanchor_urb(victim); + usb_put_urb(victim); + spin_lock_irqsave(&anchor->lock, flags); + } + spin_unlock_irqrestore(&anchor->lock, flags); +} + +EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs); + +/** + * usb_anchor_empty - is an anchor empty + * @anchor: the anchor you want to query + * + * returns 1 if the anchor has no urbs associated with it + */ +int usb_anchor_empty(struct usb_anchor *anchor) +{ + return list_empty(&anchor->urb_list); +} + +EXPORT_SYMBOL_GPL(usb_anchor_empty); +#endif /* CONFIG_USB */ +#endif + +void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) +{ + /* + * Make sure the BAR is actually a memory resource, not an IO resource + */ + if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { + WARN_ON(1); + return NULL; + } + return ioremap_nocache(pci_resource_start(pdev, bar), + pci_resource_len(pdev, bar)); +} +EXPORT_SYMBOL_GPL(pci_ioremap_bar); + +static unsigned long round_jiffies_common(unsigned long j, int cpu, + bool force_up) +{ + int rem; + unsigned long original = j; + + /* + * We don't want all cpus firing their timers at once hitting the + * same lock or cachelines, so we skew each extra cpu with an extra + * 3 jiffies. This 3 jiffies came originally from the mm/ code which + * already did this. + * The skew is done by adding 3*cpunr, then round, then subtract this + * extra offset again. + */ + j += cpu * 3; + + rem = j % HZ; + + /* + * If the target jiffie is just after a whole second (which can happen + * due to delays of the timer irq, long irq off times etc etc) then + * we should round down to the whole second, not up. Use 1/4th second + * as cutoff for this rounding as an extreme upper bound for this. 
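Usage of the pci_ioremap_bar() backport above matches mainline; a short sketch for a probe path:

#include <linux/pci.h>

static void __iomem *example_map_registers(struct pci_dev *pdev)
{
	/* Maps the whole of BAR 0, or returns NULL if it is not a MEM resource. */
	void __iomem *regs = pci_ioremap_bar(pdev, 0);

	if (!regs)
		dev_err(&pdev->dev, "cannot map BAR 0\n");
	return regs;
}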
+ * But never round down if @force_up is set. + */ + if (rem < HZ/4 && !force_up) /* round down */ + j = j - rem; + else /* round up */ + j = j - rem + HZ; + + /* now that we have rounded, subtract the extra skew again */ + j -= cpu * 3; + + if (j <= jiffies) /* rounding ate our timeout entirely; */ + return original; + return j; +} + +/** + * round_jiffies_up - function to round jiffies up to a full second + * @j: the time in (absolute) jiffies that should be rounded + * + * This is the same as round_jiffies() except that it will never + * round down. This is useful for timeouts for which the exact time + * of firing does not matter too much, as long as they don't fire too + * early. + */ +unsigned long round_jiffies_up(unsigned long j) +{ + return round_jiffies_common(j, raw_smp_processor_id(), true); +} +EXPORT_SYMBOL_GPL(round_jiffies_up); + +void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, + int size) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += size; +} +EXPORT_SYMBOL(skb_add_rx_frag); + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) */ diff --git a/compat/compat-2.6.29.c b/compat/compat-2.6.29.c new file mode 100644 index 0000000..c8e0183 --- /dev/null +++ b/compat/compat-2.6.29.c @@ -0,0 +1,68 @@ +/* + * Copyright 2007 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.29. + */ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) + +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) +#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE) +/** + * usb_unpoison_anchored_urbs - let an anchor be used successfully again + * @anchor: anchor the requests are bound to + * + * Reverses the effect of usb_poison_anchored_urbs + * the anchor can be used normally after it returns + */ +void usb_unpoison_anchored_urbs(struct usb_anchor *anchor) +{ + unsigned long flags; + struct urb *lazarus; + + spin_lock_irqsave(&anchor->lock, flags); + list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) { + usb_unpoison_urb(lazarus); + } + //anchor->poisoned = 0; /* XXX: cannot backport */ + spin_unlock_irqrestore(&anchor->lock, flags); +} +EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs); +#endif /* CONFIG_USB */ +#endif + +/** + * eth_mac_addr - set new Ethernet hardware address + * @dev: network device + * @p: socket address + * Change hardware address of device. + * + * This doesn't change hardware matching, so needs to be overridden + * for most real devices. + */ +int eth_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if (netif_running(dev)) + return -EBUSY; + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + return 0; +} +EXPORT_SYMBOL(eth_mac_addr); +/* Source: net/ethernet/eth.c */ + + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) */ + diff --git a/compat/compat-2.6.30.c b/compat/compat-2.6.30.c new file mode 100644 index 0000000..f821918 --- /dev/null +++ b/compat/compat-2.6.30.c @@ -0,0 +1,18 @@ +/* + * Copyright 2007 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
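The skb_add_rx_frag() backport above is typically called from an RX path to attach a freshly filled page to an skb; a minimal sketch:

#include <linux/skbuff.h>

static void example_rx_attach_page(struct sk_buff *skb, struct page *page,
				   unsigned int offset, unsigned int len)
{
	/* Append the page after any existing fragments and fix up
	 * skb->len, skb->data_len and skb->truesize. */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len);
}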
+ * + * Compatibility file for Linux wireless for kernels 2.6.30. + */ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) + +/* 2.6.30 compat code goes here */ + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) */ + diff --git a/compat/compat-2.6.31.c b/compat/compat-2.6.31.c new file mode 100644 index 0000000..dc8588f --- /dev/null +++ b/compat/compat-2.6.31.c @@ -0,0 +1,64 @@ +/* + * Copyright 2007 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.31. + */ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) + +#include + +/** + * genl_register_family_with_ops - register a generic netlink family + * @family: generic netlink family + * @ops: operations to be registered + * @n_ops: number of elements to register + * + * Registers the specified family and operations from the specified table. + * Only one family may be registered with the same family name or identifier. + * + * The family id may equal GENL_ID_GENERATE causing an unique id to + * be automatically generated and assigned. + * + * Either a doit or dumpit callback must be specified for every registered + * operation or the function will fail. Only one operation structure per + * command identifier may be registered. + * + * See include/net/genetlink.h for more documenation on the operations + * structure. + * + * This is equivalent to calling genl_register_family() followed by + * genl_register_ops() for every operation entry in the table taking + * care to unregister the family on error path. + * + * Return 0 on success or a negative error code. + */ +int genl_register_family_with_ops(struct genl_family *family, + struct genl_ops *ops, size_t n_ops) +{ + int err, i; + + err = genl_register_family(family); + if (err) + return err; + + for (i = 0; i < n_ops; ++i, ++ops) { + err = genl_register_ops(family, ops); + if (err) + goto err_out; + } + return 0; +err_out: + genl_unregister_family(family); + return err; +} +EXPORT_SYMBOL(genl_register_family_with_ops); + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) */ + diff --git a/compat/compat-2.6.32.c b/compat/compat-2.6.32.c new file mode 100644 index 0000000..22c2c19 --- /dev/null +++ b/compat/compat-2.6.32.c @@ -0,0 +1,124 @@ +/* + * Copyright 2007 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.32. + */ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)) + +#include + +int __dev_addr_add(struct dev_addr_list **list, int *count, + void *addr, int alen, int glbl) +{ + struct dev_addr_list *da; + + for (da = *list; da != NULL; da = da->next) { + if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && + da->da_addrlen == alen) { + if (glbl) { + int old_glbl = da->da_gusers; + da->da_gusers = 1; + if (old_glbl) + return 0; + } + da->da_users++; + return 0; + } + } + + da = kzalloc(sizeof(*da), GFP_ATOMIC); + if (da == NULL) + return -ENOMEM; + memcpy(da->da_addr, addr, alen); + da->da_addrlen = alen; + da->da_users = 1; + da->da_gusers = glbl ? 
1 : 0; + da->next = *list; + *list = da; + (*count)++; + return 0; +} + +int __dev_addr_delete(struct dev_addr_list **list, int *count, + void *addr, int alen, int glbl) +{ + struct dev_addr_list *da; + + for (; (da = *list) != NULL; list = &da->next) { + if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && + alen == da->da_addrlen) { + if (glbl) { + int old_glbl = da->da_gusers; + da->da_gusers = 0; + if (old_glbl == 0) + break; + } + if (--da->da_users) + return 0; + + *list = da->next; + kfree(da); + (*count)--; + return 0; + } + } + return -ENOENT; +} + +int __dev_addr_sync(struct dev_addr_list **to, int *to_count, + struct dev_addr_list **from, int *from_count) +{ + struct dev_addr_list *da, *next; + int err = 0; + + da = *from; + while (da != NULL) { + next = da->next; + if (!da->da_synced) { + err = __dev_addr_add(to, to_count, + da->da_addr, da->da_addrlen, 0); + if (err < 0) + break; + da->da_synced = 1; + da->da_users++; + } else if (da->da_users == 1) { + __dev_addr_delete(to, to_count, + da->da_addr, da->da_addrlen, 0); + __dev_addr_delete(from, from_count, + da->da_addr, da->da_addrlen, 0); + } + da = next; + } + return err; +} +EXPORT_SYMBOL_GPL(__dev_addr_sync); + +void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, + struct dev_addr_list **from, int *from_count) +{ + struct dev_addr_list *da, *next; + + da = *from; + while (da != NULL) { + next = da->next; + if (da->da_synced) { + __dev_addr_delete(to, to_count, + da->da_addr, da->da_addrlen, 0); + da->da_synced = 0; + __dev_addr_delete(from, from_count, + da->da_addr, da->da_addrlen, 0); + } + da = next; + } +} +EXPORT_SYMBOL_GPL(__dev_addr_unsync); + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)) */ + diff --git a/compat/compat-2.6.33.c b/compat/compat-2.6.33.c new file mode 100644 index 0000000..055082e --- /dev/null +++ b/compat/compat-2.6.33.c @@ -0,0 +1,137 @@ +/* + * Copyright 2009 Hauke Mehrtens + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Compatibility file for Linux wireless for kernels 2.6.33. + */ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)) + +#include + +#if defined(CONFIG_PCCARD) || defined(CONFIG_PCCARD_MODULE) + +/** + * pccard_loop_tuple() - loop over tuples in the CIS + * @s: the struct pcmcia_socket where the card is inserted + * @function: the device function we loop for + * @code: which CIS code shall we look for? + * @parse: buffer where the tuple shall be parsed (or NULL, if no parse) + * @priv_data: private data to be passed to the loop_tuple function. + * @loop_tuple: function to call for each CIS entry of type @function. IT + * gets passed the raw tuple, the paresed tuple (if @parse is + * set) and @priv_data. + * + * pccard_loop_tuple() loops over all CIS entries of type @function, and + * calls the @loop_tuple function for each entry. If the call to @loop_tuple + * returns 0, the loop exits. Returns 0 on success or errorcode otherwise. 
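A sketch of the loop_tuple() callback shape used with pccard_loop_tuple() (and with the pcmcia_loop_tuple() wrapper defined further down); the MANFID lookup is hypothetical:

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>

static int example_find_manfid(tuple_t *tuple, cisparse_t *parse,
			       void *priv_data)
{
	u16 *manfid = priv_data;

	if (!parse)
		return -EINVAL;

	*manfid = parse->manfid.manf;
	return 0;	/* returning 0 stops the loop */
}

/* ...called as:
 *	ret = pccard_loop_tuple(s, function, CISTPL_MANFID, &parse,
 *				&manfid, example_find_manfid);
 */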
+ */ +int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function, + cisdata_t code, cisparse_t *parse, void *priv_data, + int (*loop_tuple) (tuple_t *tuple, + cisparse_t *parse, + void *priv_data)) +{ + tuple_t tuple; + cisdata_t *buf; + int ret; + + buf = kzalloc(256, GFP_KERNEL); + if (buf == NULL) { + dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n"); + return -ENOMEM; + } + + tuple.TupleData = buf; + tuple.TupleDataMax = 255; + tuple.TupleOffset = 0; + tuple.DesiredTuple = code; + tuple.Attributes = 0; + + ret = pccard_get_first_tuple(s, function, &tuple); + while (!ret) { + if (pccard_get_tuple_data(s, &tuple)) + goto next_entry; + + if (parse) + if (pcmcia_parse_tuple(&tuple, parse)) + goto next_entry; + + ret = loop_tuple(&tuple, parse, priv_data); + if (!ret) + break; + +next_entry: + ret = pccard_get_next_tuple(s, function, &tuple); + } + + kfree(buf); + return ret; +} +EXPORT_SYMBOL(pccard_loop_tuple); +/* Source: drivers/pcmcia/cistpl.c */ + +#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) + +struct pcmcia_loop_mem { + struct pcmcia_device *p_dev; + void *priv_data; + int (*loop_tuple) (struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv_data); +}; + +/** + * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_config() + * + * pcmcia_do_loop_tuple() is the internal callback for the call from + * pcmcia_loop_tuple() to pccard_loop_tuple(). Data is transferred + * by a struct pcmcia_cfg_mem. + */ +static int pcmcia_do_loop_tuple(tuple_t *tuple, cisparse_t *parse, void *priv) +{ + struct pcmcia_loop_mem *loop = priv; + + return loop->loop_tuple(loop->p_dev, tuple, loop->priv_data); +}; + +/** + * pcmcia_loop_tuple() - loop over tuples in the CIS + * @p_dev: the struct pcmcia_device which we need to loop for. + * @code: which CIS code shall we look for? + * @priv_data: private data to be passed to the loop_tuple function. + * @loop_tuple: function to call for each CIS entry of type @function. IT + * gets passed the raw tuple and @priv_data. + * + * pcmcia_loop_tuple() loops over all CIS entries of type @function, and + * calls the @loop_tuple function for each entry. If the call to @loop_tuple + * returns 0, the loop exits. Returns 0 on success or errorcode otherwise. + */ +int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code, + int (*loop_tuple) (struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv_data), + void *priv_data) +{ + struct pcmcia_loop_mem loop = { + .p_dev = p_dev, + .loop_tuple = loop_tuple, + .priv_data = priv_data}; + + return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL, + &loop, pcmcia_do_loop_tuple); +} +EXPORT_SYMBOL(pcmcia_loop_tuple); +/* Source: drivers/pcmcia/pcmcia_resource.c */ + +#endif /* CONFIG_PCMCIA */ + +#endif /* CONFIG_PCCARD */ + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)) */ + diff --git a/compat/compat_firmware_class.c b/compat/compat_firmware_class.c new file mode 100644 index 0000000..ce937a3 --- /dev/null +++ b/compat/compat_firmware_class.c @@ -0,0 +1,726 @@ +/* + * firmware_class.c - Multi purpose firmware loading support + * + * Copyright (c) 2003 Manuel Estrada Sainz + * + * Please see Documentation/firmware_class/ for more information. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define to_dev(obj) container_of(obj, struct device, kobj) + +MODULE_AUTHOR("Manuel Estrada Sainz"); +MODULE_DESCRIPTION("Multi purpose firmware loading support"); +MODULE_LICENSE("GPL"); + +enum { + FW_STATUS_LOADING, + FW_STATUS_DONE, + FW_STATUS_ABORT, +}; + +static int loading_timeout = 60; /* In seconds */ + +/* fw_lock could be moved to 'struct firmware_priv' but since it is just + * guarding for corner cases a global lock should be OK */ +static DEFINE_MUTEX(fw_lock); + +struct firmware_priv { + char *fw_id; + struct completion completion; + struct bin_attribute attr_data; + struct firmware *fw; + unsigned long status; + struct page **pages; + int nr_pages; + int page_array_size; + const char *vdata; + struct timer_list timeout; +}; + +//#ifdef CONFIG_FW_LOADER +#if 0 +extern struct builtin_fw __start_builtin_fw[]; +extern struct builtin_fw __end_builtin_fw[]; +#else /* Module case. Avoid ifdefs later; it'll all optimise out */ +static struct builtin_fw *__start_builtin_fw; +static struct builtin_fw *__end_builtin_fw; +#endif + +static void +fw_load_abort(struct firmware_priv *fw_priv) +{ + set_bit(FW_STATUS_ABORT, &fw_priv->status); + wmb(); + complete(&fw_priv->completion); +} + +static ssize_t +firmware_timeout_show(struct class *class, char *buf) +{ + return sprintf(buf, "%d\n", loading_timeout); +} + +/** + * firmware_timeout_store - set number of seconds to wait for firmware + * @class: device class pointer + * @buf: buffer to scan for timeout value + * @count: number of bytes in @buf + * + * Sets the number of seconds to wait for the firmware. Once + * this expires an error will be returned to the driver and no + * firmware will be provided. + * + * Note: zero means 'wait forever'. 
+ **/ +static ssize_t +firmware_timeout_store(struct class *class, const char *buf, size_t count) +{ + loading_timeout = simple_strtol(buf, NULL, 10); + if (loading_timeout < 0) + loading_timeout = 0; + return count; +} + +static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store); + +static void fw_dev_release(struct device *dev); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct firmware_priv *fw_priv = dev_get_drvdata(dev); + + if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->fw_id)) + return -ENOMEM; + if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout)) + return -ENOMEM; + + return 0; +} +#else +static int firmware_uevent(struct device *dev, char **envp, + int num_envp, char *buf, int size) +{ + struct firmware_priv *fw_priv = dev_get_drvdata(dev); + int error, len = 0, i = 0; + + error = add_uevent_var(envp, num_envp, &i, + buf, size, &len, + "FIRMWARE=%s", fw_priv->fw_id); + if (error) + goto exit; + + error = add_uevent_var(envp, num_envp, &i, + buf, size, &len, + "TIMEOUT=%i", loading_timeout); + if (error) + goto exit; + + return 0; +exit: + envp[i] = NULL; + return error; +} +#endif + +static struct class firmware_class = { + .name = "compat_firmware", + .dev_uevent = firmware_uevent, + .dev_release = fw_dev_release, +}; + +static ssize_t firmware_loading_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct firmware_priv *fw_priv = dev_get_drvdata(dev); + int loading = test_bit(FW_STATUS_LOADING, &fw_priv->status); + return sprintf(buf, "%d\n", loading); +} + +/* Some architectures don't have PAGE_KERNEL_RO */ +#ifndef PAGE_KERNEL_RO +#define PAGE_KERNEL_RO PAGE_KERNEL +#endif +/** + * firmware_loading_store - set value in the 'loading' control file + * @dev: device pointer + * @attr: device attribute pointer + * @buf: buffer to scan for loading control value + * @count: number of bytes in @buf + * + * The relevant values are: + * + * 1: Start a load, discarding any previous partial load. + * 0: Conclude the load and hand the data to the driver code. + * -1: Conclude the load with an error and discard any written data. 
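For reference, the loading protocol described above as a userspace helper would drive it (a hosted-C sketch; the sysfs paths are illustrative and normally come from the firmware uevent):

#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

static int example_load(const char *loading_path, const char *data_path,
			const void *image, size_t len)
{
	int fd;

	fd = open(loading_path, O_WRONLY);	/* e.g. .../compat_firmware/<dev>/loading */
	if (fd < 0)
		return -1;
	(void)write(fd, "1", 1);		/* 1: start, discard any partial load */
	close(fd);

	fd = open(data_path, O_WRONLY);		/* e.g. .../compat_firmware/<dev>/data */
	if (fd < 0)
		return -1;
	(void)write(fd, image, len);		/* stream the firmware image */
	close(fd);

	fd = open(loading_path, O_WRONLY);
	if (fd < 0)
		return -1;
	(void)write(fd, "0", 1);		/* 0: done, hand the image to the driver */
	close(fd);
	return 0;
}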
+ **/ +static ssize_t firmware_loading_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct firmware_priv *fw_priv = dev_get_drvdata(dev); + int loading = simple_strtol(buf, NULL, 10); + int i; + + switch (loading) { + case 1: + mutex_lock(&fw_lock); + if (!fw_priv->fw) { + mutex_unlock(&fw_lock); + break; + } + vfree(fw_priv->fw->data); + fw_priv->fw->data = NULL; + for (i = 0; i < fw_priv->nr_pages; i++) + __free_page(fw_priv->pages[i]); + kfree(fw_priv->pages); + fw_priv->pages = NULL; + fw_priv->page_array_size = 0; + fw_priv->nr_pages = 0; + fw_priv->fw->size = 0; + set_bit(FW_STATUS_LOADING, &fw_priv->status); + mutex_unlock(&fw_lock); + break; + case 0: + if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { + vfree(fw_priv->fw->data); + fw_priv->fw->data = vmap(fw_priv->pages, + fw_priv->nr_pages, + 0, PAGE_KERNEL_RO); + if (!fw_priv->fw->data) { + dev_err(dev, "%s: vmap() failed\n", __func__); + goto err; + } + /* Pages will be freed by vfree() */ + fw_priv->page_array_size = 0; + fw_priv->nr_pages = 0; + complete(&fw_priv->completion); + clear_bit(FW_STATUS_LOADING, &fw_priv->status); + break; + } + /* fallthrough */ + default: + dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); + /* fallthrough */ + case -1: + err: + fw_load_abort(fw_priv); + break; + } + + return count; +} + +static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store); + +static ssize_t +firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = to_dev(kobj); + struct firmware_priv *fw_priv = dev_get_drvdata(dev); + struct firmware *fw; + ssize_t ret_count; + + mutex_lock(&fw_lock); + fw = fw_priv->fw; + if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) { + ret_count = -ENODEV; + goto out; + } + if (offset > fw->size) { + ret_count = 0; + goto out; + } + if (count > fw->size - offset) + count = fw->size - offset; + + ret_count = count; + + while (count) { + void *page_data; + int page_nr = offset >> PAGE_SHIFT; + int page_ofs = offset & (PAGE_SIZE-1); + int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); + + page_data = kmap(fw_priv->pages[page_nr]); + + memcpy(buffer, page_data + page_ofs, page_cnt); + + kunmap(fw_priv->pages[page_nr]); + buffer += page_cnt; + offset += page_cnt; + count -= page_cnt; + } +out: + mutex_unlock(&fw_lock); + return ret_count; +} + +static int +fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) +{ + int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT; + + /* If the array of pages is too small, grow it... 
*/ + if (fw_priv->page_array_size < pages_needed) { + int new_array_size = max(pages_needed, + fw_priv->page_array_size * 2); + struct page **new_pages; + + new_pages = kmalloc(new_array_size * sizeof(void *), + GFP_KERNEL); + if (!new_pages) { + fw_load_abort(fw_priv); + return -ENOMEM; + } + memcpy(new_pages, fw_priv->pages, + fw_priv->page_array_size * sizeof(void *)); + memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) * + (new_array_size - fw_priv->page_array_size)); + kfree(fw_priv->pages); + fw_priv->pages = new_pages; + fw_priv->page_array_size = new_array_size; + } + + while (fw_priv->nr_pages < pages_needed) { + fw_priv->pages[fw_priv->nr_pages] = + alloc_page(GFP_KERNEL | __GFP_HIGHMEM); + + if (!fw_priv->pages[fw_priv->nr_pages]) { + fw_load_abort(fw_priv); + return -ENOMEM; + } + fw_priv->nr_pages++; + } + return 0; +} + +/** + * firmware_data_write - write method for firmware + * @kobj: kobject for the device + * @bin_attr: bin_attr structure + * @buffer: buffer being written + * @offset: buffer offset for write in total data store area + * @count: buffer size + * + * Data written to the 'data' attribute will be later handed to + * the driver as a firmware image. + **/ +static ssize_t +firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = to_dev(kobj); + struct firmware_priv *fw_priv = dev_get_drvdata(dev); + struct firmware *fw; + ssize_t retval; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + mutex_lock(&fw_lock); + fw = fw_priv->fw; + if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) { + retval = -ENODEV; + goto out; + } + retval = fw_realloc_buffer(fw_priv, offset + count); + if (retval) + goto out; + + retval = count; + + while (count) { + void *page_data; + int page_nr = offset >> PAGE_SHIFT; + int page_ofs = offset & (PAGE_SIZE - 1); + int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); + + page_data = kmap(fw_priv->pages[page_nr]); + + memcpy(page_data + page_ofs, buffer, page_cnt); + + kunmap(fw_priv->pages[page_nr]); + buffer += page_cnt; + offset += page_cnt; + count -= page_cnt; + } + + fw->size = max_t(size_t, offset, fw->size); +out: + mutex_unlock(&fw_lock); + return retval; +} + +static struct bin_attribute firmware_attr_data_tmpl = { + .attr = {.name = "data", .mode = 0644}, + .size = 0, + .read = firmware_data_read, + .write = firmware_data_write, +}; + +static void fw_dev_release(struct device *dev) +{ + struct firmware_priv *fw_priv = dev_get_drvdata(dev); + int i; + + for (i = 0; i < fw_priv->nr_pages; i++) + __free_page(fw_priv->pages[i]); + kfree(fw_priv->pages); + kfree(fw_priv->fw_id); + kfree(fw_priv); + kfree(dev); + + module_put(THIS_MODULE); +} + +static void +firmware_class_timeout(u_long data) +{ + struct firmware_priv *fw_priv = (struct firmware_priv *) data; + fw_load_abort(fw_priv); +} + +static int fw_register_device(struct device **dev_p, const char *fw_name, + struct device *device) +{ + int retval; + struct firmware_priv *fw_priv = kzalloc(sizeof(*fw_priv), + GFP_KERNEL); + struct device *f_dev = kzalloc(sizeof(*f_dev), GFP_KERNEL); + + *dev_p = NULL; + + if (!fw_priv || !f_dev) { + dev_err(device, "%s: kmalloc failed\n", __func__); + retval = -ENOMEM; + goto error_kfree; + } + + init_completion(&fw_priv->completion); + fw_priv->attr_data = firmware_attr_data_tmpl; + fw_priv->fw_id = kstrdup(fw_name, GFP_KERNEL); + if (!fw_priv->fw_id) { + dev_err(device, "%s: Firmware name allocation failed\n", + __func__); + retval = 
-ENOMEM; + goto error_kfree; + } + + fw_priv->timeout.function = firmware_class_timeout; + fw_priv->timeout.data = (u_long) fw_priv; + init_timer(&fw_priv->timeout); + + dev_set_name(f_dev, "%s", dev_name(device)); + f_dev->parent = device; + f_dev->class = &firmware_class; + dev_set_drvdata(f_dev, fw_priv); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)) + dev_set_uevent_suppress(f_dev, 1); +#endif + retval = device_register(f_dev); + if (retval) { + dev_err(device, "%s: device_register failed\n", __func__); + put_device(f_dev); + return retval; + } + *dev_p = f_dev; + return 0; + +error_kfree: + kfree(f_dev); + kfree(fw_priv); + return retval; +} + +static int fw_setup_device(struct firmware *fw, struct device **dev_p, + const char *fw_name, struct device *device, + int uevent) +{ + struct device *f_dev; + struct firmware_priv *fw_priv; + int retval; + + *dev_p = NULL; + retval = fw_register_device(&f_dev, fw_name, device); + if (retval) + goto out; + + /* Need to pin this module until class device is destroyed */ + __module_get(THIS_MODULE); + + fw_priv = dev_get_drvdata(f_dev); + + fw_priv->fw = fw; + retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data); + if (retval) { + dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__); + goto error_unreg; + } + + retval = device_create_file(f_dev, &dev_attr_loading); + if (retval) { + dev_err(device, "%s: device_create_file failed\n", __func__); + goto error_unreg; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)) + if (uevent) + dev_set_uevent_suppress(f_dev, 0); +#endif + *dev_p = f_dev; + goto out; + +error_unreg: + device_unregister(f_dev); +out: + return retval; +} + +static int +_request_firmware(const struct firmware **firmware_p, const char *name, + struct device *device, int uevent) +{ + struct device *f_dev; + struct firmware_priv *fw_priv; + struct firmware *firmware; + struct builtin_fw *builtin; + int retval; + + if (!firmware_p) + return -EINVAL; + + *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); + if (!firmware) { + dev_err(device, "%s: kmalloc(struct firmware) failed\n", + __func__); + retval = -ENOMEM; + goto out; + } + + for (builtin = __start_builtin_fw; builtin != __end_builtin_fw; + builtin++) { + if (strcmp(name, builtin->name)) + continue; + dev_info(device, "firmware: using built-in firmware %s\n", + name); + firmware->size = builtin->size; + firmware->data = builtin->data; + return 0; + } + + if (uevent) + dev_info(device, "firmware: requesting %s\n", name); + + retval = fw_setup_device(firmware, &f_dev, name, device, uevent); + if (retval) + goto error_kfree_fw; + + fw_priv = dev_get_drvdata(f_dev); + + if (uevent) { + if (loading_timeout > 0) { + fw_priv->timeout.expires = jiffies + loading_timeout * HZ; + add_timer(&fw_priv->timeout); + } + + kobject_uevent(&f_dev->kobj, KOBJ_ADD); + wait_for_completion(&fw_priv->completion); + set_bit(FW_STATUS_DONE, &fw_priv->status); + del_timer_sync(&fw_priv->timeout); + } else + wait_for_completion(&fw_priv->completion); + + mutex_lock(&fw_lock); + if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status)) { + retval = -ENOENT; + release_firmware(fw_priv->fw); + *firmware_p = NULL; + } + fw_priv->fw = NULL; + mutex_unlock(&fw_lock); + device_unregister(f_dev); + goto out; + +error_kfree_fw: + kfree(firmware); + *firmware_p = NULL; +out: + return retval; +} + +/** + * request_firmware: - send firmware request and wait for it + * @firmware_p: pointer to firmware image + * @name: name of firmware file + * @device: 
device for which firmware is being loaded + * + * @firmware_p will be used to return a firmware image by the name + * of @name for device @device. + * + * Should be called from user context where sleeping is allowed. + * + * @name will be used as $FIRMWARE in the uevent environment and + * should be distinctive enough not to be confused with any other + * firmware image for this or any other device. + **/ +int +request_firmware(const struct firmware **firmware_p, const char *name, + struct device *device) +{ + int uevent = 1; + return _request_firmware(firmware_p, name, device, uevent); +} + +/** + * release_firmware: - release the resource associated with a firmware image + * @fw: firmware resource to release + **/ +void +release_firmware(const struct firmware *fw) +{ + struct builtin_fw *builtin; + + if (fw) { + for (builtin = __start_builtin_fw; builtin != __end_builtin_fw; + builtin++) { + if (fw->data == builtin->data) + goto free_fw; + } + vfree(fw->data); + free_fw: + kfree(fw); + } +} + +/* Async support */ +struct firmware_work { + struct work_struct work; + struct module *module; + const char *name; + struct device *device; + void *context; + void (*cont)(const struct firmware *fw, void *context); + int uevent; +}; + +static int +request_firmware_work_func(void *arg) +{ + struct firmware_work *fw_work = arg; + const struct firmware *fw; + int ret; + if (!arg) { + WARN_ON(1); + return 0; + } + ret = _request_firmware(&fw, fw_work->name, fw_work->device, + fw_work->uevent); + + fw_work->cont(fw, fw_work->context); + + module_put(fw_work->module); + kfree(fw_work); + return ret; +} + +/** + * request_firmware_nowait: asynchronous version of request_firmware + * @module: module requesting the firmware + * @uevent: sends uevent to copy the firmware image if this flag + * is non-zero else the firmware copy must be done manually. + * @name: name of firmware file + * @device: device for which firmware is being loaded + * @gfp: allocation flags + * @context: will be passed over to @cont, and + * @fw may be %NULL if firmware request fails. + * @cont: function will be called asynchronously when the firmware + * request is over. + * + * Asynchronous variant of request_firmware() for user contexts where + * it is not possible to sleep for long time. It can't be called + * in atomic contexts. 
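A short sketch of the usual synchronous calling pattern for the request_firmware()/release_firmware() pair above ("example-ucode.bin" is a placeholder name):

#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_ucode(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "example-ucode.bin", dev);
	if (err)
		return err;	/* no answer to the uevent in time, or load aborted */

	/* ...validate and copy fw->data / fw->size into the device here... */

	release_firmware(fw);
	return 0;
}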
+ **/ +int +request_firmware_nowait( + struct module *module, int uevent, + const char *name, struct device *device, gfp_t gfp, void *context, + void (*cont)(const struct firmware *fw, void *context)) +{ + struct task_struct *task; + struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work), + gfp); + + if (!fw_work) + return -ENOMEM; + if (!try_module_get(module)) { + kfree(fw_work); + return -EFAULT; + } + + *fw_work = (struct firmware_work) { + .module = module, + .name = name, + .device = device, + .context = context, + .cont = cont, + .uevent = uevent, + }; + + task = kthread_run(request_firmware_work_func, fw_work, + "firmware/%s", name); + + if (IS_ERR(task)) { + fw_work->cont(NULL, fw_work->context); + module_put(fw_work->module); + kfree(fw_work); + return PTR_ERR(task); + } + return 0; +} + +static int __init +firmware_class_init(void) +{ + int error; + error = class_register(&firmware_class); + if (error) { + printk(KERN_ERR "%s: class_register failed\n", __func__); + return error; + } + error = class_create_file(&firmware_class, &class_attr_timeout); + if (error) { + printk(KERN_ERR "%s: class_create_file failed\n", + __func__); + class_unregister(&firmware_class); + } + return error; + +} +static void __exit +firmware_class_exit(void) +{ + class_unregister(&firmware_class); +} + +fs_initcall(firmware_class_init); +module_exit(firmware_class_exit); + +EXPORT_SYMBOL(release_firmware); +EXPORT_SYMBOL(request_firmware); +EXPORT_SYMBOL(request_firmware_nowait); diff --git a/compat/main.c b/compat/main.c new file mode 100644 index 0000000..d7f713d --- /dev/null +++ b/compat/main.c @@ -0,0 +1,25 @@ +#include + +MODULE_AUTHOR("Luis R. Rodriguez"); +MODULE_DESCRIPTION("Kernel compatibility module"); +MODULE_LICENSE("GPL"); + +static int __init compat_init(void) +{ + /* pm-qos for kernels <= 2.6.24, this is a no-op on newer kernels */ + compat_pm_qos_power_init(); + printk(KERN_INFO "Generic kernel compatibility enabled based on " + "linux-next next-20100113\n"); + + return 0; +} +module_init(compat_init); + +static void __exit compat_exit(void) +{ + compat_pm_qos_power_deinit(); + + return; +} +module_exit(compat_exit); + diff --git a/compat/pm_qos_params.c b/compat/pm_qos_params.c new file mode 100644 index 0000000..833d98c --- /dev/null +++ b/compat/pm_qos_params.c @@ -0,0 +1,477 @@ +#include + +/* This is the backport of pm-qos params for kernels <= 2.6.25 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) + +/* + * This module exposes the interface to kernel space for specifying + * QoS dependencies. It provides infrastructure for registration of: + * + * Dependents on a QoS value : register requirements + * Watchers of QoS value : get notified when target QoS value changes + * + * This QoS design is best effort based. Dependents register their QoS needs. + * Watchers register to keep track of the current QoS needs of the system. + * + * There are 3 basic classes of QoS parameter: latency, timeout, throughput + * each have defined units: + * latency: usec + * timeout: usec <-- currently not used. + * throughput: kbs (kilo byte / sec) + * + * There are lists of pm_qos_objects each one wrapping requirements, notifiers + * + * User mode requirements on a QOS parameter register themselves to the + * subsystem by opening the device node /dev/... and writing there request to + * the node. As long as the process holds a file handle open to the node the + * client continues to be accounted for. 
Upon file release the usermode + * requirement is removed and a new qos target is computed. This way when the + * requirement that the application has is cleaned up when closes the file + * pointer or exits the pm_qos_object will get an opportunity to clean up. + * + * Mark Gross + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * locking rule: all changes to requirements or notifiers lists + * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock + * held, taken with _irqsave. One lock to rule them all + */ +struct requirement_list { + struct list_head list; + union { + s32 value; + s32 usec; + s32 kbps; + }; + char *name; +}; + +static s32 max_compare(s32 v1, s32 v2); +static s32 min_compare(s32 v1, s32 v2); + +struct pm_qos_object { + struct requirement_list requirements; + struct blocking_notifier_head *notifiers; + struct miscdevice pm_qos_power_miscdev; + char *name; + s32 default_value; + atomic_t target_value; + s32 (*comparitor)(s32, s32); +}; + +static struct pm_qos_object null_pm_qos; +static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); +static struct pm_qos_object cpu_dma_pm_qos = { + .requirements = {LIST_HEAD_INIT(cpu_dma_pm_qos.requirements.list)}, + .notifiers = &cpu_dma_lat_notifier, + .name = "cpu_dma_latency", + .default_value = 2000 * USEC_PER_SEC, + .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), + .comparitor = min_compare +}; + +static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); +static struct pm_qos_object network_lat_pm_qos = { + .requirements = {LIST_HEAD_INIT(network_lat_pm_qos.requirements.list)}, + .notifiers = &network_lat_notifier, + .name = "network_latency", + .default_value = 2000 * USEC_PER_SEC, + .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), + .comparitor = min_compare +}; + + +static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); +static struct pm_qos_object network_throughput_pm_qos = { + .requirements = + {LIST_HEAD_INIT(network_throughput_pm_qos.requirements.list)}, + .notifiers = &network_throughput_notifier, + .name = "network_throughput", + .default_value = 0, + .target_value = ATOMIC_INIT(0), + .comparitor = max_compare +}; + +static BLOCKING_NOTIFIER_HEAD(system_bus_freq_notifier); +static struct pm_qos_object system_bus_freq_pm_qos = { + .requirements = + {LIST_HEAD_INIT(system_bus_freq_pm_qos.requirements.list)}, + .notifiers = &system_bus_freq_notifier, + .name = "system_bus_freq", + .default_value = 0, + .target_value = ATOMIC_INIT(0), + .comparitor = max_compare +}; + + +static struct pm_qos_object *pm_qos_array[PM_QOS_NUM_CLASSES] = { + [PM_QOS_RESERVED] = &null_pm_qos, + [PM_QOS_CPU_DMA_LATENCY] = &cpu_dma_pm_qos, + [PM_QOS_NETWORK_LATENCY] = &network_lat_pm_qos, + [PM_QOS_NETWORK_THROUGHPUT] = &network_throughput_pm_qos, + [PM_QOS_SYSTEM_BUS_FREQ] = &system_bus_freq_pm_qos, +}; + +static DEFINE_SPINLOCK(pm_qos_lock); + +static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos); +static int pm_qos_power_open(struct inode *inode, struct file *filp); +static int pm_qos_power_release(struct inode *inode, struct file *filp); + +static const struct file_operations pm_qos_power_fops = { + .write = pm_qos_power_write, + .open = pm_qos_power_open, + .release = pm_qos_power_release, +}; + +/* static helper functions */ +static s32 max_compare(s32 v1, s32 v2) +{ + return max(v1, v2); +} + +static s32 min_compare(s32 v1, s32 v2) +{ + return min(v1, v2); +} + + +static void 
update_target(int target) +{ + s32 extreme_value; + struct requirement_list *node; + unsigned long flags; + int call_notifier = 0; + + spin_lock_irqsave(&pm_qos_lock, flags); + extreme_value = pm_qos_array[target]->default_value; + list_for_each_entry(node, + &pm_qos_array[target]->requirements.list, list) { + extreme_value = pm_qos_array[target]->comparitor( + extreme_value, node->value); + } + if (atomic_read(&pm_qos_array[target]->target_value) != extreme_value) { + call_notifier = 1; + atomic_set(&pm_qos_array[target]->target_value, extreme_value); + pr_debug(KERN_ERR "new target for qos %d is %d\n", target, + atomic_read(&pm_qos_array[target]->target_value)); + } + spin_unlock_irqrestore(&pm_qos_lock, flags); + + if (call_notifier) + blocking_notifier_call_chain(pm_qos_array[target]->notifiers, + (unsigned long) extreme_value, NULL); +} + +static int register_pm_qos_misc(struct pm_qos_object *qos) +{ + qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR; + qos->pm_qos_power_miscdev.name = qos->name; + qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops; + + return misc_register(&qos->pm_qos_power_miscdev); +} + +static int find_pm_qos_object_by_minor(int minor) +{ + int pm_qos_class; + + for (pm_qos_class = 0; + pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) { + if (minor == + pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor) + return pm_qos_class; + } + return -1; +} + +/** + * pm_qos_requirement - returns current system wide qos expectation + * @pm_qos_class: identification of which qos value is requested + * + * This function returns the current target value in an atomic manner. + */ +int pm_qos_requirement(int pm_qos_class) +{ + return atomic_read(&pm_qos_array[pm_qos_class]->target_value); +} +EXPORT_SYMBOL_GPL(pm_qos_requirement); + +/** + * pm_qos_add_requirement - inserts new qos request into the list + * @pm_qos_class: identifies which list of qos request to us + * @name: identifies the request + * @value: defines the qos request + * + * This function inserts a new entry in the pm_qos_class list of requested qos + * performance characteristics. It recomputes the aggregate QoS expectations + * for the pm_qos_class of parameters. + */ +int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value) +{ + struct requirement_list *dep; + unsigned long flags; + + dep = kzalloc(sizeof(struct requirement_list), GFP_KERNEL); + if (dep) { + if (value == PM_QOS_DEFAULT_VALUE) + dep->value = pm_qos_array[pm_qos_class]->default_value; + else + dep->value = value; + dep->name = kstrdup(name, GFP_KERNEL); + if (!dep->name) + goto cleanup; + + spin_lock_irqsave(&pm_qos_lock, flags); + list_add(&dep->list, + &pm_qos_array[pm_qos_class]->requirements.list); + spin_unlock_irqrestore(&pm_qos_lock, flags); + update_target(pm_qos_class); + + return 0; + } + +cleanup: + kfree(dep); + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(pm_qos_add_requirement); + +/** + * pm_qos_update_requirement - modifies an existing qos request + * @pm_qos_class: identifies which list of qos request to us + * @name: identifies the request + * @value: defines the qos request + * + * Updates an existing qos requirement for the pm_qos_class of parameters along + * with updating the target pm_qos_class value. + * + * If the named request isn't in the list then no change is made. 
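A usage sketch for the requirement API above, not part of this patch: a kernel-side client registers a named request against one of the classes, updates it as conditions change, and drops it when done. The "example_wifi" name, the 50 usec figure, the wrapper functions and the <linux/pm_qos_params.h> include are assumptions for illustration only.

#include <linux/pm_qos_params.h>

static int example_enter_low_latency(void)
{
        /* Ask for at most 50 usec of CPU DMA latency while RX is active */
        return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
                                      "example_wifi", 50);
}

static void example_relax_latency(void)
{
        /* Fall back to the class default without dropping the entry */
        pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
                                  "example_wifi", PM_QOS_DEFAULT_VALUE);
}

static void example_leave_low_latency(void)
{
        pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "example_wifi");
}

User space gets the equivalent effect by keeping the corresponding misc device node (e.g. /dev/cpu_dma_latency) open and writing a single s32, which is what the file operations later in this file implement.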
+ */ +int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value) +{ + unsigned long flags; + struct requirement_list *node; + int pending_update = 0; + + spin_lock_irqsave(&pm_qos_lock, flags); + list_for_each_entry(node, + &pm_qos_array[pm_qos_class]->requirements.list, list) { + if (strcmp(node->name, name) == 0) { + if (new_value == PM_QOS_DEFAULT_VALUE) + node->value = + pm_qos_array[pm_qos_class]->default_value; + else + node->value = new_value; + pending_update = 1; + break; + } + } + spin_unlock_irqrestore(&pm_qos_lock, flags); + if (pending_update) + update_target(pm_qos_class); + + return 0; +} +EXPORT_SYMBOL_GPL(pm_qos_update_requirement); + +/** + * pm_qos_remove_requirement - modifies an existing qos request + * @pm_qos_class: identifies which list of qos request to us + * @name: identifies the request + * + * Will remove named qos request from pm_qos_class list of parameters and + * recompute the current target value for the pm_qos_class. + */ +void pm_qos_remove_requirement(int pm_qos_class, char *name) +{ + unsigned long flags; + struct requirement_list *node; + int pending_update = 0; + + spin_lock_irqsave(&pm_qos_lock, flags); + list_for_each_entry(node, + &pm_qos_array[pm_qos_class]->requirements.list, list) { + if (strcmp(node->name, name) == 0) { + kfree(node->name); + list_del(&node->list); + kfree(node); + pending_update = 1; + break; + } + } + spin_unlock_irqrestore(&pm_qos_lock, flags); + if (pending_update) + update_target(pm_qos_class); +} +EXPORT_SYMBOL_GPL(pm_qos_remove_requirement); + +/** + * pm_qos_add_notifier - sets notification entry for changes to target value + * @pm_qos_class: identifies which qos target changes should be notified. + * @notifier: notifier block managed by caller. + * + * will register the notifier into a notification chain that gets called + * upon changes to the pm_qos_class target value. + */ +int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier) +{ + int retval; + + retval = blocking_notifier_chain_register( + pm_qos_array[pm_qos_class]->notifiers, notifier); + + return retval; +} +EXPORT_SYMBOL_GPL(pm_qos_add_notifier); + +/** + * pm_qos_remove_notifier - deletes notification entry from chain. + * @pm_qos_class: identifies which qos target changes are notified. + * @notifier: notifier block to be removed. + * + * will remove the notifier from the notification chain that gets called + * upon changes to the pm_qos_class target value. 
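For completeness, a sketch of how a watcher hooks into these notifier chains; the callback name, the wrappers and the debug message are illustrative only, assuming the usual notifier_block conventions from <linux/notifier.h> and the <linux/pm_qos_params.h> header of this era.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>

static int example_latency_notify(struct notifier_block *nb,
                                  unsigned long new_target, void *data)
{
        /*
         * new_target is the recomputed extreme value that update_target()
         * feeds to blocking_notifier_call_chain().
         */
        printk(KERN_DEBUG "cpu_dma_latency target is now %lu usec\n",
               new_target);
        return NOTIFY_OK;
}

static struct notifier_block example_latency_nb = {
        .notifier_call = example_latency_notify,
};

static int example_watch_latency(void)
{
        return pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
                                   &example_latency_nb);
}

static void example_unwatch_latency(void)
{
        pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY, &example_latency_nb);
}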
+ */ +int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier) +{ + int retval; + + retval = blocking_notifier_chain_unregister( + pm_qos_array[pm_qos_class]->notifiers, notifier); + + return retval; +} +EXPORT_SYMBOL_GPL(pm_qos_remove_notifier); + +#define PID_NAME_LEN 32 + +static int pm_qos_power_open(struct inode *inode, struct file *filp) +{ + int ret; + long pm_qos_class; + char name[PID_NAME_LEN]; + + pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); + if (pm_qos_class >= 0) { + filp->private_data = (void *)pm_qos_class; + snprintf(name, PID_NAME_LEN, "process_%d", current->pid); + ret = pm_qos_add_requirement(pm_qos_class, name, + PM_QOS_DEFAULT_VALUE); + if (ret >= 0) + return 0; + } + return -EPERM; +} + +static int pm_qos_power_release(struct inode *inode, struct file *filp) +{ + int pm_qos_class; + char name[PID_NAME_LEN]; + + pm_qos_class = (long)filp->private_data; + snprintf(name, PID_NAME_LEN, "process_%d", current->pid); + pm_qos_remove_requirement(pm_qos_class, name); + + return 0; +} + +static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + s32 value; + int pm_qos_class; + char name[PID_NAME_LEN]; + + pm_qos_class = (long)filp->private_data; + if (count != sizeof(s32)) + return -EINVAL; + if (copy_from_user(&value, buf, sizeof(s32))) + return -EFAULT; + snprintf(name, PID_NAME_LEN, "process_%d", current->pid); + pm_qos_update_requirement(pm_qos_class, name, value); + + return sizeof(s32); +} + + +/* + * This initializes pm-qos for older kernels. + */ +int compat_pm_qos_power_init(void) +{ + int ret = 0; + + ret = register_pm_qos_misc(&cpu_dma_pm_qos); + if (ret < 0) { + printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n"); + return ret; + } + ret = register_pm_qos_misc(&network_lat_pm_qos); + if (ret < 0) { + printk(KERN_ERR "pm_qos_param: network_latency setup failed\n"); + return ret; + } + ret = register_pm_qos_misc(&network_throughput_pm_qos); + if (ret < 0) { + printk(KERN_ERR + "pm_qos_param: network_throughput setup failed\n"); + return ret; + } + ret = register_pm_qos_misc(&system_bus_freq_pm_qos); + if (ret < 0) + printk(KERN_ERR + "pm_qos_param: system_bus_freq setup failed\n"); + + return ret; +} + +int compat_pm_qos_power_deinit(void) +{ + int ret = 0; + + ret = misc_deregister(&cpu_dma_pm_qos.pm_qos_power_miscdev); + if (ret < 0) { + printk(KERN_ERR "pm_qos_param: cpu_dma_latency deinit failed\n"); + return ret; + } + + ret = misc_deregister(&network_lat_pm_qos.pm_qos_power_miscdev); + if (ret < 0) { + printk(KERN_ERR "pm_qos_param: network_latency deinit failed\n"); + return ret; + } + + ret = misc_deregister(&network_throughput_pm_qos.pm_qos_power_miscdev); + if (ret < 0) { + printk(KERN_ERR + "pm_qos_param: network_throughput deinit failed\n"); + return ret; + } + + ret = misc_deregister(&system_bus_freq_pm_qos.pm_qos_power_miscdev); + if (ret < 0) { + printk(KERN_ERR + "pm_qos_param: system_bus_freq deinit failed\n"); + return ret; + } + + return ret; +} +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) */ diff --git a/compat/scripts/compat_firmware_install b/compat/scripts/compat_firmware_install new file mode 100755 index 0000000..d92961a --- /dev/null +++ b/compat/scripts/compat_firmware_install @@ -0,0 +1,19 @@ +#!/bin/sh + +if [ -f /usr/bin/lsb_release ]; then + LSB_RED_ID=$(/usr/bin/lsb_release -i -s) +else + LSB_RED_ID="Unknown" +fi + +case $LSB_RED_ID in +"Ubuntu") + cp udev/ubuntu/compat_firmware.sh /lib/udev/ + cp 
udev/ubuntu/50-compat_firmware.rules /lib/udev/rules.d/ + ;; +*) + cp udev/compat_firmware.sh /lib/udev/ + cp udev/50-compat_firmware.rules /lib/udev/rules.d/ + ;; +esac + diff --git a/config.mk b/config.mk new file mode 100644 index 0000000..5565e12 --- /dev/null +++ b/config.mk @@ -0,0 +1,468 @@ +export + +## NOTE +## Make sure to have each variable declaration start +## in the first column, no whitespace allowed. + +ifeq ($(wildcard $(KLIB_BUILD)/.config),) +# These will be ignored by compat autoconf + CONFIG_PCI=y + CONFIG_USB=y + CONFIG_PCMCIA=y + CONFIG_SSB=m +else +include $(KLIB_BUILD)/.config +endif + +# We will warn when you don't have MQ support or NET_SCHED enabled. +# +# We could consider just quiting if MQ and NET_SCHED is disabled +# as I suspect all users of this package want 802.11e (WME) and +# 802.11n (HT) support. +ifneq ($(wildcard $(KLIB_BUILD)/Makefile),) +COMPAT_LATEST_VERSION = 33 +KERNEL_SUBLEVEL := $(shell $(MAKE) -C $(KLIB_BUILD) kernelversion | sed -n 's/^2\.6\.\([0-9]\+\).*/\1/p') +COMPAT_VERSIONS := $(shell I=$(COMPAT_LATEST_VERSION); while [ "$$I" -gt $(KERNEL_SUBLEVEL) ]; do echo $$I; I=$$(($$I - 1)); done) +$(foreach ver,$(COMPAT_VERSIONS),$(eval CONFIG_COMPAT_KERNEL_$(ver)=y)) + +ifdef CONFIG_COMPAT_KERNEL_25 +$(error "ERROR: compat-wireless by default supports kernels >= 2.6.25, try enabling only one driver though") +endif + +ifeq ($(CONFIG_CFG80211),y) +$(error "ERROR: your kernel has CONFIG_CFG80211=y, you should have it CONFIG_CFG80211=m if you want to use this thing.") +endif + + +# 2.6.27 has FTRACE_DYNAMIC borked, so we will complain if +# you have it enabled, otherwise you will very likely run into +# a kernel panic. +ifeq ($(KERNEL_SUBLEVEL),27) +ifeq ($(CONFIG_DYNAMIC_FTRACE),y) +$(error "ERROR: Your 2.6.27 kernel has CONFIG_DYNAMIC_FTRACE, please upgrade your distribution kernel as newer ones should not have this enabled (and if so report a bug) or remove this warning if you know what you are doing") +endif +endif + +# This is because with CONFIG_MAC80211 include/linux/skbuff.h will +# enable on 2.6.27 a new attribute: +# +# skb->do_not_encrypt +# +# and on 2.6.28 another new attribute: +# +# skb->requeue +# +ifeq ($(shell test $(KERNEL_SUBLEVEL) -ge 27 && echo yes),yes) +ifeq ($(CONFIG_MAC80211),) +$(error "ERROR: Your >=2.6.27 kernel has CONFIG_MAC80211 disabled, you should have it CONFIG_MAC80211=m if you want to use this thing.") +endif +endif + +ifneq ($(KERNELRELEASE),) # This prevents a warning + +ifeq ($(CONFIG_NET_SCHED),) + QOS_REQS_MISSING+=CONFIG_NET_SCHED +endif + +ifneq ($(QOS_REQS_MISSING),) # Complain about our missing dependencies +$(warning "WARNING: You are running a kernel >= 2.6.23, you should enable in it $(QOS_REQS_MISSING) for 802.11[ne] support") +endif + +endif # build check +endif # kernel Makefile check + +# These both are needed by compat-wireless || compat-bluetooth so enable them + CONFIG_COMPAT_RFKILL=y + +ifeq ($(CONFIG_MAC80211),y) +$(error "ERROR: you have MAC80211 compiled into the kernel, CONFIG_MAC80211=y, as such you cannot replace its mac80211 driver. You need this set to CONFIG_MAC80211=m. If you are using Fedora upgrade your kernel as later version should this set as modular. For further information on Fedora see https://bugzilla.redhat.com/show_bug.cgi?id=470143. 
If you are using your own kernel recompile it and make mac80211 modular") +else + CONFIG_COMPAT_WIRELESS=y + CONFIG_COMPAT_WIRELESS_MODULES=m + CONFIG_COMPAT_VAR_MODULES=m +# We could technically separate these but not yet, we only have b44 +# Note that we don't intend on backporting network drivers that +# use Multiqueue as that was a pain to backport to kernels older than +# 2.6.27. But -- we could just disable those drivers from kernels +# older than 2.6.27 + CONFIG_COMPAT_NETWORK_MODULES=m + CONFIG_COMPAT_NET_USB_MODULES=m +endif + +# The Bluetooth compatibility only builds on kernels >= 2.6.27 for now +ifndef CONFIG_COMPAT_KERNEL_27 +ifeq ($(CONFIG_BT),y) +# we'll ignore compiling bluetooth +else + CONFIG_COMPAT_BLUETOOTH=y + CONFIG_COMPAT_BLUETOOTH_MODULES=m +endif +endif # Kernel >= 2.6.26 + +ifeq ($(CONFIG_COMPAT_KERNEL_33),y) +ifneq ($(CONFIG_FW_LOADER),) + CONFIG_COMPAT_FIRMWARE_CLASS=m +endif +endif + + +# Wireless subsystem stuff +CONFIG_MAC80211=m + +# CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_DEBUG_MENU=y +# CONFIG_MAC80211_NOINLINE=y +# CONFIG_MAC80211_VERBOSE_DEBUG=y +# CONFIG_MAC80211_HT_DEBUG=y +# CONFIG_MAC80211_TKIP_DEBUG=y +# CONFIG_MAC80211_IBSS_DEBUG=y +# CONFIG_MAC80211_VERBOSE_PS_DEBUG=y +# CONFIG_MAC80211_VERBOSE_MPL_DEBUG=y +# CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG=y +# CONFIG_MAC80211_DEBUG_COUNTERS=y +# CONFIG_MAC80211_DRIVER_API_TRACER=y + +# choose between pid and minstrel as default rate control algorithm +CONFIG_MAC80211_RC_DEFAULT=minstrel +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +# CONFIG_MAC80211_RC_DEFAULT_PID=y +# This is the one used by our compat-wireless net/mac80211/rate.c +# in case you have and old kernel which is overriding this to pid. +CONFIG_COMPAT_MAC80211_RC_DEFAULT=minstrel +CONFIG_MAC80211_RC_PID=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_LEDS=y + +# enable mesh networking too +CONFIG_MAC80211_MESH=y + +CONFIG_CFG80211=m +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS=y +# CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS=y +# CONFIG_CFG80211_REG_DEBUG=y +# See below for wext stuff + +CONFIG_LIB80211=m +CONFIG_LIB80211_CRYPT_WEP=m +CONFIG_LIB80211_CRYPT_CCMP=m +CONFIG_LIB80211_CRYPT_TKIP=m +# CONFIG_LIB80211_DEBUG=y + +CONFIG_WIRELESS_OLD_REGULATORY=n + +CONFIG_BT=m +CONFIG_BT_L2CAP=m +CONFIG_BT_SCO=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_BNEP=m +CONFIG_BT_ATH3K=m +# CONFIG_BT_CMTP depends on ISDN_CAPI +ifneq ($(CONFIG_BT_CMTP),) +CONFIG_BT_CMTP=m +endif +CONFIG_BT_HIDP=m + +ifdef CONFIG_COMPAT_KERNEL_32 +# Old kernels stil do depend on CONFIG_WIRELESS_EXT +# as we add the wireless handler back to the struct +# netdevice +ifneq ($(CONFIG_WIRELESS_EXT),) +CONFIG_CFG80211_WEXT=y +endif +else +# 2.6.33 and above do not need CONFIG_WIRELESS_EXT, but the +# reality is we should select CONFIG_WIRELESS_EXT only if a +# driver claims for it (one of the old non-cfg80211 drivers). +# Then users could either have this on or off but we leave it +# on in case users on >= 2.6.33 still have iwconfig and other +# old deprecated userspace applications. +CONFIG_CFG80211_WEXT=y +endif # CONFIG_COMPAT_KERNEL_32 + +# mac80211 test driver +CONFIG_MAC80211_HWSIM=m + +# PCI Drivers +ifneq ($(CONFIG_PCI),) + +CONFIG_ATH5K=m +# CONFIG_ATH5K_DEBUG=y +CONFIG_ATH9K_HW=m +CONFIG_ATH9K=m +# Note: once ath9k_htc is added we'll have to move +# CONFIG_ATH9K_COMMON to an area that doesn't depend on PCI +# as you could then have ath9k disabled but ath9k_htc enabled. 
+CONFIG_ATH9K_COMMON=m +# CONFIG_ATH9K_DEBUGFS=y + + +CONFIG_IWLWIFI=m +# CONFIG_IWLWIFI_DEBUG=y +# CONFIG_IWLWIFI_DEBUGFS=y +# CONFIG_IWLWIFI_DEVICE_TRACING=y +CONFIG_IWLAGN=m +CONFIG_COMPAT_IWL4965=y +CONFIG_IWL5000=y +CONFIG_IWL3945=m + + +CONFIG_B43=m +CONFIG_B43_HWRNG=y +CONFIG_B43_PCI_AUTOSELECT=y +ifneq ($(CONFIG_PCMCIA),) +CONFIG_B43_PCMCIA=y +endif +CONFIG_B43_LEDS=y +CONFIG_B43_PHY_LP=y +CONFIG_B43_NPHY=y +# CONFIG_B43_DEBUG=y + +CONFIG_B43LEGACY=m +CONFIG_B43LEGACY_HWRNG=y +CONFIG_B43LEGACY_PCI_AUTOSELECT=y +CONFIG_B43LEGACY_LEDS=y +# CONFIG_B43LEGACY_DEBUG=y +CONFIG_B43LEGACY_DMA=y +CONFIG_B43LEGACY_PIO=y + +# The Intel ipws +CONFIG_LIBIPW=m +# CONFIG_LIBIPW_DEBUG=y + + +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +# CONFIG_IPW2100_DEBUG=y +CONFIG_IPW2200=m +CONFIG_IPW2200_MONITOR=y +CONFIG_IPW2200_RADIOTAP=y +CONFIG_IPW2200_PROMISCUOUS=y +CONFIG_IPW2200_QOS=y +# CONFIG_IPW2200_DEBUG=y +# The above enables use a second interface prefixed 'rtap'. +# Example usage: +# +# % modprobe ipw2200 rtap_iface=1 +# % ifconfig rtap0 up +# % tethereal -i rtap0 +# +# If you do not specify 'rtap_iface=1' as a module parameter then +# the rtap interface will not be created and you will need to turn +# it on via sysfs: +# +# % echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface + +ifneq ($(CONFIG_SSB),) +# Sonics Silicon Backplane +CONFIG_SSB_SPROM=y +# CONFIG_SSB_DEBUG=y + +CONFIG_SSB_BLOCKIO=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_B43_PCI_BRIDGE=y +ifneq ($(CONFIG_PCMCIA),) +CONFIG_SSB_PCMCIAHOST=y +endif +# CONFIG_SSB_DEBUG=y +CONFIG_SSB_DRIVER_PCICORE=y +endif + +CONFIG_P54_PCI=m + +CONFIG_B44=m +CONFIG_B44_PCI=y + +CONFIG_RTL8180=m + +CONFIG_ADM8211=m + +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_PCI=y +NEED_RT2X00=y + +# Two rt2x00 drivers require firmware: rt61pci and rt73usb. They depend on +# CRC to check the firmware. We check here first for the PCI +# driver as we're in the PCI section. +ifneq ($(CONFIG_CRC_ITU_T),) +CONFIG_RT61PCI=m +NEED_RT2X00_FIRMWARE=y +endif + +CONFIG_MWL8K=m + +# Ethernet drivers go here +CONFIG_ATL1=m +CONFIG_ATL2=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m + +endif +## end of PCI + +ifneq ($(CONFIG_PCMCIA),) + +ifdef CONFIG_COMPAT_KERNEL_27 +CONFIG_LIBERTAS=n +CONFIG_LIBERTAS_CS=n +else +CONFIG_LIBERTAS_CS=m +NEED_LIBERTAS=y +endif + +endif +## end of PCMCIA + +# This is required for some cards +CONFIG_EEPROM_93CX6=m + +# USB Drivers +ifneq ($(CONFIG_USB),) +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG=y + +# Sorry, rndis_wlan uses cancel_work_sync which is new and can't be done in compat... + +# Wireless RNDIS USB support (RTL8185 802.11g) A-Link WL54PC +# All of these devices are based on Broadcom 4320 chip which +# is only wireless RNDIS chip known to date. 
+# Note: this depends on CONFIG_USB_NET_RNDIS_HOST and CONFIG_USB_NET_CDCETHER +# it also requires new RNDIS_HOST and CDC_ETHER modules which we add +ifdef CONFIG_COMPAT_KERNEL_29 +CONFIG_USB_COMPAT_USBNET=n +CONFIG_USB_NET_COMPAT_RNDIS_HOST=n +CONFIG_USB_NET_COMPAT_RNDIS_WLAN=n +CONFIG_USB_NET_COMPAT_CDCETHER=n +else +CONFIG_USB_COMPAT_USBNET=m +CONFIG_USB_NET_COMPAT_RNDIS_HOST=m +CONFIG_USB_NET_COMPAT_RNDIS_WLAN=m +CONFIG_USB_NET_COMPAT_CDCETHER=m +endif + + +CONFIG_P54_USB=m +CONFIG_RTL8187=m +CONFIG_RTL8187_LEDS=y + +CONFIG_AT76C50X_USB=m + +ifndef CONFIG_COMPAT_KERNEL_28 +CONFIG_AR9170_USB=m +CONFIG_AR9170_LEDS=y +endif + +# RT2500USB does not require firmware +CONFIG_RT2500USB=m +CONFIG_RT2800USB=m +CONFIG_RT2X00_LIB_USB=m +NEED_RT2X00=y +# RT73USB requires firmware +ifneq ($(CONFIG_CRC_ITU_T),) +CONFIG_RT73USB=m +NEED_RT2X00_FIRMWARE=y +endif + +ifdef CONFIG_COMPAT_KERNEL_27 +CONFIG_LIBERTAS_THINFIRM_USB=n +CONFIG_LIBERTAS_USB=n +NEED_LIBERTAS=n +else +CONFIG_LIBERTAS_THINFIRM_USB=m +CONFIG_LIBERTAS_USB=m +NEED_LIBERTAS=y +endif + +endif # end of USB driver list + +ifneq ($(CONFIG_SPI_MASTER),) + +CONFIG_WL1251=m +CONFIG_WL1251_SPI=m +CONFIG_P54_SPI=m + +ifdef CONFIG_COMPAT_KERNEL_27 +CONFIG_LIBERTAS_SPI=n +NEED_LIBERTAS=n +else +CONFIG_LIBERTAS_SPI=m +NEED_LIBERTAS=y +endif + +endif # end of SPI driver list + +ifneq ($(CONFIG_MMC),) + +CONFIG_SSB_SDIOHOST=y +CONFIG_B43_SDIO=y +CONFIG_WL1251_SDIO=m + +ifdef CONFIG_COMPAT_KERNEL_27 +CONFIG_LIBERTAS_SDIO=n +NEED_LIBERTAS=n +else +CONFIG_LIBERTAS_SDIO=m +NEED_LIBERTAS=y +endif + +# Activate iwmc3200wifi support only on kernel >= 2.6.29. +# iwmc3200wifi uses new netdev_ops api no supported by old kernel. +ifndef CONFIG_COMPAT_KERNEL_29 +CONFIG_IWM=m +# CONFIG_IWM_DEBUG=y +endif + +endif # end of SDIO driver list + +# Common rt2x00 requirements +ifeq ($(NEED_RT2X00),y) +CONFIG_RT2X00=y +CONFIG_RT2X00_LIB=m +CONFIG_RT2800_LIB=m +CONFIG_RT2X00_LIB_HT=y +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG=y +# CONFIG_RT2X00_LIB_DEBUGFS +endif + +ifeq ($(NEED_RT2X00_FIRMWARE),y) +CONFIG_RT2X00_LIB_FIRMWARE=y +endif + +# p54 +CONFIG_P54_COMMON=m +CONFIG_P54_LEDS=y + +# Atheros +CONFIG_ATH_COMMON=m +# CONFIG_ATH_DEBUG=y + +CONFIG_WL12XX=y +CONFIG_WL1251=m +CONFIG_WL1271=m + +ifdef CONFIG_COMPAT_KERNEL_27 +CONFIG_LIBERTAS=n +else +ifeq ($(NEED_LIBERTAS),y) +CONFIG_LIBERTAS_THINFIRM=m +CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_MESH=y +# CONFIG_LIBERTAS_DEBUG=y +endif +endif + +# We need the backported rfkill module on kernel < 2.6.31. +# In more recent kernel versions use the in kernel rfkill module. +ifdef CONFIG_COMPAT_KERNEL_31 +CONFIG_RFKILL_BACKPORT=m +CONFIG_RFKILL_BACKPORT_LEDS=y +CONFIG_RFKILL_BACKPORT_INPUT=y +endif + diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile new file mode 100644 index 0000000..7e5aed5 --- /dev/null +++ b/drivers/bluetooth/Makefile @@ -0,0 +1,29 @@ +# +# Makefile for the Linux Bluetooth HCI device drivers. 
+# + +obj-$(CONFIG_BT_HCIVHCI) += hci_vhci.o +obj-$(CONFIG_BT_HCIUART) += hci_uart.o +obj-$(CONFIG_BT_HCIBCM203X) += bcm203x.o +obj-$(CONFIG_BT_HCIBPA10X) += bpa10x.o +obj-$(CONFIG_BT_HCIBFUSB) += bfusb.o +obj-$(CONFIG_BT_HCIDTL1) += dtl1_cs.o +obj-$(CONFIG_BT_HCIBT3C) += bt3c_cs.o +obj-$(CONFIG_BT_HCIBLUECARD) += bluecard_cs.o +obj-$(CONFIG_BT_HCIBTUART) += btuart_cs.o + +obj-$(CONFIG_BT_HCIBTUSB) += btusb.o +obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o + +obj-$(CONFIG_BT_ATH3K) += ath3k.o +obj-$(CONFIG_BT_MRVL) += btmrvl.o +obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o + +btmrvl-y := btmrvl_main.o +btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o + +hci_uart-y := hci_ldisc.o +hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o +hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o +hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o +hci_uart-objs := $(hci_uart-y) diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c new file mode 100644 index 0000000..128cae4 --- /dev/null +++ b/drivers/bluetooth/ath3k.c @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define VERSION "1.0" + + +static struct usb_device_id ath3k_table[] = { + /* Atheros AR3011 */ + { USB_DEVICE(0x0CF3, 0x3000) }, + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, ath3k_table); + +#define USB_REQ_DFU_DNLOAD 1 +#define BULK_SIZE 4096 + +struct ath3k_data { + struct usb_device *udev; + u8 *fw_data; + u32 fw_size; + u32 fw_sent; +}; + +static int ath3k_load_firmware(struct ath3k_data *data, + unsigned char *firmware, + int count) +{ + u8 *send_buf; + int err, pipe, len, size, sent = 0; + + BT_DBG("ath3k %p udev %p", data, data->udev); + + pipe = usb_sndctrlpipe(data->udev, 0); + + if ((usb_control_msg(data->udev, pipe, + USB_REQ_DFU_DNLOAD, + USB_TYPE_VENDOR, 0, 0, + firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) { + BT_ERR("Can't change to loading configuration err"); + return -EBUSY; + } + sent += 20; + count -= 20; + + send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); + if (!send_buf) { + BT_ERR("Can't allocate memory chunk for firmware"); + return -ENOMEM; + } + + while (count) { + size = min_t(uint, count, BULK_SIZE); + pipe = usb_sndbulkpipe(data->udev, 0x02); + memcpy(send_buf, firmware + sent, size); + + err = usb_bulk_msg(data->udev, pipe, send_buf, size, + &len, 3000); + + if (err || (len != size)) { + BT_ERR("Error in firmware loading err = %d," + "len = %d, size = %d", err, len, size); + goto error; + } + + sent += size; + count -= size; + } + + kfree(send_buf); + return 0; + +error: + kfree(send_buf); + return err; +} + +static int ath3k_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + const struct firmware *firmware; + struct usb_device *udev = 
interface_to_usbdev(intf); + struct ath3k_data *data; + int size; + + BT_DBG("intf %p id %p", intf, id); + + if (intf->cur_altsetting->desc.bInterfaceNumber != 0) + return -ENODEV; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->udev = udev; + + if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { + kfree(data); + return -EIO; + } + + size = max_t(uint, firmware->size, 4096); + data->fw_data = kmalloc(size, GFP_KERNEL); + if (!data->fw_data) { + release_firmware(firmware); + kfree(data); + return -ENOMEM; + } + + memcpy(data->fw_data, firmware->data, firmware->size); + data->fw_size = firmware->size; + data->fw_sent = 0; + release_firmware(firmware); + + usb_set_intfdata(intf, data); + if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) { + usb_set_intfdata(intf, NULL); + kfree(data->fw_data); + kfree(data); + return -EIO; + } + + return 0; +} + +static void ath3k_disconnect(struct usb_interface *intf) +{ + struct ath3k_data *data = usb_get_intfdata(intf); + + BT_DBG("ath3k_disconnect intf %p", intf); + + kfree(data->fw_data); + kfree(data); +} + +static struct usb_driver ath3k_driver = { + .name = "ath3k", + .probe = ath3k_probe, + .disconnect = ath3k_disconnect, + .id_table = ath3k_table, +}; + +static int __init ath3k_init(void) +{ + BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION); + return usb_register(&ath3k_driver); +} + +static void __exit ath3k_exit(void) +{ + usb_deregister(&ath3k_driver); +} + +module_init(ath3k_init); +module_exit(ath3k_exit); + +MODULE_AUTHOR("Atheros Communications"); +MODULE_DESCRIPTION("Atheros AR30xx firmware driver"); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("ath3k-1.fw"); diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c new file mode 100644 index 0000000..b0c84c1 --- /dev/null +++ b/drivers/bluetooth/bcm203x.c @@ -0,0 +1,301 @@ +/* + * + * Broadcom Blutonium firmware driver + * + * Copyright (C) 2003 Maxim Krasnyansky + * Copyright (C) 2003 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include + +#define VERSION "1.2" + +static const struct usb_device_id bcm203x_table[] = { + /* Broadcom Blutonium (BCM2033) */ + { USB_DEVICE(0x0a5c, 0x2033) }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, bcm203x_table); + +#define BCM203X_ERROR 0 +#define BCM203X_RESET 1 +#define BCM203X_LOAD_MINIDRV 2 +#define BCM203X_SELECT_MEMORY 3 +#define BCM203X_CHECK_MEMORY 4 +#define BCM203X_LOAD_FIRMWARE 5 +#define BCM203X_CHECK_FIRMWARE 6 + +#define BCM203X_IN_EP 0x81 +#define BCM203X_OUT_EP 0x02 + +struct bcm203x_data { + struct usb_device *udev; + + unsigned long state; + + struct work_struct work; + + struct urb *urb; + unsigned char *buffer; + + unsigned char *fw_data; + unsigned int fw_size; + unsigned int fw_sent; +}; + +static void bcm203x_complete(struct urb *urb) +{ + struct bcm203x_data *data = urb->context; + struct usb_device *udev = urb->dev; + int len; + + BT_DBG("udev %p urb %p", udev, urb); + + if (urb->status) { + BT_ERR("URB failed with status %d", urb->status); + data->state = BCM203X_ERROR; + return; + } + + switch (data->state) { + case BCM203X_LOAD_MINIDRV: + memcpy(data->buffer, "#", 1); + + usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP), + data->buffer, 1, bcm203x_complete, data); + + data->state = BCM203X_SELECT_MEMORY; + + schedule_work(&data->work); + break; + + case BCM203X_SELECT_MEMORY: + usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP), + data->buffer, 32, bcm203x_complete, data, 1); + + data->state = BCM203X_CHECK_MEMORY; + + if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) + BT_ERR("Can't submit URB"); + break; + + case BCM203X_CHECK_MEMORY: + if (data->buffer[0] != '#') { + BT_ERR("Memory select failed"); + data->state = BCM203X_ERROR; + break; + } + + data->state = BCM203X_LOAD_FIRMWARE; + + case BCM203X_LOAD_FIRMWARE: + if (data->fw_sent == data->fw_size) { + usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP), + data->buffer, 32, bcm203x_complete, data, 1); + + data->state = BCM203X_CHECK_FIRMWARE; + } else { + len = min_t(uint, data->fw_size - data->fw_sent, 4096); + + usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP), + data->fw_data + data->fw_sent, len, bcm203x_complete, data); + + data->fw_sent += len; + } + + if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) + BT_ERR("Can't submit URB"); + break; + + case BCM203X_CHECK_FIRMWARE: + if (data->buffer[0] != '.') { + BT_ERR("Firmware loading failed"); + data->state = BCM203X_ERROR; + break; + } + + data->state = BCM203X_RESET; + break; + } +} + +static void bcm203x_work(struct work_struct *work) +{ + struct bcm203x_data *data = + container_of(work, struct bcm203x_data, work); + + if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) + BT_ERR("Can't submit URB"); +} + +static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + const struct firmware *firmware; + struct usb_device *udev = interface_to_usbdev(intf); + struct bcm203x_data *data; + int size; + + BT_DBG("intf %p id %p", intf, id); + + if (intf->cur_altsetting->desc.bInterfaceNumber != 0) + return -ENODEV; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + BT_ERR("Can't allocate memory for data structure"); + return 
-ENOMEM; + } + + data->udev = udev; + data->state = BCM203X_LOAD_MINIDRV; + + data->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!data->urb) { + BT_ERR("Can't allocate URB"); + kfree(data); + return -ENOMEM; + } + + if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) { + BT_ERR("Mini driver request failed"); + usb_free_urb(data->urb); + kfree(data); + return -EIO; + } + + BT_DBG("minidrv data %p size %zu", firmware->data, firmware->size); + + size = max_t(uint, firmware->size, 4096); + + data->buffer = kmalloc(size, GFP_KERNEL); + if (!data->buffer) { + BT_ERR("Can't allocate memory for mini driver"); + release_firmware(firmware); + usb_free_urb(data->urb); + kfree(data); + return -ENOMEM; + } + + memcpy(data->buffer, firmware->data, firmware->size); + + usb_fill_bulk_urb(data->urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP), + data->buffer, firmware->size, bcm203x_complete, data); + + release_firmware(firmware); + + if (request_firmware(&firmware, "BCM2033-FW.bin", &udev->dev) < 0) { + BT_ERR("Firmware request failed"); + usb_free_urb(data->urb); + kfree(data->buffer); + kfree(data); + return -EIO; + } + + BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); + + data->fw_data = kmalloc(firmware->size, GFP_KERNEL); + if (!data->fw_data) { + BT_ERR("Can't allocate memory for firmware image"); + release_firmware(firmware); + usb_free_urb(data->urb); + kfree(data->buffer); + kfree(data); + return -ENOMEM; + } + + memcpy(data->fw_data, firmware->data, firmware->size); + data->fw_size = firmware->size; + data->fw_sent = 0; + + release_firmware(firmware); + + INIT_WORK(&data->work, bcm203x_work); + + usb_set_intfdata(intf, data); + + schedule_work(&data->work); + + return 0; +} + +static void bcm203x_disconnect(struct usb_interface *intf) +{ + struct bcm203x_data *data = usb_get_intfdata(intf); + + BT_DBG("intf %p", intf); + + usb_kill_urb(data->urb); + + usb_set_intfdata(intf, NULL); + + usb_free_urb(data->urb); + kfree(data->fw_data); + kfree(data->buffer); + kfree(data); +} + +static struct usb_driver bcm203x_driver = { + .name = "bcm203x", + .probe = bcm203x_probe, + .disconnect = bcm203x_disconnect, + .id_table = bcm203x_table, +}; + +static int __init bcm203x_init(void) +{ + int err; + + BT_INFO("Broadcom Blutonium firmware driver ver %s", VERSION); + + err = usb_register(&bcm203x_driver); + if (err < 0) + BT_ERR("Failed to register USB driver"); + + return err; +} + +static void __exit bcm203x_exit(void) +{ + usb_deregister(&bcm203x_driver); +} + +module_init(bcm203x_init); +module_exit(bcm203x_exit); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("BCM2033-MD.hex"); +MODULE_FIRMWARE("BCM2033-FW.bin"); diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c new file mode 100644 index 0000000..005919a --- /dev/null +++ b/drivers/bluetooth/bfusb.c @@ -0,0 +1,791 @@ +/* + * + * AVM BlueFRITZ! USB driver + * + * Copyright (C) 2003-2006 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +#define VERSION "1.2" + +static struct usb_driver bfusb_driver; + +static struct usb_device_id bfusb_table[] = { + /* AVM BlueFRITZ! USB */ + { USB_DEVICE(0x057c, 0x2200) }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, bfusb_table); + +#define BFUSB_MAX_BLOCK_SIZE 256 + +#define BFUSB_BLOCK_TIMEOUT 3000 + +#define BFUSB_TX_PROCESS 1 +#define BFUSB_TX_WAKEUP 2 + +#define BFUSB_MAX_BULK_TX 2 +#define BFUSB_MAX_BULK_RX 2 + +struct bfusb_data { + struct hci_dev *hdev; + + unsigned long state; + + struct usb_device *udev; + + unsigned int bulk_in_ep; + unsigned int bulk_out_ep; + unsigned int bulk_pkt_size; + + rwlock_t lock; + + struct sk_buff_head transmit_q; + + struct sk_buff *reassembly; + + atomic_t pending_tx; + struct sk_buff_head pending_q; + struct sk_buff_head completed_q; +}; + +struct bfusb_data_scb { + struct urb *urb; +}; + +static void bfusb_tx_complete(struct urb *urb); +static void bfusb_rx_complete(struct urb *urb); + +static struct urb *bfusb_get_completed(struct bfusb_data *data) +{ + struct sk_buff *skb; + struct urb *urb = NULL; + + BT_DBG("bfusb %p", data); + + skb = skb_dequeue(&data->completed_q); + if (skb) { + urb = ((struct bfusb_data_scb *) skb->cb)->urb; + kfree_skb(skb); + } + + return urb; +} + +static void bfusb_unlink_urbs(struct bfusb_data *data) +{ + struct sk_buff *skb; + struct urb *urb; + + BT_DBG("bfusb %p", data); + + while ((skb = skb_dequeue(&data->pending_q))) { + urb = ((struct bfusb_data_scb *) skb->cb)->urb; + usb_kill_urb(urb); + skb_queue_tail(&data->completed_q, skb); + } + + while ((urb = bfusb_get_completed(data))) + usb_free_urb(urb); +} + +static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb) +{ + struct bfusb_data_scb *scb = (void *) skb->cb; + struct urb *urb = bfusb_get_completed(data); + int err, pipe; + + BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len); + + if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC))) + return -ENOMEM; + + pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); + + usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, + bfusb_tx_complete, skb); + + scb->urb = urb; + + skb_queue_tail(&data->pending_q, skb); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + BT_ERR("%s bulk tx submit failed urb %p err %d", + data->hdev->name, urb, err); + skb_unlink(skb, &data->pending_q); + usb_free_urb(urb); + } else + atomic_inc(&data->pending_tx); + + return err; +} + +static void bfusb_tx_wakeup(struct bfusb_data *data) +{ + struct sk_buff *skb; + + BT_DBG("bfusb %p", data); + + if (test_and_set_bit(BFUSB_TX_PROCESS, &data->state)) { + set_bit(BFUSB_TX_WAKEUP, &data->state); + return; + } + + do { + clear_bit(BFUSB_TX_WAKEUP, &data->state); + + while ((atomic_read(&data->pending_tx) < BFUSB_MAX_BULK_TX) && + (skb = skb_dequeue(&data->transmit_q))) { + if (bfusb_send_bulk(data, skb) < 0) { + skb_queue_head(&data->transmit_q, skb); + break; + } + } + + } while (test_bit(BFUSB_TX_WAKEUP, &data->state)); + + clear_bit(BFUSB_TX_PROCESS, &data->state); +} + +static void bfusb_tx_complete(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *) urb->context; + 
struct bfusb_data *data = (struct bfusb_data *) skb->dev; + + BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); + + atomic_dec(&data->pending_tx); + + if (!test_bit(HCI_RUNNING, &data->hdev->flags)) + return; + + if (!urb->status) + data->hdev->stat.byte_tx += skb->len; + else + data->hdev->stat.err_tx++; + + read_lock(&data->lock); + + skb_unlink(skb, &data->pending_q); + skb_queue_tail(&data->completed_q, skb); + + bfusb_tx_wakeup(data); + + read_unlock(&data->lock); +} + + +static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb) +{ + struct bfusb_data_scb *scb; + struct sk_buff *skb; + int err, pipe, size = HCI_MAX_FRAME_SIZE + 32; + + BT_DBG("bfusb %p urb %p", data, urb); + + if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC))) + return -ENOMEM; + + skb = bt_skb_alloc(size, GFP_ATOMIC); + if (!skb) { + usb_free_urb(urb); + return -ENOMEM; + } + + skb->dev = (void *) data; + + scb = (struct bfusb_data_scb *) skb->cb; + scb->urb = urb; + + pipe = usb_rcvbulkpipe(data->udev, data->bulk_in_ep); + + usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, size, + bfusb_rx_complete, skb); + + skb_queue_tail(&data->pending_q, skb); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + BT_ERR("%s bulk rx submit failed urb %p err %d", + data->hdev->name, urb, err); + skb_unlink(skb, &data->pending_q); + kfree_skb(skb); + usb_free_urb(urb); + } + + return err; +} + +static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned char *buf, int len) +{ + BT_DBG("bfusb %p hdr 0x%02x data %p len %d", data, hdr, buf, len); + + if (hdr & 0x10) { + BT_ERR("%s error in block", data->hdev->name); + kfree_skb(data->reassembly); + data->reassembly = NULL; + return -EIO; + } + + if (hdr & 0x04) { + struct sk_buff *skb; + unsigned char pkt_type; + int pkt_len = 0; + + if (data->reassembly) { + BT_ERR("%s unexpected start block", data->hdev->name); + kfree_skb(data->reassembly); + data->reassembly = NULL; + } + + if (len < 1) { + BT_ERR("%s no packet type found", data->hdev->name); + return -EPROTO; + } + + pkt_type = *buf++; len--; + + switch (pkt_type) { + case HCI_EVENT_PKT: + if (len >= HCI_EVENT_HDR_SIZE) { + struct hci_event_hdr *hdr = (struct hci_event_hdr *) buf; + pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen; + } else { + BT_ERR("%s event block is too short", data->hdev->name); + return -EILSEQ; + } + break; + + case HCI_ACLDATA_PKT: + if (len >= HCI_ACL_HDR_SIZE) { + struct hci_acl_hdr *hdr = (struct hci_acl_hdr *) buf; + pkt_len = HCI_ACL_HDR_SIZE + __le16_to_cpu(hdr->dlen); + } else { + BT_ERR("%s data block is too short", data->hdev->name); + return -EILSEQ; + } + break; + + case HCI_SCODATA_PKT: + if (len >= HCI_SCO_HDR_SIZE) { + struct hci_sco_hdr *hdr = (struct hci_sco_hdr *) buf; + pkt_len = HCI_SCO_HDR_SIZE + hdr->dlen; + } else { + BT_ERR("%s audio block is too short", data->hdev->name); + return -EILSEQ; + } + break; + } + + skb = bt_skb_alloc(pkt_len, GFP_ATOMIC); + if (!skb) { + BT_ERR("%s no memory for the packet", data->hdev->name); + return -ENOMEM; + } + + skb->dev = (void *) data->hdev; + bt_cb(skb)->pkt_type = pkt_type; + + data->reassembly = skb; + } else { + if (!data->reassembly) { + BT_ERR("%s unexpected continuation block", data->hdev->name); + return -EIO; + } + } + + if (len > 0) + memcpy(skb_put(data->reassembly, len), buf, len); + + if (hdr & 0x08) { + hci_recv_frame(data->reassembly); + data->reassembly = NULL; + } + + return 0; +} + +static void bfusb_rx_complete(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *) 
urb->context; + struct bfusb_data *data = (struct bfusb_data *) skb->dev; + unsigned char *buf = urb->transfer_buffer; + int count = urb->actual_length; + int err, hdr, len; + + BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); + + read_lock(&data->lock); + + if (!test_bit(HCI_RUNNING, &data->hdev->flags)) + goto unlock; + + if (urb->status || !count) + goto resubmit; + + data->hdev->stat.byte_rx += count; + + skb_put(skb, count); + + while (count) { + hdr = buf[0] | (buf[1] << 8); + + if (hdr & 0x4000) { + len = 0; + count -= 2; + buf += 2; + } else { + len = (buf[2] == 0) ? 256 : buf[2]; + count -= 3; + buf += 3; + } + + if (count < len) { + BT_ERR("%s block extends over URB buffer ranges", + data->hdev->name); + } + + if ((hdr & 0xe1) == 0xc1) + bfusb_recv_block(data, hdr, buf, len); + + count -= len; + buf += len; + } + + skb_unlink(skb, &data->pending_q); + kfree_skb(skb); + + bfusb_rx_submit(data, urb); + + read_unlock(&data->lock); + + return; + +resubmit: + urb->dev = data->udev; + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + BT_ERR("%s bulk resubmit failed urb %p err %d", + data->hdev->name, urb, err); + } + +unlock: + read_unlock(&data->lock); +} + +static int bfusb_open(struct hci_dev *hdev) +{ + struct bfusb_data *data = hdev->driver_data; + unsigned long flags; + int i, err; + + BT_DBG("hdev %p bfusb %p", hdev, data); + + if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + write_lock_irqsave(&data->lock, flags); + + err = bfusb_rx_submit(data, NULL); + if (!err) { + for (i = 1; i < BFUSB_MAX_BULK_RX; i++) + bfusb_rx_submit(data, NULL); + } else { + clear_bit(HCI_RUNNING, &hdev->flags); + } + + write_unlock_irqrestore(&data->lock, flags); + + return err; +} + +static int bfusb_flush(struct hci_dev *hdev) +{ + struct bfusb_data *data = hdev->driver_data; + + BT_DBG("hdev %p bfusb %p", hdev, data); + + skb_queue_purge(&data->transmit_q); + + return 0; +} + +static int bfusb_close(struct hci_dev *hdev) +{ + struct bfusb_data *data = hdev->driver_data; + unsigned long flags; + + BT_DBG("hdev %p bfusb %p", hdev, data); + + if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + write_lock_irqsave(&data->lock, flags); + write_unlock_irqrestore(&data->lock, flags); + + bfusb_unlink_urbs(data); + bfusb_flush(hdev); + + return 0; +} + +static int bfusb_send_frame(struct sk_buff *skb) +{ + struct hci_dev *hdev = (struct hci_dev *) skb->dev; + struct bfusb_data *data; + struct sk_buff *nskb; + unsigned char buf[3]; + int sent = 0, size, count; + + BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len); + + if (!hdev) { + BT_ERR("Frame for unknown HCI device (hdev=NULL)"); + return -ENODEV; + } + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -EBUSY; + + data = hdev->driver_data; + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + }; + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + + count = skb->len; + + /* Max HCI frame size seems to be 1511 + 1 */ + nskb = bt_skb_alloc(count + 32, GFP_ATOMIC); + if (!nskb) { + BT_ERR("Can't allocate memory for new packet"); + return -ENOMEM; + } + + nskb->dev = (void *) data; + + while (count) { + size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE); + + buf[0] = 0xc1 | ((sent == 0) ? 0x04 : 0) | ((count == size) ? 
0x08 : 0); + buf[1] = 0x00; + buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size; + + memcpy(skb_put(nskb, 3), buf, 3); + skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size); + + sent += size; + count -= size; + } + + /* Don't send frame with multiple size of bulk max packet */ + if ((nskb->len % data->bulk_pkt_size) == 0) { + buf[0] = 0xdd; + buf[1] = 0x00; + memcpy(skb_put(nskb, 2), buf, 2); + } + + read_lock(&data->lock); + + skb_queue_tail(&data->transmit_q, nskb); + bfusb_tx_wakeup(data); + + read_unlock(&data->lock); + + kfree_skb(skb); + + return 0; +} + +static void bfusb_destruct(struct hci_dev *hdev) +{ + struct bfusb_data *data = hdev->driver_data; + + BT_DBG("hdev %p bfusb %p", hdev, data); + + kfree(data); +} + +static int bfusb_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) +{ + return -ENOIOCTLCMD; +} + +static int bfusb_load_firmware(struct bfusb_data *data, + const unsigned char *firmware, int count) +{ + unsigned char *buf; + int err, pipe, len, size, sent = 0; + + BT_DBG("bfusb %p udev %p", data, data->udev); + + BT_INFO("BlueFRITZ! USB loading firmware"); + + pipe = usb_sndctrlpipe(data->udev, 0); + + if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, + 0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) { + BT_ERR("Can't change to loading configuration"); + return -EBUSY; + } + + data->udev->toggle[0] = data->udev->toggle[1] = 0; + + buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_ATOMIC); + if (!buf) { + BT_ERR("Can't allocate memory chunk for firmware"); + return -ENOMEM; + } + + pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); + + while (count) { + size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE + 3); + + memcpy(buf, firmware + sent, size); + + err = usb_bulk_msg(data->udev, pipe, buf, size, + &len, BFUSB_BLOCK_TIMEOUT); + + if (err || (len != size)) { + BT_ERR("Error in firmware loading"); + goto error; + } + + sent += size; + count -= size; + } + + err = usb_bulk_msg(data->udev, pipe, NULL, 0, + &len, BFUSB_BLOCK_TIMEOUT); + if (err < 0) { + BT_ERR("Error in null packet request"); + goto error; + } + + pipe = usb_sndctrlpipe(data->udev, 0); + + err = usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, + 0, 2, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); + if (err < 0) { + BT_ERR("Can't change to running configuration"); + goto error; + } + + data->udev->toggle[0] = data->udev->toggle[1] = 0; + + BT_INFO("BlueFRITZ! 
USB device ready"); + + kfree(buf); + return 0; + +error: + kfree(buf); + + pipe = usb_sndctrlpipe(data->udev, 0); + + usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, + 0, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); + + return err; +} + +static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + const struct firmware *firmware; + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_host_endpoint *bulk_out_ep; + struct usb_host_endpoint *bulk_in_ep; + struct hci_dev *hdev; + struct bfusb_data *data; + + BT_DBG("intf %p id %p", intf, id); + + /* Check number of endpoints */ + if (intf->cur_altsetting->desc.bNumEndpoints < 2) + return -EIO; + + bulk_out_ep = &intf->cur_altsetting->endpoint[0]; + bulk_in_ep = &intf->cur_altsetting->endpoint[1]; + + if (!bulk_out_ep || !bulk_in_ep) { + BT_ERR("Bulk endpoints not found"); + goto done; + } + + /* Initialize control structure and load firmware */ + data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL); + if (!data) { + BT_ERR("Can't allocate memory for control structure"); + goto done; + } + + data->udev = udev; + data->bulk_in_ep = bulk_in_ep->desc.bEndpointAddress; + data->bulk_out_ep = bulk_out_ep->desc.bEndpointAddress; + data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize); + + rwlock_init(&data->lock); + + data->reassembly = NULL; + + skb_queue_head_init(&data->transmit_q); + skb_queue_head_init(&data->pending_q); + skb_queue_head_init(&data->completed_q); + + if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) { + BT_ERR("Firmware request failed"); + goto error; + } + + BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); + + if (bfusb_load_firmware(data, firmware->data, firmware->size) < 0) { + BT_ERR("Firmware loading failed"); + goto release; + } + + release_firmware(firmware); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + goto error; + } + + data->hdev = hdev; + + hdev->bus = HCI_USB; + hdev->driver_data = data; + SET_HCIDEV_DEV(hdev, &intf->dev); + + hdev->open = bfusb_open; + hdev->close = bfusb_close; + hdev->flush = bfusb_flush; + hdev->send = bfusb_send_frame; + hdev->destruct = bfusb_destruct; + hdev->ioctl = bfusb_ioctl; + + hdev->owner = THIS_MODULE; + + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + hci_free_dev(hdev); + goto error; + } + + usb_set_intfdata(intf, data); + + return 0; + +release: + release_firmware(firmware); + +error: + kfree(data); + +done: + return -EIO; +} + +static void bfusb_disconnect(struct usb_interface *intf) +{ + struct bfusb_data *data = usb_get_intfdata(intf); + struct hci_dev *hdev = data->hdev; + + BT_DBG("intf %p", intf); + + if (!hdev) + return; + + usb_set_intfdata(intf, NULL); + + bfusb_close(hdev); + + if (hci_unregister_dev(hdev) < 0) + BT_ERR("Can't unregister HCI device %s", hdev->name); + + hci_free_dev(hdev); +} + +static struct usb_driver bfusb_driver = { + .name = "bfusb", + .probe = bfusb_probe, + .disconnect = bfusb_disconnect, + .id_table = bfusb_table, +}; + +static int __init bfusb_init(void) +{ + int err; + + BT_INFO("BlueFRITZ! USB driver ver %s", VERSION); + + err = usb_register(&bfusb_driver); + if (err < 0) + BT_ERR("Failed to register BlueFRITZ! USB driver"); + + return err; +} + +static void __exit bfusb_exit(void) +{ + usb_deregister(&bfusb_driver); +} + +module_init(bfusb_init); +module_exit(bfusb_exit); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("BlueFRITZ! 
USB driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("bfubase.frm"); diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c new file mode 100644 index 0000000..d9bf87c --- /dev/null +++ b/drivers/bluetooth/bluecard_cs.c @@ -0,0 +1,974 @@ +/* + * + * Bluetooth driver for the Anycom BlueCard (LSE039/LSE041) + * + * Copyright (C) 2001-2002 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * The initial developer of the original code is David A. Hinds + * . Portions created by David A. Hinds + * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + + + +/* ======================== Module parameters ======================== */ + + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth driver for the Anycom BlueCard (LSE039/LSE041)"); +MODULE_LICENSE("GPL"); + + + +/* ======================== Local structures ======================== */ + + +typedef struct bluecard_info_t { + struct pcmcia_device *p_dev; + dev_node_t node; + + struct hci_dev *hdev; + + spinlock_t lock; /* For serializing operations */ + struct timer_list timer; /* For LED control */ + + struct sk_buff_head txq; + unsigned long tx_state; + + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; + + unsigned char ctrl_reg; + unsigned long hw_state; /* Status of the hardware and LED control */ +} bluecard_info_t; + + +static int bluecard_config(struct pcmcia_device *link); +static void bluecard_release(struct pcmcia_device *link); + +static void bluecard_detach(struct pcmcia_device *p_dev); + + +/* Default baud rate: 57600, 115200, 230400 or 460800 */ +#define DEFAULT_BAUD_RATE 230400 + + +/* Hardware states */ +#define CARD_READY 1 +#define CARD_HAS_PCCARD_ID 4 +#define CARD_HAS_POWER_LED 5 +#define CARD_HAS_ACTIVITY_LED 6 + +/* Transmit states */ +#define XMIT_SENDING 1 +#define XMIT_WAKEUP 2 +#define XMIT_BUFFER_NUMBER 5 /* unset = buffer one, set = buffer two */ +#define XMIT_BUF_ONE_READY 6 +#define XMIT_BUF_TWO_READY 7 +#define XMIT_SENDING_READY 8 + +/* Receiver states */ +#define RECV_WAIT_PACKET_TYPE 0 +#define RECV_WAIT_EVENT_HEADER 1 +#define RECV_WAIT_ACL_HEADER 2 +#define RECV_WAIT_SCO_HEADER 3 +#define RECV_WAIT_DATA 4 + +/* Special packet types */ +#define PKT_BAUD_RATE_57600 0x80 +#define PKT_BAUD_RATE_115200 0x81 +#define PKT_BAUD_RATE_230400 0x82 +#define PKT_BAUD_RATE_460800 0x83 + + +/* These are the register offsets */ +#define REG_COMMAND 0x20 +#define REG_INTERRUPT 0x21 +#define REG_CONTROL 0x22 +#define REG_RX_CONTROL 0x24 +#define REG_CARD_RESET 0x30 +#define REG_LED_CTRL 0x30 + +/* REG_COMMAND */ +#define REG_COMMAND_TX_BUF_ONE 0x01 +#define REG_COMMAND_TX_BUF_TWO 0x02 +#define REG_COMMAND_RX_BUF_ONE 0x04 +#define REG_COMMAND_RX_BUF_TWO 0x08 +#define REG_COMMAND_RX_WIN_ONE 0x00 +#define REG_COMMAND_RX_WIN_TWO 0x10 + +/* REG_CONTROL */ 
+#define REG_CONTROL_BAUD_RATE_57600 0x00 +#define REG_CONTROL_BAUD_RATE_115200 0x01 +#define REG_CONTROL_BAUD_RATE_230400 0x02 +#define REG_CONTROL_BAUD_RATE_460800 0x03 +#define REG_CONTROL_RTS 0x04 +#define REG_CONTROL_BT_ON 0x08 +#define REG_CONTROL_BT_RESET 0x10 +#define REG_CONTROL_BT_RES_PU 0x20 +#define REG_CONTROL_INTERRUPT 0x40 +#define REG_CONTROL_CARD_RESET 0x80 + +/* REG_RX_CONTROL */ +#define RTS_LEVEL_SHIFT_BITS 0x02 + + + +/* ======================== LED handling routines ======================== */ + + +static void bluecard_activity_led_timeout(u_long arg) +{ + bluecard_info_t *info = (bluecard_info_t *)arg; + unsigned int iobase = info->p_dev->io.BasePort1; + + if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) + return; + + if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { + /* Disable activity LED */ + outb(0x08 | 0x20, iobase + 0x30); + } else { + /* Disable power LED */ + outb(0x00, iobase + 0x30); + } +} + + +static void bluecard_enable_activity_led(bluecard_info_t *info) +{ + unsigned int iobase = info->p_dev->io.BasePort1; + + if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) + return; + + if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { + /* Enable activity LED */ + outb(0x10 | 0x40, iobase + 0x30); + + /* Stop the LED after HZ/4 */ + mod_timer(&(info->timer), jiffies + HZ / 4); + } else { + /* Enable power LED */ + outb(0x08 | 0x20, iobase + 0x30); + + /* Stop the LED after HZ/2 */ + mod_timer(&(info->timer), jiffies + HZ / 2); + } +} + + + +/* ======================== Interrupt handling ======================== */ + + +static int bluecard_write(unsigned int iobase, unsigned int offset, __u8 *buf, int len) +{ + int i, actual; + + actual = (len > 15) ? 15 : len; + + outb_p(actual, iobase + offset); + + for (i = 0; i < actual; i++) + outb_p(buf[i], iobase + offset + i + 1); + + return actual; +} + + +static void bluecard_write_wakeup(bluecard_info_t *info) +{ + if (!info) { + BT_ERR("Unknown device"); + return; + } + + if (!test_bit(XMIT_SENDING_READY, &(info->tx_state))) + return; + + if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) { + set_bit(XMIT_WAKEUP, &(info->tx_state)); + return; + } + + do { + register unsigned int iobase = info->p_dev->io.BasePort1; + register unsigned int offset; + register unsigned char command; + register unsigned long ready_bit; + register struct sk_buff *skb; + register int len; + + clear_bit(XMIT_WAKEUP, &(info->tx_state)); + + if (!pcmcia_dev_present(info->p_dev)) + return; + + if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) { + if (!test_bit(XMIT_BUF_TWO_READY, &(info->tx_state))) + break; + offset = 0x10; + command = REG_COMMAND_TX_BUF_TWO; + ready_bit = XMIT_BUF_TWO_READY; + } else { + if (!test_bit(XMIT_BUF_ONE_READY, &(info->tx_state))) + break; + offset = 0x00; + command = REG_COMMAND_TX_BUF_ONE; + ready_bit = XMIT_BUF_ONE_READY; + } + + if (!(skb = skb_dequeue(&(info->txq)))) + break; + + if (bt_cb(skb)->pkt_type & 0x80) { + /* Disable RTS */ + info->ctrl_reg |= REG_CONTROL_RTS; + outb(info->ctrl_reg, iobase + REG_CONTROL); + } + + /* Activate LED */ + bluecard_enable_activity_led(info); + + /* Send frame */ + len = bluecard_write(iobase, offset, skb->data, skb->len); + + /* Tell the FPGA to send the data */ + outb_p(command, iobase + REG_COMMAND); + + /* Mark the buffer as dirty */ + clear_bit(ready_bit, &(info->tx_state)); + + if (bt_cb(skb)->pkt_type & 0x80) { + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DEFINE_WAIT(wait); + + unsigned char baud_reg; + + switch (bt_cb(skb)->pkt_type) { + 
case PKT_BAUD_RATE_460800: + baud_reg = REG_CONTROL_BAUD_RATE_460800; + break; + case PKT_BAUD_RATE_230400: + baud_reg = REG_CONTROL_BAUD_RATE_230400; + break; + case PKT_BAUD_RATE_115200: + baud_reg = REG_CONTROL_BAUD_RATE_115200; + break; + case PKT_BAUD_RATE_57600: + /* Fall through... */ + default: + baud_reg = REG_CONTROL_BAUD_RATE_57600; + break; + } + + /* Wait until the command reaches the baseband */ + prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); + schedule_timeout(HZ/10); + finish_wait(&wq, &wait); + + /* Set baud on baseband */ + info->ctrl_reg &= ~0x03; + info->ctrl_reg |= baud_reg; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Enable RTS */ + info->ctrl_reg &= ~REG_CONTROL_RTS; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Wait before the next HCI packet can be send */ + prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); + schedule_timeout(HZ); + finish_wait(&wq, &wait); + } + + if (len == skb->len) { + kfree_skb(skb); + } else { + skb_pull(skb, len); + skb_queue_head(&(info->txq), skb); + } + + info->hdev->stat.byte_tx += len; + + /* Change buffer */ + change_bit(XMIT_BUFFER_NUMBER, &(info->tx_state)); + + } while (test_bit(XMIT_WAKEUP, &(info->tx_state))); + + clear_bit(XMIT_SENDING, &(info->tx_state)); +} + + +static int bluecard_read(unsigned int iobase, unsigned int offset, __u8 *buf, int size) +{ + int i, n, len; + + outb(REG_COMMAND_RX_WIN_ONE, iobase + REG_COMMAND); + + len = inb(iobase + offset); + n = 0; + i = 1; + + while (n < len) { + + if (i == 16) { + outb(REG_COMMAND_RX_WIN_TWO, iobase + REG_COMMAND); + i = 0; + } + + buf[n] = inb(iobase + offset + i); + + n++; + i++; + + } + + return len; +} + + +static void bluecard_receive(bluecard_info_t *info, unsigned int offset) +{ + unsigned int iobase; + unsigned char buf[31]; + int i, len; + + if (!info) { + BT_ERR("Unknown device"); + return; + } + + iobase = info->p_dev->io.BasePort1; + + if (test_bit(XMIT_SENDING_READY, &(info->tx_state))) + bluecard_enable_activity_led(info); + + len = bluecard_read(iobase, offset, buf, sizeof(buf)); + + for (i = 0; i < len; i++) { + + /* Allocate packet */ + if (info->rx_skb == NULL) { + info->rx_state = RECV_WAIT_PACKET_TYPE; + info->rx_count = 0; + if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) { + BT_ERR("Can't allocate mem for new packet"); + return; + } + } + + if (info->rx_state == RECV_WAIT_PACKET_TYPE) { + + info->rx_skb->dev = (void *) info->hdev; + bt_cb(info->rx_skb)->pkt_type = buf[i]; + + switch (bt_cb(info->rx_skb)->pkt_type) { + + case 0x00: + /* init packet */ + if (offset != 0x00) { + set_bit(XMIT_BUF_ONE_READY, &(info->tx_state)); + set_bit(XMIT_BUF_TWO_READY, &(info->tx_state)); + set_bit(XMIT_SENDING_READY, &(info->tx_state)); + bluecard_write_wakeup(info); + } + + kfree_skb(info->rx_skb); + info->rx_skb = NULL; + break; + + case HCI_EVENT_PKT: + info->rx_state = RECV_WAIT_EVENT_HEADER; + info->rx_count = HCI_EVENT_HDR_SIZE; + break; + + case HCI_ACLDATA_PKT: + info->rx_state = RECV_WAIT_ACL_HEADER; + info->rx_count = HCI_ACL_HDR_SIZE; + break; + + case HCI_SCODATA_PKT: + info->rx_state = RECV_WAIT_SCO_HEADER; + info->rx_count = HCI_SCO_HDR_SIZE; + break; + + default: + /* unknown packet */ + BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); + info->hdev->stat.err_rx++; + + kfree_skb(info->rx_skb); + info->rx_skb = NULL; + break; + + } + + } else { + + *skb_put(info->rx_skb, 1) = buf[i]; + info->rx_count--; + + if (info->rx_count == 0) { + + int dlen; + struct hci_event_hdr *eh; + struct 
hci_acl_hdr *ah; + struct hci_sco_hdr *sh; + + switch (info->rx_state) { + + case RECV_WAIT_EVENT_HEADER: + eh = hci_event_hdr(info->rx_skb); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = eh->plen; + break; + + case RECV_WAIT_ACL_HEADER: + ah = hci_acl_hdr(info->rx_skb); + dlen = __le16_to_cpu(ah->dlen); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = dlen; + break; + + case RECV_WAIT_SCO_HEADER: + sh = hci_sco_hdr(info->rx_skb); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = sh->dlen; + break; + + case RECV_WAIT_DATA: + hci_recv_frame(info->rx_skb); + info->rx_skb = NULL; + break; + + } + + } + + } + + + } + + info->hdev->stat.byte_rx += len; +} + + +static irqreturn_t bluecard_interrupt(int irq, void *dev_inst) +{ + bluecard_info_t *info = dev_inst; + unsigned int iobase; + unsigned char reg; + + if (!info || !info->hdev) + /* our irq handler is shared */ + return IRQ_NONE; + + if (!test_bit(CARD_READY, &(info->hw_state))) + return IRQ_HANDLED; + + iobase = info->p_dev->io.BasePort1; + + spin_lock(&(info->lock)); + + /* Disable interrupt */ + info->ctrl_reg &= ~REG_CONTROL_INTERRUPT; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + reg = inb(iobase + REG_INTERRUPT); + + if ((reg != 0x00) && (reg != 0xff)) { + + if (reg & 0x04) { + bluecard_receive(info, 0x00); + outb(0x04, iobase + REG_INTERRUPT); + outb(REG_COMMAND_RX_BUF_ONE, iobase + REG_COMMAND); + } + + if (reg & 0x08) { + bluecard_receive(info, 0x10); + outb(0x08, iobase + REG_INTERRUPT); + outb(REG_COMMAND_RX_BUF_TWO, iobase + REG_COMMAND); + } + + if (reg & 0x01) { + set_bit(XMIT_BUF_ONE_READY, &(info->tx_state)); + outb(0x01, iobase + REG_INTERRUPT); + bluecard_write_wakeup(info); + } + + if (reg & 0x02) { + set_bit(XMIT_BUF_TWO_READY, &(info->tx_state)); + outb(0x02, iobase + REG_INTERRUPT); + bluecard_write_wakeup(info); + } + + } + + /* Enable interrupt */ + info->ctrl_reg |= REG_CONTROL_INTERRUPT; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + spin_unlock(&(info->lock)); + + return IRQ_HANDLED; +} + + + +/* ======================== Device specific HCI commands ======================== */ + + +static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud) +{ + bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data); + struct sk_buff *skb; + + /* Ericsson baud rate command */ + unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 }; + + if (!(skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) { + BT_ERR("Can't allocate mem for new packet"); + return -1; + } + + switch (baud) { + case 460800: + cmd[4] = 0x00; + bt_cb(skb)->pkt_type = PKT_BAUD_RATE_460800; + break; + case 230400: + cmd[4] = 0x01; + bt_cb(skb)->pkt_type = PKT_BAUD_RATE_230400; + break; + case 115200: + cmd[4] = 0x02; + bt_cb(skb)->pkt_type = PKT_BAUD_RATE_115200; + break; + case 57600: + /* Fall through... 
*/ + default: + cmd[4] = 0x03; + bt_cb(skb)->pkt_type = PKT_BAUD_RATE_57600; + break; + } + + memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd)); + + skb_queue_tail(&(info->txq), skb); + + bluecard_write_wakeup(info); + + return 0; +} + + + +/* ======================== HCI interface ======================== */ + + +static int bluecard_hci_flush(struct hci_dev *hdev) +{ + bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data); + + /* Drop TX queue */ + skb_queue_purge(&(info->txq)); + + return 0; +} + + +static int bluecard_hci_open(struct hci_dev *hdev) +{ + bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data); + unsigned int iobase = info->p_dev->io.BasePort1; + + if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) + bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); + + if (test_and_set_bit(HCI_RUNNING, &(hdev->flags))) + return 0; + + if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) { + /* Enable LED */ + outb(0x08 | 0x20, iobase + 0x30); + } + + return 0; +} + + +static int bluecard_hci_close(struct hci_dev *hdev) +{ + bluecard_info_t *info = (bluecard_info_t *)(hdev->driver_data); + unsigned int iobase = info->p_dev->io.BasePort1; + + if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) + return 0; + + bluecard_hci_flush(hdev); + + if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) { + /* Disable LED */ + outb(0x00, iobase + 0x30); + } + + return 0; +} + + +static int bluecard_hci_send_frame(struct sk_buff *skb) +{ + bluecard_info_t *info; + struct hci_dev *hdev = (struct hci_dev *)(skb->dev); + + if (!hdev) { + BT_ERR("Frame for unknown HCI device (hdev=NULL)"); + return -ENODEV; + } + + info = (bluecard_info_t *)(hdev->driver_data); + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + }; + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + skb_queue_tail(&(info->txq), skb); + + bluecard_write_wakeup(info); + + return 0; +} + + +static void bluecard_hci_destruct(struct hci_dev *hdev) +{ +} + + +static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) +{ + return -ENOIOCTLCMD; +} + + + +/* ======================== Card services HCI interaction ======================== */ + + +static int bluecard_open(bluecard_info_t *info) +{ + unsigned int iobase = info->p_dev->io.BasePort1; + struct hci_dev *hdev; + unsigned char id; + + spin_lock_init(&(info->lock)); + + init_timer(&(info->timer)); + info->timer.function = &bluecard_activity_led_timeout; + info->timer.data = (u_long)info; + + skb_queue_head_init(&(info->txq)); + + info->rx_state = RECV_WAIT_PACKET_TYPE; + info->rx_count = 0; + info->rx_skb = NULL; + + /* Initialize HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + return -ENOMEM; + } + + info->hdev = hdev; + + hdev->bus = HCI_PCCARD; + hdev->driver_data = info; + SET_HCIDEV_DEV(hdev, &info->p_dev->dev); + + hdev->open = bluecard_hci_open; + hdev->close = bluecard_hci_close; + hdev->flush = bluecard_hci_flush; + hdev->send = bluecard_hci_send_frame; + hdev->destruct = bluecard_hci_destruct; + hdev->ioctl = bluecard_hci_ioctl; + + hdev->owner = THIS_MODULE; + + id = inb(iobase + 0x30); + + if ((id & 0x0f) == 0x02) + set_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)); + + if (id & 0x10) + set_bit(CARD_HAS_POWER_LED, &(info->hw_state)); + + if (id & 0x20) + set_bit(CARD_HAS_ACTIVITY_LED, 
&(info->hw_state)); + + /* Reset card */ + info->ctrl_reg = REG_CONTROL_BT_RESET | REG_CONTROL_CARD_RESET; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Turn FPGA off */ + outb(0x80, iobase + 0x30); + + /* Wait some time */ + msleep(10); + + /* Turn FPGA on */ + outb(0x00, iobase + 0x30); + + /* Activate card */ + info->ctrl_reg = REG_CONTROL_BT_ON | REG_CONTROL_BT_RES_PU; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Enable interrupt */ + outb(0xff, iobase + REG_INTERRUPT); + info->ctrl_reg |= REG_CONTROL_INTERRUPT; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + if ((id & 0x0f) == 0x03) { + /* Disable RTS */ + info->ctrl_reg |= REG_CONTROL_RTS; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Set baud rate */ + info->ctrl_reg |= 0x03; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Enable RTS */ + info->ctrl_reg &= ~REG_CONTROL_RTS; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + set_bit(XMIT_BUF_ONE_READY, &(info->tx_state)); + set_bit(XMIT_BUF_TWO_READY, &(info->tx_state)); + set_bit(XMIT_SENDING_READY, &(info->tx_state)); + } + + /* Start the RX buffers */ + outb(REG_COMMAND_RX_BUF_ONE, iobase + REG_COMMAND); + outb(REG_COMMAND_RX_BUF_TWO, iobase + REG_COMMAND); + + /* Signal that the hardware is ready */ + set_bit(CARD_READY, &(info->hw_state)); + + /* Drop TX queue */ + skb_queue_purge(&(info->txq)); + + /* Control the point at which RTS is enabled */ + outb((0x0f << RTS_LEVEL_SHIFT_BITS) | 1, iobase + REG_RX_CONTROL); + + /* Timeout before it is safe to send the first HCI packet */ + msleep(1250); + + /* Register HCI device */ + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + info->hdev = NULL; + hci_free_dev(hdev); + return -ENODEV; + } + + return 0; +} + + +static int bluecard_close(bluecard_info_t *info) +{ + unsigned int iobase = info->p_dev->io.BasePort1; + struct hci_dev *hdev = info->hdev; + + if (!hdev) + return -ENODEV; + + bluecard_hci_close(hdev); + + clear_bit(CARD_READY, &(info->hw_state)); + + /* Reset card */ + info->ctrl_reg = REG_CONTROL_BT_RESET | REG_CONTROL_CARD_RESET; + outb(info->ctrl_reg, iobase + REG_CONTROL); + + /* Turn FPGA off */ + outb(0x80, iobase + 0x30); + + if (hci_unregister_dev(hdev) < 0) + BT_ERR("Can't unregister HCI device %s", hdev->name); + + hci_free_dev(hdev); + + return 0; +} + +static int bluecard_probe(struct pcmcia_device *link) +{ + bluecard_info_t *info; + + /* Create new info device */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->p_dev = link; + link->priv = info; + + link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; + link->io.NumPorts1 = 8; + link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; + + link->irq.Handler = bluecard_interrupt; + + link->conf.Attributes = CONF_ENABLE_IRQ; + link->conf.IntType = INT_MEMORY_AND_IO; + + return bluecard_config(link); +} + + +static void bluecard_detach(struct pcmcia_device *link) +{ + bluecard_info_t *info = link->priv; + + bluecard_release(link); + kfree(info); +} + + +static int bluecard_config(struct pcmcia_device *link) +{ + bluecard_info_t *info = link->priv; + int i, n; + + link->conf.ConfigIndex = 0x20; + link->io.NumPorts1 = 64; + link->io.IOAddrLines = 6; + + for (n = 0; n < 0x400; n += 0x40) { + link->io.BasePort1 = n ^ 0x300; + i = pcmcia_request_io(link, &link->io); + if (i == 0) + break; + } + + if (i != 0) + goto failed; + + i = pcmcia_request_irq(link, &link->irq); + if (i != 0) + link->irq.AssignedIRQ = 0; + + i = pcmcia_request_configuration(link, &link->conf); + if (i != 0) + goto failed; + 
+ if (bluecard_open(info) != 0) + goto failed; + + strcpy(info->node.dev_name, info->hdev->name); + link->dev_node = &info->node; + + return 0; + +failed: + bluecard_release(link); + return -ENODEV; +} + + +static void bluecard_release(struct pcmcia_device *link) +{ + bluecard_info_t *info = link->priv; + + bluecard_close(info); + + del_timer(&(info->timer)); + + pcmcia_disable_device(link); +} + +static struct pcmcia_device_id bluecard_ids[] = { + PCMCIA_DEVICE_PROD_ID12("BlueCard", "LSE041", 0xbaf16fbf, 0x657cc15e), + PCMCIA_DEVICE_PROD_ID12("BTCFCARD", "LSE139", 0xe3987764, 0x2524b59c), + PCMCIA_DEVICE_PROD_ID12("WSS", "LSE039", 0x0a0736ec, 0x24e6dfab), + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, bluecard_ids); + +static struct pcmcia_driver bluecard_driver = { + .owner = THIS_MODULE, + .drv = { + .name = "bluecard_cs", + }, + .probe = bluecard_probe, + .remove = bluecard_detach, + .id_table = bluecard_ids, +}; + +static int __init init_bluecard_cs(void) +{ + return pcmcia_register_driver(&bluecard_driver); +} + + +static void __exit exit_bluecard_cs(void) +{ + pcmcia_unregister_driver(&bluecard_driver); +} + +module_init(init_bluecard_cs); +module_exit(exit_bluecard_cs); diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c new file mode 100644 index 0000000..d945cd1 --- /dev/null +++ b/drivers/bluetooth/bpa10x.c @@ -0,0 +1,542 @@ +/* + * + * Digianswer Bluetooth USB driver + * + * Copyright (C) 2004-2007 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#define VERSION "0.10" + +static struct usb_device_id bpa10x_table[] = { + /* Tektronix BPA 100/105 (Digianswer) */ + { USB_DEVICE(0x08fd, 0x0002) }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, bpa10x_table); + +struct bpa10x_data { + struct hci_dev *hdev; + struct usb_device *udev; + + struct usb_anchor tx_anchor; + struct usb_anchor rx_anchor; + + struct sk_buff *rx_skb[2]; +}; + +#define HCI_VENDOR_HDR_SIZE 5 + +struct hci_vendor_hdr { + __u8 type; + __le16 snum; + __le16 dlen; +} __attribute__ ((packed)); + +static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count) +{ + struct bpa10x_data *data = hdev->driver_data; + + BT_DBG("%s queue %d buffer %p count %d", hdev->name, + queue, buf, count); + + if (queue < 0 || queue > 1) + return -EILSEQ; + + hdev->stat.byte_rx += count; + + while (count) { + struct sk_buff *skb = data->rx_skb[queue]; + struct { __u8 type; int expect; } *scb; + int type, len = 0; + + if (!skb) { + /* Start of the frame */ + + type = *((__u8 *) buf); + count--; buf++; + + switch (type) { + case HCI_EVENT_PKT: + if (count >= HCI_EVENT_HDR_SIZE) { + struct hci_event_hdr *h = buf; + len = HCI_EVENT_HDR_SIZE + h->plen; + } else + return -EILSEQ; + break; + + case HCI_ACLDATA_PKT: + if (count >= HCI_ACL_HDR_SIZE) { + struct hci_acl_hdr *h = buf; + len = HCI_ACL_HDR_SIZE + + __le16_to_cpu(h->dlen); + } else + return -EILSEQ; + break; + + case HCI_SCODATA_PKT: + if (count >= HCI_SCO_HDR_SIZE) { + struct hci_sco_hdr *h = buf; + len = HCI_SCO_HDR_SIZE + h->dlen; + } else + return -EILSEQ; + break; + + case HCI_VENDOR_PKT: + if (count >= HCI_VENDOR_HDR_SIZE) { + struct hci_vendor_hdr *h = buf; + len = HCI_VENDOR_HDR_SIZE + + __le16_to_cpu(h->dlen); + } else + return -EILSEQ; + break; + } + + skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!skb) { + BT_ERR("%s no memory for packet", hdev->name); + return -ENOMEM; + } + + skb->dev = (void *) hdev; + + data->rx_skb[queue] = skb; + + scb = (void *) skb->cb; + scb->type = type; + scb->expect = len; + } else { + /* Continuation */ + + scb = (void *) skb->cb; + len = scb->expect; + } + + len = min(len, count); + + memcpy(skb_put(skb, len), buf, len); + + scb->expect -= len; + + if (scb->expect == 0) { + /* Complete frame */ + + data->rx_skb[queue] = NULL; + + bt_cb(skb)->pkt_type = scb->type; + hci_recv_frame(skb); + } + + count -= len; buf += len; + } + + return 0; +} + +static void bpa10x_tx_complete(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct hci_dev *hdev = (struct hci_dev *) skb->dev; + + BT_DBG("%s urb %p status %d count %d", hdev->name, + urb, urb->status, urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + goto done; + + if (!urb->status) + hdev->stat.byte_tx += urb->transfer_buffer_length; + else + hdev->stat.err_tx++; + +done: + kfree(urb->setup_packet); + + kfree_skb(skb); +} + +static void bpa10x_rx_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct bpa10x_data *data = hdev->driver_data; + int err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, + urb, urb->status, urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return; + + if (urb->status == 0) { + if 
(bpa10x_recv(hdev, usb_pipebulk(urb->pipe), + urb->transfer_buffer, + urb->actual_length) < 0) { + BT_ERR("%s corrupted event packet", hdev->name); + hdev->stat.err_rx++; + } + } + + usb_anchor_urb(urb, &data->rx_anchor); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + BT_ERR("%s urb %p failed to resubmit (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } +} + +static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hdev->driver_data; + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size = 16; + + BT_DBG("%s", hdev->name); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return -ENOMEM; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvintpipe(data->udev, 0x81); + + usb_fill_int_urb(urb, data->udev, pipe, buf, size, + bpa10x_rx_complete, hdev, 1); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_anchor_urb(urb, &data->rx_anchor); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err < 0) { + BT_ERR("%s urb %p submission failed (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hdev->driver_data; + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size = 64; + + BT_DBG("%s", hdev->name); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return -ENOMEM; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvbulkpipe(data->udev, 0x82); + + usb_fill_bulk_urb(urb, data->udev, pipe, + buf, size, bpa10x_rx_complete, hdev); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_anchor_urb(urb, &data->rx_anchor); + + err = usb_submit_urb(urb, GFP_KERNEL); + if (err < 0) { + BT_ERR("%s urb %p submission failed (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static int bpa10x_open(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hdev->driver_data; + int err; + + BT_DBG("%s", hdev->name); + + if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + err = bpa10x_submit_intr_urb(hdev); + if (err < 0) + goto error; + + err = bpa10x_submit_bulk_urb(hdev); + if (err < 0) + goto error; + + return 0; + +error: + usb_kill_anchored_urbs(&data->rx_anchor); + + clear_bit(HCI_RUNNING, &hdev->flags); + + return err; +} + +static int bpa10x_close(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hdev->driver_data; + + BT_DBG("%s", hdev->name); + + if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + usb_kill_anchored_urbs(&data->rx_anchor); + + return 0; +} + +static int bpa10x_flush(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hdev->driver_data; + + BT_DBG("%s", hdev->name); + + usb_kill_anchored_urbs(&data->tx_anchor); + + return 0; +} + +static int bpa10x_send_frame(struct sk_buff *skb) +{ + struct hci_dev *hdev = (struct hci_dev *) skb->dev; + struct bpa10x_data *data = hdev->driver_data; + struct usb_ctrlrequest *dr; + struct urb *urb; + unsigned int pipe; + int err; + + BT_DBG("%s", hdev->name); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -EBUSY; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + return -ENOMEM; + + /* Prepend skb with frame type */ + *skb_push(skb, 1) = bt_cb(skb)->pkt_type; + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + dr = kmalloc(sizeof(*dr), GFP_ATOMIC); + if (!dr) { + 
usb_free_urb(urb); + return -ENOMEM; + } + + dr->bRequestType = USB_TYPE_VENDOR; + dr->bRequest = 0; + dr->wIndex = 0; + dr->wValue = 0; + dr->wLength = __cpu_to_le16(skb->len); + + pipe = usb_sndctrlpipe(data->udev, 0x00); + + usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, + skb->data, skb->len, bpa10x_tx_complete, skb); + + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + pipe = usb_sndbulkpipe(data->udev, 0x02); + + usb_fill_bulk_urb(urb, data->udev, pipe, + skb->data, skb->len, bpa10x_tx_complete, skb); + + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + pipe = usb_sndbulkpipe(data->udev, 0x02); + + usb_fill_bulk_urb(urb, data->udev, pipe, + skb->data, skb->len, bpa10x_tx_complete, skb); + + hdev->stat.sco_tx++; + break; + + default: + usb_free_urb(urb); + return -EILSEQ; + } + + usb_anchor_urb(urb, &data->tx_anchor); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + BT_ERR("%s urb %p submission failed", hdev->name, urb); + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return 0; +} + +static void bpa10x_destruct(struct hci_dev *hdev) +{ + struct bpa10x_data *data = hdev->driver_data; + + BT_DBG("%s", hdev->name); + + kfree_skb(data->rx_skb[0]); + kfree_skb(data->rx_skb[1]); + kfree(data); +} + +static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + struct bpa10x_data *data; + struct hci_dev *hdev; + int err; + + BT_DBG("intf %p id %p", intf, id); + + if (intf->cur_altsetting->desc.bInterfaceNumber != 0) + return -ENODEV; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->udev = interface_to_usbdev(intf); + + init_usb_anchor(&data->tx_anchor); + init_usb_anchor(&data->rx_anchor); + + hdev = hci_alloc_dev(); + if (!hdev) { + kfree(data); + return -ENOMEM; + } + + hdev->bus = HCI_USB; + hdev->driver_data = data; + + data->hdev = hdev; + + SET_HCIDEV_DEV(hdev, &intf->dev); + + hdev->open = bpa10x_open; + hdev->close = bpa10x_close; + hdev->flush = bpa10x_flush; + hdev->send = bpa10x_send_frame; + hdev->destruct = bpa10x_destruct; + + hdev->owner = THIS_MODULE; + + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + + err = hci_register_dev(hdev); + if (err < 0) { + hci_free_dev(hdev); + kfree(data); + return err; + } + + usb_set_intfdata(intf, data); + + return 0; +} + +static void bpa10x_disconnect(struct usb_interface *intf) +{ + struct bpa10x_data *data = usb_get_intfdata(intf); + + BT_DBG("intf %p", intf); + + if (!data) + return; + + usb_set_intfdata(intf, NULL); + + hci_unregister_dev(data->hdev); + + hci_free_dev(data->hdev); +} + +static struct usb_driver bpa10x_driver = { + .name = "bpa10x", + .probe = bpa10x_probe, + .disconnect = bpa10x_disconnect, + .id_table = bpa10x_table, +}; + +static int __init bpa10x_init(void) +{ + BT_INFO("Digianswer Bluetooth USB driver ver %s", VERSION); + + return usb_register(&bpa10x_driver); +} + +static void __exit bpa10x_exit(void) +{ + usb_deregister(&bpa10x_driver); +} + +module_init(bpa10x_init); +module_exit(bpa10x_exit); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c new file mode 100644 index 0000000..027cb8b --- /dev/null +++ b/drivers/bluetooth/bt3c_cs.c @@ -0,0 +1,806 @@ +/* + * + * Driver for the 3Com Bluetooth PCMCIA card + * + * Copyright (C) 2001-2002 Marcel Holtmann + * Jose Orlando Pereira + * + * + * This program 
is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * The initial developer of the original code is David A. Hinds + * . Portions created by David A. Hinds + * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + + + +/* ======================== Module parameters ======================== */ + + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth driver for the 3Com Bluetooth PCMCIA card"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("BT3CPCC.bin"); + + + +/* ======================== Local structures ======================== */ + + +typedef struct bt3c_info_t { + struct pcmcia_device *p_dev; + dev_node_t node; + + struct hci_dev *hdev; + + spinlock_t lock; /* For serializing operations */ + + struct sk_buff_head txq; + unsigned long tx_state; + + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; +} bt3c_info_t; + + +static int bt3c_config(struct pcmcia_device *link); +static void bt3c_release(struct pcmcia_device *link); + +static void bt3c_detach(struct pcmcia_device *p_dev); + + +/* Transmit states */ +#define XMIT_SENDING 1 +#define XMIT_WAKEUP 2 +#define XMIT_WAITING 8 + +/* Receiver states */ +#define RECV_WAIT_PACKET_TYPE 0 +#define RECV_WAIT_EVENT_HEADER 1 +#define RECV_WAIT_ACL_HEADER 2 +#define RECV_WAIT_SCO_HEADER 3 +#define RECV_WAIT_DATA 4 + + + +/* ======================== Special I/O functions ======================== */ + + +#define DATA_L 0 +#define DATA_H 1 +#define ADDR_L 2 +#define ADDR_H 3 +#define CONTROL 4 + + +static inline void bt3c_address(unsigned int iobase, unsigned short addr) +{ + outb(addr & 0xff, iobase + ADDR_L); + outb((addr >> 8) & 0xff, iobase + ADDR_H); +} + + +static inline void bt3c_put(unsigned int iobase, unsigned short value) +{ + outb(value & 0xff, iobase + DATA_L); + outb((value >> 8) & 0xff, iobase + DATA_H); +} + + +static inline void bt3c_io_write(unsigned int iobase, unsigned short addr, unsigned short value) +{ + bt3c_address(iobase, addr); + bt3c_put(iobase, value); +} + + +static inline unsigned short bt3c_get(unsigned int iobase) +{ + unsigned short value = inb(iobase + DATA_L); + + value |= inb(iobase + DATA_H) << 8; + + return value; +} + + +static inline unsigned short bt3c_read(unsigned int iobase, unsigned short addr) +{ + bt3c_address(iobase, addr); + + return bt3c_get(iobase); +} + + + +/* ======================== Interrupt handling ======================== */ + + +static int bt3c_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) +{ + int actual = 0; + + bt3c_address(iobase, 0x7080); + + /* Fill FIFO with current frame */ + while (actual < len) { + /* Transmit next byte */ + bt3c_put(iobase, buf[actual]); + actual++; + } + + bt3c_io_write(iobase, 0x7005, actual); + + return actual; +} + + +static void bt3c_write_wakeup(bt3c_info_t *info) +{ + if (!info) { + BT_ERR("Unknown device"); + return; + } + + if 
(test_and_set_bit(XMIT_SENDING, &(info->tx_state))) + return; + + do { + register unsigned int iobase = info->p_dev->io.BasePort1; + register struct sk_buff *skb; + register int len; + + if (!pcmcia_dev_present(info->p_dev)) + break; + + + if (!(skb = skb_dequeue(&(info->txq)))) { + clear_bit(XMIT_SENDING, &(info->tx_state)); + break; + } + + /* Send frame */ + len = bt3c_write(iobase, 256, skb->data, skb->len); + + if (len != skb->len) { + BT_ERR("Very strange"); + } + + kfree_skb(skb); + + info->hdev->stat.byte_tx += len; + + } while (0); +} + + +static void bt3c_receive(bt3c_info_t *info) +{ + unsigned int iobase; + int size = 0, avail; + + if (!info) { + BT_ERR("Unknown device"); + return; + } + + iobase = info->p_dev->io.BasePort1; + + avail = bt3c_read(iobase, 0x7006); + //printk("bt3c_cs: receiving %d bytes\n", avail); + + bt3c_address(iobase, 0x7480); + while (size < avail) { + size++; + info->hdev->stat.byte_rx++; + + /* Allocate packet */ + if (info->rx_skb == NULL) { + info->rx_state = RECV_WAIT_PACKET_TYPE; + info->rx_count = 0; + if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) { + BT_ERR("Can't allocate mem for new packet"); + return; + } + } + + + if (info->rx_state == RECV_WAIT_PACKET_TYPE) { + + info->rx_skb->dev = (void *) info->hdev; + bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L); + inb(iobase + DATA_H); + //printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type); + + switch (bt_cb(info->rx_skb)->pkt_type) { + + case HCI_EVENT_PKT: + info->rx_state = RECV_WAIT_EVENT_HEADER; + info->rx_count = HCI_EVENT_HDR_SIZE; + break; + + case HCI_ACLDATA_PKT: + info->rx_state = RECV_WAIT_ACL_HEADER; + info->rx_count = HCI_ACL_HDR_SIZE; + break; + + case HCI_SCODATA_PKT: + info->rx_state = RECV_WAIT_SCO_HEADER; + info->rx_count = HCI_SCO_HDR_SIZE; + break; + + default: + /* Unknown packet */ + BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); + info->hdev->stat.err_rx++; + clear_bit(HCI_RUNNING, &(info->hdev->flags)); + + kfree_skb(info->rx_skb); + info->rx_skb = NULL; + break; + + } + + } else { + + __u8 x = inb(iobase + DATA_L); + + *skb_put(info->rx_skb, 1) = x; + inb(iobase + DATA_H); + info->rx_count--; + + if (info->rx_count == 0) { + + int dlen; + struct hci_event_hdr *eh; + struct hci_acl_hdr *ah; + struct hci_sco_hdr *sh; + + switch (info->rx_state) { + + case RECV_WAIT_EVENT_HEADER: + eh = hci_event_hdr(info->rx_skb); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = eh->plen; + break; + + case RECV_WAIT_ACL_HEADER: + ah = hci_acl_hdr(info->rx_skb); + dlen = __le16_to_cpu(ah->dlen); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = dlen; + break; + + case RECV_WAIT_SCO_HEADER: + sh = hci_sco_hdr(info->rx_skb); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = sh->dlen; + break; + + case RECV_WAIT_DATA: + hci_recv_frame(info->rx_skb); + info->rx_skb = NULL; + break; + + } + + } + + } + + } + + bt3c_io_write(iobase, 0x7006, 0x0000); +} + + +static irqreturn_t bt3c_interrupt(int irq, void *dev_inst) +{ + bt3c_info_t *info = dev_inst; + unsigned int iobase; + int iir; + irqreturn_t r = IRQ_NONE; + + if (!info || !info->hdev) + /* our irq handler is shared */ + return IRQ_NONE; + + iobase = info->p_dev->io.BasePort1; + + spin_lock(&(info->lock)); + + iir = inb(iobase + CONTROL); + if (iir & 0x80) { + int stat = bt3c_read(iobase, 0x7001); + + if ((stat & 0xff) == 0x7f) { + BT_ERR("Very strange (stat=0x%04x)", stat); + } else if ((stat & 0xff) != 0xff) { + if (stat & 0x0020) { + int status = 
bt3c_read(iobase, 0x7002) & 0x10; + BT_INFO("%s: Antenna %s", info->hdev->name, + status ? "out" : "in"); + } + if (stat & 0x0001) + bt3c_receive(info); + if (stat & 0x0002) { + //BT_ERR("Ack (stat=0x%04x)", stat); + clear_bit(XMIT_SENDING, &(info->tx_state)); + bt3c_write_wakeup(info); + } + + bt3c_io_write(iobase, 0x7001, 0x0000); + + outb(iir, iobase + CONTROL); + } + r = IRQ_HANDLED; + } + + spin_unlock(&(info->lock)); + + return r; +} + + + +/* ======================== HCI interface ======================== */ + + +static int bt3c_hci_flush(struct hci_dev *hdev) +{ + bt3c_info_t *info = (bt3c_info_t *)(hdev->driver_data); + + /* Drop TX queue */ + skb_queue_purge(&(info->txq)); + + return 0; +} + + +static int bt3c_hci_open(struct hci_dev *hdev) +{ + set_bit(HCI_RUNNING, &(hdev->flags)); + + return 0; +} + + +static int bt3c_hci_close(struct hci_dev *hdev) +{ + if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) + return 0; + + bt3c_hci_flush(hdev); + + return 0; +} + + +static int bt3c_hci_send_frame(struct sk_buff *skb) +{ + bt3c_info_t *info; + struct hci_dev *hdev = (struct hci_dev *)(skb->dev); + unsigned long flags; + + if (!hdev) { + BT_ERR("Frame for unknown HCI device (hdev=NULL)"); + return -ENODEV; + } + + info = (bt3c_info_t *) (hdev->driver_data); + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + }; + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + skb_queue_tail(&(info->txq), skb); + + spin_lock_irqsave(&(info->lock), flags); + + bt3c_write_wakeup(info); + + spin_unlock_irqrestore(&(info->lock), flags); + + return 0; +} + + +static void bt3c_hci_destruct(struct hci_dev *hdev) +{ +} + + +static int bt3c_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) +{ + return -ENOIOCTLCMD; +} + + + +/* ======================== Card services HCI interaction ======================== */ + + +static int bt3c_load_firmware(bt3c_info_t *info, const unsigned char *firmware, + int count) +{ + char *ptr = (char *) firmware; + char b[9]; + unsigned int iobase, size, addr, fcs, tmp; + int i, err = 0; + + iobase = info->p_dev->io.BasePort1; + + /* Reset */ + bt3c_io_write(iobase, 0x8040, 0x0404); + bt3c_io_write(iobase, 0x8040, 0x0400); + + udelay(1); + + bt3c_io_write(iobase, 0x8040, 0x0404); + + udelay(17); + + /* Load */ + while (count) { + if (ptr[0] != 'S') { + BT_ERR("Bad address in firmware"); + err = -EFAULT; + goto error; + } + + memset(b, 0, sizeof(b)); + memcpy(b, ptr + 2, 2); + size = simple_strtoul(b, NULL, 16); + + memset(b, 0, sizeof(b)); + memcpy(b, ptr + 4, 8); + addr = simple_strtoul(b, NULL, 16); + + memset(b, 0, sizeof(b)); + memcpy(b, ptr + (size * 2) + 2, 2); + fcs = simple_strtoul(b, NULL, 16); + + memset(b, 0, sizeof(b)); + for (tmp = 0, i = 0; i < size; i++) { + memcpy(b, ptr + (i * 2) + 2, 2); + tmp += simple_strtol(b, NULL, 16); + } + + if (((tmp + fcs) & 0xff) != 0xff) { + BT_ERR("Checksum error in firmware"); + err = -EILSEQ; + goto error; + } + + if (ptr[1] == '3') { + bt3c_address(iobase, addr); + + memset(b, 0, sizeof(b)); + for (i = 0; i < (size - 4) / 2; i++) { + memcpy(b, ptr + (i * 4) + 12, 4); + tmp = simple_strtoul(b, NULL, 16); + bt3c_put(iobase, tmp); + } + } + + ptr += (size * 2) + 6; + count -= (size * 2) + 6; + } + + udelay(17); + + /* Boot */ + bt3c_address(iobase, 0x3000); + outb(inb(iobase + CONTROL) | 0x40, iobase + CONTROL); + 
+error: + udelay(17); + + /* Clear */ + bt3c_io_write(iobase, 0x7006, 0x0000); + bt3c_io_write(iobase, 0x7005, 0x0000); + bt3c_io_write(iobase, 0x7001, 0x0000); + + return err; +} + + +static int bt3c_open(bt3c_info_t *info) +{ + const struct firmware *firmware; + struct hci_dev *hdev; + int err; + + spin_lock_init(&(info->lock)); + + skb_queue_head_init(&(info->txq)); + + info->rx_state = RECV_WAIT_PACKET_TYPE; + info->rx_count = 0; + info->rx_skb = NULL; + + /* Initialize HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + return -ENOMEM; + } + + info->hdev = hdev; + + hdev->bus = HCI_PCCARD; + hdev->driver_data = info; + SET_HCIDEV_DEV(hdev, &info->p_dev->dev); + + hdev->open = bt3c_hci_open; + hdev->close = bt3c_hci_close; + hdev->flush = bt3c_hci_flush; + hdev->send = bt3c_hci_send_frame; + hdev->destruct = bt3c_hci_destruct; + hdev->ioctl = bt3c_hci_ioctl; + + hdev->owner = THIS_MODULE; + + /* Load firmware */ + err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev); + if (err < 0) { + BT_ERR("Firmware request failed"); + goto error; + } + + err = bt3c_load_firmware(info, firmware->data, firmware->size); + + release_firmware(firmware); + + if (err < 0) { + BT_ERR("Firmware loading failed"); + goto error; + } + + /* Timeout before it is safe to send the first HCI packet */ + msleep(1000); + + /* Register HCI device */ + err = hci_register_dev(hdev); + if (err < 0) { + BT_ERR("Can't register HCI device"); + goto error; + } + + return 0; + +error: + info->hdev = NULL; + hci_free_dev(hdev); + return err; +} + + +static int bt3c_close(bt3c_info_t *info) +{ + struct hci_dev *hdev = info->hdev; + + if (!hdev) + return -ENODEV; + + bt3c_hci_close(hdev); + + if (hci_unregister_dev(hdev) < 0) + BT_ERR("Can't unregister HCI device %s", hdev->name); + + hci_free_dev(hdev); + + return 0; +} + +static int bt3c_probe(struct pcmcia_device *link) +{ + bt3c_info_t *info; + + /* Create new info device */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->p_dev = link; + link->priv = info; + + link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; + link->io.NumPorts1 = 8; + link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; + + link->irq.Handler = bt3c_interrupt; + + link->conf.Attributes = CONF_ENABLE_IRQ; + link->conf.IntType = INT_MEMORY_AND_IO; + + return bt3c_config(link); +} + + +static void bt3c_detach(struct pcmcia_device *link) +{ + bt3c_info_t *info = link->priv; + + bt3c_release(link); + kfree(info); +} + +static int bt3c_check_config(struct pcmcia_device *p_dev, + cistpl_cftable_entry_t *cf, + cistpl_cftable_entry_t *dflt, + unsigned int vcc, + void *priv_data) +{ + unsigned long try = (unsigned long) priv_data; + + if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM)) + p_dev->conf.Vpp = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000; + if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) && + (cf->io.win[0].base != 0)) { + p_dev->io.BasePort1 = cf->io.win[0].base; + p_dev->io.IOAddrLines = (try == 0) ? 
16 : + cf->io.flags & CISTPL_IO_LINES_MASK; + if (!pcmcia_request_io(p_dev, &p_dev->io)) + return 0; + } + return -ENODEV; +} + +static int bt3c_check_config_notpicky(struct pcmcia_device *p_dev, + cistpl_cftable_entry_t *cf, + cistpl_cftable_entry_t *dflt, + unsigned int vcc, + void *priv_data) +{ + static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; + int j; + + if ((cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) { + for (j = 0; j < 5; j++) { + p_dev->io.BasePort1 = base[j]; + p_dev->io.IOAddrLines = base[j] ? 16 : 3; + if (!pcmcia_request_io(p_dev, &p_dev->io)) + return 0; + } + } + return -ENODEV; +} + +static int bt3c_config(struct pcmcia_device *link) +{ + bt3c_info_t *info = link->priv; + int i; + unsigned long try; + + /* First pass: look for a config entry that looks normal. + Two tries: without IO aliases, then with aliases */ + for (try = 0; try < 2; try++) + if (!pcmcia_loop_config(link, bt3c_check_config, (void *) try)) + goto found_port; + + /* Second pass: try to find an entry that isn't picky about + its base address, then try to grab any standard serial port + address, and finally try to get any free port. */ + if (!pcmcia_loop_config(link, bt3c_check_config_notpicky, NULL)) + goto found_port; + + BT_ERR("No usable port range found"); + goto failed; + +found_port: + i = pcmcia_request_irq(link, &link->irq); + if (i != 0) + link->irq.AssignedIRQ = 0; + + i = pcmcia_request_configuration(link, &link->conf); + if (i != 0) + goto failed; + + if (bt3c_open(info) != 0) + goto failed; + + strcpy(info->node.dev_name, info->hdev->name); + link->dev_node = &info->node; + + return 0; + +failed: + bt3c_release(link); + return -ENODEV; +} + + +static void bt3c_release(struct pcmcia_device *link) +{ + bt3c_info_t *info = link->priv; + + bt3c_close(info); + + pcmcia_disable_device(link); +} + + +static struct pcmcia_device_id bt3c_ids[] = { + PCMCIA_DEVICE_PROD_ID13("3COM", "Bluetooth PC Card", 0xefce0a31, 0xd4ce9b02), + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, bt3c_ids); + +static struct pcmcia_driver bt3c_driver = { + .owner = THIS_MODULE, + .drv = { + .name = "bt3c_cs", + }, + .probe = bt3c_probe, + .remove = bt3c_detach, + .id_table = bt3c_ids, +}; + +static int __init init_bt3c_cs(void) +{ + return pcmcia_register_driver(&bt3c_driver); +} + + +static void __exit exit_bt3c_cs(void) +{ + pcmcia_unregister_driver(&bt3c_driver); +} + +module_init(init_bt3c_cs); +module_exit(exit_bt3c_cs); diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c new file mode 100644 index 0000000..3126a3d --- /dev/null +++ b/drivers/bluetooth/btmrvl_debugfs.c @@ -0,0 +1,431 @@ +/** + * Marvell Bluetooth driver: debugfs related functions + * + * Copyright (C) 2009, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. 
The License provides additional details about + * this warranty disclaimer. + **/ + +#include + +#include +#include + +#include "btmrvl_drv.h" + +struct btmrvl_debugfs_data { + struct dentry *config_dir; + struct dentry *status_dir; + + /* config */ + struct dentry *psmode; + struct dentry *pscmd; + struct dentry *hsmode; + struct dentry *hscmd; + struct dentry *gpiogap; + struct dentry *hscfgcmd; + + /* status */ + struct dentry *curpsmode; + struct dentry *hsstate; + struct dentry *psstate; + struct dentry *txdnldready; +}; + +static int btmrvl_open_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t btmrvl_hscfgcmd_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + long result, ret; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + ret = strict_strtol(buf, 10, &result); + + priv->btmrvl_dev.hscfgcmd = result; + + if (priv->btmrvl_dev.hscfgcmd) { + btmrvl_prepare_command(priv); + wake_up_interruptible(&priv->main_thread.wait_q); + } + + return count; +} + +static ssize_t btmrvl_hscfgcmd_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", + priv->btmrvl_dev.hscfgcmd); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_hscfgcmd_fops = { + .read = btmrvl_hscfgcmd_read, + .write = btmrvl_hscfgcmd_write, + .open = btmrvl_open_generic, +}; + +static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + long result, ret; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + ret = strict_strtol(buf, 10, &result); + + priv->btmrvl_dev.psmode = result; + + return count; +} + +static ssize_t btmrvl_psmode_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", + priv->btmrvl_dev.psmode); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_psmode_fops = { + .read = btmrvl_psmode_read, + .write = btmrvl_psmode_write, + .open = btmrvl_open_generic, +}; + +static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + long result, ret; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + ret = strict_strtol(buf, 10, &result); + + priv->btmrvl_dev.pscmd = result; + + if (priv->btmrvl_dev.pscmd) { + btmrvl_prepare_command(priv); + wake_up_interruptible(&priv->main_thread.wait_q); + } + + return count; + +} + +static ssize_t btmrvl_pscmd_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.pscmd); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + 
+static const struct file_operations btmrvl_pscmd_fops = { + .read = btmrvl_pscmd_read, + .write = btmrvl_pscmd_write, + .open = btmrvl_open_generic, +}; + +static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + long result, ret; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + ret = strict_strtol(buf, 16, &result); + + priv->btmrvl_dev.gpio_gap = result; + + return count; +} + +static ssize_t btmrvl_gpiogap_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "0x%x\n", + priv->btmrvl_dev.gpio_gap); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_gpiogap_fops = { + .read = btmrvl_gpiogap_read, + .write = btmrvl_gpiogap_write, + .open = btmrvl_open_generic, +}; + +static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = (struct btmrvl_private *) file->private_data; + char buf[16]; + long result, ret; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + ret = strict_strtol(buf, 10, &result); + + priv->btmrvl_dev.hscmd = result; + if (priv->btmrvl_dev.hscmd) { + btmrvl_prepare_command(priv); + wake_up_interruptible(&priv->main_thread.wait_q); + } + + return count; +} + +static ssize_t btmrvl_hscmd_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hscmd); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_hscmd_fops = { + .read = btmrvl_hscmd_read, + .write = btmrvl_hscmd_write, + .open = btmrvl_open_generic, +}; + +static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + long result, ret; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + ret = strict_strtol(buf, 10, &result); + + priv->btmrvl_dev.hsmode = result; + + return count; +} + +static ssize_t btmrvl_hsmode_read(struct file *file, char __user * userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hsmode); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_hsmode_fops = { + .read = btmrvl_hsmode_read, + .write = btmrvl_hsmode_write, + .open = btmrvl_open_generic, +}; + +static ssize_t btmrvl_curpsmode_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->psmode); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_curpsmode_fops = { + .read = btmrvl_curpsmode_read, + .open = btmrvl_open_generic, +}; 
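Editorial note (not part of the patch): the btmrvl debugfs attributes defined above all share one pattern — each write handler copies at most 15 bytes from userspace, parses a decimal value with strict_strtol(), stores it in btmrvl_dev, and for command attributes (pscmd, hscmd, hscfgcmd) wakes the driver's main thread, while each read handler formats the current value with snprintf() and returns it through simple_read_from_buffer(). The sketch below is a minimal, hypothetical userspace illustration of exercising these files; the debugfs mount point and the hci0 directory name are assumptions that depend on the running system.

/* Hypothetical userspace sketch (not part of this patch): poke the
 * btmrvl debugfs attributes created by btmrvl_debugfs_init().
 * Paths assume debugfs is mounted at /sys/kernel/debug and the
 * adapter is registered as hci0 -- both are assumptions. */
#include <stdio.h>

int main(void)
{
	const char *cfg = "/sys/kernel/debug/bluetooth/hci0/config/pscmd";
	const char *sta = "/sys/kernel/debug/bluetooth/hci0/status/curpsmode";
	char buf[16] = { 0 };
	FILE *f;

	f = fopen(cfg, "w");
	if (!f)
		return 1;
	fputs("1\n", f);	/* parsed by btmrvl_pscmd_write() via strict_strtol() */
	fclose(f);

	f = fopen(sta, "r");
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))	/* formatted by btmrvl_curpsmode_read() */
		printf("curpsmode: %s", buf);
	fclose(f);

	return 0;
}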
+ +static ssize_t btmrvl_psstate_read(struct file *file, char __user * userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->ps_state); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_psstate_fops = { + .read = btmrvl_psstate_read, + .open = btmrvl_open_generic, +}; + +static ssize_t btmrvl_hsstate_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->hs_state); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_hsstate_fops = { + .read = btmrvl_hsstate_read, + .open = btmrvl_open_generic, +}; + +static ssize_t btmrvl_txdnldready_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct btmrvl_private *priv = file->private_data; + char buf[16]; + int ret; + + ret = snprintf(buf, sizeof(buf) - 1, "%d\n", + priv->btmrvl_dev.tx_dnld_rdy); + + return simple_read_from_buffer(userbuf, count, ppos, buf, ret); +} + +static const struct file_operations btmrvl_txdnldready_fops = { + .read = btmrvl_txdnldready_read, + .open = btmrvl_open_generic, +}; + +void btmrvl_debugfs_init(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hdev->driver_data; + struct btmrvl_debugfs_data *dbg; + + if (!hdev->debugfs) + return; + + dbg = kzalloc(sizeof(*dbg), GFP_KERNEL); + priv->debugfs_data = dbg; + + if (!dbg) { + BT_ERR("Can not allocate memory for btmrvl_debugfs_data."); + return; + } + + dbg->config_dir = debugfs_create_dir("config", hdev->debugfs); + + dbg->psmode = debugfs_create_file("psmode", 0644, dbg->config_dir, + hdev->driver_data, &btmrvl_psmode_fops); + dbg->pscmd = debugfs_create_file("pscmd", 0644, dbg->config_dir, + hdev->driver_data, &btmrvl_pscmd_fops); + dbg->gpiogap = debugfs_create_file("gpiogap", 0644, dbg->config_dir, + hdev->driver_data, &btmrvl_gpiogap_fops); + dbg->hsmode = debugfs_create_file("hsmode", 0644, dbg->config_dir, + hdev->driver_data, &btmrvl_hsmode_fops); + dbg->hscmd = debugfs_create_file("hscmd", 0644, dbg->config_dir, + hdev->driver_data, &btmrvl_hscmd_fops); + dbg->hscfgcmd = debugfs_create_file("hscfgcmd", 0644, dbg->config_dir, + hdev->driver_data, &btmrvl_hscfgcmd_fops); + + dbg->status_dir = debugfs_create_dir("status", hdev->debugfs); + dbg->curpsmode = debugfs_create_file("curpsmode", 0444, + dbg->status_dir, + hdev->driver_data, + &btmrvl_curpsmode_fops); + dbg->psstate = debugfs_create_file("psstate", 0444, dbg->status_dir, + hdev->driver_data, &btmrvl_psstate_fops); + dbg->hsstate = debugfs_create_file("hsstate", 0444, dbg->status_dir, + hdev->driver_data, &btmrvl_hsstate_fops); + dbg->txdnldready = debugfs_create_file("txdnldready", 0444, + dbg->status_dir, + hdev->driver_data, + &btmrvl_txdnldready_fops); +} + +void btmrvl_debugfs_remove(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hdev->driver_data; + struct btmrvl_debugfs_data *dbg = priv->debugfs_data; + + if (!dbg) + return; + + debugfs_remove(dbg->psmode); + debugfs_remove(dbg->pscmd); + debugfs_remove(dbg->gpiogap); + debugfs_remove(dbg->hsmode); + debugfs_remove(dbg->hscmd); + debugfs_remove(dbg->hscfgcmd); + debugfs_remove(dbg->config_dir); + + debugfs_remove(dbg->curpsmode); + debugfs_remove(dbg->psstate); + 
debugfs_remove(dbg->hsstate); + debugfs_remove(dbg->txdnldready); + debugfs_remove(dbg->status_dir); + + kfree(dbg); +} diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h new file mode 100644 index 0000000..523d197 --- /dev/null +++ b/drivers/bluetooth/btmrvl_drv.h @@ -0,0 +1,140 @@ +/* + * Marvell Bluetooth driver: global definitions & declarations + * + * Copyright (C) 2009, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + * + */ + +#include +#include +#include + +#define BTM_HEADER_LEN 4 +#define BTM_UPLD_SIZE 2312 + +/* Time to wait until Host Sleep state change in millisecond */ +#define WAIT_UNTIL_HS_STATE_CHANGED 5000 +/* Time to wait for command response in millisecond */ +#define WAIT_UNTIL_CMD_RESP 5000 + +struct btmrvl_thread { + struct task_struct *task; + wait_queue_head_t wait_q; + void *priv; +}; + +struct btmrvl_device { + void *card; + struct hci_dev *hcidev; + + u8 tx_dnld_rdy; + + u8 psmode; + u8 pscmd; + u8 hsmode; + u8 hscmd; + + /* Low byte is gap, high byte is GPIO */ + u16 gpio_gap; + + u8 hscfgcmd; + u8 sendcmdflag; +}; + +struct btmrvl_adapter { + u32 int_count; + struct sk_buff_head tx_queue; + u8 psmode; + u8 ps_state; + u8 hs_state; + u8 wakeup_tries; + wait_queue_head_t cmd_wait_q; + u8 cmd_complete; +}; + +struct btmrvl_private { + struct btmrvl_device btmrvl_dev; + struct btmrvl_adapter *adapter; + struct btmrvl_thread main_thread; + int (*hw_host_to_card) (struct btmrvl_private *priv, + u8 *payload, u16 nb); + int (*hw_wakeup_firmware) (struct btmrvl_private *priv); + spinlock_t driver_lock; /* spinlock used by driver */ +#ifdef CONFIG_DEBUG_FS + void *debugfs_data; +#endif +}; + +#define MRVL_VENDOR_PKT 0xFE + +/* Bluetooth commands */ +#define BT_CMD_AUTO_SLEEP_MODE 0x23 +#define BT_CMD_HOST_SLEEP_CONFIG 0x59 +#define BT_CMD_HOST_SLEEP_ENABLE 0x5A +#define BT_CMD_MODULE_CFG_REQ 0x5B + +/* Sub-commands: Module Bringup/Shutdown Request */ +#define MODULE_BRINGUP_REQ 0xF1 +#define MODULE_SHUTDOWN_REQ 0xF2 + +#define BT_EVENT_POWER_STATE 0x20 + +/* Bluetooth Power States */ +#define BT_PS_ENABLE 0x02 +#define BT_PS_DISABLE 0x03 +#define BT_PS_SLEEP 0x01 + +#define OGF 0x3F + +/* Host Sleep states */ +#define HS_ACTIVATED 0x01 +#define HS_DEACTIVATED 0x00 + +/* Power Save modes */ +#define PS_SLEEP 0x01 +#define PS_AWAKE 0x00 + +struct btmrvl_cmd { + __le16 ocf_ogf; + u8 length; + u8 data[4]; +} __attribute__ ((packed)); + +struct btmrvl_event { + u8 ec; /* event counter */ + u8 length; + u8 data[4]; +} __attribute__ ((packed)); + +/* Prototype of global function */ + +struct btmrvl_private *btmrvl_add_card(void *card); +int btmrvl_remove_card(struct btmrvl_private *priv); + +void btmrvl_interrupt(struct btmrvl_private *priv); + +void btmrvl_check_evtpkt(struct btmrvl_private 
*priv, struct sk_buff *skb); +int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb); + +int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd); +int btmrvl_enable_ps(struct btmrvl_private *priv); +int btmrvl_prepare_command(struct btmrvl_private *priv); + +#ifdef CONFIG_DEBUG_FS +void btmrvl_debugfs_init(struct hci_dev *hdev); +void btmrvl_debugfs_remove(struct hci_dev *hdev); +#endif diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c new file mode 100644 index 0000000..53a43ad --- /dev/null +++ b/drivers/bluetooth/btmrvl_main.c @@ -0,0 +1,635 @@ +/** + * Marvell Bluetooth driver + * + * Copyright (C) 2009, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + **/ + +#include +#include + +#include "btmrvl_drv.h" + +#define VERSION "1.0" + +/* + * This function is called by interface specific interrupt handler. + * It updates Power Save & Host Sleep states, and wakes up the main + * thread. + */ +void btmrvl_interrupt(struct btmrvl_private *priv) +{ + priv->adapter->ps_state = PS_AWAKE; + + priv->adapter->wakeup_tries = 0; + + priv->adapter->int_count++; + + wake_up_interruptible(&priv->main_thread.wait_q); +} +EXPORT_SYMBOL_GPL(btmrvl_interrupt); + +void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb) +{ + struct hci_event_hdr *hdr = (void *) skb->data; + struct hci_ev_cmd_complete *ec; + u16 opcode, ocf; + + if (hdr->evt == HCI_EV_CMD_COMPLETE) { + ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE); + opcode = __le16_to_cpu(ec->opcode); + ocf = hci_opcode_ocf(opcode); + if (ocf == BT_CMD_MODULE_CFG_REQ && + priv->btmrvl_dev.sendcmdflag) { + priv->btmrvl_dev.sendcmdflag = false; + priv->adapter->cmd_complete = true; + wake_up_interruptible(&priv->adapter->cmd_wait_q); + } + } +} +EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt); + +int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb) +{ + struct btmrvl_adapter *adapter = priv->adapter; + struct btmrvl_event *event; + u8 ret = 0; + + event = (struct btmrvl_event *) skb->data; + if (event->ec != 0xff) { + BT_DBG("Not Marvell Event=%x", event->ec); + ret = -EINVAL; + goto exit; + } + + switch (event->data[0]) { + case BT_CMD_AUTO_SLEEP_MODE: + if (!event->data[2]) { + if (event->data[1] == BT_PS_ENABLE) + adapter->psmode = 1; + else + adapter->psmode = 0; + BT_DBG("PS Mode:%s", + (adapter->psmode) ? 
"Enable" : "Disable"); + } else { + BT_DBG("PS Mode command failed"); + } + break; + + case BT_CMD_HOST_SLEEP_CONFIG: + if (!event->data[3]) + BT_DBG("gpio=%x, gap=%x", event->data[1], + event->data[2]); + else + BT_DBG("HSCFG command failed"); + break; + + case BT_CMD_HOST_SLEEP_ENABLE: + if (!event->data[1]) { + adapter->hs_state = HS_ACTIVATED; + if (adapter->psmode) + adapter->ps_state = PS_SLEEP; + wake_up_interruptible(&adapter->cmd_wait_q); + BT_DBG("HS ACTIVATED!"); + } else { + BT_DBG("HS Enable failed"); + } + break; + + case BT_CMD_MODULE_CFG_REQ: + if (priv->btmrvl_dev.sendcmdflag && + event->data[1] == MODULE_BRINGUP_REQ) { + BT_DBG("EVENT:%s", (event->data[2]) ? + "Bring-up failed" : "Bring-up succeed"); + } else if (priv->btmrvl_dev.sendcmdflag && + event->data[1] == MODULE_SHUTDOWN_REQ) { + BT_DBG("EVENT:%s", (event->data[2]) ? + "Shutdown failed" : "Shutdown succeed"); + } else { + BT_DBG("BT_CMD_MODULE_CFG_REQ resp for APP"); + ret = -EINVAL; + } + break; + + case BT_EVENT_POWER_STATE: + if (event->data[1] == BT_PS_SLEEP) + adapter->ps_state = PS_SLEEP; + BT_DBG("EVENT:%s", + (adapter->ps_state) ? "PS_SLEEP" : "PS_AWAKE"); + break; + + default: + BT_DBG("Unknown Event=%d", event->data[0]); + ret = -EINVAL; + break; + } + +exit: + if (!ret) + kfree_skb(skb); + + return ret; +} +EXPORT_SYMBOL_GPL(btmrvl_process_event); + +int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd) +{ + struct sk_buff *skb; + struct btmrvl_cmd *cmd; + int ret = 0; + + skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC); + if (skb == NULL) { + BT_ERR("No free skb"); + return -ENOMEM; + } + + cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd)); + cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_MODULE_CFG_REQ)); + cmd->length = 1; + cmd->data[0] = subcmd; + + bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT; + + skb->dev = (void *) priv->btmrvl_dev.hcidev; + skb_queue_head(&priv->adapter->tx_queue, skb); + + priv->btmrvl_dev.sendcmdflag = true; + + priv->adapter->cmd_complete = false; + + BT_DBG("Queue module cfg Command"); + + wake_up_interruptible(&priv->main_thread.wait_q); + + if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q, + priv->adapter->cmd_complete, + msecs_to_jiffies(WAIT_UNTIL_CMD_RESP))) { + ret = -ETIMEDOUT; + BT_ERR("module_cfg_cmd(%x): timeout: %d", + subcmd, priv->btmrvl_dev.sendcmdflag); + } + + BT_DBG("module cfg Command done"); + + return ret; +} +EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd); + +int btmrvl_enable_ps(struct btmrvl_private *priv) +{ + struct sk_buff *skb; + struct btmrvl_cmd *cmd; + + skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC); + if (skb == NULL) { + BT_ERR("No free skb"); + return -ENOMEM; + } + + cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd)); + cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, + BT_CMD_AUTO_SLEEP_MODE)); + cmd->length = 1; + + if (priv->btmrvl_dev.psmode) + cmd->data[0] = BT_PS_ENABLE; + else + cmd->data[0] = BT_PS_DISABLE; + + bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT; + + skb->dev = (void *) priv->btmrvl_dev.hcidev; + skb_queue_head(&priv->adapter->tx_queue, skb); + + BT_DBG("Queue PSMODE Command:%d", cmd->data[0]); + + return 0; +} +EXPORT_SYMBOL_GPL(btmrvl_enable_ps); + +static int btmrvl_enable_hs(struct btmrvl_private *priv) +{ + struct sk_buff *skb; + struct btmrvl_cmd *cmd; + int ret = 0; + + skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC); + if (skb == NULL) { + BT_ERR("No free skb"); + return -ENOMEM; + } + + cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd)); + cmd->ocf_ogf = 
cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_ENABLE)); + cmd->length = 0; + + bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT; + + skb->dev = (void *) priv->btmrvl_dev.hcidev; + skb_queue_head(&priv->adapter->tx_queue, skb); + + BT_DBG("Queue hs enable Command"); + + wake_up_interruptible(&priv->main_thread.wait_q); + + if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q, + priv->adapter->hs_state, + msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED))) { + ret = -ETIMEDOUT; + BT_ERR("timeout: %d, %d,%d", priv->adapter->hs_state, + priv->adapter->ps_state, + priv->adapter->wakeup_tries); + } + + return ret; +} + +int btmrvl_prepare_command(struct btmrvl_private *priv) +{ + struct sk_buff *skb = NULL; + struct btmrvl_cmd *cmd; + int ret = 0; + + if (priv->btmrvl_dev.hscfgcmd) { + priv->btmrvl_dev.hscfgcmd = 0; + + skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC); + if (skb == NULL) { + BT_ERR("No free skb"); + return -ENOMEM; + } + + cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd)); + cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_CONFIG)); + cmd->length = 2; + cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8; + cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff); + + bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT; + + skb->dev = (void *) priv->btmrvl_dev.hcidev; + skb_queue_head(&priv->adapter->tx_queue, skb); + + BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", + cmd->data[0], cmd->data[1]); + } + + if (priv->btmrvl_dev.pscmd) { + priv->btmrvl_dev.pscmd = 0; + btmrvl_enable_ps(priv); + } + + if (priv->btmrvl_dev.hscmd) { + priv->btmrvl_dev.hscmd = 0; + + if (priv->btmrvl_dev.hsmode) { + ret = btmrvl_enable_hs(priv); + } else { + ret = priv->hw_wakeup_firmware(priv); + priv->adapter->hs_state = HS_DEACTIVATED; + } + } + + return ret; +} + +static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb) +{ + int ret = 0; + + if (!skb || !skb->data) + return -EINVAL; + + if (!skb->len || ((skb->len + BTM_HEADER_LEN) > BTM_UPLD_SIZE)) { + BT_ERR("Tx Error: Bad skb length %d : %d", + skb->len, BTM_UPLD_SIZE); + return -EINVAL; + } + + if (skb_headroom(skb) < BTM_HEADER_LEN) { + struct sk_buff *tmp = skb; + + skb = skb_realloc_headroom(skb, BTM_HEADER_LEN); + if (!skb) { + BT_ERR("Tx Error: realloc_headroom failed %d", + BTM_HEADER_LEN); + skb = tmp; + return -EINVAL; + } + + kfree_skb(tmp); + } + + skb_push(skb, BTM_HEADER_LEN); + + /* header type: byte[3] + * HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor + * header length: byte[2][1][0] + */ + + skb->data[0] = (skb->len & 0x0000ff); + skb->data[1] = (skb->len & 0x00ff00) >> 8; + skb->data[2] = (skb->len & 0xff0000) >> 16; + skb->data[3] = bt_cb(skb)->pkt_type; + + if (priv->hw_host_to_card) + ret = priv->hw_host_to_card(priv, skb->data, skb->len); + + return ret; +} + +static void btmrvl_init_adapter(struct btmrvl_private *priv) +{ + skb_queue_head_init(&priv->adapter->tx_queue); + + priv->adapter->ps_state = PS_AWAKE; + + init_waitqueue_head(&priv->adapter->cmd_wait_q); +} + +static void btmrvl_free_adapter(struct btmrvl_private *priv) +{ + skb_queue_purge(&priv->adapter->tx_queue); + + kfree(priv->adapter); + + priv->adapter = NULL; +} + +static int btmrvl_ioctl(struct hci_dev *hdev, + unsigned int cmd, unsigned long arg) +{ + return -ENOIOCTLCMD; +} + +static void btmrvl_destruct(struct hci_dev *hdev) +{ +} + +static int btmrvl_send_frame(struct sk_buff *skb) +{ + struct hci_dev *hdev = (struct hci_dev *) skb->dev; + struct btmrvl_private *priv = NULL; + + BT_DBG("type=%d, len=%d", 
skb->pkt_type, skb->len); + + if (!hdev || !hdev->driver_data) { + BT_ERR("Frame for unknown HCI device"); + return -ENODEV; + } + + priv = (struct btmrvl_private *) hdev->driver_data; + if (!test_bit(HCI_RUNNING, &hdev->flags)) { + BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags); + print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET, + skb->data, skb->len); + return -EBUSY; + } + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + skb_queue_tail(&priv->adapter->tx_queue, skb); + + wake_up_interruptible(&priv->main_thread.wait_q); + + return 0; +} + +static int btmrvl_flush(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hdev->driver_data; + + skb_queue_purge(&priv->adapter->tx_queue); + + return 0; +} + +static int btmrvl_close(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hdev->driver_data; + + if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + skb_queue_purge(&priv->adapter->tx_queue); + + return 0; +} + +static int btmrvl_open(struct hci_dev *hdev) +{ + set_bit(HCI_RUNNING, &hdev->flags); + + return 0; +} + +/* + * This function handles the event generated by firmware, rx data + * received from firmware, and tx data sent from kernel. + */ +static int btmrvl_service_main_thread(void *data) +{ + struct btmrvl_thread *thread = data; + struct btmrvl_private *priv = thread->priv; + struct btmrvl_adapter *adapter = priv->adapter; + wait_queue_t wait; + struct sk_buff *skb; + ulong flags; + + init_waitqueue_entry(&wait, current); + + current->flags |= PF_NOFREEZE; + + for (;;) { + add_wait_queue(&thread->wait_q, &wait); + + set_current_state(TASK_INTERRUPTIBLE); + + if (adapter->wakeup_tries || + ((!adapter->int_count) && + (!priv->btmrvl_dev.tx_dnld_rdy || + skb_queue_empty(&adapter->tx_queue)))) { + BT_DBG("main_thread is sleeping..."); + schedule(); + } + + set_current_state(TASK_RUNNING); + + remove_wait_queue(&thread->wait_q, &wait); + + BT_DBG("main_thread woke up"); + + if (kthread_should_stop()) { + BT_DBG("main_thread: break from main thread"); + break; + } + + spin_lock_irqsave(&priv->driver_lock, flags); + if (adapter->int_count) { + adapter->int_count = 0; + } else if (adapter->ps_state == PS_SLEEP && + !skb_queue_empty(&adapter->tx_queue)) { + spin_unlock_irqrestore(&priv->driver_lock, flags); + adapter->wakeup_tries++; + priv->hw_wakeup_firmware(priv); + continue; + } + spin_unlock_irqrestore(&priv->driver_lock, flags); + + if (adapter->ps_state == PS_SLEEP) + continue; + + if (!priv->btmrvl_dev.tx_dnld_rdy) + continue; + + skb = skb_dequeue(&adapter->tx_queue); + if (skb) { + if (btmrvl_tx_pkt(priv, skb)) + priv->btmrvl_dev.hcidev->stat.err_tx++; + else + priv->btmrvl_dev.hcidev->stat.byte_tx += skb->len; + + kfree_skb(skb); + } + } + + return 0; +} + +struct btmrvl_private *btmrvl_add_card(void *card) +{ + struct hci_dev *hdev = NULL; + struct btmrvl_private *priv; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + BT_ERR("Can not allocate priv"); + goto err_priv; + } + + priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL); + if (!priv->adapter) { + BT_ERR("Allocate buffer for btmrvl_adapter failed!"); + goto err_adapter; + } + + btmrvl_init_adapter(priv); + + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can not allocate HCI device"); + goto err_hdev; + } + + BT_DBG("Starting kthread..."); + priv->main_thread.priv = priv; + 
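	/*
	 * Editorial note: driver_lock and main_thread.wait_q must be
	 * initialized before kthread_run() below, because
	 * btmrvl_service_main_thread() takes driver_lock and sleeps on
	 * wait_q as soon as it starts running.
	 */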
spin_lock_init(&priv->driver_lock); + + init_waitqueue_head(&priv->main_thread.wait_q); + priv->main_thread.task = kthread_run(btmrvl_service_main_thread, + &priv->main_thread, "btmrvl_main_service"); + + priv->btmrvl_dev.hcidev = hdev; + priv->btmrvl_dev.card = card; + + hdev->driver_data = priv; + + priv->btmrvl_dev.tx_dnld_rdy = true; + + hdev->bus = HCI_SDIO; + hdev->open = btmrvl_open; + hdev->close = btmrvl_close; + hdev->flush = btmrvl_flush; + hdev->send = btmrvl_send_frame; + hdev->destruct = btmrvl_destruct; + hdev->ioctl = btmrvl_ioctl; + hdev->owner = THIS_MODULE; + + ret = hci_register_dev(hdev); + if (ret < 0) { + BT_ERR("Can not register HCI device"); + goto err_hci_register_dev; + } + +#ifdef CONFIG_DEBUG_FS + btmrvl_debugfs_init(hdev); +#endif + + return priv; + +err_hci_register_dev: + /* Stop the thread servicing the interrupts */ + kthread_stop(priv->main_thread.task); + + hci_free_dev(hdev); + +err_hdev: + btmrvl_free_adapter(priv); + +err_adapter: + kfree(priv); + +err_priv: + return NULL; +} +EXPORT_SYMBOL_GPL(btmrvl_add_card); + +int btmrvl_remove_card(struct btmrvl_private *priv) +{ + struct hci_dev *hdev; + + hdev = priv->btmrvl_dev.hcidev; + + wake_up_interruptible(&priv->adapter->cmd_wait_q); + + kthread_stop(priv->main_thread.task); + +#ifdef CONFIG_DEBUG_FS + btmrvl_debugfs_remove(hdev); +#endif + + hci_unregister_dev(hdev); + + hci_free_dev(hdev); + + priv->btmrvl_dev.hcidev = NULL; + + btmrvl_free_adapter(priv); + + kfree(priv); + + return 0; +} +EXPORT_SYMBOL_GPL(btmrvl_remove_card); + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION("Marvell Bluetooth driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c new file mode 100644 index 0000000..94f1f55 --- /dev/null +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -0,0 +1,1008 @@ +/** + * Marvell BT-over-SDIO driver: SDIO interface related functions. + * + * Copyright (C) 2009, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + **/ + +#include + +#include +#include + +#include +#include + +#include "btmrvl_drv.h" +#include "btmrvl_sdio.h" + +#define VERSION "1.0" + +/* The btmrvl_sdio_remove() callback function is called + * when user removes this module from kernel space or ejects + * the card from the slot. The driver handles these 2 cases + * differently. + * If the user is removing the module, a MODULE_SHUTDOWN_REQ + * command is sent to firmware and interrupt will be disabled. + * If the card is removed, there is no need to send command + * or disable interrupt. + * + * The variable 'user_rmmod' is used to distinguish these two + * scenarios. 
This flag is initialized as FALSE in case the card + * is removed, and will be set to TRUE for module removal when + * module_exit function is called. + */ +static u8 user_rmmod; + +static const struct btmrvl_sdio_device btmrvl_sdio_sd6888 = { + .helper = "sd8688_helper.bin", + .firmware = "sd8688.bin", +}; + +static const struct sdio_device_id btmrvl_sdio_ids[] = { + /* Marvell SD8688 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105), + .driver_data = (unsigned long) &btmrvl_sdio_sd6888 }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(sdio, btmrvl_sdio_ids); + +static int btmrvl_sdio_get_rx_unit(struct btmrvl_sdio_card *card) +{ + u8 reg; + int ret; + + reg = sdio_readb(card->func, CARD_RX_UNIT_REG, &ret); + if (!ret) + card->rx_unit = reg; + + return ret; +} + +static int btmrvl_sdio_read_fw_status(struct btmrvl_sdio_card *card, u16 *dat) +{ + u8 fws0, fws1; + int ret; + + *dat = 0; + + fws0 = sdio_readb(card->func, CARD_FW_STATUS0_REG, &ret); + + if (!ret) + fws1 = sdio_readb(card->func, CARD_FW_STATUS1_REG, &ret); + + if (ret) + return -EIO; + + *dat = (((u16) fws1) << 8) | fws0; + + return 0; +} + +static int btmrvl_sdio_read_rx_len(struct btmrvl_sdio_card *card, u16 *dat) +{ + u8 reg; + int ret; + + reg = sdio_readb(card->func, CARD_RX_LEN_REG, &ret); + if (!ret) + *dat = (u16) reg << card->rx_unit; + + return ret; +} + +static int btmrvl_sdio_enable_host_int_mask(struct btmrvl_sdio_card *card, + u8 mask) +{ + int ret; + + sdio_writeb(card->func, mask, HOST_INT_MASK_REG, &ret); + if (ret) { + BT_ERR("Unable to enable the host interrupt!"); + ret = -EIO; + } + + return ret; +} + +static int btmrvl_sdio_disable_host_int_mask(struct btmrvl_sdio_card *card, + u8 mask) +{ + u8 host_int_mask; + int ret; + + host_int_mask = sdio_readb(card->func, HOST_INT_MASK_REG, &ret); + if (ret) + return -EIO; + + host_int_mask &= ~mask; + + sdio_writeb(card->func, host_int_mask, HOST_INT_MASK_REG, &ret); + if (ret < 0) { + BT_ERR("Unable to disable the host interrupt!"); + return -EIO; + } + + return 0; +} + +static int btmrvl_sdio_poll_card_status(struct btmrvl_sdio_card *card, u8 bits) +{ + unsigned int tries; + u8 status; + int ret; + + for (tries = 0; tries < MAX_POLL_TRIES * 1000; tries++) { + status = sdio_readb(card->func, CARD_STATUS_REG, &ret); + if (ret) + goto failed; + if ((status & bits) == bits) + return ret; + + udelay(1); + } + + ret = -ETIMEDOUT; + +failed: + BT_ERR("FAILED! 
ret=%d", ret); + + return ret; +} + +static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card, + int pollnum) +{ + int ret = -ETIMEDOUT; + u16 firmwarestat; + unsigned int tries; + + /* Wait for firmware to become ready */ + for (tries = 0; tries < pollnum; tries++) { + if (btmrvl_sdio_read_fw_status(card, &firmwarestat) < 0) + continue; + + if (firmwarestat == FIRMWARE_READY) { + ret = 0; + break; + } else { + msleep(10); + } + } + + return ret; +} + +static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card) +{ + const struct firmware *fw_helper = NULL; + const u8 *helper = NULL; + int ret; + void *tmphlprbuf = NULL; + int tmphlprbufsz, hlprblknow, helperlen; + u8 *helperbuf; + u32 tx_len; + + ret = request_firmware(&fw_helper, card->helper, + &card->func->dev); + if ((ret < 0) || !fw_helper) { + BT_ERR("request_firmware(helper) failed, error code = %d", + ret); + ret = -ENOENT; + goto done; + } + + helper = fw_helper->data; + helperlen = fw_helper->size; + + BT_DBG("Downloading helper image (%d bytes), block size %d bytes", + helperlen, SDIO_BLOCK_SIZE); + + tmphlprbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN); + + tmphlprbuf = kmalloc(tmphlprbufsz, GFP_KERNEL); + if (!tmphlprbuf) { + BT_ERR("Unable to allocate buffer for helper." + " Terminating download"); + ret = -ENOMEM; + goto done; + } + + memset(tmphlprbuf, 0, tmphlprbufsz); + + helperbuf = (u8 *) ALIGN_ADDR(tmphlprbuf, BTSDIO_DMA_ALIGN); + + /* Perform helper data transfer */ + tx_len = (FIRMWARE_TRANSFER_NBLOCK * SDIO_BLOCK_SIZE) + - SDIO_HEADER_LEN; + hlprblknow = 0; + + do { + ret = btmrvl_sdio_poll_card_status(card, + CARD_IO_READY | DN_LD_CARD_RDY); + if (ret < 0) { + BT_ERR("Helper download poll status timeout @ %d", + hlprblknow); + goto done; + } + + /* Check if there is more data? 
*/ + if (hlprblknow >= helperlen) + break; + + if (helperlen - hlprblknow < tx_len) + tx_len = helperlen - hlprblknow; + + /* Little-endian */ + helperbuf[0] = ((tx_len & 0x000000ff) >> 0); + helperbuf[1] = ((tx_len & 0x0000ff00) >> 8); + helperbuf[2] = ((tx_len & 0x00ff0000) >> 16); + helperbuf[3] = ((tx_len & 0xff000000) >> 24); + + memcpy(&helperbuf[SDIO_HEADER_LEN], &helper[hlprblknow], + tx_len); + + /* Now send the data */ + ret = sdio_writesb(card->func, card->ioport, helperbuf, + FIRMWARE_TRANSFER_NBLOCK * SDIO_BLOCK_SIZE); + if (ret < 0) { + BT_ERR("IO error during helper download @ %d", + hlprblknow); + goto done; + } + + hlprblknow += tx_len; + } while (true); + + BT_DBG("Transferring helper image EOF block"); + + memset(helperbuf, 0x0, SDIO_BLOCK_SIZE); + + ret = sdio_writesb(card->func, card->ioport, helperbuf, + SDIO_BLOCK_SIZE); + if (ret < 0) { + BT_ERR("IO error in writing helper image EOF block"); + goto done; + } + + ret = 0; + +done: + kfree(tmphlprbuf); + if (fw_helper) + release_firmware(fw_helper); + + return ret; +} + +static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card) +{ + const struct firmware *fw_firmware = NULL; + const u8 *firmware = NULL; + int firmwarelen, tmpfwbufsz, ret; + unsigned int tries, offset; + u8 base0, base1; + void *tmpfwbuf = NULL; + u8 *fwbuf; + u16 len; + int txlen = 0, tx_blocks = 0, count = 0; + + ret = request_firmware(&fw_firmware, card->firmware, + &card->func->dev); + if ((ret < 0) || !fw_firmware) { + BT_ERR("request_firmware(firmware) failed, error code = %d", + ret); + ret = -ENOENT; + goto done; + } + + firmware = fw_firmware->data; + firmwarelen = fw_firmware->size; + + BT_DBG("Downloading FW image (%d bytes)", firmwarelen); + + tmpfwbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN); + tmpfwbuf = kmalloc(tmpfwbufsz, GFP_KERNEL); + if (!tmpfwbuf) { + BT_ERR("Unable to allocate buffer for firmware." + " Terminating download"); + ret = -ENOMEM; + goto done; + } + + memset(tmpfwbuf, 0, tmpfwbufsz); + + /* Ensure aligned firmware buffer */ + fwbuf = (u8 *) ALIGN_ADDR(tmpfwbuf, BTSDIO_DMA_ALIGN); + + /* Perform firmware data transfer */ + offset = 0; + do { + ret = btmrvl_sdio_poll_card_status(card, + CARD_IO_READY | DN_LD_CARD_RDY); + if (ret < 0) { + BT_ERR("FW download with helper poll status" + " timeout @ %d", offset); + goto done; + } + + /* Check if there is more data ? */ + if (offset >= firmwarelen) + break; + + for (tries = 0; tries < MAX_POLL_TRIES; tries++) { + base0 = sdio_readb(card->func, + SQ_READ_BASE_ADDRESS_A0_REG, &ret); + if (ret) { + BT_ERR("BASE0 register read failed:" + " base0 = 0x%04X(%d)." + " Terminating download", + base0, base0); + ret = -EIO; + goto done; + } + base1 = sdio_readb(card->func, + SQ_READ_BASE_ADDRESS_A1_REG, &ret); + if (ret) { + BT_ERR("BASE1 register read failed:" + " base1 = 0x%04X(%d)." 
+ " Terminating download", + base1, base1); + ret = -EIO; + goto done; + } + + len = (((u16) base1) << 8) | base0; + if (len) + break; + + udelay(10); + } + + if (!len) + break; + else if (len > BTM_UPLD_SIZE) { + BT_ERR("FW download failure @%d, invalid length %d", + offset, len); + ret = -EINVAL; + goto done; + } + + txlen = len; + + if (len & BIT(0)) { + count++; + if (count > MAX_WRITE_IOMEM_RETRY) { + BT_ERR("FW download failure @%d, " + "over max retry count", offset); + ret = -EIO; + goto done; + } + BT_ERR("FW CRC error indicated by the helper: " + "len = 0x%04X, txlen = %d", len, txlen); + len &= ~BIT(0); + /* Set txlen to 0 so as to resend from same offset */ + txlen = 0; + } else { + count = 0; + + /* Last block ? */ + if (firmwarelen - offset < txlen) + txlen = firmwarelen - offset; + + tx_blocks = + (txlen + SDIO_BLOCK_SIZE - 1) / SDIO_BLOCK_SIZE; + + memcpy(fwbuf, &firmware[offset], txlen); + } + + ret = sdio_writesb(card->func, card->ioport, fwbuf, + tx_blocks * SDIO_BLOCK_SIZE); + + if (ret < 0) { + BT_ERR("FW download, writesb(%d) failed @%d", + count, offset); + sdio_writeb(card->func, HOST_CMD53_FIN, CONFIG_REG, + &ret); + if (ret) + BT_ERR("writeb failed (CFG)"); + } + + offset += txlen; + } while (true); + + BT_DBG("FW download over, size %d bytes", offset); + + ret = 0; + +done: + kfree(tmpfwbuf); + + if (fw_firmware) + release_firmware(fw_firmware); + + return ret; +} + +static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) +{ + u16 buf_len = 0; + int ret, buf_block_len, blksz; + struct sk_buff *skb = NULL; + u32 type; + u8 *payload = NULL; + struct hci_dev *hdev = priv->btmrvl_dev.hcidev; + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + + if (!card || !card->func) { + BT_ERR("card or function is NULL!"); + ret = -EINVAL; + goto exit; + } + + /* Read the length of data to be transferred */ + ret = btmrvl_sdio_read_rx_len(card, &buf_len); + if (ret < 0) { + BT_ERR("read rx_len failed"); + ret = -EIO; + goto exit; + } + + blksz = SDIO_BLOCK_SIZE; + buf_block_len = (buf_len + blksz - 1) / blksz; + + if (buf_len <= SDIO_HEADER_LEN + || (buf_block_len * blksz) > ALLOC_BUF_SIZE) { + BT_ERR("invalid packet length: %d", buf_len); + ret = -EINVAL; + goto exit; + } + + /* Allocate buffer */ + skb = bt_skb_alloc(buf_block_len * blksz + BTSDIO_DMA_ALIGN, + GFP_ATOMIC); + if (skb == NULL) { + BT_ERR("No free skb"); + goto exit; + } + + if ((unsigned long) skb->data & (BTSDIO_DMA_ALIGN - 1)) { + skb_put(skb, (unsigned long) skb->data & + (BTSDIO_DMA_ALIGN - 1)); + skb_pull(skb, (unsigned long) skb->data & + (BTSDIO_DMA_ALIGN - 1)); + } + + payload = skb->data; + + ret = sdio_readsb(card->func, payload, card->ioport, + buf_block_len * blksz); + if (ret < 0) { + BT_ERR("readsb failed: %d", ret); + ret = -EIO; + goto exit; + } + + /* This is SDIO specific header length: byte[2][1][0], type: byte[3] + * (HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor) + */ + + buf_len = payload[0]; + buf_len |= (u16) payload[1] << 8; + type = payload[3]; + + switch (type) { + case HCI_ACLDATA_PKT: + case HCI_SCODATA_PKT: + case HCI_EVENT_PKT: + bt_cb(skb)->pkt_type = type; + skb->dev = (void *)hdev; + skb_put(skb, buf_len); + skb_pull(skb, SDIO_HEADER_LEN); + + if (type == HCI_EVENT_PKT) + btmrvl_check_evtpkt(priv, skb); + + hci_recv_frame(skb); + hdev->stat.byte_rx += buf_len; + break; + + case MRVL_VENDOR_PKT: + bt_cb(skb)->pkt_type = HCI_VENDOR_PKT; + skb->dev = (void *)hdev; + skb_put(skb, buf_len); + skb_pull(skb, SDIO_HEADER_LEN); + + if 
(btmrvl_process_event(priv, skb)) + hci_recv_frame(skb); + + hdev->stat.byte_rx += buf_len; + break; + + default: + BT_ERR("Unknown packet type:%d", type); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, payload, + blksz * buf_block_len); + + kfree_skb(skb); + skb = NULL; + break; + } + +exit: + if (ret) { + hdev->stat.err_rx++; + if (skb) + kfree_skb(skb); + } + + return ret; +} + +static int btmrvl_sdio_get_int_status(struct btmrvl_private *priv, u8 * ireg) +{ + int ret; + u8 sdio_ireg = 0; + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + + *ireg = 0; + + sdio_ireg = sdio_readb(card->func, HOST_INTSTATUS_REG, &ret); + if (ret) { + BT_ERR("sdio_readb: read int status register failed"); + ret = -EIO; + goto done; + } + + if (sdio_ireg != 0) { + /* + * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS + * Clear the interrupt status register and re-enable the + * interrupt. + */ + BT_DBG("sdio_ireg = 0x%x", sdio_ireg); + + sdio_writeb(card->func, ~(sdio_ireg) & (DN_LD_HOST_INT_STATUS | + UP_LD_HOST_INT_STATUS), + HOST_INTSTATUS_REG, &ret); + if (ret) { + BT_ERR("sdio_writeb: clear int status register " + "failed"); + ret = -EIO; + goto done; + } + } + + if (sdio_ireg & DN_LD_HOST_INT_STATUS) { + if (priv->btmrvl_dev.tx_dnld_rdy) + BT_DBG("tx_done already received: " + " int_status=0x%x", sdio_ireg); + else + priv->btmrvl_dev.tx_dnld_rdy = true; + } + + if (sdio_ireg & UP_LD_HOST_INT_STATUS) + btmrvl_sdio_card_to_host(priv); + + *ireg = sdio_ireg; + + ret = 0; + +done: + return ret; +} + +static void btmrvl_sdio_interrupt(struct sdio_func *func) +{ + struct btmrvl_private *priv; + struct hci_dev *hcidev; + struct btmrvl_sdio_card *card; + u8 ireg = 0; + + card = sdio_get_drvdata(func); + if (card && card->priv) { + priv = card->priv; + hcidev = priv->btmrvl_dev.hcidev; + + if (btmrvl_sdio_get_int_status(priv, &ireg)) + BT_ERR("reading HOST_INT_STATUS_REG failed"); + else + BT_DBG("HOST_INT_STATUS_REG %#x", ireg); + + btmrvl_interrupt(priv); + } +} + +static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card) +{ + struct sdio_func *func; + u8 reg; + int ret = 0; + + if (!card || !card->func) { + BT_ERR("Error: card or function is NULL!"); + ret = -EINVAL; + goto failed; + } + + func = card->func; + + sdio_claim_host(func); + + ret = sdio_enable_func(func); + if (ret) { + BT_ERR("sdio_enable_func() failed: ret=%d", ret); + ret = -EIO; + goto release_host; + } + + ret = sdio_claim_irq(func, btmrvl_sdio_interrupt); + if (ret) { + BT_ERR("sdio_claim_irq failed: ret=%d", ret); + ret = -EIO; + goto disable_func; + } + + ret = sdio_set_block_size(card->func, SDIO_BLOCK_SIZE); + if (ret) { + BT_ERR("cannot set SDIO block size"); + ret = -EIO; + goto release_irq; + } + + reg = sdio_readb(func, IO_PORT_0_REG, &ret); + if (ret < 0) { + ret = -EIO; + goto release_irq; + } + + card->ioport = reg; + + reg = sdio_readb(func, IO_PORT_1_REG, &ret); + if (ret < 0) { + ret = -EIO; + goto release_irq; + } + + card->ioport |= (reg << 8); + + reg = sdio_readb(func, IO_PORT_2_REG, &ret); + if (ret < 0) { + ret = -EIO; + goto release_irq; + } + + card->ioport |= (reg << 16); + + BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport); + + sdio_set_drvdata(func, card); + + sdio_release_host(func); + + return 0; + +release_irq: + sdio_release_irq(func); + +disable_func: + sdio_disable_func(func); + +release_host: + sdio_release_host(func); + +failed: + return ret; +} + +static int btmrvl_sdio_unregister_dev(struct btmrvl_sdio_card *card) +{ + if (card && card->func) { + 
sdio_claim_host(card->func); + sdio_release_irq(card->func); + sdio_disable_func(card->func); + sdio_release_host(card->func); + sdio_set_drvdata(card->func, NULL); + } + + return 0; +} + +static int btmrvl_sdio_enable_host_int(struct btmrvl_sdio_card *card) +{ + int ret; + + if (!card || !card->func) + return -EINVAL; + + sdio_claim_host(card->func); + + ret = btmrvl_sdio_enable_host_int_mask(card, HIM_ENABLE); + + btmrvl_sdio_get_rx_unit(card); + + sdio_release_host(card->func); + + return ret; +} + +static int btmrvl_sdio_disable_host_int(struct btmrvl_sdio_card *card) +{ + int ret; + + if (!card || !card->func) + return -EINVAL; + + sdio_claim_host(card->func); + + ret = btmrvl_sdio_disable_host_int_mask(card, HIM_DISABLE); + + sdio_release_host(card->func); + + return ret; +} + +static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, + u8 *payload, u16 nb) +{ + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret = 0; + int buf_block_len; + int blksz; + int i = 0; + u8 *buf = NULL; + void *tmpbuf = NULL; + int tmpbufsz; + + if (!card || !card->func) { + BT_ERR("card or function is NULL!"); + return -EINVAL; + } + + buf = payload; + if ((unsigned long) payload & (BTSDIO_DMA_ALIGN - 1)) { + tmpbufsz = ALIGN_SZ(nb, BTSDIO_DMA_ALIGN); + tmpbuf = kzalloc(tmpbufsz, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + buf = (u8 *) ALIGN_ADDR(tmpbuf, BTSDIO_DMA_ALIGN); + memcpy(buf, payload, nb); + } + + blksz = SDIO_BLOCK_SIZE; + buf_block_len = (nb + blksz - 1) / blksz; + + sdio_claim_host(card->func); + + do { + /* Transfer data to card */ + ret = sdio_writesb(card->func, card->ioport, buf, + buf_block_len * blksz); + if (ret < 0) { + i++; + BT_ERR("i=%d writesb failed: %d", i, ret); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, + payload, nb); + ret = -EIO; + if (i > MAX_WRITE_IOMEM_RETRY) + goto exit; + } + } while (ret); + + priv->btmrvl_dev.tx_dnld_rdy = false; + +exit: + sdio_release_host(card->func); + kfree(tmpbuf); + + return ret; +} + +static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card) +{ + int ret = 0; + + if (!card || !card->func) { + BT_ERR("card or function is NULL!"); + return -EINVAL; + } + sdio_claim_host(card->func); + + if (!btmrvl_sdio_verify_fw_download(card, 1)) { + BT_DBG("Firmware already downloaded!"); + goto done; + } + + ret = btmrvl_sdio_download_helper(card); + if (ret) { + BT_ERR("Failed to download helper!"); + ret = -EIO; + goto done; + } + + if (btmrvl_sdio_download_fw_w_helper(card)) { + BT_ERR("Failed to download firmware!"); + ret = -EIO; + goto done; + } + + if (btmrvl_sdio_verify_fw_download(card, MAX_POLL_TRIES)) { + BT_ERR("FW failed to be active in time!"); + ret = -ETIMEDOUT; + goto done; + } + +done: + sdio_release_host(card->func); + + return ret; +} + +static int btmrvl_sdio_wakeup_fw(struct btmrvl_private *priv) +{ + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + int ret = 0; + + if (!card || !card->func) { + BT_ERR("card or function is NULL!"); + return -EINVAL; + } + + sdio_claim_host(card->func); + + sdio_writeb(card->func, HOST_POWER_UP, CONFIG_REG, &ret); + + sdio_release_host(card->func); + + BT_DBG("wake up firmware"); + + return ret; +} + +static int btmrvl_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int ret = 0; + struct btmrvl_private *priv = NULL; + struct btmrvl_sdio_card *card = NULL; + + BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d", + id->vendor, id->device, id->class, func->num); + + card = kzalloc(sizeof(*card), GFP_KERNEL); + if (!card) { + ret = 
-ENOMEM; + goto done; + } + + card->func = func; + + if (id->driver_data) { + struct btmrvl_sdio_device *data = (void *) id->driver_data; + card->helper = data->helper; + card->firmware = data->firmware; + } + + if (btmrvl_sdio_register_dev(card) < 0) { + BT_ERR("Failed to register BT device!"); + ret = -ENODEV; + goto free_card; + } + + /* Disable the interrupts on the card */ + btmrvl_sdio_disable_host_int(card); + + if (btmrvl_sdio_download_fw(card)) { + BT_ERR("Downloading firmware failed!"); + ret = -ENODEV; + goto unreg_dev; + } + + msleep(100); + + btmrvl_sdio_enable_host_int(card); + + priv = btmrvl_add_card(card); + if (!priv) { + BT_ERR("Initializing card failed!"); + ret = -ENODEV; + goto disable_host_int; + } + + card->priv = priv; + + /* Initialize the interface specific function pointers */ + priv->hw_host_to_card = btmrvl_sdio_host_to_card; + priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw; + + btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ); + priv->btmrvl_dev.psmode = 1; + btmrvl_enable_ps(priv); + + return 0; + +disable_host_int: + btmrvl_sdio_disable_host_int(card); +unreg_dev: + btmrvl_sdio_unregister_dev(card); +free_card: + kfree(card); +done: + return ret; +} + +static void btmrvl_sdio_remove(struct sdio_func *func) +{ + struct btmrvl_sdio_card *card; + + if (func) { + card = sdio_get_drvdata(func); + if (card) { + /* Send SHUTDOWN command & disable interrupt + * if user removes the module. + */ + if (user_rmmod) { + btmrvl_send_module_cfg_cmd(card->priv, + MODULE_SHUTDOWN_REQ); + btmrvl_sdio_disable_host_int(card); + } + BT_DBG("unregester dev"); + btmrvl_sdio_unregister_dev(card); + btmrvl_remove_card(card->priv); + kfree(card); + } + } +} + +static struct sdio_driver bt_mrvl_sdio = { + .name = "btmrvl_sdio", + .id_table = btmrvl_sdio_ids, + .probe = btmrvl_sdio_probe, + .remove = btmrvl_sdio_remove, +}; + +static int __init btmrvl_sdio_init_module(void) +{ + if (sdio_register_driver(&bt_mrvl_sdio) != 0) { + BT_ERR("SDIO Driver Registration Failed"); + return -ENODEV; + } + + /* Clear the flag in case user removes the card. */ + user_rmmod = 0; + + return 0; +} + +static void __exit btmrvl_sdio_exit_module(void) +{ + /* Set the flag as user is removing this module. */ + user_rmmod = 1; + + sdio_unregister_driver(&bt_mrvl_sdio); +} + +module_init(btmrvl_sdio_init_module); +module_exit(btmrvl_sdio_exit_module); + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION("Marvell BT-over-SDIO driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_FIRMWARE("sd8688_helper.bin"); +MODULE_FIRMWARE("sd8688.bin"); diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h new file mode 100644 index 0000000..27329f1 --- /dev/null +++ b/drivers/bluetooth/btmrvl_sdio.h @@ -0,0 +1,108 @@ +/** + * Marvell BT-over-SDIO driver: SDIO interface related definitions + * + * Copyright (C) 2009, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. 
+ * + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + * + **/ + +#define SDIO_HEADER_LEN 4 + +/* SD block size can not bigger than 64 due to buf size limit in firmware */ +/* define SD block size for data Tx/Rx */ +#define SDIO_BLOCK_SIZE 64 + +/* Number of blocks for firmware transfer */ +#define FIRMWARE_TRANSFER_NBLOCK 2 + +/* This is for firmware specific length */ +#define FW_EXTRA_LEN 36 + +#define MRVDRV_SIZE_OF_CMD_BUFFER (2 * 1024) + +#define MRVDRV_BT_RX_PACKET_BUFFER_SIZE \ + (HCI_MAX_FRAME_SIZE + FW_EXTRA_LEN) + +#define ALLOC_BUF_SIZE (((max_t (int, MRVDRV_BT_RX_PACKET_BUFFER_SIZE, \ + MRVDRV_SIZE_OF_CMD_BUFFER) + SDIO_HEADER_LEN \ + + SDIO_BLOCK_SIZE - 1) / SDIO_BLOCK_SIZE) \ + * SDIO_BLOCK_SIZE) + +/* The number of times to try when polling for status */ +#define MAX_POLL_TRIES 100 + +/* Max retry number of CMD53 write */ +#define MAX_WRITE_IOMEM_RETRY 2 + +/* Host Control Registers */ +#define IO_PORT_0_REG 0x00 +#define IO_PORT_1_REG 0x01 +#define IO_PORT_2_REG 0x02 + +#define CONFIG_REG 0x03 +#define HOST_POWER_UP BIT(1) +#define HOST_CMD53_FIN BIT(2) + +#define HOST_INT_MASK_REG 0x04 +#define HIM_DISABLE 0xff +#define HIM_ENABLE (BIT(0) | BIT(1)) + +#define HOST_INTSTATUS_REG 0x05 +#define UP_LD_HOST_INT_STATUS BIT(0) +#define DN_LD_HOST_INT_STATUS BIT(1) + +/* Card Control Registers */ +#define SQ_READ_BASE_ADDRESS_A0_REG 0x10 +#define SQ_READ_BASE_ADDRESS_A1_REG 0x11 + +#define CARD_STATUS_REG 0x20 +#define DN_LD_CARD_RDY BIT(0) +#define CARD_IO_READY BIT(3) + +#define CARD_FW_STATUS0_REG 0x40 +#define CARD_FW_STATUS1_REG 0x41 +#define FIRMWARE_READY 0xfedc + +#define CARD_RX_LEN_REG 0x42 +#define CARD_RX_UNIT_REG 0x43 + + +struct btmrvl_sdio_card { + struct sdio_func *func; + u32 ioport; + const char *helper; + const char *firmware; + u8 rx_unit; + struct btmrvl_private *priv; +}; + +struct btmrvl_sdio_device { + const char *helper; + const char *firmware; +}; + + +/* Platform specific DMA alignment */ +#define BTSDIO_DMA_ALIGN 8 + +/* Macros for Data Alignment : size */ +#define ALIGN_SZ(p, a) \ + (((p) + ((a) - 1)) & ~((a) - 1)) + +/* Macros for Data Alignment : address */ +#define ALIGN_ADDR(p, a) \ + ((((unsigned long)(p)) + (((unsigned long)(a)) - 1)) & \ + ~(((unsigned long)(a)) - 1)) diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c new file mode 100644 index 0000000..76e5127 --- /dev/null +++ b/drivers/bluetooth/btsdio.c @@ -0,0 +1,400 @@ +/* + * + * Generic Bluetooth SDIO driver + * + * Copyright (C) 2007 Cambridge Silicon Radio Ltd. + * Copyright (C) 2007 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#define VERSION "0.1" + +static const struct sdio_device_id btsdio_table[] = { + /* Generic Bluetooth Type-A SDIO device */ + { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_A) }, + + /* Generic Bluetooth Type-B SDIO device */ + { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(sdio, btsdio_table); + +struct btsdio_data { + struct hci_dev *hdev; + struct sdio_func *func; + + struct work_struct work; + + struct sk_buff_head txq; +}; + +#define REG_RDAT 0x00 /* Receiver Data */ +#define REG_TDAT 0x00 /* Transmitter Data */ +#define REG_PC_RRT 0x10 /* Read Packet Control */ +#define REG_PC_WRT 0x11 /* Write Packet Control */ +#define REG_RTC_STAT 0x12 /* Retry Control Status */ +#define REG_RTC_SET 0x12 /* Retry Control Set */ +#define REG_INTRD 0x13 /* Interrupt Indication */ +#define REG_CL_INTRD 0x13 /* Interrupt Clear */ +#define REG_EN_INTRD 0x14 /* Interrupt Enable */ +#define REG_MD_STAT 0x20 /* Bluetooth Mode Status */ + +static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb) +{ + int err; + + BT_DBG("%s", data->hdev->name); + + /* Prepend Type-A header */ + skb_push(skb, 4); + skb->data[0] = (skb->len & 0x0000ff); + skb->data[1] = (skb->len & 0x00ff00) >> 8; + skb->data[2] = (skb->len & 0xff0000) >> 16; + skb->data[3] = bt_cb(skb)->pkt_type; + + err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len); + if (err < 0) { + skb_pull(skb, 4); + sdio_writeb(data->func, 0x01, REG_PC_WRT, NULL); + return err; + } + + data->hdev->stat.byte_tx += skb->len; + + kfree_skb(skb); + + return 0; +} + +static void btsdio_work(struct work_struct *work) +{ + struct btsdio_data *data = container_of(work, struct btsdio_data, work); + struct sk_buff *skb; + int err; + + BT_DBG("%s", data->hdev->name); + + sdio_claim_host(data->func); + + while ((skb = skb_dequeue(&data->txq))) { + err = btsdio_tx_packet(data, skb); + if (err < 0) { + data->hdev->stat.err_tx++; + skb_queue_head(&data->txq, skb); + break; + } + } + + sdio_release_host(data->func); +} + +static int btsdio_rx_packet(struct btsdio_data *data) +{ + u8 hdr[4] __attribute__ ((aligned(4))); + struct sk_buff *skb; + int err, len; + + BT_DBG("%s", data->hdev->name); + + err = sdio_readsb(data->func, hdr, REG_RDAT, 4); + if (err < 0) + return err; + + len = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16); + if (len < 4 || len > 65543) + return -EILSEQ; + + skb = bt_skb_alloc(len - 4, GFP_KERNEL); + if (!skb) { + /* Out of memory. Prepare a read retry and just + * return with the expectation that the next time + * we're called we'll have more memory. 
*/ + return -ENOMEM; + } + + skb_put(skb, len - 4); + + err = sdio_readsb(data->func, skb->data, REG_RDAT, len - 4); + if (err < 0) { + kfree_skb(skb); + return err; + } + + data->hdev->stat.byte_rx += len; + + skb->dev = (void *) data->hdev; + bt_cb(skb)->pkt_type = hdr[3]; + + err = hci_recv_frame(skb); + if (err < 0) + return err; + + sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL); + + return 0; +} + +static void btsdio_interrupt(struct sdio_func *func) +{ + struct btsdio_data *data = sdio_get_drvdata(func); + int intrd; + + BT_DBG("%s", data->hdev->name); + + intrd = sdio_readb(func, REG_INTRD, NULL); + if (intrd & 0x01) { + sdio_writeb(func, 0x01, REG_CL_INTRD, NULL); + + if (btsdio_rx_packet(data) < 0) { + data->hdev->stat.err_rx++; + sdio_writeb(data->func, 0x01, REG_PC_RRT, NULL); + } + } +} + +static int btsdio_open(struct hci_dev *hdev) +{ + struct btsdio_data *data = hdev->driver_data; + int err; + + BT_DBG("%s", hdev->name); + + if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + sdio_claim_host(data->func); + + err = sdio_enable_func(data->func); + if (err < 0) { + clear_bit(HCI_RUNNING, &hdev->flags); + goto release; + } + + err = sdio_claim_irq(data->func, btsdio_interrupt); + if (err < 0) { + sdio_disable_func(data->func); + clear_bit(HCI_RUNNING, &hdev->flags); + goto release; + } + + if (data->func->class == SDIO_CLASS_BT_B) + sdio_writeb(data->func, 0x00, REG_MD_STAT, NULL); + + sdio_writeb(data->func, 0x01, REG_EN_INTRD, NULL); + +release: + sdio_release_host(data->func); + + return err; +} + +static int btsdio_close(struct hci_dev *hdev) +{ + struct btsdio_data *data = hdev->driver_data; + + BT_DBG("%s", hdev->name); + + if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + sdio_claim_host(data->func); + + sdio_writeb(data->func, 0x00, REG_EN_INTRD, NULL); + + sdio_release_irq(data->func); + sdio_disable_func(data->func); + + sdio_release_host(data->func); + + return 0; +} + +static int btsdio_flush(struct hci_dev *hdev) +{ + struct btsdio_data *data = hdev->driver_data; + + BT_DBG("%s", hdev->name); + + skb_queue_purge(&data->txq); + + return 0; +} + +static int btsdio_send_frame(struct sk_buff *skb) +{ + struct hci_dev *hdev = (struct hci_dev *) skb->dev; + struct btsdio_data *data = hdev->driver_data; + + BT_DBG("%s", hdev->name); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -EBUSY; + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + + default: + return -EILSEQ; + } + + skb_queue_tail(&data->txq, skb); + + schedule_work(&data->work); + + return 0; +} + +static void btsdio_destruct(struct hci_dev *hdev) +{ + struct btsdio_data *data = hdev->driver_data; + + BT_DBG("%s", hdev->name); + + kfree(data); +} + +static int btsdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct btsdio_data *data; + struct hci_dev *hdev; + struct sdio_func_tuple *tuple = func->tuples; + int err; + + BT_DBG("func %p id %p class 0x%04x", func, id, func->class); + + while (tuple) { + BT_DBG("code 0x%x size %d", tuple->code, tuple->size); + tuple = tuple->next; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->func = func; + + INIT_WORK(&data->work, btsdio_work); + + skb_queue_head_init(&data->txq); + + hdev = hci_alloc_dev(); + if (!hdev) { + kfree(data); + return -ENOMEM; + } + + hdev->bus = HCI_SDIO; + hdev->driver_data = data; + + 
data->hdev = hdev; + + SET_HCIDEV_DEV(hdev, &func->dev); + + hdev->open = btsdio_open; + hdev->close = btsdio_close; + hdev->flush = btsdio_flush; + hdev->send = btsdio_send_frame; + hdev->destruct = btsdio_destruct; + + hdev->owner = THIS_MODULE; + + err = hci_register_dev(hdev); + if (err < 0) { + hci_free_dev(hdev); + kfree(data); + return err; + } + + sdio_set_drvdata(func, data); + + return 0; +} + +static void btsdio_remove(struct sdio_func *func) +{ + struct btsdio_data *data = sdio_get_drvdata(func); + struct hci_dev *hdev; + + BT_DBG("func %p", func); + + if (!data) + return; + + hdev = data->hdev; + + sdio_set_drvdata(func, NULL); + + hci_unregister_dev(hdev); + + hci_free_dev(hdev); +} + +static struct sdio_driver btsdio_driver = { + .name = "btsdio", + .probe = btsdio_probe, + .remove = btsdio_remove, + .id_table = btsdio_table, +}; + +static int __init btsdio_init(void) +{ + BT_INFO("Generic Bluetooth SDIO driver ver %s", VERSION); + + return sdio_register_driver(&btsdio_driver); +} + +static void __exit btsdio_exit(void) +{ + sdio_unregister_driver(&btsdio_driver); +} + +module_init(btsdio_init); +module_exit(btsdio_exit); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c new file mode 100644 index 0000000..60c0953 --- /dev/null +++ b/drivers/bluetooth/btuart_cs.c @@ -0,0 +1,734 @@ +/* + * + * Driver for Bluetooth PCMCIA cards with HCI UART interface + * + * Copyright (C) 2001-2002 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * The initial developer of the original code is David A. Hinds + * . Portions created by David A. Hinds + * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. 
+ * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + + + +/* ======================== Module parameters ======================== */ + + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth driver for Bluetooth PCMCIA cards with HCI UART interface"); +MODULE_LICENSE("GPL"); + + + +/* ======================== Local structures ======================== */ + + +typedef struct btuart_info_t { + struct pcmcia_device *p_dev; + dev_node_t node; + + struct hci_dev *hdev; + + spinlock_t lock; /* For serializing operations */ + + struct sk_buff_head txq; + unsigned long tx_state; + + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; +} btuart_info_t; + + +static int btuart_config(struct pcmcia_device *link); +static void btuart_release(struct pcmcia_device *link); + +static void btuart_detach(struct pcmcia_device *p_dev); + + +/* Maximum baud rate */ +#define SPEED_MAX 115200 + +/* Default baud rate: 57600, 115200, 230400 or 460800 */ +#define DEFAULT_BAUD_RATE 115200 + + +/* Transmit states */ +#define XMIT_SENDING 1 +#define XMIT_WAKEUP 2 +#define XMIT_WAITING 8 + +/* Receiver states */ +#define RECV_WAIT_PACKET_TYPE 0 +#define RECV_WAIT_EVENT_HEADER 1 +#define RECV_WAIT_ACL_HEADER 2 +#define RECV_WAIT_SCO_HEADER 3 +#define RECV_WAIT_DATA 4 + + + +/* ======================== Interrupt handling ======================== */ + + +static int btuart_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) +{ + int actual = 0; + + /* Tx FIFO should be empty */ + if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) + return 0; + + /* Fill FIFO with current frame */ + while ((fifo_size-- > 0) && (actual < len)) { + /* Transmit next byte */ + outb(buf[actual], iobase + UART_TX); + actual++; + } + + return actual; +} + + +static void btuart_write_wakeup(btuart_info_t *info) +{ + if (!info) { + BT_ERR("Unknown device"); + return; + } + + if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) { + set_bit(XMIT_WAKEUP, &(info->tx_state)); + return; + } + + do { + register unsigned int iobase = info->p_dev->io.BasePort1; + register struct sk_buff *skb; + register int len; + + clear_bit(XMIT_WAKEUP, &(info->tx_state)); + + if (!pcmcia_dev_present(info->p_dev)) + return; + + if (!(skb = skb_dequeue(&(info->txq)))) + break; + + /* Send frame */ + len = btuart_write(iobase, 16, skb->data, skb->len); + set_bit(XMIT_WAKEUP, &(info->tx_state)); + + if (len == skb->len) { + kfree_skb(skb); + } else { + skb_pull(skb, len); + skb_queue_head(&(info->txq), skb); + } + + info->hdev->stat.byte_tx += len; + + } while (test_bit(XMIT_WAKEUP, &(info->tx_state))); + + clear_bit(XMIT_SENDING, &(info->tx_state)); +} + + +static void btuart_receive(btuart_info_t *info) +{ + unsigned int iobase; + int boguscount = 0; + + if (!info) { + BT_ERR("Unknown device"); + return; + } + + iobase = info->p_dev->io.BasePort1; + + do { + info->hdev->stat.byte_rx++; + + /* Allocate packet */ + if (info->rx_skb == NULL) { + info->rx_state = RECV_WAIT_PACKET_TYPE; + info->rx_count = 0; + if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) { + BT_ERR("Can't allocate mem for new packet"); + return; + } + } + + if (info->rx_state == RECV_WAIT_PACKET_TYPE) { + + info->rx_skb->dev = (void *) info->hdev; + bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX); + + switch 
(bt_cb(info->rx_skb)->pkt_type) { + + case HCI_EVENT_PKT: + info->rx_state = RECV_WAIT_EVENT_HEADER; + info->rx_count = HCI_EVENT_HDR_SIZE; + break; + + case HCI_ACLDATA_PKT: + info->rx_state = RECV_WAIT_ACL_HEADER; + info->rx_count = HCI_ACL_HDR_SIZE; + break; + + case HCI_SCODATA_PKT: + info->rx_state = RECV_WAIT_SCO_HEADER; + info->rx_count = HCI_SCO_HDR_SIZE; + break; + + default: + /* Unknown packet */ + BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); + info->hdev->stat.err_rx++; + clear_bit(HCI_RUNNING, &(info->hdev->flags)); + + kfree_skb(info->rx_skb); + info->rx_skb = NULL; + break; + + } + + } else { + + *skb_put(info->rx_skb, 1) = inb(iobase + UART_RX); + info->rx_count--; + + if (info->rx_count == 0) { + + int dlen; + struct hci_event_hdr *eh; + struct hci_acl_hdr *ah; + struct hci_sco_hdr *sh; + + + switch (info->rx_state) { + + case RECV_WAIT_EVENT_HEADER: + eh = hci_event_hdr(info->rx_skb); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = eh->plen; + break; + + case RECV_WAIT_ACL_HEADER: + ah = hci_acl_hdr(info->rx_skb); + dlen = __le16_to_cpu(ah->dlen); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = dlen; + break; + + case RECV_WAIT_SCO_HEADER: + sh = hci_sco_hdr(info->rx_skb); + info->rx_state = RECV_WAIT_DATA; + info->rx_count = sh->dlen; + break; + + case RECV_WAIT_DATA: + hci_recv_frame(info->rx_skb); + info->rx_skb = NULL; + break; + + } + + } + + } + + /* Make sure we don't stay here too long */ + if (boguscount++ > 16) + break; + + } while (inb(iobase + UART_LSR) & UART_LSR_DR); +} + + +static irqreturn_t btuart_interrupt(int irq, void *dev_inst) +{ + btuart_info_t *info = dev_inst; + unsigned int iobase; + int boguscount = 0; + int iir, lsr; + irqreturn_t r = IRQ_NONE; + + if (!info || !info->hdev) + /* our irq handler is shared */ + return IRQ_NONE; + + iobase = info->p_dev->io.BasePort1; + + spin_lock(&(info->lock)); + + iir = inb(iobase + UART_IIR) & UART_IIR_ID; + while (iir) { + r = IRQ_HANDLED; + + /* Clear interrupt */ + lsr = inb(iobase + UART_LSR); + + switch (iir) { + case UART_IIR_RLSI: + BT_ERR("RLSI"); + break; + case UART_IIR_RDI: + /* Receive interrupt */ + btuart_receive(info); + break; + case UART_IIR_THRI: + if (lsr & UART_LSR_THRE) { + /* Transmitter ready for data */ + btuart_write_wakeup(info); + } + break; + default: + BT_ERR("Unhandled IIR=%#x", iir); + break; + } + + /* Make sure we don't stay here too long */ + if (boguscount++ > 100) + break; + + iir = inb(iobase + UART_IIR) & UART_IIR_ID; + + } + + spin_unlock(&(info->lock)); + + return r; +} + + +static void btuart_change_speed(btuart_info_t *info, unsigned int speed) +{ + unsigned long flags; + unsigned int iobase; + int fcr; /* FIFO control reg */ + int lcr; /* Line control reg */ + int divisor; + + if (!info) { + BT_ERR("Unknown device"); + return; + } + + iobase = info->p_dev->io.BasePort1; + + spin_lock_irqsave(&(info->lock), flags); + + /* Turn off interrupts */ + outb(0, iobase + UART_IER); + + divisor = SPEED_MAX / speed; + + fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT; + + /* + * Use trigger level 1 to avoid 3 ms. timeout delay at 9600 bps, and + * almost 1,7 ms at 19200 bps. At speeds above that we can just forget + * about this timeout since it will always be fast enough. 
+ */ + + if (speed < 38400) + fcr |= UART_FCR_TRIGGER_1; + else + fcr |= UART_FCR_TRIGGER_14; + + /* Bluetooth cards use 8N1 */ + lcr = UART_LCR_WLEN8; + + outb(UART_LCR_DLAB | lcr, iobase + UART_LCR); /* Set DLAB */ + outb(divisor & 0xff, iobase + UART_DLL); /* Set speed */ + outb(divisor >> 8, iobase + UART_DLM); + outb(lcr, iobase + UART_LCR); /* Set 8N1 */ + outb(fcr, iobase + UART_FCR); /* Enable FIFO's */ + + /* Turn on interrupts */ + outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER); + + spin_unlock_irqrestore(&(info->lock), flags); +} + + + +/* ======================== HCI interface ======================== */ + + +static int btuart_hci_flush(struct hci_dev *hdev) +{ + btuart_info_t *info = (btuart_info_t *)(hdev->driver_data); + + /* Drop TX queue */ + skb_queue_purge(&(info->txq)); + + return 0; +} + + +static int btuart_hci_open(struct hci_dev *hdev) +{ + set_bit(HCI_RUNNING, &(hdev->flags)); + + return 0; +} + + +static int btuart_hci_close(struct hci_dev *hdev) +{ + if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) + return 0; + + btuart_hci_flush(hdev); + + return 0; +} + + +static int btuart_hci_send_frame(struct sk_buff *skb) +{ + btuart_info_t *info; + struct hci_dev *hdev = (struct hci_dev *)(skb->dev); + + if (!hdev) { + BT_ERR("Frame for unknown HCI device (hdev=NULL)"); + return -ENODEV; + } + + info = (btuart_info_t *)(hdev->driver_data); + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + }; + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + skb_queue_tail(&(info->txq), skb); + + btuart_write_wakeup(info); + + return 0; +} + + +static void btuart_hci_destruct(struct hci_dev *hdev) +{ +} + + +static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) +{ + return -ENOIOCTLCMD; +} + + + +/* ======================== Card services HCI interaction ======================== */ + + +static int btuart_open(btuart_info_t *info) +{ + unsigned long flags; + unsigned int iobase = info->p_dev->io.BasePort1; + struct hci_dev *hdev; + + spin_lock_init(&(info->lock)); + + skb_queue_head_init(&(info->txq)); + + info->rx_state = RECV_WAIT_PACKET_TYPE; + info->rx_count = 0; + info->rx_skb = NULL; + + /* Initialize HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + return -ENOMEM; + } + + info->hdev = hdev; + + hdev->bus = HCI_PCCARD; + hdev->driver_data = info; + SET_HCIDEV_DEV(hdev, &info->p_dev->dev); + + hdev->open = btuart_hci_open; + hdev->close = btuart_hci_close; + hdev->flush = btuart_hci_flush; + hdev->send = btuart_hci_send_frame; + hdev->destruct = btuart_hci_destruct; + hdev->ioctl = btuart_hci_ioctl; + + hdev->owner = THIS_MODULE; + + spin_lock_irqsave(&(info->lock), flags); + + /* Reset UART */ + outb(0, iobase + UART_MCR); + + /* Turn off interrupts */ + outb(0, iobase + UART_IER); + + /* Initialize UART */ + outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */ + outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR); + + /* Turn on interrupts */ + // outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER); + + spin_unlock_irqrestore(&(info->lock), flags); + + btuart_change_speed(info, DEFAULT_BAUD_RATE); + + /* Timeout before it is safe to send the first HCI packet */ + msleep(1000); + + /* Register HCI device */ + if (hci_register_dev(hdev) < 0) { 
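+		/* Editor note (descriptive comment, not upstream): registration failed, so drop our reference before freeing the HCI device */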
+ BT_ERR("Can't register HCI device"); + info->hdev = NULL; + hci_free_dev(hdev); + return -ENODEV; + } + + return 0; +} + + +static int btuart_close(btuart_info_t *info) +{ + unsigned long flags; + unsigned int iobase = info->p_dev->io.BasePort1; + struct hci_dev *hdev = info->hdev; + + if (!hdev) + return -ENODEV; + + btuart_hci_close(hdev); + + spin_lock_irqsave(&(info->lock), flags); + + /* Reset UART */ + outb(0, iobase + UART_MCR); + + /* Turn off interrupts */ + outb(0, iobase + UART_IER); + + spin_unlock_irqrestore(&(info->lock), flags); + + if (hci_unregister_dev(hdev) < 0) + BT_ERR("Can't unregister HCI device %s", hdev->name); + + hci_free_dev(hdev); + + return 0; +} + +static int btuart_probe(struct pcmcia_device *link) +{ + btuart_info_t *info; + + /* Create new info device */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->p_dev = link; + link->priv = info; + + link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; + link->io.NumPorts1 = 8; + link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; + + link->irq.Handler = btuart_interrupt; + + link->conf.Attributes = CONF_ENABLE_IRQ; + link->conf.IntType = INT_MEMORY_AND_IO; + + return btuart_config(link); +} + + +static void btuart_detach(struct pcmcia_device *link) +{ + btuart_info_t *info = link->priv; + + btuart_release(link); + kfree(info); +} + +static int btuart_check_config(struct pcmcia_device *p_dev, + cistpl_cftable_entry_t *cf, + cistpl_cftable_entry_t *dflt, + unsigned int vcc, + void *priv_data) +{ + int *try = priv_data; + + if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM)) + p_dev->conf.Vpp = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000; + if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) && + (cf->io.win[0].base != 0)) { + p_dev->io.BasePort1 = cf->io.win[0].base; + p_dev->io.IOAddrLines = (*try == 0) ? 16 : + cf->io.flags & CISTPL_IO_LINES_MASK; + if (!pcmcia_request_io(p_dev, &p_dev->io)) + return 0; + } + return -ENODEV; +} + +static int btuart_check_config_notpicky(struct pcmcia_device *p_dev, + cistpl_cftable_entry_t *cf, + cistpl_cftable_entry_t *dflt, + unsigned int vcc, + void *priv_data) +{ + static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; + int j; + + if ((cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) { + for (j = 0; j < 5; j++) { + p_dev->io.BasePort1 = base[j]; + p_dev->io.IOAddrLines = base[j] ? 16 : 3; + if (!pcmcia_request_io(p_dev, &p_dev->io)) + return 0; + } + } + return -ENODEV; +} + +static int btuart_config(struct pcmcia_device *link) +{ + btuart_info_t *info = link->priv; + int i; + int try; + + /* First pass: look for a config entry that looks normal. + Two tries: without IO aliases, then with aliases */ + for (try = 0; try < 2; try++) + if (!pcmcia_loop_config(link, btuart_check_config, &try)) + goto found_port; + + /* Second pass: try to find an entry that isn't picky about + its base address, then try to grab any standard serial port + address, and finally try to get any free port. 
*/ + if (!pcmcia_loop_config(link, btuart_check_config_notpicky, NULL)) + goto found_port; + + BT_ERR("No usable port range found"); + goto failed; + +found_port: + i = pcmcia_request_irq(link, &link->irq); + if (i != 0) + link->irq.AssignedIRQ = 0; + + i = pcmcia_request_configuration(link, &link->conf); + if (i != 0) + goto failed; + + if (btuart_open(info) != 0) + goto failed; + + strcpy(info->node.dev_name, info->hdev->name); + link->dev_node = &info->node; + + return 0; + +failed: + btuart_release(link); + return -ENODEV; +} + + +static void btuart_release(struct pcmcia_device *link) +{ + btuart_info_t *info = link->priv; + + btuart_close(info); + + pcmcia_disable_device(link); +} + +static struct pcmcia_device_id btuart_ids[] = { + /* don't use this driver. Use serial_cs + hci_uart instead */ + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, btuart_ids); + +static struct pcmcia_driver btuart_driver = { + .owner = THIS_MODULE, + .drv = { + .name = "btuart_cs", + }, + .probe = btuart_probe, + .remove = btuart_detach, + .id_table = btuart_ids, +}; + +static int __init init_btuart_cs(void) +{ + return pcmcia_register_driver(&btuart_driver); +} + + +static void __exit exit_btuart_cs(void) +{ + pcmcia_unregister_driver(&btuart_driver); +} + +module_init(init_btuart_cs); +module_exit(exit_btuart_cs); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c new file mode 100644 index 0000000..5d9cc53 --- /dev/null +++ b/drivers/bluetooth/btusb.c @@ -0,0 +1,1208 @@ +/* + * + * Generic Bluetooth USB driver + * + * Copyright (C) 2005-2008 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#define VERSION "0.6" + +static int ignore_dga; +static int ignore_csr; +static int ignore_sniffer; +static int disable_scofix; +static int force_scofix; + +static int reset = 1; + +static struct usb_driver btusb_driver; + +#define BTUSB_IGNORE 0x01 +#define BTUSB_DIGIANSWER 0x02 +#define BTUSB_CSR 0x04 +#define BTUSB_SNIFFER 0x08 +#define BTUSB_BCM92035 0x10 +#define BTUSB_BROKEN_ISOC 0x20 +#define BTUSB_WRONG_SCO_MTU 0x40 + +static struct usb_device_id btusb_table[] = { + /* Generic Bluetooth USB device */ + { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, + + /* AVM BlueFRITZ! 
USB v2.0 */ + { USB_DEVICE(0x057c, 0x3800) }, + + /* Bluetooth Ultraport Module from IBM */ + { USB_DEVICE(0x04bf, 0x030a) }, + + /* ALPS Modules with non-standard id */ + { USB_DEVICE(0x044e, 0x3001) }, + { USB_DEVICE(0x044e, 0x3002) }, + + /* Ericsson with non-standard id */ + { USB_DEVICE(0x0bdb, 0x1002) }, + + /* Canyon CN-BTU1 with HID interfaces */ + { USB_DEVICE(0x0c10, 0x0000) }, + + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, btusb_table); + +static struct usb_device_id blacklist_table[] = { + /* CSR BlueCore devices */ + { USB_DEVICE(0x0a12, 0x0001), .driver_info = BTUSB_CSR }, + + /* Broadcom BCM2033 without firmware */ + { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, + + /* Broadcom BCM2035 */ + { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, + + /* Broadcom BCM2045 */ + { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* IBM/Lenovo ThinkPad with Broadcom chip */ + { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* HP laptop with Broadcom chip */ + { USB_DEVICE(0x03f0, 0x171d), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Dell laptop with Broadcom chip */ + { USB_DEVICE(0x413c, 0x8126), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Dell Wireless 370 and 410 devices */ + { USB_DEVICE(0x413c, 0x8152), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x413c, 0x8156), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Belkin F8T012 and F8T013 devices */ + { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Asus WL-BTD202 device */ + { USB_DEVICE(0x0b05, 0x1715), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Kensington Bluetooth USB adapter */ + { USB_DEVICE(0x047d, 0x105e), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* RTX Telecom based adapters with buggy SCO support */ + { USB_DEVICE(0x0400, 0x0807), .driver_info = BTUSB_BROKEN_ISOC }, + { USB_DEVICE(0x0400, 0x080a), .driver_info = BTUSB_BROKEN_ISOC }, + + /* CONWISE Technology based adapters with buggy SCO support */ + { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC }, + + /* Digianswer devices */ + { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER }, + { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE }, + + /* CSR BlueCore Bluetooth Sniffer */ + { USB_DEVICE(0x0a12, 0x0002), .driver_info = BTUSB_SNIFFER }, + + /* Frontline ComProbe Bluetooth Sniffer */ + { USB_DEVICE(0x16d3, 0x0002), .driver_info = BTUSB_SNIFFER }, + + { } /* Terminating entry */ +}; + +#define BTUSB_MAX_ISOC_FRAMES 10 + +#define BTUSB_INTR_RUNNING 0 +#define BTUSB_BULK_RUNNING 1 +#define BTUSB_ISOC_RUNNING 2 +#define BTUSB_SUSPENDING 3 + +struct btusb_data { + struct hci_dev *hdev; + struct usb_device *udev; + struct usb_interface *intf; + struct usb_interface *isoc; + + spinlock_t lock; + + unsigned long flags; + + struct work_struct work; + struct work_struct waker; + + struct usb_anchor tx_anchor; + struct usb_anchor intr_anchor; + struct usb_anchor bulk_anchor; + struct usb_anchor isoc_anchor; + struct usb_anchor deferred; + int tx_in_flight; + spinlock_t txlock; + + struct usb_endpoint_descriptor *intr_ep; + struct usb_endpoint_descriptor *bulk_tx_ep; + struct 
usb_endpoint_descriptor *bulk_rx_ep; + struct usb_endpoint_descriptor *isoc_tx_ep; + struct usb_endpoint_descriptor *isoc_rx_ep; + + __u8 cmdreq_type; + + unsigned int sco_num; + int isoc_altsetting; + int suspend_count; + int did_iso_resume:1; +}; + +static int inc_tx(struct btusb_data *data) +{ + unsigned long flags; + int rv; + + spin_lock_irqsave(&data->txlock, flags); + rv = test_bit(BTUSB_SUSPENDING, &data->flags); + if (!rv) + data->tx_in_flight++; + spin_unlock_irqrestore(&data->txlock, flags); + + return rv; +} + +static void btusb_intr_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btusb_data *data = hdev->driver_data; + int err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, + urb, urb->status, urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return; + + if (urb->status == 0) { + hdev->stat.byte_rx += urb->actual_length; + + if (hci_recv_fragment(hdev, HCI_EVENT_PKT, + urb->transfer_buffer, + urb->actual_length) < 0) { + BT_ERR("%s corrupted event packet", hdev->name); + hdev->stat.err_rx++; + } + } + + if (!test_bit(BTUSB_INTR_RUNNING, &data->flags)) + return; + + usb_mark_last_busy(data->udev); + usb_anchor_urb(urb, &data->intr_anchor); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + BT_ERR("%s urb %p failed to resubmit (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } +} + +static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) +{ + struct btusb_data *data = hdev->driver_data; + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size; + + BT_DBG("%s", hdev->name); + + if (!data->intr_ep) + return -ENODEV; + + urb = usb_alloc_urb(0, mem_flags); + if (!urb) + return -ENOMEM; + + size = le16_to_cpu(data->intr_ep->wMaxPacketSize); + + buf = kmalloc(size, mem_flags); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress); + + usb_fill_int_urb(urb, data->udev, pipe, buf, size, + btusb_intr_complete, hdev, + data->intr_ep->bInterval); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_anchor_urb(urb, &data->intr_anchor); + + err = usb_submit_urb(urb, mem_flags); + if (err < 0) { + BT_ERR("%s urb %p submission failed (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static void btusb_bulk_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btusb_data *data = hdev->driver_data; + int err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, + urb, urb->status, urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return; + + if (urb->status == 0) { + hdev->stat.byte_rx += urb->actual_length; + + if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, + urb->transfer_buffer, + urb->actual_length) < 0) { + BT_ERR("%s corrupted ACL packet", hdev->name); + hdev->stat.err_rx++; + } + } + + if (!test_bit(BTUSB_BULK_RUNNING, &data->flags)) + return; + + usb_anchor_urb(urb, &data->bulk_anchor); + usb_mark_last_busy(data->udev); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + BT_ERR("%s urb %p failed to resubmit (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } +} + +static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags) +{ + struct btusb_data *data = hdev->driver_data; + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size = HCI_MAX_FRAME_SIZE; + + BT_DBG("%s", hdev->name); + + if (!data->bulk_rx_ep) + return -ENODEV; + + urb = usb_alloc_urb(0, 
mem_flags); + if (!urb) + return -ENOMEM; + + buf = kmalloc(size, mem_flags); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress); + + usb_fill_bulk_urb(urb, data->udev, pipe, + buf, size, btusb_bulk_complete, hdev); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_mark_last_busy(data->udev); + usb_anchor_urb(urb, &data->bulk_anchor); + + err = usb_submit_urb(urb, mem_flags); + if (err < 0) { + BT_ERR("%s urb %p submission failed (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static void btusb_isoc_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btusb_data *data = hdev->driver_data; + int i, err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, + urb, urb->status, urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return; + + if (urb->status == 0) { + for (i = 0; i < urb->number_of_packets; i++) { + unsigned int offset = urb->iso_frame_desc[i].offset; + unsigned int length = urb->iso_frame_desc[i].actual_length; + + if (urb->iso_frame_desc[i].status) + continue; + + hdev->stat.byte_rx += length; + + if (hci_recv_fragment(hdev, HCI_SCODATA_PKT, + urb->transfer_buffer + offset, + length) < 0) { + BT_ERR("%s corrupted SCO packet", hdev->name); + hdev->stat.err_rx++; + } + } + } + + if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) + return; + + usb_anchor_urb(urb, &data->isoc_anchor); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + BT_ERR("%s urb %p failed to resubmit (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } +} + +static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu) +{ + int i, offset = 0; + + BT_DBG("len %d mtu %d", len, mtu); + + for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu; + i++, offset += mtu, len -= mtu) { + urb->iso_frame_desc[i].offset = offset; + urb->iso_frame_desc[i].length = mtu; + } + + if (len && i < BTUSB_MAX_ISOC_FRAMES) { + urb->iso_frame_desc[i].offset = offset; + urb->iso_frame_desc[i].length = len; + i++; + } + + urb->number_of_packets = i; +} + +static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) +{ + struct btusb_data *data = hdev->driver_data; + struct urb *urb; + unsigned char *buf; + unsigned int pipe; + int err, size; + + BT_DBG("%s", hdev->name); + + if (!data->isoc_rx_ep) + return -ENODEV; + + urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, mem_flags); + if (!urb) + return -ENOMEM; + + size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) * + BTUSB_MAX_ISOC_FRAMES; + + buf = kmalloc(size, mem_flags); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); + + urb->dev = data->udev; + urb->pipe = pipe; + urb->context = hdev; + urb->complete = btusb_isoc_complete; + urb->interval = data->isoc_rx_ep->bInterval; + + urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; + urb->transfer_buffer = buf; + urb->transfer_buffer_length = size; + + __fill_isoc_descriptor(urb, size, + le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); + + usb_anchor_urb(urb, &data->isoc_anchor); + + err = usb_submit_urb(urb, mem_flags); + if (err < 0) { + BT_ERR("%s urb %p submission failed (%d)", + hdev->name, urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static void btusb_tx_complete(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct hci_dev *hdev = (struct hci_dev *) skb->dev; + struct 
btusb_data *data = hdev->driver_data; + + BT_DBG("%s urb %p status %d count %d", hdev->name, + urb, urb->status, urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + goto done; + + if (!urb->status) + hdev->stat.byte_tx += urb->transfer_buffer_length; + else + hdev->stat.err_tx++; + +done: + spin_lock(&data->txlock); + data->tx_in_flight--; + spin_unlock(&data->txlock); + + kfree(urb->setup_packet); + + kfree_skb(skb); +} + +static void btusb_isoc_tx_complete(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct hci_dev *hdev = (struct hci_dev *) skb->dev; + + BT_DBG("%s urb %p status %d count %d", hdev->name, + urb, urb->status, urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + goto done; + + if (!urb->status) + hdev->stat.byte_tx += urb->transfer_buffer_length; + else + hdev->stat.err_tx++; + +done: + kfree(urb->setup_packet); + + kfree_skb(skb); +} + +static int btusb_open(struct hci_dev *hdev) +{ + struct btusb_data *data = hdev->driver_data; + int err; + + BT_DBG("%s", hdev->name); + + err = usb_autopm_get_interface(data->intf); + if (err < 0) + return err; + + data->intf->needs_remote_wakeup = 1; + + if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) + goto done; + + if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) + goto done; + + err = btusb_submit_intr_urb(hdev, GFP_KERNEL); + if (err < 0) + goto failed; + + err = btusb_submit_bulk_urb(hdev, GFP_KERNEL); + if (err < 0) { + usb_kill_anchored_urbs(&data->intr_anchor); + goto failed; + } + + set_bit(BTUSB_BULK_RUNNING, &data->flags); + btusb_submit_bulk_urb(hdev, GFP_KERNEL); + +done: + usb_autopm_put_interface(data->intf); + return 0; + +failed: + clear_bit(BTUSB_INTR_RUNNING, &data->flags); + clear_bit(HCI_RUNNING, &hdev->flags); + usb_autopm_put_interface(data->intf); + return err; +} + +static void btusb_stop_traffic(struct btusb_data *data) +{ + usb_kill_anchored_urbs(&data->intr_anchor); + usb_kill_anchored_urbs(&data->bulk_anchor); + usb_kill_anchored_urbs(&data->isoc_anchor); +} + +static int btusb_close(struct hci_dev *hdev) +{ + struct btusb_data *data = hdev->driver_data; + int err; + + BT_DBG("%s", hdev->name); + + if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + cancel_work_sync(&data->work); + cancel_work_sync(&data->waker); + + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + clear_bit(BTUSB_BULK_RUNNING, &data->flags); + clear_bit(BTUSB_INTR_RUNNING, &data->flags); + + btusb_stop_traffic(data); + err = usb_autopm_get_interface(data->intf); + if (err < 0) + goto failed; + + data->intf->needs_remote_wakeup = 0; + usb_autopm_put_interface(data->intf); + +failed: + usb_scuttle_anchored_urbs(&data->deferred); + return 0; +} + +static int btusb_flush(struct hci_dev *hdev) +{ + struct btusb_data *data = hdev->driver_data; + + BT_DBG("%s", hdev->name); + + usb_kill_anchored_urbs(&data->tx_anchor); + + return 0; +} + +static int btusb_send_frame(struct sk_buff *skb) +{ + struct hci_dev *hdev = (struct hci_dev *) skb->dev; + struct btusb_data *data = hdev->driver_data; + struct usb_ctrlrequest *dr; + struct urb *urb; + unsigned int pipe; + int err; + + BT_DBG("%s", hdev->name); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -EBUSY; + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + return -ENOMEM; + + dr = kmalloc(sizeof(*dr), GFP_ATOMIC); + if (!dr) { + usb_free_urb(urb); + return -ENOMEM; + } + + dr->bRequestType = data->cmdreq_type; + dr->bRequest = 0; + dr->wIndex = 0; + dr->wValue = 0; 
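+		/* Editor note (descriptive comment, not upstream): HCI commands are sent as control transfers on endpoint 0, using the class (or, for Digianswer devices, vendor) request type, with the raw command packet as payload */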
+ dr->wLength = __cpu_to_le16(skb->len); + + pipe = usb_sndctrlpipe(data->udev, 0x00); + + usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, + skb->data, skb->len, btusb_tx_complete, skb); + + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + if (!data->bulk_tx_ep || hdev->conn_hash.acl_num < 1) + return -ENODEV; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + return -ENOMEM; + + pipe = usb_sndbulkpipe(data->udev, + data->bulk_tx_ep->bEndpointAddress); + + usb_fill_bulk_urb(urb, data->udev, pipe, + skb->data, skb->len, btusb_tx_complete, skb); + + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1) + return -ENODEV; + + urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC); + if (!urb) + return -ENOMEM; + + pipe = usb_sndisocpipe(data->udev, + data->isoc_tx_ep->bEndpointAddress); + + urb->dev = data->udev; + urb->pipe = pipe; + urb->context = skb; + urb->complete = btusb_isoc_tx_complete; + urb->interval = data->isoc_tx_ep->bInterval; + + urb->transfer_flags = URB_ISO_ASAP; + urb->transfer_buffer = skb->data; + urb->transfer_buffer_length = skb->len; + + __fill_isoc_descriptor(urb, skb->len, + le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); + + hdev->stat.sco_tx++; + goto skip_waking; + + default: + return -EILSEQ; + } + + err = inc_tx(data); + if (err) { + usb_anchor_urb(urb, &data->deferred); + schedule_work(&data->waker); + err = 0; + goto done; + } + +skip_waking: + usb_anchor_urb(urb, &data->tx_anchor); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + BT_ERR("%s urb %p submission failed", hdev->name, urb); + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + } else { + usb_mark_last_busy(data->udev); + } + + usb_free_urb(urb); + +done: + return err; +} + +static void btusb_destruct(struct hci_dev *hdev) +{ + struct btusb_data *data = hdev->driver_data; + + BT_DBG("%s", hdev->name); + + kfree(data); +} + +static void btusb_notify(struct hci_dev *hdev, unsigned int evt) +{ + struct btusb_data *data = hdev->driver_data; + + BT_DBG("%s evt %d", hdev->name, evt); + + if (hdev->conn_hash.sco_num != data->sco_num) { + data->sco_num = hdev->conn_hash.sco_num; + schedule_work(&data->work); + } +} + +static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting) +{ + struct btusb_data *data = hdev->driver_data; + struct usb_interface *intf = data->isoc; + struct usb_endpoint_descriptor *ep_desc; + int i, err; + + if (!data->isoc) + return -ENODEV; + + err = usb_set_interface(data->udev, 1, altsetting); + if (err < 0) { + BT_ERR("%s setting interface failed (%d)", hdev->name, -err); + return err; + } + + data->isoc_altsetting = altsetting; + + data->isoc_tx_ep = NULL; + data->isoc_rx_ep = NULL; + + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { + ep_desc = &intf->cur_altsetting->endpoint[i].desc; + + if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) { + data->isoc_tx_ep = ep_desc; + continue; + } + + if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) { + data->isoc_rx_ep = ep_desc; + continue; + } + } + + if (!data->isoc_tx_ep || !data->isoc_rx_ep) { + BT_ERR("%s invalid SCO descriptors", hdev->name); + return -ENODEV; + } + + return 0; +} + +static void btusb_work(struct work_struct *work) +{ + struct btusb_data *data = container_of(work, struct btusb_data, work); + struct hci_dev *hdev = data->hdev; + int err; + + if (hdev->conn_hash.sco_num > 0) { + if (!data->did_iso_resume) { + err = usb_autopm_get_interface(data->isoc); + if (err < 0) { + 
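+				/* Editor note (descriptive comment, not upstream): could not runtime-resume the isoc interface, so stop SCO reception */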
clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + usb_kill_anchored_urbs(&data->isoc_anchor); + return; + } + + data->did_iso_resume = 1; + } + if (data->isoc_altsetting != 2) { + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + usb_kill_anchored_urbs(&data->isoc_anchor); + + if (__set_isoc_interface(hdev, 2) < 0) + return; + } + + if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { + if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0) + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + else + btusb_submit_isoc_urb(hdev, GFP_KERNEL); + } + } else { + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + usb_kill_anchored_urbs(&data->isoc_anchor); + + __set_isoc_interface(hdev, 0); + if (data->did_iso_resume) { + data->did_iso_resume = 0; + usb_autopm_put_interface(data->isoc); + } + } +} + +static void btusb_waker(struct work_struct *work) +{ + struct btusb_data *data = container_of(work, struct btusb_data, waker); + int err; + + err = usb_autopm_get_interface(data->intf); + if (err < 0) + return; + + usb_autopm_put_interface(data->intf); +} + +static int btusb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_endpoint_descriptor *ep_desc; + struct btusb_data *data; + struct hci_dev *hdev; + int i, err; + + BT_DBG("intf %p id %p", intf, id); + + /* interface numbers are hardcoded in the spec */ + if (intf->cur_altsetting->desc.bInterfaceNumber != 0) + return -ENODEV; + + if (!id->driver_info) { + const struct usb_device_id *match; + match = usb_match_id(intf, blacklist_table); + if (match) + id = match; + } + + if (id->driver_info == BTUSB_IGNORE) + return -ENODEV; + + if (ignore_dga && id->driver_info & BTUSB_DIGIANSWER) + return -ENODEV; + + if (ignore_csr && id->driver_info & BTUSB_CSR) + return -ENODEV; + + if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER) + return -ENODEV; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { + ep_desc = &intf->cur_altsetting->endpoint[i].desc; + + if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) { + data->intr_ep = ep_desc; + continue; + } + + if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { + data->bulk_tx_ep = ep_desc; + continue; + } + + if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { + data->bulk_rx_ep = ep_desc; + continue; + } + } + + if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) { + kfree(data); + return -ENODEV; + } + + data->cmdreq_type = USB_TYPE_CLASS; + + data->udev = interface_to_usbdev(intf); + data->intf = intf; + + spin_lock_init(&data->lock); + + INIT_WORK(&data->work, btusb_work); + INIT_WORK(&data->waker, btusb_waker); + spin_lock_init(&data->txlock); + + init_usb_anchor(&data->tx_anchor); + init_usb_anchor(&data->intr_anchor); + init_usb_anchor(&data->bulk_anchor); + init_usb_anchor(&data->isoc_anchor); + init_usb_anchor(&data->deferred); + + hdev = hci_alloc_dev(); + if (!hdev) { + kfree(data); + return -ENOMEM; + } + + hdev->bus = HCI_USB; + hdev->driver_data = data; + + data->hdev = hdev; + + SET_HCIDEV_DEV(hdev, &intf->dev); + + hdev->open = btusb_open; + hdev->close = btusb_close; + hdev->flush = btusb_flush; + hdev->send = btusb_send_frame; + hdev->destruct = btusb_destruct; + hdev->notify = btusb_notify; + + hdev->owner = THIS_MODULE; + + /* Interface numbers are hardcoded in the specification */ + data->isoc = usb_ifnum_to_if(data->udev, 1); + + if (!reset) + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + + if (force_scofix || id->driver_info & 
BTUSB_WRONG_SCO_MTU) { + if (!disable_scofix) + set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); + } + + if (id->driver_info & BTUSB_BROKEN_ISOC) + data->isoc = NULL; + + if (id->driver_info & BTUSB_DIGIANSWER) { + data->cmdreq_type = USB_TYPE_VENDOR; + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + } + + if (id->driver_info & BTUSB_CSR) { + struct usb_device *udev = data->udev; + + /* Old firmware would otherwise execute USB reset */ + if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + } + + if (id->driver_info & BTUSB_SNIFFER) { + struct usb_device *udev = data->udev; + + /* New sniffer firmware has crippled HCI interface */ + if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + + data->isoc = NULL; + } + + if (id->driver_info & BTUSB_BCM92035) { + unsigned char cmd[] = { 0x3b, 0xfc, 0x01, 0x00 }; + struct sk_buff *skb; + + skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); + if (skb) { + memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd)); + skb_queue_tail(&hdev->driver_init, skb); + } + } + + if (data->isoc) { + err = usb_driver_claim_interface(&btusb_driver, + data->isoc, data); + if (err < 0) { + hci_free_dev(hdev); + kfree(data); + return err; + } + } + + err = hci_register_dev(hdev); + if (err < 0) { + hci_free_dev(hdev); + kfree(data); + return err; + } + + usb_set_intfdata(intf, data); + + return 0; +} + +static void btusb_disconnect(struct usb_interface *intf) +{ + struct btusb_data *data = usb_get_intfdata(intf); + struct hci_dev *hdev; + + BT_DBG("intf %p", intf); + + if (!data) + return; + + hdev = data->hdev; + + __hci_dev_hold(hdev); + + usb_set_intfdata(data->intf, NULL); + + if (data->isoc) + usb_set_intfdata(data->isoc, NULL); + + hci_unregister_dev(hdev); + + if (intf == data->isoc) + usb_driver_release_interface(&btusb_driver, data->intf); + else if (data->isoc) + usb_driver_release_interface(&btusb_driver, data->isoc); + + __hci_dev_put(hdev); + + hci_free_dev(hdev); +} + +#ifdef CONFIG_PM +static int btusb_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct btusb_data *data = usb_get_intfdata(intf); + + BT_DBG("intf %p", intf); + + if (data->suspend_count++) + return 0; + + spin_lock_irq(&data->txlock); + if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) { + set_bit(BTUSB_SUSPENDING, &data->flags); + spin_unlock_irq(&data->txlock); + } else { + spin_unlock_irq(&data->txlock); + data->suspend_count--; + return -EBUSY; + } + + cancel_work_sync(&data->work); + + btusb_stop_traffic(data); + usb_kill_anchored_urbs(&data->tx_anchor); + + return 0; +} + +static void play_deferred(struct btusb_data *data) +{ + struct urb *urb; + int err; + + while ((urb = usb_get_from_anchor(&data->deferred))) { + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) + break; + + data->tx_in_flight++; + } + usb_scuttle_anchored_urbs(&data->deferred); +} + +static int btusb_resume(struct usb_interface *intf) +{ + struct btusb_data *data = usb_get_intfdata(intf); + struct hci_dev *hdev = data->hdev; + int err = 0; + + BT_DBG("intf %p", intf); + + if (--data->suspend_count) + return 0; + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + goto done; + + if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) { + err = btusb_submit_intr_urb(hdev, GFP_NOIO); + if (err < 0) { + clear_bit(BTUSB_INTR_RUNNING, &data->flags); + goto failed; + } + } + + if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) { + err = btusb_submit_bulk_urb(hdev, GFP_NOIO); + if (err < 0) { + 
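+			/* Editor note (descriptive comment, not upstream): bulk URB resubmission failed on resume, so mark ACL reception as stopped */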
clear_bit(BTUSB_BULK_RUNNING, &data->flags); + goto failed; + } + + btusb_submit_bulk_urb(hdev, GFP_NOIO); + } + + if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) { + if (btusb_submit_isoc_urb(hdev, GFP_NOIO) < 0) + clear_bit(BTUSB_ISOC_RUNNING, &data->flags); + else + btusb_submit_isoc_urb(hdev, GFP_NOIO); + } + + spin_lock_irq(&data->txlock); + play_deferred(data); + clear_bit(BTUSB_SUSPENDING, &data->flags); + spin_unlock_irq(&data->txlock); + schedule_work(&data->work); + + return 0; + +failed: + usb_scuttle_anchored_urbs(&data->deferred); +done: + spin_lock_irq(&data->txlock); + clear_bit(BTUSB_SUSPENDING, &data->flags); + spin_unlock_irq(&data->txlock); + + return err; +} +#endif + +static struct usb_driver btusb_driver = { + .name = "btusb", + .probe = btusb_probe, + .disconnect = btusb_disconnect, +#ifdef CONFIG_PM + .suspend = btusb_suspend, + .resume = btusb_resume, +#endif + .id_table = btusb_table, + .supports_autosuspend = 1, +}; + +static int __init btusb_init(void) +{ + BT_INFO("Generic Bluetooth USB driver ver %s", VERSION); + + return usb_register(&btusb_driver); +} + +static void __exit btusb_exit(void) +{ + usb_deregister(&btusb_driver); +} + +module_init(btusb_init); +module_exit(btusb_exit); + +module_param(ignore_dga, bool, 0644); +MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001"); + +module_param(ignore_csr, bool, 0644); +MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001"); + +module_param(ignore_sniffer, bool, 0644); +MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002"); + +module_param(disable_scofix, bool, 0644); +MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size"); + +module_param(force_scofix, bool, 0644); +MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size"); + +module_param(reset, bool, 0644); +MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Generic Bluetooth USB driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c new file mode 100644 index 0000000..1778831 --- /dev/null +++ b/drivers/bluetooth/dtl1_cs.c @@ -0,0 +1,687 @@ +/* + * + * A driver for Nokia Connectivity Card DTL-1 devices + * + * Copyright (C) 2001-2002 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * Software distributed under the License is distributed on an "AS + * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or + * implied. See the License for the specific language governing + * rights and limitations under the License. + * + * The initial developer of the original code is David A. Hinds + * . Portions created by David A. Hinds + * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. 
+ * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + + + +/* ======================== Module parameters ======================== */ + + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth driver for Nokia Connectivity Card DTL-1"); +MODULE_LICENSE("GPL"); + + + +/* ======================== Local structures ======================== */ + + +typedef struct dtl1_info_t { + struct pcmcia_device *p_dev; + dev_node_t node; + + struct hci_dev *hdev; + + spinlock_t lock; /* For serializing operations */ + + unsigned long flowmask; /* HCI flow mask */ + int ri_latch; + + struct sk_buff_head txq; + unsigned long tx_state; + + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; +} dtl1_info_t; + + +static int dtl1_config(struct pcmcia_device *link); +static void dtl1_release(struct pcmcia_device *link); + +static void dtl1_detach(struct pcmcia_device *p_dev); + + +/* Transmit states */ +#define XMIT_SENDING 1 +#define XMIT_WAKEUP 2 +#define XMIT_WAITING 8 + +/* Receiver States */ +#define RECV_WAIT_NSH 0 +#define RECV_WAIT_DATA 1 + + +typedef struct { + u8 type; + u8 zero; + u16 len; +} __attribute__ ((packed)) nsh_t; /* Nokia Specific Header */ + +#define NSHL 4 /* Nokia Specific Header Length */ + + + +/* ======================== Interrupt handling ======================== */ + + +static int dtl1_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) +{ + int actual = 0; + + /* Tx FIFO should be empty */ + if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) + return 0; + + /* Fill FIFO with current frame */ + while ((fifo_size-- > 0) && (actual < len)) { + /* Transmit next byte */ + outb(buf[actual], iobase + UART_TX); + actual++; + } + + return actual; +} + + +static void dtl1_write_wakeup(dtl1_info_t *info) +{ + if (!info) { + BT_ERR("Unknown device"); + return; + } + + if (test_bit(XMIT_WAITING, &(info->tx_state))) { + set_bit(XMIT_WAKEUP, &(info->tx_state)); + return; + } + + if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) { + set_bit(XMIT_WAKEUP, &(info->tx_state)); + return; + } + + do { + register unsigned int iobase = info->p_dev->io.BasePort1; + register struct sk_buff *skb; + register int len; + + clear_bit(XMIT_WAKEUP, &(info->tx_state)); + + if (!pcmcia_dev_present(info->p_dev)) + return; + + if (!(skb = skb_dequeue(&(info->txq)))) + break; + + /* Send frame */ + len = dtl1_write(iobase, 32, skb->data, skb->len); + + if (len == skb->len) { + set_bit(XMIT_WAITING, &(info->tx_state)); + kfree_skb(skb); + } else { + skb_pull(skb, len); + skb_queue_head(&(info->txq), skb); + } + + info->hdev->stat.byte_tx += len; + + } while (test_bit(XMIT_WAKEUP, &(info->tx_state))); + + clear_bit(XMIT_SENDING, &(info->tx_state)); +} + + +static void dtl1_control(dtl1_info_t *info, struct sk_buff *skb) +{ + u8 flowmask = *(u8 *)skb->data; + int i; + + printk(KERN_INFO "Bluetooth: Nokia control data ="); + for (i = 0; i < skb->len; i++) { + printk(" %02x", skb->data[i]); + } + printk("\n"); + + /* transition to active state */ + if (((info->flowmask & 0x07) == 0) && ((flowmask & 0x07) != 0)) { + clear_bit(XMIT_WAITING, &(info->tx_state)); + dtl1_write_wakeup(info); + } + + info->flowmask = flowmask; + + kfree_skb(skb); +} + + +static void dtl1_receive(dtl1_info_t *info) +{ + unsigned int iobase; + nsh_t *nsh; + int boguscount = 0; + + if 
(!info) { + BT_ERR("Unknown device"); + return; + } + + iobase = info->p_dev->io.BasePort1; + + do { + info->hdev->stat.byte_rx++; + + /* Allocate packet */ + if (info->rx_skb == NULL) + if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) { + BT_ERR("Can't allocate mem for new packet"); + info->rx_state = RECV_WAIT_NSH; + info->rx_count = NSHL; + return; + } + + *skb_put(info->rx_skb, 1) = inb(iobase + UART_RX); + nsh = (nsh_t *)info->rx_skb->data; + + info->rx_count--; + + if (info->rx_count == 0) { + + switch (info->rx_state) { + case RECV_WAIT_NSH: + info->rx_state = RECV_WAIT_DATA; + info->rx_count = nsh->len + (nsh->len & 0x0001); + break; + case RECV_WAIT_DATA: + bt_cb(info->rx_skb)->pkt_type = nsh->type; + + /* remove PAD byte if it exists */ + if (nsh->len & 0x0001) { + info->rx_skb->tail--; + info->rx_skb->len--; + } + + /* remove NSH */ + skb_pull(info->rx_skb, NSHL); + + switch (bt_cb(info->rx_skb)->pkt_type) { + case 0x80: + /* control data for the Nokia Card */ + dtl1_control(info, info->rx_skb); + break; + case 0x82: + case 0x83: + case 0x84: + /* send frame to the HCI layer */ + info->rx_skb->dev = (void *) info->hdev; + bt_cb(info->rx_skb)->pkt_type &= 0x0f; + hci_recv_frame(info->rx_skb); + break; + default: + /* unknown packet */ + BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); + kfree_skb(info->rx_skb); + break; + } + + info->rx_state = RECV_WAIT_NSH; + info->rx_count = NSHL; + info->rx_skb = NULL; + break; + } + + } + + /* Make sure we don't stay here too long */ + if (boguscount++ > 32) + break; + + } while (inb(iobase + UART_LSR) & UART_LSR_DR); +} + + +static irqreturn_t dtl1_interrupt(int irq, void *dev_inst) +{ + dtl1_info_t *info = dev_inst; + unsigned int iobase; + unsigned char msr; + int boguscount = 0; + int iir, lsr; + irqreturn_t r = IRQ_NONE; + + if (!info || !info->hdev) + /* our irq handler is shared */ + return IRQ_NONE; + + iobase = info->p_dev->io.BasePort1; + + spin_lock(&(info->lock)); + + iir = inb(iobase + UART_IIR) & UART_IIR_ID; + while (iir) { + + r = IRQ_HANDLED; + /* Clear interrupt */ + lsr = inb(iobase + UART_LSR); + + switch (iir) { + case UART_IIR_RLSI: + BT_ERR("RLSI"); + break; + case UART_IIR_RDI: + /* Receive interrupt */ + dtl1_receive(info); + break; + case UART_IIR_THRI: + if (lsr & UART_LSR_THRE) { + /* Transmitter ready for data */ + dtl1_write_wakeup(info); + } + break; + default: + BT_ERR("Unhandled IIR=%#x", iir); + break; + } + + /* Make sure we don't stay here too long */ + if (boguscount++ > 100) + break; + + iir = inb(iobase + UART_IIR) & UART_IIR_ID; + + } + + msr = inb(iobase + UART_MSR); + + if (info->ri_latch ^ (msr & UART_MSR_RI)) { + info->ri_latch = msr & UART_MSR_RI; + clear_bit(XMIT_WAITING, &(info->tx_state)); + dtl1_write_wakeup(info); + r = IRQ_HANDLED; + } + + spin_unlock(&(info->lock)); + + return r; +} + + + +/* ======================== HCI interface ======================== */ + + +static int dtl1_hci_open(struct hci_dev *hdev) +{ + set_bit(HCI_RUNNING, &(hdev->flags)); + + return 0; +} + + +static int dtl1_hci_flush(struct hci_dev *hdev) +{ + dtl1_info_t *info = (dtl1_info_t *)(hdev->driver_data); + + /* Drop TX queue */ + skb_queue_purge(&(info->txq)); + + return 0; +} + + +static int dtl1_hci_close(struct hci_dev *hdev) +{ + if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) + return 0; + + dtl1_hci_flush(hdev); + + return 0; +} + + +static int dtl1_hci_send_frame(struct sk_buff *skb) +{ + dtl1_info_t *info; + struct hci_dev *hdev = (struct 
hci_dev *)(skb->dev); + struct sk_buff *s; + nsh_t nsh; + + if (!hdev) { + BT_ERR("Frame for unknown HCI device (hdev=NULL)"); + return -ENODEV; + } + + info = (dtl1_info_t *)(hdev->driver_data); + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + nsh.type = 0x81; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + nsh.type = 0x82; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + nsh.type = 0x83; + break; + default: + return -EILSEQ; + }; + + nsh.zero = 0; + nsh.len = skb->len; + + s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC); + if (!s) + return -ENOMEM; + + skb_reserve(s, NSHL); + skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len); + if (skb->len & 0x0001) + *skb_put(s, 1) = 0; /* PAD */ + + /* Prepend skb with Nokia frame header and queue */ + memcpy(skb_push(s, NSHL), &nsh, NSHL); + skb_queue_tail(&(info->txq), s); + + dtl1_write_wakeup(info); + + kfree_skb(skb); + + return 0; +} + + +static void dtl1_hci_destruct(struct hci_dev *hdev) +{ +} + + +static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) +{ + return -ENOIOCTLCMD; +} + + + +/* ======================== Card services HCI interaction ======================== */ + + +static int dtl1_open(dtl1_info_t *info) +{ + unsigned long flags; + unsigned int iobase = info->p_dev->io.BasePort1; + struct hci_dev *hdev; + + spin_lock_init(&(info->lock)); + + skb_queue_head_init(&(info->txq)); + + info->rx_state = RECV_WAIT_NSH; + info->rx_count = NSHL; + info->rx_skb = NULL; + + set_bit(XMIT_WAITING, &(info->tx_state)); + + /* Initialize HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + return -ENOMEM; + } + + info->hdev = hdev; + + hdev->bus = HCI_PCCARD; + hdev->driver_data = info; + SET_HCIDEV_DEV(hdev, &info->p_dev->dev); + + hdev->open = dtl1_hci_open; + hdev->close = dtl1_hci_close; + hdev->flush = dtl1_hci_flush; + hdev->send = dtl1_hci_send_frame; + hdev->destruct = dtl1_hci_destruct; + hdev->ioctl = dtl1_hci_ioctl; + + hdev->owner = THIS_MODULE; + + spin_lock_irqsave(&(info->lock), flags); + + /* Reset UART */ + outb(0, iobase + UART_MCR); + + /* Turn off interrupts */ + outb(0, iobase + UART_IER); + + /* Initialize UART */ + outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */ + outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR); + + info->ri_latch = inb(info->p_dev->io.BasePort1 + UART_MSR) & UART_MSR_RI; + + /* Turn on interrupts */ + outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER); + + spin_unlock_irqrestore(&(info->lock), flags); + + /* Timeout before it is safe to send the first HCI packet */ + msleep(2000); + + /* Register HCI device */ + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + info->hdev = NULL; + hci_free_dev(hdev); + return -ENODEV; + } + + return 0; +} + + +static int dtl1_close(dtl1_info_t *info) +{ + unsigned long flags; + unsigned int iobase = info->p_dev->io.BasePort1; + struct hci_dev *hdev = info->hdev; + + if (!hdev) + return -ENODEV; + + dtl1_hci_close(hdev); + + spin_lock_irqsave(&(info->lock), flags); + + /* Reset UART */ + outb(0, iobase + UART_MCR); + + /* Turn off interrupts */ + outb(0, iobase + UART_IER); + + spin_unlock_irqrestore(&(info->lock), flags); + + if (hci_unregister_dev(hdev) < 0) + BT_ERR("Can't unregister HCI device %s", hdev->name); + + hci_free_dev(hdev); + + return 0; +} + +static int dtl1_probe(struct pcmcia_device *link) +{ + dtl1_info_t *info; + + /* Create new info 
device */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->p_dev = link; + link->priv = info; + + link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; + link->io.NumPorts1 = 8; + link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; + + link->irq.Handler = dtl1_interrupt; + + link->conf.Attributes = CONF_ENABLE_IRQ; + link->conf.IntType = INT_MEMORY_AND_IO; + + return dtl1_config(link); +} + + +static void dtl1_detach(struct pcmcia_device *link) +{ + dtl1_info_t *info = link->priv; + + dtl1_release(link); + + kfree(info); +} + +static int dtl1_confcheck(struct pcmcia_device *p_dev, + cistpl_cftable_entry_t *cf, + cistpl_cftable_entry_t *dflt, + unsigned int vcc, + void *priv_data) +{ + if ((cf->io.nwin == 1) && (cf->io.win[0].len > 8)) { + p_dev->io.BasePort1 = cf->io.win[0].base; + p_dev->io.NumPorts1 = cf->io.win[0].len; /*yo */ + p_dev->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK; + if (!pcmcia_request_io(p_dev, &p_dev->io)) + return 0; + } + return -ENODEV; +} + +static int dtl1_config(struct pcmcia_device *link) +{ + dtl1_info_t *info = link->priv; + int i; + + /* Look for a generic full-sized window */ + link->io.NumPorts1 = 8; + if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0) + goto failed; + + i = pcmcia_request_irq(link, &link->irq); + if (i != 0) + link->irq.AssignedIRQ = 0; + + i = pcmcia_request_configuration(link, &link->conf); + if (i != 0) + goto failed; + + if (dtl1_open(info) != 0) + goto failed; + + strcpy(info->node.dev_name, info->hdev->name); + link->dev_node = &info->node; + + return 0; + +failed: + dtl1_release(link); + return -ENODEV; +} + + +static void dtl1_release(struct pcmcia_device *link) +{ + dtl1_info_t *info = link->priv; + + dtl1_close(info); + + pcmcia_disable_device(link); +} + + +static struct pcmcia_device_id dtl1_ids[] = { + PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d), + PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-4", 0xe1bfdd64, 0x9102bc82), + PCMCIA_DEVICE_PROD_ID12("Socket", "CF", 0xb38bcc2e, 0x44ebf863), + PCMCIA_DEVICE_PROD_ID12("Socket", "CF+ Personal Network Card", 0xb38bcc2e, 0xe732bae3), + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, dtl1_ids); + +static struct pcmcia_driver dtl1_driver = { + .owner = THIS_MODULE, + .drv = { + .name = "dtl1_cs", + }, + .probe = dtl1_probe, + .remove = dtl1_detach, + .id_table = dtl1_ids, +}; + +static int __init init_dtl1_cs(void) +{ + return pcmcia_register_driver(&dtl1_driver); +} + + +static void __exit exit_dtl1_cs(void) +{ + pcmcia_unregister_driver(&dtl1_driver); +} + +module_init(init_dtl1_cs); +module_exit(exit_dtl1_cs); diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c new file mode 100644 index 0000000..40aec0f --- /dev/null +++ b/drivers/bluetooth/hci_bcsp.c @@ -0,0 +1,763 @@ +/* + * + * Bluetooth HCI UART driver + * + * Copyright (C) 2002-2003 Fabrizio Gennari + * Copyright (C) 2004-2005 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +#define VERSION "0.3" + +static int txcrc = 1; +static int hciextn = 1; + +#define BCSP_TXWINSIZE 4 + +#define BCSP_ACK_PKT 0x05 +#define BCSP_LE_PKT 0x06 + +struct bcsp_struct { + struct sk_buff_head unack; /* Unack'ed packets queue */ + struct sk_buff_head rel; /* Reliable packets queue */ + struct sk_buff_head unrel; /* Unreliable packets queue */ + + unsigned long rx_count; + struct sk_buff *rx_skb; + u8 rxseq_txack; /* rxseq == txack. */ + u8 rxack; /* Last packet sent by us that the peer ack'ed */ + struct timer_list tbcsp; + + enum { + BCSP_W4_PKT_DELIMITER, + BCSP_W4_PKT_START, + BCSP_W4_BCSP_HDR, + BCSP_W4_DATA, + BCSP_W4_CRC + } rx_state; + + enum { + BCSP_ESCSTATE_NOESC, + BCSP_ESCSTATE_ESC + } rx_esc_state; + + u8 use_crc; + u16 message_crc; + u8 txack_req; /* Do we need to send ack's to the peer? */ + + /* Reliable packet sequence number - used to assign seq to each rel pkt. */ + u8 msgq_txseq; +}; + +/* ---- BCSP CRC calculation ---- */ + +/* Table for calculating CRC for polynomial 0x1021, LSB processed first, +initial value 0xffff, bits shifted in reverse order. */ + +static const u16 crc_table[] = { + 0x0000, 0x1081, 0x2102, 0x3183, + 0x4204, 0x5285, 0x6306, 0x7387, + 0x8408, 0x9489, 0xa50a, 0xb58b, + 0xc60c, 0xd68d, 0xe70e, 0xf78f +}; + +/* Initialise the crc calculator */ +#define BCSP_CRC_INIT(x) x = 0xffff + +/* + Update crc with next data byte + + Implementation note + The data byte is treated as two nibbles. The crc is generated + in reverse, i.e., bits are fed into the register from the top. 
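The comment just above describes the BCSP CRC: CCITT polynomial 0x1021, fed LSB-first from an initial value of 0xffff, computed a nibble at a time from a 16-entry table. A quick way to convince yourself the table is correct is to compare it against a plain bit-at-a-time reflected CRC (reversed polynomial 0x8408). The self-checking sketch below does that; it is an illustration only, not part of the patch.

/* Self-check: the 16-entry nibble table used by hci_bcsp.c is equivalent
 * to a bit-at-a-time CRC-16 with the reflected CCITT polynomial 0x8408
 * (0x1021 bit-reversed), initial value 0xffff. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint16_t crc_table[16] = {
        0x0000, 0x1081, 0x2102, 0x3183,
        0x4204, 0x5285, 0x6306, 0x7387,
        0x8408, 0x9489, 0xa50a, 0xb58b,
        0xc60c, 0xd68d, 0xe70e, 0xf78f
};

/* Nibble-at-a-time update, same structure as bcsp_crc_update() */
static void crc_update_nibble(uint16_t *crc, uint8_t d)
{
        uint16_t reg = *crc;

        reg = (reg >> 4) ^ crc_table[(reg ^ d) & 0x000f];
        reg = (reg >> 4) ^ crc_table[(reg ^ (d >> 4)) & 0x000f];
        *crc = reg;
}

/* Reference: bit-at-a-time, LSB first, reversed polynomial 0x8408 */
static void crc_update_bitwise(uint16_t *crc, uint8_t d)
{
        uint16_t reg = *crc ^ d;

        for (int i = 0; i < 8; i++)
                reg = (reg & 1) ? (reg >> 1) ^ 0x8408 : (reg >> 1);
        *crc = reg;
}

int main(void)
{
        const uint8_t msg[] = "123456789";
        uint16_t a = 0xffff, b = 0xffff;

        for (size_t i = 0; i < sizeof(msg) - 1; i++) {
                crc_update_nibble(&a, msg[i]);
                crc_update_bitwise(&b, msg[i]);
                assert(a == b);                 /* the two methods agree */
        }
        /* bcsp_prepare_pkt() transmits bitrev16(crc), high byte first */
        printf("crc over \"123456789\" = 0x%04x\n", a);
        return 0;
}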
+*/ +static void bcsp_crc_update(u16 *crc, u8 d) +{ + u16 reg = *crc; + + reg = (reg >> 4) ^ crc_table[(reg ^ d) & 0x000f]; + reg = (reg >> 4) ^ crc_table[(reg ^ (d >> 4)) & 0x000f]; + + *crc = reg; +} + +/* ---- BCSP core ---- */ + +static void bcsp_slip_msgdelim(struct sk_buff *skb) +{ + const char pkt_delim = 0xc0; + + memcpy(skb_put(skb, 1), &pkt_delim, 1); +} + +static void bcsp_slip_one_byte(struct sk_buff *skb, u8 c) +{ + const char esc_c0[2] = { 0xdb, 0xdc }; + const char esc_db[2] = { 0xdb, 0xdd }; + + switch (c) { + case 0xc0: + memcpy(skb_put(skb, 2), &esc_c0, 2); + break; + case 0xdb: + memcpy(skb_put(skb, 2), &esc_db, 2); + break; + default: + memcpy(skb_put(skb, 1), &c, 1); + } +} + +static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct bcsp_struct *bcsp = hu->priv; + + if (skb->len > 0xFFF) { + BT_ERR("Packet too long"); + kfree_skb(skb); + return 0; + } + + switch (bt_cb(skb)->pkt_type) { + case HCI_ACLDATA_PKT: + case HCI_COMMAND_PKT: + skb_queue_tail(&bcsp->rel, skb); + break; + + case HCI_SCODATA_PKT: + skb_queue_tail(&bcsp->unrel, skb); + break; + + default: + BT_ERR("Unknown packet type"); + kfree_skb(skb); + break; + } + + return 0; +} + +static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data, + int len, int pkt_type) +{ + struct sk_buff *nskb; + u8 hdr[4], chan; + u16 BCSP_CRC_INIT(bcsp_txmsg_crc); + int rel, i; + + switch (pkt_type) { + case HCI_ACLDATA_PKT: + chan = 6; /* BCSP ACL channel */ + rel = 1; /* reliable channel */ + break; + case HCI_COMMAND_PKT: + chan = 5; /* BCSP cmd/evt channel */ + rel = 1; /* reliable channel */ + break; + case HCI_SCODATA_PKT: + chan = 7; /* BCSP SCO channel */ + rel = 0; /* unreliable channel */ + break; + case BCSP_LE_PKT: + chan = 1; /* BCSP LE channel */ + rel = 0; /* unreliable channel */ + break; + case BCSP_ACK_PKT: + chan = 0; /* BCSP internal channel */ + rel = 0; /* unreliable channel */ + break; + default: + BT_ERR("Unknown packet type"); + return NULL; + } + + if (hciextn && chan == 5) { + __le16 opcode = ((struct hci_command_hdr *)data)->opcode; + + /* Vendor specific commands */ + if (hci_opcode_ogf(__le16_to_cpu(opcode)) == 0x3f) { + u8 desc = *(data + HCI_COMMAND_HDR_SIZE); + if ((desc & 0xf0) == 0xc0) { + data += HCI_COMMAND_HDR_SIZE + 1; + len -= HCI_COMMAND_HDR_SIZE + 1; + chan = desc & 0x0f; + } + } + } + + /* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2 + (because bytes 0xc0 and 0xdb are escaped, worst case is + when the packet is all made of 0xc0 and 0xdb :) ) + + 2 (0xc0 delimiters at start and end). 
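bcsp_slip_msgdelim() and bcsp_slip_one_byte() above implement the SLIP-style byte stuffing BCSP puts on the wire: 0xc0 delimits packets, so a payload 0xc0 becomes 0xdb 0xdc and 0xdb becomes 0xdb 0xdd. That is also where the worst-case allocation in the next hunk comes from: every byte of payload, header and CRC may double, plus the two delimiters. A stand-alone sketch of the encoder, using only what the driver shows:

/* SLIP-style byte stuffing as used by BCSP (user-space sketch).
 * Worst case: every payload/header/CRC byte expands to two bytes, plus
 * one 0xc0 delimiter at each end, hence (len + 6) * 2 + 2 in
 * bcsp_prepare_pkt() for len payload bytes, 4 header bytes, 2 CRC bytes. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Encode 'len' bytes into 'out' (caller provides worst-case space);
 * returns the number of bytes written, delimiters included. */
static size_t slip_encode(const uint8_t *in, size_t len, uint8_t *out)
{
        size_t n = 0;

        out[n++] = 0xc0;                        /* opening delimiter */
        for (size_t i = 0; i < len; i++) {
                switch (in[i]) {
                case 0xc0:
                        out[n++] = 0xdb; out[n++] = 0xdc;
                        break;
                case 0xdb:
                        out[n++] = 0xdb; out[n++] = 0xdd;
                        break;
                default:
                        out[n++] = in[i];
                }
        }
        out[n++] = 0xc0;                        /* closing delimiter */
        return n;
}

int main(void)
{
        const uint8_t payload[] = { 0x01, 0xc0, 0x02, 0xdb, 0x03 };
        uint8_t wire[sizeof(payload) * 2 + 2];
        size_t n = slip_encode(payload, sizeof(payload), wire);

        for (size_t i = 0; i < n; i++)
                printf("%02x ", wire[i]);       /* c0 01 db dc 02 db dd 03 c0 */
        printf("\n");
        return 0;
}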
*/ + + nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC); + if (!nskb) + return NULL; + + bt_cb(nskb)->pkt_type = pkt_type; + + bcsp_slip_msgdelim(nskb); + + hdr[0] = bcsp->rxseq_txack << 3; + bcsp->txack_req = 0; + BT_DBG("We request packet no %u to card", bcsp->rxseq_txack); + + if (rel) { + hdr[0] |= 0x80 + bcsp->msgq_txseq; + BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq); + bcsp->msgq_txseq = ++(bcsp->msgq_txseq) & 0x07; + } + + if (bcsp->use_crc) + hdr[0] |= 0x40; + + hdr[1] = ((len << 4) & 0xff) | chan; + hdr[2] = len >> 4; + hdr[3] = ~(hdr[0] + hdr[1] + hdr[2]); + + /* Put BCSP header */ + for (i = 0; i < 4; i++) { + bcsp_slip_one_byte(nskb, hdr[i]); + + if (bcsp->use_crc) + bcsp_crc_update(&bcsp_txmsg_crc, hdr[i]); + } + + /* Put payload */ + for (i = 0; i < len; i++) { + bcsp_slip_one_byte(nskb, data[i]); + + if (bcsp->use_crc) + bcsp_crc_update(&bcsp_txmsg_crc, data[i]); + } + + /* Put CRC */ + if (bcsp->use_crc) { + bcsp_txmsg_crc = bitrev16(bcsp_txmsg_crc); + bcsp_slip_one_byte(nskb, (u8) ((bcsp_txmsg_crc >> 8) & 0x00ff)); + bcsp_slip_one_byte(nskb, (u8) (bcsp_txmsg_crc & 0x00ff)); + } + + bcsp_slip_msgdelim(nskb); + return nskb; +} + +/* This is a rewrite of pkt_avail in ABCSP */ +static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) +{ + struct bcsp_struct *bcsp = hu->priv; + unsigned long flags; + struct sk_buff *skb; + + /* First of all, check for unreliable messages in the queue, + since they have priority */ + + if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) { + struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type); + if (nskb) { + kfree_skb(skb); + return nskb; + } else { + skb_queue_head(&bcsp->unrel, skb); + BT_ERR("Could not dequeue pkt because alloc_skb failed"); + } + } + + /* Now, try to send a reliable pkt. We can only send a + reliable packet if the number of packets sent but not yet ack'ed + is < than the winsize */ + + spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); + + if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) { + struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type); + if (nskb) { + __skb_queue_tail(&bcsp->unack, skb); + mod_timer(&bcsp->tbcsp, jiffies + HZ / 4); + spin_unlock_irqrestore(&bcsp->unack.lock, flags); + return nskb; + } else { + skb_queue_head(&bcsp->rel, skb); + BT_ERR("Could not dequeue pkt because alloc_skb failed"); + } + } + + spin_unlock_irqrestore(&bcsp->unack.lock, flags); + + /* We could not send a reliable packet, either because there are + none or because there are too many unack'ed pkts. Did we receive + any packets we have not acknowledged yet ? 
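The four header bytes assembled in bcsp_prepare_pkt() above carry: the reliable flag in bit 7 and the 3-bit sequence number in bits 0..2 of byte 0, the 3-bit ack in bits 3..5, the CRC-present flag in bit 6; the low 4 bits of the 12-bit length together with the 4-bit channel in byte 1 and the high 8 length bits in byte 2; and a one's-complement checksum of the first three bytes in byte 3, which is exactly what bcsp_recv() later verifies. Packing and checking that header in isolation (sketch):

/* BCSP 4-byte packet header: pack and verify (sketch).
 * hdr[0]: bit7 reliable, bit6 CRC present, bits5..3 ack, bits2..0 seq
 * hdr[1]: low 4 bits of length (bits7..4) | channel (bits3..0)
 * hdr[2]: high 8 bits of the 12-bit length
 * hdr[3]: one's-complement checksum of hdr[0..2] */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void bcsp_pack_hdr(uint8_t hdr[4], int reliable, int use_crc,
                          unsigned seq, unsigned ack,
                          unsigned chan, unsigned len)
{
        hdr[0] = (uint8_t)((ack & 0x07) << 3);
        if (reliable)
                hdr[0] |= 0x80 | (seq & 0x07);
        if (use_crc)
                hdr[0] |= 0x40;
        hdr[1] = (uint8_t)(((len << 4) & 0xf0) | (chan & 0x0f));
        hdr[2] = (uint8_t)(len >> 4);
        hdr[3] = (uint8_t)~(hdr[0] + hdr[1] + hdr[2]);
}

static int bcsp_hdr_ok(const uint8_t hdr[4])
{
        return (uint8_t)~(hdr[0] + hdr[1] + hdr[2]) == hdr[3];
}

int main(void)
{
        uint8_t hdr[4];

        /* reliable ACL packet (channel 6), seq 2, ack 5, 0x123 bytes, CRC on */
        bcsp_pack_hdr(hdr, 1, 1, 2, 5, 6, 0x123);
        assert(bcsp_hdr_ok(hdr));

        unsigned len = (hdr[1] >> 4) | (hdr[2] << 4);   /* as in bcsp_recv() */
        printf("hdr %02x %02x %02x %02x len 0x%03x\n",
               hdr[0], hdr[1], hdr[2], hdr[3], len);    /* ea 36 12 cd, 0x123 */
        return 0;
}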
*/ + + if (bcsp->txack_req) { + /* if so, craft an empty ACK pkt and send it on BCSP unreliable + channel 0 */ + struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT); + return nskb; + } + + /* We have nothing to send */ + return NULL; +} + +static int bcsp_flush(struct hci_uart *hu) +{ + BT_DBG("hu %p", hu); + return 0; +} + +/* Remove ack'ed packets */ +static void bcsp_pkt_cull(struct bcsp_struct *bcsp) +{ + struct sk_buff *skb, *tmp; + unsigned long flags; + int i, pkts_to_be_removed; + u8 seqno; + + spin_lock_irqsave(&bcsp->unack.lock, flags); + + pkts_to_be_removed = skb_queue_len(&bcsp->unack); + seqno = bcsp->msgq_txseq; + + while (pkts_to_be_removed) { + if (bcsp->rxack == seqno) + break; + pkts_to_be_removed--; + seqno = (seqno - 1) & 0x07; + } + + if (bcsp->rxack != seqno) + BT_ERR("Peer acked invalid packet"); + + BT_DBG("Removing %u pkts out of %u, up to seqno %u", + pkts_to_be_removed, skb_queue_len(&bcsp->unack), + (seqno - 1) & 0x07); + + i = 0; + skb_queue_walk_safe(&bcsp->unack, skb, tmp) { + if (i >= pkts_to_be_removed) + break; + i++; + + __skb_unlink(skb, &bcsp->unack); + kfree_skb(skb); + } + + if (skb_queue_empty(&bcsp->unack)) + del_timer(&bcsp->tbcsp); + + spin_unlock_irqrestore(&bcsp->unack.lock, flags); + + if (i != pkts_to_be_removed) + BT_ERR("Removed only %u out of %u pkts", i, pkts_to_be_removed); +} + +/* Handle BCSP link-establishment packets. When we + detect a "sync" packet, symptom that the BT module has reset, + we do nothing :) (yet) */ +static void bcsp_handle_le_pkt(struct hci_uart *hu) +{ + struct bcsp_struct *bcsp = hu->priv; + u8 conf_pkt[4] = { 0xad, 0xef, 0xac, 0xed }; + u8 conf_rsp_pkt[4] = { 0xde, 0xad, 0xd0, 0xd0 }; + u8 sync_pkt[4] = { 0xda, 0xdc, 0xed, 0xed }; + + /* spot "conf" pkts and reply with a "conf rsp" pkt */ + if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 && + !memcmp(&bcsp->rx_skb->data[4], conf_pkt, 4)) { + struct sk_buff *nskb = alloc_skb(4, GFP_ATOMIC); + + BT_DBG("Found a LE conf pkt"); + if (!nskb) + return; + memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4); + bt_cb(nskb)->pkt_type = BCSP_LE_PKT; + + skb_queue_head(&bcsp->unrel, nskb); + hci_uart_tx_wakeup(hu); + } + /* Spot "sync" pkts. If we find one...disaster! 
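bcsp_pkt_cull() above walks backwards from the next sequence number to be used (msgq_txseq) until it meets the peer's ack (rxack), which tells it how many of the oldest unacknowledged packets can be released; everything is modulo 8 because BCSP sequence numbers are 3 bits wide and the window is 4. The same arithmetic in isolation, as a small self-checking sketch:

/* How many of the oldest unacked packets does an ack release?
 * 3-bit sequence space, window <= 4; mirrors the loop in bcsp_pkt_cull(). */
#include <assert.h>
#include <stdio.h>

static unsigned bcsp_acked(unsigned unacked, unsigned next_txseq, unsigned rxack)
{
        unsigned removed = unacked;             /* assume everything is acked ... */
        unsigned seqno = next_txseq & 0x07;

        while (removed) {                       /* ... then walk back to rxack */
                if ((rxack & 0x07) == seqno)
                        break;
                removed--;
                seqno = (seqno - 1) & 0x07;
        }
        /* the driver logs "Peer acked invalid packet" if rxack was never hit */
        return removed;                         /* oldest packets now acknowledged */
}

int main(void)
{
        /* three packets in flight with seq 2, 3, 4; next_txseq is 5 */
        assert(bcsp_acked(3, 5, 2) == 0);       /* peer still expects seq 2 */
        assert(bcsp_acked(3, 5, 4) == 2);       /* seq 2 and 3 acknowledged */
        assert(bcsp_acked(3, 5, 5) == 3);       /* everything acknowledged */
        printf("ok\n");
        return 0;
}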
*/ + else if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 && + !memcmp(&bcsp->rx_skb->data[4], sync_pkt, 4)) { + BT_ERR("Found a LE sync pkt, card has reset"); + } +} + +static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char byte) +{ + const u8 c0 = 0xc0, db = 0xdb; + + switch (bcsp->rx_esc_state) { + case BCSP_ESCSTATE_NOESC: + switch (byte) { + case 0xdb: + bcsp->rx_esc_state = BCSP_ESCSTATE_ESC; + break; + default: + memcpy(skb_put(bcsp->rx_skb, 1), &byte, 1); + if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && + bcsp->rx_state != BCSP_W4_CRC) + bcsp_crc_update(&bcsp->message_crc, byte); + bcsp->rx_count--; + } + break; + + case BCSP_ESCSTATE_ESC: + switch (byte) { + case 0xdc: + memcpy(skb_put(bcsp->rx_skb, 1), &c0, 1); + if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && + bcsp->rx_state != BCSP_W4_CRC) + bcsp_crc_update(&bcsp-> message_crc, 0xc0); + bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; + bcsp->rx_count--; + break; + + case 0xdd: + memcpy(skb_put(bcsp->rx_skb, 1), &db, 1); + if ((bcsp->rx_skb-> data[0] & 0x40) != 0 && + bcsp->rx_state != BCSP_W4_CRC) + bcsp_crc_update(&bcsp-> message_crc, 0xdb); + bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; + bcsp->rx_count--; + break; + + default: + BT_ERR ("Invalid byte %02x after esc byte", byte); + kfree_skb(bcsp->rx_skb); + bcsp->rx_skb = NULL; + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 0; + } + } +} + +static void bcsp_complete_rx_pkt(struct hci_uart *hu) +{ + struct bcsp_struct *bcsp = hu->priv; + int pass_up; + + if (bcsp->rx_skb->data[0] & 0x80) { /* reliable pkt */ + BT_DBG("Received seqno %u from card", bcsp->rxseq_txack); + bcsp->rxseq_txack++; + bcsp->rxseq_txack %= 0x8; + bcsp->txack_req = 1; + + /* If needed, transmit an ack pkt */ + hci_uart_tx_wakeup(hu); + } + + bcsp->rxack = (bcsp->rx_skb->data[0] >> 3) & 0x07; + BT_DBG("Request for pkt %u from card", bcsp->rxack); + + bcsp_pkt_cull(bcsp); + if ((bcsp->rx_skb->data[1] & 0x0f) == 6 && + bcsp->rx_skb->data[0] & 0x80) { + bt_cb(bcsp->rx_skb)->pkt_type = HCI_ACLDATA_PKT; + pass_up = 1; + } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 && + bcsp->rx_skb->data[0] & 0x80) { + bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT; + pass_up = 1; + } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) { + bt_cb(bcsp->rx_skb)->pkt_type = HCI_SCODATA_PKT; + pass_up = 1; + } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 && + !(bcsp->rx_skb->data[0] & 0x80)) { + bcsp_handle_le_pkt(hu); + pass_up = 0; + } else + pass_up = 0; + + if (!pass_up) { + struct hci_event_hdr hdr; + u8 desc = (bcsp->rx_skb->data[1] & 0x0f); + + if (desc != 0 && desc != 1) { + if (hciextn) { + desc |= 0xc0; + skb_pull(bcsp->rx_skb, 4); + memcpy(skb_push(bcsp->rx_skb, 1), &desc, 1); + + hdr.evt = 0xff; + hdr.plen = bcsp->rx_skb->len; + memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE); + bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT; + + hci_recv_frame(bcsp->rx_skb); + } else { + BT_ERR ("Packet for unknown channel (%u %s)", + bcsp->rx_skb->data[1] & 0x0f, + bcsp->rx_skb->data[0] & 0x80 ? 
+ "reliable" : "unreliable"); + kfree_skb(bcsp->rx_skb); + } + } else + kfree_skb(bcsp->rx_skb); + } else { + /* Pull out BCSP hdr */ + skb_pull(bcsp->rx_skb, 4); + + hci_recv_frame(bcsp->rx_skb); + } + + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_skb = NULL; +} + +static u16 bscp_get_crc(struct bcsp_struct *bcsp) +{ + return get_unaligned_be16(&bcsp->rx_skb->data[bcsp->rx_skb->len - 2]); +} + +/* Recv data */ +static int bcsp_recv(struct hci_uart *hu, void *data, int count) +{ + struct bcsp_struct *bcsp = hu->priv; + register unsigned char *ptr; + + BT_DBG("hu %p count %d rx_state %d rx_count %ld", + hu, count, bcsp->rx_state, bcsp->rx_count); + + ptr = data; + while (count) { + if (bcsp->rx_count) { + if (*ptr == 0xc0) { + BT_ERR("Short BCSP packet"); + kfree_skb(bcsp->rx_skb); + bcsp->rx_state = BCSP_W4_PKT_START; + bcsp->rx_count = 0; + } else + bcsp_unslip_one_byte(bcsp, *ptr); + + ptr++; count--; + continue; + } + + switch (bcsp->rx_state) { + case BCSP_W4_BCSP_HDR: + if ((0xff & (u8) ~ (bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] + + bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) { + BT_ERR("Error in BCSP hdr checksum"); + kfree_skb(bcsp->rx_skb); + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 0; + continue; + } + if (bcsp->rx_skb->data[0] & 0x80 /* reliable pkt */ + && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) { + BT_ERR ("Out-of-order packet arrived, got %u expected %u", + bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack); + + kfree_skb(bcsp->rx_skb); + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 0; + continue; + } + bcsp->rx_state = BCSP_W4_DATA; + bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) + + (bcsp->rx_skb->data[2] << 4); /* May be 0 */ + continue; + + case BCSP_W4_DATA: + if (bcsp->rx_skb->data[0] & 0x40) { /* pkt with crc */ + bcsp->rx_state = BCSP_W4_CRC; + bcsp->rx_count = 2; + } else + bcsp_complete_rx_pkt(hu); + continue; + + case BCSP_W4_CRC: + if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) { + BT_ERR ("Checksum failed: computed %04x received %04x", + bitrev16(bcsp->message_crc), + bscp_get_crc(bcsp)); + + kfree_skb(bcsp->rx_skb); + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 0; + continue; + } + skb_trim(bcsp->rx_skb, bcsp->rx_skb->len - 2); + bcsp_complete_rx_pkt(hu); + continue; + + case BCSP_W4_PKT_DELIMITER: + switch (*ptr) { + case 0xc0: + bcsp->rx_state = BCSP_W4_PKT_START; + break; + default: + /*BT_ERR("Ignoring byte %02x", *ptr);*/ + break; + } + ptr++; count--; + break; + + case BCSP_W4_PKT_START: + switch (*ptr) { + case 0xc0: + ptr++; count--; + break; + + default: + bcsp->rx_state = BCSP_W4_BCSP_HDR; + bcsp->rx_count = 4; + bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; + BCSP_CRC_INIT(bcsp->message_crc); + + /* Do not increment ptr or decrement count + * Allocate packet. Max len of a BCSP pkt= + * 0xFFF (payload) +4 (header) +2 (crc) */ + + bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC); + if (!bcsp->rx_skb) { + BT_ERR("Can't allocate mem for new packet"); + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + bcsp->rx_count = 0; + return 0; + } + bcsp->rx_skb->dev = (void *) hu->hdev; + break; + } + break; + } + } + return count; +} + + /* Arrange to retransmit all messages in the relq. 
*/ +static void bcsp_timed_event(unsigned long arg) +{ + struct hci_uart *hu = (struct hci_uart *) arg; + struct bcsp_struct *bcsp = hu->priv; + struct sk_buff *skb; + unsigned long flags; + + BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen); + + spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); + + while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) { + bcsp->msgq_txseq = (bcsp->msgq_txseq - 1) & 0x07; + skb_queue_head(&bcsp->rel, skb); + } + + spin_unlock_irqrestore(&bcsp->unack.lock, flags); + + hci_uart_tx_wakeup(hu); +} + +static int bcsp_open(struct hci_uart *hu) +{ + struct bcsp_struct *bcsp; + + BT_DBG("hu %p", hu); + + bcsp = kzalloc(sizeof(*bcsp), GFP_ATOMIC); + if (!bcsp) + return -ENOMEM; + + hu->priv = bcsp; + skb_queue_head_init(&bcsp->unack); + skb_queue_head_init(&bcsp->rel); + skb_queue_head_init(&bcsp->unrel); + + init_timer(&bcsp->tbcsp); + bcsp->tbcsp.function = bcsp_timed_event; + bcsp->tbcsp.data = (u_long) hu; + + bcsp->rx_state = BCSP_W4_PKT_DELIMITER; + + if (txcrc) + bcsp->use_crc = 1; + + return 0; +} + +static int bcsp_close(struct hci_uart *hu) +{ + struct bcsp_struct *bcsp = hu->priv; + hu->priv = NULL; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&bcsp->unack); + skb_queue_purge(&bcsp->rel); + skb_queue_purge(&bcsp->unrel); + del_timer(&bcsp->tbcsp); + + kfree(bcsp); + return 0; +} + +static struct hci_uart_proto bcsp = { + .id = HCI_UART_BCSP, + .open = bcsp_open, + .close = bcsp_close, + .enqueue = bcsp_enqueue, + .dequeue = bcsp_dequeue, + .recv = bcsp_recv, + .flush = bcsp_flush +}; + +int bcsp_init(void) +{ + int err = hci_uart_register_proto(&bcsp); + + if (!err) + BT_INFO("HCI BCSP protocol initialized"); + else + BT_ERR("HCI BCSP protocol registration failed"); + + return err; +} + +int bcsp_deinit(void) +{ + return hci_uart_unregister_proto(&bcsp); +} + +module_param(txcrc, bool, 0644); +MODULE_PARM_DESC(txcrc, "Transmit CRC with every BCSP packet"); + +module_param(hciextn, bool, 0644); +MODULE_PARM_DESC(hciextn, "Convert HCI Extensions into BCSP packets"); diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c new file mode 100644 index 0000000..c0ce813 --- /dev/null +++ b/drivers/bluetooth/hci_h4.c @@ -0,0 +1,290 @@ +/* + * + * Bluetooth HCI UART driver + * + * Copyright (C) 2000-2001 Qualcomm Incorporated + * Copyright (C) 2002-2003 Maxim Krasnyansky + * Copyright (C) 2004-2005 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
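bcsp_timed_event() above is the recovery path: when the quarter-second ack timer fires, every packet still sitting in the unack queue is pushed back onto the head of the reliable queue, newest first so the original order is preserved, and msgq_txseq is wound back modulo 8 for each one. The queue shuffling reduced to plain C, with arrays standing in for sk_buff queues (illustration only):

/* Timeout handling as in bcsp_timed_event(): move everything still
 * unacknowledged back to the head of the reliable queue and rewind
 * msgq_txseq by the same amount, modulo 8. */
#include <stdio.h>

#define QMAX 8

struct q { int pkt[QMAX]; int len; };

static int pop_tail(struct q *q) { return q->pkt[--q->len]; }

static void push_head(struct q *q, int p)
{
        for (int i = q->len; i > 0; i--)
                q->pkt[i] = q->pkt[i - 1];
        q->pkt[0] = p;
        q->len++;
}

int main(void)
{
        struct q unack = { { 2, 3, 4 }, 3 };    /* seq 2,3,4 sent, not yet acked */
        struct q rel   = { { 0 }, 0 };
        unsigned msgq_txseq = 5;                /* next sequence number to use */

        /* dequeue from the tail of unack, requeue at the head of rel:
         * newest packet first, so rel ends up ordered 2, 3, 4 again */
        while (unack.len) {
                msgq_txseq = (msgq_txseq - 1) & 0x07;
                push_head(&rel, pop_tail(&unack));
        }

        printf("txseq rewound to %u, rel:", msgq_txseq);
        for (int i = 0; i < rel.len; i++)
                printf(" %d", rel.pkt[i]);
        printf("\n");                           /* txseq rewound to 2, rel: 2 3 4 */
        return 0;
}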
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +#define VERSION "1.2" + +struct h4_struct { + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; + struct sk_buff_head txq; +}; + +/* H4 receiver States */ +#define H4_W4_PACKET_TYPE 0 +#define H4_W4_EVENT_HDR 1 +#define H4_W4_ACL_HDR 2 +#define H4_W4_SCO_HDR 3 +#define H4_W4_DATA 4 + +/* Initialize protocol */ +static int h4_open(struct hci_uart *hu) +{ + struct h4_struct *h4; + + BT_DBG("hu %p", hu); + + h4 = kzalloc(sizeof(*h4), GFP_ATOMIC); + if (!h4) + return -ENOMEM; + + skb_queue_head_init(&h4->txq); + + hu->priv = h4; + return 0; +} + +/* Flush protocol data */ +static int h4_flush(struct hci_uart *hu) +{ + struct h4_struct *h4 = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&h4->txq); + + return 0; +} + +/* Close protocol */ +static int h4_close(struct hci_uart *hu) +{ + struct h4_struct *h4 = hu->priv; + + hu->priv = NULL; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&h4->txq); + + kfree_skb(h4->rx_skb); + + hu->priv = NULL; + kfree(h4); + + return 0; +} + +/* Enqueue frame for transmittion (padding, crc, etc) */ +static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct h4_struct *h4 = hu->priv; + + BT_DBG("hu %p skb %p", hu, skb); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + skb_queue_tail(&h4->txq, skb); + + return 0; +} + +static inline int h4_check_data_len(struct h4_struct *h4, int len) +{ + register int room = skb_tailroom(h4->rx_skb); + + BT_DBG("len %d room %d", len, room); + + if (!len) { + hci_recv_frame(h4->rx_skb); + } else if (len > room) { + BT_ERR("Data length is too large"); + kfree_skb(h4->rx_skb); + } else { + h4->rx_state = H4_W4_DATA; + h4->rx_count = len; + return len; + } + + h4->rx_state = H4_W4_PACKET_TYPE; + h4->rx_skb = NULL; + h4->rx_count = 0; + + return 0; +} + +/* Recv data */ +static int h4_recv(struct hci_uart *hu, void *data, int count) +{ + struct h4_struct *h4 = hu->priv; + register char *ptr; + struct hci_event_hdr *eh; + struct hci_acl_hdr *ah; + struct hci_sco_hdr *sh; + register int len, type, dlen; + + BT_DBG("hu %p count %d rx_state %ld rx_count %ld", + hu, count, h4->rx_state, h4->rx_count); + + ptr = data; + while (count) { + if (h4->rx_count) { + len = min_t(unsigned int, h4->rx_count, count); + memcpy(skb_put(h4->rx_skb, len), ptr, len); + h4->rx_count -= len; count -= len; ptr += len; + + if (h4->rx_count) + continue; + + switch (h4->rx_state) { + case H4_W4_DATA: + BT_DBG("Complete data"); + + hci_recv_frame(h4->rx_skb); + + h4->rx_state = H4_W4_PACKET_TYPE; + h4->rx_skb = NULL; + continue; + + case H4_W4_EVENT_HDR: + eh = hci_event_hdr(h4->rx_skb); + + BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen); + + h4_check_data_len(h4, eh->plen); + continue; + + case H4_W4_ACL_HDR: + ah = hci_acl_hdr(h4->rx_skb); + dlen = __le16_to_cpu(ah->dlen); + + BT_DBG("ACL header: dlen %d", dlen); + + h4_check_data_len(h4, dlen); + continue; + + case H4_W4_SCO_HDR: + sh = hci_sco_hdr(h4->rx_skb); + + BT_DBG("SCO header: dlen %d", sh->dlen); + + h4_check_data_len(h4, sh->dlen); + continue; + } + } + + /* 
H4_W4_PACKET_TYPE */ + switch (*ptr) { + case HCI_EVENT_PKT: + BT_DBG("Event packet"); + h4->rx_state = H4_W4_EVENT_HDR; + h4->rx_count = HCI_EVENT_HDR_SIZE; + type = HCI_EVENT_PKT; + break; + + case HCI_ACLDATA_PKT: + BT_DBG("ACL packet"); + h4->rx_state = H4_W4_ACL_HDR; + h4->rx_count = HCI_ACL_HDR_SIZE; + type = HCI_ACLDATA_PKT; + break; + + case HCI_SCODATA_PKT: + BT_DBG("SCO packet"); + h4->rx_state = H4_W4_SCO_HDR; + h4->rx_count = HCI_SCO_HDR_SIZE; + type = HCI_SCODATA_PKT; + break; + + default: + BT_ERR("Unknown HCI packet type %2.2x", (__u8)*ptr); + hu->hdev->stat.err_rx++; + ptr++; count--; + continue; + }; + + ptr++; count--; + + /* Allocate packet */ + h4->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); + if (!h4->rx_skb) { + BT_ERR("Can't allocate mem for new packet"); + h4->rx_state = H4_W4_PACKET_TYPE; + h4->rx_count = 0; + return 0; + } + + h4->rx_skb->dev = (void *) hu->hdev; + bt_cb(h4->rx_skb)->pkt_type = type; + } + + return count; +} + +static struct sk_buff *h4_dequeue(struct hci_uart *hu) +{ + struct h4_struct *h4 = hu->priv; + return skb_dequeue(&h4->txq); +} + +static struct hci_uart_proto h4p = { + .id = HCI_UART_H4, + .open = h4_open, + .close = h4_close, + .recv = h4_recv, + .enqueue = h4_enqueue, + .dequeue = h4_dequeue, + .flush = h4_flush, +}; + +int h4_init(void) +{ + int err = hci_uart_register_proto(&h4p); + + if (!err) + BT_INFO("HCI H4 protocol initialized"); + else + BT_ERR("HCI H4 protocol registration failed"); + + return err; +} + +int h4_deinit(void) +{ + return hci_uart_unregister_proto(&h4p); +} diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c new file mode 100644 index 0000000..116fc66 --- /dev/null +++ b/drivers/bluetooth/hci_ldisc.c @@ -0,0 +1,587 @@ +/* + * + * Bluetooth HCI UART driver + * + * Copyright (C) 2000-2001 Qualcomm Incorporated + * Copyright (C) 2002-2003 Maxim Krasnyansky + * Copyright (C) 2004-2005 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
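By contrast with BCSP, the H4 protocol above adds no sequencing or CRC at all: a frame is one packet-type byte followed by the ordinary HCI header and payload, so the receive state machine only needs each header's size and the location of its length field (2-byte event header with an 8-bit plen, 4-byte ACL header with a 16-bit little-endian dlen, 3-byte SCO header with an 8-bit dlen). A rough user-space illustration of the same dispatch over a flat buffer:

/* Minimal H4 frame walker (sketch): type byte, HCI header, payload.
 * The type values are the standard HCI packet indicators. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HCI_ACLDATA_PKT 0x02
#define HCI_SCODATA_PKT 0x03
#define HCI_EVENT_PKT   0x04

/* Returns the total frame length starting at buf, or -1 for an unknown type. */
static int h4_frame_len(const uint8_t *buf)
{
        switch (buf[0]) {
        case HCI_EVENT_PKT:                     /* evt(1) + plen(1) + payload */
                return 1 + 2 + buf[2];
        case HCI_ACLDATA_PKT:                   /* handle(2) + dlen(2, LE) */
                return 1 + 4 + (buf[3] | (buf[4] << 8));
        case HCI_SCODATA_PKT:                   /* handle(2) + dlen(1) */
                return 1 + 3 + buf[3];
        default:
                return -1;
        }
}

int main(void)
{
        /* Command Complete event for HCI_Reset: 04 0e 04 01 03 0c 00 */
        const uint8_t evt[] = { 0x04, 0x0e, 0x04, 0x01, 0x03, 0x0c, 0x00 };

        printf("frame length %d (buffer %zu)\n",
               h4_frame_len(evt), sizeof(evt)); /* both are 7 */
        return 0;
}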
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +#define VERSION "2.2" + +static int reset = 0; + +static struct hci_uart_proto *hup[HCI_UART_MAX_PROTO]; + +int hci_uart_register_proto(struct hci_uart_proto *p) +{ + if (p->id >= HCI_UART_MAX_PROTO) + return -EINVAL; + + if (hup[p->id]) + return -EEXIST; + + hup[p->id] = p; + + return 0; +} + +int hci_uart_unregister_proto(struct hci_uart_proto *p) +{ + if (p->id >= HCI_UART_MAX_PROTO) + return -EINVAL; + + if (!hup[p->id]) + return -EINVAL; + + hup[p->id] = NULL; + + return 0; +} + +static struct hci_uart_proto *hci_uart_get_proto(unsigned int id) +{ + if (id >= HCI_UART_MAX_PROTO) + return NULL; + + return hup[id]; +} + +static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type) +{ + struct hci_dev *hdev = hu->hdev; + + /* Update HCI stat counters */ + switch (pkt_type) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + hdev->stat.cmd_tx++; + break; + } +} + +static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) +{ + struct sk_buff *skb = hu->tx_skb; + + if (!skb) + skb = hu->proto->dequeue(hu); + else + hu->tx_skb = NULL; + + return skb; +} + +int hci_uart_tx_wakeup(struct hci_uart *hu) +{ + struct tty_struct *tty = hu->tty; + struct hci_dev *hdev = hu->hdev; + struct sk_buff *skb; + + if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) { + set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); + return 0; + } + + BT_DBG(""); + +restart: + clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); + + while ((skb = hci_uart_dequeue(hu))) { + int len; + + set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + len = tty->ops->write(tty, skb->data, skb->len); + hdev->stat.byte_tx += len; + + skb_pull(skb, len); + if (skb->len) { + hu->tx_skb = skb; + break; + } + + hci_uart_tx_complete(hu, bt_cb(skb)->pkt_type); + kfree_skb(skb); + } + + if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)) + goto restart; + + clear_bit(HCI_UART_SENDING, &hu->tx_state); + return 0; +} + +/* ------- Interface to HCI layer ------ */ +/* Initialize device */ +static int hci_uart_open(struct hci_dev *hdev) +{ + BT_DBG("%s %p", hdev->name, hdev); + + /* Nothing to do for UART driver */ + + set_bit(HCI_RUNNING, &hdev->flags); + + return 0; +} + +/* Reset device */ +static int hci_uart_flush(struct hci_dev *hdev) +{ + struct hci_uart *hu = (struct hci_uart *) hdev->driver_data; + struct tty_struct *tty = hu->tty; + + BT_DBG("hdev %p tty %p", hdev, tty); + + if (hu->tx_skb) { + kfree_skb(hu->tx_skb); hu->tx_skb = NULL; + } + + /* Flush any pending characters in the driver and discipline. 
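hci_uart_tx_wakeup() above uses two bits rather than a lock around the whole TX path: a caller that finds HCI_UART_SENDING already set only records HCI_UART_TX_WAKEUP and returns, and the context that owns SENDING re-checks the wakeup bit after draining so no kick is lost. The same flag discipline in user space with C11 atomics; this is a sketch of the pattern, not a drop-in replacement for the kernel code:

/* "Single sender, lossless kick" pattern from hci_uart_tx_wakeup(),
 * redone with C11 atomics (illustration only). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool sending;     /* somebody is inside the TX loop */
static atomic_bool wakeup;      /* a kick arrived while they were busy */

static void drain_queue(void)
{
        /* stand-in for "dequeue and write to the tty until empty" */
        printf("draining tx queue\n");
}

void tx_wakeup(void)
{
        /* If another context already owns the TX loop, just record the kick. */
        if (atomic_exchange(&sending, true)) {
                atomic_store(&wakeup, true);
                return;
        }

        do {
                atomic_store(&wakeup, false);
                drain_queue();
                /* re-check: a kick may have arrived while we were draining */
        } while (atomic_load(&wakeup));

        atomic_store(&sending, false);
}

int main(void)
{
        tx_wakeup();            /* drains once */
        return 0;
}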
*/ + tty_ldisc_flush(tty); + tty_driver_flush_buffer(tty); + + if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) + hu->proto->flush(hu); + + return 0; +} + +/* Close device */ +static int hci_uart_close(struct hci_dev *hdev) +{ + BT_DBG("hdev %p", hdev); + + if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + hci_uart_flush(hdev); + hdev->flush = NULL; + return 0; +} + +/* Send frames from HCI layer */ +static int hci_uart_send_frame(struct sk_buff *skb) +{ + struct hci_dev* hdev = (struct hci_dev *) skb->dev; + struct tty_struct *tty; + struct hci_uart *hu; + + if (!hdev) { + BT_ERR("Frame for unknown device (hdev=NULL)"); + return -ENODEV; + } + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -EBUSY; + + hu = (struct hci_uart *) hdev->driver_data; + tty = hu->tty; + + BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); + + hu->proto->enqueue(hu, skb); + + hci_uart_tx_wakeup(hu); + + return 0; +} + +static void hci_uart_destruct(struct hci_dev *hdev) +{ + if (!hdev) + return; + + BT_DBG("%s", hdev->name); + kfree(hdev->driver_data); +} + +/* ------ LDISC part ------ */ +/* hci_uart_tty_open + * + * Called when line discipline changed to HCI_UART. + * + * Arguments: + * tty pointer to tty info structure + * Return Value: + * 0 if success, otherwise error code + */ +static int hci_uart_tty_open(struct tty_struct *tty) +{ + struct hci_uart *hu = (void *) tty->disc_data; + + BT_DBG("tty %p", tty); + + if (hu) + return -EEXIST; + + if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) { + BT_ERR("Can't allocate control structure"); + return -ENFILE; + } + + tty->disc_data = hu; + hu->tty = tty; + tty->receive_room = 65536; + + spin_lock_init(&hu->rx_lock); + + /* Flush any pending characters in the driver and line discipline. */ + + /* FIXME: why is this needed. Note don't use ldisc_ref here as the + open path is before the ldisc is referencable */ + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) + if (tty->ldisc->ops->flush_buffer) + tty->ldisc->ops->flush_buffer(tty); +#else + if (tty->ldisc.ops->flush_buffer) + tty->ldisc.ops->flush_buffer(tty); +#endif + tty_driver_flush_buffer(tty); + + return 0; +} + +/* hci_uart_tty_close() + * + * Called when the line discipline is changed to something + * else, the tty is closed, or the tty detects a hangup. + */ +static void hci_uart_tty_close(struct tty_struct *tty) +{ + struct hci_uart *hu = (void *)tty->disc_data; + + BT_DBG("tty %p", tty); + + /* Detach from the tty */ + tty->disc_data = NULL; + + if (hu) { + struct hci_dev *hdev = hu->hdev; + + if (hdev) + hci_uart_close(hdev); + + if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) { + hu->proto->close(hu); + hci_unregister_dev(hdev); + hci_free_dev(hdev); + } + } +} + +/* hci_uart_tty_wakeup() + * + * Callback for transmit wakeup. Called when low level + * device driver can accept more send data. + * + * Arguments: tty pointer to associated tty instance data + * Return Value: None + */ +static void hci_uart_tty_wakeup(struct tty_struct *tty) +{ + struct hci_uart *hu = (void *)tty->disc_data; + + BT_DBG(""); + + if (!hu) + return; + + clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + + if (tty != hu->tty) + return; + + if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) + hci_uart_tx_wakeup(hu); +} + +/* hci_uart_tty_receive() + * + * Called by tty low level driver when receive data is + * available. 
+ * + * Arguments: tty pointer to tty isntance data + * data pointer to received data + * flags pointer to flags for data + * count count of received data in bytes + * + * Return Value: None + */ +static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count) +{ + struct hci_uart *hu = (void *)tty->disc_data; + + if (!hu || tty != hu->tty) + return; + + if (!test_bit(HCI_UART_PROTO_SET, &hu->flags)) + return; + + spin_lock(&hu->rx_lock); + hu->proto->recv(hu, (void *) data, count); + hu->hdev->stat.byte_rx += count; + spin_unlock(&hu->rx_lock); + + tty_unthrottle(tty); +} + +static int hci_uart_register_dev(struct hci_uart *hu) +{ + struct hci_dev *hdev; + + BT_DBG(""); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + BT_ERR("Can't allocate HCI device"); + return -ENOMEM; + } + + hu->hdev = hdev; + + hdev->bus = HCI_UART; + hdev->driver_data = hu; + + hdev->open = hci_uart_open; + hdev->close = hci_uart_close; + hdev->flush = hci_uart_flush; + hdev->send = hci_uart_send_frame; + hdev->destruct = hci_uart_destruct; + + hdev->owner = THIS_MODULE; + + if (!reset) + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); + + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + hci_free_dev(hdev); + return -ENODEV; + } + + return 0; +} + +static int hci_uart_set_proto(struct hci_uart *hu, int id) +{ + struct hci_uart_proto *p; + int err; + + p = hci_uart_get_proto(id); + if (!p) + return -EPROTONOSUPPORT; + + err = p->open(hu); + if (err) + return err; + + hu->proto = p; + + err = hci_uart_register_dev(hu); + if (err) { + p->close(hu); + return err; + } + + return 0; +} + +/* hci_uart_tty_ioctl() + * + * Process IOCTL system call for the tty device. + * + * Arguments: + * + * tty pointer to tty instance data + * file pointer to open file object for device + * cmd IOCTL command code + * arg argument for IOCTL call (cmd dependent) + * + * Return Value: Command dependent + */ +static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file, + unsigned int cmd, unsigned long arg) +{ + struct hci_uart *hu = (void *)tty->disc_data; + int err = 0; + + BT_DBG(""); + + /* Verify the status of the device */ + if (!hu) + return -EBADF; + + switch (cmd) { + case HCIUARTSETPROTO: + if (!test_and_set_bit(HCI_UART_PROTO_SET, &hu->flags)) { + err = hci_uart_set_proto(hu, arg); + if (err) { + clear_bit(HCI_UART_PROTO_SET, &hu->flags); + return err; + } + } else + return -EBUSY; + break; + + case HCIUARTGETPROTO: + if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) + return hu->proto->id; + return -EUNATCH; + + case HCIUARTGETDEVICE: + if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) + return hu->hdev->id; + return -EUNATCH; + + default: +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + err = n_tty_ioctl_helper(tty, file, cmd, arg); +#else + err = n_tty_ioctl(tty, file, cmd, arg); +#endif + break; + }; + + return err; +} + +/* + * We don't provide read/write/poll interface for user space. 
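The HCIUARTSETPROTO ioctl above ties the pieces together: user space passes a protocol id, hci_uart_set_proto() looks it up in the hup[] table that h4_init()/bcsp_init()/ll_init() fill via hci_uart_register_proto(), opens it, and only then registers the HCI device. The registry itself is just an array of ops pointers indexed by id; a stripped-down sketch:

/* Tiny id-indexed ops registry, mirroring hup[] in hci_ldisc.c (sketch). */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_PROTO 5

struct proto_ops {
        unsigned int id;
        const char *name;
};

static const struct proto_ops *registry[MAX_PROTO];

static int proto_register(const struct proto_ops *p)
{
        if (p->id >= MAX_PROTO)
                return -EINVAL;
        if (registry[p->id])
                return -EEXIST;                 /* slot already taken */
        registry[p->id] = p;
        return 0;
}

static const struct proto_ops *proto_get(unsigned int id)
{
        return id < MAX_PROTO ? registry[id] : NULL;
}

int main(void)
{
        static const struct proto_ops h4   = { .id = 0, .name = "h4" };
        static const struct proto_ops bcsp = { .id = 1, .name = "bcsp" };

        proto_register(&h4);
        proto_register(&bcsp);
        printf("proto 1 is %s\n", proto_get(1)->name);  /* bcsp */
        return 0;
}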
+ */ +static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file, + unsigned char __user *buf, size_t nr) +{ + return 0; +} + +static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file, + const unsigned char *data, size_t count) +{ + return 0; +} + +static unsigned int hci_uart_tty_poll(struct tty_struct *tty, + struct file *filp, poll_table *wait) +{ + return 0; +} + +static int __init hci_uart_init(void) +{ + static struct tty_ldisc_ops hci_uart_ldisc; + int err; + + BT_INFO("HCI UART driver ver %s", VERSION); + + /* Register the tty discipline */ + + memset(&hci_uart_ldisc, 0, sizeof (hci_uart_ldisc)); + hci_uart_ldisc.magic = TTY_LDISC_MAGIC; + hci_uart_ldisc.name = "n_hci"; + hci_uart_ldisc.open = hci_uart_tty_open; + hci_uart_ldisc.close = hci_uart_tty_close; + hci_uart_ldisc.read = hci_uart_tty_read; + hci_uart_ldisc.write = hci_uart_tty_write; + hci_uart_ldisc.ioctl = hci_uart_tty_ioctl; + hci_uart_ldisc.poll = hci_uart_tty_poll; + hci_uart_ldisc.receive_buf = hci_uart_tty_receive; + hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup; + hci_uart_ldisc.owner = THIS_MODULE; + + if ((err = tty_register_ldisc(N_HCI, &hci_uart_ldisc))) { + BT_ERR("HCI line discipline registration failed. (%d)", err); + return err; + } + +#ifdef CONFIG_BT_HCIUART_H4 + h4_init(); +#endif +#ifdef CONFIG_BT_HCIUART_BCSP + bcsp_init(); +#endif +#ifdef CONFIG_BT_HCIUART_LL + ll_init(); +#endif + + return 0; +} + +static void __exit hci_uart_exit(void) +{ + int err; + +#ifdef CONFIG_BT_HCIUART_H4 + h4_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_BCSP + bcsp_deinit(); +#endif +#ifdef CONFIG_BT_HCIUART_LL + ll_deinit(); +#endif + + /* Release tty registration of line discipline */ + if ((err = tty_unregister_ldisc(N_HCI))) + BT_ERR("Can't unregister HCI line discipline (%d)", err); +} + +module_init(hci_uart_init); +module_exit(hci_uart_exit); + +module_param(reset, bool, 0644); +MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_LDISC(N_HCI); diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c new file mode 100644 index 0000000..5c65014 --- /dev/null +++ b/drivers/bluetooth/hci_ll.c @@ -0,0 +1,535 @@ +/* + * Texas Instruments' Bluetooth HCILL UART protocol + * + * HCILL (HCI Low Level) is a Texas Instruments' power management + * protocol extension to H4. + * + * Copyright (C) 2007 Texas Instruments, Inc. + * + * Written by Ohad Ben-Cohen + * + * Acknowledgements: + * This file is based on hci_h4.c, which was written + * by Maxim Krasnyansky and Marcel Holtmann. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" + +/* HCILL commands */ +#define HCILL_GO_TO_SLEEP_IND 0x30 +#define HCILL_GO_TO_SLEEP_ACK 0x31 +#define HCILL_WAKE_UP_IND 0x32 +#define HCILL_WAKE_UP_ACK 0x33 + +/* HCILL receiver States */ +#define HCILL_W4_PACKET_TYPE 0 +#define HCILL_W4_EVENT_HDR 1 +#define HCILL_W4_ACL_HDR 2 +#define HCILL_W4_SCO_HDR 3 +#define HCILL_W4_DATA 4 + +/* HCILL states */ +enum hcill_states_e { + HCILL_ASLEEP, + HCILL_ASLEEP_TO_AWAKE, + HCILL_AWAKE, + HCILL_AWAKE_TO_ASLEEP +}; + +struct hcill_cmd { + u8 cmd; +} __attribute__((packed)); + +struct ll_struct { + unsigned long rx_state; + unsigned long rx_count; + struct sk_buff *rx_skb; + struct sk_buff_head txq; + spinlock_t hcill_lock; /* HCILL state lock */ + unsigned long hcill_state; /* HCILL power state */ + struct sk_buff_head tx_wait_q; /* HCILL wait queue */ +}; + +/* + * Builds and sends an HCILL command packet. + * These are very simple packets with only 1 cmd byte + */ +static int send_hcill_cmd(u8 cmd, struct hci_uart *hu) +{ + int err = 0; + struct sk_buff *skb = NULL; + struct ll_struct *ll = hu->priv; + struct hcill_cmd *hcill_packet; + + BT_DBG("hu %p cmd 0x%x", hu, cmd); + + /* allocate packet */ + skb = bt_skb_alloc(1, GFP_ATOMIC); + if (!skb) { + BT_ERR("cannot allocate memory for HCILL packet"); + err = -ENOMEM; + goto out; + } + + /* prepare packet */ + hcill_packet = (struct hcill_cmd *) skb_put(skb, 1); + hcill_packet->cmd = cmd; + skb->dev = (void *) hu->hdev; + + /* send packet */ + skb_queue_tail(&ll->txq, skb); +out: + return err; +} + +/* Initialize protocol */ +static int ll_open(struct hci_uart *hu) +{ + struct ll_struct *ll; + + BT_DBG("hu %p", hu); + + ll = kzalloc(sizeof(*ll), GFP_ATOMIC); + if (!ll) + return -ENOMEM; + + skb_queue_head_init(&ll->txq); + skb_queue_head_init(&ll->tx_wait_q); + spin_lock_init(&ll->hcill_lock); + + ll->hcill_state = HCILL_AWAKE; + + hu->priv = ll; + + return 0; +} + +/* Flush protocol data */ +static int ll_flush(struct hci_uart *hu) +{ + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ll->tx_wait_q); + skb_queue_purge(&ll->txq); + + return 0; +} + +/* Close protocol */ +static int ll_close(struct hci_uart *hu) +{ + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ll->tx_wait_q); + skb_queue_purge(&ll->txq); + + kfree_skb(ll->rx_skb); + + hu->priv = NULL; + + kfree(ll); + + return 0; +} + +/* + * internal function, which does common work of the device wake up process: + * 1. places all pending packets (waiting in tx_wait_q list) in txq list. + * 2. changes internal state to HCILL_AWAKE. + * Note: assumes that hcill_lock spinlock is taken, + * shouldn't be called otherwise! 
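HCILL layers four one-byte power-management opcodes on top of H4, and the driver keeps the small state machine declared above: while the chip is HCILL_ASLEEP or HCILL_ASLEEP_TO_AWAKE, outgoing packets are parked in tx_wait_q and only move to the real txq once the wake handshake completes, as the enqueue and wake-up handlers further below show. The outgoing-packet side of that state handling, reduced to a user-space sketch with counters in place of queues:

/* Host-side HCILL power-state handling: a sketch of the ll_enqueue() /
 * ll_device_woke_up() logic; queues are just counters here. */
#include <stdio.h>

enum state { ASLEEP, ASLEEP_TO_AWAKE, AWAKE };

static enum state st = ASLEEP;
static int txq, tx_wait_q;

static void send_wake_up_ind(void) { printf("tx 0x32 (WAKE_UP_IND)\n"); }

static void enqueue_packet(void)
{
        switch (st) {
        case AWAKE:                     /* chip awake: transmit directly */
                txq++;
                break;
        case ASLEEP:                    /* park the packet, start the wake-up */
                tx_wait_q++;
                send_wake_up_ind();
                st = ASLEEP_TO_AWAKE;
                break;
        case ASLEEP_TO_AWAKE:           /* handshake in flight: just park it */
                tx_wait_q++;
                break;
        }
}

static void on_wake_up_ack(void)        /* chip answered with 0x33 */
{
        txq += tx_wait_q;               /* flush everything that was waiting */
        tx_wait_q = 0;
        st = AWAKE;
}

int main(void)
{
        enqueue_packet();               /* triggers WAKE_UP_IND */
        enqueue_packet();               /* parked in tx_wait_q */
        on_wake_up_ack();
        enqueue_packet();               /* sent directly */
        printf("txq=%d tx_wait_q=%d state=%d\n", txq, tx_wait_q, st);
        return 0;                       /* txq=3 tx_wait_q=0 state=2 (AWAKE) */
}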
+ */ +static void __ll_do_awake(struct ll_struct *ll) +{ + struct sk_buff *skb = NULL; + + while ((skb = skb_dequeue(&ll->tx_wait_q))) + skb_queue_tail(&ll->txq, skb); + + ll->hcill_state = HCILL_AWAKE; +} + +/* + * Called upon a wake-up-indication from the device + */ +static void ll_device_want_to_wakeup(struct hci_uart *hu) +{ + unsigned long flags; + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p", hu); + + /* lock hcill state */ + spin_lock_irqsave(&ll->hcill_lock, flags); + + switch (ll->hcill_state) { + case HCILL_ASLEEP_TO_AWAKE: + /* + * This state means that both the host and the BRF chip + * have simultaneously sent a wake-up-indication packet. + * Traditionaly, in this case, receiving a wake-up-indication + * was enough and an additional wake-up-ack wasn't needed. + * This has changed with the BRF6350, which does require an + * explicit wake-up-ack. Other BRF versions, which do not + * require an explicit ack here, do accept it, thus it is + * perfectly safe to always send one. + */ + BT_DBG("dual wake-up-indication"); + /* deliberate fall-through - do not add break */ + case HCILL_ASLEEP: + /* acknowledge device wake up */ + if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) { + BT_ERR("cannot acknowledge device wake up"); + goto out; + } + break; + default: + /* any other state is illegal */ + BT_ERR("received HCILL_WAKE_UP_IND in state %ld", ll->hcill_state); + break; + } + + /* send pending packets and change state to HCILL_AWAKE */ + __ll_do_awake(ll); + +out: + spin_unlock_irqrestore(&ll->hcill_lock, flags); + + /* actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +/* + * Called upon a sleep-indication from the device + */ +static void ll_device_want_to_sleep(struct hci_uart *hu) +{ + unsigned long flags; + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p", hu); + + /* lock hcill state */ + spin_lock_irqsave(&ll->hcill_lock, flags); + + /* sanity check */ + if (ll->hcill_state != HCILL_AWAKE) + BT_ERR("ERR: HCILL_GO_TO_SLEEP_IND in state %ld", ll->hcill_state); + + /* acknowledge device sleep */ + if (send_hcill_cmd(HCILL_GO_TO_SLEEP_ACK, hu) < 0) { + BT_ERR("cannot acknowledge device sleep"); + goto out; + } + + /* update state */ + ll->hcill_state = HCILL_ASLEEP; + +out: + spin_unlock_irqrestore(&ll->hcill_lock, flags); + + /* actually send the sleep ack packet */ + hci_uart_tx_wakeup(hu); +} + +/* + * Called upon wake-up-acknowledgement from the device + */ +static void ll_device_woke_up(struct hci_uart *hu) +{ + unsigned long flags; + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p", hu); + + /* lock hcill state */ + spin_lock_irqsave(&ll->hcill_lock, flags); + + /* sanity check */ + if (ll->hcill_state != HCILL_ASLEEP_TO_AWAKE) + BT_ERR("received HCILL_WAKE_UP_ACK in state %ld", ll->hcill_state); + + /* send pending packets and change state to HCILL_AWAKE */ + __ll_do_awake(ll); + + spin_unlock_irqrestore(&ll->hcill_lock, flags); + + /* actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +/* Enqueue frame for transmittion (padding, crc, etc) */ +/* may be called from two simultaneous tasklets */ +static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + unsigned long flags = 0; + struct ll_struct *ll = hu->priv; + + BT_DBG("hu %p skb %p", hu, skb); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + + /* lock hcill state */ + spin_lock_irqsave(&ll->hcill_lock, flags); + + /* act according to current state */ + switch (ll->hcill_state) { + case HCILL_AWAKE: + BT_DBG("device awake, sending 
normally"); + skb_queue_tail(&ll->txq, skb); + break; + case HCILL_ASLEEP: + BT_DBG("device asleep, waking up and queueing packet"); + /* save packet for later */ + skb_queue_tail(&ll->tx_wait_q, skb); + /* awake device */ + if (send_hcill_cmd(HCILL_WAKE_UP_IND, hu) < 0) { + BT_ERR("cannot wake up device"); + break; + } + ll->hcill_state = HCILL_ASLEEP_TO_AWAKE; + break; + case HCILL_ASLEEP_TO_AWAKE: + BT_DBG("device waking up, queueing packet"); + /* transient state; just keep packet for later */ + skb_queue_tail(&ll->tx_wait_q, skb); + break; + default: + BT_ERR("illegal hcill state: %ld (losing packet)", ll->hcill_state); + kfree_skb(skb); + break; + } + + spin_unlock_irqrestore(&ll->hcill_lock, flags); + + return 0; +} + +static inline int ll_check_data_len(struct ll_struct *ll, int len) +{ + register int room = skb_tailroom(ll->rx_skb); + + BT_DBG("len %d room %d", len, room); + + if (!len) { + hci_recv_frame(ll->rx_skb); + } else if (len > room) { + BT_ERR("Data length is too large"); + kfree_skb(ll->rx_skb); + } else { + ll->rx_state = HCILL_W4_DATA; + ll->rx_count = len; + return len; + } + + ll->rx_state = HCILL_W4_PACKET_TYPE; + ll->rx_skb = NULL; + ll->rx_count = 0; + + return 0; +} + +/* Recv data */ +static int ll_recv(struct hci_uart *hu, void *data, int count) +{ + struct ll_struct *ll = hu->priv; + register char *ptr; + struct hci_event_hdr *eh; + struct hci_acl_hdr *ah; + struct hci_sco_hdr *sh; + register int len, type, dlen; + + BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count); + + ptr = data; + while (count) { + if (ll->rx_count) { + len = min_t(unsigned int, ll->rx_count, count); + memcpy(skb_put(ll->rx_skb, len), ptr, len); + ll->rx_count -= len; count -= len; ptr += len; + + if (ll->rx_count) + continue; + + switch (ll->rx_state) { + case HCILL_W4_DATA: + BT_DBG("Complete data"); + hci_recv_frame(ll->rx_skb); + + ll->rx_state = HCILL_W4_PACKET_TYPE; + ll->rx_skb = NULL; + continue; + + case HCILL_W4_EVENT_HDR: + eh = (struct hci_event_hdr *) ll->rx_skb->data; + + BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen); + + ll_check_data_len(ll, eh->plen); + continue; + + case HCILL_W4_ACL_HDR: + ah = (struct hci_acl_hdr *) ll->rx_skb->data; + dlen = __le16_to_cpu(ah->dlen); + + BT_DBG("ACL header: dlen %d", dlen); + + ll_check_data_len(ll, dlen); + continue; + + case HCILL_W4_SCO_HDR: + sh = (struct hci_sco_hdr *) ll->rx_skb->data; + + BT_DBG("SCO header: dlen %d", sh->dlen); + + ll_check_data_len(ll, sh->dlen); + continue; + } + } + + /* HCILL_W4_PACKET_TYPE */ + switch (*ptr) { + case HCI_EVENT_PKT: + BT_DBG("Event packet"); + ll->rx_state = HCILL_W4_EVENT_HDR; + ll->rx_count = HCI_EVENT_HDR_SIZE; + type = HCI_EVENT_PKT; + break; + + case HCI_ACLDATA_PKT: + BT_DBG("ACL packet"); + ll->rx_state = HCILL_W4_ACL_HDR; + ll->rx_count = HCI_ACL_HDR_SIZE; + type = HCI_ACLDATA_PKT; + break; + + case HCI_SCODATA_PKT: + BT_DBG("SCO packet"); + ll->rx_state = HCILL_W4_SCO_HDR; + ll->rx_count = HCI_SCO_HDR_SIZE; + type = HCI_SCODATA_PKT; + break; + + /* HCILL signals */ + case HCILL_GO_TO_SLEEP_IND: + BT_DBG("HCILL_GO_TO_SLEEP_IND packet"); + ll_device_want_to_sleep(hu); + ptr++; count--; + continue; + + case HCILL_GO_TO_SLEEP_ACK: + /* shouldn't happen */ + BT_ERR("received HCILL_GO_TO_SLEEP_ACK (in state %ld)", ll->hcill_state); + ptr++; count--; + continue; + + case HCILL_WAKE_UP_IND: + BT_DBG("HCILL_WAKE_UP_IND packet"); + ll_device_want_to_wakeup(hu); + ptr++; count--; + continue; + + case HCILL_WAKE_UP_ACK: + 
BT_DBG("HCILL_WAKE_UP_ACK packet"); + ll_device_woke_up(hu); + ptr++; count--; + continue; + + default: + BT_ERR("Unknown HCI packet type %2.2x", (__u8)*ptr); + hu->hdev->stat.err_rx++; + ptr++; count--; + continue; + }; + + ptr++; count--; + + /* Allocate packet */ + ll->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); + if (!ll->rx_skb) { + BT_ERR("Can't allocate mem for new packet"); + ll->rx_state = HCILL_W4_PACKET_TYPE; + ll->rx_count = 0; + return 0; + } + + ll->rx_skb->dev = (void *) hu->hdev; + bt_cb(ll->rx_skb)->pkt_type = type; + } + + return count; +} + +static struct sk_buff *ll_dequeue(struct hci_uart *hu) +{ + struct ll_struct *ll = hu->priv; + return skb_dequeue(&ll->txq); +} + +static struct hci_uart_proto llp = { + .id = HCI_UART_LL, + .open = ll_open, + .close = ll_close, + .recv = ll_recv, + .enqueue = ll_enqueue, + .dequeue = ll_dequeue, + .flush = ll_flush, +}; + +int ll_init(void) +{ + int err = hci_uart_register_proto(&llp); + + if (!err) + BT_INFO("HCILL protocol initialized"); + else + BT_ERR("HCILL protocol registration failed"); + + return err; +} + +int ll_deinit(void) +{ + return hci_uart_unregister_proto(&llp); +} diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h new file mode 100644 index 0000000..50113db --- /dev/null +++ b/drivers/bluetooth/hci_uart.h @@ -0,0 +1,93 @@ +/* + * + * Bluetooth HCI UART driver + * + * Copyright (C) 2000-2001 Qualcomm Incorporated + * Copyright (C) 2002-2003 Maxim Krasnyansky + * Copyright (C) 2004-2005 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef N_HCI +#define N_HCI 15 +#endif + +/* Ioctls */ +#define HCIUARTSETPROTO _IOW('U', 200, int) +#define HCIUARTGETPROTO _IOR('U', 201, int) +#define HCIUARTGETDEVICE _IOR('U', 202, int) + +/* UART protocols */ +#define HCI_UART_MAX_PROTO 5 + +#define HCI_UART_H4 0 +#define HCI_UART_BCSP 1 +#define HCI_UART_3WIRE 2 +#define HCI_UART_H4DS 3 +#define HCI_UART_LL 4 + +struct hci_uart; + +struct hci_uart_proto { + unsigned int id; + int (*open)(struct hci_uart *hu); + int (*close)(struct hci_uart *hu); + int (*flush)(struct hci_uart *hu); + int (*recv)(struct hci_uart *hu, void *data, int len); + int (*enqueue)(struct hci_uart *hu, struct sk_buff *skb); + struct sk_buff *(*dequeue)(struct hci_uart *hu); +}; + +struct hci_uart { + struct tty_struct *tty; + struct hci_dev *hdev; + unsigned long flags; + + struct hci_uart_proto *proto; + void *priv; + + struct sk_buff *tx_skb; + unsigned long tx_state; + spinlock_t rx_lock; +}; + +/* HCI_UART flag bits */ +#define HCI_UART_PROTO_SET 0 + +/* TX states */ +#define HCI_UART_SENDING 1 +#define HCI_UART_TX_WAKEUP 2 + +int hci_uart_register_proto(struct hci_uart_proto *p); +int hci_uart_unregister_proto(struct hci_uart_proto *p); +int hci_uart_tx_wakeup(struct hci_uart *hu); + +#ifdef CONFIG_BT_HCIUART_H4 +int h4_init(void); +int h4_deinit(void); +#endif + +#ifdef CONFIG_BT_HCIUART_BCSP +int bcsp_init(void); +int bcsp_deinit(void); +#endif + +#ifdef CONFIG_BT_HCIUART_LL +int ll_init(void); +int ll_deinit(void); +#endif diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c new file mode 100644 index 0000000..bb0aefd --- /dev/null +++ b/drivers/bluetooth/hci_vhci.c @@ -0,0 +1,311 @@ +/* + * + * Bluetooth virtual HCI driver + * + * Copyright (C) 2000-2001 Qualcomm Incorporated + * Copyright (C) 2002-2003 Maxim Krasnyansky + * Copyright (C) 2004-2006 Marcel Holtmann + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#define VERSION "1.3" + +struct vhci_data { + struct hci_dev *hdev; + + unsigned long flags; + + wait_queue_head_t read_wait; + struct sk_buff_head readq; +}; + +static int vhci_open_dev(struct hci_dev *hdev) +{ + set_bit(HCI_RUNNING, &hdev->flags); + + return 0; +} + +static int vhci_close_dev(struct hci_dev *hdev) +{ + struct vhci_data *data = hdev->driver_data; + + if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) + return 0; + + skb_queue_purge(&data->readq); + + return 0; +} + +static int vhci_flush(struct hci_dev *hdev) +{ + struct vhci_data *data = hdev->driver_data; + + skb_queue_purge(&data->readq); + + return 0; +} + +static int vhci_send_frame(struct sk_buff *skb) +{ + struct hci_dev* hdev = (struct hci_dev *) skb->dev; + struct vhci_data *data; + + if (!hdev) { + BT_ERR("Frame for unknown HCI device (hdev=NULL)"); + return -ENODEV; + } + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -EBUSY; + + data = hdev->driver_data; + + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + skb_queue_tail(&data->readq, skb); + + wake_up_interruptible(&data->read_wait); + + return 0; +} + +static void vhci_destruct(struct hci_dev *hdev) +{ + kfree(hdev->driver_data); +} + +static inline ssize_t vhci_get_user(struct vhci_data *data, + const char __user *buf, size_t count) +{ + struct sk_buff *skb; + + if (count > HCI_MAX_FRAME_SIZE) + return -EINVAL; + + skb = bt_skb_alloc(count, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + if (copy_from_user(skb_put(skb, count), buf, count)) { + kfree_skb(skb); + return -EFAULT; + } + + skb->dev = (void *) data->hdev; + bt_cb(skb)->pkt_type = *((__u8 *) skb->data); + skb_pull(skb, 1); + + hci_recv_frame(skb); + + return count; +} + +static inline ssize_t vhci_put_user(struct vhci_data *data, + struct sk_buff *skb, char __user *buf, int count) +{ + char __user *ptr = buf; + int len, total = 0; + + len = min_t(unsigned int, skb->len, count); + + if (copy_to_user(ptr, skb->data, len)) + return -EFAULT; + + total += len; + + data->hdev->stat.byte_tx += len; + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + data->hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + data->hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + data->hdev->stat.cmd_tx++; + break; + }; + + return total; +} + +static ssize_t vhci_read(struct file *file, + char __user *buf, size_t count, loff_t *pos) +{ + struct vhci_data *data = file->private_data; + struct sk_buff *skb; + ssize_t ret = 0; + + while (count) { + skb = skb_dequeue(&data->readq); + if (skb) { + ret = vhci_put_user(data, skb, buf, count); + if (ret < 0) + skb_queue_head(&data->readq, skb); + else + kfree_skb(skb); + break; + } + + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + ret = wait_event_interruptible(data->read_wait, + !skb_queue_empty(&data->readq)); + if (ret < 0) + break; + } + + return ret; +} + +static ssize_t vhci_write(struct file *file, + const char __user *buf, size_t count, loff_t *pos) +{ + struct vhci_data *data = file->private_data; + + return vhci_get_user(data, buf, count); +} + +static unsigned int vhci_poll(struct file *file, poll_table *wait) +{ + struct vhci_data *data = file->private_data; + + 
poll_wait(file, &data->read_wait, wait); + + if (!skb_queue_empty(&data->readq)) + return POLLIN | POLLRDNORM; + + return POLLOUT | POLLWRNORM; +} + +static int vhci_open(struct inode *inode, struct file *file) +{ + struct vhci_data *data; + struct hci_dev *hdev; + + data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + skb_queue_head_init(&data->readq); + init_waitqueue_head(&data->read_wait); + + hdev = hci_alloc_dev(); + if (!hdev) { + kfree(data); + return -ENOMEM; + } + + data->hdev = hdev; + + hdev->bus = HCI_VIRTUAL; + hdev->driver_data = data; + + hdev->open = vhci_open_dev; + hdev->close = vhci_close_dev; + hdev->flush = vhci_flush; + hdev->send = vhci_send_frame; + hdev->destruct = vhci_destruct; + + hdev->owner = THIS_MODULE; + + if (hci_register_dev(hdev) < 0) { + BT_ERR("Can't register HCI device"); + kfree(data); + hci_free_dev(hdev); + return -EBUSY; + } + + file->private_data = data; + + return nonseekable_open(inode, file); +} + +static int vhci_release(struct inode *inode, struct file *file) +{ + struct vhci_data *data = file->private_data; + struct hci_dev *hdev = data->hdev; + + if (hci_unregister_dev(hdev) < 0) { + BT_ERR("Can't unregister HCI device %s", hdev->name); + } + + hci_free_dev(hdev); + + file->private_data = NULL; + + return 0; +} + +static const struct file_operations vhci_fops = { + .owner = THIS_MODULE, + .read = vhci_read, + .write = vhci_write, + .poll = vhci_poll, + .open = vhci_open, + .release = vhci_release, +}; + +static struct miscdevice vhci_miscdev= { + .name = "vhci", + .fops = &vhci_fops, + .minor = MISC_DYNAMIC_MINOR, +}; + +static int __init vhci_init(void) +{ + BT_INFO("Virtual HCI driver ver %s", VERSION); + + return misc_register(&vhci_miscdev); +} + +static void __exit vhci_exit(void) +{ + misc_deregister(&vhci_miscdev); +} + +module_init(vhci_init); +module_exit(vhci_exit); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile new file mode 100644 index 0000000..151feb2 --- /dev/null +++ b/drivers/misc/eeprom/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c new file mode 100644 index 0000000..15b1780 --- /dev/null +++ b/drivers/misc/eeprom/eeprom_93cx6.c @@ -0,0 +1,240 @@ +/* + Copyright (C) 2004 - 2006 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: eeprom_93cx6 + Abstract: EEPROM reader routines for 93cx6 chipsets. + Supported chipsets: 93c46 & 93c66. 
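The virtual HCI driver above exposes a misc device (typically /dev/vhci) on which a user-space process plays the controller: frames the host stack sends appear on read() with their packet-type byte prepended, and whatever the process write()s, again with a leading type byte, is handed to the stack through hci_recv_frame(). A rough sketch of such a peer; the device path and the standard HCI type and event values are assumptions, and a real emulator would have to answer every command it receives:

/* Minimal user-space peer for the vhci driver above (sketch). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint8_t buf[260];
        int fd = open("/dev/vhci", O_RDWR);     /* assumed device node */

        if (fd < 0) {
                perror("open /dev/vhci");
                return 1;
        }

        /* The stack starts by sending HCI commands (type byte 0x01). */
        ssize_t n = read(fd, buf, sizeof(buf));
        if (n >= 4 && buf[0] == 0x01) {
                uint16_t opcode = buf[1] | (buf[2] << 8);

                /* Reply with a Command Complete event, status 0 (success). */
                uint8_t evt[] = { 0x04, 0x0e, 0x04, 0x01,
                                  (uint8_t)opcode, (uint8_t)(opcode >> 8), 0x00 };
                if (write(fd, evt, sizeof(evt)) < 0)
                        perror("write");
        }

        close(fd);
        return 0;
}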
+ */ + +#include +#include +#include +#include + +MODULE_AUTHOR("http://rt2x00.serialmonkey.com"); +MODULE_VERSION("1.0"); +MODULE_DESCRIPTION("EEPROM 93cx6 chip driver"); +MODULE_LICENSE("GPL"); + +static inline void eeprom_93cx6_pulse_high(struct eeprom_93cx6 *eeprom) +{ + eeprom->reg_data_clock = 1; + eeprom->register_write(eeprom); + + /* + * Add a short delay for the pulse to work. + * According to the specifications the "maximum minimum" + * time should be 450ns. + */ + ndelay(450); +} + +static inline void eeprom_93cx6_pulse_low(struct eeprom_93cx6 *eeprom) +{ + eeprom->reg_data_clock = 0; + eeprom->register_write(eeprom); + + /* + * Add a short delay for the pulse to work. + * According to the specifications the "maximum minimum" + * time should be 450ns. + */ + ndelay(450); +} + +static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom) +{ + /* + * Clear all flags, and enable chip select. + */ + eeprom->register_read(eeprom); + eeprom->reg_data_in = 0; + eeprom->reg_data_out = 0; + eeprom->reg_data_clock = 0; + eeprom->reg_chip_select = 1; + eeprom->register_write(eeprom); + + /* + * kick a pulse. + */ + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); +} + +static void eeprom_93cx6_cleanup(struct eeprom_93cx6 *eeprom) +{ + /* + * Clear chip_select and data_in flags. + */ + eeprom->register_read(eeprom); + eeprom->reg_data_in = 0; + eeprom->reg_chip_select = 0; + eeprom->register_write(eeprom); + + /* + * kick a pulse. + */ + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); +} + +static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom, + const u16 data, const u16 count) +{ + unsigned int i; + + eeprom->register_read(eeprom); + + /* + * Clear data flags. + */ + eeprom->reg_data_in = 0; + eeprom->reg_data_out = 0; + + /* + * Start writing all bits. + */ + for (i = count; i > 0; i--) { + /* + * Check if this bit needs to be set. + */ + eeprom->reg_data_in = !!(data & (1 << (i - 1))); + + /* + * Write the bit to the eeprom register. + */ + eeprom->register_write(eeprom); + + /* + * Kick a pulse. + */ + eeprom_93cx6_pulse_high(eeprom); + eeprom_93cx6_pulse_low(eeprom); + } + + eeprom->reg_data_in = 0; + eeprom->register_write(eeprom); +} + +static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom, + u16 *data, const u16 count) +{ + unsigned int i; + u16 buf = 0; + + eeprom->register_read(eeprom); + + /* + * Clear data flags. + */ + eeprom->reg_data_in = 0; + eeprom->reg_data_out = 0; + + /* + * Start reading all bits. + */ + for (i = count; i > 0; i--) { + eeprom_93cx6_pulse_high(eeprom); + + eeprom->register_read(eeprom); + + /* + * Clear data_in flag. + */ + eeprom->reg_data_in = 0; + + /* + * Read if the bit has been set. + */ + if (eeprom->reg_data_out) + buf |= (1 << (i - 1)); + + eeprom_93cx6_pulse_low(eeprom); + } + + *data = buf; +} + +/** + * eeprom_93cx6_read - Read multiple words from eeprom + * @eeprom: Pointer to eeprom structure + * @word: Word index from where we should start reading + * @data: target pointer where the information will have to be stored + * + * This function will read the eeprom data as host-endian word + * into the given data pointer. + */ +void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word, + u16 *data) +{ + u16 command; + + /* + * Initialize the eeprom register + */ + eeprom_93cx6_startup(eeprom); + + /* + * Select the read opcode and the word to be read. 
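+	 * The opcode is shifted left by the address width (eeprom->width),
+	 * so the word index ends up in the low bits of the command.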
+ */ + command = (PCI_EEPROM_READ_OPCODE << eeprom->width) | word; + eeprom_93cx6_write_bits(eeprom, command, + PCI_EEPROM_WIDTH_OPCODE + eeprom->width); + + /* + * Read the requested 16 bits. + */ + eeprom_93cx6_read_bits(eeprom, data, 16); + + /* + * Cleanup eeprom register. + */ + eeprom_93cx6_cleanup(eeprom); +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_read); + +/** + * eeprom_93cx6_multiread - Read multiple words from eeprom + * @eeprom: Pointer to eeprom structure + * @word: Word index from where we should start reading + * @data: target pointer where the information will have to be stored + * @words: Number of words that should be read. + * + * This function will read all requested words from the eeprom, + * this is done by calling eeprom_93cx6_read() multiple times. + * But with the additional change that while the eeprom_93cx6_read + * will return host ordered bytes, this method will return little + * endian words. + */ +void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, + __le16 *data, const u16 words) +{ + unsigned int i; + u16 tmp; + + for (i = 0; i < words; i++) { + tmp = 0; + eeprom_93cx6_read(eeprom, word + i, &tmp); + data[i] = cpu_to_le16(tmp); + } +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread); + diff --git a/drivers/net/Makefile b/drivers/net/Makefile new file mode 100644 index 0000000..c784ef1 --- /dev/null +++ b/drivers/net/Makefile @@ -0,0 +1,6 @@ + +obj-$(CONFIG_B44) += b44.o +obj-$(CONFIG_ATL1) += atlx/ +obj-$(CONFIG_ATL2) += atlx/ +obj-$(CONFIG_ATL1E) += atl1e/ +obj-$(CONFIG_ATL1C) += atl1c/ diff --git a/drivers/net/atl1c/Makefile b/drivers/net/atl1c/Makefile new file mode 100644 index 0000000..c37d966 --- /dev/null +++ b/drivers/net/atl1c/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_ATL1C) += atl1c.o +atl1c-objs := atl1c_main.o atl1c_hw.o atl1c_ethtool.o diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h new file mode 100644 index 0000000..84ae905 --- /dev/null +++ b/drivers/net/atl1c/atl1c.h @@ -0,0 +1,635 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _ATL1C_H_ +#define _ATL1C_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atl1c_hw.h" + +/* Wake Up Filter Control */ +#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define AT_VLAN_TO_TAG(_vlan, _tag) \ + _tag = ((((_vlan) >> 8) & 0xFF) |\ + (((_vlan) & 0xFF) << 8)) + +#define AT_TAG_TO_VLAN(_tag, _vlan) \ + _vlan = ((((_tag) >> 8) & 0xFF) |\ + (((_tag) & 0xFF) << 8)) + +#define SPEED_0 0xffff +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) +#define MAX_JUMBO_FRAME_SIZE (9*1024) +#define MAX_TX_OFFLOAD_THRESH (9*1024) + +#define AT_MAX_RECEIVE_QUEUE 4 +#define AT_DEF_RECEIVE_QUEUE 1 +#define AT_MAX_TRANSMIT_QUEUE 2 + +#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL +#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL + +#define AT_TX_WATCHDOG (5 * HZ) +#define AT_MAX_INT_WORK 5 +#define AT_TWSI_EEPROM_TIMEOUT 100 +#define AT_HW_MAX_IDLE_DELAY 10 +#define AT_SUSPEND_LINK_TIMEOUT 28 + +#define AT_ASPM_L0S_TIMER 6 +#define AT_ASPM_L1_TIMER 12 + +#define ATL1C_PCIE_L0S_L1_DISABLE 0x01 +#define ATL1C_PCIE_PHY_RESET 0x02 + +#define ATL1C_ASPM_L0s_ENABLE 0x0001 +#define ATL1C_ASPM_L1_ENABLE 0x0002 + +#define AT_REGS_LEN (75 * sizeof(u32)) +#define AT_EEPROM_LEN 512 + +#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) +#define ATL1C_RFD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_rx_free_desc) +#define ATL1C_TPD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_tpd_desc) +#define ATL1C_RRD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_recv_ret_status) + +/* tpd word 1 bit 0:7 General Checksum task offload */ +#define TPD_L4HDR_OFFSET_MASK 0x00FF +#define TPD_L4HDR_OFFSET_SHIFT 0 + +/* tpd word 1 bit 0:7 Large Send task offload (IPv4/IPV6) */ +#define TPD_TCPHDR_OFFSET_MASK 0x00FF +#define TPD_TCPHDR_OFFSET_SHIFT 0 + +/* tpd word 1 bit 0:7 Custom Checksum task offload */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 0 + +/* tpd word 1 bit 8:17 */ +#define TPD_CCSUM_EN_MASK 0x0001 +#define TPD_CCSUM_EN_SHIFT 8 +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 9 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 10 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 11 +#define TPD_LSO_EN_MASK 0x0001 /* TCP Large Send Offload */ +#define TPD_LSO_EN_SHIFT 12 +#define TPD_LSO_VER_MASK 0x0001 +#define TPD_LSO_VER_SHIFT 13 /* 0 : ipv4; 1 : ipv4/ipv6 */ +#define TPD_CON_VTAG_MASK 0x0001 +#define TPD_CON_VTAG_SHIFT 14 +#define TPD_INS_VTAG_MASK 0x0001 +#define TPD_INS_VTAG_SHIFT 15 +#define TPD_IPV4_PACKET_MASK 0x0001 /* valid when LSO VER is 1 */ +#define TPD_IPV4_PACKET_SHIFT 16 +#define TPD_ETH_TYPE_MASK 0x0001 +#define TPD_ETH_TYPE_SHIFT 17 /* 0 : 802.3 frame; 1 : Ethernet */ + +/* tpd word 18:25 Custom Checksum task offload */ +#define TPD_CCSUM_OFFSET_MASK 0x00FF +#define TPD_CCSUM_OFFSET_SHIFT 18 +#define TPD_CCSUM_EPAD_MASK 0x0001 +#define TPD_CCSUM_EPAD_SHIFT 30 + +/* tpd word 18:30 Large Send task offload (IPv4/IPV6) */ +#define 
TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 18 + +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 31 + +struct atl1c_tpd_desc { + __le16 buffer_len; /* include 4-byte CRC */ + __le16 vlan_tag; + __le32 word1; + __le64 buffer_addr; +}; + +struct atl1c_tpd_ext_desc { + u32 reservd_0; + __le32 word1; + __le32 pkt_len; + u32 reservd_1; +}; +/* rrs word 0 bit 0:31 */ +#define RRS_RX_CSUM_MASK 0xFFFF +#define RRS_RX_CSUM_SHIFT 0 +#define RRS_RX_RFD_CNT_MASK 0x000F +#define RRS_RX_RFD_CNT_SHIFT 16 +#define RRS_RX_RFD_INDEX_MASK 0x0FFF +#define RRS_RX_RFD_INDEX_SHIFT 20 + +/* rrs flag bit 0:16 */ +#define RRS_HEAD_LEN_MASK 0x00FF +#define RRS_HEAD_LEN_SHIFT 0 +#define RRS_HDS_TYPE_MASK 0x0003 +#define RRS_HDS_TYPE_SHIFT 8 +#define RRS_CPU_NUM_MASK 0x0003 +#define RRS_CPU_NUM_SHIFT 10 +#define RRS_HASH_FLG_MASK 0x000F +#define RRS_HASH_FLG_SHIFT 12 + +#define RRS_HDS_TYPE_HEAD 1 +#define RRS_HDS_TYPE_DATA 2 + +#define RRS_IS_NO_HDS_TYPE(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0) + +#define RRS_IS_HDS_HEAD(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \ + RRS_HDS_TYPE_HEAD) + +#define RRS_IS_HDS_DATA(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \ + RRS_HDS_TYPE_DATA) + +/* rrs word 3 bit 0:31 */ +#define RRS_PKT_SIZE_MASK 0x3FFF +#define RRS_PKT_SIZE_SHIFT 0 +#define RRS_ERR_L4_CSUM_MASK 0x0001 +#define RRS_ERR_L4_CSUM_SHIFT 14 +#define RRS_ERR_IP_CSUM_MASK 0x0001 +#define RRS_ERR_IP_CSUM_SHIFT 15 +#define RRS_VLAN_INS_MASK 0x0001 +#define RRS_VLAN_INS_SHIFT 16 +#define RRS_PROT_ID_MASK 0x0007 +#define RRS_PROT_ID_SHIFT 17 +#define RRS_RX_ERR_SUM_MASK 0x0001 +#define RRS_RX_ERR_SUM_SHIFT 20 +#define RRS_RX_ERR_CRC_MASK 0x0001 +#define RRS_RX_ERR_CRC_SHIFT 21 +#define RRS_RX_ERR_FAE_MASK 0x0001 +#define RRS_RX_ERR_FAE_SHIFT 22 +#define RRS_RX_ERR_TRUNC_MASK 0x0001 +#define RRS_RX_ERR_TRUNC_SHIFT 23 +#define RRS_RX_ERR_RUNC_MASK 0x0001 +#define RRS_RX_ERR_RUNC_SHIFT 24 +#define RRS_RX_ERR_ICMP_MASK 0x0001 +#define RRS_RX_ERR_ICMP_SHIFT 25 +#define RRS_PACKET_BCAST_MASK 0x0001 +#define RRS_PACKET_BCAST_SHIFT 26 +#define RRS_PACKET_MCAST_MASK 0x0001 +#define RRS_PACKET_MCAST_SHIFT 27 +#define RRS_PACKET_TYPE_MASK 0x0001 +#define RRS_PACKET_TYPE_SHIFT 28 +#define RRS_FIFO_FULL_MASK 0x0001 +#define RRS_FIFO_FULL_SHIFT 29 +#define RRS_802_3_LEN_ERR_MASK 0x0001 +#define RRS_802_3_LEN_ERR_SHIFT 30 +#define RRS_RXD_UPDATED_MASK 0x0001 +#define RRS_RXD_UPDATED_SHIFT 31 + +#define RRS_ERR_L4_CSUM 0x00004000 +#define RRS_ERR_IP_CSUM 0x00008000 +#define RRS_VLAN_INS 0x00010000 +#define RRS_RX_ERR_SUM 0x00100000 +#define RRS_RX_ERR_CRC 0x00200000 +#define RRS_802_3_LEN_ERR 0x40000000 +#define RRS_RXD_UPDATED 0x80000000 + +#define RRS_PACKET_TYPE_802_3 1 +#define RRS_PACKET_TYPE_ETH 0 +#define RRS_PACKET_IS_ETH(word) \ + ((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \ + RRS_PACKET_TYPE_ETH) +#define RRS_RXD_IS_VALID(word) \ + ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1) + +#define RRS_PACKET_PROT_IS_IPV4_ONLY(word) \ + ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 1) +#define RRS_PACKET_PROT_IS_IPV6_ONLY(word) \ + ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 6) + +struct atl1c_recv_ret_status { + __le32 word0; + __le32 rss_hash; + __le16 vlan_tag; + __le16 flag; + __le32 word3; +}; + +/* RFD desciptor */ +struct atl1c_rx_free_desc { + __le64 buffer_addr; +}; + +/* DMA Order Settings */ +enum atl1c_dma_order { + atl1c_dma_ord_in = 1, + atl1c_dma_ord_enh = 2, + atl1c_dma_ord_out = 4 
+}; + +enum atl1c_dma_rcb { + atl1c_rcb_64 = 0, + atl1c_rcb_128 = 1 +}; + +enum atl1c_mac_speed { + atl1c_mac_speed_0 = 0, + atl1c_mac_speed_10_100 = 1, + atl1c_mac_speed_1000 = 2 +}; + +enum atl1c_dma_req_block { + atl1c_dma_req_128 = 0, + atl1c_dma_req_256 = 1, + atl1c_dma_req_512 = 2, + atl1c_dma_req_1024 = 3, + atl1c_dma_req_2048 = 4, + atl1c_dma_req_4096 = 5 +}; + +enum atl1c_rss_mode { + atl1c_rss_mode_disable = 0, + atl1c_rss_sig_que = 1, + atl1c_rss_mul_que_sig_int = 2, + atl1c_rss_mul_que_mul_int = 4, +}; + +enum atl1c_rss_type { + atl1c_rss_disable = 0, + atl1c_rss_ipv4 = 1, + atl1c_rss_ipv4_tcp = 2, + atl1c_rss_ipv6 = 4, + atl1c_rss_ipv6_tcp = 8 +}; + +enum atl1c_nic_type { + athr_l1c = 0, + athr_l2c = 1, + athr_l2c_b, + athr_l2c_b2, + athr_l1d, +}; + +enum atl1c_trans_queue { + atl1c_trans_normal = 0, + atl1c_trans_high = 1 +}; + +struct atl1c_hw_stats { + /* rx */ + unsigned long rx_ok; /* The number of good packet received. */ + unsigned long rx_bcast; /* The number of good broadcast packet received. */ + unsigned long rx_mcast; /* The number of good multicast packet received. */ + unsigned long rx_pause; /* The number of Pause packet received. */ + unsigned long rx_ctrl; /* The number of Control packet received other than Pause frame. */ + unsigned long rx_fcs_err; /* The number of packets with bad FCS. */ + unsigned long rx_len_err; /* The number of packets with mismatch of length field and actual size. */ + unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */ + unsigned long rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */ + unsigned long rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */ + unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */ + unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */ + unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */ + unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */ + unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */ + unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */ + unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */ + unsigned long rx_sz_ov; /* The number of good and bad packets received that are more than MTU size truncated by Selene. */ + unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */ + unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */ + unsigned long rx_align_err; /* Alignment Error */ + unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */ + unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */ + unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */ + + /* tx */ + unsigned long tx_ok; /* The number of good packet transmitted. */ + unsigned long tx_bcast; /* The number of good broadcast packet transmitted. */ + unsigned long tx_mcast; /* The number of good multicast packet transmitted. 
*/ + unsigned long tx_pause; /* The number of Pause packet transmitted. */ + unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */ + unsigned long tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */ + unsigned long tx_defer; /* The number of packets transmitted that is deferred. */ + unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */ + unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */ + unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */ + unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */ + unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */ + unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */ + unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */ + unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */ + unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */ + unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */ + unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */ + unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */ + unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ + unsigned long tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */ + unsigned long tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */ + unsigned long tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */ + unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */ + unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. 
*/ +}; + +struct atl1c_hw { + u8 __iomem *hw_addr; /* inner register address */ + struct atl1c_adapter *adapter; + enum atl1c_nic_type nic_type; + enum atl1c_dma_order dma_order; + enum atl1c_dma_rcb rcb_value; + enum atl1c_dma_req_block dmar_block; + enum atl1c_dma_req_block dmaw_block; + + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + + u32 intr_mask; + u8 dmaw_dly_cnt; + u8 dmar_dly_cnt; + + u8 preamble_len; + u16 max_frame_size; + u16 min_frame_size; + + enum atl1c_mac_speed mac_speed; + bool mac_duplex; + bool hibernate; + u16 media_type; +#define MEDIA_TYPE_AUTO_SENSOR 0 +#define MEDIA_TYPE_100M_FULL 1 +#define MEDIA_TYPE_100M_HALF 2 +#define MEDIA_TYPE_10M_FULL 3 +#define MEDIA_TYPE_10M_HALF 4 + + u16 autoneg_advertised; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u16 tx_imt; /* TX Interrupt Moderator timer ( 2us resolution) */ + u16 rx_imt; /* RX Interrupt Moderator timer ( 2us resolution) */ + u16 ict; /* Interrupt Clear timer (2us resolution) */ + u16 ctrl_flags; +#define ATL1C_INTR_CLEAR_ON_READ 0x0001 +#define ATL1C_INTR_MODRT_ENABLE 0x0002 +#define ATL1C_CMB_ENABLE 0x0004 +#define ATL1C_SMB_ENABLE 0x0010 +#define ATL1C_TXQ_MODE_ENHANCE 0x0020 +#define ATL1C_RX_IPV6_CHKSUM 0x0040 +#define ATL1C_ASPM_L0S_SUPPORT 0x0080 +#define ATL1C_ASPM_L1_SUPPORT 0x0100 +#define ATL1C_ASPM_CTRL_MON 0x0200 +#define ATL1C_HIB_DISABLE 0x0400 +#define ATL1C_APS_MODE_ENABLE 0x0800 +#define ATL1C_LINK_EXT_SYNC 0x1000 +#define ATL1C_CLK_GATING_EN 0x2000 +#define ATL1C_FPGA_VERSION 0x8000 + u16 link_cap_flags; +#define ATL1C_LINK_CAP_1000M 0x0001 + u16 cmb_tpd; + u16 cmb_rrd; + u16 cmb_rx_timer; /* 2us resolution */ + u16 cmb_tx_timer; + u32 smb_timer; + + u16 rrd_thresh; /* Threshold of number of RRD produced to trigger + interrupt request */ + u16 tpd_thresh; + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. 
*/ + u8 rfd_burst; + enum atl1c_rss_type rss_type; + enum atl1c_rss_mode rss_mode; + u8 rss_hash_bits; + u32 base_cpu; + u32 indirect_tab; + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + + bool phy_configured; + bool re_autoneg; + bool emi_ca; +}; + +/* + * atl1c_ring_header represents a single, contiguous block of DMA space + * mapped for the three descriptor rings (tpd, rfd, rrd) and the two + * message blocks (cmb, smb) described below + */ +struct atl1c_ring_header { + void *desc; /* virtual address */ + dma_addr_t dma; /* physical address*/ + unsigned int size; /* length in bytes */ +}; + +/* + * atl1c_buffer is wrapper around a pointer to a socket buffer + * so a DMA handle can be stored along with the skb + */ +struct atl1c_buffer { + struct sk_buff *skb; /* socket buffer */ + u16 length; /* rx buffer length */ + u16 flags; /* information of buffer */ +#define ATL1C_BUFFER_FREE 0x0001 +#define ATL1C_BUFFER_BUSY 0x0002 +#define ATL1C_BUFFER_STATE_MASK 0x0003 + +#define ATL1C_PCIMAP_SINGLE 0x0004 +#define ATL1C_PCIMAP_PAGE 0x0008 +#define ATL1C_PCIMAP_TYPE_MASK 0x000C + +#define ATL1C_PCIMAP_TODEVICE 0x0010 +#define ATL1C_PCIMAP_FROMDEVICE 0x0020 +#define ATL1C_PCIMAP_DIRECTION_MASK 0x0030 + dma_addr_t dma; +}; + +#define ATL1C_SET_BUFFER_STATE(buff, state) do { \ + ((buff)->flags) &= ~ATL1C_BUFFER_STATE_MASK; \ + ((buff)->flags) |= (state); \ + } while (0) + +#define ATL1C_SET_PCIMAP_TYPE(buff, type, direction) do { \ + ((buff)->flags) &= ~ATL1C_PCIMAP_TYPE_MASK; \ + ((buff)->flags) |= (type); \ + ((buff)->flags) &= ~ATL1C_PCIMAP_DIRECTION_MASK; \ + ((buff)->flags) |= (direction); \ + } while (0) + +/* transimit packet descriptor (tpd) ring */ +struct atl1c_tpd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; /* this is protectd by adapter->tx_lock */ + atomic_t next_to_clean; + struct atl1c_buffer *buffer_info; +}; + +/* receive free descriptor (rfd) ring */ +struct atl1c_rfd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + u16 next_to_clean; + struct atl1c_buffer *buffer_info; +}; + +/* receive return desciptor (rrd) ring */ +struct atl1c_rrd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + u16 next_to_clean; +}; + +struct atl1c_cmb { + void *cmb; + dma_addr_t dma; +}; + +struct atl1c_smb { + void *smb; + dma_addr_t dma; +}; + +/* board specific private data structure */ +struct atl1c_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct vlan_group *vlgrp; + struct napi_struct napi; + struct atl1c_hw hw; + struct atl1c_hw_stats hw_stats; + struct net_device_stats net_stats; + struct mii_if_info mii; /* MII interface info */ + u16 rx_buffer_len; + + unsigned long flags; +#define __AT_TESTING 0x0001 +#define __AT_RESETTING 0x0002 +#define __AT_DOWN 0x0003 + u8 work_event; +#define ATL1C_WORK_EVENT_RESET 0x01 +#define ATL1C_WORK_EVENT_LINK_CHANGE 0x02 + u32 msg_enable; + + bool have_msi; + u32 wol; + u16 link_speed; + u16 link_duplex; + + spinlock_t mdio_lock; + spinlock_t tx_lock; + atomic_t 
irq_sem; + + struct work_struct common_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + + /* All Descriptor memory */ + struct atl1c_ring_header ring_header; + struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE]; + struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE]; + struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE]; + struct atl1c_cmb cmb; + struct atl1c_smb smb; + int num_rx_queues; + u32 bd_number; /* board number;*/ +}; + +#define AT_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + reg))) + +#define AT_WRITE_FLUSH(a) (\ + readl((a)->hw_addr)) + +#define AT_READ_REG(a, reg, pdata) do { \ + if (unlikely((a)->hibernate)) { \ + readl((a)->hw_addr + reg); \ + *(u32 *)pdata = readl((a)->hw_addr + reg); \ + } else { \ + *(u32 *)pdata = readl((a)->hw_addr + reg); \ + } \ + } while (0) + +#define AT_WRITE_REGB(a, reg, value) (\ + writeb((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGB(a, reg) (\ + readb((a)->hw_addr + reg)) + +#define AT_WRITE_REGW(a, reg, value) (\ + writew((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGW(a, reg) (\ + readw((a)->hw_addr + reg)) + +#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) + +#define AT_READ_REG_ARRAY(a, reg, offset) ( \ + readl(((a)->hw_addr + reg) + ((offset) << 2))) + +extern char atl1c_driver_name[]; +extern char atl1c_driver_version[]; + +extern int atl1c_up(struct atl1c_adapter *adapter); +extern void atl1c_down(struct atl1c_adapter *adapter); +extern void atl1c_reinit_locked(struct atl1c_adapter *adapter); +extern s32 atl1c_reset_hw(struct atl1c_hw *hw); +extern void atl1c_set_ethtool_ops(struct net_device *netdev); +#endif /* _ATL1C_H_ */ diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c new file mode 100644 index 0000000..61a0f2f --- /dev/null +++ b/drivers/net/atl1c/atl1c_ethtool.c @@ -0,0 +1,319 @@ +/* + * Copyright(c) 2009 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#include +#include + +#include "atl1c.h" + +static int atl1c_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_TP; + + ecmd->advertising |= hw->autoneg_advertised; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed != SPEED_0) { + ecmd->speed = adapter->link_speed; + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_ENABLE; + return 0; +} + +static int atl1c_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u16 autoneg_advertised; + + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + autoneg_advertised = ADVERTISED_Autoneg; + } else { + if (ecmd->speed == SPEED_1000) { + if (ecmd->duplex != DUPLEX_FULL) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "1000M half is invalid\n"); + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + autoneg_advertised = ADVERTISED_1000baseT_Full; + } else if (ecmd->speed == SPEED_100) { + if (ecmd->duplex == DUPLEX_FULL) + autoneg_advertised = ADVERTISED_100baseT_Full; + else + autoneg_advertised = ADVERTISED_100baseT_Half; + } else { + if (ecmd->duplex == DUPLEX_FULL) + autoneg_advertised = ADVERTISED_10baseT_Full; + else + autoneg_advertised = ADVERTISED_10baseT_Half; + } + } + + if (hw->autoneg_advertised != autoneg_advertised) { + hw->autoneg_advertised = autoneg_advertised; + if (atl1c_restart_autoneg(hw) != 0) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "ethtool speed/duplex setting failed\n"); + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + } + clear_bit(__AT_RESETTING, &adapter->flags); + return 0; +} + +static u32 atl1c_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_HW_CSUM) != 0; +} + +static u32 atl1c_get_msglevel(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void atl1c_set_msglevel(struct net_device *netdev, u32 data) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int atl1c_get_regs_len(struct net_device *netdev) +{ + return AT_REGS_LEN; +} + +static void atl1c_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, AT_REGS_LEN); + + regs->version = 0; + AT_READ_REG(hw, REG_VPD_CAP, p++); + AT_READ_REG(hw, REG_PM_CTRL, p++); + AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++); + AT_READ_REG(hw, REG_TWSI_CTRL, p++); + AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL, p++); + AT_READ_REG(hw, REG_MASTER_CTRL, p++); + AT_READ_REG(hw, REG_MANUAL_TIMER_INIT, p++); + AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, p++); + AT_READ_REG(hw, REG_GPHY_CTRL, p++); + 
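+	/* PHY BMCR/BMSR are appended at fixed offsets 73 and 74 below */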
AT_READ_REG(hw, REG_LINK_CTRL, p++); + AT_READ_REG(hw, REG_IDLE_STATUS, p++); + AT_READ_REG(hw, REG_MDIO_CTRL, p++); + AT_READ_REG(hw, REG_SERDES_LOCK, p++); + AT_READ_REG(hw, REG_MAC_CTRL, p++); + AT_READ_REG(hw, REG_MAC_IPG_IFG, p++); + AT_READ_REG(hw, REG_MAC_STA_ADDR, p++); + AT_READ_REG(hw, REG_MAC_STA_ADDR+4, p++); + AT_READ_REG(hw, REG_RX_HASH_TABLE, p++); + AT_READ_REG(hw, REG_RX_HASH_TABLE+4, p++); + AT_READ_REG(hw, REG_RXQ_CTRL, p++); + AT_READ_REG(hw, REG_TXQ_CTRL, p++); + AT_READ_REG(hw, REG_MTU, p++); + AT_READ_REG(hw, REG_WOL_CTRL, p++); + + atl1c_read_phy_reg(hw, MII_BMCR, &phy_data); + regs_buff[73] = (u32) phy_data; + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + regs_buff[74] = (u32) phy_data; +} + +static int atl1c_get_eeprom_len(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (atl1c_check_eeprom_exist(&adapter->hw)) + return AT_EEPROM_LEN; + else + return 0; +} + +static int atl1c_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 *eeprom_buff; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EINVAL; + + if (!atl1c_check_eeprom_exist(hw)) /* not exist */ + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | + (adapter->pdev->device << 16); + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32) * + (last_dword - first_dword + 1), GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + for (i = first_dword; i < last_dword; i++) { + if (!atl1c_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { + kfree(eeprom_buff); + return -EIO; + } + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; + return 0; +} + +static void atl1c_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl1c_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = atl1c_get_regs_len(netdev); + drvinfo->eedump_len = atl1c_get_eeprom_len(netdev); +} + +static void atl1c_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (adapter->wol & AT_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & AT_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & AT_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & AT_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & AT_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; + + return; +} + +static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + return -EOPNOTSUPP; + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= AT_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol 
|= AT_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int atl1c_nway_reset(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + atl1c_reinit_locked(adapter); + return 0; +} + +static const struct ethtool_ops atl1c_ethtool_ops = { + .get_settings = atl1c_get_settings, + .set_settings = atl1c_set_settings, + .get_drvinfo = atl1c_get_drvinfo, + .get_regs_len = atl1c_get_regs_len, + .get_regs = atl1c_get_regs, + .get_wol = atl1c_get_wol, + .set_wol = atl1c_set_wol, + .get_msglevel = atl1c_get_msglevel, + .set_msglevel = atl1c_set_msglevel, + .nway_reset = atl1c_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = atl1c_get_eeprom_len, + .get_eeprom = atl1c_get_eeprom, + .get_tx_csum = atl1c_get_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +}; + +void atl1c_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops); +} diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c new file mode 100644 index 0000000..f1389d6 --- /dev/null +++ b/drivers/net/atl1c/atl1c_hw.c @@ -0,0 +1,586 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include +#include +#include +#include + +#include "atl1c.h" + +/* + * check_eeprom_exist + * return 1 if eeprom exist + */ +int atl1c_check_eeprom_exist(struct atl1c_hw *hw) +{ + u32 data; + + AT_READ_REG(hw, REG_TWSI_DEBUG, &data); + if (data & TWSI_DEBUG_DEV_EXIST) + return 1; + + return 0; +} + +void atl1c_hw_set_mac_addr(struct atl1c_hw *hw) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = (((u32)hw->mac_addr[2]) << 24) | + (((u32)hw->mac_addr[3]) << 16) | + (((u32)hw->mac_addr[4]) << 8) | + (((u32)hw->mac_addr[5])) ; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); + /* hight dword */ + value = (((u32)hw->mac_addr[0]) << 8) | + (((u32)hw->mac_addr[1])) ; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); +} + +/* + * atl1c_get_permanent_address + * return 0 if get valid mac address, + */ +static int atl1c_get_permanent_address(struct atl1c_hw *hw) +{ + u32 addr[2]; + u32 i; + u32 otp_ctrl_data; + u32 twsi_ctrl_data; + u8 eth_addr[ETH_ALEN]; + u16 phy_data; + bool raise_vol = false; + + /* init */ + addr[0] = addr[1] = 0; + AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); + if (atl1c_check_eeprom_exist(hw)) { + if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) { + /* Enable OTP CLK */ + if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { + otp_ctrl_data |= OTP_CTRL_CLK_EN; + AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); + AT_WRITE_FLUSH(hw); + msleep(1); + } + } + + if (hw->nic_type == athr_l2c_b || + hw->nic_type == athr_l2c_b2 || + hw->nic_type == athr_l1d) { + atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); + if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) + goto out; + phy_data &= 0xFF7F; + atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); + + atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); + if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) + goto out; + phy_data |= 0x8; + atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); + udelay(20); + raise_vol = true; + } + + AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); + twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; + AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data); + for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) { + msleep(10); + AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); + if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0) + break; + } + if (i >= AT_TWSI_EEPROM_TIMEOUT) + return -1; + } + /* Disable OTP_CLK */ + if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) { + if (otp_ctrl_data & OTP_CTRL_CLK_EN) { + otp_ctrl_data &= ~OTP_CTRL_CLK_EN; + AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); + AT_WRITE_FLUSH(hw); + msleep(1); + } + } + if (raise_vol) { + if (hw->nic_type == athr_l2c_b || + hw->nic_type == athr_l2c_b2 || + hw->nic_type == athr_l1d) { + atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); + if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) + goto out; + phy_data |= 0x80; + atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); + + atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); + if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) + goto out; + phy_data &= 0xFFF7; + atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); + udelay(20); + } + } + + /* maybe MAC-address is from BIOS */ + AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]); + AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]); + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *)&addr[1]); + + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + +out: + return -1; +} + +bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value) +{ + int i; 
+ int ret = false; + u32 otp_ctrl_data; + u32 control; + u32 data; + + if (offset & 3) + return ret; /* address do not align */ + + AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); + if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) + AT_WRITE_REG(hw, REG_OTP_CTRL, + (otp_ctrl_data | OTP_CTRL_CLK_EN)); + + AT_WRITE_REG(hw, REG_EEPROM_DATA_LO, 0); + control = (offset & EEPROM_CTRL_ADDR_MASK) << EEPROM_CTRL_ADDR_SHIFT; + AT_WRITE_REG(hw, REG_EEPROM_CTRL, control); + + for (i = 0; i < 10; i++) { + udelay(100); + AT_READ_REG(hw, REG_EEPROM_CTRL, &control); + if (control & EEPROM_CTRL_RW) + break; + } + if (control & EEPROM_CTRL_RW) { + AT_READ_REG(hw, REG_EEPROM_CTRL, &data); + AT_READ_REG(hw, REG_EEPROM_DATA_LO, p_value); + data = data & 0xFFFF; + *p_value = swab32((data << 16) | (*p_value >> 16)); + ret = true; + } + if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) + AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); + + return ret; +} +/* + * Reads the adapter's MAC address from the EEPROM + * + * hw - Struct containing variables accessed by shared code + */ +int atl1c_read_mac_addr(struct atl1c_hw *hw) +{ + int err = 0; + + err = atl1c_get_permanent_address(hw); + if (err) + random_ether_addr(hw->perm_mac_addr); + + memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); + return 0; +} + +/* + * atl1c_hash_mc_addr + * purpose + * set hash value for a multicast address + * hash calcu processing : + * 1. calcu 32bit CRC for multicast address + * 2. reverse crc with MSB to LSB + */ +u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr) +{ + u32 crc32; + u32 value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper bit of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. 
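+	 * (Here the MSB of the hash selects the register and bits 30:26
+	 * select the bit within it, as computed below.)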
+ */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + + mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); + + mta |= (1 << hash_bit); + + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + AT_READ_REG(hw, REG_MDIO_CTRL, &val); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16)val; + return 0; + } + + return -1; +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | MDIO_START | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + AT_READ_REG(hw, REG_MDIO_CTRL, &val); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return 0; + + return -1; +} + +/* + * Configures PHY autoneg and flow control advertisement settings + * + * hw - Struct containing variables accessed by shared code + */ +static int atl1c_phy_setup_adv(struct atl1c_hw *hw) +{ + u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK; + u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP & + ~GIGA_CR_1000T_SPEED_MASK; + + if (hw->autoneg_advertised & ADVERTISED_10baseT_Half) + mii_adv_data |= ADVERTISE_10HALF; + if (hw->autoneg_advertised & ADVERTISED_10baseT_Full) + mii_adv_data |= ADVERTISE_10FULL; + if (hw->autoneg_advertised & ADVERTISED_100baseT_Half) + mii_adv_data |= ADVERTISE_100HALF; + if (hw->autoneg_advertised & ADVERTISED_100baseT_Full) + mii_adv_data |= ADVERTISE_100FULL; + + if (hw->autoneg_advertised & ADVERTISED_Autoneg) + mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL; + + if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) { + if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half) + mii_giga_ctrl_data |= ADVERTISE_1000HALF; + if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full) + mii_giga_ctrl_data |= ADVERTISE_1000FULL; + if (hw->autoneg_advertised & ADVERTISED_Autoneg) + mii_giga_ctrl_data |= ADVERTISE_1000HALF | + ADVERTISE_1000FULL; + } + + if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 || + atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0) + return -1; + return 0; +} + +void atl1c_phy_disable(struct atl1c_hw *hw) +{ + AT_WRITE_REGW(hw, REG_GPHY_CTRL, + GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET); +} + +static void atl1c_phy_magic_data(struct atl1c_hw *hw) +{ + u16 data; + + data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE | + ((1 & ANA_INTERVAL_SEL_TIMER_MASK) << + ANA_INTERVAL_SEL_TIMER_SHIFT); + + atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_18); + 
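+	/* MII_DBG_ADDR has selected the analog register; the value itself
+	 * is then written through MII_DBG_DATA */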
atl1c_write_phy_reg(hw, MII_DBG_DATA, data); + + data = (2 & ANA_SERDES_CDR_BW_MASK) | ANA_MS_PAD_DBG | + ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL | + ANA_SERDES_EN_LCKDT; + + atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_5); + atl1c_write_phy_reg(hw, MII_DBG_DATA, data); + + data = (44 & ANA_LONG_CABLE_TH_100_MASK) | + ((33 & ANA_SHORT_CABLE_TH_100_MASK) << + ANA_SHORT_CABLE_TH_100_SHIFT) | ANA_BP_BAD_LINK_ACCUM | + ANA_BP_SMALL_BW; + + atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_54); + atl1c_write_phy_reg(hw, MII_DBG_DATA, data); + + data = (11 & ANA_IECHO_ADJ_MASK) | ((11 & ANA_IECHO_ADJ_MASK) << + ANA_IECHO_ADJ_2_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) << + ANA_IECHO_ADJ_1_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) << + ANA_IECHO_ADJ_0_SHIFT); + + atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_4); + atl1c_write_phy_reg(hw, MII_DBG_DATA, data); + + data = ANA_RESTART_CAL | ((7 & ANA_MANUL_SWICH_ON_MASK) << + ANA_MANUL_SWICH_ON_SHIFT) | ANA_MAN_ENABLE | + ANA_SEL_HSP | ANA_EN_HB | ANA_OEN_125M; + + atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_0); + atl1c_write_phy_reg(hw, MII_DBG_DATA, data); + + if (hw->ctrl_flags & ATL1C_HIB_DISABLE) { + atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_41); + if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0) + return; + data &= ~ANA_TOP_PS_EN; + atl1c_write_phy_reg(hw, MII_DBG_DATA, data); + + atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_11); + if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0) + return; + data &= ~ANA_PS_HIB_EN; + atl1c_write_phy_reg(hw, MII_DBG_DATA, data); + } +} + +int atl1c_phy_reset(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u16 phy_data; + u32 phy_ctrl_data = GPHY_CTRL_DEFAULT; + u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN; + int err; + + if (hw->ctrl_flags & ATL1C_HIB_DISABLE) + phy_ctrl_data &= ~GPHY_CTRL_HIB_EN; + + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data); + AT_WRITE_FLUSH(hw); + msleep(40); + phy_ctrl_data |= GPHY_CTRL_EXT_RESET; + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data); + AT_WRITE_FLUSH(hw); + msleep(10); + + if (hw->nic_type == athr_l2c_b) { + atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x0A); + atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); + atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xDFFF); + } + + if (hw->nic_type == athr_l2c_b || + hw->nic_type == athr_l2c_b2 || + hw->nic_type == athr_l1d) { + atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); + atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); + atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7); + msleep(20); + } + + /*Enable PHY LinkChange Interrupt */ + err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data); + if (err) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Error enable PHY linkChange Interrupt\n"); + return err; + } + if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION)) + atl1c_phy_magic_data(hw); + return 0; +} + +int atl1c_phy_init(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; + struct pci_dev *pdev = adapter->pdev; + int ret_val; + u16 mii_bmcr_data = BMCR_RESET; + u16 phy_id1, phy_id2; + + if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &phy_id1) != 0) || + (atl1c_read_phy_reg(hw, MII_PHYSID2, &phy_id2) != 0)) { + if (netif_msg_link(adapter)) + dev_err(&pdev->dev, "Error get phy ID\n"); + return -1; + } + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + ret_val = atl1c_phy_setup_adv(hw); + if (ret_val) { + if (netif_msg_link(adapter)) + dev_err(&pdev->dev, + "Error 
Setting up Auto-Negotiation\n"); + return ret_val; + } + mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG; + break; + case MEDIA_TYPE_100M_FULL: + mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX; + break; + case MEDIA_TYPE_100M_HALF: + mii_bmcr_data |= BMCR_SPEED_100; + break; + case MEDIA_TYPE_10M_FULL: + mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX; + break; + case MEDIA_TYPE_10M_HALF: + mii_bmcr_data |= BMCR_SPEED_10; + break; + default: + if (netif_msg_link(adapter)) + dev_err(&pdev->dev, "Wrong Media type %d\n", + hw->media_type); + return -1; + break; + } + + ret_val = atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); + if (ret_val) + return ret_val; + hw->phy_configured = true; + + return 0; +} + +/* + * Detects the current speed and duplex settings of the hardware. + * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex) +{ + int err; + u16 phy_data; + + /* Read PHY Specific Status Register (17) */ + err = atl1c_read_phy_reg(hw, MII_GIGA_PSSR, &phy_data); + if (err) + return err; + + if (!(phy_data & GIGA_PSSR_SPD_DPLX_RESOLVED)) + return -1; + + switch (phy_data & GIGA_PSSR_SPEED) { + case GIGA_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case GIGA_PSSR_100MBS: + *speed = SPEED_100; + break; + case GIGA_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + return -1; + break; + } + + if (phy_data & GIGA_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +int atl1c_restart_autoneg(struct atl1c_hw *hw) +{ + int err = 0; + u16 mii_bmcr_data = BMCR_RESET; + + err = atl1c_phy_setup_adv(hw); + if (err) + return err; + mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG; + + return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); +} diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h new file mode 100644 index 0000000..1eeb3ed --- /dev/null +++ b/drivers/net/atl1c/atl1c_hw.h @@ -0,0 +1,864 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _ATL1C_HW_H_ +#define _ATL1C_HW_H_ + +#include +#include + +struct atl1c_adapter; +struct atl1c_hw; + +/* function prototype */ +void atl1c_phy_disable(struct atl1c_hw *hw); +void atl1c_hw_set_mac_addr(struct atl1c_hw *hw); +int atl1c_phy_reset(struct atl1c_hw *hw); +int atl1c_read_mac_addr(struct atl1c_hw *hw); +int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex); +u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr); +void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value); +int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data); +int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data); +bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value); +int atl1c_phy_init(struct atl1c_hw *hw); +int atl1c_check_eeprom_exist(struct atl1c_hw *hw); +int atl1c_restart_autoneg(struct atl1c_hw *hw); + +/* register definition */ +#define REG_DEVICE_CAP 0x5C +#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 + +#define REG_DEVICE_CTRL 0x60 +#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5 +#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7 +#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12 + +#define REG_LINK_CTRL 0x68 +#define LINK_CTRL_L0S_EN 0x01 +#define LINK_CTRL_L1_EN 0x02 +#define LINK_CTRL_EXT_SYNC 0x80 + +#define REG_VPD_CAP 0x6C +#define VPD_CAP_ID_MASK 0xff +#define VPD_CAP_ID_SHIFT 0 +#define VPD_CAP_NEXT_PTR_MASK 0xFF +#define VPD_CAP_NEXT_PTR_SHIFT 8 +#define VPD_CAP_VPD_ADDR_MASK 0x7FFF +#define VPD_CAP_VPD_ADDR_SHIFT 16 +#define VPD_CAP_VPD_FLAG 0x80000000 + +#define REG_VPD_DATA 0x70 + +#define REG_PCIE_UC_SEVERITY 0x10C +#define PCIE_UC_SERVRITY_TRN 0x00000001 +#define PCIE_UC_SERVRITY_DLP 0x00000010 +#define PCIE_UC_SERVRITY_PSN_TLP 0x00001000 +#define PCIE_UC_SERVRITY_FCP 0x00002000 +#define PCIE_UC_SERVRITY_CPL_TO 0x00004000 +#define PCIE_UC_SERVRITY_CA 0x00008000 +#define PCIE_UC_SERVRITY_UC 0x00010000 +#define PCIE_UC_SERVRITY_ROV 0x00020000 +#define PCIE_UC_SERVRITY_MLFP 0x00040000 +#define PCIE_UC_SERVRITY_ECRC 0x00080000 +#define PCIE_UC_SERVRITY_UR 0x00100000 + +#define REG_DEV_SERIALNUM_CTRL 0x200 +#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */ +#define REG_DEV_MAC_SEL_SHIFT 0 +#define REG_DEV_SERIAL_NUM_EN_MASK 0x1 +#define REG_DEV_SERIAL_NUM_EN_SHIFT 1 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 +#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 +#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 +#define TWSI_CTRL_SW_LDSTART 0x800 +#define TWSI_CTRL_HW_LDSTART 0x1000 +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 +#define TWSI_CTRL_LD_EXIST 0x400000 +#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 +#define TWSI_CTRL_FREQ_SEL_100K 0 +#define TWSI_CTRL_FREQ_SEL_200K 1 +#define TWSI_CTRL_FREQ_SEL_300K 2 +#define TWSI_CTRL_FREQ_SEL_400K 3 +#define TWSI_CTRL_SMB_SLV_ADDR +#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 + + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 + +#define REG_TWSI_DEBUG 0x1108 +#define TWSI_DEBUG_DEV_EXIST 0x20000000 + +#define REG_EEPROM_CTRL 0x12C0 +#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF +#define 
EEPROM_CTRL_DATA_HI_SHIFT 0 +#define EEPROM_CTRL_ADDR_MASK 0x3FF +#define EEPROM_CTRL_ADDR_SHIFT 16 +#define EEPROM_CTRL_ACK 0x40000000 +#define EEPROM_CTRL_RW 0x80000000 + +#define REG_EEPROM_DATA_LO 0x12C4 + +#define REG_OTP_CTRL 0x12F0 +#define OTP_CTRL_CLK_EN 0x0002 + +#define REG_PM_CTRL 0x12F8 +#define PM_CTRL_SDES_EN 0x00000001 +#define PM_CTRL_RBER_EN 0x00000002 +#define PM_CTRL_CLK_REQ_EN 0x00000004 +#define PM_CTRL_ASPM_L1_EN 0x00000008 +#define PM_CTRL_SERDES_L1_EN 0x00000010 +#define PM_CTRL_SERDES_PLL_L1_EN 0x00000020 +#define PM_CTRL_SERDES_PD_EX_L1 0x00000040 +#define PM_CTRL_SERDES_BUDS_RX_L1_EN 0x00000080 +#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xF +#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8 +#define PM_CTRL_ASPM_L0S_EN 0x00001000 +#define PM_CTRL_CLK_SWH_L1 0x00002000 +#define PM_CTRL_CLK_PWM_VER1_1 0x00004000 +#define PM_CTRL_PCIE_RECV 0x00008000 +#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF +#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 +#define PM_CTRL_PM_REQ_TIMER_MASK 0xF +#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 +#define PM_CTRL_LCKDET_TIMER_MASK 0x3F +#define PM_CTRL_LCKDET_TIMER_SHIFT 24 +#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000 +#define PM_CTRL_SA_DLY_EN 0x20000000 +#define PM_CTRL_MAC_ASPM_CHK 0x40000000 +#define PM_CTRL_HOTRST 0x80000000 + +/* Selene Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_SOFT_RST 0x1 +#define MASTER_CTRL_TEST_MODE_MASK 0x3 +#define MASTER_CTRL_TEST_MODE_SHIFT 2 +#define MASTER_CTRL_BERT_START 0x10 +#define MASTER_CTRL_MTIMER_EN 0x100 +#define MASTER_CTRL_MANUAL_INT 0x200 +#define MASTER_CTRL_TX_ITIMER_EN 0x400 +#define MASTER_CTRL_RX_ITIMER_EN 0x800 +#define MASTER_CTRL_CLK_SEL_DIS 0x1000 +#define MASTER_CTRL_CLK_SWH_MODE 0x2000 +#define MASTER_CTRL_INT_RDCLR 0x4000 +#define MASTER_CTRL_REV_NUM_SHIFT 16 +#define MASTER_CTRL_REV_NUM_MASK 0xff +#define MASTER_CTRL_DEV_ID_SHIFT 24 +#define MASTER_CTRL_DEV_ID_MASK 0x7f +#define MASTER_CTRL_OTP_SEL 0x80000000 + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + +/* IRQ ModeratorTimer Initial Value Register */ +#define REG_IRQ_MODRT_TIMER_INIT 0x1408 +#define IRQ_MODRT_TIMER_MASK 0xffff +#define IRQ_MODRT_TX_TIMER_SHIFT 0 +#define IRQ_MODRT_RX_TIMER_SHIFT 16 + +#define REG_GPHY_CTRL 0x140C +#define GPHY_CTRL_EXT_RESET 0x1 +#define GPHY_CTRL_RTL_MODE 0x2 +#define GPHY_CTRL_LED_MODE 0x4 +#define GPHY_CTRL_ANEG_NOW 0x8 +#define GPHY_CTRL_REV_ANEG 0x10 +#define GPHY_CTRL_GATE_25M_EN 0x20 +#define GPHY_CTRL_LPW_EXIT 0x40 +#define GPHY_CTRL_PHY_IDDQ 0x80 +#define GPHY_CTRL_PHY_IDDQ_DIS 0x100 +#define GPHY_CTRL_GIGA_DIS 0x200 +#define GPHY_CTRL_HIB_EN 0x400 +#define GPHY_CTRL_HIB_PULSE 0x800 +#define GPHY_CTRL_SEL_ANA_RST 0x1000 +#define GPHY_CTRL_PHY_PLL_ON 0x2000 +#define GPHY_CTRL_PWDOWN_HW 0x4000 +#define GPHY_CTRL_PHY_PLL_BYPASS 0x8000 + +#define GPHY_CTRL_DEFAULT ( \ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_HIB_EN) + +#define GPHY_CTRL_PW_WOL_DIS ( \ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_HIB_EN |\ + GPHY_CTRL_PWDOWN_HW |\ + GPHY_CTRL_PHY_IDDQ) + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 +#define IDLE_STATUS_MASK 0x00FF +#define IDLE_STATUS_RXMAC_NO_IDLE 0x1 +#define IDLE_STATUS_TXMAC_NO_IDLE 0x2 +#define IDLE_STATUS_RXQ_NO_IDLE 0x4 +#define IDLE_STATUS_TXQ_NO_IDLE 0x8 +#define IDLE_STATUS_DMAR_NO_IDLE 0x10 +#define IDLE_STATUS_DMAW_NO_IDLE 0x20 +#define IDLE_STATUS_SMB_NO_IDLE 0x40 +#define IDLE_STATUS_CMB_NO_IDLE 0x80 + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 
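+/*
+ * A PHY access is a single write to this register: the driver ORs
+ * together the PHY register address, a clock selection, MDIO_SUP_PREAMBLE
+ * and MDIO_START (plus MDIO_RW for a read), then polls until MDIO_START
+ * and MDIO_BUSY clear; see atl1c_read_phy_reg()/atl1c_write_phy_reg()
+ * in atl1c_hw.c.
+ */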
+#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit + * control data to write to PHY + * MII management register */ +#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit + * status data that was read + * from the PHY MII management register */ +#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */ +#define MDIO_REG_ADDR_SHIFT 16 +#define MDIO_RW 0x200000 /* 1: read, 0: write */ +#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */ +#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO + * master. And this bit is self + * cleared after one cycle */ +#define MDIO_CLK_SEL_SHIFT 24 +#define MDIO_CLK_25_4 0 +#define MDIO_CLK_25_6 2 +#define MDIO_CLK_25_8 3 +#define MDIO_CLK_25_10 4 +#define MDIO_CLK_25_14 5 +#define MDIO_CLK_25_20 6 +#define MDIO_CLK_25_28 7 +#define MDIO_BUSY 0x8000000 +#define MDIO_AP_EN 0x10000000 +#define MDIO_WAIT_TIMES 10 + +/* MII PHY Status Register */ +#define REG_PHY_STATUS 0x1418 +#define PHY_GENERAL_STATUS_MASK 0xFFFF +#define PHY_STATUS_RECV_ENABLE 0x0001 +#define PHY_OE_PWSP_STATUS_MASK 0x07FF +#define PHY_OE_PWSP_STATUS_SHIFT 16 +#define PHY_STATUS_LPW_STATE 0x80000000 +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141c +#define BIST0_NOW 0x1 +#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is + * un-repairable because + * it has address decoder + * failure or more than 1 cell + * stuck-to-x failure */ +#define BIST0_FUSE_FLAG 0x4 + +/* BIST Control and Status Register1(for the retry buffer of PCI Express) */ +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 +#define BIST1_SRAM_FAIL 0x2 +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES_LOCK 0x1424 +#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal + * comes from Analog SerDes */ +#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */ + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_TX_EN 0x1 +#define MAC_CTRL_RX_EN 0x2 +#define MAC_CTRL_TX_FLOW 0x4 +#define MAC_CTRL_RX_FLOW 0x8 +#define MAC_CTRL_LOOPBACK 0x10 +#define MAC_CTRL_DUPLX 0x20 +#define MAC_CTRL_ADD_CRC 0x40 +#define MAC_CTRL_PAD 0x80 +#define MAC_CTRL_LENCHK 0x100 +#define MAC_CTRL_HUGE_EN 0x200 +#define MAC_CTRL_PRMLEN_SHIFT 10 +#define MAC_CTRL_PRMLEN_MASK 0xf +#define MAC_CTRL_RMV_VLAN 0x4000 +#define MAC_CTRL_PROMIS_EN 0x8000 +#define MAC_CTRL_TX_PAUSE 0x10000 +#define MAC_CTRL_SCNT 0x20000 +#define MAC_CTRL_SRST_TX 0x40000 +#define MAC_CTRL_TX_SIMURST 0x80000 +#define MAC_CTRL_SPEED_SHIFT 20 +#define MAC_CTRL_SPEED_MASK 0x3 +#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 +#define MAC_CTRL_TX_HUGE 0x800000 +#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 +#define MAC_CTRL_MC_ALL_EN 0x2000000 +#define MAC_CTRL_BC_EN 0x4000000 +#define MAC_CTRL_DBG 0x8000000 +#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000 +#define MAC_CTRL_HASH_ALG_CRC32 0x20000000 +#define MAC_CTRL_SPEED_MODE_SW 0x40000000 + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back + * inter-packet gap. 
The + * default is 96-bit time */ +#define MAC_IPG_IFG_IPGT_MASK 0x7f +#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to + * enforce in between RX frames */ +#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFP is dropped */ +#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */ +#define MAC_IPG_IFG_IPGR1_MASK 0x7f +#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */ +#define MAC_IPG_IFG_IPGR2_MASK 0x7f + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */ +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* No back-off on backpressure, + * immediately start the + * transmission after back pressure */ +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */ +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. In unit of 8-bit time */ + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149c + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14a0 +#define WOL_PATTERN_EN 0x00000001 +#define WOL_PATTERN_PME_EN 0x00000002 +#define WOL_MAGIC_EN 0x00000004 +#define WOL_MAGIC_PME_EN 0x00000008 +#define WOL_LINK_CHG_EN 0x00000010 +#define WOL_LINK_CHG_PME_EN 0x00000020 +#define WOL_PATTERN_ST 0x00000100 +#define WOL_MAGIC_ST 0x00000200 +#define WOL_LINKCHG_ST 0x00000400 +#define WOL_CLK_SWITCH_EN 0x00008000 +#define WOL_PT0_EN 0x00010000 +#define WOL_PT1_EN 0x00020000 +#define WOL_PT2_EN 0x00040000 +#define WOL_PT3_EN 0x00080000 +#define WOL_PT4_EN 0x00100000 +#define WOL_PT5_EN 0x00200000 +#define WOL_PT6_EN 0x00400000 + +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PATTERN_LEN 0x14a4 +#define WOL_PT_LEN_MASK 0x7f +#define WOL_PT0_LEN_SHIFT 0 +#define WOL_PT1_LEN_SHIFT 8 +#define WOL_PT2_LEN_SHIFT 16 +#define WOL_PT3_LEN_SHIFT 24 +#define WOL_PT4_LEN_SHIFT 0 +#define WOL_PT5_LEN_SHIFT 8 +#define WOL_PT6_LEN_SHIFT 16 + +/* Internal SRAM Partition Register */ +#define RFDX_HEAD_ADDR_MASK 0x03FF +#define RFDX_HARD_ADDR_SHIFT 0 +#define RFDX_TAIL_ADDR_MASK 0x03FF +#define RFDX_TAIL_ADDR_SHIFT 16 + +#define REG_SRAM_RFD0_INFO 0x1500 +#define REG_SRAM_RFD1_INFO 0x1504 +#define REG_SRAM_RFD2_INFO 0x1508 +#define REG_SRAM_RFD3_INFO 0x150C + +#define REG_RFD_NIC_LEN 0x1510 /* In 8-bytes */ +#define RFD_NIC_LEN_MASK 0x03FF + +#define REG_SRAM_TRD_ADDR 0x1518 +#define TPD_HEAD_ADDR_MASK 0x03FF +#define TPD_HEAD_ADDR_SHIFT 0 +#define TPD_TAIL_ADDR_MASK 0x03FF +#define TPD_TAIL_ADDR_SHIFT 16 + +#define REG_SRAM_TRD_LEN 0x151C /* In 8-bytes */ +#define TPD_NIC_LEN_MASK 0x03FF + +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_ADDR 0x1530 +#define REG_SRAM_PKTH_ADDR 0x1532 + +/* + * Load Ptr Register + * Software sets this bit after the initialization of the head and tail */ +#define REG_LOAD_PTR 0x1534 + +/* 
+ * addresses of all descriptors, as well as the following descriptor + * control register, which triggers each function block to load the head + * pointer to prepare for the operation. This bit is then self-cleared + * after one cycle. + */ +#define REG_RX_BASE_ADDR_HI 0x1540 +#define REG_TX_BASE_ADDR_HI 0x1544 +#define REG_SMB_BASE_ADDR_HI 0x1548 +#define REG_SMB_BASE_ADDR_LO 0x154C +#define REG_RFD0_HEAD_ADDR_LO 0x1550 +#define REG_RFD1_HEAD_ADDR_LO 0x1554 +#define REG_RFD2_HEAD_ADDR_LO 0x1558 +#define REG_RFD3_HEAD_ADDR_LO 0x155C +#define REG_RFD_RING_SIZE 0x1560 +#define RFD_RING_SIZE_MASK 0x0FFF +#define REG_RX_BUF_SIZE 0x1564 +#define RX_BUF_SIZE_MASK 0xFFFF +#define REG_RRD0_HEAD_ADDR_LO 0x1568 +#define REG_RRD1_HEAD_ADDR_LO 0x156C +#define REG_RRD2_HEAD_ADDR_LO 0x1570 +#define REG_RRD3_HEAD_ADDR_LO 0x1574 +#define REG_RRD_RING_SIZE 0x1578 +#define RRD_RING_SIZE_MASK 0x0FFF +#define REG_HTPD_HEAD_ADDR_LO 0x157C +#define REG_NTPD_HEAD_ADDR_LO 0x1580 +#define REG_TPD_RING_SIZE 0x1584 +#define TPD_RING_SIZE_MASK 0xFFFF +#define REG_CMB_BASE_ADDR_LO 0x1588 + +/* RSS about */ +#define REG_RSS_KEY0 0x14B0 +#define REG_RSS_KEY1 0x14B4 +#define REG_RSS_KEY2 0x14B8 +#define REG_RSS_KEY3 0x14BC +#define REG_RSS_KEY4 0x14C0 +#define REG_RSS_KEY5 0x14C4 +#define REG_RSS_KEY6 0x14C8 +#define REG_RSS_KEY7 0x14CC +#define REG_RSS_KEY8 0x14D0 +#define REG_RSS_KEY9 0x14D4 +#define REG_IDT_TABLE0 0x14E0 +#define REG_IDT_TABLE1 0x14E4 +#define REG_IDT_TABLE2 0x14E8 +#define REG_IDT_TABLE3 0x14EC +#define REG_IDT_TABLE4 0x14F0 +#define REG_IDT_TABLE5 0x14F4 +#define REG_IDT_TABLE6 0x14F8 +#define REG_IDT_TABLE7 0x14FC +#define REG_IDT_TABLE REG_IDT_TABLE0 +#define REG_RSS_HASH_VALUE 0x15B0 +#define REG_RSS_HASH_FLAG 0x15B4 +#define REG_BASE_CPU_NUMBER 0x15B8 + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1590 +#define TXQ_NUM_TPD_BURST_MASK 0xF +#define TXQ_NUM_TPD_BURST_SHIFT 0 +#define TXQ_CTRL_IP_OPTION_EN 0x10 +#define TXQ_CTRL_EN 0x20 +#define TXQ_CTRL_ENH_MODE 0x40 +#define TXQ_CTRL_LS_8023_EN 0x80 +#define TXQ_TXF_BURST_NUM_SHIFT 16 +#define TXQ_TXF_BURST_NUM_MASK 0xFFFF + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */ +#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF + +#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */ +#define TXF_WATER_MARK_MASK 0x0FFF +#define TXF_LOW_WATER_MARK_SHIFT 0 +#define TXF_HIGH_WATER_MARK_SHIFT 16 +#define TXQ_CTRL_BURST_MODE_EN 0x80000000 + +#define REG_THRUPUT_MON_CTRL 0x159C +#define THRUPUT_MON_RATE_MASK 0x3 +#define THRUPUT_MON_RATE_SHIFT 0 +#define THRUPUT_MON_EN 0x80 + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define ASPM_THRUPUT_LIMIT_MASK 0x3 +#define ASPM_THRUPUT_LIMIT_SHIFT 0 +#define ASPM_THRUPUT_LIMIT_NO 0x00 +#define ASPM_THRUPUT_LIMIT_1M 0x01 +#define ASPM_THRUPUT_LIMIT_10M 0x02 +#define ASPM_THRUPUT_LIMIT_100M 0x04 +#define RXQ1_CTRL_EN 0x10 +#define RXQ2_CTRL_EN 0x20 +#define RXQ3_CTRL_EN 0x40 +#define IPV6_CHKSUM_CTRL_EN 0x80 +#define RSS_HASH_BITS_MASK 0x00FF +#define RSS_HASH_BITS_SHIFT 8 +#define RSS_HASH_IPV4 0x10000 +#define RSS_HASH_IPV4_TCP 0x20000 +#define RSS_HASH_IPV6 0x40000 +#define RSS_HASH_IPV6_TCP 0x80000 +#define RXQ_RFD_BURST_NUM_MASK 0x003F +#define RXQ_RFD_BURST_NUM_SHIFT 20 +#define RSS_MODE_MASK 0x0003 +#define RSS_MODE_SHIFT 26 +#define RSS_NIP_QUEUE_SEL_MASK 0x1 +#define RSS_NIP_QUEUE_SEL_SHIFT 28 +#define RRS_HASH_CTRL_EN 0x20000000 +#define RX_CUT_THRU_EN 0x40000000 +#define RXQ_CTRL_EN 0x80000000 + +#define REG_RFD_FREE_THRESH 0x15A4 +#define 
RFD_FREE_THRESH_MASK 0x003F +#define RFD_FREE_HI_THRESH_SHIFT 0 +#define RFD_FREE_LO_THRESH_SHIFT 6 + +/* RXF flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0x0FFF +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0x0FFF + +#define REG_RXD_DMA_CTRL 0x15AC +#define RXD_DMA_THRESH_MASK 0x0FFF /* In 8-bytes */ +#define RXD_DMA_THRESH_SHIFT 0 +#define RXD_DMA_DOWN_TIMER_MASK 0xFFFF +#define RXD_DMA_DOWN_TIMER_SHIFT 16 + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_DMAR_IN_ORDER 0x1 +#define DMA_CTRL_DMAR_ENH_ORDER 0x2 +#define DMA_CTRL_DMAR_OUT_ORDER 0x4 +#define DMA_CTRL_RCB_VALUE 0x8 +#define DMA_CTRL_DMAR_BURST_LEN_MASK 0x0007 +#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 +#define DMA_CTRL_DMAW_BURST_LEN_MASK 0x0007 +#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 +#define DMA_CTRL_DMAR_REQ_PRI 0x400 +#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x001F +#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11 +#define DMA_CTRL_DMAW_DLY_CNT_MASK 0x000F +#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16 +#define DMA_CTRL_CMB_EN 0x100000 +#define DMA_CTRL_SMB_EN 0x200000 +#define DMA_CTRL_CMB_NOW 0x400000 +#define MAC_CTRL_SMB_DIS 0x1000000 +#define DMA_CTRL_SMB_NOW 0x80000000 + +/* CMB/SMB Control Register */ +#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */ +#define SMB_STAT_TIMER_MASK 0xFFFFFF +#define REG_CMB_TPD_THRESH 0x15C8 +#define CMB_TPD_THRESH_MASK 0xFFFF +#define REG_CMB_TX_TIMER 0x15CC /* 2us resolution */ +#define CMB_TX_TIMER_MASK 0xFFFF + +/* Mail box */ +#define MB_RFDX_PROD_IDX_MASK 0xFFFF +#define REG_MB_RFD0_PROD_IDX 0x15E0 +#define REG_MB_RFD1_PROD_IDX 0x15E4 +#define REG_MB_RFD2_PROD_IDX 0x15E8 +#define REG_MB_RFD3_PROD_IDX 0x15EC + +#define MB_PRIO_PROD_IDX_MASK 0xFFFF +#define REG_MB_PRIO_PROD_IDX 0x15F0 +#define MB_HTPD_PROD_IDX_SHIFT 0 +#define MB_NTPD_PROD_IDX_SHIFT 16 + +#define MB_PRIO_CONS_IDX_MASK 0xFFFF +#define REG_MB_PRIO_CONS_IDX 0x15F4 +#define MB_HTPD_CONS_IDX_SHIFT 0 +#define MB_NTPD_CONS_IDX_SHIFT 16 + +#define REG_MB_RFD01_CONS_IDX 0x15F8 +#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF +#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000 +#define REG_MB_RFD23_CONS_IDX 0x15FC +#define MB_RFD2_CONS_IDX_MASK 0x0000FFFF +#define MB_RFD3_CONS_IDX_MASK 0xFFFF0000 + +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_SMB 0x00000001 +#define ISR_TIMER 0x00000002 +/* + * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set + * in Table 51 Selene Master Control Register (Offset 0x1400). 
+ */ +#define ISR_MANUAL 0x00000004 +#define ISR_HW_RXF_OV 0x00000008 /* RXF overflow interrupt */ +#define ISR_RFD0_UR 0x00000010 /* RFD0 under run */ +#define ISR_RFD1_UR 0x00000020 +#define ISR_RFD2_UR 0x00000040 +#define ISR_RFD3_UR 0x00000080 +#define ISR_TXF_UR 0x00000100 +#define ISR_DMAR_TO_RST 0x00000200 +#define ISR_DMAW_TO_RST 0x00000400 +#define ISR_TX_CREDIT 0x00000800 +#define ISR_GPHY 0x00001000 +/* GPHY low power state interrupt */ +#define ISR_GPHY_LPW 0x00002000 +#define ISR_TXQ_TO_RST 0x00004000 +#define ISR_TX_PKT 0x00008000 +#define ISR_RX_PKT_0 0x00010000 +#define ISR_RX_PKT_1 0x00020000 +#define ISR_RX_PKT_2 0x00040000 +#define ISR_RX_PKT_3 0x00080000 +#define ISR_MAC_RX 0x00100000 +#define ISR_MAC_TX 0x00200000 +#define ISR_UR_DETECTED 0x00400000 +#define ISR_FERR_DETECTED 0x00800000 +#define ISR_NFERR_DETECTED 0x01000000 +#define ISR_CERR_DETECTED 0x02000000 +#define ISR_PHY_LINKDOWN 0x04000000 +#define ISR_DIS_INT 0x80000000 + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + +#define IMR_NORMAL_MASK (\ + ISR_MANUAL |\ + ISR_HW_RXF_OV |\ + ISR_RFD0_UR |\ + ISR_TXF_UR |\ + ISR_DMAR_TO_RST |\ + ISR_TXQ_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_GPHY |\ + ISR_TX_PKT |\ + ISR_RX_PKT_0 |\ + ISR_GPHY_LPW |\ + ISR_PHY_LINKDOWN) + +#define ISR_RX_PKT (\ + ISR_RX_PKT_0 |\ + ISR_RX_PKT_1 |\ + ISR_RX_PKT_2 |\ + ISR_RX_PKT_3) + +#define ISR_OVER (\ + ISR_RFD0_UR |\ + ISR_RFD1_UR |\ + ISR_RFD2_UR |\ + ISR_RFD3_UR |\ + ISR_HW_RXF_OV |\ + ISR_TXF_UR) + +#define ISR_ERROR (\ + ISR_DMAR_TO_RST |\ + ISR_TXQ_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_PHY_LINKDOWN) + +#define REG_INT_RETRIG_TIMER 0x1608 +#define INT_RETRIG_TIMER_MASK 0xFFFF + +#define REG_HDS_CTRL 0x160C +#define HDS_CTRL_EN 0x0001 +#define HDS_CTRL_BACKFILLSIZE_SHIFT 8 +#define HDS_CTRL_BACKFILLSIZE_MASK 0x0FFF +#define HDS_CTRL_MAX_HDRSIZE_SHIFT 20 +#define HDS_CTRL_MAC_HDRSIZE_MASK 0x0FFF + +#define REG_MAC_RX_STATUS_BIN 0x1700 +#define REG_MAC_RX_STATUS_END 0x175c +#define REG_MAC_TX_STATUS_BIN 0x1760 +#define REG_MAC_TX_STATUS_END 0x17c0 + +/* DEBUG ADDR */ +#define REG_DEBUG_DATA0 0x1900 +#define REG_DEBUG_DATA1 0x1904 + +/* PHY Control Register */ +#define MII_BMCR 0x00 +#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define BMCR_POWER_DOWN 0x0800 /* Power down */ +#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define BMCR_SPEED_MASK 0x2040 +#define BMCR_SPEED_1000 0x0040 +#define BMCR_SPEED_100 0x2000 +#define BMCR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_BMSR 0x01 +#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +#define MII_PHYSID1 0x02 +#define MII_PHYSID2 0x03 + +/* Autoneg Advertisement Register */ +#define MII_ADVERTISE 0x04 +#define ADVERTISE_SPEED_MASK 0x01E0 +#define ADVERTISE_DEFAULT_CAP 0x0DE0 + +/* 1000BASE-T Control Register */ +#define MII_GIGA_CR 0x09 +#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */ + +#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define GIGA_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define GIGA_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define GIGA_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define GIGA_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define GIGA_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define GIGA_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ +#define GIGA_CR_1000T_SPEED_MASK 0x0300 +#define GIGA_CR_1000T_DEFAULT_CAP 0x0300 + +/* PHY Specific Status Register */ +#define MII_GIGA_PSSR 0x11 +#define GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define GIGA_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define GIGA_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define GIGA_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define GIGA_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define GIGA_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +/* PHY Interrupt Enable Register */ +#define MII_IER 0x12 +#define IER_LINK_UP 0x0400 +#define IER_LINK_DOWN 0x0800 + +/* PHY Interrupt Status Register */ +#define MII_ISR 0x13 +#define ISR_LINK_UP 0x0400 +#define ISR_LINK_DOWN 0x0800 + +/* Cable-Detect-Test Control Register */ +#define MII_CDTC 0x16 +#define CDTC_EN_OFF 0 /* sc */ +#define CDTC_EN_BITS 1 +#define CDTC_PAIR_OFF 8 +#define CDTC_PAIR_BIT 2 + +/* Cable-Detect-Test Status Register */ +#define MII_CDTS 0x1C +#define CDTS_STATUS_OFF 8 +#define CDTS_STATUS_BITS 2 +#define CDTS_STATUS_NORMAL 0 +#define CDTS_STATUS_SHORT 1 +#define CDTS_STATUS_OPEN 2 +#define CDTS_STATUS_INVALID 3 + +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +#define MII_ANA_CTRL_0 0x0 +#define ANA_RESTART_CAL 0x0001 +#define ANA_MANUL_SWICH_ON_SHIFT 0x1 +#define ANA_MANUL_SWICH_ON_MASK 0xF +#define ANA_MAN_ENABLE 0x0020 +#define ANA_SEL_HSP 0x0040 +#define ANA_EN_HB 0x0080 +#define ANA_EN_HBIAS 0x0100 +#define ANA_OEN_125M 0x0200 +#define ANA_EN_LCKDT 0x0400 +#define ANA_LCKDT_PHY 0x0800 +#define ANA_AFE_MODE 0x1000 +#define ANA_VCO_SLOW 0x2000 +#define ANA_VCO_FAST 0x4000 +#define ANA_SEL_CLK125M_DSP 0x8000 + +#define MII_ANA_CTRL_4 0x4 +#define ANA_IECHO_ADJ_MASK 0xF +#define ANA_IECHO_ADJ_3_SHIFT 0 +#define ANA_IECHO_ADJ_2_SHIFT 4 +#define ANA_IECHO_ADJ_1_SHIFT 8 +#define ANA_IECHO_ADJ_0_SHIFT 12 + +#define MII_ANA_CTRL_5 0x5 +#define ANA_SERDES_CDR_BW_SHIFT 0 +#define ANA_SERDES_CDR_BW_MASK 0x3 +#define ANA_MS_PAD_DBG 0x0004 +#define ANA_SPEEDUP_DBG 0x0008 +#define ANA_SERDES_TH_LOS_SHIFT 4 +#define ANA_SERDES_TH_LOS_MASK 0x3 +#define ANA_SERDES_EN_DEEM 0x0040 +#define 
ANA_SERDES_TXELECIDLE 0x0080 +#define ANA_SERDES_BEACON 0x0100 +#define ANA_SERDES_HALFTXDR 0x0200 +#define ANA_SERDES_SEL_HSP 0x0400 +#define ANA_SERDES_EN_PLL 0x0800 +#define ANA_SERDES_EN 0x1000 +#define ANA_SERDES_EN_LCKDT 0x2000 + +#define MII_ANA_CTRL_11 0xB +#define ANA_PS_HIB_EN 0x8000 + +#define MII_ANA_CTRL_18 0x12 +#define ANA_TEST_MODE_10BT_01SHIFT 0 +#define ANA_TEST_MODE_10BT_01MASK 0x3 +#define ANA_LOOP_SEL_10BT 0x0004 +#define ANA_RGMII_MODE_SW 0x0008 +#define ANA_EN_LONGECABLE 0x0010 +#define ANA_TEST_MODE_10BT_2 0x0020 +#define ANA_EN_10BT_IDLE 0x0400 +#define ANA_EN_MASK_TB 0x0800 +#define ANA_TRIGGER_SEL_TIMER_SHIFT 12 +#define ANA_TRIGGER_SEL_TIMER_MASK 0x3 +#define ANA_INTERVAL_SEL_TIMER_SHIFT 14 +#define ANA_INTERVAL_SEL_TIMER_MASK 0x3 + +#define MII_ANA_CTRL_41 0x29 +#define ANA_TOP_PS_EN 0x8000 + +#define MII_ANA_CTRL_54 0x36 +#define ANA_LONG_CABLE_TH_100_SHIFT 0 +#define ANA_LONG_CABLE_TH_100_MASK 0x3F +#define ANA_DESERVED 0x0040 +#define ANA_EN_LIT_CH 0x0080 +#define ANA_SHORT_CABLE_TH_100_SHIFT 8 +#define ANA_SHORT_CABLE_TH_100_MASK 0x3F +#define ANA_BP_BAD_LINK_ACCUM 0x4000 +#define ANA_BP_SMALL_BW 0x8000 + +#endif /*_ATL1C_HW_H_*/ diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c new file mode 100644 index 0000000..ac89ed4 --- /dev/null +++ b/drivers/net/atl1c/atl1c_main.c @@ -0,0 +1,2891 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include "atl1c.h" + +#define ATL1C_DRV_VERSION "1.0.0.2-NAPI" +char atl1c_driver_name[] = "atl1c"; +char atl1c_driver_version[] = ATL1C_DRV_VERSION; +#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 +#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063 +#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */ +#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */ +#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */ + +#define L2CB_V10 0xc0 +#define L2CB_V11 0xc1 + +/* + * atl1c_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)}, + /* required last entry */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl); + +MODULE_AUTHOR("Jie Yang "); +MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ATL1C_DRV_VERSION); + +static int atl1c_stop_mac(struct atl1c_hw *hw); +static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw); +static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw); +static void atl1c_disable_l0s_l1(struct atl1c_hw *hw); +static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup); +static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter); +static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, + int *work_done, int work_to_do); + +static const u16 atl1c_pay_load_size[] = { + 128, 256, 512, 1024, 2048, 4096, +}; + +static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] = +{ + REG_MB_RFD0_PROD_IDX, + REG_MB_RFD1_PROD_IDX, + REG_MB_RFD2_PROD_IDX, + REG_MB_RFD3_PROD_IDX +}; + +static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] = +{ + REG_RFD0_HEAD_ADDR_LO, + REG_RFD1_HEAD_ADDR_LO, + REG_RFD2_HEAD_ADDR_LO, + REG_RFD3_HEAD_ADDR_LO +}; + +static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] = +{ + REG_RRD0_HEAD_ADDR_LO, + REG_RRD1_HEAD_ADDR_LO, + REG_RRD2_HEAD_ADDR_LO, + REG_RRD3_HEAD_ADDR_LO +}; + +static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; + +/* + * atl1c_init_pcie - init PCIE module + */ +static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) +{ + u32 data; + u32 pci_cmd; + struct pci_dev *pdev = hw->adapter->pdev; + + AT_READ_REG(hw, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_IO); + AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd); + + /* + * Clear any PowerSaveing Settings + */ + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* + * Mask some pcie error bits + */ + AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data); + data &= ~PCIE_UC_SERVRITY_DLP; + data &= ~PCIE_UC_SERVRITY_FCP; + AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data); + + if (flag & ATL1C_PCIE_L0S_L1_DISABLE) + atl1c_disable_l0s_l1(hw); + if (flag & ATL1C_PCIE_PHY_RESET) + AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT); + else + AT_WRITE_REG(hw, REG_GPHY_CTRL, + GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET); + + 
msleep(1); +} + +/* + * atl1c_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static inline void atl1c_irq_enable(struct atl1c_adapter *adapter) +{ + if (likely(atomic_dec_and_test(&adapter->irq_sem))) { + AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF); + AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + AT_WRITE_FLUSH(&adapter->hw); + } +} + +/* + * atl1c_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static inline void atl1c_irq_disable(struct atl1c_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/* + * atl1c_irq_reset - reset interrupt confiure on the NIC + * @adapter: board private structure + */ +static inline void atl1c_irq_reset(struct atl1c_adapter *adapter) +{ + atomic_set(&adapter->irq_sem, 1); + atl1c_irq_enable(adapter); +} + +/* + * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads + * of the idle status register until the device is actually idle + */ +static u32 atl1c_wait_until_idle(struct atl1c_hw *hw) +{ + int timeout; + u32 data; + + for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { + AT_READ_REG(hw, REG_IDLE_STATUS, &data); + if ((data & IDLE_STATUS_MASK) == 0) + return 0; + msleep(1); + } + return data; +} + +/* + * atl1c_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1c_phy_config(unsigned long data) +{ + struct atl1c_adapter *adapter = (struct atl1c_adapter *) data; + struct atl1c_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1c_restart_autoneg(hw); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +void atl1c_reinit_locked(struct atl1c_adapter *adapter) +{ + WARN_ON(in_interrupt()); + atl1c_down(adapter); + atl1c_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); +} + +static void atl1c_check_link_status(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err; + unsigned long flags; + u16 speed, duplex, phy_data; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + /* MII_BMSR must read twise */ + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + + if ((phy_data & BMSR_LSTATUS) == 0) { + /* link down */ + if (netif_carrier_ok(netdev)) { + hw->hibernate = true; + if (atl1c_stop_mac(hw) != 0) + if (netif_msg_hw(adapter)) + dev_warn(&pdev->dev, + "stop mac failed\n"); + atl1c_set_aspm(hw, false); + } + netif_carrier_off(netdev); + } else { + /* Link Up */ + hw->hibernate = false; + spin_lock_irqsave(&adapter->mdio_lock, flags); + err = atl1c_get_speed_and_duplex(hw, &speed, &duplex); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + if (unlikely(err)) + return; + /* link result is our setting */ + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1c_set_aspm(hw, true); + atl1c_enable_tx_ctrl(hw); + atl1c_enable_rx_ctrl(hw); + atl1c_setup_mac_ctrl(adapter); + if (netif_msg_link(adapter)) + dev_info(&pdev->dev, + "%s: %s NIC Link is Up<%d Mbps %s>\n", + atl1c_driver_name, netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "Full Duplex" : "Half Duplex"); + } + if (!netif_carrier_ok(netdev)) + netif_carrier_on(netdev); + } +} + +static void atl1c_link_chg_event(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + u16 phy_data; + u16 link_up; + + spin_lock(&adapter->mdio_lock); + atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->mdio_lock); + link_up = phy_data & BMSR_LSTATUS; + /* notify upper layer link down ASAP */ + if (!link_up) { + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + netif_carrier_off(netdev); + if (netif_msg_link(adapter)) + dev_info(&pdev->dev, + "%s: %s NIC Link is Down\n", + atl1c_driver_name, netdev->name); + adapter->link_speed = SPEED_0; + } + } + + adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE; + schedule_work(&adapter->common_task); +} + +static void atl1c_common_task(struct work_struct *work) +{ + struct atl1c_adapter *adapter; + struct net_device *netdev; + + adapter = container_of(work, struct atl1c_adapter, common_task); + netdev = adapter->netdev; + + if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { + netif_device_detach(netdev); + atl1c_down(adapter); + atl1c_up(adapter); + netif_device_attach(netdev); + return; + } + + if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) + atl1c_check_link_status(adapter); + + return; +} + + +static void atl1c_del_timer(struct atl1c_adapter *adapter) +{ + del_timer_sync(&adapter->phy_config_timer); +} + + +/* + * atl1c_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl1c_tx_timeout(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->work_event |= ATL1C_WORK_EVENT_RESET; + schedule_work(&adapter->common_task); +} + +/* + * atl1c_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
+ */ +static void atl1c_set_multi(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u32 mac_ctrl_data; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data); + + if (netdev->flags & IFF_PROMISC) { + mac_ctrl_data |= MAC_CTRL_PROMIS_EN; + } else if (netdev->flags & IFF_ALLMULTI) { + mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; + mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN; + } else { + mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + } + + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + /* comoute mc addresses' hash value ,and put it into hash table */ + netdev_for_each_mc_addr(mc_ptr, netdev) { + hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr); + atl1c_hash_set(hw, hash_value); + } +} + +static void atl1c_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + u32 mac_ctrl_data = 0; + + if (netif_msg_pktdata(adapter)) + dev_dbg(&pdev->dev, "atl1c_vlan_rx_register\n"); + + atl1c_irq_disable(adapter); + + adapter->vlgrp = grp; + AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data); + + if (grp) { + /* enable VLAN tag insert/strip */ + mac_ctrl_data |= MAC_CTRL_RMV_VLAN; + } else { + /* disable VLAN tag insert/strip */ + mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN; + } + + AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); + atl1c_irq_enable(adapter); +} + +static void atl1c_restore_vlan(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + if (netif_msg_pktdata(adapter)) + dev_dbg(&pdev->dev, "atl1c_restore_vlan !"); + atl1c_vlan_rx_register(adapter->netdev, adapter->vlgrp); +} +/* + * atl1c_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl1c_set_mac_addr(struct net_device *netdev, void *p) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl1c_hw_set_mac_addr(&adapter->hw); + + return 0; +} + +static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, + struct net_device *dev) +{ + int mtu = dev->mtu; + + adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? 
+ roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; +} +/* + * atl1c_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; + } + /* set MTU */ + if (old_mtu != new_mtu && netif_running(netdev)) { + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + netdev->mtu = new_mtu; + adapter->hw.max_frame_size = new_mtu; + atl1c_set_rxbufsize(adapter, netdev); + atl1c_down(adapter); + atl1c_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); + if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) { + u32 phy_data; + + AT_READ_REG(&adapter->hw, 0x1414, &phy_data); + phy_data |= 0x10000000; + AT_WRITE_REG(&adapter->hw, 0x1414, phy_data); + } + + } + return 0; +} + +/* + * caller should hold mdio_lock + */ +static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); + return result; +} + +static void atl1c_mdio_write(struct net_device *netdev, int phy_id, + int reg_num, int val) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); +} + +/* + * atl1c_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl1c_mii_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + int retval = 0; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 0; + break; + + case SIOCGMIIREG: + if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + retval = -EIO; + goto out; + } + break; + + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) { + retval = -EFAULT; + goto out; + } + + dev_dbg(&pdev->dev, " write %x %x", + data->reg_num, data->val_in); + if (atl1c_write_phy_reg(&adapter->hw, + data->reg_num, data->val_in)) { + retval = -EIO; + goto out; + } + break; + + default: + retval = -EOPNOTSUPP; + break; + } +out: + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + return retval; +} + +/* + * atl1c_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl1c_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/* + * atl1c_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + */ +static int __devinit atl1c_alloc_queues(struct atl1c_adapter *adapter) +{ + return 0; +} + +static void atl1c_set_mac_type(struct atl1c_hw *hw) +{ + switch (hw->device_id) { + case PCI_DEVICE_ID_ATTANSIC_L2C: + hw->nic_type = athr_l2c; + break; + case PCI_DEVICE_ID_ATTANSIC_L1C: + 
hw->nic_type = athr_l1c; + break; + case PCI_DEVICE_ID_ATHEROS_L2C_B: + hw->nic_type = athr_l2c_b; + break; + case PCI_DEVICE_ID_ATHEROS_L2C_B2: + hw->nic_type = athr_l2c_b2; + break; + case PCI_DEVICE_ID_ATHEROS_L1D: + hw->nic_type = athr_l1d; + break; + default: + break; + } +} + +static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) +{ + u32 phy_status_data; + u32 link_ctrl_data; + + atl1c_set_mac_type(hw); + AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data); + AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); + + hw->ctrl_flags = ATL1C_INTR_CLEAR_ON_READ | + ATL1C_INTR_MODRT_ENABLE | + ATL1C_RX_IPV6_CHKSUM | + ATL1C_TXQ_MODE_ENHANCE; + if (link_ctrl_data & LINK_CTRL_L0S_EN) + hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; + if (link_ctrl_data & LINK_CTRL_L1_EN) + hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT; + if (link_ctrl_data & LINK_CTRL_EXT_SYNC) + hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC; + + if (hw->nic_type == athr_l1c || + hw->nic_type == athr_l1d) { + hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; + hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; + } + return 0; +} +/* + * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter) + * @adapter: board private structure to initialize + * + * atl1c_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + */ +static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + adapter->wol = 0; + adapter->link_speed = SPEED_0; + adapter->link_duplex = FULL_DUPLEX; + adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE; + adapter->tpd_ring[0].count = 1024; + adapter->rfd_ring[0].count = 512; + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + + /* before link up, we assume hibernate is true */ + hw->hibernate = true; + hw->media_type = MEDIA_TYPE_AUTO_SENSOR; + if (atl1c_setup_mac_funcs(hw) != 0) { + dev_err(&pdev->dev, "set mac function pointers failed\n"); + return -1; + } + hw->intr_mask = IMR_NORMAL_MASK; + hw->phy_configured = false; + hw->preamble_len = 7; + hw->max_frame_size = adapter->netdev->mtu; + if (adapter->num_rx_queues < 2) { + hw->rss_type = atl1c_rss_disable; + hw->rss_mode = atl1c_rss_mode_disable; + } else { + hw->rss_type = atl1c_rss_ipv4; + hw->rss_mode = atl1c_rss_mul_que_mul_int; + hw->rss_hash_bits = 16; + } + hw->autoneg_advertised = ADVERTISED_Autoneg; + hw->indirect_tab = 0xE4E4E4E4; + hw->base_cpu = 0; + + hw->ict = 50000; /* 100ms */ + hw->smb_timer = 200000; /* 400ms */ + hw->cmb_tpd = 4; + hw->cmb_tx_timer = 1; /* 2 us */ + hw->rx_imt = 200; + hw->tx_imt = 1000; + + hw->tpd_burst = 5; + hw->rfd_burst = 8; + hw->dma_order = atl1c_dma_ord_out; + hw->dmar_block = atl1c_dma_req_1024; + hw->dmaw_block = atl1c_dma_req_1024; + hw->dmar_dly_cnt = 15; + hw->dmaw_dly_cnt = 4; + + if (atl1c_alloc_queues(adapter)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + /* TODO */ + atl1c_set_rxbufsize(adapter, adapter->netdev); + atomic_set(&adapter->irq_sem, 1); + spin_lock_init(&adapter->mdio_lock); + spin_lock_init(&adapter->tx_lock); + set_bit(__AT_DOWN, &adapter->flags); + + return 0; +} + +static inline void atl1c_clean_buffer(struct pci_dev *pdev, + struct atl1c_buffer *buffer_info, int in_irq) +{ + u16 pci_driection; + if (buffer_info->flags & ATL1C_BUFFER_FREE) + return; + if 
(buffer_info->dma) { + if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE) + pci_driection = PCI_DMA_FROMDEVICE; + else + pci_driection = PCI_DMA_TODEVICE; + + if (buffer_info->flags & ATL1C_PCIMAP_SINGLE) + pci_unmap_single(pdev, buffer_info->dma, + buffer_info->length, pci_driection); + else if (buffer_info->flags & ATL1C_PCIMAP_PAGE) + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, pci_driection); + } + if (buffer_info->skb) { + if (in_irq) + dev_kfree_skb_irq(buffer_info->skb); + else + dev_kfree_skb(buffer_info->skb); + } + buffer_info->dma = 0; + buffer_info->skb = NULL; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); +} +/* + * atl1c_clean_tx_ring - Free Tx-skb + * @adapter: board private structure + */ +static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + u16 index, ring_count; + + ring_count = tpd_ring->count; + for (index = 0; index < ring_count; index++) { + buffer_info = &tpd_ring->buffer_info[index]; + atl1c_clean_buffer(pdev, buffer_info, 0); + } + + /* Zero out Tx-buffers */ + memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * + ring_count); + atomic_set(&tpd_ring->next_to_clean, 0); + tpd_ring->next_to_use = 0; +} + +/* + * atl1c_clean_rx_ring - Free rx-reservation skbs + * @adapter: board private structure + */ +static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter) +{ + struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + int i, j; + + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < rfd_ring[i].count; j++) { + buffer_info = &rfd_ring[i].buffer_info[j]; + atl1c_clean_buffer(pdev, buffer_info, 0); + } + /* zero out the descriptor ring */ + memset(rfd_ring[i].desc, 0, rfd_ring[i].size); + rfd_ring[i].next_to_clean = 0; + rfd_ring[i].next_to_use = 0; + rrd_ring[i].next_to_use = 0; + rrd_ring[i].next_to_clean = 0; + } +} + +/* + * Read / Write Ptr Initialize: + */ +static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) +{ + struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; + struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; + struct atl1c_buffer *buffer_info; + int i, j; + + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { + tpd_ring[i].next_to_use = 0; + atomic_set(&tpd_ring[i].next_to_clean, 0); + buffer_info = tpd_ring[i].buffer_info; + for (j = 0; j < tpd_ring->count; j++) + ATL1C_SET_BUFFER_STATE(&buffer_info[i], + ATL1C_BUFFER_FREE); + } + for (i = 0; i < adapter->num_rx_queues; i++) { + rfd_ring[i].next_to_use = 0; + rfd_ring[i].next_to_clean = 0; + rrd_ring[i].next_to_use = 0; + rrd_ring[i].next_to_clean = 0; + for (j = 0; j < rfd_ring[i].count; j++) { + buffer_info = &rfd_ring[i].buffer_info[j]; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); + } + } +} + +/* + * atl1c_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + pci_free_consistent(pdev, adapter->ring_header.size, + adapter->ring_header.desc, + adapter->ring_header.dma); + adapter->ring_header.desc = NULL; + + /* Note: just free tdp_ring.buffer_info, + * it contain 
rfd_ring.buffer_info, do not double free */ + if (adapter->tpd_ring[0].buffer_info) { + kfree(adapter->tpd_ring[0].buffer_info); + adapter->tpd_ring[0].buffer_info = NULL; + } +} + +/* + * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; + struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; + struct atl1c_ring_header *ring_header = &adapter->ring_header; + int num_rx_queues = adapter->num_rx_queues; + int size; + int i; + int count = 0; + int rx_desc_count = 0; + u32 offset = 0; + + rrd_ring[0].count = rfd_ring[0].count; + for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++) + tpd_ring[i].count = tpd_ring[0].count; + + for (i = 1; i < adapter->num_rx_queues; i++) + rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count; + + /* 2 tpd queue, one high priority queue, + * another normal priority queue */ + size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 + + rfd_ring->count * num_rx_queues); + tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); + if (unlikely(!tpd_ring->buffer_info)) { + dev_err(&pdev->dev, "kzalloc failed, size = %d\n", + size); + goto err_nomem; + } + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { + tpd_ring[i].buffer_info = + (struct atl1c_buffer *) (tpd_ring->buffer_info + count); + count += tpd_ring[i].count; + } + + for (i = 0; i < num_rx_queues; i++) { + rfd_ring[i].buffer_info = + (struct atl1c_buffer *) (tpd_ring->buffer_info + count); + count += rfd_ring[i].count; + rx_desc_count += rfd_ring[i].count; + } + /* + * real ring DMA buffer + * each ring/block may need up to 8 bytes for alignment, hence the + * additional bytes tacked onto the end. 
+ */ + ring_header->size = size = + sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 + + sizeof(struct atl1c_rx_free_desc) * rx_desc_count + + sizeof(struct atl1c_recv_ret_status) * rx_desc_count + + sizeof(struct atl1c_hw_stats) + + 8 * 4 + 8 * 2 * num_rx_queues; + + ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, + &ring_header->dma); + if (unlikely(!ring_header->desc)) { + dev_err(&pdev->dev, "pci_alloc_consistend failed\n"); + goto err_nomem; + } + memset(ring_header->desc, 0, ring_header->size); + /* init TPD ring */ + + tpd_ring[0].dma = roundup(ring_header->dma, 8); + offset = tpd_ring[0].dma - ring_header->dma; + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { + tpd_ring[i].dma = ring_header->dma + offset; + tpd_ring[i].desc = (u8 *) ring_header->desc + offset; + tpd_ring[i].size = + sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count; + offset += roundup(tpd_ring[i].size, 8); + } + /* init RFD ring */ + for (i = 0; i < num_rx_queues; i++) { + rfd_ring[i].dma = ring_header->dma + offset; + rfd_ring[i].desc = (u8 *) ring_header->desc + offset; + rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) * + rfd_ring[i].count; + offset += roundup(rfd_ring[i].size, 8); + } + + /* init RRD ring */ + for (i = 0; i < num_rx_queues; i++) { + rrd_ring[i].dma = ring_header->dma + offset; + rrd_ring[i].desc = (u8 *) ring_header->desc + offset; + rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) * + rrd_ring[i].count; + offset += roundup(rrd_ring[i].size, 8); + } + + adapter->smb.dma = ring_header->dma + offset; + adapter->smb.smb = (u8 *)ring_header->desc + offset; + return 0; + +err_nomem: + kfree(tpd_ring->buffer_info); + return -ENOMEM; +} + +static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *) + adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *) + adapter->rrd_ring; + struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) + adapter->tpd_ring; + struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb; + struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb; + int i; + + /* TPD */ + AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, + (u32)((tpd_ring[atl1c_trans_normal].dma & + AT_DMA_HI_ADDR_MASK) >> 32)); + /* just enable normal priority TX queue */ + AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO, + (u32)(tpd_ring[atl1c_trans_normal].dma & + AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO, + (u32)(tpd_ring[atl1c_trans_high].dma & + AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_RING_SIZE, + (u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK)); + + + /* RFD */ + AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI, + (u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32)); + for (i = 0; i < adapter->num_rx_queues; i++) + AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i], + (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK)); + + AT_WRITE_REG(hw, REG_RFD_RING_SIZE, + rfd_ring[0].count & RFD_RING_SIZE_MASK); + AT_WRITE_REG(hw, REG_RX_BUF_SIZE, + adapter->rx_buffer_len & RX_BUF_SIZE_MASK); + + /* RRD */ + for (i = 0; i < adapter->num_rx_queues; i++) + AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i], + (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_RRD_RING_SIZE, + (rrd_ring[0].count & RRD_RING_SIZE_MASK)); + + /* CMB */ + AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK); + + /* SMB */ + AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI, + (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32)); + AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO, 
+ (u32)(smb->dma & AT_DMA_LO_ADDR_MASK)); + /* Load all of base address above */ + AT_WRITE_REG(hw, REG_LOAD_PTR, 1); +} + +static void atl1c_configure_tx(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 dev_ctrl_data; + u32 max_pay_load; + u16 tx_offload_thresh; + u32 txq_ctrl_data; + u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ + + extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; + tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; + AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH, + (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK); + AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data); + max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) & + DEVICE_CTRL_MAX_PAYLOAD_MASK; + hw->dmaw_block = min(max_pay_load, hw->dmaw_block); + max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) & + DEVICE_CTRL_MAX_RREQ_SZ_MASK; + hw->dmar_block = min(max_pay_load, hw->dmar_block); + + txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) << + TXQ_NUM_TPD_BURST_SHIFT; + if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE) + txq_ctrl_data |= TXQ_CTRL_ENH_MODE; + txq_ctrl_data |= (atl1c_pay_load_size[hw->dmar_block] & + TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT; + + AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); +} + +static void atl1c_configure_rx(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 rxq_ctrl_data; + + rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) << + RXQ_RFD_BURST_NUM_SHIFT; + + if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM) + rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN; + if (hw->rss_type == atl1c_rss_ipv4) + rxq_ctrl_data |= RSS_HASH_IPV4; + if (hw->rss_type == atl1c_rss_ipv4_tcp) + rxq_ctrl_data |= RSS_HASH_IPV4_TCP; + if (hw->rss_type == atl1c_rss_ipv6) + rxq_ctrl_data |= RSS_HASH_IPV6; + if (hw->rss_type == atl1c_rss_ipv6_tcp) + rxq_ctrl_data |= RSS_HASH_IPV6_TCP; + if (hw->rss_type != atl1c_rss_disable) + rxq_ctrl_data |= RRS_HASH_CTRL_EN; + + rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) << + RSS_MODE_SHIFT; + rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) << + RSS_HASH_BITS_SHIFT; + if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON) + rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_100M & + ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT; + + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); +} + +static void atl1c_configure_rss(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + + AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); + AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu); +} + +static void atl1c_configure_dma(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 dma_ctrl_data; + + dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI; + if (hw->ctrl_flags & ATL1C_CMB_ENABLE) + dma_ctrl_data |= DMA_CTRL_CMB_EN; + if (hw->ctrl_flags & ATL1C_SMB_ENABLE) + dma_ctrl_data |= DMA_CTRL_SMB_EN; + else + dma_ctrl_data |= MAC_CTRL_SMB_DIS; + + switch (hw->dma_order) { + case atl1c_dma_ord_in: + dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER; + break; + case atl1c_dma_ord_enh: + dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER; + break; + case atl1c_dma_ord_out: + dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER; + break; + default: + break; + } + + dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) + << DMA_CTRL_DMAR_BURST_LEN_SHIFT; + dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) + << DMA_CTRL_DMAW_BURST_LEN_SHIFT; + dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK) + << DMA_CTRL_DMAR_DLY_CNT_SHIFT; + dma_ctrl_data |= 
(((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK) + << DMA_CTRL_DMAW_DLY_CNT_SHIFT; + + AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); +} + +/* + * Stop the mac, transmit and receive units + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static int atl1c_stop_mac(struct atl1c_hw *hw) +{ + u32 data; + + AT_READ_REG(hw, REG_RXQ_CTRL, &data); + data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN | + RXQ3_CTRL_EN | RXQ_CTRL_EN); + AT_WRITE_REG(hw, REG_RXQ_CTRL, data); + + AT_READ_REG(hw, REG_TXQ_CTRL, &data); + data &= ~TXQ_CTRL_EN; + AT_WRITE_REG(hw, REG_TWSI_CTRL, data); + + atl1c_wait_until_idle(hw); + + AT_READ_REG(hw, REG_MAC_CTRL, &data); + data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN); + AT_WRITE_REG(hw, REG_MAC_CTRL, data); + + return (int)atl1c_wait_until_idle(hw); +} + +static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw) +{ + u32 data; + + AT_READ_REG(hw, REG_RXQ_CTRL, &data); + switch (hw->adapter->num_rx_queues) { + case 4: + data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN); + break; + case 3: + data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN); + break; + case 2: + data |= RXQ1_CTRL_EN; + break; + default: + break; + } + data |= RXQ_CTRL_EN; + AT_WRITE_REG(hw, REG_RXQ_CTRL, data); +} + +static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw) +{ + u32 data; + + AT_READ_REG(hw, REG_TXQ_CTRL, &data); + data |= TXQ_CTRL_EN; + AT_WRITE_REG(hw, REG_TXQ_CTRL, data); +} + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static int atl1c_reset_mac(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; + struct pci_dev *pdev = adapter->pdev; + int ret; + + AT_WRITE_REG(hw, REG_IMR, 0); + AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT); + + ret = atl1c_stop_mac(hw); + if (ret) + return ret; + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + AT_WRITE_REGW(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST); + AT_WRITE_FLUSH(hw); + msleep(10); + /* Wait at least 10ms for All module to be Idle */ + + if (atl1c_wait_until_idle(hw)) { + dev_err(&pdev->dev, + "MAC state machine can't be idle since" + " disabled for 10ms second\n"); + return -1; + } + return 0; +} + +static void atl1c_disable_l0s_l1(struct atl1c_hw *hw) +{ + u32 pm_ctrl_data; + + AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); + pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << + PM_CTRL_L1_ENTRY_TIMER_SHIFT); + pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1; + pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; + pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; + pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK; + pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; + + pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN; + pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN; + pm_ctrl_data |= PM_CTRL_SERDES_L1_EN; + AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); +} + +/* + * Set ASPM state. + * Enable/disable L0s/L1 depend on link state. 
+ */ +static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) +{ + u32 pm_ctrl_data; + u32 link_ctrl_data; + + AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); + AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); + pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; + + pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << + PM_CTRL_L1_ENTRY_TIMER_SHIFT); + pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK << + PM_CTRL_LCKDET_TIMER_SHIFT); + + pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK; + pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; + pm_ctrl_data |= PM_CTRL_RBER_EN; + pm_ctrl_data |= PM_CTRL_SDES_EN; + + if (hw->nic_type == athr_l2c_b || + hw->nic_type == athr_l1d || + hw->nic_type == athr_l2c_b2) { + link_ctrl_data &= ~LINK_CTRL_EXT_SYNC; + if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) { + if (hw->nic_type == athr_l2c_b && + hw->revision_id == L2CB_V10) + link_ctrl_data |= LINK_CTRL_EXT_SYNC; + } + + AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data); + + pm_ctrl_data |= PM_CTRL_PCIE_RECV; + pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT; + pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S; + pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN; + pm_ctrl_data &= ~PM_CTRL_HOTRST; + pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT; + pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1; + } + + if (linkup) { + pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; + pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; + if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) + pm_ctrl_data |= PM_CTRL_ASPM_L1_EN; + if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) + pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN; + + if (hw->nic_type == athr_l2c_b || + hw->nic_type == athr_l1d || + hw->nic_type == athr_l2c_b2) { + if (hw->nic_type == athr_l2c_b) + if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) + pm_ctrl_data &= PM_CTRL_ASPM_L0S_EN; + pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; + pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; + pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; + pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; + if (hw->adapter->link_speed == SPEED_100 || + hw->adapter->link_speed == SPEED_1000) { + pm_ctrl_data &= + ~(PM_CTRL_L1_ENTRY_TIMER_MASK << + PM_CTRL_L1_ENTRY_TIMER_SHIFT); + if (hw->nic_type == athr_l1d) + pm_ctrl_data |= 0xF << + PM_CTRL_L1_ENTRY_TIMER_SHIFT; + else + pm_ctrl_data |= 7 << + PM_CTRL_L1_ENTRY_TIMER_SHIFT; + } + } else { + pm_ctrl_data |= PM_CTRL_SERDES_L1_EN; + pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN; + pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN; + pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1; + pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; + pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; + } + atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); + if (hw->adapter->link_speed == SPEED_10) + if (hw->nic_type == athr_l1d) + atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D); + else + atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD); + else if (hw->adapter->link_speed == SPEED_100) + atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD); + else + atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD); + + } else { + pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; + pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; + pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; + pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; + + pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; + + if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) + pm_ctrl_data |= PM_CTRL_ASPM_L1_EN; + else + pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; + } + + AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); +} + +static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 mac_ctrl_data; + + mac_ctrl_data = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; + mac_ctrl_data |= 
(MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + + if (adapter->link_duplex == FULL_DUPLEX) { + hw->mac_duplex = true; + mac_ctrl_data |= MAC_CTRL_DUPLX; + } + + if (adapter->link_speed == SPEED_1000) + hw->mac_speed = atl1c_mac_speed_1000; + else + hw->mac_speed = atl1c_mac_speed_10_100; + + mac_ctrl_data |= (hw->mac_speed & MAC_CTRL_SPEED_MASK) << + MAC_CTRL_SPEED_SHIFT; + + mac_ctrl_data |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + mac_ctrl_data |= ((hw->preamble_len & MAC_CTRL_PRMLEN_MASK) << + MAC_CTRL_PRMLEN_SHIFT); + + if (adapter->vlgrp) + mac_ctrl_data |= MAC_CTRL_RMV_VLAN; + + mac_ctrl_data |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + mac_ctrl_data |= MAC_CTRL_PROMIS_EN; + if (netdev->flags & IFF_ALLMULTI) + mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; + + mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN; + if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) { + mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW; + mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32; + } + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); +} + +/* + * atl1c_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. + */ +static int atl1c_configure(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 master_ctrl_data = 0; + u32 intr_modrt_data; + + /* clear interrupt status */ + AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); + /* Clear any WOL status */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + /* set Interrupt Clear Timer + * HW will enable self to assert interrupt event to system after + * waiting x-time for software to notify it accept interrupt. + */ + AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER, + hw->ict & INT_RETRIG_TIMER_MASK); + + atl1c_configure_des_ring(adapter); + + if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) { + intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) << + IRQ_MODRT_TX_TIMER_SHIFT; + intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) << + IRQ_MODRT_RX_TIMER_SHIFT; + AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data); + master_ctrl_data |= + MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN; + } + + if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) + master_ctrl_data |= MASTER_CTRL_INT_RDCLR; + + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); + + if (hw->ctrl_flags & ATL1C_CMB_ENABLE) { + AT_WRITE_REG(hw, REG_CMB_TPD_THRESH, + hw->cmb_tpd & CMB_TPD_THRESH_MASK); + AT_WRITE_REG(hw, REG_CMB_TX_TIMER, + hw->cmb_tx_timer & CMB_TX_TIMER_MASK); + } + + if (hw->ctrl_flags & ATL1C_SMB_ENABLE) + AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, + hw->smb_timer & SMB_STAT_TIMER_MASK); + /* set MTU */ + AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN); + /* HDS, disable */ + AT_WRITE_REG(hw, REG_HDS_CTRL, 0); + + atl1c_configure_tx(adapter); + atl1c_configure_rx(adapter); + atl1c_configure_rss(adapter); + atl1c_configure_dma(adapter); + + return 0; +} + +static void atl1c_update_hw_stats(struct atl1c_adapter *adapter) +{ + u16 hw_reg_addr = 0; + unsigned long *stats_item = NULL; + u32 data; + + /* update rx status */ + hw_reg_addr = REG_MAC_RX_STATUS_BIN; + stats_item = &adapter->hw_stats.rx_ok; + while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { + AT_READ_REG(&adapter->hw, hw_reg_addr, &data); + *stats_item += data; + stats_item++; + hw_reg_addr += 4; + } +/* update tx status */ + hw_reg_addr = REG_MAC_TX_STATUS_BIN; + stats_item = &adapter->hw_stats.tx_ok; + while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { + AT_READ_REG(&adapter->hw, hw_reg_addr, &data); + *stats_item += 
data; + stats_item++; + hw_reg_addr += 4; + } +} + +/* + * atl1c_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + */ +static struct net_device_stats *atl1c_get_stats(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw_stats *hw_stats = &adapter->hw_stats; + struct net_device_stats *net_stats = &adapter->net_stats; + + atl1c_update_hw_stats(adapter); + net_stats->rx_packets = hw_stats->rx_ok; + net_stats->tx_packets = hw_stats->tx_ok; + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_1_col + + hw_stats->tx_2_col * 2 + + hw_stats->tx_late_col + hw_stats->tx_abort_col; + net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + hw_stats->rx_sz_ov + + hw_stats->rx_rrd_ov + hw_stats->rx_align_err; + net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + + net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + + net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + + hw_stats->tx_underrun + hw_stats->tx_trunc; + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + return &adapter->net_stats; +} + +static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter) +{ + u16 phy_data; + + spin_lock(&adapter->mdio_lock); + atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data); + spin_unlock(&adapter->mdio_lock); +} + +static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) + &adapter->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); + u16 hw_next_to_clean; + u16 shift; + u32 data; + + if (type == atl1c_trans_high) + shift = MB_HTPD_CONS_IDX_SHIFT; + else + shift = MB_NTPD_CONS_IDX_SHIFT; + + AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data); + hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK; + + while (next_to_clean != hw_next_to_clean) { + buffer_info = &tpd_ring->buffer_info[next_to_clean]; + atl1c_clean_buffer(pdev, buffer_info, 1); + if (++next_to_clean == tpd_ring->count) + next_to_clean = 0; + atomic_set(&tpd_ring->next_to_clean, next_to_clean); + } + + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) { + netif_wake_queue(adapter->netdev); + } + + return true; +} + +/* + * atl1c_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * @pt_regs: CPU registers structure + */ +static irqreturn_t atl1c_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct atl1c_hw *hw = &adapter->hw; + int max_ints = AT_MAX_INT_WORK; + int handled = IRQ_NONE; + u32 status; + u32 reg_data; + + do { + AT_READ_REG(hw, REG_ISR, ®_data); + status = 
reg_data & hw->intr_mask; + + if (status == 0 || (status & ISR_DIS_INT) != 0) { + if (max_ints != AT_MAX_INT_WORK) + handled = IRQ_HANDLED; + break; + } + /* link event */ + if (status & ISR_GPHY) + atl1c_clear_phy_int(adapter); + /* Ack ISR */ + AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); + if (status & ISR_RX_PKT) { + if (likely(napi_schedule_prep(&adapter->napi))) { + hw->intr_mask &= ~ISR_RX_PKT; + AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); + __napi_schedule(&adapter->napi); + } + } + if (status & ISR_TX_PKT) + atl1c_clean_tx_irq(adapter, atl1c_trans_normal); + + handled = IRQ_HANDLED; + /* check if PCIE PHY Link down */ + if (status & ISR_ERROR) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "atl1c hardware error (status = 0x%x)\n", + status & ISR_ERROR); + /* reset MAC */ + hw->intr_mask &= ~ISR_ERROR; + AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); + adapter->work_event |= ATL1C_WORK_EVENT_RESET; + schedule_work(&adapter->common_task); + break; + } + + if (status & ISR_OVER) + if (netif_msg_intr(adapter)) + dev_warn(&pdev->dev, + "TX/RX overflow (status = 0x%x)\n", + status & ISR_OVER); + + /* link event */ + if (status & (ISR_GPHY | ISR_MANUAL)) { + adapter->net_stats.tx_carrier_errors++; + atl1c_link_chg_event(adapter); + break; + } + + } while (--max_ints > 0); + /* re-enable Interrupt*/ + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + return handled; +} + +static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, + struct sk_buff *skb, struct atl1c_recv_ret_status *prrs) +{ + /* + * The pid field in RRS in not correct sometimes, so we + * cannot figure out if the packet is fragmented or not, + * so we tell the KERNEL CHECKSUM_NONE + */ + skb->ip_summed = CHECKSUM_NONE; +} + +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid) +{ + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid]; + struct pci_dev *pdev = adapter->pdev; + struct atl1c_buffer *buffer_info, *next_info; + struct sk_buff *skb; + void *vir_addr = NULL; + u16 num_alloc = 0; + u16 rfd_next_to_use, next_next; + struct atl1c_rx_free_desc *rfd_desc; + + next_next = rfd_next_to_use = rfd_ring->next_to_use; + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + + while (next_info->flags & ATL1C_BUFFER_FREE) { + rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); + + skb = dev_alloc_skb(adapter->rx_buffer_len); + if (unlikely(!skb)) { + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, "alloc rx buffer failed\n"); + break; + } + + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + vir_addr = skb->data; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; + buffer_info->dma = pci_map_single(pdev, vir_addr, + buffer_info->length, + PCI_DMA_FROMDEVICE); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_FROMDEVICE); + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_next_to_use = next_next; + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + num_alloc++; + } + + if (num_alloc) { + /* TODO: update mailbox here */ + wmb(); + rfd_ring->next_to_use = rfd_next_to_use; + AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid], + 
rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK); + } + + return num_alloc; +} + +static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring, + struct atl1c_recv_ret_status *rrs, u16 num) +{ + u16 i; + /* the relationship between rrd and rfd is one map one */ + for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring, + rrd_ring->next_to_clean)) { + rrs->word3 &= ~RRS_RXD_UPDATED; + if (++rrd_ring->next_to_clean == rrd_ring->count) + rrd_ring->next_to_clean = 0; + } +} + +static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring, + struct atl1c_recv_ret_status *rrs, u16 num) +{ + u16 i; + u16 rfd_index; + struct atl1c_buffer *buffer_info = rfd_ring->buffer_info; + + rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & + RRS_RX_RFD_INDEX_MASK; + for (i = 0; i < num; i++) { + buffer_info[rfd_index].skb = NULL; + ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index], + ATL1C_BUFFER_FREE); + if (++rfd_index == rfd_ring->count) + rfd_index = 0; + } + rfd_ring->next_to_clean = rfd_index; +} + +static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, + int *work_done, int work_to_do) +{ + u16 rfd_num, rfd_index; + u16 count = 0; + u16 length; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que]; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que]; + struct sk_buff *skb; + struct atl1c_recv_ret_status *rrs; + struct atl1c_buffer *buffer_info; + + while (1) { + if (*work_done >= work_to_do) + break; + rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean); + if (likely(RRS_RXD_IS_VALID(rrs->word3))) { + rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) & + RRS_RX_RFD_CNT_MASK; + if (unlikely(rfd_num != 1)) + /* TODO support mul rfd*/ + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "Multi rfd not support yet!\n"); + goto rrs_checked; + } else { + break; + } +rrs_checked: + atl1c_clean_rrd(rrd_ring, rrs, rfd_num); + if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) { + atl1c_clean_rfd(rfd_ring, rrs, rfd_num); + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "wrong packet! 
rrs word3 is %x\n", + rrs->word3); + continue; + } + + length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK); + /* Good Receive */ + if (likely(rfd_num == 1)) { + rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & + RRS_RX_RFD_INDEX_MASK; + buffer_info = &rfd_ring->buffer_info[rfd_index]; + pci_unmap_single(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + skb = buffer_info->skb; + } else { + /* TODO */ + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "Multi rfd not support yet!\n"); + break; + } + atl1c_clean_rfd(rfd_ring, rrs, rfd_num); + skb_put(skb, length - ETH_FCS_LEN); + skb->protocol = eth_type_trans(skb, netdev); + skb->dev = netdev; + atl1c_rx_checksum(adapter, skb, rrs); + if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) { + u16 vlan; + + AT_TAG_TO_VLAN(rrs->vlan_tag, vlan); + vlan = le16_to_cpu(vlan); + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vlan); + } else + netif_receive_skb(skb); + + (*work_done)++; + count++; + } + if (count) + atl1c_alloc_rx_buffer(adapter, que); +} + +/* + * atl1c_clean - NAPI Rx polling callback + * @adapter: board private structure + */ +static int atl1c_clean(struct napi_struct *napi, int budget) +{ + struct atl1c_adapter *adapter = + container_of(napi, struct atl1c_adapter, napi); + int work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + /* just enable one RXQ */ + atl1c_clean_rx_irq(adapter, 0, &work_done, budget); + + if (work_done < budget) { +quit_polling: + napi_complete(napi); + adapter->hw.intr_mask |= ISR_RX_PKT; + AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + } + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void atl1c_netpoll(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + atl1c_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + u16 next_to_use = 0; + u16 next_to_clean = 0; + + next_to_clean = atomic_read(&tpd_ring->next_to_clean); + next_to_use = tpd_ring->next_to_use; + + return (u16)(next_to_clean > next_to_use) ? 
+ (next_to_clean - next_to_use - 1) : + (tpd_ring->count + next_to_clean - next_to_use - 1); +} + +/* + * get next usable tpd + * Note: should call atl1c_tdp_avail to make sure + * there is enough tpd to use + */ +static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_tpd_desc *tpd_desc; + u16 next_to_use = 0; + + next_to_use = tpd_ring->next_to_use; + if (++tpd_ring->next_to_use == tpd_ring->count) + tpd_ring->next_to_use = 0; + tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use); + memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc)); + return tpd_desc; +} + +static struct atl1c_buffer * +atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd) +{ + struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; + + return &tpd_ring->buffer_info[tpd - + (struct atl1c_tpd_desc *)tpd_ring->desc]; +} + +/* Calculate the transmit packet descript needed*/ +static u16 atl1c_cal_tpd_req(const struct sk_buff *skb) +{ + u16 tpd_req; + u16 proto_hdr_len = 0; + + tpd_req = skb_shinfo(skb)->nr_frags + 1; + + if (skb_is_gso(skb)) { + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (proto_hdr_len < skb_headlen(skb)) + tpd_req++; + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + tpd_req++; + } + return tpd_req; +} + +static int atl1c_tso_csum(struct atl1c_adapter *adapter, + struct sk_buff *skb, + struct atl1c_tpd_desc **tpd, + enum atl1c_trans_queue type) +{ + struct pci_dev *pdev = adapter->pdev; + u8 hdr_len; + u32 real_len; + unsigned short offload_type; + int err; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (unlikely(err)) + return -1; + } + offload_type = skb_shinfo(skb)->gso_type; + + if (offload_type & SKB_GSO_TCPV4) { + real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + + ntohs(ip_hdr(skb)->tot_len)); + + if (real_len < skb->len) + pskb_trim(skb, real_len); + + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum need */ + if (netif_msg_tx_queued(adapter)) + dev_warn(&pdev->dev, + "IPV4 tso with zero data??\n"); + goto check_sum; + } else { + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic( + ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + (*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT; + } + } + + if (offload_type & SKB_GSO_TCPV6) { + struct atl1c_tpd_ext_desc *etpd = + *(struct atl1c_tpd_ext_desc **)(tpd); + + memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc)); + *tpd = atl1c_get_tpd(adapter, type); + ipv6_hdr(skb)->payload_len = 0; + /* check payload == 0 byte ? 
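(a GSO skb whose total length equals its combined header length carries no payload, so only checksum offload is needed below) 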
*/ + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum need */ + if (netif_msg_tx_queued(adapter)) + dev_warn(&pdev->dev, + "IPV6 tso with zero data??\n"); + goto check_sum; + } else + tcp_hdr(skb)->check = ~csum_ipv6_magic( + &ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + etpd->word1 |= 1 << TPD_LSO_EN_SHIFT; + etpd->word1 |= 1 << TPD_LSO_VER_SHIFT; + etpd->pkt_len = cpu_to_le32(skb->len); + (*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT; + } + + (*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT; + (*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) << + TPD_TCPHDR_OFFSET_SHIFT; + (*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) << + TPD_MSS_SHIFT; + return 0; + } + +check_sum: + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + u8 css, cso; + cso = skb_transport_offset(skb); + + if (unlikely(cso & 0x1)) { + if (netif_msg_tx_err(adapter)) + dev_err(&adapter->pdev->dev, + "payload offset should not an event number\n"); + return -1; + } else { + css = cso + skb->csum_offset; + + (*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + (*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) << + TPD_CCSUM_OFFSET_SHIFT; + (*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT; + } + } + return 0; +} + +static void atl1c_tx_map(struct atl1c_adapter *adapter, + struct sk_buff *skb, struct atl1c_tpd_desc *tpd, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_desc *use_tpd = NULL; + struct atl1c_buffer *buffer_info = NULL; + u16 buf_len = skb_headlen(skb); + u16 map_len = 0; + u16 mapped_len = 0; + u16 hdr_len = 0; + u16 nr_frags; + u16 f; + int tso; + + nr_frags = skb_shinfo(skb)->nr_frags; + tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK; + if (tso) { + /* TSO */ + map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + use_tpd = tpd; + + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = map_len; + buffer_info->dma = pci_map_single(adapter->pdev, + skb->data, hdr_len, PCI_DMA_TODEVICE); + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_TODEVICE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + if (mapped_len < buf_len) { + /* mapped_len == 0, means we should use the first tpd, + which is given by caller */ + if (mapped_len == 0) + use_tpd = tpd; + else { + use_tpd = atl1c_get_tpd(adapter, type); + memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); + } + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = buf_len - mapped_len; + buffer_info->dma = + pci_map_single(adapter->pdev, skb->data + mapped_len, + buffer_info->length, PCI_DMA_TODEVICE); + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_TODEVICE); + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + + use_tpd = atl1c_get_tpd(adapter, type); + memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); + + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = frag->size; + buffer_info->dma = + pci_map_page(adapter->pdev, frag->page, + frag->page_offset, + buffer_info->length, + PCI_DMA_TODEVICE); + 
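/* record this as a page mapping so the completion path can pick the matching unmap variant */ +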
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, + ATL1C_PCIMAP_TODEVICE); + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + /* The last tpd */ + use_tpd->word1 |= 1 << TPD_EOP_SHIFT; + /* The last buffer info contain the skb address, + so it will be free after unmap */ + buffer_info->skb = skb; +} + +static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, + struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + u32 prod_data; + + AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data); + switch (type) { + case atl1c_trans_high: + prod_data &= 0xFFFF0000; + prod_data |= tpd_ring->next_to_use & 0xFFFF; + break; + case atl1c_trans_normal: + prod_data &= 0x0000FFFF; + prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16; + break; + default: + break; + } + wmb(); + AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data); +} + +static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u16 tpd_req = 1; + struct atl1c_tpd_desc *tpd; + enum atl1c_trans_queue type = atl1c_trans_normal; + + if (test_bit(__AT_DOWN, &adapter->flags)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tpd_req = atl1c_cal_tpd_req(skb); + if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { + if (netif_msg_pktdata(adapter)) + dev_info(&adapter->pdev->dev, "tx locked\n"); + return NETDEV_TX_LOCKED; + } + if (skb->mark == 0x01) + type = atl1c_trans_high; + else + type = atl1c_trans_normal; + + if (atl1c_tpd_avail(adapter, type) < tpd_req) { + /* no enough descriptor, just stop queue */ + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + tpd = atl1c_get_tpd(adapter, type); + + /* do TSO and check sum */ + if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) { + spin_unlock_irqrestore(&adapter->tx_lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { + u16 vlan = vlan_tx_tag_get(skb); + __le16 tag; + + vlan = cpu_to_le16(vlan); + AT_VLAN_TO_TAG(vlan, tag); + tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT; + tpd->vlan_tag = tag; + } + + if (skb_network_offset(skb) != ETH_HLEN) + tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ + + atl1c_tx_map(adapter, skb, tpd, type); + atl1c_tx_queue(adapter, skb, tpd, type); + + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_OK; +} + +static void atl1c_free_irq(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); +} + +static int atl1c_request_irq(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int flags = 0; + int err = 0; + + adapter->have_msi = true; + err = pci_enable_msi(adapter->pdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_err(&pdev->dev, + "Unable to allocate MSI interrupt Error: %d\n", + err); + adapter->have_msi = false; + } else + netdev->irq = pdev->irq; + + if (!adapter->have_msi) + flags |= IRQF_SHARED; + err = request_irq(adapter->pdev->irq, atl1c_intr, flags, + netdev->name, netdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_err(&pdev->dev, + 
"Unable to allocate interrupt Error: %d\n", + err); + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); + return err; + } + if (netif_msg_ifup(adapter)) + dev_dbg(&pdev->dev, "atl1c_request_irq OK\n"); + return err; +} + +int atl1c_up(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int num; + int err; + int i; + + netif_carrier_off(netdev); + atl1c_init_ring_ptrs(adapter); + atl1c_set_multi(netdev); + atl1c_restore_vlan(adapter); + + for (i = 0; i < adapter->num_rx_queues; i++) { + num = atl1c_alloc_rx_buffer(adapter, i); + if (unlikely(num == 0)) { + err = -ENOMEM; + goto err_alloc_rx; + } + } + + if (atl1c_configure(adapter)) { + err = -EIO; + goto err_up; + } + + err = atl1c_request_irq(adapter); + if (unlikely(err)) + goto err_up; + + clear_bit(__AT_DOWN, &adapter->flags); + napi_enable(&adapter->napi); + atl1c_irq_enable(adapter); + atl1c_check_link_status(adapter); + netif_start_queue(netdev); + return err; + +err_up: +err_alloc_rx: + atl1c_clean_rx_ring(adapter); + return err; +} + +void atl1c_down(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + atl1c_del_timer(adapter); + adapter->work_event = 0; /* clear all event */ + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__AT_DOWN, &adapter->flags); + netif_carrier_off(netdev); + napi_disable(&adapter->napi); + atl1c_irq_disable(adapter); + atl1c_free_irq(adapter); + AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT); + /* reset MAC to disable all RX/TX */ + atl1c_reset_mac(&adapter->hw); + msleep(1); + + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + atl1c_clean_tx_ring(adapter, atl1c_trans_normal); + atl1c_clean_tx_ring(adapter, atl1c_trans_high); + atl1c_clean_rx_ring(adapter); +} + +/* + * atl1c_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int atl1c_open(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__AT_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate rx/tx dma buffer & descriptors */ + err = atl1c_setup_ring_resources(adapter); + if (unlikely(err)) + return err; + + err = atl1c_up(adapter); + if (unlikely(err)) + goto err_up; + + if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) { + u32 phy_data; + + AT_READ_REG(&adapter->hw, REG_MDIO_CTRL, &phy_data); + phy_data |= MDIO_AP_EN; + AT_WRITE_REG(&adapter->hw, REG_MDIO_CTRL, phy_data); + } + return 0; + +err_up: + atl1c_free_irq(adapter); + atl1c_free_ring_resources(adapter); + atl1c_reset_mac(&adapter->hw); + return err; +} + +/* + * atl1c_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ */ +static int atl1c_close(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1c_down(adapter); + atl1c_free_ring_resources(adapter); + return 0; +} + +static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 ctrl; + u32 mac_ctrl_data; + u32 master_ctrl_data; + u32 wol_ctrl_data = 0; + u16 mii_bmsr_data; + u16 save_autoneg_advertised; + u16 mii_intr_status_data; + u32 wufc = adapter->wol; + u32 i; + int retval = 0; + + if (netif_running(netdev)) { + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1c_down(adapter); + } + netif_device_detach(netdev); + atl1c_disable_l0s_l1(hw); + retval = pci_save_state(pdev); + if (retval) + return retval; + if (wufc) { + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); + master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS; + + /* get link status */ + atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); + atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); + save_autoneg_advertised = hw->autoneg_advertised; + hw->autoneg_advertised = ADVERTISED_10baseT_Half; + if (atl1c_restart_autoneg(hw) != 0) + if (netif_msg_link(adapter)) + dev_warn(&pdev->dev, "phy autoneg failed\n"); + hw->phy_configured = false; /* re-init PHY when resume */ + hw->autoneg_advertised = save_autoneg_advertised; + /* turn on magic packet wol */ + if (wufc & AT_WUFC_MAG) + wol_ctrl_data = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + + if (wufc & AT_WUFC_LNKC) { + for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { + msleep(100); + atl1c_read_phy_reg(hw, MII_BMSR, + (u16 *)&mii_bmsr_data); + if (mii_bmsr_data & BMSR_LSTATUS) + break; + } + if ((mii_bmsr_data & BMSR_LSTATUS) == 0) + if (netif_msg_link(adapter)) + dev_warn(&pdev->dev, + "%s: Link may change" + "when suspend\n", + atl1c_driver_name); + wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; + /* only link up can wake up */ + if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { + if (netif_msg_link(adapter)) + dev_err(&pdev->dev, + "%s: read write phy " + "register failed.\n", + atl1c_driver_name); + goto wol_dis; + } + } + /* clear phy interrupt */ + atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data); + /* Config MAC Ctrl register */ + mac_ctrl_data = MAC_CTRL_RX_EN; + /* set to 10/100M halt duplex */ + mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT; + mac_ctrl_data |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << + MAC_CTRL_PRMLEN_SHIFT); + + if (adapter->vlgrp) + mac_ctrl_data |= MAC_CTRL_RMV_VLAN; + + /* magic packet maybe Broadcast&multicast&Unicast frame */ + if (wufc & AT_WUFC_MAG) + mac_ctrl_data |= MAC_CTRL_BC_EN; + + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, + "%s: suspend MAC=0x%x\n", + atl1c_driver_name, mac_ctrl_data); + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); + AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + + /* pcie patch */ + AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto suspend_exit; + } +wol_dis: + + /* WOL disabled */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* pcie patch */ + AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, 
ctrl); + + atl1c_phy_disable(hw); + hw->phy_configured = false; /* re-init PHY when resume */ + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); +suspend_exit: + + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int atl1c_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + + atl1c_phy_reset(&adapter->hw); + atl1c_reset_mac(&adapter->hw); + netif_device_attach(netdev); + if (netif_running(netdev)) + atl1c_up(adapter); + + return 0; +} + +static void atl1c_shutdown(struct pci_dev *pdev) +{ + atl1c_suspend(pdev, PMSG_SUSPEND); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops atl1c_netdev_ops = { + .ndo_open = atl1c_open, + .ndo_stop = atl1c_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_start_xmit = atl1c_xmit_frame, + .ndo_set_mac_address = atl1c_set_mac_addr, + .ndo_set_multicast_list = atl1c_set_multi, + .ndo_change_mtu = atl1c_change_mtu, + .ndo_do_ioctl = atl1c_ioctl, + .ndo_tx_timeout = atl1c_tx_timeout, + .ndo_get_stats = atl1c_get_stats, + .ndo_vlan_rx_register = atl1c_vlan_rx_register, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl1c_netpoll, +#endif +}; +#endif + +static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev) +{ + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + netdev->irq = pdev->irq; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + netdev->netdev_ops = &atl1c_netdev_ops; +#else + netdev->change_mtu = atl1c_change_mtu; + netdev->hard_start_xmit = atl1c_xmit_frame; + netdev->open = atl1c_open; + netdev->stop = atl1c_close; + netdev->tx_timeout = atl1c_tx_timeout; + netdev->set_mac_address = atl1c_set_mac_addr; + netdev->do_ioctl = atl1c_ioctl; + netdev->get_stats = atl1c_get_stats; + netdev->set_multicast_list = atl1c_set_multi; + netdev->vlan_rx_register = atl1c_vlan_rx_register; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = atl1c_netpoll; +#endif +#endif + netdev->watchdog_timeo = AT_TX_WATCHDOG; + atl1c_set_ethtool_ops(netdev); + + /* TODO: add when ready */ + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_TSO | + NETIF_F_TSO6; + return 0; +} + +/* + * atl1c_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1c_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1c_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int __devinit atl1c_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1c_adapter *adapter; + static int cards_found; + + int err = 0; + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + return err; + } + + /* + * The atl1c chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. 
+ * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. + */ + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { + dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); + goto err_dma; + } + + err = pci_request_regions(pdev, atl1c_driver_name); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1c_adapter)); + if (netdev == NULL) { + err = -ENOMEM; + dev_err(&pdev->dev, "etherdev alloc failed\n"); + goto err_alloc_etherdev; + } + + err = atl1c_init_netdev(netdev, pdev); + if (err) { + dev_err(&pdev->dev, "init netdevice failed\n"); + goto err_init_netdev; + } + adapter = netdev_priv(netdev); + adapter->bd_number = cards_found; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.adapter = adapter; + adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg); + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if (!adapter->hw.hw_addr) { + err = -EIO; + dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_ioremap; + } + netdev->base_addr = (unsigned long)adapter->hw.hw_addr; + + /* init mii data */ + adapter->mii.dev = netdev; + adapter->mii.mdio_read = atl1c_mdio_read; + adapter->mii.mdio_write = atl1c_mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK; + netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64); + setup_timer(&adapter->phy_config_timer, atl1c_phy_config, + (unsigned long)adapter); + /* setup the private structure */ + err = atl1c_sw_init(adapter); + if (err) { + dev_err(&pdev->dev, "net device private data init failed\n"); + goto err_sw_init; + } + atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | + ATL1C_PCIE_PHY_RESET); + + /* Init GPHY as early as possible due to power saving issue */ + atl1c_phy_reset(&adapter->hw); + + err = atl1c_reset_mac(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + + device_init_wakeup(&pdev->dev, 1); + /* reset the controller to + * put the device in a known good starting state */ + err = atl1c_phy_init(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + if (atl1c_read_mac_addr(&adapter->hw) != 0) { + err = -EIO; + dev_err(&pdev->dev, "get mac address failed\n"); + goto err_eeprom; + } + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); + if (netif_msg_probe(adapter)) + dev_dbg(&pdev->dev, "mac address : %pM\n", + adapter->hw.mac_addr); + + atl1c_hw_set_mac_addr(&adapter->hw); + INIT_WORK(&adapter->common_task, atl1c_common_task); + adapter->work_event = 0; + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "register netdevice failed\n"); + goto err_register; + } + + if (netif_msg_probe(adapter)) + dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION); + cards_found++; + return 0; + +err_reset: +err_register: +err_sw_init: +err_eeprom: + iounmap(adapter->hw.hw_addr); +err_init_netdev: +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/* + * atl1c_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * 
atl1c_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void __devexit atl1c_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + unregister_netdev(netdev); + atl1c_phy_disable(&adapter->hw); + + iounmap(adapter->hw.hw_addr); + + pci_release_regions(pdev); + pci_disable_device(pdev); + free_netdev(netdev); +} + +/* + * atl1c_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + atl1c_down(adapter); + + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/* + * atl1c_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + atl1c_reset_mac(&adapter->hw); + + return PCI_ERS_RESULT_RECOVERED; +} + +/* + * atl1c_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the atl1c_resume routine. + */ +static void atl1c_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (atl1c_up(adapter)) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Cannot bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static struct pci_error_handlers atl1c_err_handler = { + .error_detected = atl1c_io_error_detected, + .slot_reset = atl1c_io_slot_reset, + .resume = atl1c_io_resume, +}; + +static struct pci_driver atl1c_driver = { + .name = atl1c_driver_name, + .id_table = atl1c_pci_tbl, + .probe = atl1c_probe, + .remove = __devexit_p(atl1c_remove), + /* Power Managment Hooks */ + .suspend = atl1c_suspend, + .resume = atl1c_resume, + .shutdown = atl1c_shutdown, + .err_handler = &atl1c_err_handler +}; + +/* + * atl1c_init_module - Driver Registration Routine + * + * atl1c_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
+ */ +static int __init atl1c_init_module(void) +{ + return pci_register_driver(&atl1c_driver); +} + +/* + * atl1c_exit_module - Driver Exit Cleanup Routine + * + * atl1c_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit atl1c_exit_module(void) +{ + pci_unregister_driver(&atl1c_driver); +} + +module_init(atl1c_init_module); +module_exit(atl1c_exit_module); diff --git a/drivers/net/atl1e/Makefile b/drivers/net/atl1e/Makefile new file mode 100644 index 0000000..bc11be8 --- /dev/null +++ b/drivers/net/atl1e/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_ATL1E) += atl1e.o +atl1e-objs += atl1e_main.o atl1e_hw.o atl1e_ethtool.o atl1e_param.o diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h new file mode 100644 index 0000000..490d3b3 --- /dev/null +++ b/drivers/net/atl1e/atl1e.h @@ -0,0 +1,510 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * Copyright(c) 2007 xiong huang + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _ATL1E_H_ +#define _ATL1E_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atl1e_hw.h" + +#define PCI_REG_COMMAND 0x04 /* PCI Command Register */ +#define CMD_IO_SPACE 0x0001 +#define CMD_MEMORY_SPACE 0x0002 +#define CMD_BUS_MASTER 0x0004 + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +/* Wake Up Filter Control */ +#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define SPEED_0 0xffff +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* Error Codes */ +#define AT_ERR_EEPROM 1 +#define AT_ERR_PHY 2 +#define AT_ERR_CONFIG 3 +#define AT_ERR_PARAM 4 +#define AT_ERR_MAC_TYPE 5 +#define AT_ERR_PHY_TYPE 6 +#define AT_ERR_PHY_SPEED 7 +#define AT_ERR_PHY_RES 8 +#define AT_ERR_TIMEOUT 9 + +#define MAX_JUMBO_FRAME_SIZE 0x2000 + +#define AT_VLAN_TAG_TO_TPD_TAG(_vlan, _tpd) \ + _tpd = (((_vlan) << (4)) | (((_vlan) >> 13) & 7) |\ + (((_vlan) >> 9) & 8)) + +#define AT_TPD_TAG_TO_VLAN_TAG(_tpd, _vlan) \ + _vlan = (((_tpd) >> 8) | (((_tpd) & 0x77) << 9) |\ + (((_tdp) & 0x88) << 5)) + +#define AT_MAX_RECEIVE_QUEUE 4 +#define AT_PAGE_NUM_PER_QUEUE 2 + +#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL +#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL + +#define AT_TX_WATCHDOG (5 * HZ) +#define AT_MAX_INT_WORK 10 +#define AT_TWSI_EEPROM_TIMEOUT 100 +#define AT_HW_MAX_IDLE_DELAY 10 +#define AT_SUSPEND_LINK_TIMEOUT 28 + +#define AT_REGS_LEN 75 +#define AT_EEPROM_LEN 512 +#define AT_ADV_MASK (ADVERTISE_10_HALF |\ + ADVERTISE_10_FULL |\ + ADVERTISE_100_HALF |\ + ADVERTISE_100_FULL |\ + ADVERTISE_1000_FULL) + +/* tpd word 2 */ +#define TPD_BUFLEN_MASK 0x3FFF +#define TPD_BUFLEN_SHIFT 0 +#define TPD_DMAINT_MASK 0x0001 +#define TPD_DMAINT_SHIFT 14 +#define TPD_PKTNT_MASK 0x0001 +#define TPD_PKTINT_SHIFT 15 +#define TPD_VLANTAG_MASK 0xFFFF +#define TPD_VLAN_SHIFT 16 + +/* tpd word 3 bits 0:4 */ +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 0 +#define TPD_IP_VERSION_MASK 0x0001 +#define TPD_IP_VERSION_SHIFT 1 /* 0 : IPV4, 1 : IPV6 */ +#define TPD_INS_VL_TAG_MASK 0x0001 +#define TPD_INS_VL_TAG_SHIFT 2 +#define TPD_CC_SEGMENT_EN_MASK 0x0001 +#define TPD_CC_SEGMENT_EN_SHIFT 3 +#define TPD_SEGMENT_EN_MASK 0x0001 +#define TPD_SEGMENT_EN_SHIFT 4 + +/* tdp word 3 bits 5:7 if ip version is 0 */ +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 5 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 6 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 7 + +/* tdp word 3 bits 5:7 if ip version is 1 */ +#define TPD_V6_IPHLLO_MASK 0x0007 +#define TPD_V6_IPHLLO_SHIFT 7 + +/* tpd word 3 bits 8:9 bit */ +#define TPD_VL_TAGGED_MASK 0x0001 +#define TPD_VL_TAGGED_SHIFT 8 +#define TPD_ETHTYPE_MASK 0x0001 +#define TPD_ETHTYPE_SHIFT 9 + +/* tdp word 3 bits 10:13 if ip version is 0 */ +#define TDP_V4_IPHL_MASK 0x000F +#define TPD_V4_IPHL_SHIFT 10 + +/* tdp word 3 bits 10:13 if ip version is 1 */ +#define TPD_V6_IPHLHI_MASK 0x000F +#define TPD_V6_IPHLHI_SHIFT 10 + +/* tpd word 3 bit 14:31 if segment enabled */ +#define TPD_TCPHDRLEN_MASK 0x000F +#define 
TPD_TCPHDRLEN_SHIFT 14 +#define TPD_HDRFLAG_MASK 0x0001 +#define TPD_HDRFLAG_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 19 + +/* tdp word 3 bit 16:31 if custom csum enabled */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 16 +#define TPD_CCSUMOFFSET_MASK 0x00FF +#define TPD_CCSUMOFFSET_SHIFT 24 + +struct atl1e_tpd_desc { + __le64 buffer_addr; + __le32 word2; + __le32 word3; +}; + +/* how about 0x2000 */ +#define MAX_TX_BUF_LEN 0x2000 +#define MAX_TX_BUF_SHIFT 13 +/*#define MAX_TX_BUF_LEN 0x3000 */ + +/* rrs word 1 bit 0:31 */ +#define RRS_RX_CSUM_MASK 0xFFFF +#define RRS_RX_CSUM_SHIFT 0 +#define RRS_PKT_SIZE_MASK 0x3FFF +#define RRS_PKT_SIZE_SHIFT 16 +#define RRS_CPU_NUM_MASK 0x0003 +#define RRS_CPU_NUM_SHIFT 30 + +#define RRS_IS_RSS_IPV4 0x0001 +#define RRS_IS_RSS_IPV4_TCP 0x0002 +#define RRS_IS_RSS_IPV6 0x0004 +#define RRS_IS_RSS_IPV6_TCP 0x0008 +#define RRS_IS_IPV6 0x0010 +#define RRS_IS_IP_FRAG 0x0020 +#define RRS_IS_IP_DF 0x0040 +#define RRS_IS_802_3 0x0080 +#define RRS_IS_VLAN_TAG 0x0100 +#define RRS_IS_ERR_FRAME 0x0200 +#define RRS_IS_IPV4 0x0400 +#define RRS_IS_UDP 0x0800 +#define RRS_IS_TCP 0x1000 +#define RRS_IS_BCAST 0x2000 +#define RRS_IS_MCAST 0x4000 +#define RRS_IS_PAUSE 0x8000 + +#define RRS_ERR_BAD_CRC 0x0001 +#define RRS_ERR_CODE 0x0002 +#define RRS_ERR_DRIBBLE 0x0004 +#define RRS_ERR_RUNT 0x0008 +#define RRS_ERR_RX_OVERFLOW 0x0010 +#define RRS_ERR_TRUNC 0x0020 +#define RRS_ERR_IP_CSUM 0x0040 +#define RRS_ERR_L4_CSUM 0x0080 +#define RRS_ERR_LENGTH 0x0100 +#define RRS_ERR_DES_ADDR 0x0200 + +struct atl1e_recv_ret_status { + u16 seq_num; + u16 hash_lo; + __le32 word1; + u16 pkt_flag; + u16 err_flag; + u16 hash_hi; + u16 vtag; +}; + +enum atl1e_dma_req_block { + atl1e_dma_req_128 = 0, + atl1e_dma_req_256 = 1, + atl1e_dma_req_512 = 2, + atl1e_dma_req_1024 = 3, + atl1e_dma_req_2048 = 4, + atl1e_dma_req_4096 = 5 +}; + +enum atl1e_rrs_type { + atl1e_rrs_disable = 0, + atl1e_rrs_ipv4 = 1, + atl1e_rrs_ipv4_tcp = 2, + atl1e_rrs_ipv6 = 4, + atl1e_rrs_ipv6_tcp = 8 +}; + +enum atl1e_nic_type { + athr_l1e = 0, + athr_l2e_revA = 1, + athr_l2e_revB = 2 +}; + +struct atl1e_hw_stats { + /* rx */ + unsigned long rx_ok; /* The number of good packet received. */ + unsigned long rx_bcast; /* The number of good broadcast packet received. */ + unsigned long rx_mcast; /* The number of good multicast packet received. */ + unsigned long rx_pause; /* The number of Pause packet received. */ + unsigned long rx_ctrl; /* The number of Control packet received other than Pause frame. */ + unsigned long rx_fcs_err; /* The number of packets with bad FCS. */ + unsigned long rx_len_err; /* The number of packets with mismatch of length field and actual size. */ + unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */ + unsigned long rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */ + unsigned long rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */ + unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */ + unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */ + unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */ + unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. 
*/ + unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */ + unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */ + unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */ + unsigned long rx_sz_ov; /* The number of good and bad packets received that are more than MTU size truncated by Selene. */ + unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */ + unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */ + unsigned long rx_align_err; /* Alignment Error */ + unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */ + unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */ + unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */ + + /* tx */ + unsigned long tx_ok; /* The number of good packet transmitted. */ + unsigned long tx_bcast; /* The number of good broadcast packet transmitted. */ + unsigned long tx_mcast; /* The number of good multicast packet transmitted. */ + unsigned long tx_pause; /* The number of Pause packet transmitted. */ + unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */ + unsigned long tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */ + unsigned long tx_defer; /* The number of packets transmitted that is deferred. */ + unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */ + unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */ + unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */ + unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */ + unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */ + unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */ + unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */ + unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */ + unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */ + unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */ + unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */ + unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */ + unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ + unsigned long tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */ + unsigned long tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */ + unsigned long tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. 
*/ + unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */ + unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. */ +}; + +struct atl1e_hw { + u8 __iomem *hw_addr; /* inner register address */ + resource_size_t mem_rang; + struct atl1e_adapter *adapter; + enum atl1e_nic_type nic_type; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u16 pci_cmd_word; + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + u8 preamble_len; + u16 max_frame_size; + u16 rx_jumbo_th; + u16 tx_jumbo_th; + + u16 media_type; +#define MEDIA_TYPE_AUTO_SENSOR 0 +#define MEDIA_TYPE_100M_FULL 1 +#define MEDIA_TYPE_100M_HALF 2 +#define MEDIA_TYPE_10M_FULL 3 +#define MEDIA_TYPE_10M_HALF 4 + + u16 autoneg_advertised; +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u16 imt; /* Interrupt Moderator timer (2us resolution) */ + u16 ict; /* Interrupt Clear timer (2us resolution) */ + u32 smb_timer; + u16 rrd_thresh; /* Threshold of number of RRD produced to trigger + interrupt request */ + u16 tpd_thresh; + u16 rx_count_down; /* 2us resolution */ + u16 tx_count_down; + + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */ + enum atl1e_rrs_type rrs_type; + u32 base_cpu; + u32 indirect_tab; + + enum atl1e_dma_req_block dmar_block; + enum atl1e_dma_req_block dmaw_block; + u8 dmaw_dly_cnt; + u8 dmar_dly_cnt; + + bool phy_configured; + bool re_autoneg; + bool emi_ca; +}; + +/* + * wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct atl1e_tx_buffer { + struct sk_buff *skb; + u16 flags; +#define ATL1E_TX_PCIMAP_SINGLE 0x0001 +#define ATL1E_TX_PCIMAP_PAGE 0x0002 +#define ATL1E_TX_PCIMAP_TYPE_MASK 0x0003 + u16 length; + dma_addr_t dma; +}; + +#define ATL1E_SET_PCIMAP_TYPE(tx_buff, type) do { \ + ((tx_buff)->flags) &= ~ATL1E_TX_PCIMAP_TYPE_MASK; \ + ((tx_buff)->flags) |= (type); \ + } while (0) + +struct atl1e_rx_page { + dma_addr_t dma; /* receive page DMA address */ + u8 *addr; /* receive page virtual address */ + dma_addr_t write_offset_dma; /* the DMA address which contains the + receive data offset in the page */ + u32 *write_offset_addr; /* the virtual address which contains + the receive data offset in the page */ + u32 read_offset; /* the offset where we have read */ +}; + +struct atl1e_rx_page_desc { + struct atl1e_rx_page rx_page[AT_PAGE_NUM_PER_QUEUE]; + u8 rx_using; + u16 rx_nxseq; +}; + +/* transmit packet descriptor (tpd) ring */ +struct atl1e_tx_ring { + struct atl1e_tpd_desc *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 count; /* the count of transmit descriptors */ + rwlock_t tx_lock; + u16 next_to_use; + atomic_t next_to_clean; + struct atl1e_tx_buffer *tx_buffer; + dma_addr_t cmb_dma; + u32 *cmb; +}; + +/* receive packet descriptor ring */ +struct atl1e_rx_ring { + void *desc; + dma_addr_t dma; + int size; + u32 page_size; /* byte length of rxf page */ + u32 real_page_size; /* real_page_size = page_size + jumbo + align */ + struct atl1e_rx_page_desc rx_page_desc[AT_MAX_RECEIVE_QUEUE]; +}; + +/* board specific private data structure */ +struct atl1e_adapter { + struct net_device *netdev; + struct pci_dev
*pdev; + struct vlan_group *vlgrp; + struct napi_struct napi; + struct mii_if_info mii; /* MII interface info */ + struct atl1e_hw hw; + struct atl1e_hw_stats hw_stats; + + bool have_msi; + u32 wol; + u16 link_speed; + u16 link_duplex; + + spinlock_t mdio_lock; + spinlock_t tx_lock; + atomic_t irq_sem; + + struct work_struct reset_task; + struct work_struct link_chg_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + + /* All Descriptor memory */ + dma_addr_t ring_dma; + void *ring_vir_addr; + u32 ring_size; + + struct atl1e_tx_ring tx_ring; + struct atl1e_rx_ring rx_ring; + int num_rx_queues; + unsigned long flags; +#define __AT_TESTING 0x0001 +#define __AT_RESETTING 0x0002 +#define __AT_DOWN 0x0003 + + u32 bd_number; /* board number;*/ + u32 pci_state[16]; + u32 *config_space; +}; + +#define AT_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + reg))) + +#define AT_WRITE_FLUSH(a) (\ + readl((a)->hw_addr)) + +#define AT_READ_REG(a, reg) ( \ + readl((a)->hw_addr + reg)) + +#define AT_WRITE_REGB(a, reg, value) (\ + writeb((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGB(a, reg) (\ + readb((a)->hw_addr + reg)) + +#define AT_WRITE_REGW(a, reg, value) (\ + writew((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGW(a, reg) (\ + readw((a)->hw_addr + reg)) + +#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) + +#define AT_READ_REG_ARRAY(a, reg, offset) ( \ + readl(((a)->hw_addr + reg) + ((offset) << 2))) + +extern char atl1e_driver_name[]; +extern char atl1e_driver_version[]; + +extern void atl1e_check_options(struct atl1e_adapter *adapter); +extern int atl1e_up(struct atl1e_adapter *adapter); +extern void atl1e_down(struct atl1e_adapter *adapter); +extern void atl1e_reinit_locked(struct atl1e_adapter *adapter); +extern s32 atl1e_reset_hw(struct atl1e_hw *hw); +extern void atl1e_set_ethtool_ops(struct net_device *netdev); +#endif /* _ATL1_E_H_ */ diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c new file mode 100644 index 0000000..a76006c --- /dev/null +++ b/drivers/net/atl1e/atl1e_ethtool.c @@ -0,0 +1,394 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +#include "atl1e.h" + +static int atl1e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->nic_type == athr_l1e) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_TP; + + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= hw->autoneg_advertised; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed != SPEED_0) { + ecmd->speed = adapter->link_speed; + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_ENABLE; + return 0; +} + +static int atl1e_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + u16 adv4, adv9; + + if ((ecmd->advertising&ADVERTISE_1000_FULL)) { + if (hw->nic_type == athr_l1e) { + hw->autoneg_advertised = + ecmd->advertising & AT_ADV_MASK; + } else { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + } else if (ecmd->advertising&ADVERTISE_1000_HALF) { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } else { + hw->autoneg_advertised = + ecmd->advertising & AT_ADV_MASK; + } + ecmd->advertising = hw->autoneg_advertised | + ADVERTISED_TP | ADVERTISED_Autoneg; + + adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK; + adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK; + if (hw->autoneg_advertised & ADVERTISE_10_HALF) + adv4 |= MII_AR_10T_HD_CAPS; + if (hw->autoneg_advertised & ADVERTISE_10_FULL) + adv4 |= MII_AR_10T_FD_CAPS; + if (hw->autoneg_advertised & ADVERTISE_100_HALF) + adv4 |= MII_AR_100TX_HD_CAPS; + if (hw->autoneg_advertised & ADVERTISE_100_FULL) + adv4 |= MII_AR_100TX_FD_CAPS; + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) + adv9 |= MII_AT001_CR_1000T_FD_CAPS; + + if (adv4 != hw->mii_autoneg_adv_reg || + adv9 != hw->mii_1000t_ctrl_reg) { + hw->mii_autoneg_adv_reg = adv4; + hw->mii_1000t_ctrl_reg = adv9; + hw->re_autoneg = true; + } + + } else { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + atl1e_down(adapter); + atl1e_up(adapter); + } else + atl1e_reset_hw(&adapter->hw); + + clear_bit(__AT_RESETTING, &adapter->flags); + return 0; +} + +static u32 atl1e_get_msglevel(struct net_device *netdev) +{ +#ifdef DBG + return 1; +#else + return 0; +#endif +} + +static int atl1e_get_regs_len(struct net_device *netdev) +{ + return AT_REGS_LEN * sizeof(u32); +} + +static void atl1e_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, AT_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = AT_READ_REG(hw, REG_VPD_CAP); + regs_buff[1] = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); + regs_buff[2] = AT_READ_REG(hw,
REG_SPI_FLASH_CONFIG); + regs_buff[3] = AT_READ_REG(hw, REG_TWSI_CTRL); + regs_buff[4] = AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); + regs_buff[5] = AT_READ_REG(hw, REG_MASTER_CTRL); + regs_buff[6] = AT_READ_REG(hw, REG_MANUAL_TIMER_INIT); + regs_buff[7] = AT_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); + regs_buff[8] = AT_READ_REG(hw, REG_GPHY_CTRL); + regs_buff[9] = AT_READ_REG(hw, REG_CMBDISDMA_TIMER); + regs_buff[10] = AT_READ_REG(hw, REG_IDLE_STATUS); + regs_buff[11] = AT_READ_REG(hw, REG_MDIO_CTRL); + regs_buff[12] = AT_READ_REG(hw, REG_SERDES_LOCK); + regs_buff[13] = AT_READ_REG(hw, REG_MAC_CTRL); + regs_buff[14] = AT_READ_REG(hw, REG_MAC_IPG_IFG); + regs_buff[15] = AT_READ_REG(hw, REG_MAC_STA_ADDR); + regs_buff[16] = AT_READ_REG(hw, REG_MAC_STA_ADDR+4); + regs_buff[17] = AT_READ_REG(hw, REG_RX_HASH_TABLE); + regs_buff[18] = AT_READ_REG(hw, REG_RX_HASH_TABLE+4); + regs_buff[19] = AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); + regs_buff[20] = AT_READ_REG(hw, REG_MTU); + regs_buff[21] = AT_READ_REG(hw, REG_WOL_CTRL); + regs_buff[22] = AT_READ_REG(hw, REG_SRAM_TRD_ADDR); + regs_buff[23] = AT_READ_REG(hw, REG_SRAM_TRD_LEN); + regs_buff[24] = AT_READ_REG(hw, REG_SRAM_RXF_ADDR); + regs_buff[25] = AT_READ_REG(hw, REG_SRAM_RXF_LEN); + regs_buff[26] = AT_READ_REG(hw, REG_SRAM_TXF_ADDR); + regs_buff[27] = AT_READ_REG(hw, REG_SRAM_TXF_LEN); + regs_buff[28] = AT_READ_REG(hw, REG_SRAM_TCPH_ADDR); + regs_buff[29] = AT_READ_REG(hw, REG_SRAM_PKTH_ADDR); + + atl1e_read_phy_reg(hw, MII_BMCR, &phy_data); + regs_buff[73] = (u32)phy_data; + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + regs_buff[74] = (u32)phy_data; +} + +static int atl1e_get_eeprom_len(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (!atl1e_check_eeprom_exist(&adapter->hw)) + return AT_EEPROM_LEN; + else + return 0; +} + +static int atl1e_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *eeprom_buff; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EINVAL; + + if (atl1e_check_eeprom_exist(hw)) /* not exist */ + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32) * + (last_dword - first_dword + 1), GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + for (i = first_dword; i < last_dword; i++) { + if (!atl1e_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { + kfree(eeprom_buff); + return -EIO; + } + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int atl1e_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *eeprom_buff; + u32 *ptr; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + eeprom_buff = kmalloc(AT_EEPROM_LEN, GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + ptr = (u32 *)eeprom_buff; + + if (eeprom->offset & 3) { + /* need read/modify/write of first changed EEPROM word */ + /* only the 
second byte of the word is being modified */ + if (!atl1e_read_eeprom(hw, first_dword * 4, &(eeprom_buff[0]))) { + ret_val = -EIO; + goto out; + } + ptr++; + } + if (((eeprom->offset + eeprom->len) & 3)) { + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + + if (!atl1e_read_eeprom(hw, last_dword * 4, + &(eeprom_buff[last_dword - first_dword]))) { + ret_val = -EIO; + goto out; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_dword - first_dword + 1; i++) { + if (!atl1e_write_eeprom(hw, ((first_dword + i) * 4), + eeprom_buff[i])) { + ret_val = -EIO; + goto out; + } + } +out: + kfree(eeprom_buff); + return ret_val; +} + +static void atl1e_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, atl1e_driver_name, 32); + strncpy(drvinfo->version, atl1e_driver_version, 32); + strncpy(drvinfo->fw_version, "L1e", 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = atl1e_get_regs_len(netdev); + drvinfo->eedump_len = atl1e_get_eeprom_len(netdev); +} + +static void atl1e_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (adapter->wol & AT_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & AT_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & AT_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & AT_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & AT_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; + + return; +} + +static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) + return -EOPNOTSUPP; + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= AT_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= AT_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int atl1e_nway_reset(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + atl1e_reinit_locked(adapter); + return 0; +} + +static const struct ethtool_ops atl1e_ethtool_ops = { + .get_settings = atl1e_get_settings, + .set_settings = atl1e_set_settings, + .get_drvinfo = atl1e_get_drvinfo, + .get_regs_len = atl1e_get_regs_len, + .get_regs = atl1e_get_regs, + .get_wol = atl1e_get_wol, + .set_wol = atl1e_set_wol, + .get_msglevel = atl1e_get_msglevel, + .nway_reset = atl1e_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = atl1e_get_eeprom_len, + .get_eeprom = atl1e_get_eeprom, + .set_eeprom = atl1e_set_eeprom, + .set_tx_csum = ethtool_op_set_tx_hw_csum, + .set_sg = ethtool_op_set_sg, + .set_tso = ethtool_op_set_tso, +}; + +void atl1e_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops); +} diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c new file mode 100644 index 0000000..76cc043 --- /dev/null +++ b/drivers/net/atl1e/atl1e_hw.c @@ -0,0 +1,659 @@ +/* + * Copyright(c) 2007 Atheros 
Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/mii.h> +#include <linux/crc32.h> + +#include "atl1e.h" + +/* + * check_eeprom_exist + * return 0 if eeprom exists + */ +int atl1e_check_eeprom_exist(struct atl1e_hw *hw) +{ + u32 value; + + value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); + } + value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 0 : 1; +} + +void atl1e_hw_set_mac_addr(struct atl1e_hw *hw) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = (((u32)hw->mac_addr[2]) << 24) | + (((u32)hw->mac_addr[3]) << 16) | + (((u32)hw->mac_addr[4]) << 8) | + (((u32)hw->mac_addr[5])) ; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); + /* high dword */ + value = (((u32)hw->mac_addr[0]) << 8) | + (((u32)hw->mac_addr[1])) ; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); +} + +/* + * atl1e_get_permanent_address + * return 0 if a valid mac address is obtained + */ +static int atl1e_get_permanent_address(struct atl1e_hw *hw) +{ + u32 addr[2]; + u32 i; + u32 twsi_ctrl_data; + u8 eth_addr[ETH_ALEN]; + + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + /* init */ + addr[0] = addr[1] = 0; + + if (!atl1e_check_eeprom_exist(hw)) { + /* eeprom exists */ + twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); + twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; + AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data); + for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) { + msleep(10); + twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); + if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0) + break; + } + if (i >= AT_TWSI_EEPROM_TIMEOUT) + return AT_ERR_TIMEOUT; + } + + /* maybe the MAC address is from the BIOS */ + addr[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR); + addr[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4); + *(u32 *) &eth_addr[2] = swab32(addr[0]); + *(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]); + + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + return AT_ERR_EEPROM; +} + +bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value) +{ + return true; +} + +bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value) +{ + int i; + u32 control; + + if (offset & 3) + return false; /* address is not aligned */ + + AT_WRITE_REG(hw, REG_VPD_DATA, 0); + control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + AT_WRITE_REG(hw, REG_VPD_CAP, control); + + for (i = 0; i < 10; i++) { + msleep(2); + control = AT_READ_REG(hw, REG_VPD_CAP); + if (control & VPD_CAP_VPD_FLAG) + break; + } + if (control & VPD_CAP_VPD_FLAG) { + *p_value =
AT_READ_REG(hw, REG_VPD_DATA); + return true; + } + return false; /* timeout */ +} + +void atl1e_force_ps(struct atl1e_hw *hw) +{ + AT_WRITE_REGW(hw, REG_GPHY_CTRL, + GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET); +} + +/* + * Reads the adapter's MAC address from the EEPROM + * + * hw - Struct containing variables accessed by shared code + */ +int atl1e_read_mac_addr(struct atl1e_hw *hw) +{ + int err = 0; + + err = atl1e_get_permanent_address(hw); + if (err) + return AT_ERR_EEPROM; + memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); + return 0; +} + +/* + * atl1e_hash_mc_addr + * purpose + * set hash value for a multicast address + */ +u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr) +{ + u32 crc32; + u32 value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. + */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + + mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); + + mta |= (1 << hash_bit); + + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); +} +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + wmb(); + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16)val; + return 0; + } + + return AT_ERR_PHY; +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + wmb(); + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return 0; + + return AT_ERR_PHY; +} + +/* + * atl1e_init_pcie - init PCIE module + */ +static void atl1e_init_pcie(struct atl1e_hw *hw) +{ + u32 value; + /* comment 2lines below to save more power when sususpend + value = 
LTSSM_TEST_MODE_DEF; + AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value); + */ + + /* pcie flow control mode change */ + value = AT_READ_REG(hw, 0x1008); + value |= 0x8000; + AT_WRITE_REG(hw, 0x1008, value); +} +/* + * Configures PHY autoneg and flow control advertisement settings + * + * hw - Struct containing variables accessed by shared code + */ +static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + if (0 != hw->mii_autoneg_adv_reg) + return 0; + /* Read the MII Auto-Neg Advertisement Register (Address 4/9). */ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; + mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK; + + /* + * Need to parse autoneg_advertised and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; + mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; + + /* + * Need to parse MediaType and setup the + * appropriate PHY registers. + */ + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | + MII_AR_10T_FD_CAPS | + MII_AR_100TX_HD_CAPS | + MII_AR_100TX_FD_CAPS); + hw->autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL | + ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + if (hw->nic_type == athr_l1e) { + mii_1000t_ctrl_reg |= + MII_AT001_CR_1000T_FD_CAPS; + hw->autoneg_advertised |= ADVERTISE_1000_FULL; + } + break; + + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; + hw->autoneg_advertised = ADVERTISE_100_FULL; + break; + + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; + hw->autoneg_advertised = ADVERTISE_100_HALF; + break; + + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; + hw->autoneg_advertised = ADVERTISE_10_FULL; + break; + + default: + mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; + hw->autoneg_advertised = ADVERTISE_10_HALF; + break; + } + + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); + + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; + + ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { + ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return 0; +} + + +/* + * Resets the PHY and make all config validate + * + * hw - Struct containing variables accessed by shared code + * + * Sets bit 15 and 12 of the MII control regiser (for F001 bug) + */ +int atl1e_phy_commit(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + int ret_val; + u16 phy_data; + + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; + + ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /************************************** + * pcie serdes link may be down ! 
+ **************************************/ + for (i = 0; i < 25; i++) { + msleep(1); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (0 != (val & (MDIO_START | MDIO_BUSY))) { + netdev_err(adapter->netdev, + "pcie linkdown at least for 25ms\n"); + return ret_val; + } + + netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i); + } + return 0; +} + +int atl1e_phy_init(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + s32 ret_val; + u16 phy_val; + + if (hw->phy_configured) { + if (hw->re_autoneg) { + hw->re_autoneg = false; + return atl1e_restart_autoneg(hw); + } + return 0; + } + + /* RESET GPHY Core */ + AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT); + msleep(2); + AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | + GPHY_CTRL_EXT_RESET); + msleep(2); + + /* patches */ + /* p1. eable hibernation mode */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00); + if (ret_val) + return ret_val; + /* p2. set Class A/B for all modes */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0); + if (ret_val) + return ret_val; + phy_val = 0x02ef; + /* remove Class AB */ + /* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val); + if (ret_val) + return ret_val; + /* p3. 10B ??? */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04); + if (ret_val) + return ret_val; + /* p4. 1000T power */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB); + if (ret_val) + return ret_val; + + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46); + if (ret_val) + return ret_val; + + msleep(1); + + /*Enable PHY LinkChange Interrupt */ + ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00); + if (ret_val) { + netdev_err(adapter->netdev, + "Error enable PHY linkChange Interrupt\n"); + return ret_val; + } + /* setup AutoNeg parameters */ + ret_val = atl1e_phy_setup_autoneg_adv(hw); + if (ret_val) { + netdev_err(adapter->netdev, + "Error Setting up Auto-Negotiation\n"); + return ret_val; + } + /* SW.Reset & En-Auto-Neg to restart Auto-Neg*/ + netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n"); + ret_val = atl1e_phy_commit(hw); + if (ret_val) { + netdev_err(adapter->netdev, "Error resetting the phy\n"); + return ret_val; + } + + hw->phy_configured = true; + + return 0; +} + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +int atl1e_reset_hw(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + + u32 idle_status_data = 0; + u16 pci_cfg_cmd_word = 0; + int timeout = 0; + + /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ + pci_read_config_word(pdev, PCI_REG_COMMAND, &pci_cfg_cmd_word); + if ((pci_cfg_cmd_word & (CMD_IO_SPACE | + CMD_MEMORY_SPACE | CMD_BUS_MASTER)) + != (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) { + pci_cfg_cmd_word |= (CMD_IO_SPACE | + CMD_MEMORY_SPACE | CMD_BUS_MASTER); + pci_write_config_word(pdev, PCI_REG_COMMAND, pci_cfg_cmd_word); + } + + /* + * Issue Soft Reset to the MAC. 
This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + AT_WRITE_REG(hw, REG_MASTER_CTRL, + MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST); + wmb(); + msleep(1); + + /* Wait at least 10ms for All module to be Idle */ + for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { + idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS); + if (idle_status_data == 0) + break; + msleep(1); + cpu_relax(); + } + + if (timeout >= AT_HW_MAX_IDLE_DELAY) { + netdev_err(adapter->netdev, + "MAC state machine can't be idle since disabled for 10ms second\n"); + return AT_ERR_TIMEOUT; + } + + return 0; +} + + +/* + * Performs basic configuration of the adapter. + * + * hw - Struct containing variables accessed by shared code + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes multicast table, + * and Calls routines to setup link + * Leaves the transmit and receive units disabled and uninitialized. + */ +int atl1e_init_hw(struct atl1e_hw *hw) +{ + s32 ret_val = 0; + + atl1e_init_pcie(hw); + + /* Zero out the Multicast HASH table */ + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + ret_val = atl1e_phy_init(hw); + + return ret_val; +} + +/* + * Detects the current speed and duplex settings of the hardware. + * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex) +{ + int err; + u16 phy_data; + + /* Read PHY Specific Status Register (17) */ + err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data); + if (err) + return err; + + if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED)) + return AT_ERR_PHY_RES; + + switch (phy_data & MII_AT001_PSSR_SPEED) { + case MII_AT001_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case MII_AT001_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_AT001_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + return AT_ERR_PHY_SPEED; + break; + } + + if (phy_data & MII_AT001_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +int atl1e_restart_autoneg(struct atl1e_hw *hw) +{ + int err = 0; + + err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + if (err) + return err; + + if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { + err = atl1e_write_phy_reg(hw, MII_AT001_CR, + hw->mii_1000t_ctrl_reg); + if (err) + return err; + } + + err = atl1e_write_phy_reg(hw, MII_BMCR, + MII_CR_RESET | MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + return err; +} + diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h new file mode 100644 index 0000000..5ea2f4d --- /dev/null +++ b/drivers/net/atl1e/atl1e_hw.h @@ -0,0 +1,793 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _ATHL1E_HW_H_ +#define _ATHL1E_HW_H_ + +#include <linux/types.h> +#include <linux/mii.h> + +struct atl1e_adapter; +struct atl1e_hw; + +/* function prototypes */ +s32 atl1e_reset_hw(struct atl1e_hw *hw); +s32 atl1e_read_mac_addr(struct atl1e_hw *hw); +s32 atl1e_init_hw(struct atl1e_hw *hw); +s32 atl1e_phy_commit(struct atl1e_hw *hw); +s32 atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex); +u32 atl1e_auto_get_fc(struct atl1e_adapter *adapter, u16 duplex); +u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr); +void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value); +s32 atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data); +s32 atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data); +s32 atl1e_validate_mdi_setting(struct atl1e_hw *hw); +void atl1e_hw_set_mac_addr(struct atl1e_hw *hw); +bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value); +bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value); +s32 atl1e_phy_enter_power_saving(struct atl1e_hw *hw); +s32 atl1e_phy_leave_power_saving(struct atl1e_hw *hw); +s32 atl1e_phy_init(struct atl1e_hw *hw); +int atl1e_check_eeprom_exist(struct atl1e_hw *hw); +void atl1e_force_ps(struct atl1e_hw *hw); +s32 atl1e_restart_autoneg(struct atl1e_hw *hw); + +/* register definition */ +#define REG_PM_CTRLSTAT 0x44 + +#define REG_PCIE_CAP_LIST 0x58 + +#define REG_DEVICE_CAP 0x5C +#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 + +#define REG_DEVICE_CTRL 0x60 +#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5 +#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7 +#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12 + +#define REG_VPD_CAP 0x6C +#define VPD_CAP_ID_MASK 0xff +#define VPD_CAP_ID_SHIFT 0 +#define VPD_CAP_NEXT_PTR_MASK 0xFF +#define VPD_CAP_NEXT_PTR_SHIFT 8 +#define VPD_CAP_VPD_ADDR_MASK 0x7FFF +#define VPD_CAP_VPD_ADDR_SHIFT 16 +#define VPD_CAP_VPD_FLAG 0x80000000 + +#define REG_VPD_DATA 0x70 + +#define REG_SPI_FLASH_CTRL 0x200 +#define SPI_FLASH_CTRL_STS_NON_RDY 0x1 +#define SPI_FLASH_CTRL_STS_WEN 0x2 +#define SPI_FLASH_CTRL_STS_WPEN 0x80 +#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF +#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 +#define SPI_FLASH_CTRL_INS_MASK 0x7 +#define SPI_FLASH_CTRL_INS_SHIFT 8 +#define SPI_FLASH_CTRL_START 0x800 +#define SPI_FLASH_CTRL_EN_VPD 0x2000 +#define SPI_FLASH_CTRL_LDSTART 0x8000 +#define SPI_FLASH_CTRL_CS_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HI_SHIFT 16 +#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 +#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 +#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 +#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 +#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 +#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 +#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 +#define SPI_FLASH_CTRL_WAIT_READY 0x10000000 + +#define REG_SPI_ADDR 0x204 + +#define REG_SPI_DATA 0x208 + +#define REG_SPI_FLASH_CONFIG 0x20C +#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
+#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 +#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 +#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 +#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 + + +#define REG_SPI_FLASH_OP_PROGRAM 0x210 +#define REG_SPI_FLASH_OP_SC_ERASE 0x211 +#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 +#define REG_SPI_FLASH_OP_RDID 0x213 +#define REG_SPI_FLASH_OP_WREN 0x214 +#define REG_SPI_FLASH_OP_RDSR 0x215 +#define REG_SPI_FLASH_OP_WRSR 0x216 +#define REG_SPI_FLASH_OP_READ 0x217 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 +#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 +#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 +#define TWSI_CTRL_SW_LDSTART 0x800 +#define TWSI_CTRL_HW_LDSTART 0x1000 +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 +#define TWSI_CTRL_LD_EXIST 0x400000 +#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 +#define TWSI_CTRL_FREQ_SEL_100K 0 +#define TWSI_CTRL_FREQ_SEL_200K 1 +#define TWSI_CTRL_FREQ_SEL_300K 2 +#define TWSI_CTRL_FREQ_SEL_400K 3 +#define TWSI_CTRL_SMB_SLV_ADDR +#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 + + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 + +#define REG_LTSSM_TEST_MODE 0x12FC +#define LTSSM_TEST_MODE_DEF 0xE000 + +/* Selene Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_SOFT_RST 0x1 +#define MASTER_CTRL_MTIMER_EN 0x2 +#define MASTER_CTRL_ITIMER_EN 0x4 +#define MASTER_CTRL_MANUAL_INT 0x8 +#define MASTER_CTRL_ITIMER2_EN 0x20 +#define MASTER_CTRL_INT_RDCLR 0x40 +#define MASTER_CTRL_LED_MODE 0x200 +#define MASTER_CTRL_REV_NUM_SHIFT 16 +#define MASTER_CTRL_REV_NUM_MASK 0xff +#define MASTER_CTRL_DEV_ID_SHIFT 24 +#define MASTER_CTRL_DEV_ID_MASK 0xff + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + + +/* IRQ ModeratorTimer Initial Value Register */ +#define REG_IRQ_MODU_TIMER_INIT 0x1408 /* w */ +#define REG_IRQ_MODU_TIMER2_INIT 0x140A /* w */ + + +#define REG_GPHY_CTRL 0x140C +#define GPHY_CTRL_EXT_RESET 1 +#define GPHY_CTRL_PIPE_MOD 2 +#define GPHY_CTRL_TEST_MODE_MASK 3 +#define GPHY_CTRL_TEST_MODE_SHIFT 2 +#define GPHY_CTRL_BERT_START 0x10 +#define GPHY_CTRL_GATE_25M_EN 0x20 +#define GPHY_CTRL_LPW_EXIT 0x40 +#define GPHY_CTRL_PHY_IDDQ 0x80 +#define GPHY_CTRL_PHY_IDDQ_DIS 0x100 +#define GPHY_CTRL_PCLK_SEL_DIS 0x200 +#define GPHY_CTRL_HIB_EN 0x400 +#define GPHY_CTRL_HIB_PULSE 0x800 +#define GPHY_CTRL_SEL_ANA_RST 0x1000 +#define GPHY_CTRL_PHY_PLL_ON 0x2000 +#define GPHY_CTRL_PWDOWN_HW 0x4000 +#define GPHY_CTRL_DEFAULT (\ + GPHY_CTRL_PHY_PLL_ON |\ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_HIB_EN) + +#define GPHY_CTRL_PW_WOL_DIS (\ + GPHY_CTRL_PHY_PLL_ON |\ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_HIB_EN |\ + GPHY_CTRL_PWDOWN_HW |\ + GPHY_CTRL_PCLK_SEL_DIS |\ + GPHY_CTRL_PHY_IDDQ) + +/* IRQ Anti-Lost Timer Initial Value Register */ +#define REG_CMBDISDMA_TIMER 0x140E + + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 +#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC state machine is in non-IDLE state. 
0: RXMAC is idling */ +#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC state machine is in non-IDLE state. 0: TXMAC is idling */ +#define IDLE_STATUS_RXQ 4 /* 1: RXQ state machine is in non-IDLE state. 0: RXQ is idling */ +#define IDLE_STATUS_TXQ 8 /* 1: TXQ state machine is in non-IDLE state. 0: TXQ is idling */ +#define IDLE_STATUS_DMAR 0x10 /* 1: DMAR state machine is in non-IDLE state. 0: DMAR is idling */ +#define IDLE_STATUS_DMAW 0x20 /* 1: DMAW state machine is in non-IDLE state. 0: DMAW is idling */ +#define IDLE_STATUS_SMB 0x40 /* 1: SMB state machine is in non-IDLE state. 0: SMB is idling */ +#define IDLE_STATUS_CMB 0x80 /* 1: CMB state machine is in non-IDLE state. 0: CMB is idling */ + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit control data to write to PHY MII management register */ +#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit status data that was read from the PHY MII management register*/ +#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */ +#define MDIO_REG_ADDR_SHIFT 16 +#define MDIO_RW 0x200000 /* 1: read, 0: write */ +#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */ +#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO master. And this bit is self cleared after one cycle*/ +#define MDIO_CLK_SEL_SHIFT 24 +#define MDIO_CLK_25_4 0 +#define MDIO_CLK_25_6 2 +#define MDIO_CLK_25_8 3 +#define MDIO_CLK_25_10 4 +#define MDIO_CLK_25_14 5 +#define MDIO_CLK_25_20 6 +#define MDIO_CLK_25_28 7 +#define MDIO_BUSY 0x8000000 +#define MDIO_AP_EN 0x10000000 +#define MDIO_WAIT_TIMES 10 + +/* MII PHY Status Register */ +#define REG_PHY_STATUS 0x1418 +#define PHY_STATUS_100M 0x20000 +#define PHY_STATUS_EMI_CA 0x40000 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141c +#define BIST0_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */ +/* BIST process and reset to zero when BIST is done */ +#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */ +/* decoder failure or more than 1 cell stuck-to-x failure */ +#define BIST0_FUSE_FLAG 0x4 /* 1: Indicating one cell has been fixed */ + +/* BIST Control and Status Register1(for the retry buffer of PCI Express) */ +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */ +/* BIST process and reset to zero when BIST is done */ +#define BIST1_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */ +/* decoder failure or more than 1 cell stuck-to-x failure.*/ +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES_LOCK 0x1424 +#define SERDES_LOCK_DETECT 1 /* 1: SerDes lock detected . This signal comes from Analog SerDes */ +#define SERDES_LOCK_DETECT_EN 2 /* 1: Enable SerDes Lock detect function */ + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_TX_EN 1 /* 1: Transmit Enable */ +#define MAC_CTRL_RX_EN 2 /* 1: Receive Enable */ +#define MAC_CTRL_TX_FLOW 4 /* 1: Transmit Flow Control Enable */ +#define MAC_CTRL_RX_FLOW 8 /* 1: Receive Flow Control Enable */ +#define MAC_CTRL_LOOPBACK 0x10 /* 1: Loop back at G/MII Interface */ +#define MAC_CTRL_DUPLX 0x20 /* 1: Full-duplex mode 0: Half-duplex mode */ +#define MAC_CTRL_ADD_CRC 0x40 /* 1: Instruct MAC to attach CRC on all egress Ethernet frames */ +#define MAC_CTRL_PAD 0x80 /* 1: Instruct MAC to pad short frames to 60-bytes, and then attach CRC. 
This bit has higher priority over CRC_EN */ +#define MAC_CTRL_LENCHK 0x100 /* 1: Instruct MAC to check if length field matches the real packet length */ +#define MAC_CTRL_HUGE_EN 0x200 /* 1: receive Jumbo frame enable */ +#define MAC_CTRL_PRMLEN_SHIFT 10 /* Preamble length */ +#define MAC_CTRL_PRMLEN_MASK 0xf +#define MAC_CTRL_RMV_VLAN 0x4000 /* 1: to remove VLAN Tag automatically from all receive packets */ +#define MAC_CTRL_PROMIS_EN 0x8000 /* 1: Promiscuous Mode Enable */ +#define MAC_CTRL_TX_PAUSE 0x10000 /* 1: transmit test pause */ +#define MAC_CTRL_SCNT 0x20000 /* 1: shortcut slot time counter */ +#define MAC_CTRL_SRST_TX 0x40000 /* 1: synchronized reset Transmit MAC module */ +#define MAC_CTRL_TX_SIMURST 0x80000 /* 1: transmit simulation reset */ +#define MAC_CTRL_SPEED_SHIFT 20 /* 10: gigabit 01:10M/100M */ +#define MAC_CTRL_SPEED_MASK 0x300000 +#define MAC_CTRL_SPEED_1000 2 +#define MAC_CTRL_SPEED_10_100 1 +#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 /* 1: transmit maximum backoff (half-duplex test bit) */ +#define MAC_CTRL_TX_HUGE 0x800000 /* 1: transmit huge enable */ +#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 /* 1: RX checksum enable */ +#define MAC_CTRL_MC_ALL_EN 0x2000000 /* 1: upload all multicast frame without error to system */ +#define MAC_CTRL_BC_EN 0x4000000 /* 1: upload all broadcast frame without error to system */ +#define MAC_CTRL_DBG 0x8000000 /* 1: upload all received frame to system (Debug Mode) */ + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back inter-packet gap. The default is 96-bit time */ +#define MAC_IPG_IFG_IPGT_MASK 0x7f +#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to enforce in between RX frames */ +#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFP is dropped */ +#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */ +#define MAC_IPG_IFG_IPGR1_MASK 0x7f +#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */ +#define MAC_IPG_IFG_IPGR2_MASK 0x7f + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */ +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 /* Retransmission maximum, afterwards the packet will be discarded */ +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 /* 1: Allow the transmission of a packet which has been excessively deferred */ +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 /* 1: No back-off on collision, immediately start the retransmission */ +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* 1: No back-off on backpressure, immediately start the transmission after back pressure */ +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */ +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. 
In unit of 8-bit time */ + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149c + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14a0 +#define WOL_PATTERN_EN 0x00000001 +#define WOL_PATTERN_PME_EN 0x00000002 +#define WOL_MAGIC_EN 0x00000004 +#define WOL_MAGIC_PME_EN 0x00000008 +#define WOL_LINK_CHG_EN 0x00000010 +#define WOL_LINK_CHG_PME_EN 0x00000020 +#define WOL_PATTERN_ST 0x00000100 +#define WOL_MAGIC_ST 0x00000200 +#define WOL_LINKCHG_ST 0x00000400 +#define WOL_CLK_SWITCH_EN 0x00008000 +#define WOL_PT0_EN 0x00010000 +#define WOL_PT1_EN 0x00020000 +#define WOL_PT2_EN 0x00040000 +#define WOL_PT3_EN 0x00080000 +#define WOL_PT4_EN 0x00100000 +#define WOL_PT5_EN 0x00200000 +#define WOL_PT6_EN 0x00400000 +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PATTERN_LEN 0x14a4 +#define WOL_PT_LEN_MASK 0x7f +#define WOL_PT0_LEN_SHIFT 0 +#define WOL_PT1_LEN_SHIFT 8 +#define WOL_PT2_LEN_SHIFT 16 +#define WOL_PT3_LEN_SHIFT 24 +#define WOL_PT4_LEN_SHIFT 0 +#define WOL_PT5_LEN_SHIFT 8 +#define WOL_PT6_LEN_SHIFT 16 + +/* Internal SRAM Partition Register */ +#define REG_SRAM_TRD_ADDR 0x1518 +#define REG_SRAM_TRD_LEN 0x151C +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_ADDR 0x1530 +#define REG_SRAM_PKTH_ADDR 0x1532 + +/* Load Ptr Register */ +#define REG_LOAD_PTR 0x1534 /* Software sets this bit after the initialization of the head and tail */ + +/* + * addresses of all descriptors, as well as the following descriptor + * control register, which triggers each function block to load the head + * pointer to prepare for the operation. This bit is then self-cleared + * after one cycle. + */ + +/* Descriptor Control register */ +#define REG_RXF3_BASE_ADDR_HI 0x153C +#define REG_DESC_BASE_ADDR_HI 0x1540 +#define REG_RXF0_BASE_ADDR_HI 0x1540 /* share with DESC BASE ADDR HI */ +#define REG_HOST_RXF0_PAGE0_LO 0x1544 +#define REG_HOST_RXF0_PAGE1_LO 0x1548 +#define REG_TPD_BASE_ADDR_LO 0x154C +#define REG_RXF1_BASE_ADDR_HI 0x1550 +#define REG_RXF2_BASE_ADDR_HI 0x1554 +#define REG_HOST_RXFPAGE_SIZE 0x1558 +#define REG_TPD_RING_SIZE 0x155C +/* RSS about */ +#define REG_RSS_KEY0 0x14B0 +#define REG_RSS_KEY1 0x14B4 +#define REG_RSS_KEY2 0x14B8 +#define REG_RSS_KEY3 0x14BC +#define REG_RSS_KEY4 0x14C0 +#define REG_RSS_KEY5 0x14C4 +#define REG_RSS_KEY6 0x14C8 +#define REG_RSS_KEY7 0x14CC +#define REG_RSS_KEY8 0x14D0 +#define REG_RSS_KEY9 0x14D4 +#define REG_IDT_TABLE4 0x14E0 +#define REG_IDT_TABLE5 0x14E4 +#define REG_IDT_TABLE6 0x14E8 +#define REG_IDT_TABLE7 0x14EC +#define REG_IDT_TABLE0 0x1560 +#define REG_IDT_TABLE1 0x1564 +#define REG_IDT_TABLE2 0x1568 +#define REG_IDT_TABLE3 0x156C +#define REG_IDT_TABLE REG_IDT_TABLE0 +#define REG_RSS_HASH_VALUE 0x1570 +#define REG_RSS_HASH_FLAG 0x1574 +#define REG_BASE_CPU_NUMBER 0x157C + + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1580 +#define TXQ_CTRL_NUM_TPD_BURST_MASK 0xF +#define TXQ_CTRL_NUM_TPD_BURST_SHIFT 0 +#define TXQ_CTRL_EN 0x20 /* 1: Enable TXQ */ +#define TXQ_CTRL_ENH_MODE 0x40 /* Performance enhancement mode, in which up to two back-to-back DMA read commands might be dispatched. */ +#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 /* Number of data byte to read in a cache-aligned burst. Each SRAM entry is 8-byte in length. */ +#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_EARLY_TH 0x1584 /* Jumbo frame threshold in QWORD unit. 
Packet greater than */ +/* JUMBO_TASK_OFFLOAD_THRESHOLD will not be task offloaded. */ +#define TX_TX_EARLY_TH_MASK 0x7ff +#define TX_TX_EARLY_TH_SHIFT 0 + + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define RXQ_CTRL_PBA_ALIGN_32 0 /* rx-packet alignment */ +#define RXQ_CTRL_PBA_ALIGN_64 1 +#define RXQ_CTRL_PBA_ALIGN_128 2 +#define RXQ_CTRL_PBA_ALIGN_256 3 +#define RXQ_CTRL_Q1_EN 0x10 +#define RXQ_CTRL_Q2_EN 0x20 +#define RXQ_CTRL_Q3_EN 0x40 +#define RXQ_CTRL_IPV6_XSUM_VERIFY_EN 0x80 +#define RXQ_CTRL_HASH_TLEN_SHIFT 8 +#define RXQ_CTRL_HASH_TLEN_MASK 0xFF +#define RXQ_CTRL_HASH_TYPE_IPV4 0x10000 +#define RXQ_CTRL_HASH_TYPE_IPV4_TCP 0x20000 +#define RXQ_CTRL_HASH_TYPE_IPV6 0x40000 +#define RXQ_CTRL_HASH_TYPE_IPV6_TCP 0x80000 +#define RXQ_CTRL_RSS_MODE_DISABLE 0 +#define RXQ_CTRL_RSS_MODE_SQSINT 0x4000000 +#define RXQ_CTRL_RSS_MODE_MQUESINT 0x8000000 +#define RXQ_CTRL_RSS_MODE_MQUEMINT 0xC000000 +#define RXQ_CTRL_NIP_QUEUE_SEL_TBL 0x10000000 +#define RXQ_CTRL_HASH_ENABLE 0x20000000 +#define RXQ_CTRL_CUT_THRU_EN 0x40000000 +#define RXQ_CTRL_EN 0x80000000 + +/* Rx jumbo packet threshold and rrd retirement timer */ +#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4 +/* + * Jumbo packet threshold for non-VLAN packet, in QWORD (64-bit) unit. + * When the packet length greater than or equal to this value, RXQ + * shall start cut-through forwarding of the received packet. + */ +#define RXQ_JMBOSZ_TH_MASK 0x7ff +#define RXQ_JMBOSZ_TH_SHIFT 0 /* RRD retirement timer. Decrement by 1 after every 512ns passes*/ +#define RXQ_JMBO_LKAH_MASK 0xf +#define RXQ_JMBO_LKAH_SHIFT 11 + +/* RXF flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff + + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_DMAR_IN_ORDER 0x1 +#define DMA_CTRL_DMAR_ENH_ORDER 0x2 +#define DMA_CTRL_DMAR_OUT_ORDER 0x4 +#define DMA_CTRL_RCB_VALUE 0x8 +#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 +#define DMA_CTRL_DMAR_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 +#define DMA_CTRL_DMAW_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAR_REQ_PRI 0x400 +#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x1F +#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11 +#define DMA_CTRL_DMAW_DLY_CNT_MASK 0xF +#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16 +#define DMA_CTRL_TXCMB_EN 0x100000 +#define DMA_CTRL_RXCMB_EN 0x200000 + + +/* CMB/SMB Control Register */ +#define REG_SMB_STAT_TIMER 0x15C4 +#define REG_TRIG_RRD_THRESH 0x15CA +#define REG_TRIG_TPD_THRESH 0x15C8 +#define REG_TRIG_TXTIMER 0x15CC +#define REG_TRIG_RXTIMER 0x15CE + +/* HOST RXF Page 1,2,3 address */ +#define REG_HOST_RXF1_PAGE0_LO 0x15D0 +#define REG_HOST_RXF1_PAGE1_LO 0x15D4 +#define REG_HOST_RXF2_PAGE0_LO 0x15D8 +#define REG_HOST_RXF2_PAGE1_LO 0x15DC +#define REG_HOST_RXF3_PAGE0_LO 0x15E0 +#define REG_HOST_RXF3_PAGE1_LO 0x15E4 + +/* Mail box */ +#define REG_MB_RXF1_RADDR 0x15B4 +#define REG_MB_RXF2_RADDR 0x15B8 +#define REG_MB_RXF3_RADDR 0x15BC +#define REG_MB_TPD_PROD_IDX 0x15F0 + +/* RXF-Page 0-3 PageNo & Valid bit */ +#define REG_HOST_RXF0_PAGE0_VLD 0x15F4 +#define HOST_RXF_VALID 1 +#define HOST_RXF_PAGENO_SHIFT 1 +#define HOST_RXF_PAGENO_MASK 0x7F +#define REG_HOST_RXF0_PAGE1_VLD 0x15F5 +#define REG_HOST_RXF1_PAGE0_VLD 0x15F6 +#define REG_HOST_RXF1_PAGE1_VLD 0x15F7 +#define REG_HOST_RXF2_PAGE0_VLD 0x15F8 +#define REG_HOST_RXF2_PAGE1_VLD 0x15F9 +#define REG_HOST_RXF3_PAGE0_VLD 0x15FA +#define REG_HOST_RXF3_PAGE1_VLD 0x15FB 
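The RXF page-valid registers above pack two fields into a single byte, following the mask-then-shift convention used for every *_MASK/*_SHIFT pair in this header: bit 0 is the valid flag (HOST_RXF_VALID) and bits 7:1 hold the page number (HOST_RXF_PAGENO_MASK shifted by HOST_RXF_PAGENO_SHIFT). A minimal stand-alone C sketch of that packing follows; the helper names and the main() harness are illustrative only and are not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define HOST_RXF_VALID        1
#define HOST_RXF_PAGENO_SHIFT 1
#define HOST_RXF_PAGENO_MASK  0x7F

/* Pack a page number and a valid flag into one page-valid byte:
 * mask the field first, then shift it into its bit position. */
static uint8_t rxf_page_vld_pack(uint8_t page_no, int valid)
{
	uint8_t v = (uint8_t)((page_no & HOST_RXF_PAGENO_MASK) << HOST_RXF_PAGENO_SHIFT);

	if (valid)
		v |= HOST_RXF_VALID;
	return v;
}

/* Recover the page number from a byte read back from the register. */
static uint8_t rxf_page_vld_pageno(uint8_t v)
{
	return (v >> HOST_RXF_PAGENO_SHIFT) & HOST_RXF_PAGENO_MASK;
}

int main(void)
{
	uint8_t v = rxf_page_vld_pack(1, 1);

	printf("reg byte 0x%02x, page %u, valid %u\n",
	       (unsigned)v, (unsigned)rxf_page_vld_pageno(v),
	       (unsigned)(v & HOST_RXF_VALID));
	return 0;
}

atl1e_read_eeprom composes REG_VPD_CAP the same way, masking the dword offset with VPD_CAP_VPD_ADDR_MASK before shifting it left by VPD_CAP_VPD_ADDR_SHIFT.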
+ +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_SMB 1 +#define ISR_TIMER 2 /* Interrupt when Timer is counted down to zero */ +/* + * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set + * in Table 51 Selene Master Control Register (Offset 0x1400). + */ +#define ISR_MANUAL 4 +#define ISR_HW_RXF_OV 8 /* RXF overflow interrupt */ +#define ISR_HOST_RXF0_OV 0x10 +#define ISR_HOST_RXF1_OV 0x20 +#define ISR_HOST_RXF2_OV 0x40 +#define ISR_HOST_RXF3_OV 0x80 +#define ISR_TXF_UN 0x100 +#define ISR_RX0_PAGE_FULL 0x200 +#define ISR_DMAR_TO_RST 0x400 +#define ISR_DMAW_TO_RST 0x800 +#define ISR_GPHY 0x1000 +#define ISR_TX_CREDIT 0x2000 +#define ISR_GPHY_LPW 0x4000 /* GPHY low power state interrupt */ +#define ISR_RX_PKT 0x10000 /* One packet received, triggered by RFD */ +#define ISR_TX_PKT 0x20000 /* One packet transmitted, triggered by TPD */ +#define ISR_TX_DMA 0x40000 +#define ISR_RX_PKT_1 0x80000 +#define ISR_RX_PKT_2 0x100000 +#define ISR_RX_PKT_3 0x200000 +#define ISR_MAC_RX 0x400000 +#define ISR_MAC_TX 0x800000 +#define ISR_UR_DETECTED 0x1000000 +#define ISR_FERR_DETECTED 0x2000000 +#define ISR_NFERR_DETECTED 0x4000000 +#define ISR_CERR_DETECTED 0x8000000 +#define ISR_PHY_LINKDOWN 0x10000000 +#define ISR_DIS_INT 0x80000000 + + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + + +#define IMR_NORMAL_MASK (\ + ISR_SMB |\ + ISR_TXF_UN |\ + ISR_HW_RXF_OV |\ + ISR_HOST_RXF0_OV|\ + ISR_MANUAL |\ + ISR_GPHY |\ + ISR_GPHY_LPW |\ + ISR_DMAR_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_PHY_LINKDOWN|\ + ISR_RX_PKT |\ + ISR_TX_PKT) + +#define ISR_TX_EVENT (ISR_TXF_UN | ISR_TX_PKT) +#define ISR_RX_EVENT (ISR_HOST_RXF0_OV | ISR_HW_RXF_OV | ISR_RX_PKT) + +#define REG_MAC_RX_STATUS_BIN 0x1700 +#define REG_MAC_RX_STATUS_END 0x175c +#define REG_MAC_TX_STATUS_BIN 0x1760 +#define REG_MAC_TX_STATUS_END 0x17c0 + +/* Hardware Offset Register */ +#define REG_HOST_RXF0_PAGEOFF 0x1800 +#define REG_TPD_CONS_IDX 0x1804 +#define REG_HOST_RXF1_PAGEOFF 0x1808 +#define REG_HOST_RXF2_PAGEOFF 0x180C +#define REG_HOST_RXF3_PAGEOFF 0x1810 + +/* RXF-Page 0-3 Offset DMA Address */ +#define REG_HOST_RXF0_MB0_LO 0x1820 +#define REG_HOST_RXF0_MB1_LO 0x1824 +#define REG_HOST_RXF1_MB0_LO 0x1828 +#define REG_HOST_RXF1_MB1_LO 0x182C +#define REG_HOST_RXF2_MB0_LO 0x1830 +#define REG_HOST_RXF2_MB1_LO 0x1834 +#define REG_HOST_RXF3_MB0_LO 0x1838 +#define REG_HOST_RXF3_MB1_LO 0x183C + +/* Tpd CMB DMA Address */ +#define REG_HOST_TX_CMB_LO 0x1840 +#define REG_HOST_SMB_ADDR_LO 0x1844 + +/* DEBUG ADDR */ +#define REG_DEBUG_DATA0 0x1900 +#define REG_DEBUG_DATA1 0x1904 + +/***************************** MII definition ***************************************/ +/* PHY Common Register */ +#define MII_BMCR 0x00 +#define MII_BMSR 0x01 +#define MII_PHYSID1 0x02 +#define MII_PHYSID2 0x03 +#define MII_ADVERTISE 0x04 +#define MII_LPA 0x05 +#define MII_EXPANSION 0x06 +#define MII_AT001_CR 0x09 +#define MII_AT001_SR 0x0A +#define MII_AT001_ESR 0x0F +#define MII_AT001_PSCR 0x10 +#define MII_AT001_PSSR 0x11 +#define MII_INT_CTRL 0x12 +#define MII_INT_STATUS 0x13 +#define MII_SMARTSPEED 0x14 +#define MII_RERRCOUNTER 0x15 +#define MII_SREVISION 0x16 +#define MII_RESV1 0x17 +#define MII_LBRERROR 0x18 +#define MII_PHYADDR 0x19 +#define MII_RESV2 0x1a +#define MII_TPISTATUS 0x1b +#define MII_NCONFIG 0x1c + +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test 
enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_MASK 0x2040 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Link partner ability register. */ +#define MII_LPA_SLCT 0x001f /* Same as advertise selector */ +#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ +#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ +#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ +#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ +#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */ +#define MII_LPA_PAUSE 0x0400 /* PAUSE */ +#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */ +#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */ +#define MII_LPA_LPACK 0x4000 /* Link partner acked us */ +#define MII_LPA_NPAGE 0x8000 /* Next page bit */ + +/* Autoneg Advertisement Register */ +#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define MII_AR_PAUSE 0x0400 /* Pause operation desired */ +#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ +#define MII_AR_SPEED_MASK 0x01E0 +#define MII_AR_DEFAULT_CAP_MASK 0x0DE0 + +/* 1000BASE-T Control Register */ +#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ +/* 0=DTE device */ +#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ +/* 0=Configure PHY as Slave */ +#define MII_AT001_CR_1000T_MS_ENABLE 
0x1000 /* 1=Master/Slave manual config value */ +/* 0=Automatic Master/Slave config */ +#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ +#define MII_AT001_CR_1000T_SPEED_MASK 0x0300 +#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300 + +/* 1000BASE-T Status Register */ +#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13 + +/* Extended Status Register */ +#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +/* AT001 PHY Specific Control Register */ +#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008 +#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ +/* Manual MDI configuration */ +#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 +/* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 +/* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ +#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5 +#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 +/* AT001 PHY Specific Status Register */ +#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#endif /*_ATHL1E_HW_H_*/ diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c new file mode 100644 index 0000000..39bdf49 --- /dev/null +++ b/drivers/net/atl1e/atl1e_main.c @@ -0,0 +1,2570 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include "atl1e.h" + +#define DRV_VERSION "1.0.0.7-NAPI" + +char atl1e_driver_name[] = "ATL1E"; +char atl1e_driver_version[] = DRV_VERSION; +#define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026 +/* + * atl1e_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, + /* required last entry */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl); + +MODULE_AUTHOR("Atheros Corporation, , Jie Yang "); +MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter); + +static const u16 +atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD}, + {REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD}, + {REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD}, + {REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD} +}; + +static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] = +{ + REG_RXF0_BASE_ADDR_HI, + REG_RXF1_BASE_ADDR_HI, + REG_RXF2_BASE_ADDR_HI, + REG_RXF3_BASE_ADDR_HI +}; + +static const u16 +atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO}, + {REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO}, + {REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO}, + {REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO} +}; + +static const u16 +atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO}, + {REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO}, + {REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO}, + {REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO} +}; + +static const u16 atl1e_pay_load_size[] = { + 128, 256, 512, 1024, 2048, 4096, +}; + +/* + * atl1e_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static inline void atl1e_irq_enable(struct atl1e_adapter *adapter) +{ + if (likely(atomic_dec_and_test(&adapter->irq_sem))) { + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); + AT_WRITE_FLUSH(&adapter->hw); + } +} + +/* + * atl1e_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static inline void atl1e_irq_disable(struct atl1e_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/* + * atl1e_irq_reset - reset interrupt confiure on the NIC + * @adapter: board private structure + */ +static inline void atl1e_irq_reset(struct atl1e_adapter *adapter) +{ + atomic_set(&adapter->irq_sem, 0); + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_FLUSH(&adapter->hw); +} + +/* + * atl1e_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1e_phy_config(unsigned long data) +{ + struct atl1e_adapter *adapter = (struct atl1e_adapter *) data; + struct atl1e_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1e_restart_autoneg(hw); + 
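+	/*
+	 * Editor's note (added commentary): mdio_lock serializes PHY register
+	 * access between this timer callback, atl1e_link_chg_task() and the
+	 * MII ioctl path, all of which poke the same MDIO interface.
+	 */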
spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +void atl1e_reinit_locked(struct atl1e_adapter *adapter) +{ + + WARN_ON(in_interrupt()); + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + atl1e_down(adapter); + atl1e_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); +} + +static void atl1e_reset_task(struct work_struct *work) +{ + struct atl1e_adapter *adapter; + adapter = container_of(work, struct atl1e_adapter, reset_task); + + atl1e_reinit_locked(adapter); +} + +static int atl1e_check_link(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err = 0; + u16 speed, duplex, phy_data; + + /* MII_BMSR must read twice */ + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + if ((phy_data & BMSR_LSTATUS) == 0) { + /* link down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + u32 value; + /* disable rx */ + value = AT_READ_REG(hw, REG_MAC_CTRL); + value &= ~MAC_CTRL_RX_EN; + AT_WRITE_REG(hw, REG_MAC_CTRL, value); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } else { + /* Link Up */ + err = atl1e_get_speed_and_duplex(hw, &speed, &duplex); + if (unlikely(err)) + return err; + + /* link result is our setting */ + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1e_setup_mac_ctrl(adapter); + netdev_info(netdev, + "NIC Link is Up <%d Mbps %s Duplex>\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half"); + } + + if (!netif_carrier_ok(netdev)) { + /* Link down -> Up */ + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + } + return 0; +} + +/* + * atl1e_link_chg_task - deal with link change event Out of interrupt context + * @netdev: network interface device structure + */ +static void atl1e_link_chg_task(struct work_struct *work) +{ + struct atl1e_adapter *adapter; + unsigned long flags; + + adapter = container_of(work, struct atl1e_adapter, link_chg_task); + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1e_check_link(adapter); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +static void atl1e_link_chg_event(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + u16 link_up = 0; + + spin_lock(&adapter->mdio_lock); + atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->mdio_lock); + link_up = phy_data & BMSR_LSTATUS; + /* notify upper layer link down ASAP */ + if (!link_up) { + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + netdev_info(netdev, "NIC Link is Down\n"); + adapter->link_speed = SPEED_0; + netif_stop_queue(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +static void atl1e_del_timer(struct atl1e_adapter *adapter) +{ + del_timer_sync(&adapter->phy_config_timer); +} + +static void atl1e_cancel_work(struct atl1e_adapter *adapter) +{ + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->link_chg_task); +} + +/* + * atl1e_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl1e_tx_timeout(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); +} + +/* + * atl1e_set_multi - Multicast and 
Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + */ +static void atl1e_set_multi(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u32 mac_ctrl_data = 0; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL); + + if (netdev->flags & IFF_PROMISC) { + mac_ctrl_data |= MAC_CTRL_PROMIS_EN; + } else if (netdev->flags & IFF_ALLMULTI) { + mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; + mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN; + } else { + mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + } + + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + /* comoute mc addresses' hash value ,and put it into hash table */ + netdev_for_each_mc_addr(mc_ptr, netdev) { + hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr); + atl1e_hash_set(hw, hash_value); + } +} + +static void atl1e_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + u32 mac_ctrl_data = 0; + + netdev_dbg(adapter->netdev, "%s\n", __func__); + + atl1e_irq_disable(adapter); + + adapter->vlgrp = grp; + mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); + + if (grp) { + /* enable VLAN tag insert/strip */ + mac_ctrl_data |= MAC_CTRL_RMV_VLAN; + } else { + /* disable VLAN tag insert/strip */ + mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN; + } + + AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); + atl1e_irq_enable(adapter); +} + +static void atl1e_restore_vlan(struct atl1e_adapter *adapter) +{ + netdev_dbg(adapter->netdev, "%s\n", __func__); + atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp); +} +/* + * atl1e_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl1e_set_mac_addr(struct net_device *netdev, void *p) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl1e_hw_set_mac_addr(&adapter->hw); + + return 0; +} + +/* + * atl1e_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1e_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + netdev_warn(adapter->netdev, "invalid MTU setting\n"); + return -EINVAL; + } + /* set MTU */ + if (old_mtu != new_mtu && netif_running(netdev)) { + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + 
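+			/*
+			 * Editor's worked example (not in the original patch),
+			 * assuming the usual 14/4/4 byte values for ETH_HLEN,
+			 * ETH_FCS_LEN and VLAN_HLEN: for the default 1500-byte
+			 * MTU, max_frame above is 1500 + 14 + 4 + 4 = 1522, and
+			 * rx_jumbo_th below becomes (1522 + 7) >> 3 = 191, i.e.
+			 * the threshold is kept in 8-byte (QWORD) units as
+			 * REG_RXQ_JMBOSZ_RRDTIM expects.
+			 */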
msleep(1); + netdev->mtu = new_mtu; + adapter->hw.max_frame_size = new_mtu; + adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3; + atl1e_down(adapter); + atl1e_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); + } + return 0; +} + +/* + * caller should hold mdio_lock + */ +static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); + return result; +} + +static void atl1e_mdio_write(struct net_device *netdev, int phy_id, + int reg_num, int val) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); +} + +/* + * atl1e_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl1e_mii_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + int retval = 0; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 0; + break; + + case SIOCGMIIREG: + if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + retval = -EIO; + goto out; + } + break; + + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) { + retval = -EFAULT; + goto out; + } + + netdev_dbg(adapter->netdev, " write %x %x\n", + data->reg_num, data->val_in); + if (atl1e_write_phy_reg(&adapter->hw, + data->reg_num, data->val_in)) { + retval = -EIO; + goto out; + } + break; + + default: + retval = -EOPNOTSUPP; + break; + } +out: + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + return retval; + +} + +/* + * atl1e_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl1e_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +static void atl1e_setup_pcicmd(struct pci_dev *pdev) +{ + u16 cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO); + cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + + /* + * some motherboards BIOS(PXE/EFI) driver may set PME + * while they transfer control to OS (Windows/Linux) + * so we should clear this bit before NIC work normally + */ + pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0); + msleep(1); +} + +/* + * atl1e_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + */ +static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter) +{ + return 0; +} + +/* + * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter) + * @adapter: board private structure to initialize + * + * atl1e_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
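+ *
+ * Editor's note (added commentary): the body below also picks the
+ * hardware flavour from the PCI revision and PHY status (a revision of
+ * 0xF0 or above is treated as athr_l2e_revB, otherwise PHY_STATUS_100M
+ * separates athr_l1e from athr_l2e_revA); the TX/RX queue configuration
+ * later keys off this nic_type.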
+ */ +static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 phy_status_data = 0; + + adapter->wol = 0; + adapter->link_speed = SPEED_0; /* hardware init */ + adapter->link_duplex = FULL_DUPLEX; + adapter->num_rx_queues = 1; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); + /* nic type */ + if (hw->revision_id >= 0xF0) { + hw->nic_type = athr_l2e_revB; + } else { + if (phy_status_data & PHY_STATUS_100M) + hw->nic_type = athr_l1e; + else + hw->nic_type = athr_l2e_revA; + } + + phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); + + if (phy_status_data & PHY_STATUS_EMI_CA) + hw->emi_ca = true; + else + hw->emi_ca = false; + + hw->phy_configured = false; + hw->preamble_len = 7; + hw->max_frame_size = adapter->netdev->mtu; + hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN + 7) >> 3; + + hw->rrs_type = atl1e_rrs_disable; + hw->indirect_tab = 0; + hw->base_cpu = 0; + + /* need confirm */ + + hw->ict = 50000; /* 100ms */ + hw->smb_timer = 200000; /* 200ms */ + hw->tpd_burst = 5; + hw->rrd_thresh = 1; + hw->tpd_thresh = adapter->tx_ring.count / 2; + hw->rx_count_down = 4; /* 2us resolution */ + hw->tx_count_down = hw->imt * 4 / 3; + hw->dmar_block = atl1e_dma_req_1024; + hw->dmaw_block = atl1e_dma_req_1024; + hw->dmar_dly_cnt = 15; + hw->dmaw_dly_cnt = 4; + + if (atl1e_alloc_queues(adapter)) { + netdev_err(adapter->netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + atomic_set(&adapter->irq_sem, 1); + spin_lock_init(&adapter->mdio_lock); + spin_lock_init(&adapter->tx_lock); + + set_bit(__AT_DOWN, &adapter->flags); + + return 0; +} + +/* + * atl1e_clean_tx_ring - Free Tx-skb + * @adapter: board private structure + */ +static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *) + &adapter->tx_ring; + struct atl1e_tx_buffer *tx_buffer = NULL; + struct pci_dev *pdev = adapter->pdev; + u16 index, ring_count; + + if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL) + return; + + ring_count = tx_ring->count; + /* first unmmap dma */ + for (index = 0; index < ring_count; index++) { + tx_buffer = &tx_ring->tx_buffer[index]; + if (tx_buffer->dma) { + if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) + pci_unmap_single(pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) + pci_unmap_page(pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + tx_buffer->dma = 0; + } + } + /* second free skb */ + for (index = 0; index < ring_count; index++) { + tx_buffer = &tx_ring->tx_buffer[index]; + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + } + } + /* Zero out Tx-buffers */ + memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) * + ring_count); + memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) * + ring_count); +} + +/* + * atl1e_clean_rx_ring - Free rx-reservation skbs + * @adapter: board private structure + */ +static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter) +{ + struct atl1e_rx_ring *rx_ring = + (struct atl1e_rx_ring *)&adapter->rx_ring; + 
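+	/*
+	 * Editor's note (added commentary): receive data is not kept in
+	 * per-buffer skbs but in large per-queue pages; each queue owns
+	 * AT_PAGE_NUM_PER_QUEUE pages (two, judging by the PAGE0/PAGE1
+	 * register pairs) that are used alternately, so "cleaning" here only
+	 * means zeroing the page contents.
+	 */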
struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc; + u16 i, j; + + + if (adapter->ring_vir_addr == NULL) + return; + /* Zero out the descriptor ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + if (rx_page_desc[i].rx_page[j].addr != NULL) { + memset(rx_page_desc[i].rx_page[j].addr, 0, + rx_ring->real_page_size); + } + } + } +} + +static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size) +{ + *ring_size = ((u32)(adapter->tx_ring.count * + sizeof(struct atl1e_tpd_desc) + 7 + /* tx ring, qword align */ + + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE * + adapter->num_rx_queues + 31 + /* rx ring, 32 bytes align */ + + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) * + sizeof(u32) + 3)); + /* tx, rx cmd, dword align */ +} + +static void atl1e_init_ring_resources(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = NULL; + struct atl1e_rx_ring *rx_ring = NULL; + + tx_ring = &adapter->tx_ring; + rx_ring = &adapter->rx_ring; + + rx_ring->real_page_size = adapter->rx_ring.page_size + + adapter->hw.max_frame_size + + ETH_HLEN + VLAN_HLEN + + ETH_FCS_LEN; + rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32); + atl1e_cal_ring_size(adapter, &adapter->ring_size); + + adapter->ring_vir_addr = NULL; + adapter->rx_ring.desc = NULL; + rwlock_init(&adapter->tx_ring.tx_lock); + + return; +} + +/* + * Read / Write Ptr Initialize: + */ +static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = NULL; + struct atl1e_rx_ring *rx_ring = NULL; + struct atl1e_rx_page_desc *rx_page_desc = NULL; + int i, j; + + tx_ring = &adapter->tx_ring; + rx_ring = &adapter->rx_ring; + rx_page_desc = rx_ring->rx_page_desc; + + tx_ring->next_to_use = 0; + atomic_set(&tx_ring->next_to_clean, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_page_desc[i].rx_using = 0; + rx_page_desc[i].rx_nxseq = 0; + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + *rx_page_desc[i].rx_page[j].write_offset_addr = 0; + rx_page_desc[i].rx_page[j].read_offset = 0; + } + } +} + +/* + * atl1e_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl1e_free_ring_resources(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + atl1e_clean_tx_ring(adapter); + atl1e_clean_rx_ring(adapter); + + if (adapter->ring_vir_addr) { + pci_free_consistent(pdev, adapter->ring_size, + adapter->ring_vir_addr, adapter->ring_dma); + adapter->ring_vir_addr = NULL; + } + + if (adapter->tx_ring.tx_buffer) { + kfree(adapter->tx_ring.tx_buffer); + adapter->tx_ring.tx_buffer = NULL; + } +} + +/* + * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1e_tx_ring *tx_ring; + struct atl1e_rx_ring *rx_ring; + struct atl1e_rx_page_desc *rx_page_desc; + int size, i, j; + u32 offset = 0; + int err = 0; + + if (adapter->ring_vir_addr != NULL) + return 0; /* alloced already */ + + tx_ring = &adapter->tx_ring; + rx_ring = &adapter->rx_ring; + + /* real ring DMA buffer */ + + size = adapter->ring_size; + adapter->ring_vir_addr = pci_alloc_consistent(pdev, + adapter->ring_size, &adapter->ring_dma); + + if (adapter->ring_vir_addr == NULL) { + 
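+		/*
+		 * Editor's note (added commentary): this single coherent
+		 * allocation has to cover the TPD ring (8-byte aligned), all
+		 * RXF pages (32-byte aligned) and the CMB/write-offset dwords,
+		 * which is exactly what atl1e_cal_ring_size() added up, so a
+		 * failure here is fatal for the whole ring setup.
+		 */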
netdev_err(adapter->netdev, + "pci_alloc_consistent failed, size = D%d\n", size); + return -ENOMEM; + } + + memset(adapter->ring_vir_addr, 0, adapter->ring_size); + + rx_page_desc = rx_ring->rx_page_desc; + + /* Init TPD Ring */ + tx_ring->dma = roundup(adapter->ring_dma, 8); + offset = tx_ring->dma - adapter->ring_dma; + tx_ring->desc = (struct atl1e_tpd_desc *) + (adapter->ring_vir_addr + offset); + size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count); + tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL); + if (tx_ring->tx_buffer == NULL) { + netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n", + size); + err = -ENOMEM; + goto failed; + } + + /* Init RXF-Pages */ + offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count); + offset = roundup(offset, 32); + + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + rx_page_desc[i].rx_page[j].dma = + adapter->ring_dma + offset; + rx_page_desc[i].rx_page[j].addr = + adapter->ring_vir_addr + offset; + offset += rx_ring->real_page_size; + } + } + + /* Init CMB dma address */ + tx_ring->cmb_dma = adapter->ring_dma + offset; + tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset); + offset += sizeof(u32); + + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + rx_page_desc[i].rx_page[j].write_offset_dma = + adapter->ring_dma + offset; + rx_page_desc[i].rx_page[j].write_offset_addr = + adapter->ring_vir_addr + offset; + offset += sizeof(u32); + } + } + + if (unlikely(offset > adapter->ring_size)) { + netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", + offset, adapter->ring_size); + err = -1; + goto failed; + } + + return 0; +failed: + if (adapter->ring_vir_addr != NULL) { + pci_free_consistent(pdev, adapter->ring_size, + adapter->ring_vir_addr, adapter->ring_dma); + adapter->ring_vir_addr = NULL; + } + return err; +} + +static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter) +{ + + struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; + struct atl1e_rx_ring *rx_ring = + (struct atl1e_rx_ring *)&adapter->rx_ring; + struct atl1e_tx_ring *tx_ring = + (struct atl1e_tx_ring *)&adapter->tx_ring; + struct atl1e_rx_page_desc *rx_page_desc = NULL; + int i, j; + + AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, + (u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32)); + AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO, + (u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count)); + AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO, + (u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK)); + + rx_page_desc = rx_ring->rx_page_desc; + /* RXF Page Physical address / Page Length */ + for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) { + AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i], + (u32)((adapter->ring_dma & + AT_DMA_HI_ADDR_MASK) >> 32)); + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + u32 page_phy_addr; + u32 offset_phy_addr; + + page_phy_addr = rx_page_desc[i].rx_page[j].dma; + offset_phy_addr = + rx_page_desc[i].rx_page[j].write_offset_dma; + + AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j], + page_phy_addr & AT_DMA_LO_ADDR_MASK); + AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j], + offset_phy_addr & AT_DMA_LO_ADDR_MASK); + AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1); + } + } + /* Page Length */ + AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size); + /* Load all of base address above */ + AT_WRITE_REG(hw, REG_LOAD_PTR, 1); + + return; +} + +static inline void atl1e_configure_tx(struct 
atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; + u32 dev_ctrl_data = 0; + u32 max_pay_load = 0; + u32 jumbo_thresh = 0; + u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ + + /* configure TXQ param */ + if (hw->nic_type != athr_l2e_revB) { + extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; + if (hw->max_frame_size <= 1500) { + jumbo_thresh = hw->max_frame_size + extra_size; + } else if (hw->max_frame_size < 6*1024) { + jumbo_thresh = + (hw->max_frame_size + extra_size) * 2 / 3; + } else { + jumbo_thresh = (hw->max_frame_size + extra_size) / 2; + } + AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3); + } + + dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL); + + max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) & + DEVICE_CTRL_MAX_PAYLOAD_MASK; + + hw->dmaw_block = min(max_pay_load, hw->dmaw_block); + + max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) & + DEVICE_CTRL_MAX_RREQ_SZ_MASK; + hw->dmar_block = min(max_pay_load, hw->dmar_block); + + if (hw->nic_type != athr_l2e_revB) + AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2, + atl1e_pay_load_size[hw->dmar_block]); + /* enable TXQ */ + AT_WRITE_REGW(hw, REG_TXQ_CTRL, + (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK) + << TXQ_CTRL_NUM_TPD_BURST_SHIFT) + | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN); + return; +} + +static inline void atl1e_configure_rx(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; + u32 rxf_len = 0; + u32 rxf_low = 0; + u32 rxf_high = 0; + u32 rxf_thresh_data = 0; + u32 rxq_ctrl_data = 0; + + if (hw->nic_type != athr_l2e_revB) { + AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM, + (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) << + RXQ_JMBOSZ_TH_SHIFT | + (1 & RXQ_JMBO_LKAH_MASK) << + RXQ_JMBO_LKAH_SHIFT)); + + rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN); + rxf_high = rxf_len * 4 / 5; + rxf_low = rxf_len / 5; + rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK) + << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK) + << RXQ_RXF_PAUSE_TH_LO_SHIFT); + + AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data); + } + + /* RRS */ + AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); + AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu); + + if (hw->rrs_type & atl1e_rrs_ipv4) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4; + + if (hw->rrs_type & atl1e_rrs_ipv4_tcp) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP; + + if (hw->rrs_type & atl1e_rrs_ipv6) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6; + + if (hw->rrs_type & atl1e_rrs_ipv6_tcp) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP; + + if (hw->rrs_type != atl1e_rrs_disable) + rxq_ctrl_data |= + (RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT); + + rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 | + RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; + + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); + return; +} + +static inline void atl1e_configure_dma(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + u32 dma_ctrl_data = 0; + + dma_ctrl_data = DMA_CTRL_RXCMB_EN; + dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) + << DMA_CTRL_DMAR_BURST_LEN_SHIFT; + dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) + << DMA_CTRL_DMAW_BURST_LEN_SHIFT; + dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER; + dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK) + << DMA_CTRL_DMAR_DLY_CNT_SHIFT; + dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & 
DMA_CTRL_DMAW_DLY_CNT_MASK) + << DMA_CTRL_DMAW_DLY_CNT_SHIFT; + + AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); + return; +} + +static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter) +{ + u32 value; + struct atl1e_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + /* Config MAC CTRL Register */ + value = MAC_CTRL_TX_EN | + MAC_CTRL_RX_EN ; + + if (FULL_DUPLEX == adapter->link_duplex) + value |= MAC_CTRL_DUPLX; + + value |= ((u32)((SPEED_1000 == adapter->link_speed) ? + MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << + MAC_CTRL_SPEED_SHIFT); + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + value |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + + if (adapter->vlgrp) + value |= MAC_CTRL_RMV_VLAN; + + value |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + value |= MAC_CTRL_PROMIS_EN; + if (netdev->flags & IFF_ALLMULTI) + value |= MAC_CTRL_MC_ALL_EN; + + AT_WRITE_REG(hw, REG_MAC_CTRL, value); +} + +/* + * atl1e_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. + */ +static int atl1e_configure(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + + u32 intr_status_data = 0; + + /* clear interrupt status */ + AT_WRITE_REG(hw, REG_ISR, ~0); + + /* 1. set MAC Address */ + atl1e_hw_set_mac_addr(hw); + + /* 2. Init the Multicast HASH table done by set_muti */ + + /* 3. Clear any WOL status */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* 4. Descripter Ring BaseMem/Length/Read ptr/Write ptr + * TPD Ring/SMB/RXF0 Page CMBs, they use the same + * High 32bits memory */ + atl1e_configure_des_ring(adapter); + + /* 5. set Interrupt Moderator Timer */ + AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt); + AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt); + AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE | + MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN); + + /* 6. rx/tx threshold to trig interrupt */ + AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh); + AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh); + AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down); + AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down); + + /* 7. set Interrupt Clear Timer */ + AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict); + + /* 8. set MTU */ + AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN); + + /* 9. config TXQ early tx threshold */ + atl1e_configure_tx(adapter); + + /* 10. config RXQ */ + atl1e_configure_rx(adapter); + + /* 11. config DMA Engine */ + atl1e_configure_dma(adapter); + + /* 12. smb timer to trig interrupt */ + AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer); + + intr_status_data = AT_READ_REG(hw, REG_ISR); + if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) { + netdev_err(adapter->netdev, + "atl1e_configure failed, PCIE phy link down\n"); + return -1; + } + + AT_WRITE_REG(hw, REG_ISR, 0x7fffffff); + return 0; +} + +/* + * atl1e_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
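+ *
+ * Editor's note (added commentary): the "timer" here is the hardware SMB
+ * statistics timer (REG_SMB_STAT_TIMER); when it raises ISR_SMB,
+ * atl1e_intr() calls atl1e_update_hw_stats(), which walks the
+ * REG_MAC_RX/TX_STATUS_BIN counter windows and accumulates them into
+ * adapter->hw_stats.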
+ */ +static struct net_device_stats *atl1e_get_stats(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw_stats *hw_stats = &adapter->hw_stats; + struct net_device_stats *net_stats = &netdev->stats; + + net_stats->rx_packets = hw_stats->rx_ok; + net_stats->tx_packets = hw_stats->tx_ok; + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_1_col + + hw_stats->tx_2_col * 2 + + hw_stats->tx_late_col + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + hw_stats->rx_sz_ov + + hw_stats->rx_rrd_ov + hw_stats->rx_align_err; + net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + + net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + + net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + + hw_stats->tx_underrun + hw_stats->tx_trunc; + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + return net_stats; +} + +static void atl1e_update_hw_stats(struct atl1e_adapter *adapter) +{ + u16 hw_reg_addr = 0; + unsigned long *stats_item = NULL; + + /* update rx status */ + hw_reg_addr = REG_MAC_RX_STATUS_BIN; + stats_item = &adapter->hw_stats.rx_ok; + while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { + *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); + stats_item++; + hw_reg_addr += 4; + } + /* update tx status */ + hw_reg_addr = REG_MAC_TX_STATUS_BIN; + stats_item = &adapter->hw_stats.tx_ok; + while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { + *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); + stats_item++; + hw_reg_addr += 4; + } +} + +static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter) +{ + u16 phy_data; + + spin_lock(&adapter->mdio_lock); + atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data); + spin_unlock(&adapter->mdio_lock); +} + +static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *) + &adapter->tx_ring; + struct atl1e_tx_buffer *tx_buffer = NULL; + u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX); + u16 next_to_clean = atomic_read(&tx_ring->next_to_clean); + + while (next_to_clean != hw_next_to_clean) { + tx_buffer = &tx_ring->tx_buffer[next_to_clean]; + if (tx_buffer->dma) { + if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) + pci_unmap_single(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) + pci_unmap_page(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + tx_buffer->dma = 0; + } + + if (tx_buffer->skb) { + dev_kfree_skb_irq(tx_buffer->skb); + tx_buffer->skb = NULL; + } + + if (++next_to_clean == tx_ring->count) + next_to_clean = 0; + } + + atomic_set(&tx_ring->next_to_clean, next_to_clean); + + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) { + netif_wake_queue(adapter->netdev); + } + + return true; +} + +/* + * atl1e_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * 
@pt_regs: CPU registers structure + */ +static irqreturn_t atl1e_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + int max_ints = AT_MAX_INT_WORK; + int handled = IRQ_NONE; + u32 status; + + do { + status = AT_READ_REG(hw, REG_ISR); + if ((status & IMR_NORMAL_MASK) == 0 || + (status & ISR_DIS_INT) != 0) { + if (max_ints != AT_MAX_INT_WORK) + handled = IRQ_HANDLED; + break; + } + /* link event */ + if (status & ISR_GPHY) + atl1e_clear_phy_int(adapter); + /* Ack ISR */ + AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); + + handled = IRQ_HANDLED; + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + netdev_err(adapter->netdev, + "pcie phy linkdown %x\n", status); + if (netif_running(adapter->netdev)) { + /* reset MAC */ + atl1e_irq_reset(adapter); + schedule_work(&adapter->reset_task); + break; + } + } + + /* check if DMA read/write error */ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + netdev_err(adapter->netdev, + "PCIE DMA RW error (status = 0x%x)\n", + status); + atl1e_irq_reset(adapter); + schedule_work(&adapter->reset_task); + break; + } + + if (status & ISR_SMB) + atl1e_update_hw_stats(adapter); + + /* link event */ + if (status & (ISR_GPHY | ISR_MANUAL)) { + netdev->stats.tx_carrier_errors++; + atl1e_link_chg_event(adapter); + break; + } + + /* transmit event */ + if (status & ISR_TX_EVENT) + atl1e_clean_tx_irq(adapter); + + if (status & ISR_RX_EVENT) { + /* + * disable rx interrupts, without + * the synchronize_irq bit + */ + AT_WRITE_REG(hw, REG_IMR, + IMR_NORMAL_MASK & ~ISR_RX_EVENT); + AT_WRITE_FLUSH(hw); + if (likely(napi_schedule_prep( + &adapter->napi))) + __napi_schedule(&adapter->napi); + } + } while (--max_ints > 0); + /* re-enable Interrupt*/ + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + + return handled; +} + +static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_recv_ret_status *prrs) +{ + u8 *packet = (u8 *)(prrs + 1); + struct iphdr *iph; + u16 head_len = ETH_HLEN; + u16 pkt_flags; + u16 err_flags; + + skb->ip_summed = CHECKSUM_NONE; + pkt_flags = prrs->pkt_flag; + err_flags = prrs->err_flag; + if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) && + ((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) { + if (pkt_flags & RRS_IS_IPV4) { + if (pkt_flags & RRS_IS_802_3) + head_len += 8; + iph = (struct iphdr *) (packet + head_len); + if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF)) + goto hw_xsum; + } + if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + } + +hw_xsum : + return; +} + +static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter, + u8 que) +{ + struct atl1e_rx_page_desc *rx_page_desc = + (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc; + u8 rx_using = rx_page_desc[que].rx_using; + + return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]); +} + +static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *) + &adapter->rx_ring; + struct atl1e_rx_page_desc *rx_page_desc = + (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc; + struct sk_buff *skb = NULL; + struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que); + u32 packet_size, write_offset; + struct atl1e_recv_ret_status *prrs; + + write_offset 
= *(rx_page->write_offset_addr); + if (likely(rx_page->read_offset < write_offset)) { + do { + if (*work_done >= work_to_do) + break; + (*work_done)++; + /* get new packet's rrs */ + prrs = (struct atl1e_recv_ret_status *) (rx_page->addr + + rx_page->read_offset); + /* check sequence number */ + if (prrs->seq_num != rx_page_desc[que].rx_nxseq) { + netdev_err(netdev, + "rx sequence number error (rx=%d) (expect=%d)\n", + prrs->seq_num, + rx_page_desc[que].rx_nxseq); + rx_page_desc[que].rx_nxseq++; + /* just for debug use */ + AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0, + (((u32)prrs->seq_num) << 16) | + rx_page_desc[que].rx_nxseq); + goto fatal_err; + } + rx_page_desc[que].rx_nxseq++; + + /* error packet */ + if (prrs->pkt_flag & RRS_IS_ERR_FRAME) { + if (prrs->err_flag & (RRS_ERR_BAD_CRC | + RRS_ERR_DRIBBLE | RRS_ERR_CODE | + RRS_ERR_TRUNC)) { + /* hardware error, discard this packet*/ + netdev_err(netdev, + "rx packet desc error %x\n", + *((u32 *)prrs + 1)); + goto skip_pkt; + } + } + + packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK) - 4; /* CRC */ + skb = netdev_alloc_skb_ip_align(netdev, packet_size); + if (skb == NULL) { + netdev_warn(netdev, + "Memory squeeze, deferring packet\n"); + goto skip_pkt; + } + skb->dev = netdev; + memcpy(skb->data, (u8 *)(prrs + 1), packet_size); + skb_put(skb, packet_size); + skb->protocol = eth_type_trans(skb, netdev); + atl1e_rx_checksum(adapter, skb, prrs); + + if (unlikely(adapter->vlgrp && + (prrs->pkt_flag & RRS_IS_VLAN_TAG))) { + u16 vlan_tag = (prrs->vtag >> 4) | + ((prrs->vtag & 7) << 13) | + ((prrs->vtag & 8) << 9); + netdev_dbg(netdev, + "RXD VLAN TAG=0x%04x\n", + prrs->vtag); + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, + vlan_tag); + } else { + netif_receive_skb(skb); + } + +skip_pkt: + /* skip current packet whether it's ok or not. 
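+			   Editor's note (added commentary): the advance below
+			   rounds the RRS header plus reported payload size up
+			   to a 32-byte boundary (the "+ 31 ... & 0xFFFFFFE0"
+			   arithmetic), presumably the record alignment the
+			   hardware uses when writing into the page.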
*/ + rx_page->read_offset += + (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK) + + sizeof(struct atl1e_recv_ret_status) + 31) & + 0xFFFFFFE0); + + if (rx_page->read_offset >= rx_ring->page_size) { + /* mark this page clean */ + u16 reg_addr; + u8 rx_using; + + rx_page->read_offset = + *(rx_page->write_offset_addr) = 0; + rx_using = rx_page_desc[que].rx_using; + reg_addr = + atl1e_rx_page_vld_regs[que][rx_using]; + AT_WRITE_REGB(&adapter->hw, reg_addr, 1); + rx_page_desc[que].rx_using ^= 1; + rx_page = atl1e_get_rx_page(adapter, que); + } + write_offset = *(rx_page->write_offset_addr); + } while (rx_page->read_offset < write_offset); + } + + return; + +fatal_err: + if (!test_bit(__AT_DOWN, &adapter->flags)) + schedule_work(&adapter->reset_task); +} + +/* + * atl1e_clean - NAPI Rx polling callback + * @adapter: board private structure + */ +static int atl1e_clean(struct napi_struct *napi, int budget) +{ + struct atl1e_adapter *adapter = + container_of(napi, struct atl1e_adapter, napi); + u32 imr_data; + int work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + + atl1e_clean_rx_irq(adapter, 0, &work_done, budget); + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if (work_done < budget) { +quit_polling: + napi_complete(napi); + imr_data = AT_READ_REG(&adapter->hw, REG_IMR); + AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); + /* test debug */ + if (test_bit(__AT_DOWN, &adapter->flags)) { + atomic_dec(&adapter->irq_sem); + netdev_err(adapter->netdev, + "atl1e_clean is called when AT_DOWN\n"); + } + /* reenable RX intr */ + /*atl1e_irq_enable(adapter); */ + + } + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void atl1e_netpoll(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + atl1e_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + u16 next_to_use = 0; + u16 next_to_clean = 0; + + next_to_clean = atomic_read(&tx_ring->next_to_clean); + next_to_use = tx_ring->next_to_use; + + return (u16)(next_to_clean > next_to_use) ? 
+ (next_to_clean - next_to_use - 1) : + (tx_ring->count + next_to_clean - next_to_use - 1); +} + +/* + * get next usable tpd + * Note: should call atl1e_tdp_avail to make sure + * there is enough tpd to use + */ +static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + u16 next_to_use = 0; + + next_to_use = tx_ring->next_to_use; + if (++tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + + memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc)); + return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use]; +} + +static struct atl1e_tx_buffer * +atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + + return &tx_ring->tx_buffer[tpd - tx_ring->desc]; +} + +/* Calculate the transmit packet descript needed*/ +static u16 atl1e_cal_tdp_req(const struct sk_buff *skb) +{ + int i = 0; + u16 tpd_req = 1; + u16 fg_size = 0; + u16 proto_hdr_len = 0; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + fg_size = skb_shinfo(skb)->frags[i].size; + tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT); + } + + if (skb_is_gso(skb)) { + if (skb->protocol == htons(ETH_P_IP) || + (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) { + proto_hdr_len = skb_transport_offset(skb) + + tcp_hdrlen(skb); + if (proto_hdr_len < skb_headlen(skb)) { + tpd_req += ((skb_headlen(skb) - proto_hdr_len + + MAX_TX_BUF_LEN - 1) >> + MAX_TX_BUF_SHIFT); + } + } + + } + return tpd_req; +} + +static int atl1e_tso_csum(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_tpd_desc *tpd) +{ + u8 hdr_len; + u32 real_len; + unsigned short offload_type; + int err; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (unlikely(err)) + return -1; + } + offload_type = skb_shinfo(skb)->gso_type; + + if (offload_type & SKB_GSO_TCPV4) { + real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + + ntohs(ip_hdr(skb)->tot_len)); + + if (real_len < skb->len) + pskb_trim(skb, real_len); + + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum need */ + netdev_warn(adapter->netdev, + "IPV4 tso with zero data??\n"); + goto check_sum; + } else { + ip_hdr(skb)->check = 0; + ip_hdr(skb)->tot_len = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic( + ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + tpd->word3 |= (ip_hdr(skb)->ihl & + TDP_V4_IPHL_MASK) << + TPD_V4_IPHL_SHIFT; + tpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << + TPD_TCPHDRLEN_SHIFT; + tpd->word3 |= ((skb_shinfo(skb)->gso_size) & + TPD_MSS_MASK) << TPD_MSS_SHIFT; + tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; + } + return 0; + } + } + +check_sum: + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + u8 css, cso; + + cso = skb_transport_offset(skb); + if (unlikely(cso & 0x1)) { + netdev_err(adapter->netdev, + "payload offset should not ant event number\n"); + return -1; + } else { + css = cso + skb->csum_offset; + tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) << + TPD_CCSUMOFFSET_SHIFT; + tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT; + } + } + + return 0; +} + +static void atl1e_tx_map(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_tpd_desc *tpd) +{ + struct atl1e_tpd_desc *use_tpd = NULL; + struct atl1e_tx_buffer *tx_buffer = NULL; + u16 buf_len = skb->len 
- skb->data_len; + u16 map_len = 0; + u16 mapped_len = 0; + u16 hdr_len = 0; + u16 nr_frags; + u16 f; + int segment; + + nr_frags = skb_shinfo(skb)->nr_frags; + segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; + if (segment) { + /* TSO */ + map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + use_tpd = tpd; + + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + tx_buffer->length = map_len; + tx_buffer->dma = pci_map_single(adapter->pdev, + skb->data, hdr_len, PCI_DMA_TODEVICE); + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + + while (mapped_len < buf_len) { + /* mapped_len == 0, means we should use the first tpd, + which is given by caller */ + if (mapped_len == 0) { + use_tpd = tpd; + } else { + use_tpd = atl1e_get_tpd(adapter); + memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); + } + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + tx_buffer->skb = NULL; + + tx_buffer->length = map_len = + ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ? + MAX_TX_BUF_LEN : (buf_len - mapped_len); + tx_buffer->dma = + pci_map_single(adapter->pdev, skb->data + mapped_len, + map_len, PCI_DMA_TODEVICE); + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + u16 i; + u16 seg_num; + + frag = &skb_shinfo(skb)->frags[f]; + buf_len = frag->size; + + seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; + for (i = 0; i < seg_num; i++) { + use_tpd = atl1e_get_tpd(adapter); + memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); + + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + BUG_ON(tx_buffer->skb); + + tx_buffer->skb = NULL; + tx_buffer->length = + (buf_len > MAX_TX_BUF_LEN) ? + MAX_TX_BUF_LEN : buf_len; + buf_len -= tx_buffer->length; + + tx_buffer->dma = + pci_map_page(adapter->pdev, frag->page, + frag->page_offset + + (i * MAX_TX_BUF_LEN), + tx_buffer->length, + PCI_DMA_TODEVICE); + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE); + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + } + + if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK) + /* note this one is a tcp header */ + tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; + /* The last tpd */ + + use_tpd->word3 |= 1 << TPD_EOP_SHIFT; + /* The last buffer info contain the skb address, + so it will be free after unmap */ + tx_buffer->skb = skb; +} + +static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count, + struct atl1e_tpd_desc *tpd) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
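+	 *
+	 * Editor's note (added commentary): the write below publishes
+	 * tx_ring->next_to_use as the new TPD producer index through the
+	 * REG_MB_TPD_PROD_IDX mailbox; the barrier guarantees the descriptors
+	 * themselves are visible in memory before hardware is told to fetch
+	 * them.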
*/ + wmb(); + AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use); +} + +static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u16 tpd_req = 1; + struct atl1e_tpd_desc *tpd; + + if (test_bit(__AT_DOWN, &adapter->flags)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + tpd_req = atl1e_cal_tdp_req(skb); + if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) + return NETDEV_TX_LOCKED; + + if (atl1e_tpd_avail(adapter) < tpd_req) { + /* no enough descriptor, just stop queue */ + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + tpd = atl1e_get_tpd(adapter); + + if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { + u16 vlan_tag = vlan_tx_tag_get(skb); + u16 atl1e_vlan_tag; + + tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; + AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag); + tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) << + TPD_VLAN_SHIFT; + } + + if (skb->protocol == htons(ETH_P_8021Q)) + tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT; + + if (skb_network_offset(skb) != ETH_HLEN) + tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */ + + /* do TSO and check sum */ + if (atl1e_tso_csum(adapter, skb, tpd) != 0) { + spin_unlock_irqrestore(&adapter->tx_lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + atl1e_tx_map(adapter, skb, tpd); + atl1e_tx_queue(adapter, tpd_req, tpd); + + netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_OK; +} + +static void atl1e_free_irq(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); +} + +static int atl1e_request_irq(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int flags = 0; + int err = 0; + + adapter->have_msi = true; + err = pci_enable_msi(adapter->pdev); + if (err) { + netdev_dbg(adapter->netdev, + "Unable to allocate MSI interrupt Error: %d\n", err); + adapter->have_msi = false; + } else + netdev->irq = pdev->irq; + + + if (!adapter->have_msi) + flags |= IRQF_SHARED; + err = request_irq(adapter->pdev->irq, atl1e_intr, flags, + netdev->name, netdev); + if (err) { + netdev_dbg(adapter->netdev, + "Unable to allocate interrupt Error: %d\n", err); + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); + return err; + } + netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n"); + return err; +} + +int atl1e_up(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + u32 val; + + /* hardware has been reset, we need to reload some things */ + err = atl1e_init_hw(&adapter->hw); + if (err) { + err = -EIO; + return err; + } + atl1e_init_ring_ptrs(adapter); + atl1e_set_multi(netdev); + atl1e_restore_vlan(adapter); + + if (atl1e_configure(adapter)) { + err = -EIO; + goto err_up; + } + + clear_bit(__AT_DOWN, &adapter->flags); + napi_enable(&adapter->napi); + atl1e_irq_enable(adapter); + val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL); + AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, + val | MASTER_CTRL_MANUAL_INT); + +err_up: + return err; +} + +void atl1e_down(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + 
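The transmit path above rests on two small pieces of index arithmetic: atl1e_tpd_avail() counts the free slots left in the circular TPD ring (one slot is always kept unused so "full" and "empty" stay distinguishable), and atl1e_cal_tdp_req() estimates how many descriptors an skb will consume, one for the linear data plus one per MAX_TX_BUF_LEN-sized chunk of each fragment. A minimal user-space sketch of that arithmetic, assuming the 8 KB (0x2000) per-buffer limit this driver appears to use and made-up fragment sizes:

    #include <stdio.h>

    #define MAX_TX_BUF_LEN   0x2000   /* assumed 8 KB per-buffer limit */
    #define MAX_TX_BUF_SHIFT 13

    /* Free slots in a ring of 'count' descriptors, one slot kept unused. */
    static unsigned ring_avail(unsigned count, unsigned next_to_use,
                               unsigned next_to_clean)
    {
        return (next_to_clean > next_to_use) ?
            (next_to_clean - next_to_use - 1) :
            (count + next_to_clean - next_to_use - 1);
    }

    /* Descriptors needed: one for the linear data plus one per chunk of
     * each fragment (hypothetical sizes stand in for skb_shinfo frags). */
    static unsigned tpd_required(const unsigned *frag_size, unsigned nr_frags)
    {
        unsigned i, req = 1;

        for (i = 0; i < nr_frags; i++)
            req += (frag_size[i] + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT;
        return req;
    }

    int main(void)
    {
        unsigned frags[2] = { 12000, 3000 };   /* example fragment lengths */

        printf("free TPDs: %u\n", ring_avail(1024, 1000, 10));
        printf("TPDs required: %u\n", tpd_required(frags, 2));
        return 0;
    }

atl1e_xmit_frame() stops the queue and returns NETDEV_TX_BUSY whenever the second number exceeds the first.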
+ /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__AT_DOWN, &adapter->flags); + +#ifdef NETIF_F_LLTX + netif_stop_queue(netdev); +#else + netif_tx_disable(netdev); +#endif + + /* reset MAC to disable all RX/TX */ + atl1e_reset_hw(&adapter->hw); + msleep(1); + + napi_disable(&adapter->napi); + atl1e_del_timer(adapter); + atl1e_irq_disable(adapter); + + netif_carrier_off(netdev); + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + atl1e_clean_tx_ring(adapter); + atl1e_clean_rx_ring(adapter); +} + +/* + * atl1e_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int atl1e_open(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__AT_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate rx/tx dma buffer & descriptors */ + atl1e_init_ring_resources(adapter); + err = atl1e_setup_ring_resources(adapter); + if (unlikely(err)) + return err; + + err = atl1e_request_irq(adapter); + if (unlikely(err)) + goto err_req_irq; + + err = atl1e_up(adapter); + if (unlikely(err)) + goto err_up; + + return 0; + +err_up: + atl1e_free_irq(adapter); +err_req_irq: + atl1e_free_ring_resources(adapter); + atl1e_reset_hw(&adapter->hw); + + return err; +} + +/* + * atl1e_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
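atl1e_open() acquires its resources in a fixed order (descriptor rings, then the IRQ, then atl1e_up()) and unwinds them in reverse through the err_up and err_req_irq labels when a later step fails. A compact sketch of that staged goto-unwind idiom, with hypothetical acquire/release helpers standing in for the driver calls:

    #include <stdio.h>

    /* Hypothetical stand-ins for atl1e_setup_ring_resources(),
     * atl1e_request_irq() and atl1e_up(); each returns 0 on success. */
    static int setup_rings(void)   { puts("rings up");  return 0; }
    static int request_irq_x(void) { puts("irq up");    return 0; }
    static int bring_up(void)      { puts("link up");   return -1; /* simulate failure */ }
    static void free_irq_x(void)   { puts("irq freed"); }
    static void free_rings(void)   { puts("rings freed"); }

    static int open_device(void)
    {
        int err;

        err = setup_rings();
        if (err)
            return err;
        err = request_irq_x();
        if (err)
            goto err_req_irq;
        err = bring_up();
        if (err)
            goto err_up;
        return 0;

    err_up:
        free_irq_x();       /* undo in reverse order of acquisition */
    err_req_irq:
        free_rings();
        return err;
    }

    int main(void)
    {
        return open_device() ? 1 : 0;
    }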
+ */ +static int atl1e_close(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1e_down(adapter); + atl1e_free_irq(adapter); + atl1e_free_ring_resources(adapter); + + return 0; +} + +static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 ctrl = 0; + u32 mac_ctrl_data = 0; + u32 wol_ctrl_data = 0; + u16 mii_advertise_data = 0; + u16 mii_bmsr_data = 0; + u16 mii_intr_status_data = 0; + u32 wufc = adapter->wol; + u32 i; +#ifdef CONFIG_PM + int retval = 0; +#endif + + if (netif_running(netdev)) { + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1e_down(adapter); + } + netif_device_detach(netdev); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + if (wufc) { + /* get link status */ + atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); + atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); + + mii_advertise_data = MII_AR_10T_HD_CAPS; + + if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) || + (atl1e_write_phy_reg(hw, + MII_ADVERTISE, mii_advertise_data) != 0) || + (atl1e_phy_commit(hw)) != 0) { + netdev_dbg(adapter->netdev, "set phy register failed\n"); + goto wol_dis; + } + + hw->phy_configured = false; /* re-init PHY when resume */ + + /* turn on magic packet wol */ + if (wufc & AT_WUFC_MAG) + wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + + if (wufc & AT_WUFC_LNKC) { + /* if orignal link status is link, just wait for retrive link */ + if (mii_bmsr_data & BMSR_LSTATUS) { + for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { + msleep(100); + atl1e_read_phy_reg(hw, MII_BMSR, + (u16 *)&mii_bmsr_data); + if (mii_bmsr_data & BMSR_LSTATUS) + break; + } + + if ((mii_bmsr_data & BMSR_LSTATUS) == 0) + netdev_dbg(adapter->netdev, + "Link may change when suspend\n"); + } + wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; + /* only link up can wake up */ + if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) { + netdev_dbg(adapter->netdev, + "read write phy register failed\n"); + goto wol_dis; + } + } + /* clear phy interrupt */ + atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data); + /* Config MAC Ctrl register */ + mac_ctrl_data = MAC_CTRL_RX_EN; + /* set to 10/100M halt duplex */ + mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT; + mac_ctrl_data |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << + MAC_CTRL_PRMLEN_SHIFT); + + if (adapter->vlgrp) + mac_ctrl_data |= MAC_CTRL_RMV_VLAN; + + /* magic packet maybe Broadcast&multicast&Unicast frame */ + if (wufc & AT_WUFC_MAG) + mac_ctrl_data |= MAC_CTRL_BC_EN; + + netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n", + mac_ctrl_data); + + AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + /* pcie patch */ + ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto suspend_exit; + } +wol_dis: + + /* WOL disabled */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* pcie patch */ + ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + + atl1e_force_ps(hw); + hw->phy_configured = false; /* re-init PHY when resume */ + + pci_enable_wake(pdev, 
pci_choose_state(pdev, state), 0); + +suspend_exit: + + if (netif_running(netdev)) + atl1e_free_irq(adapter); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +#ifdef CONFIG_PM +static int atl1e_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) { + netdev_err(adapter->netdev, + "Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + + if (netif_running(netdev)) { + err = atl1e_request_irq(adapter); + if (err) + return err; + } + + atl1e_reset_hw(&adapter->hw); + + if (netif_running(netdev)) + atl1e_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void atl1e_shutdown(struct pci_dev *pdev) +{ + atl1e_suspend(pdev, PMSG_SUSPEND); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops atl1e_netdev_ops = { + .ndo_open = atl1e_open, + .ndo_stop = atl1e_close, + .ndo_start_xmit = atl1e_xmit_frame, + .ndo_get_stats = atl1e_get_stats, + .ndo_set_multicast_list = atl1e_set_multi, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = atl1e_set_mac_addr, + .ndo_change_mtu = atl1e_change_mtu, + .ndo_do_ioctl = atl1e_ioctl, + .ndo_tx_timeout = atl1e_tx_timeout, + .ndo_vlan_rx_register = atl1e_vlan_rx_register, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl1e_netpoll, +#endif + +}; +#endif + +static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) +{ + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + netdev->irq = pdev->irq; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + netdev->netdev_ops = &atl1e_netdev_ops; +#else + netdev->change_mtu = atl1e_change_mtu; + netdev->hard_start_xmit = atl1e_xmit_frame; + netdev->open = atl1e_open; + netdev->stop = atl1e_close; + netdev->tx_timeout = atl1e_tx_timeout; + netdev->set_mac_address = atl1e_set_mac_addr; + netdev->do_ioctl = atl1e_ioctl; + netdev->get_stats = atl1e_get_stats; + netdev->set_multicast_list = atl1e_set_multi; + netdev->vlan_rx_register = atl1e_vlan_rx_register; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = atl1e_netpoll; +#endif +#endif + + netdev->watchdog_timeo = AT_TX_WATCHDOG; + atl1e_set_ethtool_ops(netdev); + + netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + netdev->features |= NETIF_F_LLTX; + netdev->features |= NETIF_F_TSO; + + return 0; +} + +/* + * atl1e_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1e_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1e_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
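atl1e_suspend() above assembles the wake-on-LAN control word bit by bit from the requested wake flags before cutting power: magic-packet wake sets one pair of enable/PME bits, link-change wake another. The bit values below are illustrative placeholders, not the real atl1e register layout; the point is only the flag-to-register translation:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative bit positions only -- not the real atl1e definitions. */
    #define WUFC_MAG            0x1u    /* wake on magic packet requested */
    #define WUFC_LNKC           0x2u    /* wake on link change requested  */
    #define WOL_MAGIC_EN        (1u << 0)
    #define WOL_MAGIC_PME_EN    (1u << 1)
    #define WOL_LINK_CHG_EN     (1u << 2)
    #define WOL_LINK_CHG_PME_EN (1u << 3)

    static uint32_t build_wol_ctrl(uint32_t wufc)
    {
        uint32_t wol = 0;

        if (wufc & WUFC_MAG)
            wol |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
        if (wufc & WUFC_LNKC)
            wol |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
        return wol;
    }

    int main(void)
    {
        printf("wol_ctrl = 0x%x\n",
               (unsigned)build_wol_ctrl(WUFC_MAG | WUFC_LNKC));
        return 0;
    }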
+ */ +static int __devinit atl1e_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1e_adapter *adapter = NULL; + static int cards_found; + + int err = 0; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + return err; + } + + /* + * The atl1e chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. + */ + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { + dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); + goto err_dma; + } + + err = pci_request_regions(pdev, atl1e_driver_name); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1e_adapter)); + if (netdev == NULL) { + err = -ENOMEM; + dev_err(&pdev->dev, "etherdev alloc failed\n"); + goto err_alloc_etherdev; + } + + err = atl1e_init_netdev(netdev, pdev); + if (err) { + netdev_err(netdev, "init netdevice failed\n"); + goto err_init_netdev; + } + adapter = netdev_priv(netdev); + adapter->bd_number = cards_found; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.adapter = adapter; + adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0); + if (!adapter->hw.hw_addr) { + err = -EIO; + netdev_err(netdev, "cannot map device registers\n"); + goto err_ioremap; + } + netdev->base_addr = (unsigned long)adapter->hw.hw_addr; + + /* init mii data */ + adapter->mii.dev = netdev; + adapter->mii.mdio_read = atl1e_mdio_read; + adapter->mii.mdio_write = atl1e_mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK; + + netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64); + + init_timer(&adapter->phy_config_timer); + adapter->phy_config_timer.function = &atl1e_phy_config; + adapter->phy_config_timer.data = (unsigned long) adapter; + + /* get user settings */ + atl1e_check_options(adapter); + /* + * Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl1e_driver_name + * Enables bus-mastering on the device and calls + * pcibios_set_master to do the needed arch specific settings + */ + atl1e_setup_pcicmd(pdev); + /* setup the private structure */ + err = atl1e_sw_init(adapter); + if (err) { + netdev_err(netdev, "net device private data init failed\n"); + goto err_sw_init; + } + + /* Init GPHY as early as possible due to power saving issue */ + atl1e_phy_init(&adapter->hw); + /* reset the controller to + * put the device in a known good starting state */ + err = atl1e_reset_hw(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + + if (atl1e_read_mac_addr(&adapter->hw) != 0) { + err = -EIO; + netdev_err(netdev, "get mac address failed\n"); + goto err_eeprom; + } + + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); + netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr); + + INIT_WORK(&adapter->reset_task, atl1e_reset_task); + INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); + err = register_netdev(netdev); + if 
(err) { + netdev_err(netdev, "register netdevice failed\n"); + goto err_register; + } + + /* assume we have no link for now */ + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + cards_found++; + + return 0; + +err_reset: +err_register: +err_sw_init: +err_eeprom: + iounmap(adapter->hw.hw_addr); +err_init_netdev: +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/* + * atl1e_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl1e_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void __devexit atl1e_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + /* + * flush_scheduled work may reschedule our watchdog task, so + * explicitly disable watchdog tasks from being rescheduled + */ + set_bit(__AT_DOWN, &adapter->flags); + + atl1e_del_timer(adapter); + atl1e_cancel_work(adapter); + + unregister_netdev(netdev); + atl1e_free_ring_resources(adapter); + atl1e_force_ps(&adapter->hw); + iounmap(adapter->hw.hw_addr); + pci_release_regions(pdev); + free_netdev(netdev); + pci_disable_device(pdev); +} + +/* + * atl1e_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t +atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + atl1e_down(adapter); + + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/* + * atl1e_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + netdev_err(adapter->netdev, + "Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + atl1e_reset_hw(&adapter->hw); + + return PCI_ERS_RESULT_RECOVERED; +} + +/* + * atl1e_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the atl1e_resume routine. 
+ */ +static void atl1e_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (atl1e_up(adapter)) { + netdev_err(adapter->netdev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static struct pci_error_handlers atl1e_err_handler = { + .error_detected = atl1e_io_error_detected, + .slot_reset = atl1e_io_slot_reset, + .resume = atl1e_io_resume, +}; + +static struct pci_driver atl1e_driver = { + .name = atl1e_driver_name, + .id_table = atl1e_pci_tbl, + .probe = atl1e_probe, + .remove = __devexit_p(atl1e_remove), + /* Power Managment Hooks */ +#ifdef CONFIG_PM + .suspend = atl1e_suspend, + .resume = atl1e_resume, +#endif + .shutdown = atl1e_shutdown, + .err_handler = &atl1e_err_handler +}; + +/* + * atl1e_init_module - Driver Registration Routine + * + * atl1e_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init atl1e_init_module(void) +{ + return pci_register_driver(&atl1e_driver); +} + +/* + * atl1e_exit_module - Driver Exit Cleanup Routine + * + * atl1e_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit atl1e_exit_module(void) +{ + pci_unregister_driver(&atl1e_driver); +} + +module_init(atl1e_init_module); +module_exit(atl1e_exit_module); diff --git a/drivers/net/atl1e/atl1e_param.c b/drivers/net/atl1e/atl1e_param.c new file mode 100644 index 0000000..0ce60b6 --- /dev/null +++ b/drivers/net/atl1e/atl1e_param.c @@ -0,0 +1,268 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include + +#include "atl1e.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define ATL1E_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ +#define ATL1E_PARAM_INIT { [0 ... 
ATL1E_MAX_NIC] = OPTION_UNSET } + +#define ATL1E_PARAM(x, desc) \ + static int __devinitdata x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \ + static unsigned int num_##x; \ + module_param_array_named(x, x, int, &num_##x, 0); \ + MODULE_PARM_DESC(x, desc); + +/* Transmit Memory count + * + * Valid Range: 64-2048 + * + * Default Value: 128 + */ +#define ATL1E_MIN_TX_DESC_CNT 32 +#define ATL1E_MAX_TX_DESC_CNT 1020 +#define ATL1E_DEFAULT_TX_DESC_CNT 128 +ATL1E_PARAM(tx_desc_cnt, "Transmit description count"); + +/* Receive Memory Block Count + * + * Valid Range: 16-512 + * + * Default Value: 128 + */ +#define ATL1E_MIN_RX_MEM_SIZE 8 /* 8KB */ +#define ATL1E_MAX_RX_MEM_SIZE 1024 /* 1MB */ +#define ATL1E_DEFAULT_RX_MEM_SIZE 256 /* 128KB */ +ATL1E_PARAM(rx_mem_size, "memory size of rx buffer(KB)"); + +/* User Specified MediaType Override + * + * Valid Range: 0-5 + * - 0 - auto-negotiate at all supported speeds + * - 1 - only link at 100Mbps Full Duplex + * - 2 - only link at 100Mbps Half Duplex + * - 3 - only link at 10Mbps Full Duplex + * - 4 - only link at 10Mbps Half Duplex + * Default Value: 0 + */ + +ATL1E_PARAM(media_type, "MediaType Select"); + +/* Interrupt Moderate Timer in units of 2 us + * + * Valid Range: 10-65535 + * + * Default Value: 45000(90ms) + */ +#define INT_MOD_DEFAULT_CNT 100 /* 200us */ +#define INT_MOD_MAX_CNT 65000 +#define INT_MOD_MIN_CNT 50 +ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer"); + +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +#define FLASH_VENDOR_DEFAULT 0 +#define FLASH_VENDOR_MIN 0 +#define FLASH_VENDOR_MAX 2 + +struct atl1e_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct atl1e_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + netdev_info(adapter->netdev, + "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + netdev_info(adapter->netdev, + "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + netdev_info(adapter->netdev, "%s set to %i\n", + opt->name, *value); + return 0; + } + break; + case list_option:{ + int i; + struct atl1e_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + netdev_info(adapter->netdev, + "%s\n", ent->str); + return 0; + } + } + break; + } + default: + BUG(); + } + + netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/* + * atl1e_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
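atl1e_validate_option() checks each per-board parameter against its option descriptor: an unset slot falls back to the default, a range option must land inside [min, max], and a list option must match an allowed entry. A stand-alone sketch of the range case, with a hypothetical descriptor:

    #include <stdio.h>

    #define OPTION_UNSET (-1)

    struct range_opt {
        const char *name;
        int def, min, max;
    };

    /* Returns 0 if the value was accepted, -1 if it fell back to the default. */
    static int validate_range(int *value, const struct range_opt *opt)
    {
        if (*value == OPTION_UNSET) {
            *value = opt->def;
            return 0;
        }
        if (*value >= opt->min && *value <= opt->max) {
            printf("%s set to %d\n", opt->name, *value);
            return 0;
        }
        printf("invalid %s (%d), using default %d\n",
               opt->name, *value, opt->def);
        *value = opt->def;
        return -1;
    }

    int main(void)
    {
        struct range_opt imt = { "Interrupt Moderate Timer", 100, 50, 65000 };
        int val = 70000;               /* out of range on purpose */

        validate_range(&val, &imt);
        printf("final value: %d\n", val);
        return 0;
    }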
+ */ +void __devinit atl1e_check_options(struct atl1e_adapter *adapter) +{ + int bd = adapter->bd_number; + + if (bd >= ATL1E_MAX_NIC) { + netdev_notice(adapter->netdev, + "no configuration for board #%i\n", bd); + netdev_notice(adapter->netdev, + "Using defaults for all values\n"); + } + + { /* Transmit Ring Size */ + struct atl1e_option opt = { + .type = range_option, + .name = "Transmit Ddescription Count", + .err = "using default of " + __MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT), + .def = ATL1E_DEFAULT_TX_DESC_CNT, + .arg = { .r = { .min = ATL1E_MIN_TX_DESC_CNT, + .max = ATL1E_MAX_TX_DESC_CNT} } + }; + int val; + if (num_tx_desc_cnt > bd) { + val = tx_desc_cnt[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->tx_ring.count = (u16) val & 0xFFFC; + } else + adapter->tx_ring.count = (u16)opt.def; + } + + { /* Receive Memory Block Count */ + struct atl1e_option opt = { + .type = range_option, + .name = "Memory size of rx buffer(KB)", + .err = "using default of " + __MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE), + .def = ATL1E_DEFAULT_RX_MEM_SIZE, + .arg = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE, + .max = ATL1E_MAX_RX_MEM_SIZE} } + }; + int val; + if (num_rx_mem_size > bd) { + val = rx_mem_size[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->rx_ring.page_size = (u32)val * 1024; + } else { + adapter->rx_ring.page_size = (u32)opt.def * 1024; + } + } + + { /* Interrupt Moderate Timer */ + struct atl1e_option opt = { + .type = range_option, + .name = "Interrupt Moderate Timer", + .err = "using default of " + __MODULE_STRING(INT_MOD_DEFAULT_CNT), + .def = INT_MOD_DEFAULT_CNT, + .arg = { .r = { .min = INT_MOD_MIN_CNT, + .max = INT_MOD_MAX_CNT} } + } ; + int val; + if (num_int_mod_timer > bd) { + val = int_mod_timer[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->hw.imt = (u16) val; + } else + adapter->hw.imt = (u16)(opt.def); + } + + { /* MediaType */ + struct atl1e_option opt = { + .type = range_option, + .name = "Speed/Duplex Selection", + .err = "using default of " + __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR), + .def = MEDIA_TYPE_AUTO_SENSOR, + .arg = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR, + .max = MEDIA_TYPE_10M_HALF} } + } ; + int val; + if (num_media_type > bd) { + val = media_type[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->hw.media_type = (u16) val; + } else + adapter->hw.media_type = (u16)(opt.def); + + } +} diff --git a/drivers/net/atlx/Makefile b/drivers/net/atlx/Makefile new file mode 100644 index 0000000..e4f6022 --- /dev/null +++ b/drivers/net/atlx/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ATL1) += atl1.o +obj-$(CONFIG_ATL2) += atl2.o + diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c new file mode 100644 index 0000000..5600c2a --- /dev/null +++ b/drivers/net/atlx/atl1.c @@ -0,0 +1,3697 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 - 2008 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + * + * Contact Information: + * Xiong Huang + * Jie Yang + * Chris Snook + * Jay Cliburn + * + * This version is adapted from the Attansic reference driver. + * + * TODO: + * Add more ethtool functions. + * Fix abstruse irq enable/disable condition described here: + * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 + * + * NEEDS TESTING: + * VLAN + * multicast + * promiscuous mode + * interrupt coalescing + * SMP torture testing + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "atl1.h" + +#define ATLX_DRIVER_VERSION "2.1.3" +MODULE_AUTHOR("Xiong Huang , \ + Chris Snook , Jay Cliburn "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ATLX_DRIVER_VERSION); + +/* Temporary hack for merging atl1 and atl2 */ +#include "atlx.c" + +/* + * This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define ATL1_MAX_NIC 4 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET } + +/* + * Interrupt Moderate Timer in units of 2 us + * + * Valid Range: 10-65535 + * + * Default Value: 100 (200us) + */ +static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; +static unsigned int num_int_mod_timer; +module_param_array_named(int_mod_timer, int_mod_timer, int, + &num_int_mod_timer, 0); +MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer"); + +#define DEFAULT_INT_MOD_CNT 100 /* 200us */ +#define MAX_INT_MOD_CNT 65000 +#define MIN_INT_MOD_CNT 50 + +struct atl1_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct atl1_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, + struct pci_dev *pdev) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + dev_info(&pdev->dev, "%s enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + dev_info(&pdev->dev, "%s disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + dev_info(&pdev->dev, "%s set to %i\n", opt->name, + *value); + return 0; + } + break; + case list_option:{ + int i; + struct atl1_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + dev_info(&pdev->dev, "%s\n", + ent->str); + return 0; + } + } + } + break; + + default: + break; + } + + dev_info(&pdev->dev, "invalid %s specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; 
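Both drivers use the same per-board convention for module parameters: each parameter is an array with one slot per NIC, and the num_<param> counter filled in by module_param_array_named() records how many values the user actually supplied. The check_options() routines only consult the array when the board index is below that count; otherwise the built-in default wins. A small sketch of that selection, with hypothetical values:

    #include <stdio.h>

    #define MAX_NIC         4
    #define DEFAULT_INT_MOD 100

    /* What module_param_array_named() would leave behind after the user
     * passed "int_mod_timer=120,90" on the module command line (hypothetical). */
    static int int_mod_timer[MAX_NIC + 1] = { 120, 90 };
    static unsigned num_int_mod_timer = 2;

    static int pick_option(int bd)
    {
        return (num_int_mod_timer > (unsigned)bd) ? int_mod_timer[bd]
                                                  : DEFAULT_INT_MOD;
    }

    int main(void)
    {
        int bd;

        for (bd = 0; bd < MAX_NIC; bd++)
            printf("board %d: imt = %d\n", bd, pick_option(bd));
        return 0;
    }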
+} + +/* + * atl1_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + */ +static void __devinit atl1_check_options(struct atl1_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int bd = adapter->bd_number; + if (bd >= ATL1_MAX_NIC) { + dev_notice(&pdev->dev, "no configuration for board#%i\n", bd); + dev_notice(&pdev->dev, "using defaults for all values\n"); + } + { /* Interrupt Moderate Timer */ + struct atl1_option opt = { + .type = range_option, + .name = "Interrupt Moderator Timer", + .err = "using default of " + __MODULE_STRING(DEFAULT_INT_MOD_CNT), + .def = DEFAULT_INT_MOD_CNT, + .arg = {.r = {.min = MIN_INT_MOD_CNT, + .max = MAX_INT_MOD_CNT} } + }; + int val; + if (num_int_mod_timer > bd) { + val = int_mod_timer[bd]; + atl1_validate_option(&val, &opt, pdev); + adapter->imt = (u16) val; + } else + adapter->imt = (u16) (opt.def); + } +} + +/* + * atl1_pci_tbl - PCI Device ID Table + */ +static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, + /* required last entry */ + {0,} +}; +MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); + +static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; + +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)"); + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static s32 atl1_reset_hw(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + u32 icr; + int i; + + /* + * Clear Interrupt mask to stop board from generating + * interrupts & Clear any pending interrupt events + */ + /* + * iowrite32(0, hw->hw_addr + REG_IMR); + * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); + */ + + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); + ioread32(hw->hw_addr + REG_MASTER_CTRL); + + iowrite16(1, hw->hw_addr + REG_PHY_ENABLE); + ioread16(hw->hw_addr + REG_PHY_ENABLE); + + /* delay about 1ms */ + msleep(1); + + /* Wait at least 10ms for All module to be Idle */ + for (i = 0; i < 10; i++) { + icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); + if (!icr) + break; + /* delay 1 ms */ + msleep(1); + /* FIXME: still the right way to do this? */ + cpu_relax(); + } + + if (icr) { + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); + return icr; + } + + return 0; +} + +/* function about EEPROM + * + * check_eeprom_exist + * return 0 if eeprom exist + */ +static int atl1_check_eeprom_exist(struct atl1_hw *hw) +{ + u32 value; + value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + } + + value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 
0 : 1; +} + +static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value) +{ + int i; + u32 control; + + if (offset & 3) + /* address do not align */ + return false; + + iowrite32(0, hw->hw_addr + REG_VPD_DATA); + control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + iowrite32(control, hw->hw_addr + REG_VPD_CAP); + ioread32(hw->hw_addr + REG_VPD_CAP); + + for (i = 0; i < 10; i++) { + msleep(2); + control = ioread32(hw->hw_addr + REG_VPD_CAP); + if (control & VPD_CAP_VPD_FLAG) + break; + } + if (control & VPD_CAP_VPD_FLAG) { + *p_value = ioread32(hw->hw_addr + REG_VPD_DATA); + return true; + } + /* timeout */ + return false; +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << + MDIO_CLK_SEL_SHIFT; + iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); + ioread32(hw->hw_addr + REG_MDIO_CTRL); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16) val; + return 0; + } + return ATLX_ERR_PHY; +} + +#define CUSTOM_SPI_CS_SETUP 2 +#define CUSTOM_SPI_CLK_HI 2 +#define CUSTOM_SPI_CLK_LO 2 +#define CUSTOM_SPI_CS_HOLD 2 +#define CUSTOM_SPI_CS_HI 3 + +static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf) +{ + int i; + u32 value; + + iowrite32(0, hw->hw_addr + REG_SPI_DATA); + iowrite32(addr, hw->hw_addr + REG_SPI_ADDR); + + value = SPI_FLASH_CTRL_WAIT_READY | + (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << + SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & + SPI_FLASH_CTRL_CLK_HI_MASK) << + SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & + SPI_FLASH_CTRL_CLK_LO_MASK) << + SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & + SPI_FLASH_CTRL_CS_HOLD_MASK) << + SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & + SPI_FLASH_CTRL_CS_HI_MASK) << + SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << + SPI_FLASH_CTRL_INS_SHIFT; + + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + + value |= SPI_FLASH_CTRL_START; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + + for (i = 0; i < 10; i++) { + msleep(1); + value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + if (!(value & SPI_FLASH_CTRL_START)) + break; + } + + if (value & SPI_FLASH_CTRL_START) + return false; + + *buf = ioread32(hw->hw_addr + REG_SPI_DATA); + + return true; +} + +/* + * get_permanent_address + * return 0 if get valid mac address, + */ +static int atl1_get_permanent_address(struct atl1_hw *hw) +{ + u32 addr[2]; + u32 i, control; + u16 reg; + u8 eth_addr[ETH_ALEN]; + bool key_valid; + + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + /* init */ + addr[0] = addr[1] = 0; + + if (!atl1_check_eeprom_exist(hw)) { + reg = 0; + key_valid = false; + /* Read out all EEPROM content */ + i = 0; + while (1) { + if (atl1_read_eeprom(hw, i + 0x100, &control)) { + if (key_valid) { + if (reg == REG_MAC_STA_ADDR) + addr[0] = control; + else if (reg == (REG_MAC_STA_ADDR + 4)) + addr[1] = control; + key_valid = false; + } else if ((control & 0xff) == 0x5A) { + key_valid = true; + reg = (u16) (control >> 16); + } else + break; + } 
else + /* read error */ + break; + i += 4; + } + + *(u32 *) &eth_addr[2] = swab32(addr[0]); + *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + } + + /* see if SPI FLAGS exist ? */ + addr[0] = addr[1] = 0; + reg = 0; + key_valid = false; + i = 0; + while (1) { + if (atl1_spi_read(hw, i + 0x1f000, &control)) { + if (key_valid) { + if (reg == REG_MAC_STA_ADDR) + addr[0] = control; + else if (reg == (REG_MAC_STA_ADDR + 4)) + addr[1] = control; + key_valid = false; + } else if ((control & 0xff) == 0x5A) { + key_valid = true; + reg = (u16) (control >> 16); + } else + /* data end */ + break; + } else + /* read error */ + break; + i += 4; + } + + *(u32 *) &eth_addr[2] = swab32(addr[0]); + *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + /* + * On some motherboards, the MAC address is written by the + * BIOS directly to the MAC register during POST, and is + * not stored in eeprom. If all else thus far has failed + * to fetch the permanent MAC address, try reading it directly. + */ + addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR); + addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4)); + *(u32 *) &eth_addr[2] = swab32(addr[0]); + *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + return 1; +} + +/* + * Reads the adapter's MAC address from the EEPROM + * hw - Struct containing variables accessed by shared code + */ +static s32 atl1_read_mac_addr(struct atl1_hw *hw) +{ + u16 i; + + if (atl1_get_permanent_address(hw)) + random_ether_addr(hw->perm_mac_addr); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return 0; +} + +/* + * Hashes an address to determine its location in the multicast table + * hw - Struct containing variables accessed by shared code + * mc_addr - the multicast address to hash + * + * atl1_hash_mc_addr + * purpose + * set hash value for a multicast address + * hash calcu processing : + * 1. calcu 32bit CRC for multicast address + * 2. reverse crc with MSB to LSB + */ +u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr) +{ + u32 crc32, value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1_hash_set(struct atl1_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value.
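atl1_hash_mc_addr() computes the little-endian Ethernet CRC of the multicast address and reverses its 32 bits; atl1_hash_set() then takes the top bit to choose one of the two hash registers and the next five bits to choose the bit inside it. A self-contained sketch of that pipeline; the CRC routine below is the standard reflected CRC-32 (polynomial 0xEDB88320, seed ~0, no final inversion), which is what ether_crc_le() is normally defined as:

    #include <stdio.h>
    #include <stdint.h>

    /* Reflected CRC-32, seeded with ~0 and not post-inverted. */
    static uint32_t crc32_le_sw(const uint8_t *data, size_t len)
    {
        uint32_t crc = 0xFFFFFFFFu;
        size_t i;
        int b;

        for (i = 0; i < len; i++) {
            crc ^= data[i];
            for (b = 0; b < 8; b++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
        }
        return crc;
    }

    static uint32_t hash_mc_addr(const uint8_t mac[6])
    {
        uint32_t crc = crc32_le_sw(mac, 6), value = 0;
        int i;

        for (i = 0; i < 32; i++)        /* reverse MSB<->LSB, as the driver does */
            value |= ((crc >> i) & 1u) << (31 - i);
        return value;
    }

    int main(void)
    {
        const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t h = hash_mc_addr(mc);

        printf("hash=0x%08x -> register %u, bit %u\n",
               (unsigned)h, (unsigned)((h >> 31) & 0x1u),
               (unsigned)((h >> 26) & 0x1Fu));
        return 0;
    }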
+ */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); + mta |= (1 << hash_bit); + iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); + ioread32(hw->hw_addr + REG_MDIO_CTRL); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return 0; + + return ATLX_ERR_PHY; +} + +/* + * Make L001's PHY out of Power Saving State (bug) + * hw - Struct containing variables accessed by shared code + * when power on, L001's PHY always on Power saving State + * (Gigabit Link forbidden) + */ +static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) +{ + s32 ret; + ret = atl1_write_phy_reg(hw, 29, 0x0029); + if (ret) + return ret; + return atl1_write_phy_reg(hw, 30, 0); +} + +/* + * Resets the PHY and make all config validate + * hw - Struct containing variables accessed by shared code + * + * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) + */ +static s32 atl1_phy_reset(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + s32 ret_val; + u16 phy_data; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + } + + ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /* pcie serdes link may be down! */ + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "pcie phy link down\n"); + + for (i = 0; i < 25; i++) { + msleep(1); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if ((val & (MDIO_START | MDIO_BUSY)) != 0) { + if (netif_msg_hw(adapter)) + dev_warn(&pdev->dev, + "pcie link down at least 25ms\n"); + return ret_val; + } + } + return 0; +} + +/* + * Configures PHY autoneg and flow control advertisement settings + * hw - Struct containing variables accessed by shared code + */ +static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) +{ + s32 ret_val; + s16 mii_autoneg_adv_reg; + s16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; + + /* Read the MII 1000Base-T Control Register (Address 9). 
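Every MDIO access above follows the same shape: kick off the transaction, then poll the control register a bounded number of times with a short delay, and report ATLX_ERR_PHY if the engine is still busy when the budget runs out. A generic sketch of that bounded-poll pattern, with a hypothetical is_busy() callback in place of the register read and an illustrative retry count:

    #include <stdio.h>

    #define MDIO_POLL_BUDGET 30     /* illustrative retry budget */

    /* Hypothetical hardware query: pretend the engine stays busy for 5 polls. */
    static int is_busy(void)
    {
        static int remaining = 5;
        return remaining-- > 0;
    }

    /* Returns 0 once the engine goes idle, -1 if the retry budget runs out. */
    static int poll_until_idle(void)
    {
        int i;

        for (i = 0; i < MDIO_POLL_BUDGET; i++) {
            if (!is_busy())
                return 0;
            /* the driver would udelay(2) here */
        }
        return -1;
    }

    int main(void)
    {
        printf("mdio %s\n", poll_until_idle() ? "timed out" : "idle");
        return 0;
    }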
*/ + mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK; + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; + mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK; + + /* + * Need to parse media_type and set up + * the appropriate PHY registers. + */ + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | + MII_AR_10T_FD_CAPS | + MII_AR_100TX_HD_CAPS | + MII_AR_100TX_FD_CAPS); + mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; + break; + + case MEDIA_TYPE_1000M_FULL: + mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; + break; + + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; + break; + + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; + break; + + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; + break; + + default: + mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; + break; + } + + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); + + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; + + ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + + return 0; +} + +/* + * Configures link settings. + * hw - Struct containing variables accessed by shared code + * Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. + */ +static s32 atl1_setup_link(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + s32 ret_val; + + /* + * Options: + * PHY will advertise value(s) parsed from + * autoneg_advertised and fc + * no matter what autoneg is , We will not wait link result. + */ + ret_val = atl1_phy_setup_autoneg_adv(hw); + if (ret_val) { + if (netif_msg_link(adapter)) + dev_dbg(&pdev->dev, + "error setting up autonegotiation\n"); + return ret_val; + } + /* SW.Reset , En-Auto-Neg if needed */ + ret_val = atl1_phy_reset(hw); + if (ret_val) { + if (netif_msg_link(adapter)) + dev_dbg(&pdev->dev, "error resetting phy\n"); + return ret_val; + } + hw->phy_configured = true; + return ret_val; +} + +static void atl1_init_flash_opcode(struct atl1_hw *hw) +{ + if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) + /* Atmel */ + hw->flash_vendor = 0; + + /* Init OP table */ + iowrite8(flash_table[hw->flash_vendor].cmd_program, + hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); + iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, + hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); + iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, + hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); + iowrite8(flash_table[hw->flash_vendor].cmd_rdid, + hw->hw_addr + REG_SPI_FLASH_OP_RDID); + iowrite8(flash_table[hw->flash_vendor].cmd_wren, + hw->hw_addr + REG_SPI_FLASH_OP_WREN); + iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, + hw->hw_addr + REG_SPI_FLASH_OP_RDSR); + iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, + hw->hw_addr + REG_SPI_FLASH_OP_WRSR); + iowrite8(flash_table[hw->flash_vendor].cmd_read, + hw->hw_addr + REG_SPI_FLASH_OP_READ); +} + +/* + * Performs basic configuration of the adapter. 
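atl1_phy_setup_autoneg_adv() first clears all speed bits in the advertisement registers, then re-enables only the capabilities implied by the configured media type: auto-sensor advertises every speed/duplex, a forced mode advertises exactly one. A reduced sketch of that mapping, using illustrative capability bits rather than the real MII_AR_* values:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative capability bits -- not the real MII advertisement layout. */
    #define ADV_10_HALF  (1u << 0)
    #define ADV_10_FULL  (1u << 1)
    #define ADV_100_HALF (1u << 2)
    #define ADV_100_FULL (1u << 3)

    enum media { MEDIA_AUTO, MEDIA_100_FULL, MEDIA_100_HALF,
                 MEDIA_10_FULL, MEDIA_10_HALF };

    static uint16_t adv_bits(enum media m)
    {
        switch (m) {
        case MEDIA_AUTO:
            return ADV_10_HALF | ADV_10_FULL | ADV_100_HALF | ADV_100_FULL;
        case MEDIA_100_FULL: return ADV_100_FULL;
        case MEDIA_100_HALF: return ADV_100_HALF;
        case MEDIA_10_FULL:  return ADV_10_FULL;
        default:             return ADV_10_HALF;  /* forced 10M half duplex */
        }
    }

    int main(void)
    {
        printf("auto-sensor advertises 0x%x\n", (unsigned)adv_bits(MEDIA_AUTO));
        printf("forced 100M full advertises 0x%x\n",
               (unsigned)adv_bits(MEDIA_100_FULL));
        return 0;
    }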
+ * hw - Struct containing variables accessed by shared code + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes multicast table, + * and Calls routines to setup link + * Leaves the transmit and receive units disabled and uninitialized. + */ +static s32 atl1_init_hw(struct atl1_hw *hw) +{ + u32 ret_val = 0; + + /* Zero out the Multicast HASH table */ + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); + /* clear the old settings from the multicast hash table */ + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + + atl1_init_flash_opcode(hw); + + if (!hw->phy_configured) { + /* enable GPHY LinkChange Interrrupt */ + ret_val = atl1_write_phy_reg(hw, 18, 0xC00); + if (ret_val) + return ret_val; + /* make PHY out of power-saving state */ + ret_val = atl1_phy_leave_power_saving(hw); + if (ret_val) + return ret_val; + /* Call a subroutine to configure the link */ + ret_val = atl1_setup_link(hw); + } + return ret_val; +} + +/* + * Detects the current speed and duplex settings of the hardware. + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) +{ + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + s32 ret_val; + u16 phy_data; + + /* ; --- Read PHY Specific Status Register (17) */ + ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) + return ATLX_ERR_PHY_RES; + + switch (phy_data & MII_ATLX_PSSR_SPEED) { + case MII_ATLX_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case MII_ATLX_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_ATLX_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "error getting speed\n"); + return ATLX_ERR_PHY_SPEED; + break; + } + if (phy_data & MII_ATLX_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +void atl1_set_mac_addr(struct atl1_hw *hw) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = (((u32) hw->mac_addr[2]) << 24) | + (((u32) hw->mac_addr[3]) << 16) | + (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); + /* high dword */ + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); + iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); +} + +/* + * atl1_sw_init - Initialize general software structures (struct atl1_adapter) + * @adapter: board private structure to initialize + * + * atl1_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
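atl1_set_mac_addr() packs the six address bytes into two register writes: bytes 2-5 form the low dword with byte 2 in the most significant position, and bytes 0-1 form the high dword, matching the 00-0B-6A-F6-00-DC example in its comment. A quick stand-alone check of that packing:

    #include <stdio.h>
    #include <stdint.h>

    static void pack_mac(const uint8_t mac[6], uint32_t *lo, uint32_t *hi)
    {
        *lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
              ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
        *hi = ((uint32_t)mac[0] << 8)  |  (uint32_t)mac[1];
    }

    int main(void)
    {
        /* The example from the driver comment: 00-0B-6A-F6-00-DC */
        const uint8_t mac[6] = { 0x00, 0x0B, 0x6A, 0xF6, 0x00, 0xDC };
        uint32_t lo, hi;

        pack_mac(mac, &lo, &hi);
        printf("low=0x%08X high=0x%04X\n", (unsigned)lo, (unsigned)hi);
        return 0;                      /* prints 6AF600DC / 000B */
    }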
+ */ +static int __devinit atl1_sw_init(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + adapter->wol = 0; + adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; + adapter->ict = 50000; /* 100ms */ + adapter->link_speed = SPEED_0; /* hardware init */ + adapter->link_duplex = FULL_DUPLEX; + + hw->phy_configured = false; + hw->preamble_len = 7; + hw->ipgt = 0x60; + hw->min_ifg = 0x50; + hw->ipgr1 = 0x40; + hw->ipgr2 = 0x60; + hw->max_retry = 0xf; + hw->lcol = 0x37; + hw->jam_ipg = 7; + hw->rfd_burst = 8; + hw->rrd_burst = 8; + hw->rfd_fetch_gap = 1; + hw->rx_jumbo_th = adapter->rx_buffer_len / 8; + hw->rx_jumbo_lkah = 1; + hw->rrd_ret_timer = 16; + hw->tpd_burst = 4; + hw->tpd_fetch_th = 16; + hw->txf_burst = 0x100; + hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; + hw->tpd_fetch_gap = 1; + hw->rcb_value = atl1_rcb_64; + hw->dma_ord = atl1_dma_ord_enh; + hw->dmar_block = atl1_dma_req_256; + hw->dmaw_block = atl1_dma_req_256; + hw->cmb_rrd = 4; + hw->cmb_tpd = 4; + hw->cmb_rx_timer = 1; /* about 2us */ + hw->cmb_tx_timer = 1; /* about 2us */ + hw->smb_timer = 100000; /* about 200ms */ + + spin_lock_init(&adapter->lock); + spin_lock_init(&adapter->mb_lock); + + return 0; +} + +static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); + + return result; +} + +static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, + int val) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + atl1_write_phy_reg(&adapter->hw, reg_num, val); +} + +/* + * atl1_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + int retval; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->lock, flags); + retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); + spin_unlock_irqrestore(&adapter->lock, flags); + + return retval; +} + +/* + * atl1_setup_mem_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; + struct pci_dev *pdev = adapter->pdev; + int size; + u8 offset = 0; + + size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); + tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); + if (unlikely(!tpd_ring->buffer_info)) { + if (netif_msg_drv(adapter)) + dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", + size); + goto err_nomem; + } + rfd_ring->buffer_info = + (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); + + /* + * real ring DMA buffer + * each ring/block may need up to 8 bytes for alignment, hence the + * additional 40 bytes tacked onto the end. 
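The comment above explains why the single coherent allocation carries 40 spare bytes: each of the five sub-regions carved out of it (the TPD, RFD and RRD rings plus the CMB and SMB message blocks) may need up to 8 bytes of padding to reach the next 8-byte boundary. The padding computation repeated for every region is just the expression below:

    #include <stdio.h>
    #include <stdint.h>

    /* Padding needed to bring 'dma' up to the next 8-byte boundary
     * (0 if it is already aligned), exactly as the driver computes it. */
    static unsigned align8_offset(uint64_t dma)
    {
        return (dma & 0x7) ? (unsigned)(8 - (dma & 0x7)) : 0;
    }

    int main(void)
    {
        uint64_t dma = 0x1003;          /* hypothetical region start */
        unsigned pad = align8_offset(dma);

        printf("start 0x%llx needs %u byte(s) of padding -> 0x%llx\n",
               (unsigned long long)dma, pad,
               (unsigned long long)(dma + pad));
        return 0;
    }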
+ */ + ring_header->size = size = + sizeof(struct tx_packet_desc) * tpd_ring->count + + sizeof(struct rx_free_desc) * rfd_ring->count + + sizeof(struct rx_return_desc) * rrd_ring->count + + sizeof(struct coals_msg_block) + + sizeof(struct stats_msg_block) + + 40; + + ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, + &ring_header->dma); + if (unlikely(!ring_header->desc)) { + if (netif_msg_drv(adapter)) + dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); + goto err_nomem; + } + + memset(ring_header->desc, 0, ring_header->size); + + /* init TPD ring */ + tpd_ring->dma = ring_header->dma; + offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; + tpd_ring->dma += offset; + tpd_ring->desc = (u8 *) ring_header->desc + offset; + tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; + + /* init RFD ring */ + rfd_ring->dma = tpd_ring->dma + tpd_ring->size; + offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0; + rfd_ring->dma += offset; + rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); + rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; + + + /* init RRD ring */ + rrd_ring->dma = rfd_ring->dma + rfd_ring->size; + offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; + rrd_ring->dma += offset; + rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); + rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; + + + /* init CMB */ + adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; + offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; + adapter->cmb.dma += offset; + adapter->cmb.cmb = (struct coals_msg_block *) + ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); + + /* init SMB */ + adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); + offset = (adapter->smb.dma & 0x7) ? 
(8 - (adapter->smb.dma & 0x7)) : 0; + adapter->smb.dma += offset; + adapter->smb.smb = (struct stats_msg_block *) + ((u8 *) adapter->cmb.cmb + + (sizeof(struct coals_msg_block) + offset)); + + return 0; + +err_nomem: + kfree(tpd_ring->buffer_info); + return -ENOMEM; +} + +static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); + + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); + + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); +} + +/* + * atl1_clean_rx_ring - Free RFD Buffers + * @adapter: board private structure + */ +static void atl1_clean_rx_ring(struct atl1_adapter *adapter) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rfd_ring->count; i++) { + buffer_info = &rfd_ring->buffer_info[i]; + if (buffer_info->dma) { + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct atl1_buffer) * rfd_ring->count; + memset(rfd_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rfd_ring->desc, 0, rfd_ring->size); + + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); + + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); +} + +/* + * atl1_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + */ +static void atl1_clean_tx_ring(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; + if (buffer_info->dma) { + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + } + + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct atl1_buffer) * tpd_ring->count; + memset(tpd_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tpd_ring->desc, 0, tpd_ring->size); + + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); +} + +/* + * atl1_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl1_free_ring_resources(struct atl1_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; + + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); + + kfree(tpd_ring->buffer_info); + pci_free_consistent(pdev, ring_header->size, ring_header->desc, + ring_header->dma); + + 
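+ /*
+ * Clear the software bookkeeping so stale descriptor and DMA
+ * pointers cannot be dereferenced after the backing memory is freed.
+ */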
tpd_ring->buffer_info = NULL; + tpd_ring->desc = NULL; + tpd_ring->dma = 0; + + rfd_ring->buffer_info = NULL; + rfd_ring->desc = NULL; + rfd_ring->dma = 0; + + rrd_ring->desc = NULL; + rrd_ring->dma = 0; +} + +static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) +{ + u32 value; + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + /* Config MAC CTRL Register */ + value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; + /* duplex */ + if (FULL_DUPLEX == adapter->link_duplex) + value |= MAC_CTRL_DUPLX; + /* speed */ + value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? + MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << + MAC_CTRL_SPEED_SHIFT); + /* flow control */ + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + /* PAD & CRC */ + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + /* preamble length */ + value |= (((u32) adapter->hw.preamble_len + & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + /* vlan */ + if (adapter->vlgrp) + value |= MAC_CTRL_RMV_VLAN; + /* rx checksum + if (adapter->rx_csum) + value |= MAC_CTRL_RX_CHKSUM_EN; + */ + /* filter mode */ + value |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + value |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) + value |= MAC_CTRL_MC_ALL_EN; + /* value |= MAC_CTRL_LOOPBACK; */ + iowrite32(value, hw->hw_addr + REG_MAC_CTRL); +} + +static u32 atl1_check_link(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 ret_val; + u16 speed, duplex, phy_data; + int reconfig = 0; + + /* MII_BMSR must read twice */ + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); + if (!(phy_data & BMSR_LSTATUS)) { + /* link down */ + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + if (netif_msg_link(adapter)) + dev_info(&adapter->pdev->dev, "link is down\n"); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + } + return 0; + } + + /* Link Up */ + ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) + return ret_val; + + switch (hw->media_type) { + case MEDIA_TYPE_1000M_FULL: + if (speed != SPEED_1000 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_FULL: + if (speed != SPEED_100 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_HALF: + if (speed != SPEED_100 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_FULL: + if (speed != SPEED_10 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_HALF: + if (speed != SPEED_10 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + } + + /* link result is our setting */ + if (!reconfig) { + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1_setup_mac_ctrl(adapter); + if (netif_msg_link(adapter)) + dev_info(&adapter->pdev->dev, + "%s link is up %d Mbps %s\n", + netdev->name, adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "full duplex" : "half duplex"); + } + if (!netif_carrier_ok(netdev)) { + /* Link down -> Up */ + netif_carrier_on(netdev); + } + return 0; + } + + /* change original link status */ + if (netif_carrier_ok(netdev)) { + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + + if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && + hw->media_type != MEDIA_TYPE_1000M_FULL) { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); + return 0; + } + + /* auto-neg, insert timer to re-config phy */ + if (!adapter->phy_timer_pending) { + adapter->phy_timer_pending = true; + mod_timer(&adapter->phy_config_timer, + round_jiffies(jiffies + 3 * HZ)); + } + + return 0; +} + +static void set_flow_ctrl_old(struct atl1_adapter *adapter) +{ + u32 hi, lo, value; + + /* RFD Flow Control */ + value = adapter->rfd_ring.count; + hi = value / 16; + if (hi < 2) + hi = 2; + lo = value * 7 / 8; + + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); + + /* RRD Flow Control */ + value = adapter->rrd_ring.count; + lo = value / 16; + hi = value * 7 / 8; + if (lo < 2) + lo = 2; + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); +} + +static void set_flow_ctrl_new(struct atl1_hw *hw) +{ + u32 hi, lo, value; + + /* RXF Flow Control */ + value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); + lo = value / 16; + if (lo < 192) + lo = 192; + hi = value * 7 / 8; + if (hi < lo) + hi = lo + 16; + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); + + /* RRD Flow Control */ + value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); + lo = value / 8; + hi = value * 7 / 8; + if (lo < 2) + lo = 2; + if (hi < lo) + hi = lo + 3; + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); +} + +/* + * atl1_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. 
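+ *
+ * Returns 0 on success, or 1 if the PHY reported link-down while the
+ * unit was being configured.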
+ */ +static u32 atl1_configure(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + u32 value; + + /* clear interrupt status */ + iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); + + /* set MAC Address */ + value = (((u32) hw->mac_addr[2]) << 24) | + (((u32) hw->mac_addr[3]) << 16) | + (((u32) hw->mac_addr[4]) << 8) | + (((u32) hw->mac_addr[5])); + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); + iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); + + /* tx / rx ring */ + + /* HI base address */ + iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), + hw->hw_addr + REG_DESC_BASE_ADDR_HI); + /* LO base address */ + iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_RFD_ADDR_LO); + iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_RRD_ADDR_LO); + iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_TPD_ADDR_LO); + iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_CMB_ADDR_LO); + iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_SMB_ADDR_LO); + + /* element count */ + value = adapter->rrd_ring.count; + value <<= 16; + value += adapter->rfd_ring.count; + iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); + iowrite32(adapter->tpd_ring.count, hw->hw_addr + + REG_DESC_TPD_RING_SIZE); + + /* Load Ptr */ + iowrite32(1, hw->hw_addr + REG_LOAD_PTR); + + /* config Mailbox */ + value = ((atomic_read(&adapter->tpd_ring.next_to_use) + & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | + ((atomic_read(&adapter->rrd_ring.next_to_clean) + & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | + ((atomic_read(&adapter->rfd_ring.next_to_use) + & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAILBOX); + + /* config IPG/IFG */ + value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) + << MAC_IPG_IFG_IPGT_SHIFT) | + (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) + << MAC_IPG_IFG_MIFG_SHIFT) | + (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) + << MAC_IPG_IFG_IPGR1_SHIFT) | + (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) + << MAC_IPG_IFG_IPGR2_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); + + /* config Half-Duplex Control */ + value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | + (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) + << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | + MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | + (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | + (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) + << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); + + /* set Interrupt Moderator Timer */ + iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); + iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); + + /* set Interrupt Clear Timer */ + iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); + + /* set max frame size hw will accept */ + iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU); + + /* jumbo size & rrd retirement timer */ + value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) + << RXQ_JMBOSZ_TH_SHIFT) | + (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) + << RXQ_JMBO_LKAH_SHIFT) | + (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) + << RXQ_RRD_TIMER_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); + + /* Flow Control */ + switch (hw->dev_rev) { + 
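+ /*
+ * These device revisions derive the RXF/RRD pause thresholds from the
+ * ring sizes; all other revisions size them from the on-chip SRAM
+ * FIFO lengths.
+ */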
case 0x8001: + case 0x9001: + case 0x9002: + case 0x9003: + set_flow_ctrl_old(adapter); + break; + default: + set_flow_ctrl_new(hw); + break; + } + + /* config TXQ */ + value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) + << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | + (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) + << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | + (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) + << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | + TXQ_CTRL_EN; + iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); + + /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ + value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) + << TX_JUMBO_TASK_TH_SHIFT) | + (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) + << TX_TPD_MIN_IPG_SHIFT); + iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); + + /* config RXQ */ + value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) + << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | + (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) + << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | + (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) + << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | + RXQ_CTRL_EN; + iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); + + /* config DMA Engine */ + value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) + << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | + ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) + << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | + DMA_CTRL_DMAW_EN; + value |= (u32) hw->dma_ord; + if (atl1_rcb_128 == hw->rcb_value) + value |= DMA_CTRL_RCB_VALUE; + iowrite32(value, hw->hw_addr + REG_DMA_CTRL); + + /* config CMB / SMB */ + value = (hw->cmb_tpd > adapter->tpd_ring.count) ? + hw->cmb_tpd : adapter->tpd_ring.count; + value <<= 16; + value |= hw->cmb_rrd; + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); + value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); + iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); + + /* --- enable CMB / SMB */ + value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; + iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); + + value = ioread32(adapter->hw.hw_addr + REG_ISR); + if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) + value = 1; /* config failed */ + else + value = 0; + + /* clear all interrupt status */ + iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); + iowrite32(0, adapter->hw.hw_addr + REG_ISR); + return value; +} + +/* + * atl1_pcie_patch - Patch for PCIE module + */ +static void atl1_pcie_patch(struct atl1_adapter *adapter) +{ + u32 value; + + /* much vendor magic here */ + value = 0x6500; + iowrite32(value, adapter->hw.hw_addr + 0x12FC); + /* pcie flow control mode change */ + value = ioread32(adapter->hw.hw_addr + 0x1008); + value |= 0x8000; + iowrite32(value, adapter->hw.hw_addr + 0x1008); +} + +/* + * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 + * on PCI Command register is disable. + * The function enable this bit. 
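+ * (That is, PCI_COMMAND_INTX_DISABLE is left set after resume, so legacy
+ * INTx interrupts stay masked until the workaround clears the bit again.)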
+ * Brackett, 2006/03/15 + */ +static void atl1_via_workaround(struct atl1_adapter *adapter) +{ + unsigned long value; + + value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); + if (value & PCI_COMMAND_INTX_DISABLE) + value &= ~PCI_COMMAND_INTX_DISABLE; + iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); +} + +static void atl1_inc_smb(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct stats_msg_block *smb = adapter->smb.smb; + + /* Fill out the OS statistics structure */ + adapter->soft_stats.rx_packets += smb->rx_ok; + adapter->soft_stats.tx_packets += smb->tx_ok; + adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; + adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; + adapter->soft_stats.multicast += smb->rx_mcast; + adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + + smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); + + /* Rx Errors */ + adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + + smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + + smb->rx_rrd_ov + smb->rx_align_err); + adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; + adapter->soft_stats.rx_length_errors += smb->rx_len_err; + adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; + adapter->soft_stats.rx_frame_errors += smb->rx_align_err; + adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + + smb->rx_rxf_ov); + + adapter->soft_stats.rx_pause += smb->rx_pause; + adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; + adapter->soft_stats.rx_trunc += smb->rx_sz_ov; + + /* Tx Errors */ + adapter->soft_stats.tx_errors += (smb->tx_late_col + + smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); + adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; + adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; + adapter->soft_stats.tx_window_errors += smb->tx_late_col; + + adapter->soft_stats.excecol += smb->tx_abort_col; + adapter->soft_stats.deffer += smb->tx_defer; + adapter->soft_stats.scc += smb->tx_1_col; + adapter->soft_stats.mcc += smb->tx_2_col; + adapter->soft_stats.latecol += smb->tx_late_col; + adapter->soft_stats.tx_underun += smb->tx_underrun; + adapter->soft_stats.tx_trunc += smb->tx_trunc; + adapter->soft_stats.tx_pause += smb->tx_pause; + + netdev->stats.rx_packets = adapter->soft_stats.rx_packets; + netdev->stats.tx_packets = adapter->soft_stats.tx_packets; + netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes; + netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes; + netdev->stats.multicast = adapter->soft_stats.multicast; + netdev->stats.collisions = adapter->soft_stats.collisions; + netdev->stats.rx_errors = adapter->soft_stats.rx_errors; + netdev->stats.rx_over_errors = + adapter->soft_stats.rx_missed_errors; + netdev->stats.rx_length_errors = + adapter->soft_stats.rx_length_errors; + netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; + netdev->stats.rx_frame_errors = + adapter->soft_stats.rx_frame_errors; + netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; + netdev->stats.rx_missed_errors = + adapter->soft_stats.rx_missed_errors; + netdev->stats.tx_errors = adapter->soft_stats.tx_errors; + netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; + netdev->stats.tx_aborted_errors = + adapter->soft_stats.tx_aborted_errors; + netdev->stats.tx_window_errors = + adapter->soft_stats.tx_window_errors; + netdev->stats.tx_carrier_errors = + adapter->soft_stats.tx_carrier_errors; +} + +static void atl1_update_mailbox(struct atl1_adapter *adapter) +{ + unsigned long flags; + 
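+ /*
+ * A single 32-bit mailbox write publishes the TPD and RFD producer
+ * indices and the RRD consumer index to the hardware.
+ */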
u32 tpd_next_to_use; + u32 rfd_next_to_use; + u32 rrd_next_to_clean; + u32 value; + + spin_lock_irqsave(&adapter->mb_lock, flags); + + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); + + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + + spin_unlock_irqrestore(&adapter->mb_lock, flags); +} + +static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, u16 offset) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + + while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { + rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; + if (++rfd_ring->next_to_clean == rfd_ring->count) { + rfd_ring->next_to_clean = 0; + } + } +} + +static void atl1_update_rfd_index(struct atl1_adapter *adapter, + struct rx_return_desc *rrd) +{ + u16 num_buf; + + num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / + adapter->rx_buffer_len; + if (rrd->num_buf == num_buf) + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, num_buf); +} + +static void atl1_rx_checksum(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, struct sk_buff *skb) +{ + struct pci_dev *pdev = adapter->pdev; + + /* + * The L1 hardware contains a bug that erroneously sets the + * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a + * fragmented IP packet is received, even though the packet + * is perfectly valid and its checksum is correct. There's + * no way to distinguish between one of these good packets + * and a packet that actually contains a TCP/UDP checksum + * error, so all we can do is allow it to be handed up to + * the higher layers and let it be sorted out there. 
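+ * Such frames are handed up with ip_summed left at CHECKSUM_NONE, so
+ * the stack simply re-verifies the checksum in software.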
+ */ + + skb->ip_summed = CHECKSUM_NONE; + + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | + ERR_FLAG_CODE | ERR_FLAG_OV)) { + adapter->hw_csum_err++; + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "rx checksum error\n"); + return; + } + } + + /* not IPv4 */ + if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) + /* checksum is invalid, but it's not an IPv4 pkt, so ok */ + return; + + /* IPv4 packet */ + if (likely(!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_good++; + return; + } + + return; +} + +/* + * atl1_alloc_rx_buffers - Replace used receive buffers + * @adapter: address of board private structure + */ +static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct pci_dev *pdev = adapter->pdev; + struct page *page; + unsigned long offset; + struct atl1_buffer *buffer_info, *next_info; + struct sk_buff *skb; + u16 num_alloc = 0; + u16 rfd_next_to_use, next_next; + struct rx_free_desc *rfd_desc; + + next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + + while (!buffer_info->alloced && !next_info->alloced) { + if (buffer_info->skb) { + buffer_info->alloced = 1; + goto next; + } + + rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); + + skb = netdev_alloc_skb_ip_align(adapter->netdev, + adapter->rx_buffer_len); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->netdev->stats.rx_dropped++; + break; + } + + buffer_info->alloced = 1; + buffer_info->skb = skb; + buffer_info->length = (u16) adapter->rx_buffer_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(pdev, page, offset, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); + rfd_desc->coalese = 0; + +next: + rfd_next_to_use = next_next; + if (unlikely(++next_next == rfd_ring->count)) + next_next = 0; + + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + num_alloc++; + } + + if (num_alloc) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
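+ * The barrier also keeps the RFD writes ahead of the producer index
+ * update below and the mailbox write that follows later.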
+ */ + wmb(); + atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); + } + return num_alloc; +} + +static void atl1_intr_rx(struct atl1_adapter *adapter) +{ + int i, count; + u16 length; + u16 rrd_next_to_clean; + u32 value; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct rx_return_desc *rrd; + struct sk_buff *skb; + + count = 0; + + rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); + + while (1) { + rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); + i = 1; + if (likely(rrd->xsz.valid)) { /* packet valid */ +chk_rrd: + /* check rrd status */ + if (likely(rrd->num_buf == 1)) + goto rrd_ok; + else if (netif_msg_rx_err(adapter)) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "unexpected RRD buffer count\n"); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "rx_buf_len = %d\n", + adapter->rx_buffer_len); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD num_buf = %d\n", + rrd->num_buf); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD pkt_len = %d\n", + rrd->xsz.xsum_sz.pkt_size); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD pkt_flg = 0x%08X\n", + rrd->pkt_flg); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD err_flg = 0x%08X\n", + rrd->err_flg); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD vlan_tag = 0x%08X\n", + rrd->vlan_tag); + } + + /* rrd seems to be bad */ + if (unlikely(i-- > 0)) { + /* rrd may not be DMAed completely */ + udelay(1); + goto chk_rrd; + } + /* bad rrd */ + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "bad RRD\n"); + /* see if update RFD index */ + if (rrd->num_buf > 1) + atl1_update_rfd_index(adapter, rrd); + + /* update rrd */ + rrd->xsz.valid = 0; + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + continue; + } else { /* current rrd still not be updated */ + + break; + } +rrd_ok: + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, 0); + + buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; + if (++rfd_ring->next_to_clean == rfd_ring->count) + rfd_ring->next_to_clean = 0; + + /* update rrd next to clean */ + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM + | ERR_FLAG_LEN))) { + /* packet error, don't need upstream */ + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + continue; + } + } + + /* Good Receive */ + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + skb = buffer_info->skb; + length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); + + skb_put(skb, length - ETH_FCS_LEN); + + /* Receive Checksum Offload */ + atl1_rx_checksum(adapter, rrd, skb); + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { + u16 vlan_tag = (rrd->vlan_tag >> 4) | + ((rrd->vlan_tag & 7) << 13) | + ((rrd->vlan_tag & 8) << 9); + vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); + } else + netif_rx(skb); + + /* let protocol layer free skb */ + buffer_info->skb = NULL; + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + } + + atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); + + atl1_alloc_rx_buffers(adapter); + + /* update mailbox ? 
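+ * (only when at least one RRD was consumed; the write below hands the
+ * new RFD producer and RRD consumer indices to the hardware)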
*/ + if (count) { + u32 tpd_next_to_use; + u32 rfd_next_to_use; + + spin_lock(&adapter->mb_lock); + + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = + atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = + atomic_read(&adapter->rrd_ring.next_to_clean); + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + spin_unlock(&adapter->mb_lock); + } +} + +static void atl1_intr_tx(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + u16 sw_tpd_next_to_clean; + u16 cmb_tpd_next_to_clean; + + sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); + cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); + + while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { + struct tx_packet_desc *tpd; + + tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); + buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; + if (buffer_info->dma) { + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb_irq(buffer_info->skb); + buffer_info->skb = NULL; + } + + if (++sw_tpd_next_to_clean == tpd_ring->count) + sw_tpd_next_to_clean = 0; + } + atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); + + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) + netif_wake_queue(adapter->netdev); +} + +static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) +{ + u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); + u16 next_to_use = atomic_read(&tpd_ring->next_to_use); + return ((next_to_clean > next_to_use) ? 
+ next_to_clean - next_to_use - 1 : + tpd_ring->count + next_to_clean - next_to_use - 1); +} + +static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) +{ + u8 hdr_len, ip_off; + u32 real_len; + int err; + + if (skb_shinfo(skb)->gso_size) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (unlikely(err)) + return -1; + } + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + real_len = (((unsigned char *)iph - skb->data) + + ntohs(iph->tot_len)); + if (real_len < skb->len) + pskb_trim(skb, real_len); + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (skb->len == hdr_len) { + iph->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(iph->saddr, + iph->daddr, tcp_hdrlen(skb), + IPPROTO_TCP, 0); + ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << + TPD_IPHL_SHIFT; + ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << + TPD_TCPHDRLEN_SHIFT; + ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT; + ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT; + return 1; + } + + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, IPPROTO_TCP, 0); + ip_off = (unsigned char *)iph - + (unsigned char *) skb_network_header(skb); + if (ip_off == 8) /* 802.3-SNAP frame */ + ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; + else if (ip_off != 0) + return -2; + + ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << + TPD_IPHL_SHIFT; + ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT; + ptpd->word3 |= (skb_shinfo(skb)->gso_size & + TPD_MSS_MASK) << TPD_MSS_SHIFT; + ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; + return 3; + } + } + return false; +} + +static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) +{ + u8 css, cso; + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + css = (u8) (skb->csum_start - skb_headroom(skb)); + cso = css + (u8) skb->csum_offset; + if (unlikely(css & 0x1)) { + /* L1 hardware requires an even number here */ + if (netif_msg_tx_err(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "payload offset not an even number\n"); + return -1; + } + ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) << + TPD_CCSUMOFFSET_SHIFT; + ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT; + return true; + } + return 0; +} + +static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + u16 buf_len = skb->len; + struct page *page; + unsigned long offset; + unsigned int nr_frags; + unsigned int f; + int retval; + u16 next_to_use; + u16 data_len; + u8 hdr_len; + + buf_len -= skb->data_len; + nr_frags = skb_shinfo(skb)->nr_frags; + next_to_use = atomic_read(&tpd_ring->next_to_use); + buffer_info = &tpd_ring->buffer_info[next_to_use]; + BUG_ON(buffer_info->skb); + /* put skb in last TPD */ + buffer_info->skb = NULL; + + retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; + if (retval) { + /* TSO */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + buffer_info->length = hdr_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, page, + offset, hdr_len, + PCI_DMA_TODEVICE); + + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + + if (buf_len > hdr_len) { + int i, nseg; + + data_len = 
buf_len - hdr_len; + nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < nseg; i++) { + buffer_info = + &tpd_ring->buffer_info[next_to_use]; + buffer_info->skb = NULL; + buffer_info->length = + (ATL1_MAX_TX_BUF_LEN >= + data_len) ? ATL1_MAX_TX_BUF_LEN : data_len; + data_len -= buffer_info->length; + page = virt_to_page(skb->data + + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)); + offset = (unsigned long)(skb->data + + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) & + ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, + page, offset, buffer_info->length, + PCI_DMA_TODEVICE); + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } + } + } else { + /* not TSO */ + buffer_info->length = buf_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, page, + offset, buf_len, PCI_DMA_TODEVICE); + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + u16 i, nseg; + + frag = &skb_shinfo(skb)->frags[f]; + buf_len = frag->size; + + nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < nseg; i++) { + buffer_info = &tpd_ring->buffer_info[next_to_use]; + BUG_ON(buffer_info->skb); + + buffer_info->skb = NULL; + buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ? + ATL1_MAX_TX_BUF_LEN : buf_len; + buf_len -= buffer_info->length; + buffer_info->dma = pci_map_page(adapter->pdev, + frag->page, + frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), + buffer_info->length, PCI_DMA_TODEVICE); + + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } + } + + /* last tpd's buffer-info */ + buffer_info->skb = skb; +} + +static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count, + struct tx_packet_desc *ptpd) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + struct tx_packet_desc *tpd; + u16 j; + u32 val; + u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use); + + for (j = 0; j < count; j++) { + buffer_info = &tpd_ring->buffer_info[next_to_use]; + tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use); + if (tpd != ptpd) + memcpy(tpd, ptpd, sizeof(struct tx_packet_desc)); + tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT); + tpd->word2 |= (cpu_to_le16(buffer_info->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT; + + /* + * if this is the first packet in a TSO chain, set + * TPD_HDRFLAG, otherwise, clear it. + */ + val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & + TPD_SEGMENT_EN_MASK; + if (val) { + if (!j) + tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; + else + tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT); + } + + if (j == (count - 1)) + tpd->word3 |= 1 << TPD_EOP_SHIFT; + + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
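+ * The TPDs built above must be visible before atl1_update_mailbox()
+ * passes the new producer index to the hardware.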
+ */ + wmb(); + + atomic_set(&tpd_ring->next_to_use, next_to_use); +} + +static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + int len = skb->len; + int tso; + int count = 1; + int ret_val; + struct tx_packet_desc *ptpd; + u16 frag_size; + u16 vlan_tag; + unsigned int nr_frags = 0; + unsigned int mss = 0; + unsigned int f; + unsigned int proto_hdr_len; + + len -= skb->data_len; + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) { + frag_size = skb_shinfo(skb)->frags[f].size; + if (frag_size) + count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + } + + mss = skb_shinfo(skb)->gso_size; + if (mss) { + if (skb->protocol == htons(ETH_P_IP)) { + proto_hdr_len = (skb_transport_offset(skb) + + tcp_hdrlen(skb)); + if (unlikely(proto_hdr_len > len)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* need additional TPD ? */ + if (proto_hdr_len != len) + count += (len - proto_hdr_len + + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + } + } + + if (atl1_tpd_avail(&adapter->tpd_ring) < count) { + /* not enough descriptors */ + netif_stop_queue(netdev); + if (netif_msg_tx_queued(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "tx busy\n"); + return NETDEV_TX_BUSY; + } + + ptpd = ATL1_TPD_DESC(tpd_ring, + (u16) atomic_read(&tpd_ring->next_to_use)); + memset(ptpd, 0, sizeof(struct tx_packet_desc)); + + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + vlan_tag = vlan_tx_tag_get(skb); + vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | + ((vlan_tag >> 9) & 0x8); + ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; + ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) << + TPD_VLANTAG_SHIFT; + } + + tso = atl1_tso(adapter, skb, ptpd); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (!tso) { + ret_val = atl1_tx_csum(adapter, skb, ptpd); + if (ret_val < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } + + atl1_tx_map(adapter, skb, ptpd); + atl1_tx_queue(adapter, count, ptpd); + atl1_update_mailbox(adapter); + mmiowb(); + return NETDEV_TX_OK; +} + +/* + * atl1_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * @pt_regs: CPU registers structure + */ +static irqreturn_t atl1_intr(int irq, void *data) +{ + struct atl1_adapter *adapter = netdev_priv(data); + u32 status; + int max_ints = 10; + + status = adapter->cmb.cmb->int_stats; + if (!status) + return IRQ_NONE; + + do { + /* clear CMB interrupt status at once */ + adapter->cmb.cmb->int_stats = 0; + + if (status & ISR_GPHY) /* clear phy status */ + atlx_clear_phy_int(adapter); + + /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ + iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); + + /* check if SMB intr */ + if (status & ISR_SMB) + atl1_inc_smb(adapter); + + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie phy link down %x\n", status); + if (netif_running(adapter->netdev)) { /* reset MAC */ + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + schedule_work(&adapter->pcie_dma_to_rst_task); + return IRQ_HANDLED; + } + } + + /* check if DMA read/write error ? 
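+ * (a DMA read/write timeout; mask further interrupts and schedule the
+ * reset task to recover the MAC)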
*/ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie DMA r/w error (status = 0x%x)\n", + status); + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + schedule_work(&adapter->pcie_dma_to_rst_task); + return IRQ_HANDLED; + } + + /* link event */ + if (status & ISR_GPHY) { + adapter->soft_stats.tx_carrier_errors++; + atl1_check_for_link(adapter); + } + + /* transmit event */ + if (status & ISR_CMB_TX) + atl1_intr_tx(adapter); + + /* rx exception */ + if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV | ISR_CMB_RX))) { + if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV)) + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, + &adapter->pdev->dev, + "rx exception, ISR = 0x%x\n", + status); + atl1_intr_rx(adapter); + } + + if (--max_ints < 0) + break; + + } while ((status = adapter->cmb.cmb->int_stats)); + + /* re-enable Interrupt */ + iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); + return IRQ_HANDLED; +} + + +/* + * atl1_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1_phy_config(unsigned long data) +{ + struct atl1_adapter *adapter = (struct atl1_adapter *)data; + struct atl1_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->lock, flags); + adapter->phy_timer_pending = false; + atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg); + atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +/* + * Orphaned vendor comment left intact here: + * + * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT + * will assert. We do soft reset <0x1400=1> according + * with the SPEC. BUT, it seemes that PCIE or DMA + * state-machine will not be reset. DMAR_TO_INT will + * assert again and again. 
+ * + */ + +static int atl1_reset(struct atl1_adapter *adapter) +{ + int ret; + ret = atl1_reset_hw(&adapter->hw); + if (ret) + return ret; + return atl1_init_hw(&adapter->hw); +} + +static s32 atl1_up(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + int irq_flags = IRQF_SAMPLE_RANDOM; + + /* hardware has been reset, we need to reload some things */ + atlx_set_multi(netdev); + atl1_init_ring_ptrs(adapter); + atlx_restore_vlan(adapter); + err = atl1_alloc_rx_buffers(adapter); + if (unlikely(!err)) + /* no RX BUFFER allocated */ + return -ENOMEM; + + if (unlikely(atl1_configure(adapter))) { + err = -EIO; + goto err_up; + } + + err = pci_enable_msi(adapter->pdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_info(&adapter->pdev->dev, + "Unable to enable MSI: %d\n", err); + irq_flags |= IRQF_SHARED; + } + + err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags, + netdev->name, netdev); + if (unlikely(err)) + goto err_up; + + atlx_irq_enable(adapter); + atl1_check_link(adapter); + netif_start_queue(netdev); + return 0; + +err_up: + pci_disable_msi(adapter->pdev); + /* free rx_buffers */ + atl1_clean_rx_ring(adapter); + return err; +} + +static void atl1_down(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + netif_stop_queue(netdev); + del_timer_sync(&adapter->phy_config_timer); + adapter->phy_timer_pending = false; + + atlx_irq_disable(adapter); + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + atl1_reset_hw(&adapter->hw); + adapter->cmb.cmb->int_stats = 0; + + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + netif_carrier_off(netdev); + + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); +} + +static void atl1_tx_timeout_task(struct work_struct *work) +{ + struct atl1_adapter *adapter = + container_of(work, struct atl1_adapter, tx_timeout_task); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + atl1_down(adapter); + atl1_up(adapter); + netif_device_attach(netdev); +} + +/* + * atl1_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; + } + + adapter->hw.max_frame_size = max_frame; + adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; + adapter->rx_buffer_len = (max_frame + 7) & ~7; + adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; + + netdev->mtu = new_mtu; + if ((old_mtu != new_mtu) && netif_running(netdev)) { + atl1_down(adapter); + atl1_up(adapter); + } + + return 0; +} + +/* + * atl1_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ */ +static int atl1_open(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int err; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = atl1_setup_ring_resources(adapter); + if (err) + return err; + + err = atl1_up(adapter); + if (err) + goto err_up; + + return 0; + +err_up: + atl1_reset(adapter); + return err; +} + +/* + * atl1_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int atl1_close(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + atl1_down(adapter); + atl1_free_ring_resources(adapter); + return 0; +} + +#ifdef CONFIG_PM +static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + u32 ctrl = 0; + u32 wufc = adapter->wol; + u32 val; + int retval; + u16 speed; + u16 duplex; + + netif_device_detach(netdev); + if (netif_running(netdev)) + atl1_down(adapter); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + val = ctrl & BMSR_LSTATUS; + if (val) + wufc &= ~ATLX_WUFC_LNKC; + + if (val && wufc) { + val = atl1_get_speed_and_duplex(hw, &speed, &duplex); + if (val) { + if (netif_msg_ifdown(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "error getting speed/duplex\n"); + goto disable_wol; + } + + ctrl = 0; + + /* enable magic packet WOL */ + if (wufc & ATLX_WUFC_MAG) + ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + + /* configure the mac */ + ctrl = MAC_CTRL_RX_EN; + ctrl |= ((u32)((speed == SPEED_1000) ? 
MAC_CTRL_SPEED_1000 : + MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); + if (duplex == FULL_DUPLEX) + ctrl |= MAC_CTRL_DUPLX; + ctrl |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + if (adapter->vlgrp) + ctrl |= MAC_CTRL_RMV_VLAN; + if (wufc & ATLX_WUFC_MAG) + ctrl |= MAC_CTRL_BC_EN; + iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); + ioread32(hw->hw_addr + REG_MAC_CTRL); + + /* poke the PHY */ + ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); + ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto exit; + } + + if (!val && wufc) { + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + iowrite32(0, hw->hw_addr + REG_MAC_CTRL); + ioread32(hw->hw_addr + REG_MAC_CTRL); + hw->phy_configured = false; + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto exit; + } + +disable_wol: + iowrite32(0, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); + ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + hw->phy_configured = false; + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); +exit: + if (netif_running(netdev)) + pci_disable_msi(adapter->pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int atl1_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "error enabling pci device\n"); + return err; + } + + pci_set_master(pdev); + iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + atl1_reset_hw(&adapter->hw); + adapter->cmb.cmb->int_stats = 0; + + if (netif_running(netdev)) + atl1_up(adapter); + netif_device_attach(netdev); + + return 0; +} +#else +#define atl1_suspend NULL +#define atl1_resume NULL +#endif + +static void atl1_shutdown(struct pci_dev *pdev) +{ +#ifdef CONFIG_PM + atl1_suspend(pdev, PMSG_SUSPEND); +#endif +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void atl1_poll_controller(struct net_device *netdev) +{ + disable_irq(netdev->irq); + atl1_intr(netdev->irq, netdev); + enable_irq(netdev->irq); +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops atl1_netdev_ops = { + .ndo_open = atl1_open, + .ndo_stop = atl1_close, + .ndo_start_xmit = atl1_xmit_frame, + .ndo_set_multicast_list = atlx_set_multi, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = atl1_set_mac, + .ndo_change_mtu = atl1_change_mtu, + .ndo_do_ioctl = atlx_ioctl, + .ndo_tx_timeout = atlx_tx_timeout, + .ndo_vlan_rx_register = atlx_vlan_rx_register, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl1_poll_controller, +#endif +}; +#endif + +/* + * atl1_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1_probe initializes an adapter identified by a pci_dev structure. 
+ * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int __devinit atl1_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1_adapter *adapter; + static int cards_found = 0; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + /* + * The atl1 chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. + */ + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_dma; + } + /* + * Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl1_driver_name + */ + err = pci_request_regions(pdev, ATLX_DRIVER_NAME); + if (err) + goto err_request_regions; + + /* + * Enables bus-mastering on the device and calls + * pcibios_set_master to do the needed arch specific settings + */ + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1_adapter)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + adapter->msg_enable = netif_msg_init(debug, atl1_default_msg); + + adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); + if (!adapter->hw.hw_addr) { + err = -EIO; + goto err_pci_iomap; + } + /* get device revision number */ + adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + + (REG_MASTER_CTRL + 2)); + if (netif_msg_probe(adapter)) + dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION); + + /* set default ring resource counts */ + adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD; + adapter->tpd_ring.count = ATL1_DEFAULT_TPD; + + adapter->mii.dev = netdev; + adapter->mii.mdio_read = mdio_read; + adapter->mii.mdio_write = mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = 0x1f; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + netdev->netdev_ops = &atl1_netdev_ops; +#else + netdev->change_mtu = atl1_change_mtu; + netdev->hard_start_xmit = atl1_xmit_frame; + netdev->open = atl1_open; + netdev->stop = atl1_close; + netdev->tx_timeout = atlx_tx_timeout; + netdev->set_mac_address = atl1_set_mac; + netdev->do_ioctl = atlx_ioctl; + netdev->set_multicast_list = atlx_set_multi; + netdev->vlan_rx_register = atlx_vlan_rx_register; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = atl1_poll_controller; +#endif +#endif + netdev->watchdog_timeo = 5 * HZ; + + netdev->ethtool_ops = &atl1_ethtool_ops; + adapter->bd_number = cards_found; + + /* setup the private structure */ + err = atl1_sw_init(adapter); + if (err) + goto err_common; + + netdev->features = NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SG; + netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); + + /* + * patch for some L1 of old version, + * the final version of L1 may not need these + * patches + */ + /* atl1_pcie_patch(adapter); */ + + /* really reset GPHY core */ + iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); + + /* + * reset the controller to + * put the device in a known 
good starting state + */ + if (atl1_reset_hw(&adapter->hw)) { + err = -EIO; + goto err_common; + } + + /* copy the MAC address out of the EEPROM */ + atl1_read_mac_addr(&adapter->hw); + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + err = -EIO; + goto err_common; + } + + atl1_check_options(adapter); + + /* pre-init the MAC, and setup link */ + err = atl1_init_hw(&adapter->hw); + if (err) { + err = -EIO; + goto err_common; + } + + atl1_pcie_patch(adapter); + /* assume we have no link for now */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + setup_timer(&adapter->phy_config_timer, &atl1_phy_config, + (unsigned long)adapter); + adapter->phy_timer_pending = false; + + INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); + + INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); + + INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); + + err = register_netdev(netdev); + if (err) + goto err_common; + + cards_found++; + atl1_via_workaround(adapter); + return 0; + +err_common: + pci_iounmap(pdev, adapter->hw.hw_addr); +err_pci_iomap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_dma: +err_request_regions: + pci_disable_device(pdev); + return err; +} + +/* + * atl1_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl1_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void __devexit atl1_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter; + /* Device not available. Return. */ + if (!netdev) + return; + + adapter = netdev_priv(netdev); + + /* + * Some atl1 boards lack persistent storage for their MAC, and get it + * from the BIOS during POST. If we've been messing with the MAC + * address, we need to save the permanent one. + */ + if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { + memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, + ETH_ALEN); + atl1_set_mac_addr(&adapter->hw); + } + + iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); + unregister_netdev(netdev); + pci_iounmap(pdev, adapter->hw.hw_addr); + pci_release_regions(pdev); + free_netdev(netdev); + pci_disable_device(pdev); +} + +static struct pci_driver atl1_driver = { + .name = ATLX_DRIVER_NAME, + .id_table = atl1_pci_tbl, + .probe = atl1_probe, + .remove = __devexit_p(atl1_remove), + .suspend = atl1_suspend, + .resume = atl1_resume, + .shutdown = atl1_shutdown +}; + +/* + * atl1_exit_module - Driver Exit Cleanup Routine + * + * atl1_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit atl1_exit_module(void) +{ + pci_unregister_driver(&atl1_driver); +} + +/* + * atl1_init_module - Driver Registration Routine + * + * atl1_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
+ */ +static int __init atl1_init_module(void) +{ + return pci_register_driver(&atl1_driver); +} + +module_init(atl1_init_module); +module_exit(atl1_exit_module); + +struct atl1_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define ATL1_STAT(m) \ + sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m) + +static struct atl1_stats atl1_gstrings_stats[] = { + {"rx_packets", ATL1_STAT(soft_stats.rx_packets)}, + {"tx_packets", ATL1_STAT(soft_stats.tx_packets)}, + {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)}, + {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)}, + {"rx_errors", ATL1_STAT(soft_stats.rx_errors)}, + {"tx_errors", ATL1_STAT(soft_stats.tx_errors)}, + {"multicast", ATL1_STAT(soft_stats.multicast)}, + {"collisions", ATL1_STAT(soft_stats.collisions)}, + {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)}, + {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, + {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)}, + {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)}, + {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)}, + {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, + {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)}, + {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)}, + {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)}, + {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)}, + {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)}, + {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)}, + {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, + {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, + {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, + {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, + {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, + {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, + {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, + {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)}, + {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)} +}; + +static void atl1_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int i; + char *p; + + for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { + p = (char *)adapter+atl1_gstrings_stats[i].stat_offset; + data[i] = (atl1_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + +} + +static int atl1_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(atl1_gstrings_stats); + default: + return -EOPNOTSUPP; + } +} + +static int atl1_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + ecmd->advertising |= ADVERTISED_Autoneg; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= + (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } else + ecmd->advertising |= (ADVERTISED_1000baseT_Full); + } + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (netif_carrier_ok(adapter->netdev)) { + u16 link_speed, link_duplex; + atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex); + ecmd->speed = link_speed; + if (link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static int atl1_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + u16 phy_data; + int ret_val = 0; + u16 old_media_type = hw->media_type; + + if (netif_running(adapter->netdev)) { + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool shutting down adapter\n"); + atl1_down(adapter); + } + + if (ecmd->autoneg == AUTONEG_ENABLE) + hw->media_type = MEDIA_TYPE_AUTO_SENSOR; + else { + if (ecmd->speed == SPEED_1000) { + if (ecmd->duplex != DUPLEX_FULL) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "1000M half is invalid\n"); + ret_val = -EINVAL; + goto exit_sset; + } + hw->media_type = MEDIA_TYPE_1000M_FULL; + } else if (ecmd->speed == SPEED_100) { + if (ecmd->duplex == DUPLEX_FULL) + hw->media_type = MEDIA_TYPE_100M_FULL; + else + hw->media_type = MEDIA_TYPE_100M_HALF; + } else { + if (ecmd->duplex == DUPLEX_FULL) + hw->media_type = MEDIA_TYPE_10M_FULL; + else + hw->media_type = MEDIA_TYPE_10M_HALF; + } + } + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + ecmd->advertising = + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | ADVERTISED_TP; + break; + case MEDIA_TYPE_1000M_FULL: + ecmd->advertising = + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | ADVERTISED_TP; + break; + default: + ecmd->advertising = 0; + break; + } + if (atl1_phy_setup_autoneg_adv(hw)) { + ret_val = -EINVAL; + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "invalid ethtool speed/duplex setting\n"); + goto exit_sset; + } + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + 
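+ /* otherwise force speed/duplex by hand-building the BMCR value */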
else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); +exit_sset: + if (ret_val) + hw->media_type = old_media_type; + + if (netif_running(adapter->netdev)) { + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool starting adapter\n"); + atl1_up(adapter); + } else if (!ret_val) { + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool resetting adapter\n"); + atl1_reset(adapter); + } + return ret_val; +} + +static void atl1_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ATLX_DRIVER_VERSION, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->eedump_len = ATL1_EEDUMP_LEN; +} + +static void atl1_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + if (adapter->wol & ATLX_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + return; +} + +static int atl1_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | + WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + adapter->wol = 0; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= ATLX_WUFC_MAG; + return 0; +} + +static u32 atl1_get_msglevel(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void atl1_set_msglevel(struct net_device *netdev, u32 value) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = value; +} + +static int atl1_get_regs_len(struct net_device *netdev) +{ + return ATL1_REG_COUNT * sizeof(u32); +} + +static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + unsigned int i; + u32 *regbuf = p; + + for (i = 0; i < ATL1_REG_COUNT; i++) { + /* + * This switch statement avoids reserved regions + * of register space. + */ + switch (i) { + case 6 ... 9: + case 14: + case 29 ... 31: + case 34 ... 63: + case 75 ... 127: + case 136 ... 1023: + case 1027 ... 1087: + case 1091 ... 1151: + case 1194 ... 1195: + case 1200 ... 1201: + case 1206 ... 1213: + case 1216 ... 1279: + case 1290 ... 1311: + case 1323 ... 1343: + case 1358 ... 1359: + case 1368 ... 1375: + case 1378 ... 1383: + case 1388 ... 1391: + case 1393 ... 1395: + case 1402 ... 1403: + case 1410 ... 1471: + case 1522 ... 
1535: + /* reserved region; don't read it */ + regbuf[i] = 0; + break; + default: + /* unreserved region */ + regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32))); + } + } +} + +static void atl1_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *txdr = &adapter->tpd_ring; + struct atl1_rfd_ring *rxdr = &adapter->rfd_ring; + + ring->rx_max_pending = ATL1_MAX_RFD; + ring->tx_max_pending = ATL1_MAX_TPD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int atl1_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; + struct atl1_rrd_ring *rrdr = &adapter->rrd_ring; + struct atl1_rfd_ring *rfdr = &adapter->rfd_ring; + + struct atl1_tpd_ring tpd_old, tpd_new; + struct atl1_rfd_ring rfd_old, rfd_new; + struct atl1_rrd_ring rrd_old, rrd_new; + struct atl1_ring_header rhdr_old, rhdr_new; + int err; + + tpd_old = adapter->tpd_ring; + rfd_old = adapter->rfd_ring; + rrd_old = adapter->rrd_ring; + rhdr_old = adapter->ring_header; + + if (netif_running(adapter->netdev)) + atl1_down(adapter); + + rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD); + rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : + rfdr->count; + rfdr->count = (rfdr->count + 3) & ~3; + rrdr->count = rfdr->count; + + tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD); + tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : + tpdr->count; + tpdr->count = (tpdr->count + 3) & ~3; + + if (netif_running(adapter->netdev)) { + /* try to get new resources before deleting old */ + err = atl1_setup_ring_resources(adapter); + if (err) + goto err_setup_ring; + + /* + * save the new, restore the old in order to free it, + * then restore the new back again + */ + + rfd_new = adapter->rfd_ring; + rrd_new = adapter->rrd_ring; + tpd_new = adapter->tpd_ring; + rhdr_new = adapter->ring_header; + adapter->rfd_ring = rfd_old; + adapter->rrd_ring = rrd_old; + adapter->tpd_ring = tpd_old; + adapter->ring_header = rhdr_old; + atl1_free_ring_resources(adapter); + adapter->rfd_ring = rfd_new; + adapter->rrd_ring = rrd_new; + adapter->tpd_ring = tpd_new; + adapter->ring_header = rhdr_new; + + err = atl1_up(adapter); + if (err) + return err; + } + return 0; + +err_setup_ring: + adapter->rfd_ring = rfd_old; + adapter->rrd_ring = rrd_old; + adapter->tpd_ring = tpd_old; + adapter->ring_header = rhdr_old; + atl1_up(adapter); + return err; +} + +static void atl1_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *epause) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + epause->autoneg = AUTONEG_ENABLE; + } else { + epause->autoneg = AUTONEG_DISABLE; + } + epause->rx_pause = 1; + epause->tx_pause = 1; +} + +static int atl1_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *epause) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + epause->autoneg = AUTONEG_ENABLE; + } else { + epause->autoneg = AUTONEG_DISABLE; + } + + 
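+ /* + * Pause is reported as fixed on by this driver; the requested rx/tx + * pause settings are not written to the hardware here. + */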
epause->rx_pause = 1; + epause->tx_pause = 1; + + return 0; +} + +/* FIXME: is this right? -- CHS */ +static u32 atl1_get_rx_csum(struct net_device *netdev) +{ + return 1; +} + +static void atl1_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { + memcpy(p, atl1_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int atl1_nway_reset(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (netif_running(netdev)) { + u16 phy_data; + atl1_down(adapter); + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + } else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = MII_CR_FULL_DUPLEX | + MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = MII_CR_FULL_DUPLEX | + MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + } + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); + atl1_up(adapter); + } + return 0; +} + +const struct ethtool_ops atl1_ethtool_ops = { + .get_settings = atl1_get_settings, + .set_settings = atl1_set_settings, + .get_drvinfo = atl1_get_drvinfo, + .get_wol = atl1_get_wol, + .set_wol = atl1_set_wol, + .get_msglevel = atl1_get_msglevel, + .set_msglevel = atl1_set_msglevel, + .get_regs_len = atl1_get_regs_len, + .get_regs = atl1_get_regs, + .get_ringparam = atl1_get_ringparam, + .set_ringparam = atl1_set_ringparam, + .get_pauseparam = atl1_get_pauseparam, + .set_pauseparam = atl1_set_pauseparam, + .get_rx_csum = atl1_get_rx_csum, + .set_tx_csum = ethtool_op_set_tx_hw_csum, + .get_link = ethtool_op_get_link, + .set_sg = ethtool_op_set_sg, + .get_strings = atl1_get_strings, + .nway_reset = atl1_nway_reset, + .get_ethtool_stats = atl1_get_ethtool_stats, + .get_sset_count = atl1_get_sset_count, + .set_tso = ethtool_op_set_tso, +}; diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h new file mode 100644 index 0000000..146372f --- /dev/null +++ b/drivers/net/atlx/atl1.h @@ -0,0 +1,796 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 - 2008 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef ATL1_H +#define ATL1_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atlx.h" + +#define ATLX_DRIVER_NAME "atl1" + +MODULE_DESCRIPTION("Atheros L1 Gigabit Ethernet Driver"); + +#define atlx_adapter atl1_adapter +#define atlx_check_for_link atl1_check_for_link +#define atlx_check_link atl1_check_link +#define atlx_hash_mc_addr atl1_hash_mc_addr +#define atlx_hash_set atl1_hash_set +#define atlx_hw atl1_hw +#define atlx_mii_ioctl atl1_mii_ioctl +#define atlx_read_phy_reg atl1_read_phy_reg +#define atlx_set_mac atl1_set_mac +#define atlx_set_mac_addr atl1_set_mac_addr + +struct atl1_adapter; +struct atl1_hw; + +/* function prototypes needed by multiple files */ +u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr); +void atl1_hash_set(struct atl1_hw *hw, u32 hash_value); +s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data); +void atl1_set_mac_addr(struct atl1_hw *hw); +static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static u32 atl1_check_link(struct atl1_adapter *adapter); + +extern const struct ethtool_ops atl1_ethtool_ops; + +/* hardware definitions specific to L1 */ + +/* Block IDLE Status Register */ +#define IDLE_STATUS_RXMAC 0x1 +#define IDLE_STATUS_TXMAC 0x2 +#define IDLE_STATUS_RXQ 0x4 +#define IDLE_STATUS_TXQ 0x8 +#define IDLE_STATUS_DMAR 0x10 +#define IDLE_STATUS_DMAW 0x20 +#define IDLE_STATUS_SMB 0x40 +#define IDLE_STATUS_CMB 0x80 + +/* MDIO Control Register */ +#define MDIO_WAIT_TIMES 30 + +/* MAC Control Register */ +#define MAC_CTRL_TX_PAUSE 0x10000 +#define MAC_CTRL_SCNT 0x20000 +#define MAC_CTRL_SRST_TX 0x40000 +#define MAC_CTRL_TX_SIMURST 0x80000 +#define MAC_CTRL_SPEED_SHIFT 20 +#define MAC_CTRL_SPEED_MASK 0x300000 +#define MAC_CTRL_SPEED_1000 0x2 +#define MAC_CTRL_SPEED_10_100 0x1 +#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 +#define MAC_CTRL_TX_HUGE 0x800000 +#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 +#define MAC_CTRL_DBG 0x8000000 + +/* Wake-On-Lan control register */ +#define WOL_CLK_SWITCH_EN 0x8000 +#define WOL_PT5_EN 0x200000 +#define WOL_PT6_EN 0x400000 +#define WOL_PT5_MATCH 0x8000000 +#define WOL_PT6_MATCH 0x10000000 + +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PATTERN_LEN 0x14A4 +#define WOL_PT_LEN_MASK 0x7F +#define WOL_PT0_LEN_SHIFT 0 +#define WOL_PT1_LEN_SHIFT 8 +#define WOL_PT2_LEN_SHIFT 16 +#define WOL_PT3_LEN_SHIFT 24 +#define WOL_PT4_LEN_SHIFT 0 +#define WOL_PT5_LEN_SHIFT 8 +#define WOL_PT6_LEN_SHIFT 16 + +/* Internal SRAM Partition Registers, low 32 bits */ +#define REG_SRAM_RFD_LEN 0x1504 +#define REG_SRAM_RRD_ADDR 0x1508 +#define REG_SRAM_RRD_LEN 0x150C +#define REG_SRAM_TPD_ADDR 0x1510 +#define REG_SRAM_TPD_LEN 0x1514 +#define REG_SRAM_TRD_ADDR 0x1518 +#define REG_SRAM_TRD_LEN 0x151C +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_PATH_ADDR 0x1530 +#define SRAM_TCPH_ADDR_MASK 0xFFF +#define SRAM_TCPH_ADDR_SHIFT 0 +#define SRAM_PATH_ADDR_MASK 0xFFF +#define SRAM_PATH_ADDR_SHIFT 16 + +/* Load Ptr Register */ +#define REG_LOAD_PTR 0x1534 + +/* Descriptor Control registers, low 32 bits */ +#define REG_DESC_RFD_ADDR_LO 0x1544 +#define REG_DESC_RRD_ADDR_LO 0x1548 +#define REG_DESC_TPD_ADDR_LO 0x154C +#define REG_DESC_CMB_ADDR_LO 0x1550 +#define REG_DESC_SMB_ADDR_LO 0x1554 +#define REG_DESC_RFD_RRD_RING_SIZE 0x1558 +#define DESC_RFD_RING_SIZE_MASK 0x7FF +#define DESC_RFD_RING_SIZE_SHIFT 0 +#define 
DESC_RRD_RING_SIZE_MASK 0x7FF +#define DESC_RRD_RING_SIZE_SHIFT 16 +#define REG_DESC_TPD_RING_SIZE 0x155C +#define DESC_TPD_RING_SIZE_MASK 0x3FF +#define DESC_TPD_RING_SIZE_SHIFT 0 + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1580 +#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0 +#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1F +#define TXQ_CTRL_EN 0x20 +#define TXQ_CTRL_ENH_MODE 0x40 +#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8 +#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3F +#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 +#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xFFFF + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584 +#define TX_JUMBO_TASK_TH_MASK 0x7FF +#define TX_JUMBO_TASK_TH_SHIFT 0 +#define TX_TPD_MIN_IPG_MASK 0x1F +#define TX_TPD_MIN_IPG_SHIFT 16 + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0 +#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xFF +#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8 +#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xFF +#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16 +#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1F +#define RXQ_CTRL_CUT_THRU_EN 0x40000000 +#define RXQ_CTRL_EN 0x80000000 + +/* Rx jumbo packet threshold and rrd retirement timer */ +#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4 +#define RXQ_JMBOSZ_TH_MASK 0x7FF +#define RXQ_JMBOSZ_TH_SHIFT 0 +#define RXQ_JMBO_LKAH_MASK 0xF +#define RXQ_JMBO_LKAH_SHIFT 11 +#define RXQ_RRD_TIMER_MASK 0xFFFF +#define RXQ_RRD_TIMER_SHIFT 16 + +/* RFD flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0xFFF +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0xFFF + +/* RRD flow control register */ +#define REG_RXQ_RRD_PAUSE_THRESH 0x15AC +#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RRD_PAUSE_TH_HI_MASK 0xFFF +#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RRD_PAUSE_TH_LO_MASK 0xFFF + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_DMAR_IN_ORDER 0x1 +#define DMA_CTRL_DMAR_ENH_ORDER 0x2 +#define DMA_CTRL_DMAR_OUT_ORDER 0x4 +#define DMA_CTRL_RCB_VALUE 0x8 +#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 +#define DMA_CTRL_DMAR_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 +#define DMA_CTRL_DMAW_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAR_EN 0x400 +#define DMA_CTRL_DMAW_EN 0x800 + +/* CMB/SMB Control Register */ +#define REG_CSMB_CTRL 0x15D0 +#define CSMB_CTRL_CMB_NOW 1 +#define CSMB_CTRL_SMB_NOW 2 +#define CSMB_CTRL_CMB_EN 4 +#define CSMB_CTRL_SMB_EN 8 + +/* CMB DMA Write Threshold Register */ +#define REG_CMB_WRITE_TH 0x15D4 +#define CMB_RRD_TH_SHIFT 0 +#define CMB_RRD_TH_MASK 0x7FF +#define CMB_TPD_TH_SHIFT 16 +#define CMB_TPD_TH_MASK 0x7FF + +/* RX/TX count-down timer to trigger CMB-write. 2us resolution. 
*/ +#define REG_CMB_WRITE_TIMER 0x15D8 +#define CMB_RX_TM_SHIFT 0 +#define CMB_RX_TM_MASK 0xFFFF +#define CMB_TX_TM_SHIFT 16 +#define CMB_TX_TM_MASK 0xFFFF + +/* Number of packet received since last CMB write */ +#define REG_CMB_RX_PKT_CNT 0x15DC + +/* Number of packet transmitted since last CMB write */ +#define REG_CMB_TX_PKT_CNT 0x15E0 + +/* SMB auto DMA timer register */ +#define REG_SMB_TIMER 0x15E4 + +/* Mailbox Register */ +#define REG_MAILBOX 0x15F0 +#define MB_RFD_PROD_INDX_SHIFT 0 +#define MB_RFD_PROD_INDX_MASK 0x7FF +#define MB_RRD_CONS_INDX_SHIFT 11 +#define MB_RRD_CONS_INDX_MASK 0x7FF +#define MB_TPD_PROD_INDX_SHIFT 22 +#define MB_TPD_PROD_INDX_MASK 0x3FF + +/* Interrupt Status Register */ +#define ISR_SMB 0x1 +#define ISR_TIMER 0x2 +#define ISR_MANUAL 0x4 +#define ISR_RXF_OV 0x8 +#define ISR_RFD_UNRUN 0x10 +#define ISR_RRD_OV 0x20 +#define ISR_TXF_UNRUN 0x40 +#define ISR_LINK 0x80 +#define ISR_HOST_RFD_UNRUN 0x100 +#define ISR_HOST_RRD_OV 0x200 +#define ISR_DMAR_TO_RST 0x400 +#define ISR_DMAW_TO_RST 0x800 +#define ISR_GPHY 0x1000 +#define ISR_RX_PKT 0x10000 +#define ISR_TX_PKT 0x20000 +#define ISR_TX_DMA 0x40000 +#define ISR_RX_DMA 0x80000 +#define ISR_CMB_RX 0x100000 +#define ISR_CMB_TX 0x200000 +#define ISR_MAC_RX 0x400000 +#define ISR_MAC_TX 0x800000 +#define ISR_DIS_SMB 0x20000000 +#define ISR_DIS_DMA 0x40000000 + +/* Normal Interrupt mask */ +#define IMR_NORMAL_MASK (\ + ISR_SMB |\ + ISR_GPHY |\ + ISR_PHY_LINKDOWN|\ + ISR_DMAR_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_CMB_TX |\ + ISR_CMB_RX) + +/* Debug Interrupt Mask (enable all interrupt) */ +#define IMR_DEBUG_MASK (\ + ISR_SMB |\ + ISR_TIMER |\ + ISR_MANUAL |\ + ISR_RXF_OV |\ + ISR_RFD_UNRUN |\ + ISR_RRD_OV |\ + ISR_TXF_UNRUN |\ + ISR_LINK |\ + ISR_CMB_TX |\ + ISR_CMB_RX |\ + ISR_RX_PKT |\ + ISR_TX_PKT |\ + ISR_MAC_RX |\ + ISR_MAC_TX) + +#define MEDIA_TYPE_1000M_FULL 1 +#define MEDIA_TYPE_100M_FULL 2 +#define MEDIA_TYPE_100M_HALF 3 +#define MEDIA_TYPE_10M_FULL 4 +#define MEDIA_TYPE_10M_HALF 5 + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* All but 1000-Half */ + +#define MAX_JUMBO_FRAME_SIZE 10240 + +#define ATL1_EEDUMP_LEN 48 + +/* Statistics counters collected by the MAC */ +struct stats_msg_block { + /* rx */ + u32 rx_ok; /* good RX packets */ + u32 rx_bcast; /* good RX broadcast packets */ + u32 rx_mcast; /* good RX multicast packets */ + u32 rx_pause; /* RX pause frames */ + u32 rx_ctrl; /* RX control packets other than pause frames */ + u32 rx_fcs_err; /* RX packets with bad FCS */ + u32 rx_len_err; /* RX packets with length != actual size */ + u32 rx_byte_cnt; /* good bytes received. 
FCS is NOT included */ + u32 rx_runt; /* RX packets < 64 bytes with good FCS */ + u32 rx_frag; /* RX packets < 64 bytes with bad FCS */ + u32 rx_sz_64; /* 64 byte RX packets */ + u32 rx_sz_65_127; + u32 rx_sz_128_255; + u32 rx_sz_256_511; + u32 rx_sz_512_1023; + u32 rx_sz_1024_1518; + u32 rx_sz_1519_max; /* 1519 byte to MTU RX packets */ + u32 rx_sz_ov; /* truncated RX packets > MTU */ + u32 rx_rxf_ov; /* frames dropped due to RX FIFO overflow */ + u32 rx_rrd_ov; /* frames dropped due to RRD overflow */ + u32 rx_align_err; /* alignment errors */ + u32 rx_bcast_byte_cnt; /* RX broadcast bytes, excluding FCS */ + u32 rx_mcast_byte_cnt; /* RX multicast bytes, excluding FCS */ + u32 rx_err_addr; /* packets dropped due to address filtering */ + + /* tx */ + u32 tx_ok; /* good TX packets */ + u32 tx_bcast; /* good TX broadcast packets */ + u32 tx_mcast; /* good TX multicast packets */ + u32 tx_pause; /* TX pause frames */ + u32 tx_exc_defer; /* TX packets deferred excessively */ + u32 tx_ctrl; /* TX control frames, excluding pause frames */ + u32 tx_defer; /* TX packets deferred */ + u32 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */ + u32 tx_sz_64; /* 64 byte TX packets */ + u32 tx_sz_65_127; + u32 tx_sz_128_255; + u32 tx_sz_256_511; + u32 tx_sz_512_1023; + u32 tx_sz_1024_1518; + u32 tx_sz_1519_max; /* 1519 byte to MTU TX packets */ + u32 tx_1_col; /* packets TX after a single collision */ + u32 tx_2_col; /* packets TX after multiple collisions */ + u32 tx_late_col; /* TX packets with late collisions */ + u32 tx_abort_col; /* TX packets aborted w/excessive collisions */ + u32 tx_underrun; /* TX packets aborted due to TX FIFO underrun + * or TRD FIFO underrun */ + u32 tx_rd_eop; /* reads beyond the EOP into the next frame + * when TRD was not written timely */ + u32 tx_len_err; /* TX packets where length != actual size */ + u32 tx_trunc; /* TX packets truncated due to size > MTU */ + u32 tx_bcast_byte; /* broadcast bytes transmitted, excluding FCS */ + u32 tx_mcast_byte; /* multicast bytes transmitted, excluding FCS */ + u32 smb_updated; /* 1: SMB Updated. This is used by software to + * indicate the statistics update. Software + * should clear this bit after retrieving the + * statistics information. */ +}; + +/* Coalescing Message Block */ +struct coals_msg_block { + u32 int_stats; /* interrupt status */ + u16 rrd_prod_idx; /* TRD Producer Index. */ + u16 rfd_cons_idx; /* RFD Consumer Index. */ + u16 update; /* Selene sets this bit every time it DMAs the + * CMB to host memory. Software should clear + * this bit when CMB info is processed. */ + u16 tpd_cons_idx; /* TPD Consumer Index. 
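+ * Written by the hardware along with the rest of the CMB.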
*/ +}; + +/* RRD descriptor */ +struct rx_return_desc { + u8 num_buf; /* Number of RFD buffers used by the received packet */ + u8 resved; + u16 buf_indx; /* RFD Index of the first buffer */ + union { + u32 valid; + struct { + u16 rx_chksum; + u16 pkt_size; + } xsum_sz; + } xsz; + + u16 pkt_flg; /* Packet flags */ + u16 err_flg; /* Error flags */ + u16 resved2; + u16 vlan_tag; /* VLAN TAG */ +}; + +#define PACKET_FLAG_ETH_TYPE 0x0080 +#define PACKET_FLAG_VLAN_INS 0x0100 +#define PACKET_FLAG_ERR 0x0200 +#define PACKET_FLAG_IPV4 0x0400 +#define PACKET_FLAG_UDP 0x0800 +#define PACKET_FLAG_TCP 0x1000 +#define PACKET_FLAG_BCAST 0x2000 +#define PACKET_FLAG_MCAST 0x4000 +#define PACKET_FLAG_PAUSE 0x8000 + +#define ERR_FLAG_CRC 0x0001 +#define ERR_FLAG_CODE 0x0002 +#define ERR_FLAG_DRIBBLE 0x0004 +#define ERR_FLAG_RUNT 0x0008 +#define ERR_FLAG_OV 0x0010 +#define ERR_FLAG_TRUNC 0x0020 +#define ERR_FLAG_IP_CHKSUM 0x0040 +#define ERR_FLAG_L4_CHKSUM 0x0080 +#define ERR_FLAG_LEN 0x0100 +#define ERR_FLAG_DES_ADDR 0x0200 + +/* RFD descriptor */ +struct rx_free_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 buf_len; /* Size of the receive buffer in host memory */ + u16 coalese; /* Update consumer index to host after the + * reception of this frame */ + /* __attribute__ ((packed)) is required */ +} __attribute__ ((packed)); + +/* + * The L1 transmit packet descriptor is comprised of four 32-bit words. + * + * 31 0 + * +---------------------------------------+ + * | Word 0: Buffer addr lo | + * +---------------------------------------+ + * | Word 1: Buffer addr hi | + * +---------------------------------------+ + * | Word 2 | + * +---------------------------------------+ + * | Word 3 | + * +---------------------------------------+ + * + * Words 0 and 1 combine to form a 64-bit buffer address. + * + * Word 2 is self explanatory in the #define block below. + * + * Word 3 has two forms, depending upon the state of bits 3 and 4. + * If bits 3 and 4 are both zero, then bits 14:31 are unused by the + * hardware. Otherwise, if either bit 3 or 4 is set, the definition + * of bits 14:31 vary according to the following depiction. 
+ * + * 0 End of packet 0 End of packet + * 1 Coalesce 1 Coalesce + * 2 Insert VLAN tag 2 Insert VLAN tag + * 3 Custom csum enable = 0 3 Custom csum enable = 1 + * 4 Segment enable = 1 4 Segment enable = 0 + * 5 Generate IP checksum 5 Generate IP checksum + * 6 Generate TCP checksum 6 Generate TCP checksum + * 7 Generate UDP checksum 7 Generate UDP checksum + * 8 VLAN tagged 8 VLAN tagged + * 9 Ethernet frame type 9 Ethernet frame type + * 10-+ 10-+ + * 11 | IP hdr length (10:13) 11 | IP hdr length (10:13) + * 12 | (num 32-bit words) 12 | (num 32-bit words) + * 13-+ 13-+ + * 14-+ 14 Unused + * 15 | TCP hdr length (14:17) 15 Unused + * 16 | (num 32-bit words) 16-+ + * 17-+ 17 | + * 18 Header TPD flag 18 | + * 19-+ 19 | Payload offset + * 20 | 20 | (16:23) + * 21 | 21 | + * 22 | 22 | + * 23 | 23-+ + * 24 | 24-+ + * 25 | MSS (19:31) 25 | + * 26 | 26 | + * 27 | 27 | Custom csum offset + * 28 | 28 | (24:31) + * 29 | 29 | + * 30 | 30 | + * 31-+ 31-+ + */ + +/* tpd word 2 */ +#define TPD_BUFLEN_MASK 0x3FFF +#define TPD_BUFLEN_SHIFT 0 +#define TPD_DMAINT_MASK 0x0001 +#define TPD_DMAINT_SHIFT 14 +#define TPD_PKTNT_MASK 0x0001 +#define TPD_PKTINT_SHIFT 15 +#define TPD_VLANTAG_MASK 0xFFFF +#define TPD_VLANTAG_SHIFT 16 + +/* tpd word 3 bits 0:13 */ +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 0 +#define TPD_COALESCE_MASK 0x0001 +#define TPD_COALESCE_SHIFT 1 +#define TPD_INS_VL_TAG_MASK 0x0001 +#define TPD_INS_VL_TAG_SHIFT 2 +#define TPD_CUST_CSUM_EN_MASK 0x0001 +#define TPD_CUST_CSUM_EN_SHIFT 3 +#define TPD_SEGMENT_EN_MASK 0x0001 +#define TPD_SEGMENT_EN_SHIFT 4 +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 5 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 6 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 7 +#define TPD_VL_TAGGED_MASK 0x0001 +#define TPD_VL_TAGGED_SHIFT 8 +#define TPD_ETHTYPE_MASK 0x0001 +#define TPD_ETHTYPE_SHIFT 9 +#define TPD_IPHL_MASK 0x000F +#define TPD_IPHL_SHIFT 10 + +/* tpd word 3 bits 14:31 if segment enabled */ +#define TPD_TCPHDRLEN_MASK 0x000F +#define TPD_TCPHDRLEN_SHIFT 14 +#define TPD_HDRFLAG_MASK 0x0001 +#define TPD_HDRFLAG_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 19 + +/* tpd word 3 bits 16:31 if custom csum enabled */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 16 +#define TPD_CCSUMOFFSET_MASK 0x00FF +#define TPD_CCSUMOFFSET_SHIFT 24 + +struct tx_packet_desc { + __le64 buffer_addr; + __le32 word2; + __le32 word3; +}; + +/* DMA Order Settings */ +enum atl1_dma_order { + atl1_dma_ord_in = 1, + atl1_dma_ord_enh = 2, + atl1_dma_ord_out = 4 +}; + +enum atl1_dma_rcb { + atl1_rcb_64 = 0, + atl1_rcb_128 = 1 +}; + +enum atl1_dma_req_block { + atl1_dma_req_128 = 0, + atl1_dma_req_256 = 1, + atl1_dma_req_512 = 2, + atl1_dma_req_1024 = 3, + atl1_dma_req_2048 = 4, + atl1_dma_req_4096 = 5 +}; + +#define ATL1_MAX_INTR 3 +#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */ + +#define ATL1_DEFAULT_TPD 256 +#define ATL1_MAX_TPD 1024 +#define ATL1_MIN_TPD 64 +#define ATL1_DEFAULT_RFD 512 +#define ATL1_MIN_RFD 128 +#define ATL1_MAX_RFD 2048 +#define ATL1_REG_COUNT 1538 + +#define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) +#define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc) +#define ATL1_TPD_DESC(R, i) ATL1_GET_DESC(R, i, struct tx_packet_desc) +#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc) + +/* + * atl1_ring_header represents a single, contiguous block of DMA space + * mapped for the three descriptor rings (tpd, rfd, rrd) and the two + * 
message blocks (cmb, smb) described below + */ +struct atl1_ring_header { + void *desc; /* virtual address */ + dma_addr_t dma; /* physical address*/ + unsigned int size; /* length in bytes */ +}; + +/* + * atl1_buffer is wrapper around a pointer to a socket buffer + * so a DMA handle can be stored along with the skb + */ +struct atl1_buffer { + struct sk_buff *skb; /* socket buffer */ + u16 length; /* rx buffer length */ + u16 alloced; /* 1 if skb allocated */ + dma_addr_t dma; +}; + +/* transmit packet descriptor (tpd) ring */ +struct atl1_tpd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 hw_idx; /* hardware index */ + atomic_t next_to_clean; + atomic_t next_to_use; + struct atl1_buffer *buffer_info; +}; + +/* receive free descriptor (rfd) ring */ +struct atl1_rfd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + atomic_t next_to_use; + u16 next_to_clean; + struct atl1_buffer *buffer_info; +}; + +/* receive return descriptor (rrd) ring */ +struct atl1_rrd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + unsigned int size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + atomic_t next_to_clean; +}; + +/* coalescing message block (cmb) */ +struct atl1_cmb { + struct coals_msg_block *cmb; + dma_addr_t dma; +}; + +/* statistics message block (smb) */ +struct atl1_smb { + struct stats_msg_block *smb; + dma_addr_t dma; +}; + +/* Statistics counters */ +struct atl1_sft_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; + u64 multicast; + u64 collisions; + u64 rx_errors; + u64 rx_length_errors; + u64 rx_crc_errors; + u64 rx_frame_errors; + u64 rx_fifo_errors; + u64 rx_missed_errors; + u64 tx_errors; + u64 tx_fifo_errors; + u64 tx_aborted_errors; + u64 tx_window_errors; + u64 tx_carrier_errors; + u64 tx_pause; /* TX pause frames */ + u64 excecol; /* TX packets w/ excessive collisions */ + u64 deffer; /* TX packets deferred */ + u64 scc; /* packets TX after a single collision */ + u64 mcc; /* packets TX after multiple collisions */ + u64 latecol; /* TX packets w/ late collisions */ + u64 tx_underun; /* TX packets aborted due to TX FIFO underrun + * or TRD FIFO underrun */ + u64 tx_trunc; /* TX packets truncated due to size > MTU */ + u64 rx_pause; /* num Pause packets received. */ + u64 rx_rrd_ov; + u64 rx_trunc; +}; + +/* hardware structure */ +struct atl1_hw { + u8 __iomem *hw_addr; + struct atl1_adapter *back; + enum atl1_dma_order dma_ord; + enum atl1_dma_rcb rcb_value; + enum atl1_dma_req_block dmar_block; + enum atl1_dma_req_block dmaw_block; + u8 preamble_len; + u8 max_retry; + u8 jam_ipg; /* IPG to start JAM for collision based flow + * control in half-duplex mode. In units of + * 8-bit time */ + u8 ipgt; /* Desired back to back inter-packet gap. + * The default is 96-bit time */ + u8 min_ifg; /* Minimum number of IFG to enforce in between + * receive frames. Frame gap below such IFP + * is dropped */ + u8 ipgr1; /* 64bit Carrier-Sense window */ + u8 ipgr2; /* 96-bit IPG window */ + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned + * burst. 
Each TPD is 16 bytes long */ + u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned + * burst. Each RFD is 12 bytes long */ + u8 rfd_fetch_gap; + u8 rrd_burst; /* Threshold number of RRDs that can be retired + * in a burst. Each RRD is 16 bytes long */ + u8 tpd_fetch_th; + u8 tpd_fetch_gap; + u16 tx_jumbo_task_th; + u16 txf_burst; /* Number of data bytes to read in a cache- + * aligned burst. Each SRAM entry is 8 bytes */ + u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN + * packets should add 4 bytes */ + u16 rx_jumbo_lkah; + u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after + * every 512ns passes. */ + u16 lcol; /* Collision Window */ + + u16 cmb_tpd; + u16 cmb_rrd; + u16 cmb_rx_timer; + u16 cmb_tx_timer; + u32 smb_timer; + u16 media_type; + u16 autoneg_advertised; + + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u32 max_frame_size; + u32 min_frame_size; + + u16 dev_rev; + + /* spi flash */ + u8 flash_vendor; + + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + + bool phy_configured; +}; + +struct atl1_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + + struct atl1_sft_stats soft_stats; + struct vlan_group *vlgrp; + u32 rx_buffer_len; + u32 wol; + u16 link_speed; + u16 link_duplex; + spinlock_t lock; + struct work_struct tx_timeout_task; + struct work_struct link_chg_task; + struct work_struct pcie_dma_to_rst_task; + + struct timer_list phy_config_timer; + bool phy_timer_pending; + + /* all descriptor rings' memory */ + struct atl1_ring_header ring_header; + + /* TX */ + struct atl1_tpd_ring tpd_ring; + spinlock_t mb_lock; + + /* RX */ + struct atl1_rfd_ring rfd_ring; + struct atl1_rrd_ring rrd_ring; + u64 hw_csum_err; + u64 hw_csum_good; + u32 msg_enable; + u16 imt; /* interrupt moderator timer (2us resolution) */ + u16 ict; /* interrupt clear timer (2us resolution */ + struct mii_if_info mii; /* MII interface info */ + + u32 bd_number; /* board number */ + bool pci_using_64; + struct atl1_hw hw; + struct atl1_smb smb; + struct atl1_cmb cmb; +}; + +#endif /* ATL1_H */ diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c new file mode 100644 index 0000000..5c5b153 --- /dev/null +++ b/drivers/net/atlx/atl2.c @@ -0,0 +1,3126 @@ +/* + * Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved. + * Copyright(c) 2007 - 2008 Chris Snook + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atl2.h" + +#define ATL2_DRV_VERSION "2.2.3" + +static char atl2_driver_name[] = "atl2"; +static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver"; +static char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation."; +static char atl2_driver_version[] = ATL2_DRV_VERSION; + +MODULE_AUTHOR("Atheros Corporation , Chris Snook "); +MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ATL2_DRV_VERSION); + +/* + * atl2_pci_tbl - PCI Device ID Table + */ +static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)}, + /* required last entry */ + {0,} +}; +MODULE_DEVICE_TABLE(pci, atl2_pci_tbl); + +static void atl2_set_ethtool_ops(struct net_device *netdev); + +static void atl2_check_options(struct atl2_adapter *adapter); + +/* + * atl2_sw_init - Initialize general software structures (struct atl2_adapter) + * @adapter: board private structure to initialize + * + * atl2_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + */ +static int __devinit atl2_sw_init(struct atl2_adapter *adapter) +{ + struct atl2_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + adapter->wol = 0; + adapter->ict = 50000; /* ~100ms */ + adapter->link_speed = SPEED_0; /* hardware init */ + adapter->link_duplex = FULL_DUPLEX; + + hw->phy_configured = false; + hw->preamble_len = 7; + hw->ipgt = 0x60; + hw->min_ifg = 0x50; + hw->ipgr1 = 0x40; + hw->ipgr2 = 0x60; + hw->retry_buf = 2; + hw->max_retry = 0xf; + hw->lcol = 0x37; + hw->jam_ipg = 7; + hw->fc_rxd_hi = 0; + hw->fc_rxd_lo = 0; + hw->max_frame_size = adapter->netdev->mtu; + + spin_lock_init(&adapter->stats_lock); + + set_bit(__ATL2_DOWN, &adapter->flags); + + return 0; +} + +/* + * atl2_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
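+ * The multicast hash table is cleared and rebuilt from scratch on + * every call.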
+ */ +static void atl2_set_multi(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u32 rctl; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + rctl = ATL2_READ_REG(hw, REG_MAC_CTRL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= MAC_CTRL_PROMIS_EN; + } else if (netdev->flags & IFF_ALLMULTI) { + rctl |= MAC_CTRL_MC_ALL_EN; + rctl &= ~MAC_CTRL_PROMIS_EN; + } else + rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + + ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl); + + /* clear the old settings from the multicast hash table */ + ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + /* comoute mc addresses' hash value ,and put it into hash table */ + netdev_for_each_mc_addr(mc_ptr, netdev) { + hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr); + atl2_hash_set(hw, hash_value); + } +} + +static void init_ring_ptrs(struct atl2_adapter *adapter) +{ + /* Read / Write Ptr Initialize: */ + adapter->txd_write_ptr = 0; + atomic_set(&adapter->txd_read_ptr, 0); + + adapter->rxd_read_ptr = 0; + adapter->rxd_write_ptr = 0; + + atomic_set(&adapter->txs_write_ptr, 0); + adapter->txs_next_clear = 0; +} + +/* + * atl2_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. + */ +static int atl2_configure(struct atl2_adapter *adapter) +{ + struct atl2_hw *hw = &adapter->hw; + u32 value; + + /* clear interrupt status */ + ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff); + + /* set MAC Address */ + value = (((u32)hw->mac_addr[2]) << 24) | + (((u32)hw->mac_addr[3]) << 16) | + (((u32)hw->mac_addr[4]) << 8) | + (((u32)hw->mac_addr[5])); + ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value); + value = (((u32)hw->mac_addr[0]) << 8) | + (((u32)hw->mac_addr[1])); + ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value); + + /* HI base address */ + ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, + (u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32)); + + /* LO base address */ + ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO, + (u32)(adapter->txd_dma & 0x00000000ffffffffULL)); + ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO, + (u32)(adapter->txs_dma & 0x00000000ffffffffULL)); + ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO, + (u32)(adapter->rxd_dma & 0x00000000ffffffffULL)); + + /* element count */ + ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4)); + ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size); + ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM, (u16)adapter->rxd_ring_size); + + /* config Internal SRAM */ +/* + ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end); + ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end); +*/ + + /* config IPG/IFG */ + value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) << + MAC_IPG_IFG_IPGT_SHIFT) | + (((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) << + MAC_IPG_IFG_MIFG_SHIFT) | + (((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) << + MAC_IPG_IFG_IPGR1_SHIFT)| + (((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) << + MAC_IPG_IFG_IPGR2_SHIFT); + ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value); + + /* config Half-Duplex Control */ + value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | + (((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) << + MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | + MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | + (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | + (((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) << + 
MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); + ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value); + + /* set Interrupt Moderator Timer */ + ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt); + ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN); + + /* set Interrupt Clear Timer */ + ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict); + + /* set MTU */ + ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu + + ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE); + + /* 1590 */ + ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177); + + /* flow control */ + ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi); + ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo); + + /* Init mailbox */ + ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr); + ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr); + + /* enable DMA read/write */ + ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN); + ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN); + + value = ATL2_READ_REG(&adapter->hw, REG_ISR); + if ((value & ISR_PHY_LINKDOWN) != 0) + value = 1; /* config failed */ + else + value = 0; + + /* clear all interrupt status */ + ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff); + ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0); + return value; +} + +/* + * atl2_setup_ring_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + u8 offset = 0; + + /* real ring DMA buffer */ + adapter->ring_size = size = + adapter->txd_ring_size * 1 + 7 + /* dword align */ + adapter->txs_ring_size * 4 + 7 + /* dword align */ + adapter->rxd_ring_size * 1536 + 127; /* 128bytes align */ + + adapter->ring_vir_addr = pci_alloc_consistent(pdev, size, + &adapter->ring_dma); + if (!adapter->ring_vir_addr) + return -ENOMEM; + memset(adapter->ring_vir_addr, 0, adapter->ring_size); + + /* Init TXD Ring */ + adapter->txd_dma = adapter->ring_dma ; + offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0; + adapter->txd_dma += offset; + adapter->txd_ring = (struct tx_pkt_header *) (adapter->ring_vir_addr + + offset); + + /* Init TXS Ring */ + adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size; + offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0; + adapter->txs_dma += offset; + adapter->txs_ring = (struct tx_pkt_status *) + (((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset)); + + /* Init RXD Ring */ + adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4; + offset = (adapter->rxd_dma & 127) ? 
+ (128 - (adapter->rxd_dma & 127)) : 0; + if (offset > 7) + offset -= 8; + else + offset += (128 - 8); + + adapter->rxd_dma += offset; + adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) + + (adapter->txs_ring_size * 4 + offset)); + +/* + * Read / Write Ptr Initialize: + * init_ring_ptrs(adapter); + */ + return 0; +} + +/* + * atl2_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static inline void atl2_irq_enable(struct atl2_adapter *adapter) +{ + ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); + ATL2_WRITE_FLUSH(&adapter->hw); +} + +/* + * atl2_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static inline void atl2_irq_disable(struct atl2_adapter *adapter) +{ + ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0); + ATL2_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +#ifdef NETIF_F_HW_VLAN_TX +static void atl2_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + u32 ctrl; + + atl2_irq_disable(adapter); + adapter->vlgrp = grp; + + if (grp) { + /* enable VLAN tag insert/strip */ + ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL); + ctrl |= MAC_CTRL_RMV_VLAN; + ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl); + } else { + /* disable VLAN tag insert/strip */ + ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL); + ctrl &= ~MAC_CTRL_RMV_VLAN; + ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl); + } + + atl2_irq_enable(adapter); +} + +static void atl2_restore_vlan(struct atl2_adapter *adapter) +{ + atl2_vlan_rx_register(adapter->netdev, adapter->vlgrp); +} +#endif + +static void atl2_intr_rx(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rx_desc *rxd; + struct sk_buff *skb; + + do { + rxd = adapter->rxd_ring+adapter->rxd_write_ptr; + if (!rxd->status.update) + break; /* end of tx */ + + /* clear this flag at once */ + rxd->status.update = 0; + + if (rxd->status.ok && rxd->status.pkt_size >= 60) { + int rx_size = (int)(rxd->status.pkt_size - 4); + /* alloc new buffer */ + skb = netdev_alloc_skb_ip_align(netdev, rx_size); + if (NULL == skb) { + printk(KERN_WARNING + "%s: Mem squeeze, deferring packet.\n", + netdev->name); + /* + * Check that some rx space is free. If not, + * free one and mark stats->rx_dropped++. + */ + netdev->stats.rx_dropped++; + break; + } + skb->dev = netdev; + memcpy(skb->data, rxd->packet, rx_size); + skb_put(skb, rx_size); + skb->protocol = eth_type_trans(skb, netdev); +#ifdef NETIF_F_HW_VLAN_TX + if (adapter->vlgrp && (rxd->status.vlan)) { + u16 vlan_tag = (rxd->status.vtag>>4) | + ((rxd->status.vtag&7) << 13) | + ((rxd->status.vtag&8) << 9); + vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); + } else +#endif + netif_rx(skb); + netdev->stats.rx_bytes += rx_size; + netdev->stats.rx_packets++; + } else { + netdev->stats.rx_errors++; + + if (rxd->status.ok && rxd->status.pkt_size <= 60) + netdev->stats.rx_length_errors++; + if (rxd->status.mcast) + netdev->stats.multicast++; + if (rxd->status.crc) + netdev->stats.rx_crc_errors++; + if (rxd->status.align) + netdev->stats.rx_frame_errors++; + } + + /* advance write ptr */ + if (++adapter->rxd_write_ptr == adapter->rxd_ring_size) + adapter->rxd_write_ptr = 0; + } while (1); + + /* update mailbox? 
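+ * yes: advance the read index so the hardware can reuse the RX + * descriptors the host has already consumed.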
*/ + adapter->rxd_read_ptr = adapter->rxd_write_ptr; + ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr); +} + +static void atl2_intr_tx(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 txd_read_ptr; + u32 txs_write_ptr; + struct tx_pkt_status *txs; + struct tx_pkt_header *txph; + int free_hole = 0; + + do { + txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr); + txs = adapter->txs_ring + txs_write_ptr; + if (!txs->update) + break; /* tx stop here */ + + free_hole = 1; + txs->update = 0; + + if (++txs_write_ptr == adapter->txs_ring_size) + txs_write_ptr = 0; + atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr); + + txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr); + txph = (struct tx_pkt_header *) + (((u8 *)adapter->txd_ring) + txd_read_ptr); + + if (txph->pkt_size != txs->pkt_size) { + struct tx_pkt_status *old_txs = txs; + printk(KERN_WARNING + "%s: txs packet size not consistent with txd" + " txd_:0x%08x, txs_:0x%08x!\n", + adapter->netdev->name, + *(u32 *)txph, *(u32 *)txs); + printk(KERN_WARNING + "txd read ptr: 0x%x\n", + txd_read_ptr); + txs = adapter->txs_ring + txs_write_ptr; + printk(KERN_WARNING + "txs-behind:0x%08x\n", + *(u32 *)txs); + if (txs_write_ptr < 2) { + txs = adapter->txs_ring + + (adapter->txs_ring_size + + txs_write_ptr - 2); + } else { + txs = adapter->txs_ring + (txs_write_ptr - 2); + } + printk(KERN_WARNING + "txs-before:0x%08x\n", + *(u32 *)txs); + txs = old_txs; + } + + /* 4for TPH */ + txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3); + if (txd_read_ptr >= adapter->txd_ring_size) + txd_read_ptr -= adapter->txd_ring_size; + + atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr); + + /* tx statistics: */ + if (txs->ok) { + netdev->stats.tx_bytes += txs->pkt_size; + netdev->stats.tx_packets++; + } + else + netdev->stats.tx_errors++; + + if (txs->defer) + netdev->stats.collisions++; + if (txs->abort_col) + netdev->stats.tx_aborted_errors++; + if (txs->late_col) + netdev->stats.tx_window_errors++; + if (txs->underun) + netdev->stats.tx_fifo_errors++; + } while (1); + + if (free_hole) { + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) + netif_wake_queue(adapter->netdev); + } +} + +static void atl2_check_for_link(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + + spin_lock(&adapter->stats_lock); + atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->stats_lock); + + /* notify upper layer link down ASAP */ + if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + printk(KERN_INFO "%s: %s NIC Link is Down\n", + atl2_driver_name, netdev->name); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +static inline void atl2_clear_phy_int(struct atl2_adapter *adapter) +{ + u16 phy_data; + spin_lock(&adapter->stats_lock); + atl2_read_phy_reg(&adapter->hw, 19, &phy_data); + spin_unlock(&adapter->stats_lock); +} + +/* + * atl2_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * @pt_regs: CPU registers structure + */ +static irqreturn_t atl2_intr(int irq, void *data) +{ + struct atl2_adapter *adapter = netdev_priv(data); + struct atl2_hw *hw = &adapter->hw; + u32 status; + + status = ATL2_READ_REG(hw, REG_ISR); + if (0 == 
status) + return IRQ_NONE; + + /* link event */ + if (status & ISR_PHY) + atl2_clear_phy_int(adapter); + + /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ + ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); + + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + if (netif_running(adapter->netdev)) { /* reset MAC */ + ATL2_WRITE_REG(hw, REG_ISR, 0); + ATL2_WRITE_REG(hw, REG_IMR, 0); + ATL2_WRITE_FLUSH(hw); + schedule_work(&adapter->reset_task); + return IRQ_HANDLED; + } + } + + /* check if DMA read/write error? */ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + ATL2_WRITE_REG(hw, REG_ISR, 0); + ATL2_WRITE_REG(hw, REG_IMR, 0); + ATL2_WRITE_FLUSH(hw); + schedule_work(&adapter->reset_task); + return IRQ_HANDLED; + } + + /* link event */ + if (status & (ISR_PHY | ISR_MANUAL)) { + adapter->netdev->stats.tx_carrier_errors++; + atl2_check_for_link(adapter); + } + + /* transmit event */ + if (status & ISR_TX_EVENT) + atl2_intr_tx(adapter); + + /* rx exception */ + if (status & ISR_RX_EVENT) + atl2_intr_rx(adapter); + + /* re-enable Interrupt */ + ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0); + return IRQ_HANDLED; +} + +static int atl2_request_irq(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int flags, err = 0; + + flags = IRQF_SHARED; + adapter->have_msi = true; + err = pci_enable_msi(adapter->pdev); + if (err) + adapter->have_msi = false; + + if (adapter->have_msi) + flags &= ~IRQF_SHARED; + + return request_irq(adapter->pdev->irq, atl2_intr, flags, netdev->name, + netdev); +} + +/* + * atl2_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl2_free_ring_resources(struct atl2_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr, + adapter->ring_dma); +} + +/* + * atl2_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
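+ * Everything set up here is torn down again in atl2_close().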
+ */ +static int atl2_open(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + int err; + u32 val; + + /* disallow open during test */ + if (test_bit(__ATL2_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate transmit descriptors */ + err = atl2_setup_ring_resources(adapter); + if (err) + return err; + + err = atl2_init_hw(&adapter->hw); + if (err) { + err = -EIO; + goto err_init_hw; + } + + /* hardware has been reset, we need to reload some things */ + atl2_set_multi(netdev); + init_ring_ptrs(adapter); + +#ifdef NETIF_F_HW_VLAN_TX + atl2_restore_vlan(adapter); +#endif + + if (atl2_configure(adapter)) { + err = -EIO; + goto err_config; + } + + err = atl2_request_irq(adapter); + if (err) + goto err_req_irq; + + clear_bit(__ATL2_DOWN, &adapter->flags); + + mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4*HZ)); + + val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL); + ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, + val | MASTER_CTRL_MANUAL_INT); + + atl2_irq_enable(adapter); + + return 0; + +err_init_hw: +err_req_irq: +err_config: + atl2_free_ring_resources(adapter); + atl2_reset_hw(&adapter->hw); + + return err; +} + +static void atl2_down(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__ATL2_DOWN, &adapter->flags); + + netif_tx_disable(netdev); + + /* reset MAC to disable all RX/TX */ + atl2_reset_hw(&adapter->hw); + msleep(1); + + atl2_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_config_timer); + clear_bit(0, &adapter->cfg_phy); + + netif_carrier_off(netdev); + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; +} + +static void atl2_free_irq(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + +#ifdef CONFIG_PCI_MSI + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); +#endif +} + +/* + * atl2_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int atl2_close(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags)); + + atl2_down(adapter); + atl2_free_irq(adapter); + atl2_free_ring_resources(adapter); + + return 0; +} + +static inline int TxsFreeUnit(struct atl2_adapter *adapter) +{ + u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr); + + return (adapter->txs_next_clear >= txs_write_ptr) ? + (int) (adapter->txs_ring_size - adapter->txs_next_clear + + txs_write_ptr - 1) : + (int) (txs_write_ptr - adapter->txs_next_clear - 1); +} + +static inline int TxdFreeBytes(struct atl2_adapter *adapter) +{ + u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr); + + return (adapter->txd_write_ptr >= txd_read_ptr) ? 
+ (int) (adapter->txd_ring_size - adapter->txd_write_ptr + + txd_read_ptr - 1) : + (int) (txd_read_ptr - adapter->txd_write_ptr - 1); +} + +static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct tx_pkt_header *txph; + u32 offset, copy_len; + int txs_unused; + int txbuf_unused; + + if (test_bit(__ATL2_DOWN, &adapter->flags)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + txs_unused = TxsFreeUnit(adapter); + txbuf_unused = TxdFreeBytes(adapter); + + if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused || + txs_unused < 1) { + /* not enough resources */ + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + offset = adapter->txd_write_ptr; + + txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset); + + *(u32 *)txph = 0; + txph->pkt_size = skb->len; + + offset += 4; + if (offset >= adapter->txd_ring_size) + offset -= adapter->txd_ring_size; + copy_len = adapter->txd_ring_size - offset; + if (copy_len >= skb->len) { + memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len); + offset += ((u32)(skb->len + 3) & ~3); + } else { + memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len); + memcpy((u8 *)adapter->txd_ring, skb->data+copy_len, + skb->len-copy_len); + offset = ((u32)(skb->len-copy_len + 3) & ~3); + } +#ifdef NETIF_F_HW_VLAN_TX + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + u16 vlan_tag = vlan_tx_tag_get(skb); + vlan_tag = (vlan_tag << 4) | + (vlan_tag >> 13) | + ((vlan_tag >> 9) & 0x8); + txph->ins_vlan = 1; + txph->vlan = vlan_tag; + } +#endif + if (offset >= adapter->txd_ring_size) + offset -= adapter->txd_ring_size; + adapter->txd_write_ptr = offset; + + /* clear txs before send */ + adapter->txs_ring[adapter->txs_next_clear].update = 0; + if (++adapter->txs_next_clear == adapter->txs_ring_size) + adapter->txs_next_clear = 0; + + ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX, + (adapter->txd_write_ptr >> 2)); + + mmiowb(); + netdev->trans_start = jiffies; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/* + * atl2_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl2_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + + if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE))) + return -EINVAL; + + /* set MTU */ + if (hw->max_frame_size != new_mtu) { + netdev->mtu = new_mtu; + ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE + + VLAN_SIZE + ETHERNET_FCS_SIZE); + } + + return 0; +} + +/* + * atl2_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl2_set_mac(struct net_device *netdev, void *p) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl2_set_mac_addr(&adapter->hw); + + return 0; +} + +/* + * atl2_mii_ioctl - + * @netdev: + * @ifreq: + 
* @cmd: + */ +static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 0; + break; + case SIOCGMIIREG: + spin_lock_irqsave(&adapter->stats_lock, flags); + if (atl2_read_phy_reg(&adapter->hw, + data->reg_num & 0x1F, &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) + return -EFAULT; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (atl2_write_phy_reg(&adapter->hw, data->reg_num, + data->val_in)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +/* + * atl2_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl2_mii_ioctl(netdev, ifr, cmd); +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif + default: + return -EOPNOTSUPP; + } +} + +/* + * atl2_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl2_tx_timeout(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); +} + +/* + * atl2_watchdog - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl2_watchdog(unsigned long data) +{ + struct atl2_adapter *adapter = (struct atl2_adapter *) data; + + if (!test_bit(__ATL2_DOWN, &adapter->flags)) { + u32 drop_rxd, drop_rxs; + unsigned long flags; + + spin_lock_irqsave(&adapter->stats_lock, flags); + drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV); + drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV); + spin_unlock_irqrestore(&adapter->stats_lock, flags); + + adapter->netdev->stats.rx_over_errors += drop_rxd + drop_rxs; + + /* Reset the timer */ + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 4 * HZ)); + } +} + +/* + * atl2_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl2_phy_config(unsigned long data) +{ + struct atl2_adapter *adapter = (struct atl2_adapter *) data; + struct atl2_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->stats_lock, flags); + atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + spin_unlock_irqrestore(&adapter->stats_lock, flags); + clear_bit(0, &adapter->cfg_phy); +} + +static int atl2_up(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + u32 val; + + /* hardware has been reset, we need to reload some things */ + + err = atl2_init_hw(&adapter->hw); + if (err) { + err = -EIO; + return err; + } + + atl2_set_multi(netdev); + init_ring_ptrs(adapter); + +#ifdef NETIF_F_HW_VLAN_TX + atl2_restore_vlan(adapter); +#endif + + if (atl2_configure(adapter)) { + err = -EIO; + goto err_up; + } + + clear_bit(__ATL2_DOWN, &adapter->flags); + + val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL); + 
ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val | + MASTER_CTRL_MANUAL_INT); + + atl2_irq_enable(adapter); + +err_up: + return err; +} + +static void atl2_reinit_locked(struct atl2_adapter *adapter) +{ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags)) + msleep(1); + atl2_down(adapter); + atl2_up(adapter); + clear_bit(__ATL2_RESETTING, &adapter->flags); +} + +static void atl2_reset_task(struct work_struct *work) +{ + struct atl2_adapter *adapter; + adapter = container_of(work, struct atl2_adapter, reset_task); + + atl2_reinit_locked(adapter); +} + +static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter) +{ + u32 value; + struct atl2_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + /* Config MAC CTRL Register */ + value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY; + + /* duplex */ + if (FULL_DUPLEX == adapter->link_duplex) + value |= MAC_CTRL_DUPLX; + + /* flow control */ + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + + /* PAD & CRC */ + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + + /* preamble length */ + value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << + MAC_CTRL_PRMLEN_SHIFT); + + /* vlan */ + if (adapter->vlgrp) + value |= MAC_CTRL_RMV_VLAN; + + /* filter mode */ + value |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + value |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) + value |= MAC_CTRL_MC_ALL_EN; + + /* half retry buffer */ + value |= (((u32)(adapter->hw.retry_buf & + MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT); + + ATL2_WRITE_REG(hw, REG_MAC_CTRL, value); +} + +static int atl2_check_link(struct atl2_adapter *adapter) +{ + struct atl2_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int ret_val; + u16 speed, duplex, phy_data; + int reconfig = 0; + + /* MII_BMSR must read twise */ + atl2_read_phy_reg(hw, MII_BMSR, &phy_data); + atl2_read_phy_reg(hw, MII_BMSR, &phy_data); + if (!(phy_data&BMSR_LSTATUS)) { /* link down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + u32 value; + /* disable rx */ + value = ATL2_READ_REG(hw, REG_MAC_CTRL); + value &= ~MAC_CTRL_RX_EN; + ATL2_WRITE_REG(hw, REG_MAC_CTRL, value); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + return 0; + } + + /* Link Up */ + ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) + return ret_val; + switch (hw->MediaType) { + case MEDIA_TYPE_100M_FULL: + if (speed != SPEED_100 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_HALF: + if (speed != SPEED_100 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_FULL: + if (speed != SPEED_10 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_HALF: + if (speed != SPEED_10 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + } + /* link result is our setting */ + if (reconfig == 0) { + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl2_setup_mac_ctrl(adapter); + printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n", + atl2_driver_name, netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "Full Duplex" : "Half Duplex"); + } + + if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + return 0; + } + + /* change original link status */ + if (netif_carrier_ok(netdev)) { + u32 value; + /* disable rx */ + value = ATL2_READ_REG(hw, REG_MAC_CTRL); + value &= ~MAC_CTRL_RX_EN; + ATL2_WRITE_REG(hw, REG_MAC_CTRL, value); + + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + + /* auto-neg, insert timer to re-config phy + * (if interval smaller than 5 seconds, something strange) */ + if (!test_bit(__ATL2_DOWN, &adapter->flags)) { + if (!test_and_set_bit(0, &adapter->cfg_phy)) + mod_timer(&adapter->phy_config_timer, + round_jiffies(jiffies + 5 * HZ)); + } + + return 0; +} + +/* + * atl2_link_chg_task - deal with link change event Out of interrupt context + * @netdev: network interface device structure + */ +static void atl2_link_chg_task(struct work_struct *work) +{ + struct atl2_adapter *adapter; + unsigned long flags; + + adapter = container_of(work, struct atl2_adapter, link_chg_task); + + spin_lock_irqsave(&adapter->stats_lock, flags); + atl2_check_link(adapter); + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +static void atl2_setup_pcicmd(struct pci_dev *pdev) +{ + u16 cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + + if (cmd & PCI_COMMAND_INTX_DISABLE) + cmd &= ~PCI_COMMAND_INTX_DISABLE; + if (cmd & PCI_COMMAND_IO) + cmd &= ~PCI_COMMAND_IO; + if (0 == (cmd & PCI_COMMAND_MEMORY)) + cmd |= PCI_COMMAND_MEMORY; + if (0 == (cmd & PCI_COMMAND_MASTER)) + cmd |= PCI_COMMAND_MASTER; + pci_write_config_word(pdev, PCI_COMMAND, cmd); + + /* + * some motherboards BIOS(PXE/EFI) driver may set PME + * while they transfer control to OS (Windows/Linux) + * so we should clear this bit before NIC work normally + */ + pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void atl2_poll_controller(struct net_device *netdev) +{ + disable_irq(netdev->irq); + atl2_intr(netdev->irq, netdev); + enable_irq(netdev->irq); +} +#endif + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops atl2_netdev_ops = { + .ndo_open = atl2_open, + .ndo_stop = atl2_close, + .ndo_start_xmit = atl2_xmit_frame, + .ndo_set_multicast_list = atl2_set_multi, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = atl2_set_mac, + .ndo_change_mtu = atl2_change_mtu, + .ndo_do_ioctl = atl2_ioctl, + .ndo_tx_timeout = atl2_tx_timeout, + .ndo_vlan_rx_register = atl2_vlan_rx_register, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl2_poll_controller, +#endif +}; +#endif + +/* + * atl2_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl2_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl2_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int __devinit atl2_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl2_adapter *adapter; + static int cards_found; + unsigned long mmio_start; + int mmio_len; + int err; + + cards_found = 0; + + err = pci_enable_device(pdev); + if (err) + return err; + + /* + * atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA + * until the kernel has the proper infrastructure to support 64-bit DMA + * on these devices. 
+ */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n"); + goto err_dma; + } + + /* Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl2_driver_name */ + err = pci_request_regions(pdev, atl2_driver_name); + if (err) + goto err_pci_reg; + + /* Enables bus-mastering on the device and calls + * pcibios_set_master to do the needed arch specific settings */ + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct atl2_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + + mmio_start = pci_resource_start(pdev, 0x0); + mmio_len = pci_resource_len(pdev, 0x0); + + adapter->hw.mem_rang = (u32)mmio_len; + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) { + err = -EIO; + goto err_ioremap; + } + + atl2_setup_pcicmd(pdev); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + netdev->netdev_ops = &atl2_netdev_ops; +#else + netdev->change_mtu = atl2_change_mtu; + netdev->hard_start_xmit = atl2_xmit_frame; + netdev->open = atl2_open; + netdev->stop = atl2_close; + netdev->tx_timeout = atl2_tx_timeout; + netdev->set_mac_address = atl2_set_mac; + netdev->do_ioctl = atl2_ioctl; + netdev->set_multicast_list = atl2_set_multi; + netdev->vlan_rx_register = atl2_vlan_rx_register; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = atl2_poll_controller; +#endif +#endif + atl2_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + adapter->bd_number = cards_found; + adapter->pci_using_64 = false; + + /* setup the private structure */ + err = atl2_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + +#ifdef NETIF_F_HW_VLAN_TX + netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); +#endif + + /* Init PHY as early as possible due to power saving issue */ + atl2_phy_init(&adapter->hw); + + /* reset the controller to + * put the device in a known good starting state */ + + if (atl2_reset_hw(&adapter->hw)) { + err = -EIO; + goto err_reset; + } + + /* copy the MAC address out of the EEPROM */ + atl2_read_mac_addr(&adapter->hw); + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); +/* FIXME: do we still need this? */ +#ifdef ETHTOOL_GPERMADDR + memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { +#else + if (!is_valid_ether_addr(netdev->dev_addr)) { +#endif + err = -EIO; + goto err_eeprom; + } + + atl2_check_options(adapter); + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = &atl2_watchdog; + adapter->watchdog_timer.data = (unsigned long) adapter; + + init_timer(&adapter->phy_config_timer); + adapter->phy_config_timer.function = &atl2_phy_config; + adapter->phy_config_timer.data = (unsigned long) adapter; + + INIT_WORK(&adapter->reset_task, atl2_reset_task); + INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task); + + strcpy(netdev->name, "eth%d"); /* ?? 
*/ + err = register_netdev(netdev); + if (err) + goto err_register; + + /* assume we have no link for now */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + cards_found++; + + return 0; + +err_reset: +err_register: +err_sw_init: +err_eeprom: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/* + * atl2_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl2_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +/* FIXME: write the original MAC address back in case it was changed from a + * BIOS-set value, as in atl1 -- CHS */ +static void __devexit atl2_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl2_adapter *adapter = netdev_priv(netdev); + + /* flush_scheduled work may reschedule our watchdog task, so + * explicitly disable watchdog tasks from being rescheduled */ + set_bit(__ATL2_DOWN, &adapter->flags); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_config_timer); + + flush_scheduled_work(); + + unregister_netdev(netdev); + + atl2_force_ps(&adapter->hw); + + iounmap(adapter->hw.hw_addr); + pci_release_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +static int atl2_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + u16 speed, duplex; + u32 ctrl = 0; + u32 wufc = adapter->wol; + +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags)); + atl2_down(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl); + atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl); + if (ctrl & BMSR_LSTATUS) + wufc &= ~ATLX_WUFC_LNKC; + + if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) { + u32 ret_val; + /* get current link speed & duplex */ + ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + printk(KERN_DEBUG + "%s: get speed&duplex error while suspend\n", + atl2_driver_name); + goto wol_dis; + } + + ctrl = 0; + + /* turn on magic packet wol */ + if (wufc & ATLX_WUFC_MAG) + ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); + + /* ignore Link Chg event when Link is up */ + ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl); + + /* Config MAC CTRL Register */ + ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY; + if (FULL_DUPLEX == adapter->link_duplex) + ctrl |= MAC_CTRL_DUPLX; + ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + ctrl |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + ctrl |= (((u32)(adapter->hw.retry_buf & + MAC_CTRL_HALF_LEFT_BUF_MASK)) << + MAC_CTRL_HALF_LEFT_BUF_SHIFT); + if (wufc & ATLX_WUFC_MAG) { + /* magic packet maybe Broadcast&multicast&Unicast */ + ctrl |= MAC_CTRL_BC_EN; + } + + ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl); + + /* pcie patch */ + ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); + ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; + 
ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto suspend_exit; + } + + if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) { + /* link is down, so only LINK CHG WOL event enable */ + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl); + ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0); + + /* pcie patch */ + ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); + ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; + ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); + + hw->phy_configured = false; /* re-init PHY when resume */ + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + + goto suspend_exit; + } + +wol_dis: + /* WOL disabled */ + ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* pcie patch */ + ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); + ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; + ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); + + atl2_force_ps(hw); + hw->phy_configured = false; /* re-init PHY when resume */ + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + +suspend_exit: + if (netif_running(netdev)) + atl2_free_irq(adapter); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +#ifdef CONFIG_PM +static int atl2_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl2_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR + "atl2: Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + + if (netif_running(netdev)) { + err = atl2_request_irq(adapter); + if (err) + return err; + } + + atl2_reset_hw(&adapter->hw); + + if (netif_running(netdev)) + atl2_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void atl2_shutdown(struct pci_dev *pdev) +{ + atl2_suspend(pdev, PMSG_SUSPEND); +} + +static struct pci_driver atl2_driver = { + .name = atl2_driver_name, + .id_table = atl2_pci_tbl, + .probe = atl2_probe, + .remove = __devexit_p(atl2_remove), + /* Power Managment Hooks */ + .suspend = atl2_suspend, +#ifdef CONFIG_PM + .resume = atl2_resume, +#endif + .shutdown = atl2_shutdown, +}; + +/* + * atl2_init_module - Driver Registration Routine + * + * atl2_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init atl2_init_module(void) +{ + printk(KERN_INFO "%s - version %s\n", atl2_driver_string, + atl2_driver_version); + printk(KERN_INFO "%s\n", atl2_copyright); + return pci_register_driver(&atl2_driver); +} +module_init(atl2_init_module); + +/* + * atl2_exit_module - Driver Exit Cleanup Routine + * + * atl2_exit_module is called just before the driver is removed + * from memory. 
+ */ +static void __exit atl2_exit_module(void) +{ + pci_unregister_driver(&atl2_driver); +} +module_exit(atl2_exit_module); + +static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) +{ + struct atl2_adapter *adapter = hw->back; + pci_read_config_word(adapter->pdev, reg, value); +} + +static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) +{ + struct atl2_adapter *adapter = hw->back; + pci_write_config_word(adapter->pdev, reg, *value); +} + +static int atl2_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= hw->autoneg_advertised; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed != SPEED_0) { + ecmd->speed = adapter->link_speed; + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_ENABLE; + return 0; +} + +static int atl2_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + + while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { +#define MY_ADV_MASK (ADVERTISE_10_HALF | \ + ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF| \ + ADVERTISE_100_FULL) + + if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) { + hw->MediaType = MEDIA_TYPE_AUTO_SENSOR; + hw->autoneg_advertised = MY_ADV_MASK; + } else if ((ecmd->advertising & MY_ADV_MASK) == + ADVERTISE_100_FULL) { + hw->MediaType = MEDIA_TYPE_100M_FULL; + hw->autoneg_advertised = ADVERTISE_100_FULL; + } else if ((ecmd->advertising & MY_ADV_MASK) == + ADVERTISE_100_HALF) { + hw->MediaType = MEDIA_TYPE_100M_HALF; + hw->autoneg_advertised = ADVERTISE_100_HALF; + } else if ((ecmd->advertising & MY_ADV_MASK) == + ADVERTISE_10_FULL) { + hw->MediaType = MEDIA_TYPE_10M_FULL; + hw->autoneg_advertised = ADVERTISE_10_FULL; + } else if ((ecmd->advertising & MY_ADV_MASK) == + ADVERTISE_10_HALF) { + hw->MediaType = MEDIA_TYPE_10M_HALF; + hw->autoneg_advertised = ADVERTISE_10_HALF; + } else { + clear_bit(__ATL2_RESETTING, &adapter->flags); + return -EINVAL; + } + ecmd->advertising = hw->autoneg_advertised | + ADVERTISED_TP | ADVERTISED_Autoneg; + } else { + clear_bit(__ATL2_RESETTING, &adapter->flags); + return -EINVAL; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + atl2_down(adapter); + atl2_up(adapter); + } else + atl2_reset_hw(&adapter->hw); + + clear_bit(__ATL2_RESETTING, &adapter->flags); + return 0; +} + +static u32 atl2_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_HW_CSUM) != 0; +} + +static u32 atl2_get_msglevel(struct net_device *netdev) +{ + return 0; +} + +/* + * It's sane for this to be empty, but we might want to take advantage of this. 
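/*
 * Editor's note (illustrative sketch, not part of the patch):
 * atl2_set_settings() above maps the requested ethtool advertising mask to
 * one of the driver's MediaType values, rejecting anything other than
 * "all speeds" or exactly one speed/duplex combination.  The mapping is
 * restated below as a standalone function; the bit values and enum names
 * are placeholders chosen for the demo, not the driver's definitions.
 */
#include <stdio.h>

#define ADV_10_HALF  0x1
#define ADV_10_FULL  0x2
#define ADV_100_HALF 0x4
#define ADV_100_FULL 0x8
#define ADV_ALL      (ADV_10_HALF | ADV_10_FULL | ADV_100_HALF | ADV_100_FULL)

enum media { MEDIA_AUTO, MEDIA_100_FULL, MEDIA_100_HALF,
             MEDIA_10_FULL, MEDIA_10_HALF, MEDIA_INVALID };

/* Mirrors the chain of comparisons in atl2_set_settings(). */
static enum media adv_to_media(unsigned adv)
{
        adv &= ADV_ALL;
        if (adv == ADV_ALL)      return MEDIA_AUTO;
        if (adv == ADV_100_FULL) return MEDIA_100_FULL;
        if (adv == ADV_100_HALF) return MEDIA_100_HALF;
        if (adv == ADV_10_FULL)  return MEDIA_10_FULL;
        if (adv == ADV_10_HALF)  return MEDIA_10_HALF;
        return MEDIA_INVALID;    /* the driver returns -EINVAL here */
}

int main(void)
{
        printf("%d\n", adv_to_media(ADV_ALL));                   /* MEDIA_AUTO     */
        printf("%d\n", adv_to_media(ADV_100_FULL));              /* MEDIA_100_FULL */
        printf("%d\n", adv_to_media(ADV_10_FULL | ADV_10_HALF)); /* invalid        */
        return 0;
}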
+ */ +static void atl2_set_msglevel(struct net_device *netdev, u32 data) +{ +} + +static int atl2_get_regs_len(struct net_device *netdev) +{ +#define ATL2_REGS_LEN 42 + return sizeof(u32) * ATL2_REGS_LEN; +} + +static void atl2_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, sizeof(u32) * ATL2_REGS_LEN); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP); + regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); + regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG); + regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL); + regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); + regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL); + regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT); + regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); + regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE); + regs_buff[9] = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER); + regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS); + regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL); + regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK); + regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL); + regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG); + regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR); + regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4); + regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE); + regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4); + regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); + regs_buff[20] = ATL2_READ_REG(hw, REG_MTU); + regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL); + regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END); + regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI); + regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO); + regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE); + regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO); + regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE); + regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO); + regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM); + regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR); + regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH); + regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW); + regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH); + regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH); + regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX); + regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX); + regs_buff[38] = ATL2_READ_REG(hw, REG_ISR); + regs_buff[39] = ATL2_READ_REG(hw, REG_IMR); + + atl2_read_phy_reg(hw, MII_BMCR, &phy_data); + regs_buff[40] = (u32)phy_data; + atl2_read_phy_reg(hw, MII_BMSR, &phy_data); + regs_buff[41] = (u32)phy_data; +} + +static int atl2_get_eeprom_len(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + if (!atl2_check_eeprom_exist(&adapter->hw)) + return 512; + else + return 0; +} + +static int atl2_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + u32 *eeprom_buff; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EINVAL; + + if (atl2_check_eeprom_exist(hw)) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + 
eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + for (i = first_dword; i < last_dword; i++) { + if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) { + ret_val = -EIO; + goto free; + } + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), + eeprom->len); +free: + kfree(eeprom_buff); + + return ret_val; +} + +static int atl2_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + u32 *eeprom_buff; + u32 *ptr; + int max_len, first_dword, last_dword, ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = 512; + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (u32 *)eeprom_buff; + + if (eeprom->offset & 3) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) + return -EIO; + ptr++; + } + if (((eeprom->offset + eeprom->len) & 3)) { + /* + * need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + if (!atl2_read_eeprom(hw, last_dword * 4, + &(eeprom_buff[last_dword - first_dword]))) + return -EIO; + } + + /* Device's eeprom is always little-endian, word addressable */ + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_dword - first_dword + 1; i++) { + if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) + return -EIO; + } + + kfree(eeprom_buff); + return ret_val; +} + +static void atl2_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, atl2_driver_name, 32); + strncpy(drvinfo->version, atl2_driver_version, 32); + strncpy(drvinfo->fw_version, "L2", 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = atl2_get_regs_len(netdev); + drvinfo->eedump_len = atl2_get_eeprom_len(netdev); +} + +static void atl2_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + + if (adapter->wol & ATLX_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & ATLX_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & ATLX_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & ATLX_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & ATLX_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + return -EOPNOTSUPP; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= ATLX_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= ATLX_WUFC_LNKC; + + return 0; +} + +static int atl2_nway_reset(struct net_device *netdev) +{ + struct atl2_adapter 
*adapter = netdev_priv(netdev); + if (netif_running(netdev)) + atl2_reinit_locked(adapter); + return 0; +} + +static const struct ethtool_ops atl2_ethtool_ops = { + .get_settings = atl2_get_settings, + .set_settings = atl2_set_settings, + .get_drvinfo = atl2_get_drvinfo, + .get_regs_len = atl2_get_regs_len, + .get_regs = atl2_get_regs, + .get_wol = atl2_get_wol, + .set_wol = atl2_set_wol, + .get_msglevel = atl2_get_msglevel, + .set_msglevel = atl2_set_msglevel, + .nway_reset = atl2_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = atl2_get_eeprom_len, + .get_eeprom = atl2_get_eeprom, + .set_eeprom = atl2_set_eeprom, + .get_tx_csum = atl2_get_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, +#endif +}; + +static void atl2_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops); +} + +#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \ + (((a) & 0xff00ff00) >> 8)) +#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16)) +#define SHORTSWAP(a) (((a) << 8) | ((a) >> 8)) + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static s32 atl2_reset_hw(struct atl2_hw *hw) +{ + u32 icr; + u16 pci_cfg_cmd_word; + int i; + + /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ + atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word); + if ((pci_cfg_cmd_word & + (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) != + (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) { + pci_cfg_cmd_word |= + (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER); + atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word); + } + + /* Clear Interrupt mask to stop board from generating + * interrupts & Clear any pending interrupt events + */ + /* FIXME */ + /* ATL2_WRITE_REG(hw, REG_IMR, 0); */ + /* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */ + + /* Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. 
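/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * LBYTESWAP/LONGSWAP/SHORTSWAP macros defined above reverse byte order:
 * LBYTESWAP swaps the bytes inside each 16-bit half, LONGSWAP then swaps
 * the halves (a full 32-bit byte reversal), and SHORTSWAP reverses a
 * 16-bit value.  The standalone check below verifies that behaviour.
 */
#include <stdio.h>
#include <stdint.h>

#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | (((a) & 0xff00ff00) >> 8))
#define LONGSWAP(a)  ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
#define SHORTSWAP(a) (((a) << 8) | ((a) >> 8))

int main(void)
{
        uint32_t x = 0x12345678u;
        uint16_t s = 0x000Bu;

        printf("0x%08x\n", (uint32_t)LONGSWAP(x));             /* 0x78563412 */
        printf("0x%04x\n", (uint16_t)(SHORTSWAP(s) & 0xffff)); /* 0x0b00     */
        return 0;
}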
+ */ + ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST); + wmb(); + msleep(1); /* delay about 1ms */ + + /* Wait at least 10ms for All module to be Idle */ + for (i = 0; i < 10; i++) { + icr = ATL2_READ_REG(hw, REG_IDLE_STATUS); + if (!icr) + break; + msleep(1); /* delay 1 ms */ + cpu_relax(); + } + + if (icr) + return icr; + + return 0; +} + +#define CUSTOM_SPI_CS_SETUP 2 +#define CUSTOM_SPI_CLK_HI 2 +#define CUSTOM_SPI_CLK_LO 2 +#define CUSTOM_SPI_CS_HOLD 2 +#define CUSTOM_SPI_CS_HI 3 + +static struct atl2_spi_flash_dev flash_table[] = +{ +/* MFR WRSR READ PROGRAM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */ +{"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 }, +{"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 }, +{"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 }, +}; + +static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf) +{ + int i; + u32 value; + + ATL2_WRITE_REG(hw, REG_SPI_DATA, 0); + ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr); + + value = SPI_FLASH_CTRL_WAIT_READY | + (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << + SPI_FLASH_CTRL_CS_SETUP_SHIFT | + (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) << + SPI_FLASH_CTRL_CLK_HI_SHIFT | + (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) << + SPI_FLASH_CTRL_CLK_LO_SHIFT | + (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) << + SPI_FLASH_CTRL_CS_HOLD_SHIFT | + (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) << + SPI_FLASH_CTRL_CS_HI_SHIFT | + (0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT; + + ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); + + value |= SPI_FLASH_CTRL_START; + + ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); + + for (i = 0; i < 10; i++) { + msleep(1); + value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); + if (!(value & SPI_FLASH_CTRL_START)) + break; + } + + if (value & SPI_FLASH_CTRL_START) + return false; + + *buf = ATL2_READ_REG(hw, REG_SPI_DATA); + + return true; +} + +/* + * get_permanent_address + * return 0 if get valid mac address, + */ +static int get_permanent_address(struct atl2_hw *hw) +{ + u32 Addr[2]; + u32 i, Control; + u16 Register; + u8 EthAddr[NODE_ADDRESS_SIZE]; + bool KeyValid; + + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + Addr[0] = 0; + Addr[1] = 0; + + if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */ + Register = 0; + KeyValid = false; + + /* Read out all EEPROM content */ + i = 0; + while (1) { + if (atl2_read_eeprom(hw, i + 0x100, &Control)) { + if (KeyValid) { + if (Register == REG_MAC_STA_ADDR) + Addr[0] = Control; + else if (Register == + (REG_MAC_STA_ADDR + 4)) + Addr[1] = Control; + KeyValid = false; + } else if ((Control & 0xff) == 0x5A) { + KeyValid = true; + Register = (u16) (Control >> 16); + } else { + /* assume data end while encount an invalid KEYWORD */ + break; + } + } else { + break; /* read error */ + } + i += 4; + } + + *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); + *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]); + + if (is_valid_ether_addr(EthAddr)) { + memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE); + return 0; + } + return 1; + } + + /* see if SPI flash exists? 
*/ + Addr[0] = 0; + Addr[1] = 0; + Register = 0; + KeyValid = false; + i = 0; + while (1) { + if (atl2_spi_read(hw, i + 0x1f000, &Control)) { + if (KeyValid) { + if (Register == REG_MAC_STA_ADDR) + Addr[0] = Control; + else if (Register == (REG_MAC_STA_ADDR + 4)) + Addr[1] = Control; + KeyValid = false; + } else if ((Control & 0xff) == 0x5A) { + KeyValid = true; + Register = (u16) (Control >> 16); + } else { + break; /* data end */ + } + } else { + break; /* read error */ + } + i += 4; + } + + *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); + *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]); + if (is_valid_ether_addr(EthAddr)) { + memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE); + return 0; + } + /* maybe MAC-address is from BIOS */ + Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR); + Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4); + *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); + *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]); + + if (is_valid_ether_addr(EthAddr)) { + memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE); + return 0; + } + + return 1; +} + +/* + * Reads the adapter's MAC address from the EEPROM + * + * hw - Struct containing variables accessed by shared code + */ +static s32 atl2_read_mac_addr(struct atl2_hw *hw) +{ + u16 i; + + if (get_permanent_address(hw)) { + /* for test */ + /* FIXME: shouldn't we use random_ether_addr() here? */ + hw->perm_mac_addr[0] = 0x00; + hw->perm_mac_addr[1] = 0x13; + hw->perm_mac_addr[2] = 0x74; + hw->perm_mac_addr[3] = 0x00; + hw->perm_mac_addr[4] = 0x5c; + hw->perm_mac_addr[5] = 0x38; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + + return 0; +} + +/* + * Hashes an address to determine its location in the multicast table + * + * hw - Struct containing variables accessed by shared code + * mc_addr - the multicast address to hash + * + * atl2_hash_mc_addr + * purpose + * set hash value for a multicast address + * hash calcu processing : + * 1. calcu 32bit CRC for multicast address + * 2. reverse crc with MSB to LSB + */ +static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr) +{ + u32 crc32, value; + int i; + + value = 0; + crc32 = ether_crc_le(6, mc_addr); + + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. 
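/*
 * Editor's note (illustrative sketch, not part of the patch):
 * atl2_hash_mc_addr() above computes a little-endian CRC-32 of the
 * multicast address and reverses its 32 bits; atl2_hash_set(), which
 * follows, then uses the top bit of that value to pick one of the two
 * 32-bit hash registers and the next five bits as the bit index.  The
 * standalone sketch below restates that arithmetic; the CRC routine is
 * assumed to be equivalent to the kernel's ether_crc_le() (reflected
 * CRC-32, poly 0xEDB88320, init ~0, no final inversion).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t crc32_le(const uint8_t *p, int len)
{
        uint32_t crc = 0xFFFFFFFFu;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= p[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1u) ? 0xEDB88320u : 0);
        }
        return crc;
}

/* Reverse the 32 bits MSB<->LSB, as atl2_hash_mc_addr() does. */
static uint32_t bit_reverse32(uint32_t v)
{
        uint32_t r = 0;
        int i;

        for (i = 0; i < 32; i++)
                r |= ((v >> i) & 1u) << (31 - i);
        return r;
}

int main(void)
{
        /* broadcast address as a sample filter input */
        uint8_t mc[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        uint32_t hash = bit_reverse32(crc32_le(mc, 6));
        uint32_t hash_reg = (hash >> 31) & 0x1;  /* which of the 2 registers */
        uint32_t hash_bit = (hash >> 26) & 0x1F; /* which bit in that register */

        printf("hash=0x%08x reg=%u bit=%u\n", hash, hash_reg, hash_bit);
        return 0;
}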
+ */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + + mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); + + mta |= (1 << hash_bit); + + ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); +} + +/* + * atl2_init_pcie - init PCIE module + */ +static void atl2_init_pcie(struct atl2_hw *hw) +{ + u32 value; + value = LTSSM_TEST_MODE_DEF; + ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value); + + value = PCIE_DLL_TX_CTRL1_DEF; + ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value); +} + +static void atl2_init_flash_opcode(struct atl2_hw *hw) +{ + if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) + hw->flash_vendor = 0; /* ATMEL */ + + /* Init OP table */ + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM, + flash_table[hw->flash_vendor].cmdPROGRAM); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE, + flash_table[hw->flash_vendor].cmdSECTOR_ERASE); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE, + flash_table[hw->flash_vendor].cmdCHIP_ERASE); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID, + flash_table[hw->flash_vendor].cmdRDID); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN, + flash_table[hw->flash_vendor].cmdWREN); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR, + flash_table[hw->flash_vendor].cmdRDSR); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR, + flash_table[hw->flash_vendor].cmdWRSR); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ, + flash_table[hw->flash_vendor].cmdREAD); +} + +/******************************************************************** +* Performs basic configuration of the adapter. +* +* hw - Struct containing variables accessed by shared code +* Assumes that the controller has previously been reset and is in a +* post-reset uninitialized state. Initializes multicast table, +* and Calls routines to setup link +* Leaves the transmit and receive units disabled and uninitialized. +********************************************************************/ +static s32 atl2_init_hw(struct atl2_hw *hw) +{ + u32 ret_val = 0; + + atl2_init_pcie(hw); + + /* Zero out the Multicast HASH table */ + /* clear the old settings from the multicast hash table */ + ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + atl2_init_flash_opcode(hw); + + ret_val = atl2_phy_init(hw); + + return ret_val; +} + +/* + * Detects the current speed and duplex settings of the hardware. 
+ * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + u16 phy_data; + + /* Read PHY Specific Status Register (17) */ + ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) + return ATLX_ERR_PHY_RES; + + switch (phy_data & MII_ATLX_PSSR_SPEED) { + case MII_ATLX_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_ATLX_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + return ATLX_ERR_PHY_SPEED; + break; + } + + if (phy_data & MII_ATLX_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | + MDIO_SUP_PREAMBLE | + MDIO_RW | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val); + + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ATL2_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + wmb(); + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16)val; + return 0; + } + + return ATLX_ERR_PHY; +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val); + + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ATL2_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + + wmb(); + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return 0; + + return ATLX_ERR_PHY; +} + +/* + * Configures PHY autoneg and flow control advertisement settings + * + * hw - Struct containing variables accessed by shared code + */ +static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw) +{ + s32 ret_val; + s16 mii_autoneg_adv_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; + + /* Need to parse autoneg_advertised and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). */ + mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; + + /* Need to parse MediaType and setup the + * appropriate PHY registers. 
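/*
 * Editor's note (illustrative sketch, not part of the patch): the PHY
 * accessors above kick off an MDIO transaction and then poll
 * REG_MDIO_CTRL until the START and BUSY bits clear, giving up after
 * MDIO_WAIT_TIMES iterations.  The standalone sketch below shows the same
 * poll-with-timeout shape against a simulated register; the bit values
 * and iteration count are placeholders, not the driver's real constants.
 */
#include <stdio.h>
#include <stdint.h>

#define MDIO_START 0x1u
#define MDIO_BUSY  0x2u
#define WAIT_TIMES 10

/* Simulated MDIO control register: stays busy for a few reads. */
static uint32_t fake_mdio_read(void)
{
        static int countdown = 3;
        return (countdown-- > 0) ? (MDIO_START | MDIO_BUSY) : 0;
}

/* Returns 0 on success, -1 on timeout, mirroring the loop shape in
 * atl2_read_phy_reg()/atl2_write_phy_reg(). */
static int mdio_wait_idle(void)
{
        uint32_t val = MDIO_START | MDIO_BUSY;
        int i;

        for (i = 0; i < WAIT_TIMES; i++) {
                val = fake_mdio_read();
                if (!(val & (MDIO_START | MDIO_BUSY)))
                        break;
        }
        return (val & (MDIO_START | MDIO_BUSY)) ? -1 : 0;
}

int main(void)
{
        printf("mdio_wait_idle() = %d\n", mdio_wait_idle()); /* 0 (completed) */
        return 0;
}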
*/ + switch (hw->MediaType) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= + (MII_AR_10T_HD_CAPS | + MII_AR_10T_FD_CAPS | + MII_AR_100TX_HD_CAPS| + MII_AR_100TX_FD_CAPS); + hw->autoneg_advertised = + ADVERTISE_10_HALF | + ADVERTISE_10_FULL | + ADVERTISE_100_HALF| + ADVERTISE_100_FULL; + break; + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; + hw->autoneg_advertised = ADVERTISE_100_FULL; + break; + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; + hw->autoneg_advertised = ADVERTISE_100_HALF; + break; + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; + hw->autoneg_advertised = ADVERTISE_10_FULL; + break; + default: + mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; + hw->autoneg_advertised = ADVERTISE_10_HALF; + break; + } + + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); + + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + + ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + + if (ret_val) + return ret_val; + + return 0; +} + +/* + * Resets the PHY and make all config validate + * + * hw - Struct containing variables accessed by shared code + * + * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) + */ +static s32 atl2_phy_commit(struct atl2_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; + ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /* pcie serdes link may be down ! */ + for (i = 0; i < 25; i++) { + msleep(1); + val = ATL2_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (0 != (val & (MDIO_START | MDIO_BUSY))) { + printk(KERN_ERR "atl2: PCIe link down for at least 25ms !\n"); + return ret_val; + } + } + return 0; +} + +static s32 atl2_phy_init(struct atl2_hw *hw) +{ + s32 ret_val; + u16 phy_val; + + if (hw->phy_configured) + return 0; + + /* Enable PHY */ + ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1); + ATL2_WRITE_FLUSH(hw); + msleep(1); + + /* check if the PHY is in powersaving mode */ + atl2_write_phy_reg(hw, MII_DBG_ADDR, 0); + atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val); + + /* 024E / 124E 0r 0274 / 1274 ? 
*/ + if (phy_val & 0x1000) { + phy_val &= ~0x1000; + atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val); + } + + msleep(1); + + /*Enable PHY LinkChange Interrupt */ + ret_val = atl2_write_phy_reg(hw, 18, 0xC00); + if (ret_val) + return ret_val; + + /* setup AutoNeg parameters */ + ret_val = atl2_phy_setup_autoneg_adv(hw); + if (ret_val) + return ret_val; + + /* SW.Reset & En-Auto-Neg to restart Auto-Neg */ + ret_val = atl2_phy_commit(hw); + if (ret_val) + return ret_val; + + hw->phy_configured = true; + + return ret_val; +} + +static void atl2_set_mac_addr(struct atl2_hw *hw) +{ + u32 value; + /* 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword */ + value = (((u32)hw->mac_addr[2]) << 24) | + (((u32)hw->mac_addr[3]) << 16) | + (((u32)hw->mac_addr[4]) << 8) | + (((u32)hw->mac_addr[5])); + ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); + /* hight dword */ + value = (((u32)hw->mac_addr[0]) << 8) | + (((u32)hw->mac_addr[1])); + ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); +} + +/* + * check_eeprom_exist + * return 0 if eeprom exist + */ +static int atl2_check_eeprom_exist(struct atl2_hw *hw) +{ + u32 value; + + value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); + } + value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 0 : 1; +} + +/* FIXME: This doesn't look right. -- CHS */ +static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value) +{ + return true; +} + +static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue) +{ + int i; + u32 Control; + + if (Offset & 0x3) + return false; /* address do not align */ + + ATL2_WRITE_REG(hw, REG_VPD_DATA, 0); + Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + ATL2_WRITE_REG(hw, REG_VPD_CAP, Control); + + for (i = 0; i < 10; i++) { + msleep(2); + Control = ATL2_READ_REG(hw, REG_VPD_CAP); + if (Control & VPD_CAP_VPD_FLAG) + break; + } + + if (Control & VPD_CAP_VPD_FLAG) { + *pValue = ATL2_READ_REG(hw, REG_VPD_DATA); + return true; + } + return false; /* timeout */ +} + +static void atl2_force_ps(struct atl2_hw *hw) +{ + u16 phy_val; + + atl2_write_phy_reg(hw, MII_DBG_ADDR, 0); + atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val); + atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000); + + atl2_write_phy_reg(hw, MII_DBG_ADDR, 2); + atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000); + atl2_write_phy_reg(hw, MII_DBG_ADDR, 3); + atl2_write_phy_reg(hw, MII_DBG_DATA, 0); +} + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define ATL2_MAX_NIC 4 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ +#define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET} +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when atl2_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labeled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. 
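/*
 * Editor's note (illustrative sketch, not part of the patch):
 * atl2_set_mac_addr() above packs the six MAC address bytes into the two
 * station-address registers, low dword first.  The standalone check below
 * reproduces that packing for the example given in the driver comment
 * (00-0B-6A-F6-00-DC -> low 0x6AF600DC, high 0x0000000B).
 */
#include <stdio.h>
#include <stdint.h>

static void pack_mac(const uint8_t mac[6], uint32_t *lo, uint32_t *hi)
{
        /* bytes 2..5 form the low dword, bytes 0..1 the high dword */
        *lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
              ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
        *hi = ((uint32_t)mac[0] << 8)  |  (uint32_t)mac[1];
}

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x0B, 0x6A, 0xF6, 0x00, 0xDC };
        uint32_t lo, hi;

        pack_mac(mac, &lo, &hi);
        printf("low=0x%08x high=0x%08x\n", lo, hi); /* 0x6af600dc 0x0000000b */
        return 0;
}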
+ */ + +#define ATL2_PARAM(X, desc) \ + static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else +#define ATL2_PARAM(X, desc) \ + static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif + +/* + * Transmit Memory Size + * Valid Range: 64-2048 + * Default Value: 128 + */ +#define ATL2_MIN_TX_MEMSIZE 4 /* 4KB */ +#define ATL2_MAX_TX_MEMSIZE 64 /* 64KB */ +#define ATL2_DEFAULT_TX_MEMSIZE 8 /* 8KB */ +ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory"); + +/* + * Receive Memory Block Count + * Valid Range: 16-512 + * Default Value: 128 + */ +#define ATL2_MIN_RXD_COUNT 16 +#define ATL2_MAX_RXD_COUNT 512 +#define ATL2_DEFAULT_RXD_COUNT 64 +ATL2_PARAM(RxMemBlock, "Number of receive memory block"); + +/* + * User Specified MediaType Override + * + * Valid Range: 0-5 + * - 0 - auto-negotiate at all supported speeds + * - 1 - only link at 1000Mbps Full Duplex + * - 2 - only link at 100Mbps Full Duplex + * - 3 - only link at 100Mbps Half Duplex + * - 4 - only link at 10Mbps Full Duplex + * - 5 - only link at 10Mbps Half Duplex + * Default Value: 0 + */ +ATL2_PARAM(MediaType, "MediaType Select"); + +/* + * Interrupt Moderate Timer in units of 2048 ns (~2 us) + * Valid Range: 10-65535 + * Default Value: 45000(90ms) + */ +#define INT_MOD_DEFAULT_CNT 100 /* 200us */ +#define INT_MOD_MAX_CNT 65000 +#define INT_MOD_MIN_CNT 50 +ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer"); + +/* + * FlashVendor + * Valid Range: 0-2 + * 0 - Atmel + * 1 - SST + * 2 - ST + */ +ATL2_PARAM(FlashVendor, "SPI Flash Vendor"); + +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +#define FLASH_VENDOR_DEFAULT 0 +#define FLASH_VENDOR_MIN 0 +#define FLASH_VENDOR_MAX 2 + +struct atl2_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct atl2_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int __devinit atl2_validate_option(int *value, struct atl2_option *opt) +{ + int i; + struct atl2_opt_list *ent; + + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + printk(KERN_INFO "%s Enabled\n", opt->name); + return 0; + break; + case OPTION_DISABLED: + printk(KERN_INFO "%s Disabled\n", opt->name); + return 0; + break; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + printk(KERN_INFO "%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + printk(KERN_INFO "%s\n", ent->str); + return 0; + } + } + break; + default: + BUG(); + } + + printk(KERN_INFO "Invalid %s specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/* + * atl2_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. 
The final value is stored + * in a variable in the adapter structure. + */ +static void __devinit atl2_check_options(struct atl2_adapter *adapter) +{ + int val; + struct atl2_option opt; + int bd = adapter->bd_number; + if (bd >= ATL2_MAX_NIC) { + printk(KERN_NOTICE "Warning: no configuration for board #%i\n", + bd); + printk(KERN_NOTICE "Using defaults for all values\n"); +#ifndef module_param_array + bd = ATL2_MAX_NIC; +#endif + } + + /* Bytes of Transmit Memory */ + opt.type = range_option; + opt.name = "Bytes of Transmit Memory"; + opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE); + opt.def = ATL2_DEFAULT_TX_MEMSIZE; + opt.arg.r.min = ATL2_MIN_TX_MEMSIZE; + opt.arg.r.max = ATL2_MAX_TX_MEMSIZE; +#ifdef module_param_array + if (num_TxMemSize > bd) { +#endif + val = TxMemSize[bd]; + atl2_validate_option(&val, &opt); + adapter->txd_ring_size = ((u32) val) * 1024; +#ifdef module_param_array + } else + adapter->txd_ring_size = ((u32)opt.def) * 1024; +#endif + /* txs ring size: */ + adapter->txs_ring_size = adapter->txd_ring_size / 128; + if (adapter->txs_ring_size > 160) + adapter->txs_ring_size = 160; + + /* Receive Memory Block Count */ + opt.type = range_option; + opt.name = "Number of receive memory block"; + opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT); + opt.def = ATL2_DEFAULT_RXD_COUNT; + opt.arg.r.min = ATL2_MIN_RXD_COUNT; + opt.arg.r.max = ATL2_MAX_RXD_COUNT; +#ifdef module_param_array + if (num_RxMemBlock > bd) { +#endif + val = RxMemBlock[bd]; + atl2_validate_option(&val, &opt); + adapter->rxd_ring_size = (u32)val; + /* FIXME */ + /* ((u16)val)&~1; */ /* even number */ +#ifdef module_param_array + } else + adapter->rxd_ring_size = (u32)opt.def; +#endif + /* init RXD Flow control value */ + adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7; + adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) > + (adapter->rxd_ring_size / 12) ? 
(ATL2_MIN_RXD_COUNT / 8) : + (adapter->rxd_ring_size / 12); + + /* Interrupt Moderate Timer */ + opt.type = range_option; + opt.name = "Interrupt Moderate Timer"; + opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT); + opt.def = INT_MOD_DEFAULT_CNT; + opt.arg.r.min = INT_MOD_MIN_CNT; + opt.arg.r.max = INT_MOD_MAX_CNT; +#ifdef module_param_array + if (num_IntModTimer > bd) { +#endif + val = IntModTimer[bd]; + atl2_validate_option(&val, &opt); + adapter->imt = (u16) val; +#ifdef module_param_array + } else + adapter->imt = (u16)(opt.def); +#endif + /* Flash Vendor */ + opt.type = range_option; + opt.name = "SPI Flash Vendor"; + opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT); + opt.def = FLASH_VENDOR_DEFAULT; + opt.arg.r.min = FLASH_VENDOR_MIN; + opt.arg.r.max = FLASH_VENDOR_MAX; +#ifdef module_param_array + if (num_FlashVendor > bd) { +#endif + val = FlashVendor[bd]; + atl2_validate_option(&val, &opt); + adapter->hw.flash_vendor = (u8) val; +#ifdef module_param_array + } else + adapter->hw.flash_vendor = (u8)(opt.def); +#endif + /* MediaType */ + opt.type = range_option; + opt.name = "Speed/Duplex Selection"; + opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR); + opt.def = MEDIA_TYPE_AUTO_SENSOR; + opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR; + opt.arg.r.max = MEDIA_TYPE_10M_HALF; +#ifdef module_param_array + if (num_MediaType > bd) { +#endif + val = MediaType[bd]; + atl2_validate_option(&val, &opt); + adapter->hw.MediaType = (u16) val; +#ifdef module_param_array + } else + adapter->hw.MediaType = (u16)(opt.def); +#endif +} diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h new file mode 100644 index 0000000..927e4de --- /dev/null +++ b/drivers/net/atlx/atl2.h @@ -0,0 +1,528 @@ +/* atl2.h -- atl2 driver definitions + * + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * Copyright(c) 2006 xiong huang + * Copyright(c) 2007 Chris Snook + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _ATL2_H_ +#define _ATL2_H_ + +#include +#include + +#ifndef _ATL2_HW_H_ +#define _ATL2_HW_H_ + +#ifndef _ATL2_OSDEP_H_ +#define _ATL2_OSDEP_H_ + +#include +#include +#include +#include + +#include "atlx.h" + +#ifdef ETHTOOL_OPS_COMPAT +extern int ethtool_ioctl(struct ifreq *ifr); +#endif + +#define PCI_COMMAND_REGISTER PCI_COMMAND +#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE +#define ETH_ADDR_LEN ETH_ALEN + +#define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \ + ((a)->hw_addr + (reg)))) + +#define ATL2_WRITE_FLUSH(a) (ioread32((a)->hw_addr)) + +#define ATL2_READ_REG(a, reg) (ioread32((a)->hw_addr + (reg))) + +#define ATL2_WRITE_REGB(a, reg, value) (iowrite8((value), \ + ((a)->hw_addr + (reg)))) + +#define ATL2_READ_REGB(a, reg) (ioread8((a)->hw_addr + (reg))) + +#define ATL2_WRITE_REGW(a, reg, value) (iowrite16((value), \ + ((a)->hw_addr + (reg)))) + +#define ATL2_READ_REGW(a, reg) (ioread16((a)->hw_addr + (reg))) + +#define ATL2_WRITE_REG_ARRAY(a, reg, offset, value) \ + (iowrite32((value), (((a)->hw_addr + (reg)) + ((offset) << 2)))) + +#define ATL2_READ_REG_ARRAY(a, reg, offset) \ + (ioread32(((a)->hw_addr + (reg)) + ((offset) << 2))) + +#endif /* _ATL2_OSDEP_H_ */ + +struct atl2_adapter; +struct atl2_hw; + +/* function prototype */ +static s32 atl2_reset_hw(struct atl2_hw *hw); +static s32 atl2_read_mac_addr(struct atl2_hw *hw); +static s32 atl2_init_hw(struct atl2_hw *hw); +static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed, + u16 *duplex); +static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr); +static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value); +static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data); +static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data); +static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value); +static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value); +static void atl2_set_mac_addr(struct atl2_hw *hw); +static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue); +static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value); +static s32 atl2_phy_init(struct atl2_hw *hw); +static int atl2_check_eeprom_exist(struct atl2_hw *hw); +static void atl2_force_ps(struct atl2_hw *hw); + +/* register definition */ + +/* Block IDLE Status Register */ +#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC is non-IDLE */ +#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC is non-IDLE */ +#define IDLE_STATUS_DMAR 8 /* 1: DMAR is non-IDLE */ +#define IDLE_STATUS_DMAW 4 /* 1: DMAW is non-IDLE */ + +/* MDIO Control Register */ +#define MDIO_WAIT_TIMES 10 + +/* MAC Control Register */ +#define MAC_CTRL_DBG_TX_BKPRESURE 0x100000 /* 1: TX max backoff */ +#define MAC_CTRL_MACLP_CLK_PHY 0x8000000 /* 1: 25MHz from phy */ +#define MAC_CTRL_HALF_LEFT_BUF_SHIFT 28 +#define MAC_CTRL_HALF_LEFT_BUF_MASK 0xF /* MAC retry buf x32B */ + +/* Internal SRAM Partition Register */ +#define REG_SRAM_TXRAM_END 0x1500 /* Internal tail address of TXRAM + * default: 2byte*1024 */ +#define REG_SRAM_RXRAM_END 0x1502 /* Internal tail address of RXRAM + * default: 2byte*1024 */ + +/* Descriptor Control register */ +#define REG_TXD_BASE_ADDR_LO 0x1544 /* The base address of the Transmit + * Data Mem low 32-bit(dword align) */ +#define REG_TXD_MEM_SIZE 0x1548 /* Transmit Data Memory size(by + * double word , max 256KB) */ +#define REG_TXS_BASE_ADDR_LO 0x154C /* The base address of the Transmit + * Status Memory low 32-bit(dword word + * align) */ +#define 
REG_TXS_MEM_SIZE 0x1550 /* double word unit, max 4*2047 + * bytes. */ +#define REG_RXD_BASE_ADDR_LO 0x1554 /* The base address of the Receive + * Data & Status Memory low 32-bit + * (unit 8 bytes) */ +#define REG_RXD_BUF_NUM 0x1558 /* Receive Data & Status Memory buffer + * number (unit 1536 bytes, max + * 1536*2047) */ + +/* DMAR Control Register */ +#define REG_DMAR 0x1580 +#define DMAR_EN 0x1 /* 1: Enable DMAR */ + +/* TX Cut-Through (early tx threshold) Control Register */ +#define REG_TX_CUT_THRESH 0x1590 /* TxMac begin transmit packet + * threshold (unit: word) */ + +/* DMAW Control Register */ +#define REG_DMAW 0x15A0 +#define DMAW_EN 0x1 + +/* Flow control register */ +#define REG_PAUSE_ON_TH 0x15A8 /* RXD high watermark of overflow + * threshold configuration register */ +#define REG_PAUSE_OFF_TH 0x15AA /* RXD low watermark of overflow + * threshold configuration register */ + +/* Mailbox Register */ +#define REG_MB_TXD_WR_IDX 0x15f0 /* double word align */ +#define REG_MB_RXD_RD_IDX 0x15F4 /* RXD Read index (unit: 1536 bytes) */ + +/* Interrupt Status Register */ +#define ISR_TIMER 1 /* Interrupt when Timer counts down to zero */ +#define ISR_MANUAL 2 /* Software manual interrupt, for debug. Set + * when SW_MAN_INT_EN is set in Table 51 + * Selene Master Control Register + * (Offset 0x1400). */ +#define ISR_RXF_OV 4 /* RXF overflow interrupt */ +#define ISR_TXF_UR 8 /* TXF underrun interrupt */ +#define ISR_TXS_OV 0x10 /* Internal transmit status buffer full + * interrupt */ +#define ISR_RXS_OV 0x20 /* Internal receive status buffer full + * interrupt */ +#define ISR_LINK_CHG 0x40 /* Link Status Change Interrupt */ +#define ISR_HOST_TXD_UR 0x80 +#define ISR_HOST_RXD_OV 0x100 /* Host rx data memory full, one pulse */ +#define ISR_DMAR_TO_RST 0x200 /* DMAR op timeout interrupt. SW should + * do Reset */ +#define ISR_DMAW_TO_RST 0x400 +#define ISR_PHY 0x800 /* phy interrupt */ +#define ISR_TS_UPDATE 0x10000 /* interrupt after new tx pkt status written + * to host */ +#define ISR_RS_UPDATE 0x20000 /* interrupt after new rx pkt status written + * to host. 
*/ +#define ISR_TX_EARLY 0x40000 /* interrupt when txmac begin transmit one + * packet */ + +#define ISR_TX_EVENT (ISR_TXF_UR | ISR_TXS_OV | ISR_HOST_TXD_UR |\ + ISR_TS_UPDATE | ISR_TX_EARLY) +#define ISR_RX_EVENT (ISR_RXF_OV | ISR_RXS_OV | ISR_HOST_RXD_OV |\ + ISR_RS_UPDATE) + +#define IMR_NORMAL_MASK (\ + /*ISR_LINK_CHG |*/\ + ISR_MANUAL |\ + ISR_DMAR_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_PHY |\ + ISR_PHY_LINKDOWN |\ + ISR_TS_UPDATE |\ + ISR_RS_UPDATE) + +/* Receive MAC Statistics Registers */ +#define REG_STS_RX_PAUSE 0x1700 /* Num pause packets received */ +#define REG_STS_RXD_OV 0x1704 /* Num frames dropped due to RX + * FIFO overflow */ +#define REG_STS_RXS_OV 0x1708 /* Num frames dropped due to RX + * Status Buffer Overflow */ +#define REG_STS_RX_FILTER 0x170C /* Num packets dropped due to + * address filtering */ + +/* MII definitions */ + +/* PHY Common Register */ +#define MII_SMARTSPEED 0x14 +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +/* PCI Command Register Bit Definitions */ +#define PCI_REG_COMMAND 0x04 +#define CMD_IO_SPACE 0x0001 +#define CMD_MEMORY_SPACE 0x0002 +#define CMD_BUS_MASTER 0x0004 + +#define MEDIA_TYPE_100M_FULL 1 +#define MEDIA_TYPE_100M_HALF 2 +#define MEDIA_TYPE_10M_FULL 3 +#define MEDIA_TYPE_10M_HALF 4 + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x000F /* Everything */ + +/* The size (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */ +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x2000 +#define VLAN_SIZE 4 + +struct tx_pkt_header { + unsigned pkt_size:11; + unsigned:4; /* reserved */ + unsigned ins_vlan:1; /* txmac should insert vlan */ + unsigned short vlan; /* vlan tag */ +}; +/* FIXME: replace above bitfields with MASK/SHIFT defines below */ +#define TX_PKT_HEADER_SIZE_MASK 0x7FF +#define TX_PKT_HEADER_SIZE_SHIFT 0 +#define TX_PKT_HEADER_INS_VLAN_MASK 0x1 +#define TX_PKT_HEADER_INS_VLAN_SHIFT 15 +#define TX_PKT_HEADER_VLAN_TAG_MASK 0xFFFF +#define TX_PKT_HEADER_VLAN_TAG_SHIFT 16 + +struct tx_pkt_status { + unsigned pkt_size:11; + unsigned:5; /* reserved */ + unsigned ok:1; /* current packet transmitted without error */ + unsigned bcast:1; /* broadcast packet */ + unsigned mcast:1; /* multicast packet */ + unsigned pause:1; /* transmiited a pause frame */ + unsigned ctrl:1; + unsigned defer:1; /* current packet is xmitted with defer */ + unsigned exc_defer:1; + unsigned single_col:1; + unsigned multi_col:1; + unsigned late_col:1; + unsigned abort_col:1; + unsigned underun:1; /* current packet is aborted + * due to txram underrun */ + unsigned:3; /* reserved */ + unsigned update:1; /* always 1'b1 in tx_status_buf */ +}; +/* FIXME: replace above bitfields with MASK/SHIFT defines below */ +#define TX_PKT_STATUS_SIZE_MASK 0x7FF +#define TX_PKT_STATUS_SIZE_SHIFT 0 +#define TX_PKT_STATUS_OK_MASK 0x1 +#define TX_PKT_STATUS_OK_SHIFT 16 +#define TX_PKT_STATUS_BCAST_MASK 0x1 +#define TX_PKT_STATUS_BCAST_SHIFT 17 +#define TX_PKT_STATUS_MCAST_MASK 0x1 +#define TX_PKT_STATUS_MCAST_SHIFT 18 +#define TX_PKT_STATUS_PAUSE_MASK 0x1 +#define TX_PKT_STATUS_PAUSE_SHIFT 19 +#define TX_PKT_STATUS_CTRL_MASK 0x1 +#define TX_PKT_STATUS_CTRL_SHIFT 20 +#define TX_PKT_STATUS_DEFER_MASK 0x1 +#define TX_PKT_STATUS_DEFER_SHIFT 21 +#define TX_PKT_STATUS_EXC_DEFER_MASK 0x1 +#define TX_PKT_STATUS_EXC_DEFER_SHIFT 22 +#define TX_PKT_STATUS_SINGLE_COL_MASK 0x1 +#define TX_PKT_STATUS_SINGLE_COL_SHIFT 23 +#define TX_PKT_STATUS_MULTI_COL_MASK 0x1 
+#define TX_PKT_STATUS_MULTI_COL_SHIFT 24 +#define TX_PKT_STATUS_LATE_COL_MASK 0x1 +#define TX_PKT_STATUS_LATE_COL_SHIFT 25 +#define TX_PKT_STATUS_ABORT_COL_MASK 0x1 +#define TX_PKT_STATUS_ABORT_COL_SHIFT 26 +#define TX_PKT_STATUS_UNDERRUN_MASK 0x1 +#define TX_PKT_STATUS_UNDERRUN_SHIFT 27 +#define TX_PKT_STATUS_UPDATE_MASK 0x1 +#define TX_PKT_STATUS_UPDATE_SHIFT 31 + +struct rx_pkt_status { + unsigned pkt_size:11; /* packet size, max 2047 bytes */ + unsigned:5; /* reserved */ + unsigned ok:1; /* current packet received ok without error */ + unsigned bcast:1; /* current packet is broadcast */ + unsigned mcast:1; /* current packet is multicast */ + unsigned pause:1; + unsigned ctrl:1; + unsigned crc:1; /* received a packet with crc error */ + unsigned code:1; /* received a packet with code error */ + unsigned runt:1; /* received a packet less than 64 bytes + * with good crc */ + unsigned frag:1; /* received a packet less than 64 bytes + * with bad crc */ + unsigned trunc:1; /* current frame truncated due to rxram full */ + unsigned align:1; /* this packet is alignment error */ + unsigned vlan:1; /* this packet has vlan */ + unsigned:3; /* reserved */ + unsigned update:1; + unsigned short vtag; /* vlan tag */ + unsigned:16; +}; +/* FIXME: replace above bitfields with MASK/SHIFT defines below */ +#define RX_PKT_STATUS_SIZE_MASK 0x7FF +#define RX_PKT_STATUS_SIZE_SHIFT 0 +#define RX_PKT_STATUS_OK_MASK 0x1 +#define RX_PKT_STATUS_OK_SHIFT 16 +#define RX_PKT_STATUS_BCAST_MASK 0x1 +#define RX_PKT_STATUS_BCAST_SHIFT 17 +#define RX_PKT_STATUS_MCAST_MASK 0x1 +#define RX_PKT_STATUS_MCAST_SHIFT 18 +#define RX_PKT_STATUS_PAUSE_MASK 0x1 +#define RX_PKT_STATUS_PAUSE_SHIFT 19 +#define RX_PKT_STATUS_CTRL_MASK 0x1 +#define RX_PKT_STATUS_CTRL_SHIFT 20 +#define RX_PKT_STATUS_CRC_MASK 0x1 +#define RX_PKT_STATUS_CRC_SHIFT 21 +#define RX_PKT_STATUS_CODE_MASK 0x1 +#define RX_PKT_STATUS_CODE_SHIFT 22 +#define RX_PKT_STATUS_RUNT_MASK 0x1 +#define RX_PKT_STATUS_RUNT_SHIFT 23 +#define RX_PKT_STATUS_FRAG_MASK 0x1 +#define RX_PKT_STATUS_FRAG_SHIFT 24 +#define RX_PKT_STATUS_TRUNK_MASK 0x1 +#define RX_PKT_STATUS_TRUNK_SHIFT 25 +#define RX_PKT_STATUS_ALIGN_MASK 0x1 +#define RX_PKT_STATUS_ALIGN_SHIFT 26 +#define RX_PKT_STATUS_VLAN_MASK 0x1 +#define RX_PKT_STATUS_VLAN_SHIFT 27 +#define RX_PKT_STATUS_UPDATE_MASK 0x1 +#define RX_PKT_STATUS_UPDATE_SHIFT 31 +#define RX_PKT_STATUS_VLAN_TAG_MASK 0xFFFF +#define RX_PKT_STATUS_VLAN_TAG_SHIFT 32 + +struct rx_desc { + struct rx_pkt_status status; + unsigned char packet[1536-sizeof(struct rx_pkt_status)]; +}; + +enum atl2_speed_duplex { + atl2_10_half = 0, + atl2_10_full = 1, + atl2_100_half = 2, + atl2_100_full = 3 +}; + +struct atl2_spi_flash_dev { + const char *manu_name; /* manufacturer id */ + /* op-code */ + u8 cmdWRSR; + u8 cmdREAD; + u8 cmdPROGRAM; + u8 cmdWREN; + u8 cmdWRDI; + u8 cmdRDSR; + u8 cmdRDID; + u8 cmdSECTOR_ERASE; + u8 cmdCHIP_ERASE; +}; + +/* Structure containing variables used by the shared code (atl2_hw.c) */ +struct atl2_hw { + u8 __iomem *hw_addr; + void *back; + + u8 preamble_len; + u8 max_retry; /* Retransmission maximum, afterwards the + * packet will be discarded. */ + u8 jam_ipg; /* IPG to start JAM for collision based flow + * control in half-duplex mode. In unit of + * 8-bit time. */ + u8 ipgt; /* Desired back to back inter-packet gap. The + * default is 96-bit time. */ + u8 min_ifg; /* Minimum number of IFG to enforce in between + * RX frames. Frame gap below such IFP is + * dropped. 
*/ + u8 ipgr1; /* 64bit Carrier-Sense window */ + u8 ipgr2; /* 96-bit IPG window */ + u8 retry_buf; /* When half-duplex mode, should hold some + * bytes for mac retry . (8*4bytes unit) */ + + u16 fc_rxd_hi; + u16 fc_rxd_lo; + u16 lcol; /* Collision Window */ + u16 max_frame_size; + + u16 MediaType; + u16 autoneg_advertised; + u16 pci_cmd_word; + + u16 mii_autoneg_adv_reg; + + u32 mem_rang; + u32 txcw; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u16 phy_spd_default; + + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + + /* spi flash */ + u8 flash_vendor; + + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + + /* FIXME */ + /* bool phy_preamble_sup; */ + bool phy_configured; +}; + +#endif /* _ATL2_HW_H_ */ + +struct atl2_ring_header { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; +}; + +/* board specific private data structure */ +struct atl2_adapter { + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; +#ifdef NETIF_F_HW_VLAN_TX + struct vlan_group *vlgrp; +#endif + u32 wol; + u16 link_speed; + u16 link_duplex; + + spinlock_t stats_lock; + + struct work_struct reset_task; + struct work_struct link_chg_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + + unsigned long cfg_phy; + bool mac_disabled; + + /* All Descriptor memory */ + dma_addr_t ring_dma; + void *ring_vir_addr; + int ring_size; + + struct tx_pkt_header *txd_ring; + dma_addr_t txd_dma; + + struct tx_pkt_status *txs_ring; + dma_addr_t txs_dma; + + struct rx_desc *rxd_ring; + dma_addr_t rxd_dma; + + u32 txd_ring_size; /* bytes per unit */ + u32 txs_ring_size; /* dwords per unit */ + u32 rxd_ring_size; /* 1536 bytes per unit */ + + /* read /write ptr: */ + /* host */ + u32 txd_write_ptr; + u32 txs_next_clear; + u32 rxd_read_ptr; + + /* nic */ + atomic_t txd_read_ptr; + atomic_t txs_write_ptr; + u32 rxd_write_ptr; + + /* Interrupt Moderator timer ( 2us resolution) */ + u16 imt; + /* Interrupt Clear timer (2us resolution) */ + u16 ict; + + unsigned long flags; + /* structs defined in atl2_hw.h */ + u32 bd_number; /* board number */ + bool pci_using_64; + bool have_msi; + struct atl2_hw hw; + + u32 usr_cmd; + /* FIXME */ + /* u32 regs_buff[ATL2_REGS_LEN]; */ + u32 pci_state[16]; + + u32 *config_space; +}; + +enum atl2_state_t { + __ATL2_TESTING, + __ATL2_RESETTING, + __ATL2_DOWN +}; + +#endif /* _ATL2_H_ */ diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c new file mode 100644 index 0000000..72f3306 --- /dev/null +++ b/drivers/net/atlx/atlx.c @@ -0,0 +1,242 @@ +/* atlx.c -- common functions for Attansic network drivers + * + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 - 2008 Jay Cliburn + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* Including this file like a header is a temporary hack, I promise. -- CHS */ +#ifndef ATLX_C +#define ATLX_C + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atlx.h" + +static struct atlx_spi_flash_dev flash_table[] = { +/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */ + {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62}, + {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60}, + {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7}, +}; + +static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atlx_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/* + * atlx_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atlx_set_mac(struct net_device *netdev, void *p) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (netif_running(netdev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atlx_set_mac_addr(&adapter->hw); + return 0; +} + +static void atlx_check_for_link(struct atlx_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + + spin_lock(&adapter->lock); + adapter->phy_timer_pending = false; + atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->lock); + + /* notify upper layer link down ASAP */ + if (!(phy_data & BMSR_LSTATUS)) { + /* Link Down */ + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + dev_info(&adapter->pdev->dev, "%s link is down\n", + netdev->name); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +/* + * atlx_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
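The hash helpers used by set_multi below are only declared in this hunk (atlx_hash_mc_addr()/atlx_hash_set() live elsewhere in the patch). As a rough illustration of the conventional scheme, a CRC-derived 32-bit hash is mapped onto the two 32-bit words of REG_RX_HASH_TABLE by using a couple of high-order bits to pick the word and the bit within it. The sketch below is an assumption about that scheme, with a hypothetical example_ name, not the driver's actual implementation:

/* Illustrative sketch only: set one bit of the 64-bit multicast hash
 * table given a 32-bit hash value for one multicast address.
 */
static void example_hash_set(struct atlx_hw *hw, u32 hash_value)
{
	u32 hash_reg, hash_bit, mta;

	hash_reg = (hash_value >> 31) & 0x1;	/* which 32-bit table word */
	hash_bit = (hash_value >> 26) & 0x1F;	/* which bit in that word */

	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
	mta |= 1 << hash_bit;
	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}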
+ */ +static void atlx_set_multi(struct net_device *netdev) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + struct atlx_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u32 rctl; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); + if (netdev->flags & IFF_PROMISC) + rctl |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) { + rctl |= MAC_CTRL_MC_ALL_EN; + rctl &= ~MAC_CTRL_PROMIS_EN; + } else + rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + + iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); + + /* clear the old settings from the multicast hash table */ + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + + /* compute mc addresses' hash value ,and put it into hash table */ + netdev_for_each_mc_addr(mc_ptr, netdev) { + hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr); + atlx_hash_set(hw, hash_value); + } +} + +/* + * atlx_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void atlx_irq_enable(struct atlx_adapter *adapter) +{ + iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); + ioread32(adapter->hw.hw_addr + REG_IMR); +} + +/* + * atlx_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void atlx_irq_disable(struct atlx_adapter *adapter) +{ + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + ioread32(adapter->hw.hw_addr + REG_IMR); + synchronize_irq(adapter->pdev->irq); +} + +static void atlx_clear_phy_int(struct atlx_adapter *adapter) +{ + u16 phy_data; + unsigned long flags; + + spin_lock_irqsave(&adapter->lock, flags); + atlx_read_phy_reg(&adapter->hw, 19, &phy_data); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +/* + * atlx_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atlx_tx_timeout(struct net_device *netdev) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->tx_timeout_task); +} + +/* + * atlx_link_chg_task - deal with link change event Out of interrupt context + */ +static void atlx_link_chg_task(struct work_struct *work) +{ + struct atlx_adapter *adapter; + unsigned long flags; + + adapter = container_of(work, struct atlx_adapter, link_chg_task); + + spin_lock_irqsave(&adapter->lock, flags); + atlx_check_link(adapter); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static void atlx_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u32 ctrl; + + spin_lock_irqsave(&adapter->lock, flags); + /* atlx_irq_disable(adapter); FIXME: confirm/remove */ + adapter->vlgrp = grp; + + if (grp) { + /* enable VLAN tag insert/strip */ + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); + ctrl |= MAC_CTRL_RMV_VLAN; + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); + } else { + /* disable VLAN tag insert/strip */ + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); + ctrl &= ~MAC_CTRL_RMV_VLAN; + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); + } + + /* atlx_irq_enable(adapter); FIXME */ + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static void atlx_restore_vlan(struct atlx_adapter *adapter) +{ + atlx_vlan_rx_register(adapter->netdev, adapter->vlgrp); +} + +#endif /* ATLX_C */ diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h new file 
mode 100644 index 0000000..14054b7 --- /dev/null +++ b/drivers/net/atlx/atlx.h @@ -0,0 +1,503 @@ +/* atlx_hw.h -- common hardware definitions for Attansic network drivers + * + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 - 2008 Jay Cliburn + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef ATLX_H +#define ATLX_H + +#include +#include + +#define ATLX_ERR_PHY 2 +#define ATLX_ERR_PHY_SPEED 7 +#define ATLX_ERR_PHY_RES 8 + +#define SPEED_0 0xffff +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define MEDIA_TYPE_AUTO_SENSOR 0 + +/* register definitions */ +#define REG_PM_CTRLSTAT 0x44 + +#define REG_PCIE_CAP_LIST 0x58 + +#define REG_VPD_CAP 0x6C +#define VPD_CAP_ID_MASK 0xFF +#define VPD_CAP_ID_SHIFT 0 +#define VPD_CAP_NEXT_PTR_MASK 0xFF +#define VPD_CAP_NEXT_PTR_SHIFT 8 +#define VPD_CAP_VPD_ADDR_MASK 0x7FFF +#define VPD_CAP_VPD_ADDR_SHIFT 16 +#define VPD_CAP_VPD_FLAG 0x80000000 + +#define REG_VPD_DATA 0x70 + +#define REG_SPI_FLASH_CTRL 0x200 +#define SPI_FLASH_CTRL_STS_NON_RDY 0x1 +#define SPI_FLASH_CTRL_STS_WEN 0x2 +#define SPI_FLASH_CTRL_STS_WPEN 0x80 +#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF +#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 +#define SPI_FLASH_CTRL_INS_MASK 0x7 +#define SPI_FLASH_CTRL_INS_SHIFT 8 +#define SPI_FLASH_CTRL_START 0x800 +#define SPI_FLASH_CTRL_EN_VPD 0x2000 +#define SPI_FLASH_CTRL_LDSTART 0x8000 +#define SPI_FLASH_CTRL_CS_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HI_SHIFT 16 +#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 +#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 +#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 +#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 +#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 +#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 +#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 +#define SPI_FLASH_CTRL_WAIT_READY 0x10000000 + +#define REG_SPI_ADDR 0x204 + +#define REG_SPI_DATA 0x208 + +#define REG_SPI_FLASH_CONFIG 0x20C +#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF +#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 +#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 +#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 +#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 + +#define REG_SPI_FLASH_OP_PROGRAM 0x210 +#define REG_SPI_FLASH_OP_SC_ERASE 0x211 +#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 +#define REG_SPI_FLASH_OP_RDID 0x213 +#define REG_SPI_FLASH_OP_WREN 0x214 +#define REG_SPI_FLASH_OP_RDSR 0x215 +#define REG_SPI_FLASH_OP_WRSR 0x216 +#define REG_SPI_FLASH_OP_READ 0x217 + +#define REG_TWSI_CTRL 
0x218 +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 +#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 +#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 +#define TWSI_CTRL_SW_LDSTART 0x800 +#define TWSI_CTRL_HW_LDSTART 0x1000 +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 +#define TWSI_CTRL_LD_EXIST 0x400000 +#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 +#define TWSI_CTRL_FREQ_SEL_100K 0 +#define TWSI_CTRL_FREQ_SEL_200K 1 +#define TWSI_CTRL_FREQ_SEL_300K 2 +#define TWSI_CTRL_FREQ_SEL_400K 3 +#define TWSI_CTRL_SMB_SLV_ADDR /* FIXME: define or remove */ +#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 + +#define REG_PCIE_DLL_TX_CTRL1 0x1104 +#define PCIE_DLL_TX_CTRL1_SEL_NOR_CLK 0x400 +#define PCIE_DLL_TX_CTRL1_DEF 0x568 + +#define REG_LTSSM_TEST_MODE 0x12FC +#define LTSSM_TEST_MODE_DEF 0x6500 + +/* Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_SOFT_RST 0x1 +#define MASTER_CTRL_MTIMER_EN 0x2 +#define MASTER_CTRL_ITIMER_EN 0x4 +#define MASTER_CTRL_MANUAL_INT 0x8 +#define MASTER_CTRL_REV_NUM_SHIFT 16 +#define MASTER_CTRL_REV_NUM_MASK 0xFF +#define MASTER_CTRL_DEV_ID_SHIFT 24 +#define MASTER_CTRL_DEV_ID_MASK 0xFF + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + +/* IRQ Moderator Timer Initial Value Register */ +#define REG_IRQ_MODU_TIMER_INIT 0x1408 + +#define REG_PHY_ENABLE 0x140C + +/* IRQ Anti-Lost Timer Initial Value Register */ +#define REG_CMBDISDMA_TIMER 0x140E + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_DATA_MASK 0xFFFF +#define MDIO_DATA_SHIFT 0 +#define MDIO_REG_ADDR_MASK 0x1F +#define MDIO_REG_ADDR_SHIFT 16 +#define MDIO_RW 0x200000 +#define MDIO_SUP_PREAMBLE 0x400000 +#define MDIO_START 0x800000 +#define MDIO_CLK_SEL_SHIFT 24 +#define MDIO_CLK_25_4 0 +#define MDIO_CLK_25_6 2 +#define MDIO_CLK_25_8 3 +#define MDIO_CLK_25_10 4 +#define MDIO_CLK_25_14 5 +#define MDIO_CLK_25_20 6 +#define MDIO_CLK_25_28 7 +#define MDIO_BUSY 0x8000000 + +/* MII PHY Status Register */ +#define REG_PHY_STATUS 0x1418 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141C +#define BIST0_NOW 0x1 +#define BIST0_SRAM_FAIL 0x2 +#define BIST0_FUSE_FLAG 0x4 +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 +#define BIST1_SRAM_FAIL 0x2 +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES_LOCK 0x1424 +#define SERDES_LOCK_DETECT 1 +#define SERDES_LOCK_DETECT_EN 2 + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_TX_EN 1 +#define MAC_CTRL_RX_EN 2 +#define MAC_CTRL_TX_FLOW 4 +#define MAC_CTRL_RX_FLOW 8 +#define MAC_CTRL_LOOPBACK 0x10 +#define MAC_CTRL_DUPLX 0x20 +#define MAC_CTRL_ADD_CRC 0x40 +#define MAC_CTRL_PAD 0x80 +#define MAC_CTRL_LENCHK 0x100 +#define MAC_CTRL_HUGE_EN 0x200 +#define MAC_CTRL_PRMLEN_SHIFT 10 +#define MAC_CTRL_PRMLEN_MASK 0xF +#define MAC_CTRL_RMV_VLAN 0x4000 +#define MAC_CTRL_PROMIS_EN 0x8000 +#define MAC_CTRL_MC_ALL_EN 0x2000000 +#define MAC_CTRL_BC_EN 0x4000000 
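The MAC_CTRL_* bits above are ultimately combined into a single write to REG_MAC_CTRL when the MAC is (re)configured. A hedged, illustrative sketch of how such a control word is typically composed from link and filter state follows; the helper name and its full_duplex/promisc parameters are hypothetical and not part of this patch, and it only uses the bit definitions given above:

/* Illustrative only: compose a MAC control word from link/filter state.
 * The caller would iowrite32() the result to REG_MAC_CTRL.
 */
static u32 example_mac_ctrl_value(bool full_duplex, bool promisc)
{
	u32 ctrl = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN |	/* enable the MAC */
		   MAC_CTRL_ADD_CRC | MAC_CTRL_PAD |	/* pad + append CRC */
		   MAC_CTRL_BC_EN;			/* accept broadcast */

	/* multi-bit preamble-length field: mask the value, then shift it
	 * into place (7 is just an example value here) */
	ctrl |= (7 & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT;

	if (full_duplex)
		ctrl |= MAC_CTRL_DUPLX | MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW;
	if (promisc)
		ctrl |= MAC_CTRL_PROMIS_EN;

	return ctrl;
}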
+ +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 +#define MAC_IPG_IFG_IPGT_MASK 0x7F +#define MAC_IPG_IFG_MIFG_SHIFT 8 +#define MAC_IPG_IFG_MIFG_MASK 0xFF +#define MAC_IPG_IFG_IPGR1_SHIFT 16 +#define MAC_IPG_IFG_IPGR1_MASK 0x7F +#define MAC_IPG_IFG_IPGR2_SHIFT 24 +#define MAC_IPG_IFG_IPGR2_MASK 0x7F + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3FF +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xF +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xF +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xF + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149C + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14A0 +#define WOL_PATTERN_EN 0x1 +#define WOL_PATTERN_PME_EN 0x2 +#define WOL_MAGIC_EN 0x4 +#define WOL_MAGIC_PME_EN 0x8 +#define WOL_LINK_CHG_EN 0x10 +#define WOL_LINK_CHG_PME_EN 0x20 +#define WOL_PATTERN_ST 0x100 +#define WOL_MAGIC_ST 0x200 +#define WOL_LINKCHG_ST 0x400 +#define WOL_PT0_EN 0x10000 +#define WOL_PT1_EN 0x20000 +#define WOL_PT2_EN 0x40000 +#define WOL_PT3_EN 0x80000 +#define WOL_PT4_EN 0x100000 +#define WOL_PT0_MATCH 0x1000000 +#define WOL_PT1_MATCH 0x2000000 +#define WOL_PT2_MATCH 0x4000000 +#define WOL_PT3_MATCH 0x8000000 +#define WOL_PT4_MATCH 0x10000000 + +/* Internal SRAM Partition Register, high 32 bits */ +#define REG_SRAM_RFD_ADDR 0x1500 + +/* Descriptor Control register, high 32 bits */ +#define REG_DESC_BASE_ADDR_HI 0x1540 + +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_UR_DETECTED 0x1000000 +#define ISR_FERR_DETECTED 0x2000000 +#define ISR_NFERR_DETECTED 0x4000000 +#define ISR_CERR_DETECTED 0x8000000 +#define ISR_PHY_LINKDOWN 0x10000000 +#define ISR_DIS_INT 0x80000000 + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + +#define REG_RFD_RRD_IDX 0x1800 +#define REG_TPD_IDX 0x1804 + +/* MII definitions */ + +/* PHY Common Register */ +#define MII_ATLX_CR 0x09 +#define MII_ATLX_SR 0x0A +#define MII_ATLX_ESR 0x0F +#define MII_ATLX_PSCR 0x10 +#define MII_ATLX_PSSR 0x11 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, + * 00=10 + */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, + * 00=10 + */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_MASK 0x2040 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Ext register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* 
Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext stat info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Link partner ability register */ +#define MII_LPA_SLCT 0x001f /* Same as advertise selector */ +#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ +#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ +#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ +#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ +#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */ +#define MII_LPA_PAUSE 0x0400 /* PAUSE */ +#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */ +#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */ +#define MII_LPA_LPACK 0x4000 /* Link partner acked us */ +#define MII_LPA_NPAGE 0x8000 /* Next page bit */ + +/* Autoneg Advertisement Register */ +#define MII_AR_SELECTOR_FIELD 0x0001 /* IEEE 802.3 CSMA/CD */ +#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define MII_AR_PAUSE 0x0400 /* Pause operation desired */ +#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Dir bit */ +#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability support */ +#define MII_AR_SPEED_MASK 0x01E0 +#define MII_AR_DEFAULT_CAP_MASK 0x0DE0 + +/* 1000BASE-T Control Register */ +#define MII_ATLX_CR_1000T_HD_CAPS 0x0100 /* Adv 1000T HD cap */ +#define MII_ATLX_CR_1000T_FD_CAPS 0x0200 /* Adv 1000T FD cap */ +#define MII_ATLX_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device, + * 0=DTE device */ +#define MII_ATLX_CR_1000T_MS_VALUE 0x0800 /* 1=Config PHY as Master, + * 0=Configure PHY as Slave */ +#define MII_ATLX_CR_1000T_MS_ENABLE 0x1000 /* 1=Man Master/Slave config, + * 0=Auto Master/Slave config + */ +#define MII_ATLX_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define MII_ATLX_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define MII_ATLX_CR_1000T_TEST_MODE_2 0x4000 /* Master Xmit Jitter test */ +#define MII_ATLX_CR_1000T_TEST_MODE_3 0x6000 /* Slave Xmit Jitter test */ +#define MII_ATLX_CR_1000T_TEST_MODE_4 0x8000 /* Xmitter Distortion test */ +#define MII_ATLX_CR_1000T_SPEED_MASK 0x0300 +#define MII_ATLX_CR_1000T_DEFAULT_CAP_MASK 0x0300 + +/* 1000BASE-T Status Register */ +#define MII_ATLX_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define MII_ATLX_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local 
receiver OK */ +#define MII_ATLX_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master + * 0=Slave + */ +#define MII_ATLX_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config + * fault */ +#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS_SHIFT 13 + +/* Extended Status Register */ +#define MII_ATLX_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define MII_ATLX_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define MII_ATLX_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define MII_ATLX_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +/* ATLX PHY Specific Control Register */ +#define MII_ATLX_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Func disabled */ +#define MII_ATLX_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enbld */ +#define MII_ATLX_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define MII_ATLX_PSCR_MAC_POWERDOWN 0x0008 +#define MII_ATLX_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low + * 0=CLK125 toggling + */ +#define MII_ATLX_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5, + * Manual MDI configuration + */ +#define MII_ATLX_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define MII_ATLX_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover + * 100BASE-TX/10BASE-T: MDI + * Mode */ +#define MII_ATLX_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. + */ +#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended + * 10BASE-T distance + * (Lower 10BASE-T RX + * Threshold) + * 0=Normal 10BASE-T RX + * Threshold + */ +#define MII_ATLX_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in + * 100BASE-TX + * 0=MII interface in + * 100BASE-TX + */ +#define MII_ATLX_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler dsbl */ +#define MII_ATLX_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define MII_ATLX_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ +#define MII_ATLX_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define MII_ATLX_PSCR_AUTO_X_MODE_SHIFT 5 +#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* ATLX PHY Specific Status Register */ +#define MII_ATLX_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define MII_ATLX_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define MII_ATLX_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define MII_ATLX_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +/* PCI Command Register Bit Definitions */ +#define PCI_REG_COMMAND 0x04 /* PCI Command Register */ +#define CMD_IO_SPACE 0x0001 +#define CMD_MEMORY_SPACE 0x0002 +#define CMD_BUS_MASTER 0x0004 + +/* Wake Up Filter Control */ +#define ATLX_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define ATLX_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define ATLX_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define ATLX_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define ATLX_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ + +/* For checksumming, the sum 
of all words in the EEPROM should equal 0xBABA */ +#define EEPROM_SUM 0xBABA +#define NODE_ADDRESS_SIZE 6 + +struct atlx_spi_flash_dev { + const char *manu_name; /* manufacturer id */ + /* op-code */ + u8 cmd_wrsr; + u8 cmd_read; + u8 cmd_program; + u8 cmd_wren; + u8 cmd_wrdi; + u8 cmd_rdsr; + u8 cmd_rdid; + u8 cmd_sector_erase; + u8 cmd_chip_erase; +}; + +#endif /* ATLX_H */ diff --git a/drivers/net/b44.c b/drivers/net/b44.c new file mode 100644 index 0000000..7b873c9 --- /dev/null +++ b/drivers/net/b44.c @@ -0,0 +1,2387 @@ +/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver. + * + * Copyright (C) 2002 David S. Miller (davem@redhat.com) + * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi) + * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org) + * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org) + * Copyright (C) 2006 Broadcom Corporation. + * Copyright (C) 2007 Michael Buesch + * + * Distribute under GPL. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +#include "b44.h" + +#define DRV_MODULE_NAME "b44" +#define DRV_MODULE_VERSION "2.0" + +#define B44_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ +#define B44_TX_TIMEOUT (5 * HZ) + +/* hardware minimum and maximum for a single frame's data payload */ +#define B44_MIN_MTU 60 +#define B44_MAX_MTU 1500 + +#define B44_RX_RING_SIZE 512 +#define B44_DEF_RX_RING_PENDING 200 +#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \ + B44_RX_RING_SIZE) +#define B44_TX_RING_SIZE 512 +#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1) +#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \ + B44_TX_RING_SIZE) + +#define TX_RING_GAP(BP) \ + (B44_TX_RING_SIZE - (BP)->tx_pending) +#define TX_BUFFS_AVAIL(BP) \ + (((BP)->tx_cons <= (BP)->tx_prod) ? \ + (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \ + (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP)) +#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1)) + +#define RX_PKT_OFFSET (RX_HEADER_LEN + 2) +#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET) + +/* minimum number of free TX descriptors required to wake up TX process */ +#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4) + +/* b44 internal pattern match filter info */ +#define B44_PATTERN_BASE 0x400 +#define B44_PATTERN_SIZE 0x80 +#define B44_PMASK_BASE 0x600 +#define B44_PMASK_SIZE 0x10 +#define B44_MAX_PATTERNS 16 +#define B44_ETHIPV6UDP_HLEN 62 +#define B44_ETHIPV4UDP_HLEN 42 + +static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n"; + +MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. 
Miller"); +MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */ +module_param(b44_debug, int, 0); +MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); + + +#ifdef CONFIG_B44_PCI +static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = { + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, + { 0 } /* terminate list with empty entry */ +}; +MODULE_DEVICE_TABLE(pci, b44_pci_tbl); + +static struct pci_driver b44_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = b44_pci_tbl, +}; +#endif /* CONFIG_B44_PCI */ + +static const struct ssb_device_id b44_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV), + SSB_DEVTABLE_END +}; +MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl); + +static void b44_halt(struct b44 *); +static void b44_init_rings(struct b44 *); + +#define B44_FULL_RESET 1 +#define B44_FULL_RESET_SKIP_PHY 2 +#define B44_PARTIAL_RESET 3 +#define B44_CHIP_RESET_FULL 4 +#define B44_CHIP_RESET_PARTIAL 5 + +static void b44_init_hw(struct b44 *, int); + +static int dma_desc_align_mask; +static int dma_desc_sync_size; +static int instance; + +static const char b44_gstrings[][ETH_GSTRING_LEN] = { +#define _B44(x...) # x, +B44_STAT_REG_DECLARE +#undef _B44 +}; + +static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) +{ + ssb_dma_sync_single_range_for_device(sdev, dma_base, + offset & dma_desc_align_mask, + dma_desc_sync_size, dir); +} + +static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) +{ + ssb_dma_sync_single_range_for_cpu(sdev, dma_base, + offset & dma_desc_align_mask, + dma_desc_sync_size, dir); +} + +static inline unsigned long br32(const struct b44 *bp, unsigned long reg) +{ + return ssb_read32(bp->sdev, reg); +} + +static inline void bw32(const struct b44 *bp, + unsigned long reg, unsigned long val) +{ + ssb_write32(bp->sdev, reg, val); +} + +static int b44_wait_bit(struct b44 *bp, unsigned long reg, + u32 bit, unsigned long timeout, const int clear) +{ + unsigned long i; + + for (i = 0; i < timeout; i++) { + u32 val = br32(bp, reg); + + if (clear && !(val & bit)) + break; + if (!clear && (val & bit)) + break; + udelay(10); + } + if (i == timeout) { + if (net_ratelimit()) + netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n", + bit, reg, clear ? 
"clear" : "set"); + + return -ENODEV; + } + return 0; +} + +static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index) +{ + u32 val; + + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ | + (index << CAM_CTRL_INDEX_SHIFT))); + + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); + + val = br32(bp, B44_CAM_DATA_LO); + + data[2] = (val >> 24) & 0xFF; + data[3] = (val >> 16) & 0xFF; + data[4] = (val >> 8) & 0xFF; + data[5] = (val >> 0) & 0xFF; + + val = br32(bp, B44_CAM_DATA_HI); + + data[0] = (val >> 8) & 0xFF; + data[1] = (val >> 0) & 0xFF; +} + +static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) +{ + u32 val; + + val = ((u32) data[2]) << 24; + val |= ((u32) data[3]) << 16; + val |= ((u32) data[4]) << 8; + val |= ((u32) data[5]) << 0; + bw32(bp, B44_CAM_DATA_LO, val); + val = (CAM_DATA_HI_VALID | + (((u32) data[0]) << 8) | + (((u32) data[1]) << 0)); + bw32(bp, B44_CAM_DATA_HI, val); + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE | + (index << CAM_CTRL_INDEX_SHIFT))); + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); +} + +static inline void __b44_disable_ints(struct b44 *bp) +{ + bw32(bp, B44_IMASK, 0); +} + +static void b44_disable_ints(struct b44 *bp) +{ + __b44_disable_ints(bp); + + /* Flush posted writes. */ + br32(bp, B44_IMASK); +} + +static void b44_enable_ints(struct b44 *bp) +{ + bw32(bp, B44_IMASK, bp->imask); +} + +static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val) +{ + int err; + + bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); + bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | + (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) | + (phy_addr << MDIO_DATA_PMD_SHIFT) | + (reg << MDIO_DATA_RA_SHIFT) | + (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT))); + err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); + *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA; + + return err; +} + +static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val) +{ + bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); + bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | + (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) | + (phy_addr << MDIO_DATA_PMD_SHIFT) | + (reg << MDIO_DATA_RA_SHIFT) | + (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) | + (val & MDIO_DATA_DATA))); + return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); +} + +static inline int b44_readphy(struct b44 *bp, int reg, u32 *val) +{ + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + + return __b44_readphy(bp, bp->phy_addr, reg, val); +} + +static inline int b44_writephy(struct b44 *bp, int reg, u32 val) +{ + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + + return __b44_writephy(bp, bp->phy_addr, reg, val); +} + +/* miilib interface */ +static int b44_mii_read(struct net_device *dev, int phy_id, int location) +{ + u32 val; + struct b44 *bp = netdev_priv(dev); + int rc = __b44_readphy(bp, phy_id, location, &val); + if (rc) + return 0xffffffff; + return val; +} + +static void b44_mii_write(struct net_device *dev, int phy_id, int location, + int val) +{ + struct b44 *bp = netdev_priv(dev); + __b44_writephy(bp, phy_id, location, val); +} + +static int b44_phy_reset(struct b44 *bp) +{ + u32 val; + int err; + + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + err = b44_writephy(bp, MII_BMCR, BMCR_RESET); + if (err) + return err; + udelay(100); + err = b44_readphy(bp, MII_BMCR, &val); + if (!err) { + if (val & BMCR_RESET) { + netdev_err(bp->dev, "PHY Reset would not complete\n"); + err = -ENODEV; + } + } + + return err; +} + +static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags) +{ + 
u32 val; + + bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE); + bp->flags |= pause_flags; + + val = br32(bp, B44_RXCONFIG); + if (pause_flags & B44_FLAG_RX_PAUSE) + val |= RXCONFIG_FLOW; + else + val &= ~RXCONFIG_FLOW; + bw32(bp, B44_RXCONFIG, val); + + val = br32(bp, B44_MAC_FLOW); + if (pause_flags & B44_FLAG_TX_PAUSE) + val |= (MAC_FLOW_PAUSE_ENAB | + (0xc0 & MAC_FLOW_RX_HI_WATER)); + else + val &= ~MAC_FLOW_PAUSE_ENAB; + bw32(bp, B44_MAC_FLOW, val); +} + +static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) +{ + u32 pause_enab = 0; + + /* The driver supports only rx pause by default because + the b44 mac tx pause mechanism generates excessive + pause frames. + Use ethtool to turn on b44 tx pause if necessary. + */ + if ((local & ADVERTISE_PAUSE_CAP) && + (local & ADVERTISE_PAUSE_ASYM)){ + if ((remote & LPA_PAUSE_ASYM) && + !(remote & LPA_PAUSE_CAP)) + pause_enab |= B44_FLAG_RX_PAUSE; + } + + __b44_set_flow_ctrl(bp, pause_enab); +} + +#ifdef SSB_DRIVER_MIPS +extern char *nvram_get(char *name); +static void b44_wap54g10_workaround(struct b44 *bp) +{ + const char *str; + u32 val; + int err; + + /* + * workaround for bad hardware design in Linksys WAP54G v1.0 + * see https://dev.openwrt.org/ticket/146 + * check and reset bit "isolate" + */ + str = nvram_get("boardnum"); + if (!str) + return; + if (simple_strtoul(str, NULL, 0) == 2) { + err = __b44_readphy(bp, 0, MII_BMCR, &val); + if (err) + goto error; + if (!(val & BMCR_ISOLATE)) + return; + val &= ~BMCR_ISOLATE; + err = __b44_writephy(bp, 0, MII_BMCR, val); + if (err) + goto error; + } + return; +error: + pr_warning("PHY: cannot reset MII transceiver isolate bit\n"); +} +#else +static inline void b44_wap54g10_workaround(struct b44 *bp) +{ +} +#endif + +static int b44_setup_phy(struct b44 *bp) +{ + u32 val; + int err; + + b44_wap54g10_workaround(bp); + + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) + goto out; + if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, + val & MII_ALEDCTRL_ALLMSK)) != 0) + goto out; + if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0) + goto out; + if ((err = b44_writephy(bp, B44_MII_TLEDCTRL, + val | MII_TLEDCTRL_ENABLE)) != 0) + goto out; + + if (!(bp->flags & B44_FLAG_FORCE_LINK)) { + u32 adv = ADVERTISE_CSMA; + + if (bp->flags & B44_FLAG_ADV_10HALF) + adv |= ADVERTISE_10HALF; + if (bp->flags & B44_FLAG_ADV_10FULL) + adv |= ADVERTISE_10FULL; + if (bp->flags & B44_FLAG_ADV_100HALF) + adv |= ADVERTISE_100HALF; + if (bp->flags & B44_FLAG_ADV_100FULL) + adv |= ADVERTISE_100FULL; + + if (bp->flags & B44_FLAG_PAUSE_AUTO) + adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0) + goto out; + if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE | + BMCR_ANRESTART))) != 0) + goto out; + } else { + u32 bmcr; + + if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0) + goto out; + bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100); + if (bp->flags & B44_FLAG_100_BASE_T) + bmcr |= BMCR_SPEED100; + if (bp->flags & B44_FLAG_FULL_DUPLEX) + bmcr |= BMCR_FULLDPLX; + if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0) + goto out; + + /* Since we will not be negotiating there is no safe way + * to determine if the link partner supports flow control + * or not. So just disable it completely in this case. 
+ */ + b44_set_flow_ctrl(bp, 0, 0); + } + +out: + return err; +} + +static void b44_stats_update(struct b44 *bp) +{ + unsigned long reg; + u32 *val; + + val = &bp->hw_stats.tx_good_octets; + for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) { + *val++ += br32(bp, reg); + } + + /* Pad */ + reg += 8*4UL; + + for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) { + *val++ += br32(bp, reg); + } +} + +static void b44_link_report(struct b44 *bp) +{ + if (!netif_carrier_ok(bp->dev)) { + netdev_info(bp->dev, "Link is down\n"); + } else { + netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n", + (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10, + (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half"); + + netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n", + (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off", + (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off"); + } +} + +static void b44_check_phy(struct b44 *bp) +{ + u32 bmsr, aux; + + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { + bp->flags |= B44_FLAG_100_BASE_T; + bp->flags |= B44_FLAG_FULL_DUPLEX; + if (!netif_carrier_ok(bp->dev)) { + u32 val = br32(bp, B44_TX_CTRL); + val |= TX_CTRL_DUPLEX; + bw32(bp, B44_TX_CTRL, val); + netif_carrier_on(bp->dev); + b44_link_report(bp); + } + return; + } + + if (!b44_readphy(bp, MII_BMSR, &bmsr) && + !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && + (bmsr != 0xffff)) { + if (aux & MII_AUXCTRL_SPEED) + bp->flags |= B44_FLAG_100_BASE_T; + else + bp->flags &= ~B44_FLAG_100_BASE_T; + if (aux & MII_AUXCTRL_DUPLEX) + bp->flags |= B44_FLAG_FULL_DUPLEX; + else + bp->flags &= ~B44_FLAG_FULL_DUPLEX; + + if (!netif_carrier_ok(bp->dev) && + (bmsr & BMSR_LSTATUS)) { + u32 val = br32(bp, B44_TX_CTRL); + u32 local_adv, remote_adv; + + if (bp->flags & B44_FLAG_FULL_DUPLEX) + val |= TX_CTRL_DUPLEX; + else + val &= ~TX_CTRL_DUPLEX; + bw32(bp, B44_TX_CTRL, val); + + if (!(bp->flags & B44_FLAG_FORCE_LINK) && + !b44_readphy(bp, MII_ADVERTISE, &local_adv) && + !b44_readphy(bp, MII_LPA, &remote_adv)) + b44_set_flow_ctrl(bp, local_adv, remote_adv); + + /* Link now up */ + netif_carrier_on(bp->dev); + b44_link_report(bp); + } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) { + /* Link now down */ + netif_carrier_off(bp->dev); + b44_link_report(bp); + } + + if (bmsr & BMSR_RFAULT) + netdev_warn(bp->dev, "Remote fault detected in PHY\n"); + if (bmsr & BMSR_JCD) + netdev_warn(bp->dev, "Jabber detected in PHY\n"); + } +} + +static void b44_timer(unsigned long __opaque) +{ + struct b44 *bp = (struct b44 *) __opaque; + + spin_lock_irq(&bp->lock); + + b44_check_phy(bp); + + b44_stats_update(bp); + + spin_unlock_irq(&bp->lock); + + mod_timer(&bp->timer, round_jiffies(jiffies + HZ)); +} + +static void b44_tx(struct b44 *bp) +{ + u32 cur, cons; + + cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK; + cur /= sizeof(struct dma_desc); + + /* XXX needs updating when NETIF_F_SG is supported */ + for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) { + struct ring_info *rp = &bp->tx_buffers[cons]; + struct sk_buff *skb = rp->skb; + + BUG_ON(skb == NULL); + + ssb_dma_unmap_single(bp->sdev, + rp->mapping, + skb->len, + DMA_TO_DEVICE); + rp->skb = NULL; + dev_kfree_skb_irq(skb); + } + + bp->tx_cons = cons; + if (netif_queue_stopped(bp->dev) && + TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) + netif_wake_queue(bp->dev); + + bw32(bp, B44_GPTIMER, 0); +} + +/* Works like this. This chip writes a 'struct rx_header" 30 bytes + * before the DMA address you give it. 
So we allocate 30 more bytes + * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then + * point the chip at 30 bytes past where the rx_header will go. + */ +static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) +{ + struct dma_desc *dp; + struct ring_info *src_map, *map; + struct rx_header *rh; + struct sk_buff *skb; + dma_addr_t mapping; + int dest_idx; + u32 ctrl; + + src_map = NULL; + if (src_idx >= 0) + src_map = &bp->rx_buffers[src_idx]; + dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); + map = &bp->rx_buffers[dest_idx]; + skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ); + if (skb == NULL) + return -ENOMEM; + + mapping = ssb_dma_map_single(bp->sdev, skb->data, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + + /* Hardware bug work-around, the chip is unable to do PCI DMA + to/from anything above 1GB :-( */ + if (ssb_dma_mapping_error(bp->sdev, mapping) || + mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { + /* Sigh... */ + if (!ssb_dma_mapping_error(bp->sdev, mapping)) + ssb_dma_unmap_single(bp->sdev, mapping, + RX_PKT_BUF_SZ, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); + if (skb == NULL) + return -ENOMEM; + mapping = ssb_dma_map_single(bp->sdev, skb->data, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + if (ssb_dma_mapping_error(bp->sdev, mapping) || + mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { + if (!ssb_dma_mapping_error(bp->sdev, mapping)) + ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + return -ENOMEM; + } + bp->force_copybreak = 1; + } + + rh = (struct rx_header *) skb->data; + + rh->len = 0; + rh->flags = 0; + + map->skb = skb; + map->mapping = mapping; + + if (src_map != NULL) + src_map->skb = NULL; + + ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ); + if (dest_idx == (B44_RX_RING_SIZE - 1)) + ctrl |= DESC_CTRL_EOT; + + dp = &bp->rx_ring[dest_idx]; + dp->ctrl = cpu_to_le32(ctrl); + dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset); + + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, + dest_idx * sizeof(*dp), + DMA_BIDIRECTIONAL); + + return RX_PKT_BUF_SZ; +} + +static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) +{ + struct dma_desc *src_desc, *dest_desc; + struct ring_info *src_map, *dest_map; + struct rx_header *rh; + int dest_idx; + __le32 ctrl; + + dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); + dest_desc = &bp->rx_ring[dest_idx]; + dest_map = &bp->rx_buffers[dest_idx]; + src_desc = &bp->rx_ring[src_idx]; + src_map = &bp->rx_buffers[src_idx]; + + dest_map->skb = src_map->skb; + rh = (struct rx_header *) src_map->skb->data; + rh->len = 0; + rh->flags = 0; + dest_map->mapping = src_map->mapping; + + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma, + src_idx * sizeof(*src_desc), + DMA_BIDIRECTIONAL); + + ctrl = src_desc->ctrl; + if (dest_idx == (B44_RX_RING_SIZE - 1)) + ctrl |= cpu_to_le32(DESC_CTRL_EOT); + else + ctrl &= cpu_to_le32(~DESC_CTRL_EOT); + + dest_desc->ctrl = ctrl; + dest_desc->addr = src_desc->addr; + + src_map->skb = NULL; + + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, + dest_idx * sizeof(*dest_desc), + DMA_BIDIRECTIONAL); + + ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); +} + +static int b44_rx(struct b44 *bp, int budget) +{ + int received; + u32 cons, prod; + + received = 0; + 
prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK; + prod /= sizeof(struct dma_desc); + cons = bp->rx_cons; + + while (cons != prod && budget > 0) { + struct ring_info *rp = &bp->rx_buffers[cons]; + struct sk_buff *skb = rp->skb; + dma_addr_t map = rp->mapping; + struct rx_header *rh; + u16 len; + + ssb_dma_sync_single_for_cpu(bp->sdev, map, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + rh = (struct rx_header *) skb->data; + len = le16_to_cpu(rh->len); + if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) || + (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) { + drop_it: + b44_recycle_rx(bp, cons, bp->rx_prod); + drop_it_no_recycle: + bp->dev->stats.rx_dropped++; + goto next_pkt; + } + + if (len == 0) { + int i = 0; + + do { + udelay(2); + barrier(); + len = le16_to_cpu(rh->len); + } while (len == 0 && i++ < 5); + if (len == 0) + goto drop_it; + } + + /* Omit CRC. */ + len -= 4; + + if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) { + int skb_size; + skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); + if (skb_size < 0) + goto drop_it; + ssb_dma_unmap_single(bp->sdev, map, + skb_size, DMA_FROM_DEVICE); + /* Leave out rx_header */ + skb_put(skb, len + RX_PKT_OFFSET); + skb_pull(skb, RX_PKT_OFFSET); + } else { + struct sk_buff *copy_skb; + + b44_recycle_rx(bp, cons, bp->rx_prod); + copy_skb = netdev_alloc_skb(bp->dev, len + 2); + if (copy_skb == NULL) + goto drop_it_no_recycle; + + skb_reserve(copy_skb, 2); + skb_put(copy_skb, len); + /* DMA sync done above, copy just the actual packet */ + skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET, + copy_skb->data, len); + skb = copy_skb; + } + skb->ip_summed = CHECKSUM_NONE; + skb->protocol = eth_type_trans(skb, bp->dev); + netif_receive_skb(skb); + received++; + budget--; + next_pkt: + bp->rx_prod = (bp->rx_prod + 1) & + (B44_RX_RING_SIZE - 1); + cons = (cons + 1) & (B44_RX_RING_SIZE - 1); + } + + bp->rx_cons = cons; + bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc)); + + return received; +} + +static int b44_poll(struct napi_struct *napi, int budget) +{ + struct b44 *bp = container_of(napi, struct b44, napi); + int work_done; + unsigned long flags; + + spin_lock_irqsave(&bp->lock, flags); + + if (bp->istat & (ISTAT_TX | ISTAT_TO)) { + /* spin_lock(&bp->tx_lock); */ + b44_tx(bp); + /* spin_unlock(&bp->tx_lock); */ + } + spin_unlock_irqrestore(&bp->lock, flags); + + work_done = 0; + if (bp->istat & ISTAT_RX) + work_done += b44_rx(bp, budget); + + if (bp->istat & ISTAT_ERRORS) { + spin_lock_irqsave(&bp->lock, flags); + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); + netif_wake_queue(bp->dev); + spin_unlock_irqrestore(&bp->lock, flags); + work_done = 0; + } + + if (work_done < budget) { + napi_complete(napi); + b44_enable_ints(bp); + } + + return work_done; +} + +static irqreturn_t b44_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct b44 *bp = netdev_priv(dev); + u32 istat, imask; + int handled = 0; + + spin_lock(&bp->lock); + + istat = br32(bp, B44_ISTAT); + imask = br32(bp, B44_IMASK); + + /* The interrupt mask register controls which interrupt bits + * will actually raise an interrupt to the CPU when set by hw/firmware, + * but doesn't mask off the bits. + */ + istat &= imask; + if (istat) { + handled = 1; + + if (unlikely(!netif_running(dev))) { + netdev_info(dev, "late interrupt\n"); + goto irq_ack; + } + + if (napi_schedule_prep(&bp->napi)) { + /* NOTE: These writes are posted by the readback of + * the ISTAT register below. 
+ */ + bp->istat = istat; + __b44_disable_ints(bp); + __napi_schedule(&bp->napi); + } + +irq_ack: + bw32(bp, B44_ISTAT, istat); + br32(bp, B44_ISTAT); + } + spin_unlock(&bp->lock); + return IRQ_RETVAL(handled); +} + +static void b44_tx_timeout(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + + netdev_err(dev, "transmit timed out, resetting\n"); + + spin_lock_irq(&bp->lock); + + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + netif_wake_queue(dev); +} + +static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + int rc = NETDEV_TX_OK; + dma_addr_t mapping; + u32 len, entry, ctrl; + unsigned long flags; + + len = skb->len; + spin_lock_irqsave(&bp->lock, flags); + + /* This is a hard error, log it. */ + if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { + netif_stop_queue(dev); + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_out; + } + + mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE); + if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) { + struct sk_buff *bounce_skb; + + /* Chip can't handle DMA to/from >1GB, use bounce buffer */ + if (!ssb_dma_mapping_error(bp->sdev, mapping)) + ssb_dma_unmap_single(bp->sdev, mapping, len, + DMA_TO_DEVICE); + + bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA); + if (!bounce_skb) + goto err_out; + + mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data, + len, DMA_TO_DEVICE); + if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) { + if (!ssb_dma_mapping_error(bp->sdev, mapping)) + ssb_dma_unmap_single(bp->sdev, mapping, + len, DMA_TO_DEVICE); + dev_kfree_skb_any(bounce_skb); + goto err_out; + } + + skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len); + dev_kfree_skb_any(skb); + skb = bounce_skb; + } + + entry = bp->tx_prod; + bp->tx_buffers[entry].skb = skb; + bp->tx_buffers[entry].mapping = mapping; + + ctrl = (len & DESC_CTRL_LEN); + ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF; + if (entry == (B44_TX_RING_SIZE - 1)) + ctrl |= DESC_CTRL_EOT; + + bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); + bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); + + if (bp->flags & B44_FLAG_TX_RING_HACK) + b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma, + entry * sizeof(bp->tx_ring[0]), + DMA_TO_DEVICE); + + entry = NEXT_TX(entry); + + bp->tx_prod = entry; + + wmb(); + + bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); + if (bp->flags & B44_FLAG_BUGGY_TXPTR) + bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); + if (bp->flags & B44_FLAG_REORDER_BUG) + br32(bp, B44_DMATX_PTR); + + if (TX_BUFFS_AVAIL(bp) < 1) + netif_stop_queue(dev); + + dev->trans_start = jiffies; + +out_unlock: + spin_unlock_irqrestore(&bp->lock, flags); + + return rc; + +err_out: + rc = NETDEV_TX_BUSY; + goto out_unlock; +} + +static int b44_change_mtu(struct net_device *dev, int new_mtu) +{ + struct b44 *bp = netdev_priv(dev); + + if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU) + return -EINVAL; + + if (!netif_running(dev)) { + /* We'll just catch it later when the + * device is up'd. + */ + dev->mtu = new_mtu; + return 0; + } + + spin_lock_irq(&bp->lock); + b44_halt(bp); + dev->mtu = new_mtu; + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + return 0; +} + +/* Free up pending packets in all rx/tx rings. 
+ * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. bp->lock is not held and we are not + * in an interrupt context and thus may sleep. + */ +static void b44_free_rings(struct b44 *bp) +{ + struct ring_info *rp; + int i; + + for (i = 0; i < B44_RX_RING_SIZE; i++) { + rp = &bp->rx_buffers[i]; + + if (rp->skb == NULL) + continue; + ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + dev_kfree_skb_any(rp->skb); + rp->skb = NULL; + } + + /* XXX needs changes once NETIF_F_SG is set... */ + for (i = 0; i < B44_TX_RING_SIZE; i++) { + rp = &bp->tx_buffers[i]; + + if (rp->skb == NULL) + continue; + ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len, + DMA_TO_DEVICE); + dev_kfree_skb_any(rp->skb); + rp->skb = NULL; + } +} + +/* Initialize tx/rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. + */ +static void b44_init_rings(struct b44 *bp) +{ + int i; + + b44_free_rings(bp); + + memset(bp->rx_ring, 0, B44_RX_RING_BYTES); + memset(bp->tx_ring, 0, B44_TX_RING_BYTES); + + if (bp->flags & B44_FLAG_RX_RING_HACK) + ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); + + if (bp->flags & B44_FLAG_TX_RING_HACK) + ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); + + for (i = 0; i < bp->rx_pending; i++) { + if (b44_alloc_rx_skb(bp, -1, i) < 0) + break; + } +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. + */ +static void b44_free_consistent(struct b44 *bp) +{ + kfree(bp->rx_buffers); + bp->rx_buffers = NULL; + kfree(bp->tx_buffers); + bp->tx_buffers = NULL; + if (bp->rx_ring) { + if (bp->flags & B44_FLAG_RX_RING_HACK) { + ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); + kfree(bp->rx_ring); + } else + ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES, + bp->rx_ring, bp->rx_ring_dma, + GFP_KERNEL); + bp->rx_ring = NULL; + bp->flags &= ~B44_FLAG_RX_RING_HACK; + } + if (bp->tx_ring) { + if (bp->flags & B44_FLAG_TX_RING_HACK) { + ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); + kfree(bp->tx_ring); + } else + ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES, + bp->tx_ring, bp->tx_ring_dma, + GFP_KERNEL); + bp->tx_ring = NULL; + bp->flags &= ~B44_FLAG_TX_RING_HACK; + } +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. Can sleep. + */ +static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) +{ + int size; + + size = B44_RX_RING_SIZE * sizeof(struct ring_info); + bp->rx_buffers = kzalloc(size, gfp); + if (!bp->rx_buffers) + goto out_err; + + size = B44_TX_RING_SIZE * sizeof(struct ring_info); + bp->tx_buffers = kzalloc(size, gfp); + if (!bp->tx_buffers) + goto out_err; + + size = DMA_TABLE_BYTES; + bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp); + if (!bp->rx_ring) { + /* Allocation may have failed due to pci_alloc_consistent + insisting on use of GFP_DMA, which is more restrictive + than necessary... 
*/ + struct dma_desc *rx_ring; + dma_addr_t rx_ring_dma; + + rx_ring = kzalloc(size, gfp); + if (!rx_ring) + goto out_err; + + rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); + + if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) || + rx_ring_dma + size > DMA_BIT_MASK(30)) { + kfree(rx_ring); + goto out_err; + } + + bp->rx_ring = rx_ring; + bp->rx_ring_dma = rx_ring_dma; + bp->flags |= B44_FLAG_RX_RING_HACK; + } + + bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp); + if (!bp->tx_ring) { + /* Allocation may have failed due to ssb_dma_alloc_consistent + insisting on use of GFP_DMA, which is more restrictive + than necessary... */ + struct dma_desc *tx_ring; + dma_addr_t tx_ring_dma; + + tx_ring = kzalloc(size, gfp); + if (!tx_ring) + goto out_err; + + tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); + + if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) || + tx_ring_dma + size > DMA_BIT_MASK(30)) { + kfree(tx_ring); + goto out_err; + } + + bp->tx_ring = tx_ring; + bp->tx_ring_dma = tx_ring_dma; + bp->flags |= B44_FLAG_TX_RING_HACK; + } + + return 0; + +out_err: + b44_free_consistent(bp); + return -ENOMEM; +} + +/* bp->lock is held. */ +static void b44_clear_stats(struct b44 *bp) +{ + unsigned long reg; + + bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); + for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) + br32(bp, reg); + for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) + br32(bp, reg); +} + +/* bp->lock is held. */ +static void b44_chip_reset(struct b44 *bp, int reset_kind) +{ + struct ssb_device *sdev = bp->sdev; + bool was_enabled; + + was_enabled = ssb_device_is_enabled(bp->sdev); + + ssb_device_enable(bp->sdev, 0); + ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev); + + if (was_enabled) { + bw32(bp, B44_RCV_LAZY, 0); + bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); + b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1); + bw32(bp, B44_DMATX_CTRL, 0); + bp->tx_prod = bp->tx_cons = 0; + if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) { + b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE, + 100, 0); + } + bw32(bp, B44_DMARX_CTRL, 0); + bp->rx_prod = bp->rx_cons = 0; + } + + b44_clear_stats(bp); + + /* + * Don't enable PHY if we are doing a partial reset + * we are probably going to power down + */ + if (reset_kind == B44_CHIP_RESET_PARTIAL) + return; + + switch (sdev->bus->bustype) { + case SSB_BUSTYPE_SSB: + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | + (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus), + B44_MDC_RATIO) + & MDIO_CTRL_MAXF_MASK))); + break; + case SSB_BUSTYPE_PCI: + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | + (0x0d & MDIO_CTRL_MAXF_MASK))); + break; + case SSB_BUSTYPE_PCMCIA: + case SSB_BUSTYPE_SDIO: + WARN_ON(1); /* A device with this bus does not exist. */ + break; + } + + br32(bp, B44_MDIO_CTRL); + + if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { + bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL); + br32(bp, B44_ENET_CTRL); + bp->flags &= ~B44_FLAG_INTERNAL_PHY; + } else { + u32 val = br32(bp, B44_DEVCTRL); + + if (val & DEVCTRL_EPR) { + bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR)); + br32(bp, B44_DEVCTRL); + udelay(100); + } + bp->flags |= B44_FLAG_INTERNAL_PHY; + } +} + +/* bp->lock is held. 
*/ +static void b44_halt(struct b44 *bp) +{ + b44_disable_ints(bp); + /* reset PHY */ + b44_phy_reset(bp); + /* power down PHY */ + netdev_info(bp->dev, "powering down PHY\n"); + bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN); + /* now reset the chip, but without enabling the MAC&PHY + * part of it. This has to be done _after_ we shut down the PHY */ + b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); +} + +/* bp->lock is held. */ +static void __b44_set_mac_addr(struct b44 *bp) +{ + bw32(bp, B44_CAM_CTRL, 0); + if (!(bp->dev->flags & IFF_PROMISC)) { + u32 val; + + __b44_cam_write(bp, bp->dev->dev_addr, 0); + val = br32(bp, B44_CAM_CTRL); + bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); + } +} + +static int b44_set_mac_addr(struct net_device *dev, void *p) +{ + struct b44 *bp = netdev_priv(dev); + struct sockaddr *addr = p; + u32 val; + + if (netif_running(dev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + spin_lock_irq(&bp->lock); + + val = br32(bp, B44_RXCONFIG); + if (!(val & RXCONFIG_CAM_ABSENT)) + __b44_set_mac_addr(bp); + + spin_unlock_irq(&bp->lock); + + return 0; +} + +/* Called at device open time to get the chip ready for + * packet processing. Invoked with bp->lock held. + */ +static void __b44_set_rx_mode(struct net_device *); +static void b44_init_hw(struct b44 *bp, int reset_kind) +{ + u32 val; + + b44_chip_reset(bp, B44_CHIP_RESET_FULL); + if (reset_kind == B44_FULL_RESET) { + b44_phy_reset(bp); + b44_setup_phy(bp); + } + + /* Enable CRC32, set proper LED modes and power on PHY */ + bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL); + bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT)); + + /* This sets the MAC address too. */ + __b44_set_rx_mode(bp->dev); + + /* MTU + eth header + possible VLAN tag + struct rx_header */ + bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); + bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); + + bw32(bp, B44_TX_WMARK, 56); /* XXX magic */ + if (reset_kind == B44_PARTIAL_RESET) { + bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | + (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))); + } else { + bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE); + bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset); + bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | + (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))); + bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset); + + bw32(bp, B44_DMARX_PTR, bp->rx_pending); + bp->rx_prod = bp->rx_pending; + + bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); + } + + val = br32(bp, B44_ENET_CTRL); + bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE)); +} + +static int b44_open(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + int err; + + err = b44_alloc_consistent(bp, GFP_KERNEL); + if (err) + goto out; + + napi_enable(&bp->napi); + + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + + b44_check_phy(bp); + + err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); + if (unlikely(err < 0)) { + napi_disable(&bp->napi); + b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); + b44_free_rings(bp); + b44_free_consistent(bp); + goto out; + } + + init_timer(&bp->timer); + bp->timer.expires = jiffies + HZ; + bp->timer.data = (unsigned long) bp; + bp->timer.function = b44_timer; + add_timer(&bp->timer); + + b44_enable_ints(bp); + netif_start_queue(dev); +out: + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to 
allow network i/o with interrupts disabled. + */ +static void b44_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + b44_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset) +{ + u32 i; + u32 *pattern = (u32 *) pp; + + for (i = 0; i < bytes; i += sizeof(u32)) { + bw32(bp, B44_FILT_ADDR, table_offset + i); + bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]); + } +} + +static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) +{ + int magicsync = 6; + int k, j, len = offset; + int ethaddr_bytes = ETH_ALEN; + + memset(ppattern + offset, 0xff, magicsync); + for (j = 0; j < magicsync; j++) + set_bit(len++, (unsigned long *) pmask); + + for (j = 0; j < B44_MAX_PATTERNS; j++) { + if ((B44_PATTERN_SIZE - len) >= ETH_ALEN) + ethaddr_bytes = ETH_ALEN; + else + ethaddr_bytes = B44_PATTERN_SIZE - len; + if (ethaddr_bytes <=0) + break; + for (k = 0; k< ethaddr_bytes; k++) { + ppattern[offset + magicsync + + (j * ETH_ALEN) + k] = macaddr[k]; + set_bit(len++, (unsigned long *) pmask); + } + } + return len - 1; +} + +/* Setup magic packet patterns in the b44 WOL + * pattern matching filter. + */ +static void b44_setup_pseudo_magicp(struct b44 *bp) +{ + + u32 val; + int plen0, plen1, plen2; + u8 *pwol_pattern; + u8 pwol_mask[B44_PMASK_SIZE]; + + pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL); + if (!pwol_pattern) { + pr_err("Memory not available for WOL\n"); + return; + } + + /* Ipv4 magic packet pattern - pattern 0.*/ + memset(pwol_mask, 0, B44_PMASK_SIZE); + plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, + B44_ETHIPV4UDP_HLEN); + + bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE); + bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE); + + /* Raw ethernet II magic packet pattern - pattern 1 */ + memset(pwol_pattern, 0, B44_PATTERN_SIZE); + memset(pwol_mask, 0, B44_PMASK_SIZE); + plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, + ETH_HLEN); + + bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, + B44_PATTERN_BASE + B44_PATTERN_SIZE); + bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, + B44_PMASK_BASE + B44_PMASK_SIZE); + + /* Ipv6 magic packet pattern - pattern 2 */ + memset(pwol_pattern, 0, B44_PATTERN_SIZE); + memset(pwol_mask, 0, B44_PMASK_SIZE); + plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, + B44_ETHIPV6UDP_HLEN); + + bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, + B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE); + bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, + B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE); + + kfree(pwol_pattern); + + /* set these pattern's lengths: one less than each real length */ + val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE; + bw32(bp, B44_WKUP_LEN, val); + + /* enable wakeup pattern matching */ + val = br32(bp, B44_DEVCTRL); + bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE); + +} + +#ifdef CONFIG_B44_PCI +static void b44_setup_wol_pci(struct b44 *bp) +{ + u16 val; + + if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) { + bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE); + pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val); + pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE); + } +} +#else +static inline void b44_setup_wol_pci(struct b44 *bp) { } +#endif /* CONFIG_B44_PCI */ + +static void b44_setup_wol(struct b44 *bp) +{ + u32 val; + + bw32(bp, B44_RXCONFIG, 
RXCONFIG_ALLMULTI); + + if (bp->flags & B44_FLAG_B0_ANDLATER) { + + bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE); + + val = bp->dev->dev_addr[2] << 24 | + bp->dev->dev_addr[3] << 16 | + bp->dev->dev_addr[4] << 8 | + bp->dev->dev_addr[5]; + bw32(bp, B44_ADDR_LO, val); + + val = bp->dev->dev_addr[0] << 8 | + bp->dev->dev_addr[1]; + bw32(bp, B44_ADDR_HI, val); + + val = br32(bp, B44_DEVCTRL); + bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE); + + } else { + b44_setup_pseudo_magicp(bp); + } + b44_setup_wol_pci(bp); +} + +static int b44_close(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + + netif_stop_queue(dev); + + napi_disable(&bp->napi); + + del_timer_sync(&bp->timer); + + spin_lock_irq(&bp->lock); + + b44_halt(bp); + b44_free_rings(bp); + netif_carrier_off(dev); + + spin_unlock_irq(&bp->lock); + + free_irq(dev->irq, dev); + + if (bp->flags & B44_FLAG_WOL_ENABLE) { + b44_init_hw(bp, B44_PARTIAL_RESET); + b44_setup_wol(bp); + } + + b44_free_consistent(bp); + + return 0; +} + +static struct net_device_stats *b44_get_stats(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + struct net_device_stats *nstat = &dev->stats; + struct b44_hw_stats *hwstat = &bp->hw_stats; + + /* Convert HW stats into netdevice stats. */ + nstat->rx_packets = hwstat->rx_pkts; + nstat->tx_packets = hwstat->tx_pkts; + nstat->rx_bytes = hwstat->rx_octets; + nstat->tx_bytes = hwstat->tx_octets; + nstat->tx_errors = (hwstat->tx_jabber_pkts + + hwstat->tx_oversize_pkts + + hwstat->tx_underruns + + hwstat->tx_excessive_cols + + hwstat->tx_late_cols); + nstat->multicast = hwstat->tx_multicast_pkts; + nstat->collisions = hwstat->tx_total_cols; + + nstat->rx_length_errors = (hwstat->rx_oversize_pkts + + hwstat->rx_undersize); + nstat->rx_over_errors = hwstat->rx_missed_pkts; + nstat->rx_frame_errors = hwstat->rx_align_errs; + nstat->rx_crc_errors = hwstat->rx_crc_errs; + nstat->rx_errors = (hwstat->rx_jabber_pkts + + hwstat->rx_oversize_pkts + + hwstat->rx_missed_pkts + + hwstat->rx_crc_align_errs + + hwstat->rx_undersize + + hwstat->rx_crc_errs + + hwstat->rx_align_errs + + hwstat->rx_symbol_errs); + + nstat->tx_aborted_errors = hwstat->tx_underruns; +#if 0 + /* Carrier lost counter seems to be broken for some devices */ + nstat->tx_carrier_errors = hwstat->tx_carrier_lost; +#endif + + return nstat; +} + +static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) +{ + struct dev_mc_list *mclist; + int i, num_ents; + + num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE); + i = 0; + netdev_for_each_mc_addr(mclist, dev) { + if (i == num_ents) + break; + __b44_cam_write(bp, mclist->dmi_addr, i++ + 1); + } + return i+1; +} + +static void __b44_set_rx_mode(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + u32 val; + + val = br32(bp, B44_RXCONFIG); + val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); + if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) { + val |= RXCONFIG_PROMISC; + bw32(bp, B44_RXCONFIG, val); + } else { + unsigned char zero[6] = {0, 0, 0, 0, 0, 0}; + int i = 1; + + __b44_set_mac_addr(bp); + + if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE)) + val |= RXCONFIG_ALLMULTI; + else + i = __b44_load_mcast(bp, dev); + + for (; i < 64; i++) + __b44_cam_write(bp, zero, i); + + bw32(bp, B44_RXCONFIG, val); + val = br32(bp, B44_CAM_CTRL); + bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); + } +} + +static void b44_set_rx_mode(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + + 
spin_lock_irq(&bp->lock); + __b44_set_rx_mode(dev); + spin_unlock_irq(&bp->lock); +} + +static u32 b44_get_msglevel(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + return bp->msg_enable; +} + +static void b44_set_msglevel(struct net_device *dev, u32 value) +{ + struct b44 *bp = netdev_priv(dev); + bp->msg_enable = value; +} + +static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct b44 *bp = netdev_priv(dev); + struct ssb_bus *bus = bp->sdev->bus; + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info)); + break; + case SSB_BUSTYPE_SSB: + strlcpy(info->bus_info, "SSB", sizeof(info->bus_info)); + break; + case SSB_BUSTYPE_PCMCIA: + case SSB_BUSTYPE_SDIO: + WARN_ON(1); /* A device with this bus does not exist. */ + break; + } +} + +static int b44_nway_reset(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + u32 bmcr; + int r; + + spin_lock_irq(&bp->lock); + b44_readphy(bp, MII_BMCR, &bmcr); + b44_readphy(bp, MII_BMCR, &bmcr); + r = -EINVAL; + if (bmcr & BMCR_ANENABLE) { + b44_writephy(bp, MII_BMCR, + bmcr | BMCR_ANRESTART); + r = 0; + } + spin_unlock_irq(&bp->lock); + + return r; +} + +static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct b44 *bp = netdev_priv(dev); + + cmd->supported = (SUPPORTED_Autoneg); + cmd->supported |= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_MII); + + cmd->advertising = 0; + if (bp->flags & B44_FLAG_ADV_10HALF) + cmd->advertising |= ADVERTISED_10baseT_Half; + if (bp->flags & B44_FLAG_ADV_10FULL) + cmd->advertising |= ADVERTISED_10baseT_Full; + if (bp->flags & B44_FLAG_ADV_100HALF) + cmd->advertising |= ADVERTISED_100baseT_Half; + if (bp->flags & B44_FLAG_ADV_100FULL) + cmd->advertising |= ADVERTISED_100baseT_Full; + cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ? + SPEED_100 : SPEED_10; + cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? + DUPLEX_FULL : DUPLEX_HALF; + cmd->port = 0; + cmd->phy_address = bp->phy_addr; + cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ? + XCVR_INTERNAL : XCVR_EXTERNAL; + cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ? + AUTONEG_DISABLE : AUTONEG_ENABLE; + if (cmd->autoneg == AUTONEG_ENABLE) + cmd->advertising |= ADVERTISED_Autoneg; + if (!netif_running(dev)){ + cmd->speed = 0; + cmd->duplex = 0xff; + } + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct b44 *bp = netdev_priv(dev); + + /* We do not support gigabit. 
*/ + if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->advertising & + (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) + return -EINVAL; + } else if ((cmd->speed != SPEED_100 && + cmd->speed != SPEED_10) || + (cmd->duplex != DUPLEX_HALF && + cmd->duplex != DUPLEX_FULL)) { + return -EINVAL; + } + + spin_lock_irq(&bp->lock); + + if (cmd->autoneg == AUTONEG_ENABLE) { + bp->flags &= ~(B44_FLAG_FORCE_LINK | + B44_FLAG_100_BASE_T | + B44_FLAG_FULL_DUPLEX | + B44_FLAG_ADV_10HALF | + B44_FLAG_ADV_10FULL | + B44_FLAG_ADV_100HALF | + B44_FLAG_ADV_100FULL); + if (cmd->advertising == 0) { + bp->flags |= (B44_FLAG_ADV_10HALF | + B44_FLAG_ADV_10FULL | + B44_FLAG_ADV_100HALF | + B44_FLAG_ADV_100FULL); + } else { + if (cmd->advertising & ADVERTISED_10baseT_Half) + bp->flags |= B44_FLAG_ADV_10HALF; + if (cmd->advertising & ADVERTISED_10baseT_Full) + bp->flags |= B44_FLAG_ADV_10FULL; + if (cmd->advertising & ADVERTISED_100baseT_Half) + bp->flags |= B44_FLAG_ADV_100HALF; + if (cmd->advertising & ADVERTISED_100baseT_Full) + bp->flags |= B44_FLAG_ADV_100FULL; + } + } else { + bp->flags |= B44_FLAG_FORCE_LINK; + bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX); + if (cmd->speed == SPEED_100) + bp->flags |= B44_FLAG_100_BASE_T; + if (cmd->duplex == DUPLEX_FULL) + bp->flags |= B44_FLAG_FULL_DUPLEX; + } + + if (netif_running(dev)) + b44_setup_phy(bp); + + spin_unlock_irq(&bp->lock); + + return 0; +} + +static void b44_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct b44 *bp = netdev_priv(dev); + + ering->rx_max_pending = B44_RX_RING_SIZE - 1; + ering->rx_pending = bp->rx_pending; + + /* XXX ethtool lacks a tx_max_pending, oops... */ +} + +static int b44_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct b44 *bp = netdev_priv(dev); + + if ((ering->rx_pending > B44_RX_RING_SIZE - 1) || + (ering->rx_mini_pending != 0) || + (ering->rx_jumbo_pending != 0) || + (ering->tx_pending > B44_TX_RING_SIZE - 1)) + return -EINVAL; + + spin_lock_irq(&bp->lock); + + bp->rx_pending = ering->rx_pending; + bp->tx_pending = ering->tx_pending; + + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + netif_wake_queue(bp->dev); + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + return 0; +} + +static void b44_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct b44 *bp = netdev_priv(dev); + + epause->autoneg = + (bp->flags & B44_FLAG_PAUSE_AUTO) != 0; + epause->rx_pause = + (bp->flags & B44_FLAG_RX_PAUSE) != 0; + epause->tx_pause = + (bp->flags & B44_FLAG_TX_PAUSE) != 0; +} + +static int b44_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct b44 *bp = netdev_priv(dev); + + spin_lock_irq(&bp->lock); + if (epause->autoneg) + bp->flags |= B44_FLAG_PAUSE_AUTO; + else + bp->flags &= ~B44_FLAG_PAUSE_AUTO; + if (epause->rx_pause) + bp->flags |= B44_FLAG_RX_PAUSE; + else + bp->flags &= ~B44_FLAG_RX_PAUSE; + if (epause->tx_pause) + bp->flags |= B44_FLAG_TX_PAUSE; + else + bp->flags &= ~B44_FLAG_TX_PAUSE; + if (bp->flags & B44_FLAG_PAUSE_AUTO) { + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + } else { + __b44_set_flow_ctrl(bp, bp->flags); + } + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + return 0; +} + +static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, *b44_gstrings, sizeof(b44_gstrings)); + break; + } +} + +static int 
b44_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(b44_gstrings); + default: + return -EOPNOTSUPP; + } +} + +static void b44_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct b44 *bp = netdev_priv(dev); + u32 *val = &bp->hw_stats.tx_good_octets; + u32 i; + + spin_lock_irq(&bp->lock); + + b44_stats_update(bp); + + for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) + *data++ = *val++; + + spin_unlock_irq(&bp->lock); +} + +static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct b44 *bp = netdev_priv(dev); + + wol->supported = WAKE_MAGIC; + if (bp->flags & B44_FLAG_WOL_ENABLE) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct b44 *bp = netdev_priv(dev); + + spin_lock_irq(&bp->lock); + if (wol->wolopts & WAKE_MAGIC) + bp->flags |= B44_FLAG_WOL_ENABLE; + else + bp->flags &= ~B44_FLAG_WOL_ENABLE; + spin_unlock_irq(&bp->lock); + + return 0; +} + +static const struct ethtool_ops b44_ethtool_ops = { + .get_drvinfo = b44_get_drvinfo, + .get_settings = b44_get_settings, + .set_settings = b44_set_settings, + .nway_reset = b44_nway_reset, + .get_link = ethtool_op_get_link, + .get_wol = b44_get_wol, + .set_wol = b44_set_wol, + .get_ringparam = b44_get_ringparam, + .set_ringparam = b44_set_ringparam, + .get_pauseparam = b44_get_pauseparam, + .set_pauseparam = b44_set_pauseparam, + .get_msglevel = b44_get_msglevel, + .set_msglevel = b44_set_msglevel, + .get_strings = b44_get_strings, + .get_sset_count = b44_get_sset_count, + .get_ethtool_stats = b44_get_ethtool_stats, +}; + +static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct b44 *bp = netdev_priv(dev); + int err = -EINVAL; + + if (!netif_running(dev)) + goto out; + + spin_lock_irq(&bp->lock); + err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL); + spin_unlock_irq(&bp->lock); +out: + return err; +} + +static int __devinit b44_get_invariants(struct b44 *bp) +{ + struct ssb_device *sdev = bp->sdev; + int err = 0; + u8 *addr; + + bp->dma_offset = ssb_dma_translation(sdev); + + if (sdev->bus->bustype == SSB_BUSTYPE_SSB && + instance > 1) { + addr = sdev->bus->sprom.et1mac; + bp->phy_addr = sdev->bus->sprom.et1phyaddr; + } else { + addr = sdev->bus->sprom.et0mac; + bp->phy_addr = sdev->bus->sprom.et0phyaddr; + } + /* Some ROMs have buggy PHY addresses with the high + * bits set (sign extension?). Truncate them to a + * valid PHY address. */ + bp->phy_addr &= 0x1F; + + memcpy(bp->dev->dev_addr, addr, 6); + + if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ + pr_err("Invalid MAC address found in EEPROM\n"); + return -EINVAL; + } + + memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); + + bp->imask = IMASK_DEF; + + /* XXX - really required? 
+ bp->flags |= B44_FLAG_BUGGY_TXPTR; + */ + + if (bp->sdev->id.revision >= 7) + bp->flags |= B44_FLAG_B0_ANDLATER; + + return err; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops b44_netdev_ops = { + .ndo_open = b44_open, + .ndo_stop = b44_close, + .ndo_start_xmit = b44_start_xmit, + .ndo_get_stats = b44_get_stats, + .ndo_set_multicast_list = b44_set_rx_mode, + .ndo_set_mac_address = b44_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = b44_ioctl, + .ndo_tx_timeout = b44_tx_timeout, + .ndo_change_mtu = b44_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = b44_poll_controller, +#endif +}; +#endif + +static int __devinit b44_init_one(struct ssb_device *sdev, + const struct ssb_device_id *ent) +{ + static int b44_version_printed = 0; + struct net_device *dev; + struct b44 *bp; + int err; + + instance++; + + if (b44_version_printed++ == 0) + pr_info("%s", version); + + + dev = alloc_etherdev(sizeof(*bp)); + if (!dev) { + dev_err(sdev->dev, "Etherdev alloc failed, aborting\n"); + err = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(dev, sdev->dev); + + /* No interesting netdevice features in this card... */ + dev->features |= 0; + + bp = netdev_priv(dev); + bp->sdev = sdev; + bp->dev = dev; + bp->force_copybreak = 0; + + bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); + + spin_lock_init(&bp->lock); + + bp->rx_pending = B44_DEF_RX_RING_PENDING; + bp->tx_pending = B44_DEF_TX_RING_PENDING; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) + dev->open = b44_open; + dev->stop = b44_close; + dev->hard_start_xmit = b44_start_xmit; + dev->get_stats = b44_get_stats; + dev->set_multicast_list = b44_set_rx_mode; + dev->set_mac_address = b44_set_mac_addr; + dev->do_ioctl = b44_ioctl; + dev->tx_timeout = b44_tx_timeout; + netif_napi_add(dev, &bp->napi, b44_poll, 64); + dev->watchdog_timeo = B44_TX_TIMEOUT; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = b44_poll_controller; +#endif + dev->change_mtu = b44_change_mtu; +#else + dev->netdev_ops = &b44_netdev_ops; + netif_napi_add(dev, &bp->napi, b44_poll, 64); + dev->watchdog_timeo = B44_TX_TIMEOUT; +#endif + dev->irq = sdev->irq; + SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); + + netif_carrier_off(dev); + + err = ssb_bus_powerup(sdev->bus, 0); + if (err) { + dev_err(sdev->dev, + "Failed to powerup the bus\n"); + goto err_out_free_dev; + } + err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30)); + if (err) { + dev_err(sdev->dev, + "Required 30BIT DMA mask unsupported by the system\n"); + goto err_out_powerdown; + } + err = b44_get_invariants(bp); + if (err) { + dev_err(sdev->dev, + "Problem fetching invariants of chip, aborting\n"); + goto err_out_powerdown; + } + + bp->mii_if.dev = dev; + bp->mii_if.mdio_read = b44_mii_read; + bp->mii_if.mdio_write = b44_mii_write; + bp->mii_if.phy_id = bp->phy_addr; + bp->mii_if.phy_id_mask = 0x1f; + bp->mii_if.reg_num_mask = 0x1f; + + /* By default, advertise all speed/duplex settings. */ + bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL | + B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL); + + /* By default, auto-negotiate PAUSE. */ + bp->flags |= B44_FLAG_PAUSE_AUTO; + + err = register_netdev(dev); + if (err) { + dev_err(sdev->dev, "Cannot register net device, aborting\n"); + goto err_out_powerdown; + } + + ssb_set_drvdata(sdev, dev); + + /* Chip reset provides power to the b44 MAC & PCI cores, which + * is necessary for MAC register access. 
+ */ + b44_chip_reset(bp, B44_CHIP_RESET_FULL); + + /* do a phy reset to test if there is an active phy */ + if (b44_phy_reset(bp) < 0) + bp->phy_addr = B44_PHY_ADDR_NO_PHY; + + netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n", + dev->dev_addr); + + return 0; + +err_out_powerdown: + ssb_bus_may_powerdown(sdev->bus); + +err_out_free_dev: + free_netdev(dev); + +out: + return err; +} + +static void __devexit b44_remove_one(struct ssb_device *sdev) +{ + struct net_device *dev = ssb_get_drvdata(sdev); + + unregister_netdev(dev); + ssb_device_disable(sdev, 0); + ssb_bus_may_powerdown(sdev->bus); + free_netdev(dev); + ssb_pcihost_set_power_state(sdev, PCI_D3hot); + ssb_set_drvdata(sdev, NULL); +} + +static int b44_suspend(struct ssb_device *sdev, pm_message_t state) +{ + struct net_device *dev = ssb_get_drvdata(sdev); + struct b44 *bp = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + del_timer_sync(&bp->timer); + + spin_lock_irq(&bp->lock); + + b44_halt(bp); + netif_carrier_off(bp->dev); + netif_device_detach(bp->dev); + b44_free_rings(bp); + + spin_unlock_irq(&bp->lock); + + free_irq(dev->irq, dev); + if (bp->flags & B44_FLAG_WOL_ENABLE) { + b44_init_hw(bp, B44_PARTIAL_RESET); + b44_setup_wol(bp); + } + + ssb_pcihost_set_power_state(sdev, PCI_D3hot); + return 0; +} + +static int b44_resume(struct ssb_device *sdev) +{ + struct net_device *dev = ssb_get_drvdata(sdev); + struct b44 *bp = netdev_priv(dev); + int rc = 0; + + rc = ssb_bus_powerup(sdev->bus, 0); + if (rc) { + dev_err(sdev->dev, + "Failed to powerup the bus\n"); + return rc; + } + + if (!netif_running(dev)) + return 0; + + rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); + if (rc) { + netdev_err(dev, "request_irq failed\n"); + return rc; + } + + spin_lock_irq(&bp->lock); + + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + netif_device_attach(bp->dev); + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + netif_wake_queue(dev); + + mod_timer(&bp->timer, jiffies + 1); + + return 0; +} + +static struct ssb_driver b44_ssb_driver = { + .name = DRV_MODULE_NAME, + .id_table = b44_ssb_tbl, + .probe = b44_init_one, + .remove = __devexit_p(b44_remove_one), + .suspend = b44_suspend, + .resume = b44_resume, +}; + +static inline int b44_pci_init(void) +{ + int err = 0; +#ifdef CONFIG_B44_PCI + err = ssb_pcihost_register(&b44_pci_driver); +#endif + return err; +} + +static inline void b44_pci_exit(void) +{ +#ifdef CONFIG_B44_PCI + ssb_pcihost_unregister(&b44_pci_driver); +#endif +} + +static int __init b44_init(void) +{ + unsigned int dma_desc_align_size = dma_get_cache_alignment(); + int err; + + /* Setup paramaters for syncing RX/TX DMA descriptors */ + dma_desc_align_mask = ~(dma_desc_align_size - 1); + dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); + + err = b44_pci_init(); + if (err) + return err; + err = ssb_driver_register(&b44_ssb_driver); + if (err) + b44_pci_exit(); + return err; +} + +static void __exit b44_cleanup(void) +{ + ssb_driver_unregister(&b44_ssb_driver); + b44_pci_exit(); +} + +module_init(b44_init); +module_exit(b44_cleanup); + diff --git a/drivers/net/b44.h b/drivers/net/b44.h new file mode 100644 index 0000000..e1905a4 --- /dev/null +++ b/drivers/net/b44.h @@ -0,0 +1,401 @@ +#ifndef _B44_H +#define _B44_H + +/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) 
*/ +#define B44_DEVCTRL 0x0000UL /* Device Control */ +#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */ +#define DEVCTRL_PFE 0x00000080 /* Pattern Filtering Enable */ +#define DEVCTRL_IPP 0x00000400 /* Internal EPHY Present */ +#define DEVCTRL_EPR 0x00008000 /* EPHY Reset */ +#define DEVCTRL_PME 0x00001000 /* PHY Mode Enable */ +#define DEVCTRL_PMCE 0x00002000 /* PHY Mode Clocks Enable */ +#define DEVCTRL_PADDR 0x0007c000 /* PHY Address */ +#define DEVCTRL_PADDR_SHIFT 18 +#define B44_BIST_STAT 0x000CUL /* Built-In Self-Test Status */ +#define B44_WKUP_LEN 0x0010UL /* Wakeup Length */ +#define WKUP_LEN_P0_MASK 0x0000007f /* Pattern 0 */ +#define WKUP_LEN_D0 0x00000080 +#define WKUP_LEN_P1_MASK 0x00007f00 /* Pattern 1 */ +#define WKUP_LEN_P1_SHIFT 8 +#define WKUP_LEN_D1 0x00008000 +#define WKUP_LEN_P2_MASK 0x007f0000 /* Pattern 2 */ +#define WKUP_LEN_P2_SHIFT 16 +#define WKUP_LEN_D2 0x00000000 +#define WKUP_LEN_P3_MASK 0x7f000000 /* Pattern 3 */ +#define WKUP_LEN_P3_SHIFT 24 +#define WKUP_LEN_D3 0x80000000 +#define WKUP_LEN_DISABLE 0x80808080 +#define WKUP_LEN_ENABLE_TWO 0x80800000 +#define WKUP_LEN_ENABLE_THREE 0x80000000 +#define B44_ISTAT 0x0020UL /* Interrupt Status */ +#define ISTAT_LS 0x00000020 /* Link Change (B0 only) */ +#define ISTAT_PME 0x00000040 /* Power Management Event */ +#define ISTAT_TO 0x00000080 /* General Purpose Timeout */ +#define ISTAT_DSCE 0x00000400 /* Descriptor Error */ +#define ISTAT_DATAE 0x00000800 /* Data Error */ +#define ISTAT_DPE 0x00001000 /* Descr. Protocol Error */ +#define ISTAT_RDU 0x00002000 /* Receive Descr. Underflow */ +#define ISTAT_RFO 0x00004000 /* Receive FIFO Overflow */ +#define ISTAT_TFU 0x00008000 /* Transmit FIFO Underflow */ +#define ISTAT_RX 0x00010000 /* RX Interrupt */ +#define ISTAT_TX 0x01000000 /* TX Interrupt */ +#define ISTAT_EMAC 0x04000000 /* EMAC Interrupt */ +#define ISTAT_MII_WRITE 0x08000000 /* MII Write Interrupt */ +#define ISTAT_MII_READ 0x10000000 /* MII Read Interrupt */ +#define ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU) +#define B44_IMASK 0x0024UL /* Interrupt Mask */ +#define IMASK_DEF (ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX) +#define B44_GPTIMER 0x0028UL /* General Purpose Timer */ +#define B44_ADDR_LO 0x0088UL /* ENET Address Lo (B0 only) */ +#define B44_ADDR_HI 0x008CUL /* ENET Address Hi (B0 only) */ +#define B44_FILT_ADDR 0x0090UL /* ENET Filter Address */ +#define B44_FILT_DATA 0x0094UL /* ENET Filter Data */ +#define B44_TXBURST 0x00A0UL /* TX Max Burst Length */ +#define B44_RXBURST 0x00A4UL /* RX Max Burst Length */ +#define B44_MAC_CTRL 0x00A8UL /* MAC Control */ +#define MAC_CTRL_CRC32_ENAB 0x00000001 /* CRC32 Generation Enable */ +#define MAC_CTRL_PHY_PDOWN 0x00000004 /* Onchip EPHY Powerdown */ +#define MAC_CTRL_PHY_EDET 0x00000008 /* Onchip EPHY Energy Detected */ +#define MAC_CTRL_PHY_LEDCTRL 0x000000e0 /* Onchip EPHY LED Control */ +#define MAC_CTRL_PHY_LEDCTRL_SHIFT 5 +#define B44_MAC_FLOW 0x00ACUL /* MAC Flow Control */ +#define MAC_FLOW_RX_HI_WATER 0x000000ff /* Receive FIFO HI Water Mark */ +#define MAC_FLOW_PAUSE_ENAB 0x00008000 /* Enable Pause Frame Generation */ +#define B44_RCV_LAZY 0x0100UL /* Lazy Interrupt Control */ +#define RCV_LAZY_TO_MASK 0x00ffffff /* Timeout */ +#define RCV_LAZY_FC_MASK 0xff000000 /* Frame Count */ +#define RCV_LAZY_FC_SHIFT 24 +#define B44_DMATX_CTRL 0x0200UL /* DMA TX Control */ +#define DMATX_CTRL_ENABLE 0x00000001 /* Enable */ +#define DMATX_CTRL_SUSPEND 0x00000002 /* Suepend Request */ +#define 
DMATX_CTRL_LPBACK 0x00000004 /* Loopback Enable */ +#define DMATX_CTRL_FAIRPRIOR 0x00000008 /* Fair Priority */ +#define DMATX_CTRL_FLUSH 0x00000010 /* Flush Request */ +#define B44_DMATX_ADDR 0x0204UL /* DMA TX Descriptor Ring Address */ +#define B44_DMATX_PTR 0x0208UL /* DMA TX Last Posted Descriptor */ +#define B44_DMATX_STAT 0x020CUL /* DMA TX Current Active Desc. + Status */ +#define DMATX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ +#define DMATX_STAT_SMASK 0x0000f000 /* State Mask */ +#define DMATX_STAT_SDISABLED 0x00000000 /* State Disabled */ +#define DMATX_STAT_SACTIVE 0x00001000 /* State Active */ +#define DMATX_STAT_SIDLE 0x00002000 /* State Idle Wait */ +#define DMATX_STAT_SSTOPPED 0x00003000 /* State Stopped */ +#define DMATX_STAT_SSUSP 0x00004000 /* State Suspend Pending */ +#define DMATX_STAT_EMASK 0x000f0000 /* Error Mask */ +#define DMATX_STAT_ENONE 0x00000000 /* Error None */ +#define DMATX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */ +#define DMATX_STAT_EDFU 0x00020000 /* Error Data FIFO Underrun */ +#define DMATX_STAT_EBEBR 0x00030000 /* Error Bus Error on Buffer Read */ +#define DMATX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */ +#define DMATX_STAT_FLUSHED 0x00100000 /* Flushed */ +#define B44_DMARX_CTRL 0x0210UL /* DMA RX Control */ +#define DMARX_CTRL_ENABLE 0x00000001 /* Enable */ +#define DMARX_CTRL_ROMASK 0x000000fe /* Receive Offset Mask */ +#define DMARX_CTRL_ROSHIFT 1 /* Receive Offset Shift */ +#define B44_DMARX_ADDR 0x0214UL /* DMA RX Descriptor Ring Address */ +#define B44_DMARX_PTR 0x0218UL /* DMA RX Last Posted Descriptor */ +#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */ +#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ +#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */ +#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */ +#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */ +#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */ +#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */ +#define DMARX_STAT_EMASK 0x000f0000 /* Error Mask */ +#define DMARX_STAT_ENONE 0x00000000 /* Error None */ +#define DMARX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */ +#define DMARX_STAT_EDFO 0x00020000 /* Error Data FIFO Overflow */ +#define DMARX_STAT_EBEBW 0x00030000 /* Error Bus Error on Buffer Write */ +#define DMARX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. 
Access */ +#define B44_DMAFIFO_AD 0x0220UL /* DMA FIFO Diag Address */ +#define DMAFIFO_AD_OMASK 0x0000ffff /* Offset Mask */ +#define DMAFIFO_AD_SMASK 0x000f0000 /* Select Mask */ +#define DMAFIFO_AD_SXDD 0x00000000 /* Select Transmit DMA Data */ +#define DMAFIFO_AD_SXDP 0x00010000 /* Select Transmit DMA Pointers */ +#define DMAFIFO_AD_SRDD 0x00040000 /* Select Receive DMA Data */ +#define DMAFIFO_AD_SRDP 0x00050000 /* Select Receive DMA Pointers */ +#define DMAFIFO_AD_SXFD 0x00080000 /* Select Transmit FIFO Data */ +#define DMAFIFO_AD_SXFP 0x00090000 /* Select Transmit FIFO Pointers */ +#define DMAFIFO_AD_SRFD 0x000c0000 /* Select Receive FIFO Data */ +#define DMAFIFO_AD_SRFP 0x000c0000 /* Select Receive FIFO Pointers */ +#define B44_DMAFIFO_LO 0x0224UL /* DMA FIFO Diag Low Data */ +#define B44_DMAFIFO_HI 0x0228UL /* DMA FIFO Diag High Data */ +#define B44_RXCONFIG 0x0400UL /* EMAC RX Config */ +#define RXCONFIG_DBCAST 0x00000001 /* Disable Broadcast */ +#define RXCONFIG_ALLMULTI 0x00000002 /* Accept All Multicast */ +#define RXCONFIG_NORX_WHILE_TX 0x00000004 /* Receive Disable While Transmitting */ +#define RXCONFIG_PROMISC 0x00000008 /* Promiscuous Enable */ +#define RXCONFIG_LPBACK 0x00000010 /* Loopback Enable */ +#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */ +#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */ +#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */ +#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */ +#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */ +#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */ +#define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */ +#define MDIO_CTRL_MAXF_MASK 0x0000007f /* MDC Frequency */ +#define MDIO_CTRL_PREAMBLE 0x00000080 /* MII Preamble Enable */ +#define B44_MDIO_DATA 0x0414UL /* EMAC MDIO Data */ +#define MDIO_DATA_DATA 0x0000ffff /* R/W Data */ +#define MDIO_DATA_TA_MASK 0x00030000 /* Turnaround Value */ +#define MDIO_DATA_TA_SHIFT 16 +#define MDIO_TA_VALID 2 +#define MDIO_DATA_RA_MASK 0x007c0000 /* Register Address */ +#define MDIO_DATA_RA_SHIFT 18 +#define MDIO_DATA_PMD_MASK 0x0f800000 /* Physical Media Device */ +#define MDIO_DATA_PMD_SHIFT 23 +#define MDIO_DATA_OP_MASK 0x30000000 /* Opcode */ +#define MDIO_DATA_OP_SHIFT 28 +#define MDIO_OP_WRITE 1 +#define MDIO_OP_READ 2 +#define MDIO_DATA_SB_MASK 0xc0000000 /* Start Bits */ +#define MDIO_DATA_SB_SHIFT 30 +#define MDIO_DATA_SB_START 0x40000000 /* Start Of Frame */ +#define B44_EMAC_IMASK 0x0418UL /* EMAC Interrupt Mask */ +#define B44_EMAC_ISTAT 0x041CUL /* EMAC Interrupt Status */ +#define EMAC_INT_MII 0x00000001 /* MII MDIO Interrupt */ +#define EMAC_INT_MIB 0x00000002 /* MIB Interrupt */ +#define EMAC_INT_FLOW 0x00000003 /* Flow Control Interrupt */ +#define B44_CAM_DATA_LO 0x0420UL /* EMAC CAM Data Low */ +#define B44_CAM_DATA_HI 0x0424UL /* EMAC CAM Data High */ +#define CAM_DATA_HI_VALID 0x00010000 /* Valid Bit */ +#define B44_CAM_CTRL 0x0428UL /* EMAC CAM Control */ +#define CAM_CTRL_ENABLE 0x00000001 /* CAM Enable */ +#define CAM_CTRL_MSEL 0x00000002 /* Mask Select */ +#define CAM_CTRL_READ 0x00000004 /* Read */ +#define CAM_CTRL_WRITE 0x00000008 /* Read */ +#define CAM_CTRL_INDEX_MASK 0x003f0000 /* Index Mask */ +#define CAM_CTRL_INDEX_SHIFT 16 +#define CAM_CTRL_BUSY 0x80000000 /* CAM Busy */ +#define B44_ENET_CTRL 0x042CUL /* EMAC ENET Control */ +#define ENET_CTRL_ENABLE 0x00000001 /* EMAC Enable */ +#define ENET_CTRL_DISABLE 0x00000002 /* EMAC Disable */ +#define ENET_CTRL_SRST 0x00000004 /* EMAC 
Soft Reset */ +#define ENET_CTRL_EPSEL 0x00000008 /* External PHY Select */ +#define B44_TX_CTRL 0x0430UL /* EMAC TX Control */ +#define TX_CTRL_DUPLEX 0x00000001 /* Full Duplex */ +#define TX_CTRL_FMODE 0x00000002 /* Flow Mode */ +#define TX_CTRL_SBENAB 0x00000004 /* Single Backoff Enable */ +#define TX_CTRL_SMALL_SLOT 0x00000008 /* Small Slottime */ +#define B44_TX_WMARK 0x0434UL /* EMAC TX Watermark */ +#define B44_MIB_CTRL 0x0438UL /* EMAC MIB Control */ +#define MIB_CTRL_CLR_ON_READ 0x00000001 /* Autoclear on Read */ +#define B44_TX_GOOD_O 0x0500UL /* MIB TX Good Octets */ +#define B44_TX_GOOD_P 0x0504UL /* MIB TX Good Packets */ +#define B44_TX_O 0x0508UL /* MIB TX Octets */ +#define B44_TX_P 0x050CUL /* MIB TX Packets */ +#define B44_TX_BCAST 0x0510UL /* MIB TX Broadcast Packets */ +#define B44_TX_MCAST 0x0514UL /* MIB TX Multicast Packets */ +#define B44_TX_64 0x0518UL /* MIB TX <= 64 byte Packets */ +#define B44_TX_65_127 0x051CUL /* MIB TX 65 to 127 byte Packets */ +#define B44_TX_128_255 0x0520UL /* MIB TX 128 to 255 byte Packets */ +#define B44_TX_256_511 0x0524UL /* MIB TX 256 to 511 byte Packets */ +#define B44_TX_512_1023 0x0528UL /* MIB TX 512 to 1023 byte Packets */ +#define B44_TX_1024_MAX 0x052CUL /* MIB TX 1024 to max byte Packets */ +#define B44_TX_JABBER 0x0530UL /* MIB TX Jabber Packets */ +#define B44_TX_OSIZE 0x0534UL /* MIB TX Oversize Packets */ +#define B44_TX_FRAG 0x0538UL /* MIB TX Fragment Packets */ +#define B44_TX_URUNS 0x053CUL /* MIB TX Underruns */ +#define B44_TX_TCOLS 0x0540UL /* MIB TX Total Collisions */ +#define B44_TX_SCOLS 0x0544UL /* MIB TX Single Collisions */ +#define B44_TX_MCOLS 0x0548UL /* MIB TX Multiple Collisions */ +#define B44_TX_ECOLS 0x054CUL /* MIB TX Excessive Collisions */ +#define B44_TX_LCOLS 0x0550UL /* MIB TX Late Collisions */ +#define B44_TX_DEFERED 0x0554UL /* MIB TX Defered Packets */ +#define B44_TX_CLOST 0x0558UL /* MIB TX Carrier Lost */ +#define B44_TX_PAUSE 0x055CUL /* MIB TX Pause Packets */ +#define B44_RX_GOOD_O 0x0580UL /* MIB RX Good Octets */ +#define B44_RX_GOOD_P 0x0584UL /* MIB RX Good Packets */ +#define B44_RX_O 0x0588UL /* MIB RX Octets */ +#define B44_RX_P 0x058CUL /* MIB RX Packets */ +#define B44_RX_BCAST 0x0590UL /* MIB RX Broadcast Packets */ +#define B44_RX_MCAST 0x0594UL /* MIB RX Multicast Packets */ +#define B44_RX_64 0x0598UL /* MIB RX <= 64 byte Packets */ +#define B44_RX_65_127 0x059CUL /* MIB RX 65 to 127 byte Packets */ +#define B44_RX_128_255 0x05A0UL /* MIB RX 128 to 255 byte Packets */ +#define B44_RX_256_511 0x05A4UL /* MIB RX 256 to 511 byte Packets */ +#define B44_RX_512_1023 0x05A8UL /* MIB RX 512 to 1023 byte Packets */ +#define B44_RX_1024_MAX 0x05ACUL /* MIB RX 1024 to max byte Packets */ +#define B44_RX_JABBER 0x05B0UL /* MIB RX Jabber Packets */ +#define B44_RX_OSIZE 0x05B4UL /* MIB RX Oversize Packets */ +#define B44_RX_FRAG 0x05B8UL /* MIB RX Fragment Packets */ +#define B44_RX_MISS 0x05BCUL /* MIB RX Missed Packets */ +#define B44_RX_CRCA 0x05C0UL /* MIB RX CRC Align Errors */ +#define B44_RX_USIZE 0x05C4UL /* MIB RX Undersize Packets */ +#define B44_RX_CRC 0x05C8UL /* MIB RX CRC Errors */ +#define B44_RX_ALIGN 0x05CCUL /* MIB RX Align Errors */ +#define B44_RX_SYM 0x05D0UL /* MIB RX Symbol Errors */ +#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */ +#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */ + +/* 4400 PHY registers */ +#define B44_MII_AUXCTRL 24 /* Auxiliary Control */ +#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */ +#define MII_AUXCTRL_SPEED 
0x0002 /* 1=100Mbps, 0=10Mbps */ +#define MII_AUXCTRL_FORCED 0x0004 /* Forced 10/100 */ +#define B44_MII_ALEDCTRL 26 /* Activity LED */ +#define MII_ALEDCTRL_ALLMSK 0x7fff +#define B44_MII_TLEDCTRL 27 /* Traffic Meter LED */ +#define MII_TLEDCTRL_ENABLE 0x0040 + +struct dma_desc { + __le32 ctrl; + __le32 addr; +}; + +/* There are only 12 bits in the DMA engine for descriptor offsetting + * so the table must be aligned on a boundary of this. + */ +#define DMA_TABLE_BYTES 4096 + +#define DESC_CTRL_LEN 0x00001fff +#define DESC_CTRL_CMASK 0x0ff00000 /* Core specific bits */ +#define DESC_CTRL_EOT 0x10000000 /* End of Table */ +#define DESC_CTRL_IOC 0x20000000 /* Interrupt On Completion */ +#define DESC_CTRL_EOF 0x40000000 /* End of Frame */ +#define DESC_CTRL_SOF 0x80000000 /* Start of Frame */ + +#define RX_COPY_THRESHOLD 256 + +struct rx_header { + __le16 len; + __le16 flags; + __le16 pad[12]; +}; +#define RX_HEADER_LEN 28 + +#define RX_FLAG_OFIFO 0x00000001 /* FIFO Overflow */ +#define RX_FLAG_CRCERR 0x00000002 /* CRC Error */ +#define RX_FLAG_SERR 0x00000004 /* Receive Symbol Error */ +#define RX_FLAG_ODD 0x00000008 /* Frame has odd number of nibbles */ +#define RX_FLAG_LARGE 0x00000010 /* Frame is > RX MAX Length */ +#define RX_FLAG_MCAST 0x00000020 /* Dest is Multicast Address */ +#define RX_FLAG_BCAST 0x00000040 /* Dest is Broadcast Address */ +#define RX_FLAG_MISS 0x00000080 /* Received due to promisc mode */ +#define RX_FLAG_LAST 0x00000800 /* Last buffer in frame */ +#define RX_FLAG_ERRORS (RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO) + +struct ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; + +#define B44_MCAST_TABLE_SIZE 32 +#define B44_PHY_ADDR_NO_PHY 30 +#define B44_MDC_RATIO 5000000 + +#define B44_STAT_REG_DECLARE \ + _B44(tx_good_octets) \ + _B44(tx_good_pkts) \ + _B44(tx_octets) \ + _B44(tx_pkts) \ + _B44(tx_broadcast_pkts) \ + _B44(tx_multicast_pkts) \ + _B44(tx_len_64) \ + _B44(tx_len_65_to_127) \ + _B44(tx_len_128_to_255) \ + _B44(tx_len_256_to_511) \ + _B44(tx_len_512_to_1023) \ + _B44(tx_len_1024_to_max) \ + _B44(tx_jabber_pkts) \ + _B44(tx_oversize_pkts) \ + _B44(tx_fragment_pkts) \ + _B44(tx_underruns) \ + _B44(tx_total_cols) \ + _B44(tx_single_cols) \ + _B44(tx_multiple_cols) \ + _B44(tx_excessive_cols) \ + _B44(tx_late_cols) \ + _B44(tx_defered) \ + _B44(tx_carrier_lost) \ + _B44(tx_pause_pkts) \ + _B44(rx_good_octets) \ + _B44(rx_good_pkts) \ + _B44(rx_octets) \ + _B44(rx_pkts) \ + _B44(rx_broadcast_pkts) \ + _B44(rx_multicast_pkts) \ + _B44(rx_len_64) \ + _B44(rx_len_65_to_127) \ + _B44(rx_len_128_to_255) \ + _B44(rx_len_256_to_511) \ + _B44(rx_len_512_to_1023) \ + _B44(rx_len_1024_to_max) \ + _B44(rx_jabber_pkts) \ + _B44(rx_oversize_pkts) \ + _B44(rx_fragment_pkts) \ + _B44(rx_missed_pkts) \ + _B44(rx_crc_align_errs) \ + _B44(rx_undersize) \ + _B44(rx_crc_errs) \ + _B44(rx_align_errs) \ + _B44(rx_symbol_errs) \ + _B44(rx_pause_pkts) \ + _B44(rx_nonpause_pkts) + +/* SW copy of device statistics, kept up to date by periodic timer + * which probes HW values. 
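A hedged aside, not part of the patch: B44_STAT_REG_DECLARE above is an X-macro list, and the struct that follows is one of its expansions. The self-contained sketch below (all names invented for the illustration) shows how one list can expand into both the counter struct and a matching name table, so the two can never drift apart.

#include <stdio.h>

#define DEMO_STAT_LIST \
        _D(tx_good_octets) \
        _D(tx_good_pkts) \
        _D(rx_good_octets)

struct demo_stats {
#define _D(x) unsigned int x;
        DEMO_STAT_LIST
#undef _D
};

static const char *const demo_stat_names[] = {
#define _D(x) #x,
        DEMO_STAT_LIST
#undef _D
};

int main(void)
{
        struct demo_stats s = { .tx_good_octets = 1500, .tx_good_pkts = 1 };
        const unsigned int *p = (const unsigned int *)&s;
        size_t i;

        /* walking the struct as an array works because every field is the same type */
        for (i = 0; i < sizeof(s) / sizeof(*p); i++)
                printf("%s = %u\n", demo_stat_names[i], p[i]);
        return 0;
}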
Check b44_stats_update if you mess with + * the layout + */ +struct b44_hw_stats { +#define _B44(x) u32 x; +B44_STAT_REG_DECLARE +#undef _B44 +}; + +struct ssb_device; + +struct b44 { + spinlock_t lock; + + u32 imask, istat; + + struct dma_desc *rx_ring, *tx_ring; + + u32 tx_prod, tx_cons; + u32 rx_prod, rx_cons; + + struct ring_info *rx_buffers; + struct ring_info *tx_buffers; + + struct napi_struct napi; + + u32 dma_offset; + u32 flags; +#define B44_FLAG_B0_ANDLATER 0x00000001 +#define B44_FLAG_BUGGY_TXPTR 0x00000002 +#define B44_FLAG_REORDER_BUG 0x00000004 +#define B44_FLAG_PAUSE_AUTO 0x00008000 +#define B44_FLAG_FULL_DUPLEX 0x00010000 +#define B44_FLAG_100_BASE_T 0x00020000 +#define B44_FLAG_TX_PAUSE 0x00040000 +#define B44_FLAG_RX_PAUSE 0x00080000 +#define B44_FLAG_FORCE_LINK 0x00100000 +#define B44_FLAG_ADV_10HALF 0x01000000 +#define B44_FLAG_ADV_10FULL 0x02000000 +#define B44_FLAG_ADV_100HALF 0x04000000 +#define B44_FLAG_ADV_100FULL 0x08000000 +#define B44_FLAG_INTERNAL_PHY 0x10000000 +#define B44_FLAG_RX_RING_HACK 0x20000000 +#define B44_FLAG_TX_RING_HACK 0x40000000 +#define B44_FLAG_WOL_ENABLE 0x80000000 + + u32 msg_enable; + + struct timer_list timer; + + struct b44_hw_stats hw_stats; + + struct ssb_device *sdev; + struct net_device *dev; + + dma_addr_t rx_ring_dma, tx_ring_dma; + + u32 rx_pending; + u32 tx_pending; + u8 phy_addr; + u8 force_copybreak; + struct mii_if_info mii_if; +}; + +#endif /* _B44_H */ diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile new file mode 100644 index 0000000..7d96e2e --- /dev/null +++ b/drivers/net/usb/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for USB Network drivers +# + +obj-$(CONFIG_USB_NET_COMPAT_CDCETHER) += cdc_ether.o +obj-$(CONFIG_USB_NET_COMPAT_RNDIS_HOST) += rndis_host.o +obj-$(CONFIG_USB_COMPAT_USBNET) += usbnet.o + diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c new file mode 100644 index 0000000..22be769 --- /dev/null +++ b/drivers/net/usb/cdc_ether.c @@ -0,0 +1,653 @@ +/* + * CDC Ethernet based networking peripherals + * Copyright (C) 2003-2005 by David Brownell + * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +// #define DEBUG // error path messages, extra info +// #define VERBOSE // more; success messages + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#if defined(CONFIG_USB_NET_COMPAT_RNDIS_HOST) || defined(CONFIG_USB_NET_COMPAT_RNDIS_HOST_MODULE) + +static int is_rndis(struct usb_interface_descriptor *desc) +{ + return (desc->bInterfaceClass == USB_CLASS_COMM && + desc->bInterfaceSubClass == 2 && + desc->bInterfaceProtocol == 0xff); +} + +static int is_activesync(struct usb_interface_descriptor *desc) +{ + return (desc->bInterfaceClass == USB_CLASS_MISC && + desc->bInterfaceSubClass == 1 && + desc->bInterfaceProtocol == 1); +} + +static int is_wireless_rndis(struct usb_interface_descriptor *desc) +{ + return (desc->bInterfaceClass == USB_CLASS_WIRELESS_CONTROLLER && + desc->bInterfaceSubClass == 1 && + desc->bInterfaceProtocol == 3); +} + +#else + +#define is_rndis(desc) 0 +#define is_activesync(desc) 0 +#define is_wireless_rndis(desc) 0 + +#endif + +/* + * probes control interface, claims data interface, collects the bulk + * endpoints, activates data interface (if needed), maybe sets MTU. + * all pure cdc, except for certain firmware workarounds, and knowing + * that rndis uses one different rule. + */ +int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) +{ + u8 *buf = intf->cur_altsetting->extra; + int len = intf->cur_altsetting->extralen; + struct usb_interface_descriptor *d; + struct cdc_state *info = (void *) &dev->data; + int status; + int rndis; + struct usb_driver *driver = driver_of(intf); + + if (sizeof dev->data < sizeof *info) + return -EDOM; + + /* expect strict spec conformance for the descriptors, but + * cope with firmware which stores them in the wrong place + */ + if (len == 0 && dev->udev->actconfig->extralen) { + /* Motorola SB4100 (and others: Brad Hards says it's + * from a Broadcom design) put CDC descriptors here + */ + buf = dev->udev->actconfig->extra; + len = dev->udev->actconfig->extralen; + if (len) + dev_dbg(&intf->dev, + "CDC descriptors on config\n"); + } + + /* Maybe CDC descriptors are after the endpoint? This bug has + * been seen on some 2Wire Inc RNDIS-ish products. + */ + if (len == 0) { + struct usb_host_endpoint *hep; + + hep = intf->cur_altsetting->endpoint; + if (hep) { + buf = hep->extra; + len = hep->extralen; + } + if (len) + dev_dbg(&intf->dev, + "CDC descriptors on endpoint\n"); + } + + /* this assumes that if there's a non-RNDIS vendor variant + * of cdc-acm, it'll fail RNDIS requests cleanly. + */ + rndis = (is_rndis(&intf->cur_altsetting->desc) || + is_activesync(&intf->cur_altsetting->desc) || + is_wireless_rndis(&intf->cur_altsetting->desc)); + + memset(info, 0, sizeof *info); + info->control = intf; + while (len > 3) { + if (buf [1] != USB_DT_CS_INTERFACE) + goto next_desc; + + /* use bDescriptorSubType to identify the CDC descriptors. + * We expect devices with CDC header and union descriptors. + * For CDC Ethernet we need the ethernet descriptor. + * For RNDIS, ignore two (pointless) CDC modem descriptors + * in favor of a complicated OID-based RPC scheme doing what + * CDC Ethernet achieves with a simple descriptor. 
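Editorial sketch, not driver code: usbnet_generic_cdc_bind() above overlays its struct cdc_state on the fixed-size dev->data scratch array and returns -EDOM if the overlay would not fit. The hypothetical, self-contained example below shows the same pattern with invented names.

#include <string.h>

struct demo_link_state {                /* hypothetical stand-in for cdc_state */
        void *control, *data, *header;
};

struct demo_dev {                       /* hypothetical stand-in for struct usbnet */
        unsigned long data[5];          /* fixed scratch area for minidriver state */
};

static int demo_bind(struct demo_dev *dev, struct demo_link_state **state)
{
        if (sizeof(dev->data) < sizeof(**state))   /* overlay would overflow scratch */
                return -1;
        *state = (struct demo_link_state *)&dev->data;
        memset(*state, 0, sizeof(**state));
        return 0;
}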
+ */ + switch (buf [2]) { + case USB_CDC_HEADER_TYPE: + if (info->header) { + dev_dbg(&intf->dev, "extra CDC header\n"); + goto bad_desc; + } + info->header = (void *) buf; + if (info->header->bLength != sizeof *info->header) { + dev_dbg(&intf->dev, "CDC header len %u\n", + info->header->bLength); + goto bad_desc; + } + break; + case USB_CDC_ACM_TYPE: + /* paranoia: disambiguate a "real" vendor-specific + * modem interface from an RNDIS non-modem. + */ + if (rndis) { + struct usb_cdc_acm_descriptor *acm; + + acm = (void *) buf; + if (acm->bmCapabilities) { + dev_dbg(&intf->dev, + "ACM capabilities %02x, " + "not really RNDIS?\n", + acm->bmCapabilities); + goto bad_desc; + } + } + break; + case USB_CDC_UNION_TYPE: + if (info->u) { + dev_dbg(&intf->dev, "extra CDC union\n"); + goto bad_desc; + } + info->u = (void *) buf; + if (info->u->bLength != sizeof *info->u) { + dev_dbg(&intf->dev, "CDC union len %u\n", + info->u->bLength); + goto bad_desc; + } + + /* we need a master/control interface (what we're + * probed with) and a slave/data interface; union + * descriptors sort this all out. + */ + info->control = usb_ifnum_to_if(dev->udev, + info->u->bMasterInterface0); + info->data = usb_ifnum_to_if(dev->udev, + info->u->bSlaveInterface0); + if (!info->control || !info->data) { + dev_dbg(&intf->dev, + "master #%u/%p slave #%u/%p\n", + info->u->bMasterInterface0, + info->control, + info->u->bSlaveInterface0, + info->data); + goto bad_desc; + } + if (info->control != intf) { + dev_dbg(&intf->dev, "bogus CDC Union\n"); + /* Ambit USB Cable Modem (and maybe others) + * interchanges master and slave interface. + */ + if (info->data == intf) { + info->data = info->control; + info->control = intf; + } else + goto bad_desc; + } + + /* a data interface altsetting does the real i/o */ + d = &info->data->cur_altsetting->desc; + if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { + dev_dbg(&intf->dev, "slave class %u\n", + d->bInterfaceClass); + goto bad_desc; + } + break; + case USB_CDC_ETHERNET_TYPE: + if (info->ether) { + dev_dbg(&intf->dev, "extra CDC ether\n"); + goto bad_desc; + } + info->ether = (void *) buf; + if (info->ether->bLength != sizeof *info->ether) { + dev_dbg(&intf->dev, "CDC ether len %u\n", + info->ether->bLength); + goto bad_desc; + } + dev->hard_mtu = le16_to_cpu( + info->ether->wMaxSegmentSize); + /* because of Zaurus, we may be ignoring the host + * side link address we were given. + */ + break; + } +next_desc: + len -= buf [0]; /* bLength */ + buf += buf [0]; + } + + /* Microsoft ActiveSync based and some regular RNDIS devices lack the + * CDC descriptors, so we'll hard-wire the interfaces and not check + * for descriptors. + */ + if (rndis && !info->u) { + info->control = usb_ifnum_to_if(dev->udev, 0); + info->data = usb_ifnum_to_if(dev->udev, 1); + if (!info->control || !info->data) { + dev_dbg(&intf->dev, + "rndis: master #0/%p slave #1/%p\n", + info->control, + info->data); + goto bad_desc; + } + + } else if (!info->header || !info->u || (!rndis && !info->ether)) { + dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n", + info->header ? "" : "header ", + info->u ? "" : "union ", + info->ether ? "" : "ether "); + goto bad_desc; + } + + /* claim data interface and set it up ... with side effects. + * network traffic can't flow until an altsetting is enabled. 
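Editorial sketch, not driver code: the parse loop above advances through the class-specific descriptor blob purely by the bLength and bDescriptorType bytes at the front of each entry. A stripped-down, self-contained version of that walk, with a guard against a zero bLength added for safety:

#include <stdint.h>
#include <stdio.h>

#define DT_CS_INTERFACE 0x24    /* USB_DT_CS_INTERFACE: class-specific interface */

/* Walk a descriptor blob: each entry begins with bLength, bDescriptorType. */
static void walk_cs_descriptors(const uint8_t *buf, int len)
{
        while (len > 3) {
                if (buf[1] == DT_CS_INTERFACE)
                        printf("CDC functional descriptor, subtype 0x%02x, %u bytes\n",
                               buf[2], buf[0]);
                if (buf[0] == 0)        /* malformed entry: avoid an endless loop */
                        break;
                len -= buf[0];          /* advance by bLength */
                buf += buf[0];
        }
}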
+ */ + status = usb_driver_claim_interface(driver, info->data, dev); + if (status < 0) + return status; + status = usbnet_get_endpoints(dev, info->data); + if (status < 0) { + /* ensure immediate exit from usbnet_disconnect */ + usb_set_intfdata(info->data, NULL); + usb_driver_release_interface(driver, info->data); + return status; + } + + /* status endpoint: optional for CDC Ethernet, not RNDIS (or ACM) */ + dev->status = NULL; + if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { + struct usb_endpoint_descriptor *desc; + + dev->status = &info->control->cur_altsetting->endpoint [0]; + desc = &dev->status->desc; + if (!usb_endpoint_is_int_in(desc) || + (le16_to_cpu(desc->wMaxPacketSize) + < sizeof(struct usb_cdc_notification)) || + !desc->bInterval) { + dev_dbg(&intf->dev, "bad notification endpoint\n"); + dev->status = NULL; + } + } + if (rndis && !dev->status) { + dev_dbg(&intf->dev, "missing RNDIS status endpoint\n"); + usb_set_intfdata(info->data, NULL); + usb_driver_release_interface(driver, info->data); + return -ENODEV; + } + return 0; + +bad_desc: + dev_info(&dev->udev->dev, "bad CDC descriptors\n"); + return -ENODEV; +} +EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); + +void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf) +{ + struct cdc_state *info = (void *) &dev->data; + struct usb_driver *driver = driver_of(intf); + + /* disconnect master --> disconnect slave */ + if (intf == info->control && info->data) { + /* ensure immediate exit from usbnet_disconnect */ + usb_set_intfdata(info->data, NULL); + usb_driver_release_interface(driver, info->data); + info->data = NULL; + } + + /* and vice versa (just in case) */ + else if (intf == info->data && info->control) { + /* ensure immediate exit from usbnet_disconnect */ + usb_set_intfdata(info->control, NULL); + usb_driver_release_interface(driver, info->control); + info->control = NULL; + } +} +EXPORT_SYMBOL_GPL(usbnet_cdc_unbind); + +/*------------------------------------------------------------------------- + * + * Communications Device Class, Ethernet Control model + * + * Takes two interfaces. The DATA interface is inactive till an altsetting + * is selected. Configuration data includes class descriptors. There's + * an optional status endpoint on the control interface. + * + * This should interop with whatever the 2.4 "CDCEther.c" driver + * (by Brad Hards) talked with, with more functionality. + * + *-------------------------------------------------------------------------*/ + +static void dumpspeed(struct usbnet *dev, __le32 *speeds) +{ + netif_info(dev, timer, dev->net, + "link speeds: %u kbps up, %u kbps down\n", + __le32_to_cpu(speeds[0]) / 1000, + __le32_to_cpu(speeds[1]) / 1000); +} + +static void cdc_status(struct usbnet *dev, struct urb *urb) +{ + struct usb_cdc_notification *event; + + if (urb->actual_length < sizeof *event) + return; + + /* SPEED_CHANGE can get split into two 8-byte packets */ + if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { + dumpspeed(dev, (__le32 *) urb->transfer_buffer); + return; + } + + event = urb->transfer_buffer; + switch (event->bNotificationType) { + case USB_CDC_NOTIFY_NETWORK_CONNECTION: + netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", + event->wValue ? 
"on" : "off"); + if (event->wValue) + netif_carrier_on(dev->net); + else + netif_carrier_off(dev->net); + break; + case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ + netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n", + urb->actual_length); + if (urb->actual_length != (sizeof *event + 8)) + set_bit(EVENT_STS_SPLIT, &dev->flags); + else + dumpspeed(dev, (__le32 *) &event[1]); + break; + /* USB_CDC_NOTIFY_RESPONSE_AVAILABLE can happen too (e.g. RNDIS), + * but there are no standard formats for the response data. + */ + default: + netdev_err(dev->net, "CDC: unexpected notification %02x!\n", + event->bNotificationType); + break; + } +} + +static int cdc_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int status; + struct cdc_state *info = (void *) &dev->data; + + status = usbnet_generic_cdc_bind(dev, intf); + if (status < 0) + return status; + + status = usbnet_get_ethernet_addr(dev, info->ether->iMACAddress); + if (status < 0) { + usb_set_intfdata(info->data, NULL); + usb_driver_release_interface(driver_of(intf), info->data); + return status; + } + + /* FIXME cdc-ether has some multicast code too, though it complains + * in routine cases. info->ether describes the multicast support. + * Implement that here, manipulating the cdc filter as needed. + */ + return 0; +} + +static int cdc_manage_power(struct usbnet *dev, int on) +{ + dev->intf->needs_remote_wakeup = on; + return 0; +} + +static const struct driver_info cdc_info = { + .description = "CDC Ethernet Device", + .flags = FLAG_ETHER, + // .check_connect = cdc_check_connect, + .bind = cdc_bind, + .unbind = usbnet_cdc_unbind, + .status = cdc_status, + .manage_power = cdc_manage_power, +}; + +static const struct driver_info mbm_info = { + .description = "Mobile Broadband Network Device", + .flags = FLAG_WWAN, + .bind = cdc_bind, + .unbind = usbnet_cdc_unbind, + .status = cdc_status, +}; + +/*-------------------------------------------------------------------------*/ + + +static const struct usb_device_id products [] = { +/* + * BLACKLIST !! + * + * First blacklist any products that are egregiously nonconformant + * with the CDC Ethernet specs. Minor braindamage we cope with; when + * they're not even trying, needing a separate driver is only the first + * of the differences to show up. + */ + +#define ZAURUS_MASTER_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + +/* SA-1100 based Sharp Zaurus ("collie"), or compatible; + * wire-incompatible with true CDC Ethernet implementations. + * (And, it seems, needlessly so...) + */ +{ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8004, + ZAURUS_MASTER_INTERFACE, + .driver_info = 0, +}, + +/* PXA-25x based Sharp Zaurii. Note that it seems some of these + * (later models especially) may have shipped only with firmware + * advertising false "CDC MDLM" compatibility ... but we're not + * clear which models did that, so for now let's assume the worst. 
+ */ +{ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8005, /* A-300 */ + ZAURUS_MASTER_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8006, /* B-500/SL-5600 */ + ZAURUS_MASTER_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8007, /* C-700 */ + ZAURUS_MASTER_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9031, /* C-750 C-760 */ + ZAURUS_MASTER_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_MASTER_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + /* reported with some C860 units */ + .idProduct = 0x9050, /* C-860 */ + ZAURUS_MASTER_INTERFACE, + .driver_info = 0, +}, + +/* Olympus has some models with a Zaurus-compatible option. + * R-1000 uses a FreeScale i.MXL cpu (ARMv4T) + */ +{ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x07B4, + .idProduct = 0x0F02, /* R-1000 */ + ZAURUS_MASTER_INTERFACE, + .driver_info = 0, +}, + +/* + * WHITELIST!!! + * + * CDC Ether uses two interfaces, not necessarily consecutive. + * We match the main interface, ignoring the optional device + * class so we could handle devices that aren't exclusively + * CDC ether. + * + * NOTE: this match must come AFTER entries blacklisting devices + * because of bugs/quirks in a given product (like Zaurus, above). + */ +{ + USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &cdc_info, +}, { + /* Ericsson F3507g */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Ericsson F3507g ver. 
2 */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Ericsson F3607gw */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Ericsson F3607gw ver 2 */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Ericsson F3607gw ver 3 */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Ericsson F3307 */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Ericsson F3307 ver 2 */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Ericsson C3607w */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Ericsson C3607w ver 2 */ + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Toshiba F3507g */ + USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Toshiba F3607gw */ + USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Toshiba F3607gw ver 2 */ + USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Dell F3507g */ + USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Dell F3607gw */ + USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, { + /* Dell F3607gw ver 2 */ + USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &mbm_info, +}, + { }, // END +}; +MODULE_DEVICE_TABLE(usb, products); + +static struct usb_driver cdc_driver = { + .name = "cdc_ether", + .id_table = products, + .probe = usbnet_probe, + .disconnect = usbnet_disconnect, + .suspend = usbnet_suspend, + .resume = usbnet_resume, + .reset_resume = usbnet_resume, + .supports_autosuspend = 1, +}; + + +static int __init cdc_init(void) +{ + BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) + < sizeof(struct cdc_state))); + + return usb_register(&cdc_driver); +} +module_init(cdc_init); + +static void __exit cdc_exit(void) +{ + usb_deregister(&cdc_driver); +} +module_exit(cdc_exit); + +MODULE_AUTHOR("David Brownell"); +MODULE_DESCRIPTION("USB CDC Ethernet devices"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c new file mode 100644 index 0000000..21a8b68 --- /dev/null +++ b/drivers/net/usb/rndis_host.c @@ -0,0 +1,632 @@ +/* + * Host Side support for RNDIS Networking Links + * Copyright 
(C) 2005 by David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * RNDIS is NDIS remoted over USB. It's a MSFT variant of CDC ACM ... of + * course ACM was intended for modems, not Ethernet links! USB's standard + * for Ethernet links is "CDC Ethernet", which is significantly simpler. + * + * NOTE that Microsoft's "RNDIS 1.0" specification is incomplete. Issues + * include: + * - Power management in particular relies on information that's scattered + * through other documentation, and which is incomplete or incorrect even + * there. + * - There are various undocumented protocol requirements, such as the + * need to send unused garbage in control-OUT messages. + * - In some cases, MS-Windows will emit undocumented requests; this + * matters more to peripheral implementations than host ones. + * + * Moreover there's a no-open-specs variant of RNDIS called "ActiveSync". + * + * For these reasons and others, ** USE OF RNDIS IS STRONGLY DISCOURAGED ** in + * favor of such non-proprietary alternatives as CDC Ethernet or the newer (and + * currently rare) "Ethernet Emulation Model" (EEM). + */ + +/* + * RNDIS notifications from device: command completion; "reverse" + * keepalives; etc + */ +void rndis_status(struct usbnet *dev, struct urb *urb) +{ + netdev_dbg(dev->net, "rndis status urb, len %d stat %d\n", + urb->actual_length, urb->status); + // FIXME for keepalives, respond immediately (asynchronously) + // if not an RNDIS status, do like cdc_status(dev,urb) does +} +EXPORT_SYMBOL_GPL(rndis_status); + +/* + * RNDIS indicate messages. + */ +static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg, + int buflen) +{ + struct cdc_state *info = (void *)&dev->data; + struct device *udev = &info->control->dev; + + if (dev->driver_info->indication) { + dev->driver_info->indication(dev, msg, buflen); + } else { + switch (msg->status) { + case RNDIS_STATUS_MEDIA_CONNECT: + dev_info(udev, "rndis media connect\n"); + break; + case RNDIS_STATUS_MEDIA_DISCONNECT: + dev_info(udev, "rndis media disconnect\n"); + break; + default: + dev_info(udev, "rndis indication: 0x%08x\n", + le32_to_cpu(msg->status)); + } + } +} + +/* + * RPC done RNDIS-style. Caller guarantees: + * - message is properly byteswapped + * - there's no other request pending + * - buf can hold up to 1KB response (required by RNDIS spec) + * On return, the first few entries are already byteswapped. + * + * Call context is likely probe(), before interface name is known, + * which is why we won't try to use it in the diagnostics. 
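Editorial sketch, not the driver's own definitions: rndis_command(), which follows, sends a request and then polls for a completion that echoes the same request_id. All RNDIS control messages begin with the same little-endian words, roughly as sketched below; HALT and RESET are the exceptions that carry no usable request_id, which is why the code skips the xid for them. Field names here are illustrative.

#include <stdint.h>

/* Illustrative layout of the words shared by RNDIS control messages.
 * Everything on the wire is little-endian. */
struct demo_rndis_hdr {
        uint32_t msg_type;      /* e.g. QUERY 0x00000004, its completion 0x80000004 */
        uint32_t msg_len;       /* total message length in bytes */
        uint32_t request_id;    /* echoed back by the device (the "xid") */
        uint32_t status;        /* meaningful in completions */
};

/* A completion answers a request when both conditions hold. */
static int demo_answers(const struct demo_rndis_hdr *rsp, uint32_t sent_type,
                        uint32_t sent_xid)
{
        return rsp->msg_type == (sent_type | 0x80000000u) &&   /* completion bit */
               rsp->request_id == sent_xid;
}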
+ */ +int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) +{ + struct cdc_state *info = (void *) &dev->data; + int master_ifnum; + int retval; + unsigned count; + __le32 rsp; + u32 xid = 0, msg_len, request_id; + + /* REVISIT when this gets called from contexts other than probe() or + * disconnect(): either serialize, or dispatch responses on xid + */ + + /* Issue the request; xid is unique, don't bother byteswapping it */ + if (likely(buf->msg_type != RNDIS_MSG_HALT && + buf->msg_type != RNDIS_MSG_RESET)) { + xid = dev->xid++; + if (!xid) + xid = dev->xid++; + buf->request_id = (__force __le32) xid; + } + master_ifnum = info->control->cur_altsetting->desc.bInterfaceNumber; + retval = usb_control_msg(dev->udev, + usb_sndctrlpipe(dev->udev, 0), + USB_CDC_SEND_ENCAPSULATED_COMMAND, + USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, master_ifnum, + buf, le32_to_cpu(buf->msg_len), + RNDIS_CONTROL_TIMEOUT_MS); + if (unlikely(retval < 0 || xid == 0)) + return retval; + + // FIXME Seems like some devices discard responses when + // we time out and cancel our "get response" requests... + // so, this is fragile. Probably need to poll for status. + + /* ignore status endpoint, just poll the control channel; + * the request probably completed immediately + */ + rsp = buf->msg_type | RNDIS_MSG_COMPLETION; + for (count = 0; count < 10; count++) { + memset(buf, 0, CONTROL_BUFFER_SIZE); + retval = usb_control_msg(dev->udev, + usb_rcvctrlpipe(dev->udev, 0), + USB_CDC_GET_ENCAPSULATED_RESPONSE, + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, master_ifnum, + buf, buflen, + RNDIS_CONTROL_TIMEOUT_MS); + if (likely(retval >= 8)) { + msg_len = le32_to_cpu(buf->msg_len); + request_id = (__force u32) buf->request_id; + if (likely(buf->msg_type == rsp)) { + if (likely(request_id == xid)) { + if (unlikely(rsp == RNDIS_MSG_RESET_C)) + return 0; + if (likely(RNDIS_STATUS_SUCCESS + == buf->status)) + return 0; + dev_dbg(&info->control->dev, + "rndis reply status %08x\n", + le32_to_cpu(buf->status)); + return -EL3RST; + } + dev_dbg(&info->control->dev, + "rndis reply id %d expected %d\n", + request_id, xid); + /* then likely retry */ + } else switch (buf->msg_type) { + case RNDIS_MSG_INDICATE: /* fault/event */ + rndis_msg_indicate(dev, (void *)buf, buflen); + + break; + case RNDIS_MSG_KEEPALIVE: { /* ping */ + struct rndis_keepalive_c *msg = (void *)buf; + + msg->msg_type = RNDIS_MSG_KEEPALIVE_C; + msg->msg_len = cpu_to_le32(sizeof *msg); + msg->status = RNDIS_STATUS_SUCCESS; + retval = usb_control_msg(dev->udev, + usb_sndctrlpipe(dev->udev, 0), + USB_CDC_SEND_ENCAPSULATED_COMMAND, + USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, master_ifnum, + msg, sizeof *msg, + RNDIS_CONTROL_TIMEOUT_MS); + if (unlikely(retval < 0)) + dev_dbg(&info->control->dev, + "rndis keepalive err %d\n", + retval); + } + break; + default: + dev_dbg(&info->control->dev, + "unexpected rndis msg %08x len %d\n", + le32_to_cpu(buf->msg_type), msg_len); + } + } else { + /* device probably issued a protocol stall; ignore */ + dev_dbg(&info->control->dev, + "rndis response error, code %d\n", retval); + } + msleep(20); + } + dev_dbg(&info->control->dev, "rndis response timeout\n"); + return -ETIMEDOUT; +} +EXPORT_SYMBOL_GPL(rndis_command); + +/* + * rndis_query: + * + * Performs a query for @oid along with 0 or more bytes of payload as + * specified by @in_len. If @reply_len is not set to -1 then the reply + * length is checked against this value, resulting in an error if it + * doesn't match. 
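A worked example, under assumed values, of the offset convention that rndis_query() below relies on: offsets in a QUERY completion are counted from the request_id field, i.e. from byte 8 of the message, which is why the bounds check is written as 8 + off + len against the roughly 1 KB control buffer.

#include <stdint.h>
#include <stdio.h>

#define DEMO_CONTROL_BUF_SIZE 1025      /* assumption: at least the 1 KB the spec requires */

int main(void)
{
        /* Say a QUERY completion reports its payload at offset 16, length 6,
         * a typical reply for the 6-byte permanent MAC address OID. */
        uint32_t off = 16, len = 6;

        /* Payload starts "off" bytes past the request_id field, which itself
         * sits at byte 8 of the message, so the absolute position is: */
        uint32_t start = 8 + off;                       /* = 24 */
        printf("payload occupies bytes %u..%u\n", start, start + len - 1);

        /* The driver rejects replies that would run past its buffer: */
        if (8 + off + len > DEMO_CONTROL_BUF_SIZE)
                printf("invalid response\n");
        return 0;
}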
+ * + * NOTE: Adding a payload exactly or greater than the size of the expected + * response payload is an evident requirement MSFT added for ActiveSync. + * + * The only exception is for OIDs that return a variably sized response, + * in which case no payload should be added. This undocumented (and + * nonsensical!) issue was found by sniffing protocol requests from the + * ActiveSync 4.1 Windows driver. + */ +static int rndis_query(struct usbnet *dev, struct usb_interface *intf, + void *buf, __le32 oid, u32 in_len, + void **reply, int *reply_len) +{ + int retval; + union { + void *buf; + struct rndis_msg_hdr *header; + struct rndis_query *get; + struct rndis_query_c *get_c; + } u; + u32 off, len; + + u.buf = buf; + + memset(u.get, 0, sizeof *u.get + in_len); + u.get->msg_type = RNDIS_MSG_QUERY; + u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len); + u.get->oid = oid; + u.get->len = cpu_to_le32(in_len); + u.get->offset = cpu_to_le32(20); + + retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); + if (unlikely(retval < 0)) { + dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) failed, %d\n", + oid, retval); + return retval; + } + + off = le32_to_cpu(u.get_c->offset); + len = le32_to_cpu(u.get_c->len); + if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE)) + goto response_error; + + if (*reply_len != -1 && len != *reply_len) + goto response_error; + + *reply = (unsigned char *) &u.get_c->request_id + off; + *reply_len = len; + + return retval; + +response_error: + dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) " + "invalid response - off %d len %d\n", + oid, off, len); + return -EDOM; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +/* same as usbnet_netdev_ops but MTU change not allowed */ +static const struct net_device_ops rndis_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = usbnet_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; +#endif + +int +generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) +{ + int retval; + struct net_device *net = dev->net; + struct cdc_state *info = (void *) &dev->data; + union { + void *buf; + struct rndis_msg_hdr *header; + struct rndis_init *init; + struct rndis_init_c *init_c; + struct rndis_query *get; + struct rndis_query_c *get_c; + struct rndis_set *set; + struct rndis_set_c *set_c; + struct rndis_halt *halt; + } u; + u32 tmp; + __le32 phym_unspec, *phym; + int reply_len; + unsigned char *bp; + + /* we can't rely on i/o from stack working, or stack allocation */ + u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); + if (!u.buf) + return -ENOMEM; + retval = usbnet_generic_cdc_bind(dev, intf); + if (retval < 0) + goto fail; + + u.init->msg_type = RNDIS_MSG_INIT; + u.init->msg_len = cpu_to_le32(sizeof *u.init); + u.init->major_version = cpu_to_le32(1); + u.init->minor_version = cpu_to_le32(0); + + /* max transfer (in spec) is 0x4000 at full speed, but for + * TX we'll stick to one Ethernet packet plus RNDIS framing. + * For RX we handle drivers that zero-pad to end-of-packet. + * Don't let userspace change these settings. + * + * NOTE: there still seems to be wierdness here, as if we need + * to do some more things to make sure WinCE targets accept this. + * They default to jumbograms of 8KB or 16KB, which is absurd + * for such low data rates and which is also more than Linux + * can usually expect to allocate for SKB data... 
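Worked numbers, under assumed high-speed values, for the receive-buffer sizing that follows this comment: the URB is rounded up to a whole number of max-size bulk packets strictly larger than one framed MTU, so devices that zero-pad to end-of-packet still fit.

#include <stdio.h>

int main(void)
{
        unsigned maxpacket = 512;               /* high-speed bulk endpoint */
        unsigned mtu = 1500, eth_header = 14;   /* plain Ethernet framing */
        unsigned rndis_hdr = 44;                /* sizeof(struct rndis_data_hdr) */

        unsigned hard_mtu = mtu + eth_header + rndis_hdr;        /* 1558 */
        unsigned rx_urb_size = hard_mtu + (maxpacket + 1);       /* 2071 */
        rx_urb_size &= ~(maxpacket - 1);                         /* 2048 */

        printf("hard_mtu=%u rx_urb_size=%u (%u bulk packets)\n",
               hard_mtu, rx_urb_size, rx_urb_size / maxpacket);
        return 0;
}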
+ */ + net->hard_header_len += sizeof (struct rndis_data_hdr); + dev->hard_mtu = net->mtu + net->hard_header_len; + + dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1); + if (dev->maxpacket == 0) { + netif_dbg(dev, probe, dev->net, + "dev->maxpacket can't be 0\n"); + retval = -EINVAL; + goto fail_and_release; + } + + dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1); + dev->rx_urb_size &= ~(dev->maxpacket - 1); + u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + net->netdev_ops = &rndis_netdev_ops; +#else + net->change_mtu = NULL; +#endif + + retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); + if (unlikely(retval < 0)) { + /* it might not even be an RNDIS device!! */ + dev_err(&intf->dev, "RNDIS init failed, %d\n", retval); + goto fail_and_release; + } + tmp = le32_to_cpu(u.init_c->max_transfer_size); + if (tmp < dev->hard_mtu) { + if (tmp <= net->hard_header_len) { + dev_err(&intf->dev, + "dev can't take %u byte packets (max %u)\n", + dev->hard_mtu, tmp); + retval = -EINVAL; + goto halt_fail_and_release; + } + dev_warn(&intf->dev, + "dev can't take %u byte packets (max %u), " + "adjusting MTU to %u\n", + dev->hard_mtu, tmp, tmp - net->hard_header_len); + dev->hard_mtu = tmp; + net->mtu = dev->hard_mtu - net->hard_header_len; + } + + /* REVISIT: peripheral "alignment" request is ignored ... */ + dev_dbg(&intf->dev, + "hard mtu %u (%u from dev), rx buflen %Zu, align %d\n", + dev->hard_mtu, tmp, dev->rx_urb_size, + 1 << le32_to_cpu(u.init_c->packet_alignment)); + + /* module has some device initialization code needs to be done right + * after RNDIS_INIT */ + if (dev->driver_info->early_init && + dev->driver_info->early_init(dev) != 0) + goto halt_fail_and_release; + + /* Check physical medium */ + phym = NULL; + reply_len = sizeof *phym; + retval = rndis_query(dev, intf, u.buf, OID_GEN_PHYSICAL_MEDIUM, + 0, (void **) &phym, &reply_len); + if (retval != 0 || !phym) { + /* OID is optional so don't fail here. 
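A small worked example, with assumed numbers, of the MTU clamp above: if the INIT completion advertises a max_transfer_size smaller than the host's framed MTU, the MTU shrinks to fit rather than failing, provided some payload still fits past the headers.

#include <stdio.h>

int main(void)
{
        unsigned hard_header_len = 58;  /* 14-byte Ethernet + 44-byte RNDIS header */
        unsigned hard_mtu = 1558;       /* host's preferred 1500-byte MTU, framed */
        unsigned dev_max = 1280;        /* assumed device max_transfer_size */

        if (dev_max < hard_mtu) {
                if (dev_max <= hard_header_len) {
                        printf("device unusable: no room for payload\n");
                        return 1;
                }
                hard_mtu = dev_max;
                printf("adjusting MTU to %u\n", hard_mtu - hard_header_len); /* 1222 */
        }
        return 0;
}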
*/ + phym_unspec = RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED; + phym = &phym_unspec; + } + if ((flags & FLAG_RNDIS_PHYM_WIRELESS) && + *phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { + netif_dbg(dev, probe, dev->net, + "driver requires wireless physical medium, but device is not\n"); + retval = -ENODEV; + goto halt_fail_and_release; + } + if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) && + *phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) { + netif_dbg(dev, probe, dev->net, + "driver requires non-wireless physical medium, but device is wireless.\n"); + retval = -ENODEV; + goto halt_fail_and_release; + } + + /* Get designated host ethernet address */ + reply_len = ETH_ALEN; + retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS, + 48, (void **) &bp, &reply_len); + if (unlikely(retval< 0)) { + dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval); + goto halt_fail_and_release; + } + memcpy(net->dev_addr, bp, ETH_ALEN); + memcpy(net->perm_addr, bp, ETH_ALEN); + + /* set a nonzero filter to enable data transfers */ + memset(u.set, 0, sizeof *u.set); + u.set->msg_type = RNDIS_MSG_SET; + u.set->msg_len = cpu_to_le32(4 + sizeof *u.set); + u.set->oid = OID_GEN_CURRENT_PACKET_FILTER; + u.set->len = cpu_to_le32(4); + u.set->offset = cpu_to_le32((sizeof *u.set) - 8); + *(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER; + + retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); + if (unlikely(retval < 0)) { + dev_err(&intf->dev, "rndis set packet filter, %d\n", retval); + goto halt_fail_and_release; + } + + retval = 0; + + kfree(u.buf); + return retval; + +halt_fail_and_release: + memset(u.halt, 0, sizeof *u.halt); + u.halt->msg_type = RNDIS_MSG_HALT; + u.halt->msg_len = cpu_to_le32(sizeof *u.halt); + (void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE); +fail_and_release: + usb_set_intfdata(info->data, NULL); + usb_driver_release_interface(driver_of(intf), info->data); + info->data = NULL; +fail: + kfree(u.buf); + return retval; +} +EXPORT_SYMBOL_GPL(generic_rndis_bind); + +static int rndis_bind(struct usbnet *dev, struct usb_interface *intf) +{ + return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS); +} + +void rndis_unbind(struct usbnet *dev, struct usb_interface *intf) +{ + struct rndis_halt *halt; + + /* try to clear any rndis state/activity (no i/o from stack!) */ + halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); + if (halt) { + halt->msg_type = RNDIS_MSG_HALT; + halt->msg_len = cpu_to_le32(sizeof *halt); + (void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE); + kfree(halt); + } + + usbnet_cdc_unbind(dev, intf); +} +EXPORT_SYMBOL_GPL(rndis_unbind); + +/* + * DATA -- host must not write zlps + */ +int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + /* peripheral may have batched packets to us... */ + while (likely(skb->len)) { + struct rndis_data_hdr *hdr = (void *)skb->data; + struct sk_buff *skb2; + u32 msg_len, data_offset, data_len; + + msg_len = le32_to_cpu(hdr->msg_len); + data_offset = le32_to_cpu(hdr->data_offset); + data_len = le32_to_cpu(hdr->data_len); + + /* don't choke if we see oob, per-packet data, etc */ + if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET || + skb->len < msg_len || + (data_offset + data_len + 8) > msg_len)) { + dev->net->stats.rx_frame_errors++; + netdev_dbg(dev->net, "bad rndis message %d/%d/%d/%d, len %d\n", + le32_to_cpu(hdr->msg_type), + msg_len, data_offset, data_len, skb->len); + return 0; + } + skb_pull(skb, 8 + data_offset); + + /* at most one packet left? 
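Editorial sketch of the data framing that the rndis_rx_fixup() loop above unpacks: each batched packet is preceded by a data header whose data_offset is counted from byte 8 of the message, so with the usual 44-byte header the Ethernet frame begins at byte 44. The numbers below are assumed, typical values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Typical values for one RNDIS_MSG_PACKET carrying a 60-byte frame. */
        uint32_t data_offset = 36;      /* counted from byte 8 (after type + len) */
        uint32_t data_len = 60;
        uint32_t msg_len = 8 + data_offset + data_len;  /* 104, often padded up */

        printf("frame is bytes %u..%u of a %u-byte message\n",
               8 + data_offset, 8 + data_offset + data_len - 1, msg_len);

        /* The sanity check in rx_fixup rejects anything that would overrun: */
        if (data_offset + data_len + 8 > msg_len)
                printf("bad rndis message\n");
        return 0;
}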
*/ + if (likely((data_len - skb->len) <= sizeof *hdr)) { + skb_trim(skb, data_len); + break; + } + + /* try to return all the packets in the batch */ + skb2 = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb2)) + break; + skb_pull(skb, msg_len - sizeof *hdr); + skb_trim(skb2, data_len); + usbnet_skb_return(dev, skb2); + } + + /* caller will usbnet_skb_return the remaining packet */ + return 1; +} +EXPORT_SYMBOL_GPL(rndis_rx_fixup); + +struct sk_buff * +rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) +{ + struct rndis_data_hdr *hdr; + struct sk_buff *skb2; + unsigned len = skb->len; + + if (likely(!skb_cloned(skb))) { + int room = skb_headroom(skb); + + /* enough head room as-is? */ + if (unlikely((sizeof *hdr) <= room)) + goto fill; + + /* enough room, but needs to be readjusted? */ + room += skb_tailroom(skb); + if (likely((sizeof *hdr) <= room)) { + skb->data = memmove(skb->head + sizeof *hdr, + skb->data, len); + skb_set_tail_pointer(skb, len); + goto fill; + } + } + + /* create a new skb, with the correct size (and tailpad) */ + skb2 = skb_copy_expand(skb, sizeof *hdr, 1, flags); + dev_kfree_skb_any(skb); + if (unlikely(!skb2)) + return skb2; + skb = skb2; + + /* fill out the RNDIS header. we won't bother trying to batch + * packets; Linux minimizes wasted bandwidth through tx queues. + */ +fill: + hdr = (void *) __skb_push(skb, sizeof *hdr); + memset(hdr, 0, sizeof *hdr); + hdr->msg_type = RNDIS_MSG_PACKET; + hdr->msg_len = cpu_to_le32(skb->len); + hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8); + hdr->data_len = cpu_to_le32(len); + + /* FIXME make the last packet always be short ... */ + return skb; +} +EXPORT_SYMBOL_GPL(rndis_tx_fixup); + + +static const struct driver_info rndis_info = { + .description = "RNDIS device", + .flags = FLAG_ETHER | FLAG_FRAMING_RN | FLAG_NO_SETINT, + .bind = rndis_bind, + .unbind = rndis_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, +}; + +/*-------------------------------------------------------------------------*/ + +static const struct usb_device_id products [] = { +{ + /* RNDIS is MSFT's un-official variant of CDC ACM */ + USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), + .driver_info = (unsigned long) &rndis_info, +}, { + /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */ + USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1), + .driver_info = (unsigned long) &rndis_info, +}, { + /* RNDIS for tethering */ + USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), + .driver_info = (unsigned long) &rndis_info, +}, + { }, // END +}; +MODULE_DEVICE_TABLE(usb, products); + +static struct usb_driver rndis_driver = { + .name = "rndis_host", + .id_table = products, + .probe = usbnet_probe, + .disconnect = usbnet_disconnect, + .suspend = usbnet_suspend, + .resume = usbnet_resume, +}; + +static int __init rndis_init(void) +{ + return usb_register(&rndis_driver); +} +module_init(rndis_init); + +static void __exit rndis_exit(void) +{ + usb_deregister(&rndis_driver); +} +module_exit(rndis_exit); + +MODULE_AUTHOR("David Brownell"); +MODULE_DESCRIPTION("USB Host side RNDIS driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c new file mode 100644 index 0000000..6fb3e6d --- /dev/null +++ b/drivers/net/usb/usbnet.c @@ -0,0 +1,1523 @@ +/* + * USB Network driver infrastructure + * Copyright (C) 2000-2005 by David Brownell + * Copyright (C) 2003-2005 David Hollis + * + * This program is free software; you can redistribute it and/or modify + * 
it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* + * This is a generic "USB networking" framework that works with several + * kinds of full and high speed networking devices: host-to-host cables, + * smart usb peripherals, and actual Ethernet adapters. + * + * These devices usually differ in terms of control protocols (if they + * even have one!) and sometimes they define new framing to wrap or batch + * Ethernet packets. Otherwise, they talk to USB pretty much the same, + * so interface (un)binding, endpoint I/O queues, fault handling, and other + * issues can usefully be addressed by this framework. + */ + +// #define DEBUG // error path messages, extra info +// #define VERBOSE // more; success messages + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "22-Aug-2005" + + +/*-------------------------------------------------------------------------*/ + +/* + * Nineteen USB 1.1 max size bulk transactions per frame (ms), max. + * Several dozen bytes of IPv4 data can fit in two such transactions. + * One maximum size Ethernet packet takes twenty four of them. + * For high speed, each frame comfortably fits almost 36 max size + * Ethernet packets (so queues should be bigger). + * + * REVISIT qlens should be members of 'struct usbnet'; the goal is to + * let the USB host controller be busy for 5msec or more before an irq + * is required, under load. Jumbograms change the equation. + */ +#define RX_MAX_QUEUE_MEMORY (60 * 1518) +#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \ + (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4) +#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \ + (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4) + +// reawaken network queue this soon after stopping; else watchdog barks +#define TX_TIMEOUT_JIFFIES (5*HZ) + +// throttle rx/tx briefly after some faults, so khubd might disconnect() +// us (it polls at HZ/4 usually) before we report too many false errors. 
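Worked numbers, under assumed values, for the queue-length macros above: at high speed the ring depth comes from a 60 * 1518 byte budget divided by the per-URB size, and at full speed it falls back to a fixed depth of 4.

#include <stdio.h>

#define DEMO_RX_MAX_QUEUE_MEMORY (60 * 1518)    /* same budget as above */

int main(void)
{
        unsigned rx_urb_size = 2048, hard_mtu = 1518;   /* assumed values */
        int high_speed = 1;

        unsigned rx_qlen = high_speed ? DEMO_RX_MAX_QUEUE_MEMORY / rx_urb_size : 4;
        unsigned tx_qlen = high_speed ? DEMO_RX_MAX_QUEUE_MEMORY / hard_mtu : 4;

        printf("rx_qlen=%u tx_qlen=%u\n", rx_qlen, tx_qlen);    /* 44 and 60 */
        return 0;
}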
+#define THROTTLE_JIFFIES (HZ/8) + +// between wakeups +#define UNLINK_TIMEOUT_MS 3 + +/*-------------------------------------------------------------------------*/ + +// randomly generated ethernet address +static u8 node_id [ETH_ALEN]; + +static const char driver_name [] = "usbnet"; + +/* use ethtool to change the level for any given device */ +static int msg_level = -1; +module_param (msg_level, int, 0); +MODULE_PARM_DESC (msg_level, "Override default message level"); + +/*-------------------------------------------------------------------------*/ + +/* handles CDC Ethernet and many other network "bulk data" interfaces */ +int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) +{ + int tmp; + struct usb_host_interface *alt = NULL; + struct usb_host_endpoint *in = NULL, *out = NULL; + struct usb_host_endpoint *status = NULL; + + for (tmp = 0; tmp < intf->num_altsetting; tmp++) { + unsigned ep; + + in = out = status = NULL; + alt = intf->altsetting + tmp; + + /* take the first altsetting with in-bulk + out-bulk; + * remember any status endpoint, just in case; + * ignore other endpoints and altsetttings. + */ + for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { + struct usb_host_endpoint *e; + int intr = 0; + + e = alt->endpoint + ep; + switch (e->desc.bmAttributes) { + case USB_ENDPOINT_XFER_INT: + if (!usb_endpoint_dir_in(&e->desc)) + continue; + intr = 1; + /* FALLTHROUGH */ + case USB_ENDPOINT_XFER_BULK: + break; + default: + continue; + } + if (usb_endpoint_dir_in(&e->desc)) { + if (!intr && !in) + in = e; + else if (intr && !status) + status = e; + } else { + if (!out) + out = e; + } + } + if (in && out) + break; + } + if (!alt || !in || !out) + return -EINVAL; + + if (alt->desc.bAlternateSetting != 0 || + !(dev->driver_info->flags & FLAG_NO_SETINT)) { + tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber, + alt->desc.bAlternateSetting); + if (tmp < 0) + return tmp; + } + + dev->in = usb_rcvbulkpipe (dev->udev, + in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + dev->out = usb_sndbulkpipe (dev->udev, + out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + dev->status = status; + return 0; +} +EXPORT_SYMBOL_GPL(usbnet_get_endpoints); + +static u8 nibble(unsigned char c) +{ + if (likely(isdigit(c))) + return c - '0'; + c = toupper(c); + if (likely(isxdigit(c))) + return 10 + c - 'A'; + return 0; +} + +int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) +{ + int tmp, i; + unsigned char buf [13]; + + tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf); + if (tmp != 12) { + dev_dbg(&dev->udev->dev, + "bad MAC string %d fetch, %d\n", iMACAddress, tmp); + if (tmp >= 0) + tmp = -EINVAL; + return tmp; + } + for (i = tmp = 0; i < 6; i++, tmp += 2) + dev->net->dev_addr [i] = + (nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]); + return 0; +} +EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); + +static void intr_complete (struct urb *urb); + +static int init_status (struct usbnet *dev, struct usb_interface *intf) +{ + char *buf = NULL; + unsigned pipe = 0; + unsigned maxp; + unsigned period; + + if (!dev->driver_info->status) + return 0; + + pipe = usb_rcvintpipe (dev->udev, + dev->status->desc.bEndpointAddress + & USB_ENDPOINT_NUMBER_MASK); + maxp = usb_maxpacket (dev->udev, pipe, 0); + + /* avoid 1 msec chatter: min 8 msec poll rate */ + period = max ((int) dev->status->desc.bInterval, + (dev->udev->speed == USB_SPEED_HIGH) ? 
7 : 3); + + buf = kmalloc (maxp, GFP_KERNEL); + if (buf) { + dev->interrupt = usb_alloc_urb (0, GFP_KERNEL); + if (!dev->interrupt) { + kfree (buf); + return -ENOMEM; + } else { + usb_fill_int_urb(dev->interrupt, dev->udev, pipe, + buf, maxp, intr_complete, dev, period); + dev_dbg(&intf->dev, + "status ep%din, %d bytes period %d\n", + usb_pipeendpoint(pipe), maxp, period); + } + } + return 0; +} + +/* Passes this packet up the stack, updating its accounting. + * Some link protocols batch packets, so their rx_fixup paths + * can return clones as well as just modify the original skb. + */ +void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) +{ + int status; + + if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { + skb_queue_tail(&dev->rxq_pause, skb); + return; + } + + skb->protocol = eth_type_trans (skb, dev->net); + dev->net->stats.rx_packets++; + dev->net->stats.rx_bytes += skb->len; + + netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", + skb->len + sizeof (struct ethhdr), skb->protocol); + memset (skb->cb, 0, sizeof (struct skb_data)); + status = netif_rx (skb); + if (status != NET_RX_SUCCESS) + netif_dbg(dev, rx_err, dev->net, + "netif_rx status %d\n", status); +} +EXPORT_SYMBOL_GPL(usbnet_skb_return); + + +/*------------------------------------------------------------------------- + * + * Network Device Driver (peer link to "Host Device", from USB host) + * + *-------------------------------------------------------------------------*/ + +int usbnet_change_mtu (struct net_device *net, int new_mtu) +{ + struct usbnet *dev = netdev_priv(net); + int ll_mtu = new_mtu + net->hard_header_len; + int old_hard_mtu = dev->hard_mtu; + int old_rx_urb_size = dev->rx_urb_size; + + if (new_mtu <= 0) + return -EINVAL; + // no second zero-length packet read wanted after mtu-sized packets + if ((ll_mtu % dev->maxpacket) == 0) + return -EDOM; + net->mtu = new_mtu; + + dev->hard_mtu = net->mtu + net->hard_header_len; + if (dev->rx_urb_size == old_hard_mtu) { + dev->rx_urb_size = dev->hard_mtu; + if (dev->rx_urb_size > old_rx_urb_size) + usbnet_unlink_rx_urbs(dev); + } + + return 0; +} +EXPORT_SYMBOL_GPL(usbnet_change_mtu); + +/*-------------------------------------------------------------------------*/ + +/* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from + * completion callbacks. 2.5 should have fixed those bugs... + */ + +static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list) +{ + unsigned long flags; + + spin_lock_irqsave(&list->lock, flags); + __skb_unlink(skb, list); + spin_unlock(&list->lock); + spin_lock(&dev->done.lock); + __skb_queue_tail(&dev->done, skb); + if (dev->done.qlen == 1) + tasklet_schedule(&dev->bh); + spin_unlock_irqrestore(&dev->done.lock, flags); +} + +/* some work can't be done in tasklets, so we use keventd + * + * NOTE: annoying asymmetry: if it's active, schedule_work() fails, + * but tasklet_schedule() doesn't. hope the failure is rare. 
+ */ +void usbnet_defer_kevent (struct usbnet *dev, int work) +{ + set_bit (work, &dev->flags); + if (!schedule_work (&dev->kevent)) + netdev_err(dev->net, "kevent %d may have been dropped\n", work); + else + netdev_dbg(dev->net, "kevent %d scheduled\n", work); +} +EXPORT_SYMBOL_GPL(usbnet_defer_kevent); + +/*-------------------------------------------------------------------------*/ + +static void rx_complete (struct urb *urb); + +static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) +{ + struct sk_buff *skb; + struct skb_data *entry; + int retval = 0; + unsigned long lockflags; + size_t size = dev->rx_urb_size; + + if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) { + netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); + usbnet_defer_kevent (dev, EVENT_RX_MEMORY); + usb_free_urb (urb); + return; + } + skb_reserve (skb, NET_IP_ALIGN); + + entry = (struct skb_data *) skb->cb; + entry->urb = urb; + entry->dev = dev; + entry->state = rx_start; + entry->length = 0; + + usb_fill_bulk_urb (urb, dev->udev, dev->in, + skb->data, size, rx_complete, skb); + + spin_lock_irqsave (&dev->rxq.lock, lockflags); + + if (netif_running (dev->net) && + netif_device_present (dev->net) && + !test_bit (EVENT_RX_HALT, &dev->flags) && + !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { + switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { + case -EPIPE: + usbnet_defer_kevent (dev, EVENT_RX_HALT); + break; + case -ENOMEM: + usbnet_defer_kevent (dev, EVENT_RX_MEMORY); + break; + case -ENODEV: + netif_dbg(dev, ifdown, dev->net, "device gone\n"); + netif_device_detach (dev->net); + break; + default: + netif_dbg(dev, rx_err, dev->net, + "rx submit, %d\n", retval); + tasklet_schedule (&dev->bh); + break; + case 0: + __skb_queue_tail (&dev->rxq, skb); + } + } else { + netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); + retval = -ENOLINK; + } + spin_unlock_irqrestore (&dev->rxq.lock, lockflags); + if (retval) { + dev_kfree_skb_any (skb); + usb_free_urb (urb); + } +} + + +/*-------------------------------------------------------------------------*/ + +static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) +{ + if (dev->driver_info->rx_fixup && + !dev->driver_info->rx_fixup (dev, skb)) + goto error; + // else network stack removes extra byte if we forced a short packet + + if (skb->len) + usbnet_skb_return (dev, skb); + else { + netif_dbg(dev, rx_err, dev->net, "drop\n"); +error: + dev->net->stats.rx_errors++; + skb_queue_tail (&dev->done, skb); + } +} + +/*-------------------------------------------------------------------------*/ + +static void rx_complete (struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *) urb->context; + struct skb_data *entry = (struct skb_data *) skb->cb; + struct usbnet *dev = entry->dev; + int urb_status = urb->status; + + skb_put (skb, urb->actual_length); + entry->state = rx_done; + entry->urb = NULL; + + switch (urb_status) { + /* success */ + case 0: + if (skb->len < dev->net->hard_header_len) { + entry->state = rx_cleanup; + dev->net->stats.rx_errors++; + dev->net->stats.rx_length_errors++; + netif_dbg(dev, rx_err, dev->net, + "rx length %d\n", skb->len); + } + break; + + /* stalls need manual reset. this is rare ... except that + * when going through USB 2.0 TTs, unplug appears this way. + * we avoid the highspeed version of the ETIMEDOUT/EILSEQ + * storm, recovering as needed. 
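+ *
+ * a -EPIPE urb status means the bulk-in endpoint is halted; the actual
+ * clear-halt is deferred to kevent() because it is a blocking control
+ * request and must not be issued from completion context.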
+ */ + case -EPIPE: + dev->net->stats.rx_errors++; + usbnet_defer_kevent (dev, EVENT_RX_HALT); + // FALLTHROUGH + + /* software-driven interface shutdown */ + case -ECONNRESET: /* async unlink */ + case -ESHUTDOWN: /* hardware gone */ + netif_dbg(dev, ifdown, dev->net, + "rx shutdown, code %d\n", urb_status); + goto block; + + /* we get controller i/o faults during khubd disconnect() delays. + * throttle down resubmits, to avoid log floods; just temporarily, + * so we still recover when the fault isn't a khubd delay. + */ + case -EPROTO: + case -ETIME: + case -EILSEQ: + dev->net->stats.rx_errors++; + if (!timer_pending (&dev->delay)) { + mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); + netif_dbg(dev, link, dev->net, + "rx throttle %d\n", urb_status); + } +block: + entry->state = rx_cleanup; + entry->urb = urb; + urb = NULL; + break; + + /* data overrun ... flush fifo? */ + case -EOVERFLOW: + dev->net->stats.rx_over_errors++; + // FALLTHROUGH + + default: + entry->state = rx_cleanup; + dev->net->stats.rx_errors++; + netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); + break; + } + + defer_bh(dev, skb, &dev->rxq); + + if (urb) { + if (netif_running (dev->net) && + !test_bit (EVENT_RX_HALT, &dev->flags)) { + rx_submit (dev, urb, GFP_ATOMIC); + return; + } + usb_free_urb (urb); + } + netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); +} + +static void intr_complete (struct urb *urb) +{ + struct usbnet *dev = urb->context; + int status = urb->status; + + switch (status) { + /* success */ + case 0: + dev->driver_info->status(dev, urb); + break; + + /* software-driven interface shutdown */ + case -ENOENT: /* urb killed */ + case -ESHUTDOWN: /* hardware gone */ + netif_dbg(dev, ifdown, dev->net, + "intr shutdown, code %d\n", status); + return; + + /* NOTE: not throttling like RX/TX, since this endpoint + * already polls infrequently + */ + default: + netdev_dbg(dev->net, "intr status %d\n", status); + break; + } + + if (!netif_running (dev->net)) + return; + + memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); + status = usb_submit_urb (urb, GFP_ATOMIC); + if (status != 0) + netif_err(dev, timer, dev->net, + "intr resubmit --> %d\n", status); +} + +/*-------------------------------------------------------------------------*/ +void usbnet_pause_rx(struct usbnet *dev) +{ + set_bit(EVENT_RX_PAUSED, &dev->flags); + + netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n"); +} +EXPORT_SYMBOL_GPL(usbnet_pause_rx); + +void usbnet_resume_rx(struct usbnet *dev) +{ + struct sk_buff *skb; + int num = 0; + + clear_bit(EVENT_RX_PAUSED, &dev->flags); + + while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) { + usbnet_skb_return(dev, skb); + num++; + } + + tasklet_schedule(&dev->bh); + + netif_dbg(dev, rx_status, dev->net, + "paused rx queue disabled, %d skbs requeued\n", num); +} +EXPORT_SYMBOL_GPL(usbnet_resume_rx); + +void usbnet_purge_paused_rxq(struct usbnet *dev) +{ + skb_queue_purge(&dev->rxq_pause); +} +EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq); + +/*-------------------------------------------------------------------------*/ + +// unlink pending rx/tx; completion handlers do all other cleanup + +static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) +{ + unsigned long flags; + struct sk_buff *skb, *skbnext; + int count = 0; + + spin_lock_irqsave (&q->lock, flags); + skb_queue_walk_safe(q, skb, skbnext) { + struct skb_data *entry; + struct urb *urb; + int retval; + + entry = (struct skb_data *) skb->cb; + urb = entry->urb; + + // during some 
PM-driven resume scenarios, + // these (async) unlinks complete immediately + retval = usb_unlink_urb (urb); + if (retval != -EINPROGRESS && retval != 0) + netdev_dbg(dev->net, "unlink urb err, %d\n", retval); + else + count++; + } + spin_unlock_irqrestore (&q->lock, flags); + return count; +} + +// Flush all pending rx urbs +// minidrivers may need to do this when the MTU changes + +void usbnet_unlink_rx_urbs(struct usbnet *dev) +{ + if (netif_running(dev->net)) { + (void) unlink_urbs (dev, &dev->rxq); + tasklet_schedule(&dev->bh); + } +} +EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs); + +/*-------------------------------------------------------------------------*/ + +// precondition: never called in_interrupt +static void usbnet_terminate_urbs(struct usbnet *dev) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); + DECLARE_WAITQUEUE(wait, current); + int temp; + + /* ensure there are no more active urbs */ + add_wait_queue(&unlink_wakeup, &wait); + set_current_state(TASK_UNINTERRUPTIBLE); + dev->wait = &unlink_wakeup; + temp = unlink_urbs(dev, &dev->txq) + + unlink_urbs(dev, &dev->rxq); + + /* maybe wait for deletions to finish. */ + while (!skb_queue_empty(&dev->rxq) + && !skb_queue_empty(&dev->txq) + && !skb_queue_empty(&dev->done)) { + schedule_timeout(UNLINK_TIMEOUT_MS); + set_current_state(TASK_UNINTERRUPTIBLE); + netif_dbg(dev, ifdown, dev->net, + "waited for %d urb completions\n", temp); + } + set_current_state(TASK_RUNNING); + dev->wait = NULL; + remove_wait_queue(&unlink_wakeup, &wait); +} + +int usbnet_stop (struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + struct driver_info *info = dev->driver_info; + int retval; + + netif_stop_queue (net); + + netif_info(dev, ifdown, dev->net, + "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", + net->stats.rx_packets, net->stats.tx_packets, + net->stats.rx_errors, net->stats.tx_errors); + + /* allow minidriver to stop correctly (wireless devices to turn off + * radio etc) */ + if (info->stop) { + retval = info->stop(dev); + if (retval < 0) + netif_info(dev, ifdown, dev->net, + "stop fail (%d) usbnet usb-%s-%s, %s\n", + retval, + dev->udev->bus->bus_name, dev->udev->devpath, + info->description); + } + + if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) + usbnet_terminate_urbs(dev); + + usb_kill_urb(dev->interrupt); + + usbnet_purge_paused_rxq(dev); + + /* deferred work (task, timer, softirq) must also stop. + * can't flush_scheduled_work() until we drop rtnl (later), + * else workers could deadlock; so make workers a NOP. 
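+ *
+ * clearing dev->flags below is what turns the workers into no-ops:
+ * kevent() only acts on event bits that are still set when it runs.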
+ */ + dev->flags = 0; + del_timer_sync (&dev->delay); + tasklet_kill (&dev->bh); + if (info->manage_power) + info->manage_power(dev, 0); + else + usb_autopm_put_interface(dev->intf); + + return 0; +} +EXPORT_SYMBOL_GPL(usbnet_stop); + +/*-------------------------------------------------------------------------*/ + +// posts reads, and enables write queuing + +// precondition: never called in_interrupt + +int usbnet_open (struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + int retval; + struct driver_info *info = dev->driver_info; + + if ((retval = usb_autopm_get_interface(dev->intf)) < 0) { + netif_info(dev, ifup, dev->net, + "resumption fail (%d) usbnet usb-%s-%s, %s\n", + retval, + dev->udev->bus->bus_name, + dev->udev->devpath, + info->description); + goto done_nopm; + } + + // put into "known safe" state + if (info->reset && (retval = info->reset (dev)) < 0) { + netif_info(dev, ifup, dev->net, + "open reset fail (%d) usbnet usb-%s-%s, %s\n", + retval, + dev->udev->bus->bus_name, + dev->udev->devpath, + info->description); + goto done; + } + + // insist peer be connected + if (info->check_connect && (retval = info->check_connect (dev)) < 0) { + netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval); + goto done; + } + + /* start any status interrupt transfer */ + if (dev->interrupt) { + retval = usb_submit_urb (dev->interrupt, GFP_KERNEL); + if (retval < 0) { + netif_err(dev, ifup, dev->net, + "intr submit %d\n", retval); + goto done; + } + } + + netif_start_queue (net); + netif_info(dev, ifup, dev->net, + "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n", + (int)RX_QLEN(dev), (int)TX_QLEN(dev), + dev->net->mtu, + (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" : + (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" : + (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" : + (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" : + (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" : + "simple"); + + // delay posting reads until we're fully open + tasklet_schedule (&dev->bh); + if (info->manage_power) { + retval = info->manage_power(dev, 1); + if (retval < 0) + goto done; + usb_autopm_put_interface(dev->intf); + } + return retval; + +done: + usb_autopm_put_interface(dev->intf); +done_nopm: + return retval; +} +EXPORT_SYMBOL_GPL(usbnet_open); + +/*-------------------------------------------------------------------------*/ + +/* ethtool methods; minidrivers may need to add some more, but + * they'll probably want to use this base set. 
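+ *
+ * (Sketch with made-up foo_* names: a minidriver would typically reuse
+ * these helpers and only override what it must, e.g.
+ *
+ *	static const struct ethtool_ops foo_ethtool_ops = {
+ *		.get_drvinfo	= foo_get_drvinfo,	// driver specific
+ *		.get_link	= usbnet_get_link,
+ *		.get_msglevel	= usbnet_get_msglevel,
+ *		.set_msglevel	= usbnet_set_msglevel,
+ *	};
+ *
+ * installed from its bind() callback via
+ *	net->ethtool_ops = &foo_ethtool_ops;
+ * foo_get_drvinfo and foo_ethtool_ops are hypothetical names.)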
+ */ + +int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd) +{ + struct usbnet *dev = netdev_priv(net); + + if (!dev->mii.mdio_read) + return -EOPNOTSUPP; + + return mii_ethtool_gset(&dev->mii, cmd); +} +EXPORT_SYMBOL_GPL(usbnet_get_settings); + +int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd) +{ + struct usbnet *dev = netdev_priv(net); + int retval; + + if (!dev->mii.mdio_write) + return -EOPNOTSUPP; + + retval = mii_ethtool_sset(&dev->mii, cmd); + + /* link speed/duplex might have changed */ + if (dev->driver_info->link_reset) + dev->driver_info->link_reset(dev); + + return retval; + +} +EXPORT_SYMBOL_GPL(usbnet_set_settings); + +u32 usbnet_get_link (struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + /* If a check_connect is defined, return its result */ + if (dev->driver_info->check_connect) + return dev->driver_info->check_connect (dev) == 0; + + /* if the device has mii operations, use those */ + if (dev->mii.mdio_read) + return mii_link_ok(&dev->mii); + + /* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */ + return ethtool_op_get_link(net); +} +EXPORT_SYMBOL_GPL(usbnet_get_link); + +int usbnet_nway_reset(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + if (!dev->mii.mdio_write) + return -EOPNOTSUPP; + + return mii_nway_restart(&dev->mii); +} +EXPORT_SYMBOL_GPL(usbnet_nway_reset); + +void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info) +{ + struct usbnet *dev = netdev_priv(net); + + strncpy (info->driver, dev->driver_name, sizeof info->driver); + strncpy (info->version, DRIVER_VERSION, sizeof info->version); + strncpy (info->fw_version, dev->driver_info->description, + sizeof info->fw_version); + usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info); +} +EXPORT_SYMBOL_GPL(usbnet_get_drvinfo); + +u32 usbnet_get_msglevel (struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + return dev->msg_enable; +} +EXPORT_SYMBOL_GPL(usbnet_get_msglevel); + +void usbnet_set_msglevel (struct net_device *net, u32 level) +{ + struct usbnet *dev = netdev_priv(net); + + dev->msg_enable = level; +} +EXPORT_SYMBOL_GPL(usbnet_set_msglevel); + +/* drivers may override default ethtool_ops in their bind() routine */ +static const struct ethtool_ops usbnet_ethtool_ops = { + .get_settings = usbnet_get_settings, + .set_settings = usbnet_set_settings, + .get_link = usbnet_get_link, + .nway_reset = usbnet_nway_reset, + .get_drvinfo = usbnet_get_drvinfo, + .get_msglevel = usbnet_get_msglevel, + .set_msglevel = usbnet_set_msglevel, +}; + +/*-------------------------------------------------------------------------*/ + +/* work that cannot be done in interrupt context uses keventd. + * + * NOTE: with 2.5 we could do more of this using completion callbacks, + * especially now that control transfers can be queued. 
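+ *
+ * the handler below services EVENT_TX_HALT, EVENT_RX_HALT,
+ * EVENT_RX_MEMORY and EVENT_LINK_RESET; each bit is set through
+ * usbnet_defer_kevent() from contexts that cannot sleep.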
+ */ +static void +kevent (struct work_struct *work) +{ + struct usbnet *dev = + container_of(work, struct usbnet, kevent); + int status; + + /* usb_clear_halt() needs a thread context */ + if (test_bit (EVENT_TX_HALT, &dev->flags)) { + unlink_urbs (dev, &dev->txq); + status = usb_autopm_get_interface(dev->intf); + if (status < 0) + goto fail_pipe; + status = usb_clear_halt (dev->udev, dev->out); + usb_autopm_put_interface(dev->intf); + if (status < 0 && + status != -EPIPE && + status != -ESHUTDOWN) { + if (netif_msg_tx_err (dev)) +fail_pipe: + netdev_err(dev->net, "can't clear tx halt, status %d\n", + status); + } else { + clear_bit (EVENT_TX_HALT, &dev->flags); + if (status != -ESHUTDOWN) + netif_wake_queue (dev->net); + } + } + if (test_bit (EVENT_RX_HALT, &dev->flags)) { + unlink_urbs (dev, &dev->rxq); + status = usb_autopm_get_interface(dev->intf); + if (status < 0) + goto fail_halt; + status = usb_clear_halt (dev->udev, dev->in); + usb_autopm_put_interface(dev->intf); + if (status < 0 && + status != -EPIPE && + status != -ESHUTDOWN) { + if (netif_msg_rx_err (dev)) +fail_halt: + netdev_err(dev->net, "can't clear rx halt, status %d\n", + status); + } else { + clear_bit (EVENT_RX_HALT, &dev->flags); + tasklet_schedule (&dev->bh); + } + } + + /* tasklet could resubmit itself forever if memory is tight */ + if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { + struct urb *urb = NULL; + + if (netif_running (dev->net)) + urb = usb_alloc_urb (0, GFP_KERNEL); + else + clear_bit (EVENT_RX_MEMORY, &dev->flags); + if (urb != NULL) { + clear_bit (EVENT_RX_MEMORY, &dev->flags); + status = usb_autopm_get_interface(dev->intf); + if (status < 0) + goto fail_lowmem; + rx_submit (dev, urb, GFP_KERNEL); + usb_autopm_put_interface(dev->intf); +fail_lowmem: + tasklet_schedule (&dev->bh); + } + } + + if (test_bit (EVENT_LINK_RESET, &dev->flags)) { + struct driver_info *info = dev->driver_info; + int retval = 0; + + clear_bit (EVENT_LINK_RESET, &dev->flags); + status = usb_autopm_get_interface(dev->intf); + if (status < 0) + goto skip_reset; + if(info->link_reset && (retval = info->link_reset(dev)) < 0) { + usb_autopm_put_interface(dev->intf); +skip_reset: + netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n", + retval, + dev->udev->bus->bus_name, + dev->udev->devpath, + info->description); + } else { + usb_autopm_put_interface(dev->intf); + } + } + + if (dev->flags) + netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); +} + +/*-------------------------------------------------------------------------*/ + +static void tx_complete (struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *) urb->context; + struct skb_data *entry = (struct skb_data *) skb->cb; + struct usbnet *dev = entry->dev; + + if (urb->status == 0) { + dev->net->stats.tx_packets++; + dev->net->stats.tx_bytes += entry->length; + } else { + dev->net->stats.tx_errors++; + + switch (urb->status) { + case -EPIPE: + usbnet_defer_kevent (dev, EVENT_TX_HALT); + break; + + /* software-driven interface shutdown */ + case -ECONNRESET: // async unlink + case -ESHUTDOWN: // hardware gone + break; + + // like rx, tx gets controller i/o faults during khubd delays + // and so it uses the same throttling mechanism. 
+ case -EPROTO: + case -ETIME: + case -EILSEQ: + usb_mark_last_busy(dev->udev); + if (!timer_pending (&dev->delay)) { + mod_timer (&dev->delay, + jiffies + THROTTLE_JIFFIES); + netif_dbg(dev, link, dev->net, + "tx throttle %d\n", urb->status); + } + netif_stop_queue (dev->net); + break; + default: + netif_dbg(dev, tx_err, dev->net, + "tx err %d\n", entry->urb->status); + break; + } + } + + usb_autopm_put_interface_async(dev->intf); + urb->dev = NULL; + entry->state = tx_done; + defer_bh(dev, skb, &dev->txq); +} + +/*-------------------------------------------------------------------------*/ + +void usbnet_tx_timeout (struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + unlink_urbs (dev, &dev->txq); + tasklet_schedule (&dev->bh); + + // FIXME: device recovery -- reset? +} +EXPORT_SYMBOL_GPL(usbnet_tx_timeout); + +/*-------------------------------------------------------------------------*/ + +netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, + struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + int length; + struct urb *urb = NULL; + struct skb_data *entry; + struct driver_info *info = dev->driver_info; + unsigned long flags; + int retval; + + // some devices want funky USB-level framing, for + // win32 driver (usually) and/or hardware quirks + if (info->tx_fixup) { + skb = info->tx_fixup (dev, skb, GFP_ATOMIC); + if (!skb) { + netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); + goto drop; + } + } + length = skb->len; + + if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) { + netif_dbg(dev, tx_err, dev->net, "no urb\n"); + goto drop; + } + + entry = (struct skb_data *) skb->cb; + entry->urb = urb; + entry->dev = dev; + entry->state = tx_start; + entry->length = length; + + usb_fill_bulk_urb (urb, dev->udev, dev->out, + skb->data, skb->len, tx_complete, skb); + + /* don't assume the hardware handles USB_ZERO_PACKET + * NOTE: strictly conforming cdc-ether devices should expect + * the ZLP here, but ignore the one-byte packet. 
+ */ + if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) { + urb->transfer_buffer_length++; + if (skb_tailroom(skb)) { + skb->data[skb->len] = 0; + __skb_put(skb, 1); + } + } + + spin_lock_irqsave(&dev->txq.lock, flags); + retval = usb_autopm_get_interface_async(dev->intf); + if (retval < 0) { + spin_unlock_irqrestore(&dev->txq.lock, flags); + goto drop; + } + +#ifdef CONFIG_PM + /* if this triggers the device is still a sleep */ + if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + /* transmission will be done in resume */ + usb_anchor_urb(urb, &dev->deferred); + /* no use to process more packets */ + netif_stop_queue(net); + spin_unlock_irqrestore(&dev->txq.lock, flags); + netdev_dbg(dev->net, "Delaying transmission for resumption\n"); + goto deferred; + } +#endif + + switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) { + case -EPIPE: + netif_stop_queue (net); + usbnet_defer_kevent (dev, EVENT_TX_HALT); + usb_autopm_put_interface_async(dev->intf); + break; + default: + usb_autopm_put_interface_async(dev->intf); + netif_dbg(dev, tx_err, dev->net, + "tx: submit urb err %d\n", retval); + break; + case 0: + net->trans_start = jiffies; + __skb_queue_tail (&dev->txq, skb); + if (dev->txq.qlen >= TX_QLEN (dev)) + netif_stop_queue (net); + } + spin_unlock_irqrestore (&dev->txq.lock, flags); + + if (retval) { + netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval); +drop: + dev->net->stats.tx_dropped++; + if (skb) + dev_kfree_skb_any (skb); + usb_free_urb (urb); + } else + netif_dbg(dev, tx_queued, dev->net, + "> tx, len %d, type 0x%x\n", length, skb->protocol); +#ifdef CONFIG_PM +deferred: +#endif + return NETDEV_TX_OK; +} +EXPORT_SYMBOL_GPL(usbnet_start_xmit); + +/*-------------------------------------------------------------------------*/ + +// tasklet (work deferred from completions, in_irq) or timer + +static void usbnet_bh (unsigned long param) +{ + struct usbnet *dev = (struct usbnet *) param; + struct sk_buff *skb; + struct skb_data *entry; + + while ((skb = skb_dequeue (&dev->done))) { + entry = (struct skb_data *) skb->cb; + switch (entry->state) { + case rx_done: + entry->state = rx_cleanup; + rx_process (dev, skb); + continue; + case tx_done: + case rx_cleanup: + usb_free_urb (entry->urb); + dev_kfree_skb (skb); + continue; + default: + netdev_dbg(dev->net, "bogus skb state %d\n", entry->state); + } + } + + // waiting for all pending urbs to complete? + if (dev->wait) { + if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { + wake_up (dev->wait); + } + + // or are we maybe short a few urbs? 
+ } else if (netif_running (dev->net) && + netif_device_present (dev->net) && + !timer_pending (&dev->delay) && + !test_bit (EVENT_RX_HALT, &dev->flags)) { + int temp = dev->rxq.qlen; + int qlen = RX_QLEN (dev); + + if (temp < qlen) { + struct urb *urb; + int i; + + // don't refill the queue all at once + for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) { + urb = usb_alloc_urb (0, GFP_ATOMIC); + if (urb != NULL) + rx_submit (dev, urb, GFP_ATOMIC); + } + if (temp != dev->rxq.qlen) + netif_dbg(dev, link, dev->net, + "rxqlen %d --> %d\n", + temp, dev->rxq.qlen); + if (dev->rxq.qlen < qlen) + tasklet_schedule (&dev->bh); + } + if (dev->txq.qlen < TX_QLEN (dev)) + netif_wake_queue (dev->net); + } +} + + +/*------------------------------------------------------------------------- + * + * USB Device Driver support + * + *-------------------------------------------------------------------------*/ + +// precondition: never called in_interrupt + +void usbnet_disconnect (struct usb_interface *intf) +{ + struct usbnet *dev; + struct usb_device *xdev; + struct net_device *net; + + dev = usb_get_intfdata(intf); + usb_set_intfdata(intf, NULL); + if (!dev) + return; + + xdev = interface_to_usbdev (intf); + + netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n", + intf->dev.driver->name, + xdev->bus->bus_name, xdev->devpath, + dev->driver_info->description); + + net = dev->net; + unregister_netdev (net); + + /* we don't hold rtnl here ... */ + flush_scheduled_work (); + + if (dev->driver_info->unbind) + dev->driver_info->unbind (dev, intf); + + free_netdev(net); + usb_put_dev (xdev); +} +EXPORT_SYMBOL_GPL(usbnet_disconnect); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops usbnet_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = usbnet_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_change_mtu = usbnet_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; +#endif + +/*-------------------------------------------------------------------------*/ + +// precondition: never called in_interrupt + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) +static struct device_type wlan_type = { + .name = "wlan", +}; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) +static struct device_type wwan_type = { + .name = "wwan", +}; +#endif + +int +usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) +{ + struct usbnet *dev; + struct net_device *net; + struct usb_host_interface *interface; + struct driver_info *info; + struct usb_device *xdev; + int status; + const char *name; + + name = udev->dev.driver->name; + info = (struct driver_info *) prod->driver_info; + if (!info) { + dev_dbg (&udev->dev, "blacklisted by %s\n", name); + return -ENODEV; + } + xdev = interface_to_usbdev (udev); + interface = udev->cur_altsetting; + + usb_get_dev (xdev); + + status = -ENOMEM; + + // set up our own records + net = alloc_etherdev(sizeof(*dev)); + if (!net) { + dbg ("can't kmalloc dev"); + goto out; + } + + dev = netdev_priv(net); + dev->udev = xdev; + dev->intf = udev; + dev->driver_info = info; + dev->driver_name = name; + dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV + | NETIF_MSG_PROBE | NETIF_MSG_LINK); + skb_queue_head_init (&dev->rxq); + skb_queue_head_init (&dev->txq); + skb_queue_head_init (&dev->done); + skb_queue_head_init(&dev->rxq_pause); + dev->bh.func = usbnet_bh; + dev->bh.data = (unsigned long) dev; + INIT_WORK (&dev->kevent, 
kevent); + init_usb_anchor(&dev->deferred); + dev->delay.function = usbnet_bh; + dev->delay.data = (unsigned long) dev; + init_timer (&dev->delay); + mutex_init (&dev->phy_mutex); + + dev->net = net; + strcpy (net->name, "usb%d"); + memcpy (net->dev_addr, node_id, sizeof node_id); + + /* rx and tx sides can use different message sizes; + * bind() should set rx_urb_size in that case. + */ + dev->hard_mtu = net->mtu + net->hard_header_len; +#if 0 +// dma_supported() is deeply broken on almost all architectures + // possible with some EHCI controllers + if (dma_supported (&udev->dev, DMA_BIT_MASK(64))) + net->features |= NETIF_F_HIGHDMA; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + net->netdev_ops = &usbnet_netdev_ops; +#else + net->change_mtu = usbnet_change_mtu; + net->hard_start_xmit = usbnet_start_xmit; + net->open = usbnet_open; + net->stop = usbnet_stop; + net->tx_timeout = usbnet_tx_timeout; +#endif + net->watchdog_timeo = TX_TIMEOUT_JIFFIES; + net->ethtool_ops = &usbnet_ethtool_ops; + + // allow device-specific bind/init procedures + // NOTE net->name still not usable ... + if (info->bind) { + status = info->bind (dev, udev); + if (status < 0) + goto out1; + + // heuristic: "usb%d" for links we know are two-host, + // else "eth%d" when there's reasonable doubt. userspace + // can rename the link if it knows better. + if ((dev->driver_info->flags & FLAG_ETHER) != 0 && + (net->dev_addr [0] & 0x02) == 0) + strcpy (net->name, "eth%d"); + /* WLAN devices should always be named "wlan%d" */ + if ((dev->driver_info->flags & FLAG_WLAN) != 0) + strcpy(net->name, "wlan%d"); + /* WWAN devices should always be named "wwan%d" */ + if ((dev->driver_info->flags & FLAG_WWAN) != 0) + strcpy(net->name, "wwan%d"); + + /* maybe the remote can't receive an Ethernet MTU */ + if (net->mtu > (dev->hard_mtu - net->hard_header_len)) + net->mtu = dev->hard_mtu - net->hard_header_len; + } else if (!info->in || !info->out) + status = usbnet_get_endpoints (dev, udev); + else { + dev->in = usb_rcvbulkpipe (xdev, info->in); + dev->out = usb_sndbulkpipe (xdev, info->out); + if (!(info->flags & FLAG_NO_SETINT)) + status = usb_set_interface (xdev, + interface->desc.bInterfaceNumber, + interface->desc.bAlternateSetting); + else + status = 0; + + } + if (status >= 0 && dev->status) + status = init_status (dev, udev); + if (status < 0) + goto out3; + + if (!dev->rx_urb_size) + dev->rx_urb_size = dev->hard_mtu; + dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); + + SET_NETDEV_DEV(net, &udev->dev); + + if ((dev->driver_info->flags & FLAG_WLAN) != 0) + SET_NETDEV_DEVTYPE(net, &wlan_type); + if ((dev->driver_info->flags & FLAG_WWAN) != 0) + SET_NETDEV_DEVTYPE(net, &wwan_type); + + status = register_netdev (net); + if (status) + goto out3; + netif_info(dev, probe, dev->net, + "register '%s' at usb-%s-%s, %s, %pM\n", + udev->dev.driver->name, + xdev->bus->bus_name, xdev->devpath, + dev->driver_info->description, + net->dev_addr); + + // ok, it's ready to go. 
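+	// SET_NETDEV_DEVTYPE() above is what makes WLAN and WWAN links show
+	// up with the right devtype; FLAG_LINK_INTR devices additionally
+	// start with the carrier off, below, until their status endpoint
+	// reports a link.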
+ usb_set_intfdata (udev, dev); + + netif_device_attach (net); + + if (dev->driver_info->flags & FLAG_LINK_INTR) + netif_carrier_off(net); + + return 0; + +out3: + if (info->unbind) + info->unbind (dev, udev); +out1: + free_netdev(net); +out: + usb_put_dev(xdev); + return status; +} +EXPORT_SYMBOL_GPL(usbnet_probe); + +/*-------------------------------------------------------------------------*/ + +/* + * suspend the whole driver as soon as the first interface is suspended + * resume only when the last interface is resumed + */ + +int usbnet_suspend (struct usb_interface *intf, pm_message_t message) +{ + struct usbnet *dev = usb_get_intfdata(intf); + + if (!dev->suspend_count++) { + spin_lock_irq(&dev->txq.lock); + /* don't autosuspend while transmitting */ + if (dev->txq.qlen && (message.event & PM_EVENT_AUTO)) { + spin_unlock_irq(&dev->txq.lock); + return -EBUSY; + } else { + set_bit(EVENT_DEV_ASLEEP, &dev->flags); + spin_unlock_irq(&dev->txq.lock); + } + /* + * accelerate emptying of the rx and queues, to avoid + * having everything error out. + */ + netif_device_detach (dev->net); + usbnet_terminate_urbs(dev); + usb_kill_urb(dev->interrupt); + + /* + * reattach so runtime management can use and + * wake the device + */ + netif_device_attach (dev->net); + } + return 0; +} +EXPORT_SYMBOL_GPL(usbnet_suspend); + +int usbnet_resume (struct usb_interface *intf) +{ + struct usbnet *dev = usb_get_intfdata(intf); + struct sk_buff *skb; + struct urb *res; + int retval; + + if (!--dev->suspend_count) { + spin_lock_irq(&dev->txq.lock); + while ((res = usb_get_from_anchor(&dev->deferred))) { + + printk(KERN_INFO"%s has delayed data\n", __func__); + skb = (struct sk_buff *)res->context; + retval = usb_submit_urb(res, GFP_ATOMIC); + if (retval < 0) { + dev_kfree_skb_any(skb); + usb_free_urb(res); + usb_autopm_put_interface_async(dev->intf); + } else { + dev->net->trans_start = jiffies; + __skb_queue_tail(&dev->txq, skb); + } + } + + smp_mb(); + clear_bit(EVENT_DEV_ASLEEP, &dev->flags); + spin_unlock_irq(&dev->txq.lock); + if (!(dev->txq.qlen >= TX_QLEN(dev))) + netif_start_queue(dev->net); + tasklet_schedule (&dev->bh); + } + return 0; +} +EXPORT_SYMBOL_GPL(usbnet_resume); + + +/*-------------------------------------------------------------------------*/ + +static int __init usbnet_init(void) +{ + /* compiler should optimize this out */ + BUILD_BUG_ON (sizeof (((struct sk_buff *)0)->cb) + < sizeof (struct skb_data)); + + random_ether_addr(node_id); + return 0; +} +module_init(usbnet_init); + +static void __exit usbnet_exit(void) +{ +} +module_exit(usbnet_exit); + +MODULE_AUTHOR("David Brownell"); +MODULE_DESCRIPTION("USB network driver framework"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile new file mode 100644 index 0000000..f377c73 --- /dev/null +++ b/drivers/net/wireless/Makefile @@ -0,0 +1,37 @@ +# +# Makefile for the Linux Wireless network device drivers. 
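+# (Each obj-$(CONFIG_FOO) entry below is built only when the named Kconfig
+# symbol is y or m; entries ending in '/' descend into that subdirectory.)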
+#
+
+obj-$(CONFIG_IPW2100) += ipw2x00/
+obj-$(CONFIG_IPW2200) += ipw2x00/
+
+obj-$(CONFIG_AT76C50X_USB) += at76c50x-usb.o
+
+obj-$(CONFIG_B43) += b43/
+obj-$(CONFIG_B43LEGACY) += b43legacy/
+obj-$(CONFIG_ZD1211RW) += zd1211rw/
+obj-$(CONFIG_RTL8180) += rtl818x/
+obj-$(CONFIG_RTL8187) += rtl818x/
+
+obj-$(CONFIG_USB_NET_COMPAT_RNDIS_WLAN) += rndis_wlan.o
+
+obj-$(CONFIG_LIBERTAS) += libertas/
+
+obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/
+
+obj-$(CONFIG_ADM8211) += adm8211.o
+
+obj-$(CONFIG_MWL8K) += mwl8k.o
+
+obj-$(CONFIG_IWLWIFI) += iwlwifi/
+obj-$(CONFIG_RT2X00) += rt2x00/
+
+obj-$(CONFIG_P54_COMMON) += p54/
+
+obj-$(CONFIG_ATH_COMMON) += ath/
+
+obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
+
+obj-$(CONFIG_WL12XX) += wl12xx/
+
+obj-$(CONFIG_IWM) += iwmc3200wifi/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
new file mode 100644
index 0000000..547912e
--- /dev/null
+++ b/drivers/net/wireless/adm8211.c
@@ -0,0 +1,2011 @@
+
+/*
+ * Linux device driver for ADMtek ADM8211 (IEEE 802.11b MAC/BBP)
+ *
+ * Copyright (c) 2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2004-2007, Michael Wu <flamingice@sourmilk.net>
+ * Some parts copyright (c) 2003 by David Young <dyoung@pobox.com>
+ * and used with permission.
+ *
+ * Much thanks to Infineon-ADMtek for their support of this driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/if.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/eeprom_93cx6.h>
+#include <net/mac80211.h>
+
+#include "adm8211.h"
+
+MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
+MODULE_AUTHOR("Jouni Malinen <jkmaline@cc.hut.fi>");
+MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211");
+MODULE_SUPPORTED_DEVICE("ADM8211");
+MODULE_LICENSE("GPL");
+
+static unsigned int tx_ring_size __read_mostly = 16;
+static unsigned int rx_ring_size __read_mostly = 16;
+
+module_param(tx_ring_size, uint, 0);
+module_param(rx_ring_size, uint, 0);
+
+static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = {
+	/* ADMtek ADM8211 */
+	{ PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
+	{ PCI_DEVICE(0x1200, 0x8201) }, /* ? */
+	{ PCI_DEVICE(0x1317, 0x8201) }, /* ADM8211A */
+	{ PCI_DEVICE(0x1317, 0x8211) }, /* ADM8211B/C */
+	{ 0 }
+};
+
+static struct ieee80211_rate adm8211_rates[] = {
+	{ .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 220, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, /* XX ??
*/ +}; + +static const struct ieee80211_channel adm8211_channels[] = { + { .center_freq = 2412}, + { .center_freq = 2417}, + { .center_freq = 2422}, + { .center_freq = 2427}, + { .center_freq = 2432}, + { .center_freq = 2437}, + { .center_freq = 2442}, + { .center_freq = 2447}, + { .center_freq = 2452}, + { .center_freq = 2457}, + { .center_freq = 2462}, + { .center_freq = 2467}, + { .center_freq = 2472}, + { .center_freq = 2484}, +}; + + +static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom) +{ + struct adm8211_priv *priv = eeprom->data; + u32 reg = ADM8211_CSR_READ(SPR); + + eeprom->reg_data_in = reg & ADM8211_SPR_SDI; + eeprom->reg_data_out = reg & ADM8211_SPR_SDO; + eeprom->reg_data_clock = reg & ADM8211_SPR_SCLK; + eeprom->reg_chip_select = reg & ADM8211_SPR_SCS; +} + +static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom) +{ + struct adm8211_priv *priv = eeprom->data; + u32 reg = 0x4000 | ADM8211_SPR_SRS; + + if (eeprom->reg_data_in) + reg |= ADM8211_SPR_SDI; + if (eeprom->reg_data_out) + reg |= ADM8211_SPR_SDO; + if (eeprom->reg_data_clock) + reg |= ADM8211_SPR_SCLK; + if (eeprom->reg_chip_select) + reg |= ADM8211_SPR_SCS; + + ADM8211_CSR_WRITE(SPR, reg); + ADM8211_CSR_READ(SPR); /* eeprom_delay */ +} + +static int adm8211_read_eeprom(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int words, i; + struct ieee80211_chan_range chan_range; + u16 cr49; + struct eeprom_93cx6 eeprom = { + .data = priv, + .register_read = adm8211_eeprom_register_read, + .register_write = adm8211_eeprom_register_write + }; + + if (ADM8211_CSR_READ(CSR_TEST0) & ADM8211_CSR_TEST0_EPTYP) { + /* 256 * 16-bit = 512 bytes */ + eeprom.width = PCI_EEPROM_WIDTH_93C66; + words = 256; + } else { + /* 64 * 16-bit = 128 bytes */ + eeprom.width = PCI_EEPROM_WIDTH_93C46; + words = 64; + } + + priv->eeprom_len = words * 2; + priv->eeprom = kmalloc(priv->eeprom_len, GFP_KERNEL); + if (!priv->eeprom) + return -ENOMEM; + + eeprom_93cx6_multiread(&eeprom, 0, (__le16 *)priv->eeprom, words); + + cr49 = le16_to_cpu(priv->eeprom->cr49); + priv->rf_type = (cr49 >> 3) & 0x7; + switch (priv->rf_type) { + case ADM8211_TYPE_INTERSIL: + case ADM8211_TYPE_RFMD: + case ADM8211_TYPE_MARVEL: + case ADM8211_TYPE_AIROHA: + case ADM8211_TYPE_ADMTEK: + break; + + default: + if (priv->pdev->revision < ADM8211_REV_CA) + priv->rf_type = ADM8211_TYPE_RFMD; + else + priv->rf_type = ADM8211_TYPE_AIROHA; + + printk(KERN_WARNING "%s (adm8211): Unknown RFtype %d\n", + pci_name(priv->pdev), (cr49 >> 3) & 0x7); + } + + priv->bbp_type = cr49 & 0x7; + switch (priv->bbp_type) { + case ADM8211_TYPE_INTERSIL: + case ADM8211_TYPE_RFMD: + case ADM8211_TYPE_MARVEL: + case ADM8211_TYPE_AIROHA: + case ADM8211_TYPE_ADMTEK: + break; + default: + if (priv->pdev->revision < ADM8211_REV_CA) + priv->bbp_type = ADM8211_TYPE_RFMD; + else + priv->bbp_type = ADM8211_TYPE_ADMTEK; + + printk(KERN_WARNING "%s (adm8211): Unknown BBPtype: %d\n", + pci_name(priv->pdev), cr49 >> 3); + } + + if (priv->eeprom->country_code >= ARRAY_SIZE(cranges)) { + printk(KERN_WARNING "%s (adm8211): Invalid country code (%d)\n", + pci_name(priv->pdev), priv->eeprom->country_code); + + chan_range = cranges[2]; + } else + chan_range = cranges[priv->eeprom->country_code]; + + printk(KERN_DEBUG "%s (adm8211): Channel range: %d - %d\n", + pci_name(priv->pdev), (int)chan_range.min, (int)chan_range.max); + + BUILD_BUG_ON(sizeof(priv->channels) != sizeof(adm8211_channels)); + + memcpy(priv->channels, adm8211_channels, sizeof(priv->channels)); 
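+	/* (The EEPROM country code only limits which of the 14 2.4 GHz
+	 * channels stay usable; anything outside the selected range is
+	 * flagged IEEE80211_CHAN_DISABLED in the loop below.) */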
+ priv->band.channels = priv->channels; + priv->band.n_channels = ARRAY_SIZE(adm8211_channels); + priv->band.bitrates = adm8211_rates; + priv->band.n_bitrates = ARRAY_SIZE(adm8211_rates); + + for (i = 1; i <= ARRAY_SIZE(adm8211_channels); i++) + if (i < chan_range.min || i > chan_range.max) + priv->channels[i - 1].flags |= IEEE80211_CHAN_DISABLED; + + switch (priv->eeprom->specific_bbptype) { + case ADM8211_BBP_RFMD3000: + case ADM8211_BBP_RFMD3002: + case ADM8211_BBP_ADM8011: + priv->specific_bbptype = priv->eeprom->specific_bbptype; + break; + + default: + if (priv->pdev->revision < ADM8211_REV_CA) + priv->specific_bbptype = ADM8211_BBP_RFMD3000; + else + priv->specific_bbptype = ADM8211_BBP_ADM8011; + + printk(KERN_WARNING "%s (adm8211): Unknown specific BBP: %d\n", + pci_name(priv->pdev), priv->eeprom->specific_bbptype); + } + + switch (priv->eeprom->specific_rftype) { + case ADM8211_RFMD2948: + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + case ADM8211_MAX2820: + case ADM8211_AL2210L: + priv->transceiver_type = priv->eeprom->specific_rftype; + break; + + default: + if (priv->pdev->revision == ADM8211_REV_BA) + priv->transceiver_type = ADM8211_RFMD2958_RF3000_CONTROL_POWER; + else if (priv->pdev->revision == ADM8211_REV_CA) + priv->transceiver_type = ADM8211_AL2210L; + else if (priv->pdev->revision == ADM8211_REV_AB) + priv->transceiver_type = ADM8211_RFMD2948; + + printk(KERN_WARNING "%s (adm8211): Unknown transceiver: %d\n", + pci_name(priv->pdev), priv->eeprom->specific_rftype); + + break; + } + + printk(KERN_DEBUG "%s (adm8211): RFtype=%d BBPtype=%d Specific BBP=%d " + "Transceiver=%d\n", pci_name(priv->pdev), priv->rf_type, + priv->bbp_type, priv->specific_bbptype, priv->transceiver_type); + + return 0; +} + +static inline void adm8211_write_sram(struct ieee80211_hw *dev, + u32 addr, u32 data) +{ + struct adm8211_priv *priv = dev->priv; + + ADM8211_CSR_WRITE(WEPCTL, addr | ADM8211_WEPCTL_TABLE_WR | + (priv->pdev->revision < ADM8211_REV_BA ? 
+ 0 : ADM8211_WEPCTL_SEL_WEPTABLE )); + ADM8211_CSR_READ(WEPCTL); + msleep(1); + + ADM8211_CSR_WRITE(WESK, data); + ADM8211_CSR_READ(WESK); + msleep(1); +} + +static void adm8211_write_sram_bytes(struct ieee80211_hw *dev, + unsigned int addr, u8 *buf, + unsigned int len) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg = ADM8211_CSR_READ(WEPCTL); + unsigned int i; + + if (priv->pdev->revision < ADM8211_REV_BA) { + for (i = 0; i < len; i += 2) { + u16 val = buf[i] | (buf[i + 1] << 8); + adm8211_write_sram(dev, addr + i / 2, val); + } + } else { + for (i = 0; i < len; i += 4) { + u32 val = (buf[i + 0] << 0 ) | (buf[i + 1] << 8 ) | + (buf[i + 2] << 16) | (buf[i + 3] << 24); + adm8211_write_sram(dev, addr + i / 4, val); + } + } + + ADM8211_CSR_WRITE(WEPCTL, reg); +} + +static void adm8211_clear_sram(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg = ADM8211_CSR_READ(WEPCTL); + unsigned int addr; + + for (addr = 0; addr < ADM8211_SRAM_SIZE; addr++) + adm8211_write_sram(dev, addr, 0); + + ADM8211_CSR_WRITE(WEPCTL, reg); +} + +static int adm8211_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats) +{ + struct adm8211_priv *priv = dev->priv; + + memcpy(stats, &priv->stats, sizeof(*stats)); + + return 0; +} + +static void adm8211_interrupt_tci(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int dirty_tx; + + spin_lock(&priv->lock); + + for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) { + unsigned int entry = dirty_tx % priv->tx_ring_size; + u32 status = le32_to_cpu(priv->tx_ring[entry].status); + struct ieee80211_tx_info *txi; + struct adm8211_tx_ring_info *info; + struct sk_buff *skb; + + if (status & TDES0_CONTROL_OWN || + !(status & TDES0_CONTROL_DONE)) + break; + + info = &priv->tx_buffers[entry]; + skb = info->skb; + txi = IEEE80211_SKB_CB(skb); + + /* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */ + + pci_unmap_single(priv->pdev, info->mapping, + info->skb->len, PCI_DMA_TODEVICE); + + ieee80211_tx_info_clear_status(txi); + + skb_pull(skb, sizeof(struct adm8211_tx_hdr)); + memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen); + if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && + !(status & TDES0_STATUS_ES)) + txi->flags |= IEEE80211_TX_STAT_ACK; + + ieee80211_tx_status_irqsafe(dev, skb); + + info->skb = NULL; + } + + if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2) + ieee80211_wake_queue(dev, 0); + + priv->dirty_tx = dirty_tx; + spin_unlock(&priv->lock); +} + + +static void adm8211_interrupt_rci(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int entry = priv->cur_rx % priv->rx_ring_size; + u32 status; + unsigned int pktlen; + struct sk_buff *skb, *newskb; + unsigned int limit = priv->rx_ring_size; + u8 rssi, rate; + + while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) { + if (!limit--) + break; + + status = le32_to_cpu(priv->rx_ring[entry].status); + rate = (status & RDES0_STATUS_RXDR) >> 12; + rssi = le32_to_cpu(priv->rx_ring[entry].length) & + RDES1_STATUS_RSSI; + + pktlen = status & RDES0_STATUS_FL; + if (pktlen > RX_PKT_SIZE) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: frame too long (%d)\n", + wiphy_name(dev->wiphy), pktlen); + pktlen = RX_PKT_SIZE; + } + + if (!priv->soft_rx_crc && status & RDES0_STATUS_ES) { + skb = NULL; /* old buffer will be reused */ + /* TODO: update RX error stats */ + /* TODO: check RDES0_STATUS_CRC*E */ + } else if (pktlen < RX_COPY_BREAK) { + skb = dev_alloc_skb(pktlen); + if 
(skb) { + pci_dma_sync_single_for_cpu( + priv->pdev, + priv->rx_buffers[entry].mapping, + pktlen, PCI_DMA_FROMDEVICE); + memcpy(skb_put(skb, pktlen), + skb_tail_pointer(priv->rx_buffers[entry].skb), + pktlen); + pci_dma_sync_single_for_device( + priv->pdev, + priv->rx_buffers[entry].mapping, + RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + } + } else { + newskb = dev_alloc_skb(RX_PKT_SIZE); + if (newskb) { + skb = priv->rx_buffers[entry].skb; + skb_put(skb, pktlen); + pci_unmap_single( + priv->pdev, + priv->rx_buffers[entry].mapping, + RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + priv->rx_buffers[entry].skb = newskb; + priv->rx_buffers[entry].mapping = + pci_map_single(priv->pdev, + skb_tail_pointer(newskb), + RX_PKT_SIZE, + PCI_DMA_FROMDEVICE); + } else { + skb = NULL; + /* TODO: update rx dropped stats */ + } + + priv->rx_ring[entry].buffer1 = + cpu_to_le32(priv->rx_buffers[entry].mapping); + } + + priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN | + RDES0_STATUS_SQL); + priv->rx_ring[entry].length = + cpu_to_le32(RX_PKT_SIZE | + (entry == priv->rx_ring_size - 1 ? + RDES1_CONTROL_RER : 0)); + + if (skb) { + struct ieee80211_rx_status rx_status = {0}; + + if (priv->pdev->revision < ADM8211_REV_CA) + rx_status.signal = rssi; + else + rx_status.signal = 100 - rssi; + + rx_status.rate_idx = rate; + + rx_status.freq = adm8211_channels[priv->channel - 1].center_freq; + rx_status.band = IEEE80211_BAND_2GHZ; + + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + ieee80211_rx_irqsafe(dev, skb); + } + + entry = (++priv->cur_rx) % priv->rx_ring_size; + } + + /* TODO: check LPC and update stats? */ +} + + +static irqreturn_t adm8211_interrupt(int irq, void *dev_id) +{ +#define ADM8211_INT(x) \ +do { \ + if (unlikely(stsr & ADM8211_STSR_ ## x)) \ + printk(KERN_DEBUG "%s: " #x "\n", wiphy_name(dev->wiphy)); \ +} while (0) + + struct ieee80211_hw *dev = dev_id; + struct adm8211_priv *priv = dev->priv; + u32 stsr = ADM8211_CSR_READ(STSR); + ADM8211_CSR_WRITE(STSR, stsr); + if (stsr == 0xffffffff) + return IRQ_HANDLED; + + if (!(stsr & (ADM8211_STSR_NISS | ADM8211_STSR_AISS))) + return IRQ_HANDLED; + + if (stsr & ADM8211_STSR_RCI) + adm8211_interrupt_rci(dev); + if (stsr & ADM8211_STSR_TCI) + adm8211_interrupt_tci(dev); + + ADM8211_INT(PCF); + ADM8211_INT(BCNTC); + ADM8211_INT(GPINT); + ADM8211_INT(ATIMTC); + ADM8211_INT(TSFTF); + ADM8211_INT(TSCZ); + ADM8211_INT(SQL); + ADM8211_INT(WEPTD); + ADM8211_INT(ATIME); + ADM8211_INT(TEIS); + ADM8211_INT(FBE); + ADM8211_INT(REIS); + ADM8211_INT(GPTT); + ADM8211_INT(RPS); + ADM8211_INT(RDU); + ADM8211_INT(TUF); + ADM8211_INT(TPS); + + return IRQ_HANDLED; + +#undef ADM8211_INT +} + +#define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\ +static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \ + u16 addr, u32 value) { \ + struct adm8211_priv *priv = dev->priv; \ + unsigned int i; \ + u32 reg, bitbuf; \ + \ + value &= v_mask; \ + addr &= a_mask; \ + bitbuf = (value << v_shift) | (addr << a_shift); \ + \ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \ + ADM8211_CSR_READ(SYNRF); \ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \ + ADM8211_CSR_READ(SYNRF); \ + \ + if (prewrite) { \ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + \ + for (i = 0; i <= bits; i++) { \ + if (bitbuf & (1 << (bits - i))) \ + reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \ + else \ + reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \ + \ + ADM8211_CSR_WRITE(SYNRF, reg); \ + ADM8211_CSR_READ(SYNRF); \ + \ 
+ ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \ + ADM8211_CSR_READ(SYNRF); \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + \ + if (postwrite == 1) { \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + if (postwrite == 2) { \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + \ + ADM8211_CSR_WRITE(SYNRF, 0); \ + ADM8211_CSR_READ(SYNRF); \ +} + +WRITE_SYN(max2820, 0x00FFF, 0, 0x0F, 12, 15, 1, 1) +WRITE_SYN(al2210l, 0xFFFFF, 4, 0x0F, 0, 23, 1, 1) +WRITE_SYN(rfmd2958, 0x3FFFF, 0, 0x1F, 18, 23, 0, 1) +WRITE_SYN(rfmd2948, 0x0FFFF, 4, 0x0F, 0, 21, 0, 2) + +#undef WRITE_SYN + +static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int timeout; + u32 reg; + + timeout = 10; + while (timeout > 0) { + reg = ADM8211_CSR_READ(BBPCTL); + if (!(reg & (ADM8211_BBPCTL_WR | ADM8211_BBPCTL_RD))) + break; + timeout--; + msleep(2); + } + + if (timeout == 0) { + printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed" + " prewrite (reg=0x%08x)\n", + wiphy_name(dev->wiphy), addr, data, reg); + return -ETIMEDOUT; + } + + switch (priv->bbp_type) { + case ADM8211_TYPE_INTERSIL: + reg = ADM8211_BBPCTL_MMISEL; /* three wire interface */ + break; + case ADM8211_TYPE_RFMD: + reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP | + (0x01 << 18); + break; + case ADM8211_TYPE_ADMTEK: + reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP | + (0x05 << 18); + break; + } + reg |= ADM8211_BBPCTL_WR | (addr << 8) | data; + + ADM8211_CSR_WRITE(BBPCTL, reg); + + timeout = 10; + while (timeout > 0) { + reg = ADM8211_CSR_READ(BBPCTL); + if (!(reg & ADM8211_BBPCTL_WR)) + break; + timeout--; + msleep(2); + } + + if (timeout == 0) { + ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) & + ~ADM8211_BBPCTL_WR); + printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed" + " postwrite (reg=0x%08x)\n", + wiphy_name(dev->wiphy), addr, data, reg); + return -ETIMEDOUT; + } + + return 0; +} + +static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan) +{ + static const u32 adm8211_rfmd2958_reg5[] = + {0x22BD, 0x22D2, 0x22E8, 0x22FE, 0x2314, 0x232A, 0x2340, + 0x2355, 0x236B, 0x2381, 0x2397, 0x23AD, 0x23C2, 0x23F7}; + static const u32 adm8211_rfmd2958_reg6[] = + {0x05D17, 0x3A2E8, 0x2E8BA, 0x22E8B, 0x1745D, 0x0BA2E, 0x00000, + 0x345D1, 0x28BA2, 0x1D174, 0x11745, 0x05D17, 0x3A2E8, 0x11745}; + + struct adm8211_priv *priv = dev->priv; + u8 ant_power = priv->ant_power > 0x3F ? + priv->eeprom->antenna_power[chan - 1] : priv->ant_power; + u8 tx_power = priv->tx_power > 0x3F ? + priv->eeprom->tx_power[chan - 1] : priv->tx_power; + u8 lpf_cutoff = priv->lpf_cutoff == 0xFF ? + priv->eeprom->lpf_cutoff[chan - 1] : priv->lpf_cutoff; + u8 lnags_thresh = priv->lnags_threshold == 0xFF ? 
+ priv->eeprom->lnags_threshold[chan - 1] : priv->lnags_threshold; + u32 reg; + + ADM8211_IDLE(); + + /* Program synthesizer to new channel */ + switch (priv->transceiver_type) { + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + adm8211_rf_write_syn_rfmd2958(dev, 0x00, 0x04007); + adm8211_rf_write_syn_rfmd2958(dev, 0x02, 0x00033); + + adm8211_rf_write_syn_rfmd2958(dev, 0x05, + adm8211_rfmd2958_reg5[chan - 1]); + adm8211_rf_write_syn_rfmd2958(dev, 0x06, + adm8211_rfmd2958_reg6[chan - 1]); + break; + + case ADM8211_RFMD2948: + adm8211_rf_write_syn_rfmd2948(dev, SI4126_MAIN_CONF, + SI4126_MAIN_XINDIV2); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_POWERDOWN, + SI4126_POWERDOWN_PDIB | + SI4126_POWERDOWN_PDRB); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_PHASE_DET_GAIN, 0); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_N_DIV, + (chan == 14 ? + 2110 : (2033 + (chan * 5)))); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_N_DIV, 1496); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_R_DIV, 44); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_R_DIV, 44); + break; + + case ADM8211_MAX2820: + adm8211_rf_write_syn_max2820(dev, 0x3, + (chan == 14 ? 0x054 : (0x7 + (chan * 5)))); + break; + + case ADM8211_AL2210L: + adm8211_rf_write_syn_al2210l(dev, 0x0, + (chan == 14 ? 0x229B4 : (0x22967 + (chan * 5)))); + break; + + default: + printk(KERN_DEBUG "%s: unsupported transceiver type %d\n", + wiphy_name(dev->wiphy), priv->transceiver_type); + break; + } + + /* write BBP regs */ + if (priv->bbp_type == ADM8211_TYPE_RFMD) { + + /* SMC 2635W specific? adm8211b doesn't use the 2948 though.. */ + /* TODO: remove if SMC 2635W doesn't need this */ + if (priv->transceiver_type == ADM8211_RFMD2948) { + reg = ADM8211_CSR_READ(GPIO); + reg &= 0xfffc0000; + reg |= ADM8211_CSR_GPIO_EN0; + if (chan != 14) + reg |= ADM8211_CSR_GPIO_O0; + ADM8211_CSR_WRITE(GPIO, reg); + } + + if (priv->transceiver_type == ADM8211_RFMD2958) { + /* set PCNT2 */ + adm8211_rf_write_syn_rfmd2958(dev, 0x0B, 0x07100); + /* set PCNT1 P_DESIRED/MID_BIAS */ + reg = le16_to_cpu(priv->eeprom->cr49); + reg >>= 13; + reg <<= 15; + reg |= ant_power << 9; + adm8211_rf_write_syn_rfmd2958(dev, 0x0A, reg); + /* set TXRX TX_GAIN */ + adm8211_rf_write_syn_rfmd2958(dev, 0x09, 0x00050 | + (priv->pdev->revision < ADM8211_REV_CA ? tx_power : 0)); + } else { + reg = ADM8211_CSR_READ(PLCPHD); + reg &= 0xff00ffff; + reg |= tx_power << 18; + ADM8211_CSR_WRITE(PLCPHD, reg); + } + + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF | + ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST); + ADM8211_CSR_READ(SYNRF); + msleep(30); + + /* RF3000 BBP */ + if (priv->transceiver_type != ADM8211_RFMD2958) + adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, + tx_power<<2); + adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, lpf_cutoff); + adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, lnags_thresh); + adm8211_write_bbp(dev, 0x1c, priv->pdev->revision == ADM8211_REV_BA ? 
+ priv->eeprom->cr28 : 0); + adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29); + + ADM8211_CSR_WRITE(SYNRF, 0); + + /* Nothing to do for ADMtek BBP */ + } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK) + printk(KERN_DEBUG "%s: unsupported BBP type %d\n", + wiphy_name(dev->wiphy), priv->bbp_type); + + ADM8211_RESTORE(); + + /* update current channel for adhoc (and maybe AP mode) */ + reg = ADM8211_CSR_READ(CAP0); + reg &= ~0xF; + reg |= chan; + ADM8211_CSR_WRITE(CAP0, reg); + + return 0; +} + +static void adm8211_update_mode(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + + ADM8211_IDLE(); + + priv->soft_rx_crc = 0; + switch (priv->mode) { + case NL80211_IFTYPE_STATION: + priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA); + priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR; + break; + case NL80211_IFTYPE_ADHOC: + priv->nar &= ~ADM8211_NAR_PR; + priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR; + + /* don't trust the error bits on rev 0x20 and up in adhoc */ + if (priv->pdev->revision >= ADM8211_REV_BA) + priv->soft_rx_crc = 1; + break; + case NL80211_IFTYPE_MONITOR: + priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST); + priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR; + break; + } + + ADM8211_RESTORE(); +} + +static void adm8211_hw_init_syn(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + + switch (priv->transceiver_type) { + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + /* comments taken from ADMtek vendor driver */ + + /* Reset RF2958 after power on */ + adm8211_rf_write_syn_rfmd2958(dev, 0x1F, 0x00000); + /* Initialize RF VCO Core Bias to maximum */ + adm8211_rf_write_syn_rfmd2958(dev, 0x0C, 0x3001F); + /* Initialize IF PLL */ + adm8211_rf_write_syn_rfmd2958(dev, 0x01, 0x29C03); + /* Initialize IF PLL Coarse Tuning */ + adm8211_rf_write_syn_rfmd2958(dev, 0x03, 0x1FF6F); + /* Initialize RF PLL */ + adm8211_rf_write_syn_rfmd2958(dev, 0x04, 0x29403); + /* Initialize RF PLL Coarse Tuning */ + adm8211_rf_write_syn_rfmd2958(dev, 0x07, 0x1456F); + /* Initialize TX gain and filter BW (R9) */ + adm8211_rf_write_syn_rfmd2958(dev, 0x09, + (priv->transceiver_type == ADM8211_RFMD2958 ? 
+ 0x10050 : 0x00050)); + /* Initialize CAL register */ + adm8211_rf_write_syn_rfmd2958(dev, 0x08, 0x3FFF8); + break; + + case ADM8211_MAX2820: + adm8211_rf_write_syn_max2820(dev, 0x1, 0x01E); + adm8211_rf_write_syn_max2820(dev, 0x2, 0x001); + adm8211_rf_write_syn_max2820(dev, 0x3, 0x054); + adm8211_rf_write_syn_max2820(dev, 0x4, 0x310); + adm8211_rf_write_syn_max2820(dev, 0x5, 0x000); + break; + + case ADM8211_AL2210L: + adm8211_rf_write_syn_al2210l(dev, 0x0, 0x0196C); + adm8211_rf_write_syn_al2210l(dev, 0x1, 0x007CB); + adm8211_rf_write_syn_al2210l(dev, 0x2, 0x3582F); + adm8211_rf_write_syn_al2210l(dev, 0x3, 0x010A9); + adm8211_rf_write_syn_al2210l(dev, 0x4, 0x77280); + adm8211_rf_write_syn_al2210l(dev, 0x5, 0x45641); + adm8211_rf_write_syn_al2210l(dev, 0x6, 0xEA130); + adm8211_rf_write_syn_al2210l(dev, 0x7, 0x80000); + adm8211_rf_write_syn_al2210l(dev, 0x8, 0x7850F); + adm8211_rf_write_syn_al2210l(dev, 0x9, 0xF900C); + adm8211_rf_write_syn_al2210l(dev, 0xA, 0x00000); + adm8211_rf_write_syn_al2210l(dev, 0xB, 0x00000); + break; + + case ADM8211_RFMD2948: + default: + break; + } +} + +static int adm8211_hw_init_bbp(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + + /* write addresses */ + if (priv->bbp_type == ADM8211_TYPE_INTERSIL) { + ADM8211_CSR_WRITE(MMIWA, 0x100E0C0A); + ADM8211_CSR_WRITE(MMIRD0, 0x00007C7E); + ADM8211_CSR_WRITE(MMIRD1, 0x00100000); + } else if (priv->bbp_type == ADM8211_TYPE_RFMD || + priv->bbp_type == ADM8211_TYPE_ADMTEK) { + /* check specific BBP type */ + switch (priv->specific_bbptype) { + case ADM8211_BBP_RFMD3000: + case ADM8211_BBP_RFMD3002: + ADM8211_CSR_WRITE(MMIWA, 0x00009101); + ADM8211_CSR_WRITE(MMIRD0, 0x00000301); + break; + + case ADM8211_BBP_ADM8011: + ADM8211_CSR_WRITE(MMIWA, 0x00008903); + ADM8211_CSR_WRITE(MMIRD0, 0x00001716); + + reg = ADM8211_CSR_READ(BBPCTL); + reg &= ~ADM8211_BBPCTL_TYPE; + reg |= 0x5 << 18; + ADM8211_CSR_WRITE(BBPCTL, reg); + break; + } + + switch (priv->pdev->revision) { + case ADM8211_REV_CA: + if (priv->transceiver_type == ADM8211_RFMD2958 || + priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER || + priv->transceiver_type == ADM8211_RFMD2948) + ADM8211_CSR_WRITE(SYNCTL, 0x1 << 22); + else if (priv->transceiver_type == ADM8211_MAX2820 || + priv->transceiver_type == ADM8211_AL2210L) + ADM8211_CSR_WRITE(SYNCTL, 0x3 << 22); + break; + + case ADM8211_REV_BA: + reg = ADM8211_CSR_READ(MMIRD1); + reg &= 0x0000FFFF; + reg |= 0x7e100000; + ADM8211_CSR_WRITE(MMIRD1, reg); + break; + + case ADM8211_REV_AB: + case ADM8211_REV_AF: + default: + ADM8211_CSR_WRITE(MMIRD1, 0x7e100000); + break; + } + + /* For RFMD */ + ADM8211_CSR_WRITE(MACTEST, 0x800); + } + + adm8211_hw_init_syn(dev); + + /* Set RF Power control IF pin to PE1+PHYRST# */ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF | + ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST); + ADM8211_CSR_READ(SYNRF); + msleep(20); + + /* write BBP regs */ + if (priv->bbp_type == ADM8211_TYPE_RFMD) { + /* RF3000 BBP */ + /* another set: + * 11: c8 + * 14: 14 + * 15: 50 (chan 1..13; chan 14: d0) + * 1c: 00 + * 1d: 84 + */ + adm8211_write_bbp(dev, RF3000_CCA_CTRL, 0x80); + /* antenna selection: diversity */ + adm8211_write_bbp(dev, RF3000_DIVERSITY__RSSI, 0x80); + adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, 0x74); + adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, 0x38); + adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, 0x40); + + if (priv->eeprom->major_version < 2) { + adm8211_write_bbp(dev, 0x1c, 0x00); + adm8211_write_bbp(dev, 0x1d, 0x80); + } else 
{ + if (priv->pdev->revision == ADM8211_REV_BA) + adm8211_write_bbp(dev, 0x1c, priv->eeprom->cr28); + else + adm8211_write_bbp(dev, 0x1c, 0x00); + + adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29); + } + } else if (priv->bbp_type == ADM8211_TYPE_ADMTEK) { + /* reset baseband */ + adm8211_write_bbp(dev, 0x00, 0xFF); + /* antenna selection: diversity */ + adm8211_write_bbp(dev, 0x07, 0x0A); + + /* TODO: find documentation for this */ + switch (priv->transceiver_type) { + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + adm8211_write_bbp(dev, 0x00, 0x00); + adm8211_write_bbp(dev, 0x01, 0x00); + adm8211_write_bbp(dev, 0x02, 0x00); + adm8211_write_bbp(dev, 0x03, 0x00); + adm8211_write_bbp(dev, 0x06, 0x0f); + adm8211_write_bbp(dev, 0x09, 0x00); + adm8211_write_bbp(dev, 0x0a, 0x00); + adm8211_write_bbp(dev, 0x0b, 0x00); + adm8211_write_bbp(dev, 0x0c, 0x00); + adm8211_write_bbp(dev, 0x0f, 0xAA); + adm8211_write_bbp(dev, 0x10, 0x8c); + adm8211_write_bbp(dev, 0x11, 0x43); + adm8211_write_bbp(dev, 0x18, 0x40); + adm8211_write_bbp(dev, 0x20, 0x23); + adm8211_write_bbp(dev, 0x21, 0x02); + adm8211_write_bbp(dev, 0x22, 0x28); + adm8211_write_bbp(dev, 0x23, 0x30); + adm8211_write_bbp(dev, 0x24, 0x2d); + adm8211_write_bbp(dev, 0x28, 0x35); + adm8211_write_bbp(dev, 0x2a, 0x8c); + adm8211_write_bbp(dev, 0x2b, 0x81); + adm8211_write_bbp(dev, 0x2c, 0x44); + adm8211_write_bbp(dev, 0x2d, 0x0A); + adm8211_write_bbp(dev, 0x29, 0x40); + adm8211_write_bbp(dev, 0x60, 0x08); + adm8211_write_bbp(dev, 0x64, 0x01); + break; + + case ADM8211_MAX2820: + adm8211_write_bbp(dev, 0x00, 0x00); + adm8211_write_bbp(dev, 0x01, 0x00); + adm8211_write_bbp(dev, 0x02, 0x00); + adm8211_write_bbp(dev, 0x03, 0x00); + adm8211_write_bbp(dev, 0x06, 0x0f); + adm8211_write_bbp(dev, 0x09, 0x05); + adm8211_write_bbp(dev, 0x0a, 0x02); + adm8211_write_bbp(dev, 0x0b, 0x00); + adm8211_write_bbp(dev, 0x0c, 0x0f); + adm8211_write_bbp(dev, 0x0f, 0x55); + adm8211_write_bbp(dev, 0x10, 0x8d); + adm8211_write_bbp(dev, 0x11, 0x43); + adm8211_write_bbp(dev, 0x18, 0x4a); + adm8211_write_bbp(dev, 0x20, 0x20); + adm8211_write_bbp(dev, 0x21, 0x02); + adm8211_write_bbp(dev, 0x22, 0x23); + adm8211_write_bbp(dev, 0x23, 0x30); + adm8211_write_bbp(dev, 0x24, 0x2d); + adm8211_write_bbp(dev, 0x2a, 0x8c); + adm8211_write_bbp(dev, 0x2b, 0x81); + adm8211_write_bbp(dev, 0x2c, 0x44); + adm8211_write_bbp(dev, 0x29, 0x4a); + adm8211_write_bbp(dev, 0x60, 0x2b); + adm8211_write_bbp(dev, 0x64, 0x01); + break; + + case ADM8211_AL2210L: + adm8211_write_bbp(dev, 0x00, 0x00); + adm8211_write_bbp(dev, 0x01, 0x00); + adm8211_write_bbp(dev, 0x02, 0x00); + adm8211_write_bbp(dev, 0x03, 0x00); + adm8211_write_bbp(dev, 0x06, 0x0f); + adm8211_write_bbp(dev, 0x07, 0x05); + adm8211_write_bbp(dev, 0x08, 0x03); + adm8211_write_bbp(dev, 0x09, 0x00); + adm8211_write_bbp(dev, 0x0a, 0x00); + adm8211_write_bbp(dev, 0x0b, 0x00); + adm8211_write_bbp(dev, 0x0c, 0x10); + adm8211_write_bbp(dev, 0x0f, 0x55); + adm8211_write_bbp(dev, 0x10, 0x8d); + adm8211_write_bbp(dev, 0x11, 0x43); + adm8211_write_bbp(dev, 0x18, 0x4a); + adm8211_write_bbp(dev, 0x20, 0x20); + adm8211_write_bbp(dev, 0x21, 0x02); + adm8211_write_bbp(dev, 0x22, 0x23); + adm8211_write_bbp(dev, 0x23, 0x30); + adm8211_write_bbp(dev, 0x24, 0x2d); + adm8211_write_bbp(dev, 0x2a, 0xaa); + adm8211_write_bbp(dev, 0x2b, 0x81); + adm8211_write_bbp(dev, 0x2c, 0x44); + adm8211_write_bbp(dev, 0x29, 0xfa); + adm8211_write_bbp(dev, 0x60, 0x2d); + adm8211_write_bbp(dev, 0x64, 0x01); + break; + + case ADM8211_RFMD2948: + break; + + 
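The adm8211_write_bbp() runs above are undocumented magic values (the TODO at
the top of this branch says as much).  Purely as a sketch (hypothetical names,
with the values copied from the AL2210L arm above), the same sequences could
be driven from a table, which makes the per-radio differences easier to
compare:

	struct bbp_init { u8 reg, val; };

	/* sketch only: first few AL2210L pairs from the switch arm above */
	static const struct bbp_init al2210l_bbp[] = {
		{ 0x00, 0x00 }, { 0x06, 0x0f }, { 0x0c, 0x10 },
		{ 0x0f, 0x55 }, { 0x10, 0x8d }, { 0x11, 0x43 },
		/* ... remaining register/value pairs ... */
	};

	static void bbp_write_table(struct ieee80211_hw *dev,
				    const struct bbp_init *t, size_t n)
	{
		size_t i;

		for (i = 0; i < n; i++)
			adm8211_write_bbp(dev, t[i].reg, t[i].val);
	}
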
default: + printk(KERN_DEBUG "%s: unsupported transceiver %d\n", + wiphy_name(dev->wiphy), priv->transceiver_type); + break; + } + } else + printk(KERN_DEBUG "%s: unsupported BBP %d\n", + wiphy_name(dev->wiphy), priv->bbp_type); + + ADM8211_CSR_WRITE(SYNRF, 0); + + /* Set RF CAL control source to MAC control */ + reg = ADM8211_CSR_READ(SYNCTL); + reg |= ADM8211_SYNCTL_SELCAL; + ADM8211_CSR_WRITE(SYNCTL, reg); + + return 0; +} + +/* configures hw beacons/probe responses */ +static int adm8211_set_rate(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + int i = 0; + u8 rate_buf[12] = {0}; + + /* write supported rates */ + if (priv->pdev->revision != ADM8211_REV_BA) { + rate_buf[0] = ARRAY_SIZE(adm8211_rates); + for (i = 0; i < ARRAY_SIZE(adm8211_rates); i++) + rate_buf[i + 1] = (adm8211_rates[i].bitrate / 5) | 0x80; + } else { + /* workaround for rev BA specific bug */ + rate_buf[0] = 0x04; + rate_buf[1] = 0x82; + rate_buf[2] = 0x04; + rate_buf[3] = 0x0b; + rate_buf[4] = 0x16; + } + + adm8211_write_sram_bytes(dev, ADM8211_SRAM_SUPP_RATE, rate_buf, + ARRAY_SIZE(adm8211_rates) + 1); + + reg = ADM8211_CSR_READ(PLCPHD) & 0x00FFFFFF; /* keep bits 0-23 */ + reg |= 1 << 15; /* short preamble */ + reg |= 110 << 24; + ADM8211_CSR_WRITE(PLCPHD, reg); + + /* MTMLT = 512 TU (max TX MSDU lifetime) + * BCNTSIG = plcp_signal (beacon, probe resp, and atim TX rate) + * SRTYLIM = 224 (short retry limit, TX header value is default) */ + ADM8211_CSR_WRITE(TXLMT, (512 << 16) | (110 << 8) | (224 << 0)); + + return 0; +} + +static void adm8211_hw_init(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + u8 cline; + + reg = ADM8211_CSR_READ(PAR); + reg |= ADM8211_PAR_MRLE | ADM8211_PAR_MRME; + reg &= ~(ADM8211_PAR_BAR | ADM8211_PAR_CAL); + + if (!pci_set_mwi(priv->pdev)) { + reg |= 0x1 << 24; + pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline); + + switch (cline) { + case 0x8: reg |= (0x1 << 14); + break; + case 0x16: reg |= (0x2 << 14); + break; + case 0x32: reg |= (0x3 << 14); + break; + default: reg |= (0x0 << 14); + break; + } + } + + ADM8211_CSR_WRITE(PAR, reg); + + reg = ADM8211_CSR_READ(CSR_TEST1); + reg &= ~(0xF << 28); + reg |= (1 << 28) | (1 << 31); + ADM8211_CSR_WRITE(CSR_TEST1, reg); + + /* lose link after 4 lost beacons */ + reg = (0x04 << 21) | ADM8211_WCSR_TSFTWE | ADM8211_WCSR_LSOE; + ADM8211_CSR_WRITE(WCSR, reg); + + /* Disable APM, enable receive FIFO threshold, and set drain receive + * threshold to store-and-forward */ + reg = ADM8211_CSR_READ(CMDR); + reg &= ~(ADM8211_CMDR_APM | ADM8211_CMDR_DRT); + reg |= ADM8211_CMDR_RTE | ADM8211_CMDR_DRT_SF; + ADM8211_CSR_WRITE(CMDR, reg); + + adm8211_set_rate(dev); + + /* 4-bit values: + * PWR1UP = 8 * 2 ms + * PWR0PAPE = 8 us or 5 us + * PWR1PAPE = 1 us or 3 us + * PWR0TRSW = 5 us + * PWR1TRSW = 12 us + * PWR0PE2 = 13 us + * PWR1PE2 = 1 us + * PWR0TXPE = 8 or 6 */ + if (priv->pdev->revision < ADM8211_REV_CA) + ADM8211_CSR_WRITE(TOFS2, 0x8815cd18); + else + ADM8211_CSR_WRITE(TOFS2, 0x8535cd16); + + /* Enable store and forward for transmit */ + priv->nar = ADM8211_NAR_SF | ADM8211_NAR_PB; + ADM8211_CSR_WRITE(NAR, priv->nar); + + /* Reset RF */ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_RADIO); + ADM8211_CSR_READ(SYNRF); + msleep(10); + ADM8211_CSR_WRITE(SYNRF, 0); + ADM8211_CSR_READ(SYNRF); + msleep(5); + + /* Set CFP Max Duration to 0x10 TU */ + reg = ADM8211_CSR_READ(CFPP); + reg &= ~(0xffff << 8); + reg |= 0x0010 << 8; + ADM8211_CSR_WRITE(CFPP, reg); + + /* USCNT = 0x16 (number of 
system clocks, 22 MHz, in 1us + * TUCNT = 0x3ff - Tu counter 1024 us */ + ADM8211_CSR_WRITE(TOFS0, (0x16 << 24) | 0x3ff); + + /* SLOT=20 us, SIFS=110 cycles of 22 MHz (5 us), + * DIFS=50 us, EIFS=100 us */ + if (priv->pdev->revision < ADM8211_REV_CA) + ADM8211_CSR_WRITE(IFST, (20 << 23) | (110 << 15) | + (50 << 9) | 100); + else + ADM8211_CSR_WRITE(IFST, (20 << 23) | (24 << 15) | + (50 << 9) | 100); + + /* PCNT = 1 (MAC idle time awake/sleep, unit S) + * RMRD = 2346 * 8 + 1 us (max RX duration) */ + ADM8211_CSR_WRITE(RMD, (1 << 16) | 18769); + + /* MART=65535 us, MIRT=256 us, TSFTOFST=0 us */ + ADM8211_CSR_WRITE(RSPT, 0xffffff00); + + /* Initialize BBP (and SYN) */ + adm8211_hw_init_bbp(dev); + + /* make sure interrupts are off */ + ADM8211_CSR_WRITE(IER, 0); + + /* ACK interrupts */ + ADM8211_CSR_WRITE(STSR, ADM8211_CSR_READ(STSR)); + + /* Setup WEP (turns it off for now) */ + reg = ADM8211_CSR_READ(MACTEST); + reg &= ~(7 << 20); + ADM8211_CSR_WRITE(MACTEST, reg); + + reg = ADM8211_CSR_READ(WEPCTL); + reg &= ~ADM8211_WEPCTL_WEPENABLE; + reg |= ADM8211_WEPCTL_WEPRXBYP; + ADM8211_CSR_WRITE(WEPCTL, reg); + + /* Clear the missed-packet counter. */ + ADM8211_CSR_READ(LPC); +} + +static int adm8211_hw_reset(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg, tmp; + int timeout = 100; + + /* Power-on issue */ + /* TODO: check if this is necessary */ + ADM8211_CSR_WRITE(FRCTL, 0); + + /* Reset the chip */ + tmp = ADM8211_CSR_READ(PAR); + ADM8211_CSR_WRITE(PAR, ADM8211_PAR_SWR); + + while ((ADM8211_CSR_READ(PAR) & ADM8211_PAR_SWR) && timeout--) + msleep(50); + + if (timeout <= 0) + return -ETIMEDOUT; + + ADM8211_CSR_WRITE(PAR, tmp); + + if (priv->pdev->revision == ADM8211_REV_BA && + (priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER || + priv->transceiver_type == ADM8211_RFMD2958)) { + reg = ADM8211_CSR_READ(CSR_TEST1); + reg |= (1 << 4) | (1 << 5); + ADM8211_CSR_WRITE(CSR_TEST1, reg); + } else if (priv->pdev->revision == ADM8211_REV_CA) { + reg = ADM8211_CSR_READ(CSR_TEST1); + reg &= ~((1 << 4) | (1 << 5)); + ADM8211_CSR_WRITE(CSR_TEST1, reg); + } + + ADM8211_CSR_WRITE(FRCTL, 0); + + reg = ADM8211_CSR_READ(CSR_TEST0); + reg |= ADM8211_CSR_TEST0_EPRLD; /* EEPROM Recall */ + ADM8211_CSR_WRITE(CSR_TEST0, reg); + + adm8211_clear_sram(dev); + + return 0; +} + +static u64 adm8211_get_tsft(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 tsftl; + u64 tsft; + + tsftl = ADM8211_CSR_READ(TSFTL); + tsft = ADM8211_CSR_READ(TSFTH); + tsft <<= 32; + tsft |= tsftl; + + return tsft; +} + +static void adm8211_set_interval(struct ieee80211_hw *dev, + unsigned short bi, unsigned short li) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + + /* BP (beacon interval) = data->beacon_interval + * LI (listen interval) = data->listen_interval (in beacon intervals) */ + reg = (bi << 16) | li; + ADM8211_CSR_WRITE(BPLI, reg); +} + +static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + + ADM8211_CSR_WRITE(BSSID0, le32_to_cpu(*(__le32 *)bssid)); + reg = ADM8211_CSR_READ(ABDA1); + reg &= 0x0000ffff; + reg |= (bssid[4] << 16) | (bssid[5] << 24); + ADM8211_CSR_WRITE(ABDA1, reg); +} + +static int adm8211_config(struct ieee80211_hw *dev, u32 changed) +{ + struct adm8211_priv *priv = dev->priv; + struct ieee80211_conf *conf = &dev->conf; + int channel = ieee80211_frequency_to_channel(conf->channel->center_freq); + + if (channel != priv->channel) { + priv->channel = channel; + 
adm8211_rf_set_channel(dev, priv->channel); + } + + return 0; +} + +static void adm8211_bss_info_changed(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, + u32 changes) +{ + struct adm8211_priv *priv = dev->priv; + + if (!(changes & BSS_CHANGED_BSSID)) + return; + + if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) { + adm8211_set_bssid(dev, conf->bssid); + memcpy(priv->bssid, conf->bssid, ETH_ALEN); + } +} + +static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, + int mc_count, struct dev_addr_list *mclist) +{ + unsigned int bit_nr, i; + u32 mc_filter[2]; + + mc_filter[1] = mc_filter[0] = 0; + + for (i = 0; i < mc_count; i++) { + if (!mclist) + break; + bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + + bit_nr &= 0x3F; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + mclist = mclist->next; + } + + return mc_filter[0] | ((u64)(mc_filter[1]) << 32); +} + +static void adm8211_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + static const u8 bcast[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + struct adm8211_priv *priv = dev->priv; + unsigned int new_flags; + u32 mc_filter[2]; + + mc_filter[0] = multicast; + mc_filter[1] = multicast >> 32; + + new_flags = 0; + + if (*total_flags & FIF_PROMISC_IN_BSS) { + new_flags |= FIF_PROMISC_IN_BSS; + priv->nar |= ADM8211_NAR_PR; + priv->nar &= ~ADM8211_NAR_MM; + mc_filter[1] = mc_filter[0] = ~0; + } else if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) { + new_flags |= FIF_ALLMULTI; + priv->nar &= ~ADM8211_NAR_PR; + priv->nar |= ADM8211_NAR_MM; + mc_filter[1] = mc_filter[0] = ~0; + } else { + priv->nar &= ~(ADM8211_NAR_MM | ADM8211_NAR_PR); + } + + ADM8211_IDLE_RX(); + + ADM8211_CSR_WRITE(MAR0, mc_filter[0]); + ADM8211_CSR_WRITE(MAR1, mc_filter[1]); + ADM8211_CSR_READ(NAR); + + if (priv->nar & ADM8211_NAR_PR) + dev->flags |= IEEE80211_HW_RX_INCLUDES_FCS; + else + dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS; + + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + adm8211_set_bssid(dev, bcast); + else + adm8211_set_bssid(dev, priv->bssid); + + ADM8211_RESTORE(); + + *total_flags = new_flags; +} + +static int adm8211_add_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct adm8211_priv *priv = dev->priv; + if (priv->mode != NL80211_IFTYPE_MONITOR) + return -EOPNOTSUPP; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + priv->mode = vif->type; + break; + default: + return -EOPNOTSUPP; + } + + ADM8211_IDLE(); + + ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr)); + ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4))); + + adm8211_update_mode(dev); + + ADM8211_RESTORE(); + + return 0; +} + +static void adm8211_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct adm8211_priv *priv = dev->priv; + priv->mode = NL80211_IFTYPE_MONITOR; +} + +static int adm8211_init_rings(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + struct adm8211_desc *desc = NULL; + struct adm8211_rx_ring_info *rx_info; + struct adm8211_tx_ring_info *tx_info; + unsigned int i; + + for (i = 0; i < priv->rx_ring_size; i++) { + desc = &priv->rx_ring[i]; + desc->status = 0; + desc->length = cpu_to_le32(RX_PKT_SIZE); + priv->rx_buffers[i].skb = NULL; + } + /* Mark the end of RX ring; hw returns to base address after this + * descriptor */ + desc->length |= cpu_to_le32(RDES1_CONTROL_RER); + + for (i = 0; i < priv->rx_ring_size; i++) { + desc = 
&priv->rx_ring[i]; + rx_info = &priv->rx_buffers[i]; + + rx_info->skb = dev_alloc_skb(RX_PKT_SIZE); + if (rx_info->skb == NULL) + break; + rx_info->mapping = pci_map_single(priv->pdev, + skb_tail_pointer(rx_info->skb), + RX_PKT_SIZE, + PCI_DMA_FROMDEVICE); + desc->buffer1 = cpu_to_le32(rx_info->mapping); + desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL); + } + + /* Setup TX ring. TX buffers descriptors will be filled in as needed */ + for (i = 0; i < priv->tx_ring_size; i++) { + desc = &priv->tx_ring[i]; + tx_info = &priv->tx_buffers[i]; + + tx_info->skb = NULL; + tx_info->mapping = 0; + desc->status = 0; + } + desc->length = cpu_to_le32(TDES1_CONTROL_TER); + + priv->cur_rx = priv->cur_tx = priv->dirty_tx = 0; + ADM8211_CSR_WRITE(RDB, priv->rx_ring_dma); + ADM8211_CSR_WRITE(TDBD, priv->tx_ring_dma); + + return 0; +} + +static void adm8211_free_rings(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int i; + + for (i = 0; i < priv->rx_ring_size; i++) { + if (!priv->rx_buffers[i].skb) + continue; + + pci_unmap_single( + priv->pdev, + priv->rx_buffers[i].mapping, + RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + + dev_kfree_skb(priv->rx_buffers[i].skb); + } + + for (i = 0; i < priv->tx_ring_size; i++) { + if (!priv->tx_buffers[i].skb) + continue; + + pci_unmap_single(priv->pdev, + priv->tx_buffers[i].mapping, + priv->tx_buffers[i].skb->len, + PCI_DMA_TODEVICE); + + dev_kfree_skb(priv->tx_buffers[i].skb); + } +} + +static int adm8211_start(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + int retval; + + /* Power up MAC and RF chips */ + retval = adm8211_hw_reset(dev); + if (retval) { + printk(KERN_ERR "%s: hardware reset failed\n", + wiphy_name(dev->wiphy)); + goto fail; + } + + retval = adm8211_init_rings(dev); + if (retval) { + printk(KERN_ERR "%s: failed to initialize rings\n", + wiphy_name(dev->wiphy)); + goto fail; + } + + /* Init hardware */ + adm8211_hw_init(dev); + adm8211_rf_set_channel(dev, priv->channel); + + retval = request_irq(priv->pdev->irq, adm8211_interrupt, + IRQF_SHARED, "adm8211", dev); + if (retval) { + printk(KERN_ERR "%s: failed to register IRQ handler\n", + wiphy_name(dev->wiphy)); + goto fail; + } + + ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE | + ADM8211_IER_RCIE | ADM8211_IER_TCIE | + ADM8211_IER_TDUIE | ADM8211_IER_GPTIE); + priv->mode = NL80211_IFTYPE_MONITOR; + adm8211_update_mode(dev); + ADM8211_CSR_WRITE(RDR, 0); + + adm8211_set_interval(dev, 100, 10); + return 0; + +fail: + return retval; +} + +static void adm8211_stop(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + + priv->mode = NL80211_IFTYPE_UNSPECIFIED; + priv->nar = 0; + ADM8211_CSR_WRITE(NAR, 0); + ADM8211_CSR_WRITE(IER, 0); + ADM8211_CSR_READ(NAR); + + free_irq(priv->pdev->irq, dev); + + adm8211_free_rings(dev); +} + +static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int len, + int plcp_signal, int short_preamble) +{ + /* Alternative calculation from NetBSD: */ + +/* IEEE 802.11b durations for DSSS PHY in microseconds */ +#define IEEE80211_DUR_DS_LONG_PREAMBLE 144 +#define IEEE80211_DUR_DS_SHORT_PREAMBLE 72 +#define IEEE80211_DUR_DS_FAST_PLCPHDR 24 +#define IEEE80211_DUR_DS_SLOW_PLCPHDR 48 +#define IEEE80211_DUR_DS_SLOW_ACK 112 +#define IEEE80211_DUR_DS_FAST_ACK 56 +#define IEEE80211_DUR_DS_SLOW_CTS 112 +#define IEEE80211_DUR_DS_FAST_CTS 56 +#define IEEE80211_DUR_DS_SLOT 20 +#define IEEE80211_DUR_DS_SIFS 10 + + int remainder; + + *dur = (80 * (24 + payload_len) + plcp_signal - 1) + 
/ plcp_signal; + + if (plcp_signal <= PLCP_SIGNAL_2M) + /* 1-2Mbps WLAN: send ACK/CTS at 1Mbps */ + *dur += 3 * (IEEE80211_DUR_DS_SIFS + + IEEE80211_DUR_DS_SHORT_PREAMBLE + + IEEE80211_DUR_DS_FAST_PLCPHDR) + + IEEE80211_DUR_DS_SLOW_CTS + IEEE80211_DUR_DS_SLOW_ACK; + else + /* 5-11Mbps WLAN: send ACK/CTS at 2Mbps */ + *dur += 3 * (IEEE80211_DUR_DS_SIFS + + IEEE80211_DUR_DS_SHORT_PREAMBLE + + IEEE80211_DUR_DS_FAST_PLCPHDR) + + IEEE80211_DUR_DS_FAST_CTS + IEEE80211_DUR_DS_FAST_ACK; + + /* lengthen duration if long preamble */ + if (!short_preamble) + *dur += 3 * (IEEE80211_DUR_DS_LONG_PREAMBLE - + IEEE80211_DUR_DS_SHORT_PREAMBLE) + + 3 * (IEEE80211_DUR_DS_SLOW_PLCPHDR - + IEEE80211_DUR_DS_FAST_PLCPHDR); + + + *plcp = (80 * len) / plcp_signal; + remainder = (80 * len) % plcp_signal; + if (plcp_signal == PLCP_SIGNAL_11M && + remainder <= 30 && remainder > 0) + *plcp = (*plcp | 0x8000) + 1; + else if (remainder) + (*plcp)++; +} + +/* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */ +static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, + u16 plcp_signal, + size_t hdrlen) +{ + struct adm8211_priv *priv = dev->priv; + unsigned long flags; + dma_addr_t mapping; + unsigned int entry; + u32 flag; + + mapping = pci_map_single(priv->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size / 2) + flag = TDES1_CONTROL_IC | TDES1_CONTROL_LS | TDES1_CONTROL_FS; + else + flag = TDES1_CONTROL_LS | TDES1_CONTROL_FS; + + if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size - 2) + ieee80211_stop_queue(dev, 0); + + entry = priv->cur_tx % priv->tx_ring_size; + + priv->tx_buffers[entry].skb = skb; + priv->tx_buffers[entry].mapping = mapping; + priv->tx_buffers[entry].hdrlen = hdrlen; + priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping); + + if (entry == priv->tx_ring_size - 1) + flag |= TDES1_CONTROL_TER; + priv->tx_ring[entry].length = cpu_to_le32(flag | skb->len); + + /* Set TX rate (SIGNAL field in PLCP PPDU format) */ + flag = TDES0_CONTROL_OWN | (plcp_signal << 20) | 8 /* ? 
*/; + priv->tx_ring[entry].status = cpu_to_le32(flag); + + priv->cur_tx++; + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Trigger transmit poll */ + ADM8211_CSR_WRITE(TDR, 0); +} + +/* Put adm8211_tx_hdr on skb and transmit */ +static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct adm8211_tx_hdr *txhdr; + size_t payload_len, hdrlen; + int plcp, dur, len, plcp_signal, short_preamble; + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_rate *txrate = ieee80211_get_tx_rate(dev, info); + u8 rc_flags; + + rc_flags = info->control.rates[0].flags; + short_preamble = !!(rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); + plcp_signal = txrate->bitrate; + + hdr = (struct ieee80211_hdr *)skb->data; + hdrlen = ieee80211_hdrlen(hdr->frame_control); + memcpy(skb->cb, skb->data, hdrlen); + hdr = (struct ieee80211_hdr *)skb->cb; + skb_pull(skb, hdrlen); + payload_len = skb->len; + + txhdr = (struct adm8211_tx_hdr *) skb_push(skb, sizeof(*txhdr)); + memset(txhdr, 0, sizeof(*txhdr)); + memcpy(txhdr->da, ieee80211_get_DA(hdr), ETH_ALEN); + txhdr->signal = plcp_signal; + txhdr->frame_body_size = cpu_to_le16(payload_len); + txhdr->frame_control = hdr->frame_control; + + len = hdrlen + payload_len + FCS_LEN; + + txhdr->frag = cpu_to_le16(0x0FFF); + adm8211_calc_durations(&dur, &plcp, payload_len, + len, plcp_signal, short_preamble); + txhdr->plcp_frag_head_len = cpu_to_le16(plcp); + txhdr->plcp_frag_tail_len = cpu_to_le16(plcp); + txhdr->dur_frag_head = cpu_to_le16(dur); + txhdr->dur_frag_tail = cpu_to_le16(dur); + + txhdr->header_control = cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER); + + if (short_preamble) + txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE); + + if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) + txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS); + + txhdr->retry_limit = info->control.rates[0].count; + + adm8211_tx_raw(dev, skb, plcp_signal, hdrlen); + + return NETDEV_TX_OK; +} + +static int adm8211_alloc_rings(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int ring_size; + + priv->rx_buffers = kmalloc(sizeof(*priv->rx_buffers) * priv->rx_ring_size + + sizeof(*priv->tx_buffers) * priv->tx_ring_size, GFP_KERNEL); + if (!priv->rx_buffers) + return -ENOMEM; + + priv->tx_buffers = (void *)priv->rx_buffers + + sizeof(*priv->rx_buffers) * priv->rx_ring_size; + + /* Allocate TX/RX descriptors */ + ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size + + sizeof(struct adm8211_desc) * priv->tx_ring_size; + priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size, + &priv->rx_ring_dma); + + if (!priv->rx_ring) { + kfree(priv->rx_buffers); + priv->rx_buffers = NULL; + priv->tx_buffers = NULL; + return -ENOMEM; + } + + priv->tx_ring = (struct adm8211_desc *)(priv->rx_ring + + priv->rx_ring_size); + priv->tx_ring_dma = priv->rx_ring_dma + + sizeof(struct adm8211_desc) * priv->rx_ring_size; + + return 0; +} + +static const struct ieee80211_ops adm8211_ops = { + .tx = adm8211_tx, + .start = adm8211_start, + .stop = adm8211_stop, + .add_interface = adm8211_add_interface, + .remove_interface = adm8211_remove_interface, + .config = adm8211_config, + .bss_info_changed = adm8211_bss_info_changed, + .prepare_multicast = adm8211_prepare_multicast, + .configure_filter = adm8211_configure_filter, + .get_stats = adm8211_get_stats, + .get_tsf = adm8211_get_tsft +}; + +static int __devinit adm8211_probe(struct pci_dev *pdev, + const struct pci_device_id 
*id) +{ + struct ieee80211_hw *dev; + struct adm8211_priv *priv; + unsigned long mem_addr, mem_len; + unsigned int io_addr, io_len; + int err; + u32 reg; + u8 perm_addr[ETH_ALEN]; + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "%s (adm8211): Cannot enable new PCI device\n", + pci_name(pdev)); + return err; + } + + io_addr = pci_resource_start(pdev, 0); + io_len = pci_resource_len(pdev, 0); + mem_addr = pci_resource_start(pdev, 1); + mem_len = pci_resource_len(pdev, 1); + if (io_len < 256 || mem_len < 1024) { + printk(KERN_ERR "%s (adm8211): Too short PCI resources\n", + pci_name(pdev)); + goto err_disable_pdev; + } + + + /* check signature */ + pci_read_config_dword(pdev, 0x80 /* CR32 */, ®); + if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) { + printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n", + pci_name(pdev), reg); + goto err_disable_pdev; + } + + err = pci_request_regions(pdev, "adm8211"); + if (err) { + printk(KERN_ERR "%s (adm8211): Cannot obtain PCI resources\n", + pci_name(pdev)); + return err; /* someone else grabbed it? don't disable it */ + } + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + printk(KERN_ERR "%s (adm8211): No suitable DMA available\n", + pci_name(pdev)); + goto err_free_reg; + } + + pci_set_master(pdev); + + dev = ieee80211_alloc_hw(sizeof(*priv), &adm8211_ops); + if (!dev) { + printk(KERN_ERR "%s (adm8211): ieee80211 alloc failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_free_reg; + } + priv = dev->priv; + priv->pdev = pdev; + + spin_lock_init(&priv->lock); + + SET_IEEE80211_DEV(dev, &pdev->dev); + + pci_set_drvdata(pdev, dev); + + priv->map = pci_iomap(pdev, 1, mem_len); + if (!priv->map) + priv->map = pci_iomap(pdev, 0, io_len); + + if (!priv->map) { + printk(KERN_ERR "%s (adm8211): Cannot map device memory\n", + pci_name(pdev)); + goto err_free_dev; + } + + priv->rx_ring_size = rx_ring_size; + priv->tx_ring_size = tx_ring_size; + + if (adm8211_alloc_rings(dev)) { + printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n", + pci_name(pdev)); + goto err_iounmap; + } + + *(__le32 *)perm_addr = cpu_to_le32(ADM8211_CSR_READ(PAR0)); + *(__le16 *)&perm_addr[4] = + cpu_to_le16(ADM8211_CSR_READ(PAR1) & 0xFFFF); + + if (!is_valid_ether_addr(perm_addr)) { + printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n", + pci_name(pdev)); + random_ether_addr(perm_addr); + } + SET_IEEE80211_PERM_ADDR(dev, perm_addr); + + dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr); + /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */ + dev->flags = IEEE80211_HW_SIGNAL_UNSPEC; + dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + + dev->channel_change_time = 1000; + dev->max_signal = 100; /* FIXME: find better value */ + + dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */ + + priv->retry_limit = 3; + priv->ant_power = 0x40; + priv->tx_power = 0x40; + priv->lpf_cutoff = 0xFF; + priv->lnags_threshold = 0xFF; + priv->mode = NL80211_IFTYPE_UNSPECIFIED; + + /* Power-on issue. 
EEPROM won't read correctly without */ + if (pdev->revision >= ADM8211_REV_BA) { + ADM8211_CSR_WRITE(FRCTL, 0); + ADM8211_CSR_READ(FRCTL); + ADM8211_CSR_WRITE(FRCTL, 1); + ADM8211_CSR_READ(FRCTL); + msleep(100); + } + + err = adm8211_read_eeprom(dev); + if (err) { + printk(KERN_ERR "%s (adm8211): Can't alloc eeprom buffer\n", + pci_name(pdev)); + goto err_free_desc; + } + + priv->channel = 1; + + dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + + err = ieee80211_register_hw(dev); + if (err) { + printk(KERN_ERR "%s (adm8211): Cannot register device\n", + pci_name(pdev)); + goto err_free_desc; + } + + printk(KERN_INFO "%s: hwaddr %pM, Rev 0x%02x\n", + wiphy_name(dev->wiphy), dev->wiphy->perm_addr, + pdev->revision); + + return 0; + + err_free_desc: + pci_free_consistent(pdev, + sizeof(struct adm8211_desc) * priv->rx_ring_size + + sizeof(struct adm8211_desc) * priv->tx_ring_size, + priv->rx_ring, priv->rx_ring_dma); + kfree(priv->rx_buffers); + + err_iounmap: + pci_iounmap(pdev, priv->map); + + err_free_dev: + pci_set_drvdata(pdev, NULL); + ieee80211_free_hw(dev); + + err_free_reg: + pci_release_regions(pdev); + + err_disable_pdev: + pci_disable_device(pdev); + return err; +} + + +static void __devexit adm8211_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct adm8211_priv *priv; + + if (!dev) + return; + + ieee80211_unregister_hw(dev); + + priv = dev->priv; + + pci_free_consistent(pdev, + sizeof(struct adm8211_desc) * priv->rx_ring_size + + sizeof(struct adm8211_desc) * priv->tx_ring_size, + priv->rx_ring, priv->rx_ring_dma); + + kfree(priv->rx_buffers); + kfree(priv->eeprom); + pci_iounmap(pdev, priv->map); + pci_release_regions(pdev); + pci_disable_device(pdev); + ieee80211_free_hw(dev); +} + + +#ifdef CONFIG_PM +static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state) +{ + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int adm8211_resume(struct pci_dev *pdev) +{ + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + return 0; +} +#endif /* CONFIG_PM */ + + +MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table); + +/* TODO: implement enable_wake */ +static struct pci_driver adm8211_driver = { + .name = "adm8211", + .id_table = adm8211_pci_id_table, + .probe = adm8211_probe, + .remove = __devexit_p(adm8211_remove), +#ifdef CONFIG_PM + .suspend = adm8211_suspend, + .resume = adm8211_resume, +#endif /* CONFIG_PM */ +}; + + + +static int __init adm8211_init(void) +{ + return pci_register_driver(&adm8211_driver); +} + + +static void __exit adm8211_exit(void) +{ + pci_unregister_driver(&adm8211_driver); +} + + +module_init(adm8211_init); +module_exit(adm8211_exit); diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h new file mode 100644 index 0000000..b07e4d3 --- /dev/null +++ b/drivers/net/wireless/adm8211.h @@ -0,0 +1,602 @@ +#ifndef ADM8211_H +#define ADM8211_H + +/* ADM8211 Registers */ + +/* CR32 (SIG) signature */ +#define ADM8211_SIG1 0x82011317 /* ADM8211A */ +#define ADM8211_SIG2 0x82111317 /* ADM8211B/ADM8211C */ + +#define ADM8211_CSR_READ(r) ioread32(&priv->map->r) +#define ADM8211_CSR_WRITE(r, val) iowrite32((val), &priv->map->r) + +/* CSR (Host Control and Status Registers) */ +struct adm8211_csr { + __le32 PAR; /* 0x00 CSR0 */ + __le32 FRCTL; /* 0x04 CSR0A */ + __le32 TDR; /* 0x08 CSR1 */ + __le32 WTDP; /* 0x0C CSR1A */ + __le32 RDR; /* 0x10 CSR2 */ + __le32 WRDP; /* 0x14 CSR2A */ + __le32 RDB; /* 0x18 CSR3 */ + __le32 TDBH; /* 0x1C 
CSR3A */ + __le32 TDBD; /* 0x20 CSR4 */ + __le32 TDBP; /* 0x24 CSR4A */ + __le32 STSR; /* 0x28 CSR5 */ + __le32 TDBB; /* 0x2C CSR5A */ + __le32 NAR; /* 0x30 CSR6 */ + __le32 CSR6A; /* reserved */ + __le32 IER; /* 0x38 CSR7 */ + __le32 TKIPSCEP; /* 0x3C CSR7A */ + __le32 LPC; /* 0x40 CSR8 */ + __le32 CSR_TEST1; /* 0x44 CSR8A */ + __le32 SPR; /* 0x48 CSR9 */ + __le32 CSR_TEST0; /* 0x4C CSR9A */ + __le32 WCSR; /* 0x50 CSR10 */ + __le32 WPDR; /* 0x54 CSR10A */ + __le32 GPTMR; /* 0x58 CSR11 */ + __le32 GPIO; /* 0x5C CSR11A */ + __le32 BBPCTL; /* 0x60 CSR12 */ + __le32 SYNCTL; /* 0x64 CSR12A */ + __le32 PLCPHD; /* 0x68 CSR13 */ + __le32 MMIWA; /* 0x6C CSR13A */ + __le32 MMIRD0; /* 0x70 CSR14 */ + __le32 MMIRD1; /* 0x74 CSR14A */ + __le32 TXBR; /* 0x78 CSR15 */ + __le32 SYNDATA; /* 0x7C CSR15A */ + __le32 ALCS; /* 0x80 CSR16 */ + __le32 TOFS2; /* 0x84 CSR17 */ + __le32 CMDR; /* 0x88 CSR18 */ + __le32 PCIC; /* 0x8C CSR19 */ + __le32 PMCSR; /* 0x90 CSR20 */ + __le32 PAR0; /* 0x94 CSR21 */ + __le32 PAR1; /* 0x98 CSR22 */ + __le32 MAR0; /* 0x9C CSR23 */ + __le32 MAR1; /* 0xA0 CSR24 */ + __le32 ATIMDA0; /* 0xA4 CSR25 */ + __le32 ABDA1; /* 0xA8 CSR26 */ + __le32 BSSID0; /* 0xAC CSR27 */ + __le32 TXLMT; /* 0xB0 CSR28 */ + __le32 MIBCNT; /* 0xB4 CSR29 */ + __le32 BCNT; /* 0xB8 CSR30 */ + __le32 TSFTH; /* 0xBC CSR31 */ + __le32 TSC; /* 0xC0 CSR32 */ + __le32 SYNRF; /* 0xC4 CSR33 */ + __le32 BPLI; /* 0xC8 CSR34 */ + __le32 CAP0; /* 0xCC CSR35 */ + __le32 CAP1; /* 0xD0 CSR36 */ + __le32 RMD; /* 0xD4 CSR37 */ + __le32 CFPP; /* 0xD8 CSR38 */ + __le32 TOFS0; /* 0xDC CSR39 */ + __le32 TOFS1; /* 0xE0 CSR40 */ + __le32 IFST; /* 0xE4 CSR41 */ + __le32 RSPT; /* 0xE8 CSR42 */ + __le32 TSFTL; /* 0xEC CSR43 */ + __le32 WEPCTL; /* 0xF0 CSR44 */ + __le32 WESK; /* 0xF4 CSR45 */ + __le32 WEPCNT; /* 0xF8 CSR46 */ + __le32 MACTEST; /* 0xFC CSR47 */ + __le32 FER; /* 0x100 */ + __le32 FEMR; /* 0x104 */ + __le32 FPSR; /* 0x108 */ + __le32 FFER; /* 0x10C */ +} __attribute__ ((packed)); + +/* CSR0 - PAR (PCI Address Register) */ +#define ADM8211_PAR_MWIE (1 << 24) +#define ADM8211_PAR_MRLE (1 << 23) +#define ADM8211_PAR_MRME (1 << 21) +#define ADM8211_PAR_RAP ((1 << 18) | (1 << 17)) +#define ADM8211_PAR_CAL ((1 << 15) | (1 << 14)) +#define ADM8211_PAR_PBL 0x00003f00 +#define ADM8211_PAR_BLE (1 << 7) +#define ADM8211_PAR_DSL 0x0000007c +#define ADM8211_PAR_BAR (1 << 1) +#define ADM8211_PAR_SWR (1 << 0) + +/* CSR1 - FRCTL (Frame Control Register) */ +#define ADM8211_FRCTL_PWRMGT (1 << 31) +#define ADM8211_FRCTL_MAXPSP (1 << 27) +#define ADM8211_FRCTL_DRVPRSP (1 << 26) +#define ADM8211_FRCTL_DRVBCON (1 << 25) +#define ADM8211_FRCTL_AID 0x0000ffff +#define ADM8211_FRCTL_AID_ON 0x0000c000 + +/* CSR5 - STSR (Status Register) */ +#define ADM8211_STSR_PCF (1 << 31) +#define ADM8211_STSR_BCNTC (1 << 30) +#define ADM8211_STSR_GPINT (1 << 29) +#define ADM8211_STSR_LinkOff (1 << 28) +#define ADM8211_STSR_ATIMTC (1 << 27) +#define ADM8211_STSR_TSFTF (1 << 26) +#define ADM8211_STSR_TSCZ (1 << 25) +#define ADM8211_STSR_LinkOn (1 << 24) +#define ADM8211_STSR_SQL (1 << 23) +#define ADM8211_STSR_WEPTD (1 << 22) +#define ADM8211_STSR_ATIME (1 << 21) +#define ADM8211_STSR_TBTT (1 << 20) +#define ADM8211_STSR_NISS (1 << 16) +#define ADM8211_STSR_AISS (1 << 15) +#define ADM8211_STSR_TEIS (1 << 14) +#define ADM8211_STSR_FBE (1 << 13) +#define ADM8211_STSR_REIS (1 << 12) +#define ADM8211_STSR_GPTT (1 << 11) +#define ADM8211_STSR_RPS (1 << 8) +#define ADM8211_STSR_RDU (1 << 7) +#define ADM8211_STSR_RCI (1 << 6) +#define ADM8211_STSR_TUF (1 << 5) 
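The STSR bits above are consumed with an acknowledge-by-writeback pattern:
adm8211_hw_init() clears pending causes with
ADM8211_CSR_WRITE(STSR, ADM8211_CSR_READ(STSR)), which suggests
write-one-to-clear semantics.  A minimal sketch of that pattern (not the
driver's actual adm8211_interrupt(); it only assumes a priv pointer so the
CSR macros resolve):

	static inline void adm8211_status_sketch(struct adm8211_priv *priv)
	{
		u32 stsr = ADM8211_CSR_READ(STSR);

		if (!stsr)
			return;		/* nothing pending */

		/* write the asserted bits back to acknowledge them */
		ADM8211_CSR_WRITE(STSR, stsr);

		if (stsr & ADM8211_STSR_RCI) {
			/* RX complete: drain the RX ring */
		}
		if (stsr & ADM8211_STSR_TCI) {
			/* TX complete: reclaim TX descriptors */
		}
	}
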
+#define ADM8211_STSR_TRT (1 << 4) +#define ADM8211_STSR_TLT (1 << 3) +#define ADM8211_STSR_TDU (1 << 2) +#define ADM8211_STSR_TPS (1 << 1) +#define ADM8211_STSR_TCI (1 << 0) + +/* CSR6 - NAR (Network Access Register) */ +#define ADM8211_NAR_TXCF (1 << 31) +#define ADM8211_NAR_HF (1 << 30) +#define ADM8211_NAR_UTR (1 << 29) +#define ADM8211_NAR_SQ (1 << 28) +#define ADM8211_NAR_CFP (1 << 27) +#define ADM8211_NAR_SF (1 << 21) +#define ADM8211_NAR_TR ((1 << 15) | (1 << 14)) +#define ADM8211_NAR_ST (1 << 13) +#define ADM8211_NAR_OM ((1 << 11) | (1 << 10)) +#define ADM8211_NAR_MM (1 << 7) +#define ADM8211_NAR_PR (1 << 6) +#define ADM8211_NAR_EA (1 << 5) +#define ADM8211_NAR_PB (1 << 3) +#define ADM8211_NAR_STPDMA (1 << 2) +#define ADM8211_NAR_SR (1 << 1) +#define ADM8211_NAR_CTX (1 << 0) + +#define ADM8211_IDLE() \ +do { \ + if (priv->nar & (ADM8211_NAR_SR | ADM8211_NAR_ST)) { \ + ADM8211_CSR_WRITE(NAR, priv->nar & \ + ~(ADM8211_NAR_SR | ADM8211_NAR_ST));\ + ADM8211_CSR_READ(NAR); \ + msleep(20); \ + } \ +} while (0) + +#define ADM8211_IDLE_RX() \ +do { \ + if (priv->nar & ADM8211_NAR_SR) { \ + ADM8211_CSR_WRITE(NAR, priv->nar & ~ADM8211_NAR_SR); \ + ADM8211_CSR_READ(NAR); \ + mdelay(20); \ + } \ +} while (0) + +#define ADM8211_RESTORE() \ +do { \ + if (priv->nar & (ADM8211_NAR_SR | ADM8211_NAR_ST)) \ + ADM8211_CSR_WRITE(NAR, priv->nar); \ +} while (0) + +/* CSR7 - IER (Interrupt Enable Register) */ +#define ADM8211_IER_PCFIE (1 << 31) +#define ADM8211_IER_BCNTCIE (1 << 30) +#define ADM8211_IER_GPIE (1 << 29) +#define ADM8211_IER_LinkOffIE (1 << 28) +#define ADM8211_IER_ATIMTCIE (1 << 27) +#define ADM8211_IER_TSFTFIE (1 << 26) +#define ADM8211_IER_TSCZE (1 << 25) +#define ADM8211_IER_LinkOnIE (1 << 24) +#define ADM8211_IER_SQLIE (1 << 23) +#define ADM8211_IER_WEPIE (1 << 22) +#define ADM8211_IER_ATIMEIE (1 << 21) +#define ADM8211_IER_TBTTIE (1 << 20) +#define ADM8211_IER_NIE (1 << 16) +#define ADM8211_IER_AIE (1 << 15) +#define ADM8211_IER_TEIE (1 << 14) +#define ADM8211_IER_FBEIE (1 << 13) +#define ADM8211_IER_REIE (1 << 12) +#define ADM8211_IER_GPTIE (1 << 11) +#define ADM8211_IER_RSIE (1 << 8) +#define ADM8211_IER_RUIE (1 << 7) +#define ADM8211_IER_RCIE (1 << 6) +#define ADM8211_IER_TUIE (1 << 5) +#define ADM8211_IER_TRTIE (1 << 4) +#define ADM8211_IER_TLTTIE (1 << 3) +#define ADM8211_IER_TDUIE (1 << 2) +#define ADM8211_IER_TPSIE (1 << 1) +#define ADM8211_IER_TCIE (1 << 0) + +/* CSR9 - SPR (Serial Port Register) */ +#define ADM8211_SPR_SRS (1 << 11) +#define ADM8211_SPR_SDO (1 << 3) +#define ADM8211_SPR_SDI (1 << 2) +#define ADM8211_SPR_SCLK (1 << 1) +#define ADM8211_SPR_SCS (1 << 0) + +/* CSR9A - CSR_TEST0 */ +#define ADM8211_CSR_TEST0_EPNE (1 << 18) +#define ADM8211_CSR_TEST0_EPSNM (1 << 17) +#define ADM8211_CSR_TEST0_EPTYP (1 << 16) +#define ADM8211_CSR_TEST0_EPRLD (1 << 15) + +/* CSR10 - WCSR (Wake-up Control/Status Register) */ +#define ADM8211_WCSR_CRCT (1 << 30) +#define ADM8211_WCSR_TSFTWE (1 << 20) +#define ADM8211_WCSR_TIMWE (1 << 19) +#define ADM8211_WCSR_ATIMWE (1 << 18) +#define ADM8211_WCSR_KEYWE (1 << 17) +#define ADM8211_WCSR_MPRE (1 << 9) +#define ADM8211_WCSR_LSOE (1 << 8) +#define ADM8211_WCSR_KEYUP (1 << 6) +#define ADM8211_WCSR_TSFTW (1 << 5) +#define ADM8211_WCSR_TIMW (1 << 4) +#define ADM8211_WCSR_ATIMW (1 << 3) +#define ADM8211_WCSR_MPR (1 << 1) +#define ADM8211_WCSR_LSO (1 << 0) + +/* CSR11A - GPIO */ +#define ADM8211_CSR_GPIO_EN5 (1 << 17) +#define ADM8211_CSR_GPIO_EN4 (1 << 16) +#define ADM8211_CSR_GPIO_EN3 (1 << 15) +#define ADM8211_CSR_GPIO_EN2 (1 << 14) 
+#define ADM8211_CSR_GPIO_EN1 (1 << 13) +#define ADM8211_CSR_GPIO_EN0 (1 << 12) +#define ADM8211_CSR_GPIO_O5 (1 << 11) +#define ADM8211_CSR_GPIO_O4 (1 << 10) +#define ADM8211_CSR_GPIO_O3 (1 << 9) +#define ADM8211_CSR_GPIO_O2 (1 << 8) +#define ADM8211_CSR_GPIO_O1 (1 << 7) +#define ADM8211_CSR_GPIO_O0 (1 << 6) +#define ADM8211_CSR_GPIO_IN 0x0000003f + +/* CSR12 - BBPCTL (BBP Control port) */ +#define ADM8211_BBPCTL_MMISEL (1 << 31) +#define ADM8211_BBPCTL_SPICADD (0x7F << 24) +#define ADM8211_BBPCTL_RF3000 (0x20 << 24) +#define ADM8211_BBPCTL_TXCE (1 << 23) +#define ADM8211_BBPCTL_RXCE (1 << 22) +#define ADM8211_BBPCTL_CCAP (1 << 21) +#define ADM8211_BBPCTL_TYPE 0x001c0000 +#define ADM8211_BBPCTL_WR (1 << 17) +#define ADM8211_BBPCTL_RD (1 << 16) +#define ADM8211_BBPCTL_ADDR 0x0000ff00 +#define ADM8211_BBPCTL_DATA 0x000000ff + +/* CSR12A - SYNCTL (Synthesizer Control port) */ +#define ADM8211_SYNCTL_WR (1 << 31) +#define ADM8211_SYNCTL_RD (1 << 30) +#define ADM8211_SYNCTL_CS0 (1 << 29) +#define ADM8211_SYNCTL_CS1 (1 << 28) +#define ADM8211_SYNCTL_CAL (1 << 27) +#define ADM8211_SYNCTL_SELCAL (1 << 26) +#define ADM8211_SYNCTL_RFtype ((1 << 24) | (1 << 23) | (1 << 22)) +#define ADM8211_SYNCTL_RFMD (1 << 22) +#define ADM8211_SYNCTL_GENERAL (0x7 << 22) +/* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */ + +/* CSR18 - CMDR (Command Register) */ +#define ADM8211_CMDR_PM (1 << 19) +#define ADM8211_CMDR_APM (1 << 18) +#define ADM8211_CMDR_RTE (1 << 4) +#define ADM8211_CMDR_DRT ((1 << 3) | (1 << 2)) +#define ADM8211_CMDR_DRT_8DW (0x0 << 2) +#define ADM8211_CMDR_DRT_16DW (0x1 << 2) +#define ADM8211_CMDR_DRT_SF (0x2 << 2) + +/* CSR33 - SYNRF (SYNRF direct control) */ +#define ADM8211_SYNRF_SELSYN (1 << 31) +#define ADM8211_SYNRF_SELRF (1 << 30) +#define ADM8211_SYNRF_LERF (1 << 29) +#define ADM8211_SYNRF_LEIF (1 << 28) +#define ADM8211_SYNRF_SYNCLK (1 << 27) +#define ADM8211_SYNRF_SYNDATA (1 << 26) +#define ADM8211_SYNRF_PE1 (1 << 25) +#define ADM8211_SYNRF_PE2 (1 << 24) +#define ADM8211_SYNRF_PA_PE (1 << 23) +#define ADM8211_SYNRF_TR_SW (1 << 22) +#define ADM8211_SYNRF_TR_SWN (1 << 21) +#define ADM8211_SYNRF_RADIO (1 << 20) +#define ADM8211_SYNRF_CAL_EN (1 << 19) +#define ADM8211_SYNRF_PHYRST (1 << 18) + +#define ADM8211_SYNRF_IF_SELECT_0 (1 << 31) +#define ADM8211_SYNRF_IF_SELECT_1 ((1 << 31) | (1 << 28)) +#define ADM8211_SYNRF_WRITE_SYNDATA_0 (1 << 31) +#define ADM8211_SYNRF_WRITE_SYNDATA_1 ((1 << 31) | (1 << 26)) +#define ADM8211_SYNRF_WRITE_CLOCK_0 (1 << 31) +#define ADM8211_SYNRF_WRITE_CLOCK_1 ((1 << 31) | (1 << 27)) + +/* CSR44 - WEPCTL (WEP Control) */ +#define ADM8211_WEPCTL_WEPENABLE (1 << 31) +#define ADM8211_WEPCTL_WPAENABLE (1 << 30) +#define ADM8211_WEPCTL_CURRENT_TABLE (1 << 29) +#define ADM8211_WEPCTL_TABLE_WR (1 << 28) +#define ADM8211_WEPCTL_TABLE_RD (1 << 27) +#define ADM8211_WEPCTL_WEPRXBYP (1 << 25) +#define ADM8211_WEPCTL_SEL_WEPTABLE (1 << 23) +#define ADM8211_WEPCTL_ADDR (0x000001ff) + +/* CSR45 - WESK (Data Entry for Share/Individual Key) */ +#define ADM8211_WESK_DATA (0x0000ffff) + +/* FER (Function Event Register) */ +#define ADM8211_FER_INTR_EV_ENT (1 << 15) + + +/* Si4126 RF Synthesizer - Control Registers */ +#define SI4126_MAIN_CONF 0 +#define SI4126_PHASE_DET_GAIN 1 +#define SI4126_POWERDOWN 2 +#define SI4126_RF1_N_DIV 3 /* only Si4136 */ +#define SI4126_RF2_N_DIV 4 +#define SI4126_IF_N_DIV 5 +#define SI4126_RF1_R_DIV 6 /* only Si4136 */ +#define SI4126_RF2_R_DIV 7 +#define SI4126_IF_R_DIV 8 + +/* Main Configuration */ +#define SI4126_MAIN_XINDIV2 (1 << 6) 
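The Si4126 programming in adm8211_rf_set_channel() encodes the 2.4 GHz channel
plan directly: with RF2_R_DIV and IF_R_DIV both fixed at 44, the RF2 N divider
it writes is always the channel's center frequency in MHz minus a constant 374
(an offset presumably absorbed by the fixed IF PLL, whose N of 1496 is 4 * 374).
A hypothetical helper makes the mapping explicit; it reproduces the literals
written to SI4126_RF2_N_DIV above (2038 for channel 1, 2098 for channel 13,
2110 for channel 14):

	static inline unsigned int si4126_rf2_n_for_channel(unsigned int chan)
	{
		/* 802.11b center frequencies: 2407 + 5 * chan MHz, 2484 for ch 14 */
		unsigned int mhz = (chan == 14) ? 2484 : 2407 + 5 * chan;

		return mhz - 374;	/* 2033 + 5 * chan, or 2110 for ch 14 */
	}
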
+#define SI4126_MAIN_IFDIV ((1 << 11) | (1 << 10)) +/* Powerdown */ +#define SI4126_POWERDOWN_PDIB (1 << 1) +#define SI4126_POWERDOWN_PDRB (1 << 0) + + +/* RF3000 BBP - Control Port Registers */ +/* 0x00 - reserved */ +#define RF3000_MODEM_CTRL__RX_STATUS 0x01 +#define RF3000_CCA_CTRL 0x02 +#define RF3000_DIVERSITY__RSSI 0x03 +#define RF3000_RX_SIGNAL_FIELD 0x04 +#define RF3000_RX_LEN_MSB 0x05 +#define RF3000_RX_LEN_LSB 0x06 +#define RF3000_RX_SERVICE_FIELD 0x07 +#define RF3000_TX_VAR_GAIN__TX_LEN_EXT 0x11 +#define RF3000_TX_LEN_MSB 0x12 +#define RF3000_TX_LEN_LSB 0x13 +#define RF3000_LOW_GAIN_CALIB 0x14 +#define RF3000_HIGH_GAIN_CALIB 0x15 + +/* ADM8211 revisions */ +#define ADM8211_REV_AB 0x11 +#define ADM8211_REV_AF 0x15 +#define ADM8211_REV_BA 0x20 +#define ADM8211_REV_CA 0x30 + +struct adm8211_desc { + __le32 status; + __le32 length; + __le32 buffer1; + __le32 buffer2; +}; + +#define RDES0_STATUS_OWN (1 << 31) +#define RDES0_STATUS_ES (1 << 30) +#define RDES0_STATUS_SQL (1 << 29) +#define RDES0_STATUS_DE (1 << 28) +#define RDES0_STATUS_FS (1 << 27) +#define RDES0_STATUS_LS (1 << 26) +#define RDES0_STATUS_PCF (1 << 25) +#define RDES0_STATUS_SFDE (1 << 24) +#define RDES0_STATUS_SIGE (1 << 23) +#define RDES0_STATUS_CRC16E (1 << 22) +#define RDES0_STATUS_RXTOE (1 << 21) +#define RDES0_STATUS_CRC32E (1 << 20) +#define RDES0_STATUS_ICVE (1 << 19) +#define RDES0_STATUS_DA1 (1 << 17) +#define RDES0_STATUS_DA0 (1 << 16) +#define RDES0_STATUS_RXDR ((1 << 15) | (1 << 14) | (1 << 13) | (1 << 12)) +#define RDES0_STATUS_FL (0x00000fff) + +#define RDES1_CONTROL_RER (1 << 25) +#define RDES1_CONTROL_RCH (1 << 24) +#define RDES1_CONTROL_RBS2 (0x00fff000) +#define RDES1_CONTROL_RBS1 (0x00000fff) + +#define RDES1_STATUS_RSSI (0x0000007f) + + +#define TDES0_CONTROL_OWN (1 << 31) +#define TDES0_CONTROL_DONE (1 << 30) +#define TDES0_CONTROL_TXDR (0x0ff00000) + +#define TDES0_STATUS_OWN (1 << 31) +#define TDES0_STATUS_DONE (1 << 30) +#define TDES0_STATUS_ES (1 << 29) +#define TDES0_STATUS_TLT (1 << 28) +#define TDES0_STATUS_TRT (1 << 27) +#define TDES0_STATUS_TUF (1 << 26) +#define TDES0_STATUS_TRO (1 << 25) +#define TDES0_STATUS_SOFBR (1 << 24) +#define TDES0_STATUS_ACR (0x00000fff) + +#define TDES1_CONTROL_IC (1 << 31) +#define TDES1_CONTROL_LS (1 << 30) +#define TDES1_CONTROL_FS (1 << 29) +#define TDES1_CONTROL_TER (1 << 25) +#define TDES1_CONTROL_TCH (1 << 24) +#define TDES1_CONTROL_RBS2 (0x00fff000) +#define TDES1_CONTROL_RBS1 (0x00000fff) + +/* SRAM offsets */ +#define ADM8211_SRAM(x) (priv->pdev->revision < ADM8211_REV_BA ? 
\ + ADM8211_SRAM_A_ ## x : ADM8211_SRAM_B_ ## x) + +#define ADM8211_SRAM_INDIV_KEY 0x0000 +#define ADM8211_SRAM_A_SHARE_KEY 0x0160 +#define ADM8211_SRAM_B_SHARE_KEY 0x00c0 + +#define ADM8211_SRAM_A_SSID 0x0180 +#define ADM8211_SRAM_B_SSID 0x00d4 +#define ADM8211_SRAM_SSID ADM8211_SRAM(SSID) + +#define ADM8211_SRAM_A_SUPP_RATE 0x0191 +#define ADM8211_SRAM_B_SUPP_RATE 0x00dd +#define ADM8211_SRAM_SUPP_RATE ADM8211_SRAM(SUPP_RATE) + +#define ADM8211_SRAM_A_SIZE 0x0200 +#define ADM8211_SRAM_B_SIZE 0x01c0 +#define ADM8211_SRAM_SIZE ADM8211_SRAM(SIZE) + +struct adm8211_rx_ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; + +struct adm8211_tx_ring_info { + struct sk_buff *skb; + dma_addr_t mapping; + size_t hdrlen; +}; + +#define PLCP_SIGNAL_1M 0x0a +#define PLCP_SIGNAL_2M 0x14 +#define PLCP_SIGNAL_5M5 0x37 +#define PLCP_SIGNAL_11M 0x6e + +struct adm8211_tx_hdr { + u8 da[6]; + u8 signal; /* PLCP signal / TX rate in 100 Kbps */ + u8 service; + __le16 frame_body_size; + __le16 frame_control; + __le16 plcp_frag_tail_len; + __le16 plcp_frag_head_len; + __le16 dur_frag_tail; + __le16 dur_frag_head; + u8 addr4[6]; + +#define ADM8211_TXHDRCTL_SHORT_PREAMBLE (1 << 0) +#define ADM8211_TXHDRCTL_MORE_FRAG (1 << 1) +#define ADM8211_TXHDRCTL_MORE_DATA (1 << 2) +#define ADM8211_TXHDRCTL_FRAG_NO (1 << 3) /* ? */ +#define ADM8211_TXHDRCTL_ENABLE_RTS (1 << 4) +#define ADM8211_TXHDRCTL_ENABLE_WEP_ENGINE (1 << 5) +#define ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER (1 << 15) /* ? */ + __le16 header_control; + __le16 frag; + u8 reserved_0; + u8 retry_limit; + + u32 wep2key0; + u32 wep2key1; + u32 wep2key2; + u32 wep2key3; + + u8 keyid; + u8 entry_control; // huh?? + u16 reserved_1; + u32 reserved_2; +} __attribute__ ((packed)); + + +#define RX_COPY_BREAK 128 +#define RX_PKT_SIZE 2500 + +struct adm8211_eeprom { + __le16 signature; /* 0x00 */ + u8 major_version; /* 0x02 */ + u8 minor_version; /* 0x03 */ + u8 reserved_1[4]; /* 0x04 */ + u8 hwaddr[6]; /* 0x08 */ + u8 reserved_2[8]; /* 0x1E */ + __le16 cr49; /* 0x16 */ + u8 cr03; /* 0x18 */ + u8 cr28; /* 0x19 */ + u8 cr29; /* 0x1A */ + u8 country_code; /* 0x1B */ + +/* specific bbp types */ +#define ADM8211_BBP_RFMD3000 0x00 +#define ADM8211_BBP_RFMD3002 0x01 +#define ADM8211_BBP_ADM8011 0x04 + u8 specific_bbptype; /* 0x1C */ + u8 specific_rftype; /* 0x1D */ + u8 reserved_3[2]; /* 0x1E */ + __le16 device_id; /* 0x20 */ + __le16 vendor_id; /* 0x22 */ + __le16 subsystem_id; /* 0x24 */ + __le16 subsystem_vendor_id; /* 0x26 */ + u8 maxlat; /* 0x28 */ + u8 mingnt; /* 0x29 */ + __le16 cis_pointer_low; /* 0x2A */ + __le16 cis_pointer_high; /* 0x2C */ + __le16 csr18; /* 0x2E */ + u8 reserved_4[16]; /* 0x30 */ + u8 d1_pwrdara; /* 0x40 */ + u8 d0_pwrdara; /* 0x41 */ + u8 d3_pwrdara; /* 0x42 */ + u8 d2_pwrdara; /* 0x43 */ + u8 antenna_power[14]; /* 0x44 */ + __le16 cis_wordcnt; /* 0x52 */ + u8 tx_power[14]; /* 0x54 */ + u8 lpf_cutoff[14]; /* 0x62 */ + u8 lnags_threshold[14]; /* 0x70 */ + __le16 checksum; /* 0x7E */ + u8 cis_data[0]; /* 0x80, 384 bytes */ +} __attribute__ ((packed)); + +struct adm8211_priv { + struct pci_dev *pdev; + spinlock_t lock; + struct adm8211_csr __iomem *map; + struct adm8211_desc *rx_ring; + struct adm8211_desc *tx_ring; + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + struct adm8211_rx_ring_info *rx_buffers; + struct adm8211_tx_ring_info *tx_buffers; + unsigned int rx_ring_size, tx_ring_size; + unsigned int cur_tx, dirty_tx, cur_rx; + + struct ieee80211_low_level_stats stats; + struct ieee80211_supported_band band; + struct ieee80211_channel 
channels[14]; + int mode; + + int channel; + u8 bssid[ETH_ALEN]; + + u8 soft_rx_crc; + u8 retry_limit; + + u8 ant_power; + u8 tx_power; + u8 lpf_cutoff; + u8 lnags_threshold; + struct adm8211_eeprom *eeprom; + size_t eeprom_len; + + u32 nar; + +#define ADM8211_TYPE_INTERSIL 0x00 +#define ADM8211_TYPE_RFMD 0x01 +#define ADM8211_TYPE_MARVEL 0x02 +#define ADM8211_TYPE_AIROHA 0x03 +#define ADM8211_TYPE_ADMTEK 0x05 + unsigned int rf_type:3; + unsigned int bbp_type:3; + + u8 specific_bbptype; + enum { + ADM8211_RFMD2948 = 0x0, + ADM8211_RFMD2958 = 0x1, + ADM8211_RFMD2958_RF3000_CONTROL_POWER = 0x2, + ADM8211_MAX2820 = 0x8, + ADM8211_AL2210L = 0xC, /* Airoha */ + } transceiver_type; +}; + +struct ieee80211_chan_range { + u8 min; + u8 max; +}; + +static const struct ieee80211_chan_range cranges[] = { + {1, 11}, /* FCC */ + {1, 11}, /* IC */ + {1, 13}, /* ETSI */ + {10, 11}, /* SPAIN */ + {10, 13}, /* FRANCE */ + {14, 14}, /* MMK */ + {1, 14}, /* MMK2 */ +}; + +#endif /* ADM8211_H */ diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c new file mode 100644 index 0000000..0fb4199 --- /dev/null +++ b/drivers/net/wireless/at76c50x-usb.c @@ -0,0 +1,2512 @@ +/* + * at76c503/at76c505 USB driver + * + * Copyright (c) 2002 - 2003 Oliver Kurth + * Copyright (c) 2004 Joerg Albert + * Copyright (c) 2004 Nick Jones + * Copyright (c) 2004 Balint Seeber + * Copyright (c) 2007 Guido Guenther + * Copyright (c) 2007 Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This file is part of the Berlios driver for WLAN USB devices based on the + * Atmel AT76C503A/505/505A. + * + * Some iw_handler code was taken from airo.c, (C) 1999 Benjamin Reed + * + * TODO list is at the wiki: + * + * http://wireless.kernel.org/en/users/Drivers/at76c50x-usb#TODO + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "at76c50x-usb.h" + +/* Version information */ +#define DRIVER_NAME "at76c50x-usb" +#define DRIVER_VERSION "0.17" +#define DRIVER_DESC "Atmel at76x USB Wireless LAN Driver" + +/* at76_debug bits */ +#define DBG_PROGRESS 0x00000001 /* authentication/accociation */ +#define DBG_BSS_TABLE 0x00000002 /* show BSS table after scans */ +#define DBG_IOCTL 0x00000004 /* ioctl calls / settings */ +#define DBG_MAC_STATE 0x00000008 /* MAC state transitions */ +#define DBG_TX_DATA 0x00000010 /* tx header */ +#define DBG_TX_DATA_CONTENT 0x00000020 /* tx content */ +#define DBG_TX_MGMT 0x00000040 /* tx management */ +#define DBG_RX_DATA 0x00000080 /* rx data header */ +#define DBG_RX_DATA_CONTENT 0x00000100 /* rx data content */ +#define DBG_RX_MGMT 0x00000200 /* rx mgmt frame headers */ +#define DBG_RX_BEACON 0x00000400 /* rx beacon */ +#define DBG_RX_CTRL 0x00000800 /* rx control */ +#define DBG_RX_MGMT_CONTENT 0x00001000 /* rx mgmt content */ +#define DBG_RX_FRAGS 0x00002000 /* rx data fragment handling */ +#define DBG_DEVSTART 0x00004000 /* fw download, device start */ +#define DBG_URB 0x00008000 /* rx urb status, ... 
*/ +#define DBG_RX_ATMEL_HDR 0x00010000 /* Atmel-specific Rx headers */ +#define DBG_PROC_ENTRY 0x00020000 /* procedure entries/exits */ +#define DBG_PM 0x00040000 /* power management settings */ +#define DBG_BSS_MATCH 0x00080000 /* BSS match failures */ +#define DBG_PARAMS 0x00100000 /* show configured parameters */ +#define DBG_WAIT_COMPLETE 0x00200000 /* command completion */ +#define DBG_RX_FRAGS_SKB 0x00400000 /* skb header of Rx fragments */ +#define DBG_BSS_TABLE_RM 0x00800000 /* purging bss table entries */ +#define DBG_MONITOR_MODE 0x01000000 /* monitor mode */ +#define DBG_MIB 0x02000000 /* dump all MIBs on startup */ +#define DBG_MGMT_TIMER 0x04000000 /* dump mgmt_timer ops */ +#define DBG_WE_EVENTS 0x08000000 /* dump wireless events */ +#define DBG_FW 0x10000000 /* firmware download */ +#define DBG_DFU 0x20000000 /* device firmware upgrade */ +#define DBG_CMD 0x40000000 +#define DBG_MAC80211 0x80000000 + +#define DBG_DEFAULTS 0 + +/* Use our own dbg macro */ +#define at76_dbg(bits, format, arg...) \ + do { \ + if (at76_debug & (bits)) \ + printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , \ + ## arg); \ + } while (0) + +#define at76_dbg_dump(bits, buf, len, format, arg...) \ + do { \ + if (at76_debug & (bits)) { \ + printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , \ + ## arg); \ + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, \ + buf, len); \ + } \ + } while (0) + +static uint at76_debug = DBG_DEFAULTS; + +/* Protect against concurrent firmware loading and parsing */ +static struct mutex fw_mutex; + +static struct fwentry firmwares[] = { + [0] = { "" }, + [BOARD_503_ISL3861] = { "atmel_at76c503-i3861.bin" }, + [BOARD_503_ISL3863] = { "atmel_at76c503-i3863.bin" }, + [BOARD_503] = { "atmel_at76c503-rfmd.bin" }, + [BOARD_503_ACC] = { "atmel_at76c503-rfmd-acc.bin" }, + [BOARD_505] = { "atmel_at76c505-rfmd.bin" }, + [BOARD_505_2958] = { "atmel_at76c505-rfmd2958.bin" }, + [BOARD_505A] = { "atmel_at76c505a-rfmd2958.bin" }, + [BOARD_505AMX] = { "atmel_at76c505amx-rfmd.bin" }, +}; +MODULE_FIRMWARE("atmel_at76c503-i3861.bin"); +MODULE_FIRMWARE("atmel_at76c503-i3863.bin"); +MODULE_FIRMWARE("atmel_at76c503-rfmd.bin"); +MODULE_FIRMWARE("atmel_at76c503-rfmd-acc.bin"); +MODULE_FIRMWARE("atmel_at76c505-rfmd.bin"); +MODULE_FIRMWARE("atmel_at76c505-rfmd2958.bin"); +MODULE_FIRMWARE("atmel_at76c505a-rfmd2958.bin"); +MODULE_FIRMWARE("atmel_at76c505amx-rfmd.bin"); + +#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops) + +static struct usb_device_id dev_table[] = { + /* + * at76c503-i3861 + */ + /* Generic AT76C503/3861 device */ + { USB_DEVICE(0x03eb, 0x7603), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* Linksys WUSB11 v2.1/v2.6 */ + { USB_DEVICE(0x066b, 0x2211), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* Netgear MA101 rev. 
A */ + { USB_DEVICE(0x0864, 0x4100), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* Tekram U300C / Allnet ALL0193 */ + { USB_DEVICE(0x0b3b, 0x1612), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* HP HN210W J7801A */ + { USB_DEVICE(0x03f0, 0x011c), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* Sitecom/Z-Com/Zyxel M4Y-750 */ + { USB_DEVICE(0x0cde, 0x0001), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* Dynalink/Askey WLL013 (intersil) */ + { USB_DEVICE(0x069a, 0x0320), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* EZ connect 11Mpbs Wireless USB Adapter SMC2662W v1 */ + { USB_DEVICE(0x0d5c, 0xa001), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* BenQ AWL300 */ + { USB_DEVICE(0x04a5, 0x9000), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* Addtron AWU-120, Compex WLU11 */ + { USB_DEVICE(0x05dd, 0xff31), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* Intel AP310 AnyPoint II USB */ + { USB_DEVICE(0x8086, 0x0200), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* Dynalink L11U */ + { USB_DEVICE(0x0d8e, 0x7100), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* Arescom WL-210, FCC id 07J-GL2411USB */ + { USB_DEVICE(0x0d8e, 0x7110), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* I-O DATA WN-B11/USB */ + { USB_DEVICE(0x04bb, 0x0919), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* BT Voyager 1010 */ + { USB_DEVICE(0x069a, 0x0821), USB_DEVICE_DATA(BOARD_503_ISL3861) }, + /* + * at76c503-i3863 + */ + /* Generic AT76C503/3863 device */ + { USB_DEVICE(0x03eb, 0x7604), USB_DEVICE_DATA(BOARD_503_ISL3863) }, + /* Samsung SWL-2100U */ + { USB_DEVICE(0x055d, 0xa000), USB_DEVICE_DATA(BOARD_503_ISL3863) }, + /* + * at76c503-rfmd + */ + /* Generic AT76C503/RFMD device */ + { USB_DEVICE(0x03eb, 0x7605), USB_DEVICE_DATA(BOARD_503) }, + /* Dynalink/Askey WLL013 (rfmd) */ + { USB_DEVICE(0x069a, 0x0321), USB_DEVICE_DATA(BOARD_503) }, + /* Linksys WUSB11 v2.6 */ + { USB_DEVICE(0x077b, 0x2219), USB_DEVICE_DATA(BOARD_503) }, + /* Network Everywhere NWU11B */ + { USB_DEVICE(0x077b, 0x2227), USB_DEVICE_DATA(BOARD_503) }, + /* Netgear MA101 rev. B */ + { USB_DEVICE(0x0864, 0x4102), USB_DEVICE_DATA(BOARD_503) }, + /* D-Link DWL-120 rev. 
E */ + { USB_DEVICE(0x2001, 0x3200), USB_DEVICE_DATA(BOARD_503) }, + /* Actiontec 802UAT1, HWU01150-01UK */ + { USB_DEVICE(0x1668, 0x7605), USB_DEVICE_DATA(BOARD_503) }, + /* AirVast W-Buddie WN210 */ + { USB_DEVICE(0x03eb, 0x4102), USB_DEVICE_DATA(BOARD_503) }, + /* Dick Smith Electronics XH1153 802.11b USB adapter */ + { USB_DEVICE(0x1371, 0x5743), USB_DEVICE_DATA(BOARD_503) }, + /* CNet CNUSB611 */ + { USB_DEVICE(0x1371, 0x0001), USB_DEVICE_DATA(BOARD_503) }, + /* FiberLine FL-WL200U */ + { USB_DEVICE(0x1371, 0x0002), USB_DEVICE_DATA(BOARD_503) }, + /* BenQ AWL400 USB stick */ + { USB_DEVICE(0x04a5, 0x9001), USB_DEVICE_DATA(BOARD_503) }, + /* 3Com 3CRSHEW696 */ + { USB_DEVICE(0x0506, 0x0a01), USB_DEVICE_DATA(BOARD_503) }, + /* Siemens Santis ADSL WLAN USB adapter WLL 013 */ + { USB_DEVICE(0x0681, 0x001b), USB_DEVICE_DATA(BOARD_503) }, + /* Belkin F5D6050, version 2 */ + { USB_DEVICE(0x050d, 0x0050), USB_DEVICE_DATA(BOARD_503) }, + /* iBlitzz, BWU613 (not *B or *SB) */ + { USB_DEVICE(0x07b8, 0xb000), USB_DEVICE_DATA(BOARD_503) }, + /* Gigabyte GN-WLBM101 */ + { USB_DEVICE(0x1044, 0x8003), USB_DEVICE_DATA(BOARD_503) }, + /* Planex GW-US11S */ + { USB_DEVICE(0x2019, 0x3220), USB_DEVICE_DATA(BOARD_503) }, + /* Internal WLAN adapter in h5[4,5]xx series iPAQs */ + { USB_DEVICE(0x049f, 0x0032), USB_DEVICE_DATA(BOARD_503) }, + /* Corega Wireless LAN USB-11 mini */ + { USB_DEVICE(0x07aa, 0x0011), USB_DEVICE_DATA(BOARD_503) }, + /* Corega Wireless LAN USB-11 mini2 */ + { USB_DEVICE(0x07aa, 0x0018), USB_DEVICE_DATA(BOARD_503) }, + /* Uniden PCW100 */ + { USB_DEVICE(0x05dd, 0xff35), USB_DEVICE_DATA(BOARD_503) }, + /* + * at76c503-rfmd-acc + */ + /* SMC2664W */ + { USB_DEVICE(0x083a, 0x3501), USB_DEVICE_DATA(BOARD_503_ACC) }, + /* Belkin F5D6050, SMC2662W v2, SMC2662W-AR */ + { USB_DEVICE(0x0d5c, 0xa002), USB_DEVICE_DATA(BOARD_503_ACC) }, + /* + * at76c505-rfmd + */ + /* Generic AT76C505/RFMD */ + { USB_DEVICE(0x03eb, 0x7606), USB_DEVICE_DATA(BOARD_505) }, + /* + * at76c505-rfmd2958 + */ + /* Generic AT76C505/RFMD, OvisLink WL-1130USB */ + { USB_DEVICE(0x03eb, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) }, + /* Fiberline FL-WL240U */ + { USB_DEVICE(0x1371, 0x0014), USB_DEVICE_DATA(BOARD_505_2958) }, + /* CNet CNUSB-611G */ + { USB_DEVICE(0x1371, 0x0013), USB_DEVICE_DATA(BOARD_505_2958) }, + /* Linksys WUSB11 v2.8 */ + { USB_DEVICE(0x1915, 0x2233), USB_DEVICE_DATA(BOARD_505_2958) }, + /* Xterasys XN-2122B, IBlitzz BWU613B/BWU613SB */ + { USB_DEVICE(0x12fd, 0x1001), USB_DEVICE_DATA(BOARD_505_2958) }, + /* Corega WLAN USB Stick 11 */ + { USB_DEVICE(0x07aa, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) }, + /* Microstar MSI Box MS6978 */ + { USB_DEVICE(0x0db0, 0x1020), USB_DEVICE_DATA(BOARD_505_2958) }, + /* + * at76c505a-rfmd2958 + */ + /* Generic AT76C505A device */ + { USB_DEVICE(0x03eb, 0x7614), USB_DEVICE_DATA(BOARD_505A) }, + /* Generic AT76C505AS device */ + { USB_DEVICE(0x03eb, 0x7617), USB_DEVICE_DATA(BOARD_505A) }, + /* Siemens Gigaset USB WLAN Adapter 11 */ + { USB_DEVICE(0x1690, 0x0701), USB_DEVICE_DATA(BOARD_505A) }, + /* OQO Model 01+ Internal Wi-Fi */ + { USB_DEVICE(0x1557, 0x0002), USB_DEVICE_DATA(BOARD_505A) }, + /* + * at76c505amx-rfmd + */ + /* Generic AT76C505AMX device */ + { USB_DEVICE(0x03eb, 0x7615), USB_DEVICE_DATA(BOARD_505AMX) }, + { } +}; + +MODULE_DEVICE_TABLE(usb, dev_table); + +/* Supported rates of this hardware, bit 7 marks basic rates */ +static const u8 hw_rates[] = { 0x82, 0x84, 0x0b, 0x16 }; + +static const char *const preambles[] = { "long", "short", "auto" }; + +/* 
Firmware download */ +/* DFU states */ +#define STATE_IDLE 0x00 +#define STATE_DETACH 0x01 +#define STATE_DFU_IDLE 0x02 +#define STATE_DFU_DOWNLOAD_SYNC 0x03 +#define STATE_DFU_DOWNLOAD_BUSY 0x04 +#define STATE_DFU_DOWNLOAD_IDLE 0x05 +#define STATE_DFU_MANIFEST_SYNC 0x06 +#define STATE_DFU_MANIFEST 0x07 +#define STATE_DFU_MANIFEST_WAIT_RESET 0x08 +#define STATE_DFU_UPLOAD_IDLE 0x09 +#define STATE_DFU_ERROR 0x0a + +/* DFU commands */ +#define DFU_DETACH 0 +#define DFU_DNLOAD 1 +#define DFU_UPLOAD 2 +#define DFU_GETSTATUS 3 +#define DFU_CLRSTATUS 4 +#define DFU_GETSTATE 5 +#define DFU_ABORT 6 + +#define FW_BLOCK_SIZE 1024 + +struct dfu_status { + unsigned char status; + unsigned char poll_timeout[3]; + unsigned char state; + unsigned char string; +} __attribute__((packed)); + +static inline int at76_is_intersil(enum board_type board) +{ + return (board == BOARD_503_ISL3861 || board == BOARD_503_ISL3863); +} + +static inline int at76_is_503rfmd(enum board_type board) +{ + return (board == BOARD_503 || board == BOARD_503_ACC); +} + +static inline int at76_is_505a(enum board_type board) +{ + return (board == BOARD_505A || board == BOARD_505AMX); +} + +/* Load a block of the first (internal) part of the firmware */ +static int at76_load_int_fw_block(struct usb_device *udev, int blockno, + void *block, int size) +{ + return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), DFU_DNLOAD, + USB_TYPE_CLASS | USB_DIR_OUT | + USB_RECIP_INTERFACE, blockno, 0, block, size, + USB_CTRL_GET_TIMEOUT); +} + +static int at76_dfu_get_status(struct usb_device *udev, + struct dfu_status *status) +{ + int ret; + + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DFU_GETSTATUS, + USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, + 0, 0, status, sizeof(struct dfu_status), + USB_CTRL_GET_TIMEOUT); + return ret; +} + +static u8 at76_dfu_get_state(struct usb_device *udev, u8 *state) +{ + int ret; + + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DFU_GETSTATE, + USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, + 0, 0, state, 1, USB_CTRL_GET_TIMEOUT); + return ret; +} + +/* Convert timeout from the DFU status to jiffies */ +static inline unsigned long at76_get_timeout(struct dfu_status *s) +{ + return msecs_to_jiffies((s->poll_timeout[2] << 16) + | (s->poll_timeout[1] << 8) + | (s->poll_timeout[0])); +} + +/* Load internal firmware from the buffer. If manifest_sync_timeout > 0, use + * its value in jiffies in the MANIFEST_SYNC state. 
*/ +static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size, + int manifest_sync_timeout) +{ + u8 *block; + struct dfu_status dfu_stat_buf; + int ret = 0; + int need_dfu_state = 1; + int is_done = 0; + u8 dfu_state = 0; + u32 dfu_timeout = 0; + int bsize = 0; + int blockno = 0; + + at76_dbg(DBG_DFU, "%s( %p, %u, %d)", __func__, buf, size, + manifest_sync_timeout); + + if (!size) { + dev_printk(KERN_ERR, &udev->dev, "FW buffer length invalid!\n"); + return -EINVAL; + } + + block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL); + if (!block) + return -ENOMEM; + + do { + if (need_dfu_state) { + ret = at76_dfu_get_state(udev, &dfu_state); + if (ret < 0) { + dev_printk(KERN_ERR, &udev->dev, + "cannot get DFU state: %d\n", ret); + goto exit; + } + need_dfu_state = 0; + } + + switch (dfu_state) { + case STATE_DFU_DOWNLOAD_SYNC: + at76_dbg(DBG_DFU, "STATE_DFU_DOWNLOAD_SYNC"); + ret = at76_dfu_get_status(udev, &dfu_stat_buf); + if (ret >= 0) { + dfu_state = dfu_stat_buf.state; + dfu_timeout = at76_get_timeout(&dfu_stat_buf); + need_dfu_state = 0; + } else + dev_printk(KERN_ERR, &udev->dev, + "at76_dfu_get_status returned %d\n", + ret); + break; + + case STATE_DFU_DOWNLOAD_BUSY: + at76_dbg(DBG_DFU, "STATE_DFU_DOWNLOAD_BUSY"); + need_dfu_state = 1; + + at76_dbg(DBG_DFU, "DFU: Resetting device"); + schedule_timeout_interruptible(dfu_timeout); + break; + + case STATE_DFU_DOWNLOAD_IDLE: + at76_dbg(DBG_DFU, "DOWNLOAD..."); + /* fall through */ + case STATE_DFU_IDLE: + at76_dbg(DBG_DFU, "DFU IDLE"); + + bsize = min_t(int, size, FW_BLOCK_SIZE); + memcpy(block, buf, bsize); + at76_dbg(DBG_DFU, "int fw, size left = %5d, " + "bsize = %4d, blockno = %2d", size, bsize, + blockno); + ret = + at76_load_int_fw_block(udev, blockno, block, bsize); + buf += bsize; + size -= bsize; + blockno++; + + if (ret != bsize) + dev_printk(KERN_ERR, &udev->dev, + "at76_load_int_fw_block " + "returned %d\n", ret); + need_dfu_state = 1; + break; + + case STATE_DFU_MANIFEST_SYNC: + at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST_SYNC"); + + ret = at76_dfu_get_status(udev, &dfu_stat_buf); + if (ret < 0) + break; + + dfu_state = dfu_stat_buf.state; + dfu_timeout = at76_get_timeout(&dfu_stat_buf); + need_dfu_state = 0; + + /* override the timeout from the status response, + needed for AT76C505A */ + if (manifest_sync_timeout > 0) + dfu_timeout = manifest_sync_timeout; + + at76_dbg(DBG_DFU, "DFU: Waiting for manifest phase"); + schedule_timeout_interruptible(dfu_timeout); + break; + + case STATE_DFU_MANIFEST: + at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST"); + is_done = 1; + break; + + case STATE_DFU_MANIFEST_WAIT_RESET: + at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST_WAIT_RESET"); + is_done = 1; + break; + + case STATE_DFU_UPLOAD_IDLE: + at76_dbg(DBG_DFU, "STATE_DFU_UPLOAD_IDLE"); + break; + + case STATE_DFU_ERROR: + at76_dbg(DBG_DFU, "STATE_DFU_ERROR"); + ret = -EPIPE; + break; + + default: + at76_dbg(DBG_DFU, "DFU UNKNOWN STATE (%d)", dfu_state); + ret = -EINVAL; + break; + } + } while (!is_done && (ret >= 0)); + +exit: + kfree(block); + if (ret >= 0) + ret = 0; + + return ret; +} + +#define HEX2STR_BUFFERS 4 +#define HEX2STR_MAX_LEN 64 +#define BIN2HEX(x) ((x) < 10 ? 
'0' + (x) : (x) + 'A' - 10) + +/* Convert binary data into hex string */ +static char *hex2str(void *buf, int len) +{ + static atomic_t a = ATOMIC_INIT(0); + static char bufs[HEX2STR_BUFFERS][3 * HEX2STR_MAX_LEN + 1]; + char *ret = bufs[atomic_inc_return(&a) & (HEX2STR_BUFFERS - 1)]; + char *obuf = ret; + u8 *ibuf = buf; + + if (len > HEX2STR_MAX_LEN) + len = HEX2STR_MAX_LEN; + + if (len <= 0) { + ret[0] = '\0'; + return ret; + } + + while (len--) { + *obuf++ = BIN2HEX(*ibuf >> 4); + *obuf++ = BIN2HEX(*ibuf & 0xf); + *obuf++ = '-'; + ibuf++; + } + *(--obuf) = '\0'; + + return ret; +} + +/* LED trigger */ +static int tx_activity; +static void at76_ledtrig_tx_timerfunc(unsigned long data); +static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc, 0, 0); +DEFINE_LED_TRIGGER(ledtrig_tx); + +static void at76_ledtrig_tx_timerfunc(unsigned long data) +{ + static int tx_lastactivity; + + if (tx_lastactivity != tx_activity) { + tx_lastactivity = tx_activity; + led_trigger_event(ledtrig_tx, LED_FULL); + mod_timer(&ledtrig_tx_timer, jiffies + HZ / 4); + } else + led_trigger_event(ledtrig_tx, LED_OFF); +} + +static void at76_ledtrig_tx_activity(void) +{ + tx_activity++; + if (!timer_pending(&ledtrig_tx_timer)) + mod_timer(&ledtrig_tx_timer, jiffies + HZ / 4); +} + +static int at76_remap(struct usb_device *udev) +{ + int ret; + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0a, + USB_TYPE_VENDOR | USB_DIR_OUT | + USB_RECIP_INTERFACE, 0, 0, NULL, 0, + USB_CTRL_GET_TIMEOUT); + if (ret < 0) + return ret; + return 0; +} + +static int at76_get_op_mode(struct usb_device *udev) +{ + int ret; + u8 saved; + u8 *op_mode; + + op_mode = kmalloc(1, GFP_NOIO); + if (!op_mode) + return -ENOMEM; + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33, + USB_TYPE_VENDOR | USB_DIR_IN | + USB_RECIP_INTERFACE, 0x01, 0, op_mode, 1, + USB_CTRL_GET_TIMEOUT); + saved = *op_mode; + kfree(op_mode); + + if (ret < 0) + return ret; + else if (ret < 1) + return -EIO; + else + return saved; +} + +/* Load a block of the second ("external") part of the firmware */ +static inline int at76_load_ext_fw_block(struct usb_device *udev, int blockno, + void *block, int size) +{ + return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e, + USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, + 0x0802, blockno, block, size, + USB_CTRL_GET_TIMEOUT); +} + +static inline int at76_get_hw_cfg(struct usb_device *udev, + union at76_hwcfg *buf, int buf_size) +{ + return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33, + USB_TYPE_VENDOR | USB_DIR_IN | + USB_RECIP_INTERFACE, 0x0a02, 0, + buf, buf_size, USB_CTRL_GET_TIMEOUT); +} + +/* Intersil boards use a different "value" for GetHWConfig requests */ +static inline int at76_get_hw_cfg_intersil(struct usb_device *udev, + union at76_hwcfg *buf, int buf_size) +{ + return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33, + USB_TYPE_VENDOR | USB_DIR_IN | + USB_RECIP_INTERFACE, 0x0902, 0, + buf, buf_size, USB_CTRL_GET_TIMEOUT); +} + +/* Get the hardware configuration for the adapter and put it to the appropriate + * fields of 'priv' (the GetHWConfig request and interpretation of the result + * depends on the board type) */ +static int at76_get_hw_config(struct at76_priv *priv) +{ + int ret; + union at76_hwcfg *hwcfg = kmalloc(sizeof(*hwcfg), GFP_KERNEL); + + if (!hwcfg) + return -ENOMEM; + + if (at76_is_intersil(priv->board_type)) { + ret = at76_get_hw_cfg_intersil(priv->udev, hwcfg, + sizeof(hwcfg->i)); + if (ret < 0) + goto exit; + memcpy(priv->mac_addr, hwcfg->i.mac_addr, 
ETH_ALEN); + priv->regulatory_domain = hwcfg->i.regulatory_domain; + } else if (at76_is_503rfmd(priv->board_type)) { + ret = at76_get_hw_cfg(priv->udev, hwcfg, sizeof(hwcfg->r3)); + if (ret < 0) + goto exit; + memcpy(priv->mac_addr, hwcfg->r3.mac_addr, ETH_ALEN); + priv->regulatory_domain = hwcfg->r3.regulatory_domain; + } else { + ret = at76_get_hw_cfg(priv->udev, hwcfg, sizeof(hwcfg->r5)); + if (ret < 0) + goto exit; + memcpy(priv->mac_addr, hwcfg->r5.mac_addr, ETH_ALEN); + priv->regulatory_domain = hwcfg->r5.regulatory_domain; + } + +exit: + kfree(hwcfg); + if (ret < 0) + printk(KERN_ERR "%s: cannot get HW Config (error %d)\n", + wiphy_name(priv->hw->wiphy), ret); + + return ret; +} + +static struct reg_domain const *at76_get_reg_domain(u16 code) +{ + int i; + static struct reg_domain const fd_tab[] = { + { 0x10, "FCC (USA)", 0x7ff }, /* ch 1-11 */ + { 0x20, "IC (Canada)", 0x7ff }, /* ch 1-11 */ + { 0x30, "ETSI (most of Europe)", 0x1fff }, /* ch 1-13 */ + { 0x31, "Spain", 0x600 }, /* ch 10-11 */ + { 0x32, "France", 0x1e00 }, /* ch 10-13 */ + { 0x40, "MKK (Japan)", 0x2000 }, /* ch 14 */ + { 0x41, "MKK1 (Japan)", 0x3fff }, /* ch 1-14 */ + { 0x50, "Israel", 0x3fc }, /* ch 3-9 */ + { 0x00, "", 0xffffffff } /* ch 1-32 */ + }; + + /* Last entry is fallback for unknown domain code */ + for (i = 0; i < ARRAY_SIZE(fd_tab) - 1; i++) + if (code == fd_tab[i].code) + break; + + return &fd_tab[i]; +} + +static inline int at76_get_mib(struct usb_device *udev, u16 mib, void *buf, + int buf_size) +{ + int ret; + + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33, + USB_TYPE_VENDOR | USB_DIR_IN | + USB_RECIP_INTERFACE, mib << 8, 0, buf, buf_size, + USB_CTRL_GET_TIMEOUT); + if (ret >= 0 && ret != buf_size) + return -EIO; + return ret; +} + +/* Return positive number for status, negative for an error */ +static inline int at76_get_cmd_status(struct usb_device *udev, u8 cmd) +{ + u8 *stat_buf; + int ret; + + stat_buf = kmalloc(40, GFP_NOIO); + if (!stat_buf) + return -ENOMEM; + + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x22, + USB_TYPE_VENDOR | USB_DIR_IN | + USB_RECIP_INTERFACE, cmd, 0, stat_buf, + 40, USB_CTRL_GET_TIMEOUT); + if (ret >= 0) + ret = stat_buf[5]; + kfree(stat_buf); + + return ret; +} + +#define MAKE_CMD_CASE(c) case (c): return #c +static const char *at76_get_cmd_string(u8 cmd_status) +{ + switch (cmd_status) { + MAKE_CMD_CASE(CMD_SET_MIB); + MAKE_CMD_CASE(CMD_GET_MIB); + MAKE_CMD_CASE(CMD_SCAN); + MAKE_CMD_CASE(CMD_JOIN); + MAKE_CMD_CASE(CMD_START_IBSS); + MAKE_CMD_CASE(CMD_RADIO_ON); + MAKE_CMD_CASE(CMD_RADIO_OFF); + MAKE_CMD_CASE(CMD_STARTUP); + } + + return "UNKNOWN"; +} + +static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf, + int buf_size) +{ + int ret; + struct at76_command *cmd_buf = kmalloc(sizeof(struct at76_command) + + buf_size, GFP_KERNEL); + + if (!cmd_buf) + return -ENOMEM; + + cmd_buf->cmd = cmd; + cmd_buf->reserved = 0; + cmd_buf->size = cpu_to_le16(buf_size); + memcpy(cmd_buf->data, buf, buf_size); + + at76_dbg_dump(DBG_CMD, cmd_buf, sizeof(struct at76_command) + buf_size, + "issuing command %s (0x%02x)", + at76_get_cmd_string(cmd), cmd); + + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e, + USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, + 0, 0, cmd_buf, + sizeof(struct at76_command) + buf_size, + USB_CTRL_GET_TIMEOUT); + kfree(cmd_buf); + return ret; +} + +#define MAKE_CMD_STATUS_CASE(c) case (c): return #c +static const char *at76_get_cmd_status_string(u8 cmd_status) +{ + switch (cmd_status) { + 
MAKE_CMD_STATUS_CASE(CMD_STATUS_IDLE); + MAKE_CMD_STATUS_CASE(CMD_STATUS_COMPLETE); + MAKE_CMD_STATUS_CASE(CMD_STATUS_UNKNOWN); + MAKE_CMD_STATUS_CASE(CMD_STATUS_INVALID_PARAMETER); + MAKE_CMD_STATUS_CASE(CMD_STATUS_FUNCTION_NOT_SUPPORTED); + MAKE_CMD_STATUS_CASE(CMD_STATUS_TIME_OUT); + MAKE_CMD_STATUS_CASE(CMD_STATUS_IN_PROGRESS); + MAKE_CMD_STATUS_CASE(CMD_STATUS_HOST_FAILURE); + MAKE_CMD_STATUS_CASE(CMD_STATUS_SCAN_FAILED); + } + + return "UNKNOWN"; +} + +/* Wait until the command is completed */ +static int at76_wait_completion(struct at76_priv *priv, int cmd) +{ + int status = 0; + unsigned long timeout = jiffies + CMD_COMPLETION_TIMEOUT; + + do { + status = at76_get_cmd_status(priv->udev, cmd); + if (status < 0) { + printk(KERN_ERR "%s: at76_get_cmd_status failed: %d\n", + wiphy_name(priv->hw->wiphy), status); + break; + } + + at76_dbg(DBG_WAIT_COMPLETE, + "%s: Waiting on cmd %d, status = %d (%s)", + wiphy_name(priv->hw->wiphy), cmd, status, + at76_get_cmd_status_string(status)); + + if (status != CMD_STATUS_IN_PROGRESS + && status != CMD_STATUS_IDLE) + break; + + schedule_timeout_interruptible(HZ / 10); /* 100 ms */ + if (time_after(jiffies, timeout)) { + printk(KERN_ERR + "%s: completion timeout for command %d\n", + wiphy_name(priv->hw->wiphy), cmd); + status = -ETIMEDOUT; + break; + } + } while (1); + + return status; +} + +static int at76_set_mib(struct at76_priv *priv, struct set_mib_buffer *buf) +{ + int ret; + + ret = at76_set_card_command(priv->udev, CMD_SET_MIB, buf, + offsetof(struct set_mib_buffer, + data) + buf->size); + if (ret < 0) + return ret; + + ret = at76_wait_completion(priv, CMD_SET_MIB); + if (ret != CMD_STATUS_COMPLETE) { + printk(KERN_INFO + "%s: set_mib: at76_wait_completion failed " + "with %d\n", wiphy_name(priv->hw->wiphy), ret); + ret = -EIO; + } + + return ret; +} + +/* Return < 0 on error, == 0 if no command sent, == 1 if cmd sent */ +static int at76_set_radio(struct at76_priv *priv, int enable) +{ + int ret; + int cmd; + + if (priv->radio_on == enable) + return 0; + + cmd = enable ? 
CMD_RADIO_ON : CMD_RADIO_OFF; + + ret = at76_set_card_command(priv->udev, cmd, NULL, 0); + if (ret < 0) + printk(KERN_ERR "%s: at76_set_card_command(%d) failed: %d\n", + wiphy_name(priv->hw->wiphy), cmd, ret); + else + ret = 1; + + priv->radio_on = enable; + return ret; +} + +/* Set current power save mode (AT76_PM_OFF/AT76_PM_ON/AT76_PM_SMART) */ +static int at76_set_pm_mode(struct at76_priv *priv) +{ + int ret = 0; + + priv->mib_buf.type = MIB_MAC_MGMT; + priv->mib_buf.size = 1; + priv->mib_buf.index = offsetof(struct mib_mac_mgmt, power_mgmt_mode); + priv->mib_buf.data.byte = priv->pm_mode; + + ret = at76_set_mib(priv, &priv->mib_buf); + if (ret < 0) + printk(KERN_ERR "%s: set_mib (pm_mode) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + + return ret; +} + +static int at76_set_preamble(struct at76_priv *priv, u8 type) +{ + int ret = 0; + + priv->mib_buf.type = MIB_LOCAL; + priv->mib_buf.size = 1; + priv->mib_buf.index = offsetof(struct mib_local, preamble_type); + priv->mib_buf.data.byte = type; + + ret = at76_set_mib(priv, &priv->mib_buf); + if (ret < 0) + printk(KERN_ERR "%s: set_mib (preamble) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + + return ret; +} + +static int at76_set_frag(struct at76_priv *priv, u16 size) +{ + int ret = 0; + + priv->mib_buf.type = MIB_MAC; + priv->mib_buf.size = 2; + priv->mib_buf.index = offsetof(struct mib_mac, frag_threshold); + priv->mib_buf.data.word = cpu_to_le16(size); + + ret = at76_set_mib(priv, &priv->mib_buf); + if (ret < 0) + printk(KERN_ERR "%s: set_mib (frag threshold) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + + return ret; +} + +static int at76_set_rts(struct at76_priv *priv, u16 size) +{ + int ret = 0; + + priv->mib_buf.type = MIB_MAC; + priv->mib_buf.size = 2; + priv->mib_buf.index = offsetof(struct mib_mac, rts_threshold); + priv->mib_buf.data.word = cpu_to_le16(size); + + ret = at76_set_mib(priv, &priv->mib_buf); + if (ret < 0) + printk(KERN_ERR "%s: set_mib (rts) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + + return ret; +} + +static int at76_set_autorate_fallback(struct at76_priv *priv, int onoff) +{ + int ret = 0; + + priv->mib_buf.type = MIB_LOCAL; + priv->mib_buf.size = 1; + priv->mib_buf.index = offsetof(struct mib_local, txautorate_fallback); + priv->mib_buf.data.byte = onoff; + + ret = at76_set_mib(priv, &priv->mib_buf); + if (ret < 0) + printk(KERN_ERR "%s: set_mib (autorate fallback) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + + return ret; +} + +static void at76_dump_mib_mac_addr(struct at76_priv *priv) +{ + int i; + int ret; + struct mib_mac_addr *m = kmalloc(sizeof(struct mib_mac_addr), + GFP_KERNEL); + + if (!m) + return; + + ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m, + sizeof(struct mib_mac_addr)); + if (ret < 0) { + printk(KERN_ERR "%s: at76_get_mib (MAC_ADDR) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + goto exit; + } + + at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: mac_addr %pM res 0x%x 0x%x", + wiphy_name(priv->hw->wiphy), + m->mac_addr, m->res[0], m->res[1]); + for (i = 0; i < ARRAY_SIZE(m->group_addr); i++) + at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: group addr %d: %pM, " + "status %d", wiphy_name(priv->hw->wiphy), i, + m->group_addr[i], m->group_addr_status[i]); +exit: + kfree(m); +} + +static void at76_dump_mib_mac_wep(struct at76_priv *priv) +{ + int i; + int ret; + int key_len; + struct mib_mac_wep *m = kmalloc(sizeof(struct mib_mac_wep), GFP_KERNEL); + + if (!m) + return; + + ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m, + sizeof(struct mib_mac_wep)); + if (ret < 0) 
{ + printk(KERN_ERR "%s: at76_get_mib (MAC_WEP) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + goto exit; + } + + at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: priv_invoked %u def_key_id %u " + "key_len %u excl_unencr %u wep_icv_err %u wep_excluded %u " + "encr_level %u key %d", wiphy_name(priv->hw->wiphy), + m->privacy_invoked, m->wep_default_key_id, + m->wep_key_mapping_len, m->exclude_unencrypted, + le32_to_cpu(m->wep_icv_error_count), + le32_to_cpu(m->wep_excluded_count), m->encryption_level, + m->wep_default_key_id); + + key_len = (m->encryption_level == 1) ? + WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN; + + for (i = 0; i < WEP_KEYS; i++) + at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %s", + wiphy_name(priv->hw->wiphy), i, + hex2str(m->wep_default_keyvalue[i], key_len)); +exit: + kfree(m); +} + +static void at76_dump_mib_mac_mgmt(struct at76_priv *priv) +{ + int ret; + struct mib_mac_mgmt *m = kmalloc(sizeof(struct mib_mac_mgmt), + GFP_KERNEL); + + if (!m) + return; + + ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m, + sizeof(struct mib_mac_mgmt)); + if (ret < 0) { + printk(KERN_ERR "%s: at76_get_mib (MAC_MGMT) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + goto exit; + } + + at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration " + "%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d " + "CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d " + "current_bssid %pM current_essid %s current_bss_type %d " + "pm_mode %d ibss_change %d res %d " + "multi_domain_capability_implemented %d " + "international_roaming %d country_string %.3s", + wiphy_name(priv->hw->wiphy), le16_to_cpu(m->beacon_period), + le16_to_cpu(m->CFP_max_duration), + le16_to_cpu(m->medium_occupancy_limit), + le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window), + m->CFP_mode, m->privacy_option_implemented, m->DTIM_period, + m->CFP_period, m->current_bssid, + hex2str(m->current_essid, IW_ESSID_MAX_SIZE), + m->current_bss_type, m->power_mgmt_mode, m->ibss_change, + m->res, m->multi_domain_capability_implemented, + m->multi_domain_capability_enabled, m->country_string); +exit: + kfree(m); +} + +static void at76_dump_mib_mac(struct at76_priv *priv) +{ + int ret; + struct mib_mac *m = kmalloc(sizeof(struct mib_mac), GFP_KERNEL); + + if (!m) + return; + + ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac)); + if (ret < 0) { + printk(KERN_ERR "%s: at76_get_mib (MAC) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + goto exit; + } + + at76_dbg(DBG_MIB, "%s: MIB MAC: max_tx_msdu_lifetime %d " + "max_rx_lifetime %d frag_threshold %d rts_threshold %d " + "cwmin %d cwmax %d short_retry_time %d long_retry_time %d " + "scan_type %d scan_channel %d probe_delay %u " + "min_channel_time %d max_channel_time %d listen_int %d " + "desired_ssid %s desired_bssid %pM desired_bsstype %d", + wiphy_name(priv->hw->wiphy), + le32_to_cpu(m->max_tx_msdu_lifetime), + le32_to_cpu(m->max_rx_lifetime), + le16_to_cpu(m->frag_threshold), le16_to_cpu(m->rts_threshold), + le16_to_cpu(m->cwmin), le16_to_cpu(m->cwmax), + m->short_retry_time, m->long_retry_time, m->scan_type, + m->scan_channel, le16_to_cpu(m->probe_delay), + le16_to_cpu(m->min_channel_time), + le16_to_cpu(m->max_channel_time), + le16_to_cpu(m->listen_interval), + hex2str(m->desired_ssid, IW_ESSID_MAX_SIZE), + m->desired_bssid, m->desired_bsstype); +exit: + kfree(m); +} + +static void at76_dump_mib_phy(struct at76_priv *priv) +{ + int ret; + struct mib_phy *m = kmalloc(sizeof(struct mib_phy), GFP_KERNEL); + + if (!m) + return; + + ret = 
at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy)); + if (ret < 0) { + printk(KERN_ERR "%s: at76_get_mib (PHY) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + goto exit; + } + + at76_dbg(DBG_MIB, "%s: MIB PHY: ed_threshold %d slot_time %d " + "sifs_time %d preamble_length %d plcp_header_length %d " + "mpdu_max_length %d cca_mode_supported %d operation_rate_set " + "0x%x 0x%x 0x%x 0x%x channel_id %d current_cca_mode %d " + "phy_type %d current_reg_domain %d", + wiphy_name(priv->hw->wiphy), le32_to_cpu(m->ed_threshold), + le16_to_cpu(m->slot_time), le16_to_cpu(m->sifs_time), + le16_to_cpu(m->preamble_length), + le16_to_cpu(m->plcp_header_length), + le16_to_cpu(m->mpdu_max_length), + le16_to_cpu(m->cca_mode_supported), m->operation_rate_set[0], + m->operation_rate_set[1], m->operation_rate_set[2], + m->operation_rate_set[3], m->channel_id, m->current_cca_mode, + m->phy_type, m->current_reg_domain); +exit: + kfree(m); +} + +static void at76_dump_mib_local(struct at76_priv *priv) +{ + int ret; + struct mib_local *m = kmalloc(sizeof(struct mib_local), GFP_KERNEL); + + if (!m) + return; + + ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local)); + if (ret < 0) { + printk(KERN_ERR "%s: at76_get_mib (LOCAL) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + goto exit; + } + + at76_dbg(DBG_MIB, "%s: MIB LOCAL: beacon_enable %d " + "txautorate_fallback %d ssid_size %d promiscuous_mode %d " + "preamble_type %d", wiphy_name(priv->hw->wiphy), + m->beacon_enable, + m->txautorate_fallback, m->ssid_size, m->promiscuous_mode, + m->preamble_type); +exit: + kfree(m); +} + +static void at76_dump_mib_mdomain(struct at76_priv *priv) +{ + int ret; + struct mib_mdomain *m = kmalloc(sizeof(struct mib_mdomain), GFP_KERNEL); + + if (!m) + return; + + ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m, + sizeof(struct mib_mdomain)); + if (ret < 0) { + printk(KERN_ERR "%s: at76_get_mib (MDOMAIN) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + goto exit; + } + + at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %s", + wiphy_name(priv->hw->wiphy), + hex2str(m->channel_list, sizeof(m->channel_list))); + + at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %s", + wiphy_name(priv->hw->wiphy), + hex2str(m->tx_powerlevel, sizeof(m->tx_powerlevel))); +exit: + kfree(m); +} + +/* Enable monitor mode */ +static int at76_start_monitor(struct at76_priv *priv) +{ + struct at76_req_scan scan; + int ret; + + memset(&scan, 0, sizeof(struct at76_req_scan)); + memset(scan.bssid, 0xff, ETH_ALEN); + + scan.channel = priv->channel; + scan.scan_type = SCAN_TYPE_PASSIVE; + scan.international_scan = 0; + scan.min_channel_time = cpu_to_le16(priv->scan_min_time); + scan.max_channel_time = cpu_to_le16(priv->scan_max_time); + scan.probe_delay = cpu_to_le16(0); + + ret = at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan)); + if (ret >= 0) + ret = at76_get_cmd_status(priv->udev, CMD_SCAN); + + return ret; +} + +/* Calculate padding from txbuf->wlength (which excludes the USB TX header), + likely to compensate a flaw in the AT76C503A USB part ... 
*/ +static inline int at76_calc_padding(int wlen) +{ + /* add the USB TX header */ + wlen += AT76_TX_HDRLEN; + + wlen = wlen % 64; + + if (wlen < 50) + return 50 - wlen; + + if (wlen >= 61) + return 64 + 50 - wlen; + + return 0; +} + +static void at76_rx_callback(struct urb *urb) +{ + struct at76_priv *priv = urb->context; + + priv->rx_tasklet.data = (unsigned long)urb; + tasklet_schedule(&priv->rx_tasklet); + return; +} + +static int at76_submit_rx_urb(struct at76_priv *priv) +{ + int ret; + int size; + struct sk_buff *skb = priv->rx_skb; + + if (!priv->rx_urb) { + printk(KERN_ERR "%s: %s: priv->rx_urb is NULL\n", + wiphy_name(priv->hw->wiphy), __func__); + return -EFAULT; + } + + if (!skb) { + skb = dev_alloc_skb(sizeof(struct at76_rx_buffer)); + if (!skb) { + printk(KERN_ERR "%s: cannot allocate rx skbuff\n", + wiphy_name(priv->hw->wiphy)); + ret = -ENOMEM; + goto exit; + } + priv->rx_skb = skb; + } else { + skb_push(skb, skb_headroom(skb)); + skb_trim(skb, 0); + } + + size = skb_tailroom(skb); + usb_fill_bulk_urb(priv->rx_urb, priv->udev, priv->rx_pipe, + skb_put(skb, size), size, at76_rx_callback, priv); + ret = usb_submit_urb(priv->rx_urb, GFP_ATOMIC); + if (ret < 0) { + if (ret == -ENODEV) + at76_dbg(DBG_DEVSTART, + "usb_submit_urb returned -ENODEV"); + else + printk(KERN_ERR "%s: rx, usb_submit_urb failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + } + +exit: + if (ret < 0 && ret != -ENODEV) + printk(KERN_ERR "%s: cannot submit rx urb - please unload the " + "driver and/or power cycle the device\n", + wiphy_name(priv->hw->wiphy)); + + return ret; +} + +/* Download external firmware */ +static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe) +{ + int ret; + int op_mode; + int blockno = 0; + int bsize; + u8 *block; + u8 *buf = fwe->extfw; + int size = fwe->extfw_size; + + if (!buf || !size) + return -ENOENT; + + op_mode = at76_get_op_mode(udev); + at76_dbg(DBG_DEVSTART, "opmode %d", op_mode); + + if (op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) { + dev_printk(KERN_ERR, &udev->dev, "unexpected opmode %d\n", + op_mode); + return -EINVAL; + } + + block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL); + if (!block) + return -ENOMEM; + + at76_dbg(DBG_DEVSTART, "downloading external firmware"); + + /* for fw >= 0.100, the device needs an extra empty block */ + do { + bsize = min_t(int, size, FW_BLOCK_SIZE); + memcpy(block, buf, bsize); + at76_dbg(DBG_DEVSTART, + "ext fw, size left = %5d, bsize = %4d, blockno = %2d", + size, bsize, blockno); + ret = at76_load_ext_fw_block(udev, blockno, block, bsize); + if (ret != bsize) { + dev_printk(KERN_ERR, &udev->dev, + "loading %dth firmware block failed: %d\n", + blockno, ret); + goto exit; + } + buf += bsize; + size -= bsize; + blockno++; + } while (bsize > 0); + + if (at76_is_505a(fwe->board_type)) { + at76_dbg(DBG_DEVSTART, "200 ms delay for 505a"); + schedule_timeout_interruptible(HZ / 5 + 1); + } + +exit: + kfree(block); + if (ret < 0) + dev_printk(KERN_ERR, &udev->dev, + "downloading external firmware failed: %d\n", ret); + return ret; +} + +/* Download internal firmware */ +static int at76_load_internal_fw(struct usb_device *udev, struct fwentry *fwe) +{ + int ret; + int need_remap = !at76_is_505a(fwe->board_type); + + ret = at76_usbdfu_download(udev, fwe->intfw, fwe->intfw_size, + need_remap ? 
0 : 2 * HZ); + + if (ret < 0) { + dev_printk(KERN_ERR, &udev->dev, + "downloading internal fw failed with %d\n", ret); + goto exit; + } + + at76_dbg(DBG_DEVSTART, "sending REMAP"); + + /* no REMAP for 505A (see SF driver) */ + if (need_remap) { + ret = at76_remap(udev); + if (ret < 0) { + dev_printk(KERN_ERR, &udev->dev, + "sending REMAP failed with %d\n", ret); + goto exit; + } + } + + at76_dbg(DBG_DEVSTART, "sleeping for 2 seconds"); + schedule_timeout_interruptible(2 * HZ + 1); + usb_reset_device(udev); + +exit: + return ret; +} + +static int at76_startup_device(struct at76_priv *priv) +{ + struct at76_card_config *ccfg = &priv->card_config; + int ret; + + at76_dbg(DBG_PARAMS, + "%s param: ssid %.*s (%s) mode %s ch %d wep %s key %d " + "keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size, + priv->essid, hex2str(priv->essid, IW_ESSID_MAX_SIZE), + priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra", + priv->channel, priv->wep_enabled ? "enabled" : "disabled", + priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]); + at76_dbg(DBG_PARAMS, + "%s param: preamble %s rts %d retry %d frag %d " + "txrate %s auth_mode %d", wiphy_name(priv->hw->wiphy), + preambles[priv->preamble_type], priv->rts_threshold, + priv->short_retry_limit, priv->frag_threshold, + priv->txrate == TX_RATE_1MBIT ? "1MBit" : priv->txrate == + TX_RATE_2MBIT ? "2MBit" : priv->txrate == + TX_RATE_5_5MBIT ? "5.5MBit" : priv->txrate == + TX_RATE_11MBIT ? "11MBit" : priv->txrate == + TX_RATE_AUTO ? "auto" : "", priv->auth_mode); + at76_dbg(DBG_PARAMS, + "%s param: pm_mode %d pm_period %d auth_mode %s " + "scan_times %d %d scan_mode %s", + wiphy_name(priv->hw->wiphy), priv->pm_mode, priv->pm_period, + priv->auth_mode == WLAN_AUTH_OPEN ? "open" : "shared_secret", + priv->scan_min_time, priv->scan_max_time, + priv->scan_mode == SCAN_TYPE_ACTIVE ? "active" : "passive"); + + memset(ccfg, 0, sizeof(struct at76_card_config)); + ccfg->promiscuous_mode = 0; + ccfg->short_retry_limit = priv->short_retry_limit; + + if (priv->wep_enabled) { + if (priv->wep_keys_len[priv->wep_key_id] > WEP_SMALL_KEY_LEN) + ccfg->encryption_type = 2; + else + ccfg->encryption_type = 1; + + /* jal: always exclude unencrypted if WEP is active */ + ccfg->exclude_unencrypted = 1; + } else { + ccfg->exclude_unencrypted = 0; + ccfg->encryption_type = 0; + } + + ccfg->rts_threshold = cpu_to_le16(priv->rts_threshold); + ccfg->fragmentation_threshold = cpu_to_le16(priv->frag_threshold); + + memcpy(ccfg->basic_rate_set, hw_rates, 4); + /* jal: really needed, we do a set_mib for autorate later ??? */ + ccfg->auto_rate_fallback = (priv->txrate == TX_RATE_AUTO ? 
1 : 0); + ccfg->channel = priv->channel; + ccfg->privacy_invoked = priv->wep_enabled; + memcpy(ccfg->current_ssid, priv->essid, IW_ESSID_MAX_SIZE); + ccfg->ssid_len = priv->essid_size; + + ccfg->wep_default_key_id = priv->wep_key_id; + memcpy(ccfg->wep_default_key_value, priv->wep_keys, + sizeof(priv->wep_keys)); + + ccfg->short_preamble = priv->preamble_type; + ccfg->beacon_period = cpu_to_le16(priv->beacon_period); + + ret = at76_set_card_command(priv->udev, CMD_STARTUP, &priv->card_config, + sizeof(struct at76_card_config)); + if (ret < 0) { + printk(KERN_ERR "%s: at76_set_card_command failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + return ret; + } + + at76_wait_completion(priv, CMD_STARTUP); + + /* remove BSSID from previous run */ + memset(priv->bssid, 0, ETH_ALEN); + + if (at76_set_radio(priv, 1) == 1) + at76_wait_completion(priv, CMD_RADIO_ON); + + ret = at76_set_preamble(priv, priv->preamble_type); + if (ret < 0) + return ret; + + ret = at76_set_frag(priv, priv->frag_threshold); + if (ret < 0) + return ret; + + ret = at76_set_rts(priv, priv->rts_threshold); + if (ret < 0) + return ret; + + ret = at76_set_autorate_fallback(priv, + priv->txrate == TX_RATE_AUTO ? 1 : 0); + if (ret < 0) + return ret; + + ret = at76_set_pm_mode(priv); + if (ret < 0) + return ret; + + if (at76_debug & DBG_MIB) { + at76_dump_mib_mac(priv); + at76_dump_mib_mac_addr(priv); + at76_dump_mib_mac_mgmt(priv); + at76_dump_mib_mac_wep(priv); + at76_dump_mib_mdomain(priv); + at76_dump_mib_phy(priv); + at76_dump_mib_local(priv); + } + + return 0; +} + +/* Enable or disable promiscuous mode */ +static void at76_work_set_promisc(struct work_struct *work) +{ + struct at76_priv *priv = container_of(work, struct at76_priv, + work_set_promisc); + int ret = 0; + + if (priv->device_unplugged) + return; + + mutex_lock(&priv->mtx); + + priv->mib_buf.type = MIB_LOCAL; + priv->mib_buf.size = 1; + priv->mib_buf.index = offsetof(struct mib_local, promiscuous_mode); + priv->mib_buf.data.byte = priv->promisc ? 
1 : 0; + + ret = at76_set_mib(priv, &priv->mib_buf); + if (ret < 0) + printk(KERN_ERR "%s: set_mib (promiscuous_mode) failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + + mutex_unlock(&priv->mtx); +} + +/* Submit Rx urb back to the device */ +static void at76_work_submit_rx(struct work_struct *work) +{ + struct at76_priv *priv = container_of(work, struct at76_priv, + work_submit_rx); + + mutex_lock(&priv->mtx); + at76_submit_rx_urb(priv); + mutex_unlock(&priv->mtx); +} + +static void at76_rx_tasklet(unsigned long param) +{ + struct urb *urb = (struct urb *)param; + struct at76_priv *priv = urb->context; + struct at76_rx_buffer *buf; + struct ieee80211_rx_status rx_status = { 0 }; + + if (priv->device_unplugged) { + at76_dbg(DBG_DEVSTART, "device unplugged"); + if (urb) + at76_dbg(DBG_DEVSTART, "urb status %d", urb->status); + return; + } + + if (!priv->rx_skb || !priv->rx_skb->data) + return; + + buf = (struct at76_rx_buffer *)priv->rx_skb->data; + + if (urb->status != 0) { + if (urb->status != -ENOENT && urb->status != -ECONNRESET) + at76_dbg(DBG_URB, + "%s %s: - nonzero Rx bulk status received: %d", + __func__, wiphy_name(priv->hw->wiphy), + urb->status); + return; + } + + at76_dbg(DBG_RX_ATMEL_HDR, + "%s: rx frame: rate %d rssi %d noise %d link %d", + wiphy_name(priv->hw->wiphy), buf->rx_rate, buf->rssi, + buf->noise_level, buf->link_quality); + + skb_pull(priv->rx_skb, AT76_RX_HDRLEN); + skb_trim(priv->rx_skb, le16_to_cpu(buf->wlength)); + at76_dbg_dump(DBG_RX_DATA, priv->rx_skb->data, + priv->rx_skb->len, "RX: len=%d", priv->rx_skb->len); + + rx_status.signal = buf->rssi; + rx_status.flag |= RX_FLAG_DECRYPTED; + rx_status.flag |= RX_FLAG_IV_STRIPPED; + + at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d", + priv->rx_skb->len, priv->rx_skb->data_len); + memcpy(IEEE80211_SKB_RXCB(priv->rx_skb), &rx_status, sizeof(rx_status)); + ieee80211_rx_irqsafe(priv->hw, priv->rx_skb); + + /* Use a new skb for the next receive */ + priv->rx_skb = NULL; + + at76_submit_rx_urb(priv); +} + +/* Load firmware into kernel memory and parse it */ +static struct fwentry *at76_load_firmware(struct usb_device *udev, + enum board_type board_type) +{ + int ret; + char *str; + struct at76_fw_header *fwh; + struct fwentry *fwe = &firmwares[board_type]; + + mutex_lock(&fw_mutex); + + if (fwe->loaded) { + at76_dbg(DBG_FW, "re-using previously loaded fw"); + goto exit; + } + + at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname); + ret = request_firmware(&fwe->fw, fwe->fwname, &udev->dev); + if (ret < 0) { + dev_printk(KERN_ERR, &udev->dev, "firmware %s not found!\n", + fwe->fwname); + dev_printk(KERN_ERR, &udev->dev, + "you may need to download the firmware from " + "http://developer.berlios.de/projects/at76c503a/\n"); + goto exit; + } + + at76_dbg(DBG_FW, "got it."); + fwh = (struct at76_fw_header *)(fwe->fw->data); + + if (fwe->fw->size <= sizeof(*fwh)) { + dev_printk(KERN_ERR, &udev->dev, + "firmware is too short (0x%zx)\n", fwe->fw->size); + goto exit; + } + + /* CRC currently not checked */ + fwe->board_type = le32_to_cpu(fwh->board_type); + if (fwe->board_type != board_type) { + dev_printk(KERN_ERR, &udev->dev, + "board type mismatch, requested %u, got %u\n", + board_type, fwe->board_type); + goto exit; + } + + fwe->fw_version.major = fwh->major; + fwe->fw_version.minor = fwh->minor; + fwe->fw_version.patch = fwh->patch; + fwe->fw_version.build = fwh->build; + + str = (char *)fwh + le32_to_cpu(fwh->str_offset); + fwe->intfw = (u8 *)fwh + le32_to_cpu(fwh->int_fw_offset); + fwe->intfw_size = 
le32_to_cpu(fwh->int_fw_len); + fwe->extfw = (u8 *)fwh + le32_to_cpu(fwh->ext_fw_offset); + fwe->extfw_size = le32_to_cpu(fwh->ext_fw_len); + + fwe->loaded = 1; + + dev_printk(KERN_DEBUG, &udev->dev, + "using firmware %s (version %d.%d.%d-%d)\n", + fwe->fwname, fwh->major, fwh->minor, fwh->patch, fwh->build); + + at76_dbg(DBG_DEVSTART, "board %u, int %d:%d, ext %d:%d", board_type, + le32_to_cpu(fwh->int_fw_offset), le32_to_cpu(fwh->int_fw_len), + le32_to_cpu(fwh->ext_fw_offset), le32_to_cpu(fwh->ext_fw_len)); + at76_dbg(DBG_DEVSTART, "firmware id %s", str); + +exit: + mutex_unlock(&fw_mutex); + + if (fwe->loaded) + return fwe; + else + return NULL; +} + +static void at76_mac80211_tx_callback(struct urb *urb) +{ + struct at76_priv *priv = urb->context; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb); + + at76_dbg(DBG_MAC80211, "%s()", __func__); + + switch (urb->status) { + case 0: + /* success */ + info->flags |= IEEE80211_TX_STAT_ACK; + break; + case -ENOENT: + case -ECONNRESET: + /* fail, urb has been unlinked */ + /* FIXME: add error message */ + break; + default: + at76_dbg(DBG_URB, "%s - nonzero tx status received: %d", + __func__, urb->status); + break; + } + + memset(&info->status, 0, sizeof(info->status)); + + ieee80211_tx_status_irqsafe(priv->hw, priv->tx_skb); + + priv->tx_skb = NULL; + + ieee80211_wake_queues(priv->hw); +} + +static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct at76_priv *priv = hw->priv; + struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int padding, submit_len, ret; + + at76_dbg(DBG_MAC80211, "%s()", __func__); + + if (priv->tx_urb->status == -EINPROGRESS) { + printk(KERN_ERR "%s: %s called while tx urb is pending\n", + wiphy_name(priv->hw->wiphy), __func__); + return NETDEV_TX_BUSY; + } + + ieee80211_stop_queues(hw); + + at76_ledtrig_tx_activity(); /* tell ledtrigger we send a packet */ + + WARN_ON(priv->tx_skb != NULL); + + priv->tx_skb = skb; + padding = at76_calc_padding(skb->len); + submit_len = AT76_TX_HDRLEN + skb->len + padding; + + /* setup 'Atmel' header */ + memset(tx_buffer, 0, sizeof(*tx_buffer)); + tx_buffer->padding = padding; + tx_buffer->wlength = cpu_to_le16(skb->len); + tx_buffer->tx_rate = ieee80211_get_tx_rate(hw, info)->hw_value; + memset(tx_buffer->reserved, 0, sizeof(tx_buffer->reserved)); + memcpy(tx_buffer->packet, skb->data, skb->len); + + at76_dbg(DBG_TX_DATA, "%s tx: wlen 0x%x pad 0x%x rate %d hdr", + wiphy_name(priv->hw->wiphy), le16_to_cpu(tx_buffer->wlength), + tx_buffer->padding, tx_buffer->tx_rate); + + /* send stuff */ + at76_dbg_dump(DBG_TX_DATA_CONTENT, tx_buffer, submit_len, + "%s(): tx_buffer %d bytes:", __func__, submit_len); + usb_fill_bulk_urb(priv->tx_urb, priv->udev, priv->tx_pipe, tx_buffer, + submit_len, at76_mac80211_tx_callback, priv); + ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC); + if (ret) { + printk(KERN_ERR "%s: error in tx submit urb: %d\n", + wiphy_name(priv->hw->wiphy), ret); + if (ret == -EINVAL) + printk(KERN_ERR + "%s: -EINVAL: tx urb %p hcpriv %p complete %p\n", + wiphy_name(priv->hw->wiphy), priv->tx_urb, + priv->tx_urb->hcpriv, priv->tx_urb->complete); + } + + return 0; +} + +static int at76_mac80211_start(struct ieee80211_hw *hw) +{ + struct at76_priv *priv = hw->priv; + int ret; + + at76_dbg(DBG_MAC80211, "%s()", __func__); + + mutex_lock(&priv->mtx); + + ret = at76_submit_rx_urb(priv); + if (ret < 0) { + printk(KERN_ERR "%s: open: submit_rx_urb failed: %d\n", + 
wiphy_name(priv->hw->wiphy), ret); + goto error; + } + + at76_startup_device(priv); + + at76_start_monitor(priv); + +error: + mutex_unlock(&priv->mtx); + + return 0; +} + +static void at76_mac80211_stop(struct ieee80211_hw *hw) +{ + struct at76_priv *priv = hw->priv; + + at76_dbg(DBG_MAC80211, "%s()", __func__); + + cancel_delayed_work(&priv->dwork_hw_scan); + cancel_work_sync(&priv->work_set_promisc); + + mutex_lock(&priv->mtx); + + if (!priv->device_unplugged) { + /* We are called by "ifconfig ethX down", not because the + * device is not available anymore. */ + at76_set_radio(priv, 0); + + /* We unlink rx_urb because at76_open() re-submits it. + * If unplugged, at76_delete_device() takes care of it. */ + usb_kill_urb(priv->rx_urb); + } + + mutex_unlock(&priv->mtx); +} + +static int at76_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct at76_priv *priv = hw->priv; + int ret = 0; + + at76_dbg(DBG_MAC80211, "%s()", __func__); + + mutex_lock(&priv->mtx); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + priv->iw_mode = IW_MODE_INFRA; + break; + default: + ret = -EOPNOTSUPP; + goto exit; + } + +exit: + mutex_unlock(&priv->mtx); + + return ret; +} + +static void at76_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + at76_dbg(DBG_MAC80211, "%s()", __func__); +} + +static int at76_join(struct at76_priv *priv) +{ + struct at76_req_join join; + int ret; + + memset(&join, 0, sizeof(struct at76_req_join)); + memcpy(join.essid, priv->essid, priv->essid_size); + join.essid_size = priv->essid_size; + memcpy(join.bssid, priv->bssid, ETH_ALEN); + join.bss_type = INFRASTRUCTURE_MODE; + join.channel = priv->channel; + join.timeout = cpu_to_le16(2000); + + at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__); + ret = at76_set_card_command(priv->udev, CMD_JOIN, &join, + sizeof(struct at76_req_join)); + + if (ret < 0) { + printk(KERN_ERR "%s: at76_set_card_command failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + return 0; + } + + ret = at76_wait_completion(priv, CMD_JOIN); + at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret); + if (ret != CMD_STATUS_COMPLETE) { + printk(KERN_ERR "%s: at76_wait_completion failed: %d\n", + wiphy_name(priv->hw->wiphy), ret); + return 0; + } + + at76_set_pm_mode(priv); + + return 0; +} + +static void at76_dwork_hw_scan(struct work_struct *work) +{ + struct at76_priv *priv = container_of(work, struct at76_priv, + dwork_hw_scan.work); + int ret; + + if (priv->device_unplugged) + return; + + mutex_lock(&priv->mtx); + + ret = at76_get_cmd_status(priv->udev, CMD_SCAN); + at76_dbg(DBG_MAC80211, "%s: CMD_SCAN status 0x%02x", __func__, ret); + + /* FIXME: add maximum time for scan to complete */ + + if (ret != CMD_STATUS_COMPLETE) { + ieee80211_queue_delayed_work(priv->hw, &priv->dwork_hw_scan, + SCAN_POLL_INTERVAL); + mutex_unlock(&priv->mtx); + return; + } + + if (is_valid_ether_addr(priv->bssid)) + at76_join(priv); + + mutex_unlock(&priv->mtx); + + ieee80211_scan_completed(priv->hw, false); + + ieee80211_wake_queues(priv->hw); +} + +static int at76_hw_scan(struct ieee80211_hw *hw, + struct cfg80211_scan_request *req) +{ + struct at76_priv *priv = hw->priv; + struct at76_req_scan scan; + u8 *ssid = NULL; + int ret, len = 0; + + at76_dbg(DBG_MAC80211, "%s():", __func__); + + if (priv->device_unplugged) + return 0; + + mutex_lock(&priv->mtx); + + ieee80211_stop_queues(hw); + + memset(&scan, 0, sizeof(struct at76_req_scan)); + memset(scan.bssid, 0xFF, ETH_ALEN); + + if (req->n_ssids) { + 
scan.scan_type = SCAN_TYPE_ACTIVE; + ssid = req->ssids[0].ssid; + len = req->ssids[0].ssid_len; + } else { + scan.scan_type = SCAN_TYPE_PASSIVE; + } + + if (len) { + memcpy(scan.essid, ssid, len); + scan.essid_size = len; + } + + scan.min_channel_time = cpu_to_le16(priv->scan_min_time); + scan.max_channel_time = cpu_to_le16(priv->scan_max_time); + scan.probe_delay = cpu_to_le16(priv->scan_min_time * 1000); + scan.international_scan = 0; + + at76_dbg(DBG_MAC80211, "%s: sending CMD_SCAN", __func__); + ret = at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan)); + + if (ret < 0) { + err("CMD_SCAN failed: %d", ret); + goto exit; + } + + ieee80211_queue_delayed_work(priv->hw, &priv->dwork_hw_scan, + SCAN_POLL_INTERVAL); + +exit: + mutex_unlock(&priv->mtx); + + return 0; +} + +static int at76_config(struct ieee80211_hw *hw, u32 changed) +{ + struct at76_priv *priv = hw->priv; + + at76_dbg(DBG_MAC80211, "%s(): channel %d", + __func__, hw->conf.channel->hw_value); + at76_dbg_dump(DBG_MAC80211, priv->bssid, ETH_ALEN, "bssid:"); + + mutex_lock(&priv->mtx); + + priv->channel = hw->conf.channel->hw_value; + + if (is_valid_ether_addr(priv->bssid)) + at76_join(priv); + else + at76_start_monitor(priv); + + mutex_unlock(&priv->mtx); + + return 0; +} + +static void at76_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, + u32 changed) +{ + struct at76_priv *priv = hw->priv; + + at76_dbg(DBG_MAC80211, "%s():", __func__); + + if (!(changed & BSS_CHANGED_BSSID)) + return; + + at76_dbg_dump(DBG_MAC80211, conf->bssid, ETH_ALEN, "bssid:"); + + mutex_lock(&priv->mtx); + + memcpy(priv->bssid, conf->bssid, ETH_ALEN); + + if (is_valid_ether_addr(priv->bssid)) + /* mac80211 is joining a bss */ + at76_join(priv); + + mutex_unlock(&priv->mtx); +} + +/* must be atomic */ +static void at76_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct at76_priv *priv = hw->priv; + int flags; + + at76_dbg(DBG_MAC80211, "%s(): changed_flags=0x%08x " + "total_flags=0x%08x", + __func__, changed_flags, *total_flags); + + flags = changed_flags & AT76_SUPPORTED_FILTERS; + *total_flags = AT76_SUPPORTED_FILTERS; + + /* Bail out after updating flags to prevent a WARN_ON in mac80211. 
*/ + if (priv->device_unplugged) + return; + + /* FIXME: access to priv->promisc should be protected with + * priv->mtx, but it's impossible because this function needs to be + * atomic */ + + if (flags && !priv->promisc) { + /* mac80211 wants us to enable promiscuous mode */ + priv->promisc = 1; + } else if (!flags && priv->promisc) { + /* we need to disable promiscuous mode */ + priv->promisc = 0; + } else + return; + + ieee80211_queue_work(hw, &priv->work_set_promisc); +} + +static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct at76_priv *priv = hw->priv; + + int i; + + at76_dbg(DBG_MAC80211, "%s(): cmd %d key->alg %d key->keyidx %d " + "key->keylen %d", + __func__, cmd, key->alg, key->keyidx, key->keylen); + + if (key->alg != ALG_WEP) + return -EOPNOTSUPP; + + key->hw_key_idx = key->keyidx; + + mutex_lock(&priv->mtx); + + switch (cmd) { + case SET_KEY: + memcpy(priv->wep_keys[key->keyidx], key->key, key->keylen); + priv->wep_keys_len[key->keyidx] = key->keylen; + + /* FIXME: find out how to do this properly */ + priv->wep_key_id = key->keyidx; + + break; + case DISABLE_KEY: + default: + priv->wep_keys_len[key->keyidx] = 0; + break; + } + + priv->wep_enabled = 0; + + for (i = 0; i < WEP_KEYS; i++) { + if (priv->wep_keys_len[i] != 0) + priv->wep_enabled = 1; + } + + at76_startup_device(priv); + + mutex_unlock(&priv->mtx); + + return 0; +} + +static const struct ieee80211_ops at76_ops = { + .tx = at76_mac80211_tx, + .add_interface = at76_add_interface, + .remove_interface = at76_remove_interface, + .config = at76_config, + .bss_info_changed = at76_bss_info_changed, + .configure_filter = at76_configure_filter, + .start = at76_mac80211_start, + .stop = at76_mac80211_stop, + .hw_scan = at76_hw_scan, + .set_key = at76_set_key, +}; + +/* Allocate network device and initialize private data */ +static struct at76_priv *at76_alloc_new_device(struct usb_device *udev) +{ + struct ieee80211_hw *hw; + struct at76_priv *priv; + + hw = ieee80211_alloc_hw(sizeof(struct at76_priv), &at76_ops); + if (!hw) { + printk(KERN_ERR DRIVER_NAME ": could not register" + " ieee80211_hw\n"); + return NULL; + } + + priv = hw->priv; + priv->hw = hw; + + priv->udev = udev; + + mutex_init(&priv->mtx); + INIT_WORK(&priv->work_set_promisc, at76_work_set_promisc); + INIT_WORK(&priv->work_submit_rx, at76_work_submit_rx); + INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan); + + tasklet_init(&priv->rx_tasklet, at76_rx_tasklet, 0); + + priv->pm_mode = AT76_PM_OFF; + priv->pm_period = 0; + + /* unit us */ + priv->hw->channel_change_time = 100000; + + return priv; +} + +static int at76_alloc_urbs(struct at76_priv *priv, + struct usb_interface *interface) +{ + struct usb_endpoint_descriptor *endpoint, *ep_in, *ep_out; + int i; + int buffer_size; + struct usb_host_interface *iface_desc; + + at76_dbg(DBG_PROC_ENTRY, "%s: ENTER", __func__); + + at76_dbg(DBG_URB, "%s: NumEndpoints %d ", __func__, + interface->altsetting[0].desc.bNumEndpoints); + + ep_in = NULL; + ep_out = NULL; + iface_desc = interface->cur_altsetting; + for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { + endpoint = &iface_desc->endpoint[i].desc; + + at76_dbg(DBG_URB, "%s: %d. 
endpoint: addr 0x%x attr 0x%x", + __func__, i, endpoint->bEndpointAddress, + endpoint->bmAttributes); + + if (!ep_in && usb_endpoint_is_bulk_in(endpoint)) + ep_in = endpoint; + + if (!ep_out && usb_endpoint_is_bulk_out(endpoint)) + ep_out = endpoint; + } + + if (!ep_in || !ep_out) { + dev_printk(KERN_ERR, &interface->dev, + "bulk endpoints missing\n"); + return -ENXIO; + } + + priv->rx_pipe = usb_rcvbulkpipe(priv->udev, ep_in->bEndpointAddress); + priv->tx_pipe = usb_sndbulkpipe(priv->udev, ep_out->bEndpointAddress); + + priv->rx_urb = usb_alloc_urb(0, GFP_KERNEL); + priv->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!priv->rx_urb || !priv->tx_urb) { + dev_printk(KERN_ERR, &interface->dev, "cannot allocate URB\n"); + return -ENOMEM; + } + + buffer_size = sizeof(struct at76_tx_buffer) + MAX_PADDING_SIZE; + priv->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL); + if (!priv->bulk_out_buffer) { + dev_printk(KERN_ERR, &interface->dev, + "cannot allocate output buffer\n"); + return -ENOMEM; + } + + at76_dbg(DBG_PROC_ENTRY, "%s: EXIT", __func__); + + return 0; +} + +static struct ieee80211_rate at76_rates[] = { + { .bitrate = 10, .hw_value = TX_RATE_1MBIT, }, + { .bitrate = 20, .hw_value = TX_RATE_2MBIT, }, + { .bitrate = 55, .hw_value = TX_RATE_5_5MBIT, }, + { .bitrate = 110, .hw_value = TX_RATE_11MBIT, }, +}; + +static struct ieee80211_channel at76_channels[] = { + { .center_freq = 2412, .hw_value = 1 }, + { .center_freq = 2417, .hw_value = 2 }, + { .center_freq = 2422, .hw_value = 3 }, + { .center_freq = 2427, .hw_value = 4 }, + { .center_freq = 2432, .hw_value = 5 }, + { .center_freq = 2437, .hw_value = 6 }, + { .center_freq = 2442, .hw_value = 7 }, + { .center_freq = 2447, .hw_value = 8 }, + { .center_freq = 2452, .hw_value = 9 }, + { .center_freq = 2457, .hw_value = 10 }, + { .center_freq = 2462, .hw_value = 11 }, + { .center_freq = 2467, .hw_value = 12 }, + { .center_freq = 2472, .hw_value = 13 }, + { .center_freq = 2484, .hw_value = 14 } +}; + +static struct ieee80211_supported_band at76_supported_band = { + .channels = at76_channels, + .n_channels = ARRAY_SIZE(at76_channels), + .bitrates = at76_rates, + .n_bitrates = ARRAY_SIZE(at76_rates), +}; + +/* Register network device and initialize the hardware */ +static int at76_init_new_device(struct at76_priv *priv, + struct usb_interface *interface) +{ + struct wiphy *wiphy; + size_t len; + int ret; + + /* set up the endpoint information */ + /* check out the endpoints */ + + at76_dbg(DBG_DEVSTART, "USB interface: %d endpoints", + interface->cur_altsetting->desc.bNumEndpoints); + + ret = at76_alloc_urbs(priv, interface); + if (ret < 0) + goto exit; + + /* MAC address */ + ret = at76_get_hw_config(priv); + if (ret < 0) { + dev_printk(KERN_ERR, &interface->dev, + "cannot get MAC address\n"); + goto exit; + } + + priv->domain = at76_get_reg_domain(priv->regulatory_domain); + + priv->channel = DEF_CHANNEL; + priv->iw_mode = IW_MODE_INFRA; + priv->rts_threshold = DEF_RTS_THRESHOLD; + priv->frag_threshold = DEF_FRAG_THRESHOLD; + priv->short_retry_limit = DEF_SHORT_RETRY_LIMIT; + priv->txrate = TX_RATE_AUTO; + priv->preamble_type = PREAMBLE_TYPE_LONG; + priv->beacon_period = 100; + priv->auth_mode = WLAN_AUTH_OPEN; + priv->scan_min_time = DEF_SCAN_MIN_TIME; + priv->scan_max_time = DEF_SCAN_MAX_TIME; + priv->scan_mode = SCAN_TYPE_ACTIVE; + priv->device_unplugged = 0; + + /* mac80211 initialisation */ + wiphy = priv->hw->wiphy; + priv->hw->wiphy->max_scan_ssids = 1; + priv->hw->wiphy->max_scan_ie_len = 0; + priv->hw->wiphy->interface_modes = 
BIT(NL80211_IFTYPE_STATION); + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band; + priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_SIGNAL_UNSPEC; + priv->hw->max_signal = 100; + + SET_IEEE80211_DEV(priv->hw, &interface->dev); + SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); + + len = sizeof(wiphy->fw_version); + snprintf(wiphy->fw_version, len, "%d.%d.%d-%d", + priv->fw_version.major, priv->fw_version.minor, + priv->fw_version.patch, priv->fw_version.build); + + wiphy->hw_version = priv->board_type; + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + printk(KERN_ERR "cannot register mac80211 hw (status %d)!\n", + ret); + goto exit; + } + + priv->mac80211_registered = 1; + + printk(KERN_INFO "%s: USB %s, MAC %pM, firmware %d.%d.%d-%d\n", + wiphy_name(priv->hw->wiphy), + dev_name(&interface->dev), priv->mac_addr, + priv->fw_version.major, priv->fw_version.minor, + priv->fw_version.patch, priv->fw_version.build); + printk(KERN_INFO "%s: regulatory domain 0x%02x: %s\n", + wiphy_name(priv->hw->wiphy), + priv->regulatory_domain, priv->domain->name); + +exit: + return ret; +} + +static void at76_delete_device(struct at76_priv *priv) +{ + at76_dbg(DBG_PROC_ENTRY, "%s: ENTER", __func__); + + /* The device is gone, don't bother turning it off */ + priv->device_unplugged = 1; + + tasklet_kill(&priv->rx_tasklet); + + if (priv->mac80211_registered) + ieee80211_unregister_hw(priv->hw); + + if (priv->tx_urb) { + usb_kill_urb(priv->tx_urb); + usb_free_urb(priv->tx_urb); + } + if (priv->rx_urb) { + usb_kill_urb(priv->rx_urb); + usb_free_urb(priv->rx_urb); + } + + at76_dbg(DBG_PROC_ENTRY, "%s: unlinked urbs", __func__); + + kfree(priv->bulk_out_buffer); + + del_timer_sync(&ledtrig_tx_timer); + + kfree_skb(priv->rx_skb); + + usb_put_dev(priv->udev); + + at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw", + __func__); + ieee80211_free_hw(priv->hw); + + at76_dbg(DBG_PROC_ENTRY, "%s: EXIT", __func__); +} + +static int at76_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + int ret; + struct at76_priv *priv; + struct fwentry *fwe; + struct usb_device *udev; + int op_mode; + int need_ext_fw = 0; + struct mib_fw_version fwv; + int board_type = (int)id->driver_info; + + udev = usb_get_dev(interface_to_usbdev(interface)); + + /* Load firmware into kernel memory */ + fwe = at76_load_firmware(udev, board_type); + if (!fwe) { + ret = -ENOENT; + goto error; + } + + op_mode = at76_get_op_mode(udev); + + at76_dbg(DBG_DEVSTART, "opmode %d", op_mode); + + /* we get OPMODE_NONE with 2.4.23, SMC2662W-AR ??? + we get 204 with 2.4.23, Fiberline FL-WL240u (505A+RFMD2958) ??? */ + + if (op_mode == OPMODE_HW_CONFIG_MODE) { + dev_printk(KERN_ERR, &interface->dev, + "cannot handle a device in HW_CONFIG_MODE\n"); + ret = -EBUSY; + goto error; + } + + if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH + && op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) { + /* download internal firmware part */ + dev_printk(KERN_DEBUG, &interface->dev, + "downloading internal firmware\n"); + ret = at76_load_internal_fw(udev, fwe); + if (ret < 0) { + dev_printk(KERN_ERR, &interface->dev, + "error %d downloading internal firmware\n", + ret); + goto error; + } + usb_put_dev(udev); + return ret; + } + + /* Internal firmware already inside the device. Get firmware + * version to test if external firmware is loaded. + * This works only for newer firmware, e.g. 
the Intersil 0.90.x + * says "control timeout on ep0in" and subsequent + * at76_get_op_mode() fail too :-( */ + + /* if version >= 0.100.x.y or device with built-in flash we can + * query the device for the fw version */ + if ((fwe->fw_version.major > 0 || fwe->fw_version.minor >= 100) + || (op_mode == OPMODE_NORMAL_NIC_WITH_FLASH)) { + ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv)); + if (ret < 0 || (fwv.major | fwv.minor) == 0) + need_ext_fw = 1; + } else + /* No way to check firmware version, reload to be sure */ + need_ext_fw = 1; + + if (need_ext_fw) { + dev_printk(KERN_DEBUG, &interface->dev, + "downloading external firmware\n"); + + ret = at76_load_external_fw(udev, fwe); + if (ret) + goto error; + + /* Re-check firmware version */ + ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv)); + if (ret < 0) { + dev_printk(KERN_ERR, &interface->dev, + "error %d getting firmware version\n", ret); + goto error; + } + } + + priv = at76_alloc_new_device(udev); + if (!priv) { + ret = -ENOMEM; + goto error; + } + + usb_set_intfdata(interface, priv); + + memcpy(&priv->fw_version, &fwv, sizeof(struct mib_fw_version)); + priv->board_type = board_type; + + ret = at76_init_new_device(priv, interface); + if (ret < 0) + at76_delete_device(priv); + + return ret; + +error: + usb_put_dev(udev); + return ret; +} + +static void at76_disconnect(struct usb_interface *interface) +{ + struct at76_priv *priv; + + priv = usb_get_intfdata(interface); + usb_set_intfdata(interface, NULL); + + /* Disconnect after loading internal firmware */ + if (!priv) + return; + + printk(KERN_INFO "%s: disconnecting\n", wiphy_name(priv->hw->wiphy)); + at76_delete_device(priv); + dev_printk(KERN_INFO, &interface->dev, "disconnected\n"); +} + +/* Structure for registering this driver with the USB subsystem */ +static struct usb_driver at76_driver = { + .name = DRIVER_NAME, + .probe = at76_probe, + .disconnect = at76_disconnect, + .id_table = dev_table, +}; + +static int __init at76_mod_init(void) +{ + int result; + + printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " loading\n"); + + mutex_init(&fw_mutex); + + /* register this driver with the USB subsystem */ + result = usb_register(&at76_driver); + if (result < 0) + printk(KERN_ERR DRIVER_NAME + ": usb_register failed (status %d)\n", result); + + led_trigger_register_simple("at76_usb-tx", &ledtrig_tx); + return result; +} + +static void __exit at76_mod_exit(void) +{ + int i; + + printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " unloading\n"); + usb_deregister(&at76_driver); + for (i = 0; i < ARRAY_SIZE(firmwares); i++) { + if (firmwares[i].fw) + release_firmware(firmwares[i].fw); + } + led_trigger_unregister_simple(ledtrig_tx); +} + +module_param_named(debug, at76_debug, uint, 0600); +MODULE_PARM_DESC(debug, "Debugging level"); + +module_init(at76_mod_init); +module_exit(at76_mod_exit); + +MODULE_AUTHOR("Oliver Kurth "); +MODULE_AUTHOR("Joerg Albert "); +MODULE_AUTHOR("Alex "); +MODULE_AUTHOR("Nick Jones"); +MODULE_AUTHOR("Balint Seeber "); +MODULE_AUTHOR("Pavel Roskin "); +MODULE_AUTHOR("Guido Guenther "); +MODULE_AUTHOR("Kalle Valo "); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h new file mode 100644 index 0000000..1ec5ccf --- /dev/null +++ b/drivers/net/wireless/at76c50x-usb.h @@ -0,0 +1,463 @@ +/* + * Copyright (c) 2002,2003 Oliver Kurth + * (c) 2003,2004 Joerg Albert + * (c) 2007 Guido Guenther + * + * This program is free software; you can redistribute it 
and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This driver was based on information from the Sourceforge driver + * released and maintained by Atmel: + * + * http://sourceforge.net/projects/atmelwlandriver/ + * + * Although the code was completely re-written, + * it would have been impossible without Atmel's decision to + * release an Open Source driver (unfortunately the firmware was + * kept binary only). Thanks for that decision to Atmel! + */ + +#ifndef _AT76_USB_H +#define _AT76_USB_H + +/* Board types */ +enum board_type { + BOARD_503_ISL3861 = 1, + BOARD_503_ISL3863 = 2, + BOARD_503 = 3, + BOARD_503_ACC = 4, + BOARD_505 = 5, + BOARD_505_2958 = 6, + BOARD_505A = 7, + BOARD_505AMX = 8 +}; + +#define CMD_STATUS_IDLE 0x00 +#define CMD_STATUS_COMPLETE 0x01 +#define CMD_STATUS_UNKNOWN 0x02 +#define CMD_STATUS_INVALID_PARAMETER 0x03 +#define CMD_STATUS_FUNCTION_NOT_SUPPORTED 0x04 +#define CMD_STATUS_TIME_OUT 0x07 +#define CMD_STATUS_IN_PROGRESS 0x08 +#define CMD_STATUS_HOST_FAILURE 0xff +#define CMD_STATUS_SCAN_FAILED 0xf0 + +/* answers to get op mode */ +#define OPMODE_NONE 0x00 +#define OPMODE_NORMAL_NIC_WITH_FLASH 0x01 +#define OPMODE_HW_CONFIG_MODE 0x02 +#define OPMODE_DFU_MODE_WITH_FLASH 0x03 +#define OPMODE_NORMAL_NIC_WITHOUT_FLASH 0x04 + +#define CMD_SET_MIB 0x01 +#define CMD_GET_MIB 0x02 +#define CMD_SCAN 0x03 +#define CMD_JOIN 0x04 +#define CMD_START_IBSS 0x05 +#define CMD_RADIO_ON 0x06 +#define CMD_RADIO_OFF 0x07 +#define CMD_STARTUP 0x0B + +#define MIB_LOCAL 0x01 +#define MIB_MAC_ADDR 0x02 +#define MIB_MAC 0x03 +#define MIB_MAC_MGMT 0x05 +#define MIB_MAC_WEP 0x06 +#define MIB_PHY 0x07 +#define MIB_FW_VERSION 0x08 +#define MIB_MDOMAIN 0x09 + +#define ADHOC_MODE 1 +#define INFRASTRUCTURE_MODE 2 + +/* values for struct mib_local, field preamble_type */ +#define PREAMBLE_TYPE_LONG 0 +#define PREAMBLE_TYPE_SHORT 1 +#define PREAMBLE_TYPE_AUTO 2 + +/* values for tx_rate */ +#define TX_RATE_1MBIT 0 +#define TX_RATE_2MBIT 1 +#define TX_RATE_5_5MBIT 2 +#define TX_RATE_11MBIT 3 +#define TX_RATE_AUTO 4 + +/* power management modes */ +#define AT76_PM_OFF 1 +#define AT76_PM_ON 2 +#define AT76_PM_SMART 3 + +struct hwcfg_r505 { + u8 cr39_values[14]; + u8 reserved1[14]; + u8 bb_cr[14]; + u8 pidvid[4]; + u8 mac_addr[ETH_ALEN]; + u8 regulatory_domain; + u8 reserved2[14]; + u8 cr15_values[14]; + u8 reserved3[3]; +} __attribute__((packed)); + +struct hwcfg_rfmd { + u8 cr20_values[14]; + u8 cr21_values[14]; + u8 bb_cr[14]; + u8 pidvid[4]; + u8 mac_addr[ETH_ALEN]; + u8 regulatory_domain; + u8 low_power_values[14]; + u8 normal_power_values[14]; + u8 reserved1[3]; +} __attribute__((packed)); + +struct hwcfg_intersil { + u8 mac_addr[ETH_ALEN]; + u8 cr31_values[14]; + u8 cr58_values[14]; + u8 pidvid[4]; + u8 regulatory_domain; + u8 reserved[1]; +} __attribute__((packed)); + +union at76_hwcfg { + struct hwcfg_intersil i; + struct hwcfg_rfmd r3; + struct hwcfg_r505 r5; +}; + +#define WEP_SMALL_KEY_LEN (40 / 8) +#define WEP_LARGE_KEY_LEN (104 / 8) +#define WEP_KEYS (4) + +struct at76_card_config { + u8 exclude_unencrypted; + u8 promiscuous_mode; + u8 short_retry_limit; + u8 encryption_type; + __le16 rts_threshold; + __le16 fragmentation_threshold; /* 256..2346 */ + u8 basic_rate_set[4]; + u8 auto_rate_fallback; /* 0,1 */ + u8 channel; + u8 privacy_invoked; + u8 wep_default_key_id; /* 0..3 */ + u8 current_ssid[32]; + u8 
wep_default_key_value[4][WEP_LARGE_KEY_LEN]; + u8 ssid_len; + u8 short_preamble; + __le16 beacon_period; +} __attribute__((packed)); + +struct at76_command { + u8 cmd; + u8 reserved; + __le16 size; + u8 data[0]; +} __attribute__((packed)); + +/* Length of Atmel-specific Rx header before 802.11 frame */ +#define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet) + +struct at76_rx_buffer { + __le16 wlength; + u8 rx_rate; + u8 newbss; + u8 fragmentation; + u8 rssi; + u8 link_quality; + u8 noise_level; + __le32 rx_time; + u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; +} __attribute__((packed)); + +/* Length of Atmel-specific Tx header before 802.11 frame */ +#define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet) + +struct at76_tx_buffer { + __le16 wlength; + u8 tx_rate; + u8 padding; + u8 reserved[4]; + u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; +} __attribute__((packed)); + +/* defines for scan_type below */ +#define SCAN_TYPE_ACTIVE 0 +#define SCAN_TYPE_PASSIVE 1 + +struct at76_req_scan { + u8 bssid[ETH_ALEN]; + u8 essid[32]; + u8 scan_type; + u8 channel; + __le16 probe_delay; + __le16 min_channel_time; + __le16 max_channel_time; + u8 essid_size; + u8 international_scan; +} __attribute__((packed)); + +struct at76_req_ibss { + u8 bssid[ETH_ALEN]; + u8 essid[32]; + u8 bss_type; + u8 channel; + u8 essid_size; + u8 reserved[3]; +} __attribute__((packed)); + +struct at76_req_join { + u8 bssid[ETH_ALEN]; + u8 essid[32]; + u8 bss_type; + u8 channel; + __le16 timeout; + u8 essid_size; + u8 reserved; +} __attribute__((packed)); + +struct set_mib_buffer { + u8 type; + u8 size; + u8 index; + u8 reserved; + union { + u8 byte; + __le16 word; + u8 addr[ETH_ALEN]; + } data; +} __attribute__((packed)); + +struct mib_local { + u16 reserved0; + u8 beacon_enable; + u8 txautorate_fallback; + u8 reserved1; + u8 ssid_size; + u8 promiscuous_mode; + u16 reserved2; + u8 preamble_type; + u16 reserved3; +} __attribute__((packed)); + +struct mib_mac_addr { + u8 mac_addr[ETH_ALEN]; + u8 res[2]; /* ??? 
*/ + u8 group_addr[4][ETH_ALEN]; + u8 group_addr_status[4]; +} __attribute__((packed)); + +struct mib_mac { + __le32 max_tx_msdu_lifetime; + __le32 max_rx_lifetime; + __le16 frag_threshold; + __le16 rts_threshold; + __le16 cwmin; + __le16 cwmax; + u8 short_retry_time; + u8 long_retry_time; + u8 scan_type; /* active or passive */ + u8 scan_channel; + __le16 probe_delay; /* delay before ProbeReq in active scan, RO */ + __le16 min_channel_time; + __le16 max_channel_time; + __le16 listen_interval; + u8 desired_ssid[32]; + u8 desired_bssid[ETH_ALEN]; + u8 desired_bsstype; /* ad-hoc or infrastructure */ + u8 reserved2; +} __attribute__((packed)); + +struct mib_mac_mgmt { + __le16 beacon_period; + __le16 CFP_max_duration; + __le16 medium_occupancy_limit; + __le16 station_id; /* assoc id */ + __le16 ATIM_window; + u8 CFP_mode; + u8 privacy_option_implemented; + u8 DTIM_period; + u8 CFP_period; + u8 current_bssid[ETH_ALEN]; + u8 current_essid[32]; + u8 current_bss_type; + u8 power_mgmt_mode; + /* rfmd and 505 */ + u8 ibss_change; + u8 res; + u8 multi_domain_capability_implemented; + u8 multi_domain_capability_enabled; + u8 country_string[3]; + u8 reserved[3]; +} __attribute__((packed)); + +struct mib_mac_wep { + u8 privacy_invoked; /* 0 disable encr., 1 enable encr */ + u8 wep_default_key_id; + u8 wep_key_mapping_len; + u8 exclude_unencrypted; + __le32 wep_icv_error_count; + __le32 wep_excluded_count; + u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN]; + u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */ +} __attribute__((packed)); + +struct mib_phy { + __le32 ed_threshold; + + __le16 slot_time; + __le16 sifs_time; + __le16 preamble_length; + __le16 plcp_header_length; + __le16 mpdu_max_length; + __le16 cca_mode_supported; + + u8 operation_rate_set[4]; + u8 channel_id; + u8 current_cca_mode; + u8 phy_type; + u8 current_reg_domain; +} __attribute__((packed)); + +struct mib_fw_version { + u8 major; + u8 minor; + u8 patch; + u8 build; +} __attribute__((packed)); + +struct mib_mdomain { + u8 tx_powerlevel[14]; + u8 channel_list[14]; /* 0 for invalid channels */ +} __attribute__((packed)); + +struct at76_fw_header { + __le32 crc; /* CRC32 of the whole image */ + __le32 board_type; /* firmware compatibility code */ + u8 build; /* firmware build number */ + u8 patch; /* firmware patch level */ + u8 minor; /* firmware minor version */ + u8 major; /* firmware major version */ + __le32 str_offset; /* offset of the copyright string */ + __le32 int_fw_offset; /* internal firmware image offset */ + __le32 int_fw_len; /* internal firmware image length */ + __le32 ext_fw_offset; /* external firmware image offset */ + __le32 ext_fw_len; /* external firmware image length */ +} __attribute__((packed)); + +/* a description of a regulatory domain and the allowed channels */ +struct reg_domain { + u16 code; + char const *name; + u32 channel_map; /* if bit N is set, channel (N+1) is allowed */ +}; + +/* Data for one loaded firmware file */ +struct fwentry { + const char *const fwname; + const struct firmware *fw; + int extfw_size; + int intfw_size; + /* pointer to loaded firmware, no need to free */ + u8 *extfw; /* external firmware, extfw_size bytes long */ + u8 *intfw; /* internal firmware, intfw_size bytes long */ + enum board_type board_type; /* board type */ + struct mib_fw_version fw_version; + int loaded; /* Loaded and parsed successfully */ +}; + +struct at76_priv { + struct usb_device *udev; /* USB device pointer */ + + struct sk_buff *rx_skb; /* skbuff for receiving data */ + struct sk_buff 
*tx_skb; /* skbuff for transmitting data */ + void *bulk_out_buffer; /* buffer for sending data */ + + struct urb *tx_urb; /* URB for sending data */ + struct urb *rx_urb; /* URB for receiving data */ + + unsigned int tx_pipe; /* bulk out pipe */ + unsigned int rx_pipe; /* bulk in pipe */ + + struct mutex mtx; /* locks this structure */ + + /* work queues */ + struct work_struct work_set_promisc; + struct work_struct work_submit_rx; + struct delayed_work dwork_hw_scan; + + struct tasklet_struct rx_tasklet; + + /* the WEP stuff */ + int wep_enabled; /* 1 if WEP is enabled */ + int wep_key_id; /* key id to be used */ + u8 wep_keys[WEP_KEYS][WEP_LARGE_KEY_LEN]; /* WEP keys */ + u8 wep_keys_len[WEP_KEYS]; /* length of WEP keys */ + + int channel; + int iw_mode; + u8 bssid[ETH_ALEN]; + u8 essid[IW_ESSID_MAX_SIZE]; + int essid_size; + int radio_on; + int promisc; + + int preamble_type; /* 0 - long, 1 - short, 2 - auto */ + int auth_mode; /* authentication type: 0 open, 1 shared key */ + int txrate; /* 0,1,2,3 = 1,2,5.5,11 Mbps, 4 is auto */ + int frag_threshold; /* threshold for fragmentation of tx packets */ + int rts_threshold; /* threshold for RTS mechanism */ + int short_retry_limit; + + int scan_min_time; /* scan min channel time */ + int scan_max_time; /* scan max channel time */ + int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */ + int scan_need_any; /* if set, need to scan for any ESSID */ + + u16 assoc_id; /* current association ID, if associated */ + + u8 pm_mode; /* power management mode */ + u32 pm_period; /* power management period in microseconds */ + + struct reg_domain const *domain; /* reg domain description */ + + /* These fields contain HW config provided by the device (not all of + * these fields are used by all board types) */ + u8 mac_addr[ETH_ALEN]; + u8 regulatory_domain; + + struct at76_card_config card_config; + + enum board_type board_type; + struct mib_fw_version fw_version; + + unsigned int device_unplugged:1; + unsigned int netdev_registered:1; + struct set_mib_buffer mib_buf; /* global buffer for set_mib calls */ + + int beacon_period; /* period of mgmt beacons, Kus */ + + struct ieee80211_hw *hw; + int mac80211_registered; +}; + +#define AT76_SUPPORTED_FILTERS FIF_PROMISC_IN_BSS + +#define SCAN_POLL_INTERVAL (HZ / 4) + +#define CMD_COMPLETION_TIMEOUT (5 * HZ) + +#define DEF_RTS_THRESHOLD 1536 +#define DEF_FRAG_THRESHOLD 1536 +#define DEF_SHORT_RETRY_LIMIT 8 +#define DEF_CHANNEL 10 +#define DEF_SCAN_MIN_TIME 10 +#define DEF_SCAN_MAX_TIME 120 + +/* the max padding size for tx in bytes (see calc_padding) */ +#define MAX_PADDING_SIZE 53 + +#endif /* _AT76_USB_H */ diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile new file mode 100644 index 0000000..8113a50 --- /dev/null +++ b/drivers/net/wireless/ath/Makefile @@ -0,0 +1,11 @@ +obj-$(CONFIG_ATH5K) += ath5k/ +obj-$(CONFIG_ATH9K_HW) += ath9k/ +obj-$(CONFIG_AR9170_USB) += ar9170/ + +obj-$(CONFIG_ATH_COMMON) += ath.o + +ath-objs := main.o \ + regd.o \ + hw.o + +ath-$(CONFIG_ATH_DEBUG) += debug.o diff --git a/drivers/net/wireless/ath/ar9170/Makefile b/drivers/net/wireless/ath/ar9170/Makefile new file mode 100644 index 0000000..8d91c7e --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/Makefile @@ -0,0 +1,3 @@ +ar9170usb-objs := usb.o main.o cmd.o mac.o phy.o led.o + +obj-$(CONFIG_AR9170_USB) += ar9170usb.o diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h new file mode 100644 index 0000000..dc662b7 --- /dev/null +++ 
b/drivers/net/wireless/ath/ar9170/ar9170.h @@ -0,0 +1,306 @@ +/* + * Atheros AR9170 driver + * + * Driver specific definitions + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __AR9170_H +#define __AR9170_H + +#include +#include +#include +#include +#ifdef CONFIG_AR9170_LEDS +#include +#endif /* CONFIG_AR9170_LEDS */ +#include "eeprom.h" +#include "hw.h" + +#include "../regd.h" + +#define PAYLOAD_MAX (AR9170_MAX_CMD_LEN/4 - 1) + +enum ar9170_bw { + AR9170_BW_20, + AR9170_BW_40_BELOW, + AR9170_BW_40_ABOVE, + + __AR9170_NUM_BW, +}; + +static inline enum ar9170_bw nl80211_to_ar9170(enum nl80211_channel_type type) +{ + switch (type) { + case NL80211_CHAN_NO_HT: + case NL80211_CHAN_HT20: + return AR9170_BW_20; + case NL80211_CHAN_HT40MINUS: + return AR9170_BW_40_BELOW; + case NL80211_CHAN_HT40PLUS: + return AR9170_BW_40_ABOVE; + default: + BUG(); + } +} + +enum ar9170_rf_init_mode { + AR9170_RFI_NONE, + AR9170_RFI_WARM, + AR9170_RFI_COLD, +}; + +#define AR9170_MAX_RX_BUFFER_SIZE 8192 + +#ifdef CONFIG_AR9170_LEDS +struct ar9170; + +struct ar9170_led { + struct ar9170 *ar; + struct led_classdev l; + char name[32]; + unsigned int toggled; + bool last_state; + bool registered; +}; + +#endif /* CONFIG_AR9170_LEDS */ + +enum ar9170_device_state { + AR9170_UNKNOWN_STATE, + AR9170_STOPPED, + AR9170_IDLE, + AR9170_STARTED, +}; + +struct ar9170_rxstream_mpdu_merge { + struct ar9170_rx_head plcp; + bool has_plcp; +}; + +#define AR9170_NUM_TID 16 +#define WME_BA_BMP_SIZE 64 +#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE) + +#define WME_AC_BE 2 +#define WME_AC_BK 3 +#define WME_AC_VI 1 +#define WME_AC_VO 0 + +#define TID_TO_WME_AC(_tid) \ + ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ + (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ + (((_tid) == 4) || ((_tid) == 5)) ? 
WME_AC_VI : \ + WME_AC_VO) + +#define BAW_WITHIN(_start, _bawsz, _seqno) \ + ((((_seqno) - (_start)) & 0xfff) < (_bawsz)) + +enum ar9170_tid_state { + AR9170_TID_STATE_INVALID, + AR9170_TID_STATE_SHUTDOWN, + AR9170_TID_STATE_PROGRESS, + AR9170_TID_STATE_COMPLETE, +}; + +struct ar9170_sta_tid { + struct list_head list; + struct sk_buff_head queue; + u8 addr[ETH_ALEN]; + u16 ssn; + u16 tid; + enum ar9170_tid_state state; + bool active; +}; + +struct ar9170_tx_queue_stats { + unsigned int len; + unsigned int limit; + unsigned int count; +}; + +#define AR9170_QUEUE_TIMEOUT 64 +#define AR9170_TX_TIMEOUT 8 +#define AR9170_BA_TIMEOUT 4 +#define AR9170_JANITOR_DELAY 128 +#define AR9170_TX_INVALID_RATE 0xffffffff + +#define AR9170_NUM_TX_STATUS 128 +#define AR9170_NUM_TX_AGG_MAX 30 +#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH +#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10) + +struct ar9170 { + struct ieee80211_hw *hw; + struct ath_common common; + struct mutex mutex; + enum ar9170_device_state state; + bool registered; + unsigned long bad_hw_nagger; + + int (*open)(struct ar9170 *); + void (*stop)(struct ar9170 *); + int (*tx)(struct ar9170 *, struct sk_buff *); + int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32 , + void *, u32 , void *); + void (*callback_cmd)(struct ar9170 *, u32 , void *); + int (*flush)(struct ar9170 *); + + /* interface mode settings */ + struct ieee80211_vif *vif; + + /* beaconing */ + struct sk_buff *beacon; + struct work_struct beacon_work; + bool enable_beacon; + + /* cryptographic engine */ + u64 usedkeys; + bool rx_software_decryption; + bool disable_offload; + + /* filter settings */ + u64 cur_mc_hash; + u32 cur_filter; + unsigned int filter_state; + bool sniffer_enabled; + + /* PHY */ + struct ieee80211_channel *channel; + int noise[4]; + + /* power calibration data */ + u8 power_5G_leg[4]; + u8 power_2G_cck[4]; + u8 power_2G_ofdm[4]; + u8 power_5G_ht20[8]; + u8 power_5G_ht40[8]; + u8 power_2G_ht20[8]; + u8 power_2G_ht40[8]; + + u8 phy_heavy_clip; + +#ifdef CONFIG_AR9170_LEDS + struct delayed_work led_work; + struct ar9170_led leds[AR9170_NUM_LEDS]; +#endif /* CONFIG_AR9170_LEDS */ + + /* qos queue settings */ + spinlock_t tx_stats_lock; + struct ar9170_tx_queue_stats tx_stats[5]; + struct ieee80211_tx_queue_params edcf[5]; + + spinlock_t cmdlock; + __le32 cmdbuf[PAYLOAD_MAX + 1]; + + /* MAC statistics */ + struct ieee80211_low_level_stats stats; + + /* EEPROM */ + struct ar9170_eeprom eeprom; + + /* tx queues - as seen by hw - */ + struct sk_buff_head tx_pending[__AR9170_NUM_TXQ]; + struct sk_buff_head tx_status[__AR9170_NUM_TXQ]; + struct delayed_work tx_janitor; + /* tx ampdu */ + struct sk_buff_head tx_status_ampdu; + spinlock_t tx_ampdu_list_lock; + struct list_head tx_ampdu_list; + atomic_t tx_ampdu_pending; + + /* rxstream mpdu merge */ + struct ar9170_rxstream_mpdu_merge rx_mpdu; + struct sk_buff *rx_failover; + int rx_failover_missing; + + /* (cached) HW A-MPDU settings */ + u8 global_ampdu_density; + u8 global_ampdu_factor; +}; + +struct ar9170_sta_info { + struct ar9170_sta_tid agg[AR9170_NUM_TID]; + unsigned int ampdu_max_len; +}; + +struct ar9170_tx_info { + unsigned long timeout; +}; + +#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED) +#define IS_ACCEPTING_CMD(a) (((struct ar9170 *)a)->state >= AR9170_IDLE) + +/* exported interface */ +void *ar9170_alloc(size_t priv_size); +int ar9170_register(struct ar9170 *ar, struct device *pdev); +void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb); +void 
ar9170_unregister(struct ar9170 *ar); +void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb); +void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len); +int ar9170_nag_limiter(struct ar9170 *ar); + +/* MAC */ +int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +int ar9170_init_mac(struct ar9170 *ar); +int ar9170_set_qos(struct ar9170 *ar); +int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast); +int ar9170_update_frame_filter(struct ar9170 *ar, const u32 filter); +int ar9170_set_operating_mode(struct ar9170 *ar); +int ar9170_set_beacon_timers(struct ar9170 *ar); +int ar9170_set_dyn_sifs_ack(struct ar9170 *ar); +int ar9170_set_slot_time(struct ar9170 *ar); +int ar9170_set_basic_rates(struct ar9170 *ar); +int ar9170_set_hwretry_limit(struct ar9170 *ar, u32 max_retry); +int ar9170_update_beacon(struct ar9170 *ar); +void ar9170_new_beacon(struct work_struct *work); +int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype, + u8 keyidx, u8 *keydata, int keylen); +int ar9170_disable_key(struct ar9170 *ar, u8 id); + +/* LEDs */ +#ifdef CONFIG_AR9170_LEDS +int ar9170_register_leds(struct ar9170 *ar); +void ar9170_unregister_leds(struct ar9170 *ar); +#endif /* CONFIG_AR9170_LEDS */ +int ar9170_init_leds(struct ar9170 *ar); +int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state); + +/* PHY / RF */ +int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band); +int ar9170_init_rf(struct ar9170 *ar); +int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, + enum ar9170_rf_init_mode rfi, enum ar9170_bw bw); + +#endif /* __AR9170_H */ diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c new file mode 100644 index 0000000..cf6f5c4 --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/cmd.c @@ -0,0 +1,128 @@ +/* + * Atheros AR9170 driver + * + * Basic HW register/memory/command access functions + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ar9170.h" +#include "cmd.h" + +int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len) +{ + int err; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return 0; + + err = ar->exec_cmd(ar, AR9170_CMD_WMEM, len, (u8 *) data, 0, NULL); + if (err) + printk(KERN_DEBUG "%s: writing memory failed\n", + wiphy_name(ar->hw->wiphy)); + return err; +} + +int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val) +{ + __le32 buf[2] = { + cpu_to_le32(reg), + cpu_to_le32(val), + }; + int err; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return 0; + + err = ar->exec_cmd(ar, AR9170_CMD_WREG, sizeof(buf), + (u8 *) buf, 0, NULL); + if (err) + printk(KERN_DEBUG "%s: writing reg %#x (val %#x) failed\n", + wiphy_name(ar->hw->wiphy), reg, val); + return err; +} + +int ar9170_read_mreg(struct ar9170 *ar, int nregs, const u32 *regs, u32 *out) +{ + int i, err; + __le32 *offs, *res; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return 0; + + /* abuse "out" for the register offsets, must be same length */ + offs = (__le32 *)out; + for (i = 0; i < nregs; i++) + offs[i] = cpu_to_le32(regs[i]); + + /* also use the same buffer for the input */ + res = (__le32 *)out; + + err = ar->exec_cmd(ar, AR9170_CMD_RREG, + 4 * nregs, (u8 *)offs, + 4 * nregs, (u8 *)res); + if (err) + return err; + + /* convert result to cpu endian */ + for (i = 0; i < nregs; i++) + out[i] = le32_to_cpu(res[i]); + + return 0; +} + +int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val) +{ + return ar9170_read_mreg(ar, 1, ®, val); +} + +int ar9170_echo_test(struct ar9170 *ar, u32 v) +{ + __le32 echobuf = cpu_to_le32(v); + __le32 echores; + int err; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return -ENODEV; + + err = ar->exec_cmd(ar, AR9170_CMD_ECHO, + 4, (u8 *)&echobuf, + 4, (u8 *)&echores); + if (err) + return err; + + if (echobuf != echores) + return -EINVAL; + + return 0; +} diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h new file mode 100644 index 0000000..826c45e --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/cmd.h @@ -0,0 +1,92 @@ +/* + * Atheros AR9170 driver + * + * Basic HW register/memory/command access functions + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. 
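A hypothetical usage sketch (editor's note, not part of the patch) for the multi-register read helper above: ar9170_read_mreg() reuses the caller's output array as the command payload, so it must hold nregs 32-bit words. The register names are the MAC counters defined in hw.h later in this patch; the helper name is made up for illustration and assumes the driver's own headers are in scope.

        /* Sketch only: fetch two receive counters in a single round trip. */
        static int example_read_rx_counters(struct ar9170 *ar, u32 *total, u32 *crc_err)
        {
                static const u32 regs[2] = {
                        AR9170_MAC_REG_RX_TOTAL,
                        AR9170_MAC_REG_RX_CRC32,
                };
                u32 vals[2];
                int err;

                err = ar9170_read_mreg(ar, 2, regs, vals);
                if (err)
                        return err;

                *total = vals[0];
                *crc_err = vals[1];
                return 0;
        }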
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __CMD_H +#define __CMD_H + +#include "ar9170.h" + +/* basic HW access */ +int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len); +int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val); +int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val); +int ar9170_read_mreg(struct ar9170 *ar, int nregs, const u32 *regs, u32 *out); +int ar9170_echo_test(struct ar9170 *ar, u32 v); + +/* + * Macros to facilitate writing multiple registers in a single + * write-combining USB command. Note that when the first group + * fails the whole thing will fail without any others attempted, + * but you won't know which write in the group failed. + */ +#define ar9170_regwrite_begin(ar) \ +do { \ + int __nreg = 0, __err = 0; \ + struct ar9170 *__ar = ar; + +#define ar9170_regwrite(r, v) do { \ + __ar->cmdbuf[2 * __nreg + 1] = cpu_to_le32(r); \ + __ar->cmdbuf[2 * __nreg + 2] = cpu_to_le32(v); \ + __nreg++; \ + if ((__nreg >= PAYLOAD_MAX/2)) { \ + if (IS_ACCEPTING_CMD(__ar)) \ + __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \ + 8 * __nreg, \ + (u8 *) &__ar->cmdbuf[1], \ + 0, NULL); \ + __nreg = 0; \ + if (__err) \ + goto __regwrite_out; \ + } \ +} while (0) + +#define ar9170_regwrite_finish() \ +__regwrite_out : \ + if (__nreg) { \ + if (IS_ACCEPTING_CMD(__ar)) \ + __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \ + 8 * __nreg, \ + (u8 *) &__ar->cmdbuf[1], \ + 0, NULL); \ + __nreg = 0; \ + } + +#define ar9170_regwrite_result() \ + __err; \ +} while (0); + +#endif /* __CMD_H */ diff --git a/drivers/net/wireless/ath/ar9170/eeprom.h b/drivers/net/wireless/ath/ar9170/eeprom.h new file mode 100644 index 0000000..d2c8cc8 --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/eeprom.h @@ -0,0 +1,179 @@ +/* + * Atheros AR9170 driver + * + * EEPROM layout + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. 
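The write-combining macros above are easiest to see with a usage sketch (hypothetical, not part of the patch). With AR9170_MAX_CMD_LEN at 64 bytes, PAYLOAD_MAX works out to 15 32-bit words, so ar9170_regwrite() flushes an AR9170_CMD_WREG command after every 7 register/value pairs and ar9170_regwrite_finish() sends whatever is left. The pattern below mirrors ar9170_set_qos() and ar9170_init_mac() in mac.c, from which the register values are borrowed; only the function name is invented.

        /* Sketch only: batch two register writes into one USB command. */
        static int example_write_combined(struct ar9170 *ar)
        {
                ar9170_regwrite_begin(ar);

                ar9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70);
                ar9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);

                ar9170_regwrite_finish();

                return ar9170_regwrite_result();
        }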
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __AR9170_EEPROM_H +#define __AR9170_EEPROM_H + +#define AR5416_MAX_CHAINS 2 +#define AR5416_MODAL_SPURS 5 + +struct ar9170_eeprom_modal { + __le32 antCtrlChain[AR5416_MAX_CHAINS]; + __le32 antCtrlCommon; + s8 antennaGainCh[AR5416_MAX_CHAINS]; + u8 switchSettling; + u8 txRxAttenCh[AR5416_MAX_CHAINS]; + u8 rxTxMarginCh[AR5416_MAX_CHAINS]; + s8 adcDesiredSize; + s8 pgaDesiredSize; + u8 xlnaGainCh[AR5416_MAX_CHAINS]; + u8 txEndToXpaOff; + u8 txEndToRxOn; + u8 txFrameToXpaOn; + u8 thresh62; + s8 noiseFloorThreshCh[AR5416_MAX_CHAINS]; + u8 xpdGain; + u8 xpd; + s8 iqCalICh[AR5416_MAX_CHAINS]; + s8 iqCalQCh[AR5416_MAX_CHAINS]; + u8 pdGainOverlap; + u8 ob; + u8 db; + u8 xpaBiasLvl; + u8 pwrDecreaseFor2Chain; + u8 pwrDecreaseFor3Chain; + u8 txFrameToDataStart; + u8 txFrameToPaOn; + u8 ht40PowerIncForPdadc; + u8 bswAtten[AR5416_MAX_CHAINS]; + u8 bswMargin[AR5416_MAX_CHAINS]; + u8 swSettleHt40; + u8 reserved[22]; + struct spur_channel { + __le16 spurChan; + u8 spurRangeLow; + u8 spurRangeHigh; + } __packed spur_channels[AR5416_MODAL_SPURS]; +} __packed; + +#define AR5416_NUM_PD_GAINS 4 +#define AR5416_PD_GAIN_ICEPTS 5 + +struct ar9170_calibration_data_per_freq { + u8 pwr_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS]; + u8 vpd_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS]; +} __packed; + +#define AR5416_NUM_5G_CAL_PIERS 8 +#define AR5416_NUM_2G_CAL_PIERS 4 + +#define AR5416_NUM_5G_TARGET_PWRS 8 +#define AR5416_NUM_2G_CCK_TARGET_PWRS 3 +#define AR5416_NUM_2G_OFDM_TARGET_PWRS 4 +#define AR5416_MAX_NUM_TGT_PWRS 8 + +struct ar9170_calibration_target_power_legacy { + u8 freq; + u8 power[4]; +} __packed; + +struct ar9170_calibration_target_power_ht { + u8 freq; + u8 power[8]; +} __packed; + +#define AR5416_NUM_CTLS 24 + +struct ar9170_calctl_edges { + u8 channel; +#define AR9170_CALCTL_EDGE_FLAGS 0xC0 + u8 power_flags; +} __packed; + +#define AR5416_NUM_BAND_EDGES 8 + +struct ar9170_calctl_data { + struct ar9170_calctl_edges + control_edges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES]; +} __packed; + + +struct ar9170_eeprom { + __le16 length; + __le16 checksum; + __le16 version; + u8 operating_flags; +#define AR9170_OPFLAG_5GHZ 1 +#define AR9170_OPFLAG_2GHZ 2 + u8 misc; + __le16 reg_domain[2]; + u8 mac_address[6]; + u8 rx_mask; + u8 tx_mask; + __le16 rf_silent; + __le16 bluetooth_options; + __le16 device_capabilities; + __le32 build_number; + u8 deviceType; + u8 reserved[33]; + + u8 customer_data[64]; + + struct ar9170_eeprom_modal + modal_header[2]; + + u8 cal_freq_pier_5G[AR5416_NUM_5G_CAL_PIERS]; + u8 cal_freq_pier_2G[AR5416_NUM_2G_CAL_PIERS]; + + struct ar9170_calibration_data_per_freq + cal_pier_data_5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS], + cal_pier_data_2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS]; + + /* power calibration data */ + struct 
ar9170_calibration_target_power_legacy + cal_tgt_pwr_5G[AR5416_NUM_5G_TARGET_PWRS]; + struct ar9170_calibration_target_power_ht + cal_tgt_pwr_5G_ht20[AR5416_NUM_5G_TARGET_PWRS], + cal_tgt_pwr_5G_ht40[AR5416_NUM_5G_TARGET_PWRS]; + + struct ar9170_calibration_target_power_legacy + cal_tgt_pwr_2G_cck[AR5416_NUM_2G_CCK_TARGET_PWRS], + cal_tgt_pwr_2G_ofdm[AR5416_NUM_2G_OFDM_TARGET_PWRS]; + struct ar9170_calibration_target_power_ht + cal_tgt_pwr_2G_ht20[AR5416_NUM_2G_OFDM_TARGET_PWRS], + cal_tgt_pwr_2G_ht40[AR5416_NUM_2G_OFDM_TARGET_PWRS]; + + /* conformance testing limits */ + u8 ctl_index[AR5416_NUM_CTLS]; + struct ar9170_calctl_data + ctl_data[AR5416_NUM_CTLS]; + + u8 pad; + __le16 subsystem_id; +} __packed; + +#endif /* __AR9170_EEPROM_H */ diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h new file mode 100644 index 0000000..0a1d4c2 --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/hw.h @@ -0,0 +1,429 @@ +/* + * Atheros AR9170 driver + * + * Hardware-specific definitions + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +#ifndef __AR9170_HW_H +#define __AR9170_HW_H + +#define AR9170_MAX_CMD_LEN 64 + +enum ar9170_cmd { + AR9170_CMD_RREG = 0x00, + AR9170_CMD_WREG = 0x01, + AR9170_CMD_RMEM = 0x02, + AR9170_CMD_WMEM = 0x03, + AR9170_CMD_BITAND = 0x04, + AR9170_CMD_BITOR = 0x05, + AR9170_CMD_EKEY = 0x28, + AR9170_CMD_DKEY = 0x29, + AR9170_CMD_FREQUENCY = 0x30, + AR9170_CMD_RF_INIT = 0x31, + AR9170_CMD_SYNTH = 0x32, + AR9170_CMD_FREQ_START = 0x33, + AR9170_CMD_ECHO = 0x80, + AR9170_CMD_TALLY = 0x81, + AR9170_CMD_TALLY_APD = 0x82, + AR9170_CMD_CONFIG = 0x83, + AR9170_CMD_RESET = 0x90, + AR9170_CMD_DKRESET = 0x91, + AR9170_CMD_DKTX_STATUS = 0x92, + AR9170_CMD_FDC = 0xA0, + AR9170_CMD_WREEPROM = 0xB0, + AR9170_CMD_WFLASH = 0xB0, + AR9170_CMD_FLASH_ERASE = 0xB1, + AR9170_CMD_FLASH_PROG = 0xB2, + AR9170_CMD_FLASH_CHKSUM = 0xB3, + AR9170_CMD_FLASH_READ = 0xB4, + AR9170_CMD_FW_DL_INIT = 0xB5, + AR9170_CMD_MEM_WREEPROM = 0xBB, +}; + +/* endpoints */ +#define AR9170_EP_TX 1 +#define AR9170_EP_RX 2 +#define AR9170_EP_IRQ 3 +#define AR9170_EP_CMD 4 + +#define AR9170_EEPROM_START 0x1600 + +#define AR9170_GPIO_REG_BASE 0x1d0100 +#define AR9170_GPIO_REG_PORT_TYPE AR9170_GPIO_REG_BASE +#define AR9170_GPIO_REG_DATA (AR9170_GPIO_REG_BASE + 4) +#define AR9170_NUM_LEDS 2 + + +#define AR9170_USB_REG_BASE 0x1e1000 +#define AR9170_USB_REG_DMA_CTL (AR9170_USB_REG_BASE + 0x108) +#define AR9170_DMA_CTL_ENABLE_TO_DEVICE 0x1 +#define AR9170_DMA_CTL_ENABLE_FROM_DEVICE 0x2 +#define AR9170_DMA_CTL_HIGH_SPEED 0x4 +#define AR9170_DMA_CTL_PACKET_MODE 0x8 + +#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110) +#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114) + + + +#define AR9170_MAC_REG_BASE 0x1c3000 + +#define AR9170_MAC_REG_TSF_L (AR9170_MAC_REG_BASE + 0x514) +#define AR9170_MAC_REG_TSF_H (AR9170_MAC_REG_BASE + 0x518) + +#define AR9170_MAC_REG_ATIM_WINDOW (AR9170_MAC_REG_BASE + 0x51C) +#define AR9170_MAC_REG_BCN_PERIOD (AR9170_MAC_REG_BASE + 0x520) +#define AR9170_MAC_REG_PRETBTT (AR9170_MAC_REG_BASE + 0x524) + +#define AR9170_MAC_REG_MAC_ADDR_L (AR9170_MAC_REG_BASE + 0x610) +#define AR9170_MAC_REG_MAC_ADDR_H (AR9170_MAC_REG_BASE + 0x614) +#define AR9170_MAC_REG_BSSID_L (AR9170_MAC_REG_BASE + 0x618) +#define AR9170_MAC_REG_BSSID_H (AR9170_MAC_REG_BASE + 0x61c) + +#define AR9170_MAC_REG_GROUP_HASH_TBL_L (AR9170_MAC_REG_BASE + 0x624) +#define AR9170_MAC_REG_GROUP_HASH_TBL_H (AR9170_MAC_REG_BASE + 0x628) + +#define AR9170_MAC_REG_RX_TIMEOUT (AR9170_MAC_REG_BASE + 0x62C) + +#define AR9170_MAC_REG_BASIC_RATE (AR9170_MAC_REG_BASE + 0x630) +#define AR9170_MAC_REG_MANDATORY_RATE (AR9170_MAC_REG_BASE + 0x634) +#define AR9170_MAC_REG_RTS_CTS_RATE (AR9170_MAC_REG_BASE + 0x638) +#define AR9170_MAC_REG_BACKOFF_PROTECT (AR9170_MAC_REG_BASE + 0x63c) +#define AR9170_MAC_REG_RX_THRESHOLD (AR9170_MAC_REG_BASE + 0x640) +#define AR9170_MAC_REG_RX_PE_DELAY (AR9170_MAC_REG_BASE + 0x64C) + +#define AR9170_MAC_REG_DYNAMIC_SIFS_ACK (AR9170_MAC_REG_BASE + 0x658) +#define AR9170_MAC_REG_SNIFFER (AR9170_MAC_REG_BASE + 0x674) +#define AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC BIT(0) +#define AR9170_MAC_REG_SNIFFER_DEFAULTS 0x02000000 +#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678) +#define AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE BIT(3) +#define AR9170_MAC_REG_ENCRYPTION_DEFAULTS 0x70 + +#define AR9170_MAC_REG_MISC_680 (AR9170_MAC_REG_BASE + 0x680) +#define AR9170_MAC_REG_TX_UNDERRUN (AR9170_MAC_REG_BASE + 0x688) + +#define AR9170_MAC_REG_FRAMETYPE_FILTER (AR9170_MAC_REG_BASE + 0x68c) +#define 
AR9170_MAC_REG_FTF_ASSOC_REQ BIT(0) +#define AR9170_MAC_REG_FTF_ASSOC_RESP BIT(1) +#define AR9170_MAC_REG_FTF_REASSOC_REQ BIT(2) +#define AR9170_MAC_REG_FTF_REASSOC_RESP BIT(3) +#define AR9170_MAC_REG_FTF_PRB_REQ BIT(4) +#define AR9170_MAC_REG_FTF_PRB_RESP BIT(5) +#define AR9170_MAC_REG_FTF_BIT6 BIT(6) +#define AR9170_MAC_REG_FTF_BIT7 BIT(7) +#define AR9170_MAC_REG_FTF_BEACON BIT(8) +#define AR9170_MAC_REG_FTF_ATIM BIT(9) +#define AR9170_MAC_REG_FTF_DEASSOC BIT(10) +#define AR9170_MAC_REG_FTF_AUTH BIT(11) +#define AR9170_MAC_REG_FTF_DEAUTH BIT(12) +#define AR9170_MAC_REG_FTF_BIT13 BIT(13) +#define AR9170_MAC_REG_FTF_BIT14 BIT(14) +#define AR9170_MAC_REG_FTF_BIT15 BIT(15) +#define AR9170_MAC_REG_FTF_BAR BIT(24) +#define AR9170_MAC_REG_FTF_BA BIT(25) +#define AR9170_MAC_REG_FTF_PSPOLL BIT(26) +#define AR9170_MAC_REG_FTF_RTS BIT(27) +#define AR9170_MAC_REG_FTF_CTS BIT(28) +#define AR9170_MAC_REG_FTF_ACK BIT(29) +#define AR9170_MAC_REG_FTF_CFE BIT(30) +#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31) +#define AR9170_MAC_REG_FTF_DEFAULTS 0x0700ffff +#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff + +#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0) +#define AR9170_MAC_REG_RX_CRC32 (AR9170_MAC_REG_BASE + 0x6A4) +#define AR9170_MAC_REG_RX_CRC16 (AR9170_MAC_REG_BASE + 0x6A8) +#define AR9170_MAC_REG_RX_ERR_DECRYPTION_UNI (AR9170_MAC_REG_BASE + 0x6AC) +#define AR9170_MAC_REG_RX_OVERRUN (AR9170_MAC_REG_BASE + 0x6B0) +#define AR9170_MAC_REG_RX_ERR_DECRYPTION_MUL (AR9170_MAC_REG_BASE + 0x6BC) +#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6CC) +#define AR9170_MAC_REG_TX_TOTAL (AR9170_MAC_REG_BASE + 0x6F4) + + +#define AR9170_MAC_REG_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0x690) +#define AR9170_MAC_REG_EIFS_AND_SIFS (AR9170_MAC_REG_BASE + 0x698) + +#define AR9170_MAC_REG_SLOT_TIME (AR9170_MAC_REG_BASE + 0x6F0) + +#define AR9170_MAC_REG_POWERMANAGEMENT (AR9170_MAC_REG_BASE + 0x700) +#define AR9170_MAC_REG_POWERMGT_IBSS 0xe0 +#define AR9170_MAC_REG_POWERMGT_AP 0xa1 +#define AR9170_MAC_REG_POWERMGT_STA 0x2 +#define AR9170_MAC_REG_POWERMGT_AP_WDS 0x3 +#define AR9170_MAC_REG_POWERMGT_DEFAULTS (0xf << 24) + +#define AR9170_MAC_REG_ROLL_CALL_TBL_L (AR9170_MAC_REG_BASE + 0x704) +#define AR9170_MAC_REG_ROLL_CALL_TBL_H (AR9170_MAC_REG_BASE + 0x708) + +#define AR9170_MAC_REG_AC0_CW (AR9170_MAC_REG_BASE + 0xB00) +#define AR9170_MAC_REG_AC1_CW (AR9170_MAC_REG_BASE + 0xB04) +#define AR9170_MAC_REG_AC2_CW (AR9170_MAC_REG_BASE + 0xB08) +#define AR9170_MAC_REG_AC3_CW (AR9170_MAC_REG_BASE + 0xB0C) +#define AR9170_MAC_REG_AC4_CW (AR9170_MAC_REG_BASE + 0xB10) +#define AR9170_MAC_REG_AC1_AC0_AIFS (AR9170_MAC_REG_BASE + 0xB14) +#define AR9170_MAC_REG_AC3_AC2_AIFS (AR9170_MAC_REG_BASE + 0xB18) + +#define AR9170_MAC_REG_RETRY_MAX (AR9170_MAC_REG_BASE + 0xB28) + +#define AR9170_MAC_REG_FCS_SELECT (AR9170_MAC_REG_BASE + 0xBB0) +#define AR9170_MAC_FCS_SWFCS 0x1 +#define AR9170_MAC_FCS_FIFO_PROT 0x4 + + +#define AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND (AR9170_MAC_REG_BASE + 0xB30) + +#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xB44) +#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xB48) + +#define AR9170_MAC_REG_AMPDU_FACTOR (AR9170_MAC_REG_BASE + 0xB9C) +#define AR9170_MAC_REG_AMPDU_DENSITY (AR9170_MAC_REG_BASE + 0xBA0) + +#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xC00) +#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xC50) + +#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xD7C) +#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f +#define 
AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0 +#define AR9170_MAC_TXRX_MPI_RX_MPI_MASK 0x000f0000 +#define AR9170_MAC_TXRX_MPI_RX_TO_MASK 0xfff00000 + +#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xD84) +#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xD88) +#define AR9170_MAC_REG_BCN_PLCP (AR9170_MAC_REG_BASE + 0xD90) +#define AR9170_MAC_REG_BCN_CTRL (AR9170_MAC_REG_BASE + 0xD94) +#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xDA0) +#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xDA4) + + +#define AR9170_PWR_REG_BASE 0x1D4000 + +#define AR9170_PWR_REG_CLOCK_SEL (AR9170_PWR_REG_BASE + 0x008) +#define AR9170_PWR_CLK_AHB_40MHZ 0 +#define AR9170_PWR_CLK_AHB_20_22MHZ 1 +#define AR9170_PWR_CLK_AHB_40_44MHZ 2 +#define AR9170_PWR_CLK_AHB_80_88MHZ 3 +#define AR9170_PWR_CLK_DAC_160_INV_DLY 0x70 + + +/* put beacon here in memory */ +#define AR9170_BEACON_BUFFER_ADDRESS 0x117900 + + +struct ar9170_tx_control { + __le16 length; + __le16 mac_control; + __le32 phy_control; + u8 frame_data[0]; +} __packed; + +/* these are either-or */ +#define AR9170_TX_MAC_PROT_RTS 0x0001 +#define AR9170_TX_MAC_PROT_CTS 0x0002 + +#define AR9170_TX_MAC_NO_ACK 0x0004 +/* if unset, MAC will only do SIFS space before frame */ +#define AR9170_TX_MAC_BACKOFF 0x0008 +#define AR9170_TX_MAC_BURST 0x0010 +#define AR9170_TX_MAC_AGGR 0x0020 + +/* encryption is a two-bit field */ +#define AR9170_TX_MAC_ENCR_NONE 0x0000 +#define AR9170_TX_MAC_ENCR_RC4 0x0040 +#define AR9170_TX_MAC_ENCR_CENC 0x0080 +#define AR9170_TX_MAC_ENCR_AES 0x00c0 + +#define AR9170_TX_MAC_MMIC 0x0100 +#define AR9170_TX_MAC_HW_DURATION 0x0200 +#define AR9170_TX_MAC_QOS_SHIFT 10 +#define AR9170_TX_MAC_QOS_MASK (3 << AR9170_TX_MAC_QOS_SHIFT) +#define AR9170_TX_MAC_AGGR_QOS_BIT1 0x0400 +#define AR9170_TX_MAC_AGGR_QOS_BIT2 0x0800 +#define AR9170_TX_MAC_DISABLE_TXOP 0x1000 +#define AR9170_TX_MAC_TXOP_RIFS 0x2000 +#define AR9170_TX_MAC_IMM_AMPDU 0x4000 +#define AR9170_TX_MAC_RATE_PROBE 0x8000 + +/* either-or */ +#define AR9170_TX_PHY_MOD_MASK 0x00000003 +#define AR9170_TX_PHY_MOD_CCK 0x00000000 +#define AR9170_TX_PHY_MOD_OFDM 0x00000001 +#define AR9170_TX_PHY_MOD_HT 0x00000002 + +/* depends on modulation */ +#define AR9170_TX_PHY_SHORT_PREAMBLE 0x00000004 +#define AR9170_TX_PHY_GREENFIELD 0x00000004 + +#define AR9170_TX_PHY_BW_SHIFT 3 +#define AR9170_TX_PHY_BW_MASK (3 << AR9170_TX_PHY_BW_SHIFT) +#define AR9170_TX_PHY_BW_20MHZ 0 +#define AR9170_TX_PHY_BW_40MHZ 2 +#define AR9170_TX_PHY_BW_40MHZ_DUP 3 + +#define AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT 6 +#define AR9170_TX_PHY_TX_HEAVY_CLIP_MASK (7 << AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT) + +#define AR9170_TX_PHY_TX_PWR_SHIFT 9 +#define AR9170_TX_PHY_TX_PWR_MASK (0x3f << AR9170_TX_PHY_TX_PWR_SHIFT) + +/* not part of the hw-spec */ +#define AR9170_TX_PHY_QOS_SHIFT 25 +#define AR9170_TX_PHY_QOS_MASK (3 << AR9170_TX_PHY_QOS_SHIFT) + +#define AR9170_TX_PHY_TXCHAIN_SHIFT 15 +#define AR9170_TX_PHY_TXCHAIN_MASK (7 << AR9170_TX_PHY_TXCHAIN_SHIFT) +#define AR9170_TX_PHY_TXCHAIN_1 1 +/* use for cck, ofdm 6/9/12/18/24 and HT if capable */ +#define AR9170_TX_PHY_TXCHAIN_2 5 + +#define AR9170_TX_PHY_MCS_SHIFT 18 +#define AR9170_TX_PHY_MCS_MASK (0x7f << AR9170_TX_PHY_MCS_SHIFT) + +#define AR9170_TX_PHY_SHORT_GI 0x80000000 + +#define AR5416_MAX_RATE_POWER 63 + +struct ar9170_rx_head { + u8 plcp[12]; +} __packed; + +struct ar9170_rx_phystatus { + union { + struct { + u8 rssi_ant0, rssi_ant1, rssi_ant2, + rssi_ant0x, rssi_ant1x, rssi_ant2x, + rssi_combined; + } __packed; + u8 rssi[7]; + } __packed; + + u8 
evm_stream0[6], evm_stream1[6]; + u8 phy_err; +} __packed; + +struct ar9170_rx_macstatus { + u8 SAidx, DAidx; + u8 error; + u8 status; +} __packed; + +#define AR9170_ENC_ALG_NONE 0x0 +#define AR9170_ENC_ALG_WEP64 0x1 +#define AR9170_ENC_ALG_TKIP 0x2 +#define AR9170_ENC_ALG_AESCCMP 0x4 +#define AR9170_ENC_ALG_WEP128 0x5 +#define AR9170_ENC_ALG_WEP256 0x6 +#define AR9170_ENC_ALG_CENC 0x7 + +#define AR9170_RX_ENC_SOFTWARE 0x8 + +static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t) +{ + return (t->SAidx & 0xc0) >> 4 | + (t->DAidx & 0xc0) >> 6; +} + +#define AR9170_RX_STATUS_MODULATION_MASK 0x03 +#define AR9170_RX_STATUS_MODULATION_CCK 0x00 +#define AR9170_RX_STATUS_MODULATION_OFDM 0x01 +#define AR9170_RX_STATUS_MODULATION_HT 0x02 +#define AR9170_RX_STATUS_MODULATION_DUPOFDM 0x03 + +/* depends on modulation */ +#define AR9170_RX_STATUS_SHORT_PREAMBLE 0x08 +#define AR9170_RX_STATUS_GREENFIELD 0x08 + +#define AR9170_RX_STATUS_MPDU_MASK 0x30 +#define AR9170_RX_STATUS_MPDU_SINGLE 0x00 +#define AR9170_RX_STATUS_MPDU_FIRST 0x20 +#define AR9170_RX_STATUS_MPDU_MIDDLE 0x30 +#define AR9170_RX_STATUS_MPDU_LAST 0x10 + +#define AR9170_RX_ERROR_RXTO 0x01 +#define AR9170_RX_ERROR_OVERRUN 0x02 +#define AR9170_RX_ERROR_DECRYPT 0x04 +#define AR9170_RX_ERROR_FCS 0x08 +#define AR9170_RX_ERROR_WRONG_RA 0x10 +#define AR9170_RX_ERROR_PLCP 0x20 +#define AR9170_RX_ERROR_MMIC 0x40 +#define AR9170_RX_ERROR_FATAL 0x80 + +struct ar9170_cmd_tx_status { + u8 dst[ETH_ALEN]; + __le32 rate; + __le16 status; +} __packed; + +#define AR9170_TX_STATUS_COMPLETE 0x00 +#define AR9170_TX_STATUS_RETRY 0x01 +#define AR9170_TX_STATUS_FAILED 0x02 + +struct ar9170_cmd_ba_failed_count { + __le16 failed; + __le16 rate; +} __packed; + +struct ar9170_cmd_response { + u8 flag; + u8 type; + __le16 padding; + + union { + struct ar9170_cmd_tx_status tx_status; + struct ar9170_cmd_ba_failed_count ba_fail_cnt; + u8 data[0]; + }; +} __packed; + +/* QoS */ + +/* mac80211 queue to HW/FW map */ +static const u8 ar9170_qos_hwmap[4] = { 3, 2, 0, 1 }; + +/* HW/FW queue to mac80211 map */ +static const u8 ar9170_qos_mac80211map[4] = { 2, 3, 1, 0 }; + +enum ar9170_txq { + AR9170_TXQ_BE, + AR9170_TXQ_BK, + AR9170_TXQ_VI, + AR9170_TXQ_VO, + + __AR9170_NUM_TXQ, +}; + +#define AR9170_TXQ_DEPTH 32 +#define AR9170_TX_MAX_PENDING 128 + +#endif /* __AR9170_HW_H */ diff --git a/drivers/net/wireless/ath/ar9170/led.c b/drivers/net/wireless/ath/ar9170/led.c new file mode 100644 index 0000000..86c4e79 --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/led.c @@ -0,0 +1,181 @@ +/* + * Atheros AR9170 driver + * + * LED handling + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. 
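A short worked example (hypothetical, not part of the patch) of the bit packing that ar9170_get_decrypt_type() above undoes: the top two bits of SAidx become bits 3:2 of the cipher code and the top two bits of DAidx become bits 1:0, which is presumably matched against the AR9170_ENC_ALG_* values defined above.

        /* Sketch only: SAidx = 0x40 (top bits 01), DAidx = 0x80 (top bits 10)
         * -> (0x40 & 0xc0) >> 4 = 0x4, (0x80 & 0xc0) >> 6 = 0x2,
         * so the combined code is 0x6 == AR9170_ENC_ALG_WEP256. */
        static u8 example_decrypt_type(void)
        {
                struct ar9170_rx_macstatus st = { .SAidx = 0x40, .DAidx = 0x80 };

                return ar9170_get_decrypt_type(&st);
        }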
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ar9170.h" +#include "cmd.h" + +int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state) +{ + return ar9170_write_reg(ar, AR9170_GPIO_REG_DATA, led_state); +} + +int ar9170_init_leds(struct ar9170 *ar) +{ + int err; + + /* disable LEDs */ + /* GPIO [0/1 mode: output, 2/3: input] */ + err = ar9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3); + if (err) + goto out; + + /* GPIO 0/1 value: off */ + err = ar9170_set_leds_state(ar, 0); + +out: + return err; +} + +#ifdef CONFIG_AR9170_LEDS +static void ar9170_update_leds(struct work_struct *work) +{ + struct ar9170 *ar = container_of(work, struct ar9170, led_work.work); + int i, tmp, blink_delay = 1000; + u32 led_val = 0; + bool rerun = false; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return ; + + mutex_lock(&ar->mutex); + for (i = 0; i < AR9170_NUM_LEDS; i++) + if (ar->leds[i].registered && ar->leds[i].toggled) { + led_val |= 1 << i; + + tmp = 70 + 200 / (ar->leds[i].toggled); + if (tmp < blink_delay) + blink_delay = tmp; + + if (ar->leds[i].toggled > 1) + ar->leds[i].toggled = 0; + + rerun = true; + } + + ar9170_set_leds_state(ar, led_val); + mutex_unlock(&ar->mutex); + + if (!rerun) + return; + + ieee80211_queue_delayed_work(ar->hw, + &ar->led_work, + msecs_to_jiffies(blink_delay)); +} + +static void ar9170_led_brightness_set(struct led_classdev *led, + enum led_brightness brightness) +{ + struct ar9170_led *arl = container_of(led, struct ar9170_led, l); + struct ar9170 *ar = arl->ar; + + if (unlikely(!arl->registered)) + return ; + + if (arl->last_state != !!brightness) { + arl->toggled++; + arl->last_state = !!brightness; + } + + if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled)) + ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ/10); +} + +static int ar9170_register_led(struct ar9170 *ar, int i, char *name, + char *trigger) +{ + int err; + + snprintf(ar->leds[i].name, sizeof(ar->leds[i].name), + "ar9170-%s::%s", wiphy_name(ar->hw->wiphy), name); + + ar->leds[i].ar = ar; + ar->leds[i].l.name = ar->leds[i].name; + ar->leds[i].l.brightness_set = ar9170_led_brightness_set; + ar->leds[i].l.brightness = 0; + ar->leds[i].l.default_trigger = trigger; + + err = led_classdev_register(wiphy_dev(ar->hw->wiphy), + &ar->leds[i].l); + if (err) + printk(KERN_ERR "%s: failed to register %s LED (%d).\n", + wiphy_name(ar->hw->wiphy), ar->leds[i].name, err); + else + ar->leds[i].registered = true; + + return err; +} + +void ar9170_unregister_leds(struct ar9170 *ar) +{ + int i; + + for (i = 0; i < AR9170_NUM_LEDS; i++) + if (ar->leds[i].registered) { + led_classdev_unregister(&ar->leds[i].l); + ar->leds[i].registered = false; + ar->leds[i].toggled = 0; + } + + cancel_delayed_work_sync(&ar->led_work); +} + +int ar9170_register_leds(struct ar9170 *ar) +{ + int err; + + INIT_DELAYED_WORK(&ar->led_work, 
ar9170_update_leds); + + err = ar9170_register_led(ar, 0, "tx", + ieee80211_get_tx_led_name(ar->hw)); + if (err) + goto fail; + + err = ar9170_register_led(ar, 1, "assoc", + ieee80211_get_assoc_led_name(ar->hw)); + if (err) + goto fail; + + return 0; + +fail: + ar9170_unregister_leds(ar); + return err; +} + +#endif /* CONFIG_AR9170_LEDS */ diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c new file mode 100644 index 0000000..857e861 --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/mac.c @@ -0,0 +1,519 @@ +/* + * Atheros AR9170 driver + * + * MAC programming + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ar9170.h" +#include "cmd.h" + +int ar9170_set_dyn_sifs_ack(struct ar9170 *ar) +{ + u32 val; + + if (conf_is_ht40(&ar->hw->conf)) + val = 0x010a; + else { + if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) + val = 0x105; + else + val = 0x104; + } + + return ar9170_write_reg(ar, AR9170_MAC_REG_DYNAMIC_SIFS_ACK, val); +} + +int ar9170_set_slot_time(struct ar9170 *ar) +{ + u32 slottime = 20; + + if (!ar->vif) + return 0; + + if ((ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ) || + ar->vif->bss_conf.use_short_slot) + slottime = 9; + + return ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME, slottime << 10); +} + +int ar9170_set_basic_rates(struct ar9170 *ar) +{ + u8 cck, ofdm; + + if (!ar->vif) + return 0; + + ofdm = ar->vif->bss_conf.basic_rates >> 4; + + /* FIXME: is still necessary? 
*/ + if (ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ) + cck = 0; + else + cck = ar->vif->bss_conf.basic_rates & 0xf; + + return ar9170_write_reg(ar, AR9170_MAC_REG_BASIC_RATE, + ofdm << 8 | cck); +} + +int ar9170_set_qos(struct ar9170 *ar) +{ + ar9170_regwrite_begin(ar); + + ar9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min | + (ar->edcf[0].cw_max << 16)); + ar9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min | + (ar->edcf[1].cw_max << 16)); + ar9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min | + (ar->edcf[2].cw_max << 16)); + ar9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min | + (ar->edcf[3].cw_max << 16)); + ar9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min | + (ar->edcf[4].cw_max << 16)); + + ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_AIFS, + ((ar->edcf[0].aifs * 9 + 10)) | + ((ar->edcf[1].aifs * 9 + 10) << 12) | + ((ar->edcf[2].aifs * 9 + 10) << 24)); + ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_AIFS, + ((ar->edcf[2].aifs * 9 + 10) >> 8) | + ((ar->edcf[3].aifs * 9 + 10) << 4) | + ((ar->edcf[4].aifs * 9 + 10) << 16)); + + ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP, + ar->edcf[0].txop | ar->edcf[1].txop << 16); + ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP, + ar->edcf[2].txop | ar->edcf[3].txop << 16); + + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +static int ar9170_set_ampdu_density(struct ar9170 *ar, u8 mpdudensity) +{ + u32 val; + + /* don't allow AMPDU density > 8us */ + if (mpdudensity > 6) + return -EINVAL; + + /* Watch out! Otus uses slightly different density values. */ + val = 0x140a00 | (mpdudensity ? (mpdudensity + 1) : 0); + + ar9170_regwrite_begin(ar); + ar9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, val); + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +int ar9170_init_mac(struct ar9170 *ar) +{ + ar9170_regwrite_begin(ar); + + ar9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40); + + ar9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0); + + /* enable MMIC */ + ar9170_regwrite(AR9170_MAC_REG_SNIFFER, + AR9170_MAC_REG_SNIFFER_DEFAULTS); + + ar9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80); + + ar9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70); + ar9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000); + ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10); + + /* CF-END mode */ + ar9170_regwrite(0x1c3b2c, 0x19000000); + + /* NAV protects ACK only (in TXOP) */ + ar9170_regwrite(0x1c3b38, 0x201); + + /* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */ + /* OTUS set AM to 0x1 */ + ar9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170); + + ar9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105); + + /* AGG test code*/ + /* Aggregation MAX number and timeout */ + ar9170_regwrite(0x1c3b9c, 0x10000a); + + ar9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER, + AR9170_MAC_REG_FTF_DEFAULTS); + + /* Enable deaggregator, response in sniffer mode */ + ar9170_regwrite(0x1c3c40, 0x1 | 1<<30); + + /* rate sets */ + ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f); + ar9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f); + ar9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x10b01bb); + + /* MIMO response control */ + ar9170_regwrite(0x1c3694, 0x4003C1E);/* bit 26~28 otus-AM */ + + /* switch MAC to OTUS interface */ + ar9170_regwrite(0x1c3600, 0x3); + + ar9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff); + + /* set PHY register read timeout (??) */ + ar9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008); + + /* Disable Rx TimeOut, workaround for BB. 
*/ + ar9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0); + + /* Set CPU clock frequency to 88/80MHz */ + ar9170_regwrite(AR9170_PWR_REG_CLOCK_SEL, + AR9170_PWR_CLK_AHB_80_88MHZ | + AR9170_PWR_CLK_DAC_160_INV_DLY); + + /* Set WLAN DMA interrupt mode: generate int per packet */ + ar9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011); + + ar9170_regwrite(AR9170_MAC_REG_FCS_SELECT, + AR9170_MAC_FCS_FIFO_PROT); + + /* Disables the CF_END frame, undocumented register */ + ar9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND, + 0x141E0F48); + + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac) +{ + static const u8 zero[ETH_ALEN] = { 0 }; + + if (!mac) + mac = zero; + + ar9170_regwrite_begin(ar); + + ar9170_regwrite(reg, get_unaligned_le32(mac)); + ar9170_regwrite(reg + 4, get_unaligned_le16(mac + 4)); + + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hash) +{ + int err; + + ar9170_regwrite_begin(ar); + ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, mc_hash >> 32); + ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, mc_hash); + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + if (err) + return err; + + ar->cur_mc_hash = mc_hash; + return 0; +} + +int ar9170_update_frame_filter(struct ar9170 *ar, const u32 filter) +{ + int err; + + err = ar9170_write_reg(ar, AR9170_MAC_REG_FRAMETYPE_FILTER, filter); + if (err) + return err; + + ar->cur_filter = filter; + return 0; +} + +static int ar9170_set_promiscouous(struct ar9170 *ar) +{ + u32 encr_mode, sniffer; + int err; + + err = ar9170_read_reg(ar, AR9170_MAC_REG_SNIFFER, &sniffer); + if (err) + return err; + + err = ar9170_read_reg(ar, AR9170_MAC_REG_ENCRYPTION, &encr_mode); + if (err) + return err; + + if (ar->sniffer_enabled) { + sniffer |= AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC; + + /* + * Rx decryption works in place. + * + * If we don't disable it, the hardware will render all + * encrypted frames which are encrypted with an unknown + * key useless. 
+ */ + + encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE; + ar->sniffer_enabled = true; + } else { + sniffer &= ~AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC; + + if (ar->rx_software_decryption) + encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE; + else + encr_mode &= ~AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE; + } + + ar9170_regwrite_begin(ar); + ar9170_regwrite(AR9170_MAC_REG_ENCRYPTION, encr_mode); + ar9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer); + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +int ar9170_set_operating_mode(struct ar9170 *ar) +{ + struct ath_common *common = &ar->common; + u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS; + u8 *mac_addr, *bssid; + int err; + + if (ar->vif) { + mac_addr = common->macaddr; + bssid = common->curbssid; + + switch (ar->vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + pm_mode |= AR9170_MAC_REG_POWERMGT_IBSS; + break; + case NL80211_IFTYPE_AP: + pm_mode |= AR9170_MAC_REG_POWERMGT_AP; + break; + case NL80211_IFTYPE_WDS: + pm_mode |= AR9170_MAC_REG_POWERMGT_AP_WDS; + break; + case NL80211_IFTYPE_MONITOR: + ar->sniffer_enabled = true; + ar->rx_software_decryption = true; + break; + default: + pm_mode |= AR9170_MAC_REG_POWERMGT_STA; + break; + } + } else { + mac_addr = NULL; + bssid = NULL; + } + + err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr); + if (err) + return err; + + err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid); + if (err) + return err; + + err = ar9170_set_promiscouous(ar); + if (err) + return err; + + /* set AMPDU density to 8us. */ + err = ar9170_set_ampdu_density(ar, 6); + if (err) + return err; + + ar9170_regwrite_begin(ar); + + ar9170_regwrite(AR9170_MAC_REG_POWERMANAGEMENT, pm_mode); + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +int ar9170_set_hwretry_limit(struct ar9170 *ar, unsigned int max_retry) +{ + u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111); + + return ar9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp); +} + +int ar9170_set_beacon_timers(struct ar9170 *ar) +{ + u32 v = 0; + u32 pretbtt = 0; + + if (ar->vif) { + v |= ar->vif->bss_conf.beacon_int; + + if (ar->enable_beacon) { + switch (ar->vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + v |= BIT(25); + break; + case NL80211_IFTYPE_AP: + v |= BIT(24); + pretbtt = (ar->vif->bss_conf.beacon_int - 6) << + 16; + break; + default: + break; + } + } + + v |= ar->vif->bss_conf.dtim_period << 16; + } + + ar9170_regwrite_begin(ar); + ar9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt); + ar9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v); + ar9170_regwrite_finish(); + return ar9170_regwrite_result(); +} + +int ar9170_update_beacon(struct ar9170 *ar) +{ + struct sk_buff *skb; + __le32 *data, *old = NULL; + u32 word; + int i; + + skb = ieee80211_beacon_get(ar->hw, ar->vif); + if (!skb) + return -ENOMEM; + + data = (__le32 *)skb->data; + if (ar->beacon) + old = (__le32 *)ar->beacon->data; + + ar9170_regwrite_begin(ar); + for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) { + /* + * XXX: This accesses beyond skb data for up + * to the last 3 bytes!! 
+ */ + + if (old && (data[i] == old[i])) + continue; + + word = le32_to_cpu(data[i]); + ar9170_regwrite(AR9170_BEACON_BUFFER_ADDRESS + 4 * i, word); + } + + /* XXX: use skb->cb info */ + if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) + ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP, + ((skb->len + 4) << (3 + 16)) + 0x0400); + else + ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP, + ((skb->len + 4) << 16) + 0x001b); + + ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4); + ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS); + ar9170_regwrite(AR9170_MAC_REG_BCN_CTRL, 1); + + ar9170_regwrite_finish(); + + dev_kfree_skb(ar->beacon); + ar->beacon = skb; + + return ar9170_regwrite_result(); +} + +void ar9170_new_beacon(struct work_struct *work) +{ + struct ar9170 *ar = container_of(work, struct ar9170, + beacon_work); + struct sk_buff *skb; + + if (unlikely(!IS_STARTED(ar))) + return ; + + mutex_lock(&ar->mutex); + + if (!ar->vif) + goto out; + + ar9170_update_beacon(ar); + + rcu_read_lock(); + while ((skb = ieee80211_get_buffered_bc(ar->hw, ar->vif))) + ar9170_op_tx(ar->hw, skb); + + rcu_read_unlock(); + + out: + mutex_unlock(&ar->mutex); +} + +int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype, + u8 keyidx, u8 *keydata, int keylen) +{ + __le32 vals[7]; + static const u8 bcast[ETH_ALEN] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + u8 dummy; + + mac = mac ? : bcast; + + vals[0] = cpu_to_le32((keyidx << 16) + id); + vals[1] = cpu_to_le32(mac[1] << 24 | mac[0] << 16 | ktype); + vals[2] = cpu_to_le32(mac[5] << 24 | mac[4] << 16 | + mac[3] << 8 | mac[2]); + memset(&vals[3], 0, 16); + if (keydata) + memcpy(&vals[3], keydata, keylen); + + return ar->exec_cmd(ar, AR9170_CMD_EKEY, + sizeof(vals), (u8 *)vals, + 1, &dummy); +} + +int ar9170_disable_key(struct ar9170 *ar, u8 id) +{ + __le32 val = cpu_to_le32(id); + u8 dummy; + + return ar->exec_cmd(ar, AR9170_CMD_EKEY, + sizeof(val), (u8 *)&val, + 1, &dummy); +} diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c new file mode 100644 index 0000000..257c734 --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/main.c @@ -0,0 +1,2726 @@ +/* + * Atheros AR9170 driver + * + * mac80211 interaction code + * + * Copyright 2008, Johannes Berg + * Copyright 2009, Christian Lamparter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include "ar9170.h" +#include "hw.h" +#include "cmd.h" + +static int modparam_nohwcrypt; +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); + +static int modparam_ht; +module_param_named(ht, modparam_ht, bool, S_IRUGO); +MODULE_PARM_DESC(ht, "enable MPDU aggregation."); + +#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \ + .bitrate = (_bitrate), \ + .flags = (_flags), \ + .hw_value = (_hw_rate) | (_txpidx) << 4, \ +} + +static struct ieee80211_rate __ar9170_ratetable[] = { + RATE(10, 0, 0, 0), + RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(60, 0xb, 0, 0), + RATE(90, 0xf, 0, 0), + RATE(120, 0xa, 0, 0), + RATE(180, 0xe, 0, 0), + RATE(240, 0x9, 0, 0), + RATE(360, 0xd, 1, 0), + RATE(480, 0x8, 2, 0), + RATE(540, 0xc, 3, 0), +}; +#undef RATE + +#define ar9170_g_ratetable (__ar9170_ratetable + 0) +#define ar9170_g_ratetable_size 12 +#define ar9170_a_ratetable (__ar9170_ratetable + 4) +#define ar9170_a_ratetable_size 8 + +/* + * NB: The hw_value is used as an index into the ar9170_phy_freq_params + * array in phy.c so that we don't have to do frequency lookups! 
+ */ +#define CHAN(_freq, _idx) { \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 18, /* XXX */ \ +} + +static struct ieee80211_channel ar9170_2ghz_chantable[] = { + CHAN(2412, 0), + CHAN(2417, 1), + CHAN(2422, 2), + CHAN(2427, 3), + CHAN(2432, 4), + CHAN(2437, 5), + CHAN(2442, 6), + CHAN(2447, 7), + CHAN(2452, 8), + CHAN(2457, 9), + CHAN(2462, 10), + CHAN(2467, 11), + CHAN(2472, 12), + CHAN(2484, 13), +}; + +static struct ieee80211_channel ar9170_5ghz_chantable[] = { + CHAN(4920, 14), + CHAN(4940, 15), + CHAN(4960, 16), + CHAN(4980, 17), + CHAN(5040, 18), + CHAN(5060, 19), + CHAN(5080, 20), + CHAN(5180, 21), + CHAN(5200, 22), + CHAN(5220, 23), + CHAN(5240, 24), + CHAN(5260, 25), + CHAN(5280, 26), + CHAN(5300, 27), + CHAN(5320, 28), + CHAN(5500, 29), + CHAN(5520, 30), + CHAN(5540, 31), + CHAN(5560, 32), + CHAN(5580, 33), + CHAN(5600, 34), + CHAN(5620, 35), + CHAN(5640, 36), + CHAN(5660, 37), + CHAN(5680, 38), + CHAN(5700, 39), + CHAN(5745, 40), + CHAN(5765, 41), + CHAN(5785, 42), + CHAN(5805, 43), + CHAN(5825, 44), + CHAN(5170, 45), + CHAN(5190, 46), + CHAN(5210, 47), + CHAN(5230, 48), +}; +#undef CHAN + +#define AR9170_HT_CAP \ +{ \ + .ht_supported = true, \ + .cap = IEEE80211_HT_CAP_MAX_AMSDU | \ + IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \ + IEEE80211_HT_CAP_SGI_40 | \ + IEEE80211_HT_CAP_GRN_FLD | \ + IEEE80211_HT_CAP_DSSSCCK40 | \ + IEEE80211_HT_CAP_SM_PS, \ + .ampdu_factor = 3, \ + .ampdu_density = 6, \ + .mcs = { \ + .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \ + .rx_highest = cpu_to_le16(300), \ + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \ + }, \ +} + +static struct ieee80211_supported_band ar9170_band_2GHz = { + .channels = ar9170_2ghz_chantable, + .n_channels = ARRAY_SIZE(ar9170_2ghz_chantable), + .bitrates = ar9170_g_ratetable, + .n_bitrates = ar9170_g_ratetable_size, + .ht_cap = AR9170_HT_CAP, +}; + +static struct ieee80211_supported_band ar9170_band_5GHz = { + .channels = ar9170_5ghz_chantable, + .n_channels = ARRAY_SIZE(ar9170_5ghz_chantable), + .bitrates = ar9170_a_ratetable, + .n_bitrates = ar9170_a_ratetable_size, + .ht_cap = AR9170_HT_CAP, +}; + +static void ar9170_tx(struct ar9170 *ar); +static bool ar9170_tx_ampdu(struct ar9170 *ar); + +static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr) +{ + return le16_to_cpu(hdr->seq_ctrl) >> 4; +} + +static inline u16 ar9170_get_seq(struct sk_buff *skb) +{ + struct ar9170_tx_control *txc = (void *) skb->data; + return ar9170_get_seq_h((void *) txc->frame_data); +} + +static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr) +{ + return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK; +} + +static inline u16 ar9170_get_tid(struct sk_buff *skb) +{ + struct ar9170_tx_control *txc = (void *) skb->data; + return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data); +} + +#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff) +#define GET_NEXT_SEQ_FROM_SKB(skb) (GET_NEXT_SEQ(ar9170_get_seq(skb))) + +#if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG) +static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb) +{ + struct ar9170_tx_control *txc = (void *) skb->data; + struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); + struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data; + struct ieee80211_hdr *hdr = (void *) txc->frame_data; + + printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d " + "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n", + wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb), + ieee80211_get_DA(hdr), 
ar9170_get_seq_h(hdr), + le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control), + jiffies_to_msecs(arinfo->timeout - jiffies)); +} + +static void __ar9170_dump_txqueue(struct ar9170 *ar, + struct sk_buff_head *queue) +{ + struct sk_buff *skb; + int i = 0; + + printk(KERN_DEBUG "---[ cut here ]---\n"); + printk(KERN_DEBUG "%s: %d entries in queue.\n", + wiphy_name(ar->hw->wiphy), skb_queue_len(queue)); + + skb_queue_walk(queue, skb) { + printk(KERN_DEBUG "index:%d => \n", i++); + ar9170_print_txheader(ar, skb); + } + if (i != skb_queue_len(queue)) + printk(KERN_DEBUG "WARNING: queue frame counter " + "mismatch %d != %d\n", skb_queue_len(queue), i); + printk(KERN_DEBUG "---[ end ]---\n"); +} +#endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */ + +#ifdef AR9170_QUEUE_DEBUG +static void ar9170_dump_txqueue(struct ar9170 *ar, + struct sk_buff_head *queue) +{ + unsigned long flags; + + spin_lock_irqsave(&queue->lock, flags); + __ar9170_dump_txqueue(ar, queue); + spin_unlock_irqrestore(&queue->lock, flags); +} +#endif /* AR9170_QUEUE_DEBUG */ + +#ifdef AR9170_QUEUE_STOP_DEBUG +static void __ar9170_dump_txstats(struct ar9170 *ar) +{ + int i; + + printk(KERN_DEBUG "%s: QoS queue stats\n", + wiphy_name(ar->hw->wiphy)); + + for (i = 0; i < __AR9170_NUM_TXQ; i++) + printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d " + " stopped:%d\n", wiphy_name(ar->hw->wiphy), i, + ar->tx_stats[i].limit, ar->tx_stats[i].len, + skb_queue_len(&ar->tx_status[i]), + ieee80211_queue_stopped(ar->hw, i)); +} +#endif /* AR9170_QUEUE_STOP_DEBUG */ + +#ifdef AR9170_TXAGG_DEBUG +static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar) +{ + unsigned long flags; + + spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags); + printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n", + wiphy_name(ar->hw->wiphy)); + __ar9170_dump_txqueue(ar, &ar->tx_status_ampdu); + spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags); +} + +#endif /* AR9170_TXAGG_DEBUG */ + +/* caller must guarantee exclusive access for _bin_ queue. */ +static void ar9170_recycle_expired(struct ar9170 *ar, + struct sk_buff_head *queue, + struct sk_buff_head *bin) +{ + struct sk_buff *skb, *old = NULL; + unsigned long flags; + + spin_lock_irqsave(&queue->lock, flags); + while ((skb = skb_peek(queue))) { + struct ieee80211_tx_info *txinfo; + struct ar9170_tx_info *arinfo; + + txinfo = IEEE80211_SKB_CB(skb); + arinfo = (void *) txinfo->rate_driver_data; + + if (time_is_before_jiffies(arinfo->timeout)) { +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => " + "recycle \n", wiphy_name(ar->hw->wiphy), + jiffies, arinfo->timeout); + ar9170_print_txheader(ar, skb); +#endif /* AR9170_QUEUE_DEBUG */ + __skb_unlink(skb, queue); + __skb_queue_tail(bin, skb); + } else { + break; + } + + if (unlikely(old == skb)) { + /* bail out - queue is shot. 
*/ + + WARN_ON(1); + break; + } + old = skb; + } + spin_unlock_irqrestore(&queue->lock, flags); +} + +static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb, + u16 tx_status) +{ + struct ieee80211_tx_info *txinfo; + unsigned int retries = 0; + + txinfo = IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(txinfo); + + switch (tx_status) { + case AR9170_TX_STATUS_RETRY: + retries = 2; + case AR9170_TX_STATUS_COMPLETE: + txinfo->flags |= IEEE80211_TX_STAT_ACK; + break; + + case AR9170_TX_STATUS_FAILED: + retries = ar->hw->conf.long_frame_max_tx_count; + break; + + default: + printk(KERN_ERR "%s: invalid tx_status response (%x).\n", + wiphy_name(ar->hw->wiphy), tx_status); + break; + } + + txinfo->status.rates[0].count = retries + 1; + skb_pull(skb, sizeof(struct ar9170_tx_control)); + ieee80211_tx_status_irqsafe(ar->hw, skb); +} + +static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar) +{ + struct sk_buff_head success; + struct sk_buff *skb; + unsigned int i; + unsigned long queue_bitmap = 0; + + skb_queue_head_init(&success); + + while (skb_queue_len(&ar->tx_status_ampdu) > AR9170_NUM_TX_STATUS) + __skb_queue_tail(&success, skb_dequeue(&ar->tx_status_ampdu)); + + ar9170_recycle_expired(ar, &ar->tx_status_ampdu, &success); + +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: collected %d A-MPDU frames.\n", + wiphy_name(ar->hw->wiphy), skb_queue_len(&success)); + __ar9170_dump_txqueue(ar, &success); +#endif /* AR9170_TXAGG_DEBUG */ + + while ((skb = __skb_dequeue(&success))) { + struct ieee80211_tx_info *txinfo; + + queue_bitmap |= BIT(skb_get_queue_mapping(skb)); + + txinfo = IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(txinfo); + + txinfo->flags |= IEEE80211_TX_STAT_ACK; + txinfo->status.rates[0].count = 1; + + skb_pull(skb, sizeof(struct ar9170_tx_control)); + ieee80211_tx_status_irqsafe(ar->hw, skb); + } + + for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) { +#ifdef AR9170_QUEUE_STOP_DEBUG + printk(KERN_DEBUG "%s: wake queue %d\n", + wiphy_name(ar->hw->wiphy), i); + __ar9170_dump_txstats(ar); +#endif /* AR9170_QUEUE_STOP_DEBUG */ + ieee80211_wake_queue(ar->hw, i); + } + + if (queue_bitmap) + ar9170_tx(ar); +} + +static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb) +{ + struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); + struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data; + + arinfo->timeout = jiffies + + msecs_to_jiffies(AR9170_BA_TIMEOUT); + + skb_queue_tail(&ar->tx_status_ampdu, skb); + ar9170_tx_fake_ampdu_status(ar); + + if (atomic_dec_and_test(&ar->tx_ampdu_pending) && + !list_empty(&ar->tx_ampdu_list)) + ar9170_tx_ampdu(ar); +} + +void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data; + unsigned int queue = skb_get_queue_mapping(skb); + unsigned long flags; + + spin_lock_irqsave(&ar->tx_stats_lock, flags); + ar->tx_stats[queue].len--; + + if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) { +#ifdef AR9170_QUEUE_STOP_DEBUG + printk(KERN_DEBUG "%s: wake queue %d\n", + wiphy_name(ar->hw->wiphy), queue); + __ar9170_dump_txstats(ar); +#endif /* AR9170_QUEUE_STOP_DEBUG */ + ieee80211_wake_queue(ar->hw, queue); + } + spin_unlock_irqrestore(&ar->tx_stats_lock, flags); + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) { + ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED); + } else { + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + ar9170_tx_ampdu_callback(ar, skb); + } else { 
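+ /*
+  * Regular frame that still expects a tx status report: park it
+  * on the per-queue tx_status list until the firmware's report
+  * (or the tx janitor's timeout) claims it.
+  */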
+ arinfo->timeout = jiffies + + msecs_to_jiffies(AR9170_TX_TIMEOUT); + + skb_queue_tail(&ar->tx_status[queue], skb); + } + } + + if (!ar->tx_stats[queue].len && + !skb_queue_empty(&ar->tx_pending[queue])) { + ar9170_tx(ar); + } +} + +static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar, + const u8 *mac, + struct sk_buff_head *queue, + const u32 rate) +{ + unsigned long flags; + struct sk_buff *skb; + + /* + * Unfortunately, the firmware does not tell to which (queued) frame + * this transmission status report belongs to. + * + * So we have to make risky guesses - with the scarce information + * the firmware provided (-> destination MAC, and phy_control) - + * and hope that we picked the right one... + */ + + spin_lock_irqsave(&queue->lock, flags); + skb_queue_walk(queue, skb) { + struct ar9170_tx_control *txc = (void *) skb->data; + struct ieee80211_hdr *hdr = (void *) txc->frame_data; + u32 r; + + if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) { +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n", + wiphy_name(ar->hw->wiphy), mac, + ieee80211_get_DA(hdr)); + ar9170_print_txheader(ar, skb); +#endif /* AR9170_QUEUE_DEBUG */ + continue; + } + + r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >> + AR9170_TX_PHY_MCS_SHIFT; + + if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) { +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n", + wiphy_name(ar->hw->wiphy), rate, r); + ar9170_print_txheader(ar, skb); +#endif /* AR9170_QUEUE_DEBUG */ + continue; + } + + __skb_unlink(skb, queue); + spin_unlock_irqrestore(&queue->lock, flags); + return skb; + } + +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_ERR "%s: ESS:[%pM] does not have any " + "outstanding frames in queue.\n", + wiphy_name(ar->hw->wiphy), mac); + __ar9170_dump_txqueue(ar, queue); +#endif /* AR9170_QUEUE_DEBUG */ + spin_unlock_irqrestore(&queue->lock, flags); + + return NULL; +} + +static void ar9170_handle_block_ack(struct ar9170 *ar, u16 count, u16 r) +{ + struct sk_buff *skb; + struct ieee80211_tx_info *txinfo; + + while (count) { + skb = ar9170_get_queued_skb(ar, NULL, &ar->tx_status_ampdu, r); + if (!skb) + break; + + txinfo = IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(txinfo); + + /* FIXME: maybe more ? */ + txinfo->status.rates[0].count = 1; + + skb_pull(skb, sizeof(struct ar9170_tx_control)); + ieee80211_tx_status_irqsafe(ar->hw, skb); + count--; + } + +#ifdef AR9170_TXAGG_DEBUG + if (count) { + printk(KERN_DEBUG "%s: got %d more failed mpdus, but no more " + "suitable frames left in tx_status queue.\n", + wiphy_name(ar->hw->wiphy), count); + + ar9170_dump_tx_status_ampdu(ar); + } +#endif /* AR9170_TXAGG_DEBUG */ +} + +/* + * This worker tries to keeps an maintain tx_status queues. + * So we can guarantee that incoming tx_status reports are + * actually for a pending frame. 
+ */ + +static void ar9170_tx_janitor(struct work_struct *work) +{ + struct ar9170 *ar = container_of(work, struct ar9170, + tx_janitor.work); + struct sk_buff_head waste; + unsigned int i; + bool resched = false; + + if (unlikely(!IS_STARTED(ar))) + return ; + + skb_queue_head_init(&waste); + + for (i = 0; i < __AR9170_NUM_TXQ; i++) { +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n", + wiphy_name(ar->hw->wiphy), i); + ar9170_dump_txqueue(ar, &ar->tx_pending[i]); + ar9170_dump_txqueue(ar, &ar->tx_status[i]); +#endif /* AR9170_QUEUE_DEBUG */ + + ar9170_recycle_expired(ar, &ar->tx_status[i], &waste); + ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste); + skb_queue_purge(&waste); + + if (!skb_queue_empty(&ar->tx_status[i]) || + !skb_queue_empty(&ar->tx_pending[i])) + resched = true; + } + + ar9170_tx_fake_ampdu_status(ar); + + if (!resched) + return; + + ieee80211_queue_delayed_work(ar->hw, + &ar->tx_janitor, + msecs_to_jiffies(AR9170_JANITOR_DELAY)); +} + +void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len) +{ + struct ar9170_cmd_response *cmd = (void *) buf; + + if ((cmd->type & 0xc0) != 0xc0) { + ar->callback_cmd(ar, len, buf); + return; + } + + /* hardware event handlers */ + switch (cmd->type) { + case 0xc1: { + /* + * TX status notification: + * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1 + * + * XX always 81 + * YY always 00 + * M1-M6 is the MAC address + * R1-R4 is the transmit rate + * S1-S2 is the transmit status + */ + + struct sk_buff *skb; + u32 phy = le32_to_cpu(cmd->tx_status.rate); + u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >> + AR9170_TX_PHY_QOS_SHIFT; +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n", + wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q); +#endif /* AR9170_QUEUE_DEBUG */ + + skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst, + &ar->tx_status[q], + AR9170_TX_INVALID_RATE); + if (unlikely(!skb)) + return ; + + ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status)); + break; + } + + case 0xc0: + /* + * pre-TBTT event + */ + if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP) + ieee80211_queue_work(ar->hw, &ar->beacon_work); + break; + + case 0xc2: + /* + * (IBSS) beacon send notification + * bytes: 04 c2 XX YY B4 B3 B2 B1 + * + * XX always 80 + * YY always 00 + * B1-B4 "should" be the number of send out beacons. + */ + break; + + case 0xc3: + /* End of Atim Window */ + break; + + case 0xc4: + /* BlockACK bitmap */ + break; + + case 0xc5: + /* BlockACK events */ + ar9170_handle_block_ack(ar, + le16_to_cpu(cmd->ba_fail_cnt.failed), + le16_to_cpu(cmd->ba_fail_cnt.rate)); + ar9170_tx_fake_ampdu_status(ar); + break; + + case 0xc6: + /* Watchdog Interrupt */ + break; + + case 0xc9: + /* retransmission issue / SIFS/EIFS collision ?! 
*/ + break; + + /* firmware debug */ + case 0xca: + printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4); + break; + case 0xcb: + len -= 4; + + switch (len) { + case 1: + printk(KERN_DEBUG "ar9170 FW: u8: %#.2x\n", + *((char *)buf + 4)); + break; + case 2: + printk(KERN_DEBUG "ar9170 FW: u8: %#.4x\n", + le16_to_cpup((__le16 *)((char *)buf + 4))); + break; + case 4: + printk(KERN_DEBUG "ar9170 FW: u8: %#.8x\n", + le32_to_cpup((__le32 *)((char *)buf + 4))); + break; + case 8: + printk(KERN_DEBUG "ar9170 FW: u8: %#.16lx\n", + (unsigned long)le64_to_cpup( + (__le64 *)((char *)buf + 4))); + break; + } + break; + case 0xcc: + print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE, + (char *)buf + 4, len - 4); + break; + + default: + printk(KERN_INFO "received unhandled event %x\n", cmd->type); + print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len); + break; + } +} + +static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar) +{ + memset(&ar->rx_mpdu.plcp, 0, sizeof(struct ar9170_rx_head)); + ar->rx_mpdu.has_plcp = false; +} + +int ar9170_nag_limiter(struct ar9170 *ar) +{ + bool print_message; + + /* + * we expect all sorts of errors in promiscuous mode. + * don't bother with it, it's OK! + */ + if (ar->sniffer_enabled) + return false; + + /* + * only go for frequent errors! The hardware tends to + * do some stupid thing once in a while under load, in + * noisy environments or just for fun! + */ + if (time_before(jiffies, ar->bad_hw_nagger) && net_ratelimit()) + print_message = true; + else + print_message = false; + + /* reset threshold for "once in a while" */ + ar->bad_hw_nagger = jiffies + HZ / 4; + return print_message; +} + +static int ar9170_rx_mac_status(struct ar9170 *ar, + struct ar9170_rx_head *head, + struct ar9170_rx_macstatus *mac, + struct ieee80211_rx_status *status) +{ + u8 error, decrypt; + + BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12); + BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4); + + error = mac->error; + if (error & AR9170_RX_ERROR_MMIC) { + status->flag |= RX_FLAG_MMIC_ERROR; + error &= ~AR9170_RX_ERROR_MMIC; + } + + if (error & AR9170_RX_ERROR_PLCP) { + status->flag |= RX_FLAG_FAILED_PLCP_CRC; + error &= ~AR9170_RX_ERROR_PLCP; + + if (!(ar->filter_state & FIF_PLCPFAIL)) + return -EINVAL; + } + + if (error & AR9170_RX_ERROR_FCS) { + status->flag |= RX_FLAG_FAILED_FCS_CRC; + error &= ~AR9170_RX_ERROR_FCS; + + if (!(ar->filter_state & FIF_FCSFAIL)) + return -EINVAL; + } + + decrypt = ar9170_get_decrypt_type(mac); + if (!(decrypt & AR9170_RX_ENC_SOFTWARE) && + decrypt != AR9170_ENC_ALG_NONE) + status->flag |= RX_FLAG_DECRYPTED; + + /* ignore wrong RA errors */ + error &= ~AR9170_RX_ERROR_WRONG_RA; + + if (error & AR9170_RX_ERROR_DECRYPT) { + error &= ~AR9170_RX_ERROR_DECRYPT; + /* + * Rx decryption is done in place, + * the original data is lost anyway. 
+ */ + + return -EINVAL; + } + + /* drop any other error frames */ + if (unlikely(error)) { + /* TODO: update netdevice's RX dropped/errors statistics */ + + if (ar9170_nag_limiter(ar)) + printk(KERN_DEBUG "%s: received frame with " + "suspicious error code (%#x).\n", + wiphy_name(ar->hw->wiphy), error); + + return -EINVAL; + } + + status->band = ar->channel->band; + status->freq = ar->channel->center_freq; + + switch (mac->status & AR9170_RX_STATUS_MODULATION_MASK) { + case AR9170_RX_STATUS_MODULATION_CCK: + if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE) + status->flag |= RX_FLAG_SHORTPRE; + switch (head->plcp[0]) { + case 0x0a: + status->rate_idx = 0; + break; + case 0x14: + status->rate_idx = 1; + break; + case 0x37: + status->rate_idx = 2; + break; + case 0x6e: + status->rate_idx = 3; + break; + default: + if (ar9170_nag_limiter(ar)) + printk(KERN_ERR "%s: invalid plcp cck rate " + "(%x).\n", wiphy_name(ar->hw->wiphy), + head->plcp[0]); + return -EINVAL; + } + break; + + case AR9170_RX_STATUS_MODULATION_DUPOFDM: + case AR9170_RX_STATUS_MODULATION_OFDM: + switch (head->plcp[0] & 0xf) { + case 0xb: + status->rate_idx = 0; + break; + case 0xf: + status->rate_idx = 1; + break; + case 0xa: + status->rate_idx = 2; + break; + case 0xe: + status->rate_idx = 3; + break; + case 0x9: + status->rate_idx = 4; + break; + case 0xd: + status->rate_idx = 5; + break; + case 0x8: + status->rate_idx = 6; + break; + case 0xc: + status->rate_idx = 7; + break; + default: + if (ar9170_nag_limiter(ar)) + printk(KERN_ERR "%s: invalid plcp ofdm rate " + "(%x).\n", wiphy_name(ar->hw->wiphy), + head->plcp[0]); + return -EINVAL; + } + if (status->band == IEEE80211_BAND_2GHZ) + status->rate_idx += 4; + break; + + case AR9170_RX_STATUS_MODULATION_HT: + if (head->plcp[3] & 0x80) + status->flag |= RX_FLAG_40MHZ; + if (head->plcp[6] & 0x80) + status->flag |= RX_FLAG_SHORT_GI; + + status->rate_idx = clamp(0, 75, head->plcp[6] & 0x7f); + status->flag |= RX_FLAG_HT; + break; + + default: + if (ar9170_nag_limiter(ar)) + printk(KERN_ERR "%s: invalid modulation\n", + wiphy_name(ar->hw->wiphy)); + return -EINVAL; + } + + return 0; +} + +static void ar9170_rx_phy_status(struct ar9170 *ar, + struct ar9170_rx_phystatus *phy, + struct ieee80211_rx_status *status) +{ + int i; + + BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20); + + for (i = 0; i < 3; i++) + if (phy->rssi[i] != 0x80) + status->antenna |= BIT(i); + + /* post-process RSSI */ + for (i = 0; i < 7; i++) + if (phy->rssi[i] & 0x80) + phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f; + + /* TODO: we could do something with phy_errors */ + status->signal = ar->noise[0] + phy->rssi_combined; + status->noise = ar->noise[0]; +} + +static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len) +{ + struct sk_buff *skb; + int reserved = 0; + struct ieee80211_hdr *hdr = (void *) buf; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + reserved += NET_IP_ALIGN; + + if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) + reserved += NET_IP_ALIGN; + } + + if (ieee80211_has_a4(hdr->frame_control)) + reserved += NET_IP_ALIGN; + + reserved = 32 + (reserved & NET_IP_ALIGN); + + skb = dev_alloc_skb(len + reserved); + if (likely(skb)) { + skb_reserve(skb, reserved); + memcpy(skb_put(skb, len), buf, len); + } + + return skb; +} + +/* + * If the frame alignment is right (or the kernel has + * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there + * is only a single MPDU in the USB frame, then we could + * submit to mac80211 the SKB directly. 
However, since + * there may be multiple packets in one SKB in stream + * mode, and we need to observe the proper ordering, + * this is non-trivial. + */ + +static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len) +{ + struct ar9170_rx_head *head; + struct ar9170_rx_macstatus *mac; + struct ar9170_rx_phystatus *phy = NULL; + struct ieee80211_rx_status status; + struct sk_buff *skb; + int mpdu_len; + + if (unlikely(!IS_STARTED(ar) || len < (sizeof(*mac)))) + return ; + + /* Received MPDU */ + mpdu_len = len - sizeof(*mac); + + mac = (void *)(buf + mpdu_len); + if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) { + /* this frame is too damaged and can't be used - drop it */ + + return ; + } + + switch (mac->status & AR9170_RX_STATUS_MPDU_MASK) { + case AR9170_RX_STATUS_MPDU_FIRST: + /* first mpdu packet has the plcp header */ + if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) { + head = (void *) buf; + memcpy(&ar->rx_mpdu.plcp, (void *) buf, + sizeof(struct ar9170_rx_head)); + + mpdu_len -= sizeof(struct ar9170_rx_head); + buf += sizeof(struct ar9170_rx_head); + ar->rx_mpdu.has_plcp = true; + } else { + if (ar9170_nag_limiter(ar)) + printk(KERN_ERR "%s: plcp info is clipped.\n", + wiphy_name(ar->hw->wiphy)); + return ; + } + break; + + case AR9170_RX_STATUS_MPDU_LAST: + /* last mpdu has a extra tail with phy status information */ + + if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) { + mpdu_len -= sizeof(struct ar9170_rx_phystatus); + phy = (void *)(buf + mpdu_len); + } else { + if (ar9170_nag_limiter(ar)) + printk(KERN_ERR "%s: frame tail is clipped.\n", + wiphy_name(ar->hw->wiphy)); + return ; + } + + case AR9170_RX_STATUS_MPDU_MIDDLE: + /* middle mpdus are just data */ + if (unlikely(!ar->rx_mpdu.has_plcp)) { + if (!ar9170_nag_limiter(ar)) + return ; + + printk(KERN_ERR "%s: rx stream did not start " + "with a first_mpdu frame tag.\n", + wiphy_name(ar->hw->wiphy)); + + return ; + } + + head = &ar->rx_mpdu.plcp; + break; + + case AR9170_RX_STATUS_MPDU_SINGLE: + /* single mpdu - has plcp (head) and phy status (tail) */ + head = (void *) buf; + + mpdu_len -= sizeof(struct ar9170_rx_head); + mpdu_len -= sizeof(struct ar9170_rx_phystatus); + + buf += sizeof(struct ar9170_rx_head); + phy = (void *)(buf + mpdu_len); + break; + + default: + BUG_ON(1); + break; + } + + if (unlikely(mpdu_len < FCS_LEN)) + return ; + + memset(&status, 0, sizeof(status)); + if (unlikely(ar9170_rx_mac_status(ar, head, mac, &status))) + return ; + + if (phy) + ar9170_rx_phy_status(ar, phy, &status); + + skb = ar9170_rx_copy_data(buf, mpdu_len); + if (likely(skb)) { + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); + ieee80211_rx_irqsafe(ar->hw, skb); + } +} + +void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb) +{ + unsigned int i, tlen, resplen, wlen = 0, clen = 0; + u8 *tbuf, *respbuf; + + tbuf = skb->data; + tlen = skb->len; + + while (tlen >= 4) { + clen = tbuf[1] << 8 | tbuf[0]; + wlen = ALIGN(clen, 4); + + /* check if this is stream has a valid tag.*/ + if (tbuf[2] != 0 || tbuf[3] != 0x4e) { + /* + * TODO: handle the highly unlikely event that the + * corrupted stream has the TAG at the right position. + */ + + /* check if the frame can be repaired. */ + if (!ar->rx_failover_missing) { + /* this is no "short read". 
*/ + if (ar9170_nag_limiter(ar)) { + printk(KERN_ERR "%s: missing tag!\n", + wiphy_name(ar->hw->wiphy)); + goto err_telluser; + } else + goto err_silent; + } + + if (ar->rx_failover_missing > tlen) { + if (ar9170_nag_limiter(ar)) { + printk(KERN_ERR "%s: possible multi " + "stream corruption!\n", + wiphy_name(ar->hw->wiphy)); + goto err_telluser; + } else + goto err_silent; + } + + memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen); + ar->rx_failover_missing -= tlen; + + if (ar->rx_failover_missing <= 0) { + /* + * nested ar9170_rx call! + * termination is guranteed, even when the + * combined frame also have a element with + * a bad tag. + */ + + ar->rx_failover_missing = 0; + ar9170_rx(ar, ar->rx_failover); + + skb_reset_tail_pointer(ar->rx_failover); + skb_trim(ar->rx_failover, 0); + } + + return ; + } + + /* check if stream is clipped */ + if (wlen > tlen - 4) { + if (ar->rx_failover_missing) { + /* TODO: handle double stream corruption. */ + if (ar9170_nag_limiter(ar)) { + printk(KERN_ERR "%s: double rx stream " + "corruption!\n", + wiphy_name(ar->hw->wiphy)); + goto err_telluser; + } else + goto err_silent; + } + + /* + * save incomplete data set. + * the firmware will resend the missing bits when + * the rx - descriptor comes round again. + */ + + memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen); + ar->rx_failover_missing = clen - tlen; + return ; + } + resplen = clen; + respbuf = tbuf + 4; + tbuf += wlen + 4; + tlen -= wlen + 4; + + i = 0; + + /* weird thing, but this is the same in the original driver */ + while (resplen > 2 && i < 12 && + respbuf[0] == 0xff && respbuf[1] == 0xff) { + i += 2; + resplen -= 2; + respbuf += 2; + } + + if (resplen < 4) + continue; + + /* found the 6 * 0xffff marker? */ + if (i == 12) + ar9170_handle_command_response(ar, respbuf, resplen); + else + ar9170_handle_mpdu(ar, respbuf, clen); + } + + if (tlen) { + if (net_ratelimit()) + printk(KERN_ERR "%s: %d bytes of unprocessed " + "data left in rx stream!\n", + wiphy_name(ar->hw->wiphy), tlen); + + goto err_telluser; + } + + return ; + +err_telluser: + printk(KERN_ERR "%s: damaged RX stream data [want:%d, " + "data:%d, rx:%d, pending:%d ]\n", + wiphy_name(ar->hw->wiphy), clen, wlen, tlen, + ar->rx_failover_missing); + + if (ar->rx_failover_missing) + print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET, + ar->rx_failover->data, + ar->rx_failover->len); + + print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET, + skb->data, skb->len); + + printk(KERN_ERR "%s: please check your hardware and cables, if " + "you see this message frequently.\n", + wiphy_name(ar->hw->wiphy)); + +err_silent: + if (ar->rx_failover_missing) { + skb_reset_tail_pointer(ar->rx_failover); + skb_trim(ar->rx_failover, 0); + ar->rx_failover_missing = 0; + } +} + +#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \ +do { \ + queue.aifs = ai_fs; \ + queue.cw_min = cwmin; \ + queue.cw_max = cwmax; \ + queue.txop = _txop; \ +} while (0) + +static int ar9170_op_start(struct ieee80211_hw *hw) +{ + struct ar9170 *ar = hw->priv; + int err, i; + + mutex_lock(&ar->mutex); + + /* reinitialize queues statistics */ + memset(&ar->tx_stats, 0, sizeof(ar->tx_stats)); + for (i = 0; i < __AR9170_NUM_TXQ; i++) + ar->tx_stats[i].limit = AR9170_TXQ_DEPTH; + + /* reset QoS defaults */ + AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/ + AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023, 0); /* BACKGROUND */ + AR9170_FILL_QUEUE(ar->edcf[2], 2, 7, 15, 94); /* VIDEO */ + AR9170_FILL_QUEUE(ar->edcf[3], 2, 3, 7, 47); /* VOICE */ + 
AR9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */ + + /* set sane AMPDU defaults */ + ar->global_ampdu_density = 6; + ar->global_ampdu_factor = 3; + + atomic_set(&ar->tx_ampdu_pending, 0); + ar->bad_hw_nagger = jiffies; + + err = ar->open(ar); + if (err) + goto out; + + err = ar9170_init_mac(ar); + if (err) + goto out; + + err = ar9170_set_qos(ar); + if (err) + goto out; + + err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ); + if (err) + goto out; + + err = ar9170_init_rf(ar); + if (err) + goto out; + + /* start DMA */ + err = ar9170_write_reg(ar, 0x1c3d30, 0x100); + if (err) + goto out; + + ar->state = AR9170_STARTED; + +out: + mutex_unlock(&ar->mutex); + return err; +} + +static void ar9170_op_stop(struct ieee80211_hw *hw) +{ + struct ar9170 *ar = hw->priv; + unsigned int i; + + if (IS_STARTED(ar)) + ar->state = AR9170_IDLE; + + cancel_delayed_work_sync(&ar->tx_janitor); +#ifdef CONFIG_AR9170_LEDS + cancel_delayed_work_sync(&ar->led_work); +#endif + cancel_work_sync(&ar->beacon_work); + + mutex_lock(&ar->mutex); + + if (IS_ACCEPTING_CMD(ar)) { + ar9170_set_leds_state(ar, 0); + + /* stop DMA */ + ar9170_write_reg(ar, 0x1c3d30, 0); + ar->stop(ar); + } + + for (i = 0; i < __AR9170_NUM_TXQ; i++) { + skb_queue_purge(&ar->tx_pending[i]); + skb_queue_purge(&ar->tx_status[i]); + } + skb_queue_purge(&ar->tx_status_ampdu); + + mutex_unlock(&ar->mutex); +} + +static void ar9170_tx_indicate_immba(struct ar9170 *ar, struct sk_buff *skb) +{ + struct ar9170_tx_control *txc = (void *) skb->data; + + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU); +} + +static void ar9170_tx_copy_phy(struct ar9170 *ar, struct sk_buff *dst, + struct sk_buff *src) +{ + struct ar9170_tx_control *dst_txc, *src_txc; + struct ieee80211_tx_info *dst_info, *src_info; + struct ar9170_tx_info *dst_arinfo, *src_arinfo; + + src_txc = (void *) src->data; + src_info = IEEE80211_SKB_CB(src); + src_arinfo = (void *) src_info->rate_driver_data; + + dst_txc = (void *) dst->data; + dst_info = IEEE80211_SKB_CB(dst); + dst_arinfo = (void *) dst_info->rate_driver_data; + + dst_txc->phy_control = src_txc->phy_control; + + /* same MCS for the whole aggregate */ + memcpy(dst_info->driver_rates, src_info->driver_rates, + sizeof(dst_info->driver_rates)); +} + +static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + struct ar9170_tx_control *txc; + struct ieee80211_tx_info *info; + struct ieee80211_tx_rate *txrate; + struct ar9170_tx_info *arinfo; + unsigned int queue = skb_get_queue_mapping(skb); + u16 keytype = 0; + u16 len, icv = 0; + + BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data)); + + hdr = (void *)skb->data; + info = IEEE80211_SKB_CB(skb); + len = skb->len; + + txc = (void *)skb_push(skb, sizeof(*txc)); + + if (info->control.hw_key) { + icv = info->control.hw_key->icv_len; + + switch (info->control.hw_key->alg) { + case ALG_WEP: + keytype = AR9170_TX_MAC_ENCR_RC4; + break; + case ALG_TKIP: + keytype = AR9170_TX_MAC_ENCR_RC4; + break; + case ALG_CCMP: + keytype = AR9170_TX_MAC_ENCR_AES; + break; + default: + WARN_ON(1); + goto err_out; + } + } + + /* Length */ + txc->length = cpu_to_le16(len + icv + 4); + + txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION | + AR9170_TX_MAC_BACKOFF); + txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] << + AR9170_TX_MAC_QOS_SHIFT); + txc->mac_control |= cpu_to_le16(keytype); + txc->phy_control = cpu_to_le32(0); + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK); + + 
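+ /*
+  * Honour the protection scheme rate control requested for the
+  * first rate: CTS-to-self or RTS/CTS, signalled to the firmware
+  * through mac_control.
+  */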
txrate = &info->control.rates[0]; + if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS); + else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS) + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS); + + arinfo = (void *)info->rate_driver_data; + arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT); + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && + (is_valid_ether_addr(ieee80211_get_DA(hdr)))) { + /* + * WARNING: + * Putting the QoS queue bits into an unexplored territory is + * certainly not elegant. + * + * In my defense: This idea provides a reasonable way to + * smuggle valuable information to the tx_status callback. + * Also, the idea behind this bit-abuse came straight from + * the original driver code. + */ + + txc->phy_control |= + cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + if (unlikely(!info->control.sta)) + goto err_out; + + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR); + } else { + txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE); + } + } + + return 0; + +err_out: + skb_pull(skb, sizeof(*txc)); + return -EINVAL; +} + +static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb) +{ + struct ar9170_tx_control *txc; + struct ieee80211_tx_info *info; + struct ieee80211_rate *rate = NULL; + struct ieee80211_tx_rate *txrate; + u32 power, chains; + + txc = (void *) skb->data; + info = IEEE80211_SKB_CB(skb); + txrate = &info->control.rates[0]; + + if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD); + + if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE); + + if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ); + /* this works because 40 MHz is 2 and dup is 3 */ + if (txrate->flags & IEEE80211_TX_RC_DUP_DATA) + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP); + + if (txrate->flags & IEEE80211_TX_RC_SHORT_GI) + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI); + + if (txrate->flags & IEEE80211_TX_RC_MCS) { + u32 r = txrate->idx; + u8 *txpower; + + /* heavy clip control */ + txc->phy_control |= cpu_to_le32((r & 0x7) << 7); + + r <<= AR9170_TX_PHY_MCS_SHIFT; + BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK); + + txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK); + txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT); + + if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) { + if (info->band == IEEE80211_BAND_5GHZ) + txpower = ar->power_5G_ht40; + else + txpower = ar->power_2G_ht40; + } else { + if (info->band == IEEE80211_BAND_5GHZ) + txpower = ar->power_5G_ht20; + else + txpower = ar->power_2G_ht20; + } + + power = txpower[(txrate->idx) & 7]; + } else { + u8 *txpower; + u32 mod; + u32 phyrate; + u8 idx = txrate->idx; + + if (info->band != IEEE80211_BAND_2GHZ) { + idx += 4; + txpower = ar->power_5G_leg; + mod = AR9170_TX_PHY_MOD_OFDM; + } else { + if (idx < 4) { + txpower = ar->power_2G_cck; + mod = AR9170_TX_PHY_MOD_CCK; + } else { + mod = AR9170_TX_PHY_MOD_OFDM; + txpower = ar->power_2G_ofdm; + } + } + + rate = &__ar9170_ratetable[idx]; + + phyrate = rate->hw_value & 0xF; + power = txpower[(rate->hw_value & 0x30) >> 4]; + phyrate <<= AR9170_TX_PHY_MCS_SHIFT; + + txc->phy_control |= cpu_to_le32(mod); + txc->phy_control |= cpu_to_le32(phyrate); + } + + power <<= AR9170_TX_PHY_TX_PWR_SHIFT; + power &= AR9170_TX_PHY_TX_PWR_MASK; + 
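+ /*
+  * Fold the tx power picked from the per-band/per-modulation
+  * tables above into phy_control; the tx chainmask follows below.
+  */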
txc->phy_control |= cpu_to_le32(power); + + /* set TX chains */ + if (ar->eeprom.tx_mask == 1) { + chains = AR9170_TX_PHY_TXCHAIN_1; + } else { + chains = AR9170_TX_PHY_TXCHAIN_2; + + /* >= 36M legacy OFDM - use only one chain */ + if (rate && rate->bitrate >= 360) + chains = AR9170_TX_PHY_TXCHAIN_1; + } + txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT); +} + +static bool ar9170_tx_ampdu(struct ar9170 *ar) +{ + struct sk_buff_head agg; + struct ar9170_sta_tid *tid_info = NULL, *tmp; + struct sk_buff *skb, *first = NULL; + unsigned long flags, f2; + unsigned int i = 0; + u16 seq, queue, tmpssn; + bool run = false; + + skb_queue_head_init(&agg); + + spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags); + if (list_empty(&ar->tx_ampdu_list)) { +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: aggregation list is empty.\n", + wiphy_name(ar->hw->wiphy)); +#endif /* AR9170_TXAGG_DEBUG */ + goto out_unlock; + } + + list_for_each_entry_safe(tid_info, tmp, &ar->tx_ampdu_list, list) { + if (tid_info->state != AR9170_TID_STATE_COMPLETE) { +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: dangling aggregation entry!\n", + wiphy_name(ar->hw->wiphy)); +#endif /* AR9170_TXAGG_DEBUG */ + continue; + } + + if (++i > 64) { +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: enough frames aggregated.\n", + wiphy_name(ar->hw->wiphy)); +#endif /* AR9170_TXAGG_DEBUG */ + break; + } + + queue = TID_TO_WME_AC(tid_info->tid); + + if (skb_queue_len(&ar->tx_pending[queue]) >= + AR9170_NUM_TX_AGG_MAX) { +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: queue %d full.\n", + wiphy_name(ar->hw->wiphy), queue); +#endif /* AR9170_TXAGG_DEBUG */ + continue; + } + + list_del_init(&tid_info->list); + + spin_lock_irqsave(&tid_info->queue.lock, f2); + tmpssn = seq = tid_info->ssn; + first = skb_peek(&tid_info->queue); + + if (likely(first)) + tmpssn = ar9170_get_seq(first); + + if (unlikely(tmpssn != seq)) { +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: ssn mismatch [%d != %d]\n.", + wiphy_name(ar->hw->wiphy), seq, tmpssn); +#endif /* AR9170_TXAGG_DEBUG */ + tid_info->ssn = tmpssn; + } + +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: generate A-MPDU for tid:%d ssn:%d with " + "%d queued frames.\n", wiphy_name(ar->hw->wiphy), + tid_info->tid, tid_info->ssn, + skb_queue_len(&tid_info->queue)); + __ar9170_dump_txqueue(ar, &tid_info->queue); +#endif /* AR9170_TXAGG_DEBUG */ + + while ((skb = skb_peek(&tid_info->queue))) { + if (unlikely(ar9170_get_seq(skb) != seq)) + break; + + __skb_unlink(skb, &tid_info->queue); + tid_info->ssn = seq = GET_NEXT_SEQ(seq); + + if (unlikely(skb_get_queue_mapping(skb) != queue)) { +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: tid:%d(q:%d) queue:%d " + "!match.\n", wiphy_name(ar->hw->wiphy), + tid_info->tid, + TID_TO_WME_AC(tid_info->tid), + skb_get_queue_mapping(skb)); +#endif /* AR9170_TXAGG_DEBUG */ + dev_kfree_skb_any(skb); + continue; + } + + if (unlikely(first == skb)) { + ar9170_tx_prepare_phy(ar, skb); + __skb_queue_tail(&agg, skb); + first = skb; + } else { + ar9170_tx_copy_phy(ar, skb, first); + __skb_queue_tail(&agg, skb); + } + + if (unlikely(skb_queue_len(&agg) == + AR9170_NUM_TX_AGG_MAX)) + break; + } + + if (skb_queue_empty(&tid_info->queue)) + tid_info->active = false; + else + list_add_tail(&tid_info->list, + &ar->tx_ampdu_list); + + spin_unlock_irqrestore(&tid_info->queue.lock, f2); + + if (unlikely(skb_queue_empty(&agg))) { +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: queued empty list!\n", + wiphy_name(ar->hw->wiphy)); +#endif /* 
AR9170_TXAGG_DEBUG */ + continue; + } + + /* + * tell the FW/HW that this is the last frame, + * that way it will wait for the immediate block ack. + */ + ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg)); + +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n", + wiphy_name(ar->hw->wiphy)); + __ar9170_dump_txqueue(ar, &agg); +#endif /* AR9170_TXAGG_DEBUG */ + + spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags); + + spin_lock_irqsave(&ar->tx_pending[queue].lock, flags); + skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]); + spin_unlock_irqrestore(&ar->tx_pending[queue].lock, flags); + run = true; + + spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags); + } + +out_unlock: + spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags); + __skb_queue_purge(&agg); + + return run; +} + +static void ar9170_tx(struct ar9170 *ar) +{ + struct sk_buff *skb; + unsigned long flags; + struct ieee80211_tx_info *info; + struct ar9170_tx_info *arinfo; + unsigned int i, frames, frames_failed, remaining_space; + int err; + bool schedule_garbagecollector = false; + + BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data)); + + if (unlikely(!IS_STARTED(ar))) + return ; + + remaining_space = AR9170_TX_MAX_PENDING; + + for (i = 0; i < __AR9170_NUM_TXQ; i++) { + spin_lock_irqsave(&ar->tx_stats_lock, flags); + frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len, + skb_queue_len(&ar->tx_pending[i])); + + if (remaining_space < frames) { +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: tx quota reached queue:%d, " + "remaining slots:%d, needed:%d\n", + wiphy_name(ar->hw->wiphy), i, remaining_space, + frames); +#endif /* AR9170_QUEUE_DEBUG */ + frames = remaining_space; + } + + ar->tx_stats[i].len += frames; + ar->tx_stats[i].count += frames; + if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) { +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: queue %d full\n", + wiphy_name(ar->hw->wiphy), i); + + printk(KERN_DEBUG "%s: stuck frames: ===> \n", + wiphy_name(ar->hw->wiphy)); + ar9170_dump_txqueue(ar, &ar->tx_pending[i]); + ar9170_dump_txqueue(ar, &ar->tx_status[i]); +#endif /* AR9170_QUEUE_DEBUG */ + +#ifdef AR9170_QUEUE_STOP_DEBUG + printk(KERN_DEBUG "%s: stop queue %d\n", + wiphy_name(ar->hw->wiphy), i); + __ar9170_dump_txstats(ar); +#endif /* AR9170_QUEUE_STOP_DEBUG */ + ieee80211_stop_queue(ar->hw, i); + } + + spin_unlock_irqrestore(&ar->tx_stats_lock, flags); + + if (!frames) + continue; + + frames_failed = 0; + while (frames) { + skb = skb_dequeue(&ar->tx_pending[i]); + if (unlikely(!skb)) { + frames_failed += frames; + frames = 0; + break; + } + + info = IEEE80211_SKB_CB(skb); + arinfo = (void *) info->rate_driver_data; + + /* TODO: cancel stuck frames */ + arinfo->timeout = jiffies + + msecs_to_jiffies(AR9170_TX_TIMEOUT); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) + atomic_inc(&ar->tx_ampdu_pending); + +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: send frame q:%d =>\n", + wiphy_name(ar->hw->wiphy), i); + ar9170_print_txheader(ar, skb); +#endif /* AR9170_QUEUE_DEBUG */ + + err = ar->tx(ar, skb); + if (unlikely(err)) { + if (info->flags & IEEE80211_TX_CTL_AMPDU) + atomic_dec(&ar->tx_ampdu_pending); + + frames_failed++; + dev_kfree_skb_any(skb); + } else { + remaining_space--; + schedule_garbagecollector = true; + } + + frames--; + } + +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n", + wiphy_name(ar->hw->wiphy), i); + + printk(KERN_DEBUG "%s: unprocessed pending frames left:\n", + wiphy_name(ar->hw->wiphy)); + 
ar9170_dump_txqueue(ar, &ar->tx_pending[i]); +#endif /* AR9170_QUEUE_DEBUG */ + + if (unlikely(frames_failed)) { +#ifdef AR9170_QUEUE_DEBUG + printk(KERN_DEBUG "%s: frames failed %d =>\n", + wiphy_name(ar->hw->wiphy), frames_failed); +#endif /* AR9170_QUEUE_DEBUG */ + + spin_lock_irqsave(&ar->tx_stats_lock, flags); + ar->tx_stats[i].len -= frames_failed; + ar->tx_stats[i].count -= frames_failed; +#ifdef AR9170_QUEUE_STOP_DEBUG + printk(KERN_DEBUG "%s: wake queue %d\n", + wiphy_name(ar->hw->wiphy), i); + __ar9170_dump_txstats(ar); +#endif /* AR9170_QUEUE_STOP_DEBUG */ + ieee80211_wake_queue(ar->hw, i); + spin_unlock_irqrestore(&ar->tx_stats_lock, flags); + } + } + + if (!schedule_garbagecollector) + return; + + ieee80211_queue_delayed_work(ar->hw, + &ar->tx_janitor, + msecs_to_jiffies(AR9170_JANITOR_DELAY)); +} + +static bool ar9170_tx_ampdu_queue(struct ar9170 *ar, struct sk_buff *skb) +{ + struct ieee80211_tx_info *txinfo; + struct ar9170_sta_info *sta_info; + struct ar9170_sta_tid *agg; + struct sk_buff *iter; + unsigned long flags, f2; + unsigned int max; + u16 tid, seq, qseq; + bool run = false, queue = false; + + tid = ar9170_get_tid(skb); + seq = ar9170_get_seq(skb); + txinfo = IEEE80211_SKB_CB(skb); + sta_info = (void *) txinfo->control.sta->drv_priv; + agg = &sta_info->agg[tid]; + max = sta_info->ampdu_max_len; + + spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags); + + if (unlikely(agg->state != AR9170_TID_STATE_COMPLETE)) { +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: BlockACK session not fully initialized " + "for ESS:%pM tid:%d state:%d.\n", + wiphy_name(ar->hw->wiphy), agg->addr, agg->tid, + agg->state); +#endif /* AR9170_TXAGG_DEBUG */ + goto err_unlock; + } + + if (!agg->active) { + agg->active = true; + agg->ssn = seq; + queue = true; + } + + /* check if seq is within the BA window */ + if (unlikely(!BAW_WITHIN(agg->ssn, max, seq))) { +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: frame with tid:%d seq:%d does not " + "fit into BA window (%d - %d)\n", + wiphy_name(ar->hw->wiphy), tid, seq, agg->ssn, + (agg->ssn + max) & 0xfff); +#endif /* AR9170_TXAGG_DEBUG */ + goto err_unlock; + } + + spin_lock_irqsave(&agg->queue.lock, f2); + + skb_queue_reverse_walk(&agg->queue, iter) { + qseq = ar9170_get_seq(iter); + + if (GET_NEXT_SEQ(qseq) == seq) { + __skb_queue_after(&agg->queue, iter, skb); + goto queued; + } + } + + __skb_queue_head(&agg->queue, skb); + +queued: + spin_unlock_irqrestore(&agg->queue.lock, f2); + +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_DEBUG "%s: new aggregate %p queued.\n", + wiphy_name(ar->hw->wiphy), skb); + __ar9170_dump_txqueue(ar, &agg->queue); +#endif /* AR9170_TXAGG_DEBUG */ + + if (skb_queue_len(&agg->queue) >= AR9170_NUM_TX_AGG_MAX) + run = true; + + if (queue) + list_add_tail(&agg->list, &ar->tx_ampdu_list); + + spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags); + return run; + +err_unlock: + spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags); + dev_kfree_skb_irq(skb); + return false; +} + +int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ar9170 *ar = hw->priv; + struct ieee80211_tx_info *info; + + if (unlikely(!IS_STARTED(ar))) + goto err_free; + + if (unlikely(ar9170_tx_prepare(ar, skb))) + goto err_free; + + info = IEEE80211_SKB_CB(skb); + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + bool run = ar9170_tx_ampdu_queue(ar, skb); + + if (run || !atomic_read(&ar->tx_ampdu_pending)) + ar9170_tx_ampdu(ar); + } else { + unsigned int queue = skb_get_queue_mapping(skb); + + ar9170_tx_prepare_phy(ar, skb); + 
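/* non-aggregated frames go straight onto the matching tx_pending[] queue */ +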
skb_queue_tail(&ar->tx_pending[queue], skb); + } + + ar9170_tx(ar); + return NETDEV_TX_OK; + +err_free: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static int ar9170_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ar9170 *ar = hw->priv; + struct ath_common *common = &ar->common; + int err = 0; + + mutex_lock(&ar->mutex); + + if (ar->vif) { + err = -EBUSY; + goto unlock; + } + + ar->vif = vif; + memcpy(common->macaddr, vif->addr, ETH_ALEN); + + if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) { + ar->rx_software_decryption = true; + ar->disable_offload = true; + } + + ar->cur_filter = 0; + err = ar9170_update_frame_filter(ar, AR9170_MAC_REG_FTF_DEFAULTS); + if (err) + goto unlock; + + err = ar9170_set_operating_mode(ar); + +unlock: + mutex_unlock(&ar->mutex); + return err; +} + +static void ar9170_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ar9170 *ar = hw->priv; + + mutex_lock(&ar->mutex); + ar->vif = NULL; + ar9170_update_frame_filter(ar, 0); + ar9170_set_beacon_timers(ar); + dev_kfree_skb(ar->beacon); + ar->beacon = NULL; + ar->sniffer_enabled = false; + ar->rx_software_decryption = false; + ar9170_set_operating_mode(ar); + mutex_unlock(&ar->mutex); +} + +static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct ar9170 *ar = hw->priv; + int err = 0; + + mutex_lock(&ar->mutex); + + if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { + /* TODO */ + err = 0; + } + + if (changed & IEEE80211_CONF_CHANGE_PS) { + /* TODO */ + err = 0; + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + /* TODO */ + err = 0; + } + + if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { + /* + * is it long_frame_max_tx_count or short_frame_max_tx_count? + */ + + err = ar9170_set_hwretry_limit(ar, + ar->hw->conf.long_frame_max_tx_count); + if (err) + goto out; + } + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + + /* adjust slot time for 5 GHz */ + err = ar9170_set_slot_time(ar); + if (err) + goto out; + + err = ar9170_set_dyn_sifs_ack(ar); + if (err) + goto out; + + err = ar9170_set_channel(ar, hw->conf.channel, + AR9170_RFI_NONE, + nl80211_to_ar9170(hw->conf.channel_type)); + if (err) + goto out; + } + +out: + mutex_unlock(&ar->mutex); + return err; +} + +static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, + struct dev_addr_list *mclist) +{ + u64 mchash; + int i; + + /* always get broadcast frames */ + mchash = 1ULL << (0xff >> 2); + + for (i = 0; i < mc_count; i++) { + if (WARN_ON(!mclist)) + break; + mchash |= 1ULL << (mclist->dmi_addr[5] >> 2); + mclist = mclist->next; + } + + return mchash; +} + +static void ar9170_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, + u64 multicast) +{ + struct ar9170 *ar = hw->priv; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return ; + + mutex_lock(&ar->mutex); + + /* mask supported flags */ + *new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC | + FIF_PROMISC_IN_BSS | FIF_FCSFAIL | FIF_PLCPFAIL; + ar->filter_state = *new_flags; + /* + * We can support more by setting the sniffer bit and + * then checking the error flags, later. 
+ */ + + if (changed_flags & FIF_ALLMULTI && *new_flags & FIF_ALLMULTI) + multicast = ~0ULL; + + if (multicast != ar->cur_mc_hash) + ar9170_update_multicast(ar, multicast); + + if (changed_flags & FIF_CONTROL) { + u32 filter = AR9170_MAC_REG_FTF_PSPOLL | + AR9170_MAC_REG_FTF_RTS | + AR9170_MAC_REG_FTF_CTS | + AR9170_MAC_REG_FTF_ACK | + AR9170_MAC_REG_FTF_CFE | + AR9170_MAC_REG_FTF_CFE_ACK; + + if (*new_flags & FIF_CONTROL) + filter |= ar->cur_filter; + else + filter &= (~ar->cur_filter); + + ar9170_update_frame_filter(ar, filter); + } + + if (changed_flags & FIF_PROMISC_IN_BSS) { + ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0; + ar9170_set_operating_mode(ar); + } + + mutex_unlock(&ar->mutex); +} + + +static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct ar9170 *ar = hw->priv; + struct ath_common *common = &ar->common; + int err = 0; + + mutex_lock(&ar->mutex); + + if (changed & BSS_CHANGED_BSSID) { + memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); + err = ar9170_set_operating_mode(ar); + if (err) + goto out; + } + + if (changed & BSS_CHANGED_BEACON_ENABLED) + ar->enable_beacon = bss_conf->enable_beacon; + + if (changed & BSS_CHANGED_BEACON) { + err = ar9170_update_beacon(ar); + if (err) + goto out; + } + + if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_INT)) { + err = ar9170_set_beacon_timers(ar); + if (err) + goto out; + } + + if (changed & BSS_CHANGED_ASSOC) { +#ifndef CONFIG_AR9170_LEDS + /* enable assoc LED. */ + err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0); +#endif /* CONFIG_AR9170_LEDS */ + } + + if (changed & BSS_CHANGED_HT) { + /* TODO */ + err = 0; + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + err = ar9170_set_slot_time(ar); + if (err) + goto out; + } + + if (changed & BSS_CHANGED_BASIC_RATES) { + err = ar9170_set_basic_rates(ar); + if (err) + goto out; + } + +out: + mutex_unlock(&ar->mutex); +} + +static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw) +{ + struct ar9170 *ar = hw->priv; + int err; + u64 tsf; +#define NR 3 + static const u32 addr[NR] = { AR9170_MAC_REG_TSF_H, + AR9170_MAC_REG_TSF_L, + AR9170_MAC_REG_TSF_H }; + u32 val[NR]; + int loops = 0; + + mutex_lock(&ar->mutex); + + while (loops++ < 10) { + err = ar9170_read_mreg(ar, NR, addr, val); + if (err || val[0] == val[2]) + break; + } + + mutex_unlock(&ar->mutex); + + if (WARN_ON(err)) + return 0; + tsf = val[0]; + tsf = (tsf << 32) | val[1]; + return tsf; +#undef NR +} + +static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct ar9170 *ar = hw->priv; + int err = 0, i; + u8 ktype; + + if ((!ar->vif) || (ar->disable_offload)) + return -EOPNOTSUPP; + + switch (key->alg) { + case ALG_WEP: + if (key->keylen == WLAN_KEY_LEN_WEP40) + ktype = AR9170_ENC_ALG_WEP64; + else + ktype = AR9170_ENC_ALG_WEP128; + break; + case ALG_TKIP: + ktype = AR9170_ENC_ALG_TKIP; + break; + case ALG_CCMP: + ktype = AR9170_ENC_ALG_AESCCMP; + break; + default: + return -EOPNOTSUPP; + } + + mutex_lock(&ar->mutex); + if (cmd == SET_KEY) { + if (unlikely(!IS_STARTED(ar))) { + err = -EOPNOTSUPP; + goto out; + } + + /* group keys need all-zeroes address */ + if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + sta = NULL; + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + for (i = 0; i < 64; i++) + if (!(ar->usedkeys & BIT(i))) + break; + if (i == 64) { + 
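/* all 64 pairwise key slots are taken: fall back to software decryption */ +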
ar->rx_software_decryption = true; + ar9170_set_operating_mode(ar); + err = -ENOSPC; + goto out; + } + } else { + i = 64 + key->keyidx; + } + + key->hw_key_idx = i; + + err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0, + key->key, min_t(u8, 16, key->keylen)); + if (err) + goto out; + + if (key->alg == ALG_TKIP) { + err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, + ktype, 1, key->key + 16, 16); + if (err) + goto out; + + /* + * hardware is not capable generating the MMIC + * for fragmented frames! + */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + } + + if (i < 64) + ar->usedkeys |= BIT(i); + + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } else { + if (unlikely(!IS_STARTED(ar))) { + /* The device is gone... together with the key ;-) */ + err = 0; + goto out; + } + + err = ar9170_disable_key(ar, key->hw_key_idx); + if (err) + goto out; + + if (key->hw_key_idx < 64) { + ar->usedkeys &= ~BIT(key->hw_key_idx); + } else { + err = ar9170_upload_key(ar, key->hw_key_idx, NULL, + AR9170_ENC_ALG_NONE, 0, + NULL, 0); + if (err) + goto out; + + if (key->alg == ALG_TKIP) { + err = ar9170_upload_key(ar, key->hw_key_idx, + NULL, + AR9170_ENC_ALG_NONE, 1, + NULL, 0); + if (err) + goto out; + } + + } + } + + ar9170_regwrite_begin(ar); + ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys); + ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32); + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + +out: + mutex_unlock(&ar->mutex); + + return err; +} + +static int ar9170_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ar9170 *ar = hw->priv; + struct ar9170_sta_info *sta_info = (void *) sta->drv_priv; + unsigned int i; + + memset(sta_info, 0, sizeof(*sta_info)); + + if (!sta->ht_cap.ht_supported) + return 0; + + if (sta->ht_cap.ampdu_density > ar->global_ampdu_density) + ar->global_ampdu_density = sta->ht_cap.ampdu_density; + + if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor) + ar->global_ampdu_factor = sta->ht_cap.ampdu_factor; + + for (i = 0; i < AR9170_NUM_TID; i++) { + sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN; + sta_info->agg[i].active = false; + sta_info->agg[i].ssn = 0; + sta_info->agg[i].tid = i; + INIT_LIST_HEAD(&sta_info->agg[i].list); + skb_queue_head_init(&sta_info->agg[i].queue); + } + + sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor); + + return 0; +} + +static int ar9170_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ar9170_sta_info *sta_info = (void *) sta->drv_priv; + unsigned int i; + + if (!sta->ht_cap.ht_supported) + return 0; + + for (i = 0; i < AR9170_NUM_TID; i++) { + sta_info->agg[i].state = AR9170_TID_STATE_INVALID; + skb_queue_purge(&sta_info->agg[i].queue); + } + + return 0; +} + +static int ar9170_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct ar9170 *ar = hw->priv; + u32 val; + int err; + + mutex_lock(&ar->mutex); + err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val); + ar->stats.dot11ACKFailureCount += val; + + memcpy(stats, &ar->stats, sizeof(*stats)); + mutex_unlock(&ar->mutex); + + return 0; +} + +static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *param) +{ + struct ar9170 *ar = hw->priv; + int ret; + + mutex_lock(&ar->mutex); + if (queue < __AR9170_NUM_TXQ) { + memcpy(&ar->edcf[ar9170_qos_hwmap[queue]], + param, sizeof(*param)); + + ret = ar9170_set_qos(ar); + } else { + 
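/* no such hardware queue */ +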
ret = -EINVAL; + } + + mutex_unlock(&ar->mutex); + return ret; +} + +static int ar9170_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + struct ar9170 *ar = hw->priv; + struct ar9170_sta_info *sta_info = (void *) sta->drv_priv; + struct ar9170_sta_tid *tid_info = &sta_info->agg[tid]; + unsigned long flags; + + if (!modparam_ht) + return -EOPNOTSUPP; + + switch (action) { + case IEEE80211_AMPDU_TX_START: + spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags); + if (tid_info->state != AR9170_TID_STATE_SHUTDOWN || + !list_empty(&tid_info->list)) { + spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags); +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_INFO "%s: A-MPDU [ESS:[%pM] tid:[%d]] " + "is in a very bad state!\n", + wiphy_name(hw->wiphy), sta->addr, tid); +#endif /* AR9170_TXAGG_DEBUG */ + return -EBUSY; + } + + *ssn = tid_info->ssn; + tid_info->state = AR9170_TID_STATE_PROGRESS; + tid_info->active = false; + spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags); + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + + case IEEE80211_AMPDU_TX_STOP: + spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags); + tid_info->state = AR9170_TID_STATE_SHUTDOWN; + list_del_init(&tid_info->list); + tid_info->active = false; + skb_queue_purge(&tid_info->queue); + spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + + case IEEE80211_AMPDU_TX_OPERATIONAL: +#ifdef AR9170_TXAGG_DEBUG + printk(KERN_INFO "%s: A-MPDU for %pM [tid:%d] Operational.\n", + wiphy_name(hw->wiphy), sta->addr, tid); +#endif /* AR9170_TXAGG_DEBUG */ + spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags); + sta_info->agg[tid].state = AR9170_TID_STATE_COMPLETE; + spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags); + break; + + case IEEE80211_AMPDU_RX_START: + case IEEE80211_AMPDU_RX_STOP: + /* Handled by firmware */ + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct ieee80211_ops ar9170_ops = { + .start = ar9170_op_start, + .stop = ar9170_op_stop, + .tx = ar9170_op_tx, + .add_interface = ar9170_op_add_interface, + .remove_interface = ar9170_op_remove_interface, + .config = ar9170_op_config, + .prepare_multicast = ar9170_op_prepare_multicast, + .configure_filter = ar9170_op_configure_filter, + .conf_tx = ar9170_conf_tx, + .bss_info_changed = ar9170_op_bss_info_changed, + .get_tsf = ar9170_op_get_tsf, + .set_key = ar9170_set_key, + .sta_add = ar9170_sta_add, + .sta_remove = ar9170_sta_remove, + .get_stats = ar9170_get_stats, + .ampdu_action = ar9170_ampdu_action, +}; + +void *ar9170_alloc(size_t priv_size) +{ + struct ieee80211_hw *hw; + struct ar9170 *ar; + struct sk_buff *skb; + int i; + + /* + * this buffer is used for rx stream reconstruction. + * Under heavy load this device (or the transport layer?) + * tends to split the streams into separate rx descriptors. 
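+ * The skb allocated below is kept around as ar->rx_failover for that purpose.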
+ */ + + skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL); + if (!skb) + goto err_nomem; + + hw = ieee80211_alloc_hw(priv_size, &ar9170_ops); + if (!hw) + goto err_nomem; + + ar = hw->priv; + ar->hw = hw; + ar->rx_failover = skb; + + mutex_init(&ar->mutex); + spin_lock_init(&ar->cmdlock); + spin_lock_init(&ar->tx_stats_lock); + spin_lock_init(&ar->tx_ampdu_list_lock); + skb_queue_head_init(&ar->tx_status_ampdu); + for (i = 0; i < __AR9170_NUM_TXQ; i++) { + skb_queue_head_init(&ar->tx_status[i]); + skb_queue_head_init(&ar->tx_pending[i]); + } + ar9170_rx_reset_rx_mpdu(ar); + INIT_WORK(&ar->beacon_work, ar9170_new_beacon); + INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor); + INIT_LIST_HEAD(&ar->tx_ampdu_list); + + /* all hw supports 2.4 GHz, so set channel to 1 by default */ + ar->channel = &ar9170_2ghz_chantable[0]; + + /* first part of wiphy init */ + ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_WDS) | + BIT(NL80211_IFTYPE_ADHOC); + ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM; + + if (modparam_ht) { + ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + } else { + ar9170_band_2GHz.ht_cap.ht_supported = false; + ar9170_band_5GHz.ht_cap.ht_supported = false; + } + + ar->hw->queues = __AR9170_NUM_TXQ; + ar->hw->extra_tx_headroom = 8; + ar->hw->sta_data_size = sizeof(struct ar9170_sta_info); + + ar->hw->max_rates = 1; + ar->hw->max_rate_tries = 3; + + for (i = 0; i < ARRAY_SIZE(ar->noise); i++) + ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */ + + return ar; + +err_nomem: + kfree_skb(skb); + return ERR_PTR(-ENOMEM); +} + +static int ar9170_read_eeprom(struct ar9170 *ar) +{ +#define RW 8 /* number of words to read at once */ +#define RB (sizeof(u32) * RW) + struct ath_regulatory *regulatory = &ar->common.regulatory; + u8 *eeprom = (void *)&ar->eeprom; + u8 *addr = ar->eeprom.mac_address; + __le32 offsets[RW]; + unsigned int rx_streams, tx_streams, tx_params = 0; + int i, j, err, bands = 0; + + BUILD_BUG_ON(sizeof(ar->eeprom) & 3); + + BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4); +#ifndef __CHECKER__ + /* don't want to handle trailing remains */ + BUILD_BUG_ON(sizeof(ar->eeprom) % RB); +#endif + + for (i = 0; i < sizeof(ar->eeprom)/RB; i++) { + for (j = 0; j < RW; j++) + offsets[j] = cpu_to_le32(AR9170_EEPROM_START + + RB * i + 4 * j); + + err = ar->exec_cmd(ar, AR9170_CMD_RREG, + RB, (u8 *) &offsets, + RB, eeprom + RB * i); + if (err) + return err; + } + +#undef RW +#undef RB + + if (ar->eeprom.length == cpu_to_le16(0xFFFF)) + return -ENODATA; + + if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) { + ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz; + bands++; + } + if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) { + ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz; + bands++; + } + + rx_streams = hweight8(ar->eeprom.rx_mask); + tx_streams = hweight8(ar->eeprom.tx_mask); + + if (rx_streams != tx_streams) + tx_params = IEEE80211_HT_MCS_TX_RX_DIFF; + + if (tx_streams >= 1 && tx_streams <= IEEE80211_HT_MCS_TX_MAX_STREAMS) + tx_params = (tx_streams - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; + + ar9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params; + ar9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params; + + /* + * I measured this, a bandswitch takes roughly + * 135 ms and a frequency switch about 80. + * + * FIXME: measure these values again once EEPROM settings + * are used, that will influence them! 
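+ * + * Note: channel_change_time is specified in microseconds, hence the + * millisecond figures above are multiplied by 1000 below.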
+ */ + if (bands == 2) + ar->hw->channel_change_time = 135 * 1000; + else + ar->hw->channel_change_time = 80 * 1000; + + regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]); + regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]); + + /* second part of wiphy init */ + SET_IEEE80211_PERM_ADDR(ar->hw, addr); + + return bands ? 0 : -EINVAL; +} + +static int ar9170_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct ar9170 *ar = hw->priv; + + return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory); +} + +int ar9170_register(struct ar9170 *ar, struct device *pdev) +{ + struct ath_regulatory *regulatory = &ar->common.regulatory; + int err; + + /* try to read EEPROM, init MAC addr */ + err = ar9170_read_eeprom(ar); + if (err) + goto err_out; + + err = ath_regd_init(regulatory, ar->hw->wiphy, + ar9170_reg_notifier); + if (err) + goto err_out; + + err = ieee80211_register_hw(ar->hw); + if (err) + goto err_out; + + if (!ath_is_world_regd(regulatory)) + regulatory_hint(ar->hw->wiphy, regulatory->alpha2); + + err = ar9170_init_leds(ar); + if (err) + goto err_unreg; + +#ifdef CONFIG_AR9170_LEDS + err = ar9170_register_leds(ar); + if (err) + goto err_unreg; +#endif /* CONFIG_AR9170_LEDS */ + + dev_info(pdev, "Atheros AR9170 is registered as '%s'\n", + wiphy_name(ar->hw->wiphy)); + + ar->registered = true; + return 0; + +err_unreg: + ieee80211_unregister_hw(ar->hw); + +err_out: + return err; +} + +void ar9170_unregister(struct ar9170 *ar) +{ + if (ar->registered) { +#ifdef CONFIG_AR9170_LEDS + ar9170_unregister_leds(ar); +#endif /* CONFIG_AR9170_LEDS */ + + ieee80211_unregister_hw(ar->hw); + } + + kfree_skb(ar->rx_failover); + mutex_destroy(&ar->mutex); +} diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c new file mode 100644 index 0000000..45a415e --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/phy.c @@ -0,0 +1,1721 @@ +/* + * Atheros AR9170 driver + * + * PHY and RF code + * + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "ar9170.h" +#include "cmd.h" + +static int ar9170_init_power_cal(struct ar9170 *ar) +{ + ar9170_regwrite_begin(ar); + + ar9170_regwrite(0x1bc000 + 0x993c, 0x7f); + ar9170_regwrite(0x1bc000 + 0x9934, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0x9938, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa234, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa238, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa38c, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa390, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa3cc, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa3d0, 0x3f3f3f3f); + ar9170_regwrite(0x1bc000 + 0xa3d4, 0x3f3f3f3f); + + ar9170_regwrite_finish(); + return ar9170_regwrite_result(); +} + +struct ar9170_phy_init { + u32 reg, _5ghz_20, _5ghz_40, _2ghz_40, _2ghz_20; +}; + +static struct ar9170_phy_init ar5416_phy_init[] = { + { 0x1c5800, 0x00000007, 0x00000007, 0x00000007, 0x00000007, }, + { 0x1c5804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, }, + { 0x1c5808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c580c, 0xad848e19, 0xad848e19, 0xad848e19, 0xad848e19, }, + { 0x1c5810, 0x7d14e000, 0x7d14e000, 0x7d14e000, 0x7d14e000, }, + { 0x1c5814, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, }, + { 0x1c5818, 0x00000090, 0x00000090, 0x00000090, 0x00000090, }, + { 0x1c581c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, }, + { 0x1c5824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, }, + { 0x1c5828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, }, + { 0x1c582c, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, }, + { 0x1c5830, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, }, + { 0x1c5838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, }, + { 0x1c583c, 0x00200400, 0x00200400, 0x00200400, 0x00200400, }, + { 0x1c5840, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e, }, + { 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4, }, + { 0x1c5848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, }, + { 0x1c584c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, }, + { 0x1c5850, 0x6c48b4e4, 0x6c48b4e4, 0x6c48b0e4, 0x6c48b0e4, }, + { 0x1c5854, 0x00000859, 0x00000859, 0x00000859, 0x00000859, }, + { 0x1c5858, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, }, + { 0x1c585c, 0x31395c5e, 0x31395c5e, 0x31395c5e, 0x31395c5e, }, + { 0x1c5860, 0x0004dd10, 0x0004dd10, 0x0004dd20, 0x0004dd20, }, + { 0x1c5868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, }, + { 0x1c586c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, }, + { 0x1c5900, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5904, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5908, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c590c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, }, + { 0x1c5918, 0x00000118, 0x00000230, 0x00000268, 0x00000134, }, + { 0x1c591c, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, }, + { 0x1c5920, 0x0510081c, 0x0510081c, 0x0510001c, 0x0510001c, }, + { 0x1c5924, 0xd0058a15, 0xd0058a15, 0xd0058a15, 0xd0058a15, }, + { 0x1c5928, 0x00000001, 
0x00000001, 0x00000001, 0x00000001, }, + { 0x1c592c, 0x00000004, 0x00000004, 0x00000004, 0x00000004, }, + { 0x1c5934, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c5938, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c593c, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, }, + { 0x1c5944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, }, + { 0x1c5948, 0x9280b212, 0x9280b212, 0x9280b212, 0x9280b212, }, + { 0x1c594c, 0x00020028, 0x00020028, 0x00020028, 0x00020028, }, + { 0x1c5954, 0x5d50e188, 0x5d50e188, 0x5d50e188, 0x5d50e188, }, + { 0x1c5958, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, }, + { 0x1c5960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, }, + { 0x1c5964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, }, + { 0x1c5970, 0x190fb515, 0x190fb515, 0x190fb515, 0x190fb515, }, + { 0x1c5974, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5978, 0x00000001, 0x00000001, 0x00000001, 0x00000001, }, + { 0x1c597c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5980, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5984, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5988, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c598c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5990, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5994, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5998, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c599c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59a4, 0x00000007, 0x00000007, 0x00000007, 0x00000007, }, + { 0x1c59a8, 0x001fff00, 0x001fff00, 0x001fff00, 0x001fff00, }, + { 0x1c59ac, 0x006f00c4, 0x006f00c4, 0x006f00c4, 0x006f00c4, }, + { 0x1c59b0, 0x03051000, 0x03051000, 0x03051000, 0x03051000, }, + { 0x1c59b4, 0x00000820, 0x00000820, 0x00000820, 0x00000820, }, + { 0x1c59c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, }, + { 0x1c59c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, }, + { 0x1c59c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, }, + { 0x1c59cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, }, + { 0x1c59d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, }, + { 0x1c59d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59dc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59e0, 0x00000200, 0x00000200, 0x00000200, 0x00000200, }, + { 0x1c59e4, 0x64646464, 0x64646464, 0x64646464, 0x64646464, }, + { 0x1c59e8, 0x3c787878, 0x3c787878, 0x3c787878, 0x3c787878, }, + { 0x1c59ec, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, }, + { 0x1c59f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c59fc, 0x00001042, 0x00001042, 0x00001042, 0x00001042, }, + { 0x1c5a00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5a04, 0x00000040, 0x00000040, 0x00000040, 0x00000040, }, + { 0x1c5a08, 0x00000080, 0x00000080, 0x00000080, 0x00000080, }, + { 0x1c5a0c, 0x000001a1, 0x000001a1, 0x00000141, 0x00000141, }, + { 0x1c5a10, 0x000001e1, 0x000001e1, 0x00000181, 0x00000181, }, + { 0x1c5a14, 0x00000021, 0x00000021, 0x000001c1, 0x000001c1, }, + { 0x1c5a18, 0x00000061, 0x00000061, 0x00000001, 0x00000001, }, + { 0x1c5a1c, 0x00000168, 0x00000168, 0x00000041, 0x00000041, }, + { 0x1c5a20, 0x000001a8, 0x000001a8, 0x000001a8, 0x000001a8, }, + { 0x1c5a24, 0x000001e8, 0x000001e8, 0x000001e8, 0x000001e8, }, + { 0x1c5a28, 0x00000028, 0x00000028, 0x00000028, 0x00000028, }, + { 
0x1c5a2c, 0x00000068, 0x00000068, 0x00000068, 0x00000068, }, + { 0x1c5a30, 0x00000189, 0x00000189, 0x000000a8, 0x000000a8, }, + { 0x1c5a34, 0x000001c9, 0x000001c9, 0x00000169, 0x00000169, }, + { 0x1c5a38, 0x00000009, 0x00000009, 0x000001a9, 0x000001a9, }, + { 0x1c5a3c, 0x00000049, 0x00000049, 0x000001e9, 0x000001e9, }, + { 0x1c5a40, 0x00000089, 0x00000089, 0x00000029, 0x00000029, }, + { 0x1c5a44, 0x00000170, 0x00000170, 0x00000069, 0x00000069, }, + { 0x1c5a48, 0x000001b0, 0x000001b0, 0x00000190, 0x00000190, }, + { 0x1c5a4c, 0x000001f0, 0x000001f0, 0x000001d0, 0x000001d0, }, + { 0x1c5a50, 0x00000030, 0x00000030, 0x00000010, 0x00000010, }, + { 0x1c5a54, 0x00000070, 0x00000070, 0x00000050, 0x00000050, }, + { 0x1c5a58, 0x00000191, 0x00000191, 0x00000090, 0x00000090, }, + { 0x1c5a5c, 0x000001d1, 0x000001d1, 0x00000151, 0x00000151, }, + { 0x1c5a60, 0x00000011, 0x00000011, 0x00000191, 0x00000191, }, + { 0x1c5a64, 0x00000051, 0x00000051, 0x000001d1, 0x000001d1, }, + { 0x1c5a68, 0x00000091, 0x00000091, 0x00000011, 0x00000011, }, + { 0x1c5a6c, 0x000001b8, 0x000001b8, 0x00000051, 0x00000051, }, + { 0x1c5a70, 0x000001f8, 0x000001f8, 0x00000198, 0x00000198, }, + { 0x1c5a74, 0x00000038, 0x00000038, 0x000001d8, 0x000001d8, }, + { 0x1c5a78, 0x00000078, 0x00000078, 0x00000018, 0x00000018, }, + { 0x1c5a7c, 0x00000199, 0x00000199, 0x00000058, 0x00000058, }, + { 0x1c5a80, 0x000001d9, 0x000001d9, 0x00000098, 0x00000098, }, + { 0x1c5a84, 0x00000019, 0x00000019, 0x00000159, 0x00000159, }, + { 0x1c5a88, 0x00000059, 0x00000059, 0x00000199, 0x00000199, }, + { 0x1c5a8c, 0x00000099, 0x00000099, 0x000001d9, 0x000001d9, }, + { 0x1c5a90, 0x000000d9, 0x000000d9, 0x00000019, 0x00000019, }, + { 0x1c5a94, 0x000000f9, 0x000000f9, 0x00000059, 0x00000059, }, + { 0x1c5a98, 0x000000f9, 0x000000f9, 0x00000099, 0x00000099, }, + { 0x1c5a9c, 0x000000f9, 0x000000f9, 0x000000d9, 0x000000d9, }, + { 0x1c5aa0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5aa4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5aa8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5aac, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ab0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ab4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ab8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5abc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ac0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ac4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ac8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5acc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ad0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ad4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ad8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5adc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ae0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ae4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5ae8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5aec, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5af0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5af4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5af8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5afc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, }, + { 0x1c5b00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5b04, 0x00000001, 0x00000001, 
0x00000001, 0x00000001, }, + { 0x1c5b08, 0x00000002, 0x00000002, 0x00000002, 0x00000002, }, + { 0x1c5b0c, 0x00000003, 0x00000003, 0x00000003, 0x00000003, }, + { 0x1c5b10, 0x00000004, 0x00000004, 0x00000004, 0x00000004, }, + { 0x1c5b14, 0x00000005, 0x00000005, 0x00000005, 0x00000005, }, + { 0x1c5b18, 0x00000008, 0x00000008, 0x00000008, 0x00000008, }, + { 0x1c5b1c, 0x00000009, 0x00000009, 0x00000009, 0x00000009, }, + { 0x1c5b20, 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, }, + { 0x1c5b24, 0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b, }, + { 0x1c5b28, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, }, + { 0x1c5b2c, 0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d, }, + { 0x1c5b30, 0x00000010, 0x00000010, 0x00000010, 0x00000010, }, + { 0x1c5b34, 0x00000011, 0x00000011, 0x00000011, 0x00000011, }, + { 0x1c5b38, 0x00000012, 0x00000012, 0x00000012, 0x00000012, }, + { 0x1c5b3c, 0x00000013, 0x00000013, 0x00000013, 0x00000013, }, + { 0x1c5b40, 0x00000014, 0x00000014, 0x00000014, 0x00000014, }, + { 0x1c5b44, 0x00000015, 0x00000015, 0x00000015, 0x00000015, }, + { 0x1c5b48, 0x00000018, 0x00000018, 0x00000018, 0x00000018, }, + { 0x1c5b4c, 0x00000019, 0x00000019, 0x00000019, 0x00000019, }, + { 0x1c5b50, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, }, + { 0x1c5b54, 0x0000001b, 0x0000001b, 0x0000001b, 0x0000001b, }, + { 0x1c5b58, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, }, + { 0x1c5b5c, 0x0000001d, 0x0000001d, 0x0000001d, 0x0000001d, }, + { 0x1c5b60, 0x00000020, 0x00000020, 0x00000020, 0x00000020, }, + { 0x1c5b64, 0x00000021, 0x00000021, 0x00000021, 0x00000021, }, + { 0x1c5b68, 0x00000022, 0x00000022, 0x00000022, 0x00000022, }, + { 0x1c5b6c, 0x00000023, 0x00000023, 0x00000023, 0x00000023, }, + { 0x1c5b70, 0x00000024, 0x00000024, 0x00000024, 0x00000024, }, + { 0x1c5b74, 0x00000025, 0x00000025, 0x00000025, 0x00000025, }, + { 0x1c5b78, 0x00000028, 0x00000028, 0x00000028, 0x00000028, }, + { 0x1c5b7c, 0x00000029, 0x00000029, 0x00000029, 0x00000029, }, + { 0x1c5b80, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a, }, + { 0x1c5b84, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, }, + { 0x1c5b88, 0x0000002c, 0x0000002c, 0x0000002c, 0x0000002c, }, + { 0x1c5b8c, 0x0000002d, 0x0000002d, 0x0000002d, 0x0000002d, }, + { 0x1c5b90, 0x00000030, 0x00000030, 0x00000030, 0x00000030, }, + { 0x1c5b94, 0x00000031, 0x00000031, 0x00000031, 0x00000031, }, + { 0x1c5b98, 0x00000032, 0x00000032, 0x00000032, 0x00000032, }, + { 0x1c5b9c, 0x00000033, 0x00000033, 0x00000033, 0x00000033, }, + { 0x1c5ba0, 0x00000034, 0x00000034, 0x00000034, 0x00000034, }, + { 0x1c5ba4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5ba8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bac, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bb0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bb4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bb8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bbc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bc0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bc4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bc8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bcc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bd0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bd4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bd8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bdc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5be0, 
0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5be4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5be8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bec, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bf0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bf4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, }, + { 0x1c5bf8, 0x00000010, 0x00000010, 0x00000010, 0x00000010, }, + { 0x1c5bfc, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, }, + { 0x1c5c00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c0c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c14, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c1c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c20, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c24, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c28, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c2c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c30, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c34, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c38, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5c3c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5cf0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5cf4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5cf8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c5cfc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6200, 0x00000008, 0x00000008, 0x0000000e, 0x0000000e, }, + { 0x1c6204, 0x00000440, 0x00000440, 0x00000440, 0x00000440, }, + { 0x1c6208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, }, + { 0x1c620c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, }, + { 0x1c6210, 0x40806333, 0x40806333, 0x40806333, 0x40806333, }, + { 0x1c6214, 0x00106c10, 0x00106c10, 0x00106c10, 0x00106c10, }, + { 0x1c6218, 0x009c4060, 0x009c4060, 0x009c4060, 0x009c4060, }, + { 0x1c621c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, }, + { 0x1c6220, 0x018830c6, 0x018830c6, 0x018830c6, 0x018830c6, }, + { 0x1c6224, 0x00000400, 0x00000400, 0x00000400, 0x00000400, }, + { 0x1c6228, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, }, + { 0x1c622c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108, }, + { 0x1c6234, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c6238, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c623c, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, }, + { 0x1c6240, 0x38490a20, 0x38490a20, 0x38490a20, 0x38490a20, }, + { 0x1c6244, 0x00007bb6, 0x00007bb6, 0x00007bb6, 0x00007bb6, }, + { 0x1c6248, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, }, + { 0x1c624c, 0x00000001, 0x00000001, 0x00000001, 0x00000001, }, + { 0x1c6250, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, }, + { 0x1c6254, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6258, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, }, + { 0x1c625c, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, }, + { 0x1c6260, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, }, + { 0x1c6264, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, }, + { 0x1c6268, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c626c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, }, + { 0x1c6274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 
0x0a1a7caa, }, + { 0x1c6278, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, }, + { 0x1c627c, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, }, + { 0x1c6300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, }, + { 0x1c6304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, }, + { 0x1c6308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, }, + { 0x1c630c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, }, + { 0x1c6310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, }, + { 0x1c6314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, }, + { 0x1c6318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, }, + { 0x1c631c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, }, + { 0x1c6320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, }, + { 0x1c6324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, }, + { 0x1c6328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, }, + { 0x1c632c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6338, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c633c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6340, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6344, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c6348, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, }, + { 0x1c634c, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, }, + { 0x1c6350, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, }, + { 0x1c6354, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, }, + { 0x1c6358, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, }, + { 0x1c6388, 0x08000000, 0x08000000, 0x08000000, 0x08000000, }, + { 0x1c638c, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c6390, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c6394, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, }, + { 0x1c6398, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce, }, + { 0x1c639c, 0x00000007, 0x00000007, 0x00000007, 0x00000007, }, + { 0x1c63a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63a4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63a8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63ac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63b0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63b4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63b8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63bc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63c0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63c4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63cc, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c63d0, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c63d4, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, }, + { 0x1c63d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, + { 0x1c63dc, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, }, + { 0x1c63e0, 0x000000c0, 0x000000c0, 0x000000c0, 0x000000c0, }, + { 0x1c6848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, }, + { 0x1c6920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, }, + { 0x1c6960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, }, + { 0x1c720c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, }, + { 0x1c726c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, }, + { 0x1c7848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, }, + { 0x1c7920, 0x0510001c, 
0x0510001c, 0x0510001c, 0x0510001c, }, + { 0x1c7960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, }, + { 0x1c820c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, }, + { 0x1c826c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, }, +/* { 0x1c8864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, }, */ + { 0x1c8864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, }, + { 0x1c895c, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, }, + { 0x1c8968, 0x000003ce, 0x000003ce, 0x000003ce, 0x000003ce, }, + { 0x1c89bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, }, + { 0x1c9270, 0x00820820, 0x00820820, 0x00820820, 0x00820820, }, + { 0x1c935c, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, }, + { 0x1c9360, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, }, + { 0x1c9364, 0x17601685, 0x17601685, 0x17601685, 0x17601685, }, + { 0x1c9368, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, }, + { 0x1c936c, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, }, + { 0x1c9370, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, }, + { 0x1c9374, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, }, + { 0x1c9378, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, }, + { 0x1c937c, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, }, + { 0x1c9380, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, }, + { 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, } +}; + +/* + * look up a certain register in ar5416_phy_init[] and return the init. value + * for the band and bandwidth given. Return 0 if register address not found. + */ +static u32 ar9170_get_default_phy_reg_val(u32 reg, bool is_2ghz, bool is_40mhz) +{ + unsigned int i; + for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) { + if (ar5416_phy_init[i].reg != reg) + continue; + + if (is_2ghz) { + if (is_40mhz) + return ar5416_phy_init[i]._2ghz_40; + else + return ar5416_phy_init[i]._2ghz_20; + } else { + if (is_40mhz) + return ar5416_phy_init[i]._5ghz_40; + else + return ar5416_phy_init[i]._5ghz_20; + } + } + return 0; +} + +/* + * initialize some phy regs from eeprom values in modal_header[] + * acc. to band and bandwith + */ +static int ar9170_init_phy_from_eeprom(struct ar9170 *ar, + bool is_2ghz, bool is_40mhz) +{ + static const u8 xpd2pd[16] = { + 0x2, 0x2, 0x2, 0x1, 0x2, 0x2, 0x6, 0x2, + 0x2, 0x3, 0x7, 0x2, 0xB, 0x2, 0x2, 0x2 + }; + u32 defval, newval; + /* pointer to the modal_header acc. 
to band */ + struct ar9170_eeprom_modal *m = &ar->eeprom.modal_header[is_2ghz]; + + ar9170_regwrite_begin(ar); + + /* ant common control (index 0) */ + newval = le32_to_cpu(m->antCtrlCommon); + ar9170_regwrite(0x1c5964, newval); + + /* ant control chain 0 (index 1) */ + newval = le32_to_cpu(m->antCtrlChain[0]); + ar9170_regwrite(0x1c5960, newval); + + /* ant control chain 2 (index 2) */ + newval = le32_to_cpu(m->antCtrlChain[1]); + ar9170_regwrite(0x1c7960, newval); + + /* SwSettle (index 3) */ + if (!is_40mhz) { + defval = ar9170_get_default_phy_reg_val(0x1c5844, + is_2ghz, is_40mhz); + newval = (defval & ~0x3f80) | + ((m->switchSettling & 0x7f) << 7); + ar9170_regwrite(0x1c5844, newval); + } + + /* adcDesired, pdaDesired (index 4) */ + defval = ar9170_get_default_phy_reg_val(0x1c5850, is_2ghz, is_40mhz); + newval = (defval & ~0xffff) | ((u8)m->pgaDesiredSize << 8) | + ((u8)m->adcDesiredSize); + ar9170_regwrite(0x1c5850, newval); + + /* TxEndToXpaOff, TxFrameToXpaOn (index 5) */ + defval = ar9170_get_default_phy_reg_val(0x1c5834, is_2ghz, is_40mhz); + newval = (m->txEndToXpaOff << 24) | (m->txEndToXpaOff << 16) | + (m->txFrameToXpaOn << 8) | m->txFrameToXpaOn; + ar9170_regwrite(0x1c5834, newval); + + /* TxEndToRxOn (index 6) */ + defval = ar9170_get_default_phy_reg_val(0x1c5828, is_2ghz, is_40mhz); + newval = (defval & ~0xff0000) | (m->txEndToRxOn << 16); + ar9170_regwrite(0x1c5828, newval); + + /* thresh62 (index 7) */ + defval = ar9170_get_default_phy_reg_val(0x1c8864, is_2ghz, is_40mhz); + newval = (defval & ~0x7f000) | (m->thresh62 << 12); + ar9170_regwrite(0x1c8864, newval); + + /* tx/rx attenuation chain 0 (index 8) */ + defval = ar9170_get_default_phy_reg_val(0x1c5848, is_2ghz, is_40mhz); + newval = (defval & ~0x3f000) | ((m->txRxAttenCh[0] & 0x3f) << 12); + ar9170_regwrite(0x1c5848, newval); + + /* tx/rx attenuation chain 2 (index 9) */ + defval = ar9170_get_default_phy_reg_val(0x1c7848, is_2ghz, is_40mhz); + newval = (defval & ~0x3f000) | ((m->txRxAttenCh[1] & 0x3f) << 12); + ar9170_regwrite(0x1c7848, newval); + + /* tx/rx margin chain 0 (index 10) */ + defval = ar9170_get_default_phy_reg_val(0x1c620c, is_2ghz, is_40mhz); + newval = (defval & ~0xfc0000) | ((m->rxTxMarginCh[0] & 0x3f) << 18); + /* bsw margin chain 0 for 5GHz only */ + if (!is_2ghz) + newval = (newval & ~0x3c00) | ((m->bswMargin[0] & 0xf) << 10); + ar9170_regwrite(0x1c620c, newval); + + /* tx/rx margin chain 2 (index 11) */ + defval = ar9170_get_default_phy_reg_val(0x1c820c, is_2ghz, is_40mhz); + newval = (defval & ~0xfc0000) | ((m->rxTxMarginCh[1] & 0x3f) << 18); + ar9170_regwrite(0x1c820c, newval); + + /* iqCall, iqCallq chain 0 (index 12) */ + defval = ar9170_get_default_phy_reg_val(0x1c5920, is_2ghz, is_40mhz); + newval = (defval & ~0x7ff) | (((u8)m->iqCalICh[0] & 0x3f) << 5) | + ((u8)m->iqCalQCh[0] & 0x1f); + ar9170_regwrite(0x1c5920, newval); + + /* iqCall, iqCallq chain 2 (index 13) */ + defval = ar9170_get_default_phy_reg_val(0x1c7920, is_2ghz, is_40mhz); + newval = (defval & ~0x7ff) | (((u8)m->iqCalICh[1] & 0x3f) << 5) | + ((u8)m->iqCalQCh[1] & 0x1f); + ar9170_regwrite(0x1c7920, newval); + + /* xpd gain mask (index 14) */ + defval = ar9170_get_default_phy_reg_val(0x1c6258, is_2ghz, is_40mhz); + newval = (defval & ~0xf0000) | (xpd2pd[m->xpdGain & 0xf] << 16); + ar9170_regwrite(0x1c6258, newval); + ar9170_regwrite_finish(); + + return ar9170_regwrite_result(); +} + +int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band) +{ + int i, err; + u32 val; + bool is_2ghz = band == IEEE80211_BAND_2GHZ; + 
bool is_40mhz = conf_is_ht40(&ar->hw->conf); + + ar9170_regwrite_begin(ar); + + for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) { + if (is_40mhz) { + if (is_2ghz) + val = ar5416_phy_init[i]._2ghz_40; + else + val = ar5416_phy_init[i]._5ghz_40; + } else { + if (is_2ghz) + val = ar5416_phy_init[i]._2ghz_20; + else + val = ar5416_phy_init[i]._5ghz_20; + } + + ar9170_regwrite(ar5416_phy_init[i].reg, val); + } + + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + if (err) + return err; + + err = ar9170_init_phy_from_eeprom(ar, is_2ghz, is_40mhz); + if (err) + return err; + + err = ar9170_init_power_cal(ar); + if (err) + return err; + + /* XXX: remove magic! */ + if (is_2ghz) + err = ar9170_write_reg(ar, 0x1d4014, 0x5163); + else + err = ar9170_write_reg(ar, 0x1d4014, 0x5143); + + return err; +} + +struct ar9170_rf_init { + u32 reg, _5ghz, _2ghz; +}; + +static struct ar9170_rf_init ar9170_rf_init[] = { + /* bank 0 */ + { 0x1c58b0, 0x1e5795e5, 0x1e5795e5}, + { 0x1c58e0, 0x02008020, 0x02008020}, + /* bank 1 */ + { 0x1c58b0, 0x02108421, 0x02108421}, + { 0x1c58ec, 0x00000008, 0x00000008}, + /* bank 2 */ + { 0x1c58b0, 0x0e73ff17, 0x0e73ff17}, + { 0x1c58e0, 0x00000420, 0x00000420}, + /* bank 3 */ + { 0x1c58f0, 0x01400018, 0x01c00018}, + /* bank 4 */ + { 0x1c58b0, 0x000001a1, 0x000001a1}, + { 0x1c58e8, 0x00000001, 0x00000001}, + /* bank 5 */ + { 0x1c58b0, 0x00000013, 0x00000013}, + { 0x1c58e4, 0x00000002, 0x00000002}, + /* bank 6 */ + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00004000, 0x00004000}, + { 0x1c58b0, 0x00006c00, 0x00006c00}, + { 0x1c58b0, 0x00002c00, 0x00002c00}, + { 0x1c58b0, 0x00004800, 0x00004800}, + { 0x1c58b0, 0x00004000, 0x00004000}, + { 0x1c58b0, 0x00006000, 0x00006000}, + { 0x1c58b0, 0x00001000, 0x00001000}, + { 0x1c58b0, 0x00004000, 0x00004000}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00087c00, 0x00087c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00005400, 0x00005400}, + { 0x1c58b0, 0x00000c00, 0x00000c00}, + { 0x1c58b0, 0x00001800, 0x00001800}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00006c00, 0x00006c00}, + { 0x1c58b0, 0x00006c00, 0x00006c00}, + { 0x1c58b0, 0x00007c00, 0x00007c00}, + { 0x1c58b0, 0x00002c00, 0x00002c00}, + { 0x1c58b0, 0x00003c00, 0x00003c00}, + { 0x1c58b0, 0x00003800, 0x00003800}, + { 0x1c58b0, 0x00001c00, 0x00001c00}, + { 0x1c58b0, 0x00000800, 0x00000800}, + { 0x1c58b0, 0x00000408, 0x00000408}, + { 0x1c58b0, 0x00004c15, 0x00004c15}, + { 0x1c58b0, 0x00004188, 0x00004188}, + { 0x1c58b0, 0x0000201e, 0x0000201e}, + { 0x1c58b0, 0x00010408, 0x00010408}, + { 0x1c58b0, 0x00000801, 0x00000801}, + { 0x1c58b0, 0x00000c08, 0x00000c08}, + { 0x1c58b0, 0x0000181e, 0x0000181e}, + { 0x1c58b0, 0x00001016, 0x00001016}, + { 0x1c58b0, 0x00002800, 0x00002800}, + { 0x1c58b0, 0x00004010, 0x00004010}, + { 0x1c58b0, 0x0000081c, 0x0000081c}, + { 0x1c58b0, 0x00000115, 0x00000115}, + { 0x1c58b0, 0x00000015, 0x00000015}, + { 0x1c58b0, 0x00000066, 0x00000066}, + { 0x1c58b0, 0x0000001c, 0x0000001c}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000004, 0x00000004}, + { 0x1c58b0, 0x00000015, 0x00000015}, + { 0x1c58b0, 0x0000001f, 0x0000001f}, + { 0x1c58e0, 0x00000000, 0x00000400}, + /* bank 7 */ + { 0x1c58b0, 
0x000000a0, 0x000000a0}, + { 0x1c58b0, 0x00000000, 0x00000000}, + { 0x1c58b0, 0x00000040, 0x00000040}, + { 0x1c58f0, 0x0000001c, 0x0000001c}, +}; + +static int ar9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz) +{ + int err, i; + + ar9170_regwrite_begin(ar); + + for (i = 0; i < ARRAY_SIZE(ar9170_rf_init); i++) + ar9170_regwrite(ar9170_rf_init[i].reg, + band5ghz ? ar9170_rf_init[i]._5ghz + : ar9170_rf_init[i]._2ghz); + + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + if (err) + printk(KERN_ERR "%s: rf init failed\n", + wiphy_name(ar->hw->wiphy)); + return err; +} + +static int ar9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz, + u32 freq, enum ar9170_bw bw) +{ + int err; + u32 d0, d1, td0, td1, fd0, fd1; + u8 chansel; + u8 refsel0 = 1, refsel1 = 0; + u8 lf_synth = 0; + + switch (bw) { + case AR9170_BW_40_ABOVE: + freq += 10; + break; + case AR9170_BW_40_BELOW: + freq -= 10; + break; + case AR9170_BW_20: + break; + case __AR9170_NUM_BW: + BUG(); + } + + if (band5ghz) { + if (freq % 10) { + chansel = (freq - 4800) / 5; + } else { + chansel = ((freq - 4800) / 10) * 2; + refsel0 = 0; + refsel1 = 1; + } + chansel = byte_rev_table[chansel]; + } else { + if (freq == 2484) { + chansel = 10 + (freq - 2274) / 5; + lf_synth = 1; + } else + chansel = 16 + (freq - 2272) / 5; + chansel *= 4; + chansel = byte_rev_table[chansel]; + } + + d1 = chansel; + d0 = 0x21 | + refsel0 << 3 | + refsel1 << 2 | + lf_synth << 1; + td0 = d0 & 0x1f; + td1 = d1 & 0x1f; + fd0 = td1 << 5 | td0; + + td0 = (d0 >> 5) & 0x7; + td1 = (d1 >> 5) & 0x7; + fd1 = td1 << 5 | td0; + + ar9170_regwrite_begin(ar); + + ar9170_regwrite(0x1c58b0, fd0); + ar9170_regwrite(0x1c58e8, fd1); + + ar9170_regwrite_finish(); + err = ar9170_regwrite_result(); + if (err) + return err; + + msleep(10); + + return 0; +} + +struct ar9170_phy_freq_params { + u8 coeff_exp; + u16 coeff_man; + u8 coeff_exp_shgi; + u16 coeff_man_shgi; +}; + +struct ar9170_phy_freq_entry { + u16 freq; + struct ar9170_phy_freq_params params[__AR9170_NUM_BW]; +}; + +/* NB: must be in sync with channel tables in main! 
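(ar9170_get_hw_dyn_params() below indexes this table by channel->hw_value)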
*/ +static const struct ar9170_phy_freq_entry ar9170_phy_freq_params[] = { +/* + * freq, + * 20MHz, + * 40MHz (below), + * 40Mhz (above), + */ + { 2412, { + { 3, 21737, 3, 19563, }, + { 3, 21827, 3, 19644, }, + { 3, 21647, 3, 19482, }, + } }, + { 2417, { + { 3, 21692, 3, 19523, }, + { 3, 21782, 3, 19604, }, + { 3, 21602, 3, 19442, }, + } }, + { 2422, { + { 3, 21647, 3, 19482, }, + { 3, 21737, 3, 19563, }, + { 3, 21558, 3, 19402, }, + } }, + { 2427, { + { 3, 21602, 3, 19442, }, + { 3, 21692, 3, 19523, }, + { 3, 21514, 3, 19362, }, + } }, + { 2432, { + { 3, 21558, 3, 19402, }, + { 3, 21647, 3, 19482, }, + { 3, 21470, 3, 19323, }, + } }, + { 2437, { + { 3, 21514, 3, 19362, }, + { 3, 21602, 3, 19442, }, + { 3, 21426, 3, 19283, }, + } }, + { 2442, { + { 3, 21470, 3, 19323, }, + { 3, 21558, 3, 19402, }, + { 3, 21382, 3, 19244, }, + } }, + { 2447, { + { 3, 21426, 3, 19283, }, + { 3, 21514, 3, 19362, }, + { 3, 21339, 3, 19205, }, + } }, + { 2452, { + { 3, 21382, 3, 19244, }, + { 3, 21470, 3, 19323, }, + { 3, 21295, 3, 19166, }, + } }, + { 2457, { + { 3, 21339, 3, 19205, }, + { 3, 21426, 3, 19283, }, + { 3, 21252, 3, 19127, }, + } }, + { 2462, { + { 3, 21295, 3, 19166, }, + { 3, 21382, 3, 19244, }, + { 3, 21209, 3, 19088, }, + } }, + { 2467, { + { 3, 21252, 3, 19127, }, + { 3, 21339, 3, 19205, }, + { 3, 21166, 3, 19050, }, + } }, + { 2472, { + { 3, 21209, 3, 19088, }, + { 3, 21295, 3, 19166, }, + { 3, 21124, 3, 19011, }, + } }, + { 2484, { + { 3, 21107, 3, 18996, }, + { 3, 21192, 3, 19073, }, + { 3, 21022, 3, 18920, }, + } }, + { 4920, { + { 4, 21313, 4, 19181, }, + { 4, 21356, 4, 19220, }, + { 4, 21269, 4, 19142, }, + } }, + { 4940, { + { 4, 21226, 4, 19104, }, + { 4, 21269, 4, 19142, }, + { 4, 21183, 4, 19065, }, + } }, + { 4960, { + { 4, 21141, 4, 19027, }, + { 4, 21183, 4, 19065, }, + { 4, 21098, 4, 18988, }, + } }, + { 4980, { + { 4, 21056, 4, 18950, }, + { 4, 21098, 4, 18988, }, + { 4, 21014, 4, 18912, }, + } }, + { 5040, { + { 4, 20805, 4, 18725, }, + { 4, 20846, 4, 18762, }, + { 4, 20764, 4, 18687, }, + } }, + { 5060, { + { 4, 20723, 4, 18651, }, + { 4, 20764, 4, 18687, }, + { 4, 20682, 4, 18614, }, + } }, + { 5080, { + { 4, 20641, 4, 18577, }, + { 4, 20682, 4, 18614, }, + { 4, 20601, 4, 18541, }, + } }, + { 5180, { + { 4, 20243, 4, 18219, }, + { 4, 20282, 4, 18254, }, + { 4, 20204, 4, 18183, }, + } }, + { 5200, { + { 4, 20165, 4, 18148, }, + { 4, 20204, 4, 18183, }, + { 4, 20126, 4, 18114, }, + } }, + { 5220, { + { 4, 20088, 4, 18079, }, + { 4, 20126, 4, 18114, }, + { 4, 20049, 4, 18044, }, + } }, + { 5240, { + { 4, 20011, 4, 18010, }, + { 4, 20049, 4, 18044, }, + { 4, 19973, 4, 17976, }, + } }, + { 5260, { + { 4, 19935, 4, 17941, }, + { 4, 19973, 4, 17976, }, + { 4, 19897, 4, 17907, }, + } }, + { 5280, { + { 4, 19859, 4, 17873, }, + { 4, 19897, 4, 17907, }, + { 4, 19822, 4, 17840, }, + } }, + { 5300, { + { 4, 19784, 4, 17806, }, + { 4, 19822, 4, 17840, }, + { 4, 19747, 4, 17772, }, + } }, + { 5320, { + { 4, 19710, 4, 17739, }, + { 4, 19747, 4, 17772, }, + { 4, 19673, 4, 17706, }, + } }, + { 5500, { + { 4, 19065, 4, 17159, }, + { 4, 19100, 4, 17190, }, + { 4, 19030, 4, 17127, }, + } }, + { 5520, { + { 4, 18996, 4, 17096, }, + { 4, 19030, 4, 17127, }, + { 4, 18962, 4, 17065, }, + } }, + { 5540, { + { 4, 18927, 4, 17035, }, + { 4, 18962, 4, 17065, }, + { 4, 18893, 4, 17004, }, + } }, + { 5560, { + { 4, 18859, 4, 16973, }, + { 4, 18893, 4, 17004, }, + { 4, 18825, 4, 16943, }, + } }, + { 5580, { + { 4, 18792, 4, 16913, }, + { 4, 18825, 4, 16943, }, + { 4, 18758, 4, 16882, }, + } }, + { 5600, { 
+ { 4, 18725, 4, 16852, }, + { 4, 18758, 4, 16882, }, + { 4, 18691, 4, 16822, }, + } }, + { 5620, { + { 4, 18658, 4, 16792, }, + { 4, 18691, 4, 16822, }, + { 4, 18625, 4, 16762, }, + } }, + { 5640, { + { 4, 18592, 4, 16733, }, + { 4, 18625, 4, 16762, }, + { 4, 18559, 4, 16703, }, + } }, + { 5660, { + { 4, 18526, 4, 16673, }, + { 4, 18559, 4, 16703, }, + { 4, 18493, 4, 16644, }, + } }, + { 5680, { + { 4, 18461, 4, 16615, }, + { 4, 18493, 4, 16644, }, + { 4, 18428, 4, 16586, }, + } }, + { 5700, { + { 4, 18396, 4, 16556, }, + { 4, 18428, 4, 16586, }, + { 4, 18364, 4, 16527, }, + } }, + { 5745, { + { 4, 18252, 4, 16427, }, + { 4, 18284, 4, 16455, }, + { 4, 18220, 4, 16398, }, + } }, + { 5765, { + { 4, 18189, 5, 32740, }, + { 4, 18220, 4, 16398, }, + { 4, 18157, 5, 32683, }, + } }, + { 5785, { + { 4, 18126, 5, 32626, }, + { 4, 18157, 5, 32683, }, + { 4, 18094, 5, 32570, }, + } }, + { 5805, { + { 4, 18063, 5, 32514, }, + { 4, 18094, 5, 32570, }, + { 4, 18032, 5, 32458, }, + } }, + { 5825, { + { 4, 18001, 5, 32402, }, + { 4, 18032, 5, 32458, }, + { 4, 17970, 5, 32347, }, + } }, + { 5170, { + { 4, 20282, 4, 18254, }, + { 4, 20321, 4, 18289, }, + { 4, 20243, 4, 18219, }, + } }, + { 5190, { + { 4, 20204, 4, 18183, }, + { 4, 20243, 4, 18219, }, + { 4, 20165, 4, 18148, }, + } }, + { 5210, { + { 4, 20126, 4, 18114, }, + { 4, 20165, 4, 18148, }, + { 4, 20088, 4, 18079, }, + } }, + { 5230, { + { 4, 20049, 4, 18044, }, + { 4, 20088, 4, 18079, }, + { 4, 20011, 4, 18010, }, + } }, +}; + +static const struct ar9170_phy_freq_params * +ar9170_get_hw_dyn_params(struct ieee80211_channel *channel, + enum ar9170_bw bw) +{ + unsigned int chanidx = 0; + u16 freq = 2412; + + if (channel) { + chanidx = channel->hw_value; + freq = channel->center_freq; + } + + BUG_ON(chanidx >= ARRAY_SIZE(ar9170_phy_freq_params)); + + BUILD_BUG_ON(__AR9170_NUM_BW != 3); + + WARN_ON(ar9170_phy_freq_params[chanidx].freq != freq); + + return &ar9170_phy_freq_params[chanidx].params[bw]; +} + + +int ar9170_init_rf(struct ar9170 *ar) +{ + const struct ar9170_phy_freq_params *freqpar; + __le32 cmd[7]; + int err; + + err = ar9170_init_rf_banks_0_7(ar, false); + if (err) + return err; + + err = ar9170_init_rf_bank4_pwr(ar, false, 2412, AR9170_BW_20); + if (err) + return err; + + freqpar = ar9170_get_hw_dyn_params(NULL, AR9170_BW_20); + + cmd[0] = cpu_to_le32(2412 * 1000); + cmd[1] = cpu_to_le32(0); + cmd[2] = cpu_to_le32(1); + cmd[3] = cpu_to_le32(freqpar->coeff_exp); + cmd[4] = cpu_to_le32(freqpar->coeff_man); + cmd[5] = cpu_to_le32(freqpar->coeff_exp_shgi); + cmd[6] = cpu_to_le32(freqpar->coeff_man_shgi); + + /* RF_INIT echoes the command back to us */ + err = ar->exec_cmd(ar, AR9170_CMD_RF_INIT, + sizeof(cmd), (u8 *)cmd, + sizeof(cmd), (u8 *)cmd); + if (err) + return err; + + msleep(1000); + + return ar9170_echo_test(ar, 0xaabbccdd); +} + +static int ar9170_find_freq_idx(int nfreqs, u8 *freqs, u8 f) +{ + int idx = nfreqs - 2; + + while (idx >= 0) { + if (f >= freqs[idx]) + return idx; + idx--; + } + + return 0; +} + +static s32 ar9170_interpolate_s32(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) +{ + /* nothing to interpolate, it's horizontal */ + if (y2 == y1) + return y1; + + /* check if we hit one of the edges */ + if (x == x1) + return y1; + if (x == x2) + return y2; + + /* x1 == x2 is bad, hopefully == x */ + if (x2 == x1) + return y1; + + return y1 + (((y2 - y1) * (x - x1)) / (x2 - x1)); +} + +static u8 ar9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2) +{ +#define SHIFT 8 + s32 y; + + y = ar9170_interpolate_s32(x << SHIFT, + x1 << 
SHIFT, y1 << SHIFT, + x2 << SHIFT, y2 << SHIFT); + + /* + * XXX: unwrap this expression + * Isn't it just DIV_ROUND_UP(y, 1<<SHIFT)? + */ + return (y >> SHIFT) + ((y & (1<<(SHIFT-1))) >> (SHIFT - 1)); +#undef SHIFT +} + +static u8 ar9170_interpolate_val(u8 x, u8 *x_array, u8 *y_array) +{ + int i; + + for (i = 0; i < 3; i++) + if (x <= x_array[i + 1]) + break; + + return ar9170_interpolate_u8(x, + x_array[i], + y_array[i], + x_array[i + 1], + y_array[i + 1]); +} + +static int ar9170_set_freq_cal_data(struct ar9170 *ar, + struct ieee80211_channel *channel) +{ + u8 *cal_freq_pier; + u8 vpds[2][AR5416_PD_GAIN_ICEPTS]; + u8 pwrs[2][AR5416_PD_GAIN_ICEPTS]; + int chain, idx, i; + u32 phy_data = 0; + u8 f, tmp; + + switch (channel->band) { + case IEEE80211_BAND_2GHZ: + f = channel->center_freq - 2300; + cal_freq_pier = ar->eeprom.cal_freq_pier_2G; + i = AR5416_NUM_2G_CAL_PIERS - 1; + break; + + case IEEE80211_BAND_5GHZ: + f = (channel->center_freq - 4800) / 5; + cal_freq_pier = ar->eeprom.cal_freq_pier_5G; + i = AR5416_NUM_5G_CAL_PIERS - 1; + break; + + default: + return -EINVAL; + break; + } + + for (; i >= 0; i--) { + if (cal_freq_pier[i] != 0xff) + break; + } + if (i < 0) + return -EINVAL; + + idx = ar9170_find_freq_idx(i, cal_freq_pier, f); + + ar9170_regwrite_begin(ar); + + for (chain = 0; chain < AR5416_MAX_CHAINS; chain++) { + for (i = 0; i < AR5416_PD_GAIN_ICEPTS; i++) { + struct ar9170_calibration_data_per_freq *cal_pier_data; + int j; + + switch (channel->band) { + case IEEE80211_BAND_2GHZ: + cal_pier_data = &ar->eeprom. + cal_pier_data_2G[chain][idx]; + break; + + case IEEE80211_BAND_5GHZ: + cal_pier_data = &ar->eeprom. + cal_pier_data_5G[chain][idx]; + break; + + default: + return -EINVAL; + } + + for (j = 0; j < 2; j++) { + vpds[j][i] = ar9170_interpolate_u8(f, + cal_freq_pier[idx], + cal_pier_data->vpd_pdg[j][i], + cal_freq_pier[idx + 1], + cal_pier_data[1].vpd_pdg[j][i]); + + pwrs[j][i] = ar9170_interpolate_u8(f, + cal_freq_pier[idx], + cal_pier_data->pwr_pdg[j][i], + cal_freq_pier[idx + 1], + cal_pier_data[1].pwr_pdg[j][i]) / 2; + } + } + + for (i = 0; i < 76; i++) { + if (i < 25) { + tmp = ar9170_interpolate_val(i, &pwrs[0][0], + &vpds[0][0]); + } else { + tmp = ar9170_interpolate_val(i - 12, + &pwrs[1][0], + &vpds[1][0]); + } + + phy_data |= tmp << ((i & 3) << 3); + if ((i & 3) == 3) { + ar9170_regwrite(0x1c6280 + chain * 0x1000 + + (i & ~3), phy_data); + phy_data = 0; + } + } + + for (i = 19; i < 32; i++) + ar9170_regwrite(0x1c6280 + chain * 0x1000 + (i << 2), + 0x0); + } + + ar9170_regwrite_finish(); + return ar9170_regwrite_result(); +} + +static u8 ar9170_get_max_edge_power(struct ar9170 *ar, + struct ar9170_calctl_edges edges[], + u32 freq) +{ + int i; + u8 rc = AR5416_MAX_RATE_POWER; + u8 f; + if (freq < 3000) + f = freq - 2300; + else + f = (freq - 4800) / 5; + + for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) { + if (edges[i].channel == 0xff) + break; + if (f == edges[i].channel) { + /* exact freq match */ + rc = edges[i].power_flags & ~AR9170_CALCTL_EDGE_FLAGS; + break; + } + if (i > 0 && f < edges[i].channel) { + if (f > edges[i - 1].channel && + edges[i - 1].power_flags & + AR9170_CALCTL_EDGE_FLAGS) { + /* lower channel has the inband flag set */ + rc = edges[i - 1].power_flags & + ~AR9170_CALCTL_EDGE_FLAGS; + } + break; + } + } + + if (i == AR5416_NUM_BAND_EDGES) { + if (f > edges[i - 1].channel && + edges[i - 1].power_flags & AR9170_CALCTL_EDGE_FLAGS) { + /* lower channel has the inband flag set */ + rc = edges[i - 1].power_flags & + ~AR9170_CALCTL_EDGE_FLAGS; + } + } + return rc; +} + +static u8 
ar9170_get_heavy_clip(struct ar9170 *ar, + struct ar9170_calctl_edges edges[], + u32 freq, enum ar9170_bw bw) +{ + u8 f; + int i; + u8 rc = 0; + + if (freq < 3000) + f = freq - 2300; + else + f = (freq - 4800) / 5; + + if (bw == AR9170_BW_40_BELOW || bw == AR9170_BW_40_ABOVE) + rc |= 0xf0; + + for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) { + if (edges[i].channel == 0xff) + break; + if (f == edges[i].channel) { + if (!(edges[i].power_flags & AR9170_CALCTL_EDGE_FLAGS)) + rc |= 0x0f; + break; + } + } + + return rc; +} + +/* + * calculate the conformance test limits and the heavy clip parameter + * and apply them to ar->power* (derived from otus hal/hpmain.c, line 3706) + */ +static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw) +{ + u8 ctl_grp; /* CTL group */ + u8 ctl_idx; /* CTL index */ + int i, j; + struct ctl_modes { + u8 ctl_mode; + u8 max_power; + u8 *pwr_cal_data; + int pwr_cal_len; + } *modes; + + /* + * order is relevant in the mode_list_*: we fall back to the + * lower indices if any mode is missed in the EEPROM. + */ + struct ctl_modes mode_list_2ghz[] = { + { CTL_11B, 0, ar->power_2G_cck, 4 }, + { CTL_11G, 0, ar->power_2G_ofdm, 4 }, + { CTL_2GHT20, 0, ar->power_2G_ht20, 8 }, + { CTL_2GHT40, 0, ar->power_2G_ht40, 8 }, + }; + struct ctl_modes mode_list_5ghz[] = { + { CTL_11A, 0, ar->power_5G_leg, 4 }, + { CTL_5GHT20, 0, ar->power_5G_ht20, 8 }, + { CTL_5GHT40, 0, ar->power_5G_ht40, 8 }, + }; + int nr_modes; + +#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n]) + + ar->phy_heavy_clip = 0; + + /* + * TODO: investigate the differences between OTUS' + * hpreg.c::zfHpGetRegulatoryDomain() and + * ath/regd.c::ath_regd_get_band_ctl() - + * e.g. for FCC3_WORLD the OTUS procedure + * always returns CTL_FCC, while the one in ath/ delivers + * CTL_ETSI for 2GHz and CTL_FCC for 5GHz. + */ + ctl_grp = ath_regd_get_band_ctl(&ar->common.regulatory, + ar->hw->conf.channel->band); + + /* ctl group not found - either invalid band (NO_CTL) or ww roaming */ + if (ctl_grp == NO_CTL || ctl_grp == SD_NO_CTL) + ctl_grp = CTL_FCC; + + if (ctl_grp != CTL_FCC) + /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */ + return; + + if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) { + modes = mode_list_2ghz; + nr_modes = ARRAY_SIZE(mode_list_2ghz); + } else { + modes = mode_list_5ghz; + nr_modes = ARRAY_SIZE(mode_list_5ghz); + } + + for (i = 0; i < nr_modes; i++) { + u8 c = ctl_grp | modes[i].ctl_mode; + for (ctl_idx = 0; ctl_idx < AR5416_NUM_CTLS; ctl_idx++) + if (c == ar->eeprom.ctl_index[ctl_idx]) + break; + if (ctl_idx < AR5416_NUM_CTLS) { + int f_off = 0; + + /* determine heav clip parameter from + the 11G edges array */ + if (modes[i].ctl_mode == CTL_11G) { + ar->phy_heavy_clip = + ar9170_get_heavy_clip(ar, + EDGES(ctl_idx, 1), + freq, bw); + } + + /* adjust freq for 40MHz */ + if (modes[i].ctl_mode == CTL_2GHT40 || + modes[i].ctl_mode == CTL_5GHT40) { + if (bw == AR9170_BW_40_BELOW) + f_off = -10; + else + f_off = 10; + } + + modes[i].max_power = + ar9170_get_max_edge_power(ar, EDGES(ctl_idx, 1), + freq+f_off); + + /* + * TODO: check if the regulatory max. power is + * controlled by cfg80211 for DFS + * (hpmain applies it to max_power itself for DFS freq) + */ + + } else { + /* + * Workaround in otus driver, hpmain.c, line 3906: + * if no data for 5GHT20 are found, take the + * legacy 5G value. + * We extend this here to fallback from any other *HT or + * 11G, too. 
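/*
 * Editorial sketch, not part of the patch: the fallback policy described in
 * the comment above, in isolation.  If a CTL mode has no EEPROM data, reuse
 * the nearest earlier mode in the list that does; only when none exists keep
 * the unrestricted maximum.  The helper name and example values are made up.
 */
#include <stdio.h>

#define MAX_RATE_POWER 63	/* stand-in for AR5416_MAX_RATE_POWER */

static int fallback_max_power(const unsigned char *max_power, int i)
{
	int k = i;

	while (k-- > 0)
		if (max_power[k] != MAX_RATE_POWER)
			return max_power[k];	/* nearest earlier mode with data */

	return MAX_RATE_POWER;			/* nothing to fall back to */
}

int main(void)
{
	/* e.g. CCK = 30, OFDM = 28, HT20 and HT40 missing from the EEPROM */
	unsigned char pwr[4] = { 30, 28, MAX_RATE_POWER, MAX_RATE_POWER };

	printf("HT20 -> %d, HT40 -> %d\n",
	       fallback_max_power(pwr, 2), fallback_max_power(pwr, 3));
	return 0;
}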
+ */ + int k = i; + + modes[i].max_power = AR5416_MAX_RATE_POWER; + while (k-- > 0) { + if (modes[k].max_power != + AR5416_MAX_RATE_POWER) { + modes[i].max_power = modes[k].max_power; + break; + } + } + } + + /* apply max power to pwr_cal_data (ar->power_*) */ + for (j = 0; j < modes[i].pwr_cal_len; j++) { + modes[i].pwr_cal_data[j] = min(modes[i].pwr_cal_data[j], + modes[i].max_power); + } + } + + if (ar->phy_heavy_clip & 0xf0) { + ar->power_2G_ht40[0]--; + ar->power_2G_ht40[1]--; + ar->power_2G_ht40[2]--; + } + if (ar->phy_heavy_clip & 0xf) { + ar->power_2G_ht20[0]++; + ar->power_2G_ht20[1]++; + ar->power_2G_ht20[2]++; + } + + +#undef EDGES +} + +static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw) +{ + struct ar9170_calibration_target_power_legacy *ctpl; + struct ar9170_calibration_target_power_ht *ctph; + u8 *ctpres; + int ntargets; + int idx, i, n; + u8 ackpower, ackchains, f; + u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS]; + + if (freq < 3000) + f = freq - 2300; + else + f = (freq - 4800)/5; + + /* + * cycle through the various modes + * + * legacy modes first: 5G, 2G CCK, 2G OFDM + */ + for (i = 0; i < 3; i++) { + switch (i) { + case 0: /* 5 GHz legacy */ + ctpl = &ar->eeprom.cal_tgt_pwr_5G[0]; + ntargets = AR5416_NUM_5G_TARGET_PWRS; + ctpres = ar->power_5G_leg; + break; + case 1: /* 2.4 GHz CCK */ + ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0]; + ntargets = AR5416_NUM_2G_CCK_TARGET_PWRS; + ctpres = ar->power_2G_cck; + break; + case 2: /* 2.4 GHz OFDM */ + ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0]; + ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS; + ctpres = ar->power_2G_ofdm; + break; + default: + BUG(); + } + + for (n = 0; n < ntargets; n++) { + if (ctpl[n].freq == 0xff) + break; + pwr_freqs[n] = ctpl[n].freq; + } + ntargets = n; + idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f); + for (n = 0; n < 4; n++) + ctpres[n] = ar9170_interpolate_u8( + f, + ctpl[idx + 0].freq, + ctpl[idx + 0].power[n], + ctpl[idx + 1].freq, + ctpl[idx + 1].power[n]); + } + + /* + * HT modes now: 5G HT20, 5G HT40, 2G CCK, 2G OFDM, 2G HT20, 2G HT40 + */ + for (i = 0; i < 4; i++) { + switch (i) { + case 0: /* 5 GHz HT 20 */ + ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0]; + ntargets = AR5416_NUM_5G_TARGET_PWRS; + ctpres = ar->power_5G_ht20; + break; + case 1: /* 5 GHz HT 40 */ + ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0]; + ntargets = AR5416_NUM_5G_TARGET_PWRS; + ctpres = ar->power_5G_ht40; + break; + case 2: /* 2.4 GHz HT 20 */ + ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0]; + ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS; + ctpres = ar->power_2G_ht20; + break; + case 3: /* 2.4 GHz HT 40 */ + ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0]; + ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS; + ctpres = ar->power_2G_ht40; + break; + default: + BUG(); + } + + for (n = 0; n < ntargets; n++) { + if (ctph[n].freq == 0xff) + break; + pwr_freqs[n] = ctph[n].freq; + } + ntargets = n; + idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f); + for (n = 0; n < 8; n++) + ctpres[n] = ar9170_interpolate_u8( + f, + ctph[idx + 0].freq, + ctph[idx + 0].power[n], + ctph[idx + 1].freq, + ctph[idx + 1].power[n]); + } + + + /* calc. 
conformance test limits and apply to ar->power*[] */ + ar9170_calc_ctl(ar, freq, bw); + + /* set ACK/CTS TX power */ + ar9170_regwrite_begin(ar); + + if (ar->eeprom.tx_mask != 1) + ackchains = AR9170_TX_PHY_TXCHAIN_2; + else + ackchains = AR9170_TX_PHY_TXCHAIN_1; + + if (freq < 3000) + ackpower = ar->power_2G_ofdm[0] & 0x3f; + else + ackpower = ar->power_5G_leg[0] & 0x3f; + + ar9170_regwrite(0x1c3694, ackpower << 20 | ackchains << 26); + ar9170_regwrite(0x1c3bb4, ackpower << 5 | ackchains << 11 | + ackpower << 21 | ackchains << 27); + + ar9170_regwrite_finish(); + return ar9170_regwrite_result(); +} + +static int ar9170_calc_noise_dbm(u32 raw_noise) +{ + if (raw_noise & 0x100) + return ~((raw_noise & 0x0ff) >> 1); + else + return (raw_noise & 0xff) >> 1; +} + +int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel, + enum ar9170_rf_init_mode rfi, enum ar9170_bw bw) +{ + const struct ar9170_phy_freq_params *freqpar; + u32 cmd, tmp, offs; + __le32 vals[8]; + int i, err; + bool bandswitch; + + /* clear BB heavy clip enable */ + err = ar9170_write_reg(ar, 0x1c59e0, 0x200); + if (err) + return err; + + /* may be NULL at first setup */ + if (ar->channel) + bandswitch = ar->channel->band != channel->band; + else + bandswitch = true; + + /* HW workaround */ + if (!ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] && + channel->center_freq <= 2417) + bandswitch = true; + + err = ar->exec_cmd(ar, AR9170_CMD_FREQ_START, 0, NULL, 0, NULL); + if (err) + return err; + + if (rfi != AR9170_RFI_NONE || bandswitch) { + u32 val = 0x400; + + if (rfi == AR9170_RFI_COLD) + val = 0x800; + + /* warm/cold reset BB/ADDA */ + err = ar9170_write_reg(ar, 0x1d4004, val); + if (err) + return err; + + err = ar9170_write_reg(ar, 0x1d4004, 0x0); + if (err) + return err; + + err = ar9170_init_phy(ar, channel->band); + if (err) + return err; + + err = ar9170_init_rf_banks_0_7(ar, + channel->band == IEEE80211_BAND_5GHZ); + if (err) + return err; + + cmd = AR9170_CMD_RF_INIT; + } else { + cmd = AR9170_CMD_FREQUENCY; + } + + err = ar9170_init_rf_bank4_pwr(ar, + channel->band == IEEE80211_BAND_5GHZ, + channel->center_freq, bw); + if (err) + return err; + + switch (bw) { + case AR9170_BW_20: + tmp = 0x240; + offs = 0; + break; + case AR9170_BW_40_BELOW: + tmp = 0x2c4; + offs = 3; + break; + case AR9170_BW_40_ABOVE: + tmp = 0x2d4; + offs = 1; + break; + default: + BUG(); + return -ENOSYS; + } + + if (ar->eeprom.tx_mask != 1) + tmp |= 0x100; + + err = ar9170_write_reg(ar, 0x1c5804, tmp); + if (err) + return err; + + err = ar9170_set_freq_cal_data(ar, channel); + if (err) + return err; + + err = ar9170_set_power_cal(ar, channel->center_freq, bw); + if (err) + return err; + + freqpar = ar9170_get_hw_dyn_params(channel, bw); + + vals[0] = cpu_to_le32(channel->center_freq * 1000); + vals[1] = cpu_to_le32(conf_is_ht40(&ar->hw->conf)); + vals[2] = cpu_to_le32(offs << 2 | 1); + vals[3] = cpu_to_le32(freqpar->coeff_exp); + vals[4] = cpu_to_le32(freqpar->coeff_man); + vals[5] = cpu_to_le32(freqpar->coeff_exp_shgi); + vals[6] = cpu_to_le32(freqpar->coeff_man_shgi); + vals[7] = cpu_to_le32(1000); + + err = ar->exec_cmd(ar, cmd, sizeof(vals), (u8 *)vals, + sizeof(vals), (u8 *)vals); + if (err) + return err; + + if (ar->phy_heavy_clip) { + err = ar9170_write_reg(ar, 0x1c59e0, + 0x200 | ar->phy_heavy_clip); + if (err) { + if (ar9170_nag_limiter(ar)) + printk(KERN_ERR "%s: failed to set " + "heavy clip\n", + wiphy_name(ar->hw->wiphy)); + } + } + + for (i = 0; i < 2; i++) { + ar->noise[i] = ar9170_calc_noise_dbm( + (le32_to_cpu(vals[2 
+ i]) >> 19) & 0x1ff); + + ar->noise[i + 2] = ar9170_calc_noise_dbm( + (le32_to_cpu(vals[5 + i]) >> 23) & 0x1ff); + } + + ar->channel = channel; + return 0; +} diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c new file mode 100644 index 0000000..051f483 --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/usb.c @@ -0,0 +1,976 @@ +/* + * Atheros AR9170 driver + * + * USB - frontend + * + * Copyright 2008, Johannes Berg + * Copyright 2009, Christian Lamparter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
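/*
 * Editorial sketch: how the noise read-back at the end of ar9170_set_channel
 * above turns a 9-bit field of the RF_INIT/FREQUENCY response into dBm.
 * Bit 8 is the sign, the low eight bits are halved, and negative readings
 * are one's-complemented.  Standalone demo; the example response word is
 * made up, the bit offset (19) follows the driver code.
 */
#include <stdint.h>
#include <stdio.h>

static int noise_to_dbm(uint32_t raw)		/* raw: 9-bit hardware reading */
{
	if (raw & 0x100)
		return ~((raw & 0x0ff) >> 1);	/* negative reading */
	return (raw & 0xff) >> 1;
}

int main(void)
{
	uint32_t word = 0x0d900000;		/* hypothetical response word */

	/* chain 0 noise lives at bits 19..27 of this word */
	printf("noise: %d dBm\n", noise_to_dbm((word >> 19) & 0x1ff));
	return 0;
}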
+ */ + +#include +#include +#include +#include +#include +#include +#include "ar9170.h" +#include "cmd.h" +#include "hw.h" +#include "usb.h" + +MODULE_AUTHOR("Johannes Berg "); +MODULE_AUTHOR("Christian Lamparter "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless"); +MODULE_FIRMWARE("ar9170.fw"); +MODULE_FIRMWARE("ar9170-1.fw"); +MODULE_FIRMWARE("ar9170-2.fw"); + +enum ar9170_requirements { + AR9170_REQ_FW1_ONLY = 1, +}; + +static struct usb_device_id ar9170_usb_ids[] = { + /* Atheros 9170 */ + { USB_DEVICE(0x0cf3, 0x9170) }, + /* Atheros TG121N */ + { USB_DEVICE(0x0cf3, 0x1001) }, + /* TP-Link TL-WN821N v2 */ + { USB_DEVICE(0x0cf3, 0x1002) }, + /* Cace Airpcap NX */ + { USB_DEVICE(0xcace, 0x0300) }, + /* D-Link DWA 160 A1 */ + { USB_DEVICE(0x07d1, 0x3c10) }, + /* D-Link DWA 160 A2 */ + { USB_DEVICE(0x07d1, 0x3a09) }, + /* Netgear WNDA3100 */ + { USB_DEVICE(0x0846, 0x9010) }, + /* Netgear WN111 v2 */ + { USB_DEVICE(0x0846, 0x9001) }, + /* Zydas ZD1221 */ + { USB_DEVICE(0x0ace, 0x1221) }, + /* ZyXEL NWD271N */ + { USB_DEVICE(0x0586, 0x3417) }, + /* Z-Com UB81 BG */ + { USB_DEVICE(0x0cde, 0x0023) }, + /* Z-Com UB82 ABG */ + { USB_DEVICE(0x0cde, 0x0026) }, + /* Sphairon Homelink 1202 */ + { USB_DEVICE(0x0cde, 0x0027) }, + /* Arcadyan WN7512 */ + { USB_DEVICE(0x083a, 0xf522) }, + /* Planex GWUS300 */ + { USB_DEVICE(0x2019, 0x5304) }, + /* IO-Data WNGDNUS2 */ + { USB_DEVICE(0x04bb, 0x093f) }, + /* AVM FRITZ!WLAN USB Stick N */ + { USB_DEVICE(0x057C, 0x8401) }, + /* AVM FRITZ!WLAN USB Stick N 2.4 */ + { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY }, + + /* terminate */ + {} +}; +MODULE_DEVICE_TABLE(usb, ar9170_usb_ids); + +static void ar9170_usb_submit_urb(struct ar9170_usb *aru) +{ + struct urb *urb; + unsigned long flags; + int err; + + if (unlikely(!IS_STARTED(&aru->common))) + return ; + + spin_lock_irqsave(&aru->tx_urb_lock, flags); + if (atomic_read(&aru->tx_submitted_urbs) >= AR9170_NUM_TX_URBS) { + spin_unlock_irqrestore(&aru->tx_urb_lock, flags); + return ; + } + atomic_inc(&aru->tx_submitted_urbs); + + urb = usb_get_from_anchor(&aru->tx_pending); + if (!urb) { + atomic_dec(&aru->tx_submitted_urbs); + spin_unlock_irqrestore(&aru->tx_urb_lock, flags); + + return ; + } + spin_unlock_irqrestore(&aru->tx_urb_lock, flags); + + aru->tx_pending_urbs--; + usb_anchor_urb(urb, &aru->tx_submitted); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (unlikely(err)) { + if (ar9170_nag_limiter(&aru->common)) + dev_err(&aru->udev->dev, "submit_urb failed (%d).\n", + err); + + usb_unanchor_urb(urb); + atomic_dec(&aru->tx_submitted_urbs); + ar9170_tx_callback(&aru->common, urb->context); + } + + usb_free_urb(urb); +} + +static void ar9170_usb_tx_urb_complete_frame(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct ar9170_usb *aru = (struct ar9170_usb *) + usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + + if (unlikely(!aru)) { + dev_kfree_skb_irq(skb); + return ; + } + + atomic_dec(&aru->tx_submitted_urbs); + + ar9170_tx_callback(&aru->common, skb); + + ar9170_usb_submit_urb(aru); +} + +static void ar9170_usb_tx_urb_complete(struct urb *urb) +{ +} + +static void ar9170_usb_irq_completed(struct urb *urb) +{ + struct ar9170_usb *aru = urb->context; + + switch (urb->status) { + /* everything is fine */ + case 0: + break; + + /* disconnect */ + case -ENOENT: + case -ECONNRESET: + case -ENODEV: + case -ESHUTDOWN: + goto free; + + default: + goto resubmit; + } + + ar9170_handle_command_response(&aru->common, urb->transfer_buffer, + 
urb->actual_length); + +resubmit: + usb_anchor_urb(urb, &aru->rx_submitted); + if (usb_submit_urb(urb, GFP_ATOMIC)) { + usb_unanchor_urb(urb); + goto free; + } + + return; + +free: + usb_buffer_free(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma); +} + +static void ar9170_usb_rx_completed(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct ar9170_usb *aru = (struct ar9170_usb *) + usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + int err; + + if (!aru) + goto free; + + switch (urb->status) { + /* everything is fine */ + case 0: + break; + + /* disconnect */ + case -ENOENT: + case -ECONNRESET: + case -ENODEV: + case -ESHUTDOWN: + goto free; + + default: + goto resubmit; + } + + skb_put(skb, urb->actual_length); + ar9170_rx(&aru->common, skb); + +resubmit: + skb_reset_tail_pointer(skb); + skb_trim(skb, 0); + + usb_anchor_urb(urb, &aru->rx_submitted); + err = usb_submit_urb(urb, GFP_ATOMIC); + if (unlikely(err)) { + usb_unanchor_urb(urb); + goto free; + } + + return ; + +free: + dev_kfree_skb_irq(skb); +} + +static int ar9170_usb_prep_rx_urb(struct ar9170_usb *aru, + struct urb *urb, gfp_t gfp) +{ + struct sk_buff *skb; + + skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE + 32, gfp); + if (!skb) + return -ENOMEM; + + /* reserve some space for mac80211's radiotap */ + skb_reserve(skb, 32); + + usb_fill_bulk_urb(urb, aru->udev, + usb_rcvbulkpipe(aru->udev, AR9170_EP_RX), + skb->data, min(skb_tailroom(skb), + AR9170_MAX_RX_BUFFER_SIZE), + ar9170_usb_rx_completed, skb); + + return 0; +} + +static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru) +{ + struct urb *urb = NULL; + void *ibuf; + int err = -ENOMEM; + + /* initialize interrupt endpoint */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + goto out; + + ibuf = usb_buffer_alloc(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma); + if (!ibuf) + goto out; + + usb_fill_int_urb(urb, aru->udev, + usb_rcvintpipe(aru->udev, AR9170_EP_IRQ), ibuf, + 64, ar9170_usb_irq_completed, aru, 1); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + usb_anchor_urb(urb, &aru->rx_submitted); + err = usb_submit_urb(urb, GFP_KERNEL); + if (err) { + usb_unanchor_urb(urb); + usb_buffer_free(aru->udev, 64, urb->transfer_buffer, + urb->transfer_dma); + } + +out: + usb_free_urb(urb); + return err; +} + +static int ar9170_usb_alloc_rx_bulk_urbs(struct ar9170_usb *aru) +{ + struct urb *urb; + int i; + int err = -EINVAL; + + for (i = 0; i < AR9170_NUM_RX_URBS; i++) { + err = -ENOMEM; + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + goto err_out; + + err = ar9170_usb_prep_rx_urb(aru, urb, GFP_KERNEL); + if (err) { + usb_free_urb(urb); + goto err_out; + } + + usb_anchor_urb(urb, &aru->rx_submitted); + err = usb_submit_urb(urb, GFP_KERNEL); + if (err) { + usb_unanchor_urb(urb); + dev_kfree_skb_any((void *) urb->transfer_buffer); + usb_free_urb(urb); + goto err_out; + } + usb_free_urb(urb); + } + + /* the device now waiting for a firmware. 
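/*
 * Editorial sketch: the completion-status policy shared by
 * ar9170_usb_irq_completed() and ar9170_usb_rx_completed() above, reduced to
 * a plain function.  Status zero means the payload is valid (the driver then
 * recycles the URB), the four "device went away" codes mean the URB must not
 * be resubmitted, anything else is treated as transient.
 */
#include <errno.h>
#include <stdio.h>

enum urb_disposition { URB_CONSUME, URB_DROP, URB_RESUBMIT };

static enum urb_disposition classify_urb_status(int status)
{
	switch (status) {
	case 0:
		return URB_CONSUME;	/* payload is valid */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		return URB_DROP;	/* disconnect/unlink: free the buffer */
	default:
		return URB_RESUBMIT;	/* transient error: try again */
	}
}

int main(void)
{
	printf("%d %d %d\n", classify_urb_status(0),
	       classify_urb_status(-ENODEV), classify_urb_status(-EPROTO));
	return 0;
}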
*/ + aru->common.state = AR9170_IDLE; + return 0; + +err_out: + + usb_kill_anchored_urbs(&aru->rx_submitted); + return err; +} + +static int ar9170_usb_flush(struct ar9170 *ar) +{ + struct ar9170_usb *aru = (void *) ar; + struct urb *urb; + int ret, err = 0; + + if (IS_STARTED(ar)) + aru->common.state = AR9170_IDLE; + + usb_wait_anchor_empty_timeout(&aru->tx_pending, + msecs_to_jiffies(800)); + while ((urb = usb_get_from_anchor(&aru->tx_pending))) { + ar9170_tx_callback(&aru->common, (void *) urb->context); + usb_free_urb(urb); + } + + /* lets wait a while until the tx - queues are dried out */ + ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted, + msecs_to_jiffies(100)); + if (ret == 0) + err = -ETIMEDOUT; + + usb_kill_anchored_urbs(&aru->tx_submitted); + + if (IS_ACCEPTING_CMD(ar)) + aru->common.state = AR9170_STARTED; + + return err; +} + +static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru) +{ + int err; + + aru->common.state = AR9170_UNKNOWN_STATE; + + err = ar9170_usb_flush(&aru->common); + if (err) + dev_err(&aru->udev->dev, "stuck tx urbs!\n"); + + usb_poison_anchored_urbs(&aru->tx_submitted); + usb_poison_anchored_urbs(&aru->rx_submitted); +} + +static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd, + unsigned int plen, void *payload, + unsigned int outlen, void *out) +{ + struct ar9170_usb *aru = (void *) ar; + struct urb *urb = NULL; + unsigned long flags; + int err = -ENOMEM; + + if (unlikely(!IS_ACCEPTING_CMD(ar))) + return -EPERM; + + if (WARN_ON(plen > AR9170_MAX_CMD_LEN - 4)) + return -EINVAL; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (unlikely(!urb)) + goto err_free; + + ar->cmdbuf[0] = cpu_to_le32(plen); + ar->cmdbuf[0] |= cpu_to_le32(cmd << 8); + /* writing multiple regs fills this buffer already */ + if (plen && payload != (u8 *)(&ar->cmdbuf[1])) + memcpy(&ar->cmdbuf[1], payload, plen); + + spin_lock_irqsave(&aru->common.cmdlock, flags); + aru->readbuf = (u8 *)out; + aru->readlen = outlen; + spin_unlock_irqrestore(&aru->common.cmdlock, flags); + + usb_fill_int_urb(urb, aru->udev, + usb_sndbulkpipe(aru->udev, AR9170_EP_CMD), + aru->common.cmdbuf, plen + 4, + ar9170_usb_tx_urb_complete, NULL, 1); + + usb_anchor_urb(urb, &aru->tx_submitted); + err = usb_submit_urb(urb, GFP_ATOMIC); + if (unlikely(err)) { + usb_unanchor_urb(urb); + usb_free_urb(urb); + goto err_unbuf; + } + usb_free_urb(urb); + + err = wait_for_completion_timeout(&aru->cmd_wait, HZ); + if (err == 0) { + err = -ETIMEDOUT; + goto err_unbuf; + } + + if (aru->readlen != outlen) { + err = -EMSGSIZE; + goto err_unbuf; + } + + return 0; + +err_unbuf: + /* Maybe the device was removed in the second we were waiting? */ + if (IS_STARTED(ar)) { + dev_err(&aru->udev->dev, "no command feedback " + "received (%d).\n", err); + + /* provide some maybe useful debug information */ + print_hex_dump_bytes("ar9170 cmd: ", DUMP_PREFIX_NONE, + aru->common.cmdbuf, plen + 4); + dump_stack(); + } + + /* invalidate to avoid completing the next prematurely */ + spin_lock_irqsave(&aru->common.cmdlock, flags); + aru->readbuf = NULL; + aru->readlen = 0; + spin_unlock_irqrestore(&aru->common.cmdlock, flags); + +err_free: + + return err; +} + +static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb) +{ + struct ar9170_usb *aru = (struct ar9170_usb *) ar; + struct urb *urb; + + if (unlikely(!IS_STARTED(ar))) { + /* Seriously, what were you drink... err... thinking!? 
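/*
 * Editorial sketch: the command framing built by ar9170_usb_exec_cmd() above.
 * The first 32-bit word carries the payload length in its low byte and the
 * command id in the next byte; the payload follows at cmdbuf[1], and plen + 4
 * bytes go out on the command endpoint.  Host-order demo (the driver stores
 * the header little-endian via cpu_to_le32); the command id and payload here
 * are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t frame_cmd(uint32_t *buf, uint8_t cmd, const void *payload,
			uint8_t plen)
{
	buf[0] = (uint32_t)plen | ((uint32_t)cmd << 8);	/* header word */
	if (plen)
		memcpy(&buf[1], payload, plen);
	return 4 + plen;				/* bytes to send */
}

int main(void)
{
	uint32_t buf[16];
	uint32_t echo = 0xaabbccdd;			/* e.g. an echo test */
	size_t len = frame_cmd(buf, 0x02 /* hypothetical cmd id */,
			       &echo, sizeof(echo));

	printf("sending %zu bytes, header 0x%08x\n", len, (unsigned)buf[0]);
	return 0;
}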
*/ + return -EPERM; + } + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (unlikely(!urb)) + return -ENOMEM; + + usb_fill_bulk_urb(urb, aru->udev, + usb_sndbulkpipe(aru->udev, AR9170_EP_TX), + skb->data, skb->len, + ar9170_usb_tx_urb_complete_frame, skb); + urb->transfer_flags |= URB_ZERO_PACKET; + + usb_anchor_urb(urb, &aru->tx_pending); + aru->tx_pending_urbs++; + + usb_free_urb(urb); + + ar9170_usb_submit_urb(aru); + return 0; +} + +static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer) +{ + struct ar9170_usb *aru = (void *) ar; + unsigned long flags; + u32 in, out; + + if (unlikely(!buffer)) + return ; + + in = le32_to_cpup((__le32 *)buffer); + out = le32_to_cpu(ar->cmdbuf[0]); + + /* mask off length byte */ + out &= ~0xFF; + + if (aru->readlen >= 0) { + /* add expected length */ + out |= aru->readlen; + } else { + /* add obtained length */ + out |= in & 0xFF; + } + + /* + * Some commands (e.g: AR9170_CMD_FREQUENCY) have a variable response + * length and we cannot predict the correct length in advance. + * So we only check if we provided enough space for the data. + */ + if (unlikely(out < in)) { + dev_warn(&aru->udev->dev, "received invalid command response " + "got %d bytes, instead of %d bytes " + "and the resp length is %d bytes\n", + in, out, len); + print_hex_dump_bytes("ar9170 invalid resp: ", + DUMP_PREFIX_OFFSET, buffer, len); + /* + * Do not complete, then the command times out, + * and we get a stack trace from there. + */ + return ; + } + + spin_lock_irqsave(&aru->common.cmdlock, flags); + if (aru->readbuf && len > 0) { + memcpy(aru->readbuf, buffer + 4, len - 4); + aru->readbuf = NULL; + } + complete(&aru->cmd_wait); + spin_unlock_irqrestore(&aru->common.cmdlock, flags); +} + +static int ar9170_usb_upload(struct ar9170_usb *aru, const void *data, + size_t len, u32 addr, bool complete) +{ + int transfer, err; + u8 *buf = kmalloc(4096, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + while (len) { + transfer = min_t(int, len, 4096); + memcpy(buf, data, transfer); + + err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0), + 0x30 /* FW DL */, 0x40 | USB_DIR_OUT, + addr >> 8, 0, buf, transfer, 1000); + + if (err < 0) { + kfree(buf); + return err; + } + + len -= transfer; + data += transfer; + addr += transfer; + } + kfree(buf); + + if (complete) { + err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0), + 0x31 /* FW DL COMPLETE */, + 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 5000); + } + + return 0; +} + +static int ar9170_usb_reset(struct ar9170_usb *aru) +{ + int ret, lock = (aru->intf->condition != USB_INTERFACE_BINDING); + + if (lock) { + ret = usb_lock_device_for_reset(aru->udev, aru->intf); + if (ret < 0) { + dev_err(&aru->udev->dev, "unable to lock device " + "for reset (%d).\n", ret); + return ret; + } + } + + ret = usb_reset_device(aru->udev); + if (lock) + usb_unlock_device(aru->udev); + + /* let it rest - for a second - */ + msleep(1000); + + return ret; +} + +static int ar9170_usb_upload_firmware(struct ar9170_usb *aru) +{ + int err; + + if (!aru->init_values) + goto upload_fw_start; + + /* First, upload initial values to device RAM */ + err = ar9170_usb_upload(aru, aru->init_values->data, + aru->init_values->size, 0x102800, false); + if (err) { + dev_err(&aru->udev->dev, "firmware part 1 " + "upload failed (%d).\n", err); + return err; + } + +upload_fw_start: + + /* Then, upload the firmware itself and start it */ + return ar9170_usb_upload(aru, aru->firmware->data, aru->firmware->size, + 0x200000, true); +} + +static int 
ar9170_usb_init_transport(struct ar9170_usb *aru) +{ + struct ar9170 *ar = (void *) &aru->common; + int err; + + ar9170_regwrite_begin(ar); + + /* Set USB Rx stream mode MAX packet number to 2 */ + ar9170_regwrite(AR9170_USB_REG_MAX_AGG_UPLOAD, 0x4); + + /* Set USB Rx stream mode timeout to 10us */ + ar9170_regwrite(AR9170_USB_REG_UPLOAD_TIME_CTL, 0x80); + + ar9170_regwrite_finish(); + + err = ar9170_regwrite_result(); + if (err) + dev_err(&aru->udev->dev, "USB setup failed (%d).\n", err); + + return err; +} + +static void ar9170_usb_stop(struct ar9170 *ar) +{ + struct ar9170_usb *aru = (void *) ar; + int ret; + + if (IS_ACCEPTING_CMD(ar)) + aru->common.state = AR9170_STOPPED; + + ret = ar9170_usb_flush(ar); + if (ret) + dev_err(&aru->udev->dev, "kill pending tx urbs.\n"); + + usb_poison_anchored_urbs(&aru->tx_submitted); + + /* + * Note: + * So far we freed all tx urbs, but we won't dare to touch any rx urbs. + * Else we would end up with a unresponsive device... + */ +} + +static int ar9170_usb_open(struct ar9170 *ar) +{ + struct ar9170_usb *aru = (void *) ar; + int err; + + usb_unpoison_anchored_urbs(&aru->tx_submitted); + err = ar9170_usb_init_transport(aru); + if (err) { + usb_poison_anchored_urbs(&aru->tx_submitted); + return err; + } + + aru->common.state = AR9170_IDLE; + return 0; +} + +static int ar9170_usb_init_device(struct ar9170_usb *aru) +{ + int err; + + err = ar9170_usb_alloc_rx_irq_urb(aru); + if (err) + goto err_out; + + err = ar9170_usb_alloc_rx_bulk_urbs(aru); + if (err) + goto err_unrx; + + err = ar9170_usb_upload_firmware(aru); + if (err) { + err = ar9170_echo_test(&aru->common, 0x60d43110); + if (err) { + /* force user invention, by disabling the device */ + err = usb_driver_set_configuration(aru->udev, -1); + dev_err(&aru->udev->dev, "device is in a bad state. 
" + "please reconnect it!\n"); + goto err_unrx; + } + } + + return 0; + +err_unrx: + ar9170_usb_cancel_urbs(aru); + +err_out: + return err; +} + +static void ar9170_usb_firmware_failed(struct ar9170_usb *aru) +{ + struct device *parent = aru->udev->dev.parent; + + /* unbind anything failed */ + if (parent) + device_lock(parent); + device_release_driver(&aru->udev->dev); + if (parent) + device_unlock(parent); +} + +static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context) +{ + struct ar9170_usb *aru = context; + int err; + + aru->firmware = fw; + + if (!fw) { + dev_err(&aru->udev->dev, "firmware file not found.\n"); + goto err_freefw; + } + + err = ar9170_usb_init_device(aru); + if (err) + goto err_freefw; + + err = ar9170_usb_open(&aru->common); + if (err) + goto err_unrx; + + err = ar9170_register(&aru->common, &aru->udev->dev); + + ar9170_usb_stop(&aru->common); + if (err) + goto err_unrx; + + return; + + err_unrx: + ar9170_usb_cancel_urbs(aru); + + err_freefw: + ar9170_usb_firmware_failed(aru); +} + +static void ar9170_usb_firmware_inits(const struct firmware *fw, + void *context) +{ + struct ar9170_usb *aru = context; + int err; + + if (!fw) { + dev_err(&aru->udev->dev, "file with init values not found.\n"); + ar9170_usb_firmware_failed(aru); + return; + } + + aru->init_values = fw; + + /* ok so we have the init values -- get code for two-stage */ + + err = request_firmware_nowait(THIS_MODULE, 1, "ar9170-2.fw", + &aru->udev->dev, GFP_KERNEL, aru, + ar9170_usb_firmware_finish); + if (err) + ar9170_usb_firmware_failed(aru); +} + +static void ar9170_usb_firmware_step2(const struct firmware *fw, void *context) +{ + struct ar9170_usb *aru = context; + int err; + + if (fw) { + ar9170_usb_firmware_finish(fw, context); + return; + } + + if (aru->req_one_stage_fw) { + dev_err(&aru->udev->dev, "ar9170.fw firmware file " + "not found and is required for this device\n"); + ar9170_usb_firmware_failed(aru); + return; + } + + dev_err(&aru->udev->dev, "ar9170.fw firmware file " + "not found, trying old firmware...\n"); + + err = request_firmware_nowait(THIS_MODULE, 1, "ar9170-1.fw", + &aru->udev->dev, GFP_KERNEL, aru, + ar9170_usb_firmware_inits); + if (err) + ar9170_usb_firmware_failed(aru); +} + +static bool ar9170_requires_one_stage(const struct usb_device_id *id) +{ + if (!id->driver_info) + return false; + if (id->driver_info == AR9170_REQ_FW1_ONLY) + return true; + return false; +} + +static int ar9170_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct ar9170_usb *aru; + struct ar9170 *ar; + struct usb_device *udev; + int err; + + aru = ar9170_alloc(sizeof(*aru)); + if (IS_ERR(aru)) { + err = PTR_ERR(aru); + goto out; + } + + udev = interface_to_usbdev(intf); + usb_get_dev(udev); + aru->udev = udev; + aru->intf = intf; + ar = &aru->common; + + aru->req_one_stage_fw = ar9170_requires_one_stage(id); + + usb_set_intfdata(intf, aru); + SET_IEEE80211_DEV(ar->hw, &intf->dev); + + init_usb_anchor(&aru->rx_submitted); + init_usb_anchor(&aru->tx_pending); + init_usb_anchor(&aru->tx_submitted); + init_completion(&aru->cmd_wait); + spin_lock_init(&aru->tx_urb_lock); + + aru->tx_pending_urbs = 0; + atomic_set(&aru->tx_submitted_urbs, 0); + + aru->common.stop = ar9170_usb_stop; + aru->common.flush = ar9170_usb_flush; + aru->common.open = ar9170_usb_open; + aru->common.tx = ar9170_usb_tx; + aru->common.exec_cmd = ar9170_usb_exec_cmd; + aru->common.callback_cmd = ar9170_usb_callback_cmd; + +#ifdef CONFIG_PM + udev->reset_resume = 1; +#endif /* CONFIG_PM 
*/ + err = ar9170_usb_reset(aru); + if (err) + goto err_freehw; + + return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw", + &aru->udev->dev, GFP_KERNEL, aru, + ar9170_usb_firmware_step2); +err_freehw: + usb_set_intfdata(intf, NULL); + usb_put_dev(udev); + ieee80211_free_hw(ar->hw); +out: + return err; +} + +static void ar9170_usb_disconnect(struct usb_interface *intf) +{ + struct ar9170_usb *aru = usb_get_intfdata(intf); + + if (!aru) + return; + + aru->common.state = AR9170_IDLE; + ar9170_unregister(&aru->common); + ar9170_usb_cancel_urbs(aru); + + usb_put_dev(aru->udev); + usb_set_intfdata(intf, NULL); + ieee80211_free_hw(aru->common.hw); + + release_firmware(aru->init_values); + release_firmware(aru->firmware); +} + +#ifdef CONFIG_PM +static int ar9170_suspend(struct usb_interface *intf, + pm_message_t message) +{ + struct ar9170_usb *aru = usb_get_intfdata(intf); + + if (!aru) + return -ENODEV; + + aru->common.state = AR9170_IDLE; + ar9170_usb_cancel_urbs(aru); + + return 0; +} + +static int ar9170_resume(struct usb_interface *intf) +{ + struct ar9170_usb *aru = usb_get_intfdata(intf); + int err; + + if (!aru) + return -ENODEV; + + usb_unpoison_anchored_urbs(&aru->rx_submitted); + usb_unpoison_anchored_urbs(&aru->tx_submitted); + + err = ar9170_usb_init_device(aru); + if (err) + goto err_unrx; + + err = ar9170_usb_open(&aru->common); + if (err) + goto err_unrx; + + return 0; + +err_unrx: + aru->common.state = AR9170_IDLE; + ar9170_usb_cancel_urbs(aru); + + return err; +} +#endif /* CONFIG_PM */ + +static struct usb_driver ar9170_driver = { + .name = "ar9170usb", + .probe = ar9170_usb_probe, + .disconnect = ar9170_usb_disconnect, + .id_table = ar9170_usb_ids, + .soft_unbind = 1, +#ifdef CONFIG_PM + .suspend = ar9170_suspend, + .resume = ar9170_resume, + .reset_resume = ar9170_resume, +#endif /* CONFIG_PM */ +}; + +static int __init ar9170_init(void) +{ + return usb_register(&ar9170_driver); +} + +static void __exit ar9170_exit(void) +{ + usb_deregister(&ar9170_driver); +} + +module_init(ar9170_init); +module_exit(ar9170_exit); diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h new file mode 100644 index 0000000..a2ce3b1 --- /dev/null +++ b/drivers/net/wireless/ath/ar9170/usb.h @@ -0,0 +1,81 @@ +/* + * Atheros AR9170 USB driver + * + * Driver specific definitions + * + * Copyright 2008, Johannes Berg + * Copyright 2009, Christian Lamparter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, see + * http://www.gnu.org/licenses/. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * Copyright (c) 2007-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
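/*
 * Editorial sketch: the firmware selection implemented above by
 * ar9170_usb_firmware_step2()/_inits()/_finish(), flattened into one helper.
 * The single-image "ar9170.fw" is tried first; devices not restricted to it
 * fall back to the old two-stage pair "ar9170-1.fw" (init values) plus
 * "ar9170-2.fw" (code).  Standalone demo, no USB or firmware loader involved.
 */
#include <stdbool.h>
#include <stdio.h>

static void print_load_plan(bool have_onestage, bool req_one_stage_fw)
{
	if (have_onestage) {
		printf("load ar9170.fw\n");
		return;
	}
	if (req_one_stage_fw) {
		printf("fail: ar9170.fw is required for this device\n");
		return;
	}
	printf("load ar9170-1.fw (init values), then ar9170-2.fw (code)\n");
}

int main(void)
{
	print_load_plan(true, false);
	print_load_plan(false, false);
	print_load_plan(false, true);
	return 0;
}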
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __USB_H +#define __USB_H + +#include +#include +#include +#include +#include +#include +#include +#include "eeprom.h" +#include "hw.h" +#include "ar9170.h" + +#define AR9170_NUM_RX_URBS 16 +#define AR9170_NUM_TX_URBS 8 + +struct firmware; + +struct ar9170_usb { + struct ar9170 common; + struct usb_device *udev; + struct usb_interface *intf; + + struct usb_anchor rx_submitted; + struct usb_anchor tx_pending; + struct usb_anchor tx_submitted; + + bool req_one_stage_fw; + + spinlock_t tx_urb_lock; + atomic_t tx_submitted_urbs; + unsigned int tx_pending_urbs; + + struct completion cmd_wait; + int readlen; + u8 *readbuf; + + const struct firmware *init_values; + const struct firmware *firmware; +}; + +#endif /* __USB_H */ diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h new file mode 100644 index 0000000..71fc960 --- /dev/null +++ b/drivers/net/wireless/ath/ath.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ATH_H +#define ATH_H + +#include +#include +#include + +/* + * The key cache is used for h/w cipher state and also for + * tracking station state such as the current tx antenna. + * We also setup a mapping table between key cache slot indices + * and station state to short-circuit node lookups on rx. + * Different parts have different size key caches. We handle + * up to ATH_KEYMAX entries (could dynamically allocate state). 
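/*
 * Editorial sketch: the kind of slot bookkeeping the keymap bitmap declared
 * below (DECLARE_BITMAP(keymap, ATH_KEYMAX)) supports -- claim the first free
 * key-cache index, release it later.  Plain-C stand-in for the kernel bitmap
 * helpers; 128 matches ATH_KEYMAX, the allocation helpers themselves are
 * illustrative and not taken from this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define KEYMAX 128

static uint64_t keymap[KEYMAX / 64];

static int claim_key_slot(void)
{
	int i;

	for (i = 0; i < KEYMAX; i++)
		if (!(keymap[i / 64] & (1ULL << (i % 64)))) {
			keymap[i / 64] |= 1ULL << (i % 64);
			return i;
		}
	return -1;				/* key cache full */
}

static void release_key_slot(int i)
{
	keymap[i / 64] &= ~(1ULL << (i % 64));
}

int main(void)
{
	int a = claim_key_slot(), b = claim_key_slot();

	release_key_slot(a);
	printf("%d %d %d\n", a, b, claim_key_slot());	/* prints: 0 1 0 */
	return 0;
}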
+ */ +#define ATH_KEYMAX 128 /* max key cache size we handle */ + +static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + +struct ath_ani { + bool caldone; + int16_t noise_floor; + unsigned int longcal_timer; + unsigned int shortcal_timer; + unsigned int resetcal_timer; + unsigned int checkani_timer; + struct timer_list timer; +}; + +enum ath_device_state { + ATH_HW_UNAVAILABLE, + ATH_HW_INITIALIZED, +}; + +struct reg_dmn_pair_mapping { + u16 regDmnEnum; + u16 reg_5ghz_ctl; + u16 reg_2ghz_ctl; +}; + +struct ath_regulatory { + char alpha2[2]; + u16 country_code; + u16 max_power_level; + u32 tp_scale; + u16 current_rd; + u16 current_rd_ext; + int16_t power_limit; + struct reg_dmn_pair_mapping *regpair; +}; + +struct ath_ops { + unsigned int (*read)(void *, u32 reg_offset); + void (*write)(void *, u32 val, u32 reg_offset); +}; + +struct ath_common; + +struct ath_bus_ops { + void (*read_cachesize)(struct ath_common *common, int *csz); + bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data); + void (*bt_coex_prep)(struct ath_common *common); +}; + +struct ath_common { + void *ah; + void *priv; + struct ieee80211_hw *hw; + int debug_mask; + enum ath_device_state state; + + struct ath_ani ani; + + u16 cachelsz; + u16 curaid; + u8 macaddr[ETH_ALEN]; + u8 curbssid[ETH_ALEN]; + u8 bssidmask[ETH_ALEN]; + + u8 tx_chainmask; + u8 rx_chainmask; + + u32 rx_bufsize; + + u32 keymax; + DECLARE_BITMAP(keymap, ATH_KEYMAX); + u8 splitmic; + + struct ath_regulatory regulatory; + const struct ath_ops *ops; + const struct ath_bus_ops *bus_ops; +}; + +struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, + u32 len, + gfp_t gfp_mask); + +void ath_hw_setbssidmask(struct ath_common *common); + +#endif /* ATH_H */ diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile new file mode 100644 index 0000000..090dc6d --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/Makefile @@ -0,0 +1,16 @@ +ath5k-y += caps.o +ath5k-y += initvals.o +ath5k-y += eeprom.o +ath5k-y += gpio.o +ath5k-y += desc.o +ath5k-y += dma.o +ath5k-y += qcu.o +ath5k-y += pcu.o +ath5k-y += phy.o +ath5k-y += reset.o +ath5k-y += attach.o +ath5k-y += base.o +ath5k-y += led.o +ath5k-y += rfkill.o +ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o +obj-$(CONFIG_ATH5K) += ath5k.o diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h new file mode 100644 index 0000000..ac67f02 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -0,0 +1,1378 @@ +/* + * Copyright (c) 2004-2007 Reyk Floeter + * Copyright (c) 2006-2007 Nick Kossifidis + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _ATH5K_H +#define _ATH5K_H + +/* TODO: Clean up channel debuging -doesn't work anyway- and start + * working on reg. 
control code using all available eeprom information + * -rev. engineering needed- */ +#define CHAN_DEBUG 0 + +#include +#include +#include + +/* RX/TX descriptor hw structs + * TODO: Driver part should only see sw structs */ +#include "desc.h" + +/* EEPROM structs/offsets + * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities) + * and clean up common bits, then introduce set/get functions in eeprom.c */ +#include "eeprom.h" +#include "../ath.h" + +/* PCI IDs */ +#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */ +#define PCI_DEVICE_ID_ATHEROS_AR5311 0x0011 /* AR5311 */ +#define PCI_DEVICE_ID_ATHEROS_AR5211 0x0012 /* AR5211 */ +#define PCI_DEVICE_ID_ATHEROS_AR5212 0x0013 /* AR5212 */ +#define PCI_DEVICE_ID_3COM_3CRDAG675 0x0013 /* 3CRDAG675 (Atheros AR5212) */ +#define PCI_DEVICE_ID_3COM_2_3CRPAG175 0x0013 /* 3CRPAG175 (Atheros AR5212) */ +#define PCI_DEVICE_ID_ATHEROS_AR5210_AP 0x0207 /* AR5210 (Early) */ +#define PCI_DEVICE_ID_ATHEROS_AR5212_IBM 0x1014 /* AR5212 (IBM MiniPCI) */ +#define PCI_DEVICE_ID_ATHEROS_AR5210_DEFAULT 0x1107 /* AR5210 (no eeprom) */ +#define PCI_DEVICE_ID_ATHEROS_AR5212_DEFAULT 0x1113 /* AR5212 (no eeprom) */ +#define PCI_DEVICE_ID_ATHEROS_AR5211_DEFAULT 0x1112 /* AR5211 (no eeprom) */ +#define PCI_DEVICE_ID_ATHEROS_AR5212_FPGA 0xf013 /* AR5212 (emulation board) */ +#define PCI_DEVICE_ID_ATHEROS_AR5211_LEGACY 0xff12 /* AR5211 (emulation board) */ +#define PCI_DEVICE_ID_ATHEROS_AR5211_FPGA11B 0xf11b /* AR5211 (emulation board) */ +#define PCI_DEVICE_ID_ATHEROS_AR5312_REV2 0x0052 /* AR5312 WMAC (AP31) */ +#define PCI_DEVICE_ID_ATHEROS_AR5312_REV7 0x0057 /* AR5312 WMAC (AP30-040) */ +#define PCI_DEVICE_ID_ATHEROS_AR5312_REV8 0x0058 /* AR5312 WMAC (AP43-030) */ +#define PCI_DEVICE_ID_ATHEROS_AR5212_0014 0x0014 /* AR5212 compatible */ +#define PCI_DEVICE_ID_ATHEROS_AR5212_0015 0x0015 /* AR5212 compatible */ +#define PCI_DEVICE_ID_ATHEROS_AR5212_0016 0x0016 /* AR5212 compatible */ +#define PCI_DEVICE_ID_ATHEROS_AR5212_0017 0x0017 /* AR5212 compatible */ +#define PCI_DEVICE_ID_ATHEROS_AR5212_0018 0x0018 /* AR5212 compatible */ +#define PCI_DEVICE_ID_ATHEROS_AR5212_0019 0x0019 /* AR5212 compatible */ +#define PCI_DEVICE_ID_ATHEROS_AR2413 0x001a /* AR2413 (Griffin-lite) */ +#define PCI_DEVICE_ID_ATHEROS_AR5413 0x001b /* AR5413 (Eagle) */ +#define PCI_DEVICE_ID_ATHEROS_AR5424 0x001c /* AR5424 (Condor PCI-E) */ +#define PCI_DEVICE_ID_ATHEROS_AR5416 0x0023 /* AR5416 */ +#define PCI_DEVICE_ID_ATHEROS_AR5418 0x0024 /* AR5418 */ + +/****************************\ + GENERIC DRIVER DEFINITIONS +\****************************/ + +#define ATH5K_PRINTF(fmt, ...) printk("%s: " fmt, __func__, ##__VA_ARGS__) + +#define ATH5K_PRINTK(_sc, _level, _fmt, ...) \ + printk(_level "ath5k %s: " _fmt, \ + ((_sc) && (_sc)->hw) ? wiphy_name((_sc)->hw->wiphy) : "", \ + ##__VA_ARGS__) + +#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) do { \ + if (net_ratelimit()) \ + ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); \ + } while (0) + +#define ATH5K_INFO(_sc, _fmt, ...) \ + ATH5K_PRINTK(_sc, KERN_INFO, _fmt, ##__VA_ARGS__) + +#define ATH5K_WARN(_sc, _fmt, ...) \ + ATH5K_PRINTK_LIMIT(_sc, KERN_WARNING, _fmt, ##__VA_ARGS__) + +#define ATH5K_ERR(_sc, _fmt, ...) 
\ + ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__) + +/* + * AR5K REGISTER ACCESS + */ + +/* Some macros to read/write fields */ + +/* First shift, then mask */ +#define AR5K_REG_SM(_val, _flags) \ + (((_val) << _flags##_S) & (_flags)) + +/* First mask, then shift */ +#define AR5K_REG_MS(_val, _flags) \ + (((_val) & (_flags)) >> _flags##_S) + +/* Some registers can hold multiple values of interest. For this + * reason when we want to write to these registers we must first + * retrieve the values which we do not want to clear (lets call this + * old_data) and then set the register with this and our new_value: + * ( old_data | new_value) */ +#define AR5K_REG_WRITE_BITS(ah, _reg, _flags, _val) \ + ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & ~(_flags)) | \ + (((_val) << _flags##_S) & (_flags)), _reg) + +#define AR5K_REG_MASKED_BITS(ah, _reg, _flags, _mask) \ + ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & \ + (_mask)) | (_flags), _reg) + +#define AR5K_REG_ENABLE_BITS(ah, _reg, _flags) \ + ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) | (_flags), _reg) + +#define AR5K_REG_DISABLE_BITS(ah, _reg, _flags) \ + ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) & ~(_flags), _reg) + +/* Access to PHY registers */ +#define AR5K_PHY_READ(ah, _reg) \ + ath5k_hw_reg_read(ah, (ah)->ah_phy + ((_reg) << 2)) + +#define AR5K_PHY_WRITE(ah, _reg, _val) \ + ath5k_hw_reg_write(ah, _val, (ah)->ah_phy + ((_reg) << 2)) + +/* Access QCU registers per queue */ +#define AR5K_REG_READ_Q(ah, _reg, _queue) \ + (ath5k_hw_reg_read(ah, _reg) & (1 << _queue)) \ + +#define AR5K_REG_WRITE_Q(ah, _reg, _queue) \ + ath5k_hw_reg_write(ah, (1 << _queue), _reg) + +#define AR5K_Q_ENABLE_BITS(_reg, _queue) do { \ + _reg |= 1 << _queue; \ +} while (0) + +#define AR5K_Q_DISABLE_BITS(_reg, _queue) do { \ + _reg &= ~(1 << _queue); \ +} while (0) + +/* Used while writing initvals */ +#define AR5K_REG_WAIT(_i) do { \ + if (_i % 64) \ + udelay(1); \ +} while (0) + +/* Register dumps are done per operation mode */ +#define AR5K_INI_RFGAIN_5GHZ 0 +#define AR5K_INI_RFGAIN_2GHZ 1 + +/* TODO: Clean this up */ +#define AR5K_INI_VAL_11A 0 +#define AR5K_INI_VAL_11A_TURBO 1 +#define AR5K_INI_VAL_11B 2 +#define AR5K_INI_VAL_11G 3 +#define AR5K_INI_VAL_11G_TURBO 4 +#define AR5K_INI_VAL_XR 0 +#define AR5K_INI_VAL_MAX 5 + +/* + * Some tuneable values (these should be changeable by the user) + * TODO: Make use of them and add more options OR use debug/configfs + */ +#define AR5K_TUNE_DMA_BEACON_RESP 2 +#define AR5K_TUNE_SW_BEACON_RESP 10 +#define AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF 0 +#define AR5K_TUNE_RADAR_ALERT false +#define AR5K_TUNE_MIN_TX_FIFO_THRES 1 +#define AR5K_TUNE_MAX_TX_FIFO_THRES ((IEEE80211_MAX_LEN / 64) + 1) +#define AR5K_TUNE_REGISTER_TIMEOUT 20000 +/* Register for RSSI threshold has a mask of 0xff, so 255 seems to + * be the max value. */ +#define AR5K_TUNE_RSSI_THRES 129 +/* This must be set when setting the RSSI threshold otherwise it can + * prevent a reset. If AR5K_RSSI_THR is read after writing to it + * the BMISS_THRES will be seen as 0, seems harware doesn't keep + * track of it. Max value depends on harware. For AR5210 this is just 7. + * For AR5211+ this seems to be up to 255. 
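/*
 * Editorial sketch: what the AR5K_REG_SM / AR5K_REG_MS macros above do, shown
 * with a made-up register field.  By convention the field mask is FOO and its
 * shift is FOO_S; SM packs a value into the field ("first shift, then mask"),
 * MS extracts it, and a read-modify-write (as in AR5K_REG_WRITE_BITS) uses
 * both.  DEMO_FIELD and the register value are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FIELD	0x00000ff0	/* hypothetical 8-bit field, bits 4..11 */
#define DEMO_FIELD_S	4

#define REG_SM(v, f)	(((v) << f##_S) & (f))	/* first shift, then mask */
#define REG_MS(v, f)	(((v) & (f)) >> f##_S)	/* first mask, then shift */

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	/* read-modify-write: clear the field, then set it to 0x3c */
	reg = (reg & ~DEMO_FIELD) | REG_SM(0x3c, DEMO_FIELD);

	printf("reg=0x%08x field=0x%02x\n", (unsigned)reg,
	       (unsigned)REG_MS(reg, DEMO_FIELD));
	return 0;
}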
*/ +#define AR5K_TUNE_BMISS_THRES 7 +#define AR5K_TUNE_REGISTER_DWELL_TIME 20000 +#define AR5K_TUNE_BEACON_INTERVAL 100 +#define AR5K_TUNE_AIFS 2 +#define AR5K_TUNE_AIFS_11B 2 +#define AR5K_TUNE_AIFS_XR 0 +#define AR5K_TUNE_CWMIN 15 +#define AR5K_TUNE_CWMIN_11B 31 +#define AR5K_TUNE_CWMIN_XR 3 +#define AR5K_TUNE_CWMAX 1023 +#define AR5K_TUNE_CWMAX_11B 1023 +#define AR5K_TUNE_CWMAX_XR 7 +#define AR5K_TUNE_NOISE_FLOOR -72 +#define AR5K_TUNE_CCA_MAX_GOOD_VALUE -95 +#define AR5K_TUNE_MAX_TXPOWER 63 +#define AR5K_TUNE_DEFAULT_TXPOWER 25 +#define AR5K_TUNE_TPC_TXPOWER false +#define AR5K_TUNE_HWTXTRIES 4 + +#define AR5K_INIT_CARR_SENSE_EN 1 + +/*Swap RX/TX Descriptor for big endian archs*/ +#if defined(__BIG_ENDIAN) +#define AR5K_INIT_CFG ( \ + AR5K_CFG_SWTD | AR5K_CFG_SWRD \ +) +#else +#define AR5K_INIT_CFG 0x00000000 +#endif + +/* Initial values */ +#define AR5K_INIT_CYCRSSI_THR1 2 +#define AR5K_INIT_TX_LATENCY 502 +#define AR5K_INIT_USEC 39 +#define AR5K_INIT_USEC_TURBO 79 +#define AR5K_INIT_USEC_32 31 +#define AR5K_INIT_SLOT_TIME 396 +#define AR5K_INIT_SLOT_TIME_TURBO 480 +#define AR5K_INIT_ACK_CTS_TIMEOUT 1024 +#define AR5K_INIT_ACK_CTS_TIMEOUT_TURBO 0x08000800 +#define AR5K_INIT_PROG_IFS 920 +#define AR5K_INIT_PROG_IFS_TURBO 960 +#define AR5K_INIT_EIFS 3440 +#define AR5K_INIT_EIFS_TURBO 6880 +#define AR5K_INIT_SIFS 560 +#define AR5K_INIT_SIFS_TURBO 480 +#define AR5K_INIT_SH_RETRY 10 +#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY +#define AR5K_INIT_SSH_RETRY 32 +#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY +#define AR5K_INIT_TX_RETRY 10 + +#define AR5K_INIT_TRANSMIT_LATENCY ( \ + (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \ + (AR5K_INIT_USEC) \ +) +#define AR5K_INIT_TRANSMIT_LATENCY_TURBO ( \ + (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \ + (AR5K_INIT_USEC_TURBO) \ +) +#define AR5K_INIT_PROTO_TIME_CNTRL ( \ + (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS << 12) | \ + (AR5K_INIT_PROG_IFS) \ +) +#define AR5K_INIT_PROTO_TIME_CNTRL_TURBO ( \ + (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS_TURBO << 12) | \ + (AR5K_INIT_PROG_IFS_TURBO) \ +) + +/* token to use for aifs, cwmin, cwmax in MadWiFi */ +#define AR5K_TXQ_USEDEFAULT ((u32) -1) + +/* GENERIC CHIPSET DEFINITIONS */ + +/* MAC Chips */ +enum ath5k_version { + AR5K_AR5210 = 0, + AR5K_AR5211 = 1, + AR5K_AR5212 = 2, +}; + +/* PHY Chips */ +enum ath5k_radio { + AR5K_RF5110 = 0, + AR5K_RF5111 = 1, + AR5K_RF5112 = 2, + AR5K_RF2413 = 3, + AR5K_RF5413 = 4, + AR5K_RF2316 = 5, + AR5K_RF2317 = 6, + AR5K_RF2425 = 7, +}; + +/* + * Common silicon revision/version values + */ + +enum ath5k_srev_type { + AR5K_VERSION_MAC, + AR5K_VERSION_RAD, +}; + +struct ath5k_srev_name { + const char *sr_name; + enum ath5k_srev_type sr_type; + u_int sr_val; +}; + +#define AR5K_SREV_UNKNOWN 0xffff + +#define AR5K_SREV_AR5210 0x00 /* Crete */ +#define AR5K_SREV_AR5311 0x10 /* Maui 1 */ +#define AR5K_SREV_AR5311A 0x20 /* Maui 2 */ +#define AR5K_SREV_AR5311B 0x30 /* Spirit */ +#define AR5K_SREV_AR5211 0x40 /* Oahu */ +#define AR5K_SREV_AR5212 0x50 /* Venice */ +#define AR5K_SREV_AR5212_V4 0x54 /* ??? */ +#define AR5K_SREV_AR5213 0x55 /* ??? 
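/*
 * Editorial sketch: the field layout behind AR5K_INIT_TRANSMIT_LATENCY and
 * AR5K_INIT_PROTO_TIME_CNTRL above -- three values packed into one 32-bit
 * register word at bit offsets 14/7/0 and 26/12/0.  Standalone recomputation
 * of the same constants, nothing hardware-specific.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tx_latency = 502, usec_32 = 31, usec = 39;
	unsigned int carr_sense = 1, eifs = 3440, prog_ifs = 920;

	printf("TRANSMIT_LATENCY = 0x%08x\n",
	       (tx_latency << 14) | (usec_32 << 7) | usec);
	printf("PROTO_TIME_CNTRL = 0x%08x\n",
	       (carr_sense << 26) | (eifs << 12) | prog_ifs);
	return 0;
}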
*/ +#define AR5K_SREV_AR5213A 0x59 /* Hainan */ +#define AR5K_SREV_AR2413 0x78 /* Griffin lite */ +#define AR5K_SREV_AR2414 0x70 /* Griffin */ +#define AR5K_SREV_AR5424 0x90 /* Condor */ +#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */ +#define AR5K_SREV_AR5414 0xa0 /* Eagle */ +#define AR5K_SREV_AR2415 0xb0 /* Talon */ +#define AR5K_SREV_AR5416 0xc0 /* PCI-E */ +#define AR5K_SREV_AR5418 0xca /* PCI-E */ +#define AR5K_SREV_AR2425 0xe0 /* Swan */ +#define AR5K_SREV_AR2417 0xf0 /* Nala */ + +#define AR5K_SREV_RAD_5110 0x00 +#define AR5K_SREV_RAD_5111 0x10 +#define AR5K_SREV_RAD_5111A 0x15 +#define AR5K_SREV_RAD_2111 0x20 +#define AR5K_SREV_RAD_5112 0x30 +#define AR5K_SREV_RAD_5112A 0x35 +#define AR5K_SREV_RAD_5112B 0x36 +#define AR5K_SREV_RAD_2112 0x40 +#define AR5K_SREV_RAD_2112A 0x45 +#define AR5K_SREV_RAD_2112B 0x46 +#define AR5K_SREV_RAD_2413 0x50 +#define AR5K_SREV_RAD_5413 0x60 +#define AR5K_SREV_RAD_2316 0x70 /* Cobra SoC */ +#define AR5K_SREV_RAD_2317 0x80 +#define AR5K_SREV_RAD_5424 0xa0 /* Mostly same as 5413 */ +#define AR5K_SREV_RAD_2425 0xa2 +#define AR5K_SREV_RAD_5133 0xc0 + +#define AR5K_SREV_PHY_5211 0x30 +#define AR5K_SREV_PHY_5212 0x41 +#define AR5K_SREV_PHY_5212A 0x42 +#define AR5K_SREV_PHY_5212B 0x43 +#define AR5K_SREV_PHY_2413 0x45 +#define AR5K_SREV_PHY_5413 0x61 +#define AR5K_SREV_PHY_2425 0x70 + +/* IEEE defs */ +#define IEEE80211_MAX_LEN 2500 + +/* TODO add support to mac80211 for vendor-specific rates and modes */ + +/* + * Some of this information is based on Documentation from: + * + * http://madwifi.org/wiki/ChipsetFeatures/SuperAG + * + * Modulation for Atheros' eXtended Range - range enhancing extension that is + * supposed to double the distance an Atheros client device can keep a + * connection with an Atheros access point. This is achieved by increasing + * the receiver sensitivity up to, -105dBm, which is about 20dB above what + * the 802.11 specifications demand. In addition, new (proprietary) data rates + * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s. + * + * Please note that can you either use XR or TURBO but you cannot use both, + * they are exclusive. + * + */ +#define MODULATION_XR 0x00000200 +/* + * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a + * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s + * signaling rate achieved through the bonding of two 54Mbit/s 802.11g + * channels. To use this feature your Access Point must also suport it. + * There is also a distinction between "static" and "dynamic" turbo modes: + * + * - Static: is the dumb version: devices set to this mode stick to it until + * the mode is turned off. + * - Dynamic: is the intelligent version, the network decides itself if it + * is ok to use turbo. As soon as traffic is detected on adjacent channels + * (which would get used in turbo mode), or when a non-turbo station joins + * the network, turbo mode won't be used until the situation changes again. + * Dynamic mode is achieved by Atheros' Adaptive Radio (AR) feature which + * monitors the used radio band in order to decide whether turbo mode may + * be used or not. + * + * This article claims Super G sticks to bonding of channels 5 and 6 for + * USA: + * + * http://www.pcworld.com/article/id,113428-page,1/article.html + * + * The channel bonding seems to be driver specific though. 
In addition to + * deciding what channels will be used, these "Turbo" modes are accomplished + * by also enabling the following features: + * + * - Bursting: allows multiple frames to be sent at once, rather than pausing + * after each frame. Bursting is a standards-compliant feature that can be + * used with any Access Point. + * - Fast frames: increases the amount of information that can be sent per + * frame, also resulting in a reduction of transmission overhead. It is a + * proprietary feature that needs to be supported by the Access Point. + * - Compression: data frames are compressed in real time using a Lempel Ziv + * algorithm. This is done transparently. Once this feature is enabled, + * compression and decompression takes place inside the chipset, without + * putting additional load on the host CPU. + * + */ +#define MODULATION_TURBO 0x00000080 + +enum ath5k_driver_mode { + AR5K_MODE_11A = 0, + AR5K_MODE_11A_TURBO = 1, + AR5K_MODE_11B = 2, + AR5K_MODE_11G = 3, + AR5K_MODE_11G_TURBO = 4, + AR5K_MODE_XR = 0, + AR5K_MODE_MAX = 5 +}; + +enum ath5k_ant_mode { + AR5K_ANTMODE_DEFAULT = 0, /* default antenna setup */ + AR5K_ANTMODE_FIXED_A = 1, /* only antenna A is present */ + AR5K_ANTMODE_FIXED_B = 2, /* only antenna B is present */ + AR5K_ANTMODE_SINGLE_AP = 3, /* sta locked on a single ap */ + AR5K_ANTMODE_SECTOR_AP = 4, /* AP with tx antenna set on tx desc */ + AR5K_ANTMODE_SECTOR_STA = 5, /* STA with tx antenna set on tx desc */ + AR5K_ANTMODE_DEBUG = 6, /* Debug mode -A -> Rx, B-> Tx- */ + AR5K_ANTMODE_MAX, +}; + + +/****************\ + TX DEFINITIONS +\****************/ + +/* + * TX Status descriptor + */ +struct ath5k_tx_status { + u16 ts_seqnum; + u16 ts_tstamp; + u8 ts_status; + u8 ts_rate[4]; + u8 ts_retry[4]; + u8 ts_final_idx; + s8 ts_rssi; + u8 ts_shortretry; + u8 ts_longretry; + u8 ts_virtcol; + u8 ts_antenna; +}; + +#define AR5K_TXSTAT_ALTRATE 0x80 +#define AR5K_TXERR_XRETRY 0x01 +#define AR5K_TXERR_FILT 0x02 +#define AR5K_TXERR_FIFO 0x04 + +/** + * enum ath5k_tx_queue - Queue types used to classify tx queues. + * @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue + * @AR5K_TX_QUEUE_DATA: A normal data queue + * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue + * @AR5K_TX_QUEUE_BEACON: The beacon queue + * @AR5K_TX_QUEUE_CAB: The after-beacon queue + * @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue + */ +enum ath5k_tx_queue { + AR5K_TX_QUEUE_INACTIVE = 0, + AR5K_TX_QUEUE_DATA, + AR5K_TX_QUEUE_XR_DATA, + AR5K_TX_QUEUE_BEACON, + AR5K_TX_QUEUE_CAB, + AR5K_TX_QUEUE_UAPSD, +}; + +#define AR5K_NUM_TX_QUEUES 10 +#define AR5K_NUM_TX_QUEUES_NOQCU 2 + +/* + * Queue syb-types to classify normal data queues. + * These are the 4 Access Categories as defined in + * WME spec. 0 is the lowest priority and 4 is the + * highest. Normal data that hasn't been classified + * goes to the Best Effort AC. + */ +enum ath5k_tx_queue_subtype { + AR5K_WME_AC_BK = 0, /*Background traffic*/ + AR5K_WME_AC_BE, /*Best-effort (normal) traffic)*/ + AR5K_WME_AC_VI, /*Video traffic*/ + AR5K_WME_AC_VO, /*Voice traffic*/ +}; + +/* + * Queue ID numbers as returned by the hw functions, each number + * represents a hw queue. If hw does not support hw queues + * (eg 5210) all data goes in one queue. These match + * d80211 definitions (net80211/MadWiFi don't use them). 
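+ *
+ * Illustrative sketch (not from the original code) of how a hw data
+ * queue is requested with the types above, using struct ath5k_txq_info
+ * and ath5k_hw_setup_tx_queue() declared further down in this header:
+ *
+ *   struct ath5k_txq_info qi = {
+ *           .tqi_subtype = AR5K_WME_AC_BE,
+ *           .tqi_aifs    = AR5K_TXQ_USEDEFAULT,
+ *           .tqi_cw_min  = AR5K_TXQ_USEDEFAULT,
+ *           .tqi_cw_max  = AR5K_TXQ_USEDEFAULT,
+ *   };
+ *   int qnum = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);
+ *
+ * The returned queue number maps to one of the IDs below; on a QCU-less
+ * MAC (5210) data would simply end up in AR5K_TX_QUEUE_ID_NOQCU_DATA.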
+ */ +enum ath5k_tx_queue_id { + AR5K_TX_QUEUE_ID_NOQCU_DATA = 0, + AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1, + AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/ + AR5K_TX_QUEUE_ID_DATA_MAX = 4, /*IEEE80211_TX_QUEUE_DATA4*/ + AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/ + AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/ + AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/ + AR5K_TX_QUEUE_ID_UAPSD = 8, + AR5K_TX_QUEUE_ID_XR_DATA = 9, +}; + +/* + * Flags to set hw queue's parameters... + */ +#define AR5K_TXQ_FLAG_TXOKINT_ENABLE 0x0001 /* Enable TXOK interrupt */ +#define AR5K_TXQ_FLAG_TXERRINT_ENABLE 0x0002 /* Enable TXERR interrupt */ +#define AR5K_TXQ_FLAG_TXEOLINT_ENABLE 0x0004 /* Enable TXEOL interrupt -not used- */ +#define AR5K_TXQ_FLAG_TXDESCINT_ENABLE 0x0008 /* Enable TXDESC interrupt -not used- */ +#define AR5K_TXQ_FLAG_TXURNINT_ENABLE 0x0010 /* Enable TXURN interrupt */ +#define AR5K_TXQ_FLAG_CBRORNINT_ENABLE 0x0020 /* Enable CBRORN interrupt */ +#define AR5K_TXQ_FLAG_CBRURNINT_ENABLE 0x0040 /* Enable CBRURN interrupt */ +#define AR5K_TXQ_FLAG_QTRIGINT_ENABLE 0x0080 /* Enable QTRIG interrupt */ +#define AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE 0x0100 /* Enable TXNOFRM interrupt */ +#define AR5K_TXQ_FLAG_BACKOFF_DISABLE 0x0200 /* Disable random post-backoff */ +#define AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE 0x0300 /* Enable ready time expiry policy (?)*/ +#define AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE 0x0800 /* Enable backoff while bursting */ +#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */ +#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/ + +/* + * A struct to hold tx queue's parameters + */ +struct ath5k_txq_info { + enum ath5k_tx_queue tqi_type; + enum ath5k_tx_queue_subtype tqi_subtype; + u16 tqi_flags; /* Tx queue flags (see above) */ + u32 tqi_aifs; /* Arbitrated Interframe Space */ + s32 tqi_cw_min; /* Minimum Contention Window */ + s32 tqi_cw_max; /* Maximum Contention Window */ + u32 tqi_cbr_period; /* Constant bit rate period */ + u32 tqi_cbr_overflow_limit; + u32 tqi_burst_time; + u32 tqi_ready_time; /* Time queue waits after an event */ +}; + +/* + * Transmit packet types. 
+ * used on tx control descriptor + */ +enum ath5k_pkt_type { + AR5K_PKT_TYPE_NORMAL = 0, + AR5K_PKT_TYPE_ATIM = 1, + AR5K_PKT_TYPE_PSPOLL = 2, + AR5K_PKT_TYPE_BEACON = 3, + AR5K_PKT_TYPE_PROBE_RESP = 4, + AR5K_PKT_TYPE_PIFS = 5, +}; + +/* + * TX power and TPC settings + */ +#define AR5K_TXPOWER_OFDM(_r, _v) ( \ + ((0 & 1) << ((_v) + 6)) | \ + (((ah->ah_txpower.txp_rates_power_table[(_r)]) & 0x3f) << (_v)) \ +) + +#define AR5K_TXPOWER_CCK(_r, _v) ( \ + (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \ +) + +/* + * DMA size definitions (2^n+2) + */ +enum ath5k_dmasize { + AR5K_DMASIZE_4B = 0, + AR5K_DMASIZE_8B, + AR5K_DMASIZE_16B, + AR5K_DMASIZE_32B, + AR5K_DMASIZE_64B, + AR5K_DMASIZE_128B, + AR5K_DMASIZE_256B, + AR5K_DMASIZE_512B +}; + + +/****************\ + RX DEFINITIONS +\****************/ + +/* + * RX Status descriptor + */ +struct ath5k_rx_status { + u16 rs_datalen; + u16 rs_tstamp; + u8 rs_status; + u8 rs_phyerr; + s8 rs_rssi; + u8 rs_keyix; + u8 rs_rate; + u8 rs_antenna; + u8 rs_more; +}; + +#define AR5K_RXERR_CRC 0x01 +#define AR5K_RXERR_PHY 0x02 +#define AR5K_RXERR_FIFO 0x04 +#define AR5K_RXERR_DECRYPT 0x08 +#define AR5K_RXERR_MIC 0x10 +#define AR5K_RXKEYIX_INVALID ((u8) - 1) +#define AR5K_TXKEYIX_INVALID ((u32) - 1) + + +/**************************\ + BEACON TIMERS DEFINITIONS +\**************************/ + +#define AR5K_BEACON_PERIOD 0x0000ffff +#define AR5K_BEACON_ENA 0x00800000 /*enable beacon xmit*/ +#define AR5K_BEACON_RESET_TSF 0x01000000 /*force a TSF reset*/ + +#if 0 +/** + * struct ath5k_beacon_state - Per-station beacon timer state. + * @bs_interval: in TU's, can also include the above flags + * @bs_cfp_max_duration: if non-zero hw is setup to coexist with a + * Point Coordination Function capable AP + */ +struct ath5k_beacon_state { + u32 bs_next_beacon; + u32 bs_next_dtim; + u32 bs_interval; + u8 bs_dtim_period; + u8 bs_cfp_period; + u16 bs_cfp_max_duration; + u16 bs_cfp_du_remain; + u16 bs_tim_offset; + u16 bs_sleep_duration; + u16 bs_bmiss_threshold; + u32 bs_cfp_next; +}; +#endif + + +/* + * TSF to TU conversion: + * + * TSF is a 64bit value in usec (microseconds). + * TU is a 32bit value and defined by IEEE802.11 (page 6) as "A measurement of + * time equal to 1024 usec", so it's roughly milliseconds (usec / 1024). + */ +#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10) + + +/*******************************\ + GAIN OPTIMIZATION DEFINITIONS +\*******************************/ + +enum ath5k_rfgain { + AR5K_RFGAIN_INACTIVE = 0, + AR5K_RFGAIN_ACTIVE, + AR5K_RFGAIN_READ_REQUESTED, + AR5K_RFGAIN_NEED_CHANGE, +}; + +struct ath5k_gain { + u8 g_step_idx; + u8 g_current; + u8 g_target; + u8 g_low; + u8 g_high; + u8 g_f_corr; + u8 g_state; +}; + +/********************\ + COMMON DEFINITIONS +\********************/ + +#define AR5K_SLOT_TIME_9 396 +#define AR5K_SLOT_TIME_20 880 +#define AR5K_SLOT_TIME_MAX 0xffff + +/* channel_flags */ +#define CHANNEL_CW_INT 0x0008 /* Contention Window interference detected */ +#define CHANNEL_TURBO 0x0010 /* Turbo Channel */ +#define CHANNEL_CCK 0x0020 /* CCK channel */ +#define CHANNEL_OFDM 0x0040 /* OFDM channel */ +#define CHANNEL_2GHZ 0x0080 /* 2GHz channel. 
*/ +#define CHANNEL_5GHZ 0x0100 /* 5GHz channel */ +#define CHANNEL_PASSIVE 0x0200 /* Only passive scan allowed */ +#define CHANNEL_DYN 0x0400 /* Dynamic CCK-OFDM channel (for g operation) */ +#define CHANNEL_XR 0x0800 /* XR channel */ + +#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM) +#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK) +#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM) +#define CHANNEL_T (CHANNEL_5GHZ|CHANNEL_OFDM|CHANNEL_TURBO) +#define CHANNEL_TG (CHANNEL_2GHZ|CHANNEL_OFDM|CHANNEL_TURBO) +#define CHANNEL_108A CHANNEL_T +#define CHANNEL_108G CHANNEL_TG +#define CHANNEL_X (CHANNEL_5GHZ|CHANNEL_OFDM|CHANNEL_XR) + +#define CHANNEL_ALL (CHANNEL_OFDM|CHANNEL_CCK|CHANNEL_2GHZ|CHANNEL_5GHZ| \ + CHANNEL_TURBO) + +#define CHANNEL_ALL_NOTURBO (CHANNEL_ALL & ~CHANNEL_TURBO) +#define CHANNEL_MODES CHANNEL_ALL + +/* + * Used internaly for reset_tx_queue). + * Also see struct struct ieee80211_channel. + */ +#define IS_CHAN_XR(_c) ((_c->hw_value & CHANNEL_XR) != 0) +#define IS_CHAN_B(_c) ((_c->hw_value & CHANNEL_B) != 0) + +/* + * The following structure is used to map 2GHz channels to + * 5GHz Atheros channels. + * TODO: Clean up + */ +struct ath5k_athchan_2ghz { + u32 a2_flags; + u16 a2_athchan; +}; + + +/******************\ + RATE DEFINITIONS +\******************/ + +/** + * Seems the ar5xxx harware supports up to 32 rates, indexed by 1-32. + * + * The rate code is used to get the RX rate or set the TX rate on the + * hardware descriptors. It is also used for internal modulation control + * and settings. + * + * This is the hardware rate map we are aware of: + * + * rate_code 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 + * rate_kbps 3000 1000 ? ? ? 2000 500 48000 + * + * rate_code 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x10 + * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ? + * + * rate_code 17 18 19 20 21 22 23 24 + * rate_kbps ? ? ? ? ? ? ? 11000 + * + * rate_code 25 26 27 28 29 30 31 32 + * rate_kbps 5500 2000 1000 11000S 5500S 2000S ? ? + * + * "S" indicates CCK rates with short preamble. + * + * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the + * lowest 4 bits, so they are the same as below with a 0xF mask. + * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M). + * We handle this in ath5k_setup_bands(). 
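+ *
+ * Worked example using the defines that follow and the rate table in
+ * base.c later in this patch: 2 Mbit/s CCK is ATH5K_RATE_CODE_2M
+ * (0x1A, i.e. code 26 -> 2000 kbps in the map above). ORing in
+ * AR5K_SET_SHORT_PREAMBLE (0x04) gives 0x1E (code 30 -> "2000S"),
+ * which is exactly what base.c advertises as .hw_value_short for
+ * that bitrate.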
+ */ +#define AR5K_MAX_RATES 32 + +/* B */ +#define ATH5K_RATE_CODE_1M 0x1B +#define ATH5K_RATE_CODE_2M 0x1A +#define ATH5K_RATE_CODE_5_5M 0x19 +#define ATH5K_RATE_CODE_11M 0x18 +/* A and G */ +#define ATH5K_RATE_CODE_6M 0x0B +#define ATH5K_RATE_CODE_9M 0x0F +#define ATH5K_RATE_CODE_12M 0x0A +#define ATH5K_RATE_CODE_18M 0x0E +#define ATH5K_RATE_CODE_24M 0x09 +#define ATH5K_RATE_CODE_36M 0x0D +#define ATH5K_RATE_CODE_48M 0x08 +#define ATH5K_RATE_CODE_54M 0x0C +/* XR */ +#define ATH5K_RATE_CODE_XR_500K 0x07 +#define ATH5K_RATE_CODE_XR_1M 0x02 +#define ATH5K_RATE_CODE_XR_2M 0x06 +#define ATH5K_RATE_CODE_XR_3M 0x01 + +/* adding this flag to rate_code enables short preamble */ +#define AR5K_SET_SHORT_PREAMBLE 0x04 + +/* + * Crypto definitions + */ + +#define AR5K_KEYCACHE_SIZE 8 + +/***********************\ + HW RELATED DEFINITIONS +\***********************/ + +/* + * Misc definitions + */ +#define AR5K_RSSI_EP_MULTIPLIER (1<<7) + +#define AR5K_ASSERT_ENTRY(_e, _s) do { \ + if (_e >= _s) \ + return (false); \ +} while (0) + +/* + * Hardware interrupt abstraction + */ + +/** + * enum ath5k_int - Hardware interrupt masks helpers + * + * @AR5K_INT_RX: mask to identify received frame interrupts, of type + * AR5K_ISR_RXOK or AR5K_ISR_RXERR + * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?) + * @AR5K_INT_RXNOFRM: No frame received (?) + * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The + * Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's + * LinkPtr is NULL. For more details, refer to: + * http://www.freepatentsonline.com/20030225739.html + * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors). + * Note that Rx overrun is not always fatal, on some chips we can continue + * operation without reseting the card, that's why int_fatal is not + * common for all chips. + * @AR5K_INT_TX: mask to identify received frame interrupts, of type + * AR5K_ISR_TXOK or AR5K_ISR_TXERR + * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?) + * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold + * We currently do increments on interrupt by + * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2 + * @AR5K_INT_MIB: Indicates the Management Information Base counters should be + * checked. We should do this with ath5k_hw_update_mib_counters() but + * it seems we should also then do some noise immunity work. + * @AR5K_INT_RXPHY: RX PHY Error + * @AR5K_INT_RXKCM: RX Key cache miss + * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a + * beacon that must be handled in software. The alternative is if you + * have VEOL support, in that case you let the hardware deal with things. + * @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing + * beacons from the AP have associated with, we should probably try to + * reassociate. When in IBSS mode this might mean we have not received + * any beacons from any local stations. Note that every station in an + * IBSS schedules to send beacons at the Target Beacon Transmission Time + * (TBTT) with a random backoff. + * @AR5K_INT_BNR: Beacon Not Ready interrupt - ?? + * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now + * until properly handled + * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA + * errors. These types of errors we can enable seem to be of type + * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR. 
+ * @AR5K_INT_GLOBAL: Used to clear and set the IER + * @AR5K_INT_NOCARD: signals the card has been removed + * @AR5K_INT_COMMON: common interrupts shared amogst MACs with the same + * bit value + * + * These are mapped to take advantage of some common bits + * between the MACs, to be able to set intr properties + * easier. Some of them are not used yet inside hw.c. Most map + * to the respective hw interrupt value as they are common amogst different + * MACs. + */ +enum ath5k_int { + AR5K_INT_RXOK = 0x00000001, + AR5K_INT_RXDESC = 0x00000002, + AR5K_INT_RXERR = 0x00000004, + AR5K_INT_RXNOFRM = 0x00000008, + AR5K_INT_RXEOL = 0x00000010, + AR5K_INT_RXORN = 0x00000020, + AR5K_INT_TXOK = 0x00000040, + AR5K_INT_TXDESC = 0x00000080, + AR5K_INT_TXERR = 0x00000100, + AR5K_INT_TXNOFRM = 0x00000200, + AR5K_INT_TXEOL = 0x00000400, + AR5K_INT_TXURN = 0x00000800, + AR5K_INT_MIB = 0x00001000, + AR5K_INT_SWI = 0x00002000, + AR5K_INT_RXPHY = 0x00004000, + AR5K_INT_RXKCM = 0x00008000, + AR5K_INT_SWBA = 0x00010000, + AR5K_INT_BRSSI = 0x00020000, + AR5K_INT_BMISS = 0x00040000, + AR5K_INT_FATAL = 0x00080000, /* Non common */ + AR5K_INT_BNR = 0x00100000, /* Non common */ + AR5K_INT_TIM = 0x00200000, /* Non common */ + AR5K_INT_DTIM = 0x00400000, /* Non common */ + AR5K_INT_DTIM_SYNC = 0x00800000, /* Non common */ + AR5K_INT_GPIO = 0x01000000, + AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */ + AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */ + AR5K_INT_RX_DOPPLER = 0x08000000, /* Non common */ + AR5K_INT_QCBRORN = 0x10000000, /* Non common */ + AR5K_INT_QCBRURN = 0x20000000, /* Non common */ + AR5K_INT_QTRIG = 0x40000000, /* Non common */ + AR5K_INT_GLOBAL = 0x80000000, + + AR5K_INT_COMMON = AR5K_INT_RXOK + | AR5K_INT_RXDESC + | AR5K_INT_RXERR + | AR5K_INT_RXNOFRM + | AR5K_INT_RXEOL + | AR5K_INT_RXORN + | AR5K_INT_TXOK + | AR5K_INT_TXDESC + | AR5K_INT_TXERR + | AR5K_INT_TXNOFRM + | AR5K_INT_TXEOL + | AR5K_INT_TXURN + | AR5K_INT_MIB + | AR5K_INT_SWI + | AR5K_INT_RXPHY + | AR5K_INT_RXKCM + | AR5K_INT_SWBA + | AR5K_INT_BRSSI + | AR5K_INT_BMISS + | AR5K_INT_GPIO + | AR5K_INT_GLOBAL, + + AR5K_INT_NOCARD = 0xffffffff +}; + +/* Software interrupts used for calibration */ +enum ath5k_software_interrupt { + AR5K_SWI_FULL_CALIBRATION = 0x01, + AR5K_SWI_SHORT_CALIBRATION = 0x02, +}; + +/* + * Power management + */ +enum ath5k_power_mode { + AR5K_PM_UNDEFINED = 0, + AR5K_PM_AUTO, + AR5K_PM_AWAKE, + AR5K_PM_FULL_SLEEP, + AR5K_PM_NETWORK_SLEEP, +}; + +/* + * These match net80211 definitions (not used in + * mac80211). + * TODO: Clean this up + */ +#define AR5K_LED_INIT 0 /*IEEE80211_S_INIT*/ +#define AR5K_LED_SCAN 1 /*IEEE80211_S_SCAN*/ +#define AR5K_LED_AUTH 2 /*IEEE80211_S_AUTH*/ +#define AR5K_LED_ASSOC 3 /*IEEE80211_S_ASSOC*/ +#define AR5K_LED_RUN 4 /*IEEE80211_S_RUN*/ + +/* GPIO-controlled software LED */ +#define AR5K_SOFTLED_PIN 0 +#define AR5K_SOFTLED_ON 0 +#define AR5K_SOFTLED_OFF 1 + +/* + * Chipset capabilities -see ath5k_hw_get_capability- + * get_capability function is not yet fully implemented + * in ath5k so most of these don't work yet... + * TODO: Implement these & merge with _TUNE_ stuff above + */ +enum ath5k_capability_type { + AR5K_CAP_REG_DMN = 0, /* Used to get current reg. 
domain id */ + AR5K_CAP_TKIP_MIC = 2, /* Can handle TKIP MIC in hardware */ + AR5K_CAP_TKIP_SPLIT = 3, /* TKIP uses split keys */ + AR5K_CAP_PHYCOUNTERS = 4, /* PHY error counters */ + AR5K_CAP_DIVERSITY = 5, /* Supports fast diversity */ + AR5K_CAP_NUM_TXQUEUES = 6, /* Used to get max number of hw txqueues */ + AR5K_CAP_VEOL = 7, /* Supports virtual EOL */ + AR5K_CAP_COMPRESSION = 8, /* Supports compression */ + AR5K_CAP_BURST = 9, /* Supports packet bursting */ + AR5K_CAP_FASTFRAME = 10, /* Supports fast frames */ + AR5K_CAP_TXPOW = 11, /* Used to get global tx power limit */ + AR5K_CAP_TPC = 12, /* Can do per-packet tx power control (needed for 802.11a) */ + AR5K_CAP_BSSIDMASK = 13, /* Supports bssid mask */ + AR5K_CAP_MCAST_KEYSRCH = 14, /* Supports multicast key search */ + AR5K_CAP_TSF_ADJUST = 15, /* Supports beacon tsf adjust */ + AR5K_CAP_XR = 16, /* Supports XR mode */ + AR5K_CAP_WME_TKIPMIC = 17, /* Supports TKIP MIC when using WMM */ + AR5K_CAP_CHAN_HALFRATE = 18, /* Supports half rate channels */ + AR5K_CAP_CHAN_QUARTERRATE = 19, /* Supports quarter rate channels */ + AR5K_CAP_RFSILENT = 20, /* Supports RFsilent */ +}; + + +/* XXX: we *may* move cap_range stuff to struct wiphy */ +struct ath5k_capabilities { + /* + * Supported PHY modes + * (ie. CHANNEL_A, CHANNEL_B, ...) + */ + DECLARE_BITMAP(cap_mode, AR5K_MODE_MAX); + + /* + * Frequency range (without regulation restrictions) + */ + struct { + u16 range_2ghz_min; + u16 range_2ghz_max; + u16 range_5ghz_min; + u16 range_5ghz_max; + } cap_range; + + /* + * Values stored in the EEPROM (some of them...) + */ + struct ath5k_eeprom_info cap_eeprom; + + /* + * Queue information + */ + struct { + u8 q_tx_num; + } cap_queues; +}; + +/* size of noise floor history (keep it a power of two) */ +#define ATH5K_NF_CAL_HIST_MAX 8 +struct ath5k_nfcal_hist +{ + s16 index; /* current index into nfval */ + s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */ +}; + + +/***************************************\ + HARDWARE ABSTRACTION LAYER STRUCTURE +\***************************************/ + +/* + * Misc defines + */ + +#define AR5K_MAX_GPIO 10 +#define AR5K_MAX_RF_BANKS 8 + +/* TODO: Clean up and merge with ath5k_softc */ +struct ath5k_hw { + u32 ah_magic; + struct ath_common common; + + struct ath5k_softc *ah_sc; + void __iomem *ah_iobase; + + enum ath5k_int ah_imr; + + enum nl80211_iftype ah_op_mode; + struct ieee80211_channel *ah_current_channel; + bool ah_turbo; + bool ah_calibration; + bool ah_single_chip; + bool ah_aes_support; + bool ah_combined_mic; + + enum ath5k_version ah_version; + enum ath5k_radio ah_radio; + u32 ah_phy; + u32 ah_mac_srev; + u16 ah_mac_version; + u16 ah_mac_revision; + u16 ah_phy_revision; + u16 ah_radio_5ghz_revision; + u16 ah_radio_2ghz_revision; + +#define ah_modes ah_capabilities.cap_mode +#define ah_ee_version ah_capabilities.cap_eeprom.ee_version + + u32 ah_atim_window; + u32 ah_aifs; + u32 ah_cw_min; + u32 ah_cw_max; + u32 ah_limit_tx_retries; + u8 ah_coverage_class; + + /* Antenna Control */ + u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX]; + u8 ah_ant_mode; + u8 ah_tx_ant; + u8 ah_def_ant; + bool ah_software_retry; + + int ah_gpio_npins; + + struct ath5k_capabilities ah_capabilities; + + struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES]; + u32 ah_txq_status; + u32 ah_txq_imr_txok; + u32 ah_txq_imr_txerr; + u32 ah_txq_imr_txurn; + u32 ah_txq_imr_txdesc; + u32 ah_txq_imr_txeol; + u32 ah_txq_imr_cbrorn; + u32 ah_txq_imr_cbrurn; + u32 ah_txq_imr_qtrig; + u32 ah_txq_imr_nofrm; + u32 ah_txq_isr; + u32 
*ah_rf_banks; + size_t ah_rf_banks_size; + size_t ah_rf_regs_count; + struct ath5k_gain ah_gain; + u8 ah_offset[AR5K_MAX_RF_BANKS]; + + + struct { + /* Temporary tables used for interpolation */ + u8 tmpL[AR5K_EEPROM_N_PD_GAINS] + [AR5K_EEPROM_POWER_TABLE_SIZE]; + u8 tmpR[AR5K_EEPROM_N_PD_GAINS] + [AR5K_EEPROM_POWER_TABLE_SIZE]; + u8 txp_pd_table[AR5K_EEPROM_POWER_TABLE_SIZE * 2]; + u16 txp_rates_power_table[AR5K_MAX_RATES]; + u8 txp_min_idx; + bool txp_tpc; + /* Values in 0.25dB units */ + s16 txp_min_pwr; + s16 txp_max_pwr; + /* Values in 0.5dB units */ + s16 txp_offset; + s16 txp_ofdm; + s16 txp_cck_ofdm_gainf_delta; + /* Value in dB units */ + s16 txp_cck_ofdm_pwr_delta; + } ah_txpower; + + struct { + bool r_enabled; + int r_last_alert; + struct ieee80211_channel r_last_channel; + } ah_radar; + + struct ath5k_nfcal_hist ah_nfcal_hist; + + /* noise floor from last periodic calibration */ + s32 ah_noise_floor; + + /* Calibration timestamp */ + unsigned long ah_cal_tstamp; + + /* Calibration interval (secs) */ + u8 ah_cal_intval; + + /* Software interrupt mask */ + u8 ah_swi_mask; + + /* + * Function pointers + */ + int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc, + u32 size, unsigned int flags); + int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, + unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int, + unsigned int, unsigned int, unsigned int, unsigned int, + unsigned int, unsigned int, unsigned int); + int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, + unsigned int, unsigned int, unsigned int, unsigned int, + unsigned int, unsigned int); + int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, + struct ath5k_tx_status *); + int (*ah_proc_rx_desc)(struct ath5k_hw *, struct ath5k_desc *, + struct ath5k_rx_status *); +}; + +/* + * Prototypes + */ + +/* Attach/Detach Functions */ +extern int ath5k_hw_attach(struct ath5k_softc *sc); +extern void ath5k_hw_detach(struct ath5k_hw *ah); + +/* LED functions */ +extern int ath5k_init_leds(struct ath5k_softc *sc); +extern void ath5k_led_enable(struct ath5k_softc *sc); +extern void ath5k_led_off(struct ath5k_softc *sc); +extern void ath5k_unregister_leds(struct ath5k_softc *sc); + +/* Reset Functions */ +extern int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial); +extern int ath5k_hw_on_hold(struct ath5k_hw *ah); +extern int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel); +/* Power management functions */ +extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration); + +/* DMA Related Functions */ +extern void ath5k_hw_start_rx_dma(struct ath5k_hw *ah); +extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah); +extern u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah); +extern void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr); +extern int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue); +extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue); +extern u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue); +extern int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, + u32 phys_addr); +extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase); +/* Interrupt handling */ +extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah); +extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask); +extern enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, 
enum +ath5k_int new_mask); +extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats); + +/* EEPROM access functions */ +extern int ath5k_eeprom_init(struct ath5k_hw *ah); +extern void ath5k_eeprom_detach(struct ath5k_hw *ah); +extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac); +extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah); + +/* Protocol Control Unit Functions */ +extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); +extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class); +/* BSSID Functions */ +extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); +extern void ath5k_hw_set_associd(struct ath5k_hw *ah); +extern void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask); +/* Receive start/stop functions */ +extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah); +extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah); +/* RX Filter functions */ +extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1); +extern int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index); +extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index); +extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah); +extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter); +/* Beacon control functions */ +extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah); +extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah); +extern void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64); +extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah); +extern void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval); +#if 0 +extern int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah, const struct ath5k_beacon_state *state); +extern void ath5k_hw_reset_beacon(struct ath5k_hw *ah); +extern int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr); +#endif +/* ACK bit rate */ +void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high); +/* ACK/CTS Timeouts */ +extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout); +extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah); +extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout); +extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah); +/* Clock rate related functions */ +unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec); +unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock); +unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah); +/* Key table (WEP) functions */ +extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry); +extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry); +extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac); +extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac); + +/* Queue Control Unit, DFS Control Unit Functions */ +extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info); +extern int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, + const struct ath5k_txq_info *queue_info); +extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, + enum ath5k_tx_queue queue_type, + struct ath5k_txq_info *queue_info); +extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue); +extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue); +extern int 
ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue); +extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah); +extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time); + +/* Hardware Descriptor Functions */ +extern int ath5k_hw_init_desc_functions(struct ath5k_hw *ah); + +/* GPIO Functions */ +extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state); +extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio); +extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio); +extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio); +extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val); +extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level); + +/* rfkill Functions */ +extern void ath5k_rfkill_hw_start(struct ath5k_hw *ah); +extern void ath5k_rfkill_hw_stop(struct ath5k_hw *ah); + +/* Misc functions */ +int ath5k_hw_set_capabilities(struct ath5k_hw *ah); +extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result); +extern int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id); +extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah); + +/* Initial register settings functions */ +extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel); + +/* Initialize RF */ +extern int ath5k_hw_rfregs_init(struct ath5k_hw *ah, + struct ieee80211_channel *channel, + unsigned int mode); +extern int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq); +extern enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah); +extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah); +/* PHY/RF channel functions */ +extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags); +extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel); +/* PHY calibration */ +void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah); +extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel); +extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq); +extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah); +extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah); +/* Spur mitigation */ +bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, + struct ieee80211_channel *channel); +void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, + struct ieee80211_channel *channel); +/* Misc PHY functions */ +extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan); +extern int ath5k_hw_phy_disable(struct ath5k_hw *ah); +/* Antenna control */ +extern void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode); +extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant); +extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah); +/* TX power setup */ +extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower); +extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower); + +/* + * Functions used internaly + */ + +static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah) +{ + return &ah->common; +} + +static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah) +{ + return &(ath5k_hw_common(ah)->regulatory); +} + +static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) +{ + return ioread32(ah->ah_iobase + reg); +} + +static inline void 
ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg) +{ + iowrite32(val, ah->ah_iobase + reg); +} + +#if defined(_ATH5K_RESET) || defined(_ATH5K_PHY) +/* + * Check if a register write has been completed + */ +static int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, + u32 val, bool is_set) +{ + int i; + u32 data; + + for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) { + data = ath5k_hw_reg_read(ah, reg); + if (is_set && (data & flag)) + break; + else if ((data & flag) == val) + break; + udelay(15); + } + + return (i <= 0) ? -EAGAIN : 0; +} +#endif + +static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits) +{ + u32 retval = 0, bit, i; + + for (i = 0; i < bits; i++) { + bit = (val >> i) & 1; + retval = (retval << 1) | bit; + } + + return retval; +} + +static inline int ath5k_pad_size(int hdrlen) +{ + return (hdrlen < 24) ? 0 : hdrlen & 3; +} + +#endif diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c new file mode 100644 index 0000000..4228444 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2008 Nick Kossifidis + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + */ + +/*************************************\ +* Attach/Detach Functions and helpers * +\*************************************/ + +#include +#include "ath5k.h" +#include "reg.h" +#include "debug.h" +#include "base.h" + +/** + * ath5k_hw_post - Power On Self Test helper function + * + * @ah: The &struct ath5k_hw + */ +static int ath5k_hw_post(struct ath5k_hw *ah) +{ + + static const u32 static_pattern[4] = { + 0x55555555, 0xaaaaaaaa, + 0x66666666, 0x99999999 + }; + static const u16 regs[2] = { AR5K_STA_ID0, AR5K_PHY(8) }; + int i, c; + u16 cur_reg; + u32 var_pattern; + u32 init_val; + u32 cur_val; + + for (c = 0; c < 2; c++) { + + cur_reg = regs[c]; + + /* Save previous value */ + init_val = ath5k_hw_reg_read(ah, cur_reg); + + for (i = 0; i < 256; i++) { + var_pattern = i << 16 | i; + ath5k_hw_reg_write(ah, var_pattern, cur_reg); + cur_val = ath5k_hw_reg_read(ah, cur_reg); + + if (cur_val != var_pattern) { + ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n"); + return -EAGAIN; + } + + /* Found on ndiswrapper dumps */ + var_pattern = 0x0039080f; + ath5k_hw_reg_write(ah, var_pattern, cur_reg); + } + + for (i = 0; i < 4; i++) { + var_pattern = static_pattern[i]; + ath5k_hw_reg_write(ah, var_pattern, cur_reg); + cur_val = ath5k_hw_reg_read(ah, cur_reg); + + if (cur_val != var_pattern) { + ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n"); + return -EAGAIN; + } + + /* Found on ndiswrapper dumps */ + var_pattern = 0x003b080f; + ath5k_hw_reg_write(ah, var_pattern, cur_reg); + } + + /* Restore previous value */ + ath5k_hw_reg_write(ah, init_val, cur_reg); + + } + + return 0; + +} + +/** + * ath5k_hw_attach - Check if hw is supported and init the needed structs + * + * @sc: The &struct ath5k_softc we got from the driver's attach function + * + * Check if the device is supported, perform a POST and initialize the needed + * structs. Returns -ENOMEM if we don't have memory for the needed structs, + * -ENODEV if the device is not supported or prints an error msg if something + * else went wrong. 
+ */ +int ath5k_hw_attach(struct ath5k_softc *sc) +{ + struct ath5k_hw *ah = sc->ah; + struct ath_common *common = ath5k_hw_common(ah); + struct pci_dev *pdev = sc->pdev; + struct ath5k_eeprom_info *ee; + int ret; + u32 srev; + + /* + * HW information + */ + ah->ah_op_mode = NL80211_IFTYPE_STATION; + ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT; + ah->ah_turbo = false; + ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; + ah->ah_imr = 0; + ah->ah_atim_window = 0; + ah->ah_aifs = AR5K_TUNE_AIFS; + ah->ah_cw_min = AR5K_TUNE_CWMIN; + ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY; + ah->ah_software_retry = false; + + /* + * Find the mac version + */ + srev = ath5k_hw_reg_read(ah, AR5K_SREV); + if (srev < AR5K_SREV_AR5311) + ah->ah_version = AR5K_AR5210; + else if (srev < AR5K_SREV_AR5212) + ah->ah_version = AR5K_AR5211; + else + ah->ah_version = AR5K_AR5212; + + /*Fill the ath5k_hw struct with the needed functions*/ + ret = ath5k_hw_init_desc_functions(ah); + if (ret) + goto err_free; + + /* Bring device out of sleep and reset it's units */ + ret = ath5k_hw_nic_wakeup(ah, 0, true); + if (ret) + goto err_free; + + /* Get MAC, PHY and RADIO revisions */ + ah->ah_mac_srev = srev; + ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER); + ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV); + ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) & + 0xffffffff; + ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah, + CHANNEL_5GHZ); + ah->ah_phy = AR5K_PHY(0); + + /* Try to identify radio chip based on it's srev */ + switch (ah->ah_radio_5ghz_revision & 0xf0) { + case AR5K_SREV_RAD_5111: + ah->ah_radio = AR5K_RF5111; + ah->ah_single_chip = false; + ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, + CHANNEL_2GHZ); + break; + case AR5K_SREV_RAD_5112: + case AR5K_SREV_RAD_2112: + ah->ah_radio = AR5K_RF5112; + ah->ah_single_chip = false; + ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, + CHANNEL_2GHZ); + break; + case AR5K_SREV_RAD_2413: + ah->ah_radio = AR5K_RF2413; + ah->ah_single_chip = true; + break; + case AR5K_SREV_RAD_5413: + ah->ah_radio = AR5K_RF5413; + ah->ah_single_chip = true; + break; + case AR5K_SREV_RAD_2316: + ah->ah_radio = AR5K_RF2316; + ah->ah_single_chip = true; + break; + case AR5K_SREV_RAD_2317: + ah->ah_radio = AR5K_RF2317; + ah->ah_single_chip = true; + break; + case AR5K_SREV_RAD_5424: + if (ah->ah_mac_version == AR5K_SREV_AR2425 || + ah->ah_mac_version == AR5K_SREV_AR2417){ + ah->ah_radio = AR5K_RF2425; + ah->ah_single_chip = true; + } else { + ah->ah_radio = AR5K_RF5413; + ah->ah_single_chip = true; + } + break; + default: + /* Identify radio based on mac/phy srev */ + if (ah->ah_version == AR5K_AR5210) { + ah->ah_radio = AR5K_RF5110; + ah->ah_single_chip = false; + } else if (ah->ah_version == AR5K_AR5211) { + ah->ah_radio = AR5K_RF5111; + ah->ah_single_chip = false; + ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah, + CHANNEL_2GHZ); + } else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) || + ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) || + ah->ah_phy_revision == AR5K_SREV_PHY_2425) { + ah->ah_radio = AR5K_RF2425; + ah->ah_single_chip = true; + ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2425; + } else if (srev == AR5K_SREV_AR5213A && + ah->ah_phy_revision == AR5K_SREV_PHY_5212B) { + ah->ah_radio = AR5K_RF5112; + ah->ah_single_chip = false; + ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B; + } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) { + ah->ah_radio = AR5K_RF2316; + ah->ah_single_chip = true; + 
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316; + } else if (ah->ah_mac_version == (AR5K_SREV_AR5414 >> 4) || + ah->ah_phy_revision == AR5K_SREV_PHY_5413) { + ah->ah_radio = AR5K_RF5413; + ah->ah_single_chip = true; + ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5413; + } else if (ah->ah_mac_version == (AR5K_SREV_AR2414 >> 4) || + ah->ah_phy_revision == AR5K_SREV_PHY_2413) { + ah->ah_radio = AR5K_RF2413; + ah->ah_single_chip = true; + ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413; + } else { + ATH5K_ERR(sc, "Couldn't identify radio revision.\n"); + ret = -ENODEV; + goto err_free; + } + } + + + /* Return on unsuported chips (unsupported eeprom etc) */ + if ((srev >= AR5K_SREV_AR5416) && + (srev < AR5K_SREV_AR2425)) { + ATH5K_ERR(sc, "Device not yet supported.\n"); + ret = -ENODEV; + goto err_free; + } + + /* + * POST + */ + ret = ath5k_hw_post(ah); + if (ret) + goto err_free; + + /* Enable pci core retry fix on Hainan (5213A) and later chips */ + if (srev >= AR5K_SREV_AR5213A) + AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_RETRY_FIX); + + /* + * Get card capabilities, calibration values etc + * TODO: EEPROM work + */ + ret = ath5k_eeprom_init(ah); + if (ret) { + ATH5K_ERR(sc, "unable to init EEPROM\n"); + goto err_free; + } + + ee = &ah->ah_capabilities.cap_eeprom; + + /* + * Write PCI-E power save settings + */ + if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) { + ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES); + ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES); + + /* Shut off RX when elecidle is asserted */ + ath5k_hw_reg_write(ah, 0x28000039, AR5K_PCIE_SERDES); + ath5k_hw_reg_write(ah, 0x53160824, AR5K_PCIE_SERDES); + + /* If serdes programing is enabled, increase PCI-E + * tx power for systems with long trace from host + * to minicard connector. 
*/ + if (ee->ee_serdes) + ath5k_hw_reg_write(ah, 0xe5980579, AR5K_PCIE_SERDES); + else + ath5k_hw_reg_write(ah, 0xf6800579, AR5K_PCIE_SERDES); + + /* Shut off PLL and CLKREQ active in L1 */ + ath5k_hw_reg_write(ah, 0x001defff, AR5K_PCIE_SERDES); + + /* Preserve other settings */ + ath5k_hw_reg_write(ah, 0x1aaabe40, AR5K_PCIE_SERDES); + ath5k_hw_reg_write(ah, 0xbe105554, AR5K_PCIE_SERDES); + ath5k_hw_reg_write(ah, 0x000e3007, AR5K_PCIE_SERDES); + + /* Reset SERDES to load new settings */ + ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET); + mdelay(1); + } + + /* Get misc capabilities */ + ret = ath5k_hw_set_capabilities(ah); + if (ret) { + ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n", + sc->pdev->device); + goto err_free; + } + + /* Crypto settings */ + ah->ah_aes_support = srev >= AR5K_SREV_AR5212_V4 && + (ee->ee_version >= AR5K_EEPROM_VERSION_5_0 && + !AR5K_EEPROM_AES_DIS(ee->ee_misc5)); + + if (srev >= AR5K_SREV_AR2414) { + ah->ah_combined_mic = true; + AR5K_REG_ENABLE_BITS(ah, AR5K_MISC_MODE, + AR5K_MISC_MODE_COMBINED_MIC); + } + + /* MAC address is cleared until add_interface */ + ath5k_hw_set_lladdr(ah, (u8[ETH_ALEN]){}); + + /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */ + memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN); + ath5k_hw_set_associd(ah); + ath5k_hw_set_opmode(ah); + + ath5k_hw_rfgain_opt_init(ah); + + ath5k_hw_init_nfcal_hist(ah); + + /* turn on HW LEDs */ + ath5k_hw_set_ledstate(ah, AR5K_LED_INIT); + + return 0; +err_free: + kfree(ah); + return ret; +} + +/** + * ath5k_hw_detach - Free the ath5k_hw struct + * + * @ah: The &struct ath5k_hw + */ +void ath5k_hw_detach(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + + __set_bit(ATH_STAT_INVALID, ah->ah_sc->status); + + if (ah->ah_rf_banks != NULL) + kfree(ah->ah_rf_banks); + + ath5k_eeprom_detach(ah); + + /* assume interrupts are down */ +} diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c new file mode 100644 index 0000000..c1678db --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -0,0 +1,3340 @@ +/*- + * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting + * Copyright (c) 2004-2005 Atheros Communications, Inc. + * Copyright (c) 2006 Devicescape Software, Inc. + * Copyright (c) 2007 Jiri Slaby + * Copyright (c) 2007 Luis R. Rodriguez + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "base.h" +#include "reg.h" +#include "debug.h" + +static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */ +static int modparam_nohwcrypt; +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); + +static int modparam_all_channels; +module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO); +MODULE_PARM_DESC(all_channels, "Expose all channels the device can use."); + + +/******************\ +* Internal defines * +\******************/ + +/* Module info */ +MODULE_AUTHOR("Jiri Slaby"); +MODULE_AUTHOR("Nick Kossifidis"); +MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); +MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION("0.6.0 (EXPERIMENTAL)"); + + +/* Known PCI ids */ +static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = { + { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */ + { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */ + { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/ + { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */ + { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */ + { PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */ + { PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */ + { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */ + { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 combatible */ + { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 combatible */ + { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 combatible */ + { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 combatible */ + { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 combatible */ + { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 combatible */ + { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */ + { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */ + { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */ + { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); + +/* Known SREVs */ +static const struct ath5k_srev_name srev_names[] = { + { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 }, + { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 }, + { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A }, + { "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B }, + { "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 }, + { "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 }, + { "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 }, + { "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A }, + { "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 }, + { "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 }, + { "5424", AR5K_VERSION_MAC, 
AR5K_SREV_AR5424 }, + { "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 }, + { "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 }, + { "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 }, + { "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 }, + { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 }, + { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 }, + { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 }, + { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN }, + { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, + { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, + { "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A }, + { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 }, + { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 }, + { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A }, + { "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B }, + { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 }, + { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A }, + { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B }, + { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 }, + { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 }, + { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 }, + { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 }, + { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 }, + { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, + { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, +}; + +static const struct ieee80211_rate ath5k_rates[] = { + { .bitrate = 10, + .hw_value = ATH5K_RATE_CODE_1M, }, + { .bitrate = 20, + .hw_value = ATH5K_RATE_CODE_2M, + .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = ATH5K_RATE_CODE_5_5M, + .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = ATH5K_RATE_CODE_11M, + .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60, + .hw_value = ATH5K_RATE_CODE_6M, + .flags = 0 }, + { .bitrate = 90, + .hw_value = ATH5K_RATE_CODE_9M, + .flags = 0 }, + { .bitrate = 120, + .hw_value = ATH5K_RATE_CODE_12M, + .flags = 0 }, + { .bitrate = 180, + .hw_value = ATH5K_RATE_CODE_18M, + .flags = 0 }, + { .bitrate = 240, + .hw_value = ATH5K_RATE_CODE_24M, + .flags = 0 }, + { .bitrate = 360, + .hw_value = ATH5K_RATE_CODE_36M, + .flags = 0 }, + { .bitrate = 480, + .hw_value = ATH5K_RATE_CODE_48M, + .flags = 0 }, + { .bitrate = 540, + .hw_value = ATH5K_RATE_CODE_54M, + .flags = 0 }, + /* XR missing */ +}; + +/* + * Prototypes - PCI stack related functions + */ +static int __devinit ath5k_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id); +static void __devexit ath5k_pci_remove(struct pci_dev *pdev); +#ifdef CONFIG_PM +static int ath5k_pci_suspend(struct device *dev); +static int ath5k_pci_resume(struct device *dev); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) +static int ath5k_pci_suspend_compat(struct pci_dev *pdev, pm_message_t state) +{ + int r; + + r = ath5k_pci_suspend(&pdev->dev); + if (r) + return r; + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +static int ath5k_pci_resume_compat(struct pci_dev *pdev) +{ + int r; + + pci_restore_state(pdev); + r = pci_enable_device(pdev); + if (r) + return r; + + return ath5k_pci_resume(&pdev->dev); +} +#endif + +SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume); +#define ATH5K_PM_OPS (&ath5k_pm_ops) +#else +#define ATH5K_PM_OPS NULL +#endif /* CONFIG_PM */ + +static struct 
pci_driver ath5k_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = ath5k_pci_id_table, + .probe = ath5k_pci_probe, + .remove = __devexit_p(ath5k_pci_remove), +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + .driver.pm = ATH5K_PM_OPS, +#elif defined(CONFIG_PM) + .suspend = ath5k_pci_suspend_compat, + .resume = ath5k_pci_resume_compat, +#endif +}; + + + +/* + * Prototypes - MAC 802.11 stack related functions + */ +static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ath5k_txq *txq); +static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan); +static int ath5k_reset_wake(struct ath5k_softc *sc); +static int ath5k_start(struct ieee80211_hw *hw); +static void ath5k_stop(struct ieee80211_hw *hw); +static int ath5k_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +static void ath5k_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +static int ath5k_config(struct ieee80211_hw *hw, u32 changed); +static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, + int mc_count, struct dev_addr_list *mc_list); +static void ath5k_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, + u64 multicast); +static int ath5k_set_key(struct ieee80211_hw *hw, + enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +static int ath5k_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats); +static u64 ath5k_get_tsf(struct ieee80211_hw *hw); +static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf); +static void ath5k_reset_tsf(struct ieee80211_hw *hw); +static int ath5k_beacon_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +static void ath5k_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes); +static void ath5k_sw_scan_start(struct ieee80211_hw *hw); +static void ath5k_sw_scan_complete(struct ieee80211_hw *hw); +static void ath5k_set_coverage_class(struct ieee80211_hw *hw, + u8 coverage_class); + +static const struct ieee80211_ops ath5k_hw_ops = { + .tx = ath5k_tx, + .start = ath5k_start, + .stop = ath5k_stop, + .add_interface = ath5k_add_interface, + .remove_interface = ath5k_remove_interface, + .config = ath5k_config, + .prepare_multicast = ath5k_prepare_multicast, + .configure_filter = ath5k_configure_filter, + .set_key = ath5k_set_key, + .get_stats = ath5k_get_stats, + .conf_tx = NULL, + .get_tsf = ath5k_get_tsf, + .set_tsf = ath5k_set_tsf, + .reset_tsf = ath5k_reset_tsf, + .bss_info_changed = ath5k_bss_info_changed, + .sw_scan_start = ath5k_sw_scan_start, + .sw_scan_complete = ath5k_sw_scan_complete, + .set_coverage_class = ath5k_set_coverage_class, +}; + +/* + * Prototypes - Internal functions + */ +/* Attach detach */ +static int ath5k_attach(struct pci_dev *pdev, + struct ieee80211_hw *hw); +static void ath5k_detach(struct pci_dev *pdev, + struct ieee80211_hw *hw); +/* Channel/mode setup */ +static inline short ath5k_ieee2mhz(short chan); +static unsigned int ath5k_copy_channels(struct ath5k_hw *ah, + struct ieee80211_channel *channels, + unsigned int mode, + unsigned int max); +static int ath5k_setup_bands(struct ieee80211_hw *hw); +static int ath5k_chan_set(struct ath5k_softc *sc, + struct ieee80211_channel *chan); +static void ath5k_setcurmode(struct ath5k_softc *sc, + unsigned int mode); +static void ath5k_mode_setup(struct 
ath5k_softc *sc); + +/* Descriptor setup */ +static int ath5k_desc_alloc(struct ath5k_softc *sc, + struct pci_dev *pdev); +static void ath5k_desc_free(struct ath5k_softc *sc, + struct pci_dev *pdev); +/* Buffers setup */ +static int ath5k_rxbuf_setup(struct ath5k_softc *sc, + struct ath5k_buf *bf); +static int ath5k_txbuf_setup(struct ath5k_softc *sc, + struct ath5k_buf *bf, + struct ath5k_txq *txq); +static inline void ath5k_txbuf_free(struct ath5k_softc *sc, + struct ath5k_buf *bf) +{ + BUG_ON(!bf); + if (!bf->skb) + return; + pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_any(bf->skb); + bf->skb = NULL; +} + +static inline void ath5k_rxbuf_free(struct ath5k_softc *sc, + struct ath5k_buf *bf) +{ + struct ath5k_hw *ah = sc->ah; + struct ath_common *common = ath5k_hw_common(ah); + + BUG_ON(!bf); + if (!bf->skb) + return; + pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(bf->skb); + bf->skb = NULL; +} + + +/* Queues setup */ +static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc, + int qtype, int subtype); +static int ath5k_beaconq_setup(struct ath5k_hw *ah); +static int ath5k_beaconq_config(struct ath5k_softc *sc); +static void ath5k_txq_drainq(struct ath5k_softc *sc, + struct ath5k_txq *txq); +static void ath5k_txq_cleanup(struct ath5k_softc *sc); +static void ath5k_txq_release(struct ath5k_softc *sc); +/* Rx handling */ +static int ath5k_rx_start(struct ath5k_softc *sc); +static void ath5k_rx_stop(struct ath5k_softc *sc); +static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc, + struct ath5k_desc *ds, + struct sk_buff *skb, + struct ath5k_rx_status *rs); +static void ath5k_tasklet_rx(unsigned long data); +/* Tx handling */ +static void ath5k_tx_processq(struct ath5k_softc *sc, + struct ath5k_txq *txq); +static void ath5k_tasklet_tx(unsigned long data); +/* Beacon handling */ +static int ath5k_beacon_setup(struct ath5k_softc *sc, + struct ath5k_buf *bf); +static void ath5k_beacon_send(struct ath5k_softc *sc); +static void ath5k_beacon_config(struct ath5k_softc *sc); +static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf); +static void ath5k_tasklet_beacon(unsigned long data); + +static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) +{ + u64 tsf = ath5k_hw_get_tsf64(ah); + + if ((tsf & 0x7fff) < rstamp) + tsf -= 0x8000; + + return (tsf & ~0x7fff) | rstamp; +} + +/* Interrupt handling */ +static int ath5k_init(struct ath5k_softc *sc); +static int ath5k_stop_locked(struct ath5k_softc *sc); +static int ath5k_stop_hw(struct ath5k_softc *sc); +static irqreturn_t ath5k_intr(int irq, void *dev_id); +static void ath5k_tasklet_reset(unsigned long data); + +static void ath5k_tasklet_calibrate(unsigned long data); + +/* + * Module init/exit functions + */ +static int __init +init_ath5k_pci(void) +{ + int ret; + + ath5k_debug_init(); + + ret = pci_register_driver(&ath5k_pci_driver); + if (ret) { + printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); + return ret; + } + + return 0; +} + +static void __exit +exit_ath5k_pci(void) +{ + pci_unregister_driver(&ath5k_pci_driver); + + ath5k_debug_finish(); +} + +module_init(init_ath5k_pci); +module_exit(exit_ath5k_pci); + + +/********************\ +* PCI Initialization * +\********************/ + +static const char * +ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) +{ + const char *name = "xxxxx"; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(srev_names); i++) { + if (srev_names[i].sr_type != 
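The hardware reports only the low 15 bits of a receive timestamp, and ath5k_extend_tsf() above splices those bits into the full 64-bit TSF, stepping back one 0x8000-microsecond window when the TSF's low bits have already wrapped past the stamp. A standalone sketch of the same arithmetic, with the TSF passed in directly instead of read from the hardware:

#include <stdio.h>
#include <stdint.h>

/* Same wrap handling as ath5k_extend_tsf(), minus the register read. */
static uint64_t extend_tsf15(uint64_t tsf, uint32_t rstamp)
{
    /* rstamp carries only 15 bits; if the TSF's low 15 bits already
     * wrapped past it, the stamp belongs to the previous 0x8000 window. */
    if ((tsf & 0x7fff) < rstamp)
        tsf -= 0x8000;
    return (tsf & ~(uint64_t)0x7fff) | rstamp;
}

int main(void)
{
    uint64_t tsf = 0x10000123;   /* current TSF, low 15 bits = 0x0123 */
    uint32_t rstamp = 0x7ff0;    /* frame stamped just before the wrap */

    printf("extended: 0x%llx\n",
           (unsigned long long)extend_tsf15(tsf, rstamp));
    /* -> 0xffffff0: the stamp lands one 0x8000 window before the current TSF */
    return 0;
}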
type) + continue; + + if ((val & 0xf0) == srev_names[i].sr_val) + name = srev_names[i].sr_name; + + if ((val & 0xff) == srev_names[i].sr_val) { + name = srev_names[i].sr_name; + break; + } + } + + return name; +} +static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset) +{ + struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; + return ath5k_hw_reg_read(ah, reg_offset); +} + +static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset) +{ + struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; + ath5k_hw_reg_write(ah, val, reg_offset); +} + +static const struct ath_ops ath5k_common_ops = { + .read = ath5k_ioread32, + .write = ath5k_iowrite32, +}; + +static int __devinit +ath5k_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + void __iomem *mem; + struct ath5k_softc *sc; + struct ath_common *common; + struct ieee80211_hw *hw; + int ret; + u8 csz; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "can't enable device\n"); + goto err; + } + + /* XXX 32-bit addressing only */ + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, "32-bit DMA not available\n"); + goto err_dis; + } + + /* + * Cache line size is used to size and align various + * structures used to communicate with the hardware. + */ + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); + if (csz == 0) { + /* + * Linux 2.4.18 (at least) writes the cache line size + * register as a 16-bit wide register which is wrong. + * We must have this setup properly for rx buffer + * DMA to work so force a reasonable value here if it + * comes up zero. + */ + csz = L1_CACHE_BYTES >> 2; + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz); + } + /* + * The default setting of latency timer yields poor results, + * set it to the value used by other systems. It may be worth + * tweaking this setting more. + */ + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8); + + /* Enable bus mastering */ + pci_set_master(pdev); + + /* + * Disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. + */ + pci_write_config_byte(pdev, 0x41, 0); + + ret = pci_request_region(pdev, 0, "ath5k"); + if (ret) { + dev_err(&pdev->dev, "cannot reserve PCI memory region\n"); + goto err_dis; + } + + mem = pci_iomap(pdev, 0, 0); + if (!mem) { + dev_err(&pdev->dev, "cannot remap PCI memory region\n") ; + ret = -EIO; + goto err_reg; + } + + /* + * Allocate hw (mac80211 main struct) + * and hw->priv (driver private data) + */ + hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops); + if (hw == NULL) { + dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n"); + ret = -ENOMEM; + goto err_map; + } + + dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy)); + + /* Initialize driver private data */ + SET_IEEE80211_DEV(hw, &pdev->dev); + hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM; + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_MESH_POINT); + + hw->extra_tx_headroom = 2; + hw->channel_change_time = 5000; + sc = hw->priv; + sc->hw = hw; + sc->pdev = pdev; + + ath5k_debug_init_device(sc); + + /* + * Mark the device as detached to avoid processing + * interrupts until setup is complete. 
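The PCI cache line size register is programmed in 32-bit words, which is why the fallback in the probe code above uses L1_CACHE_BYTES >> 2 and why the value is converted back to bytes later with csz << 2. A small sketch of the two conversions, assuming a 64-byte cache line:

#include <stdio.h>

int main(void)
{
    unsigned int l1_cache_bytes = 64;   /* stand-in for L1_CACHE_BYTES */
    unsigned char csz;
    unsigned int cachelsz;

    /* PCI_CACHE_LINE_SIZE holds the line size in 32-bit words (bytes / 4) */
    csz = l1_cache_bytes >> 2;
    /* ...and the driver converts it back to bytes for rx buffer sizing */
    cachelsz = csz << 2;

    printf("register value %u words -> %u bytes\n", csz, cachelsz);
    return 0;
}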
+ */ + __set_bit(ATH_STAT_INVALID, sc->status); + + sc->iobase = mem; /* So we can unmap it on detach */ + sc->opmode = NL80211_IFTYPE_STATION; + sc->bintval = 1000; + mutex_init(&sc->lock); + spin_lock_init(&sc->rxbuflock); + spin_lock_init(&sc->txbuflock); + spin_lock_init(&sc->block); + + /* Set private data */ + pci_set_drvdata(pdev, hw); + + /* Setup interrupt handler */ + ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); + if (ret) { + ATH5K_ERR(sc, "request_irq failed\n"); + goto err_free; + } + + /*If we passed the test malloc a ath5k_hw struct*/ + sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL); + if (!sc->ah) { + ret = -ENOMEM; + ATH5K_ERR(sc, "out of memory\n"); + goto err_irq; + } + + sc->ah->ah_sc = sc; + sc->ah->ah_iobase = sc->iobase; + common = ath5k_hw_common(sc->ah); + common->ops = &ath5k_common_ops; + common->ah = sc->ah; + common->hw = hw; + common->cachelsz = csz << 2; /* convert to bytes */ + + /* Initialize device */ + ret = ath5k_hw_attach(sc); + if (ret) { + goto err_free_ah; + } + + /* set up multi-rate retry capabilities */ + if (sc->ah->ah_version == AR5K_AR5212) { + hw->max_rates = 4; + hw->max_rate_tries = 11; + } + + /* Finish private driver data initialization */ + ret = ath5k_attach(pdev, hw); + if (ret) + goto err_ah; + + ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n", + ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev), + sc->ah->ah_mac_srev, + sc->ah->ah_phy_revision); + + if (!sc->ah->ah_single_chip) { + /* Single chip radio (!RF5111) */ + if (sc->ah->ah_radio_5ghz_revision && + !sc->ah->ah_radio_2ghz_revision) { + /* No 5GHz support -> report 2GHz radio */ + if (!test_bit(AR5K_MODE_11A, + sc->ah->ah_capabilities.cap_mode)) { + ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", + ath5k_chip_name(AR5K_VERSION_RAD, + sc->ah->ah_radio_5ghz_revision), + sc->ah->ah_radio_5ghz_revision); + /* No 2GHz support (5110 and some + * 5Ghz only cards) -> report 5Ghz radio */ + } else if (!test_bit(AR5K_MODE_11B, + sc->ah->ah_capabilities.cap_mode)) { + ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", + ath5k_chip_name(AR5K_VERSION_RAD, + sc->ah->ah_radio_5ghz_revision), + sc->ah->ah_radio_5ghz_revision); + /* Multiband radio */ + } else { + ATH5K_INFO(sc, "RF%s multiband radio found" + " (0x%x)\n", + ath5k_chip_name(AR5K_VERSION_RAD, + sc->ah->ah_radio_5ghz_revision), + sc->ah->ah_radio_5ghz_revision); + } + } + /* Multi chip radio (RF5111 - RF2111) -> + * report both 2GHz/5GHz radios */ + else if (sc->ah->ah_radio_5ghz_revision && + sc->ah->ah_radio_2ghz_revision){ + ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", + ath5k_chip_name(AR5K_VERSION_RAD, + sc->ah->ah_radio_5ghz_revision), + sc->ah->ah_radio_5ghz_revision); + ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", + ath5k_chip_name(AR5K_VERSION_RAD, + sc->ah->ah_radio_2ghz_revision), + sc->ah->ah_radio_2ghz_revision); + } + } + + + /* ready to process interrupts */ + __clear_bit(ATH_STAT_INVALID, sc->status); + + return 0; +err_ah: + ath5k_hw_detach(sc->ah); +err_irq: + free_irq(pdev->irq, sc); +err_free_ah: + kfree(sc->ah); +err_free: + ieee80211_free_hw(hw); +err_map: + pci_iounmap(pdev, mem); +err_reg: + pci_release_region(pdev, 0); +err_dis: + pci_disable_device(pdev); +err: + return ret; +} + +static void __devexit +ath5k_pci_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ath5k_softc *sc = hw->priv; + + ath5k_debug_finish_device(sc); + ath5k_detach(pdev, hw); + ath5k_hw_detach(sc->ah); + kfree(sc->ah); + 
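ath5k_chip_name(), used just above to report the MAC and radio revisions, keeps the last high-nibble family match ((val & 0xf0) == sr_val) but lets an exact byte match win and stop the scan. A user-space sketch of the same two-stage lookup; the names and revision codes here are made up, and the sr_type filter is dropped for brevity:

#include <stdio.h>
#include <stdint.h>

struct demo_srev {
    const char *name;
    uint16_t val;        /* placeholder codes, not real AR5K_SREV_* values */
};

static const struct demo_srev names[] = {
    { "5211",  0x40 },
    { "5212",  0x50 },
    { "5213",  0x55 },
    { "xxxxx", 0xff },   /* catch-all, like AR5K_SREV_UNKNOWN */
};

/* Prefer an exact byte match, otherwise fall back to the last family match. */
static const char *chip_name(uint16_t val)
{
    const char *name = "xxxxx";
    unsigned int i;

    for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
        if ((val & 0xf0) == names[i].val)
            name = names[i].name;
        if ((val & 0xff) == names[i].val) {
            name = names[i].name;
            break;
        }
    }
    return name;
}

int main(void)
{
    printf("0x55 -> %s\n", chip_name(0x55));   /* exact match: 5213 */
    printf("0x57 -> %s\n", chip_name(0x57));   /* family (0x50) match: 5212 */
    printf("0x61 -> %s\n", chip_name(0x61));   /* no match: xxxxx */
    return 0;
}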
free_irq(pdev->irq, sc); + pci_iounmap(pdev, sc->iobase); + pci_release_region(pdev, 0); + pci_disable_device(pdev); + ieee80211_free_hw(hw); +} + +#ifdef CONFIG_PM +static int ath5k_pci_suspend(struct device *dev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(to_pci_dev(dev)); + struct ath5k_softc *sc = hw->priv; + + ath5k_led_off(sc); + return 0; +} + +static int ath5k_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ath5k_softc *sc = hw->priv; + + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state + */ + pci_write_config_byte(pdev, 0x41, 0); + + ath5k_led_enable(sc); + return 0; +} +#endif /* CONFIG_PM */ + + +/***********************\ +* Driver Initialization * +\***********************/ + +static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct ath5k_softc *sc = hw->priv; + struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah); + + return ath_reg_notifier_apply(wiphy, request, regulatory); +} + +static int +ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) +{ + struct ath5k_softc *sc = hw->priv; + struct ath5k_hw *ah = sc->ah; + struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah); + u8 mac[ETH_ALEN] = {}; + int ret; + + ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device); + + /* + * Check if the MAC has multi-rate retry support. + * We do this by trying to setup a fake extended + * descriptor. MAC's that don't have support will + * return false w/o doing anything. MAC's that do + * support it will return true w/o doing anything. + */ + ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); + if (ret < 0) + goto err; + if (ret > 0) + __set_bit(ATH_STAT_MRRETRY, sc->status); + + /* + * Collect the channel list. The 802.11 layer + * is resposible for filtering this list based + * on settings like the phy mode and regulatory + * domain restrictions. + */ + ret = ath5k_setup_bands(hw); + if (ret) { + ATH5K_ERR(sc, "can't get channels\n"); + goto err; + } + + /* NB: setup here so ath5k_rate_update is happy */ + if (test_bit(AR5K_MODE_11A, ah->ah_modes)) + ath5k_setcurmode(sc, AR5K_MODE_11A); + else + ath5k_setcurmode(sc, AR5K_MODE_11B); + + /* + * Allocate tx+rx descriptors and populate the lists. + */ + ret = ath5k_desc_alloc(sc, pdev); + if (ret) { + ATH5K_ERR(sc, "can't allocate descriptors\n"); + goto err; + } + + /* + * Allocate hardware transmit queues: one queue for + * beacon frames and one data queue for each QoS + * priority. Note that hw functions handle reseting + * these queues at the needed time. 
+ */ + ret = ath5k_beaconq_setup(ah); + if (ret < 0) { + ATH5K_ERR(sc, "can't setup a beacon xmit queue\n"); + goto err_desc; + } + sc->bhalq = ret; + sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0); + if (IS_ERR(sc->cabq)) { + ATH5K_ERR(sc, "can't setup cab queue\n"); + ret = PTR_ERR(sc->cabq); + goto err_bhal; + } + + sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK); + if (IS_ERR(sc->txq)) { + ATH5K_ERR(sc, "can't setup xmit queue\n"); + ret = PTR_ERR(sc->txq); + goto err_queues; + } + + tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc); + tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc); + tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc); + tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc); + tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc); + + ret = ath5k_eeprom_read_mac(ah, mac); + if (ret) { + ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n", + sc->pdev->device); + goto err_queues; + } + + SET_IEEE80211_PERM_ADDR(hw, mac); + /* All MAC address bits matter for ACKs */ + memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN); + ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); + + regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain; + ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier); + if (ret) { + ATH5K_ERR(sc, "can't initialize regulatory system\n"); + goto err_queues; + } + + ret = ieee80211_register_hw(hw); + if (ret) { + ATH5K_ERR(sc, "can't register ieee80211 hw\n"); + goto err_queues; + } + + if (!ath_is_world_regd(regulatory)) + regulatory_hint(hw->wiphy, regulatory->alpha2); + + ath5k_init_leds(sc); + + return 0; +err_queues: + ath5k_txq_release(sc); +err_bhal: + ath5k_hw_release_tx_queue(ah, sc->bhalq); +err_desc: + ath5k_desc_free(sc, pdev); +err: + return ret; +} + +static void +ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw) +{ + struct ath5k_softc *sc = hw->priv; + + /* + * NB: the order of these is important: + * o call the 802.11 layer before detaching ath5k_hw to + * insure callbacks into the driver to delete global + * key cache entries can be handled + * o reclaim the tx queue data structures after calling + * the 802.11 layer as we'll get called back to reclaim + * node state and potentially want to use them + * o to cleanup the tx queues the hal is called, so detach + * it last + * XXX: ??? detach ath5k_hw ??? + * Other than that, it's straightforward... + */ + ieee80211_unregister_hw(hw); + ath5k_desc_free(sc, pdev); + ath5k_txq_release(sc); + ath5k_hw_release_tx_queue(sc->ah, sc->bhalq); + ath5k_unregister_leds(sc); + + /* + * NB: can't reclaim these until after ieee80211_ifdetach + * returns because we'll get called back to reclaim node + * state and potentially want to use them. + */ +} + + + + +/********************\ +* Channel/mode setup * +\********************/ + +/* + * Convert IEEE channel number to MHz frequency. + */ +static inline short +ath5k_ieee2mhz(short chan) +{ + if (chan <= 14 || chan >= 27) + return ieee80211chan2mhz(chan); + else + return 2212 + chan * 20; +} + +/* + * Returns true for the channel numbers used without all_channels modparam. 
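ath5k_ieee2mhz() above special-cases channels 15-26 (2212 + chan * 20 MHz) and hands everything else to the ieee80211chan2mhz() helper. A standalone sketch; the 2.4/5 GHz mapping used for the helper here is an assumption based on the common IEEE numbering, not the exact compat header definition:

#include <stdio.h>

/* Stand-in for ieee80211chan2mhz(); the usual IEEE mapping is assumed. */
static short chan2mhz_common(short chan)
{
    if (chan == 14)
        return 2484;
    if (chan < 14)
        return 2407 + chan * 5;   /* 2.4 GHz channels 1..13 */
    return 5000 + chan * 5;       /* 5 GHz channel numbers */
}

/* Mirrors the branch structure of ath5k_ieee2mhz(). */
static short ieee2mhz(short chan)
{
    if (chan <= 14 || chan >= 27)
        return chan2mhz_common(chan);
    return 2212 + chan * 20;      /* channels 15..26 */
}

int main(void)
{
    short chans[] = { 1, 6, 14, 20, 36, 149 };
    unsigned int i;

    for (i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
        printf("chan %3d -> %d MHz\n", chans[i], ieee2mhz(chans[i]));
    return 0;
}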
+ */ +static bool ath5k_is_standard_channel(short chan) +{ + return ((chan <= 14) || + /* UNII 1,2 */ + ((chan & 3) == 0 && chan >= 36 && chan <= 64) || + /* midband */ + ((chan & 3) == 0 && chan >= 100 && chan <= 140) || + /* UNII-3 */ + ((chan & 3) == 1 && chan >= 149 && chan <= 165)); +} + +static unsigned int +ath5k_copy_channels(struct ath5k_hw *ah, + struct ieee80211_channel *channels, + unsigned int mode, + unsigned int max) +{ + unsigned int i, count, size, chfreq, freq, ch; + + if (!test_bit(mode, ah->ah_modes)) + return 0; + + switch (mode) { + case AR5K_MODE_11A: + case AR5K_MODE_11A_TURBO: + /* 1..220, but 2GHz frequencies are filtered by check_channel */ + size = 220 ; + chfreq = CHANNEL_5GHZ; + break; + case AR5K_MODE_11B: + case AR5K_MODE_11G: + case AR5K_MODE_11G_TURBO: + size = 26; + chfreq = CHANNEL_2GHZ; + break; + default: + ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n"); + return 0; + } + + for (i = 0, count = 0; i < size && max > 0; i++) { + ch = i + 1 ; + freq = ath5k_ieee2mhz(ch); + + /* Check if channel is supported by the chipset */ + if (!ath5k_channel_ok(ah, freq, chfreq)) + continue; + + if (!modparam_all_channels && !ath5k_is_standard_channel(ch)) + continue; + + /* Write channel info and increment counter */ + channels[count].center_freq = freq; + channels[count].band = (chfreq == CHANNEL_2GHZ) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + switch (mode) { + case AR5K_MODE_11A: + case AR5K_MODE_11G: + channels[count].hw_value = chfreq | CHANNEL_OFDM; + break; + case AR5K_MODE_11A_TURBO: + case AR5K_MODE_11G_TURBO: + channels[count].hw_value = chfreq | + CHANNEL_OFDM | CHANNEL_TURBO; + break; + case AR5K_MODE_11B: + channels[count].hw_value = CHANNEL_B; + } + + count++; + max--; + } + + return count; +} + +static void +ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b) +{ + u8 i; + + for (i = 0; i < AR5K_MAX_RATES; i++) + sc->rate_idx[b->band][i] = -1; + + for (i = 0; i < b->n_bitrates; i++) { + sc->rate_idx[b->band][b->bitrates[i].hw_value] = i; + if (b->bitrates[i].hw_value_short) + sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i; + } +} + +static int +ath5k_setup_bands(struct ieee80211_hw *hw) +{ + struct ath5k_softc *sc = hw->priv; + struct ath5k_hw *ah = sc->ah; + struct ieee80211_supported_band *sband; + int max_c, count_c = 0; + int i; + + BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS); + max_c = ARRAY_SIZE(sc->channels); + + /* 2GHz band */ + sband = &sc->sbands[IEEE80211_BAND_2GHZ]; + sband->band = IEEE80211_BAND_2GHZ; + sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0]; + + if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) { + /* G mode */ + memcpy(sband->bitrates, &ath5k_rates[0], + sizeof(struct ieee80211_rate) * 12); + sband->n_bitrates = 12; + + sband->channels = sc->channels; + sband->n_channels = ath5k_copy_channels(ah, sband->channels, + AR5K_MODE_11G, max_c); + + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + count_c = sband->n_channels; + max_c -= count_c; + } else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) { + /* B mode */ + memcpy(sband->bitrates, &ath5k_rates[0], + sizeof(struct ieee80211_rate) * 4); + sband->n_bitrates = 4; + + /* 5211 only supports B rates and uses 4bit rate codes + * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B) + * fix them up here: + */ + if (ah->ah_version == AR5K_AR5211) { + for (i = 0; i < 4; i++) { + sband->bitrates[i].hw_value = + sband->bitrates[i].hw_value & 0xF; + 
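The (chan & 3) masks in ath5k_is_standard_channel() above keep only the 20 MHz-spaced channel numbers in each 5 GHz block: multiples of four for UNII-1/2 and the midband, remainder one for UNII-3. A quick sketch that prints exactly the channels the filter accepts:

#include <stdio.h>
#include <stdbool.h>

/* Same tests as ath5k_is_standard_channel(). */
static bool is_standard_channel(short chan)
{
    return (chan <= 14) ||
           ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||   /* UNII-1/2 */
           ((chan & 3) == 0 && chan >= 100 && chan <= 140) || /* midband */
           ((chan & 3) == 1 && chan >= 149 && chan <= 165);   /* UNII-3 */
}

int main(void)
{
    short chan;

    for (chan = 34; chan <= 165; chan++)
        if (is_standard_channel(chan))
            printf("%d ", chan);
    printf("\n");   /* 36 40 ... 64 100 ... 140 149 153 157 161 165 */
    return 0;
}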
sband->bitrates[i].hw_value_short = + sband->bitrates[i].hw_value_short & 0xF; + } + } + + sband->channels = sc->channels; + sband->n_channels = ath5k_copy_channels(ah, sband->channels, + AR5K_MODE_11B, max_c); + + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + count_c = sband->n_channels; + max_c -= count_c; + } + ath5k_setup_rate_idx(sc, sband); + + /* 5GHz band, A mode */ + if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) { + sband = &sc->sbands[IEEE80211_BAND_5GHZ]; + sband->band = IEEE80211_BAND_5GHZ; + sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0]; + + memcpy(sband->bitrates, &ath5k_rates[4], + sizeof(struct ieee80211_rate) * 8); + sband->n_bitrates = 8; + + sband->channels = &sc->channels[count_c]; + sband->n_channels = ath5k_copy_channels(ah, sband->channels, + AR5K_MODE_11A, max_c); + + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; + } + ath5k_setup_rate_idx(sc, sband); + + ath5k_debug_dump_bands(sc); + + return 0; +} + +/* + * Set/change channels. We always reset the chip. + * To accomplish this we must first cleanup any pending DMA, + * then restart stuff after a la ath5k_init. + * + * Called with sc->lock. + */ +static int +ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) +{ + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n", + sc->curchan->center_freq, chan->center_freq); + + /* + * To switch channels clear any pending DMA operations; + * wait long enough for the RX fifo to drain, reset the + * hardware at the new frequency, and then re-enable + * the relevant bits of the h/w. + */ + return ath5k_reset(sc, chan); +} + +static void +ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode) +{ + sc->curmode = mode; + + if (mode == AR5K_MODE_11A) { + sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ]; + } else { + sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ]; + } +} + +static void +ath5k_mode_setup(struct ath5k_softc *sc) +{ + struct ath5k_hw *ah = sc->ah; + u32 rfilt; + + ah->ah_op_mode = sc->opmode; + + /* configure rx filter */ + rfilt = sc->filter_flags; + ath5k_hw_set_rx_filter(ah, rfilt); + + if (ath5k_hw_hasbssidmask(ah)) + ath5k_hw_set_bssid_mask(ah, sc->bssidmask); + + /* configure operational mode */ + ath5k_hw_set_opmode(ah); + + ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); +} + +static inline int +ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) +{ + int rix; + + /* return base rate on errors */ + if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES, + "hw_rix out of bounds: %x\n", hw_rix)) + return 0; + + rix = sc->rate_idx[sc->curband->band][hw_rix]; + if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix)) + rix = 0; + + return rix; +} + +/***************\ +* Buffers setup * +\***************/ + +static +struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr) +{ + struct ath_common *common = ath5k_hw_common(sc->ah); + struct sk_buff *skb; + + /* + * Allocate buffer with headroom_needed space for the + * fake physical layer header at the start. 
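ath5k_setup_rate_idx() builds a per-band reverse map from hardware rate code back to bitrate table index, and ath5k_hw_to_driver_rix() above reads it with bounds checks, falling back to the base rate. A minimal sketch of the same table, using placeholder rate codes:

#include <stdio.h>

#define MAX_HW_CODES 32              /* stands in for AR5K_MAX_RATES */

int main(void)
{
    /* Placeholder hw rate codes for a tiny three-entry bitrate table */
    unsigned int hw_value[] = { 0x1b, 0x1a, 0x0b };
    int rate_idx[MAX_HW_CODES];
    unsigned int i;
    int hw_rix;

    /* Build the reverse map, as ath5k_setup_rate_idx() does per band */
    for (i = 0; i < MAX_HW_CODES; i++)
        rate_idx[i] = -1;
    for (i = 0; i < sizeof(hw_value) / sizeof(hw_value[0]); i++)
        rate_idx[hw_value[i]] = i;

    /* Bounds-checked lookup, falling back to the base rate (index 0)
     * in the spirit of ath5k_hw_to_driver_rix() */
    hw_rix = 0x0b;
    if (hw_rix < 0 || hw_rix >= MAX_HW_CODES || rate_idx[hw_rix] < 0)
        printf("hw code 0x%x -> base rate 0\n", hw_rix);
    else
        printf("hw code 0x%x -> bitrate index %d\n", hw_rix, rate_idx[hw_rix]);
    return 0;
}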
+ */ + skb = ath_rxbuf_alloc(common, + common->rx_bufsize, + GFP_ATOMIC); + + if (!skb) { + ATH5K_ERR(sc, "can't alloc skbuff of size %u\n", + common->rx_bufsize); + return NULL; + } + + *skb_addr = pci_map_single(sc->pdev, + skb->data, common->rx_bufsize, + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) { + ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); + dev_kfree_skb(skb); + return NULL; + } + return skb; +} + +static int +ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) +{ + struct ath5k_hw *ah = sc->ah; + struct sk_buff *skb = bf->skb; + struct ath5k_desc *ds; + + if (!skb) { + skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); + if (!skb) + return -ENOMEM; + bf->skb = skb; + } + + /* + * Setup descriptors. For receive we always terminate + * the descriptor list with a self-linked entry so we'll + * not get overrun under high load (as can happen with a + * 5212 when ANI processing enables PHY error frames). + * + * To insure the last descriptor is self-linked we create + * each descriptor as self-linked and add it to the end. As + * each additional descriptor is added the previous self-linked + * entry is ``fixed'' naturally. This should be safe even + * if DMA is happening. When processing RX interrupts we + * never remove/process the last, self-linked, entry on the + * descriptor list. This insures the hardware always has + * someplace to write a new frame. + */ + ds = bf->desc; + ds->ds_link = bf->daddr; /* link to self */ + ds->ds_data = bf->skbaddr; + ah->ah_setup_rx_desc(ah, ds, + skb_tailroom(skb), /* buffer size */ + 0); + + if (sc->rxlink != NULL) + *sc->rxlink = bf->daddr; + sc->rxlink = &ds->ds_link; + return 0; +} + +static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + enum ath5k_pkt_type htype; + __le16 fc; + + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + + if (ieee80211_is_beacon(fc)) + htype = AR5K_PKT_TYPE_BEACON; + else if (ieee80211_is_probe_resp(fc)) + htype = AR5K_PKT_TYPE_PROBE_RESP; + else if (ieee80211_is_atim(fc)) + htype = AR5K_PKT_TYPE_ATIM; + else if (ieee80211_is_pspoll(fc)) + htype = AR5K_PKT_TYPE_PSPOLL; + else + htype = AR5K_PKT_TYPE_NORMAL; + + return htype; +} + +static int +ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, + struct ath5k_txq *txq) +{ + struct ath5k_hw *ah = sc->ah; + struct ath5k_desc *ds = bf->desc; + struct sk_buff *skb = bf->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID; + struct ieee80211_rate *rate; + unsigned int mrr_rate[3], mrr_tries[3]; + int i, ret; + u16 hw_rate; + u16 cts_rate = 0; + u16 duration = 0; + u8 rc_flags; + + flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; + + /* XXX endianness */ + bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + + rate = ieee80211_get_tx_rate(sc->hw, info); + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + flags |= AR5K_TXDESC_NOACK; + + rc_flags = info->control.rates[0].flags; + hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ? 
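The comment in ath5k_rxbuf_setup() above describes terminating the RX list with a self-linked descriptor so the DMA engine always has somewhere to write; appending the next buffer naturally turns the old tail into a normal forward link. A toy model of that invariant, using array indices in place of DMA addresses:

#include <stdio.h>

struct toy_desc {
    int link;    /* index of the next descriptor; the tail points at itself */
    int data;    /* stand-in for the rx buffer address */
};

int main(void)
{
    struct toy_desc ring[4];
    int tail = -1;   /* index of the current self-linked tail, -1 = empty */
    int i;

    for (i = 0; i < 4; i++) {
        ring[i].data = 1000 + i;
        ring[i].link = i;              /* new entry links to itself */
        if (tail >= 0)
            ring[tail].link = i;       /* previous tail now points forward */
        tail = i;
    }

    for (i = 0; i < 4; i++)
        printf("desc %d -> link %d%s\n", i, ring[i].link,
               ring[i].link == i ? "  (self-linked tail)" : "");
    return 0;
}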
+ rate->hw_value_short : rate->hw_value; + + pktlen = skb->len; + + /* FIXME: If we are in g mode and rate is a CCK rate + * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta + * from tx power (value is in dB units already) */ + if (info->control.hw_key) { + keyidx = info->control.hw_key->hw_key_idx; + pktlen += info->control.hw_key->icv_len; + } + if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { + flags |= AR5K_TXDESC_RTSENA; + cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value; + duration = le16_to_cpu(ieee80211_rts_duration(sc->hw, + sc->vif, pktlen, info)); + } + if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + flags |= AR5K_TXDESC_CTSENA; + cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value; + duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw, + sc->vif, pktlen, info)); + } + ret = ah->ah_setup_tx_desc(ah, ds, pktlen, + ieee80211_get_hdrlen_from_skb(skb), + get_hw_packet_type(skb), + (sc->power_level * 2), + hw_rate, + info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, + cts_rate, duration); + if (ret) + goto err_unmap; + + memset(mrr_rate, 0, sizeof(mrr_rate)); + memset(mrr_tries, 0, sizeof(mrr_tries)); + for (i = 0; i < 3; i++) { + rate = ieee80211_get_alt_retry_rate(sc->hw, info, i); + if (!rate) + break; + + mrr_rate[i] = rate->hw_value; + mrr_tries[i] = info->control.rates[i + 1].count; + } + + ah->ah_setup_mrr_tx_desc(ah, ds, + mrr_rate[0], mrr_tries[0], + mrr_rate[1], mrr_tries[1], + mrr_rate[2], mrr_tries[2]); + + ds->ds_link = 0; + ds->ds_data = bf->skbaddr; + + spin_lock_bh(&txq->lock); + list_add_tail(&bf->list, &txq->q); + if (txq->link == NULL) /* is this first packet? */ + ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); + else /* no, so only link it */ + *txq->link = bf->daddr; + + txq->link = &ds->ds_link; + ath5k_hw_start_tx_dma(ah, txq->qnum); + mmiowb(); + spin_unlock_bh(&txq->lock); + + return 0; +err_unmap: + pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); + return ret; +} + +/*******************\ +* Descriptors setup * +\*******************/ + +static int +ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev) +{ + struct ath5k_desc *ds; + struct ath5k_buf *bf; + dma_addr_t da; + unsigned int i; + int ret; + + /* allocate descriptors */ + sc->desc_len = sizeof(struct ath5k_desc) * + (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1); + sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr); + if (sc->desc == NULL) { + ATH5K_ERR(sc, "can't allocate descriptors\n"); + ret = -ENOMEM; + goto err; + } + ds = sc->desc; + da = sc->desc_daddr; + ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n", + ds, sc->desc_len, (unsigned long long)sc->desc_daddr); + + bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF, + sizeof(struct ath5k_buf), GFP_KERNEL); + if (bf == NULL) { + ATH5K_ERR(sc, "can't allocate bufptr\n"); + ret = -ENOMEM; + goto err_free; + } + sc->bufptr = bf; + + INIT_LIST_HEAD(&sc->rxbuf); + for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) { + bf->desc = ds; + bf->daddr = da; + list_add_tail(&bf->list, &sc->rxbuf); + } + + INIT_LIST_HEAD(&sc->txbuf); + sc->txbuf_len = ATH_TXBUF; + for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, + da += sizeof(*ds)) { + bf->desc = ds; + bf->daddr = da; + list_add_tail(&bf->list, &sc->txbuf); + } + + /* beacon buffer */ + bf->desc = ds; + bf->daddr = da; + sc->bbuf = bf; + + return 0; +err_free: + pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); +err: + sc->desc = NULL; + return ret; +} + +static void +ath5k_desc_free(struct 
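ath5k_desc_alloc() above carves a single DMA-coherent allocation into per-buffer descriptors by walking the CPU pointer and the bus address with the same stride. A user-space sketch of that carving, with malloc() standing in for pci_alloc_consistent() and an invented base value playing the bus-address role:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct toy_desc { uint32_t ds_link, ds_data; };

int main(void)
{
    unsigned int ndesc = 2 + 3 + 1;          /* rx + tx + beacon, for example */
    struct toy_desc *ds = calloc(ndesc, sizeof(*ds));
    uint64_t da = 0x1f000000;                /* pretend bus address of the block */
    unsigned int i;

    if (!ds)
        return 1;

    /* Each buffer records both views of "its" descriptor, advancing
     * the CPU pointer and the bus address in lock step. */
    for (i = 0; i < ndesc; i++, da += sizeof(*ds))
        printf("buf %u: desc %p  daddr 0x%llx\n",
               i, (void *)&ds[i], (unsigned long long)da);

    free(ds);
    return 0;
}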
ath5k_softc *sc, struct pci_dev *pdev) +{ + struct ath5k_buf *bf; + + ath5k_txbuf_free(sc, sc->bbuf); + list_for_each_entry(bf, &sc->txbuf, list) + ath5k_txbuf_free(sc, bf); + list_for_each_entry(bf, &sc->rxbuf, list) + ath5k_rxbuf_free(sc, bf); + + /* Free memory associated with all descriptors */ + pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); + + kfree(sc->bufptr); + sc->bufptr = NULL; +} + + + + + +/**************\ +* Queues setup * +\**************/ + +static struct ath5k_txq * +ath5k_txq_setup(struct ath5k_softc *sc, + int qtype, int subtype) +{ + struct ath5k_hw *ah = sc->ah; + struct ath5k_txq *txq; + struct ath5k_txq_info qi = { + .tqi_subtype = subtype, + .tqi_aifs = AR5K_TXQ_USEDEFAULT, + .tqi_cw_min = AR5K_TXQ_USEDEFAULT, + .tqi_cw_max = AR5K_TXQ_USEDEFAULT + }; + int qnum; + + /* + * Enable interrupts only for EOL and DESC conditions. + * We mark tx descriptors to receive a DESC interrupt + * when a tx queue gets deep; otherwise waiting for the + * EOL to reap descriptors. Note that this is done to + * reduce interrupt load and this only defers reaping + * descriptors, never transmitting frames. Aside from + * reducing interrupts this also permits more concurrency. + * The only potential downside is if the tx queue backs + * up in which case the top half of the kernel may backup + * due to a lack of tx descriptors. + */ + qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE | + AR5K_TXQ_FLAG_TXDESCINT_ENABLE; + qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi); + if (qnum < 0) { + /* + * NB: don't print a message, this happens + * normally on parts with too few tx queues + */ + return ERR_PTR(qnum); + } + if (qnum >= ARRAY_SIZE(sc->txqs)) { + ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n", + qnum, ARRAY_SIZE(sc->txqs)); + ath5k_hw_release_tx_queue(ah, qnum); + return ERR_PTR(-EINVAL); + } + txq = &sc->txqs[qnum]; + if (!txq->setup) { + txq->qnum = qnum; + txq->link = NULL; + INIT_LIST_HEAD(&txq->q); + spin_lock_init(&txq->lock); + txq->setup = true; + } + return &sc->txqs[qnum]; +} + +static int +ath5k_beaconq_setup(struct ath5k_hw *ah) +{ + struct ath5k_txq_info qi = { + .tqi_aifs = AR5K_TXQ_USEDEFAULT, + .tqi_cw_min = AR5K_TXQ_USEDEFAULT, + .tqi_cw_max = AR5K_TXQ_USEDEFAULT, + /* NB: for dynamic turbo, don't enable any other interrupts */ + .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE + }; + + return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi); +} + +static int +ath5k_beaconq_config(struct ath5k_softc *sc) +{ + struct ath5k_hw *ah = sc->ah; + struct ath5k_txq_info qi; + int ret; + + ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi); + if (ret) + goto err; + + if (sc->opmode == NL80211_IFTYPE_AP || + sc->opmode == NL80211_IFTYPE_MESH_POINT) { + /* + * Always burst out beacon and CAB traffic + * (aifs = cwmin = cwmax = 0) + */ + qi.tqi_aifs = 0; + qi.tqi_cw_min = 0; + qi.tqi_cw_max = 0; + } else if (sc->opmode == NL80211_IFTYPE_ADHOC) { + /* + * Adhoc mode; backoff between 0 and (2 * cw_min). 
+ */ + qi.tqi_aifs = 0; + qi.tqi_cw_min = 0; + qi.tqi_cw_max = 2 * ah->ah_cw_min; + } + + ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, + "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n", + qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max); + + ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi); + if (ret) { + ATH5K_ERR(sc, "%s: unable to update parameters for beacon " + "hardware queue!\n", __func__); + goto err; + } + ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */ + if (ret) + goto err; + + /* reconfigure cabq with ready time to 80% of beacon_interval */ + ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi); + if (ret) + goto err; + + qi.tqi_ready_time = (sc->bintval * 80) / 100; + ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi); + if (ret) + goto err; + + ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB); +err: + return ret; +} + +static void +ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq) +{ + struct ath5k_buf *bf, *bf0; + + /* + * NB: this assumes output has been stopped and + * we do not need to block ath5k_tx_tasklet + */ + spin_lock_bh(&txq->lock); + list_for_each_entry_safe(bf, bf0, &txq->q, list) { + ath5k_debug_printtxbuf(sc, bf); + + ath5k_txbuf_free(sc, bf); + + spin_lock_bh(&sc->txbuflock); + list_move_tail(&bf->list, &sc->txbuf); + sc->txbuf_len++; + spin_unlock_bh(&sc->txbuflock); + } + txq->link = NULL; + spin_unlock_bh(&txq->lock); +} + +/* + * Drain the transmit queues and reclaim resources. + */ +static void +ath5k_txq_cleanup(struct ath5k_softc *sc) +{ + struct ath5k_hw *ah = sc->ah; + unsigned int i; + + /* XXX return value */ + if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) { + /* don't touch the hardware if marked invalid */ + ath5k_hw_stop_tx_dma(ah, sc->bhalq); + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n", + ath5k_hw_get_txdp(ah, sc->bhalq)); + for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) + if (sc->txqs[i].setup) { + ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum); + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, " + "link %p\n", + sc->txqs[i].qnum, + ath5k_hw_get_txdp(ah, + sc->txqs[i].qnum), + sc->txqs[i].link); + } + } + ieee80211_wake_queues(sc->hw); /* XXX move to callers */ + + for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) + if (sc->txqs[i].setup) + ath5k_txq_drainq(sc, &sc->txqs[i]); +} + +static void +ath5k_txq_release(struct ath5k_softc *sc) +{ + struct ath5k_txq *txq = sc->txqs; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++) + if (txq->setup) { + ath5k_hw_release_tx_queue(sc->ah, txq->qnum); + txq->setup = false; + } +} + + + + +/*************\ +* RX Handling * +\*************/ + +/* + * Enable the receive h/w following a reset. + */ +static int +ath5k_rx_start(struct ath5k_softc *sc) +{ + struct ath5k_hw *ah = sc->ah; + struct ath_common *common = ath5k_hw_common(ah); + struct ath5k_buf *bf; + int ret; + + common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz); + + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n", + common->cachelsz, common->rx_bufsize); + + spin_lock_bh(&sc->rxbuflock); + sc->rxlink = NULL; + list_for_each_entry(bf, &sc->rxbuf, list) { + ret = ath5k_rxbuf_setup(sc, bf); + if (ret != 0) { + spin_unlock_bh(&sc->rxbuflock); + goto err; + } + } + bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); + ath5k_hw_set_rxdp(ah, bf->daddr); + spin_unlock_bh(&sc->rxbuflock); + + ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */ + ath5k_mode_setup(sc); /* set filters, etc. 
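ath5k_rx_start() above rounds the RX buffer size up to a whole number of cache lines. A sketch of the roundup() arithmetic; the frame-length and cache-line values are placeholders rather than the real IEEE80211_MAX_LEN and cachelsz:

#include <stdio.h>

/* Kernel-style roundup(): smallest multiple of y that is >= x */
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
    unsigned int max_len = 2352;     /* placeholder maximum frame length */
    unsigned int cachelsz = 32;      /* placeholder cache line size in bytes */

    printf("rx_bufsize = %u\n", ROUNDUP(max_len, cachelsz));  /* -> 2368 here */
    return 0;
}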
*/ + ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ + + return 0; +err: + return ret; +} + +/* + * Disable the receive h/w in preparation for a reset. + */ +static void +ath5k_rx_stop(struct ath5k_softc *sc) +{ + struct ath5k_hw *ah = sc->ah; + + ath5k_hw_stop_rx_pcu(ah); /* disable PCU */ + ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ + ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ + + ath5k_debug_printrxbuffs(sc, ah); + + sc->rxlink = NULL; /* just in case */ +} + +static unsigned int +ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds, + struct sk_buff *skb, struct ath5k_rx_status *rs) +{ + struct ath5k_hw *ah = sc->ah; + struct ath_common *common = ath5k_hw_common(ah); + struct ieee80211_hdr *hdr = (void *)skb->data; + unsigned int keyix, hlen; + + if (!(rs->rs_status & AR5K_RXERR_DECRYPT) && + rs->rs_keyix != AR5K_RXKEYIX_INVALID) + return RX_FLAG_DECRYPTED; + + /* Apparently when a default key is used to decrypt the packet + the hw does not set the index used to decrypt. In such cases + get the index from the packet. */ + hlen = ieee80211_hdrlen(hdr->frame_control); + if (ieee80211_has_protected(hdr->frame_control) && + !(rs->rs_status & AR5K_RXERR_DECRYPT) && + skb->len >= hlen + 4) { + keyix = skb->data[hlen + 3] >> 6; + + if (test_bit(keyix, common->keymap)) + return RX_FLAG_DECRYPTED; + } + + return 0; +} + + +static void +ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb, + struct ieee80211_rx_status *rxs) +{ + struct ath_common *common = ath5k_hw_common(sc->ah); + u64 tsf, bc_tstamp; + u32 hw_tu; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + + if (ieee80211_is_beacon(mgmt->frame_control) && + le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS && + memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) { + /* + * Received an IBSS beacon with the same BSSID. Hardware *must* + * have updated the local TSF. We have to work around various + * hardware bugs, though... + */ + tsf = ath5k_hw_get_tsf64(sc->ah); + bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp); + hw_tu = TSF_TO_TU(tsf); + + ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, + "beacon %llx mactime %llx (diff %lld) tsf now %llx\n", + (unsigned long long)bc_tstamp, + (unsigned long long)rxs->mactime, + (unsigned long long)(rxs->mactime - bc_tstamp), + (unsigned long long)tsf); + + /* + * Sometimes the HW will give us a wrong tstamp in the rx + * status, causing the timestamp extension to go wrong. + * (This seems to happen especially with beacon frames bigger + * than 78 byte (incl. FCS)) + * But we know that the receive timestamp must be later than the + * timestamp of the beacon since HW must have synced to that. + * + * NOTE: here we assume mactime to be after the frame was + * received, not like mac80211 which defines it at the start. + */ + if (bc_tstamp > rxs->mactime) { + ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, + "fixing mactime from %llx to %llx\n", + (unsigned long long)rxs->mactime, + (unsigned long long)tsf); + rxs->mactime = tsf; + } + + /* + * Local TSF might have moved higher than our beacon timers, + * in that case we have to update them to continue sending + * beacons. This also takes care of synchronizing beacon sending + * times with other stations. 
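When the hardware does not report which key decrypted a frame, ath5k_rx_decrypted() above pulls the default key index out of the top two bits of the fourth IV octet, i.e. skb->data[hlen + 3] >> 6. A standalone sketch with a fake frame buffer and an assumed 24-byte header:

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char frame[32];
    unsigned int hlen = 24;   /* a typical data-frame header length */
    unsigned int keyix;

    memset(frame, 0, sizeof(frame));
    /* The key id sits in the top two bits of the fourth IV octet,
     * which is the byte ath5k_rx_decrypted() reads at hlen + 3. */
    frame[hlen + 3] = 2 << 6;     /* pretend the sender used key id 2 */

    keyix = frame[hlen + 3] >> 6;
    printf("default key index from IV: %u\n", keyix);
    return 0;
}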
+ */ + if (hw_tu >= sc->nexttbtt) + ath5k_beacon_update_timers(sc, bc_tstamp); + } +} + +static void +ath5k_tasklet_rx(unsigned long data) +{ + struct ieee80211_rx_status *rxs; + struct ath5k_rx_status rs = {}; + struct sk_buff *skb, *next_skb; + dma_addr_t next_skb_addr; + struct ath5k_softc *sc = (void *)data; + struct ath5k_hw *ah = sc->ah; + struct ath_common *common = ath5k_hw_common(ah); + struct ath5k_buf *bf; + struct ath5k_desc *ds; + int ret; + int hdrlen; + int padsize; + int rx_flag; + + spin_lock(&sc->rxbuflock); + if (list_empty(&sc->rxbuf)) { + ATH5K_WARN(sc, "empty rx buf pool\n"); + goto unlock; + } + do { + rx_flag = 0; + + bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); + BUG_ON(bf->skb == NULL); + skb = bf->skb; + ds = bf->desc; + + /* bail if HW is still using self-linked descriptor */ + if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr) + break; + + ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs); + if (unlikely(ret == -EINPROGRESS)) + break; + else if (unlikely(ret)) { + ATH5K_ERR(sc, "error in processing rx descriptor\n"); + spin_unlock(&sc->rxbuflock); + return; + } + + if (unlikely(rs.rs_more)) { + ATH5K_WARN(sc, "unsupported jumbo\n"); + goto next; + } + + if (unlikely(rs.rs_status)) { + if (rs.rs_status & AR5K_RXERR_PHY) + goto next; + if (rs.rs_status & AR5K_RXERR_DECRYPT) { + /* + * Decrypt error. If the error occurred + * because there was no hardware key, then + * let the frame through so the upper layers + * can process it. This is necessary for 5210 + * parts which have no way to setup a ``clear'' + * key cache entry. + * + * XXX do key cache faulting + */ + if (rs.rs_keyix == AR5K_RXKEYIX_INVALID && + !(rs.rs_status & AR5K_RXERR_CRC)) + goto accept; + } + if (rs.rs_status & AR5K_RXERR_MIC) { + rx_flag |= RX_FLAG_MMIC_ERROR; + goto accept; + } + + /* let crypto-error packets fall through in MNTR */ + if ((rs.rs_status & + ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || + sc->opmode != NL80211_IFTYPE_MONITOR) + goto next; + } +accept: + next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr); + + /* + * If we can't replace bf->skb with a new skb under memory + * pressure, just skip this packet + */ + if (!next_skb) + goto next; + + pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize, + PCI_DMA_FROMDEVICE); + skb_put(skb, rs.rs_datalen); + + /* The MAC header is padded to have 32-bit boundary if the + * packet payload is non-zero. The general calculation for + * padsize would take into account odd header lengths: + * padsize = (4 - hdrlen % 4) % 4; However, since only + * even-length headers are used, padding can only be 0 or 2 + * bytes and we can optimize this a bit. In addition, we must + * not try to remove padding from short control frames that do + * not have payload. */ + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + padsize = ath5k_pad_size(hdrlen); + if (padsize) { + memmove(skb->data + padsize, skb->data, hdrlen); + skb_pull(skb, padsize); + } + rxs = IEEE80211_SKB_RXCB(skb); + + /* + * always extend the mac timestamp, since this information is + * also needed for proper IBSS merging. + * + * XXX: it might be too late to do it here, since rs_tstamp is + * 15bit only. that means TSF extension has to be done within + * 32768usec (about 32ms). it might be necessary to move this to + * the interrupt handler, like it is done in madwifi. + * + * Unfortunately we don't know when the hardware takes the rx + * timestamp (beginning of phy frame, data frame, end of rx?). + * The only thing we know is that it is hardware specific... 
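The padding comment in the RX tasklet above gives the general rule padsize = (4 - hdrlen % 4) % 4, which for the even 802.11 header lengths comes out as 0 or 2 bytes. A quick sketch over the usual header sizes; the real ath5k_pad_size() helper may special-case short frames, so treat this as the arithmetic only:

#include <stdio.h>

/* General form from the comment above; even header lengths give 0 or 2. */
static int pad_size(int hdrlen)
{
    return (4 - hdrlen % 4) % 4;
}

int main(void)
{
    int hdrlens[] = { 24, 26, 30, 32 };   /* data, QoS data, 4-addr, 4-addr QoS */
    unsigned int i;

    for (i = 0; i < sizeof(hdrlens) / sizeof(hdrlens[0]); i++)
        printf("hdrlen %2d -> pad %d\n", hdrlens[i], pad_size(hdrlens[i]));
    return 0;
}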
+ * On AR5213 it seems the rx timestamp is at the end of the + * frame, but i'm not sure. + * + * NOTE: mac80211 defines mactime at the beginning of the first + * data symbol. Since we don't have any time references it's + * impossible to comply to that. This affects IBSS merge only + * right now, so it's not too bad... + */ + rxs->mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp); + rxs->flag = rx_flag | RX_FLAG_TSFT; + + rxs->freq = sc->curchan->center_freq; + rxs->band = sc->curband->band; + + rxs->noise = sc->ah->ah_noise_floor; + rxs->signal = rxs->noise + rs.rs_rssi; + + rxs->antenna = rs.rs_antenna; + rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); + rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); + + if (rxs->rate_idx >= 0 && rs.rs_rate == + sc->curband->bitrates[rxs->rate_idx].hw_value_short) + rxs->flag |= RX_FLAG_SHORTPRE; + + ath5k_debug_dump_skb(sc, skb, "RX ", 0); + + /* check beacons in IBSS mode */ + if (sc->opmode == NL80211_IFTYPE_ADHOC) + ath5k_check_ibss_tsf(sc, skb, rxs); + + ieee80211_rx(sc->hw, skb); + + bf->skb = next_skb; + bf->skbaddr = next_skb_addr; +next: + list_move_tail(&bf->list, &sc->rxbuf); + } while (ath5k_rxbuf_setup(sc, bf) == 0); +unlock: + spin_unlock(&sc->rxbuflock); +} + + + + +/*************\ +* TX Handling * +\*************/ + +static void +ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq) +{ + struct ath5k_tx_status ts = {}; + struct ath5k_buf *bf, *bf0; + struct ath5k_desc *ds; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + int i, ret; + + spin_lock(&txq->lock); + list_for_each_entry_safe(bf, bf0, &txq->q, list) { + ds = bf->desc; + + ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts); + if (unlikely(ret == -EINPROGRESS)) + break; + else if (unlikely(ret)) { + ATH5K_ERR(sc, "error %d while processing queue %u\n", + ret, txq->qnum); + break; + } + + skb = bf->skb; + info = IEEE80211_SKB_CB(skb); + bf->skb = NULL; + + pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, + PCI_DMA_TODEVICE); + + ieee80211_tx_info_clear_status(info); + for (i = 0; i < 4; i++) { + struct ieee80211_tx_rate *r = + &info->status.rates[i]; + + if (ts.ts_rate[i]) { + r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]); + r->count = ts.ts_retry[i]; + } else { + r->idx = -1; + r->count = 0; + } + } + + /* count the successful attempt as well */ + info->status.rates[ts.ts_final_idx].count++; + + if (unlikely(ts.ts_status)) { + sc->ll_stats.dot11ACKFailureCount++; + if (ts.ts_status & AR5K_TXERR_FILT) + info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + } else { + info->flags |= IEEE80211_TX_STAT_ACK; + info->status.ack_signal = ts.ts_rssi; + } + + ieee80211_tx_status(sc->hw, skb); + + spin_lock(&sc->txbuflock); + list_move_tail(&bf->list, &sc->txbuf); + sc->txbuf_len++; + spin_unlock(&sc->txbuflock); + } + if (likely(list_empty(&txq->q))) + txq->link = NULL; + spin_unlock(&txq->lock); + if (sc->txbuf_len > ATH_TXBUF / 5) + ieee80211_wake_queues(sc->hw); +} + +static void +ath5k_tasklet_tx(unsigned long data) +{ + int i; + struct ath5k_softc *sc = (void *)data; + + for (i=0; i < AR5K_NUM_TX_QUEUES; i++) + if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i))) + ath5k_tx_processq(sc, &sc->txqs[i]); +} + + +/*****************\ +* Beacon handling * +\*****************/ + +/* + * Setup the beacon frame for transmit. 
+ */ +static int +ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) +{ + struct sk_buff *skb = bf->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ath5k_hw *ah = sc->ah; + struct ath5k_desc *ds; + int ret = 0; + u8 antenna; + u32 flags; + + bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " + "skbaddr %llx\n", skb, skb->data, skb->len, + (unsigned long long)bf->skbaddr); + if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { + ATH5K_ERR(sc, "beacon DMA mapping failed\n"); + return -EIO; + } + + ds = bf->desc; + antenna = ah->ah_tx_ant; + + flags = AR5K_TXDESC_NOACK; + if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) { + ds->ds_link = bf->daddr; /* self-linked */ + flags |= AR5K_TXDESC_VEOL; + } else + ds->ds_link = 0; + + /* + * If we use multiple antennas on AP and use + * the Sectored AP scenario, switch antenna every + * 4 beacons to make sure everybody hears our AP. + * When a client tries to associate, hw will keep + * track of the tx antenna to be used for this client + * automatically, based on ACKed packets. + * + * Note: AP still listens and transmits RTS on the + * default antenna which is supposed to be an omni. + * + * Note2: On sectored scenarios it's possible to have + * multiple antennas (1 omni -the default- and 14 sectors) + * so if we choose to actually support this mode we need + * to allow the user to set how many antennas we have and tweak + * the code below to send beacons on all of them. + */ + if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP) + antenna = sc->bsent & 4 ? 2 : 1; + + + /* FIXME: If we are in g mode and rate is a CCK rate + * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta + * from tx power (value is in dB units already) */ + ds->ds_data = bf->skbaddr; + ret = ah->ah_setup_tx_desc(ah, ds, skb->len, + ieee80211_get_hdrlen_from_skb(skb), + AR5K_PKT_TYPE_BEACON, (sc->power_level * 2), + ieee80211_get_tx_rate(sc->hw, info)->hw_value, + 1, AR5K_TXKEYIX_INVALID, + antenna, flags, 0, 0); + if (ret) + goto err_unmap; + + return 0; +err_unmap: + pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); + return ret; +} + +/* + * Transmit a beacon frame at SWBA. Dynamic updates to the + * frame contents are done as needed and the slot time is + * also adjusted based on current state. + * + * This is called from software irq context (beacontq or restq + * tasklets) or user context from ath5k_beacon_config. + */ +static void +ath5k_beacon_send(struct ath5k_softc *sc) +{ + struct ath5k_buf *bf = sc->bbuf; + struct ath5k_hw *ah = sc->ah; + struct sk_buff *skb; + + ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n"); + + if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION || + sc->opmode == NL80211_IFTYPE_MONITOR)) { + ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); + return; + } + /* + * Check if the previous beacon has gone out. If + * not, don't try to post another, skip this + * period and wait for the next. Missed beacons + * indicate a problem and should not occur. If we + * miss too many consecutive beacons, reset the device. 
+ */ + if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) { + sc->bmisscount++; + ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, + "missed %u consecutive beacons\n", sc->bmisscount); + if (sc->bmisscount > 10) { /* NB: 10 is a guess */ + ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, + "stuck beacon time (%u missed)\n", + sc->bmisscount); + tasklet_schedule(&sc->restq); + } + return; + } + if (unlikely(sc->bmisscount != 0)) { + ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, + "resume beacon xmit after %u misses\n", + sc->bmisscount); + sc->bmisscount = 0; + } + + /* + * Stop any current dma and put the new frame on the queue. + * This should never fail since we check above that no frames + * are still pending on the queue. + */ + if (unlikely(ath5k_hw_stop_tx_dma(ah, sc->bhalq))) { + ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq); + /* NB: hw still stops DMA, so proceed */ + } + + /* refresh the beacon for AP mode */ + if (sc->opmode == NL80211_IFTYPE_AP) + ath5k_beacon_update(sc->hw, sc->vif); + + ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); + ath5k_hw_start_tx_dma(ah, sc->bhalq); + ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n", + sc->bhalq, (unsigned long long)bf->daddr, bf->desc); + + skb = ieee80211_get_buffered_bc(sc->hw, sc->vif); + while (skb) { + ath5k_tx_queue(sc->hw, skb, sc->cabq); + skb = ieee80211_get_buffered_bc(sc->hw, sc->vif); + } + + sc->bsent++; +} + + +/** + * ath5k_beacon_update_timers - update beacon timers + * + * @sc: struct ath5k_softc pointer we are operating on + * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a + * beacon timer update based on the current HW TSF. + * + * Calculate the next target beacon transmit time (TBTT) based on the timestamp + * of a received beacon or the current local hardware TSF and write it to the + * beacon timer registers. + * + * This is called in a variety of situations, e.g. when a beacon is received, + * when a TSF update has been detected, but also when an new IBSS is created or + * when we otherwise know we have to update the timers, but we keep it in this + * function to have it all together in one place. + */ +static void +ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf) +{ + struct ath5k_hw *ah = sc->ah; + u32 nexttbtt, intval, hw_tu, bc_tu; + u64 hw_tsf; + + intval = sc->bintval & AR5K_BEACON_PERIOD; + if (WARN_ON(!intval)) + return; + + /* beacon TSF converted to TU */ + bc_tu = TSF_TO_TU(bc_tsf); + + /* current TSF converted to TU */ + hw_tsf = ath5k_hw_get_tsf64(ah); + hw_tu = TSF_TO_TU(hw_tsf); + +#define FUDGE 3 + /* we use FUDGE to make sure the next TBTT is ahead of the current TU */ + if (bc_tsf == -1) { + /* + * no beacons received, called internally. + * just need to refresh timers based on HW TSF. + */ + nexttbtt = roundup(hw_tu + FUDGE, intval); + } else if (bc_tsf == 0) { + /* + * no beacon received, probably called by ath5k_reset_tsf(). + * reset TSF to start with 0. + */ + nexttbtt = intval; + intval |= AR5K_BEACON_RESET_TSF; + } else if (bc_tsf > hw_tsf) { + /* + * beacon received, SW merge happend but HW TSF not yet updated. + * not possible to reconfigure timers yet, but next time we + * receive a beacon with the same BSSID, the hardware will + * automatically update the TSF and then we need to reconfigure + * the timers. + */ + ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, + "need to wait for HW TSF sync\n"); + return; + } else { + /* + * most important case for beacon synchronization between STA. + * + * beacon received and HW TSF has been already updated by HW. 
+ * update next TBTT based on the TSF of the beacon, but make + * sure it is ahead of our local TSF timer. + */ + nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval); + } +#undef FUDGE + + sc->nexttbtt = nexttbtt; + + intval |= AR5K_BEACON_ENA; + ath5k_hw_init_beacon(ah, nexttbtt, intval); + + /* + * debugging output last in order to preserve the time critical aspect + * of this function + */ + if (bc_tsf == -1) + ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, + "reconfigured timers based on HW TSF\n"); + else if (bc_tsf == 0) + ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, + "reset HW TSF and timers\n"); + else + ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, + "updated timers based on beacon TSF\n"); + + ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, + "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n", + (unsigned long long) bc_tsf, + (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt); + ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n", + intval & AR5K_BEACON_PERIOD, + intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "", + intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : ""); +} + + +/** + * ath5k_beacon_config - Configure the beacon queues and interrupts + * + * @sc: struct ath5k_softc pointer we are operating on + * + * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA + * interrupts to detect TSF updates only. + */ +static void +ath5k_beacon_config(struct ath5k_softc *sc) +{ + struct ath5k_hw *ah = sc->ah; + unsigned long flags; + + spin_lock_irqsave(&sc->block, flags); + sc->bmisscount = 0; + sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); + + if (sc->enable_beacon) { + /* + * In IBSS mode we use a self-linked tx descriptor and let the + * hardware send the beacons automatically. We have to load it + * only once here. + * We use the SWBA interrupt only to keep track of the beacon + * timers in order to detect automatic TSF updates. + */ + ath5k_beaconq_config(sc); + + sc->imask |= AR5K_INT_SWBA; + + if (sc->opmode == NL80211_IFTYPE_ADHOC) { + if (ath5k_hw_hasveol(ah)) + ath5k_beacon_send(sc); + } else + ath5k_beacon_update_timers(sc, -1); + } else { + ath5k_hw_stop_tx_dma(sc->ah, sc->bhalq); + } + + ath5k_hw_set_imr(ah, sc->imask); + mmiowb(); + spin_unlock_irqrestore(&sc->block, flags); +} + +static void ath5k_tasklet_beacon(unsigned long data) +{ + struct ath5k_softc *sc = (struct ath5k_softc *) data; + + /* + * Software beacon alert--time to send a beacon. + * + * In IBSS mode we use this interrupt just to + * keep track of the next TBTT (target beacon + * transmission time) in order to detect wether + * automatic TSF updates happened. + */ + if (sc->opmode == NL80211_IFTYPE_ADHOC) { + /* XXX: only if VEOL suppported */ + u64 tsf = ath5k_hw_get_tsf64(sc->ah); + sc->nexttbtt += sc->bintval; + ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, + "SWBA nexttbtt: %x hw_tu: %x " + "TSF: %llx\n", + sc->nexttbtt, + TSF_TO_TU(tsf), + (unsigned long long) tsf); + } else { + spin_lock(&sc->block); + ath5k_beacon_send(sc); + spin_unlock(&sc->block); + } +} + + +/********************\ +* Interrupt handling * +\********************/ + +static int +ath5k_init(struct ath5k_softc *sc) +{ + struct ath5k_hw *ah = sc->ah; + int ret, i; + + mutex_lock(&sc->lock); + + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode); + + /* + * Stop anything previously setup. This is safe + * no matter this is the first time through or not. 
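ath5k_beacon_update_timers() above works in TU (1024 microseconds), so TSF_TO_TU() is a 10-bit shift, and in the bc_tsf == -1 case the next TBTT is simply the current time plus a small fudge, rounded up to the next beacon-interval boundary. A standalone sketch of that case, with made-up TSF and interval values:

#include <stdio.h>
#include <stdint.h>

#define FUDGE 3
/* A TU is 1024 us, so converting a microsecond TSF is a 10-bit shift. */
#define TSF_TO_TU(tsf) ((uint32_t)((tsf) >> 10))
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
    uint64_t hw_tsf = 123456789ULL;  /* current hardware TSF in microseconds */
    uint32_t intval = 100;           /* beacon interval in TU (sc->bintval) */
    uint32_t hw_tu = TSF_TO_TU(hw_tsf);
    uint32_t nexttbtt;

    /* bc_tsf == -1 case: no beacon seen, schedule the next TBTT on the
     * first interval boundary safely ahead of the current time. */
    nexttbtt = ROUNDUP(hw_tu + FUDGE, intval);

    printf("hw_tu %u -> nexttbtt %u TU\n", hw_tu, nexttbtt);  /* 120563 -> 120600 */
    return 0;
}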
+ */ + ath5k_stop_locked(sc); + + /* Set PHY calibration interval */ + ah->ah_cal_intval = ath5k_calinterval; + + /* + * The basic interface to setting the hardware in a good + * state is ``reset''. On return the hardware is known to + * be powered up and with interrupts disabled. This must + * be followed by initialization of the appropriate bits + * and then setup of the interrupt mask. + */ + sc->curchan = sc->hw->conf.channel; + sc->curband = &sc->sbands[sc->curchan->band]; + sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | + AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | + AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI; + ret = ath5k_reset(sc, NULL); + if (ret) + goto done; + + ath5k_rfkill_hw_start(ah); + + /* + * Reset the key cache since some parts do not reset the + * contents on initial power up or resume from suspend. + */ + for (i = 0; i < AR5K_KEYTABLE_SIZE; i++) + ath5k_hw_reset_key(ah, i); + + /* Set ack to be sent at low bit-rates */ + ath5k_hw_set_ack_bitrate_high(ah, false); + ret = 0; +done: + mmiowb(); + mutex_unlock(&sc->lock); + return ret; +} + +static int +ath5k_stop_locked(struct ath5k_softc *sc) +{ + struct ath5k_hw *ah = sc->ah; + + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n", + test_bit(ATH_STAT_INVALID, sc->status)); + + /* + * Shutdown the hardware and driver: + * stop output from above + * disable interrupts + * turn off timers + * turn off the radio + * clear transmit machinery + * clear receive machinery + * drain and release tx queues + * reclaim beacon resources + * power down hardware + * + * Note that some of this work is not possible if the + * hardware is gone (invalid). + */ + ieee80211_stop_queues(sc->hw); + + if (!test_bit(ATH_STAT_INVALID, sc->status)) { + ath5k_led_off(sc); + ath5k_hw_set_imr(ah, 0); + synchronize_irq(sc->pdev->irq); + } + ath5k_txq_cleanup(sc); + if (!test_bit(ATH_STAT_INVALID, sc->status)) { + ath5k_rx_stop(sc); + ath5k_hw_phy_disable(ah); + } else + sc->rxlink = NULL; + + return 0; +} + +/* + * Stop the device, grabbing the top-level lock to protect + * against concurrent entry through ath5k_init (which can happen + * if another thread does a system call and the thread doing the + * stop is preempted). + */ +static int +ath5k_stop_hw(struct ath5k_softc *sc) +{ + int ret; + + mutex_lock(&sc->lock); + ret = ath5k_stop_locked(sc); + if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) { + /* + * Don't set the card in full sleep mode! + * + * a) When the device is in this state it must be carefully + * woken up or references to registers in the PCI clock + * domain may freeze the bus (and system). This varies + * by chip and is mostly an issue with newer parts + * (madwifi sources mentioned srev >= 0x78) that go to + * sleep more quickly. + * + * b) On older chips full sleep results a weird behaviour + * during wakeup. I tested various cards with srev < 0x78 + * and they don't wake up after module reload, a second + * module reload is needed to bring the card up again. + * + * Until we figure out what's going on don't enable + * full chip reset on any chip (this is what Legacy HAL + * and Sam's HAL do anyway). 
Instead Perform a full reset + * on the device (same as initial state after attach) and + * leave it idle (keep MAC/BB on warm reset) */ + ret = ath5k_hw_on_hold(sc->ah); + + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, + "putting device to sleep\n"); + } + ath5k_txbuf_free(sc, sc->bbuf); + + mmiowb(); + mutex_unlock(&sc->lock); + + tasklet_kill(&sc->rxtq); + tasklet_kill(&sc->txtq); + tasklet_kill(&sc->restq); + tasklet_kill(&sc->calib); + tasklet_kill(&sc->beacontq); + + ath5k_rfkill_hw_stop(sc->ah); + + return ret; +} + +static irqreturn_t +ath5k_intr(int irq, void *dev_id) +{ + struct ath5k_softc *sc = dev_id; + struct ath5k_hw *ah = sc->ah; + enum ath5k_int status; + unsigned int counter = 1000; + + if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) || + !ath5k_hw_is_intr_pending(ah))) + return IRQ_NONE; + + do { + ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ + ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n", + status, sc->imask); + if (unlikely(status & AR5K_INT_FATAL)) { + /* + * Fatal errors are unrecoverable. + * Typically these are caused by DMA errors. + */ + tasklet_schedule(&sc->restq); + } else if (unlikely(status & AR5K_INT_RXORN)) { + tasklet_schedule(&sc->restq); + } else { + if (status & AR5K_INT_SWBA) { + tasklet_hi_schedule(&sc->beacontq); + } + if (status & AR5K_INT_RXEOL) { + /* + * NB: the hardware should re-read the link when + * RXE bit is written, but it doesn't work at + * least on older hardware revs. + */ + sc->rxlink = NULL; + } + if (status & AR5K_INT_TXURN) { + /* bump tx trigger level */ + ath5k_hw_update_tx_triglevel(ah, true); + } + if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR)) + tasklet_schedule(&sc->rxtq); + if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC + | AR5K_INT_TXERR | AR5K_INT_TXEOL)) + tasklet_schedule(&sc->txtq); + if (status & AR5K_INT_BMISS) { + /* TODO */ + } + if (status & AR5K_INT_SWI) { + tasklet_schedule(&sc->calib); + } + if (status & AR5K_INT_MIB) { + /* + * These stats are also used for ANI i think + * so how about updating them more often ? + */ + ath5k_hw_update_mib_counters(ah, &sc->ll_stats); + } + if (status & AR5K_INT_GPIO) + tasklet_schedule(&sc->rf_kill.toggleq); + + } + } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); + + if (unlikely(!counter)) + ATH5K_WARN(sc, "too many interrupts, giving up for now\n"); + + ath5k_hw_calibration_poll(ah); + + return IRQ_HANDLED; +} + +static void +ath5k_tasklet_reset(unsigned long data) +{ + struct ath5k_softc *sc = (void *)data; + + ath5k_reset_wake(sc); +} + +/* + * Periodically recalibrate the PHY to account + * for temperature/environment changes. + */ +static void +ath5k_tasklet_calibrate(unsigned long data) +{ + struct ath5k_softc *sc = (void *)data; + struct ath5k_hw *ah = sc->ah; + + /* Only full calibration for now */ + if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION) + return; + + /* Stop queues so that calibration + * doesn't interfere with tx */ + ieee80211_stop_queues(sc->hw); + + ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", + ieee80211_frequency_to_channel(sc->curchan->center_freq), + sc->curchan->hw_value); + + if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) { + /* + * Rfgain is out of bounds, reset the chip + * to load new gain values. 
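[Editor's illustration, not part of the patch] The interrupt handler above drains the interrupt status in a bounded loop and defers all real work to tasklets; the counter guard is what keeps a misbehaving card from wedging the CPU. The stand-alone sketch below reproduces only that control flow: the simulated device, the INT_* bits and the puts() calls are invented stand-ins for ath5k_hw_get_isr(), the AR5K_INT_* bits and the tasklet_schedule() calls.

#include <stdio.h>
#include <stdbool.h>

#define INT_RX		0x1
#define INT_TX		0x2
#define INT_FATAL	0x4

/* simulated device: each "ISR read" returns and clears one status word */
static unsigned int pending[] = { INT_RX, INT_RX | INT_TX, INT_TX };
static unsigned int head;

static bool irq_pending(void)
{
	return head < sizeof(pending) / sizeof(pending[0]);
}

static unsigned int read_and_clear_isr(void)
{
	return pending[head++];
}

int main(void)
{
	unsigned int counter = 1000;	/* guard against a stuck IRQ line */

	do {
		unsigned int status = read_and_clear_isr();

		if (status & INT_FATAL) {
			puts("fatal error: schedule reset");
			break;
		}
		if (status & INT_RX)
			puts("schedule rx work");
		if (status & INT_TX)
			puts("schedule tx work");
	} while (irq_pending() && --counter > 0);

	if (!counter)
		puts("too many interrupts, giving up for now");
	return 0;
}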
+ */ + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); + ath5k_reset_wake(sc); + } + if (ath5k_hw_phy_calibrate(ah, sc->curchan)) + ATH5K_ERR(sc, "calibration of channel %u failed\n", + ieee80211_frequency_to_channel( + sc->curchan->center_freq)); + + ah->ah_swi_mask = 0; + + /* Wake queues */ + ieee80211_wake_queues(sc->hw); + +} + + +/********************\ +* Mac80211 functions * +\********************/ + +static int +ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ath5k_softc *sc = hw->priv; + + return ath5k_tx_queue(hw, skb, sc->txq); +} + +static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ath5k_txq *txq) +{ + struct ath5k_softc *sc = hw->priv; + struct ath5k_buf *bf; + unsigned long flags; + int hdrlen; + int padsize; + + ath5k_debug_dump_skb(sc, skb, "TX ", 1); + + if (sc->opmode == NL80211_IFTYPE_MONITOR) + ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n"); + + /* + * the hardware expects the header padded to 4 byte boundaries + * if this is not the case we add the padding after the header + */ + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + padsize = ath5k_pad_size(hdrlen); + if (padsize) { + + if (skb_headroom(skb) < padsize) { + ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough" + " headroom to pad %d\n", hdrlen, padsize); + goto drop_packet; + } + skb_push(skb, padsize); + memmove(skb->data, skb->data+padsize, hdrlen); + } + + spin_lock_irqsave(&sc->txbuflock, flags); + if (list_empty(&sc->txbuf)) { + ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); + spin_unlock_irqrestore(&sc->txbuflock, flags); + ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); + goto drop_packet; + } + bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); + list_del(&bf->list); + sc->txbuf_len--; + if (list_empty(&sc->txbuf)) + ieee80211_stop_queues(hw); + spin_unlock_irqrestore(&sc->txbuflock, flags); + + bf->skb = skb; + + if (ath5k_txbuf_setup(sc, bf, txq)) { + bf->skb = NULL; + spin_lock_irqsave(&sc->txbuflock, flags); + list_add_tail(&bf->list, &sc->txbuf); + sc->txbuf_len++; + spin_unlock_irqrestore(&sc->txbuflock, flags); + goto drop_packet; + } + return NETDEV_TX_OK; + +drop_packet: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/* + * Reset the hardware. If chan is not NULL, then also pause rx/tx + * and change to the given channel. + */ +static int +ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) +{ + struct ath5k_hw *ah = sc->ah; + int ret; + + ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); + + if (chan) { + ath5k_hw_set_imr(ah, 0); + ath5k_txq_cleanup(sc); + ath5k_rx_stop(sc); + + sc->curchan = chan; + sc->curband = &sc->sbands[chan->band]; + } + ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL); + if (ret) { + ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret); + goto err; + } + + ret = ath5k_rx_start(sc); + if (ret) { + ATH5K_ERR(sc, "can't start recv logic\n"); + goto err; + } + + /* + * Change channels and update the h/w rate map if we're switching; + * e.g. 11a to 11b/g. + * + * We may be doing a reset in response to an ioctl that changes the + * channel so update any state that might change as a result. + * + * XXX needed? 
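[Editor's illustration, not part of the patch] The padding dance in ath5k_tx_queue() above, skb_push() followed by memmove(), slides the 802.11 header forward so that the payload behind it starts on a 4-byte boundary. The stand-alone sketch below does the same thing on a plain byte array; the 26-byte header length is just an example (a QoS data header), and pad_size() mirrors the intent of ath5k_pad_size() rather than quoting its exact definition.

#include <stdio.h>
#include <string.h>

/* pad an 802.11 header out to a 4-byte boundary; headers shorter than
 * 24 bytes are left alone, as in ath5k */
static unsigned int pad_size(unsigned int hdrlen)
{
	return hdrlen < 24 ? 0 : (4 - (hdrlen & 3)) & 3;
}

int main(void)
{
	unsigned char buf[64];
	unsigned int hdrlen = 26;		/* e.g. QoS data header        */
	unsigned int headroom = 8;		/* free space before the frame */
	unsigned char *data = buf + headroom;
	unsigned int pad = pad_size(hdrlen);	/* 2 for a 26-byte header      */

	memset(buf, 0, sizeof(buf));
	memset(data, 'H', hdrlen);		/* fake header                 */
	memset(data + hdrlen, 'P', 16);		/* fake payload                */

	/* skb_push(skb, pad): grow the frame at the front ... */
	data -= pad;
	/* ... then slide the header into the new space, leaving the
	 * padding between header and payload */
	memmove(data, data + pad, hdrlen);

	printf("payload now starts %u bytes after the header start\n",
	       hdrlen + pad);			/* 28, a multiple of 4         */
	return 0;
}

The padding bytes end up between the header and the payload, which matches the 4-byte alignment the comment above says the hardware expects.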
+ */ +/* ath5k_chan_change(sc, c); */ + + ath5k_beacon_config(sc); + /* intrs are enabled by ath5k_beacon_config */ + + return 0; +err: + return ret; +} + +static int +ath5k_reset_wake(struct ath5k_softc *sc) +{ + int ret; + + ret = ath5k_reset(sc, sc->curchan); + if (!ret) + ieee80211_wake_queues(sc->hw); + + return ret; +} + +static int ath5k_start(struct ieee80211_hw *hw) +{ + return ath5k_init(hw->priv); +} + +static void ath5k_stop(struct ieee80211_hw *hw) +{ + ath5k_stop_hw(hw->priv); +} + +static int ath5k_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath5k_softc *sc = hw->priv; + int ret; + + mutex_lock(&sc->lock); + if (sc->vif) { + ret = 0; + goto end; + } + + sc->vif = vif; + + switch (vif->type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_MONITOR: + sc->opmode = vif->type; + break; + default: + ret = -EOPNOTSUPP; + goto end; + } + + ath5k_hw_set_lladdr(sc->ah, vif->addr); + ath5k_mode_setup(sc); + + ret = 0; +end: + mutex_unlock(&sc->lock); + return ret; +} + +static void +ath5k_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath5k_softc *sc = hw->priv; + u8 mac[ETH_ALEN] = {}; + + mutex_lock(&sc->lock); + if (sc->vif != vif) + goto end; + + ath5k_hw_set_lladdr(sc->ah, mac); + sc->vif = NULL; +end: + mutex_unlock(&sc->lock); +} + +/* + * TODO: Phy disable/diversity etc + */ +static int +ath5k_config(struct ieee80211_hw *hw, u32 changed) +{ + struct ath5k_softc *sc = hw->priv; + struct ath5k_hw *ah = sc->ah; + struct ieee80211_conf *conf = &hw->conf; + int ret = 0; + + mutex_lock(&sc->lock); + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + ret = ath5k_chan_set(sc, conf->channel); + if (ret < 0) + goto unlock; + } + + if ((changed & IEEE80211_CONF_CHANGE_POWER) && + (sc->power_level != conf->power_level)) { + sc->power_level = conf->power_level; + + /* Half dB steps */ + ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2)); + } + + /* TODO: + * 1) Move this on config_interface and handle each case + * separately eg. when we have only one STA vif, use + * AR5K_ANTMODE_SINGLE_AP + * + * 2) Allow the user to change antenna mode eg. when only + * one antenna is present + * + * 3) Allow the user to set default/tx antenna when possible + * + * 4) Default mode should handle 90% of the cases, together + * with fixed a/b and single AP modes we should be able to + * handle 99%. Sectored modes are extreme cases and i still + * haven't found a usage for them. 
If we decide to support them, + * then we must allow the user to set how many tx antennas we + * have available + */ + ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); + +unlock: + mutex_unlock(&sc->lock); + return ret; +} + +static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, + int mc_count, struct dev_addr_list *mclist) +{ + u32 mfilt[2], val; + int i; + u8 pos; + + mfilt[0] = 0; + mfilt[1] = 1; + + for (i = 0; i < mc_count; i++) { + if (!mclist) + break; + /* calculate XOR of eight 6-bit values */ + val = get_unaligned_le32(mclist->dmi_addr + 0); + pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; + val = get_unaligned_le32(mclist->dmi_addr + 3); + pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; + pos &= 0x3f; + mfilt[pos / 32] |= (1 << (pos % 32)); + /* XXX: we might be able to just do this instead, + * but not sure, needs testing, if we do use this we'd + * neet to inform below to not reset the mcast */ + /* ath5k_hw_set_mcast_filterindex(ah, + * mclist->dmi_addr[5]); */ + mclist = mclist->next; + } + + return ((u64)(mfilt[1]) << 32) | mfilt[0]; +} + +#define SUPPORTED_FIF_FLAGS \ + FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \ + FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \ + FIF_BCN_PRBRESP_PROMISC +/* + * o always accept unicast, broadcast, and multicast traffic + * o multicast traffic for all BSSIDs will be enabled if mac80211 + * says it should be + * o maintain current state of phy ofdm or phy cck error reception. + * If the hardware detects any of these type of errors then + * ath5k_hw_get_rx_filter() will pass to us the respective + * hardware filters to be able to receive these type of frames. + * o probe request frames are accepted only when operating in + * hostap, adhoc, or monitor modes + * o enable promiscuous mode according to the interface state + * o accept beacons: + * - when operating in adhoc mode so the 802.11 layer creates + * node table entries for peers, + * - when operating in station mode for collecting rssi data when + * the station is otherwise quiet, or + * - when scanning + */ +static void ath5k_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, + u64 multicast) +{ + struct ath5k_softc *sc = hw->priv; + struct ath5k_hw *ah = sc->ah; + u32 mfilt[2], rfilt; + + mutex_lock(&sc->lock); + + mfilt[0] = multicast; + mfilt[1] = multicast >> 32; + + /* Only deal with supported flags */ + changed_flags &= SUPPORTED_FIF_FLAGS; + *new_flags &= SUPPORTED_FIF_FLAGS; + + /* If HW detects any phy or radar errors, leave those filters on. 
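[Editor's illustration, not part of the patch] ath5k_prepare_multicast() above folds each multicast address into a 6-bit hash and sets the matching bit in a 64-bit filter. The stand-alone version below reproduces that fold for one arbitrary group address; get_le32() stands in for the kernel's get_unaligned_le32(), and the address array is padded with two zero bytes so the second 4-byte read stays in bounds (the kernel reads from a larger per-entry buffer).

#include <stdio.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* arbitrary example group address, padded to 8 bytes */
	const uint8_t mac[8] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t mfilt[2] = { 0, 0 };
	uint32_t val;
	uint8_t pos;

	/* XOR of eight 6-bit slices of the address, as in the driver */
	val  = get_le32(mac + 0);
	pos  = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val  = get_le32(mac + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;

	mfilt[pos / 32] |= 1u << (pos % 32);

	printf("hash position %u -> filter %08x:%08x\n",
	       (unsigned int)pos, mfilt[1], mfilt[0]);
	return 0;
}

Folding the address down to 6 bits gives 64 possible positions, which is why the result is carried as a pair of 32-bit words (mfilt[0]/mfilt[1]).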
+ * Also, always enable Unicast, Broadcasts and Multicast + * XXX: move unicast, bssid broadcasts and multicast to mac80211 */ + rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) | + (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST | + AR5K_RX_FILTER_MCAST); + + if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) { + if (*new_flags & FIF_PROMISC_IN_BSS) { + rfilt |= AR5K_RX_FILTER_PROM; + __set_bit(ATH_STAT_PROMISC, sc->status); + } else { + __clear_bit(ATH_STAT_PROMISC, sc->status); + } + } + + /* Note, AR5K_RX_FILTER_MCAST is already enabled */ + if (*new_flags & FIF_ALLMULTI) { + mfilt[0] = ~0; + mfilt[1] = ~0; + } + + /* This is the best we can do */ + if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)) + rfilt |= AR5K_RX_FILTER_PHYERR; + + /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons + * and probes for any BSSID, this needs testing */ + if (*new_flags & FIF_BCN_PRBRESP_PROMISC) + rfilt |= AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PROBEREQ; + + /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not + * set we should only pass on control frames for this + * station. This needs testing. I believe right now this + * enables *all* control frames, which is OK.. but + * but we should see if we can improve on granularity */ + if (*new_flags & FIF_CONTROL) + rfilt |= AR5K_RX_FILTER_CONTROL; + + /* Additional settings per mode -- this is per ath5k */ + + /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */ + + switch (sc->opmode) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_MONITOR: + rfilt |= AR5K_RX_FILTER_CONTROL | + AR5K_RX_FILTER_BEACON | + AR5K_RX_FILTER_PROBEREQ | + AR5K_RX_FILTER_PROM; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + rfilt |= AR5K_RX_FILTER_PROBEREQ | + AR5K_RX_FILTER_BEACON; + break; + case NL80211_IFTYPE_STATION: + if (sc->assoc) + rfilt |= AR5K_RX_FILTER_BEACON; + default: + break; + } + + /* Set filters */ + ath5k_hw_set_rx_filter(ah, rfilt); + + /* Set multicast bits */ + ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]); + /* Set the cached hw filter flags, this will alter actually + * be set in HW */ + sc->filter_flags = rfilt; + + mutex_unlock(&sc->lock); +} + +static int +ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct ath5k_softc *sc = hw->priv; + struct ath5k_hw *ah = sc->ah; + struct ath_common *common = ath5k_hw_common(ah); + int ret = 0; + + if (modparam_nohwcrypt) + return -EOPNOTSUPP; + + if (sc->opmode == NL80211_IFTYPE_AP) + return -EOPNOTSUPP; + + switch (key->alg) { + case ALG_WEP: + case ALG_TKIP: + break; + case ALG_CCMP: + if (sc->ah->ah_aes_support) + break; + + return -EOPNOTSUPP; + default: + WARN_ON(1); + return -EINVAL; + } + + mutex_lock(&sc->lock); + + switch (cmd) { + case SET_KEY: + ret = ath5k_hw_set_key(sc->ah, key->keyidx, key, + sta ? 
sta->addr : NULL); + if (ret) { + ATH5K_ERR(sc, "can't set the key\n"); + goto unlock; + } + __set_bit(key->keyidx, common->keymap); + key->hw_key_idx = key->keyidx; + key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV | + IEEE80211_KEY_FLAG_GENERATE_MMIC); + break; + case DISABLE_KEY: + ath5k_hw_reset_key(sc->ah, key->keyidx); + __clear_bit(key->keyidx, common->keymap); + break; + default: + ret = -EINVAL; + goto unlock; + } + +unlock: + mmiowb(); + mutex_unlock(&sc->lock); + return ret; +} + +static int +ath5k_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct ath5k_softc *sc = hw->priv; + struct ath5k_hw *ah = sc->ah; + + /* Force update */ + ath5k_hw_update_mib_counters(ah, &sc->ll_stats); + + memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats)); + + return 0; +} + +static u64 +ath5k_get_tsf(struct ieee80211_hw *hw) +{ + struct ath5k_softc *sc = hw->priv; + + return ath5k_hw_get_tsf64(sc->ah); +} + +static void +ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf) +{ + struct ath5k_softc *sc = hw->priv; + + ath5k_hw_set_tsf64(sc->ah, tsf); +} + +static void +ath5k_reset_tsf(struct ieee80211_hw *hw) +{ + struct ath5k_softc *sc = hw->priv; + + /* + * in IBSS mode we need to update the beacon timers too. + * this will also reset the TSF if we call it with 0 + */ + if (sc->opmode == NL80211_IFTYPE_ADHOC) + ath5k_beacon_update_timers(sc, 0); + else + ath5k_hw_reset_tsf(sc->ah); +} + +/* + * Updates the beacon that is sent by ath5k_beacon_send. For adhoc, + * this is called only once at config_bss time, for AP we do it every + * SWBA interrupt so that the TIM will reflect buffered frames. + * + * Called with the beacon lock. + */ +static int +ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + int ret; + struct ath5k_softc *sc = hw->priv; + struct sk_buff *skb; + + if (WARN_ON(!vif)) { + ret = -EINVAL; + goto out; + } + + skb = ieee80211_beacon_get(hw, vif); + + if (!skb) { + ret = -ENOMEM; + goto out; + } + + ath5k_debug_dump_skb(sc, skb, "BC ", 1); + + ath5k_txbuf_free(sc, sc->bbuf); + sc->bbuf->skb = skb; + ret = ath5k_beacon_setup(sc, sc->bbuf); + if (ret) + sc->bbuf->skb = NULL; +out: + return ret; +} + +static void +set_beacon_filter(struct ieee80211_hw *hw, bool enable) +{ + struct ath5k_softc *sc = hw->priv; + struct ath5k_hw *ah = sc->ah; + u32 rfilt; + rfilt = ath5k_hw_get_rx_filter(ah); + if (enable) + rfilt |= AR5K_RX_FILTER_BEACON; + else + rfilt &= ~AR5K_RX_FILTER_BEACON; + ath5k_hw_set_rx_filter(ah, rfilt); + sc->filter_flags = rfilt; +} + +static void ath5k_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct ath5k_softc *sc = hw->priv; + struct ath5k_hw *ah = sc->ah; + struct ath_common *common = ath5k_hw_common(ah); + unsigned long flags; + + mutex_lock(&sc->lock); + if (WARN_ON(sc->vif != vif)) + goto unlock; + + if (changes & BSS_CHANGED_BSSID) { + /* Cache for later use during resets */ + memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); + common->curaid = 0; + ath5k_hw_set_associd(ah); + mmiowb(); + } + + if (changes & BSS_CHANGED_BEACON_INT) + sc->bintval = bss_conf->beacon_int; + + if (changes & BSS_CHANGED_ASSOC) { + sc->assoc = bss_conf->assoc; + if (sc->opmode == NL80211_IFTYPE_STATION) + set_beacon_filter(hw, sc->assoc); + ath5k_hw_set_ledstate(sc->ah, sc->assoc ? 
+ AR5K_LED_ASSOC : AR5K_LED_INIT); + if (bss_conf->assoc) { + ATH5K_DBG(sc, ATH5K_DEBUG_ANY, + "Bss Info ASSOC %d, bssid: %pM\n", + bss_conf->aid, common->curbssid); + common->curaid = bss_conf->aid; + ath5k_hw_set_associd(ah); + /* Once ANI is available you would start it here */ + } + } + + if (changes & BSS_CHANGED_BEACON) { + spin_lock_irqsave(&sc->block, flags); + ath5k_beacon_update(hw, vif); + spin_unlock_irqrestore(&sc->block, flags); + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) + sc->enable_beacon = bss_conf->enable_beacon; + + if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_BEACON_INT)) + ath5k_beacon_config(sc); + + unlock: + mutex_unlock(&sc->lock); +} + +static void ath5k_sw_scan_start(struct ieee80211_hw *hw) +{ + struct ath5k_softc *sc = hw->priv; + if (!sc->assoc) + ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN); +} + +static void ath5k_sw_scan_complete(struct ieee80211_hw *hw) +{ + struct ath5k_softc *sc = hw->priv; + ath5k_hw_set_ledstate(sc->ah, sc->assoc ? + AR5K_LED_ASSOC : AR5K_LED_INIT); +} + +/** + * ath5k_set_coverage_class - Set IEEE 802.11 coverage class + * + * @hw: struct ieee80211_hw pointer + * @coverage_class: IEEE 802.11 coverage class number + * + * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given + * coverage class. The values are persistent, they are restored after device + * reset. + */ +static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) +{ + struct ath5k_softc *sc = hw->priv; + + mutex_lock(&sc->lock); + ath5k_hw_set_coverage_class(sc->ah, coverage_class); + mutex_unlock(&sc->lock); +} diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h new file mode 100644 index 0000000..d06770f --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/base.h @@ -0,0 +1,205 @@ +/*- + * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
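[Editor's illustration, not part of the patch] ath5k_set_coverage_class() above only forwards the number to the HAL, so it may help to recall what the number means: each 802.11 coverage-class step allows roughly 3 more microseconds of air-propagation time, which stretches the slot time and, with it, the ACK/CTS timeouts. The table below is only a rough sketch assuming a 9 us base (OFDM short slot) and the common 3 us-per-step convention; the authoritative mapping lives in ath5k_hw_set_coverage_class(), which is not part of this hunk.

#include <stdio.h>

int main(void)
{
	unsigned int base_slot_us = 9;	/* OFDM short slot, example base */
	unsigned int coverage_class;

	for (coverage_class = 0; coverage_class <= 3; coverage_class++)
		printf("coverage class %u -> slot time %u us\n",
		       coverage_class, base_slot_us + 3 * coverage_class);
	return 0;
}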
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + * + */ + +/* + * Definitions for the Atheros Wireless LAN controller driver. + */ +#ifndef _DEV_ATH_ATHVAR_H +#define _DEV_ATH_ATHVAR_H + +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) +#include +#else +#include +#endif + +#include "ath5k.h" +#include "debug.h" + +#include "../regd.h" +#include "../ath.h" + +#define ATH_RXBUF 40 /* number of RX buffers */ +#define ATH_TXBUF 200 /* number of TX buffers */ +#define ATH_BCBUF 1 /* number of beacon buffers */ + +struct ath5k_buf { + struct list_head list; + struct ath5k_desc *desc; /* virtual addr of desc */ + dma_addr_t daddr; /* physical addr of desc */ + struct sk_buff *skb; /* skbuff for buf */ + dma_addr_t skbaddr;/* physical addr of skb data */ +}; + +/* + * Data transmit queue state. One of these exists for each + * hardware transmit queue. Packets sent to us from above + * are assigned to queues based on their priority. Not all + * devices support a complete set of hardware transmit queues. + * For those devices the array sc_ac2q will map multiple + * priorities to fewer hardware queues (typically all to one + * hardware queue). + */ +struct ath5k_txq { + unsigned int qnum; /* hardware q number */ + u32 *link; /* link ptr in last TX desc */ + struct list_head q; /* transmit queue */ + spinlock_t lock; /* lock on q and link */ + bool setup; +}; + +#define ATH5K_LED_MAX_NAME_LEN 31 + +/* + * State for LED triggers + */ +struct ath5k_led +{ + char name[ATH5K_LED_MAX_NAME_LEN + 1]; /* name of the LED in sysfs */ + struct ath5k_softc *sc; /* driver state */ + struct led_classdev led_dev; /* led classdev */ +}; + +/* Rfkill */ +struct ath5k_rfkill { + /* GPIO PIN for rfkill */ + u16 gpio; + /* polarity of rfkill GPIO PIN */ + bool polarity; + /* RFKILL toggle tasklet */ + struct tasklet_struct toggleq; +}; + +#if CHAN_DEBUG +#define ATH_CHAN_MAX (26+26+26+200+200) +#else +#define ATH_CHAN_MAX (14+14+14+252+20) +#endif + +/* Software Carrier, keeps track of the driver state + * associated with an instance of a device */ +struct ath5k_softc { + struct pci_dev *pdev; /* for dma mapping */ + void __iomem *iobase; /* address of the device */ + struct mutex lock; /* dev-level lock */ + struct ieee80211_low_level_stats ll_stats; + struct ieee80211_hw *hw; /* IEEE 802.11 common */ + struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + struct ieee80211_channel channels[ATH_CHAN_MAX]; + struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; + s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES]; + enum nl80211_iftype opmode; + struct ath5k_hw *ah; /* Atheros HW */ + + struct ieee80211_supported_band *curband; + +#ifdef CONFIG_ATH5K_DEBUG + struct ath5k_dbg_info debug; /* debug info */ +#endif /* CONFIG_ATH5K_DEBUG */ + + struct ath5k_buf *bufptr; /* allocated buffer ptr */ + struct ath5k_desc *desc; /* TX/RX descriptors */ + dma_addr_t desc_daddr; /* DMA (physical) address */ + size_t desc_len; /* size of TX/RX descriptors */ + + DECLARE_BITMAP(status, 5); +#define 
ATH_STAT_INVALID 0 /* disable hardware accesses */ +#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */ +#define ATH_STAT_PROMISC 2 +#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */ +#define ATH_STAT_STARTED 4 /* opened & irqs enabled */ + + unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ + unsigned int curmode; /* current phy mode */ + struct ieee80211_channel *curchan; /* current h/w channel */ + + struct ieee80211_vif *vif; + + enum ath5k_int imask; /* interrupt mask copy */ + + u8 bssidmask[ETH_ALEN]; + + unsigned int led_pin, /* GPIO pin for driving LED */ + led_on; /* pin setting for LED on */ + + struct tasklet_struct restq; /* reset tasklet */ + + unsigned int rxbufsize; /* rx size based on mtu */ + struct list_head rxbuf; /* receive buffer */ + spinlock_t rxbuflock; + u32 *rxlink; /* link ptr in last RX desc */ + struct tasklet_struct rxtq; /* rx intr tasklet */ + struct ath5k_led rx_led; /* rx led */ + + struct list_head txbuf; /* transmit buffer */ + spinlock_t txbuflock; + unsigned int txbuf_len; /* buf count in txbuf list */ + struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES]; /* tx queues */ + struct ath5k_txq *txq; /* main tx queue */ + struct tasklet_struct txtq; /* tx intr tasklet */ + struct ath5k_led tx_led; /* tx led */ + + struct ath5k_rfkill rf_kill; + + struct tasklet_struct calib; /* calibration tasklet */ + + spinlock_t block; /* protects beacon */ + struct tasklet_struct beacontq; /* beacon intr tasklet */ + struct ath5k_buf *bbuf; /* beacon buffer */ + unsigned int bhalq, /* SW q for outgoing beacons */ + bmisscount, /* missed beacon transmits */ + bintval, /* beacon interval in TU */ + bsent; + unsigned int nexttbtt; /* next beacon time in TU */ + struct ath5k_txq *cabq; /* content after beacon */ + + int power_level; /* Requested tx power in dbm */ + bool assoc; /* associate state */ + bool enable_beacon; /* true if beacons are on */ +}; + +#define ath5k_hw_hasbssidmask(_ah) \ + (ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0) +#define ath5k_hw_hasveol(_ah) \ + (ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0) + +#endif diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c new file mode 100644 index 0000000..367a6c7 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/caps.c @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2008 Nick Kossifidis + * Copyright (c) 2007-2008 Jiri Slaby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
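[Editor's illustration, not part of the patch] struct ath5k_softc above keeps its one-bit driver flags in a DECLARE_BITMAP() field and the rest of the driver manipulates them with test_bit()/__set_bit()/__clear_bit(), as seen in ath5k_stop_locked() and ath5k_configure_filter() earlier in base.c. A minimal kernel-style sketch of the same idiom follows; struct demo_state and the DEMO_* flags are invented for the example.

#include <linux/bitops.h>
#include <linux/types.h>

/* invented flag set, mirroring the ATH_STAT_* bits above */
enum { DEMO_INVALID, DEMO_PROMISC, DEMO_NR_FLAGS };

struct demo_state {
	DECLARE_BITMAP(flags, DEMO_NR_FLAGS);
};

static void demo_mark_unplugged(struct demo_state *st)
{
	/* the non-atomic variant is enough when callers hold a lock,
	 * which is how ath5k uses __set_bit()/__clear_bit() */
	__set_bit(DEMO_INVALID, st->flags);
}

static bool demo_can_touch_hw(const struct demo_state *st)
{
	return !test_bit(DEMO_INVALID, st->flags);
}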
+ * + */ + +/**************\ +* Capabilities * +\**************/ + +#include "ath5k.h" +#include "reg.h" +#include "debug.h" +#include "base.h" + +/* + * Fill the capabilities struct + * TODO: Merge this with EEPROM code when we are done with it + */ +int ath5k_hw_set_capabilities(struct ath5k_hw *ah) +{ + u16 ee_header; + + ATH5K_TRACE(ah->ah_sc); + /* Capabilities stored in the EEPROM */ + ee_header = ah->ah_capabilities.cap_eeprom.ee_header; + + if (ah->ah_version == AR5K_AR5210) { + /* + * Set radio capabilities + * (The AR5110 only supports the middle 5GHz band) + */ + ah->ah_capabilities.cap_range.range_5ghz_min = 5120; + ah->ah_capabilities.cap_range.range_5ghz_max = 5430; + ah->ah_capabilities.cap_range.range_2ghz_min = 0; + ah->ah_capabilities.cap_range.range_2ghz_max = 0; + + /* Set supported modes */ + __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode); + __set_bit(AR5K_MODE_11A_TURBO, ah->ah_capabilities.cap_mode); + } else { + /* + * XXX The tranceiver supports frequencies from 4920 to 6100GHz + * XXX and from 2312 to 2732GHz. There are problems with the + * XXX current ieee80211 implementation because the IEEE + * XXX channel mapping does not support negative channel + * XXX numbers (2312MHz is channel -19). Of course, this + * XXX doesn't matter because these channels are out of range + * XXX but some regulation domains like MKK (Japan) will + * XXX support frequencies somewhere around 4.8GHz. + */ + + /* + * Set radio capabilities + */ + + if (AR5K_EEPROM_HDR_11A(ee_header)) { + /* 4920 */ + ah->ah_capabilities.cap_range.range_5ghz_min = 5005; + ah->ah_capabilities.cap_range.range_5ghz_max = 6100; + + /* Set supported modes */ + __set_bit(AR5K_MODE_11A, + ah->ah_capabilities.cap_mode); + __set_bit(AR5K_MODE_11A_TURBO, + ah->ah_capabilities.cap_mode); + if (ah->ah_version == AR5K_AR5212) + __set_bit(AR5K_MODE_11G_TURBO, + ah->ah_capabilities.cap_mode); + } + + /* Enable 802.11b if a 2GHz capable radio (2111/5112) is + * connected */ + if (AR5K_EEPROM_HDR_11B(ee_header) || + (AR5K_EEPROM_HDR_11G(ee_header) && + ah->ah_version != AR5K_AR5211)) { + /* 2312 */ + ah->ah_capabilities.cap_range.range_2ghz_min = 2412; + ah->ah_capabilities.cap_range.range_2ghz_max = 2732; + + if (AR5K_EEPROM_HDR_11B(ee_header)) + __set_bit(AR5K_MODE_11B, + ah->ah_capabilities.cap_mode); + + if (AR5K_EEPROM_HDR_11G(ee_header) && + ah->ah_version != AR5K_AR5211) + __set_bit(AR5K_MODE_11G, + ah->ah_capabilities.cap_mode); + } + } + + /* GPIO */ + ah->ah_gpio_npins = AR5K_NUM_GPIO; + + /* Set number of supported TX queues */ + if (ah->ah_version == AR5K_AR5210) + ah->ah_capabilities.cap_queues.q_tx_num = + AR5K_NUM_TX_QUEUES_NOQCU; + else + ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES; + + return 0; +} + +/* Main function used by the driver part to check caps */ +int ath5k_hw_get_capability(struct ath5k_hw *ah, + enum ath5k_capability_type cap_type, + u32 capability, u32 *result) +{ + ATH5K_TRACE(ah->ah_sc); + + switch (cap_type) { + case AR5K_CAP_NUM_TXQUEUES: + if (result) { + if (ah->ah_version == AR5K_AR5210) + *result = AR5K_NUM_TX_QUEUES_NOQCU; + else + *result = AR5K_NUM_TX_QUEUES; + goto yes; + } + case AR5K_CAP_VEOL: + goto yes; + case AR5K_CAP_COMPRESSION: + if (ah->ah_version == AR5K_AR5212) + goto yes; + else + goto no; + case AR5K_CAP_BURST: + goto yes; + case AR5K_CAP_TPC: + goto yes; + case AR5K_CAP_BSSIDMASK: + if (ah->ah_version == AR5K_AR5212) + goto yes; + else + goto no; + case AR5K_CAP_XR: + if (ah->ah_version == AR5K_AR5212) + goto yes; + else + goto no; + 
default: + goto no; + } + +no: + return -EINVAL; +yes: + return 0; +} + +/* + * TODO: Following functions should be part of a new function + * set_capability + */ + +int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, + u16 assoc_id) +{ + ATH5K_TRACE(ah->ah_sc); + + if (ah->ah_version == AR5K_AR5210) { + AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, + AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA); + return 0; + } + + return -EIO; +} + +int ath5k_hw_disable_pspoll(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + + if (ah->ah_version == AR5K_AR5210) { + AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, + AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA); + return 0; + } + + return -EIO; +} diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c new file mode 100644 index 0000000..747508c --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -0,0 +1,540 @@ +/* + * Copyright (c) 2007-2008 Bruno Randolf + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting + * Copyright (c) 2004-2005 Atheros Communications, Inc. + * Copyright (c) 2006 Devicescape Software, Inc. + * Copyright (c) 2007 Jiri Slaby + * Copyright (c) 2007 Luis R. Rodriguez + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
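[Editor's illustration, not part of the patch] ath5k_hw_get_capability() above returns 0 when a capability is present and optionally fills *result, which is also why the ath5k_hw_hasveol()/ath5k_hw_hasbssidmask() macros in base.h compare its return value against 0. The kernel-style sketch below shows how a caller might consume it; demo_report_tx_queues() is an invented caller, not code from the driver.

#include "ath5k.h"

static void demo_report_tx_queues(struct ath5k_hw *ah)
{
	u32 qnum = 0;

	/* a zero return means "capability present"; this particular query
	 * also writes the queue count through the result pointer */
	if (ath5k_hw_get_capability(ah, AR5K_CAP_NUM_TXQUEUES, 0, &qnum) == 0)
		printk(KERN_DEBUG "hardware exposes %u tx queues\n", qnum);
	else
		printk(KERN_DEBUG "tx queue count not reported\n");
}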
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + */ + +#include "base.h" +#include "debug.h" + +static unsigned int ath5k_debug; +module_param_named(debug, ath5k_debug, uint, 0); + + +#ifdef CONFIG_ATH5K_DEBUG + +#include +#include "reg.h" + +static struct dentry *ath5k_global_debugfs; + +static int ath5k_debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + + +/* debugfs: registers */ + +struct reg { + const char *name; + int addr; +}; + +#define REG_STRUCT_INIT(r) { #r, r } + +/* just a few random registers, might want to add more */ +static const struct reg regs[] = { + REG_STRUCT_INIT(AR5K_CR), + REG_STRUCT_INIT(AR5K_RXDP), + REG_STRUCT_INIT(AR5K_CFG), + REG_STRUCT_INIT(AR5K_IER), + REG_STRUCT_INIT(AR5K_BCR), + REG_STRUCT_INIT(AR5K_RTSD0), + REG_STRUCT_INIT(AR5K_RTSD1), + REG_STRUCT_INIT(AR5K_TXCFG), + REG_STRUCT_INIT(AR5K_RXCFG), + REG_STRUCT_INIT(AR5K_RXJLA), + REG_STRUCT_INIT(AR5K_MIBC), + REG_STRUCT_INIT(AR5K_TOPS), + REG_STRUCT_INIT(AR5K_RXNOFRM), + REG_STRUCT_INIT(AR5K_TXNOFRM), + REG_STRUCT_INIT(AR5K_RPGTO), + REG_STRUCT_INIT(AR5K_RFCNT), + REG_STRUCT_INIT(AR5K_MISC), + REG_STRUCT_INIT(AR5K_QCUDCU_CLKGT), + REG_STRUCT_INIT(AR5K_ISR), + REG_STRUCT_INIT(AR5K_PISR), + REG_STRUCT_INIT(AR5K_SISR0), + REG_STRUCT_INIT(AR5K_SISR1), + REG_STRUCT_INIT(AR5K_SISR2), + REG_STRUCT_INIT(AR5K_SISR3), + REG_STRUCT_INIT(AR5K_SISR4), + REG_STRUCT_INIT(AR5K_IMR), + REG_STRUCT_INIT(AR5K_PIMR), + REG_STRUCT_INIT(AR5K_SIMR0), + REG_STRUCT_INIT(AR5K_SIMR1), + REG_STRUCT_INIT(AR5K_SIMR2), + REG_STRUCT_INIT(AR5K_SIMR3), + REG_STRUCT_INIT(AR5K_SIMR4), + REG_STRUCT_INIT(AR5K_DCM_ADDR), + REG_STRUCT_INIT(AR5K_DCCFG), + REG_STRUCT_INIT(AR5K_CCFG), + REG_STRUCT_INIT(AR5K_CPC0), + REG_STRUCT_INIT(AR5K_CPC1), + REG_STRUCT_INIT(AR5K_CPC2), + REG_STRUCT_INIT(AR5K_CPC3), + REG_STRUCT_INIT(AR5K_CPCOVF), + REG_STRUCT_INIT(AR5K_RESET_CTL), + REG_STRUCT_INIT(AR5K_SLEEP_CTL), + REG_STRUCT_INIT(AR5K_INTPEND), + REG_STRUCT_INIT(AR5K_SFR), + REG_STRUCT_INIT(AR5K_PCICFG), + REG_STRUCT_INIT(AR5K_GPIOCR), + REG_STRUCT_INIT(AR5K_GPIODO), + REG_STRUCT_INIT(AR5K_SREV), +}; + +static void *reg_start(struct seq_file *seq, loff_t *pos) +{ + return *pos < ARRAY_SIZE(regs) ? (void *)®s[*pos] : NULL; +} + +static void reg_stop(struct seq_file *seq, void *p) +{ + /* nothing to do */ +} + +static void *reg_next(struct seq_file *seq, void *p, loff_t *pos) +{ + ++*pos; + return *pos < ARRAY_SIZE(regs) ? 
(void *)®s[*pos] : NULL; +} + +static int reg_show(struct seq_file *seq, void *p) +{ + struct ath5k_softc *sc = seq->private; + struct reg *r = p; + seq_printf(seq, "%-25s0x%08x\n", r->name, + ath5k_hw_reg_read(sc->ah, r->addr)); + return 0; +} + +static const struct seq_operations register_seq_ops = { + .start = reg_start, + .next = reg_next, + .stop = reg_stop, + .show = reg_show +}; + +static int open_file_registers(struct inode *inode, struct file *file) +{ + struct seq_file *s; + int res; + res = seq_open(file, ®ister_seq_ops); + if (res == 0) { + s = file->private_data; + s->private = inode->i_private; + } + return res; +} + +static const struct file_operations fops_registers = { + .open = open_file_registers, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .owner = THIS_MODULE, +}; + + +/* debugfs: beacons */ + +static ssize_t read_file_beacon(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath5k_softc *sc = file->private_data; + struct ath5k_hw *ah = sc->ah; + char buf[500]; + unsigned int len = 0; + unsigned int v; + u64 tsf; + + v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON); + len += snprintf(buf+len, sizeof(buf)-len, + "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n", + "AR5K_BEACON", v, v & AR5K_BEACON_PERIOD, + (v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S); + + len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\n", + "AR5K_LAST_TSTP", ath5k_hw_reg_read(sc->ah, AR5K_LAST_TSTP)); + + len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\n\n", + "AR5K_BEACON_CNT", ath5k_hw_reg_read(sc->ah, AR5K_BEACON_CNT)); + + v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER0); + len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\tTU: %08x\n", + "AR5K_TIMER0 (TBTT)", v, v); + + v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER1); + len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\tTU: %08x\n", + "AR5K_TIMER1 (DMA)", v, v >> 3); + + v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER2); + len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\tTU: %08x\n", + "AR5K_TIMER2 (SWBA)", v, v >> 3); + + v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER3); + len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\tTU: %08x\n", + "AR5K_TIMER3 (ATIM)", v, v); + + tsf = ath5k_hw_get_tsf64(sc->ah); + len += snprintf(buf+len, sizeof(buf)-len, + "TSF\t\t0x%016llx\tTU: %08x\n", + (unsigned long long)tsf, TSF_TO_TU(tsf)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_beacon(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ath5k_softc *sc = file->private_data; + struct ath5k_hw *ah = sc->ah; + char buf[20]; + + if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) + return -EFAULT; + + if (strncmp(buf, "disable", 7) == 0) { + AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); + printk(KERN_INFO "debugfs disable beacons\n"); + } else if (strncmp(buf, "enable", 6) == 0) { + AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); + printk(KERN_INFO "debugfs enable beacons\n"); + } + return count; +} + +static const struct file_operations fops_beacon = { + .read = read_file_beacon, + .write = write_file_beacon, + .open = ath5k_debugfs_open, + .owner = THIS_MODULE, +}; + + +/* debugfs: reset */ + +static ssize_t write_file_reset(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ath5k_softc *sc = file->private_data; + tasklet_schedule(&sc->restq); + return count; +} + +static const struct file_operations fops_reset = { + .write = 
write_file_reset, + .open = ath5k_debugfs_open, + .owner = THIS_MODULE, +}; + + +/* debugfs: debug level */ + +static const struct { + enum ath5k_debug_level level; + const char *name; + const char *desc; +} dbg_info[] = { + { ATH5K_DEBUG_RESET, "reset", "reset and initialization" }, + { ATH5K_DEBUG_INTR, "intr", "interrupt handling" }, + { ATH5K_DEBUG_MODE, "mode", "mode init/setup" }, + { ATH5K_DEBUG_XMIT, "xmit", "basic xmit operation" }, + { ATH5K_DEBUG_BEACON, "beacon", "beacon handling" }, + { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" }, + { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" }, + { ATH5K_DEBUG_LED, "led", "LED management" }, + { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" }, + { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, + { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, + { ATH5K_DEBUG_TRACE, "trace", "trace function calls" }, + { ATH5K_DEBUG_ANY, "all", "show all debug levels" }, +}; + +static ssize_t read_file_debug(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath5k_softc *sc = file->private_data; + char buf[700]; + unsigned int len = 0; + unsigned int i; + + len += snprintf(buf+len, sizeof(buf)-len, + "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level); + + for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) { + len += snprintf(buf+len, sizeof(buf)-len, + "%10s %c 0x%08x - %s\n", dbg_info[i].name, + sc->debug.level & dbg_info[i].level ? '+' : ' ', + dbg_info[i].level, dbg_info[i].desc); + } + len += snprintf(buf+len, sizeof(buf)-len, + "%10s %c 0x%08x - %s\n", dbg_info[i].name, + sc->debug.level == dbg_info[i].level ? '+' : ' ', + dbg_info[i].level, dbg_info[i].desc); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_debug(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ath5k_softc *sc = file->private_data; + unsigned int i; + char buf[20]; + + if (copy_from_user(buf, userbuf, min(count, sizeof(buf)))) + return -EFAULT; + + for (i = 0; i < ARRAY_SIZE(dbg_info); i++) { + if (strncmp(buf, dbg_info[i].name, + strlen(dbg_info[i].name)) == 0) { + sc->debug.level ^= dbg_info[i].level; /* toggle bit */ + break; + } + } + return count; +} + +static const struct file_operations fops_debug = { + .read = read_file_debug, + .write = write_file_debug, + .open = ath5k_debugfs_open, + .owner = THIS_MODULE, +}; + + +/* init */ + +void +ath5k_debug_init(void) +{ + ath5k_global_debugfs = debugfs_create_dir("ath5k", NULL); +} + +void +ath5k_debug_init_device(struct ath5k_softc *sc) +{ + sc->debug.level = ath5k_debug; + + sc->debug.debugfs_phydir = debugfs_create_dir(wiphy_name(sc->hw->wiphy), + ath5k_global_debugfs); + + sc->debug.debugfs_debug = debugfs_create_file("debug", + S_IWUSR | S_IRUSR, + sc->debug.debugfs_phydir, sc, &fops_debug); + + sc->debug.debugfs_registers = debugfs_create_file("registers", S_IRUSR, + sc->debug.debugfs_phydir, sc, &fops_registers); + + sc->debug.debugfs_beacon = debugfs_create_file("beacon", + S_IWUSR | S_IRUSR, + sc->debug.debugfs_phydir, sc, &fops_beacon); + + sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR, + sc->debug.debugfs_phydir, sc, &fops_reset); +} + +void +ath5k_debug_finish(void) +{ + debugfs_remove(ath5k_global_debugfs); +} + +void +ath5k_debug_finish_device(struct ath5k_softc *sc) +{ + debugfs_remove(sc->debug.debugfs_debug); + debugfs_remove(sc->debug.debugfs_registers); + debugfs_remove(sc->debug.debugfs_beacon); + 
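[Editor's illustration, not part of the patch] The debugfs files above all follow the same recipe: stash the device pointer in file->private_data on open, snprintf() into a small stack buffer for reads, and parse a short command string for writes. The kernel-style sketch below builds one more attribute the same way; demo_value, the "reset" command and the demo_* names are invented for the example.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int demo_value = 42;

static int demo_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;	/* per-device pointer */
	return 0;
}

static ssize_t demo_read(struct file *file, char __user *user_buf,
			 size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = snprintf(buf, sizeof(buf), "value: %d\n", demo_value);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t demo_write(struct file *file, const char __user *userbuf,
			  size_t count, loff_t *ppos)
{
	char buf[16];
	size_t len = min(count, sizeof(buf) - 1);

	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;
	buf[len] = '\0';			/* terminate before strncmp */

	if (strncmp(buf, "reset", 5) == 0)
		demo_value = 0;
	return count;
}

static const struct file_operations demo_fops = {
	.read	= demo_read,
	.write	= demo_write,
	.open	= demo_open,
	.owner	= THIS_MODULE,
};

Wiring it up is then a single debugfs_create_file("demo", S_IWUSR | S_IRUSR, parent, priv, &demo_fops) call, analogous to what ath5k_debug_init_device() does above.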
debugfs_remove(sc->debug.debugfs_reset); + debugfs_remove(sc->debug.debugfs_phydir); +} + + +/* functions used in other places */ + +void +ath5k_debug_dump_bands(struct ath5k_softc *sc) +{ + unsigned int b, i; + + if (likely(!(sc->debug.level & ATH5K_DEBUG_DUMPBANDS))) + return; + + BUG_ON(!sc->sbands); + + for (b = 0; b < IEEE80211_NUM_BANDS; b++) { + struct ieee80211_supported_band *band = &sc->sbands[b]; + char bname[6]; + switch (band->band) { + case IEEE80211_BAND_2GHZ: + strcpy(bname, "2 GHz"); + break; + case IEEE80211_BAND_5GHZ: + strcpy(bname, "5 GHz"); + break; + default: + printk(KERN_DEBUG "Band not supported: %d\n", + band->band); + return; + } + printk(KERN_DEBUG "Band %s: channels %d, rates %d\n", bname, + band->n_channels, band->n_bitrates); + printk(KERN_DEBUG " channels:\n"); + for (i = 0; i < band->n_channels; i++) + printk(KERN_DEBUG " %3d %d %.4x %.4x\n", + ieee80211_frequency_to_channel( + band->channels[i].center_freq), + band->channels[i].center_freq, + band->channels[i].hw_value, + band->channels[i].flags); + printk(KERN_DEBUG " rates:\n"); + for (i = 0; i < band->n_bitrates; i++) + printk(KERN_DEBUG " %4d %.4x %.4x %.4x\n", + band->bitrates[i].bitrate, + band->bitrates[i].hw_value, + band->bitrates[i].flags, + band->bitrates[i].hw_value_short); + } +} + +static inline void +ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done, + struct ath5k_rx_status *rs) +{ + struct ath5k_desc *ds = bf->desc; + struct ath5k_hw_all_rx_desc *rd = &ds->ud.ds_rx; + + printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n", + ds, (unsigned long long)bf->daddr, + ds->ds_link, ds->ds_data, + rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1, + rd->u.rx_stat.rx_status_0, rd->u.rx_stat.rx_status_0, + !done ? ' ' : (rs->rs_status == 0) ? '*' : '!'); +} + +void +ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) +{ + struct ath5k_desc *ds; + struct ath5k_buf *bf; + struct ath5k_rx_status rs = {}; + int status; + + if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET))) + return; + + printk(KERN_DEBUG "rx queue %x, link %p\n", + ath5k_hw_get_rxdp(ah), sc->rxlink); + + spin_lock_bh(&sc->rxbuflock); + list_for_each_entry(bf, &sc->rxbuf, list) { + ds = bf->desc; + status = ah->ah_proc_rx_desc(ah, ds, &rs); + if (!status) + ath5k_debug_printrxbuf(bf, status == 0, &rs); + } + spin_unlock_bh(&sc->rxbuflock); +} + +void +ath5k_debug_dump_skb(struct ath5k_softc *sc, + struct sk_buff *skb, const char *prefix, int tx) +{ + char buf[16]; + + if (likely(!((tx && (sc->debug.level & ATH5K_DEBUG_DUMP_TX)) || + (!tx && (sc->debug.level & ATH5K_DEBUG_DUMP_RX))))) + return; + + snprintf(buf, sizeof(buf), "%s %s", wiphy_name(sc->hw->wiphy), prefix); + + print_hex_dump_bytes(buf, DUMP_PREFIX_NONE, skb->data, + min(200U, skb->len)); + + printk(KERN_DEBUG "\n"); +} + +void +ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) +{ + struct ath5k_desc *ds = bf->desc; + struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212; + struct ath5k_tx_status ts = {}; + int done; + + if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET))) + return; + + done = sc->ah->ah_proc_tx_desc(sc->ah, bf->desc, &ts); + + printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x " + "%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link, + ds->ds_data, td->tx_ctl.tx_control_0, td->tx_ctl.tx_control_1, + td->tx_ctl.tx_control_2, td->tx_ctl.tx_control_3, + td->tx_stat.tx_status_0, td->tx_stat.tx_status_1, + done ? ' ' : (ts.ts_status == 0) ? 
'*' : '!'); +} + +#endif /* ifdef CONFIG_ATH5K_DEBUG */ diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h new file mode 100644 index 0000000..66f69f0 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/debug.h @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2007 Bruno Randolf + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting + * Copyright (c) 2004-2005 Atheros Communications, Inc. + * Copyright (c) 2006 Devicescape Software, Inc. + * Copyright (c) 2007 Jiri Slaby + * Copyright (c) 2007 Luis R. Rodriguez + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#ifndef _ATH5K_DEBUG_H +#define _ATH5K_DEBUG_H + +struct ath5k_softc; +struct ath5k_hw; +struct sk_buff; +struct ath5k_buf; + +struct ath5k_dbg_info { + unsigned int level; /* debug level */ + /* debugfs entries */ + struct dentry *debugfs_phydir; + struct dentry *debugfs_debug; + struct dentry *debugfs_registers; + struct dentry *debugfs_beacon; + struct dentry *debugfs_reset; +}; + +/** + * enum ath5k_debug_level - ath5k debug level + * + * @ATH5K_DEBUG_RESET: reset processing + * @ATH5K_DEBUG_INTR: interrupt handling + * @ATH5K_DEBUG_MODE: mode init/setup + * @ATH5K_DEBUG_XMIT: basic xmit operation + * @ATH5K_DEBUG_BEACON: beacon handling + * @ATH5K_DEBUG_CALIBRATE: periodic calibration + * @ATH5K_DEBUG_TXPOWER: transmit power setting + * @ATH5K_DEBUG_LED: led management + * @ATH5K_DEBUG_DUMP_RX: print received skb content + * @ATH5K_DEBUG_DUMP_TX: print transmit skb content + * @ATH5K_DEBUG_DUMPBANDS: dump bands + * @ATH5K_DEBUG_TRACE: trace function calls + * @ATH5K_DEBUG_ANY: show at any debug level + * + * The debug level is used to control the amount and type of debugging output + * we want to see. The debug level is given in calls to ATH5K_DBG to specify + * where the message should appear, and the user can control the debugging + * messages he wants to see, either by the module parameter 'debug' on module + * load, or dynamically by using debugfs 'ath5k/phyX/debug'. these levels can + * be combined together by bitwise OR. + */ +enum ath5k_debug_level { + ATH5K_DEBUG_RESET = 0x00000001, + ATH5K_DEBUG_INTR = 0x00000002, + ATH5K_DEBUG_MODE = 0x00000004, + ATH5K_DEBUG_XMIT = 0x00000008, + ATH5K_DEBUG_BEACON = 0x00000010, + ATH5K_DEBUG_CALIBRATE = 0x00000020, + ATH5K_DEBUG_TXPOWER = 0x00000040, + ATH5K_DEBUG_LED = 0x00000080, + ATH5K_DEBUG_DUMP_RX = 0x00000100, + ATH5K_DEBUG_DUMP_TX = 0x00000200, + ATH5K_DEBUG_DUMPBANDS = 0x00000400, + ATH5K_DEBUG_TRACE = 0x00001000, + ATH5K_DEBUG_ANY = 0xffffffff +}; + +#ifdef CONFIG_ATH5K_DEBUG + +#define ATH5K_TRACE(_sc) do { \ + if (unlikely((_sc)->debug.level & ATH5K_DEBUG_TRACE)) \ + printk(KERN_DEBUG "ath5k trace %s:%d\n", __func__, __LINE__); \ + } while (0) + +#define ATH5K_DBG(_sc, _m, _fmt, ...) do { \ + if (unlikely((_sc)->debug.level & (_m) && net_ratelimit())) \ + ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \ + __func__, __LINE__, ##__VA_ARGS__); \ + } while (0) + +#define ATH5K_DBG_UNLIMIT(_sc, _m, _fmt, ...) do { \ + if (unlikely((_sc)->debug.level & (_m))) \ + ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \ + __func__, __LINE__, ##__VA_ARGS__); \ + } while (0) + +void +ath5k_debug_init(void); + +void +ath5k_debug_init_device(struct ath5k_softc *sc); + +void +ath5k_debug_finish(void); + +void +ath5k_debug_finish_device(struct ath5k_softc *sc); + +void +ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah); + +void +ath5k_debug_dump_bands(struct ath5k_softc *sc); + +void +ath5k_debug_dump_skb(struct ath5k_softc *sc, + struct sk_buff *skb, const char *prefix, int tx); + +void +ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf); + +#else /* no debugging */ + +#include + +#define ATH5K_TRACE(_sc) typecheck(struct ath5k_softc *, (_sc)) + +static inline void __attribute__ ((format (printf, 3, 4))) +ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) {} + +static inline void __attribute__ ((format (printf, 3, 4))) +ATH5K_DBG_UNLIMIT(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) 
+{} + +static inline void +ath5k_debug_init(void) {} + +static inline void +ath5k_debug_init_device(struct ath5k_softc *sc) {} + +static inline void +ath5k_debug_finish(void) {} + +static inline void +ath5k_debug_finish_device(struct ath5k_softc *sc) {} + +static inline void +ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) {} + +static inline void +ath5k_debug_dump_bands(struct ath5k_softc *sc) {} + +static inline void +ath5k_debug_dump_skb(struct ath5k_softc *sc, + struct sk_buff *skb, const char *prefix, int tx) {} + +static inline void +ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {} + +#endif /* ifdef CONFIG_ATH5K_DEBUG */ + +#endif /* ifndef _ATH5K_DEBUG_H */ diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c new file mode 100644 index 0000000..dc30a2b --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/desc.c @@ -0,0 +1,696 @@ +/* + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2008 Nick Kossifidis + * Copyright (c) 2007-2008 Pavel Roskin + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/******************************\ + Hardware Descriptor Functions +\******************************/ + +#include "ath5k.h" +#include "reg.h" +#include "debug.h" +#include "base.h" + +/* + * TX Descriptors + */ + +/* + * Initialize the 2-word tx control descriptor on 5210/5211 + */ +static int +ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, + unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type, + unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0, + unsigned int key_index, unsigned int antenna_mode, unsigned int flags, + unsigned int rtscts_rate, unsigned int rtscts_duration) +{ + u32 frame_type; + struct ath5k_hw_2w_tx_ctl *tx_ctl; + unsigned int frame_len; + + tx_ctl = &desc->ud.ds_tx5210.tx_ctl; + + /* + * Validate input + * - Zero retries don't make sense. + * - A zero rate will put the HW into a mode where it continously sends + * noise on the channel, so it is important to avoid this. 
+ */ + if (unlikely(tx_tries0 == 0)) { + ATH5K_ERR(ah->ah_sc, "zero retries\n"); + WARN_ON(1); + return -EINVAL; + } + if (unlikely(tx_rate0 == 0)) { + ATH5K_ERR(ah->ah_sc, "zero rate\n"); + WARN_ON(1); + return -EINVAL; + } + + /* Clear descriptor */ + memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc)); + + /* Setup control descriptor */ + + /* Verify and set frame length */ + + /* remove padding we might have added before */ + frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN; + + if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN) + return -EINVAL; + + tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN; + + /* Verify and set buffer length */ + + /* NB: beacon's BufLen must be a multiple of 4 bytes */ + if (type == AR5K_PKT_TYPE_BEACON) + pkt_len = roundup(pkt_len, 4); + + if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN) + return -EINVAL; + + tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN; + + /* + * Verify and set header length + * XXX: I only found that on 5210 code, does it work on 5211 ? + */ + if (ah->ah_version == AR5K_AR5210) { + if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN) + return -EINVAL; + tx_ctl->tx_control_0 |= + AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN); + } + + /*Diferences between 5210-5211*/ + if (ah->ah_version == AR5K_AR5210) { + switch (type) { + case AR5K_PKT_TYPE_BEACON: + case AR5K_PKT_TYPE_PROBE_RESP: + frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY; + case AR5K_PKT_TYPE_PIFS: + frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS; + default: + frame_type = type /*<< 2 ?*/; + } + + tx_ctl->tx_control_0 |= + AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) | + AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE); + + } else { + tx_ctl->tx_control_0 |= + AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) | + AR5K_REG_SM(antenna_mode, + AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT); + tx_ctl->tx_control_1 |= + AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE); + } +#define _TX_FLAGS(_c, _flag) \ + if (flags & AR5K_TXDESC_##_flag) { \ + tx_ctl->tx_control_##_c |= \ + AR5K_2W_TX_DESC_CTL##_c##_##_flag; \ + } + + _TX_FLAGS(0, CLRDMASK); + _TX_FLAGS(0, VEOL); + _TX_FLAGS(0, INTREQ); + _TX_FLAGS(0, RTSENA); + _TX_FLAGS(1, NOACK); + +#undef _TX_FLAGS + + /* + * WEP crap + */ + if (key_index != AR5K_TXKEYIX_INVALID) { + tx_ctl->tx_control_0 |= + AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; + tx_ctl->tx_control_1 |= + AR5K_REG_SM(key_index, + AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX); + } + + /* + * RTS/CTS Duration [5210 ?] + */ + if ((ah->ah_version == AR5K_AR5210) && + (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA))) + tx_ctl->tx_control_1 |= rtscts_duration & + AR5K_2W_TX_DESC_CTL1_RTS_DURATION; + + return 0; +} + +/* + * Initialize the 4-word tx control descriptor on 5212 + */ +static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, + struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len, + enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0, + unsigned int tx_tries0, unsigned int key_index, + unsigned int antenna_mode, unsigned int flags, + unsigned int rtscts_rate, + unsigned int rtscts_duration) +{ + struct ath5k_hw_4w_tx_ctl *tx_ctl; + unsigned int frame_len; + + ATH5K_TRACE(ah->ah_sc); + tx_ctl = &desc->ud.ds_tx5212.tx_ctl; + + /* + * Validate input + * - Zero retries don't make sense. + * - A zero rate will put the HW into a mode where it continously sends + * noise on the channel, so it is important to avoid this. 
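+	 *   In addition, this 5212-only path folds ah_txpower.txp_offset into
+	 *   tx_power and clamps the result to AR5K_TUNE_MAX_TXPOWER before it
+	 *   is written to the descriptor.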
+ */ + if (unlikely(tx_tries0 == 0)) { + ATH5K_ERR(ah->ah_sc, "zero retries\n"); + WARN_ON(1); + return -EINVAL; + } + if (unlikely(tx_rate0 == 0)) { + ATH5K_ERR(ah->ah_sc, "zero rate\n"); + WARN_ON(1); + return -EINVAL; + } + + tx_power += ah->ah_txpower.txp_offset; + if (tx_power > AR5K_TUNE_MAX_TXPOWER) + tx_power = AR5K_TUNE_MAX_TXPOWER; + + /* Clear descriptor */ + memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc)); + + /* Setup control descriptor */ + + /* Verify and set frame length */ + + /* remove padding we might have added before */ + frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN; + + if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN) + return -EINVAL; + + tx_ctl->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN; + + /* Verify and set buffer length */ + + /* NB: beacon's BufLen must be a multiple of 4 bytes */ + if (type == AR5K_PKT_TYPE_BEACON) + pkt_len = roundup(pkt_len, 4); + + if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN) + return -EINVAL; + + tx_ctl->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN; + + tx_ctl->tx_control_0 |= + AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) | + AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT); + tx_ctl->tx_control_1 |= AR5K_REG_SM(type, + AR5K_4W_TX_DESC_CTL1_FRAME_TYPE); + tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES, + AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0); + tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0; + +#define _TX_FLAGS(_c, _flag) \ + if (flags & AR5K_TXDESC_##_flag) { \ + tx_ctl->tx_control_##_c |= \ + AR5K_4W_TX_DESC_CTL##_c##_##_flag; \ + } + + _TX_FLAGS(0, CLRDMASK); + _TX_FLAGS(0, VEOL); + _TX_FLAGS(0, INTREQ); + _TX_FLAGS(0, RTSENA); + _TX_FLAGS(0, CTSENA); + _TX_FLAGS(1, NOACK); + +#undef _TX_FLAGS + + /* + * WEP crap + */ + if (key_index != AR5K_TXKEYIX_INVALID) { + tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; + tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index, + AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX); + } + + /* + * RTS/CTS + */ + if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) { + if ((flags & AR5K_TXDESC_RTSENA) && + (flags & AR5K_TXDESC_CTSENA)) + return -EINVAL; + tx_ctl->tx_control_2 |= rtscts_duration & + AR5K_4W_TX_DESC_CTL2_RTS_DURATION; + tx_ctl->tx_control_3 |= AR5K_REG_SM(rtscts_rate, + AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE); + } + + return 0; +} + +/* + * Initialize a 4-word multi rate retry tx control descriptor on 5212 + */ +static int +ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, + unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, + u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3) +{ + struct ath5k_hw_4w_tx_ctl *tx_ctl; + + /* + * Rates can be 0 as long as the retry count is 0 too. + * A zero rate and nonzero retry count will put the HW into a mode where + * it continously sends noise on the channel, so it is important to + * avoid this. 
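+	 * Only the 5212 has the extra XMIT_TRIES/XMIT_RATE fields, so this
+	 * helper returns 1 when it actually programmed them and 0 otherwise;
+	 * older parts get ath5k_hw_setup_no_mrr() instead.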
+ */ + if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) || + (tx_rate2 == 0 && tx_tries2 != 0) || + (tx_rate3 == 0 && tx_tries3 != 0))) { + ATH5K_ERR(ah->ah_sc, "zero rate\n"); + WARN_ON(1); + return -EINVAL; + } + + if (ah->ah_version == AR5K_AR5212) { + tx_ctl = &desc->ud.ds_tx5212.tx_ctl; + +#define _XTX_TRIES(_n) \ + if (tx_tries##_n) { \ + tx_ctl->tx_control_2 |= \ + AR5K_REG_SM(tx_tries##_n, \ + AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \ + tx_ctl->tx_control_3 |= \ + AR5K_REG_SM(tx_rate##_n, \ + AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \ + } + + _XTX_TRIES(1); + _XTX_TRIES(2); + _XTX_TRIES(3); + +#undef _XTX_TRIES + + return 1; + } + + return 0; +} + +/* no mrr support for cards older than 5212 */ +static int +ath5k_hw_setup_no_mrr(struct ath5k_hw *ah, struct ath5k_desc *desc, + unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, + u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3) +{ + return 0; +} + +/* + * Proccess the tx status descriptor on 5210/5211 + */ +static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, struct ath5k_tx_status *ts) +{ + struct ath5k_hw_2w_tx_ctl *tx_ctl; + struct ath5k_hw_tx_status *tx_status; + + ATH5K_TRACE(ah->ah_sc); + + tx_ctl = &desc->ud.ds_tx5210.tx_ctl; + tx_status = &desc->ud.ds_tx5210.tx_stat; + + /* No frame has been send or error */ + if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0)) + return -EINPROGRESS; + + /* + * Get descriptor status + */ + ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0, + AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP); + ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0, + AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT); + ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0, + AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT); + /*TODO: ts->ts_virtcol + test*/ + ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1, + AR5K_DESC_TX_STATUS1_SEQ_NUM); + ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1, + AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH); + ts->ts_antenna = 1; + ts->ts_status = 0; + ts->ts_rate[0] = AR5K_REG_MS(tx_ctl->tx_control_0, + AR5K_2W_TX_DESC_CTL0_XMIT_RATE); + ts->ts_retry[0] = ts->ts_longretry; + ts->ts_final_idx = 0; + + if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) { + if (tx_status->tx_status_0 & + AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES) + ts->ts_status |= AR5K_TXERR_XRETRY; + + if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN) + ts->ts_status |= AR5K_TXERR_FIFO; + + if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED) + ts->ts_status |= AR5K_TXERR_FILT; + } + + return 0; +} + +/* + * Proccess a tx status descriptor on 5212 + */ +static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, struct ath5k_tx_status *ts) +{ + struct ath5k_hw_4w_tx_ctl *tx_ctl; + struct ath5k_hw_tx_status *tx_status; + + ATH5K_TRACE(ah->ah_sc); + + tx_ctl = &desc->ud.ds_tx5212.tx_ctl; + tx_status = &desc->ud.ds_tx5212.tx_stat; + + /* No frame has been send or error */ + if (unlikely(!(tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE))) + return -EINPROGRESS; + + /* + * Get descriptor status + */ + ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0, + AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP); + ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0, + AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT); + ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0, + AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT); + ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1, + AR5K_DESC_TX_STATUS1_SEQ_NUM); + ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1, 
+ AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH); + ts->ts_antenna = (tx_status->tx_status_1 & + AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1; + ts->ts_status = 0; + + ts->ts_final_idx = AR5K_REG_MS(tx_status->tx_status_1, + AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX); + + /* The longretry counter has the number of un-acked retries + * for the final rate. To get the total number of retries + * we have to add the retry counters for the other rates + * as well + */ + ts->ts_retry[ts->ts_final_idx] = ts->ts_longretry; + switch (ts->ts_final_idx) { + case 3: + ts->ts_rate[3] = AR5K_REG_MS(tx_ctl->tx_control_3, + AR5K_4W_TX_DESC_CTL3_XMIT_RATE3); + + ts->ts_retry[2] = AR5K_REG_MS(tx_ctl->tx_control_2, + AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2); + ts->ts_longretry += ts->ts_retry[2]; + /* fall through */ + case 2: + ts->ts_rate[2] = AR5K_REG_MS(tx_ctl->tx_control_3, + AR5K_4W_TX_DESC_CTL3_XMIT_RATE2); + + ts->ts_retry[1] = AR5K_REG_MS(tx_ctl->tx_control_2, + AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1); + ts->ts_longretry += ts->ts_retry[1]; + /* fall through */ + case 1: + ts->ts_rate[1] = AR5K_REG_MS(tx_ctl->tx_control_3, + AR5K_4W_TX_DESC_CTL3_XMIT_RATE1); + + ts->ts_retry[0] = AR5K_REG_MS(tx_ctl->tx_control_2, + AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1); + ts->ts_longretry += ts->ts_retry[0]; + /* fall through */ + case 0: + ts->ts_rate[0] = tx_ctl->tx_control_3 & + AR5K_4W_TX_DESC_CTL3_XMIT_RATE0; + break; + } + + /* TX error */ + if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) { + if (tx_status->tx_status_0 & + AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES) + ts->ts_status |= AR5K_TXERR_XRETRY; + + if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN) + ts->ts_status |= AR5K_TXERR_FIFO; + + if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED) + ts->ts_status |= AR5K_TXERR_FILT; + } + + return 0; +} + +/* + * RX Descriptors + */ + +/* + * Initialize an rx control descriptor + */ +static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, + u32 size, unsigned int flags) +{ + struct ath5k_hw_rx_ctl *rx_ctl; + + ATH5K_TRACE(ah->ah_sc); + rx_ctl = &desc->ud.ds_rx.rx_ctl; + + /* + * Clear the descriptor + * If we don't clean the status descriptor, + * while scanning we get too many results, + * most of them virtual, after some secs + * of scanning system hangs. M.F. 
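+	 * Also note that rx_control_1 can only hold AR5K_DESC_RX_CTL1_BUF_LEN
+	 * bits, so an oversized buffer length is rejected with -EINVAL below.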
+ */ + memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc)); + + /* Setup descriptor */ + rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN; + if (unlikely(rx_ctl->rx_control_1 != size)) + return -EINVAL; + + if (flags & AR5K_RXDESC_INTREQ) + rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ; + + return 0; +} + +/* + * Proccess the rx status descriptor on 5210/5211 + */ +static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, struct ath5k_rx_status *rs) +{ + struct ath5k_hw_rx_status *rx_status; + + rx_status = &desc->ud.ds_rx.u.rx_stat; + + /* No frame received / not ready */ + if (unlikely(!(rx_status->rx_status_1 & + AR5K_5210_RX_DESC_STATUS1_DONE))) + return -EINPROGRESS; + + /* + * Frame receive status + */ + rs->rs_datalen = rx_status->rx_status_0 & + AR5K_5210_RX_DESC_STATUS0_DATA_LEN; + rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0, + AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL); + rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, + AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE); + rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0, + AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA); + rs->rs_more = !!(rx_status->rx_status_0 & + AR5K_5210_RX_DESC_STATUS0_MORE); + /* TODO: this timestamp is 13 bit, later on we assume 15 bit */ + rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, + AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); + rs->rs_status = 0; + rs->rs_phyerr = 0; + + /* + * Key table status + */ + if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID) + rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1, + AR5K_5210_RX_DESC_STATUS1_KEY_INDEX); + else + rs->rs_keyix = AR5K_RXKEYIX_INVALID; + + /* + * Receive/descriptor errors + */ + if (!(rx_status->rx_status_1 & + AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { + if (rx_status->rx_status_1 & + AR5K_5210_RX_DESC_STATUS1_CRC_ERROR) + rs->rs_status |= AR5K_RXERR_CRC; + + if (rx_status->rx_status_1 & + AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN) + rs->rs_status |= AR5K_RXERR_FIFO; + + if (rx_status->rx_status_1 & + AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) { + rs->rs_status |= AR5K_RXERR_PHY; + rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1, + AR5K_5210_RX_DESC_STATUS1_PHY_ERROR); + } + + if (rx_status->rx_status_1 & + AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR) + rs->rs_status |= AR5K_RXERR_DECRYPT; + } + + return 0; +} + +/* + * Proccess the rx status descriptor on 5212 + */ +static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, + struct ath5k_desc *desc, struct ath5k_rx_status *rs) +{ + struct ath5k_hw_rx_status *rx_status; + struct ath5k_hw_rx_error *rx_err; + + ATH5K_TRACE(ah->ah_sc); + rx_status = &desc->ud.ds_rx.u.rx_stat; + + /* Overlay on error */ + rx_err = &desc->ud.ds_rx.u.rx_err; + + /* No frame received / not ready */ + if (unlikely(!(rx_status->rx_status_1 & + AR5K_5212_RX_DESC_STATUS1_DONE))) + return -EINPROGRESS; + + /* + * Frame receive status + */ + rs->rs_datalen = rx_status->rx_status_0 & + AR5K_5212_RX_DESC_STATUS0_DATA_LEN; + rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0, + AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL); + rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, + AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE); + rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0, + AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA); + rs->rs_more = !!(rx_status->rx_status_0 & + AR5K_5212_RX_DESC_STATUS0_MORE); + rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, + AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); + rs->rs_status = 0; + rs->rs_phyerr = 0; + + /* + * Key table 
status + */ + if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID) + rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1, + AR5K_5212_RX_DESC_STATUS1_KEY_INDEX); + else + rs->rs_keyix = AR5K_RXKEYIX_INVALID; + + /* + * Receive/descriptor errors + */ + if (!(rx_status->rx_status_1 & + AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { + if (rx_status->rx_status_1 & + AR5K_5212_RX_DESC_STATUS1_CRC_ERROR) + rs->rs_status |= AR5K_RXERR_CRC; + + if (rx_status->rx_status_1 & + AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) { + rs->rs_status |= AR5K_RXERR_PHY; + rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1, + AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE); + } + + if (rx_status->rx_status_1 & + AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR) + rs->rs_status |= AR5K_RXERR_DECRYPT; + + if (rx_status->rx_status_1 & + AR5K_5212_RX_DESC_STATUS1_MIC_ERROR) + rs->rs_status |= AR5K_RXERR_MIC; + } + + return 0; +} + +/* + * Init function pointers inside ath5k_hw struct + */ +int ath5k_hw_init_desc_functions(struct ath5k_hw *ah) +{ + + if (ah->ah_version != AR5K_AR5210 && + ah->ah_version != AR5K_AR5211 && + ah->ah_version != AR5K_AR5212) + return -ENOTSUPP; + + /* XXX: What is this magic value and where is it used ? */ + if (ah->ah_version == AR5K_AR5212) + ah->ah_magic = AR5K_EEPROM_MAGIC_5212; + else if (ah->ah_version == AR5K_AR5211) + ah->ah_magic = AR5K_EEPROM_MAGIC_5211; + + if (ah->ah_version == AR5K_AR5212) { + ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc; + ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc; + ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc; + ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status; + } else { + ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc; + ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc; + ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_no_mrr; + ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status; + } + + if (ah->ah_version == AR5K_AR5212) + ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status; + else if (ah->ah_version <= AR5K_AR5211) + ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status; + + return 0; +} + diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h new file mode 100644 index 0000000..56158c8 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/desc.h @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2008 Nick Kossifidis + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + */ + +/* + * Internal RX/TX descriptor structures + * (rX: reserved fields possibily used by future versions of the ar5k chipset) + */ + +/* + * common hardware RX control descriptor + */ +struct ath5k_hw_rx_ctl { + u32 rx_control_0; /* RX control word 0 */ + u32 rx_control_1; /* RX control word 1 */ +} __packed; + +/* RX control word 0 field/sflags */ +#define AR5K_DESC_RX_CTL0 0x00000000 + +/* RX control word 1 fields/flags */ +#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff +#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 + +/* + * common hardware RX status descriptor + * 5210/11 and 5212 differ only in the flags defined below + */ +struct ath5k_hw_rx_status { + u32 rx_status_0; /* RX status word 0 */ + u32 rx_status_1; /* RX status word 1 */ +} __packed; + +/* 5210/5211 */ +/* RX status word 0 fields/flags */ +#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff +#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S 15 +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000 +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19 +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000 +#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27 + +/* RX status word 1 fields/flags */ +#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 +#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 +#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 +#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN 0x00000008 +#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 +#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0 +#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S 5 +#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 +#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 +#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S 9 +#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000 +#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15 +#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 + +/* 5212 */ +/* RX status word 0 fields/flags */ +#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff +#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 +#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 +#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000 +#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S 15 +#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000 +#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20 +#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 +#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28 + +/* RX status word 1 fields/flags */ +#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 +#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 +#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 +#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008 +#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010 +#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020 +#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 +#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00 +#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S 9 +#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000 +#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 16 +#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000 + +/* + * common hardware RX error descriptor + */ +struct ath5k_hw_rx_error { + u32 rx_error_0; /* RX status word 0 */ + u32 rx_error_1; /* RX status word 1 */ +} __packed; + +/* RX error 
word 0 fields/flags */ +#define AR5K_RX_DESC_ERROR0 0x00000000 + +/* RX error word 1 fields/flags */ +#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00 +#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8 + +/* PHY Error codes */ +#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00 +#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20 +#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40 +#define AR5K_DESC_RX_PHY_ERROR_RATE 0x60 +#define AR5K_DESC_RX_PHY_ERROR_LENGTH 0x80 +#define AR5K_DESC_RX_PHY_ERROR_64QAM 0xa0 +#define AR5K_DESC_RX_PHY_ERROR_SERVICE 0xc0 +#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR 0xe0 + +/* + * 5210/5211 hardware 2-word TX control descriptor + */ +struct ath5k_hw_2w_tx_ctl { + u32 tx_control_0; /* TX control word 0 */ + u32 tx_control_1; /* TX control word 1 */ +} __packed; + +/* TX control word 0 fields/flags */ +#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff +#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN 0x0003f000 /*[5210 ?]*/ +#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S 12 +#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000 +#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S 18 +#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000 +#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000 +#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET 0x00800000 /*[5210]*/ +#define AR5K_2W_TX_DESC_CTL0_VEOL 0x00800000 /*[5211]*/ +#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE 0x1c000000 /*[5210]*/ +#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S 26 +#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000 +#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000 + +#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT \ + (ah->ah_version == AR5K_AR5210 ? \ + AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \ + AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211) + +#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25 +#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 +#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 + +/* TX control word 1 fields/flags */ +#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff +#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 +#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 0x0007e000 +#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211 0x000fe000 + +#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX \ + (ah->ah_version == AR5K_AR5210 ? 
\ + AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 : \ + AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211) + +#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13 +#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE 0x00700000 /*[5211]*/ +#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S 20 +#define AR5K_2W_TX_DESC_CTL1_NOACK 0x00800000 /*[5211]*/ +#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION 0xfff80000 /*[5210 ?]*/ + +/* Frame types */ +#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0x00 +#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 0x04 +#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 0x08 +#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 0x0c +#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 0x10 + +/* + * 5212 hardware 4-word TX control descriptor + */ +struct ath5k_hw_4w_tx_ctl { + u32 tx_control_0; /* TX control word 0 */ + +#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff +#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000 +#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S 16 +#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000 +#define AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000 +#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000 +#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000 +#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25 +#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000 +#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 +#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000 + + u32 tx_control_1; /* TX control word 1 */ + +#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff +#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000 +#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX 0x000fe000 +#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13 +#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000 +#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S 20 +#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000 +#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000 +#define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S 25 +#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000 +#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S 27 +#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000 +#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S 29 + + u32 tx_control_2; /* TX control word 2 */ + +#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff +#define AR5K_4W_TX_DESC_CTL2_DURATION_UPDATE_ENABLE 0x00008000 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000 +#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28 + + u32 tx_control_3; /* TX control word 3 */ + +#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f +#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0 +#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S 5 +#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00 +#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S 10 +#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000 +#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S 15 +#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 +#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20 +} __packed; + +/* + * Common TX status descriptor + */ +struct ath5k_hw_tx_status { + u32 tx_status_0; /* TX status word 0 */ + u32 tx_status_1; /* TX status word 1 */ +} __packed; + +/* TX status word 0 fields/flags */ +#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 +#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 +#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 +#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008 
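+/*
+ * Note: each field in these descriptors is described by a bit mask plus a
+ * matching _S shift constant.  desc.c reads and writes the fields through
+ * the AR5K_REG_MS()/AR5K_REG_SM() helpers, which amount to the usual
+ * (val & MASK) >> MASK_S and (val << MASK_S) & MASK operations
+ * (SHORT_RETRY_COUNT, for example, is bits 7:4 of tx_status_0:
+ * mask 0x000000f0, shift 4).
+ */
+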
+/*??? +#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT 0x000000f0 +#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT_S 4 +*/ +#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0 +#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S 4 +/*??? +#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT 0x00000f00 +#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT_S 8 +*/ +#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00 +#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S 8 +#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT 0x0000f000 +#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT_S 12 +#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 +#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16 + +/* TX status word 1 fields/flags */ +#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 +#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe +#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1 +#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000 +#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S 13 +#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX 0x00600000 +#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S 21 +#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS 0x00800000 +#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA 0x01000000 + +/* + * 5210/5211 hardware TX descriptor + */ +struct ath5k_hw_5210_tx_desc { + struct ath5k_hw_2w_tx_ctl tx_ctl; + struct ath5k_hw_tx_status tx_stat; +} __packed; + +/* + * 5212 hardware TX descriptor + */ +struct ath5k_hw_5212_tx_desc { + struct ath5k_hw_4w_tx_ctl tx_ctl; + struct ath5k_hw_tx_status tx_stat; +} __packed; + +/* + * common hardware RX descriptor + */ +struct ath5k_hw_all_rx_desc { + struct ath5k_hw_rx_ctl rx_ctl; + union { + struct ath5k_hw_rx_status rx_stat; + struct ath5k_hw_rx_error rx_err; + } u; +} __packed; + +/* + * Atheros hardware descriptor + * This is read and written to by the hardware + */ +struct ath5k_desc { + u32 ds_link; /* physical address of the next descriptor */ + u32 ds_data; /* physical address of data buffer (skb) */ + + union { + struct ath5k_hw_5210_tx_desc ds_tx5210; + struct ath5k_hw_5212_tx_desc ds_tx5212; + struct ath5k_hw_all_rx_desc ds_rx; + } ud; +} __packed; + +#define AR5K_RXDESC_INTREQ 0x0020 + +#define AR5K_TXDESC_CLRDMASK 0x0001 +#define AR5K_TXDESC_NOACK 0x0002 /*[5211+]*/ +#define AR5K_TXDESC_RTSENA 0x0004 +#define AR5K_TXDESC_CTSENA 0x0008 +#define AR5K_TXDESC_INTREQ 0x0010 +#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/ + diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c new file mode 100644 index 0000000..941b511 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/dma.c @@ -0,0 +1,703 @@ +/* + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2008 Nick Kossifidis + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + */ + +/*************************************\ +* DMA and interrupt masking functions * +\*************************************/ + +/* + * dma.c - DMA and interrupt masking functions + * + * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and + * handle queue setup for 5210 chipset (rest are handled on qcu.c). + * Also we setup interrupt mask register (IMR) and read the various iterrupt + * status registers (ISR). + * + * TODO: Handle SISR on 5211+ and introduce a function to return the queue + * number that resulted the interrupt. + */ + +#include "ath5k.h" +#include "reg.h" +#include "debug.h" +#include "base.h" + +/*********\ +* Receive * +\*********/ + +/** + * ath5k_hw_start_rx_dma - Start DMA receive + * + * @ah: The &struct ath5k_hw + */ +void ath5k_hw_start_rx_dma(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); + ath5k_hw_reg_read(ah, AR5K_CR); +} + +/** + * ath5k_hw_stop_rx_dma - Stop DMA receive + * + * @ah: The &struct ath5k_hw + */ +int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) +{ + unsigned int i; + + ATH5K_TRACE(ah->ah_sc); + ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR); + + /* + * It may take some time to disable the DMA receive unit + */ + for (i = 1000; i > 0 && + (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0; + i--) + udelay(10); + + return i ? 0 : -EBUSY; +} + +/** + * ath5k_hw_get_rxdp - Get RX Descriptor's address + * + * @ah: The &struct ath5k_hw + */ +u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah) +{ + return ath5k_hw_reg_read(ah, AR5K_RXDP); +} + +/** + * ath5k_hw_set_rxdp - Set RX Descriptor's address + * + * @ah: The &struct ath5k_hw + * @phys_addr: RX descriptor address + * + * XXX: Should we check if rx is enabled before setting rxdp ? + */ +void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) +{ + ATH5K_TRACE(ah->ah_sc); + + ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP); +} + + +/**********\ +* Transmit * +\**********/ + +/** + * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue + * + * @ah: The &struct ath5k_hw + * @queue: The hw queue number + * + * Start DMA transmit for a specific queue and since 5210 doesn't have + * QCU/DCU, set up queue parameters for 5210 here based on queue type (one + * queue for normal data and one queue for beacons). For queue setup + * on newer chips check out qcu.c. Returns -EINVAL if queue number is out + * of range or if queue is already disabled. + * + * NOTE: Must be called after setting up tx control descriptor for that + * queue (see below). 
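+ * The expected sequence is: build the tx descriptors, point the queue at
+ * them with ath5k_hw_set_txdp() and only then start DMA here.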
+ */ +int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) +{ + u32 tx_queue; + + ATH5K_TRACE(ah->ah_sc); + AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); + + /* Return if queue is declared inactive */ + if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) + return -EIO; + + if (ah->ah_version == AR5K_AR5210) { + tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); + + /* + * Set the queue by type on 5210 + */ + switch (ah->ah_txq[queue].tqi_type) { + case AR5K_TX_QUEUE_DATA: + tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0; + break; + case AR5K_TX_QUEUE_BEACON: + tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1; + ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE, + AR5K_BSR); + break; + case AR5K_TX_QUEUE_CAB: + tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1; + ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V | + AR5K_BCR_BDMAE, AR5K_BSR); + break; + default: + return -EINVAL; + } + /* Start queue */ + ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); + ath5k_hw_reg_read(ah, AR5K_CR); + } else { + /* Return if queue is disabled */ + if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue)) + return -EIO; + + /* Start queue */ + AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue); + } + + return 0; +} + +/** + * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue + * + * @ah: The &struct ath5k_hw + * @queue: The hw queue number + * + * Stop DMA transmit on a specific hw queue and drain queue so we don't + * have any pending frames. Returns -EBUSY if we still have pending frames, + * -EINVAL if queue number is out of range. + * + */ +int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) +{ + unsigned int i = 40; + u32 tx_queue, pending; + + ATH5K_TRACE(ah->ah_sc); + AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); + + /* Return if queue is declared inactive */ + if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) + return -EIO; + + if (ah->ah_version == AR5K_AR5210) { + tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); + + /* + * Set by queue type + */ + switch (ah->ah_txq[queue].tqi_type) { + case AR5K_TX_QUEUE_DATA: + tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0; + break; + case AR5K_TX_QUEUE_BEACON: + case AR5K_TX_QUEUE_CAB: + /* XXX Fix me... 
*/ + tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1; + ath5k_hw_reg_write(ah, 0, AR5K_BSR); + break; + default: + return -EINVAL; + } + + /* Stop queue */ + ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); + ath5k_hw_reg_read(ah, AR5K_CR); + } else { + /* + * Schedule TX disable and wait until queue is empty + */ + AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue); + + /*Check for pending frames*/ + do { + pending = ath5k_hw_reg_read(ah, + AR5K_QUEUE_STATUS(queue)) & + AR5K_QCU_STS_FRMPENDCNT; + udelay(100); + } while (--i && pending); + + /* For 2413+ order PCU to drop packets using + * QUIET mechanism */ + if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) && + pending){ + /* Set periodicity and duration */ + ath5k_hw_reg_write(ah, + AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)| + AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR), + AR5K_QUIET_CTL2); + + /* Enable quiet period for current TSF */ + ath5k_hw_reg_write(ah, + AR5K_QUIET_CTL1_QT_EN | + AR5K_REG_SM(ath5k_hw_reg_read(ah, + AR5K_TSF_L32_5211) >> 10, + AR5K_QUIET_CTL1_NEXT_QT_TSF), + AR5K_QUIET_CTL1); + + /* Force channel idle high */ + AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211, + AR5K_DIAG_SW_CHANEL_IDLE_HIGH); + + /* Wait a while and disable mechanism */ + udelay(200); + AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1, + AR5K_QUIET_CTL1_QT_EN); + + /* Re-check for pending frames */ + i = 40; + do { + pending = ath5k_hw_reg_read(ah, + AR5K_QUEUE_STATUS(queue)) & + AR5K_QCU_STS_FRMPENDCNT; + udelay(100); + } while (--i && pending); + + AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211, + AR5K_DIAG_SW_CHANEL_IDLE_HIGH); + } + + /* Clear register */ + ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD); + if (pending) + return -EBUSY; + } + + /* TODO: Check for success on 5210 else return error */ + return 0; +} + +/** + * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue + * + * @ah: The &struct ath5k_hw + * @queue: The hw queue number + * + * Get TX descriptor's address for a specific queue. For 5210 we ignore + * the queue number and use tx queue type since we only have 2 queues. + * We use TXDP0 for normal data queue and TXDP1 for beacon queue. + * For newer chips with QCU/DCU we just read the corresponding TXDP register. + * + * XXX: Is TXDP read and clear ? + */ +u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) +{ + u16 tx_reg; + + ATH5K_TRACE(ah->ah_sc); + AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); + + /* + * Get the transmit queue descriptor pointer from the selected queue + */ + /*5210 doesn't have QCU*/ + if (ah->ah_version == AR5K_AR5210) { + switch (ah->ah_txq[queue].tqi_type) { + case AR5K_TX_QUEUE_DATA: + tx_reg = AR5K_NOQCU_TXDP0; + break; + case AR5K_TX_QUEUE_BEACON: + case AR5K_TX_QUEUE_CAB: + tx_reg = AR5K_NOQCU_TXDP1; + break; + default: + return 0xffffffff; + } + } else { + tx_reg = AR5K_QUEUE_TXDP(queue); + } + + return ath5k_hw_reg_read(ah, tx_reg); +} + +/** + * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue + * + * @ah: The &struct ath5k_hw + * @queue: The hw queue number + * + * Set TX descriptor's address for a specific queue. For 5210 we ignore + * the queue number and we use tx queue type since we only have 2 queues + * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue. + * For newer chips with QCU/DCU we just set the corresponding TXDP register. + * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still + * active. 
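+ * On 5211+ the queue's TXE bit is checked first, so the queue should be
+ * stopped (see ath5k_hw_stop_tx_dma) before TXDP is updated.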
+ */ +int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) +{ + u16 tx_reg; + + ATH5K_TRACE(ah->ah_sc); + AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); + + /* + * Set the transmit queue descriptor pointer register by type + * on 5210 + */ + if (ah->ah_version == AR5K_AR5210) { + switch (ah->ah_txq[queue].tqi_type) { + case AR5K_TX_QUEUE_DATA: + tx_reg = AR5K_NOQCU_TXDP0; + break; + case AR5K_TX_QUEUE_BEACON: + case AR5K_TX_QUEUE_CAB: + tx_reg = AR5K_NOQCU_TXDP1; + break; + default: + return -EINVAL; + } + } else { + /* + * Set the transmit queue descriptor pointer for + * the selected queue on QCU for 5211+ + * (this won't work if the queue is still active) + */ + if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) + return -EIO; + + tx_reg = AR5K_QUEUE_TXDP(queue); + } + + /* Set descriptor pointer */ + ath5k_hw_reg_write(ah, phys_addr, tx_reg); + + return 0; +} + +/** + * ath5k_hw_update_tx_triglevel - Update tx trigger level + * + * @ah: The &struct ath5k_hw + * @increase: Flag to force increase of trigger level + * + * This function increases/decreases the tx trigger level for the tx fifo + * buffer (aka FIFO threshold) that is used to indicate when PCU flushes + * the buffer and transmits it's data. Lowering this results sending small + * frames more quickly but can lead to tx underruns, raising it a lot can + * result other problems (i think bmiss is related). Right now we start with + * the lowest possible (64Bytes) and if we get tx underrun we increase it using + * the increase flag. Returns -EIO if we have have reached maximum/minimum. + * + * XXX: Link this with tx DMA size ? + * XXX: Use it to save interrupts ? + * TODO: Needs testing, i think it's related to bmiss... + */ +int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) +{ + u32 trigger_level, imr; + int ret = -EIO; + + ATH5K_TRACE(ah->ah_sc); + + /* + * Disable interrupts by setting the mask + */ + imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL); + + trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG), + AR5K_TXCFG_TXFULL); + + if (!increase) { + if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES) + goto done; + } else + trigger_level += + ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2); + + /* + * Update trigger level on success + */ + if (ah->ah_version == AR5K_AR5210) + ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL); + else + AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG, + AR5K_TXCFG_TXFULL, trigger_level); + + ret = 0; + +done: + /* + * Restore interrupt mask + */ + ath5k_hw_set_imr(ah, imr); + + return ret; +} + +/*******************\ +* Interrupt masking * +\*******************/ + +/** + * ath5k_hw_is_intr_pending - Check if we have pending interrupts + * + * @ah: The &struct ath5k_hw + * + * Check if we have pending interrupts to process. Returns 1 if we + * have pending interrupts and 0 if we haven't. + */ +bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0; +} + +/** + * ath5k_hw_get_isr - Get interrupt status + * + * @ah: The @struct ath5k_hw + * @interrupt_mask: Driver's interrupt mask used to filter out + * interrupts in sw. + * + * This function is used inside our interrupt handler to determine the reason + * for the interrupt by reading Primary Interrupt Status Register. Returns an + * abstract interrupt status mask which is mostly ISR with some uncommon bits + * being mapped on some standard non hw-specific positions + * (check out &ath5k_int). 
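+ *
+ * Per-queue tx causes (TXOK/TXDESC/TXERR/TXEOL/TXURN) are additionally
+ * collected from the secondary status registers into ah->ah_txq_isr.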
+ * + * NOTE: We use read-and-clear register, so after this function is called ISR + * is zeroed. + */ +int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) +{ + u32 data; + + ATH5K_TRACE(ah->ah_sc); + + /* + * Read interrupt status from the Interrupt Status register + * on 5210 + */ + if (ah->ah_version == AR5K_AR5210) { + data = ath5k_hw_reg_read(ah, AR5K_ISR); + if (unlikely(data == AR5K_INT_NOCARD)) { + *interrupt_mask = data; + return -ENODEV; + } + } else { + /* + * Read interrupt status from Interrupt + * Status Register shadow copy (Read And Clear) + * + * Note: PISR/SISR Not available on 5210 + */ + data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR); + if (unlikely(data == AR5K_INT_NOCARD)) { + *interrupt_mask = data; + return -ENODEV; + } + } + + /* + * Get abstract interrupt mask (driver-compatible) + */ + *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr; + + if (ah->ah_version != AR5K_AR5210) { + u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2); + + /*HIU = Host Interface Unit (PCI etc)*/ + if (unlikely(data & (AR5K_ISR_HIUERR))) + *interrupt_mask |= AR5K_INT_FATAL; + + /*Beacon Not Ready*/ + if (unlikely(data & (AR5K_ISR_BNR))) + *interrupt_mask |= AR5K_INT_BNR; + + if (unlikely(sisr2 & (AR5K_SISR2_SSERR | + AR5K_SISR2_DPERR | + AR5K_SISR2_MCABT))) + *interrupt_mask |= AR5K_INT_FATAL; + + if (data & AR5K_ISR_TIM) + *interrupt_mask |= AR5K_INT_TIM; + + if (data & AR5K_ISR_BCNMISC) { + if (sisr2 & AR5K_SISR2_TIM) + *interrupt_mask |= AR5K_INT_TIM; + if (sisr2 & AR5K_SISR2_DTIM) + *interrupt_mask |= AR5K_INT_DTIM; + if (sisr2 & AR5K_SISR2_DTIM_SYNC) + *interrupt_mask |= AR5K_INT_DTIM_SYNC; + if (sisr2 & AR5K_SISR2_BCN_TIMEOUT) + *interrupt_mask |= AR5K_INT_BCN_TIMEOUT; + if (sisr2 & AR5K_SISR2_CAB_TIMEOUT) + *interrupt_mask |= AR5K_INT_CAB_TIMEOUT; + } + + if (data & AR5K_ISR_RXDOPPLER) + *interrupt_mask |= AR5K_INT_RX_DOPPLER; + if (data & AR5K_ISR_QCBRORN) { + *interrupt_mask |= AR5K_INT_QCBRORN; + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR3), + AR5K_SISR3_QCBRORN); + } + if (data & AR5K_ISR_QCBRURN) { + *interrupt_mask |= AR5K_INT_QCBRURN; + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR3), + AR5K_SISR3_QCBRURN); + } + if (data & AR5K_ISR_QTRIG) { + *interrupt_mask |= AR5K_INT_QTRIG; + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR4), + AR5K_SISR4_QTRIG); + } + + if (data & AR5K_ISR_TXOK) + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR0), + AR5K_SISR0_QCU_TXOK); + + if (data & AR5K_ISR_TXDESC) + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR0), + AR5K_SISR0_QCU_TXDESC); + + if (data & AR5K_ISR_TXERR) + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR1), + AR5K_SISR1_QCU_TXERR); + + if (data & AR5K_ISR_TXEOL) + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR1), + AR5K_SISR1_QCU_TXEOL); + + if (data & AR5K_ISR_TXURN) + ah->ah_txq_isr |= AR5K_REG_MS( + ath5k_hw_reg_read(ah, AR5K_RAC_SISR2), + AR5K_SISR2_QCU_TXURN); + } else { + if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT + | AR5K_ISR_HIUERR | AR5K_ISR_DPERR))) + *interrupt_mask |= AR5K_INT_FATAL; + + /* + * XXX: BMISS interrupts may occur after association. + * I found this on 5210 code but it needs testing. If this is + * true we should disable them before assoc and re-enable them + * after a successful assoc + some jiffies. + interrupt_mask &= ~AR5K_INT_BMISS; + */ + } + + /* + * In case we didn't handle anything, + * print the register value. 
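+	 * (rate limited via net_ratelimit() so we don't flood the log)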
+ */ + if (unlikely(*interrupt_mask == 0 && net_ratelimit())) + ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr); + + return 0; +} + +/** + * ath5k_hw_set_imr - Set interrupt mask + * + * @ah: The &struct ath5k_hw + * @new_mask: The new interrupt mask to be set + * + * Set the interrupt mask in hw to save interrupts. We do that by mapping + * ath5k_int bits to hw-specific bits to remove abstraction and writing + * Interrupt Mask Register. + */ +enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) +{ + enum ath5k_int old_mask, int_mask; + + old_mask = ah->ah_imr; + + /* + * Disable card interrupts to prevent any race conditions + * (they will be re-enabled afterwards if AR5K_INT GLOBAL + * is set again on the new mask). + */ + if (old_mask & AR5K_INT_GLOBAL) { + ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER); + ath5k_hw_reg_read(ah, AR5K_IER); + } + + /* + * Add additional, chipset-dependent interrupt mask flags + * and write them to the IMR (interrupt mask register). + */ + int_mask = new_mask & AR5K_INT_COMMON; + + if (ah->ah_version != AR5K_AR5210) { + /* Preserve per queue TXURN interrupt mask */ + u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2) + & AR5K_SIMR2_QCU_TXURN; + + if (new_mask & AR5K_INT_FATAL) { + int_mask |= AR5K_IMR_HIUERR; + simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR + | AR5K_SIMR2_DPERR); + } + + /*Beacon Not Ready*/ + if (new_mask & AR5K_INT_BNR) + int_mask |= AR5K_INT_BNR; + + if (new_mask & AR5K_INT_TIM) + int_mask |= AR5K_IMR_TIM; + + if (new_mask & AR5K_INT_TIM) + simr2 |= AR5K_SISR2_TIM; + if (new_mask & AR5K_INT_DTIM) + simr2 |= AR5K_SISR2_DTIM; + if (new_mask & AR5K_INT_DTIM_SYNC) + simr2 |= AR5K_SISR2_DTIM_SYNC; + if (new_mask & AR5K_INT_BCN_TIMEOUT) + simr2 |= AR5K_SISR2_BCN_TIMEOUT; + if (new_mask & AR5K_INT_CAB_TIMEOUT) + simr2 |= AR5K_SISR2_CAB_TIMEOUT; + + if (new_mask & AR5K_INT_RX_DOPPLER) + int_mask |= AR5K_IMR_RXDOPPLER; + + /* Note: Per queue interrupt masks + * are set via reset_tx_queue (qcu.c) */ + ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR); + ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2); + + } else { + if (new_mask & AR5K_INT_FATAL) + int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT + | AR5K_IMR_HIUERR | AR5K_IMR_DPERR); + + ath5k_hw_reg_write(ah, int_mask, AR5K_IMR); + } + + /* If RXNOFRM interrupt is masked disable it + * by setting AR5K_RXNOFRM to zero */ + if (!(new_mask & AR5K_INT_RXNOFRM)) + ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM); + + /* Store new interrupt mask */ + ah->ah_imr = new_mask; + + /* ..re-enable interrupts if AR5K_INT_GLOBAL is set */ + if (new_mask & AR5K_INT_GLOBAL) { + ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER); + ath5k_hw_reg_read(ah, AR5K_IER); + } + + return old_mask; +} + diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c new file mode 100644 index 0000000..6a3f4da --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/eeprom.c @@ -0,0 +1,1818 @@ +/* + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2009 Nick Kossifidis + * Copyright (c) 2008-2009 Felix Fietkau + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/*************************************\ +* EEPROM access functions and helpers * +\*************************************/ + +#include "ath5k.h" +#include "reg.h" +#include "debug.h" +#include "base.h" + +/* + * Read from eeprom + */ +static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data) +{ + u32 status, timeout; + + ATH5K_TRACE(ah->ah_sc); + /* + * Initialize EEPROM access + */ + if (ah->ah_version == AR5K_AR5210) { + AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE); + (void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset)); + } else { + ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE); + AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD, + AR5K_EEPROM_CMD_READ); + } + + for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) { + status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS); + if (status & AR5K_EEPROM_STAT_RDDONE) { + if (status & AR5K_EEPROM_STAT_RDERR) + return -EIO; + *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) & + 0xffff); + return 0; + } + udelay(15); + } + + return -ETIMEDOUT; +} + +/* + * Translate binary channel representation in EEPROM to frequency + */ +static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin, + unsigned int mode) +{ + u16 val; + + if (bin == AR5K_EEPROM_CHANNEL_DIS) + return bin; + + if (mode == AR5K_EEPROM_MODE_11A) { + if (ee->ee_version > AR5K_EEPROM_VERSION_3_2) + val = (5 * bin) + 4800; + else + val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 : + (bin * 10) + 5100; + } else { + if (ee->ee_version > AR5K_EEPROM_VERSION_3_2) + val = bin + 2300; + else + val = bin + 2400; + } + + return val; +} + +/* + * Initialize eeprom & capabilities structs + */ +static int +ath5k_eeprom_init_header(struct ath5k_hw *ah) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + int ret; + u16 val; + u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX; + + /* + * Read values from EEPROM and store them in the capability structure + */ + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic); + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect); + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain); + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version); + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header); + + /* Return if we have an old EEPROM */ + if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0) + return 0; + + /* + * Validate the checksum of the EEPROM date. There are some + * devices with invalid EEPROMs. + */ + AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val); + if (val) { + eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) << + AR5K_EEPROM_SIZE_ENDLOC_SHIFT; + AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val); + eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE; + + /* + * Fail safe check to prevent stupid loops due + * to busted EEPROMs. XXX: This value is likely too + * big still, waiting on a better value. 
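+		 * eep_max is used below as the number of 16 bit EEPROM
+		 * words (counted from AR5K_EEPROM_INFO_BASE) that are
+		 * folded into the checksum.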
+ */ + if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) { + ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: " + "%d (0x%04x) max expected: %d (0x%04x)\n", + eep_max, eep_max, + 3 * AR5K_EEPROM_INFO_MAX, + 3 * AR5K_EEPROM_INFO_MAX); + return -EIO; + } + } + + for (cksum = 0, offset = 0; offset < eep_max; offset++) { + AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val); + cksum ^= val; + } + if (cksum != AR5K_EEPROM_INFO_CKSUM) { + ATH5K_ERR(ah->ah_sc, "Invalid EEPROM " + "checksum: 0x%04x eep_max: 0x%04x (%s)\n", + cksum, eep_max, + eep_max == AR5K_EEPROM_INFO_MAX ? + "default size" : "custom size"); + return -EIO; + } + + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version), + ee_ant_gain); + + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) { + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0); + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1); + + /* XXX: Don't know which versions include these two */ + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC2, ee_misc2); + + if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC3, ee_misc3); + + if (ee->ee_version >= AR5K_EEPROM_VERSION_5_0) { + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC4, ee_misc4); + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC5, ee_misc5); + AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC6, ee_misc6); + } + } + + if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) { + AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val); + ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7; + ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7; + + AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val); + ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7; + ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7; + } + + AR5K_EEPROM_READ(AR5K_EEPROM_IS_HB63, val); + + if ((ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4)) && val) + ee->ee_is_hb63 = true; + else + ee->ee_is_hb63 = false; + + AR5K_EEPROM_READ(AR5K_EEPROM_RFKILL, val); + ee->ee_rfkill_pin = (u8) AR5K_REG_MS(val, AR5K_EEPROM_RFKILL_GPIO_SEL); + ee->ee_rfkill_pol = val & AR5K_EEPROM_RFKILL_POLARITY ? true : false; + + /* Check if PCIE_OFFSET points to PCIE_SERDES_SECTION + * and enable serdes programming if needed. + * + * XXX: Serdes values seem to be fixed so + * no need to read them here, we write them + * during ath5k_hw_attach */ + AR5K_EEPROM_READ(AR5K_EEPROM_PCIE_OFFSET, val); + ee->ee_serdes = (val == AR5K_EEPROM_PCIE_SERDES_SECTION) ? 
+ true : false; + + return 0; +} + + +/* + * Read antenna infos from eeprom + */ +static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset, + unsigned int mode) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + u32 o = *offset; + u16 val; + int ret, i = 0; + + AR5K_EEPROM_READ(o++, val); + ee->ee_switch_settling[mode] = (val >> 8) & 0x7f; + ee->ee_atn_tx_rx[mode] = (val >> 2) & 0x3f; + ee->ee_ant_control[mode][i] = (val << 4) & 0x3f; + + AR5K_EEPROM_READ(o++, val); + ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf; + ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f; + ee->ee_ant_control[mode][i++] = val & 0x3f; + + AR5K_EEPROM_READ(o++, val); + ee->ee_ant_control[mode][i++] = (val >> 10) & 0x3f; + ee->ee_ant_control[mode][i++] = (val >> 4) & 0x3f; + ee->ee_ant_control[mode][i] = (val << 2) & 0x3f; + + AR5K_EEPROM_READ(o++, val); + ee->ee_ant_control[mode][i++] |= (val >> 14) & 0x3; + ee->ee_ant_control[mode][i++] = (val >> 8) & 0x3f; + ee->ee_ant_control[mode][i++] = (val >> 2) & 0x3f; + ee->ee_ant_control[mode][i] = (val << 4) & 0x3f; + + AR5K_EEPROM_READ(o++, val); + ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf; + ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f; + ee->ee_ant_control[mode][i++] = val & 0x3f; + + /* Get antenna switch tables */ + ah->ah_ant_ctl[mode][AR5K_ANT_CTL] = + (ee->ee_ant_control[mode][0] << 4); + ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_A] = + ee->ee_ant_control[mode][1] | + (ee->ee_ant_control[mode][2] << 6) | + (ee->ee_ant_control[mode][3] << 12) | + (ee->ee_ant_control[mode][4] << 18) | + (ee->ee_ant_control[mode][5] << 24); + ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_B] = + ee->ee_ant_control[mode][6] | + (ee->ee_ant_control[mode][7] << 6) | + (ee->ee_ant_control[mode][8] << 12) | + (ee->ee_ant_control[mode][9] << 18) | + (ee->ee_ant_control[mode][10] << 24); + + /* return new offset */ + *offset = o; + + return 0; +} + +/* + * Read supported modes and some mode-specific calibration data + * from eeprom + */ +static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset, + unsigned int mode) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + u32 o = *offset; + u16 val; + int ret; + + ee->ee_n_piers[mode] = 0; + AR5K_EEPROM_READ(o++, val); + ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff); + switch(mode) { + case AR5K_EEPROM_MODE_11A: + ee->ee_ob[mode][3] = (val >> 5) & 0x7; + ee->ee_db[mode][3] = (val >> 2) & 0x7; + ee->ee_ob[mode][2] = (val << 1) & 0x7; + + AR5K_EEPROM_READ(o++, val); + ee->ee_ob[mode][2] |= (val >> 15) & 0x1; + ee->ee_db[mode][2] = (val >> 12) & 0x7; + ee->ee_ob[mode][1] = (val >> 9) & 0x7; + ee->ee_db[mode][1] = (val >> 6) & 0x7; + ee->ee_ob[mode][0] = (val >> 3) & 0x7; + ee->ee_db[mode][0] = val & 0x7; + break; + case AR5K_EEPROM_MODE_11G: + case AR5K_EEPROM_MODE_11B: + ee->ee_ob[mode][1] = (val >> 4) & 0x7; + ee->ee_db[mode][1] = val & 0x7; + break; + } + + AR5K_EEPROM_READ(o++, val); + ee->ee_tx_end2xlna_enable[mode] = (val >> 8) & 0xff; + ee->ee_thr_62[mode] = val & 0xff; + + if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2) + ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 
15 : 28; + + AR5K_EEPROM_READ(o++, val); + ee->ee_tx_end2xpa_disable[mode] = (val >> 8) & 0xff; + ee->ee_tx_frm2xpa_enable[mode] = val & 0xff; + + AR5K_EEPROM_READ(o++, val); + ee->ee_pga_desired_size[mode] = (val >> 8) & 0xff; + + if ((val & 0xff) & 0x80) + ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1); + else + ee->ee_noise_floor_thr[mode] = val & 0xff; + + if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2) + ee->ee_noise_floor_thr[mode] = + mode == AR5K_EEPROM_MODE_11A ? -54 : -1; + + AR5K_EEPROM_READ(o++, val); + ee->ee_xlna_gain[mode] = (val >> 5) & 0xff; + ee->ee_x_gain[mode] = (val >> 1) & 0xf; + ee->ee_xpd[mode] = val & 0x1; + + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) + ee->ee_fixed_bias[mode] = (val >> 13) & 0x1; + + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) { + AR5K_EEPROM_READ(o++, val); + ee->ee_false_detect[mode] = (val >> 6) & 0x7f; + + if (mode == AR5K_EEPROM_MODE_11A) + ee->ee_xr_power[mode] = val & 0x3f; + else { + ee->ee_ob[mode][0] = val & 0x7; + ee->ee_db[mode][0] = (val >> 3) & 0x7; + } + } + + if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) { + ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN; + ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA; + } else { + ee->ee_i_gain[mode] = (val >> 13) & 0x7; + + AR5K_EEPROM_READ(o++, val); + ee->ee_i_gain[mode] |= (val << 3) & 0x38; + + if (mode == AR5K_EEPROM_MODE_11G) { + ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff; + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6) + ee->ee_scaled_cck_delta = (val >> 11) & 0x1f; + } + } + + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 && + mode == AR5K_EEPROM_MODE_11A) { + ee->ee_i_cal[mode] = (val >> 8) & 0x3f; + ee->ee_q_cal[mode] = (val >> 3) & 0x1f; + } + + if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_0) + goto done; + + /* Note: >= v5 have bg freq piers on another location + * so these freq piers are ignored for >= v5 (should be 0xff + * anyway) */ + switch(mode) { + case AR5K_EEPROM_MODE_11A: + if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_1) + break; + + AR5K_EEPROM_READ(o++, val); + ee->ee_margin_tx_rx[mode] = val & 0x3f; + break; + case AR5K_EEPROM_MODE_11B: + AR5K_EEPROM_READ(o++, val); + + ee->ee_pwr_cal_b[0].freq = + ath5k_eeprom_bin2freq(ee, val & 0xff, mode); + if (ee->ee_pwr_cal_b[0].freq != AR5K_EEPROM_CHANNEL_DIS) + ee->ee_n_piers[mode]++; + + ee->ee_pwr_cal_b[1].freq = + ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode); + if (ee->ee_pwr_cal_b[1].freq != AR5K_EEPROM_CHANNEL_DIS) + ee->ee_n_piers[mode]++; + + AR5K_EEPROM_READ(o++, val); + ee->ee_pwr_cal_b[2].freq = + ath5k_eeprom_bin2freq(ee, val & 0xff, mode); + if (ee->ee_pwr_cal_b[2].freq != AR5K_EEPROM_CHANNEL_DIS) + ee->ee_n_piers[mode]++; + + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) + ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f; + break; + case AR5K_EEPROM_MODE_11G: + AR5K_EEPROM_READ(o++, val); + + ee->ee_pwr_cal_g[0].freq = + ath5k_eeprom_bin2freq(ee, val & 0xff, mode); + if (ee->ee_pwr_cal_g[0].freq != AR5K_EEPROM_CHANNEL_DIS) + ee->ee_n_piers[mode]++; + + ee->ee_pwr_cal_g[1].freq = + ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode); + if (ee->ee_pwr_cal_g[1].freq != AR5K_EEPROM_CHANNEL_DIS) + ee->ee_n_piers[mode]++; + + AR5K_EEPROM_READ(o++, val); + ee->ee_turbo_max_power[mode] = val & 0x7f; + ee->ee_xr_power[mode] = (val >> 7) & 0x3f; + + AR5K_EEPROM_READ(o++, val); + ee->ee_pwr_cal_g[2].freq = + ath5k_eeprom_bin2freq(ee, val & 0xff, mode); + if (ee->ee_pwr_cal_g[2].freq != AR5K_EEPROM_CHANNEL_DIS) + ee->ee_n_piers[mode]++; + + if (ah->ah_ee_version >= 
AR5K_EEPROM_VERSION_4_1) + ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f; + + AR5K_EEPROM_READ(o++, val); + ee->ee_i_cal[mode] = (val >> 8) & 0x3f; + ee->ee_q_cal[mode] = (val >> 3) & 0x1f; + + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) { + AR5K_EEPROM_READ(o++, val); + ee->ee_cck_ofdm_gain_delta = val & 0xff; + } + break; + } + + /* + * Read turbo mode information on newer EEPROM versions + */ + if (ee->ee_version < AR5K_EEPROM_VERSION_5_0) + goto done; + + switch (mode){ + case AR5K_EEPROM_MODE_11A: + ee->ee_switch_settling_turbo[mode] = (val >> 6) & 0x7f; + + ee->ee_atn_tx_rx_turbo[mode] = (val >> 13) & 0x7; + AR5K_EEPROM_READ(o++, val); + ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x7) << 3; + ee->ee_margin_tx_rx_turbo[mode] = (val >> 3) & 0x3f; + + ee->ee_adc_desired_size_turbo[mode] = (val >> 9) & 0x7f; + AR5K_EEPROM_READ(o++, val); + ee->ee_adc_desired_size_turbo[mode] |= (val & 0x1) << 7; + ee->ee_pga_desired_size_turbo[mode] = (val >> 1) & 0xff; + + if (AR5K_EEPROM_EEMAP(ee->ee_misc0) >=2) + ee->ee_pd_gain_overlap = (val >> 9) & 0xf; + break; + case AR5K_EEPROM_MODE_11G: + ee->ee_switch_settling_turbo[mode] = (val >> 8) & 0x7f; + + ee->ee_atn_tx_rx_turbo[mode] = (val >> 15) & 0x7; + AR5K_EEPROM_READ(o++, val); + ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x1f) << 1; + ee->ee_margin_tx_rx_turbo[mode] = (val >> 5) & 0x3f; + + ee->ee_adc_desired_size_turbo[mode] = (val >> 11) & 0x7f; + AR5K_EEPROM_READ(o++, val); + ee->ee_adc_desired_size_turbo[mode] |= (val & 0x7) << 5; + ee->ee_pga_desired_size_turbo[mode] = (val >> 3) & 0xff; + break; + } + +done: + /* return new offset */ + *offset = o; + + return 0; +} + +/* Read mode-specific data (except power calibration data) */ +static int +ath5k_eeprom_init_modes(struct ath5k_hw *ah) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + u32 mode_offset[3]; + unsigned int mode; + u32 offset; + int ret; + + /* + * Get values for all modes + */ + mode_offset[AR5K_EEPROM_MODE_11A] = AR5K_EEPROM_MODES_11A(ah->ah_ee_version); + mode_offset[AR5K_EEPROM_MODE_11B] = AR5K_EEPROM_MODES_11B(ah->ah_ee_version); + mode_offset[AR5K_EEPROM_MODE_11G] = AR5K_EEPROM_MODES_11G(ah->ah_ee_version); + + ee->ee_turbo_max_power[AR5K_EEPROM_MODE_11A] = + AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header); + + for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) { + offset = mode_offset[mode]; + + ret = ath5k_eeprom_read_ants(ah, &offset, mode); + if (ret) + return ret; + + ret = ath5k_eeprom_read_modes(ah, &offset, mode); + if (ret) + return ret; + } + + /* override for older eeprom versions for better performance */ + if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2) { + ee->ee_thr_62[AR5K_EEPROM_MODE_11A] = 15; + ee->ee_thr_62[AR5K_EEPROM_MODE_11B] = 28; + ee->ee_thr_62[AR5K_EEPROM_MODE_11G] = 28; + } + + return 0; +} + +/* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff + * frequency mask) */ +static inline int +ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max, + struct ath5k_chan_pcal_info *pc, unsigned int mode) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + int o = *offset; + int i = 0; + u8 freq1, freq2; + int ret; + u16 val; + + ee->ee_n_piers[mode] = 0; + while(i < max) { + AR5K_EEPROM_READ(o++, val); + + freq1 = val & 0xff; + if (!freq1) + break; + + pc[i++].freq = ath5k_eeprom_bin2freq(ee, + freq1, mode); + ee->ee_n_piers[mode]++; + + freq2 = (val >> 8) & 0xff; + if (!freq2) + break; + + pc[i++].freq = ath5k_eeprom_bin2freq(ee, + freq2, mode); + 
ee->ee_n_piers[mode]++; + } + + /* return new offset */ + *offset = o; + + return 0; +} + +/* Read frequency piers for 802.11a */ +static int +ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a; + int i, ret; + u16 val; + u8 mask; + + if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) { + ath5k_eeprom_read_freq_list(ah, &offset, + AR5K_EEPROM_N_5GHZ_CHAN, pcal, + AR5K_EEPROM_MODE_11A); + } else { + mask = AR5K_EEPROM_FREQ_M(ah->ah_ee_version); + + AR5K_EEPROM_READ(offset++, val); + pcal[0].freq = (val >> 9) & mask; + pcal[1].freq = (val >> 2) & mask; + pcal[2].freq = (val << 5) & mask; + + AR5K_EEPROM_READ(offset++, val); + pcal[2].freq |= (val >> 11) & 0x1f; + pcal[3].freq = (val >> 4) & mask; + pcal[4].freq = (val << 3) & mask; + + AR5K_EEPROM_READ(offset++, val); + pcal[4].freq |= (val >> 13) & 0x7; + pcal[5].freq = (val >> 6) & mask; + pcal[6].freq = (val << 1) & mask; + + AR5K_EEPROM_READ(offset++, val); + pcal[6].freq |= (val >> 15) & 0x1; + pcal[7].freq = (val >> 8) & mask; + pcal[8].freq = (val >> 1) & mask; + pcal[9].freq = (val << 6) & mask; + + AR5K_EEPROM_READ(offset++, val); + pcal[9].freq |= (val >> 10) & 0x3f; + + /* Fixed number of piers */ + ee->ee_n_piers[AR5K_EEPROM_MODE_11A] = 10; + + for (i = 0; i < AR5K_EEPROM_N_5GHZ_CHAN; i++) { + pcal[i].freq = ath5k_eeprom_bin2freq(ee, + pcal[i].freq, AR5K_EEPROM_MODE_11A); + } + } + + return 0; +} + +/* Read frequency piers for 802.11bg on eeprom versions >= 5 and eemap >= 2 */ +static inline int +ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info *pcal; + + switch(mode) { + case AR5K_EEPROM_MODE_11B: + pcal = ee->ee_pwr_cal_b; + break; + case AR5K_EEPROM_MODE_11G: + pcal = ee->ee_pwr_cal_g; + break; + default: + return -EINVAL; + } + + ath5k_eeprom_read_freq_list(ah, &offset, + AR5K_EEPROM_N_2GHZ_CHAN_2413, pcal, + mode); + + return 0; +} + +/* + * Read power calibration for RF5111 chips + * + * For RF5111 we have an XPD -eXternal Power Detector- curve + * for each calibrated channel. Each curve has 0,5dB Power steps + * on x axis and PCDAC steps (offsets) on y axis and looks like an + * exponential function. To recreate the curve we read 11 points + * here and interpolate later. + */ + +/* Used to match PCDAC steps with power values on RF5111 chips + * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC + * steps that match with the power values we read from eeprom. On + * older eeprom versions (< 3.2) these steps are equaly spaced at + * 10% of the pcdac curve -until the curve reaches it's maximum- + * (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2) + * these 11 steps are spaced in a different way. This function returns + * the pcdac steps based on eeprom version and curve min/max so that we + * can have pcdac/pwr points. 
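+ *
+ * As a worked example (numbers are purely illustrative): with min = 10,
+ * max = 50 and the evenly spaced table { 0, 10, ..., 100 } the formula
+ * vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100 used below yields
+ * vp[] = { 10, 14, 18, ..., 50 }, i.e. 11 points between min and max.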
+ */ +static inline void +ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp) +{ + static const u16 intercepts3[] = + { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 }; + static const u16 intercepts3_2[] = + { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 }; + const u16 *ip; + int i; + + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2) + ip = intercepts3_2; + else + ip = intercepts3; + + for (i = 0; i < ARRAY_SIZE(intercepts3); i++) + vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100; +} + +/* Convert RF5111 specific data to generic raw data + * used by interpolation code */ +static int +ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode, + struct ath5k_chan_pcal_info *chinfo) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info_rf5111 *pcinfo; + struct ath5k_pdgain_info *pd; + u8 pier, point, idx; + u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; + + /* Fill raw data for each calibration pier */ + for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { + + pcinfo = &chinfo[pier].rf5111_info; + + /* Allocate pd_curves for this cal pier */ + chinfo[pier].pd_curves = + kcalloc(AR5K_EEPROM_N_PD_CURVES, + sizeof(struct ath5k_pdgain_info), + GFP_KERNEL); + + if (!chinfo[pier].pd_curves) + return -ENOMEM; + + /* Only one curve for RF5111 + * find out which one and place + * in in pd_curves. + * Note: ee_x_gain is reversed here */ + for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) { + + if (!((ee->ee_x_gain[mode] >> idx) & 0x1)) { + pdgain_idx[0] = idx; + break; + } + } + + ee->ee_pd_gains[mode] = 1; + + pd = &chinfo[pier].pd_curves[idx]; + + pd->pd_points = AR5K_EEPROM_N_PWR_POINTS_5111; + + /* Allocate pd points for this curve */ + pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111, + sizeof(u8), GFP_KERNEL); + if (!pd->pd_step) + return -ENOMEM; + + pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111, + sizeof(s16), GFP_KERNEL); + if (!pd->pd_pwr) + return -ENOMEM; + + /* Fill raw dataset + * (convert power to 0.25dB units + * for RF5112 combatibility) */ + for (point = 0; point < pd->pd_points; point++) { + + /* Absolute values */ + pd->pd_pwr[point] = 2 * pcinfo->pwr[point]; + + /* Already sorted */ + pd->pd_step[point] = pcinfo->pcdac[point]; + } + + /* Set min/max pwr */ + chinfo[pier].min_pwr = pd->pd_pwr[0]; + chinfo[pier].max_pwr = pd->pd_pwr[10]; + + } + + return 0; +} + +/* Parse EEPROM data */ +static int +ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info *pcal; + int offset, ret; + int i; + u16 val; + + offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); + switch(mode) { + case AR5K_EEPROM_MODE_11A: + if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) + return 0; + + ret = ath5k_eeprom_init_11a_pcal_freq(ah, + offset + AR5K_EEPROM_GROUP1_OFFSET); + if (ret < 0) + return ret; + + offset += AR5K_EEPROM_GROUP2_OFFSET; + pcal = ee->ee_pwr_cal_a; + break; + case AR5K_EEPROM_MODE_11B: + if (!AR5K_EEPROM_HDR_11B(ee->ee_header) && + !AR5K_EEPROM_HDR_11G(ee->ee_header)) + return 0; + + pcal = ee->ee_pwr_cal_b; + offset += AR5K_EEPROM_GROUP3_OFFSET; + + /* fixed piers */ + pcal[0].freq = 2412; + pcal[1].freq = 2447; + pcal[2].freq = 2484; + ee->ee_n_piers[mode] = 3; + break; + case AR5K_EEPROM_MODE_11G: + if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) + return 0; + + pcal = ee->ee_pwr_cal_g; + offset += AR5K_EEPROM_GROUP4_OFFSET; + + /* fixed piers */ + pcal[0].freq = 2312; + pcal[1].freq = 2412; + pcal[2].freq = 2484; + 
ee->ee_n_piers[mode] = 3; + break; + default: + return -EINVAL; + } + + for (i = 0; i < ee->ee_n_piers[mode]; i++) { + struct ath5k_chan_pcal_info_rf5111 *cdata = + &pcal[i].rf5111_info; + + AR5K_EEPROM_READ(offset++, val); + cdata->pcdac_max = ((val >> 10) & AR5K_EEPROM_PCDAC_M); + cdata->pcdac_min = ((val >> 4) & AR5K_EEPROM_PCDAC_M); + cdata->pwr[0] = ((val << 2) & AR5K_EEPROM_POWER_M); + + AR5K_EEPROM_READ(offset++, val); + cdata->pwr[0] |= ((val >> 14) & 0x3); + cdata->pwr[1] = ((val >> 8) & AR5K_EEPROM_POWER_M); + cdata->pwr[2] = ((val >> 2) & AR5K_EEPROM_POWER_M); + cdata->pwr[3] = ((val << 4) & AR5K_EEPROM_POWER_M); + + AR5K_EEPROM_READ(offset++, val); + cdata->pwr[3] |= ((val >> 12) & 0xf); + cdata->pwr[4] = ((val >> 6) & AR5K_EEPROM_POWER_M); + cdata->pwr[5] = (val & AR5K_EEPROM_POWER_M); + + AR5K_EEPROM_READ(offset++, val); + cdata->pwr[6] = ((val >> 10) & AR5K_EEPROM_POWER_M); + cdata->pwr[7] = ((val >> 4) & AR5K_EEPROM_POWER_M); + cdata->pwr[8] = ((val << 2) & AR5K_EEPROM_POWER_M); + + AR5K_EEPROM_READ(offset++, val); + cdata->pwr[8] |= ((val >> 14) & 0x3); + cdata->pwr[9] = ((val >> 8) & AR5K_EEPROM_POWER_M); + cdata->pwr[10] = ((val >> 2) & AR5K_EEPROM_POWER_M); + + ath5k_get_pcdac_intercepts(ah, cdata->pcdac_min, + cdata->pcdac_max, cdata->pcdac); + } + + return ath5k_eeprom_convert_pcal_info_5111(ah, mode, pcal); +} + + +/* + * Read power calibration for RF5112 chips + * + * For RF5112 we have 4 XPD -eXternal Power Detector- curves + * for each calibrated channel on 0, -6, -12 and -18dbm but we only + * use the higher (3) and the lower (0) curves. Each curve has 0.5dB + * power steps on x axis and PCDAC steps on y axis and looks like a + * linear function. To recreate the curve and pass the power values + * on hw, we read 4 points for xpd 0 (lower gain -> max power) + * and 3 points for xpd 3 (higher gain -> lower power) here and + * interpolate later. + * + * Note: Many vendors just use xpd 0 so xpd 3 is zeroed. 
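+ *
+ * As a rough illustration (made up numbers, not from a real EEPROM):
+ * a pier could carry pwr_x0[] = { 22, 40, 58, 72 } in 0.25dB units
+ * (5.5 to 18dBm) together with its pcdac_x0[] steps, plus the 3
+ * (pwr_x3, pcdac_x3) points of the low power curve; the phy code
+ * then interpolates between these points to build the full PCDAC table.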
+ */ + +/* Convert RF5112 specific data to generic raw data + * used by interpolation code */ +static int +ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode, + struct ath5k_chan_pcal_info *chinfo) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info_rf5112 *pcinfo; + u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; + unsigned int pier, pdg, point; + + /* Fill raw data for each calibration pier */ + for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { + + pcinfo = &chinfo[pier].rf5112_info; + + /* Allocate pd_curves for this cal pier */ + chinfo[pier].pd_curves = + kcalloc(AR5K_EEPROM_N_PD_CURVES, + sizeof(struct ath5k_pdgain_info), + GFP_KERNEL); + + if (!chinfo[pier].pd_curves) + return -ENOMEM; + + /* Fill pd_curves */ + for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { + + u8 idx = pdgain_idx[pdg]; + struct ath5k_pdgain_info *pd = + &chinfo[pier].pd_curves[idx]; + + /* Lowest gain curve (max power) */ + if (pdg == 0) { + /* One more point for better accuracy */ + pd->pd_points = AR5K_EEPROM_N_XPD0_POINTS; + + /* Allocate pd points for this curve */ + pd->pd_step = kcalloc(pd->pd_points, + sizeof(u8), GFP_KERNEL); + + if (!pd->pd_step) + return -ENOMEM; + + pd->pd_pwr = kcalloc(pd->pd_points, + sizeof(s16), GFP_KERNEL); + + if (!pd->pd_pwr) + return -ENOMEM; + + + /* Fill raw dataset + * (all power levels are in 0.25dB units) */ + pd->pd_step[0] = pcinfo->pcdac_x0[0]; + pd->pd_pwr[0] = pcinfo->pwr_x0[0]; + + for (point = 1; point < pd->pd_points; + point++) { + /* Absolute values */ + pd->pd_pwr[point] = + pcinfo->pwr_x0[point]; + + /* Deltas */ + pd->pd_step[point] = + pd->pd_step[point - 1] + + pcinfo->pcdac_x0[point]; + } + + /* Set min power for this frequency */ + chinfo[pier].min_pwr = pd->pd_pwr[0]; + + /* Highest gain curve (min power) */ + } else if (pdg == 1) { + + pd->pd_points = AR5K_EEPROM_N_XPD3_POINTS; + + /* Allocate pd points for this curve */ + pd->pd_step = kcalloc(pd->pd_points, + sizeof(u8), GFP_KERNEL); + + if (!pd->pd_step) + return -ENOMEM; + + pd->pd_pwr = kcalloc(pd->pd_points, + sizeof(s16), GFP_KERNEL); + + if (!pd->pd_pwr) + return -ENOMEM; + + /* Fill raw dataset + * (all power levels are in 0.25dB units) */ + for (point = 0; point < pd->pd_points; + point++) { + /* Absolute values */ + pd->pd_pwr[point] = + pcinfo->pwr_x3[point]; + + /* Fixed points */ + pd->pd_step[point] = + pcinfo->pcdac_x3[point]; + } + + /* Since we have a higher gain curve + * override min power */ + chinfo[pier].min_pwr = pd->pd_pwr[0]; + } + } + } + + return 0; +} + +/* Parse EEPROM data */ +static int +ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info; + struct ath5k_chan_pcal_info *gen_chan_info; + u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; + u32 offset; + u8 i, c; + u16 val; + int ret; + u8 pd_gains = 0; + + /* Count how many curves we have and + * identify them (which one of the 4 + * available curves we have on each count). 
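+ * For example an ee_x_gain mask of 0x9 (bits 0 and 3 set, illustrative
+ * only) selects curves 0 and 3 and gives pd_gains = 2; only 1 or 2
+ * gains are accepted here.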
+ * Curves are stored from lower (x0) to + * higher (x3) gain */ + for (i = 0; i < AR5K_EEPROM_N_PD_CURVES; i++) { + /* ee_x_gain[mode] is x gain mask */ + if ((ee->ee_x_gain[mode] >> i) & 0x1) + pdgain_idx[pd_gains++] = i; + } + ee->ee_pd_gains[mode] = pd_gains; + + if (pd_gains == 0 || pd_gains > 2) + return -EINVAL; + + switch (mode) { + case AR5K_EEPROM_MODE_11A: + /* + * Read 5GHz EEPROM channels + */ + offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); + ath5k_eeprom_init_11a_pcal_freq(ah, offset); + + offset += AR5K_EEPROM_GROUP2_OFFSET; + gen_chan_info = ee->ee_pwr_cal_a; + break; + case AR5K_EEPROM_MODE_11B: + offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); + if (AR5K_EEPROM_HDR_11A(ee->ee_header)) + offset += AR5K_EEPROM_GROUP3_OFFSET; + + /* NB: frequency piers parsed during mode init */ + gen_chan_info = ee->ee_pwr_cal_b; + break; + case AR5K_EEPROM_MODE_11G: + offset = AR5K_EEPROM_GROUPS_START(ee->ee_version); + if (AR5K_EEPROM_HDR_11A(ee->ee_header)) + offset += AR5K_EEPROM_GROUP4_OFFSET; + else if (AR5K_EEPROM_HDR_11B(ee->ee_header)) + offset += AR5K_EEPROM_GROUP2_OFFSET; + + /* NB: frequency piers parsed during mode init */ + gen_chan_info = ee->ee_pwr_cal_g; + break; + default: + return -EINVAL; + } + + for (i = 0; i < ee->ee_n_piers[mode]; i++) { + chan_pcal_info = &gen_chan_info[i].rf5112_info; + + /* Power values in quarter dB + * for the lower xpd gain curve + * (0 dBm -> higher output power) */ + for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) { + AR5K_EEPROM_READ(offset++, val); + chan_pcal_info->pwr_x0[c] = (s8) (val & 0xff); + chan_pcal_info->pwr_x0[++c] = (s8) ((val >> 8) & 0xff); + } + + /* PCDAC steps + * corresponding to the above power + * measurements */ + AR5K_EEPROM_READ(offset++, val); + chan_pcal_info->pcdac_x0[1] = (val & 0x1f); + chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f); + chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f); + + /* Power values in quarter dB + * for the higher xpd gain curve + * (18 dBm -> lower output power) */ + AR5K_EEPROM_READ(offset++, val); + chan_pcal_info->pwr_x3[0] = (s8) (val & 0xff); + chan_pcal_info->pwr_x3[1] = (s8) ((val >> 8) & 0xff); + + AR5K_EEPROM_READ(offset++, val); + chan_pcal_info->pwr_x3[2] = (val & 0xff); + + /* PCDAC steps + * corresponding to the above power + * measurements (fixed) */ + chan_pcal_info->pcdac_x3[0] = 20; + chan_pcal_info->pcdac_x3[1] = 35; + chan_pcal_info->pcdac_x3[2] = 63; + + if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) { + chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0x3f); + + /* Last xpd0 power level is also channel maximum */ + gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3]; + } else { + chan_pcal_info->pcdac_x0[0] = 1; + gen_chan_info[i].max_pwr = (s8) ((val >> 8) & 0xff); + } + + } + + return ath5k_eeprom_convert_pcal_info_5112(ah, mode, gen_chan_info); +} + + +/* + * Read power calibration for RF2413 chips + * + * For RF2413 we have a Power to PDDAC table (Power Detector) + * instead of a PCDAC and 4 pd gain curves for each calibrated channel. + * Each curve has power on x axis in 0.5 db steps and PDDADC steps on y + * axis and looks like an exponential function like the RF5111 curve. + * + * To recreate the curves we read here the points and interpolate + * later. Note that in most cases only 2 (higher and lower) curves are + * used (like RF5112) but vendors have the oportunity to include all + * 4 curves on eeprom. The final curve (higher power) has an extra + * point for better accuracy like RF5112. 
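+ *
+ * The points are stored as deltas: only the first (pwr_i, pddac_i) pair
+ * of each curve is absolute, every following point is an increment over
+ * the previous one. Purely as an illustration, pwr_i = 10 with
+ * pwr[] = { 4, 4, 6 } expands to 10, 12, 14 and 17 dB (the conversion
+ * code treats pwr_i as whole dB and each delta as 0.5dB, then rescales
+ * everything to quarter dB so the RF5112 interpolation path can be
+ * reused).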
+ */ + +/* For RF2413 power calibration data doesn't start on a fixed location and + * if a mode is not supported, it's section is missing -not zeroed-. + * So we need to calculate the starting offset for each section by using + * these two functions */ + +/* Return the size of each section based on the mode and the number of pd + * gains available (maximum 4). */ +static inline unsigned int +ath5k_pdgains_size_2413(struct ath5k_eeprom_info *ee, unsigned int mode) +{ + static const unsigned int pdgains_size[] = { 4, 6, 9, 12 }; + unsigned int sz; + + sz = pdgains_size[ee->ee_pd_gains[mode] - 1]; + sz *= ee->ee_n_piers[mode]; + + return sz; +} + +/* Return the starting offset for a section based on the modes supported + * and each section's size. */ +static unsigned int +ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode) +{ + u32 offset = AR5K_EEPROM_CAL_DATA_START(ee->ee_misc4); + + switch(mode) { + case AR5K_EEPROM_MODE_11G: + if (AR5K_EEPROM_HDR_11B(ee->ee_header)) + offset += ath5k_pdgains_size_2413(ee, + AR5K_EEPROM_MODE_11B) + + AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; + /* fall through */ + case AR5K_EEPROM_MODE_11B: + if (AR5K_EEPROM_HDR_11A(ee->ee_header)) + offset += ath5k_pdgains_size_2413(ee, + AR5K_EEPROM_MODE_11A) + + AR5K_EEPROM_N_5GHZ_CHAN / 2; + /* fall through */ + case AR5K_EEPROM_MODE_11A: + break; + default: + break; + } + + return offset; +} + +/* Convert RF2413 specific data to generic raw data + * used by interpolation code */ +static int +ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode, + struct ath5k_chan_pcal_info *chinfo) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info_rf2413 *pcinfo; + u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; + unsigned int pier, pdg, point; + + /* Fill raw data for each calibration pier */ + for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { + + pcinfo = &chinfo[pier].rf2413_info; + + /* Allocate pd_curves for this cal pier */ + chinfo[pier].pd_curves = + kcalloc(AR5K_EEPROM_N_PD_CURVES, + sizeof(struct ath5k_pdgain_info), + GFP_KERNEL); + + if (!chinfo[pier].pd_curves) + return -ENOMEM; + + /* Fill pd_curves */ + for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { + + u8 idx = pdgain_idx[pdg]; + struct ath5k_pdgain_info *pd = + &chinfo[pier].pd_curves[idx]; + + /* One more point for the highest power + * curve (lowest gain) */ + if (pdg == ee->ee_pd_gains[mode] - 1) + pd->pd_points = AR5K_EEPROM_N_PD_POINTS; + else + pd->pd_points = AR5K_EEPROM_N_PD_POINTS - 1; + + /* Allocate pd points for this curve */ + pd->pd_step = kcalloc(pd->pd_points, + sizeof(u8), GFP_KERNEL); + + if (!pd->pd_step) + return -ENOMEM; + + pd->pd_pwr = kcalloc(pd->pd_points, + sizeof(s16), GFP_KERNEL); + + if (!pd->pd_pwr) + return -ENOMEM; + + /* Fill raw dataset + * convert all pwr levels to + * quarter dB for RF5112 combatibility */ + pd->pd_step[0] = pcinfo->pddac_i[pdg]; + pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg]; + + for (point = 1; point < pd->pd_points; point++) { + + pd->pd_pwr[point] = pd->pd_pwr[point - 1] + + 2 * pcinfo->pwr[pdg][point - 1]; + + pd->pd_step[point] = pd->pd_step[point - 1] + + pcinfo->pddac[pdg][point - 1]; + + } + + /* Highest gain curve -> min power */ + if (pdg == 0) + chinfo[pier].min_pwr = pd->pd_pwr[0]; + + /* Lowest gain curve -> max power */ + if (pdg == ee->ee_pd_gains[mode] - 1) + chinfo[pier].max_pwr = + pd->pd_pwr[pd->pd_points - 1]; + } + } + + return 0; +} + +/* Parse EEPROM data */ +static int +ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, 
int mode) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info_rf2413 *pcinfo; + struct ath5k_chan_pcal_info *chinfo; + u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; + u32 offset; + int idx, i, ret; + u16 val; + u8 pd_gains = 0; + + /* Count how many curves we have and + * identify them (which one of the 4 + * available curves we have on each count). + * Curves are stored from higher to + * lower gain so we go backwards */ + for (idx = AR5K_EEPROM_N_PD_CURVES - 1; idx >= 0; idx--) { + /* ee_x_gain[mode] is x gain mask */ + if ((ee->ee_x_gain[mode] >> idx) & 0x1) + pdgain_idx[pd_gains++] = idx; + + } + ee->ee_pd_gains[mode] = pd_gains; + + if (pd_gains == 0) + return -EINVAL; + + offset = ath5k_cal_data_offset_2413(ee, mode); + switch (mode) { + case AR5K_EEPROM_MODE_11A: + if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) + return 0; + + ath5k_eeprom_init_11a_pcal_freq(ah, offset); + offset += AR5K_EEPROM_N_5GHZ_CHAN / 2; + chinfo = ee->ee_pwr_cal_a; + break; + case AR5K_EEPROM_MODE_11B: + if (!AR5K_EEPROM_HDR_11B(ee->ee_header)) + return 0; + + ath5k_eeprom_init_11bg_2413(ah, mode, offset); + offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; + chinfo = ee->ee_pwr_cal_b; + break; + case AR5K_EEPROM_MODE_11G: + if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) + return 0; + + ath5k_eeprom_init_11bg_2413(ah, mode, offset); + offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2; + chinfo = ee->ee_pwr_cal_g; + break; + default: + return -EINVAL; + } + + for (i = 0; i < ee->ee_n_piers[mode]; i++) { + pcinfo = &chinfo[i].rf2413_info; + + /* + * Read pwr_i, pddac_i and the first + * 2 pd points (pwr, pddac) + */ + AR5K_EEPROM_READ(offset++, val); + pcinfo->pwr_i[0] = val & 0x1f; + pcinfo->pddac_i[0] = (val >> 5) & 0x7f; + pcinfo->pwr[0][0] = (val >> 12) & 0xf; + + AR5K_EEPROM_READ(offset++, val); + pcinfo->pddac[0][0] = val & 0x3f; + pcinfo->pwr[0][1] = (val >> 6) & 0xf; + pcinfo->pddac[0][1] = (val >> 10) & 0x3f; + + AR5K_EEPROM_READ(offset++, val); + pcinfo->pwr[0][2] = val & 0xf; + pcinfo->pddac[0][2] = (val >> 4) & 0x3f; + + pcinfo->pwr[0][3] = 0; + pcinfo->pddac[0][3] = 0; + + if (pd_gains > 1) { + /* + * Pd gain 0 is not the last pd gain + * so it only has 2 pd points. + * Continue wih pd gain 1. + */ + pcinfo->pwr_i[1] = (val >> 10) & 0x1f; + + pcinfo->pddac_i[1] = (val >> 15) & 0x1; + AR5K_EEPROM_READ(offset++, val); + pcinfo->pddac_i[1] |= (val & 0x3F) << 1; + + pcinfo->pwr[1][0] = (val >> 6) & 0xf; + pcinfo->pddac[1][0] = (val >> 10) & 0x3f; + + AR5K_EEPROM_READ(offset++, val); + pcinfo->pwr[1][1] = val & 0xf; + pcinfo->pddac[1][1] = (val >> 4) & 0x3f; + pcinfo->pwr[1][2] = (val >> 10) & 0xf; + + pcinfo->pddac[1][2] = (val >> 14) & 0x3; + AR5K_EEPROM_READ(offset++, val); + pcinfo->pddac[1][2] |= (val & 0xF) << 2; + + pcinfo->pwr[1][3] = 0; + pcinfo->pddac[1][3] = 0; + } else if (pd_gains == 1) { + /* + * Pd gain 0 is the last one so + * read the extra point. + */ + pcinfo->pwr[0][3] = (val >> 10) & 0xf; + + pcinfo->pddac[0][3] = (val >> 14) & 0x3; + AR5K_EEPROM_READ(offset++, val); + pcinfo->pddac[0][3] |= (val & 0xF) << 2; + } + + /* + * Proceed with the other pd_gains + * as above. 
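+ * Note that the sections are packed back to back: a curve's data does
+ * not have to end on a 16-bit word boundary, so the next block starts
+ * in whatever bits are left of the word we already read -which is why
+ * the shifts below continue from the last AR5K_EEPROM_READ above.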
+ */ + if (pd_gains > 2) { + pcinfo->pwr_i[2] = (val >> 4) & 0x1f; + pcinfo->pddac_i[2] = (val >> 9) & 0x7f; + + AR5K_EEPROM_READ(offset++, val); + pcinfo->pwr[2][0] = (val >> 0) & 0xf; + pcinfo->pddac[2][0] = (val >> 4) & 0x3f; + pcinfo->pwr[2][1] = (val >> 10) & 0xf; + + pcinfo->pddac[2][1] = (val >> 14) & 0x3; + AR5K_EEPROM_READ(offset++, val); + pcinfo->pddac[2][1] |= (val & 0xF) << 2; + + pcinfo->pwr[2][2] = (val >> 4) & 0xf; + pcinfo->pddac[2][2] = (val >> 8) & 0x3f; + + pcinfo->pwr[2][3] = 0; + pcinfo->pddac[2][3] = 0; + } else if (pd_gains == 2) { + pcinfo->pwr[1][3] = (val >> 4) & 0xf; + pcinfo->pddac[1][3] = (val >> 8) & 0x3f; + } + + if (pd_gains > 3) { + pcinfo->pwr_i[3] = (val >> 14) & 0x3; + AR5K_EEPROM_READ(offset++, val); + pcinfo->pwr_i[3] |= ((val >> 0) & 0x7) << 2; + + pcinfo->pddac_i[3] = (val >> 3) & 0x7f; + pcinfo->pwr[3][0] = (val >> 10) & 0xf; + pcinfo->pddac[3][0] = (val >> 14) & 0x3; + + AR5K_EEPROM_READ(offset++, val); + pcinfo->pddac[3][0] |= (val & 0xF) << 2; + pcinfo->pwr[3][1] = (val >> 4) & 0xf; + pcinfo->pddac[3][1] = (val >> 8) & 0x3f; + + pcinfo->pwr[3][2] = (val >> 14) & 0x3; + AR5K_EEPROM_READ(offset++, val); + pcinfo->pwr[3][2] |= ((val >> 0) & 0x3) << 2; + + pcinfo->pddac[3][2] = (val >> 2) & 0x3f; + pcinfo->pwr[3][3] = (val >> 8) & 0xf; + + pcinfo->pddac[3][3] = (val >> 12) & 0xF; + AR5K_EEPROM_READ(offset++, val); + pcinfo->pddac[3][3] |= ((val >> 0) & 0x3) << 4; + } else if (pd_gains == 3) { + pcinfo->pwr[2][3] = (val >> 14) & 0x3; + AR5K_EEPROM_READ(offset++, val); + pcinfo->pwr[2][3] |= ((val >> 0) & 0x3) << 2; + + pcinfo->pddac[2][3] = (val >> 2) & 0x3f; + } + } + + return ath5k_eeprom_convert_pcal_info_2413(ah, mode, chinfo); +} + + +/* + * Read per rate target power (this is the maximum tx power + * supported by the card). This info is used when setting + * tx power, no matter the channel. + * + * This also works for v5 EEPROMs. 
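+ *
+ * Target powers are stored per rate group in 0.5dB steps, so e.g. a
+ * stored target_power_6to24 of 36 (illustrative) corresponds to 18dBm
+ * for the 6-24Mbit/s rates (or the 1Mbit rate in B mode).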
+ */ +static int +ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_rate_pcal_info *rate_pcal_info; + u8 *rate_target_pwr_num; + u32 offset; + u16 val; + int ret, i; + + offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1); + rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode]; + switch (mode) { + case AR5K_EEPROM_MODE_11A: + offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); + rate_pcal_info = ee->ee_rate_tpwr_a; + ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; + break; + case AR5K_EEPROM_MODE_11B: + offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); + rate_pcal_info = ee->ee_rate_tpwr_b; + ee->ee_rate_target_pwr_num[mode] = 2; /* 3rd is g mode's 1st */ + break; + case AR5K_EEPROM_MODE_11G: + offset += AR5K_EEPROM_TARGET_PWR_OFF_11G(ee->ee_version); + rate_pcal_info = ee->ee_rate_tpwr_g; + ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_2GHZ_CHAN; + break; + default: + return -EINVAL; + } + + /* Different freq mask for older eeproms (<= v3.2) */ + if (ee->ee_version <= AR5K_EEPROM_VERSION_3_2) { + for (i = 0; i < (*rate_target_pwr_num); i++) { + AR5K_EEPROM_READ(offset++, val); + rate_pcal_info[i].freq = + ath5k_eeprom_bin2freq(ee, (val >> 9) & 0x7f, mode); + + rate_pcal_info[i].target_power_6to24 = ((val >> 3) & 0x3f); + rate_pcal_info[i].target_power_36 = (val << 3) & 0x3f; + + AR5K_EEPROM_READ(offset++, val); + + if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS || + val == 0) { + (*rate_target_pwr_num) = i; + break; + } + + rate_pcal_info[i].target_power_36 |= ((val >> 13) & 0x7); + rate_pcal_info[i].target_power_48 = ((val >> 7) & 0x3f); + rate_pcal_info[i].target_power_54 = ((val >> 1) & 0x3f); + } + } else { + for (i = 0; i < (*rate_target_pwr_num); i++) { + AR5K_EEPROM_READ(offset++, val); + rate_pcal_info[i].freq = + ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode); + + rate_pcal_info[i].target_power_6to24 = ((val >> 2) & 0x3f); + rate_pcal_info[i].target_power_36 = (val << 4) & 0x3f; + + AR5K_EEPROM_READ(offset++, val); + + if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS || + val == 0) { + (*rate_target_pwr_num) = i; + break; + } + + rate_pcal_info[i].target_power_36 |= (val >> 12) & 0xf; + rate_pcal_info[i].target_power_48 = ((val >> 6) & 0x3f); + rate_pcal_info[i].target_power_54 = (val & 0x3f); + } + } + + return 0; +} + +/* + * Read per channel calibration info from EEPROM + * + * This info is used to calibrate the baseband power table. Imagine + * that for each channel there is a power curve that's hw specific + * (depends on amplifier etc) and we try to "correct" this curve using + * offsets we pass on to phy chip (baseband -> before amplifier) so that + * it can use accurate power values when setting tx power (takes amplifier's + * performance on each channel into account). + * + * EEPROM provides us with the offsets for some pre-calibrated channels + * and we have to interpolate to create the full table for these channels and + * also the table for any channel. 
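+ *
+ * E.g. with calibrated piers at 2412MHz and 2447MHz (the fixed RF5111
+ * 2GHz piers used above) the values for a channel like 2437MHz are
+ * obtained by interpolating between those two piers.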
+ */ +static int +ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + int (*read_pcal)(struct ath5k_hw *hw, int mode); + int mode; + int err; + + if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) && + (AR5K_EEPROM_EEMAP(ee->ee_misc0) == 1)) + read_pcal = ath5k_eeprom_read_pcal_info_5112; + else if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0) && + (AR5K_EEPROM_EEMAP(ee->ee_misc0) == 2)) + read_pcal = ath5k_eeprom_read_pcal_info_2413; + else + read_pcal = ath5k_eeprom_read_pcal_info_5111; + + + for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; + mode++) { + err = read_pcal(ah, mode); + if (err) + return err; + + err = ath5k_eeprom_read_target_rate_pwr_info(ah, mode); + if (err < 0) + return err; + } + + return 0; +} + +static int +ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info *chinfo; + u8 pier, pdg; + + switch (mode) { + case AR5K_EEPROM_MODE_11A: + if (!AR5K_EEPROM_HDR_11A(ee->ee_header)) + return 0; + chinfo = ee->ee_pwr_cal_a; + break; + case AR5K_EEPROM_MODE_11B: + if (!AR5K_EEPROM_HDR_11B(ee->ee_header)) + return 0; + chinfo = ee->ee_pwr_cal_b; + break; + case AR5K_EEPROM_MODE_11G: + if (!AR5K_EEPROM_HDR_11G(ee->ee_header)) + return 0; + chinfo = ee->ee_pwr_cal_g; + break; + default: + return -EINVAL; + } + + for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) { + if (!chinfo[pier].pd_curves) + continue; + + for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { + struct ath5k_pdgain_info *pd = + &chinfo[pier].pd_curves[pdg]; + + if (pd != NULL) { + kfree(pd->pd_step); + kfree(pd->pd_pwr); + } + } + + kfree(chinfo[pier].pd_curves); + } + + return 0; +} + +void +ath5k_eeprom_detach(struct ath5k_hw *ah) +{ + u8 mode; + + for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) + ath5k_eeprom_free_pcal_info(ah, mode); +} + +/* Read conformance test limits used for regulatory control */ +static int +ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_edge_power *rep; + unsigned int fmask, pmask; + unsigned int ctl_mode; + int ret, i, j; + u32 offset; + u16 val; + + pmask = AR5K_EEPROM_POWER_M; + fmask = AR5K_EEPROM_FREQ_M(ee->ee_version); + offset = AR5K_EEPROM_CTL(ee->ee_version); + ee->ee_ctls = AR5K_EEPROM_N_CTLS(ee->ee_version); + for (i = 0; i < ee->ee_ctls; i += 2) { + AR5K_EEPROM_READ(offset++, val); + ee->ee_ctl[i] = (val >> 8) & 0xff; + ee->ee_ctl[i + 1] = val & 0xff; + } + + offset = AR5K_EEPROM_GROUP8_OFFSET; + if (ee->ee_version >= AR5K_EEPROM_VERSION_4_0) + offset += AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1) - + AR5K_EEPROM_GROUP5_OFFSET; + else + offset += AR5K_EEPROM_GROUPS_START(ee->ee_version); + + rep = ee->ee_ctl_pwr; + for(i = 0; i < ee->ee_ctls; i++) { + switch(ee->ee_ctl[i] & AR5K_CTL_MODE_M) { + case AR5K_CTL_11A: + case AR5K_CTL_TURBO: + ctl_mode = AR5K_EEPROM_MODE_11A; + break; + default: + ctl_mode = AR5K_EEPROM_MODE_11G; + break; + } + if (ee->ee_ctl[i] == 0) { + if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) + offset += 8; + else + offset += 7; + rep += AR5K_EEPROM_N_EDGES; + continue; + } + if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) { + for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) { + AR5K_EEPROM_READ(offset++, val); + rep[j].freq = (val >> 8) & fmask; + rep[j + 1].freq = val & fmask; + } + for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) { + AR5K_EEPROM_READ(offset++, val); + 
rep[j].edge = (val >> 8) & pmask; + rep[j].flag = (val >> 14) & 1; + rep[j + 1].edge = val & pmask; + rep[j + 1].flag = (val >> 6) & 1; + } + } else { + AR5K_EEPROM_READ(offset++, val); + rep[0].freq = (val >> 9) & fmask; + rep[1].freq = (val >> 2) & fmask; + rep[2].freq = (val << 5) & fmask; + + AR5K_EEPROM_READ(offset++, val); + rep[2].freq |= (val >> 11) & 0x1f; + rep[3].freq = (val >> 4) & fmask; + rep[4].freq = (val << 3) & fmask; + + AR5K_EEPROM_READ(offset++, val); + rep[4].freq |= (val >> 13) & 0x7; + rep[5].freq = (val >> 6) & fmask; + rep[6].freq = (val << 1) & fmask; + + AR5K_EEPROM_READ(offset++, val); + rep[6].freq |= (val >> 15) & 0x1; + rep[7].freq = (val >> 8) & fmask; + + rep[0].edge = (val >> 2) & pmask; + rep[1].edge = (val << 4) & pmask; + + AR5K_EEPROM_READ(offset++, val); + rep[1].edge |= (val >> 12) & 0xf; + rep[2].edge = (val >> 6) & pmask; + rep[3].edge = val & pmask; + + AR5K_EEPROM_READ(offset++, val); + rep[4].edge = (val >> 10) & pmask; + rep[5].edge = (val >> 4) & pmask; + rep[6].edge = (val << 2) & pmask; + + AR5K_EEPROM_READ(offset++, val); + rep[6].edge |= (val >> 14) & 0x3; + rep[7].edge = (val >> 8) & pmask; + } + for (j = 0; j < AR5K_EEPROM_N_EDGES; j++) { + rep[j].freq = ath5k_eeprom_bin2freq(ee, + rep[j].freq, ctl_mode); + } + rep += AR5K_EEPROM_N_EDGES; + } + + return 0; +} + +static int +ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + u32 offset; + u16 val; + int ret = 0, i; + + offset = AR5K_EEPROM_CTL(ee->ee_version) + + AR5K_EEPROM_N_CTLS(ee->ee_version); + + if (ee->ee_version < AR5K_EEPROM_VERSION_5_3) { + /* No spur info for 5GHz */ + ee->ee_spur_chans[0][0] = AR5K_EEPROM_NO_SPUR; + /* 2 channels for 2GHz (2464/2420) */ + ee->ee_spur_chans[0][1] = AR5K_EEPROM_5413_SPUR_CHAN_1; + ee->ee_spur_chans[1][1] = AR5K_EEPROM_5413_SPUR_CHAN_2; + ee->ee_spur_chans[2][1] = AR5K_EEPROM_NO_SPUR; + } else if (ee->ee_version >= AR5K_EEPROM_VERSION_5_3) { + for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) { + AR5K_EEPROM_READ(offset, val); + ee->ee_spur_chans[i][0] = val; + AR5K_EEPROM_READ(offset + AR5K_EEPROM_N_SPUR_CHANS, + val); + ee->ee_spur_chans[i][1] = val; + offset++; + } + } + + return ret; +} + +/* + * Initialize eeprom data structure + */ +int +ath5k_eeprom_init(struct ath5k_hw *ah) +{ + int err; + + err = ath5k_eeprom_init_header(ah); + if (err < 0) + return err; + + err = ath5k_eeprom_init_modes(ah); + if (err < 0) + return err; + + err = ath5k_eeprom_read_pcal_info(ah); + if (err < 0) + return err; + + err = ath5k_eeprom_read_ctl_info(ah); + if (err < 0) + return err; + + err = ath5k_eeprom_read_spur_chans(ah); + if (err < 0) + return err; + + return 0; +} + +/* + * Read the MAC address from eeprom + */ +int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac) +{ + u8 mac_d[ETH_ALEN] = {}; + u32 total, offset; + u16 data; + int octet, ret; + + ret = ath5k_hw_eeprom_read(ah, 0x20, &data); + if (ret) + return ret; + + for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) { + ret = ath5k_hw_eeprom_read(ah, offset, &data); + if (ret) + return ret; + + total += data; + mac_d[octet + 1] = data & 0xff; + mac_d[octet] = data >> 8; + octet += 2; + } + + if (!total || total == 3 * 0xffff) + return -EINVAL; + + memcpy(mac, mac_d, ETH_ALEN); + + return 0; +} diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h new file mode 100644 index 0000000..473a483 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/eeprom.h @@ -0,0 +1,479 @@ +/* 
+ * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2008 Nick Kossifidis + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* + * Common ar5xxx EEPROM data offsets (set these on AR5K_EEPROM_BASE) + */ +#define AR5K_EEPROM_PCIE_OFFSET 0x02 /* Contains offset to PCI-E infos */ +#define AR5K_EEPROM_PCIE_SERDES_SECTION 0x40 /* PCIE_OFFSET points here when + * SERDES infos are present */ +#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */ +#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */ +#define AR5K_EEPROM_MAGIC_5212 0x0000145c /* 5212 */ +#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */ +#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */ + +#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */ + +#define AR5K_EEPROM_RFKILL 0x0f +#define AR5K_EEPROM_RFKILL_GPIO_SEL 0x0000001c +#define AR5K_EEPROM_RFKILL_GPIO_SEL_S 2 +#define AR5K_EEPROM_RFKILL_POLARITY 0x00000002 +#define AR5K_EEPROM_RFKILL_POLARITY_S 1 + +#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */ + +/* FLASH(EEPROM) Defines for AR531X chips */ +#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */ +#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */ +#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0 +#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4 +#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12 + +#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */ +#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */ +#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE) +#define AR5K_EEPROM_INFO_CKSUM 0xffff +#define AR5K_EEPROM_INFO(_n) (AR5K_EEPROM_INFO_BASE + (_n)) + +#define AR5K_EEPROM_VERSION AR5K_EEPROM_INFO(1) /* EEPROM Version */ +#define AR5K_EEPROM_VERSION_3_0 0x3000 /* No idea what's going on before this version */ +#define AR5K_EEPROM_VERSION_3_1 0x3001 /* ob/db values for 2Ghz (ar5211_rfregs) */ +#define AR5K_EEPROM_VERSION_3_2 0x3002 /* different frequency representation (eeprom_bin2freq) */ +#define AR5K_EEPROM_VERSION_3_3 0x3003 /* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */ +#define AR5K_EEPROM_VERSION_3_4 0x3004 /* has ee_i_gain, ee_cck_ofdm_power_delta (eeprom_read_modes) */ +#define AR5K_EEPROM_VERSION_4_0 0x4000 /* has ee_misc, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */ +#define AR5K_EEPROM_VERSION_4_1 0x4001 /* has ee_margin_tx_rx (eeprom_init) */ +#define AR5K_EEPROM_VERSION_4_2 0x4002 /* has ee_cck_ofdm_gain_delta (eeprom_init) */ +#define AR5K_EEPROM_VERSION_4_3 0x4003 /* power calibration changes */ +#define AR5K_EEPROM_VERSION_4_4 0x4004 +#define AR5K_EEPROM_VERSION_4_5 0x4005 +#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */ +#define AR5K_EEPROM_VERSION_4_7 0x3007 /* 4007 ? 
*/ +#define AR5K_EEPROM_VERSION_4_9 0x4009 /* EAR futureproofing */ +#define AR5K_EEPROM_VERSION_5_0 0x5000 /* Has 2413 PDADC calibration etc */ +#define AR5K_EEPROM_VERSION_5_1 0x5001 /* Has capability values */ +#define AR5K_EEPROM_VERSION_5_3 0x5003 /* Has spur mitigation tables */ + +#define AR5K_EEPROM_MODE_11A 0 +#define AR5K_EEPROM_MODE_11B 1 +#define AR5K_EEPROM_MODE_11G 2 + +#define AR5K_EEPROM_HDR AR5K_EEPROM_INFO(2) /* Header that contains the device caps */ +#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1) +#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1) +#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1) +#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */ +#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */ +#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) +#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */ +#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */ + +/* Newer EEPROMs are using a different offset */ +#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \ + (((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0) + +#define AR5K_EEPROM_ANT_GAIN(_v) AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3) +#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v) ((s8)(((_v) >> 8) & 0xff)) +#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v) ((s8)((_v) & 0xff)) + +/* Misc values available since EEPROM 4.0 */ +#define AR5K_EEPROM_MISC0 AR5K_EEPROM_INFO(4) +#define AR5K_EEPROM_EARSTART(_v) ((_v) & 0xfff) +#define AR5K_EEPROM_HDR_XR2_DIS(_v) (((_v) >> 12) & 0x1) +#define AR5K_EEPROM_HDR_XR5_DIS(_v) (((_v) >> 13) & 0x1) +#define AR5K_EEPROM_EEMAP(_v) (((_v) >> 14) & 0x3) + +#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5) +#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff) +#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) +#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1) + +#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6) +#define AR5K_EEPROM_EEP_FILE_VERSION(_v) (((_v) >> 8) & 0xff) +#define AR5K_EEPROM_EAR_FILE_VERSION(_v) ((_v) & 0xff) + +#define AR5K_EEPROM_MISC3 AR5K_EEPROM_INFO(7) +#define AR5K_EEPROM_ART_BUILD_NUM(_v) (((_v) >> 10) & 0x3f) +#define AR5K_EEPROM_EAR_FILE_ID(_v) ((_v) & 0xff) + +#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8) +#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff) +#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) +#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) + +#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9) +#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) +#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) +#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) +#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) +#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) +#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) +#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) + +#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10) +#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x8) +#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x8) +#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1) +#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1) +#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1) +#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 9) & 0x1) +#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 10) & 0x1) + +/* calibration settings */ +#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4) +#define AR5K_EEPROM_MODES_11B(_v) 
AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2) +#define AR5K_EEPROM_MODES_11G(_v) AR5K_EEPROM_OFF(_v, 0x00da, 0x010d) +#define AR5K_EEPROM_CTL(_v) AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128) /* Conformance test limits */ +#define AR5K_EEPROM_GROUPS_START(_v) AR5K_EEPROM_OFF(_v, 0x0100, 0x0150) /* Start of Groups */ +#define AR5K_EEPROM_GROUP1_OFFSET 0x0 +#define AR5K_EEPROM_GROUP2_OFFSET 0x5 +#define AR5K_EEPROM_GROUP3_OFFSET 0x37 +#define AR5K_EEPROM_GROUP4_OFFSET 0x46 +#define AR5K_EEPROM_GROUP5_OFFSET 0x55 +#define AR5K_EEPROM_GROUP6_OFFSET 0x65 +#define AR5K_EEPROM_GROUP7_OFFSET 0x69 +#define AR5K_EEPROM_GROUP8_OFFSET 0x6f + +#define AR5K_EEPROM_TARGET_PWR_OFF_11A(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \ + AR5K_EEPROM_GROUP5_OFFSET, 0x0000) +#define AR5K_EEPROM_TARGET_PWR_OFF_11B(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \ + AR5K_EEPROM_GROUP6_OFFSET, 0x0010) +#define AR5K_EEPROM_TARGET_PWR_OFF_11G(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \ + AR5K_EEPROM_GROUP7_OFFSET, 0x0014) + +/* [3.1 - 3.3] */ +#define AR5K_EEPROM_OBDB0_2GHZ 0x00ec +#define AR5K_EEPROM_OBDB1_2GHZ 0x00ed + +#define AR5K_EEPROM_PROTECT 0x003f /* EEPROM protect status */ +#define AR5K_EEPROM_PROTECT_RD_0_31 0x0001 /* Read protection bit for offsets 0x0 - 0x1f */ +#define AR5K_EEPROM_PROTECT_WR_0_31 0x0002 /* Write protection bit for offsets 0x0 - 0x1f */ +#define AR5K_EEPROM_PROTECT_RD_32_63 0x0004 /* 0x20 - 0x3f */ +#define AR5K_EEPROM_PROTECT_WR_32_63 0x0008 +#define AR5K_EEPROM_PROTECT_RD_64_127 0x0010 /* 0x40 - 0x7f */ +#define AR5K_EEPROM_PROTECT_WR_64_127 0x0020 +#define AR5K_EEPROM_PROTECT_RD_128_191 0x0040 /* 0x80 - 0xbf (regdom) */ +#define AR5K_EEPROM_PROTECT_WR_128_191 0x0080 +#define AR5K_EEPROM_PROTECT_RD_192_207 0x0100 /* 0xc0 - 0xcf */ +#define AR5K_EEPROM_PROTECT_WR_192_207 0x0200 +#define AR5K_EEPROM_PROTECT_RD_208_223 0x0400 /* 0xd0 - 0xdf */ +#define AR5K_EEPROM_PROTECT_WR_208_223 0x0800 +#define AR5K_EEPROM_PROTECT_RD_224_239 0x1000 /* 0xe0 - 0xef */ +#define AR5K_EEPROM_PROTECT_WR_224_239 0x2000 +#define AR5K_EEPROM_PROTECT_RD_240_255 0x4000 /* 0xf0 - 0xff */ +#define AR5K_EEPROM_PROTECT_WR_240_255 0x8000 + +/* Some EEPROM defines */ +#define AR5K_EEPROM_EEP_SCALE 100 +#define AR5K_EEPROM_EEP_DELTA 10 +#define AR5K_EEPROM_N_MODES 3 +#define AR5K_EEPROM_N_5GHZ_CHAN 10 +#define AR5K_EEPROM_N_2GHZ_CHAN 3 +#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 +#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 +#define AR5K_EEPROM_MAX_CHAN 10 +#define AR5K_EEPROM_N_PWR_POINTS_5111 11 +#define AR5K_EEPROM_N_PCDAC 11 +#define AR5K_EEPROM_N_PHASE_CAL 5 +#define AR5K_EEPROM_N_TEST_FREQ 8 +#define AR5K_EEPROM_N_EDGES 8 +#define AR5K_EEPROM_N_INTERCEPTS 11 +#define AR5K_EEPROM_FREQ_M(_v) AR5K_EEPROM_OFF(_v, 0x7f, 0xff) +#define AR5K_EEPROM_PCDAC_M 0x3f +#define AR5K_EEPROM_PCDAC_START 1 +#define AR5K_EEPROM_PCDAC_STOP 63 +#define AR5K_EEPROM_PCDAC_STEP 1 +#define AR5K_EEPROM_NON_EDGE_M 0x40 +#define AR5K_EEPROM_CHANNEL_POWER 8 +#define AR5K_EEPROM_N_OBDB 4 +#define AR5K_EEPROM_OBDB_DIS 0xffff +#define AR5K_EEPROM_CHANNEL_DIS 0xff +#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10) +#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32) +#define AR5K_EEPROM_MAX_CTLS 32 +#define AR5K_EEPROM_N_PD_CURVES 4 +#define AR5K_EEPROM_N_XPD0_POINTS 4 +#define AR5K_EEPROM_N_XPD3_POINTS 3 +#define AR5K_EEPROM_N_PD_GAINS 4 +#define AR5K_EEPROM_N_PD_POINTS 5 +#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ 35 +#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ 55 +#define AR5K_EEPROM_POWER_M 0x3f +#define AR5K_EEPROM_POWER_MIN 0 
+#define AR5K_EEPROM_POWER_MAX 3150 +#define AR5K_EEPROM_POWER_STEP 50 +#define AR5K_EEPROM_POWER_TABLE_SIZE 64 +#define AR5K_EEPROM_N_POWER_LOC_11B 4 +#define AR5K_EEPROM_N_POWER_LOC_11G 6 +#define AR5K_EEPROM_I_GAIN 10 +#define AR5K_EEPROM_CCK_OFDM_DELTA 15 +#define AR5K_EEPROM_N_IQ_CAL 2 +/* 5GHz/2GHz */ +enum ath5k_eeprom_freq_bands{ + AR5K_EEPROM_BAND_5GHZ = 0, + AR5K_EEPROM_BAND_2GHZ = 1, + AR5K_EEPROM_N_FREQ_BANDS, +}; +/* Spur chans per freq band */ +#define AR5K_EEPROM_N_SPUR_CHANS 5 +/* fbin value for chan 2464 x2 */ +#define AR5K_EEPROM_5413_SPUR_CHAN_1 1640 +/* fbin value for chan 2420 x2 */ +#define AR5K_EEPROM_5413_SPUR_CHAN_2 1200 +#define AR5K_EEPROM_SPUR_CHAN_MASK 0x3FFF +#define AR5K_EEPROM_NO_SPUR 0x8000 +#define AR5K_SPUR_CHAN_WIDTH 87 +#define AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz 3125 +#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250 + +#define AR5K_EEPROM_READ(_o, _v) do { \ + ret = ath5k_hw_eeprom_read(ah, (_o), &(_v)); \ + if (ret) \ + return ret; \ +} while (0) + +#define AR5K_EEPROM_READ_HDR(_o, _v) \ + AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v); \ + +enum ath5k_ant_table { + AR5K_ANT_CTL = 0, /* Idle switch table settings */ + AR5K_ANT_SWTABLE_A = 1, /* Switch table for antenna A */ + AR5K_ANT_SWTABLE_B = 2, /* Switch table for antenna B */ + AR5K_ANT_MAX, +}; + +enum ath5k_ctl_mode { + AR5K_CTL_11A = 0, + AR5K_CTL_11B = 1, + AR5K_CTL_11G = 2, + AR5K_CTL_TURBO = 3, + AR5K_CTL_TURBOG = 4, + AR5K_CTL_2GHT20 = 5, + AR5K_CTL_5GHT20 = 6, + AR5K_CTL_2GHT40 = 7, + AR5K_CTL_5GHT40 = 8, + AR5K_CTL_MODE_M = 15, +}; + +/* Default CTL ids for the 3 main reg domains. + * Atheros only uses these by default but vendors + * can have up to 32 different CTLs for different + * scenarios. Note that theese values are ORed with + * the mode id (above) so we can have up to 24 CTL + * datasets out of these 3 main regdomains. That leaves + * 8 ids that can be used by vendors and since 0x20 is + * missing from HAL sources i guess this is the set of + * custom CTLs vendors can use. */ +#define AR5K_CTL_FCC 0x10 +#define AR5K_CTL_CUSTOM 0x20 +#define AR5K_CTL_ETSI 0x30 +#define AR5K_CTL_MKK 0x40 + +/* Indicates a CTL with only mode set and + * no reg domain mapping, such CTLs are used + * for world roaming domains or simply when + * a reg domain is not set */ +#define AR5K_CTL_NO_REGDOMAIN 0xf0 + +/* Indicates an empty (invalid) CTL */ +#define AR5K_CTL_NO_CTL 0xff + +/* Per channel calibration data, used for power table setup */ +struct ath5k_chan_pcal_info_rf5111 { + /* Power levels in half dbm units + * for one power curve. 
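+ * E.g. a stored value of 31 would be 15.5dBm; the conversion code
+ * doubles these into the quarter dB units used internally.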
*/ + u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111]; + /* PCDAC table steps + * for the above values */ + u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111]; + /* Starting PCDAC step */ + u8 pcdac_min; + /* Final PCDAC step */ + u8 pcdac_max; +}; + +struct ath5k_chan_pcal_info_rf5112 { + /* Power levels in quarter dBm units + * for lower (0) and higher (3) + * level curves in 0.25dB units */ + s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS]; + s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS]; + /* PCDAC table steps + * for the above values */ + u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS]; + u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS]; +}; + +struct ath5k_chan_pcal_info_rf2413 { + /* Starting pwr/pddac values */ + s8 pwr_i[AR5K_EEPROM_N_PD_GAINS]; + u8 pddac_i[AR5K_EEPROM_N_PD_GAINS]; + /* (pwr,pddac) points + * power levels in 0.5dB units */ + s8 pwr[AR5K_EEPROM_N_PD_GAINS] + [AR5K_EEPROM_N_PD_POINTS]; + u8 pddac[AR5K_EEPROM_N_PD_GAINS] + [AR5K_EEPROM_N_PD_POINTS]; +}; + +enum ath5k_powertable_type { + AR5K_PWRTABLE_PWR_TO_PCDAC = 0, + AR5K_PWRTABLE_LINEAR_PCDAC = 1, + AR5K_PWRTABLE_PWR_TO_PDADC = 2, +}; + +struct ath5k_pdgain_info { + u8 pd_points; + u8 *pd_step; + /* Power values are in + * 0.25dB units */ + s16 *pd_pwr; +}; + +struct ath5k_chan_pcal_info { + /* Frequency */ + u16 freq; + /* Tx power boundaries */ + s16 max_pwr; + s16 min_pwr; + union { + struct ath5k_chan_pcal_info_rf5111 rf5111_info; + struct ath5k_chan_pcal_info_rf5112 rf5112_info; + struct ath5k_chan_pcal_info_rf2413 rf2413_info; + }; + /* Raw values used by phy code + * Curves are stored in order from lower + * gain to higher gain (max txpower -> min txpower) */ + struct ath5k_pdgain_info *pd_curves; +}; + +/* Per rate calibration data for each mode, + * used for rate power table setup. + * Note: Values in 0.5dB units */ +struct ath5k_rate_pcal_info { + u16 freq; /* Frequency */ + /* Power level for 6-24Mbit/s rates or + * 1Mb rate */ + u16 target_power_6to24; + /* Power level for 36Mbit rate or + * 2Mb rate */ + u16 target_power_36; + /* Power level for 48Mbit rate or + * 5.5Mbit rate */ + u16 target_power_48; + /* Power level for 54Mbit rate or + * 11Mbit rate */ + u16 target_power_54; +}; + +/* Power edges for conformance test limits */ +struct ath5k_edge_power { + u16 freq; + u16 edge; /* in half dBm */ + bool flag; +}; + +/* EEPROM calibration data */ +struct ath5k_eeprom_info { + + /* Header information */ + u16 ee_magic; + u16 ee_protect; + u16 ee_regdomain; + u16 ee_version; + u16 ee_header; + u16 ee_ant_gain; + u8 ee_rfkill_pin; + bool ee_rfkill_pol; + bool ee_is_hb63; + bool ee_serdes; + u16 ee_misc0; + u16 ee_misc1; + u16 ee_misc2; + u16 ee_misc3; + u16 ee_misc4; + u16 ee_misc5; + u16 ee_misc6; + u16 ee_cck_ofdm_gain_delta; + u16 ee_cck_ofdm_power_delta; + u16 ee_scaled_cck_delta; + + /* RF Calibration settings (reset, rfregs) */ + u16 ee_i_cal[AR5K_EEPROM_N_MODES]; + u16 ee_q_cal[AR5K_EEPROM_N_MODES]; + u16 ee_fixed_bias[AR5K_EEPROM_N_MODES]; + u16 ee_turbo_max_power[AR5K_EEPROM_N_MODES]; + u16 ee_xr_power[AR5K_EEPROM_N_MODES]; + u16 ee_switch_settling[AR5K_EEPROM_N_MODES]; + u16 ee_atn_tx_rx[AR5K_EEPROM_N_MODES]; + u16 ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC]; + u16 ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB]; + u16 ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB]; + u16 ee_tx_end2xlna_enable[AR5K_EEPROM_N_MODES]; + u16 ee_tx_end2xpa_disable[AR5K_EEPROM_N_MODES]; + u16 ee_tx_frm2xpa_enable[AR5K_EEPROM_N_MODES]; + u16 ee_thr_62[AR5K_EEPROM_N_MODES]; + u16 ee_xlna_gain[AR5K_EEPROM_N_MODES]; + u16 ee_xpd[AR5K_EEPROM_N_MODES]; + u16 
ee_x_gain[AR5K_EEPROM_N_MODES]; + u16 ee_i_gain[AR5K_EEPROM_N_MODES]; + u16 ee_margin_tx_rx[AR5K_EEPROM_N_MODES]; + u16 ee_switch_settling_turbo[AR5K_EEPROM_N_MODES]; + u16 ee_margin_tx_rx_turbo[AR5K_EEPROM_N_MODES]; + u16 ee_atn_tx_rx_turbo[AR5K_EEPROM_N_MODES]; + + /* Power calibration data */ + u16 ee_false_detect[AR5K_EEPROM_N_MODES]; + + /* Number of pd gain curves per mode */ + u8 ee_pd_gains[AR5K_EEPROM_N_MODES]; + /* Back mapping pdcurve number -> pdcurve index in pd->pd_curves */ + u8 ee_pdc_to_idx[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PD_GAINS]; + + u8 ee_n_piers[AR5K_EEPROM_N_MODES]; + struct ath5k_chan_pcal_info ee_pwr_cal_a[AR5K_EEPROM_N_5GHZ_CHAN]; + struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX]; + struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX]; + + /* Per rate target power levels */ + u8 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES]; + struct ath5k_rate_pcal_info ee_rate_tpwr_a[AR5K_EEPROM_N_5GHZ_CHAN]; + struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX]; + struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX]; + + /* Conformance test limits (Unused) */ + u8 ee_ctls; + u8 ee_ctl[AR5K_EEPROM_MAX_CTLS]; + struct ath5k_edge_power ee_ctl_pwr[AR5K_EEPROM_N_EDGES * AR5K_EEPROM_MAX_CTLS]; + + /* Noise Floor Calibration settings */ + s16 ee_noise_floor_thr[AR5K_EEPROM_N_MODES]; + s8 ee_adc_desired_size[AR5K_EEPROM_N_MODES]; + s8 ee_pga_desired_size[AR5K_EEPROM_N_MODES]; + s8 ee_adc_desired_size_turbo[AR5K_EEPROM_N_MODES]; + s8 ee_pga_desired_size_turbo[AR5K_EEPROM_N_MODES]; + s8 ee_pd_gain_overlap; + + /* Spur mitigation data (fbin values for spur channels) */ + u16 ee_spur_chans[AR5K_EEPROM_N_SPUR_CHANS][AR5K_EEPROM_N_FREQ_BANDS]; + + /* Antenna raw switch tables */ + u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX]; +}; + diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c new file mode 100644 index 0000000..64a27e7 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/gpio.c @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2008 Nick Kossifidis + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + */ + +/****************\ + GPIO Functions +\****************/ + +#include "ath5k.h" +#include "reg.h" +#include "debug.h" +#include "base.h" + +/* + * Set led state + */ +void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state) +{ + u32 led; + /*5210 has different led mode handling*/ + u32 led_5210; + + ATH5K_TRACE(ah->ah_sc); + + /*Reset led status*/ + if (ah->ah_version != AR5K_AR5210) + AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, + AR5K_PCICFG_LEDMODE | AR5K_PCICFG_LED); + else + AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED); + + /* + * Some blinking values, define at your wish + */ + switch (state) { + case AR5K_LED_SCAN: + case AR5K_LED_AUTH: + led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND; + led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL; + break; + + case AR5K_LED_INIT: + led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE; + led_5210 = AR5K_PCICFG_LED_PEND; + break; + + case AR5K_LED_ASSOC: + case AR5K_LED_RUN: + led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC; + led_5210 = AR5K_PCICFG_LED_ASSOC; + break; + + default: + led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE; + led_5210 = AR5K_PCICFG_LED_PEND; + break; + } + + /*Write new status to the register*/ + if (ah->ah_version != AR5K_AR5210) + AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led); + else + AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210); +} + +/* + * Set GPIO inputs + */ +int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) +{ + ATH5K_TRACE(ah->ah_sc); + if (gpio >= AR5K_NUM_GPIO) + return -EINVAL; + + ath5k_hw_reg_write(ah, + (ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio)) + | AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR); + + return 0; +} + +/* + * Set GPIO outputs + */ +int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) +{ + ATH5K_TRACE(ah->ah_sc); + if (gpio >= AR5K_NUM_GPIO) + return -EINVAL; + + ath5k_hw_reg_write(ah, + (ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio)) + | AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR); + + return 0; +} + +/* + * Get GPIO state + */ +u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) +{ + ATH5K_TRACE(ah->ah_sc); + if (gpio >= AR5K_NUM_GPIO) + return 0xffffffff; + + /* GPIO input magic */ + return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) & + 0x1; +} + +/* + * Set GPIO state + */ +int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val) +{ + u32 data; + ATH5K_TRACE(ah->ah_sc); + + if (gpio >= AR5K_NUM_GPIO) + return -EINVAL; + + /* GPIO output magic */ + data = ath5k_hw_reg_read(ah, AR5K_GPIODO); + + data &= ~(1 << gpio); + data |= (val & 1) << gpio; + + ath5k_hw_reg_write(ah, data, AR5K_GPIODO); + + return 0; +} + +/* + * Initialize the GPIO interrupt (RFKill switch) + */ +void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, + u32 interrupt_level) +{ + u32 data; + + ATH5K_TRACE(ah->ah_sc); + if (gpio >= AR5K_NUM_GPIO) + return; + + /* + * Set the GPIO interrupt + */ + data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) & + ~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH | + AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) | + (AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA); + + ath5k_hw_reg_write(ah, interrupt_level ? 
data : + (data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR); + + ah->ah_imr |= AR5K_IMR_GPIO; + + /* Enable GPIO interrupts */ + AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO); +} + diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c new file mode 100644 index 0000000..8fa4393 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/initvals.c @@ -0,0 +1,1555 @@ +/* + * Initial register settings functions + * + * Copyright (c) 2004-2007 Reyk Floeter + * Copyright (c) 2006-2009 Nick Kossifidis + * Copyright (c) 2007-2008 Jiri Slaby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#include "ath5k.h" +#include "reg.h" +#include "debug.h" +#include "base.h" + +/* + * Mode-independent initial register writes + */ + +struct ath5k_ini { + u16 ini_register; + u32 ini_value; + + enum { + AR5K_INI_WRITE = 0, /* Default */ + AR5K_INI_READ = 1, /* Cleared on read */ + } ini_mode; +}; + +/* + * Mode specific initial register values + */ + +struct ath5k_ini_mode { + u16 mode_register; + u32 mode_value[5]; +}; + +/* Initial register settings for AR5210 */ +static const struct ath5k_ini ar5210_ini[] = { + /* PCU and MAC registers */ + { AR5K_NOQCU_TXDP0, 0 }, + { AR5K_NOQCU_TXDP1, 0 }, + { AR5K_RXDP, 0 }, + { AR5K_CR, 0 }, + { AR5K_ISR, 0, AR5K_INI_READ }, + { AR5K_IMR, 0 }, + { AR5K_IER, AR5K_IER_DISABLE }, + { AR5K_BSR, 0, AR5K_INI_READ }, + { AR5K_TXCFG, AR5K_DMASIZE_128B }, + { AR5K_RXCFG, AR5K_DMASIZE_128B }, + { AR5K_CFG, AR5K_INIT_CFG }, + { AR5K_TOPS, 8 }, + { AR5K_RXNOFRM, 8 }, + { AR5K_RPGTO, 0 }, + { AR5K_TXNOFRM, 0 }, + { AR5K_SFR, 0 }, + { AR5K_MIBC, 0 }, + { AR5K_MISC, 0 }, + { AR5K_RX_FILTER_5210, 0 }, + { AR5K_MCAST_FILTER0_5210, 0 }, + { AR5K_MCAST_FILTER1_5210, 0 }, + { AR5K_TX_MASK0, 0 }, + { AR5K_TX_MASK1, 0 }, + { AR5K_CLR_TMASK, 0 }, + { AR5K_TRIG_LVL, AR5K_TUNE_MIN_TX_FIFO_THRES }, + { AR5K_DIAG_SW_5210, 0 }, + { AR5K_RSSI_THR, AR5K_TUNE_RSSI_THRES }, + { AR5K_TSF_L32_5210, 0 }, + { AR5K_TIMER0_5210, 0 }, + { AR5K_TIMER1_5210, 0xffffffff }, + { AR5K_TIMER2_5210, 0xffffffff }, + { AR5K_TIMER3_5210, 1 }, + { AR5K_CFP_DUR_5210, 0 }, + { AR5K_CFP_PERIOD_5210, 0 }, + /* PHY registers */ + { AR5K_PHY(0), 0x00000047 }, + { AR5K_PHY_AGC, 0x00000000 }, + { AR5K_PHY(3), 0x09848ea6 }, + { AR5K_PHY(4), 0x3d32e000 }, + { AR5K_PHY(5), 0x0000076b }, + { AR5K_PHY_ACT, AR5K_PHY_ACT_DISABLE }, + { AR5K_PHY(8), 0x02020200 }, + { AR5K_PHY(9), 0x00000e0e }, + { AR5K_PHY(10), 0x0a020201 }, + { AR5K_PHY(11), 0x00036ffc }, + { AR5K_PHY(12), 0x00000000 }, + { AR5K_PHY(13), 0x00000e0e }, + { AR5K_PHY(14), 0x00000007 }, + { AR5K_PHY(15), 0x00020100 }, + { AR5K_PHY(16), 0x89630000 }, + { AR5K_PHY(17), 0x1372169c }, + { AR5K_PHY(18), 0x0018b633 }, + { AR5K_PHY(19), 0x1284613c }, + { AR5K_PHY(20), 0x0de8b8e0 }, + { AR5K_PHY(21), 0x00074859 }, + { AR5K_PHY(22), 0x7e80beba }, + { 
AR5K_PHY(23), 0x313a665e }, + { AR5K_PHY_AGCCTL, 0x00001d08 }, + { AR5K_PHY(25), 0x0001ce00 }, + { AR5K_PHY(26), 0x409a4190 }, + { AR5K_PHY(28), 0x0000000f }, + { AR5K_PHY(29), 0x00000080 }, + { AR5K_PHY(30), 0x00000004 }, + { AR5K_PHY(31), 0x00000018 }, /* 0x987c */ + { AR5K_PHY(64), 0x00000000 }, /* 0x9900 */ + { AR5K_PHY(65), 0x00000000 }, + { AR5K_PHY(66), 0x00000000 }, + { AR5K_PHY(67), 0x00800000 }, + { AR5K_PHY(68), 0x00000003 }, + /* BB gain table (64bytes) */ + { AR5K_BB_GAIN(0), 0x00000000 }, + { AR5K_BB_GAIN(1), 0x00000020 }, + { AR5K_BB_GAIN(2), 0x00000010 }, + { AR5K_BB_GAIN(3), 0x00000030 }, + { AR5K_BB_GAIN(4), 0x00000008 }, + { AR5K_BB_GAIN(5), 0x00000028 }, + { AR5K_BB_GAIN(6), 0x00000028 }, + { AR5K_BB_GAIN(7), 0x00000004 }, + { AR5K_BB_GAIN(8), 0x00000024 }, + { AR5K_BB_GAIN(9), 0x00000014 }, + { AR5K_BB_GAIN(10), 0x00000034 }, + { AR5K_BB_GAIN(11), 0x0000000c }, + { AR5K_BB_GAIN(12), 0x0000002c }, + { AR5K_BB_GAIN(13), 0x00000002 }, + { AR5K_BB_GAIN(14), 0x00000022 }, + { AR5K_BB_GAIN(15), 0x00000012 }, + { AR5K_BB_GAIN(16), 0x00000032 }, + { AR5K_BB_GAIN(17), 0x0000000a }, + { AR5K_BB_GAIN(18), 0x0000002a }, + { AR5K_BB_GAIN(19), 0x00000001 }, + { AR5K_BB_GAIN(20), 0x00000021 }, + { AR5K_BB_GAIN(21), 0x00000011 }, + { AR5K_BB_GAIN(22), 0x00000031 }, + { AR5K_BB_GAIN(23), 0x00000009 }, + { AR5K_BB_GAIN(24), 0x00000029 }, + { AR5K_BB_GAIN(25), 0x00000005 }, + { AR5K_BB_GAIN(26), 0x00000025 }, + { AR5K_BB_GAIN(27), 0x00000015 }, + { AR5K_BB_GAIN(28), 0x00000035 }, + { AR5K_BB_GAIN(29), 0x0000000d }, + { AR5K_BB_GAIN(30), 0x0000002d }, + { AR5K_BB_GAIN(31), 0x00000003 }, + { AR5K_BB_GAIN(32), 0x00000023 }, + { AR5K_BB_GAIN(33), 0x00000013 }, + { AR5K_BB_GAIN(34), 0x00000033 }, + { AR5K_BB_GAIN(35), 0x0000000b }, + { AR5K_BB_GAIN(36), 0x0000002b }, + { AR5K_BB_GAIN(37), 0x00000007 }, + { AR5K_BB_GAIN(38), 0x00000027 }, + { AR5K_BB_GAIN(39), 0x00000017 }, + { AR5K_BB_GAIN(40), 0x00000037 }, + { AR5K_BB_GAIN(41), 0x0000000f }, + { AR5K_BB_GAIN(42), 0x0000002f }, + { AR5K_BB_GAIN(43), 0x0000002f }, + { AR5K_BB_GAIN(44), 0x0000002f }, + { AR5K_BB_GAIN(45), 0x0000002f }, + { AR5K_BB_GAIN(46), 0x0000002f }, + { AR5K_BB_GAIN(47), 0x0000002f }, + { AR5K_BB_GAIN(48), 0x0000002f }, + { AR5K_BB_GAIN(49), 0x0000002f }, + { AR5K_BB_GAIN(50), 0x0000002f }, + { AR5K_BB_GAIN(51), 0x0000002f }, + { AR5K_BB_GAIN(52), 0x0000002f }, + { AR5K_BB_GAIN(53), 0x0000002f }, + { AR5K_BB_GAIN(54), 0x0000002f }, + { AR5K_BB_GAIN(55), 0x0000002f }, + { AR5K_BB_GAIN(56), 0x0000002f }, + { AR5K_BB_GAIN(57), 0x0000002f }, + { AR5K_BB_GAIN(58), 0x0000002f }, + { AR5K_BB_GAIN(59), 0x0000002f }, + { AR5K_BB_GAIN(60), 0x0000002f }, + { AR5K_BB_GAIN(61), 0x0000002f }, + { AR5K_BB_GAIN(62), 0x0000002f }, + { AR5K_BB_GAIN(63), 0x0000002f }, + /* 5110 RF gain table (64btes) */ + { AR5K_RF_GAIN(0), 0x0000001d }, + { AR5K_RF_GAIN(1), 0x0000005d }, + { AR5K_RF_GAIN(2), 0x0000009d }, + { AR5K_RF_GAIN(3), 0x000000dd }, + { AR5K_RF_GAIN(4), 0x0000011d }, + { AR5K_RF_GAIN(5), 0x00000021 }, + { AR5K_RF_GAIN(6), 0x00000061 }, + { AR5K_RF_GAIN(7), 0x000000a1 }, + { AR5K_RF_GAIN(8), 0x000000e1 }, + { AR5K_RF_GAIN(9), 0x00000031 }, + { AR5K_RF_GAIN(10), 0x00000071 }, + { AR5K_RF_GAIN(11), 0x000000b1 }, + { AR5K_RF_GAIN(12), 0x0000001c }, + { AR5K_RF_GAIN(13), 0x0000005c }, + { AR5K_RF_GAIN(14), 0x00000029 }, + { AR5K_RF_GAIN(15), 0x00000069 }, + { AR5K_RF_GAIN(16), 0x000000a9 }, + { AR5K_RF_GAIN(17), 0x00000020 }, + { AR5K_RF_GAIN(18), 0x00000019 }, + { AR5K_RF_GAIN(19), 0x00000059 }, + { AR5K_RF_GAIN(20), 0x00000099 }, + { 
AR5K_RF_GAIN(21), 0x00000030 }, + { AR5K_RF_GAIN(22), 0x00000005 }, + { AR5K_RF_GAIN(23), 0x00000025 }, + { AR5K_RF_GAIN(24), 0x00000065 }, + { AR5K_RF_GAIN(25), 0x000000a5 }, + { AR5K_RF_GAIN(26), 0x00000028 }, + { AR5K_RF_GAIN(27), 0x00000068 }, + { AR5K_RF_GAIN(28), 0x0000001f }, + { AR5K_RF_GAIN(29), 0x0000001e }, + { AR5K_RF_GAIN(30), 0x00000018 }, + { AR5K_RF_GAIN(31), 0x00000058 }, + { AR5K_RF_GAIN(32), 0x00000098 }, + { AR5K_RF_GAIN(33), 0x00000003 }, + { AR5K_RF_GAIN(34), 0x00000004 }, + { AR5K_RF_GAIN(35), 0x00000044 }, + { AR5K_RF_GAIN(36), 0x00000084 }, + { AR5K_RF_GAIN(37), 0x00000013 }, + { AR5K_RF_GAIN(38), 0x00000012 }, + { AR5K_RF_GAIN(39), 0x00000052 }, + { AR5K_RF_GAIN(40), 0x00000092 }, + { AR5K_RF_GAIN(41), 0x000000d2 }, + { AR5K_RF_GAIN(42), 0x0000002b }, + { AR5K_RF_GAIN(43), 0x0000002a }, + { AR5K_RF_GAIN(44), 0x0000006a }, + { AR5K_RF_GAIN(45), 0x000000aa }, + { AR5K_RF_GAIN(46), 0x0000001b }, + { AR5K_RF_GAIN(47), 0x0000001a }, + { AR5K_RF_GAIN(48), 0x0000005a }, + { AR5K_RF_GAIN(49), 0x0000009a }, + { AR5K_RF_GAIN(50), 0x000000da }, + { AR5K_RF_GAIN(51), 0x00000006 }, + { AR5K_RF_GAIN(52), 0x00000006 }, + { AR5K_RF_GAIN(53), 0x00000006 }, + { AR5K_RF_GAIN(54), 0x00000006 }, + { AR5K_RF_GAIN(55), 0x00000006 }, + { AR5K_RF_GAIN(56), 0x00000006 }, + { AR5K_RF_GAIN(57), 0x00000006 }, + { AR5K_RF_GAIN(58), 0x00000006 }, + { AR5K_RF_GAIN(59), 0x00000006 }, + { AR5K_RF_GAIN(60), 0x00000006 }, + { AR5K_RF_GAIN(61), 0x00000006 }, + { AR5K_RF_GAIN(62), 0x00000006 }, + { AR5K_RF_GAIN(63), 0x00000006 }, + /* PHY activation */ + { AR5K_PHY(53), 0x00000020 }, + { AR5K_PHY(51), 0x00000004 }, + { AR5K_PHY(50), 0x00060106 }, + { AR5K_PHY(39), 0x0000006d }, + { AR5K_PHY(48), 0x00000000 }, + { AR5K_PHY(52), 0x00000014 }, + { AR5K_PHY_ACT, AR5K_PHY_ACT_ENABLE }, +}; + +/* Initial register settings for AR5211 */ +static const struct ath5k_ini ar5211_ini[] = { + { AR5K_RXDP, 0x00000000 }, + { AR5K_RTSD0, 0x84849c9c }, + { AR5K_RTSD1, 0x7c7c7c7c }, + { AR5K_RXCFG, 0x00000005 }, + { AR5K_MIBC, 0x00000000 }, + { AR5K_TOPS, 0x00000008 }, + { AR5K_RXNOFRM, 0x00000008 }, + { AR5K_TXNOFRM, 0x00000010 }, + { AR5K_RPGTO, 0x00000000 }, + { AR5K_RFCNT, 0x0000001f }, + { AR5K_QUEUE_TXDP(0), 0x00000000 }, + { AR5K_QUEUE_TXDP(1), 0x00000000 }, + { AR5K_QUEUE_TXDP(2), 0x00000000 }, + { AR5K_QUEUE_TXDP(3), 0x00000000 }, + { AR5K_QUEUE_TXDP(4), 0x00000000 }, + { AR5K_QUEUE_TXDP(5), 0x00000000 }, + { AR5K_QUEUE_TXDP(6), 0x00000000 }, + { AR5K_QUEUE_TXDP(7), 0x00000000 }, + { AR5K_QUEUE_TXDP(8), 0x00000000 }, + { AR5K_QUEUE_TXDP(9), 0x00000000 }, + { AR5K_DCU_FP, 0x00000000 }, + { AR5K_STA_ID1, 0x00000000 }, + { AR5K_BSS_ID0, 0x00000000 }, + { AR5K_BSS_ID1, 0x00000000 }, + { AR5K_RSSI_THR, 0x00000000 }, + { AR5K_CFP_PERIOD_5211, 0x00000000 }, + { AR5K_TIMER0_5211, 0x00000030 }, + { AR5K_TIMER1_5211, 0x0007ffff }, + { AR5K_TIMER2_5211, 0x01ffffff }, + { AR5K_TIMER3_5211, 0x00000031 }, + { AR5K_CFP_DUR_5211, 0x00000000 }, + { AR5K_RX_FILTER_5211, 0x00000000 }, + { AR5K_MCAST_FILTER0_5211, 0x00000000 }, + { AR5K_MCAST_FILTER1_5211, 0x00000002 }, + { AR5K_DIAG_SW_5211, 0x00000000 }, + { AR5K_ADDAC_TEST, 0x00000000 }, + { AR5K_DEFAULT_ANTENNA, 0x00000000 }, + /* PHY registers */ + { AR5K_PHY_AGC, 0x00000000 }, + { AR5K_PHY(3), 0x2d849093 }, + { AR5K_PHY(4), 0x7d32e000 }, + { AR5K_PHY(5), 0x00000f6b }, + { AR5K_PHY_ACT, 0x00000000 }, + { AR5K_PHY(11), 0x00026ffe }, + { AR5K_PHY(12), 0x00000000 }, + { AR5K_PHY(15), 0x00020100 }, + { AR5K_PHY(16), 0x206a017a }, + { AR5K_PHY(19), 0x1284613c }, + { AR5K_PHY(21), 
0x00000859 }, + { AR5K_PHY(26), 0x409a4190 }, /* 0x9868 */ + { AR5K_PHY(27), 0x050cb081 }, + { AR5K_PHY(28), 0x0000000f }, + { AR5K_PHY(29), 0x00000080 }, + { AR5K_PHY(30), 0x0000000c }, + { AR5K_PHY(64), 0x00000000 }, + { AR5K_PHY(65), 0x00000000 }, + { AR5K_PHY(66), 0x00000000 }, + { AR5K_PHY(67), 0x00800000 }, + { AR5K_PHY(68), 0x00000001 }, + { AR5K_PHY(71), 0x0000092a }, + { AR5K_PHY_IQ, 0x00000000 }, + { AR5K_PHY(73), 0x00058a05 }, + { AR5K_PHY(74), 0x00000001 }, + { AR5K_PHY(75), 0x00000000 }, + { AR5K_PHY_PAPD_PROBE, 0x00000000 }, + { AR5K_PHY(77), 0x00000000 }, /* 0x9934 */ + { AR5K_PHY(78), 0x00000000 }, /* 0x9938 */ + { AR5K_PHY(79), 0x0000003f }, /* 0x993c */ + { AR5K_PHY(80), 0x00000004 }, + { AR5K_PHY(82), 0x00000000 }, + { AR5K_PHY(83), 0x00000000 }, + { AR5K_PHY(84), 0x00000000 }, + { AR5K_PHY_RADAR, 0x5d50f14c }, + { AR5K_PHY(86), 0x00000018 }, + { AR5K_PHY(87), 0x004b6a8e }, + /* Initial Power table (32bytes) + * common on all cards/modes. + * Note: Table is rewritten during + * txpower setup later using calibration + * data etc. so next write is non-common */ + { AR5K_PHY_PCDAC_TXPOWER(1), 0x06ff05ff }, + { AR5K_PHY_PCDAC_TXPOWER(2), 0x07ff07ff }, + { AR5K_PHY_PCDAC_TXPOWER(3), 0x08ff08ff }, + { AR5K_PHY_PCDAC_TXPOWER(4), 0x09ff09ff }, + { AR5K_PHY_PCDAC_TXPOWER(5), 0x0aff0aff }, + { AR5K_PHY_PCDAC_TXPOWER(6), 0x0bff0bff }, + { AR5K_PHY_PCDAC_TXPOWER(7), 0x0cff0cff }, + { AR5K_PHY_PCDAC_TXPOWER(8), 0x0dff0dff }, + { AR5K_PHY_PCDAC_TXPOWER(9), 0x0fff0eff }, + { AR5K_PHY_PCDAC_TXPOWER(10), 0x12ff12ff }, + { AR5K_PHY_PCDAC_TXPOWER(11), 0x14ff13ff }, + { AR5K_PHY_PCDAC_TXPOWER(12), 0x16ff15ff }, + { AR5K_PHY_PCDAC_TXPOWER(13), 0x19ff17ff }, + { AR5K_PHY_PCDAC_TXPOWER(14), 0x1bff1aff }, + { AR5K_PHY_PCDAC_TXPOWER(15), 0x1eff1dff }, + { AR5K_PHY_PCDAC_TXPOWER(16), 0x23ff20ff }, + { AR5K_PHY_PCDAC_TXPOWER(17), 0x27ff25ff }, + { AR5K_PHY_PCDAC_TXPOWER(18), 0x2cff29ff }, + { AR5K_PHY_PCDAC_TXPOWER(19), 0x31ff2fff }, + { AR5K_PHY_PCDAC_TXPOWER(20), 0x37ff34ff }, + { AR5K_PHY_PCDAC_TXPOWER(21), 0x3aff3aff }, + { AR5K_PHY_PCDAC_TXPOWER(22), 0x3aff3aff }, + { AR5K_PHY_PCDAC_TXPOWER(23), 0x3aff3aff }, + { AR5K_PHY_PCDAC_TXPOWER(24), 0x3aff3aff }, + { AR5K_PHY_PCDAC_TXPOWER(25), 0x3aff3aff }, + { AR5K_PHY_PCDAC_TXPOWER(26), 0x3aff3aff }, + { AR5K_PHY_PCDAC_TXPOWER(27), 0x3aff3aff }, + { AR5K_PHY_PCDAC_TXPOWER(28), 0x3aff3aff }, + { AR5K_PHY_PCDAC_TXPOWER(29), 0x3aff3aff }, + { AR5K_PHY_PCDAC_TXPOWER(30), 0x3aff3aff }, + { AR5K_PHY_PCDAC_TXPOWER(31), 0x3aff3aff }, + { AR5K_PHY_CCKTXCTL, 0x00000000 }, + { AR5K_PHY(642), 0x503e4646 }, + { AR5K_PHY_GAIN_2GHZ, 0x6480416c }, + { AR5K_PHY(644), 0x0199a003 }, + { AR5K_PHY(645), 0x044cd610 }, + { AR5K_PHY(646), 0x13800040 }, + { AR5K_PHY(647), 0x1be00060 }, + { AR5K_PHY(648), 0x0c53800a }, + { AR5K_PHY(649), 0x0014df3b }, + { AR5K_PHY(650), 0x000001b5 }, + { AR5K_PHY(651), 0x00000020 }, +}; + +/* Initial mode-specific settings for AR5211 + * 5211 supports OFDM-only g (draft g) but we + * need to test it ! 
+ */ +static const struct ath5k_ini_mode ar5211_ini_mode[] = { + { AR5K_TXCFG, + /* a aTurbo b g (OFDM) */ + { 0x00000015, 0x00000015, 0x0000001d, 0x00000015 } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(0), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(1), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(2), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(3), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(4), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(5), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(6), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(7), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(8), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(9), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, + { AR5K_DCU_GBL_IFS_SLOT, + { 0x00000168, 0x000001e0, 0x000001b8, 0x00000168 } }, + { AR5K_DCU_GBL_IFS_SIFS, + { 0x00000230, 0x000001e0, 0x000000b0, 0x00000230 } }, + { AR5K_DCU_GBL_IFS_EIFS, + { 0x00000d98, 0x00001180, 0x00001f48, 0x00000d98 } }, + { AR5K_DCU_GBL_IFS_MISC, + { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000a0e0 } }, + { AR5K_TIME_OUT, + { 0x04000400, 0x08000800, 0x20003000, 0x04000400 } }, + { AR5K_USEC_5211, + { 0x0e8d8fa7, 0x0e8d8fcf, 0x01608f95, 0x0e8d8fa7 } }, + { AR5K_PHY_TURBO, + { 0x00000000, 0x00000003, 0x00000000, 0x00000000 } }, + { AR5K_PHY(8), + { 0x02020200, 0x02020200, 0x02010200, 0x02020200 } }, + { AR5K_PHY(9), + { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e } }, + { AR5K_PHY(10), + { 0x0a020001, 0x0a020001, 0x05010000, 0x0a020001 } }, + { AR5K_PHY(13), + { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, + { AR5K_PHY(14), + { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b } }, + { AR5K_PHY(17), + { 0x1372169c, 0x137216a5, 0x137216a8, 0x1372169c } }, + { AR5K_PHY(18), + { 0x0018ba67, 0x0018ba67, 0x0018ba69, 0x0018ba69 } }, + { AR5K_PHY(20), + { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } }, + { AR5K_PHY_SIG, + { 0x7e800d2e, 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } }, + { AR5K_PHY_AGCCOARSE, + { 0x31375d5e, 0x31375d5e, 0x313a5d5e, 0x31375d5e } }, + { AR5K_PHY_AGCCTL, + { 0x0000bd10, 0x0000bd10, 0x0000bd38, 0x0000bd10 } }, + { AR5K_PHY_NF, + { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } }, + { AR5K_PHY_RX_DELAY, + { 0x00002710, 0x00002710, 0x0000157c, 0x00002710 } }, + { AR5K_PHY(70), + { 0x00000190, 0x00000190, 0x00000084, 0x00000190 } }, + { AR5K_PHY_FRAME_CTL_5211, + { 0x6fe01020, 0x6fe01020, 0x6fe00920, 0x6fe01020 } }, + { AR5K_PHY_PCDAC_TXPOWER_BASE, + { 0x05ff14ff, 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } }, + { AR5K_RF_BUFFER_CONTROL_4, + { 0x00000010, 0x00000014, 0x00000010, 0x00000010 } }, +}; + +/* Initial register settings for AR5212 */ +static const struct ath5k_ini ar5212_ini_common_start[] = { + { AR5K_RXDP, 0x00000000 }, + { AR5K_RXCFG, 0x00000005 }, + { AR5K_MIBC, 0x00000000 }, + { AR5K_TOPS, 0x00000008 }, + { AR5K_RXNOFRM, 0x00000008 }, + { AR5K_TXNOFRM, 0x00000010 }, + { AR5K_RPGTO, 0x00000000 }, + { AR5K_RFCNT, 0x0000001f }, + { AR5K_QUEUE_TXDP(0), 0x00000000 }, + { AR5K_QUEUE_TXDP(1), 0x00000000 }, + { AR5K_QUEUE_TXDP(2), 0x00000000 }, + { AR5K_QUEUE_TXDP(3), 0x00000000 }, + { AR5K_QUEUE_TXDP(4), 0x00000000 }, + { AR5K_QUEUE_TXDP(5), 0x00000000 }, + { AR5K_QUEUE_TXDP(6), 0x00000000 }, + { 
AR5K_QUEUE_TXDP(7), 0x00000000 }, + { AR5K_QUEUE_TXDP(8), 0x00000000 }, + { AR5K_QUEUE_TXDP(9), 0x00000000 }, + { AR5K_DCU_FP, 0x00000000 }, + { AR5K_DCU_TXP, 0x00000000 }, + /* Tx filter table 0 (32 entries) */ + { AR5K_DCU_TX_FILTER_0(0), 0x00000000 }, /* DCU 0 */ + { AR5K_DCU_TX_FILTER_0(1), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(2), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(3), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(4), 0x00000000 }, /* DCU 1 */ + { AR5K_DCU_TX_FILTER_0(5), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(6), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(7), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(8), 0x00000000 }, /* DCU 2 */ + { AR5K_DCU_TX_FILTER_0(9), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(10), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(11), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(12), 0x00000000 }, /* DCU 3 */ + { AR5K_DCU_TX_FILTER_0(13), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(14), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(15), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(16), 0x00000000 }, /* DCU 4 */ + { AR5K_DCU_TX_FILTER_0(17), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(18), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(19), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(20), 0x00000000 }, /* DCU 5 */ + { AR5K_DCU_TX_FILTER_0(21), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(22), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(23), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(24), 0x00000000 }, /* DCU 6 */ + { AR5K_DCU_TX_FILTER_0(25), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(26), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(27), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(28), 0x00000000 }, /* DCU 7 */ + { AR5K_DCU_TX_FILTER_0(29), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(30), 0x00000000 }, + { AR5K_DCU_TX_FILTER_0(31), 0x00000000 }, + /* Tx filter table 1 (16 entries) */ + { AR5K_DCU_TX_FILTER_1(0), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(1), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(2), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(3), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(4), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(5), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(6), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(7), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(8), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(9), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(10), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(11), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(12), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(13), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(14), 0x00000000 }, + { AR5K_DCU_TX_FILTER_1(15), 0x00000000 }, + { AR5K_DCU_TX_FILTER_CLR, 0x00000000 }, + { AR5K_DCU_TX_FILTER_SET, 0x00000000 }, + { AR5K_STA_ID1, 0x00000000 }, + { AR5K_BSS_ID0, 0x00000000 }, + { AR5K_BSS_ID1, 0x00000000 }, + { AR5K_BEACON_5211, 0x00000000 }, + { AR5K_CFP_PERIOD_5211, 0x00000000 }, + { AR5K_TIMER0_5211, 0x00000030 }, + { AR5K_TIMER1_5211, 0x0007ffff }, + { AR5K_TIMER2_5211, 0x01ffffff }, + { AR5K_TIMER3_5211, 0x00000031 }, + { AR5K_CFP_DUR_5211, 0x00000000 }, + { AR5K_RX_FILTER_5211, 0x00000000 }, + { AR5K_DIAG_SW_5211, 0x00000000 }, + { AR5K_ADDAC_TEST, 0x00000000 }, + { AR5K_DEFAULT_ANTENNA, 0x00000000 }, + { AR5K_FRAME_CTL_QOSM, 0x000fc78f }, + { AR5K_XRMODE, 0x2a82301a }, + { AR5K_XRDELAY, 0x05dc01e0 }, + { AR5K_XRTIMEOUT, 0x1f402710 }, + { AR5K_XRCHIRP, 0x01f40000 }, + { AR5K_XRSTOMP, 0x00001e1c }, + { AR5K_SLEEP0, 0x0002aaaa }, + { AR5K_SLEEP1, 0x02005555 }, + { AR5K_SLEEP2, 0x00000000 }, + { AR_BSSMSKL, 0xffffffff }, + { AR_BSSMSKU, 0x0000ffff }, + { AR5K_TXPC, 0x00000000 }, + { AR5K_PROFCNT_TX, 0x00000000 }, + { AR5K_PROFCNT_RX, 0x00000000 }, + { AR5K_PROFCNT_RXCLR, 0x00000000 }, + { AR5K_PROFCNT_CYCLE, 0x00000000 }, 
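+ /* Quiet period control */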
+ { AR5K_QUIET_CTL1, 0x00000088 }, + /* Initial rate duration table (32 entries )*/ + { AR5K_RATE_DUR(0), 0x00000000 }, + { AR5K_RATE_DUR(1), 0x0000008c }, + { AR5K_RATE_DUR(2), 0x000000e4 }, + { AR5K_RATE_DUR(3), 0x000002d5 }, + { AR5K_RATE_DUR(4), 0x00000000 }, + { AR5K_RATE_DUR(5), 0x00000000 }, + { AR5K_RATE_DUR(6), 0x000000a0 }, + { AR5K_RATE_DUR(7), 0x000001c9 }, + { AR5K_RATE_DUR(8), 0x0000002c }, + { AR5K_RATE_DUR(9), 0x0000002c }, + { AR5K_RATE_DUR(10), 0x00000030 }, + { AR5K_RATE_DUR(11), 0x0000003c }, + { AR5K_RATE_DUR(12), 0x0000002c }, + { AR5K_RATE_DUR(13), 0x0000002c }, + { AR5K_RATE_DUR(14), 0x00000030 }, + { AR5K_RATE_DUR(15), 0x0000003c }, + { AR5K_RATE_DUR(16), 0x00000000 }, + { AR5K_RATE_DUR(17), 0x00000000 }, + { AR5K_RATE_DUR(18), 0x00000000 }, + { AR5K_RATE_DUR(19), 0x00000000 }, + { AR5K_RATE_DUR(20), 0x00000000 }, + { AR5K_RATE_DUR(21), 0x00000000 }, + { AR5K_RATE_DUR(22), 0x00000000 }, + { AR5K_RATE_DUR(23), 0x00000000 }, + { AR5K_RATE_DUR(24), 0x000000d5 }, + { AR5K_RATE_DUR(25), 0x000000df }, + { AR5K_RATE_DUR(26), 0x00000102 }, + { AR5K_RATE_DUR(27), 0x0000013a }, + { AR5K_RATE_DUR(28), 0x00000075 }, + { AR5K_RATE_DUR(29), 0x0000007f }, + { AR5K_RATE_DUR(30), 0x000000a2 }, + { AR5K_RATE_DUR(31), 0x00000000 }, + { AR5K_QUIET_CTL2, 0x00010002 }, + { AR5K_TSF_PARM, 0x00000001 }, + { AR5K_QOS_NOACK, 0x000000c0 }, + { AR5K_PHY_ERR_FIL, 0x00000000 }, + { AR5K_XRLAT_TX, 0x00000168 }, + { AR5K_ACKSIFS, 0x00000000 }, + /* Rate -> db table + * notice ...03<-02<-01<-00 ! */ + { AR5K_RATE2DB(0), 0x03020100 }, + { AR5K_RATE2DB(1), 0x07060504 }, + { AR5K_RATE2DB(2), 0x0b0a0908 }, + { AR5K_RATE2DB(3), 0x0f0e0d0c }, + { AR5K_RATE2DB(4), 0x13121110 }, + { AR5K_RATE2DB(5), 0x17161514 }, + { AR5K_RATE2DB(6), 0x1b1a1918 }, + { AR5K_RATE2DB(7), 0x1f1e1d1c }, + /* Db -> Rate table */ + { AR5K_DB2RATE(0), 0x03020100 }, + { AR5K_DB2RATE(1), 0x07060504 }, + { AR5K_DB2RATE(2), 0x0b0a0908 }, + { AR5K_DB2RATE(3), 0x0f0e0d0c }, + { AR5K_DB2RATE(4), 0x13121110 }, + { AR5K_DB2RATE(5), 0x17161514 }, + { AR5K_DB2RATE(6), 0x1b1a1918 }, + { AR5K_DB2RATE(7), 0x1f1e1d1c }, + /* PHY registers (Common settings + * for all chips/modes) */ + { AR5K_PHY(3), 0xad848e19 }, + { AR5K_PHY(4), 0x7d28e000 }, + { AR5K_PHY_TIMING_3, 0x9c0a9f6b }, + { AR5K_PHY_ACT, 0x00000000 }, + { AR5K_PHY(16), 0x206a017a }, + { AR5K_PHY(21), 0x00000859 }, + { AR5K_PHY_BIN_MASK_1, 0x00000000 }, + { AR5K_PHY_BIN_MASK_2, 0x00000000 }, + { AR5K_PHY_BIN_MASK_3, 0x00000000 }, + { AR5K_PHY_BIN_MASK_CTL, 0x00800000 }, + { AR5K_PHY_ANT_CTL, 0x00000001 }, + /*{ AR5K_PHY(71), 0x0000092a },*/ /* Old value */ + { AR5K_PHY_MAX_RX_LEN, 0x00000c80 }, + { AR5K_PHY_IQ, 0x05100000 }, + { AR5K_PHY_WARM_RESET, 0x00000001 }, + { AR5K_PHY_CTL, 0x00000004 }, + { AR5K_PHY_TXPOWER_RATE1, 0x1e1f2022 }, + { AR5K_PHY_TXPOWER_RATE2, 0x0a0b0c0d }, + { AR5K_PHY_TXPOWER_RATE_MAX, 0x0000003f }, + { AR5K_PHY(82), 0x9280b212 }, + { AR5K_PHY_RADAR, 0x5d50e188 }, + /*{ AR5K_PHY(86), 0x000000ff },*/ + { AR5K_PHY(87), 0x004b6a8e }, + { AR5K_PHY_NFTHRES, 0x000003ce }, + { AR5K_PHY_RESTART, 0x192fb515 }, + { AR5K_PHY(94), 0x00000001 }, + { AR5K_PHY_RFBUS_REQ, 0x00000000 }, + /*{ AR5K_PHY(644), 0x0080a333 },*/ /* Old value */ + /*{ AR5K_PHY(645), 0x00206c10 },*/ /* Old value */ + { AR5K_PHY(644), 0x00806333 }, + { AR5K_PHY(645), 0x00106c10 }, + { AR5K_PHY(646), 0x009c4060 }, + /* { AR5K_PHY(647), 0x1483800a }, */ + /* { AR5K_PHY(648), 0x01831061 }, */ /* Old value */ + { AR5K_PHY(648), 0x018830c6 }, + { AR5K_PHY(649), 0x00000400 }, + /*{ AR5K_PHY(650), 0x000001b5 },*/ 
+ { AR5K_PHY(651), 0x00000000 }, + { AR5K_PHY_TXPOWER_RATE3, 0x20202020 }, + { AR5K_PHY_TXPOWER_RATE4, 0x20202020 }, + /*{ AR5K_PHY(655), 0x13c889af },*/ + { AR5K_PHY(656), 0x38490a20 }, + { AR5K_PHY(657), 0x00007bb6 }, + { AR5K_PHY(658), 0x0fff3ffc }, +}; + +/* Initial mode-specific settings for AR5212 (Written before ar5212_ini) */ +static const struct ath5k_ini_mode ar5212_ini_mode_start[] = { + { AR5K_QUEUE_DFS_LOCAL_IFS(0), + /* a/XR aTurbo b g (DYN) gTurbo */ + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(1), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(2), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(3), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(4), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(5), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(6), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(7), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(8), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, + { AR5K_QUEUE_DFS_LOCAL_IFS(9), + { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, + { AR5K_DCU_GBL_IFS_SIFS, + { 0x00000230, 0x000001e0, 0x000000b0, 0x00000160, 0x000001e0 } }, + { AR5K_DCU_GBL_IFS_SLOT, + { 0x00000168, 0x000001e0, 0x000001b8, 0x0000018c, 0x000001e0 } }, + { AR5K_DCU_GBL_IFS_EIFS, + { 0x00000e60, 0x00001180, 0x00001f1c, 0x00003e38, 0x00001180 } }, + { AR5K_DCU_GBL_IFS_MISC, + { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000b0e0, 0x00014068 } }, + { AR5K_TIME_OUT, + { 0x03e803e8, 0x06e006e0, 0x04200420, 0x08400840, 0x06e006e0 } }, + { AR5K_PHY_TURBO, + { 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000003 } }, + { AR5K_PHY(8), + { 0x02020200, 0x02020200, 0x02010200, 0x02020200, 0x02020200 } }, + { AR5K_PHY_RF_CTL2, + { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e, 0x00000e0e } }, + { AR5K_PHY_SETTLING, + { 0x1372161c, 0x13721c25, 0x13721722, 0x137216a2, 0x13721c25 } }, + { AR5K_PHY_AGCCTL, + { 0x00009d10, 0x00009d10, 0x00009d18, 0x00009d18, 0x00009d10 } }, + { AR5K_PHY_NF, + { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } }, + { AR5K_PHY_WEAK_OFDM_HIGH_THR, + { 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 } }, + { AR5K_PHY(70), + { 0x000001b8, 0x000001b8, 0x00000084, 0x00000108, 0x000001b8 } }, + { AR5K_PHY_OFDM_SELFCORR, + { 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05 } }, + { 0xa230, + { 0x00000000, 0x00000000, 0x00000000, 0x00000108, 0x00000000 } }, +}; + +/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */ +static const struct ath5k_ini_mode rf5111_ini_mode_end[] = { + { AR5K_TXCFG, + /* a/XR aTurbo b g (DYN) gTurbo */ + { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } }, + { AR5K_USEC_5211, + { 0x128d8fa7, 0x09880fcf, 0x04e00f95, 0x12e00fab, 0x09880fcf } }, + { AR5K_PHY_RF_CTL3, + { 0x0a020001, 0x0a020001, 0x05010100, 0x0a020001, 0x0a020001 } }, + { AR5K_PHY_RF_CTL4, + { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, + { AR5K_PHY_PA_CTL, + { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, + { AR5K_PHY_GAIN, + { 0x0018da5a, 0x0018da5a, 0x0018ca69, 0x0018ca69, 0x0018ca69 } }, + { AR5K_PHY_DESIRED_SIZE, + { 
0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } }, + { AR5K_PHY_SIG, + { 0x7e800d2e, 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e, 0x7e800d2e } }, + { AR5K_PHY_AGCCOARSE, + { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137615e } }, + { AR5K_PHY_WEAK_OFDM_LOW_THR, + { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb080, 0x050cb080 } }, + { AR5K_PHY_RX_DELAY, + { 0x00002710, 0x00002710, 0x0000157c, 0x00002af8, 0x00002710 } }, + { AR5K_PHY_FRAME_CTL_5211, + { 0xf7b81020, 0xf7b81020, 0xf7b80d20, 0xf7b81020, 0xf7b81020 } }, + { AR5K_PHY_GAIN_2GHZ, + { 0x642c416a, 0x642c416a, 0x6440416a, 0x6440416a, 0x6440416a } }, + { AR5K_PHY_CCK_RX_CTL_4, + { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } }, +}; + +static const struct ath5k_ini rf5111_ini_common_end[] = { + { AR5K_DCU_FP, 0x00000000 }, + { AR5K_PHY_AGC, 0x00000000 }, + { AR5K_PHY_ADC_CTL, 0x00022ffe }, + { 0x983c, 0x00020100 }, + { AR5K_PHY_GAIN_OFFSET, 0x1284613c }, + { AR5K_PHY_PAPD_PROBE, 0x00004883 }, + { 0x9940, 0x00000004 }, + { 0x9958, 0x000000ff }, + { 0x9974, 0x00000000 }, + { AR5K_PHY_SPENDING, 0x00000018 }, + { AR5K_PHY_CCKTXCTL, 0x00000000 }, + { AR5K_PHY_CCK_CROSSCORR, 0xd03e6788 }, + { AR5K_PHY_DAG_CCK_CTL, 0x000001b5 }, + { 0xa23c, 0x13c889af }, +}; + +/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */ +static const struct ath5k_ini_mode rf5112_ini_mode_end[] = { + { AR5K_TXCFG, + /* a/XR aTurbo b g (DYN) gTurbo */ + { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } }, + { AR5K_USEC_5211, + { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, + { AR5K_PHY_RF_CTL3, + { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, + { AR5K_PHY_RF_CTL4, + { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, + { AR5K_PHY_PA_CTL, + { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, + { AR5K_PHY_GAIN, + { 0x0018da6d, 0x0018da6d, 0x0018ca75, 0x0018ca75, 0x0018ca75 } }, + { AR5K_PHY_DESIRED_SIZE, + { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } }, + { AR5K_PHY_SIG, + { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e, 0x7e800d2e } }, + { AR5K_PHY_AGCCOARSE, + { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e } }, + { AR5K_PHY_WEAK_OFDM_LOW_THR, + { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, + { AR5K_PHY_RX_DELAY, + { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, + { AR5K_PHY_FRAME_CTL_5211, + { 0xf7b81020, 0xf7b81020, 0xf7b80d10, 0xf7b81010, 0xf7b81010 } }, + { AR5K_PHY_CCKTXCTL, + { 0x00000000, 0x00000000, 0x00000008, 0x00000008, 0x00000008 } }, + { AR5K_PHY_CCK_CROSSCORR, + { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, + { AR5K_PHY_GAIN_2GHZ, + { 0x642c0140, 0x642c0140, 0x6442c160, 0x6442c160, 0x6442c160 } }, + { AR5K_PHY_CCK_RX_CTL_4, + { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } }, +}; + +static const struct ath5k_ini rf5112_ini_common_end[] = { + { AR5K_DCU_FP, 0x00000000 }, + { AR5K_PHY_AGC, 0x00000000 }, + { AR5K_PHY_ADC_CTL, 0x00022ffe }, + { 0x983c, 0x00020100 }, + { AR5K_PHY_GAIN_OFFSET, 0x1284613c }, + { AR5K_PHY_PAPD_PROBE, 0x00004882 }, + { 0x9940, 0x00000004 }, + { 0x9958, 0x000000ff }, + { 0x9974, 0x00000000 }, + { AR5K_PHY_DAG_CCK_CTL, 0x000001b5 }, + { 0xa23c, 0x13c889af }, +}; + +/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */ +static const struct ath5k_ini_mode rf5413_ini_mode_end[] = { + { AR5K_TXCFG, + /* a/XR aTurbo b g (DYN) gTurbo */ + { 0x00000015, 0x00000015, 0x00000015, 
0x00000015, 0x00000015 } }, + { AR5K_USEC_5211, + { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, + { AR5K_PHY_RF_CTL3, + { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, + { AR5K_PHY_RF_CTL4, + { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, + { AR5K_PHY_PA_CTL, + { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, + { AR5K_PHY_GAIN, + { 0x0018fa61, 0x0018fa61, 0x001a1a63, 0x001a1a63, 0x001a1a63 } }, + { AR5K_PHY_DESIRED_SIZE, + { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } }, + { AR5K_PHY_SIG, + { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } }, + { AR5K_PHY_AGCCOARSE, + { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } }, + { AR5K_PHY_WEAK_OFDM_LOW_THR, + { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, + { AR5K_PHY_RX_DELAY, + { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, + { AR5K_PHY_FRAME_CTL_5211, + { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, + { AR5K_PHY_CCKTXCTL, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { AR5K_PHY_CCK_CROSSCORR, + { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, + { AR5K_PHY_GAIN_2GHZ, + { 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 } }, + { AR5K_PHY_CCK_RX_CTL_4, + { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } }, + { 0xa300, + { 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 } }, + { 0xa304, + { 0x30032602, 0x30032602, 0x30032602, 0x30032602, 0x30032602 } }, + { 0xa308, + { 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06 } }, + { 0xa30c, + { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } }, + { 0xa310, + { 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f } }, + { 0xa314, + { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } }, + { 0xa318, + { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } }, + { 0xa31c, + { 0x90cf865b, 0x90cf865b, 0x8ecf865b, 0x8ecf865b, 0x8ecf865b } }, + { 0xa320, + { 0x9d4f970f, 0x9d4f970f, 0x9b4f970f, 0x9b4f970f, 0x9b4f970f } }, + { 0xa324, + { 0xa7cfa38f, 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f, 0xa3cf9f8f } }, + { 0xa328, + { 0xb55faf1f, 0xb55faf1f, 0xb35faf1f, 0xb35faf1f, 0xb35faf1f } }, + { 0xa32c, + { 0xbddfb99f, 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f, 0xbbdfb99f } }, + { 0xa330, + { 0xcb7fc53f, 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f, 0xcb7fc73f } }, + { 0xa334, + { 0xd5ffd1bf, 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } }, +}; + +static const struct ath5k_ini rf5413_ini_common_end[] = { + { AR5K_DCU_FP, 0x000003e0 }, + { AR5K_5414_CBCFG, 0x00000010 }, + { AR5K_SEQ_MASK, 0x0000000f }, + { 0x809c, 0x00000000 }, + { 0x80a0, 0x00000000 }, + { AR5K_MIC_QOS_CTL, 0x00000000 }, + { AR5K_MIC_QOS_SEL, 0x00000000 }, + { AR5K_MISC_MODE, 0x00000000 }, + { AR5K_OFDM_FIL_CNT, 0x00000000 }, + { AR5K_CCK_FIL_CNT, 0x00000000 }, + { AR5K_PHYERR_CNT1, 0x00000000 }, + { AR5K_PHYERR_CNT1_MASK, 0x00000000 }, + { AR5K_PHYERR_CNT2, 0x00000000 }, + { AR5K_PHYERR_CNT2_MASK, 0x00000000 }, + { AR5K_TSF_THRES, 0x00000000 }, + { 0x8140, 0x800003f9 }, + { 0x8144, 0x00000000 }, + { AR5K_PHY_AGC, 0x00000000 }, + { AR5K_PHY_ADC_CTL, 0x0000a000 }, + { 0x983c, 0x00200400 }, + { AR5K_PHY_GAIN_OFFSET, 0x1284233c }, + { AR5K_PHY_SCR, 0x0000001f }, + { AR5K_PHY_SLMT, 0x00000080 }, + { AR5K_PHY_SCAL, 0x0000000e }, + { 0x9958, 0x00081fff }, + { AR5K_PHY_TIMING_7, 0x00000000 }, + { AR5K_PHY_TIMING_8, 0x02800000 }, + { AR5K_PHY_TIMING_11, 0x00000000 }, + { 
AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 }, + { 0x99e4, 0xaaaaaaaa }, + { 0x99e8, 0x3c466478 }, + { 0x99ec, 0x000000aa }, + { AR5K_PHY_SCLOCK, 0x0000000c }, + { AR5K_PHY_SDELAY, 0x000000ff }, + { AR5K_PHY_SPENDING, 0x00000014 }, + { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 }, + { 0xa23c, 0x93c889af }, + { AR5K_PHY_FAST_ADC, 0x00000001 }, + { 0xa250, 0x0000a000 }, + { AR5K_PHY_BLUETOOTH, 0x00000000 }, + { AR5K_PHY_TPC_RG1, 0x0cc75380 }, + { 0xa25c, 0x0f0f0f01 }, + { 0xa260, 0x5f690f01 }, + { 0xa264, 0x00418a11 }, + { 0xa268, 0x00000000 }, + { AR5K_PHY_TPC_RG5, 0x0c30c16a }, + { 0xa270, 0x00820820 }, + { 0xa274, 0x081b7caa }, + { 0xa278, 0x1ce739ce }, + { 0xa27c, 0x051701ce }, + { 0xa338, 0x00000000 }, + { 0xa33c, 0x00000000 }, + { 0xa340, 0x00000000 }, + { 0xa344, 0x00000000 }, + { 0xa348, 0x3fffffff }, + { 0xa34c, 0x3fffffff }, + { 0xa350, 0x3fffffff }, + { 0xa354, 0x0003ffff }, + { 0xa358, 0x79a8aa1f }, + { 0xa35c, 0x066c420f }, + { 0xa360, 0x0f282207 }, + { 0xa364, 0x17601685 }, + { 0xa368, 0x1f801104 }, + { 0xa36c, 0x37a00c03 }, + { 0xa370, 0x3fc40883 }, + { 0xa374, 0x57c00803 }, + { 0xa378, 0x5fd80682 }, + { 0xa37c, 0x7fe00482 }, + { 0xa380, 0x7f3c7bba }, + { 0xa384, 0xf3307ff0 }, +}; + +/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */ +/* XXX: a mode ? */ +static const struct ath5k_ini_mode rf2413_ini_mode_end[] = { + { AR5K_TXCFG, + /* a/XR aTurbo b g (DYN) gTurbo */ + { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } }, + { AR5K_USEC_5211, + { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, + { AR5K_PHY_RF_CTL3, + { 0x0a020001, 0x0a020001, 0x05020000, 0x0a020001, 0x0a020001 } }, + { AR5K_PHY_RF_CTL4, + { 0x00000e00, 0x00000e00, 0x00000e00, 0x00000e00, 0x00000e00 } }, + { AR5K_PHY_PA_CTL, + { 0x00000002, 0x00000002, 0x0000000a, 0x0000000a, 0x0000000a } }, + { AR5K_PHY_GAIN, + { 0x0018da6d, 0x0018da6d, 0x001a6a64, 0x001a6a64, 0x001a6a64 } }, + { AR5K_PHY_DESIRED_SIZE, + { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da, 0x0de8b0da } }, + { AR5K_PHY_SIG, + { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e, 0x7e800d2e } }, + { AR5K_PHY_AGCCOARSE, + { 0x3137665e, 0x3137665e, 0x3137665e, 0x3139605e, 0x3137665e } }, + { AR5K_PHY_WEAK_OFDM_LOW_THR, + { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, + { AR5K_PHY_RX_DELAY, + { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, + { AR5K_PHY_FRAME_CTL_5211, + { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, + { AR5K_PHY_CCKTXCTL, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { AR5K_PHY_CCK_CROSSCORR, + { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, + { AR5K_PHY_GAIN_2GHZ, + { 0x002c0140, 0x002c0140, 0x0042c140, 0x0042c140, 0x0042c140 } }, + { AR5K_PHY_CCK_RX_CTL_4, + { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } }, +}; + +static const struct ath5k_ini rf2413_ini_common_end[] = { + { AR5K_DCU_FP, 0x000003e0 }, + { AR5K_SEQ_MASK, 0x0000000f }, + { AR5K_MIC_QOS_CTL, 0x00000000 }, + { AR5K_MIC_QOS_SEL, 0x00000000 }, + { AR5K_MISC_MODE, 0x00000000 }, + { AR5K_OFDM_FIL_CNT, 0x00000000 }, + { AR5K_CCK_FIL_CNT, 0x00000000 }, + { AR5K_PHYERR_CNT1, 0x00000000 }, + { AR5K_PHYERR_CNT1_MASK, 0x00000000 }, + { AR5K_PHYERR_CNT2, 0x00000000 }, + { AR5K_PHYERR_CNT2_MASK, 0x00000000 }, + { AR5K_TSF_THRES, 0x00000000 }, + { 0x8140, 0x800000a8 }, + { 0x8144, 0x00000000 }, + { AR5K_PHY_AGC, 0x00000000 }, + { AR5K_PHY_ADC_CTL, 0x0000a000 }, + { 0x983c, 0x00200400 }, + { AR5K_PHY_GAIN_OFFSET, 0x1284233c }, + { 
AR5K_PHY_SCR, 0x0000001f }, + { AR5K_PHY_SLMT, 0x00000080 }, + { AR5K_PHY_SCAL, 0x0000000e }, + { 0x9958, 0x000000ff }, + { AR5K_PHY_TIMING_7, 0x00000000 }, + { AR5K_PHY_TIMING_8, 0x02800000 }, + { AR5K_PHY_TIMING_11, 0x00000000 }, + { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 }, + { 0x99e4, 0xaaaaaaaa }, + { 0x99e8, 0x3c466478 }, + { 0x99ec, 0x000000aa }, + { AR5K_PHY_SCLOCK, 0x0000000c }, + { AR5K_PHY_SDELAY, 0x000000ff }, + { AR5K_PHY_SPENDING, 0x00000014 }, + { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 }, + { 0xa23c, 0x93c889af }, + { AR5K_PHY_FAST_ADC, 0x00000001 }, + { 0xa250, 0x0000a000 }, + { AR5K_PHY_BLUETOOTH, 0x00000000 }, + { AR5K_PHY_TPC_RG1, 0x0cc75380 }, + { 0xa25c, 0x0f0f0f01 }, + { 0xa260, 0x5f690f01 }, + { 0xa264, 0x00418a11 }, + { 0xa268, 0x00000000 }, + { AR5K_PHY_TPC_RG5, 0x0c30c16a }, + { 0xa270, 0x00820820 }, + { 0xa274, 0x001b7caa }, + { 0xa278, 0x1ce739ce }, + { 0xa27c, 0x051701ce }, + { 0xa300, 0x18010000 }, + { 0xa304, 0x30032602 }, + { 0xa308, 0x48073e06 }, + { 0xa30c, 0x560b4c0a }, + { 0xa310, 0x641a600f }, + { 0xa314, 0x784f6e1b }, + { 0xa318, 0x868f7c5a }, + { 0xa31c, 0x8ecf865b }, + { 0xa320, 0x9d4f970f }, + { 0xa324, 0xa5cfa18f }, + { 0xa328, 0xb55faf1f }, + { 0xa32c, 0xbddfb99f }, + { 0xa330, 0xcd7fc73f }, + { 0xa334, 0xd5ffd1bf }, + { 0xa338, 0x00000000 }, + { 0xa33c, 0x00000000 }, + { 0xa340, 0x00000000 }, + { 0xa344, 0x00000000 }, + { 0xa348, 0x3fffffff }, + { 0xa34c, 0x3fffffff }, + { 0xa350, 0x3fffffff }, + { 0xa354, 0x0003ffff }, + { 0xa358, 0x79a8aa1f }, + { 0xa35c, 0x066c420f }, + { 0xa360, 0x0f282207 }, + { 0xa364, 0x17601685 }, + { 0xa368, 0x1f801104 }, + { 0xa36c, 0x37a00c03 }, + { 0xa370, 0x3fc40883 }, + { 0xa374, 0x57c00803 }, + { 0xa378, 0x5fd80682 }, + { 0xa37c, 0x7fe00482 }, + { 0xa380, 0x7f3c7bba }, + { 0xa384, 0xf3307ff0 }, +}; + +/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */ +/* XXX: a mode ? 
*/ +static const struct ath5k_ini_mode rf2425_ini_mode_end[] = { + { AR5K_TXCFG, + /* a/XR aTurbo b g (DYN) gTurbo */ + { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } }, + { AR5K_USEC_5211, + { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, + { AR5K_PHY_TURBO, + { 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000001 } }, + { AR5K_PHY_RF_CTL3, + { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, + { AR5K_PHY_RF_CTL4, + { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, + { AR5K_PHY_PA_CTL, + { 0x00000003, 0x00000003, 0x0000000b, 0x0000000b, 0x0000000b } }, + { AR5K_PHY_SETTLING, + { 0x1372161c, 0x13721c25, 0x13721722, 0x13721422, 0x13721c25 } }, + { AR5K_PHY_GAIN, + { 0x0018fa61, 0x0018fa61, 0x00199a65, 0x00199a65, 0x00199a65 } }, + { AR5K_PHY_DESIRED_SIZE, + { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } }, + { AR5K_PHY_SIG, + { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } }, + { AR5K_PHY_AGCCOARSE, + { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } }, + { AR5K_PHY_WEAK_OFDM_LOW_THR, + { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, + { AR5K_PHY_RX_DELAY, + { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, + { AR5K_PHY_FRAME_CTL_5211, + { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, + { AR5K_PHY_CCKTXCTL, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { AR5K_PHY_CCK_CROSSCORR, + { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, + { AR5K_PHY_GAIN_2GHZ, + { 0x00000140, 0x00000140, 0x0052c140, 0x0052c140, 0x0052c140 } }, + { AR5K_PHY_CCK_RX_CTL_4, + { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } }, + { 0xa324, + { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, + { 0xa328, + { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, + { 0xa32c, + { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, + { 0xa330, + { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, + { 0xa334, + { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, +}; + +static const struct ath5k_ini rf2425_ini_common_end[] = { + { AR5K_DCU_FP, 0x000003e0 }, + { AR5K_SEQ_MASK, 0x0000000f }, + { 0x809c, 0x00000000 }, + { 0x80a0, 0x00000000 }, + { AR5K_MIC_QOS_CTL, 0x00000000 }, + { AR5K_MIC_QOS_SEL, 0x00000000 }, + { AR5K_MISC_MODE, 0x00000000 }, + { AR5K_OFDM_FIL_CNT, 0x00000000 }, + { AR5K_CCK_FIL_CNT, 0x00000000 }, + { AR5K_PHYERR_CNT1, 0x00000000 }, + { AR5K_PHYERR_CNT1_MASK, 0x00000000 }, + { AR5K_PHYERR_CNT2, 0x00000000 }, + { AR5K_PHYERR_CNT2_MASK, 0x00000000 }, + { AR5K_TSF_THRES, 0x00000000 }, + { 0x8140, 0x800003f9 }, + { 0x8144, 0x00000000 }, + { AR5K_PHY_AGC, 0x00000000 }, + { AR5K_PHY_ADC_CTL, 0x0000a000 }, + { 0x983c, 0x00200400 }, + { AR5K_PHY_GAIN_OFFSET, 0x1284233c }, + { AR5K_PHY_SCR, 0x0000001f }, + { AR5K_PHY_SLMT, 0x00000080 }, + { AR5K_PHY_SCAL, 0x0000000e }, + { 0x9958, 0x00081fff }, + { AR5K_PHY_TIMING_7, 0x00000000 }, + { AR5K_PHY_TIMING_8, 0x02800000 }, + { AR5K_PHY_TIMING_11, 0x00000000 }, + { 0x99dc, 0xfebadbe8 }, + { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 }, + { 0x99e4, 0xaaaaaaaa }, + { 0x99e8, 0x3c466478 }, + { 0x99ec, 0x000000aa }, + { AR5K_PHY_SCLOCK, 0x0000000c }, + { AR5K_PHY_SDELAY, 0x000000ff }, + { AR5K_PHY_SPENDING, 0x00000014 }, + { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 }, + { AR5K_PHY_TXPOWER_RATE3, 0x20202020 }, + { AR5K_PHY_TXPOWER_RATE4, 0x20202020 }, + { 0xa23c, 0x93c889af }, + { AR5K_PHY_FAST_ADC, 0x00000001 
}, + { 0xa250, 0x0000a000 }, + { AR5K_PHY_BLUETOOTH, 0x00000000 }, + { AR5K_PHY_TPC_RG1, 0x0cc75380 }, + { 0xa25c, 0x0f0f0f01 }, + { 0xa260, 0x5f690f01 }, + { 0xa264, 0x00418a11 }, + { 0xa268, 0x00000000 }, + { AR5K_PHY_TPC_RG5, 0x0c30c166 }, + { 0xa270, 0x00820820 }, + { 0xa274, 0x081a3caa }, + { 0xa278, 0x1ce739ce }, + { 0xa27c, 0x051701ce }, + { 0xa300, 0x16010000 }, + { 0xa304, 0x2c032402 }, + { 0xa308, 0x48433e42 }, + { 0xa30c, 0x5a0f500b }, + { 0xa310, 0x6c4b624a }, + { 0xa314, 0x7e8b748a }, + { 0xa318, 0x96cf8ccb }, + { 0xa31c, 0xa34f9d0f }, + { 0xa320, 0xa7cfa58f }, + { 0xa348, 0x3fffffff }, + { 0xa34c, 0x3fffffff }, + { 0xa350, 0x3fffffff }, + { 0xa354, 0x0003ffff }, + { 0xa358, 0x79a8aa1f }, + { 0xa35c, 0x066c420f }, + { 0xa360, 0x0f282207 }, + { 0xa364, 0x17601685 }, + { 0xa368, 0x1f801104 }, + { 0xa36c, 0x37a00c03 }, + { 0xa370, 0x3fc40883 }, + { 0xa374, 0x57c00803 }, + { 0xa378, 0x5fd80682 }, + { 0xa37c, 0x7fe00482 }, + { 0xa380, 0x7f3c7bba }, + { 0xa384, 0xf3307ff0 }, +}; + +/* + * Initial BaseBand Gain settings for RF5111/5112 (AR5210 comes with + * RF5110 only so initial BB Gain settings are included in AR5K_AR5210_INI) + */ + +/* RF5111 Initial BaseBand Gain settings */ +static const struct ath5k_ini rf5111_ini_bbgain[] = { + { AR5K_BB_GAIN(0), 0x00000000 }, + { AR5K_BB_GAIN(1), 0x00000020 }, + { AR5K_BB_GAIN(2), 0x00000010 }, + { AR5K_BB_GAIN(3), 0x00000030 }, + { AR5K_BB_GAIN(4), 0x00000008 }, + { AR5K_BB_GAIN(5), 0x00000028 }, + { AR5K_BB_GAIN(6), 0x00000004 }, + { AR5K_BB_GAIN(7), 0x00000024 }, + { AR5K_BB_GAIN(8), 0x00000014 }, + { AR5K_BB_GAIN(9), 0x00000034 }, + { AR5K_BB_GAIN(10), 0x0000000c }, + { AR5K_BB_GAIN(11), 0x0000002c }, + { AR5K_BB_GAIN(12), 0x00000002 }, + { AR5K_BB_GAIN(13), 0x00000022 }, + { AR5K_BB_GAIN(14), 0x00000012 }, + { AR5K_BB_GAIN(15), 0x00000032 }, + { AR5K_BB_GAIN(16), 0x0000000a }, + { AR5K_BB_GAIN(17), 0x0000002a }, + { AR5K_BB_GAIN(18), 0x00000006 }, + { AR5K_BB_GAIN(19), 0x00000026 }, + { AR5K_BB_GAIN(20), 0x00000016 }, + { AR5K_BB_GAIN(21), 0x00000036 }, + { AR5K_BB_GAIN(22), 0x0000000e }, + { AR5K_BB_GAIN(23), 0x0000002e }, + { AR5K_BB_GAIN(24), 0x00000001 }, + { AR5K_BB_GAIN(25), 0x00000021 }, + { AR5K_BB_GAIN(26), 0x00000011 }, + { AR5K_BB_GAIN(27), 0x00000031 }, + { AR5K_BB_GAIN(28), 0x00000009 }, + { AR5K_BB_GAIN(29), 0x00000029 }, + { AR5K_BB_GAIN(30), 0x00000005 }, + { AR5K_BB_GAIN(31), 0x00000025 }, + { AR5K_BB_GAIN(32), 0x00000015 }, + { AR5K_BB_GAIN(33), 0x00000035 }, + { AR5K_BB_GAIN(34), 0x0000000d }, + { AR5K_BB_GAIN(35), 0x0000002d }, + { AR5K_BB_GAIN(36), 0x00000003 }, + { AR5K_BB_GAIN(37), 0x00000023 }, + { AR5K_BB_GAIN(38), 0x00000013 }, + { AR5K_BB_GAIN(39), 0x00000033 }, + { AR5K_BB_GAIN(40), 0x0000000b }, + { AR5K_BB_GAIN(41), 0x0000002b }, + { AR5K_BB_GAIN(42), 0x0000002b }, + { AR5K_BB_GAIN(43), 0x0000002b }, + { AR5K_BB_GAIN(44), 0x0000002b }, + { AR5K_BB_GAIN(45), 0x0000002b }, + { AR5K_BB_GAIN(46), 0x0000002b }, + { AR5K_BB_GAIN(47), 0x0000002b }, + { AR5K_BB_GAIN(48), 0x0000002b }, + { AR5K_BB_GAIN(49), 0x0000002b }, + { AR5K_BB_GAIN(50), 0x0000002b }, + { AR5K_BB_GAIN(51), 0x0000002b }, + { AR5K_BB_GAIN(52), 0x0000002b }, + { AR5K_BB_GAIN(53), 0x0000002b }, + { AR5K_BB_GAIN(54), 0x0000002b }, + { AR5K_BB_GAIN(55), 0x0000002b }, + { AR5K_BB_GAIN(56), 0x0000002b }, + { AR5K_BB_GAIN(57), 0x0000002b }, + { AR5K_BB_GAIN(58), 0x0000002b }, + { AR5K_BB_GAIN(59), 0x0000002b }, + { AR5K_BB_GAIN(60), 0x0000002b }, + { AR5K_BB_GAIN(61), 0x0000002b }, + { AR5K_BB_GAIN(62), 0x00000002 }, + { AR5K_BB_GAIN(63), 0x00000016 }, 
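+ /* Upper entries saturate at the maximum BB gain step (0x2b) */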
+}; + +/* RF5112 Initial BaseBand Gain settings (Same for RF5413/5414+) */ +static const struct ath5k_ini rf5112_ini_bbgain[] = { + { AR5K_BB_GAIN(0), 0x00000000 }, + { AR5K_BB_GAIN(1), 0x00000001 }, + { AR5K_BB_GAIN(2), 0x00000002 }, + { AR5K_BB_GAIN(3), 0x00000003 }, + { AR5K_BB_GAIN(4), 0x00000004 }, + { AR5K_BB_GAIN(5), 0x00000005 }, + { AR5K_BB_GAIN(6), 0x00000008 }, + { AR5K_BB_GAIN(7), 0x00000009 }, + { AR5K_BB_GAIN(8), 0x0000000a }, + { AR5K_BB_GAIN(9), 0x0000000b }, + { AR5K_BB_GAIN(10), 0x0000000c }, + { AR5K_BB_GAIN(11), 0x0000000d }, + { AR5K_BB_GAIN(12), 0x00000010 }, + { AR5K_BB_GAIN(13), 0x00000011 }, + { AR5K_BB_GAIN(14), 0x00000012 }, + { AR5K_BB_GAIN(15), 0x00000013 }, + { AR5K_BB_GAIN(16), 0x00000014 }, + { AR5K_BB_GAIN(17), 0x00000015 }, + { AR5K_BB_GAIN(18), 0x00000018 }, + { AR5K_BB_GAIN(19), 0x00000019 }, + { AR5K_BB_GAIN(20), 0x0000001a }, + { AR5K_BB_GAIN(21), 0x0000001b }, + { AR5K_BB_GAIN(22), 0x0000001c }, + { AR5K_BB_GAIN(23), 0x0000001d }, + { AR5K_BB_GAIN(24), 0x00000020 }, + { AR5K_BB_GAIN(25), 0x00000021 }, + { AR5K_BB_GAIN(26), 0x00000022 }, + { AR5K_BB_GAIN(27), 0x00000023 }, + { AR5K_BB_GAIN(28), 0x00000024 }, + { AR5K_BB_GAIN(29), 0x00000025 }, + { AR5K_BB_GAIN(30), 0x00000028 }, + { AR5K_BB_GAIN(31), 0x00000029 }, + { AR5K_BB_GAIN(32), 0x0000002a }, + { AR5K_BB_GAIN(33), 0x0000002b }, + { AR5K_BB_GAIN(34), 0x0000002c }, + { AR5K_BB_GAIN(35), 0x0000002d }, + { AR5K_BB_GAIN(36), 0x00000030 }, + { AR5K_BB_GAIN(37), 0x00000031 }, + { AR5K_BB_GAIN(38), 0x00000032 }, + { AR5K_BB_GAIN(39), 0x00000033 }, + { AR5K_BB_GAIN(40), 0x00000034 }, + { AR5K_BB_GAIN(41), 0x00000035 }, + { AR5K_BB_GAIN(42), 0x00000035 }, + { AR5K_BB_GAIN(43), 0x00000035 }, + { AR5K_BB_GAIN(44), 0x00000035 }, + { AR5K_BB_GAIN(45), 0x00000035 }, + { AR5K_BB_GAIN(46), 0x00000035 }, + { AR5K_BB_GAIN(47), 0x00000035 }, + { AR5K_BB_GAIN(48), 0x00000035 }, + { AR5K_BB_GAIN(49), 0x00000035 }, + { AR5K_BB_GAIN(50), 0x00000035 }, + { AR5K_BB_GAIN(51), 0x00000035 }, + { AR5K_BB_GAIN(52), 0x00000035 }, + { AR5K_BB_GAIN(53), 0x00000035 }, + { AR5K_BB_GAIN(54), 0x00000035 }, + { AR5K_BB_GAIN(55), 0x00000035 }, + { AR5K_BB_GAIN(56), 0x00000035 }, + { AR5K_BB_GAIN(57), 0x00000035 }, + { AR5K_BB_GAIN(58), 0x00000035 }, + { AR5K_BB_GAIN(59), 0x00000035 }, + { AR5K_BB_GAIN(60), 0x00000035 }, + { AR5K_BB_GAIN(61), 0x00000035 }, + { AR5K_BB_GAIN(62), 0x00000010 }, + { AR5K_BB_GAIN(63), 0x0000001a }, +}; + + +/* + * Write initial register dump + */ +static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, + const struct ath5k_ini *ini_regs, bool change_channel) +{ + unsigned int i; + + /* Write initial registers */ + for (i = 0; i < size; i++) { + /* On channel change there is + * no need to mess with PCU */ + if (change_channel && + ini_regs[i].ini_register >= AR5K_PCU_MIN && + ini_regs[i].ini_register <= AR5K_PCU_MAX) + continue; + + switch (ini_regs[i].ini_mode) { + case AR5K_INI_READ: + /* Cleared on read */ + ath5k_hw_reg_read(ah, ini_regs[i].ini_register); + break; + case AR5K_INI_WRITE: + default: + AR5K_REG_WAIT(i); + ath5k_hw_reg_write(ah, ini_regs[i].ini_value, + ini_regs[i].ini_register); + } + } +} + +static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah, + unsigned int size, const struct ath5k_ini_mode *ini_mode, + u8 mode) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + AR5K_REG_WAIT(i); + ath5k_hw_reg_write(ah, ini_mode[i].mode_value[mode], + (u32)ini_mode[i].mode_register); + } + +} + +int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool 
change_channel) +{ + /* + * Write initial register settings + */ + + /* For AR5212 and combatible */ + if (ah->ah_version == AR5K_AR5212) { + + /* First set of mode-specific settings */ + ath5k_hw_ini_mode_registers(ah, + ARRAY_SIZE(ar5212_ini_mode_start), + ar5212_ini_mode_start, mode); + + /* + * Write initial settings common for all modes + */ + ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5212_ini_common_start), + ar5212_ini_common_start, change_channel); + + /* Second set of mode-specific settings */ + switch (ah->ah_radio) { + case AR5K_RF5111: + + ath5k_hw_ini_mode_registers(ah, + ARRAY_SIZE(rf5111_ini_mode_end), + rf5111_ini_mode_end, mode); + + ath5k_hw_ini_registers(ah, + ARRAY_SIZE(rf5111_ini_common_end), + rf5111_ini_common_end, change_channel); + + /* Baseband gain table */ + ath5k_hw_ini_registers(ah, + ARRAY_SIZE(rf5111_ini_bbgain), + rf5111_ini_bbgain, change_channel); + + break; + case AR5K_RF5112: + + ath5k_hw_ini_mode_registers(ah, + ARRAY_SIZE(rf5112_ini_mode_end), + rf5112_ini_mode_end, mode); + + ath5k_hw_ini_registers(ah, + ARRAY_SIZE(rf5112_ini_common_end), + rf5112_ini_common_end, change_channel); + + ath5k_hw_ini_registers(ah, + ARRAY_SIZE(rf5112_ini_bbgain), + rf5112_ini_bbgain, change_channel); + + break; + case AR5K_RF5413: + + ath5k_hw_ini_mode_registers(ah, + ARRAY_SIZE(rf5413_ini_mode_end), + rf5413_ini_mode_end, mode); + + ath5k_hw_ini_registers(ah, + ARRAY_SIZE(rf5413_ini_common_end), + rf5413_ini_common_end, change_channel); + + ath5k_hw_ini_registers(ah, + ARRAY_SIZE(rf5112_ini_bbgain), + rf5112_ini_bbgain, change_channel); + + break; + case AR5K_RF2316: + case AR5K_RF2413: + + ath5k_hw_ini_mode_registers(ah, + ARRAY_SIZE(rf2413_ini_mode_end), + rf2413_ini_mode_end, mode); + + ath5k_hw_ini_registers(ah, + ARRAY_SIZE(rf2413_ini_common_end), + rf2413_ini_common_end, change_channel); + + /* Override settings from rf2413_ini_common_end */ + if (ah->ah_radio == AR5K_RF2316) { + ath5k_hw_reg_write(ah, 0x00004000, + AR5K_PHY_AGC); + ath5k_hw_reg_write(ah, 0x081b7caa, + 0xa274); + } + + ath5k_hw_ini_registers(ah, + ARRAY_SIZE(rf5112_ini_bbgain), + rf5112_ini_bbgain, change_channel); + break; + case AR5K_RF2317: + case AR5K_RF2425: + + ath5k_hw_ini_mode_registers(ah, + ARRAY_SIZE(rf2425_ini_mode_end), + rf2425_ini_mode_end, mode); + + ath5k_hw_ini_registers(ah, + ARRAY_SIZE(rf2425_ini_common_end), + rf2425_ini_common_end, change_channel); + + ath5k_hw_ini_registers(ah, + ARRAY_SIZE(rf5112_ini_bbgain), + rf5112_ini_bbgain, change_channel); + break; + default: + return -EINVAL; + + } + + /* For AR5211 */ + } else if (ah->ah_version == AR5K_AR5211) { + + /* AR5K_MODE_11B */ + if (mode > 2) { + ATH5K_ERR(ah->ah_sc, + "unsupported channel mode: %d\n", mode); + return -EINVAL; + } + + /* Mode-specific settings */ + ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(ar5211_ini_mode), + ar5211_ini_mode, mode); + + /* + * Write initial settings common for all modes + */ + ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5211_ini), + ar5211_ini, change_channel); + + /* AR5211 only comes with 5111 */ + + /* Baseband gain table */ + ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_bbgain), + rf5111_ini_bbgain, change_channel); + /* For AR5210 (for mode settings check out ath5k_hw_reset_tx_queue) */ + } else if (ah->ah_version == AR5K_AR5210) { + ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5210_ini), + ar5210_ini, change_channel); + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c new file mode 100644 index 0000000..67aa52e --- /dev/null +++ 
b/drivers/net/wireless/ath/ath5k/led.c @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting + * Copyright (c) 2004-2005 Atheros Communications, Inc. + * Copyright (c) 2007 Jiri Slaby + * Copyright (c) 2009 Bob Copeland + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#include +#include "ath5k.h" +#include "base.h" + +#define ATH_SDEVICE(subv,subd) \ + .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ + .subvendor = (subv), .subdevice = (subd) + +#define ATH_LED(pin,polarity) .driver_data = (((pin) << 8) | (polarity)) +#define ATH_PIN(data) ((data) >> 8) +#define ATH_POLARITY(data) ((data) & 0xff) + +/* Devices we match on for LED config info (typically laptops) */ +static const struct pci_device_id ath5k_led_devices[] = { + /* AR5211 */ + { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5211), ATH_LED(0, 0) }, + /* HP Compaq nc6xx, nc4000, nx6000 */ + { ATH_SDEVICE(PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID), ATH_LED(1, 1) }, + /* Acer Aspire One A150 (maximlevitsky@gmail.com) */ + { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe008), ATH_LED(3, 0) }, + /* Acer Aspire One AO531h AO751h (keng-yu.lin@canonical.com) */ + { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe00d), ATH_LED(3, 0) }, + /* Acer Ferrari 5000 (russ.dill@gmail.com) */ + { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) }, + /* E-machines E510 (tuliom@gmail.com) */ + { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) }, + /* BenQ Joybook R55v (nowymarluk@wp.pl) */ + { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0100), ATH_LED(1, 0) }, + /* Acer Extensa 5620z (nekoreeve@gmail.com) */ + { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) }, + /* Fukato Datacask Jupiter 1014a (mrb74@gmx.at) */ + { ATH_SDEVICE(PCI_VENDOR_ID_AZWAVE, 0x1026), ATH_LED(3, 0) }, + /* IBM ThinkPad AR5BXB6 (legovini@spiro.fisica.unipd.it) */ + { ATH_SDEVICE(PCI_VENDOR_ID_IBM, 0x058a), ATH_LED(1, 0) }, + /* HP Compaq CQ60-206US (ddreggors@jumptv.com) */ + { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) }, + /* HP Compaq C700 (nitrousnrg@gmail.com) */ + { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) }, + /* LiteOn AR5BXB63 (magooz@salug.it) */ + { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) }, + /* IBM-specific AR5212 (all others) */ + { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) }, + /* Dell Vostro A860 (shahar@shahar-or.co.il) */ + { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0112), ATH_LED(3, 0) }, + { } +}; + +void ath5k_led_enable(struct ath5k_softc *sc) +{ + if (test_bit(ATH_STAT_LEDSOFT, sc->status)) { + ath5k_hw_set_gpio_output(sc->ah, sc->led_pin); + ath5k_led_off(sc); + } +} + +static void ath5k_led_on(struct ath5k_softc *sc) +{ + if (!test_bit(ATH_STAT_LEDSOFT, sc->status)) + return; + ath5k_hw_set_gpio(sc->ah, sc->led_pin, sc->led_on); +} + +void ath5k_led_off(struct ath5k_softc *sc) +{ + if (!test_bit(ATH_STAT_LEDSOFT, sc->status)) + return; + ath5k_hw_set_gpio(sc->ah, sc->led_pin, !sc->led_on); +} + +static void +ath5k_led_brightness_set(struct led_classdev *led_dev, + enum led_brightness brightness) +{ + struct ath5k_led *led = container_of(led_dev, struct ath5k_led, + led_dev); + + if (brightness == LED_OFF) + ath5k_led_off(led->sc); + else + ath5k_led_on(led->sc); +} + +static int +ath5k_register_led(struct ath5k_softc *sc, struct ath5k_led *led, + const char *name, char *trigger) +{ + int err; + + led->sc = sc; + strncpy(led->name, name, sizeof(led->name)); + led->led_dev.name = led->name; + led->led_dev.default_trigger = trigger; + led->led_dev.brightness_set = ath5k_led_brightness_set; + + err = led_classdev_register(&sc->pdev->dev, &led->led_dev); + if (err) { + ATH5K_WARN(sc, "could not register LED %s\n", name); + led->sc = NULL; + } + return err; +} + +static void +ath5k_unregister_led(struct ath5k_led *led) +{ + if (!led->sc) + return; + 
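/* Unregister the class device, force the LED off and clear led->sc + * so a repeat unregister is a harmless no-op. */ +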
led_classdev_unregister(&led->led_dev); + ath5k_led_off(led->sc); + led->sc = NULL; +} + +void ath5k_unregister_leds(struct ath5k_softc *sc) +{ + ath5k_unregister_led(&sc->rx_led); + ath5k_unregister_led(&sc->tx_led); +} + +int ath5k_init_leds(struct ath5k_softc *sc) +{ + int ret = 0; + struct ieee80211_hw *hw = sc->hw; + struct pci_dev *pdev = sc->pdev; + char name[ATH5K_LED_MAX_NAME_LEN + 1]; + const struct pci_device_id *match; + + match = pci_match_id(&ath5k_led_devices[0], pdev); + if (match) { + __set_bit(ATH_STAT_LEDSOFT, sc->status); + sc->led_pin = ATH_PIN(match->driver_data); + sc->led_on = ATH_POLARITY(match->driver_data); + } + + if (!test_bit(ATH_STAT_LEDSOFT, sc->status)) + goto out; + + ath5k_led_enable(sc); + + snprintf(name, sizeof(name), "ath5k-%s::rx", wiphy_name(hw->wiphy)); + ret = ath5k_register_led(sc, &sc->rx_led, name, + ieee80211_get_rx_led_name(hw)); + if (ret) + goto out; + + snprintf(name, sizeof(name), "ath5k-%s::tx", wiphy_name(hw->wiphy)); + ret = ath5k_register_led(sc, &sc->tx_led, name, + ieee80211_get_tx_led_name(hw)); +out: + return ret; +} + diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c new file mode 100644 index 0000000..aefe84f --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -0,0 +1,1153 @@ +/* + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2008 Nick Kossifidis + * Copyright (c) 2007-2008 Matthew W. S. Bell + * Copyright (c) 2007-2008 Luis Rodriguez + * Copyright (c) 2007-2008 Pavel Roskin + * Copyright (c) 2007-2008 Jiri Slaby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/*********************************\ +* Protocol Control Unit Functions * +\*********************************/ + +#include + +#include "ath5k.h" +#include "reg.h" +#include "debug.h" +#include "base.h" + +/*******************\ +* Generic functions * +\*******************/ + +/** + * ath5k_hw_set_opmode - Set PCU operating mode + * + * @ah: The &struct ath5k_hw + * + * Initialize PCU for the various operating modes (AP/STA etc) + * + * NOTE: ah->ah_op_mode must be set before calling this. + */ +int ath5k_hw_set_opmode(struct ath5k_hw *ah) +{ + struct ath_common *common = ath5k_hw_common(ah); + u32 pcu_reg, beacon_reg, low_id, high_id; + + + /* Preserve rest settings */ + pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; + pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP + | AR5K_STA_ID1_KEYSRCH_MODE + | (ah->ah_version == AR5K_AR5210 ? 
+ (AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0)); + + beacon_reg = 0; + + ATH5K_TRACE(ah->ah_sc); + + switch (ah->ah_op_mode) { + case NL80211_IFTYPE_ADHOC: + pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE; + beacon_reg |= AR5K_BCR_ADHOC; + if (ah->ah_version == AR5K_AR5210) + pcu_reg |= AR5K_STA_ID1_NO_PSPOLL; + else + AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS); + break; + + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE; + beacon_reg |= AR5K_BCR_AP; + if (ah->ah_version == AR5K_AR5210) + pcu_reg |= AR5K_STA_ID1_NO_PSPOLL; + else + AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS); + break; + + case NL80211_IFTYPE_STATION: + pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE + | (ah->ah_version == AR5K_AR5210 ? + AR5K_STA_ID1_PWR_SV : 0); + case NL80211_IFTYPE_MONITOR: + pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE + | (ah->ah_version == AR5K_AR5210 ? + AR5K_STA_ID1_NO_PSPOLL : 0); + break; + + default: + return -EINVAL; + } + + /* + * Set PCU registers + */ + low_id = get_unaligned_le32(common->macaddr); + high_id = get_unaligned_le16(common->macaddr + 4); + ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); + ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1); + + /* + * Set Beacon Control Register on 5210 + */ + if (ah->ah_version == AR5K_AR5210) + ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR); + + return 0; +} + +/** + * ath5k_hw_update - Update mib counters (mac layer statistics) + * + * @ah: The &struct ath5k_hw + * @stats: The &struct ieee80211_low_level_stats we use to track + * statistics on the driver + * + * Reads MIB counters from PCU and updates sw statistics. Must be + * called after a MIB interrupt. + */ +void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, + struct ieee80211_low_level_stats *stats) +{ + ATH5K_TRACE(ah->ah_sc); + + /* Read-And-Clear */ + stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL); + stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL); + stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK); + stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL); + + /* XXX: Should we use this to track beacon count ? + * -we read it anyway to clear the register */ + ath5k_hw_reg_read(ah, AR5K_BEACON_CNT); + + /* Reset profile count registers on 5212*/ + if (ah->ah_version == AR5K_AR5212) { + ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX); + ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX); + ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR); + ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE); + } + + /* TODO: Handle ANI stats */ +} + +/** + * ath5k_hw_set_ack_bitrate - set bitrate for ACKs + * + * @ah: The &struct ath5k_hw + * @high: Flag to determine if we want to use high transmition rate + * for ACKs or not + * + * If high flag is set, we tell hw to use a set of control rates based on + * the current transmition rate (check out control_rates array inside reset.c). + * If not hw just uses the lowest rate available for the current modulation + * scheme being used (1Mbit for CCK and 6Mbits for OFDM). 
+ */ +void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high) +{ + if (ah->ah_version != AR5K_AR5212) + return; + else { + u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB; + if (high) + AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val); + else + AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val); + } +} + + +/******************\ +* ACK/CTS Timeouts * +\******************/ + +/** + * ath5k_hw_het_ack_timeout - Get ACK timeout from PCU in usec + * + * @ah: The &struct ath5k_hw + */ +unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + + return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah, + AR5K_TIME_OUT), AR5K_TIME_OUT_ACK)); +} + +/** + * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU + * + * @ah: The &struct ath5k_hw + * @timeout: Timeout in usec + */ +int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) +{ + ATH5K_TRACE(ah->ah_sc); + if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK)) + <= timeout) + return -EINVAL; + + AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK, + ath5k_hw_htoclock(ah, timeout)); + + return 0; +} + +/** + * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec + * + * @ah: The &struct ath5k_hw + */ +unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah, + AR5K_TIME_OUT), AR5K_TIME_OUT_CTS)); +} + +/** + * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU + * + * @ah: The &struct ath5k_hw + * @timeout: Timeout in usec + */ +int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) +{ + ATH5K_TRACE(ah->ah_sc); + if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS)) + <= timeout) + return -EINVAL; + + AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS, + ath5k_hw_htoclock(ah, timeout)); + + return 0; +} + +/** + * ath5k_hw_htoclock - Translate usec to hw clock units + * + * @ah: The &struct ath5k_hw + * @usec: value in microseconds + */ +unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec) +{ + return usec * ath5k_hw_get_clockrate(ah); +} + +/** + * ath5k_hw_clocktoh - Translate hw clock units to usec + * @clock: value in hw clock units + */ +unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock) +{ + return clock / ath5k_hw_get_clockrate(ah); +} + +/** + * ath5k_hw_get_clockrate - Get the clock rate for current mode + * + * @ah: The &struct ath5k_hw + */ +unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah) +{ + struct ieee80211_channel *channel = ah->ah_current_channel; + int clock; + + if (channel->hw_value & CHANNEL_5GHZ) + clock = 40; /* 802.11a */ + else if (channel->hw_value & CHANNEL_CCK) + clock = 22; /* 802.11b */ + else + clock = 44; /* 802.11g */ + + /* Clock rate in turbo modes is twice the normal rate */ + if (channel->hw_value & CHANNEL_TURBO) + clock *= 2; + + return clock; +} + +/** + * ath5k_hw_get_default_slottime - Get the default slot time for current mode + * + * @ah: The &struct ath5k_hw + */ +unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah) +{ + struct ieee80211_channel *channel = ah->ah_current_channel; + + if (channel->hw_value & CHANNEL_TURBO) + return 6; /* both turbo modes */ + + if (channel->hw_value & CHANNEL_CCK) + return 20; /* 802.11b */ + + return 9; /* 802.11 a/g */ +} + +/** + * ath5k_hw_get_default_sifs - Get the default SIFS for current mode + * + * @ah: The &struct ath5k_hw + */ +unsigned int ath5k_hw_get_default_sifs(struct 
ath5k_hw *ah) +{ + struct ieee80211_channel *channel = ah->ah_current_channel; + + if (channel->hw_value & CHANNEL_TURBO) + return 8; /* both turbo modes */ + + if (channel->hw_value & CHANNEL_5GHZ) + return 16; /* 802.11a */ + + return 10; /* 802.11 b/g */ +} + +/** + * ath5k_hw_set_lladdr - Set station id + * + * @ah: The &struct ath5k_hw + * @mac: The card's mac address + * + * Set station id on hw using the provided mac address + */ +int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) +{ + struct ath_common *common = ath5k_hw_common(ah); + u32 low_id, high_id; + u32 pcu_reg; + + ATH5K_TRACE(ah->ah_sc); + /* Set new station ID */ + memcpy(common->macaddr, mac, ETH_ALEN); + + pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; + + low_id = get_unaligned_le32(mac); + high_id = get_unaligned_le16(mac + 4); + + ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); + ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1); + + return 0; +} + +/** + * ath5k_hw_set_associd - Set BSSID for association + * + * @ah: The &struct ath5k_hw + * @bssid: BSSID + * @assoc_id: Assoc id + * + * Sets the BSSID which trigers the "SME Join" operation + */ +void ath5k_hw_set_associd(struct ath5k_hw *ah) +{ + struct ath_common *common = ath5k_hw_common(ah); + u16 tim_offset = 0; + + /* + * Set simple BSSID mask on 5212 + */ + if (ah->ah_version == AR5K_AR5212) + ath_hw_setbssidmask(common); + + /* + * Set BSSID which triggers the "SME Join" operation + */ + ath5k_hw_reg_write(ah, + get_unaligned_le32(common->curbssid), + AR5K_BSS_ID0); + ath5k_hw_reg_write(ah, + get_unaligned_le16(common->curbssid + 4) | + ((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S), + AR5K_BSS_ID1); + + if (common->curaid == 0) { + ath5k_hw_disable_pspoll(ah); + return; + } + + AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM, + tim_offset ? tim_offset + 4 : 0); + + ath5k_hw_enable_pspoll(ah, NULL, 0); +} + +void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) +{ + struct ath_common *common = ath5k_hw_common(ah); + ATH5K_TRACE(ah->ah_sc); + + /* Cache bssid mask so that we can restore it + * on reset */ + memcpy(common->bssidmask, mask, ETH_ALEN); + if (ah->ah_version == AR5K_AR5212) + ath_hw_setbssidmask(common); +} + +/************\ +* RX Control * +\************/ + +/** + * ath5k_hw_start_rx_pcu - Start RX engine + * + * @ah: The &struct ath5k_hw + * + * Starts RX engine on PCU so that hw can process RXed frames + * (ACK etc). 
+ * + * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma + * TODO: Init ANI here + */ +void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); +} + +/** + * ath5k_hw_stop_rx_pcu - Stop RX engine + * + * @ah: The &struct ath5k_hw + * + * Stops RX engine on PCU + * + * TODO: Detach ANI here + */ +void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); +} + +/* + * Set multicast filter + */ +void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) +{ + ATH5K_TRACE(ah->ah_sc); + /* Set the multicast filter */ + ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0); + ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1); +} + +/* + * Set multicast filter by index + */ +int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index) +{ + + ATH5K_TRACE(ah->ah_sc); + if (index >= 64) + return -EINVAL; + else if (index >= 32) + AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1, + (1 << (index - 32))); + else + AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index)); + + return 0; +} + +/* + * Clear multicast filter by index + */ +int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index) +{ + + ATH5K_TRACE(ah->ah_sc); + if (index >= 64) + return -EINVAL; + else if (index >= 32) + AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1, + (1 << (index - 32))); + else + AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index)); + + return 0; +} + +/** + * ath5k_hw_get_rx_filter - Get current rx filter + * + * @ah: The &struct ath5k_hw + * + * Returns the RX filter by reading rx filter and + * phy error filter registers. RX filter is used + * to set the allowed frame types that PCU will accept + * and pass to the driver. For a list of frame types + * check out reg.h. + */ +u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah) +{ + u32 data, filter = 0; + + ATH5K_TRACE(ah->ah_sc); + filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER); + + /* Radar detection for 5212 */ + if (ah->ah_version == AR5K_AR5212) { + data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL); + + if (data & AR5K_PHY_ERR_FIL_RADAR) + filter |= AR5K_RX_FILTER_RADARERR; + if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK)) + filter |= AR5K_RX_FILTER_PHYERR; + } + + return filter; +} + +/** + * ath5k_hw_set_rx_filter - Set rx filter + * + * @ah: The &struct ath5k_hw + * @filter: RX filter mask (see reg.h) + * + * Sets RX filter register and also handles PHY error filter + * register on 5212 and newer chips so that we have proper PHY + * error reporting. 
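+ * On AR5210 a request for radar errors is handled by switching the PCU to + * promiscuous mode instead (see below).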
+ */ +void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) +{ + u32 data = 0; + + ATH5K_TRACE(ah->ah_sc); + + /* Set PHY error filter register on 5212*/ + if (ah->ah_version == AR5K_AR5212) { + if (filter & AR5K_RX_FILTER_RADARERR) + data |= AR5K_PHY_ERR_FIL_RADAR; + if (filter & AR5K_RX_FILTER_PHYERR) + data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK; + } + + /* + * The AR5210 uses promiscous mode to detect radar activity + */ + if (ah->ah_version == AR5K_AR5210 && + (filter & AR5K_RX_FILTER_RADARERR)) { + filter &= ~AR5K_RX_FILTER_RADARERR; + filter |= AR5K_RX_FILTER_PROM; + } + + /*Zero length DMA (phy error reporting) */ + if (data) + AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA); + else + AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA); + + /*Write RX Filter register*/ + ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER); + + /*Write PHY error filter register on 5212*/ + if (ah->ah_version == AR5K_AR5212) + ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL); + +} + + +/****************\ +* Beacon control * +\****************/ + +/** + * ath5k_hw_get_tsf32 - Get a 32bit TSF + * + * @ah: The &struct ath5k_hw + * + * Returns lower 32 bits of current TSF + */ +u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + return ath5k_hw_reg_read(ah, AR5K_TSF_L32); +} + +/** + * ath5k_hw_get_tsf64 - Get the full 64bit TSF + * + * @ah: The &struct ath5k_hw + * + * Returns the current TSF + */ +u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) +{ + u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32); + ATH5K_TRACE(ah->ah_sc); + + return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32); +} + +/** + * ath5k_hw_set_tsf64 - Set a new 64bit TSF + * + * @ah: The &struct ath5k_hw + * @tsf64: The new 64bit TSF + * + * Sets the new TSF + */ +void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64) +{ + ATH5K_TRACE(ah->ah_sc); + + ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32); + ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32); +} + +/** + * ath5k_hw_reset_tsf - Force a TSF reset + * + * @ah: The &struct ath5k_hw + * + * Forces a TSF reset on PCU + */ +void ath5k_hw_reset_tsf(struct ath5k_hw *ah) +{ + u32 val; + + ATH5K_TRACE(ah->ah_sc); + + val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF; + + /* + * Each write to the RESET_TSF bit toggles a hardware internal + * signal to reset TSF, but if left high it will cause a TSF reset + * on the next chip reset as well. Thus we always write the value + * twice to clear the signal. + */ + ath5k_hw_reg_write(ah, val, AR5K_BEACON); + ath5k_hw_reg_write(ah, val, AR5K_BEACON); +} + +/* + * Initialize beacon timers + */ +void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval) +{ + u32 timer1, timer2, timer3; + + ATH5K_TRACE(ah->ah_sc); + /* + * Set the additional timers by mode + */ + switch (ah->ah_op_mode) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_STATION: + /* In STA mode timer1 is used as next wakeup + * timer and timer2 as next CFP duration start + * timer. Both in 1/8TUs. 
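(1 TU = 1024us, so 1/8 TU = 128us.)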
*/ + /* TODO: PCF handling */ + if (ah->ah_version == AR5K_AR5210) { + timer1 = 0xffffffff; + timer2 = 0xffffffff; + } else { + timer1 = 0x0000ffff; + timer2 = 0x0007ffff; + } + /* Mark associated AP as PCF incapable for now */ + AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PCF); + break; + case NL80211_IFTYPE_ADHOC: + AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM); + default: + /* On non-STA modes timer1 is used as next DMA + * beacon alert (DBA) timer and timer2 as next + * software beacon alert. Both in 1/8TUs. */ + timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3; + timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3; + break; + } + + /* Timer3 marks the end of our ATIM window + * a zero length window is not allowed because + * we 'll get no beacons */ + timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1); + + /* + * Set the beacon register and enable all timers. + */ + /* When in AP or Mesh Point mode zero timer0 to start TSF */ + if (ah->ah_op_mode == NL80211_IFTYPE_AP || + ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT) + ath5k_hw_reg_write(ah, 0, AR5K_TIMER0); + + ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0); + ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1); + ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2); + ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3); + + /* Force a TSF reset if requested and enable beacons */ + if (interval & AR5K_BEACON_RESET_TSF) + ath5k_hw_reset_tsf(ah); + + ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD | + AR5K_BEACON_ENABLE), + AR5K_BEACON); + + /* Flush any pending BMISS interrupts on ISR by + * performing a clear-on-write operation on PISR + * register for the BMISS bit (writing a bit on + * ISR togles a reset for that bit and leaves + * the rest bits intact) */ + if (ah->ah_version == AR5K_AR5210) + ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_ISR); + else + ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_PISR); + + /* TODO: Set enchanced sleep registers on AR5212 + * based on vif->bss_conf params, until then + * disable power save reporting.*/ + AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PWR_SV); + +} + +#if 0 +/* + * Set beacon timers + */ +int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah, + const struct ath5k_beacon_state *state) +{ + u32 cfp_period, next_cfp, dtim, interval, next_beacon; + + /* + * TODO: should be changed through *state + * review struct ath5k_beacon_state struct + * + * XXX: These are used for cfp period bellow, are they + * ok ? Is it O.K. for tsf here to be 0 or should we use + * get_tsf ? + */ + u32 dtim_count = 0; /* XXX */ + u32 cfp_count = 0; /* XXX */ + u32 tsf = 0; /* XXX */ + + ATH5K_TRACE(ah->ah_sc); + /* Return on an invalid beacon state */ + if (state->bs_interval < 1) + return -EINVAL; + + interval = state->bs_interval; + dtim = state->bs_dtim_period; + + /* + * PCF support? + */ + if (state->bs_cfp_period > 0) { + /* + * Enable PCF mode and set the CFP + * (Contention Free Period) and timer registers + */ + cfp_period = state->bs_cfp_period * state->bs_dtim_period * + state->bs_interval; + next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) * + state->bs_interval; + + AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, + AR5K_STA_ID1_DEFAULT_ANTENNA | + AR5K_STA_ID1_PCF); + ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD); + ath5k_hw_reg_write(ah, state->bs_cfp_max_duration, + AR5K_CFP_DUR); + ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? 
cfp_period : + next_cfp)) << 3, AR5K_TIMER2); + } else { + /* Disable PCF mode */ + AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, + AR5K_STA_ID1_DEFAULT_ANTENNA | + AR5K_STA_ID1_PCF); + } + + /* + * Enable the beacon timer register + */ + ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0); + + /* + * Start the beacon timers + */ + ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) & + ~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) | + AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0, + AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval, + AR5K_BEACON_PERIOD), AR5K_BEACON); + + /* + * Write new beacon miss threshold, if it appears to be valid + * XXX: Figure out right values for min <= bs_bmiss_threshold <= max + * and return if its not in range. We can test this by reading value and + * setting value to a largest value and seeing which values register. + */ + + AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS, + state->bs_bmiss_threshold); + + /* + * Set sleep control register + * XXX: Didn't find this in 5210 code but since this register + * exists also in ar5k's 5210 headers i leave it as common code. + */ + AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR, + (state->bs_sleep_duration - 3) << 3); + + /* + * Set enhanced sleep registers on 5212 + */ + if (ah->ah_version == AR5K_AR5212) { + if (state->bs_sleep_duration > state->bs_interval && + roundup(state->bs_sleep_duration, interval) == + state->bs_sleep_duration) + interval = state->bs_sleep_duration; + + if (state->bs_sleep_duration > dtim && (dtim == 0 || + roundup(state->bs_sleep_duration, dtim) == + state->bs_sleep_duration)) + dtim = state->bs_sleep_duration; + + if (interval > dtim) + return -EINVAL; + + next_beacon = interval == dtim ? state->bs_next_dtim : + state->bs_next_beacon; + + ath5k_hw_reg_write(ah, + AR5K_REG_SM((state->bs_next_dtim - 3) << 3, + AR5K_SLEEP0_NEXT_DTIM) | + AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) | + AR5K_SLEEP0_ENH_SLEEP_EN | + AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0); + + ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3, + AR5K_SLEEP1_NEXT_TIM) | + AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1); + + ath5k_hw_reg_write(ah, + AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) | + AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2); + } + + return 0; +} + +/* + * Reset beacon timers + */ +void ath5k_hw_reset_beacon(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + /* + * Disable beacon timer + */ + ath5k_hw_reg_write(ah, 0, AR5K_TIMER0); + + /* + * Disable some beacon register values + */ + AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, + AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF); + ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON); +} + +/* + * Wait for beacon queue to finish + */ +int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr) +{ + unsigned int i; + int ret; + + ATH5K_TRACE(ah->ah_sc); + + /* 5210 doesn't have QCU*/ + if (ah->ah_version == AR5K_AR5210) { + /* + * Wait for beaconn queue to finish by checking + * Control Register and Beacon Status Register. + */ + for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) { + if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F) + || + !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F)) + break; + udelay(10); + } + + /* Timeout... 
*/ + if (i <= 0) { + /* + * Re-schedule the beacon queue + */ + ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1); + ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE, + AR5K_BCR); + + return -EIO; + } + ret = 0; + } else { + /*5211/5212*/ + ret = ath5k_hw_register_timeout(ah, + AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON), + AR5K_QCU_STS_FRMPENDCNT, 0, false); + + if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON)) + return -EIO; + } + + return ret; +} +#endif + + +/*********************\ +* Key table functions * +\*********************/ + +/* + * Reset a key entry on the table + */ +int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry) +{ + unsigned int i, type; + u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET; + + ATH5K_TRACE(ah->ah_sc); + AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); + + type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry)); + + for (i = 0; i < AR5K_KEYCACHE_SIZE; i++) + ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i)); + + /* Reset associated MIC entry if TKIP + * is enabled located at offset (entry + 64) */ + if (type == AR5K_KEYTABLE_TYPE_TKIP) { + AR5K_ASSERT_ENTRY(micentry, AR5K_KEYTABLE_SIZE); + for (i = 0; i < AR5K_KEYCACHE_SIZE / 2 ; i++) + ath5k_hw_reg_write(ah, 0, + AR5K_KEYTABLE_OFF(micentry, i)); + } + + /* + * Set NULL encryption on AR5212+ + * + * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5) + * AR5K_KEYTABLE_TYPE_NULL -> 0x00000007 + * + * Note2: Windows driver (ndiswrapper) sets this to + * 0x00000714 instead of 0x00000007 + */ + if (ah->ah_version >= AR5K_AR5211) { + ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL, + AR5K_KEYTABLE_TYPE(entry)); + + if (type == AR5K_KEYTABLE_TYPE_TKIP) { + ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL, + AR5K_KEYTABLE_TYPE(micentry)); + } + } + + return 0; +} + +/* + * Check if a table entry is valid + */ +int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry) +{ + ATH5K_TRACE(ah->ah_sc); + AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); + + /* Check the validation flag at the end of the entry */ + return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) & + AR5K_KEYTABLE_VALID; +} + +static +int ath5k_keycache_type(const struct ieee80211_key_conf *key) +{ + switch (key->alg) { + case ALG_TKIP: + return AR5K_KEYTABLE_TYPE_TKIP; + case ALG_CCMP: + return AR5K_KEYTABLE_TYPE_CCM; + case ALG_WEP: + if (key->keylen == WLAN_KEY_LEN_WEP40) + return AR5K_KEYTABLE_TYPE_40; + else if (key->keylen == WLAN_KEY_LEN_WEP104) + return AR5K_KEYTABLE_TYPE_104; + return -EINVAL; + default: + return -EINVAL; + } + return -EINVAL; +} + +/* + * Set a key entry on the table + */ +int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, + const struct ieee80211_key_conf *key, const u8 *mac) +{ + unsigned int i; + int keylen; + __le32 key_v[5] = {}; + __le32 key0 = 0, key1 = 0; + __le32 *rxmic, *txmic; + int keytype; + u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET; + bool is_tkip; + const u8 *key_ptr; + + ATH5K_TRACE(ah->ah_sc); + + is_tkip = (key->alg == ALG_TKIP); + + /* + * key->keylen comes in from mac80211 in bytes. + * TKIP is 128 bit + 128 bit mic + */ + keylen = (is_tkip) ? (128 / 8) : key->keylen; + + if (entry > AR5K_KEYTABLE_SIZE || + (is_tkip && micentry > AR5K_KEYTABLE_SIZE)) + return -EOPNOTSUPP; + + if (unlikely(keylen > 16)) + return -EOPNOTSUPP; + + keytype = ath5k_keycache_type(key); + if (keytype < 0) + return keytype; + + /* + * each key block is 6 bytes wide, written as pairs of + * alternating 32 and 16 bit le values. 
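+ * A 16-byte key thus fills key_v[0..4] in chunks of 4, 2, 4, 2 and 4 bytes.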
+ */ + key_ptr = key->key; + for (i = 0; keylen >= 6; keylen -= 6) { + memcpy(&key_v[i], key_ptr, 6); + i += 2; + key_ptr += 6; + } + if (keylen) + memcpy(&key_v[i], key_ptr, keylen); + + /* intentionally corrupt key until mic is installed */ + if (is_tkip) { + key0 = key_v[0] = ~key_v[0]; + key1 = key_v[1] = ~key_v[1]; + } + + for (i = 0; i < ARRAY_SIZE(key_v); i++) + ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]), + AR5K_KEYTABLE_OFF(entry, i)); + + ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry)); + + if (is_tkip) { + /* Install rx/tx MIC */ + rxmic = (__le32 *) &key->key[16]; + txmic = (__le32 *) &key->key[24]; + + if (ah->ah_combined_mic) { + key_v[0] = rxmic[0]; + key_v[1] = cpu_to_le32(le32_to_cpu(txmic[0]) >> 16); + key_v[2] = rxmic[1]; + key_v[3] = cpu_to_le32(le32_to_cpu(txmic[0]) & 0xffff); + key_v[4] = txmic[1]; + } else { + key_v[0] = rxmic[0]; + key_v[1] = 0; + key_v[2] = rxmic[1]; + key_v[3] = 0; + key_v[4] = 0; + } + for (i = 0; i < ARRAY_SIZE(key_v); i++) + ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]), + AR5K_KEYTABLE_OFF(micentry, i)); + + ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL, + AR5K_KEYTABLE_TYPE(micentry)); + ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC0(micentry)); + ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC1(micentry)); + + /* restore first 2 words of key */ + ath5k_hw_reg_write(ah, le32_to_cpu(~key0), + AR5K_KEYTABLE_OFF(entry, 0)); + ath5k_hw_reg_write(ah, le32_to_cpu(~key1), + AR5K_KEYTABLE_OFF(entry, 1)); + } + + return ath5k_hw_set_key_lladdr(ah, entry, mac); +} + +int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac) +{ + u32 low_id, high_id; + + ATH5K_TRACE(ah->ah_sc); + /* Invalid entry (key table overflow) */ + AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); + + /* + * MAC may be NULL if it's a broadcast key. In this case no need to + * to compute get_unaligned_le32 and get_unaligned_le16 as we + * already know it. + */ + if (!mac) { + low_id = 0xffffffff; + high_id = 0xffff | AR5K_KEYTABLE_VALID; + } else { + low_id = get_unaligned_le32(mac); + high_id = get_unaligned_le16(mac + 4) | AR5K_KEYTABLE_VALID; + } + + ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry)); + ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry)); + + return 0; +} + +/** + * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class + * + * @ah: The &struct ath5k_hw + * @coverage_class: IEEE 802.11 coverage class number + * + * Sets slot time, ACK timeout and CTS timeout for given coverage class. 
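+ * (Slot time grows by 3us per coverage class; each timeout is then SIFS plus the new slot time.)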
+ */ +void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) +{ + /* As defined by IEEE 802.11-2007 17.3.8.6 */ + int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class; + int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time; + int cts_timeout = ack_timeout; + + ath5k_hw_set_slot_time(ah, slot_time); + ath5k_hw_set_ack_timeout(ah, ack_timeout); + ath5k_hw_set_cts_timeout(ah, cts_timeout); + + ah->ah_coverage_class = coverage_class; +} diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c new file mode 100644 index 0000000..72474c0 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -0,0 +1,3146 @@ +/* + * PHY functions + * + * Copyright (c) 2004-2007 Reyk Floeter + * Copyright (c) 2006-2009 Nick Kossifidis + * Copyright (c) 2007-2008 Jiri Slaby + * Copyright (c) 2008-2009 Felix Fietkau + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#define _ATH5K_PHY + +#include + +#include "ath5k.h" +#include "reg.h" +#include "base.h" +#include "rfbuffer.h" +#include "rfgain.h" + +/* + * Used to modify RF Banks before writing them to AR5K_RF_BUFFER + */ +static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah, + const struct ath5k_rf_reg *rf_regs, + u32 val, u8 reg_id, bool set) +{ + const struct ath5k_rf_reg *rfreg = NULL; + u8 offset, bank, num_bits, col, position; + u16 entry; + u32 mask, data, last_bit, bits_shifted, first_bit; + u32 *rfb; + s32 bits_left; + int i; + + data = 0; + rfb = ah->ah_rf_banks; + + for (i = 0; i < ah->ah_rf_regs_count; i++) { + if (rf_regs[i].index == reg_id) { + rfreg = &rf_regs[i]; + break; + } + } + + if (rfb == NULL || rfreg == NULL) { + ATH5K_PRINTF("Rf register not found!\n"); + /* should not happen */ + return 0; + } + + bank = rfreg->bank; + num_bits = rfreg->field.len; + first_bit = rfreg->field.pos; + col = rfreg->field.col; + + /* first_bit is an offset from bank's + * start. Since we have all banks on + * the same array, we use this offset + * to mark each bank's start */ + offset = ah->ah_offset[bank]; + + /* Boundary check */ + if (!(col <= 3 && num_bits <= 32 && first_bit + num_bits <= 319)) { + ATH5K_PRINTF("invalid values at offset %u\n", offset); + return 0; + } + + entry = ((first_bit - 1) / 8) + offset; + position = (first_bit - 1) % 8; + + if (set) + data = ath5k_hw_bitswap(val, num_bits); + + for (bits_shifted = 0, bits_left = num_bits; bits_left > 0; + position = 0, entry++) { + + last_bit = (position + bits_left > 8) ? 
8 : + position + bits_left; + + mask = (((1 << last_bit) - 1) ^ ((1 << position) - 1)) << + (col * 8); + + if (set) { + rfb[entry] &= ~mask; + rfb[entry] |= ((data << position) << (col * 8)) & mask; + data >>= (8 - position); + } else { + data |= (((rfb[entry] & mask) >> (col * 8)) >> position) + << bits_shifted; + bits_shifted += last_bit - position; + } + + bits_left -= 8 - position; + } + + data = set ? 1 : ath5k_hw_bitswap(data, num_bits); + + return data; +} + +/**********************\ +* RF Gain optimization * +\**********************/ + +/* + * This code is used to optimize rf gain in different environments + * (temperature mostly) based on feedback from a power detector. + * + * It's only used on RF5111 and RF5112, later RF chips seem to have + * auto adjustment on hw (notice they have a much smaller BANK 7 and + * no gain optimization ladder). + * + * For more info check out this patent doc + * http://www.freepatentsonline.com/7400691.html + * + * This paper describes power drops as seen on the receiver due to + * probe packets + * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues + * %20of%20Power%20Control.pdf + * + * And this is the MadWiFi bug entry related to the above + * http://madwifi-project.org/ticket/1659 + * with various measurements and diagrams + * + * TODO: Deal with power drops due to probes by setting an appropriate + * tx power on the probe packets! Make this part of the calibration process. + */ + +/* Initialize ah_gain during attach */ +int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah) +{ + /* Initialize the gain optimization values */ + switch (ah->ah_radio) { + case AR5K_RF5111: + ah->ah_gain.g_step_idx = rfgain_opt_5111.go_default; + ah->ah_gain.g_low = 20; + ah->ah_gain.g_high = 35; + ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE; + break; + case AR5K_RF5112: + ah->ah_gain.g_step_idx = rfgain_opt_5112.go_default; + ah->ah_gain.g_low = 20; + ah->ah_gain.g_high = 85; + ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* Schedule a gain probe check on the next transmitted packet. + * That means our next packet is going to be sent with lower + * tx power and a Peak to Average Power Detector (PAPD) will try + * to measure the gain. + * + * XXX: How about forcing a tx packet (bypassing PCU arbitrator etc) + * just after we enable the probe so that we don't mess with + * standard traffic? Maybe it's time to use sw interrupts and + * a probe tasklet! + */ +static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah) +{ + + /* Skip if gain calibration is inactive or + * we are already handling a probe request */ + if (ah->ah_gain.g_state != AR5K_RFGAIN_ACTIVE) + return; + + /* Send the packet with 2dB below max power as + * the patent doc suggests */ + ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_ofdm - 4, + AR5K_PHY_PAPD_PROBE_TXPOWER) | + AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE); + + ah->ah_gain.g_state = AR5K_RFGAIN_READ_REQUESTED; + +} + +/* Calculate gain_F measurement correction + * based on the current step for RF5112 rev. 2 */ +static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah) +{ + u32 mix, step; + u32 *rf; + const struct ath5k_gain_opt *go; + const struct ath5k_gain_opt_step *g_step; + const struct ath5k_rf_reg *rf_regs; + + /* Only RF5112 Rev. 
2 supports it */ + if ((ah->ah_radio != AR5K_RF5112) || + (ah->ah_radio_5ghz_revision <= AR5K_SREV_RAD_5112A)) + return 0; + + go = &rfgain_opt_5112; + rf_regs = rf_regs_5112a; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112a); + + g_step = &go->go_step[ah->ah_gain.g_step_idx]; + + if (ah->ah_rf_banks == NULL) + return 0; + + rf = ah->ah_rf_banks; + ah->ah_gain.g_f_corr = 0; + + /* No VGA (Variable Gain Amplifier) override, skip */ + if (ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXVGA_OVR, false) != 1) + return 0; + + /* Mix gain stepping */ + step = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXGAIN_STEP, false); + + /* Mix gain override */ + mix = g_step->gos_param[0]; + + switch (mix) { + case 3: + ah->ah_gain.g_f_corr = step * 2; + break; + case 2: + ah->ah_gain.g_f_corr = (step - 5) * 2; + break; + case 1: + ah->ah_gain.g_f_corr = step; + break; + default: + ah->ah_gain.g_f_corr = 0; + break; + } + + return ah->ah_gain.g_f_corr; +} + +/* Check if current gain_F measurement is in the range of our + * power detector windows. If we get a measurement outside range + * we know it's not accurate (detectors can't measure anything outside + * their detection window) so we must ignore it */ +static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah) +{ + const struct ath5k_rf_reg *rf_regs; + u32 step, mix_ovr, level[4]; + u32 *rf; + + if (ah->ah_rf_banks == NULL) + return false; + + rf = ah->ah_rf_banks; + + if (ah->ah_radio == AR5K_RF5111) { + + rf_regs = rf_regs_5111; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5111); + + step = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_RFGAIN_STEP, + false); + + level[0] = 0; + level[1] = (step == 63) ? 50 : step + 4; + level[2] = (step != 63) ? 64 : level[0]; + level[3] = level[2] + 50 ; + + ah->ah_gain.g_high = level[3] - + (step == 63 ? AR5K_GAIN_DYN_ADJUST_HI_MARGIN : -5); + ah->ah_gain.g_low = level[0] + + (step == 63 ? 
AR5K_GAIN_DYN_ADJUST_LO_MARGIN : 0); + } else { + + rf_regs = rf_regs_5112; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112); + + mix_ovr = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXVGA_OVR, + false); + + level[0] = level[2] = 0; + + if (mix_ovr == 1) { + level[1] = level[3] = 83; + } else { + level[1] = level[3] = 107; + ah->ah_gain.g_high = 55; + } + } + + return (ah->ah_gain.g_current >= level[0] && + ah->ah_gain.g_current <= level[1]) || + (ah->ah_gain.g_current >= level[2] && + ah->ah_gain.g_current <= level[3]); +} + +/* Perform gain_F adjustment by choosing the right set + * of parameters from rf gain optimization ladder */ +static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah) +{ + const struct ath5k_gain_opt *go; + const struct ath5k_gain_opt_step *g_step; + int ret = 0; + + switch (ah->ah_radio) { + case AR5K_RF5111: + go = &rfgain_opt_5111; + break; + case AR5K_RF5112: + go = &rfgain_opt_5112; + break; + default: + return 0; + } + + g_step = &go->go_step[ah->ah_gain.g_step_idx]; + + if (ah->ah_gain.g_current >= ah->ah_gain.g_high) { + + /* Reached maximum */ + if (ah->ah_gain.g_step_idx == 0) + return -1; + + for (ah->ah_gain.g_target = ah->ah_gain.g_current; + ah->ah_gain.g_target >= ah->ah_gain.g_high && + ah->ah_gain.g_step_idx > 0; + g_step = &go->go_step[ah->ah_gain.g_step_idx]) + ah->ah_gain.g_target -= 2 * + (go->go_step[--(ah->ah_gain.g_step_idx)].gos_gain - + g_step->gos_gain); + + ret = 1; + goto done; + } + + if (ah->ah_gain.g_current <= ah->ah_gain.g_low) { + + /* Reached minimum */ + if (ah->ah_gain.g_step_idx == (go->go_steps_count - 1)) + return -2; + + for (ah->ah_gain.g_target = ah->ah_gain.g_current; + ah->ah_gain.g_target <= ah->ah_gain.g_low && + ah->ah_gain.g_step_idx < go->go_steps_count-1; + g_step = &go->go_step[ah->ah_gain.g_step_idx]) + ah->ah_gain.g_target -= 2 * + (go->go_step[++ah->ah_gain.g_step_idx].gos_gain - + g_step->gos_gain); + + ret = 2; + goto done; + } + +done: + ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE, + "ret %d, gain step %u, current gain %u, target gain %u\n", + ret, ah->ah_gain.g_step_idx, ah->ah_gain.g_current, + ah->ah_gain.g_target); + + return ret; +} + +/* Main callback for thermal rf gain calibration engine + * Check for a new gain reading and schedule an adjustment + * if needed. 
+ * + * TODO: Use sw interrupt to schedule reset if gain_F needs + * adjustment */ +enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah) +{ + u32 data, type; + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + + ATH5K_TRACE(ah->ah_sc); + + if (ah->ah_rf_banks == NULL || + ah->ah_gain.g_state == AR5K_RFGAIN_INACTIVE) + return AR5K_RFGAIN_INACTIVE; + + /* No check requested, either engine is inactive + * or an adjustment is already requested */ + if (ah->ah_gain.g_state != AR5K_RFGAIN_READ_REQUESTED) + goto done; + + /* Read the PAPD (Peak to Average Power Detector) + * register */ + data = ath5k_hw_reg_read(ah, AR5K_PHY_PAPD_PROBE); + + /* No probe is scheduled, read gain_F measurement */ + if (!(data & AR5K_PHY_PAPD_PROBE_TX_NEXT)) { + ah->ah_gain.g_current = data >> AR5K_PHY_PAPD_PROBE_GAINF_S; + type = AR5K_REG_MS(data, AR5K_PHY_PAPD_PROBE_TYPE); + + /* If tx packet is CCK correct the gain_F measurement + * by cck ofdm gain delta */ + if (type == AR5K_PHY_PAPD_PROBE_TYPE_CCK) { + if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A) + ah->ah_gain.g_current += + ee->ee_cck_ofdm_gain_delta; + else + ah->ah_gain.g_current += + AR5K_GAIN_CCK_PROBE_CORR; + } + + /* Further correct gain_F measurement for + * RF5112A radios */ + if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A) { + ath5k_hw_rf_gainf_corr(ah); + ah->ah_gain.g_current = + ah->ah_gain.g_current >= ah->ah_gain.g_f_corr ? + (ah->ah_gain.g_current-ah->ah_gain.g_f_corr) : + 0; + } + + /* Check if measurement is ok and if we need + * to adjust gain, schedule a gain adjustment, + * else switch back to the acive state */ + if (ath5k_hw_rf_check_gainf_readback(ah) && + AR5K_GAIN_CHECK_ADJUST(&ah->ah_gain) && + ath5k_hw_rf_gainf_adjust(ah)) { + ah->ah_gain.g_state = AR5K_RFGAIN_NEED_CHANGE; + } else { + ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE; + } + } + +done: + return ah->ah_gain.g_state; +} + +/* Write initial rf gain table to set the RF sensitivity + * this one works on all RF chips and has nothing to do + * with gain_F calibration */ +int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq) +{ + const struct ath5k_ini_rfgain *ath5k_rfg; + unsigned int i, size; + + switch (ah->ah_radio) { + case AR5K_RF5111: + ath5k_rfg = rfgain_5111; + size = ARRAY_SIZE(rfgain_5111); + break; + case AR5K_RF5112: + ath5k_rfg = rfgain_5112; + size = ARRAY_SIZE(rfgain_5112); + break; + case AR5K_RF2413: + ath5k_rfg = rfgain_2413; + size = ARRAY_SIZE(rfgain_2413); + break; + case AR5K_RF2316: + ath5k_rfg = rfgain_2316; + size = ARRAY_SIZE(rfgain_2316); + break; + case AR5K_RF5413: + ath5k_rfg = rfgain_5413; + size = ARRAY_SIZE(rfgain_5413); + break; + case AR5K_RF2317: + case AR5K_RF2425: + ath5k_rfg = rfgain_2425; + size = ARRAY_SIZE(rfgain_2425); + break; + default: + return -EINVAL; + } + + switch (freq) { + case AR5K_INI_RFGAIN_2GHZ: + case AR5K_INI_RFGAIN_5GHZ: + break; + default: + return -EINVAL; + } + + for (i = 0; i < size; i++) { + AR5K_REG_WAIT(i); + ath5k_hw_reg_write(ah, ath5k_rfg[i].rfg_value[freq], + (u32)ath5k_rfg[i].rfg_register); + } + + return 0; +} + + + +/********************\ +* RF Registers setup * +\********************/ + + +/* + * Setup RF registers by writing rf buffer on hw + */ +int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, + unsigned int mode) +{ + const struct ath5k_rf_reg *rf_regs; + const struct ath5k_ini_rfbuffer *ini_rfb; + const struct ath5k_gain_opt *go = NULL; + const struct ath5k_gain_opt_step *g_step; + struct ath5k_eeprom_info *ee = 
&ah->ah_capabilities.cap_eeprom; + u8 ee_mode = 0; + u32 *rfb; + int i, obdb = -1, bank = -1; + + switch (ah->ah_radio) { + case AR5K_RF5111: + rf_regs = rf_regs_5111; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5111); + ini_rfb = rfb_5111; + ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5111); + go = &rfgain_opt_5111; + break; + case AR5K_RF5112: + if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A) { + rf_regs = rf_regs_5112a; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112a); + ini_rfb = rfb_5112a; + ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5112a); + } else { + rf_regs = rf_regs_5112; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112); + ini_rfb = rfb_5112; + ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5112); + } + go = &rfgain_opt_5112; + break; + case AR5K_RF2413: + rf_regs = rf_regs_2413; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2413); + ini_rfb = rfb_2413; + ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2413); + break; + case AR5K_RF2316: + rf_regs = rf_regs_2316; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2316); + ini_rfb = rfb_2316; + ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2316); + break; + case AR5K_RF5413: + rf_regs = rf_regs_5413; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5413); + ini_rfb = rfb_5413; + ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5413); + break; + case AR5K_RF2317: + rf_regs = rf_regs_2425; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2425); + ini_rfb = rfb_2317; + ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2317); + break; + case AR5K_RF2425: + rf_regs = rf_regs_2425; + ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2425); + if (ah->ah_mac_srev < AR5K_SREV_AR2417) { + ini_rfb = rfb_2425; + ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2425); + } else { + ini_rfb = rfb_2417; + ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2417); + } + break; + default: + return -EINVAL; + } + + /* If it's the first time we set rf buffer, allocate + * ah->ah_rf_banks based on ah->ah_rf_banks_size + * we set above */ + if (ah->ah_rf_banks == NULL) { + ah->ah_rf_banks = kmalloc(sizeof(u32) * ah->ah_rf_banks_size, + GFP_KERNEL); + if (ah->ah_rf_banks == NULL) { + ATH5K_ERR(ah->ah_sc, "out of memory\n"); + return -ENOMEM; + } + } + + /* Copy values to modify them */ + rfb = ah->ah_rf_banks; + + for (i = 0; i < ah->ah_rf_banks_size; i++) { + if (ini_rfb[i].rfb_bank >= AR5K_MAX_RF_BANKS) { + ATH5K_ERR(ah->ah_sc, "invalid bank\n"); + return -EINVAL; + } + + /* Bank changed, write down the offset */ + if (bank != ini_rfb[i].rfb_bank) { + bank = ini_rfb[i].rfb_bank; + ah->ah_offset[bank] = i; + } + + rfb[i] = ini_rfb[i].rfb_mode_data[mode]; + } + + /* Set Output and Driver bias current (OB/DB) */ + if (channel->hw_value & CHANNEL_2GHZ) { + + if (channel->hw_value & CHANNEL_CCK) + ee_mode = AR5K_EEPROM_MODE_11B; + else + ee_mode = AR5K_EEPROM_MODE_11G; + + /* For RF511X/RF211X combination we + * use b_OB and b_DB parameters stored + * in eeprom on ee->ee_ob[ee_mode][0] + * + * For all other chips we use OB/DB for 2Ghz + * stored in the b/g modal section just like + * 802.11a on ee->ee_ob[ee_mode][1] */ + if ((ah->ah_radio == AR5K_RF5111) || + (ah->ah_radio == AR5K_RF5112)) + obdb = 0; + else + obdb = 1; + + ath5k_hw_rfb_op(ah, rf_regs, ee->ee_ob[ee_mode][obdb], + AR5K_RF_OB_2GHZ, true); + + ath5k_hw_rfb_op(ah, rf_regs, ee->ee_db[ee_mode][obdb], + AR5K_RF_DB_2GHZ, true); + + /* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */ + } else if ((channel->hw_value & CHANNEL_5GHZ) || + (ah->ah_radio == AR5K_RF5111)) { + + /* For 11a, Turbo and XR we need to choose + * OB/DB based on frequency range */ + ee_mode = AR5K_EEPROM_MODE_11A; + 
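/* ee_ob/ee_db hold one value per 11a sub-band; pick the index by frequency + * (boundaries at 5260, 5500 and 5725MHz) */ +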
obdb = channel->center_freq >= 5725 ? 3 : + (channel->center_freq >= 5500 ? 2 : + (channel->center_freq >= 5260 ? 1 : + (channel->center_freq > 4000 ? 0 : -1))); + + if (obdb < 0) + return -EINVAL; + + ath5k_hw_rfb_op(ah, rf_regs, ee->ee_ob[ee_mode][obdb], + AR5K_RF_OB_5GHZ, true); + + ath5k_hw_rfb_op(ah, rf_regs, ee->ee_db[ee_mode][obdb], + AR5K_RF_DB_5GHZ, true); + } + + g_step = &go->go_step[ah->ah_gain.g_step_idx]; + + /* Bank Modifications (chip-specific) */ + if (ah->ah_radio == AR5K_RF5111) { + + /* Set gain_F settings according to current step */ + if (channel->hw_value & CHANNEL_OFDM) { + + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL, + AR5K_PHY_FRAME_CTL_TX_CLIP, + g_step->gos_param[0]); + + ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[1], + AR5K_RF_PWD_90, true); + + ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[2], + AR5K_RF_PWD_84, true); + + ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[3], + AR5K_RF_RFGAIN_SEL, true); + + /* We programmed gain_F parameters, switch back + * to active state */ + ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE; + + } + + /* Bank 6/7 setup */ + + ath5k_hw_rfb_op(ah, rf_regs, !ee->ee_xpd[ee_mode], + AR5K_RF_PWD_XPD, true); + + ath5k_hw_rfb_op(ah, rf_regs, ee->ee_x_gain[ee_mode], + AR5K_RF_XPD_GAIN, true); + + ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode], + AR5K_RF_GAIN_I, true); + + ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode], + AR5K_RF_PLO_SEL, true); + + /* TODO: Half/quarter channel support */ + } + + if (ah->ah_radio == AR5K_RF5112) { + + /* Set gain_F settings according to current step */ + if (channel->hw_value & CHANNEL_OFDM) { + + ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[0], + AR5K_RF_MIXGAIN_OVR, true); + + ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[1], + AR5K_RF_PWD_138, true); + + ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[2], + AR5K_RF_PWD_137, true); + + ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[3], + AR5K_RF_PWD_136, true); + + ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[4], + AR5K_RF_PWD_132, true); + + ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[5], + AR5K_RF_PWD_131, true); + + ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[6], + AR5K_RF_PWD_130, true); + + /* We programmed gain_F parameters, switch back + * to active state */ + ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE; + } + + /* Bank 6/7 setup */ + + ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode], + AR5K_RF_XPD_SEL, true); + + if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112A) { + /* Rev. 
1 supports only one xpd */ + ath5k_hw_rfb_op(ah, rf_regs, + ee->ee_x_gain[ee_mode], + AR5K_RF_XPD_GAIN, true); + + } else { + u8 *pdg_curve_to_idx = ee->ee_pdc_to_idx[ee_mode]; + if (ee->ee_pd_gains[ee_mode] > 1) { + ath5k_hw_rfb_op(ah, rf_regs, + pdg_curve_to_idx[0], + AR5K_RF_PD_GAIN_LO, true); + ath5k_hw_rfb_op(ah, rf_regs, + pdg_curve_to_idx[1], + AR5K_RF_PD_GAIN_HI, true); + } else { + ath5k_hw_rfb_op(ah, rf_regs, + pdg_curve_to_idx[0], + AR5K_RF_PD_GAIN_LO, true); + ath5k_hw_rfb_op(ah, rf_regs, + pdg_curve_to_idx[0], + AR5K_RF_PD_GAIN_HI, true); + } + + /* Lower synth voltage on Rev 2 */ + ath5k_hw_rfb_op(ah, rf_regs, 2, + AR5K_RF_HIGH_VC_CP, true); + + ath5k_hw_rfb_op(ah, rf_regs, 2, + AR5K_RF_MID_VC_CP, true); + + ath5k_hw_rfb_op(ah, rf_regs, 2, + AR5K_RF_LOW_VC_CP, true); + + ath5k_hw_rfb_op(ah, rf_regs, 2, + AR5K_RF_PUSH_UP, true); + + /* Decrease power consumption on 5213+ BaseBand */ + if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) { + ath5k_hw_rfb_op(ah, rf_regs, 1, + AR5K_RF_PAD2GND, true); + + ath5k_hw_rfb_op(ah, rf_regs, 1, + AR5K_RF_XB2_LVL, true); + + ath5k_hw_rfb_op(ah, rf_regs, 1, + AR5K_RF_XB5_LVL, true); + + ath5k_hw_rfb_op(ah, rf_regs, 1, + AR5K_RF_PWD_167, true); + + ath5k_hw_rfb_op(ah, rf_regs, 1, + AR5K_RF_PWD_166, true); + } + } + + ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode], + AR5K_RF_GAIN_I, true); + + /* TODO: Half/quarter channel support */ + + } + + if (ah->ah_radio == AR5K_RF5413 && + channel->hw_value & CHANNEL_2GHZ) { + + ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE, + true); + + /* Set optimum value for early revisions (on pci-e chips) */ + if (ah->ah_mac_srev >= AR5K_SREV_AR5424 && + ah->ah_mac_srev < AR5K_SREV_AR5413) + ath5k_hw_rfb_op(ah, rf_regs, ath5k_hw_bitswap(6, 3), + AR5K_RF_PWD_ICLOBUF_2G, true); + + } + + /* Write RF banks on hw */ + for (i = 0; i < ah->ah_rf_banks_size; i++) { + AR5K_REG_WAIT(i); + ath5k_hw_reg_write(ah, rfb[i], ini_rfb[i].rfb_ctrl_register); + } + + return 0; +} + + +/**************************\ + PHY/RF channel functions +\**************************/ + +/* + * Check if a channel is supported + */ +bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags) +{ + /* Check if the channel is in our supported range */ + if (flags & CHANNEL_2GHZ) { + if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) && + (freq <= ah->ah_capabilities.cap_range.range_2ghz_max)) + return true; + } else if (flags & CHANNEL_5GHZ) + if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) && + (freq <= ah->ah_capabilities.cap_range.range_5ghz_max)) + return true; + + return false; +} + +/* + * Convertion needed for RF5110 + */ +static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel) +{ + u32 athchan; + + /* + * Convert IEEE channel/MHz to an internal channel value used + * by the AR5210 chipset. This has not been verified with + * newer chipsets like the AR5212A who have a completely + * different RF/PHY part. 
+ */ + athchan = (ath5k_hw_bitswap( + (ieee80211_frequency_to_channel( + channel->center_freq) - 24) / 2, 5) + << 1) | (1 << 6) | 0x1; + return athchan; +} + +/* + * Set channel on RF5110 + */ +static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + u32 data; + + /* + * Set the channel and wait + */ + data = ath5k_hw_rf5110_chan2athchan(channel); + ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER); + ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0); + mdelay(1); + + return 0; +} + +/* + * Convertion needed for 5111 + */ +static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee, + struct ath5k_athchan_2ghz *athchan) +{ + int channel; + + /* Cast this value to catch negative channel numbers (>= -19) */ + channel = (int)ieee; + + /* + * Map 2GHz IEEE channel to 5GHz Atheros channel + */ + if (channel <= 13) { + athchan->a2_athchan = 115 + channel; + athchan->a2_flags = 0x46; + } else if (channel == 14) { + athchan->a2_athchan = 124; + athchan->a2_flags = 0x44; + } else if (channel >= 15 && channel <= 26) { + athchan->a2_athchan = ((channel - 14) * 4) + 132; + athchan->a2_flags = 0x46; + } else + return -EINVAL; + + return 0; +} + +/* + * Set channel on 5111 + */ +static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + struct ath5k_athchan_2ghz ath5k_channel_2ghz; + unsigned int ath5k_channel = + ieee80211_frequency_to_channel(channel->center_freq); + u32 data0, data1, clock; + int ret; + + /* + * Set the channel on the RF5111 radio + */ + data0 = data1 = 0; + + if (channel->hw_value & CHANNEL_2GHZ) { + /* Map 2GHz channel to 5GHz Atheros channel ID */ + ret = ath5k_hw_rf5111_chan2athchan( + ieee80211_frequency_to_channel(channel->center_freq), + &ath5k_channel_2ghz); + if (ret) + return ret; + + ath5k_channel = ath5k_channel_2ghz.a2_athchan; + data0 = ((ath5k_hw_bitswap(ath5k_channel_2ghz.a2_flags, 8) & 0xff) + << 5) | (1 << 4); + } + + if (ath5k_channel < 145 || !(ath5k_channel & 1)) { + clock = 1; + data1 = ((ath5k_hw_bitswap(ath5k_channel - 24, 8) & 0xff) << 2) | + (clock << 1) | (1 << 10) | 1; + } else { + clock = 0; + data1 = ((ath5k_hw_bitswap((ath5k_channel - 24) / 2, 8) & 0xff) + << 2) | (clock << 1) | (1 << 10) | 1; + } + + ath5k_hw_reg_write(ah, (data1 & 0xff) | ((data0 & 0xff) << 8), + AR5K_RF_BUFFER); + ath5k_hw_reg_write(ah, ((data1 >> 8) & 0xff) | (data0 & 0xff00), + AR5K_RF_BUFFER_CONTROL_3); + + return 0; +} + +/* + * Set channel on 5112 and newer + */ +static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + u32 data, data0, data1, data2; + u16 c; + + data = data0 = data1 = data2 = 0; + c = channel->center_freq; + + if (c < 4800) { + if (!((c - 2224) % 5)) { + data0 = ((2 * (c - 704)) - 3040) / 10; + data1 = 1; + } else if (!((c - 2192) % 5)) { + data0 = ((2 * (c - 672)) - 3040) / 10; + data1 = 0; + } else + return -EINVAL; + + data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8); + } else if ((c - (c % 5)) != 2 || c > 5435) { + if (!(c % 20) && c >= 5120) { + data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); + data2 = ath5k_hw_bitswap(3, 2); + } else if (!(c % 10)) { + data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8); + data2 = ath5k_hw_bitswap(2, 2); + } else if (!(c % 5)) { + data0 = ath5k_hw_bitswap((c - 4800) / 5, 8); + data2 = ath5k_hw_bitswap(1, 2); + } else + return -EINVAL; + } else { + data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8); + data2 = ath5k_hw_bitswap(0, 2); + } + + data = (data0 << 4) | (data1 << 1) | (data2 << 2) | 
0x1001; + + ath5k_hw_reg_write(ah, data & 0xff, AR5K_RF_BUFFER); + ath5k_hw_reg_write(ah, (data >> 8) & 0x7f, AR5K_RF_BUFFER_CONTROL_5); + + return 0; +} + +/* + * Set the channel on the RF2425 + */ +static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + u32 data, data0, data2; + u16 c; + + data = data0 = data2 = 0; + c = channel->center_freq; + + if (c < 4800) { + data0 = ath5k_hw_bitswap((c - 2272), 8); + data2 = 0; + /* ? 5GHz ? */ + } else if ((c - (c % 5)) != 2 || c > 5435) { + if (!(c % 20) && c < 5120) + data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); + else if (!(c % 10)) + data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8); + else if (!(c % 5)) + data0 = ath5k_hw_bitswap((c - 4800) / 5, 8); + else + return -EINVAL; + data2 = ath5k_hw_bitswap(1, 2); + } else { + data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8); + data2 = ath5k_hw_bitswap(0, 2); + } + + data = (data0 << 4) | data2 << 2 | 0x1001; + + ath5k_hw_reg_write(ah, data & 0xff, AR5K_RF_BUFFER); + ath5k_hw_reg_write(ah, (data >> 8) & 0x7f, AR5K_RF_BUFFER_CONTROL_5); + + return 0; +} + +/* + * Set a channel on the radio chip + */ +int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) +{ + int ret; + /* + * Check bounds supported by the PHY (we don't care about regultory + * restrictions at this point). Note: hw_value already has the band + * (CHANNEL_2GHZ, or CHANNEL_5GHZ) so we inform ath5k_channel_ok() + * of the band by that */ + if (!ath5k_channel_ok(ah, channel->center_freq, channel->hw_value)) { + ATH5K_ERR(ah->ah_sc, + "channel frequency (%u MHz) out of supported " + "band range\n", + channel->center_freq); + return -EINVAL; + } + + /* + * Set the channel and wait + */ + switch (ah->ah_radio) { + case AR5K_RF5110: + ret = ath5k_hw_rf5110_channel(ah, channel); + break; + case AR5K_RF5111: + ret = ath5k_hw_rf5111_channel(ah, channel); + break; + case AR5K_RF2425: + ret = ath5k_hw_rf2425_channel(ah, channel); + break; + default: + ret = ath5k_hw_rf5112_channel(ah, channel); + break; + } + + if (ret) + return ret; + + /* Set JAPAN setting for channel 14 */ + if (channel->center_freq == 2484) { + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL, + AR5K_PHY_CCKTXCTL_JAPAN); + } else { + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL, + AR5K_PHY_CCKTXCTL_WORLD); + } + + ah->ah_current_channel = channel; + ah->ah_turbo = channel->hw_value == CHANNEL_T ? 
true : false; + + return 0; +} + +/*****************\ + PHY calibration +\*****************/ + +void +ath5k_hw_calibration_poll(struct ath5k_hw *ah) +{ + /* Calibration interval in jiffies */ + unsigned long cal_intval; + + cal_intval = msecs_to_jiffies(ah->ah_cal_intval * 1000); + + /* Initialize timestamp if needed */ + if (!ah->ah_cal_tstamp) + ah->ah_cal_tstamp = jiffies; + + /* For now we always do full calibration + * Mark software interrupt mask and fire software + * interrupt (bit gets auto-cleared) */ + if (time_is_before_eq_jiffies(ah->ah_cal_tstamp + cal_intval)) { + ah->ah_cal_tstamp = jiffies; + ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION; + AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); + } +} + +static int sign_extend(int val, const int nbits) +{ + int order = BIT(nbits-1); + return (val ^ order) - order; +} + +static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah) +{ + s32 val; + + val = ath5k_hw_reg_read(ah, AR5K_PHY_NF); + return sign_extend(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 9); +} + +void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah) +{ + int i; + + ah->ah_nfcal_hist.index = 0; + for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) + ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE; +} + +static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor) +{ + struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist; + hist->index = (hist->index + 1) & (ATH5K_NF_CAL_HIST_MAX-1); + hist->nfval[hist->index] = noise_floor; +} + +static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) +{ + s16 sort[ATH5K_NF_CAL_HIST_MAX]; + s16 tmp; + int i, j; + + memcpy(sort, ah->ah_nfcal_hist.nfval, sizeof(sort)); + for (i = 0; i < ATH5K_NF_CAL_HIST_MAX - 1; i++) { + for (j = 1; j < ATH5K_NF_CAL_HIST_MAX - i; j++) { + if (sort[j] > sort[j-1]) { + tmp = sort[j]; + sort[j] = sort[j-1]; + sort[j-1] = tmp; + } + } + } + for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) { + ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE, + "cal %d:%d\n", i, sort[i]); + } + return sort[(ATH5K_NF_CAL_HIST_MAX-1) / 2]; +} + +/* + * When we tell the hardware to perform a noise floor calibration + * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically + * sample-and-hold the minimum noise level seen at the antennas. + * This value is then stored in a ring buffer of recently measured + * noise floor values so we have a moving window of the last few + * samples. + * + * The median of the values in the history is then loaded into the + * hardware for its own use for RSSI and CCA measurements. 
+ */ +void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + u32 val; + s16 nf, threshold; + u8 ee_mode; + + /* keep last value if calibration hasn't completed */ + if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) { + ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE, + "NF did not complete in calibration window\n"); + + return; + } + + switch (ah->ah_current_channel->hw_value & CHANNEL_MODES) { + case CHANNEL_A: + case CHANNEL_T: + case CHANNEL_XR: + ee_mode = AR5K_EEPROM_MODE_11A; + break; + case CHANNEL_G: + case CHANNEL_TG: + ee_mode = AR5K_EEPROM_MODE_11G; + break; + default: + case CHANNEL_B: + ee_mode = AR5K_EEPROM_MODE_11B; + break; + } + + + /* completed NF calibration, test threshold */ + nf = ath5k_hw_read_measured_noise_floor(ah); + threshold = ee->ee_noise_floor_thr[ee_mode]; + + if (nf > threshold) { + ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE, + "noise floor failure detected; " + "read %d, threshold %d\n", + nf, threshold); + + nf = AR5K_TUNE_CCA_MAX_GOOD_VALUE; + } + + ath5k_hw_update_nfcal_hist(ah, nf); + nf = ath5k_hw_get_median_noise_floor(ah); + + /* load noise floor (in .5 dBm) so the hardware will use it */ + val = ath5k_hw_reg_read(ah, AR5K_PHY_NF) & ~AR5K_PHY_NF_M; + val |= (nf * 2) & AR5K_PHY_NF_M; + ath5k_hw_reg_write(ah, val, AR5K_PHY_NF); + + AR5K_REG_MASKED_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF, + ~(AR5K_PHY_AGCCTL_NF_EN | AR5K_PHY_AGCCTL_NF_NOUPDATE)); + + ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF, + 0, false); + + /* + * Load a high max CCA Power value (-50 dBm in .5 dBm units) + * so that we're not capped by the median we just loaded. + * This will be used as the initial value for the next noise + * floor calibration. 
+ */ + val = (val & ~AR5K_PHY_NF_M) | ((-50 * 2) & AR5K_PHY_NF_M); + ath5k_hw_reg_write(ah, val, AR5K_PHY_NF); + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_NF_EN | + AR5K_PHY_AGCCTL_NF_NOUPDATE | + AR5K_PHY_AGCCTL_NF); + + ah->ah_noise_floor = nf; + + ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE, + "noise floor calibrated: %d\n", nf); +} + +/* + * Perform a PHY calibration on RF5110 + * -Fix BPSK/QAM Constellation (I/Q correction) + * -Calculate Noise Floor + */ +static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + u32 phy_sig, phy_agc, phy_sat, beacon; + int ret; + + /* + * Disable beacons and RX/TX queues, wait + */ + AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5210, + AR5K_DIAG_SW_DIS_TX | AR5K_DIAG_SW_DIS_RX_5210); + beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210); + ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210); + + mdelay(2); + + /* + * Set the channel (with AGC turned off) + */ + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE); + udelay(10); + ret = ath5k_hw_channel(ah, channel); + + /* + * Activate PHY and wait + */ + ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); + mdelay(1); + + AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE); + + if (ret) + return ret; + + /* + * Calibrate the radio chip + */ + + /* Remember normal state */ + phy_sig = ath5k_hw_reg_read(ah, AR5K_PHY_SIG); + phy_agc = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCOARSE); + phy_sat = ath5k_hw_reg_read(ah, AR5K_PHY_ADCSAT); + + /* Update radio registers */ + ath5k_hw_reg_write(ah, (phy_sig & ~(AR5K_PHY_SIG_FIRPWR)) | + AR5K_REG_SM(-1, AR5K_PHY_SIG_FIRPWR), AR5K_PHY_SIG); + + ath5k_hw_reg_write(ah, (phy_agc & ~(AR5K_PHY_AGCCOARSE_HI | + AR5K_PHY_AGCCOARSE_LO)) | + AR5K_REG_SM(-1, AR5K_PHY_AGCCOARSE_HI) | + AR5K_REG_SM(-127, AR5K_PHY_AGCCOARSE_LO), AR5K_PHY_AGCCOARSE); + + ath5k_hw_reg_write(ah, (phy_sat & ~(AR5K_PHY_ADCSAT_ICNT | + AR5K_PHY_ADCSAT_THR)) | + AR5K_REG_SM(2, AR5K_PHY_ADCSAT_ICNT) | + AR5K_REG_SM(12, AR5K_PHY_ADCSAT_THR), AR5K_PHY_ADCSAT); + + udelay(20); + + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE); + udelay(10); + ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG); + AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE); + + mdelay(1); + + /* + * Enable calibration and wait until completion + */ + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_CAL); + + ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_CAL, 0, false); + + /* Reset to normal state */ + ath5k_hw_reg_write(ah, phy_sig, AR5K_PHY_SIG); + ath5k_hw_reg_write(ah, phy_agc, AR5K_PHY_AGCCOARSE); + ath5k_hw_reg_write(ah, phy_sat, AR5K_PHY_ADCSAT); + + if (ret) { + ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n", + channel->center_freq); + return ret; + } + + ath5k_hw_update_noise_floor(ah); + + /* + * Re-enable RX/TX and beacons + */ + AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5210, + AR5K_DIAG_SW_DIS_TX | AR5K_DIAG_SW_DIS_RX_5210); + ath5k_hw_reg_write(ah, beacon, AR5K_BEACON_5210); + + return 0; +} + +/* + * Perform a PHY calibration on RF5111/5112 and newer chips + */ +static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + u32 i_pwr, q_pwr; + s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd; + int i; + ATH5K_TRACE(ah->ah_sc); + + if (!ah->ah_calibration || + ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) + goto done; + + /* Calibration has finished, get the results and re-run */ + for (i = 0; i <= 10; i++) { + 
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR); + i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I); + q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q); + } + + i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7; + q_coffd = q_pwr >> 7; + + /* No correction */ + if (i_coffd == 0 || q_coffd == 0) + goto done; + + i_coff = ((-iq_corr) / i_coffd); + + /* Boundary check */ + if (i_coff > 31) + i_coff = 31; + if (i_coff < -32) + i_coff = -32; + + q_coff = (((s32)i_pwr / q_coffd) - 128); + + /* Boundary check */ + if (q_coff > 15) + q_coff = 15; + if (q_coff < -16) + q_coff = -16; + + /* Commit new I/Q value */ + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE | + ((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S)); + + /* Re-enable calibration -if we don't we'll commit + * the same values again and again */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, + AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15); + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN); + +done: + + /* TODO: Separate noise floor calibration from I/Q calibration + * since noise floor calibration interrupts rx path while I/Q + * calibration doesn't. We don't need to run noise floor calibration + * as often as I/Q calibration.*/ + ath5k_hw_update_noise_floor(ah); + + /* Initiate a gain_F calibration */ + ath5k_hw_request_rfgain_probe(ah); + + return 0; +} + +/* + * Perform a PHY calibration + */ +int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + int ret; + + if (ah->ah_radio == AR5K_RF5110) + ret = ath5k_hw_rf5110_calibrate(ah, channel); + else + ret = ath5k_hw_rf511x_calibrate(ah, channel); + + return ret; +} + +/***************************\ +* Spur mitigation functions * +\***************************/ + +bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + u8 refclk_freq; + + if ((ah->ah_radio == AR5K_RF5112) || + (ah->ah_radio == AR5K_RF5413) || + (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) + refclk_freq = 40; + else + refclk_freq = 32; + + if ((channel->center_freq % refclk_freq != 0) && + ((channel->center_freq % refclk_freq < 10) || + (channel->center_freq % refclk_freq > 22))) + return true; + else + return false; +} + +void +ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + u32 mag_mask[4] = {0, 0, 0, 0}; + u32 pilot_mask[2] = {0, 0}; + /* Note: fbin values are scaled up by 2 */ + u16 spur_chan_fbin, chan_fbin, symbol_width, spur_detection_window; + s32 spur_delta_phase, spur_freq_sigma_delta; + s32 spur_offset, num_symbols_x16; + u8 num_symbol_offsets, i, freq_band; + + /* Convert current frequency to fbin value (the same way channels + * are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale + * up by 2 so we can compare it later */ + if (channel->hw_value & CHANNEL_2GHZ) { + chan_fbin = (channel->center_freq - 2300) * 10; + freq_band = AR5K_EEPROM_BAND_2GHZ; + } else { + chan_fbin = (channel->center_freq - 4900) * 10; + freq_band = AR5K_EEPROM_BAND_5GHZ; + } + + /* Check if any spur_chan_fbin from EEPROM is + * within our current channel's spur detection range */ + spur_chan_fbin = AR5K_EEPROM_NO_SPUR; + spur_detection_window = AR5K_SPUR_CHAN_WIDTH; + /* XXX: Half/Quarter channels ?*/ + if (channel->hw_value & CHANNEL_TURBO) + spur_detection_window *= 2; + + for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) { + spur_chan_fbin = ee->ee_spur_chans[i][freq_band]; + + /* Note: mask cleans 
AR5K_EEPROM_NO_SPUR flag + * so it's zero if we got nothing from EEPROM */ + if (spur_chan_fbin == AR5K_EEPROM_NO_SPUR) { + spur_chan_fbin &= AR5K_EEPROM_SPUR_CHAN_MASK; + break; + } + + if ((chan_fbin - spur_detection_window <= + (spur_chan_fbin & AR5K_EEPROM_SPUR_CHAN_MASK)) && + (chan_fbin + spur_detection_window >= + (spur_chan_fbin & AR5K_EEPROM_SPUR_CHAN_MASK))) { + spur_chan_fbin &= AR5K_EEPROM_SPUR_CHAN_MASK; + break; + } + } + + /* We need to enable spur filter for this channel */ + if (spur_chan_fbin) { + spur_offset = spur_chan_fbin - chan_fbin; + /* + * Calculate deltas: + * spur_freq_sigma_delta -> spur_offset / sample_freq << 21 + * spur_delta_phase -> spur_offset / chip_freq << 11 + * Note: Both values have 100KHz resolution + */ + /* XXX: Half/Quarter rate channels ? */ + switch (channel->hw_value) { + case CHANNEL_A: + /* Both sample_freq and chip_freq are 40MHz */ + spur_delta_phase = (spur_offset << 17) / 25; + spur_freq_sigma_delta = (spur_delta_phase >> 10); + symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz; + break; + case CHANNEL_G: + /* sample_freq -> 40MHz chip_freq -> 44MHz + * (for b compatibility) */ + spur_freq_sigma_delta = (spur_offset << 8) / 55; + spur_delta_phase = (spur_offset << 17) / 25; + symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz; + break; + case CHANNEL_T: + case CHANNEL_TG: + /* Both sample_freq and chip_freq are 80MHz */ + spur_delta_phase = (spur_offset << 16) / 25; + spur_freq_sigma_delta = (spur_delta_phase >> 10); + symbol_width = AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz; + break; + default: + return; + } + + /* Calculate pilot and magnitude masks */ + + /* Scale up spur_offset by 1000 to switch to 100HZ resolution + * and divide by symbol_width to find how many symbols we have + * Note: number of symbols is scaled up by 16 */ + num_symbols_x16 = ((spur_offset * 1000) << 4) / symbol_width; + + /* Spur is on a symbol if num_symbols_x16 % 16 is zero */ + if (!(num_symbols_x16 & 0xF)) + /* _X_ */ + num_symbol_offsets = 3; + else + /* _xx_ */ + num_symbol_offsets = 4; + + for (i = 0; i < num_symbol_offsets; i++) { + + /* Calculate pilot mask */ + s32 curr_sym_off = + (num_symbols_x16 / 16) + i + 25; + + /* Pilot magnitude mask seems to be a way to + * declare the boundaries for our detection + * window or something, it's 2 for the middle + * value(s) where the symbol is expected to be + * and 1 on the boundary values */ + u8 plt_mag_map = + (i == 0 || i == (num_symbol_offsets - 1)) + ? 1 : 2; + + if (curr_sym_off >= 0 && curr_sym_off <= 32) { + if (curr_sym_off <= 25) + pilot_mask[0] |= 1 << curr_sym_off; + else if (curr_sym_off >= 27) + pilot_mask[0] |= 1 << (curr_sym_off - 1); + } else if (curr_sym_off >= 33 && curr_sym_off <= 52) + pilot_mask[1] |= 1 << (curr_sym_off - 33); + + /* Calculate magnitude mask (for viterbi decoder) */ + if (curr_sym_off >= -1 && curr_sym_off <= 14) + mag_mask[0] |= + plt_mag_map << (curr_sym_off + 1) * 2; + else if (curr_sym_off >= 15 && curr_sym_off <= 30) + mag_mask[1] |= + plt_mag_map << (curr_sym_off - 15) * 2; + else if (curr_sym_off >= 31 && curr_sym_off <= 46) + mag_mask[2] |= + plt_mag_map << (curr_sym_off - 31) * 2; + else if (curr_sym_off >= 46 && curr_sym_off <= 53) + mag_mask[3] |= + plt_mag_map << (curr_sym_off - 47) * 2; + + } + + /* Write settings on hw to enable spur filter */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL, + AR5K_PHY_BIN_MASK_CTL_RATE, 0xff); + /* XXX: Self correlator also ? 
*/ + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, + AR5K_PHY_IQ_PILOT_MASK_EN | + AR5K_PHY_IQ_CHAN_MASK_EN | + AR5K_PHY_IQ_SPUR_FILT_EN); + + /* Set delta phase and freq sigma delta */ + ath5k_hw_reg_write(ah, + AR5K_REG_SM(spur_delta_phase, + AR5K_PHY_TIMING_11_SPUR_DELTA_PHASE) | + AR5K_REG_SM(spur_freq_sigma_delta, + AR5K_PHY_TIMING_11_SPUR_FREQ_SD) | + AR5K_PHY_TIMING_11_USE_SPUR_IN_AGC, + AR5K_PHY_TIMING_11); + + /* Write pilot masks */ + ath5k_hw_reg_write(ah, pilot_mask[0], AR5K_PHY_TIMING_7); + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_8, + AR5K_PHY_TIMING_8_PILOT_MASK_2, + pilot_mask[1]); + + ath5k_hw_reg_write(ah, pilot_mask[0], AR5K_PHY_TIMING_9); + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_10, + AR5K_PHY_TIMING_10_PILOT_MASK_2, + pilot_mask[1]); + + /* Write magnitude masks */ + ath5k_hw_reg_write(ah, mag_mask[0], AR5K_PHY_BIN_MASK_1); + ath5k_hw_reg_write(ah, mag_mask[1], AR5K_PHY_BIN_MASK_2); + ath5k_hw_reg_write(ah, mag_mask[2], AR5K_PHY_BIN_MASK_3); + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL, + AR5K_PHY_BIN_MASK_CTL_MASK_4, + mag_mask[3]); + + ath5k_hw_reg_write(ah, mag_mask[0], AR5K_PHY_BIN_MASK2_1); + ath5k_hw_reg_write(ah, mag_mask[1], AR5K_PHY_BIN_MASK2_2); + ath5k_hw_reg_write(ah, mag_mask[2], AR5K_PHY_BIN_MASK2_3); + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK2_4, + AR5K_PHY_BIN_MASK2_4_MASK_4, + mag_mask[3]); + + } else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & + AR5K_PHY_IQ_SPUR_FILT_EN) { + /* Clean up spur mitigation settings and disable fliter */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL, + AR5K_PHY_BIN_MASK_CTL_RATE, 0); + AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_IQ, + AR5K_PHY_IQ_PILOT_MASK_EN | + AR5K_PHY_IQ_CHAN_MASK_EN | + AR5K_PHY_IQ_SPUR_FILT_EN); + ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_11); + + /* Clear pilot masks */ + ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_7); + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_8, + AR5K_PHY_TIMING_8_PILOT_MASK_2, + 0); + + ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_9); + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_10, + AR5K_PHY_TIMING_10_PILOT_MASK_2, + 0); + + /* Clear magnitude masks */ + ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_1); + ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_2); + ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_3); + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL, + AR5K_PHY_BIN_MASK_CTL_MASK_4, + 0); + + ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_1); + ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_2); + ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_3); + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK2_4, + AR5K_PHY_BIN_MASK2_4_MASK_4, + 0); + } +} + +/********************\ + Misc PHY functions +\********************/ + +int ath5k_hw_phy_disable(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + /*Just a try M.F.*/ + ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT); + + return 0; +} + +/* + * Get the PHY Chip revision + */ +u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan) +{ + unsigned int i; + u32 srev; + u16 ret; + + ATH5K_TRACE(ah->ah_sc); + + /* + * Set the radio chip access register + */ + switch (chan) { + case CHANNEL_2GHZ: + ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0)); + break; + case CHANNEL_5GHZ: + ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0)); + break; + default: + return 0; + } + + mdelay(2); + + /* ...wait until PHY is ready and read the selected radio revision */ + ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34)); + + for (i = 0; i < 8; i++) + ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20)); + + if (ah->ah_version == AR5K_AR5210) { + srev = 
ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf; + ret = (u16)ath5k_hw_bitswap(srev, 4) + 1; + } else { + srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff; + ret = (u16)ath5k_hw_bitswap(((srev & 0xf0) >> 4) | + ((srev & 0x0f) << 4), 8); + } + + /* Reset to the 5GHz mode */ + ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0)); + + return ret; +} + +/*****************\ +* Antenna control * +\*****************/ + +void /*TODO:Boundary check*/ +ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant) +{ + ATH5K_TRACE(ah->ah_sc); + + if (ah->ah_version != AR5K_AR5210) + ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA); +} + +unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah) +{ + ATH5K_TRACE(ah->ah_sc); + + if (ah->ah_version != AR5K_AR5210) + return ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA) & 0x7; + + return false; /*XXX: What do we return for 5210 ?*/ +} + +/* + * Enable/disable fast rx antenna diversity + */ +static void +ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable) +{ + switch (ee_mode) { + case AR5K_EEPROM_MODE_11G: + /* XXX: This is set to + * disabled on initvals !!! */ + case AR5K_EEPROM_MODE_11A: + if (enable) + AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_OFDM_DIV_DIS); + else + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_OFDM_DIV_DIS); + break; + case AR5K_EEPROM_MODE_11B: + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_OFDM_DIV_DIS); + break; + default: + return; + } + + if (enable) { + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART, + AR5K_PHY_RESTART_DIV_GC, 0xc); + + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV, + AR5K_PHY_FAST_ANT_DIV_EN); + } else { + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART, + AR5K_PHY_RESTART_DIV_GC, 0x8); + + AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV, + AR5K_PHY_FAST_ANT_DIV_EN); + } +} + +/* + * Set antenna operating mode + */ +void +ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode) +{ + struct ieee80211_channel *channel = ah->ah_current_channel; + bool use_def_for_tx, update_def_on_tx, use_def_for_rts, fast_div; + bool use_def_for_sg; + u8 def_ant, tx_ant, ee_mode; + u32 sta_id1 = 0; + + def_ant = ah->ah_def_ant; + + ATH5K_TRACE(ah->ah_sc); + + switch (channel->hw_value & CHANNEL_MODES) { + case CHANNEL_A: + case CHANNEL_T: + case CHANNEL_XR: + ee_mode = AR5K_EEPROM_MODE_11A; + break; + case CHANNEL_G: + case CHANNEL_TG: + ee_mode = AR5K_EEPROM_MODE_11G; + break; + case CHANNEL_B: + ee_mode = AR5K_EEPROM_MODE_11B; + break; + default: + ATH5K_ERR(ah->ah_sc, + "invalid channel: %d\n", channel->center_freq); + return; + } + + switch (ant_mode) { + case AR5K_ANTMODE_DEFAULT: + tx_ant = 0; + use_def_for_tx = false; + update_def_on_tx = false; + use_def_for_rts = false; + use_def_for_sg = false; + fast_div = true; + break; + case AR5K_ANTMODE_FIXED_A: + def_ant = 1; + tx_ant = 0; + use_def_for_tx = true; + update_def_on_tx = false; + use_def_for_rts = true; + use_def_for_sg = true; + fast_div = false; + break; + case AR5K_ANTMODE_FIXED_B: + def_ant = 2; + tx_ant = 0; + use_def_for_tx = true; + update_def_on_tx = false; + use_def_for_rts = true; + use_def_for_sg = true; + fast_div = false; + break; + case AR5K_ANTMODE_SINGLE_AP: + def_ant = 1; /* updated on tx */ + tx_ant = 0; + use_def_for_tx = true; + update_def_on_tx = true; + use_def_for_rts = true; + use_def_for_sg = true; + fast_div = true; + break; + case AR5K_ANTMODE_SECTOR_AP: + tx_ant = 1; /* variable */ + use_def_for_tx = false; + update_def_on_tx = false; + use_def_for_rts = true; + 
use_def_for_sg = false; + fast_div = false; + break; + case AR5K_ANTMODE_SECTOR_STA: + tx_ant = 1; /* variable */ + use_def_for_tx = true; + update_def_on_tx = false; + use_def_for_rts = true; + use_def_for_sg = false; + fast_div = true; + break; + case AR5K_ANTMODE_DEBUG: + def_ant = 1; + tx_ant = 2; + use_def_for_tx = false; + update_def_on_tx = false; + use_def_for_rts = false; + use_def_for_sg = false; + fast_div = false; + break; + default: + return; + } + + ah->ah_tx_ant = tx_ant; + ah->ah_ant_mode = ant_mode; + + sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0; + sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0; + sta_id1 |= use_def_for_rts ? AR5K_STA_ID1_RTS_DEF_ANTENNA : 0; + sta_id1 |= use_def_for_sg ? AR5K_STA_ID1_SELFGEN_DEF_ANT : 0; + + AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_ANTENNA_SETTINGS); + + if (sta_id1) + AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, sta_id1); + + /* Note: set diversity before default antenna + * because it won't work correctly */ + ath5k_hw_set_fast_div(ah, ee_mode, fast_div); + ath5k_hw_set_def_antenna(ah, def_ant); +} + + +/****************\ +* TX power setup * +\****************/ + +/* + * Helper functions + */ + +/* + * Do linear interpolation between two given (x, y) points + */ +static s16 +ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right, + s16 y_left, s16 y_right) +{ + s16 ratio, result; + + /* Avoid divide by zero and skip interpolation + * if we have the same point */ + if ((x_left == x_right) || (y_left == y_right)) + return y_left; + + /* + * Since we use ints and not fps, we need to scale up in + * order to get a sane ratio value (or else we 'll eg. get + * always 1 instead of 1.25, 1.75 etc). We scale up by 100 + * to have some accuracy both for 0.5 and 0.25 steps. + */ + ratio = ((100 * y_right - 100 * y_left)/(x_right - x_left)); + + /* Now scale down to be in range */ + result = y_left + (ratio * (target - x_left) / 100); + + return result; +} + +/* + * Find vertical boundary (min pwr) for the linear PCDAC curve. + * + * Since we have the top of the curve and we draw the line below + * until we reach 1 (1 pcdac step) we need to know which point + * (x value) that is so that we don't go below y axis and have negative + * pcdac values when creating the curve, or fill the table with zeroes. + */ +static s16 +ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR, + const s16 *pwrL, const s16 *pwrR) +{ + s8 tmp; + s16 min_pwrL, min_pwrR; + s16 pwr_i; + + /* Some vendors write the same pcdac value twice !!! */ + if (stepL[0] == stepL[1] || stepR[0] == stepR[1]) + return max(pwrL[0], pwrR[0]); + + if (pwrL[0] == pwrL[1]) + min_pwrL = pwrL[0]; + else { + pwr_i = pwrL[0]; + do { + pwr_i--; + tmp = (s8) ath5k_get_interpolated_value(pwr_i, + pwrL[0], pwrL[1], + stepL[0], stepL[1]); + } while (tmp > 1); + + min_pwrL = pwr_i; + } + + if (pwrR[0] == pwrR[1]) + min_pwrR = pwrR[0]; + else { + pwr_i = pwrR[0]; + do { + pwr_i--; + tmp = (s8) ath5k_get_interpolated_value(pwr_i, + pwrR[0], pwrR[1], + stepR[0], stepR[1]); + } while (tmp > 1); + + min_pwrR = pwr_i; + } + + /* Keep the right boundary so that it works for both curves */ + return max(min_pwrL, min_pwrR); +} + +/* + * Interpolate (pwr,vpd) points to create a Power to PDADC or a + * Power to PCDAC curve. + * + * Each curve has power on x axis (in 0.5dB units) and PCDAC/PDADC + * steps (offsets) on y axis. 
Power can go up to 31.5dB and max + * PCDAC/PDADC step for each curve is 64 but we can write more than + * one curves on hw so we can go up to 128 (which is the max step we + * can write on the final table). + * + * We write y values (PCDAC/PDADC steps) on hw. + */ +static void +ath5k_create_power_curve(s16 pmin, s16 pmax, + const s16 *pwr, const u8 *vpd, + u8 num_points, + u8 *vpd_table, u8 type) +{ + u8 idx[2] = { 0, 1 }; + s16 pwr_i = 2*pmin; + int i; + + if (num_points < 2) + return; + + /* We want the whole line, so adjust boundaries + * to cover the entire power range. Note that + * power values are already 0.25dB so no need + * to multiply pwr_i by 2 */ + if (type == AR5K_PWRTABLE_LINEAR_PCDAC) { + pwr_i = pmin; + pmin = 0; + pmax = 63; + } + + /* Find surrounding turning points (TPs) + * and interpolate between them */ + for (i = 0; (i <= (u16) (pmax - pmin)) && + (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) { + + /* We passed the right TP, move to the next set of TPs + * if we pass the last TP, extrapolate above using the last + * two TPs for ratio */ + if ((pwr_i > pwr[idx[1]]) && (idx[1] < num_points - 1)) { + idx[0]++; + idx[1]++; + } + + vpd_table[i] = (u8) ath5k_get_interpolated_value(pwr_i, + pwr[idx[0]], pwr[idx[1]], + vpd[idx[0]], vpd[idx[1]]); + + /* Increase by 0.5dB + * (0.25 dB units) */ + pwr_i += 2; + } +} + +/* + * Get the surrounding per-channel power calibration piers + * for a given frequency so that we can interpolate between + * them and come up with an apropriate dataset for our current + * channel. + */ +static void +ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah, + struct ieee80211_channel *channel, + struct ath5k_chan_pcal_info **pcinfo_l, + struct ath5k_chan_pcal_info **pcinfo_r) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_chan_pcal_info *pcinfo; + u8 idx_l, idx_r; + u8 mode, max, i; + u32 target = channel->center_freq; + + idx_l = 0; + idx_r = 0; + + if (!(channel->hw_value & CHANNEL_OFDM)) { + pcinfo = ee->ee_pwr_cal_b; + mode = AR5K_EEPROM_MODE_11B; + } else if (channel->hw_value & CHANNEL_2GHZ) { + pcinfo = ee->ee_pwr_cal_g; + mode = AR5K_EEPROM_MODE_11G; + } else { + pcinfo = ee->ee_pwr_cal_a; + mode = AR5K_EEPROM_MODE_11A; + } + max = ee->ee_n_piers[mode] - 1; + + /* Frequency is below our calibrated + * range. Use the lowest power curve + * we have */ + if (target < pcinfo[0].freq) { + idx_l = idx_r = 0; + goto done; + } + + /* Frequency is above our calibrated + * range. Use the highest power curve + * we have */ + if (target > pcinfo[max].freq) { + idx_l = idx_r = max; + goto done; + } + + /* Frequency is inside our calibrated + * channel range. Pick the surrounding + * calibration piers so that we can + * interpolate */ + for (i = 0; i <= max; i++) { + + /* Frequency matches one of our calibration + * piers, no need to interpolate, just use + * that calibration pier */ + if (pcinfo[i].freq == target) { + idx_l = idx_r = i; + goto done; + } + + /* We found a calibration pier that's above + * frequency, use this pier and the previous + * one to interpolate */ + if (target < pcinfo[i].freq) { + idx_r = i; + idx_l = idx_r - 1; + goto done; + } + } + +done: + *pcinfo_l = &pcinfo[idx_l]; + *pcinfo_r = &pcinfo[idx_r]; + + return; +} + +/* + * Get the surrounding per-rate power calibration data + * for a given frequency and interpolate between power + * values to set max target power supported by hw for + * each rate. 
+ */ +static void +ath5k_get_rate_pcal_data(struct ath5k_hw *ah, + struct ieee80211_channel *channel, + struct ath5k_rate_pcal_info *rates) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_rate_pcal_info *rpinfo; + u8 idx_l, idx_r; + u8 mode, max, i; + u32 target = channel->center_freq; + + idx_l = 0; + idx_r = 0; + + if (!(channel->hw_value & CHANNEL_OFDM)) { + rpinfo = ee->ee_rate_tpwr_b; + mode = AR5K_EEPROM_MODE_11B; + } else if (channel->hw_value & CHANNEL_2GHZ) { + rpinfo = ee->ee_rate_tpwr_g; + mode = AR5K_EEPROM_MODE_11G; + } else { + rpinfo = ee->ee_rate_tpwr_a; + mode = AR5K_EEPROM_MODE_11A; + } + max = ee->ee_rate_target_pwr_num[mode] - 1; + + /* Get the surrounding calibration + * piers - same as above */ + if (target < rpinfo[0].freq) { + idx_l = idx_r = 0; + goto done; + } + + if (target > rpinfo[max].freq) { + idx_l = idx_r = max; + goto done; + } + + for (i = 0; i <= max; i++) { + + if (rpinfo[i].freq == target) { + idx_l = idx_r = i; + goto done; + } + + if (target < rpinfo[i].freq) { + idx_r = i; + idx_l = idx_r - 1; + goto done; + } + } + +done: + /* Now interpolate power value, based on the frequency */ + rates->freq = target; + + rates->target_power_6to24 = + ath5k_get_interpolated_value(target, rpinfo[idx_l].freq, + rpinfo[idx_r].freq, + rpinfo[idx_l].target_power_6to24, + rpinfo[idx_r].target_power_6to24); + + rates->target_power_36 = + ath5k_get_interpolated_value(target, rpinfo[idx_l].freq, + rpinfo[idx_r].freq, + rpinfo[idx_l].target_power_36, + rpinfo[idx_r].target_power_36); + + rates->target_power_48 = + ath5k_get_interpolated_value(target, rpinfo[idx_l].freq, + rpinfo[idx_r].freq, + rpinfo[idx_l].target_power_48, + rpinfo[idx_r].target_power_48); + + rates->target_power_54 = + ath5k_get_interpolated_value(target, rpinfo[idx_l].freq, + rpinfo[idx_r].freq, + rpinfo[idx_l].target_power_54, + rpinfo[idx_r].target_power_54); +} + +/* + * Get the max edge power for this channel if + * we have such data from EEPROM's Conformance Test + * Limits (CTL), and limit max power if needed. + */ +static void +ath5k_get_max_ctl_power(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah); + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + struct ath5k_edge_power *rep = ee->ee_ctl_pwr; + u8 *ctl_val = ee->ee_ctl; + s16 max_chan_pwr = ah->ah_txpower.txp_max_pwr / 4; + s16 edge_pwr = 0; + u8 rep_idx; + u8 i, ctl_mode; + u8 ctl_idx = 0xFF; + u32 target = channel->center_freq; + + ctl_mode = ath_regd_get_band_ctl(regulatory, channel->band); + + switch (channel->hw_value & CHANNEL_MODES) { + case CHANNEL_A: + ctl_mode |= AR5K_CTL_11A; + break; + case CHANNEL_G: + ctl_mode |= AR5K_CTL_11G; + break; + case CHANNEL_B: + ctl_mode |= AR5K_CTL_11B; + break; + case CHANNEL_T: + ctl_mode |= AR5K_CTL_TURBO; + break; + case CHANNEL_TG: + ctl_mode |= AR5K_CTL_TURBOG; + break; + case CHANNEL_XR: + /* Fall through */ + default: + return; + } + + for (i = 0; i < ee->ee_ctls; i++) { + if (ctl_val[i] == ctl_mode) { + ctl_idx = i; + break; + } + } + + /* If we have a CTL dataset available grab it and find the + * edge power for our frequency */ + if (ctl_idx == 0xFF) + return; + + /* Edge powers are sorted by frequency from lower + * to higher. Each CTL corresponds to 8 edge power + * measurements. 
*/
+	rep_idx = ctl_idx * AR5K_EEPROM_N_EDGES;
+
+	/* Don't do a boundary check because we
+	 * might have more than one band defined
+	 * for this mode */
+
+	/* Get the edge power that's closest to our
+	 * frequency */
+	for (i = 0; i < AR5K_EEPROM_N_EDGES; i++) {
+		rep_idx += i;
+		if (target <= rep[rep_idx].freq)
+			edge_pwr = (s16) rep[rep_idx].edge;
+	}
+
+	if (edge_pwr)
+		ah->ah_txpower.txp_max_pwr = 4*min(edge_pwr, max_chan_pwr);
+}
+
+
+/*
+ * Power to PCDAC table functions
+ */
+
+/*
+ * Fill Power to PCDAC table on RF5111
+ *
+ * No further processing is needed for RF5111, the only thing we have to
+ * do is fill the values below and above calibration range since eeprom data
+ * may not cover the entire PCDAC table.
+ */
+static void
+ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
+			s16 *table_max)
+{
+	u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+	u8 *pcdac_tmp = ah->ah_txpower.tmpL[0];
+	u8 pcdac_0, pcdac_n, pcdac_i, pwr_idx, i;
+	s16 min_pwr, max_pwr;
+
+	/* Get table boundaries */
+	min_pwr = table_min[0];
+	pcdac_0 = pcdac_tmp[0];
+
+	max_pwr = table_max[0];
+	pcdac_n = pcdac_tmp[table_max[0] - table_min[0]];
+
+	/* Extrapolate below minimum using pcdac_0 */
+	pcdac_i = 0;
+	for (i = 0; i < min_pwr; i++)
+		pcdac_out[pcdac_i++] = pcdac_0;
+
+	/* Copy values from pcdac_tmp */
+	pwr_idx = min_pwr;
+	for (i = 0 ; pwr_idx <= max_pwr &&
+		pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE; i++) {
+		pcdac_out[pcdac_i++] = pcdac_tmp[i];
+		pwr_idx++;
+	}
+
+	/* Extrapolate above maximum */
+	while (pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE)
+		pcdac_out[pcdac_i++] = pcdac_n;
+
+}
+
+/*
+ * Combine available XPD Curves and fill Linear Power to PCDAC table
+ * on RF5112
+ *
+ * RF5112 can have up to 2 curves (one for low txpower range and one for
+ * higher txpower range). We need to put them both on pcdac_out and place
+ * them in the correct location. In case we only have one curve available
+ * just fit it on pcdac_out (it's supposed to cover the entire range of
+ * available pwr levels since it's always the higher power curve). Extrapolate
+ * below and above final table if needed.
+ */
+static void
+ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
+			s16 *table_max, u8 pdcurves)
+{
+	u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+	u8 *pcdac_low_pwr;
+	u8 *pcdac_high_pwr;
+	u8 *pcdac_tmp;
+	u8 pwr;
+	s16 max_pwr_idx;
+	s16 min_pwr_idx;
+	s16 mid_pwr_idx = 0;
+	/* Edge flag turns on the 7th bit on the PCDAC
+	 * to declare the higher power curve (force values
+	 * to be greater than 64). If we only have one curve
+	 * we don't need to set this, if we have 2 curves and
+	 * fill the table backwards this can also be used to
+	 * switch from higher power curve to lower power curve */
+	u8 edge_flag;
+	int i;
+
+	/* When we have only one curve available
+	 * that's the higher power curve. If we have
+	 * two curves the first is the high power curve
+	 * and the next is the low power curve. */
+	if (pdcurves > 1) {
+		pcdac_low_pwr = ah->ah_txpower.tmpL[1];
+		pcdac_high_pwr = ah->ah_txpower.tmpL[0];
+		mid_pwr_idx = table_max[1] - table_min[1] - 1;
+		max_pwr_idx = (table_max[0] - table_min[0]) / 2;
+
+		/* If table size goes beyond 31.5dB, keep the
+		 * upper 31.5dB range when setting tx power.
+		 * Note: 126 = 31.5 dB in quarter dB steps */
+		if (table_max[0] - table_min[1] > 126)
+			min_pwr_idx = table_max[0] - 126;
+		else
+			min_pwr_idx = table_min[1];
+
+		/* Since we fill table backwards
+		 * start from high power curve */
+		pcdac_tmp = pcdac_high_pwr;
+
+		edge_flag = 0x40;
+#if 0
+		/* If both min and max power limits are in lower
+		 * power curve's range, only use the low power curve.
+		 * TODO: min/max levels are related to target
+		 * power values requested from driver/user
+		 * XXX: Is this really needed ? */
+		if (min_pwr < table_max[1] &&
+			max_pwr < table_max[1]) {
+			edge_flag = 0;
+			pcdac_tmp = pcdac_low_pwr;
+			max_pwr_idx = (table_max[1] - table_min[1])/2;
+		}
+#endif
+	} else {
+		pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
+		pcdac_high_pwr = ah->ah_txpower.tmpL[0];
+		min_pwr_idx = table_min[0];
+		max_pwr_idx = (table_max[0] - table_min[0]) / 2;
+		pcdac_tmp = pcdac_high_pwr;
+		edge_flag = 0;
+	}
+
+	/* This is used when setting tx power */
+	ah->ah_txpower.txp_min_idx = min_pwr_idx/2;
+
+	/* Fill Power to PCDAC table backwards */
+	pwr = max_pwr_idx;
+	for (i = 63; i >= 0; i--) {
+		/* Entering lower power range, reset
+		 * edge flag and set pcdac_tmp to lower
+		 * power curve. */
+		if (edge_flag == 0x40 &&
+			(2*pwr <= (table_max[1] - table_min[0]) || pwr == 0)) {
+			edge_flag = 0x00;
+			pcdac_tmp = pcdac_low_pwr;
+			pwr = mid_pwr_idx/2;
+		}
+
+		/* Don't go below 1, extrapolate below if we have
+		 * already switched to the lower power curve -or
+		 * we only have one curve and edge_flag is zero
+		 * anyway */
+		if (pcdac_tmp[pwr] < 1 && (edge_flag == 0x00)) {
+			while (i >= 0) {
+				pcdac_out[i] = pcdac_out[i + 1];
+				i--;
+			}
+			break;
+		}
+
+		pcdac_out[i] = pcdac_tmp[pwr] | edge_flag;
+
+		/* Extrapolate above if pcdac is greater than
+		 * 126 - this can happen because we OR pcdac_out
+		 * value with edge_flag on high power curve */
+		if (pcdac_out[i] > 126)
+			pcdac_out[i] = 126;
+
+		/* Decrease by a 0.5dB step */
+		pwr--;
+	}
+}
+
+/* Write PCDAC values on hw */
+static void
+ath5k_setup_pcdac_table(struct ath5k_hw *ah)
+{
+	u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+	int i;
+
+	/*
+	 * Write TX power values
+	 */
+	for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
+		ath5k_hw_reg_write(ah,
+			(((pcdac_out[2*i + 0] << 8 | 0xff) & 0xffff) << 0) |
+			(((pcdac_out[2*i + 1] << 8 | 0xff) & 0xffff) << 16),
+			AR5K_PHY_PCDAC_TXPOWER(i));
+	}
+}
+
+
+/*
+ * Power to PDADC table functions
+ */
+
+/*
+ * Set the gain boundaries and create final Power to PDADC table
+ *
+ * We can have up to 4 pd curves, we need to do a similar process
+ * as we do for RF5112. This time we don't have an edge_flag but we
+ * set the gain boundaries on a separate register.
+ */
+static void
+ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
+			s16 *pwr_min, s16 *pwr_max, u8 pdcurves)
+{
+	u8 gain_boundaries[AR5K_EEPROM_N_PD_GAINS];
+	u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
+	u8 *pdadc_tmp;
+	s16 pdadc_0;
+	u8 pdadc_i, pdadc_n, pwr_step, pdg, max_idx, table_size;
+	u8 pd_gain_overlap;
+
+	/* Note: Register value is initialized on initvals
+	 * there is no feedback from hw.
+	 * XXX: What about pd_gain_overlap from EEPROM ?
*/ + pd_gain_overlap = (u8) ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG5) & + AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP; + + /* Create final PDADC table */ + for (pdg = 0, pdadc_i = 0; pdg < pdcurves; pdg++) { + pdadc_tmp = ah->ah_txpower.tmpL[pdg]; + + if (pdg == pdcurves - 1) + /* 2 dB boundary stretch for last + * (higher power) curve */ + gain_boundaries[pdg] = pwr_max[pdg] + 4; + else + /* Set gain boundary in the middle + * between this curve and the next one */ + gain_boundaries[pdg] = + (pwr_max[pdg] + pwr_min[pdg + 1]) / 2; + + /* Sanity check in case our 2 db stretch got out of + * range. */ + if (gain_boundaries[pdg] > AR5K_TUNE_MAX_TXPOWER) + gain_boundaries[pdg] = AR5K_TUNE_MAX_TXPOWER; + + /* For the first curve (lower power) + * start from 0 dB */ + if (pdg == 0) + pdadc_0 = 0; + else + /* For the other curves use the gain overlap */ + pdadc_0 = (gain_boundaries[pdg - 1] - pwr_min[pdg]) - + pd_gain_overlap; + + /* Force each power step to be at least 0.5 dB */ + if ((pdadc_tmp[1] - pdadc_tmp[0]) > 1) + pwr_step = pdadc_tmp[1] - pdadc_tmp[0]; + else + pwr_step = 1; + + /* If pdadc_0 is negative, we need to extrapolate + * below this pdgain by a number of pwr_steps */ + while ((pdadc_0 < 0) && (pdadc_i < 128)) { + s16 tmp = pdadc_tmp[0] + pdadc_0 * pwr_step; + pdadc_out[pdadc_i++] = (tmp < 0) ? 0 : (u8) tmp; + pdadc_0++; + } + + /* Set last pwr level, using gain boundaries */ + pdadc_n = gain_boundaries[pdg] + pd_gain_overlap - pwr_min[pdg]; + /* Limit it to be inside pwr range */ + table_size = pwr_max[pdg] - pwr_min[pdg]; + max_idx = (pdadc_n < table_size) ? pdadc_n : table_size; + + /* Fill pdadc_out table */ + while (pdadc_0 < max_idx) + pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++]; + + /* Need to extrapolate above this pdgain? */ + if (pdadc_n <= max_idx) + continue; + + /* Force each power step to be at least 0.5 dB */ + if ((pdadc_tmp[table_size - 1] - pdadc_tmp[table_size - 2]) > 1) + pwr_step = pdadc_tmp[table_size - 1] - + pdadc_tmp[table_size - 2]; + else + pwr_step = 1; + + /* Extrapolate above */ + while ((pdadc_0 < (s16) pdadc_n) && + (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2)) { + s16 tmp = pdadc_tmp[table_size - 1] + + (pdadc_0 - max_idx) * pwr_step; + pdadc_out[pdadc_i++] = (tmp > 127) ? 
127 : (u8) tmp; + pdadc_0++; + } + } + + while (pdg < AR5K_EEPROM_N_PD_GAINS) { + gain_boundaries[pdg] = gain_boundaries[pdg - 1]; + pdg++; + } + + while (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2) { + pdadc_out[pdadc_i] = pdadc_out[pdadc_i - 1]; + pdadc_i++; + } + + /* Set gain boundaries */ + ath5k_hw_reg_write(ah, + AR5K_REG_SM(pd_gain_overlap, + AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP) | + AR5K_REG_SM(gain_boundaries[0], + AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1) | + AR5K_REG_SM(gain_boundaries[1], + AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2) | + AR5K_REG_SM(gain_boundaries[2], + AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3) | + AR5K_REG_SM(gain_boundaries[3], + AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4), + AR5K_PHY_TPC_RG5); + + /* Used for setting rate power table */ + ah->ah_txpower.txp_min_idx = pwr_min[0]; + +} + +/* Write PDADC values on hw */ +static void +ath5k_setup_pwr_to_pdadc_table(struct ath5k_hw *ah, + u8 pdcurves, u8 *pdg_to_idx) +{ + u8 *pdadc_out = ah->ah_txpower.txp_pd_table; + u32 reg; + u8 i; + + /* Select the right pdgain curves */ + + /* Clear current settings */ + reg = ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG1); + reg &= ~(AR5K_PHY_TPC_RG1_PDGAIN_1 | + AR5K_PHY_TPC_RG1_PDGAIN_2 | + AR5K_PHY_TPC_RG1_PDGAIN_3 | + AR5K_PHY_TPC_RG1_NUM_PD_GAIN); + + /* + * Use pd_gains curve from eeprom + * + * This overrides the default setting from initvals + * in case some vendors (e.g. Zcomax) don't use the default + * curves. If we don't honor their settings we 'll get a + * 5dB (1 * gain overlap ?) drop. + */ + reg |= AR5K_REG_SM(pdcurves, AR5K_PHY_TPC_RG1_NUM_PD_GAIN); + + switch (pdcurves) { + case 3: + reg |= AR5K_REG_SM(pdg_to_idx[2], AR5K_PHY_TPC_RG1_PDGAIN_3); + /* Fall through */ + case 2: + reg |= AR5K_REG_SM(pdg_to_idx[1], AR5K_PHY_TPC_RG1_PDGAIN_2); + /* Fall through */ + case 1: + reg |= AR5K_REG_SM(pdg_to_idx[0], AR5K_PHY_TPC_RG1_PDGAIN_1); + break; + } + ath5k_hw_reg_write(ah, reg, AR5K_PHY_TPC_RG1); + + /* + * Write TX power values + */ + for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) { + ath5k_hw_reg_write(ah, + ((pdadc_out[4*i + 0] & 0xff) << 0) | + ((pdadc_out[4*i + 1] & 0xff) << 8) | + ((pdadc_out[4*i + 2] & 0xff) << 16) | + ((pdadc_out[4*i + 3] & 0xff) << 24), + AR5K_PHY_PDADC_TXPOWER(i)); + } +} + + +/* + * Common code for PCDAC/PDADC tables + */ + +/* + * This is the main function that uses all of the above + * to set PCDAC/PDADC table on hw for the current channel. + * This table is used for tx power calibration on the basband, + * without it we get weird tx power levels and in some cases + * distorted spectral mask + */ +static int +ath5k_setup_channel_powertable(struct ath5k_hw *ah, + struct ieee80211_channel *channel, + u8 ee_mode, u8 type) +{ + struct ath5k_pdgain_info *pdg_L, *pdg_R; + struct ath5k_chan_pcal_info *pcinfo_L; + struct ath5k_chan_pcal_info *pcinfo_R; + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + u8 *pdg_curve_to_idx = ee->ee_pdc_to_idx[ee_mode]; + s16 table_min[AR5K_EEPROM_N_PD_GAINS]; + s16 table_max[AR5K_EEPROM_N_PD_GAINS]; + u8 *tmpL; + u8 *tmpR; + u32 target = channel->center_freq; + int pdg, i; + + /* Get surounding freq piers for this channel */ + ath5k_get_chan_pcal_surrounding_piers(ah, channel, + &pcinfo_L, + &pcinfo_R); + + /* Loop over pd gain curves on + * surounding freq piers by index */ + for (pdg = 0; pdg < ee->ee_pd_gains[ee_mode]; pdg++) { + + /* Fill curves in reverse order + * from lower power (max gain) + * to higher power. 
Use curve -> idx + * backmapping we did on eeprom init */ + u8 idx = pdg_curve_to_idx[pdg]; + + /* Grab the needed curves by index */ + pdg_L = &pcinfo_L->pd_curves[idx]; + pdg_R = &pcinfo_R->pd_curves[idx]; + + /* Initialize the temp tables */ + tmpL = ah->ah_txpower.tmpL[pdg]; + tmpR = ah->ah_txpower.tmpR[pdg]; + + /* Set curve's x boundaries and create + * curves so that they cover the same + * range (if we don't do that one table + * will have values on some range and the + * other one won't have any so interpolation + * will fail) */ + table_min[pdg] = min(pdg_L->pd_pwr[0], + pdg_R->pd_pwr[0]) / 2; + + table_max[pdg] = max(pdg_L->pd_pwr[pdg_L->pd_points - 1], + pdg_R->pd_pwr[pdg_R->pd_points - 1]) / 2; + + /* Now create the curves on surrounding channels + * and interpolate if needed to get the final + * curve for this gain on this channel */ + switch (type) { + case AR5K_PWRTABLE_LINEAR_PCDAC: + /* Override min/max so that we don't loose + * accuracy (don't divide by 2) */ + table_min[pdg] = min(pdg_L->pd_pwr[0], + pdg_R->pd_pwr[0]); + + table_max[pdg] = + max(pdg_L->pd_pwr[pdg_L->pd_points - 1], + pdg_R->pd_pwr[pdg_R->pd_points - 1]); + + /* Override minimum so that we don't get + * out of bounds while extrapolating + * below. Don't do this when we have 2 + * curves and we are on the high power curve + * because table_min is ok in this case */ + if (!(ee->ee_pd_gains[ee_mode] > 1 && pdg == 0)) { + + table_min[pdg] = + ath5k_get_linear_pcdac_min(pdg_L->pd_step, + pdg_R->pd_step, + pdg_L->pd_pwr, + pdg_R->pd_pwr); + + /* Don't go too low because we will + * miss the upper part of the curve. + * Note: 126 = 31.5dB (max power supported) + * in 0.25dB units */ + if (table_max[pdg] - table_min[pdg] > 126) + table_min[pdg] = table_max[pdg] - 126; + } + + /* Fall through */ + case AR5K_PWRTABLE_PWR_TO_PCDAC: + case AR5K_PWRTABLE_PWR_TO_PDADC: + + ath5k_create_power_curve(table_min[pdg], + table_max[pdg], + pdg_L->pd_pwr, + pdg_L->pd_step, + pdg_L->pd_points, tmpL, type); + + /* We are in a calibration + * pier, no need to interpolate + * between freq piers */ + if (pcinfo_L == pcinfo_R) + continue; + + ath5k_create_power_curve(table_min[pdg], + table_max[pdg], + pdg_R->pd_pwr, + pdg_R->pd_step, + pdg_R->pd_points, tmpR, type); + break; + default: + return -EINVAL; + } + + /* Interpolate between curves + * of surounding freq piers to + * get the final curve for this + * pd gain. Re-use tmpL for interpolation + * output */ + for (i = 0; (i < (u16) (table_max[pdg] - table_min[pdg])) && + (i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) { + tmpL[i] = (u8) ath5k_get_interpolated_value(target, + (s16) pcinfo_L->freq, + (s16) pcinfo_R->freq, + (s16) tmpL[i], + (s16) tmpR[i]); + } + } + + /* Now we have a set of curves for this + * channel on tmpL (x range is table_max - table_min + * and y values are tmpL[pdg][]) sorted in the same + * order as EEPROM (because we've used the backmapping). + * So for RF5112 it's from higher power to lower power + * and for RF2413 it's from lower power to higher power. + * For RF5111 we only have one curve. 
*/ + + /* Fill min and max power levels for this + * channel by interpolating the values on + * surounding channels to complete the dataset */ + ah->ah_txpower.txp_min_pwr = ath5k_get_interpolated_value(target, + (s16) pcinfo_L->freq, + (s16) pcinfo_R->freq, + pcinfo_L->min_pwr, pcinfo_R->min_pwr); + + ah->ah_txpower.txp_max_pwr = ath5k_get_interpolated_value(target, + (s16) pcinfo_L->freq, + (s16) pcinfo_R->freq, + pcinfo_L->max_pwr, pcinfo_R->max_pwr); + + /* We are ready to go, fill PCDAC/PDADC + * table and write settings on hardware */ + switch (type) { + case AR5K_PWRTABLE_LINEAR_PCDAC: + /* For RF5112 we can have one or two curves + * and each curve covers a certain power lvl + * range so we need to do some more processing */ + ath5k_combine_linear_pcdac_curves(ah, table_min, table_max, + ee->ee_pd_gains[ee_mode]); + + /* Set txp.offset so that we can + * match max power value with max + * table index */ + ah->ah_txpower.txp_offset = 64 - (table_max[0] / 2); + + /* Write settings on hw */ + ath5k_setup_pcdac_table(ah); + break; + case AR5K_PWRTABLE_PWR_TO_PCDAC: + /* We are done for RF5111 since it has only + * one curve, just fit the curve on the table */ + ath5k_fill_pwr_to_pcdac_table(ah, table_min, table_max); + + /* No rate powertable adjustment for RF5111 */ + ah->ah_txpower.txp_min_idx = 0; + ah->ah_txpower.txp_offset = 0; + + /* Write settings on hw */ + ath5k_setup_pcdac_table(ah); + break; + case AR5K_PWRTABLE_PWR_TO_PDADC: + /* Set PDADC boundaries and fill + * final PDADC table */ + ath5k_combine_pwr_to_pdadc_curves(ah, table_min, table_max, + ee->ee_pd_gains[ee_mode]); + + /* Write settings on hw */ + ath5k_setup_pwr_to_pdadc_table(ah, pdg, pdg_curve_to_idx); + + /* Set txp.offset, note that table_min + * can be negative */ + ah->ah_txpower.txp_offset = table_min[0]; + break; + default: + return -EINVAL; + } + + return 0; +} + + +/* + * Per-rate tx power setting + * + * This is the code that sets the desired tx power (below + * maximum) on hw for each rate (we also have TPC that sets + * power per packet). We do that by providing an index on the + * PCDAC/PDADC table we set up. + */ + +/* + * Set rate power table + * + * For now we only limit txpower based on maximum tx power + * supported by hw (what's inside rate_info). We need to limit + * this even more, based on regulatory domain etc. 
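+ *
+ * As a worked example of the unit bookkeeping done below: max_pwr is
+ * passed in by the caller in 0.5dB steps, so a 20dBm request arrives
+ * as 40 and is doubled to 80 (0.25dB steps) before being compared
+ * with txp_max_pwr, which is also kept in 0.25dB steps.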
+ * + * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps) + * and is indexed as follows: + * rates[0] - rates[7] -> OFDM rates + * rates[8] - rates[14] -> CCK rates + * rates[15] -> XR rates (they all have the same power) + */ +static void +ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr, + struct ath5k_rate_pcal_info *rate_info, + u8 ee_mode) +{ + unsigned int i; + u16 *rates; + + /* max_pwr is power level we got from driver/user in 0.5dB + * units, switch to 0.25dB units so we can compare */ + max_pwr *= 2; + max_pwr = min(max_pwr, (u16) ah->ah_txpower.txp_max_pwr) / 2; + + /* apply rate limits */ + rates = ah->ah_txpower.txp_rates_power_table; + + /* OFDM rates 6 to 24Mb/s */ + for (i = 0; i < 5; i++) + rates[i] = min(max_pwr, rate_info->target_power_6to24); + + /* Rest OFDM rates */ + rates[5] = min(rates[0], rate_info->target_power_36); + rates[6] = min(rates[0], rate_info->target_power_48); + rates[7] = min(rates[0], rate_info->target_power_54); + + /* CCK rates */ + /* 1L */ + rates[8] = min(rates[0], rate_info->target_power_6to24); + /* 2L */ + rates[9] = min(rates[0], rate_info->target_power_36); + /* 2S */ + rates[10] = min(rates[0], rate_info->target_power_36); + /* 5L */ + rates[11] = min(rates[0], rate_info->target_power_48); + /* 5S */ + rates[12] = min(rates[0], rate_info->target_power_48); + /* 11L */ + rates[13] = min(rates[0], rate_info->target_power_54); + /* 11S */ + rates[14] = min(rates[0], rate_info->target_power_54); + + /* XR rates */ + rates[15] = min(rates[0], rate_info->target_power_6to24); + + /* CCK rates have different peak to average ratio + * so we have to tweak their power so that gainf + * correction works ok. For this we use OFDM to + * CCK delta from eeprom */ + if ((ee_mode == AR5K_EEPROM_MODE_11G) && + (ah->ah_phy_revision < AR5K_SREV_PHY_5212A)) + for (i = 8; i <= 15; i++) + rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta; + + /* Now that we have all rates setup use table offset to + * match the power range set by user with the power indices + * on PCDAC/PDADC table */ + for (i = 0; i < 16; i++) { + rates[i] += ah->ah_txpower.txp_offset; + /* Don't get out of bounds */ + if (rates[i] > 63) + rates[i] = 63; + } + + /* Min/max in 0.25dB units */ + ah->ah_txpower.txp_min_pwr = 2 * rates[7]; + ah->ah_txpower.txp_max_pwr = 2 * rates[0]; + ah->ah_txpower.txp_ofdm = rates[7]; +} + + +/* + * Set transmition power + */ +int +ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, + u8 ee_mode, u8 txpower) +{ + struct ath5k_rate_pcal_info rate_info; + u8 type; + int ret; + + ATH5K_TRACE(ah->ah_sc); + if (txpower > AR5K_TUNE_MAX_TXPOWER) { + ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower); + return -EINVAL; + } + + /* Reset TX power values */ + memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); + ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; + ah->ah_txpower.txp_min_pwr = 0; + ah->ah_txpower.txp_max_pwr = AR5K_TUNE_MAX_TXPOWER; + + /* Initialize TX power table */ + switch (ah->ah_radio) { + case AR5K_RF5111: + type = AR5K_PWRTABLE_PWR_TO_PCDAC; + break; + case AR5K_RF5112: + type = AR5K_PWRTABLE_LINEAR_PCDAC; + break; + case AR5K_RF2413: + case AR5K_RF5413: + case AR5K_RF2316: + case AR5K_RF2317: + case AR5K_RF2425: + type = AR5K_PWRTABLE_PWR_TO_PDADC; + break; + default: + return -EINVAL; + } + + /* FIXME: Only on channel/mode change */ + ret = ath5k_setup_channel_powertable(ah, channel, ee_mode, type); + if (ret) + return ret; + + /* Limit max power if we have a CTL available */ + 
ath5k_get_max_ctl_power(ah, channel); + + /* FIXME: Tx power limit for this regdomain + * XXX: Mac80211/CRDA will do that anyway ? */ + + /* FIXME: Antenna reduction stuff */ + + /* FIXME: Limit power on turbo modes */ + + /* FIXME: TPC scale reduction */ + + /* Get surounding channels for per-rate power table + * calibration */ + ath5k_get_rate_pcal_data(ah, channel, &rate_info); + + /* Setup rate power table */ + ath5k_setup_rate_powertable(ah, txpower, &rate_info, ee_mode); + + /* Write rate power table on hw */ + ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(3, 24) | + AR5K_TXPOWER_OFDM(2, 16) | AR5K_TXPOWER_OFDM(1, 8) | + AR5K_TXPOWER_OFDM(0, 0), AR5K_PHY_TXPOWER_RATE1); + + ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(7, 24) | + AR5K_TXPOWER_OFDM(6, 16) | AR5K_TXPOWER_OFDM(5, 8) | + AR5K_TXPOWER_OFDM(4, 0), AR5K_PHY_TXPOWER_RATE2); + + ath5k_hw_reg_write(ah, AR5K_TXPOWER_CCK(10, 24) | + AR5K_TXPOWER_CCK(9, 16) | AR5K_TXPOWER_CCK(15, 8) | + AR5K_TXPOWER_CCK(8, 0), AR5K_PHY_TXPOWER_RATE3); + + ath5k_hw_reg_write(ah, AR5K_TXPOWER_CCK(14, 24) | + AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) | + AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4); + + /* FIXME: TPC support */ + if (ah->ah_txpower.txp_tpc) { + ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE | + AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); + + ath5k_hw_reg_write(ah, + AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_ACK) | + AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CTS) | + AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP), + AR5K_TPC); + } else { + ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX | + AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); + } + + return 0; +} + +int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower) +{ + /*Just a try M.F.*/ + struct ieee80211_channel *channel = ah->ah_current_channel; + u8 ee_mode; + + ATH5K_TRACE(ah->ah_sc); + + switch (channel->hw_value & CHANNEL_MODES) { + case CHANNEL_A: + case CHANNEL_T: + case CHANNEL_XR: + ee_mode = AR5K_EEPROM_MODE_11A; + break; + case CHANNEL_G: + case CHANNEL_TG: + ee_mode = AR5K_EEPROM_MODE_11G; + break; + case CHANNEL_B: + ee_mode = AR5K_EEPROM_MODE_11B; + break; + default: + ATH5K_ERR(ah->ah_sc, + "invalid channel: %d\n", channel->center_freq); + return -EINVAL; + } + + ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER, + "changing txpower to %d\n", txpower); + + return ath5k_hw_txpower(ah, channel, ee_mode, txpower); +} + +#undef _ATH5K_PHY diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c new file mode 100644 index 0000000..9122a85 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/qcu.c @@ -0,0 +1,555 @@ +/* + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2008 Nick Kossifidis + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + */ + +/********************************************\ +Queue Control Unit, DFS Control Unit Functions +\********************************************/ + +#include "ath5k.h" +#include "reg.h" +#include "debug.h" +#include "base.h" + +/* + * Get properties for a transmit queue + */ +int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, + struct ath5k_txq_info *queue_info) +{ + ATH5K_TRACE(ah->ah_sc); + memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info)); + return 0; +} + +/* + * Set properties for a transmit queue + */ +int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, + const struct ath5k_txq_info *queue_info) +{ + ATH5K_TRACE(ah->ah_sc); + AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); + + if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) + return -EIO; + + memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info)); + + /*XXX: Is this supported on 5210 ?*/ + if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA && + ((queue_info->tqi_subtype == AR5K_WME_AC_VI) || + (queue_info->tqi_subtype == AR5K_WME_AC_VO))) || + queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD) + ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS; + + return 0; +} + +/* + * Initialize a transmit queue + */ +int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, + struct ath5k_txq_info *queue_info) +{ + unsigned int queue; + int ret; + + ATH5K_TRACE(ah->ah_sc); + + /* + * Get queue by type + */ + /*5210 only has 2 queues*/ + if (ah->ah_version == AR5K_AR5210) { + switch (queue_type) { + case AR5K_TX_QUEUE_DATA: + queue = AR5K_TX_QUEUE_ID_NOQCU_DATA; + break; + case AR5K_TX_QUEUE_BEACON: + case AR5K_TX_QUEUE_CAB: + queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON; + break; + default: + return -EINVAL; + } + } else { + switch (queue_type) { + case AR5K_TX_QUEUE_DATA: + for (queue = AR5K_TX_QUEUE_ID_DATA_MIN; + ah->ah_txq[queue].tqi_type != + AR5K_TX_QUEUE_INACTIVE; queue++) { + + if (queue > AR5K_TX_QUEUE_ID_DATA_MAX) + return -EINVAL; + } + break; + case AR5K_TX_QUEUE_UAPSD: + queue = AR5K_TX_QUEUE_ID_UAPSD; + break; + case AR5K_TX_QUEUE_BEACON: + queue = AR5K_TX_QUEUE_ID_BEACON; + break; + case AR5K_TX_QUEUE_CAB: + queue = AR5K_TX_QUEUE_ID_CAB; + break; + case AR5K_TX_QUEUE_XR_DATA: + if (ah->ah_version != AR5K_AR5212) + ATH5K_ERR(ah->ah_sc, + "XR data queues only supported in" + " 5212!\n"); + queue = AR5K_TX_QUEUE_ID_XR_DATA; + break; + default: + return -EINVAL; + } + } + + /* + * Setup internal queue structure + */ + memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info)); + ah->ah_txq[queue].tqi_type = queue_type; + + if (queue_info != NULL) { + queue_info->tqi_type = queue_type; + ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info); + if (ret) + return ret; + } + + /* + * We use ah_txq_status to hold a temp value for + * the Secondary interrupt mask registers on 5211+ + * check out ath5k_hw_reset_tx_queue + */ + AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue); + + return queue; +} + +/* + * Get number of pending frames + * for a specific queue [5211+] + */ +u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) +{ + u32 pending; + ATH5K_TRACE(ah->ah_sc); + AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); + + /* Return if queue is declared inactive */ + if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) + return false; + + /* XXX: How about AR5K_CFG_TXCNT ? 
*/ + if (ah->ah_version == AR5K_AR5210) + return false; + + pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue)); + pending &= AR5K_QCU_STS_FRMPENDCNT; + + /* It's possible to have no frames pending even if TXE + * is set. To indicate that q has not stopped return + * true */ + if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) + return true; + + return pending; +} + +/* + * Set a transmit queue inactive + */ +void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) +{ + ATH5K_TRACE(ah->ah_sc); + if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) + return; + + /* This queue will be skipped in further operations */ + ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE; + /*For SIMR setup*/ + AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue); +} + +/* + * Set DFS properties for a transmit queue on DCU + */ +int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) +{ + u32 cw_min, cw_max, retry_lg, retry_sh; + struct ath5k_txq_info *tq = &ah->ah_txq[queue]; + + ATH5K_TRACE(ah->ah_sc); + AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); + + tq = &ah->ah_txq[queue]; + + if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE) + return 0; + + if (ah->ah_version == AR5K_AR5210) { + /* Only handle data queues, others will be ignored */ + if (tq->tqi_type != AR5K_TX_QUEUE_DATA) + return 0; + + /* Set Slot time */ + ath5k_hw_reg_write(ah, ah->ah_turbo ? + AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME, + AR5K_SLOT_TIME); + /* Set ACK_CTS timeout */ + ath5k_hw_reg_write(ah, ah->ah_turbo ? + AR5K_INIT_ACK_CTS_TIMEOUT_TURBO : + AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME); + /* Set Transmit Latency */ + ath5k_hw_reg_write(ah, ah->ah_turbo ? + AR5K_INIT_TRANSMIT_LATENCY_TURBO : + AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210); + + /* Set IFS0 */ + if (ah->ah_turbo) { + ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO + + (ah->ah_aifs + tq->tqi_aifs) * + AR5K_INIT_SLOT_TIME_TURBO) << + AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO, + AR5K_IFS0); + } else { + ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS + + (ah->ah_aifs + tq->tqi_aifs) * + AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) | + AR5K_INIT_SIFS, AR5K_IFS0); + } + + /* Set IFS1 */ + ath5k_hw_reg_write(ah, ah->ah_turbo ? + AR5K_INIT_PROTO_TIME_CNTRL_TURBO : + AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1); + /* Set AR5K_PHY_SETTLING */ + ath5k_hw_reg_write(ah, ah->ah_turbo ? + (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F) + | 0x38 : + (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F) + | 0x1C, + AR5K_PHY_SETTLING); + /* Set Frame Control Register */ + ath5k_hw_reg_write(ah, ah->ah_turbo ? + (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE | + AR5K_PHY_TURBO_SHORT | 0x2020) : + (AR5K_PHY_FRAME_CTL_INI | 0x1020), + AR5K_PHY_FRAME_CTL_5210); + } + + /* + * Calculate cwmin/max by channel mode + */ + cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN; + cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX; + ah->ah_aifs = AR5K_TUNE_AIFS; + /*XR is only supported on 5212*/ + if (IS_CHAN_XR(ah->ah_current_channel) && + ah->ah_version == AR5K_AR5212) { + cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR; + cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR; + ah->ah_aifs = AR5K_TUNE_AIFS_XR; + /*B mode is not supported on 5210*/ + } else if (IS_CHAN_B(ah->ah_current_channel) && + ah->ah_version != AR5K_AR5210) { + cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B; + cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B; + ah->ah_aifs = AR5K_TUNE_AIFS_11B; + } + + cw_min = 1; + while (cw_min < ah->ah_cw_min) + cw_min = (cw_min << 1) | 1; + + cw_min = tq->tqi_cw_min < 0 ? 
(cw_min >> (-tq->tqi_cw_min)) : + ((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1); + cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) : + ((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1); + + /* + * Calculate and set retry limits + */ + if (ah->ah_software_retry) { + /* XXX Need to test this */ + retry_lg = ah->ah_limit_tx_retries; + retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ? + AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg; + } else { + retry_lg = AR5K_INIT_LG_RETRY; + retry_sh = AR5K_INIT_SH_RETRY; + } + + /*No QCU/DCU [5210]*/ + if (ah->ah_version == AR5K_AR5210) { + ath5k_hw_reg_write(ah, + (cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S) + | AR5K_REG_SM(AR5K_INIT_SLG_RETRY, + AR5K_NODCU_RETRY_LMT_SLG_RETRY) + | AR5K_REG_SM(AR5K_INIT_SSH_RETRY, + AR5K_NODCU_RETRY_LMT_SSH_RETRY) + | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY) + | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY), + AR5K_NODCU_RETRY_LMT); + } else { + /*QCU/DCU [5211+]*/ + ath5k_hw_reg_write(ah, + AR5K_REG_SM(AR5K_INIT_SLG_RETRY, + AR5K_DCU_RETRY_LMT_SLG_RETRY) | + AR5K_REG_SM(AR5K_INIT_SSH_RETRY, + AR5K_DCU_RETRY_LMT_SSH_RETRY) | + AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) | + AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY), + AR5K_QUEUE_DFS_RETRY_LIMIT(queue)); + + /*===Rest is also for QCU/DCU only [5211+]===*/ + + /* + * Set initial content window (cw_min/cw_max) + * and arbitrated interframe space (aifs)... + */ + ath5k_hw_reg_write(ah, + AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) | + AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) | + AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs, + AR5K_DCU_LCL_IFS_AIFS), + AR5K_QUEUE_DFS_LOCAL_IFS(queue)); + + /* + * Set misc registers + */ + /* Enable DCU early termination for this queue */ + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), + AR5K_QCU_MISC_DCU_EARLY); + + /* Enable DCU to wait for next fragment from QCU */ + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), + AR5K_DCU_MISC_FRAG_WAIT); + + /* On Maui and Spirit use the global seqnum on DCU */ + if (ah->ah_mac_version < AR5K_SREV_AR5211) + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), + AR5K_DCU_MISC_SEQNUM_CTL); + + if (tq->tqi_cbr_period) { + ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period, + AR5K_QCU_CBRCFG_INTVAL) | + AR5K_REG_SM(tq->tqi_cbr_overflow_limit, + AR5K_QCU_CBRCFG_ORN_THRES), + AR5K_QUEUE_CBRCFG(queue)); + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), + AR5K_QCU_MISC_FRSHED_CBR); + if (tq->tqi_cbr_overflow_limit) + AR5K_REG_ENABLE_BITS(ah, + AR5K_QUEUE_MISC(queue), + AR5K_QCU_MISC_CBR_THRES_ENABLE); + } + + if (tq->tqi_ready_time && + (tq->tqi_type != AR5K_TX_QUEUE_CAB)) + ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time, + AR5K_QCU_RDYTIMECFG_INTVAL) | + AR5K_QCU_RDYTIMECFG_ENABLE, + AR5K_QUEUE_RDYTIMECFG(queue)); + + if (tq->tqi_burst_time) { + ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time, + AR5K_DCU_CHAN_TIME_DUR) | + AR5K_DCU_CHAN_TIME_ENABLE, + AR5K_QUEUE_DFS_CHANNEL_TIME(queue)); + + if (tq->tqi_flags + & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE) + AR5K_REG_ENABLE_BITS(ah, + AR5K_QUEUE_MISC(queue), + AR5K_QCU_MISC_RDY_VEOL_POLICY); + } + + if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE) + ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS, + AR5K_QUEUE_DFS_MISC(queue)); + + if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) + ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG, + AR5K_QUEUE_DFS_MISC(queue)); + + /* + * Set registers by queue type + */ + switch (tq->tqi_type) { + case 
AR5K_TX_QUEUE_BEACON: + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), + AR5K_QCU_MISC_FRSHED_DBA_GT | + AR5K_QCU_MISC_CBREXP_BCN_DIS | + AR5K_QCU_MISC_BCN_ENABLE); + + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), + (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL << + AR5K_DCU_MISC_ARBLOCK_CTL_S) | + AR5K_DCU_MISC_ARBLOCK_IGNORE | + AR5K_DCU_MISC_POST_FR_BKOFF_DIS | + AR5K_DCU_MISC_BCN_ENABLE); + break; + + case AR5K_TX_QUEUE_CAB: + /* XXX: use BCN_SENT_GT, if we can figure out how */ + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), + AR5K_QCU_MISC_FRSHED_DBA_GT | + AR5K_QCU_MISC_CBREXP_DIS | + AR5K_QCU_MISC_CBREXP_BCN_DIS); + + ath5k_hw_reg_write(ah, ((tq->tqi_ready_time - + (AR5K_TUNE_SW_BEACON_RESP - + AR5K_TUNE_DMA_BEACON_RESP) - + AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) | + AR5K_QCU_RDYTIMECFG_ENABLE, + AR5K_QUEUE_RDYTIMECFG(queue)); + + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), + (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL << + AR5K_DCU_MISC_ARBLOCK_CTL_S)); + break; + + case AR5K_TX_QUEUE_UAPSD: + AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), + AR5K_QCU_MISC_CBREXP_DIS); + break; + + case AR5K_TX_QUEUE_DATA: + default: + break; + } + + /* TODO: Handle frame compression */ + + /* + * Enable interrupts for this tx queue + * in the secondary interrupt mask registers + */ + if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE) + AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue); + + if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE) + AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue); + + if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE) + AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue); + + if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE) + AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue); + + if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE) + AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue); + + if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE) + AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue); + + if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE) + AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue); + + if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE) + AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue); + + if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE) + AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue); + + /* Update secondary interrupt mask registers */ + + /* Filter out inactive queues */ + ah->ah_txq_imr_txok &= ah->ah_txq_status; + ah->ah_txq_imr_txerr &= ah->ah_txq_status; + ah->ah_txq_imr_txurn &= ah->ah_txq_status; + ah->ah_txq_imr_txdesc &= ah->ah_txq_status; + ah->ah_txq_imr_txeol &= ah->ah_txq_status; + ah->ah_txq_imr_cbrorn &= ah->ah_txq_status; + ah->ah_txq_imr_cbrurn &= ah->ah_txq_status; + ah->ah_txq_imr_qtrig &= ah->ah_txq_status; + ah->ah_txq_imr_nofrm &= ah->ah_txq_status; + + ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok, + AR5K_SIMR0_QCU_TXOK) | + AR5K_REG_SM(ah->ah_txq_imr_txdesc, + AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0); + ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr, + AR5K_SIMR1_QCU_TXERR) | + AR5K_REG_SM(ah->ah_txq_imr_txeol, + AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1); + /* Update simr2 but don't overwrite rest simr2 settings */ + AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN); + AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2, + AR5K_REG_SM(ah->ah_txq_imr_txurn, + AR5K_SIMR2_QCU_TXURN)); + ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn, + AR5K_SIMR3_QCBRORN) | + AR5K_REG_SM(ah->ah_txq_imr_cbrurn, + AR5K_SIMR3_QCBRURN), AR5K_SIMR3); + ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig, + AR5K_SIMR4_QTRIG), AR5K_SIMR4); + /* 
Set TXNOFRM_QCU for the queues with TXNOFRM enabled */ + ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm, + AR5K_TXNOFRM_QCU), AR5K_TXNOFRM); + /* No queue has TXNOFRM enabled, disable the interrupt + * by setting AR5K_TXNOFRM to zero */ + if (ah->ah_txq_imr_nofrm == 0) + ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM); + + /* Set QCU mask for this DCU to save power */ + AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue); + } + + return 0; +} + +/* + * Get slot time from DCU + */ +unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah) +{ + unsigned int slot_time_clock; + + ATH5K_TRACE(ah->ah_sc); + + if (ah->ah_version == AR5K_AR5210) + slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME); + else + slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT); + + return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff); +} + +/* + * Set slot time on DCU + */ +int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time) +{ + u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); + + ATH5K_TRACE(ah->ah_sc); + + if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX) + return -EINVAL; + + if (ah->ah_version == AR5K_AR5210) + ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME); + else + ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT); + + return 0; +} + diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h new file mode 100644 index 0000000..4cb9c5d --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/reg.h @@ -0,0 +1,2587 @@ +/* + * Copyright (c) 2006-2008 Nick Kossifidis + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2007-2008 Michael Taylor + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* + * Register values for Atheros 5210/5211/5212 cards from OpenBSD's ar5k + * maintained by Reyk Floeter + * + * I tried to document those registers by looking at ar5k code, some + * 802.11 (802.11e mostly) papers and by reading various public available + * Atheros presentations and papers like these: + * + * 5210 - http://nova.stanford.edu/~bbaas/ps/isscc2002_slides.pdf + * http://www.it.iitb.ac.in/~janak/wifire/01222734.pdf + * + * 5211 - http://www.hotchips.org/archives/hc14/3_Tue/16_mcfarland.pdf + * + * This file also contains register values found on a memory dump of + * Atheros's ART program (Atheros Radio Test), on ath9k, on legacy-hal + * released by Atheros and on various debug messages found on the net. + */ + +#include "../reg.h" + +/*====MAC DMA REGISTERS====*/ + +/* + * AR5210-Specific TXDP registers + * 5210 has only 2 transmit queues so no DCU/QCU, just + * 2 transmit descriptor pointers... 
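+ *
+ * Roughly speaking, starting TX on 5210 means writing the physical
+ * address of the first descriptor to one of these registers and then
+ * raising the matching TX enable bit in AR5K_CR, e.g. for the data
+ * queue something like (desc_paddr being the descriptor's dma address):
+ *
+ *	ath5k_hw_reg_write(ah, desc_paddr, AR5K_NOQCU_TXDP0);
+ *	AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_TXE0);
+ *
+ * 5211 and later parts use the per-queue AR5K_QUEUE_TXDP() registers
+ * defined further down instead.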
+ */ +#define AR5K_NOQCU_TXDP0 0x0000 /* Queue 0 - data */ +#define AR5K_NOQCU_TXDP1 0x0004 /* Queue 1 - beacons */ + +/* + * Mac Control Register + */ +#define AR5K_CR 0x0008 /* Register Address */ +#define AR5K_CR_TXE0 0x00000001 /* TX Enable for queue 0 on 5210 */ +#define AR5K_CR_TXE1 0x00000002 /* TX Enable for queue 1 on 5210 */ +#define AR5K_CR_RXE 0x00000004 /* RX Enable */ +#define AR5K_CR_TXD0 0x00000008 /* TX Disable for queue 0 on 5210 */ +#define AR5K_CR_TXD1 0x00000010 /* TX Disable for queue 1 on 5210 */ +#define AR5K_CR_RXD 0x00000020 /* RX Disable */ +#define AR5K_CR_SWI 0x00000040 /* Software Interrupt */ + +/* + * RX Descriptor Pointer register + */ +#define AR5K_RXDP 0x000c + +/* + * Configuration and status register + */ +#define AR5K_CFG 0x0014 /* Register Address */ +#define AR5K_CFG_SWTD 0x00000001 /* Byte-swap TX descriptor (for big endian archs) */ +#define AR5K_CFG_SWTB 0x00000002 /* Byte-swap TX buffer */ +#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */ +#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer */ +#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register access */ +#define AR5K_CFG_IBSS 0x00000020 /* 0-BSS, 1-IBSS [5211+] */ +#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */ +#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */ +#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (Disable dynamic clock) */ +#define AR5K_CFG_TXCNT 0x00007800 /* Tx frame count (?) [5210] */ +#define AR5K_CFG_TXCNT_S 11 +#define AR5K_CFG_TXFSTAT 0x00008000 /* Tx frame status (?) [5210] */ +#define AR5K_CFG_TXFSTRT 0x00010000 /* [5210] */ +#define AR5K_CFG_PCI_THRES 0x00060000 /* PCI Master req q threshold [5211+] */ +#define AR5K_CFG_PCI_THRES_S 17 + +/* + * Interrupt enable register + */ +#define AR5K_IER 0x0024 /* Register Address */ +#define AR5K_IER_DISABLE 0x00000000 /* Disable card interrupts */ +#define AR5K_IER_ENABLE 0x00000001 /* Enable card interrupts */ + + +/* + * 0x0028 is Beacon Control Register on 5210 + * and first RTS duration register on 5211 + */ + +/* + * Beacon control register [5210] + */ +#define AR5K_BCR 0x0028 /* Register Address */ +#define AR5K_BCR_AP 0x00000000 /* AP mode */ +#define AR5K_BCR_ADHOC 0x00000001 /* Ad-Hoc mode */ +#define AR5K_BCR_BDMAE 0x00000002 /* DMA enable */ +#define AR5K_BCR_TQ1FV 0x00000004 /* Use Queue1 for CAB traffic */ +#define AR5K_BCR_TQ1V 0x00000008 /* Use Queue1 for Beacon traffic */ +#define AR5K_BCR_BCGET 0x00000010 + +/* + * First RTS duration register [5211] + */ +#define AR5K_RTSD0 0x0028 /* Register Address */ +#define AR5K_RTSD0_6 0x000000ff /* 6Mb RTS duration mask (?) */ +#define AR5K_RTSD0_6_S 0 /* 6Mb RTS duration shift (?) */ +#define AR5K_RTSD0_9 0x0000ff00 /* 9Mb*/ +#define AR5K_RTSD0_9_S 8 +#define AR5K_RTSD0_12 0x00ff0000 /* 12Mb*/ +#define AR5K_RTSD0_12_S 16 +#define AR5K_RTSD0_18 0xff000000 /* 16Mb*/ +#define AR5K_RTSD0_18_S 24 + + +/* + * 0x002c is Beacon Status Register on 5210 + * and second RTS duration register on 5211 + */ + +/* + * Beacon status register [5210] + * + * As i can see in ar5k_ar5210_tx_start Reyk uses some of the values of BCR + * for this register, so i guess TQ1V,TQ1FV and BDMAE have the same meaning + * here and SNP/SNAP means "snapshot" (so this register gets synced with BCR). + * So SNAPPEDBCRVALID sould also stand for "snapped BCR -values- valid", so i + * renamed it to SNAPSHOTSVALID to make more sense. I realy have no idea what + * else can it be. I also renamed SNPBCMD to SNPADHOC to match BCR. 
+ */ +#define AR5K_BSR 0x002c /* Register Address */ +#define AR5K_BSR_BDLYSW 0x00000001 /* SW Beacon delay (?) */ +#define AR5K_BSR_BDLYDMA 0x00000002 /* DMA Beacon delay (?) */ +#define AR5K_BSR_TXQ1F 0x00000004 /* Beacon queue (1) finished */ +#define AR5K_BSR_ATIMDLY 0x00000008 /* ATIM delay (?) */ +#define AR5K_BSR_SNPADHOC 0x00000100 /* Ad-hoc mode set (?) */ +#define AR5K_BSR_SNPBDMAE 0x00000200 /* Beacon DMA enabled (?) */ +#define AR5K_BSR_SNPTQ1FV 0x00000400 /* Queue1 is used for CAB traffic (?) */ +#define AR5K_BSR_SNPTQ1V 0x00000800 /* Queue1 is used for Beacon traffic (?) */ +#define AR5K_BSR_SNAPSHOTSVALID 0x00001000 /* BCR snapshots are valid (?) */ +#define AR5K_BSR_SWBA_CNT 0x00ff0000 + +/* + * Second RTS duration register [5211] + */ +#define AR5K_RTSD1 0x002c /* Register Address */ +#define AR5K_RTSD1_24 0x000000ff /* 24Mb */ +#define AR5K_RTSD1_24_S 0 +#define AR5K_RTSD1_36 0x0000ff00 /* 36Mb */ +#define AR5K_RTSD1_36_S 8 +#define AR5K_RTSD1_48 0x00ff0000 /* 48Mb */ +#define AR5K_RTSD1_48_S 16 +#define AR5K_RTSD1_54 0xff000000 /* 54Mb */ +#define AR5K_RTSD1_54_S 24 + + +/* + * Transmit configuration register + */ +#define AR5K_TXCFG 0x0030 /* Register Address */ +#define AR5K_TXCFG_SDMAMR 0x00000007 /* DMA size (read) */ +#define AR5K_TXCFG_SDMAMR_S 0 +#define AR5K_TXCFG_B_MODE 0x00000008 /* Set b mode for 5111 (enable 2111) */ +#define AR5K_TXCFG_TXFSTP 0x00000008 /* TX DMA full Stop [5210] */ +#define AR5K_TXCFG_TXFULL 0x000003f0 /* TX Triger level mask */ +#define AR5K_TXCFG_TXFULL_S 4 +#define AR5K_TXCFG_TXFULL_0B 0x00000000 +#define AR5K_TXCFG_TXFULL_64B 0x00000010 +#define AR5K_TXCFG_TXFULL_128B 0x00000020 +#define AR5K_TXCFG_TXFULL_192B 0x00000030 +#define AR5K_TXCFG_TXFULL_256B 0x00000040 +#define AR5K_TXCFG_TXCONT_EN 0x00000080 +#define AR5K_TXCFG_DMASIZE 0x00000100 /* Flag for passing DMA size [5210] */ +#define AR5K_TXCFG_JUMBO_DESC_EN 0x00000400 /* Enable jumbo tx descriptors [5211+] */ +#define AR5K_TXCFG_ADHOC_BCN_ATIM 0x00000800 /* Adhoc Beacon ATIM Policy */ +#define AR5K_TXCFG_ATIM_WINDOW_DEF_DIS 0x00001000 /* Disable ATIM window defer [5211+] */ +#define AR5K_TXCFG_RTSRND 0x00001000 /* [5211+] */ +#define AR5K_TXCFG_FRMPAD_DIS 0x00002000 /* [5211+] */ +#define AR5K_TXCFG_RDY_CBR_DIS 0x00004000 /* Ready time CBR disable [5211+] */ +#define AR5K_TXCFG_JUMBO_FRM_MODE 0x00008000 /* Jumbo frame mode [5211+] */ +#define AR5K_TXCFG_DCU_DBL_BUF_DIS 0x00008000 /* Disable double buffering on DCU */ +#define AR5K_TXCFG_DCU_CACHING_DIS 0x00010000 /* Disable DCU caching */ + +/* + * Receive configuration register + */ +#define AR5K_RXCFG 0x0034 /* Register Address */ +#define AR5K_RXCFG_SDMAMW 0x00000007 /* DMA size (write) */ +#define AR5K_RXCFG_SDMAMW_S 0 +#define AR5K_RXCFG_ZLFDMA 0x00000008 /* Enable Zero-length frame DMA */ +#define AR5K_RXCFG_DEF_ANTENNA 0x00000010 /* Default antenna (?) */ +#define AR5K_RXCFG_JUMBO_RXE 0x00000020 /* Enable jumbo rx descriptors [5211+] */ +#define AR5K_RXCFG_JUMBO_WRAP 0x00000040 /* Wrap jumbo frames [5211+] */ +#define AR5K_RXCFG_SLE_ENTRY 0x00000080 /* Sleep entry policy */ + +/* + * Receive jumbo descriptor last address register + * Only found in 5211 (?) 
+ */ +#define AR5K_RXJLA 0x0038 + +/* + * MIB control register + */ +#define AR5K_MIBC 0x0040 /* Register Address */ +#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */ +#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */ +#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */ +#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */ + +/* + * Timeout prescale register + */ +#define AR5K_TOPS 0x0044 +#define AR5K_TOPS_M 0x0000ffff + +/* + * Receive timeout register (no frame received) + */ +#define AR5K_RXNOFRM 0x0048 +#define AR5K_RXNOFRM_M 0x000003ff + +/* + * Transmit timeout register (no frame sent) + */ +#define AR5K_TXNOFRM 0x004c +#define AR5K_TXNOFRM_M 0x000003ff +#define AR5K_TXNOFRM_QCU 0x000ffc00 +#define AR5K_TXNOFRM_QCU_S 10 + +/* + * Receive frame gap timeout register + */ +#define AR5K_RPGTO 0x0050 +#define AR5K_RPGTO_M 0x000003ff + +/* + * Receive frame count limit register + */ +#define AR5K_RFCNT 0x0054 +#define AR5K_RFCNT_M 0x0000001f /* [5211+] (?) */ +#define AR5K_RFCNT_RFCL 0x0000000f /* [5210] */ + +/* + * Misc settings register + * (reserved0-3) + */ +#define AR5K_MISC 0x0058 /* Register Address */ +#define AR5K_MISC_DMA_OBS_M 0x000001e0 +#define AR5K_MISC_DMA_OBS_S 5 +#define AR5K_MISC_MISC_OBS_M 0x00000e00 +#define AR5K_MISC_MISC_OBS_S 9 +#define AR5K_MISC_MAC_OBS_LSB_M 0x00007000 +#define AR5K_MISC_MAC_OBS_LSB_S 12 +#define AR5K_MISC_MAC_OBS_MSB_M 0x00038000 +#define AR5K_MISC_MAC_OBS_MSB_S 15 +#define AR5K_MISC_LED_DECAY 0x001c0000 /* [5210] */ +#define AR5K_MISC_LED_BLINK 0x00e00000 /* [5210] */ + +/* + * QCU/DCU clock gating register (5311) + * (reserved4-5) + */ +#define AR5K_QCUDCU_CLKGT 0x005c /* Register Address (?) */ +#define AR5K_QCUDCU_CLKGT_QCU 0x0000ffff /* Mask for QCU clock */ +#define AR5K_QCUDCU_CLKGT_DCU 0x07ff0000 /* Mask for DCU clock */ + +/* + * Interrupt Status Registers + * + * For 5210 there is only one status register but for + * 5211/5212 we have one primary and 4 secondary registers. + * So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212. + * Most of these bits are common for all chipsets. + */ +#define AR5K_ISR 0x001c /* Register Address [5210] */ +#define AR5K_PISR 0x0080 /* Register Address [5211+] */ +#define AR5K_ISR_RXOK 0x00000001 /* Frame successfuly recieved */ +#define AR5K_ISR_RXDESC 0x00000002 /* RX descriptor request */ +#define AR5K_ISR_RXERR 0x00000004 /* Receive error */ +#define AR5K_ISR_RXNOFRM 0x00000008 /* No frame received (receive timeout) */ +#define AR5K_ISR_RXEOL 0x00000010 /* Empty RX descriptor */ +#define AR5K_ISR_RXORN 0x00000020 /* Receive FIFO overrun */ +#define AR5K_ISR_TXOK 0x00000040 /* Frame successfuly transmited */ +#define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */ +#define AR5K_ISR_TXERR 0x00000100 /* Transmit error */ +#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmited (transmit timeout) */ +#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */ +#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */ +#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */ +#define AR5K_ISR_SWI 0x00002000 /* Software interrupt */ +#define AR5K_ISR_RXPHY 0x00004000 /* PHY error */ +#define AR5K_ISR_RXKCM 0x00008000 /* RX Key cache miss */ +#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */ +#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) 
*/ +#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */ +#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ +#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */ +#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */ +#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */ +#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */ +#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */ +#define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */ +#define AR5K_ISR_TIM 0x00800000 /* [5211+] */ +#define AR5K_ISR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT, + CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */ +#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */ +#define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */ +#define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */ +#define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */ + +/* + * Secondary status registers [5211+] (0 - 4) + * + * These give the status for each QCU, only QCUs 0-9 are + * represented. + */ +#define AR5K_SISR0 0x0084 /* Register Address [5211+] */ +#define AR5K_SISR0_QCU_TXOK 0x000003ff /* Mask for QCU_TXOK */ +#define AR5K_SISR0_QCU_TXOK_S 0 +#define AR5K_SISR0_QCU_TXDESC 0x03ff0000 /* Mask for QCU_TXDESC */ +#define AR5K_SISR0_QCU_TXDESC_S 16 + +#define AR5K_SISR1 0x0088 /* Register Address [5211+] */ +#define AR5K_SISR1_QCU_TXERR 0x000003ff /* Mask for QCU_TXERR */ +#define AR5K_SISR1_QCU_TXERR_S 0 +#define AR5K_SISR1_QCU_TXEOL 0x03ff0000 /* Mask for QCU_TXEOL */ +#define AR5K_SISR1_QCU_TXEOL_S 16 + +#define AR5K_SISR2 0x008c /* Register Address [5211+] */ +#define AR5K_SISR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */ +#define AR5K_SISR2_QCU_TXURN_S 0 +#define AR5K_SISR2_MCABT 0x00010000 /* Master Cycle Abort */ +#define AR5K_SISR2_SSERR 0x00020000 /* Signaled System Error */ +#define AR5K_SISR2_DPERR 0x00040000 /* Bus parity error */ +#define AR5K_SISR2_TIM 0x01000000 /* [5212+] */ +#define AR5K_SISR2_CAB_END 0x02000000 /* [5212+] */ +#define AR5K_SISR2_DTIM_SYNC 0x04000000 /* DTIM sync lost [5212+] */ +#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */ +#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */ +#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */ +#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF OOR (?) */ + +#define AR5K_SISR3 0x0090 /* Register Address [5211+] */ +#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */ +#define AR5K_SISR3_QCBRORN_S 0 +#define AR5K_SISR3_QCBRURN 0x03ff0000 /* Mask for QCBRURN */ +#define AR5K_SISR3_QCBRURN_S 16 + +#define AR5K_SISR4 0x0094 /* Register Address [5211+] */ +#define AR5K_SISR4_QTRIG 0x000003ff /* Mask for QTRIG */ +#define AR5K_SISR4_QTRIG_S 0 + +/* + * Shadow read-and-clear interrupt status registers [5211+] + */ +#define AR5K_RAC_PISR 0x00c0 /* Read and clear PISR */ +#define AR5K_RAC_SISR0 0x00c4 /* Read and clear SISR0 */ +#define AR5K_RAC_SISR1 0x00c8 /* Read and clear SISR1 */ +#define AR5K_RAC_SISR2 0x00cc /* Read and clear SISR2 */ +#define AR5K_RAC_SISR3 0x00d0 /* Read and clear SISR3 */ +#define AR5K_RAC_SISR4 0x00d4 /* Read and clear SISR4 */ + +/* + * Interrupt Mask Registers + * + * As whith ISRs 5210 has one IMR (AR5K_IMR) and 5211/5212 has one primary + * (AR5K_PIMR) and 4 secondary IMRs (AR5K_SIMRx). Note that ISR/IMR flags match. 
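+ *
+ * For example AR5K_IMR_RXOK and AR5K_ISR_RXOK are both 0x00000001, so
+ * the same flag can be used both when building the interrupt mask and
+ * when decoding a PISR read, e.g.:
+ *
+ *	if (ath5k_hw_reg_read(ah, AR5K_PISR) & AR5K_ISR_RXOK)
+ *		;	/* a frame was received successfully */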
+ */ +#define AR5K_IMR 0x0020 /* Register Address [5210] */ +#define AR5K_PIMR 0x00a0 /* Register Address [5211+] */ +#define AR5K_IMR_RXOK 0x00000001 /* Frame successfuly recieved*/ +#define AR5K_IMR_RXDESC 0x00000002 /* RX descriptor request*/ +#define AR5K_IMR_RXERR 0x00000004 /* Receive error*/ +#define AR5K_IMR_RXNOFRM 0x00000008 /* No frame received (receive timeout)*/ +#define AR5K_IMR_RXEOL 0x00000010 /* Empty RX descriptor*/ +#define AR5K_IMR_RXORN 0x00000020 /* Receive FIFO overrun*/ +#define AR5K_IMR_TXOK 0x00000040 /* Frame successfuly transmited*/ +#define AR5K_IMR_TXDESC 0x00000080 /* TX descriptor request*/ +#define AR5K_IMR_TXERR 0x00000100 /* Transmit error*/ +#define AR5K_IMR_TXNOFRM 0x00000200 /* No frame transmited (transmit timeout)*/ +#define AR5K_IMR_TXEOL 0x00000400 /* Empty TX descriptor*/ +#define AR5K_IMR_TXURN 0x00000800 /* Transmit FIFO underrun*/ +#define AR5K_IMR_MIB 0x00001000 /* Update MIB counters*/ +#define AR5K_IMR_SWI 0x00002000 /* Software interrupt */ +#define AR5K_IMR_RXPHY 0x00004000 /* PHY error*/ +#define AR5K_IMR_RXKCM 0x00008000 /* RX Key cache miss */ +#define AR5K_IMR_SWBA 0x00010000 /* Software beacon alert*/ +#define AR5K_IMR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */ +#define AR5K_IMR_BMISS 0x00040000 /* Beacon missed*/ +#define AR5K_IMR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ +#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */ +#define AR5K_IMR_MCABT 0x00100000 /* Master Cycle Abort [5210] */ +#define AR5K_IMR_RXCHIRP 0x00200000 /* CHIRP Received [5212+]*/ +#define AR5K_IMR_SSERR 0x00200000 /* Signaled System Error [5210] */ +#define AR5K_IMR_DPERR 0x00400000 /* Det par Error (?) [5210] */ +#define AR5K_IMR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */ +#define AR5K_IMR_TIM 0x00800000 /* [5211+] */ +#define AR5K_IMR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT, + CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */ +#define AR5K_IMR_GPIO 0x01000000 /* GPIO (rf kill)*/ +#define AR5K_IMR_QCBRORN 0x02000000 /* QCU CBR overrun (?) [5211+] */ +#define AR5K_IMR_QCBRURN 0x04000000 /* QCU CBR underrun (?) 
[5211+] */ +#define AR5K_IMR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */ + +/* + * Secondary interrupt mask registers [5211+] (0 - 4) + */ +#define AR5K_SIMR0 0x00a4 /* Register Address [5211+] */ +#define AR5K_SIMR0_QCU_TXOK 0x000003ff /* Mask for QCU_TXOK */ +#define AR5K_SIMR0_QCU_TXOK_S 0 +#define AR5K_SIMR0_QCU_TXDESC 0x03ff0000 /* Mask for QCU_TXDESC */ +#define AR5K_SIMR0_QCU_TXDESC_S 16 + +#define AR5K_SIMR1 0x00a8 /* Register Address [5211+] */ +#define AR5K_SIMR1_QCU_TXERR 0x000003ff /* Mask for QCU_TXERR */ +#define AR5K_SIMR1_QCU_TXERR_S 0 +#define AR5K_SIMR1_QCU_TXEOL 0x03ff0000 /* Mask for QCU_TXEOL */ +#define AR5K_SIMR1_QCU_TXEOL_S 16 + +#define AR5K_SIMR2 0x00ac /* Register Address [5211+] */ +#define AR5K_SIMR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */ +#define AR5K_SIMR2_QCU_TXURN_S 0 +#define AR5K_SIMR2_MCABT 0x00010000 /* Master Cycle Abort */ +#define AR5K_SIMR2_SSERR 0x00020000 /* Signaled System Error */ +#define AR5K_SIMR2_DPERR 0x00040000 /* Bus parity error */ +#define AR5K_SIMR2_TIM 0x01000000 /* [5212+] */ +#define AR5K_SIMR2_CAB_END 0x02000000 /* [5212+] */ +#define AR5K_SIMR2_DTIM_SYNC 0x04000000 /* DTIM Sync lost [5212+] */ +#define AR5K_SIMR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */ +#define AR5K_SIMR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */ +#define AR5K_SIMR2_DTIM 0x20000000 /* [5212+] */ +#define AR5K_SIMR2_TSFOOR 0x80000000 /* TSF OOR (?) */ + +#define AR5K_SIMR3 0x00b0 /* Register Address [5211+] */ +#define AR5K_SIMR3_QCBRORN 0x000003ff /* Mask for QCBRORN */ +#define AR5K_SIMR3_QCBRORN_S 0 +#define AR5K_SIMR3_QCBRURN 0x03ff0000 /* Mask for QCBRURN */ +#define AR5K_SIMR3_QCBRURN_S 16 + +#define AR5K_SIMR4 0x00b4 /* Register Address [5211+] */ +#define AR5K_SIMR4_QTRIG 0x000003ff /* Mask for QTRIG */ +#define AR5K_SIMR4_QTRIG_S 0 + +/* + * DMA Debug registers 0-7 + * 0xe0 - 0xfc + */ + +/* + * Decompression mask registers [5212+] + */ +#define AR5K_DCM_ADDR 0x0400 /*Decompression mask address (index) */ +#define AR5K_DCM_DATA 0x0404 /*Decompression mask data */ + +/* + * Wake On Wireless pattern control register [5212+] + */ +#define AR5K_WOW_PCFG 0x0410 /* Register Address */ +#define AR5K_WOW_PCFG_PAT_MATCH_EN 0x00000001 /* Pattern match enable */ +#define AR5K_WOW_PCFG_LONG_FRAME_POL 0x00000002 /* Long frame policy */ +#define AR5K_WOW_PCFG_WOBMISS 0x00000004 /* Wake on bea(con) miss (?) */ +#define AR5K_WOW_PCFG_PAT_0_EN 0x00000100 /* Enable pattern 0 */ +#define AR5K_WOW_PCFG_PAT_1_EN 0x00000200 /* Enable pattern 1 */ +#define AR5K_WOW_PCFG_PAT_2_EN 0x00000400 /* Enable pattern 2 */ +#define AR5K_WOW_PCFG_PAT_3_EN 0x00000800 /* Enable pattern 3 */ +#define AR5K_WOW_PCFG_PAT_4_EN 0x00001000 /* Enable pattern 4 */ +#define AR5K_WOW_PCFG_PAT_5_EN 0x00002000 /* Enable pattern 5 */ + +/* + * Wake On Wireless pattern index register (?) 
[5212+] + */ +#define AR5K_WOW_PAT_IDX 0x0414 + +/* + * Wake On Wireless pattern data register [5212+] + */ +#define AR5K_WOW_PAT_DATA 0x0418 /* Register Address */ +#define AR5K_WOW_PAT_DATA_0_3_V 0x00000001 /* Pattern 0, 3 value */ +#define AR5K_WOW_PAT_DATA_1_4_V 0x00000100 /* Pattern 1, 4 value */ +#define AR5K_WOW_PAT_DATA_2_5_V 0x00010000 /* Pattern 2, 5 value */ +#define AR5K_WOW_PAT_DATA_0_3_M 0x01000000 /* Pattern 0, 3 mask */ +#define AR5K_WOW_PAT_DATA_1_4_M 0x04000000 /* Pattern 1, 4 mask */ +#define AR5K_WOW_PAT_DATA_2_5_M 0x10000000 /* Pattern 2, 5 mask */ + +/* + * Decompression configuration registers [5212+] + */ +#define AR5K_DCCFG 0x0420 /* Register Address */ +#define AR5K_DCCFG_GLOBAL_EN 0x00000001 /* Enable decompression on all queues */ +#define AR5K_DCCFG_BYPASS_EN 0x00000002 /* Bypass decompression */ +#define AR5K_DCCFG_BCAST_EN 0x00000004 /* Enable decompression for bcast frames */ +#define AR5K_DCCFG_MCAST_EN 0x00000008 /* Enable decompression for mcast frames */ + +/* + * Compression configuration registers [5212+] + */ +#define AR5K_CCFG 0x0600 /* Register Address */ +#define AR5K_CCFG_WINDOW_SIZE 0x00000007 /* Compression window size */ +#define AR5K_CCFG_CPC_EN 0x00000008 /* Enable performance counters */ + +#define AR5K_CCFG_CCU 0x0604 /* Register Address */ +#define AR5K_CCFG_CCU_CUP_EN 0x00000001 /* CCU Catchup enable */ +#define AR5K_CCFG_CCU_CREDIT 0x00000002 /* CCU Credit (field) */ +#define AR5K_CCFG_CCU_CD_THRES 0x00000080 /* CCU Cyc(lic?) debt threshold (field) */ +#define AR5K_CCFG_CCU_CUP_LCNT 0x00010000 /* CCU Catchup lit(?) count */ +#define AR5K_CCFG_CCU_INIT 0x00100200 /* Initial value during reset */ + +/* + * Compression performance counter registers [5212+] + */ +#define AR5K_CPC0 0x0610 /* Compression performance counter 0 */ +#define AR5K_CPC1 0x0614 /* Compression performance counter 1*/ +#define AR5K_CPC2 0x0618 /* Compression performance counter 2 */ +#define AR5K_CPC3 0x061c /* Compression performance counter 3 */ +#define AR5K_CPCOVF 0x0620 /* Compression performance overflow */ + + +/* + * Queue control unit (QCU) registers [5211+] + * + * Card has 12 TX Queues but i see that only 0-9 are used (?) + * both in binary HAL (see ah.h) and ar5k. Each queue has it's own + * TXDP at addresses 0x0800 - 0x082c, a CBR (Constant Bit Rate) + * configuration register (0x08c0 - 0x08ec), a ready time configuration + * register (0x0900 - 0x092c), a misc configuration register (0x09c0 - + * 0x09ec) and a status register (0x0a00 - 0x0a2c). We also have some + * global registers, QCU transmit enable/disable and "one shot arm (?)" + * set/clear, which contain status for all queues (we shift by 1 for each + * queue). To access these registers easily we define some macros here + * that are used inside HAL. For more infos check out *_tx_queue functs. 
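+ *
+ * With the AR5K_QUEUE_REG() helper defined below, a per-queue register
+ * address is simply base + 4 * queue_number, for example:
+ *
+ *	AR5K_QUEUE_TXDP(3) -> ((3 << 2) + 0x0800) = 0x080c
+ *	AR5K_QUEUE_MISC(2) -> ((2 << 2) + 0x09c0) = 0x09c8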
+ */ + +/* + * Generic QCU Register access macros + */ +#define AR5K_QUEUE_REG(_r, _q) (((_q) << 2) + _r) +#define AR5K_QCU_GLOBAL_READ(_r, _q) (AR5K_REG_READ(_r) & (1 << _q)) +#define AR5K_QCU_GLOBAL_WRITE(_r, _q) AR5K_REG_WRITE(_r, (1 << _q)) + +/* + * QCU Transmit descriptor pointer registers + */ +#define AR5K_QCU_TXDP_BASE 0x0800 /* Register Address - Queue0 TXDP */ +#define AR5K_QUEUE_TXDP(_q) AR5K_QUEUE_REG(AR5K_QCU_TXDP_BASE, _q) + +/* + * QCU Transmit enable register + */ +#define AR5K_QCU_TXE 0x0840 +#define AR5K_ENABLE_QUEUE(_q) AR5K_QCU_GLOBAL_WRITE(AR5K_QCU_TXE, _q) +#define AR5K_QUEUE_ENABLED(_q) AR5K_QCU_GLOBAL_READ(AR5K_QCU_TXE, _q) + +/* + * QCU Transmit disable register + */ +#define AR5K_QCU_TXD 0x0880 +#define AR5K_DISABLE_QUEUE(_q) AR5K_QCU_GLOBAL_WRITE(AR5K_QCU_TXD, _q) +#define AR5K_QUEUE_DISABLED(_q) AR5K_QCU_GLOBAL_READ(AR5K_QCU_TXD, _q) + +/* + * QCU Constant Bit Rate configuration registers + */ +#define AR5K_QCU_CBRCFG_BASE 0x08c0 /* Register Address - Queue0 CBRCFG */ +#define AR5K_QCU_CBRCFG_INTVAL 0x00ffffff /* CBR Interval mask */ +#define AR5K_QCU_CBRCFG_INTVAL_S 0 +#define AR5K_QCU_CBRCFG_ORN_THRES 0xff000000 /* CBR overrun threshold mask */ +#define AR5K_QCU_CBRCFG_ORN_THRES_S 24 +#define AR5K_QUEUE_CBRCFG(_q) AR5K_QUEUE_REG(AR5K_QCU_CBRCFG_BASE, _q) + +/* + * QCU Ready time configuration registers + */ +#define AR5K_QCU_RDYTIMECFG_BASE 0x0900 /* Register Address - Queue0 RDYTIMECFG */ +#define AR5K_QCU_RDYTIMECFG_INTVAL 0x00ffffff /* Ready time interval mask */ +#define AR5K_QCU_RDYTIMECFG_INTVAL_S 0 +#define AR5K_QCU_RDYTIMECFG_ENABLE 0x01000000 /* Ready time enable mask */ +#define AR5K_QUEUE_RDYTIMECFG(_q) AR5K_QUEUE_REG(AR5K_QCU_RDYTIMECFG_BASE, _q) + +/* + * QCU one shot arm set registers + */ +#define AR5K_QCU_ONESHOTARM_SET 0x0940 /* Register Address -QCU "one shot arm set (?)" */ +#define AR5K_QCU_ONESHOTARM_SET_M 0x0000ffff + +/* + * QCU one shot arm clear registers + */ +#define AR5K_QCU_ONESHOTARM_CLEAR 0x0980 /* Register Address -QCU "one shot arm clear (?)" */ +#define AR5K_QCU_ONESHOTARM_CLEAR_M 0x0000ffff + +/* + * QCU misc registers + */ +#define AR5K_QCU_MISC_BASE 0x09c0 /* Register Address -Queue0 MISC */ +#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame sheduling mask */ +#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */ +#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */ +#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated */ +#define AR5K_QCU_MISC_FRSHED_TIM_GT 3 /* TIMT gated */ +#define AR5K_QCU_MISC_FRSHED_BCN_SENT_GT 4 /* Beacon sent gated */ +#define AR5K_QCU_MISC_ONESHOT_ENABLE 0x00000010 /* Oneshot enable */ +#define AR5K_QCU_MISC_CBREXP_DIS 0x00000020 /* Disable CBR expired counter (normal queue) */ +#define AR5K_QCU_MISC_CBREXP_BCN_DIS 0x00000040 /* Disable CBR expired counter (beacon queue) */ +#define AR5K_QCU_MISC_BCN_ENABLE 0x00000080 /* Enable Beacon use */ +#define AR5K_QCU_MISC_CBR_THRES_ENABLE 0x00000100 /* CBR expired threshold enabled */ +#define AR5K_QCU_MISC_RDY_VEOL_POLICY 0x00000200 /* TXE reset when RDYTIME expired or VEOL */ +#define AR5K_QCU_MISC_CBR_RESET_CNT 0x00000400 /* CBR threshold (counter) reset */ +#define AR5K_QCU_MISC_DCU_EARLY 0x00000800 /* DCU early termination */ +#define AR5K_QCU_MISC_DCU_CMP_EN 0x00001000 /* Enable frame compression */ +#define AR5K_QUEUE_MISC(_q) AR5K_QUEUE_REG(AR5K_QCU_MISC_BASE, _q) + + +/* + * QCU status registers + */ +#define AR5K_QCU_STS_BASE 0x0a00 /* Register Address - Queue0 STS */ +#define AR5K_QCU_STS_FRMPENDCNT 0x00000003 /* Frames pending 
counter */ +#define AR5K_QCU_STS_CBREXPCNT 0x0000ff00 /* CBR expired counter */ +#define AR5K_QUEUE_STATUS(_q) AR5K_QUEUE_REG(AR5K_QCU_STS_BASE, _q) + +/* + * QCU ready time shutdown register + */ +#define AR5K_QCU_RDYTIMESHDN 0x0a40 +#define AR5K_QCU_RDYTIMESHDN_M 0x000003ff + +/* + * QCU compression buffer base registers [5212+] + */ +#define AR5K_QCU_CBB_SELECT 0x0b00 +#define AR5K_QCU_CBB_ADDR 0x0b04 +#define AR5K_QCU_CBB_ADDR_S 9 + +/* + * QCU compression buffer configuration register [5212+] + * (buffer size) + */ +#define AR5K_QCU_CBCFG 0x0b08 + + + +/* + * Distributed Coordination Function (DCF) control unit (DCU) + * registers [5211+] + * + * These registers control the various characteristics of each queue + * for 802.11e (WME) combatibility so they go together with + * QCU registers in pairs. For each queue we have a QCU mask register, + * (0x1000 - 0x102c), a local-IFS settings register (0x1040 - 0x106c), + * a retry limit register (0x1080 - 0x10ac), a channel time register + * (0x10c0 - 0x10ec), a misc-settings register (0x1100 - 0x112c) and + * a sequence number register (0x1140 - 0x116c). It seems that "global" + * registers here afect all queues (see use of DCU_GBL_IFS_SLOT in ar5k). + * We use the same macros here for easier register access. + * + */ + +/* + * DCU QCU mask registers + */ +#define AR5K_DCU_QCUMASK_BASE 0x1000 /* Register Address -Queue0 DCU_QCUMASK */ +#define AR5K_DCU_QCUMASK_M 0x000003ff +#define AR5K_QUEUE_QCUMASK(_q) AR5K_QUEUE_REG(AR5K_DCU_QCUMASK_BASE, _q) + +/* + * DCU local Inter Frame Space settings register + */ +#define AR5K_DCU_LCL_IFS_BASE 0x1040 /* Register Address -Queue0 DCU_LCL_IFS */ +#define AR5K_DCU_LCL_IFS_CW_MIN 0x000003ff /* Minimum Contention Window */ +#define AR5K_DCU_LCL_IFS_CW_MIN_S 0 +#define AR5K_DCU_LCL_IFS_CW_MAX 0x000ffc00 /* Maximum Contention Window */ +#define AR5K_DCU_LCL_IFS_CW_MAX_S 10 +#define AR5K_DCU_LCL_IFS_AIFS 0x0ff00000 /* Arbitrated Interframe Space */ +#define AR5K_DCU_LCL_IFS_AIFS_S 20 +#define AR5K_DCU_LCL_IFS_AIFS_MAX 0xfc /* Anything above that can cause DCU to hang */ +#define AR5K_QUEUE_DFS_LOCAL_IFS(_q) AR5K_QUEUE_REG(AR5K_DCU_LCL_IFS_BASE, _q) + +/* + * DCU retry limit registers + */ +#define AR5K_DCU_RETRY_LMT_BASE 0x1080 /* Register Address -Queue0 DCU_RETRY_LMT */ +#define AR5K_DCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */ +#define AR5K_DCU_RETRY_LMT_SH_RETRY_S 0 +#define AR5K_DCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry limit mask */ +#define AR5K_DCU_RETRY_LMT_LG_RETRY_S 4 +#define AR5K_DCU_RETRY_LMT_SSH_RETRY 0x00003f00 /* Station short retry limit mask (?) */ +#define AR5K_DCU_RETRY_LMT_SSH_RETRY_S 8 +#define AR5K_DCU_RETRY_LMT_SLG_RETRY 0x000fc000 /* Station long retry limit mask (?) */ +#define AR5K_DCU_RETRY_LMT_SLG_RETRY_S 14 +#define AR5K_QUEUE_DFS_RETRY_LIMIT(_q) AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q) + +/* + * DCU channel time registers + */ +#define AR5K_DCU_CHAN_TIME_BASE 0x10c0 /* Register Address -Queue0 DCU_CHAN_TIME */ +#define AR5K_DCU_CHAN_TIME_DUR 0x000fffff /* Channel time duration */ +#define AR5K_DCU_CHAN_TIME_DUR_S 0 +#define AR5K_DCU_CHAN_TIME_ENABLE 0x00100000 /* Enable channel time */ +#define AR5K_QUEUE_DFS_CHANNEL_TIME(_q) AR5K_QUEUE_REG(AR5K_DCU_CHAN_TIME_BASE, _q) + +/* + * DCU misc registers [5211+] + * + * Note: Arbiter lockout control controls the + * behaviour on low priority queues when we have multiple queues + * with pending frames. 
Intra-frame lockout means we wait until + * the queue's current frame transmits (with post frame backoff and bursting) + * before we transmit anything else and global lockout means we + * wait for the whole queue to finish before higher priority queues + * can transmit (this is used on beacon and CAB queues). + * No lockout means there is no special handling. + */ +#define AR5K_DCU_MISC_BASE 0x1100 /* Register Address -Queue0 DCU_MISC */ +#define AR5K_DCU_MISC_BACKOFF 0x0000003f /* Mask for backoff threshold */ +#define AR5K_DCU_MISC_ETS_RTS_POL 0x00000040 /* End of transmission series + station RTS/data failure count + reset policy (?) */ +#define AR5K_DCU_MISC_ETS_CW_POL 0x00000080 /* End of transmission series + CW reset policy */ +#define AR5K_DCU_MISC_FRAG_WAIT 0x00000100 /* Wait for next fragment */ +#define AR5K_DCU_MISC_BACKOFF_FRAG 0x00000200 /* Enable backoff while bursting */ +#define AR5K_DCU_MISC_HCFPOLL_ENABLE 0x00000800 /* CF - Poll enable */ +#define AR5K_DCU_MISC_BACKOFF_PERSIST 0x00001000 /* Persistent backoff */ +#define AR5K_DCU_MISC_FRMPRFTCH_ENABLE 0x00002000 /* Enable frame pre-fetch */ +#define AR5K_DCU_MISC_VIRTCOL 0x0000c000 /* Mask for Virtual Collision (?) */ +#define AR5K_DCU_MISC_VIRTCOL_NORMAL 0 +#define AR5K_DCU_MISC_VIRTCOL_IGNORE 1 +#define AR5K_DCU_MISC_BCN_ENABLE 0x00010000 /* Enable Beacon use */ +#define AR5K_DCU_MISC_ARBLOCK_CTL 0x00060000 /* Arbiter lockout control mask */ +#define AR5K_DCU_MISC_ARBLOCK_CTL_S 17 +#define AR5K_DCU_MISC_ARBLOCK_CTL_NONE 0 /* No arbiter lockout */ +#define AR5K_DCU_MISC_ARBLOCK_CTL_INTFRM 1 /* Intra-frame lockout */ +#define AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL 2 /* Global lockout */ +#define AR5K_DCU_MISC_ARBLOCK_IGNORE 0x00080000 /* Ignore Arbiter lockout */ +#define AR5K_DCU_MISC_SEQ_NUM_INCR_DIS 0x00100000 /* Disable sequence number increment */ +#define AR5K_DCU_MISC_POST_FR_BKOFF_DIS 0x00200000 /* Disable post-frame backoff */ +#define AR5K_DCU_MISC_VIRT_COLL_POLICY 0x00400000 /* Virtual Collision cw policy */ +#define AR5K_DCU_MISC_BLOWN_IFS_POLICY 0x00800000 /* Blown IFS policy (?) */ +#define AR5K_DCU_MISC_SEQNUM_CTL 0x01000000 /* Sequence number control (?) */ +#define AR5K_QUEUE_DFS_MISC(_q) AR5K_QUEUE_REG(AR5K_DCU_MISC_BASE, _q) + +/* + * DCU frame sequence number registers + */ +#define AR5K_DCU_SEQNUM_BASE 0x1140 +#define AR5K_DCU_SEQNUM_M 0x00000fff +#define AR5K_QUEUE_DCU_SEQNUM(_q) AR5K_QUEUE_REG(AR5K_DCU_SEQNUM_BASE, _q) + +/* + * DCU global IFS SIFS register + */ +#define AR5K_DCU_GBL_IFS_SIFS 0x1030 +#define AR5K_DCU_GBL_IFS_SIFS_M 0x0000ffff + +/* + * DCU global IFS slot interval register + */ +#define AR5K_DCU_GBL_IFS_SLOT 0x1070 +#define AR5K_DCU_GBL_IFS_SLOT_M 0x0000ffff + +/* + * DCU global IFS EIFS register + */ +#define AR5K_DCU_GBL_IFS_EIFS 0x10b0 +#define AR5K_DCU_GBL_IFS_EIFS_M 0x0000ffff + +/* + * DCU global IFS misc register + * + * LFSR stands for Linear Feedback Shift Register + * and it's used for generating pseudo-random + * number sequences. 
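+ *
+ * As a generic illustration only (not the polynomial this hardware
+ * actually implements), one step of a 16-bit Fibonacci LFSR looks like:
+ *
+ *	bit = (lfsr ^ (lfsr >> 2) ^ (lfsr >> 3) ^ (lfsr >> 5)) & 1;
+ *	lfsr = (lfsr >> 1) | (bit << 15);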
+ * + * (If i understand corectly, random numbers are + * used for idle sensing -multiplied with cwmin/max etc-) + */ +#define AR5K_DCU_GBL_IFS_MISC 0x10f0 /* Register Address */ +#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007 /* LFSR Slice Select */ +#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode */ +#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask */ +#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 /* USEC Duration mask */ +#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR_S 10 +#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 /* DCU Arbiter delay mask */ +#define AR5K_DCU_GBL_IFS_MISC_SIFS_CNT_RST 0x00400000 /* SIFS cnt reset policy (?) */ +#define AR5K_DCU_GBL_IFS_MISC_AIFS_CNT_RST 0x00800000 /* AIFS cnt reset policy (?) */ +#define AR5K_DCU_GBL_IFS_MISC_RND_LFSR_SL_DIS 0x01000000 /* Disable random LFSR slice */ + +/* + * DCU frame prefetch control register + */ +#define AR5K_DCU_FP 0x1230 /* Register Address */ +#define AR5K_DCU_FP_NOBURST_DCU_EN 0x00000001 /* Enable non-burst prefetch on DCU (?) */ +#define AR5K_DCU_FP_NOBURST_EN 0x00000010 /* Enable non-burst prefetch (?) */ +#define AR5K_DCU_FP_BURST_DCU_EN 0x00000020 /* Enable burst prefetch on DCU (?) */ + +/* + * DCU transmit pause control/status register + */ +#define AR5K_DCU_TXP 0x1270 /* Register Address */ +#define AR5K_DCU_TXP_M 0x000003ff /* Tx pause mask */ +#define AR5K_DCU_TXP_STATUS 0x00010000 /* Tx pause status */ + +/* + * DCU transmit filter table 0 (32 entries) + * each entry contains a 32bit slice of the + * 128bit tx filter for each DCU (4 slices per DCU) + */ +#define AR5K_DCU_TX_FILTER_0_BASE 0x1038 +#define AR5K_DCU_TX_FILTER_0(_n) (AR5K_DCU_TX_FILTER_0_BASE + (_n * 64)) + +/* + * DCU transmit filter table 1 (16 entries) + */ +#define AR5K_DCU_TX_FILTER_1_BASE 0x103c +#define AR5K_DCU_TX_FILTER_1(_n) (AR5K_DCU_TX_FILTER_1_BASE + (_n * 64)) + +/* + * DCU clear transmit filter register + */ +#define AR5K_DCU_TX_FILTER_CLR 0x143c + +/* + * DCU set transmit filter register + */ +#define AR5K_DCU_TX_FILTER_SET 0x147c + +/* + * Reset control register + */ +#define AR5K_RESET_CTL 0x4000 /* Register Address */ +#define AR5K_RESET_CTL_PCU 0x00000001 /* Protocol Control Unit reset */ +#define AR5K_RESET_CTL_DMA 0x00000002 /* DMA (Rx/Tx) reset [5210] */ +#define AR5K_RESET_CTL_BASEBAND 0x00000002 /* Baseband reset [5211+] */ +#define AR5K_RESET_CTL_MAC 0x00000004 /* MAC reset (PCU+Baseband ?) 
[5210] */ +#define AR5K_RESET_CTL_PHY 0x00000008 /* PHY reset [5210] */ +#define AR5K_RESET_CTL_PCI 0x00000010 /* PCI Core reset (interrupts etc) */ + +/* + * Sleep control register + */ +#define AR5K_SLEEP_CTL 0x4004 /* Register Address */ +#define AR5K_SLEEP_CTL_SLDUR 0x0000ffff /* Sleep duration mask */ +#define AR5K_SLEEP_CTL_SLDUR_S 0 +#define AR5K_SLEEP_CTL_SLE 0x00030000 /* Sleep enable mask */ +#define AR5K_SLEEP_CTL_SLE_S 16 +#define AR5K_SLEEP_CTL_SLE_WAKE 0x00000000 /* Force chip awake */ +#define AR5K_SLEEP_CTL_SLE_SLP 0x00010000 /* Force chip sleep */ +#define AR5K_SLEEP_CTL_SLE_ALLOW 0x00020000 /* Normal sleep policy */ +#define AR5K_SLEEP_CTL_SLE_UNITS 0x00000008 /* [5211+] */ +#define AR5K_SLEEP_CTL_DUR_TIM_POL 0x00040000 /* Sleep duration timing policy */ +#define AR5K_SLEEP_CTL_DUR_WRITE_POL 0x00080000 /* Sleep duration write policy */ +#define AR5K_SLEEP_CTL_SLE_POL 0x00100000 /* Sleep policy mode */ + +/* + * Interrupt pending register + */ +#define AR5K_INTPEND 0x4008 +#define AR5K_INTPEND_M 0x00000001 + +/* + * Sleep force register + */ +#define AR5K_SFR 0x400c +#define AR5K_SFR_EN 0x00000001 + +/* + * PCI configuration register + * TODO: Fix LED stuff + */ +#define AR5K_PCICFG 0x4010 /* Register Address */ +#define AR5K_PCICFG_EEAE 0x00000001 /* Eeprom access enable [5210] */ +#define AR5K_PCICFG_SLEEP_CLOCK_EN 0x00000002 /* Enable sleep clock */ +#define AR5K_PCICFG_CLKRUNEN 0x00000004 /* CLKRUN enable [5211+] */ +#define AR5K_PCICFG_EESIZE 0x00000018 /* Mask for EEPROM size [5211+] */ +#define AR5K_PCICFG_EESIZE_S 3 +#define AR5K_PCICFG_EESIZE_4K 0 /* 4K */ +#define AR5K_PCICFG_EESIZE_8K 1 /* 8K */ +#define AR5K_PCICFG_EESIZE_16K 2 /* 16K */ +#define AR5K_PCICFG_EESIZE_FAIL 3 /* Failed to get size [5211+] */ +#define AR5K_PCICFG_LED 0x00000060 /* Led status [5211+] */ +#define AR5K_PCICFG_LED_NONE 0x00000000 /* Default [5211+] */ +#define AR5K_PCICFG_LED_PEND 0x00000020 /* Scan / Auth pending */ +#define AR5K_PCICFG_LED_ASSOC 0x00000040 /* Associated */ +#define AR5K_PCICFG_BUS_SEL 0x00000380 /* Mask for "bus select" [5211+] (?) */ +#define AR5K_PCICFG_CBEFIX_DIS 0x00000400 /* Disable CBE fix */ +#define AR5K_PCICFG_SL_INTEN 0x00000800 /* Enable interrupts when asleep */ +#define AR5K_PCICFG_LED_BCTL 0x00001000 /* Led blink (?) [5210] */ +#define AR5K_PCICFG_RETRY_FIX 0x00001000 /* Enable pci core retry fix */ +#define AR5K_PCICFG_SL_INPEN 0x00002000 /* Sleep even whith pending interrupts*/ +#define AR5K_PCICFG_SPWR_DN 0x00010000 /* Mask for power status */ +#define AR5K_PCICFG_LEDMODE 0x000e0000 /* Ledmode [5211+] */ +#define AR5K_PCICFG_LEDMODE_PROP 0x00000000 /* Blink on standard traffic [5211+] */ +#define AR5K_PCICFG_LEDMODE_PROM 0x00020000 /* Default mode (blink on any traffic) [5211+] */ +#define AR5K_PCICFG_LEDMODE_PWR 0x00040000 /* Some other blinking mode (?) [5211+] */ +#define AR5K_PCICFG_LEDMODE_RAND 0x00060000 /* Random blinking (?) [5211+] */ +#define AR5K_PCICFG_LEDBLINK 0x00700000 /* Led blink rate */ +#define AR5K_PCICFG_LEDBLINK_S 20 +#define AR5K_PCICFG_LEDSLOW 0x00800000 /* Slowest led blink rate [5211+] */ +#define AR5K_PCICFG_LEDSTATE \ + (AR5K_PCICFG_LED | AR5K_PCICFG_LEDMODE | \ + AR5K_PCICFG_LEDBLINK | AR5K_PCICFG_LEDSLOW) +#define AR5K_PCICFG_SLEEP_CLOCK_RATE 0x03000000 /* Sleep clock rate */ +#define AR5K_PCICFG_SLEEP_CLOCK_RATE_S 24 + +/* + * "General Purpose Input/Output" (GPIO) control register + * + * I'm not sure about this but after looking at the code + * for all chipsets here is what i got. 
+ *
+ * We have 6 GPIOs (pins), each GPIO has 4 modes (2 bits)
+ * Mode 0 -> always input
+ * Mode 1 -> output when GPIODO for this GPIO is set to 0
+ * Mode 2 -> output when GPIODO for this GPIO is set to 1
+ * Mode 3 -> always output
+ *
+ * For more info check out the get_gpio/set_gpio and
+ * set_gpio_input/set_gpio_output functions.
+ * For more info on the gpio interrupt check out set_gpio_intr.
+ */
+#define AR5K_NUM_GPIO 6
+
+#define AR5K_GPIOCR 0x4014 /* Register Address */
+#define AR5K_GPIOCR_INT_ENA 0x00008000 /* Enable GPIO interrupt */
+#define AR5K_GPIOCR_INT_SELL 0x00000000 /* Generate interrupt when pin is low */
+#define AR5K_GPIOCR_INT_SELH 0x00010000 /* Generate interrupt when pin is high */
+#define AR5K_GPIOCR_IN(n) (0 << ((n) * 2)) /* Mode 0 for pin n */
+#define AR5K_GPIOCR_OUT0(n) (1 << ((n) * 2)) /* Mode 1 for pin n */
+#define AR5K_GPIOCR_OUT1(n) (2 << ((n) * 2)) /* Mode 2 for pin n */
+#define AR5K_GPIOCR_OUT(n) (3 << ((n) * 2)) /* Mode 3 for pin n */
+#define AR5K_GPIOCR_INT_SEL(n) ((n) << 12) /* Interrupt for GPIO pin n */
+
+/*
+ * "General Purpose Input/Output" (GPIO) data output register
+ */
+#define AR5K_GPIODO 0x4018
+
+/*
+ * "General Purpose Input/Output" (GPIO) data input register
+ */
+#define AR5K_GPIODI 0x401c
+#define AR5K_GPIODI_M 0x0000002f
+
+/*
+ * Silicon revision register
+ */
+#define AR5K_SREV 0x4020 /* Register Address */
+#define AR5K_SREV_REV 0x0000000f /* Mask for revision */
+#define AR5K_SREV_REV_S 0
+#define AR5K_SREV_VER 0x000000ff /* Mask for version */
+#define AR5K_SREV_VER_S 4
+
+/*
+ * TXE write posting register
+ */
+#define AR5K_TXEPOST 0x4028
+
+/*
+ * QCU sleep mask
+ */
+#define AR5K_QCU_SLEEP_MASK 0x402c
+
+/* 0x4068 is compression buffer configuration
+ * register on 5414 and pm configuration register
+ * on 5424 and newer pci-e chips. */
+
+/*
+ * Compression buffer configuration
+ * register (enable/disable) [5414]
+ */
+#define AR5K_5414_CBCFG 0x4068
+#define AR5K_5414_CBCFG_BUF_DIS 0x10 /* Disable buffer */
+
+/*
+ * PCI-E Power management configuration
+ * and status register [5424+]
+ */
+#define AR5K_PCIE_PM_CTL 0x4068 /* Register address */
+/* Only 5424 */
+#define AR5K_PCIE_PM_CTL_L1_WHEN_D2 0x00000001 /* enable PCIe core enter L1
+ when d2_sleep_en is asserted */
+#define AR5K_PCIE_PM_CTL_L0_L0S_CLEAR 0x00000002 /* Clear L0 and L0S counters */
+#define AR5K_PCIE_PM_CTL_L0_L0S_EN 0x00000004 /* Start L0 and L0S counters */
+#define AR5K_PCIE_PM_CTL_LDRESET_EN 0x00000008 /* Enable reset when link goes
+ down */
+/* Wake On Wireless */
+#define AR5K_PCIE_PM_CTL_PME_EN 0x00000010 /* PME Enable */
+#define AR5K_PCIE_PM_CTL_AUX_PWR_DET 0x00000020 /* Aux power detect */
+#define AR5K_PCIE_PM_CTL_PME_CLEAR 0x00000040 /* Clear PME */
+#define AR5K_PCIE_PM_CTL_PSM_D0 0x00000080
+#define AR5K_PCIE_PM_CTL_PSM_D1 0x00000100
+#define AR5K_PCIE_PM_CTL_PSM_D2 0x00000200
+#define AR5K_PCIE_PM_CTL_PSM_D3 0x00000400
+
+/*
+ * PCI-E Workaround enable register
+ */
+#define AR5K_PCIE_WAEN 0x407c
+
+/*
+ * PCI-E Serializer/Deserializer
+ * registers
+ */
+#define AR5K_PCIE_SERDES 0x4080
+#define AR5K_PCIE_SERDES_RESET 0x4084
+
+/*====EEPROM REGISTERS====*/
+
+/*
+ * EEPROM access registers
+ *
+ * There is a difference here between the 5210 and 5211/5212:
+ * the read data register for 5210 is at 0x6800 and the
+ * status register is at 0x6c00. There is also
+ * no eeprom command register on 5210 and the
+ * offsets are different.
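+ *
+ * As a purely illustrative sketch of the 5211+ read procedure described
+ * below (it assumes the ath5k_hw_reg_read/ath5k_hw_reg_write helpers
+ * declared in ath5k.h), a 16bit read boils down to roughly:
+ *
+ *     ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
+ *     ath5k_hw_reg_write(ah, AR5K_EEPROM_CMD_READ, AR5K_EEPROM_CMD);
+ *     poll AR5K_EEPROM_STATUS until AR5K_EEPROM_STAT_RDDONE is set;
+ *     data = ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) & 0xffff;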
+ * + * To read eeprom data for a specific offset: + * 5210 - enable eeprom access (AR5K_PCICFG_EEAE) + * read AR5K_EEPROM_BASE +(4 * offset) + * check the eeprom status register + * and read eeprom data register. + * + * 5211 - write offset to AR5K_EEPROM_BASE + * 5212 write AR5K_EEPROM_CMD_READ on AR5K_EEPROM_CMD + * check the eeprom status register + * and read eeprom data register. + * + * To write eeprom data for a specific offset: + * 5210 - enable eeprom access (AR5K_PCICFG_EEAE) + * write data to AR5K_EEPROM_BASE +(4 * offset) + * check the eeprom status register + * 5211 - write AR5K_EEPROM_CMD_RESET on AR5K_EEPROM_CMD + * 5212 write offset to AR5K_EEPROM_BASE + * write data to data register + * write AR5K_EEPROM_CMD_WRITE on AR5K_EEPROM_CMD + * check the eeprom status register + * + * For more infos check eeprom_* functs and the ar5k.c + * file posted in madwifi-devel mailing list. + * http://sourceforge.net/mailarchive/message.php?msg_id=8966525 + * + */ +#define AR5K_EEPROM_BASE 0x6000 + +/* + * EEPROM data register + */ +#define AR5K_EEPROM_DATA_5211 0x6004 +#define AR5K_EEPROM_DATA_5210 0x6800 +#define AR5K_EEPROM_DATA (ah->ah_version == AR5K_AR5210 ? \ + AR5K_EEPROM_DATA_5210 : AR5K_EEPROM_DATA_5211) + +/* + * EEPROM command register + */ +#define AR5K_EEPROM_CMD 0x6008 /* Register Addres */ +#define AR5K_EEPROM_CMD_READ 0x00000001 /* EEPROM read */ +#define AR5K_EEPROM_CMD_WRITE 0x00000002 /* EEPROM write */ +#define AR5K_EEPROM_CMD_RESET 0x00000004 /* EEPROM reset */ + +/* + * EEPROM status register + */ +#define AR5K_EEPROM_STAT_5210 0x6c00 /* Register Address [5210] */ +#define AR5K_EEPROM_STAT_5211 0x600c /* Register Address [5211+] */ +#define AR5K_EEPROM_STATUS (ah->ah_version == AR5K_AR5210 ? \ + AR5K_EEPROM_STAT_5210 : AR5K_EEPROM_STAT_5211) +#define AR5K_EEPROM_STAT_RDERR 0x00000001 /* EEPROM read failed */ +#define AR5K_EEPROM_STAT_RDDONE 0x00000002 /* EEPROM read successful */ +#define AR5K_EEPROM_STAT_WRERR 0x00000004 /* EEPROM write failed */ +#define AR5K_EEPROM_STAT_WRDONE 0x00000008 /* EEPROM write successful */ + +/* + * EEPROM config register + */ +#define AR5K_EEPROM_CFG 0x6010 /* Register Addres */ +#define AR5K_EEPROM_CFG_SIZE 0x00000003 /* Size determination override */ +#define AR5K_EEPROM_CFG_SIZE_AUTO 0 +#define AR5K_EEPROM_CFG_SIZE_4KBIT 1 +#define AR5K_EEPROM_CFG_SIZE_8KBIT 2 +#define AR5K_EEPROM_CFG_SIZE_16KBIT 3 +#define AR5K_EEPROM_CFG_WR_WAIT_DIS 0x00000004 /* Disable write wait */ +#define AR5K_EEPROM_CFG_CLK_RATE 0x00000018 /* Clock rate */ +#define AR5K_EEPROM_CFG_CLK_RATE_S 3 +#define AR5K_EEPROM_CFG_CLK_RATE_156KHZ 0 +#define AR5K_EEPROM_CFG_CLK_RATE_312KHZ 1 +#define AR5K_EEPROM_CFG_CLK_RATE_625KHZ 2 +#define AR5K_EEPROM_CFG_PROT_KEY 0x00ffff00 /* Protection key */ +#define AR5K_EEPROM_CFG_PROT_KEY_S 8 +#define AR5K_EEPROM_CFG_LIND_EN 0x01000000 /* Enable length indicator (?) 
*/ + + +/* + * TODO: Wake On Wireless registers + * Range 0x7000 - 0x7ce0 + */ + +/* + * Protocol Control Unit (PCU) registers + */ +/* + * Used for checking initial register writes + * during channel reset (see reset func) + */ +#define AR5K_PCU_MIN 0x8000 +#define AR5K_PCU_MAX 0x8fff + +/* + * First station id register (Lower 32 bits of MAC address) + */ +#define AR5K_STA_ID0 0x8000 +#define AR5K_STA_ID0_ARRD_L32 0xffffffff + +/* + * Second station id register (Upper 16 bits of MAC address + PCU settings) + */ +#define AR5K_STA_ID1 0x8004 /* Register Address */ +#define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC addres */ +#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */ +#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */ +#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */ +#define AR5K_STA_ID1_NO_KEYSRCH 0x00080000 /* No key search */ +#define AR5K_STA_ID1_NO_PSPOLL 0x00100000 /* No power save polling [5210] */ +#define AR5K_STA_ID1_PCF_5211 0x00100000 /* Enable PCF on [5211+] */ +#define AR5K_STA_ID1_PCF_5210 0x00200000 /* Enable PCF on [5210]*/ +#define AR5K_STA_ID1_PCF (ah->ah_version == AR5K_AR5210 ? \ + AR5K_STA_ID1_PCF_5210 : AR5K_STA_ID1_PCF_5211) +#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */ +#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */ +#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */ +#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */ +#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate for ACK/CTS [5211+] */ +#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */ +#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */ +#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */ +#define AR5K_STA_ID1_PRESERVE_SEQ_NUM 0x20000000 /* Preserve sequence number */ +#define AR5K_STA_ID1_CBCIV_ENDIAN 0x40000000 /* ??? */ +#define AR5K_STA_ID1_KEYSRCH_MCAST 0x80000000 /* Do key cache search for mcast frames */ + +#define AR5K_STA_ID1_ANTENNA_SETTINGS (AR5K_STA_ID1_DEFAULT_ANTENNA | \ + AR5K_STA_ID1_DESC_ANTENNA | \ + AR5K_STA_ID1_RTS_DEF_ANTENNA | \ + AR5K_STA_ID1_SELFGEN_DEF_ANT) + +/* + * First BSSID register (MAC address, lower 32bits) + */ +#define AR5K_BSS_ID0 0x8008 + +/* + * Second BSSID register (MAC address in upper 16 bits) + * + * AID: Association ID + */ +#define AR5K_BSS_ID1 0x800c +#define AR5K_BSS_ID1_AID 0xffff0000 +#define AR5K_BSS_ID1_AID_S 16 + +/* + * Backoff slot time register + */ +#define AR5K_SLOT_TIME 0x8010 + +/* + * ACK/CTS timeout register + */ +#define AR5K_TIME_OUT 0x8014 /* Register Address */ +#define AR5K_TIME_OUT_ACK 0x00001fff /* ACK timeout mask */ +#define AR5K_TIME_OUT_ACK_S 0 +#define AR5K_TIME_OUT_CTS 0x1fff0000 /* CTS timeout mask */ +#define AR5K_TIME_OUT_CTS_S 16 + +/* + * RSSI threshold register + */ +#define AR5K_RSSI_THR 0x8018 /* Register Address */ +#define AR5K_RSSI_THR_M 0x000000ff /* Mask for RSSI threshold [5211+] */ +#define AR5K_RSSI_THR_BMISS_5210 0x00000700 /* Mask for Beacon Missed threshold [5210] */ +#define AR5K_RSSI_THR_BMISS_5210_S 8 +#define AR5K_RSSI_THR_BMISS_5211 0x0000ff00 /* Mask for Beacon Missed threshold [5211+] */ +#define AR5K_RSSI_THR_BMISS_5211_S 8 +#define AR5K_RSSI_THR_BMISS (ah->ah_version == AR5K_AR5210 ? 
\ + AR5K_RSSI_THR_BMISS_5210 : AR5K_RSSI_THR_BMISS_5211) +#define AR5K_RSSI_THR_BMISS_S 8 + +/* + * 5210 has more PCU registers because there is no QCU/DCU + * so queue parameters are set here, this way a lot common + * registers have different address for 5210. To make things + * easier we define a macro based on ah->ah_version for common + * registers with different addresses and common flags. + */ + +/* + * Retry limit register + * + * Retry limit register for 5210 (no QCU/DCU so it's done in PCU) + */ +#define AR5K_NODCU_RETRY_LMT 0x801c /* Register Address */ +#define AR5K_NODCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */ +#define AR5K_NODCU_RETRY_LMT_SH_RETRY_S 0 +#define AR5K_NODCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry mask */ +#define AR5K_NODCU_RETRY_LMT_LG_RETRY_S 4 +#define AR5K_NODCU_RETRY_LMT_SSH_RETRY 0x00003f00 /* Station short retry limit mask */ +#define AR5K_NODCU_RETRY_LMT_SSH_RETRY_S 8 +#define AR5K_NODCU_RETRY_LMT_SLG_RETRY 0x000fc000 /* Station long retry limit mask */ +#define AR5K_NODCU_RETRY_LMT_SLG_RETRY_S 14 +#define AR5K_NODCU_RETRY_LMT_CW_MIN 0x3ff00000 /* Minimum contention window mask */ +#define AR5K_NODCU_RETRY_LMT_CW_MIN_S 20 + +/* + * Transmit latency register + */ +#define AR5K_USEC_5210 0x8020 /* Register Address [5210] */ +#define AR5K_USEC_5211 0x801c /* Register Address [5211+] */ +#define AR5K_USEC (ah->ah_version == AR5K_AR5210 ? \ + AR5K_USEC_5210 : AR5K_USEC_5211) +#define AR5K_USEC_1 0x0000007f /* clock cycles for 1us */ +#define AR5K_USEC_1_S 0 +#define AR5K_USEC_32 0x00003f80 /* clock cycles for 1us while on 32Mhz clock */ +#define AR5K_USEC_32_S 7 +#define AR5K_USEC_TX_LATENCY_5211 0x007fc000 +#define AR5K_USEC_TX_LATENCY_5211_S 14 +#define AR5K_USEC_RX_LATENCY_5211 0x1f800000 +#define AR5K_USEC_RX_LATENCY_5211_S 23 +#define AR5K_USEC_TX_LATENCY_5210 0x000fc000 /* also for 5311 */ +#define AR5K_USEC_TX_LATENCY_5210_S 14 +#define AR5K_USEC_RX_LATENCY_5210 0x03f00000 /* also for 5311 */ +#define AR5K_USEC_RX_LATENCY_5210_S 20 + +/* + * PCU beacon control register + */ +#define AR5K_BEACON_5210 0x8024 /*Register Address [5210] */ +#define AR5K_BEACON_5211 0x8020 /*Register Address [5211+] */ +#define AR5K_BEACON (ah->ah_version == AR5K_AR5210 ? \ + AR5K_BEACON_5210 : AR5K_BEACON_5211) +#define AR5K_BEACON_PERIOD 0x0000ffff /* Mask for beacon period */ +#define AR5K_BEACON_PERIOD_S 0 +#define AR5K_BEACON_TIM 0x007f0000 /* Mask for TIM offset */ +#define AR5K_BEACON_TIM_S 16 +#define AR5K_BEACON_ENABLE 0x00800000 /* Enable beacons */ +#define AR5K_BEACON_RESET_TSF 0x01000000 /* Force TSF reset */ + +/* + * CFP period register + */ +#define AR5K_CFP_PERIOD_5210 0x8028 +#define AR5K_CFP_PERIOD_5211 0x8024 +#define AR5K_CFP_PERIOD (ah->ah_version == AR5K_AR5210 ? \ + AR5K_CFP_PERIOD_5210 : AR5K_CFP_PERIOD_5211) + +/* + * Next beacon time register + */ +#define AR5K_TIMER0_5210 0x802c +#define AR5K_TIMER0_5211 0x8028 +#define AR5K_TIMER0 (ah->ah_version == AR5K_AR5210 ? \ + AR5K_TIMER0_5210 : AR5K_TIMER0_5211) + +/* + * Next DMA beacon alert register + */ +#define AR5K_TIMER1_5210 0x8030 +#define AR5K_TIMER1_5211 0x802c +#define AR5K_TIMER1 (ah->ah_version == AR5K_AR5210 ? \ + AR5K_TIMER1_5210 : AR5K_TIMER1_5211) + +/* + * Next software beacon alert register + */ +#define AR5K_TIMER2_5210 0x8034 +#define AR5K_TIMER2_5211 0x8030 +#define AR5K_TIMER2 (ah->ah_version == AR5K_AR5210 ? 
\ + AR5K_TIMER2_5210 : AR5K_TIMER2_5211) + +/* + * Next ATIM window time register + */ +#define AR5K_TIMER3_5210 0x8038 +#define AR5K_TIMER3_5211 0x8034 +#define AR5K_TIMER3 (ah->ah_version == AR5K_AR5210 ? \ + AR5K_TIMER3_5210 : AR5K_TIMER3_5211) + + +/* + * 5210 First inter frame spacing register (IFS) + */ +#define AR5K_IFS0 0x8040 +#define AR5K_IFS0_SIFS 0x000007ff +#define AR5K_IFS0_SIFS_S 0 +#define AR5K_IFS0_DIFS 0x007ff800 +#define AR5K_IFS0_DIFS_S 11 + +/* + * 5210 Second inter frame spacing register (IFS) + */ +#define AR5K_IFS1 0x8044 +#define AR5K_IFS1_PIFS 0x00000fff +#define AR5K_IFS1_PIFS_S 0 +#define AR5K_IFS1_EIFS 0x03fff000 +#define AR5K_IFS1_EIFS_S 12 +#define AR5K_IFS1_CS_EN 0x04000000 + + +/* + * CFP duration register + */ +#define AR5K_CFP_DUR_5210 0x8048 +#define AR5K_CFP_DUR_5211 0x8038 +#define AR5K_CFP_DUR (ah->ah_version == AR5K_AR5210 ? \ + AR5K_CFP_DUR_5210 : AR5K_CFP_DUR_5211) + +/* + * Receive filter register + */ +#define AR5K_RX_FILTER_5210 0x804c /* Register Address [5210] */ +#define AR5K_RX_FILTER_5211 0x803c /* Register Address [5211+] */ +#define AR5K_RX_FILTER (ah->ah_version == AR5K_AR5210 ? \ + AR5K_RX_FILTER_5210 : AR5K_RX_FILTER_5211) +#define AR5K_RX_FILTER_UCAST 0x00000001 /* Don't filter unicast frames */ +#define AR5K_RX_FILTER_MCAST 0x00000002 /* Don't filter multicast frames */ +#define AR5K_RX_FILTER_BCAST 0x00000004 /* Don't filter broadcast frames */ +#define AR5K_RX_FILTER_CONTROL 0x00000008 /* Don't filter control frames */ +#define AR5K_RX_FILTER_BEACON 0x00000010 /* Don't filter beacon frames */ +#define AR5K_RX_FILTER_PROM 0x00000020 /* Set promiscuous mode */ +#define AR5K_RX_FILTER_XRPOLL 0x00000040 /* Don't filter XR poll frame [5212+] */ +#define AR5K_RX_FILTER_PROBEREQ 0x00000080 /* Don't filter probe requests [5212+] */ +#define AR5K_RX_FILTER_PHYERR_5212 0x00000100 /* Don't filter phy errors [5212+] */ +#define AR5K_RX_FILTER_RADARERR_5212 0x00000200 /* Don't filter phy radar errors [5212+] */ +#define AR5K_RX_FILTER_PHYERR_5211 0x00000040 /* [5211] */ +#define AR5K_RX_FILTER_RADARERR_5211 0x00000080 /* [5211] */ +#define AR5K_RX_FILTER_PHYERR \ + ((ah->ah_version == AR5K_AR5211 ? \ + AR5K_RX_FILTER_PHYERR_5211 : AR5K_RX_FILTER_PHYERR_5212)) +#define AR5K_RX_FILTER_RADARERR \ + ((ah->ah_version == AR5K_AR5211 ? \ + AR5K_RX_FILTER_RADARERR_5211 : AR5K_RX_FILTER_RADARERR_5212)) + +/* + * Multicast filter register (lower 32 bits) + */ +#define AR5K_MCAST_FILTER0_5210 0x8050 +#define AR5K_MCAST_FILTER0_5211 0x8040 +#define AR5K_MCAST_FILTER0 (ah->ah_version == AR5K_AR5210 ? \ + AR5K_MCAST_FILTER0_5210 : AR5K_MCAST_FILTER0_5211) + +/* + * Multicast filter register (higher 16 bits) + */ +#define AR5K_MCAST_FILTER1_5210 0x8054 +#define AR5K_MCAST_FILTER1_5211 0x8044 +#define AR5K_MCAST_FILTER1 (ah->ah_version == AR5K_AR5210 ? \ + AR5K_MCAST_FILTER1_5210 : AR5K_MCAST_FILTER1_5211) + + +/* + * Transmit mask register (lower 32 bits) [5210] + */ +#define AR5K_TX_MASK0 0x8058 + +/* + * Transmit mask register (higher 16 bits) [5210] + */ +#define AR5K_TX_MASK1 0x805c + +/* + * Clear transmit mask [5210] + */ +#define AR5K_CLR_TMASK 0x8060 + +/* + * Trigger level register (before transmission) [5210] + */ +#define AR5K_TRIG_LVL 0x8064 + + +/* + * PCU control register + * + * Only DIS_RX is used in the code, the rest i guess are + * for tweaking/diagnostics. 
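+ *
+ * A minimal usage sketch (illustrative only, assuming the
+ * AR5K_REG_ENABLE_BITS/AR5K_REG_DISABLE_BITS helpers from ath5k.h),
+ * e.g. to stop and later restart the receive path:
+ *
+ *     AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
+ *     ...
+ *     AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
+ *
+ * Note that AR5K_DIAG_SW and AR5K_DIAG_SW_DIS_RX below already resolve
+ * to the right address/bit for the current chip via ah->ah_version.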
+ */
+#define AR5K_DIAG_SW_5210 0x8068 /* Register Address [5210] */
+#define AR5K_DIAG_SW_5211 0x8048 /* Register Address [5211+] */
+#define AR5K_DIAG_SW (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_5210 : AR5K_DIAG_SW_5211)
+#define AR5K_DIAG_SW_DIS_WEP_ACK 0x00000001 /* Disable ACKs if WEP key is invalid */
+#define AR5K_DIAG_SW_DIS_ACK 0x00000002 /* Disable ACKs */
+#define AR5K_DIAG_SW_DIS_CTS 0x00000004 /* Disable CTSs */
+#define AR5K_DIAG_SW_DIS_ENC 0x00000008 /* Disable encryption */
+#define AR5K_DIAG_SW_DIS_DEC 0x00000010 /* Disable decryption */
+#define AR5K_DIAG_SW_DIS_TX 0x00000020 /* Disable transmit [5210] */
+#define AR5K_DIAG_SW_DIS_RX_5210 0x00000040 /* Disable receive */
+#define AR5K_DIAG_SW_DIS_RX_5211 0x00000020
+#define AR5K_DIAG_SW_DIS_RX (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_DIS_RX_5210 : AR5K_DIAG_SW_DIS_RX_5211)
+#define AR5K_DIAG_SW_LOOP_BACK_5210 0x00000080 /* Loopback (I guess it goes with DIS_TX) [5210] */
+#define AR5K_DIAG_SW_LOOP_BACK_5211 0x00000040
+#define AR5K_DIAG_SW_LOOP_BACK (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_LOOP_BACK_5210 : AR5K_DIAG_SW_LOOP_BACK_5211)
+#define AR5K_DIAG_SW_CORR_FCS_5210 0x00000100 /* Corrupted FCS */
+#define AR5K_DIAG_SW_CORR_FCS_5211 0x00000080
+#define AR5K_DIAG_SW_CORR_FCS (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_CORR_FCS_5210 : AR5K_DIAG_SW_CORR_FCS_5211)
+#define AR5K_DIAG_SW_CHAN_INFO_5210 0x00000200 /* Dump channel info */
+#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100
+#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211)
+#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210 0x00000400 /* Enable fixed scrambler seed */
+#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211 0x00000200
+#define AR5K_DIAG_SW_EN_SCRAM_SEED (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_EN_SCRAM_SEED_5210 : AR5K_DIAG_SW_EN_SCRAM_SEED_5211)
+#define AR5K_DIAG_SW_ECO_ENABLE 0x00000400 /* [5211+] */
+#define AR5K_DIAG_SW_SCVRAM_SEED 0x0003f800 /* [5210] */
+#define AR5K_DIAG_SW_SCRAM_SEED_M 0x0001fc00 /* Scrambler seed mask */
+#define AR5K_DIAG_SW_SCRAM_SEED_S 10
+#define AR5K_DIAG_SW_DIS_SEQ_INC 0x00040000 /* Disable seqnum increment (?)[5210] */
+#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000
+#define AR5K_DIAG_SW_FRAME_NV0_5211 0x00020000 /* Accept frames of non-zero protocol number */
+#define AR5K_DIAG_SW_FRAME_NV0 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211)
+#define AR5K_DIAG_SW_OBSPT_M 0x000c0000 /* Observation point select (?) */
+#define AR5K_DIAG_SW_OBSPT_S 18
+#define AR5K_DIAG_SW_RX_CLEAR_HIGH 0x0010000 /* Force RX Clear high */
+#define AR5K_DIAG_SW_IGNORE_CARR_SENSE 0x0020000 /* Ignore virtual carrier sense */
+#define AR5K_DIAG_SW_CHANEL_IDLE_HIGH 0x0040000 /* Force channel idle high */
+#define AR5K_DIAG_SW_PHEAR_ME 0x0080000 /* ??? */
+
+/*
+ * TSF (clock) register (lower 32 bits)
+ */
+#define AR5K_TSF_L32_5210 0x806c
+#define AR5K_TSF_L32_5211 0x804c
+#define AR5K_TSF_L32 (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_TSF_L32_5210 : AR5K_TSF_L32_5211)
+
+/*
+ * TSF (clock) register (higher 32 bits)
+ */
+#define AR5K_TSF_U32_5210 0x8070
+#define AR5K_TSF_U32_5211 0x8050
+#define AR5K_TSF_U32 (ah->ah_version == AR5K_AR5210 ?
\ + AR5K_TSF_U32_5210 : AR5K_TSF_U32_5211) + +/* + * Last beacon timestamp register (Read Only) + */ +#define AR5K_LAST_TSTP 0x8080 + +/* + * ADDAC test register [5211+] + */ +#define AR5K_ADDAC_TEST 0x8054 /* Register Address */ +#define AR5K_ADDAC_TEST_TXCONT 0x00000001 /* Test continuous tx */ +#define AR5K_ADDAC_TEST_TST_MODE 0x00000002 /* Test mode */ +#define AR5K_ADDAC_TEST_LOOP_EN 0x00000004 /* Enable loop */ +#define AR5K_ADDAC_TEST_LOOP_LEN 0x00000008 /* Loop length (field) */ +#define AR5K_ADDAC_TEST_USE_U8 0x00004000 /* Use upper 8 bits */ +#define AR5K_ADDAC_TEST_MSB 0x00008000 /* State of MSB */ +#define AR5K_ADDAC_TEST_TRIG_SEL 0x00010000 /* Trigger select */ +#define AR5K_ADDAC_TEST_TRIG_PTY 0x00020000 /* Trigger polarity */ +#define AR5K_ADDAC_TEST_RXCONT 0x00040000 /* Continuous capture */ +#define AR5K_ADDAC_TEST_CAPTURE 0x00080000 /* Begin capture */ +#define AR5K_ADDAC_TEST_TST_ARM 0x00100000 /* ARM rx buffer for capture */ + +/* + * Default antenna register [5211+] + */ +#define AR5K_DEFAULT_ANTENNA 0x8058 + +/* + * Frame control QoS mask register (?) [5211+] + * (FC_QOS_MASK) + */ +#define AR5K_FRAME_CTL_QOSM 0x805c + +/* + * Seq mask register (?) [5211+] + */ +#define AR5K_SEQ_MASK 0x8060 + +/* + * Retry count register [5210] + */ +#define AR5K_RETRY_CNT 0x8084 /* Register Address [5210] */ +#define AR5K_RETRY_CNT_SSH 0x0000003f /* Station short retry count (?) */ +#define AR5K_RETRY_CNT_SLG 0x00000fc0 /* Station long retry count (?) */ + +/* + * Back-off status register [5210] + */ +#define AR5K_BACKOFF 0x8088 /* Register Address [5210] */ +#define AR5K_BACKOFF_CW 0x000003ff /* Backoff Contention Window (?) */ +#define AR5K_BACKOFF_CNT 0x03ff0000 /* Backoff count (?) */ + + + +/* + * NAV register (current) + */ +#define AR5K_NAV_5210 0x808c +#define AR5K_NAV_5211 0x8084 +#define AR5K_NAV (ah->ah_version == AR5K_AR5210 ? \ + AR5K_NAV_5210 : AR5K_NAV_5211) + +/* + * RTS success register + */ +#define AR5K_RTS_OK_5210 0x8090 +#define AR5K_RTS_OK_5211 0x8088 +#define AR5K_RTS_OK (ah->ah_version == AR5K_AR5210 ? \ + AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211) + +/* + * RTS failure register + */ +#define AR5K_RTS_FAIL_5210 0x8094 +#define AR5K_RTS_FAIL_5211 0x808c +#define AR5K_RTS_FAIL (ah->ah_version == AR5K_AR5210 ? \ + AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211) + +/* + * ACK failure register + */ +#define AR5K_ACK_FAIL_5210 0x8098 +#define AR5K_ACK_FAIL_5211 0x8090 +#define AR5K_ACK_FAIL (ah->ah_version == AR5K_AR5210 ? \ + AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211) + +/* + * FCS failure register + */ +#define AR5K_FCS_FAIL_5210 0x809c +#define AR5K_FCS_FAIL_5211 0x8094 +#define AR5K_FCS_FAIL (ah->ah_version == AR5K_AR5210 ? \ + AR5K_FCS_FAIL_5210 : AR5K_FCS_FAIL_5211) + +/* + * Beacon count register + */ +#define AR5K_BEACON_CNT_5210 0x80a0 +#define AR5K_BEACON_CNT_5211 0x8098 +#define AR5K_BEACON_CNT (ah->ah_version == AR5K_AR5210 ? 
\ + AR5K_BEACON_CNT_5210 : AR5K_BEACON_CNT_5211) + + +/*===5212 Specific PCU registers===*/ + +/* + * Transmit power control register + */ +#define AR5K_TPC 0x80e8 +#define AR5K_TPC_ACK 0x0000003f /* ack frames */ +#define AR5K_TPC_ACK_S 0 +#define AR5K_TPC_CTS 0x00003f00 /* cts frames */ +#define AR5K_TPC_CTS_S 8 +#define AR5K_TPC_CHIRP 0x003f0000 /* chirp frames */ +#define AR5K_TPC_CHIRP_S 16 +#define AR5K_TPC_DOPPLER 0x0f000000 /* doppler chirp span */ +#define AR5K_TPC_DOPPLER_S 24 + +/* + * XR (eXtended Range) mode register + */ +#define AR5K_XRMODE 0x80c0 /* Register Address */ +#define AR5K_XRMODE_POLL_TYPE_M 0x0000003f /* Mask for Poll type (?) */ +#define AR5K_XRMODE_POLL_TYPE_S 0 +#define AR5K_XRMODE_POLL_SUBTYPE_M 0x0000003c /* Mask for Poll subtype (?) */ +#define AR5K_XRMODE_POLL_SUBTYPE_S 2 +#define AR5K_XRMODE_POLL_WAIT_ALL 0x00000080 /* Wait for poll */ +#define AR5K_XRMODE_SIFS_DELAY 0x000fff00 /* Mask for SIFS delay */ +#define AR5K_XRMODE_FRAME_HOLD_M 0xfff00000 /* Mask for frame hold (?) */ +#define AR5K_XRMODE_FRAME_HOLD_S 20 + +/* + * XR delay register + */ +#define AR5K_XRDELAY 0x80c4 /* Register Address */ +#define AR5K_XRDELAY_SLOT_DELAY_M 0x0000ffff /* Mask for slot delay */ +#define AR5K_XRDELAY_SLOT_DELAY_S 0 +#define AR5K_XRDELAY_CHIRP_DELAY_M 0xffff0000 /* Mask for CHIRP data delay */ +#define AR5K_XRDELAY_CHIRP_DELAY_S 16 + +/* + * XR timeout register + */ +#define AR5K_XRTIMEOUT 0x80c8 /* Register Address */ +#define AR5K_XRTIMEOUT_CHIRP_M 0x0000ffff /* Mask for CHIRP timeout */ +#define AR5K_XRTIMEOUT_CHIRP_S 0 +#define AR5K_XRTIMEOUT_POLL_M 0xffff0000 /* Mask for Poll timeout */ +#define AR5K_XRTIMEOUT_POLL_S 16 + +/* + * XR chirp register + */ +#define AR5K_XRCHIRP 0x80cc /* Register Address */ +#define AR5K_XRCHIRP_SEND 0x00000001 /* Send CHIRP */ +#define AR5K_XRCHIRP_GAP 0xffff0000 /* Mask for CHIRP gap (?) */ + +/* + * XR stomp register + */ +#define AR5K_XRSTOMP 0x80d0 /* Register Address */ +#define AR5K_XRSTOMP_TX 0x00000001 /* Stomp Tx (?) */ +#define AR5K_XRSTOMP_RX 0x00000002 /* Stomp Rx (?) */ +#define AR5K_XRSTOMP_TX_RSSI 0x00000004 /* Stomp Tx RSSI (?) */ +#define AR5K_XRSTOMP_TX_BSSID 0x00000008 /* Stomp Tx BSSID (?) */ +#define AR5K_XRSTOMP_DATA 0x00000010 /* Stomp data (?)*/ +#define AR5K_XRSTOMP_RSSI_THRES 0x0000ff00 /* Mask for XR RSSI threshold */ + +/* + * First enhanced sleep register + */ +#define AR5K_SLEEP0 0x80d4 /* Register Address */ +#define AR5K_SLEEP0_NEXT_DTIM 0x0007ffff /* Mask for next DTIM (?) */ +#define AR5K_SLEEP0_NEXT_DTIM_S 0 +#define AR5K_SLEEP0_ASSUME_DTIM 0x00080000 /* Assume DTIM */ +#define AR5K_SLEEP0_ENH_SLEEP_EN 0x00100000 /* Enable enchanced sleep control */ +#define AR5K_SLEEP0_CABTO 0xff000000 /* Mask for CAB Time Out */ +#define AR5K_SLEEP0_CABTO_S 24 + +/* + * Second enhanced sleep register + */ +#define AR5K_SLEEP1 0x80d8 /* Register Address */ +#define AR5K_SLEEP1_NEXT_TIM 0x0007ffff /* Mask for next TIM (?) */ +#define AR5K_SLEEP1_NEXT_TIM_S 0 +#define AR5K_SLEEP1_BEACON_TO 0xff000000 /* Mask for Beacon Time Out */ +#define AR5K_SLEEP1_BEACON_TO_S 24 + +/* + * Third enhanced sleep register + */ +#define AR5K_SLEEP2 0x80dc /* Register Address */ +#define AR5K_SLEEP2_TIM_PER 0x0000ffff /* Mask for TIM period (?) */ +#define AR5K_SLEEP2_TIM_PER_S 0 +#define AR5K_SLEEP2_DTIM_PER 0xffff0000 /* Mask for DTIM period (?) */ +#define AR5K_SLEEP2_DTIM_PER_S 16 + +/* + * TX power control (TPC) register + * + * XXX: PCDAC steps (0.5dbm) or DBM ? 
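+ *
+ * Whatever the unit turns out to be, the per-frame-type power levels are
+ * packed with the masks/shifts below; a hypothetical sketch (the tx_pwr_*
+ * variables are only for illustration):
+ *
+ *     u32 txpc = ((tx_pwr_ack << AR5K_TXPC_ACK_S) & AR5K_TXPC_ACK_M) |
+ *                ((tx_pwr_cts << AR5K_TXPC_CTS_S) & AR5K_TXPC_CTS_M) |
+ *                ((tx_pwr_chirp << AR5K_TXPC_CHIRP_S) & AR5K_TXPC_CHIRP_M);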
+ * + */ +#define AR5K_TXPC 0x80e8 /* Register Address */ +#define AR5K_TXPC_ACK_M 0x0000003f /* ACK tx power */ +#define AR5K_TXPC_ACK_S 0 +#define AR5K_TXPC_CTS_M 0x00003f00 /* CTS tx power */ +#define AR5K_TXPC_CTS_S 8 +#define AR5K_TXPC_CHIRP_M 0x003f0000 /* CHIRP tx power */ +#define AR5K_TXPC_CHIRP_S 16 +#define AR5K_TXPC_DOPPLER 0x0f000000 /* Doppler chirp span (?) */ +#define AR5K_TXPC_DOPPLER_S 24 + +/* + * Profile count registers + */ +#define AR5K_PROFCNT_TX 0x80ec /* Tx count */ +#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */ +#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */ +#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */ + +/* + * Quiet period control registers + */ +#define AR5K_QUIET_CTL1 0x80fc /* Register Address */ +#define AR5K_QUIET_CTL1_NEXT_QT_TSF 0x0000ffff /* Next quiet period TSF (TU) */ +#define AR5K_QUIET_CTL1_NEXT_QT_TSF_S 0 +#define AR5K_QUIET_CTL1_QT_EN 0x00010000 /* Enable quiet period */ +#define AR5K_QUIET_CTL1_ACK_CTS_EN 0x00020000 /* Send ACK/CTS during quiet period */ + +#define AR5K_QUIET_CTL2 0x8100 /* Register Address */ +#define AR5K_QUIET_CTL2_QT_PER 0x0000ffff /* Mask for quiet period periodicity */ +#define AR5K_QUIET_CTL2_QT_PER_S 0 +#define AR5K_QUIET_CTL2_QT_DUR 0xffff0000 /* Mask for quiet period duration */ +#define AR5K_QUIET_CTL2_QT_DUR_S 16 + +/* + * TSF parameter register + */ +#define AR5K_TSF_PARM 0x8104 /* Register Address */ +#define AR5K_TSF_PARM_INC 0x000000ff /* Mask for TSF increment */ +#define AR5K_TSF_PARM_INC_S 0 + +/* + * QoS NOACK policy + */ +#define AR5K_QOS_NOACK 0x8108 /* Register Address */ +#define AR5K_QOS_NOACK_2BIT_VALUES 0x0000000f /* ??? */ +#define AR5K_QOS_NOACK_2BIT_VALUES_S 0 +#define AR5K_QOS_NOACK_BIT_OFFSET 0x00000070 /* ??? */ +#define AR5K_QOS_NOACK_BIT_OFFSET_S 4 +#define AR5K_QOS_NOACK_BYTE_OFFSET 0x00000180 /* ??? */ +#define AR5K_QOS_NOACK_BYTE_OFFSET_S 7 + +/* + * PHY error filter register + */ +#define AR5K_PHY_ERR_FIL 0x810c +#define AR5K_PHY_ERR_FIL_RADAR 0x00000020 /* Radar signal */ +#define AR5K_PHY_ERR_FIL_OFDM 0x00020000 /* OFDM false detect (ANI) */ +#define AR5K_PHY_ERR_FIL_CCK 0x02000000 /* CCK false detect (ANI) */ + +/* + * XR latency register + */ +#define AR5K_XRLAT_TX 0x8110 + +/* + * ACK SIFS register + */ +#define AR5K_ACKSIFS 0x8114 /* Register Address */ +#define AR5K_ACKSIFS_INC 0x00000000 /* ACK SIFS Increment (field) */ + +/* + * MIC QoS control register (?) + */ +#define AR5K_MIC_QOS_CTL 0x8118 /* Register Address */ +#define AR5K_MIC_QOS_CTL_OFF(_n) (1 << (_n * 2)) +#define AR5K_MIC_QOS_CTL_MQ_EN 0x00010000 /* Enable MIC QoS */ + +/* + * MIC QoS select register (?) + */ +#define AR5K_MIC_QOS_SEL 0x811c +#define AR5K_MIC_QOS_SEL_OFF(_n) (1 << (_n * 4)) + +/* + * Misc mode control register (?) + */ +#define AR5K_MISC_MODE 0x8120 /* Register Address */ +#define AR5K_MISC_MODE_FBSSID_MATCH 0x00000001 /* Force BSSID match */ +#define AR5K_MISC_MODE_ACKSIFS_MEM 0x00000002 /* ACK SIFS memory (?) */ +#define AR5K_MISC_MODE_COMBINED_MIC 0x00000004 /* use rx/tx MIC key */ +/* more bits */ + +/* + * OFDM Filter counter + */ +#define AR5K_OFDM_FIL_CNT 0x8124 + +/* + * CCK Filter counter + */ +#define AR5K_CCK_FIL_CNT 0x8128 + +/* + * PHY Error Counters (?) + */ +#define AR5K_PHYERR_CNT1 0x812c +#define AR5K_PHYERR_CNT1_MASK 0x8130 + +#define AR5K_PHYERR_CNT2 0x8134 +#define AR5K_PHYERR_CNT2_MASK 0x8138 + +/* + * TSF Threshold register (?) 
+ */
+#define AR5K_TSF_THRES 0x813c
+
+/*
+ * TODO: Wake On Wireless registers
+ * Range: 0x8147 - 0x818c
+ */
+
+/*
+ * Rate -> ACK SIFS mapping table (32 entries)
+ */
+#define AR5K_RATE_ACKSIFS_BASE 0x8680 /* Register Address */
+#define AR5K_RATE_ACKSIFS(_n) (AR5K_RATE_ACKSIFS_BASE + ((_n) << 2))
+#define AR5K_RATE_ACKSIFS_NORMAL 0x00000001 /* Normal SIFS (field) */
+#define AR5K_RATE_ACKSIFS_TURBO 0x00000400 /* Turbo SIFS (field) */
+
+/*
+ * Rate -> duration mapping table (32 entries)
+ */
+#define AR5K_RATE_DUR_BASE 0x8700
+#define AR5K_RATE_DUR(_n) (AR5K_RATE_DUR_BASE + ((_n) << 2))
+
+/*
+ * Rate -> db mapping table
+ * (8 entries, each one has 4 8bit fields)
+ */
+#define AR5K_RATE2DB_BASE 0x87c0
+#define AR5K_RATE2DB(_n) (AR5K_RATE2DB_BASE + ((_n) << 2))
+
+/*
+ * db -> Rate mapping table
+ * (8 entries, each one has 4 8bit fields)
+ */
+#define AR5K_DB2RATE_BASE 0x87e0
+#define AR5K_DB2RATE(_n) (AR5K_DB2RATE_BASE + ((_n) << 2))
+
+/*===5212 end===*/
+
+/*
+ * Key table (WEP) register
+ */
+#define AR5K_KEYTABLE_0_5210 0x9000
+#define AR5K_KEYTABLE_0_5211 0x8800
+#define AR5K_KEYTABLE_5210(_n) (AR5K_KEYTABLE_0_5210 + ((_n) << 5))
+#define AR5K_KEYTABLE_5211(_n) (AR5K_KEYTABLE_0_5211 + ((_n) << 5))
+#define AR5K_KEYTABLE(_n) (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_KEYTABLE_5210(_n) : AR5K_KEYTABLE_5211(_n))
+#define AR5K_KEYTABLE_OFF(_n, x) (AR5K_KEYTABLE(_n) + (x << 2))
+#define AR5K_KEYTABLE_TYPE(_n) AR5K_KEYTABLE_OFF(_n, 5)
+#define AR5K_KEYTABLE_TYPE_40 0x00000000
+#define AR5K_KEYTABLE_TYPE_104 0x00000001
+#define AR5K_KEYTABLE_TYPE_128 0x00000003
+#define AR5K_KEYTABLE_TYPE_TKIP 0x00000004 /* [5212+] */
+#define AR5K_KEYTABLE_TYPE_AES 0x00000005 /* [5211+] */
+#define AR5K_KEYTABLE_TYPE_CCM 0x00000006 /* [5212+] */
+#define AR5K_KEYTABLE_TYPE_NULL 0x00000007 /* [5211+] */
+#define AR5K_KEYTABLE_ANTENNA 0x00000008 /* [5212+] */
+#define AR5K_KEYTABLE_MAC0(_n) AR5K_KEYTABLE_OFF(_n, 6)
+#define AR5K_KEYTABLE_MAC1(_n) AR5K_KEYTABLE_OFF(_n, 7)
+#define AR5K_KEYTABLE_VALID 0x00008000
+
+/* If key type is TKIP and MIC is enabled
+ * MIC key goes in offset entry + 64 */
+#define AR5K_KEYTABLE_MIC_OFFSET 64
+
+/* WEP 40-bit = 40-bit entered key + 24 bit IV = 64-bit
+ * WEP 104-bit = 104-bit entered key + 24-bit IV = 128-bit
+ * WEP 128-bit = 128-bit entered key + 24 bit IV = 152-bit
+ *
+ * Some vendors have introduced bigger WEP keys to address
+ * security vulnerabilities in WEP. This includes:
+ *
+ * WEP 232-bit = 232-bit entered key + 24 bit IV = 256-bit
+ *
+ * We can expand this if we find ar5k Atheros cards with a larger
+ * key table size.
+ */
+#define AR5K_KEYTABLE_SIZE_5210 64
+#define AR5K_KEYTABLE_SIZE_5211 128
+#define AR5K_KEYTABLE_SIZE (ah->ah_version == AR5K_AR5210 ? \
+ AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211)
+
+
+/*===PHY REGISTERS===*/
+
+/*
+ * PHY registers start
+ */
+#define AR5K_PHY_BASE 0x9800
+#define AR5K_PHY(_n) (AR5K_PHY_BASE + ((_n) << 2))
+
+/*
+ * TST_2 (Misc config parameters)
+ */
+#define AR5K_PHY_TST2 0x9800 /* Register Address */
+#define AR5K_PHY_TST2_TRIG_SEL 0x00000007 /* Trigger select (?)*/
+#define AR5K_PHY_TST2_TRIG 0x00000010 /* Trigger (?) */
+#define AR5K_PHY_TST2_CBUS_MODE 0x00000060 /* Cardbus mode (?) */
+#define AR5K_PHY_TST2_CLK32 0x00000400 /* CLK_OUT is CLK32 (32Khz external) */
+#define AR5K_PHY_TST2_CHANCOR_DUMP_EN 0x00000800 /* Enable Chancor dump (?) */
+#define AR5K_PHY_TST2_EVEN_CHANCOR_DUMP 0x00001000 /* Even Chancor dump (?)
*/ +#define AR5K_PHY_TST2_RFSILENT_EN 0x00002000 /* Enable RFSILENT */ +#define AR5K_PHY_TST2_ALT_RFDATA 0x00004000 /* Alternate RFDATA (5-2GHz switch ?) */ +#define AR5K_PHY_TST2_MINI_OBS_EN 0x00008000 /* Enable mini OBS (?) */ +#define AR5K_PHY_TST2_RX2_IS_RX5_INV 0x00010000 /* 2GHz rx path is the 5GHz path inverted (?) */ +#define AR5K_PHY_TST2_SLOW_CLK160 0x00020000 /* Slow CLK160 (?) */ +#define AR5K_PHY_TST2_AGC_OBS_SEL_3 0x00040000 /* AGC OBS Select 3 (?) */ +#define AR5K_PHY_TST2_BBB_OBS_SEL 0x00080000 /* BB OBS Select (field ?) */ +#define AR5K_PHY_TST2_ADC_OBS_SEL 0x00800000 /* ADC OBS Select (field ?) */ +#define AR5K_PHY_TST2_RX_CLR_SEL 0x08000000 /* RX Clear Select (?) */ +#define AR5K_PHY_TST2_FORCE_AGC_CLR 0x10000000 /* Force AGC clear (?) */ +#define AR5K_PHY_SHIFT_2GHZ 0x00004007 /* Used to access 2GHz radios */ +#define AR5K_PHY_SHIFT_5GHZ 0x00000007 /* Used to access 5GHz radios (default) */ + +/* + * PHY frame control register [5110] /turbo mode register [5111+] + * + * There is another frame control register for [5111+] + * at address 0x9944 (see below) but the 2 first flags + * are common here between 5110 frame control register + * and [5111+] turbo mode register, so this also works as + * a "turbo mode register" for 5110. We treat this one as + * a frame control register for 5110 below. + */ +#define AR5K_PHY_TURBO 0x9804 /* Register Address */ +#define AR5K_PHY_TURBO_MODE 0x00000001 /* Enable turbo mode */ +#define AR5K_PHY_TURBO_SHORT 0x00000002 /* Set short symbols to turbo mode */ +#define AR5K_PHY_TURBO_MIMO 0x00000004 /* Set turbo for mimo mimo */ + +/* + * PHY agility command register + * (aka TST_1) + */ +#define AR5K_PHY_AGC 0x9808 /* Register Address */ +#define AR5K_PHY_TST1 0x9808 +#define AR5K_PHY_AGC_DISABLE 0x08000000 /* Disable AGC to A2 (?)*/ +#define AR5K_PHY_TST1_TXHOLD 0x00003800 /* Set tx hold (?) */ +#define AR5K_PHY_TST1_TXSRC_SRC 0x00000002 /* Used with bit 7 (?) */ +#define AR5K_PHY_TST1_TXSRC_SRC_S 1 +#define AR5K_PHY_TST1_TXSRC_ALT 0x00000080 /* Set input to tsdac (?) 
*/ +#define AR5K_PHY_TST1_TXSRC_ALT_S 7 + + +/* + * PHY timing register 3 [5112+] + */ +#define AR5K_PHY_TIMING_3 0x9814 +#define AR5K_PHY_TIMING_3_DSC_MAN 0xfffe0000 +#define AR5K_PHY_TIMING_3_DSC_MAN_S 17 +#define AR5K_PHY_TIMING_3_DSC_EXP 0x0001e000 +#define AR5K_PHY_TIMING_3_DSC_EXP_S 13 + +/* + * PHY chip revision register + */ +#define AR5K_PHY_CHIP_ID 0x9818 + +/* + * PHY activation register + */ +#define AR5K_PHY_ACT 0x981c /* Register Address */ +#define AR5K_PHY_ACT_ENABLE 0x00000001 /* Activate PHY */ +#define AR5K_PHY_ACT_DISABLE 0x00000002 /* Deactivate PHY */ + +/* + * PHY RF control registers + */ +#define AR5K_PHY_RF_CTL2 0x9824 /* Register Address */ +#define AR5K_PHY_RF_CTL2_TXF2TXD_START 0x0000000f /* TX frame to TX data start */ +#define AR5K_PHY_RF_CTL2_TXF2TXD_START_S 0 + +#define AR5K_PHY_RF_CTL3 0x9828 /* Register Address */ +#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON 0x0000ff00 /* TX end to XLNA on */ +#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON_S 8 + +#define AR5K_PHY_ADC_CTL 0x982c +#define AR5K_PHY_ADC_CTL_INBUFGAIN_OFF 0x00000003 +#define AR5K_PHY_ADC_CTL_INBUFGAIN_OFF_S 0 +#define AR5K_PHY_ADC_CTL_PWD_DAC_OFF 0x00002000 +#define AR5K_PHY_ADC_CTL_PWD_BAND_GAP_OFF 0x00004000 +#define AR5K_PHY_ADC_CTL_PWD_ADC_OFF 0x00008000 +#define AR5K_PHY_ADC_CTL_INBUFGAIN_ON 0x00030000 +#define AR5K_PHY_ADC_CTL_INBUFGAIN_ON_S 16 + +#define AR5K_PHY_RF_CTL4 0x9834 /* Register Address */ +#define AR5K_PHY_RF_CTL4_TXF2XPA_A_ON 0x00000001 /* TX frame to XPA A on (field) */ +#define AR5K_PHY_RF_CTL4_TXF2XPA_B_ON 0x00000100 /* TX frame to XPA B on (field) */ +#define AR5K_PHY_RF_CTL4_TXE2XPA_A_OFF 0x00010000 /* TX end to XPA A off (field) */ +#define AR5K_PHY_RF_CTL4_TXE2XPA_B_OFF 0x01000000 /* TX end to XPA B off (field) */ + +/* + * Pre-Amplifier control register + * (XPA -> external pre-amplifier) + */ +#define AR5K_PHY_PA_CTL 0x9838 /* Register Address */ +#define AR5K_PHY_PA_CTL_XPA_A_HI 0x00000001 /* XPA A high (?) */ +#define AR5K_PHY_PA_CTL_XPA_B_HI 0x00000002 /* XPA B high (?) */ +#define AR5K_PHY_PA_CTL_XPA_A_EN 0x00000004 /* Enable XPA A */ +#define AR5K_PHY_PA_CTL_XPA_B_EN 0x00000008 /* Enable XPA B */ + +/* + * PHY settling register + */ +#define AR5K_PHY_SETTLING 0x9844 /* Register Address */ +#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */ +#define AR5K_PHY_SETTLING_AGC_S 0 +#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settlig time */ +#define AR5K_PHY_SETTLING_SWITCH_S 7 + +/* + * PHY Gain registers + */ +#define AR5K_PHY_GAIN 0x9848 /* Register Address */ +#define AR5K_PHY_GAIN_TXRX_ATTEN 0x0003f000 /* TX-RX Attenuation */ +#define AR5K_PHY_GAIN_TXRX_ATTEN_S 12 +#define AR5K_PHY_GAIN_TXRX_RF_MAX 0x007c0000 +#define AR5K_PHY_GAIN_TXRX_RF_MAX_S 18 + +#define AR5K_PHY_GAIN_OFFSET 0x984c /* Register Address */ +#define AR5K_PHY_GAIN_OFFSET_RXTX_FLAG 0x00020000 /* RX-TX flag (?) 
*/ + +/* + * Desired ADC/PGA size register + * (for more infos read ANI patent) + */ +#define AR5K_PHY_DESIRED_SIZE 0x9850 /* Register Address */ +#define AR5K_PHY_DESIRED_SIZE_ADC 0x000000ff /* ADC desired size */ +#define AR5K_PHY_DESIRED_SIZE_ADC_S 0 +#define AR5K_PHY_DESIRED_SIZE_PGA 0x0000ff00 /* PGA desired size */ +#define AR5K_PHY_DESIRED_SIZE_PGA_S 8 +#define AR5K_PHY_DESIRED_SIZE_TOT 0x0ff00000 /* Total desired size */ +#define AR5K_PHY_DESIRED_SIZE_TOT_S 20 + +/* + * PHY signal register + * (for more infos read ANI patent) + */ +#define AR5K_PHY_SIG 0x9858 /* Register Address */ +#define AR5K_PHY_SIG_FIRSTEP 0x0003f000 /* FIRSTEP */ +#define AR5K_PHY_SIG_FIRSTEP_S 12 +#define AR5K_PHY_SIG_FIRPWR 0x03fc0000 /* FIPWR */ +#define AR5K_PHY_SIG_FIRPWR_S 18 + +/* + * PHY coarse agility control register + * (for more infos read ANI patent) + */ +#define AR5K_PHY_AGCCOARSE 0x985c /* Register Address */ +#define AR5K_PHY_AGCCOARSE_LO 0x00007f80 /* AGC Coarse low */ +#define AR5K_PHY_AGCCOARSE_LO_S 7 +#define AR5K_PHY_AGCCOARSE_HI 0x003f8000 /* AGC Coarse high */ +#define AR5K_PHY_AGCCOARSE_HI_S 15 + +/* + * PHY agility control register + */ +#define AR5K_PHY_AGCCTL 0x9860 /* Register address */ +#define AR5K_PHY_AGCCTL_CAL 0x00000001 /* Enable PHY calibration */ +#define AR5K_PHY_AGCCTL_NF 0x00000002 /* Enable Noise Floor calibration */ +#define AR5K_PHY_AGCCTL_OFDM_DIV_DIS 0x00000008 /* Disable antenna diversity on OFDM modes */ +#define AR5K_PHY_AGCCTL_NF_EN 0x00008000 /* Enable nf calibration to happen (?) */ +#define AR5K_PHY_AGCTL_FLTR_CAL 0x00010000 /* Allow filter calibration (?) */ +#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automaticaly */ + +/* + * PHY noise floor status register (CCA = Clear Channel Assessment) + */ +#define AR5K_PHY_NF 0x9864 /* Register address */ +#define AR5K_PHY_NF_M 0x000001ff /* Noise floor, written to hardware in 1/2 dBm units */ +#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9)) +#define AR5K_PHY_NF_THRESH62 0x0007f000 /* Thresh62 -check ANI patent- (field) */ +#define AR5K_PHY_NF_THRESH62_S 12 +#define AR5K_PHY_NF_MINCCA_PWR 0x0ff80000 /* Minimum measured noise level, read from hardware in 1 dBm units */ +#define AR5K_PHY_NF_MINCCA_PWR_S 19 + +/* + * PHY ADC saturation register [5110] + */ +#define AR5K_PHY_ADCSAT 0x9868 +#define AR5K_PHY_ADCSAT_ICNT 0x0001f800 +#define AR5K_PHY_ADCSAT_ICNT_S 11 +#define AR5K_PHY_ADCSAT_THR 0x000007e0 +#define AR5K_PHY_ADCSAT_THR_S 5 + +/* + * PHY Weak ofdm signal detection threshold registers (ANI) [5212+] + */ + +/* High thresholds */ +#define AR5K_PHY_WEAK_OFDM_HIGH_THR 0x9868 +#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT 0x0000001f +#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT_S 0 +#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M1 0x00fe0000 +#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M1_S 17 +#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2 0x7f000000 +#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_S 24 + +/* Low thresholds */ +#define AR5K_PHY_WEAK_OFDM_LOW_THR 0x986c +#define AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN 0x00000001 +#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT 0x00003f00 +#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT_S 8 +#define AR5K_PHY_WEAK_OFDM_LOW_THR_M1 0x001fc000 +#define AR5K_PHY_WEAK_OFDM_LOW_THR_M1_S 14 +#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2 0x0fe00000 +#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_S 21 + + +/* + * PHY sleep registers [5112+] + */ +#define AR5K_PHY_SCR 0x9870 + +#define AR5K_PHY_SLMT 0x9874 +#define AR5K_PHY_SLMT_32MHZ 0x0000007f + +#define AR5K_PHY_SCAL 0x9878 +#define 
AR5K_PHY_SCAL_32MHZ 0x0000000e +#define AR5K_PHY_SCAL_32MHZ_2417 0x0000000a +#define AR5K_PHY_SCAL_32MHZ_HB63 0x00000032 + +/* + * PHY PLL (Phase Locked Loop) control register + */ +#define AR5K_PHY_PLL 0x987c +#define AR5K_PHY_PLL_20MHZ 0x00000013 /* For half rate (?) */ +/* 40MHz -> 5GHz band */ +#define AR5K_PHY_PLL_40MHZ_5211 0x00000018 +#define AR5K_PHY_PLL_40MHZ_5212 0x000000aa +#define AR5K_PHY_PLL_40MHZ_5413 0x00000004 +#define AR5K_PHY_PLL_40MHZ (ah->ah_version == AR5K_AR5211 ? \ + AR5K_PHY_PLL_40MHZ_5211 : AR5K_PHY_PLL_40MHZ_5212) +/* 44MHz -> 2.4GHz band */ +#define AR5K_PHY_PLL_44MHZ_5211 0x00000019 +#define AR5K_PHY_PLL_44MHZ_5212 0x000000ab +#define AR5K_PHY_PLL_44MHZ (ah->ah_version == AR5K_AR5211 ? \ + AR5K_PHY_PLL_44MHZ_5211 : AR5K_PHY_PLL_44MHZ_5212) + +#define AR5K_PHY_PLL_RF5111 0x00000000 +#define AR5K_PHY_PLL_RF5112 0x00000040 +#define AR5K_PHY_PLL_HALF_RATE 0x00000100 +#define AR5K_PHY_PLL_QUARTER_RATE 0x00000200 + +/* + * RF Buffer register + * + * It's obvious from the code that 0x989c is the buffer register but + * for the other special registers that we write to after sending each + * packet, i have no idea. So i'll name them BUFFER_CONTROL_X registers + * for now. It's interesting that they are also used for some other operations. + */ + +#define AR5K_RF_BUFFER 0x989c +#define AR5K_RF_BUFFER_CONTROL_0 0x98c0 /* Channel on 5110 */ +#define AR5K_RF_BUFFER_CONTROL_1 0x98c4 /* Bank 7 on 5112 */ +#define AR5K_RF_BUFFER_CONTROL_2 0x98cc /* Bank 7 on 5111 */ + +#define AR5K_RF_BUFFER_CONTROL_3 0x98d0 /* Bank 2 on 5112 */ + /* Channel set on 5111 */ + /* Used to read radio revision*/ + +#define AR5K_RF_BUFFER_CONTROL_4 0x98d4 /* RF Stage register on 5110 */ + /* Bank 0,1,2,6 on 5111 */ + /* Bank 1 on 5112 */ + /* Used during activation on 5111 */ + +#define AR5K_RF_BUFFER_CONTROL_5 0x98d8 /* Bank 3 on 5111 */ + /* Used during activation on 5111 */ + /* Channel on 5112 */ + /* Bank 6 on 5112 */ + +#define AR5K_RF_BUFFER_CONTROL_6 0x98dc /* Bank 3 on 5112 */ + +/* + * PHY RF stage register [5210] + */ +#define AR5K_PHY_RFSTG 0x98d4 +#define AR5K_PHY_RFSTG_DISABLE 0x00000021 + +/* + * BIN masks (?) + */ +#define AR5K_PHY_BIN_MASK_1 0x9900 +#define AR5K_PHY_BIN_MASK_2 0x9904 +#define AR5K_PHY_BIN_MASK_3 0x9908 + +#define AR5K_PHY_BIN_MASK_CTL 0x990c +#define AR5K_PHY_BIN_MASK_CTL_MASK_4 0x00003fff +#define AR5K_PHY_BIN_MASK_CTL_MASK_4_S 0 +#define AR5K_PHY_BIN_MASK_CTL_RATE 0xff000000 +#define AR5K_PHY_BIN_MASK_CTL_RATE_S 24 + +/* + * PHY Antenna control register + */ +#define AR5K_PHY_ANT_CTL 0x9910 /* Register Address */ +#define AR5K_PHY_ANT_CTL_TXRX_EN 0x00000001 /* Enable TX/RX (?) */ +#define AR5K_PHY_ANT_CTL_SECTORED_ANT 0x00000004 /* Sectored Antenna */ +#define AR5K_PHY_ANT_CTL_HITUNE5 0x00000008 /* Hitune5 (?) */ +#define AR5K_PHY_ANT_CTL_SWTABLE_IDLE 0x000003f0 /* Switch table idle (?) */ +#define AR5K_PHY_ANT_CTL_SWTABLE_IDLE_S 4 + +/* + * PHY receiver delay register [5111+] + */ +#define AR5K_PHY_RX_DELAY 0x9914 /* Register Address */ +#define AR5K_PHY_RX_DELAY_M 0x00003fff /* Mask for RX activate to receive delay (/100ns) */ + +/* + * PHY max rx length register (?) 
[5111] + */ +#define AR5K_PHY_MAX_RX_LEN 0x991c + +/* + * PHY timing register 4 + * I(nphase)/Q(adrature) calibration register [5111+] + */ +#define AR5K_PHY_IQ 0x9920 /* Register Address */ +#define AR5K_PHY_IQ_CORR_Q_Q_COFF 0x0000001f /* Mask for q correction info */ +#define AR5K_PHY_IQ_CORR_Q_I_COFF 0x000007e0 /* Mask for i correction info */ +#define AR5K_PHY_IQ_CORR_Q_I_COFF_S 5 +#define AR5K_PHY_IQ_CORR_ENABLE 0x00000800 /* Enable i/q correction */ +#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX 0x0000f000 /* Mask for max number of samples in log scale */ +#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX_S 12 +#define AR5K_PHY_IQ_RUN 0x00010000 /* Run i/q calibration */ +#define AR5K_PHY_IQ_USE_PT_DF 0x00020000 /* Use pilot track df (?) */ +#define AR5K_PHY_IQ_EARLY_TRIG_THR 0x00200000 /* Early trigger threshold (?) (field) */ +#define AR5K_PHY_IQ_PILOT_MASK_EN 0x10000000 /* Enable pilot mask (?) */ +#define AR5K_PHY_IQ_CHAN_MASK_EN 0x20000000 /* Enable channel mask (?) */ +#define AR5K_PHY_IQ_SPUR_FILT_EN 0x40000000 /* Enable spur filter */ +#define AR5K_PHY_IQ_SPUR_RSSI_EN 0x80000000 /* Enable spur rssi */ + +/* + * PHY timing register 5 + * OFDM Self-correlator Cyclic RSSI threshold params + * (Check out bb_cycpwr_thr1 on ANI patent) + */ +#define AR5K_PHY_OFDM_SELFCORR 0x9924 /* Register Address */ +#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_EN 0x00000001 /* Enable cyclic RSSI thr 1 */ +#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1 0x000000fe /* Mask for Cyclic RSSI threshold 1 */ +#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_S 1 +#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR3 0x00000100 /* Cyclic RSSI threshold 3 (field) (?) */ +#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR_EN 0x00008000 /* Enable 1A RSSI threshold (?) */ +#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR 0x00010000 /* 1A RSSI threshold (field) (?) */ +#define AR5K_PHY_OFDM_SELFCORR_LSCTHR_HIRSSI 0x00800000 /* Long sc threshold hi rssi (?) */ + +/* + * PHY-only warm reset register + */ +#define AR5K_PHY_WARM_RESET 0x9928 + +/* + * PHY-only control register + */ +#define AR5K_PHY_CTL 0x992c /* Register Address */ +#define AR5K_PHY_CTL_RX_DRAIN_RATE 0x00000001 /* RX drain rate (?) */ +#define AR5K_PHY_CTL_LATE_TX_SIG_SYM 0x00000002 /* Late tx signal symbol (?) 
*/ +#define AR5K_PHY_CTL_GEN_SCRAMBLER 0x00000004 /* Generate scrambler */ +#define AR5K_PHY_CTL_TX_ANT_SEL 0x00000008 /* TX antenna select */ +#define AR5K_PHY_CTL_TX_ANT_STATIC 0x00000010 /* Static TX antenna */ +#define AR5K_PHY_CTL_RX_ANT_SEL 0x00000020 /* RX antenna select */ +#define AR5K_PHY_CTL_RX_ANT_STATIC 0x00000040 /* Static RX antenna */ +#define AR5K_PHY_CTL_LOW_FREQ_SLE_EN 0x00000080 /* Enable low freq sleep */ + +/* + * PHY PAPD probe register [5111+] + */ +#define AR5K_PHY_PAPD_PROBE 0x9930 +#define AR5K_PHY_PAPD_PROBE_SH_HI_PAR 0x00000001 +#define AR5K_PHY_PAPD_PROBE_PCDAC_BIAS 0x00000002 +#define AR5K_PHY_PAPD_PROBE_COMP_GAIN 0x00000040 +#define AR5K_PHY_PAPD_PROBE_TXPOWER 0x00007e00 +#define AR5K_PHY_PAPD_PROBE_TXPOWER_S 9 +#define AR5K_PHY_PAPD_PROBE_TX_NEXT 0x00008000 +#define AR5K_PHY_PAPD_PROBE_PREDIST_EN 0x00010000 +#define AR5K_PHY_PAPD_PROBE_TYPE 0x01800000 /* [5112+] */ +#define AR5K_PHY_PAPD_PROBE_TYPE_S 23 +#define AR5K_PHY_PAPD_PROBE_TYPE_OFDM 0 +#define AR5K_PHY_PAPD_PROBE_TYPE_XR 1 +#define AR5K_PHY_PAPD_PROBE_TYPE_CCK 2 +#define AR5K_PHY_PAPD_PROBE_GAINF 0xfe000000 +#define AR5K_PHY_PAPD_PROBE_GAINF_S 25 +#define AR5K_PHY_PAPD_PROBE_INI_5111 0x00004883 /* [5212+] */ +#define AR5K_PHY_PAPD_PROBE_INI_5112 0x00004882 /* [5212+] */ + +/* + * PHY TX rate power registers [5112+] + */ +#define AR5K_PHY_TXPOWER_RATE1 0x9934 +#define AR5K_PHY_TXPOWER_RATE2 0x9938 +#define AR5K_PHY_TXPOWER_RATE_MAX 0x993c +#define AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE 0x00000040 +#define AR5K_PHY_TXPOWER_RATE3 0xa234 +#define AR5K_PHY_TXPOWER_RATE4 0xa238 + +/* + * PHY frame control register [5111+] + */ +#define AR5K_PHY_FRAME_CTL_5210 0x9804 +#define AR5K_PHY_FRAME_CTL_5211 0x9944 +#define AR5K_PHY_FRAME_CTL (ah->ah_version == AR5K_AR5210 ? \ + AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211) +/*---[5111+]---*/ +#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 /* Mask for tx clip (?) */ +#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3 +#define AR5K_PHY_FRAME_CTL_PREP_CHINFO 0x00010000 /* Prepend chan info */ +#define AR5K_PHY_FRAME_CTL_EMU 0x80000000 +#define AR5K_PHY_FRAME_CTL_EMU_S 31 +/*---[5110/5111]---*/ +#define AR5K_PHY_FRAME_CTL_TIMING_ERR 0x01000000 /* PHY timing error */ +#define AR5K_PHY_FRAME_CTL_PARITY_ERR 0x02000000 /* Parity error */ +#define AR5K_PHY_FRAME_CTL_ILLRATE_ERR 0x04000000 /* Illegal rate */ +#define AR5K_PHY_FRAME_CTL_ILLLEN_ERR 0x08000000 /* Illegal length */ +#define AR5K_PHY_FRAME_CTL_SERVICE_ERR 0x20000000 +#define AR5K_PHY_FRAME_CTL_TXURN_ERR 0x40000000 /* TX underrun */ +#define AR5K_PHY_FRAME_CTL_INI AR5K_PHY_FRAME_CTL_SERVICE_ERR | \ + AR5K_PHY_FRAME_CTL_TXURN_ERR | \ + AR5K_PHY_FRAME_CTL_ILLLEN_ERR | \ + AR5K_PHY_FRAME_CTL_ILLRATE_ERR | \ + AR5K_PHY_FRAME_CTL_PARITY_ERR | \ + AR5K_PHY_FRAME_CTL_TIMING_ERR + +/* + * PHY Tx Power adjustment register [5212A+] + */ +#define AR5K_PHY_TX_PWR_ADJ 0x994c +#define AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA 0x00000fc0 +#define AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA_S 6 +#define AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX 0x00fc0000 +#define AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX_S 18 + +/* + * PHY radar detection register [5111+] + */ +#define AR5K_PHY_RADAR 0x9954 +#define AR5K_PHY_RADAR_ENABLE 0x00000001 +#define AR5K_PHY_RADAR_DISABLE 0x00000000 +#define AR5K_PHY_RADAR_INBANDTHR 0x0000003e /* Inband threshold + 5-bits, units unknown {0..31} + (? MHz ?) */ +#define AR5K_PHY_RADAR_INBANDTHR_S 1 + +#define AR5K_PHY_RADAR_PRSSI_THR 0x00000fc0 /* Pulse RSSI/SNR threshold + 6-bits, dBm range {0..63} + in dBm units. 
*/ +#define AR5K_PHY_RADAR_PRSSI_THR_S 6 + +#define AR5K_PHY_RADAR_PHEIGHT_THR 0x0003f000 /* Pulse height threshold + 6-bits, dBm range {0..63} + in dBm units. */ +#define AR5K_PHY_RADAR_PHEIGHT_THR_S 12 + +#define AR5K_PHY_RADAR_RSSI_THR 0x00fc0000 /* Radar RSSI/SNR threshold. + 6-bits, dBm range {0..63} + in dBm units. */ +#define AR5K_PHY_RADAR_RSSI_THR_S 18 + +#define AR5K_PHY_RADAR_FIRPWR_THR 0x7f000000 /* Finite Impulse Response + filter power out threshold. + 7-bits, standard power range + {0..127} in 1/2 dBm units. */ +#define AR5K_PHY_RADAR_FIRPWR_THRS 24 + +/* + * PHY antenna switch table registers + */ +#define AR5K_PHY_ANT_SWITCH_TABLE_0 0x9960 +#define AR5K_PHY_ANT_SWITCH_TABLE_1 0x9964 + +/* + * PHY Noise floor threshold + */ +#define AR5K_PHY_NFTHRES 0x9968 + +/* + * Sigma Delta register (?) [5213] + */ +#define AR5K_PHY_SIGMA_DELTA 0x996C +#define AR5K_PHY_SIGMA_DELTA_ADC_SEL 0x00000003 +#define AR5K_PHY_SIGMA_DELTA_ADC_SEL_S 0 +#define AR5K_PHY_SIGMA_DELTA_FILT2 0x000000f8 +#define AR5K_PHY_SIGMA_DELTA_FILT2_S 3 +#define AR5K_PHY_SIGMA_DELTA_FILT1 0x00001f00 +#define AR5K_PHY_SIGMA_DELTA_FILT1_S 8 +#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP 0x01ffe000 +#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP_S 13 + +/* + * RF restart register [5112+] (?) + */ +#define AR5K_PHY_RESTART 0x9970 /* restart */ +#define AR5K_PHY_RESTART_DIV_GC 0x001c0000 /* Fast diversity gc_limit (?) */ +#define AR5K_PHY_RESTART_DIV_GC_S 18 + +/* + * RF Bus access request register (for synth-oly channel switching) + */ +#define AR5K_PHY_RFBUS_REQ 0x997C +#define AR5K_PHY_RFBUS_REQ_REQUEST 0x00000001 + +/* + * Spur mitigation masks (?) + */ +#define AR5K_PHY_TIMING_7 0x9980 +#define AR5K_PHY_TIMING_8 0x9984 +#define AR5K_PHY_TIMING_8_PILOT_MASK_2 0x000fffff +#define AR5K_PHY_TIMING_8_PILOT_MASK_2_S 0 + +#define AR5K_PHY_BIN_MASK2_1 0x9988 +#define AR5K_PHY_BIN_MASK2_2 0x998c +#define AR5K_PHY_BIN_MASK2_3 0x9990 + +#define AR5K_PHY_BIN_MASK2_4 0x9994 +#define AR5K_PHY_BIN_MASK2_4_MASK_4 0x00003fff +#define AR5K_PHY_BIN_MASK2_4_MASK_4_S 0 + +#define AR5K_PHY_TIMING_9 0x9998 +#define AR5K_PHY_TIMING_10 0x999c +#define AR5K_PHY_TIMING_10_PILOT_MASK_2 0x000fffff +#define AR5K_PHY_TIMING_10_PILOT_MASK_2_S 0 + +/* + * Spur mitigation control + */ +#define AR5K_PHY_TIMING_11 0x99a0 /* Register address */ +#define AR5K_PHY_TIMING_11_SPUR_DELTA_PHASE 0x000fffff /* Spur delta phase */ +#define AR5K_PHY_TIMING_11_SPUR_DELTA_PHASE_S 0 +#define AR5K_PHY_TIMING_11_SPUR_FREQ_SD 0x3ff00000 /* Freq sigma delta */ +#define AR5K_PHY_TIMING_11_SPUR_FREQ_SD_S 20 +#define AR5K_PHY_TIMING_11_USE_SPUR_IN_AGC 0x40000000 /* Spur filter in AGC detector */ +#define AR5K_PHY_TIMING_11_USE_SPUR_IN_SELFCOR 0x80000000 /* Spur filter in OFDM self correlator */ + +/* + * Gain tables + */ +#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */ +#define AR5K_BB_GAIN(_n) (AR5K_BB_GAIN_BASE + ((_n) << 2)) +#define AR5K_RF_GAIN_BASE 0x9a00 /* RF Amplrifier Gain table base address */ +#define AR5K_RF_GAIN(_n) (AR5K_RF_GAIN_BASE + ((_n) << 2)) + +/* + * PHY timing IQ calibration result register [5111+] + */ +#define AR5K_PHY_IQRES_CAL_PWR_I 0x9c10 /* I (Inphase) power value */ +#define AR5K_PHY_IQRES_CAL_PWR_Q 0x9c14 /* Q (Quadrature) power value */ +#define AR5K_PHY_IQRES_CAL_CORR 0x9c18 /* I/Q Correlation */ + +/* + * PHY current RSSI register [5111+] + */ +#define AR5K_PHY_CURRENT_RSSI 0x9c1c + +/* + * PHY RF Bus grant register + */ +#define AR5K_PHY_RFBUS_GRANT 0x9c20 +#define AR5K_PHY_RFBUS_GRANT_OK 0x00000001 + +/* + * PHY ADC 
test register + */ +#define AR5K_PHY_ADC_TEST 0x9c24 +#define AR5K_PHY_ADC_TEST_I 0x00000001 +#define AR5K_PHY_ADC_TEST_Q 0x00000200 + +/* + * PHY DAC test register + */ +#define AR5K_PHY_DAC_TEST 0x9c28 +#define AR5K_PHY_DAC_TEST_I 0x00000001 +#define AR5K_PHY_DAC_TEST_Q 0x00000200 + +/* + * PHY PTAT register (?) + */ +#define AR5K_PHY_PTAT 0x9c2c + +/* + * PHY Illegal TX rate register [5112+] + */ +#define AR5K_PHY_BAD_TX_RATE 0x9c30 + +/* + * PHY SPUR Power register [5112+] + */ +#define AR5K_PHY_SPUR_PWR 0x9c34 /* Register Address */ +#define AR5K_PHY_SPUR_PWR_I 0x00000001 /* SPUR Power estimate for I (field) */ +#define AR5K_PHY_SPUR_PWR_Q 0x00000100 /* SPUR Power estimate for Q (field) */ +#define AR5K_PHY_SPUR_PWR_FILT 0x00010000 /* Power with SPUR removed (field) */ + +/* + * PHY Channel status register [5112+] (?) + */ +#define AR5K_PHY_CHAN_STATUS 0x9c38 +#define AR5K_PHY_CHAN_STATUS_BT_ACT 0x00000001 +#define AR5K_PHY_CHAN_STATUS_RX_CLR_RAW 0x00000002 +#define AR5K_PHY_CHAN_STATUS_RX_CLR_MAC 0x00000004 +#define AR5K_PHY_CHAN_STATUS_RX_CLR_PAP 0x00000008 + +/* + * Heavy clip enable register + */ +#define AR5K_PHY_HEAVY_CLIP_ENABLE 0x99e0 + +/* + * PHY clock sleep registers [5112+] + */ +#define AR5K_PHY_SCLOCK 0x99f0 +#define AR5K_PHY_SCLOCK_32MHZ 0x0000000c +#define AR5K_PHY_SDELAY 0x99f4 +#define AR5K_PHY_SDELAY_32MHZ 0x000000ff +#define AR5K_PHY_SPENDING 0x99f8 + + +/* + * PHY PAPD I (power?) table (?) + * (92! entries) + */ +#define AR5K_PHY_PAPD_I_BASE 0xa000 +#define AR5K_PHY_PAPD_I(_n) (AR5K_PHY_PAPD_I_BASE + ((_n) << 2)) + +/* + * PHY PCDAC TX power table + */ +#define AR5K_PHY_PCDAC_TXPOWER_BASE 0xa180 +#define AR5K_PHY_PCDAC_TXPOWER(_n) (AR5K_PHY_PCDAC_TXPOWER_BASE + ((_n) << 2)) + +/* + * PHY mode register [5111+] + */ +#define AR5K_PHY_MODE 0x0a200 /* Register Address */ +#define AR5K_PHY_MODE_MOD 0x00000001 /* PHY Modulation bit */ +#define AR5K_PHY_MODE_MOD_OFDM 0 +#define AR5K_PHY_MODE_MOD_CCK 1 +#define AR5K_PHY_MODE_FREQ 0x00000002 /* Freq mode bit */ +#define AR5K_PHY_MODE_FREQ_5GHZ 0 +#define AR5K_PHY_MODE_FREQ_2GHZ 2 +#define AR5K_PHY_MODE_MOD_DYN 0x00000004 /* Enable Dynamic OFDM/CCK mode [5112+] */ +#define AR5K_PHY_MODE_RAD 0x00000008 /* [5212+] */ +#define AR5K_PHY_MODE_RAD_RF5111 0 +#define AR5K_PHY_MODE_RAD_RF5112 8 +#define AR5K_PHY_MODE_XR 0x00000010 /* Enable XR mode [5112+] */ +#define AR5K_PHY_MODE_HALF_RATE 0x00000020 /* Enable Half rate (test) */ +#define AR5K_PHY_MODE_QUARTER_RATE 0x00000040 /* Enable Quarter rat (test) */ + +/* + * PHY CCK transmit control register [5111+ (?)] + */ +#define AR5K_PHY_CCKTXCTL 0xa204 +#define AR5K_PHY_CCKTXCTL_WORLD 0x00000000 +#define AR5K_PHY_CCKTXCTL_JAPAN 0x00000010 +#define AR5K_PHY_CCKTXCTL_SCRAMBLER_DIS 0x00000001 +#define AR5K_PHY_CCKTXCTK_DAC_SCALE 0x00000004 + +/* + * PHY CCK Cross-correlator Barker RSSI threshold register [5212+] + */ +#define AR5K_PHY_CCK_CROSSCORR 0xa208 +#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR 0x0000003f +#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR_S 0 + +/* Same address is used for antenna diversity activation */ +#define AR5K_PHY_FAST_ANT_DIV 0xa208 +#define AR5K_PHY_FAST_ANT_DIV_EN 0x00002000 + +/* + * PHY 2GHz gain register [5111+] + */ +#define AR5K_PHY_GAIN_2GHZ 0xa20c +#define AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX 0x00fc0000 +#define AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX_S 18 +#define AR5K_PHY_GAIN_2GHZ_INI_5111 0x6480416c + +#define AR5K_PHY_CCK_RX_CTL_4 0xa21c +#define AR5K_PHY_CCK_RX_CTL_4_FREQ_EST_SHORT 0x01f80000 +#define AR5K_PHY_CCK_RX_CTL_4_FREQ_EST_SHORT_S 19 + +#define 
AR5K_PHY_DAG_CCK_CTL 0xa228 +#define AR5K_PHY_DAG_CCK_CTL_EN_RSSI_THR 0x00000200 +#define AR5K_PHY_DAG_CCK_CTL_RSSI_THR 0x0001fc00 +#define AR5K_PHY_DAG_CCK_CTL_RSSI_THR_S 10 + +#define AR5K_PHY_FAST_ADC 0xa24c + +#define AR5K_PHY_BLUETOOTH 0xa254 + +/* + * Transmit Power Control register + * [2413+] + */ +#define AR5K_PHY_TPC_RG1 0xa258 +#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN 0x0000c000 +#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN_S 14 +#define AR5K_PHY_TPC_RG1_PDGAIN_1 0x00030000 +#define AR5K_PHY_TPC_RG1_PDGAIN_1_S 16 +#define AR5K_PHY_TPC_RG1_PDGAIN_2 0x000c0000 +#define AR5K_PHY_TPC_RG1_PDGAIN_2_S 18 +#define AR5K_PHY_TPC_RG1_PDGAIN_3 0x00300000 +#define AR5K_PHY_TPC_RG1_PDGAIN_3_S 20 + +#define AR5K_PHY_TPC_RG5 0xa26C +#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP 0x0000000F +#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP_S 0 +#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1 0x000003F0 +#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1_S 4 +#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2 0x0000FC00 +#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2_S 10 +#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3 0x003F0000 +#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3_S 16 +#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4 0x0FC00000 +#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4_S 22 + +/* + * PHY PDADC Tx power table + */ +#define AR5K_PHY_PDADC_TXPOWER_BASE 0xa280 +#define AR5K_PHY_PDADC_TXPOWER(_n) (AR5K_PHY_PDADC_TXPOWER_BASE + ((_n) << 2)) diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c new file mode 100644 index 0000000..a35a7db --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/reset.c @@ -0,0 +1,1390 @@ +/* + * Copyright (c) 2004-2008 Reyk Floeter + * Copyright (c) 2006-2008 Nick Kossifidis + * Copyright (c) 2007-2008 Luis Rodriguez + * Copyright (c) 2007-2008 Pavel Roskin + * Copyright (c) 2007-2008 Jiri Slaby + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#define _ATH5K_RESET + +/*****************************\ + Reset functions and helpers +\*****************************/ + +#include + +#include /* To determine if a card is pci-e */ +#include +#include "ath5k.h" +#include "reg.h" +#include "base.h" +#include "debug.h" + +/** + * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212 + * + * @ah: the &struct ath5k_hw + * @channel: the currently set channel upon reset + * + * Write the delta slope coefficient (used on pilot tracking ?) for OFDM + * operation on the AR5212 upon reset. This is a helper for ath5k_hw_reset(). + * + * Since delta slope is floating point we split it on its exponent and + * mantissa and provide these values on hw. 
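+ *
+ * As an illustrative worked example (the channel value below is an
+ * assumption for illustration, not taken from a register dump): with
+ * clock = 40 and channel->center_freq = 5180,
+ * coef_scaled = ((5 * (40 << 24)) / 2) / 5180 = 323884.
+ * ilog2(323884) = 18, so coef_exp = 14 - (18 - 24) = 20,
+ * coef_man = 323884 + (1 << 3) = 323892,
+ * ds_coef_man = 323892 >> 4 = 20243 and ds_coef_exp = 20 - 16 = 4.
+ * Check: 20243 / 2^20 ~= 0.01931, which matches 5 * 40 / (2 * 5180).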
+ * + * For more infos i think this patent is related + * http://www.freepatentsonline.com/7184495.html + */ +static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + /* Get exponent and mantissa and set it */ + u32 coef_scaled, coef_exp, coef_man, + ds_coef_exp, ds_coef_man, clock; + + BUG_ON(!(ah->ah_version == AR5K_AR5212) || + !(channel->hw_value & CHANNEL_OFDM)); + + /* Get coefficient + * ALGO: coef = (5 * clock / carrier_freq) / 2 + * we scale coef by shifting clock value by 24 for + * better precision since we use integers */ + /* TODO: Half/quarter rate */ + clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40; + coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq; + + /* Get exponent + * ALGO: coef_exp = 14 - highest set bit position */ + coef_exp = ilog2(coef_scaled); + + /* Doesn't make sense if it's zero*/ + if (!coef_scaled || !coef_exp) + return -EINVAL; + + /* Note: we've shifted coef_scaled by 24 */ + coef_exp = 14 - (coef_exp - 24); + + + /* Get mantissa (significant digits) + * ALGO: coef_mant = floor(coef_scaled* 2^coef_exp+0.5) */ + coef_man = coef_scaled + + (1 << (24 - coef_exp - 1)); + + /* Calculate delta slope coefficient exponent + * and mantissa (remove scaling) and set them on hw */ + ds_coef_man = coef_man >> (24 - coef_exp); + ds_coef_exp = coef_exp - 16; + + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3, + AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man); + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3, + AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp); + + return 0; +} + + +/* + * index into rates for control rates, we can set it up like this because + * this is only used for AR5212 and we know it supports G mode + */ +static const unsigned int control_rates[] = + { 0, 1, 1, 1, 4, 4, 6, 6, 8, 8, 8, 8 }; + +/** + * ath5k_hw_write_rate_duration - fill rate code to duration table + * + * @ah: the &struct ath5k_hw + * @mode: one of enum ath5k_driver_mode + * + * Write the rate code to duration table upon hw reset. This is a helper for + * ath5k_hw_reset(). It seems all this is doing is setting an ACK timeout on + * the hardware, based on current mode, for each rate. The rates which are + * capable of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have + * different rate code so we write their value twice (one for long preample + * and one for short). + * + * Note: Band doesn't matter here, if we set the values for OFDM it works + * on both a and g modes. So all we have to do is set values for all g rates + * that include all OFDM and CCK rates. If we operate in turbo or xr/half/ + * quarter rate mode, we need to use another set of bitrates (that's why we + * need the mode parameter) but we don't handle these proprietary modes yet. + */ +static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah, + unsigned int mode) +{ + struct ath5k_softc *sc = ah->ah_sc; + struct ieee80211_rate *rate; + unsigned int i; + + /* Write rate duration table */ + for (i = 0; i < sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates; i++) { + u32 reg; + u16 tx_time; + + rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[control_rates[i]]; + + /* Set ACK timeout */ + reg = AR5K_RATE_DUR(rate->hw_value); + + /* An ACK frame consists of 10 bytes. If you add the FCS, + * which ieee80211_generic_frame_duration() adds, + * its 14 bytes. Note we use the control rate and not the + * actual rate for this rate. See mac80211 tx.c + * ieee80211_duration() for a brief description of + * what rate we should choose to TX ACKs. 
*/ + tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw, + sc->vif, 10, rate)); + + ath5k_hw_reg_write(ah, tx_time, reg); + + if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) + continue; + + /* + * We're not distinguishing short preamble here, + * This is true, all we'll get is a longer value here + * which is not necessarilly bad. We could use + * export ieee80211_frame_duration() but that needs to be + * fixed first to be properly used by mac802111 drivers: + * + * - remove erp stuff and let the routine figure ofdm + * erp rates + * - remove passing argument ieee80211_local as + * drivers don't have access to it + * - move drivers using ieee80211_generic_frame_duration() + * to this + */ + ath5k_hw_reg_write(ah, tx_time, + reg + (AR5K_SET_SHORT_PREAMBLE << 2)); + } +} + +/* + * Reset chipset + */ +static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) +{ + int ret; + u32 mask = val ? val : ~0U; + + ATH5K_TRACE(ah->ah_sc); + + /* Read-and-clear RX Descriptor Pointer*/ + ath5k_hw_reg_read(ah, AR5K_RXDP); + + /* + * Reset the device and wait until success + */ + ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL); + + /* Wait at least 128 PCI clocks */ + udelay(15); + + if (ah->ah_version == AR5K_AR5210) { + val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA + | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY; + mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA + | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY; + } else { + val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND; + mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND; + } + + ret = ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, val, false); + + /* + * Reset configuration register (for hw byte-swap). Note that this + * is only set for big endian. We do the necessary magic in + * AR5K_INIT_CFG. + */ + if ((val & AR5K_RESET_CTL_PCU) == 0) + ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG); + + return ret; +} + +/* + * Sleep control + */ +int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, + bool set_chip, u16 sleep_duration) +{ + unsigned int i; + u32 staid, data; + + ATH5K_TRACE(ah->ah_sc); + staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1); + + switch (mode) { + case AR5K_PM_AUTO: + staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA; + /* fallthrough */ + case AR5K_PM_NETWORK_SLEEP: + if (set_chip) + ath5k_hw_reg_write(ah, + AR5K_SLEEP_CTL_SLE_ALLOW | + sleep_duration, + AR5K_SLEEP_CTL); + + staid |= AR5K_STA_ID1_PWR_SV; + break; + + case AR5K_PM_FULL_SLEEP: + if (set_chip) + ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP, + AR5K_SLEEP_CTL); + + staid |= AR5K_STA_ID1_PWR_SV; + break; + + case AR5K_PM_AWAKE: + + staid &= ~AR5K_STA_ID1_PWR_SV; + + if (!set_chip) + goto commit; + + data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL); + + /* If card is down we 'll get 0xffff... 
so we + * need to clean this up before we write the register + */ + if (data & 0xffc00000) + data = 0; + else + /* Preserve sleep duration etc */ + data = data & ~AR5K_SLEEP_CTL_SLE; + + ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE, + AR5K_SLEEP_CTL); + udelay(15); + + for (i = 200; i > 0; i--) { + /* Check if the chip did wake up */ + if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) & + AR5K_PCICFG_SPWR_DN) == 0) + break; + + /* Wait a bit and retry */ + udelay(50); + ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE, + AR5K_SLEEP_CTL); + } + + /* Fail if the chip didn't wake up */ + if (i == 0) + return -EIO; + + break; + + default: + return -EINVAL; + } + +commit: + ath5k_hw_reg_write(ah, staid, AR5K_STA_ID1); + + return 0; +} + +/* + * Put device on hold + * + * Put MAC and Baseband on warm reset and + * keep that state (don't clean sleep control + * register). After this MAC and Baseband are + * disabled and a full reset is needed to come + * back. This way we save as much power as possible + * without puting the card on full sleep. + */ +int ath5k_hw_on_hold(struct ath5k_hw *ah) +{ + struct pci_dev *pdev = ah->ah_sc->pdev; + u32 bus_flags; + int ret; + + /* Make sure device is awake */ + ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); + if (ret) { + ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n"); + return ret; + } + + /* + * Put chipset on warm reset... + * + * Note: puting PCI core on warm reset on PCI-E cards + * results card to hang and always return 0xffff... so + * we ingore that flag for PCI-E cards. On PCI cards + * this flag gets cleared after 64 PCI clocks. + */ + bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI; + + if (ah->ah_version == AR5K_AR5210) { + ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | + AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | + AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); + mdelay(2); + } else { + ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | + AR5K_RESET_CTL_BASEBAND | bus_flags); + } + + if (ret) { + ATH5K_ERR(ah->ah_sc, "failed to put device on warm reset\n"); + return -EIO; + } + + /* ...wakeup again!*/ + ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); + if (ret) { + ATH5K_ERR(ah->ah_sc, "failed to put device on hold\n"); + return ret; + } + + return ret; +} + +/* + * Bring up MAC + PHY Chips and program PLL + * TODO: Half/Quarter rate support + */ +int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial) +{ + struct pci_dev *pdev = ah->ah_sc->pdev; + u32 turbo, mode, clock, bus_flags; + int ret; + + turbo = 0; + mode = 0; + clock = 0; + + ATH5K_TRACE(ah->ah_sc); + + /* Wakeup the device */ + ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); + if (ret) { + ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n"); + return ret; + } + + /* + * Put chipset on warm reset... + * + * Note: puting PCI core on warm reset on PCI-E cards + * results card to hang and always return 0xffff... so + * we ingore that flag for PCI-E cards. On PCI cards + * this flag gets cleared after 64 PCI clocks. + */ + bus_flags = (pdev->is_pcie) ? 
0 : AR5K_RESET_CTL_PCI; + + if (ah->ah_version == AR5K_AR5210) { + ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | + AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | + AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); + mdelay(2); + } else { + ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | + AR5K_RESET_CTL_BASEBAND | bus_flags); + } + + if (ret) { + ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n"); + return -EIO; + } + + /* ...wakeup again!...*/ + ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); + if (ret) { + ATH5K_ERR(ah->ah_sc, "failed to resume the MAC Chip\n"); + return ret; + } + + /* ...clear reset control register and pull device out of + * warm reset */ + if (ath5k_hw_nic_reset(ah, 0)) { + ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n"); + return -EIO; + } + + /* On initialization skip PLL programming since we don't have + * a channel / mode set yet */ + if (initial) + return 0; + + if (ah->ah_version != AR5K_AR5210) { + /* + * Get channel mode flags + */ + + if (ah->ah_radio >= AR5K_RF5112) { + mode = AR5K_PHY_MODE_RAD_RF5112; + clock = AR5K_PHY_PLL_RF5112; + } else { + mode = AR5K_PHY_MODE_RAD_RF5111; /*Zero*/ + clock = AR5K_PHY_PLL_RF5111; /*Zero*/ + } + + if (flags & CHANNEL_2GHZ) { + mode |= AR5K_PHY_MODE_FREQ_2GHZ; + clock |= AR5K_PHY_PLL_44MHZ; + + if (flags & CHANNEL_CCK) { + mode |= AR5K_PHY_MODE_MOD_CCK; + } else if (flags & CHANNEL_OFDM) { + /* XXX Dynamic OFDM/CCK is not supported by the + * AR5211 so we set MOD_OFDM for plain g (no + * CCK headers) operation. We need to test + * this, 5211 might support ofdm-only g after + * all, there are also initial register values + * in the code for g mode (see initvals.c). */ + if (ah->ah_version == AR5K_AR5211) + mode |= AR5K_PHY_MODE_MOD_OFDM; + else + mode |= AR5K_PHY_MODE_MOD_DYN; + } else { + ATH5K_ERR(ah->ah_sc, + "invalid radio modulation mode\n"); + return -EINVAL; + } + } else if (flags & CHANNEL_5GHZ) { + mode |= AR5K_PHY_MODE_FREQ_5GHZ; + + if (ah->ah_radio == AR5K_RF5413) + clock = AR5K_PHY_PLL_40MHZ_5413; + else + clock |= AR5K_PHY_PLL_40MHZ; + + if (flags & CHANNEL_OFDM) + mode |= AR5K_PHY_MODE_MOD_OFDM; + else { + ATH5K_ERR(ah->ah_sc, + "invalid radio modulation mode\n"); + return -EINVAL; + } + } else { + ATH5K_ERR(ah->ah_sc, "invalid radio frequency mode\n"); + return -EINVAL; + } + + if (flags & CHANNEL_TURBO) + turbo = AR5K_PHY_TURBO_MODE | AR5K_PHY_TURBO_SHORT; + } else { /* Reset the device */ + + /* ...enable Atheros turbo mode if requested */ + if (flags & CHANNEL_TURBO) + ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE, + AR5K_PHY_TURBO); + } + + if (ah->ah_version != AR5K_AR5210) { + + /* ...update PLL if needed */ + if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) { + ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL); + udelay(300); + } + + /* ...set the PHY operating mode */ + ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE); + ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO); + } + + return 0; +} + +/* + * If there is an external 32KHz crystal available, use it + * as ref. clock instead of 32/40MHz clock and baseband clocks + * to save power during sleep or restore normal 32/40MHz + * operation. + * + * XXX: When operating on 32KHz certain PHY registers (27 - 31, + * 123 - 127) require delay on access. 
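+ *
+ * Usage note: this helper is driven from ath5k_hw_reset() below. There
+ * we call ath5k_hw_set_sleep_clock(ah, false) before touching PHY
+ * registers (to restore normal 32/40MHz operation) and, once the reset
+ * sequence completes, ath5k_hw_set_sleep_clock(ah, true) again on
+ * AR5212 when not operating as an AP.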
+ */ +static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + u32 scal, spending, usec32; + + /* Only set 32KHz settings if we have an external + * 32KHz crystal present */ + if ((AR5K_EEPROM_HAS32KHZCRYSTAL(ee->ee_misc1) || + AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(ee->ee_misc1)) && + enable) { + + /* 1 usec/cycle */ + AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, 1); + /* Set up tsf increment on each cycle */ + AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 61); + + /* Set baseband sleep control registers + * and sleep control rate */ + ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR); + + if ((ah->ah_radio == AR5K_RF5112) || + (ah->ah_radio == AR5K_RF5413) || + (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) + spending = 0x14; + else + spending = 0x18; + ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING); + + if ((ah->ah_radio == AR5K_RF5112) || + (ah->ah_radio == AR5K_RF5413) || + (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) { + ath5k_hw_reg_write(ah, 0x26, AR5K_PHY_SLMT); + ath5k_hw_reg_write(ah, 0x0d, AR5K_PHY_SCAL); + ath5k_hw_reg_write(ah, 0x07, AR5K_PHY_SCLOCK); + ath5k_hw_reg_write(ah, 0x3f, AR5K_PHY_SDELAY); + AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG, + AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x02); + } else { + ath5k_hw_reg_write(ah, 0x0a, AR5K_PHY_SLMT); + ath5k_hw_reg_write(ah, 0x0c, AR5K_PHY_SCAL); + ath5k_hw_reg_write(ah, 0x03, AR5K_PHY_SCLOCK); + ath5k_hw_reg_write(ah, 0x20, AR5K_PHY_SDELAY); + AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG, + AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x03); + } + + /* Enable sleep clock operation */ + AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, + AR5K_PCICFG_SLEEP_CLOCK_EN); + + } else { + + /* Disable sleep clock operation and + * restore default parameters */ + AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, + AR5K_PCICFG_SLEEP_CLOCK_EN); + + AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG, + AR5K_PCICFG_SLEEP_CLOCK_RATE, 0); + + ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR); + ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT); + + if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)) + scal = AR5K_PHY_SCAL_32MHZ_2417; + else if (ee->ee_is_hb63) + scal = AR5K_PHY_SCAL_32MHZ_HB63; + else + scal = AR5K_PHY_SCAL_32MHZ; + ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL); + + ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK); + ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY); + + if ((ah->ah_radio == AR5K_RF5112) || + (ah->ah_radio == AR5K_RF5413) || + (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) + spending = 0x14; + else + spending = 0x18; + ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING); + + if ((ah->ah_radio == AR5K_RF5112) || + (ah->ah_radio == AR5K_RF5413)) + usec32 = 39; + else + usec32 = 31; + AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, usec32); + + AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1); + } + return; +} + +/* TODO: Half/Quarter rate */ +static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + if (ah->ah_version == AR5K_AR5212 && + ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) { + + /* Setup ADC control */ + ath5k_hw_reg_write(ah, + (AR5K_REG_SM(2, + AR5K_PHY_ADC_CTL_INBUFGAIN_OFF) | + AR5K_REG_SM(2, + AR5K_PHY_ADC_CTL_INBUFGAIN_ON) | + AR5K_PHY_ADC_CTL_PWD_DAC_OFF | + AR5K_PHY_ADC_CTL_PWD_ADC_OFF), + AR5K_PHY_ADC_CTL); + + + + /* Disable barker RSSI threshold */ + AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_DAG_CCK_CTL, + AR5K_PHY_DAG_CCK_CTL_EN_RSSI_THR); + + AR5K_REG_WRITE_BITS(ah, 
AR5K_PHY_DAG_CCK_CTL, + AR5K_PHY_DAG_CCK_CTL_RSSI_THR, 2); + + /* Set the mute mask */ + ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK); + } + + /* Clear PHY_BLUETOOTH to allow RX_CLEAR line debug */ + if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212B) + ath5k_hw_reg_write(ah, 0, AR5K_PHY_BLUETOOTH); + + /* Enable DCU double buffering */ + if (ah->ah_phy_revision > AR5K_SREV_PHY_5212B) + AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG, + AR5K_TXCFG_DCU_DBL_BUF_DIS); + + /* Set DAC/ADC delays */ + if (ah->ah_version == AR5K_AR5212) { + u32 scal; + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)) + scal = AR5K_PHY_SCAL_32MHZ_2417; + else if (ee->ee_is_hb63) + scal = AR5K_PHY_SCAL_32MHZ_HB63; + else + scal = AR5K_PHY_SCAL_32MHZ; + ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL); + } + + /* Set fast ADC */ + if ((ah->ah_radio == AR5K_RF5413) || + (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) { + u32 fast_adc = true; + + if (channel->center_freq == 2462 || + channel->center_freq == 2467) + fast_adc = 0; + + /* Only update if needed */ + if (ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ADC) != fast_adc) + ath5k_hw_reg_write(ah, fast_adc, + AR5K_PHY_FAST_ADC); + } + + /* Fix for first revision of the RF5112 RF chipset */ + if (ah->ah_radio == AR5K_RF5112 && + ah->ah_radio_5ghz_revision < + AR5K_SREV_RAD_5112A) { + u32 data; + ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD, + AR5K_PHY_CCKTXCTL); + if (channel->hw_value & CHANNEL_5GHZ) + data = 0xffb81020; + else + data = 0xffb80d20; + ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL); + } + + if (ah->ah_mac_srev < AR5K_SREV_AR5211) { + u32 usec_reg; + /* 5311 has different tx/rx latency masks + * from 5211, since we deal 5311 the same + * as 5211 when setting initvals, shift + * values here to their proper locations */ + usec_reg = ath5k_hw_reg_read(ah, AR5K_USEC_5211); + ath5k_hw_reg_write(ah, usec_reg & (AR5K_USEC_1 | + AR5K_USEC_32 | + AR5K_USEC_TX_LATENCY_5211 | + AR5K_REG_SM(29, + AR5K_USEC_RX_LATENCY_5210)), + AR5K_USEC_5211); + /* Clear QCU/DCU clock gating register */ + ath5k_hw_reg_write(ah, 0, AR5K_QCUDCU_CLKGT); + /* Set DAC/ADC delays */ + ath5k_hw_reg_write(ah, 0x08, AR5K_PHY_SCAL); + /* Enable PCU FIFO corruption ECO */ + AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211, + AR5K_DIAG_SW_ECO_ENABLE); + } +} + +static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, + struct ieee80211_channel *channel, u8 *ant, u8 ee_mode) +{ + struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; + s16 cck_ofdm_pwr_delta; + + /* Adjust power delta for channel 14 */ + if (channel->center_freq == 2484) + cck_ofdm_pwr_delta = + ((ee->ee_cck_ofdm_power_delta - + ee->ee_scaled_cck_delta) * 2) / 10; + else + cck_ofdm_pwr_delta = + (ee->ee_cck_ofdm_power_delta * 2) / 10; + + /* Set CCK to OFDM power delta on tx power + * adjustment register */ + if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) { + if (channel->hw_value == CHANNEL_G) + ath5k_hw_reg_write(ah, + AR5K_REG_SM((ee->ee_cck_ofdm_gain_delta * -1), + AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) | + AR5K_REG_SM((cck_ofdm_pwr_delta * -1), + AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX), + AR5K_PHY_TX_PWR_ADJ); + else + ath5k_hw_reg_write(ah, 0, AR5K_PHY_TX_PWR_ADJ); + } else { + /* For older revs we scale power on sw during tx power + * setup */ + ah->ah_txpower.txp_cck_ofdm_pwr_delta = cck_ofdm_pwr_delta; + ah->ah_txpower.txp_cck_ofdm_gainf_delta = + ee->ee_cck_ofdm_gain_delta; + } + + /* Set antenna idle switch table */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL, 
+ AR5K_PHY_ANT_CTL_SWTABLE_IDLE, + (ah->ah_ant_ctl[ee_mode][0] | + AR5K_PHY_ANT_CTL_TXRX_EN)); + + /* Set antenna switch tables */ + ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[0]], + AR5K_PHY_ANT_SWITCH_TABLE_0); + ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[1]], + AR5K_PHY_ANT_SWITCH_TABLE_1); + + /* Noise floor threshold */ + ath5k_hw_reg_write(ah, + AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]), + AR5K_PHY_NFTHRES); + + if ((channel->hw_value & CHANNEL_TURBO) && + (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0)) { + /* Switch settling time (Turbo) */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING, + AR5K_PHY_SETTLING_SWITCH, + ee->ee_switch_settling_turbo[ee_mode]); + + /* Tx/Rx attenuation (Turbo) */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN, + AR5K_PHY_GAIN_TXRX_ATTEN, + ee->ee_atn_tx_rx_turbo[ee_mode]); + + /* ADC/PGA desired size (Turbo) */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE, + AR5K_PHY_DESIRED_SIZE_ADC, + ee->ee_adc_desired_size_turbo[ee_mode]); + + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE, + AR5K_PHY_DESIRED_SIZE_PGA, + ee->ee_pga_desired_size_turbo[ee_mode]); + + /* Tx/Rx margin (Turbo) */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ, + AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX, + ee->ee_margin_tx_rx_turbo[ee_mode]); + + } else { + /* Switch settling time */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING, + AR5K_PHY_SETTLING_SWITCH, + ee->ee_switch_settling[ee_mode]); + + /* Tx/Rx attenuation */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN, + AR5K_PHY_GAIN_TXRX_ATTEN, + ee->ee_atn_tx_rx[ee_mode]); + + /* ADC/PGA desired size */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE, + AR5K_PHY_DESIRED_SIZE_ADC, + ee->ee_adc_desired_size[ee_mode]); + + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE, + AR5K_PHY_DESIRED_SIZE_PGA, + ee->ee_pga_desired_size[ee_mode]); + + /* Tx/Rx margin */ + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ, + AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX, + ee->ee_margin_tx_rx[ee_mode]); + } + + /* XPA delays */ + ath5k_hw_reg_write(ah, + (ee->ee_tx_end2xpa_disable[ee_mode] << 24) | + (ee->ee_tx_end2xpa_disable[ee_mode] << 16) | + (ee->ee_tx_frm2xpa_enable[ee_mode] << 8) | + (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4); + + /* XLNA delay */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RF_CTL3, + AR5K_PHY_RF_CTL3_TXE2XLNA_ON, + ee->ee_tx_end2xlna_enable[ee_mode]); + + /* Thresh64 (ANI) */ + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_NF, + AR5K_PHY_NF_THRESH62, + ee->ee_thr_62[ee_mode]); + + + /* False detect backoff for channels + * that have spur noise. Write the new + * cyclic power RSSI threshold. */ + if (ath5k_hw_chan_has_spur_noise(ah, channel)) + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR, + AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, + AR5K_INIT_CYCRSSI_THR1 + + ee->ee_false_detect[ee_mode]); + else + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR, + AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, + AR5K_INIT_CYCRSSI_THR1); + + /* I/Q correction + * TODO: Per channel i/q infos ? 
*/ + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, + AR5K_PHY_IQ_CORR_ENABLE | + (ee->ee_i_cal[ee_mode] << AR5K_PHY_IQ_CORR_Q_I_COFF_S) | + ee->ee_q_cal[ee_mode]); + + /* Heavy clipping -disable for now */ + if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_1) + ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE); + + return; +} + +/* + * Main reset function + */ +int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, + struct ieee80211_channel *channel, bool change_channel) +{ + struct ath_common *common = ath5k_hw_common(ah); + u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo; + u32 phy_tst1; + u8 mode, freq, ee_mode, ant[2]; + int i, ret; + + ATH5K_TRACE(ah->ah_sc); + + s_ant = 0; + ee_mode = 0; + staid1_flags = 0; + tsf_up = 0; + tsf_lo = 0; + freq = 0; + mode = 0; + + /* + * Save some registers before a reset + */ + /*DCU/Antenna selection not available on 5210*/ + if (ah->ah_version != AR5K_AR5210) { + + switch (channel->hw_value & CHANNEL_MODES) { + case CHANNEL_A: + mode = AR5K_MODE_11A; + freq = AR5K_INI_RFGAIN_5GHZ; + ee_mode = AR5K_EEPROM_MODE_11A; + break; + case CHANNEL_G: + mode = AR5K_MODE_11G; + freq = AR5K_INI_RFGAIN_2GHZ; + ee_mode = AR5K_EEPROM_MODE_11G; + break; + case CHANNEL_B: + mode = AR5K_MODE_11B; + freq = AR5K_INI_RFGAIN_2GHZ; + ee_mode = AR5K_EEPROM_MODE_11B; + break; + case CHANNEL_T: + mode = AR5K_MODE_11A_TURBO; + freq = AR5K_INI_RFGAIN_5GHZ; + ee_mode = AR5K_EEPROM_MODE_11A; + break; + case CHANNEL_TG: + if (ah->ah_version == AR5K_AR5211) { + ATH5K_ERR(ah->ah_sc, + "TurboG mode not available on 5211"); + return -EINVAL; + } + mode = AR5K_MODE_11G_TURBO; + freq = AR5K_INI_RFGAIN_2GHZ; + ee_mode = AR5K_EEPROM_MODE_11G; + break; + case CHANNEL_XR: + if (ah->ah_version == AR5K_AR5211) { + ATH5K_ERR(ah->ah_sc, + "XR mode not available on 5211"); + return -EINVAL; + } + mode = AR5K_MODE_XR; + freq = AR5K_INI_RFGAIN_5GHZ; + ee_mode = AR5K_EEPROM_MODE_11A; + break; + default: + ATH5K_ERR(ah->ah_sc, + "invalid channel: %d\n", channel->center_freq); + return -EINVAL; + } + + if (change_channel) { + /* + * Save frame sequence count + * For revs. after Oahu, only save + * seq num for DCU 0 (Global seq num) + */ + if (ah->ah_mac_srev < AR5K_SREV_AR5211) { + + for (i = 0; i < 10; i++) + s_seq[i] = ath5k_hw_reg_read(ah, + AR5K_QUEUE_DCU_SEQNUM(i)); + + } else { + s_seq[0] = ath5k_hw_reg_read(ah, + AR5K_QUEUE_DCU_SEQNUM(0)); + } + + /* TSF accelerates on AR5211 durring reset + * As a workaround save it here and restore + * it later so that it's back in time after + * reset. This way it'll get re-synced on the + * next beacon without breaking ad-hoc. + * + * On AR5212 TSF is almost preserved across a + * reset so it stays back in time anyway and + * we don't have to save/restore it. 
+ * + * XXX: Since this breaks power saving we have + * to disable power saving until we receive the + * next beacon, so we can resync beacon timers */ + if (ah->ah_version == AR5K_AR5211) { + tsf_up = ath5k_hw_reg_read(ah, AR5K_TSF_U32); + tsf_lo = ath5k_hw_reg_read(ah, AR5K_TSF_L32); + } + } + + /* Save default antenna */ + s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA); + + if (ah->ah_version == AR5K_AR5212) { + /* Restore normal 32/40MHz clock operation + * to avoid register access delay on certain + * PHY registers */ + ath5k_hw_set_sleep_clock(ah, false); + + /* Since we are going to write rf buffer + * check if we have any pending gain_F + * optimization settings */ + if (change_channel && ah->ah_rf_banks != NULL) + ath5k_hw_gainf_calibrate(ah); + } + } + + /*GPIOs*/ + s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) & + AR5K_PCICFG_LEDSTATE; + s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR); + s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO); + + /* AR5K_STA_ID1 flags, only preserve antenna + * settings and ack/cts rate mode */ + staid1_flags = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & + (AR5K_STA_ID1_DEFAULT_ANTENNA | + AR5K_STA_ID1_DESC_ANTENNA | + AR5K_STA_ID1_RTS_DEF_ANTENNA | + AR5K_STA_ID1_ACKCTS_6MB | + AR5K_STA_ID1_BASE_RATE_11B | + AR5K_STA_ID1_SELFGEN_DEF_ANT); + + /* Wakeup the device */ + ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false); + if (ret) + return ret; + + /* + * Initialize operating mode + */ + ah->ah_op_mode = op_mode; + + /* PHY access enable */ + if (ah->ah_mac_srev >= AR5K_SREV_AR5211) + ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0)); + else + ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ | 0x40, + AR5K_PHY(0)); + + /* Write initial settings */ + ret = ath5k_hw_write_initvals(ah, mode, change_channel); + if (ret) + return ret; + + /* + * 5211/5212 Specific + */ + if (ah->ah_version != AR5K_AR5210) { + + /* + * Write initial RF gain settings + * This should work for both 5111/5112 + */ + ret = ath5k_hw_rfgain_init(ah, freq); + if (ret) + return ret; + + mdelay(1); + + /* + * Tweak initval settings for revised + * chipsets and add some more config + * bits + */ + ath5k_hw_tweak_initval_settings(ah, channel); + + /* + * Set TX power + */ + ret = ath5k_hw_txpower(ah, channel, ee_mode, + ah->ah_txpower.txp_max_pwr / 2); + if (ret) + return ret; + + /* Write rate duration table only on AR5212 and if + * virtual interface has already been brought up + * XXX: rethink this after new mode changes to + * mac80211 are integrated */ + if (ah->ah_version == AR5K_AR5212 && + ah->ah_sc->vif != NULL) + ath5k_hw_write_rate_duration(ah, mode); + + /* + * Write RF buffer + */ + ret = ath5k_hw_rfregs_init(ah, channel, mode); + if (ret) + return ret; + + + /* Write OFDM timings on 5212*/ + if (ah->ah_version == AR5K_AR5212 && + channel->hw_value & CHANNEL_OFDM) { + struct ath5k_eeprom_info *ee = + &ah->ah_capabilities.cap_eeprom; + + ret = ath5k_hw_write_ofdm_timings(ah, channel); + if (ret) + return ret; + + /* Note: According to docs we can have a newer + * EEPROM on old hardware, so we need to verify + * that our hardware is new enough to have spur + * mitigation registers (delta phase etc) */ + if (ah->ah_mac_srev >= AR5K_SREV_AR5424 || + (ah->ah_mac_srev >= AR5K_SREV_AR5424 && + ee->ee_version >= AR5K_EEPROM_VERSION_5_3)) + ath5k_hw_set_spur_mitigation_filter(ah, + channel); + } + + /*Enable/disable 802.11b mode on 5111 + (enable 2111 frequency converter + CCK)*/ + if (ah->ah_radio == AR5K_RF5111) { + if (mode == AR5K_MODE_11B) + AR5K_REG_ENABLE_BITS(ah, 
AR5K_TXCFG, + AR5K_TXCFG_B_MODE); + else + AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG, + AR5K_TXCFG_B_MODE); + } + + /* + * In case a fixed antenna was set as default + * use the same switch table twice. + */ + if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A) + ant[0] = ant[1] = AR5K_ANT_SWTABLE_A; + else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B) + ant[0] = ant[1] = AR5K_ANT_SWTABLE_B; + else { + ant[0] = AR5K_ANT_SWTABLE_A; + ant[1] = AR5K_ANT_SWTABLE_B; + } + + /* Commit values from EEPROM */ + ath5k_hw_commit_eeprom_settings(ah, channel, ant, ee_mode); + + } else { + /* + * For 5210 we do all initialization using + * initvals, so we don't have to modify + * any settings (5210 also only supports + * a/aturbo modes) + */ + mdelay(1); + /* Disable phy and wait */ + ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT); + mdelay(1); + } + + /* + * Restore saved values + */ + + /*DCU/Antenna selection not available on 5210*/ + if (ah->ah_version != AR5K_AR5210) { + + if (change_channel) { + if (ah->ah_mac_srev < AR5K_SREV_AR5211) { + for (i = 0; i < 10; i++) + ath5k_hw_reg_write(ah, s_seq[i], + AR5K_QUEUE_DCU_SEQNUM(i)); + } else { + ath5k_hw_reg_write(ah, s_seq[0], + AR5K_QUEUE_DCU_SEQNUM(0)); + } + + + if (ah->ah_version == AR5K_AR5211) { + ath5k_hw_reg_write(ah, tsf_up, AR5K_TSF_U32); + ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32); + } + } + + ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA); + } + + /* Ledstate */ + AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]); + + /* Gpio settings */ + ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR); + ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO); + + /* Restore sta_id flags and preserve our mac address*/ + ath5k_hw_reg_write(ah, + get_unaligned_le32(common->macaddr), + AR5K_STA_ID0); + ath5k_hw_reg_write(ah, + staid1_flags | get_unaligned_le16(common->macaddr + 4), + AR5K_STA_ID1); + + + /* + * Configure PCU + */ + + /* Restore bssid and bssid mask */ + ath5k_hw_set_associd(ah); + + /* Set PCU config */ + ath5k_hw_set_opmode(ah); + + /* Clear any pending interrupts + * PISR/SISR Not available on 5210 */ + if (ah->ah_version != AR5K_AR5210) + ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR); + + /* Set RSSI/BRSSI thresholds + * + * Note: If we decide to set this value + * dynamicaly, have in mind that when AR5K_RSSI_THR + * register is read it might return 0x40 if we haven't + * wrote anything to it plus BMISS RSSI threshold is zeroed. + * So doing a save/restore procedure here isn't the right + * choice. Instead store it on ath5k_hw */ + ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES | + AR5K_TUNE_BMISS_THRES << + AR5K_RSSI_THR_BMISS_S), + AR5K_RSSI_THR); + + /* MIC QoS support */ + if (ah->ah_mac_srev >= AR5K_SREV_AR2413) { + ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL); + ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL); + } + + /* QoS NOACK Policy */ + if (ah->ah_version == AR5K_AR5212) { + ath5k_hw_reg_write(ah, + AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) | + AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) | + AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET), + AR5K_QOS_NOACK); + } + + + /* + * Configure PHY + */ + + /* Set channel on PHY */ + ret = ath5k_hw_channel(ah, channel); + if (ret) + return ret; + + /* + * Enable the PHY and wait until completion + * This includes BaseBand and Synthesizer + * activation. + */ + ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); + + /* + * On 5211+ read activation -> rx delay + * and use it. 
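+ *
+ * Worked example with an assumed (hypothetical) register value: if the
+ * masked AR5K_PHY_RX_DELAY read returned 496, an OFDM channel would get
+ * udelay(100 + 2 * (496 / 10)) = udelay(198), while a CCK channel would
+ * get udelay(100 + 2 * ((496 << 2) / 22)) = udelay(280).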
+ * + * TODO: Half/quarter rate support + */ + if (ah->ah_version != AR5K_AR5210) { + u32 delay; + delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & + AR5K_PHY_RX_DELAY_M; + delay = (channel->hw_value & CHANNEL_CCK) ? + ((delay << 2) / 22) : (delay / 10); + + udelay(100 + (2 * delay)); + } else { + mdelay(1); + } + + /* + * Perform ADC test to see if baseband is ready + * Set tx hold and check adc test register + */ + phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); + ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); + for (i = 0; i <= 20; i++) { + if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) + break; + udelay(200); + } + ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); + + /* + * Start automatic gain control calibration + * + * During AGC calibration RX path is re-routed to + * a power detector so we don't receive anything. + * + * This method is used to calibrate some static offsets + * used together with on-the fly I/Q calibration (the + * one performed via ath5k_hw_phy_calibrate), that doesn't + * interrupt rx path. + * + * While rx path is re-routed to the power detector we also + * start a noise floor calibration, to measure the + * card's noise floor (the noise we measure when we are not + * transmiting or receiving anything). + * + * If we are in a noisy environment AGC calibration may time + * out and/or noise floor calibration might timeout. + */ + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF); + + /* At the same time start I/Q calibration for QAM constellation + * -no need for CCK- */ + ah->ah_calibration = false; + if (!(mode == AR5K_MODE_11B)) { + ah->ah_calibration = true; + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, + AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15); + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, + AR5K_PHY_IQ_RUN); + } + + /* Wait for gain calibration to finish (we check for I/Q calibration + * during ath5k_phy_calibrate) */ + if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_CAL, 0, false)) { + ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n", + channel->center_freq); + } + + /* Restore antenna mode */ + ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode); + + /* Restore slot time and ACK timeouts */ + if (ah->ah_coverage_class > 0) + ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class); + + /* + * Configure QCUs/DCUs + */ + + /* TODO: HW Compression support for data queues */ + /* TODO: Burst prefetch for data queues */ + + /* + * Reset queues and start beacon timers at the end of the reset routine + * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping + * Note: If we want we can assign multiple qcus on one dcu. + */ + for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) { + ret = ath5k_hw_reset_tx_queue(ah, i); + if (ret) { + ATH5K_ERR(ah->ah_sc, + "failed to reset TX queue #%d\n", i); + return ret; + } + } + + + /* + * Configure DMA/Interrupts + */ + + /* + * Set Rx/Tx DMA Configuration + * + * Set standard DMA size (128). Note that + * a DMA size of 512 causes rx overruns and tx errors + * on pci-e cards (tested on 5424 but since rx overruns + * also occur on 5416/5418 with madwifi we set 128 + * for all PCI-E cards to be safe). 
+ * + * XXX: need to check 5210 for this + * TODO: Check out tx triger level, it's always 64 on dumps but I + * guess we can tweak it and see how it goes ;-) + */ + if (ah->ah_version != AR5K_AR5210) { + AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG, + AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B); + AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG, + AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B); + } + + /* Pre-enable interrupts on 5211/5212*/ + if (ah->ah_version != AR5K_AR5210) + ath5k_hw_set_imr(ah, ah->ah_imr); + + /* Enable 32KHz clock function for AR5212+ chips + * Set clocks to 32KHz operation and use an + * external 32KHz crystal when sleeping if one + * exists */ + if (ah->ah_version == AR5K_AR5212 && + ah->ah_op_mode != NL80211_IFTYPE_AP) + ath5k_hw_set_sleep_clock(ah, true); + + /* + * Disable beacons and reset the register + */ + AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE | + AR5K_BEACON_RESET_TSF); + + return 0; +} + +#undef _ATH5K_RESET diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h new file mode 100644 index 0000000..e50baff --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h @@ -0,0 +1,1181 @@ +/* + * RF Buffer handling functions + * + * Copyright (c) 2009 Nick Kossifidis + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + + +/* + * There are some special registers on the RF chip + * that control various operation settings related mostly to + * the analog parts (channel, gain adjustment etc). + * + * We don't write on those registers directly but + * we send a data packet on the chip, using a special register, + * that holds all the settings we need. After we 've sent the + * data packet, we write on another special register to notify hw + * to apply the settings. This is done so that control registers + * can be dynamicaly programmed during operation and the settings + * are applied faster on the hw. + * + * We call each data packet an "RF Bank" and all the data we write + * (all RF Banks) "RF Buffer". This file holds initial RF Buffer + * data for the different RF chips, and various info to match RF + * Buffer offsets with specific RF registers so that we can access + * them. We tweak these settings on rfregs_init function. + * + * Also check out reg.h and U.S. 
Patent 6677779 B1 (about buffer + * registers and control registers): + * + * http://www.google.com/patents?id=qNURAAAAEBAJ + */ + + +/* + * Struct to hold default mode specific RF + * register values (RF Banks) + */ +struct ath5k_ini_rfbuffer { + u8 rfb_bank; /* RF Bank number */ + u16 rfb_ctrl_register; /* RF Buffer control register */ + u32 rfb_mode_data[5]; /* RF Buffer data for each mode */ +}; + +/* + * Struct to hold RF Buffer field + * infos used to access certain RF + * analog registers + */ +struct ath5k_rfb_field { + u8 len; /* Field length */ + u16 pos; /* Offset on the raw packet */ + u8 col; /* Column -used for shifting */ +}; + +/* + * RF analog register definition + */ +struct ath5k_rf_reg { + u8 bank; /* RF Buffer Bank number */ + u8 index; /* Register's index on rf_regs_idx */ + struct ath5k_rfb_field field; /* RF Buffer field for this register */ +}; + +/* Map RF registers to indexes + * We do this to handle common bits and make our + * life easier by using an index for each register + * instead of a full rfb_field */ +enum ath5k_rf_regs_idx { + /* BANK 6 */ + AR5K_RF_OB_2GHZ = 0, + AR5K_RF_OB_5GHZ, + AR5K_RF_DB_2GHZ, + AR5K_RF_DB_5GHZ, + AR5K_RF_FIXED_BIAS_A, + AR5K_RF_FIXED_BIAS_B, + AR5K_RF_PWD_XPD, + AR5K_RF_XPD_SEL, + AR5K_RF_XPD_GAIN, + AR5K_RF_PD_GAIN_LO, + AR5K_RF_PD_GAIN_HI, + AR5K_RF_HIGH_VC_CP, + AR5K_RF_MID_VC_CP, + AR5K_RF_LOW_VC_CP, + AR5K_RF_PUSH_UP, + AR5K_RF_PAD2GND, + AR5K_RF_XB2_LVL, + AR5K_RF_XB5_LVL, + AR5K_RF_PWD_ICLOBUF_2G, + AR5K_RF_PWD_84, + AR5K_RF_PWD_90, + AR5K_RF_PWD_130, + AR5K_RF_PWD_131, + AR5K_RF_PWD_132, + AR5K_RF_PWD_136, + AR5K_RF_PWD_137, + AR5K_RF_PWD_138, + AR5K_RF_PWD_166, + AR5K_RF_PWD_167, + AR5K_RF_DERBY_CHAN_SEL_MODE, + /* BANK 7 */ + AR5K_RF_GAIN_I, + AR5K_RF_PLO_SEL, + AR5K_RF_RFGAIN_SEL, + AR5K_RF_RFGAIN_STEP, + AR5K_RF_WAIT_S, + AR5K_RF_WAIT_I, + AR5K_RF_MAX_TIME, + AR5K_RF_MIXVGA_OVR, + AR5K_RF_MIXGAIN_OVR, + AR5K_RF_MIXGAIN_STEP, + AR5K_RF_PD_DELAY_A, + AR5K_RF_PD_DELAY_B, + AR5K_RF_PD_DELAY_XR, + AR5K_RF_PD_PERIOD_A, + AR5K_RF_PD_PERIOD_B, + AR5K_RF_PD_PERIOD_XR, +}; + + +/*******************\ +* RF5111 (Sombrero) * +\*******************/ + +/* BANK 6 len pos col */ +#define AR5K_RF5111_OB_2GHZ { 3, 119, 0 } +#define AR5K_RF5111_DB_2GHZ { 3, 122, 0 } + +#define AR5K_RF5111_OB_5GHZ { 3, 104, 0 } +#define AR5K_RF5111_DB_5GHZ { 3, 107, 0 } + +#define AR5K_RF5111_PWD_XPD { 1, 95, 0 } +#define AR5K_RF5111_XPD_GAIN { 4, 96, 0 } + +/* Access to PWD registers */ +#define AR5K_RF5111_PWD(_n) { 1, (135 - _n), 3 } + +/* BANK 7 len pos col */ +#define AR5K_RF5111_GAIN_I { 6, 29, 0 } +#define AR5K_RF5111_PLO_SEL { 1, 4, 0 } +#define AR5K_RF5111_RFGAIN_SEL { 1, 36, 0 } +#define AR5K_RF5111_RFGAIN_STEP { 6, 37, 0 } +/* Only on AR5212 BaseBand and up */ +#define AR5K_RF5111_WAIT_S { 5, 19, 0 } +#define AR5K_RF5111_WAIT_I { 5, 24, 0 } +#define AR5K_RF5111_MAX_TIME { 2, 49, 0 } + +static const struct ath5k_rf_reg rf_regs_5111[] = { + {6, AR5K_RF_OB_2GHZ, AR5K_RF5111_OB_2GHZ}, + {6, AR5K_RF_DB_2GHZ, AR5K_RF5111_DB_2GHZ}, + {6, AR5K_RF_OB_5GHZ, AR5K_RF5111_OB_5GHZ}, + {6, AR5K_RF_DB_5GHZ, AR5K_RF5111_DB_5GHZ}, + {6, AR5K_RF_PWD_XPD, AR5K_RF5111_PWD_XPD}, + {6, AR5K_RF_XPD_GAIN, AR5K_RF5111_XPD_GAIN}, + {6, AR5K_RF_PWD_84, AR5K_RF5111_PWD(84)}, + {6, AR5K_RF_PWD_90, AR5K_RF5111_PWD(90)}, + {7, AR5K_RF_GAIN_I, AR5K_RF5111_GAIN_I}, + {7, AR5K_RF_PLO_SEL, AR5K_RF5111_PLO_SEL}, + {7, AR5K_RF_RFGAIN_SEL, AR5K_RF5111_RFGAIN_SEL}, + {7, AR5K_RF_RFGAIN_STEP, AR5K_RF5111_RFGAIN_STEP}, + {7, AR5K_RF_WAIT_S, AR5K_RF5111_WAIT_S}, + {7, AR5K_RF_WAIT_I, 
AR5K_RF5111_WAIT_I}, + {7, AR5K_RF_MAX_TIME, AR5K_RF5111_MAX_TIME} +}; + +/* Default mode specific settings */ +static const struct ath5k_ini_rfbuffer rfb_5111[] = { + { 0, 0x989c, + /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00380000, 0x00380000, 0x00380000, 0x00380000, 0x00380000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 0, 0x989c, + { 0x00000000, 0x00000000, 0x000000c0, 0x00000080, 0x00000080 } }, + { 0, 0x989c, + { 0x000400f9, 0x000400f9, 0x000400ff, 0x000400fd, 0x000400fd } }, + { 0, 0x98d4, + { 0x00000000, 0x00000000, 0x00000004, 0x00000004, 0x00000004 } }, + { 1, 0x98d4, + { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, + { 2, 0x98d4, + { 0x00000010, 0x00000014, 0x00000010, 0x00000010, 0x00000014 } }, + { 3, 0x98d8, + { 0x00601068, 0x00601068, 0x00601068, 0x00601068, 0x00601068 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } }, + { 6, 0x989c, + { 0x04000000, 0x04000000, 0x04000000, 0x04000000, 0x04000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x0a000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x003800c0, 0x00380080, 0x023800c0, 0x003800c0, 0x003800c0 } }, + { 6, 0x989c, + { 0x00020006, 0x00020006, 0x00000006, 0x00020006, 0x00020006 } }, + { 6, 0x989c, + { 0x00000089, 0x00000089, 0x00000089, 0x00000089, 0x00000089 } }, + { 6, 0x989c, + { 0x000000a0, 0x000000a0, 0x000000a0, 0x000000a0, 0x000000a0 } }, + { 6, 0x989c, + { 0x00040007, 0x00040007, 0x00040007, 0x00040007, 0x00040007 } }, + { 6, 0x98d4, + { 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a } }, + { 7, 0x989c, + { 0x00000040, 0x00000048, 0x00000040, 0x00000040, 0x00000040 } }, + { 7, 0x989c, + { 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 } }, + { 7, 0x989c, + { 0x00000008, 0x00000008, 0x00000008, 0x00000008, 0x00000008 } }, + { 7, 
0x989c, + { 0x0000004f, 0x0000004f, 0x0000004f, 0x0000004f, 0x0000004f } }, + { 7, 0x989c, + { 0x000000f1, 0x000000f1, 0x00000061, 0x000000f1, 0x000000f1 } }, + { 7, 0x989c, + { 0x0000904f, 0x0000904f, 0x0000904c, 0x0000904f, 0x0000904f } }, + { 7, 0x989c, + { 0x0000125a, 0x0000125a, 0x0000129a, 0x0000125a, 0x0000125a } }, + { 7, 0x98cc, + { 0x0000000e, 0x0000000e, 0x0000000f, 0x0000000e, 0x0000000e } }, +}; + + + +/***********************\ +* RF5112/RF2112 (Derby) * +\***********************/ + +/* BANK 7 (Common) len pos col */ +#define AR5K_RF5112X_GAIN_I { 6, 14, 0 } +#define AR5K_RF5112X_MIXVGA_OVR { 1, 36, 0 } +#define AR5K_RF5112X_MIXGAIN_OVR { 2, 37, 0 } +#define AR5K_RF5112X_MIXGAIN_STEP { 4, 32, 0 } +#define AR5K_RF5112X_PD_DELAY_A { 4, 58, 0 } +#define AR5K_RF5112X_PD_DELAY_B { 4, 62, 0 } +#define AR5K_RF5112X_PD_DELAY_XR { 4, 66, 0 } +#define AR5K_RF5112X_PD_PERIOD_A { 4, 70, 0 } +#define AR5K_RF5112X_PD_PERIOD_B { 4, 74, 0 } +#define AR5K_RF5112X_PD_PERIOD_XR { 4, 78, 0 } + +/* RFX112 (Derby 1) */ + +/* BANK 6 len pos col */ +#define AR5K_RF5112_OB_2GHZ { 3, 269, 0 } +#define AR5K_RF5112_DB_2GHZ { 3, 272, 0 } + +#define AR5K_RF5112_OB_5GHZ { 3, 261, 0 } +#define AR5K_RF5112_DB_5GHZ { 3, 264, 0 } + +#define AR5K_RF5112_FIXED_BIAS_A { 1, 260, 0 } +#define AR5K_RF5112_FIXED_BIAS_B { 1, 259, 0 } + +#define AR5K_RF5112_XPD_SEL { 1, 284, 0 } +#define AR5K_RF5112_XPD_GAIN { 2, 252, 0 } + +/* Access to PWD registers */ +#define AR5K_RF5112_PWD(_n) { 1, (302 - _n), 3 } + +static const struct ath5k_rf_reg rf_regs_5112[] = { + {6, AR5K_RF_OB_2GHZ, AR5K_RF5112_OB_2GHZ}, + {6, AR5K_RF_DB_2GHZ, AR5K_RF5112_DB_2GHZ}, + {6, AR5K_RF_OB_5GHZ, AR5K_RF5112_OB_5GHZ}, + {6, AR5K_RF_DB_5GHZ, AR5K_RF5112_DB_5GHZ}, + {6, AR5K_RF_FIXED_BIAS_A, AR5K_RF5112_FIXED_BIAS_A}, + {6, AR5K_RF_FIXED_BIAS_B, AR5K_RF5112_FIXED_BIAS_B}, + {6, AR5K_RF_XPD_SEL, AR5K_RF5112_XPD_SEL}, + {6, AR5K_RF_XPD_GAIN, AR5K_RF5112_XPD_GAIN}, + {6, AR5K_RF_PWD_130, AR5K_RF5112_PWD(130)}, + {6, AR5K_RF_PWD_131, AR5K_RF5112_PWD(131)}, + {6, AR5K_RF_PWD_132, AR5K_RF5112_PWD(132)}, + {6, AR5K_RF_PWD_136, AR5K_RF5112_PWD(136)}, + {6, AR5K_RF_PWD_137, AR5K_RF5112_PWD(137)}, + {6, AR5K_RF_PWD_138, AR5K_RF5112_PWD(138)}, + {7, AR5K_RF_GAIN_I, AR5K_RF5112X_GAIN_I}, + {7, AR5K_RF_MIXVGA_OVR, AR5K_RF5112X_MIXVGA_OVR}, + {7, AR5K_RF_MIXGAIN_OVR, AR5K_RF5112X_MIXGAIN_OVR}, + {7, AR5K_RF_MIXGAIN_STEP, AR5K_RF5112X_MIXGAIN_STEP}, + {7, AR5K_RF_PD_DELAY_A, AR5K_RF5112X_PD_DELAY_A}, + {7, AR5K_RF_PD_DELAY_B, AR5K_RF5112X_PD_DELAY_B}, + {7, AR5K_RF_PD_DELAY_XR, AR5K_RF5112X_PD_DELAY_XR}, + {7, AR5K_RF_PD_PERIOD_A, AR5K_RF5112X_PD_PERIOD_A}, + {7, AR5K_RF_PD_PERIOD_B, AR5K_RF5112X_PD_PERIOD_B}, + {7, AR5K_RF_PD_PERIOD_XR, AR5K_RF5112X_PD_PERIOD_XR}, +}; + +/* Default mode specific settings */ +static const struct ath5k_ini_rfbuffer rfb_5112[] = { + { 1, 0x98d4, + /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ + { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, + { 2, 0x98d0, + { 0x03060408, 0x03070408, 0x03060408, 0x03060408, 0x03070408 } }, + { 3, 0x98dc, + { 0x00a0c0c0, 0x00a0c0c0, 0x00e0c0c0, 0x00e0c0c0, 0x00e0c0c0 } }, + { 6, 0x989c, + { 0x00a00000, 0x00a00000, 0x00a00000, 0x00a00000, 0x00a00000 } }, + { 6, 0x989c, + { 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00660000, 0x00660000, 0x00660000, 0x00660000, 0x00660000 } 
}, + { 6, 0x989c, + { 0x00db0000, 0x00db0000, 0x00db0000, 0x00db0000, 0x00db0000 } }, + { 6, 0x989c, + { 0x00f10000, 0x00f10000, 0x00f10000, 0x00f10000, 0x00f10000 } }, + { 6, 0x989c, + { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } }, + { 6, 0x989c, + { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } }, + { 6, 0x989c, + { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } }, + { 6, 0x989c, + { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, + { 6, 0x989c, + { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, + { 6, 0x989c, + { 0x008b0000, 0x008b0000, 0x008b0000, 0x008b0000, 0x008b0000 } }, + { 6, 0x989c, + { 0x00600000, 0x00600000, 0x00600000, 0x00600000, 0x00600000 } }, + { 6, 0x989c, + { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } }, + { 6, 0x989c, + { 0x00840000, 0x00840000, 0x00840000, 0x00840000, 0x00840000 } }, + { 6, 0x989c, + { 0x00640000, 0x00640000, 0x00640000, 0x00640000, 0x00640000 } }, + { 6, 0x989c, + { 0x00200000, 0x00200000, 0x00200000, 0x00200000, 0x00200000 } }, + { 6, 0x989c, + { 0x00240000, 0x00240000, 0x00240000, 0x00240000, 0x00240000 } }, + { 6, 0x989c, + { 0x00250000, 0x00250000, 0x00250000, 0x00250000, 0x00250000 } }, + { 6, 0x989c, + { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } }, + { 6, 0x989c, + { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } }, + { 6, 0x989c, + { 0x00510000, 0x00510000, 0x00510000, 0x00510000, 0x00510000 } }, + { 6, 0x989c, + { 0x1c040000, 0x1c040000, 0x1c040000, 0x1c040000, 0x1c040000 } }, + { 6, 0x989c, + { 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000 } }, + { 6, 0x989c, + { 0x00a10000, 0x00a10000, 0x00a10000, 0x00a10000, 0x00a10000 } }, + { 6, 0x989c, + { 0x00400000, 0x00400000, 0x00400000, 0x00400000, 0x00400000 } }, + { 6, 0x989c, + { 0x03090000, 0x03090000, 0x03090000, 0x03090000, 0x03090000 } }, + { 6, 0x989c, + { 0x06000000, 0x06000000, 0x06000000, 0x06000000, 0x06000000 } }, + { 6, 0x989c, + { 0x000000b0, 0x000000b0, 0x000000a8, 0x000000a8, 0x000000a8 } }, + { 6, 0x989c, + { 0x0000002e, 0x0000002e, 0x0000002e, 0x0000002e, 0x0000002e } }, + { 6, 0x989c, + { 0x006c4a41, 0x006c4a41, 0x006c4af1, 0x006c4a61, 0x006c4a61 } }, + { 6, 0x989c, + { 0x0050892a, 0x0050892a, 0x0050892b, 0x0050892b, 0x0050892b } }, + { 6, 0x989c, + { 0x00842400, 0x00842400, 0x00842400, 0x00842400, 0x00842400 } }, + { 6, 0x989c, + { 0x00c69200, 0x00c69200, 0x00c69200, 0x00c69200, 0x00c69200 } }, + { 6, 0x98d0, + { 0x0002000c, 0x0002000c, 0x0002000c, 0x0002000c, 0x0002000c } }, + { 7, 0x989c, + { 0x00000094, 0x00000094, 0x00000094, 0x00000094, 0x00000094 } }, + { 7, 0x989c, + { 0x00000091, 0x00000091, 0x00000091, 0x00000091, 0x00000091 } }, + { 7, 0x989c, + { 0x0000000a, 0x0000000a, 0x00000012, 0x00000012, 0x00000012 } }, + { 7, 0x989c, + { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } }, + { 7, 0x989c, + { 0x000000c1, 0x000000c1, 0x000000c1, 0x000000c1, 0x000000c1 } }, + { 7, 0x989c, + { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } }, + { 7, 0x989c, + { 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0 } }, + { 7, 0x989c, + { 0x00000022, 0x00000022, 0x00000022, 0x00000022, 0x00000022 } }, + { 7, 0x989c, + { 0x00000092, 0x00000092, 0x00000092, 0x00000092, 0x00000092 } }, + { 7, 0x989c, + { 0x000000d4, 0x000000d4, 0x000000d4, 
0x000000d4, 0x000000d4 } }, + { 7, 0x989c, + { 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc } }, + { 7, 0x989c, + { 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c } }, + { 7, 0x98c4, + { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } }, +}; + +/* RFX112A (Derby 2) */ + +/* BANK 6 len pos col */ +#define AR5K_RF5112A_OB_2GHZ { 3, 287, 0 } +#define AR5K_RF5112A_DB_2GHZ { 3, 290, 0 } + +#define AR5K_RF5112A_OB_5GHZ { 3, 279, 0 } +#define AR5K_RF5112A_DB_5GHZ { 3, 282, 0 } + +#define AR5K_RF5112A_FIXED_BIAS_A { 1, 278, 0 } +#define AR5K_RF5112A_FIXED_BIAS_B { 1, 277, 0 } + +#define AR5K_RF5112A_XPD_SEL { 1, 302, 0 } +#define AR5K_RF5112A_PDGAINLO { 2, 270, 0 } +#define AR5K_RF5112A_PDGAINHI { 2, 257, 0 } + +/* Access to PWD registers */ +#define AR5K_RF5112A_PWD(_n) { 1, (306 - _n), 3 } + +/* Voltage regulators */ +#define AR5K_RF5112A_HIGH_VC_CP { 2, 90, 2 } +#define AR5K_RF5112A_MID_VC_CP { 2, 92, 2 } +#define AR5K_RF5112A_LOW_VC_CP { 2, 94, 2 } +#define AR5K_RF5112A_PUSH_UP { 1, 254, 2 } + +/* Power consumption */ +#define AR5K_RF5112A_PAD2GND { 1, 281, 1 } +#define AR5K_RF5112A_XB2_LVL { 2, 1, 3 } +#define AR5K_RF5112A_XB5_LVL { 2, 3, 3 } + +static const struct ath5k_rf_reg rf_regs_5112a[] = { + {6, AR5K_RF_OB_2GHZ, AR5K_RF5112A_OB_2GHZ}, + {6, AR5K_RF_DB_2GHZ, AR5K_RF5112A_DB_2GHZ}, + {6, AR5K_RF_OB_5GHZ, AR5K_RF5112A_OB_5GHZ}, + {6, AR5K_RF_DB_5GHZ, AR5K_RF5112A_DB_5GHZ}, + {6, AR5K_RF_FIXED_BIAS_A, AR5K_RF5112A_FIXED_BIAS_A}, + {6, AR5K_RF_FIXED_BIAS_B, AR5K_RF5112A_FIXED_BIAS_B}, + {6, AR5K_RF_XPD_SEL, AR5K_RF5112A_XPD_SEL}, + {6, AR5K_RF_PD_GAIN_LO, AR5K_RF5112A_PDGAINLO}, + {6, AR5K_RF_PD_GAIN_HI, AR5K_RF5112A_PDGAINHI}, + {6, AR5K_RF_PWD_130, AR5K_RF5112A_PWD(130)}, + {6, AR5K_RF_PWD_131, AR5K_RF5112A_PWD(131)}, + {6, AR5K_RF_PWD_132, AR5K_RF5112A_PWD(132)}, + {6, AR5K_RF_PWD_136, AR5K_RF5112A_PWD(136)}, + {6, AR5K_RF_PWD_137, AR5K_RF5112A_PWD(137)}, + {6, AR5K_RF_PWD_138, AR5K_RF5112A_PWD(138)}, + {6, AR5K_RF_PWD_166, AR5K_RF5112A_PWD(166)}, + {6, AR5K_RF_PWD_167, AR5K_RF5112A_PWD(167)}, + {6, AR5K_RF_HIGH_VC_CP, AR5K_RF5112A_HIGH_VC_CP}, + {6, AR5K_RF_MID_VC_CP, AR5K_RF5112A_MID_VC_CP}, + {6, AR5K_RF_LOW_VC_CP, AR5K_RF5112A_LOW_VC_CP}, + {6, AR5K_RF_PUSH_UP, AR5K_RF5112A_PUSH_UP}, + {6, AR5K_RF_PAD2GND, AR5K_RF5112A_PAD2GND}, + {6, AR5K_RF_XB2_LVL, AR5K_RF5112A_XB2_LVL}, + {6, AR5K_RF_XB5_LVL, AR5K_RF5112A_XB5_LVL}, + {7, AR5K_RF_GAIN_I, AR5K_RF5112X_GAIN_I}, + {7, AR5K_RF_MIXVGA_OVR, AR5K_RF5112X_MIXVGA_OVR}, + {7, AR5K_RF_MIXGAIN_OVR, AR5K_RF5112X_MIXGAIN_OVR}, + {7, AR5K_RF_MIXGAIN_STEP, AR5K_RF5112X_MIXGAIN_STEP}, + {7, AR5K_RF_PD_DELAY_A, AR5K_RF5112X_PD_DELAY_A}, + {7, AR5K_RF_PD_DELAY_B, AR5K_RF5112X_PD_DELAY_B}, + {7, AR5K_RF_PD_DELAY_XR, AR5K_RF5112X_PD_DELAY_XR}, + {7, AR5K_RF_PD_PERIOD_A, AR5K_RF5112X_PD_PERIOD_A}, + {7, AR5K_RF_PD_PERIOD_B, AR5K_RF5112X_PD_PERIOD_B}, + {7, AR5K_RF_PD_PERIOD_XR, AR5K_RF5112X_PD_PERIOD_XR}, +}; + +/* Default mode specific settings */ +static const struct ath5k_ini_rfbuffer rfb_5112a[] = { + { 1, 0x98d4, + /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ + { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, + { 2, 0x98d0, + { 0x03060408, 0x03070408, 0x03060408, 0x03060408, 0x03070408 } }, + { 3, 0x98dc, + { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, + { 6, 0x989c, + { 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00800000, 0x00800000, 
0x00800000, 0x00800000, 0x00800000 } }, + { 6, 0x989c, + { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } }, + { 6, 0x989c, + { 0x00010000, 0x00010000, 0x00010000, 0x00010000, 0x00010000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00180000, 0x00180000, 0x00180000, 0x00180000, 0x00180000 } }, + { 6, 0x989c, + { 0x00600000, 0x00600000, 0x006e0000, 0x006e0000, 0x006e0000 } }, + { 6, 0x989c, + { 0x00c70000, 0x00c70000, 0x00c70000, 0x00c70000, 0x00c70000 } }, + { 6, 0x989c, + { 0x004b0000, 0x004b0000, 0x004b0000, 0x004b0000, 0x004b0000 } }, + { 6, 0x989c, + { 0x04480000, 0x04480000, 0x04480000, 0x04480000, 0x04480000 } }, + { 6, 0x989c, + { 0x004c0000, 0x004c0000, 0x004c0000, 0x004c0000, 0x004c0000 } }, + { 6, 0x989c, + { 0x00e40000, 0x00e40000, 0x00e40000, 0x00e40000, 0x00e40000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00fc0000, 0x00fc0000, 0x00fc0000, 0x00fc0000, 0x00fc0000 } }, + { 6, 0x989c, + { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, + { 6, 0x989c, + { 0x043f0000, 0x043f0000, 0x043f0000, 0x043f0000, 0x043f0000 } }, + { 6, 0x989c, + { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } }, + { 6, 0x989c, + { 0x02190000, 0x02190000, 0x02190000, 0x02190000, 0x02190000 } }, + { 6, 0x989c, + { 0x00240000, 0x00240000, 0x00240000, 0x00240000, 0x00240000 } }, + { 6, 0x989c, + { 0x00b40000, 0x00b40000, 0x00b40000, 0x00b40000, 0x00b40000 } }, + { 6, 0x989c, + { 0x00990000, 0x00990000, 0x00990000, 0x00990000, 0x00990000 } }, + { 6, 0x989c, + { 0x00500000, 0x00500000, 0x00500000, 0x00500000, 0x00500000 } }, + { 6, 0x989c, + { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } }, + { 6, 0x989c, + { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } }, + { 6, 0x989c, + { 0xc0320000, 0xc0320000, 0xc0320000, 0xc0320000, 0xc0320000 } }, + { 6, 0x989c, + { 0x01740000, 0x01740000, 0x01740000, 0x01740000, 0x01740000 } }, + { 6, 0x989c, + { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } }, + { 6, 0x989c, + { 0x86280000, 0x86280000, 0x86280000, 0x86280000, 0x86280000 } }, + { 6, 0x989c, + { 0x31840000, 0x31840000, 0x31840000, 0x31840000, 0x31840000 } }, + { 6, 0x989c, + { 0x00f20080, 0x00f20080, 0x00f20080, 0x00f20080, 0x00f20080 } }, + { 6, 0x989c, + { 0x00270019, 0x00270019, 0x00270019, 0x00270019, 0x00270019 } }, + { 6, 0x989c, + { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x000000b2, 0x000000b2, 0x000000b2, 0x000000b2, 0x000000b2 } }, + { 6, 0x989c, + { 0x00b02084, 0x00b02084, 0x00b02084, 0x00b02084, 0x00b02084 } }, + { 6, 0x989c, + { 0x004125a4, 0x004125a4, 0x004125a4, 0x004125a4, 0x004125a4 } }, + { 6, 0x989c, + { 0x00119220, 0x00119220, 0x00119220, 0x00119220, 0x00119220 } }, + { 6, 0x989c, + { 0x001a4800, 0x001a4800, 0x001a4800, 0x001a4800, 0x001a4800 } }, + { 6, 0x98d8, + { 0x000b0230, 0x000b0230, 0x000b0230, 0x000b0230, 0x000b0230 } }, + { 7, 0x989c, + { 0x00000094, 0x00000094, 0x00000094, 0x00000094, 0x00000094 } }, + { 7, 0x989c, + { 0x00000091, 0x00000091, 0x00000091, 0x00000091, 0x00000091 } }, + { 7, 0x989c, + { 0x00000012, 0x00000012, 0x00000012, 0x00000012, 0x00000012 } }, + { 7, 0x989c, + { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } }, + { 7, 0x989c, + { 0x000000d9, 0x000000d9, 0x000000d9, 0x000000d9, 0x000000d9 } }, + { 7, 0x989c, + { 
0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } }, + { 7, 0x989c, + { 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0 } }, + { 7, 0x989c, + { 0x000000a2, 0x000000a2, 0x000000a2, 0x000000a2, 0x000000a2 } }, + { 7, 0x989c, + { 0x00000052, 0x00000052, 0x00000052, 0x00000052, 0x00000052 } }, + { 7, 0x989c, + { 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4 } }, + { 7, 0x989c, + { 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc } }, + { 7, 0x989c, + { 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c } }, + { 7, 0x98c4, + { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } }, +}; + + + +/******************\ +* RF2413 (Griffin) * +\******************/ + +/* BANK 6 len pos col */ +#define AR5K_RF2413_OB_2GHZ { 3, 168, 0 } +#define AR5K_RF2413_DB_2GHZ { 3, 165, 0 } + +static const struct ath5k_rf_reg rf_regs_2413[] = { + {6, AR5K_RF_OB_2GHZ, AR5K_RF2413_OB_2GHZ}, + {6, AR5K_RF_DB_2GHZ, AR5K_RF2413_DB_2GHZ}, +}; + +/* Default mode specific settings + * XXX: a/aTurbo ??? + */ +static const struct ath5k_ini_rfbuffer rfb_2413[] = { + { 1, 0x98d4, + /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ + { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, + { 2, 0x98d0, + { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } }, + { 3, 0x98dc, + { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, + { 6, 0x989c, + { 0xf0000000, 0xf0000000, 0xf0000000, 0xf0000000, 0xf0000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x03000000, 0x03000000, 0x03000000, 0x03000000, 0x03000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x40400000, 0x40400000, 0x40400000, 0x40400000, 0x40400000 } }, + { 6, 0x989c, + { 0x65050000, 0x65050000, 0x65050000, 0x65050000, 0x65050000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00420000, 0x00420000, 0x00420000, 0x00420000, 0x00420000 } }, + { 6, 0x989c, + { 0x00b50000, 0x00b50000, 0x00b50000, 0x00b50000, 0x00b50000 } }, + { 6, 0x989c, + { 0x00030000, 0x00030000, 0x00030000, 0x00030000, 0x00030000 } }, + { 6, 0x989c, + { 0x00f70000, 0x00f70000, 0x00f70000, 0x00f70000, 0x00f70000 } }, + { 6, 0x989c, + { 0x009d0000, 0x009d0000, 0x009d0000, 0x009d0000, 0x009d0000 } }, + { 6, 0x989c, + { 0x00220000, 0x00220000, 0x00220000, 0x00220000, 0x00220000 } }, + { 6, 0x989c, + { 0x04220000, 0x04220000, 0x04220000, 0x04220000, 0x04220000 } }, + { 6, 0x989c, + { 0x00230018, 0x00230018, 0x00230018, 0x00230018, 0x00230018 } }, + { 6, 0x989c, + { 0x00280000, 0x00280000, 0x00280060, 0x00280060, 0x00280060 } }, + { 6, 0x989c, + { 0x005000c0, 0x005000c0, 0x005000c3, 0x005000c3, 0x005000c3 } }, + { 6, 0x989c, + { 0x0004007f, 0x0004007f, 0x0004007f, 0x0004007f, 0x0004007f } }, + { 6, 0x989c, + { 0x00000458, 0x00000458, 0x00000458, 0x00000458, 0x00000458 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x0000c000, 0x0000c000, 0x0000c000, 0x0000c000, 
0x0000c000 } }, + { 6, 0x98d8, + { 0x00400230, 0x00400230, 0x00400230, 0x00400230, 0x00400230 } }, + { 7, 0x989c, + { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } }, + { 7, 0x989c, + { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } }, + { 7, 0x98cc, + { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } }, +}; + + + +/***************************\ +* RF2315/RF2316 (Cobra SoC) * +\***************************/ + +/* BANK 6 len pos col */ +#define AR5K_RF2316_OB_2GHZ { 3, 178, 0 } +#define AR5K_RF2316_DB_2GHZ { 3, 175, 0 } + +static const struct ath5k_rf_reg rf_regs_2316[] = { + {6, AR5K_RF_OB_2GHZ, AR5K_RF2316_OB_2GHZ}, + {6, AR5K_RF_DB_2GHZ, AR5K_RF2316_DB_2GHZ}, +}; + +/* Default mode specific settings */ +static const struct ath5k_ini_rfbuffer rfb_2316[] = { + { 1, 0x98d4, + /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ + { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, + { 2, 0x98d0, + { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } }, + { 3, 0x98dc, + { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000 } }, + { 6, 0x989c, + { 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000 } }, + { 6, 0x989c, + { 0x02000000, 0x02000000, 0x02000000, 0x02000000, 0x02000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0xf8000000, 0xf8000000, 0xf8000000, 0xf8000000, 0xf8000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x95150000, 0x95150000, 0x95150000, 0x95150000, 0x95150000 } }, + { 6, 0x989c, + { 0xc1000000, 0xc1000000, 0xc1000000, 0xc1000000, 0xc1000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000 } }, + { 6, 0x989c, + { 0x00d50000, 0x00d50000, 0x00d50000, 0x00d50000, 0x00d50000 } }, + { 6, 0x989c, + { 0x000e0000, 0x000e0000, 0x000e0000, 0x000e0000, 0x000e0000 } }, + { 6, 0x989c, + { 0x00dc0000, 0x00dc0000, 0x00dc0000, 0x00dc0000, 0x00dc0000 } }, + { 6, 0x989c, + { 0x00770000, 0x00770000, 0x00770000, 0x00770000, 0x00770000 } }, + { 6, 0x989c, + { 0x008a0000, 0x008a0000, 0x008a0000, 0x008a0000, 0x008a0000 } }, + { 6, 0x989c, + { 0x10880000, 0x10880000, 0x10880000, 0x10880000, 0x10880000 } }, + { 6, 0x989c, + { 0x008c0060, 0x008c0060, 0x008c0060, 0x008c0060, 0x008c0060 } }, + { 6, 0x989c, + { 0x00a00000, 0x00a00000, 0x00a00080, 0x00a00080, 0x00a00080 } }, + { 6, 0x989c, + { 0x00400000, 0x00400000, 0x0040000d, 0x0040000d, 0x0040000d } }, + { 6, 0x989c, + { 0x00110400, 0x00110400, 0x00110400, 0x00110400, 0x00110400 } }, + { 6, 0x989c, + { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } }, + { 6, 0x989c, + { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } }, + { 6, 0x989c, + { 0x00000b00, 0x00000b00, 0x00000b00, 0x00000b00, 0x00000b00 } }, + { 6, 0x989c, + { 0x00000be8, 0x00000be8, 0x00000be8, 0x00000be8, 0x00000be8 } }, + { 6, 0x98c0, + { 0x00010000, 0x00010000, 0x00010000, 0x00010000, 0x00010000 } }, + { 7, 0x989c, + { 0x00006400, 
0x00006400, 0x00006400, 0x00006400, 0x00006400 } }, + { 7, 0x989c, + { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } }, + { 7, 0x98cc, + { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } }, +}; + + + +/******************************\ +* RF5413/RF5424 (Eagle/Condor) * +\******************************/ + +/* BANK 6 len pos col */ +#define AR5K_RF5413_OB_2GHZ { 3, 241, 0 } +#define AR5K_RF5413_DB_2GHZ { 3, 238, 0 } + +#define AR5K_RF5413_OB_5GHZ { 3, 247, 0 } +#define AR5K_RF5413_DB_5GHZ { 3, 244, 0 } + +#define AR5K_RF5413_PWD_ICLOBUF2G { 3, 131, 3 } +#define AR5K_RF5413_DERBY_CHAN_SEL_MODE { 1, 291, 2 } + +static const struct ath5k_rf_reg rf_regs_5413[] = { + {6, AR5K_RF_OB_2GHZ, AR5K_RF5413_OB_2GHZ}, + {6, AR5K_RF_DB_2GHZ, AR5K_RF5413_DB_2GHZ}, + {6, AR5K_RF_OB_5GHZ, AR5K_RF5413_OB_5GHZ}, + {6, AR5K_RF_DB_5GHZ, AR5K_RF5413_DB_5GHZ}, + {6, AR5K_RF_PWD_ICLOBUF_2G, AR5K_RF5413_PWD_ICLOBUF2G}, + {6, AR5K_RF_DERBY_CHAN_SEL_MODE, AR5K_RF5413_DERBY_CHAN_SEL_MODE}, +}; + +/* Default mode specific settings */ +static const struct ath5k_ini_rfbuffer rfb_5413[] = { + { 1, 0x98d4, + /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ + { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, + { 2, 0x98d0, + { 0x00000008, 0x00000008, 0x00000008, 0x00000008, 0x00000008 } }, + { 3, 0x98dc, + { 0x00a000c0, 0x00a000c0, 0x00e000c0, 0x00e000c0, 0x00e000c0 } }, + { 6, 0x989c, + { 0x33000000, 0x33000000, 0x33000000, 0x33000000, 0x33000000 } }, + { 6, 0x989c, + { 0x01000000, 0x01000000, 0x01000000, 0x01000000, 0x01000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x1f000000, 0x1f000000, 0x1f000000, 0x1f000000, 0x1f000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00b80000, 0x00b80000, 0x00b80000, 0x00b80000, 0x00b80000 } }, + { 6, 0x989c, + { 0x00b70000, 0x00b70000, 0x00b70000, 0x00b70000, 0x00b70000 } }, + { 6, 0x989c, + { 0x00840000, 0x00840000, 0x00840000, 0x00840000, 0x00840000 } }, + { 6, 0x989c, + { 0x00980000, 0x00980000, 0x00980000, 0x00980000, 0x00980000 } }, + { 6, 0x989c, + { 0x00c00000, 0x00c00000, 0x00c00000, 0x00c00000, 0x00c00000 } }, + { 6, 0x989c, + { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, + { 6, 0x989c, + { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, + { 6, 0x989c, + { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, + { 6, 0x989c, + { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, + { 6, 0x989c, + { 0x00d70000, 0x00d70000, 0x00d70000, 0x00d70000, 0x00d70000 } }, + { 6, 0x989c, + { 0x00610000, 0x00610000, 0x00610000, 0x00610000, 0x00610000 } }, + { 6, 0x989c, + { 0x00fe0000, 0x00fe0000, 0x00fe0000, 0x00fe0000, 0x00fe0000 } }, + { 6, 0x989c, + { 0x00de0000, 0x00de0000, 0x00de0000, 0x00de0000, 0x00de0000 } }, + { 6, 0x989c, + { 0x007f0000, 0x007f0000, 0x007f0000, 0x007f0000, 0x007f0000 } }, + { 6, 0x989c, + { 0x043d0000, 0x043d0000, 0x043d0000, 0x043d0000, 0x043d0000 } }, + { 6, 0x989c, + { 0x00770000, 0x00770000, 0x00770000, 0x00770000, 0x00770000 } }, + { 6, 0x989c, + { 0x00440000, 0x00440000, 0x00440000, 0x00440000, 0x00440000 } }, + { 6, 0x989c, + { 0x00980000, 0x00980000, 0x00980000, 0x00980000, 0x00980000 } }, + { 6, 0x989c, + { 0x00100080, 0x00100080, 0x00100080, 0x00100080, 
0x00100080 } }, + { 6, 0x989c, + { 0x0005c034, 0x0005c034, 0x0005c034, 0x0005c034, 0x0005c034 } }, + { 6, 0x989c, + { 0x003100f0, 0x003100f0, 0x003100f0, 0x003100f0, 0x003100f0 } }, + { 6, 0x989c, + { 0x000c011f, 0x000c011f, 0x000c011f, 0x000c011f, 0x000c011f } }, + { 6, 0x989c, + { 0x00510040, 0x00510040, 0x00510040, 0x00510040, 0x00510040 } }, + { 6, 0x989c, + { 0x005000da, 0x005000da, 0x005000da, 0x005000da, 0x005000da } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00004044, 0x00004044, 0x00004044, 0x00004044, 0x00004044 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x000060c0, 0x000060c0, 0x000060c0, 0x000060c0, 0x000060c0 } }, + { 6, 0x989c, + { 0x00002c00, 0x00002c00, 0x00003600, 0x00003600, 0x00002c00 } }, + { 6, 0x98c8, + { 0x00000403, 0x00000403, 0x00040403, 0x00040403, 0x00040403 } }, + { 7, 0x989c, + { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } }, + { 7, 0x989c, + { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } }, + { 7, 0x98cc, + { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } }, +}; + + + +/***************************\ +* RF2425/RF2417 (Swan/Nala) * +* AR2317 (Spider SoC) * +\***************************/ + +/* BANK 6 len pos col */ +#define AR5K_RF2425_OB_2GHZ { 3, 193, 0 } +#define AR5K_RF2425_DB_2GHZ { 3, 190, 0 } + +static const struct ath5k_rf_reg rf_regs_2425[] = { + {6, AR5K_RF_OB_2GHZ, AR5K_RF2425_OB_2GHZ}, + {6, AR5K_RF_DB_2GHZ, AR5K_RF2425_DB_2GHZ}, +}; + +/* Default mode specific settings + * XXX: a/aTurbo ? + */ +static const struct ath5k_ini_rfbuffer rfb_2425[] = { + { 1, 0x98d4, + /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ + { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, + { 2, 0x98d0, + { 0x02001408, 0x02001408, 0x02001408, 0x02001408, 0x02001408 } }, + { 3, 0x98dc, + { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, + { 6, 0x989c, + { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } }, + { 6, 0x989c, + { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } }, + { 6, 0x989c, + { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } }, + { 6, 0x989c, + { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } }, + { 6, 0x989c, + { 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000, 
0x00e70000 } }, + { 6, 0x989c, + { 0x00140000, 0x00140000, 0x00140000, 0x00140000, 0x00140000 } }, + { 6, 0x989c, + { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } }, + { 6, 0x989c, + { 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a } }, + { 6, 0x989c, + { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } }, + { 6, 0x989c, + { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } }, + { 6, 0x989c, + { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } }, + { 6, 0x989c, + { 0x00001688, 0x00001688, 0x00001688, 0x00001688, 0x00001688 } }, + { 6, 0x98c4, + { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } }, + { 7, 0x989c, + { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } }, + { 7, 0x989c, + { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } }, + { 7, 0x98cc, + { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } }, +}; + +/* + * TODO: Handle the few differences with swan during + * bank modification and get rid of this + */ +static const struct ath5k_ini_rfbuffer rfb_2317[] = { + { 1, 0x98d4, + /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ + { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, + { 2, 0x98d0, + { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } }, + { 3, 0x98dc, + { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, + { 6, 0x989c, + { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } }, + { 6, 0x989c, + { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } }, + { 6, 0x989c, + { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } }, + { 6, 0x989c, + { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } }, + { 6, 0x989c, + { 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000 } }, + { 6, 0x989c, + { 0x00140100, 0x00140100, 0x00140100, 0x00140100, 0x00140100 } }, + { 6, 0x989c, + { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } }, + { 6, 0x989c, + { 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a } }, + { 6, 0x989c, + { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } 
}, + { 6, 0x989c, + { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } }, + { 6, 0x989c, + { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } }, + { 6, 0x989c, + { 0x00009688, 0x00009688, 0x00009688, 0x00009688, 0x00009688 } }, + { 6, 0x98c4, + { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } }, + { 7, 0x989c, + { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } }, + { 7, 0x989c, + { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } }, + { 7, 0x98cc, + { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } }, +}; + +/* + * TODO: Handle the few differences with swan during + * bank modification and get rid of this + * XXX: a/aTurbo ? + */ +static const struct ath5k_ini_rfbuffer rfb_2417[] = { + { 1, 0x98d4, + /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ + { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, + { 2, 0x98d0, + { 0x02001408, 0x02001408, 0x02001408, 0x02001408, 0x02001408 } }, + { 3, 0x98dc, + { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, + { 6, 0x989c, + { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } }, + { 6, 0x989c, + { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } }, + { 6, 0x989c, + { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } }, + { 6, 0x989c, + { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } }, + { 6, 0x989c, + { 0x00e70000, 0x00e70000, 0x80e70000, 0x80e70000, 0x00e70000 } }, + { 6, 0x989c, + { 0x00140000, 0x00140000, 0x00140000, 0x00140000, 0x00140000 } }, + { 6, 0x989c, + { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } }, + { 6, 0x989c, + { 0x0007001a, 0x0007001a, 0x0207001a, 0x0207001a, 0x0007001a } }, + { 6, 0x989c, + { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } }, + { 6, 0x989c, + { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } }, + { 6, 0x989c, + { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, + { 6, 0x989c, + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000 } }, + { 6, 0x989c, + { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } }, + { 6, 0x989c, + { 0x00001688, 0x00001688, 0x00001688, 0x00001688, 0x00001688 } }, + { 6, 0x98c4, + { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } }, + { 7, 0x989c, + { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } }, + { 7, 0x989c, + { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } }, + { 7, 0x98cc, + { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } }, +}; diff --git a/drivers/net/wireless/ath/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h new file mode 100644 index 0000000..1354d8c --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/rfgain.h @@ -0,0 +1,516 @@ +/* + * RF Gain optimization + * + * Copyright (c) 2004-2009 Reyk Floeter + * Copyright (c) 2006-2009 Nick Kossifidis + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* + * Mode-specific RF Gain table (64bytes) for RF5111/5112 + * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial + * RF Gain values are included in AR5K_AR5210_INI) + */ +struct ath5k_ini_rfgain { + u16 rfg_register; /* RF Gain register address */ + u32 rfg_value[2]; /* [freq (see below)] */ +}; + +/* Initial RF Gain settings for RF5111 */ +static const struct ath5k_ini_rfgain rfgain_5111[] = { + /* 5Ghz 2Ghz */ + { AR5K_RF_GAIN(0), { 0x000001a9, 0x00000000 } }, + { AR5K_RF_GAIN(1), { 0x000001e9, 0x00000040 } }, + { AR5K_RF_GAIN(2), { 0x00000029, 0x00000080 } }, + { AR5K_RF_GAIN(3), { 0x00000069, 0x00000150 } }, + { AR5K_RF_GAIN(4), { 0x00000199, 0x00000190 } }, + { AR5K_RF_GAIN(5), { 0x000001d9, 0x000001d0 } }, + { AR5K_RF_GAIN(6), { 0x00000019, 0x00000010 } }, + { AR5K_RF_GAIN(7), { 0x00000059, 0x00000044 } }, + { AR5K_RF_GAIN(8), { 0x00000099, 0x00000084 } }, + { AR5K_RF_GAIN(9), { 0x000001a5, 0x00000148 } }, + { AR5K_RF_GAIN(10), { 0x000001e5, 0x00000188 } }, + { AR5K_RF_GAIN(11), { 0x00000025, 0x000001c8 } }, + { AR5K_RF_GAIN(12), { 0x000001c8, 0x00000014 } }, + { AR5K_RF_GAIN(13), { 0x00000008, 0x00000042 } }, + { AR5K_RF_GAIN(14), { 0x00000048, 0x00000082 } }, + { AR5K_RF_GAIN(15), { 0x00000088, 0x00000178 } }, + { AR5K_RF_GAIN(16), { 0x00000198, 0x000001b8 } }, + { AR5K_RF_GAIN(17), { 0x000001d8, 0x000001f8 } }, + { AR5K_RF_GAIN(18), { 0x00000018, 0x00000012 } }, + { AR5K_RF_GAIN(19), { 0x00000058, 0x00000052 } }, + { AR5K_RF_GAIN(20), { 0x00000098, 0x00000092 } }, + { AR5K_RF_GAIN(21), { 0x000001a4, 0x0000017c } }, + { AR5K_RF_GAIN(22), { 0x000001e4, 0x000001bc } }, + { AR5K_RF_GAIN(23), { 0x00000024, 0x000001fc } }, + { AR5K_RF_GAIN(24), { 0x00000064, 0x0000000a } }, + { AR5K_RF_GAIN(25), { 0x000000a4, 0x0000004a } }, + { AR5K_RF_GAIN(26), { 0x000000e4, 0x0000008a } }, + { AR5K_RF_GAIN(27), { 0x0000010a, 0x0000015a } }, + { AR5K_RF_GAIN(28), { 0x0000014a, 0x0000019a } }, + { 
AR5K_RF_GAIN(29), { 0x0000018a, 0x000001da } }, + { AR5K_RF_GAIN(30), { 0x000001ca, 0x0000000e } }, + { AR5K_RF_GAIN(31), { 0x0000000a, 0x0000004e } }, + { AR5K_RF_GAIN(32), { 0x0000004a, 0x0000008e } }, + { AR5K_RF_GAIN(33), { 0x0000008a, 0x0000015e } }, + { AR5K_RF_GAIN(34), { 0x000001ba, 0x0000019e } }, + { AR5K_RF_GAIN(35), { 0x000001fa, 0x000001de } }, + { AR5K_RF_GAIN(36), { 0x0000003a, 0x00000009 } }, + { AR5K_RF_GAIN(37), { 0x0000007a, 0x00000049 } }, + { AR5K_RF_GAIN(38), { 0x00000186, 0x00000089 } }, + { AR5K_RF_GAIN(39), { 0x000001c6, 0x00000179 } }, + { AR5K_RF_GAIN(40), { 0x00000006, 0x000001b9 } }, + { AR5K_RF_GAIN(41), { 0x00000046, 0x000001f9 } }, + { AR5K_RF_GAIN(42), { 0x00000086, 0x00000039 } }, + { AR5K_RF_GAIN(43), { 0x000000c6, 0x00000079 } }, + { AR5K_RF_GAIN(44), { 0x000000c6, 0x000000b9 } }, + { AR5K_RF_GAIN(45), { 0x000000c6, 0x000001bd } }, + { AR5K_RF_GAIN(46), { 0x000000c6, 0x000001fd } }, + { AR5K_RF_GAIN(47), { 0x000000c6, 0x0000003d } }, + { AR5K_RF_GAIN(48), { 0x000000c6, 0x0000007d } }, + { AR5K_RF_GAIN(49), { 0x000000c6, 0x000000bd } }, + { AR5K_RF_GAIN(50), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(51), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(52), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(53), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(54), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(55), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(56), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(57), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(58), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(59), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(60), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(61), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(62), { 0x000000c6, 0x000000fd } }, + { AR5K_RF_GAIN(63), { 0x000000c6, 0x000000fd } }, +}; + +/* Initial RF Gain settings for RF5112 */ +static const struct ath5k_ini_rfgain rfgain_5112[] = { + /* 5Ghz 2Ghz */ + { AR5K_RF_GAIN(0), { 0x00000007, 0x00000007 } }, + { AR5K_RF_GAIN(1), { 0x00000047, 0x00000047 } }, + { AR5K_RF_GAIN(2), { 0x00000087, 0x00000087 } }, + { AR5K_RF_GAIN(3), { 0x000001a0, 0x000001a0 } }, + { AR5K_RF_GAIN(4), { 0x000001e0, 0x000001e0 } }, + { AR5K_RF_GAIN(5), { 0x00000020, 0x00000020 } }, + { AR5K_RF_GAIN(6), { 0x00000060, 0x00000060 } }, + { AR5K_RF_GAIN(7), { 0x000001a1, 0x000001a1 } }, + { AR5K_RF_GAIN(8), { 0x000001e1, 0x000001e1 } }, + { AR5K_RF_GAIN(9), { 0x00000021, 0x00000021 } }, + { AR5K_RF_GAIN(10), { 0x00000061, 0x00000061 } }, + { AR5K_RF_GAIN(11), { 0x00000162, 0x00000162 } }, + { AR5K_RF_GAIN(12), { 0x000001a2, 0x000001a2 } }, + { AR5K_RF_GAIN(13), { 0x000001e2, 0x000001e2 } }, + { AR5K_RF_GAIN(14), { 0x00000022, 0x00000022 } }, + { AR5K_RF_GAIN(15), { 0x00000062, 0x00000062 } }, + { AR5K_RF_GAIN(16), { 0x00000163, 0x00000163 } }, + { AR5K_RF_GAIN(17), { 0x000001a3, 0x000001a3 } }, + { AR5K_RF_GAIN(18), { 0x000001e3, 0x000001e3 } }, + { AR5K_RF_GAIN(19), { 0x00000023, 0x00000023 } }, + { AR5K_RF_GAIN(20), { 0x00000063, 0x00000063 } }, + { AR5K_RF_GAIN(21), { 0x00000184, 0x00000184 } }, + { AR5K_RF_GAIN(22), { 0x000001c4, 0x000001c4 } }, + { AR5K_RF_GAIN(23), { 0x00000004, 0x00000004 } }, + { AR5K_RF_GAIN(24), { 0x000001ea, 0x0000000b } }, + { AR5K_RF_GAIN(25), { 0x0000002a, 0x0000004b } }, + { AR5K_RF_GAIN(26), { 0x0000006a, 0x0000008b } }, + { AR5K_RF_GAIN(27), { 0x000000aa, 0x000001ac } }, + { AR5K_RF_GAIN(28), { 0x000001ab, 0x000001ec } }, + { AR5K_RF_GAIN(29), { 0x000001eb, 0x0000002c } }, + { AR5K_RF_GAIN(30), { 0x0000002b, 0x00000012 } }, + { 
AR5K_RF_GAIN(31), { 0x0000006b, 0x00000052 } }, + { AR5K_RF_GAIN(32), { 0x000000ab, 0x00000092 } }, + { AR5K_RF_GAIN(33), { 0x000001ac, 0x00000193 } }, + { AR5K_RF_GAIN(34), { 0x000001ec, 0x000001d3 } }, + { AR5K_RF_GAIN(35), { 0x0000002c, 0x00000013 } }, + { AR5K_RF_GAIN(36), { 0x0000003a, 0x00000053 } }, + { AR5K_RF_GAIN(37), { 0x0000007a, 0x00000093 } }, + { AR5K_RF_GAIN(38), { 0x000000ba, 0x00000194 } }, + { AR5K_RF_GAIN(39), { 0x000001bb, 0x000001d4 } }, + { AR5K_RF_GAIN(40), { 0x000001fb, 0x00000014 } }, + { AR5K_RF_GAIN(41), { 0x0000003b, 0x0000003a } }, + { AR5K_RF_GAIN(42), { 0x0000007b, 0x0000007a } }, + { AR5K_RF_GAIN(43), { 0x000000bb, 0x000000ba } }, + { AR5K_RF_GAIN(44), { 0x000001bc, 0x000001bb } }, + { AR5K_RF_GAIN(45), { 0x000001fc, 0x000001fb } }, + { AR5K_RF_GAIN(46), { 0x0000003c, 0x0000003b } }, + { AR5K_RF_GAIN(47), { 0x0000007c, 0x0000007b } }, + { AR5K_RF_GAIN(48), { 0x000000bc, 0x000000bb } }, + { AR5K_RF_GAIN(49), { 0x000000fc, 0x000001bc } }, + { AR5K_RF_GAIN(50), { 0x000000fc, 0x000001fc } }, + { AR5K_RF_GAIN(51), { 0x000000fc, 0x0000003c } }, + { AR5K_RF_GAIN(52), { 0x000000fc, 0x0000007c } }, + { AR5K_RF_GAIN(53), { 0x000000fc, 0x000000bc } }, + { AR5K_RF_GAIN(54), { 0x000000fc, 0x000000fc } }, + { AR5K_RF_GAIN(55), { 0x000000fc, 0x000000fc } }, + { AR5K_RF_GAIN(56), { 0x000000fc, 0x000000fc } }, + { AR5K_RF_GAIN(57), { 0x000000fc, 0x000000fc } }, + { AR5K_RF_GAIN(58), { 0x000000fc, 0x000000fc } }, + { AR5K_RF_GAIN(59), { 0x000000fc, 0x000000fc } }, + { AR5K_RF_GAIN(60), { 0x000000fc, 0x000000fc } }, + { AR5K_RF_GAIN(61), { 0x000000fc, 0x000000fc } }, + { AR5K_RF_GAIN(62), { 0x000000fc, 0x000000fc } }, + { AR5K_RF_GAIN(63), { 0x000000fc, 0x000000fc } }, +}; + +/* Initial RF Gain settings for RF2413 */ +static const struct ath5k_ini_rfgain rfgain_2413[] = { + { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } }, + { AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } }, + { AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } }, + { AR5K_RF_GAIN(3), { 0x00000000, 0x00000181 } }, + { AR5K_RF_GAIN(4), { 0x00000000, 0x000001c1 } }, + { AR5K_RF_GAIN(5), { 0x00000000, 0x00000001 } }, + { AR5K_RF_GAIN(6), { 0x00000000, 0x00000041 } }, + { AR5K_RF_GAIN(7), { 0x00000000, 0x00000081 } }, + { AR5K_RF_GAIN(8), { 0x00000000, 0x00000168 } }, + { AR5K_RF_GAIN(9), { 0x00000000, 0x000001a8 } }, + { AR5K_RF_GAIN(10), { 0x00000000, 0x000001e8 } }, + { AR5K_RF_GAIN(11), { 0x00000000, 0x00000028 } }, + { AR5K_RF_GAIN(12), { 0x00000000, 0x00000068 } }, + { AR5K_RF_GAIN(13), { 0x00000000, 0x00000189 } }, + { AR5K_RF_GAIN(14), { 0x00000000, 0x000001c9 } }, + { AR5K_RF_GAIN(15), { 0x00000000, 0x00000009 } }, + { AR5K_RF_GAIN(16), { 0x00000000, 0x00000049 } }, + { AR5K_RF_GAIN(17), { 0x00000000, 0x00000089 } }, + { AR5K_RF_GAIN(18), { 0x00000000, 0x00000190 } }, + { AR5K_RF_GAIN(19), { 0x00000000, 0x000001d0 } }, + { AR5K_RF_GAIN(20), { 0x00000000, 0x00000010 } }, + { AR5K_RF_GAIN(21), { 0x00000000, 0x00000050 } }, + { AR5K_RF_GAIN(22), { 0x00000000, 0x00000090 } }, + { AR5K_RF_GAIN(23), { 0x00000000, 0x00000191 } }, + { AR5K_RF_GAIN(24), { 0x00000000, 0x000001d1 } }, + { AR5K_RF_GAIN(25), { 0x00000000, 0x00000011 } }, + { AR5K_RF_GAIN(26), { 0x00000000, 0x00000051 } }, + { AR5K_RF_GAIN(27), { 0x00000000, 0x00000091 } }, + { AR5K_RF_GAIN(28), { 0x00000000, 0x00000178 } }, + { AR5K_RF_GAIN(29), { 0x00000000, 0x000001b8 } }, + { AR5K_RF_GAIN(30), { 0x00000000, 0x000001f8 } }, + { AR5K_RF_GAIN(31), { 0x00000000, 0x00000038 } }, + { AR5K_RF_GAIN(32), { 0x00000000, 0x00000078 } }, + { AR5K_RF_GAIN(33), { 
0x00000000, 0x00000199 } }, + { AR5K_RF_GAIN(34), { 0x00000000, 0x000001d9 } }, + { AR5K_RF_GAIN(35), { 0x00000000, 0x00000019 } }, + { AR5K_RF_GAIN(36), { 0x00000000, 0x00000059 } }, + { AR5K_RF_GAIN(37), { 0x00000000, 0x00000099 } }, + { AR5K_RF_GAIN(38), { 0x00000000, 0x000000d9 } }, + { AR5K_RF_GAIN(39), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(40), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(41), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(42), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(43), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(44), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(45), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(46), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(47), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(48), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(49), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(50), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(51), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(52), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(53), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(54), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(55), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(56), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(57), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(58), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(59), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(60), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(61), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(62), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(63), { 0x00000000, 0x000000f9 } }, +}; + +/* Initial RF Gain settings for AR2316 */ +static const struct ath5k_ini_rfgain rfgain_2316[] = { + { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } }, + { AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } }, + { AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } }, + { AR5K_RF_GAIN(3), { 0x00000000, 0x000000c0 } }, + { AR5K_RF_GAIN(4), { 0x00000000, 0x000000e0 } }, + { AR5K_RF_GAIN(5), { 0x00000000, 0x000000e0 } }, + { AR5K_RF_GAIN(6), { 0x00000000, 0x00000128 } }, + { AR5K_RF_GAIN(7), { 0x00000000, 0x00000128 } }, + { AR5K_RF_GAIN(8), { 0x00000000, 0x00000128 } }, + { AR5K_RF_GAIN(9), { 0x00000000, 0x00000168 } }, + { AR5K_RF_GAIN(10), { 0x00000000, 0x000001a8 } }, + { AR5K_RF_GAIN(11), { 0x00000000, 0x000001e8 } }, + { AR5K_RF_GAIN(12), { 0x00000000, 0x00000028 } }, + { AR5K_RF_GAIN(13), { 0x00000000, 0x00000068 } }, + { AR5K_RF_GAIN(14), { 0x00000000, 0x000000a8 } }, + { AR5K_RF_GAIN(15), { 0x00000000, 0x000000e8 } }, + { AR5K_RF_GAIN(16), { 0x00000000, 0x000000e8 } }, + { AR5K_RF_GAIN(17), { 0x00000000, 0x00000130 } }, + { AR5K_RF_GAIN(18), { 0x00000000, 0x00000130 } }, + { AR5K_RF_GAIN(19), { 0x00000000, 0x00000170 } }, + { AR5K_RF_GAIN(20), { 0x00000000, 0x000001b0 } }, + { AR5K_RF_GAIN(21), { 0x00000000, 0x000001f0 } }, + { AR5K_RF_GAIN(22), { 0x00000000, 0x00000030 } }, + { AR5K_RF_GAIN(23), { 0x00000000, 0x00000070 } }, + { AR5K_RF_GAIN(24), { 0x00000000, 0x000000b0 } }, + { AR5K_RF_GAIN(25), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(26), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(27), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(28), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(29), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(30), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(31), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(32), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(33), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(34), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(35), { 0x00000000, 0x000000f0 } }, 
+ { AR5K_RF_GAIN(36), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(37), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(38), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(39), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(40), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(41), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(42), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(43), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(44), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(45), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(46), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(47), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(48), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(49), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(50), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(51), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(52), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(53), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(54), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(55), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(56), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(57), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(58), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(59), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(60), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(61), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(62), { 0x00000000, 0x000000f0 } }, + { AR5K_RF_GAIN(63), { 0x00000000, 0x000000f0 } }, +}; + + +/* Initial RF Gain settings for RF5413 */ +static const struct ath5k_ini_rfgain rfgain_5413[] = { + /* 5Ghz 2Ghz */ + { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } }, + { AR5K_RF_GAIN(1), { 0x00000040, 0x00000040 } }, + { AR5K_RF_GAIN(2), { 0x00000080, 0x00000080 } }, + { AR5K_RF_GAIN(3), { 0x000001a1, 0x00000161 } }, + { AR5K_RF_GAIN(4), { 0x000001e1, 0x000001a1 } }, + { AR5K_RF_GAIN(5), { 0x00000021, 0x000001e1 } }, + { AR5K_RF_GAIN(6), { 0x00000061, 0x00000021 } }, + { AR5K_RF_GAIN(7), { 0x00000188, 0x00000061 } }, + { AR5K_RF_GAIN(8), { 0x000001c8, 0x00000188 } }, + { AR5K_RF_GAIN(9), { 0x00000008, 0x000001c8 } }, + { AR5K_RF_GAIN(10), { 0x00000048, 0x00000008 } }, + { AR5K_RF_GAIN(11), { 0x00000088, 0x00000048 } }, + { AR5K_RF_GAIN(12), { 0x000001a9, 0x00000088 } }, + { AR5K_RF_GAIN(13), { 0x000001e9, 0x00000169 } }, + { AR5K_RF_GAIN(14), { 0x00000029, 0x000001a9 } }, + { AR5K_RF_GAIN(15), { 0x00000069, 0x000001e9 } }, + { AR5K_RF_GAIN(16), { 0x000001d0, 0x00000029 } }, + { AR5K_RF_GAIN(17), { 0x00000010, 0x00000069 } }, + { AR5K_RF_GAIN(18), { 0x00000050, 0x00000190 } }, + { AR5K_RF_GAIN(19), { 0x00000090, 0x000001d0 } }, + { AR5K_RF_GAIN(20), { 0x000001b1, 0x00000010 } }, + { AR5K_RF_GAIN(21), { 0x000001f1, 0x00000050 } }, + { AR5K_RF_GAIN(22), { 0x00000031, 0x00000090 } }, + { AR5K_RF_GAIN(23), { 0x00000071, 0x00000171 } }, + { AR5K_RF_GAIN(24), { 0x000001b8, 0x000001b1 } }, + { AR5K_RF_GAIN(25), { 0x000001f8, 0x000001f1 } }, + { AR5K_RF_GAIN(26), { 0x00000038, 0x00000031 } }, + { AR5K_RF_GAIN(27), { 0x00000078, 0x00000071 } }, + { AR5K_RF_GAIN(28), { 0x00000199, 0x00000198 } }, + { AR5K_RF_GAIN(29), { 0x000001d9, 0x000001d8 } }, + { AR5K_RF_GAIN(30), { 0x00000019, 0x00000018 } }, + { AR5K_RF_GAIN(31), { 0x00000059, 0x00000058 } }, + { AR5K_RF_GAIN(32), { 0x00000099, 0x00000098 } }, + { AR5K_RF_GAIN(33), { 0x000000d9, 0x00000179 } }, + { AR5K_RF_GAIN(34), { 0x000000f9, 0x000001b9 } }, + { AR5K_RF_GAIN(35), { 0x000000f9, 0x000001f9 } }, + { AR5K_RF_GAIN(36), { 0x000000f9, 0x00000039 } }, + { AR5K_RF_GAIN(37), { 0x000000f9, 0x00000079 } }, + { 
AR5K_RF_GAIN(38), { 0x000000f9, 0x000000b9 } }, + { AR5K_RF_GAIN(39), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(40), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(41), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(42), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(43), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(44), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(45), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(46), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(47), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(48), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(49), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(50), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(51), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(52), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(53), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(54), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(55), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(56), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(57), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(58), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(59), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(60), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(61), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(62), { 0x000000f9, 0x000000f9 } }, + { AR5K_RF_GAIN(63), { 0x000000f9, 0x000000f9 } }, +}; + + +/* Initial RF Gain settings for RF2425 */ +static const struct ath5k_ini_rfgain rfgain_2425[] = { + { AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } }, + { AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } }, + { AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } }, + { AR5K_RF_GAIN(3), { 0x00000000, 0x00000181 } }, + { AR5K_RF_GAIN(4), { 0x00000000, 0x000001c1 } }, + { AR5K_RF_GAIN(5), { 0x00000000, 0x00000001 } }, + { AR5K_RF_GAIN(6), { 0x00000000, 0x00000041 } }, + { AR5K_RF_GAIN(7), { 0x00000000, 0x00000081 } }, + { AR5K_RF_GAIN(8), { 0x00000000, 0x00000188 } }, + { AR5K_RF_GAIN(9), { 0x00000000, 0x000001c8 } }, + { AR5K_RF_GAIN(10), { 0x00000000, 0x00000008 } }, + { AR5K_RF_GAIN(11), { 0x00000000, 0x00000048 } }, + { AR5K_RF_GAIN(12), { 0x00000000, 0x00000088 } }, + { AR5K_RF_GAIN(13), { 0x00000000, 0x00000189 } }, + { AR5K_RF_GAIN(14), { 0x00000000, 0x000001c9 } }, + { AR5K_RF_GAIN(15), { 0x00000000, 0x00000009 } }, + { AR5K_RF_GAIN(16), { 0x00000000, 0x00000049 } }, + { AR5K_RF_GAIN(17), { 0x00000000, 0x00000089 } }, + { AR5K_RF_GAIN(18), { 0x00000000, 0x000001b0 } }, + { AR5K_RF_GAIN(19), { 0x00000000, 0x000001f0 } }, + { AR5K_RF_GAIN(20), { 0x00000000, 0x00000030 } }, + { AR5K_RF_GAIN(21), { 0x00000000, 0x00000070 } }, + { AR5K_RF_GAIN(22), { 0x00000000, 0x00000171 } }, + { AR5K_RF_GAIN(23), { 0x00000000, 0x000001b1 } }, + { AR5K_RF_GAIN(24), { 0x00000000, 0x000001f1 } }, + { AR5K_RF_GAIN(25), { 0x00000000, 0x00000031 } }, + { AR5K_RF_GAIN(26), { 0x00000000, 0x00000071 } }, + { AR5K_RF_GAIN(27), { 0x00000000, 0x000001b8 } }, + { AR5K_RF_GAIN(28), { 0x00000000, 0x000001f8 } }, + { AR5K_RF_GAIN(29), { 0x00000000, 0x00000038 } }, + { AR5K_RF_GAIN(30), { 0x00000000, 0x00000078 } }, + { AR5K_RF_GAIN(31), { 0x00000000, 0x000000b8 } }, + { AR5K_RF_GAIN(32), { 0x00000000, 0x000001b9 } }, + { AR5K_RF_GAIN(33), { 0x00000000, 0x000001f9 } }, + { AR5K_RF_GAIN(34), { 0x00000000, 0x00000039 } }, + { AR5K_RF_GAIN(35), { 0x00000000, 0x00000079 } }, + { AR5K_RF_GAIN(36), { 0x00000000, 0x000000b9 } }, + { AR5K_RF_GAIN(37), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(38), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(39), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(40), { 
0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(41), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(42), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(43), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(44), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(45), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(46), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(47), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(48), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(49), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(50), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(51), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(52), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(53), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(54), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(55), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(56), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(57), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(58), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(59), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(60), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(61), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(62), { 0x00000000, 0x000000f9 } }, + { AR5K_RF_GAIN(63), { 0x00000000, 0x000000f9 } }, +}; + +#define AR5K_GAIN_CRN_FIX_BITS_5111 4 +#define AR5K_GAIN_CRN_FIX_BITS_5112 7 +#define AR5K_GAIN_CRN_MAX_FIX_BITS AR5K_GAIN_CRN_FIX_BITS_5112 +#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN 15 +#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN 20 +#define AR5K_GAIN_CCK_PROBE_CORR 5 +#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA 15 +#define AR5K_GAIN_STEP_COUNT 10 + +/* Check if our current measurement is inside our + * current variable attenuation window */ +#define AR5K_GAIN_CHECK_ADJUST(_g) \ + ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high) + +struct ath5k_gain_opt_step { + s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS]; + s8 gos_gain; +}; + +struct ath5k_gain_opt { + u8 go_default; + u8 go_steps_count; + const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT]; +}; + +/* + * Parameters on gos_param: + * 1) Tx clip PHY register + * 2) PWD 90 RF register + * 3) PWD 84 RF register + * 4) RFGainSel RF register + */ +static const struct ath5k_gain_opt rfgain_opt_5111 = { + 4, + 9, + { + { { 4, 1, 1, 1 }, 6 }, + { { 4, 0, 1, 1 }, 4 }, + { { 3, 1, 1, 1 }, 3 }, + { { 4, 0, 0, 1 }, 1 }, + { { 4, 1, 1, 0 }, 0 }, + { { 4, 0, 1, 0 }, -2 }, + { { 3, 1, 1, 0 }, -3 }, + { { 4, 0, 0, 0 }, -4 }, + { { 2, 1, 1, 0 }, -6 } + } +}; + +/* + * Parameters on gos_param: + * 1) Mixgain ovr RF register + * 2) PWD 138 RF register + * 3) PWD 137 RF register + * 4) PWD 136 RF register + * 5) PWD 132 RF register + * 6) PWD 131 RF register + * 7) PWD 130 RF register + */ +static const struct ath5k_gain_opt rfgain_opt_5112 = { + 1, + 8, + { + { { 3, 0, 0, 0, 0, 0, 0 }, 6 }, + { { 2, 0, 0, 0, 0, 0, 0 }, 0 }, + { { 1, 0, 0, 0, 0, 0, 0 }, -3 }, + { { 0, 0, 0, 0, 0, 0, 0 }, -6 }, + { { 0, 1, 1, 0, 0, 0, 0 }, -8 }, + { { 0, 1, 1, 0, 1, 1, 0 }, -10 }, + { { 0, 1, 0, 1, 1, 1, 0 }, -13 }, + { { 0, 1, 0, 1, 1, 0, 1 }, -16 }, + } +}; + diff --git a/drivers/net/wireless/ath/ath5k/rfkill.c b/drivers/net/wireless/ath/ath5k/rfkill.c new file mode 100644 index 0000000..41a877b --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/rfkill.c @@ -0,0 +1,121 @@ +/* + * RFKILL support for ath5k + * + * Copyright (c) 2009 Tobias Doerffel + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + */ + +#include "base.h" + + +static inline void ath5k_rfkill_disable(struct ath5k_softc *sc) +{ + ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n", + sc->rf_kill.gpio, sc->rf_kill.polarity); + ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio); + ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, !sc->rf_kill.polarity); +} + + +static inline void ath5k_rfkill_enable(struct ath5k_softc *sc) +{ + ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n", + sc->rf_kill.gpio, sc->rf_kill.polarity); + ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio); + ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, sc->rf_kill.polarity); +} + +static inline void ath5k_rfkill_set_intr(struct ath5k_softc *sc, bool enable) +{ + struct ath5k_hw *ah = sc->ah; + u32 curval; + + ath5k_hw_set_gpio_input(ah, sc->rf_kill.gpio); + curval = ath5k_hw_get_gpio(ah, sc->rf_kill.gpio); + ath5k_hw_set_gpio_intr(ah, sc->rf_kill.gpio, enable ? 
+ !!curval : !curval); +} + +static bool +ath5k_is_rfkill_set(struct ath5k_softc *sc) +{ + /* configuring GPIO for input for some reason disables rfkill */ + /*ath5k_hw_set_gpio_input(sc->ah, sc->rf_kill.gpio);*/ + return ath5k_hw_get_gpio(sc->ah, sc->rf_kill.gpio) == + sc->rf_kill.polarity; +} + +static void +ath5k_tasklet_rfkill_toggle(unsigned long data) +{ + struct ath5k_softc *sc = (void *)data; + bool blocked; + + blocked = ath5k_is_rfkill_set(sc); + wiphy_rfkill_set_hw_state(sc->hw->wiphy, blocked); +} + + +void +ath5k_rfkill_hw_start(struct ath5k_hw *ah) +{ + struct ath5k_softc *sc = ah->ah_sc; + + /* read rfkill GPIO configuration from EEPROM header */ + sc->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin; + sc->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol; + + tasklet_init(&sc->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle, + (unsigned long)sc); + + ath5k_rfkill_disable(sc); + + /* enable interrupt for rfkill switch */ + if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) + ath5k_rfkill_set_intr(sc, true); +} + + +void +ath5k_rfkill_hw_stop(struct ath5k_hw *ah) +{ + struct ath5k_softc *sc = ah->ah_sc; + + /* disable interrupt for rfkill switch */ + if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) + ath5k_rfkill_set_intr(sc, false); + + tasklet_kill(&sc->rf_kill.toggleq); + + /* enable RFKILL when stopping HW so Wifi LED is turned off */ + ath5k_rfkill_enable(sc); +} + diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile new file mode 100644 index 0000000..6b50d5e --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -0,0 +1,30 @@ +ath9k-y += beacon.o \ + gpio.o \ + init.o \ + main.o \ + recv.o \ + xmit.o \ + virtual.o \ + rc.o + +ath9k-$(CONFIG_PCI) += pci.o +ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o +ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o + +obj-$(CONFIG_ATH9K) += ath9k.o + +ath9k_hw-y:= hw.o \ + eeprom.o \ + eeprom_def.o \ + eeprom_4k.o \ + eeprom_9287.o \ + calib.o \ + ani.o \ + phy.o \ + btcoex.o \ + mac.o \ + +obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o + +obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o +ath9k_common-y:= common.o diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c new file mode 100644 index 0000000..ca4994f --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2009 Gabor Juhos + * Copyright (c) 2009 Imre Kaloz + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include "ath9k.h" + +/* return bus cachesize in 4B word units */ +static void ath_ahb_read_cachesize(struct ath_common *common, int *csz) +{ + *csz = L1_CACHE_BYTES >> 2; +} + +static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) +{ + struct ath_softc *sc = (struct ath_softc *)common->priv; + struct platform_device *pdev = to_platform_device(sc->dev); + struct ath9k_platform_data *pdata; + + pdata = (struct ath9k_platform_data *) pdev->dev.platform_data; + if (off >= (ARRAY_SIZE(pdata->eeprom_data))) { + ath_print(common, ATH_DBG_FATAL, + "%s: flash read failed, offset %08x " + "is out of range\n", + __func__, off); + return false; + } + + *data = pdata->eeprom_data[off]; + return true; +} + +static struct ath_bus_ops ath_ahb_bus_ops = { + .read_cachesize = ath_ahb_read_cachesize, + .eeprom_read = ath_ahb_eeprom_read, +}; + +static int ath_ahb_probe(struct platform_device *pdev) +{ + void __iomem *mem; + struct ath_wiphy *aphy; + struct ath_softc *sc; + struct ieee80211_hw *hw; + struct resource *res; + int irq; + int ret = 0; + struct ath_hw *ah; + char hw_name[64]; + + if (!pdev->dev.platform_data) { + dev_err(&pdev->dev, "no platform data specified\n"); + ret = -EINVAL; + goto err_out; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "no memory resource found\n"); + ret = -ENXIO; + goto err_out; + } + + mem = ioremap_nocache(res->start, res->end - res->start + 1); + if (mem == NULL) { + dev_err(&pdev->dev, "ioremap failed\n"); + ret = -ENOMEM; + goto err_out; + } + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (res == NULL) { + dev_err(&pdev->dev, "no IRQ resource found\n"); + ret = -ENXIO; + goto err_iounmap; + } + + irq = res->start; + + hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + + sizeof(struct ath_softc), &ath9k_ops); + if (hw == NULL) { + dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); + ret = -ENOMEM; + goto err_iounmap; + } + + SET_IEEE80211_DEV(hw, &pdev->dev); + platform_set_drvdata(pdev, hw); + + aphy = hw->priv; + sc = (struct ath_softc *) (aphy + 1); + aphy->sc = sc; + aphy->hw = hw; + sc->pri_wiphy = aphy; + sc->hw = hw; + sc->dev = &pdev->dev; + sc->mem = mem; + sc->irq = irq; + + /* Will be cleared in ath9k_start() */ + sc->sc_flags |= SC_OP_INVALID; + + ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc); + if (ret) { + dev_err(&pdev->dev, "request_irq failed\n"); + goto err_free_hw; + } + + ret = ath9k_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops); + if (ret) { + dev_err(&pdev->dev, "failed to initialize device\n"); + goto err_irq; + } + + ah = sc->sc_ah; + ath9k_hw_name(ah, hw_name, sizeof(hw_name)); + printk(KERN_INFO + "%s: %s mem=0x%lx, irq=%d\n", + wiphy_name(hw->wiphy), + hw_name, + (unsigned long)mem, irq); + + return 0; + + err_irq: + free_irq(irq, sc); + err_free_hw: + ieee80211_free_hw(hw); + platform_set_drvdata(pdev, NULL); + err_iounmap: + iounmap(mem); + err_out: + return ret; +} + +static int ath_ahb_remove(struct platform_device *pdev) +{ + struct ieee80211_hw *hw = platform_get_drvdata(pdev); + + if (hw) { + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + void __iomem *mem = sc->mem; + + ath9k_deinit_device(sc); + free_irq(sc->irq, sc); + ieee80211_free_hw(sc->hw); + iounmap(mem); + platform_set_drvdata(pdev, NULL); + } + + return 0; +} + +static struct platform_driver ath_ahb_driver = { + .probe = ath_ahb_probe, + .remove = ath_ahb_remove, + .driver = { + .name = 
"ath9k", + .owner = THIS_MODULE, + }, +}; + +int ath_ahb_init(void) +{ + return platform_driver_register(&ath_ahb_driver); +} + +void ath_ahb_exit(void) +{ + platform_driver_unregister(&ath_ahb_driver); +} diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c new file mode 100644 index 0000000..2a0cd64 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -0,0 +1,828 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hw.h" + +static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ah->ani); i++) { + if (ah->ani[i].c && + ah->ani[i].c->channel == chan->channel) + return i; + if (ah->ani[i].c == NULL) { + ah->ani[i].c = chan; + return i; + } + } + + ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, + "No more channel states left. Using channel 0\n"); + + return 0; +} + +static bool ath9k_hw_ani_control(struct ath_hw *ah, + enum ath9k_ani_cmd cmd, int param) +{ + struct ar5416AniState *aniState = ah->curani; + struct ath_common *common = ath9k_hw_common(ah); + + switch (cmd & ah->ani_function) { + case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{ + u32 level = param; + + if (level >= ARRAY_SIZE(ah->totalSizeDesired)) { + ath_print(common, ATH_DBG_ANI, + "level out of range (%u > %u)\n", + level, + (unsigned)ARRAY_SIZE(ah->totalSizeDesired)); + return false; + } + + REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, + AR_PHY_DESIRED_SZ_TOT_DES, + ah->totalSizeDesired[level]); + REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1, + AR_PHY_AGC_CTL1_COARSE_LOW, + ah->coarse_low[level]); + REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1, + AR_PHY_AGC_CTL1_COARSE_HIGH, + ah->coarse_high[level]); + REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, + AR_PHY_FIND_SIG_FIRPWR, + ah->firpwr[level]); + + if (level > aniState->noiseImmunityLevel) + ah->stats.ast_ani_niup++; + else if (level < aniState->noiseImmunityLevel) + ah->stats.ast_ani_nidown++; + aniState->noiseImmunityLevel = level; + break; + } + case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ + const int m1ThreshLow[] = { 127, 50 }; + const int m2ThreshLow[] = { 127, 40 }; + const int m1Thresh[] = { 127, 0x4d }; + const int m2Thresh[] = { 127, 0x40 }; + const int m2CountThr[] = { 31, 16 }; + const int m2CountThrLow[] = { 63, 48 }; + u32 on = param ? 
1 : 0; + + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M1_THRESH_LOW, + m1ThreshLow[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M2_THRESH_LOW, + m2ThreshLow[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M1_THRESH, + m1Thresh[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M2_THRESH, + m2Thresh[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M2COUNT_THR, + m2CountThr[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, + m2CountThrLow[on]); + + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M1_THRESH_LOW, + m1ThreshLow[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M2_THRESH_LOW, + m2ThreshLow[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M1_THRESH, + m1Thresh[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M2_THRESH, + m2Thresh[on]); + + if (on) + REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); + else + REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); + + if (!on != aniState->ofdmWeakSigDetectOff) { + if (on) + ah->stats.ast_ani_ofdmon++; + else + ah->stats.ast_ani_ofdmoff++; + aniState->ofdmWeakSigDetectOff = !on; + } + break; + } + case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{ + const int weakSigThrCck[] = { 8, 6 }; + u32 high = param ? 1 : 0; + + REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT, + AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK, + weakSigThrCck[high]); + if (high != aniState->cckWeakSigThreshold) { + if (high) + ah->stats.ast_ani_cckhigh++; + else + ah->stats.ast_ani_ccklow++; + aniState->cckWeakSigThreshold = high; + } + break; + } + case ATH9K_ANI_FIRSTEP_LEVEL:{ + const int firstep[] = { 0, 4, 8 }; + u32 level = param; + + if (level >= ARRAY_SIZE(firstep)) { + ath_print(common, ATH_DBG_ANI, + "level out of range (%u > %u)\n", + level, + (unsigned) ARRAY_SIZE(firstep)); + return false; + } + REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, + AR_PHY_FIND_SIG_FIRSTEP, + firstep[level]); + if (level > aniState->firstepLevel) + ah->stats.ast_ani_stepup++; + else if (level < aniState->firstepLevel) + ah->stats.ast_ani_stepdown++; + aniState->firstepLevel = level; + break; + } + case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{ + const int cycpwrThr1[] = + { 2, 4, 6, 8, 10, 12, 14, 16 }; + u32 level = param; + + if (level >= ARRAY_SIZE(cycpwrThr1)) { + ath_print(common, ATH_DBG_ANI, + "level out of range (%u > %u)\n", + level, + (unsigned) ARRAY_SIZE(cycpwrThr1)); + return false; + } + REG_RMW_FIELD(ah, AR_PHY_TIMING5, + AR_PHY_TIMING5_CYCPWR_THR1, + cycpwrThr1[level]); + if (level > aniState->spurImmunityLevel) + ah->stats.ast_ani_spurup++; + else if (level < aniState->spurImmunityLevel) + ah->stats.ast_ani_spurdown++; + aniState->spurImmunityLevel = level; + break; + } + case ATH9K_ANI_PRESENT: + break; + default: + ath_print(common, ATH_DBG_ANI, + "invalid cmd %u\n", cmd); + return false; + } + + ath_print(common, ATH_DBG_ANI, "ANI parameters:\n"); + ath_print(common, ATH_DBG_ANI, + "noiseImmunityLevel=%d, spurImmunityLevel=%d, " + "ofdmWeakSigDetectOff=%d\n", + aniState->noiseImmunityLevel, + aniState->spurImmunityLevel, + !aniState->ofdmWeakSigDetectOff); + ath_print(common, ATH_DBG_ANI, + "cckWeakSigThreshold=%d, " + "firstepLevel=%d, listenTime=%d\n", + aniState->cckWeakSigThreshold, + aniState->firstepLevel, + aniState->listenTime); + ath_print(common, ATH_DBG_ANI, + "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", + aniState->cycleCount, + aniState->ofdmPhyErrCount, + aniState->cckPhyErrCount); + + return 
true; +} + +static void ath9k_hw_update_mibstats(struct ath_hw *ah, + struct ath9k_mib_stats *stats) +{ + stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL); + stats->rts_bad += REG_READ(ah, AR_RTS_FAIL); + stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL); + stats->rts_good += REG_READ(ah, AR_RTS_OK); + stats->beacons += REG_READ(ah, AR_BEACON_CNT); +} + +static void ath9k_ani_restart(struct ath_hw *ah) +{ + struct ar5416AniState *aniState; + struct ath_common *common = ath9k_hw_common(ah); + + if (!DO_ANI(ah)) + return; + + aniState = ah->curani; + aniState->listenTime = 0; + + if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) { + aniState->ofdmPhyErrBase = 0; + ath_print(common, ATH_DBG_ANI, + "OFDM Trigger is too high for hw counters\n"); + } else { + aniState->ofdmPhyErrBase = + AR_PHY_COUNTMAX - aniState->ofdmTrigHigh; + } + if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) { + aniState->cckPhyErrBase = 0; + ath_print(common, ATH_DBG_ANI, + "CCK Trigger is too high for hw counters\n"); + } else { + aniState->cckPhyErrBase = + AR_PHY_COUNTMAX - aniState->cckTrigHigh; + } + ath_print(common, ATH_DBG_ANI, + "Writing ofdmbase=%u cckbase=%u\n", + aniState->ofdmPhyErrBase, + aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); + + ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); + + aniState->ofdmPhyErrCount = 0; + aniState->cckPhyErrCount = 0; +} + +static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) +{ + struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; + struct ar5416AniState *aniState; + int32_t rssi; + + if (!DO_ANI(ah)) + return; + + aniState = ah->curani; + + if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) { + if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, + aniState->noiseImmunityLevel + 1)) { + return; + } + } + + if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) { + if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, + aniState->spurImmunityLevel + 1)) { + return; + } + } + + if (ah->opmode == NL80211_IFTYPE_AP) { + if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) { + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel + 1); + } + return; + } + rssi = BEACON_RSSI(ah); + if (rssi > aniState->rssiThrHigh) { + if (!aniState->ofdmWeakSigDetectOff) { + if (ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + false)) { + ath9k_hw_ani_control(ah, + ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0); + return; + } + } + if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) { + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel + 1); + return; + } + } else if (rssi > aniState->rssiThrLow) { + if (aniState->ofdmWeakSigDetectOff) + ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + true); + if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel + 1); + return; + } else { + if ((conf->channel->band == IEEE80211_BAND_2GHZ) && + !conf_is_ht(conf)) { + if (!aniState->ofdmWeakSigDetectOff) + ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + false); + if (aniState->firstepLevel > 0) + ath9k_hw_ani_control(ah, + ATH9K_ANI_FIRSTEP_LEVEL, 0); + return; + } + } +} + +static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) +{ + struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; + struct ar5416AniState 
*aniState; + int32_t rssi; + + if (!DO_ANI(ah)) + return; + + aniState = ah->curani; + if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) { + if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, + aniState->noiseImmunityLevel + 1)) { + return; + } + } + if (ah->opmode == NL80211_IFTYPE_AP) { + if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) { + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel + 1); + } + return; + } + rssi = BEACON_RSSI(ah); + if (rssi > aniState->rssiThrLow) { + if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel + 1); + } else { + if ((conf->channel->band == IEEE80211_BAND_2GHZ) && + !conf_is_ht(conf)) { + if (aniState->firstepLevel > 0) + ath9k_hw_ani_control(ah, + ATH9K_ANI_FIRSTEP_LEVEL, 0); + } + } +} + +static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) +{ + struct ar5416AniState *aniState; + int32_t rssi; + + aniState = ah->curani; + + if (ah->opmode == NL80211_IFTYPE_AP) { + if (aniState->firstepLevel > 0) { + if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel - 1)) + return; + } + } else { + rssi = BEACON_RSSI(ah); + if (rssi > aniState->rssiThrHigh) { + /* XXX: Handle me */ + } else if (rssi > aniState->rssiThrLow) { + if (aniState->ofdmWeakSigDetectOff) { + if (ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + true) == true) + return; + } + if (aniState->firstepLevel > 0) { + if (ath9k_hw_ani_control(ah, + ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel - 1) == true) + return; + } + } else { + if (aniState->firstepLevel > 0) { + if (ath9k_hw_ani_control(ah, + ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel - 1) == true) + return; + } + } + } + + if (aniState->spurImmunityLevel > 0) { + if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, + aniState->spurImmunityLevel - 1)) + return; + } + + if (aniState->noiseImmunityLevel > 0) { + ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, + aniState->noiseImmunityLevel - 1); + return; + } +} + +static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah) +{ + struct ar5416AniState *aniState; + u32 txFrameCount, rxFrameCount, cycleCount; + int32_t listenTime; + + txFrameCount = REG_READ(ah, AR_TFCNT); + rxFrameCount = REG_READ(ah, AR_RFCNT); + cycleCount = REG_READ(ah, AR_CCCNT); + + aniState = ah->curani; + if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) { + + listenTime = 0; + ah->stats.ast_ani_lzero++; + } else { + int32_t ccdelta = cycleCount - aniState->cycleCount; + int32_t rfdelta = rxFrameCount - aniState->rxFrameCount; + int32_t tfdelta = txFrameCount - aniState->txFrameCount; + listenTime = (ccdelta - rfdelta - tfdelta) / 44000; + } + aniState->cycleCount = cycleCount; + aniState->txFrameCount = txFrameCount; + aniState->rxFrameCount = rxFrameCount; + + return listenTime; +} + +void ath9k_ani_reset(struct ath_hw *ah) +{ + struct ar5416AniState *aniState; + struct ath9k_channel *chan = ah->curchan; + struct ath_common *common = ath9k_hw_common(ah); + int index; + + if (!DO_ANI(ah)) + return; + + index = ath9k_hw_get_ani_channel_idx(ah, chan); + aniState = &ah->ani[index]; + ah->curani = aniState; + + if (DO_ANI(ah) && ah->opmode != NL80211_IFTYPE_STATION + && ah->opmode != NL80211_IFTYPE_ADHOC) { + ath_print(common, ATH_DBG_ANI, + "Reset ANI state opmode %u\n", ah->opmode); + ah->stats.ast_ani_reset++; + + if (ah->opmode == NL80211_IFTYPE_AP) { + /* + * ath9k_hw_ani_control() will only process items set on 
+ * ah->ani_function + */ + if (IS_CHAN_2GHZ(chan)) + ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL | + ATH9K_ANI_FIRSTEP_LEVEL); + else + ah->ani_function = 0; + } + + ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0); + ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0); + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0); + ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + !ATH9K_ANI_USE_OFDM_WEAK_SIG); + ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR, + ATH9K_ANI_CCK_WEAK_SIG_THR); + + ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) | + ATH9K_RX_FILTER_PHYERR); + + if (ah->opmode == NL80211_IFTYPE_AP) { + ah->curani->ofdmTrigHigh = + ah->config.ofdm_trig_high; + ah->curani->ofdmTrigLow = + ah->config.ofdm_trig_low; + ah->curani->cckTrigHigh = + ah->config.cck_trig_high; + ah->curani->cckTrigLow = + ah->config.cck_trig_low; + } + ath9k_ani_restart(ah); + return; + } + + if (aniState->noiseImmunityLevel != 0) + ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, + aniState->noiseImmunityLevel); + if (aniState->spurImmunityLevel != 0) + ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, + aniState->spurImmunityLevel); + if (aniState->ofdmWeakSigDetectOff) + ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + !aniState->ofdmWeakSigDetectOff); + if (aniState->cckWeakSigThreshold) + ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR, + aniState->cckWeakSigThreshold); + if (aniState->firstepLevel != 0) + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel); + + ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) & + ~ATH9K_RX_FILTER_PHYERR); + ath9k_ani_restart(ah); + REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); +} + +void ath9k_hw_ani_monitor(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ar5416AniState *aniState; + struct ath_common *common = ath9k_hw_common(ah); + int32_t listenTime; + u32 phyCnt1, phyCnt2; + u32 ofdmPhyErrCnt, cckPhyErrCnt; + + if (!DO_ANI(ah)) + return; + + aniState = ah->curani; + + listenTime = ath9k_hw_ani_get_listen_time(ah); + if (listenTime < 0) { + ah->stats.ast_ani_lneg++; + ath9k_ani_restart(ah); + return; + } + + aniState->listenTime += listenTime; + + ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); + + phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); + phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); + + if (phyCnt1 < aniState->ofdmPhyErrBase || + phyCnt2 < aniState->cckPhyErrBase) { + if (phyCnt1 < aniState->ofdmPhyErrBase) { + ath_print(common, ATH_DBG_ANI, + "phyCnt1 0x%x, resetting " + "counter value to 0x%x\n", + phyCnt1, + aniState->ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_1, + aniState->ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_MASK_1, + AR_PHY_ERR_OFDM_TIMING); + } + if (phyCnt2 < aniState->cckPhyErrBase) { + ath_print(common, ATH_DBG_ANI, + "phyCnt2 0x%x, resetting " + "counter value to 0x%x\n", + phyCnt2, + aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_2, + aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, + AR_PHY_ERR_CCK_TIMING); + } + return; + } + + ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase; + ah->stats.ast_ani_ofdmerrs += + ofdmPhyErrCnt - aniState->ofdmPhyErrCount; + aniState->ofdmPhyErrCount = ofdmPhyErrCnt; + + cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase; + ah->stats.ast_ani_cckerrs += + cckPhyErrCnt - aniState->cckPhyErrCount; + aniState->cckPhyErrCount = cckPhyErrCnt; + + if (aniState->listenTime > 5 * ah->aniperiod) { + if 
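+ /* listenTime comes from ath9k_hw_ani_get_listen_time(): cycle deltas
+ * divided by 44000 (presumably a 44 MHz MAC clock), i.e. milliseconds,
+ * so the listenTime * trig / 1000 comparisons below are against a
+ * PHY-errors-per-second rate */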
(aniState->ofdmPhyErrCount <= aniState->listenTime * + aniState->ofdmTrigLow / 1000 && + aniState->cckPhyErrCount <= aniState->listenTime * + aniState->cckTrigLow / 1000) + ath9k_hw_ani_lower_immunity(ah); + ath9k_ani_restart(ah); + } else if (aniState->listenTime > ah->aniperiod) { + if (aniState->ofdmPhyErrCount > aniState->listenTime * + aniState->ofdmTrigHigh / 1000) { + ath9k_hw_ani_ofdm_err_trigger(ah); + ath9k_ani_restart(ah); + } else if (aniState->cckPhyErrCount > + aniState->listenTime * aniState->cckTrigHigh / + 1000) { + ath9k_hw_ani_cck_err_trigger(ah); + ath9k_ani_restart(ah); + } + } +} +EXPORT_SYMBOL(ath9k_hw_ani_monitor); + +void ath9k_enable_mib_counters(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + ath_print(common, ATH_DBG_ANI, "Enable MIB counters\n"); + + ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); + + REG_WRITE(ah, AR_FILT_OFDM, 0); + REG_WRITE(ah, AR_FILT_CCK, 0); + REG_WRITE(ah, AR_MIBC, + ~(AR_MIBC_COW | AR_MIBC_FMC | AR_MIBC_CMC | AR_MIBC_MCS) + & 0x0f); + REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); +} + +/* Freeze the MIB counters, get the stats and then clear them */ +void ath9k_hw_disable_mib_counters(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + ath_print(common, ATH_DBG_ANI, "Disable MIB counters\n"); + + REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC); + ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); + REG_WRITE(ah, AR_MIBC, AR_MIBC_CMC); + REG_WRITE(ah, AR_FILT_OFDM, 0); + REG_WRITE(ah, AR_FILT_CCK, 0); +} + +u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, + u32 *rxc_pcnt, + u32 *rxf_pcnt, + u32 *txf_pcnt) +{ + struct ath_common *common = ath9k_hw_common(ah); + static u32 cycles, rx_clear, rx_frame, tx_frame; + u32 good = 1; + + u32 rc = REG_READ(ah, AR_RCCNT); + u32 rf = REG_READ(ah, AR_RFCNT); + u32 tf = REG_READ(ah, AR_TFCNT); + u32 cc = REG_READ(ah, AR_CCCNT); + + if (cycles == 0 || cycles > cc) { + ath_print(common, ATH_DBG_ANI, + "cycle counter wrap. ExtBusy = 0\n"); + good = 0; + } else { + u32 cc_d = cc - cycles; + u32 rc_d = rc - rx_clear; + u32 rf_d = rf - rx_frame; + u32 tf_d = tf - tx_frame; + + if (cc_d != 0) { + *rxc_pcnt = rc_d * 100 / cc_d; + *rxf_pcnt = rf_d * 100 / cc_d; + *txf_pcnt = tf_d * 100 / cc_d; + } else { + good = 0; + } + } + + cycles = cc; + rx_frame = rf; + rx_clear = rc; + tx_frame = tf; + + return good; +} + +/* + * Process a MIB interrupt. We may potentially be invoked because + * any of the MIB counters overflow/trigger so don't assume we're + * here because a PHY error counter triggered. 
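+ * The handler below always clears the OFDM/CCK filter registers and
+ * folds the hardware counters into ah_mibStats; only when phyCnt1 or
+ * phyCnt2 has all AR_MIBCNT_INTRMASK bits set is the overflow treated
+ * as an ANI trigger, after which ath9k_ani_restart() rewrites the
+ * counter bases.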
+ */ +void ath9k_hw_procmibevent(struct ath_hw *ah) +{ + u32 phyCnt1, phyCnt2; + + /* Reset these counters regardless */ + REG_WRITE(ah, AR_FILT_OFDM, 0); + REG_WRITE(ah, AR_FILT_CCK, 0); + if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING)) + REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR); + + /* Clear the mib counters and save them in the stats */ + ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); + + if (!DO_ANI(ah)) + return; + + /* NB: these are not reset-on-read */ + phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); + phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); + if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) || + ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) { + struct ar5416AniState *aniState = ah->curani; + u32 ofdmPhyErrCnt, cckPhyErrCnt; + + /* NB: only use ast_ani_*errs with AH_PRIVATE_DIAG */ + ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase; + ah->stats.ast_ani_ofdmerrs += + ofdmPhyErrCnt - aniState->ofdmPhyErrCount; + aniState->ofdmPhyErrCount = ofdmPhyErrCnt; + + cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase; + ah->stats.ast_ani_cckerrs += + cckPhyErrCnt - aniState->cckPhyErrCount; + aniState->cckPhyErrCount = cckPhyErrCnt; + + /* + * NB: figure out which counter triggered. If both + * trigger we'll only deal with one as the processing + * clobbers the error counter so the trigger threshold + * check will never be true. + */ + if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh) + ath9k_hw_ani_ofdm_err_trigger(ah); + if (aniState->cckPhyErrCount > aniState->cckTrigHigh) + ath9k_hw_ani_cck_err_trigger(ah); + /* NB: always restart to insure the h/w counters are reset */ + ath9k_ani_restart(ah); + } +} +EXPORT_SYMBOL(ath9k_hw_procmibevent); + +void ath9k_hw_ani_setup(struct ath_hw *ah) +{ + int i; + + const int totalSizeDesired[] = { -55, -55, -55, -55, -62 }; + const int coarseHigh[] = { -14, -14, -14, -14, -12 }; + const int coarseLow[] = { -64, -64, -64, -64, -70 }; + const int firpwr[] = { -78, -78, -78, -78, -80 }; + + for (i = 0; i < 5; i++) { + ah->totalSizeDesired[i] = totalSizeDesired[i]; + ah->coarse_high[i] = coarseHigh[i]; + ah->coarse_low[i] = coarseLow[i]; + ah->firpwr[i] = firpwr[i]; + } +} + +void ath9k_hw_ani_init(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + int i; + + ath_print(common, ATH_DBG_ANI, "Initialize ANI\n"); + + memset(ah->ani, 0, sizeof(ah->ani)); + for (i = 0; i < ARRAY_SIZE(ah->ani); i++) { + ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH; + ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW; + ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH; + ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW; + ah->ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH; + ah->ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW; + ah->ani[i].ofdmWeakSigDetectOff = + !ATH9K_ANI_USE_OFDM_WEAK_SIG; + ah->ani[i].cckWeakSigThreshold = + ATH9K_ANI_CCK_WEAK_SIG_THR; + ah->ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; + ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL; + ah->ani[i].ofdmPhyErrBase = + AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH; + ah->ani[i].cckPhyErrBase = + AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH; + } + + ath_print(common, ATH_DBG_ANI, + "Setting OfdmErrBase = 0x%08x\n", + ah->ani[0].ofdmPhyErrBase); + ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n", + ah->ani[0].cckPhyErrBase); + + REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase); + ath9k_enable_mib_counters(ah); + + ah->aniperiod = ATH9K_ANI_PERIOD; + if (ah->config.enable_ani) + 
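+ /* DO_ANI() tests HAL_PROCESS_ANI in ah->proc_phyerr, so ANI
+ * processing stays inert unless enable_ani is set */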
ah->proc_phyerr |= HAL_PROCESS_ANI; +} + +void ath9k_hw_ani_disable(struct ath_hw *ah) +{ + ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, "Disabling ANI\n"); + + ath9k_hw_disable_mib_counters(ah); + REG_WRITE(ah, AR_PHY_ERR_1, 0); + REG_WRITE(ah, AR_PHY_ERR_2, 0); +} diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h new file mode 100644 index 0000000..4e1ab94 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/ani.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ANI_H +#define ANI_H + +#define HAL_PROCESS_ANI 0x00000001 + +#define DO_ANI(ah) (((ah)->proc_phyerr & HAL_PROCESS_ANI)) + +#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) + +#define ATH9K_ANI_OFDM_TRIG_HIGH 500 +#define ATH9K_ANI_OFDM_TRIG_LOW 200 +#define ATH9K_ANI_CCK_TRIG_HIGH 200 +#define ATH9K_ANI_CCK_TRIG_LOW 100 +#define ATH9K_ANI_NOISE_IMMUNE_LVL 4 +#define ATH9K_ANI_USE_OFDM_WEAK_SIG true +#define ATH9K_ANI_CCK_WEAK_SIG_THR false +#define ATH9K_ANI_SPUR_IMMUNE_LVL 7 +#define ATH9K_ANI_FIRSTEP_LVL 0 +#define ATH9K_ANI_RSSI_THR_HIGH 40 +#define ATH9K_ANI_RSSI_THR_LOW 7 +#define ATH9K_ANI_PERIOD 100 + +#define HAL_NOISE_IMMUNE_MAX 4 +#define HAL_SPUR_IMMUNE_MAX 7 +#define HAL_FIRST_STEP_MAX 2 + +enum ath9k_ani_cmd { + ATH9K_ANI_PRESENT = 0x1, + ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4, + ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8, + ATH9K_ANI_FIRSTEP_LEVEL = 0x10, + ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20, + ATH9K_ANI_MODE = 0x40, + ATH9K_ANI_PHYERR_RESET = 0x80, + ATH9K_ANI_ALL = 0xff +}; + +struct ath9k_mib_stats { + u32 ackrcv_bad; + u32 rts_bad; + u32 rts_good; + u32 fcs_bad; + u32 beacons; +}; + +struct ar5416AniState { + struct ath9k_channel *c; + u8 noiseImmunityLevel; + u8 spurImmunityLevel; + u8 firstepLevel; + u8 ofdmWeakSigDetectOff; + u8 cckWeakSigThreshold; + u32 listenTime; + u32 ofdmTrigHigh; + u32 ofdmTrigLow; + int32_t cckTrigHigh; + int32_t cckTrigLow; + int32_t rssiThrLow; + int32_t rssiThrHigh; + u32 noiseFloor; + u32 txFrameCount; + u32 rxFrameCount; + u32 cycleCount; + u32 ofdmPhyErrCount; + u32 cckPhyErrCount; + u32 ofdmPhyErrBase; + u32 cckPhyErrBase; + int16_t pktRssi[2]; + int16_t ofdmErrRssi[2]; + int16_t cckErrRssi[2]; +}; + +struct ar5416Stats { + u32 ast_ani_niup; + u32 ast_ani_nidown; + u32 ast_ani_spurup; + u32 ast_ani_spurdown; + u32 ast_ani_ofdmon; + u32 ast_ani_ofdmoff; + u32 ast_ani_cckhigh; + u32 ast_ani_ccklow; + u32 ast_ani_stepup; + u32 ast_ani_stepdown; + u32 ast_ani_ofdmerrs; + u32 ast_ani_cckerrs; + u32 ast_ani_reset; + u32 ast_ani_lzero; + u32 ast_ani_lneg; + u32 avgbrssi; + struct ath9k_mib_stats ast_mibstats; +}; +#define ah_mibStats stats.ast_mibstats + +void ath9k_ani_reset(struct ath_hw *ah); +void ath9k_hw_ani_monitor(struct ath_hw 
*ah, + struct ath9k_channel *chan); +void ath9k_enable_mib_counters(struct ath_hw *ah); +void ath9k_hw_disable_mib_counters(struct ath_hw *ah); +u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt, + u32 *rxf_pcnt, u32 *txf_pcnt); +void ath9k_hw_procmibevent(struct ath_hw *ah); +void ath9k_hw_ani_setup(struct ath_hw *ah); +void ath9k_hw_ani_init(struct ath_hw *ah); +void ath9k_hw_ani_disable(struct ath_hw *ah); + +#endif /* ANI_H */ diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h new file mode 100644 index 0000000..83c7ea4 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -0,0 +1,606 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ATH9K_H +#define ATH9K_H + +#include +#include +#include + +#include "debug.h" +#include "common.h" + +/* + * Header for the ath9k.ko driver core *only* -- hw code nor any other driver + * should rely on this file or its contents. + */ + +struct ath_node; + +/* Macro to expand scalars to 64-bit objects */ + +#define ito64(x) (sizeof(x) == 1) ? \ + (((unsigned long long int)(x)) & (0xff)) : \ + (sizeof(x) == 2) ? \ + (((unsigned long long int)(x)) & 0xffff) : \ + ((sizeof(x) == 4) ? \ + (((unsigned long long int)(x)) & 0xffffffff) : \ + (unsigned long long int)(x)) + +/* increment with wrap-around */ +#define INCR(_l, _sz) do { \ + (_l)++; \ + (_l) &= ((_sz) - 1); \ + } while (0) + +/* decrement with wrap-around */ +#define DECR(_l, _sz) do { \ + (_l)--; \ + (_l) &= ((_sz) - 1); \ + } while (0) + +#define A_MAX(a, b) ((a) > (b) ? 
(a) : (b)) + +#define TSF_TO_TU(_h,_l) \ + ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) + +#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<bf_stale = false; \ + (_bf)->bf_lastbf = NULL; \ + (_bf)->bf_next = NULL; \ + memset(&((_bf)->bf_state), 0, \ + sizeof(struct ath_buf_state)); \ + } while (0) + +#define ATH_RXBUF_RESET(_bf) do { \ + (_bf)->bf_stale = false; \ + } while (0) + +/** + * enum buffer_type - Buffer type flags + * + * @BUF_HT: Send this buffer using HT capabilities + * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX) + * @BUF_AGGR: Indicates whether the buffer can be aggregated + * (used in aggregation scheduling) + * @BUF_RETRY: Indicates whether the buffer is retried + * @BUF_XRETRY: To denote excessive retries of the buffer + */ +enum buffer_type { + BUF_HT = BIT(1), + BUF_AMPDU = BIT(2), + BUF_AGGR = BIT(3), + BUF_RETRY = BIT(4), + BUF_XRETRY = BIT(5), +}; + +#define bf_nframes bf_state.bfs_nframes +#define bf_al bf_state.bfs_al +#define bf_frmlen bf_state.bfs_frmlen +#define bf_retries bf_state.bfs_retries +#define bf_seqno bf_state.bfs_seqno +#define bf_tidno bf_state.bfs_tidno +#define bf_keyix bf_state.bfs_keyix +#define bf_keytype bf_state.bfs_keytype +#define bf_isht(bf) (bf->bf_state.bf_type & BUF_HT) +#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU) +#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR) +#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY) +#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY) + +struct ath_descdma { + struct ath_desc *dd_desc; + dma_addr_t dd_desc_paddr; + u32 dd_desc_len; + struct ath_buf *dd_bufptr; +}; + +int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, + struct list_head *head, const char *name, + int nbuf, int ndesc); +void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, + struct list_head *head); + +/***********/ +/* RX / TX */ +/***********/ + +#define ATH_MAX_ANTENNA 3 +#define ATH_RXBUF 512 +#define ATH_TXBUF 512 +#define ATH_TXMAXTRY 13 +#define ATH_MGT_TXMAXTRY 4 + +#define TID_TO_WME_AC(_tid) \ + ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ + (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ + (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ + WME_AC_VO) + +#define ADDBA_EXCHANGE_ATTEMPTS 10 +#define ATH_AGGR_DELIM_SZ 4 +#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */ +/* number of delimiters for encryption padding */ +#define ATH_AGGR_ENCRYPTDELIM 10 +/* minimum h/w qdepth to be sustained to maximize aggregation */ +#define ATH_AGGR_MIN_QDEPTH 2 +#define ATH_AMPDU_SUBFRAME_DEFAULT 32 + +#define IEEE80211_SEQ_SEQ_SHIFT 4 +#define IEEE80211_SEQ_MAX 4096 +#define IEEE80211_WEP_IVLEN 3 +#define IEEE80211_WEP_KIDLEN 1 +#define IEEE80211_WEP_CRCLEN 4 +#define IEEE80211_MAX_MPDU_LEN (3840 + FCS_LEN + \ + (IEEE80211_WEP_IVLEN + \ + IEEE80211_WEP_KIDLEN + \ + IEEE80211_WEP_CRCLEN)) + +/* return whether a bit at index _n in bitmap _bm is set + * _sz is the size of the bitmap */ +#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \ + ((_bm)[(_n) >> 5] & (1 << ((_n) & 31)))) + +/* return block-ack bitmap index given sequence and starting sequence */ +#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1)) + +/* returns delimiter padding required given the packet length */ +#define ATH_AGGR_GET_NDELIM(_len) \ + (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? 
\ + (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2) + +#define BAW_WITHIN(_start, _bawsz, _seqno) \ + ((((_seqno) - (_start)) & 4095) < (_bawsz)) + +#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum) +#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low) +#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA) +#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)]) + +#define ATH_TX_COMPLETE_POLL_INT 1000 + +enum ATH_AGGR_STATUS { + ATH_AGGR_DONE, + ATH_AGGR_BAW_CLOSED, + ATH_AGGR_LIMITED, +}; + +struct ath_txq { + u32 axq_qnum; + u32 *axq_link; + struct list_head axq_q; + spinlock_t axq_lock; + u32 axq_depth; + bool stopped; + bool axq_tx_inprogress; + struct list_head axq_acq; +}; + +#define AGGR_CLEANUP BIT(1) +#define AGGR_ADDBA_COMPLETE BIT(2) +#define AGGR_ADDBA_PROGRESS BIT(3) + +struct ath_tx_control { + struct ath_txq *txq; + int if_id; + enum ath9k_internal_frame_type frame_type; +}; + +#define ATH_TX_ERROR 0x01 +#define ATH_TX_XRETRY 0x02 +#define ATH_TX_BAR 0x04 + +struct ath_tx { + u16 seq_no; + u32 txqsetup; + int hwq_map[ATH9K_WME_AC_VO+1]; + spinlock_t txbuflock; + struct list_head txbuf; + struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; + struct ath_descdma txdma; +}; + +struct ath_rx { + u8 defant; + u8 rxotherant; + u32 *rxlink; + unsigned int rxfilter; + spinlock_t rxflushlock; + spinlock_t rxbuflock; + struct list_head rxbuf; + struct ath_descdma rxdma; +}; + +int ath_startrecv(struct ath_softc *sc); +bool ath_stoprecv(struct ath_softc *sc); +void ath_flushrecv(struct ath_softc *sc); +u32 ath_calcrxfilter(struct ath_softc *sc); +int ath_rx_init(struct ath_softc *sc, int nbufs); +void ath_rx_cleanup(struct ath_softc *sc); +int ath_rx_tasklet(struct ath_softc *sc, int flush); +struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); +void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); +int ath_tx_setup(struct ath_softc *sc, int haltype); +void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); +void ath_draintxq(struct ath_softc *sc, + struct ath_txq *txq, bool retry_tx); +void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an); +void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an); +void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq); +int ath_tx_init(struct ath_softc *sc, int nbufs); +void ath_tx_cleanup(struct ath_softc *sc); +struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb); +int ath_txq_update(struct ath_softc *sc, int qnum, + struct ath9k_tx_queue_info *q); +int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ath_tx_control *txctl); +void ath_tx_tasklet(struct ath_softc *sc); +void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb); +bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno); +void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, + u16 tid, u16 *ssn); +void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); +void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); +void ath9k_enable_ps(struct ath_softc *sc); + +/********/ +/* VIFs */ +/********/ + +struct ath_vif { + int av_bslot; + __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ + enum nl80211_iftype av_opmode; + struct ath_buf *av_bcbuf; + struct ath_tx_control av_btxctl; + u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */ +}; + +/*******************/ +/* Beacon Handling */ +/*******************/ + +/* + * Regardless of the number of 
beacons we stagger, (i.e. regardless of the + * number of BSSIDs) if a given beacon does not go out even after waiting this + * number of beacon intervals, the game's up. + */ +#define BSTUCK_THRESH (9 * ATH_BCBUF) +#define ATH_BCBUF 4 +#define ATH_DEFAULT_BINTVAL 100 /* TU */ +#define ATH_DEFAULT_BMISS_LIMIT 10 +#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) + +struct ath_beacon_config { + u16 beacon_interval; + u16 listen_interval; + u16 dtim_period; + u16 bmiss_timeout; + u8 dtim_count; +}; + +struct ath_beacon { + enum { + OK, /* no change needed */ + UPDATE, /* update pending */ + COMMIT /* beacon sent, commit change */ + } updateslot; /* slot time update fsm */ + + u32 beaconq; + u32 bmisscnt; + u32 ast_be_xmit; + u64 bc_tstamp; + struct ieee80211_vif *bslot[ATH_BCBUF]; + struct ath_wiphy *bslot_aphy[ATH_BCBUF]; + int slottime; + int slotupdate; + struct ath9k_tx_queue_info beacon_qi; + struct ath_descdma bdma; + struct ath_txq *cabq; + struct list_head bbuf; +}; + +void ath_beacon_tasklet(unsigned long data); +void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif); +int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif); +void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp); +int ath_beaconq_config(struct ath_softc *sc); + +/*******/ +/* ANI */ +/*******/ + +#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */ +#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */ +#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */ +#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ +#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ + +void ath_ani_calibrate(unsigned long data); + +/**********/ +/* BTCOEX */ +/**********/ + +/* Defines the BT AR_BT_COEX_WGHT used */ +enum ath_stomp_type { + ATH_BTCOEX_NO_STOMP, + ATH_BTCOEX_STOMP_ALL, + ATH_BTCOEX_STOMP_LOW, + ATH_BTCOEX_STOMP_NONE +}; + +struct ath_btcoex { + bool hw_timer_enabled; + spinlock_t btcoex_lock; + struct timer_list period_timer; /* Timer for BT period */ + u32 bt_priority_cnt; + unsigned long bt_priority_time; + int bt_stomp_type; /* Types of BT stomping */ + u32 btcoex_no_stomp; /* in usec */ + u32 btcoex_period; /* in usec */ + u32 btscan_no_stomp; /* in usec */ + struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */ +}; + +int ath_init_btcoex_timer(struct ath_softc *sc); +void ath9k_btcoex_timer_resume(struct ath_softc *sc); +void ath9k_btcoex_timer_pause(struct ath_softc *sc); + +/********************/ +/* LED Control */ +/********************/ + +#define ATH_LED_PIN_DEF 1 +#define ATH_LED_PIN_9287 8 +#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */ +#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */ + +enum ath_led_type { + ATH_LED_RADIO, + ATH_LED_ASSOC, + ATH_LED_TX, + ATH_LED_RX +}; + +struct ath_led { + struct ath_softc *sc; + struct led_classdev led_cdev; + enum ath_led_type led_type; + char name[32]; + bool registered; +}; + +void ath_init_leds(struct ath_softc *sc); +void ath_deinit_leds(struct ath_softc *sc); + +/********************/ +/* Main driver core */ +/********************/ + +/* + * Default cache line size, in bytes. 
+ * Used when PCI device not fully initialized by bootrom/BIOS +*/ +#define DEFAULT_CACHELINE 32 +#define ATH_REGCLASSIDS_MAX 10 +#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ +#define ATH_MAX_SW_RETRIES 10 +#define ATH_CHAN_MAX 255 +#define IEEE80211_WEP_NKID 4 /* number of key ids */ + +#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ +#define ATH_RATE_DUMMY_MARKER 0 + +#define SC_OP_INVALID BIT(0) +#define SC_OP_BEACONS BIT(1) +#define SC_OP_RXAGGR BIT(2) +#define SC_OP_TXAGGR BIT(3) +#define SC_OP_FULL_RESET BIT(4) +#define SC_OP_PREAMBLE_SHORT BIT(5) +#define SC_OP_PROTECT_ENABLE BIT(6) +#define SC_OP_RXFLUSH BIT(7) +#define SC_OP_LED_ASSOCIATED BIT(8) +#define SC_OP_LED_ON BIT(9) +#define SC_OP_SCANNING BIT(10) +#define SC_OP_TSF_RESET BIT(11) +#define SC_OP_BT_PRIORITY_DETECTED BIT(12) +#define SC_OP_BT_SCAN BIT(13) + +/* Powersave flags */ +#define PS_WAIT_FOR_BEACON BIT(0) +#define PS_WAIT_FOR_CAB BIT(1) +#define PS_WAIT_FOR_PSPOLL_DATA BIT(2) +#define PS_WAIT_FOR_TX_ACK BIT(3) +#define PS_BEACON_SYNC BIT(4) +#define PS_NULLFUNC_COMPLETED BIT(5) +#define PS_ENABLED BIT(6) + +struct ath_wiphy; +struct ath_rate_table; + +struct ath_softc { + struct ieee80211_hw *hw; + struct device *dev; + + spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */ + struct ath_wiphy *pri_wiphy; + struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may + * have NULL entries */ + int num_sec_wiphy; /* number of sec_wiphy pointers in the array */ + int chan_idx; + int chan_is_ht; + struct ath_wiphy *next_wiphy; + struct work_struct chan_work; + int wiphy_select_failures; + unsigned long wiphy_select_first_fail; + struct delayed_work wiphy_work; + unsigned long wiphy_scheduler_int; + int wiphy_scheduler_index; + + struct tasklet_struct intr_tq; + struct tasklet_struct bcon_tasklet; + struct ath_hw *sc_ah; + void __iomem *mem; + int irq; + spinlock_t sc_resetlock; + spinlock_t sc_serial_rw; + spinlock_t sc_pm_lock; + struct mutex mutex; + + u32 intrstatus; + u32 sc_flags; /* SC_OP_* */ + u16 ps_flags; /* PS_* */ + u16 curtxpow; + u8 nbcnvifs; + u16 nvifs; + bool ps_enabled; + bool ps_idle; + unsigned long ps_usecount; + enum ath9k_int imask; + + struct ath_config config; + struct ath_rx rx; + struct ath_tx tx; + struct ath_beacon beacon; + const struct ath_rate_table *cur_rate_table; + enum wireless_mode cur_rate_mode; + struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + + struct ath_led radio_led; + struct ath_led assoc_led; + struct ath_led tx_led; + struct ath_led rx_led; + struct delayed_work ath_led_blink_work; + int led_on_duration; + int led_off_duration; + int led_on_cnt; + int led_off_cnt; + + int beacon_interval; + +#ifdef CONFIG_ATH9K_DEBUGFS + struct ath9k_debug debug; +#endif + struct ath_beacon_config cur_beacon_conf; + struct delayed_work tx_complete_work; + struct ath_btcoex btcoex; +}; + +struct ath_wiphy { + struct ath_softc *sc; /* shared for all virtual wiphys */ + struct ieee80211_hw *hw; + enum ath_wiphy_state { + ATH_WIPHY_INACTIVE, + ATH_WIPHY_ACTIVE, + ATH_WIPHY_PAUSING, + ATH_WIPHY_PAUSED, + ATH_WIPHY_SCAN, + } state; + bool idle; + int chan_idx; + int chan_is_ht; +}; + +void ath9k_tasklet(unsigned long data); +int ath_reset(struct ath_softc *sc, bool retry_tx); +int ath_get_hal_qnum(u16 queue, struct ath_softc *sc); +int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); +int ath_cabq_update(struct ath_softc *); + +static inline void ath_read_cachesize(struct ath_common *common, int *csz) +{ + 
common->bus_ops->read_cachesize(common, csz); +} + +extern struct ieee80211_ops ath9k_ops; +extern int modparam_nohwcrypt; + +irqreturn_t ath_isr(int irq, void *dev); +int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, + const struct ath_bus_ops *bus_ops); +void ath9k_deinit_device(struct ath_softc *sc); +const char *ath_mac_bb_name(u32 mac_bb_version); +const char *ath_rf_name(u16 rf_version); +void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); +void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, + struct ath9k_channel *ichan); +void ath_update_chainmask(struct ath_softc *sc, int is_ht); +int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, + struct ath9k_channel *hchan); + +void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw); +void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw); +bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode); + +#ifdef CONFIG_PCI +int ath_pci_init(void); +void ath_pci_exit(void); +#else +static inline int ath_pci_init(void) { return 0; }; +static inline void ath_pci_exit(void) {}; +#endif + +#ifdef CONFIG_ATHEROS_AR71XX +int ath_ahb_init(void); +void ath_ahb_exit(void); +#else +static inline int ath_ahb_init(void) { return 0; }; +static inline void ath_ahb_exit(void) {}; +#endif + +void ath9k_ps_wakeup(struct ath_softc *sc); +void ath9k_ps_restore(struct ath_softc *sc); + +void ath9k_set_bssid_mask(struct ieee80211_hw *hw); +int ath9k_wiphy_add(struct ath_softc *sc); +int ath9k_wiphy_del(struct ath_wiphy *aphy); +void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb); +int ath9k_wiphy_pause(struct ath_wiphy *aphy); +int ath9k_wiphy_unpause(struct ath_wiphy *aphy); +int ath9k_wiphy_select(struct ath_wiphy *aphy); +void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int); +void ath9k_wiphy_chan_work(struct work_struct *work); +bool ath9k_wiphy_started(struct ath_softc *sc); +void ath9k_wiphy_pause_all_forced(struct ath_softc *sc, + struct ath_wiphy *selected); +bool ath9k_wiphy_scanning(struct ath_softc *sc); +void ath9k_wiphy_work(struct work_struct *work); +bool ath9k_all_wiphys_idle(struct ath_softc *sc); +void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle); + +void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue); +void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue); + +int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype); + +void ath_start_rfkill_poll(struct ath_softc *sc); +extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); + +#endif /* ATH9K_H */ diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c new file mode 100644 index 0000000..b4a31a4 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -0,0 +1,788 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ath9k.h" + +#define FUDGE 2 + +/* + * This function will modify certain transmit queue properties depending on + * the operating mode of the station (AP or AdHoc). Parameters are AIFS + * settings and channel width min/max +*/ +int ath_beaconq_config(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_tx_queue_info qi, qi_be; + int qnum; + + ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi); + if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { + /* Always burst out beacon and CAB traffic. */ + qi.tqi_aifs = 1; + qi.tqi_cwmin = 0; + qi.tqi_cwmax = 0; + } else { + /* Adhoc mode; important thing is to use 2x cwmin. */ + qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, + ATH9K_WME_AC_BE); + ath9k_hw_get_txq_props(ah, qnum, &qi_be); + qi.tqi_aifs = qi_be.tqi_aifs; + qi.tqi_cwmin = 4*qi_be.tqi_cwmin; + qi.tqi_cwmax = qi_be.tqi_cwmax; + } + + if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) { + ath_print(common, ATH_DBG_FATAL, + "Unable to update h/w beacon queue parameters\n"); + return 0; + } else { + ath9k_hw_resettxqueue(ah, sc->beacon.beaconq); + return 1; + } +} + +/* + * Associates the beacon frame buffer with a transmit descriptor. Will set + * up all required antenna switch parameters, rate codes, and channel flags. + * Beacons are always sent out at the lowest rate, and are not retried. +*/ +static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp, + struct ath_buf *bf, int rateidx) +{ + struct sk_buff *skb = bf->bf_mpdu; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath_desc *ds; + struct ath9k_11n_rate_series series[4]; + int flags, antenna, ctsrate = 0, ctsduration = 0; + struct ieee80211_supported_band *sband; + u8 rate = 0; + + ds = bf->bf_desc; + flags = ATH9K_TXDESC_NOACK; + + if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || + (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) && + (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { + ds->ds_link = bf->bf_daddr; /* self-linked */ + flags |= ATH9K_TXDESC_VEOL; + /* Let hardware handle antenna switching. */ + antenna = 0; + } else { + ds->ds_link = 0; + /* + * Switch antenna every beacon. + * Should only switch every beacon period, not for every SWBA + * XXX assumes two antennae + */ + antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1); + } + + ds->ds_data = bf->bf_buf_addr; + + sband = &sc->sbands[common->hw->conf.channel->band]; + rate = sband->bitrates[rateidx].hw_value; + if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) + rate |= sband->bitrates[rateidx].hw_value_short; + + ath9k_hw_set11n_txdesc(ah, ds, skb->len + FCS_LEN, + ATH9K_PKT_TYPE_BEACON, + MAX_RATE_POWER, + ATH9K_TXKEYIX_INVALID, + ATH9K_KEY_TYPE_CLEAR, + flags); + + /* NB: beacon's BufLen must be a multiple of 4 bytes */ + ath9k_hw_filltxdesc(ah, ds, roundup(skb->len, 4), + true, true, ds); + + memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); + series[0].Tries = 1; + series[0].Rate = rate; + series[0].ChSel = common->tx_chainmask; + series[0].RateFlags = (ctsrate) ? 
ATH9K_RATESERIES_RTS_CTS : 0; + ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, ctsrate, ctsduration, + series, 4, 0); +} + +static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_buf *bf; + struct ath_vif *avp; + struct sk_buff *skb; + struct ath_txq *cabq; + struct ieee80211_tx_info *info; + int cabq_depth; + + if (aphy->state != ATH_WIPHY_ACTIVE) + return NULL; + + avp = (void *)vif->drv_priv; + cabq = sc->beacon.cabq; + + if (avp->av_bcbuf == NULL) + return NULL; + + /* Release the old beacon first */ + + bf = avp->av_bcbuf; + skb = bf->bf_mpdu; + if (skb) { + dma_unmap_single(sc->dev, bf->bf_dmacontext, + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + } + + /* Get a new beacon from mac80211 */ + + skb = ieee80211_beacon_get(hw, vif); + bf->bf_mpdu = skb; + if (skb == NULL) + return NULL; + ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp = + avp->tsf_adjust; + + info = IEEE80211_SKB_CB(skb); + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { + /* + * TODO: make sure the seq# gets assigned properly (vs. other + * TX frames) + */ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + sc->tx.seq_no += 0x10; + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); + } + + bf->bf_buf_addr = bf->bf_dmacontext = + dma_map_single(sc->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { + dev_kfree_skb_any(skb); + bf->bf_mpdu = NULL; + ath_print(common, ATH_DBG_FATAL, + "dma_mapping_error on beaconing\n"); + return NULL; + } + + skb = ieee80211_get_buffered_bc(hw, vif); + + /* + * if the CABQ traffic from previous DTIM is pending and the current + * beacon is also a DTIM. + * 1) if there is only one vif let the cab traffic continue. + * 2) if there are more than one vif and we are using staggered + * beacons, then drain the cabq by dropping all the frames in + * the cabq so that the current vifs cab traffic can be scheduled. + */ + spin_lock_bh(&cabq->axq_lock); + cabq_depth = cabq->axq_depth; + spin_unlock_bh(&cabq->axq_lock); + + if (skb && cabq_depth) { + if (sc->nvifs > 1) { + ath_print(common, ATH_DBG_BEACON, + "Flushing previous cabq traffic\n"); + ath_draintxq(sc, cabq, false); + } + } + + ath_beacon_setup(sc, avp, bf, info->control.rates[0].idx); + + while (skb) { + ath_tx_cabq(hw, skb); + skb = ieee80211_get_buffered_bc(hw, vif); + } + + return bf; +} + +/* + * Startup beacon transmission for adhoc mode when they are sent entirely + * by the hardware using the self-linked descriptor + veol trick. 
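+ * ath_beacon_setup() self-links the descriptor (ds_link = bf_daddr) and
+ * sets ATH9K_TXDESC_VEOL for this mode, so once the queue is started the
+ * hardware keeps re-sending the same beacon each interval without
+ * further software involvement.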
+*/ +static void ath_beacon_start_adhoc(struct ath_softc *sc, + struct ieee80211_vif *vif) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath_buf *bf; + struct ath_vif *avp; + struct sk_buff *skb; + + avp = (void *)vif->drv_priv; + + if (avp->av_bcbuf == NULL) + return; + + bf = avp->av_bcbuf; + skb = bf->bf_mpdu; + + ath_beacon_setup(sc, avp, bf, 0); + + /* NB: caller is known to have already stopped tx dma */ + ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr); + ath9k_hw_txstart(ah, sc->beacon.beaconq); + ath_print(common, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n", + sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc); +} + +int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) +{ + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_vif *avp; + struct ath_buf *bf; + struct sk_buff *skb; + __le64 tstamp; + + avp = (void *)vif->drv_priv; + + /* Allocate a beacon descriptor if we haven't done so. */ + if (!avp->av_bcbuf) { + /* Allocate beacon state for hostap/ibss. We know + * a buffer is available. */ + avp->av_bcbuf = list_first_entry(&sc->beacon.bbuf, + struct ath_buf, list); + list_del(&avp->av_bcbuf->list); + + if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || + !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { + int slot; + /* + * Assign the vif to a beacon xmit slot. As + * above, this cannot fail to find one. + */ + avp->av_bslot = 0; + for (slot = 0; slot < ATH_BCBUF; slot++) + if (sc->beacon.bslot[slot] == NULL) { + /* + * XXX hack, space out slots to better + * deal with misses + */ + if (slot+1 < ATH_BCBUF && + sc->beacon.bslot[slot+1] == NULL) { + avp->av_bslot = slot+1; + break; + } + avp->av_bslot = slot; + /* NB: keep looking for a double slot */ + } + BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL); + sc->beacon.bslot[avp->av_bslot] = vif; + sc->beacon.bslot_aphy[avp->av_bslot] = aphy; + sc->nbcnvifs++; + } + } + + /* release the previous beacon frame, if it already exists. */ + bf = avp->av_bcbuf; + if (bf->bf_mpdu != NULL) { + skb = bf->bf_mpdu; + dma_unmap_single(sc->dev, bf->bf_dmacontext, + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + bf->bf_mpdu = NULL; + } + + /* NB: the beacon data buffer must be 32-bit aligned. */ + skb = ieee80211_beacon_get(sc->hw, vif); + if (skb == NULL) { + ath_print(common, ATH_DBG_BEACON, "cannot get skb\n"); + return -ENOMEM; + } + + tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; + sc->beacon.bc_tstamp = le64_to_cpu(tstamp); + /* Calculate a TSF adjustment factor required for staggered beacons. */ + if (avp->av_bslot > 0) { + u64 tsfadjust; + int intval; + + intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL; + + /* + * Calculate the TSF offset for this beacon slot, i.e., the + * number of usecs that need to be added to the timestamp field + * in Beacon and Probe Response frames. Beacon slot 0 is + * processed at the correct offset, so it does not require TSF + * adjustment. Other slots are adjusted to get the timestamp + * close to the TBTT for the BSS. 
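+ * For example, with ATH_BCBUF = 4 and a 100 TU beacon interval,
+ * slot 1 gets tsfadjust = 100 * 1 / 4 = 25 TU (25 * 1024 = 25600 usec).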
+ */ + tsfadjust = intval * avp->av_bslot / ATH_BCBUF; + avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust)); + + ath_print(common, ATH_DBG_BEACON, + "stagger beacons, bslot %d intval " + "%u tsfadjust %llu\n", + avp->av_bslot, intval, (unsigned long long)tsfadjust); + + ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp = + avp->tsf_adjust; + } else + avp->tsf_adjust = cpu_to_le64(0); + + bf->bf_mpdu = skb; + bf->bf_buf_addr = bf->bf_dmacontext = + dma_map_single(sc->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { + dev_kfree_skb_any(skb); + bf->bf_mpdu = NULL; + ath_print(common, ATH_DBG_FATAL, + "dma_mapping_error on beacon alloc\n"); + return -ENOMEM; + } + + return 0; +} + +void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp) +{ + if (avp->av_bcbuf != NULL) { + struct ath_buf *bf; + + if (avp->av_bslot != -1) { + sc->beacon.bslot[avp->av_bslot] = NULL; + sc->beacon.bslot_aphy[avp->av_bslot] = NULL; + sc->nbcnvifs--; + } + + bf = avp->av_bcbuf; + if (bf->bf_mpdu != NULL) { + struct sk_buff *skb = bf->bf_mpdu; + dma_unmap_single(sc->dev, bf->bf_dmacontext, + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + bf->bf_mpdu = NULL; + } + list_add_tail(&bf->list, &sc->beacon.bbuf); + + avp->av_bcbuf = NULL; + } +} + +void ath_beacon_tasklet(unsigned long data) +{ + struct ath_softc *sc = (struct ath_softc *)data; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath_buf *bf = NULL; + struct ieee80211_vif *vif; + struct ath_wiphy *aphy; + int slot; + u32 bfaddr, bc = 0, tsftu; + u64 tsf; + u16 intval; + + /* + * Check if the previous beacon has gone out. If + * not don't try to post another, skip this period + * and wait for the next. Missed beacons indicate + * a problem and should not occur. If we miss too + * many consecutive beacons reset the device. + */ + if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) { + sc->beacon.bmisscnt++; + + if (sc->beacon.bmisscnt < BSTUCK_THRESH) { + ath_print(common, ATH_DBG_BEACON, + "missed %u consecutive beacons\n", + sc->beacon.bmisscnt); + } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { + ath_print(common, ATH_DBG_BEACON, + "beacon is officially stuck\n"); + sc->sc_flags |= SC_OP_TSF_RESET; + ath_reset(sc, false); + } + + return; + } + + if (sc->beacon.bmisscnt != 0) { + ath_print(common, ATH_DBG_BEACON, + "resume beacon xmit after %u misses\n", + sc->beacon.bmisscnt); + sc->beacon.bmisscnt = 0; + } + + /* + * Generate beacon frames. we are sending frames + * staggered so calculate the slot for this frame based + * on the tsf to safeguard against missing an swba. + */ + + intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL; + + tsf = ath9k_hw_gettsf64(ah); + tsftu = TSF_TO_TU(tsf>>32, tsf); + slot = ((tsftu % intval) * ATH_BCBUF) / intval; + /* + * Reverse the slot order to get slot 0 on the TBTT offset that does + * not require TSF adjustment and other slots adding + * slot/ATH_BCBUF * beacon_int to timestamp. For example, with + * ATH_BCBUF = 4, we process beacon slots as follows: 3 2 1 0 3 2 1 .. + * and slot 0 is at correct offset to TBTT. 
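+ * Worked example with illustrative numbers: if intval is 100 TU and
+ * (tsftu % intval) is 30, the computation above gives slot
+ * (30 * 4) / 100 = 1, which the reversal below maps to 4 - 1 - 1 = 2.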
+ */ + slot = ATH_BCBUF - slot - 1; + vif = sc->beacon.bslot[slot]; + aphy = sc->beacon.bslot_aphy[slot]; + + ath_print(common, ATH_DBG_BEACON, + "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", + slot, tsf, tsftu, intval, vif); + + bfaddr = 0; + if (vif) { + bf = ath_beacon_generate(aphy->hw, vif); + if (bf != NULL) { + bfaddr = bf->bf_daddr; + bc = 1; + } + } + + /* + * Handle slot time change when a non-ERP station joins/leaves + * an 11g network. The 802.11 layer notifies us via callback, + * we mark updateslot, then wait one beacon before effecting + * the change. This gives associated stations at least one + * beacon interval to note the state change. + * + * NB: The slot time change state machine is clocked according + * to whether we are bursting or staggering beacons. We + * recognize the request to update and record the current + * slot then don't transition until that slot is reached + * again. If we miss a beacon for that slot then we'll be + * slow to transition but we'll be sure at least one beacon + * interval has passed. When bursting slot is always left + * set to ATH_BCBUF so this check is a noop. + */ + if (sc->beacon.updateslot == UPDATE) { + sc->beacon.updateslot = COMMIT; /* commit next beacon */ + sc->beacon.slotupdate = slot; + } else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) { + ah->slottime = sc->beacon.slottime; + ath9k_hw_init_global_settings(ah); + sc->beacon.updateslot = OK; + } + if (bfaddr != 0) { + /* + * Stop any current dma and put the new frame(s) on the queue. + * This should never fail since we check above that no frames + * are still pending on the queue. + */ + if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) { + ath_print(common, ATH_DBG_FATAL, + "beacon queue %u did not stop?\n", sc->beacon.beaconq); + } + + /* NB: cabq traffic should already be queued and primed */ + ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr); + ath9k_hw_txstart(ah, sc->beacon.beaconq); + + sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */ + } +} + +static void ath9k_beacon_init(struct ath_softc *sc, + u32 next_beacon, + u32 beacon_period) +{ + if (beacon_period & ATH9K_BEACON_RESET_TSF) + ath9k_ps_wakeup(sc); + + ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period); + + if (beacon_period & ATH9K_BEACON_RESET_TSF) + ath9k_ps_restore(sc); +} + +/* + * For multi-bss ap support beacons are either staggered evenly over N slots or + * burst together. For the former arrange for the SWBA to be delivered for each + * slot. Slots that are not occupied will generate nothing. + */ +static void ath_beacon_config_ap(struct ath_softc *sc, + struct ath_beacon_config *conf) +{ + u32 nexttbtt, intval; + + /* NB: the beacon interval is kept internally in TU's */ + intval = conf->beacon_interval & ATH9K_BEACON_PERIOD; + intval /= ATH_BCBUF; /* for staggered beacons */ + nexttbtt = intval; + + if (sc->sc_flags & SC_OP_TSF_RESET) + intval |= ATH9K_BEACON_RESET_TSF; + + /* + * In AP mode we enable the beacon timers and SWBA interrupts to + * prepare beacon frames. + */ + intval |= ATH9K_BEACON_ENA; + sc->imask |= ATH9K_INT_SWBA; + ath_beaconq_config(sc); + + /* Set the computed AP beacon timers */ + + ath9k_hw_set_interrupts(sc->sc_ah, 0); + ath9k_beacon_init(sc, nexttbtt, intval); + sc->beacon.bmisscnt = 0; + ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); + + /* Clear the reset TSF flag, so that subsequent beacon updation + will not reset the HW TSF. 
*/ + + sc->sc_flags &= ~SC_OP_TSF_RESET; +} + +/* + * This sets up the beacon timers according to the timestamp of the last + * received beacon and the current TSF, configures PCF and DTIM + * handling, programs the sleep registers so the hardware will wakeup in + * time to receive beacons, and configures the beacon miss handling so + * we'll receive a BMISS interrupt when we stop seeing beacons from the AP + * we've associated with. + */ +static void ath_beacon_config_sta(struct ath_softc *sc, + struct ath_beacon_config *conf) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath9k_beacon_state bs; + int dtimperiod, dtimcount, sleepduration; + int cfpperiod, cfpcount; + u32 nexttbtt = 0, intval, tsftu; + u64 tsf; + int num_beacons, offset, dtim_dec_count, cfp_dec_count; + + /* No need to configure beacon if we are not associated */ + if (!common->curaid) { + ath_print(common, ATH_DBG_BEACON, + "STA is not yet associated..skipping beacon config\n"); + return; + } + + memset(&bs, 0, sizeof(bs)); + intval = conf->beacon_interval & ATH9K_BEACON_PERIOD; + + /* + * Setup dtim and cfp parameters according to + * last beacon we received (which may be none). + */ + dtimperiod = conf->dtim_period; + if (dtimperiod <= 0) /* NB: 0 if not known */ + dtimperiod = 1; + dtimcount = conf->dtim_count; + if (dtimcount >= dtimperiod) /* NB: sanity check */ + dtimcount = 0; + cfpperiod = 1; /* NB: no PCF support yet */ + cfpcount = 0; + + sleepduration = conf->listen_interval * intval; + if (sleepduration <= 0) + sleepduration = intval; + + /* + * Pull nexttbtt forward to reflect the current + * TSF and calculate dtim+cfp state for the result. + */ + tsf = ath9k_hw_gettsf64(sc->sc_ah); + tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; + + num_beacons = tsftu / intval + 1; + offset = tsftu % intval; + nexttbtt = tsftu - offset; + if (offset) + nexttbtt += intval; + + /* DTIM Beacon every dtimperiod Beacon */ + dtim_dec_count = num_beacons % dtimperiod; + /* CFP every cfpperiod DTIM Beacon */ + cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod; + if (dtim_dec_count) + cfp_dec_count++; + + dtimcount -= dtim_dec_count; + if (dtimcount < 0) + dtimcount += dtimperiod; + + cfpcount -= cfp_dec_count; + if (cfpcount < 0) + cfpcount += cfpperiod; + + bs.bs_intval = intval; + bs.bs_nexttbtt = nexttbtt; + bs.bs_dtimperiod = dtimperiod*intval; + bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval; + bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; + bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; + bs.bs_cfpmaxduration = 0; + + /* + * Calculate the number of consecutive beacons to miss* before taking + * a BMISS interrupt. The configuration is specified in TU so we only + * need calculate based on the beacon interval. Note that we clamp the + * result to at most 15 beacons. + */ + if (sleepduration > intval) { + bs.bs_bmissthreshold = conf->listen_interval * + ATH_DEFAULT_BMISS_LIMIT / 2; + } else { + bs.bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, intval); + if (bs.bs_bmissthreshold > 15) + bs.bs_bmissthreshold = 15; + else if (bs.bs_bmissthreshold <= 0) + bs.bs_bmissthreshold = 1; + } + + /* + * Calculate sleep duration. The configuration is given in ms. + * We ensure a multiple of the beacon period is used. Also, if the sleep + * duration is greater than the DTIM period then it makes senses + * to make it a multiple of that. 
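+ * Rough example, assuming IEEE80211_MS_TO_TU() maps 100 ms to about
+ * 97 TU: with a 100 TU beacon interval and listen_interval 1,
+ * sleepduration is 100 TU and the roundup() below yields a 100 TU
+ * sleep, i.e. one beacon period.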
+ * + * XXX fixed at 100ms + */ + + bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration); + if (bs.bs_sleepduration > bs.bs_dtimperiod) + bs.bs_sleepduration = bs.bs_dtimperiod; + + /* TSF out of range threshold fixed at 1 second */ + bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; + + ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); + ath_print(common, ATH_DBG_BEACON, + "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", + bs.bs_bmissthreshold, bs.bs_sleepduration, + bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); + + /* Set the computed STA beacon timers */ + + ath9k_hw_set_interrupts(sc->sc_ah, 0); + ath9k_hw_set_sta_beacon_timers(sc->sc_ah, &bs); + sc->imask |= ATH9K_INT_BMISS; + ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); +} + +static void ath_beacon_config_adhoc(struct ath_softc *sc, + struct ath_beacon_config *conf, + struct ieee80211_vif *vif) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + u64 tsf; + u32 tsftu, intval, nexttbtt; + + intval = conf->beacon_interval & ATH9K_BEACON_PERIOD; + + + /* Pull nexttbtt forward to reflect the current TSF */ + + nexttbtt = TSF_TO_TU(sc->beacon.bc_tstamp >> 32, sc->beacon.bc_tstamp); + if (nexttbtt == 0) + nexttbtt = intval; + else if (intval) + nexttbtt = roundup(nexttbtt, intval); + + tsf = ath9k_hw_gettsf64(sc->sc_ah); + tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE; + do { + nexttbtt += intval; + } while (nexttbtt < tsftu); + + ath_print(common, ATH_DBG_BEACON, + "IBSS nexttbtt %u intval %u (%u)\n", + nexttbtt, intval, conf->beacon_interval); + + /* + * In IBSS mode enable the beacon timers but only enable SWBA interrupts + * if we need to manually prepare beacon frames. Otherwise we use a + * self-linked tx descriptor and let the hardware deal with things. + */ + intval |= ATH9K_BEACON_ENA; + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) + sc->imask |= ATH9K_INT_SWBA; + + ath_beaconq_config(sc); + + /* Set the computed ADHOC beacon timers */ + + ath9k_hw_set_interrupts(sc->sc_ah, 0); + ath9k_beacon_init(sc, nexttbtt, intval); + sc->beacon.bmisscnt = 0; + ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); + + /* FIXME: Handle properly when vif is NULL */ + if (vif && sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL) + ath_beacon_start_adhoc(sc, vif); +} + +void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) +{ + struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + enum nl80211_iftype iftype; + + /* Setup the beacon configuration parameters */ + if (vif) { + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + + iftype = vif->type; + + cur_conf->beacon_interval = bss_conf->beacon_int; + cur_conf->dtim_period = bss_conf->dtim_period; + cur_conf->listen_interval = 1; + cur_conf->dtim_count = 1; + cur_conf->bmiss_timeout = + ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; + } else { + iftype = sc->sc_ah->opmode; + } + + /* + * It looks like mac80211 may end up using beacon interval of zero in + * some cases (at least for mesh point). Avoid getting into an + * infinite loop by using a bit safer value instead. To be safe, + * do sanity check on beacon interval for all operating modes. 
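+ * The 100 TU fallback applied below matches the common default beacon
+ * interval (1 TU = 1024 us, so 100 TU is roughly 102.4 ms).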
+ */ + if (cur_conf->beacon_interval == 0) + cur_conf->beacon_interval = 100; + + switch (iftype) { + case NL80211_IFTYPE_AP: + ath_beacon_config_ap(sc, cur_conf); + break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + ath_beacon_config_adhoc(sc, cur_conf, vif); + break; + case NL80211_IFTYPE_STATION: + ath_beacon_config_sta(sc, cur_conf); + break; + default: + ath_print(common, ATH_DBG_CONFIG, + "Unsupported beaconing mode\n"); + return; + } + + sc->sc_flags |= SC_OP_BEACONS; +} diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c new file mode 100644 index 0000000..fb4ac15 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hw.h" + +enum ath_bt_mode { + ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */ + ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */ + ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */ + ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */ +}; + +struct ath_btcoex_config { + u8 bt_time_extend; + bool bt_txstate_extend; + bool bt_txframe_extend; + enum ath_bt_mode bt_mode; /* coexistence mode */ + bool bt_quiet_collision; + bool bt_rxclear_polarity; /* invert rx_clear as WLAN_ACTIVE*/ + u8 bt_priority_time; + u8 bt_first_slot_time; + bool bt_hold_rx_clear; +}; + +static const u16 ath_subsysid_tbl[] = { + AR9280_COEX2WIRE_SUBSYSID, + AT9285_COEX3WIRE_SA_SUBSYSID, + AT9285_COEX3WIRE_DA_SUBSYSID +}; + +/* + * Checks the subsystem id of the device to see if it + * supports btcoex + */ +bool ath9k_hw_btcoex_supported(struct ath_hw *ah) +{ + int i; + + if (!ah->hw_version.subsysid) + return false; + + for (i = 0; i < ARRAY_SIZE(ath_subsysid_tbl); i++) + if (ah->hw_version.subsysid == ath_subsysid_tbl[i]) + return true; + + return false; +} + +void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) +{ + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + const struct ath_btcoex_config ath_bt_config = { + .bt_time_extend = 0, + .bt_txstate_extend = true, + .bt_txframe_extend = true, + .bt_mode = ATH_BT_COEX_MODE_SLOTTED, + .bt_quiet_collision = true, + .bt_rxclear_polarity = true, + .bt_priority_time = 2, + .bt_first_slot_time = 5, + .bt_hold_rx_clear = true, + }; + u32 i; + + btcoex_hw->bt_coex_mode = + (btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) | + SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) | + SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) | + SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) | + SM(ath_bt_config.bt_mode, AR_BT_MODE) | + SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) | + SM(ath_bt_config.bt_rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) | + SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) | + SM(ath_bt_config.bt_first_slot_time, 
AR_BT_FIRST_SLOT_TIME) | + SM(qnum, AR_BT_QCU_THRESH); + + btcoex_hw->bt_coex_mode2 = + SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) | + SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) | + AR_BT_DISABLE_BT_ANT; + + for (i = 0; i < 32; i++) + ah->hw_gen_timers.gen_timer_index[(debruijn32 << i) >> 27] = i; +} +EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw); + +void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah) +{ + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + + /* connect bt_active to baseband */ + REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL, + (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF | + AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF)); + + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, + AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB); + + /* Set input mux for bt_active to gpio pin */ + REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, + AR_GPIO_INPUT_MUX1_BT_ACTIVE, + btcoex_hw->btactive_gpio); + + /* Configure the desired gpio port for input */ + ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio); +} +EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire); + +void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah) +{ + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + + /* btcoex 3-wire */ + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, + (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB | + AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB)); + + /* Set input mux for bt_prority_async and + * bt_active_async to GPIO pins */ + REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, + AR_GPIO_INPUT_MUX1_BT_ACTIVE, + btcoex_hw->btactive_gpio); + + REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1, + AR_GPIO_INPUT_MUX1_BT_PRIORITY, + btcoex_hw->btpriority_gpio); + + /* Configure the desired GPIO ports for input */ + + ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio); + ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio); +} +EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire); + +static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah) +{ + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + + /* Configure the desired GPIO port for TX_FRAME output */ + ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, + AR_GPIO_OUTPUT_MUX_AS_TX_FRAME); +} + +void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, + u32 bt_weight, + u32 wlan_weight) +{ + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + + btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) | + SM(wlan_weight, AR_BTCOEX_WL_WGHT); +} +EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight); + +static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) +{ + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + + /* + * Program coex mode and weight registers to + * enable coex 3-wire + */ + REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode); + REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights); + REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2); + + REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); + REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); + + ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, + AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL); +} + +void ath9k_hw_btcoex_enable(struct ath_hw *ah) +{ + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + + switch (btcoex_hw->scheme) { + case ATH_BTCOEX_CFG_NONE: + break; + case ATH_BTCOEX_CFG_2WIRE: + ath9k_hw_btcoex_enable_2wire(ah); + break; + case ATH_BTCOEX_CFG_3WIRE: + ath9k_hw_btcoex_enable_3wire(ah); + break; + } + + REG_RMW(ah, AR_GPIO_PDPU, + (0x2 << (btcoex_hw->btactive_gpio * 2)), + (0x3 << (btcoex_hw->btactive_gpio * 2))); + + ah->btcoex_hw.enabled = true; +} +EXPORT_SYMBOL(ath9k_hw_btcoex_enable); + +void ath9k_hw_btcoex_disable(struct ath_hw *ah) +{ + 
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + + ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); + + ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + + if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) { + REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE); + REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0); + REG_WRITE(ah, AR_BT_COEX_MODE2, 0); + } + + ah->btcoex_hw.enabled = false; +} +EXPORT_SYMBOL(ath9k_hw_btcoex_disable); diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h new file mode 100644 index 0000000..1ee5a15 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/btcoex.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef BTCOEX_H +#define BTCOEX_H + +#include "hw.h" + +#define ATH_WLANACTIVE_GPIO 5 +#define ATH_BTACTIVE_GPIO 6 +#define ATH_BTPRIORITY_GPIO 7 + +#define ATH_BTCOEX_DEF_BT_PERIOD 45 +#define ATH_BTCOEX_DEF_DUTY_CYCLE 55 +#define ATH_BTCOEX_BTSCAN_DUTY_CYCLE 90 +#define ATH_BTCOEX_BMISS_THRESH 50 + +#define ATH_BT_PRIORITY_TIME_THRESHOLD 1000 /* ms */ +#define ATH_BT_CNT_THRESHOLD 3 +#define ATH_BT_CNT_SCAN_THRESHOLD 15 + +enum ath_btcoex_scheme { + ATH_BTCOEX_CFG_NONE, + ATH_BTCOEX_CFG_2WIRE, + ATH_BTCOEX_CFG_3WIRE, +}; + +struct ath_btcoex_hw { + enum ath_btcoex_scheme scheme; + bool enabled; + u8 wlanactive_gpio; + u8 btactive_gpio; + u8 btpriority_gpio; + u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */ + u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */ + u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ +}; + +bool ath9k_hw_btcoex_supported(struct ath_hw *ah); +void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah); +void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah); +void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum); +void ath9k_hw_btcoex_set_weight(struct ath_hw *ah, + u32 bt_weight, + u32 wlan_weight); +void ath9k_hw_btcoex_enable(struct ath_hw *ah); +void ath9k_hw_btcoex_disable(struct ath_hw *ah); + +#endif diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c new file mode 100644 index 0000000..238a574 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -0,0 +1,1255 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hw.h" + +/* We can tune this as we go by monitoring really low values */ +#define ATH9K_NF_TOO_LOW -60 + +/* AR5416 may return very high value (like -31 dBm), in those cases the nf + * is incorrect and we should use the static NF value. Later we can try to + * find out why they are reporting these values */ + +static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf) +{ + if (nf > ATH9K_NF_TOO_LOW) { + ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + "noise floor value detected (%d) is " + "lower than what we think is a " + "reasonable value (%d)\n", + nf, ATH9K_NF_TOO_LOW); + return false; + } + return true; +} + +static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) +{ + int16_t nfval; + int16_t sort[ATH9K_NF_CAL_HIST_MAX]; + int i, j; + + for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++) + sort[i] = nfCalBuffer[i]; + + for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) { + for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) { + if (sort[j] > sort[j - 1]) { + nfval = sort[j]; + sort[j] = sort[j - 1]; + sort[j - 1] = nfval; + } + } + } + nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1]; + + return nfval; +} + +static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h, + int16_t *nfarray) +{ + int i; + + for (i = 0; i < NUM_NF_READINGS; i++) { + h[i].nfCalBuffer[h[i].currIndex] = nfarray[i]; + + if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX) + h[i].currIndex = 0; + + if (h[i].invalidNFcount > 0) { + if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE || + nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) { + h[i].invalidNFcount = ATH9K_NF_CAL_HIST_MAX; + } else { + h[i].invalidNFcount--; + h[i].privNF = nfarray[i]; + } + } else { + h[i].privNF = + ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer); + } + } + return; +} + +static void ath9k_hw_do_getnf(struct ath_hw *ah, + int16_t nfarray[NUM_NF_READINGS]) +{ + struct ath_common *common = ath9k_hw_common(ah); + int16_t nf; + + if (AR_SREV_9280_10_OR_LATER(ah)) + nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR); + else + nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR); + + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + ath_print(common, ATH_DBG_CALIBRATE, + "NF calibrated [ctl] [chain 0] is %d\n", nf); + nfarray[0] = nf; + + if (!AR_SREV_9285(ah)) { + if (AR_SREV_9280_10_OR_LATER(ah)) + nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), + AR9280_PHY_CH1_MINCCA_PWR); + else + nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), + AR_PHY_CH1_MINCCA_PWR); + + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + ath_print(common, ATH_DBG_CALIBRATE, + "NF calibrated [ctl] [chain 1] is %d\n", nf); + nfarray[1] = nf; + + if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) { + nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), + AR_PHY_CH2_MINCCA_PWR); + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + ath_print(common, ATH_DBG_CALIBRATE, + "NF calibrated [ctl] [chain 2] is %d\n", nf); + nfarray[2] = nf; + } + } + + if (AR_SREV_9280_10_OR_LATER(ah)) + nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), + AR9280_PHY_EXT_MINCCA_PWR); + else + nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), + AR_PHY_EXT_MINCCA_PWR); + + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + ath_print(common, ATH_DBG_CALIBRATE, + "NF calibrated [ext] [chain 0] is %d\n", nf); + nfarray[3] = nf; + + if 
(!AR_SREV_9285(ah)) { + if (AR_SREV_9280_10_OR_LATER(ah)) + nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), + AR9280_PHY_CH1_EXT_MINCCA_PWR); + else + nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), + AR_PHY_CH1_EXT_MINCCA_PWR); + + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + ath_print(common, ATH_DBG_CALIBRATE, + "NF calibrated [ext] [chain 1] is %d\n", nf); + nfarray[4] = nf; + + if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) { + nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), + AR_PHY_CH2_EXT_MINCCA_PWR); + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + ath_print(common, ATH_DBG_CALIBRATE, + "NF calibrated [ext] [chain 2] is %d\n", nf); + nfarray[5] = nf; + } + } +} + +static bool getNoiseFloorThresh(struct ath_hw *ah, + enum ieee80211_band band, + int16_t *nft) +{ + switch (band) { + case IEEE80211_BAND_5GHZ: + *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5); + break; + case IEEE80211_BAND_2GHZ: + *nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2); + break; + default: + BUG_ON(1); + return false; + } + + return true; +} + +static void ath9k_hw_setup_calibration(struct ath_hw *ah, + struct ath9k_cal_list *currCal) +{ + struct ath_common *common = ath9k_hw_common(ah); + + REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0), + AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX, + currCal->calData->calCountMax); + + switch (currCal->calData->calType) { + case IQ_MISMATCH_CAL: + REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); + ath_print(common, ATH_DBG_CALIBRATE, + "starting IQ Mismatch Calibration\n"); + break; + case ADC_GAIN_CAL: + REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN); + ath_print(common, ATH_DBG_CALIBRATE, + "starting ADC Gain Calibration\n"); + break; + case ADC_DC_CAL: + REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER); + ath_print(common, ATH_DBG_CALIBRATE, + "starting ADC DC Calibration\n"); + break; + case ADC_DC_INIT_CAL: + REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT); + ath_print(common, ATH_DBG_CALIBRATE, + "starting Init ADC DC Calibration\n"); + break; + } + + REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0), + AR_PHY_TIMING_CTRL4_DO_CAL); +} + +static void ath9k_hw_reset_calibration(struct ath_hw *ah, + struct ath9k_cal_list *currCal) +{ + int i; + + ath9k_hw_setup_calibration(ah, currCal); + + currCal->calState = CAL_RUNNING; + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + ah->meas0.sign[i] = 0; + ah->meas1.sign[i] = 0; + ah->meas2.sign[i] = 0; + ah->meas3.sign[i] = 0; + } + + ah->cal_samples = 0; +} + +static bool ath9k_hw_per_calibration(struct ath_hw *ah, + struct ath9k_channel *ichan, + u8 rxchainmask, + struct ath9k_cal_list *currCal) +{ + bool iscaldone = false; + + if (currCal->calState == CAL_RUNNING) { + if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) & + AR_PHY_TIMING_CTRL4_DO_CAL)) { + + currCal->calData->calCollect(ah); + ah->cal_samples++; + + if (ah->cal_samples >= currCal->calData->calNumSamples) { + int i, numChains = 0; + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + if (rxchainmask & (1 << i)) + numChains++; + } + + currCal->calData->calPostProc(ah, numChains); + ichan->CalValid |= currCal->calData->calType; + currCal->calState = CAL_DONE; + iscaldone = true; + } else { + ath9k_hw_setup_calibration(ah, currCal); + } + } + } else if (!(ichan->CalValid & currCal->calData->calType)) { + ath9k_hw_reset_calibration(ah, currCal); + } + + return iscaldone; +} + +/* Assumes you are talking about the currently configured channel */ +static bool ath9k_hw_iscal_supported(struct ath_hw *ah, + enum ath9k_cal_types calType) +{ + struct ieee80211_conf *conf = 
&ath9k_hw_common(ah)->hw->conf; + + switch (calType & ah->supp_cals) { + case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */ + return true; + case ADC_GAIN_CAL: + case ADC_DC_CAL: + if (!(conf->channel->band == IEEE80211_BAND_2GHZ && + conf_is_ht20(conf))) + return true; + break; + } + return false; +} + +static void ath9k_hw_iqcal_collect(struct ath_hw *ah) +{ + int i; + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + ah->totalPowerMeasI[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); + ah->totalPowerMeasQ[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); + ah->totalIqCorrMeas[i] += + (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); + ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", + ah->cal_samples, i, ah->totalPowerMeasI[i], + ah->totalPowerMeasQ[i], + ah->totalIqCorrMeas[i]); + } +} + +static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah) +{ + int i; + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + ah->totalAdcIOddPhase[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); + ah->totalAdcIEvenPhase[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); + ah->totalAdcQOddPhase[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); + ah->totalAdcQEvenPhase[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); + + ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " + "oddq=0x%08x; evenq=0x%08x;\n", + ah->cal_samples, i, + ah->totalAdcIOddPhase[i], + ah->totalAdcIEvenPhase[i], + ah->totalAdcQOddPhase[i], + ah->totalAdcQEvenPhase[i]); + } +} + +static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah) +{ + int i; + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + ah->totalAdcDcOffsetIOddPhase[i] += + (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); + ah->totalAdcDcOffsetIEvenPhase[i] += + (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); + ah->totalAdcDcOffsetQOddPhase[i] += + (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); + ah->totalAdcDcOffsetQEvenPhase[i] += + (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); + + ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, + "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " + "oddq=0x%08x; evenq=0x%08x;\n", + ah->cal_samples, i, + ah->totalAdcDcOffsetIOddPhase[i], + ah->totalAdcDcOffsetIEvenPhase[i], + ah->totalAdcDcOffsetQOddPhase[i], + ah->totalAdcDcOffsetQEvenPhase[i]); + } +} + +static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 powerMeasQ, powerMeasI, iqCorrMeas; + u32 qCoffDenom, iCoffDenom; + int32_t qCoff, iCoff; + int iqCorrNeg, i; + + for (i = 0; i < numChains; i++) { + powerMeasI = ah->totalPowerMeasI[i]; + powerMeasQ = ah->totalPowerMeasQ[i]; + iqCorrMeas = ah->totalIqCorrMeas[i]; + + ath_print(common, ATH_DBG_CALIBRATE, + "Starting IQ Cal and Correction for Chain %d\n", + i); + + ath_print(common, ATH_DBG_CALIBRATE, + "Orignal: Chn %diq_corr_meas = 0x%08x\n", + i, ah->totalIqCorrMeas[i]); + + iqCorrNeg = 0; + + if (iqCorrMeas > 0x80000000) { + iqCorrMeas = (0xffffffff - iqCorrMeas) + 1; + iqCorrNeg = 1; + } + + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); + ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n", + iqCorrNeg); + + iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128; + qCoffDenom = powerMeasQ / 64; + + if ((powerMeasQ != 0) && (iCoffDenom != 0) && + (qCoffDenom != 0)) { + iCoff = iqCorrMeas / iCoffDenom; + qCoff = powerMeasI / qCoffDenom - 64; + ath_print(common, ATH_DBG_CALIBRATE, + 
"Chn %d iCoff = 0x%08x\n", i, iCoff); + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d qCoff = 0x%08x\n", i, qCoff); + + iCoff = iCoff & 0x3f; + ath_print(common, ATH_DBG_CALIBRATE, + "New: Chn %d iCoff = 0x%08x\n", i, iCoff); + if (iqCorrNeg == 0x0) + iCoff = 0x40 - iCoff; + + if (qCoff > 15) + qCoff = 15; + else if (qCoff <= -16) + qCoff = 16; + + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", + i, iCoff, qCoff); + + REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i), + AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, + iCoff); + REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i), + AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, + qCoff); + ath_print(common, ATH_DBG_CALIBRATE, + "IQ Cal and Correction done for Chain %d\n", + i); + } + } + + REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0), + AR_PHY_TIMING_CTRL4_IQCORR_ENABLE); +} + +static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset; + u32 qGainMismatch, iGainMismatch, val, i; + + for (i = 0; i < numChains; i++) { + iOddMeasOffset = ah->totalAdcIOddPhase[i]; + iEvenMeasOffset = ah->totalAdcIEvenPhase[i]; + qOddMeasOffset = ah->totalAdcQOddPhase[i]; + qEvenMeasOffset = ah->totalAdcQEvenPhase[i]; + + ath_print(common, ATH_DBG_CALIBRATE, + "Starting ADC Gain Cal for Chain %d\n", i); + + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_odd_i = 0x%08x\n", i, + iOddMeasOffset); + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_even_i = 0x%08x\n", i, + iEvenMeasOffset); + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_odd_q = 0x%08x\n", i, + qOddMeasOffset); + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_even_q = 0x%08x\n", i, + qEvenMeasOffset); + + if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) { + iGainMismatch = + ((iEvenMeasOffset * 32) / + iOddMeasOffset) & 0x3f; + qGainMismatch = + ((qOddMeasOffset * 32) / + qEvenMeasOffset) & 0x3f; + + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d gain_mismatch_i = 0x%08x\n", i, + iGainMismatch); + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d gain_mismatch_q = 0x%08x\n", i, + qGainMismatch); + + val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); + val &= 0xfffff000; + val |= (qGainMismatch) | (iGainMismatch << 6); + REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); + + ath_print(common, ATH_DBG_CALIBRATE, + "ADC Gain Cal done for Chain %d\n", i); + } + } + + REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0), + REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) | + AR_PHY_NEW_ADC_GAIN_CORR_ENABLE); +} + +static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 iOddMeasOffset, iEvenMeasOffset, val, i; + int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch; + const struct ath9k_percal_data *calData = + ah->cal_list_curr->calData; + u32 numSamples = + (1 << (calData->calCountMax + 5)) * calData->calNumSamples; + + for (i = 0; i < numChains; i++) { + iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i]; + iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i]; + qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i]; + qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i]; + + ath_print(common, ATH_DBG_CALIBRATE, + "Starting ADC DC Offset Cal for Chain %d\n", i); + + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_odd_i = %d\n", i, + iOddMeasOffset); + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_even_i = %d\n", i, + iEvenMeasOffset); + ath_print(common, 
ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_odd_q = %d\n", i, + qOddMeasOffset); + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_even_q = %d\n", i, + qEvenMeasOffset); + + iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) / + numSamples) & 0x1ff; + qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) / + numSamples) & 0x1ff; + + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d dc_offset_mismatch_i = 0x%08x\n", i, + iDcMismatch); + ath_print(common, ATH_DBG_CALIBRATE, + "Chn %d dc_offset_mismatch_q = 0x%08x\n", i, + qDcMismatch); + + val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); + val &= 0xc0000fff; + val |= (qDcMismatch << 12) | (iDcMismatch << 21); + REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); + + ath_print(common, ATH_DBG_CALIBRATE, + "ADC DC Offset Cal done for Chain %d\n", i); + } + + REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0), + REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) | + AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE); +} + +/* This is done for the currently configured channel */ +bool ath9k_hw_reset_calvalid(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_conf *conf = &common->hw->conf; + struct ath9k_cal_list *currCal = ah->cal_list_curr; + + if (!ah->curchan) + return true; + + if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah)) + return true; + + if (currCal == NULL) + return true; + + if (currCal->calState != CAL_DONE) { + ath_print(common, ATH_DBG_CALIBRATE, + "Calibration state incorrect, %d\n", + currCal->calState); + return true; + } + + if (!ath9k_hw_iscal_supported(ah, currCal->calData->calType)) + return true; + + ath_print(common, ATH_DBG_CALIBRATE, + "Resetting Cal %d state for channel %u\n", + currCal->calData->calType, conf->channel->center_freq); + + ah->curchan->CalValid &= ~currCal->calData->calType; + currCal->calState = CAL_WAITING; + + return false; +} +EXPORT_SYMBOL(ath9k_hw_reset_calvalid); + +void ath9k_hw_start_nfcal(struct ath_hw *ah) +{ + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_ENABLE_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_NO_UPDATE_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); +} + +void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) +{ + struct ath9k_nfcal_hist *h; + int i, j; + int32_t val; + const u32 ar5416_cca_regs[6] = { + AR_PHY_CCA, + AR_PHY_CH1_CCA, + AR_PHY_CH2_CCA, + AR_PHY_EXT_CCA, + AR_PHY_CH1_EXT_CCA, + AR_PHY_CH2_EXT_CCA + }; + u8 chainmask, rx_chain_status; + + rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK); + if (AR_SREV_9285(ah)) + chainmask = 0x9; + else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) { + if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4)) + chainmask = 0x1B; + else + chainmask = 0x09; + } else { + if (rx_chain_status & 0x4) + chainmask = 0x3F; + else if (rx_chain_status & 0x2) + chainmask = 0x1B; + else + chainmask = 0x09; + } + + h = ah->nfCalHist; + + for (i = 0; i < NUM_NF_READINGS; i++) { + if (chainmask & (1 << i)) { + val = REG_READ(ah, ar5416_cca_regs[i]); + val &= 0xFFFFFE00; + val |= (((u32) (h[i].privNF) << 1) & 0x1ff); + REG_WRITE(ah, ar5416_cca_regs[i], val); + } + } + + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_ENABLE_NF); + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_NO_UPDATE_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); + + for (j = 0; j < 5; j++) { + if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & + AR_PHY_AGC_CONTROL_NF) == 0) + break; + udelay(50); + } + + for (i = 0; i < NUM_NF_READINGS; i++) { + if (chainmask 
& (1 << i)) { + val = REG_READ(ah, ar5416_cca_regs[i]); + val &= 0xFFFFFE00; + val |= (((u32) (-50) << 1) & 0x1ff); + REG_WRITE(ah, ar5416_cca_regs[i], val); + } + } +} + +int16_t ath9k_hw_getnf(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ath_common *common = ath9k_hw_common(ah); + int16_t nf, nfThresh; + int16_t nfarray[NUM_NF_READINGS] = { 0 }; + struct ath9k_nfcal_hist *h; + struct ieee80211_channel *c = chan->chan; + + chan->channelFlags &= (~CHANNEL_CW_INT); + if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { + ath_print(common, ATH_DBG_CALIBRATE, + "NF did not complete in calibration window\n"); + nf = 0; + chan->rawNoiseFloor = nf; + return chan->rawNoiseFloor; + } else { + ath9k_hw_do_getnf(ah, nfarray); + nf = nfarray[0]; + if (getNoiseFloorThresh(ah, c->band, &nfThresh) + && nf > nfThresh) { + ath_print(common, ATH_DBG_CALIBRATE, + "noise floor failed detected; " + "detected %d, threshold %d\n", + nf, nfThresh); + chan->channelFlags |= CHANNEL_CW_INT; + } + } + + h = ah->nfCalHist; + + ath9k_hw_update_nfcal_hist_buffer(h, nfarray); + chan->rawNoiseFloor = h[0].privNF; + + return chan->rawNoiseFloor; +} + +void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah) +{ + int i, j; + s16 noise_floor; + + if (AR_SREV_9280(ah)) + noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE; + else if (AR_SREV_9285(ah)) + noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE; + else if (AR_SREV_9287(ah)) + noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE; + else + noise_floor = AR_PHY_CCA_MAX_AR5416_GOOD_VALUE; + + for (i = 0; i < NUM_NF_READINGS; i++) { + ah->nfCalHist[i].currIndex = 0; + ah->nfCalHist[i].privNF = noise_floor; + ah->nfCalHist[i].invalidNFcount = + AR_PHY_CCA_FILTERWINDOW_LENGTH; + for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) { + ah->nfCalHist[i].nfCalBuffer[j] = noise_floor; + } + } +} + +s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan) +{ + s16 nf; + + if (chan->rawNoiseFloor == 0) + nf = -96; + else + nf = chan->rawNoiseFloor; + + if (!ath9k_hw_nf_in_range(ah, nf)) + nf = ATH_DEFAULT_NOISE_FLOOR; + + return nf; +} +EXPORT_SYMBOL(ath9k_hw_getchan_noise); + +static void ath9k_olc_temp_compensation_9287(struct ath_hw *ah) +{ + u32 rddata; + int32_t delta, currPDADC, slope; + + rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4); + currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT); + + if (ah->initPDADC == 0 || currPDADC == 0) { + /* + * Zero value indicates that no frames have been transmitted yet, + * can't do temperature compensation until frames are transmitted. 
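+ * Once both readings are available, the correction below is the PDADC
+ * drift (currPDADC - initPDADC) scaled by the EEP_TEMPSENSE_SLOPE
+ * value read from the EEPROM.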
+ */ + return; + } else { + slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE); + + if (slope == 0) { /* to avoid divide by zero case */ + delta = 0; + } else { + delta = ((currPDADC - ah->initPDADC)*4) / slope; + } + REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11, + AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta); + REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11, + AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta); + } +} + +static void ath9k_olc_temp_compensation(struct ath_hw *ah) +{ + u32 rddata, i; + int delta, currPDADC, regval; + + if (OLC_FOR_AR9287_10_LATER) { + ath9k_olc_temp_compensation_9287(ah); + } else { + rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4); + currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT); + + if (ah->initPDADC == 0 || currPDADC == 0) { + return; + } else { + if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G)) + delta = (currPDADC - ah->initPDADC + 4) / 8; + else + delta = (currPDADC - ah->initPDADC + 5) / 10; + + if (delta != ah->PDADCdelta) { + ah->PDADCdelta = delta; + for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) { + regval = ah->originalGain[i] - delta; + if (regval < 0) + regval = 0; + + REG_RMW_FIELD(ah, + AR_PHY_TX_GAIN_TBL1 + i * 4, + AR_PHY_TX_GAIN, regval); + } + } + } + } +} + +static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset) +{ + u32 regVal; + unsigned int i; + u32 regList [][2] = { + { 0x786c, 0 }, + { 0x7854, 0 }, + { 0x7820, 0 }, + { 0x7824, 0 }, + { 0x7868, 0 }, + { 0x783c, 0 }, + { 0x7838, 0 } , + { 0x7828, 0 } , + }; + + for (i = 0; i < ARRAY_SIZE(regList); i++) + regList[i][1] = REG_READ(ah, regList[i][0]); + + regVal = REG_READ(ah, 0x7834); + regVal &= (~(0x1)); + REG_WRITE(ah, 0x7834, regVal); + regVal = REG_READ(ah, 0x9808); + regVal |= (0x1 << 27); + REG_WRITE(ah, 0x9808, regVal); + + /* 786c,b23,1, pwddac=1 */ + REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1); + /* 7854, b5,1, pdrxtxbb=1 */ + REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1); + /* 7854, b7,1, pdv2i=1 */ + REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1); + /* 7854, b8,1, pddacinterface=1 */ + REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1); + /* 7824,b12,0, offcal=0 */ + REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0); + /* 7838, b1,0, pwddb=0 */ + REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0); + /* 7820,b11,0, enpacal=0 */ + REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0); + /* 7820,b25,1, pdpadrv1=0 */ + REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0); + /* 7820,b24,0, pdpadrv2=0 */ + REG_RMW_FIELD(ah, AR9285_AN_RF2G1,AR9285_AN_RF2G1_PDPADRV2,0); + /* 7820,b23,0, pdpaout=0 */ + REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0); + /* 783c,b14-16,7, padrvgn2tab_0=7 */ + REG_RMW_FIELD(ah, AR9285_AN_RF2G8,AR9285_AN_RF2G8_PADRVGN2TAB0, 7); + /* + * 7838,b29-31,0, padrvgn1tab_0=0 + * does not matter since we turn it off + */ + REG_RMW_FIELD(ah, AR9285_AN_RF2G7,AR9285_AN_RF2G7_PADRVGN2TAB0, 0); + + REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff); + + /* Set: + * localmode=1,bmode=1,bmoderxtx=1,synthon=1, + * txon=1,paon=1,oscon=1,synthon_force=1 + */ + REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0); + udelay(30); + REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0); + + /* find off_6_1; */ + for (i = 6; i > 0; i--) { + regVal = REG_READ(ah, 0x7834); + regVal |= (1 << (20 + i)); + REG_WRITE(ah, 0x7834, regVal); + udelay(1); + //regVal = REG_READ(ah, 0x7834); + regVal &= (~(0x1 << (20 + i))); + regVal |= (MS(REG_READ(ah, 
0x7840), AR9285_AN_RXTXBB1_SPARE9) + << (20 + i)); + REG_WRITE(ah, 0x7834, regVal); + } + + regVal = (regVal >>20) & 0x7f; + + /* Update PA cal info */ + if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) { + if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT) + ah->pacal_info.max_skipcount = + 2 * ah->pacal_info.max_skipcount; + ah->pacal_info.skipcount = ah->pacal_info.max_skipcount; + } else { + ah->pacal_info.max_skipcount = 1; + ah->pacal_info.skipcount = 0; + ah->pacal_info.prev_offset = regVal; + } + + regVal = REG_READ(ah, 0x7834); + regVal |= 0x1; + REG_WRITE(ah, 0x7834, regVal); + regVal = REG_READ(ah, 0x9808); + regVal &= (~(0x1 << 27)); + REG_WRITE(ah, 0x9808, regVal); + + for (i = 0; i < ARRAY_SIZE(regList); i++) + REG_WRITE(ah, regList[i][0], regList[i][1]); +} + +static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 regVal; + int i, offset, offs_6_1, offs_0; + u32 ccomp_org, reg_field; + u32 regList[][2] = { + { 0x786c, 0 }, + { 0x7854, 0 }, + { 0x7820, 0 }, + { 0x7824, 0 }, + { 0x7868, 0 }, + { 0x783c, 0 }, + { 0x7838, 0 }, + }; + + ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n"); + + /* PA CAL is not needed for high power solution */ + if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == + AR5416_EEP_TXGAIN_HIGH_POWER) + return; + + if (AR_SREV_9285_11(ah)) { + REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14)); + udelay(10); + } + + for (i = 0; i < ARRAY_SIZE(regList); i++) + regList[i][1] = REG_READ(ah, regList[i][0]); + + regVal = REG_READ(ah, 0x7834); + regVal &= (~(0x1)); + REG_WRITE(ah, 0x7834, regVal); + regVal = REG_READ(ah, 0x9808); + regVal |= (0x1 << 27); + REG_WRITE(ah, 0x9808, regVal); + + REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1); + REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1); + REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1); + REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1); + REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0); + REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0); + REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0); + REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0); + REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0); + REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0); + REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7); + REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0); + ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP); + REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf); + + REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0); + udelay(30); + REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0); + REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0); + + for (i = 6; i > 0; i--) { + regVal = REG_READ(ah, 0x7834); + regVal |= (1 << (19 + i)); + REG_WRITE(ah, 0x7834, regVal); + udelay(1); + regVal = REG_READ(ah, 0x7834); + regVal &= (~(0x1 << (19 + i))); + reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9); + regVal |= (reg_field << (19 + i)); + REG_WRITE(ah, 0x7834, regVal); + } + + REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1); + udelay(1); + reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9); + REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field); + offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS); + 
offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP); + + offset = (offs_6_1<<1) | offs_0; + offset = offset - 0; + offs_6_1 = offset>>1; + offs_0 = offset & 1; + + if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) { + if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT) + ah->pacal_info.max_skipcount = + 2 * ah->pacal_info.max_skipcount; + ah->pacal_info.skipcount = ah->pacal_info.max_skipcount; + } else { + ah->pacal_info.max_skipcount = 1; + ah->pacal_info.skipcount = 0; + ah->pacal_info.prev_offset = offset; + } + + REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1); + REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0); + + regVal = REG_READ(ah, 0x7834); + regVal |= 0x1; + REG_WRITE(ah, 0x7834, regVal); + regVal = REG_READ(ah, 0x9808); + regVal &= (~(0x1 << 27)); + REG_WRITE(ah, 0x9808, regVal); + + for (i = 0; i < ARRAY_SIZE(regList); i++) + REG_WRITE(ah, regList[i][0], regList[i][1]); + + REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org); + + if (AR_SREV_9285_11(ah)) + REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT); + +} + +bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan, + u8 rxchainmask, bool longcal) +{ + bool iscaldone = true; + struct ath9k_cal_list *currCal = ah->cal_list_curr; + + if (currCal && + (currCal->calState == CAL_RUNNING || + currCal->calState == CAL_WAITING)) { + iscaldone = ath9k_hw_per_calibration(ah, chan, + rxchainmask, currCal); + if (iscaldone) { + ah->cal_list_curr = currCal = currCal->calNext; + + if (currCal->calState == CAL_WAITING) { + iscaldone = false; + ath9k_hw_reset_calibration(ah, currCal); + } + } + } + + /* Do NF cal only at longer intervals */ + if (longcal) { + /* Do periodic PAOffset Cal */ + if (AR_SREV_9271(ah)) + ath9k_hw_9271_pa_cal(ah, false); + else if (AR_SREV_9285_11_OR_LATER(ah)) { + if (!ah->pacal_info.skipcount) + ath9k_hw_9285_pa_cal(ah, false); + else + ah->pacal_info.skipcount--; + } + + if (OLC_FOR_AR9280_20_LATER || OLC_FOR_AR9287_10_LATER) + ath9k_olc_temp_compensation(ah); + + /* Get the value from the previous NF cal and update history buffer */ + ath9k_hw_getnf(ah, chan); + + /* + * Load the NF from history buffer of the current channel. + * NF is slow time-variant, so it is OK to use a historical value. 
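+ * (The history buffer holds the last ATH9K_NF_CAL_HIST_MAX readings per
+ * chain; ath9k_hw_getnf() above normally stores their median in privNF,
+ * which is the value loaded into the CCA registers here.)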
+ */ + ath9k_hw_loadnf(ah, ah->curchan); + + ath9k_hw_start_nfcal(ah); + } + + return iscaldone; +} +EXPORT_SYMBOL(ath9k_hw_calibrate); + +/* Carrier leakage Calibration fix */ +static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan) +{ + struct ath_common *common = ath9k_hw_common(ah); + + REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE); + if (IS_CHAN_HT20(chan)) { + REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE); + REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN); + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_FLTR_CAL); + REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); + if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { + ath_print(common, ATH_DBG_CALIBRATE, "offset " + "calibration failed to complete in " + "1ms; noisy ??\n"); + return false; + } + REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN); + REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE); + REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE); + } + REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL); + REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); + if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, + 0, AH_WAIT_TIMEOUT)) { + ath_print(common, ATH_DBG_CALIBRATE, "offset calibration " + "failed to complete in 1ms; noisy ??\n"); + return false; + } + + REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC); + REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE); + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL); + + return true; +} + +bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) { + if (!ar9285_clc(ah, chan)) + return false; + } else { + if (AR_SREV_9280_10_OR_LATER(ah)) { + if (!AR_SREV_9287_10_OR_LATER(ah)) + REG_CLR_BIT(ah, AR_PHY_ADC_CTL, + AR_PHY_ADC_CTL_OFF_PWDADC); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_FLTR_CAL); + } + + /* Calibrate the AGC */ + REG_WRITE(ah, AR_PHY_AGC_CONTROL, + REG_READ(ah, AR_PHY_AGC_CONTROL) | + AR_PHY_AGC_CONTROL_CAL); + + /* Poll for offset calibration complete */ + if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, + 0, AH_WAIT_TIMEOUT)) { + ath_print(common, ATH_DBG_CALIBRATE, + "offset calibration failed to " + "complete in 1ms; noisy environment?\n"); + return false; + } + + if (AR_SREV_9280_10_OR_LATER(ah)) { + if (!AR_SREV_9287_10_OR_LATER(ah)) + REG_SET_BIT(ah, AR_PHY_ADC_CTL, + AR_PHY_ADC_CTL_OFF_PWDADC); + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_FLTR_CAL); + } + } + + /* Do PA Calibration */ + if (AR_SREV_9271(ah)) + ath9k_hw_9271_pa_cal(ah, true); + else if (AR_SREV_9285_11_OR_LATER(ah)) + ath9k_hw_9285_pa_cal(ah, true); + + /* Do NF Calibration after DC offset and other calibrations */ + REG_WRITE(ah, AR_PHY_AGC_CONTROL, + REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF); + + ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; + + /* Enable IQ, ADC Gain and ADC DC offset CALs */ + if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) { + if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) { + INIT_CAL(&ah->adcgain_caldata); + INSERT_CAL(ah, &ah->adcgain_caldata); + ath_print(common, ATH_DBG_CALIBRATE, + 
"enabling ADC Gain Calibration.\n"); + } + if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) { + INIT_CAL(&ah->adcdc_caldata); + INSERT_CAL(ah, &ah->adcdc_caldata); + ath_print(common, ATH_DBG_CALIBRATE, + "enabling ADC DC Calibration.\n"); + } + if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) { + INIT_CAL(&ah->iq_caldata); + INSERT_CAL(ah, &ah->iq_caldata); + ath_print(common, ATH_DBG_CALIBRATE, + "enabling IQ Calibration.\n"); + } + + ah->cal_list_curr = ah->cal_list; + + if (ah->cal_list_curr) + ath9k_hw_reset_calibration(ah, ah->cal_list_curr); + } + + chan->CalValid = 0; + + return true; +} + +const struct ath9k_percal_data iq_cal_multi_sample = { + IQ_MISMATCH_CAL, + MAX_CAL_SAMPLES, + PER_MIN_LOG_COUNT, + ath9k_hw_iqcal_collect, + ath9k_hw_iqcalibrate +}; +const struct ath9k_percal_data iq_cal_single_sample = { + IQ_MISMATCH_CAL, + MIN_CAL_SAMPLES, + PER_MAX_LOG_COUNT, + ath9k_hw_iqcal_collect, + ath9k_hw_iqcalibrate +}; +const struct ath9k_percal_data adc_gain_cal_multi_sample = { + ADC_GAIN_CAL, + MAX_CAL_SAMPLES, + PER_MIN_LOG_COUNT, + ath9k_hw_adc_gaincal_collect, + ath9k_hw_adc_gaincal_calibrate +}; +const struct ath9k_percal_data adc_gain_cal_single_sample = { + ADC_GAIN_CAL, + MIN_CAL_SAMPLES, + PER_MAX_LOG_COUNT, + ath9k_hw_adc_gaincal_collect, + ath9k_hw_adc_gaincal_calibrate +}; +const struct ath9k_percal_data adc_dc_cal_multi_sample = { + ADC_DC_CAL, + MAX_CAL_SAMPLES, + PER_MIN_LOG_COUNT, + ath9k_hw_adc_dccal_collect, + ath9k_hw_adc_dccal_calibrate +}; +const struct ath9k_percal_data adc_dc_cal_single_sample = { + ADC_DC_CAL, + MIN_CAL_SAMPLES, + PER_MAX_LOG_COUNT, + ath9k_hw_adc_dccal_collect, + ath9k_hw_adc_dccal_calibrate +}; +const struct ath9k_percal_data adc_init_dc_cal = { + ADC_DC_INIT_CAL, + MIN_CAL_SAMPLES, + INIT_LOG_COUNT, + ath9k_hw_adc_dccal_collect, + ath9k_hw_adc_dccal_calibrate +}; diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h new file mode 100644 index 0000000..b2c873e --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/calib.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef CALIB_H +#define CALIB_H + +#include "hw.h" + +extern const struct ath9k_percal_data iq_cal_multi_sample; +extern const struct ath9k_percal_data iq_cal_single_sample; +extern const struct ath9k_percal_data adc_gain_cal_multi_sample; +extern const struct ath9k_percal_data adc_gain_cal_single_sample; +extern const struct ath9k_percal_data adc_dc_cal_multi_sample; +extern const struct ath9k_percal_data adc_dc_cal_single_sample; +extern const struct ath9k_percal_data adc_init_dc_cal; + +#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85 +#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112 +#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118 +#define AR_PHY_CCA_MAX_AR9287_GOOD_VALUE -118 +#define AR_PHY_CCA_MAX_HIGH_VALUE -62 +#define AR_PHY_CCA_MIN_BAD_VALUE -140 +#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3 +#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5 + +#define NUM_NF_READINGS 6 +#define ATH9K_NF_CAL_HIST_MAX 5 + +struct ar5416IniArray { + u32 *ia_array; + u32 ia_rows; + u32 ia_columns; +}; + +#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \ + (iniarray)->ia_array = (u32 *)(array); \ + (iniarray)->ia_rows = (rows); \ + (iniarray)->ia_columns = (columns); \ + } while (0) + +#define INI_RA(iniarray, row, column) \ + (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)]) + +#define INIT_CAL(_perCal) do { \ + (_perCal)->calState = CAL_WAITING; \ + (_perCal)->calNext = NULL; \ + } while (0) + +#define INSERT_CAL(_ahp, _perCal) \ + do { \ + if ((_ahp)->cal_list_last == NULL) { \ + (_ahp)->cal_list = \ + (_ahp)->cal_list_last = (_perCal); \ + ((_ahp)->cal_list_last)->calNext = (_perCal); \ + } else { \ + ((_ahp)->cal_list_last)->calNext = (_perCal); \ + (_ahp)->cal_list_last = (_perCal); \ + (_perCal)->calNext = (_ahp)->cal_list; \ + } \ + } while (0) + +enum ath9k_cal_types { + ADC_DC_INIT_CAL = 0x1, + ADC_GAIN_CAL = 0x2, + ADC_DC_CAL = 0x4, + IQ_MISMATCH_CAL = 0x8 +}; + +enum ath9k_cal_state { + CAL_INACTIVE, + CAL_WAITING, + CAL_RUNNING, + CAL_DONE +}; + +#define MIN_CAL_SAMPLES 1 +#define MAX_CAL_SAMPLES 64 +#define INIT_LOG_COUNT 5 +#define PER_MIN_LOG_COUNT 2 +#define PER_MAX_LOG_COUNT 10 + +struct ath9k_percal_data { + enum ath9k_cal_types calType; + u32 calNumSamples; + u32 calCountMax; + void (*calCollect) (struct ath_hw *); + void (*calPostProc) (struct ath_hw *, u8); +}; + +struct ath9k_cal_list { + const struct ath9k_percal_data *calData; + enum ath9k_cal_state calState; + struct ath9k_cal_list *calNext; +}; + +struct ath9k_nfcal_hist { + int16_t nfCalBuffer[ATH9K_NF_CAL_HIST_MAX]; + u8 currIndex; + int16_t privNF; + u8 invalidNFcount; +}; + +#define MAX_PACAL_SKIPCOUNT 8 +struct ath9k_pacal_info{ + int32_t prev_offset; /* Previous value of PA offset value */ + int8_t max_skipcount; /* Max No. of times PACAL can be skipped */ + int8_t skipcount; /* No. 
of times the PACAL to be skipped */ +}; + +bool ath9k_hw_reset_calvalid(struct ath_hw *ah); +void ath9k_hw_start_nfcal(struct ath_hw *ah); +void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan); +int16_t ath9k_hw_getnf(struct ath_hw *ah, + struct ath9k_channel *chan); +void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah); +s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan); +bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan, + u8 rxchainmask, bool longcal); +bool ath9k_hw_init_cal(struct ath_hw *ah, + struct ath9k_channel *chan); + +#endif /* CALIB_H */ diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c new file mode 100644 index 0000000..4d775ae --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Module for common driver code between ath9k and ath9k_htc + */ + +#include +#include + +#include "common.h" + +MODULE_AUTHOR("Atheros Communications"); +MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards."); +MODULE_LICENSE("Dual BSD/GPL"); + +/* Common RX processing */ + +/* Assumes you've already done the endian to CPU conversion */ +static bool ath9k_rx_accept(struct ath_common *common, + struct sk_buff *skb, + struct ieee80211_rx_status *rxs, + struct ath_rx_status *rx_stats, + bool *decrypt_error) +{ + struct ath_hw *ah = common->ah; + struct ieee80211_hdr *hdr; + __le16 fc; + + hdr = (struct ieee80211_hdr *) skb->data; + fc = hdr->frame_control; + + if (!rx_stats->rs_datalen) + return false; + /* + * rs_status follows rs_datalen so if rs_datalen is too large + * we can take a hint that hardware corrupted it, so ignore + * those frames. + */ + if (rx_stats->rs_datalen > common->rx_bufsize) + return false; + + /* + * rs_more indicates chained descriptors which can be used + * to link buffers together for a sort of scatter-gather + * operation. + * + * The rx_stats->rs_status will not be set until the end of the + * chained descriptors so it can be ignored if rs_more is set. The + * rs_more will be false at the last element of the chained + * descriptors. + */ + if (!rx_stats->rs_more && rx_stats->rs_status != 0) { + if (rx_stats->rs_status & ATH9K_RXERR_CRC) + rxs->flag |= RX_FLAG_FAILED_FCS_CRC; + if (rx_stats->rs_status & ATH9K_RXERR_PHY) + return false; + + if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { + *decrypt_error = true; + } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { + if (ieee80211_is_ctl(fc)) + /* + * Sometimes, we get invalid + * MIC failures on valid control frames. + * Remove these mic errors. 
+ */ + rx_stats->rs_status &= ~ATH9K_RXERR_MIC; + else + rxs->flag |= RX_FLAG_MMIC_ERROR; + } + /* + * Reject error frames with the exception of + * decryption and MIC failures. For monitor mode, + * we also ignore the CRC error. + */ + if (ah->opmode == NL80211_IFTYPE_MONITOR) { + if (rx_stats->rs_status & + ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | + ATH9K_RXERR_CRC)) + return false; + } else { + if (rx_stats->rs_status & + ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { + return false; + } + } + } + return true; +} + +static u8 ath9k_process_rate(struct ath_common *common, + struct ieee80211_hw *hw, + struct ath_rx_status *rx_stats, + struct ieee80211_rx_status *rxs, + struct sk_buff *skb) +{ + struct ieee80211_supported_band *sband; + enum ieee80211_band band; + unsigned int i = 0; + + band = hw->conf.channel->band; + sband = hw->wiphy->bands[band]; + + if (rx_stats->rs_rate & 0x80) { + /* HT rate */ + rxs->flag |= RX_FLAG_HT; + if (rx_stats->rs_flags & ATH9K_RX_2040) + rxs->flag |= RX_FLAG_40MHZ; + if (rx_stats->rs_flags & ATH9K_RX_GI) + rxs->flag |= RX_FLAG_SHORT_GI; + return rx_stats->rs_rate & 0x7f; + } + + for (i = 0; i < sband->n_bitrates; i++) { + if (sband->bitrates[i].hw_value == rx_stats->rs_rate) + return i; + if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { + rxs->flag |= RX_FLAG_SHORTPRE; + return i; + } + } + + /* No valid hardware bitrate found -- we should not get here */ + ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected " + "0x%02x using 1 Mbit\n", rx_stats->rs_rate); + if ((common->debug_mask & ATH_DBG_XMIT)) + print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len); + + return 0; +} + +static void ath9k_process_rssi(struct ath_common *common, + struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ath_rx_status *rx_stats) +{ + struct ath_hw *ah = common->ah; + struct ieee80211_sta *sta; + struct ieee80211_hdr *hdr; + struct ath_node *an; + int last_rssi = ATH_RSSI_DUMMY_MARKER; + __le16 fc; + + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + + rcu_read_lock(); + /* + * XXX: use ieee80211_find_sta! This requires quite a bit of work + * under the current ath9k virtual wiphy implementation as we have + * no way of tying a vif to wiphy. Typically vifs are attached to + * at least one sdata of a wiphy on mac80211 but with ath9k virtual + * wiphy you'd have to iterate over every wiphy and each sdata. + */ + sta = ieee80211_find_sta_by_hw(hw, hdr->addr2); + if (sta) { + an = (struct ath_node *) sta->drv_priv; + if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && + !rx_stats->rs_moreaggr) + ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi); + last_rssi = an->last_rssi; + } + rcu_read_unlock(); + + if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) + rx_stats->rs_rssi = ATH_EP_RND(last_rssi, + ATH_RSSI_EP_MULTIPLIER); + if (rx_stats->rs_rssi < 0) + rx_stats->rs_rssi = 0; + + /* Update Beacon RSSI, this is used by ANI. */ + if (ieee80211_is_beacon(fc)) + ah->stats.avgbrssi = rx_stats->rs_rssi; +} + +/* + * For Decrypt or Demic errors, we only mark packet status here and always push + * up the frame up to let mac80211 handle the actual error case, be it no + * decryption key or real decryption error. This let us keep statistics there. 
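
The RSSI smoothing applied in ath9k_process_rssi() above is a fixed-point running average; the macros it uses (ATH_RSSI_LPF, ATH_EP_RND and friends) are defined in common.h further down. This standalone sketch mirrors their arithmetic so the scaling is easy to follow outside the driver:

#include <stdio.h>

#define EP_MULT		(1 << 7)	/* ATH_RSSI_EP_MULTIPLIER */
#define LPF_LEN		10		/* ATH_RSSI_LPF_LEN */
#define LPF_THRESHOLD	(-20)		/* RSSI_LPF_THRESHOLD */
#define DUMMY_MARKER	0x127		/* ATH_RSSI_DUMMY_MARKER */

/* x is the running average scaled by EP_MULT; y is a raw RSSI sample. */
static int lpf_update(int x, int y)
{
	if (y < LPF_THRESHOLD)
		return x;				/* sample ignored */
	if (x == DUMMY_MARKER)
		return y * EP_MULT;			/* first sample seeds the filter */
	return (x * (LPF_LEN - 1) + y * EP_MULT) / LPF_LEN;
}

/* Round the scaled average back to ordinary RSSI units (ATH_EP_RND). */
static int ep_round(int x)
{
	return (x % EP_MULT >= EP_MULT / 2) ? (x + EP_MULT - 1) / EP_MULT
					    : x / EP_MULT;
}

int main(void)
{
	int avg = DUMMY_MARKER;
	int samples[] = { 30, 30, 30, 10, 10 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		avg = lpf_update(avg, samples[i]);
		printf("sample %d -> smoothed %d\n", samples[i], ep_round(avg));
	}
	return 0;	/* prints 30, 30, 30, 28, 26: a slow decay, as intended */
}
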
+ */ +int ath9k_cmn_rx_skb_preprocess(struct ath_common *common, + struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ath_rx_status *rx_stats, + struct ieee80211_rx_status *rx_status, + bool *decrypt_error) +{ + struct ath_hw *ah = common->ah; + + memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); + if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error)) + return -EINVAL; + + ath9k_process_rssi(common, hw, skb, rx_stats); + + rx_status->rate_idx = ath9k_process_rate(common, hw, + rx_stats, rx_status, skb); + rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp); + rx_status->band = hw->conf.channel->band; + rx_status->freq = hw->conf.channel->center_freq; + rx_status->noise = common->ani.noise_floor; + rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; + rx_status->antenna = rx_stats->rs_antenna; + rx_status->flag |= RX_FLAG_TSFT; + + return 0; +} +EXPORT_SYMBOL(ath9k_cmn_rx_skb_preprocess); + +void ath9k_cmn_rx_skb_postprocess(struct ath_common *common, + struct sk_buff *skb, + struct ath_rx_status *rx_stats, + struct ieee80211_rx_status *rxs, + bool decrypt_error) +{ + struct ath_hw *ah = common->ah; + struct ieee80211_hdr *hdr; + int hdrlen, padpos, padsize; + u8 keyix; + __le16 fc; + + /* see if any padding is done by the hw and remove it */ + hdr = (struct ieee80211_hdr *) skb->data; + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + fc = hdr->frame_control; + padpos = ath9k_cmn_padpos(hdr->frame_control); + + /* The MAC header is padded to have 32-bit boundary if the + * packet payload is non-zero. The general calculation for + * padsize would take into account odd header lengths: + * padsize = (4 - padpos % 4) % 4; However, since only + * even-length headers are used, padding can only be 0 or 2 + * bytes and we can optimize this a bit. In addition, we must + * not try to remove padding from short control frames that do + * not have payload. */ + padsize = padpos & 3; + if (padsize && skb->len>=padpos+padsize+FCS_LEN) { + memmove(skb->data + padsize, skb->data, padpos); + skb_pull(skb, padsize); + } + + keyix = rx_stats->rs_keyix; + + if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) { + rxs->flag |= RX_FLAG_DECRYPTED; + } else if (ieee80211_has_protected(fc) + && !decrypt_error && skb->len >= hdrlen + 4) { + keyix = skb->data[hdrlen + 3] >> 6; + + if (test_bit(keyix, common->keymap)) + rxs->flag |= RX_FLAG_DECRYPTED; + } + if (ah->sw_mgmt_crypto && + (rxs->flag & RX_FLAG_DECRYPTED) && + ieee80211_is_mgmt(fc)) + /* Use software decrypt for management frames. */ + rxs->flag &= ~RX_FLAG_DECRYPTED; +} +EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess); + +int ath9k_cmn_padpos(__le16 frame_control) +{ + int padpos = 24; + if (ieee80211_has_a4(frame_control)) { + padpos += ETH_ALEN; + } + if (ieee80211_is_data_qos(frame_control)) { + padpos += IEEE80211_QOS_CTL_LEN; + } + + return padpos; +} +EXPORT_SYMBOL(ath9k_cmn_padpos); + +static int __init ath9k_cmn_init(void) +{ + return 0; +} +module_init(ath9k_cmn_init); + +static void __exit ath9k_cmn_exit(void) +{ + return; +} +module_exit(ath9k_cmn_exit); diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h new file mode 100644 index 0000000..042999c --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2009 Atheros Communications Inc. 
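
A quick numeric check of the padding rule explained in ath9k_cmn_rx_skb_postprocess() above. ath9k_cmn_padpos() returns 24 bytes for the base header, plus 6 for a fourth address and plus 2 for the QoS control field, so every padpos is even and the "padpos & 3" shortcut matches the general formula. The snippet below is an illustration, not driver code:

#include <assert.h>

static unsigned int pad_general(unsigned int padpos)
{
	return (4 - padpos % 4) % 4;
}

int main(void)
{
	unsigned int padpos;

	/* Header lengths produced by ath9k_cmn_padpos():                */
	assert(pad_general(24) == (24 & 3));	/* non-QoS, 3 addresses: 0 */
	assert(pad_general(26) == (26 & 3));	/* QoS data frame:       2 */
	assert(pad_general(30) == (30 & 3));	/* 4-address frame:      2 */
	assert(pad_general(32) == (32 & 3));	/* 4-address QoS frame:  0 */

	/* The two agree for every even length; an odd length such as 25
	 * would give 3 vs. 1, which is why the comment above stresses that
	 * only even-length headers occur here. */
	for (padpos = 24; padpos <= 32; padpos += 2)
		assert(pad_general(padpos) == (padpos & 3));

	return 0;
}
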
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "../ath.h" +#include "../debug.h" + +#include "hw.h" + +/* Common header for Atheros 802.11n base driver cores */ + +#define WME_NUM_TID 16 +#define WME_BA_BMP_SIZE 64 +#define WME_MAX_BA WME_BA_BMP_SIZE +#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA) + +#define WME_AC_BE 0 +#define WME_AC_BK 1 +#define WME_AC_VI 2 +#define WME_AC_VO 3 +#define WME_NUM_AC 4 + +#define ATH_RSSI_DUMMY_MARKER 0x127 +#define ATH_RSSI_LPF_LEN 10 +#define RSSI_LPF_THRESHOLD -20 +#define ATH_RSSI_EP_MULTIPLIER (1<<7) +#define ATH_EP_MUL(x, mul) ((x) * (mul)) +#define ATH_RSSI_IN(x) (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER)) +#define ATH_LPF_RSSI(x, y, len) \ + ((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y)) +#define ATH_RSSI_LPF(x, y) do { \ + if ((y) >= RSSI_LPF_THRESHOLD) \ + x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \ +} while (0) +#define ATH_EP_RND(x, mul) \ + ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) + +struct ath_atx_ac { + int sched; + int qnum; + struct list_head list; + struct list_head tid_q; +}; + +struct ath_buf_state { + int bfs_nframes; + u16 bfs_al; + u16 bfs_frmlen; + int bfs_seqno; + int bfs_tidno; + int bfs_retries; + u8 bf_type; + u32 bfs_keyix; + enum ath9k_key_type bfs_keytype; +}; + +struct ath_buf { + struct list_head list; + struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or + an aggregate) */ + struct ath_buf *bf_next; /* next subframe in the aggregate */ + struct sk_buff *bf_mpdu; /* enclosing frame structure */ + struct ath_desc *bf_desc; /* virtual addr of desc */ + dma_addr_t bf_daddr; /* physical addr of desc */ + dma_addr_t bf_buf_addr; /* physical addr of data buffer */ + bool bf_stale; + bool bf_isnullfunc; + u16 bf_flags; + struct ath_buf_state bf_state; + dma_addr_t bf_dmacontext; + struct ath_wiphy *aphy; +}; + +struct ath_atx_tid { + struct list_head list; + struct list_head buf_q; + struct ath_node *an; + struct ath_atx_ac *ac; + struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; + u16 seq_start; + u16 seq_next; + u16 baw_size; + int tidno; + int baw_head; /* first un-acked tx buffer */ + int baw_tail; /* next unused tx buffer slot */ + int sched; + int paused; + u8 state; +}; + +struct ath_node { + struct ath_common *common; + struct ath_atx_tid tid[WME_NUM_TID]; + struct ath_atx_ac ac[WME_NUM_AC]; + u16 maxampdu; + u8 mpdudensity; + int last_rssi; +}; + +int ath9k_cmn_rx_skb_preprocess(struct ath_common *common, + struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ath_rx_status *rx_stats, + struct ieee80211_rx_status *rx_status, + bool *decrypt_error); + +void ath9k_cmn_rx_skb_postprocess(struct ath_common *common, + struct sk_buff *skb, + struct ath_rx_status *rx_stats, + struct ieee80211_rx_status *rxs, + bool decrypt_error); + +int 
ath9k_cmn_padpos(__le16 frame_control); diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c new file mode 100644 index 0000000..42d2a50 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -0,0 +1,795 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include + +#include "ath9k.h" + +#define REG_WRITE_D(_ah, _reg, _val) \ + ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg)) +#define REG_READ_D(_ah, _reg) \ + ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) + +static struct dentry *ath9k_debugfs_root; + +static int ath9k_debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#ifdef CONFIG_ATH_DEBUG + +static ssize_t read_file_debug(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + char buf[32]; + unsigned int len; + + len = snprintf(buf, sizeof(buf), "0x%08x\n", common->debug_mask); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_debug(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + unsigned long mask; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EINVAL; + + buf[len] = '\0'; + if (strict_strtoul(buf, 0, &mask)) + return -EINVAL; + + common->debug_mask = mask; + return count; +} + +static const struct file_operations fops_debug = { + .read = read_file_debug, + .write = write_file_debug, + .open = ath9k_debugfs_open, + .owner = THIS_MODULE +}; + +#endif + +#define DMA_BUF_LEN 1024 + +static ssize_t read_file_dma(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath_hw *ah = sc->sc_ah; + char *buf; + int retval; + unsigned int len = 0; + u32 val[ATH9K_NUM_DMA_DEBUG_REGS]; + int i, qcuOffset = 0, dcuOffset = 0; + u32 *qcuBase = &val[0], *dcuBase = &val[4]; + + buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL); + if (!buf) + return 0; + + ath9k_ps_wakeup(sc); + + REG_WRITE_D(ah, AR_MACMISC, + ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | + (AR_MACMISC_MISC_OBS_BUS_1 << + AR_MACMISC_MISC_OBS_BUS_MSB_S))); + + len += snprintf(buf + len, DMA_BUF_LEN - len, + "Raw DMA Debug values:\n"); + + for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) { + if (i % 4 == 0) + len += snprintf(buf + len, DMA_BUF_LEN - len, "\n"); + + val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32))); + len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ", + i, val[i]); + } + + len += snprintf(buf + 
len, DMA_BUF_LEN - len, "\n\n"); + len += snprintf(buf + len, DMA_BUF_LEN - len, + "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n"); + + for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) { + if (i == 8) { + qcuOffset = 0; + qcuBase++; + } + + if (i == 6) { + dcuOffset = 0; + dcuBase++; + } + + len += snprintf(buf + len, DMA_BUF_LEN - len, + "%2d %2x %1x %2x %2x\n", + i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, + (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3), + val[2] & (0x7 << (i * 3)) >> (i * 3), + (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); + } + + len += snprintf(buf + len, DMA_BUF_LEN - len, "\n"); + + len += snprintf(buf + len, DMA_BUF_LEN - len, + "qcu_stitch state: %2x qcu_fetch state: %2x\n", + (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22); + len += snprintf(buf + len, DMA_BUF_LEN - len, + "qcu_complete state: %2x dcu_complete state: %2x\n", + (val[3] & 0x1c000000) >> 26, (val[6] & 0x3)); + len += snprintf(buf + len, DMA_BUF_LEN - len, + "dcu_arb state: %2x dcu_fp state: %2x\n", + (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27); + len += snprintf(buf + len, DMA_BUF_LEN - len, + "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n", + (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10); + len += snprintf(buf + len, DMA_BUF_LEN - len, + "txfifo_valid_0: %1d txfifo_valid_1: %1d\n", + (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12); + len += snprintf(buf + len, DMA_BUF_LEN - len, + "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n", + (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17); + + len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n", + REG_READ_D(ah, AR_OBS_BUS_1)); + len += snprintf(buf + len, DMA_BUF_LEN - len, + "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR)); + + ath9k_ps_restore(sc); + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return retval; +} + +static const struct file_operations fops_dma = { + .read = read_file_dma, + .open = ath9k_debugfs_open, + .owner = THIS_MODULE +}; + + +void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status) +{ + if (status) + sc->debug.stats.istats.total++; + if (status & ATH9K_INT_RX) + sc->debug.stats.istats.rxok++; + if (status & ATH9K_INT_RXEOL) + sc->debug.stats.istats.rxeol++; + if (status & ATH9K_INT_RXORN) + sc->debug.stats.istats.rxorn++; + if (status & ATH9K_INT_TX) + sc->debug.stats.istats.txok++; + if (status & ATH9K_INT_TXURN) + sc->debug.stats.istats.txurn++; + if (status & ATH9K_INT_MIB) + sc->debug.stats.istats.mib++; + if (status & ATH9K_INT_RXPHY) + sc->debug.stats.istats.rxphyerr++; + if (status & ATH9K_INT_RXKCM) + sc->debug.stats.istats.rx_keycache_miss++; + if (status & ATH9K_INT_SWBA) + sc->debug.stats.istats.swba++; + if (status & ATH9K_INT_BMISS) + sc->debug.stats.istats.bmiss++; + if (status & ATH9K_INT_BNR) + sc->debug.stats.istats.bnr++; + if (status & ATH9K_INT_CST) + sc->debug.stats.istats.cst++; + if (status & ATH9K_INT_GTT) + sc->debug.stats.istats.gtt++; + if (status & ATH9K_INT_TIM) + sc->debug.stats.istats.tim++; + if (status & ATH9K_INT_CABEND) + sc->debug.stats.istats.cabend++; + if (status & ATH9K_INT_DTIMSYNC) + sc->debug.stats.istats.dtimsync++; + if (status & ATH9K_INT_DTIM) + sc->debug.stats.istats.dtim++; +} + +static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[512]; + unsigned int len = 0; + + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: 
%10u\n", "RX", sc->debug.stats.istats.rxok); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "RXORN", sc->debug.stats.istats.rxorn); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "TX", sc->debug.stats.istats.txok); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "TXURN", sc->debug.stats.istats.txurn); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "MIB", sc->debug.stats.istats.mib); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "RXPHY", sc->debug.stats.istats.rxphyerr); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "RXKCM", sc->debug.stats.istats.rx_keycache_miss); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "SWBA", sc->debug.stats.istats.swba); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "BMISS", sc->debug.stats.istats.bmiss); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "BNR", sc->debug.stats.istats.bnr); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "CST", sc->debug.stats.istats.cst); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "GTT", sc->debug.stats.istats.gtt); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "TIM", sc->debug.stats.istats.tim); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "CABEND", sc->debug.stats.istats.cabend); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "DTIMSYNC", sc->debug.stats.istats.dtimsync); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "DTIM", sc->debug.stats.istats.dtim); + len += snprintf(buf + len, sizeof(buf) - len, + "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_interrupt = { + .read = read_file_interrupt, + .open = ath9k_debugfs_open, + .owner = THIS_MODULE +}; + +void ath_debug_stat_rc(struct ath_softc *sc, int final_rate) +{ + struct ath_rc_stats *stats; + + stats = &sc->debug.stats.rcstats[final_rate]; + stats->success++; +} + +void ath_debug_stat_retries(struct ath_softc *sc, int rix, + int xretries, int retries, u8 per) +{ + struct ath_rc_stats *stats = &sc->debug.stats.rcstats[rix]; + + stats->xretries += xretries; + stats->retries += retries; + stats->per = per; +} + +static ssize_t read_file_rcstat(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char *buf; + unsigned int len = 0, max; + int i = 0; + ssize_t retval; + + if (sc->cur_rate_table == NULL) + return 0; + + max = 80 + sc->cur_rate_table->rate_cnt * 1024; + buf = kmalloc(max + 1, GFP_KERNEL); + if (buf == NULL) + return 0; + buf[max] = 0; + + len += sprintf(buf, "%6s %6s %6s " + "%10s %10s %10s %10s\n", + "HT", "MCS", "Rate", + "Success", "Retries", "XRetries", "PER"); + + for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) { + u32 ratekbps = sc->cur_rate_table->info[i].ratekbps; + struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i]; + char mcs[5]; + char htmode[5]; + int used_mcs = 0, used_htmode = 0; + + if (WLAN_RC_PHY_HT(sc->cur_rate_table->info[i].phy)) { + used_mcs = snprintf(mcs, 5, "%d", + sc->cur_rate_table->info[i].ratecode); + + if (WLAN_RC_PHY_40(sc->cur_rate_table->info[i].phy)) + used_htmode = snprintf(htmode, 5, "HT40"); + else if (WLAN_RC_PHY_20(sc->cur_rate_table->info[i].phy)) + 
used_htmode = snprintf(htmode, 5, "HT20"); + else + used_htmode = snprintf(htmode, 5, "????"); + } + + mcs[used_mcs] = '\0'; + htmode[used_htmode] = '\0'; + + len += snprintf(buf + len, max - len, + "%6s %6s %3u.%d: " + "%10u %10u %10u %10u\n", + htmode, + mcs, + ratekbps / 1000, + (ratekbps % 1000) / 100, + stats->success, + stats->retries, + stats->xretries, + stats->per); + } + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return retval; +} + +static const struct file_operations fops_rcstat = { + .read = read_file_rcstat, + .open = ath9k_debugfs_open, + .owner = THIS_MODULE +}; + +static const char * ath_wiphy_state_str(enum ath_wiphy_state state) +{ + switch (state) { + case ATH_WIPHY_INACTIVE: + return "INACTIVE"; + case ATH_WIPHY_ACTIVE: + return "ACTIVE"; + case ATH_WIPHY_PAUSING: + return "PAUSING"; + case ATH_WIPHY_PAUSED: + return "PAUSED"; + case ATH_WIPHY_SCAN: + return "SCAN"; + } + return "?"; +} + +static ssize_t read_file_wiphy(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[512]; + unsigned int len = 0; + int i; + u8 addr[ETH_ALEN]; + + len += snprintf(buf + len, sizeof(buf) - len, + "primary: %s (%s chan=%d ht=%d)\n", + wiphy_name(sc->pri_wiphy->hw->wiphy), + ath_wiphy_state_str(sc->pri_wiphy->state), + sc->pri_wiphy->chan_idx, sc->pri_wiphy->chan_is_ht); + for (i = 0; i < sc->num_sec_wiphy; i++) { + struct ath_wiphy *aphy = sc->sec_wiphy[i]; + if (aphy == NULL) + continue; + len += snprintf(buf + len, sizeof(buf) - len, + "secondary: %s (%s chan=%d ht=%d)\n", + wiphy_name(aphy->hw->wiphy), + ath_wiphy_state_str(aphy->state), + aphy->chan_idx, aphy->chan_is_ht); + } + + put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr); + put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4); + len += snprintf(buf + len, sizeof(buf) - len, + "addr: %pM\n", addr); + put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_BSSMSKL), addr); + put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4); + len += snprintf(buf + len, sizeof(buf) - len, + "addrmask: %pM\n", addr); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static struct ath_wiphy * get_wiphy(struct ath_softc *sc, const char *name) +{ + int i; + if (strcmp(name, wiphy_name(sc->pri_wiphy->hw->wiphy)) == 0) + return sc->pri_wiphy; + for (i = 0; i < sc->num_sec_wiphy; i++) { + struct ath_wiphy *aphy = sc->sec_wiphy[i]; + if (aphy && strcmp(name, wiphy_name(aphy->hw->wiphy)) == 0) + return aphy; + } + return NULL; +} + +static int del_wiphy(struct ath_softc *sc, const char *name) +{ + struct ath_wiphy *aphy = get_wiphy(sc, name); + if (!aphy) + return -ENOENT; + return ath9k_wiphy_del(aphy); +} + +static int pause_wiphy(struct ath_softc *sc, const char *name) +{ + struct ath_wiphy *aphy = get_wiphy(sc, name); + if (!aphy) + return -ENOENT; + return ath9k_wiphy_pause(aphy); +} + +static int unpause_wiphy(struct ath_softc *sc, const char *name) +{ + struct ath_wiphy *aphy = get_wiphy(sc, name); + if (!aphy) + return -ENOENT; + return ath9k_wiphy_unpause(aphy); +} + +static int select_wiphy(struct ath_softc *sc, const char *name) +{ + struct ath_wiphy *aphy = get_wiphy(sc, name); + if (!aphy) + return -ENOENT; + return ath9k_wiphy_select(aphy); +} + +static int schedule_wiphy(struct ath_softc *sc, const char *msec) +{ + ath9k_wiphy_set_scheduler(sc, simple_strtoul(msec, NULL, 0)); + return 0; +} + +static ssize_t write_file_wiphy(struct file *file, const 
char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char buf[50]; + size_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + if (len > 0 && buf[len - 1] == '\n') + buf[len - 1] = '\0'; + + if (strncmp(buf, "add", 3) == 0) { + int res = ath9k_wiphy_add(sc); + if (res < 0) + return res; + } else if (strncmp(buf, "del=", 4) == 0) { + int res = del_wiphy(sc, buf + 4); + if (res < 0) + return res; + } else if (strncmp(buf, "pause=", 6) == 0) { + int res = pause_wiphy(sc, buf + 6); + if (res < 0) + return res; + } else if (strncmp(buf, "unpause=", 8) == 0) { + int res = unpause_wiphy(sc, buf + 8); + if (res < 0) + return res; + } else if (strncmp(buf, "select=", 7) == 0) { + int res = select_wiphy(sc, buf + 7); + if (res < 0) + return res; + } else if (strncmp(buf, "schedule=", 9) == 0) { + int res = schedule_wiphy(sc, buf + 9); + if (res < 0) + return res; + } else + return -EOPNOTSUPP; + + return count; +} + +static const struct file_operations fops_wiphy = { + .read = read_file_wiphy, + .write = write_file_wiphy, + .open = ath9k_debugfs_open, + .owner = THIS_MODULE +}; + +#define PR(str, elem) \ + do { \ + len += snprintf(buf + len, size - len, \ + "%s%13u%11u%10u%10u\n", str, \ + sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BE]].elem, \ + sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BK]].elem, \ + sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VI]].elem, \ + sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VO]].elem); \ +} while(0) + +static ssize_t read_file_xmit(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char *buf; + unsigned int len = 0, size = 2048; + ssize_t retval = 0; + + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return 0; + + len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO"); + + PR("MPDUs Queued: ", queued); + PR("MPDUs Completed: ", completed); + PR("Aggregates: ", a_aggr); + PR("AMPDUs Queued: ", a_queued); + PR("AMPDUs Completed:", a_completed); + PR("AMPDUs Retried: ", a_retries); + PR("AMPDUs XRetried: ", a_xretries); + PR("FIFO Underrun: ", fifo_underrun); + PR("TXOP Exceeded: ", xtxop); + PR("TXTIMER Expiry: ", timer_exp); + PR("DESC CFG Error: ", desc_cfg_err); + PR("DATA Underrun: ", data_underrun); + PR("DELIM Underrun: ", delim_underrun); + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; +} + +void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, + struct ath_buf *bf) +{ + struct ath_desc *ds = bf->bf_desc; + + if (bf_isampdu(bf)) { + if (bf_isxretried(bf)) + TX_STAT_INC(txq->axq_qnum, a_xretries); + else + TX_STAT_INC(txq->axq_qnum, a_completed); + } else { + TX_STAT_INC(txq->axq_qnum, completed); + } + + if (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO) + TX_STAT_INC(txq->axq_qnum, fifo_underrun); + if (ds->ds_txstat.ts_status & ATH9K_TXERR_XTXOP) + TX_STAT_INC(txq->axq_qnum, xtxop); + if (ds->ds_txstat.ts_status & ATH9K_TXERR_TIMER_EXPIRED) + TX_STAT_INC(txq->axq_qnum, timer_exp); + if (ds->ds_txstat.ts_flags & ATH9K_TX_DESC_CFG_ERR) + TX_STAT_INC(txq->axq_qnum, desc_cfg_err); + if (ds->ds_txstat.ts_flags & ATH9K_TX_DATA_UNDERRUN) + TX_STAT_INC(txq->axq_qnum, data_underrun); + if (ds->ds_txstat.ts_flags & ATH9K_TX_DELIM_UNDERRUN) + TX_STAT_INC(txq->axq_qnum, delim_underrun); +} + +static const struct file_operations fops_xmit = { + .read = 
read_file_xmit, + .open = ath9k_debugfs_open, + .owner = THIS_MODULE +}; + +static ssize_t read_file_recv(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ +#define PHY_ERR(s, p) \ + len += snprintf(buf + len, size - len, "%18s : %10u\n", s, \ + sc->debug.stats.rxstats.phy_err_stats[p]); + + struct ath_softc *sc = file->private_data; + char *buf; + unsigned int len = 0, size = 1152; + ssize_t retval = 0; + + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return 0; + + len += snprintf(buf + len, size - len, + "%18s : %10u\n", "CRC ERR", + sc->debug.stats.rxstats.crc_err); + len += snprintf(buf + len, size - len, + "%18s : %10u\n", "DECRYPT CRC ERR", + sc->debug.stats.rxstats.decrypt_crc_err); + len += snprintf(buf + len, size - len, + "%18s : %10u\n", "PHY ERR", + sc->debug.stats.rxstats.phy_err); + len += snprintf(buf + len, size - len, + "%18s : %10u\n", "MIC ERR", + sc->debug.stats.rxstats.mic_err); + len += snprintf(buf + len, size - len, + "%18s : %10u\n", "PRE-DELIM CRC ERR", + sc->debug.stats.rxstats.pre_delim_crc_err); + len += snprintf(buf + len, size - len, + "%18s : %10u\n", "POST-DELIM CRC ERR", + sc->debug.stats.rxstats.post_delim_crc_err); + len += snprintf(buf + len, size - len, + "%18s : %10u\n", "DECRYPT BUSY ERR", + sc->debug.stats.rxstats.decrypt_busy_err); + + PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN); + PHY_ERR("TIMING", ATH9K_PHYERR_TIMING); + PHY_ERR("PARITY", ATH9K_PHYERR_PARITY); + PHY_ERR("RATE", ATH9K_PHYERR_RATE); + PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH); + PHY_ERR("RADAR", ATH9K_PHYERR_RADAR); + PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE); + PHY_ERR("TOR", ATH9K_PHYERR_TOR); + PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING); + PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY); + PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL); + PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL); + PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP); + PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE); + PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART); + PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT); + PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING); + PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC); + PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL); + PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE); + PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART); + PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL); + PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP); + PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR); + PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL); + PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL); + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; + +#undef PHY_ERR +} + +void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf) +{ +#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++ +#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ + + struct ath_desc *ds = bf->bf_desc; + u32 phyerr; + + if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) + RX_STAT_INC(crc_err); + if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) + RX_STAT_INC(decrypt_crc_err); + if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) + RX_STAT_INC(mic_err); + if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE) + RX_STAT_INC(pre_delim_crc_err); + if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST) + RX_STAT_INC(post_delim_crc_err); + if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY) + RX_STAT_INC(decrypt_busy_err); + + if 
(ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) { + RX_STAT_INC(phy_err); + phyerr = ds->ds_rxstat.rs_phyerr & 0x24; + RX_PHY_ERR_INC(phyerr); + } + +#undef RX_STAT_INC +#undef RX_PHY_ERR_INC +} + +static const struct file_operations fops_recv = { + .read = read_file_recv, + .open = ath9k_debugfs_open, + .owner = THIS_MODULE +}; + +int ath9k_init_debug(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath_softc *sc = (struct ath_softc *) common->priv; + + if (!ath9k_debugfs_root) + return -ENOENT; + + sc->debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy), + ath9k_debugfs_root); + if (!sc->debug.debugfs_phy) + goto err; + +#ifdef CONFIG_ATH_DEBUG + sc->debug.debugfs_debug = debugfs_create_file("debug", + S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug); + if (!sc->debug.debugfs_debug) + goto err; +#endif + + sc->debug.debugfs_dma = debugfs_create_file("dma", S_IRUSR, + sc->debug.debugfs_phy, sc, &fops_dma); + if (!sc->debug.debugfs_dma) + goto err; + + sc->debug.debugfs_interrupt = debugfs_create_file("interrupt", + S_IRUSR, + sc->debug.debugfs_phy, + sc, &fops_interrupt); + if (!sc->debug.debugfs_interrupt) + goto err; + + sc->debug.debugfs_rcstat = debugfs_create_file("rcstat", + S_IRUSR, + sc->debug.debugfs_phy, + sc, &fops_rcstat); + if (!sc->debug.debugfs_rcstat) + goto err; + + sc->debug.debugfs_wiphy = debugfs_create_file( + "wiphy", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, + &fops_wiphy); + if (!sc->debug.debugfs_wiphy) + goto err; + + sc->debug.debugfs_xmit = debugfs_create_file("xmit", + S_IRUSR, + sc->debug.debugfs_phy, + sc, &fops_xmit); + if (!sc->debug.debugfs_xmit) + goto err; + + sc->debug.debugfs_recv = debugfs_create_file("recv", + S_IRUSR, + sc->debug.debugfs_phy, + sc, &fops_recv); + if (!sc->debug.debugfs_recv) + goto err; + + return 0; +err: + ath9k_exit_debug(ah); + return -ENOMEM; +} + +void ath9k_exit_debug(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath_softc *sc = (struct ath_softc *) common->priv; + + debugfs_remove(sc->debug.debugfs_recv); + debugfs_remove(sc->debug.debugfs_xmit); + debugfs_remove(sc->debug.debugfs_wiphy); + debugfs_remove(sc->debug.debugfs_rcstat); + debugfs_remove(sc->debug.debugfs_interrupt); + debugfs_remove(sc->debug.debugfs_dma); + debugfs_remove(sc->debug.debugfs_debug); + debugfs_remove(sc->debug.debugfs_phy); +} + +int ath9k_debug_create_root(void) +{ + ath9k_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!ath9k_debugfs_root) + return -ENOENT; + + return 0; +} + +void ath9k_debug_remove_root(void) +{ + debugfs_remove(ath9k_debugfs_root); + ath9k_debugfs_root = NULL; +} diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h new file mode 100644 index 0000000..86780e6 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
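
The files registered in ath9k_init_debug() above live under the root directory created from KBUILD_MODNAME (typically "ath9k") with one subdirectory per wiphy. The sketch below shows how they are usually consumed from userspace; the /sys/kernel/debug mount point and the "phy0" wiphy name are assumptions for illustration, substitute whatever the running system reports:

#include <stdio.h>

static void dump(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	/* Per-queue TX, RX and interrupt statistics from fops_xmit,
	 * fops_recv and fops_interrupt. */
	dump("/sys/kernel/debug/ath9k/phy0/xmit");
	dump("/sys/kernel/debug/ath9k/phy0/recv");
	dump("/sys/kernel/debug/ath9k/phy0/interrupt");

	/* The "wiphy" file also accepts the add/del=/pause=/unpause=/
	 * select=/schedule= commands parsed by write_file_wiphy(). */
	{
		FILE *f = fopen("/sys/kernel/debug/ath9k/phy0/wiphy", "w");

		if (f) {
			fputs("schedule=500\n", f);	/* illustrative value */
			fclose(f);
		}
	}
	return 0;
}
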
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef DEBUG_H +#define DEBUG_H + +#include "hw.h" +#include "rc.h" + +struct ath_txq; +struct ath_buf; + +#ifdef CONFIG_ATH9K_DEBUGFS +#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++ +#else +#define TX_STAT_INC(q, c) do { } while (0) +#endif + +#ifdef CONFIG_ATH9K_DEBUGFS + +/** + * struct ath_interrupt_stats - Contains statistics about interrupts + * @total: Total no. of interrupts generated so far + * @rxok: RX with no errors + * @rxeol: RX with no more RXDESC available + * @rxorn: RX FIFO overrun + * @txok: TX completed at the requested rate + * @txurn: TX FIFO underrun + * @mib: MIB regs reaching its threshold + * @rxphyerr: RX with phy errors + * @rx_keycache_miss: RX with key cache misses + * @swba: Software Beacon Alert + * @bmiss: Beacon Miss + * @bnr: Beacon Not Ready + * @cst: Carrier Sense TImeout + * @gtt: Global TX Timeout + * @tim: RX beacon TIM occurrence + * @cabend: RX End of CAB traffic + * @dtimsync: DTIM sync lossage + * @dtim: RX Beacon with DTIM + */ +struct ath_interrupt_stats { + u32 total; + u32 rxok; + u32 rxeol; + u32 rxorn; + u32 txok; + u32 txeol; + u32 txurn; + u32 mib; + u32 rxphyerr; + u32 rx_keycache_miss; + u32 swba; + u32 bmiss; + u32 bnr; + u32 cst; + u32 gtt; + u32 tim; + u32 cabend; + u32 dtimsync; + u32 dtim; +}; + +struct ath_rc_stats { + u32 success; + u32 retries; + u32 xretries; + u8 per; +}; + +/** + * struct ath_tx_stats - Statistics about TX + * @queued: Total MPDUs (non-aggr) queued + * @completed: Total MPDUs (non-aggr) completed + * @a_aggr: Total no. of aggregates queued + * @a_queued: Total AMPDUs queued + * @a_completed: Total AMPDUs completed + * @a_retries: No. of AMPDUs retried (SW) + * @a_xretries: No. of AMPDUs dropped due to xretries + * @fifo_underrun: FIFO underrun occurrences + Valid only for: + - non-aggregate condition. + - first packet of aggregate. + * @xtxop: No. of frames filtered because of TXOP limit + * @timer_exp: Transmit timer expiry + * @desc_cfg_err: Descriptor configuration errors + * @data_urn: TX data underrun errors + * @delim_urn: TX delimiter underrun errors + */ +struct ath_tx_stats { + u32 queued; + u32 completed; + u32 a_aggr; + u32 a_queued; + u32 a_completed; + u32 a_retries; + u32 a_xretries; + u32 fifo_underrun; + u32 xtxop; + u32 timer_exp; + u32 desc_cfg_err; + u32 data_underrun; + u32 delim_underrun; +}; + +/** + * struct ath_rx_stats - RX Statistics + * @crc_err: No. of frames with incorrect CRC value + * @decrypt_crc_err: No. of frames whose CRC check failed after + decryption process completed + * @phy_err: No. of frames whose reception failed because the PHY + encountered an error + * @mic_err: No. 
of frames with incorrect TKIP MIC verification failure + * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections + * @post_delim_crc_err: Post-Frame delimiter CRC error detections + * @decrypt_busy_err: Decryption interruptions counter + * @phy_err_stats: Individual PHY error statistics + */ +struct ath_rx_stats { + u32 crc_err; + u32 decrypt_crc_err; + u32 phy_err; + u32 mic_err; + u32 pre_delim_crc_err; + u32 post_delim_crc_err; + u32 decrypt_busy_err; + u32 phy_err_stats[ATH9K_PHYERR_MAX]; +}; + +struct ath_stats { + struct ath_interrupt_stats istats; + struct ath_rc_stats rcstats[RATE_TABLE_SIZE]; + struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES]; + struct ath_rx_stats rxstats; +}; + +struct ath9k_debug { + struct dentry *debugfs_phy; + struct dentry *debugfs_debug; + struct dentry *debugfs_dma; + struct dentry *debugfs_interrupt; + struct dentry *debugfs_rcstat; + struct dentry *debugfs_wiphy; + struct dentry *debugfs_xmit; + struct dentry *debugfs_recv; + struct ath_stats stats; +}; + +int ath9k_init_debug(struct ath_hw *ah); +void ath9k_exit_debug(struct ath_hw *ah); + +int ath9k_debug_create_root(void); +void ath9k_debug_remove_root(void); +void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); +void ath_debug_stat_rc(struct ath_softc *sc, int final_rate); +void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, + struct ath_buf *bf); +void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf); +void ath_debug_stat_retries(struct ath_softc *sc, int rix, + int xretries, int retries, u8 per); + +#else + +static inline int ath9k_init_debug(struct ath_hw *ah) +{ + return 0; +} + +static inline void ath9k_exit_debug(struct ath_hw *ah) +{ +} + +static inline int ath9k_debug_create_root(void) +{ + return 0; +} + +static inline void ath9k_debug_remove_root(void) +{ +} + +static inline void ath_debug_stat_interrupt(struct ath_softc *sc, + enum ath9k_int status) +{ +} + +static inline void ath_debug_stat_rc(struct ath_softc *sc, + int final_rate) +{ +} + +static inline void ath_debug_stat_tx(struct ath_softc *sc, + struct ath_txq *txq, + struct ath_buf *bf) +{ +} + +static inline void ath_debug_stat_rx(struct ath_softc *sc, + struct ath_buf *bf) +{ +} + +static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix, + int xretries, int retries, u8 per) +{ +} + +#endif /* CONFIG_ATH9K_DEBUGFS */ + +#endif /* DEBUG_H */ diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c new file mode 100644 index 0000000..dacaae9 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "hw.h" + +static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) +{ + if (fbin == AR5416_BCHAN_UNUSED) + return fbin; + + return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); +} + +void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask, + u32 shift, u32 val) +{ + u32 regVal; + + regVal = REG_READ(ah, reg) & ~mask; + regVal |= (val << shift) & mask; + + REG_WRITE(ah, reg, regVal); + + if (ah->config.analog_shiftreg) + udelay(100); + + return; +} + +int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight, + int16_t targetLeft, int16_t targetRight) +{ + int16_t rv; + + if (srcRight == srcLeft) { + rv = targetLeft; + } else { + rv = (int16_t) (((target - srcLeft) * targetRight + + (srcRight - target) * targetLeft) / + (srcRight - srcLeft)); + } + return rv; +} + +bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, + u16 *indexL, u16 *indexR) +{ + u16 i; + + if (target <= pList[0]) { + *indexL = *indexR = 0; + return true; + } + if (target >= pList[listSize - 1]) { + *indexL = *indexR = (u16) (listSize - 1); + return true; + } + + for (i = 0; i < listSize - 1; i++) { + if (pList[i] == target) { + *indexL = *indexR = i; + return true; + } + if (target < pList[i + 1]) { + *indexL = i; + *indexR = (u16) (i + 1); + return false; + } + } + return false; +} + +bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data) +{ + return common->bus_ops->eeprom_read(common, off, data); +} + +void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, + u8 *pVpdList, u16 numIntercepts, + u8 *pRetVpdList) +{ + u16 i, k; + u8 currPwr = pwrMin; + u16 idxL = 0, idxR = 0; + + for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) { + ath9k_hw_get_lower_upper_index(currPwr, pPwrList, + numIntercepts, &(idxL), + &(idxR)); + if (idxR < 1) + idxR = 1; + if (idxL == numIntercepts - 1) + idxL = (u16) (numIntercepts - 2); + if (pPwrList[idxL] == pPwrList[idxR]) + k = pVpdList[idxL]; + else + k = (u16)(((currPwr - pPwrList[idxL]) * pVpdList[idxR] + + (pPwrList[idxR] - currPwr) * pVpdList[idxL]) / + (pPwrList[idxR] - pPwrList[idxL])); + pRetVpdList[i] = (u8) k; + currPwr += 2; + } +} + +void ath9k_hw_get_legacy_target_powers(struct ath_hw *ah, + struct ath9k_channel *chan, + struct cal_target_power_leg *powInfo, + u16 numChannels, + struct cal_target_power_leg *pNewPower, + u16 numRates, bool isExtTarget) +{ + struct chan_centers centers; + u16 clo, chi; + int i; + int matchIndex = -1, lowIndex = -1; + u16 freq; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + freq = (isExtTarget) ? 
centers.ext_center : centers.ctl_center; + + if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, + IS_CHAN_2GHZ(chan))) { + matchIndex = 0; + } else { + for (i = 0; (i < numChannels) && + (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) { + if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel, + IS_CHAN_2GHZ(chan))) { + matchIndex = i; + break; + } else if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel, + IS_CHAN_2GHZ(chan)) && i > 0 && + freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel, + IS_CHAN_2GHZ(chan))) { + lowIndex = i - 1; + break; + } + } + if ((matchIndex == -1) && (lowIndex == -1)) + matchIndex = i - 1; + } + + if (matchIndex != -1) { + *pNewPower = powInfo[matchIndex]; + } else { + clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel, + IS_CHAN_2GHZ(chan)); + chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel, + IS_CHAN_2GHZ(chan)); + + for (i = 0; i < numRates; i++) { + pNewPower->tPow2x[i] = + (u8)ath9k_hw_interpolate(freq, clo, chi, + powInfo[lowIndex].tPow2x[i], + powInfo[lowIndex + 1].tPow2x[i]); + } + } +} + +void ath9k_hw_get_target_powers(struct ath_hw *ah, + struct ath9k_channel *chan, + struct cal_target_power_ht *powInfo, + u16 numChannels, + struct cal_target_power_ht *pNewPower, + u16 numRates, bool isHt40Target) +{ + struct chan_centers centers; + u16 clo, chi; + int i; + int matchIndex = -1, lowIndex = -1; + u16 freq; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + freq = isHt40Target ? centers.synth_center : centers.ctl_center; + + if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) { + matchIndex = 0; + } else { + for (i = 0; (i < numChannels) && + (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) { + if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel, + IS_CHAN_2GHZ(chan))) { + matchIndex = i; + break; + } else + if (freq < ath9k_hw_fbin2freq(powInfo[i].bChannel, + IS_CHAN_2GHZ(chan)) && i > 0 && + freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel, + IS_CHAN_2GHZ(chan))) { + lowIndex = i - 1; + break; + } + } + if ((matchIndex == -1) && (lowIndex == -1)) + matchIndex = i - 1; + } + + if (matchIndex != -1) { + *pNewPower = powInfo[matchIndex]; + } else { + clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel, + IS_CHAN_2GHZ(chan)); + chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel, + IS_CHAN_2GHZ(chan)); + + for (i = 0; i < numRates; i++) { + pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq, + clo, chi, + powInfo[lowIndex].tPow2x[i], + powInfo[lowIndex + 1].tPow2x[i]); + } + } +} + +u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, + bool is2GHz, int num_band_edges) +{ + u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; + int i; + + for (i = 0; (i < num_band_edges) && + (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) { + if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) { + twiceMaxEdgePower = pRdEdgesPower[i].tPower; + break; + } else if ((i > 0) && + (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, + is2GHz))) { + if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel, + is2GHz) < freq && + pRdEdgesPower[i - 1].flag) { + twiceMaxEdgePower = + pRdEdgesPower[i - 1].tPower; + } + break; + } + } + + return twiceMaxEdgePower; +} + +int ath9k_hw_eeprom_init(struct ath_hw *ah) +{ + int status; + + if (AR_SREV_9287(ah)) { + ah->eep_map = EEP_MAP_AR9287; + ah->eep_ops = &eep_AR9287_ops; + } else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) { + ah->eep_map = EEP_MAP_4KBITS; + ah->eep_ops = &eep_4k_ops; + } else { + ah->eep_map = EEP_MAP_DEFAULT; + ah->eep_ops = &eep_def_ops; 
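	/*
	 * Worked example for ath9k_hw_interpolate() above, since the target
	 * power and calibration lookups lean on it heavily: it is plain
	 * linear interpolation between two calibration piers. With
	 * illustrative numbers (not EEPROM contents), srcLeft = 5180,
	 * srcRight = 5320, targetLeft = 30, targetRight = 44, target = 5200:
	 *
	 *   rv = ((5200 - 5180) * 44 + (5320 - 5200) * 30) / (5320 - 5180)
	 *      = (880 + 3600) / 140
	 *      = 32
	 *
	 * The result stays close to targetLeft because 5200 is close to the
	 * left pier; when srcLeft == srcRight it simply returns targetLeft.
	 */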
+ } + + if (!ah->eep_ops->fill_eeprom(ah)) + return -EIO; + + status = ah->eep_ops->check_eeprom(ah); + + return status; +} diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h new file mode 100644 index 0000000..2f2993b --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -0,0 +1,718 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef EEPROM_H +#define EEPROM_H + +#include "../ath.h" +#include + +#define AH_USE_EEPROM 0x1 + +#ifdef __BIG_ENDIAN +#define AR5416_EEPROM_MAGIC 0x5aa5 +#else +#define AR5416_EEPROM_MAGIC 0xa55a +#endif + +#define CTRY_DEBUG 0x1ff +#define CTRY_DEFAULT 0 + +#define AR_EEPROM_EEPCAP_COMPRESS_DIS 0x0001 +#define AR_EEPROM_EEPCAP_AES_DIS 0x0002 +#define AR_EEPROM_EEPCAP_FASTFRAME_DIS 0x0004 +#define AR_EEPROM_EEPCAP_BURST_DIS 0x0008 +#define AR_EEPROM_EEPCAP_MAXQCU 0x01F0 +#define AR_EEPROM_EEPCAP_MAXQCU_S 4 +#define AR_EEPROM_EEPCAP_HEAVY_CLIP_EN 0x0200 +#define AR_EEPROM_EEPCAP_KC_ENTRIES 0xF000 +#define AR_EEPROM_EEPCAP_KC_ENTRIES_S 12 + +#define AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040 +#define AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080 +#define AR_EEPROM_EEREGCAP_EN_KK_U2 0x0100 +#define AR_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200 +#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400 +#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800 + +#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD_PRE4_0 0x4000 +#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A_PRE4_0 0x8000 + +#define AR5416_EEPROM_MAGIC_OFFSET 0x0 +#define AR5416_EEPROM_S 2 +#define AR5416_EEPROM_OFFSET 0x2000 +#define AR5416_EEPROM_MAX 0xae0 + +#define AR5416_EEPROM_START_ADDR \ + (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200 + +#define SD_NO_CTL 0xE0 +#define NO_CTL 0xff +#define CTL_MODE_M 7 +#define CTL_11A 0 +#define CTL_11B 1 +#define CTL_11G 2 +#define CTL_2GHT20 5 +#define CTL_5GHT20 6 +#define CTL_2GHT40 7 +#define CTL_5GHT40 8 + +#define EXT_ADDITIVE (0x8000) +#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) +#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) +#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) + +#define SUB_NUM_CTL_MODES_AT_5G_40 2 +#define SUB_NUM_CTL_MODES_AT_2G_40 3 + +#define INCREASE_MAXPOW_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ +#define INCREASE_MAXPOW_BY_THREE_CHAIN 10 /* 10*log10(3)*2 */ + +/* + * For AR9285 and later chipsets, the following bits are not being programmed + * in EEPROM and so need to be enabled always. + * + * Bit 0: en_fcc_mid + * Bit 1: en_jap_mid + * Bit 2: en_fcc_dfs_ht40 + * Bit 3: en_jap_ht40 + * Bit 4: en_jap_dfs_ht40 + */ +#define AR9285_RDEXT_DEFAULT 0x1F + +#define AR_EEPROM_MAC(i) (0x1d+(i)) +#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) +#define FREQ2FBIN(x, y) ((y) ? 
((x) - 2300) : (((x) - 4800) / 5)) +#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) + +#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) +#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \ + ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) +#define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_10_OR_LATER(ah) && \ + ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) + +#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c +#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2 +#define AR_EEPROM_RFSILENT_POLARITY 0x0002 +#define AR_EEPROM_RFSILENT_POLARITY_S 1 + +#define EEP_RFSILENT_ENABLED 0x0001 +#define EEP_RFSILENT_ENABLED_S 0 +#define EEP_RFSILENT_POLARITY 0x0002 +#define EEP_RFSILENT_POLARITY_S 1 +#define EEP_RFSILENT_GPIO_SEL 0x001c +#define EEP_RFSILENT_GPIO_SEL_S 2 + +#define AR5416_OPFLAGS_11A 0x01 +#define AR5416_OPFLAGS_11G 0x02 +#define AR5416_OPFLAGS_N_5G_HT40 0x04 +#define AR5416_OPFLAGS_N_2G_HT40 0x08 +#define AR5416_OPFLAGS_N_5G_HT20 0x10 +#define AR5416_OPFLAGS_N_2G_HT20 0x20 + +#define AR5416_EEP_NO_BACK_VER 0x1 +#define AR5416_EEP_VER 0xE +#define AR5416_EEP_VER_MINOR_MASK 0x0FFF +#define AR5416_EEP_MINOR_VER_2 0x2 +#define AR5416_EEP_MINOR_VER_3 0x3 +#define AR5416_EEP_MINOR_VER_7 0x7 +#define AR5416_EEP_MINOR_VER_9 0x9 +#define AR5416_EEP_MINOR_VER_16 0x10 +#define AR5416_EEP_MINOR_VER_17 0x11 +#define AR5416_EEP_MINOR_VER_19 0x13 +#define AR5416_EEP_MINOR_VER_20 0x14 +#define AR5416_EEP_MINOR_VER_21 0x15 +#define AR5416_EEP_MINOR_VER_22 0x16 + +#define AR5416_NUM_5G_CAL_PIERS 8 +#define AR5416_NUM_2G_CAL_PIERS 4 +#define AR5416_NUM_5G_20_TARGET_POWERS 8 +#define AR5416_NUM_5G_40_TARGET_POWERS 8 +#define AR5416_NUM_2G_CCK_TARGET_POWERS 3 +#define AR5416_NUM_2G_20_TARGET_POWERS 4 +#define AR5416_NUM_2G_40_TARGET_POWERS 4 +#define AR5416_NUM_CTLS 24 +#define AR5416_NUM_BAND_EDGES 8 +#define AR5416_NUM_PD_GAINS 4 +#define AR5416_PD_GAINS_IN_MASK 4 +#define AR5416_PD_GAIN_ICEPTS 5 +#define AR5416_EEPROM_MODAL_SPURS 5 +#define AR5416_MAX_RATE_POWER 63 +#define AR5416_NUM_PDADC_VALUES 128 +#define AR5416_BCHAN_UNUSED 0xFF +#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64 +#define AR5416_MAX_CHAINS 3 +#define AR5416_PWR_TABLE_OFFSET_DB -5 + +/* Rx gain type values */ +#define AR5416_EEP_RXGAIN_23DB_BACKOFF 0 +#define AR5416_EEP_RXGAIN_13DB_BACKOFF 1 +#define AR5416_EEP_RXGAIN_ORIG 2 + +/* Tx gain type values */ +#define AR5416_EEP_TXGAIN_ORIGINAL 0 +#define AR5416_EEP_TXGAIN_HIGH_POWER 1 + +#define AR5416_EEP4K_START_LOC 64 +#define AR5416_EEP4K_NUM_2G_CAL_PIERS 3 +#define AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS 3 +#define AR5416_EEP4K_NUM_2G_20_TARGET_POWERS 3 +#define AR5416_EEP4K_NUM_2G_40_TARGET_POWERS 3 +#define AR5416_EEP4K_NUM_CTLS 12 +#define AR5416_EEP4K_NUM_BAND_EDGES 4 +#define AR5416_EEP4K_NUM_PD_GAINS 2 +#define AR5416_EEP4K_PD_GAINS_IN_MASK 4 +#define AR5416_EEP4K_PD_GAIN_ICEPTS 5 +#define AR5416_EEP4K_MAX_CHAINS 1 + +#define AR9280_TX_GAIN_TABLE_SIZE 22 + +#define AR9287_EEP_VER 0xE +#define AR9287_EEP_VER_MINOR_MASK 0xFFF +#define AR9287_EEP_MINOR_VER_1 0x1 +#define AR9287_EEP_MINOR_VER_2 0x2 +#define AR9287_EEP_MINOR_VER_3 0x3 +#define AR9287_EEP_MINOR_VER AR9287_EEP_MINOR_VER_3 +#define AR9287_EEP_MINOR_VER_b AR9287_EEP_MINOR_VER +#define AR9287_EEP_NO_BACK_VER AR9287_EEP_MINOR_VER_1 + +#define AR9287_EEP_START_LOC 128 +#define AR9287_NUM_2G_CAL_PIERS 3 +#define AR9287_NUM_2G_CCK_TARGET_POWERS 3 +#define AR9287_NUM_2G_20_TARGET_POWERS 3 +#define AR9287_NUM_2G_40_TARGET_POWERS 3 +#define AR9287_NUM_CTLS 12 +#define AR9287_NUM_BAND_EDGES 4 
+#define AR9287_NUM_PD_GAINS 4 +#define AR9287_PD_GAINS_IN_MASK 4 +#define AR9287_PD_GAIN_ICEPTS 1 +#define AR9287_EEPROM_MODAL_SPURS 5 +#define AR9287_MAX_RATE_POWER 63 +#define AR9287_NUM_PDADC_VALUES 128 +#define AR9287_NUM_RATES 16 +#define AR9287_BCHAN_UNUSED 0xFF +#define AR9287_MAX_PWR_RANGE_IN_HALF_DB 64 +#define AR9287_OPFLAGS_11A 0x01 +#define AR9287_OPFLAGS_11G 0x02 +#define AR9287_OPFLAGS_2G_HT40 0x08 +#define AR9287_OPFLAGS_2G_HT20 0x20 +#define AR9287_OPFLAGS_5G_HT40 0x04 +#define AR9287_OPFLAGS_5G_HT20 0x10 +#define AR9287_EEPMISC_BIG_ENDIAN 0x01 +#define AR9287_EEPMISC_WOW 0x02 +#define AR9287_MAX_CHAINS 2 +#define AR9287_ANT_16S 32 +#define AR9287_custdatasize 20 + +#define AR9287_NUM_ANT_CHAIN_FIELDS 6 +#define AR9287_NUM_ANT_COMMON_FIELDS 4 +#define AR9287_SIZE_ANT_CHAIN_FIELD 2 +#define AR9287_SIZE_ANT_COMMON_FIELD 4 +#define AR9287_ANT_CHAIN_MASK 0x3 +#define AR9287_ANT_COMMON_MASK 0xf +#define AR9287_CHAIN_0_IDX 0 +#define AR9287_CHAIN_1_IDX 1 +#define AR9287_DATA_SZ 32 + +#define AR9287_PWR_TABLE_OFFSET_DB -5 + +#define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1) + +enum eeprom_param { + EEP_NFTHRESH_5, + EEP_NFTHRESH_2, + EEP_MAC_MSW, + EEP_MAC_MID, + EEP_MAC_LSW, + EEP_REG_0, + EEP_REG_1, + EEP_OP_CAP, + EEP_OP_MODE, + EEP_RF_SILENT, + EEP_OB_5, + EEP_DB_5, + EEP_OB_2, + EEP_DB_2, + EEP_MINOR_REV, + EEP_TX_MASK, + EEP_RX_MASK, + EEP_RXGAIN_TYPE, + EEP_TXGAIN_TYPE, + EEP_OL_PWRCTRL, + EEP_RC_CHAIN_MASK, + EEP_DAC_HPWR_5G, + EEP_FRAC_N_5G, + EEP_DEV_TYPE, + EEP_TEMPSENSE_SLOPE, + EEP_TEMPSENSE_SLOPE_PAL_ON, + EEP_PWR_TABLE_OFFSET +}; + +enum ar5416_rates { + rate6mb, rate9mb, rate12mb, rate18mb, + rate24mb, rate36mb, rate48mb, rate54mb, + rate1l, rate2l, rate2s, rate5_5l, + rate5_5s, rate11l, rate11s, rateXr, + rateHt20_0, rateHt20_1, rateHt20_2, rateHt20_3, + rateHt20_4, rateHt20_5, rateHt20_6, rateHt20_7, + rateHt40_0, rateHt40_1, rateHt40_2, rateHt40_3, + rateHt40_4, rateHt40_5, rateHt40_6, rateHt40_7, + rateDupCck, rateDupOfdm, rateExtCck, rateExtOfdm, + Ar5416RateSize +}; + +enum ath9k_hal_freq_band { + ATH9K_HAL_FREQ_BAND_5GHZ = 0, + ATH9K_HAL_FREQ_BAND_2GHZ = 1 +}; + +struct base_eep_header { + u16 length; + u16 checksum; + u16 version; + u8 opCapFlags; + u8 eepMisc; + u16 regDmn[2]; + u8 macAddr[6]; + u8 rxMask; + u8 txMask; + u16 rfSilent; + u16 blueToothOptions; + u16 deviceCap; + u32 binBuildNumber; + u8 deviceType; + u8 pwdclkind; + u8 futureBase_1[2]; + u8 rxGainType; + u8 dacHiPwrMode_5G; + u8 openLoopPwrCntl; + u8 dacLpMode; + u8 txGainType; + u8 rcChainMask; + u8 desiredScaleCCK; + u8 pwr_table_offset; + u8 frac_n_5g; + u8 futureBase_3[21]; +} __packed; + +struct base_eep_header_4k { + u16 length; + u16 checksum; + u16 version; + u8 opCapFlags; + u8 eepMisc; + u16 regDmn[2]; + u8 macAddr[6]; + u8 rxMask; + u8 txMask; + u16 rfSilent; + u16 blueToothOptions; + u16 deviceCap; + u32 binBuildNumber; + u8 deviceType; + u8 txGainType; +} __packed; + + +struct spur_chan { + u16 spurChan; + u8 spurRangeLow; + u8 spurRangeHigh; +} __packed; + +struct modal_eep_header { + u32 antCtrlChain[AR5416_MAX_CHAINS]; + u32 antCtrlCommon; + u8 antennaGainCh[AR5416_MAX_CHAINS]; + u8 switchSettling; + u8 txRxAttenCh[AR5416_MAX_CHAINS]; + u8 rxTxMarginCh[AR5416_MAX_CHAINS]; + u8 adcDesiredSize; + u8 pgaDesiredSize; + u8 xlnaGainCh[AR5416_MAX_CHAINS]; + u8 txEndToXpaOff; + u8 txEndToRxOn; + u8 txFrameToXpaOn; + u8 thresh62; + u8 noiseFloorThreshCh[AR5416_MAX_CHAINS]; + u8 xpdGain; + u8 xpd; + u8 iqCalICh[AR5416_MAX_CHAINS]; + u8 iqCalQCh[AR5416_MAX_CHAINS]; + u8 
pdGainOverlap; + u8 ob; + u8 db; + u8 xpaBiasLvl; + u8 pwrDecreaseFor2Chain; + u8 pwrDecreaseFor3Chain; + u8 txFrameToDataStart; + u8 txFrameToPaOn; + u8 ht40PowerIncForPdadc; + u8 bswAtten[AR5416_MAX_CHAINS]; + u8 bswMargin[AR5416_MAX_CHAINS]; + u8 swSettleHt40; + u8 xatten2Db[AR5416_MAX_CHAINS]; + u8 xatten2Margin[AR5416_MAX_CHAINS]; + u8 ob_ch1; + u8 db_ch1; + u8 useAnt1:1, + force_xpaon:1, + local_bias:1, + femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1; + u8 miscBits; + u16 xpaBiasLvlFreq[3]; + u8 futureModal[6]; + + struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS]; +} __packed; + +struct calDataPerFreqOpLoop { + u8 pwrPdg[2][5]; + u8 vpdPdg[2][5]; + u8 pcdac[2][5]; + u8 empty[2][5]; +} __packed; + +struct modal_eep_4k_header { + u32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS]; + u32 antCtrlCommon; + u8 antennaGainCh[AR5416_EEP4K_MAX_CHAINS]; + u8 switchSettling; + u8 txRxAttenCh[AR5416_EEP4K_MAX_CHAINS]; + u8 rxTxMarginCh[AR5416_EEP4K_MAX_CHAINS]; + u8 adcDesiredSize; + u8 pgaDesiredSize; + u8 xlnaGainCh[AR5416_EEP4K_MAX_CHAINS]; + u8 txEndToXpaOff; + u8 txEndToRxOn; + u8 txFrameToXpaOn; + u8 thresh62; + u8 noiseFloorThreshCh[AR5416_EEP4K_MAX_CHAINS]; + u8 xpdGain; + u8 xpd; + u8 iqCalICh[AR5416_EEP4K_MAX_CHAINS]; + u8 iqCalQCh[AR5416_EEP4K_MAX_CHAINS]; + u8 pdGainOverlap; +#ifdef __BIG_ENDIAN_BITFIELD + u8 ob_1:4, ob_0:4; + u8 db1_1:4, db1_0:4; +#else + u8 ob_0:4, ob_1:4; + u8 db1_0:4, db1_1:4; +#endif + u8 xpaBiasLvl; + u8 txFrameToDataStart; + u8 txFrameToPaOn; + u8 ht40PowerIncForPdadc; + u8 bswAtten[AR5416_EEP4K_MAX_CHAINS]; + u8 bswMargin[AR5416_EEP4K_MAX_CHAINS]; + u8 swSettleHt40; + u8 xatten2Db[AR5416_EEP4K_MAX_CHAINS]; + u8 xatten2Margin[AR5416_EEP4K_MAX_CHAINS]; +#ifdef __BIG_ENDIAN_BITFIELD + u8 db2_1:4, db2_0:4; +#else + u8 db2_0:4, db2_1:4; +#endif + u8 version; +#ifdef __BIG_ENDIAN_BITFIELD + u8 ob_3:4, ob_2:4; + u8 antdiv_ctl1:4, ob_4:4; + u8 db1_3:4, db1_2:4; + u8 antdiv_ctl2:4, db1_4:4; + u8 db2_2:4, db2_3:4; + u8 reserved:4, db2_4:4; +#else + u8 ob_2:4, ob_3:4; + u8 ob_4:4, antdiv_ctl1:4; + u8 db1_2:4, db1_3:4; + u8 db1_4:4, antdiv_ctl2:4; + u8 db2_2:4, db2_3:4; + u8 db2_4:4, reserved:4; +#endif + u8 futureModal[4]; + struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS]; +} __packed; + +struct base_eep_ar9287_header { + u16 length; + u16 checksum; + u16 version; + u8 opCapFlags; + u8 eepMisc; + u16 regDmn[2]; + u8 macAddr[6]; + u8 rxMask; + u8 txMask; + u16 rfSilent; + u16 blueToothOptions; + u16 deviceCap; + u32 binBuildNumber; + u8 deviceType; + u8 openLoopPwrCntl; + int8_t pwrTableOffset; + int8_t tempSensSlope; + int8_t tempSensSlopePalOn; + u8 futureBase[29]; +} __packed; + +struct modal_eep_ar9287_header { + u32 antCtrlChain[AR9287_MAX_CHAINS]; + u32 antCtrlCommon; + int8_t antennaGainCh[AR9287_MAX_CHAINS]; + u8 switchSettling; + u8 txRxAttenCh[AR9287_MAX_CHAINS]; + u8 rxTxMarginCh[AR9287_MAX_CHAINS]; + int8_t adcDesiredSize; + u8 txEndToXpaOff; + u8 txEndToRxOn; + u8 txFrameToXpaOn; + u8 thresh62; + int8_t noiseFloorThreshCh[AR9287_MAX_CHAINS]; + u8 xpdGain; + u8 xpd; + int8_t iqCalICh[AR9287_MAX_CHAINS]; + int8_t iqCalQCh[AR9287_MAX_CHAINS]; + u8 pdGainOverlap; + u8 xpaBiasLvl; + u8 txFrameToDataStart; + u8 txFrameToPaOn; + u8 ht40PowerIncForPdadc; + u8 bswAtten[AR9287_MAX_CHAINS]; + u8 bswMargin[AR9287_MAX_CHAINS]; + u8 swSettleHt40; + u8 version; + u8 db1; + u8 db2; + u8 ob_cck; + u8 ob_psk; + u8 ob_qam; + u8 ob_pal_off; + u8 futureModal[30]; + struct spur_chan spurChans[AR9287_EEPROM_MODAL_SPURS]; +} __packed; + +struct cal_data_per_freq { + 
u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS]; + u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS]; +} __packed; + +struct cal_data_per_freq_4k { + u8 pwrPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS]; + u8 vpdPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS]; +} __packed; + +struct cal_target_power_leg { + u8 bChannel; + u8 tPow2x[4]; +} __packed; + +struct cal_target_power_ht { + u8 bChannel; + u8 tPow2x[8]; +} __packed; + + +#ifdef __BIG_ENDIAN_BITFIELD +struct cal_ctl_edges { + u8 bChannel; + u8 flag:2, tPower:6; +} __packed; +#else +struct cal_ctl_edges { + u8 bChannel; + u8 tPower:6, flag:2; +} __packed; +#endif + +struct cal_data_op_loop_ar9287 { + u8 pwrPdg[2][5]; + u8 vpdPdg[2][5]; + u8 pcdac[2][5]; + u8 empty[2][5]; +} __packed; + +struct cal_data_per_freq_ar9287 { + u8 pwrPdg[AR9287_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS]; + u8 vpdPdg[AR9287_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS]; +} __packed; + +union cal_data_per_freq_ar9287_u { + struct cal_data_op_loop_ar9287 calDataOpen; + struct cal_data_per_freq_ar9287 calDataClose; +} __packed; + +struct cal_ctl_data_ar9287 { + struct cal_ctl_edges + ctlEdges[AR9287_MAX_CHAINS][AR9287_NUM_BAND_EDGES]; +} __packed; + +struct cal_ctl_data { + struct cal_ctl_edges + ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES]; +} __packed; + +struct cal_ctl_data_4k { + struct cal_ctl_edges + ctlEdges[AR5416_EEP4K_MAX_CHAINS][AR5416_EEP4K_NUM_BAND_EDGES]; +} __packed; + +struct ar5416_eeprom_def { + struct base_eep_header baseEepHeader; + u8 custData[64]; + struct modal_eep_header modalHeader[2]; + u8 calFreqPier5G[AR5416_NUM_5G_CAL_PIERS]; + u8 calFreqPier2G[AR5416_NUM_2G_CAL_PIERS]; + struct cal_data_per_freq + calPierData5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS]; + struct cal_data_per_freq + calPierData2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS]; + struct cal_target_power_leg + calTargetPower5G[AR5416_NUM_5G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower5GHT20[AR5416_NUM_5G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower5GHT40[AR5416_NUM_5G_40_TARGET_POWERS]; + struct cal_target_power_leg + calTargetPowerCck[AR5416_NUM_2G_CCK_TARGET_POWERS]; + struct cal_target_power_leg + calTargetPower2G[AR5416_NUM_2G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower2GHT20[AR5416_NUM_2G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower2GHT40[AR5416_NUM_2G_40_TARGET_POWERS]; + u8 ctlIndex[AR5416_NUM_CTLS]; + struct cal_ctl_data ctlData[AR5416_NUM_CTLS]; + u8 padding; +} __packed; + +struct ar5416_eeprom_4k { + struct base_eep_header_4k baseEepHeader; + u8 custData[20]; + struct modal_eep_4k_header modalHeader; + u8 calFreqPier2G[AR5416_EEP4K_NUM_2G_CAL_PIERS]; + struct cal_data_per_freq_4k + calPierData2G[AR5416_EEP4K_MAX_CHAINS][AR5416_EEP4K_NUM_2G_CAL_PIERS]; + struct cal_target_power_leg + calTargetPowerCck[AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS]; + struct cal_target_power_leg + calTargetPower2G[AR5416_EEP4K_NUM_2G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower2GHT20[AR5416_EEP4K_NUM_2G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower2GHT40[AR5416_EEP4K_NUM_2G_40_TARGET_POWERS]; + u8 ctlIndex[AR5416_EEP4K_NUM_CTLS]; + struct cal_ctl_data_4k ctlData[AR5416_EEP4K_NUM_CTLS]; + u8 padding; +} __packed; + +struct ar9287_eeprom { + struct base_eep_ar9287_header baseEepHeader; + u8 custData[AR9287_DATA_SZ]; + struct modal_eep_ar9287_header modalHeader; + u8 calFreqPier2G[AR9287_NUM_2G_CAL_PIERS]; + union cal_data_per_freq_ar9287_u + 
calPierData2G[AR9287_MAX_CHAINS][AR9287_NUM_2G_CAL_PIERS]; + struct cal_target_power_leg + calTargetPowerCck[AR9287_NUM_2G_CCK_TARGET_POWERS]; + struct cal_target_power_leg + calTargetPower2G[AR9287_NUM_2G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower2GHT20[AR9287_NUM_2G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower2GHT40[AR9287_NUM_2G_40_TARGET_POWERS]; + u8 ctlIndex[AR9287_NUM_CTLS]; + struct cal_ctl_data_ar9287 ctlData[AR9287_NUM_CTLS]; + u8 padding; +} __packed; + +enum reg_ext_bitmap { + REG_EXT_FCC_MIDBAND = 0, + REG_EXT_JAPAN_MIDBAND = 1, + REG_EXT_FCC_DFS_HT40 = 2, + REG_EXT_JAPAN_NONDFS_HT40 = 3, + REG_EXT_JAPAN_DFS_HT40 = 4 +}; + +struct ath9k_country_entry { + u16 countryCode; + u16 regDmnEnum; + u16 regDmn5G; + u16 regDmn2G; + u8 isMultidomain; + u8 iso[3]; +}; + +enum ath9k_eep_map { + EEP_MAP_DEFAULT = 0x0, + EEP_MAP_4KBITS, + EEP_MAP_AR9287, + EEP_MAP_MAX +}; + +struct eeprom_ops { + int (*check_eeprom)(struct ath_hw *hw); + u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param); + bool (*fill_eeprom)(struct ath_hw *hw); + int (*get_eeprom_ver)(struct ath_hw *hw); + int (*get_eeprom_rev)(struct ath_hw *hw); + u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band); + u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, + struct ath9k_channel *chan); + void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); + void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan); + void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan, + u16 cfgCtl, u8 twiceAntennaReduction, + u8 twiceMaxRegulatoryPower, u8 powerLimit); + u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz); +}; + +void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask, + u32 shift, u32 val); +int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight, + int16_t targetLeft, + int16_t targetRight); +bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, + u16 *indexL, u16 *indexR); +bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data); +void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, + u8 *pVpdList, u16 numIntercepts, + u8 *pRetVpdList); +void ath9k_hw_get_legacy_target_powers(struct ath_hw *ah, + struct ath9k_channel *chan, + struct cal_target_power_leg *powInfo, + u16 numChannels, + struct cal_target_power_leg *pNewPower, + u16 numRates, bool isExtTarget); +void ath9k_hw_get_target_powers(struct ath_hw *ah, + struct ath9k_channel *chan, + struct cal_target_power_ht *powInfo, + u16 numChannels, + struct cal_target_power_ht *pNewPower, + u16 numRates, bool isHt40Target); +u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, + bool is2GHz, int num_band_edges); +int ath9k_hw_eeprom_init(struct ath_hw *ah); + +#define ar5416_get_ntxchains(_txchainmask) \ + (((_txchainmask >> 2) & 1) + \ + ((_txchainmask >> 1) & 1) + (_txchainmask & 1)) + +extern const struct eeprom_ops eep_def_ops; +extern const struct eeprom_ops eep_4k_ops; +extern const struct eeprom_ops eep_AR9287_ops; + +#endif /* EEPROM_H */ diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c new file mode 100644 index 0000000..68db166 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -0,0 +1,1198 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hw.h" + +static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) +{ + return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF); +} + +static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) +{ + return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); +} + +static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) +{ +#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) + struct ath_common *common = ath9k_hw_common(ah); + u16 *eep_data = (u16 *)&ah->eeprom.map4k; + int addr, eep_start_loc = 0; + + eep_start_loc = 64; + + if (!ath9k_hw_use_flash(ah)) { + ath_print(common, ATH_DBG_EEPROM, + "Reading from EEPROM, not flash\n"); + } + + for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { + if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { + ath_print(common, ATH_DBG_EEPROM, + "Unable to read eeprom region \n"); + return false; + } + eep_data++; + } + + return true; +#undef SIZE_EEPROM_4K +} + +static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) +{ +#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) + struct ath_common *common = ath9k_hw_common(ah); + struct ar5416_eeprom_4k *eep = + (struct ar5416_eeprom_4k *) &ah->eeprom.map4k; + u16 *eepdata, temp, magic, magic2; + u32 sum = 0, el; + bool need_swap = false; + int i, addr; + + + if (!ath9k_hw_use_flash(ah)) { + if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, + &magic)) { + ath_print(common, ATH_DBG_FATAL, + "Reading Magic # failed\n"); + return false; + } + + ath_print(common, ATH_DBG_EEPROM, + "Read Magic = 0x%04X\n", magic); + + if (magic != AR5416_EEPROM_MAGIC) { + magic2 = swab16(magic); + + if (magic2 == AR5416_EEPROM_MAGIC) { + need_swap = true; + eepdata = (u16 *) (&ah->eeprom); + + for (addr = 0; addr < EEPROM_4K_SIZE; addr++) { + temp = swab16(*eepdata); + *eepdata = temp; + eepdata++; + } + } else { + ath_print(common, ATH_DBG_FATAL, + "Invalid EEPROM Magic. " + "endianness mismatch.\n"); + return -EINVAL; + } + } + } + + ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", + need_swap ? "True" : "False"); + + if (need_swap) + el = swab16(ah->eeprom.map4k.baseEepHeader.length); + else + el = ah->eeprom.map4k.baseEepHeader.length; + + if (el > sizeof(struct ar5416_eeprom_4k)) + el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16); + else + el = el / sizeof(u16); + + eepdata = (u16 *)(&ah->eeprom); + + for (i = 0; i < el; i++) + sum ^= *eepdata++; + + if (need_swap) { + u32 integer; + u16 word; + + ath_print(common, ATH_DBG_EEPROM, + "EEPROM Endianness is not native.. 
Changing\n"); + + word = swab16(eep->baseEepHeader.length); + eep->baseEepHeader.length = word; + + word = swab16(eep->baseEepHeader.checksum); + eep->baseEepHeader.checksum = word; + + word = swab16(eep->baseEepHeader.version); + eep->baseEepHeader.version = word; + + word = swab16(eep->baseEepHeader.regDmn[0]); + eep->baseEepHeader.regDmn[0] = word; + + word = swab16(eep->baseEepHeader.regDmn[1]); + eep->baseEepHeader.regDmn[1] = word; + + word = swab16(eep->baseEepHeader.rfSilent); + eep->baseEepHeader.rfSilent = word; + + word = swab16(eep->baseEepHeader.blueToothOptions); + eep->baseEepHeader.blueToothOptions = word; + + word = swab16(eep->baseEepHeader.deviceCap); + eep->baseEepHeader.deviceCap = word; + + integer = swab32(eep->modalHeader.antCtrlCommon); + eep->modalHeader.antCtrlCommon = integer; + + for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { + integer = swab32(eep->modalHeader.antCtrlChain[i]); + eep->modalHeader.antCtrlChain[i] = integer; + } + + for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) { + word = swab16(eep->modalHeader.spurChans[i].spurChan); + eep->modalHeader.spurChans[i].spurChan = word; + } + } + + if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || + ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { + ath_print(common, ATH_DBG_FATAL, + "Bad EEPROM checksum 0x%x or revision 0x%04x\n", + sum, ah->eep_ops->get_eeprom_ver(ah)); + return -EINVAL; + } + + return 0; +#undef EEPROM_4K_SIZE +} + +static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, + enum eeprom_param param) +{ + struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; + struct modal_eep_4k_header *pModal = &eep->modalHeader; + struct base_eep_header_4k *pBase = &eep->baseEepHeader; + + switch (param) { + case EEP_NFTHRESH_2: + return pModal->noiseFloorThreshCh[0]; + case AR_EEPROM_MAC(0): + return pBase->macAddr[0] << 8 | pBase->macAddr[1]; + case AR_EEPROM_MAC(1): + return pBase->macAddr[2] << 8 | pBase->macAddr[3]; + case AR_EEPROM_MAC(2): + return pBase->macAddr[4] << 8 | pBase->macAddr[5]; + case EEP_REG_0: + return pBase->regDmn[0]; + case EEP_REG_1: + return pBase->regDmn[1]; + case EEP_OP_CAP: + return pBase->deviceCap; + case EEP_OP_MODE: + return pBase->opCapFlags; + case EEP_RF_SILENT: + return pBase->rfSilent; + case EEP_OB_2: + return pModal->ob_0; + case EEP_DB_2: + return pModal->db1_1; + case EEP_MINOR_REV: + return pBase->version & AR5416_EEP_VER_MINOR_MASK; + case EEP_TX_MASK: + return pBase->txMask; + case EEP_RX_MASK: + return pBase->rxMask; + case EEP_FRAC_N_5G: + return 0; + case EEP_PWR_TABLE_OFFSET: + return AR5416_PWR_TABLE_OFFSET_DB; + default: + return 0; + } +} + +static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah, + struct ath9k_channel *chan, + struct cal_data_per_freq_4k *pRawDataSet, + u8 *bChans, u16 availPiers, + u16 tPdGainOverlap, int16_t *pMinCalPower, + u16 *pPdGainBoundaries, u8 *pPDADCValues, + u16 numXpdGains) +{ +#define TMP_VAL_VPD_TABLE \ + ((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep)); + int i, j, k; + int16_t ss; + u16 idxL = 0, idxR = 0, numPiers; + static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + static u8 vpdTableR[AR5416_EEP4K_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + + u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR; + u8 minPwrT4[AR5416_EEP4K_NUM_PD_GAINS]; + u8 maxPwrT4[AR5416_EEP4K_NUM_PD_GAINS]; + int16_t vpdStep; + int16_t tmpVal; + u16 sizeCurrVpdTable, maxIndex, 
tgtIndex; + bool match; + int16_t minDelta = 0; + struct chan_centers centers; +#define PD_GAIN_BOUNDARY_DEFAULT 58; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + for (numPiers = 0; numPiers < availPiers; numPiers++) { + if (bChans[numPiers] == AR5416_BCHAN_UNUSED) + break; + } + + match = ath9k_hw_get_lower_upper_index( + (u8)FREQ2FBIN(centers.synth_center, + IS_CHAN_2GHZ(chan)), bChans, numPiers, + &idxL, &idxR); + + if (match) { + for (i = 0; i < numXpdGains; i++) { + minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0]; + maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4]; + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pRawDataSet[idxL].pwrPdg[i], + pRawDataSet[idxL].vpdPdg[i], + AR5416_EEP4K_PD_GAIN_ICEPTS, + vpdTableI[i]); + } + } else { + for (i = 0; i < numXpdGains; i++) { + pVpdL = pRawDataSet[idxL].vpdPdg[i]; + pPwrL = pRawDataSet[idxL].pwrPdg[i]; + pVpdR = pRawDataSet[idxR].vpdPdg[i]; + pPwrR = pRawDataSet[idxR].pwrPdg[i]; + + minPwrT4[i] = max(pPwrL[0], pPwrR[0]); + + maxPwrT4[i] = + min(pPwrL[AR5416_EEP4K_PD_GAIN_ICEPTS - 1], + pPwrR[AR5416_EEP4K_PD_GAIN_ICEPTS - 1]); + + + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pPwrL, pVpdL, + AR5416_EEP4K_PD_GAIN_ICEPTS, + vpdTableL[i]); + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pPwrR, pVpdR, + AR5416_EEP4K_PD_GAIN_ICEPTS, + vpdTableR[i]); + + for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) { + vpdTableI[i][j] = + (u8)(ath9k_hw_interpolate((u16) + FREQ2FBIN(centers. + synth_center, + IS_CHAN_2GHZ + (chan)), + bChans[idxL], bChans[idxR], + vpdTableL[i][j], vpdTableR[i][j])); + } + } + } + + *pMinCalPower = (int16_t)(minPwrT4[0] / 2); + + k = 0; + + for (i = 0; i < numXpdGains; i++) { + if (i == (numXpdGains - 1)) + pPdGainBoundaries[i] = + (u16)(maxPwrT4[i] / 2); + else + pPdGainBoundaries[i] = + (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4); + + pPdGainBoundaries[i] = + min((u16)AR5416_MAX_RATE_POWER, pPdGainBoundaries[i]); + + if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) { + minDelta = pPdGainBoundaries[0] - 23; + pPdGainBoundaries[0] = 23; + } else { + minDelta = 0; + } + + if (i == 0) { + if (AR_SREV_9280_10_OR_LATER(ah)) + ss = (int16_t)(0 - (minPwrT4[i] / 2)); + else + ss = 0; + } else { + ss = (int16_t)((pPdGainBoundaries[i - 1] - + (minPwrT4[i] / 2)) - + tPdGainOverlap + 1 + minDelta); + } + vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]); + vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); + + while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { + tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep); + pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal); + ss++; + } + + sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1); + tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap - + (minPwrT4[i] / 2)); + maxIndex = (tgtIndex < sizeCurrVpdTable) ? + tgtIndex : sizeCurrVpdTable; + + while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) + pPDADCValues[k++] = vpdTableI[i][ss++]; + + vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] - + vpdTableI[i][sizeCurrVpdTable - 2]); + vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); + + if (tgtIndex >= maxIndex) { + while ((ss <= tgtIndex) && + (k < (AR5416_NUM_PDADC_VALUES - 1))) { + tmpVal = (int16_t) TMP_VAL_VPD_TABLE; + pPDADCValues[k++] = (u8)((tmpVal > 255) ? 
+ 255 : tmpVal); + ss++; + } + } + } + + while (i < AR5416_EEP4K_PD_GAINS_IN_MASK) { + pPdGainBoundaries[i] = PD_GAIN_BOUNDARY_DEFAULT; + i++; + } + + while (k < AR5416_NUM_PDADC_VALUES) { + pPDADCValues[k] = pPDADCValues[k - 1]; + k++; + } + + return; +#undef TMP_VAL_VPD_TABLE +} + +static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, + struct ath9k_channel *chan, + int16_t *pTxPowerIndexOffset) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; + struct cal_data_per_freq_4k *pRawDataset; + u8 *pCalBChans = NULL; + u16 pdGainOverlap_t2; + static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; + u16 gainBoundaries[AR5416_EEP4K_PD_GAINS_IN_MASK]; + u16 numPiers, i, j; + int16_t tMinCalPower; + u16 numXpdGain, xpdMask; + u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 }; + u32 reg32, regOffset, regChainOffset; + + xpdMask = pEepData->modalHeader.xpdGain; + + if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_2) { + pdGainOverlap_t2 = + pEepData->modalHeader.pdGainOverlap; + } else { + pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), + AR_PHY_TPCRG5_PD_GAIN_OVERLAP)); + } + + pCalBChans = pEepData->calFreqPier2G; + numPiers = AR5416_EEP4K_NUM_2G_CAL_PIERS; + + numXpdGain = 0; + + for (i = 1; i <= AR5416_EEP4K_PD_GAINS_IN_MASK; i++) { + if ((xpdMask >> (AR5416_EEP4K_PD_GAINS_IN_MASK - i)) & 1) { + if (numXpdGain >= AR5416_EEP4K_NUM_PD_GAINS) + break; + xpdGainValues[numXpdGain] = + (u16)(AR5416_EEP4K_PD_GAINS_IN_MASK - i); + numXpdGain++; + } + } + + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, + (numXpdGain - 1) & 0x3); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1, + xpdGainValues[0]); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2, + xpdGainValues[1]); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0); + + for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { + if (AR_SREV_5416_20_OR_LATER(ah) && + (ah->rxchainmask == 5 || ah->txchainmask == 5) && + (i != 0)) { + regChainOffset = (i == 1) ? 
0x2000 : 0x1000; + } else + regChainOffset = i * 0x1000; + + if (pEepData->baseEepHeader.txMask & (1 << i)) { + pRawDataset = pEepData->calPierData2G[i]; + + ath9k_hw_get_4k_gain_boundaries_pdadcs(ah, chan, + pRawDataset, pCalBChans, + numPiers, pdGainOverlap_t2, + &tMinCalPower, gainBoundaries, + pdadcValues, numXpdGain); + + if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { + REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset, + SM(pdGainOverlap_t2, + AR_PHY_TPCRG5_PD_GAIN_OVERLAP) + | SM(gainBoundaries[0], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) + | SM(gainBoundaries[1], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) + | SM(gainBoundaries[2], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) + | SM(gainBoundaries[3], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4)); + } + + regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset; + for (j = 0; j < 32; j++) { + reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) | + ((pdadcValues[4 * j + 1] & 0xFF) << 8) | + ((pdadcValues[4 * j + 2] & 0xFF) << 16)| + ((pdadcValues[4 * j + 3] & 0xFF) << 24); + REG_WRITE(ah, regOffset, reg32); + + ath_print(common, ATH_DBG_EEPROM, + "PDADC (%d,%4x): %4.4x %8.8x\n", + i, regChainOffset, regOffset, + reg32); + ath_print(common, ATH_DBG_EEPROM, + "PDADC: Chain %d | " + "PDADC %3d Value %3d | " + "PDADC %3d Value %3d | " + "PDADC %3d Value %3d | " + "PDADC %3d Value %3d |\n", + i, 4 * j, pdadcValues[4 * j], + 4 * j + 1, pdadcValues[4 * j + 1], + 4 * j + 2, pdadcValues[4 * j + 2], + 4 * j + 3, + pdadcValues[4 * j + 3]); + + regOffset += 4; + } + } + } + + *pTxPowerIndexOffset = 0; +} + +static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, + struct ath9k_channel *chan, + int16_t *ratesArray, + u16 cfgCtl, + u16 AntennaReduction, + u16 twiceMaxRegulatoryPower, + u16 powerLimit) +{ +#define CMP_TEST_GRP \ + (((cfgCtl & ~CTL_MODE_M)| (pCtlMode[ctlMode] & CTL_MODE_M)) == \ + pEepData->ctlIndex[i]) \ + || (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \ + ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL)) + + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + int i; + int16_t twiceLargestAntenna; + u16 twiceMinEdgePower; + u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; + u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; + u16 numCtlModes, *pCtlMode, ctlMode, freq; + struct chan_centers centers; + struct cal_ctl_data_4k *rep; + struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; + static const u16 tpScaleReductionTable[5] = + { 0, 3, 6, 9, AR5416_MAX_RATE_POWER }; + struct cal_target_power_leg targetPowerOfdm, targetPowerCck = { + 0, { 0, 0, 0, 0} + }; + struct cal_target_power_leg targetPowerOfdmExt = { + 0, { 0, 0, 0, 0} }, targetPowerCckExt = { + 0, { 0, 0, 0, 0 } + }; + struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = { + 0, {0, 0, 0, 0} + }; + u16 ctlModesFor11g[] = + { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, + CTL_2GHT40 + }; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + twiceLargestAntenna = pEepData->modalHeader.antennaGainCh[0]; + twiceLargestAntenna = (int16_t)min(AntennaReduction - + twiceLargestAntenna, 0); + + maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; + if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) { + maxRegAllowedPower -= + (tpScaleReductionTable[(regulatory->tp_scale)] * 2); + } + + scaledPower = min(powerLimit, maxRegAllowedPower); + scaledPower = max((u16)0, scaledPower); + + numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; + pCtlMode = ctlModesFor11g; + + ath9k_hw_get_legacy_target_powers(ah, chan, + 
pEepData->calTargetPowerCck, + AR5416_NUM_2G_CCK_TARGET_POWERS, + &targetPowerCck, 4, false); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPower2G, + AR5416_NUM_2G_20_TARGET_POWERS, + &targetPowerOfdm, 4, false); + ath9k_hw_get_target_powers(ah, chan, + pEepData->calTargetPower2GHT20, + AR5416_NUM_2G_20_TARGET_POWERS, + &targetPowerHt20, 8, false); + + if (IS_CHAN_HT40(chan)) { + numCtlModes = ARRAY_SIZE(ctlModesFor11g); + ath9k_hw_get_target_powers(ah, chan, + pEepData->calTargetPower2GHT40, + AR5416_NUM_2G_40_TARGET_POWERS, + &targetPowerHt40, 8, true); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPowerCck, + AR5416_NUM_2G_CCK_TARGET_POWERS, + &targetPowerCckExt, 4, true); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPower2G, + AR5416_NUM_2G_20_TARGET_POWERS, + &targetPowerOfdmExt, 4, true); + } + + for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { + bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || + (pCtlMode[ctlMode] == CTL_2GHT40); + + if (isHt40CtlMode) + freq = centers.synth_center; + else if (pCtlMode[ctlMode] & EXT_ADDITIVE) + freq = centers.ext_center; + else + freq = centers.ctl_center; + + if (ah->eep_ops->get_eeprom_ver(ah) == 14 && + ah->eep_ops->get_eeprom_rev(ah) <= 2) + twiceMaxEdgePower = AR5416_MAX_RATE_POWER; + + for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && + pEepData->ctlIndex[i]; i++) { + + if (CMP_TEST_GRP) { + rep = &(pEepData->ctlData[i]); + + twiceMinEdgePower = ath9k_hw_get_max_edge_power( + freq, + rep->ctlEdges[ + ar5416_get_ntxchains(ah->txchainmask) - 1], + IS_CHAN_2GHZ(chan), + AR5416_EEP4K_NUM_BAND_EDGES); + + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) { + twiceMaxEdgePower = + min(twiceMaxEdgePower, + twiceMinEdgePower); + } else { + twiceMaxEdgePower = twiceMinEdgePower; + break; + } + } + } + + minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); + + switch (pCtlMode[ctlMode]) { + case CTL_11B: + for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) { + targetPowerCck.tPow2x[i] = + min((u16)targetPowerCck.tPow2x[i], + minCtlPower); + } + break; + case CTL_11G: + for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) { + targetPowerOfdm.tPow2x[i] = + min((u16)targetPowerOfdm.tPow2x[i], + minCtlPower); + } + break; + case CTL_2GHT20: + for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) { + targetPowerHt20.tPow2x[i] = + min((u16)targetPowerHt20.tPow2x[i], + minCtlPower); + } + break; + case CTL_11B_EXT: + targetPowerCckExt.tPow2x[0] = + min((u16)targetPowerCckExt.tPow2x[0], + minCtlPower); + break; + case CTL_11G_EXT: + targetPowerOfdmExt.tPow2x[0] = + min((u16)targetPowerOfdmExt.tPow2x[0], + minCtlPower); + break; + case CTL_2GHT40: + for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { + targetPowerHt40.tPow2x[i] = + min((u16)targetPowerHt40.tPow2x[i], + minCtlPower); + } + break; + default: + break; + } + } + + ratesArray[rate6mb] = + ratesArray[rate9mb] = + ratesArray[rate12mb] = + ratesArray[rate18mb] = + ratesArray[rate24mb] = + targetPowerOfdm.tPow2x[0]; + + ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; + ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; + ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3]; + ratesArray[rateXr] = targetPowerOfdm.tPow2x[0]; + + for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) + ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i]; + + ratesArray[rate1l] = targetPowerCck.tPow2x[0]; + ratesArray[rate2s] = ratesArray[rate2l] = targetPowerCck.tPow2x[1]; + ratesArray[rate5_5s] = ratesArray[rate5_5l] = 
targetPowerCck.tPow2x[2]; + ratesArray[rate11s] = ratesArray[rate11l] = targetPowerCck.tPow2x[3]; + + if (IS_CHAN_HT40(chan)) { + for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { + ratesArray[rateHt40_0 + i] = + targetPowerHt40.tPow2x[i]; + } + ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0]; + ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0]; + ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0]; + ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0]; + } + +#undef CMP_TEST_GRP +} + +static void ath9k_hw_4k_set_txpower(struct ath_hw *ah, + struct ath9k_channel *chan, + u16 cfgCtl, + u8 twiceAntennaReduction, + u8 twiceMaxRegulatoryPower, + u8 powerLimit) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; + struct modal_eep_4k_header *pModal = &pEepData->modalHeader; + int16_t ratesArray[Ar5416RateSize]; + int16_t txPowerIndexOffset = 0; + u8 ht40PowerIncForPdadc = 2; + int i; + + memset(ratesArray, 0, sizeof(ratesArray)); + + if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_2) { + ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; + } + + ath9k_hw_set_4k_power_per_rate_table(ah, chan, + &ratesArray[0], cfgCtl, + twiceAntennaReduction, + twiceMaxRegulatoryPower, + powerLimit); + + ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset); + + for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { + ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); + if (ratesArray[i] > AR5416_MAX_RATE_POWER) + ratesArray[i] = AR5416_MAX_RATE_POWER; + } + + + /* Update regulatory */ + + i = rate6mb; + if (IS_CHAN_HT40(chan)) + i = rateHt40_0; + else if (IS_CHAN_HT20(chan)) + i = rateHt20_0; + + regulatory->max_power_level = ratesArray[i]; + + if (AR_SREV_9280_10_OR_LATER(ah)) { + for (i = 0; i < Ar5416RateSize; i++) + ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2; + } + + /* OFDM power per rate */ + REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, + ATH9K_POW_SM(ratesArray[rate18mb], 24) + | ATH9K_POW_SM(ratesArray[rate12mb], 16) + | ATH9K_POW_SM(ratesArray[rate9mb], 8) + | ATH9K_POW_SM(ratesArray[rate6mb], 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE2, + ATH9K_POW_SM(ratesArray[rate54mb], 24) + | ATH9K_POW_SM(ratesArray[rate48mb], 16) + | ATH9K_POW_SM(ratesArray[rate36mb], 8) + | ATH9K_POW_SM(ratesArray[rate24mb], 0)); + + /* CCK power per rate */ + REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, + ATH9K_POW_SM(ratesArray[rate2s], 24) + | ATH9K_POW_SM(ratesArray[rate2l], 16) + | ATH9K_POW_SM(ratesArray[rateXr], 8) + | ATH9K_POW_SM(ratesArray[rate1l], 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE4, + ATH9K_POW_SM(ratesArray[rate11s], 24) + | ATH9K_POW_SM(ratesArray[rate11l], 16) + | ATH9K_POW_SM(ratesArray[rate5_5s], 8) + | ATH9K_POW_SM(ratesArray[rate5_5l], 0)); + + /* HT20 power per rate */ + REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, + ATH9K_POW_SM(ratesArray[rateHt20_3], 24) + | ATH9K_POW_SM(ratesArray[rateHt20_2], 16) + | ATH9K_POW_SM(ratesArray[rateHt20_1], 8) + | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE6, + ATH9K_POW_SM(ratesArray[rateHt20_7], 24) + | ATH9K_POW_SM(ratesArray[rateHt20_6], 16) + | ATH9K_POW_SM(ratesArray[rateHt20_5], 8) + | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)); + + /* HT40 power per rate */ + if (IS_CHAN_HT40(chan)) { + REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, + ATH9K_POW_SM(ratesArray[rateHt40_3] + + ht40PowerIncForPdadc, 24) + | ATH9K_POW_SM(ratesArray[rateHt40_2] + + ht40PowerIncForPdadc, 16) + | ATH9K_POW_SM(ratesArray[rateHt40_1] + + 
ht40PowerIncForPdadc, 8) + | ATH9K_POW_SM(ratesArray[rateHt40_0] + + ht40PowerIncForPdadc, 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE8, + ATH9K_POW_SM(ratesArray[rateHt40_7] + + ht40PowerIncForPdadc, 24) + | ATH9K_POW_SM(ratesArray[rateHt40_6] + + ht40PowerIncForPdadc, 16) + | ATH9K_POW_SM(ratesArray[rateHt40_5] + + ht40PowerIncForPdadc, 8) + | ATH9K_POW_SM(ratesArray[rateHt40_4] + + ht40PowerIncForPdadc, 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, + ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) + | ATH9K_POW_SM(ratesArray[rateExtCck], 16) + | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) + | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); + } +} + +static void ath9k_hw_4k_set_addac(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct modal_eep_4k_header *pModal; + struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; + u8 biaslevel; + + if (ah->hw_version.macVersion != AR_SREV_VERSION_9160) + return; + + if (ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_MINOR_VER_7) + return; + + pModal = &eep->modalHeader; + + if (pModal->xpaBiasLvl != 0xff) { + biaslevel = pModal->xpaBiasLvl; + INI_RA(&ah->iniAddac, 7, 1) = + (INI_RA(&ah->iniAddac, 7, 1) & (~0x18)) | biaslevel << 3; + } +} + +static void ath9k_hw_4k_set_gain(struct ath_hw *ah, + struct modal_eep_4k_header *pModal, + struct ar5416_eeprom_4k *eep, + u8 txRxAttenLocal) +{ + REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0, + pModal->antCtrlChain[0]); + + REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), + (REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) & + ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | + AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | + SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | + SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); + + if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_3) { + txRxAttenLocal = pModal->txRxAttenCh[0]; + + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, + AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, + AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, + AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, + pModal->xatten2Margin[0]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, + AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); + + /* Set the block 1 value to block 0 value */ + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, + AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, + pModal->bswMargin[0]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, + AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, + AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, + pModal->xatten2Margin[0]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, + AR_PHY_GAIN_2GHZ_XATTEN2_DB, + pModal->xatten2Db[0]); + } + + REG_RMW_FIELD(ah, AR_PHY_RXGAIN, + AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); + REG_RMW_FIELD(ah, AR_PHY_RXGAIN, + AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); + + REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, + AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); + REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, + AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); + + if (AR_SREV_9285_11(ah)) + REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14)); +} + +/* + * Read EEPROM header info and program the device for correct operation + * given the channel value. 
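+ * For the 4K (single-chain) layout this covers the antenna switch table,
+ * TX/RX gain and attenuation, the fast antenna diversity control bits
+ * (modal header version >= 3), the ob/db bias levels written into the
+ * analog RF2G3/RF2G4 registers, and the switch-settling and TX-end
+ * timing fields.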
+ */ +static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct modal_eep_4k_header *pModal; + struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; + u8 txRxAttenLocal; + u8 ob[5], db1[5], db2[5]; + u8 ant_div_control1, ant_div_control2; + u32 regVal; + + pModal = &eep->modalHeader; + txRxAttenLocal = 23; + + REG_WRITE(ah, AR_PHY_SWITCH_COM, + ah->eep_ops->get_eeprom_antenna_cfg(ah, chan)); + + /* Single chain for 4K EEPROM*/ + ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal); + + /* Initialize Ant Diversity settings from EEPROM */ + if (pModal->version >= 3) { + ant_div_control1 = pModal->antdiv_ctl1; + ant_div_control2 = pModal->antdiv_ctl2; + + regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); + regVal &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL)); + + regVal |= SM(ant_div_control1, + AR_PHY_9285_ANT_DIV_CTL); + regVal |= SM(ant_div_control2, + AR_PHY_9285_ANT_DIV_ALT_LNACONF); + regVal |= SM((ant_div_control2 >> 2), + AR_PHY_9285_ANT_DIV_MAIN_LNACONF); + regVal |= SM((ant_div_control1 >> 1), + AR_PHY_9285_ANT_DIV_ALT_GAINTB); + regVal |= SM((ant_div_control1 >> 2), + AR_PHY_9285_ANT_DIV_MAIN_GAINTB); + + + REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal); + regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL); + regVal = REG_READ(ah, AR_PHY_CCK_DETECT); + regVal &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); + regVal |= SM((ant_div_control1 >> 3), + AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV); + + REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal); + regVal = REG_READ(ah, AR_PHY_CCK_DETECT); + } + + if (pModal->version >= 2) { + ob[0] = pModal->ob_0; + ob[1] = pModal->ob_1; + ob[2] = pModal->ob_2; + ob[3] = pModal->ob_3; + ob[4] = pModal->ob_4; + + db1[0] = pModal->db1_0; + db1[1] = pModal->db1_1; + db1[2] = pModal->db1_2; + db1[3] = pModal->db1_3; + db1[4] = pModal->db1_4; + + db2[0] = pModal->db2_0; + db2[1] = pModal->db2_1; + db2[2] = pModal->db2_2; + db2[3] = pModal->db2_3; + db2[4] = pModal->db2_4; + } else if (pModal->version == 1) { + ob[0] = pModal->ob_0; + ob[1] = ob[2] = ob[3] = ob[4] = pModal->ob_1; + db1[0] = pModal->db1_0; + db1[1] = db1[2] = db1[3] = db1[4] = pModal->db1_1; + db2[0] = pModal->db2_0; + db2[1] = db2[2] = db2[3] = db2[4] = pModal->db2_1; + } else { + int i; + + for (i = 0; i < 5; i++) { + ob[i] = pModal->ob_0; + db1[i] = pModal->db1_0; + db2[i] = pModal->db1_0; + } + } + + if (AR_SREV_9271(ah)) { + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9271_AN_RF2G3_OB_cck, + AR9271_AN_RF2G3_OB_cck_S, + ob[0]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9271_AN_RF2G3_OB_psk, + AR9271_AN_RF2G3_OB_psk_S, + ob[1]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9271_AN_RF2G3_OB_qam, + AR9271_AN_RF2G3_OB_qam_S, + ob[2]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9271_AN_RF2G3_DB_1, + AR9271_AN_RF2G3_DB_1_S, + db1[0]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G4, + AR9271_AN_RF2G4_DB_2, + AR9271_AN_RF2G4_DB_2_S, + db2[0]); + } else { + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9285_AN_RF2G3_OB_0, + AR9285_AN_RF2G3_OB_0_S, + ob[0]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9285_AN_RF2G3_OB_1, + AR9285_AN_RF2G3_OB_1_S, + ob[1]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9285_AN_RF2G3_OB_2, + AR9285_AN_RF2G3_OB_2_S, + ob[2]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9285_AN_RF2G3_OB_3, + AR9285_AN_RF2G3_OB_3_S, + ob[3]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9285_AN_RF2G3_OB_4, + AR9285_AN_RF2G3_OB_4_S, + ob[4]); + + 
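+ /*
+  * Each ath9k_hw_analog_shift_rmw() call in this block is a masked
+  * read-modify-write of one bias field in the analog shift registers,
+  * roughly (sketch of the helper, not a verbatim copy):
+  *
+  *	regVal = REG_READ(ah, reg) & ~mask;
+  *	regVal |= (val << shift) & mask;
+  *	REG_WRITE(ah, reg, regVal);
+  *
+  * The ob[] values above go into RF2G3; the db1[]/db2[] values below
+  * go into RF2G3/RF2G4.
+  */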
ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9285_AN_RF2G3_DB1_0, + AR9285_AN_RF2G3_DB1_0_S, + db1[0]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9285_AN_RF2G3_DB1_1, + AR9285_AN_RF2G3_DB1_1_S, + db1[1]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G3, + AR9285_AN_RF2G3_DB1_2, + AR9285_AN_RF2G3_DB1_2_S, + db1[2]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G4, + AR9285_AN_RF2G4_DB1_3, + AR9285_AN_RF2G4_DB1_3_S, + db1[3]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G4, + AR9285_AN_RF2G4_DB1_4, + AR9285_AN_RF2G4_DB1_4_S, db1[4]); + + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G4, + AR9285_AN_RF2G4_DB2_0, + AR9285_AN_RF2G4_DB2_0_S, + db2[0]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G4, + AR9285_AN_RF2G4_DB2_1, + AR9285_AN_RF2G4_DB2_1_S, + db2[1]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G4, + AR9285_AN_RF2G4_DB2_2, + AR9285_AN_RF2G4_DB2_2_S, + db2[2]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G4, + AR9285_AN_RF2G4_DB2_3, + AR9285_AN_RF2G4_DB2_3_S, + db2[3]); + ath9k_hw_analog_shift_rmw(ah, + AR9285_AN_RF2G4, + AR9285_AN_RF2G4_DB2_4, + AR9285_AN_RF2G4_DB2_4_S, + db2[4]); + } + + + if (AR_SREV_9285_11(ah)) + REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT); + + REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, + pModal->switchSettling); + REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC, + pModal->adcDesiredSize); + + REG_WRITE(ah, AR_PHY_RF_CTL4, + SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) | + SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) | + SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) | + SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON)); + + REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, + pModal->txEndToRxOn); + + if (AR_SREV_9271_10(ah)) + REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, + pModal->txEndToRxOn); + REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, + pModal->thresh62); + REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62, + pModal->thresh62); + + if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_2) { + REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START, + pModal->txFrameToDataStart); + REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON, + pModal->txFrameToPaOn); + } + + if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_3) { + if (IS_CHAN_HT40(chan)) + REG_RMW_FIELD(ah, AR_PHY_SETTLING, + AR_PHY_SETTLING_SWITCH, + pModal->swSettleHt40); + } +} + +static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; + struct modal_eep_4k_header *pModal = &eep->modalHeader; + + return pModal->antCtrlCommon & 0xFFFF; +} + +static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah, + enum ieee80211_band freq_band) +{ + return 1; +} + +static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) +{ +#define EEP_MAP4K_SPURCHAN \ + (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan) + struct ath_common *common = ath9k_hw_common(ah); + + u16 spur_val = AR_NO_SPUR; + + ath_print(common, ATH_DBG_ANI, + "Getting spur idx %d is2Ghz. %d val %x\n", + i, is2GHz, ah->config.spurchans[i][is2GHz]); + + switch (ah->config.spurmode) { + case SPUR_DISABLE: + break; + case SPUR_ENABLE_IOCTL: + spur_val = ah->config.spurchans[i][is2GHz]; + ath_print(common, ATH_DBG_ANI, + "Getting spur val from new loc. 
%d\n", spur_val); + break; + case SPUR_ENABLE_EEPROM: + spur_val = EEP_MAP4K_SPURCHAN; + break; + } + + return spur_val; + +#undef EEP_MAP4K_SPURCHAN +} + +const struct eeprom_ops eep_4k_ops = { + .check_eeprom = ath9k_hw_4k_check_eeprom, + .get_eeprom = ath9k_hw_4k_get_eeprom, + .fill_eeprom = ath9k_hw_4k_fill_eeprom, + .get_eeprom_ver = ath9k_hw_4k_get_eeprom_ver, + .get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev, + .get_num_ant_config = ath9k_hw_4k_get_num_ant_config, + .get_eeprom_antenna_cfg = ath9k_hw_4k_get_eeprom_antenna_cfg, + .set_board_values = ath9k_hw_4k_set_board_values, + .set_addac = ath9k_hw_4k_set_addac, + .set_txpower = ath9k_hw_4k_set_txpower, + .get_spur_channel = ath9k_hw_4k_get_spur_channel +}; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c new file mode 100644 index 0000000..839d05a --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -0,0 +1,1184 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "hw.h" + +static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah) +{ + return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF; +} + +static int ath9k_hw_AR9287_get_eeprom_rev(struct ath_hw *ah) +{ + return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF; +} + +static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah) +{ + struct ar9287_eeprom *eep = &ah->eeprom.map9287; + struct ath_common *common = ath9k_hw_common(ah); + u16 *eep_data; + int addr, eep_start_loc = AR9287_EEP_START_LOC; + eep_data = (u16 *)eep; + + if (!ath9k_hw_use_flash(ah)) { + ath_print(common, ATH_DBG_EEPROM, + "Reading from EEPROM, not flash\n"); + } + + for (addr = 0; addr < sizeof(struct ar9287_eeprom) / sizeof(u16); + addr++) { + if (!ath9k_hw_nvram_read(common, + addr + eep_start_loc, eep_data)) { + ath_print(common, ATH_DBG_EEPROM, + "Unable to read eeprom region \n"); + return false; + } + eep_data++; + } + return true; +} + +static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah) +{ + u32 sum = 0, el, integer; + u16 temp, word, magic, magic2, *eepdata; + int i, addr; + bool need_swap = false; + struct ar9287_eeprom *eep = &ah->eeprom.map9287; + struct ath_common *common = ath9k_hw_common(ah); + + if (!ath9k_hw_use_flash(ah)) { + if (!ath9k_hw_nvram_read(common, + AR5416_EEPROM_MAGIC_OFFSET, &magic)) { + ath_print(common, ATH_DBG_FATAL, + "Reading Magic # failed\n"); + return false; + } + + ath_print(common, ATH_DBG_EEPROM, + "Read Magic = 0x%04X\n", magic); + if (magic != AR5416_EEPROM_MAGIC) { + magic2 = swab16(magic); + + if (magic2 == AR5416_EEPROM_MAGIC) { + need_swap = true; + eepdata = (u16 *)(&ah->eeprom); + + for (addr = 0; + addr < sizeof(struct ar9287_eeprom) / sizeof(u16); + addr++) { + temp = swab16(*eepdata); + *eepdata = temp; + 
eepdata++; + } + } else { + ath_print(common, ATH_DBG_FATAL, + "Invalid EEPROM Magic. " + "endianness mismatch.\n"); + return -EINVAL; + } + } + } + ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ? + "True" : "False"); + + if (need_swap) + el = swab16(ah->eeprom.map9287.baseEepHeader.length); + else + el = ah->eeprom.map9287.baseEepHeader.length; + + if (el > sizeof(struct ar9287_eeprom)) + el = sizeof(struct ar9287_eeprom) / sizeof(u16); + else + el = el / sizeof(u16); + + eepdata = (u16 *)(&ah->eeprom); + for (i = 0; i < el; i++) + sum ^= *eepdata++; + + if (need_swap) { + word = swab16(eep->baseEepHeader.length); + eep->baseEepHeader.length = word; + + word = swab16(eep->baseEepHeader.checksum); + eep->baseEepHeader.checksum = word; + + word = swab16(eep->baseEepHeader.version); + eep->baseEepHeader.version = word; + + word = swab16(eep->baseEepHeader.regDmn[0]); + eep->baseEepHeader.regDmn[0] = word; + + word = swab16(eep->baseEepHeader.regDmn[1]); + eep->baseEepHeader.regDmn[1] = word; + + word = swab16(eep->baseEepHeader.rfSilent); + eep->baseEepHeader.rfSilent = word; + + word = swab16(eep->baseEepHeader.blueToothOptions); + eep->baseEepHeader.blueToothOptions = word; + + word = swab16(eep->baseEepHeader.deviceCap); + eep->baseEepHeader.deviceCap = word; + + integer = swab32(eep->modalHeader.antCtrlCommon); + eep->modalHeader.antCtrlCommon = integer; + + for (i = 0; i < AR9287_MAX_CHAINS; i++) { + integer = swab32(eep->modalHeader.antCtrlChain[i]); + eep->modalHeader.antCtrlChain[i] = integer; + } + + for (i = 0; i < AR9287_EEPROM_MODAL_SPURS; i++) { + word = swab16(eep->modalHeader.spurChans[i].spurChan); + eep->modalHeader.spurChans[i].spurChan = word; + } + } + + if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER + || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { + ath_print(common, ATH_DBG_FATAL, + "Bad EEPROM checksum 0x%x or revision 0x%04x\n", + sum, ah->eep_ops->get_eeprom_ver(ah)); + return -EINVAL; + } + + return 0; +} + +static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah, + enum eeprom_param param) +{ + struct ar9287_eeprom *eep = &ah->eeprom.map9287; + struct modal_eep_ar9287_header *pModal = &eep->modalHeader; + struct base_eep_ar9287_header *pBase = &eep->baseEepHeader; + u16 ver_minor; + + ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK; + switch (param) { + case EEP_NFTHRESH_2: + return pModal->noiseFloorThreshCh[0]; + case AR_EEPROM_MAC(0): + return pBase->macAddr[0] << 8 | pBase->macAddr[1]; + case AR_EEPROM_MAC(1): + return pBase->macAddr[2] << 8 | pBase->macAddr[3]; + case AR_EEPROM_MAC(2): + return pBase->macAddr[4] << 8 | pBase->macAddr[5]; + case EEP_REG_0: + return pBase->regDmn[0]; + case EEP_REG_1: + return pBase->regDmn[1]; + case EEP_OP_CAP: + return pBase->deviceCap; + case EEP_OP_MODE: + return pBase->opCapFlags; + case EEP_RF_SILENT: + return pBase->rfSilent; + case EEP_MINOR_REV: + return ver_minor; + case EEP_TX_MASK: + return pBase->txMask; + case EEP_RX_MASK: + return pBase->rxMask; + case EEP_DEV_TYPE: + return pBase->deviceType; + case EEP_OL_PWRCTRL: + return pBase->openLoopPwrCntl; + case EEP_TEMPSENSE_SLOPE: + if (ver_minor >= AR9287_EEP_MINOR_VER_2) + return pBase->tempSensSlope; + else + return 0; + case EEP_TEMPSENSE_SLOPE_PAL_ON: + if (ver_minor >= AR9287_EEP_MINOR_VER_3) + return pBase->tempSensSlopePalOn; + else + return 0; + default: + return 0; + } +} + + +static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah, + struct ath9k_channel *chan, + struct 
cal_data_per_freq_ar9287 *pRawDataSet, + u8 *bChans, u16 availPiers, + u16 tPdGainOverlap, int16_t *pMinCalPower, + u16 *pPdGainBoundaries, u8 *pPDADCValues, + u16 numXpdGains) +{ +#define TMP_VAL_VPD_TABLE \ + ((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep)); + + int i, j, k; + int16_t ss; + u16 idxL = 0, idxR = 0, numPiers; + u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR; + u8 minPwrT4[AR9287_NUM_PD_GAINS]; + u8 maxPwrT4[AR9287_NUM_PD_GAINS]; + int16_t vpdStep; + int16_t tmpVal; + u16 sizeCurrVpdTable, maxIndex, tgtIndex; + bool match; + int16_t minDelta = 0; + struct chan_centers centers; + static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + static u8 vpdTableR[AR5416_EEP4K_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + for (numPiers = 0; numPiers < availPiers; numPiers++) { + if (bChans[numPiers] == AR9287_BCHAN_UNUSED) + break; + } + + match = ath9k_hw_get_lower_upper_index( + (u8)FREQ2FBIN(centers.synth_center, + IS_CHAN_2GHZ(chan)), bChans, numPiers, + &idxL, &idxR); + + if (match) { + for (i = 0; i < numXpdGains; i++) { + minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0]; + maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4]; + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pRawDataSet[idxL].pwrPdg[i], + pRawDataSet[idxL].vpdPdg[i], + AR9287_PD_GAIN_ICEPTS, vpdTableI[i]); + } + } else { + for (i = 0; i < numXpdGains; i++) { + pVpdL = pRawDataSet[idxL].vpdPdg[i]; + pPwrL = pRawDataSet[idxL].pwrPdg[i]; + pVpdR = pRawDataSet[idxR].vpdPdg[i]; + pPwrR = pRawDataSet[idxR].pwrPdg[i]; + + minPwrT4[i] = max(pPwrL[0], pPwrR[0]); + + maxPwrT4[i] = + min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1], + pPwrR[AR9287_PD_GAIN_ICEPTS - 1]); + + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pPwrL, pVpdL, + AR9287_PD_GAIN_ICEPTS, + vpdTableL[i]); + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pPwrR, pVpdR, + AR9287_PD_GAIN_ICEPTS, + vpdTableR[i]); + + for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) { + vpdTableI[i][j] = + (u8)(ath9k_hw_interpolate((u16) + FREQ2FBIN(centers. synth_center, + IS_CHAN_2GHZ(chan)), + bChans[idxL], bChans[idxR], + vpdTableL[i][j], vpdTableR[i][j])); + } + } + } + *pMinCalPower = (int16_t)(minPwrT4[0] / 2); + + k = 0; + for (i = 0; i < numXpdGains; i++) { + if (i == (numXpdGains - 1)) + pPdGainBoundaries[i] = (u16)(maxPwrT4[i] / 2); + else + pPdGainBoundaries[i] = (u16)((maxPwrT4[i] + + minPwrT4[i+1]) / 4); + + pPdGainBoundaries[i] = min((u16)AR5416_MAX_RATE_POWER, + pPdGainBoundaries[i]); + + + if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) { + minDelta = pPdGainBoundaries[0] - 23; + pPdGainBoundaries[0] = 23; + } else + minDelta = 0; + + if (i == 0) { + if (AR_SREV_9280_10_OR_LATER(ah)) + ss = (int16_t)(0 - (minPwrT4[i] / 2)); + else + ss = 0; + } else + ss = (int16_t)((pPdGainBoundaries[i-1] - + (minPwrT4[i] / 2)) - + tPdGainOverlap + 1 + minDelta); + + vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]); + vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); + while ((ss < 0) && (k < (AR9287_NUM_PDADC_VALUES - 1))) { + tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep); + pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal); + ss++; + } + + sizeCurrVpdTable = (u8)((maxPwrT4[i] - minPwrT4[i]) / 2 + 1); + tgtIndex = (u8)(pPdGainBoundaries[i] + + tPdGainOverlap - (minPwrT4[i] / 2)); + maxIndex = (tgtIndex < sizeCurrVpdTable) ? 
+ tgtIndex : sizeCurrVpdTable; + + while ((ss < maxIndex) && (k < (AR9287_NUM_PDADC_VALUES - 1))) + pPDADCValues[k++] = vpdTableI[i][ss++]; + + vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] - + vpdTableI[i][sizeCurrVpdTable - 2]); + vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); + if (tgtIndex > maxIndex) { + while ((ss <= tgtIndex) && + (k < (AR9287_NUM_PDADC_VALUES - 1))) { + tmpVal = (int16_t) TMP_VAL_VPD_TABLE; + pPDADCValues[k++] = (u8)((tmpVal > 255) ? + 255 : tmpVal); + ss++; + } + } + } + + while (i < AR9287_PD_GAINS_IN_MASK) { + pPdGainBoundaries[i] = pPdGainBoundaries[i-1]; + i++; + } + + while (k < AR9287_NUM_PDADC_VALUES) { + pPDADCValues[k] = pPDADCValues[k-1]; + k++; + } + +#undef TMP_VAL_VPD_TABLE +} + +static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah, + struct ath9k_channel *chan, + struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop, + u8 *pCalChans, u16 availPiers, + int8_t *pPwr) +{ + u16 idxL = 0, idxR = 0, numPiers; + bool match; + struct chan_centers centers; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + for (numPiers = 0; numPiers < availPiers; numPiers++) { + if (pCalChans[numPiers] == AR9287_BCHAN_UNUSED) + break; + } + + match = ath9k_hw_get_lower_upper_index( + (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)), + pCalChans, numPiers, + &idxL, &idxR); + + if (match) { + *pPwr = (int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0]; + } else { + *pPwr = ((int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0] + + (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2; + } + +} + +static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah, + int32_t txPower, u16 chain) +{ + u32 tmpVal; + u32 a; + + tmpVal = REG_READ(ah, 0xa270); + tmpVal = tmpVal & 0xFCFFFFFF; + tmpVal = tmpVal | (0x3 << 24); + REG_WRITE(ah, 0xa270, tmpVal); + + tmpVal = REG_READ(ah, 0xb270); + tmpVal = tmpVal & 0xFCFFFFFF; + tmpVal = tmpVal | (0x3 << 24); + REG_WRITE(ah, 0xb270, tmpVal); + + if (chain == 0) { + tmpVal = REG_READ(ah, 0xa398); + tmpVal = tmpVal & 0xff00ffff; + a = (txPower)&0xff; + tmpVal = tmpVal | (a << 16); + REG_WRITE(ah, 0xa398, tmpVal); + } + + if (chain == 1) { + tmpVal = REG_READ(ah, 0xb398); + tmpVal = tmpVal & 0xff00ffff; + a = (txPower)&0xff; + tmpVal = tmpVal | (a << 16); + REG_WRITE(ah, 0xb398, tmpVal); + } +} + +static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah, + struct ath9k_channel *chan, + int16_t *pTxPowerIndexOffset) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct cal_data_per_freq_ar9287 *pRawDataset; + struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop; + u8 *pCalBChans = NULL; + u16 pdGainOverlap_t2; + u8 pdadcValues[AR9287_NUM_PDADC_VALUES]; + u16 gainBoundaries[AR9287_PD_GAINS_IN_MASK]; + u16 numPiers = 0, i, j; + int16_t tMinCalPower; + u16 numXpdGain, xpdMask; + u16 xpdGainValues[AR9287_NUM_PD_GAINS] = {0, 0, 0, 0}; + u32 reg32, regOffset, regChainOffset; + int16_t modalIdx, diff = 0; + struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; + modalIdx = IS_CHAN_2GHZ(chan) ? 
1 : 0; + xpdMask = pEepData->modalHeader.xpdGain; + if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >= + AR9287_EEP_MINOR_VER_2) + pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; + else + pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), + AR_PHY_TPCRG5_PD_GAIN_OVERLAP)); + + if (IS_CHAN_2GHZ(chan)) { + pCalBChans = pEepData->calFreqPier2G; + numPiers = AR9287_NUM_2G_CAL_PIERS; + if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { + pRawDatasetOpenLoop = + (struct cal_data_op_loop_ar9287 *) + pEepData->calPierData2G[0]; + ah->initPDADC = pRawDatasetOpenLoop->vpdPdg[0][0]; + } + } + + numXpdGain = 0; + for (i = 1; i <= AR9287_PD_GAINS_IN_MASK; i++) { + if ((xpdMask >> (AR9287_PD_GAINS_IN_MASK - i)) & 1) { + if (numXpdGain >= AR9287_NUM_PD_GAINS) + break; + xpdGainValues[numXpdGain] = + (u16)(AR9287_PD_GAINS_IN_MASK-i); + numXpdGain++; + } + } + + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, + (numXpdGain - 1) & 0x3); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1, + xpdGainValues[0]); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2, + xpdGainValues[1]); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, + xpdGainValues[2]); + + for (i = 0; i < AR9287_MAX_CHAINS; i++) { + regChainOffset = i * 0x1000; + if (pEepData->baseEepHeader.txMask & (1 << i)) { + pRawDatasetOpenLoop = (struct cal_data_op_loop_ar9287 *) + pEepData->calPierData2G[i]; + if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { + int8_t txPower; + ar9287_eeprom_get_tx_gain_index(ah, chan, + pRawDatasetOpenLoop, + pCalBChans, numPiers, + &txPower); + ar9287_eeprom_olpc_set_pdadcs(ah, txPower, i); + } else { + pRawDataset = + (struct cal_data_per_freq_ar9287 *) + pEepData->calPierData2G[i]; + ath9k_hw_get_AR9287_gain_boundaries_pdadcs( + ah, chan, pRawDataset, + pCalBChans, numPiers, + pdGainOverlap_t2, + &tMinCalPower, gainBoundaries, + pdadcValues, numXpdGain); + } + + if (i == 0) { + if (!ath9k_hw_AR9287_get_eeprom( + ah, EEP_OL_PWRCTRL)) { + REG_WRITE(ah, AR_PHY_TPCRG5 + + regChainOffset, + SM(pdGainOverlap_t2, + AR_PHY_TPCRG5_PD_GAIN_OVERLAP) | + SM(gainBoundaries[0], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) + | SM(gainBoundaries[1], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) + | SM(gainBoundaries[2], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) + | SM(gainBoundaries[3], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4)); + } + } + + if ((int32_t)AR9287_PWR_TABLE_OFFSET_DB != + pEepData->baseEepHeader.pwrTableOffset) { + diff = (u16) + (pEepData->baseEepHeader.pwrTableOffset + - (int32_t)AR9287_PWR_TABLE_OFFSET_DB); + diff *= 2; + + for (j = 0; + j < ((u16)AR9287_NUM_PDADC_VALUES-diff); + j++) + pdadcValues[j] = pdadcValues[j+diff]; + + for (j = (u16)(AR9287_NUM_PDADC_VALUES-diff); + j < AR9287_NUM_PDADC_VALUES; j++) + pdadcValues[j] = + pdadcValues[ + AR9287_NUM_PDADC_VALUES-diff]; + } + + if (!ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { + regOffset = AR_PHY_BASE + (672 << 2) + + regChainOffset; + for (j = 0; j < 32; j++) { + reg32 = ((pdadcValues[4*j + 0] + & 0xFF) << 0) | + ((pdadcValues[4*j + 1] + & 0xFF) << 8) | + ((pdadcValues[4*j + 2] + & 0xFF) << 16) | + ((pdadcValues[4*j + 3] + & 0xFF) << 24) ; + REG_WRITE(ah, regOffset, reg32); + + ath_print(common, ATH_DBG_EEPROM, + "PDADC (%d,%4x): %4.4x " + "%8.8x\n", + i, regChainOffset, regOffset, + reg32); + + ath_print(common, ATH_DBG_EEPROM, + "PDADC: Chain %d | " + "PDADC %3d Value %3d | " + "PDADC %3d Value %3d | " + "PDADC %3d Value %3d | " + "PDADC %3d Value %3d |\n", + i, 4 * j, pdadcValues[4 * j], + 4 * j + 1, + 
pdadcValues[4 * j + 1], + 4 * j + 2, + pdadcValues[4 * j + 2], + 4 * j + 3, + pdadcValues[4 * j + 3]); + + regOffset += 4; + } + } + } + } + + *pTxPowerIndexOffset = 0; +} + +static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah, + struct ath9k_channel *chan, int16_t *ratesArray, u16 cfgCtl, + u16 AntennaReduction, u16 twiceMaxRegulatoryPower, + u16 powerLimit) +{ +#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 +#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; + static const u16 tpScaleReductionTable[5] = + { 0, 3, 6, 9, AR5416_MAX_RATE_POWER }; + int i; + int16_t twiceLargestAntenna; + struct cal_ctl_data_ar9287 *rep; + struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} }, + targetPowerCck = {0, {0, 0, 0, 0} }; + struct cal_target_power_leg targetPowerOfdmExt = {0, {0, 0, 0, 0} }, + targetPowerCckExt = {0, {0, 0, 0, 0} }; + struct cal_target_power_ht targetPowerHt20, + targetPowerHt40 = {0, {0, 0, 0, 0} }; + u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; + u16 ctlModesFor11g[] = + {CTL_11B, CTL_11G, CTL_2GHT20, + CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40}; + u16 numCtlModes = 0, *pCtlMode = NULL, ctlMode, freq; + struct chan_centers centers; + int tx_chainmask; + u16 twiceMinEdgePower; + struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; + tx_chainmask = ah->txchainmask; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + twiceLargestAntenna = max(pEepData->modalHeader.antennaGainCh[0], + pEepData->modalHeader.antennaGainCh[1]); + + twiceLargestAntenna = (int16_t)min((AntennaReduction) - + twiceLargestAntenna, 0); + + maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; + if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) + maxRegAllowedPower -= + (tpScaleReductionTable[(regulatory->tp_scale)] * 2); + + scaledPower = min(powerLimit, maxRegAllowedPower); + + switch (ar5416_get_ntxchains(tx_chainmask)) { + case 1: + break; + case 2: + scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; + break; + case 3: + scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; + break; + } + scaledPower = max((u16)0, scaledPower); + + if (IS_CHAN_2GHZ(chan)) { + numCtlModes = + ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; + pCtlMode = ctlModesFor11g; + + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPowerCck, + AR9287_NUM_2G_CCK_TARGET_POWERS, + &targetPowerCck, 4, false); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPower2G, + AR9287_NUM_2G_20_TARGET_POWERS, + &targetPowerOfdm, 4, false); + ath9k_hw_get_target_powers(ah, chan, + pEepData->calTargetPower2GHT20, + AR9287_NUM_2G_20_TARGET_POWERS, + &targetPowerHt20, 8, false); + + if (IS_CHAN_HT40(chan)) { + numCtlModes = ARRAY_SIZE(ctlModesFor11g); + ath9k_hw_get_target_powers(ah, chan, + pEepData->calTargetPower2GHT40, + AR9287_NUM_2G_40_TARGET_POWERS, + &targetPowerHt40, 8, true); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPowerCck, + AR9287_NUM_2G_CCK_TARGET_POWERS, + &targetPowerCckExt, 4, true); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPower2G, + AR9287_NUM_2G_20_TARGET_POWERS, + &targetPowerOfdmExt, 4, true); + } + } + + for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { + bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || + (pCtlMode[ctlMode] == CTL_2GHT40); + if (isHt40CtlMode) + freq = centers.synth_center; + else if (pCtlMode[ctlMode] & EXT_ADDITIVE) + freq = centers.ext_center; + else + freq = 
centers.ctl_center; + + if (ah->eep_ops->get_eeprom_ver(ah) == 14 && + ah->eep_ops->get_eeprom_rev(ah) <= 2) + twiceMaxEdgePower = AR5416_MAX_RATE_POWER; + + for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { + if ((((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + pEepData->ctlIndex[i]) || + (((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ((pEepData->ctlIndex[i] & + CTL_MODE_M) | SD_NO_CTL))) { + + rep = &(pEepData->ctlData[i]); + twiceMinEdgePower = ath9k_hw_get_max_edge_power( + freq, + rep->ctlEdges[ar5416_get_ntxchains( + tx_chainmask) - 1], + IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES); + + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) + twiceMaxEdgePower = min( + twiceMaxEdgePower, + twiceMinEdgePower); + else { + twiceMaxEdgePower = twiceMinEdgePower; + break; + } + } + } + + minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); + + switch (pCtlMode[ctlMode]) { + case CTL_11B: + for (i = 0; + i < ARRAY_SIZE(targetPowerCck.tPow2x); + i++) { + targetPowerCck.tPow2x[i] = (u8)min( + (u16)targetPowerCck.tPow2x[i], + minCtlPower); + } + break; + case CTL_11A: + case CTL_11G: + for (i = 0; + i < ARRAY_SIZE(targetPowerOfdm.tPow2x); + i++) { + targetPowerOfdm.tPow2x[i] = (u8)min( + (u16)targetPowerOfdm.tPow2x[i], + minCtlPower); + } + break; + case CTL_5GHT20: + case CTL_2GHT20: + for (i = 0; + i < ARRAY_SIZE(targetPowerHt20.tPow2x); + i++) { + targetPowerHt20.tPow2x[i] = (u8)min( + (u16)targetPowerHt20.tPow2x[i], + minCtlPower); + } + break; + case CTL_11B_EXT: + targetPowerCckExt.tPow2x[0] = (u8)min( + (u16)targetPowerCckExt.tPow2x[0], + minCtlPower); + break; + case CTL_11A_EXT: + case CTL_11G_EXT: + targetPowerOfdmExt.tPow2x[0] = (u8)min( + (u16)targetPowerOfdmExt.tPow2x[0], + minCtlPower); + break; + case CTL_5GHT40: + case CTL_2GHT40: + for (i = 0; + i < ARRAY_SIZE(targetPowerHt40.tPow2x); + i++) { + targetPowerHt40.tPow2x[i] = (u8)min( + (u16)targetPowerHt40.tPow2x[i], + minCtlPower); + } + break; + default: + break; + } + } + + ratesArray[rate6mb] = + ratesArray[rate9mb] = + ratesArray[rate12mb] = + ratesArray[rate18mb] = + ratesArray[rate24mb] = + targetPowerOfdm.tPow2x[0]; + + ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; + ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; + ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3]; + ratesArray[rateXr] = targetPowerOfdm.tPow2x[0]; + + for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) + ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i]; + + if (IS_CHAN_2GHZ(chan)) { + ratesArray[rate1l] = targetPowerCck.tPow2x[0]; + ratesArray[rate2s] = ratesArray[rate2l] = + targetPowerCck.tPow2x[1]; + ratesArray[rate5_5s] = ratesArray[rate5_5l] = + targetPowerCck.tPow2x[2]; + ratesArray[rate11s] = ratesArray[rate11l] = + targetPowerCck.tPow2x[3]; + } + if (IS_CHAN_HT40(chan)) { + for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) + ratesArray[rateHt40_0 + i] = targetPowerHt40.tPow2x[i]; + + ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0]; + ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0]; + ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0]; + if (IS_CHAN_2GHZ(chan)) + ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0]; + } + +#undef REDUCE_SCALED_POWER_BY_TWO_CHAIN +#undef REDUCE_SCALED_POWER_BY_THREE_CHAIN +} + +static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah, + struct ath9k_channel *chan, u16 cfgCtl, + u8 twiceAntennaReduction, + u8 twiceMaxRegulatoryPower, + u8 powerLimit) +{ +#define INCREASE_MAXPOW_BY_TWO_CHAIN 6 +#define INCREASE_MAXPOW_BY_THREE_CHAIN 10 + 
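+	/*
+	 * Powers here are carried in half-dB steps ("twice" the dB value).
+	 * When the maximum power level is reported for N active tx chains,
+	 * roughly 2 * 10*log10(N) half-dB is added back (about 6 for two
+	 * chains, about 10 for three), mirroring the per-chain reduction
+	 * applied in ath9k_hw_set_AR9287_power_per_rate_table().
+	 */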
struct ath_common *common = ath9k_hw_common(ah); + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; + struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader; + int16_t ratesArray[Ar5416RateSize]; + int16_t txPowerIndexOffset = 0; + u8 ht40PowerIncForPdadc = 2; + int i; + + memset(ratesArray, 0, sizeof(ratesArray)); + + if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >= + AR9287_EEP_MINOR_VER_2) + ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; + + ath9k_hw_set_AR9287_power_per_rate_table(ah, chan, + &ratesArray[0], cfgCtl, + twiceAntennaReduction, + twiceMaxRegulatoryPower, + powerLimit); + + ath9k_hw_set_AR9287_power_cal_table(ah, chan, &txPowerIndexOffset); + + for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { + ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); + if (ratesArray[i] > AR9287_MAX_RATE_POWER) + ratesArray[i] = AR9287_MAX_RATE_POWER; + } + + if (AR_SREV_9280_10_OR_LATER(ah)) { + for (i = 0; i < Ar5416RateSize; i++) + ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2; + } + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, + ATH9K_POW_SM(ratesArray[rate18mb], 24) + | ATH9K_POW_SM(ratesArray[rate12mb], 16) + | ATH9K_POW_SM(ratesArray[rate9mb], 8) + | ATH9K_POW_SM(ratesArray[rate6mb], 0)); + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE2, + ATH9K_POW_SM(ratesArray[rate54mb], 24) + | ATH9K_POW_SM(ratesArray[rate48mb], 16) + | ATH9K_POW_SM(ratesArray[rate36mb], 8) + | ATH9K_POW_SM(ratesArray[rate24mb], 0)); + + if (IS_CHAN_2GHZ(chan)) { + REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, + ATH9K_POW_SM(ratesArray[rate2s], 24) + | ATH9K_POW_SM(ratesArray[rate2l], 16) + | ATH9K_POW_SM(ratesArray[rateXr], 8) + | ATH9K_POW_SM(ratesArray[rate1l], 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE4, + ATH9K_POW_SM(ratesArray[rate11s], 24) + | ATH9K_POW_SM(ratesArray[rate11l], 16) + | ATH9K_POW_SM(ratesArray[rate5_5s], 8) + | ATH9K_POW_SM(ratesArray[rate5_5l], 0)); + } + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, + ATH9K_POW_SM(ratesArray[rateHt20_3], 24) + | ATH9K_POW_SM(ratesArray[rateHt20_2], 16) + | ATH9K_POW_SM(ratesArray[rateHt20_1], 8) + | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)); + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE6, + ATH9K_POW_SM(ratesArray[rateHt20_7], 24) + | ATH9K_POW_SM(ratesArray[rateHt20_6], 16) + | ATH9K_POW_SM(ratesArray[rateHt20_5], 8) + | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)); + + if (IS_CHAN_HT40(chan)) { + if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { + REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, + ATH9K_POW_SM(ratesArray[rateHt40_3], 24) + | ATH9K_POW_SM(ratesArray[rateHt40_2], 16) + | ATH9K_POW_SM(ratesArray[rateHt40_1], 8) + | ATH9K_POW_SM(ratesArray[rateHt40_0], 0)); + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE8, + ATH9K_POW_SM(ratesArray[rateHt40_7], 24) + | ATH9K_POW_SM(ratesArray[rateHt40_6], 16) + | ATH9K_POW_SM(ratesArray[rateHt40_5], 8) + | ATH9K_POW_SM(ratesArray[rateHt40_4], 0)); + } else { + REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, + ATH9K_POW_SM(ratesArray[rateHt40_3] + + ht40PowerIncForPdadc, 24) + | ATH9K_POW_SM(ratesArray[rateHt40_2] + + ht40PowerIncForPdadc, 16) + | ATH9K_POW_SM(ratesArray[rateHt40_1] + + ht40PowerIncForPdadc, 8) + | ATH9K_POW_SM(ratesArray[rateHt40_0] + + ht40PowerIncForPdadc, 0)); + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE8, + ATH9K_POW_SM(ratesArray[rateHt40_7] + + ht40PowerIncForPdadc, 24) + | ATH9K_POW_SM(ratesArray[rateHt40_6] + + ht40PowerIncForPdadc, 16) + | ATH9K_POW_SM(ratesArray[rateHt40_5] + + ht40PowerIncForPdadc, 8) + | ATH9K_POW_SM(ratesArray[rateHt40_4] + + 
ht40PowerIncForPdadc, 0)); + } + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, + ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) + | ATH9K_POW_SM(ratesArray[rateExtCck], 16) + | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) + | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); + } + + if (IS_CHAN_2GHZ(chan)) + i = rate1l; + else + i = rate6mb; + + if (AR_SREV_9280_10_OR_LATER(ah)) + regulatory->max_power_level = + ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2; + else + regulatory->max_power_level = ratesArray[i]; + + switch (ar5416_get_ntxchains(ah->txchainmask)) { + case 1: + break; + case 2: + regulatory->max_power_level += + INCREASE_MAXPOW_BY_TWO_CHAIN; + break; + case 3: + regulatory->max_power_level += + INCREASE_MAXPOW_BY_THREE_CHAIN; + break; + default: + ath_print(common, ATH_DBG_EEPROM, + "Invalid chainmask configuration\n"); + break; + } +} + +static void ath9k_hw_AR9287_set_addac(struct ath_hw *ah, + struct ath9k_channel *chan) +{ +} + +static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ar9287_eeprom *eep = &ah->eeprom.map9287; + struct modal_eep_ar9287_header *pModal = &eep->modalHeader; + u16 antWrites[AR9287_ANT_16S]; + u32 regChainOffset; + u8 txRxAttenLocal; + int i, j, offset_num; + + pModal = &eep->modalHeader; + + antWrites[0] = (u16)((pModal->antCtrlCommon >> 28) & 0xF); + antWrites[1] = (u16)((pModal->antCtrlCommon >> 24) & 0xF); + antWrites[2] = (u16)((pModal->antCtrlCommon >> 20) & 0xF); + antWrites[3] = (u16)((pModal->antCtrlCommon >> 16) & 0xF); + antWrites[4] = (u16)((pModal->antCtrlCommon >> 12) & 0xF); + antWrites[5] = (u16)((pModal->antCtrlCommon >> 8) & 0xF); + antWrites[6] = (u16)((pModal->antCtrlCommon >> 4) & 0xF); + antWrites[7] = (u16)(pModal->antCtrlCommon & 0xF); + + offset_num = 8; + + for (i = 0, j = offset_num; i < AR9287_MAX_CHAINS; i++) { + antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 28) & 0xf); + antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 10) & 0x3); + antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 8) & 0x3); + antWrites[j++] = 0; + antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 6) & 0x3); + antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 4) & 0x3); + antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 2) & 0x3); + antWrites[j++] = (u16)(pModal->antCtrlChain[i] & 0x3); + } + + REG_WRITE(ah, AR_PHY_SWITCH_COM, + ah->eep_ops->get_eeprom_antenna_cfg(ah, chan)); + + for (i = 0; i < AR9287_MAX_CHAINS; i++) { + regChainOffset = i * 0x1000; + + REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset, + pModal->antCtrlChain[i]); + + REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, + (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) + & ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | + AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | + SM(pModal->iqCalICh[i], + AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | + SM(pModal->iqCalQCh[i], + AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); + + txRxAttenLocal = pModal->txRxAttenCh[i]; + + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, + pModal->bswMargin[i]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN1_DB, + pModal->bswAtten[i]); + REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset, + AR9280_PHY_RXGAIN_TXRX_ATTEN, + txRxAttenLocal); + REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset, + AR9280_PHY_RXGAIN_TXRX_MARGIN, + pModal->rxTxMarginCh[i]); + } + + + if (IS_CHAN_HT40(chan)) + REG_RMW_FIELD(ah, AR_PHY_SETTLING, + AR_PHY_SETTLING_SWITCH, pModal->swSettleHt40); + else + REG_RMW_FIELD(ah, AR_PHY_SETTLING, + 
AR_PHY_SETTLING_SWITCH, pModal->switchSettling); + + REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, + AR_PHY_DESIRED_SZ_ADC, pModal->adcDesiredSize); + + REG_WRITE(ah, AR_PHY_RF_CTL4, + SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) + | SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) + | SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) + | SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON)); + + REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, + AR_PHY_TX_END_TO_A2_RX_ON, pModal->txEndToRxOn); + + REG_RMW_FIELD(ah, AR_PHY_CCA, + AR9280_PHY_CCA_THRESH62, pModal->thresh62); + REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, + AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62); + + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, AR9287_AN_RF2G3_DB1, + AR9287_AN_RF2G3_DB1_S, pModal->db1); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, AR9287_AN_RF2G3_DB2, + AR9287_AN_RF2G3_DB2_S, pModal->db2); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, + AR9287_AN_RF2G3_OB_CCK, + AR9287_AN_RF2G3_OB_CCK_S, pModal->ob_cck); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, + AR9287_AN_RF2G3_OB_PSK, + AR9287_AN_RF2G3_OB_PSK_S, pModal->ob_psk); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, + AR9287_AN_RF2G3_OB_QAM, + AR9287_AN_RF2G3_OB_QAM_S, pModal->ob_qam); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, + AR9287_AN_RF2G3_OB_PAL_OFF, + AR9287_AN_RF2G3_OB_PAL_OFF_S, + pModal->ob_pal_off); + + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, + AR9287_AN_RF2G3_DB1, AR9287_AN_RF2G3_DB1_S, + pModal->db1); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, AR9287_AN_RF2G3_DB2, + AR9287_AN_RF2G3_DB2_S, pModal->db2); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, + AR9287_AN_RF2G3_OB_CCK, + AR9287_AN_RF2G3_OB_CCK_S, pModal->ob_cck); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, + AR9287_AN_RF2G3_OB_PSK, + AR9287_AN_RF2G3_OB_PSK_S, pModal->ob_psk); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, + AR9287_AN_RF2G3_OB_QAM, + AR9287_AN_RF2G3_OB_QAM_S, pModal->ob_qam); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, + AR9287_AN_RF2G3_OB_PAL_OFF, + AR9287_AN_RF2G3_OB_PAL_OFF_S, + pModal->ob_pal_off); + + REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, + AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart); + REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, + AR_PHY_TX_END_PA_ON, pModal->txFrameToPaOn); + + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TOP2, + AR9287_AN_TOP2_XPABIAS_LVL, + AR9287_AN_TOP2_XPABIAS_LVL_S, + pModal->xpaBiasLvl); +} + +static u8 ath9k_hw_AR9287_get_num_ant_config(struct ath_hw *ah, + enum ieee80211_band freq_band) +{ + return 1; +} + +static u16 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ar9287_eeprom *eep = &ah->eeprom.map9287; + struct modal_eep_ar9287_header *pModal = &eep->modalHeader; + + return pModal->antCtrlCommon & 0xFFFF; +} + +static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah, + u16 i, bool is2GHz) +{ +#define EEP_MAP9287_SPURCHAN \ + (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan) + struct ath_common *common = ath9k_hw_common(ah); + u16 spur_val = AR_NO_SPUR; + + ath_print(common, ATH_DBG_ANI, + "Getting spur idx %d is2Ghz. %d val %x\n", + i, is2GHz, ah->config.spurchans[i][is2GHz]); + + switch (ah->config.spurmode) { + case SPUR_DISABLE: + break; + case SPUR_ENABLE_IOCTL: + spur_val = ah->config.spurchans[i][is2GHz]; + ath_print(common, ATH_DBG_ANI, + "Getting spur val from new loc. 
%d\n", spur_val); + break; + case SPUR_ENABLE_EEPROM: + spur_val = EEP_MAP9287_SPURCHAN; + break; + } + + return spur_val; + +#undef EEP_MAP9287_SPURCHAN +} + +const struct eeprom_ops eep_AR9287_ops = { + .check_eeprom = ath9k_hw_AR9287_check_eeprom, + .get_eeprom = ath9k_hw_AR9287_get_eeprom, + .fill_eeprom = ath9k_hw_AR9287_fill_eeprom, + .get_eeprom_ver = ath9k_hw_AR9287_get_eeprom_ver, + .get_eeprom_rev = ath9k_hw_AR9287_get_eeprom_rev, + .get_num_ant_config = ath9k_hw_AR9287_get_num_ant_config, + .get_eeprom_antenna_cfg = ath9k_hw_AR9287_get_eeprom_antenna_cfg, + .set_board_values = ath9k_hw_AR9287_set_board_values, + .set_addac = ath9k_hw_AR9287_set_addac, + .set_txpower = ath9k_hw_AR9287_set_txpower, + .get_spur_channel = ath9k_hw_AR9287_get_spur_channel +}; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c new file mode 100644 index 0000000..404a034 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -0,0 +1,1486 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "hw.h" + +static void ath9k_get_txgain_index(struct ath_hw *ah, + struct ath9k_channel *chan, + struct calDataPerFreqOpLoop *rawDatasetOpLoop, + u8 *calChans, u16 availPiers, u8 *pwr, u8 *pcdacIdx) +{ + u8 pcdac, i = 0; + u16 idxL = 0, idxR = 0, numPiers; + bool match; + struct chan_centers centers; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + for (numPiers = 0; numPiers < availPiers; numPiers++) + if (calChans[numPiers] == AR5416_BCHAN_UNUSED) + break; + + match = ath9k_hw_get_lower_upper_index( + (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)), + calChans, numPiers, &idxL, &idxR); + if (match) { + pcdac = rawDatasetOpLoop[idxL].pcdac[0][0]; + *pwr = rawDatasetOpLoop[idxL].pwrPdg[0][0]; + } else { + pcdac = rawDatasetOpLoop[idxR].pcdac[0][0]; + *pwr = (rawDatasetOpLoop[idxL].pwrPdg[0][0] + + rawDatasetOpLoop[idxR].pwrPdg[0][0])/2; + } + + while (pcdac > ah->originalGain[i] && + i < (AR9280_TX_GAIN_TABLE_SIZE - 1)) + i++; + + *pcdacIdx = i; + return; +} + +static void ath9k_olc_get_pdadcs(struct ath_hw *ah, + u32 initTxGain, + int txPower, + u8 *pPDADCValues) +{ + u32 i; + u32 offset; + + REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL6_0, + AR_PHY_TX_PWRCTRL_ERR_EST_MODE, 3); + REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL6_1, + AR_PHY_TX_PWRCTRL_ERR_EST_MODE, 3); + + REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL7, + AR_PHY_TX_PWRCTRL_INIT_TX_GAIN, initTxGain); + + offset = txPower; + for (i = 0; i < AR5416_NUM_PDADC_VALUES; i++) + if (i < offset) + pPDADCValues[i] = 0x0; + else + pPDADCValues[i] = 0xFF; +} + +static int ath9k_hw_def_get_eeprom_ver(struct ath_hw *ah) +{ + return ((ah->eeprom.def.baseEepHeader.version >> 12) & 0xF); +} + +static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah) +{ + return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF); +} + +static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) +{ +#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) + struct ath_common *common = ath9k_hw_common(ah); + u16 *eep_data = (u16 *)&ah->eeprom.def; + int addr, ar5416_eep_start_loc = 0x100; + + for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) { + if (!ath9k_hw_nvram_read(common, addr + ar5416_eep_start_loc, + eep_data)) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Unable to read eeprom region\n"); + return false; + } + eep_data++; + } + return true; +#undef SIZE_EEPROM_DEF +} + +static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) +{ + struct ar5416_eeprom_def *eep = + (struct ar5416_eeprom_def *) &ah->eeprom.def; + struct ath_common *common = ath9k_hw_common(ah); + u16 *eepdata, temp, magic, magic2; + u32 sum = 0, el; + bool need_swap = false; + int i, addr, size; + + if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { + ath_print(common, ATH_DBG_FATAL, "Reading Magic # failed\n"); + return false; + } + + if (!ath9k_hw_use_flash(ah)) { + ath_print(common, ATH_DBG_EEPROM, + "Read Magic = 0x%04X\n", magic); + + if (magic != AR5416_EEPROM_MAGIC) { + magic2 = swab16(magic); + + if (magic2 == AR5416_EEPROM_MAGIC) { + size = sizeof(struct ar5416_eeprom_def); + need_swap = true; + eepdata = (u16 *) (&ah->eeprom); + + for (addr = 0; addr < size / sizeof(u16); addr++) { + temp = swab16(*eepdata); + *eepdata = temp; + eepdata++; + } + } else { + ath_print(common, ATH_DBG_FATAL, + "Invalid EEPROM Magic. " + "Endianness mismatch.\n"); + return -EINVAL; + } + } + } + + ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", + need_swap ? 
"True" : "False"); + + if (need_swap) + el = swab16(ah->eeprom.def.baseEepHeader.length); + else + el = ah->eeprom.def.baseEepHeader.length; + + if (el > sizeof(struct ar5416_eeprom_def)) + el = sizeof(struct ar5416_eeprom_def) / sizeof(u16); + else + el = el / sizeof(u16); + + eepdata = (u16 *)(&ah->eeprom); + + for (i = 0; i < el; i++) + sum ^= *eepdata++; + + if (need_swap) { + u32 integer, j; + u16 word; + + ath_print(common, ATH_DBG_EEPROM, + "EEPROM Endianness is not native.. Changing.\n"); + + word = swab16(eep->baseEepHeader.length); + eep->baseEepHeader.length = word; + + word = swab16(eep->baseEepHeader.checksum); + eep->baseEepHeader.checksum = word; + + word = swab16(eep->baseEepHeader.version); + eep->baseEepHeader.version = word; + + word = swab16(eep->baseEepHeader.regDmn[0]); + eep->baseEepHeader.regDmn[0] = word; + + word = swab16(eep->baseEepHeader.regDmn[1]); + eep->baseEepHeader.regDmn[1] = word; + + word = swab16(eep->baseEepHeader.rfSilent); + eep->baseEepHeader.rfSilent = word; + + word = swab16(eep->baseEepHeader.blueToothOptions); + eep->baseEepHeader.blueToothOptions = word; + + word = swab16(eep->baseEepHeader.deviceCap); + eep->baseEepHeader.deviceCap = word; + + for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) { + struct modal_eep_header *pModal = + &eep->modalHeader[j]; + integer = swab32(pModal->antCtrlCommon); + pModal->antCtrlCommon = integer; + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + integer = swab32(pModal->antCtrlChain[i]); + pModal->antCtrlChain[i] = integer; + } + + for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) { + word = swab16(pModal->spurChans[i].spurChan); + pModal->spurChans[i].spurChan = word; + } + } + } + + if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || + ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { + ath_print(common, ATH_DBG_FATAL, + "Bad EEPROM checksum 0x%x or revision 0x%04x\n", + sum, ah->eep_ops->get_eeprom_ver(ah)); + return -EINVAL; + } + + return 0; +} + +static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah, + enum eeprom_param param) +{ + struct ar5416_eeprom_def *eep = &ah->eeprom.def; + struct modal_eep_header *pModal = eep->modalHeader; + struct base_eep_header *pBase = &eep->baseEepHeader; + + switch (param) { + case EEP_NFTHRESH_5: + return pModal[0].noiseFloorThreshCh[0]; + case EEP_NFTHRESH_2: + return pModal[1].noiseFloorThreshCh[0]; + case AR_EEPROM_MAC(0): + return pBase->macAddr[0] << 8 | pBase->macAddr[1]; + case AR_EEPROM_MAC(1): + return pBase->macAddr[2] << 8 | pBase->macAddr[3]; + case AR_EEPROM_MAC(2): + return pBase->macAddr[4] << 8 | pBase->macAddr[5]; + case EEP_REG_0: + return pBase->regDmn[0]; + case EEP_REG_1: + return pBase->regDmn[1]; + case EEP_OP_CAP: + return pBase->deviceCap; + case EEP_OP_MODE: + return pBase->opCapFlags; + case EEP_RF_SILENT: + return pBase->rfSilent; + case EEP_OB_5: + return pModal[0].ob; + case EEP_DB_5: + return pModal[0].db; + case EEP_OB_2: + return pModal[1].ob; + case EEP_DB_2: + return pModal[1].db; + case EEP_MINOR_REV: + return AR5416_VER_MASK; + case EEP_TX_MASK: + return pBase->txMask; + case EEP_RX_MASK: + return pBase->rxMask; + case EEP_RXGAIN_TYPE: + return pBase->rxGainType; + case EEP_TXGAIN_TYPE: + return pBase->txGainType; + case EEP_OL_PWRCTRL: + if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) + return pBase->openLoopPwrCntl ? 
true : false; + else + return false; + case EEP_RC_CHAIN_MASK: + if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) + return pBase->rcChainMask; + else + return 0; + case EEP_DAC_HPWR_5G: + if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) + return pBase->dacHiPwrMode_5G; + else + return 0; + case EEP_FRAC_N_5G: + if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_22) + return pBase->frac_n_5g; + else + return 0; + case EEP_PWR_TABLE_OFFSET: + if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_21) + return pBase->pwr_table_offset; + else + return AR5416_PWR_TABLE_OFFSET_DB; + default: + return 0; + } +} + +static void ath9k_hw_def_set_gain(struct ath_hw *ah, + struct modal_eep_header *pModal, + struct ar5416_eeprom_def *eep, + u8 txRxAttenLocal, int regChainOffset, int i) +{ + if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { + txRxAttenLocal = pModal->txRxAttenCh[i]; + + if (AR_SREV_9280_10_OR_LATER(ah)) { + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, + pModal->bswMargin[i]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN1_DB, + pModal->bswAtten[i]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, + pModal->xatten2Margin[i]); + REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN2_DB, + pModal->xatten2Db[i]); + } else { + REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) & + ~AR_PHY_GAIN_2GHZ_BSW_MARGIN) + | SM(pModal-> bswMargin[i], + AR_PHY_GAIN_2GHZ_BSW_MARGIN)); + REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset, + (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) & + ~AR_PHY_GAIN_2GHZ_BSW_ATTEN) + | SM(pModal->bswAtten[i], + AR_PHY_GAIN_2GHZ_BSW_ATTEN)); + } + } + + if (AR_SREV_9280_10_OR_LATER(ah)) { + REG_RMW_FIELD(ah, + AR_PHY_RXGAIN + regChainOffset, + AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); + REG_RMW_FIELD(ah, + AR_PHY_RXGAIN + regChainOffset, + AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]); + } else { + REG_WRITE(ah, + AR_PHY_RXGAIN + regChainOffset, + (REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) & + ~AR_PHY_RXGAIN_TXRX_ATTEN) + | SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN)); + REG_WRITE(ah, + AR_PHY_GAIN_2GHZ + regChainOffset, + (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) & + ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) | + SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN)); + } +} + +static void ath9k_hw_def_set_board_values(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct modal_eep_header *pModal; + struct ar5416_eeprom_def *eep = &ah->eeprom.def; + int i, regChainOffset; + u8 txRxAttenLocal; + + pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); + txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44; + + REG_WRITE(ah, AR_PHY_SWITCH_COM, + ah->eep_ops->get_eeprom_antenna_cfg(ah, chan)); + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + if (AR_SREV_9280(ah)) { + if (i >= 2) + break; + } + + if (AR_SREV_5416_20_OR_LATER(ah) && + (ah->rxchainmask == 5 || ah->txchainmask == 5) && (i != 0)) + regChainOffset = (i == 1) ? 
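+			/* On AR5416 2.0 and later with a chainmask of 5
+			 * (chains 0 and 2), chain indices 1 and 2 address
+			 * each other's register block: index 1 uses offset
+			 * 0x2000 and index 2 uses 0x1000, instead of the
+			 * usual i * 0x1000 mapping. */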
0x2000 : 0x1000; + else + regChainOffset = i * 0x1000; + + REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset, + pModal->antCtrlChain[i]); + + REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, + (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) & + ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | + AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | + SM(pModal->iqCalICh[i], + AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | + SM(pModal->iqCalQCh[i], + AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); + + if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) + ath9k_hw_def_set_gain(ah, pModal, eep, txRxAttenLocal, + regChainOffset, i); + } + + if (AR_SREV_9280_10_OR_LATER(ah)) { + if (IS_CHAN_2GHZ(chan)) { + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0, + AR_AN_RF2G1_CH0_OB, + AR_AN_RF2G1_CH0_OB_S, + pModal->ob); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0, + AR_AN_RF2G1_CH0_DB, + AR_AN_RF2G1_CH0_DB_S, + pModal->db); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1, + AR_AN_RF2G1_CH1_OB, + AR_AN_RF2G1_CH1_OB_S, + pModal->ob_ch1); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1, + AR_AN_RF2G1_CH1_DB, + AR_AN_RF2G1_CH1_DB_S, + pModal->db_ch1); + } else { + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0, + AR_AN_RF5G1_CH0_OB5, + AR_AN_RF5G1_CH0_OB5_S, + pModal->ob); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0, + AR_AN_RF5G1_CH0_DB5, + AR_AN_RF5G1_CH0_DB5_S, + pModal->db); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1, + AR_AN_RF5G1_CH1_OB5, + AR_AN_RF5G1_CH1_OB5_S, + pModal->ob_ch1); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1, + AR_AN_RF5G1_CH1_DB5, + AR_AN_RF5G1_CH1_DB5_S, + pModal->db_ch1); + } + ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2, + AR_AN_TOP2_XPABIAS_LVL, + AR_AN_TOP2_XPABIAS_LVL_S, + pModal->xpaBiasLvl); + ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2, + AR_AN_TOP2_LOCALBIAS, + AR_AN_TOP2_LOCALBIAS_S, + pModal->local_bias); + REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG, + pModal->force_xpaon); + } + + REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, + pModal->switchSettling); + REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC, + pModal->adcDesiredSize); + + if (!AR_SREV_9280_10_OR_LATER(ah)) + REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, + AR_PHY_DESIRED_SZ_PGA, + pModal->pgaDesiredSize); + + REG_WRITE(ah, AR_PHY_RF_CTL4, + SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) + | SM(pModal->txEndToXpaOff, + AR_PHY_RF_CTL4_TX_END_XPAB_OFF) + | SM(pModal->txFrameToXpaOn, + AR_PHY_RF_CTL4_FRAME_XPAA_ON) + | SM(pModal->txFrameToXpaOn, + AR_PHY_RF_CTL4_FRAME_XPAB_ON)); + + REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, + pModal->txEndToRxOn); + + if (AR_SREV_9280_10_OR_LATER(ah)) { + REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, + pModal->thresh62); + REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, + AR_PHY_EXT_CCA0_THRESH62, + pModal->thresh62); + } else { + REG_RMW_FIELD(ah, AR_PHY_CCA, AR_PHY_CCA_THRESH62, + pModal->thresh62); + REG_RMW_FIELD(ah, AR_PHY_EXT_CCA, + AR_PHY_EXT_CCA_THRESH62, + pModal->thresh62); + } + + if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) { + REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, + AR_PHY_TX_END_DATA_START, + pModal->txFrameToDataStart); + REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON, + pModal->txFrameToPaOn); + } + + if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { + if (IS_CHAN_HT40(chan)) + REG_RMW_FIELD(ah, AR_PHY_SETTLING, + AR_PHY_SETTLING_SWITCH, + pModal->swSettleHt40); + } + + if (AR_SREV_9280_20_OR_LATER(ah) && + AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19) + REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL, + AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK, + 
pModal->miscBits); + + + if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) { + if (IS_CHAN_2GHZ(chan)) + REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, + eep->baseEepHeader.dacLpMode); + else if (eep->baseEepHeader.dacHiPwrMode_5G) + REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, 0); + else + REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE, + eep->baseEepHeader.dacLpMode); + + udelay(100); + + REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP, + pModal->miscBits >> 2); + + REG_RMW_FIELD(ah, AR_PHY_TX_PWRCTRL9, + AR_PHY_TX_DESIRED_SCALE_CCK, + eep->baseEepHeader.desiredScaleCCK); + } +} + +static void ath9k_hw_def_set_addac(struct ath_hw *ah, + struct ath9k_channel *chan) +{ +#define XPA_LVL_FREQ(cnt) (pModal->xpaBiasLvlFreq[cnt]) + struct modal_eep_header *pModal; + struct ar5416_eeprom_def *eep = &ah->eeprom.def; + u8 biaslevel; + + if (ah->hw_version.macVersion != AR_SREV_VERSION_9160) + return; + + if (ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_MINOR_VER_7) + return; + + pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); + + if (pModal->xpaBiasLvl != 0xff) { + biaslevel = pModal->xpaBiasLvl; + } else { + u16 resetFreqBin, freqBin, freqCount = 0; + struct chan_centers centers; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + resetFreqBin = FREQ2FBIN(centers.synth_center, + IS_CHAN_2GHZ(chan)); + freqBin = XPA_LVL_FREQ(0) & 0xff; + biaslevel = (u8) (XPA_LVL_FREQ(0) >> 14); + + freqCount++; + + while (freqCount < 3) { + if (XPA_LVL_FREQ(freqCount) == 0x0) + break; + + freqBin = XPA_LVL_FREQ(freqCount) & 0xff; + if (resetFreqBin >= freqBin) + biaslevel = (u8)(XPA_LVL_FREQ(freqCount) >> 14); + else + break; + freqCount++; + } + } + + if (IS_CHAN_2GHZ(chan)) { + INI_RA(&ah->iniAddac, 7, 1) = (INI_RA(&ah->iniAddac, + 7, 1) & (~0x18)) | biaslevel << 3; + } else { + INI_RA(&ah->iniAddac, 6, 1) = (INI_RA(&ah->iniAddac, + 6, 1) & (~0xc0)) | biaslevel << 6; + } +#undef XPA_LVL_FREQ +} + +static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah, + struct ath9k_channel *chan, + struct cal_data_per_freq *pRawDataSet, + u8 *bChans, u16 availPiers, + u16 tPdGainOverlap, int16_t *pMinCalPower, + u16 *pPdGainBoundaries, u8 *pPDADCValues, + u16 numXpdGains) +{ + int i, j, k; + int16_t ss; + u16 idxL = 0, idxR = 0, numPiers; + static u8 vpdTableL[AR5416_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + static u8 vpdTableR[AR5416_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + static u8 vpdTableI[AR5416_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + + u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR; + u8 minPwrT4[AR5416_NUM_PD_GAINS]; + u8 maxPwrT4[AR5416_NUM_PD_GAINS]; + int16_t vpdStep; + int16_t tmpVal; + u16 sizeCurrVpdTable, maxIndex, tgtIndex; + bool match; + int16_t minDelta = 0; + struct chan_centers centers; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + for (numPiers = 0; numPiers < availPiers; numPiers++) { + if (bChans[numPiers] == AR5416_BCHAN_UNUSED) + break; + } + + match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center, + IS_CHAN_2GHZ(chan)), + bChans, numPiers, &idxL, &idxR); + + if (match) { + for (i = 0; i < numXpdGains; i++) { + minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0]; + maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4]; + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pRawDataSet[idxL].pwrPdg[i], + pRawDataSet[idxL].vpdPdg[i], + AR5416_PD_GAIN_ICEPTS, + vpdTableI[i]); + } + } else { + for (i = 0; i < numXpdGains; i++) { + pVpdL = pRawDataSet[idxL].vpdPdg[i]; + pPwrL = 
pRawDataSet[idxL].pwrPdg[i]; + pVpdR = pRawDataSet[idxR].vpdPdg[i]; + pPwrR = pRawDataSet[idxR].pwrPdg[i]; + + minPwrT4[i] = max(pPwrL[0], pPwrR[0]); + + maxPwrT4[i] = + min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1], + pPwrR[AR5416_PD_GAIN_ICEPTS - 1]); + + + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pPwrL, pVpdL, + AR5416_PD_GAIN_ICEPTS, + vpdTableL[i]); + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pPwrR, pVpdR, + AR5416_PD_GAIN_ICEPTS, + vpdTableR[i]); + + for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) { + vpdTableI[i][j] = + (u8)(ath9k_hw_interpolate((u16) + FREQ2FBIN(centers. + synth_center, + IS_CHAN_2GHZ + (chan)), + bChans[idxL], bChans[idxR], + vpdTableL[i][j], vpdTableR[i][j])); + } + } + } + + *pMinCalPower = (int16_t)(minPwrT4[0] / 2); + + k = 0; + + for (i = 0; i < numXpdGains; i++) { + if (i == (numXpdGains - 1)) + pPdGainBoundaries[i] = + (u16)(maxPwrT4[i] / 2); + else + pPdGainBoundaries[i] = + (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4); + + pPdGainBoundaries[i] = + min((u16)AR5416_MAX_RATE_POWER, pPdGainBoundaries[i]); + + if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) { + minDelta = pPdGainBoundaries[0] - 23; + pPdGainBoundaries[0] = 23; + } else { + minDelta = 0; + } + + if (i == 0) { + if (AR_SREV_9280_10_OR_LATER(ah)) + ss = (int16_t)(0 - (minPwrT4[i] / 2)); + else + ss = 0; + } else { + ss = (int16_t)((pPdGainBoundaries[i - 1] - + (minPwrT4[i] / 2)) - + tPdGainOverlap + 1 + minDelta); + } + vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]); + vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); + + while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { + tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep); + pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal); + ss++; + } + + sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1); + tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap - + (minPwrT4[i] / 2)); + maxIndex = (tgtIndex < sizeCurrVpdTable) ? + tgtIndex : sizeCurrVpdTable; + + while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { + pPDADCValues[k++] = vpdTableI[i][ss++]; + } + + vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] - + vpdTableI[i][sizeCurrVpdTable - 2]); + vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep); + + if (tgtIndex > maxIndex) { + while ((ss <= tgtIndex) && + (k < (AR5416_NUM_PDADC_VALUES - 1))) { + tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] + + (ss - maxIndex + 1) * vpdStep)); + pPDADCValues[k++] = (u8)((tmpVal > 255) ? + 255 : tmpVal); + ss++; + } + } + } + + while (i < AR5416_PD_GAINS_IN_MASK) { + pPdGainBoundaries[i] = pPdGainBoundaries[i - 1]; + i++; + } + + while (k < AR5416_NUM_PDADC_VALUES) { + pPDADCValues[k] = pPDADCValues[k - 1]; + k++; + } + + return; +} + +static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah, + u16 *gb, + u16 numXpdGain, + u16 pdGainOverlap_t2, + int8_t pwr_table_offset, + int16_t *diff) + +{ + u16 k; + + /* Prior to writing the boundaries or the pdadc vs. power table + * into the chip registers the default starting point on the pdadc + * vs. 
power table needs to be checked and the curve boundaries + * adjusted accordingly + */ + if (AR_SREV_9280_20_OR_LATER(ah)) { + u16 gb_limit; + + if (AR5416_PWR_TABLE_OFFSET_DB != pwr_table_offset) { + /* get the difference in dB */ + *diff = (u16)(pwr_table_offset - AR5416_PWR_TABLE_OFFSET_DB); + /* get the number of half dB steps */ + *diff *= 2; + /* change the original gain boundary settings + * by the number of half dB steps + */ + for (k = 0; k < numXpdGain; k++) + gb[k] = (u16)(gb[k] - *diff); + } + /* Because of a hardware limitation, ensure the gain boundary + * is not larger than (63 - overlap) + */ + gb_limit = (u16)(AR5416_MAX_RATE_POWER - pdGainOverlap_t2); + + for (k = 0; k < numXpdGain; k++) + gb[k] = (u16)min(gb_limit, gb[k]); + } + + return *diff; +} + +static void ath9k_adjust_pdadc_values(struct ath_hw *ah, + int8_t pwr_table_offset, + int16_t diff, + u8 *pdadcValues) +{ +#define NUM_PDADC(diff) (AR5416_NUM_PDADC_VALUES - diff) + u16 k; + + /* If this is a board that has a pwrTableOffset that differs from + * the default AR5416_PWR_TABLE_OFFSET_DB then the start of the + * pdadc vs pwr table needs to be adjusted prior to writing to the + * chip. + */ + if (AR_SREV_9280_20_OR_LATER(ah)) { + if (AR5416_PWR_TABLE_OFFSET_DB != pwr_table_offset) { + /* shift the table to start at the new offset */ + for (k = 0; k < (u16)NUM_PDADC(diff); k++ ) { + pdadcValues[k] = pdadcValues[k + diff]; + } + + /* fill the back of the table */ + for (k = (u16)NUM_PDADC(diff); k < NUM_PDADC(0); k++) { + pdadcValues[k] = pdadcValues[NUM_PDADC(diff)]; + } + } + } +#undef NUM_PDADC +} + +static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, + struct ath9k_channel *chan, + int16_t *pTxPowerIndexOffset) +{ +#define SM_PD_GAIN(x) SM(0x38, AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##x) +#define SM_PDGAIN_B(x, y) \ + SM((gainBoundaries[x]), AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##y) + struct ath_common *common = ath9k_hw_common(ah); + struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; + struct cal_data_per_freq *pRawDataset; + u8 *pCalBChans = NULL; + u16 pdGainOverlap_t2; + static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; + u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK]; + u16 numPiers, i, j; + int16_t tMinCalPower, diff = 0; + u16 numXpdGain, xpdMask; + u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 }; + u32 reg32, regOffset, regChainOffset; + int16_t modalIdx; + int8_t pwr_table_offset; + + modalIdx = IS_CHAN_2GHZ(chan) ? 
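+	/* modalHeader[] is indexed by band: entry 1 holds the 2.4 GHz
+	 * modal parameters and entry 0 the 5 GHz ones. */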
1 : 0; + xpdMask = pEepData->modalHeader[modalIdx].xpdGain; + + pwr_table_offset = ah->eep_ops->get_eeprom(ah, EEP_PWR_TABLE_OFFSET); + + if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_2) { + pdGainOverlap_t2 = + pEepData->modalHeader[modalIdx].pdGainOverlap; + } else { + pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), + AR_PHY_TPCRG5_PD_GAIN_OVERLAP)); + } + + if (IS_CHAN_2GHZ(chan)) { + pCalBChans = pEepData->calFreqPier2G; + numPiers = AR5416_NUM_2G_CAL_PIERS; + } else { + pCalBChans = pEepData->calFreqPier5G; + numPiers = AR5416_NUM_5G_CAL_PIERS; + } + + if (OLC_FOR_AR9280_20_LATER && IS_CHAN_2GHZ(chan)) { + pRawDataset = pEepData->calPierData2G[0]; + ah->initPDADC = ((struct calDataPerFreqOpLoop *) + pRawDataset)->vpdPdg[0][0]; + } + + numXpdGain = 0; + + for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) { + if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) { + if (numXpdGain >= AR5416_NUM_PD_GAINS) + break; + xpdGainValues[numXpdGain] = + (u16)(AR5416_PD_GAINS_IN_MASK - i); + numXpdGain++; + } + } + + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, + (numXpdGain - 1) & 0x3); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1, + xpdGainValues[0]); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2, + xpdGainValues[1]); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, + xpdGainValues[2]); + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + if (AR_SREV_5416_20_OR_LATER(ah) && + (ah->rxchainmask == 5 || ah->txchainmask == 5) && + (i != 0)) { + regChainOffset = (i == 1) ? 0x2000 : 0x1000; + } else + regChainOffset = i * 0x1000; + + if (pEepData->baseEepHeader.txMask & (1 << i)) { + if (IS_CHAN_2GHZ(chan)) + pRawDataset = pEepData->calPierData2G[i]; + else + pRawDataset = pEepData->calPierData5G[i]; + + + if (OLC_FOR_AR9280_20_LATER) { + u8 pcdacIdx; + u8 txPower; + + ath9k_get_txgain_index(ah, chan, + (struct calDataPerFreqOpLoop *)pRawDataset, + pCalBChans, numPiers, &txPower, &pcdacIdx); + ath9k_olc_get_pdadcs(ah, pcdacIdx, + txPower/2, pdadcValues); + } else { + ath9k_hw_get_def_gain_boundaries_pdadcs(ah, + chan, pRawDataset, + pCalBChans, numPiers, + pdGainOverlap_t2, + &tMinCalPower, + gainBoundaries, + pdadcValues, + numXpdGain); + } + + diff = ath9k_change_gain_boundary_setting(ah, + gainBoundaries, + numXpdGain, + pdGainOverlap_t2, + pwr_table_offset, + &diff); + + if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { + if (OLC_FOR_AR9280_20_LATER) { + REG_WRITE(ah, + AR_PHY_TPCRG5 + regChainOffset, + SM(0x6, + AR_PHY_TPCRG5_PD_GAIN_OVERLAP) | + SM_PD_GAIN(1) | SM_PD_GAIN(2) | + SM_PD_GAIN(3) | SM_PD_GAIN(4)); + } else { + REG_WRITE(ah, + AR_PHY_TPCRG5 + regChainOffset, + SM(pdGainOverlap_t2, + AR_PHY_TPCRG5_PD_GAIN_OVERLAP)| + SM_PDGAIN_B(0, 1) | + SM_PDGAIN_B(1, 2) | + SM_PDGAIN_B(2, 3) | + SM_PDGAIN_B(3, 4)); + } + } + + + ath9k_adjust_pdadc_values(ah, pwr_table_offset, + diff, pdadcValues); + + regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset; + for (j = 0; j < 32; j++) { + reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) | + ((pdadcValues[4 * j + 1] & 0xFF) << 8) | + ((pdadcValues[4 * j + 2] & 0xFF) << 16)| + ((pdadcValues[4 * j + 3] & 0xFF) << 24); + REG_WRITE(ah, regOffset, reg32); + + ath_print(common, ATH_DBG_EEPROM, + "PDADC (%d,%4x): %4.4x %8.8x\n", + i, regChainOffset, regOffset, + reg32); + ath_print(common, ATH_DBG_EEPROM, + "PDADC: Chain %d | PDADC %3d " + "Value %3d | PDADC %3d Value %3d | " + "PDADC %3d Value %3d | PDADC %3d " + "Value %3d |\n", + i, 4 * j, pdadcValues[4 * j], + 4 
* j + 1, pdadcValues[4 * j + 1], + 4 * j + 2, pdadcValues[4 * j + 2], + 4 * j + 3, + pdadcValues[4 * j + 3]); + + regOffset += 4; + } + } + } + + *pTxPowerIndexOffset = 0; +#undef SM_PD_GAIN +#undef SM_PDGAIN_B +} + +static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah, + struct ath9k_channel *chan, + int16_t *ratesArray, + u16 cfgCtl, + u16 AntennaReduction, + u16 twiceMaxRegulatoryPower, + u16 powerLimit) +{ +#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ +#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ + + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; + u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; + static const u16 tpScaleReductionTable[5] = + { 0, 3, 6, 9, AR5416_MAX_RATE_POWER }; + + int i; + int16_t twiceLargestAntenna; + struct cal_ctl_data *rep; + struct cal_target_power_leg targetPowerOfdm, targetPowerCck = { + 0, { 0, 0, 0, 0} + }; + struct cal_target_power_leg targetPowerOfdmExt = { + 0, { 0, 0, 0, 0} }, targetPowerCckExt = { + 0, { 0, 0, 0, 0 } + }; + struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = { + 0, {0, 0, 0, 0} + }; + u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; + u16 ctlModesFor11a[] = + { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 }; + u16 ctlModesFor11g[] = + { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, + CTL_2GHT40 + }; + u16 numCtlModes, *pCtlMode, ctlMode, freq; + struct chan_centers centers; + int tx_chainmask; + u16 twiceMinEdgePower; + + tx_chainmask = ah->txchainmask; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + twiceLargestAntenna = max( + pEepData->modalHeader + [IS_CHAN_2GHZ(chan)].antennaGainCh[0], + pEepData->modalHeader + [IS_CHAN_2GHZ(chan)].antennaGainCh[1]); + + twiceLargestAntenna = max((u8)twiceLargestAntenna, + pEepData->modalHeader + [IS_CHAN_2GHZ(chan)].antennaGainCh[2]); + + twiceLargestAntenna = (int16_t)min(AntennaReduction - + twiceLargestAntenna, 0); + + maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; + + if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) { + maxRegAllowedPower -= + (tpScaleReductionTable[(regulatory->tp_scale)] * 2); + } + + scaledPower = min(powerLimit, maxRegAllowedPower); + + switch (ar5416_get_ntxchains(tx_chainmask)) { + case 1: + break; + case 2: + scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; + break; + case 3: + scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; + break; + } + + scaledPower = max((u16)0, scaledPower); + + if (IS_CHAN_2GHZ(chan)) { + numCtlModes = ARRAY_SIZE(ctlModesFor11g) - + SUB_NUM_CTL_MODES_AT_2G_40; + pCtlMode = ctlModesFor11g; + + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPowerCck, + AR5416_NUM_2G_CCK_TARGET_POWERS, + &targetPowerCck, 4, false); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPower2G, + AR5416_NUM_2G_20_TARGET_POWERS, + &targetPowerOfdm, 4, false); + ath9k_hw_get_target_powers(ah, chan, + pEepData->calTargetPower2GHT20, + AR5416_NUM_2G_20_TARGET_POWERS, + &targetPowerHt20, 8, false); + + if (IS_CHAN_HT40(chan)) { + numCtlModes = ARRAY_SIZE(ctlModesFor11g); + ath9k_hw_get_target_powers(ah, chan, + pEepData->calTargetPower2GHT40, + AR5416_NUM_2G_40_TARGET_POWERS, + &targetPowerHt40, 8, true); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPowerCck, + AR5416_NUM_2G_CCK_TARGET_POWERS, + &targetPowerCckExt, 4, true); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPower2G, + AR5416_NUM_2G_20_TARGET_POWERS, + 
&targetPowerOfdmExt, 4, true); + } + } else { + numCtlModes = ARRAY_SIZE(ctlModesFor11a) - + SUB_NUM_CTL_MODES_AT_5G_40; + pCtlMode = ctlModesFor11a; + + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPower5G, + AR5416_NUM_5G_20_TARGET_POWERS, + &targetPowerOfdm, 4, false); + ath9k_hw_get_target_powers(ah, chan, + pEepData->calTargetPower5GHT20, + AR5416_NUM_5G_20_TARGET_POWERS, + &targetPowerHt20, 8, false); + + if (IS_CHAN_HT40(chan)) { + numCtlModes = ARRAY_SIZE(ctlModesFor11a); + ath9k_hw_get_target_powers(ah, chan, + pEepData->calTargetPower5GHT40, + AR5416_NUM_5G_40_TARGET_POWERS, + &targetPowerHt40, 8, true); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData->calTargetPower5G, + AR5416_NUM_5G_20_TARGET_POWERS, + &targetPowerOfdmExt, 4, true); + } + } + + for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { + bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || + (pCtlMode[ctlMode] == CTL_2GHT40); + if (isHt40CtlMode) + freq = centers.synth_center; + else if (pCtlMode[ctlMode] & EXT_ADDITIVE) + freq = centers.ext_center; + else + freq = centers.ctl_center; + + if (ah->eep_ops->get_eeprom_ver(ah) == 14 && + ah->eep_ops->get_eeprom_rev(ah) <= 2) + twiceMaxEdgePower = AR5416_MAX_RATE_POWER; + + for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { + if ((((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + pEepData->ctlIndex[i]) || + (((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) { + rep = &(pEepData->ctlData[i]); + + twiceMinEdgePower = ath9k_hw_get_max_edge_power(freq, + rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1], + IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES); + + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) { + twiceMaxEdgePower = min(twiceMaxEdgePower, + twiceMinEdgePower); + } else { + twiceMaxEdgePower = twiceMinEdgePower; + break; + } + } + } + + minCtlPower = min(twiceMaxEdgePower, scaledPower); + + switch (pCtlMode[ctlMode]) { + case CTL_11B: + for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) { + targetPowerCck.tPow2x[i] = + min((u16)targetPowerCck.tPow2x[i], + minCtlPower); + } + break; + case CTL_11A: + case CTL_11G: + for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) { + targetPowerOfdm.tPow2x[i] = + min((u16)targetPowerOfdm.tPow2x[i], + minCtlPower); + } + break; + case CTL_5GHT20: + case CTL_2GHT20: + for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) { + targetPowerHt20.tPow2x[i] = + min((u16)targetPowerHt20.tPow2x[i], + minCtlPower); + } + break; + case CTL_11B_EXT: + targetPowerCckExt.tPow2x[0] = min((u16) + targetPowerCckExt.tPow2x[0], + minCtlPower); + break; + case CTL_11A_EXT: + case CTL_11G_EXT: + targetPowerOfdmExt.tPow2x[0] = min((u16) + targetPowerOfdmExt.tPow2x[0], + minCtlPower); + break; + case CTL_5GHT40: + case CTL_2GHT40: + for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { + targetPowerHt40.tPow2x[i] = + min((u16)targetPowerHt40.tPow2x[i], + minCtlPower); + } + break; + default: + break; + } + } + + ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] = + ratesArray[rate18mb] = ratesArray[rate24mb] = + targetPowerOfdm.tPow2x[0]; + ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; + ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; + ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3]; + ratesArray[rateXr] = targetPowerOfdm.tPow2x[0]; + + for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) + ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i]; + + if (IS_CHAN_2GHZ(chan)) { + 
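+	/*
+	 * CCK target powers apply to 2.4 GHz only: 1, 2, 5.5 and 11
+	 * Mbit/s, with the short and long preamble variants of a rate
+	 * sharing one target power (1 Mbit/s has no short preamble).
+	 */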
ratesArray[rate1l] = targetPowerCck.tPow2x[0]; + ratesArray[rate2s] = ratesArray[rate2l] = + targetPowerCck.tPow2x[1]; + ratesArray[rate5_5s] = ratesArray[rate5_5l] = + targetPowerCck.tPow2x[2]; + ratesArray[rate11s] = ratesArray[rate11l] = + targetPowerCck.tPow2x[3]; + } + if (IS_CHAN_HT40(chan)) { + for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { + ratesArray[rateHt40_0 + i] = + targetPowerHt40.tPow2x[i]; + } + ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0]; + ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0]; + ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0]; + if (IS_CHAN_2GHZ(chan)) { + ratesArray[rateExtCck] = + targetPowerCckExt.tPow2x[0]; + } + } +} + +static void ath9k_hw_def_set_txpower(struct ath_hw *ah, + struct ath9k_channel *chan, + u16 cfgCtl, + u8 twiceAntennaReduction, + u8 twiceMaxRegulatoryPower, + u8 powerLimit) +{ +#define RT_AR_DELTA(x) (ratesArray[x] - cck_ofdm_delta) + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; + struct modal_eep_header *pModal = + &(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]); + int16_t ratesArray[Ar5416RateSize]; + int16_t txPowerIndexOffset = 0; + u8 ht40PowerIncForPdadc = 2; + int i, cck_ofdm_delta = 0; + + memset(ratesArray, 0, sizeof(ratesArray)); + + if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_2) { + ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; + } + + ath9k_hw_set_def_power_per_rate_table(ah, chan, + &ratesArray[0], cfgCtl, + twiceAntennaReduction, + twiceMaxRegulatoryPower, + powerLimit); + + ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset); + + for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { + ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); + if (ratesArray[i] > AR5416_MAX_RATE_POWER) + ratesArray[i] = AR5416_MAX_RATE_POWER; + } + + if (AR_SREV_9280_10_OR_LATER(ah)) { + for (i = 0; i < Ar5416RateSize; i++) { + int8_t pwr_table_offset; + + pwr_table_offset = ah->eep_ops->get_eeprom(ah, + EEP_PWR_TABLE_OFFSET); + ratesArray[i] -= pwr_table_offset * 2; + } + } + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, + ATH9K_POW_SM(ratesArray[rate18mb], 24) + | ATH9K_POW_SM(ratesArray[rate12mb], 16) + | ATH9K_POW_SM(ratesArray[rate9mb], 8) + | ATH9K_POW_SM(ratesArray[rate6mb], 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE2, + ATH9K_POW_SM(ratesArray[rate54mb], 24) + | ATH9K_POW_SM(ratesArray[rate48mb], 16) + | ATH9K_POW_SM(ratesArray[rate36mb], 8) + | ATH9K_POW_SM(ratesArray[rate24mb], 0)); + + if (IS_CHAN_2GHZ(chan)) { + if (OLC_FOR_AR9280_20_LATER) { + cck_ofdm_delta = 2; + REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, + ATH9K_POW_SM(RT_AR_DELTA(rate2s), 24) + | ATH9K_POW_SM(RT_AR_DELTA(rate2l), 16) + | ATH9K_POW_SM(ratesArray[rateXr], 8) + | ATH9K_POW_SM(RT_AR_DELTA(rate1l), 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE4, + ATH9K_POW_SM(RT_AR_DELTA(rate11s), 24) + | ATH9K_POW_SM(RT_AR_DELTA(rate11l), 16) + | ATH9K_POW_SM(RT_AR_DELTA(rate5_5s), 8) + | ATH9K_POW_SM(RT_AR_DELTA(rate5_5l), 0)); + } else { + REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, + ATH9K_POW_SM(ratesArray[rate2s], 24) + | ATH9K_POW_SM(ratesArray[rate2l], 16) + | ATH9K_POW_SM(ratesArray[rateXr], 8) + | ATH9K_POW_SM(ratesArray[rate1l], 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE4, + ATH9K_POW_SM(ratesArray[rate11s], 24) + | ATH9K_POW_SM(ratesArray[rate11l], 16) + | ATH9K_POW_SM(ratesArray[rate5_5s], 8) + | ATH9K_POW_SM(ratesArray[rate5_5l], 0)); + } + } + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, + ATH9K_POW_SM(ratesArray[rateHt20_3], 24) + 
| ATH9K_POW_SM(ratesArray[rateHt20_2], 16) + | ATH9K_POW_SM(ratesArray[rateHt20_1], 8) + | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE6, + ATH9K_POW_SM(ratesArray[rateHt20_7], 24) + | ATH9K_POW_SM(ratesArray[rateHt20_6], 16) + | ATH9K_POW_SM(ratesArray[rateHt20_5], 8) + | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)); + + if (IS_CHAN_HT40(chan)) { + REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, + ATH9K_POW_SM(ratesArray[rateHt40_3] + + ht40PowerIncForPdadc, 24) + | ATH9K_POW_SM(ratesArray[rateHt40_2] + + ht40PowerIncForPdadc, 16) + | ATH9K_POW_SM(ratesArray[rateHt40_1] + + ht40PowerIncForPdadc, 8) + | ATH9K_POW_SM(ratesArray[rateHt40_0] + + ht40PowerIncForPdadc, 0)); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE8, + ATH9K_POW_SM(ratesArray[rateHt40_7] + + ht40PowerIncForPdadc, 24) + | ATH9K_POW_SM(ratesArray[rateHt40_6] + + ht40PowerIncForPdadc, 16) + | ATH9K_POW_SM(ratesArray[rateHt40_5] + + ht40PowerIncForPdadc, 8) + | ATH9K_POW_SM(ratesArray[rateHt40_4] + + ht40PowerIncForPdadc, 0)); + if (OLC_FOR_AR9280_20_LATER) { + REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, + ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) + | ATH9K_POW_SM(RT_AR_DELTA(rateExtCck), 16) + | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) + | ATH9K_POW_SM(RT_AR_DELTA(rateDupCck), 0)); + } else { + REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, + ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) + | ATH9K_POW_SM(ratesArray[rateExtCck], 16) + | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) + | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); + } + } + + REG_WRITE(ah, AR_PHY_POWER_TX_SUB, + ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6) + | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0)); + + i = rate6mb; + + if (IS_CHAN_HT40(chan)) + i = rateHt40_0; + else if (IS_CHAN_HT20(chan)) + i = rateHt20_0; + + if (AR_SREV_9280_10_OR_LATER(ah)) + regulatory->max_power_level = + ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2; + else + regulatory->max_power_level = ratesArray[i]; + + switch(ar5416_get_ntxchains(ah->txchainmask)) { + case 1: + break; + case 2: + regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN; + break; + case 3: + regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; + break; + default: + ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM, + "Invalid chainmask configuration\n"); + break; + } +} + +static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah, + enum ieee80211_band freq_band) +{ + struct ar5416_eeprom_def *eep = &ah->eeprom.def; + struct modal_eep_header *pModal = + &(eep->modalHeader[ATH9K_HAL_FREQ_BAND_2GHZ == freq_band]); + struct base_eep_header *pBase = &eep->baseEepHeader; + u8 num_ant_config; + + num_ant_config = 1; + + if (pBase->version >= 0x0E0D) + if (pModal->useAnt1) + num_ant_config += 1; + + return num_ant_config; +} + +static u16 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ar5416_eeprom_def *eep = &ah->eeprom.def; + struct modal_eep_header *pModal = + &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); + + return pModal->antCtrlCommon & 0xFFFF; +} + +static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) +{ +#define EEP_DEF_SPURCHAN \ + (ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan) + struct ath_common *common = ath9k_hw_common(ah); + + u16 spur_val = AR_NO_SPUR; + + ath_print(common, ATH_DBG_ANI, + "Getting spur idx %d is2Ghz. 
%d val %x\n", + i, is2GHz, ah->config.spurchans[i][is2GHz]); + + switch (ah->config.spurmode) { + case SPUR_DISABLE: + break; + case SPUR_ENABLE_IOCTL: + spur_val = ah->config.spurchans[i][is2GHz]; + ath_print(common, ATH_DBG_ANI, + "Getting spur val from new loc. %d\n", spur_val); + break; + case SPUR_ENABLE_EEPROM: + spur_val = EEP_DEF_SPURCHAN; + break; + } + + return spur_val; + +#undef EEP_DEF_SPURCHAN +} + +const struct eeprom_ops eep_def_ops = { + .check_eeprom = ath9k_hw_def_check_eeprom, + .get_eeprom = ath9k_hw_def_get_eeprom, + .fill_eeprom = ath9k_hw_def_fill_eeprom, + .get_eeprom_ver = ath9k_hw_def_get_eeprom_ver, + .get_eeprom_rev = ath9k_hw_def_get_eeprom_rev, + .get_num_ant_config = ath9k_hw_def_get_num_ant_config, + .get_eeprom_antenna_cfg = ath9k_hw_def_get_eeprom_antenna_cfg, + .set_board_values = ath9k_hw_def_set_board_values, + .set_addac = ath9k_hw_def_set_addac, + .set_txpower = ath9k_hw_def_set_txpower, + .get_spur_channel = ath9k_hw_def_get_spur_channel +}; diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c new file mode 100644 index 0000000..deab8be --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ath9k.h" + +/********************************/ +/* LED functions */ +/********************************/ + +static void ath_led_blink_work(struct work_struct *work) +{ + struct ath_softc *sc = container_of(work, struct ath_softc, + ath_led_blink_work.work); + + if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED)) + return; + + if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) || + (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE)) + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); + else + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, + (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0); + + ieee80211_queue_delayed_work(sc->hw, + &sc->ath_led_blink_work, + (sc->sc_flags & SC_OP_LED_ON) ? + msecs_to_jiffies(sc->led_off_duration) : + msecs_to_jiffies(sc->led_on_duration)); + + sc->led_on_duration = sc->led_on_cnt ? + max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) : + ATH_LED_ON_DURATION_IDLE; + sc->led_off_duration = sc->led_off_cnt ? 
+ max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) : + ATH_LED_OFF_DURATION_IDLE; + sc->led_on_cnt = sc->led_off_cnt = 0; + if (sc->sc_flags & SC_OP_LED_ON) + sc->sc_flags &= ~SC_OP_LED_ON; + else + sc->sc_flags |= SC_OP_LED_ON; +} + +static void ath_led_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev); + struct ath_softc *sc = led->sc; + + switch (brightness) { + case LED_OFF: + if (led->led_type == ATH_LED_ASSOC || + led->led_type == ATH_LED_RADIO) { + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, + (led->led_type == ATH_LED_RADIO)); + sc->sc_flags &= ~SC_OP_LED_ASSOCIATED; + if (led->led_type == ATH_LED_RADIO) + sc->sc_flags &= ~SC_OP_LED_ON; + } else { + sc->led_off_cnt++; + } + break; + case LED_FULL: + if (led->led_type == ATH_LED_ASSOC) { + sc->sc_flags |= SC_OP_LED_ASSOCIATED; + ieee80211_queue_delayed_work(sc->hw, + &sc->ath_led_blink_work, 0); + } else if (led->led_type == ATH_LED_RADIO) { + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); + sc->sc_flags |= SC_OP_LED_ON; + } else { + sc->led_on_cnt++; + } + break; + default: + break; + } +} + +static int ath_register_led(struct ath_softc *sc, struct ath_led *led, + char *trigger) +{ + int ret; + + led->sc = sc; + led->led_cdev.name = led->name; + led->led_cdev.default_trigger = trigger; + led->led_cdev.brightness_set = ath_led_brightness; + + ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev); + if (ret) + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, + "Failed to register led:%s", led->name); + else + led->registered = 1; + return ret; +} + +static void ath_unregister_led(struct ath_led *led) +{ + if (led->registered) { + led_classdev_unregister(&led->led_cdev); + led->registered = 0; + } +} + +void ath_deinit_leds(struct ath_softc *sc) +{ + ath_unregister_led(&sc->assoc_led); + sc->sc_flags &= ~SC_OP_LED_ASSOCIATED; + ath_unregister_led(&sc->tx_led); + ath_unregister_led(&sc->rx_led); + ath_unregister_led(&sc->radio_led); + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); +} + +void ath_init_leds(struct ath_softc *sc) +{ + char *trigger; + int ret; + + if (AR_SREV_9287(sc->sc_ah)) + sc->sc_ah->led_pin = ATH_LED_PIN_9287; + else + sc->sc_ah->led_pin = ATH_LED_PIN_DEF; + + /* Configure gpio 1 for output */ + ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + /* LED off, active low */ + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); + + INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work); + + trigger = ieee80211_get_radio_led_name(sc->hw); + snprintf(sc->radio_led.name, sizeof(sc->radio_led.name), + "ath9k-%s::radio", wiphy_name(sc->hw->wiphy)); + ret = ath_register_led(sc, &sc->radio_led, trigger); + sc->radio_led.led_type = ATH_LED_RADIO; + if (ret) + goto fail; + + trigger = ieee80211_get_assoc_led_name(sc->hw); + snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name), + "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy)); + ret = ath_register_led(sc, &sc->assoc_led, trigger); + sc->assoc_led.led_type = ATH_LED_ASSOC; + if (ret) + goto fail; + + trigger = ieee80211_get_tx_led_name(sc->hw); + snprintf(sc->tx_led.name, sizeof(sc->tx_led.name), + "ath9k-%s::tx", wiphy_name(sc->hw->wiphy)); + ret = ath_register_led(sc, &sc->tx_led, trigger); + sc->tx_led.led_type = ATH_LED_TX; + if (ret) + goto fail; + + trigger = ieee80211_get_rx_led_name(sc->hw); + snprintf(sc->rx_led.name, sizeof(sc->rx_led.name), + "ath9k-%s::rx", 
wiphy_name(sc->hw->wiphy)); + ret = ath_register_led(sc, &sc->rx_led, trigger); + sc->rx_led.led_type = ATH_LED_RX; + if (ret) + goto fail; + + return; + +fail: + cancel_delayed_work_sync(&sc->ath_led_blink_work); + ath_deinit_leds(sc); +} + +/*******************/ +/* Rfkill */ +/*******************/ + +static bool ath_is_rfkill_set(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + + return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) == + ah->rfkill_polarity; +} + +void ath9k_rfkill_poll_state(struct ieee80211_hw *hw) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + bool blocked = !!ath_is_rfkill_set(sc); + + wiphy_rfkill_set_hw_state(hw->wiphy, blocked); +} + +void ath_start_rfkill_poll(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + + if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) + wiphy_rfkill_start_polling(sc->hw->wiphy); +} + +/******************/ +/* BTCOEX */ +/******************/ + +/* + * Detects if there is any priority bt traffic + */ +static void ath_detect_bt_priority(struct ath_softc *sc) +{ + struct ath_btcoex *btcoex = &sc->btcoex; + struct ath_hw *ah = sc->sc_ah; + + if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio)) + btcoex->bt_priority_cnt++; + + if (time_after(jiffies, btcoex->bt_priority_time + + msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) { + sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN); + /* Detect if colocated bt started scanning */ + if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) { + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX, + "BT scan detected"); + sc->sc_flags |= (SC_OP_BT_SCAN | + SC_OP_BT_PRIORITY_DETECTED); + } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) { + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX, + "BT priority traffic detected"); + sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED; + } + + btcoex->bt_priority_cnt = 0; + btcoex->bt_priority_time = jiffies; + } +} + +/* + * Configures appropriate weight based on stomp type. 
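+ * "Stomping" here means WLAN traffic overrides BT traffic:
+ * ATH_BTCOEX_STOMP_ALL stomps all BT traffic, ATH_BTCOEX_STOMP_LOW
+ * only low-priority BT traffic and ATH_BTCOEX_STOMP_NONE none at all;
+ * each case programs the matching AR_STOMP_*_WLAN_WGHT coex weight.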
+ */ +static void ath9k_btcoex_bt_stomp(struct ath_softc *sc, + enum ath_stomp_type stomp_type) +{ + struct ath_hw *ah = sc->sc_ah; + + switch (stomp_type) { + case ATH_BTCOEX_STOMP_ALL: + ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, + AR_STOMP_ALL_WLAN_WGHT); + break; + case ATH_BTCOEX_STOMP_LOW: + ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, + AR_STOMP_LOW_WLAN_WGHT); + break; + case ATH_BTCOEX_STOMP_NONE: + ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, + AR_STOMP_NONE_WLAN_WGHT); + break; + default: + ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX, + "Invalid Stomptype\n"); + break; + } + + ath9k_hw_btcoex_enable(ah); +} + +static void ath9k_gen_timer_start(struct ath_hw *ah, + struct ath_gen_timer *timer, + u32 timer_next, + u32 timer_period) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath_softc *sc = (struct ath_softc *) common->priv; + + ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period); + + if ((sc->imask & ATH9K_INT_GENTIMER) == 0) { + ath9k_hw_set_interrupts(ah, 0); + sc->imask |= ATH9K_INT_GENTIMER; + ath9k_hw_set_interrupts(ah, sc->imask); + } +} + +static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath_softc *sc = (struct ath_softc *) common->priv; + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + + ath9k_hw_gen_timer_stop(ah, timer); + + /* if no timer is enabled, turn off interrupt mask */ + if (timer_table->timer_mask.val == 0) { + ath9k_hw_set_interrupts(ah, 0); + sc->imask &= ~ATH9K_INT_GENTIMER; + ath9k_hw_set_interrupts(ah, sc->imask); + } +} + +/* + * This is the master bt coex timer which runs for every + * 45ms, bt traffic will be given priority during 55% of this + * period while wlan gets remaining 45% + */ +static void ath_btcoex_period_timer(unsigned long data) +{ + struct ath_softc *sc = (struct ath_softc *) data; + struct ath_hw *ah = sc->sc_ah; + struct ath_btcoex *btcoex = &sc->btcoex; + u32 timer_period; + bool is_btscan; + + ath_detect_bt_priority(sc); + + is_btscan = sc->sc_flags & SC_OP_BT_SCAN; + + spin_lock_bh(&btcoex->btcoex_lock); + + ath9k_btcoex_bt_stomp(sc, is_btscan ? ATH_BTCOEX_STOMP_ALL : + btcoex->bt_stomp_type); + + spin_unlock_bh(&btcoex->btcoex_lock); + + if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) { + if (btcoex->hw_timer_enabled) + ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer); + + timer_period = is_btscan ? 
btcoex->btscan_no_stomp : + btcoex->btcoex_no_stomp; + ath9k_gen_timer_start(ah, + btcoex->no_stomp_timer, + (ath9k_hw_gettsf32(ah) + + timer_period), timer_period * 10); + btcoex->hw_timer_enabled = true; + } + + mod_timer(&btcoex->period_timer, jiffies + + msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD)); +} + +/* + * Generic tsf based hw timer which configures weight + * registers to time slice between wlan and bt traffic + */ +static void ath_btcoex_no_stomp_timer(void *arg) +{ + struct ath_softc *sc = (struct ath_softc *)arg; + struct ath_hw *ah = sc->sc_ah; + struct ath_btcoex *btcoex = &sc->btcoex; + bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN; + + ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX, + "no stomp timer running \n"); + + spin_lock_bh(&btcoex->btcoex_lock); + + if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) + ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE); + else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) + ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW); + + spin_unlock_bh(&btcoex->btcoex_lock); +} + +int ath_init_btcoex_timer(struct ath_softc *sc) +{ + struct ath_btcoex *btcoex = &sc->btcoex; + + btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000; + btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * + btcoex->btcoex_period / 100; + btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * + btcoex->btcoex_period / 100; + + setup_timer(&btcoex->period_timer, ath_btcoex_period_timer, + (unsigned long) sc); + + spin_lock_init(&btcoex->btcoex_lock); + + btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah, + ath_btcoex_no_stomp_timer, + ath_btcoex_no_stomp_timer, + (void *) sc, AR_FIRST_NDP_TIMER); + + if (!btcoex->no_stomp_timer) + return -ENOMEM; + + return 0; +} + +/* + * (Re)start btcoex timers + */ +void ath9k_btcoex_timer_resume(struct ath_softc *sc) +{ + struct ath_btcoex *btcoex = &sc->btcoex; + struct ath_hw *ah = sc->sc_ah; + + ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX, + "Starting btcoex timers"); + + /* make sure duty cycle timer is also stopped when resuming */ + if (btcoex->hw_timer_enabled) + ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer); + + btcoex->bt_priority_cnt = 0; + btcoex->bt_priority_time = jiffies; + sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN); + + mod_timer(&btcoex->period_timer, jiffies); +} + + +/* + * Pause btcoex timer and bt duty cycle timer + */ +void ath9k_btcoex_timer_pause(struct ath_softc *sc) +{ + struct ath_btcoex *btcoex = &sc->btcoex; + struct ath_hw *ah = sc->sc_ah; + + del_timer_sync(&btcoex->period_timer); + + if (btcoex->hw_timer_enabled) + ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer); + + btcoex->hw_timer_enabled = false; +} diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c new file mode 100644 index 0000000..f73476b --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -0,0 +1,3954 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include + +#include "hw.h" +#include "rc.h" +#include "initvals.h" + +#define ATH9K_CLOCK_RATE_CCK 22 +#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 +#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 + +static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); +static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan); +static u32 ath9k_hw_ini_fixup(struct ath_hw *ah, + struct ar5416_eeprom_def *pEepData, + u32 reg, u32 value); + +MODULE_AUTHOR("Atheros Communications"); +MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); +MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); +MODULE_LICENSE("Dual BSD/GPL"); + +static int __init ath9k_init(void) +{ + return 0; +} +module_init(ath9k_init); + +static void __exit ath9k_exit(void) +{ + return; +} +module_exit(ath9k_exit); + +/********************/ +/* Helper Functions */ +/********************/ + +static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs) +{ + struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; + + if (!ah->curchan) /* should really check for CCK instead */ + return usecs *ATH9K_CLOCK_RATE_CCK; + if (conf->channel->band == IEEE80211_BAND_2GHZ) + return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM; + return usecs *ATH9K_CLOCK_RATE_5GHZ_OFDM; +} + +static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs) +{ + struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; + + if (conf_is_ht40(conf)) + return ath9k_hw_mac_clks(ah, usecs) * 2; + else + return ath9k_hw_mac_clks(ah, usecs); +} + +bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout) +{ + int i; + + BUG_ON(timeout < AH_TIME_QUANTUM); + + for (i = 0; i < (timeout / AH_TIME_QUANTUM); i++) { + if ((REG_READ(ah, reg) & mask) == val) + return true; + + udelay(AH_TIME_QUANTUM); + } + + ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, + "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", + timeout, reg, REG_READ(ah, reg), mask, val); + + return false; +} +EXPORT_SYMBOL(ath9k_hw_wait); + +u32 ath9k_hw_reverse_bits(u32 val, u32 n) +{ + u32 retval; + int i; + + for (i = 0, retval = 0; i < n; i++) { + retval = (retval << 1) | (val & 1); + val >>= 1; + } + return retval; +} + +bool ath9k_get_channel_edges(struct ath_hw *ah, + u16 flags, u16 *low, + u16 *high) +{ + struct ath9k_hw_capabilities *pCap = &ah->caps; + + if (flags & CHANNEL_5GHZ) { + *low = pCap->low_5ghz_chan; + *high = pCap->high_5ghz_chan; + return true; + } + if ((flags & CHANNEL_2GHZ)) { + *low = pCap->low_2ghz_chan; + *high = pCap->high_2ghz_chan; + return true; + } + return false; +} + +u16 ath9k_hw_computetxtime(struct ath_hw *ah, + u8 phy, int kbps, + u32 frameLen, u16 rateix, + bool shortPreamble) +{ + u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime; + + if (kbps == 0) + return 0; + + switch (phy) { + case WLAN_RC_PHY_CCK: + phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS; + if (shortPreamble) + phyTime >>= 1; + numBits = frameLen << 3; + txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps); + break; + case WLAN_RC_PHY_OFDM: + if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) { + bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000; + numBits = OFDM_PLCP_BITS + (frameLen << 3); + 
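+			/*
+			 * Airtime = SIFS + preamble + numSymbols * symbol time,
+			 * where numSymbols = ceil(numBits / bitsPerSymbol); the
+			 * half and full rate branches below use the same formula
+			 * with their own timing constants.  E.g. at 6 Mbps full
+			 * rate (24 bits per 4 us symbol) a 100 byte frame needs
+			 * ceil((22 + 800) / 24) = 35 symbols.
+			 */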
numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); + txTime = OFDM_SIFS_TIME_QUARTER + + OFDM_PREAMBLE_TIME_QUARTER + + (numSymbols * OFDM_SYMBOL_TIME_QUARTER); + } else if (ah->curchan && + IS_CHAN_HALF_RATE(ah->curchan)) { + bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000; + numBits = OFDM_PLCP_BITS + (frameLen << 3); + numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); + txTime = OFDM_SIFS_TIME_HALF + + OFDM_PREAMBLE_TIME_HALF + + (numSymbols * OFDM_SYMBOL_TIME_HALF); + } else { + bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000; + numBits = OFDM_PLCP_BITS + (frameLen << 3); + numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); + txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME + + (numSymbols * OFDM_SYMBOL_TIME); + } + break; + default: + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Unknown phy %u (rate ix %u)\n", phy, rateix); + txTime = 0; + break; + } + + return txTime; +} +EXPORT_SYMBOL(ath9k_hw_computetxtime); + +void ath9k_hw_get_channel_centers(struct ath_hw *ah, + struct ath9k_channel *chan, + struct chan_centers *centers) +{ + int8_t extoff; + + if (!IS_CHAN_HT40(chan)) { + centers->ctl_center = centers->ext_center = + centers->synth_center = chan->channel; + return; + } + + if ((chan->chanmode == CHANNEL_A_HT40PLUS) || + (chan->chanmode == CHANNEL_G_HT40PLUS)) { + centers->synth_center = + chan->channel + HT40_CHANNEL_CENTER_SHIFT; + extoff = 1; + } else { + centers->synth_center = + chan->channel - HT40_CHANNEL_CENTER_SHIFT; + extoff = -1; + } + + centers->ctl_center = + centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT); + /* 25 MHz spacing is supported by hw but not on upper layers */ + centers->ext_center = + centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT); +} + +/******************/ +/* Chip Revisions */ +/******************/ + +static void ath9k_hw_read_revisions(struct ath_hw *ah) +{ + u32 val; + + val = REG_READ(ah, AR_SREV) & AR_SREV_ID; + + if (val == 0xFF) { + val = REG_READ(ah, AR_SREV); + ah->hw_version.macVersion = + (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; + ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); + ah->is_pciexpress = (val & AR_SREV_TYPE2_HOST_MODE) ? 
0 : 1; + } else { + if (!AR_SREV_9100(ah)) + ah->hw_version.macVersion = MS(val, AR_SREV_VERSION); + + ah->hw_version.macRev = val & AR_SREV_REVISION; + + if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) + ah->is_pciexpress = true; + } +} + +static int ath9k_hw_get_radiorev(struct ath_hw *ah) +{ + u32 val; + int i; + + REG_WRITE(ah, AR_PHY(0x36), 0x00007058); + + for (i = 0; i < 8; i++) + REG_WRITE(ah, AR_PHY(0x20), 0x00010000); + val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff; + val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4); + + return ath9k_hw_reverse_bits(val, 8); +} + +/************************************/ +/* HW Attach, Detach, Init Routines */ +/************************************/ + +static void ath9k_hw_disablepcie(struct ath_hw *ah) +{ + if (AR_SREV_9100(ah)) + return; + + REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); + REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); + REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029); + REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824); + REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579); + REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000); + REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); + REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); + REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007); + + REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); +} + +static bool ath9k_hw_chip_test(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) }; + u32 regHold[2]; + u32 patternData[4] = { 0x55555555, + 0xaaaaaaaa, + 0x66666666, + 0x99999999 }; + int i, j; + + for (i = 0; i < 2; i++) { + u32 addr = regAddr[i]; + u32 wrData, rdData; + + regHold[i] = REG_READ(ah, addr); + for (j = 0; j < 0x100; j++) { + wrData = (j << 16) | j; + REG_WRITE(ah, addr, wrData); + rdData = REG_READ(ah, addr); + if (rdData != wrData) { + ath_print(common, ATH_DBG_FATAL, + "address test failed " + "addr: 0x%08x - wr:0x%08x != " + "rd:0x%08x\n", + addr, wrData, rdData); + return false; + } + } + for (j = 0; j < 4; j++) { + wrData = patternData[j]; + REG_WRITE(ah, addr, wrData); + rdData = REG_READ(ah, addr); + if (wrData != rdData) { + ath_print(common, ATH_DBG_FATAL, + "address test failed " + "addr: 0x%08x - wr:0x%08x != " + "rd:0x%08x\n", + addr, wrData, rdData); + return false; + } + } + REG_WRITE(ah, regAddr[i], regHold[i]); + } + udelay(100); + + return true; +} + +static void ath9k_hw_init_config(struct ath_hw *ah) +{ + int i; + + ah->config.dma_beacon_response_time = 2; + ah->config.sw_beacon_response_time = 10; + ah->config.additional_swba_backoff = 0; + ah->config.ack_6mb = 0x0; + ah->config.cwm_ignore_extcca = 0; + ah->config.pcie_powersave_enable = 0; + ah->config.pcie_clock_req = 0; + ah->config.pcie_waen = 0; + ah->config.analog_shiftreg = 1; + ah->config.ofdm_trig_low = 200; + ah->config.ofdm_trig_high = 500; + ah->config.cck_trig_high = 200; + ah->config.cck_trig_low = 100; + ah->config.enable_ani = 1; + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { + ah->config.spurchans[i][0] = AR_NO_SPUR; + ah->config.spurchans[i][1] = AR_NO_SPUR; + } + + if (ah->hw_version.devid != AR2427_DEVID_PCIE) + ah->config.ht_enable = 1; + else + ah->config.ht_enable = 0; + + ah->config.rx_intr_mitigation = true; + + /* + * We need this for PCI devices only (Cardbus, PCI, miniPCI) + * _and_ if on non-uniprocessor systems (Multiprocessor/HT). + * This means we use it for all AR5416 devices, and the few + * minor PCI AR9280 devices out there. 
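+	 * SER_REG_MODE_AUTO is resolved later in ath9k_hw_init(): it
+	 * becomes SER_REG_MODE_ON for AR5416 PCI and for AR9280 parts
+	 * that are not PCI-Express, and SER_REG_MODE_OFF otherwise.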
+ * + * Serialization is required because these devices do not handle + * well the case of two concurrent reads/writes due to the latency + * involved. During one read/write another read/write can be issued + * on another CPU while the previous read/write may still be working + * on our hardware, if we hit this case the hardware poops in a loop. + * We prevent this by serializing reads and writes. + * + * This issue is not present on PCI-Express devices or pre-AR5416 + * devices (legacy, 802.11abg). + */ + if (num_possible_cpus() > 1) + ah->config.serialize_regmode = SER_REG_MODE_AUTO; +} +EXPORT_SYMBOL(ath9k_hw_init); + +static void ath9k_hw_init_defaults(struct ath_hw *ah) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + + regulatory->country_code = CTRY_DEFAULT; + regulatory->power_limit = MAX_RATE_POWER; + regulatory->tp_scale = ATH9K_TP_SCALE_MAX; + + ah->hw_version.magic = AR5416_MAGIC; + ah->hw_version.subvendorid = 0; + + ah->ah_flags = 0; + if (ah->hw_version.devid == AR5416_AR9100_DEVID) + ah->hw_version.macVersion = AR_SREV_VERSION_9100; + if (!AR_SREV_9100(ah)) + ah->ah_flags = AH_USE_EEPROM; + + ah->atim_window = 0; + ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE; + ah->beacon_interval = 100; + ah->enable_32kHz_clock = DONT_USE_32KHZ; + ah->slottime = (u32) -1; + ah->globaltxtimeout = (u32) -1; + ah->power_mode = ATH9K_PM_UNDEFINED; +} + +static int ath9k_hw_rf_claim(struct ath_hw *ah) +{ + u32 val; + + REG_WRITE(ah, AR_PHY(0), 0x00000007); + + val = ath9k_hw_get_radiorev(ah); + switch (val & AR_RADIO_SREV_MAJOR) { + case 0: + val = AR_RAD5133_SREV_MAJOR; + break; + case AR_RAD5133_SREV_MAJOR: + case AR_RAD5122_SREV_MAJOR: + case AR_RAD2133_SREV_MAJOR: + case AR_RAD2122_SREV_MAJOR: + break; + default: + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Radio Chip Rev 0x%02X not supported\n", + val & AR_RADIO_SREV_MAJOR); + return -EOPNOTSUPP; + } + + ah->hw_version.analog5GhzRev = val; + + return 0; +} + +static int ath9k_hw_init_macaddr(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 sum; + int i; + u16 eeval; + + sum = 0; + for (i = 0; i < 3; i++) { + eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i)); + sum += eeval; + common->macaddr[2 * i] = eeval >> 8; + common->macaddr[2 * i + 1] = eeval & 0xff; + } + if (sum == 0 || sum == 0xffff * 3) + return -EADDRNOTAVAIL; + + return 0; +} + +static void ath9k_hw_init_rxgain_ini(struct ath_hw *ah) +{ + u32 rxgain_type; + + if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) { + rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE); + + if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF) + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9280Modes_backoff_13db_rxgain_9280_2, + ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6); + else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF) + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9280Modes_backoff_23db_rxgain_9280_2, + ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6); + else + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9280Modes_original_rxgain_9280_2, + ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6); + } else { + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9280Modes_original_rxgain_9280_2, + ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6); + } +} + +static void ath9k_hw_init_txgain_ini(struct ath_hw *ah) +{ + u32 txgain_type; + + if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) { + txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE); + + if (txgain_type == 
AR5416_EEP_TXGAIN_HIGH_POWER) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9280Modes_high_power_tx_gain_9280_2, + ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6); + else + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9280Modes_original_tx_gain_9280_2, + ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6); + } else { + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9280Modes_original_tx_gain_9280_2, + ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6); + } +} + +static int ath9k_hw_post_init(struct ath_hw *ah) +{ + int ecode; + + if (!ath9k_hw_chip_test(ah)) + return -ENODEV; + + ecode = ath9k_hw_rf_claim(ah); + if (ecode != 0) + return ecode; + + ecode = ath9k_hw_eeprom_init(ah); + if (ecode != 0) + return ecode; + + ath_print(ath9k_hw_common(ah), ATH_DBG_CONFIG, + "Eeprom VER: %d, REV: %d\n", + ah->eep_ops->get_eeprom_ver(ah), + ah->eep_ops->get_eeprom_rev(ah)); + + if (!AR_SREV_9280_10_OR_LATER(ah)) { + ecode = ath9k_hw_rf_alloc_ext_banks(ah); + if (ecode) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Failed allocating banks for " + "external radio\n"); + return ecode; + } + } + + if (!AR_SREV_9100(ah)) { + ath9k_hw_ani_setup(ah); + ath9k_hw_ani_init(ah); + } + + return 0; +} + +static bool ath9k_hw_devid_supported(u16 devid) +{ + switch (devid) { + case AR5416_DEVID_PCI: + case AR5416_DEVID_PCIE: + case AR5416_AR9100_DEVID: + case AR9160_DEVID_PCI: + case AR9280_DEVID_PCI: + case AR9280_DEVID_PCIE: + case AR9285_DEVID_PCIE: + case AR5416_DEVID_AR9287_PCI: + case AR5416_DEVID_AR9287_PCIE: + case AR9271_USB: + case AR2427_DEVID_PCIE: + return true; + default: + break; + } + return false; +} + +static bool ath9k_hw_macversion_supported(u32 macversion) +{ + switch (macversion) { + case AR_SREV_VERSION_5416_PCI: + case AR_SREV_VERSION_5416_PCIE: + case AR_SREV_VERSION_9160: + case AR_SREV_VERSION_9100: + case AR_SREV_VERSION_9280: + case AR_SREV_VERSION_9285: + case AR_SREV_VERSION_9287: + case AR_SREV_VERSION_9271: + return true; + default: + break; + } + return false; +} + +static void ath9k_hw_init_cal_settings(struct ath_hw *ah) +{ + if (AR_SREV_9160_10_OR_LATER(ah)) { + if (AR_SREV_9280_10_OR_LATER(ah)) { + ah->iq_caldata.calData = &iq_cal_single_sample; + ah->adcgain_caldata.calData = + &adc_gain_cal_single_sample; + ah->adcdc_caldata.calData = + &adc_dc_cal_single_sample; + ah->adcdc_calinitdata.calData = + &adc_init_dc_cal; + } else { + ah->iq_caldata.calData = &iq_cal_multi_sample; + ah->adcgain_caldata.calData = + &adc_gain_cal_multi_sample; + ah->adcdc_caldata.calData = + &adc_dc_cal_multi_sample; + ah->adcdc_calinitdata.calData = + &adc_init_dc_cal; + } + ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL; + } +} + +static void ath9k_hw_init_mode_regs(struct ath_hw *ah) +{ + if (AR_SREV_9271(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271, + ARRAY_SIZE(ar9271Modes_9271), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271, + ARRAY_SIZE(ar9271Common_9271), 2); + INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only, + ar9271Modes_9271_1_0_only, + ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6); + return; + } + + if (AR_SREV_9287_11_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1, + ARRAY_SIZE(ar9287Modes_9287_1_1), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1, + ARRAY_SIZE(ar9287Common_9287_1_1), 2); + if (ah->config.pcie_clock_req) + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9287PciePhy_clkreq_off_L1_9287_1_1, + ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2); + else + INIT_INI_ARRAY(&ah->iniPcieSerdes, + 
ar9287PciePhy_clkreq_always_on_L1_9287_1_1, + ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1), + 2); + } else if (AR_SREV_9287_10_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0, + ARRAY_SIZE(ar9287Modes_9287_1_0), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0, + ARRAY_SIZE(ar9287Common_9287_1_0), 2); + + if (ah->config.pcie_clock_req) + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9287PciePhy_clkreq_off_L1_9287_1_0, + ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2); + else + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9287PciePhy_clkreq_always_on_L1_9287_1_0, + ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0), + 2); + } else if (AR_SREV_9285_12_OR_LATER(ah)) { + + + INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2, + ARRAY_SIZE(ar9285Modes_9285_1_2), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2, + ARRAY_SIZE(ar9285Common_9285_1_2), 2); + + if (ah->config.pcie_clock_req) { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9285PciePhy_clkreq_off_L1_9285_1_2, + ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2); + } else { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9285PciePhy_clkreq_always_on_L1_9285_1_2, + ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2), + 2); + } + } else if (AR_SREV_9285_10_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285, + ARRAY_SIZE(ar9285Modes_9285), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285, + ARRAY_SIZE(ar9285Common_9285), 2); + + if (ah->config.pcie_clock_req) { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9285PciePhy_clkreq_off_L1_9285, + ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2); + } else { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9285PciePhy_clkreq_always_on_L1_9285, + ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2); + } + } else if (AR_SREV_9280_20_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2, + ARRAY_SIZE(ar9280Modes_9280_2), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2, + ARRAY_SIZE(ar9280Common_9280_2), 2); + + if (ah->config.pcie_clock_req) { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9280PciePhy_clkreq_off_L1_9280, + ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2); + } else { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9280PciePhy_clkreq_always_on_L1_9280, + ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2); + } + INIT_INI_ARRAY(&ah->iniModesAdditional, + ar9280Modes_fast_clock_9280_2, + ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3); + } else if (AR_SREV_9280_10_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280, + ARRAY_SIZE(ar9280Modes_9280), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280, + ARRAY_SIZE(ar9280Common_9280), 2); + } else if (AR_SREV_9160_10_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160, + ARRAY_SIZE(ar5416Modes_9160), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160, + ARRAY_SIZE(ar5416Common_9160), 2); + INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160, + ARRAY_SIZE(ar5416Bank0_9160), 2); + INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160, + ARRAY_SIZE(ar5416BB_RfGain_9160), 3); + INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160, + ARRAY_SIZE(ar5416Bank1_9160), 2); + INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160, + ARRAY_SIZE(ar5416Bank2_9160), 2); + INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160, + ARRAY_SIZE(ar5416Bank3_9160), 3); + INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160, + ARRAY_SIZE(ar5416Bank6_9160), 3); + INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160, + ARRAY_SIZE(ar5416Bank6TPC_9160), 3); + INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160, + 
ARRAY_SIZE(ar5416Bank7_9160), 2); + if (AR_SREV_9160_11(ah)) { + INIT_INI_ARRAY(&ah->iniAddac, + ar5416Addac_91601_1, + ARRAY_SIZE(ar5416Addac_91601_1), 2); + } else { + INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160, + ARRAY_SIZE(ar5416Addac_9160), 2); + } + } else if (AR_SREV_9100_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100, + ARRAY_SIZE(ar5416Modes_9100), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100, + ARRAY_SIZE(ar5416Common_9100), 2); + INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100, + ARRAY_SIZE(ar5416Bank0_9100), 2); + INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100, + ARRAY_SIZE(ar5416BB_RfGain_9100), 3); + INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100, + ARRAY_SIZE(ar5416Bank1_9100), 2); + INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100, + ARRAY_SIZE(ar5416Bank2_9100), 2); + INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100, + ARRAY_SIZE(ar5416Bank3_9100), 3); + INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100, + ARRAY_SIZE(ar5416Bank6_9100), 3); + INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100, + ARRAY_SIZE(ar5416Bank6TPC_9100), 3); + INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100, + ARRAY_SIZE(ar5416Bank7_9100), 2); + INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100, + ARRAY_SIZE(ar5416Addac_9100), 2); + } else { + INIT_INI_ARRAY(&ah->iniModes, ar5416Modes, + ARRAY_SIZE(ar5416Modes), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar5416Common, + ARRAY_SIZE(ar5416Common), 2); + INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0, + ARRAY_SIZE(ar5416Bank0), 2); + INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain, + ARRAY_SIZE(ar5416BB_RfGain), 3); + INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1, + ARRAY_SIZE(ar5416Bank1), 2); + INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2, + ARRAY_SIZE(ar5416Bank2), 2); + INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3, + ARRAY_SIZE(ar5416Bank3), 3); + INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6, + ARRAY_SIZE(ar5416Bank6), 3); + INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC, + ARRAY_SIZE(ar5416Bank6TPC), 3); + INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7, + ARRAY_SIZE(ar5416Bank7), 2); + INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac, + ARRAY_SIZE(ar5416Addac), 2); + } +} + +static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah) +{ + if (AR_SREV_9287_11_OR_LATER(ah)) + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9287Modes_rx_gain_9287_1_1, + ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6); + else if (AR_SREV_9287_10(ah)) + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9287Modes_rx_gain_9287_1_0, + ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6); + else if (AR_SREV_9280_20(ah)) + ath9k_hw_init_rxgain_ini(ah); + + if (AR_SREV_9287_11_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9287Modes_tx_gain_9287_1_1, + ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6); + } else if (AR_SREV_9287_10(ah)) { + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9287Modes_tx_gain_9287_1_0, + ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6); + } else if (AR_SREV_9280_20(ah)) { + ath9k_hw_init_txgain_ini(ah); + } else if (AR_SREV_9285_12_OR_LATER(ah)) { + u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE); + + /* txgain table */ + if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) { + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9285Modes_high_power_tx_gain_9285_1_2, + ARRAY_SIZE(ar9285Modes_high_power_tx_gain_9285_1_2), 6); + } else { + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9285Modes_original_tx_gain_9285_1_2, + ARRAY_SIZE(ar9285Modes_original_tx_gain_9285_1_2), 6); + } + + } +} + +static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah) +{ + u32 i, j; + + if (ah->hw_version.devid == 
AR9280_DEVID_PCI) { + + /* EEPROM Fixup */ + for (i = 0; i < ah->iniModes.ia_rows; i++) { + u32 reg = INI_RA(&ah->iniModes, i, 0); + + for (j = 1; j < ah->iniModes.ia_columns; j++) { + u32 val = INI_RA(&ah->iniModes, i, j); + + INI_RA(&ah->iniModes, i, j) = + ath9k_hw_ini_fixup(ah, + &ah->eeprom.def, + reg, val); + } + } + } +} + +int ath9k_hw_init(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + int r = 0; + + if (!ath9k_hw_devid_supported(ah->hw_version.devid)) { + ath_print(common, ATH_DBG_FATAL, + "Unsupported device ID: 0x%0x\n", + ah->hw_version.devid); + return -EOPNOTSUPP; + } + + ath9k_hw_init_defaults(ah); + ath9k_hw_init_config(ah); + + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { + ath_print(common, ATH_DBG_FATAL, + "Couldn't reset chip\n"); + return -EIO; + } + + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { + ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n"); + return -EIO; + } + + if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) { + if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI || + (AR_SREV_9280(ah) && !ah->is_pciexpress)) { + ah->config.serialize_regmode = + SER_REG_MODE_ON; + } else { + ah->config.serialize_regmode = + SER_REG_MODE_OFF; + } + } + + ath_print(common, ATH_DBG_RESET, "serialize_regmode is %d\n", + ah->config.serialize_regmode); + + if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) + ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1; + else + ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD; + + if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) { + ath_print(common, ATH_DBG_FATAL, + "Mac Chip Rev 0x%02x.%x is not supported by " + "this driver\n", ah->hw_version.macVersion, + ah->hw_version.macRev); + return -EOPNOTSUPP; + } + + if (AR_SREV_9100(ah)) { + ah->iq_caldata.calData = &iq_cal_multi_sample; + ah->supp_cals = IQ_MISMATCH_CAL; + ah->is_pciexpress = false; + } + + if (AR_SREV_9271(ah)) + ah->is_pciexpress = false; + + ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID); + + ath9k_hw_init_cal_settings(ah); + + ah->ani_function = ATH9K_ANI_ALL; + if (AR_SREV_9280_10_OR_LATER(ah)) { + ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; + ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel; + ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_9280_spur_mitigate; + } else { + ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel; + ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate; + } + + ath9k_hw_init_mode_regs(ah); + + if (ah->is_pciexpress) + ath9k_hw_configpcipowersave(ah, 0, 0); + else + ath9k_hw_disablepcie(ah); + + /* Support for Japan ch.14 (2484) spread */ + if (AR_SREV_9287_11_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniCckfirNormal, + ar9287Common_normal_cck_fir_coeff_92871_1, + ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2); + INIT_INI_ARRAY(&ah->iniCckfirJapan2484, + ar9287Common_japan_2484_cck_fir_coeff_92871_1, + ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2); + } + + r = ath9k_hw_post_init(ah); + if (r) + return r; + + ath9k_hw_init_mode_gain_regs(ah); + r = ath9k_hw_fill_cap_info(ah); + if (r) + return r; + + ath9k_hw_init_eeprom_fix(ah); + + r = ath9k_hw_init_macaddr(ah); + if (r) { + ath_print(common, ATH_DBG_FATAL, + "Failed to initialize MAC address\n"); + return r; + } + + if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) + ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S); + else + ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); + + ath9k_init_nfcal_hist_buffer(ah); + + common->state = ATH_HW_INITIALIZED; + + return 0; +} + +static void 
ath9k_hw_init_bb(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + u32 synthDelay; + + synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; + if (IS_CHAN_B(chan)) + synthDelay = (4 * synthDelay) / 22; + else + synthDelay /= 10; + + REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); + + udelay(synthDelay + BASE_ACTIVATE_DELAY); +} + +static void ath9k_hw_init_qos(struct ath_hw *ah) +{ + REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); + REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); + + REG_WRITE(ah, AR_QOS_NO_ACK, + SM(2, AR_QOS_NO_ACK_TWO_BIT) | + SM(5, AR_QOS_NO_ACK_BIT_OFF) | + SM(0, AR_QOS_NO_ACK_BYTE_OFF)); + + REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL); + REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF); + REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF); + REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF); + REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); +} + +static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud) +{ + u32 lcr; + u32 baud_divider = freq * 1000 * 1000 / 16 / baud; + + lcr = REG_READ(ah , 0x5100c); + lcr |= 0x80; + + REG_WRITE(ah, 0x5100c, lcr); + REG_WRITE(ah, 0x51004, (baud_divider >> 8)); + REG_WRITE(ah, 0x51000, (baud_divider & 0xff)); + + lcr &= ~0x80; + REG_WRITE(ah, 0x5100c, lcr); +} + +static void ath9k_hw_init_pll(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + u32 pll; + + if (AR_SREV_9100(ah)) { + if (chan && IS_CHAN_5GHZ(chan)) + pll = 0x1450; + else + pll = 0x1458; + } else { + if (AR_SREV_9280_10_OR_LATER(ah)) { + pll = SM(0x5, AR_RTC_9160_PLL_REFDIV); + + if (chan && IS_CHAN_HALF_RATE(chan)) + pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL); + else if (chan && IS_CHAN_QUARTER_RATE(chan)) + pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL); + + if (chan && IS_CHAN_5GHZ(chan)) { + pll |= SM(0x28, AR_RTC_9160_PLL_DIV); + + + if (AR_SREV_9280_20(ah)) { + if (((chan->channel % 20) == 0) + || ((chan->channel % 10) == 0)) + pll = 0x2850; + else + pll = 0x142c; + } + } else { + pll |= SM(0x2c, AR_RTC_9160_PLL_DIV); + } + + } else if (AR_SREV_9160_10_OR_LATER(ah)) { + + pll = SM(0x5, AR_RTC_9160_PLL_REFDIV); + + if (chan && IS_CHAN_HALF_RATE(chan)) + pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL); + else if (chan && IS_CHAN_QUARTER_RATE(chan)) + pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL); + + if (chan && IS_CHAN_5GHZ(chan)) + pll |= SM(0x50, AR_RTC_9160_PLL_DIV); + else + pll |= SM(0x58, AR_RTC_9160_PLL_DIV); + } else { + pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2; + + if (chan && IS_CHAN_HALF_RATE(chan)) + pll |= SM(0x1, AR_RTC_PLL_CLKSEL); + else if (chan && IS_CHAN_QUARTER_RATE(chan)) + pll |= SM(0x2, AR_RTC_PLL_CLKSEL); + + if (chan && IS_CHAN_5GHZ(chan)) + pll |= SM(0xa, AR_RTC_PLL_DIV); + else + pll |= SM(0xb, AR_RTC_PLL_DIV); + } + } + REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); + + /* Switch the core clock for ar9271 to 117Mhz */ + if (AR_SREV_9271(ah)) { + if ((pll == 0x142c) || (pll == 0x2850) ) { + udelay(500); + /* set CLKOBS to output AHB clock */ + REG_WRITE(ah, 0x7020, 0xe); + /* + * 0x304: 117Mhz, ahb_ratio: 1x1 + * 0x306: 40Mhz, ahb_ratio: 1x1 + */ + REG_WRITE(ah, 0x50040, 0x304); + /* + * makes adjustments for the baud dividor to keep the + * targetted baud rate based on the used core clock. 
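+			 * ath9k_hw_change_target_baud() above derives the
+			 * divider as core clock (in Hz) / 16 / baud, so the
+			 * UART keeps its baud rate across the clock switch.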
+ */ + ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK, + AR9271_TARGET_BAUD_RATE); + } + } + + udelay(RTC_PLL_SETTLE_DELAY); + + REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); +} + +static void ath9k_hw_init_chain_masks(struct ath_hw *ah) +{ + int rx_chainmask, tx_chainmask; + + rx_chainmask = ah->rxchainmask; + tx_chainmask = ah->txchainmask; + + switch (rx_chainmask) { + case 0x5: + REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, + AR_PHY_SWAP_ALT_CHAIN); + case 0x3: + if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) { + REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7); + REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7); + break; + } + case 0x1: + case 0x2: + case 0x7: + REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask); + REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask); + break; + default: + break; + } + + REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask); + if (tx_chainmask == 0x5) { + REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, + AR_PHY_SWAP_ALT_CHAIN); + } + if (AR_SREV_9100(ah)) + REG_WRITE(ah, AR_PHY_ANALOG_SWAP, + REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001); +} + +static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, + enum nl80211_iftype opmode) +{ + ah->mask_reg = AR_IMR_TXERR | + AR_IMR_TXURN | + AR_IMR_RXERR | + AR_IMR_RXORN | + AR_IMR_BCNMISC; + + if (ah->config.rx_intr_mitigation) + ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; + else + ah->mask_reg |= AR_IMR_RXOK; + + ah->mask_reg |= AR_IMR_TXOK; + + if (opmode == NL80211_IFTYPE_AP) + ah->mask_reg |= AR_IMR_MIB; + + REG_WRITE(ah, AR_IMR, ah->mask_reg); + REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT); + + if (!AR_SREV_9100(ah)) { + REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT); + REG_WRITE(ah, AR_INTR_SYNC_MASK, 0); + } +} + +static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us) +{ + u32 val = ath9k_hw_mac_to_clks(ah, us); + val = min(val, (u32) 0xFFFF); + REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val); +} + +static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) +{ + u32 val = ath9k_hw_mac_to_clks(ah, us); + val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK)); + REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val); +} + +static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) +{ + u32 val = ath9k_hw_mac_to_clks(ah, us); + val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS)); + REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val); +} + +static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) +{ + if (tu > 0xFFFF) { + ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT, + "bad global tx timeout %u\n", tu); + ah->globaltxtimeout = (u32) -1; + return false; + } else { + REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu); + ah->globaltxtimeout = tu; + return true; + } +} + +void ath9k_hw_init_global_settings(struct ath_hw *ah) +{ + struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; + int acktimeout; + int slottime; + int sifstime; + + ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n", + ah->misc_mode); + + if (ah->misc_mode != 0) + REG_WRITE(ah, AR_PCU_MISC, + REG_READ(ah, AR_PCU_MISC) | ah->misc_mode); + + if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ) + sifstime = 16; + else + sifstime = 10; + + /* As defined by IEEE 802.11-2007 17.3.8.6 */ + slottime = ah->slottime + 3 * ah->coverage_class; + acktimeout = slottime + sifstime; + + /* + * Workaround for early ACK timeouts, add an offset to match the + * initval's 64us ack timeout value. 
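+	 * On 2.4 GHz the sum below therefore works out to
+	 * acktimeout = slottime + sifstime + (64 - sifstime - slottime)
+	 *            = 64us when the coverage class is zero.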
+ * This was initially only meant to work around an issue with delayed + * BA frames in some implementations, but it has been found to fix ACK + * timeout issues in other cases as well. + */ + if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) + acktimeout += 64 - sifstime - ah->slottime; + + ath9k_hw_setslottime(ah, slottime); + ath9k_hw_set_ack_timeout(ah, acktimeout); + ath9k_hw_set_cts_timeout(ah, acktimeout); + if (ah->globaltxtimeout != (u32) -1) + ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout); +} +EXPORT_SYMBOL(ath9k_hw_init_global_settings); + +void ath9k_hw_deinit(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (common->state <= ATH_HW_INITIALIZED) + goto free_hw; + + if (!AR_SREV_9100(ah)) + ath9k_hw_ani_disable(ah); + + ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); + +free_hw: + if (!AR_SREV_9280_10_OR_LATER(ah)) + ath9k_hw_rf_free_ext_banks(ah); + kfree(ah); + ah = NULL; +} +EXPORT_SYMBOL(ath9k_hw_deinit); + +/*******/ +/* INI */ +/*******/ + +static void ath9k_hw_override_ini(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + u32 val; + + if (AR_SREV_9271(ah)) { + /* + * Enable spectral scan to solution for issues with stuck + * beacons on AR9271 1.0. The beacon stuck issue is not seeon on + * AR9271 1.1 + */ + if (AR_SREV_9271_10(ah)) { + val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) | + AR_PHY_SPECTRAL_SCAN_ENABLE; + REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val); + } + else if (AR_SREV_9271_11(ah)) + /* + * change AR_PHY_RF_CTL3 setting to fix MAC issue + * present on AR9271 1.1 + */ + REG_WRITE(ah, AR_PHY_RF_CTL3, 0x3a020001); + return; + } + + /* + * Set the RX_ABORT and RX_DIS and clear if off only after + * RXE is set for MAC. This prevents frames with corrupted + * descriptor status. + */ + REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); + + if (AR_SREV_9280_10_OR_LATER(ah)) { + val = REG_READ(ah, AR_PCU_MISC_MODE2) & + (~AR_PCU_MISC_MODE2_HWWAR1); + + if (AR_SREV_9287_10_OR_LATER(ah)) + val = val & (~AR_PCU_MISC_MODE2_HWWAR2); + + REG_WRITE(ah, AR_PCU_MISC_MODE2, val); + } + + if (!AR_SREV_5416_20_OR_LATER(ah) || + AR_SREV_9280_10_OR_LATER(ah)) + return; + /* + * Disable BB clock gating + * Necessary to avoid issues on AR5416 2.0 + */ + REG_WRITE(ah, 0x9800 + (651 << 2), 0x11); + + /* + * Disable RIFS search on some chips to avoid baseband + * hang issues. 
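+ * Only AR9100 and AR9160 take this path; the workaround clears the
+ * RIFS initial delay field in the heavy clip / RIFS register below.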
+ */ + if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) { + val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS); + val &= ~AR_PHY_RIFS_INIT_DELAY; + REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val); + } +} + +static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah, + struct ar5416_eeprom_def *pEepData, + u32 reg, u32 value) +{ + struct base_eep_header *pBase = &(pEepData->baseEepHeader); + struct ath_common *common = ath9k_hw_common(ah); + + switch (ah->hw_version.devid) { + case AR9280_DEVID_PCI: + if (reg == 0x7894) { + ath_print(common, ATH_DBG_EEPROM, + "ini VAL: %x EEPROM: %x\n", value, + (pBase->version & 0xff)); + + if ((pBase->version & 0xff) > 0x0a) { + ath_print(common, ATH_DBG_EEPROM, + "PWDCLKIND: %d\n", + pBase->pwdclkind); + value &= ~AR_AN_TOP2_PWDCLKIND; + value |= AR_AN_TOP2_PWDCLKIND & + (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S); + } else { + ath_print(common, ATH_DBG_EEPROM, + "PWDCLKIND Earlier Rev\n"); + } + + ath_print(common, ATH_DBG_EEPROM, + "final ini VAL: %x\n", value); + } + break; + } + + return value; +} + +static u32 ath9k_hw_ini_fixup(struct ath_hw *ah, + struct ar5416_eeprom_def *pEepData, + u32 reg, u32 value) +{ + if (ah->eep_map == EEP_MAP_4KBITS) + return value; + else + return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value); +} + +static void ath9k_olc_init(struct ath_hw *ah) +{ + u32 i; + + if (OLC_FOR_AR9287_10_LATER) { + REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9, + AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0, + AR9287_AN_TXPC0_TXPCMODE, + AR9287_AN_TXPC0_TXPCMODE_S, + AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE); + udelay(100); + } else { + for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++) + ah->originalGain[i] = + MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4), + AR_PHY_TX_GAIN); + ah->PDADCdelta = 0; + } +} + +static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, + struct ath9k_channel *chan) +{ + u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band); + + if (IS_CHAN_B(chan)) + ctl |= CTL_11B; + else if (IS_CHAN_G(chan)) + ctl |= CTL_11G; + else + ctl |= CTL_11A; + + return ctl; +} + +static int ath9k_hw_process_ini(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + int i, regWrites = 0; + struct ieee80211_channel *channel = chan->chan; + u32 modesIndex, freqIndex; + + switch (chan->chanmode) { + case CHANNEL_A: + case CHANNEL_A_HT20: + modesIndex = 1; + freqIndex = 1; + break; + case CHANNEL_A_HT40PLUS: + case CHANNEL_A_HT40MINUS: + modesIndex = 2; + freqIndex = 1; + break; + case CHANNEL_G: + case CHANNEL_G_HT20: + case CHANNEL_B: + modesIndex = 4; + freqIndex = 2; + break; + case CHANNEL_G_HT40PLUS: + case CHANNEL_G_HT40MINUS: + modesIndex = 3; + freqIndex = 2; + break; + + default: + return -EINVAL; + } + + REG_WRITE(ah, AR_PHY(0), 0x00000007); + REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO); + ah->eep_ops->set_addac(ah, chan); + + if (AR_SREV_5416_22_OR_LATER(ah)) { + REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites); + } else { + struct ar5416IniArray temp; + u32 addacSize = + sizeof(u32) * ah->iniAddac.ia_rows * + ah->iniAddac.ia_columns; + + memcpy(ah->addac5416_21, + ah->iniAddac.ia_array, addacSize); + + (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0; + + temp.ia_array = ah->addac5416_21; + temp.ia_columns = ah->iniAddac.ia_columns; + temp.ia_rows = ah->iniAddac.ia_rows; + REG_WRITE_ARRAY(&temp, 1, regWrites); + } + + REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC); + + for (i = 0; i < ah->iniModes.ia_rows; i++) { + 
u32 reg = INI_RA(&ah->iniModes, i, 0); + u32 val = INI_RA(&ah->iniModes, i, modesIndex); + + REG_WRITE(ah, reg, val); + + if (reg >= 0x7800 && reg < 0x78a0 + && ah->config.analog_shiftreg) { + udelay(100); + } + + DO_DELAY(regWrites); + } + + if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah)) + REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites); + + if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) || + AR_SREV_9287_10_OR_LATER(ah)) + REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); + + for (i = 0; i < ah->iniCommon.ia_rows; i++) { + u32 reg = INI_RA(&ah->iniCommon, i, 0); + u32 val = INI_RA(&ah->iniCommon, i, 1); + + REG_WRITE(ah, reg, val); + + if (reg >= 0x7800 && reg < 0x78a0 + && ah->config.analog_shiftreg) { + udelay(100); + } + + DO_DELAY(regWrites); + } + + ath9k_hw_write_regs(ah, freqIndex, regWrites); + + if (AR_SREV_9271_10(ah)) + REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only, + modesIndex, regWrites); + + if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) { + REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex, + regWrites); + } + + ath9k_hw_override_ini(ah, chan); + ath9k_hw_set_regs(ah, chan); + ath9k_hw_init_chain_masks(ah); + + if (OLC_FOR_AR9280_20_LATER) + ath9k_olc_init(ah); + + ah->eep_ops->set_txpower(ah, chan, + ath9k_regd_get_ctl(regulatory, chan), + channel->max_antenna_gain * 2, + channel->max_power * 2, + min((u32) MAX_RATE_POWER, + (u32) regulatory->power_limit)); + + if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "ar5416SetRfRegs failed\n"); + return -EIO; + } + + return 0; +} + +/****************************************/ +/* Reset and Channel Switching Routines */ +/****************************************/ + +static void ath9k_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan) +{ + u32 rfMode = 0; + + if (chan == NULL) + return; + + rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan)) + ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM; + + if (!AR_SREV_9280_10_OR_LATER(ah)) + rfMode |= (IS_CHAN_5GHZ(chan)) ? + AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ; + + if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) + rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE); + + REG_WRITE(ah, AR_PHY_MODE, rfMode); +} + +static void ath9k_hw_mark_phy_inactive(struct ath_hw *ah) +{ + REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); +} + +static inline void ath9k_hw_set_dma(struct ath_hw *ah) +{ + u32 regval; + + /* + * set AHB_MODE not to do cacheline prefetches + */ + regval = REG_READ(ah, AR_AHB_MODE); + REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN); + + /* + * let mac dma reads be in 128 byte chunks + */ + regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK; + REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B); + + /* + * Restore TX Trigger Level to its pre-reset value. + * The initial value depends on whether aggregation is enabled, and is + * adjusted whenever underruns are detected. + */ + REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level); + + /* + * let mac dma writes be in 128 byte chunks + */ + regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK; + REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B); + + /* + * Setup receive FIFO threshold to hold off TX activities + */ + REG_WRITE(ah, AR_RXFIFO_CFG, 0x200); + + /* + * reduce the number of usable entries in PCU TXBUF to avoid + * wrap around issues. + */ + if (AR_SREV_9285(ah)) { + /* For AR9285 the number of Fifos are reduced to half. 
+ * So set the usable tx buf size also to half to + * avoid data/delimiter underruns + */ + REG_WRITE(ah, AR_PCU_TXBUF_CTRL, + AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE); + } else if (!AR_SREV_9271(ah)) { + REG_WRITE(ah, AR_PCU_TXBUF_CTRL, + AR_PCU_TXBUF_CTRL_USABLE_SIZE); + } +} + +static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) +{ + u32 val; + + val = REG_READ(ah, AR_STA_ID1); + val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC); + switch (opmode) { + case NL80211_IFTYPE_AP: + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP + | AR_STA_ID1_KSRCH_MODE); + REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); + break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC + | AR_STA_ID1_KSRCH_MODE); + REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_MONITOR: + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); + break; + } +} + +static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, + u32 coef_scaled, + u32 *coef_mantissa, + u32 *coef_exponent) +{ + u32 coef_exp, coef_man; + + for (coef_exp = 31; coef_exp > 0; coef_exp--) + if ((coef_scaled >> coef_exp) & 0x1) + break; + + coef_exp = 14 - (coef_exp - COEF_SCALE_S); + + coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1)); + + *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp); + *coef_exponent = coef_exp - 16; +} + +static void ath9k_hw_set_delta_slope(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + u32 coef_scaled, ds_coef_exp, ds_coef_man; + u32 clockMhzScaled = 0x64000000; + struct chan_centers centers; + + if (IS_CHAN_HALF_RATE(chan)) + clockMhzScaled = clockMhzScaled >> 1; + else if (IS_CHAN_QUARTER_RATE(chan)) + clockMhzScaled = clockMhzScaled >> 2; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + coef_scaled = clockMhzScaled / centers.synth_center; + + ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man, + &ds_coef_exp); + + REG_RMW_FIELD(ah, AR_PHY_TIMING3, + AR_PHY_TIMING3_DSC_MAN, ds_coef_man); + REG_RMW_FIELD(ah, AR_PHY_TIMING3, + AR_PHY_TIMING3_DSC_EXP, ds_coef_exp); + + coef_scaled = (9 * coef_scaled) / 10; + + ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man, + &ds_coef_exp); + + REG_RMW_FIELD(ah, AR_PHY_HALFGI, + AR_PHY_HALFGI_DSC_MAN, ds_coef_man); + REG_RMW_FIELD(ah, AR_PHY_HALFGI, + AR_PHY_HALFGI_DSC_EXP, ds_coef_exp); +} + +static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) +{ + u32 rst_flags; + u32 tmpReg; + + if (AR_SREV_9100(ah)) { + u32 val = REG_READ(ah, AR_RTC_DERIVED_CLK); + val &= ~AR_RTC_DERIVED_CLK_PERIOD; + val |= SM(1, AR_RTC_DERIVED_CLK_PERIOD); + REG_WRITE(ah, AR_RTC_DERIVED_CLK, val); + (void)REG_READ(ah, AR_RTC_DERIVED_CLK); + } + + REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | + AR_RTC_FORCE_WAKE_ON_INT); + + if (AR_SREV_9100(ah)) { + rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD | + AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET; + } else { + tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE); + if (tmpReg & + (AR_INTR_SYNC_LOCAL_TIMEOUT | + AR_INTR_SYNC_RADM_CPL_TIMEOUT)) { + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); + REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); + } else { + REG_WRITE(ah, AR_RC, AR_RC_AHB); + } + + rst_flags = AR_RTC_RC_MAC_WARM; + if (type == ATH9K_RESET_COLD) + rst_flags |= AR_RTC_RC_MAC_COLD; + } + + REG_WRITE(ah, AR_RTC_RC, rst_flags); + udelay(50); + + REG_WRITE(ah, AR_RTC_RC, 0); + if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { + 
ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, + "RTC stuck in MAC reset\n"); + return false; + } + + if (!AR_SREV_9100(ah)) + REG_WRITE(ah, AR_RC, 0); + + if (AR_SREV_9100(ah)) + udelay(50); + + return true; +} + +static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) +{ + REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | + AR_RTC_FORCE_WAKE_ON_INT); + + if (!AR_SREV_9100(ah)) + REG_WRITE(ah, AR_RC, AR_RC_AHB); + + REG_WRITE(ah, AR_RTC_RESET, 0); + udelay(2); + + if (!AR_SREV_9100(ah)) + REG_WRITE(ah, AR_RC, 0); + + REG_WRITE(ah, AR_RTC_RESET, 1); + + if (!ath9k_hw_wait(ah, + AR_RTC_STATUS, + AR_RTC_STATUS_M, + AR_RTC_STATUS_ON, + AH_WAIT_TIMEOUT)) { + ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, + "RTC not waking up\n"); + return false; + } + + ath9k_hw_read_revisions(ah); + + return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); +} + +static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) +{ + REG_WRITE(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); + + switch (type) { + case ATH9K_RESET_POWER_ON: + return ath9k_hw_set_reset_power_on(ah); + case ATH9K_RESET_WARM: + case ATH9K_RESET_COLD: + return ath9k_hw_set_reset(ah, type); + default: + return false; + } +} + +static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan) +{ + u32 phymode; + u32 enableDacFifo = 0; + + if (AR_SREV_9285_10_OR_LATER(ah)) + enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) & + AR_PHY_FC_ENABLE_DAC_FIFO); + + phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40 + | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo; + + if (IS_CHAN_HT40(chan)) { + phymode |= AR_PHY_FC_DYN2040_EN; + + if ((chan->chanmode == CHANNEL_A_HT40PLUS) || + (chan->chanmode == CHANNEL_G_HT40PLUS)) + phymode |= AR_PHY_FC_DYN2040_PRI_CH; + + } + REG_WRITE(ah, AR_PHY_TURBO, phymode); + + ath9k_hw_set11nmac2040(ah); + + REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S); + REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S); +} + +static bool ath9k_hw_chip_reset(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) { + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) + return false; + } else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) + return false; + + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) + return false; + + ah->chip_fullsleep = false; + ath9k_hw_init_pll(ah, chan); + ath9k_hw_set_rfmode(ah, chan); + + return true; +} + +static bool ath9k_hw_channel_change(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_channel *channel = chan->chan; + u32 synthDelay, qnum; + int r; + + for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { + if (ath9k_hw_numtxpending(ah, qnum)) { + ath_print(common, ATH_DBG_QUEUE, + "Transmit frames pending on " + "queue %d\n", qnum); + return false; + } + } + + REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN); + if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN, + AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) { + ath_print(common, ATH_DBG_FATAL, + "Could not kill baseband RX\n"); + return false; + } + + ath9k_hw_set_regs(ah, chan); + + r = ah->ath9k_hw_rf_set_freq(ah, chan); + if (r) { + ath_print(common, ATH_DBG_FATAL, + "Failed to set channel\n"); + return false; + } + + ah->eep_ops->set_txpower(ah, chan, + ath9k_regd_get_ctl(regulatory, chan), + channel->max_antenna_gain * 2, + channel->max_power * 2, + min((u32) MAX_RATE_POWER, 
+ (u32) regulatory->power_limit)); + + synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; + if (IS_CHAN_B(chan)) + synthDelay = (4 * synthDelay) / 22; + else + synthDelay /= 10; + + udelay(synthDelay + BASE_ACTIVATE_DELAY); + + REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); + + if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) + ath9k_hw_set_delta_slope(ah, chan); + + ah->ath9k_hw_spur_mitigate_freq(ah, chan); + + if (!chan->oneTimeCalsDone) + chan->oneTimeCalsDone = true; + + return true; +} + +static void ath9k_enable_rfkill(struct ath_hw *ah) +{ + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, + AR_GPIO_INPUT_EN_VAL_RFSILENT_BB); + + REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2, + AR_GPIO_INPUT_MUX2_RFSILENT); + + ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio); + REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB); +} + +int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, + bool bChannelChange) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 saveLedState; + struct ath9k_channel *curchan = ah->curchan; + u32 saveDefAntenna; + u32 macStaId1; + u64 tsf = 0; + int i, rx_chainmask, r; + + ah->txchainmask = common->tx_chainmask; + ah->rxchainmask = common->rx_chainmask; + + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) + return -EIO; + + if (curchan && !ah->chip_fullsleep) + ath9k_hw_getnf(ah, curchan); + + if (bChannelChange && + (ah->chip_fullsleep != true) && + (ah->curchan != NULL) && + (chan->channel != ah->curchan->channel) && + ((chan->channelFlags & CHANNEL_ALL) == + (ah->curchan->channelFlags & CHANNEL_ALL)) && + !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) || + IS_CHAN_A_5MHZ_SPACED(ah->curchan))) { + + if (ath9k_hw_channel_change(ah, chan)) { + ath9k_hw_loadnf(ah, ah->curchan); + ath9k_hw_start_nfcal(ah); + return 0; + } + } + + saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA); + if (saveDefAntenna == 0) + saveDefAntenna = 1; + + macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; + + /* For chips on which RTC reset is done, save TSF before it gets cleared */ + if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) + tsf = ath9k_hw_gettsf64(ah); + + saveLedState = REG_READ(ah, AR_CFG_LED) & + (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL | + AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW); + + ath9k_hw_mark_phy_inactive(ah); + + if (AR_SREV_9271(ah) && ah->htc_reset_init) { + REG_WRITE(ah, + AR9271_RESET_POWER_DOWN_CONTROL, + AR9271_RADIO_RF_RST); + udelay(50); + } + + if (!ath9k_hw_chip_reset(ah, chan)) { + ath_print(common, ATH_DBG_FATAL, "Chip reset failed\n"); + return -EINVAL; + } + + if (AR_SREV_9271(ah) && ah->htc_reset_init) { + ah->htc_reset_init = false; + REG_WRITE(ah, + AR9271_RESET_POWER_DOWN_CONTROL, + AR9271_GATE_MAC_CTL); + udelay(50); + } + + /* Restore TSF */ + if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) + ath9k_hw_settsf64(ah, tsf); + + if (AR_SREV_9280_10_OR_LATER(ah)) + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); + + if (AR_SREV_9287_12_OR_LATER(ah)) { + /* Enable ASYNC FIFO */ + REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, + AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL); + REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO); + REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, + AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); + REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, + AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); + } + r = ath9k_hw_process_ini(ah, chan); + if (r) + return r; + + /* Setup MFP options for CCMP */ + if (AR_SREV_9280_20_OR_LATER(ah)) { + /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt 
+ * frames when constructing CCMP AAD. */ + REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT, + 0xc7ff); + ah->sw_mgmt_crypto = false; + } else if (AR_SREV_9160_10_OR_LATER(ah)) { + /* Disable hardware crypto for management frames */ + REG_CLR_BIT(ah, AR_PCU_MISC_MODE2, + AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE); + REG_SET_BIT(ah, AR_PCU_MISC_MODE2, + AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT); + ah->sw_mgmt_crypto = true; + } else + ah->sw_mgmt_crypto = true; + + if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) + ath9k_hw_set_delta_slope(ah, chan); + + ah->ath9k_hw_spur_mitigate_freq(ah, chan); + ah->eep_ops->set_board_values(ah, chan); + + REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr)); + REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4) + | macStaId1 + | AR_STA_ID1_RTS_USE_DEF + | (ah->config. + ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0) + | ah->sta_id1_defaults); + ath9k_hw_set_operating_mode(ah, ah->opmode); + + ath_hw_setbssidmask(common); + + REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna); + + ath9k_hw_write_associd(ah); + + REG_WRITE(ah, AR_ISR, ~0); + + REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); + + r = ah->ath9k_hw_rf_set_freq(ah, chan); + if (r) + return r; + + for (i = 0; i < AR_NUM_DCU; i++) + REG_WRITE(ah, AR_DQCUMASK(i), 1 << i); + + ah->intr_txqs = 0; + for (i = 0; i < ah->caps.total_queues; i++) + ath9k_hw_resettxqueue(ah, i); + + ath9k_hw_init_interrupt_masks(ah, ah->opmode); + ath9k_hw_init_qos(ah); + + if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) + ath9k_enable_rfkill(ah); + + ath9k_hw_init_global_settings(ah); + + if (AR_SREV_9287_12_OR_LATER(ah)) { + REG_WRITE(ah, AR_D_GBL_IFS_SIFS, + AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR); + REG_WRITE(ah, AR_D_GBL_IFS_SLOT, + AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR); + REG_WRITE(ah, AR_D_GBL_IFS_EIFS, + AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR); + + REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR); + REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR); + + REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER, + AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768); + REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN, + AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL); + } + if (AR_SREV_9287_12_OR_LATER(ah)) { + REG_SET_BIT(ah, AR_PCU_MISC_MODE2, + AR_PCU_MISC_MODE2_ENABLE_AGGWEP); + } + + REG_WRITE(ah, AR_STA_ID1, + REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM); + + ath9k_hw_set_dma(ah); + + REG_WRITE(ah, AR_OBS, 8); + + if (ah->config.rx_intr_mitigation) { + REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); + REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000); + } + + ath9k_hw_init_bb(ah, chan); + + if (!ath9k_hw_init_cal(ah, chan)) + return -EIO; + + rx_chainmask = ah->rxchainmask; + if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) { + REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask); + REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask); + } + + REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ); + + /* + * For big endian systems turn on swapping for descriptors + */ + if (AR_SREV_9100(ah)) { + u32 mask; + mask = REG_READ(ah, AR_CFG); + if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) { + ath_print(common, ATH_DBG_RESET, + "CFG Byte Swap Set 0x%x\n", mask); + } else { + mask = + INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB; + REG_WRITE(ah, AR_CFG, mask); + ath_print(common, ATH_DBG_RESET, + "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG)); + } + } else { + /* Configure AR9271 target WLAN */ + if (AR_SREV_9271(ah)) + REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB); +#ifdef __BIG_ENDIAN + else + REG_WRITE(ah, AR_CFG, 
AR_CFG_SWTD | AR_CFG_SWRD); +#endif + } + + if (ah->btcoex_hw.enabled) + ath9k_hw_btcoex_enable(ah); + + return 0; +} +EXPORT_SYMBOL(ath9k_hw_reset); + +/************************/ +/* Key Cache Management */ +/************************/ + +bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry) +{ + u32 keyType; + + if (entry >= ah->caps.keycache_size) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "keychache entry %u out of range\n", entry); + return false; + } + + keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry)); + + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR); + REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0); + + if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) { + u16 micentry = entry + 64; + + REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0); + + } + + return true; +} +EXPORT_SYMBOL(ath9k_hw_keyreset); + +bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac) +{ + u32 macHi, macLo; + + if (entry >= ah->caps.keycache_size) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "keychache entry %u out of range\n", entry); + return false; + } + + if (mac != NULL) { + macHi = (mac[5] << 8) | mac[4]; + macLo = (mac[3] << 24) | + (mac[2] << 16) | + (mac[1] << 8) | + mac[0]; + macLo >>= 1; + macLo |= (macHi & 1) << 31; + macHi >>= 1; + } else { + macLo = macHi = 0; + } + REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo); + REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID); + + return true; +} +EXPORT_SYMBOL(ath9k_hw_keysetmac); + +bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry, + const struct ath9k_keyval *k, + const u8 *mac) +{ + const struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_common *common = ath9k_hw_common(ah); + u32 key0, key1, key2, key3, key4; + u32 keyType; + + if (entry >= pCap->keycache_size) { + ath_print(common, ATH_DBG_FATAL, + "keycache entry %u out of range\n", entry); + return false; + } + + switch (k->kv_type) { + case ATH9K_CIPHER_AES_OCB: + keyType = AR_KEYTABLE_TYPE_AES; + break; + case ATH9K_CIPHER_AES_CCM: + if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) { + ath_print(common, ATH_DBG_ANY, + "AES-CCM not supported by mac rev 0x%x\n", + ah->hw_version.macRev); + return false; + } + keyType = AR_KEYTABLE_TYPE_CCM; + break; + case ATH9K_CIPHER_TKIP: + keyType = AR_KEYTABLE_TYPE_TKIP; + if (ATH9K_IS_MIC_ENABLED(ah) + && entry + 64 >= pCap->keycache_size) { + ath_print(common, ATH_DBG_ANY, + "entry %u inappropriate for TKIP\n", entry); + return false; + } + break; + case ATH9K_CIPHER_WEP: + if (k->kv_len < WLAN_KEY_LEN_WEP40) { + ath_print(common, ATH_DBG_ANY, + "WEP key length %u too small\n", k->kv_len); + return false; + } + if (k->kv_len <= WLAN_KEY_LEN_WEP40) + keyType = AR_KEYTABLE_TYPE_40; + else if (k->kv_len <= WLAN_KEY_LEN_WEP104) + keyType = AR_KEYTABLE_TYPE_104; + else + keyType = AR_KEYTABLE_TYPE_128; + break; + case ATH9K_CIPHER_CLR: + keyType = AR_KEYTABLE_TYPE_CLR; + break; + default: + ath_print(common, ATH_DBG_FATAL, + "cipher %u not supported\n", k->kv_type); + return false; + } + + key0 = get_unaligned_le32(k->kv_val + 0); + key1 = get_unaligned_le16(k->kv_val + 4); 
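+ /*
+  * The 128-bit key material is carried in five register-sized fields
+  * split 32/16/32/16/32 bits: key0/key1 above hold key[47:0] and
+  * key2..key4 below hold key[127:48]. For key bytes k[0]..k[15] this
+  * gives key0 = k[3..0], key1 = k[5..4], key2 = k[9..6],
+  * key3 = k[11..10], key4 = k[15..12], each packed little endian.
+  */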
+ key2 = get_unaligned_le32(k->kv_val + 6); + key3 = get_unaligned_le16(k->kv_val + 10); + key4 = get_unaligned_le32(k->kv_val + 12); + if (k->kv_len <= WLAN_KEY_LEN_WEP104) + key4 &= 0xff; + + /* + * Note: Key cache registers access special memory area that requires + * two 32-bit writes to actually update the values in the internal + * memory. Consequently, the exact order and pairs used here must be + * maintained. + */ + + if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) { + u16 micentry = entry + 64; + + /* + * Write inverted key[47:0] first to avoid Michael MIC errors + * on frames that could be sent or received at the same time. + * The correct key will be written in the end once everything + * else is ready. + */ + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1); + + /* Write key[95:48] */ + REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3); + + /* Write key[127:96] and key type */ + REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); + REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); + + /* Write MAC address for the entry */ + (void) ath9k_hw_keysetmac(ah, entry, mac); + + if (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) { + /* + * TKIP uses two key cache entries: + * Michael MIC TX/RX keys in the same key cache entry + * (idx = main index + 64): + * key0 [31:0] = RX key [31:0] + * key1 [15:0] = TX key [31:16] + * key1 [31:16] = reserved + * key2 [31:0] = RX key [63:32] + * key3 [15:0] = TX key [15:0] + * key3 [31:16] = reserved + * key4 [31:0] = TX key [63:32] + */ + u32 mic0, mic1, mic2, mic3, mic4; + + mic0 = get_unaligned_le32(k->kv_mic + 0); + mic2 = get_unaligned_le32(k->kv_mic + 4); + mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff; + mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff; + mic4 = get_unaligned_le32(k->kv_txmic + 4); + + /* Write RX[31:0] and TX[31:16] */ + REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1); + + /* Write RX[63:32] and TX[15:0] */ + REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3); + + /* Write TX[63:32] and keyType(reserved) */ + REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4); + REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), + AR_KEYTABLE_TYPE_CLR); + + } else { + /* + * TKIP uses four key cache entries (two for group + * keys): + * Michael MIC TX/RX keys are in different key cache + * entries (idx = main index + 64 for TX and + * main index + 32 + 96 for RX): + * key0 [31:0] = TX/RX MIC key [31:0] + * key1 [31:0] = reserved + * key2 [31:0] = TX/RX MIC key [63:32] + * key3 [31:0] = reserved + * key4 [31:0] = reserved + * + * Upper layer code will call this function separately + * for TX and RX keys when these registers offsets are + * used. 
+ */ + u32 mic0, mic2; + + mic0 = get_unaligned_le32(k->kv_mic + 0); + mic2 = get_unaligned_le32(k->kv_mic + 4); + + /* Write MIC key[31:0] */ + REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); + + /* Write MIC key[63:32] */ + REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0); + + /* Write TX[63:32] and keyType(reserved) */ + REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), + AR_KEYTABLE_TYPE_CLR); + } + + /* MAC address registers are reserved for the MIC entry */ + REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0); + + /* + * Write the correct (un-inverted) key[47:0] last to enable + * TKIP now that all other registers are set with correct + * values. + */ + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); + } else { + /* Write key[47:0] */ + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); + + /* Write key[95:48] */ + REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3); + + /* Write key[127:96] and key type */ + REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); + REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); + + /* Write MAC address for the entry */ + (void) ath9k_hw_keysetmac(ah, entry, mac); + } + + return true; +} +EXPORT_SYMBOL(ath9k_hw_set_keycache_entry); + +bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry) +{ + if (entry < ah->caps.keycache_size) { + u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry)); + if (val & AR_KEYTABLE_VALID) + return true; + } + return false; +} +EXPORT_SYMBOL(ath9k_hw_keyisvalid); + +/******************************/ +/* Power Management (Chipset) */ +/******************************/ + +static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) +{ + REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); + if (setChip) { + REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + if (!AR_SREV_9100(ah)) + REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); + + if(!AR_SREV_5416(ah)) + REG_CLR_BIT(ah, (AR_RTC_RESET), + AR_RTC_RESET_EN); + } +} + +static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip) +{ + REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); + if (setChip) { + struct ath9k_hw_capabilities *pCap = &ah->caps; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + REG_WRITE(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_ON_INT); + } else { + REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + } + } +} + +static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) +{ + u32 val; + int i; + + if (setChip) { + if ((REG_READ(ah, AR_RTC_STATUS) & + AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) { + if (ath9k_hw_set_reset_reg(ah, + ATH9K_RESET_POWER_ON) != true) { + return false; + } + ath9k_hw_init_pll(ah, NULL); + } + if (AR_SREV_9100(ah)) + REG_SET_BIT(ah, AR_RTC_RESET, + AR_RTC_RESET_EN); + + REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + udelay(50); + + for (i = POWER_UP_TIME / 50; i > 0; i--) { + val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; + if (val == AR_RTC_STATUS_ON) + break; + udelay(50); + REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + } + if (i == 0) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Failed to wakeup in %uus\n", + POWER_UP_TIME / 20); + return false; + } + } + + REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); + + return true; +} + +bool 
ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) +{ + struct ath_common *common = ath9k_hw_common(ah); + int status = true, setChip = true; + static const char *modes[] = { + "AWAKE", + "FULL-SLEEP", + "NETWORK SLEEP", + "UNDEFINED" + }; + + if (ah->power_mode == mode) + return status; + + ath_print(common, ATH_DBG_RESET, "%s -> %s\n", + modes[ah->power_mode], modes[mode]); + + switch (mode) { + case ATH9K_PM_AWAKE: + status = ath9k_hw_set_power_awake(ah, setChip); + break; + case ATH9K_PM_FULL_SLEEP: + ath9k_set_power_sleep(ah, setChip); + ah->chip_fullsleep = true; + break; + case ATH9K_PM_NETWORK_SLEEP: + ath9k_set_power_network_sleep(ah, setChip); + break; + default: + ath_print(common, ATH_DBG_FATAL, + "Unknown power mode %u\n", mode); + return false; + } + ah->power_mode = mode; + + return status; +} +EXPORT_SYMBOL(ath9k_hw_setpower); + +/* + * Helper for ASPM support. + * + * Disable PLL when in L0s as well as receiver clock when in L1. + * This power saving option must be enabled through the SerDes. + * + * Programming the SerDes must go through the same 288 bit serial shift + * register as the other analog registers. Hence the 9 writes. + */ +void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off) +{ + u8 i; + u32 val; + + if (ah->is_pciexpress != true) + return; + + /* Do not touch SerDes registers */ + if (ah->config.pcie_powersave_enable == 2) + return; + + /* Nothing to do on restore for 11N */ + if (!restore) { + if (AR_SREV_9280_20_OR_LATER(ah)) { + /* + * AR9280 2.0 or later chips use SerDes values from the + * initvals.h initialized depending on chipset during + * ath9k_hw_init() + */ + for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) { + REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0), + INI_RA(&ah->iniPcieSerdes, i, 1)); + } + } else if (AR_SREV_9280(ah) && + (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) { + REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00); + REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); + + /* RX shut off when elecidle is asserted */ + REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019); + REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820); + REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560); + + /* Shut off CLKREQ active in L1 */ + if (ah->config.pcie_clock_req) + REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc); + else + REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd); + + REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); + REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); + REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007); + + /* Load the new settings */ + REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); + + } else { + REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); + REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); + + /* RX shut off when elecidle is asserted */ + REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039); + REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824); + REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579); + + /* + * Ignore ah->ah_config.pcie_clock_req setting for + * pre-AR9280 11n + */ + REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff); + + REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); + REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); + REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007); + + /* Load the new settings */ + REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); + } + + udelay(1000); + + /* set bit 19 to allow forcing of pcie core into L1 state */ + REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); + + /* Several PCIe massages to ensure proper behaviour */ + if (ah->config.pcie_waen) { + val = ah->config.pcie_waen; + if (!power_off) + val &= (~AR_WA_D3_L1_DISABLE); + } else { + if (AR_SREV_9285(ah) || 
AR_SREV_9271(ah) || + AR_SREV_9287(ah)) { + val = AR9285_WA_DEFAULT; + if (!power_off) + val &= (~AR_WA_D3_L1_DISABLE); + } else if (AR_SREV_9280(ah)) { + /* + * On AR9280 chips bit 22 of 0x4004 needs to be + * set otherwise card may disappear. + */ + val = AR9280_WA_DEFAULT; + if (!power_off) + val &= (~AR_WA_D3_L1_DISABLE); + } else + val = AR_WA_DEFAULT; + } + + REG_WRITE(ah, AR_WA, val); + } + + if (power_off) { + /* + * Set PCIe workaround bits + * bit 14 in WA register (disable L1) should only + * be set when device enters D3 and be cleared + * when device comes back to D0. + */ + if (ah->config.pcie_waen) { + if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE) + REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE); + } else { + if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) || + AR_SREV_9287(ah)) && + (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) || + (AR_SREV_9280(ah) && + (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) { + REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE); + } + } + } +} +EXPORT_SYMBOL(ath9k_hw_configpcipowersave); + +/**********************/ +/* Interrupt Handling */ +/**********************/ + +bool ath9k_hw_intrpend(struct ath_hw *ah) +{ + u32 host_isr; + + if (AR_SREV_9100(ah)) + return true; + + host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE); + if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS)) + return true; + + host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE); + if ((host_isr & AR_INTR_SYNC_DEFAULT) + && (host_isr != AR_INTR_SPURIOUS)) + return true; + + return false; +} +EXPORT_SYMBOL(ath9k_hw_intrpend); + +bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked) +{ + u32 isr = 0; + u32 mask2 = 0; + struct ath9k_hw_capabilities *pCap = &ah->caps; + u32 sync_cause = 0; + bool fatal_int = false; + struct ath_common *common = ath9k_hw_common(ah); + + if (!AR_SREV_9100(ah)) { + if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { + if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) + == AR_RTC_STATUS_ON) { + isr = REG_READ(ah, AR_ISR); + } + } + + sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & + AR_INTR_SYNC_DEFAULT; + + *masked = 0; + + if (!isr && !sync_cause) + return false; + } else { + *masked = 0; + isr = REG_READ(ah, AR_ISR); + } + + if (isr) { + if (isr & AR_ISR_BCNMISC) { + u32 isr2; + isr2 = REG_READ(ah, AR_ISR_S2); + if (isr2 & AR_ISR_S2_TIM) + mask2 |= ATH9K_INT_TIM; + if (isr2 & AR_ISR_S2_DTIM) + mask2 |= ATH9K_INT_DTIM; + if (isr2 & AR_ISR_S2_DTIMSYNC) + mask2 |= ATH9K_INT_DTIMSYNC; + if (isr2 & (AR_ISR_S2_CABEND)) + mask2 |= ATH9K_INT_CABEND; + if (isr2 & AR_ISR_S2_GTT) + mask2 |= ATH9K_INT_GTT; + if (isr2 & AR_ISR_S2_CST) + mask2 |= ATH9K_INT_CST; + if (isr2 & AR_ISR_S2_TSFOOR) + mask2 |= ATH9K_INT_TSFOOR; + } + + isr = REG_READ(ah, AR_ISR_RAC); + if (isr == 0xffffffff) { + *masked = 0; + return false; + } + + *masked = isr & ATH9K_INT_COMMON; + + if (ah->config.rx_intr_mitigation) { + if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) + *masked |= ATH9K_INT_RX; + } + + if (isr & (AR_ISR_RXOK | AR_ISR_RXERR)) + *masked |= ATH9K_INT_RX; + if (isr & + (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR | + AR_ISR_TXEOL)) { + u32 s0_s, s1_s; + + *masked |= ATH9K_INT_TX; + + s0_s = REG_READ(ah, AR_ISR_S0_S); + ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); + ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); + + s1_s = REG_READ(ah, AR_ISR_S1_S); + ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); + ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); + } + + if (isr & AR_ISR_RXORN) { + ath_print(common, ATH_DBG_INTERRUPT, + "receive FIFO overrun interrupt\n"); + } + + if 
(!AR_SREV_9100(ah)) { + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + u32 isr5 = REG_READ(ah, AR_ISR_S5_S); + if (isr5 & AR_ISR_S5_TIM_TIMER) + *masked |= ATH9K_INT_TIM_TIMER; + } + } + + *masked |= mask2; + } + + if (AR_SREV_9100(ah)) + return true; + + if (isr & AR_ISR_GENTMR) { + u32 s5_s; + + s5_s = REG_READ(ah, AR_ISR_S5_S); + if (isr & AR_ISR_GENTMR) { + ah->intr_gen_timer_trigger = + MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); + + ah->intr_gen_timer_thresh = + MS(s5_s, AR_ISR_S5_GENTIMER_THRESH); + + if (ah->intr_gen_timer_trigger) + *masked |= ATH9K_INT_GENTIMER; + + } + } + + if (sync_cause) { + fatal_int = + (sync_cause & + (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR)) + ? true : false; + + if (fatal_int) { + if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) { + ath_print(common, ATH_DBG_ANY, + "received PCI FATAL interrupt\n"); + } + if (sync_cause & AR_INTR_SYNC_HOST1_PERR) { + ath_print(common, ATH_DBG_ANY, + "received PCI PERR interrupt\n"); + } + *masked |= ATH9K_INT_FATAL; + } + if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { + ath_print(common, ATH_DBG_INTERRUPT, + "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n"); + REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); + REG_WRITE(ah, AR_RC, 0); + *masked |= ATH9K_INT_FATAL; + } + if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) { + ath_print(common, ATH_DBG_INTERRUPT, + "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); + } + + REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); + (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR); + } + + return true; +} +EXPORT_SYMBOL(ath9k_hw_getisr); + +enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) +{ + u32 omask = ah->mask_reg; + u32 mask, mask2; + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_common *common = ath9k_hw_common(ah); + + ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); + + if (omask & ATH9K_INT_GLOBAL) { + ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n"); + REG_WRITE(ah, AR_IER, AR_IER_DISABLE); + (void) REG_READ(ah, AR_IER); + if (!AR_SREV_9100(ah)) { + REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0); + (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE); + + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); + (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); + } + } + + mask = ints & ATH9K_INT_COMMON; + mask2 = 0; + + if (ints & ATH9K_INT_TX) { + if (ah->txok_interrupt_mask) + mask |= AR_IMR_TXOK; + if (ah->txdesc_interrupt_mask) + mask |= AR_IMR_TXDESC; + if (ah->txerr_interrupt_mask) + mask |= AR_IMR_TXERR; + if (ah->txeol_interrupt_mask) + mask |= AR_IMR_TXEOL; + } + if (ints & ATH9K_INT_RX) { + mask |= AR_IMR_RXERR; + if (ah->config.rx_intr_mitigation) + mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; + else + mask |= AR_IMR_RXOK | AR_IMR_RXDESC; + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) + mask |= AR_IMR_GENTMR; + } + + if (ints & (ATH9K_INT_BMISC)) { + mask |= AR_IMR_BCNMISC; + if (ints & ATH9K_INT_TIM) + mask2 |= AR_IMR_S2_TIM; + if (ints & ATH9K_INT_DTIM) + mask2 |= AR_IMR_S2_DTIM; + if (ints & ATH9K_INT_DTIMSYNC) + mask2 |= AR_IMR_S2_DTIMSYNC; + if (ints & ATH9K_INT_CABEND) + mask2 |= AR_IMR_S2_CABEND; + if (ints & ATH9K_INT_TSFOOR) + mask2 |= AR_IMR_S2_TSFOOR; + } + + if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) { + mask |= AR_IMR_BCNMISC; + if (ints & ATH9K_INT_GTT) + mask2 |= AR_IMR_S2_GTT; + if (ints & ATH9K_INT_CST) + mask2 |= AR_IMR_S2_CST; + } + + ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask); + REG_WRITE(ah, AR_IMR, mask); + mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM | + AR_IMR_S2_DTIM | + AR_IMR_S2_DTIMSYNC | + AR_IMR_S2_CABEND | + AR_IMR_S2_CABTO | + 
AR_IMR_S2_TSFOOR | + AR_IMR_S2_GTT | AR_IMR_S2_CST); + REG_WRITE(ah, AR_IMR_S2, mask | mask2); + ah->mask_reg = ints; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + if (ints & ATH9K_INT_TIM_TIMER) + REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); + else + REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); + } + + if (ints & ATH9K_INT_GLOBAL) { + ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n"); + REG_WRITE(ah, AR_IER, AR_IER_ENABLE); + if (!AR_SREV_9100(ah)) { + REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, + AR_INTR_MAC_IRQ); + REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ); + + + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, + AR_INTR_SYNC_DEFAULT); + REG_WRITE(ah, AR_INTR_SYNC_MASK, + AR_INTR_SYNC_DEFAULT); + } + ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", + REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); + } + + return omask; +} +EXPORT_SYMBOL(ath9k_hw_set_interrupts); + +/*******************/ +/* Beacon Handling */ +/*******************/ + +void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) +{ + int flags = 0; + + ah->beacon_interval = beacon_period; + + switch (ah->opmode) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_MONITOR: + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); + REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); + REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); + flags |= AR_TBTT_TIMER_EN; + break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + REG_SET_BIT(ah, AR_TXCFG, + AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); + REG_WRITE(ah, AR_NEXT_NDP_TIMER, + TU_TO_USEC(next_beacon + + (ah->atim_window ? ah-> + atim_window : 1))); + flags |= AR_NDP_TIMER_EN; + case NL80211_IFTYPE_AP: + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); + REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, + TU_TO_USEC(next_beacon - + ah->config. + dma_beacon_response_time)); + REG_WRITE(ah, AR_NEXT_SWBA, + TU_TO_USEC(next_beacon - + ah->config. 
+ sw_beacon_response_time)); + flags |= + AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; + break; + default: + ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON, + "%s: unsupported opmode: %d\n", + __func__, ah->opmode); + return; + break; + } + + REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period)); + REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period)); + REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period)); + REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period)); + + beacon_period &= ~ATH9K_BEACON_ENA; + if (beacon_period & ATH9K_BEACON_RESET_TSF) { + ath9k_hw_reset_tsf(ah); + } + + REG_SET_BIT(ah, AR_TIMER_MODE, flags); +} +EXPORT_SYMBOL(ath9k_hw_beaconinit); + +void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, + const struct ath9k_beacon_state *bs) +{ + u32 nextTbtt, beaconintval, dtimperiod, beacontimeout; + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_common *common = ath9k_hw_common(ah); + + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt)); + + REG_WRITE(ah, AR_BEACON_PERIOD, + TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD)); + REG_WRITE(ah, AR_DMA_BEACON_PERIOD, + TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD)); + + REG_RMW_FIELD(ah, AR_RSSI_THR, + AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold); + + beaconintval = bs->bs_intval & ATH9K_BEACON_PERIOD; + + if (bs->bs_sleepduration > beaconintval) + beaconintval = bs->bs_sleepduration; + + dtimperiod = bs->bs_dtimperiod; + if (bs->bs_sleepduration > dtimperiod) + dtimperiod = bs->bs_sleepduration; + + if (beaconintval == dtimperiod) + nextTbtt = bs->bs_nextdtim; + else + nextTbtt = bs->bs_nexttbtt; + + ath_print(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim); + ath_print(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt); + ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval); + ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod); + + REG_WRITE(ah, AR_NEXT_DTIM, + TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP)); + REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP)); + + REG_WRITE(ah, AR_SLEEP1, + SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT) + | AR_SLEEP1_ASSUME_DTIM); + + if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP) + beacontimeout = (BEACON_TIMEOUT_VAL << 3); + else + beacontimeout = MIN_BEACON_TIMEOUT_VAL; + + REG_WRITE(ah, AR_SLEEP2, + SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT)); + + REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval)); + REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod)); + + REG_SET_BIT(ah, AR_TIMER_MODE, + AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN | + AR_DTIM_TIMER_EN); + + /* TSF Out of Range Threshold */ + REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold); +} +EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers); + +/*******************/ +/* HW Capabilities */ +/*******************/ + +int ath9k_hw_fill_cap_info(struct ath_hw *ah) +{ + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ath_common *common = ath9k_hw_common(ah); + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + + u16 capField = 0, eeval; + + eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0); + regulatory->current_rd = eeval; + + eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_1); + if (AR_SREV_9285_10_OR_LATER(ah)) + eeval |= AR9285_RDEXT_DEFAULT; + regulatory->current_rd_ext = eeval; + + capField = ah->eep_ops->get_eeprom(ah, EEP_OP_CAP); + + if (ah->opmode != NL80211_IFTYPE_AP && + ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) { + if (regulatory->current_rd 
== 0x64 || + regulatory->current_rd == 0x65) + regulatory->current_rd += 5; + else if (regulatory->current_rd == 0x41) + regulatory->current_rd = 0x43; + ath_print(common, ATH_DBG_REGULATORY, + "regdomain mapped to 0x%x\n", regulatory->current_rd); + } + + eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE); + if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) { + ath_print(common, ATH_DBG_FATAL, + "no band has been marked as supported in EEPROM.\n"); + return -EINVAL; + } + + bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX); + + if (eeval & AR5416_OPFLAGS_11A) { + set_bit(ATH9K_MODE_11A, pCap->wireless_modes); + if (ah->config.ht_enable) { + if (!(eeval & AR5416_OPFLAGS_N_5G_HT20)) + set_bit(ATH9K_MODE_11NA_HT20, + pCap->wireless_modes); + if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) { + set_bit(ATH9K_MODE_11NA_HT40PLUS, + pCap->wireless_modes); + set_bit(ATH9K_MODE_11NA_HT40MINUS, + pCap->wireless_modes); + } + } + } + + if (eeval & AR5416_OPFLAGS_11G) { + set_bit(ATH9K_MODE_11G, pCap->wireless_modes); + if (ah->config.ht_enable) { + if (!(eeval & AR5416_OPFLAGS_N_2G_HT20)) + set_bit(ATH9K_MODE_11NG_HT20, + pCap->wireless_modes); + if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) { + set_bit(ATH9K_MODE_11NG_HT40PLUS, + pCap->wireless_modes); + set_bit(ATH9K_MODE_11NG_HT40MINUS, + pCap->wireless_modes); + } + } + } + + pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK); + /* + * For AR9271 we will temporarilly uses the rx chainmax as read from + * the EEPROM. + */ + if ((ah->hw_version.devid == AR5416_DEVID_PCI) && + !(eeval & AR5416_OPFLAGS_11A) && + !(AR_SREV_9271(ah))) + /* CB71: GPIO 0 is pulled down to indicate 3 rx chains */ + pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7; + else + /* Use rx_chainmask from EEPROM. */ + pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK); + + if (!(AR_SREV_9280(ah) && (ah->hw_version.macRev == 0))) + ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA; + + pCap->low_2ghz_chan = 2312; + pCap->high_2ghz_chan = 2732; + + pCap->low_5ghz_chan = 4920; + pCap->high_5ghz_chan = 6100; + + pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP; + pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP; + pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM; + + pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP; + pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP; + pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM; + + if (ah->config.ht_enable) + pCap->hw_caps |= ATH9K_HW_CAP_HT; + else + pCap->hw_caps &= ~ATH9K_HW_CAP_HT; + + pCap->hw_caps |= ATH9K_HW_CAP_GTT; + pCap->hw_caps |= ATH9K_HW_CAP_VEOL; + pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK; + pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH; + + if (capField & AR_EEPROM_EEPCAP_MAXQCU) + pCap->total_queues = + MS(capField, AR_EEPROM_EEPCAP_MAXQCU); + else + pCap->total_queues = ATH9K_NUM_TX_QUEUES; + + if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES) + pCap->keycache_size = + 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES); + else + pCap->keycache_size = AR_KEYTABLE_SIZE; + + pCap->hw_caps |= ATH9K_HW_CAP_FASTCC; + + if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) + pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1; + else + pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD; + + if (AR_SREV_9285_10_OR_LATER(ah)) + pCap->num_gpio_pins = AR9285_NUM_GPIO; + else if (AR_SREV_9280_10_OR_LATER(ah)) + pCap->num_gpio_pins = AR928X_NUM_GPIO; + else + pCap->num_gpio_pins = AR_NUM_GPIO; + + if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) { + pCap->hw_caps |= ATH9K_HW_CAP_CST; + pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; + } else { + pCap->rts_aggr_limit = (8 * 1024); + } + + 
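+ /*
+  * Note on the capability decode above: keycache_size is a power of
+  * two (1 << field), e.g. an EEPROM field value of 7 selects the
+  * usual 128-entry key cache that the TKIP key code relies on when
+  * it pairs entry and entry + 64.
+  */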
pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM; + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) && defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) && defined(CONFIG_RFKILL_BACKPORT) || defined(CONFIG_RFKILL_BACKPORT_MODULE)) + ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT); + if (ah->rfsilent & EEP_RFSILENT_ENABLED) { + ah->rfkill_gpio = + MS(ah->rfsilent, EEP_RFSILENT_GPIO_SEL); + ah->rfkill_polarity = + MS(ah->rfsilent, EEP_RFSILENT_POLARITY); + + pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; + } +#endif + + pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP; + + if (AR_SREV_9280(ah) || AR_SREV_9285(ah)) + pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS; + else + pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS; + + if (regulatory->current_rd_ext & (1 << REG_EXT_JAPAN_MIDBAND)) { + pCap->reg_cap = + AR_EEPROM_EEREGCAP_EN_KK_NEW_11A | + AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN | + AR_EEPROM_EEREGCAP_EN_KK_U2 | + AR_EEPROM_EEREGCAP_EN_KK_MIDBAND; + } else { + pCap->reg_cap = + AR_EEPROM_EEREGCAP_EN_KK_NEW_11A | + AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN; + } + + /* Advertise midband for AR5416 with FCC midband set in eeprom */ + if (regulatory->current_rd_ext & (1 << REG_EXT_FCC_MIDBAND) && + AR_SREV_5416(ah)) + pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND; + + pCap->num_antcfg_5ghz = + ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ); + pCap->num_antcfg_2ghz = + ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ); + + if (AR_SREV_9280_10_OR_LATER(ah) && + ath9k_hw_btcoex_supported(ah)) { + btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO; + btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO; + + if (AR_SREV_9285(ah)) { + btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; + btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO; + } else { + btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE; + } + } else { + btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE; + } + + return 0; +} + +bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type, + u32 capability, u32 *result) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + switch (type) { + case ATH9K_CAP_CIPHER: + switch (capability) { + case ATH9K_CIPHER_AES_CCM: + case ATH9K_CIPHER_AES_OCB: + case ATH9K_CIPHER_TKIP: + case ATH9K_CIPHER_WEP: + case ATH9K_CIPHER_MIC: + case ATH9K_CIPHER_CLR: + return true; + default: + return false; + } + case ATH9K_CAP_TKIP_MIC: + switch (capability) { + case 0: + return true; + case 1: + return (ah->sta_id1_defaults & + AR_STA_ID1_CRPT_MIC_ENABLE) ? true : + false; + } + case ATH9K_CAP_TKIP_SPLIT: + return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ? + false : true; + case ATH9K_CAP_DIVERSITY: + return (REG_READ(ah, AR_PHY_CCK_DETECT) & + AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ? + true : false; + case ATH9K_CAP_MCAST_KEYSRCH: + switch (capability) { + case 0: + return true; + case 1: + if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) { + return false; + } else { + return (ah->sta_id1_defaults & + AR_STA_ID1_MCAST_KSRCH) ? true : + false; + } + } + return false; + case ATH9K_CAP_TXPOW: + switch (capability) { + case 0: + return 0; + case 1: + *result = regulatory->power_limit; + return 0; + case 2: + *result = regulatory->max_power_level; + return 0; + case 3: + *result = regulatory->tp_scale; + return 0; + } + return false; + case ATH9K_CAP_DS: + return (AR_SREV_9280_20_OR_LATER(ah) && + (ah->eep_ops->get_eeprom(ah, EEP_RC_CHAIN_MASK) == 1)) + ? 
false : true; + default: + return false; + } +} +EXPORT_SYMBOL(ath9k_hw_getcapability); + +bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, + u32 capability, u32 setting, int *status) +{ + u32 v; + + switch (type) { + case ATH9K_CAP_TKIP_MIC: + if (setting) + ah->sta_id1_defaults |= + AR_STA_ID1_CRPT_MIC_ENABLE; + else + ah->sta_id1_defaults &= + ~AR_STA_ID1_CRPT_MIC_ENABLE; + return true; + case ATH9K_CAP_DIVERSITY: + v = REG_READ(ah, AR_PHY_CCK_DETECT); + if (setting) + v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV; + else + v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV; + REG_WRITE(ah, AR_PHY_CCK_DETECT, v); + return true; + case ATH9K_CAP_MCAST_KEYSRCH: + if (setting) + ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH; + else + ah->sta_id1_defaults &= ~AR_STA_ID1_MCAST_KSRCH; + return true; + default: + return false; + } +} +EXPORT_SYMBOL(ath9k_hw_setcapability); + +/****************************/ +/* GPIO / RFKILL / Antennae */ +/****************************/ + +static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, + u32 gpio, u32 type) +{ + int addr; + u32 gpio_shift, tmp; + + if (gpio > 11) + addr = AR_GPIO_OUTPUT_MUX3; + else if (gpio > 5) + addr = AR_GPIO_OUTPUT_MUX2; + else + addr = AR_GPIO_OUTPUT_MUX1; + + gpio_shift = (gpio % 6) * 5; + + if (AR_SREV_9280_20_OR_LATER(ah) + || (addr != AR_GPIO_OUTPUT_MUX1)) { + REG_RMW(ah, addr, (type << gpio_shift), + (0x1f << gpio_shift)); + } else { + tmp = REG_READ(ah, addr); + tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0); + tmp &= ~(0x1f << gpio_shift); + tmp |= (type << gpio_shift); + REG_WRITE(ah, addr, tmp); + } +} + +void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio) +{ + u32 gpio_shift; + + BUG_ON(gpio >= ah->caps.num_gpio_pins); + + gpio_shift = gpio << 1; + + REG_RMW(ah, + AR_GPIO_OE_OUT, + (AR_GPIO_OE_OUT_DRV_NO << gpio_shift), + (AR_GPIO_OE_OUT_DRV << gpio_shift)); +} +EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input); + +u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) +{ +#define MS_REG_READ(x, y) \ + (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y))) + + if (gpio >= ah->caps.num_gpio_pins) + return 0xffffffff; + + if (AR_SREV_9287_10_OR_LATER(ah)) + return MS_REG_READ(AR9287, gpio) != 0; + else if (AR_SREV_9285_10_OR_LATER(ah)) + return MS_REG_READ(AR9285, gpio) != 0; + else if (AR_SREV_9280_10_OR_LATER(ah)) + return MS_REG_READ(AR928X, gpio) != 0; + else + return MS_REG_READ(AR, gpio) != 0; +} +EXPORT_SYMBOL(ath9k_hw_gpio_get); + +void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, + u32 ah_signal_type) +{ + u32 gpio_shift; + + ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); + + gpio_shift = 2 * gpio; + + REG_RMW(ah, + AR_GPIO_OE_OUT, + (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), + (AR_GPIO_OE_OUT_DRV << gpio_shift)); +} +EXPORT_SYMBOL(ath9k_hw_cfg_output); + +void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) +{ + REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), + AR_GPIO_BIT(gpio)); +} +EXPORT_SYMBOL(ath9k_hw_set_gpio); + +u32 ath9k_hw_getdefantenna(struct ath_hw *ah) +{ + return REG_READ(ah, AR_DEF_ANTENNA) & 0x7; +} +EXPORT_SYMBOL(ath9k_hw_getdefantenna); + +void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna) +{ + REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7)); +} +EXPORT_SYMBOL(ath9k_hw_setantenna); + +/*********************/ +/* General Operation */ +/*********************/ + +u32 ath9k_hw_getrxfilter(struct ath_hw *ah) +{ + u32 bits = REG_READ(ah, AR_RX_FILTER); + u32 phybits = REG_READ(ah, AR_PHY_ERR); + + if (phybits & AR_PHY_ERR_RADAR) + bits |= 
ATH9K_RX_FILTER_PHYRADAR; + if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING)) + bits |= ATH9K_RX_FILTER_PHYERR; + + return bits; +} +EXPORT_SYMBOL(ath9k_hw_getrxfilter); + +void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits) +{ + u32 phybits; + + REG_WRITE(ah, AR_RX_FILTER, bits); + + phybits = 0; + if (bits & ATH9K_RX_FILTER_PHYRADAR) + phybits |= AR_PHY_ERR_RADAR; + if (bits & ATH9K_RX_FILTER_PHYERR) + phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING; + REG_WRITE(ah, AR_PHY_ERR, phybits); + + if (phybits) + REG_WRITE(ah, AR_RXCFG, + REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA); + else + REG_WRITE(ah, AR_RXCFG, + REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA); +} +EXPORT_SYMBOL(ath9k_hw_setrxfilter); + +bool ath9k_hw_phy_disable(struct ath_hw *ah) +{ + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) + return false; + + ath9k_hw_init_pll(ah, NULL); + return true; +} +EXPORT_SYMBOL(ath9k_hw_phy_disable); + +bool ath9k_hw_disable(struct ath_hw *ah) +{ + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) + return false; + + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD)) + return false; + + ath9k_hw_init_pll(ah, NULL); + return true; +} +EXPORT_SYMBOL(ath9k_hw_disable); + +void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ath9k_channel *chan = ah->curchan; + struct ieee80211_channel *channel = chan->chan; + + regulatory->power_limit = min(limit, (u32) MAX_RATE_POWER); + + ah->eep_ops->set_txpower(ah, chan, + ath9k_regd_get_ctl(regulatory, chan), + channel->max_antenna_gain * 2, + channel->max_power * 2, + min((u32) MAX_RATE_POWER, + (u32) regulatory->power_limit)); +} +EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit); + +void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac) +{ + memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN); +} +EXPORT_SYMBOL(ath9k_hw_setmac); + +void ath9k_hw_setopmode(struct ath_hw *ah) +{ + ath9k_hw_set_operating_mode(ah, ah->opmode); +} +EXPORT_SYMBOL(ath9k_hw_setopmode); + +void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1) +{ + REG_WRITE(ah, AR_MCAST_FIL0, filter0); + REG_WRITE(ah, AR_MCAST_FIL1, filter1); +} +EXPORT_SYMBOL(ath9k_hw_setmcastfilter); + +void ath9k_hw_write_associd(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid)); + REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) | + ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S)); +} +EXPORT_SYMBOL(ath9k_hw_write_associd); + +u64 ath9k_hw_gettsf64(struct ath_hw *ah) +{ + u64 tsf; + + tsf = REG_READ(ah, AR_TSF_U32); + tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32); + + return tsf; +} +EXPORT_SYMBOL(ath9k_hw_gettsf64); + +void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64) +{ + REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff); + REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff); +} +EXPORT_SYMBOL(ath9k_hw_settsf64); + +void ath9k_hw_reset_tsf(struct ath_hw *ah) +{ + if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0, + AH_TSF_WRITE_TIMEOUT)) + ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, + "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n"); + + REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); +} +EXPORT_SYMBOL(ath9k_hw_reset_tsf); + +void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting) +{ + if (setting) + ah->misc_mode |= AR_PCU_TX_ADD_TSF; + else + ah->misc_mode &= ~AR_PCU_TX_ADD_TSF; +} +EXPORT_SYMBOL(ath9k_hw_set_tsfadjust); + +/* + * Extend 15-bit time stamp from rx 
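
/*
 * Illustrative sketch (not from the driver): how ath9k_hw_write_associd()
 * above packs the 6-byte BSSID plus a 14-bit association ID into the two
 * 32-bit BSS_ID registers. The little-endian helpers stand in for
 * get_unaligned_le32/le16, and the shift of 16 is an assumption for
 * AR_BSS_ID1_AID_S; the sample BSSID/AID are invented.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	const uint8_t bssid[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
	uint16_t aid = 5;

	uint32_t bss_id0 = get_le32(bssid);
	uint32_t bss_id1 = get_le16(bssid + 4) |
			   ((uint32_t)(aid & 0x3fff) << 16);

	printf("BSS_ID0 = 0x%08x\n", bss_id0);
	printf("BSS_ID1 = 0x%08x\n", bss_id1);
	return 0;
}
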
descriptor to + * a full 64-bit TSF using the current h/w TSF. +*/ +u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp) +{ + u64 tsf; + + tsf = ath9k_hw_gettsf64(ah); + if ((tsf & 0x7fff) < rstamp) + tsf -= 0x8000; + return (tsf & ~0x7fff) | rstamp; +} +EXPORT_SYMBOL(ath9k_hw_extend_tsf); + +void ath9k_hw_set11nmac2040(struct ath_hw *ah) +{ + struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; + u32 macmode; + + if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca) + macmode = AR_2040_JOINED_RX_CLEAR; + else + macmode = 0; + + REG_WRITE(ah, AR_2040_MODE, macmode); +} + +/* HW Generic timers configuration */ + +static const struct ath_gen_timer_configuration gen_tmr_configuration[] = +{ + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP2_TIMER, AR_NDP2_PERIOD, AR_NDP2_TIMER_MODE, 0x0001}, + {AR_NEXT_NDP2_TIMER + 1*4, AR_NDP2_PERIOD + 1*4, + AR_NDP2_TIMER_MODE, 0x0002}, + {AR_NEXT_NDP2_TIMER + 2*4, AR_NDP2_PERIOD + 2*4, + AR_NDP2_TIMER_MODE, 0x0004}, + {AR_NEXT_NDP2_TIMER + 3*4, AR_NDP2_PERIOD + 3*4, + AR_NDP2_TIMER_MODE, 0x0008}, + {AR_NEXT_NDP2_TIMER + 4*4, AR_NDP2_PERIOD + 4*4, + AR_NDP2_TIMER_MODE, 0x0010}, + {AR_NEXT_NDP2_TIMER + 5*4, AR_NDP2_PERIOD + 5*4, + AR_NDP2_TIMER_MODE, 0x0020}, + {AR_NEXT_NDP2_TIMER + 6*4, AR_NDP2_PERIOD + 6*4, + AR_NDP2_TIMER_MODE, 0x0040}, + {AR_NEXT_NDP2_TIMER + 7*4, AR_NDP2_PERIOD + 7*4, + AR_NDP2_TIMER_MODE, 0x0080} +}; + +/* HW generic timer primitives */ + +/* compute and clear index of rightmost 1 */ +static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask) +{ + u32 b; + + b = *mask; + b &= (0-b); + *mask &= ~b; + b *= debruijn32; + b >>= 27; + + return timer_table->gen_timer_index[b]; +} + +u32 ath9k_hw_gettsf32(struct ath_hw *ah) +{ + return REG_READ(ah, AR_TSF_L32); +} +EXPORT_SYMBOL(ath9k_hw_gettsf32); + +struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, + void (*trigger)(void *), + void (*overflow)(void *), + void *arg, + u8 timer_index) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + struct ath_gen_timer *timer; + + timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL); + + if (timer == NULL) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Failed to allocate memory" + "for hw timer[%d]\n", timer_index); + return NULL; + } + + /* allocate a hardware generic timer slot */ + timer_table->timers[timer_index] = timer; + timer->index = timer_index; + timer->trigger = trigger; + timer->overflow = overflow; + timer->arg = arg; + + return timer; +} +EXPORT_SYMBOL(ath_gen_timer_alloc); + +void ath9k_hw_gen_timer_start(struct ath_hw *ah, + struct ath_gen_timer *timer, + u32 timer_next, + u32 timer_period) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + u32 tsf; + + BUG_ON(!timer_period); + + set_bit(timer->index, &timer_table->timer_mask.timer_bits); + + tsf = ath9k_hw_gettsf32(ah); + + ath_print(ath9k_hw_common(ah), ATH_DBG_HWTIMER, + "curent tsf %x period %x" + "timer_next %x\n", tsf, timer_period, timer_next); + + /* + * Pull timer_next forward if the current TSF already passed it + * because of software latency + 
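
/*
 * Illustrative sketch (not from the driver): the 15-bit timestamp extension
 * performed by ath9k_hw_extend_tsf() above. If the low 15 bits of the
 * current TSF are already smaller than the rx stamp, the stamp must belong
 * to the previous 0x8000-tick window, so the TSF is stepped back one window
 * before its low bits are replaced by the stamp.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t extend_tsf(uint64_t cur_tsf, uint32_t rstamp15)
{
	if ((cur_tsf & 0x7fff) < rstamp15)
		cur_tsf -= 0x8000;
	return (cur_tsf & ~0x7fffULL) | rstamp15;
}

int main(void)
{
	/* rx stamp taken just before the low 15 bits of the TSF wrapped */
	printf("0x%llx\n",
	       (unsigned long long)extend_tsf(0x123450010ULL, 0x7ff0));
	/* rx stamp taken in the same window as the current TSF */
	printf("0x%llx\n",
	       (unsigned long long)extend_tsf(0x123457ff0ULL, 0x0010));
	return 0;
}
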
*/ + if (timer_next < tsf) + timer_next = tsf + timer_period; + + /* + * Program generic timer registers + */ + REG_WRITE(ah, gen_tmr_configuration[timer->index].next_addr, + timer_next); + REG_WRITE(ah, gen_tmr_configuration[timer->index].period_addr, + timer_period); + REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, + gen_tmr_configuration[timer->index].mode_mask); + + /* Enable both trigger and thresh interrupt masks */ + REG_SET_BIT(ah, AR_IMR_S5, + (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | + SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); +} +EXPORT_SYMBOL(ath9k_hw_gen_timer_start); + +void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + + if ((timer->index < AR_FIRST_NDP_TIMER) || + (timer->index >= ATH_MAX_GEN_TIMER)) { + return; + } + + /* Clear generic timer enable bits. */ + REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, + gen_tmr_configuration[timer->index].mode_mask); + + /* Disable both trigger and thresh interrupt masks */ + REG_CLR_BIT(ah, AR_IMR_S5, + (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | + SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); + + clear_bit(timer->index, &timer_table->timer_mask.timer_bits); +} +EXPORT_SYMBOL(ath9k_hw_gen_timer_stop); + +void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + + /* free the hardware generic timer slot */ + timer_table->timers[timer->index] = NULL; + kfree(timer); +} +EXPORT_SYMBOL(ath_gen_timer_free); + +/* + * Generic Timer Interrupts handling + */ +void ath_gen_timer_isr(struct ath_hw *ah) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + struct ath_gen_timer *timer; + struct ath_common *common = ath9k_hw_common(ah); + u32 trigger_mask, thresh_mask, index; + + /* get hardware generic timer interrupt status */ + trigger_mask = ah->intr_gen_timer_trigger; + thresh_mask = ah->intr_gen_timer_thresh; + trigger_mask &= timer_table->timer_mask.val; + thresh_mask &= timer_table->timer_mask.val; + + trigger_mask &= ~thresh_mask; + + while (thresh_mask) { + index = rightmost_index(timer_table, &thresh_mask); + timer = timer_table->timers[index]; + BUG_ON(!timer); + ath_print(common, ATH_DBG_HWTIMER, + "TSF overflow for Gen timer %d\n", index); + timer->overflow(timer->arg); + } + + while (trigger_mask) { + index = rightmost_index(timer_table, &trigger_mask); + timer = timer_table->timers[index]; + BUG_ON(!timer); + ath_print(common, ATH_DBG_HWTIMER, + "Gen timer[%d] trigger\n", index); + timer->trigger(timer->arg); + } +} +EXPORT_SYMBOL(ath_gen_timer_isr); + +static struct { + u32 version; + const char * name; +} ath_mac_bb_names[] = { + /* Devices with external radios */ + { AR_SREV_VERSION_5416_PCI, "5416" }, + { AR_SREV_VERSION_5416_PCIE, "5418" }, + { AR_SREV_VERSION_9100, "9100" }, + { AR_SREV_VERSION_9160, "9160" }, + /* Single-chip solutions */ + { AR_SREV_VERSION_9280, "9280" }, + { AR_SREV_VERSION_9285, "9285" }, + { AR_SREV_VERSION_9287, "9287" }, + { AR_SREV_VERSION_9271, "9271" }, +}; + +/* For devices with external radios */ +static struct { + u16 version; + const char * name; +} ath_rf_names[] = { + { 0, "5133" }, + { AR_RAD5133_SREV_MAJOR, "5133" }, + { AR_RAD5122_SREV_MAJOR, "5122" }, + { AR_RAD2133_SREV_MAJOR, "2133" }, + { AR_RAD2122_SREV_MAJOR, "2122" } +}; + +/* + * Return the MAC/BB name. "????" 
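
/*
 * Illustrative sketch (not from the driver): the de Bruijn trick behind
 * rightmost_index(), which ath_gen_timer_isr() above uses to pull pending
 * timer indices out of the trigger/threshold masks. Isolating the lowest
 * set bit with b & (0 - b), multiplying by a 32-bit de Bruijn constant and
 * keeping the top five bits gives a perfect 32-entry hash of the bit
 * position. The driver looks the hash up in gen_timer_index[]; here the
 * equivalent table is built at runtime. 0x077CB531 is the standard constant
 * for this trick and is assumed to match the driver's debruijn32.
 */
#include <stdio.h>
#include <stdint.h>

#define DEBRUIJN32 0x077CB531U

int main(void)
{
	unsigned index_of_hash[32];
	uint32_t mask = 0x00008212;	/* bits 1, 4, 9 and 15 set */

	/* hash(1 << i) -> i, mirroring how gen_timer_index[] is populated */
	for (unsigned i = 0; i < 32; i++)
		index_of_hash[((uint32_t)1 << i) * DEBRUIJN32 >> 27] = i;

	/* pull out and clear the lowest set bit until the mask is empty */
	while (mask) {
		uint32_t b = mask & (0 - mask);

		mask &= ~b;
		printf("bit %u\n", index_of_hash[b * DEBRUIJN32 >> 27]);
	}
	return 0;
}
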
is returned if the MAC/BB is unknown.
+ */
+static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version)
+{
+	int i;
+
+	for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
+		if (ath_mac_bb_names[i].version == mac_bb_version) {
+			return ath_mac_bb_names[i].name;
+		}
+	}
+
+	return "????";
+}
+
+/*
+ * Return the RF name. "????" is returned if the RF is unknown.
+ */
+static const char *ath9k_hw_rf_name(u16 rf_version)
+{
+	int i;
+
+	for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
+		if (ath_rf_names[i].version == rf_version) {
+			return ath_rf_names[i].name;
+		}
+	}
+
+	return "????";
+}
+
+void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
+{
+	int used;
+
+	/* chipsets >= AR9280 are single-chip */
+	if (AR_SREV_9280_10_OR_LATER(ah)) {
+		used = snprintf(hw_name, len,
+			       "Atheros AR%s Rev:%x",
+			       ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+			       ah->hw_version.macRev);
+	}
+	else {
+		used = snprintf(hw_name, len,
+			       "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
+			       ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+			       ah->hw_version.macRev,
+			       ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
+						AR_RADIO_SREV_MAJOR)),
+			       ah->hw_version.phyRev);
+	}
+
+	hw_name[used] = '\0';
+}
+EXPORT_SYMBOL(ath9k_hw_name);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c.orig b/drivers/net/wireless/ath/ath9k/hw.c.orig
new file mode 100644
index 0000000..2e767cf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hw.c.orig
@@ -0,0 +1,3954 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/io.h>
+#include <asm/unaligned.h>
+
+#include "hw.h"
+#include "rc.h"
+#include "initvals.h"
+
+#define ATH9K_CLOCK_RATE_CCK		22
+#define ATH9K_CLOCK_RATE_5GHZ_OFDM	40
+#define ATH9K_CLOCK_RATE_2GHZ_OFDM	44
+
+static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
+static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
+static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
+			      struct ar5416_eeprom_def *pEepData,
+			      u32 reg, u32 value);
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static int __init ath9k_init(void)
+{
+	return 0;
+}
+module_init(ath9k_init);
+
+static void __exit ath9k_exit(void)
+{
+	return;
+}
+module_exit(ath9k_exit);
+
+/********************/
+/* Helper Functions */
+/********************/
+
+static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
+{
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+
+	if (!ah->curchan) /* should really check for CCK instead */
+		return usecs *ATH9K_CLOCK_RATE_CCK;
+	if (conf->channel->band == IEEE80211_BAND_2GHZ)
+		return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM;
+	return usecs *ATH9K_CLOCK_RATE_5GHZ_OFDM;
+}
+
+static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
+{
+	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+
+	if (conf_is_ht40(conf))
+		return ath9k_hw_mac_clks(ah, usecs) * 2;
+	else
+		return ath9k_hw_mac_clks(ah, usecs);
+}
+
+bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
+{
+	int i;
+
+	BUG_ON(timeout < AH_TIME_QUANTUM);
+
+	for (i = 0; i < (timeout / AH_TIME_QUANTUM); i++) {
+		if ((REG_READ(ah, reg) & mask) == val)
+			return true;
+
+		udelay(AH_TIME_QUANTUM);
+	}
+
+	ath_print(ath9k_hw_common(ah), ATH_DBG_ANY,
+		  "timeout (%d 
us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", + timeout, reg, REG_READ(ah, reg), mask, val); + + return false; +} +EXPORT_SYMBOL(ath9k_hw_wait); + +u32 ath9k_hw_reverse_bits(u32 val, u32 n) +{ + u32 retval; + int i; + + for (i = 0, retval = 0; i < n; i++) { + retval = (retval << 1) | (val & 1); + val >>= 1; + } + return retval; +} + +bool ath9k_get_channel_edges(struct ath_hw *ah, + u16 flags, u16 *low, + u16 *high) +{ + struct ath9k_hw_capabilities *pCap = &ah->caps; + + if (flags & CHANNEL_5GHZ) { + *low = pCap->low_5ghz_chan; + *high = pCap->high_5ghz_chan; + return true; + } + if ((flags & CHANNEL_2GHZ)) { + *low = pCap->low_2ghz_chan; + *high = pCap->high_2ghz_chan; + return true; + } + return false; +} + +u16 ath9k_hw_computetxtime(struct ath_hw *ah, + u8 phy, int kbps, + u32 frameLen, u16 rateix, + bool shortPreamble) +{ + u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime; + + if (kbps == 0) + return 0; + + switch (phy) { + case WLAN_RC_PHY_CCK: + phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS; + if (shortPreamble) + phyTime >>= 1; + numBits = frameLen << 3; + txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps); + break; + case WLAN_RC_PHY_OFDM: + if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) { + bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000; + numBits = OFDM_PLCP_BITS + (frameLen << 3); + numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); + txTime = OFDM_SIFS_TIME_QUARTER + + OFDM_PREAMBLE_TIME_QUARTER + + (numSymbols * OFDM_SYMBOL_TIME_QUARTER); + } else if (ah->curchan && + IS_CHAN_HALF_RATE(ah->curchan)) { + bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000; + numBits = OFDM_PLCP_BITS + (frameLen << 3); + numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); + txTime = OFDM_SIFS_TIME_HALF + + OFDM_PREAMBLE_TIME_HALF + + (numSymbols * OFDM_SYMBOL_TIME_HALF); + } else { + bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000; + numBits = OFDM_PLCP_BITS + (frameLen << 3); + numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); + txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME + + (numSymbols * OFDM_SYMBOL_TIME); + } + break; + default: + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Unknown phy %u (rate ix %u)\n", phy, rateix); + txTime = 0; + break; + } + + return txTime; +} +EXPORT_SYMBOL(ath9k_hw_computetxtime); + +void ath9k_hw_get_channel_centers(struct ath_hw *ah, + struct ath9k_channel *chan, + struct chan_centers *centers) +{ + int8_t extoff; + + if (!IS_CHAN_HT40(chan)) { + centers->ctl_center = centers->ext_center = + centers->synth_center = chan->channel; + return; + } + + if ((chan->chanmode == CHANNEL_A_HT40PLUS) || + (chan->chanmode == CHANNEL_G_HT40PLUS)) { + centers->synth_center = + chan->channel + HT40_CHANNEL_CENTER_SHIFT; + extoff = 1; + } else { + centers->synth_center = + chan->channel - HT40_CHANNEL_CENTER_SHIFT; + extoff = -1; + } + + centers->ctl_center = + centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT); + /* 25 MHz spacing is supported by hw but not on upper layers */ + centers->ext_center = + centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT); +} + +/******************/ +/* Chip Revisions */ +/******************/ + +static void ath9k_hw_read_revisions(struct ath_hw *ah) +{ + u32 val; + + val = REG_READ(ah, AR_SREV) & AR_SREV_ID; + + if (val == 0xFF) { + val = REG_READ(ah, AR_SREV); + ah->hw_version.macVersion = + (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; + ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); + ah->is_pciexpress = (val & AR_SREV_TYPE2_HOST_MODE) ? 
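
/*
 * Illustrative sketch (not from the driver): the OFDM branch of
 * ath9k_hw_computetxtime() above for a full-rate channel. The constants are
 * the usual 802.11a/g figures (16 us SIFS, 20 us preamble, 22 PLCP service +
 * tail bits, 4 us symbol) and are assumed to match the driver's defines.
 */
#include <stdio.h>

#define SIFS_US		16
#define PREAMBLE_US	20
#define PLCP_BITS	22
#define SYMBOL_US	4

static unsigned ofdm_tx_time_us(unsigned kbps, unsigned frame_len)
{
	unsigned bits_per_symbol = kbps * SYMBOL_US / 1000;
	unsigned num_bits = PLCP_BITS + frame_len * 8;
	unsigned num_symbols = (num_bits + bits_per_symbol - 1) / bits_per_symbol;

	return SIFS_US + PREAMBLE_US + num_symbols * SYMBOL_US;
}

int main(void)
{
	/* 1500-byte frame at 54 Mbit/s: 56 symbols -> 260 us including SIFS */
	printf("%u us\n", ofdm_tx_time_us(54000, 1500));
	return 0;
}
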
0 : 1; + } else { + if (!AR_SREV_9100(ah)) + ah->hw_version.macVersion = MS(val, AR_SREV_VERSION); + + ah->hw_version.macRev = val & AR_SREV_REVISION; + + if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) + ah->is_pciexpress = true; + } +} + +static int ath9k_hw_get_radiorev(struct ath_hw *ah) +{ + u32 val; + int i; + + REG_WRITE(ah, AR_PHY(0x36), 0x00007058); + + for (i = 0; i < 8; i++) + REG_WRITE(ah, AR_PHY(0x20), 0x00010000); + val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff; + val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4); + + return ath9k_hw_reverse_bits(val, 8); +} + +/************************************/ +/* HW Attach, Detach, Init Routines */ +/************************************/ + +static void ath9k_hw_disablepcie(struct ath_hw *ah) +{ + if (AR_SREV_9100(ah)) + return; + + REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); + REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); + REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029); + REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824); + REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579); + REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000); + REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); + REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); + REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007); + + REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); +} + +static bool ath9k_hw_chip_test(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) }; + u32 regHold[2]; + u32 patternData[4] = { 0x55555555, + 0xaaaaaaaa, + 0x66666666, + 0x99999999 }; + int i, j; + + for (i = 0; i < 2; i++) { + u32 addr = regAddr[i]; + u32 wrData, rdData; + + regHold[i] = REG_READ(ah, addr); + for (j = 0; j < 0x100; j++) { + wrData = (j << 16) | j; + REG_WRITE(ah, addr, wrData); + rdData = REG_READ(ah, addr); + if (rdData != wrData) { + ath_print(common, ATH_DBG_FATAL, + "address test failed " + "addr: 0x%08x - wr:0x%08x != " + "rd:0x%08x\n", + addr, wrData, rdData); + return false; + } + } + for (j = 0; j < 4; j++) { + wrData = patternData[j]; + REG_WRITE(ah, addr, wrData); + rdData = REG_READ(ah, addr); + if (wrData != rdData) { + ath_print(common, ATH_DBG_FATAL, + "address test failed " + "addr: 0x%08x - wr:0x%08x != " + "rd:0x%08x\n", + addr, wrData, rdData); + return false; + } + } + REG_WRITE(ah, regAddr[i], regHold[i]); + } + udelay(100); + + return true; +} + +static void ath9k_hw_init_config(struct ath_hw *ah) +{ + int i; + + ah->config.dma_beacon_response_time = 2; + ah->config.sw_beacon_response_time = 10; + ah->config.additional_swba_backoff = 0; + ah->config.ack_6mb = 0x0; + ah->config.cwm_ignore_extcca = 0; + ah->config.pcie_powersave_enable = 0; + ah->config.pcie_clock_req = 0; + ah->config.pcie_waen = 0; + ah->config.analog_shiftreg = 1; + ah->config.ofdm_trig_low = 200; + ah->config.ofdm_trig_high = 500; + ah->config.cck_trig_high = 200; + ah->config.cck_trig_low = 100; + ah->config.enable_ani = 1; + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { + ah->config.spurchans[i][0] = AR_NO_SPUR; + ah->config.spurchans[i][1] = AR_NO_SPUR; + } + + if (ah->hw_version.devid != AR2427_DEVID_PCIE) + ah->config.ht_enable = 1; + else + ah->config.ht_enable = 0; + + ah->config.rx_intr_mitigation = true; + + /* + * We need this for PCI devices only (Cardbus, PCI, miniPCI) + * _and_ if on non-uniprocessor systems (Multiprocessor/HT). + * This means we use it for all AR5416 devices, and the few + * minor PCI AR9280 devices out there. 
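
/*
 * Illustrative sketch (not from the driver): the mask/shift convention
 * behind the MS()/SM() helpers used in ath9k_hw_read_revisions() above and
 * throughout this file. Every register field is described by a mask FOO and
 * a shift FOO_S; MS() extracts a field, SM() positions a value into it. The
 * macro bodies mirror the common ath9k pattern and the example field is
 * invented for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_FIELD	0x00000f00	/* made-up 4-bit field */
#define EXAMPLE_FIELD_S	8

#define MS(v, f)	(((v) & f) >> f##_S)
#define SM(v, f)	(((v) << f##_S) & f)

int main(void)
{
	uint32_t reg = 0x12345678;

	/* extract: (0x12345678 & 0xf00) >> 8 == 0x6 */
	printf("field = 0x%x\n", MS(reg, EXAMPLE_FIELD));

	/* read-modify-write, as REG_RMW_FIELD() does on the hardware */
	reg = (reg & ~EXAMPLE_FIELD) | SM(0xA, EXAMPLE_FIELD);
	printf("reg   = 0x%08x\n", reg);
	return 0;
}
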
+ * + * Serialization is required because these devices do not handle + * well the case of two concurrent reads/writes due to the latency + * involved. During one read/write another read/write can be issued + * on another CPU while the previous read/write may still be working + * on our hardware, if we hit this case the hardware poops in a loop. + * We prevent this by serializing reads and writes. + * + * This issue is not present on PCI-Express devices or pre-AR5416 + * devices (legacy, 802.11abg). + */ + if (num_possible_cpus() > 1) + ah->config.serialize_regmode = SER_REG_MODE_AUTO; +} +EXPORT_SYMBOL(ath9k_hw_init); + +static void ath9k_hw_init_defaults(struct ath_hw *ah) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + + regulatory->country_code = CTRY_DEFAULT; + regulatory->power_limit = MAX_RATE_POWER; + regulatory->tp_scale = ATH9K_TP_SCALE_MAX; + + ah->hw_version.magic = AR5416_MAGIC; + ah->hw_version.subvendorid = 0; + + ah->ah_flags = 0; + if (ah->hw_version.devid == AR5416_AR9100_DEVID) + ah->hw_version.macVersion = AR_SREV_VERSION_9100; + if (!AR_SREV_9100(ah)) + ah->ah_flags = AH_USE_EEPROM; + + ah->atim_window = 0; + ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE; + ah->beacon_interval = 100; + ah->enable_32kHz_clock = DONT_USE_32KHZ; + ah->slottime = (u32) -1; + ah->globaltxtimeout = (u32) -1; + ah->power_mode = ATH9K_PM_UNDEFINED; +} + +static int ath9k_hw_rf_claim(struct ath_hw *ah) +{ + u32 val; + + REG_WRITE(ah, AR_PHY(0), 0x00000007); + + val = ath9k_hw_get_radiorev(ah); + switch (val & AR_RADIO_SREV_MAJOR) { + case 0: + val = AR_RAD5133_SREV_MAJOR; + break; + case AR_RAD5133_SREV_MAJOR: + case AR_RAD5122_SREV_MAJOR: + case AR_RAD2133_SREV_MAJOR: + case AR_RAD2122_SREV_MAJOR: + break; + default: + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Radio Chip Rev 0x%02X not supported\n", + val & AR_RADIO_SREV_MAJOR); + return -EOPNOTSUPP; + } + + ah->hw_version.analog5GhzRev = val; + + return 0; +} + +static int ath9k_hw_init_macaddr(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 sum; + int i; + u16 eeval; + + sum = 0; + for (i = 0; i < 3; i++) { + eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i)); + sum += eeval; + common->macaddr[2 * i] = eeval >> 8; + common->macaddr[2 * i + 1] = eeval & 0xff; + } + if (sum == 0 || sum == 0xffff * 3) + return -EADDRNOTAVAIL; + + return 0; +} + +static void ath9k_hw_init_rxgain_ini(struct ath_hw *ah) +{ + u32 rxgain_type; + + if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) { + rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE); + + if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF) + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9280Modes_backoff_13db_rxgain_9280_2, + ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6); + else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF) + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9280Modes_backoff_23db_rxgain_9280_2, + ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6); + else + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9280Modes_original_rxgain_9280_2, + ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6); + } else { + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9280Modes_original_rxgain_9280_2, + ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6); + } +} + +static void ath9k_hw_init_txgain_ini(struct ath_hw *ah) +{ + u32 txgain_type; + + if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) { + txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE); + + if (txgain_type == 
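
/*
 * Illustrative sketch (not from the driver): how ath9k_hw_init_macaddr()
 * above unpacks the MAC address from three 16-bit EEPROM words (high byte
 * first) and rejects all-zero / all-ones contents. The sample words are
 * invented for the demo.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint16_t eeprom_words[3] = { 0x0003, 0x7f11, 0x2233 };
	uint8_t mac[6];
	uint32_t sum = 0;

	for (int i = 0; i < 3; i++) {
		sum += eeprom_words[i];
		mac[2 * i]     = eeprom_words[i] >> 8;   /* high byte first */
		mac[2 * i + 1] = eeprom_words[i] & 0xff;
	}

	if (sum == 0 || sum == 0xffff * 3) {
		printf("EEPROM MAC address is blank\n");
		return 1;
	}

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
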
AR5416_EEP_TXGAIN_HIGH_POWER) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9280Modes_high_power_tx_gain_9280_2, + ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6); + else + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9280Modes_original_tx_gain_9280_2, + ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6); + } else { + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9280Modes_original_tx_gain_9280_2, + ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6); + } +} + +static int ath9k_hw_post_init(struct ath_hw *ah) +{ + int ecode; + + if (!ath9k_hw_chip_test(ah)) + return -ENODEV; + + ecode = ath9k_hw_rf_claim(ah); + if (ecode != 0) + return ecode; + + ecode = ath9k_hw_eeprom_init(ah); + if (ecode != 0) + return ecode; + + ath_print(ath9k_hw_common(ah), ATH_DBG_CONFIG, + "Eeprom VER: %d, REV: %d\n", + ah->eep_ops->get_eeprom_ver(ah), + ah->eep_ops->get_eeprom_rev(ah)); + + if (!AR_SREV_9280_10_OR_LATER(ah)) { + ecode = ath9k_hw_rf_alloc_ext_banks(ah); + if (ecode) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Failed allocating banks for " + "external radio\n"); + return ecode; + } + } + + if (!AR_SREV_9100(ah)) { + ath9k_hw_ani_setup(ah); + ath9k_hw_ani_init(ah); + } + + return 0; +} + +static bool ath9k_hw_devid_supported(u16 devid) +{ + switch (devid) { + case AR5416_DEVID_PCI: + case AR5416_DEVID_PCIE: + case AR5416_AR9100_DEVID: + case AR9160_DEVID_PCI: + case AR9280_DEVID_PCI: + case AR9280_DEVID_PCIE: + case AR9285_DEVID_PCIE: + case AR5416_DEVID_AR9287_PCI: + case AR5416_DEVID_AR9287_PCIE: + case AR9271_USB: + case AR2427_DEVID_PCIE: + return true; + default: + break; + } + return false; +} + +static bool ath9k_hw_macversion_supported(u32 macversion) +{ + switch (macversion) { + case AR_SREV_VERSION_5416_PCI: + case AR_SREV_VERSION_5416_PCIE: + case AR_SREV_VERSION_9160: + case AR_SREV_VERSION_9100: + case AR_SREV_VERSION_9280: + case AR_SREV_VERSION_9285: + case AR_SREV_VERSION_9287: + case AR_SREV_VERSION_9271: + return true; + default: + break; + } + return false; +} + +static void ath9k_hw_init_cal_settings(struct ath_hw *ah) +{ + if (AR_SREV_9160_10_OR_LATER(ah)) { + if (AR_SREV_9280_10_OR_LATER(ah)) { + ah->iq_caldata.calData = &iq_cal_single_sample; + ah->adcgain_caldata.calData = + &adc_gain_cal_single_sample; + ah->adcdc_caldata.calData = + &adc_dc_cal_single_sample; + ah->adcdc_calinitdata.calData = + &adc_init_dc_cal; + } else { + ah->iq_caldata.calData = &iq_cal_multi_sample; + ah->adcgain_caldata.calData = + &adc_gain_cal_multi_sample; + ah->adcdc_caldata.calData = + &adc_dc_cal_multi_sample; + ah->adcdc_calinitdata.calData = + &adc_init_dc_cal; + } + ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL; + } +} + +static void ath9k_hw_init_mode_regs(struct ath_hw *ah) +{ + if (AR_SREV_9271(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271, + ARRAY_SIZE(ar9271Modes_9271), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271, + ARRAY_SIZE(ar9271Common_9271), 2); + INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only, + ar9271Modes_9271_1_0_only, + ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6); + return; + } + + if (AR_SREV_9287_11_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1, + ARRAY_SIZE(ar9287Modes_9287_1_1), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1, + ARRAY_SIZE(ar9287Common_9287_1_1), 2); + if (ah->config.pcie_clock_req) + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9287PciePhy_clkreq_off_L1_9287_1_1, + ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2); + else + INIT_INI_ARRAY(&ah->iniPcieSerdes, + 
ar9287PciePhy_clkreq_always_on_L1_9287_1_1, + ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1), + 2); + } else if (AR_SREV_9287_10_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0, + ARRAY_SIZE(ar9287Modes_9287_1_0), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0, + ARRAY_SIZE(ar9287Common_9287_1_0), 2); + + if (ah->config.pcie_clock_req) + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9287PciePhy_clkreq_off_L1_9287_1_0, + ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2); + else + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9287PciePhy_clkreq_always_on_L1_9287_1_0, + ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0), + 2); + } else if (AR_SREV_9285_12_OR_LATER(ah)) { + + + INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2, + ARRAY_SIZE(ar9285Modes_9285_1_2), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2, + ARRAY_SIZE(ar9285Common_9285_1_2), 2); + + if (ah->config.pcie_clock_req) { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9285PciePhy_clkreq_off_L1_9285_1_2, + ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2); + } else { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9285PciePhy_clkreq_always_on_L1_9285_1_2, + ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2), + 2); + } + } else if (AR_SREV_9285_10_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285, + ARRAY_SIZE(ar9285Modes_9285), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285, + ARRAY_SIZE(ar9285Common_9285), 2); + + if (ah->config.pcie_clock_req) { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9285PciePhy_clkreq_off_L1_9285, + ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2); + } else { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9285PciePhy_clkreq_always_on_L1_9285, + ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2); + } + } else if (AR_SREV_9280_20_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2, + ARRAY_SIZE(ar9280Modes_9280_2), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2, + ARRAY_SIZE(ar9280Common_9280_2), 2); + + if (ah->config.pcie_clock_req) { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9280PciePhy_clkreq_off_L1_9280, + ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2); + } else { + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9280PciePhy_clkreq_always_on_L1_9280, + ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2); + } + INIT_INI_ARRAY(&ah->iniModesAdditional, + ar9280Modes_fast_clock_9280_2, + ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3); + } else if (AR_SREV_9280_10_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280, + ARRAY_SIZE(ar9280Modes_9280), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280, + ARRAY_SIZE(ar9280Common_9280), 2); + } else if (AR_SREV_9160_10_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160, + ARRAY_SIZE(ar5416Modes_9160), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160, + ARRAY_SIZE(ar5416Common_9160), 2); + INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160, + ARRAY_SIZE(ar5416Bank0_9160), 2); + INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160, + ARRAY_SIZE(ar5416BB_RfGain_9160), 3); + INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160, + ARRAY_SIZE(ar5416Bank1_9160), 2); + INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160, + ARRAY_SIZE(ar5416Bank2_9160), 2); + INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160, + ARRAY_SIZE(ar5416Bank3_9160), 3); + INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160, + ARRAY_SIZE(ar5416Bank6_9160), 3); + INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160, + ARRAY_SIZE(ar5416Bank6TPC_9160), 3); + INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160, + 
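
/*
 * Illustrative sketch (not from the driver): the layout behind the initvals
 * tables registered with INIT_INI_ARRAY() above and read back with INI_RA()
 * later in ath9k_hw_process_ini(). Each table is a flat rows x columns array
 * of u32s: column 0 holds the register address, the remaining columns hold
 * one value per operating mode, selected by a modes index. The two-row table
 * here is invented for the demo.
 */
#include <stdio.h>
#include <stdint.h>

struct ini_array {
	const uint32_t *data;
	unsigned rows;
	unsigned columns;
};

/* same flat row-major indexing as the driver's INI_RA(iniarray, row, column) */
#define INI_RA(a, r, c)	((a)->data[(r) * (a)->columns + (c)])

int main(void)
{
	static const uint32_t demo_vals[2 * 3] = {
		/* addr        mode 1      mode 2 */
		0x00009810, 0x00000011, 0x00000022,
		0x00009814, 0x00000033, 0x00000044,
	};
	struct ini_array demo = { demo_vals, 2, 3 };
	unsigned modes_index = 2;

	for (unsigned row = 0; row < demo.rows; row++)
		printf("write 0x%08x to reg 0x%08x\n",
		       INI_RA(&demo, row, modes_index),
		       INI_RA(&demo, row, 0));
	return 0;
}
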
ARRAY_SIZE(ar5416Bank7_9160), 2); + if (AR_SREV_9160_11(ah)) { + INIT_INI_ARRAY(&ah->iniAddac, + ar5416Addac_91601_1, + ARRAY_SIZE(ar5416Addac_91601_1), 2); + } else { + INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160, + ARRAY_SIZE(ar5416Addac_9160), 2); + } + } else if (AR_SREV_9100_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100, + ARRAY_SIZE(ar5416Modes_9100), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100, + ARRAY_SIZE(ar5416Common_9100), 2); + INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100, + ARRAY_SIZE(ar5416Bank0_9100), 2); + INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100, + ARRAY_SIZE(ar5416BB_RfGain_9100), 3); + INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100, + ARRAY_SIZE(ar5416Bank1_9100), 2); + INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100, + ARRAY_SIZE(ar5416Bank2_9100), 2); + INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100, + ARRAY_SIZE(ar5416Bank3_9100), 3); + INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100, + ARRAY_SIZE(ar5416Bank6_9100), 3); + INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100, + ARRAY_SIZE(ar5416Bank6TPC_9100), 3); + INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100, + ARRAY_SIZE(ar5416Bank7_9100), 2); + INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100, + ARRAY_SIZE(ar5416Addac_9100), 2); + } else { + INIT_INI_ARRAY(&ah->iniModes, ar5416Modes, + ARRAY_SIZE(ar5416Modes), 6); + INIT_INI_ARRAY(&ah->iniCommon, ar5416Common, + ARRAY_SIZE(ar5416Common), 2); + INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0, + ARRAY_SIZE(ar5416Bank0), 2); + INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain, + ARRAY_SIZE(ar5416BB_RfGain), 3); + INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1, + ARRAY_SIZE(ar5416Bank1), 2); + INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2, + ARRAY_SIZE(ar5416Bank2), 2); + INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3, + ARRAY_SIZE(ar5416Bank3), 3); + INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6, + ARRAY_SIZE(ar5416Bank6), 3); + INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC, + ARRAY_SIZE(ar5416Bank6TPC), 3); + INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7, + ARRAY_SIZE(ar5416Bank7), 2); + INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac, + ARRAY_SIZE(ar5416Addac), 2); + } +} + +static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah) +{ + if (AR_SREV_9287_11_OR_LATER(ah)) + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9287Modes_rx_gain_9287_1_1, + ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6); + else if (AR_SREV_9287_10(ah)) + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9287Modes_rx_gain_9287_1_0, + ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6); + else if (AR_SREV_9280_20(ah)) + ath9k_hw_init_rxgain_ini(ah); + + if (AR_SREV_9287_11_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9287Modes_tx_gain_9287_1_1, + ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6); + } else if (AR_SREV_9287_10(ah)) { + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9287Modes_tx_gain_9287_1_0, + ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6); + } else if (AR_SREV_9280_20(ah)) { + ath9k_hw_init_txgain_ini(ah); + } else if (AR_SREV_9285_12_OR_LATER(ah)) { + u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE); + + /* txgain table */ + if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) { + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9285Modes_high_power_tx_gain_9285_1_2, + ARRAY_SIZE(ar9285Modes_high_power_tx_gain_9285_1_2), 6); + } else { + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9285Modes_original_tx_gain_9285_1_2, + ARRAY_SIZE(ar9285Modes_original_tx_gain_9285_1_2), 6); + } + + } +} + +static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah) +{ + u32 i, j; + + if (ah->hw_version.devid == 
AR9280_DEVID_PCI) { + + /* EEPROM Fixup */ + for (i = 0; i < ah->iniModes.ia_rows; i++) { + u32 reg = INI_RA(&ah->iniModes, i, 0); + + for (j = 1; j < ah->iniModes.ia_columns; j++) { + u32 val = INI_RA(&ah->iniModes, i, j); + + INI_RA(&ah->iniModes, i, j) = + ath9k_hw_ini_fixup(ah, + &ah->eeprom.def, + reg, val); + } + } + } +} + +int ath9k_hw_init(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + int r = 0; + + if (!ath9k_hw_devid_supported(ah->hw_version.devid)) { + ath_print(common, ATH_DBG_FATAL, + "Unsupported device ID: 0x%0x\n", + ah->hw_version.devid); + return -EOPNOTSUPP; + } + + ath9k_hw_init_defaults(ah); + ath9k_hw_init_config(ah); + + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { + ath_print(common, ATH_DBG_FATAL, + "Couldn't reset chip\n"); + return -EIO; + } + + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { + ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n"); + return -EIO; + } + + if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) { + if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI || + (AR_SREV_9280(ah) && !ah->is_pciexpress)) { + ah->config.serialize_regmode = + SER_REG_MODE_ON; + } else { + ah->config.serialize_regmode = + SER_REG_MODE_OFF; + } + } + + ath_print(common, ATH_DBG_RESET, "serialize_regmode is %d\n", + ah->config.serialize_regmode); + + if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) + ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1; + else + ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD; + + if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) { + ath_print(common, ATH_DBG_FATAL, + "Mac Chip Rev 0x%02x.%x is not supported by " + "this driver\n", ah->hw_version.macVersion, + ah->hw_version.macRev); + return -EOPNOTSUPP; + } + + if (AR_SREV_9100(ah)) { + ah->iq_caldata.calData = &iq_cal_multi_sample; + ah->supp_cals = IQ_MISMATCH_CAL; + ah->is_pciexpress = false; + } + + if (AR_SREV_9271(ah)) + ah->is_pciexpress = false; + + ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID); + + ath9k_hw_init_cal_settings(ah); + + ah->ani_function = ATH9K_ANI_ALL; + if (AR_SREV_9280_10_OR_LATER(ah)) { + ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; + ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel; + ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_9280_spur_mitigate; + } else { + ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel; + ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate; + } + + ath9k_hw_init_mode_regs(ah); + + if (ah->is_pciexpress) + ath9k_hw_configpcipowersave(ah, 0, 0); + else + ath9k_hw_disablepcie(ah); + + /* Support for Japan ch.14 (2484) spread */ + if (AR_SREV_9287_11_OR_LATER(ah)) { + INIT_INI_ARRAY(&ah->iniCckfirNormal, + ar9287Common_normal_cck_fir_coeff_92871_1, + ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2); + INIT_INI_ARRAY(&ah->iniCckfirJapan2484, + ar9287Common_japan_2484_cck_fir_coeff_92871_1, + ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2); + } + + r = ath9k_hw_post_init(ah); + if (r) + return r; + + ath9k_hw_init_mode_gain_regs(ah); + r = ath9k_hw_fill_cap_info(ah); + if (r) + return r; + + ath9k_hw_init_eeprom_fix(ah); + + r = ath9k_hw_init_macaddr(ah); + if (r) { + ath_print(common, ATH_DBG_FATAL, + "Failed to initialize MAC address\n"); + return r; + } + + if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) + ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S); + else + ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); + + ath9k_init_nfcal_hist_buffer(ah); + + common->state = ATH_HW_INITIALIZED; + + return 0; +} + +static void 
ath9k_hw_init_bb(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + u32 synthDelay; + + synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; + if (IS_CHAN_B(chan)) + synthDelay = (4 * synthDelay) / 22; + else + synthDelay /= 10; + + REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); + + udelay(synthDelay + BASE_ACTIVATE_DELAY); +} + +static void ath9k_hw_init_qos(struct ath_hw *ah) +{ + REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); + REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); + + REG_WRITE(ah, AR_QOS_NO_ACK, + SM(2, AR_QOS_NO_ACK_TWO_BIT) | + SM(5, AR_QOS_NO_ACK_BIT_OFF) | + SM(0, AR_QOS_NO_ACK_BYTE_OFF)); + + REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL); + REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF); + REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF); + REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF); + REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); +} + +static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud) +{ + u32 lcr; + u32 baud_divider = freq * 1000 * 1000 / 16 / baud; + + lcr = REG_READ(ah , 0x5100c); + lcr |= 0x80; + + REG_WRITE(ah, 0x5100c, lcr); + REG_WRITE(ah, 0x51004, (baud_divider >> 8)); + REG_WRITE(ah, 0x51000, (baud_divider & 0xff)); + + lcr &= ~0x80; + REG_WRITE(ah, 0x5100c, lcr); +} + +static void ath9k_hw_init_pll(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + u32 pll; + + if (AR_SREV_9100(ah)) { + if (chan && IS_CHAN_5GHZ(chan)) + pll = 0x1450; + else + pll = 0x1458; + } else { + if (AR_SREV_9280_10_OR_LATER(ah)) { + pll = SM(0x5, AR_RTC_9160_PLL_REFDIV); + + if (chan && IS_CHAN_HALF_RATE(chan)) + pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL); + else if (chan && IS_CHAN_QUARTER_RATE(chan)) + pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL); + + if (chan && IS_CHAN_5GHZ(chan)) { + pll |= SM(0x28, AR_RTC_9160_PLL_DIV); + + + if (AR_SREV_9280_20(ah)) { + if (((chan->channel % 20) == 0) + || ((chan->channel % 10) == 0)) + pll = 0x2850; + else + pll = 0x142c; + } + } else { + pll |= SM(0x2c, AR_RTC_9160_PLL_DIV); + } + + } else if (AR_SREV_9160_10_OR_LATER(ah)) { + + pll = SM(0x5, AR_RTC_9160_PLL_REFDIV); + + if (chan && IS_CHAN_HALF_RATE(chan)) + pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL); + else if (chan && IS_CHAN_QUARTER_RATE(chan)) + pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL); + + if (chan && IS_CHAN_5GHZ(chan)) + pll |= SM(0x50, AR_RTC_9160_PLL_DIV); + else + pll |= SM(0x58, AR_RTC_9160_PLL_DIV); + } else { + pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2; + + if (chan && IS_CHAN_HALF_RATE(chan)) + pll |= SM(0x1, AR_RTC_PLL_CLKSEL); + else if (chan && IS_CHAN_QUARTER_RATE(chan)) + pll |= SM(0x2, AR_RTC_PLL_CLKSEL); + + if (chan && IS_CHAN_5GHZ(chan)) + pll |= SM(0xa, AR_RTC_PLL_DIV); + else + pll |= SM(0xb, AR_RTC_PLL_DIV); + } + } + REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); + + /* Switch the core clock for ar9271 to 117Mhz */ + if (AR_SREV_9271(ah)) { + if ((pll == 0x142c) || (pll == 0x2850) ) { + udelay(500); + /* set CLKOBS to output AHB clock */ + REG_WRITE(ah, 0x7020, 0xe); + /* + * 0x304: 117Mhz, ahb_ratio: 1x1 + * 0x306: 40Mhz, ahb_ratio: 1x1 + */ + REG_WRITE(ah, 0x50040, 0x304); + /* + * makes adjustments for the baud dividor to keep the + * targetted baud rate based on the used core clock. 
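
/*
 * Illustrative sketch (not from the driver): the 16550-style divisor math in
 * ath9k_hw_change_target_baud() above. The UART divisor latch is programmed
 * with core_clock / (16 * baud). The 117 MHz figure follows the AR9271 core
 * clock comment nearby; 19200 baud is an assumption standing in for
 * AR9271_TARGET_BAUD_RATE.
 */
#include <stdio.h>

int main(void)
{
	unsigned freq_mhz = 117;	/* core clock after the PLL switch */
	unsigned baud = 19200;
	unsigned divider = freq_mhz * 1000 * 1000 / 16 / baud;

	printf("divisor = %u (DLM=0x%02x, DLL=0x%02x)\n",
	       divider, (divider >> 8) & 0xff, divider & 0xff);
	return 0;
}
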
+ */ + ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK, + AR9271_TARGET_BAUD_RATE); + } + } + + udelay(RTC_PLL_SETTLE_DELAY); + + REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); +} + +static void ath9k_hw_init_chain_masks(struct ath_hw *ah) +{ + int rx_chainmask, tx_chainmask; + + rx_chainmask = ah->rxchainmask; + tx_chainmask = ah->txchainmask; + + switch (rx_chainmask) { + case 0x5: + REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, + AR_PHY_SWAP_ALT_CHAIN); + case 0x3: + if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) { + REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7); + REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7); + break; + } + case 0x1: + case 0x2: + case 0x7: + REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask); + REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask); + break; + default: + break; + } + + REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask); + if (tx_chainmask == 0x5) { + REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, + AR_PHY_SWAP_ALT_CHAIN); + } + if (AR_SREV_9100(ah)) + REG_WRITE(ah, AR_PHY_ANALOG_SWAP, + REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001); +} + +static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, + enum nl80211_iftype opmode) +{ + ah->mask_reg = AR_IMR_TXERR | + AR_IMR_TXURN | + AR_IMR_RXERR | + AR_IMR_RXORN | + AR_IMR_BCNMISC; + + if (ah->config.rx_intr_mitigation) + ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; + else + ah->mask_reg |= AR_IMR_RXOK; + + ah->mask_reg |= AR_IMR_TXOK; + + if (opmode == NL80211_IFTYPE_AP) + ah->mask_reg |= AR_IMR_MIB; + + REG_WRITE(ah, AR_IMR, ah->mask_reg); + REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT); + + if (!AR_SREV_9100(ah)) { + REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT); + REG_WRITE(ah, AR_INTR_SYNC_MASK, 0); + } +} + +static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us) +{ + u32 val = ath9k_hw_mac_to_clks(ah, us); + val = min(val, (u32) 0xFFFF); + REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val); +} + +static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) +{ + u32 val = ath9k_hw_mac_to_clks(ah, us); + val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK)); + REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val); +} + +static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) +{ + u32 val = ath9k_hw_mac_to_clks(ah, us); + val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS)); + REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val); +} + +static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) +{ + if (tu > 0xFFFF) { + ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT, + "bad global tx timeout %u\n", tu); + ah->globaltxtimeout = (u32) -1; + return false; + } else { + REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu); + ah->globaltxtimeout = tu; + return true; + } +} + +void ath9k_hw_init_global_settings(struct ath_hw *ah) +{ + struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; + int acktimeout; + int slottime; + int sifstime; + + ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n", + ah->misc_mode); + + if (ah->misc_mode != 0) + REG_WRITE(ah, AR_PCU_MISC, + REG_READ(ah, AR_PCU_MISC) | ah->misc_mode); + + if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ) + sifstime = 16; + else + sifstime = 10; + + /* As defined by IEEE 802.11-2007 17.3.8.6 */ + slottime = ah->slottime + 3 * ah->coverage_class; + acktimeout = slottime + sifstime; + + /* + * Workaround for early ACK timeouts, add an offset to match the + * initval's 64us ack timeout value. 
+ * This was initially only meant to work around an issue with delayed + * BA frames in some implementations, but it has been found to fix ACK + * timeout issues in other cases as well. + */ + if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) + acktimeout += 64 - sifstime - ah->slottime; + + ath9k_hw_setslottime(ah, slottime); + ath9k_hw_set_ack_timeout(ah, acktimeout); + ath9k_hw_set_cts_timeout(ah, acktimeout); + if (ah->globaltxtimeout != (u32) -1) + ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout); +} +EXPORT_SYMBOL(ath9k_hw_init_global_settings); + +void ath9k_hw_deinit(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (common->state <= ATH_HW_INITIALIZED) + goto free_hw; + + if (!AR_SREV_9100(ah)) + ath9k_hw_ani_disable(ah); + + ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); + +free_hw: + if (!AR_SREV_9280_10_OR_LATER(ah)) + ath9k_hw_rf_free_ext_banks(ah); + kfree(ah); + ah = NULL; +} +EXPORT_SYMBOL(ath9k_hw_deinit); + +/*******/ +/* INI */ +/*******/ + +static void ath9k_hw_override_ini(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + u32 val; + + if (AR_SREV_9271(ah)) { + /* + * Enable spectral scan to solution for issues with stuck + * beacons on AR9271 1.0. The beacon stuck issue is not seeon on + * AR9271 1.1 + */ + if (AR_SREV_9271_10(ah)) { + val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) | + AR_PHY_SPECTRAL_SCAN_ENABLE; + REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val); + } + else if (AR_SREV_9271_11(ah)) + /* + * change AR_PHY_RF_CTL3 setting to fix MAC issue + * present on AR9271 1.1 + */ + REG_WRITE(ah, AR_PHY_RF_CTL3, 0x3a020001); + return; + } + + /* + * Set the RX_ABORT and RX_DIS and clear if off only after + * RXE is set for MAC. This prevents frames with corrupted + * descriptor status. + */ + REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); + + if (AR_SREV_9280_10_OR_LATER(ah)) { + val = REG_READ(ah, AR_PCU_MISC_MODE2) & + (~AR_PCU_MISC_MODE2_HWWAR1); + + if (AR_SREV_9287_10_OR_LATER(ah)) + val = val & (~AR_PCU_MISC_MODE2_HWWAR2); + + REG_WRITE(ah, AR_PCU_MISC_MODE2, val); + } + + if (!AR_SREV_5416_20_OR_LATER(ah) || + AR_SREV_9280_10_OR_LATER(ah)) + return; + /* + * Disable BB clock gating + * Necessary to avoid issues on AR5416 2.0 + */ + REG_WRITE(ah, 0x9800 + (651 << 2), 0x11); + + /* + * Disable RIFS search on some chips to avoid baseband + * hang issues. 
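
/*
 * Illustrative sketch (not from the driver): the slot/ACK timeout arithmetic
 * in ath9k_hw_init_global_settings() above, including the 2.4 GHz offset
 * that lines the ACK timeout up with the 64 us initval figure. The 9 us slot
 * and 10/16 us SIFS values are the usual ones and stand in for ah->slottime
 * and the band check.
 */
#include <stdio.h>

static void show(const char *band, int sifs, int slot, int coverage_class,
		 int is_2ghz)
{
	int slottime = slot + 3 * coverage_class;
	int acktimeout = slottime + sifs;

	if (is_2ghz)
		acktimeout += 64 - sifs - slot;

	printf("%s: slot %d us, ack/cts timeout %d us\n",
	       band, slottime, acktimeout);
}

int main(void)
{
	show("2.4 GHz", 10, 9, 0, 1);	/* 9 + 10 + 45 = 64 us */
	show("5 GHz  ", 16, 9, 0, 0);	/* 9 + 16      = 25 us */
	return 0;
}
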
+ */ + if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) { + val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS); + val &= ~AR_PHY_RIFS_INIT_DELAY; + REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val); + } +} + +static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah, + struct ar5416_eeprom_def *pEepData, + u32 reg, u32 value) +{ + struct base_eep_header *pBase = &(pEepData->baseEepHeader); + struct ath_common *common = ath9k_hw_common(ah); + + switch (ah->hw_version.devid) { + case AR9280_DEVID_PCI: + if (reg == 0x7894) { + ath_print(common, ATH_DBG_EEPROM, + "ini VAL: %x EEPROM: %x\n", value, + (pBase->version & 0xff)); + + if ((pBase->version & 0xff) > 0x0a) { + ath_print(common, ATH_DBG_EEPROM, + "PWDCLKIND: %d\n", + pBase->pwdclkind); + value &= ~AR_AN_TOP2_PWDCLKIND; + value |= AR_AN_TOP2_PWDCLKIND & + (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S); + } else { + ath_print(common, ATH_DBG_EEPROM, + "PWDCLKIND Earlier Rev\n"); + } + + ath_print(common, ATH_DBG_EEPROM, + "final ini VAL: %x\n", value); + } + break; + } + + return value; +} + +static u32 ath9k_hw_ini_fixup(struct ath_hw *ah, + struct ar5416_eeprom_def *pEepData, + u32 reg, u32 value) +{ + if (ah->eep_map == EEP_MAP_4KBITS) + return value; + else + return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value); +} + +static void ath9k_olc_init(struct ath_hw *ah) +{ + u32 i; + + if (OLC_FOR_AR9287_10_LATER) { + REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9, + AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL); + ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0, + AR9287_AN_TXPC0_TXPCMODE, + AR9287_AN_TXPC0_TXPCMODE_S, + AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE); + udelay(100); + } else { + for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++) + ah->originalGain[i] = + MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4), + AR_PHY_TX_GAIN); + ah->PDADCdelta = 0; + } +} + +static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, + struct ath9k_channel *chan) +{ + u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band); + + if (IS_CHAN_B(chan)) + ctl |= CTL_11B; + else if (IS_CHAN_G(chan)) + ctl |= CTL_11G; + else + ctl |= CTL_11A; + + return ctl; +} + +static int ath9k_hw_process_ini(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + int i, regWrites = 0; + struct ieee80211_channel *channel = chan->chan; + u32 modesIndex, freqIndex; + + switch (chan->chanmode) { + case CHANNEL_A: + case CHANNEL_A_HT20: + modesIndex = 1; + freqIndex = 1; + break; + case CHANNEL_A_HT40PLUS: + case CHANNEL_A_HT40MINUS: + modesIndex = 2; + freqIndex = 1; + break; + case CHANNEL_G: + case CHANNEL_G_HT20: + case CHANNEL_B: + modesIndex = 4; + freqIndex = 2; + break; + case CHANNEL_G_HT40PLUS: + case CHANNEL_G_HT40MINUS: + modesIndex = 3; + freqIndex = 2; + break; + + default: + return -EINVAL; + } + + REG_WRITE(ah, AR_PHY(0), 0x00000007); + REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO); + ah->eep_ops->set_addac(ah, chan); + + if (AR_SREV_5416_22_OR_LATER(ah)) { + REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites); + } else { + struct ar5416IniArray temp; + u32 addacSize = + sizeof(u32) * ah->iniAddac.ia_rows * + ah->iniAddac.ia_columns; + + memcpy(ah->addac5416_21, + ah->iniAddac.ia_array, addacSize); + + (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0; + + temp.ia_array = ah->addac5416_21; + temp.ia_columns = ah->iniAddac.ia_columns; + temp.ia_rows = ah->iniAddac.ia_rows; + REG_WRITE_ARRAY(&temp, 1, regWrites); + } + + REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC); + + for (i = 0; i < ah->iniModes.ia_rows; i++) { + 
u32 reg = INI_RA(&ah->iniModes, i, 0); + u32 val = INI_RA(&ah->iniModes, i, modesIndex); + + REG_WRITE(ah, reg, val); + + if (reg >= 0x7800 && reg < 0x78a0 + && ah->config.analog_shiftreg) { + udelay(100); + } + + DO_DELAY(regWrites); + } + + if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah)) + REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites); + + if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) || + AR_SREV_9287_10_OR_LATER(ah)) + REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); + + for (i = 0; i < ah->iniCommon.ia_rows; i++) { + u32 reg = INI_RA(&ah->iniCommon, i, 0); + u32 val = INI_RA(&ah->iniCommon, i, 1); + + REG_WRITE(ah, reg, val); + + if (reg >= 0x7800 && reg < 0x78a0 + && ah->config.analog_shiftreg) { + udelay(100); + } + + DO_DELAY(regWrites); + } + + ath9k_hw_write_regs(ah, freqIndex, regWrites); + + if (AR_SREV_9271_10(ah)) + REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only, + modesIndex, regWrites); + + if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) { + REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex, + regWrites); + } + + ath9k_hw_override_ini(ah, chan); + ath9k_hw_set_regs(ah, chan); + ath9k_hw_init_chain_masks(ah); + + if (OLC_FOR_AR9280_20_LATER) + ath9k_olc_init(ah); + + ah->eep_ops->set_txpower(ah, chan, + ath9k_regd_get_ctl(regulatory, chan), + channel->max_antenna_gain * 2, + channel->max_power * 2, + min((u32) MAX_RATE_POWER, + (u32) regulatory->power_limit)); + + if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "ar5416SetRfRegs failed\n"); + return -EIO; + } + + return 0; +} + +/****************************************/ +/* Reset and Channel Switching Routines */ +/****************************************/ + +static void ath9k_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan) +{ + u32 rfMode = 0; + + if (chan == NULL) + return; + + rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan)) + ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM; + + if (!AR_SREV_9280_10_OR_LATER(ah)) + rfMode |= (IS_CHAN_5GHZ(chan)) ? + AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ; + + if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) + rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE); + + REG_WRITE(ah, AR_PHY_MODE, rfMode); +} + +static void ath9k_hw_mark_phy_inactive(struct ath_hw *ah) +{ + REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); +} + +static inline void ath9k_hw_set_dma(struct ath_hw *ah) +{ + u32 regval; + + /* + * set AHB_MODE not to do cacheline prefetches + */ + regval = REG_READ(ah, AR_AHB_MODE); + REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN); + + /* + * let mac dma reads be in 128 byte chunks + */ + regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK; + REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B); + + /* + * Restore TX Trigger Level to its pre-reset value. + * The initial value depends on whether aggregation is enabled, and is + * adjusted whenever underruns are detected. + */ + REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level); + + /* + * let mac dma writes be in 128 byte chunks + */ + regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK; + REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B); + + /* + * Setup receive FIFO threshold to hold off TX activities + */ + REG_WRITE(ah, AR_RXFIFO_CFG, 0x200); + + /* + * reduce the number of usable entries in PCU TXBUF to avoid + * wrap around issues. + */ + if (AR_SREV_9285(ah)) { + /* For AR9285 the number of Fifos are reduced to half. 
+ * So set the usable tx buf size also to half to + * avoid data/delimiter underruns + */ + REG_WRITE(ah, AR_PCU_TXBUF_CTRL, + AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE); + } else if (!AR_SREV_9271(ah)) { + REG_WRITE(ah, AR_PCU_TXBUF_CTRL, + AR_PCU_TXBUF_CTRL_USABLE_SIZE); + } +} + +static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) +{ + u32 val; + + val = REG_READ(ah, AR_STA_ID1); + val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC); + switch (opmode) { + case NL80211_IFTYPE_AP: + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP + | AR_STA_ID1_KSRCH_MODE); + REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); + break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC + | AR_STA_ID1_KSRCH_MODE); + REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_MONITOR: + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); + break; + } +} + +static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, + u32 coef_scaled, + u32 *coef_mantissa, + u32 *coef_exponent) +{ + u32 coef_exp, coef_man; + + for (coef_exp = 31; coef_exp > 0; coef_exp--) + if ((coef_scaled >> coef_exp) & 0x1) + break; + + coef_exp = 14 - (coef_exp - COEF_SCALE_S); + + coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1)); + + *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp); + *coef_exponent = coef_exp - 16; +} + +static void ath9k_hw_set_delta_slope(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + u32 coef_scaled, ds_coef_exp, ds_coef_man; + u32 clockMhzScaled = 0x64000000; + struct chan_centers centers; + + if (IS_CHAN_HALF_RATE(chan)) + clockMhzScaled = clockMhzScaled >> 1; + else if (IS_CHAN_QUARTER_RATE(chan)) + clockMhzScaled = clockMhzScaled >> 2; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + coef_scaled = clockMhzScaled / centers.synth_center; + + ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man, + &ds_coef_exp); + + REG_RMW_FIELD(ah, AR_PHY_TIMING3, + AR_PHY_TIMING3_DSC_MAN, ds_coef_man); + REG_RMW_FIELD(ah, AR_PHY_TIMING3, + AR_PHY_TIMING3_DSC_EXP, ds_coef_exp); + + coef_scaled = (9 * coef_scaled) / 10; + + ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man, + &ds_coef_exp); + + REG_RMW_FIELD(ah, AR_PHY_HALFGI, + AR_PHY_HALFGI_DSC_MAN, ds_coef_man); + REG_RMW_FIELD(ah, AR_PHY_HALFGI, + AR_PHY_HALFGI_DSC_EXP, ds_coef_exp); +} + +static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) +{ + u32 rst_flags; + u32 tmpReg; + + if (AR_SREV_9100(ah)) { + u32 val = REG_READ(ah, AR_RTC_DERIVED_CLK); + val &= ~AR_RTC_DERIVED_CLK_PERIOD; + val |= SM(1, AR_RTC_DERIVED_CLK_PERIOD); + REG_WRITE(ah, AR_RTC_DERIVED_CLK, val); + (void)REG_READ(ah, AR_RTC_DERIVED_CLK); + } + + REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | + AR_RTC_FORCE_WAKE_ON_INT); + + if (AR_SREV_9100(ah)) { + rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD | + AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET; + } else { + tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE); + if (tmpReg & + (AR_INTR_SYNC_LOCAL_TIMEOUT | + AR_INTR_SYNC_RADM_CPL_TIMEOUT)) { + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); + REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); + } else { + REG_WRITE(ah, AR_RC, AR_RC_AHB); + } + + rst_flags = AR_RTC_RC_MAC_WARM; + if (type == ATH9K_RESET_COLD) + rst_flags |= AR_RTC_RC_MAC_COLD; + } + + REG_WRITE(ah, AR_RTC_RC, rst_flags); + udelay(50); + + REG_WRITE(ah, AR_RTC_RC, 0); + if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { + 
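
/*
 * Illustrative sketch (not from the driver): the fixed-point mantissa /
 * exponent split performed by ath9k_hw_get_delta_slope_vals() above. The
 * scaled coefficient fed in by ath9k_hw_set_delta_slope() is 100 MHz << 24
 * divided by the synthesized centre frequency (COEF_SCALE_S is assumed to be
 * 24, matching the 0x64000000 constant); the helper rounds and normalises it
 * into the PHY's mantissa/exponent register fields.
 */
#include <stdio.h>
#include <stdint.h>

#define COEF_SCALE_S 24

static void delta_slope_vals(uint32_t coef_scaled, uint32_t *man,
			     uint32_t *exponent)
{
	uint32_t coef_exp, coef_man;

	/* position of the most significant set bit */
	for (coef_exp = 31; coef_exp > 0; coef_exp--)
		if ((coef_scaled >> coef_exp) & 0x1)
			break;

	/* same normalisation as the driver (unsigned wrap-around intended) */
	coef_exp = 14 - (coef_exp - COEF_SCALE_S);

	/* round, then drop the now-redundant low bits */
	coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1));
	*man = coef_man >> (COEF_SCALE_S - coef_exp);
	*exponent = coef_exp - 16;
}

int main(void)
{
	uint32_t centre = 2437;	/* MHz, 2.4 GHz channel 6 */
	uint32_t coef_scaled = 0x64000000 / centre;
	uint32_t man, exponent;

	delta_slope_vals(coef_scaled, &man, &exponent);
	printf("coef_scaled=%u man=%u exp=%u\n", coef_scaled, man, exponent);
	return 0;
}
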
ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, + "RTC stuck in MAC reset\n"); + return false; + } + + if (!AR_SREV_9100(ah)) + REG_WRITE(ah, AR_RC, 0); + + if (AR_SREV_9100(ah)) + udelay(50); + + return true; +} + +static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) +{ + REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | + AR_RTC_FORCE_WAKE_ON_INT); + + if (!AR_SREV_9100(ah)) + REG_WRITE(ah, AR_RC, AR_RC_AHB); + + REG_WRITE(ah, AR_RTC_RESET, 0); + udelay(2); + + if (!AR_SREV_9100(ah)) + REG_WRITE(ah, AR_RC, 0); + + REG_WRITE(ah, AR_RTC_RESET, 1); + + if (!ath9k_hw_wait(ah, + AR_RTC_STATUS, + AR_RTC_STATUS_M, + AR_RTC_STATUS_ON, + AH_WAIT_TIMEOUT)) { + ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, + "RTC not waking up\n"); + return false; + } + + ath9k_hw_read_revisions(ah); + + return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); +} + +static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) +{ + REG_WRITE(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); + + switch (type) { + case ATH9K_RESET_POWER_ON: + return ath9k_hw_set_reset_power_on(ah); + case ATH9K_RESET_WARM: + case ATH9K_RESET_COLD: + return ath9k_hw_set_reset(ah, type); + default: + return false; + } +} + +static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan) +{ + u32 phymode; + u32 enableDacFifo = 0; + + if (AR_SREV_9285_10_OR_LATER(ah)) + enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) & + AR_PHY_FC_ENABLE_DAC_FIFO); + + phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40 + | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo; + + if (IS_CHAN_HT40(chan)) { + phymode |= AR_PHY_FC_DYN2040_EN; + + if ((chan->chanmode == CHANNEL_A_HT40PLUS) || + (chan->chanmode == CHANNEL_G_HT40PLUS)) + phymode |= AR_PHY_FC_DYN2040_PRI_CH; + + } + REG_WRITE(ah, AR_PHY_TURBO, phymode); + + ath9k_hw_set11nmac2040(ah); + + REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S); + REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S); +} + +static bool ath9k_hw_chip_reset(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) { + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) + return false; + } else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) + return false; + + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) + return false; + + ah->chip_fullsleep = false; + ath9k_hw_init_pll(ah, chan); + ath9k_hw_set_rfmode(ah, chan); + + return true; +} + +static bool ath9k_hw_channel_change(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_channel *channel = chan->chan; + u32 synthDelay, qnum; + int r; + + for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { + if (ath9k_hw_numtxpending(ah, qnum)) { + ath_print(common, ATH_DBG_QUEUE, + "Transmit frames pending on " + "queue %d\n", qnum); + return false; + } + } + + REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN); + if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN, + AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) { + ath_print(common, ATH_DBG_FATAL, + "Could not kill baseband RX\n"); + return false; + } + + ath9k_hw_set_regs(ah, chan); + + r = ah->ath9k_hw_rf_set_freq(ah, chan); + if (r) { + ath_print(common, ATH_DBG_FATAL, + "Failed to set channel\n"); + return false; + } + + ah->eep_ops->set_txpower(ah, chan, + ath9k_regd_get_ctl(regulatory, chan), + channel->max_antenna_gain * 2, + channel->max_power * 2, + min((u32) MAX_RATE_POWER, 
+ (u32) regulatory->power_limit)); + + synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; + if (IS_CHAN_B(chan)) + synthDelay = (4 * synthDelay) / 22; + else + synthDelay /= 10; + + udelay(synthDelay + BASE_ACTIVATE_DELAY); + + REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); + + if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) + ath9k_hw_set_delta_slope(ah, chan); + + ah->ath9k_hw_spur_mitigate_freq(ah, chan); + + if (!chan->oneTimeCalsDone) + chan->oneTimeCalsDone = true; + + return true; +} + +static void ath9k_enable_rfkill(struct ath_hw *ah) +{ + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, + AR_GPIO_INPUT_EN_VAL_RFSILENT_BB); + + REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2, + AR_GPIO_INPUT_MUX2_RFSILENT); + + ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio); + REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB); +} + +int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, + bool bChannelChange) +{ + struct ath_common *common = ath9k_hw_common(ah); + u32 saveLedState; + struct ath9k_channel *curchan = ah->curchan; + u32 saveDefAntenna; + u32 macStaId1; + u64 tsf = 0; + int i, rx_chainmask, r; + + ah->txchainmask = common->tx_chainmask; + ah->rxchainmask = common->rx_chainmask; + + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) + return -EIO; + + if (curchan && !ah->chip_fullsleep) + ath9k_hw_getnf(ah, curchan); + + if (bChannelChange && + (ah->chip_fullsleep != true) && + (ah->curchan != NULL) && + (chan->channel != ah->curchan->channel) && + ((chan->channelFlags & CHANNEL_ALL) == + (ah->curchan->channelFlags & CHANNEL_ALL)) && + !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) || + IS_CHAN_A_5MHZ_SPACED(ah->curchan))) { + + if (ath9k_hw_channel_change(ah, chan)) { + ath9k_hw_loadnf(ah, ah->curchan); + ath9k_hw_start_nfcal(ah); + return 0; + } + } + + saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA); + if (saveDefAntenna == 0) + saveDefAntenna = 1; + + macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; + + /* For chips on which RTC reset is done, save TSF before it gets cleared */ + if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) + tsf = ath9k_hw_gettsf64(ah); + + saveLedState = REG_READ(ah, AR_CFG_LED) & + (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL | + AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW); + + ath9k_hw_mark_phy_inactive(ah); + + if (AR_SREV_9271(ah) && ah->htc_reset_init) { + REG_WRITE(ah, + AR9271_RESET_POWER_DOWN_CONTROL, + AR9271_RADIO_RF_RST); + udelay(50); + } + + if (!ath9k_hw_chip_reset(ah, chan)) { + ath_print(common, ATH_DBG_FATAL, "Chip reset failed\n"); + return -EINVAL; + } + + if (AR_SREV_9271(ah) && ah->htc_reset_init) { + ah->htc_reset_init = false; + REG_WRITE(ah, + AR9271_RESET_POWER_DOWN_CONTROL, + AR9271_GATE_MAC_CTL); + udelay(50); + } + + /* Restore TSF */ + if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) + ath9k_hw_settsf64(ah, tsf); + + if (AR_SREV_9280_10_OR_LATER(ah)) + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); + + if (AR_SREV_9287_12_OR_LATER(ah)) { + /* Enable ASYNC FIFO */ + REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, + AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL); + REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO); + REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, + AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); + REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, + AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); + } + r = ath9k_hw_process_ini(ah, chan); + if (r) + return r; + + /* Setup MFP options for CCMP */ + if (AR_SREV_9280_20_OR_LATER(ah)) { + /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt 
+ * frames when constructing CCMP AAD. */ + REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT, + 0xc7ff); + ah->sw_mgmt_crypto = false; + } else if (AR_SREV_9160_10_OR_LATER(ah)) { + /* Disable hardware crypto for management frames */ + REG_CLR_BIT(ah, AR_PCU_MISC_MODE2, + AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE); + REG_SET_BIT(ah, AR_PCU_MISC_MODE2, + AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT); + ah->sw_mgmt_crypto = true; + } else + ah->sw_mgmt_crypto = true; + + if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) + ath9k_hw_set_delta_slope(ah, chan); + + ah->ath9k_hw_spur_mitigate_freq(ah, chan); + ah->eep_ops->set_board_values(ah, chan); + + REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr)); + REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4) + | macStaId1 + | AR_STA_ID1_RTS_USE_DEF + | (ah->config. + ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0) + | ah->sta_id1_defaults); + ath9k_hw_set_operating_mode(ah, ah->opmode); + + ath_hw_setbssidmask(common); + + REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna); + + ath9k_hw_write_associd(ah); + + REG_WRITE(ah, AR_ISR, ~0); + + REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); + + r = ah->ath9k_hw_rf_set_freq(ah, chan); + if (r) + return r; + + for (i = 0; i < AR_NUM_DCU; i++) + REG_WRITE(ah, AR_DQCUMASK(i), 1 << i); + + ah->intr_txqs = 0; + for (i = 0; i < ah->caps.total_queues; i++) + ath9k_hw_resettxqueue(ah, i); + + ath9k_hw_init_interrupt_masks(ah, ah->opmode); + ath9k_hw_init_qos(ah); + + if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) + ath9k_enable_rfkill(ah); + + ath9k_hw_init_global_settings(ah); + + if (AR_SREV_9287_12_OR_LATER(ah)) { + REG_WRITE(ah, AR_D_GBL_IFS_SIFS, + AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR); + REG_WRITE(ah, AR_D_GBL_IFS_SLOT, + AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR); + REG_WRITE(ah, AR_D_GBL_IFS_EIFS, + AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR); + + REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR); + REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR); + + REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER, + AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768); + REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN, + AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL); + } + if (AR_SREV_9287_12_OR_LATER(ah)) { + REG_SET_BIT(ah, AR_PCU_MISC_MODE2, + AR_PCU_MISC_MODE2_ENABLE_AGGWEP); + } + + REG_WRITE(ah, AR_STA_ID1, + REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM); + + ath9k_hw_set_dma(ah); + + REG_WRITE(ah, AR_OBS, 8); + + if (ah->config.rx_intr_mitigation) { + REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); + REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000); + } + + ath9k_hw_init_bb(ah, chan); + + if (!ath9k_hw_init_cal(ah, chan)) + return -EIO; + + rx_chainmask = ah->rxchainmask; + if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) { + REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask); + REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask); + } + + REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ); + + /* + * For big endian systems turn on swapping for descriptors + */ + if (AR_SREV_9100(ah)) { + u32 mask; + mask = REG_READ(ah, AR_CFG); + if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) { + ath_print(common, ATH_DBG_RESET, + "CFG Byte Swap Set 0x%x\n", mask); + } else { + mask = + INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB; + REG_WRITE(ah, AR_CFG, mask); + ath_print(common, ATH_DBG_RESET, + "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG)); + } + } else { + /* Configure AR9271 target WLAN */ + if (AR_SREV_9271(ah)) + REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB); +#ifdef __BIG_ENDIAN + else + REG_WRITE(ah, AR_CFG, 
AR_CFG_SWTD | AR_CFG_SWRD); +#endif + } + + if (ah->btcoex_hw.enabled) + ath9k_hw_btcoex_enable(ah); + + return 0; +} +EXPORT_SYMBOL(ath9k_hw_reset); + +/************************/ +/* Key Cache Management */ +/************************/ + +bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry) +{ + u32 keyType; + + if (entry >= ah->caps.keycache_size) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "keychache entry %u out of range\n", entry); + return false; + } + + keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry)); + + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR); + REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0); + + if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) { + u16 micentry = entry + 64; + + REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0); + + } + + return true; +} +EXPORT_SYMBOL(ath9k_hw_keyreset); + +bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac) +{ + u32 macHi, macLo; + + if (entry >= ah->caps.keycache_size) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "keychache entry %u out of range\n", entry); + return false; + } + + if (mac != NULL) { + macHi = (mac[5] << 8) | mac[4]; + macLo = (mac[3] << 24) | + (mac[2] << 16) | + (mac[1] << 8) | + mac[0]; + macLo >>= 1; + macLo |= (macHi & 1) << 31; + macHi >>= 1; + } else { + macLo = macHi = 0; + } + REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo); + REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID); + + return true; +} +EXPORT_SYMBOL(ath9k_hw_keysetmac); + +bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry, + const struct ath9k_keyval *k, + const u8 *mac) +{ + const struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_common *common = ath9k_hw_common(ah); + u32 key0, key1, key2, key3, key4; + u32 keyType; + + if (entry >= pCap->keycache_size) { + ath_print(common, ATH_DBG_FATAL, + "keycache entry %u out of range\n", entry); + return false; + } + + switch (k->kv_type) { + case ATH9K_CIPHER_AES_OCB: + keyType = AR_KEYTABLE_TYPE_AES; + break; + case ATH9K_CIPHER_AES_CCM: + if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) { + ath_print(common, ATH_DBG_ANY, + "AES-CCM not supported by mac rev 0x%x\n", + ah->hw_version.macRev); + return false; + } + keyType = AR_KEYTABLE_TYPE_CCM; + break; + case ATH9K_CIPHER_TKIP: + keyType = AR_KEYTABLE_TYPE_TKIP; + if (ATH9K_IS_MIC_ENABLED(ah) + && entry + 64 >= pCap->keycache_size) { + ath_print(common, ATH_DBG_ANY, + "entry %u inappropriate for TKIP\n", entry); + return false; + } + break; + case ATH9K_CIPHER_WEP: + if (k->kv_len < WLAN_KEY_LEN_WEP40) { + ath_print(common, ATH_DBG_ANY, + "WEP key length %u too small\n", k->kv_len); + return false; + } + if (k->kv_len <= WLAN_KEY_LEN_WEP40) + keyType = AR_KEYTABLE_TYPE_40; + else if (k->kv_len <= WLAN_KEY_LEN_WEP104) + keyType = AR_KEYTABLE_TYPE_104; + else + keyType = AR_KEYTABLE_TYPE_128; + break; + case ATH9K_CIPHER_CLR: + keyType = AR_KEYTABLE_TYPE_CLR; + break; + default: + ath_print(common, ATH_DBG_FATAL, + "cipher %u not supported\n", k->kv_type); + return false; + } + + key0 = get_unaligned_le32(k->kv_val + 0); + key1 = get_unaligned_le16(k->kv_val + 4); 
+ key2 = get_unaligned_le32(k->kv_val + 6); + key3 = get_unaligned_le16(k->kv_val + 10); + key4 = get_unaligned_le32(k->kv_val + 12); + if (k->kv_len <= WLAN_KEY_LEN_WEP104) + key4 &= 0xff; + + /* + * Note: Key cache registers access special memory area that requires + * two 32-bit writes to actually update the values in the internal + * memory. Consequently, the exact order and pairs used here must be + * maintained. + */ + + if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) { + u16 micentry = entry + 64; + + /* + * Write inverted key[47:0] first to avoid Michael MIC errors + * on frames that could be sent or received at the same time. + * The correct key will be written in the end once everything + * else is ready. + */ + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1); + + /* Write key[95:48] */ + REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3); + + /* Write key[127:96] and key type */ + REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); + REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); + + /* Write MAC address for the entry */ + (void) ath9k_hw_keysetmac(ah, entry, mac); + + if (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) { + /* + * TKIP uses two key cache entries: + * Michael MIC TX/RX keys in the same key cache entry + * (idx = main index + 64): + * key0 [31:0] = RX key [31:0] + * key1 [15:0] = TX key [31:16] + * key1 [31:16] = reserved + * key2 [31:0] = RX key [63:32] + * key3 [15:0] = TX key [15:0] + * key3 [31:16] = reserved + * key4 [31:0] = TX key [63:32] + */ + u32 mic0, mic1, mic2, mic3, mic4; + + mic0 = get_unaligned_le32(k->kv_mic + 0); + mic2 = get_unaligned_le32(k->kv_mic + 4); + mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff; + mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff; + mic4 = get_unaligned_le32(k->kv_txmic + 4); + + /* Write RX[31:0] and TX[31:16] */ + REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1); + + /* Write RX[63:32] and TX[15:0] */ + REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3); + + /* Write TX[63:32] and keyType(reserved) */ + REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4); + REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), + AR_KEYTABLE_TYPE_CLR); + + } else { + /* + * TKIP uses four key cache entries (two for group + * keys): + * Michael MIC TX/RX keys are in different key cache + * entries (idx = main index + 64 for TX and + * main index + 32 + 96 for RX): + * key0 [31:0] = TX/RX MIC key [31:0] + * key1 [31:0] = reserved + * key2 [31:0] = TX/RX MIC key [63:32] + * key3 [31:0] = reserved + * key4 [31:0] = reserved + * + * Upper layer code will call this function separately + * for TX and RX keys when these registers offsets are + * used. 
+ */ + u32 mic0, mic2; + + mic0 = get_unaligned_le32(k->kv_mic + 0); + mic2 = get_unaligned_le32(k->kv_mic + 4); + + /* Write MIC key[31:0] */ + REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); + + /* Write MIC key[63:32] */ + REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0); + + /* Write TX[63:32] and keyType(reserved) */ + REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), + AR_KEYTABLE_TYPE_CLR); + } + + /* MAC address registers are reserved for the MIC entry */ + REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0); + + /* + * Write the correct (un-inverted) key[47:0] last to enable + * TKIP now that all other registers are set with correct + * values. + */ + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); + } else { + /* Write key[47:0] */ + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); + + /* Write key[95:48] */ + REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3); + + /* Write key[127:96] and key type */ + REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); + REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); + + /* Write MAC address for the entry */ + (void) ath9k_hw_keysetmac(ah, entry, mac); + } + + return true; +} +EXPORT_SYMBOL(ath9k_hw_set_keycache_entry); + +bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry) +{ + if (entry < ah->caps.keycache_size) { + u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry)); + if (val & AR_KEYTABLE_VALID) + return true; + } + return false; +} +EXPORT_SYMBOL(ath9k_hw_keyisvalid); + +/******************************/ +/* Power Management (Chipset) */ +/******************************/ + +static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) +{ + REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); + if (setChip) { + REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + if (!AR_SREV_9100(ah)) + REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); + + if(!AR_SREV_5416(ah)) + REG_CLR_BIT(ah, (AR_RTC_RESET), + AR_RTC_RESET_EN); + } +} + +static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip) +{ + REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); + if (setChip) { + struct ath9k_hw_capabilities *pCap = &ah->caps; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + REG_WRITE(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_ON_INT); + } else { + REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + } + } +} + +static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) +{ + u32 val; + int i; + + if (setChip) { + if ((REG_READ(ah, AR_RTC_STATUS) & + AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) { + if (ath9k_hw_set_reset_reg(ah, + ATH9K_RESET_POWER_ON) != true) { + return false; + } + ath9k_hw_init_pll(ah, NULL); + } + if (AR_SREV_9100(ah)) + REG_SET_BIT(ah, AR_RTC_RESET, + AR_RTC_RESET_EN); + + REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + udelay(50); + + for (i = POWER_UP_TIME / 50; i > 0; i--) { + val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; + if (val == AR_RTC_STATUS_ON) + break; + udelay(50); + REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + } + if (i == 0) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Failed to wakeup in %uus\n", + POWER_UP_TIME / 20); + return false; + } + } + + REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); + + return true; +} + +bool 
ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) +{ + struct ath_common *common = ath9k_hw_common(ah); + int status = true, setChip = true; + static const char *modes[] = { + "AWAKE", + "FULL-SLEEP", + "NETWORK SLEEP", + "UNDEFINED" + }; + + if (ah->power_mode == mode) + return status; + + ath_print(common, ATH_DBG_RESET, "%s -> %s\n", + modes[ah->power_mode], modes[mode]); + + switch (mode) { + case ATH9K_PM_AWAKE: + status = ath9k_hw_set_power_awake(ah, setChip); + break; + case ATH9K_PM_FULL_SLEEP: + ath9k_set_power_sleep(ah, setChip); + ah->chip_fullsleep = true; + break; + case ATH9K_PM_NETWORK_SLEEP: + ath9k_set_power_network_sleep(ah, setChip); + break; + default: + ath_print(common, ATH_DBG_FATAL, + "Unknown power mode %u\n", mode); + return false; + } + ah->power_mode = mode; + + return status; +} +EXPORT_SYMBOL(ath9k_hw_setpower); + +/* + * Helper for ASPM support. + * + * Disable PLL when in L0s as well as receiver clock when in L1. + * This power saving option must be enabled through the SerDes. + * + * Programming the SerDes must go through the same 288 bit serial shift + * register as the other analog registers. Hence the 9 writes. + */ +void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off) +{ + u8 i; + u32 val; + + if (ah->is_pciexpress != true) + return; + + /* Do not touch SerDes registers */ + if (ah->config.pcie_powersave_enable == 2) + return; + + /* Nothing to do on restore for 11N */ + if (!restore) { + if (AR_SREV_9280_20_OR_LATER(ah)) { + /* + * AR9280 2.0 or later chips use SerDes values from the + * initvals.h initialized depending on chipset during + * ath9k_hw_init() + */ + for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) { + REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0), + INI_RA(&ah->iniPcieSerdes, i, 1)); + } + } else if (AR_SREV_9280(ah) && + (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) { + REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00); + REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); + + /* RX shut off when elecidle is asserted */ + REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019); + REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820); + REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560); + + /* Shut off CLKREQ active in L1 */ + if (ah->config.pcie_clock_req) + REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc); + else + REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd); + + REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); + REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); + REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007); + + /* Load the new settings */ + REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); + + } else { + REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); + REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); + + /* RX shut off when elecidle is asserted */ + REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039); + REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824); + REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579); + + /* + * Ignore ah->ah_config.pcie_clock_req setting for + * pre-AR9280 11n + */ + REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff); + + REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); + REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); + REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007); + + /* Load the new settings */ + REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); + } + + udelay(1000); + + /* set bit 19 to allow forcing of pcie core into L1 state */ + REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); + + /* Several PCIe massages to ensure proper behaviour */ + if (ah->config.pcie_waen) { + val = ah->config.pcie_waen; + if (!power_off) + val &= (~AR_WA_D3_L1_DISABLE); + } else { + if (AR_SREV_9285(ah) || 
AR_SREV_9271(ah) || + AR_SREV_9287(ah)) { + val = AR9285_WA_DEFAULT; + if (!power_off) + val &= (~AR_WA_D3_L1_DISABLE); + } else if (AR_SREV_9280(ah)) { + /* + * On AR9280 chips bit 22 of 0x4004 needs to be + * set otherwise card may disappear. + */ + val = AR9280_WA_DEFAULT; + if (!power_off) + val &= (~AR_WA_D3_L1_DISABLE); + } else + val = AR_WA_DEFAULT; + } + + REG_WRITE(ah, AR_WA, val); + } + + if (power_off) { + /* + * Set PCIe workaround bits + * bit 14 in WA register (disable L1) should only + * be set when device enters D3 and be cleared + * when device comes back to D0. + */ + if (ah->config.pcie_waen) { + if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE) + REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE); + } else { + if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) || + AR_SREV_9287(ah)) && + (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) || + (AR_SREV_9280(ah) && + (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) { + REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE); + } + } + } +} +EXPORT_SYMBOL(ath9k_hw_configpcipowersave); + +/**********************/ +/* Interrupt Handling */ +/**********************/ + +bool ath9k_hw_intrpend(struct ath_hw *ah) +{ + u32 host_isr; + + if (AR_SREV_9100(ah)) + return true; + + host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE); + if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS)) + return true; + + host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE); + if ((host_isr & AR_INTR_SYNC_DEFAULT) + && (host_isr != AR_INTR_SPURIOUS)) + return true; + + return false; +} +EXPORT_SYMBOL(ath9k_hw_intrpend); + +bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked) +{ + u32 isr = 0; + u32 mask2 = 0; + struct ath9k_hw_capabilities *pCap = &ah->caps; + u32 sync_cause = 0; + bool fatal_int = false; + struct ath_common *common = ath9k_hw_common(ah); + + if (!AR_SREV_9100(ah)) { + if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { + if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) + == AR_RTC_STATUS_ON) { + isr = REG_READ(ah, AR_ISR); + } + } + + sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & + AR_INTR_SYNC_DEFAULT; + + *masked = 0; + + if (!isr && !sync_cause) + return false; + } else { + *masked = 0; + isr = REG_READ(ah, AR_ISR); + } + + if (isr) { + if (isr & AR_ISR_BCNMISC) { + u32 isr2; + isr2 = REG_READ(ah, AR_ISR_S2); + if (isr2 & AR_ISR_S2_TIM) + mask2 |= ATH9K_INT_TIM; + if (isr2 & AR_ISR_S2_DTIM) + mask2 |= ATH9K_INT_DTIM; + if (isr2 & AR_ISR_S2_DTIMSYNC) + mask2 |= ATH9K_INT_DTIMSYNC; + if (isr2 & (AR_ISR_S2_CABEND)) + mask2 |= ATH9K_INT_CABEND; + if (isr2 & AR_ISR_S2_GTT) + mask2 |= ATH9K_INT_GTT; + if (isr2 & AR_ISR_S2_CST) + mask2 |= ATH9K_INT_CST; + if (isr2 & AR_ISR_S2_TSFOOR) + mask2 |= ATH9K_INT_TSFOOR; + } + + isr = REG_READ(ah, AR_ISR_RAC); + if (isr == 0xffffffff) { + *masked = 0; + return false; + } + + *masked = isr & ATH9K_INT_COMMON; + + if (ah->config.rx_intr_mitigation) { + if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) + *masked |= ATH9K_INT_RX; + } + + if (isr & (AR_ISR_RXOK | AR_ISR_RXERR)) + *masked |= ATH9K_INT_RX; + if (isr & + (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR | + AR_ISR_TXEOL)) { + u32 s0_s, s1_s; + + *masked |= ATH9K_INT_TX; + + s0_s = REG_READ(ah, AR_ISR_S0_S); + ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); + ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); + + s1_s = REG_READ(ah, AR_ISR_S1_S); + ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); + ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); + } + + if (isr & AR_ISR_RXORN) { + ath_print(common, ATH_DBG_INTERRUPT, + "receive FIFO overrun interrupt\n"); + } + + if 
(!AR_SREV_9100(ah)) { + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + u32 isr5 = REG_READ(ah, AR_ISR_S5_S); + if (isr5 & AR_ISR_S5_TIM_TIMER) + *masked |= ATH9K_INT_TIM_TIMER; + } + } + + *masked |= mask2; + } + + if (AR_SREV_9100(ah)) + return true; + + if (isr & AR_ISR_GENTMR) { + u32 s5_s; + + s5_s = REG_READ(ah, AR_ISR_S5_S); + if (isr & AR_ISR_GENTMR) { + ah->intr_gen_timer_trigger = + MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); + + ah->intr_gen_timer_thresh = + MS(s5_s, AR_ISR_S5_GENTIMER_THRESH); + + if (ah->intr_gen_timer_trigger) + *masked |= ATH9K_INT_GENTIMER; + + } + } + + if (sync_cause) { + fatal_int = + (sync_cause & + (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR)) + ? true : false; + + if (fatal_int) { + if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) { + ath_print(common, ATH_DBG_ANY, + "received PCI FATAL interrupt\n"); + } + if (sync_cause & AR_INTR_SYNC_HOST1_PERR) { + ath_print(common, ATH_DBG_ANY, + "received PCI PERR interrupt\n"); + } + *masked |= ATH9K_INT_FATAL; + } + if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { + ath_print(common, ATH_DBG_INTERRUPT, + "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n"); + REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); + REG_WRITE(ah, AR_RC, 0); + *masked |= ATH9K_INT_FATAL; + } + if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) { + ath_print(common, ATH_DBG_INTERRUPT, + "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); + } + + REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); + (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR); + } + + return true; +} +EXPORT_SYMBOL(ath9k_hw_getisr); + +enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) +{ + u32 omask = ah->mask_reg; + u32 mask, mask2; + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_common *common = ath9k_hw_common(ah); + + ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); + + if (omask & ATH9K_INT_GLOBAL) { + ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n"); + REG_WRITE(ah, AR_IER, AR_IER_DISABLE); + (void) REG_READ(ah, AR_IER); + if (!AR_SREV_9100(ah)) { + REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0); + (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE); + + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); + (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); + } + } + + mask = ints & ATH9K_INT_COMMON; + mask2 = 0; + + if (ints & ATH9K_INT_TX) { + if (ah->txok_interrupt_mask) + mask |= AR_IMR_TXOK; + if (ah->txdesc_interrupt_mask) + mask |= AR_IMR_TXDESC; + if (ah->txerr_interrupt_mask) + mask |= AR_IMR_TXERR; + if (ah->txeol_interrupt_mask) + mask |= AR_IMR_TXEOL; + } + if (ints & ATH9K_INT_RX) { + mask |= AR_IMR_RXERR; + if (ah->config.rx_intr_mitigation) + mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; + else + mask |= AR_IMR_RXOK | AR_IMR_RXDESC; + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) + mask |= AR_IMR_GENTMR; + } + + if (ints & (ATH9K_INT_BMISC)) { + mask |= AR_IMR_BCNMISC; + if (ints & ATH9K_INT_TIM) + mask2 |= AR_IMR_S2_TIM; + if (ints & ATH9K_INT_DTIM) + mask2 |= AR_IMR_S2_DTIM; + if (ints & ATH9K_INT_DTIMSYNC) + mask2 |= AR_IMR_S2_DTIMSYNC; + if (ints & ATH9K_INT_CABEND) + mask2 |= AR_IMR_S2_CABEND; + if (ints & ATH9K_INT_TSFOOR) + mask2 |= AR_IMR_S2_TSFOOR; + } + + if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) { + mask |= AR_IMR_BCNMISC; + if (ints & ATH9K_INT_GTT) + mask2 |= AR_IMR_S2_GTT; + if (ints & ATH9K_INT_CST) + mask2 |= AR_IMR_S2_CST; + } + + ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask); + REG_WRITE(ah, AR_IMR, mask); + mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM | + AR_IMR_S2_DTIM | + AR_IMR_S2_DTIMSYNC | + AR_IMR_S2_CABEND | + AR_IMR_S2_CABTO | + 
AR_IMR_S2_TSFOOR | + AR_IMR_S2_GTT | AR_IMR_S2_CST); + REG_WRITE(ah, AR_IMR_S2, mask | mask2); + ah->mask_reg = ints; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + if (ints & ATH9K_INT_TIM_TIMER) + REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); + else + REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); + } + + if (ints & ATH9K_INT_GLOBAL) { + ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n"); + REG_WRITE(ah, AR_IER, AR_IER_ENABLE); + if (!AR_SREV_9100(ah)) { + REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, + AR_INTR_MAC_IRQ); + REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ); + + + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, + AR_INTR_SYNC_DEFAULT); + REG_WRITE(ah, AR_INTR_SYNC_MASK, + AR_INTR_SYNC_DEFAULT); + } + ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", + REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); + } + + return omask; +} +EXPORT_SYMBOL(ath9k_hw_set_interrupts); + +/*******************/ +/* Beacon Handling */ +/*******************/ + +void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) +{ + int flags = 0; + + ah->beacon_interval = beacon_period; + + switch (ah->opmode) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_MONITOR: + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); + REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); + REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); + flags |= AR_TBTT_TIMER_EN; + break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + REG_SET_BIT(ah, AR_TXCFG, + AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); + REG_WRITE(ah, AR_NEXT_NDP_TIMER, + TU_TO_USEC(next_beacon + + (ah->atim_window ? ah-> + atim_window : 1))); + flags |= AR_NDP_TIMER_EN; + case NL80211_IFTYPE_AP: + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); + REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, + TU_TO_USEC(next_beacon - + ah->config. + dma_beacon_response_time)); + REG_WRITE(ah, AR_NEXT_SWBA, + TU_TO_USEC(next_beacon - + ah->config. 
+ sw_beacon_response_time)); + flags |= + AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; + break; + default: + ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON, + "%s: unsupported opmode: %d\n", + __func__, ah->opmode); + return; + break; + } + + REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period)); + REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period)); + REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period)); + REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period)); + + beacon_period &= ~ATH9K_BEACON_ENA; + if (beacon_period & ATH9K_BEACON_RESET_TSF) { + ath9k_hw_reset_tsf(ah); + } + + REG_SET_BIT(ah, AR_TIMER_MODE, flags); +} +EXPORT_SYMBOL(ath9k_hw_beaconinit); + +void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, + const struct ath9k_beacon_state *bs) +{ + u32 nextTbtt, beaconintval, dtimperiod, beacontimeout; + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_common *common = ath9k_hw_common(ah); + + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt)); + + REG_WRITE(ah, AR_BEACON_PERIOD, + TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD)); + REG_WRITE(ah, AR_DMA_BEACON_PERIOD, + TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD)); + + REG_RMW_FIELD(ah, AR_RSSI_THR, + AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold); + + beaconintval = bs->bs_intval & ATH9K_BEACON_PERIOD; + + if (bs->bs_sleepduration > beaconintval) + beaconintval = bs->bs_sleepduration; + + dtimperiod = bs->bs_dtimperiod; + if (bs->bs_sleepduration > dtimperiod) + dtimperiod = bs->bs_sleepduration; + + if (beaconintval == dtimperiod) + nextTbtt = bs->bs_nextdtim; + else + nextTbtt = bs->bs_nexttbtt; + + ath_print(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim); + ath_print(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt); + ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval); + ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod); + + REG_WRITE(ah, AR_NEXT_DTIM, + TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP)); + REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP)); + + REG_WRITE(ah, AR_SLEEP1, + SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT) + | AR_SLEEP1_ASSUME_DTIM); + + if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP) + beacontimeout = (BEACON_TIMEOUT_VAL << 3); + else + beacontimeout = MIN_BEACON_TIMEOUT_VAL; + + REG_WRITE(ah, AR_SLEEP2, + SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT)); + + REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval)); + REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod)); + + REG_SET_BIT(ah, AR_TIMER_MODE, + AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN | + AR_DTIM_TIMER_EN); + + /* TSF Out of Range Threshold */ + REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold); +} +EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers); + +/*******************/ +/* HW Capabilities */ +/*******************/ + +int ath9k_hw_fill_cap_info(struct ath_hw *ah) +{ + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ath_common *common = ath9k_hw_common(ah); + struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; + + u16 capField = 0, eeval; + + eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0); + regulatory->current_rd = eeval; + + eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_1); + if (AR_SREV_9285_10_OR_LATER(ah)) + eeval |= AR9285_RDEXT_DEFAULT; + regulatory->current_rd_ext = eeval; + + capField = ah->eep_ops->get_eeprom(ah, EEP_OP_CAP); + + if (ah->opmode != NL80211_IFTYPE_AP && + ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) { + if (regulatory->current_rd 
== 0x64 || + regulatory->current_rd == 0x65) + regulatory->current_rd += 5; + else if (regulatory->current_rd == 0x41) + regulatory->current_rd = 0x43; + ath_print(common, ATH_DBG_REGULATORY, + "regdomain mapped to 0x%x\n", regulatory->current_rd); + } + + eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE); + if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) { + ath_print(common, ATH_DBG_FATAL, + "no band has been marked as supported in EEPROM.\n"); + return -EINVAL; + } + + bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX); + + if (eeval & AR5416_OPFLAGS_11A) { + set_bit(ATH9K_MODE_11A, pCap->wireless_modes); + if (ah->config.ht_enable) { + if (!(eeval & AR5416_OPFLAGS_N_5G_HT20)) + set_bit(ATH9K_MODE_11NA_HT20, + pCap->wireless_modes); + if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) { + set_bit(ATH9K_MODE_11NA_HT40PLUS, + pCap->wireless_modes); + set_bit(ATH9K_MODE_11NA_HT40MINUS, + pCap->wireless_modes); + } + } + } + + if (eeval & AR5416_OPFLAGS_11G) { + set_bit(ATH9K_MODE_11G, pCap->wireless_modes); + if (ah->config.ht_enable) { + if (!(eeval & AR5416_OPFLAGS_N_2G_HT20)) + set_bit(ATH9K_MODE_11NG_HT20, + pCap->wireless_modes); + if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) { + set_bit(ATH9K_MODE_11NG_HT40PLUS, + pCap->wireless_modes); + set_bit(ATH9K_MODE_11NG_HT40MINUS, + pCap->wireless_modes); + } + } + } + + pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK); + /* + * For AR9271 we will temporarilly uses the rx chainmax as read from + * the EEPROM. + */ + if ((ah->hw_version.devid == AR5416_DEVID_PCI) && + !(eeval & AR5416_OPFLAGS_11A) && + !(AR_SREV_9271(ah))) + /* CB71: GPIO 0 is pulled down to indicate 3 rx chains */ + pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7; + else + /* Use rx_chainmask from EEPROM. */ + pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK); + + if (!(AR_SREV_9280(ah) && (ah->hw_version.macRev == 0))) + ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA; + + pCap->low_2ghz_chan = 2312; + pCap->high_2ghz_chan = 2732; + + pCap->low_5ghz_chan = 4920; + pCap->high_5ghz_chan = 6100; + + pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP; + pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP; + pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM; + + pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP; + pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP; + pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM; + + if (ah->config.ht_enable) + pCap->hw_caps |= ATH9K_HW_CAP_HT; + else + pCap->hw_caps &= ~ATH9K_HW_CAP_HT; + + pCap->hw_caps |= ATH9K_HW_CAP_GTT; + pCap->hw_caps |= ATH9K_HW_CAP_VEOL; + pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK; + pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH; + + if (capField & AR_EEPROM_EEPCAP_MAXQCU) + pCap->total_queues = + MS(capField, AR_EEPROM_EEPCAP_MAXQCU); + else + pCap->total_queues = ATH9K_NUM_TX_QUEUES; + + if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES) + pCap->keycache_size = + 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES); + else + pCap->keycache_size = AR_KEYTABLE_SIZE; + + pCap->hw_caps |= ATH9K_HW_CAP_FASTCC; + + if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) + pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1; + else + pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD; + + if (AR_SREV_9285_10_OR_LATER(ah)) + pCap->num_gpio_pins = AR9285_NUM_GPIO; + else if (AR_SREV_9280_10_OR_LATER(ah)) + pCap->num_gpio_pins = AR928X_NUM_GPIO; + else + pCap->num_gpio_pins = AR_NUM_GPIO; + + if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) { + pCap->hw_caps |= ATH9K_HW_CAP_CST; + pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; + } else { + pCap->rts_aggr_limit = (8 * 1024); + } + + 
pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM; + +#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) + ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT); + if (ah->rfsilent & EEP_RFSILENT_ENABLED) { + ah->rfkill_gpio = + MS(ah->rfsilent, EEP_RFSILENT_GPIO_SEL); + ah->rfkill_polarity = + MS(ah->rfsilent, EEP_RFSILENT_POLARITY); + + pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; + } +#endif + + pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP; + + if (AR_SREV_9280(ah) || AR_SREV_9285(ah)) + pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS; + else + pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS; + + if (regulatory->current_rd_ext & (1 << REG_EXT_JAPAN_MIDBAND)) { + pCap->reg_cap = + AR_EEPROM_EEREGCAP_EN_KK_NEW_11A | + AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN | + AR_EEPROM_EEREGCAP_EN_KK_U2 | + AR_EEPROM_EEREGCAP_EN_KK_MIDBAND; + } else { + pCap->reg_cap = + AR_EEPROM_EEREGCAP_EN_KK_NEW_11A | + AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN; + } + + /* Advertise midband for AR5416 with FCC midband set in eeprom */ + if (regulatory->current_rd_ext & (1 << REG_EXT_FCC_MIDBAND) && + AR_SREV_5416(ah)) + pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND; + + pCap->num_antcfg_5ghz = + ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ); + pCap->num_antcfg_2ghz = + ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ); + + if (AR_SREV_9280_10_OR_LATER(ah) && + ath9k_hw_btcoex_supported(ah)) { + btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO; + btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO; + + if (AR_SREV_9285(ah)) { + btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; + btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO; + } else { + btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE; + } + } else { + btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE; + } + + return 0; +} + +bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type, + u32 capability, u32 *result) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + switch (type) { + case ATH9K_CAP_CIPHER: + switch (capability) { + case ATH9K_CIPHER_AES_CCM: + case ATH9K_CIPHER_AES_OCB: + case ATH9K_CIPHER_TKIP: + case ATH9K_CIPHER_WEP: + case ATH9K_CIPHER_MIC: + case ATH9K_CIPHER_CLR: + return true; + default: + return false; + } + case ATH9K_CAP_TKIP_MIC: + switch (capability) { + case 0: + return true; + case 1: + return (ah->sta_id1_defaults & + AR_STA_ID1_CRPT_MIC_ENABLE) ? true : + false; + } + case ATH9K_CAP_TKIP_SPLIT: + return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ? + false : true; + case ATH9K_CAP_DIVERSITY: + return (REG_READ(ah, AR_PHY_CCK_DETECT) & + AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ? + true : false; + case ATH9K_CAP_MCAST_KEYSRCH: + switch (capability) { + case 0: + return true; + case 1: + if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) { + return false; + } else { + return (ah->sta_id1_defaults & + AR_STA_ID1_MCAST_KSRCH) ? true : + false; + } + } + return false; + case ATH9K_CAP_TXPOW: + switch (capability) { + case 0: + return 0; + case 1: + *result = regulatory->power_limit; + return 0; + case 2: + *result = regulatory->max_power_level; + return 0; + case 3: + *result = regulatory->tp_scale; + return 0; + } + return false; + case ATH9K_CAP_DS: + return (AR_SREV_9280_20_OR_LATER(ah) && + (ah->eep_ops->get_eeprom(ah, EEP_RC_CHAIN_MASK) == 1)) + ? 
false : true; + default: + return false; + } +} +EXPORT_SYMBOL(ath9k_hw_getcapability); + +bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, + u32 capability, u32 setting, int *status) +{ + u32 v; + + switch (type) { + case ATH9K_CAP_TKIP_MIC: + if (setting) + ah->sta_id1_defaults |= + AR_STA_ID1_CRPT_MIC_ENABLE; + else + ah->sta_id1_defaults &= + ~AR_STA_ID1_CRPT_MIC_ENABLE; + return true; + case ATH9K_CAP_DIVERSITY: + v = REG_READ(ah, AR_PHY_CCK_DETECT); + if (setting) + v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV; + else + v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV; + REG_WRITE(ah, AR_PHY_CCK_DETECT, v); + return true; + case ATH9K_CAP_MCAST_KEYSRCH: + if (setting) + ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH; + else + ah->sta_id1_defaults &= ~AR_STA_ID1_MCAST_KSRCH; + return true; + default: + return false; + } +} +EXPORT_SYMBOL(ath9k_hw_setcapability); + +/****************************/ +/* GPIO / RFKILL / Antennae */ +/****************************/ + +static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, + u32 gpio, u32 type) +{ + int addr; + u32 gpio_shift, tmp; + + if (gpio > 11) + addr = AR_GPIO_OUTPUT_MUX3; + else if (gpio > 5) + addr = AR_GPIO_OUTPUT_MUX2; + else + addr = AR_GPIO_OUTPUT_MUX1; + + gpio_shift = (gpio % 6) * 5; + + if (AR_SREV_9280_20_OR_LATER(ah) + || (addr != AR_GPIO_OUTPUT_MUX1)) { + REG_RMW(ah, addr, (type << gpio_shift), + (0x1f << gpio_shift)); + } else { + tmp = REG_READ(ah, addr); + tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0); + tmp &= ~(0x1f << gpio_shift); + tmp |= (type << gpio_shift); + REG_WRITE(ah, addr, tmp); + } +} + +void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio) +{ + u32 gpio_shift; + + BUG_ON(gpio >= ah->caps.num_gpio_pins); + + gpio_shift = gpio << 1; + + REG_RMW(ah, + AR_GPIO_OE_OUT, + (AR_GPIO_OE_OUT_DRV_NO << gpio_shift), + (AR_GPIO_OE_OUT_DRV << gpio_shift)); +} +EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input); + +u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) +{ +#define MS_REG_READ(x, y) \ + (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y))) + + if (gpio >= ah->caps.num_gpio_pins) + return 0xffffffff; + + if (AR_SREV_9287_10_OR_LATER(ah)) + return MS_REG_READ(AR9287, gpio) != 0; + else if (AR_SREV_9285_10_OR_LATER(ah)) + return MS_REG_READ(AR9285, gpio) != 0; + else if (AR_SREV_9280_10_OR_LATER(ah)) + return MS_REG_READ(AR928X, gpio) != 0; + else + return MS_REG_READ(AR, gpio) != 0; +} +EXPORT_SYMBOL(ath9k_hw_gpio_get); + +void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, + u32 ah_signal_type) +{ + u32 gpio_shift; + + ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); + + gpio_shift = 2 * gpio; + + REG_RMW(ah, + AR_GPIO_OE_OUT, + (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), + (AR_GPIO_OE_OUT_DRV << gpio_shift)); +} +EXPORT_SYMBOL(ath9k_hw_cfg_output); + +void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) +{ + REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), + AR_GPIO_BIT(gpio)); +} +EXPORT_SYMBOL(ath9k_hw_set_gpio); + +u32 ath9k_hw_getdefantenna(struct ath_hw *ah) +{ + return REG_READ(ah, AR_DEF_ANTENNA) & 0x7; +} +EXPORT_SYMBOL(ath9k_hw_getdefantenna); + +void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna) +{ + REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7)); +} +EXPORT_SYMBOL(ath9k_hw_setantenna); + +/*********************/ +/* General Operation */ +/*********************/ + +u32 ath9k_hw_getrxfilter(struct ath_hw *ah) +{ + u32 bits = REG_READ(ah, AR_RX_FILTER); + u32 phybits = REG_READ(ah, AR_PHY_ERR); + + if (phybits & AR_PHY_ERR_RADAR) + bits |= 
ATH9K_RX_FILTER_PHYRADAR; + if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING)) + bits |= ATH9K_RX_FILTER_PHYERR; + + return bits; +} +EXPORT_SYMBOL(ath9k_hw_getrxfilter); + +void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits) +{ + u32 phybits; + + REG_WRITE(ah, AR_RX_FILTER, bits); + + phybits = 0; + if (bits & ATH9K_RX_FILTER_PHYRADAR) + phybits |= AR_PHY_ERR_RADAR; + if (bits & ATH9K_RX_FILTER_PHYERR) + phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING; + REG_WRITE(ah, AR_PHY_ERR, phybits); + + if (phybits) + REG_WRITE(ah, AR_RXCFG, + REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA); + else + REG_WRITE(ah, AR_RXCFG, + REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA); +} +EXPORT_SYMBOL(ath9k_hw_setrxfilter); + +bool ath9k_hw_phy_disable(struct ath_hw *ah) +{ + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) + return false; + + ath9k_hw_init_pll(ah, NULL); + return true; +} +EXPORT_SYMBOL(ath9k_hw_phy_disable); + +bool ath9k_hw_disable(struct ath_hw *ah) +{ + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) + return false; + + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD)) + return false; + + ath9k_hw_init_pll(ah, NULL); + return true; +} +EXPORT_SYMBOL(ath9k_hw_disable); + +void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit) +{ + struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); + struct ath9k_channel *chan = ah->curchan; + struct ieee80211_channel *channel = chan->chan; + + regulatory->power_limit = min(limit, (u32) MAX_RATE_POWER); + + ah->eep_ops->set_txpower(ah, chan, + ath9k_regd_get_ctl(regulatory, chan), + channel->max_antenna_gain * 2, + channel->max_power * 2, + min((u32) MAX_RATE_POWER, + (u32) regulatory->power_limit)); +} +EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit); + +void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac) +{ + memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN); +} +EXPORT_SYMBOL(ath9k_hw_setmac); + +void ath9k_hw_setopmode(struct ath_hw *ah) +{ + ath9k_hw_set_operating_mode(ah, ah->opmode); +} +EXPORT_SYMBOL(ath9k_hw_setopmode); + +void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1) +{ + REG_WRITE(ah, AR_MCAST_FIL0, filter0); + REG_WRITE(ah, AR_MCAST_FIL1, filter1); +} +EXPORT_SYMBOL(ath9k_hw_setmcastfilter); + +void ath9k_hw_write_associd(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid)); + REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) | + ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S)); +} +EXPORT_SYMBOL(ath9k_hw_write_associd); + +u64 ath9k_hw_gettsf64(struct ath_hw *ah) +{ + u64 tsf; + + tsf = REG_READ(ah, AR_TSF_U32); + tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32); + + return tsf; +} +EXPORT_SYMBOL(ath9k_hw_gettsf64); + +void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64) +{ + REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff); + REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff); +} +EXPORT_SYMBOL(ath9k_hw_settsf64); + +void ath9k_hw_reset_tsf(struct ath_hw *ah) +{ + if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0, + AH_TSF_WRITE_TIMEOUT)) + ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, + "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n"); + + REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); +} +EXPORT_SYMBOL(ath9k_hw_reset_tsf); + +void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting) +{ + if (setting) + ah->misc_mode |= AR_PCU_TX_ADD_TSF; + else + ah->misc_mode &= ~AR_PCU_TX_ADD_TSF; +} +EXPORT_SYMBOL(ath9k_hw_set_tsfadjust); + +/* + * Extend 15-bit time stamp from rx 
descriptor to + * a full 64-bit TSF using the current h/w TSF. +*/ +u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp) +{ + u64 tsf; + + tsf = ath9k_hw_gettsf64(ah); + if ((tsf & 0x7fff) < rstamp) + tsf -= 0x8000; + return (tsf & ~0x7fff) | rstamp; +} +EXPORT_SYMBOL(ath9k_hw_extend_tsf); + +void ath9k_hw_set11nmac2040(struct ath_hw *ah) +{ + struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; + u32 macmode; + + if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca) + macmode = AR_2040_JOINED_RX_CLEAR; + else + macmode = 0; + + REG_WRITE(ah, AR_2040_MODE, macmode); +} + +/* HW Generic timers configuration */ + +static const struct ath_gen_timer_configuration gen_tmr_configuration[] = +{ + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, + {AR_NEXT_NDP2_TIMER, AR_NDP2_PERIOD, AR_NDP2_TIMER_MODE, 0x0001}, + {AR_NEXT_NDP2_TIMER + 1*4, AR_NDP2_PERIOD + 1*4, + AR_NDP2_TIMER_MODE, 0x0002}, + {AR_NEXT_NDP2_TIMER + 2*4, AR_NDP2_PERIOD + 2*4, + AR_NDP2_TIMER_MODE, 0x0004}, + {AR_NEXT_NDP2_TIMER + 3*4, AR_NDP2_PERIOD + 3*4, + AR_NDP2_TIMER_MODE, 0x0008}, + {AR_NEXT_NDP2_TIMER + 4*4, AR_NDP2_PERIOD + 4*4, + AR_NDP2_TIMER_MODE, 0x0010}, + {AR_NEXT_NDP2_TIMER + 5*4, AR_NDP2_PERIOD + 5*4, + AR_NDP2_TIMER_MODE, 0x0020}, + {AR_NEXT_NDP2_TIMER + 6*4, AR_NDP2_PERIOD + 6*4, + AR_NDP2_TIMER_MODE, 0x0040}, + {AR_NEXT_NDP2_TIMER + 7*4, AR_NDP2_PERIOD + 7*4, + AR_NDP2_TIMER_MODE, 0x0080} +}; + +/* HW generic timer primitives */ + +/* compute and clear index of rightmost 1 */ +static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask) +{ + u32 b; + + b = *mask; + b &= (0-b); + *mask &= ~b; + b *= debruijn32; + b >>= 27; + + return timer_table->gen_timer_index[b]; +} + +u32 ath9k_hw_gettsf32(struct ath_hw *ah) +{ + return REG_READ(ah, AR_TSF_L32); +} +EXPORT_SYMBOL(ath9k_hw_gettsf32); + +struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, + void (*trigger)(void *), + void (*overflow)(void *), + void *arg, + u8 timer_index) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + struct ath_gen_timer *timer; + + timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL); + + if (timer == NULL) { + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "Failed to allocate memory" + "for hw timer[%d]\n", timer_index); + return NULL; + } + + /* allocate a hardware generic timer slot */ + timer_table->timers[timer_index] = timer; + timer->index = timer_index; + timer->trigger = trigger; + timer->overflow = overflow; + timer->arg = arg; + + return timer; +} +EXPORT_SYMBOL(ath_gen_timer_alloc); + +void ath9k_hw_gen_timer_start(struct ath_hw *ah, + struct ath_gen_timer *timer, + u32 timer_next, + u32 timer_period) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + u32 tsf; + + BUG_ON(!timer_period); + + set_bit(timer->index, &timer_table->timer_mask.timer_bits); + + tsf = ath9k_hw_gettsf32(ah); + + ath_print(ath9k_hw_common(ah), ATH_DBG_HWTIMER, + "curent tsf %x period %x" + "timer_next %x\n", tsf, timer_period, timer_next); + + /* + * Pull timer_next forward if the current TSF already passed it + * because of software latency + 
*/ + if (timer_next < tsf) + timer_next = tsf + timer_period; + + /* + * Program generic timer registers + */ + REG_WRITE(ah, gen_tmr_configuration[timer->index].next_addr, + timer_next); + REG_WRITE(ah, gen_tmr_configuration[timer->index].period_addr, + timer_period); + REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, + gen_tmr_configuration[timer->index].mode_mask); + + /* Enable both trigger and thresh interrupt masks */ + REG_SET_BIT(ah, AR_IMR_S5, + (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | + SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); +} +EXPORT_SYMBOL(ath9k_hw_gen_timer_start); + +void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + + if ((timer->index < AR_FIRST_NDP_TIMER) || + (timer->index >= ATH_MAX_GEN_TIMER)) { + return; + } + + /* Clear generic timer enable bits. */ + REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, + gen_tmr_configuration[timer->index].mode_mask); + + /* Disable both trigger and thresh interrupt masks */ + REG_CLR_BIT(ah, AR_IMR_S5, + (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | + SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); + + clear_bit(timer->index, &timer_table->timer_mask.timer_bits); +} +EXPORT_SYMBOL(ath9k_hw_gen_timer_stop); + +void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + + /* free the hardware generic timer slot */ + timer_table->timers[timer->index] = NULL; + kfree(timer); +} +EXPORT_SYMBOL(ath_gen_timer_free); + +/* + * Generic Timer Interrupts handling + */ +void ath_gen_timer_isr(struct ath_hw *ah) +{ + struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; + struct ath_gen_timer *timer; + struct ath_common *common = ath9k_hw_common(ah); + u32 trigger_mask, thresh_mask, index; + + /* get hardware generic timer interrupt status */ + trigger_mask = ah->intr_gen_timer_trigger; + thresh_mask = ah->intr_gen_timer_thresh; + trigger_mask &= timer_table->timer_mask.val; + thresh_mask &= timer_table->timer_mask.val; + + trigger_mask &= ~thresh_mask; + + while (thresh_mask) { + index = rightmost_index(timer_table, &thresh_mask); + timer = timer_table->timers[index]; + BUG_ON(!timer); + ath_print(common, ATH_DBG_HWTIMER, + "TSF overflow for Gen timer %d\n", index); + timer->overflow(timer->arg); + } + + while (trigger_mask) { + index = rightmost_index(timer_table, &trigger_mask); + timer = timer_table->timers[index]; + BUG_ON(!timer); + ath_print(common, ATH_DBG_HWTIMER, + "Gen timer[%d] trigger\n", index); + timer->trigger(timer->arg); + } +} +EXPORT_SYMBOL(ath_gen_timer_isr); + +static struct { + u32 version; + const char * name; +} ath_mac_bb_names[] = { + /* Devices with external radios */ + { AR_SREV_VERSION_5416_PCI, "5416" }, + { AR_SREV_VERSION_5416_PCIE, "5418" }, + { AR_SREV_VERSION_9100, "9100" }, + { AR_SREV_VERSION_9160, "9160" }, + /* Single-chip solutions */ + { AR_SREV_VERSION_9280, "9280" }, + { AR_SREV_VERSION_9285, "9285" }, + { AR_SREV_VERSION_9287, "9287" }, + { AR_SREV_VERSION_9271, "9271" }, +}; + +/* For devices with external radios */ +static struct { + u16 version; + const char * name; +} ath_rf_names[] = { + { 0, "5133" }, + { AR_RAD5133_SREV_MAJOR, "5133" }, + { AR_RAD5122_SREV_MAJOR, "5122" }, + { AR_RAD2133_SREV_MAJOR, "2133" }, + { AR_RAD2122_SREV_MAJOR, "2122" } +}; + +/* + * Return the MAC/BB name. "????" 
is returned if the MAC/BB is unknown. + */ +static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version) +{ + int i; + + for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) { + if (ath_mac_bb_names[i].version == mac_bb_version) { + return ath_mac_bb_names[i].name; + } + } + + return "????"; +} +
+/* + * Return the RF name. "????" is returned if the RF is unknown. + */ +static const char *ath9k_hw_rf_name(u16 rf_version) +{ + int i; + + for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) { + if (ath_rf_names[i].version == rf_version) { + return ath_rf_names[i].name; + } + } + + return "????"; +} +
+void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len) +{ + int used; + + /* chipsets >= AR9280 are single-chip */ + if (AR_SREV_9280_10_OR_LATER(ah)) { + used = snprintf(hw_name, len, + "Atheros AR%s Rev:%x", + ath9k_hw_mac_bb_name(ah->hw_version.macVersion), + ah->hw_version.macRev); + } + else { + used = snprintf(hw_name, len, + "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x", + ath9k_hw_mac_bb_name(ah->hw_version.macVersion), + ah->hw_version.macRev, + ath9k_hw_rf_name((ah->hw_version.analog5GhzRev & + AR_RADIO_SREV_MAJOR)), + ah->hw_version.phyRev); + } + + hw_name[used] = '\0'; +} +EXPORT_SYMBOL(ath9k_hw_name); diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h new file mode 100644 index 0000000..dbbf7ca --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -0,0 +1,708 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +
+#ifndef HW_H +#define HW_H + +#include <linux/if_ether.h> +#include <linux/delay.h> +#include <linux/io.h> + +#include "mac.h" +#include "ani.h" +#include "eeprom.h" +#include "calib.h" +#include "reg.h" +#include "phy.h" +#include "btcoex.h" + +#include "../regd.h" +#include "../debug.h" +
+#define ATHEROS_VENDOR_ID 0x168c + +#define AR5416_DEVID_PCI 0x0023 +#define AR5416_DEVID_PCIE 0x0024 +#define AR9160_DEVID_PCI 0x0027 +#define AR9280_DEVID_PCI 0x0029 +#define AR9280_DEVID_PCIE 0x002a +#define AR9285_DEVID_PCIE 0x002b +#define AR2427_DEVID_PCIE 0x002c + +#define AR5416_AR9100_DEVID 0x000b + +#define AR9271_USB 0x9271 + +#define AR_SUBVENDOR_ID_NOG 0x0e11 +#define AR_SUBVENDOR_ID_NEW_A 0x7065 +#define AR5416_MAGIC 0x19641014 + +#define AR5416_DEVID_AR9287_PCI 0x002D +#define AR5416_DEVID_AR9287_PCIE 0x002E + +#define AR9280_COEX2WIRE_SUBSYSID 0x309b +#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa +#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab + +#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1) + +#define ATH_DEFAULT_NOISE_FLOOR -95 + +#define ATH9K_RSSI_BAD -128 +
+/* Register read/write primitives */ +#define REG_WRITE(_ah, _reg, _val) \ + ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg)) + +#define REG_READ(_ah, _reg) \ + ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) + +#define SM(_v, _f) (((_v) << _f##_S) & _f) +#define MS(_v, _f) (((_v) & _f) >> _f##_S) +#define REG_RMW(_a, _r, _set, _clr) \ + REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set)) +#define REG_RMW_FIELD(_a, _r, _f, _v) \ + REG_WRITE(_a, _r, \ + (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f)) +#define REG_SET_BIT(_a, _r, _f) \ + REG_WRITE(_a, _r, REG_READ(_a, _r) | _f) +#define REG_CLR_BIT(_a, _r, _f) \ + REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f) +
+#define DO_DELAY(x) do { \ + if ((++(x) % 64) == 0) \ + udelay(1); \ + } while (0) + +#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \ + int r; \ + for (r = 
0; r < ((iniarray)->ia_rows); r++) { \ + REG_WRITE(ah, INI_RA((iniarray), (r), 0), \ + INI_RA((iniarray), r, (column))); \ + DO_DELAY(regWr); \ + } \ + } while (0) + +#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0 +#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1 +#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2 +#define AR_GPIO_OUTPUT_MUX_AS_TX_FRAME 3 +#define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4 +#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5 +#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6 + +#define AR_GPIOD_MASK 0x00001FFF +#define AR_GPIO_BIT(_gpio) (1 << (_gpio)) + +#define BASE_ACTIVATE_DELAY 100 +#define RTC_PLL_SETTLE_DELAY 100 +#define COEF_SCALE_S 24 +#define HT40_CHANNEL_CENTER_SHIFT 10 + +#define ATH9K_ANTENNA0_CHAINMASK 0x1 +#define ATH9K_ANTENNA1_CHAINMASK 0x2 + +#define ATH9K_NUM_DMA_DEBUG_REGS 8 +#define ATH9K_NUM_QUEUES 10 + +#define MAX_RATE_POWER 63 +#define AH_WAIT_TIMEOUT 100000 /* (us) */ +#define AH_TSF_WRITE_TIMEOUT 100 /* (us) */ +#define AH_TIME_QUANTUM 10 +#define AR_KEYTABLE_SIZE 128 +#define POWER_UP_TIME 10000 +#define SPUR_RSSI_THRESH 40 + +#define CAB_TIMEOUT_VAL 10 +#define BEACON_TIMEOUT_VAL 10 +#define MIN_BEACON_TIMEOUT_VAL 1 +#define SLEEP_SLOP 3 + +#define INIT_CONFIG_STATUS 0x00000000 +#define INIT_RSSI_THR 0x00000700 +#define INIT_BCON_CNTRL_REG 0x00000000 + +#define TU_TO_USEC(_tu) ((_tu) << 10) + +enum wireless_mode { + ATH9K_MODE_11A = 0, + ATH9K_MODE_11G, + ATH9K_MODE_11NA_HT20, + ATH9K_MODE_11NG_HT20, + ATH9K_MODE_11NA_HT40PLUS, + ATH9K_MODE_11NA_HT40MINUS, + ATH9K_MODE_11NG_HT40PLUS, + ATH9K_MODE_11NG_HT40MINUS, + ATH9K_MODE_MAX, +}; + +enum ath9k_hw_caps { + ATH9K_HW_CAP_MIC_AESCCM = BIT(0), + ATH9K_HW_CAP_MIC_CKIP = BIT(1), + ATH9K_HW_CAP_MIC_TKIP = BIT(2), + ATH9K_HW_CAP_CIPHER_AESCCM = BIT(3), + ATH9K_HW_CAP_CIPHER_CKIP = BIT(4), + ATH9K_HW_CAP_CIPHER_TKIP = BIT(5), + ATH9K_HW_CAP_VEOL = BIT(6), + ATH9K_HW_CAP_BSSIDMASK = BIT(7), + ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(8), + ATH9K_HW_CAP_HT = BIT(9), + ATH9K_HW_CAP_GTT = BIT(10), + ATH9K_HW_CAP_FASTCC = BIT(11), + ATH9K_HW_CAP_RFSILENT = BIT(12), + ATH9K_HW_CAP_CST = BIT(13), + ATH9K_HW_CAP_ENHANCEDPM = BIT(14), + ATH9K_HW_CAP_AUTOSLEEP = BIT(15), + ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16), +}; + +enum ath9k_capability_type { + ATH9K_CAP_CIPHER = 0, + ATH9K_CAP_TKIP_MIC, + ATH9K_CAP_TKIP_SPLIT, + ATH9K_CAP_DIVERSITY, + ATH9K_CAP_TXPOW, + ATH9K_CAP_MCAST_KEYSRCH, + ATH9K_CAP_DS +}; + +struct ath9k_hw_capabilities { + u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */ + DECLARE_BITMAP(wireless_modes, ATH9K_MODE_MAX); /* ATH9K_MODE_* */ + u16 total_queues; + u16 keycache_size; + u16 low_5ghz_chan, high_5ghz_chan; + u16 low_2ghz_chan, high_2ghz_chan; + u16 rts_aggr_limit; + u8 tx_chainmask; + u8 rx_chainmask; + u16 tx_triglevel_max; + u16 reg_cap; + u8 num_gpio_pins; + u8 num_antcfg_2ghz; + u8 num_antcfg_5ghz; +}; + +struct ath9k_ops_config { + int dma_beacon_response_time; + int sw_beacon_response_time; + int additional_swba_backoff; + int ack_6mb; + int cwm_ignore_extcca; + u8 pcie_powersave_enable; + u8 pcie_clock_req; + u32 pcie_waen; + u8 analog_shiftreg; + u8 ht_enable; + u32 ofdm_trig_low; + u32 ofdm_trig_high; + u32 cck_trig_high; + u32 cck_trig_low; + u32 enable_ani; + int serialize_regmode; + bool rx_intr_mitigation; +#define SPUR_DISABLE 0 +#define SPUR_ENABLE_IOCTL 1 +#define SPUR_ENABLE_EEPROM 2 +#define AR_EEPROM_MODAL_SPURS 5 +#define AR_SPUR_5413_1 1640 +#define AR_SPUR_5413_2 1200 +#define AR_NO_SPUR 0x8000 +#define AR_BASE_FREQ_2GHZ 2300 +#define AR_BASE_FREQ_5GHZ 4900 +#define 
AR_SPUR_FEEQ_BOUND_HT40 19 +#define AR_SPUR_FEEQ_BOUND_HT20 10 + int spurmode; + u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; + u8 max_txtrig_level; +}; + +enum ath9k_int { + ATH9K_INT_RX = 0x00000001, + ATH9K_INT_RXDESC = 0x00000002, + ATH9K_INT_RXNOFRM = 0x00000008, + ATH9K_INT_RXEOL = 0x00000010, + ATH9K_INT_RXORN = 0x00000020, + ATH9K_INT_TX = 0x00000040, + ATH9K_INT_TXDESC = 0x00000080, + ATH9K_INT_TIM_TIMER = 0x00000100, + ATH9K_INT_TXURN = 0x00000800, + ATH9K_INT_MIB = 0x00001000, + ATH9K_INT_RXPHY = 0x00004000, + ATH9K_INT_RXKCM = 0x00008000, + ATH9K_INT_SWBA = 0x00010000, + ATH9K_INT_BMISS = 0x00040000, + ATH9K_INT_BNR = 0x00100000, + ATH9K_INT_TIM = 0x00200000, + ATH9K_INT_DTIM = 0x00400000, + ATH9K_INT_DTIMSYNC = 0x00800000, + ATH9K_INT_GPIO = 0x01000000, + ATH9K_INT_CABEND = 0x02000000, + ATH9K_INT_TSFOOR = 0x04000000, + ATH9K_INT_GENTIMER = 0x08000000, + ATH9K_INT_CST = 0x10000000, + ATH9K_INT_GTT = 0x20000000, + ATH9K_INT_FATAL = 0x40000000, + ATH9K_INT_GLOBAL = 0x80000000, + ATH9K_INT_BMISC = ATH9K_INT_TIM | + ATH9K_INT_DTIM | + ATH9K_INT_DTIMSYNC | + ATH9K_INT_TSFOOR | + ATH9K_INT_CABEND, + ATH9K_INT_COMMON = ATH9K_INT_RXNOFRM | + ATH9K_INT_RXDESC | + ATH9K_INT_RXEOL | + ATH9K_INT_RXORN | + ATH9K_INT_TXURN | + ATH9K_INT_TXDESC | + ATH9K_INT_MIB | + ATH9K_INT_RXPHY | + ATH9K_INT_RXKCM | + ATH9K_INT_SWBA | + ATH9K_INT_BMISS | + ATH9K_INT_GPIO, + ATH9K_INT_NOCARD = 0xffffffff +}; + +#define CHANNEL_CW_INT 0x00002 +#define CHANNEL_CCK 0x00020 +#define CHANNEL_OFDM 0x00040 +#define CHANNEL_2GHZ 0x00080 +#define CHANNEL_5GHZ 0x00100 +#define CHANNEL_PASSIVE 0x00200 +#define CHANNEL_DYN 0x00400 +#define CHANNEL_HALF 0x04000 +#define CHANNEL_QUARTER 0x08000 +#define CHANNEL_HT20 0x10000 +#define CHANNEL_HT40PLUS 0x20000 +#define CHANNEL_HT40MINUS 0x40000 + +#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM) +#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK) +#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM) +#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20) +#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20) +#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS) +#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS) +#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS) +#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS) +#define CHANNEL_ALL \ + (CHANNEL_OFDM| \ + CHANNEL_CCK| \ + CHANNEL_2GHZ | \ + CHANNEL_5GHZ | \ + CHANNEL_HT20 | \ + CHANNEL_HT40PLUS | \ + CHANNEL_HT40MINUS) + +struct ath9k_channel { + struct ieee80211_channel *chan; + u16 channel; + u32 channelFlags; + u32 chanmode; + int32_t CalValid; + bool oneTimeCalsDone; + int8_t iCoff; + int8_t qCoff; + int16_t rawNoiseFloor; +}; + +#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ + (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \ + (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \ + (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS)) +#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0) +#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0) +#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0) +#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0) +#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0) +#define IS_CHAN_A_5MHZ_SPACED(_c) \ + ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \ + (((_c)->channel % 20) != 0) && \ + (((_c)->channel % 10) != 0)) + +/* These macros check chanmode and not channelFlags */ +#define IS_CHAN_B(_c) 
((_c)->chanmode == CHANNEL_B) +#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \ + ((_c)->chanmode == CHANNEL_G_HT20)) +#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \ + ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \ + ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \ + ((_c)->chanmode == CHANNEL_G_HT40MINUS)) +#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c))) +
+enum ath9k_power_mode { + ATH9K_PM_AWAKE = 0, + ATH9K_PM_FULL_SLEEP, + ATH9K_PM_NETWORK_SLEEP, + ATH9K_PM_UNDEFINED +}; + +enum ath9k_tp_scale { + ATH9K_TP_SCALE_MAX = 0, + ATH9K_TP_SCALE_50, + ATH9K_TP_SCALE_25, + ATH9K_TP_SCALE_12, + ATH9K_TP_SCALE_MIN +}; + +enum ser_reg_mode { + SER_REG_MODE_OFF = 0, + SER_REG_MODE_ON = 1, + SER_REG_MODE_AUTO = 2, +}; +
+struct ath9k_beacon_state { + u32 bs_nexttbtt; + u32 bs_nextdtim; + u32 bs_intval; +#define ATH9K_BEACON_PERIOD 0x0000ffff +#define ATH9K_BEACON_ENA 0x00800000 +#define ATH9K_BEACON_RESET_TSF 0x01000000 +#define ATH9K_TSFOOR_THRESHOLD 0x00004240 /* 16k us */ + u32 bs_dtimperiod; + u16 bs_cfpperiod; + u16 bs_cfpmaxduration; + u32 bs_cfpnext; + u16 bs_timoffset; + u16 bs_bmissthreshold; + u32 bs_sleepduration; + u32 bs_tsfoor_threshold; +}; + +struct chan_centers { + u16 synth_center; + u16 ctl_center; + u16 ext_center; +}; + +enum { + ATH9K_RESET_POWER_ON, + ATH9K_RESET_WARM, + ATH9K_RESET_COLD, +}; + +struct ath9k_hw_version { + u32 magic; + u16 devid; + u16 subvendorid; + u32 macVersion; + u16 macRev; + u16 phyRev; + u16 analog5GhzRev; + u16 analog2GhzRev; + u16 subsysid; +}; +
+/* Generic TSF timer definitions */ + +#define ATH_MAX_GEN_TIMER 16 + +#define AR_GENTMR_BIT(_index) (1 << (_index)) + +/* + * Using de Bruijn sequence to look up 1's index in a 32 bit number + * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001 + */ +#define debruijn32 0x077CB531U + +struct ath_gen_timer_configuration { + u32 next_addr; + u32 period_addr; + u32 mode_addr; + u32 mode_mask; +}; + +struct ath_gen_timer { + void (*trigger)(void *arg); + void (*overflow)(void *arg); + void *arg; + u8 index; +}; + +struct ath_gen_timer_table { + u32 gen_timer_index[32]; + struct ath_gen_timer *timers[ATH_MAX_GEN_TIMER]; + union { + unsigned long timer_bits; + u16 val; + } timer_mask; +}; +
+struct ath_hw { + struct ieee80211_hw *hw; + struct ath_common common; + struct ath9k_hw_version hw_version; + struct ath9k_ops_config config; + struct ath9k_hw_capabilities caps; + struct ath9k_channel channels[38]; + struct ath9k_channel *curchan; + + union { + struct ar5416_eeprom_def def; + struct ar5416_eeprom_4k map4k; + struct ar9287_eeprom map9287; + } eeprom; + const struct eeprom_ops *eep_ops; + enum ath9k_eep_map eep_map; + + bool sw_mgmt_crypto; + bool is_pciexpress; + u16 tx_trig_level; + u16 rfsilent; + u32 rfkill_gpio; + u32 rfkill_polarity; + u32 ah_flags; + + bool htc_reset_init; + + enum nl80211_iftype opmode; + enum ath9k_power_mode power_mode; + + struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; + struct ath9k_pacal_info pacal_info; + struct ar5416Stats stats; + struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; + + int16_t curchan_rad_index; + u32 mask_reg; + u32 txok_interrupt_mask; + u32 txerr_interrupt_mask; + u32 txdesc_interrupt_mask; + u32 txeol_interrupt_mask; + u32 txurn_interrupt_mask; + bool chip_fullsleep; + u32 atim_window; + + /* Calibration */ + enum ath9k_cal_types supp_cals; + struct ath9k_cal_list iq_caldata; + struct ath9k_cal_list adcgain_caldata; + struct ath9k_cal_list adcdc_calinitdata; + struct ath9k_cal_list 
adcdc_caldata; + struct ath9k_cal_list *cal_list; + struct ath9k_cal_list *cal_list_last; + struct ath9k_cal_list *cal_list_curr; +#define totalPowerMeasI meas0.unsign +#define totalPowerMeasQ meas1.unsign +#define totalIqCorrMeas meas2.sign +#define totalAdcIOddPhase meas0.unsign +#define totalAdcIEvenPhase meas1.unsign +#define totalAdcQOddPhase meas2.unsign +#define totalAdcQEvenPhase meas3.unsign +#define totalAdcDcOffsetIOddPhase meas0.sign +#define totalAdcDcOffsetIEvenPhase meas1.sign +#define totalAdcDcOffsetQOddPhase meas2.sign +#define totalAdcDcOffsetQEvenPhase meas3.sign + union { + u32 unsign[AR5416_MAX_CHAINS]; + int32_t sign[AR5416_MAX_CHAINS]; + } meas0; + union { + u32 unsign[AR5416_MAX_CHAINS]; + int32_t sign[AR5416_MAX_CHAINS]; + } meas1; + union { + u32 unsign[AR5416_MAX_CHAINS]; + int32_t sign[AR5416_MAX_CHAINS]; + } meas2; + union { + u32 unsign[AR5416_MAX_CHAINS]; + int32_t sign[AR5416_MAX_CHAINS]; + } meas3; + u16 cal_samples; + + u32 sta_id1_defaults; + u32 misc_mode; + enum { + AUTO_32KHZ, + USE_32KHZ, + DONT_USE_32KHZ, + } enable_32kHz_clock; + + /* Callback for radio frequency change */ + int (*ath9k_hw_rf_set_freq)(struct ath_hw *ah, struct ath9k_channel *chan); + + /* Callback for baseband spur frequency */ + void (*ath9k_hw_spur_mitigate_freq)(struct ath_hw *ah, + struct ath9k_channel *chan); + + /* Used to program the radio on non single-chip devices */ + u32 *analogBank0Data; + u32 *analogBank1Data; + u32 *analogBank2Data; + u32 *analogBank3Data; + u32 *analogBank6Data; + u32 *analogBank6TPCData; + u32 *analogBank7Data; + u32 *addac5416_21; + u32 *bank6Temp; + + int16_t txpower_indexoffset; + int coverage_class; + u32 beacon_interval; + u32 slottime; + u32 globaltxtimeout; + + /* ANI */ + u32 proc_phyerr; + u32 aniperiod; + struct ar5416AniState *curani; + struct ar5416AniState ani[255]; + int totalSizeDesired[5]; + int coarse_high[5]; + int coarse_low[5]; + int firpwr[5]; + enum ath9k_ani_cmd ani_function; + + /* Bluetooth coexistance */ + struct ath_btcoex_hw btcoex_hw; + + u32 intr_txqs; + u8 txchainmask; + u8 rxchainmask; + + u32 originalGain[22]; + int initPDADC; + int PDADCdelta; + u8 led_pin; + + struct ar5416IniArray iniModes; + struct ar5416IniArray iniCommon; + struct ar5416IniArray iniBank0; + struct ar5416IniArray iniBB_RfGain; + struct ar5416IniArray iniBank1; + struct ar5416IniArray iniBank2; + struct ar5416IniArray iniBank3; + struct ar5416IniArray iniBank6; + struct ar5416IniArray iniBank6TPC; + struct ar5416IniArray iniBank7; + struct ar5416IniArray iniAddac; + struct ar5416IniArray iniPcieSerdes; + struct ar5416IniArray iniModesAdditional; + struct ar5416IniArray iniModesRxGain; + struct ar5416IniArray iniModesTxGain; + struct ar5416IniArray iniModes_9271_1_0_only; + struct ar5416IniArray iniCckfirNormal; + struct ar5416IniArray iniCckfirJapan2484; + + u32 intr_gen_timer_trigger; + u32 intr_gen_timer_thresh; + struct ath_gen_timer_table hw_gen_timers; +}; + +static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) +{ + return &ah->common; +} + +static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah) +{ + return &(ath9k_hw_common(ah)->regulatory); +} + +/* Initialization, Detach, Reset */ +const char *ath9k_hw_probe(u16 vendorid, u16 devid); +void ath9k_hw_deinit(struct ath_hw *ah); +int ath9k_hw_init(struct ath_hw *ah); +int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, + bool bChannelChange); +int ath9k_hw_fill_cap_info(struct ath_hw *ah); +bool ath9k_hw_getcapability(struct ath_hw *ah, enum 
ath9k_capability_type type, + u32 capability, u32 *result); +bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, + u32 capability, u32 setting, int *status); + +/* Key Cache Management */ +bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry); +bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac); +bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry, + const struct ath9k_keyval *k, + const u8 *mac); +bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry); + +/* GPIO / RFKILL / Antennae */ +void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio); +u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio); +void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, + u32 ah_signal_type); +void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val); +u32 ath9k_hw_getdefantenna(struct ath_hw *ah); +void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); + +/* General Operation */ +bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); +u32 ath9k_hw_reverse_bits(u32 val, u32 n); +bool ath9k_get_channel_edges(struct ath_hw *ah, u16 flags, u16 *low, u16 *high); +u16 ath9k_hw_computetxtime(struct ath_hw *ah, + u8 phy, int kbps, + u32 frameLen, u16 rateix, bool shortPreamble); +void ath9k_hw_get_channel_centers(struct ath_hw *ah, + struct ath9k_channel *chan, + struct chan_centers *centers); +u32 ath9k_hw_getrxfilter(struct ath_hw *ah); +void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits); +bool ath9k_hw_phy_disable(struct ath_hw *ah); +bool ath9k_hw_disable(struct ath_hw *ah); +void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit); +void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac); +void ath9k_hw_setopmode(struct ath_hw *ah); +void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); +void ath9k_hw_setbssidmask(struct ath_hw *ah); +void ath9k_hw_write_associd(struct ath_hw *ah); +u64 ath9k_hw_gettsf64(struct ath_hw *ah); +void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); +void ath9k_hw_reset_tsf(struct ath_hw *ah); +void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); +u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp); +void ath9k_hw_init_global_settings(struct ath_hw *ah); +void ath9k_hw_set11nmac2040(struct ath_hw *ah); +void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); +void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, + const struct ath9k_beacon_state *bs); + +bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode); + +void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off); + +/* Interrupt Handling */ +bool ath9k_hw_intrpend(struct ath_hw *ah); +bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked); +enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints); + +/* Generic hw timer primitives */ +struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, + void (*trigger)(void *), + void (*overflow)(void *), + void *arg, + u8 timer_index); +void ath9k_hw_gen_timer_start(struct ath_hw *ah, + struct ath_gen_timer *timer, + u32 timer_next, + u32 timer_period); +void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer); + +void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer); +void ath_gen_timer_isr(struct ath_hw *hw); +u32 ath9k_hw_gettsf32(struct ath_hw *ah); + +void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len); + +#define ATH_PCIE_CAP_LINK_CTRL 0x70 +#define ATH_PCIE_CAP_LINK_L0S 1 +#define ATH_PCIE_CAP_LINK_L1 2 + 
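+/* ATH_PCIE_CAP_LINK_L0S and ATH_PCIE_CAP_LINK_L1 correspond to the L0s and
+ * L1 ASPM enable bits of the PCIe Link Control register, which the PCI glue
+ * accesses at config space offset ATH_PCIE_CAP_LINK_CTRL on these chips. */
+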
+#endif diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c new file mode 100644 index 0000000..623c2f8 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -0,0 +1,863 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ath9k.h" + +static char *dev_info = "ath9k"; + +MODULE_AUTHOR("Atheros Communications"); +MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); +MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); +MODULE_LICENSE("Dual BSD/GPL"); + +static unsigned int ath9k_debug = ATH_DBG_DEFAULT; +module_param_named(debug, ath9k_debug, uint, 0); +MODULE_PARM_DESC(debug, "Debugging mask"); + +int modparam_nohwcrypt; +module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); + +/* We use the hw_value as an index into our private channel structure */ + +#define CHAN2G(_freq, _idx) { \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 20, \ +} + +#define CHAN5G(_freq, _idx) { \ + .band = IEEE80211_BAND_5GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 20, \ +} + +/* Some 2 GHz radios are actually tunable on 2312-2732 + * on 5 MHz steps, we support the channels which we know + * we have calibration data for all cards though to make + * this static */ +static struct ieee80211_channel ath9k_2ghz_chantable[] = { + CHAN2G(2412, 0), /* Channel 1 */ + CHAN2G(2417, 1), /* Channel 2 */ + CHAN2G(2422, 2), /* Channel 3 */ + CHAN2G(2427, 3), /* Channel 4 */ + CHAN2G(2432, 4), /* Channel 5 */ + CHAN2G(2437, 5), /* Channel 6 */ + CHAN2G(2442, 6), /* Channel 7 */ + CHAN2G(2447, 7), /* Channel 8 */ + CHAN2G(2452, 8), /* Channel 9 */ + CHAN2G(2457, 9), /* Channel 10 */ + CHAN2G(2462, 10), /* Channel 11 */ + CHAN2G(2467, 11), /* Channel 12 */ + CHAN2G(2472, 12), /* Channel 13 */ + CHAN2G(2484, 13), /* Channel 14 */ +}; + +/* Some 5 GHz radios are actually tunable on XXXX-YYYY + * on 5 MHz steps, we support the channels which we know + * we have calibration data for all cards though to make + * this static */ +static struct ieee80211_channel ath9k_5ghz_chantable[] = { + /* _We_ call this UNII 1 */ + CHAN5G(5180, 14), /* Channel 36 */ + CHAN5G(5200, 15), /* Channel 40 */ + CHAN5G(5220, 16), /* Channel 44 */ + CHAN5G(5240, 17), /* Channel 48 */ + /* _We_ call this UNII 2 */ + CHAN5G(5260, 18), /* Channel 52 */ + CHAN5G(5280, 19), /* Channel 56 */ + CHAN5G(5300, 20), /* Channel 60 */ + CHAN5G(5320, 21), /* Channel 64 */ + /* _We_ call this "Middle band" */ + CHAN5G(5500, 22), /* Channel 100 */ + CHAN5G(5520, 23), /* Channel 104 */ + CHAN5G(5540, 24), /* Channel 108 */ + CHAN5G(5560, 25), /* Channel 112 */ + CHAN5G(5580, 26), /* Channel 116 */ + CHAN5G(5600, 27), /* Channel 120 */ + 
CHAN5G(5620, 28), /* Channel 124 */ + CHAN5G(5640, 29), /* Channel 128 */ + CHAN5G(5660, 30), /* Channel 132 */ + CHAN5G(5680, 31), /* Channel 136 */ + CHAN5G(5700, 32), /* Channel 140 */ + /* _We_ call this UNII 3 */ + CHAN5G(5745, 33), /* Channel 149 */ + CHAN5G(5765, 34), /* Channel 153 */ + CHAN5G(5785, 35), /* Channel 157 */ + CHAN5G(5805, 36), /* Channel 161 */ + CHAN5G(5825, 37), /* Channel 165 */ +}; +
+/* Atheros hardware rate code addition for short preamble */ +#define SHPCHECK(__hw_rate, __flags) \ + ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0) + +#define RATE(_bitrate, _hw_rate, _flags) { \ + .bitrate = (_bitrate), \ + .flags = (_flags), \ + .hw_value = (_hw_rate), \ + .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \ +} +
+static struct ieee80211_rate ath9k_legacy_rates[] = { + RATE(10, 0x1b, 0), + RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), + RATE(60, 0x0b, 0), + RATE(90, 0x0f, 0), + RATE(120, 0x0a, 0), + RATE(180, 0x0e, 0), + RATE(240, 0x09, 0), + RATE(360, 0x0d, 0), + RATE(480, 0x08, 0), + RATE(540, 0x0c, 0), +}; + +static void ath9k_deinit_softc(struct ath_softc *sc); +
+/* + * Read and write, they both share the same lock. We do this to serialize + * reads and writes on Atheros 802.11n PCI devices only. This is required + * as the FIFO on these devices can sanely accept only 2 requests. + */ +
+static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset) +{ + struct ath_hw *ah = (struct ath_hw *) hw_priv; + struct ath_common *common = ath9k_hw_common(ah); + struct ath_softc *sc = (struct ath_softc *) common->priv; + + if (ah->config.serialize_regmode == SER_REG_MODE_ON) { + unsigned long flags; + spin_lock_irqsave(&sc->sc_serial_rw, flags); + iowrite32(val, sc->mem + reg_offset); + spin_unlock_irqrestore(&sc->sc_serial_rw, flags); + } else + iowrite32(val, sc->mem + reg_offset); +} +
+static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset) +{ + struct ath_hw *ah = (struct ath_hw *) hw_priv; + struct ath_common *common = ath9k_hw_common(ah); + struct ath_softc *sc = (struct ath_softc *) common->priv; + u32 val; + + if (ah->config.serialize_regmode == SER_REG_MODE_ON) { + unsigned long flags; + spin_lock_irqsave(&sc->sc_serial_rw, flags); + val = ioread32(sc->mem + reg_offset); + spin_unlock_irqrestore(&sc->sc_serial_rw, flags); + } else + val = ioread32(sc->mem + reg_offset); + return val; +} +
+static const struct ath_ops ath9k_common_ops = { + .read = ath9k_ioread32, + .write = ath9k_iowrite32, +}; + +/**************************/ +/* Initialization */ +/**************************/ +
+static void setup_ht_cap(struct ath_softc *sc, + struct ieee80211_sta_ht_cap *ht_info) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + u8 tx_streams, rx_streams; + + ht_info->ht_supported = true; + ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_SM_PS | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_DSSSCCK40; + + ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; + + /* set up supported mcs set */ + memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); + tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ? + 1 : 2; + rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ? 
+ 1 : 2; + + if (tx_streams != rx_streams) { + ath_print(common, ATH_DBG_CONFIG, + "TX streams %d, RX streams: %d\n", + tx_streams, rx_streams); + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= ((tx_streams - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } + + ht_info->mcs.rx_mask[0] = 0xff; + if (rx_streams >= 2) + ht_info->mcs.rx_mask[1] = 0xff; + + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; +} + +static int ath9k_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah); + + return ath_reg_notifier_apply(wiphy, request, reg); +} + +/* + * This function will allocate both the DMA descriptor structure, and the + * buffers it contains. These are used to contain the descriptors used + * by the system. +*/ +int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, + struct list_head *head, const char *name, + int nbuf, int ndesc) +{ +#define DS2PHYS(_dd, _ds) \ + ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) +#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0) +#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096) + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_desc *ds; + struct ath_buf *bf; + int i, bsize, error; + + ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n", + name, nbuf, ndesc); + + INIT_LIST_HEAD(head); + /* ath_desc must be a multiple of DWORDs */ + if ((sizeof(struct ath_desc) % 4) != 0) { + ath_print(common, ATH_DBG_FATAL, + "ath_desc not DWORD aligned\n"); + BUG_ON((sizeof(struct ath_desc) % 4) != 0); + error = -ENOMEM; + goto fail; + } + + dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; + + /* + * Need additional DMA memory because we can't use + * descriptors that cross the 4K page boundary. Assume + * one skipped descriptor per 4K page. + */ + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) { + u32 ndesc_skipped = + ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len); + u32 dma_len; + + while (ndesc_skipped) { + dma_len = ndesc_skipped * sizeof(struct ath_desc); + dd->dd_desc_len += dma_len; + + ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len); + }; + } + + /* allocate descriptors */ + dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len, + &dd->dd_desc_paddr, GFP_KERNEL); + if (dd->dd_desc == NULL) { + error = -ENOMEM; + goto fail; + } + ds = dd->dd_desc; + ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", + name, ds, (u32) dd->dd_desc_len, + ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); + + /* allocate buffers */ + bsize = sizeof(struct ath_buf) * nbuf; + bf = kzalloc(bsize, GFP_KERNEL); + if (bf == NULL) { + error = -ENOMEM; + goto fail2; + } + dd->dd_bufptr = bf; + + for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { + bf->bf_desc = ds; + bf->bf_daddr = DS2PHYS(dd, ds); + + if (!(sc->sc_ah->caps.hw_caps & + ATH9K_HW_CAP_4KB_SPLITTRANS)) { + /* + * Skip descriptor addresses which can cause 4KB + * boundary crossing (addr + length) with a 32 dword + * descriptor fetch. 
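+ * (ATH_DESC_4KB_BOUND_CHECK above flags descriptor addresses that fall
+ * in the last 0x80 bytes of a 4K page, i.e. within 32 dwords of the
+ * boundary.)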
+ */ + while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) { + BUG_ON((caddr_t) bf->bf_desc >= + ((caddr_t) dd->dd_desc + + dd->dd_desc_len)); + + ds += ndesc; + bf->bf_desc = ds; + bf->bf_daddr = DS2PHYS(dd, ds); + } + } + list_add_tail(&bf->list, head); + } + return 0; +fail2: + dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc, + dd->dd_desc_paddr); +fail: + memset(dd, 0, sizeof(*dd)); + return error; +#undef ATH_DESC_4KB_BOUND_CHECK +#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED +#undef DS2PHYS +} + +static void ath9k_init_crypto(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + int i = 0; + + /* Get the hardware key cache size. */ + common->keymax = sc->sc_ah->caps.keycache_size; + if (common->keymax > ATH_KEYMAX) { + ath_print(common, ATH_DBG_ANY, + "Warning, using only %u entries in %u key cache\n", + ATH_KEYMAX, common->keymax); + common->keymax = ATH_KEYMAX; + } + + /* + * Reset the key cache since some parts do not + * reset the contents on initial power up. + */ + for (i = 0; i < common->keymax; i++) + ath9k_hw_keyreset(sc->sc_ah, (u16) i); + + if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER, + ATH9K_CIPHER_TKIP, NULL)) { + /* + * Whether we should enable h/w TKIP MIC. + * XXX: if we don't support WME TKIP MIC, then we wouldn't + * report WMM capable, so it's always safe to turn on + * TKIP MIC in this case. + */ + ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL); + } + + /* + * Check whether the separate key cache entries + * are required to handle both tx+rx MIC keys. + * With split mic keys the number of stations is limited + * to 27 otherwise 59. + */ + if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER, + ATH9K_CIPHER_TKIP, NULL) + && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER, + ATH9K_CIPHER_MIC, NULL) + && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT, + 0, NULL)) + common->splitmic = 1; + + /* turn on mcast key search if possible */ + if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL)) + (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, + 1, 1, NULL); + +} + +static int ath9k_init_btcoex(struct ath_softc *sc) +{ + int r, qnum; + + switch (sc->sc_ah->btcoex_hw.scheme) { + case ATH_BTCOEX_CFG_NONE: + break; + case ATH_BTCOEX_CFG_2WIRE: + ath9k_hw_btcoex_init_2wire(sc->sc_ah); + break; + case ATH_BTCOEX_CFG_3WIRE: + ath9k_hw_btcoex_init_3wire(sc->sc_ah); + r = ath_init_btcoex_timer(sc); + if (r) + return -1; + qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); + ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum); + sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; + break; + default: + WARN_ON(1); + break; + } + + return 0; +} + +static int ath9k_init_queues(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + int i = 0; + + for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++) + sc->tx.hwq_map[i] = -1; + + sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah); + if (sc->beacon.beaconq == -1) { + ath_print(common, ATH_DBG_FATAL, + "Unable to setup a beacon xmit queue\n"); + goto err; + } + + sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0); + if (sc->beacon.cabq == NULL) { + ath_print(common, ATH_DBG_FATAL, + "Unable to setup CAB xmit queue\n"); + goto err; + } + + sc->config.cabqReadytime = ATH_CABQ_READY_TIME; + ath_cabq_update(sc); + + if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) { + ath_print(common, ATH_DBG_FATAL, + "Unable to setup xmit queue for BK traffic\n"); + goto err; + } + + if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) { 
+ ath_print(common, ATH_DBG_FATAL, + "Unable to setup xmit queue for BE traffic\n"); + goto err; + } + if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) { + ath_print(common, ATH_DBG_FATAL, + "Unable to setup xmit queue for VI traffic\n"); + goto err; + } + if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) { + ath_print(common, ATH_DBG_FATAL, + "Unable to setup xmit queue for VO traffic\n"); + goto err; + } + + return 0; + +err: + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) + if (ATH_TXQ_SETUP(sc, i)) + ath_tx_cleanupq(sc, &sc->tx.txq[i]); + + return -EIO; +} + +static void ath9k_init_channels_rates(struct ath_softc *sc) +{ + if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) { + sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable; + sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; + sc->sbands[IEEE80211_BAND_2GHZ].n_channels = + ARRAY_SIZE(ath9k_2ghz_chantable); + sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates; + sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates = + ARRAY_SIZE(ath9k_legacy_rates); + } + + if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) { + sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable; + sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; + sc->sbands[IEEE80211_BAND_5GHZ].n_channels = + ARRAY_SIZE(ath9k_5ghz_chantable); + sc->sbands[IEEE80211_BAND_5GHZ].bitrates = + ath9k_legacy_rates + 4; + sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates = + ARRAY_SIZE(ath9k_legacy_rates) - 4; + } +} + +static void ath9k_init_misc(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + int i = 0; + + common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR; + setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc); + + sc->config.txpowlimit = ATH_TXPOWER_MAX; + + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { + sc->sc_flags |= SC_OP_TXAGGR; + sc->sc_flags |= SC_OP_RXAGGR; + } + + common->tx_chainmask = sc->sc_ah->caps.tx_chainmask; + common->rx_chainmask = sc->sc_ah->caps.rx_chainmask; + + ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL); + sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah); + + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) + memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); + + sc->beacon.slottime = ATH9K_SLOT_TIME_9; + + for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { + sc->beacon.bslot[i] = NULL; + sc->beacon.bslot_aphy[i] = NULL; + } +} + +static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid, + const struct ath_bus_ops *bus_ops) +{ + struct ath_hw *ah = NULL; + struct ath_common *common; + int ret = 0, i; + int csz = 0; + + ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL); + if (!ah) + return -ENOMEM; + + ah->hw_version.devid = devid; + ah->hw_version.subsysid = subsysid; + sc->sc_ah = ah; + + common = ath9k_hw_common(ah); + common->ops = &ath9k_common_ops; + common->bus_ops = bus_ops; + common->ah = ah; + common->hw = sc->hw; + common->priv = sc; + common->debug_mask = ath9k_debug; + + spin_lock_init(&sc->wiphy_lock); + spin_lock_init(&sc->sc_resetlock); + spin_lock_init(&sc->sc_serial_rw); + spin_lock_init(&sc->sc_pm_lock); + mutex_init(&sc->mutex); + tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); + tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet, + (unsigned long)sc); + + /* + * Cache line size is used to size and align various + * structures used to communicate with the hardware. 
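+ * The value read below is in units of 32-bit words (as in the PCI
+ * cache line size register); the << 2 converts it to bytes.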
+ */ + ath_read_cachesize(common, &csz); + common->cachelsz = csz << 2; /* convert to bytes */ + + ret = ath9k_hw_init(ah); + if (ret) { + ath_print(common, ATH_DBG_FATAL, + "Unable to initialize hardware; " + "initialization status: %d\n", ret); + goto err_hw; + } + + ret = ath9k_init_debug(ah); + if (ret) { + ath_print(common, ATH_DBG_FATAL, + "Unable to create debugfs files\n"); + goto err_debug; + } + + ret = ath9k_init_queues(sc); + if (ret) + goto err_queues; + + ret = ath9k_init_btcoex(sc); + if (ret) + goto err_btcoex; + + ath9k_init_crypto(sc); + ath9k_init_channels_rates(sc); + ath9k_init_misc(sc); + + return 0; + +err_btcoex: + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) + if (ATH_TXQ_SETUP(sc, i)) + ath_tx_cleanupq(sc, &sc->tx.txq[i]); +err_queues: + ath9k_exit_debug(ah); +err_debug: + ath9k_hw_deinit(ah); +err_hw: + tasklet_kill(&sc->intr_tq); + tasklet_kill(&sc->bcon_tasklet); + + kfree(ah); + sc->sc_ah = NULL; + + return ret; +} + +void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_PS_NULLFUNC_STACK | + IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_REPORTS_TX_ACK_STATUS; + + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) + hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + + if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt) + hw->flags |= IEEE80211_HW_MFP_CAPABLE; + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_MESH_POINT); + + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->queues = 4; + hw->max_rates = 4; + hw->channel_change_time = 5000; + hw->max_listen_interval = 10; + hw->max_rate_tries = 10; + hw->sta_data_size = sizeof(struct ath_node); + hw->vif_data_size = sizeof(struct ath_vif); + + hw->rate_control_algorithm = "ath9k_rate_control"; + + if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &sc->sbands[IEEE80211_BAND_2GHZ]; + if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &sc->sbands[IEEE80211_BAND_5GHZ]; + + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { + if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) + setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap); + if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) + setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap); + } + + SET_IEEE80211_PERM_ADDR(hw, common->macaddr); +} + +int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, + const struct ath_bus_ops *bus_ops) +{ + struct ieee80211_hw *hw = sc->hw; + struct ath_common *common; + struct ath_hw *ah; + int error = 0; + struct ath_regulatory *reg; + + /* Bring up device */ + error = ath9k_init_softc(devid, sc, subsysid, bus_ops); + if (error != 0) + goto error_init; + + ah = sc->sc_ah; + common = ath9k_hw_common(ah); + ath9k_set_hw_capab(sc, hw); + + /* Initialize regulatory */ + error = ath_regd_init(&common->regulatory, sc->hw->wiphy, + ath9k_reg_notifier); + if (error) + goto error_regd; + + reg = &common->regulatory; + + /* Setup TX DMA */ + error = ath_tx_init(sc, ATH_TXBUF); + if (error != 0) + goto error_tx; + + /* Setup RX DMA */ + error = ath_rx_init(sc, ATH_RXBUF); + if (error != 0) + goto error_rx; + + /* Register with mac80211 */ + error = 
ieee80211_register_hw(hw); + if (error) + goto error_register; + + /* Handle world regulatory */ + if (!ath_is_world_regd(reg)) { + error = regulatory_hint(hw->wiphy, reg->alpha2); + if (error) + goto error_world; + } + + INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); + INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work); + sc->wiphy_scheduler_int = msecs_to_jiffies(500); + + ath_init_leds(sc); + ath_start_rfkill_poll(sc); + + return 0; + +error_world: + ieee80211_unregister_hw(hw); +error_register: + ath_rx_cleanup(sc); +error_rx: + ath_tx_cleanup(sc); +error_tx: + /* Nothing */ +error_regd: + ath9k_deinit_softc(sc); +error_init: + return error; +} + +/*****************************/ +/* De-Initialization */ +/*****************************/ + +static void ath9k_deinit_softc(struct ath_softc *sc) +{ + int i = 0; + + if ((sc->btcoex.no_stomp_timer) && + sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) + ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer); + + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) + if (ATH_TXQ_SETUP(sc, i)) + ath_tx_cleanupq(sc, &sc->tx.txq[i]); + + ath9k_exit_debug(sc->sc_ah); + ath9k_hw_deinit(sc->sc_ah); + + tasklet_kill(&sc->intr_tq); + tasklet_kill(&sc->bcon_tasklet); +} + +void ath9k_deinit_device(struct ath_softc *sc) +{ + struct ieee80211_hw *hw = sc->hw; + int i = 0; + + ath9k_ps_wakeup(sc); + + wiphy_rfkill_stop_polling(sc->hw->wiphy); + ath_deinit_leds(sc); + + for (i = 0; i < sc->num_sec_wiphy; i++) { + struct ath_wiphy *aphy = sc->sec_wiphy[i]; + if (aphy == NULL) + continue; + sc->sec_wiphy[i] = NULL; + ieee80211_unregister_hw(aphy->hw); + ieee80211_free_hw(aphy->hw); + } + kfree(sc->sec_wiphy); + + ieee80211_unregister_hw(hw); + ath_rx_cleanup(sc); + ath_tx_cleanup(sc); + ath9k_deinit_softc(sc); +} + +void ath_descdma_cleanup(struct ath_softc *sc, + struct ath_descdma *dd, + struct list_head *head) +{ + dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc, + dd->dd_desc_paddr); + + INIT_LIST_HEAD(head); + kfree(dd->dd_bufptr); + memset(dd, 0, sizeof(*dd)); +} + +/************************/ +/* Module Hooks */ +/************************/ + +static int __init ath9k_init(void) +{ + int error; + + /* Register rate control algorithm */ + error = ath_rate_control_register(); + if (error != 0) { + printk(KERN_ERR + "ath9k: Unable to register rate control " + "algorithm: %d\n", + error); + goto err_out; + } + + error = ath9k_debug_create_root(); + if (error) { + printk(KERN_ERR + "ath9k: Unable to create debugfs root: %d\n", + error); + goto err_rate_unregister; + } + + error = ath_pci_init(); + if (error < 0) { + printk(KERN_ERR + "ath9k: No PCI devices found, driver not installed.\n"); + error = -ENODEV; + goto err_remove_root; + } + + error = ath_ahb_init(); + if (error < 0) { + error = -ENODEV; + goto err_pci_exit; + } + + return 0; + + err_pci_exit: + ath_pci_exit(); + + err_remove_root: + ath9k_debug_remove_root(); + err_rate_unregister: + ath_rate_control_unregister(); + err_out: + return error; +} +module_init(ath9k_init); + +static void __exit ath9k_exit(void) +{ + ath_ahb_exit(); + ath_pci_exit(); + ath9k_debug_remove_root(); + ath_rate_control_unregister(); + printk(KERN_INFO "%s: Driver unloaded\n", dev_info); +} +module_exit(ath9k_exit); diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/initvals.h new file mode 100644 index 0000000..8a3bf3a --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/initvals.h @@ -0,0 +1,7052 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +static const u32 ar5416Modes[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, + { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, + { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 }, + { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de }, + { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, + { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e }, + { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 }, + { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 }, + { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, + { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 }, + { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b }, + { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 }, + { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, + { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, + { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, + { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 }, + { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 }, + { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, + { 0x000099c4, 0x06336f77, 
0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, + { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, + { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, + { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, + { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, + { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, + { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, + { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, + { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, + { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, + { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, + { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, + { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, + { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, +}; + +static const u32 ar5416Common[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + 
{ 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00007010, 0x00000000 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x000080c0, 0x2a82301a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0xffffffff }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c4, 0x00000000 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 
0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008264, 0xa8000010 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x00008300, 0x00000000 }, + { 0x00008304, 0x00000000 }, + { 0x00008308, 0x00000000 }, + { 0x0000830c, 0x00000000 }, + { 0x00008310, 0x00000000 }, + { 0x00008314, 0x00000000 }, + { 0x00008318, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00070000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xad848e19 }, + { 0x00009810, 0x7d14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x00009840, 0x206a002e }, + { 0x0000984c, 0x1284233c }, + { 0x00009854, 0x00000859 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x05100000 }, + { 0x0000a920, 0x05100000 }, + { 0x0000b920, 0x05100000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280b212 }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5d50e188 }, + { 0x00009958, 0x00081fff }, + { 0x0000c95c, 0x004b6a8e }, + { 0x0000c968, 0x000003ce }, + { 0x00009970, 0x190fb515 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x001fff00 }, + { 0x000099ac, 0x00000000 }, + { 0x000099b0, 0x03051000 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000200 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x000000aa }, + { 0x000099fc, 0x00001042 }, + { 0x00009b00, 0x00000000 }, + { 0x00009b04, 0x00000001 }, + { 0x00009b08, 0x00000002 }, + { 0x00009b0c, 0x00000003 }, + { 0x00009b10, 0x00000004 }, + { 0x00009b14, 0x00000005 }, + { 0x00009b18, 0x00000008 }, + { 0x00009b1c, 0x00000009 }, + { 0x00009b20, 0x0000000a }, + { 0x00009b24, 0x0000000b }, + { 0x00009b28, 0x0000000c }, + { 0x00009b2c, 0x0000000d }, + { 0x00009b30, 0x00000010 }, + { 0x00009b34, 0x00000011 }, + { 0x00009b38, 0x00000012 }, + { 0x00009b3c, 0x00000013 }, + { 0x00009b40, 0x00000014 }, + { 0x00009b44, 0x00000015 }, + { 0x00009b48, 0x00000018 }, + { 0x00009b4c, 0x00000019 }, + { 0x00009b50, 0x0000001a }, + { 0x00009b54, 0x0000001b }, + { 0x00009b58, 0x0000001c }, + { 0x00009b5c, 0x0000001d }, + { 0x00009b60, 0x00000020 }, + { 0x00009b64, 0x00000021 }, + { 0x00009b68, 0x00000022 }, + { 0x00009b6c, 0x00000023 }, + { 0x00009b70, 0x00000024 }, + { 0x00009b74, 0x00000025 }, + { 0x00009b78, 0x00000028 }, + { 0x00009b7c, 0x00000029 }, + { 0x00009b80, 0x0000002a }, + { 0x00009b84, 0x0000002b }, + { 0x00009b88, 0x0000002c }, + { 0x00009b8c, 0x0000002d }, + { 0x00009b90, 0x00000030 }, + { 0x00009b94, 0x00000031 }, + { 0x00009b98, 0x00000032 }, + { 0x00009b9c, 0x00000033 }, 
+ { 0x00009ba0, 0x00000034 }, + { 0x00009ba4, 0x00000035 }, + { 0x00009ba8, 0x00000035 }, + { 0x00009bac, 0x00000035 }, + { 0x00009bb0, 0x00000035 }, + { 0x00009bb4, 0x00000035 }, + { 0x00009bb8, 0x00000035 }, + { 0x00009bbc, 0x00000035 }, + { 0x00009bc0, 0x00000035 }, + { 0x00009bc4, 0x00000035 }, + { 0x00009bc8, 0x00000035 }, + { 0x00009bcc, 0x00000035 }, + { 0x00009bd0, 0x00000035 }, + { 0x00009bd4, 0x00000035 }, + { 0x00009bd8, 0x00000035 }, + { 0x00009bdc, 0x00000035 }, + { 0x00009be0, 0x00000035 }, + { 0x00009be4, 0x00000035 }, + { 0x00009be8, 0x00000035 }, + { 0x00009bec, 0x00000035 }, + { 0x00009bf0, 0x00000035 }, + { 0x00009bf4, 0x00000035 }, + { 0x00009bf8, 0x00000010 }, + { 0x00009bfc, 0x0000001a }, + { 0x0000a210, 0x40806333 }, + { 0x0000a214, 0x00106c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x018830c6 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x00000bb5 }, + { 0x0000a22c, 0x00000011 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c889af }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00007bb6 }, + { 0x0000a248, 0x0fff3ffc }, + { 0x0000a24c, 0x00000001 }, + { 0x0000a250, 0x0000a000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cc75380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0e79e5c6 }, + { 0x0000b26c, 0x0e79e5c6 }, + { 0x0000c26c, 0x0e79e5c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x051701ce }, + { 0x0000a338, 0x00000000 }, + { 0x0000a33c, 0x00000000 }, + { 0x0000a340, 0x00000000 }, + { 0x0000a344, 0x00000000 }, + { 0x0000a348, 0x3fffffff }, + { 0x0000a34c, 0x3fffffff }, + { 0x0000a350, 0x3fffffff }, + { 0x0000a354, 0x0003ffff }, + { 0x0000a358, 0x79a8aa1f }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x08000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, +}; + +static const u32 ar5416Bank0[][2] = { + { 0x000098b0, 0x1e5795e5 }, + { 0x000098e0, 0x02008020 }, +}; + +static const u32 ar5416BB_RfGain[][3] = { + { 0x00009a00, 0x00000000, 0x00000000 }, + { 0x00009a04, 0x00000040, 0x00000040 }, + { 0x00009a08, 0x00000080, 0x00000080 }, + { 0x00009a0c, 0x000001a1, 0x00000141 }, + { 0x00009a10, 0x000001e1, 0x00000181 }, + { 0x00009a14, 0x00000021, 0x000001c1 }, + { 0x00009a18, 0x00000061, 0x00000001 }, + { 0x00009a1c, 0x00000168, 0x00000041 }, + { 0x00009a20, 0x000001a8, 0x000001a8 }, + { 0x00009a24, 0x000001e8, 0x000001e8 }, + { 0x00009a28, 0x00000028, 0x00000028 }, + { 0x00009a2c, 0x00000068, 0x00000068 }, + { 0x00009a30, 0x00000189, 0x000000a8 }, + { 0x00009a34, 0x000001c9, 0x00000169 }, + { 0x00009a38, 0x00000009, 0x000001a9 }, + { 
0x00009a3c, 0x00000049, 0x000001e9 }, + { 0x00009a40, 0x00000089, 0x00000029 }, + { 0x00009a44, 0x00000170, 0x00000069 }, + { 0x00009a48, 0x000001b0, 0x00000190 }, + { 0x00009a4c, 0x000001f0, 0x000001d0 }, + { 0x00009a50, 0x00000030, 0x00000010 }, + { 0x00009a54, 0x00000070, 0x00000050 }, + { 0x00009a58, 0x00000191, 0x00000090 }, + { 0x00009a5c, 0x000001d1, 0x00000151 }, + { 0x00009a60, 0x00000011, 0x00000191 }, + { 0x00009a64, 0x00000051, 0x000001d1 }, + { 0x00009a68, 0x00000091, 0x00000011 }, + { 0x00009a6c, 0x000001b8, 0x00000051 }, + { 0x00009a70, 0x000001f8, 0x00000198 }, + { 0x00009a74, 0x00000038, 0x000001d8 }, + { 0x00009a78, 0x00000078, 0x00000018 }, + { 0x00009a7c, 0x00000199, 0x00000058 }, + { 0x00009a80, 0x000001d9, 0x00000098 }, + { 0x00009a84, 0x00000019, 0x00000159 }, + { 0x00009a88, 0x00000059, 0x00000199 }, + { 0x00009a8c, 0x00000099, 0x000001d9 }, + { 0x00009a90, 0x000000d9, 0x00000019 }, + { 0x00009a94, 0x000000f9, 0x00000059 }, + { 0x00009a98, 0x000000f9, 0x00000099 }, + { 0x00009a9c, 0x000000f9, 0x000000d9 }, + { 0x00009aa0, 0x000000f9, 0x000000f9 }, + { 0x00009aa4, 0x000000f9, 0x000000f9 }, + { 0x00009aa8, 0x000000f9, 0x000000f9 }, + { 0x00009aac, 0x000000f9, 0x000000f9 }, + { 0x00009ab0, 0x000000f9, 0x000000f9 }, + { 0x00009ab4, 0x000000f9, 0x000000f9 }, + { 0x00009ab8, 0x000000f9, 0x000000f9 }, + { 0x00009abc, 0x000000f9, 0x000000f9 }, + { 0x00009ac0, 0x000000f9, 0x000000f9 }, + { 0x00009ac4, 0x000000f9, 0x000000f9 }, + { 0x00009ac8, 0x000000f9, 0x000000f9 }, + { 0x00009acc, 0x000000f9, 0x000000f9 }, + { 0x00009ad0, 0x000000f9, 0x000000f9 }, + { 0x00009ad4, 0x000000f9, 0x000000f9 }, + { 0x00009ad8, 0x000000f9, 0x000000f9 }, + { 0x00009adc, 0x000000f9, 0x000000f9 }, + { 0x00009ae0, 0x000000f9, 0x000000f9 }, + { 0x00009ae4, 0x000000f9, 0x000000f9 }, + { 0x00009ae8, 0x000000f9, 0x000000f9 }, + { 0x00009aec, 0x000000f9, 0x000000f9 }, + { 0x00009af0, 0x000000f9, 0x000000f9 }, + { 0x00009af4, 0x000000f9, 0x000000f9 }, + { 0x00009af8, 0x000000f9, 0x000000f9 }, + { 0x00009afc, 0x000000f9, 0x000000f9 }, +}; + +static const u32 ar5416Bank1[][2] = { + { 0x000098b0, 0x02108421 }, + { 0x000098ec, 0x00000008 }, +}; + +static const u32 ar5416Bank2[][2] = { + { 0x000098b0, 0x0e73ff17 }, + { 0x000098e0, 0x00000420 }, +}; + +static const u32 ar5416Bank3[][3] = { + { 0x000098f0, 0x01400018, 0x01c00018 }, +}; + +static const u32 ar5416Bank6[][3] = { + + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x40ff0000, 0x40ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x004210a2, 0x004210a2 }, + { 0x0000989c, 0x0014008f, 0x0014008f }, + { 0x0000989c, 0x00c40003, 0x00c40003 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 
0x0001805e, 0x0001805e }, + { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, + { 0x0000989c, 0x000000f1, 0x000000f1 }, + { 0x0000989c, 0x00002081, 0x00002081 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + +static const u32 ar5416Bank6TPC[][3] = { + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x40ff0000, 0x40ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x00423022, 0x00423022 }, + { 0x0000989c, 0x201400df, 0x201400df }, + { 0x0000989c, 0x00c40002, 0x00c40002 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x0001805e, 0x0001805e }, + { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, + { 0x0000989c, 0x000000e1, 0x000000e1 }, + { 0x0000989c, 0x00007081, 0x00007081 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + +static const u32 ar5416Bank7[][2] = { + { 0x0000989c, 0x00000500 }, + { 0x0000989c, 0x00000800 }, + { 0x000098cc, 0x0000000e }, +}; + +static const u32 ar5416Addac[][2] = { + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000003 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x0000000c }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000030 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000060 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000058 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x000098cc, 0x00000000 }, +}; + +static const u32 ar5416Modes_9100[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 
0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, + { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 }, + { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e }, + { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e }, + { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, + { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 }, + { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, + { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d }, + { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 }, + { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 }, + { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e }, + { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff }, +#ifdef TB243 + { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 }, + { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 }, + { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 }, + { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 }, +#else + { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 }, + { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 }, + { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 }, + { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 }, +#endif + { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 }, + { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, + { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, + { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 
0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, + { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, + { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, + { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, + { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, + { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, + { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, + { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, + { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, + { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, + { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, + { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, +}; + +static const u32 ar5416Common_9100[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 
0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00020010, 0x00000003 }, + { 0x00020038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00004000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x000080c0, 0x2a82301a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04800 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0x00000000 }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c4, 0x00000000 }, + { 0x000081d0, 0x00003210 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x00008300, 0x00000000 }, + { 0x00008304, 0x00000000 }, + { 0x00008308, 0x00000000 }, + { 0x0000830c, 0x00000000 }, + { 0x00008310, 0x00000000 }, + { 0x00008314, 0x00000000 }, + { 0x00008318, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00000000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xad848e19 }, + { 0x00009810, 0x7d14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, 
+ { 0x0000983c, 0x00200400 }, + { 0x00009840, 0x206a01ae }, + { 0x0000984c, 0x1284233c }, + { 0x00009854, 0x00000859 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x05100000 }, + { 0x0000a920, 0x05100000 }, + { 0x0000b920, 0x05100000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280b212 }, + { 0x0000994c, 0x00020028 }, + { 0x0000c95c, 0x004b6a8e }, + { 0x0000c968, 0x000003ce }, + { 0x00009970, 0x190fb515 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x006f0000 }, + { 0x000099b0, 0x03051000 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000200 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099fc, 0x00001042 }, + { 0x00009b00, 0x00000000 }, + { 0x00009b04, 0x00000001 }, + { 0x00009b08, 0x00000002 }, + { 0x00009b0c, 0x00000003 }, + { 0x00009b10, 0x00000004 }, + { 0x00009b14, 0x00000005 }, + { 0x00009b18, 0x00000008 }, + { 0x00009b1c, 0x00000009 }, + { 0x00009b20, 0x0000000a }, + { 0x00009b24, 0x0000000b }, + { 0x00009b28, 0x0000000c }, + { 0x00009b2c, 0x0000000d }, + { 0x00009b30, 0x00000010 }, + { 0x00009b34, 0x00000011 }, + { 0x00009b38, 0x00000012 }, + { 0x00009b3c, 0x00000013 }, + { 0x00009b40, 0x00000014 }, + { 0x00009b44, 0x00000015 }, + { 0x00009b48, 0x00000018 }, + { 0x00009b4c, 0x00000019 }, + { 0x00009b50, 0x0000001a }, + { 0x00009b54, 0x0000001b }, + { 0x00009b58, 0x0000001c }, + { 0x00009b5c, 0x0000001d }, + { 0x00009b60, 0x00000020 }, + { 0x00009b64, 0x00000021 }, + { 0x00009b68, 0x00000022 }, + { 0x00009b6c, 0x00000023 }, + { 0x00009b70, 0x00000024 }, + { 0x00009b74, 0x00000025 }, + { 0x00009b78, 0x00000028 }, + { 0x00009b7c, 0x00000029 }, + { 0x00009b80, 0x0000002a }, + { 0x00009b84, 0x0000002b }, + { 0x00009b88, 0x0000002c }, + { 0x00009b8c, 0x0000002d }, + { 0x00009b90, 0x00000030 }, + { 0x00009b94, 0x00000031 }, + { 0x00009b98, 0x00000032 }, + { 0x00009b9c, 0x00000033 }, + { 0x00009ba0, 0x00000034 }, + { 0x00009ba4, 0x00000035 }, + { 0x00009ba8, 0x00000035 }, + { 0x00009bac, 0x00000035 }, + { 0x00009bb0, 0x00000035 }, + { 0x00009bb4, 0x00000035 }, + { 0x00009bb8, 0x00000035 }, + { 0x00009bbc, 0x00000035 }, + { 0x00009bc0, 0x00000035 }, + { 0x00009bc4, 0x00000035 }, + { 0x00009bc8, 0x00000035 }, + { 0x00009bcc, 0x00000035 }, + { 0x00009bd0, 0x00000035 }, + { 0x00009bd4, 0x00000035 }, + { 0x00009bd8, 0x00000035 }, + { 0x00009bdc, 0x00000035 }, + { 0x00009be0, 0x00000035 }, + { 0x00009be4, 0x00000035 }, + { 0x00009be8, 0x00000035 }, + { 0x00009bec, 0x00000035 }, + { 0x00009bf0, 0x00000035 }, + { 0x00009bf4, 0x00000035 }, + { 0x00009bf8, 0x00000010 }, + { 0x00009bfc, 0x0000001a }, + { 0x0000a210, 0x40806333 }, + { 0x0000a214, 0x00106c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x018830c6 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x001a0bb5 }, + { 0x0000a22c, 0x00000000 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c889ae }, + { 
0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00007bb6 }, + { 0x0000a248, 0x0fff3ffc }, + { 0x0000a24c, 0x00000001 }, + { 0x0000a250, 0x0000a000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cc75380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a268, 0x00000001 }, + { 0x0000a26c, 0x0ebae9c6 }, + { 0x0000b26c, 0x0ebae9c6 }, + { 0x0000c26c, 0x0ebae9c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x050701ce }, + { 0x0000a338, 0x00000000 }, + { 0x0000a33c, 0x00000000 }, + { 0x0000a340, 0x00000000 }, + { 0x0000a344, 0x00000000 }, + { 0x0000a348, 0x3fffffff }, + { 0x0000a34c, 0x3fffffff }, + { 0x0000a350, 0x3fffffff }, + { 0x0000a354, 0x0003ffff }, + { 0x0000a358, 0x79a8aa33 }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, +}; + +static const u32 ar5416Bank0_9100[][2] = { + { 0x000098b0, 0x1e5795e5 }, + { 0x000098e0, 0x02008020 }, +}; + +static const u32 ar5416BB_RfGain_9100[][3] = { + { 0x00009a00, 0x00000000, 0x00000000 }, + { 0x00009a04, 0x00000040, 0x00000040 }, + { 0x00009a08, 0x00000080, 0x00000080 }, + { 0x00009a0c, 0x000001a1, 0x00000141 }, + { 0x00009a10, 0x000001e1, 0x00000181 }, + { 0x00009a14, 0x00000021, 0x000001c1 }, + { 0x00009a18, 0x00000061, 0x00000001 }, + { 0x00009a1c, 0x00000168, 0x00000041 }, + { 0x00009a20, 0x000001a8, 0x000001a8 }, + { 0x00009a24, 0x000001e8, 0x000001e8 }, + { 0x00009a28, 0x00000028, 0x00000028 }, + { 0x00009a2c, 0x00000068, 0x00000068 }, + { 0x00009a30, 0x00000189, 0x000000a8 }, + { 0x00009a34, 0x000001c9, 0x00000169 }, + { 0x00009a38, 0x00000009, 0x000001a9 }, + { 0x00009a3c, 0x00000049, 0x000001e9 }, + { 0x00009a40, 0x00000089, 0x00000029 }, + { 0x00009a44, 0x00000170, 0x00000069 }, + { 0x00009a48, 0x000001b0, 0x00000190 }, + { 0x00009a4c, 0x000001f0, 0x000001d0 }, + { 0x00009a50, 0x00000030, 0x00000010 }, + { 0x00009a54, 0x00000070, 0x00000050 }, + { 0x00009a58, 0x00000191, 0x00000090 }, + { 0x00009a5c, 0x000001d1, 0x00000151 }, + { 0x00009a60, 0x00000011, 0x00000191 }, + { 0x00009a64, 0x00000051, 0x000001d1 }, + { 0x00009a68, 0x00000091, 0x00000011 }, + { 0x00009a6c, 0x000001b8, 0x00000051 }, + { 0x00009a70, 0x000001f8, 0x00000198 }, + { 0x00009a74, 0x00000038, 0x000001d8 }, + { 0x00009a78, 0x00000078, 0x00000018 }, + { 0x00009a7c, 0x00000199, 0x00000058 }, + { 0x00009a80, 0x000001d9, 0x00000098 }, + { 0x00009a84, 0x00000019, 0x00000159 }, + { 0x00009a88, 0x00000059, 0x00000199 }, + { 0x00009a8c, 0x00000099, 0x000001d9 }, + { 0x00009a90, 0x000000d9, 0x00000019 }, + { 0x00009a94, 0x000000f9, 0x00000059 }, + { 0x00009a98, 0x000000f9, 0x00000099 }, + { 0x00009a9c, 
0x000000f9, 0x000000d9 }, + { 0x00009aa0, 0x000000f9, 0x000000f9 }, + { 0x00009aa4, 0x000000f9, 0x000000f9 }, + { 0x00009aa8, 0x000000f9, 0x000000f9 }, + { 0x00009aac, 0x000000f9, 0x000000f9 }, + { 0x00009ab0, 0x000000f9, 0x000000f9 }, + { 0x00009ab4, 0x000000f9, 0x000000f9 }, + { 0x00009ab8, 0x000000f9, 0x000000f9 }, + { 0x00009abc, 0x000000f9, 0x000000f9 }, + { 0x00009ac0, 0x000000f9, 0x000000f9 }, + { 0x00009ac4, 0x000000f9, 0x000000f9 }, + { 0x00009ac8, 0x000000f9, 0x000000f9 }, + { 0x00009acc, 0x000000f9, 0x000000f9 }, + { 0x00009ad0, 0x000000f9, 0x000000f9 }, + { 0x00009ad4, 0x000000f9, 0x000000f9 }, + { 0x00009ad8, 0x000000f9, 0x000000f9 }, + { 0x00009adc, 0x000000f9, 0x000000f9 }, + { 0x00009ae0, 0x000000f9, 0x000000f9 }, + { 0x00009ae4, 0x000000f9, 0x000000f9 }, + { 0x00009ae8, 0x000000f9, 0x000000f9 }, + { 0x00009aec, 0x000000f9, 0x000000f9 }, + { 0x00009af0, 0x000000f9, 0x000000f9 }, + { 0x00009af4, 0x000000f9, 0x000000f9 }, + { 0x00009af8, 0x000000f9, 0x000000f9 }, + { 0x00009afc, 0x000000f9, 0x000000f9 }, +}; + +static const u32 ar5416Bank1_9100[][2] = { + { 0x000098b0, 0x02108421}, + { 0x000098ec, 0x00000008}, +}; + +static const u32 ar5416Bank2_9100[][2] = { + { 0x000098b0, 0x0e73ff17}, + { 0x000098e0, 0x00000420}, +}; + +static const u32 ar5416Bank3_9100[][3] = { + { 0x000098f0, 0x01400018, 0x01c00018 }, +}; + +static const u32 ar5416Bank6_9100[][3] = { + + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x004210a2, 0x004210a2 }, + { 0x0000989c, 0x0014000f, 0x0014000f }, + { 0x0000989c, 0x00c40002, 0x00c40002 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x000180d6, 0x000180d6 }, + { 0x0000989c, 0x0000c0aa, 0x0000c0aa }, + { 0x0000989c, 0x000000b1, 0x000000b1 }, + { 0x0000989c, 0x00002000, 0x00002000 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + + +static const u32 ar5416Bank6TPC_9100[][3] = { + + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x40ff0000, 0x40ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 
0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x00423022, 0x00423022 }, + { 0x0000989c, 0x2014008f, 0x2014008f }, + { 0x0000989c, 0x00c40002, 0x00c40002 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x0001805e, 0x0001805e }, + { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, + { 0x0000989c, 0x000000e1, 0x000000e1 }, + { 0x0000989c, 0x00007080, 0x00007080 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + +static const u32 ar5416Bank7_9100[][2] = { + { 0x0000989c, 0x00000500 }, + { 0x0000989c, 0x00000800 }, + { 0x000098cc, 0x0000000e }, +}; + +static const u32 ar5416Addac_9100[][2] = { + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000010 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x000000c0 }, + {0x0000989c, 0x00000015 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x000098cc, 0x00000000 }, +}; + +static const u32 ar5416Modes_9160[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, + { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 }, + { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, + { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e }, + { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, + { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 }, + 
{ 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, + { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, + { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 }, + { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, + { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, + { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, + { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 }, + { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce }, + { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 }, + { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, + { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, + { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, + { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, + { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, + { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, + { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, + { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, + { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, + { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, + { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, + { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, + { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, + { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, +}; + +static const u32 ar5416Common_9160[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 
0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00007010, 0x00000020 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x000080c0, 0x2a82301a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04800 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0xffffffff }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, 
+ { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c4, 0x00000000 }, + { 0x000081d0, 0x00003210 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x00008300, 0x00000000 }, + { 0x00008304, 0x00000000 }, + { 0x00008308, 0x00000000 }, + { 0x0000830c, 0x00000000 }, + { 0x00008310, 0x00000000 }, + { 0x00008314, 0x00000000 }, + { 0x00008318, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00ff0000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xad848e19 }, + { 0x00009810, 0x7d14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x00009840, 0x206a01ae }, + { 0x0000984c, 0x1284233c }, + { 0x00009854, 0x00000859 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x05100000 }, + { 0x0000a920, 0x05100000 }, + { 0x0000b920, 0x05100000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280b212 }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5f3ca3de }, + { 0x00009958, 0x2108ecff }, + { 0x00009940, 0x00750604 }, + { 0x0000c95c, 0x004b6a8e }, + { 0x00009970, 0x190fb515 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x006f0000 }, + { 0x000099b0, 0x03051000 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000200 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099fc, 0x00001042 }, + { 0x00009b00, 0x00000000 }, + { 0x00009b04, 0x00000001 }, + { 0x00009b08, 0x00000002 }, + { 0x00009b0c, 0x00000003 }, + { 0x00009b10, 0x00000004 }, + { 0x00009b14, 0x00000005 }, + { 
0x00009b18, 0x00000008 }, + { 0x00009b1c, 0x00000009 }, + { 0x00009b20, 0x0000000a }, + { 0x00009b24, 0x0000000b }, + { 0x00009b28, 0x0000000c }, + { 0x00009b2c, 0x0000000d }, + { 0x00009b30, 0x00000010 }, + { 0x00009b34, 0x00000011 }, + { 0x00009b38, 0x00000012 }, + { 0x00009b3c, 0x00000013 }, + { 0x00009b40, 0x00000014 }, + { 0x00009b44, 0x00000015 }, + { 0x00009b48, 0x00000018 }, + { 0x00009b4c, 0x00000019 }, + { 0x00009b50, 0x0000001a }, + { 0x00009b54, 0x0000001b }, + { 0x00009b58, 0x0000001c }, + { 0x00009b5c, 0x0000001d }, + { 0x00009b60, 0x00000020 }, + { 0x00009b64, 0x00000021 }, + { 0x00009b68, 0x00000022 }, + { 0x00009b6c, 0x00000023 }, + { 0x00009b70, 0x00000024 }, + { 0x00009b74, 0x00000025 }, + { 0x00009b78, 0x00000028 }, + { 0x00009b7c, 0x00000029 }, + { 0x00009b80, 0x0000002a }, + { 0x00009b84, 0x0000002b }, + { 0x00009b88, 0x0000002c }, + { 0x00009b8c, 0x0000002d }, + { 0x00009b90, 0x00000030 }, + { 0x00009b94, 0x00000031 }, + { 0x00009b98, 0x00000032 }, + { 0x00009b9c, 0x00000033 }, + { 0x00009ba0, 0x00000034 }, + { 0x00009ba4, 0x00000035 }, + { 0x00009ba8, 0x00000035 }, + { 0x00009bac, 0x00000035 }, + { 0x00009bb0, 0x00000035 }, + { 0x00009bb4, 0x00000035 }, + { 0x00009bb8, 0x00000035 }, + { 0x00009bbc, 0x00000035 }, + { 0x00009bc0, 0x00000035 }, + { 0x00009bc4, 0x00000035 }, + { 0x00009bc8, 0x00000035 }, + { 0x00009bcc, 0x00000035 }, + { 0x00009bd0, 0x00000035 }, + { 0x00009bd4, 0x00000035 }, + { 0x00009bd8, 0x00000035 }, + { 0x00009bdc, 0x00000035 }, + { 0x00009be0, 0x00000035 }, + { 0x00009be4, 0x00000035 }, + { 0x00009be8, 0x00000035 }, + { 0x00009bec, 0x00000035 }, + { 0x00009bf0, 0x00000035 }, + { 0x00009bf4, 0x00000035 }, + { 0x00009bf8, 0x00000010 }, + { 0x00009bfc, 0x0000001a }, + { 0x0000a210, 0x40806333 }, + { 0x0000a214, 0x00106c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x018830c6 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x001a0bb5 }, + { 0x0000a22c, 0x00000000 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c889af }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00007bb6 }, + { 0x0000a248, 0x0fff3ffc }, + { 0x0000a24c, 0x00000001 }, + { 0x0000a250, 0x0000e000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cc75380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a268, 0x00000001 }, + { 0x0000a26c, 0x0ebae9c6 }, + { 0x0000b26c, 0x0ebae9c6 }, + { 0x0000c26c, 0x0ebae9c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x050701ce }, + { 0x0000a338, 0x00000000 }, + { 0x0000a33c, 0x00000000 }, + { 0x0000a340, 0x00000000 }, + { 0x0000a344, 0x00000000 }, + { 0x0000a348, 0x3fffffff }, + { 0x0000a34c, 0x3fffffff }, + { 0x0000a350, 0x3fffffff }, + { 0x0000a354, 0x0003ffff }, + { 0x0000a358, 0x79bfaa03 }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 
0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, +}; + +static const u32 ar5416Bank0_9160[][2] = { + { 0x000098b0, 0x1e5795e5 }, + { 0x000098e0, 0x02008020 }, +}; + +static const u32 ar5416BB_RfGain_9160[][3] = { + { 0x00009a00, 0x00000000, 0x00000000 }, + { 0x00009a04, 0x00000040, 0x00000040 }, + { 0x00009a08, 0x00000080, 0x00000080 }, + { 0x00009a0c, 0x000001a1, 0x00000141 }, + { 0x00009a10, 0x000001e1, 0x00000181 }, + { 0x00009a14, 0x00000021, 0x000001c1 }, + { 0x00009a18, 0x00000061, 0x00000001 }, + { 0x00009a1c, 0x00000168, 0x00000041 }, + { 0x00009a20, 0x000001a8, 0x000001a8 }, + { 0x00009a24, 0x000001e8, 0x000001e8 }, + { 0x00009a28, 0x00000028, 0x00000028 }, + { 0x00009a2c, 0x00000068, 0x00000068 }, + { 0x00009a30, 0x00000189, 0x000000a8 }, + { 0x00009a34, 0x000001c9, 0x00000169 }, + { 0x00009a38, 0x00000009, 0x000001a9 }, + { 0x00009a3c, 0x00000049, 0x000001e9 }, + { 0x00009a40, 0x00000089, 0x00000029 }, + { 0x00009a44, 0x00000170, 0x00000069 }, + { 0x00009a48, 0x000001b0, 0x00000190 }, + { 0x00009a4c, 0x000001f0, 0x000001d0 }, + { 0x00009a50, 0x00000030, 0x00000010 }, + { 0x00009a54, 0x00000070, 0x00000050 }, + { 0x00009a58, 0x00000191, 0x00000090 }, + { 0x00009a5c, 0x000001d1, 0x00000151 }, + { 0x00009a60, 0x00000011, 0x00000191 }, + { 0x00009a64, 0x00000051, 0x000001d1 }, + { 0x00009a68, 0x00000091, 0x00000011 }, + { 0x00009a6c, 0x000001b8, 0x00000051 }, + { 0x00009a70, 0x000001f8, 0x00000198 }, + { 0x00009a74, 0x00000038, 0x000001d8 }, + { 0x00009a78, 0x00000078, 0x00000018 }, + { 0x00009a7c, 0x00000199, 0x00000058 }, + { 0x00009a80, 0x000001d9, 0x00000098 }, + { 0x00009a84, 0x00000019, 0x00000159 }, + { 0x00009a88, 0x00000059, 0x00000199 }, + { 0x00009a8c, 0x00000099, 0x000001d9 }, + { 0x00009a90, 0x000000d9, 0x00000019 }, + { 0x00009a94, 0x000000f9, 0x00000059 }, + { 0x00009a98, 0x000000f9, 0x00000099 }, + { 0x00009a9c, 0x000000f9, 0x000000d9 }, + { 0x00009aa0, 0x000000f9, 0x000000f9 }, + { 0x00009aa4, 0x000000f9, 0x000000f9 }, + { 0x00009aa8, 0x000000f9, 0x000000f9 }, + { 0x00009aac, 0x000000f9, 0x000000f9 }, + { 0x00009ab0, 0x000000f9, 0x000000f9 }, + { 0x00009ab4, 0x000000f9, 0x000000f9 }, + { 0x00009ab8, 0x000000f9, 0x000000f9 }, + { 0x00009abc, 0x000000f9, 0x000000f9 }, + { 0x00009ac0, 0x000000f9, 0x000000f9 }, + { 0x00009ac4, 0x000000f9, 0x000000f9 }, + { 0x00009ac8, 0x000000f9, 0x000000f9 }, + { 0x00009acc, 0x000000f9, 0x000000f9 }, + { 0x00009ad0, 0x000000f9, 0x000000f9 }, + { 0x00009ad4, 0x000000f9, 0x000000f9 }, + { 0x00009ad8, 0x000000f9, 0x000000f9 }, + { 0x00009adc, 0x000000f9, 0x000000f9 }, + { 0x00009ae0, 0x000000f9, 0x000000f9 }, + { 0x00009ae4, 0x000000f9, 0x000000f9 }, + { 0x00009ae8, 0x000000f9, 0x000000f9 }, + { 0x00009aec, 0x000000f9, 0x000000f9 }, + { 0x00009af0, 0x000000f9, 0x000000f9 }, + { 0x00009af4, 0x000000f9, 0x000000f9 }, + { 0x00009af8, 0x000000f9, 0x000000f9 }, + { 0x00009afc, 0x000000f9, 0x000000f9 }, +}; + +static const u32 ar5416Bank1_9160[][2] = { + { 0x000098b0, 0x02108421 }, + { 0x000098ec, 0x00000008 }, +}; + +static const u32 ar5416Bank2_9160[][2] = { + { 0x000098b0, 0x0e73ff17 }, + { 0x000098e0, 0x00000420 }, +}; + +static const u32 ar5416Bank3_9160[][3] = { + { 0x000098f0, 0x01400018, 0x01c00018 }, +}; + +static const u32 ar5416Bank6_9160[][3] = { + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 
0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x40ff0000, 0x40ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x004210a2, 0x004210a2 }, + { 0x0000989c, 0x0014008f, 0x0014008f }, + { 0x0000989c, 0x00c40003, 0x00c40003 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x0001805e, 0x0001805e }, + { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, + { 0x0000989c, 0x000000f1, 0x000000f1 }, + { 0x0000989c, 0x00002081, 0x00002081 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + +static const u32 ar5416Bank6TPC_9160[][3] = { + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x40ff0000, 0x40ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x00423022, 0x00423022 }, + { 0x0000989c, 0x2014008f, 0x2014008f }, + { 0x0000989c, 0x00c40002, 0x00c40002 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x0001805e, 0x0001805e }, + { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, + { 0x0000989c, 0x000000e1, 0x000000e1 }, + { 0x0000989c, 0x00007080, 0x00007080 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + +static const u32 ar5416Bank7_9160[][2] = { + { 0x0000989c, 0x00000500 }, + { 0x0000989c, 0x00000800 }, + { 0x000098cc, 0x0000000e }, +}; + +static u32 ar5416Addac_9160[][2] = { + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x000000c0 }, + {0x0000989c, 0x00000018 }, + {0x0000989c, 0x00000004 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x000000c0 }, + {0x0000989c, 
0x00000019 }, + {0x0000989c, 0x00000004 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000004 }, + {0x0000989c, 0x00000003 }, + {0x0000989c, 0x00000008 }, + {0x0000989c, 0x00000000 }, + {0x000098cc, 0x00000000 }, +}; + +static u32 ar5416Addac_91601_1[][2] = { + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x000000c0 }, + {0x0000989c, 0x00000018 }, + {0x0000989c, 0x00000004 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x000000c0 }, + {0x0000989c, 0x00000019 }, + {0x0000989c, 0x00000004 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x000098cc, 0x00000000 }, +}; + +/* XXX 9280 1 */ +static const u32 ar9280Modes_9280[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801080, 0x08400840, 0x06e006e0 }, + { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 }, + { 0x00009848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 }, + { 0x0000a848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 }, + { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, + { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, + { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e }, + { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d20, 0x00049d20, 0x00049d18 }, + { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190 }, + { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, + { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, + { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 }, + { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, + { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, + { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 
0x00000210, 0x00000210 }, + { 0x0000c9b8, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a }, + { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, + { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00009a00, 0x00008184, 0x00008184, 0x00000214, 0x00000214, 0x00000214 }, + { 0x00009a04, 0x00008188, 0x00008188, 0x00000218, 0x00000218, 0x00000218 }, + { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000224, 0x00000224, 0x00000224 }, + { 0x00009a0c, 0x00008190, 0x00008190, 0x00000228, 0x00000228, 0x00000228 }, + { 0x00009a10, 0x00008194, 0x00008194, 0x0000022c, 0x0000022c, 0x0000022c }, + { 0x00009a14, 0x00008200, 0x00008200, 0x00000230, 0x00000230, 0x00000230 }, + { 0x00009a18, 0x00008204, 0x00008204, 0x000002a4, 0x000002a4, 0x000002a4 }, + { 0x00009a1c, 0x00008208, 0x00008208, 0x000002a8, 0x000002a8, 0x000002a8 }, + { 0x00009a20, 0x0000820c, 0x0000820c, 0x000002ac, 0x000002ac, 0x000002ac }, + { 0x00009a24, 0x00008210, 0x00008210, 0x000002b0, 0x000002b0, 0x000002b0 }, + { 0x00009a28, 0x00008214, 0x00008214, 0x000002b4, 0x000002b4, 0x000002b4 }, + { 0x00009a2c, 0x00008280, 0x00008280, 0x000002b8, 0x000002b8, 0x000002b8 }, + { 0x00009a30, 0x00008284, 0x00008284, 0x00000390, 0x00000390, 0x00000390 }, + { 0x00009a34, 0x00008288, 0x00008288, 0x00000394, 0x00000394, 0x00000394 }, + { 0x00009a38, 0x0000828c, 0x0000828c, 0x00000398, 0x00000398, 0x00000398 }, + { 0x00009a3c, 0x00008290, 0x00008290, 0x00000334, 0x00000334, 0x00000334 }, + { 0x00009a40, 0x00008300, 0x00008300, 0x00000338, 0x00000338, 0x00000338 }, + { 0x00009a44, 0x00008304, 0x00008304, 0x000003ac, 0x000003ac, 0x000003ac }, + { 0x00009a48, 0x00008308, 0x00008308, 0x000003b0, 0x000003b0, 0x000003b0 }, + { 0x00009a4c, 0x0000830c, 0x0000830c, 0x000003b4, 0x000003b4, 0x000003b4 }, + { 0x00009a50, 0x00008310, 0x00008310, 0x000003b8, 0x000003b8, 0x000003b8 }, + { 0x00009a54, 0x00008314, 0x00008314, 0x000003a5, 0x000003a5, 0x000003a5 }, + { 0x00009a58, 0x00008380, 0x00008380, 0x000003a9, 0x000003a9, 0x000003a9 }, + { 0x00009a5c, 0x00008384, 0x00008384, 0x000003ad, 0x000003ad, 0x000003ad }, + { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, + { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, + { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, + { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, + { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, + { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, + { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, + { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, + { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, + { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, + { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, + { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, + { 0x00009a90, 
0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, + { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, + { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, + { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, + { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, + { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, + { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, + { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, + { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, + { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, + { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, + { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, + { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 }, + { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 }, + { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 }, + { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c }, + { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 }, + { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 }, + { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 }, + { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 }, + { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c }, + { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 }, + { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c }, + { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 }, + { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 }, + { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 }, + { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 }, + { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 }, + { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 }, + { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 }, + { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 }, + { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 }, + { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 }, + { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 }, + { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 }, + { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c }, + { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 }, + { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 }, + { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 }, + { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 }, + { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 }, + { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 }, + { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 }, + { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 }, + { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad }, + { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 
0x000097b1, 0x000097b1 }, + { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 }, + { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 }, + { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 }, + { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 }, + { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 }, + { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 }, + { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 }, + { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 }, + { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca }, + { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce }, + { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 }, + { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 }, + { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 }, + { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 }, + { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb }, + { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf }, + { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 }, + { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bfc, 
0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 }, + { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 }, + { 0x0000a20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 }, + { 0x0000b20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, + { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 }, + { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 }, + { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b }, + { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 }, + { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 }, + { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a }, + { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 }, + { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, + { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b }, + { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 }, + { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 }, + { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a }, + { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 }, + { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b }, + { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 }, + { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 }, + { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a }, + { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 }, + { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a }, + { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 }, + { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 }, + { 0x0000784c, 0x0e4f048c, 0x0e4f048c, 0x0e4d048c, 0x0e4d048c, 0x0e4d048c }, + { 0x00007854, 0x12031828, 0x12031828, 0x12035828, 0x12035828, 0x12035828 }, + { 0x00007870, 0x807ec400, 0x807ec400, 0x807ec000, 0x807ec000, 0x807ec000 }, + { 0x0000788c, 0x00010000, 0x00010000, 0x00110000, 0x00110000, 0x00110000 }, +}; + +static const u32 ar9280Common_9280[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f 
}, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00004024, 0x0000001f }, + { 0x00007010, 0x00000033 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x00008070, 0x00000000 }, + { 0x000080c0, 0x2a82301a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04800 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0x00000000 }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c4, 0x00000000 }, + { 0x000081d0, 0x00003210 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 
0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x00008300, 0x00000000 }, + { 0x00008304, 0x00000000 }, + { 0x00008308, 0x00000000 }, + { 0x0000830c, 0x00000000 }, + { 0x00008310, 0x00000000 }, + { 0x00008314, 0x00000000 }, + { 0x00008318, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00000000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00008344, 0x00000000 }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xaf268e30 }, + { 0x00009810, 0xfd14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x00009840, 0x206a01ae }, + { 0x0000984c, 0x0040233c }, + { 0x0000a84c, 0x0040233c }, + { 0x00009854, 0x00000044 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x04900000 }, + { 0x0000a920, 0x04900000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280c00a }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0xe250a51e }, + { 0x00009958, 0x3388ffff }, + { 0x00009940, 0x00781204 }, + { 0x0000c95c, 0x004b6a8e }, + { 0x0000c968, 0x000003ce }, + { 0x00009970, 0x190fb514 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x006f00c4 }, + { 0x000099b0, 0x03051000 }, + { 0x000099b4, 0x00000820 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000000 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099fc, 0x00001042 }, + { 0x0000a210, 0x4080a333 }, + { 0x0000a214, 0x40206c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x01834061 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x000003b5 }, + { 0x0000a22c, 0x23277200 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c889af }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00007bb6 }, + { 0x0000a248, 0x0fff3ffc }, + { 0x0000a24c, 0x00000001 }, + { 0x0000a250, 0x001da000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cdbd380 }, + { 0x0000a25c, 
0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0ebae9c6 }, + { 0x0000b26c, 0x0ebae9c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x050701ce }, + { 0x0000a358, 0x7999aa0f }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, + { 0x0000a3e4, 0x00000000 }, + { 0x0000a3e8, 0x18c43433 }, + { 0x0000a3ec, 0x00f38081 }, + { 0x00007800, 0x00040000 }, + { 0x00007804, 0xdb005012 }, + { 0x00007808, 0x04924914 }, + { 0x0000780c, 0x21084210 }, + { 0x00007810, 0x6d801300 }, + { 0x00007814, 0x0019beff }, + { 0x00007818, 0x07e40000 }, + { 0x0000781c, 0x00492000 }, + { 0x00007820, 0x92492480 }, + { 0x00007824, 0x00040000 }, + { 0x00007828, 0xdb005012 }, + { 0x0000782c, 0x04924914 }, + { 0x00007830, 0x21084210 }, + { 0x00007834, 0x6d801300 }, + { 0x00007838, 0x0019beff }, + { 0x0000783c, 0x07e40000 }, + { 0x00007840, 0x00492000 }, + { 0x00007844, 0x92492480 }, + { 0x00007848, 0x00120000 }, + { 0x00007850, 0x54214514 }, + { 0x00007858, 0x92592692 }, + { 0x00007860, 0x52802000 }, + { 0x00007864, 0x0a8e370e }, + { 0x00007868, 0xc0102850 }, + { 0x0000786c, 0x812d4000 }, + { 0x00007874, 0x001b6db0 }, + { 0x00007878, 0x00376b63 }, + { 0x0000787c, 0x06db6db6 }, + { 0x00007880, 0x006d8000 }, + { 0x00007884, 0xffeffffe }, + { 0x00007888, 0xffeffffe }, + { 0x00007890, 0x00060aeb }, + { 0x00007894, 0x5a108000 }, + { 0x00007898, 0x2a850160 }, +}; + +/* XXX 9280 2 */ +static const u32 ar9280Modes_9280_2[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, + { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, + { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a }, + { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 
0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e }, + { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, + { 0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 }, + { 0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, + { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e }, + { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, + { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, + { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, + { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, + { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010 }, + { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, + { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, + { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 }, + { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce }, + { 0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c }, + { 0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00 }, + { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 }, + { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, + { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000, 0x13c88000 }, + { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 }, + { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, + { 0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000, 0x0c000000 }, + { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 }, +}; + +static const u32 ar9280Common_9280_2[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 
0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00004024, 0x0000001f }, + { 0x00004060, 0x00000000 }, + { 0x00004064, 0x00000000 }, + { 0x00007010, 0x00000033 }, + { 0x00007034, 0x00000002 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x00008070, 0x00000000 }, + { 0x000080c0, 0x2a80001a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0xffffffff }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, 
+ { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c0, 0x00000000 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008264, 0xa8a00010 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x0000829c, 0x00000000 }, + { 0x00008300, 0x00000040 }, + { 0x00008314, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00ff0000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00008344, 0x00481043 }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xafa68e30 }, + { 0x00009810, 0xfd14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x0000984c, 0x0040233c }, + { 0x0000a84c, 0x0040233c }, + { 0x00009854, 0x00000044 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x00009910, 0x01002310 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x04900000 }, + { 0x0000a920, 0x04900000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280c00a }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5f3ca3de }, + { 0x00009958, 0x2108ecff }, + { 0x00009940, 0x14750604 }, + { 0x0000c95c, 0x004b6a8e }, + { 0x00009970, 0x190fb515 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x006f0000 }, + { 0x000099b0, 0x03051000 }, + { 0x000099b4, 0x00000820 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000000 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099f0, 0x00000000 }, + { 0x000099fc, 0x00001042 }, + { 0x0000a208, 0x803e4788 }, + { 0x0000a210, 0x4080a333 }, + { 0x0000a214, 0x40206c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x01834061 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x000003b5 }, + { 0x0000a22c, 0x233f7180 }, + { 0x0000a234, 0x20202020 }, + { 
0x0000a238, 0x20202020 }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00007bb6 }, + { 0x0000a248, 0x0fff3ffc }, + { 0x0000a24c, 0x00000000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cdbd380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0e79e5c6 }, + { 0x0000b26c, 0x0e79e5c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, + { 0x0000a3e4, 0x00000000 }, + { 0x0000a3e8, 0x18c43433 }, + { 0x0000a3ec, 0x00f70081 }, + { 0x00007800, 0x00040000 }, + { 0x00007804, 0xdb005012 }, + { 0x00007808, 0x04924914 }, + { 0x0000780c, 0x21084210 }, + { 0x00007810, 0x6d801300 }, + { 0x00007818, 0x07e41000 }, + { 0x00007824, 0x00040000 }, + { 0x00007828, 0xdb005012 }, + { 0x0000782c, 0x04924914 }, + { 0x00007830, 0x21084210 }, + { 0x00007834, 0x6d801300 }, + { 0x0000783c, 0x07e40000 }, + { 0x00007848, 0x00100000 }, + { 0x0000784c, 0x773f0567 }, + { 0x00007850, 0x54214514 }, + { 0x00007854, 0x12035828 }, + { 0x00007858, 0x9259269a }, + { 0x00007860, 0x52802000 }, + { 0x00007864, 0x0a8e370e }, + { 0x00007868, 0xc0102850 }, + { 0x0000786c, 0x812d4000 }, + { 0x00007870, 0x807ec400 }, + { 0x00007874, 0x001b6db0 }, + { 0x00007878, 0x00376b63 }, + { 0x0000787c, 0x06db6db6 }, + { 0x00007880, 0x006d8000 }, + { 0x00007884, 0xffeffffe }, + { 0x00007888, 0xffeffffe }, + { 0x0000788c, 0x00010000 }, + { 0x00007890, 0x02060aeb }, + { 0x00007898, 0x2a850160 }, +}; + +static const u32 ar9280Modes_fast_clock_9280_2[][3] = { + { 0x00001030, 0x00000268, 0x000004d0 }, + { 0x00001070, 0x0000018c, 0x00000318 }, + { 0x000010b0, 0x00000fd0, 0x00001fa0 }, + { 0x00008014, 0x044c044c, 0x08980898 }, + { 0x0000801c, 0x148ec02b, 0x148ec057 }, + { 0x00008318, 0x000044c0, 0x00008980 }, + { 0x00009820, 0x02020200, 0x02020200 }, + { 0x00009824, 0x01000f0f, 0x01000f0f }, + { 0x00009828, 0x0b020001, 0x0b020001 }, + { 0x00009834, 0x00000f0f, 0x00000f0f }, + { 0x00009844, 0x03721821, 0x03721821 }, + { 0x00009914, 0x00000898, 0x00001130 }, + { 0x00009918, 0x0000000b, 0x00000016 }, +}; + +static const u32 ar9280Modes_backoff_23db_rxgain_9280_2[][6] = { + { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, + { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, + { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, + { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 }, + { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c }, + { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 }, + { 
0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 }, + { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 }, + { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c }, + { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 }, + { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 }, + { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 }, + { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c }, + { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 }, + { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 }, + { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 }, + { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c }, + { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 }, + { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 }, + { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 }, + { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 }, + { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 }, + { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c }, + { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 }, + { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, + { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, + { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, + { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, + { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, + { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, + { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, + { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, + { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, + { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, + { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, + { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, + { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, + { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, + { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, + { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, + { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, + { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, + { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, + { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, + { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, + { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, + { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, + { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, + { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b10, 0x00008b10, 0x00008b10 }, + { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b14, 0x00008b14, 0x00008b14 }, + { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b01, 0x00008b01, 0x00008b01 }, + { 0x00009acc, 0x0000b380, 0x0000b380, 
0x00008b05, 0x00008b05, 0x00008b05 }, + { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b09, 0x00008b09, 0x00008b09 }, + { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008b0d, 0x00008b0d, 0x00008b0d }, + { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008b11, 0x00008b11, 0x00008b11 }, + { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008b15, 0x00008b15, 0x00008b15 }, + { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008b02, 0x00008b02, 0x00008b02 }, + { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008b06, 0x00008b06, 0x00008b06 }, + { 0x00009ae8, 0x0000b780, 0x0000b780, 0x00008b0a, 0x00008b0a, 0x00008b0a }, + { 0x00009aec, 0x0000b784, 0x0000b784, 0x00008b0e, 0x00008b0e, 0x00008b0e }, + { 0x00009af0, 0x0000b788, 0x0000b788, 0x00008b12, 0x00008b12, 0x00008b12 }, + { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00008b16, 0x00008b16, 0x00008b16 }, + { 0x00009af8, 0x0000b790, 0x0000b790, 0x00008b03, 0x00008b03, 0x00008b03 }, + { 0x00009afc, 0x0000b794, 0x0000b794, 0x00008b07, 0x00008b07, 0x00008b07 }, + { 0x00009b00, 0x0000b798, 0x0000b798, 0x00008b0b, 0x00008b0b, 0x00008b0b }, + { 0x00009b04, 0x0000d784, 0x0000d784, 0x00008b0f, 0x00008b0f, 0x00008b0f }, + { 0x00009b08, 0x0000d788, 0x0000d788, 0x00008b13, 0x00008b13, 0x00008b13 }, + { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00008b17, 0x00008b17, 0x00008b17 }, + { 0x00009b10, 0x0000d790, 0x0000d790, 0x00008b23, 0x00008b23, 0x00008b23 }, + { 0x00009b14, 0x0000f780, 0x0000f780, 0x00008b27, 0x00008b27, 0x00008b27 }, + { 0x00009b18, 0x0000f784, 0x0000f784, 0x00008b2b, 0x00008b2b, 0x00008b2b }, + { 0x00009b1c, 0x0000f788, 0x0000f788, 0x00008b2f, 0x00008b2f, 0x00008b2f }, + { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x00008b33, 0x00008b33, 0x00008b33 }, + { 0x00009b24, 0x0000f790, 0x0000f790, 0x00008b37, 0x00008b37, 0x00008b37 }, + { 0x00009b28, 0x0000f794, 0x0000f794, 0x00008b43, 0x00008b43, 0x00008b43 }, + { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x00008b47, 0x00008b47, 0x00008b47 }, + { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00008b4b, 0x00008b4b, 0x00008b4b }, + { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00008b4f, 0x00008b4f, 0x00008b4f }, + { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00008b53, 0x00008b53, 0x00008b53 }, + { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00008b57, 0x00008b57, 0x00008b57 }, + { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 
0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b }, + { 0x00009848, 0x00001066, 0x00001066, 0x00001050, 0x00001050, 0x00001050 }, + { 0x0000a848, 0x00001066, 0x00001066, 0x00001050, 0x00001050, 0x00001050 }, +}; + +static const u32 ar9280Modes_original_rxgain_9280_2[][6] = { + { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, + { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, + { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, + { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 }, + { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c }, + { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 }, + { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 }, + { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 }, + { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c }, + { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 }, + { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 }, + { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 
0x00008088, 0x00008088 }, + { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c }, + { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 }, + { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 }, + { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 }, + { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c }, + { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 }, + { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 }, + { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 }, + { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 }, + { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 }, + { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c }, + { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 }, + { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, + { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, + { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, + { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, + { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, + { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, + { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, + { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, + { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, + { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, + { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, + { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, + { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, + { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, + { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, + { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, + { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, + { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, + { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, + { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, + { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, + { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, + { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, + { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, + { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 }, + { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 }, + { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 }, + { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c }, + { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 }, + { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 }, + { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 }, + { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 }, + { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c }, + { 0x00009ae4, 
0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 }, + { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c }, + { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 }, + { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 }, + { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 }, + { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 }, + { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 }, + { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 }, + { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 }, + { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 }, + { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 }, + { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 }, + { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 }, + { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 }, + { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c }, + { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 }, + { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 }, + { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 }, + { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 }, + { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 }, + { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 }, + { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 }, + { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 }, + { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad }, + { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 }, + { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 }, + { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 }, + { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 }, + { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 }, + { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 }, + { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 }, + { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 }, + { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 }, + { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca }, + { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce }, + { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 }, + { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 }, + { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 }, + { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 }, + { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb }, + { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf }, + { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 }, + { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 
0x000097db, 0x000097db }, + { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 }, + { 0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 }, +}; + +static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][6] = { + { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, + { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, + { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, + { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 }, + { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c }, + { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 }, + { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 }, + { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 }, + { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c }, + { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 }, + { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 }, + { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 }, + { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c }, + { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 }, + { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 }, + { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 }, + { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c }, + { 0x00009a44, 
0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 }, + { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 }, + { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 }, + { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 }, + { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 }, + { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c }, + { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 }, + { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, + { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, + { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, + { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, + { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, + { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, + { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, + { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, + { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, + { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, + { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, + { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, + { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, + { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, + { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, + { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, + { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, + { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, + { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, + { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, + { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, + { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, + { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, + { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, + { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 }, + { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 }, + { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 }, + { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c }, + { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 }, + { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 }, + { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 }, + { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 }, + { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c }, + { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 }, + { 0x00009ae8, 0x0000b780, 0x0000b780, 0x00009310, 0x00009310, 0x00009310 }, + { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009314, 0x00009314, 0x00009314 }, + { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009320, 0x00009320, 0x00009320 }, + { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009324, 0x00009324, 0x00009324 }, + { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009328, 
0x00009328, 0x00009328 }, + { 0x00009afc, 0x0000b794, 0x0000b794, 0x0000932c, 0x0000932c, 0x0000932c }, + { 0x00009b00, 0x0000b798, 0x0000b798, 0x00009330, 0x00009330, 0x00009330 }, + { 0x00009b04, 0x0000d784, 0x0000d784, 0x00009334, 0x00009334, 0x00009334 }, + { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009321, 0x00009321, 0x00009321 }, + { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009325, 0x00009325, 0x00009325 }, + { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009329, 0x00009329, 0x00009329 }, + { 0x00009b14, 0x0000f780, 0x0000f780, 0x0000932d, 0x0000932d, 0x0000932d }, + { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009331, 0x00009331, 0x00009331 }, + { 0x00009b1c, 0x0000f788, 0x0000f788, 0x00009335, 0x00009335, 0x00009335 }, + { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x00009322, 0x00009322, 0x00009322 }, + { 0x00009b24, 0x0000f790, 0x0000f790, 0x00009326, 0x00009326, 0x00009326 }, + { 0x00009b28, 0x0000f794, 0x0000f794, 0x0000932a, 0x0000932a, 0x0000932a }, + { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x0000932e, 0x0000932e, 0x0000932e }, + { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00009332, 0x00009332, 0x00009332 }, + { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00009336, 0x00009336, 0x00009336 }, + { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00009323, 0x00009323, 0x00009323 }, + { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00009327, 0x00009327, 0x00009327 }, + { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x0000932b, 0x0000932b, 0x0000932b }, + { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x0000932f, 0x0000932f, 0x0000932f }, + { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00009333, 0x00009333, 0x00009333 }, + { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00009337, 0x00009337, 0x00009337 }, + { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00009343, 0x00009343, 0x00009343 }, + { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00009347, 0x00009347, 0x00009347 }, + { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x0000934b, 0x0000934b, 0x0000934b }, + { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x0000934f, 0x0000934f, 0x0000934f }, + { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00009353, 0x00009353, 0x00009353 }, + { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00009357, 0x00009357, 0x00009357 }, + { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bb0, 
0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b }, + { 0x00009848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a }, + { 0x0000a848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a }, +}; + +static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = { + { 0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, + { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce }, + { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002 }, + { 0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008 }, + { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000c010, 0x0000c010, 0x0000c010 }, + { 0x0000a310, 0x0000e012, 0x0000e012, 0x00010012, 0x00010012, 0x00010012 }, + { 0x0000a314, 0x00011014, 0x00011014, 0x00013014, 0x00013014, 0x00013014 }, + { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a, 0x0001820a }, + { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211, 0x0001b211 }, + { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, + { 0x0000a324, 0x00021092, 0x00021092, 0x00022411, 0x00022411, 0x00022411 }, + { 0x0000a328, 0x0002510a, 0x0002510a, 0x00025413, 0x00025413, 0x00025413 }, + { 0x0000a32c, 0x0002910c, 0x0002910c, 0x00029811, 0x00029811, 0x00029811 }, + { 0x0000a330, 0x0002c18b, 0x0002c18b, 0x0002c813, 0x0002c813, 0x0002c813 }, + { 0x0000a334, 0x0002f1cc, 0x0002f1cc, 0x00030a14, 0x00030a14, 0x00030a14 }, + { 0x0000a338, 0x000321eb, 0x000321eb, 0x00035a50, 0x00035a50, 0x00035a50 }, + { 0x0000a33c, 0x000341ec, 0x000341ec, 0x00039c4c, 0x00039c4c, 0x00039c4c }, + { 0x0000a340, 0x000341ec, 0x000341ec, 0x0003de8a, 0x0003de8a, 0x0003de8a }, + { 0x0000a344, 0x000341ec, 0x000341ec, 0x00042e92, 0x00042e92, 0x00042e92 }, + { 0x0000a348, 0x000341ec, 0x000341ec, 0x00046ed2, 0x00046ed2, 0x00046ed2 }, + { 0x0000a34c, 0x000341ec, 0x000341ec, 0x0004bed5, 0x0004bed5, 0x0004bed5 }, + { 0x0000a350, 0x000341ec, 0x000341ec, 0x0004ff54, 0x0004ff54, 
0x0004ff54 }, + { 0x0000a354, 0x000341ec, 0x000341ec, 0x00055fd5, 0x00055fd5, 0x00055fd5 }, + { 0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff }, + { 0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff }, + { 0x0000781c, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 }, + { 0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 }, + { 0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 }, + { 0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 }, +}; + +static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = { + { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, + { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce }, + { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 }, + { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 }, + { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b }, + { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 }, + { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 }, + { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a }, + { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 }, + { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, + { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b }, + { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 }, + { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 }, + { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a }, + { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 }, + { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b }, + { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 }, + { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 }, + { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a }, + { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 }, + { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a }, + { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 }, + { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 }, + { 0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff }, + { 0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff }, + { 0x0000781c, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 }, + { 0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 }, + { 0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 }, + { 0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 }, +}; + +static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = { + {0x00004040, 0x9248fd00 }, + {0x00004040, 0x24924924 }, + {0x00004040, 0xa8000019 }, + {0x00004040, 0x13160820 }, + {0x00004040, 0xe5980560 }, + {0x00004040, 0xc01dcffc }, + {0x00004040, 0x1aaabe41 }, + {0x00004040, 0xbe105554 }, + {0x00004040, 0x00043007 }, + {0x00004044, 0x00000000 }, +}; + +static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = { + {0x00004040, 0x9248fd00 }, + {0x00004040, 0x24924924 }, + {0x00004040, 0xa8000019 }, + {0x00004040, 0x13160820 }, + {0x00004040, 0xe5980560 }, + 
{0x00004040, 0xc01dcffd }, + {0x00004040, 0x1aaabe41 }, + {0x00004040, 0xbe105554 }, + {0x00004040, 0x00043007 }, + {0x00004044, 0x00000000 }, +}; + +/* AR9285 Revision 10 */ +static const u_int32_t ar9285Modes_9285[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, + { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e }, + { 0x00009844, 0x0372161e, 0x0372161e, 0x03720020, 0x03720020, 0x037216a0 }, + { 0x00009848, 0x00001066, 0x00001066, 0x0000004e, 0x0000004e, 0x00001059 }, + { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, + { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, + { 0x0000985c, 0x3139605e, 0x3139605e, 0x3136605e, 0x3136605e, 0x3139605e }, + { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18 }, + { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, + { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, + { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, + { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1020, 0xdfbc1020, 0xdfbc1010 }, + { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099b8, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c }, + { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, + { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00009a00, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 }, + { 0x00009a04, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 }, + { 0x00009a08, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 }, + { 0x00009a0c, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 }, + { 0x00009a10, 0x00000000, 0x00000000, 0x00068104, 
0x00068104, 0x00000000 }, + { 0x00009a14, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 }, + { 0x00009a18, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 }, + { 0x00009a1c, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 }, + { 0x00009a20, 0x00000000, 0x00000000, 0x00068114, 0x00068114, 0x00000000 }, + { 0x00009a24, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 }, + { 0x00009a28, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 }, + { 0x00009a2c, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 }, + { 0x00009a30, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 }, + { 0x00009a34, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 }, + { 0x00009a38, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 }, + { 0x00009a3c, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 }, + { 0x00009a40, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 }, + { 0x00009a44, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 }, + { 0x00009a48, 0x00000000, 0x00000000, 0x00068284, 0x00068284, 0x00000000 }, + { 0x00009a4c, 0x00000000, 0x00000000, 0x00068288, 0x00068288, 0x00000000 }, + { 0x00009a50, 0x00000000, 0x00000000, 0x00068220, 0x00068220, 0x00000000 }, + { 0x00009a54, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 }, + { 0x00009a58, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 }, + { 0x00009a5c, 0x00000000, 0x00000000, 0x00068304, 0x00068304, 0x00000000 }, + { 0x00009a60, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 }, + { 0x00009a64, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 }, + { 0x00009a68, 0x00000000, 0x00000000, 0x00068380, 0x00068380, 0x00000000 }, + { 0x00009a6c, 0x00000000, 0x00000000, 0x00068384, 0x00068384, 0x00000000 }, + { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, + { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, + { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, + { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, + { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, + { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, + { 0x00009a88, 0x00000000, 0x00000000, 0x00068b04, 0x00068b04, 0x00000000 }, + { 0x00009a8c, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 }, + { 0x00009a90, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 }, + { 0x00009a94, 0x00000000, 0x00000000, 0x00068b0c, 0x00068b0c, 0x00000000 }, + { 0x00009a98, 0x00000000, 0x00000000, 0x00068b80, 0x00068b80, 0x00000000 }, + { 0x00009a9c, 0x00000000, 0x00000000, 0x00068b84, 0x00068b84, 0x00000000 }, + { 0x00009aa0, 0x00000000, 0x00000000, 0x00068b88, 0x00068b88, 0x00000000 }, + { 0x00009aa4, 0x00000000, 0x00000000, 0x00068b8c, 0x00068b8c, 0x00000000 }, + { 0x00009aa8, 0x00000000, 0x00000000, 0x000b8b90, 0x000b8b90, 0x00000000 }, + { 0x00009aac, 0x00000000, 0x00000000, 0x000b8f80, 0x000b8f80, 0x00000000 }, + { 0x00009ab0, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 }, + { 0x00009ab4, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 }, + { 0x00009ab8, 0x00000000, 0x00000000, 0x000b8f8c, 0x000b8f8c, 0x00000000 }, + { 0x00009abc, 0x00000000, 0x00000000, 0x000b8f90, 0x000b8f90, 0x00000000 }, + { 0x00009ac0, 0x00000000, 0x00000000, 0x000bb30c, 0x000bb30c, 0x00000000 }, + { 0x00009ac4, 0x00000000, 0x00000000, 0x000bb310, 0x000bb310, 0x00000000 }, + { 0x00009ac8, 
0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 }, + { 0x00009acc, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 }, + { 0x00009ad0, 0x00000000, 0x00000000, 0x000bb324, 0x000bb324, 0x00000000 }, + { 0x00009ad4, 0x00000000, 0x00000000, 0x000bb704, 0x000bb704, 0x00000000 }, + { 0x00009ad8, 0x00000000, 0x00000000, 0x000f96a4, 0x000f96a4, 0x00000000 }, + { 0x00009adc, 0x00000000, 0x00000000, 0x000f96a8, 0x000f96a8, 0x00000000 }, + { 0x00009ae0, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 }, + { 0x00009ae4, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 }, + { 0x00009ae8, 0x00000000, 0x00000000, 0x000f9720, 0x000f9720, 0x00000000 }, + { 0x00009aec, 0x00000000, 0x00000000, 0x000f9724, 0x000f9724, 0x00000000 }, + { 0x00009af0, 0x00000000, 0x00000000, 0x000f9728, 0x000f9728, 0x00000000 }, + { 0x00009af4, 0x00000000, 0x00000000, 0x000f972c, 0x000f972c, 0x00000000 }, + { 0x00009af8, 0x00000000, 0x00000000, 0x000f97a0, 0x000f97a0, 0x00000000 }, + { 0x00009afc, 0x00000000, 0x00000000, 0x000f97a4, 0x000f97a4, 0x00000000 }, + { 0x00009b00, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 }, + { 0x00009b04, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 }, + { 0x00009b08, 0x00000000, 0x00000000, 0x000fb7b4, 0x000fb7b4, 0x00000000 }, + { 0x00009b0c, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 }, + { 0x00009b10, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 }, + { 0x00009b14, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 }, + { 0x00009b18, 0x00000000, 0x00000000, 0x000fb7ad, 0x000fb7ad, 0x00000000 }, + { 0x00009b1c, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 }, + { 0x00009b20, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 }, + { 0x00009b24, 0x00000000, 0x00000000, 0x000fb7b9, 0x000fb7b9, 0x00000000 }, + { 0x00009b28, 0x00000000, 0x00000000, 0x000fb7c5, 0x000fb7c5, 0x00000000 }, + { 0x00009b2c, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 }, + { 0x00009b30, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 }, + { 0x00009b34, 0x00000000, 0x00000000, 0x000fb7d5, 0x000fb7d5, 0x00000000 }, + { 0x00009b38, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 }, + { 0x00009b3c, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 }, + { 0x00009b40, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 }, + { 0x00009b44, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 }, + { 0x00009b48, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 }, + { 0x00009b4c, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 }, + { 0x00009b50, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 }, + { 0x00009b54, 0x00000000, 0x00000000, 0x000fb7c7, 0x000fb7c7, 0x00000000 }, + { 0x00009b58, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 }, + { 0x00009b5c, 0x00000000, 0x00000000, 0x000fb7cf, 0x000fb7cf, 0x00000000 }, + { 0x00009b60, 0x00000000, 0x00000000, 0x000fb7d7, 0x000fb7d7, 0x00000000 }, + { 0x00009b64, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b68, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b6c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b70, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b74, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b78, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b7c, 0x00000000, 0x00000000, 0x000fb7db, 
0x000fb7db, 0x00000000 }, + { 0x00009b80, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b84, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b88, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b8c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b90, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b94, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b98, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009b9c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009ba0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009ba4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009ba8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bac, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bb0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bb4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bb8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bbc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bc0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bc4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bc8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bcc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bd0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bd4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bd8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bdc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009be0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009be4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009be8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bec, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bf0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bf4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bf8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x00009bfc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 }, + { 0x0000aa00, 0x00000000, 0x00000000, 0x0006801c, 0x0006801c, 0x00000000 }, + { 0x0000aa04, 0x00000000, 0x00000000, 0x00068080, 0x00068080, 0x00000000 }, + { 0x0000aa08, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 }, + { 0x0000aa0c, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 }, + { 0x0000aa10, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 }, + { 0x0000aa14, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 }, + { 0x0000aa18, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 }, + { 0x0000aa1c, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 }, + { 0x0000aa20, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 }, + { 0x0000aa24, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 }, + { 0x0000aa28, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 }, + { 0x0000aa2c, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 }, + { 0x0000aa30, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 }, + { 0x0000aa34, 
0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 }, + { 0x0000aa38, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 }, + { 0x0000aa3c, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 }, + { 0x0000aa40, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 }, + { 0x0000aa44, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 }, + { 0x0000aa48, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 }, + { 0x0000aa4c, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 }, + { 0x0000aa50, 0x00000000, 0x00000000, 0x000681ac, 0x000681ac, 0x00000000 }, + { 0x0000aa54, 0x00000000, 0x00000000, 0x0006821c, 0x0006821c, 0x00000000 }, + { 0x0000aa58, 0x00000000, 0x00000000, 0x00068224, 0x00068224, 0x00000000 }, + { 0x0000aa5c, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 }, + { 0x0000aa60, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 }, + { 0x0000aa64, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 }, + { 0x0000aa68, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 }, + { 0x0000aa6c, 0x00000000, 0x00000000, 0x00068310, 0x00068310, 0x00000000 }, + { 0x0000aa70, 0x00000000, 0x00000000, 0x00068788, 0x00068788, 0x00000000 }, + { 0x0000aa74, 0x00000000, 0x00000000, 0x0006878c, 0x0006878c, 0x00000000 }, + { 0x0000aa78, 0x00000000, 0x00000000, 0x00068790, 0x00068790, 0x00000000 }, + { 0x0000aa7c, 0x00000000, 0x00000000, 0x00068794, 0x00068794, 0x00000000 }, + { 0x0000aa80, 0x00000000, 0x00000000, 0x00068798, 0x00068798, 0x00000000 }, + { 0x0000aa84, 0x00000000, 0x00000000, 0x0006879c, 0x0006879c, 0x00000000 }, + { 0x0000aa88, 0x00000000, 0x00000000, 0x00068b89, 0x00068b89, 0x00000000 }, + { 0x0000aa8c, 0x00000000, 0x00000000, 0x00068b8d, 0x00068b8d, 0x00000000 }, + { 0x0000aa90, 0x00000000, 0x00000000, 0x00068b91, 0x00068b91, 0x00000000 }, + { 0x0000aa94, 0x00000000, 0x00000000, 0x00068b95, 0x00068b95, 0x00000000 }, + { 0x0000aa98, 0x00000000, 0x00000000, 0x00068b99, 0x00068b99, 0x00000000 }, + { 0x0000aa9c, 0x00000000, 0x00000000, 0x00068ba5, 0x00068ba5, 0x00000000 }, + { 0x0000aaa0, 0x00000000, 0x00000000, 0x00068ba9, 0x00068ba9, 0x00000000 }, + { 0x0000aaa4, 0x00000000, 0x00000000, 0x00068bad, 0x00068bad, 0x00000000 }, + { 0x0000aaa8, 0x00000000, 0x00000000, 0x000b8b0c, 0x000b8b0c, 0x00000000 }, + { 0x0000aaac, 0x00000000, 0x00000000, 0x000b8f10, 0x000b8f10, 0x00000000 }, + { 0x0000aab0, 0x00000000, 0x00000000, 0x000b8f14, 0x000b8f14, 0x00000000 }, + { 0x0000aab4, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 }, + { 0x0000aab8, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 }, + { 0x0000aabc, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 }, + { 0x0000aac0, 0x00000000, 0x00000000, 0x000bb380, 0x000bb380, 0x00000000 }, + { 0x0000aac4, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 }, + { 0x0000aac8, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 }, + { 0x0000aacc, 0x00000000, 0x00000000, 0x000bb38c, 0x000bb38c, 0x00000000 }, + { 0x0000aad0, 0x00000000, 0x00000000, 0x000bb394, 0x000bb394, 0x00000000 }, + { 0x0000aad4, 0x00000000, 0x00000000, 0x000bb798, 0x000bb798, 0x00000000 }, + { 0x0000aad8, 0x00000000, 0x00000000, 0x000f970c, 0x000f970c, 0x00000000 }, + { 0x0000aadc, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 }, + { 0x0000aae0, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 }, + { 0x0000aae4, 0x00000000, 0x00000000, 0x000f9718, 0x000f9718, 0x00000000 }, + { 0x0000aae8, 0x00000000, 0x00000000, 0x000f9705, 
0x000f9705, 0x00000000 }, + { 0x0000aaec, 0x00000000, 0x00000000, 0x000f9709, 0x000f9709, 0x00000000 }, + { 0x0000aaf0, 0x00000000, 0x00000000, 0x000f970d, 0x000f970d, 0x00000000 }, + { 0x0000aaf4, 0x00000000, 0x00000000, 0x000f9711, 0x000f9711, 0x00000000 }, + { 0x0000aaf8, 0x00000000, 0x00000000, 0x000f9715, 0x000f9715, 0x00000000 }, + { 0x0000aafc, 0x00000000, 0x00000000, 0x000f9719, 0x000f9719, 0x00000000 }, + { 0x0000ab00, 0x00000000, 0x00000000, 0x000fb7a4, 0x000fb7a4, 0x00000000 }, + { 0x0000ab04, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 }, + { 0x0000ab08, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 }, + { 0x0000ab0c, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 }, + { 0x0000ab10, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 }, + { 0x0000ab14, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 }, + { 0x0000ab18, 0x00000000, 0x00000000, 0x000fb7bc, 0x000fb7bc, 0x00000000 }, + { 0x0000ab1c, 0x00000000, 0x00000000, 0x000fb7a1, 0x000fb7a1, 0x00000000 }, + { 0x0000ab20, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 }, + { 0x0000ab24, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 }, + { 0x0000ab28, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 }, + { 0x0000ab2c, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 }, + { 0x0000ab30, 0x00000000, 0x00000000, 0x000fb7bd, 0x000fb7bd, 0x00000000 }, + { 0x0000ab34, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 }, + { 0x0000ab38, 0x00000000, 0x00000000, 0x000fb7cd, 0x000fb7cd, 0x00000000 }, + { 0x0000ab3c, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 }, + { 0x0000ab40, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 }, + { 0x0000ab44, 0x00000000, 0x00000000, 0x000fb7c2, 0x000fb7c2, 0x00000000 }, + { 0x0000ab48, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 }, + { 0x0000ab4c, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 }, + { 0x0000ab50, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 }, + { 0x0000ab54, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 }, + { 0x0000ab58, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 }, + { 0x0000ab5c, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 }, + { 0x0000ab60, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 }, + { 0x0000ab64, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab68, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab6c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab70, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab74, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab78, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab7c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab80, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab84, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab88, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab8c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab90, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab94, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab98, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000ab9c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000aba0, 
0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000aba4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000aba8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abac, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abb0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abb4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abb8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abbc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abc0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abc4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abc8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abcc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abd0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abd4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abd8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abdc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abe0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abe4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abe8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abec, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abf0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abf4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abf8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000abfc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 }, + { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 }, + { 0x0000a20c, 0x00000014, 0x00000014, 0x00000000, 0x00000000, 0x0001f000 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a250, 0x001ff000, 0x001ff000, 0x001ca000, 0x001ca000, 0x001da000 }, + { 0x0000a274, 0x0a81c652, 0x0a81c652, 0x0a820652, 0x0a820652, 0x0a82a652 }, + { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a304, 0x00000000, 0x00000000, 0x00007201, 0x00007201, 0x00000000 }, + { 0x0000a308, 0x00000000, 0x00000000, 0x00010408, 0x00010408, 0x00000000 }, + { 0x0000a30c, 0x00000000, 0x00000000, 0x0001860a, 0x0001860a, 0x00000000 }, + { 0x0000a310, 0x00000000, 0x00000000, 0x00020818, 0x00020818, 0x00000000 }, + { 0x0000a314, 0x00000000, 0x00000000, 0x00024858, 0x00024858, 0x00000000 }, + { 0x0000a318, 0x00000000, 0x00000000, 0x00026859, 0x00026859, 0x00000000 }, + { 0x0000a31c, 0x00000000, 0x00000000, 0x0002985b, 0x0002985b, 0x00000000 }, + { 0x0000a320, 0x00000000, 0x00000000, 0x0002c89a, 0x0002c89a, 0x00000000 }, + { 0x0000a324, 0x00000000, 0x00000000, 0x0002e89b, 0x0002e89b, 0x00000000 }, + { 0x0000a328, 0x00000000, 0x00000000, 0x0003089c, 0x0003089c, 0x00000000 }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x0003289d, 0x0003289d, 0x00000000 }, + { 0x0000a330, 0x00000000, 0x00000000, 0x0003489e, 0x0003489e, 0x00000000 }, + { 0x0000a334, 0x00000000, 0x00000000, 0x000388de, 0x000388de, 0x00000000 }, + { 0x0000a338, 0x00000000, 0x00000000, 0x0003b91e, 0x0003b91e, 0x00000000 }, + { 0x0000a33c, 0x00000000, 0x00000000, 0x0003d95e, 
0x0003d95e, 0x00000000 }, + { 0x0000a340, 0x00000000, 0x00000000, 0x000419df, 0x000419df, 0x00000000 }, + { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, +}; + +static const u_int32_t ar9285Common_9285[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020045 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00004024, 0x0000001f }, + { 0x00004060, 0x00000000 }, + { 0x00004064, 0x00000000 }, + { 0x00007010, 0x00000031 }, + { 0x00007034, 0x00000002 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x00000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x00008070, 0x00000000 }, + { 0x000080c0, 0x2a80001a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 
}, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04800 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0x00000000 }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c0, 0x00000000 }, + { 0x000081d0, 0x00003210 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008264, 0xa8a00010 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x0000829c, 0x00000000 }, + { 0x00008300, 0x00000040 }, + { 0x00008314, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000001 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00000000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x00010380 }, + { 0x00008344, 0x00481043 }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xafe68e30 }, + { 0x00009810, 0xfd14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x0000984c, 0x0040233c }, + { 0x00009854, 0x00000044 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x00009910, 0x01002310 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x04900000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009940, 0x14750604 }, + { 0x00009948, 0x9280c00a }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5f3ca3de }, + { 0x00009958, 0x2108ecff }, + { 0x00009968, 0x000003ce }, + { 0x00009970, 0x1927b515 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 
0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x2def0a00 }, + { 0x000099b0, 0x03051000 }, + { 0x000099b4, 0x00000820 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000000 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099f0, 0x00000000 }, + { 0x0000a208, 0x803e6788 }, + { 0x0000a210, 0x4080a333 }, + { 0x0000a214, 0x00206c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x01834061 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x000003b5 }, + { 0x0000a22c, 0x00000000 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a244, 0x00000000 }, + { 0x0000a248, 0xfffffffc }, + { 0x0000a24c, 0x00000000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0ccb5380 }, + { 0x0000a25c, 0x15151501 }, + { 0x0000a260, 0xdfa90f01 }, + { 0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0ebae9e6 }, + { 0x0000d270, 0x0d820820 }, + { 0x0000a278, 0x39ce739c }, + { 0x0000a27c, 0x050e039c }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x39ce739c }, + { 0x0000a398, 0x0000039c }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x39ce739c }, + { 0x0000a3e0, 0x0000039c }, + { 0x0000a3e4, 0x00000000 }, + { 0x0000a3e8, 0x18c43433 }, + { 0x0000a3ec, 0x00f70081 }, + { 0x00007800, 0x00140000 }, + { 0x00007804, 0x0e4548d8 }, + { 0x00007808, 0x54214514 }, + { 0x0000780c, 0x02025820 }, + { 0x00007810, 0x71c0d388 }, + { 0x00007814, 0x924934a8 }, + { 0x0000781c, 0x00000000 }, + { 0x00007820, 0x00000c04 }, + { 0x00007824, 0x00d86fff }, + { 0x00007828, 0x26d2491b }, + { 0x0000782c, 0x6e36d97b }, + { 0x00007830, 0xedb6d96c }, + { 0x00007834, 0x71400086 }, + { 0x00007838, 0xfac68800 }, + { 0x0000783c, 0x0001fffe }, + { 0x00007840, 0xffeb1a20 }, + { 0x00007844, 0x000c0db6 }, + { 0x00007848, 0x6db61b6f }, + { 0x0000784c, 0x6d9b66db }, + { 0x00007850, 0x6d8c6dba }, + { 0x00007854, 0x00040000 }, + { 0x00007858, 0xdb003012 }, + { 0x0000785c, 0x04924914 }, + { 0x00007860, 0x21084210 }, + { 0x00007864, 0xf7d7ffde }, + { 0x00007868, 0xc2034080 }, + { 0x0000786c, 0x48609eb4 }, + { 0x00007870, 0x10142c00 }, +}; + +static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = { + {0x00004040, 0x9248fd00 }, + {0x00004040, 0x24924924 }, + {0x00004040, 0xa8000019 }, + {0x00004040, 0x13160820 }, + {0x00004040, 0xe5980560 }, + {0x00004040, 0xc01dcffd }, + {0x00004040, 0x1aaabe41 }, + {0x00004040, 0xbe105554 }, + {0x00004040, 0x00043007 }, + {0x00004044, 0x00000000 }, +}; + +static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = 
{ + {0x00004040, 0x9248fd00 }, + {0x00004040, 0x24924924 }, + {0x00004040, 0xa8000019 }, + {0x00004040, 0x13160820 }, + {0x00004040, 0xe5980560 }, + {0x00004040, 0xc01dcffc }, + {0x00004040, 0x1aaabe41 }, + {0x00004040, 0xbe105554 }, + {0x00004040, 0x00043007 }, + {0x00004044, 0x00000000 }, +}; + +/* AR9285 v1_2 PCI Register Writes. Created: 04/13/09 */ +static const u_int32_t ar9285Modes_9285_1_2[][6] = { + /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, + { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e }, + { 0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0 }, + { 0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 }, + { 0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 }, + { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, + { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, + { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e }, + { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18 }, + { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, + { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, + { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, + { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010 }, + { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c }, + { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, + { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00009a00, 0x00000000, 0x00000000, 
0x00058084, 0x00058084, 0x00000000 }, + { 0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, + { 0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, + { 0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, + { 0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, + { 0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, + { 0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, + { 0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, + { 0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, + { 0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, + { 0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, + { 0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, + { 0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, + { 0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 }, + { 0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, + { 0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, + { 0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, + { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, + { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, + { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, + { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, + { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, + { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, + { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, + { 0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, + { 0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, + { 0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, + { 0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, + { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, + { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, + { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, + { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, + { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, + { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, + { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, + { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, + { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, + { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, + { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, + { 0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, + { 0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, + { 0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, + { 0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, + { 0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, + { 0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, + { 0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, + { 
0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, + { 0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, + { 0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, + { 0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, + { 0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 }, + { 0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, + { 0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, + { 0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, + { 0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, + { 0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, + { 0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, + { 0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, + { 0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, + { 0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, + { 0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, + { 0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, + { 0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, + { 0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, + { 0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, + { 0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, + { 0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, + { 0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, + { 0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, + { 0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, + { 0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, + { 0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, + { 0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, + { 0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, + { 0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, + { 0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, + { 0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, + { 0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, + { 0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, + { 0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, + { 0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, + { 0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, + { 0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, + { 0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, + { 0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, + { 0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, + { 0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, + { 0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, + { 0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, + { 0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b6c, 0x00000000, 0x00000000, 
0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009be8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, + { 0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, + { 0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, + { 0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, + { 0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, + { 0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, + { 0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, + { 0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, + { 0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, + { 
0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, + { 0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, + { 0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, + { 0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, + { 0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 }, + { 0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, + { 0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, + { 0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, + { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, + { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, + { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, + { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, + { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, + { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, + { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, + { 0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, + { 0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, + { 0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, + { 0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, + { 0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, + { 0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, + { 0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, + { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, + { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, + { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, + { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, + { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, + { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, + { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, + { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, + { 0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, + { 0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, + { 0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, + { 0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, + { 0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, + { 0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, + { 0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, + { 0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, + { 0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, + { 0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, + { 0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, + { 0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 }, + { 0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, + { 0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, + { 0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, + { 0x0000aad8, 0x00000000, 0x00000000, 
0x000eb6a4, 0x000eb6a4, 0x00000000 }, + { 0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, + { 0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, + { 0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, + { 0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, + { 0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, + { 0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, + { 0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, + { 0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, + { 0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, + { 0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, + { 0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, + { 0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, + { 0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, + { 0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, + { 0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, + { 0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, + { 0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, + { 0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, + { 0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, + { 0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, + { 0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, + { 0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, + { 0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, + { 0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, + { 0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, + { 0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, + { 0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, + { 0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, + { 0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, + { 0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, + { 0x0000ab54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, + { 0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, + { 0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, + { 0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, + { 0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 
0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 }, + { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, + { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 }, + { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, +}; + +static const u_int32_t ar9285Common_9285_1_2[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020045 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 
0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00004024, 0x0000001f }, + { 0x00004060, 0x00000000 }, + { 0x00004064, 0x00000000 }, + { 0x00007010, 0x00000031 }, + { 0x00007034, 0x00000002 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x00000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x00008070, 0x00000000 }, + { 0x000080c0, 0x2a80001a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04810 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0xffffffff }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c0, 0x00000000 }, + { 0x000081d0, 0x0000320a }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, 
+ { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008264, 0x88a00010 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x0000829c, 0x00000000 }, + { 0x00008300, 0x00000040 }, + { 0x00008314, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000001 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00ff0000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x00010380 }, + { 0x00008344, 0x00481043 }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xafe68e30 }, + { 0x00009810, 0xfd14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x0000984c, 0x0040233c }, + { 0x00009854, 0x00000044 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x00009910, 0x01002310 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x04900000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009940, 0x14750604 }, + { 0x00009948, 0x9280c00a }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5f3ca3de }, + { 0x00009958, 0x2108ecff }, + { 0x00009968, 0x000003ce }, + { 0x00009970, 0x192bb514 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x2def0400 }, + { 0x000099b0, 0x03051000 }, + { 0x000099b4, 0x00000820 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000000 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099f0, 0x00000000 }, + { 0x0000a208, 0x803e68c8 }, + { 0x0000a210, 0x4080a333 }, + { 0x0000a214, 0x00206c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x01834061 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x000003b5 }, + { 0x0000a22c, 0x00000000 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a244, 0x00000000 }, + { 0x0000a248, 0xfffffffc }, + { 0x0000a24c, 0x00000000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0ccb5380 }, + { 0x0000a25c, 0x15151501 }, + { 0x0000a260, 0xdfa90f01 }, + { 
0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0ebae9e6 }, + { 0x0000d270, 0x0d820820 }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3e4, 0x00000000 }, + { 0x0000a3e8, 0x18c43433 }, + { 0x0000a3ec, 0x00f70081 }, + { 0x00007800, 0x00140000 }, + { 0x00007804, 0x0e4548d8 }, + { 0x00007808, 0x54214514 }, + { 0x0000780c, 0x02025830 }, + { 0x00007810, 0x71c0d388 }, + { 0x00007814, 0x924934a8 }, + { 0x0000781c, 0x00000000 }, + { 0x00007824, 0x00d86fff }, + { 0x00007828, 0x26d2491b }, + { 0x0000782c, 0x6e36d97b }, + { 0x00007830, 0xedb6d96e }, + { 0x00007834, 0x71400087 }, + { 0x0000783c, 0x0001fffe }, + { 0x00007840, 0xffeb1a20 }, + { 0x00007844, 0x000c0db6 }, + { 0x00007848, 0x6db61b6f }, + { 0x0000784c, 0x6d9b66db }, + { 0x00007850, 0x6d8c6dba }, + { 0x00007854, 0x00040000 }, + { 0x00007858, 0xdb003012 }, + { 0x0000785c, 0x04924914 }, + { 0x00007860, 0x21084210 }, + { 0x00007864, 0xf7d7ffde }, + { 0x00007868, 0xc2034080 }, + { 0x00007870, 0x10142c00 }, +}; + +static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = { + /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ + { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 }, + { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 }, + { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 }, + { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 }, + { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 }, + { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 }, + { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 }, + { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 }, + { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 }, + { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 }, + { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 }, + { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 }, + { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, + { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, + { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a354, 
0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 }, + { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe }, + { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 }, + { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 }, + { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, + { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 }, + { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, + { 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, + { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 }, + { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, +}; + +static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = { + /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ + { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, + { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 }, + { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 }, + { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 }, + { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 }, + { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 }, + { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 }, + { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 }, + { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 }, + { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 }, + { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 }, + { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 }, + { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, + { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, + { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 }, + { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 }, + { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 }, + { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 }, + { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, + { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c }, + { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, + { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, + { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c }, + { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, +}; + +static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = { + 
{0x00004040, 0x9248fd00 }, + {0x00004040, 0x24924924 }, + {0x00004040, 0xa8000019 }, + {0x00004040, 0x13160820 }, + {0x00004040, 0xe5980560 }, + {0x00004040, 0xc01dcffd }, + {0x00004040, 0x1aaabe41 }, + {0x00004040, 0xbe105554 }, + {0x00004040, 0x00043007 }, + {0x00004044, 0x00000000 }, +}; + +static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = { + {0x00004040, 0x9248fd00 }, + {0x00004040, 0x24924924 }, + {0x00004040, 0xa8000019 }, + {0x00004040, 0x13160820 }, + {0x00004040, 0xe5980560 }, + {0x00004040, 0xc01dcffc }, + {0x00004040, 0x1aaabe41 }, + {0x00004040, 0xbe105554 }, + {0x00004040, 0x00043007 }, + {0x00004044, 0x00000000 }, +}; + +/* AR9287 Revision 10 */ +static const u_int32_t ar9287Modes_9287_1_0[][6] = { + /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ + { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, + { 0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b, 0x0988004f }, + { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, + { 0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a, 0x0000320a }, + { 0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440, 0x00006880 }, + { 0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e, 0x01000e0e }, + { 0x00009828, 0x00000000, 0x00000000, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e, 0x206a012e }, + { 0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0, 0x037216a0 }, + { 0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 }, + { 0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, + { 0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e, 0x31395d5e }, + { 0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20, 0x00058d18 }, + { 0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, + { 0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881, 0x06903881 }, + { 0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, + { 0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010, 0xefbc1010 }, + { 0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 }, + { 0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 }, + { 0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210, 0x00000210 }, + { 0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce, 0x000003ce }, + { 0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c, 0x0000001c }, + { 0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00, 0x00000c00 }, + { 0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, + { 0x0000a204, 0x00000440, 
0x00000440, 0x00000444, 0x00000444, 0x00000444 }, + { 0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000, 0x0004a000 }, + { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, + { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, +}; + +static const u_int32_t ar9287Common_9287_1_0[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00004024, 0x0000001f }, + { 0x00004060, 0x00000000 }, + { 0x00004064, 0x00000000 }, + { 0x00007010, 0x00000033 }, + { 0x00007020, 0x00000000 }, + { 0x00007034, 0x00000002 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 
0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x00008070, 0x00000000 }, + { 0x000080c0, 0x2a80001a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0xffffffff }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x18487320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c0, 0x00000000 }, + { 0x000081c4, 0x00000000 }, + { 0x000081d4, 0x00000000 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008264, 0xa8a00010 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x000000ff }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x0000829c, 0x00000000 }, + { 0x00008300, 0x00000040 }, + { 0x00008314, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00ff0000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00008344, 0x01c81043 }, + { 0x00008360, 0xffffffff }, + { 0x00008364, 0xffffffff }, + { 0x00008368, 0x00000000 }, + { 0x00008370, 0x00000000 }, + { 0x00008374, 0x000000ff }, + { 0x00008378, 0x00000000 }, + { 0x0000837c, 0x00000000 }, + { 0x00008380, 0xffffffff }, + { 0x00008384, 0xffffffff }, + { 0x00008390, 0x0fffffff }, + { 0x00008394, 0x0fffffff }, + { 0x00008398, 0x00000000 }, + { 0x0000839c, 0x00000000 }, + { 0x000083a0, 0x00000000 }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xafe68e30 }, + { 0x00009810, 0xfd14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, 
+ { 0x0000983c, 0x00200400 }, + { 0x0000984c, 0x0040233c }, + { 0x0000a84c, 0x0040233c }, + { 0x00009854, 0x00000044 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x00009910, 0x10002310 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x04900000 }, + { 0x0000a920, 0x04900000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009930, 0x00000000 }, + { 0x0000a930, 0x00000000 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280c00a }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5f3ca3de }, + { 0x00009958, 0x0108ecff }, + { 0x00009940, 0x14750604 }, + { 0x0000c95c, 0x004b6a8e }, + { 0x00009970, 0x990bb515 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x0c6f0000 }, + { 0x000099b0, 0x03051000 }, + { 0x000099b4, 0x00000820 }, + { 0x000099c4, 0x06336f77 }, + { 0x000099c8, 0x6af65329 }, + { 0x000099cc, 0x08f186c8 }, + { 0x000099d0, 0x00046384 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000000 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099f0, 0x00000000 }, + { 0x000099fc, 0x00001042 }, + { 0x0000a1f4, 0x00fffeff }, + { 0x0000a1f8, 0x00f5f9ff }, + { 0x0000a1fc, 0xb79f6427 }, + { 0x0000a208, 0x803e4788 }, + { 0x0000a210, 0x4080a333 }, + { 0x0000a214, 0x40206c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x01834061 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x000003b5 }, + { 0x0000a22c, 0x233f7180 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c889af }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00000000 }, + { 0x0000a248, 0xfffffffc }, + { 0x0000a24c, 0x00000000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cdbd380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a264, 0x00418a11 }, + { 0x0000b264, 0x00418a11 }, + { 0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0e79e5c6 }, + { 0x0000b26c, 0x0e79e5c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x050701ce }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000b398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, + { 0x0000a3e4, 0x00000000 }, + { 0x0000a3e8, 0x18c43433 }, + { 0x0000a3ec, 0x00f70081 }, + { 0x0000a3f0, 0x01036a1e }, + { 0x0000a3f4, 0x00000000 }, + { 0x0000b3f4, 0x00000000 }, + { 0x0000a7d8, 0x00000001 }, + { 0x00007800, 0x00000800 }, + { 0x00007804, 0x6c35ffb0 }, + { 0x00007808, 0x6db6c000 }, + { 0x0000780c, 0x6db6cb30 }, + { 0x00007810, 0x6db6cb6c }, + { 0x00007814, 0x0501e200 }, + { 0x00007818, 0x0094128d }, + { 0x0000781c, 0x976ee392 }, + { 0x00007820, 0xf75ff6fc }, + { 0x00007824, 0x00040000 }, + { 0x00007828, 0xdb003012 }, + { 
0x0000782c, 0x04924914 }, + { 0x00007830, 0x21084210 }, + { 0x00007834, 0x00140000 }, + { 0x00007838, 0x0e4548d8 }, + { 0x0000783c, 0x54214514 }, + { 0x00007840, 0x02025820 }, + { 0x00007844, 0x71c0d388 }, + { 0x00007848, 0x934934a8 }, + { 0x00007850, 0x00000000 }, + { 0x00007854, 0x00000800 }, + { 0x00007858, 0x6c35ffb0 }, + { 0x0000785c, 0x6db6c000 }, + { 0x00007860, 0x6db6cb2c }, + { 0x00007864, 0x6db6cb6c }, + { 0x00007868, 0x0501e200 }, + { 0x0000786c, 0x0094128d }, + { 0x00007870, 0x976ee392 }, + { 0x00007874, 0xf75ff6fc }, + { 0x00007878, 0x00040000 }, + { 0x0000787c, 0xdb003012 }, + { 0x00007880, 0x04924914 }, + { 0x00007884, 0x21084210 }, + { 0x00007888, 0x001b6db0 }, + { 0x0000788c, 0x00376b63 }, + { 0x00007890, 0x06db6db6 }, + { 0x00007894, 0x006d8000 }, + { 0x00007898, 0x48100000 }, + { 0x0000789c, 0x00000000 }, + { 0x000078a0, 0x08000000 }, + { 0x000078a4, 0x0007ffd8 }, + { 0x000078a8, 0x0007ffd8 }, + { 0x000078ac, 0x001c0020 }, + { 0x000078b0, 0x000611eb }, + { 0x000078b4, 0x40008080 }, + { 0x000078b8, 0x2a850160 }, +}; + +static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = { + /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ + { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 }, + { 0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004, 0x00008004 }, + { 0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a, 0x0000c00a }, + { 0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c, 0x0001000c }, + { 0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b, 0x0001420b }, + { 0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a, 0x0001824a }, + { 0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a, 0x0001c44a }, + { 0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a, 0x0002064a }, + { 0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a, 0x0002484a }, + { 0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a, 0x00028a4a }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a }, + { 0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a, 0x00030e4a }, + { 0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a, 0x00034e8a }, + { 0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c, 0x00038e8c }, + { 0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc, 0x0003cecc }, + { 0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4, 0x00040ed4 }, + { 0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc, 0x00044edc }, + { 0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede, 0x00048ede }, + { 0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e }, + { 0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e, 0x00050f5e }, + { 0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e, 0x00054f9e }, + { 0x0000a780, 0x00000000, 0x00000000, 0x00000060, 0x00000060, 0x00000060 }, + { 0x0000a784, 0x00000000, 0x00000000, 0x00004062, 0x00004062, 0x00004062 }, + { 0x0000a788, 0x00000000, 0x00000000, 0x00008064, 0x00008064, 0x00008064 }, + { 0x0000a78c, 0x00000000, 0x00000000, 0x0000c0a4, 0x0000c0a4, 0x0000c0a4 }, + { 0x0000a790, 0x00000000, 0x00000000, 0x000100b0, 0x000100b0, 0x000100b0 }, + { 0x0000a794, 0x00000000, 0x00000000, 0x000140b2, 0x000140b2, 0x000140b2 }, + { 0x0000a798, 0x00000000, 0x00000000, 0x000180b4, 0x000180b4, 0x000180b4 }, + { 0x0000a79c, 0x00000000, 0x00000000, 0x0001c0f4, 0x0001c0f4, 0x0001c0f4 }, + { 0x0000a7a0, 0x00000000, 0x00000000, 
0x00020134, 0x00020134, 0x00020134 }, + { 0x0000a7a4, 0x00000000, 0x00000000, 0x000240fe, 0x000240fe, 0x000240fe }, + { 0x0000a7a8, 0x00000000, 0x00000000, 0x0002813e, 0x0002813e, 0x0002813e }, + { 0x0000a7ac, 0x00000000, 0x00000000, 0x0002c17e, 0x0002c17e, 0x0002c17e }, + { 0x0000a7b0, 0x00000000, 0x00000000, 0x000301be, 0x000301be, 0x000301be }, + { 0x0000a7b4, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, + { 0x0000a7b8, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, + { 0x0000a7bc, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, + { 0x0000a7c0, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, + { 0x0000a7c4, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, + { 0x0000a7c8, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, + { 0x0000a7cc, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, + { 0x0000a7d0, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, + { 0x0000a7d4, 0x00000000, 0x00000000, 0x000341fe, 0x000341fe, 0x000341fe }, + { 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 }, +}; + + +static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = { + /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ + { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, + { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, + { 0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 }, + { 0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c }, + { 0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 }, + { 0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 }, + { 0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 }, + { 0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c }, + { 0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 }, + { 0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 }, + { 0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 }, + { 0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c }, + { 0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 }, + { 0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 }, + { 0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 }, + { 0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 }, + { 0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 }, + { 0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac }, + { 0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 }, + { 0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 }, + { 0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 }, + { 0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 }, + { 0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 }, + { 0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c }, + { 0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 }, + { 0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 }, + { 0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 }, + { 0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c }, + { 0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 }, + { 0x00009a74, 0x00000000, 
0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 }, + { 0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 }, + { 0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c }, + { 0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 }, + { 0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 }, + { 0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 }, + { 0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 }, + { 0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 }, + { 0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 }, + { 0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 }, + { 0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c }, + { 0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 }, + { 0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 }, + { 0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 }, + { 0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c }, + { 0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 }, + { 0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 }, + { 0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 }, + { 0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 }, + { 0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 }, + { 0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 }, + { 0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c }, + { 0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 }, + { 0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 }, + { 0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 }, + { 0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 }, + { 0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 }, + { 0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac }, + { 0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 }, + { 0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 }, + { 0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 }, + { 0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 }, + { 0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 }, + { 0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 }, + { 0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 }, + { 0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 }, + { 0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 }, + { 0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 }, + { 0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c }, + { 0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 }, + { 0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 }, + { 0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c }, + { 0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 }, + { 0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 }, + { 0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 }, + { 0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 
0x0000ffa4 }, + { 0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 }, + { 0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac }, + { 0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 }, + { 0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 }, + { 0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 }, + { 0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 }, + { 0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 }, + { 0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad }, + { 0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 }, + { 0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 }, + { 0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 }, + { 0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 }, + { 0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 }, + { 0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd }, + { 0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 }, + { 0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 }, + { 0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 }, + { 0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 }, + { 0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca }, + { 0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce }, + { 0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 }, + { 0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 }, + { 0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda }, + { 0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 }, + { 0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb }, + { 0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf }, + { 0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 }, + { 0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 }, + { 0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009be0, 0x00000000, 
0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, + { 0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, + { 0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 }, + { 0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c }, + { 0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 }, + { 0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 }, + { 0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 }, + { 0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c }, + { 0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 }, + { 0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 }, + { 0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 }, + { 0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c }, + { 0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 }, + { 0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 }, + { 0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 }, + { 0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 }, + { 0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 }, + { 0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac }, + { 0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 }, + { 0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 }, + { 0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 }, + { 0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 }, + { 0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 }, + { 0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c }, + { 0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 }, + { 0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 }, + { 0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 }, + { 0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c }, + { 0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 }, + { 0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 }, + { 0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 }, + { 0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c }, + { 0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 }, + { 0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 }, + { 0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 }, + { 0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 }, + { 0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 }, + { 0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 
0x0000cb04 }, + { 0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 }, + { 0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c }, + { 0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 }, + { 0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 }, + { 0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 }, + { 0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c }, + { 0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 }, + { 0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 }, + { 0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 }, + { 0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 }, + { 0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 }, + { 0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 }, + { 0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c }, + { 0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 }, + { 0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 }, + { 0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 }, + { 0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 }, + { 0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 }, + { 0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac }, + { 0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 }, + { 0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 }, + { 0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 }, + { 0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 }, + { 0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 }, + { 0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 }, + { 0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 }, + { 0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 }, + { 0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 }, + { 0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 }, + { 0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c }, + { 0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 }, + { 0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 }, + { 0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c }, + { 0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 }, + { 0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 }, + { 0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 }, + { 0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 }, + { 0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 }, + { 0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac }, + { 0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 }, + { 0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 }, + { 0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 }, + { 0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 }, + { 0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 }, + { 0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad }, + { 0x0000ab4c, 0x00000000, 
0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 }, + { 0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 }, + { 0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 }, + { 0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 }, + { 0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 }, + { 0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd }, + { 0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 }, + { 0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 }, + { 0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 }, + { 0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 }, + { 0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca }, + { 0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce }, + { 0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 }, + { 0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 }, + { 0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda }, + { 0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 }, + { 0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb }, + { 0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf }, + { 0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 }, + { 0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 }, + { 0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 
0x00001067 }, + { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 }, +}; + +static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = { + {0x00004040, 0x9248fd00 }, + {0x00004040, 0x24924924 }, + {0x00004040, 0xa8000019 }, + {0x00004040, 0x13160820 }, + {0x00004040, 0xe5980560 }, + {0x00004040, 0xc01dcffd }, + {0x00004040, 0x1aaabe41 }, + {0x00004040, 0xbe105554 }, + {0x00004040, 0x00043007 }, + {0x00004044, 0x00000000 }, +}; + +static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = { + {0x00004040, 0x9248fd00 }, + {0x00004040, 0x24924924 }, + {0x00004040, 0xa8000019 }, + {0x00004040, 0x13160820 }, + {0x00004040, 0xe5980560 }, + {0x00004040, 0xc01dcffc }, + {0x00004040, 0x1aaabe41 }, + {0x00004040, 0xbe105554 }, + {0x00004040, 0x00043007 }, + {0x00004044, 0x00000000 }, +}; + +/* AR9287 Revision 11 */ + +static const u_int32_t ar9287Modes_9287_1_1[][6] = { + /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ + { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, + { 0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b, 0x0988004f }, + { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 }, + { 0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a, 0x0000320a }, + { 0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440, 0x00006880 }, + { 0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e, 0x01000e0e }, + { 0x00009828, 0x00000000, 0x00000000, 0x3a020001, 0x3a020001, 0x3a020001 }, + { 0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e, 0x206a012e }, + { 0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0, 0x037216a0 }, + { 0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 }, + { 0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, + { 0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e, 0x31395d5e }, + { 0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20, 0x00058d18 }, + { 0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, + { 0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881, 0x06903881 }, + { 0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, + { 0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010, 0xefbc1010 }, + { 0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 }, + { 0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010 }, + { 0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210, 0x00000210 }, + { 0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce, 0x000003ce }, + { 0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c, 0x0000001c }, + { 
0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00, 0x00000c00 }, + { 0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, + { 0x0000a204, 0x00000440, 0x00000440, 0x00000444, 0x00000444, 0x00000444 }, + { 0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000, 0x0004a000 }, + { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, + { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, +}; + +static const u_int32_t ar9287Common_9287_1_1[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00004024, 0x0000001f }, + { 0x00004060, 0x00000000 }, + { 0x00004064, 0x00000000 }, + { 0x00007010, 0x00000033 }, + { 0x00007020, 0x00000000 }, + { 0x00007034, 0x00000002 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 
0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x00008070, 0x00000000 }, + { 0x000080c0, 0x2a80001a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0xffffffff }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x18487320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c0, 0x00000000 }, + { 0x000081c4, 0x00000000 }, + { 0x000081d4, 0x00000000 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008264, 0x88a00010 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x000000ff }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x0000829c, 0x00000000 }, + { 0x00008300, 0x00000040 }, + { 0x00008314, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00ff0000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00008344, 0x01c81043 }, + { 0x00008360, 0xffffffff }, + { 0x00008364, 0xffffffff }, + { 0x00008368, 0x00000000 }, + { 0x00008370, 0x00000000 }, + { 0x00008374, 0x000000ff }, + { 0x00008378, 0x00000000 }, + { 0x0000837c, 0x00000000 }, + { 0x00008380, 0xffffffff }, + { 0x00008384, 0xffffffff }, + { 0x00008390, 0x0fffffff }, + { 0x00008394, 0x0fffffff }, + { 0x00008398, 0x00000000 }, + { 0x0000839c, 0x00000000 }, + { 0x000083a0, 0x00000000 }, + { 0x00009808, 0x00000000 }, 
+ { 0x0000980c, 0xafe68e30 }, + { 0x00009810, 0xfd14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x0000984c, 0x0040233c }, + { 0x0000a84c, 0x0040233c }, + { 0x00009854, 0x00000044 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x00009910, 0x10002310 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x04900000 }, + { 0x0000a920, 0x04900000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009930, 0x00000000 }, + { 0x0000a930, 0x00000000 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280c00a }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5f3ca3de }, + { 0x00009958, 0x0108ecff }, + { 0x00009940, 0x14750604 }, + { 0x0000c95c, 0x004b6a8e }, + { 0x00009970, 0x990bb514 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x0c6f0000 }, + { 0x000099b0, 0x03051000 }, + { 0x000099b4, 0x00000820 }, + { 0x000099c4, 0x06336f77 }, + { 0x000099c8, 0x6af6532f }, + { 0x000099cc, 0x08f186c8 }, + { 0x000099d0, 0x00046384 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000000 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099f0, 0x00000000 }, + { 0x000099fc, 0x00001042 }, + { 0x0000a208, 0x803e4788 }, + { 0x0000a210, 0x4080a333 }, + { 0x0000a214, 0x40206c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x01834061 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x000003b5 }, + { 0x0000a22c, 0x233f7180 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c889af }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00000000 }, + { 0x0000a248, 0xfffffffc }, + { 0x0000a24c, 0x00000000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cdbd380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a264, 0x00418a11 }, + { 0x0000b264, 0x00418a11 }, + { 0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0e79e5c6 }, + { 0x0000b26c, 0x0e79e5c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x050701ce }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000b398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, + { 0x0000a3e4, 0x00000000 }, + { 0x0000a3e8, 0x18c43433 }, + { 0x0000a3ec, 0x00f70081 }, + { 0x0000a3f0, 0x01036a1e }, + { 0x0000a3f4, 0x00000000 }, + { 0x0000b3f4, 0x00000000 }, + { 0x0000a7d8, 0x000003f1 }, + { 0x00007800, 0x00000800 }, + { 0x00007804, 0x6c35ffd2 }, + { 0x00007808, 0x6db6c000 }, + { 0x0000780c, 0x6db6cb30 }, + { 0x00007810, 0x6db6cb6c }, + { 0x00007814, 0x0501e200 }, + { 0x00007818, 0x0094128d }, + { 0x0000781c, 0x976ee392 }, + { 
0x00007820, 0xf75ff6fc },
+ { 0x00007824, 0x00040000 },
+ { 0x00007828, 0xdb003012 },
+ { 0x0000782c, 0x04924914 },
+ { 0x00007830, 0x21084210 },
+ { 0x00007834, 0x00140000 },
+ { 0x00007838, 0x0e4548d8 },
+ { 0x0000783c, 0x54214514 },
+ { 0x00007840, 0x02025830 },
+ { 0x00007844, 0x71c0d388 },
+ { 0x00007848, 0x934934a8 },
+ { 0x00007850, 0x00000000 },
+ { 0x00007854, 0x00000800 },
+ { 0x00007858, 0x6c35ffd2 },
+ { 0x0000785c, 0x6db6c000 },
+ { 0x00007860, 0x6db6cb30 },
+ { 0x00007864, 0x6db6cb6c },
+ { 0x00007868, 0x0501e200 },
+ { 0x0000786c, 0x0094128d },
+ { 0x00007870, 0x976ee392 },
+ { 0x00007874, 0xf75ff6fc },
+ { 0x00007878, 0x00040000 },
+ { 0x0000787c, 0xdb003012 },
+ { 0x00007880, 0x04924914 },
+ { 0x00007884, 0x21084210 },
+ { 0x00007888, 0x001b6db0 },
+ { 0x0000788c, 0x00376b63 },
+ { 0x00007890, 0x06db6db6 },
+ { 0x00007894, 0x006d8000 },
+ { 0x00007898, 0x48100000 },
+ { 0x0000789c, 0x00000000 },
+ { 0x000078a0, 0x08000000 },
+ { 0x000078a4, 0x0007ffd8 },
+ { 0x000078a8, 0x0007ffd8 },
+ { 0x000078ac, 0x001c0020 },
+ { 0x000078b0, 0x00060aeb },
+ { 0x000078b4, 0x40008080 },
+ { 0x000078b8, 0x2a850160 },
+};
+
+/*
+ * For Japanese regulatory requirements, 2484 MHz requires the following three
+ * registers be programmed differently from the channels between 2412 and 2472 MHz.
+ */
+static const u_int32_t ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
+ { 0x0000a1f4, 0x00fffeff },
+ { 0x0000a1f8, 0x00f5f9ff },
+ { 0x0000a1fc, 0xb79f6427 },
+};
+
+static const u_int32_t ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
+ { 0x0000a1f4, 0x00000000 },
+ { 0x0000a1f8, 0xefff0301 },
+ { 0x0000a1fc, 0xca9228ee },
+};
+
+static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
+ /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004, 0x00008004 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a, 0x0000c00a },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c, 0x0001000c },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b, 0x0001420b },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a, 0x0001824a },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a, 0x0001c44a },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a, 0x0002064a },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a, 0x0002484a },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a, 0x00028a4a },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a, 0x00030e4a },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a, 0x00034e8a },
+ { 0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c, 0x00038e8c },
+ { 0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc, 0x0003cecc },
+ { 0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4, 0x00040ed4 },
+ { 0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc, 0x00044edc },
+ { 0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede, 0x00048ede },
+ { 0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e },
+ { 0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e, 0x00050f5e },
+ { 0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e, 0x00054f9e },
+ { 0x0000a780, 0x00000000, 0x00000000, 0x00000062,
0x00000062, 0x00000062 }, + { 0x0000a784, 0x00000000, 0x00000000, 0x00004064, 0x00004064, 0x00004064 }, + { 0x0000a788, 0x00000000, 0x00000000, 0x000080a4, 0x000080a4, 0x000080a4 }, + { 0x0000a78c, 0x00000000, 0x00000000, 0x0000c0aa, 0x0000c0aa, 0x0000c0aa }, + { 0x0000a790, 0x00000000, 0x00000000, 0x000100ac, 0x000100ac, 0x000100ac }, + { 0x0000a794, 0x00000000, 0x00000000, 0x000140b4, 0x000140b4, 0x000140b4 }, + { 0x0000a798, 0x00000000, 0x00000000, 0x000180f4, 0x000180f4, 0x000180f4 }, + { 0x0000a79c, 0x00000000, 0x00000000, 0x0001c134, 0x0001c134, 0x0001c134 }, + { 0x0000a7a0, 0x00000000, 0x00000000, 0x00020174, 0x00020174, 0x00020174 }, + { 0x0000a7a4, 0x00000000, 0x00000000, 0x0002417c, 0x0002417c, 0x0002417c }, + { 0x0000a7a8, 0x00000000, 0x00000000, 0x0002817e, 0x0002817e, 0x0002817e }, + { 0x0000a7ac, 0x00000000, 0x00000000, 0x0002c1be, 0x0002c1be, 0x0002c1be }, + { 0x0000a7b0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, + { 0x0000a7b4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, + { 0x0000a7b8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, + { 0x0000a7bc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, + { 0x0000a7c0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, + { 0x0000a7c4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, + { 0x0000a7c8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, + { 0x0000a7cc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, + { 0x0000a7d0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, + { 0x0000a7d4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe }, + { 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 }, +}; + +static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = { + /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ + { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, + { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, + { 0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 }, + { 0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c }, + { 0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 }, + { 0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 }, + { 0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 }, + { 0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c }, + { 0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 }, + { 0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 }, + { 0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 }, + { 0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c }, + { 0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 }, + { 0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 }, + { 0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 }, + { 0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 }, + { 0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 }, + { 0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac }, + { 0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 }, + { 0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 }, + { 0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 }, + { 0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 
0x0000a2c4, 0x0000a2c4 }, + { 0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 }, + { 0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c }, + { 0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 }, + { 0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 }, + { 0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 }, + { 0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c }, + { 0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 }, + { 0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 }, + { 0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 }, + { 0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c }, + { 0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 }, + { 0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 }, + { 0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 }, + { 0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 }, + { 0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 }, + { 0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 }, + { 0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 }, + { 0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c }, + { 0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 }, + { 0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 }, + { 0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 }, + { 0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c }, + { 0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 }, + { 0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 }, + { 0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 }, + { 0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 }, + { 0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 }, + { 0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 }, + { 0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c }, + { 0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 }, + { 0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 }, + { 0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 }, + { 0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 }, + { 0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 }, + { 0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac }, + { 0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 }, + { 0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 }, + { 0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 }, + { 0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 }, + { 0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 }, + { 0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 }, + { 0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 }, + { 0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 }, + { 0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 }, + { 0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 }, + { 0x00009b0c, 
0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c }, + { 0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 }, + { 0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 }, + { 0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c }, + { 0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 }, + { 0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 }, + { 0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 }, + { 0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 }, + { 0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8 }, + { 0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac }, + { 0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 }, + { 0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 }, + { 0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 }, + { 0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 }, + { 0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 }, + { 0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad }, + { 0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 }, + { 0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 }, + { 0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 }, + { 0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 }, + { 0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 }, + { 0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd }, + { 0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 }, + { 0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 }, + { 0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 }, + { 0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 }, + { 0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca }, + { 0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce }, + { 0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 }, + { 0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 }, + { 0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda }, + { 0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 }, + { 0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb }, + { 0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf }, + { 0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 }, + { 0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 }, + { 0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 
0x0000ffdb, 0x0000ffdb }, + { 0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, + { 0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, + { 0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128 }, + { 0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c }, + { 0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130 }, + { 0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194 }, + { 0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198 }, + { 0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c }, + { 0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210 }, + { 0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284 }, + { 0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288 }, + { 0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c }, + { 0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290 }, + { 0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294 }, + { 0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0 }, + { 0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4 }, + { 0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8 }, + { 0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac }, + { 0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0 }, + { 0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4 }, + { 0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8 }, + { 0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4 }, + { 0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708 }, + { 0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c }, + { 0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710 }, + { 0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04 }, + { 0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08 }, + { 0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c }, + { 0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10 }, + { 0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14 }, + { 0x0000aa78, 
0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18 }, + { 0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c }, + { 0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90 }, + { 0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94 }, + { 0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98 }, + { 0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4 }, + { 0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8 }, + { 0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04 }, + { 0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08 }, + { 0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c }, + { 0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10 }, + { 0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14 }, + { 0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18 }, + { 0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c }, + { 0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90 }, + { 0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18 }, + { 0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24 }, + { 0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28 }, + { 0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314 }, + { 0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318 }, + { 0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c }, + { 0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390 }, + { 0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394 }, + { 0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398 }, + { 0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4 }, + { 0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8 }, + { 0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac }, + { 0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0 }, + { 0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380 }, + { 0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384 }, + { 0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388 }, + { 0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710 }, + { 0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714 }, + { 0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718 }, + { 0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10 }, + { 0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14 }, + { 0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18 }, + { 0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c }, + { 0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90 }, + { 0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94 }, + { 0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c }, + { 0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90 }, + { 0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94 }, + { 0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0 }, + { 0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4 }, + { 0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 
0x0000ffa8, 0x0000ffa8 }, + { 0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac }, + { 0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0 }, + { 0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4 }, + { 0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1 }, + { 0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5 }, + { 0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9 }, + { 0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad }, + { 0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1 }, + { 0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5 }, + { 0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9 }, + { 0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5 }, + { 0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9 }, + { 0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd }, + { 0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1 }, + { 0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5 }, + { 0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2 }, + { 0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6 }, + { 0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca }, + { 0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce }, + { 0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2 }, + { 0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6 }, + { 0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda }, + { 0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7 }, + { 0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb }, + { 0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf }, + { 0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3 }, + { 0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7 }, + { 0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb }, + { 0x0000abe4, 
0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
+ { 0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
+ { 0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
+ { 0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
+ { 0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
+ { 0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
+ { 0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb },
+ { 0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
+ { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
+};
+
+static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
+ {0x00004040, 0x9248fd00 },
+ {0x00004040, 0x24924924 },
+ {0x00004040, 0xa8000019 },
+ {0x00004040, 0x13160820 },
+ {0x00004040, 0xe5980560 },
+ {0x00004040, 0xc01dcffd },
+ {0x00004040, 0x1aaabe41 },
+ {0x00004040, 0xbe105554 },
+ {0x00004040, 0x00043007 },
+ {0x00004044, 0x00000000 },
+};
+
+static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
+ {0x00004040, 0x9248fd00 },
+ {0x00004040, 0x24924924 },
+ {0x00004040, 0xa8000019 },
+ {0x00004040, 0x13160820 },
+ {0x00004040, 0xe5980560 },
+ {0x00004040, 0xc01dcffc },
+ {0x00004040, 0x1aaabe41 },
+ {0x00004040, 0xbe105554 },
+ {0x00004040, 0x00043007 },
+ {0x00004044, 0x00000000 },
+};
+
+
+/* AR9271 initialization values automatically created: 06/04/09 */
+static const u_int32_t ar9271Modes_9271[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
+ { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e },
+ { 0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x03721620, 0x03721620, 0x037216a0 },
+ { 0x00009848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 },
+ { 0x0000a848, 0x00001066, 0x00001066, 0x00001053, 0x00001053, 0x00001059 },
+ { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
+ { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e },
+ { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18 },
+ { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
+ { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
+ { 0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310, 0x30002310 },
+ { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130,
0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, + { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010 }, + { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c }, + { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, + { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00009a00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, + { 0x00009a04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, + { 0x00009a08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, + { 0x00009a0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, + { 0x00009a10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, + { 0x00009a14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, + { 0x00009a18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, + { 0x00009a1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, + { 0x00009a20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, + { 0x00009a24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, + { 0x00009a28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, + { 0x00009a2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, + { 0x00009a30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, + { 0x00009a34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 }, + { 0x00009a38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, + { 0x00009a3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, + { 0x00009a40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, + { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, + { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, + { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, + { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, + { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, + { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, + { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, + { 0x00009a60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, + { 0x00009a64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, + { 0x00009a68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, + { 0x00009a6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, + { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, + { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, + { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, + { 0x00009a7c, 
0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, + { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, + { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, + { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, + { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, + { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, + { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, + { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, + { 0x00009a9c, 0x00000000, 0x00000000, 0x00078b84, 0x00078b84, 0x00000000 }, + { 0x00009aa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, + { 0x00009aa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, + { 0x00009aa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, + { 0x00009aac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, + { 0x00009ab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, + { 0x00009ab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, + { 0x00009ab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, + { 0x00009abc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, + { 0x00009ac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, + { 0x00009ac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, + { 0x00009ac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 }, + { 0x00009acc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, + { 0x00009ad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, + { 0x00009ad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, + { 0x00009ad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, + { 0x00009adc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, + { 0x00009ae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, + { 0x00009ae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, + { 0x00009ae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, + { 0x00009aec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, + { 0x00009af0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, + { 0x00009af4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, + { 0x00009af8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, + { 0x00009afc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, + { 0x00009b00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, + { 0x00009b04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, + { 0x00009b08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, + { 0x00009b0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, + { 0x00009b10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, + { 0x00009b14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, + { 0x00009b18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, + { 0x00009b1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, + { 0x00009b20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, + { 0x00009b24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, + { 0x00009b28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, + { 0x00009b2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, + { 0x00009b30, 0x00000000, 0x00000000, 0x000eb7d1, 
0x000eb7d1, 0x00000000 }, + { 0x00009b34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, + { 0x00009b38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, + { 0x00009b3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, + { 0x00009b40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, + { 0x00009b44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, + { 0x00009b48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, + { 0x00009b4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, + { 0x00009b50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, + { 0x00009b54, 0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, + { 0x00009b58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, + { 0x00009b5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, + { 0x00009b60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, + { 0x00009b64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009b9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009ba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009ba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009ba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009be0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009be4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009be8, 
0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x00009bfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000aa00, 0x00000000, 0x00000000, 0x00058084, 0x00058084, 0x00000000 }, + { 0x0000aa04, 0x00000000, 0x00000000, 0x00058088, 0x00058088, 0x00000000 }, + { 0x0000aa08, 0x00000000, 0x00000000, 0x0005808c, 0x0005808c, 0x00000000 }, + { 0x0000aa0c, 0x00000000, 0x00000000, 0x00058100, 0x00058100, 0x00000000 }, + { 0x0000aa10, 0x00000000, 0x00000000, 0x00058104, 0x00058104, 0x00000000 }, + { 0x0000aa14, 0x00000000, 0x00000000, 0x00058108, 0x00058108, 0x00000000 }, + { 0x0000aa18, 0x00000000, 0x00000000, 0x0005810c, 0x0005810c, 0x00000000 }, + { 0x0000aa1c, 0x00000000, 0x00000000, 0x00058110, 0x00058110, 0x00000000 }, + { 0x0000aa20, 0x00000000, 0x00000000, 0x00058114, 0x00058114, 0x00000000 }, + { 0x0000aa24, 0x00000000, 0x00000000, 0x00058180, 0x00058180, 0x00000000 }, + { 0x0000aa28, 0x00000000, 0x00000000, 0x00058184, 0x00058184, 0x00000000 }, + { 0x0000aa2c, 0x00000000, 0x00000000, 0x00058188, 0x00058188, 0x00000000 }, + { 0x0000aa30, 0x00000000, 0x00000000, 0x0005818c, 0x0005818c, 0x00000000 }, + { 0x0000aa34, 0x00000000, 0x00000000, 0x00058190, 0x00058190, 0x00000000 }, + { 0x0000aa38, 0x00000000, 0x00000000, 0x00058194, 0x00058194, 0x00000000 }, + { 0x0000aa3c, 0x00000000, 0x00000000, 0x000581a0, 0x000581a0, 0x00000000 }, + { 0x0000aa40, 0x00000000, 0x00000000, 0x0005820c, 0x0005820c, 0x00000000 }, + { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, + { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, + { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, + { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, + { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, + { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, + { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, + { 0x0000aa60, 0x00000000, 0x00000000, 0x00058308, 0x00058308, 0x00000000 }, + { 0x0000aa64, 0x00000000, 0x00000000, 0x0005830c, 0x0005830c, 0x00000000 }, + { 0x0000aa68, 0x00000000, 0x00000000, 0x00058380, 0x00058380, 0x00000000 }, + { 0x0000aa6c, 0x00000000, 0x00000000, 0x00058384, 0x00058384, 0x00000000 }, + { 0x0000aa70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 }, + { 0x0000aa74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 }, + { 0x0000aa78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 }, + { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, + { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, + { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, + { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, + { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, + { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, + { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, + { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, + { 0x0000aa9c, 0x00000000, 0x00000000, 0x00078b84, 
0x00078b84, 0x00000000 }, + { 0x0000aaa0, 0x00000000, 0x00000000, 0x00078b88, 0x00078b88, 0x00000000 }, + { 0x0000aaa4, 0x00000000, 0x00000000, 0x00078b8c, 0x00078b8c, 0x00000000 }, + { 0x0000aaa8, 0x00000000, 0x00000000, 0x00078b90, 0x00078b90, 0x00000000 }, + { 0x0000aaac, 0x00000000, 0x00000000, 0x000caf80, 0x000caf80, 0x00000000 }, + { 0x0000aab0, 0x00000000, 0x00000000, 0x000caf84, 0x000caf84, 0x00000000 }, + { 0x0000aab4, 0x00000000, 0x00000000, 0x000caf88, 0x000caf88, 0x00000000 }, + { 0x0000aab8, 0x00000000, 0x00000000, 0x000caf8c, 0x000caf8c, 0x00000000 }, + { 0x0000aabc, 0x00000000, 0x00000000, 0x000caf90, 0x000caf90, 0x00000000 }, + { 0x0000aac0, 0x00000000, 0x00000000, 0x000db30c, 0x000db30c, 0x00000000 }, + { 0x0000aac4, 0x00000000, 0x00000000, 0x000db310, 0x000db310, 0x00000000 }, + { 0x0000aac8, 0x00000000, 0x00000000, 0x000db384, 0x000db384, 0x00000000 }, + { 0x0000aacc, 0x00000000, 0x00000000, 0x000db388, 0x000db388, 0x00000000 }, + { 0x0000aad0, 0x00000000, 0x00000000, 0x000db324, 0x000db324, 0x00000000 }, + { 0x0000aad4, 0x00000000, 0x00000000, 0x000eb704, 0x000eb704, 0x00000000 }, + { 0x0000aad8, 0x00000000, 0x00000000, 0x000eb6a4, 0x000eb6a4, 0x00000000 }, + { 0x0000aadc, 0x00000000, 0x00000000, 0x000eb6a8, 0x000eb6a8, 0x00000000 }, + { 0x0000aae0, 0x00000000, 0x00000000, 0x000eb710, 0x000eb710, 0x00000000 }, + { 0x0000aae4, 0x00000000, 0x00000000, 0x000eb714, 0x000eb714, 0x00000000 }, + { 0x0000aae8, 0x00000000, 0x00000000, 0x000eb720, 0x000eb720, 0x00000000 }, + { 0x0000aaec, 0x00000000, 0x00000000, 0x000eb724, 0x000eb724, 0x00000000 }, + { 0x0000aaf0, 0x00000000, 0x00000000, 0x000eb728, 0x000eb728, 0x00000000 }, + { 0x0000aaf4, 0x00000000, 0x00000000, 0x000eb72c, 0x000eb72c, 0x00000000 }, + { 0x0000aaf8, 0x00000000, 0x00000000, 0x000eb7a0, 0x000eb7a0, 0x00000000 }, + { 0x0000aafc, 0x00000000, 0x00000000, 0x000eb7a4, 0x000eb7a4, 0x00000000 }, + { 0x0000ab00, 0x00000000, 0x00000000, 0x000eb7a8, 0x000eb7a8, 0x00000000 }, + { 0x0000ab04, 0x00000000, 0x00000000, 0x000eb7b0, 0x000eb7b0, 0x00000000 }, + { 0x0000ab08, 0x00000000, 0x00000000, 0x000eb7b4, 0x000eb7b4, 0x00000000 }, + { 0x0000ab0c, 0x00000000, 0x00000000, 0x000eb7b8, 0x000eb7b8, 0x00000000 }, + { 0x0000ab10, 0x00000000, 0x00000000, 0x000eb7a5, 0x000eb7a5, 0x00000000 }, + { 0x0000ab14, 0x00000000, 0x00000000, 0x000eb7a9, 0x000eb7a9, 0x00000000 }, + { 0x0000ab18, 0x00000000, 0x00000000, 0x000eb7ad, 0x000eb7ad, 0x00000000 }, + { 0x0000ab1c, 0x00000000, 0x00000000, 0x000eb7b1, 0x000eb7b1, 0x00000000 }, + { 0x0000ab20, 0x00000000, 0x00000000, 0x000eb7b5, 0x000eb7b5, 0x00000000 }, + { 0x0000ab24, 0x00000000, 0x00000000, 0x000eb7b9, 0x000eb7b9, 0x00000000 }, + { 0x0000ab28, 0x00000000, 0x00000000, 0x000eb7c5, 0x000eb7c5, 0x00000000 }, + { 0x0000ab2c, 0x00000000, 0x00000000, 0x000eb7c9, 0x000eb7c9, 0x00000000 }, + { 0x0000ab30, 0x00000000, 0x00000000, 0x000eb7d1, 0x000eb7d1, 0x00000000 }, + { 0x0000ab34, 0x00000000, 0x00000000, 0x000eb7d5, 0x000eb7d5, 0x00000000 }, + { 0x0000ab38, 0x00000000, 0x00000000, 0x000eb7d9, 0x000eb7d9, 0x00000000 }, + { 0x0000ab3c, 0x00000000, 0x00000000, 0x000eb7c6, 0x000eb7c6, 0x00000000 }, + { 0x0000ab40, 0x00000000, 0x00000000, 0x000eb7ca, 0x000eb7ca, 0x00000000 }, + { 0x0000ab44, 0x00000000, 0x00000000, 0x000eb7ce, 0x000eb7ce, 0x00000000 }, + { 0x0000ab48, 0x00000000, 0x00000000, 0x000eb7d2, 0x000eb7d2, 0x00000000 }, + { 0x0000ab4c, 0x00000000, 0x00000000, 0x000eb7d6, 0x000eb7d6, 0x00000000 }, + { 0x0000ab50, 0x00000000, 0x00000000, 0x000eb7c3, 0x000eb7c3, 0x00000000 }, + { 0x0000ab54, 
0x00000000, 0x00000000, 0x000eb7c7, 0x000eb7c7, 0x00000000 }, + { 0x0000ab58, 0x00000000, 0x00000000, 0x000eb7cb, 0x000eb7cb, 0x00000000 }, + { 0x0000ab5c, 0x00000000, 0x00000000, 0x000eb7cf, 0x000eb7cf, 0x00000000 }, + { 0x0000ab60, 0x00000000, 0x00000000, 0x000eb7d7, 0x000eb7d7, 0x00000000 }, + { 0x0000ab64, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab68, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab6c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab70, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab74, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab78, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab7c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab80, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab84, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab88, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab8c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab90, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab94, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab98, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000ab9c, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000aba0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000aba4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000aba8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abac, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abb0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abb4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abb8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abbc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abc0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abc4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abc8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abcc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abd0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abd4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abd8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abdc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abe0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abe4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abe8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abec, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abf0, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abf4, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abf8, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000abfc, 0x00000000, 0x00000000, 0x000eb7db, 0x000eb7db, 0x00000000 }, + { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 }, + { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f000, 0x0001f000, 0x0001f000 }, + { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f000, 
0x0001f000, 0x0001f000 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 }, + { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 }, + { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, + { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 }, + { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 }, + { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 }, + { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 }, + { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 }, + { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 }, + { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 }, + { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 }, + { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 }, + { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 }, + { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 }, + { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 }, + { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 }, + { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, + { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, +}; + +static const u_int32_t ar9271Common_9271[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020045 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 
}, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00004024, 0x0000001f }, + { 0x00004060, 0x00000000 }, + { 0x00004064, 0x00000000 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x00000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x00008070, 0x00000000 }, + { 0x000080b0, 0x00000000 }, + { 0x000080b4, 0x00000000 }, + { 0x000080b8, 0x00000000 }, + { 0x000080bc, 0x00000000 }, + { 0x000080c0, 0x2a80001a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04810 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0xffffffff }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c0, 0x00000000 }, + { 0x000081d0, 0x0000320a }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008264, 0x88a00010 }, + { 
0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x0000829c, 0x00000000 }, + { 0x00008300, 0x00000040 }, + { 0x00008314, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000001 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00ff0000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x00010380 }, + { 0x00008344, 0x00581043 }, + { 0x00007010, 0x00000030 }, + { 0x00007034, 0x00000002 }, + { 0x00007038, 0x000004c2 }, + { 0x00007800, 0x00140000 }, + { 0x00007804, 0x0e4548d8 }, + { 0x00007808, 0x54214514 }, + { 0x0000780c, 0x02025820 }, + { 0x00007810, 0x71c0d388 }, + { 0x00007814, 0x924934a8 }, + { 0x0000781c, 0x00000000 }, + { 0x00007820, 0x00000c04 }, + { 0x00007824, 0x00d8abff }, + { 0x00007828, 0x66964300 }, + { 0x0000782c, 0x8db6d961 }, + { 0x00007830, 0x8db6d96c }, + { 0x00007834, 0x6140008b }, + { 0x00007838, 0x00000029 }, + { 0x0000783c, 0x72ee0a72 }, + { 0x00007840, 0xbbfffffc }, + { 0x00007844, 0x000c0db6 }, + { 0x00007848, 0x6db61b6f }, + { 0x0000784c, 0x6d9b66db }, + { 0x00007850, 0x6d8c6dba }, + { 0x00007854, 0x00040000 }, + { 0x00007858, 0xdb003012 }, + { 0x0000785c, 0x04924914 }, + { 0x00007860, 0x21084210 }, + { 0x00007864, 0xf7d7ffde }, + { 0x00007868, 0xc2034080 }, + { 0x0000786c, 0x48609eb4 }, + { 0x00007870, 0x10142c00 }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xafe68e30 }, + { 0x00009810, 0xfd14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x0000984c, 0x0040233c }, + { 0x00009854, 0x00000044 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x04900000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009940, 0x14750604 }, + { 0x00009948, 0x9280c00a }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5f3ca3de }, + { 0x00009958, 0x0108ecff }, + { 0x00009968, 0x000003ce }, + { 0x00009970, 0x192bb514 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x2def0400 }, + { 0x000099b0, 0x03051000 }, + { 0x000099b4, 0x00000820 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000000 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099f0, 0x00000000 }, + { 0x0000a1f4, 0x00000000 }, + { 0x0000a1f8, 0x71733d01 }, + { 0x0000a1fc, 0xd0ad5c12 }, + { 0x0000a208, 0x803e68c8 }, + { 0x0000a210, 0x4080a333 }, + { 0x0000a214, 0x00206c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x01834061 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x000003b5 }, + { 0x0000a22c, 0x00000000 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a244, 0x00000000 }, + { 0x0000a248, 0xfffffffc }, + { 0x0000a24c, 0x00000000 }, + { 0x0000a254, 
0x00000000 }, + { 0x0000a258, 0x0ccb5380 }, + { 0x0000a25c, 0x15151501 }, + { 0x0000a260, 0xdfa90f01 }, + { 0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0ebae9e6 }, + { 0x0000a278, 0x3bdef7bd }, + { 0x0000a27c, 0x050e83bd }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x3bdef7bd }, + { 0x0000a398, 0x000003bd }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x3bdef7bd }, + { 0x0000a3e0, 0x000003bd }, + { 0x0000a3e4, 0x00000000 }, + { 0x0000a3e8, 0x18c43433 }, + { 0x0000a3ec, 0x00f70081 }, + { 0x0000a3f0, 0x01036a2f }, + { 0x0000a3f4, 0x00000000 }, + { 0x0000d270, 0x0d820820 }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, +}; + +static const u_int32_t ar9271Modes_9271_1_0_only[][6] = { + { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, +}; diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c new file mode 100644 index 0000000..efc420c --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -0,0 +1,1067 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "hw.h" + +static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, + struct ath9k_tx_queue_info *qi) +{ + ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT, + "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", + ah->txok_interrupt_mask, ah->txerr_interrupt_mask, + ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, + ah->txurn_interrupt_mask); + + REG_WRITE(ah, AR_IMR_S0, + SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK) + | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC)); + REG_WRITE(ah, AR_IMR_S1, + SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR) + | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL)); + REG_RMW_FIELD(ah, AR_IMR_S2, + AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask); +} + +u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) +{ + return REG_READ(ah, AR_QTXDP(q)); +} +EXPORT_SYMBOL(ath9k_hw_gettxbuf); + +void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp) +{ + REG_WRITE(ah, AR_QTXDP(q), txdp); +} +EXPORT_SYMBOL(ath9k_hw_puttxbuf); + +void ath9k_hw_txstart(struct ath_hw *ah, u32 q) +{ + ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE, + "Enable TXE on queue: %u\n", q); + REG_WRITE(ah, AR_Q_TXE, 1 << q); +} +EXPORT_SYMBOL(ath9k_hw_txstart); + +u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) +{ + u32 npend; + + npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT; + if (npend == 0) { + + if (REG_READ(ah, AR_Q_TXE) & (1 << q)) + npend = 1; + } + + return npend; +} +EXPORT_SYMBOL(ath9k_hw_numtxpending); + +/** + * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level + * + * @ah: atheros hardware struct + * @bIncTrigLevel: whether or not the frame trigger level should be updated + * + * The frame trigger level specifies the minimum number of bytes, + * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO + * before the PCU will initiate sending the frame on the air. This can + * mean we initiate transmit before a full frame is on the PCU TX FIFO. + * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs + * first) + * + * Caution must be taken to ensure to set the frame trigger level based + * on the DMA request size. For example if the DMA request size is set to + * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because + * there need to be enough space in the tx FIFO for the requested transfer + * size. Hence the tx FIFO will stop with 512 - 128 = 384 bytes. If we set + * the threshold to a value beyond 6, then the transmit will hang. + * + * Current dual stream devices have a PCU TX FIFO size of 8 KB. + * Current single stream devices have a PCU TX FIFO size of 4 KB, however, + * there is a hardware issue which forces us to use 2 KB instead so the + * frame trigger level must not exceed 2 KB for these chipsets. 
+ */ +bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) +{ + u32 txcfg, curLevel, newLevel; + enum ath9k_int omask; + + if (ah->tx_trig_level >= ah->config.max_txtrig_level) + return false; + + omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL); + + txcfg = REG_READ(ah, AR_TXCFG); + curLevel = MS(txcfg, AR_FTRIG); + newLevel = curLevel; + if (bIncTrigLevel) { + if (curLevel < ah->config.max_txtrig_level) + newLevel++; + } else if (curLevel > MIN_TX_FIFO_THRESHOLD) + newLevel--; + if (newLevel != curLevel) + REG_WRITE(ah, AR_TXCFG, + (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG)); + + ath9k_hw_set_interrupts(ah, omask); + + ah->tx_trig_level = newLevel; + + return newLevel != curLevel; +} +EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel); + +bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q) +{ +#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */ +#define ATH9K_TIME_QUANTUM 100 /* usec */ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath9k_tx_queue_info *qi; + u32 tsfLow, j, wait; + u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM; + + if (q >= pCap->total_queues) { + ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, " + "invalid queue: %u\n", q); + return false; + } + + qi = &ah->txq[q]; + if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { + ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, " + "inactive queue: %u\n", q); + return false; + } + + REG_WRITE(ah, AR_Q_TXD, 1 << q); + + for (wait = wait_time; wait != 0; wait--) { + if (ath9k_hw_numtxpending(ah, q) == 0) + break; + udelay(ATH9K_TIME_QUANTUM); + } + + if (ath9k_hw_numtxpending(ah, q)) { + ath_print(common, ATH_DBG_QUEUE, + "%s: Num of pending TX Frames %d on Q %d\n", + __func__, ath9k_hw_numtxpending(ah, q), q); + + for (j = 0; j < 2; j++) { + tsfLow = REG_READ(ah, AR_TSF_L32); + REG_WRITE(ah, AR_QUIET2, + SM(10, AR_QUIET2_QUIET_DUR)); + REG_WRITE(ah, AR_QUIET_PERIOD, 100); + REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10); + REG_SET_BIT(ah, AR_TIMER_MODE, + AR_QUIET_TIMER_EN); + + if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) + break; + + ath_print(common, ATH_DBG_QUEUE, + "TSF has moved while trying to set " + "quiet time TSF: 0x%08x\n", tsfLow); + } + + REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); + + udelay(200); + REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN); + + wait = wait_time; + while (ath9k_hw_numtxpending(ah, q)) { + if ((--wait) == 0) { + ath_print(common, ATH_DBG_FATAL, + "Failed to stop TX DMA in 100 " + "msec after killing last frame\n"); + break; + } + udelay(ATH9K_TIME_QUANTUM); + } + + REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); + } + + REG_WRITE(ah, AR_Q_TXD, 0); + return wait != 0; + +#undef ATH9K_TX_STOP_DMA_TIMEOUT +#undef ATH9K_TIME_QUANTUM +} +EXPORT_SYMBOL(ath9k_hw_stoptxdma); + +void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds, + u32 segLen, bool firstSeg, + bool lastSeg, const struct ath_desc *ds0) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + if (firstSeg) { + ads->ds_ctl1 |= segLen | (lastSeg ? 
0 : AR_TxMore); + } else if (lastSeg) { + ads->ds_ctl0 = 0; + ads->ds_ctl1 = segLen; + ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2; + ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3; + } else { + ads->ds_ctl0 = 0; + ads->ds_ctl1 = segLen | AR_TxMore; + ads->ds_ctl2 = 0; + ads->ds_ctl3 = 0; + } + ads->ds_txstatus0 = ads->ds_txstatus1 = 0; + ads->ds_txstatus2 = ads->ds_txstatus3 = 0; + ads->ds_txstatus4 = ads->ds_txstatus5 = 0; + ads->ds_txstatus6 = ads->ds_txstatus7 = 0; + ads->ds_txstatus8 = ads->ds_txstatus9 = 0; +} +EXPORT_SYMBOL(ath9k_hw_filltxdesc); + +void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + ads->ds_txstatus0 = ads->ds_txstatus1 = 0; + ads->ds_txstatus2 = ads->ds_txstatus3 = 0; + ads->ds_txstatus4 = ads->ds_txstatus5 = 0; + ads->ds_txstatus6 = ads->ds_txstatus7 = 0; + ads->ds_txstatus8 = ads->ds_txstatus9 = 0; +} +EXPORT_SYMBOL(ath9k_hw_cleartxdesc); + +int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + if ((ads->ds_txstatus9 & AR_TxDone) == 0) + return -EINPROGRESS; + + ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum); + ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp; + ds->ds_txstat.ts_status = 0; + ds->ds_txstat.ts_flags = 0; + + if (ads->ds_txstatus1 & AR_FrmXmitOK) + ds->ds_txstat.ts_status |= ATH9K_TX_ACKED; + if (ads->ds_txstatus1 & AR_ExcessiveRetries) + ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY; + if (ads->ds_txstatus1 & AR_Filtered) + ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT; + if (ads->ds_txstatus1 & AR_FIFOUnderrun) { + ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO; + ath9k_hw_updatetxtriglevel(ah, true); + } + if (ads->ds_txstatus9 & AR_TxOpExceeded) + ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP; + if (ads->ds_txstatus1 & AR_TxTimerExpired) + ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED; + + if (ads->ds_txstatus1 & AR_DescCfgErr) + ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR; + if (ads->ds_txstatus1 & AR_TxDataUnderrun) { + ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN; + ath9k_hw_updatetxtriglevel(ah, true); + } + if (ads->ds_txstatus1 & AR_TxDelimUnderrun) { + ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN; + ath9k_hw_updatetxtriglevel(ah, true); + } + if (ads->ds_txstatus0 & AR_TxBaStatus) { + ds->ds_txstat.ts_flags |= ATH9K_TX_BA; + ds->ds_txstat.ba_low = ads->AR_BaBitmapLow; + ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh; + } + + ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx); + switch (ds->ds_txstat.ts_rateindex) { + case 0: + ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0); + break; + case 1: + ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1); + break; + case 2: + ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2); + break; + case 3: + ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3); + break; + } + + ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined); + ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00); + ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01); + ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02); + ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10); + ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11); + ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12); + ds->ds_txstat.evm0 = ads->AR_TxEVM0; + ds->ds_txstat.evm1 = ads->AR_TxEVM1; + ds->ds_txstat.evm2 = ads->AR_TxEVM2; + ds->ds_txstat.ts_shortretry = 
MS(ads->ds_txstatus1, AR_RTSFailCnt); + ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt); + ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt); + ds->ds_txstat.ts_antenna = 0; + + return 0; +} +EXPORT_SYMBOL(ath9k_hw_txprocdesc); + +void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds, + u32 pktLen, enum ath9k_pkt_type type, u32 txPower, + u32 keyIx, enum ath9k_key_type keyType, u32 flags) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + txPower += ah->txpower_indexoffset; + if (txPower > 63) + txPower = 63; + + ads->ds_ctl0 = (pktLen & AR_FrameLen) + | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(txPower, AR_XmitPower) + | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) + | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0) + | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0) + | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0); + + ads->ds_ctl1 = + (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0) + | SM(type, AR_FrameType) + | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0) + | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0) + | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0); + + ads->ds_ctl6 = SM(keyType, AR_EncrType); + + if (AR_SREV_9285(ah)) { + ads->ds_ctl8 = 0; + ads->ds_ctl9 = 0; + ads->ds_ctl10 = 0; + ads->ds_ctl11 = 0; + } +} +EXPORT_SYMBOL(ath9k_hw_set11n_txdesc); + +void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds, + struct ath_desc *lastds, + u32 durUpdateEn, u32 rtsctsRate, + u32 rtsctsDuration, + struct ath9k_11n_rate_series series[], + u32 nseries, u32 flags) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + struct ar5416_desc *last_ads = AR5416DESC(lastds); + u32 ds_ctl0; + + if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) { + ds_ctl0 = ads->ds_ctl0; + + if (flags & ATH9K_TXDESC_RTSENA) { + ds_ctl0 &= ~AR_CTSEnable; + ds_ctl0 |= AR_RTSEnable; + } else { + ds_ctl0 &= ~AR_RTSEnable; + ds_ctl0 |= AR_CTSEnable; + } + + ads->ds_ctl0 = ds_ctl0; + } else { + ads->ds_ctl0 = + (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable)); + } + + ads->ds_ctl2 = set11nTries(series, 0) + | set11nTries(series, 1) + | set11nTries(series, 2) + | set11nTries(series, 3) + | (durUpdateEn ? 
AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + + ads->ds_ctl3 = set11nRate(series, 0) + | set11nRate(series, 1) + | set11nRate(series, 2) + | set11nRate(series, 3); + + ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0) + | set11nPktDurRTSCTS(series, 1); + + ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2) + | set11nPktDurRTSCTS(series, 3); + + ads->ds_ctl7 = set11nRateFlags(series, 0) + | set11nRateFlags(series, 1) + | set11nRateFlags(series, 2) + | set11nRateFlags(series, 3) + | SM(rtsctsRate, AR_RTSCTSRate); + last_ads->ds_ctl2 = ads->ds_ctl2; + last_ads->ds_ctl3 = ads->ds_ctl3; +} +EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario); + +void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds, + u32 aggrLen) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr); + ads->ds_ctl6 &= ~AR_AggrLen; + ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen); +} +EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first); + +void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds, + u32 numDelims) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + unsigned int ctl6; + + ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr); + + ctl6 = ads->ds_ctl6; + ctl6 &= ~AR_PadDelim; + ctl6 |= SM(numDelims, AR_PadDelim); + ads->ds_ctl6 = ctl6; +} +EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle); + +void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + ads->ds_ctl1 |= AR_IsAggr; + ads->ds_ctl1 &= ~AR_MoreAggr; + ads->ds_ctl6 &= ~AR_PadDelim; +} +EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last); + +void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr); +} +EXPORT_SYMBOL(ath9k_hw_clr11n_aggr); + +void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds, + u32 burstDuration) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + ads->ds_ctl2 &= ~AR_BurstDur; + ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur); +} +EXPORT_SYMBOL(ath9k_hw_set11n_burstduration); + +void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds, + u32 vmf) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + if (vmf) + ads->ds_ctl0 |= AR_VirtMoreFrag; + else + ads->ds_ctl0 &= ~AR_VirtMoreFrag; +} + +void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs) +{ + *txqs &= ah->intr_txqs; + ah->intr_txqs &= ~(*txqs); +} +EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs); + +bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, + const struct ath9k_tx_queue_info *qinfo) +{ + u32 cw; + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath9k_tx_queue_info *qi; + + if (q >= pCap->total_queues) { + ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, " + "invalid queue: %u\n", q); + return false; + } + + qi = &ah->txq[q]; + if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { + ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, " + "inactive queue: %u\n", q); + return false; + } + + ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q); + + qi->tqi_ver = qinfo->tqi_ver; + qi->tqi_subtype = qinfo->tqi_subtype; + qi->tqi_qflags = qinfo->tqi_qflags; + qi->tqi_priority = qinfo->tqi_priority; + if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT) + qi->tqi_aifs = min(qinfo->tqi_aifs, 255U); + else + qi->tqi_aifs = INIT_AIFS; + if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) { + cw = min(qinfo->tqi_cwmin, 1024U); + qi->tqi_cwmin = 1; + while (qi->tqi_cwmin < cw) + qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1; + } 
else + qi->tqi_cwmin = qinfo->tqi_cwmin; + if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) { + cw = min(qinfo->tqi_cwmax, 1024U); + qi->tqi_cwmax = 1; + while (qi->tqi_cwmax < cw) + qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1; + } else + qi->tqi_cwmax = INIT_CWMAX; + + if (qinfo->tqi_shretry != 0) + qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U); + else + qi->tqi_shretry = INIT_SH_RETRY; + if (qinfo->tqi_lgretry != 0) + qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U); + else + qi->tqi_lgretry = INIT_LG_RETRY; + qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod; + qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit; + qi->tqi_burstTime = qinfo->tqi_burstTime; + qi->tqi_readyTime = qinfo->tqi_readyTime; + + switch (qinfo->tqi_subtype) { + case ATH9K_WME_UPSD: + if (qi->tqi_type == ATH9K_TX_QUEUE_DATA) + qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS; + break; + default: + break; + } + + return true; +} +EXPORT_SYMBOL(ath9k_hw_set_txq_props); + +bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, + struct ath9k_tx_queue_info *qinfo) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath9k_tx_queue_info *qi; + + if (q >= pCap->total_queues) { + ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, " + "invalid queue: %u\n", q); + return false; + } + + qi = &ah->txq[q]; + if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { + ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, " + "inactive queue: %u\n", q); + return false; + } + + qinfo->tqi_qflags = qi->tqi_qflags; + qinfo->tqi_ver = qi->tqi_ver; + qinfo->tqi_subtype = qi->tqi_subtype; + qinfo->tqi_qflags = qi->tqi_qflags; + qinfo->tqi_priority = qi->tqi_priority; + qinfo->tqi_aifs = qi->tqi_aifs; + qinfo->tqi_cwmin = qi->tqi_cwmin; + qinfo->tqi_cwmax = qi->tqi_cwmax; + qinfo->tqi_shretry = qi->tqi_shretry; + qinfo->tqi_lgretry = qi->tqi_lgretry; + qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod; + qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit; + qinfo->tqi_burstTime = qi->tqi_burstTime; + qinfo->tqi_readyTime = qi->tqi_readyTime; + + return true; +} +EXPORT_SYMBOL(ath9k_hw_get_txq_props); + +int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, + const struct ath9k_tx_queue_info *qinfo) +{ + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_tx_queue_info *qi; + struct ath9k_hw_capabilities *pCap = &ah->caps; + int q; + + switch (type) { + case ATH9K_TX_QUEUE_BEACON: + q = pCap->total_queues - 1; + break; + case ATH9K_TX_QUEUE_CAB: + q = pCap->total_queues - 2; + break; + case ATH9K_TX_QUEUE_PSPOLL: + q = 1; + break; + case ATH9K_TX_QUEUE_UAPSD: + q = pCap->total_queues - 3; + break; + case ATH9K_TX_QUEUE_DATA: + for (q = 0; q < pCap->total_queues; q++) + if (ah->txq[q].tqi_type == + ATH9K_TX_QUEUE_INACTIVE) + break; + if (q == pCap->total_queues) { + ath_print(common, ATH_DBG_FATAL, + "No available TX queue\n"); + return -1; + } + break; + default: + ath_print(common, ATH_DBG_FATAL, + "Invalid TX queue type: %u\n", type); + return -1; + } + + ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q); + + qi = &ah->txq[q]; + if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { + ath_print(common, ATH_DBG_FATAL, + "TX queue: %u already active\n", q); + return -1; + } + memset(qi, 0, sizeof(struct ath9k_tx_queue_info)); + qi->tqi_type = type; + if (qinfo == NULL) { + qi->tqi_qflags = + TXQ_FLAG_TXOKINT_ENABLE + | TXQ_FLAG_TXERRINT_ENABLE + | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE; + qi->tqi_aifs = INIT_AIFS; + qi->tqi_cwmin = 
ATH9K_TXQ_USEDEFAULT; + qi->tqi_cwmax = INIT_CWMAX; + qi->tqi_shretry = INIT_SH_RETRY; + qi->tqi_lgretry = INIT_LG_RETRY; + qi->tqi_physCompBuf = 0; + } else { + qi->tqi_physCompBuf = qinfo->tqi_physCompBuf; + (void) ath9k_hw_set_txq_props(ah, q, qinfo); + } + + return q; +} +EXPORT_SYMBOL(ath9k_hw_setuptxqueue); + +bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q) +{ + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_tx_queue_info *qi; + + if (q >= pCap->total_queues) { + ath_print(common, ATH_DBG_QUEUE, "Release TXQ, " + "invalid queue: %u\n", q); + return false; + } + qi = &ah->txq[q]; + if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { + ath_print(common, ATH_DBG_QUEUE, "Release TXQ, " + "inactive queue: %u\n", q); + return false; + } + + ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q); + + qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; + ah->txok_interrupt_mask &= ~(1 << q); + ah->txerr_interrupt_mask &= ~(1 << q); + ah->txdesc_interrupt_mask &= ~(1 << q); + ah->txeol_interrupt_mask &= ~(1 << q); + ah->txurn_interrupt_mask &= ~(1 << q); + ath9k_hw_set_txq_interrupts(ah, qi); + + return true; +} +EXPORT_SYMBOL(ath9k_hw_releasetxqueue); + +bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) +{ + struct ath9k_hw_capabilities *pCap = &ah->caps; + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_channel *chan = ah->curchan; + struct ath9k_tx_queue_info *qi; + u32 cwMin, chanCwMin, value; + + if (q >= pCap->total_queues) { + ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, " + "invalid queue: %u\n", q); + return false; + } + + qi = &ah->txq[q]; + if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { + ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, " + "inactive queue: %u\n", q); + return true; + } + + ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q); + + if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { + if (chan && IS_CHAN_B(chan)) + chanCwMin = INIT_CWMIN_11B; + else + chanCwMin = INIT_CWMIN; + + for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1); + } else + cwMin = qi->tqi_cwmin; + + REG_WRITE(ah, AR_DLCL_IFS(q), + SM(cwMin, AR_D_LCL_IFS_CWMIN) | + SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) | + SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); + + REG_WRITE(ah, AR_DRETRY_LIMIT(q), + SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) | + SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) | + SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)); + + REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); + REG_WRITE(ah, AR_DMISC(q), + AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2); + + if (qi->tqi_cbrPeriod) { + REG_WRITE(ah, AR_QCBRCFG(q), + SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) | + SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH)); + REG_WRITE(ah, AR_QMISC(q), + REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR | + (qi->tqi_cbrOverflowLimit ? + AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0)); + } + if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) { + REG_WRITE(ah, AR_QRDYTIMECFG(q), + SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) | + AR_Q_RDYTIMECFG_EN); + } + + REG_WRITE(ah, AR_DCHNTIME(q), + SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) | + (qi->tqi_burstTime ? 
AR_D_CHNTIME_EN : 0)); + + if (qi->tqi_burstTime + && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) { + REG_WRITE(ah, AR_QMISC(q), + REG_READ(ah, AR_QMISC(q)) | + AR_Q_MISC_RDYTIME_EXP_POLICY); + + } + + if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) { + REG_WRITE(ah, AR_DMISC(q), + REG_READ(ah, AR_DMISC(q)) | + AR_D_MISC_POST_FR_BKOFF_DIS); + } + if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) { + REG_WRITE(ah, AR_DMISC(q), + REG_READ(ah, AR_DMISC(q)) | + AR_D_MISC_FRAG_BKOFF_EN); + } + switch (qi->tqi_type) { + case ATH9K_TX_QUEUE_BEACON: + REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) + | AR_Q_MISC_FSP_DBA_GATED + | AR_Q_MISC_BEACON_USE + | AR_Q_MISC_CBR_INCR_DIS1); + + REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) + | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << + AR_D_MISC_ARB_LOCKOUT_CNTRL_S) + | AR_D_MISC_BEACON_USE + | AR_D_MISC_POST_FR_BKOFF_DIS); + break; + case ATH9K_TX_QUEUE_CAB: + REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) + | AR_Q_MISC_FSP_DBA_GATED + | AR_Q_MISC_CBR_INCR_DIS1 + | AR_Q_MISC_CBR_INCR_DIS0); + value = (qi->tqi_readyTime - + (ah->config.sw_beacon_response_time - + ah->config.dma_beacon_response_time) - + ah->config.additional_swba_backoff) * 1024; + REG_WRITE(ah, AR_QRDYTIMECFG(q), + value | AR_Q_RDYTIMECFG_EN); + REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) + | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << + AR_D_MISC_ARB_LOCKOUT_CNTRL_S)); + break; + case ATH9K_TX_QUEUE_PSPOLL: + REG_WRITE(ah, AR_QMISC(q), + REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1); + break; + case ATH9K_TX_QUEUE_UAPSD: + REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) | + AR_D_MISC_POST_FR_BKOFF_DIS); + break; + default: + break; + } + + if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) { + REG_WRITE(ah, AR_DMISC(q), + REG_READ(ah, AR_DMISC(q)) | + SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL, + AR_D_MISC_ARB_LOCKOUT_CNTRL) | + AR_D_MISC_POST_FR_BKOFF_DIS); + } + + if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE) + ah->txok_interrupt_mask |= 1 << q; + else + ah->txok_interrupt_mask &= ~(1 << q); + if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE) + ah->txerr_interrupt_mask |= 1 << q; + else + ah->txerr_interrupt_mask &= ~(1 << q); + if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE) + ah->txdesc_interrupt_mask |= 1 << q; + else + ah->txdesc_interrupt_mask &= ~(1 << q); + if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE) + ah->txeol_interrupt_mask |= 1 << q; + else + ah->txeol_interrupt_mask &= ~(1 << q); + if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE) + ah->txurn_interrupt_mask |= 1 << q; + else + ah->txurn_interrupt_mask &= ~(1 << q); + ath9k_hw_set_txq_interrupts(ah, qi); + + return true; +} +EXPORT_SYMBOL(ath9k_hw_resettxqueue); + +int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, + u32 pa, struct ath_desc *nds, u64 tsf) +{ + struct ar5416_desc ads; + struct ar5416_desc *adsp = AR5416DESC(ds); + u32 phyerr; + + if ((adsp->ds_rxstatus8 & AR_RxDone) == 0) + return -EINPROGRESS; + + ads.u.rx = adsp->u.rx; + + ds->ds_rxstat.rs_status = 0; + ds->ds_rxstat.rs_flags = 0; + + ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen; + ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp; + + if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) { + ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD; + ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD; + ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD; + ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD; + ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD; + ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD; + ds->ds_rxstat.rs_rssi_ext2 = 
ATH9K_RSSI_BAD; + } else { + ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined); + ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, + AR_RxRSSIAnt00); + ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, + AR_RxRSSIAnt01); + ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, + AR_RxRSSIAnt02); + ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, + AR_RxRSSIAnt10); + ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, + AR_RxRSSIAnt11); + ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, + AR_RxRSSIAnt12); + } + if (ads.ds_rxstatus8 & AR_RxKeyIdxValid) + ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx); + else + ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID; + + ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads)); + ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0; + + ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0; + ds->ds_rxstat.rs_moreaggr = + (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; + ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); + ds->ds_rxstat.rs_flags = + (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0; + ds->ds_rxstat.rs_flags |= + (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0; + + if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) + ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE; + if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) + ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST; + if (ads.ds_rxstatus8 & AR_DecryptBusyErr) + ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY; + + if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { + if (ads.ds_rxstatus8 & AR_CRCErr) + ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC; + else if (ads.ds_rxstatus8 & AR_PHYErr) { + ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY; + phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); + ds->ds_rxstat.rs_phyerr = phyerr; + } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) + ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT; + else if (ads.ds_rxstatus8 & AR_MichaelErr) + ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC; + } + + return 0; +} +EXPORT_SYMBOL(ath9k_hw_rxprocdesc); + +void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds, + u32 size, u32 flags) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + struct ath9k_hw_capabilities *pCap = &ah->caps; + + ads->ds_ctl1 = size & AR_BufLen; + if (flags & ATH9K_RXDESC_INTREQ) + ads->ds_ctl1 |= AR_RxIntrReq; + + ads->ds_rxstatus8 &= ~AR_RxDone; + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) + memset(&(ads->u), 0, sizeof(ads->u)); +} +EXPORT_SYMBOL(ath9k_hw_setuprxdesc); + +/* + * This can stop or re-enables RX. + * + * If bool is set this will kill any frame which is currently being + * transferred between the MAC and baseband and also prevent any new + * frames from getting started. 
+ */ +bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set) +{ + u32 reg; + + if (set) { + REG_SET_BIT(ah, AR_DIAG_SW, + (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); + + if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, + 0, AH_WAIT_TIMEOUT)) { + REG_CLR_BIT(ah, AR_DIAG_SW, + (AR_DIAG_RX_DIS | + AR_DIAG_RX_ABORT)); + + reg = REG_READ(ah, AR_OBS_BUS_1); + ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, + "RX failed to go idle in 10 ms RXSM=0x%x\n", + reg); + + return false; + } + } else { + REG_CLR_BIT(ah, AR_DIAG_SW, + (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); + } + + return true; +} +EXPORT_SYMBOL(ath9k_hw_setrxabort); + +void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp) +{ + REG_WRITE(ah, AR_RXDP, rxdp); +} +EXPORT_SYMBOL(ath9k_hw_putrxbuf); + +void ath9k_hw_rxena(struct ath_hw *ah) +{ + REG_WRITE(ah, AR_CR, AR_CR_RXE); +} +EXPORT_SYMBOL(ath9k_hw_rxena); + +void ath9k_hw_startpcureceive(struct ath_hw *ah) +{ + ath9k_enable_mib_counters(ah); + + ath9k_ani_reset(ah); + + REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); +} +EXPORT_SYMBOL(ath9k_hw_startpcureceive); + +void ath9k_hw_stoppcurecv(struct ath_hw *ah) +{ + REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS); + + ath9k_hw_disable_mib_counters(ah); +} +EXPORT_SYMBOL(ath9k_hw_stoppcurecv); + +bool ath9k_hw_stopdmarecv(struct ath_hw *ah) +{ +#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ +#define AH_RX_TIME_QUANTUM 100 /* usec */ + struct ath_common *common = ath9k_hw_common(ah); + int i; + + REG_WRITE(ah, AR_CR, AR_CR_RXD); + + /* Wait for rx enable bit to go low */ + for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { + if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) + break; + udelay(AH_TIME_QUANTUM); + } + + if (i == 0) { + ath_print(common, ATH_DBG_FATAL, + "DMA failed to stop in %d ms " + "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", + AH_RX_STOP_DMA_TIMEOUT / 1000, + REG_READ(ah, AR_CR), + REG_READ(ah, AR_DIAG_SW)); + return false; + } else { + return true; + } + +#undef AH_RX_TIME_QUANTUM +#undef AH_RX_STOP_DMA_TIMEOUT +} +EXPORT_SYMBOL(ath9k_hw_stopdmarecv); + +int ath9k_hw_beaconq_setup(struct ath_hw *ah) +{ + struct ath9k_tx_queue_info qi; + + memset(&qi, 0, sizeof(qi)); + qi.tqi_aifs = 1; + qi.tqi_cwmin = 0; + qi.tqi_cwmax = 0; + /* NB: don't enable any interrupts */ + return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi); +} +EXPORT_SYMBOL(ath9k_hw_beaconq_setup); diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h new file mode 100644 index 0000000..29851e6 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -0,0 +1,720 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef MAC_H +#define MAC_H + +#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_20_OR_LATER(ah) ? 
\ + MS(ads->ds_rxstatus0, AR_RxRate) : \ + (ads->ds_rxstatus3 >> 2) & 0xFF) + +#define set11nTries(_series, _index) \ + (SM((_series)[_index].Tries, AR_XmitDataTries##_index)) + +#define set11nRate(_series, _index) \ + (SM((_series)[_index].Rate, AR_XmitRate##_index)) + +#define set11nPktDurRTSCTS(_series, _index) \ + (SM((_series)[_index].PktDuration, AR_PacketDur##_index) | \ + ((_series)[_index].RateFlags & ATH9K_RATESERIES_RTS_CTS ? \ + AR_RTSCTSQual##_index : 0)) + +#define set11nRateFlags(_series, _index) \ + (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \ + AR_2040_##_index : 0) \ + |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \ + AR_GI##_index : 0) \ + |SM((_series)[_index].ChSel, AR_ChainSel##_index)) + +#define CCK_SIFS_TIME 10 +#define CCK_PREAMBLE_BITS 144 +#define CCK_PLCP_BITS 48 + +#define OFDM_SIFS_TIME 16 +#define OFDM_PREAMBLE_TIME 20 +#define OFDM_PLCP_BITS 22 +#define OFDM_SYMBOL_TIME 4 + +#define OFDM_SIFS_TIME_HALF 32 +#define OFDM_PREAMBLE_TIME_HALF 40 +#define OFDM_PLCP_BITS_HALF 22 +#define OFDM_SYMBOL_TIME_HALF 8 + +#define OFDM_SIFS_TIME_QUARTER 64 +#define OFDM_PREAMBLE_TIME_QUARTER 80 +#define OFDM_PLCP_BITS_QUARTER 22 +#define OFDM_SYMBOL_TIME_QUARTER 16 + +#define INIT_AIFS 2 +#define INIT_CWMIN 15 +#define INIT_CWMIN_11B 31 +#define INIT_CWMAX 1023 +#define INIT_SH_RETRY 10 +#define INIT_LG_RETRY 10 +#define INIT_SSH_RETRY 32 +#define INIT_SLG_RETRY 32 + +#define ATH9K_SLOT_TIME_6 6 +#define ATH9K_SLOT_TIME_9 9 +#define ATH9K_SLOT_TIME_20 20 + +#define ATH9K_TXERR_XRETRY 0x01 +#define ATH9K_TXERR_FILT 0x02 +#define ATH9K_TXERR_FIFO 0x04 +#define ATH9K_TXERR_XTXOP 0x08 +#define ATH9K_TXERR_TIMER_EXPIRED 0x10 +#define ATH9K_TX_ACKED 0x20 +#define ATH9K_TXERR_MASK \ + (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \ + ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED) + +#define ATH9K_TX_BA 0x01 +#define ATH9K_TX_PWRMGMT 0x02 +#define ATH9K_TX_DESC_CFG_ERR 0x04 +#define ATH9K_TX_DATA_UNDERRUN 0x08 +#define ATH9K_TX_DELIM_UNDERRUN 0x10 +#define ATH9K_TX_SW_ABORTED 0x40 +#define ATH9K_TX_SW_FILTERED 0x80 + +/* 64 bytes */ +#define MIN_TX_FIFO_THRESHOLD 0x1 + +/* + * Single stream device AR9285 and AR9271 require 2 KB + * to work around a hardware issue, all other devices + * have can use the max 4 KB limit. 
+ */ +#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1) + +struct ath_tx_status { + u32 ts_tstamp; + u16 ts_seqnum; + u8 ts_status; + u8 ts_ratecode; + u8 ts_rateindex; + int8_t ts_rssi; + u8 ts_shortretry; + u8 ts_longretry; + u8 ts_virtcol; + u8 ts_antenna; + u8 ts_flags; + int8_t ts_rssi_ctl0; + int8_t ts_rssi_ctl1; + int8_t ts_rssi_ctl2; + int8_t ts_rssi_ext0; + int8_t ts_rssi_ext1; + int8_t ts_rssi_ext2; + u8 pad[3]; + u32 ba_low; + u32 ba_high; + u32 evm0; + u32 evm1; + u32 evm2; +}; + +struct ath_rx_status { + u32 rs_tstamp; + u16 rs_datalen; + u8 rs_status; + u8 rs_phyerr; + int8_t rs_rssi; + u8 rs_keyix; + u8 rs_rate; + u8 rs_antenna; + u8 rs_more; + int8_t rs_rssi_ctl0; + int8_t rs_rssi_ctl1; + int8_t rs_rssi_ctl2; + int8_t rs_rssi_ext0; + int8_t rs_rssi_ext1; + int8_t rs_rssi_ext2; + u8 rs_isaggr; + u8 rs_moreaggr; + u8 rs_num_delims; + u8 rs_flags; + u32 evm0; + u32 evm1; + u32 evm2; +}; + +#define ATH9K_RXERR_CRC 0x01 +#define ATH9K_RXERR_PHY 0x02 +#define ATH9K_RXERR_FIFO 0x04 +#define ATH9K_RXERR_DECRYPT 0x08 +#define ATH9K_RXERR_MIC 0x10 + +#define ATH9K_RX_MORE 0x01 +#define ATH9K_RX_MORE_AGGR 0x02 +#define ATH9K_RX_GI 0x04 +#define ATH9K_RX_2040 0x08 +#define ATH9K_RX_DELIM_CRC_PRE 0x10 +#define ATH9K_RX_DELIM_CRC_POST 0x20 +#define ATH9K_RX_DECRYPT_BUSY 0x40 + +#define ATH9K_RXKEYIX_INVALID ((u8)-1) +#define ATH9K_TXKEYIX_INVALID ((u32)-1) + +enum ath9k_phyerr { + ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */ + ATH9K_PHYERR_TIMING = 1, /* Timing error */ + ATH9K_PHYERR_PARITY = 2, /* Illegal parity */ + ATH9K_PHYERR_RATE = 3, /* Illegal rate */ + ATH9K_PHYERR_LENGTH = 4, /* Illegal length */ + ATH9K_PHYERR_RADAR = 5, /* Radar detect */ + ATH9K_PHYERR_SERVICE = 6, /* Illegal service */ + ATH9K_PHYERR_TOR = 7, /* Transmit override receive */ + + ATH9K_PHYERR_OFDM_TIMING = 17, + ATH9K_PHYERR_OFDM_SIGNAL_PARITY = 18, + ATH9K_PHYERR_OFDM_RATE_ILLEGAL = 19, + ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL = 20, + ATH9K_PHYERR_OFDM_POWER_DROP = 21, + ATH9K_PHYERR_OFDM_SERVICE = 22, + ATH9K_PHYERR_OFDM_RESTART = 23, + ATH9K_PHYERR_FALSE_RADAR_EXT = 24, + + ATH9K_PHYERR_CCK_TIMING = 25, + ATH9K_PHYERR_CCK_HEADER_CRC = 26, + ATH9K_PHYERR_CCK_RATE_ILLEGAL = 27, + ATH9K_PHYERR_CCK_SERVICE = 30, + ATH9K_PHYERR_CCK_RESTART = 31, + ATH9K_PHYERR_CCK_LENGTH_ILLEGAL = 32, + ATH9K_PHYERR_CCK_POWER_DROP = 33, + + ATH9K_PHYERR_HT_CRC_ERROR = 34, + ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35, + ATH9K_PHYERR_HT_RATE_ILLEGAL = 36, + + ATH9K_PHYERR_MAX = 37, +}; + +struct ath_desc { + u32 ds_link; + u32 ds_data; + u32 ds_ctl0; + u32 ds_ctl1; + u32 ds_hw[20]; + union { + struct ath_tx_status tx; + struct ath_rx_status rx; + void *stats; + } ds_us; + void *ds_vdata; +} __packed; + +#define ds_txstat ds_us.tx +#define ds_rxstat ds_us.rx +#define ds_stat ds_us.stats + +#define ATH9K_TXDESC_CLRDMASK 0x0001 +#define ATH9K_TXDESC_NOACK 0x0002 +#define ATH9K_TXDESC_RTSENA 0x0004 +#define ATH9K_TXDESC_CTSENA 0x0008 +/* ATH9K_TXDESC_INTREQ forces a tx interrupt to be generated for + * the descriptor its marked on. We take a tx interrupt to reap + * descriptors when the h/w hits an EOL condition or + * when the descriptor is specifically marked to generate + * an interrupt with this flag. Descriptors should be + * marked periodically to insure timely replenishing of the + * supply needed for sending frames. Defering interrupts + * reduces system load and potentially allows more concurrent + * work to be done but if done to aggressively can cause + * senders to backup. 
When the hardware queue is left too + * large rate control information may also be too out of + * date. An Alternative for this is TX interrupt mitigation + * but this needs more testing. */ +#define ATH9K_TXDESC_INTREQ 0x0010 +#define ATH9K_TXDESC_VEOL 0x0020 +#define ATH9K_TXDESC_EXT_ONLY 0x0040 +#define ATH9K_TXDESC_EXT_AND_CTL 0x0080 +#define ATH9K_TXDESC_VMF 0x0100 +#define ATH9K_TXDESC_FRAG_IS_ON 0x0200 +#define ATH9K_TXDESC_CAB 0x0400 + +#define ATH9K_RXDESC_INTREQ 0x0020 + +struct ar5416_desc { + u32 ds_link; + u32 ds_data; + u32 ds_ctl0; + u32 ds_ctl1; + union { + struct { + u32 ctl2; + u32 ctl3; + u32 ctl4; + u32 ctl5; + u32 ctl6; + u32 ctl7; + u32 ctl8; + u32 ctl9; + u32 ctl10; + u32 ctl11; + u32 status0; + u32 status1; + u32 status2; + u32 status3; + u32 status4; + u32 status5; + u32 status6; + u32 status7; + u32 status8; + u32 status9; + } tx; + struct { + u32 status0; + u32 status1; + u32 status2; + u32 status3; + u32 status4; + u32 status5; + u32 status6; + u32 status7; + u32 status8; + } rx; + } u; +} __packed; + +#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds)) +#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds)) + +#define ds_ctl2 u.tx.ctl2 +#define ds_ctl3 u.tx.ctl3 +#define ds_ctl4 u.tx.ctl4 +#define ds_ctl5 u.tx.ctl5 +#define ds_ctl6 u.tx.ctl6 +#define ds_ctl7 u.tx.ctl7 +#define ds_ctl8 u.tx.ctl8 +#define ds_ctl9 u.tx.ctl9 +#define ds_ctl10 u.tx.ctl10 +#define ds_ctl11 u.tx.ctl11 + +#define ds_txstatus0 u.tx.status0 +#define ds_txstatus1 u.tx.status1 +#define ds_txstatus2 u.tx.status2 +#define ds_txstatus3 u.tx.status3 +#define ds_txstatus4 u.tx.status4 +#define ds_txstatus5 u.tx.status5 +#define ds_txstatus6 u.tx.status6 +#define ds_txstatus7 u.tx.status7 +#define ds_txstatus8 u.tx.status8 +#define ds_txstatus9 u.tx.status9 + +#define ds_rxstatus0 u.rx.status0 +#define ds_rxstatus1 u.rx.status1 +#define ds_rxstatus2 u.rx.status2 +#define ds_rxstatus3 u.rx.status3 +#define ds_rxstatus4 u.rx.status4 +#define ds_rxstatus5 u.rx.status5 +#define ds_rxstatus6 u.rx.status6 +#define ds_rxstatus7 u.rx.status7 +#define ds_rxstatus8 u.rx.status8 + +#define AR_FrameLen 0x00000fff +#define AR_VirtMoreFrag 0x00001000 +#define AR_TxCtlRsvd00 0x0000e000 +#define AR_XmitPower 0x003f0000 +#define AR_XmitPower_S 16 +#define AR_RTSEnable 0x00400000 +#define AR_VEOL 0x00800000 +#define AR_ClrDestMask 0x01000000 +#define AR_TxCtlRsvd01 0x1e000000 +#define AR_TxIntrReq 0x20000000 +#define AR_DestIdxValid 0x40000000 +#define AR_CTSEnable 0x80000000 + +#define AR_BufLen 0x00000fff +#define AR_TxMore 0x00001000 +#define AR_DestIdx 0x000fe000 +#define AR_DestIdx_S 13 +#define AR_FrameType 0x00f00000 +#define AR_FrameType_S 20 +#define AR_NoAck 0x01000000 +#define AR_InsertTS 0x02000000 +#define AR_CorruptFCS 0x04000000 +#define AR_ExtOnly 0x08000000 +#define AR_ExtAndCtl 0x10000000 +#define AR_MoreAggr 0x20000000 +#define AR_IsAggr 0x40000000 + +#define AR_BurstDur 0x00007fff +#define AR_BurstDur_S 0 +#define AR_DurUpdateEna 0x00008000 +#define AR_XmitDataTries0 0x000f0000 +#define AR_XmitDataTries0_S 16 +#define AR_XmitDataTries1 0x00f00000 +#define AR_XmitDataTries1_S 20 +#define AR_XmitDataTries2 0x0f000000 +#define AR_XmitDataTries2_S 24 +#define AR_XmitDataTries3 0xf0000000 +#define AR_XmitDataTries3_S 28 + +#define AR_XmitRate0 0x000000ff +#define AR_XmitRate0_S 0 +#define AR_XmitRate1 0x0000ff00 +#define AR_XmitRate1_S 8 +#define AR_XmitRate2 0x00ff0000 +#define AR_XmitRate2_S 16 +#define AR_XmitRate3 0xff000000 +#define AR_XmitRate3_S 24 + +#define AR_PacketDur0 
0x00007fff +#define AR_PacketDur0_S 0 +#define AR_RTSCTSQual0 0x00008000 +#define AR_PacketDur1 0x7fff0000 +#define AR_PacketDur1_S 16 +#define AR_RTSCTSQual1 0x80000000 + +#define AR_PacketDur2 0x00007fff +#define AR_PacketDur2_S 0 +#define AR_RTSCTSQual2 0x00008000 +#define AR_PacketDur3 0x7fff0000 +#define AR_PacketDur3_S 16 +#define AR_RTSCTSQual3 0x80000000 + +#define AR_AggrLen 0x0000ffff +#define AR_AggrLen_S 0 +#define AR_TxCtlRsvd60 0x00030000 +#define AR_PadDelim 0x03fc0000 +#define AR_PadDelim_S 18 +#define AR_EncrType 0x0c000000 +#define AR_EncrType_S 26 +#define AR_TxCtlRsvd61 0xf0000000 + +#define AR_2040_0 0x00000001 +#define AR_GI0 0x00000002 +#define AR_ChainSel0 0x0000001c +#define AR_ChainSel0_S 2 +#define AR_2040_1 0x00000020 +#define AR_GI1 0x00000040 +#define AR_ChainSel1 0x00000380 +#define AR_ChainSel1_S 7 +#define AR_2040_2 0x00000400 +#define AR_GI2 0x00000800 +#define AR_ChainSel2 0x00007000 +#define AR_ChainSel2_S 12 +#define AR_2040_3 0x00008000 +#define AR_GI3 0x00010000 +#define AR_ChainSel3 0x000e0000 +#define AR_ChainSel3_S 17 +#define AR_RTSCTSRate 0x0ff00000 +#define AR_RTSCTSRate_S 20 +#define AR_TxCtlRsvd70 0xf0000000 + +#define AR_TxRSSIAnt00 0x000000ff +#define AR_TxRSSIAnt00_S 0 +#define AR_TxRSSIAnt01 0x0000ff00 +#define AR_TxRSSIAnt01_S 8 +#define AR_TxRSSIAnt02 0x00ff0000 +#define AR_TxRSSIAnt02_S 16 +#define AR_TxStatusRsvd00 0x3f000000 +#define AR_TxBaStatus 0x40000000 +#define AR_TxStatusRsvd01 0x80000000 + +/* + * AR_FrmXmitOK - Frame transmission success flag. If set, the frame was + * transmitted successfully. If clear, no ACK or BA was received to indicate + * successful transmission when we were expecting an ACK or BA. + */ +#define AR_FrmXmitOK 0x00000001 +#define AR_ExcessiveRetries 0x00000002 +#define AR_FIFOUnderrun 0x00000004 +#define AR_Filtered 0x00000008 +#define AR_RTSFailCnt 0x000000f0 +#define AR_RTSFailCnt_S 4 +#define AR_DataFailCnt 0x00000f00 +#define AR_DataFailCnt_S 8 +#define AR_VirtRetryCnt 0x0000f000 +#define AR_VirtRetryCnt_S 12 +#define AR_TxDelimUnderrun 0x00010000 +#define AR_TxDataUnderrun 0x00020000 +#define AR_DescCfgErr 0x00040000 +#define AR_TxTimerExpired 0x00080000 +#define AR_TxStatusRsvd10 0xfff00000 + +#define AR_SendTimestamp ds_txstatus2 +#define AR_BaBitmapLow ds_txstatus3 +#define AR_BaBitmapHigh ds_txstatus4 + +#define AR_TxRSSIAnt10 0x000000ff +#define AR_TxRSSIAnt10_S 0 +#define AR_TxRSSIAnt11 0x0000ff00 +#define AR_TxRSSIAnt11_S 8 +#define AR_TxRSSIAnt12 0x00ff0000 +#define AR_TxRSSIAnt12_S 16 +#define AR_TxRSSICombined 0xff000000 +#define AR_TxRSSICombined_S 24 + +#define AR_TxEVM0 ds_txstatus5 +#define AR_TxEVM1 ds_txstatus6 +#define AR_TxEVM2 ds_txstatus7 + +#define AR_TxDone 0x00000001 +#define AR_SeqNum 0x00001ffe +#define AR_SeqNum_S 1 +#define AR_TxStatusRsvd80 0x0001e000 +#define AR_TxOpExceeded 0x00020000 +#define AR_TxStatusRsvd81 0x001c0000 +#define AR_FinalTxIdx 0x00600000 +#define AR_FinalTxIdx_S 21 +#define AR_TxStatusRsvd82 0x01800000 +#define AR_PowerMgmt 0x02000000 +#define AR_TxStatusRsvd83 0xfc000000 + +#define AR_RxCTLRsvd00 0xffffffff + +#define AR_BufLen 0x00000fff +#define AR_RxCtlRsvd00 0x00001000 +#define AR_RxIntrReq 0x00002000 +#define AR_RxCtlRsvd01 0xffffc000 + +#define AR_RxRSSIAnt00 0x000000ff +#define AR_RxRSSIAnt00_S 0 +#define AR_RxRSSIAnt01 0x0000ff00 +#define AR_RxRSSIAnt01_S 8 +#define AR_RxRSSIAnt02 0x00ff0000 +#define AR_RxRSSIAnt02_S 16 +#define AR_RxRate 0xff000000 +#define AR_RxRate_S 24 +#define AR_RxStatusRsvd00 0xff000000 + +#define AR_DataLen 0x00000fff 
+#define AR_RxMore 0x00001000 +#define AR_NumDelim 0x003fc000 +#define AR_NumDelim_S 14 +#define AR_RxStatusRsvd10 0xff800000 + +#define AR_RcvTimestamp ds_rxstatus2 + +#define AR_GI 0x00000001 +#define AR_2040 0x00000002 +#define AR_Parallel40 0x00000004 +#define AR_Parallel40_S 2 +#define AR_RxStatusRsvd30 0x000000f8 +#define AR_RxAntenna 0xffffff00 +#define AR_RxAntenna_S 8 + +#define AR_RxRSSIAnt10 0x000000ff +#define AR_RxRSSIAnt10_S 0 +#define AR_RxRSSIAnt11 0x0000ff00 +#define AR_RxRSSIAnt11_S 8 +#define AR_RxRSSIAnt12 0x00ff0000 +#define AR_RxRSSIAnt12_S 16 +#define AR_RxRSSICombined 0xff000000 +#define AR_RxRSSICombined_S 24 + +#define AR_RxEVM0 ds_rxstatus4 +#define AR_RxEVM1 ds_rxstatus5 +#define AR_RxEVM2 ds_rxstatus6 + +#define AR_RxDone 0x00000001 +#define AR_RxFrameOK 0x00000002 +#define AR_CRCErr 0x00000004 +#define AR_DecryptCRCErr 0x00000008 +#define AR_PHYErr 0x00000010 +#define AR_MichaelErr 0x00000020 +#define AR_PreDelimCRCErr 0x00000040 +#define AR_RxStatusRsvd70 0x00000080 +#define AR_RxKeyIdxValid 0x00000100 +#define AR_KeyIdx 0x0000fe00 +#define AR_KeyIdx_S 9 +#define AR_PHYErrCode 0x0000ff00 +#define AR_PHYErrCode_S 8 +#define AR_RxMoreAggr 0x00010000 +#define AR_RxAggr 0x00020000 +#define AR_PostDelimCRCErr 0x00040000 +#define AR_RxStatusRsvd71 0x3ff80000 +#define AR_DecryptBusyErr 0x40000000 +#define AR_KeyMiss 0x80000000 + +enum ath9k_tx_queue { + ATH9K_TX_QUEUE_INACTIVE = 0, + ATH9K_TX_QUEUE_DATA, + ATH9K_TX_QUEUE_BEACON, + ATH9K_TX_QUEUE_CAB, + ATH9K_TX_QUEUE_UAPSD, + ATH9K_TX_QUEUE_PSPOLL +}; + +#define ATH9K_NUM_TX_QUEUES 10 + +enum ath9k_tx_queue_subtype { + ATH9K_WME_AC_BK = 0, + ATH9K_WME_AC_BE, + ATH9K_WME_AC_VI, + ATH9K_WME_AC_VO, + ATH9K_WME_UPSD +}; + +enum ath9k_tx_queue_flags { + TXQ_FLAG_TXOKINT_ENABLE = 0x0001, + TXQ_FLAG_TXERRINT_ENABLE = 0x0001, + TXQ_FLAG_TXDESCINT_ENABLE = 0x0002, + TXQ_FLAG_TXEOLINT_ENABLE = 0x0004, + TXQ_FLAG_TXURNINT_ENABLE = 0x0008, + TXQ_FLAG_BACKOFF_DISABLE = 0x0010, + TXQ_FLAG_COMPRESSION_ENABLE = 0x0020, + TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040, + TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080, +}; + +#define ATH9K_TXQ_USEDEFAULT ((u32) -1) +#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001 + +#define ATH9K_DECOMP_MASK_SIZE 128 +#define ATH9K_READY_TIME_LO_BOUND 50 +#define ATH9K_READY_TIME_HI_BOUND 96 + +enum ath9k_pkt_type { + ATH9K_PKT_TYPE_NORMAL = 0, + ATH9K_PKT_TYPE_ATIM, + ATH9K_PKT_TYPE_PSPOLL, + ATH9K_PKT_TYPE_BEACON, + ATH9K_PKT_TYPE_PROBE_RESP, + ATH9K_PKT_TYPE_CHIRP, + ATH9K_PKT_TYPE_GRP_POLL, +}; + +struct ath9k_tx_queue_info { + u32 tqi_ver; + enum ath9k_tx_queue tqi_type; + enum ath9k_tx_queue_subtype tqi_subtype; + enum ath9k_tx_queue_flags tqi_qflags; + u32 tqi_priority; + u32 tqi_aifs; + u32 tqi_cwmin; + u32 tqi_cwmax; + u16 tqi_shretry; + u16 tqi_lgretry; + u32 tqi_cbrPeriod; + u32 tqi_cbrOverflowLimit; + u32 tqi_burstTime; + u32 tqi_readyTime; + u32 tqi_physCompBuf; + u32 tqi_intFlags; +}; + +enum ath9k_rx_filter { + ATH9K_RX_FILTER_UCAST = 0x00000001, + ATH9K_RX_FILTER_MCAST = 0x00000002, + ATH9K_RX_FILTER_BCAST = 0x00000004, + ATH9K_RX_FILTER_CONTROL = 0x00000008, + ATH9K_RX_FILTER_BEACON = 0x00000010, + ATH9K_RX_FILTER_PROM = 0x00000020, + ATH9K_RX_FILTER_PROBEREQ = 0x00000080, + ATH9K_RX_FILTER_PHYERR = 0x00000100, + ATH9K_RX_FILTER_MYBEACON = 0x00000200, + ATH9K_RX_FILTER_COMP_BAR = 0x00000400, + ATH9K_RX_FILTER_PSPOLL = 0x00004000, + ATH9K_RX_FILTER_PHYRADAR = 0x00002000, + ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000, +}; + +#define ATH9K_RATESERIES_RTS_CTS 0x0001 +#define 
ATH9K_RATESERIES_2040 0x0002 +#define ATH9K_RATESERIES_HALFGI 0x0004 + +struct ath9k_11n_rate_series { + u32 Tries; + u32 Rate; + u32 PktDuration; + u32 ChSel; + u32 RateFlags; +}; + +struct ath9k_keyval { + u8 kv_type; + u8 kv_pad; + u16 kv_len; + u8 kv_val[16]; /* TK */ + u8 kv_mic[8]; /* Michael MIC key */ + u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware + * supports both MIC keys in the same key cache entry; + * in that case, kv_mic is the RX key) */ +}; + +enum ath9k_key_type { + ATH9K_KEY_TYPE_CLEAR, + ATH9K_KEY_TYPE_WEP, + ATH9K_KEY_TYPE_AES, + ATH9K_KEY_TYPE_TKIP, +}; + +enum ath9k_cipher { + ATH9K_CIPHER_WEP = 0, + ATH9K_CIPHER_AES_OCB = 1, + ATH9K_CIPHER_AES_CCM = 2, + ATH9K_CIPHER_CKIP = 3, + ATH9K_CIPHER_TKIP = 4, + ATH9K_CIPHER_CLR = 5, + ATH9K_CIPHER_MIC = 127 +}; + +struct ath_hw; +struct ath9k_channel; + +u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q); +void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp); +void ath9k_hw_txstart(struct ath_hw *ah, u32 q); +u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q); +bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel); +bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q); +void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds, + u32 segLen, bool firstSeg, + bool lastSeg, const struct ath_desc *ds0); +void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds); +int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds); +void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds, + u32 pktLen, enum ath9k_pkt_type type, u32 txPower, + u32 keyIx, enum ath9k_key_type keyType, u32 flags); +void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds, + struct ath_desc *lastds, + u32 durUpdateEn, u32 rtsctsRate, + u32 rtsctsDuration, + struct ath9k_11n_rate_series series[], + u32 nseries, u32 flags); +void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds, + u32 aggrLen); +void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds, + u32 numDelims); +void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds); +void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds); +void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds, + u32 burstDuration); +void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds, + u32 vmf); +void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs); +bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, + const struct ath9k_tx_queue_info *qinfo); +bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, + struct ath9k_tx_queue_info *qinfo); +int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, + const struct ath9k_tx_queue_info *qinfo); +bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q); +bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q); +int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, + u32 pa, struct ath_desc *nds, u64 tsf); +void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds, + u32 size, u32 flags); +bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set); +void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); +void ath9k_hw_rxena(struct ath_hw *ah); +void ath9k_hw_startpcureceive(struct ath_hw *ah); +void ath9k_hw_stoppcurecv(struct ath_hw *ah); +bool ath9k_hw_stopdmarecv(struct ath_hw *ah); +int ath9k_hw_beaconq_setup(struct ath_hw *ah); + +#endif /* MAC_H */ diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c new file mode 100644 index 
0000000..67ca4e5 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -0,0 +1,2065 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/nl80211.h> +#include "ath9k.h" +#include "btcoex.h" + +static void ath_cache_conf_rate(struct ath_softc *sc, + struct ieee80211_conf *conf) +{ + switch (conf->channel->band) { + case IEEE80211_BAND_2GHZ: + if (conf_is_ht20(conf)) + sc->cur_rate_mode = ATH9K_MODE_11NG_HT20; + else if (conf_is_ht40_minus(conf)) + sc->cur_rate_mode = ATH9K_MODE_11NG_HT40MINUS; + else if (conf_is_ht40_plus(conf)) + sc->cur_rate_mode = ATH9K_MODE_11NG_HT40PLUS; + else + sc->cur_rate_mode = ATH9K_MODE_11G; + break; + case IEEE80211_BAND_5GHZ: + if (conf_is_ht20(conf)) + sc->cur_rate_mode = ATH9K_MODE_11NA_HT20; + else if (conf_is_ht40_minus(conf)) + sc->cur_rate_mode = ATH9K_MODE_11NA_HT40MINUS; + else if (conf_is_ht40_plus(conf)) + sc->cur_rate_mode = ATH9K_MODE_11NA_HT40PLUS; + else + sc->cur_rate_mode = ATH9K_MODE_11A; + break; + default: + BUG_ON(1); + break; + } +} + +static void ath_update_txpow(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + u32 txpow; + + if (sc->curtxpow != sc->config.txpowlimit) { + ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit); + /* read back in case value is clamped */ + ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow); + sc->curtxpow = txpow; + } +} + +static u8 parse_mpdudensity(u8 mpdudensity) +{ + /* + * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": + * 0 for no restriction + * 1 for 1/4 us + * 2 for 1/2 us + * 3 for 1 us + * 4 for 2 us + * 5 for 4 us + * 6 for 8 us + * 7 for 16 us + */ + switch (mpdudensity) { + case 0: + return 0; + case 1: + case 2: + case 3: + /* Our lower layer calculations limit our precision to + 1 microsecond */ + return 1; + case 4: + return 2; + case 5: + return 4; + case 6: + return 8; + case 7: + return 16; + default: + return 0; + } +} + +static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc, + struct ieee80211_hw *hw) +{ + struct ieee80211_channel *curchan = hw->conf.channel; + struct ath9k_channel *channel; + u8 chan_idx; + + chan_idx = curchan->hw_value; + channel = &sc->sc_ah->channels[chan_idx]; + ath9k_update_ichannel(sc, hw, channel); + return channel; +} + +bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode) +{ + unsigned long flags; + bool ret; + + spin_lock_irqsave(&sc->sc_pm_lock, flags); + ret = ath9k_hw_setpower(sc->sc_ah, mode); + spin_unlock_irqrestore(&sc->sc_pm_lock, flags); + + return ret; +} + +void ath9k_ps_wakeup(struct ath_softc *sc) +{ + unsigned long flags; + + spin_lock_irqsave(&sc->sc_pm_lock, flags); + if (++sc->ps_usecount != 1) + goto unlock; + + ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); + + unlock: + spin_unlock_irqrestore(&sc->sc_pm_lock, flags); +} + +void
ath9k_ps_restore(struct ath_softc *sc) +{ + unsigned long flags; + + spin_lock_irqsave(&sc->sc_pm_lock, flags); + if (--sc->ps_usecount != 0) + goto unlock; + + if (sc->ps_idle) + ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); + else if (sc->ps_enabled && + !(sc->ps_flags & (PS_WAIT_FOR_BEACON | + PS_WAIT_FOR_CAB | + PS_WAIT_FOR_PSPOLL_DATA | + PS_WAIT_FOR_TX_ACK))) + ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP); + + unlock: + spin_unlock_irqrestore(&sc->sc_pm_lock, flags); +} + +/* + * Set/change channels. If the channel is really being changed, it's done + * by reseting the chip. To accomplish this we must first cleanup any pending + * DMA, then restart stuff. +*/ +int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, + struct ath9k_channel *hchan) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_conf *conf = &common->hw->conf; + bool fastcc = true, stopped; + struct ieee80211_channel *channel = hw->conf.channel; + int r; + + if (sc->sc_flags & SC_OP_INVALID) + return -EIO; + + ath9k_ps_wakeup(sc); + + /* + * This is only performed if the channel settings have + * actually changed. + * + * To switch channels clear any pending DMA operations; + * wait long enough for the RX fifo to drain, reset the + * hardware at the new frequency, and then re-enable + * the relevant bits of the h/w. + */ + ath9k_hw_set_interrupts(ah, 0); + ath_drain_all_txq(sc, false); + stopped = ath_stoprecv(sc); + + /* XXX: do not flush receive queue here. We don't want + * to flush data frames already in queue because of + * changing channel. */ + + if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET)) + fastcc = false; + + ath_print(common, ATH_DBG_CONFIG, + "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n", + sc->sc_ah->curchan->channel, + channel->center_freq, conf_is_ht40(conf)); + + spin_lock_bh(&sc->sc_resetlock); + + r = ath9k_hw_reset(ah, hchan, fastcc); + if (r) { + ath_print(common, ATH_DBG_FATAL, + "Unable to reset channel (%u MHz), " + "reset status %d\n", + channel->center_freq, r); + spin_unlock_bh(&sc->sc_resetlock); + goto ps_restore; + } + spin_unlock_bh(&sc->sc_resetlock); + + sc->sc_flags &= ~SC_OP_FULL_RESET; + + if (ath_startrecv(sc) != 0) { + ath_print(common, ATH_DBG_FATAL, + "Unable to restart recv logic\n"); + r = -EIO; + goto ps_restore; + } + + ath_cache_conf_rate(sc, &hw->conf); + ath_update_txpow(sc); + ath9k_hw_set_interrupts(ah, sc->imask); + + ps_restore: + ath9k_ps_restore(sc); + return r; +} + +/* + * This routine performs the periodic noise floor calibration function + * that is used to adjust and optimize the chip performance. This + * takes environmental changes (location, temperature) into account. + * When the task is complete, it reschedules itself depending on the + * appropriate interval that was calculated. + */ +void ath_ani_calibrate(unsigned long data) +{ + struct ath_softc *sc = (struct ath_softc *)data; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + bool longcal = false; + bool shortcal = false; + bool aniflag = false; + unsigned int timestamp = jiffies_to_msecs(jiffies); + u32 cal_interval, short_cal_interval; + + short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ? + ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL; + + /* Only calibrate if awake */ + if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) + goto set_timer; + + ath9k_ps_wakeup(sc); + + /* Long calibration runs independently of short calibration. 
*/ + if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) { + longcal = true; + ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies); + common->ani.longcal_timer = timestamp; + } + + /* Short calibration applies only while caldone is false */ + if (!common->ani.caldone) { + if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) { + shortcal = true; + ath_print(common, ATH_DBG_ANI, + "shortcal @%lu\n", jiffies); + common->ani.shortcal_timer = timestamp; + common->ani.resetcal_timer = timestamp; + } + } else { + if ((timestamp - common->ani.resetcal_timer) >= + ATH_RESTART_CALINTERVAL) { + common->ani.caldone = ath9k_hw_reset_calvalid(ah); + if (common->ani.caldone) + common->ani.resetcal_timer = timestamp; + } + } + + /* Verify whether we must check ANI */ + if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { + aniflag = true; + common->ani.checkani_timer = timestamp; + } + + /* Skip all processing if there's nothing to do. */ + if (longcal || shortcal || aniflag) { + /* Call ANI routine if necessary */ + if (aniflag) + ath9k_hw_ani_monitor(ah, ah->curchan); + + /* Perform calibration if necessary */ + if (longcal || shortcal) { + common->ani.caldone = + ath9k_hw_calibrate(ah, + ah->curchan, + common->rx_chainmask, + longcal); + + if (longcal) + common->ani.noise_floor = ath9k_hw_getchan_noise(ah, + ah->curchan); + + ath_print(common, ATH_DBG_ANI, + " calibrate chan %u/%x nf: %d\n", + ah->curchan->channel, + ah->curchan->channelFlags, + common->ani.noise_floor); + } + } + + ath9k_ps_restore(sc); + +set_timer: + /* + * Set timer interval based on previous results. + * The interval must be the shortest necessary to satisfy ANI, + * short calibration and long calibration. + */ + cal_interval = ATH_LONG_CALINTERVAL; + if (sc->sc_ah->config.enable_ani) + cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); + if (!common->ani.caldone) + cal_interval = min(cal_interval, (u32)short_cal_interval); + + mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); +} + +static void ath_start_ani(struct ath_common *common) +{ + unsigned long timestamp = jiffies_to_msecs(jiffies); + + common->ani.longcal_timer = timestamp; + common->ani.shortcal_timer = timestamp; + common->ani.checkani_timer = timestamp; + + mod_timer(&common->ani.timer, + jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); +} + +/* + * Update tx/rx chainmask. For legacy association, + * hard code chainmask to 1x1, for 11n association, use + * the chainmask configuration, for bt coexistence, use + * the chainmask configuration even in legacy mode. 
+ */ +void ath_update_chainmask(struct ath_softc *sc, int is_ht) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + + if ((sc->sc_flags & SC_OP_SCANNING) || is_ht || + (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) { + common->tx_chainmask = ah->caps.tx_chainmask; + common->rx_chainmask = ah->caps.rx_chainmask; + } else { + common->tx_chainmask = 1; + common->rx_chainmask = 1; + } + + ath_print(common, ATH_DBG_CONFIG, + "tx chmask: %d, rx chmask: %d\n", + common->tx_chainmask, + common->rx_chainmask); +} + +static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) +{ + struct ath_node *an; + + an = (struct ath_node *)sta->drv_priv; + + if (sc->sc_flags & SC_OP_TXAGGR) { + ath_tx_node_init(sc, an); + an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + + sta->ht_cap.ampdu_factor); + an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density); + an->last_rssi = ATH_RSSI_DUMMY_MARKER; + } +} + +static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta) +{ + struct ath_node *an = (struct ath_node *)sta->drv_priv; + + if (sc->sc_flags & SC_OP_TXAGGR) + ath_tx_node_cleanup(sc, an); +} + +void ath9k_tasklet(unsigned long data) +{ + struct ath_softc *sc = (struct ath_softc *)data; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + + u32 status = sc->intrstatus; + + ath9k_ps_wakeup(sc); + + if (status & ATH9K_INT_FATAL) { + ath_reset(sc, false); + ath9k_ps_restore(sc); + return; + } + + if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) { + spin_lock_bh(&sc->rx.rxflushlock); + ath_rx_tasklet(sc, 0); + spin_unlock_bh(&sc->rx.rxflushlock); + } + + if (status & ATH9K_INT_TX) + ath_tx_tasklet(sc); + + if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) { + /* + * TSF sync does not look correct; remain awake to sync with + * the next Beacon. + */ + ath_print(common, ATH_DBG_PS, + "TSFOOR - Sync with next Beacon\n"); + sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC; + } + + if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) + if (status & ATH9K_INT_GENTIMER) + ath_gen_timer_isr(sc->sc_ah); + + /* re-enable hardware interrupt */ + ath9k_hw_set_interrupts(ah, sc->imask); + ath9k_ps_restore(sc); +} + +irqreturn_t ath_isr(int irq, void *dev) +{ +#define SCHED_INTR ( \ + ATH9K_INT_FATAL | \ + ATH9K_INT_RXORN | \ + ATH9K_INT_RXEOL | \ + ATH9K_INT_RX | \ + ATH9K_INT_TX | \ + ATH9K_INT_BMISS | \ + ATH9K_INT_CST | \ + ATH9K_INT_TSFOOR | \ + ATH9K_INT_GENTIMER) + + struct ath_softc *sc = dev; + struct ath_hw *ah = sc->sc_ah; + enum ath9k_int status; + bool sched = false; + + /* + * The hardware is not ready/present, don't + * touch anything. Note this can happen early + * on if the IRQ is shared. + */ + if (sc->sc_flags & SC_OP_INVALID) + return IRQ_NONE; + + + /* shared irq, not for us */ + + if (!ath9k_hw_intrpend(ah)) + return IRQ_NONE; + + /* + * Figure out the reason(s) for the interrupt. Note + * that the hal returns a pseudo-ISR that may include + * bits we haven't explicitly enabled so we mask the + * value to insure we only process bits we requested. + */ + ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */ + status &= sc->imask; /* discard unasked-for bits */ + + /* + * If there are no status bits set, then this interrupt was not + * for me (should have been caught above). 
+ */ + if (!status) + return IRQ_NONE; + + /* Cache the status */ + sc->intrstatus = status; + + if (status & SCHED_INTR) + sched = true; + + /* + * If a FATAL or RXORN interrupt is received, we have to reset the + * chip immediately. + */ + if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN)) + goto chip_reset; + + if (status & ATH9K_INT_SWBA) + tasklet_schedule(&sc->bcon_tasklet); + + if (status & ATH9K_INT_TXURN) + ath9k_hw_updatetxtriglevel(ah, true); + + if (status & ATH9K_INT_MIB) { + /* + * Disable interrupts until we service the MIB + * interrupt; otherwise it will continue to + * fire. + */ + ath9k_hw_set_interrupts(ah, 0); + /* + * Let the hal handle the event. We assume + * it will clear whatever condition caused + * the interrupt. + */ + ath9k_hw_procmibevent(ah); + ath9k_hw_set_interrupts(ah, sc->imask); + } + + if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) + if (status & ATH9K_INT_TIM_TIMER) { + /* Clear RxAbort bit so that we can + * receive frames */ + ath9k_setpower(sc, ATH9K_PM_AWAKE); + ath9k_hw_setrxabort(sc->sc_ah, 0); + sc->ps_flags |= PS_WAIT_FOR_BEACON; + } + +chip_reset: + + ath_debug_stat_interrupt(sc, status); + + if (sched) { + /* turn off every interrupt except SWBA */ + ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA)); + tasklet_schedule(&sc->intr_tq); + } + + return IRQ_HANDLED; + +#undef SCHED_INTR +} + +static u32 ath_get_extchanmode(struct ath_softc *sc, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type) +{ + u32 chanmode = 0; + + switch (chan->band) { + case IEEE80211_BAND_2GHZ: + switch(channel_type) { + case NL80211_CHAN_NO_HT: + case NL80211_CHAN_HT20: + chanmode = CHANNEL_G_HT20; + break; + case NL80211_CHAN_HT40PLUS: + chanmode = CHANNEL_G_HT40PLUS; + break; + case NL80211_CHAN_HT40MINUS: + chanmode = CHANNEL_G_HT40MINUS; + break; + } + break; + case IEEE80211_BAND_5GHZ: + switch(channel_type) { + case NL80211_CHAN_NO_HT: + case NL80211_CHAN_HT20: + chanmode = CHANNEL_A_HT20; + break; + case NL80211_CHAN_HT40PLUS: + chanmode = CHANNEL_A_HT40PLUS; + break; + case NL80211_CHAN_HT40MINUS: + chanmode = CHANNEL_A_HT40MINUS; + break; + } + break; + default: + break; + } + + return chanmode; +} + +static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key, + struct ath9k_keyval *hk, const u8 *addr, + bool authenticator) +{ + struct ath_hw *ah = common->ah; + const u8 *key_rxmic; + const u8 *key_txmic; + + key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY; + key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY; + + if (addr == NULL) { + /* + * Group key installation - only two key cache entries are used + * regardless of splitmic capability since group key is only + * used either for TX or RX. + */ + if (authenticator) { + memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); + memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic)); + } else { + memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); + memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic)); + } + return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr); + } + if (!common->splitmic) { + /* TX and RX keys share the same key cache entry. */ + memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); + memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic)); + return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr); + } + + /* Separate key cache entries for TX and RX */ + + /* TX key goes at first index, RX key at +32. */ + memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); + if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) { + /* TX MIC entry failed. 
No need to proceed further */ + ath_print(common, ATH_DBG_FATAL, + "Setting TX MIC Key Failed\n"); + return 0; + } + + memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); + /* XXX delete tx key on failure? */ + return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr); +} + +static int ath_reserve_key_cache_slot_tkip(struct ath_common *common) +{ + int i; + + for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) { + if (test_bit(i, common->keymap) || + test_bit(i + 64, common->keymap)) + continue; /* At least one part of TKIP key allocated */ + if (common->splitmic && + (test_bit(i + 32, common->keymap) || + test_bit(i + 64 + 32, common->keymap))) + continue; /* At least one part of TKIP key allocated */ + + /* Found a free slot for a TKIP key */ + return i; + } + return -1; +} + +static int ath_reserve_key_cache_slot(struct ath_common *common) +{ + int i; + + /* First, try to find slots that would not be available for TKIP. */ + if (common->splitmic) { + for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) { + if (!test_bit(i, common->keymap) && + (test_bit(i + 32, common->keymap) || + test_bit(i + 64, common->keymap) || + test_bit(i + 64 + 32, common->keymap))) + return i; + if (!test_bit(i + 32, common->keymap) && + (test_bit(i, common->keymap) || + test_bit(i + 64, common->keymap) || + test_bit(i + 64 + 32, common->keymap))) + return i + 32; + if (!test_bit(i + 64, common->keymap) && + (test_bit(i , common->keymap) || + test_bit(i + 32, common->keymap) || + test_bit(i + 64 + 32, common->keymap))) + return i + 64; + if (!test_bit(i + 64 + 32, common->keymap) && + (test_bit(i, common->keymap) || + test_bit(i + 32, common->keymap) || + test_bit(i + 64, common->keymap))) + return i + 64 + 32; + } + } else { + for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) { + if (!test_bit(i, common->keymap) && + test_bit(i + 64, common->keymap)) + return i; + if (test_bit(i, common->keymap) && + !test_bit(i + 64, common->keymap)) + return i + 64; + } + } + + /* No partially used TKIP slots, pick any available slot */ + for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) { + /* Do not allow slots that could be needed for TKIP group keys + * to be used. This limitation could be removed if we know that + * TKIP will not be used. */ + if (i >= 64 && i < 64 + IEEE80211_WEP_NKID) + continue; + if (common->splitmic) { + if (i >= 32 && i < 32 + IEEE80211_WEP_NKID) + continue; + if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID) + continue; + } + + if (!test_bit(i, common->keymap)) + return i; /* Found a free slot for a key */ + } + + /* No free slot found */ + return -1; +} + +static int ath_key_config(struct ath_common *common, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct ath_hw *ah = common->ah; + struct ath9k_keyval hk; + const u8 *mac = NULL; + int ret = 0; + int idx; + + memset(&hk, 0, sizeof(hk)); + + switch (key->alg) { + case ALG_WEP: + hk.kv_type = ATH9K_CIPHER_WEP; + break; + case ALG_TKIP: + hk.kv_type = ATH9K_CIPHER_TKIP; + break; + case ALG_CCMP: + hk.kv_type = ATH9K_CIPHER_AES_CCM; + break; + default: + return -EOPNOTSUPP; + } + + hk.kv_len = key->keylen; + memcpy(hk.kv_val, key->key, key->keylen); + + if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + /* For now, use the default keys for broadcast keys. This may + * need to change with virtual interfaces. 
*/ + idx = key->keyidx; + } else if (key->keyidx) { + if (WARN_ON(!sta)) + return -EOPNOTSUPP; + mac = sta->addr; + + if (vif->type != NL80211_IFTYPE_AP) { + /* Only keyidx 0 should be used with unicast key, but + * allow this for client mode for now. */ + idx = key->keyidx; + } else + return -EIO; + } else { + if (WARN_ON(!sta)) + return -EOPNOTSUPP; + mac = sta->addr; + + if (key->alg == ALG_TKIP) + idx = ath_reserve_key_cache_slot_tkip(common); + else + idx = ath_reserve_key_cache_slot(common); + if (idx < 0) + return -ENOSPC; /* no free key cache entries */ + } + + if (key->alg == ALG_TKIP) + ret = ath_setkey_tkip(common, idx, key->key, &hk, mac, + vif->type == NL80211_IFTYPE_AP); + else + ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac); + + if (!ret) + return -EIO; + + set_bit(idx, common->keymap); + if (key->alg == ALG_TKIP) { + set_bit(idx + 64, common->keymap); + if (common->splitmic) { + set_bit(idx + 32, common->keymap); + set_bit(idx + 64 + 32, common->keymap); + } + } + + return idx; +} + +static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key) +{ + struct ath_hw *ah = common->ah; + + ath9k_hw_keyreset(ah, key->hw_key_idx); + if (key->hw_key_idx < IEEE80211_WEP_NKID) + return; + + clear_bit(key->hw_key_idx, common->keymap); + if (key->alg != ALG_TKIP) + return; + + clear_bit(key->hw_key_idx + 64, common->keymap); + if (common->splitmic) { + ath9k_hw_keyreset(ah, key->hw_key_idx + 32); + clear_bit(key->hw_key_idx + 32, common->keymap); + clear_bit(key->hw_key_idx + 64 + 32, common->keymap); + } +} + +static void ath9k_bss_assoc_info(struct ath_softc *sc, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + + if (bss_conf->assoc) { + ath_print(common, ATH_DBG_CONFIG, + "Bss Info ASSOC %d, bssid: %pM\n", + bss_conf->aid, common->curbssid); + + /* New association, store aid */ + common->curaid = bss_conf->aid; + ath9k_hw_write_associd(ah); + + /* + * Request a re-configuration of Beacon related timers + * on the receipt of the first Beacon frame (i.e., + * after time sync with the AP). 
+ */ + sc->ps_flags |= PS_BEACON_SYNC; + + /* Configure the beacon */ + ath_beacon_config(sc, vif); + + /* Reset rssi stats */ + sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; + + ath_start_ani(common); + } else { + ath_print(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n"); + common->curaid = 0; + /* Stop ANI */ + del_timer_sync(&common->ani.timer); + } +} + +void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_channel *channel = hw->conf.channel; + int r; + + ath9k_ps_wakeup(sc); + ath9k_hw_configpcipowersave(ah, 0, 0); + + if (!ah->curchan) + ah->curchan = ath_get_curchannel(sc, sc->hw); + + spin_lock_bh(&sc->sc_resetlock); + r = ath9k_hw_reset(ah, ah->curchan, false); + if (r) { + ath_print(common, ATH_DBG_FATAL, + "Unable to reset channel (%u MHz), " + "reset status %d\n", + channel->center_freq, r); + } + spin_unlock_bh(&sc->sc_resetlock); + + ath_update_txpow(sc); + if (ath_startrecv(sc) != 0) { + ath_print(common, ATH_DBG_FATAL, + "Unable to restart recv logic\n"); + return; + } + + if (sc->sc_flags & SC_OP_BEACONS) + ath_beacon_config(sc, NULL); /* restart beacons */ + + /* Re-Enable interrupts */ + ath9k_hw_set_interrupts(ah, sc->imask); + + /* Enable LED */ + ath9k_hw_cfg_output(ah, ah->led_pin, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_set_gpio(ah, ah->led_pin, 0); + + ieee80211_wake_queues(hw); + ath9k_ps_restore(sc); +} + +void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) +{ + struct ath_hw *ah = sc->sc_ah; + struct ieee80211_channel *channel = hw->conf.channel; + int r; + + ath9k_ps_wakeup(sc); + ieee80211_stop_queues(hw); + + /* Disable LED */ + ath9k_hw_set_gpio(ah, ah->led_pin, 1); + ath9k_hw_cfg_gpio_input(ah, ah->led_pin); + + /* Disable interrupts */ + ath9k_hw_set_interrupts(ah, 0); + + ath_drain_all_txq(sc, false); /* clear pending tx frames */ + ath_stoprecv(sc); /* turn off frame recv */ + ath_flushrecv(sc); /* flush recv queue */ + + if (!ah->curchan) + ah->curchan = ath_get_curchannel(sc, hw); + + spin_lock_bh(&sc->sc_resetlock); + r = ath9k_hw_reset(ah, ah->curchan, false); + if (r) { + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, + "Unable to reset channel (%u MHz), " + "reset status %d\n", + channel->center_freq, r); + } + spin_unlock_bh(&sc->sc_resetlock); + + ath9k_hw_phy_disable(ah); + ath9k_hw_configpcipowersave(ah, 1, 1); + ath9k_ps_restore(sc); + ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); +} + +int ath_reset(struct ath_softc *sc, bool retry_tx) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_hw *hw = sc->hw; + int r; + + /* Stop ANI */ + del_timer_sync(&common->ani.timer); + + ieee80211_stop_queues(hw); + + ath9k_hw_set_interrupts(ah, 0); + ath_drain_all_txq(sc, retry_tx); + ath_stoprecv(sc); + ath_flushrecv(sc); + + spin_lock_bh(&sc->sc_resetlock); + r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); + if (r) + ath_print(common, ATH_DBG_FATAL, + "Unable to reset hardware; reset status %d\n", r); + spin_unlock_bh(&sc->sc_resetlock); + + if (ath_startrecv(sc) != 0) + ath_print(common, ATH_DBG_FATAL, + "Unable to start recv logic\n"); + + /* + * We may be doing a reset in response to a request + * that changes the channel so update any state that + * might change as a result. 
+ */ + ath_cache_conf_rate(sc, &hw->conf); + + ath_update_txpow(sc); + + if (sc->sc_flags & SC_OP_BEACONS) + ath_beacon_config(sc, NULL); /* restart beacons */ + + ath9k_hw_set_interrupts(ah, sc->imask); + + if (retry_tx) { + int i; + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (ATH_TXQ_SETUP(sc, i)) { + spin_lock_bh(&sc->tx.txq[i].axq_lock); + ath_txq_schedule(sc, &sc->tx.txq[i]); + spin_unlock_bh(&sc->tx.txq[i].axq_lock); + } + } + } + + ieee80211_wake_queues(hw); + + /* Start ANI */ + ath_start_ani(common); + + return r; +} + +int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) +{ + int qnum; + + switch (queue) { + case 0: + qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO]; + break; + case 1: + qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI]; + break; + case 2: + qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE]; + break; + case 3: + qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK]; + break; + default: + qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE]; + break; + } + + return qnum; +} + +int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc) +{ + int qnum; + + switch (queue) { + case ATH9K_WME_AC_VO: + qnum = 0; + break; + case ATH9K_WME_AC_VI: + qnum = 1; + break; + case ATH9K_WME_AC_BE: + qnum = 2; + break; + case ATH9K_WME_AC_BK: + qnum = 3; + break; + default: + qnum = -1; + break; + } + + return qnum; +} + +/* XXX: Remove me once we don't depend on ath9k_channel for all + * this redundant data */ +void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, + struct ath9k_channel *ichan) +{ + struct ieee80211_channel *chan = hw->conf.channel; + struct ieee80211_conf *conf = &hw->conf; + + ichan->channel = chan->center_freq; + ichan->chan = chan; + + if (chan->band == IEEE80211_BAND_2GHZ) { + ichan->chanmode = CHANNEL_G; + ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G; + } else { + ichan->chanmode = CHANNEL_A; + ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM; + } + + if (conf_is_ht(conf)) + ichan->chanmode = ath_get_extchanmode(sc, chan, + conf->channel_type); +} + +/**********************/ +/* mac80211 callbacks */ +/**********************/ + +static int ath9k_start(struct ieee80211_hw *hw) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_channel *curchan = hw->conf.channel; + struct ath9k_channel *init_channel; + int r; + + ath_print(common, ATH_DBG_CONFIG, + "Starting driver with initial channel: %d MHz\n", + curchan->center_freq); + + mutex_lock(&sc->mutex); + + if (ath9k_wiphy_started(sc)) { + if (sc->chan_idx == curchan->hw_value) { + /* + * Already on the operational channel, the new wiphy + * can be marked active. + */ + aphy->state = ATH_WIPHY_ACTIVE; + ieee80211_wake_queues(hw); + } else { + /* + * Another wiphy is on another channel, start the new + * wiphy in paused state. + */ + aphy->state = ATH_WIPHY_PAUSED; + ieee80211_stop_queues(hw); + } + mutex_unlock(&sc->mutex); + return 0; + } + aphy->state = ATH_WIPHY_ACTIVE; + + /* setup initial channel */ + + sc->chan_idx = curchan->hw_value; + + init_channel = ath_get_curchannel(sc, hw); + + /* Reset SERDES registers */ + ath9k_hw_configpcipowersave(ah, 0, 0); + + /* + * The basic interface to setting the hardware in a good + * state is ``reset''. On return the hardware is known to + * be powered up and with interrupts disabled. This must + * be followed by initialization of the appropriate bits + * and then setup of the interrupt mask. 
+ */ + spin_lock_bh(&sc->sc_resetlock); + r = ath9k_hw_reset(ah, init_channel, false); + if (r) { + ath_print(common, ATH_DBG_FATAL, + "Unable to reset hardware; reset status %d " + "(freq %u MHz)\n", r, + curchan->center_freq); + spin_unlock_bh(&sc->sc_resetlock); + goto mutex_unlock; + } + spin_unlock_bh(&sc->sc_resetlock); + + /* + * This is needed only to setup initial state + * but it's best done after a reset. + */ + ath_update_txpow(sc); + + /* + * Setup the hardware after reset: + * The receive engine is set going. + * Frame transmit is handled entirely + * in the frame output path; there's nothing to do + * here except setup the interrupt mask. + */ + if (ath_startrecv(sc) != 0) { + ath_print(common, ATH_DBG_FATAL, + "Unable to start recv logic\n"); + r = -EIO; + goto mutex_unlock; + } + + /* Setup our intr mask. */ + sc->imask = ATH9K_INT_RX | ATH9K_INT_TX + | ATH9K_INT_RXEOL | ATH9K_INT_RXORN + | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL; + + if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT) + sc->imask |= ATH9K_INT_GTT; + + if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) + sc->imask |= ATH9K_INT_CST; + + ath_cache_conf_rate(sc, &hw->conf); + + sc->sc_flags &= ~SC_OP_INVALID; + + /* Disable BMISS interrupt when we're not associated */ + sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); + ath9k_hw_set_interrupts(ah, sc->imask); + + ieee80211_wake_queues(hw); + + ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); + + if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) && + !ah->btcoex_hw.enabled) { + ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, + AR_STOMP_LOW_WLAN_WGHT); + ath9k_hw_btcoex_enable(ah); + + if (common->bus_ops->bt_coex_prep) + common->bus_ops->bt_coex_prep(common); + if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) + ath9k_btcoex_timer_resume(sc); + } + +mutex_unlock: + mutex_unlock(&sc->mutex); + + return r; +} + +static int ath9k_tx(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_tx_control txctl; + int padpos, padsize; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) { + ath_print(common, ATH_DBG_XMIT, + "ath9k: %s: TX in unexpected wiphy state " + "%d\n", wiphy_name(hw->wiphy), aphy->state); + goto exit; + } + + if (sc->ps_enabled) { + /* + * mac80211 does not set PM field for normal data frames, so we + * need to update that based on the current PS mode. + */ + if (ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_nullfunc(hdr->frame_control) && + !ieee80211_has_pm(hdr->frame_control)) { + ath_print(common, ATH_DBG_PS, "Add PM=1 for a TX frame " + "while in PS mode\n"); + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + } + } + + if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) { + /* + * We are using PS-Poll and mac80211 can request TX while in + * power save mode. Need to wake up hardware for the TX to be + * completed and if needed, also for RX of buffered frames. 
+ */ + ath9k_ps_wakeup(sc); + ath9k_hw_setrxabort(sc->sc_ah, 0); + if (ieee80211_is_pspoll(hdr->frame_control)) { + ath_print(common, ATH_DBG_PS, + "Sending PS-Poll to pick a buffered frame\n"); + sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA; + } else { + ath_print(common, ATH_DBG_PS, + "Wake up to complete TX\n"); + sc->ps_flags |= PS_WAIT_FOR_TX_ACK; + } + /* + * The actual restore operation will happen only after + * the sc_flags bit is cleared. We are just dropping + * the ps_usecount here. + */ + ath9k_ps_restore(sc); + } + + memset(&txctl, 0, sizeof(struct ath_tx_control)); + + /* + * As a temporary workaround, assign seq# here; this will likely need + * to be cleaned up to work better with Beacon transmission and virtual + * BSSes. + */ + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { + if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) + sc->tx.seq_no += 0x10; + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); + } + + /* Add the padding after the header if this is not already done */ + padpos = ath9k_cmn_padpos(hdr->frame_control); + padsize = padpos & 3; + if (padsize && skb->len>padpos) { + if (skb_headroom(skb) < padsize) + return -1; + skb_push(skb, padsize); + memmove(skb->data, skb->data + padsize, padpos); + } + + /* Check if a tx queue is available */ + + txctl.txq = ath_test_get_txq(sc, skb); + if (!txctl.txq) + goto exit; + + ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb); + + if (ath_tx_start(hw, skb, &txctl) != 0) { + ath_print(common, ATH_DBG_XMIT, "TX failed\n"); + goto exit; + } + + return 0; +exit: + dev_kfree_skb_any(skb); + return 0; +} + +static void ath9k_stop(struct ieee80211_hw *hw) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + + mutex_lock(&sc->mutex); + + aphy->state = ATH_WIPHY_INACTIVE; + + cancel_delayed_work_sync(&sc->ath_led_blink_work); + cancel_delayed_work_sync(&sc->tx_complete_work); + + if (!sc->num_sec_wiphy) { + cancel_delayed_work_sync(&sc->wiphy_work); + cancel_work_sync(&sc->chan_work); + } + + if (sc->sc_flags & SC_OP_INVALID) { + ath_print(common, ATH_DBG_ANY, "Device not present\n"); + mutex_unlock(&sc->mutex); + return; + } + + if (ath9k_wiphy_started(sc)) { + mutex_unlock(&sc->mutex); + return; /* another wiphy still in use */ + } + + /* Ensure HW is awake when we try to shut it down. */ + ath9k_ps_wakeup(sc); + + if (ah->btcoex_hw.enabled) { + ath9k_hw_btcoex_disable(ah); + if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) + ath9k_btcoex_timer_pause(sc); + } + + /* make sure h/w will not generate any interrupt + * before setting the invalid flag. 
*/ + ath9k_hw_set_interrupts(ah, 0); + + if (!(sc->sc_flags & SC_OP_INVALID)) { + ath_drain_all_txq(sc, false); + ath_stoprecv(sc); + ath9k_hw_phy_disable(ah); + } else + sc->rx.rxlink = NULL; + + /* disable HAL and put h/w to sleep */ + ath9k_hw_disable(ah); + ath9k_hw_configpcipowersave(ah, 1, 1); + ath9k_ps_restore(sc); + + /* Finally, put the chip in FULL SLEEP mode */ + ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); + + sc->sc_flags |= SC_OP_INVALID; + + mutex_unlock(&sc->mutex); + + ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); +} + +static int ath9k_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_vif *avp = (void *)vif->drv_priv; + enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; + int ret = 0; + + mutex_lock(&sc->mutex); + + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) && + sc->nvifs > 0) { + ret = -ENOBUFS; + goto out; + } + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + ic_opmode = NL80211_IFTYPE_STATION; + break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + if (sc->nbcnvifs >= ATH_BCBUF) { + ret = -ENOBUFS; + goto out; + } + ic_opmode = vif->type; + break; + default: + ath_print(common, ATH_DBG_FATAL, + "Interface type %d not yet supported\n", vif->type); + ret = -EOPNOTSUPP; + goto out; + } + + ath_print(common, ATH_DBG_CONFIG, + "Attach a VIF of type: %d\n", ic_opmode); + + /* Set the VIF opmode */ + avp->av_opmode = ic_opmode; + avp->av_bslot = -1; + + sc->nvifs++; + + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) + ath9k_set_bssid_mask(hw); + + if (sc->nvifs > 1) + goto out; /* skip global settings for secondary vif */ + + if (ic_opmode == NL80211_IFTYPE_AP) { + ath9k_hw_set_tsfadjust(sc->sc_ah, 1); + sc->sc_flags |= SC_OP_TSF_RESET; + } + + /* Set the device opmode */ + sc->sc_ah->opmode = ic_opmode; + + /* + * Enable MIB interrupts when there are hardware phy counters. + * Note we only do this (at the moment) for station mode. 
+ */ + if ((vif->type == NL80211_IFTYPE_STATION) || + (vif->type == NL80211_IFTYPE_ADHOC) || + (vif->type == NL80211_IFTYPE_MESH_POINT)) { + sc->imask |= ATH9K_INT_MIB; + sc->imask |= ATH9K_INT_TSFOOR; + } + + ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); + + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MONITOR) + ath_start_ani(common); + +out: + mutex_unlock(&sc->mutex); + return ret; +} + +static void ath9k_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_vif *avp = (void *)vif->drv_priv; + int i; + + ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); + + mutex_lock(&sc->mutex); + + /* Stop ANI */ + del_timer_sync(&common->ani.timer); + + /* Reclaim beacon resources */ + if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || + (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || + (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { + ath9k_ps_wakeup(sc); + ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); + ath9k_ps_restore(sc); + } + + ath_beacon_return(sc, avp); + sc->sc_flags &= ~SC_OP_BEACONS; + + for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { + if (sc->beacon.bslot[i] == vif) { + printk(KERN_DEBUG "%s: vif had allocated beacon " + "slot\n", __func__); + sc->beacon.bslot[i] = NULL; + sc->beacon.bslot_aphy[i] = NULL; + } + } + + sc->nvifs--; + + mutex_unlock(&sc->mutex); +} + +void ath9k_enable_ps(struct ath_softc *sc) +{ + sc->ps_enabled = true; + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) { + sc->imask |= ATH9K_INT_TIM_TIMER; + ath9k_hw_set_interrupts(sc->sc_ah, + sc->imask); + } + } + ath9k_hw_setrxabort(sc->sc_ah, 1); +} + +static int ath9k_config(struct ieee80211_hw *hw, u32 changed) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ieee80211_conf *conf = &hw->conf; + struct ath_hw *ah = sc->sc_ah; + bool disable_radio; + + mutex_lock(&sc->mutex); + + /* + * Leave this as the first check because we need to turn on the + * radio if it was disabled before, prior to processing the rest + * of the changes. Likewise we must only disable the radio towards + * the end. + */ + if (changed & IEEE80211_CONF_CHANGE_IDLE) { + bool enable_radio = false; + bool all_wiphys_idle; + bool idle = !!(conf->flags & IEEE80211_CONF_IDLE); + + spin_lock_bh(&sc->wiphy_lock); + all_wiphys_idle = ath9k_all_wiphys_idle(sc); + ath9k_set_wiphy_idle(aphy, idle); + + if (!idle && all_wiphys_idle) + enable_radio = true; + + /* + * After we unlock here it's possible another wiphy + * can be re-enabled, so to account for that we will + * only disable the radio toward the end of this routine, + * if by then all wiphys are still idle. + */ + spin_unlock_bh(&sc->wiphy_lock); + + if (enable_radio) { + sc->ps_idle = false; + ath_radio_enable(sc, hw); + ath_print(common, ATH_DBG_CONFIG, + "not-idle: enabling radio\n"); + } + } + + /* + * We just prepare to enable PS. We have to wait until our AP has + * ACK'd our null data frame to disable RX otherwise we'll ignore + * those ACKs and end up retransmitting the same null data frames. + * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
+ */
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (conf->flags & IEEE80211_CONF_PS) {
+ sc->ps_flags |= PS_ENABLED;
+ /*
+ * At this point we know hardware has received an ACK
+ * of a previously sent null data frame.
+ */
+ if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) {
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
+ ath9k_enable_ps(sc);
+ }
+ } else {
+ sc->ps_enabled = false;
+ sc->ps_flags &= ~(PS_ENABLED |
+ PS_NULLFUNC_COMPLETED);
+ ath9k_setpower(sc, ATH9K_PM_AWAKE);
+ if (!(ah->caps.hw_caps &
+ ATH9K_HW_CAP_AUTOSLEEP)) {
+ ath9k_hw_setrxabort(sc->sc_ah, 0);
+ sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK);
+ if (sc->imask & ATH9K_INT_TIM_TIMER) {
+ sc->imask &= ~ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(sc->sc_ah,
+ sc->imask);
+ }
+ }
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ int pos = curchan->hw_value;
+
+ aphy->chan_idx = pos;
+ aphy->chan_is_ht = conf_is_ht(conf);
+
+ if (aphy->state == ATH_WIPHY_SCAN ||
+ aphy->state == ATH_WIPHY_ACTIVE)
+ ath9k_wiphy_pause_all_forced(sc, aphy);
+ else {
+ /*
+ * Do not change the operational channel based on
+ * channel changes from a paused wiphy.
+ */
+ goto skip_chan_change;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+ curchan->center_freq);
+
+ /* XXX: remove me eventually */
+ ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
+
+ ath_update_chainmask(sc, conf_is_ht(conf));
+
+ if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to set channel\n");
+ mutex_unlock(&sc->mutex);
+ return -EINVAL;
+ }
+ }
+
+skip_chan_change:
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ sc->config.txpowlimit = 2 * conf->power_level;
+ ath_update_txpow(sc);
+ }
+
+ spin_lock_bh(&sc->wiphy_lock);
+ disable_radio = ath9k_all_wiphys_idle(sc);
+ spin_unlock_bh(&sc->wiphy_lock);
+
+ if (disable_radio) {
+ ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
+ sc->ps_idle = true;
+ ath_radio_disable(sc, hw);
+ }
+
+ mutex_unlock(&sc->mutex);
+
+ return 0;
+}
+
+#define SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_FCSFAIL)
+
+/* FIXME: sc->sc_full_reset ?
*/ +static void ath9k_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + u32 rfilt; + + changed_flags &= SUPPORTED_FILTERS; + *total_flags &= SUPPORTED_FILTERS; + + sc->rx.rxfilter = *total_flags; + ath9k_ps_wakeup(sc); + rfilt = ath_calcrxfilter(sc); + ath9k_hw_setrxfilter(sc->sc_ah, rfilt); + ath9k_ps_restore(sc); + + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, + "Set HW RX filter: 0x%x\n", rfilt); +} + +static int ath9k_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + + ath_node_attach(sc, sta); + + return 0; +} + +static int ath9k_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + + ath_node_detach(sc, sta); + + return 0; +} + +static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath9k_tx_queue_info qi; + int ret = 0, qnum; + + if (queue >= WME_NUM_AC) + return 0; + + mutex_lock(&sc->mutex); + + memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); + + qi.tqi_aifs = params->aifs; + qi.tqi_cwmin = params->cw_min; + qi.tqi_cwmax = params->cw_max; + qi.tqi_burstTime = params->txop; + qnum = ath_get_hal_qnum(queue, sc); + + ath_print(common, ATH_DBG_CONFIG, + "Configure tx [queue/halq] [%d/%d], " + "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", + queue, qnum, params->aifs, params->cw_min, + params->cw_max, params->txop); + + ret = ath_txq_update(sc, qnum, &qi); + if (ret) + ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n"); + + if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) + if ((qnum == sc->tx.hwq_map[ATH9K_WME_AC_BE]) && !ret) + ath_beaconq_config(sc); + + mutex_unlock(&sc->mutex); + + return ret; +} + +static int ath9k_set_key(struct ieee80211_hw *hw, + enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + int ret = 0; + + if (modparam_nohwcrypt) + return -ENOSPC; + + mutex_lock(&sc->mutex); + ath9k_ps_wakeup(sc); + ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n"); + + switch (cmd) { + case SET_KEY: + ret = ath_key_config(common, vif, sta, key); + if (ret >= 0) { + key->hw_key_idx = ret; + /* push IV and Michael MIC generation to stack */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + if (key->alg == ALG_TKIP) + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP) + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; + ret = 0; + } + break; + case DISABLE_KEY: + ath_key_delete(common, key); + break; + default: + ret = -EINVAL; + } + + ath9k_ps_restore(sc); + mutex_unlock(&sc->mutex); + + return ret; +} + +static void ath9k_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath_vif *avp = (void *)vif->drv_priv; + int slottime; 
+ int error; + + mutex_lock(&sc->mutex); + + if (changed & BSS_CHANGED_BSSID) { + /* Set BSSID */ + memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); + memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN); + common->curaid = 0; + ath9k_hw_write_associd(ah); + + /* Set aggregation protection mode parameters */ + sc->config.ath_aggr_prot = 0; + + /* Only legacy IBSS for now */ + if (vif->type == NL80211_IFTYPE_ADHOC) + ath_update_chainmask(sc, 0); + + ath_print(common, ATH_DBG_CONFIG, + "BSSID: %pM aid: 0x%x\n", + common->curbssid, common->curaid); + + /* need to reconfigure the beacon */ + sc->sc_flags &= ~SC_OP_BEACONS ; + } + + /* Enable transmission of beacons (AP, IBSS, MESH) */ + if ((changed & BSS_CHANGED_BEACON) || + ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) { + ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); + error = ath_beacon_alloc(aphy, vif); + if (!error) + ath_beacon_config(sc, vif); + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + if (bss_conf->use_short_slot) + slottime = 9; + else + slottime = 20; + if (vif->type == NL80211_IFTYPE_AP) { + /* + * Defer update, so that connected stations can adjust + * their settings at the same time. + * See beacon.c for more details + */ + sc->beacon.slottime = slottime; + sc->beacon.updateslot = UPDATE; + } else { + ah->slottime = slottime; + ath9k_hw_init_global_settings(ah); + } + } + + /* Disable transmission of beacons */ + if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) + ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); + + if (changed & BSS_CHANGED_BEACON_INT) { + sc->beacon_interval = bss_conf->beacon_int; + /* + * In case of AP mode, the HW TSF has to be reset + * when the beacon interval changes. + */ + if (vif->type == NL80211_IFTYPE_AP) { + sc->sc_flags |= SC_OP_TSF_RESET; + ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); + error = ath_beacon_alloc(aphy, vif); + if (!error) + ath_beacon_config(sc, vif); + } else { + ath_beacon_config(sc, vif); + } + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", + bss_conf->use_short_preamble); + if (bss_conf->use_short_preamble) + sc->sc_flags |= SC_OP_PREAMBLE_SHORT; + else + sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT; + } + + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", + bss_conf->use_cts_prot); + if (bss_conf->use_cts_prot && + hw->conf.channel->band != IEEE80211_BAND_5GHZ) + sc->sc_flags |= SC_OP_PROTECT_ENABLE; + else + sc->sc_flags &= ~SC_OP_PROTECT_ENABLE; + } + + if (changed & BSS_CHANGED_ASSOC) { + ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", + bss_conf->assoc); + ath9k_bss_assoc_info(sc, vif, bss_conf); + } + + mutex_unlock(&sc->mutex); +} + +static u64 ath9k_get_tsf(struct ieee80211_hw *hw) +{ + u64 tsf; + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + + mutex_lock(&sc->mutex); + tsf = ath9k_hw_gettsf64(sc->sc_ah); + mutex_unlock(&sc->mutex); + + return tsf; +} + +static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + + mutex_lock(&sc->mutex); + ath9k_hw_settsf64(sc->sc_ah, tsf); + mutex_unlock(&sc->mutex); +} + +static void ath9k_reset_tsf(struct ieee80211_hw *hw) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + + mutex_lock(&sc->mutex); + + ath9k_ps_wakeup(sc); + ath9k_hw_reset_tsf(sc->sc_ah); + ath9k_ps_restore(sc); + + mutex_unlock(&sc->mutex); +} + +static 
int ath9k_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, + u16 tid, u16 *ssn) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + int ret = 0; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + if (!(sc->sc_flags & SC_OP_RXAGGR)) + ret = -ENOTSUPP; + break; + case IEEE80211_AMPDU_RX_STOP: + break; + case IEEE80211_AMPDU_TX_START: + ath9k_ps_wakeup(sc); + ath_tx_aggr_start(sc, sta, tid, ssn); + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + ath9k_ps_restore(sc); + break; + case IEEE80211_AMPDU_TX_STOP: + ath9k_ps_wakeup(sc); + ath_tx_aggr_stop(sc, sta, tid); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + ath9k_ps_restore(sc); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ath9k_ps_wakeup(sc); + ath_tx_aggr_resume(sc, sta, tid); + ath9k_ps_restore(sc); + break; + default: + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, + "Unknown AMPDU action\n"); + } + + return ret; +} + +static void ath9k_sw_scan_start(struct ieee80211_hw *hw) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + mutex_lock(&sc->mutex); + if (ath9k_wiphy_scanning(sc)) { + printk(KERN_DEBUG "ath9k: Two wiphys trying to scan at the " + "same time\n"); + /* + * Do not allow the concurrent scanning state for now. This + * could be improved with scanning control moved into ath9k. + */ + mutex_unlock(&sc->mutex); + return; + } + + aphy->state = ATH_WIPHY_SCAN; + ath9k_wiphy_pause_all_forced(sc, aphy); + sc->sc_flags |= SC_OP_SCANNING; + del_timer_sync(&common->ani.timer); + cancel_delayed_work_sync(&sc->tx_complete_work); + mutex_unlock(&sc->mutex); +} + +static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + mutex_lock(&sc->mutex); + aphy->state = ATH_WIPHY_ACTIVE; + sc->sc_flags &= ~SC_OP_SCANNING; + sc->sc_flags |= SC_OP_FULL_RESET; + ath_start_ani(common); + ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); + ath_beacon_config(sc, NULL); + mutex_unlock(&sc->mutex); +} + +static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_hw *ah = sc->sc_ah; + + mutex_lock(&sc->mutex); + ah->coverage_class = coverage_class; + ath9k_hw_init_global_settings(ah); + mutex_unlock(&sc->mutex); +} + +struct ieee80211_ops ath9k_ops = { + .tx = ath9k_tx, + .start = ath9k_start, + .stop = ath9k_stop, + .add_interface = ath9k_add_interface, + .remove_interface = ath9k_remove_interface, + .config = ath9k_config, + .configure_filter = ath9k_configure_filter, + .sta_add = ath9k_sta_add, + .sta_remove = ath9k_sta_remove, + .conf_tx = ath9k_conf_tx, + .bss_info_changed = ath9k_bss_info_changed, + .set_key = ath9k_set_key, + .get_tsf = ath9k_get_tsf, + .set_tsf = ath9k_set_tsf, + .reset_tsf = ath9k_reset_tsf, + .ampdu_action = ath9k_ampdu_action, + .sw_scan_start = ath9k_sw_scan_start, + .sw_scan_complete = ath9k_sw_scan_complete, + .rfkill_poll = ath9k_rfkill_poll_state, + .set_coverage_class = ath9k_set_coverage_class, +}; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c new file mode 100644 index 0000000..6bebd50 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -0,0 +1,324 @@ +/* + * Copyright (c) 
2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/nl80211.h>
+#include <linux/pci.h>
+#include "ath9k.h"
+
+static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
+ { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
+ { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
+ { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
+ { 0 }
+};
+
+/* return bus cachesize in 4B word units */
+static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
+{
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ u8 u8tmp;
+
+ pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE, &u8tmp);
+ *csz = (int)u8tmp;
+
+ /*
+ * This check was put in to avoid "unpleasant" consequences if
+ * the bootrom has not fully initialized all PCI devices.
+ * Sometimes the cache line size register is not set.
+ */
+
+ if (*csz == 0)
+ *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
+}
+
+static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath_hw *ah = (struct ath_hw *) common->ah;
+
+ common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
+
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
+ AH_WAIT_TIMEOUT)) {
+ return false;
+ }
+
+ *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
+
+ return true;
+}
+
+/*
+ * Bluetooth coexistence requires disabling ASPM.
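+ * The helper below does this by clearing the ASPM L0s and L1 enable bits
+ * in the device's PCI Express Link Control register.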
+ */ +static void ath_pci_bt_coex_prep(struct ath_common *common) +{ + struct ath_softc *sc = (struct ath_softc *) common->priv; + struct pci_dev *pdev = to_pci_dev(sc->dev); + u8 aspm; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + if (!pdev->is_pcie) +#else + if (!compat_is_pcie(pdev)) +#endif + return; + + pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm); + aspm &= ~(ATH_PCIE_CAP_LINK_L0S | ATH_PCIE_CAP_LINK_L1); + pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm); +} + +static const struct ath_bus_ops ath_pci_bus_ops = { + .read_cachesize = ath_pci_read_cachesize, + .eeprom_read = ath_pci_eeprom_read, + .bt_coex_prep = ath_pci_bt_coex_prep, +}; + +static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + void __iomem *mem; + struct ath_wiphy *aphy; + struct ath_softc *sc; + struct ieee80211_hw *hw; + u8 csz; + u16 subsysid; + u32 val; + int ret = 0; + char hw_name[64]; + + if (pci_enable_device(pdev)) + return -EIO; + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); + goto err_dma; + } + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + printk(KERN_ERR "ath9k: 32-bit DMA consistent " + "DMA enable failed\n"); + goto err_dma; + } + + /* + * Cache line size is used to size and align various + * structures used to communicate with the hardware. + */ + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); + if (csz == 0) { + /* + * Linux 2.4.18 (at least) writes the cache line size + * register as a 16-bit wide register which is wrong. + * We must have this setup properly for rx buffer + * DMA to work so force a reasonable value here if it + * comes up zero. + */ + csz = L1_CACHE_BYTES / sizeof(u32); + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz); + } + /* + * The default setting of latency timer yields poor results, + * set it to the value used by other systems. It may be worth + * tweaking this setting more. + */ + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8); + + pci_set_master(pdev); + + /* + * Disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. 
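+ * (Concretely, the code below reads the dword at config offset 0x40 and
+ * clears bits 15:8, i.e. the byte at offset 0x41.)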
+ */ + pci_read_config_dword(pdev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + + ret = pci_request_region(pdev, 0, "ath9k"); + if (ret) { + dev_err(&pdev->dev, "PCI memory region reserve error\n"); + ret = -ENODEV; + goto err_region; + } + + mem = pci_iomap(pdev, 0, 0); + if (!mem) { + printk(KERN_ERR "PCI memory map error\n") ; + ret = -EIO; + goto err_iomap; + } + + hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + + sizeof(struct ath_softc), &ath9k_ops); + if (!hw) { + dev_err(&pdev->dev, "No memory for ieee80211_hw\n"); + ret = -ENOMEM; + goto err_alloc_hw; + } + + SET_IEEE80211_DEV(hw, &pdev->dev); + pci_set_drvdata(pdev, hw); + + aphy = hw->priv; + sc = (struct ath_softc *) (aphy + 1); + aphy->sc = sc; + aphy->hw = hw; + sc->pri_wiphy = aphy; + sc->hw = hw; + sc->dev = &pdev->dev; + sc->mem = mem; + + /* Will be cleared in ath9k_start() */ + sc->sc_flags |= SC_OP_INVALID; + + ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc); + if (ret) { + dev_err(&pdev->dev, "request_irq failed\n"); + goto err_irq; + } + + sc->irq = pdev->irq; + + pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid); + ret = ath9k_init_device(id->device, sc, subsysid, &ath_pci_bus_ops); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize device\n"); + goto err_init; + } + + ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name)); + printk(KERN_INFO + "%s: %s mem=0x%lx, irq=%d\n", + wiphy_name(hw->wiphy), + hw_name, + (unsigned long)mem, pdev->irq); + + return 0; + +err_init: + free_irq(sc->irq, sc); +err_irq: + ieee80211_free_hw(hw); +err_alloc_hw: + pci_iounmap(pdev, mem); +err_iomap: + pci_release_region(pdev, 0); +err_region: + /* Nothing */ +err_dma: + pci_disable_device(pdev); + return ret; +} + +static void ath_pci_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + void __iomem *mem = sc->mem; + + ath9k_deinit_device(sc); + free_irq(sc->irq, sc); + ieee80211_free_hw(sc->hw); + + pci_iounmap(pdev, mem); + pci_disable_device(pdev); + pci_release_region(pdev, 0); +} + +#ifdef CONFIG_PM + +static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int ath_pci_resume(struct pci_dev *pdev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + u32 val; + int err; + + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) + return err; + + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state + */ + pci_read_config_dword(pdev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + + /* Enable LED */ + ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); + + return 0; +} + +#endif /* CONFIG_PM */ + +MODULE_DEVICE_TABLE(pci, ath_pci_id_table); + +static struct pci_driver ath_pci_driver = { + .name = "ath9k", + .id_table = ath_pci_id_table, + .probe = ath_pci_probe, + .remove = 
ath_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = ath_pci_suspend,
+ .resume = ath_pci_resume,
+#endif /* CONFIG_PM */
+};
+
+int ath_pci_init(void)
+{
+ return pci_register_driver(&ath_pci_driver);
+}
+
+void ath_pci_exit(void)
+{
+ pci_unregister_driver(&ath_pci_driver);
+}
diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
new file mode 100644
index 0000000..c3b5939
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/phy.c
@@ -0,0 +1,976 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: Programming Atheros 802.11n analog front end radios
+ *
+ * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
+ * devices have either an external AR2133 analog front end radio for single
+ * band 2.4 GHz communication or an AR5133 analog front end radio for dual
+ * band 2.4 GHz / 5 GHz communication.
+ *
+ * All devices after the AR5416 and AR5418 family starting with the AR9280
+ * have their analog front end radios, MAC/BB and host PCIe/USB interface
+ * embedded into a single chip and require less programming.
+ *
+ * The following single-chips exist with a respective embedded radio:
+ *
+ * AR9280 - 11n dual-band 2x2 MIMO for PCIe
+ * AR9281 - 11n single-band 1x2 MIMO for PCIe
+ * AR9285 - 11n single-band 1x1 for PCIe
+ * AR9287 - 11n single-band 2x2 MIMO for PCIe
+ *
+ * AR9220 - 11n dual-band 2x2 MIMO for PCI
+ * AR9223 - 11n single-band 2x2 MIMO for PCI
+ *
+ * AR9271 - 11n single-band 1x1 MIMO for USB
+ */
+
+#include "hw.h"
+
+/**
+ * ath9k_hw_write_regs - ??
+ *
+ * @ah: atheros hardware structure
+ * @freqIndex:
+ * @regWrites:
+ *
+ * Used both for chipsets with an external AR2133/AR5133 radio and for
+ * single-chip devices.
+ */
+void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites)
+{
+ REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
+}
+
+/**
+ * ath9k_hw_ar9280_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan:
+ *
+ * This is the function to change channel on single-chip devices, that is
+ * all devices after ar9280.
+ *
+ * This function takes the channel value in MHz and sets the
+ * hardware channel value. Assumes writes have been enabled to analog bus.
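+ *
+ * As an illustrative worked example for the expressions that follow (the
+ * numbers are derived from the code in this function, not from hardware
+ * documentation): a 2 GHz channel with synth_center = 2412 MHz is programmed
+ * in fractional mode with bMode = 1, fracMode = 1, aModeRefSel = 0 and
+ * channelSel = (2412 * 0x10000) / 15 = 10538188 (0xa0cccc).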
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ */
+int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode, aModeRefSel = 0;
+ u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
+ struct chan_centers centers;
+ u32 refDivA = 24;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
+ reg32 &= 0xc0000000;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ u32 txctl;
+ int regWrites = 0;
+
+ bMode = 1;
+ fracMode = 1;
+ aModeRefSel = 0;
+ channelSel = (freq * 0x10000) / 15;
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
+ 1, regWrites);
+ } else {
+ REG_WRITE_ARRAY(&ah->iniCckfirNormal,
+ 1, regWrites);
+ }
+ } else {
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl &~ AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+ }
+ } else {
+ bMode = 0;
+ fracMode = 0;
+
+ switch(ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
+ case 0:
+ if ((freq % 20) == 0) {
+ aModeRefSel = 3;
+ } else if ((freq % 10) == 0) {
+ aModeRefSel = 2;
+ }
+ if (aModeRefSel)
+ break;
+ case 1:
+ default:
+ aModeRefSel = 0;
+ /*
+ * Enable 2G (fractional) mode for channels
+ * which are 5MHz spaced.
+ */
+ fracMode = 1;
+ refDivA = 1;
+ channelSel = (freq * 0x8000) / 15;
+
+ /* RefDivA setting */
+ REG_RMW_FIELD(ah, AR_AN_SYNTH9,
+ AR_AN_SYNTH9_REFDIVA, refDivA);
+
+ }
+
+ if (!fracMode) {
+ ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
+ channelSel = ndiv & 0x1ff;
+ channelFrac = (ndiv & 0xfffffe00) * 2;
+ channelSel = (channelSel << 17) | channelFrac;
+ }
+ }
+
+ reg32 = reg32 |
+ (bMode << 29) |
+ (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
+
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
+
+/**
+ * ath9k_hw_9280_spur_mitigate - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan:
+ *
+ * For single-chip solutions. Converts to baseband spur frequency given the
+ * input channel frequency and computes the register settings below.
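+ *
+ * Illustrative arithmetic, derived from the code below rather than from
+ * hardware documentation: a selected spur offset of bb_spur = 2 (MHz above
+ * the synthesizer center) on an HT20 channel gives bin = 2 * 320 = 640 and
+ * spur_delta_phase = (2 * 524288) / 10 = 104857 (before masking).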
+ */
+void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int freq;
+ int bin, cur_bin;
+ int bb_spur_off, spur_subchannel_sd;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, newVal;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+ struct chan_centers centers;
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ ah->config.spurmode = SPUR_ENABLE_EEPROM;
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+
+ if (is2GHz)
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
+ else
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
+
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - freq;
+
+ if (IS_CHAN_HT40(chan)) {
+ if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur) {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ return;
+ } else {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ }
+
+ bin = bb_spur * 320;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+
+ newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
+
+ newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
+
+ if (IS_CHAN_HT40(chan)) {
+ if (bb_spur < 0) {
+ spur_subchannel_sd = 1;
+ bb_spur_off = bb_spur + 10;
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur - 10;
+ }
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur;
+ }
+
+ if (IS_CHAN_HT40(chan))
+ spur_delta_phase =
+ ((bb_spur * 262144) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+ else
+ spur_delta_phase =
+ ((bb_spur * 524288) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ?
44 : 40; + spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff; + + newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | + SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | + SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); + REG_WRITE(ah, AR_PHY_TIMING11, newVal); + + newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S; + REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal); + + cur_bin = -6000; + upper = bin + 100; + lower = bin - 100; + + for (i = 0; i < 4; i++) { + int pilot_mask = 0; + int chan_mask = 0; + int bp = 0; + for (bp = 0; bp < 30; bp++) { + if ((cur_bin > lower) && (cur_bin < upper)) { + pilot_mask = pilot_mask | 0x1 << bp; + chan_mask = chan_mask | 0x1 << bp; + } + cur_bin += 100; + } + cur_bin += inc[i]; + REG_WRITE(ah, pilot_mask_reg[i], pilot_mask); + REG_WRITE(ah, chan_mask_reg[i], chan_mask); + } + + cur_vit_mask = 6100; + upper = bin + 120; + lower = bin - 120; + + for (i = 0; i < 123; i++) { + if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { + + /* workaround for gcc bug #37014 */ + volatile int tmp_v = abs(cur_vit_mask - bin); + + if (tmp_v < 75) + mask_amt = 1; + else + mask_amt = 0; + if (cur_vit_mask < 0) + mask_m[abs(cur_vit_mask / 100)] = mask_amt; + else + mask_p[cur_vit_mask / 100] = mask_amt; + } + cur_vit_mask -= 100; + } + + tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28) + | (mask_m[48] << 26) | (mask_m[49] << 24) + | (mask_m[50] << 22) | (mask_m[51] << 20) + | (mask_m[52] << 18) | (mask_m[53] << 16) + | (mask_m[54] << 14) | (mask_m[55] << 12) + | (mask_m[56] << 10) | (mask_m[57] << 8) + | (mask_m[58] << 6) | (mask_m[59] << 4) + | (mask_m[60] << 2) | (mask_m[61] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask); + REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask); + + tmp_mask = (mask_m[31] << 28) + | (mask_m[32] << 26) | (mask_m[33] << 24) + | (mask_m[34] << 22) | (mask_m[35] << 20) + | (mask_m[36] << 18) | (mask_m[37] << 16) + | (mask_m[48] << 14) | (mask_m[39] << 12) + | (mask_m[40] << 10) | (mask_m[41] << 8) + | (mask_m[42] << 6) | (mask_m[43] << 4) + | (mask_m[44] << 2) | (mask_m[45] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask); + + tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28) + | (mask_m[18] << 26) | (mask_m[18] << 24) + | (mask_m[20] << 22) | (mask_m[20] << 20) + | (mask_m[22] << 18) | (mask_m[22] << 16) + | (mask_m[24] << 14) | (mask_m[24] << 12) + | (mask_m[25] << 10) | (mask_m[26] << 8) + | (mask_m[27] << 6) | (mask_m[28] << 4) + | (mask_m[29] << 2) | (mask_m[30] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask); + + tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28) + | (mask_m[2] << 26) | (mask_m[3] << 24) + | (mask_m[4] << 22) | (mask_m[5] << 20) + | (mask_m[6] << 18) | (mask_m[7] << 16) + | (mask_m[8] << 14) | (mask_m[9] << 12) + | (mask_m[10] << 10) | (mask_m[11] << 8) + | (mask_m[12] << 6) | (mask_m[13] << 4) + | (mask_m[14] << 2) | (mask_m[15] << 0); + REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask); + + tmp_mask = (mask_p[15] << 28) + | (mask_p[14] << 26) | (mask_p[13] << 24) + | (mask_p[12] << 22) | (mask_p[11] << 20) + | (mask_p[10] << 18) | (mask_p[9] << 16) + | (mask_p[8] << 14) | (mask_p[7] << 12) + | (mask_p[6] << 10) | (mask_p[5] << 8) + | (mask_p[4] << 6) | (mask_p[3] << 4) + | (mask_p[2] << 2) | (mask_p[1] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask); + + tmp_mask = (mask_p[30] << 28) + | (mask_p[29] << 
26) | (mask_p[28] << 24) + | (mask_p[27] << 22) | (mask_p[26] << 20) + | (mask_p[25] << 18) | (mask_p[24] << 16) + | (mask_p[23] << 14) | (mask_p[22] << 12) + | (mask_p[21] << 10) | (mask_p[20] << 8) + | (mask_p[19] << 6) | (mask_p[18] << 4) + | (mask_p[17] << 2) | (mask_p[16] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask); + + tmp_mask = (mask_p[45] << 28) + | (mask_p[44] << 26) | (mask_p[43] << 24) + | (mask_p[42] << 22) | (mask_p[41] << 20) + | (mask_p[40] << 18) | (mask_p[39] << 16) + | (mask_p[38] << 14) | (mask_p[37] << 12) + | (mask_p[36] << 10) | (mask_p[35] << 8) + | (mask_p[34] << 6) | (mask_p[33] << 4) + | (mask_p[32] << 2) | (mask_p[31] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask); + + tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28) + | (mask_p[59] << 26) | (mask_p[58] << 24) + | (mask_p[57] << 22) | (mask_p[56] << 20) + | (mask_p[55] << 18) | (mask_p[54] << 16) + | (mask_p[53] << 14) | (mask_p[52] << 12) + | (mask_p[51] << 10) | (mask_p[50] << 8) + | (mask_p[49] << 6) | (mask_p[48] << 4) + | (mask_p[47] << 2) | (mask_p[46] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); +} + +/* All code below is for non single-chip solutions */ + +/** + * ath9k_phy_modify_rx_buffer() - perform analog swizzling of parameters + * @rfbuf: + * @reg32: + * @numBits: + * @firstBit: + * @column: + * + * Performs analog "swizzling" of parameters into their location. + * Used on external AR2133/AR5133 radios. + */ +static void ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32, + u32 numBits, u32 firstBit, + u32 column) +{ + u32 tmp32, mask, arrayEntry, lastBit; + int32_t bitPosition, bitsLeft; + + tmp32 = ath9k_hw_reverse_bits(reg32, numBits); + arrayEntry = (firstBit - 1) / 8; + bitPosition = (firstBit - 1) % 8; + bitsLeft = numBits; + while (bitsLeft > 0) { + lastBit = (bitPosition + bitsLeft > 8) ? + 8 : bitPosition + bitsLeft; + mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) << + (column * 8); + rfBuf[arrayEntry] &= ~mask; + rfBuf[arrayEntry] |= ((tmp32 << bitPosition) << + (column * 8)) & mask; + bitsLeft -= 8 - bitPosition; + tmp32 = tmp32 >> (8 - bitPosition); + bitPosition = 0; + arrayEntry++; + } +} + +/* + * Fix on 2.4 GHz band for orientation sensitivity issue by increasing + * rf_pwd_icsyndiv. 
+ *
+ * Theoretical Rules:
+ * if 2 GHz band
+ * if forceBiasAuto
+ * if synth_freq < 2412
+ * bias = 0
+ * else if 2412 <= synth_freq <= 2422
+ * bias = 1
+ * else // synth_freq > 2422
+ * bias = 2
+ * else if forceBias > 0
+ * bias = forceBias & 7
+ * else
+ * no change, use value from ini file
+ * else
+ * no change, invalid band
+ *
+ * 1st Mod:
+ * 2422 also uses value of 2
+ *
+ *
+ * 2nd Mod:
+ * Less than 2412 uses value of 0, 2412 and above uses value of 2
+ */
+static void ath9k_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 tmp_reg;
+ int reg_writes = 0;
+ u32 new_bias = 0;
+
+ if (!AR_SREV_5416(ah) || synth_freq >= 3000) {
+ return;
+ }
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ if (synth_freq < 2412)
+ new_bias = 0;
+ else if (synth_freq < 2422)
+ new_bias = 1;
+ else
+ new_bias = 2;
+
+ /* pre-reverse this field */
+ tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Force rf_pwd_icsyndiv to %1d on %4d\n",
+ new_bias, synth_freq);
+
+ /* swizzle rf_pwd_icsyndiv */
+ ath9k_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
+
+ /* write Bank 6 with new params */
+ REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
+}
+
+/**
+ * ath9k_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
+ * @ah: atheros hardware structure
+ * @chan:
+ *
+ * For the external AR2133/AR5133 radios, takes the MHz channel value and sets
+ * the channel value. Assumes writes enabled to analog bus and bank6 register
+ * cache in ah->analogBank6Data.
+ */
+int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 channelSel = 0;
+ u32 bModeSynth = 0;
+ u32 aModeRefSel = 0;
+ u32 reg32 = 0;
+ u16 freq;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) {
+ u32 txctl;
+
+ if (((freq - 2192) % 5) == 0) {
+ channelSel = ((freq - 672) * 2 - 3040) / 10;
+ bModeSynth = 0;
+ } else if (((freq - 2224) % 5) == 0) {
+ channelSel = ((freq - 704) * 2 - 3040) / 10;
+ bModeSynth = 1;
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ channelSel = (channelSel << 2) & 0xff;
+ channelSel = ath9k_hw_reverse_bits(channelSel, 8);
+
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+
+ } else if ((freq % 20) == 0 && freq >= 5120) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 10) == 0) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
+ aModeRefSel = ath9k_hw_reverse_bits(2, 2);
+ else
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 5) == 0) {
+ channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ ath9k_hw_force_bias(ah, freq);
+
+ reg32 =
+ (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
+ (1 << 5) | 0x1;
+
+ REG_WRITE(ah, AR_PHY(0x37), reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
+
+/**
+ *
ath9k_hw_spur_mitigate - convert baseband spur frequency for external radios + * @ah: atheros hardware structure + * @chan: + * + * For non single-chip solutions. Converts to baseband spur frequency given the + * input channel frequency and compute register settings below. + */ +void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan) +{ + int bb_spur = AR_NO_SPUR; + int bin, cur_bin; + int spur_freq_sd; + int spur_delta_phase; + int denominator; + int upper, lower, cur_vit_mask; + int tmp, new; + int i; + int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, + AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 + }; + int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, + AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 + }; + int inc[4] = { 0, 100, 0, 0 }; + + int8_t mask_m[123]; + int8_t mask_p[123]; + int8_t mask_amt; + int tmp_mask; + int cur_bb_spur; + bool is2GHz = IS_CHAN_2GHZ(chan); + + memset(&mask_m, 0, sizeof(int8_t) * 123); + memset(&mask_p, 0, sizeof(int8_t) * 123); + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { + cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); + if (AR_NO_SPUR == cur_bb_spur) + break; + cur_bb_spur = cur_bb_spur - (chan->channel * 10); + if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) { + bb_spur = cur_bb_spur; + break; + } + } + + if (AR_NO_SPUR == bb_spur) + return; + + bin = bb_spur * 32; + + tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); + new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | + AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | + AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | + AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); + + REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new); + + new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL | + AR_PHY_SPUR_REG_ENABLE_MASK_PPM | + AR_PHY_SPUR_REG_MASK_RATE_SELECT | + AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | + SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); + REG_WRITE(ah, AR_PHY_SPUR_REG, new); + + spur_delta_phase = ((bb_spur * 524288) / 100) & + AR_PHY_TIMING11_SPUR_DELTA_PHASE; + + denominator = IS_CHAN_2GHZ(chan) ? 
440 : 400; + spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff; + + new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | + SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | + SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); + REG_WRITE(ah, AR_PHY_TIMING11, new); + + cur_bin = -6000; + upper = bin + 100; + lower = bin - 100; + + for (i = 0; i < 4; i++) { + int pilot_mask = 0; + int chan_mask = 0; + int bp = 0; + for (bp = 0; bp < 30; bp++) { + if ((cur_bin > lower) && (cur_bin < upper)) { + pilot_mask = pilot_mask | 0x1 << bp; + chan_mask = chan_mask | 0x1 << bp; + } + cur_bin += 100; + } + cur_bin += inc[i]; + REG_WRITE(ah, pilot_mask_reg[i], pilot_mask); + REG_WRITE(ah, chan_mask_reg[i], chan_mask); + } + + cur_vit_mask = 6100; + upper = bin + 120; + lower = bin - 120; + + for (i = 0; i < 123; i++) { + if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { + + /* workaround for gcc bug #37014 */ + volatile int tmp_v = abs(cur_vit_mask - bin); + + if (tmp_v < 75) + mask_amt = 1; + else + mask_amt = 0; + if (cur_vit_mask < 0) + mask_m[abs(cur_vit_mask / 100)] = mask_amt; + else + mask_p[cur_vit_mask / 100] = mask_amt; + } + cur_vit_mask -= 100; + } + + tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28) + | (mask_m[48] << 26) | (mask_m[49] << 24) + | (mask_m[50] << 22) | (mask_m[51] << 20) + | (mask_m[52] << 18) | (mask_m[53] << 16) + | (mask_m[54] << 14) | (mask_m[55] << 12) + | (mask_m[56] << 10) | (mask_m[57] << 8) + | (mask_m[58] << 6) | (mask_m[59] << 4) + | (mask_m[60] << 2) | (mask_m[61] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask); + REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask); + + tmp_mask = (mask_m[31] << 28) + | (mask_m[32] << 26) | (mask_m[33] << 24) + | (mask_m[34] << 22) | (mask_m[35] << 20) + | (mask_m[36] << 18) | (mask_m[37] << 16) + | (mask_m[48] << 14) | (mask_m[39] << 12) + | (mask_m[40] << 10) | (mask_m[41] << 8) + | (mask_m[42] << 6) | (mask_m[43] << 4) + | (mask_m[44] << 2) | (mask_m[45] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask); + + tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28) + | (mask_m[18] << 26) | (mask_m[18] << 24) + | (mask_m[20] << 22) | (mask_m[20] << 20) + | (mask_m[22] << 18) | (mask_m[22] << 16) + | (mask_m[24] << 14) | (mask_m[24] << 12) + | (mask_m[25] << 10) | (mask_m[26] << 8) + | (mask_m[27] << 6) | (mask_m[28] << 4) + | (mask_m[29] << 2) | (mask_m[30] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask); + + tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28) + | (mask_m[2] << 26) | (mask_m[3] << 24) + | (mask_m[4] << 22) | (mask_m[5] << 20) + | (mask_m[6] << 18) | (mask_m[7] << 16) + | (mask_m[8] << 14) | (mask_m[9] << 12) + | (mask_m[10] << 10) | (mask_m[11] << 8) + | (mask_m[12] << 6) | (mask_m[13] << 4) + | (mask_m[14] << 2) | (mask_m[15] << 0); + REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask); + + tmp_mask = (mask_p[15] << 28) + | (mask_p[14] << 26) | (mask_p[13] << 24) + | (mask_p[12] << 22) | (mask_p[11] << 20) + | (mask_p[10] << 18) | (mask_p[9] << 16) + | (mask_p[8] << 14) | (mask_p[7] << 12) + | (mask_p[6] << 10) | (mask_p[5] << 8) + | (mask_p[4] << 6) | (mask_p[3] << 4) + | (mask_p[2] << 2) | (mask_p[1] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask); + + tmp_mask = (mask_p[30] << 28) + | (mask_p[29] << 26) | (mask_p[28] << 24) + | (mask_p[27] << 22) | (mask_p[26] << 20) + | (mask_p[25] << 18) | (mask_p[24] << 16) + | 
(mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+}
+
+/**
+ * ath9k_hw_rf_alloc_ext_banks - allocates banks for external radio programming
+ * @ah: atheros hardware structure
+ *
+ * Only required for older devices with external AR2133/AR5133 radios.
+ */
+int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+#define ATH_ALLOC_BANK(bank, size) do { \
+ bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
+ if (!bank) { \
+ ath_print(common, ATH_DBG_FATAL, \
+ "Cannot allocate RF banks\n"); \
+ return -ENOMEM; \
+ } \
+ } while (0);
+
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
+ ATH_ALLOC_BANK(ah->addac5416_21,
+ ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
+ ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
+
+ return 0;
+#undef ATH_ALLOC_BANK
+}
+
+
+/**
+ * ath9k_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
+ * @ah: atheros hardware structure
+ * For the external AR2133/AR5133 radio banks.
+ */
+void
+ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
+{
+#define ATH_FREE_BANK(bank) do { \
+ kfree(bank); \
+ bank = NULL; \
+ } while (0);
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ ATH_FREE_BANK(ah->analogBank0Data);
+ ATH_FREE_BANK(ah->analogBank1Data);
+ ATH_FREE_BANK(ah->analogBank2Data);
+ ATH_FREE_BANK(ah->analogBank3Data);
+ ATH_FREE_BANK(ah->analogBank6Data);
+ ATH_FREE_BANK(ah->analogBank6TPCData);
+ ATH_FREE_BANK(ah->analogBank7Data);
+ ATH_FREE_BANK(ah->addac5416_21);
+ ATH_FREE_BANK(ah->bank6Temp);
+
+#undef ATH_FREE_BANK
+}
+
+/**
+ * ath9k_hw_set_rf_regs - programs rf registers based on EEPROM
+ * @ah: atheros hardware structure
+ * @chan:
+ * @modesIndex:
+ *
+ * Used for the external AR2133/AR5133 radios.
+ *
+ * Reads the EEPROM header info from the device structure and programs
+ * all rf registers. This routine requires access to the analog
+ * rf device. This is not required for single-chip devices.
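+ *
+ * (In the body below, banks 0, 1, 2 and 7 are taken from their ini tables
+ * with column 1, bank 3 is mode dependent, bank 6 is filled from the TPC
+ * variant of the table and then patched with the EEPROM OB/DB values for
+ * the current band, and finally all banks are written out with
+ * REG_WRITE_RF_ARRAY().)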
+ */ +bool ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan, + u16 modesIndex) +{ + u32 eepMinorRev; + u32 ob5GHz = 0, db5GHz = 0; + u32 ob2GHz = 0, db2GHz = 0; + int regWrites = 0; + + /* + * Software does not need to program bank data + * for single chip devices, that is AR9280 or anything + * after that. + */ + if (AR_SREV_9280_10_OR_LATER(ah)) + return true; + + /* Setup rf parameters */ + eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV); + + /* Setup Bank 0 Write */ + RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1); + + /* Setup Bank 1 Write */ + RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1); + + /* Setup Bank 2 Write */ + RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1); + + /* Setup Bank 6 Write */ + RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3, + modesIndex); + { + int i; + for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) { + ah->analogBank6Data[i] = + INI_RA(&ah->iniBank6TPC, i, modesIndex); + } + } + + /* Only the 5 or 2 GHz OB/DB need to be set for a mode */ + if (eepMinorRev >= 2) { + if (IS_CHAN_2GHZ(chan)) { + ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2); + db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2); + ath9k_phy_modify_rx_buffer(ah->analogBank6Data, + ob2GHz, 3, 197, 0); + ath9k_phy_modify_rx_buffer(ah->analogBank6Data, + db2GHz, 3, 194, 0); + } else { + ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5); + db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5); + ath9k_phy_modify_rx_buffer(ah->analogBank6Data, + ob5GHz, 3, 203, 0); + ath9k_phy_modify_rx_buffer(ah->analogBank6Data, + db5GHz, 3, 200, 0); + } + } + + /* Setup Bank 7 Setup */ + RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1); + + /* Write Analog registers */ + REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data, + regWrites); + REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data, + regWrites); + REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data, + regWrites); + REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data, + regWrites); + REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data, + regWrites); + REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data, + regWrites); + + return true; +} diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h new file mode 100644 index 0000000..0999a49 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/phy.h @@ -0,0 +1,625 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef PHY_H +#define PHY_H + +/* Common between single chip and non single-chip solutions */ +void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites); + +/* Single chip radio settings */ +int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan); +void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan); + +/* Routines below are for non single-chip solutions */ +int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan); +void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan); + +int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah); +void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah); + +bool ath9k_hw_set_rf_regs(struct ath_hw *ah, + struct ath9k_channel *chan, + u16 modesIndex); + +#define AR_PHY_BASE 0x9800 +#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2)) + +#define AR_PHY_TEST 0x9800 +#define PHY_AGC_CLR 0x10000000 +#define RFSILENT_BB 0x00002000 + +#define AR_PHY_TURBO 0x9804 +#define AR_PHY_FC_TURBO_MODE 0x00000001 +#define AR_PHY_FC_TURBO_SHORT 0x00000002 +#define AR_PHY_FC_DYN2040_EN 0x00000004 +#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008 +#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010 +/* For 25 MHz channel spacing -- not used but supported by hw */ +#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020 +#define AR_PHY_FC_HT_EN 0x00000040 +#define AR_PHY_FC_SHORT_GI_40 0x00000080 +#define AR_PHY_FC_WALSH 0x00000100 +#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200 +#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800 + +#define AR_PHY_TEST2 0x9808 + +#define AR_PHY_TIMING2 0x9810 +#define AR_PHY_TIMING3 0x9814 +#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000 +#define AR_PHY_TIMING3_DSC_MAN_S 17 +#define AR_PHY_TIMING3_DSC_EXP 0x0001E000 +#define AR_PHY_TIMING3_DSC_EXP_S 13 + +#define AR_PHY_CHIP_ID 0x9818 +#define AR_PHY_CHIP_ID_REV_0 0x80 +#define AR_PHY_CHIP_ID_REV_1 0x81 +#define AR_PHY_CHIP_ID_9160_REV_0 0xb0 + +#define AR_PHY_ACTIVE 0x981C +#define AR_PHY_ACTIVE_EN 0x00000001 +#define AR_PHY_ACTIVE_DIS 0x00000000 + +#define AR_PHY_RF_CTL2 0x9824 +#define AR_PHY_TX_END_DATA_START 0x000000FF +#define AR_PHY_TX_END_DATA_START_S 0 +#define AR_PHY_TX_END_PA_ON 0x0000FF00 +#define AR_PHY_TX_END_PA_ON_S 8 + +#define AR_PHY_RF_CTL3 0x9828 +#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000 +#define AR_PHY_TX_END_TO_A2_RX_ON_S 16 + +#define AR_PHY_ADC_CTL 0x982C +#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003 +#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0 +#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000 +#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000 +#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000 +#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000 +#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16 + +#define AR_PHY_ADC_SERIAL_CTL 0x9830 +#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000 +#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001 + +#define AR_PHY_RF_CTL4 0x9834 +#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000 +#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24 +#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000 +#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16 +#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00 +#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8 +#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF +#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0 + +#define AR_PHY_TSTDAC_CONST 0x983c + +#define AR_PHY_SETTLING 0x9844 +#define AR_PHY_SETTLING_SWITCH 0x00003F80 +#define AR_PHY_SETTLING_SWITCH_S 7 + +#define AR_PHY_RXGAIN 0x9848 +#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000 +#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12 +#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000 +#define 
AR_PHY_RXGAIN_TXRX_RF_MAX_S 18 +#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80 +#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7 +#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000 +#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14 + +#define AR_PHY_DESIRED_SZ 0x9850 +#define AR_PHY_DESIRED_SZ_ADC 0x000000FF +#define AR_PHY_DESIRED_SZ_ADC_S 0 +#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00 +#define AR_PHY_DESIRED_SZ_PGA_S 8 +#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000 +#define AR_PHY_DESIRED_SZ_TOT_DES_S 20 + +#define AR_PHY_FIND_SIG 0x9858 +#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000 +#define AR_PHY_FIND_SIG_FIRSTEP_S 12 +#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000 +#define AR_PHY_FIND_SIG_FIRPWR_S 18 + +#define AR_PHY_AGC_CTL1 0x985C +#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80 +#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7 +#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000 +#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15 + +#define AR_PHY_AGC_CONTROL 0x9860 +#define AR_PHY_AGC_CONTROL_CAL 0x00000001 +#define AR_PHY_AGC_CONTROL_NF 0x00000002 +#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000 +#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000 +#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 + +#define AR_PHY_CCA 0x9864 +#define AR_PHY_MINCCA_PWR 0x0FF80000 +#define AR_PHY_MINCCA_PWR_S 19 +#define AR_PHY_CCA_THRESH62 0x0007F000 +#define AR_PHY_CCA_THRESH62_S 12 +#define AR9280_PHY_MINCCA_PWR 0x1FF00000 +#define AR9280_PHY_MINCCA_PWR_S 20 +#define AR9280_PHY_CCA_THRESH62 0x000FF000 +#define AR9280_PHY_CCA_THRESH62_S 12 + +#define AR_PHY_SFCORR_LOW 0x986C +#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001 +#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00 +#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8 +#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000 +#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14 +#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000 +#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21 + +#define AR_PHY_SFCORR 0x9868 +#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F +#define AR_PHY_SFCORR_M2COUNT_THR_S 0 +#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000 +#define AR_PHY_SFCORR_M1_THRESH_S 17 +#define AR_PHY_SFCORR_M2_THRESH 0x7F000000 +#define AR_PHY_SFCORR_M2_THRESH_S 24 + +#define AR_PHY_SLEEP_CTR_CONTROL 0x9870 +#define AR_PHY_SLEEP_CTR_LIMIT 0x9874 +#define AR_PHY_SYNTH_CONTROL 0x9874 +#define AR_PHY_SLEEP_SCAL 0x9878 + +#define AR_PHY_PLL_CTL 0x987c +#define AR_PHY_PLL_CTL_40 0xaa +#define AR_PHY_PLL_CTL_40_5413 0x04 +#define AR_PHY_PLL_CTL_44 0xab +#define AR_PHY_PLL_CTL_44_2133 0xeb +#define AR_PHY_PLL_CTL_40_2133 0xea + +#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */ +#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1 +#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */ +#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */ +#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/ +#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/ +#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/ +#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4 +#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/ +#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8 +#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/ +#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16 +#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/ +#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 
/* Short repeat, reg 68, bit 24*/ + +#define AR_PHY_RX_DELAY 0x9914 +#define AR_PHY_SEARCH_START_DELAY 0x9918 +#define AR_PHY_RX_DELAY_DELAY 0x00003FFF + +#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12)) +#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F +#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0 +#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0 +#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5 +#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800 +#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000 +#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12 +#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000 + +#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000 +#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000 +#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000 +#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000 + +#define AR_PHY_TIMING5 0x9924 +#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE +#define AR_PHY_TIMING5_CYCPWR_THR1_S 1 + +#define AR_PHY_POWER_TX_RATE1 0x9934 +#define AR_PHY_POWER_TX_RATE2 0x9938 +#define AR_PHY_POWER_TX_RATE_MAX 0x993c +#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040 + +#define AR_PHY_FRAME_CTL 0x9944 +#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038 +#define AR_PHY_FRAME_CTL_TX_CLIP_S 3 + +#define AR_PHY_TXPWRADJ 0x994C +#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0 +#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6 +#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000 +#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18 + +#define AR_PHY_RADAR_EXT 0x9940 +#define AR_PHY_RADAR_EXT_ENA 0x00004000 + +#define AR_PHY_RADAR_0 0x9954 +#define AR_PHY_RADAR_0_ENA 0x00000001 +#define AR_PHY_RADAR_0_FFT_ENA 0x80000000 +#define AR_PHY_RADAR_0_INBAND 0x0000003e +#define AR_PHY_RADAR_0_INBAND_S 1 +#define AR_PHY_RADAR_0_PRSSI 0x00000FC0 +#define AR_PHY_RADAR_0_PRSSI_S 6 +#define AR_PHY_RADAR_0_HEIGHT 0x0003F000 +#define AR_PHY_RADAR_0_HEIGHT_S 12 +#define AR_PHY_RADAR_0_RRSSI 0x00FC0000 +#define AR_PHY_RADAR_0_RRSSI_S 18 +#define AR_PHY_RADAR_0_FIRPWR 0x7F000000 +#define AR_PHY_RADAR_0_FIRPWR_S 24 + +#define AR_PHY_RADAR_1 0x9958 +#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000 +#define AR_PHY_RADAR_1_USE_FIR128 0x00400000 +#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000 +#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16 +#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000 +#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000 +#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000 +#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00 +#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8 +#define AR_PHY_RADAR_1_MAXLEN 0x000000FF +#define AR_PHY_RADAR_1_MAXLEN_S 0 + +#define AR_PHY_SWITCH_CHAIN_0 0x9960 +#define AR_PHY_SWITCH_COM 0x9964 + +#define AR_PHY_SIGMA_DELTA 0x996C +#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003 +#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0 +#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8 +#define AR_PHY_SIGMA_DELTA_FILT2_S 3 +#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00 +#define AR_PHY_SIGMA_DELTA_FILT1_S 8 +#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000 +#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13 + +#define AR_PHY_RESTART 0x9970 +#define AR_PHY_RESTART_DIV_GC 0x001C0000 +#define AR_PHY_RESTART_DIV_GC_S 18 + +#define AR_PHY_RFBUS_REQ 0x997C +#define AR_PHY_RFBUS_REQ_EN 0x00000001 + +#define AR_PHY_TIMING7 0x9980 +#define AR_PHY_TIMING8 0x9984 +#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF +#define AR_PHY_TIMING8_PILOT_MASK_2_S 0 + +#define AR_PHY_BIN_MASK2_1 0x9988 +#define AR_PHY_BIN_MASK2_2 0x998c +#define AR_PHY_BIN_MASK2_3 0x9990 +#define AR_PHY_BIN_MASK2_4 0x9994 + +#define AR_PHY_BIN_MASK_1 
0x9900 +#define AR_PHY_BIN_MASK_2 0x9904 +#define AR_PHY_BIN_MASK_3 0x9908 + +#define AR_PHY_MASK_CTL 0x990c + +#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF +#define AR_PHY_BIN_MASK2_4_MASK_4_S 0 + +#define AR_PHY_TIMING9 0x9998 +#define AR_PHY_TIMING10 0x999c +#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF +#define AR_PHY_TIMING10_PILOT_MASK_2_S 0 + +#define AR_PHY_TIMING11 0x99a0 +#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF +#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0 +#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000 +#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20 +#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000 +#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000 + +#define AR_PHY_RX_CHAINMASK 0x99a4 +#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12)) +#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000 +#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000 + +#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac +#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000 +#define AR_PHY_9285_ANT_DIV_CTL 0x01000000 +#define AR_PHY_9285_ANT_DIV_CTL_S 24 +#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000 +#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25 +#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000 +#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27 +#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000 +#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29 +#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000 +#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30 +#define AR_PHY_9285_ANT_DIV_LNA1 2 +#define AR_PHY_9285_ANT_DIV_LNA2 1 +#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3 +#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0 +#define AR_PHY_9285_ANT_DIV_GAINTB_0 0 +#define AR_PHY_9285_ANT_DIV_GAINTB_1 1 + +#define AR_PHY_EXT_CCA0 0x99b8 +#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF +#define AR_PHY_EXT_CCA0_THRESH62_S 0 + +#define AR_PHY_EXT_CCA 0x99bc +#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00 +#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9 +#define AR_PHY_EXT_CCA_THRESH62 0x007F0000 +#define AR_PHY_EXT_CCA_THRESH62_S 16 +#define AR_PHY_EXT_MINCCA_PWR 0xFF800000 +#define AR_PHY_EXT_MINCCA_PWR_S 23 +#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000 +#define AR9280_PHY_EXT_MINCCA_PWR_S 16 + +#define AR_PHY_SFCORR_EXT 0x99c0 +#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F +#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0 +#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80 +#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7 +#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000 +#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14 +#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000 +#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21 +#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28 + +#define AR_PHY_HALFGI 0x99D0 +#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0 +#define AR_PHY_HALFGI_DSC_MAN_S 4 +#define AR_PHY_HALFGI_DSC_EXP 0x0000000F +#define AR_PHY_HALFGI_DSC_EXP_S 0 + +#define AR_PHY_CHAN_INFO_MEMORY 0x99DC +#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001 + +#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0 + +#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC +#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000 + +#define AR_PHY_M_SLEEP 0x99f0 +#define AR_PHY_REFCLKDLY 0x99f4 +#define AR_PHY_REFCLKPD 0x99f8 + +#define AR_PHY_CALMODE 0x99f0 + +#define AR_PHY_CALMODE_IQ 0x00000000 +#define AR_PHY_CALMODE_ADC_GAIN 0x00000001 +#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002 +#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003 + +#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12)) +#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12)) +#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12)) +#define 
AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12)) + +#define AR_PHY_CURRENT_RSSI 0x9c1c +#define AR9280_PHY_CURRENT_RSSI 0x9c3c + +#define AR_PHY_RFBUS_GRANT 0x9C20 +#define AR_PHY_RFBUS_GRANT_EN 0x00000001 + +#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4 +#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320 + +#define AR_PHY_CHAN_INFO_GAIN 0x9CFC + +#define AR_PHY_MODE 0xA200 +#define AR_PHY_MODE_ASYNCFIFO 0x80 +#define AR_PHY_MODE_AR2133 0x08 +#define AR_PHY_MODE_AR5111 0x00 +#define AR_PHY_MODE_AR5112 0x08 +#define AR_PHY_MODE_DYNAMIC 0x04 +#define AR_PHY_MODE_RF2GHZ 0x02 +#define AR_PHY_MODE_RF5GHZ 0x00 +#define AR_PHY_MODE_CCK 0x01 +#define AR_PHY_MODE_OFDM 0x00 +#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100 + +#define AR_PHY_CCK_TX_CTRL 0xA204 +#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010 +#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C +#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2 + +#define AR_PHY_CCK_DETECT 0xA208 +#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F +#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0 +/* [12:6] settling time for antenna switch */ +#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0 +#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6 +#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000 +#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13 + +#define AR_PHY_GAIN_2GHZ 0xA20C +#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000 +#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18 +#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00 +#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10 +#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F +#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0 + +#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000 +#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17 +#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000 +#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12 +#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0 +#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6 +#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F +#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0 + +#define AR_PHY_CCK_RXCTRL4 0xA21C +#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000 +#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19 + +#define AR_PHY_DAG_CTRLCCK 0xA228 +#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200 +#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00 +#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10 + +#define AR_PHY_FORCE_CLKEN_CCK 0xA22C +#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040 + +#define AR_PHY_POWER_TX_RATE3 0xA234 +#define AR_PHY_POWER_TX_RATE4 0xA238 + +#define AR_PHY_SCRM_SEQ_XR 0xA23C +#define AR_PHY_HEADER_DETECT_XR 0xA240 +#define AR_PHY_CHIRP_DETECTED_XR 0xA244 +#define AR_PHY_BLUETOOTH 0xA254 + +#define AR_PHY_TPCRG1 0xA258 +#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000 +#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14 + +#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000 +#define AR_PHY_TPCRG1_PD_GAIN_1_S 16 +#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000 +#define AR_PHY_TPCRG1_PD_GAIN_2_S 18 +#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000 +#define AR_PHY_TPCRG1_PD_GAIN_3_S 20 + +#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000 +#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22 + +#define AR_PHY_TX_PWRCTRL4 0xa264 +#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001 +#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0 +#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE +#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1 + +#define AR_PHY_TX_PWRCTRL6_0 0xa270 +#define AR_PHY_TX_PWRCTRL6_1 0xb270 +#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000 +#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24 + +#define AR_PHY_TX_PWRCTRL7 0xa274 +#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000 
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19 + +#define AR_PHY_TX_PWRCTRL9 0xa27C +#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00 +#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10 +#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000 +#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31 + +#define AR_PHY_TX_GAIN_TBL1 0xa300 +#define AR_PHY_TX_GAIN 0x0007F000 +#define AR_PHY_TX_GAIN_S 12 + +#define AR_PHY_CH0_TX_PWRCTRL11 0xa398 +#define AR_PHY_CH1_TX_PWRCTRL11 0xb398 +#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00 +#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10 + +#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0 +#define AR_PHY_MASK2_M_31_45 0xa3a4 +#define AR_PHY_MASK2_M_16_30 0xa3a8 +#define AR_PHY_MASK2_M_00_15 0xa3ac +#define AR_PHY_MASK2_P_15_01 0xa3b8 +#define AR_PHY_MASK2_P_30_16 0xa3bc +#define AR_PHY_MASK2_P_45_31 0xa3c0 +#define AR_PHY_MASK2_P_61_45 0xa3c4 +#define AR_PHY_SPUR_REG 0x994c + +#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18) +#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18 + +#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000 +#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9) +#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9 +#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100 +#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F +#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0 + +#define AR_PHY_PILOT_MASK_01_30 0xa3b0 +#define AR_PHY_PILOT_MASK_31_60 0xa3b4 + +#define AR_PHY_CHANNEL_MASK_01_30 0x99d4 +#define AR_PHY_CHANNEL_MASK_31_60 0x99d8 + +#define AR_PHY_ANALOG_SWAP 0xa268 +#define AR_PHY_SWAP_ALT_CHAIN 0x00000040 + +#define AR_PHY_TPCRG5 0xA26C +#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F +#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22 + +/* Carrier leak calibration control, do it after AGC calibration */ +#define AR_PHY_CL_CAL_CTL 0xA358 +#define AR_PHY_CL_CAL_ENABLE 0x00000002 +#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001 + +#define AR_PHY_POWER_TX_RATE5 0xA38C +#define AR_PHY_POWER_TX_RATE6 0xA390 + +#define AR_PHY_CAL_CHAINMASK 0xA39C + +#define AR_PHY_POWER_TX_SUB 0xA3C8 +#define AR_PHY_POWER_TX_RATE7 0xA3CC +#define AR_PHY_POWER_TX_RATE8 0xA3D0 +#define AR_PHY_POWER_TX_RATE9 0xA3D4 + +#define AR_PHY_XPA_CFG 0xA3D8 +#define AR_PHY_FORCE_XPA_CFG 0x000000001 +#define AR_PHY_FORCE_XPA_CFG_S 0 + +#define AR_PHY_CH1_CCA 0xa864 +#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000 +#define AR_PHY_CH1_MINCCA_PWR_S 19 +#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000 +#define AR9280_PHY_CH1_MINCCA_PWR_S 20 + +#define AR_PHY_CH2_CCA 0xb864 +#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000 +#define AR_PHY_CH2_MINCCA_PWR_S 19 + +#define AR_PHY_CH1_EXT_CCA 0xa9bc +#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000 +#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23 +#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000 +#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16 + +#define AR_PHY_CH2_EXT_CCA 0xb9bc +#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000 +#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23 + +#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \ + int r; \ + for (r = 0; r < ((iniarray)->ia_rows); r++) { \ + REG_WRITE(ah, INI_RA((iniarray), r, 0), (regData)[r]); \ + DO_DELAY(regWr); \ + } \ + } while (0) + +#define ATH9K_IS_MIC_ENABLED(ah) \ + 
((ah)->sta_id1_defaults & AR_STA_ID1_CRPT_MIC_ENABLE) + +#define ANTSWAP_AB 0x0001 +#define REDUCE_CHAIN_0 0x00000050 +#define REDUCE_CHAIN_1 0x00000051 + +#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \ + int i; \ + for (i = 0; i < (_iniarray)->ia_rows; i++) \ + (_bank)[i] = INI_RA((_iniarray), i, _col);; \ + } while (0) + +#endif diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c new file mode 100644 index 0000000..0e79e58 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -0,0 +1,1422 @@ +/* + * Copyright (c) 2004 Video54 Technologies, Inc. + * Copyright (c) 2004-2009 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ath9k.h" + +static const struct ath_rate_table ar5416_11na_ratetable = { + 42, + 8, /* MCS start */ + { + { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ + 5400, 0, 12, 0, 0, 0, 0, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ + 7800, 1, 18, 0, 1, 1, 1, 1 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ + 10000, 2, 24, 2, 2, 2, 2, 2 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ + 13900, 3, 36, 2, 3, 3, 3, 3 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ + 17300, 4, 48, 4, 4, 4, 4, 4 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ + 23000, 5, 72, 4, 5, 5, 5, 5 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ + 27400, 6, 96, 4, 6, 6, 6, 6 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ + 29300, 7, 108, 4, 7, 7, 7, 7 }, + { VALID_2040, VALID_2040, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */ + 6400, 0, 0, 0, 8, 24, 8, 24 }, + { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */ + 12700, 1, 1, 2, 9, 25, 9, 25 }, + { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */ + 18800, 2, 2, 2, 10, 26, 10, 26 }, + { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */ + 25000, 3, 3, 4, 11, 27, 11, 27 }, + { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */ + 36700, 4, 4, 4, 12, 28, 12, 28 }, + { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */ + 48100, 5, 5, 4, 13, 29, 13, 29 }, + { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */ + 53500, 6, 6, 4, 14, 30, 14, 30 }, + { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */ + 59000, 7, 7, 4, 15, 31, 15, 32 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */ + 12700, 8, 8, 3, 16, 33, 16, 33 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */ + 24800, 9, 9, 2, 17, 34, 17, 34 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */ + 36600, 10, 10, 2, 18, 35, 18, 35 }, + { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */ + 48100, 11, 11, 4, 19, 36, 19, 36 }, + { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */ + 69500, 12, 12, 4, 20, 37, 20, 37 }, + { VALID_20, INVALID, 
WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */ + 89500, 13, 13, 4, 21, 38, 21, 38 }, + { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */ + 98900, 14, 14, 4, 22, 39, 22, 39 }, + { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */ + 108300, 15, 15, 4, 23, 40, 23, 41 }, + { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */ + 13200, 0, 0, 0, 8, 24, 24, 24 }, + { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */ + 25900, 1, 1, 2, 9, 25, 25, 25 }, + { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */ + 38600, 2, 2, 2, 10, 26, 26, 26 }, + { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */ + 49800, 3, 3, 4, 11, 27, 27, 27 }, + { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */ + 72200, 4, 4, 4, 12, 28, 28, 28 }, + { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */ + 92900, 5, 5, 4, 13, 29, 29, 29 }, + { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */ + 102700, 6, 6, 4, 14, 30, 30, 30 }, + { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */ + 112000, 7, 7, 4, 15, 31, 32, 32 }, + { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */ + 122000, 7, 7, 4, 15, 31, 32, 32 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */ + 25800, 8, 8, 0, 16, 33, 33, 33 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */ + 49800, 9, 9, 2, 17, 34, 34, 34 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */ + 71900, 10, 10, 2, 18, 35, 35, 35 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */ + 92500, 11, 11, 4, 19, 36, 36, 36 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */ + 130300, 12, 12, 4, 20, 37, 37, 37 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */ + 162800, 13, 13, 4, 21, 38, 38, 38 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */ + 178200, 14, 14, 4, 22, 39, 39, 39 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */ + 192100, 15, 15, 4, 23, 40, 41, 41 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */ + 207000, 15, 15, 4, 23, 40, 41, 41 }, + }, + 50, /* probe interval */ + WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ +}; + +/* 4ms frame limit not used for NG mode. 
The values filled + * for HT are the 64K max aggregate limit */ + +static const struct ath_rate_table ar5416_11ng_ratetable = { + 46, + 12, /* MCS start */ + { + { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ + 900, 0, 2, 0, 0, 0, 0, 0 }, + { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */ + 1900, 1, 4, 1, 1, 1, 1, 1 }, + { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */ + 4900, 2, 11, 2, 2, 2, 2, 2 }, + { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */ + 8100, 3, 22, 3, 3, 3, 3, 3 }, + { INVALID, INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ + 5400, 4, 12, 4, 4, 4, 4, 4 }, + { INVALID, INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ + 7800, 5, 18, 4, 5, 5, 5, 5 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ + 10100, 6, 24, 6, 6, 6, 6, 6 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ + 14100, 7, 36, 6, 7, 7, 7, 7 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ + 17700, 8, 48, 8, 8, 8, 8, 8 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ + 23700, 9, 72, 8, 9, 9, 9, 9 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ + 27400, 10, 96, 8, 10, 10, 10, 10 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ + 30900, 11, 108, 8, 11, 11, 11, 11 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */ + 6400, 0, 0, 4, 12, 28, 12, 28 }, + { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */ + 12700, 1, 1, 6, 13, 29, 13, 29 }, + { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */ + 18800, 2, 2, 6, 14, 30, 14, 30 }, + { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */ + 25000, 3, 3, 8, 15, 31, 15, 31 }, + { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */ + 36700, 4, 4, 8, 16, 32, 16, 32 }, + { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */ + 48100, 5, 5, 8, 17, 33, 17, 33 }, + { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */ + 53500, 6, 6, 8, 18, 34, 18, 34 }, + { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */ + 59000, 7, 7, 8, 19, 35, 19, 36 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */ + 12700, 8, 8, 4, 20, 37, 20, 37 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */ + 24800, 9, 9, 6, 21, 38, 21, 38 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */ + 36600, 10, 10, 6, 22, 39, 22, 39 }, + { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */ + 48100, 11, 11, 8, 23, 40, 23, 40 }, + { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */ + 69500, 12, 12, 8, 24, 41, 24, 41 }, + { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */ + 89500, 13, 13, 8, 25, 42, 25, 42 }, + { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */ + 98900, 14, 14, 8, 26, 43, 26, 44 }, + { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */ + 108300, 15, 15, 8, 27, 44, 27, 45 }, + { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */ + 13200, 0, 0, 8, 12, 28, 28, 28 }, + { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */ + 25900, 1, 1, 8, 13, 29, 29, 29 }, + { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */ + 38600, 2, 2, 8, 14, 30, 30, 30 }, + { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */ + 49800, 3, 3, 8, 15, 31, 31, 31 }, + { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */ + 72200, 4, 4, 8, 16, 32, 32, 32 }, + { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */ + 92900, 5, 5, 8, 17, 33, 33, 33 }, + { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */ + 
102700, 6, 6, 8, 18, 34, 34, 34 }, + { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */ + 112000, 7, 7, 8, 19, 35, 36, 36 }, + { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */ + 122000, 7, 7, 8, 19, 35, 36, 36 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */ + 25800, 8, 8, 8, 20, 37, 37, 37 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */ + 49800, 9, 9, 8, 21, 38, 38, 38 }, + { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */ + 71900, 10, 10, 8, 22, 39, 39, 39 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */ + 92500, 11, 11, 8, 23, 40, 40, 40 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */ + 130300, 12, 12, 8, 24, 41, 41, 41 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */ + 162800, 13, 13, 8, 25, 42, 42, 42 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */ + 178200, 14, 14, 8, 26, 43, 43, 43 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */ + 192100, 15, 15, 8, 27, 44, 45, 45 }, + { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */ + 207000, 15, 15, 8, 27, 44, 45, 45 }, + }, + 50, /* probe interval */ + WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ +}; + +static const struct ath_rate_table ar5416_11a_ratetable = { + 8, + 0, + { + { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ + 5400, 0, 12, 0, 0, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ + 7800, 1, 18, 0, 1, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ + 10000, 2, 24, 2, 2, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ + 13900, 3, 36, 2, 3, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ + 17300, 4, 48, 4, 4, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ + 23000, 5, 72, 4, 5, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ + 27400, 6, 96, 4, 6, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ + 29300, 7, 108, 4, 7, 0 }, + }, + 50, /* probe interval */ + 0, /* Phy rates allowed initially */ +}; + +static const struct ath_rate_table ar5416_11g_ratetable = { + 12, + 0, + { + { VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ + 900, 0, 2, 0, 0, 0 }, + { VALID, VALID, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */ + 1900, 1, 4, 1, 1, 0 }, + { VALID, VALID, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */ + 4900, 2, 11, 2, 2, 0 }, + { VALID, VALID, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */ + 8100, 3, 22, 3, 3, 0 }, + { INVALID, INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ + 5400, 4, 12, 4, 4, 0 }, + { INVALID, INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ + 7800, 5, 18, 4, 5, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ + 10000, 6, 24, 6, 6, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ + 13900, 7, 36, 6, 7, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ + 17300, 8, 48, 8, 8, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ + 23000, 9, 72, 8, 9, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ + 27400, 10, 96, 8, 10, 0 }, + { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ + 29300, 11, 108, 8, 11, 0 }, + }, + 50, /* probe interval */ + 0, /* Phy rates allowed initially */ +}; + +static const struct ath_rate_table *hw_rate_table[ATH9K_MODE_MAX] = { + [ATH9K_MODE_11A] = &ar5416_11a_ratetable, + [ATH9K_MODE_11G] = &ar5416_11g_ratetable, + [ATH9K_MODE_11NA_HT20] = &ar5416_11na_ratetable, + [ATH9K_MODE_11NG_HT20] = &ar5416_11ng_ratetable, + [ATH9K_MODE_11NA_HT40PLUS] = &ar5416_11na_ratetable, + [ATH9K_MODE_11NA_HT40MINUS] = 
&ar5416_11na_ratetable, + [ATH9K_MODE_11NG_HT40PLUS] = &ar5416_11ng_ratetable, + [ATH9K_MODE_11NG_HT40MINUS] = &ar5416_11ng_ratetable, +}; + +static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, + struct ieee80211_tx_rate *rate); + +static inline int8_t median(int8_t a, int8_t b, int8_t c) +{ + if (a >= b) { + if (b >= c) + return b; + else if (a > c) + return c; + else + return a; + } else { + if (a >= c) + return a; + else if (b >= c) + return c; + else + return b; + } +} + +static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table, + struct ath_rate_priv *ath_rc_priv) +{ + u8 i, j, idx, idx_next; + + for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) { + for (j = 0; j <= i-1; j++) { + idx = ath_rc_priv->valid_rate_index[j]; + idx_next = ath_rc_priv->valid_rate_index[j+1]; + + if (rate_table->info[idx].ratekbps > + rate_table->info[idx_next].ratekbps) { + ath_rc_priv->valid_rate_index[j] = idx_next; + ath_rc_priv->valid_rate_index[j+1] = idx; + } + } + } +} + +static void ath_rc_init_valid_txmask(struct ath_rate_priv *ath_rc_priv) +{ + u8 i; + + for (i = 0; i < ath_rc_priv->rate_table_size; i++) + ath_rc_priv->valid_rate_index[i] = 0; +} + +static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv, + u8 index, int valid_tx_rate) +{ + BUG_ON(index > ath_rc_priv->rate_table_size); + ath_rc_priv->valid_rate_index[index] = valid_tx_rate ? 1 : 0; +} + +static inline +int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table, + struct ath_rate_priv *ath_rc_priv, + u8 cur_valid_txrate, + u8 *next_idx) +{ + u8 i; + + for (i = 0; i < ath_rc_priv->max_valid_rate - 1; i++) { + if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) { + *next_idx = ath_rc_priv->valid_rate_index[i+1]; + return 1; + } + } + + /* No more valid rates */ + *next_idx = 0; + + return 0; +} + +/* Return true only for single stream */ + +static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw) +{ + if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG)) + return 0; + if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG)) + return 0; + if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG)) + return 0; + if (!ignore_cw && WLAN_RC_PHY_HT(phy)) + if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG)) + return 0; + return 1; +} + +static inline int +ath_rc_get_lower_rix(const struct ath_rate_table *rate_table, + struct ath_rate_priv *ath_rc_priv, + u8 cur_valid_txrate, u8 *next_idx) +{ + int8_t i; + + for (i = 1; i < ath_rc_priv->max_valid_rate ; i++) { + if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) { + *next_idx = ath_rc_priv->valid_rate_index[i-1]; + return 1; + } + } + + return 0; +} + +static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv, + const struct ath_rate_table *rate_table, + u32 capflag) +{ + u8 i, hi = 0; + u32 valid; + + for (i = 0; i < rate_table->rate_cnt; i++) { + valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? 
+ rate_table->info[i].valid_single_stream : + rate_table->info[i].valid); + if (valid == 1) { + u32 phy = rate_table->info[i].phy; + u8 valid_rate_count = 0; + + if (!ath_rc_valid_phyrate(phy, capflag, 0)) + continue; + + valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy]; + + ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i; + ath_rc_priv->valid_phy_ratecnt[phy] += 1; + ath_rc_set_valid_txmask(ath_rc_priv, i, 1); + hi = A_MAX(hi, i); + } + } + + return hi; +} + +static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, + const struct ath_rate_table *rate_table, + struct ath_rateset *rateset, + u32 capflag) +{ + u8 i, j, hi = 0; + + /* Use intersection of working rates and valid rates */ + for (i = 0; i < rateset->rs_nrates; i++) { + for (j = 0; j < rate_table->rate_cnt; j++) { + u32 phy = rate_table->info[j].phy; + u32 valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? + rate_table->info[j].valid_single_stream : + rate_table->info[j].valid); + u8 rate = rateset->rs_rates[i]; + u8 dot11rate = rate_table->info[j].dot11rate; + + /* We allow a rate only if its valid and the + * capflag matches one of the validity + * (VALID/VALID_20/VALID_40) flags */ + + if ((rate == dot11rate) && + ((valid & WLAN_RC_CAP_MODE(capflag)) == + WLAN_RC_CAP_MODE(capflag)) && + !WLAN_RC_PHY_HT(phy)) { + u8 valid_rate_count = 0; + + if (!ath_rc_valid_phyrate(phy, capflag, 0)) + continue; + + valid_rate_count = + ath_rc_priv->valid_phy_ratecnt[phy]; + + ath_rc_priv->valid_phy_rateidx[phy] + [valid_rate_count] = j; + ath_rc_priv->valid_phy_ratecnt[phy] += 1; + ath_rc_set_valid_txmask(ath_rc_priv, j, 1); + hi = A_MAX(hi, j); + } + } + } + + return hi; +} + +static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv, + const struct ath_rate_table *rate_table, + u8 *mcs_set, u32 capflag) +{ + struct ath_rateset *rateset = (struct ath_rateset *)mcs_set; + + u8 i, j, hi = 0; + + /* Use intersection of working rates and valid rates */ + for (i = 0; i < rateset->rs_nrates; i++) { + for (j = 0; j < rate_table->rate_cnt; j++) { + u32 phy = rate_table->info[j].phy; + u32 valid = (!(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? + rate_table->info[j].valid_single_stream : + rate_table->info[j].valid); + u8 rate = rateset->rs_rates[i]; + u8 dot11rate = rate_table->info[j].dot11rate; + + if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) || + !WLAN_RC_PHY_HT_VALID(valid, capflag)) + continue; + + if (!ath_rc_valid_phyrate(phy, capflag, 0)) + continue; + + ath_rc_priv->valid_phy_rateidx[phy] + [ath_rc_priv->valid_phy_ratecnt[phy]] = j; + ath_rc_priv->valid_phy_ratecnt[phy] += 1; + ath_rc_set_valid_txmask(ath_rc_priv, j, 1); + hi = A_MAX(hi, j); + } + } + + return hi; +} + +/* Finds the highest rate index we can use */ +static u8 ath_rc_get_highest_rix(struct ath_softc *sc, + struct ath_rate_priv *ath_rc_priv, + const struct ath_rate_table *rate_table, + int *is_probing) +{ + u32 best_thruput, this_thruput, now_msec; + u8 rate, next_rate, best_rate, maxindex, minindex; + int8_t index = 0; + + now_msec = jiffies_to_msecs(jiffies); + *is_probing = 0; + best_thruput = 0; + maxindex = ath_rc_priv->max_valid_rate-1; + minindex = 0; + best_rate = minindex; + + /* + * Try the higher rate first. It will reduce memory moving time + * if we have very good channel characteristics. 
+ */ + for (index = maxindex; index >= minindex ; index--) { + u8 per_thres; + + rate = ath_rc_priv->valid_rate_index[index]; + if (rate > ath_rc_priv->rate_max_phy) + continue; + + /* + * For TCP the average collision rate is around 11%, + * so we ignore PERs less than this. This is to + * prevent the rate we are currently using (whose + * PER might be in the 10-15 range because of TCP + * collisions) looking worse than the next lower + * rate whose PER has decayed close to 0. If we + * moved to the next lower rate, its PER would grow to + * 10-15 and we would be worse off than staying + * at the current rate. + */ + per_thres = ath_rc_priv->per[rate]; + if (per_thres < 12) + per_thres = 12; + + this_thruput = rate_table->info[rate].user_ratekbps * + (100 - per_thres); + + if (best_thruput <= this_thruput) { + best_thruput = this_thruput; + best_rate = rate; + } + } + + rate = best_rate; + + /* + * Must check the actual rate (ratekbps) to account for + * non-monotonicity of 11g's rate table + */ + + if (rate >= ath_rc_priv->rate_max_phy) { + rate = ath_rc_priv->rate_max_phy; + + /* Probe the next allowed phy state */ + if (ath_rc_get_nextvalid_txrate(rate_table, + ath_rc_priv, rate, &next_rate) && + (now_msec - ath_rc_priv->probe_time > + rate_table->probe_interval) && + (ath_rc_priv->hw_maxretry_pktcnt >= 1)) { + rate = next_rate; + ath_rc_priv->probe_rate = rate; + ath_rc_priv->probe_time = now_msec; + ath_rc_priv->hw_maxretry_pktcnt = 0; + *is_probing = 1; + } + } + + if (rate > (ath_rc_priv->rate_table_size - 1)) + rate = ath_rc_priv->rate_table_size - 1; + + if (rate_table->info[rate].valid && + (ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)) + return rate; + + if (rate_table->info[rate].valid_single_stream && + !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)) + return rate; + + /* This should not happen */ + WARN_ON(1); + + rate = ath_rc_priv->valid_rate_index[0]; + + return rate; +} + +static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table, + struct ieee80211_tx_rate *rate, + struct ieee80211_tx_rate_control *txrc, + u8 tries, u8 rix, int rtsctsenable) +{ + rate->count = tries; + rate->idx = rate_table->info[rix].ratecode; + + if (txrc->short_preamble) + rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; + if (txrc->rts || rtsctsenable) + rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; + + if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) { + rate->flags |= IEEE80211_TX_RC_MCS; + if (WLAN_RC_PHY_40(rate_table->info[rix].phy)) + rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy)) + rate->flags |= IEEE80211_TX_RC_SHORT_GI; + } +} + +static void ath_rc_rate_set_rtscts(struct ath_softc *sc, + const struct ath_rate_table *rate_table, + struct ieee80211_tx_info *tx_info) +{ + struct ieee80211_tx_rate *rates = tx_info->control.rates; + int i = 0, rix = 0, cix, enable_g_protection = 0; + + /* get the cix for the lowest valid rix */ + for (i = 3; i >= 0; i--) { + if (rates[i].count && (rates[i].idx >= 0)) { + rix = ath_rc_get_rateindex(rate_table, &rates[i]); + break; + } + } + cix = rate_table->info[rix].ctrl_rate; + + /* All protection frames are transmitted at 2Mb/s for 802.11g, + * otherwise we transmit them at 1Mb/s */ + if (sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ && + !conf_is_ht(&sc->hw->conf)) + enable_g_protection = 1; + + /* + * If 802.11g protection is enabled, determine whether to use RTS/CTS or + * just CTS. Note that this is only done for OFDM/HT unicast frames.
+ */ + if ((sc->sc_flags & SC_OP_PROTECT_ENABLE) && + (rate_table->info[rix].phy == WLAN_RC_PHY_OFDM || + WLAN_RC_PHY_HT(rate_table->info[rix].phy))) { + rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT; + cix = rate_table->info[enable_g_protection].ctrl_rate; + } + + tx_info->control.rts_cts_rate_idx = cix; +} + +static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + struct ath_softc *sc = priv; + struct ath_rate_priv *ath_rc_priv = priv_sta; + const struct ath_rate_table *rate_table; + struct sk_buff *skb = txrc->skb; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *rates = tx_info->control.rates; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + __le16 fc = hdr->frame_control; + u8 try_per_rate, i = 0, rix; + int is_probe = 0; + + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + /* + * For Multi Rate Retry we use a different number of + * retry attempt counts. This ends up looking like this: + * + * MRR[0] = 4 + * MRR[1] = 4 + * MRR[2] = 4 + * MRR[3] = 8 + * + */ + try_per_rate = 4; + + rate_table = sc->cur_rate_table; + rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe); + + if (is_probe) { + /* set one try for probe rates. For the + * probes, don't enable RTS */ + ath_rc_rate_set_series(rate_table, &rates[i++], txrc, + 1, rix, 0); + + /* Get the next tried/allowed rate. No RTS for the next series + * after the probe rate + */ + ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); + ath_rc_rate_set_series(rate_table, &rates[i++], txrc, + try_per_rate, rix, 0); + + tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; + } else { + /* Set the chosen rate. No RTS for first series entry. */ + ath_rc_rate_set_series(rate_table, &rates[i++], txrc, + try_per_rate, rix, 0); + } + + /* Fill in the other rates for multirate retry */ + for ( ; i < 4; i++) { + /* Use twice the number of tries for the last MRR segment. */ + if (i + 1 == 4) + try_per_rate = 8; + + ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); + /* All other rates in the series have RTS enabled */ + ath_rc_rate_set_series(rate_table, &rates[i], txrc, + try_per_rate, rix, 1); + } + + /* + * NB: Change rate series to enable aggregation when operating + * at lower MCS rates. When first rate in series is MCS2 + * in HT40 @ 2.4GHz, series should look like: + * + * {MCS2, MCS1, MCS0, MCS0}. + * + * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should + * look like: + * + * {MCS3, MCS2, MCS1, MCS1} + * + * So, set fourth rate in series to be same as third one for + * above conditions. + */ + if ((sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ) && + (conf_is_ht(&sc->hw->conf))) { + u8 dot11rate = rate_table->info[rix].dot11rate; + u8 phy = rate_table->info[rix].phy; + if (i == 4 && + ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) || + (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) { + rates[3].idx = rates[2].idx; + rates[3].flags = rates[2].flags; + } + } + + /* + * Force hardware to use computed duration for next + * fragment by disabling multi-rate retry, which + * updates duration based on the multi-rate duration table.
+ * + * FIXME: Fix duration + */ + if (ieee80211_has_morefrags(fc) || + (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) { + rates[1].count = rates[2].count = rates[3].count = 0; + rates[1].idx = rates[2].idx = rates[3].idx = 0; + rates[0].count = ATH_TXMAXTRY; + } + + /* Setup RTS/CTS */ + ath_rc_rate_set_rtscts(sc, rate_table, tx_info); +} + +static bool ath_rc_update_per(struct ath_softc *sc, + const struct ath_rate_table *rate_table, + struct ath_rate_priv *ath_rc_priv, + struct ieee80211_tx_info *tx_info, + int tx_rate, int xretries, int retries, + u32 now_msec) +{ + bool state_change = false; + int count, n_bad_frames; + u8 last_per; + static u32 nretry_to_per_lookup[10] = { + 100 * 0 / 1, + 100 * 1 / 4, + 100 * 1 / 2, + 100 * 3 / 4, + 100 * 4 / 5, + 100 * 5 / 6, + 100 * 6 / 7, + 100 * 7 / 8, + 100 * 8 / 9, + 100 * 9 / 10 + }; + + last_per = ath_rc_priv->per[tx_rate]; + n_bad_frames = tx_info->status.ampdu_len - tx_info->status.ampdu_ack_len; + + if (xretries) { + if (xretries == 1) { + ath_rc_priv->per[tx_rate] += 30; + if (ath_rc_priv->per[tx_rate] > 100) + ath_rc_priv->per[tx_rate] = 100; + } else { + /* xretries == 2 */ + count = ARRAY_SIZE(nretry_to_per_lookup); + if (retries >= count) + retries = count - 1; + + /* new_PER = 7/8*old_PER + 1/8*(currentPER) */ + ath_rc_priv->per[tx_rate] = + (u8)(last_per - (last_per >> 3) + (100 >> 3)); + } + + /* xretries == 1 or 2 */ + + if (ath_rc_priv->probe_rate == tx_rate) + ath_rc_priv->probe_rate = 0; + + } else { /* xretries == 0 */ + count = ARRAY_SIZE(nretry_to_per_lookup); + if (retries >= count) + retries = count - 1; + + if (n_bad_frames) { + /* new_PER = 7/8*old_PER + 1/8*(currentPER) + * Assuming that n_frames is not 0. The current PER + * from the retries is 100 * retries / (retries+1), + * since the first retries attempts failed, and the + * next one worked. For the one that worked, + * n_bad_frames subframes out of n_frames worked, + * so the PER for that part is + * 100 * n_bad_frames / n_frames, and it contributes + * 100 * n_bad_frames / (n_frames * (retries+1)) to + * the above PER. The expression below is a + * simplified version of the sum of these two terms. + */ + if (tx_info->status.ampdu_len > 0) { + int n_frames, n_bad_tries; + u8 cur_per, new_per; + + n_bad_tries = retries * tx_info->status.ampdu_len + + n_bad_frames; + n_frames = tx_info->status.ampdu_len * (retries + 1); + cur_per = (100 * n_bad_tries / n_frames) >> 3; + new_per = (u8)(last_per - (last_per >> 3) + cur_per); + ath_rc_priv->per[tx_rate] = new_per; + } + } else { + ath_rc_priv->per[tx_rate] = + (u8)(last_per - (last_per >> 3) + + (nretry_to_per_lookup[retries] >> 3)); + } + + + /* + * If we got at most one retry then increase the max rate if + * this was a probe. Otherwise, ignore the probe. + */ + if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) { + if (retries > 0 || 2 * n_bad_frames > tx_info->status.ampdu_len) { + /* + * Since we probed with just a single attempt, + * any retries means the probe failed. Also, + * if the attempt worked, but more than half + * the subframes were bad then also consider + * the probe a failure. + */ + ath_rc_priv->probe_rate = 0; + } else { + u8 probe_rate = 0; + + ath_rc_priv->rate_max_phy = + ath_rc_priv->probe_rate; + probe_rate = ath_rc_priv->probe_rate; + + if (ath_rc_priv->per[probe_rate] > 30) + ath_rc_priv->per[probe_rate] = 20; + + ath_rc_priv->probe_rate = 0; + + /* + * Since this probe succeeded, we allow the next + * probe twice as soon.
This allows the maxRate + * to move up faster if the probes are + * successful. + */ + ath_rc_priv->probe_time = + now_msec - rate_table->probe_interval / 2; + } + } + + if (retries > 0) { + /* + * Don't update anything. We don't know if + * this was because of collisions or poor signal. + */ + ath_rc_priv->hw_maxretry_pktcnt = 0; + } else { + /* + * It worked with no retries. First ignore bogus (small) + * rssi_ack values. + */ + if (tx_rate == ath_rc_priv->rate_max_phy && + ath_rc_priv->hw_maxretry_pktcnt < 255) { + ath_rc_priv->hw_maxretry_pktcnt++; + } + + } + } + + return state_change; +} + +/* Update PER, RSSI and whatever else that the code thinks it is doing. + If you can make sense of all this, you really need to go out more. */ + +static void ath_rc_update_ht(struct ath_softc *sc, + struct ath_rate_priv *ath_rc_priv, + struct ieee80211_tx_info *tx_info, + int tx_rate, int xretries, int retries) +{ + u32 now_msec = jiffies_to_msecs(jiffies); + int rate; + u8 last_per; + bool state_change = false; + const struct ath_rate_table *rate_table = sc->cur_rate_table; + int size = ath_rc_priv->rate_table_size; + + if ((tx_rate < 0) || (tx_rate > rate_table->rate_cnt)) + return; + + last_per = ath_rc_priv->per[tx_rate]; + + /* Update PER first */ + state_change = ath_rc_update_per(sc, rate_table, ath_rc_priv, + tx_info, tx_rate, xretries, + retries, now_msec); + + /* + * If this rate looks bad (high PER) then stop using it for + * a while (except if we are probing). + */ + if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 && + rate_table->info[tx_rate].ratekbps <= + rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) { + ath_rc_get_lower_rix(rate_table, ath_rc_priv, + (u8)tx_rate, &ath_rc_priv->rate_max_phy); + + /* Don't probe for a little while. */ + ath_rc_priv->probe_time = now_msec; + } + + /* Make sure the rates below this have lower PER */ + /* Monotonicity is kept only for rates below the current rate. */ + if (ath_rc_priv->per[tx_rate] < last_per) { + for (rate = tx_rate - 1; rate >= 0; rate--) { + + if (ath_rc_priv->per[rate] > + ath_rc_priv->per[rate+1]) { + ath_rc_priv->per[rate] = + ath_rc_priv->per[rate+1]; + } + } + } + + /* Maintain monotonicity for rates above the current rate */ + for (rate = tx_rate; rate < size - 1; rate++) { + if (ath_rc_priv->per[rate+1] < + ath_rc_priv->per[rate]) + ath_rc_priv->per[rate+1] = + ath_rc_priv->per[rate]; + } + + /* Every so often, we reduce the thresholds + * and PER (different for CCK and OFDM). 
*/ + if (now_msec - ath_rc_priv->per_down_time >= + rate_table->probe_interval) { + for (rate = 0; rate < size; rate++) { + ath_rc_priv->per[rate] = + 7 * ath_rc_priv->per[rate] / 8; + } + + ath_rc_priv->per_down_time = now_msec; + } + + ath_debug_stat_retries(sc, tx_rate, xretries, retries, + ath_rc_priv->per[tx_rate]); + +} + +static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, + struct ieee80211_tx_rate *rate) +{ + int rix; + + if (!(rate->flags & IEEE80211_TX_RC_MCS)) + return rate->idx; + + rix = rate->idx + rate_table->mcs_start; + if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && + (rate->flags & IEEE80211_TX_RC_SHORT_GI)) + rix = rate_table->info[rix].ht_index; + else if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + rix = rate_table->info[rix].sgi_index; + else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + rix = rate_table->info[rix].cw40index; + else + rix = rate_table->info[rix].base_index; + + return rix; +} + +static void ath_rc_tx_status(struct ath_softc *sc, + struct ath_rate_priv *ath_rc_priv, + struct ieee80211_tx_info *tx_info, + int final_ts_idx, int xretries, int long_retry) +{ + const struct ath_rate_table *rate_table; + struct ieee80211_tx_rate *rates = tx_info->status.rates; + u8 flags; + u32 i = 0, rix; + + rate_table = sc->cur_rate_table; + + /* + * If the first rate is not the final index, there + * are intermediate rate failures to be processed. + */ + if (final_ts_idx != 0) { + /* Process intermediate rates that failed.*/ + for (i = 0; i < final_ts_idx ; i++) { + if (rates[i].count != 0 && (rates[i].idx >= 0)) { + flags = rates[i].flags; + + /* If HT40 and we have switched mode from + * 40 to 20 => don't update */ + + if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && + !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG)) + return; + + rix = ath_rc_get_rateindex(rate_table, &rates[i]); + ath_rc_update_ht(sc, ath_rc_priv, tx_info, + rix, xretries ? 1 : 2, + rates[i].count); + } + } + } else { + /* + * Handle the special case of MIMO PS burst, where the second + * aggregate is sent out with only one rate and one try. + * Treating it as an excessive retry penalizes the rate + * inordinately. 
+ */ + if (rates[0].count == 1 && xretries == 1) + xretries = 2; + } + + flags = rates[i].flags; + + /* If HT40 and we have switched mode from 40 to 20 => don't update */ + if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && + !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG)) + return; + + rix = ath_rc_get_rateindex(rate_table, &rates[i]); + ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry); +} + +static const +struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc, + enum ieee80211_band band, + bool is_ht, + bool is_cw_40) +{ + int mode = 0; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + switch(band) { + case IEEE80211_BAND_2GHZ: + mode = ATH9K_MODE_11G; + if (is_ht) + mode = ATH9K_MODE_11NG_HT20; + if (is_cw_40) + mode = ATH9K_MODE_11NG_HT40PLUS; + break; + case IEEE80211_BAND_5GHZ: + mode = ATH9K_MODE_11A; + if (is_ht) + mode = ATH9K_MODE_11NA_HT20; + if (is_cw_40) + mode = ATH9K_MODE_11NA_HT40PLUS; + break; + default: + ath_print(common, ATH_DBG_CONFIG, "Invalid band\n"); + return NULL; + } + + BUG_ON(mode >= ATH9K_MODE_MAX); + + ath_print(common, ATH_DBG_CONFIG, + "Choosing rate table for mode: %d\n", mode); + + sc->cur_rate_mode = mode; + return hw_rate_table[mode]; +} + +static void ath_rc_init(struct ath_softc *sc, + struct ath_rate_priv *ath_rc_priv, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, + const struct ath_rate_table *rate_table) +{ + struct ath_rateset *rateset = &ath_rc_priv->neg_rates; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + u8 *ht_mcs = (u8 *)&ath_rc_priv->neg_ht_rates; + u8 i, j, k, hi = 0, hthi = 0; + + /* Initial rate table size. Will change depending + * on the working rate set */ + ath_rc_priv->rate_table_size = RATE_TABLE_SIZE; + + /* Initialize thresholds according to the global rate table */ + for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) { + ath_rc_priv->per[i] = 0; + } + + /* Determine the valid rates */ + ath_rc_init_valid_txmask(ath_rc_priv); + + for (i = 0; i < WLAN_RC_PHY_MAX; i++) { + for (j = 0; j < MAX_TX_RATE_PHY; j++) + ath_rc_priv->valid_phy_rateidx[i][j] = 0; + ath_rc_priv->valid_phy_ratecnt[i] = 0; + } + + if (!rateset->rs_nrates) { + /* No working rate, just initialize valid rates */ + hi = ath_rc_init_validrates(ath_rc_priv, rate_table, + ath_rc_priv->ht_cap); + } else { + /* Use intersection of working rates and valid rates */ + hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table, + rateset, ath_rc_priv->ht_cap); + if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) { + hthi = ath_rc_setvalid_htrates(ath_rc_priv, + rate_table, + ht_mcs, + ath_rc_priv->ht_cap); + } + hi = A_MAX(hi, hthi); + } + + ath_rc_priv->rate_table_size = hi + 1; + ath_rc_priv->rate_max_phy = 0; + BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE); + + for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) { + for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) { + ath_rc_priv->valid_rate_index[k++] = + ath_rc_priv->valid_phy_rateidx[i][j]; + } + + if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) + || !ath_rc_priv->valid_phy_ratecnt[i]) + continue; + + ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1]; + } + BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE); + BUG_ON(k > RATE_TABLE_SIZE); + + ath_rc_priv->max_valid_rate = k; + ath_rc_sort_validrates(rate_table, ath_rc_priv); + ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4]; + sc->cur_rate_table = rate_table; + + ath_print(common, ATH_DBG_CONFIG, + "RC Initialized with capabilities: 0x%x\n", + 
ath_rc_priv->ht_cap); +} + +static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, + bool is_cw40, bool is_sgi40) +{ + u8 caps = 0; + + if (sta->ht_cap.ht_supported) { + caps = WLAN_RC_HT_FLAG; + if (sc->sc_ah->caps.tx_chainmask != 1 && + ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) { + if (sta->ht_cap.mcs.rx_mask[1]) + caps |= WLAN_RC_DS_FLAG; + } + if (is_cw40) + caps |= WLAN_RC_40_FLAG; + if (is_sgi40) + caps |= WLAN_RC_SGI_FLAG; + } + + return caps; +} + +/***********************************/ +/* mac80211 Rate Control callbacks */ +/***********************************/ + +static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + struct ath_softc *sc = priv; + struct ath_rate_priv *ath_rc_priv = priv_sta; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr; + int final_ts_idx = 0, tx_status = 0, is_underrun = 0; + int long_retry = 0; + __le16 fc; + int i; + + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; + if (!rate->count) + break; + + final_ts_idx = i; + long_retry = rate->count - 1; + } + + if (!priv_sta || !ieee80211_is_data(fc) || + !(tx_info->pad[0] & ATH_TX_INFO_UPDATE_RC)) + return; + + if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) + return; + + /* + * If an underrun error is seen, treat it as an excessive retry only + * if the max frame trigger level has been reached (2 KB for single stream, + * and 4 KB for dual stream). Adjust the long retry as if the frame was + * tried hw->max_rate_tries times to affect how ratectrl updates PER for + * the failed rate. In case of congestion on the bus, penalizing these + * types of underruns should help hardware actually transmit new frames + * successfully by eventually preferring slower rates. This itself + * should also alleviate congestion on the bus. + */ + if ((tx_info->pad[0] & ATH_TX_INFO_UNDERRUN) && + (sc->sc_ah->tx_trig_level >= ath_rc_priv->tx_triglevel_max)) { + tx_status = 1; + is_underrun = 1; + } + + if (tx_info->pad[0] & ATH_TX_INFO_XRETRY) + tx_status = 1; + + ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status, + (is_underrun) ?
sc->hw->max_rate_tries : long_retry); + + /* Check if aggregation has to be enabled for this tid */ + if (conf_is_ht(&sc->hw->conf) && + !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { + if (ieee80211_is_data_qos(fc)) { + u8 *qc, tid; + struct ath_node *an; + + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + an = (struct ath_node *)sta->drv_priv; + + if(ath_tx_aggr_check(sc, an, tid)) + ieee80211_start_tx_ba_session(sta, tid); + } + } + + ath_debug_stat_rc(sc, ath_rc_get_rateindex(sc->cur_rate_table, + &tx_info->status.rates[final_ts_idx])); +} + +static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ + struct ath_softc *sc = priv; + struct ath_rate_priv *ath_rc_priv = priv_sta; + const struct ath_rate_table *rate_table; + bool is_cw40, is_sgi40; + int i, j = 0; + + for (i = 0; i < sband->n_bitrates; i++) { + if (sta->supp_rates[sband->band] & BIT(i)) { + ath_rc_priv->neg_rates.rs_rates[j] + = (sband->bitrates[i].bitrate * 2) / 10; + j++; + } + } + ath_rc_priv->neg_rates.rs_nrates = j; + + if (sta->ht_cap.ht_supported) { + for (i = 0, j = 0; i < 77; i++) { + if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8))) + ath_rc_priv->neg_ht_rates.rs_rates[j++] = i; + if (j == ATH_RATE_MAX) + break; + } + ath_rc_priv->neg_ht_rates.rs_nrates = j; + } + + is_cw40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; + is_sgi40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; + + /* Choose rate table first */ + + if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) || + (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) || + (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)) { + rate_table = ath_choose_rate_table(sc, sband->band, + sta->ht_cap.ht_supported, is_cw40); + } else { + rate_table = hw_rate_table[sc->cur_rate_mode]; + } + + ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi40); + ath_rc_init(sc, priv_sta, sband, sta, rate_table); +} + +static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + u32 changed, enum nl80211_channel_type oper_chan_type) +{ + struct ath_softc *sc = priv; + struct ath_rate_priv *ath_rc_priv = priv_sta; + const struct ath_rate_table *rate_table = NULL; + bool oper_cw40 = false, oper_sgi40; + bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ? + true : false; + bool local_sgi40 = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ? + true : false; + + /* FIXME: Handle AP mode later when we support CWM */ + + if (changed & IEEE80211_RC_HT_CHANGED) { + if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) + return; + + if (oper_chan_type == NL80211_CHAN_HT40MINUS || + oper_chan_type == NL80211_CHAN_HT40PLUS) + oper_cw40 = true; + + oper_sgi40 = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 
+ true : false; + + if ((local_cw40 != oper_cw40) || (local_sgi40 != oper_sgi40)) { + rate_table = ath_choose_rate_table(sc, sband->band, + sta->ht_cap.ht_supported, + oper_cw40); + ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, + oper_cw40, oper_sgi40); + ath_rc_init(sc, priv_sta, sband, sta, rate_table); + + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, + "Operating HT Bandwidth changed to: %d\n", + sc->hw->conf.channel_type); + sc->cur_rate_table = hw_rate_table[sc->cur_rate_mode]; + } + } +} + +static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + struct ath_wiphy *aphy = hw->priv; + return aphy->sc; +} + +static void ath_rate_free(void *priv) +{ + return; +} + +static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) +{ + struct ath_softc *sc = priv; + struct ath_rate_priv *rate_priv; + + rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp); + if (!rate_priv) { + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, + "Unable to allocate private rc structure\n"); + return NULL; + } + + rate_priv->tx_triglevel_max = sc->sc_ah->caps.tx_triglevel_max; + + return rate_priv; +} + +static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta, + void *priv_sta) +{ + struct ath_rate_priv *rate_priv = priv_sta; + kfree(rate_priv); +} + +static struct rate_control_ops ath_rate_ops = { + .module = NULL, + .name = "ath9k_rate_control", + .tx_status = ath_tx_status, + .get_rate = ath_get_rate, + .rate_init = ath_rate_init, + .rate_update = ath_rate_update, + .alloc = ath_rate_alloc, + .free = ath_rate_free, + .alloc_sta = ath_rate_alloc_sta, + .free_sta = ath_rate_free_sta, +}; + +int ath_rate_control_register(void) +{ + return ieee80211_rate_control_register(&ath_rate_ops); +} + +void ath_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&ath_rate_ops); +} diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h new file mode 100644 index 0000000..4f6d6fd --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/rc.h @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2004 Sam Leffler, Errno Consulting + * Copyright (c) 2004 Video54 Technologies, Inc. + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef RC_H +#define RC_H + +#include "hw.h" + +struct ath_softc; + +#define ATH_RATE_MAX 30 +#define RATE_TABLE_SIZE 64 +#define MAX_TX_RATE_PHY 48 + +/* VALID_ALL - valid for 20/40/Legacy, + * VALID - Legacy only, + * VALID_20 - HT 20 only, + * VALID_40 - HT 40 only */ + +#define INVALID 0x0 +#define VALID 0x1 +#define VALID_20 0x2 +#define VALID_40 0x4 +#define VALID_2040 (VALID_20|VALID_40) +#define VALID_ALL (VALID_2040|VALID) + +enum { + WLAN_RC_PHY_OFDM, + WLAN_RC_PHY_CCK, + WLAN_RC_PHY_HT_20_SS, + WLAN_RC_PHY_HT_20_DS, + WLAN_RC_PHY_HT_40_SS, + WLAN_RC_PHY_HT_40_DS, + WLAN_RC_PHY_HT_20_SS_HGI, + WLAN_RC_PHY_HT_20_DS_HGI, + WLAN_RC_PHY_HT_40_SS_HGI, + WLAN_RC_PHY_HT_40_DS_HGI, + WLAN_RC_PHY_MAX +}; + +#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \ + || (_phy == WLAN_RC_PHY_HT_40_DS) \ + || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) +#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \ + || (_phy == WLAN_RC_PHY_HT_20_DS) \ + || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_20_DS_HGI)) +#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \ + || (_phy == WLAN_RC_PHY_HT_40_DS) \ + || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) +#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) + +#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS) + +#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \ + (capflag & WLAN_RC_40_FLAG) ? VALID_40 : VALID_20 : VALID)) + +/* Return TRUE if flag supports HT20 && client supports HT20 or + * return TRUE if flag supports HT40 && client supports HT40. + * This is used because some rates overlap between HT20/HT40.
+ */ +#define WLAN_RC_PHY_HT_VALID(flag, capflag) \ + (((flag & VALID_20) && !(capflag & WLAN_RC_40_FLAG)) || \ + ((flag & VALID_40) && (capflag & WLAN_RC_40_FLAG))) + +#define WLAN_RC_DS_FLAG (0x01) +#define WLAN_RC_40_FLAG (0x02) +#define WLAN_RC_SGI_FLAG (0x04) +#define WLAN_RC_HT_FLAG (0x08) + +/** + * struct ath_rate_table - Rate Control table + * @valid: valid for use in rate control + * @valid_single_stream: valid for use in rate control for + * single stream operation + * @phy: CCK/OFDM + * @ratekbps: rate in Kbits per second + * @user_ratekbps: user rate in Kbits per second + * @ratecode: rate that goes into HW descriptors + * @short_preamble: Mask for enabling short preamble in ratecode for CCK + * @dot11rate: value that goes into supported + * rates info element of MLME + * @ctrl_rate: Index of next lower basic rate, used for duration computation + * @max_4ms_framelen: maximum frame length(bytes) for tx duration + * @probe_interval: interval for rate control to probe for other rates + * @rssi_reduce_interval: interval for rate control to reduce rssi + * @initial_ratemax: initial ratemax value + */ +struct ath_rate_table { + int rate_cnt; + int mcs_start; + struct { + int valid; + int valid_single_stream; + u8 phy; + u32 ratekbps; + u32 user_ratekbps; + u8 ratecode; + u8 dot11rate; + u8 ctrl_rate; + u8 base_index; + u8 cw40index; + u8 sgi_index; + u8 ht_index; + } info[RATE_TABLE_SIZE]; + u32 probe_interval; + u8 initial_ratemax; +}; + +struct ath_rateset { + u8 rs_nrates; + u8 rs_rates[ATH_RATE_MAX]; +}; + +/** + * struct ath_rate_priv - Rate Control priv data + * @state: RC state + * @probe_rate: rate we are probing at + * @probe_time: msec timestamp for last probe + * @hw_maxretry_pktcnt: num of packets since we got HW max retry error + * @max_valid_rate: maximum number of valid rates + * @per_down_time: msec timestamp for last PER down step + * @valid_phy_ratecnt: valid rate count + * @rate_max_phy: phy index for the max rate + * @per: PER for every valid rate in % + * @probe_interval: interval for ratectrl to probe for other rates + * @prev_data_rix: rate idx of last data frame + * @ht_cap: HT capabilities + * @neg_rates: Negotiated rates + * @neg_ht_rates: Negotiated HT rates + */ +struct ath_rate_priv { + u8 rate_table_size; + u8 probe_rate; + u8 hw_maxretry_pktcnt; + u8 max_valid_rate; + u8 valid_rate_index[RATE_TABLE_SIZE]; + u8 ht_cap; + u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX]; + u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][RATE_TABLE_SIZE]; + u8 rate_max_phy; + u8 per[RATE_TABLE_SIZE]; + u32 probe_time; + u32 per_down_time; + u32 probe_interval; + u32 prev_data_rix; + u32 tx_triglevel_max; + struct ath_rateset neg_rates; + struct ath_rateset neg_ht_rates; + struct ath_rate_softc *asc; +}; + +#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0) +#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1) +#define ATH_TX_INFO_UPDATE_RC (1 << 2) +#define ATH_TX_INFO_XRETRY (1 << 3) +#define ATH_TX_INFO_UNDERRUN (1 << 4) + +enum ath9k_internal_frame_type { + ATH9K_NOT_INTERNAL, + ATH9K_INT_PAUSE, + ATH9K_INT_UNPAUSE +}; + +int ath_rate_control_register(void); +void ath_rate_control_unregister(void); + +#endif /* RC_H */ diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c new file mode 100644 index 0000000..1ca42e5 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -0,0 +1,652 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ath9k.h" + +static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, + struct ieee80211_hdr *hdr) +{ + struct ieee80211_hw *hw = sc->pri_wiphy->hw; + int i; + + spin_lock_bh(&sc->wiphy_lock); + for (i = 0; i < sc->num_sec_wiphy; i++) { + struct ath_wiphy *aphy = sc->sec_wiphy[i]; + if (aphy == NULL) + continue; + if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr) + == 0) { + hw = aphy->hw; + break; + } + } + spin_unlock_bh(&sc->wiphy_lock); + return hw; +} + +/* + * Setup and link descriptors. + * + * 11N: we can no longer afford to self link the last descriptor. + * MAC acknowledges BA status as long as it copies frames to host + * buffer (or rx fifo). This can incorrectly acknowledge packets + * to a sender if last desc is self-linked. + */ +static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath_desc *ds; + struct sk_buff *skb; + + ATH_RXBUF_RESET(bf); + + ds = bf->bf_desc; + ds->ds_link = 0; /* link to null */ + ds->ds_data = bf->bf_buf_addr; + + /* virtual addr of the beginning of the buffer. */ + skb = bf->bf_mpdu; + BUG_ON(skb == NULL); + ds->ds_vdata = skb->data; + + /* + * setup rx descriptors. The rx_bufsize here tells the hardware + * how much data it can DMA to us and that we are prepared + * to process + */ + ath9k_hw_setuprxdesc(ah, ds, + common->rx_bufsize, + 0); + + if (sc->rx.rxlink == NULL) + ath9k_hw_putrxbuf(ah, bf->bf_daddr); + else + *sc->rx.rxlink = bf->bf_daddr; + + sc->rx.rxlink = &ds->ds_link; + ath9k_hw_rxena(ah); +} + +static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) +{ + /* XXX block beacon interrupts */ + ath9k_hw_setantenna(sc->sc_ah, antenna); + sc->rx.defant = antenna; + sc->rx.rxotherant = 0; +} + +static void ath_opmode_init(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + + u32 rfilt, mfilt[2]; + + /* configure rx filter */ + rfilt = ath_calcrxfilter(sc); + ath9k_hw_setrxfilter(ah, rfilt); + + /* configure bssid mask */ + if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) + ath_hw_setbssidmask(common); + + /* configure operational mode */ + ath9k_hw_setopmode(ah); + + /* Handle any link-level address change. 
*/ + ath9k_hw_setmac(ah, common->macaddr); + + /* calculate and install multicast filter */ + mfilt[0] = mfilt[1] = ~0; + ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); +} + +int ath_rx_init(struct ath_softc *sc, int nbufs) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct sk_buff *skb; + struct ath_buf *bf; + int error = 0; + + spin_lock_init(&sc->rx.rxflushlock); + sc->sc_flags &= ~SC_OP_RXFLUSH; + spin_lock_init(&sc->rx.rxbuflock); + + common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN, + min(common->cachelsz, (u16)64)); + + ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", + common->cachelsz, common->rx_bufsize); + + /* Initialize rx descriptors */ + + error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, + "rx", nbufs, 1); + if (error != 0) { + ath_print(common, ATH_DBG_FATAL, + "failed to allocate rx descriptors: %d\n", error); + goto err; + } + + list_for_each_entry(bf, &sc->rx.rxbuf, list) { + skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); + if (skb == NULL) { + error = -ENOMEM; + goto err; + } + + bf->bf_mpdu = skb; + bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, + common->rx_bufsize, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(sc->dev, + bf->bf_buf_addr))) { + dev_kfree_skb_any(skb); + bf->bf_mpdu = NULL; + ath_print(common, ATH_DBG_FATAL, + "dma_mapping_error() on RX init\n"); + error = -ENOMEM; + goto err; + } + bf->bf_dmacontext = bf->bf_buf_addr; + } + sc->rx.rxlink = NULL; + +err: + if (error) + ath_rx_cleanup(sc); + + return error; +} + +void ath_rx_cleanup(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct sk_buff *skb; + struct ath_buf *bf; + + list_for_each_entry(bf, &sc->rx.rxbuf, list) { + skb = bf->bf_mpdu; + if (skb) { + dma_unmap_single(sc->dev, bf->bf_buf_addr, + common->rx_bufsize, DMA_FROM_DEVICE); + dev_kfree_skb(skb); + } + } + + if (sc->rx.rxdma.dd_desc_len != 0) + ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); +} + +/* + * Calculate the receive filter according to the + * operating mode and state: + * + * o always accept unicast, broadcast, and multicast traffic + * o maintain current state of phy error reception (the hal + * may enable phy error frames for noise immunity work) + * o probe request frames are accepted only when operating in + * hostap, adhoc, or monitor modes + * o enable promiscuous mode according to the interface state + * o accept beacons: + * - when operating in adhoc mode so the 802.11 layer creates + * node table entries for peers, + * - when operating in station mode for collecting rssi data when + * the station is otherwise quiet, or + * - when operating as a repeater so we see repeater-sta beacons + * - when scanning + */ + +u32 ath_calcrxfilter(struct ath_softc *sc) +{ +#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR) + + u32 rfilt; + + rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE) + | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST + | ATH9K_RX_FILTER_MCAST; + + /* If not a STA, enable processing of Probe Requests */ + if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) + rfilt |= ATH9K_RX_FILTER_PROBEREQ; + + /* + * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station + * mode interface or when in monitor mode. AP mode does not need this + * since it receives all in-BSS frames anyway. 
+ */ + if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && + (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || + (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) + rfilt |= ATH9K_RX_FILTER_PROM; + + if (sc->rx.rxfilter & FIF_CONTROL) + rfilt |= ATH9K_RX_FILTER_CONTROL; + + if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && + !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) + rfilt |= ATH9K_RX_FILTER_MYBEACON; + else + rfilt |= ATH9K_RX_FILTER_BEACON; + + if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) || + AR_SREV_9285_10_OR_LATER(sc->sc_ah)) && + (sc->sc_ah->opmode == NL80211_IFTYPE_AP) && + (sc->rx.rxfilter & FIF_PSPOLL)) + rfilt |= ATH9K_RX_FILTER_PSPOLL; + + if (conf_is_ht(&sc->hw->conf)) + rfilt |= ATH9K_RX_FILTER_COMP_BAR; + + if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) { + /* TODO: only needed if more than one BSSID is in use in + * station/adhoc mode */ + /* The following may also be needed for other older chips */ + if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) + rfilt |= ATH9K_RX_FILTER_PROM; + rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; + } + + return rfilt; + +#undef RX_FILTER_PRESERVE +} + +int ath_startrecv(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_buf *bf, *tbf; + + spin_lock_bh(&sc->rx.rxbuflock); + if (list_empty(&sc->rx.rxbuf)) + goto start_recv; + + sc->rx.rxlink = NULL; + list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { + ath_rx_buf_link(sc, bf); + } + + /* We could have deleted elements so the list may be empty now */ + if (list_empty(&sc->rx.rxbuf)) + goto start_recv; + + bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); + ath9k_hw_putrxbuf(ah, bf->bf_daddr); + ath9k_hw_rxena(ah); + +start_recv: + spin_unlock_bh(&sc->rx.rxbuflock); + ath_opmode_init(sc); + ath9k_hw_startpcureceive(ah); + + return 0; +} + +bool ath_stoprecv(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + bool stopped; + + ath9k_hw_stoppcurecv(ah); + ath9k_hw_setrxfilter(ah, 0); + stopped = ath9k_hw_stopdmarecv(ah); + sc->rx.rxlink = NULL; + + return stopped; +} + +void ath_flushrecv(struct ath_softc *sc) +{ + spin_lock_bh(&sc->rx.rxflushlock); + sc->sc_flags |= SC_OP_RXFLUSH; + ath_rx_tasklet(sc, 1); + sc->sc_flags &= ~SC_OP_RXFLUSH; + spin_unlock_bh(&sc->rx.rxflushlock); +} + +static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) +{ + /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ + struct ieee80211_mgmt *mgmt; + u8 *pos, *end, id, elen; + struct ieee80211_tim_ie *tim; + + mgmt = (struct ieee80211_mgmt *)skb->data; + pos = mgmt->u.beacon.variable; + end = skb->data + skb->len; + + while (pos + 2 < end) { + id = *pos++; + elen = *pos++; + if (pos + elen > end) + break; + + if (id == WLAN_EID_TIM) { + if (elen < sizeof(*tim)) + break; + tim = (struct ieee80211_tim_ie *) pos; + if (tim->dtim_count != 0) + break; + return tim->bitmap_ctrl & 0x01; + } + + pos += elen; + } + + return false; +} + +static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + if (skb->len < 24 + 8 + 2 + 2) + return; + + mgmt = (struct ieee80211_mgmt *)skb->data; + if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) + return; /* not from our current AP */ + + sc->ps_flags &= ~PS_WAIT_FOR_BEACON; + + if (sc->ps_flags & PS_BEACON_SYNC) { + sc->ps_flags &= ~PS_BEACON_SYNC; + ath_print(common, ATH_DBG_PS, + "Reconfigure Beacon timers based on " + "timestamp from the AP\n"); + ath_beacon_config(sc, NULL); + } + + if 
(ath_beacon_dtim_pending_cab(skb)) { + /* + * Remain awake waiting for buffered broadcast/multicast + * frames. If the last broadcast/multicast frame is not + * received properly, the next beacon frame will work as + * a backup trigger for returning into NETWORK SLEEP state, + * so we are waiting for it as well. + */ + ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating " + "buffered broadcast/multicast frame(s)\n"); + sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; + return; + } + + if (sc->ps_flags & PS_WAIT_FOR_CAB) { + /* + * This can happen if a broadcast frame is dropped or the AP + * fails to send a frame indicating that all CAB frames have + * been delivered. + */ + sc->ps_flags &= ~PS_WAIT_FOR_CAB; + ath_print(common, ATH_DBG_PS, + "PS wait for CAB frames timed out\n"); + } +} + +static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + hdr = (struct ieee80211_hdr *)skb->data; + + /* Process Beacon and CAB receive in PS state */ + if ((sc->ps_flags & PS_WAIT_FOR_BEACON) && + ieee80211_is_beacon(hdr->frame_control)) + ath_rx_ps_beacon(sc, skb); + else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && + (ieee80211_is_data(hdr->frame_control) || + ieee80211_is_action(hdr->frame_control)) && + is_multicast_ether_addr(hdr->addr1) && + !ieee80211_has_moredata(hdr->frame_control)) { + /* + * No more broadcast/multicast frames to be received at this + * point. + */ + sc->ps_flags &= ~PS_WAIT_FOR_CAB; + ath_print(common, ATH_DBG_PS, + "All PS CAB frames received, back to sleep\n"); + } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && + !is_multicast_ether_addr(hdr->addr1) && + !ieee80211_has_morefrags(hdr->frame_control)) { + sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; + ath_print(common, ATH_DBG_PS, + "Going back to sleep after having received " + "PS-Poll data (0x%lx)\n", + sc->ps_flags & (PS_WAIT_FOR_BEACON | + PS_WAIT_FOR_CAB | + PS_WAIT_FOR_PSPOLL_DATA | + PS_WAIT_FOR_TX_ACK)); + } +} + +static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw, + struct ath_softc *sc, struct sk_buff *skb, + struct ieee80211_rx_status *rxs) +{ + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *)skb->data; + + /* Send the frame to mac80211 */ + if (is_multicast_ether_addr(hdr->addr1)) { + int i; + /* + * Deliver broadcast/multicast frames to all suitable + * virtual wiphys. + */ + /* TODO: filter based on channel configuration */ + for (i = 0; i < sc->num_sec_wiphy; i++) { + struct ath_wiphy *aphy = sc->sec_wiphy[i]; + struct sk_buff *nskb; + if (aphy == NULL) + continue; + nskb = skb_copy(skb, GFP_ATOMIC); + if (!nskb) + continue; + ieee80211_rx(aphy->hw, nskb); + } + ieee80211_rx(sc->hw, skb); + } else + /* Deliver unicast frames based on receiver address */ + ieee80211_rx(hw, skb); +} + +int ath_rx_tasklet(struct ath_softc *sc, int flush) +{ +#define PA2DESC(_sc, _pa) \ + ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \ + ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr))) + + struct ath_buf *bf; + struct ath_desc *ds; + struct ath_rx_status *rx_stats; + struct sk_buff *skb = NULL, *requeue_skb; + struct ieee80211_rx_status *rxs; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + /* + * The hw can technically differ from common->hw when using ath9k + * virtual wiphy so to account for that we iterate over the active + * wiphys and find the appropriate wiphy and therefore hw.
+ */ + struct ieee80211_hw *hw = NULL; + struct ieee80211_hdr *hdr; + int retval; + bool decrypt_error = false; + + spin_lock_bh(&sc->rx.rxbuflock); + + do { + /* If handling rx interrupt and flush is in progress => exit */ + if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) + break; + + if (list_empty(&sc->rx.rxbuf)) { + sc->rx.rxlink = NULL; + break; + } + + bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); + ds = bf->bf_desc; + + /* + * Must provide the virtual address of the current + * descriptor, the physical address, and the virtual + * address of the next descriptor in the h/w chain. + * This allows the HAL to look ahead to see if the + * hardware is done with a descriptor by checking the + * done bit in the following descriptor and the address + * of the current descriptor the DMA engine is working + * on. All this is necessary because of our use of + * a self-linked list to avoid rx overruns. + */ + retval = ath9k_hw_rxprocdesc(ah, ds, + bf->bf_daddr, + PA2DESC(sc, ds->ds_link), + 0); + if (retval == -EINPROGRESS) { + struct ath_buf *tbf; + struct ath_desc *tds; + + if (list_is_last(&bf->list, &sc->rx.rxbuf)) { + sc->rx.rxlink = NULL; + break; + } + + tbf = list_entry(bf->list.next, struct ath_buf, list); + + /* + * On some hardware the descriptor status words could + * get corrupted, including the done bit. Because of + * this, check if the next descriptor's done bit is + * set or not. + * + * If the next descriptor's done bit is set, the current + * descriptor has been corrupted. Force s/w to discard + * this descriptor and continue... + */ + + tds = tbf->bf_desc; + retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr, + PA2DESC(sc, tds->ds_link), 0); + if (retval == -EINPROGRESS) { + break; + } + } + + skb = bf->bf_mpdu; + if (!skb) + continue; + + /* + * Synchronize the DMA transfer with CPU before + * 1. accessing the frame + * 2. requeueing the same buffer to h/w + */ + dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, + common->rx_bufsize, + DMA_FROM_DEVICE); + + hdr = (struct ieee80211_hdr *) skb->data; + rxs = IEEE80211_SKB_RXCB(skb); + + hw = ath_get_virt_hw(sc, hdr); + rx_stats = &ds->ds_rxstat; + + ath_debug_stat_rx(sc, bf); + + /* + * If we're asked to flush receive queue, directly + * chain it back at the queue without processing it. + */ + if (flush) + goto requeue; + + retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats, + rxs, &decrypt_error); + if (retval) + goto requeue; + + /* Ensure we always have an skb to requeue once we are done + * processing the current buffer's skb */ + requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); + + /* If there is no memory we ignore the current RX'd frame, + * tell hardware it can give us a new frame using the old + * skb and put it at the tail of the sc->rx.rxbuf list for + * processing. 
*/ + if (!requeue_skb) + goto requeue; + + /* Unmap the frame */ + dma_unmap_single(sc->dev, bf->bf_buf_addr, + common->rx_bufsize, + DMA_FROM_DEVICE); + + skb_put(skb, rx_stats->rs_datalen); + + ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats, + rxs, decrypt_error); + + /* We will now give hardware our shiny new allocated skb */ + bf->bf_mpdu = requeue_skb; + bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, + common->rx_bufsize, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(sc->dev, + bf->bf_buf_addr))) { + dev_kfree_skb_any(requeue_skb); + bf->bf_mpdu = NULL; + ath_print(common, ATH_DBG_FATAL, + "dma_mapping_error() on RX\n"); + ath_rx_send_to_mac80211(hw, sc, skb, rxs); + break; + } + bf->bf_dmacontext = bf->bf_buf_addr; + + /* + * change the default rx antenna if rx diversity chooses the + * other antenna 3 times in a row. + */ + if (sc->rx.defant != ds->ds_rxstat.rs_antenna) { + if (++sc->rx.rxotherant >= 3) + ath_setdefantenna(sc, rx_stats->rs_antenna); + } else { + sc->rx.rxotherant = 0; + } + + if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON | + PS_WAIT_FOR_CAB | + PS_WAIT_FOR_PSPOLL_DATA))) + ath_rx_ps(sc, skb); + + ath_rx_send_to_mac80211(hw, sc, skb, rxs); + +requeue: + list_move_tail(&bf->list, &sc->rx.rxbuf); + ath_rx_buf_link(sc, bf); + } while (1); + + spin_unlock_bh(&sc->rx.rxbuflock); + + return 0; +#undef PA2DESC +} diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h new file mode 100644 index 0000000..72cfa8e --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -0,0 +1,1719 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef REG_H +#define REG_H + +#include "../reg.h" + +#define AR_CR 0x0008 +#define AR_CR_RXE 0x00000004 +#define AR_CR_RXD 0x00000020 +#define AR_CR_SWI 0x00000040 + +#define AR_RXDP 0x000C + +#define AR_CFG 0x0014 +#define AR_CFG_SWTD 0x00000001 +#define AR_CFG_SWTB 0x00000002 +#define AR_CFG_SWRD 0x00000004 +#define AR_CFG_SWRB 0x00000008 +#define AR_CFG_SWRG 0x00000010 +#define AR_CFG_AP_ADHOC_INDICATION 0x00000020 +#define AR_CFG_PHOK 0x00000100 +#define AR_CFG_CLK_GATE_DIS 0x00000400 +#define AR_CFG_EEBS 0x00000200 +#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000 +#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17 + +#define AR_MIRT 0x0020 +#define AR_MIRT_VAL 0x0000ffff +#define AR_MIRT_VAL_S 16 + +#define AR_IER 0x0024 +#define AR_IER_ENABLE 0x00000001 +#define AR_IER_DISABLE 0x00000000 + +#define AR_TIMT 0x0028 +#define AR_TIMT_LAST 0x0000ffff +#define AR_TIMT_LAST_S 0 +#define AR_TIMT_FIRST 0xffff0000 +#define AR_TIMT_FIRST_S 16 + +#define AR_RIMT 0x002C +#define AR_RIMT_LAST 0x0000ffff +#define AR_RIMT_LAST_S 0 +#define AR_RIMT_FIRST 0xffff0000 +#define AR_RIMT_FIRST_S 16 + +#define AR_DMASIZE_4B 0x00000000 +#define AR_DMASIZE_8B 0x00000001 +#define AR_DMASIZE_16B 0x00000002 +#define AR_DMASIZE_32B 0x00000003 +#define AR_DMASIZE_64B 0x00000004 +#define AR_DMASIZE_128B 0x00000005 +#define AR_DMASIZE_256B 0x00000006 +#define AR_DMASIZE_512B 0x00000007 + +#define AR_TXCFG 0x0030 +#define AR_TXCFG_DMASZ_MASK 0x00000007 +#define AR_TXCFG_DMASZ_4B 0 +#define AR_TXCFG_DMASZ_8B 1 +#define AR_TXCFG_DMASZ_16B 2 +#define AR_TXCFG_DMASZ_32B 3 +#define AR_TXCFG_DMASZ_64B 4 +#define AR_TXCFG_DMASZ_128B 5 +#define AR_TXCFG_DMASZ_256B 6 +#define AR_TXCFG_DMASZ_512B 7 +#define AR_FTRIG 0x000003F0 +#define AR_FTRIG_S 4 +#define AR_FTRIG_IMMED 0x00000000 +#define AR_FTRIG_64B 0x00000010 +#define AR_FTRIG_128B 0x00000020 +#define AR_FTRIG_192B 0x00000030 +#define AR_FTRIG_256B 0x00000040 +#define AR_FTRIG_512B 0x00000080 +#define AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY 0x00000800 + +#define AR_RXCFG 0x0034 +#define AR_RXCFG_CHIRP 0x00000008 +#define AR_RXCFG_ZLFDMA 0x00000010 +#define AR_RXCFG_DMASZ_MASK 0x00000007 +#define AR_RXCFG_DMASZ_4B 0 +#define AR_RXCFG_DMASZ_8B 1 +#define AR_RXCFG_DMASZ_16B 2 +#define AR_RXCFG_DMASZ_32B 3 +#define AR_RXCFG_DMASZ_64B 4 +#define AR_RXCFG_DMASZ_128B 5 +#define AR_RXCFG_DMASZ_256B 6 +#define AR_RXCFG_DMASZ_512B 7 + +#define AR_MIBC 0x0040 +#define AR_MIBC_COW 0x00000001 +#define AR_MIBC_FMC 0x00000002 +#define AR_MIBC_CMC 0x00000004 +#define AR_MIBC_MCS 0x00000008 + +#define AR_TOPS 0x0044 +#define AR_TOPS_MASK 0x0000FFFF + +#define AR_RXNPTO 0x0048 +#define AR_RXNPTO_MASK 0x000003FF + +#define AR_TXNPTO 0x004C +#define AR_TXNPTO_MASK 0x000003FF +#define AR_TXNPTO_QCU_MASK 0x000FFC00 + +#define AR_RPGTO 0x0050 +#define AR_RPGTO_MASK 0x000003FF + +#define AR_RPCNT 0x0054 +#define AR_RPCNT_MASK 0x0000001F + +#define AR_MACMISC 0x0058 +#define AR_MACMISC_PCI_EXT_FORCE 0x00000010 +#define AR_MACMISC_DMA_OBS 0x000001E0 +#define AR_MACMISC_DMA_OBS_S 5 +#define AR_MACMISC_DMA_OBS_LINE_0 0 +#define AR_MACMISC_DMA_OBS_LINE_1 1 +#define AR_MACMISC_DMA_OBS_LINE_2 2 +#define AR_MACMISC_DMA_OBS_LINE_3 3 +#define AR_MACMISC_DMA_OBS_LINE_4 4 +#define AR_MACMISC_DMA_OBS_LINE_5 5 +#define AR_MACMISC_DMA_OBS_LINE_6 6 +#define AR_MACMISC_DMA_OBS_LINE_7 7 +#define AR_MACMISC_DMA_OBS_LINE_8 8 +#define AR_MACMISC_MISC_OBS 0x00000E00 +#define AR_MACMISC_MISC_OBS_S 9 +#define AR_MACMISC_MISC_OBS_BUS_LSB 0x00007000 +#define AR_MACMISC_MISC_OBS_BUS_LSB_S 12 +#define 
AR_MACMISC_MISC_OBS_BUS_MSB 0x00038000 +#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15 +#define AR_MACMISC_MISC_OBS_BUS_1 1 + +#define AR_GTXTO 0x0064 +#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF +#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000 +#define AR_GTXTO_TIMEOUT_LIMIT_S 16 + +#define AR_GTTM 0x0068 +#define AR_GTTM_USEC 0x00000001 +#define AR_GTTM_IGNORE_IDLE 0x00000002 +#define AR_GTTM_RESET_IDLE 0x00000004 +#define AR_GTTM_CST_USEC 0x00000008 + +#define AR_CST 0x006C +#define AR_CST_TIMEOUT_COUNTER 0x0000FFFF +#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000 +#define AR_CST_TIMEOUT_LIMIT_S 16 + +#define AR_ISR 0x0080 +#define AR_ISR_RXOK 0x00000001 +#define AR_ISR_RXDESC 0x00000002 +#define AR_ISR_RXERR 0x00000004 +#define AR_ISR_RXNOPKT 0x00000008 +#define AR_ISR_RXEOL 0x00000010 +#define AR_ISR_RXORN 0x00000020 +#define AR_ISR_TXOK 0x00000040 +#define AR_ISR_TXDESC 0x00000080 +#define AR_ISR_TXERR 0x00000100 +#define AR_ISR_TXNOPKT 0x00000200 +#define AR_ISR_TXEOL 0x00000400 +#define AR_ISR_TXURN 0x00000800 +#define AR_ISR_MIB 0x00001000 +#define AR_ISR_SWI 0x00002000 +#define AR_ISR_RXPHY 0x00004000 +#define AR_ISR_RXKCM 0x00008000 +#define AR_ISR_SWBA 0x00010000 +#define AR_ISR_BRSSI 0x00020000 +#define AR_ISR_BMISS 0x00040000 +#define AR_ISR_BNR 0x00100000 +#define AR_ISR_RXCHIRP 0x00200000 +#define AR_ISR_BCNMISC 0x00800000 +#define AR_ISR_TIM 0x00800000 +#define AR_ISR_QCBROVF 0x02000000 +#define AR_ISR_QCBRURN 0x04000000 +#define AR_ISR_QTRIG 0x08000000 +#define AR_ISR_GENTMR 0x10000000 + +#define AR_ISR_TXMINTR 0x00080000 +#define AR_ISR_RXMINTR 0x01000000 +#define AR_ISR_TXINTM 0x40000000 +#define AR_ISR_RXINTM 0x80000000 + +#define AR_ISR_S0 0x0084 +#define AR_ISR_S0_QCU_TXOK 0x000003FF +#define AR_ISR_S0_QCU_TXOK_S 0 +#define AR_ISR_S0_QCU_TXDESC 0x03FF0000 +#define AR_ISR_S0_QCU_TXDESC_S 16 + +#define AR_ISR_S1 0x0088 +#define AR_ISR_S1_QCU_TXERR 0x000003FF +#define AR_ISR_S1_QCU_TXERR_S 0 +#define AR_ISR_S1_QCU_TXEOL 0x03FF0000 +#define AR_ISR_S1_QCU_TXEOL_S 16 + +#define AR_ISR_S2 0x008c +#define AR_ISR_S2_QCU_TXURN 0x000003FF +#define AR_ISR_S2_CST 0x00400000 +#define AR_ISR_S2_GTT 0x00800000 +#define AR_ISR_S2_TIM 0x01000000 +#define AR_ISR_S2_CABEND 0x02000000 +#define AR_ISR_S2_DTIMSYNC 0x04000000 +#define AR_ISR_S2_BCNTO 0x08000000 +#define AR_ISR_S2_CABTO 0x10000000 +#define AR_ISR_S2_DTIM 0x20000000 +#define AR_ISR_S2_TSFOOR 0x40000000 +#define AR_ISR_S2_TBTT_TIME 0x80000000 + +#define AR_ISR_S3 0x0090 +#define AR_ISR_S3_QCU_QCBROVF 0x000003FF +#define AR_ISR_S3_QCU_QCBRURN 0x03FF0000 + +#define AR_ISR_S4 0x0094 +#define AR_ISR_S4_QCU_QTRIG 0x000003FF +#define AR_ISR_S4_RESV0 0xFFFFFC00 + +#define AR_ISR_S5 0x0098 +#define AR_ISR_S5_TIMER_TRIG 0x000000FF +#define AR_ISR_S5_TIMER_THRESH 0x0007FE00 +#define AR_ISR_S5_TIM_TIMER 0x00000010 +#define AR_ISR_S5_DTIM_TIMER 0x00000020 +#define AR_ISR_S5_S 0x00d8 +#define AR_IMR_S5 0x00b8 +#define AR_IMR_S5_TIM_TIMER 0x00000010 +#define AR_IMR_S5_DTIM_TIMER 0x00000020 +#define AR_ISR_S5_GENTIMER_TRIG 0x0000FF80 +#define AR_ISR_S5_GENTIMER_TRIG_S 0 +#define AR_ISR_S5_GENTIMER_THRESH 0xFF800000 +#define AR_ISR_S5_GENTIMER_THRESH_S 16 +#define AR_ISR_S5_S 0x00d8 +#define AR_IMR_S5_GENTIMER_TRIG 0x0000FF80 +#define AR_IMR_S5_GENTIMER_TRIG_S 0 +#define AR_IMR_S5_GENTIMER_THRESH 0xFF800000 +#define AR_IMR_S5_GENTIMER_THRESH_S 16 + +#define AR_IMR 0x00a0 +#define AR_IMR_RXOK 0x00000001 +#define AR_IMR_RXDESC 0x00000002 +#define AR_IMR_RXERR 0x00000004 +#define AR_IMR_RXNOPKT 0x00000008 +#define AR_IMR_RXEOL 0x00000010 +#define AR_IMR_RXORN 
0x00000020 +#define AR_IMR_TXOK 0x00000040 +#define AR_IMR_TXDESC 0x00000080 +#define AR_IMR_TXERR 0x00000100 +#define AR_IMR_TXNOPKT 0x00000200 +#define AR_IMR_TXEOL 0x00000400 +#define AR_IMR_TXURN 0x00000800 +#define AR_IMR_MIB 0x00001000 +#define AR_IMR_SWI 0x00002000 +#define AR_IMR_RXPHY 0x00004000 +#define AR_IMR_RXKCM 0x00008000 +#define AR_IMR_SWBA 0x00010000 +#define AR_IMR_BRSSI 0x00020000 +#define AR_IMR_BMISS 0x00040000 +#define AR_IMR_BNR 0x00100000 +#define AR_IMR_RXCHIRP 0x00200000 +#define AR_IMR_BCNMISC 0x00800000 +#define AR_IMR_TIM 0x00800000 +#define AR_IMR_QCBROVF 0x02000000 +#define AR_IMR_QCBRURN 0x04000000 +#define AR_IMR_QTRIG 0x08000000 +#define AR_IMR_GENTMR 0x10000000 + +#define AR_IMR_TXMINTR 0x00080000 +#define AR_IMR_RXMINTR 0x01000000 +#define AR_IMR_TXINTM 0x40000000 +#define AR_IMR_RXINTM 0x80000000 + +#define AR_IMR_S0 0x00a4 +#define AR_IMR_S0_QCU_TXOK 0x000003FF +#define AR_IMR_S0_QCU_TXOK_S 0 +#define AR_IMR_S0_QCU_TXDESC 0x03FF0000 +#define AR_IMR_S0_QCU_TXDESC_S 16 + +#define AR_IMR_S1 0x00a8 +#define AR_IMR_S1_QCU_TXERR 0x000003FF +#define AR_IMR_S1_QCU_TXERR_S 0 +#define AR_IMR_S1_QCU_TXEOL 0x03FF0000 +#define AR_IMR_S1_QCU_TXEOL_S 16 + +#define AR_IMR_S2 0x00ac +#define AR_IMR_S2_QCU_TXURN 0x000003FF +#define AR_IMR_S2_QCU_TXURN_S 0 +#define AR_IMR_S2_CST 0x00400000 +#define AR_IMR_S2_GTT 0x00800000 +#define AR_IMR_S2_TIM 0x01000000 +#define AR_IMR_S2_CABEND 0x02000000 +#define AR_IMR_S2_DTIMSYNC 0x04000000 +#define AR_IMR_S2_BCNTO 0x08000000 +#define AR_IMR_S2_CABTO 0x10000000 +#define AR_IMR_S2_DTIM 0x20000000 +#define AR_IMR_S2_TSFOOR 0x40000000 + +#define AR_IMR_S3 0x00b0 +#define AR_IMR_S3_QCU_QCBROVF 0x000003FF +#define AR_IMR_S3_QCU_QCBRURN 0x03FF0000 +#define AR_IMR_S3_QCU_QCBRURN_S 16 + +#define AR_IMR_S4 0x00b4 +#define AR_IMR_S4_QCU_QTRIG 0x000003FF +#define AR_IMR_S4_RESV0 0xFFFFFC00 + +#define AR_IMR_S5 0x00b8 +#define AR_IMR_S5_TIMER_TRIG 0x000000FF +#define AR_IMR_S5_TIMER_THRESH 0x0000FF00 + + +#define AR_ISR_RAC 0x00c0 +#define AR_ISR_S0_S 0x00c4 +#define AR_ISR_S0_QCU_TXOK 0x000003FF +#define AR_ISR_S0_QCU_TXOK_S 0 +#define AR_ISR_S0_QCU_TXDESC 0x03FF0000 +#define AR_ISR_S0_QCU_TXDESC_S 16 + +#define AR_ISR_S1_S 0x00c8 +#define AR_ISR_S1_QCU_TXERR 0x000003FF +#define AR_ISR_S1_QCU_TXERR_S 0 +#define AR_ISR_S1_QCU_TXEOL 0x03FF0000 +#define AR_ISR_S1_QCU_TXEOL_S 16 + +#define AR_ISR_S2_S 0x00cc +#define AR_ISR_S3_S 0x00d0 +#define AR_ISR_S4_S 0x00d4 +#define AR_ISR_S5_S 0x00d8 +#define AR_DMADBG_0 0x00e0 +#define AR_DMADBG_1 0x00e4 +#define AR_DMADBG_2 0x00e8 +#define AR_DMADBG_3 0x00ec +#define AR_DMADBG_4 0x00f0 +#define AR_DMADBG_5 0x00f4 +#define AR_DMADBG_6 0x00f8 +#define AR_DMADBG_7 0x00fc + +#define AR_NUM_QCU 10 +#define AR_QCU_0 0x0001 +#define AR_QCU_1 0x0002 +#define AR_QCU_2 0x0004 +#define AR_QCU_3 0x0008 +#define AR_QCU_4 0x0010 +#define AR_QCU_5 0x0020 +#define AR_QCU_6 0x0040 +#define AR_QCU_7 0x0080 +#define AR_QCU_8 0x0100 +#define AR_QCU_9 0x0200 + +#define AR_Q0_TXDP 0x0800 +#define AR_Q1_TXDP 0x0804 +#define AR_Q2_TXDP 0x0808 +#define AR_Q3_TXDP 0x080c +#define AR_Q4_TXDP 0x0810 +#define AR_Q5_TXDP 0x0814 +#define AR_Q6_TXDP 0x0818 +#define AR_Q7_TXDP 0x081c +#define AR_Q8_TXDP 0x0820 +#define AR_Q9_TXDP 0x0824 +#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2)) + +#define AR_Q_TXE 0x0840 +#define AR_Q_TXE_M 0x000003FF + +#define AR_Q_TXD 0x0880 +#define AR_Q_TXD_M 0x000003FF + +#define AR_Q0_CBRCFG 0x08c0 +#define AR_Q1_CBRCFG 0x08c4 +#define AR_Q2_CBRCFG 0x08c8 +#define AR_Q3_CBRCFG 0x08cc +#define AR_Q4_CBRCFG 
0x08d0 +#define AR_Q5_CBRCFG 0x08d4 +#define AR_Q6_CBRCFG 0x08d8 +#define AR_Q7_CBRCFG 0x08dc +#define AR_Q8_CBRCFG 0x08e0 +#define AR_Q9_CBRCFG 0x08e4 +#define AR_QCBRCFG(_i) (AR_Q0_CBRCFG + ((_i)<<2)) +#define AR_Q_CBRCFG_INTERVAL 0x00FFFFFF +#define AR_Q_CBRCFG_INTERVAL_S 0 +#define AR_Q_CBRCFG_OVF_THRESH 0xFF000000 +#define AR_Q_CBRCFG_OVF_THRESH_S 24 + +#define AR_Q0_RDYTIMECFG 0x0900 +#define AR_Q1_RDYTIMECFG 0x0904 +#define AR_Q2_RDYTIMECFG 0x0908 +#define AR_Q3_RDYTIMECFG 0x090c +#define AR_Q4_RDYTIMECFG 0x0910 +#define AR_Q5_RDYTIMECFG 0x0914 +#define AR_Q6_RDYTIMECFG 0x0918 +#define AR_Q7_RDYTIMECFG 0x091c +#define AR_Q8_RDYTIMECFG 0x0920 +#define AR_Q9_RDYTIMECFG 0x0924 +#define AR_QRDYTIMECFG(_i) (AR_Q0_RDYTIMECFG + ((_i)<<2)) +#define AR_Q_RDYTIMECFG_DURATION 0x00FFFFFF +#define AR_Q_RDYTIMECFG_DURATION_S 0 +#define AR_Q_RDYTIMECFG_EN 0x01000000 + +#define AR_Q_ONESHOTARM_SC 0x0940 +#define AR_Q_ONESHOTARM_SC_M 0x000003FF +#define AR_Q_ONESHOTARM_SC_RESV0 0xFFFFFC00 + +#define AR_Q_ONESHOTARM_CC 0x0980 +#define AR_Q_ONESHOTARM_CC_M 0x000003FF +#define AR_Q_ONESHOTARM_CC_RESV0 0xFFFFFC00 + +#define AR_Q0_MISC 0x09c0 +#define AR_Q1_MISC 0x09c4 +#define AR_Q2_MISC 0x09c8 +#define AR_Q3_MISC 0x09cc +#define AR_Q4_MISC 0x09d0 +#define AR_Q5_MISC 0x09d4 +#define AR_Q6_MISC 0x09d8 +#define AR_Q7_MISC 0x09dc +#define AR_Q8_MISC 0x09e0 +#define AR_Q9_MISC 0x09e4 +#define AR_QMISC(_i) (AR_Q0_MISC + ((_i)<<2)) +#define AR_Q_MISC_FSP 0x0000000F +#define AR_Q_MISC_FSP_ASAP 0 +#define AR_Q_MISC_FSP_CBR 1 +#define AR_Q_MISC_FSP_DBA_GATED 2 +#define AR_Q_MISC_FSP_TIM_GATED 3 +#define AR_Q_MISC_FSP_BEACON_SENT_GATED 4 +#define AR_Q_MISC_FSP_BEACON_RCVD_GATED 5 +#define AR_Q_MISC_ONE_SHOT_EN 0x00000010 +#define AR_Q_MISC_CBR_INCR_DIS1 0x00000020 +#define AR_Q_MISC_CBR_INCR_DIS0 0x00000040 +#define AR_Q_MISC_BEACON_USE 0x00000080 +#define AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN 0x00000100 +#define AR_Q_MISC_RDYTIME_EXP_POLICY 0x00000200 +#define AR_Q_MISC_RESET_CBR_EXP_CTR 0x00000400 +#define AR_Q_MISC_DCU_EARLY_TERM_REQ 0x00000800 +#define AR_Q_MISC_RESV0 0xFFFFF000 + +#define AR_Q0_STS 0x0a00 +#define AR_Q1_STS 0x0a04 +#define AR_Q2_STS 0x0a08 +#define AR_Q3_STS 0x0a0c +#define AR_Q4_STS 0x0a10 +#define AR_Q5_STS 0x0a14 +#define AR_Q6_STS 0x0a18 +#define AR_Q7_STS 0x0a1c +#define AR_Q8_STS 0x0a20 +#define AR_Q9_STS 0x0a24 +#define AR_QSTS(_i) (AR_Q0_STS + ((_i)<<2)) +#define AR_Q_STS_PEND_FR_CNT 0x00000003 +#define AR_Q_STS_RESV0 0x000000FC +#define AR_Q_STS_CBR_EXP_CNT 0x0000FF00 +#define AR_Q_STS_RESV1 0xFFFF0000 + +#define AR_Q_RDYTIMESHDN 0x0a40 +#define AR_Q_RDYTIMESHDN_M 0x000003FF + + +#define AR_NUM_DCU 10 +#define AR_DCU_0 0x0001 +#define AR_DCU_1 0x0002 +#define AR_DCU_2 0x0004 +#define AR_DCU_3 0x0008 +#define AR_DCU_4 0x0010 +#define AR_DCU_5 0x0020 +#define AR_DCU_6 0x0040 +#define AR_DCU_7 0x0080 +#define AR_DCU_8 0x0100 +#define AR_DCU_9 0x0200 + +#define AR_D0_QCUMASK 0x1000 +#define AR_D1_QCUMASK 0x1004 +#define AR_D2_QCUMASK 0x1008 +#define AR_D3_QCUMASK 0x100c +#define AR_D4_QCUMASK 0x1010 +#define AR_D5_QCUMASK 0x1014 +#define AR_D6_QCUMASK 0x1018 +#define AR_D7_QCUMASK 0x101c +#define AR_D8_QCUMASK 0x1020 +#define AR_D9_QCUMASK 0x1024 +#define AR_DQCUMASK(_i) (AR_D0_QCUMASK + ((_i)<<2)) +#define AR_D_QCUMASK 0x000003FF +#define AR_D_QCUMASK_RESV0 0xFFFFFC00 + +#define AR_D_TXBLK_CMD 0x1038 +#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i)) + +#define AR_D0_LCL_IFS 0x1040 +#define AR_D1_LCL_IFS 0x1044 +#define AR_D2_LCL_IFS 0x1048 +#define AR_D3_LCL_IFS 0x104c +#define AR_D4_LCL_IFS 
0x1050 +#define AR_D5_LCL_IFS 0x1054 +#define AR_D6_LCL_IFS 0x1058 +#define AR_D7_LCL_IFS 0x105c +#define AR_D8_LCL_IFS 0x1060 +#define AR_D9_LCL_IFS 0x1064 +#define AR_DLCL_IFS(_i) (AR_D0_LCL_IFS + ((_i)<<2)) +#define AR_D_LCL_IFS_CWMIN 0x000003FF +#define AR_D_LCL_IFS_CWMIN_S 0 +#define AR_D_LCL_IFS_CWMAX 0x000FFC00 +#define AR_D_LCL_IFS_CWMAX_S 10 +#define AR_D_LCL_IFS_AIFS 0x0FF00000 +#define AR_D_LCL_IFS_AIFS_S 20 + +#define AR_D_LCL_IFS_RESV0 0xF0000000 + +#define AR_D0_RETRY_LIMIT 0x1080 +#define AR_D1_RETRY_LIMIT 0x1084 +#define AR_D2_RETRY_LIMIT 0x1088 +#define AR_D3_RETRY_LIMIT 0x108c +#define AR_D4_RETRY_LIMIT 0x1090 +#define AR_D5_RETRY_LIMIT 0x1094 +#define AR_D6_RETRY_LIMIT 0x1098 +#define AR_D7_RETRY_LIMIT 0x109c +#define AR_D8_RETRY_LIMIT 0x10a0 +#define AR_D9_RETRY_LIMIT 0x10a4 +#define AR_DRETRY_LIMIT(_i) (AR_D0_RETRY_LIMIT + ((_i)<<2)) +#define AR_D_RETRY_LIMIT_FR_SH 0x0000000F +#define AR_D_RETRY_LIMIT_FR_SH_S 0 +#define AR_D_RETRY_LIMIT_STA_SH 0x00003F00 +#define AR_D_RETRY_LIMIT_STA_SH_S 8 +#define AR_D_RETRY_LIMIT_STA_LG 0x000FC000 +#define AR_D_RETRY_LIMIT_STA_LG_S 14 +#define AR_D_RETRY_LIMIT_RESV0 0xFFF00000 + +#define AR_D0_CHNTIME 0x10c0 +#define AR_D1_CHNTIME 0x10c4 +#define AR_D2_CHNTIME 0x10c8 +#define AR_D3_CHNTIME 0x10cc +#define AR_D4_CHNTIME 0x10d0 +#define AR_D5_CHNTIME 0x10d4 +#define AR_D6_CHNTIME 0x10d8 +#define AR_D7_CHNTIME 0x10dc +#define AR_D8_CHNTIME 0x10e0 +#define AR_D9_CHNTIME 0x10e4 +#define AR_DCHNTIME(_i) (AR_D0_CHNTIME + ((_i)<<2)) +#define AR_D_CHNTIME_DUR 0x000FFFFF +#define AR_D_CHNTIME_DUR_S 0 +#define AR_D_CHNTIME_EN 0x00100000 +#define AR_D_CHNTIME_RESV0 0xFFE00000 + +#define AR_D0_MISC 0x1100 +#define AR_D1_MISC 0x1104 +#define AR_D2_MISC 0x1108 +#define AR_D3_MISC 0x110c +#define AR_D4_MISC 0x1110 +#define AR_D5_MISC 0x1114 +#define AR_D6_MISC 0x1118 +#define AR_D7_MISC 0x111c +#define AR_D8_MISC 0x1120 +#define AR_D9_MISC 0x1124 +#define AR_DMISC(_i) (AR_D0_MISC + ((_i)<<2)) +#define AR_D_MISC_BKOFF_THRESH 0x0000003F +#define AR_D_MISC_RETRY_CNT_RESET_EN 0x00000040 +#define AR_D_MISC_CW_RESET_EN 0x00000080 +#define AR_D_MISC_FRAG_WAIT_EN 0x00000100 +#define AR_D_MISC_FRAG_BKOFF_EN 0x00000200 +#define AR_D_MISC_CW_BKOFF_EN 0x00001000 +#define AR_D_MISC_VIR_COL_HANDLING 0x0000C000 +#define AR_D_MISC_VIR_COL_HANDLING_S 14 +#define AR_D_MISC_VIR_COL_HANDLING_DEFAULT 0 +#define AR_D_MISC_VIR_COL_HANDLING_IGNORE 1 +#define AR_D_MISC_BEACON_USE 0x00010000 +#define AR_D_MISC_ARB_LOCKOUT_CNTRL 0x00060000 +#define AR_D_MISC_ARB_LOCKOUT_CNTRL_S 17 +#define AR_D_MISC_ARB_LOCKOUT_CNTRL_NONE 0 +#define AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR 1 +#define AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL 2 +#define AR_D_MISC_ARB_LOCKOUT_IGNORE 0x00080000 +#define AR_D_MISC_SEQ_NUM_INCR_DIS 0x00100000 +#define AR_D_MISC_POST_FR_BKOFF_DIS 0x00200000 +#define AR_D_MISC_VIT_COL_CW_BKOFF_EN 0x00400000 +#define AR_D_MISC_BLOWN_IFS_RETRY_EN 0x00800000 +#define AR_D_MISC_RESV0 0xFF000000 + +#define AR_D_SEQNUM 0x1140 + +#define AR_D_GBL_IFS_SIFS 0x1030 +#define AR_D_GBL_IFS_SIFS_M 0x0000FFFF +#define AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR 0x000003AB +#define AR_D_GBL_IFS_SIFS_RESV0 0xFFFFFFFF + +#define AR_D_TXBLK_BASE 0x1038 +#define AR_D_TXBLK_WRITE_BITMASK 0x0000FFFF +#define AR_D_TXBLK_WRITE_BITMASK_S 0 +#define AR_D_TXBLK_WRITE_SLICE 0x000F0000 +#define AR_D_TXBLK_WRITE_SLICE_S 16 +#define AR_D_TXBLK_WRITE_DCU 0x00F00000 +#define AR_D_TXBLK_WRITE_DCU_S 20 +#define AR_D_TXBLK_WRITE_COMMAND 0x0F000000 +#define AR_D_TXBLK_WRITE_COMMAND_S 24 + +#define AR_D_GBL_IFS_SLOT 0x1070 
+#define AR_D_GBL_IFS_SLOT_M 0x0000FFFF +#define AR_D_GBL_IFS_SLOT_RESV0 0xFFFF0000 +#define AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR 0x00000420 + +#define AR_D_GBL_IFS_EIFS 0x10b0 +#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF +#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000 +#define AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR 0x0000A5EB + +#define AR_D_GBL_IFS_MISC 0x10f0 +#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007 +#define AR_D_GBL_IFS_MISC_TURBO_MODE 0x00000008 +#define AR_D_GBL_IFS_MISC_USEC_DURATION 0x000FFC00 +#define AR_D_GBL_IFS_MISC_DCU_ARBITER_DLY 0x00300000 +#define AR_D_GBL_IFS_MISC_RANDOM_LFSR_SLICE_DIS 0x01000000 +#define AR_D_GBL_IFS_MISC_SLOT_XMIT_WIND_LEN 0x06000000 +#define AR_D_GBL_IFS_MISC_FORCE_XMIT_SLOT_BOUND 0x08000000 +#define AR_D_GBL_IFS_MISC_IGNORE_BACKOFF 0x10000000 + +#define AR_D_FPCTL 0x1230 +#define AR_D_FPCTL_DCU 0x0000000F +#define AR_D_FPCTL_DCU_S 0 +#define AR_D_FPCTL_PREFETCH_EN 0x00000010 +#define AR_D_FPCTL_BURST_PREFETCH 0x00007FE0 +#define AR_D_FPCTL_BURST_PREFETCH_S 5 + +#define AR_D_TXPSE 0x1270 +#define AR_D_TXPSE_CTRL 0x000003FF +#define AR_D_TXPSE_RESV0 0x0000FC00 +#define AR_D_TXPSE_STATUS 0x00010000 +#define AR_D_TXPSE_RESV1 0xFFFE0000 + +#define AR_D_TXSLOTMASK 0x12f0 +#define AR_D_TXSLOTMASK_NUM 0x0000000F + +#define AR_CFG_LED 0x1f04 +#define AR_CFG_SCLK_RATE_IND 0x00000003 +#define AR_CFG_SCLK_RATE_IND_S 0 +#define AR_CFG_SCLK_32MHZ 0x00000000 +#define AR_CFG_SCLK_4MHZ 0x00000001 +#define AR_CFG_SCLK_1MHZ 0x00000002 +#define AR_CFG_SCLK_32KHZ 0x00000003 +#define AR_CFG_LED_BLINK_SLOW 0x00000008 +#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070 +#define AR_CFG_LED_MODE_SEL 0x00000380 +#define AR_CFG_LED_MODE_SEL_S 7 +#define AR_CFG_LED_POWER 0x00000280 +#define AR_CFG_LED_POWER_S 7 +#define AR_CFG_LED_NETWORK 0x00000300 +#define AR_CFG_LED_NETWORK_S 7 +#define AR_CFG_LED_MODE_PROP 0x0 +#define AR_CFG_LED_MODE_RPROP 0x1 +#define AR_CFG_LED_MODE_SPLIT 0x2 +#define AR_CFG_LED_MODE_RAND 0x3 +#define AR_CFG_LED_MODE_POWER_OFF 0x4 +#define AR_CFG_LED_MODE_POWER_ON 0x5 +#define AR_CFG_LED_MODE_NETWORK_OFF 0x4 +#define AR_CFG_LED_MODE_NETWORK_ON 0x6 +#define AR_CFG_LED_ASSOC_CTL 0x00000c00 +#define AR_CFG_LED_ASSOC_CTL_S 10 +#define AR_CFG_LED_ASSOC_NONE 0x0 +#define AR_CFG_LED_ASSOC_ACTIVE 0x1 +#define AR_CFG_LED_ASSOC_PENDING 0x2 + +#define AR_CFG_LED_BLINK_SLOW 0x00000008 +#define AR_CFG_LED_BLINK_SLOW_S 3 + +#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070 +#define AR_CFG_LED_BLINK_THRESH_SEL_S 4 + +#define AR_MAC_SLEEP 0x1f00 +#define AR_MAC_SLEEP_MAC_AWAKE 0x00000000 +#define AR_MAC_SLEEP_MAC_ASLEEP 0x00000001 + +#define AR_RC 0x4000 +#define AR_RC_AHB 0x00000001 +#define AR_RC_APB 0x00000002 +#define AR_RC_HOSTIF 0x00000100 + +#define AR_WA 0x4004 +#define AR_WA_D3_L1_DISABLE (1 << 14) +#define AR9285_WA_DEFAULT 0x004a05cb +#define AR9280_WA_DEFAULT 0x0040073b +#define AR_WA_DEFAULT 0x0000073f + + +#define AR_PM_STATE 0x4008 +#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000 + +#define AR_HOST_TIMEOUT 0x4018 +#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF +#define AR_HOST_TIMEOUT_APB_CNTR_S 0 +#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000 +#define AR_HOST_TIMEOUT_LCL_CNTR_S 16 + +#define AR_EEPROM 0x401c +#define AR_EEPROM_ABSENT 0x00000100 +#define AR_EEPROM_CORRUPT 0x00000200 +#define AR_EEPROM_PROT_MASK 0x03FFFC00 +#define AR_EEPROM_PROT_MASK_S 10 + +#define EEPROM_PROTECT_RP_0_31 0x0001 +#define EEPROM_PROTECT_WP_0_31 0x0002 +#define EEPROM_PROTECT_RP_32_63 0x0004 +#define EEPROM_PROTECT_WP_32_63 0x0008 +#define EEPROM_PROTECT_RP_64_127 0x0010 +#define 
EEPROM_PROTECT_WP_64_127 0x0020 +#define EEPROM_PROTECT_RP_128_191 0x0040 +#define EEPROM_PROTECT_WP_128_191 0x0080 +#define EEPROM_PROTECT_RP_192_255 0x0100 +#define EEPROM_PROTECT_WP_192_255 0x0200 +#define EEPROM_PROTECT_RP_256_511 0x0400 +#define EEPROM_PROTECT_WP_256_511 0x0800 +#define EEPROM_PROTECT_RP_512_1023 0x1000 +#define EEPROM_PROTECT_WP_512_1023 0x2000 +#define EEPROM_PROTECT_RP_1024_2047 0x4000 +#define EEPROM_PROTECT_WP_1024_2047 0x8000 + +#define AR_SREV \ + ((AR_SREV_9100(ah)) ? 0x0600 : 0x4020) + +#define AR_SREV_ID \ + ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF) +#define AR_SREV_VERSION 0x000000F0 +#define AR_SREV_VERSION_S 4 +#define AR_SREV_REVISION 0x00000007 + +#define AR_SREV_ID2 0xFFFFFFFF +#define AR_SREV_VERSION2 0xFFFC0000 +#define AR_SREV_VERSION2_S 18 +#define AR_SREV_TYPE2 0x0003F000 +#define AR_SREV_TYPE2_S 12 +#define AR_SREV_TYPE2_CHAIN 0x00001000 +#define AR_SREV_TYPE2_HOST_MODE 0x00002000 +#define AR_SREV_REVISION2 0x00000F00 +#define AR_SREV_REVISION2_S 8 + +#define AR_SREV_VERSION_5416_PCI 0xD +#define AR_SREV_VERSION_5416_PCIE 0xC +#define AR_SREV_REVISION_5416_10 0 +#define AR_SREV_REVISION_5416_20 1 +#define AR_SREV_REVISION_5416_22 2 +#define AR_SREV_VERSION_9100 0x14 +#define AR_SREV_VERSION_9160 0x40 +#define AR_SREV_REVISION_9160_10 0 +#define AR_SREV_REVISION_9160_11 1 +#define AR_SREV_VERSION_9280 0x80 +#define AR_SREV_REVISION_9280_10 0 +#define AR_SREV_REVISION_9280_20 1 +#define AR_SREV_REVISION_9280_21 2 +#define AR_SREV_VERSION_9285 0xC0 +#define AR_SREV_REVISION_9285_10 0 +#define AR_SREV_REVISION_9285_11 1 +#define AR_SREV_REVISION_9285_12 2 +#define AR_SREV_VERSION_9287 0x180 +#define AR_SREV_REVISION_9287_10 0 +#define AR_SREV_REVISION_9287_11 1 +#define AR_SREV_REVISION_9287_12 2 +#define AR_SREV_VERSION_9271 0x140 +#define AR_SREV_REVISION_9271_10 0 +#define AR_SREV_REVISION_9271_11 1 + +#define AR_SREV_5416(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ + ((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)) +#define AR_SREV_5416_20_OR_LATER(_ah) \ + (((AR_SREV_5416(_ah)) && \ + ((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_20)) || \ + ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100)) +#define AR_SREV_5416_22_OR_LATER(_ah) \ + (((AR_SREV_5416(_ah)) && \ + ((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_22)) || \ + ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100)) + +#define AR_SREV_9100(ah) \ + ((ah->hw_version.macVersion) == AR_SREV_VERSION_9100) +#define AR_SREV_9100_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100)) + +#define AR_SREV_9160(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9160)) +#define AR_SREV_9160_10_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9160)) +#define AR_SREV_9160_11(_ah) \ + (AR_SREV_9160(_ah) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9160_11)) +#define AR_SREV_9280(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280)) +#define AR_SREV_9280_10_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9280)) +#define AR_SREV_9280_20(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280) && \ + ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9280_20)) +#define AR_SREV_9280_20_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9280) || \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280) && \ + ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9280_20))) + +#define AR_SREV_9285(_ah) \ + (((_ah)->hw_version.macVersion == 
AR_SREV_VERSION_9285)) +#define AR_SREV_9285_10_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9285)) +#define AR_SREV_9285_11(_ah) \ + (AR_SREV_9285(ah) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9285_11)) +#define AR_SREV_9285_11_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9285) || \ + (AR_SREV_9285(ah) && ((_ah)->hw_version.macRev >= \ + AR_SREV_REVISION_9285_11))) +#define AR_SREV_9285_12(_ah) \ + (AR_SREV_9285(ah) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9285_12)) +#define AR_SREV_9285_12_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9285) || \ + (AR_SREV_9285(ah) && ((_ah)->hw_version.macRev >= \ + AR_SREV_REVISION_9285_12))) + +#define AR_SREV_9287(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287)) +#define AR_SREV_9287_10_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9287)) +#define AR_SREV_9287_10(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9287_10)) +#define AR_SREV_9287_11(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9287_11)) +#define AR_SREV_9287_11_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \ + ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_11))) +#define AR_SREV_9287_12(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9287_12)) +#define AR_SREV_9287_12_OR_LATER(_ah) \ + (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \ + ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_12))) +#define AR_SREV_9271(_ah) \ + (((_ah))->hw_version.macVersion == AR_SREV_VERSION_9271) +#define AR_SREV_9271_10(_ah) \ + (AR_SREV_9271(_ah) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_10)) +#define AR_SREV_9271_11(_ah) \ + (AR_SREV_9271(_ah) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11)) + +#define AR_RADIO_SREV_MAJOR 0xf0 +#define AR_RAD5133_SREV_MAJOR 0xc0 +#define AR_RAD2133_SREV_MAJOR 0xd0 +#define AR_RAD5122_SREV_MAJOR 0xe0 +#define AR_RAD2122_SREV_MAJOR 0xf0 + +#define AR_AHB_MODE 0x4024 +#define AR_AHB_EXACT_WR_EN 0x00000000 +#define AR_AHB_BUF_WR_EN 0x00000001 +#define AR_AHB_EXACT_RD_EN 0x00000000 +#define AR_AHB_CACHELINE_RD_EN 0x00000002 +#define AR_AHB_PREFETCH_RD_EN 0x00000004 +#define AR_AHB_PAGE_SIZE_1K 0x00000000 +#define AR_AHB_PAGE_SIZE_2K 0x00000008 +#define AR_AHB_PAGE_SIZE_4K 0x00000010 +#define AR_AHB_CUSTOM_BURST_EN 0x000000C0 +#define AR_AHB_CUSTOM_BURST_EN_S 6 +#define AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL 3 + +#define AR_INTR_RTC_IRQ 0x00000001 +#define AR_INTR_MAC_IRQ 0x00000002 +#define AR_INTR_EEP_PROT_ACCESS 0x00000004 +#define AR_INTR_MAC_AWAKE 0x00020000 +#define AR_INTR_MAC_ASLEEP 0x00040000 +#define AR_INTR_SPURIOUS 0xFFFFFFFF + + +#define AR_INTR_SYNC_CAUSE_CLR 0x4028 + +#define AR_INTR_SYNC_CAUSE 0x4028 + +#define AR_INTR_SYNC_ENABLE 0x402c +#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000 +#define AR_INTR_SYNC_ENABLE_GPIO_S 18 + +enum { + AR_INTR_SYNC_RTC_IRQ = 0x00000001, + AR_INTR_SYNC_MAC_IRQ = 0x00000002, + AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS = 0x00000004, + AR_INTR_SYNC_APB_TIMEOUT = 0x00000008, + AR_INTR_SYNC_PCI_MODE_CONFLICT = 0x00000010, + AR_INTR_SYNC_HOST1_FATAL = 0x00000020, + AR_INTR_SYNC_HOST1_PERR = 0x00000040, + 
AR_INTR_SYNC_TRCV_FIFO_PERR = 0x00000080, + AR_INTR_SYNC_RADM_CPL_EP = 0x00000100, + AR_INTR_SYNC_RADM_CPL_DLLP_ABORT = 0x00000200, + AR_INTR_SYNC_RADM_CPL_TLP_ABORT = 0x00000400, + AR_INTR_SYNC_RADM_CPL_ECRC_ERR = 0x00000800, + AR_INTR_SYNC_RADM_CPL_TIMEOUT = 0x00001000, + AR_INTR_SYNC_LOCAL_TIMEOUT = 0x00002000, + AR_INTR_SYNC_PM_ACCESS = 0x00004000, + AR_INTR_SYNC_MAC_AWAKE = 0x00008000, + AR_INTR_SYNC_MAC_ASLEEP = 0x00010000, + AR_INTR_SYNC_MAC_SLEEP_ACCESS = 0x00020000, + AR_INTR_SYNC_ALL = 0x0003FFFF, + + + AR_INTR_SYNC_DEFAULT = (AR_INTR_SYNC_HOST1_FATAL | + AR_INTR_SYNC_HOST1_PERR | + AR_INTR_SYNC_RADM_CPL_EP | + AR_INTR_SYNC_RADM_CPL_DLLP_ABORT | + AR_INTR_SYNC_RADM_CPL_TLP_ABORT | + AR_INTR_SYNC_RADM_CPL_ECRC_ERR | + AR_INTR_SYNC_RADM_CPL_TIMEOUT | + AR_INTR_SYNC_LOCAL_TIMEOUT | + AR_INTR_SYNC_MAC_SLEEP_ACCESS), + + AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF, + +}; + +#define AR_INTR_ASYNC_MASK 0x4030 +#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000 +#define AR_INTR_ASYNC_MASK_GPIO_S 18 + +#define AR_INTR_SYNC_MASK 0x4034 +#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000 +#define AR_INTR_SYNC_MASK_GPIO_S 18 + +#define AR_INTR_ASYNC_CAUSE_CLR 0x4038 +#define AR_INTR_ASYNC_CAUSE 0x4038 + +#define AR_INTR_ASYNC_ENABLE 0x403c +#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000 +#define AR_INTR_ASYNC_ENABLE_GPIO_S 18 + +#define AR_PCIE_SERDES 0x4040 +#define AR_PCIE_SERDES2 0x4044 +#define AR_PCIE_PM_CTRL 0x4014 +#define AR_PCIE_PM_CTRL_ENA 0x00080000 + +#define AR_NUM_GPIO 14 +#define AR928X_NUM_GPIO 10 +#define AR9285_NUM_GPIO 12 +#define AR9287_NUM_GPIO 11 + +#define AR_GPIO_IN_OUT 0x4048 +#define AR_GPIO_IN_VAL 0x0FFFC000 +#define AR_GPIO_IN_VAL_S 14 +#define AR928X_GPIO_IN_VAL 0x000FFC00 +#define AR928X_GPIO_IN_VAL_S 10 +#define AR9285_GPIO_IN_VAL 0x00FFF000 +#define AR9285_GPIO_IN_VAL_S 12 +#define AR9287_GPIO_IN_VAL 0x003FF800 +#define AR9287_GPIO_IN_VAL_S 11 + +#define AR_GPIO_OE_OUT 0x404c +#define AR_GPIO_OE_OUT_DRV 0x3 +#define AR_GPIO_OE_OUT_DRV_NO 0x0 +#define AR_GPIO_OE_OUT_DRV_LOW 0x1 +#define AR_GPIO_OE_OUT_DRV_HI 0x2 +#define AR_GPIO_OE_OUT_DRV_ALL 0x3 + +#define AR_GPIO_INTR_POL 0x4050 +#define AR_GPIO_INTR_POL_VAL 0x00001FFF +#define AR_GPIO_INTR_POL_VAL_S 0 + +#define AR_GPIO_INPUT_EN_VAL 0x4054 +#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004 +#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2 +#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008 +#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_S 3 +#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_DEF 0x00000010 +#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4 +#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080 +#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7 +#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00000400 +#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 10 +#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000 +#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12 +#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000 +#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15 +#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000 +#define AR_GPIO_JTAG_DISABLE 0x00020000 + +#define AR_GPIO_INPUT_MUX1 0x4058 +#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000 +#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16 +#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00 +#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8 + +#define AR_GPIO_INPUT_MUX2 0x405c +#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f +#define AR_GPIO_INPUT_MUX2_CLK25_S 0 +#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0 +#define AR_GPIO_INPUT_MUX2_RFSILENT_S 4 +#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00 +#define 
AR_GPIO_INPUT_MUX2_RTC_RESET_S 8 + +#define AR_GPIO_OUTPUT_MUX1 0x4060 +#define AR_GPIO_OUTPUT_MUX2 0x4064 +#define AR_GPIO_OUTPUT_MUX3 0x4068 + +#define AR_INPUT_STATE 0x406c + +#define AR_EEPROM_STATUS_DATA 0x407c +#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff +#define AR_EEPROM_STATUS_DATA_VAL_S 0 +#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000 +#define AR_EEPROM_STATUS_DATA_BUSY_ACCESS 0x00020000 +#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000 +#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000 + +#define AR_OBS 0x4080 + +#define AR_GPIO_PDPU 0x4088 + +#define AR_PCIE_MSI 0x4094 +#define AR_PCIE_MSI_ENABLE 0x00000001 + + +#define AR_RTC_9160_PLL_DIV 0x000003ff +#define AR_RTC_9160_PLL_DIV_S 0 +#define AR_RTC_9160_PLL_REFDIV 0x00003C00 +#define AR_RTC_9160_PLL_REFDIV_S 10 +#define AR_RTC_9160_PLL_CLKSEL 0x0000C000 +#define AR_RTC_9160_PLL_CLKSEL_S 14 + +#define AR_RTC_BASE 0x00020000 +#define AR_RTC_RC \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000) +#define AR_RTC_RC_M 0x00000003 +#define AR_RTC_RC_MAC_WARM 0x00000001 +#define AR_RTC_RC_MAC_COLD 0x00000002 +#define AR_RTC_RC_COLD_RESET 0x00000004 +#define AR_RTC_RC_WARM_RESET 0x00000008 + +#define AR_RTC_PLL_CONTROL \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014) + +#define AR_RTC_PLL_DIV 0x0000001f +#define AR_RTC_PLL_DIV_S 0 +#define AR_RTC_PLL_DIV2 0x00000020 +#define AR_RTC_PLL_REFDIV_5 0x000000c0 +#define AR_RTC_PLL_CLKSEL 0x00000300 +#define AR_RTC_PLL_CLKSEL_S 8 + +#define AR_RTC_RESET \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040) +#define AR_RTC_RESET_EN (0x00000001) + +#define AR_RTC_STATUS \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044) + +#define AR_RTC_STATUS_M \ + ((AR_SREV_9100(ah)) ? 0x0000003f : 0x0000000f) + +#define AR_RTC_PM_STATUS_M 0x0000000f + +#define AR_RTC_STATUS_SHUTDOWN 0x00000001 +#define AR_RTC_STATUS_ON 0x00000002 +#define AR_RTC_STATUS_SLEEP 0x00000004 +#define AR_RTC_STATUS_WAKEUP 0x00000008 + +#define AR_RTC_SLEEP_CLK \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048) +#define AR_RTC_FORCE_DERIVED_CLK 0x2 + +#define AR_RTC_FORCE_WAKE \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c) +#define AR_RTC_FORCE_WAKE_EN 0x00000001 +#define AR_RTC_FORCE_WAKE_ON_INT 0x00000002 + + +#define AR_RTC_INTR_CAUSE \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050) + +#define AR_RTC_INTR_ENABLE \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054) + +#define AR_RTC_INTR_MASK \ + ((AR_SREV_9100(ah)) ? 
(AR_RTC_BASE + 0x0058) : 0x7058) + +/* RTC_DERIVED_* - only for AR9100 */ + +#define AR_RTC_DERIVED_CLK (AR_RTC_BASE + 0x0038) +#define AR_RTC_DERIVED_CLK_PERIOD 0x0000fffe +#define AR_RTC_DERIVED_CLK_PERIOD_S 1 + +#define AR_SEQ_MASK 0x8060 + +#define AR_AN_RF2G1_CH0 0x7810 +#define AR_AN_RF2G1_CH0_OB 0x03800000 +#define AR_AN_RF2G1_CH0_OB_S 23 +#define AR_AN_RF2G1_CH0_DB 0x1C000000 +#define AR_AN_RF2G1_CH0_DB_S 26 + +#define AR_AN_RF5G1_CH0 0x7818 +#define AR_AN_RF5G1_CH0_OB5 0x00070000 +#define AR_AN_RF5G1_CH0_OB5_S 16 +#define AR_AN_RF5G1_CH0_DB5 0x00380000 +#define AR_AN_RF5G1_CH0_DB5_S 19 + +#define AR_AN_RF2G1_CH1 0x7834 +#define AR_AN_RF2G1_CH1_OB 0x03800000 +#define AR_AN_RF2G1_CH1_OB_S 23 +#define AR_AN_RF2G1_CH1_DB 0x1C000000 +#define AR_AN_RF2G1_CH1_DB_S 26 + +#define AR_AN_RF5G1_CH1 0x783C +#define AR_AN_RF5G1_CH1_OB5 0x00070000 +#define AR_AN_RF5G1_CH1_OB5_S 16 +#define AR_AN_RF5G1_CH1_DB5 0x00380000 +#define AR_AN_RF5G1_CH1_DB5_S 19 + +#define AR_AN_TOP1 0x7890 +#define AR_AN_TOP1_DACIPMODE 0x00040000 +#define AR_AN_TOP1_DACIPMODE_S 18 + +#define AR_AN_TOP2 0x7894 +#define AR_AN_TOP2_XPABIAS_LVL 0xC0000000 +#define AR_AN_TOP2_XPABIAS_LVL_S 30 +#define AR_AN_TOP2_LOCALBIAS 0x00200000 +#define AR_AN_TOP2_LOCALBIAS_S 21 +#define AR_AN_TOP2_PWDCLKIND 0x00400000 +#define AR_AN_TOP2_PWDCLKIND_S 22 + +#define AR_AN_SYNTH9 0x7868 +#define AR_AN_SYNTH9_REFDIVA 0xf8000000 +#define AR_AN_SYNTH9_REFDIVA_S 27 + +#define AR9285_AN_RF2G1 0x7820 +#define AR9285_AN_RF2G1_ENPACAL 0x00000800 +#define AR9285_AN_RF2G1_ENPACAL_S 11 +#define AR9285_AN_RF2G1_PDPADRV1 0x02000000 +#define AR9285_AN_RF2G1_PDPADRV1_S 25 +#define AR9285_AN_RF2G1_PDPADRV2 0x01000000 +#define AR9285_AN_RF2G1_PDPADRV2_S 24 +#define AR9285_AN_RF2G1_PDPAOUT 0x00800000 +#define AR9285_AN_RF2G1_PDPAOUT_S 23 + + +#define AR9285_AN_RF2G2 0x7824 +#define AR9285_AN_RF2G2_OFFCAL 0x00001000 +#define AR9285_AN_RF2G2_OFFCAL_S 12 + +#define AR9285_AN_RF2G3 0x7828 +#define AR9285_AN_RF2G3_PDVCCOMP 0x02000000 +#define AR9285_AN_RF2G3_PDVCCOMP_S 25 +#define AR9285_AN_RF2G3_OB_0 0x00E00000 +#define AR9285_AN_RF2G3_OB_0_S 21 +#define AR9285_AN_RF2G3_OB_1 0x001C0000 +#define AR9285_AN_RF2G3_OB_1_S 18 +#define AR9285_AN_RF2G3_OB_2 0x00038000 +#define AR9285_AN_RF2G3_OB_2_S 15 +#define AR9285_AN_RF2G3_OB_3 0x00007000 +#define AR9285_AN_RF2G3_OB_3_S 12 +#define AR9285_AN_RF2G3_OB_4 0x00000E00 +#define AR9285_AN_RF2G3_OB_4_S 9 + +#define AR9285_AN_RF2G3_DB1_0 0x000001C0 +#define AR9285_AN_RF2G3_DB1_0_S 6 +#define AR9285_AN_RF2G3_DB1_1 0x00000038 +#define AR9285_AN_RF2G3_DB1_1_S 3 +#define AR9285_AN_RF2G3_DB1_2 0x00000007 +#define AR9285_AN_RF2G3_DB1_2_S 0 +#define AR9285_AN_RF2G4 0x782C +#define AR9285_AN_RF2G4_DB1_3 0xE0000000 +#define AR9285_AN_RF2G4_DB1_3_S 29 +#define AR9285_AN_RF2G4_DB1_4 0x1C000000 +#define AR9285_AN_RF2G4_DB1_4_S 26 + +#define AR9285_AN_RF2G4_DB2_0 0x03800000 +#define AR9285_AN_RF2G4_DB2_0_S 23 +#define AR9285_AN_RF2G4_DB2_1 0x00700000 +#define AR9285_AN_RF2G4_DB2_1_S 20 +#define AR9285_AN_RF2G4_DB2_2 0x000E0000 +#define AR9285_AN_RF2G4_DB2_2_S 17 +#define AR9285_AN_RF2G4_DB2_3 0x0001C000 +#define AR9285_AN_RF2G4_DB2_3_S 14 +#define AR9285_AN_RF2G4_DB2_4 0x00003800 +#define AR9285_AN_RF2G4_DB2_4_S 11 + +/* AR9271 : 0x7828, 0x782c different setting from AR9285 */ +#define AR9271_AN_RF2G3_OB_cck 0x001C0000 +#define AR9271_AN_RF2G3_OB_cck_S 18 +#define AR9271_AN_RF2G3_OB_psk 0x00038000 +#define AR9271_AN_RF2G3_OB_psk_S 15 +#define AR9271_AN_RF2G3_OB_qam 0x00007000 +#define AR9271_AN_RF2G3_OB_qam_S 12 + +#define 
AR9271_AN_RF2G3_DB_1 0x00E00000 +#define AR9271_AN_RF2G3_DB_1_S 21 + +#define AR9271_AN_RF2G3_CCOMP 0xFFF +#define AR9271_AN_RF2G3_CCOMP_S 0 + +#define AR9271_AN_RF2G4_DB_2 0xE0000000 +#define AR9271_AN_RF2G4_DB_2_S 29 + +#define AR9285_AN_RF2G6 0x7834 +#define AR9285_AN_RF2G6_CCOMP 0x00007800 +#define AR9285_AN_RF2G6_CCOMP_S 11 +#define AR9285_AN_RF2G6_OFFS 0x03f00000 +#define AR9285_AN_RF2G6_OFFS_S 20 + +#define AR9271_AN_RF2G6_OFFS 0x07f00000 +#define AR9271_AN_RF2G6_OFFS_S 20 + +#define AR9285_AN_RF2G7 0x7838 +#define AR9285_AN_RF2G7_PWDDB 0x00000002 +#define AR9285_AN_RF2G7_PWDDB_S 1 +#define AR9285_AN_RF2G7_PADRVGN2TAB0 0xE0000000 +#define AR9285_AN_RF2G7_PADRVGN2TAB0_S 29 + +#define AR9285_AN_RF2G8 0x783C +#define AR9285_AN_RF2G8_PADRVGN2TAB0 0x0001C000 +#define AR9285_AN_RF2G8_PADRVGN2TAB0_S 14 + + +#define AR9285_AN_RF2G9 0x7840 +#define AR9285_AN_RXTXBB1 0x7854 +#define AR9285_AN_RXTXBB1_PDRXTXBB1 0x00000020 +#define AR9285_AN_RXTXBB1_PDRXTXBB1_S 5 +#define AR9285_AN_RXTXBB1_PDV2I 0x00000080 +#define AR9285_AN_RXTXBB1_PDV2I_S 7 +#define AR9285_AN_RXTXBB1_PDDACIF 0x00000100 +#define AR9285_AN_RXTXBB1_PDDACIF_S 8 +#define AR9285_AN_RXTXBB1_SPARE9 0x00000001 +#define AR9285_AN_RXTXBB1_SPARE9_S 0 + +#define AR9285_AN_TOP2 0x7868 + +#define AR9285_AN_TOP3 0x786c +#define AR9285_AN_TOP3_XPABIAS_LVL 0x0000000C +#define AR9285_AN_TOP3_XPABIAS_LVL_S 2 +#define AR9285_AN_TOP3_PWDDAC 0x00800000 +#define AR9285_AN_TOP3_PWDDAC_S 23 + +#define AR9285_AN_TOP4 0x7870 +#define AR9285_AN_TOP4_DEFAULT 0x10142c00 + +#define AR9287_AN_RF2G3_CH0 0x7808 +#define AR9287_AN_RF2G3_CH1 0x785c +#define AR9287_AN_RF2G3_DB1 0xE0000000 +#define AR9287_AN_RF2G3_DB1_S 29 +#define AR9287_AN_RF2G3_DB2 0x1C000000 +#define AR9287_AN_RF2G3_DB2_S 26 +#define AR9287_AN_RF2G3_OB_CCK 0x03800000 +#define AR9287_AN_RF2G3_OB_CCK_S 23 +#define AR9287_AN_RF2G3_OB_PSK 0x00700000 +#define AR9287_AN_RF2G3_OB_PSK_S 20 +#define AR9287_AN_RF2G3_OB_QAM 0x000E0000 +#define AR9287_AN_RF2G3_OB_QAM_S 17 +#define AR9287_AN_RF2G3_OB_PAL_OFF 0x0001C000 +#define AR9287_AN_RF2G3_OB_PAL_OFF_S 14 + +#define AR9287_AN_TXPC0 0x7898 +#define AR9287_AN_TXPC0_TXPCMODE 0x0000C000 +#define AR9287_AN_TXPC0_TXPCMODE_S 14 +#define AR9287_AN_TXPC0_TXPCMODE_NORMAL 0 +#define AR9287_AN_TXPC0_TXPCMODE_TEST 1 +#define AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE 2 +#define AR9287_AN_TXPC0_TXPCMODE_ATBTEST 3 + +#define AR9287_AN_TOP2 0x78b4 +#define AR9287_AN_TOP2_XPABIAS_LVL 0xC0000000 +#define AR9287_AN_TOP2_XPABIAS_LVL_S 30 + +/* AR9271 specific stuff */ +#define AR9271_RESET_POWER_DOWN_CONTROL 0x50044 +#define AR9271_RADIO_RF_RST 0x20 +#define AR9271_GATE_MAC_CTL 0x4000 + +#define AR_STA_ID0 0x8000 +#define AR_STA_ID1 0x8004 +#define AR_STA_ID1_SADH_MASK 0x0000FFFF +#define AR_STA_ID1_STA_AP 0x00010000 +#define AR_STA_ID1_ADHOC 0x00020000 +#define AR_STA_ID1_PWR_SAV 0x00040000 +#define AR_STA_ID1_KSRCHDIS 0x00080000 +#define AR_STA_ID1_PCF 0x00100000 +#define AR_STA_ID1_USE_DEFANT 0x00200000 +#define AR_STA_ID1_DEFANT_UPDATE 0x00400000 +#define AR_STA_ID1_RTS_USE_DEF 0x00800000 +#define AR_STA_ID1_ACKCTS_6MB 0x01000000 +#define AR_STA_ID1_BASE_RATE_11B 0x02000000 +#define AR_STA_ID1_SECTOR_SELF_GEN 0x04000000 +#define AR_STA_ID1_CRPT_MIC_ENABLE 0x08000000 +#define AR_STA_ID1_KSRCH_MODE 0x10000000 +#define AR_STA_ID1_PRESERVE_SEQNUM 0x20000000 +#define AR_STA_ID1_CBCIV_ENDIAN 0x40000000 +#define AR_STA_ID1_MCAST_KSRCH 0x80000000 + +#define AR_BSS_ID0 0x8008 +#define AR_BSS_ID1 0x800C +#define AR_BSS_ID1_U16 0x0000FFFF +#define AR_BSS_ID1_AID 0x07FF0000 +#define 
AR_BSS_ID1_AID_S 16 + +#define AR_BCN_RSSI_AVE 0x8010 +#define AR_BCN_RSSI_AVE_MASK 0x00000FFF + +#define AR_TIME_OUT 0x8014 +#define AR_TIME_OUT_ACK 0x00003FFF +#define AR_TIME_OUT_ACK_S 0 +#define AR_TIME_OUT_CTS 0x3FFF0000 +#define AR_TIME_OUT_CTS_S 16 +#define AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR 0x16001D56 + +#define AR_RSSI_THR 0x8018 +#define AR_RSSI_THR_MASK 0x000000FF +#define AR_RSSI_THR_BM_THR 0x0000FF00 +#define AR_RSSI_THR_BM_THR_S 8 +#define AR_RSSI_BCN_WEIGHT 0x1F000000 +#define AR_RSSI_BCN_WEIGHT_S 24 +#define AR_RSSI_BCN_RSSI_RST 0x20000000 + +#define AR_USEC 0x801c +#define AR_USEC_USEC 0x0000007F +#define AR_USEC_TX_LAT 0x007FC000 +#define AR_USEC_TX_LAT_S 14 +#define AR_USEC_RX_LAT 0x1F800000 +#define AR_USEC_RX_LAT_S 23 +#define AR_USEC_ASYNC_FIFO_DUR 0x12e00074 + +#define AR_RESET_TSF 0x8020 +#define AR_RESET_TSF_ONCE 0x01000000 + +#define AR_MAX_CFP_DUR 0x8038 +#define AR_CFP_VAL 0x0000FFFF + +#define AR_RX_FILTER 0x803C + +#define AR_MCAST_FIL0 0x8040 +#define AR_MCAST_FIL1 0x8044 + +/* + * AR_DIAG_SW - Register which can be used for diagnostics and testing purposes. + * + * The force RX abort (AR_DIAG_RX_ABORT, bit 25) can be used in conjunction with + * RX block (AR_DIAG_RX_DIS, bit 5) to help fast channel change to shut down + * receive. The force RX abort bit will kill any frame which is currently being + * transferred between the MAC and baseband. The RX block bit (AR_DIAG_RX_DIS) + * will prevent any new frames from getting started. + */ +#define AR_DIAG_SW 0x8048 +#define AR_DIAG_CACHE_ACK 0x00000001 +#define AR_DIAG_ACK_DIS 0x00000002 +#define AR_DIAG_CTS_DIS 0x00000004 +#define AR_DIAG_ENCRYPT_DIS 0x00000008 +#define AR_DIAG_DECRYPT_DIS 0x00000010 +#define AR_DIAG_RX_DIS 0x00000020 /* RX block */ +#define AR_DIAG_LOOP_BACK 0x00000040 +#define AR_DIAG_CORR_FCS 0x00000080 +#define AR_DIAG_CHAN_INFO 0x00000100 +#define AR_DIAG_SCRAM_SEED 0x0001FE00 +#define AR_DIAG_SCRAM_SEED_S 8 +#define AR_DIAG_FRAME_NV0 0x00020000 +#define AR_DIAG_OBS_PT_SEL1 0x000C0000 +#define AR_DIAG_OBS_PT_SEL1_S 18 +#define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */ +#define AR_DIAG_IGNORE_VIRT_CS 0x00200000 +#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000 +#define AR_DIAG_EIFS_CTRL_ENA 0x00800000 +#define AR_DIAG_DUAL_CHAIN_INFO 0x01000000 +#define AR_DIAG_RX_ABORT 0x02000000 /* Force RX abort */ +#define AR_DIAG_SATURATE_CYCLE_CNT 0x04000000 +#define AR_DIAG_OBS_PT_SEL2 0x08000000 +#define AR_DIAG_RX_CLEAR_CTL_LOW 0x10000000 +#define AR_DIAG_RX_CLEAR_EXT_LOW 0x20000000 + +#define AR_TSF_L32 0x804c +#define AR_TSF_U32 0x8050 + +#define AR_TST_ADDAC 0x8054 +#define AR_DEF_ANTENNA 0x8058 + +#define AR_AES_MUTE_MASK0 0x805c +#define AR_AES_MUTE_MASK0_FC 0x0000FFFF +#define AR_AES_MUTE_MASK0_QOS 0xFFFF0000 +#define AR_AES_MUTE_MASK0_QOS_S 16 + +#define AR_AES_MUTE_MASK1 0x8060 +#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF +#define AR_AES_MUTE_MASK1_FC_MGMT 0xFFFF0000 +#define AR_AES_MUTE_MASK1_FC_MGMT_S 16 + +#define AR_GATED_CLKS 0x8064 +#define AR_GATED_CLKS_TX 0x00000002 +#define AR_GATED_CLKS_RX 0x00000004 +#define AR_GATED_CLKS_REG 0x00000008 + +#define AR_OBS_BUS_CTRL 0x8068 +#define AR_OBS_BUS_SEL_1 0x00040000 +#define AR_OBS_BUS_SEL_2 0x00080000 +#define AR_OBS_BUS_SEL_3 0x000C0000 +#define AR_OBS_BUS_SEL_4 0x08040000 +#define AR_OBS_BUS_SEL_5 0x08080000 + +#define AR_OBS_BUS_1 0x806c +#define AR_OBS_BUS_1_PCU 0x00000001 +#define AR_OBS_BUS_1_RX_END 0x00000002 +#define AR_OBS_BUS_1_RX_WEP 0x00000004 +#define AR_OBS_BUS_1_RX_BEACON 0x00000008 +#define 
AR_OBS_BUS_1_RX_FILTER 0x00000010 +#define AR_OBS_BUS_1_TX_HCF 0x00000020 +#define AR_OBS_BUS_1_QUIET_TIME 0x00000040 +#define AR_OBS_BUS_1_CHAN_IDLE 0x00000080 +#define AR_OBS_BUS_1_TX_HOLD 0x00000100 +#define AR_OBS_BUS_1_TX_FRAME 0x00000200 +#define AR_OBS_BUS_1_RX_FRAME 0x00000400 +#define AR_OBS_BUS_1_RX_CLEAR 0x00000800 +#define AR_OBS_BUS_1_WEP_STATE 0x0003F000 +#define AR_OBS_BUS_1_WEP_STATE_S 12 +#define AR_OBS_BUS_1_RX_STATE 0x01F00000 +#define AR_OBS_BUS_1_RX_STATE_S 20 +#define AR_OBS_BUS_1_TX_STATE 0x7E000000 +#define AR_OBS_BUS_1_TX_STATE_S 25 + +#define AR_LAST_TSTP 0x8080 +#define AR_NAV 0x8084 +#define AR_RTS_OK 0x8088 +#define AR_RTS_FAIL 0x808c +#define AR_ACK_FAIL 0x8090 +#define AR_FCS_FAIL 0x8094 +#define AR_BEACON_CNT 0x8098 + +#define AR_SLEEP1 0x80d4 +#define AR_SLEEP1_ASSUME_DTIM 0x00080000 +#define AR_SLEEP1_CAB_TIMEOUT 0xFFE00000 +#define AR_SLEEP1_CAB_TIMEOUT_S 21 + +#define AR_SLEEP2 0x80d8 +#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000 +#define AR_SLEEP2_BEACON_TIMEOUT_S 21 + +#define AR_TPC 0x80e8 +#define AR_TPC_ACK 0x0000003f +#define AR_TPC_ACK_S 0x00 +#define AR_TPC_CTS 0x00003f00 +#define AR_TPC_CTS_S 0x08 +#define AR_TPC_CHIRP 0x003f0000 +#define AR_TPC_CHIRP_S 0x16 + +#define AR_TFCNT 0x80ec +#define AR_RFCNT 0x80f0 +#define AR_RCCNT 0x80f4 +#define AR_CCCNT 0x80f8 + +#define AR_QUIET1 0x80fc +#define AR_QUIET1_NEXT_QUIET_S 0 +#define AR_QUIET1_NEXT_QUIET_M 0x0000ffff +#define AR_QUIET1_QUIET_ENABLE 0x00010000 +#define AR_QUIET1_QUIET_ACK_CTS_ENABLE 0x00020000 +#define AR_QUIET1_QUIET_ACK_CTS_ENABLE_S 17 +#define AR_QUIET2 0x8100 +#define AR_QUIET2_QUIET_PERIOD_S 0 +#define AR_QUIET2_QUIET_PERIOD_M 0x0000ffff +#define AR_QUIET2_QUIET_DUR_S 16 +#define AR_QUIET2_QUIET_DUR 0xffff0000 + +#define AR_TSF_PARM 0x8104 +#define AR_TSF_INCREMENT_M 0x000000ff +#define AR_TSF_INCREMENT_S 0x00 + +#define AR_QOS_NO_ACK 0x8108 +#define AR_QOS_NO_ACK_TWO_BIT 0x0000000f +#define AR_QOS_NO_ACK_TWO_BIT_S 0 +#define AR_QOS_NO_ACK_BIT_OFF 0x00000070 +#define AR_QOS_NO_ACK_BIT_OFF_S 4 +#define AR_QOS_NO_ACK_BYTE_OFF 0x00000180 +#define AR_QOS_NO_ACK_BYTE_OFF_S 7 + +#define AR_PHY_ERR 0x810c + +#define AR_PHY_ERR_DCHIRP 0x00000008 +#define AR_PHY_ERR_RADAR 0x00000020 +#define AR_PHY_ERR_OFDM_TIMING 0x00020000 +#define AR_PHY_ERR_CCK_TIMING 0x02000000 + +#define AR_RXFIFO_CFG 0x8114 + + +#define AR_MIC_QOS_CONTROL 0x8118 +#define AR_MIC_QOS_SELECT 0x811c + +#define AR_PCU_MISC 0x8120 +#define AR_PCU_FORCE_BSSID_MATCH 0x00000001 +#define AR_PCU_MIC_NEW_LOC_ENA 0x00000004 +#define AR_PCU_TX_ADD_TSF 0x00000008 +#define AR_PCU_CCK_SIFS_MODE 0x00000010 +#define AR_PCU_RX_ANT_UPDT 0x00000800 +#define AR_PCU_TXOP_TBTT_LIMIT_ENA 0x00001000 +#define AR_PCU_MISS_BCN_IN_SLEEP 0x00004000 +#define AR_PCU_BUG_12306_FIX_ENA 0x00020000 +#define AR_PCU_FORCE_QUIET_COLL 0x00040000 +#define AR_PCU_TBTT_PROTECT 0x00200000 +#define AR_PCU_CLEAR_VMF 0x01000000 +#define AR_PCU_CLEAR_BA_VALID 0x04000000 + +#define AR_PCU_BT_ANT_PREVENT_RX 0x00100000 +#define AR_PCU_BT_ANT_PREVENT_RX_S 20 + +#define AR_FILT_OFDM 0x8124 +#define AR_FILT_OFDM_COUNT 0x00FFFFFF + +#define AR_FILT_CCK 0x8128 +#define AR_FILT_CCK_COUNT 0x00FFFFFF + +#define AR_PHY_ERR_1 0x812c +#define AR_PHY_ERR_1_COUNT 0x00FFFFFF +#define AR_PHY_ERR_MASK_1 0x8130 + +#define AR_PHY_ERR_2 0x8134 +#define AR_PHY_ERR_2_COUNT 0x00FFFFFF +#define AR_PHY_ERR_MASK_2 0x8138 + +#define AR_PHY_COUNTMAX (3 << 22) +#define AR_MIBCNT_INTRMASK (3 << 22) + +#define AR_TSFOOR_THRESHOLD 0x813c +#define AR_TSFOOR_THRESHOLD_VAL 0x0000FFFF + +#define 
AR_PHY_ERR_EIFS_MASK 8144 + +#define AR_PHY_ERR_3 0x8168 +#define AR_PHY_ERR_3_COUNT 0x00FFFFFF +#define AR_PHY_ERR_MASK_3 0x816c + +#define AR_BT_COEX_MODE 0x8170 +#define AR_BT_TIME_EXTEND 0x000000ff +#define AR_BT_TIME_EXTEND_S 0 +#define AR_BT_TXSTATE_EXTEND 0x00000100 +#define AR_BT_TXSTATE_EXTEND_S 8 +#define AR_BT_TX_FRAME_EXTEND 0x00000200 +#define AR_BT_TX_FRAME_EXTEND_S 9 +#define AR_BT_MODE 0x00000c00 +#define AR_BT_MODE_S 10 +#define AR_BT_QUIET 0x00001000 +#define AR_BT_QUIET_S 12 +#define AR_BT_QCU_THRESH 0x0001e000 +#define AR_BT_QCU_THRESH_S 13 +#define AR_BT_RX_CLEAR_POLARITY 0x00020000 +#define AR_BT_RX_CLEAR_POLARITY_S 17 +#define AR_BT_PRIORITY_TIME 0x00fc0000 +#define AR_BT_PRIORITY_TIME_S 18 +#define AR_BT_FIRST_SLOT_TIME 0xff000000 +#define AR_BT_FIRST_SLOT_TIME_S 24 + +#define AR_BT_COEX_WEIGHT 0x8174 +#define AR_BT_COEX_WGHT 0xff55 +#define AR_STOMP_ALL_WLAN_WGHT 0xfcfc +#define AR_STOMP_LOW_WLAN_WGHT 0xa8a8 +#define AR_STOMP_NONE_WLAN_WGHT 0x0000 +#define AR_BTCOEX_BT_WGHT 0x0000ffff +#define AR_BTCOEX_BT_WGHT_S 0 +#define AR_BTCOEX_WL_WGHT 0xffff0000 +#define AR_BTCOEX_WL_WGHT_S 16 + +#define AR_BT_COEX_MODE2 0x817c +#define AR_BT_BCN_MISS_THRESH 0x000000ff +#define AR_BT_BCN_MISS_THRESH_S 0 +#define AR_BT_BCN_MISS_CNT 0x0000ff00 +#define AR_BT_BCN_MISS_CNT_S 8 +#define AR_BT_HOLD_RX_CLEAR 0x00010000 +#define AR_BT_HOLD_RX_CLEAR_S 16 +#define AR_BT_DISABLE_BT_ANT 0x00100000 +#define AR_BT_DISABLE_BT_ANT_S 20 + +#define AR_TXSIFS 0x81d0 +#define AR_TXSIFS_TIME 0x000000FF +#define AR_TXSIFS_TX_LATENCY 0x00000F00 +#define AR_TXSIFS_TX_LATENCY_S 8 +#define AR_TXSIFS_ACK_SHIFT 0x00007000 +#define AR_TXSIFS_ACK_SHIFT_S 12 + +#define AR_TXOP_X 0x81ec +#define AR_TXOP_X_VAL 0x000000FF + + +#define AR_TXOP_0_3 0x81f0 +#define AR_TXOP_4_7 0x81f4 +#define AR_TXOP_8_11 0x81f8 +#define AR_TXOP_12_15 0x81fc + +#define AR_NEXT_NDP2_TIMER 0x8180 +#define AR_FIRST_NDP_TIMER 7 +#define AR_NDP2_PERIOD 0x81a0 +#define AR_NDP2_TIMER_MODE 0x81c0 +#define AR_NEXT_TBTT_TIMER 0x8200 +#define AR_NEXT_DMA_BEACON_ALERT 0x8204 +#define AR_NEXT_SWBA 0x8208 +#define AR_NEXT_CFP 0x8208 +#define AR_NEXT_HCF 0x820C +#define AR_NEXT_TIM 0x8210 +#define AR_NEXT_DTIM 0x8214 +#define AR_NEXT_QUIET_TIMER 0x8218 +#define AR_NEXT_NDP_TIMER 0x821C + +#define AR_BEACON_PERIOD 0x8220 +#define AR_DMA_BEACON_PERIOD 0x8224 +#define AR_SWBA_PERIOD 0x8228 +#define AR_HCF_PERIOD 0x822C +#define AR_TIM_PERIOD 0x8230 +#define AR_DTIM_PERIOD 0x8234 +#define AR_QUIET_PERIOD 0x8238 +#define AR_NDP_PERIOD 0x823C + +#define AR_TIMER_MODE 0x8240 +#define AR_TBTT_TIMER_EN 0x00000001 +#define AR_DBA_TIMER_EN 0x00000002 +#define AR_SWBA_TIMER_EN 0x00000004 +#define AR_HCF_TIMER_EN 0x00000008 +#define AR_TIM_TIMER_EN 0x00000010 +#define AR_DTIM_TIMER_EN 0x00000020 +#define AR_QUIET_TIMER_EN 0x00000040 +#define AR_NDP_TIMER_EN 0x00000080 +#define AR_TIMER_OVERFLOW_INDEX 0x00000700 +#define AR_TIMER_OVERFLOW_INDEX_S 8 +#define AR_TIMER_THRESH 0xFFFFF000 +#define AR_TIMER_THRESH_S 12 + +#define AR_SLP32_MODE 0x8244 +#define AR_SLP32_HALF_CLK_LATENCY 0x000FFFFF +#define AR_SLP32_ENA 0x00100000 +#define AR_SLP32_TSF_WRITE_STATUS 0x00200000 + +#define AR_SLP32_WAKE 0x8248 +#define AR_SLP32_WAKE_XTL_TIME 0x0000FFFF + +#define AR_SLP32_INC 0x824c +#define AR_SLP32_TST_INC 0x000FFFFF + +#define AR_SLP_CNT 0x8250 +#define AR_SLP_CYCLE_CNT 0x8254 + +#define AR_SLP_MIB_CTRL 0x8258 +#define AR_SLP_MIB_CLEAR 0x00000001 +#define AR_SLP_MIB_PENDING 0x00000002 + +#define AR_MAC_PCU_LOGIC_ANALYZER 0x8264 +#define 
AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768 0x20000000 + + +#define AR_2040_MODE 0x8318 +#define AR_2040_JOINED_RX_CLEAR 0x00000001 + + +#define AR_EXTRCCNT 0x8328 + +#define AR_SELFGEN_MASK 0x832c + +#define AR_PCU_TXBUF_CTRL 0x8340 +#define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF +#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700 +#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380 + +#define AR_PCU_MISC_MODE2 0x8344 +#define AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE 0x00000002 +#define AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT 0x00000004 + +#define AR_PCU_MISC_MODE2_RESERVED 0x00000038 +#define AR_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE 0x00000040 +#define AR_PCU_MISC_MODE2_CFP_IGNORE 0x00000080 +#define AR_PCU_MISC_MODE2_MGMT_QOS 0x0000FF00 +#define AR_PCU_MISC_MODE2_MGMT_QOS_S 8 +#define AR_PCU_MISC_MODE2_ENABLE_LOAD_NAV_BEACON_DURATION 0x00010000 +#define AR_PCU_MISC_MODE2_ENABLE_AGGWEP 0x00020000 +#define AR_PCU_MISC_MODE2_HWWAR1 0x00100000 +#define AR_PCU_MISC_MODE2_HWWAR2 0x02000000 +#define AR_PCU_MISC_MODE2_RESERVED2 0xFFFE0000 + +#define AR_MAC_PCU_ASYNC_FIFO_REG3 0x8358 +#define AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL 0x00000400 +#define AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET 0x80000000 + + +#define AR_AES_MUTE_MASK0 0x805c +#define AR_AES_MUTE_MASK0_FC 0x0000FFFF +#define AR_AES_MUTE_MASK0_QOS 0xFFFF0000 +#define AR_AES_MUTE_MASK0_QOS_S 16 + +#define AR_AES_MUTE_MASK1 0x8060 +#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF +#define AR_AES_MUTE_MASK1_SEQ_S 0 +#define AR_AES_MUTE_MASK1_FC_MGMT 0xFFFF0000 +#define AR_AES_MUTE_MASK1_FC_MGMT_S 16 + +#define AR_RATE_DURATION_0 0x8700 +#define AR_RATE_DURATION_31 0x87CC +#define AR_RATE_DURATION_32 0x8780 +#define AR_RATE_DURATION(_n) (AR_RATE_DURATION_0 + ((_n)<<2)) + + +#define AR_KEYTABLE_0 0x8800 +#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32)) +#define AR_KEY_CACHE_SIZE 128 +#define AR_RSVD_KEYTABLE_ENTRIES 4 +#define AR_KEY_TYPE 0x00000007 +#define AR_KEYTABLE_TYPE_40 0x00000000 +#define AR_KEYTABLE_TYPE_104 0x00000001 +#define AR_KEYTABLE_TYPE_128 0x00000003 +#define AR_KEYTABLE_TYPE_TKIP 0x00000004 +#define AR_KEYTABLE_TYPE_AES 0x00000005 +#define AR_KEYTABLE_TYPE_CCM 0x00000006 +#define AR_KEYTABLE_TYPE_CLR 0x00000007 +#define AR_KEYTABLE_ANT 0x00000008 +#define AR_KEYTABLE_VALID 0x00008000 +#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0) +#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4) +#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8) +#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12) +#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16) +#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20) +#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24) +#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28) + +#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */ +#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */ + +#endif diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c new file mode 100644 index 0000000..a43fbf8 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/virtual.c @@ -0,0 +1,746 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ath9k.h" + +struct ath9k_vif_iter_data { + int count; + u8 *addr; +}; + +static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct ath9k_vif_iter_data *iter_data = data; + u8 *nbuf; + + nbuf = krealloc(iter_data->addr, (iter_data->count + 1) * ETH_ALEN, + GFP_ATOMIC); + if (nbuf == NULL) + return; + + memcpy(nbuf + iter_data->count * ETH_ALEN, mac, ETH_ALEN); + iter_data->addr = nbuf; + iter_data->count++; +} + +void ath9k_set_bssid_mask(struct ieee80211_hw *hw) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath9k_vif_iter_data iter_data; + int i, j; + u8 mask[ETH_ALEN]; + + /* + * Add primary MAC address even if it is not in active use since it + * will be configured to the hardware as the starting point and the + * BSSID mask will need to be changed if another address is active. + */ + iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC); + if (iter_data.addr) { + memcpy(iter_data.addr, common->macaddr, ETH_ALEN); + iter_data.count = 1; + } else + iter_data.count = 0; + + /* Get list of all active MAC addresses */ + spin_lock_bh(&sc->wiphy_lock); + ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter, + &iter_data); + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (sc->sec_wiphy[i] == NULL) + continue; + ieee80211_iterate_active_interfaces_atomic( + sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data); + } + spin_unlock_bh(&sc->wiphy_lock); + + /* Generate an address mask to cover all active addresses */ + memset(mask, 0, ETH_ALEN); + for (i = 0; i < iter_data.count; i++) { + u8 *a1 = iter_data.addr + i * ETH_ALEN; + for (j = i + 1; j < iter_data.count; j++) { + u8 *a2 = iter_data.addr + j * ETH_ALEN; + mask[0] |= a1[0] ^ a2[0]; + mask[1] |= a1[1] ^ a2[1]; + mask[2] |= a1[2] ^ a2[2]; + mask[3] |= a1[3] ^ a2[3]; + mask[4] |= a1[4] ^ a2[4]; + mask[5] |= a1[5] ^ a2[5]; + } + } + + kfree(iter_data.addr); + + /* Invert the mask and configure hardware */ + common->bssidmask[0] = ~mask[0]; + common->bssidmask[1] = ~mask[1]; + common->bssidmask[2] = ~mask[2]; + common->bssidmask[3] = ~mask[3]; + common->bssidmask[4] = ~mask[4]; + common->bssidmask[5] = ~mask[5]; + + ath_hw_setbssidmask(common); +} + +int ath9k_wiphy_add(struct ath_softc *sc) +{ + int i, error; + struct ath_wiphy *aphy; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ieee80211_hw *hw; + u8 addr[ETH_ALEN]; + + hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops); + if (hw == NULL) + return -ENOMEM; + + spin_lock_bh(&sc->wiphy_lock); + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (sc->sec_wiphy[i] == NULL) + break; + } + + if (i == sc->num_sec_wiphy) { + /* No empty slot available; increase array length */ + struct ath_wiphy **n; + n = krealloc(sc->sec_wiphy, + (sc->num_sec_wiphy + 1) * + sizeof(struct ath_wiphy *), + GFP_ATOMIC); + if (n == NULL) { + spin_unlock_bh(&sc->wiphy_lock); + ieee80211_free_hw(hw); + return -ENOMEM; + } + n[i] = NULL; + sc->sec_wiphy = n; + sc->num_sec_wiphy++; + } + + SET_IEEE80211_DEV(hw, sc->dev); + + aphy = hw->priv; + aphy->sc = sc; + aphy->hw = hw; + sc->sec_wiphy[i] = aphy; + 
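The mask arithmetic in ath9k_set_bssid_mask() above ORs together the XOR of every pair of active MAC addresses and then inverts the result, so a mask bit stays set only where every active address agrees; the hardware ignores the cleared bits when matching received frames, which lets frames addressed to any of the virtual MACs pass the filter. A minimal standalone sketch of the same arithmetic (plain C, illustrative names, not taken from the patch):

#include <stdio.h>

#define ETH_ALEN 6

/*
 * Illustrative sketch of the derivation used above: OR the pairwise XORs of
 * all active addresses, then invert, so a set bit means "this bit is
 * identical across every active MAC".
 */
static void bssid_mask_sketch(const unsigned char (*addrs)[ETH_ALEN], int count,
                              unsigned char mask[ETH_ALEN])
{
        unsigned char diff[ETH_ALEN] = { 0 };
        int i, j, k;

        for (i = 0; i < count; i++)
                for (j = i + 1; j < count; j++)
                        for (k = 0; k < ETH_ALEN; k++)
                                diff[k] |= addrs[i][k] ^ addrs[j][k];

        for (k = 0; k < ETH_ALEN; k++)
                mask[k] = ~diff[k];
}

int main(void)
{
        /* two hypothetical virtual-wiphy addresses differing only in the last octet */
        const unsigned char addrs[2][ETH_ALEN] = {
                { 0x02, 0x03, 0x7f, 0x10, 0x20, 0x30 },
                { 0x02, 0x03, 0x7f, 0x10, 0x20, 0x31 },
        };
        unsigned char mask[ETH_ALEN];
        int k;

        bssid_mask_sketch(addrs, 2, mask);
        for (k = 0; k < ETH_ALEN; k++)
                printf("%02x%c", mask[k], k == ETH_ALEN - 1 ? '\n' : ':');
        /* prints ff:ff:ff:ff:ff:fe - only the final bit differs between the MACs */
        return 0;
}

With the mask derived this way, the one hardware address filter can cover all active virtual interfaces at once while still rejecting unrelated traffic, which is why the primary MAC address is always folded in as the starting point.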
spin_unlock_bh(&sc->wiphy_lock); + + memcpy(addr, common->macaddr, ETH_ALEN); + addr[0] |= 0x02; /* Locally managed address */ + /* + * XOR virtual wiphy index into the least significant bits to generate + * a different MAC address for each virtual wiphy. + */ + addr[5] ^= i & 0xff; + addr[4] ^= (i & 0xff00) >> 8; + addr[3] ^= (i & 0xff0000) >> 16; + + SET_IEEE80211_PERM_ADDR(hw, addr); + + ath9k_set_hw_capab(sc, hw); + + error = ieee80211_register_hw(hw); + + if (error == 0) { + /* Make sure wiphy scheduler is started (if enabled) */ + ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int); + } + + return error; +} + +int ath9k_wiphy_del(struct ath_wiphy *aphy) +{ + struct ath_softc *sc = aphy->sc; + int i; + + spin_lock_bh(&sc->wiphy_lock); + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (aphy == sc->sec_wiphy[i]) { + sc->sec_wiphy[i] = NULL; + spin_unlock_bh(&sc->wiphy_lock); + ieee80211_unregister_hw(aphy->hw); + ieee80211_free_hw(aphy->hw); + return 0; + } + } + spin_unlock_bh(&sc->wiphy_lock); + return -ENOENT; +} + +static int ath9k_send_nullfunc(struct ath_wiphy *aphy, + struct ieee80211_vif *vif, const u8 *bssid, + int ps) +{ + struct ath_softc *sc = aphy->sc; + struct ath_tx_control txctl; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + __le16 fc; + struct ieee80211_tx_info *info; + + skb = dev_alloc_skb(24); + if (skb == NULL) + return -ENOMEM; + hdr = (struct ieee80211_hdr *) skb_put(skb, 24); + memset(hdr, 0, 24); + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_TODS); + if (ps) + fc |= cpu_to_le16(IEEE80211_FCTL_PM); + hdr->frame_control = fc; + memcpy(hdr->addr1, bssid, ETH_ALEN); + memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN); + memcpy(hdr->addr3, bssid, ETH_ALEN); + + info = IEEE80211_SKB_CB(skb); + memset(info, 0, sizeof(*info)); + info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS; + info->control.vif = vif; + info->control.rates[0].idx = 0; + info->control.rates[0].count = 4; + info->control.rates[1].idx = -1; + + memset(&txctl, 0, sizeof(struct ath_tx_control)); + txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]]; + txctl.frame_type = ps ? 
ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE; + + if (ath_tx_start(aphy->hw, skb, &txctl) != 0) + goto exit; + + return 0; +exit: + dev_kfree_skb_any(skb); + return -1; +} + +static bool __ath9k_wiphy_pausing(struct ath_softc *sc) +{ + int i; + if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING) + return true; + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (sc->sec_wiphy[i] && + sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING) + return true; + } + return false; +} + +static bool ath9k_wiphy_pausing(struct ath_softc *sc) +{ + bool ret; + spin_lock_bh(&sc->wiphy_lock); + ret = __ath9k_wiphy_pausing(sc); + spin_unlock_bh(&sc->wiphy_lock); + return ret; +} + +static bool __ath9k_wiphy_scanning(struct ath_softc *sc) +{ + int i; + if (sc->pri_wiphy->state == ATH_WIPHY_SCAN) + return true; + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (sc->sec_wiphy[i] && + sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN) + return true; + } + return false; +} + +bool ath9k_wiphy_scanning(struct ath_softc *sc) +{ + bool ret; + spin_lock_bh(&sc->wiphy_lock); + ret = __ath9k_wiphy_scanning(sc); + spin_unlock_bh(&sc->wiphy_lock); + return ret; +} + +static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy); + +/* caller must hold wiphy_lock */ +static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy) +{ + if (aphy == NULL) + return; + if (aphy->chan_idx != aphy->sc->chan_idx) + return; /* wiphy not on the selected channel */ + __ath9k_wiphy_unpause(aphy); +} + +static void ath9k_wiphy_unpause_channel(struct ath_softc *sc) +{ + int i; + spin_lock_bh(&sc->wiphy_lock); + __ath9k_wiphy_unpause_ch(sc->pri_wiphy); + for (i = 0; i < sc->num_sec_wiphy; i++) + __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]); + spin_unlock_bh(&sc->wiphy_lock); +} + +void ath9k_wiphy_chan_work(struct work_struct *work) +{ + struct ath_softc *sc = container_of(work, struct ath_softc, chan_work); + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_wiphy *aphy = sc->next_wiphy; + + if (aphy == NULL) + return; + + /* + * All pending interfaces paused; ready to change + * channels. + */ + + /* Change channels */ + mutex_lock(&sc->mutex); + /* XXX: remove me eventually */ + ath9k_update_ichannel(sc, aphy->hw, + &sc->sc_ah->channels[sc->chan_idx]); + + /* sync hw configuration for hw code */ + common->hw = aphy->hw; + + ath_update_chainmask(sc, sc->chan_is_ht); + if (ath_set_channel(sc, aphy->hw, + &sc->sc_ah->channels[sc->chan_idx]) < 0) { + printk(KERN_DEBUG "ath9k: Failed to set channel for new " + "virtual wiphy\n"); + mutex_unlock(&sc->mutex); + return; + } + mutex_unlock(&sc->mutex); + + ath9k_wiphy_unpause_channel(sc); +} + +/* + * ath9k version of ieee80211_tx_status() for TX frames that are generated + * internally in the driver. + */ +void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ath_wiphy *aphy = hw->priv; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + + if ((tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE) && + aphy->state == ATH_WIPHY_PAUSING) { + if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) { + printk(KERN_DEBUG "ath9k: %s: no ACK for pause " + "frame\n", wiphy_name(hw->wiphy)); + /* + * The AP did not reply; ignore this to allow us to + * continue. + */ + } + aphy->state = ATH_WIPHY_PAUSED; + if (!ath9k_wiphy_pausing(aphy->sc)) { + /* + * Drop from tasklet to work to allow mutex for channel + * change. 
+ */ + ieee80211_queue_work(aphy->sc->hw, + &aphy->sc->chan_work); + } + } + + dev_kfree_skb(skb); +} + +static void ath9k_mark_paused(struct ath_wiphy *aphy) +{ + struct ath_softc *sc = aphy->sc; + aphy->state = ATH_WIPHY_PAUSED; + if (!__ath9k_wiphy_pausing(sc)) + ieee80211_queue_work(sc->hw, &sc->chan_work); +} + +static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct ath_wiphy *aphy = data; + struct ath_vif *avp = (void *) vif->drv_priv; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + if (!vif->bss_conf.assoc) { + ath9k_mark_paused(aphy); + break; + } + /* TODO: could avoid this if already in PS mode */ + if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) { + printk(KERN_DEBUG "%s: failed to send PS nullfunc\n", + __func__); + ath9k_mark_paused(aphy); + } + break; + case NL80211_IFTYPE_AP: + /* Beacon transmission is paused by aphy->state change */ + ath9k_mark_paused(aphy); + break; + default: + break; + } +} + +/* caller must hold wiphy_lock */ +static int __ath9k_wiphy_pause(struct ath_wiphy *aphy) +{ + ieee80211_stop_queues(aphy->hw); + aphy->state = ATH_WIPHY_PAUSING; + /* + * TODO: handle PAUSING->PAUSED for the case where there are multiple + * active vifs (now we do it on the first vif getting ready; should be + * on the last) + */ + ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter, + aphy); + return 0; +} + +int ath9k_wiphy_pause(struct ath_wiphy *aphy) +{ + int ret; + spin_lock_bh(&aphy->sc->wiphy_lock); + ret = __ath9k_wiphy_pause(aphy); + spin_unlock_bh(&aphy->sc->wiphy_lock); + return ret; +} + +static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct ath_wiphy *aphy = data; + struct ath_vif *avp = (void *) vif->drv_priv; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + if (!vif->bss_conf.assoc) + break; + ath9k_send_nullfunc(aphy, vif, avp->bssid, 0); + break; + case NL80211_IFTYPE_AP: + /* Beacon transmission is re-enabled by aphy->state change */ + break; + default: + break; + } +} + +/* caller must hold wiphy_lock */ +static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy) +{ + ieee80211_iterate_active_interfaces_atomic(aphy->hw, + ath9k_unpause_iter, aphy); + aphy->state = ATH_WIPHY_ACTIVE; + ieee80211_wake_queues(aphy->hw); + return 0; +} + +int ath9k_wiphy_unpause(struct ath_wiphy *aphy) +{ + int ret; + spin_lock_bh(&aphy->sc->wiphy_lock); + ret = __ath9k_wiphy_unpause(aphy); + spin_unlock_bh(&aphy->sc->wiphy_lock); + return ret; +} + +static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc) +{ + int i; + if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) + sc->pri_wiphy->state = ATH_WIPHY_PAUSED; + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (sc->sec_wiphy[i] && + sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) + sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED; + } +} + +/* caller must hold wiphy_lock */ +static void __ath9k_wiphy_pause_all(struct ath_softc *sc) +{ + int i; + if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) + __ath9k_wiphy_pause(sc->pri_wiphy); + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (sc->sec_wiphy[i] && + sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE) + __ath9k_wiphy_pause(sc->sec_wiphy[i]); + } +} + +int ath9k_wiphy_select(struct ath_wiphy *aphy) +{ + struct ath_softc *sc = aphy->sc; + bool now; + + spin_lock_bh(&sc->wiphy_lock); + if (__ath9k_wiphy_scanning(sc)) { + /* + * For now, we are using mac80211 sw scan and it expects to + * have full control over channel changes, so avoid wiphy + * scheduling during a scan. 
This could be optimized if the + * scanning control were moved into the driver. + */ + spin_unlock_bh(&sc->wiphy_lock); + return -EBUSY; + } + if (__ath9k_wiphy_pausing(sc)) { + if (sc->wiphy_select_failures == 0) + sc->wiphy_select_first_fail = jiffies; + sc->wiphy_select_failures++; + if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2)) + { + printk(KERN_DEBUG "ath9k: Previous wiphy select timed " + "out; disable/enable hw to recover\n"); + __ath9k_wiphy_mark_all_paused(sc); + /* + * TODO: this workaround to fix hardware is unlikely to + * be specific to virtual wiphy changes. It can happen + * on normal channel change, too, and as such, this + * should really be made more generic. For example, + * tricker radio disable/enable on GTT interrupt burst + * (say, 10 GTT interrupts received without any TX + * frame being completed) + */ + spin_unlock_bh(&sc->wiphy_lock); + ath_radio_disable(sc, aphy->hw); + ath_radio_enable(sc, aphy->hw); + /* Only the primary wiphy hw is used for queuing work */ + ieee80211_queue_work(aphy->sc->hw, + &aphy->sc->chan_work); + return -EBUSY; /* previous select still in progress */ + } + spin_unlock_bh(&sc->wiphy_lock); + return -EBUSY; /* previous select still in progress */ + } + sc->wiphy_select_failures = 0; + + /* Store the new channel */ + sc->chan_idx = aphy->chan_idx; + sc->chan_is_ht = aphy->chan_is_ht; + sc->next_wiphy = aphy; + + __ath9k_wiphy_pause_all(sc); + now = !__ath9k_wiphy_pausing(aphy->sc); + spin_unlock_bh(&sc->wiphy_lock); + + if (now) { + /* Ready to request channel change immediately */ + ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work); + } + + /* + * wiphys will be unpaused in ath9k_tx_status() once channel has been + * changed if any wiphy needs time to become paused. + */ + + return 0; +} + +bool ath9k_wiphy_started(struct ath_softc *sc) +{ + int i; + spin_lock_bh(&sc->wiphy_lock); + if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) { + spin_unlock_bh(&sc->wiphy_lock); + return true; + } + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (sc->sec_wiphy[i] && + sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) { + spin_unlock_bh(&sc->wiphy_lock); + return true; + } + } + spin_unlock_bh(&sc->wiphy_lock); + return false; +} + +static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy, + struct ath_wiphy *selected) +{ + if (selected->state == ATH_WIPHY_SCAN) { + if (aphy == selected) + return; + /* + * Pause all other wiphys for the duration of the scan even if + * they are on the current channel now. 
+ */ + } else if (aphy->chan_idx == selected->chan_idx) + return; + aphy->state = ATH_WIPHY_PAUSED; + ieee80211_stop_queues(aphy->hw); +} + +void ath9k_wiphy_pause_all_forced(struct ath_softc *sc, + struct ath_wiphy *selected) +{ + int i; + spin_lock_bh(&sc->wiphy_lock); + if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) + ath9k_wiphy_pause_chan(sc->pri_wiphy, selected); + for (i = 0; i < sc->num_sec_wiphy; i++) { + if (sc->sec_wiphy[i] && + sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE) + ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected); + } + spin_unlock_bh(&sc->wiphy_lock); +} + +void ath9k_wiphy_work(struct work_struct *work) +{ + struct ath_softc *sc = container_of(work, struct ath_softc, + wiphy_work.work); + struct ath_wiphy *aphy = NULL; + bool first = true; + + spin_lock_bh(&sc->wiphy_lock); + + if (sc->wiphy_scheduler_int == 0) { + /* wiphy scheduler is disabled */ + spin_unlock_bh(&sc->wiphy_lock); + return; + } + +try_again: + sc->wiphy_scheduler_index++; + while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) { + aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1]; + if (aphy && aphy->state != ATH_WIPHY_INACTIVE) + break; + + sc->wiphy_scheduler_index++; + aphy = NULL; + } + if (aphy == NULL) { + sc->wiphy_scheduler_index = 0; + if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) { + if (first) { + first = false; + goto try_again; + } + /* No wiphy is ready to be scheduled */ + } else + aphy = sc->pri_wiphy; + } + + spin_unlock_bh(&sc->wiphy_lock); + + if (aphy && + aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN && + ath9k_wiphy_select(aphy)) { + printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy " + "change\n"); + } + + ieee80211_queue_delayed_work(sc->hw, + &sc->wiphy_work, + sc->wiphy_scheduler_int); +} + +void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int) +{ + cancel_delayed_work_sync(&sc->wiphy_work); + sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int); + if (sc->wiphy_scheduler_int) + ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work, + sc->wiphy_scheduler_int); +} + +/* caller must hold wiphy_lock */ +bool ath9k_all_wiphys_idle(struct ath_softc *sc) +{ + unsigned int i; + if (!sc->pri_wiphy->idle) + return false; + for (i = 0; i < sc->num_sec_wiphy; i++) { + struct ath_wiphy *aphy = sc->sec_wiphy[i]; + if (!aphy) + continue; + if (!aphy->idle) + return false; + } + return true; +} + +/* caller must hold wiphy_lock */ +void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle) +{ + struct ath_softc *sc = aphy->sc; + + aphy->idle = idle; + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, + "Marking %s as %s\n", + wiphy_name(aphy->hw->wiphy), + idle ? 
"idle" : "not-idle"); +} +/* Only bother starting a queue on an active virtual wiphy */ +void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue) +{ + struct ieee80211_hw *hw = sc->pri_wiphy->hw; + unsigned int i; + + spin_lock_bh(&sc->wiphy_lock); + + /* Start the primary wiphy */ + if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) { + ieee80211_wake_queue(hw, skb_queue); + goto unlock; + } + + /* Now start the secondary wiphy queues */ + for (i = 0; i < sc->num_sec_wiphy; i++) { + struct ath_wiphy *aphy = sc->sec_wiphy[i]; + if (!aphy) + continue; + if (aphy->state != ATH_WIPHY_ACTIVE) + continue; + + hw = aphy->hw; + ieee80211_wake_queue(hw, skb_queue); + break; + } + +unlock: + spin_unlock_bh(&sc->wiphy_lock); +} + +/* Go ahead and propagate information to all virtual wiphys, it won't hurt */ +void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue) +{ + struct ieee80211_hw *hw = sc->pri_wiphy->hw; + unsigned int i; + + spin_lock_bh(&sc->wiphy_lock); + + /* Stop the primary wiphy */ + ieee80211_stop_queue(hw, skb_queue); + + /* Now stop the secondary wiphy queues */ + for (i = 0; i < sc->num_sec_wiphy; i++) { + struct ath_wiphy *aphy = sc->sec_wiphy[i]; + if (!aphy) + continue; + hw = aphy->hw; + ieee80211_stop_queue(hw, skb_queue); + } + spin_unlock_bh(&sc->wiphy_lock); +} diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c new file mode 100644 index 0000000..b2c8207 --- /dev/null +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -0,0 +1,2285 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "ath9k.h" + +#define BITS_PER_BYTE 8 +#define OFDM_PLCP_BITS 22 +#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f) +#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) +#define L_STF 8 +#define L_LTF 8 +#define L_SIG 4 +#define HT_SIG 8 +#define HT_STF 4 +#define HT_LTF(_ns) (4 * (_ns)) +#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */ +#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */ +#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) +#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) + +#define OFDM_SIFS_TIME 16 + +static u32 bits_per_symbol[][2] = { + /* 20MHz 40MHz */ + { 26, 54 }, /* 0: BPSK */ + { 52, 108 }, /* 1: QPSK 1/2 */ + { 78, 162 }, /* 2: QPSK 3/4 */ + { 104, 216 }, /* 3: 16-QAM 1/2 */ + { 156, 324 }, /* 4: 16-QAM 3/4 */ + { 208, 432 }, /* 5: 64-QAM 2/3 */ + { 234, 486 }, /* 6: 64-QAM 3/4 */ + { 260, 540 }, /* 7: 64-QAM 5/6 */ + { 52, 108 }, /* 8: BPSK */ + { 104, 216 }, /* 9: QPSK 1/2 */ + { 156, 324 }, /* 10: QPSK 3/4 */ + { 208, 432 }, /* 11: 16-QAM 1/2 */ + { 312, 648 }, /* 12: 16-QAM 3/4 */ + { 416, 864 }, /* 13: 64-QAM 2/3 */ + { 468, 972 }, /* 14: 64-QAM 3/4 */ + { 520, 1080 }, /* 15: 64-QAM 5/6 */ +}; + +#define IS_HT_RATE(_rate) ((_rate) & 0x80) + +static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid, + struct list_head *bf_head); +static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, + struct ath_txq *txq, + struct list_head *bf_q, + int txok, int sendbar); +static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, + struct list_head *head); +static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf); +static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, + int txok); +static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, + int nbad, int txok, bool update_rc); + +enum { + MCS_DEFAULT, + MCS_HT40, + MCS_HT40_SGI, +}; + +static int ath_max_4ms_framelen[3][16] = { + [MCS_DEFAULT] = { + 3216, 6434, 9650, 12868, 19304, 25740, 28956, 32180, + 6430, 12860, 19300, 25736, 38600, 51472, 57890, 64320, + }, + [MCS_HT40] = { + 6684, 13368, 20052, 26738, 40104, 53476, 60156, 66840, + 13360, 26720, 40080, 53440, 80160, 106880, 120240, 133600, + }, + [MCS_HT40_SGI] = { + /* TODO: Only MCS 7 and 15 updated, recalculate the rest */ + 6684, 13368, 20052, 26738, 40104, 53476, 60156, 74200, + 13360, 26720, 40080, 53440, 80160, 106880, 120240, 148400, + } +}; + + +/*********************/ +/* Aggregation logic */ +/*********************/ + +static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) +{ + struct ath_atx_ac *ac = tid->ac; + + if (tid->paused) + return; + + if (tid->sched) + return; + + tid->sched = true; + list_add_tail(&tid->list, &ac->tid_q); + + if (ac->sched) + return; + + ac->sched = true; + list_add_tail(&ac->list, &txq->axq_acq); +} + +static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; + + spin_lock_bh(&txq->axq_lock); + tid->paused++; + spin_unlock_bh(&txq->axq_lock); +} + +static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; + + BUG_ON(tid->paused <= 0); + spin_lock_bh(&txq->axq_lock); + + tid->paused--; + + if (tid->paused > 0) + goto unlock; + + if (list_empty(&tid->buf_q)) + goto unlock; + + ath_tx_queue_tid(txq, tid); + ath_txq_schedule(sc, txq); +unlock: + spin_unlock_bh(&txq->axq_lock); +} + +static void 
ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; + struct ath_buf *bf; + struct list_head bf_head; + INIT_LIST_HEAD(&bf_head); + + BUG_ON(tid->paused <= 0); + spin_lock_bh(&txq->axq_lock); + + tid->paused--; + + if (tid->paused > 0) { + spin_unlock_bh(&txq->axq_lock); + return; + } + + while (!list_empty(&tid->buf_q)) { + bf = list_first_entry(&tid->buf_q, struct ath_buf, list); + BUG_ON(bf_isretried(bf)); + list_move_tail(&bf->list, &bf_head); + ath_tx_send_ht_normal(sc, txq, tid, &bf_head); + } + + spin_unlock_bh(&txq->axq_lock); +} + +static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, + int seqno) +{ + int index, cindex; + + index = ATH_BA_INDEX(tid->seq_start, seqno); + cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); + + tid->tx_buf[cindex] = NULL; + + while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) { + INCR(tid->seq_start, IEEE80211_SEQ_MAX); + INCR(tid->baw_head, ATH_TID_MAX_BUFS); + } +} + +static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, + struct ath_buf *bf) +{ + int index, cindex; + + if (bf_isretried(bf)) + return; + + index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); + cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); + + BUG_ON(tid->tx_buf[cindex] != NULL); + tid->tx_buf[cindex] = bf; + + if (index >= ((tid->baw_tail - tid->baw_head) & + (ATH_TID_MAX_BUFS - 1))) { + tid->baw_tail = cindex; + INCR(tid->baw_tail, ATH_TID_MAX_BUFS); + } +} + +/* + * TODO: For frame(s) that are in the retry state, we will reuse the + * sequence number(s) without setting the retry bit. The + * alternative is to give up on these and BAR the receiver's window + * forward. + */ +static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid) + +{ + struct ath_buf *bf; + struct list_head bf_head; + INIT_LIST_HEAD(&bf_head); + + for (;;) { + if (list_empty(&tid->buf_q)) + break; + + bf = list_first_entry(&tid->buf_q, struct ath_buf, list); + list_move_tail(&bf->list, &bf_head); + + if (bf_isretried(bf)) + ath_tx_update_baw(sc, tid, bf->bf_seqno); + + spin_unlock(&txq->axq_lock); + ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); + spin_lock(&txq->axq_lock); + } + + tid->seq_next = tid->seq_start; + tid->baw_tail = tid->baw_head; +} + +static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, + struct ath_buf *bf) +{ + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + + bf->bf_state.bf_type |= BUF_RETRY; + bf->bf_retries++; + TX_STAT_INC(txq->axq_qnum, a_retries); + + skb = bf->bf_mpdu; + hdr = (struct ieee80211_hdr *)skb->data; + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); +} + +static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) +{ + struct ath_buf *tbf; + + spin_lock_bh(&sc->tx.txbuflock); + if (WARN_ON(list_empty(&sc->tx.txbuf))) { + spin_unlock_bh(&sc->tx.txbuflock); + return NULL; + } + tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); + list_del(&tbf->list); + spin_unlock_bh(&sc->tx.txbuflock); + + ATH_TXBUF_RESET(tbf); + + tbf->aphy = bf->aphy; + tbf->bf_mpdu = bf->bf_mpdu; + tbf->bf_buf_addr = bf->bf_buf_addr; + *(tbf->bf_desc) = *(bf->bf_desc); + tbf->bf_state = bf->bf_state; + tbf->bf_dmacontext = bf->bf_dmacontext; + + return tbf; +} + +static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, + struct ath_buf *bf, struct list_head *bf_q, + int txok) +{ + struct ath_node *an = NULL; + struct sk_buff *skb; + 
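The two helpers above, ath_tx_update_baw() and ath_tx_addto_baw(), keep the outgoing block-ack window in a small circular buffer: a subframe's slot is baw_head plus its sequence-number distance from seq_start, and once the oldest slot drains, seq_start and baw_head advance together. A self-contained sketch of that bookkeeping follows; it assumes ATH_BA_INDEX() is the modular distance from the window start and INCR() a wrapping increment (consistent with how they are used here), and every name in the sketch is illustrative rather than taken from the driver:

#include <stdbool.h>
#include <stdio.h>

#define SEQ_MAX       4096    /* 802.11 sequence-number space */
#define TID_MAX_BUFS  64      /* slots tracked per TID (illustrative size) */

#define BA_INDEX(st, seq)  (((seq) - (st)) & (SEQ_MAX - 1))
#define WRAP_INC(v, sz)    ((v) = ((v) + 1) % (sz))

struct baw {
        unsigned int seq_start;            /* lowest unacked sequence number */
        unsigned int head, tail;           /* circular-buffer bounds */
        bool outstanding[TID_MAX_BUFS];    /* stands in for tid->tx_buf[] */
};

/* mirrors ath_tx_addto_baw(): mark seqno as in flight */
static void baw_add(struct baw *w, unsigned int seqno)
{
        unsigned int idx = BA_INDEX(w->seq_start, seqno);
        unsigned int slot = (w->head + idx) % TID_MAX_BUFS;

        w->outstanding[slot] = true;
        if (idx >= ((w->tail - w->head) & (TID_MAX_BUFS - 1))) {
                w->tail = slot;
                WRAP_INC(w->tail, TID_MAX_BUFS);
        }
}

/* mirrors ath_tx_update_baw(): complete seqno, slide the window forward */
static void baw_complete(struct baw *w, unsigned int seqno)
{
        unsigned int slot = (w->head + BA_INDEX(w->seq_start, seqno)) % TID_MAX_BUFS;

        w->outstanding[slot] = false;
        while (w->head != w->tail && !w->outstanding[w->head]) {
                w->seq_start = (w->seq_start + 1) & (SEQ_MAX - 1);
                WRAP_INC(w->head, TID_MAX_BUFS);
        }
}

int main(void)
{
        struct baw w = { .seq_start = 100 };

        baw_add(&w, 100);
        baw_add(&w, 101);
        baw_complete(&w, 101);                 /* out of order: window cannot slide yet */
        printf("seq_start=%u\n", w.seq_start); /* still 100 */
        baw_complete(&w, 100);                 /* head slot drained: window slides past 101 */
        printf("seq_start=%u\n", w.seq_start); /* now 102 */
        return 0;
}

Completions that arrive out of order only clear their slot; the window slides when the head slot itself finishes, which keeps seq_start aligned with the receiver's expected window start.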
struct ieee80211_sta *sta; + struct ieee80211_hw *hw; + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *tx_info; + struct ath_atx_tid *tid = NULL; + struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; + struct ath_desc *ds = bf_last->bf_desc; + struct list_head bf_head, bf_pending; + u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0; + u32 ba[WME_BA_BMP_SIZE >> 5]; + int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; + bool rc_update = true; + + skb = bf->bf_mpdu; + hdr = (struct ieee80211_hdr *)skb->data; + + tx_info = IEEE80211_SKB_CB(skb); + hw = bf->aphy->hw; + + rcu_read_lock(); + + /* XXX: use ieee80211_find_sta! */ + sta = ieee80211_find_sta_by_hw(hw, hdr->addr1); + if (!sta) { + rcu_read_unlock(); + return; + } + + an = (struct ath_node *)sta->drv_priv; + tid = ATH_AN_2_TID(an, bf->bf_tidno); + + isaggr = bf_isaggr(bf); + memset(ba, 0, WME_BA_BMP_SIZE >> 3); + + if (isaggr && txok) { + if (ATH_DS_TX_BA(ds)) { + seq_st = ATH_DS_BA_SEQ(ds); + memcpy(ba, ATH_DS_BA_BITMAP(ds), + WME_BA_BMP_SIZE >> 3); + } else { + /* + * AR5416 can become deaf/mute when BA + * issue happens. Chip needs to be reset. + * But AP code may have sychronization issues + * when perform internal reset in this routine. + * Only enable reset in STA mode for now. + */ + if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) + needreset = 1; + } + } + + INIT_LIST_HEAD(&bf_pending); + INIT_LIST_HEAD(&bf_head); + + nbad = ath_tx_num_badfrms(sc, bf, txok); + while (bf) { + txfail = txpending = 0; + bf_next = bf->bf_next; + + if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { + /* transmit completion, subframe is + * acked by block ack */ + acked_cnt++; + } else if (!isaggr && txok) { + /* transmit completion */ + acked_cnt++; + } else { + if (!(tid->state & AGGR_CLEANUP) && + ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { + if (bf->bf_retries < ATH_MAX_SW_RETRIES) { + ath_tx_set_retry(sc, txq, bf); + txpending = 1; + } else { + bf->bf_state.bf_type |= BUF_XRETRY; + txfail = 1; + sendbar = 1; + txfail_cnt++; + } + } else { + /* + * cleanup in progress, just fail + * the un-acked sub-frames + */ + txfail = 1; + } + } + + if (bf_next == NULL) { + /* + * Make sure the last desc is reclaimed if it + * not a holding desc. 
+ */ + if (!bf_last->bf_stale) + list_move_tail(&bf->list, &bf_head); + else + INIT_LIST_HEAD(&bf_head); + } else { + BUG_ON(list_empty(bf_q)); + list_move_tail(&bf->list, &bf_head); + } + + if (!txpending) { + /* + * complete the acked-ones/xretried ones; update + * block-ack window + */ + spin_lock_bh(&txq->axq_lock); + ath_tx_update_baw(sc, tid, bf->bf_seqno); + spin_unlock_bh(&txq->axq_lock); + + if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { + ath_tx_rc_status(bf, ds, nbad, txok, true); + rc_update = false; + } else { + ath_tx_rc_status(bf, ds, nbad, txok, false); + } + + ath_tx_complete_buf(sc, bf, txq, &bf_head, !txfail, sendbar); + } else { + /* retry the un-acked ones */ + if (bf->bf_next == NULL && bf_last->bf_stale) { + struct ath_buf *tbf; + + tbf = ath_clone_txbuf(sc, bf_last); + /* + * Update tx baw and complete the frame with + * failed status if we run out of tx buf + */ + if (!tbf) { + spin_lock_bh(&txq->axq_lock); + ath_tx_update_baw(sc, tid, + bf->bf_seqno); + spin_unlock_bh(&txq->axq_lock); + + bf->bf_state.bf_type |= BUF_XRETRY; + ath_tx_rc_status(bf, ds, nbad, + 0, false); + ath_tx_complete_buf(sc, bf, txq, + &bf_head, 0, 0); + break; + } + + ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc); + list_add_tail(&tbf->list, &bf_head); + } else { + /* + * Clear descriptor status words for + * software retry + */ + ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc); + } + + /* + * Put this buffer to the temporary pending + * queue to retain ordering + */ + list_splice_tail_init(&bf_head, &bf_pending); + } + + bf = bf_next; + } + + if (tid->state & AGGR_CLEANUP) { + if (tid->baw_head == tid->baw_tail) { + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_CLEANUP; + + /* send buffered frames as singles */ + ath_tx_flush_tid(sc, tid); + } + rcu_read_unlock(); + return; + } + + /* prepend un-acked frames to the beginning of the pending frame queue */ + if (!list_empty(&bf_pending)) { + spin_lock_bh(&txq->axq_lock); + list_splice(&bf_pending, &tid->buf_q); + ath_tx_queue_tid(txq, tid); + spin_unlock_bh(&txq->axq_lock); + } + + rcu_read_unlock(); + + if (needreset) + ath_reset(sc, false); +} + +static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, + struct ath_atx_tid *tid) +{ + struct sk_buff *skb; + struct ieee80211_tx_info *tx_info; + struct ieee80211_tx_rate *rates; + u32 max_4ms_framelen, frmlen; + u16 aggr_limit, legacy = 0; + int i; + + skb = bf->bf_mpdu; + tx_info = IEEE80211_SKB_CB(skb); + rates = tx_info->control.rates; + + /* + * Find the lowest frame length among the rate series that will have a + * 4ms transmit duration. + * TODO - TXOP limit needs to be considered. + */ + max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; + + for (i = 0; i < 4; i++) { + if (rates[i].count) { + int modeidx; + if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) { + legacy = 1; + break; + } + + if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) + modeidx = MCS_HT40_SGI; + else if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + modeidx = MCS_HT40; + else + modeidx = MCS_DEFAULT; + + frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx]; + max_4ms_framelen = min(max_4ms_framelen, frmlen); + } + } + + /* + * limit aggregate size by the minimum rate if rate selected is + * not a probe rate, if rate selected is a probe rate then + * avoid aggregation of this packet. 
+ */ + if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) + return 0; + + if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED) + aggr_limit = min((max_4ms_framelen * 3) / 8, + (u32)ATH_AMPDU_LIMIT_MAX); + else + aggr_limit = min(max_4ms_framelen, + (u32)ATH_AMPDU_LIMIT_MAX); + + /* + * h/w can accept aggregates upto 16 bit lengths (65535). + * The IE, however can hold upto 65536, which shows up here + * as zero. Ignore 65536 since we are constrained by hw. + */ + if (tid->an->maxampdu) + aggr_limit = min(aggr_limit, tid->an->maxampdu); + + return aggr_limit; +} + +/* + * Returns the number of delimiters to be added to + * meet the minimum required mpdudensity. + */ +static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, + struct ath_buf *bf, u16 frmlen) +{ + struct sk_buff *skb = bf->bf_mpdu; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + u32 nsymbits, nsymbols; + u16 minlen; + u8 flags, rix; + int width, half_gi, ndelim, mindelim; + + /* Select standard number of delimiters based on frame length alone */ + ndelim = ATH_AGGR_GET_NDELIM(frmlen); + + /* + * If encryption enabled, hardware requires some more padding between + * subframes. + * TODO - this could be improved to be dependent on the rate. + * The hardware can keep up at lower rates, but not higher rates + */ + if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) + ndelim += ATH_AGGR_ENCRYPTDELIM; + + /* + * Convert desired mpdu density from microeconds to bytes based + * on highest rate in rate series (i.e. first rate) to determine + * required minimum length for subframe. Take into account + * whether high rate is 20 or 40Mhz and half or full GI. + * + * If there is no mpdu density restriction, no further calculation + * is needed. + */ + + if (tid->an->mpdudensity == 0) + return ndelim; + + rix = tx_info->control.rates[0].idx; + flags = tx_info->control.rates[0].flags; + width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0; + half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 
1 : 0; + + if (half_gi) + nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity); + else + nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity); + + if (nsymbols == 0) + nsymbols = 1; + + nsymbits = bits_per_symbol[rix][width]; + minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; + + if (frmlen < minlen) { + mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; + ndelim = max(mindelim, ndelim); + } + + return ndelim; +} + +static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, + struct ath_txq *txq, + struct ath_atx_tid *tid, + struct list_head *bf_q) +{ +#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) + struct ath_buf *bf, *bf_first, *bf_prev = NULL; + int rl = 0, nframes = 0, ndelim, prev_al = 0; + u16 aggr_limit = 0, al = 0, bpad = 0, + al_delta, h_baw = tid->baw_size / 2; + enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; + + bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); + + do { + bf = list_first_entry(&tid->buf_q, struct ath_buf, list); + + /* do not step over block-ack window */ + if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) { + status = ATH_AGGR_BAW_CLOSED; + break; + } + + if (!rl) { + aggr_limit = ath_lookup_rate(sc, bf, tid); + rl = 1; + } + + /* do not exceed aggregation limit */ + al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen; + + if (nframes && + (aggr_limit < (al + bpad + al_delta + prev_al))) { + status = ATH_AGGR_LIMITED; + break; + } + + /* do not exceed subframe limit */ + if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { + status = ATH_AGGR_LIMITED; + break; + } + nframes++; + + /* add padding for previous frame to aggregation length */ + al += bpad + al_delta; + + /* + * Get the delimiters needed to meet the MPDU + * density for this node. + */ + ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen); + bpad = PADBYTES(al_delta) + (ndelim << 2); + + bf->bf_next = NULL; + bf->bf_desc->ds_link = 0; + + /* link buffers of this frame to the aggregate */ + ath_tx_addto_baw(sc, tid, bf); + ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim); + list_move_tail(&bf->list, bf_q); + if (bf_prev) { + bf_prev->bf_next = bf; + bf_prev->bf_desc->ds_link = bf->bf_daddr; + } + bf_prev = bf; + + } while (!list_empty(&tid->buf_q)); + + bf_first->bf_al = al; + bf_first->bf_nframes = nframes; + + return status; +#undef PADBYTES +} + +static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid) +{ + struct ath_buf *bf; + enum ATH_AGGR_STATUS status; + struct list_head bf_q; + + do { + if (list_empty(&tid->buf_q)) + return; + + INIT_LIST_HEAD(&bf_q); + + status = ath_tx_form_aggr(sc, txq, tid, &bf_q); + + /* + * no frames picked up to be aggregated; + * block-ack window is not open. 
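A stand-alone sketch of the MPDU-density padding arithmetic used above; the density, bits-per-symbol figure and 4 us symbol time here are assumed example values, not driver constants.

#include <stdio.h>

#define DELIM_SZ	4	/* bytes covered by one delimiter */
#define BITS_PER_BYTE	8

int main(void)
{
	int frmlen = 40;		/* subframe length in bytes */
	int ndelim = 1;			/* delimiters already required by frame length */
	int mpdudensity_us = 8;		/* peer-advertised MPDU density (assumed) */
	int nsymbits = 260;		/* bits per symbol at the first rate (assumed) */

	/* symbols that fit in the density interval, at ~4 us per OFDM symbol */
	int nsymbols = mpdudensity_us / 4;
	if (nsymbols == 0)
		nsymbols = 1;

	/* bytes the density demands between subframe starts */
	int minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
	if (frmlen < minlen) {
		int mindelim = (minlen - frmlen) / DELIM_SZ;
		if (mindelim > ndelim)
			ndelim = mindelim;	/* pad out with extra delimiters */
	}

	printf("delimiters needed: %d\n", ndelim);
	return 0;
}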
+ */ + if (list_empty(&bf_q)) + break; + + bf = list_first_entry(&bf_q, struct ath_buf, list); + bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); + + /* if only one frame, send as non-aggregate */ + if (bf->bf_nframes == 1) { + bf->bf_state.bf_type &= ~BUF_AGGR; + ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc); + ath_buf_set_rate(sc, bf); + ath_tx_txqaddbuf(sc, txq, &bf_q); + continue; + } + + /* setup first desc of aggregate */ + bf->bf_state.bf_type |= BUF_AGGR; + ath_buf_set_rate(sc, bf); + ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); + + /* anchor last desc of aggregate */ + ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc); + + ath_tx_txqaddbuf(sc, txq, &bf_q); + TX_STAT_INC(txq->axq_qnum, a_aggr); + + } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH && + status != ATH_AGGR_BAW_CLOSED); +} + +void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, + u16 tid, u16 *ssn) +{ + struct ath_atx_tid *txtid; + struct ath_node *an; + + an = (struct ath_node *)sta->drv_priv; + txtid = ATH_AN_2_TID(an, tid); + txtid->state |= AGGR_ADDBA_PROGRESS; + ath_tx_pause_tid(sc, txtid); + *ssn = txtid->seq_start; +} + +void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) +{ + struct ath_node *an = (struct ath_node *)sta->drv_priv; + struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); + struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; + struct ath_buf *bf; + struct list_head bf_head; + INIT_LIST_HEAD(&bf_head); + + if (txtid->state & AGGR_CLEANUP) + return; + + if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { + txtid->state &= ~AGGR_ADDBA_PROGRESS; + return; + } + + ath_tx_pause_tid(sc, txtid); + + /* drop all software retried frames and mark this TID */ + spin_lock_bh(&txq->axq_lock); + while (!list_empty(&txtid->buf_q)) { + bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); + if (!bf_isretried(bf)) { + /* + * NB: it's based on the assumption that + * software retried frame will always stay + * at the head of software queue. 
+ */ + break; + } + list_move_tail(&bf->list, &bf_head); + ath_tx_update_baw(sc, txtid, bf->bf_seqno); + ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); + } + spin_unlock_bh(&txq->axq_lock); + + if (txtid->baw_head != txtid->baw_tail) { + txtid->state |= AGGR_CLEANUP; + } else { + txtid->state &= ~AGGR_ADDBA_COMPLETE; + ath_tx_flush_tid(sc, txtid); + } +} + +void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) +{ + struct ath_atx_tid *txtid; + struct ath_node *an; + + an = (struct ath_node *)sta->drv_priv; + + if (sc->sc_flags & SC_OP_TXAGGR) { + txtid = ATH_AN_2_TID(an, tid); + txtid->baw_size = + IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; + txtid->state |= AGGR_ADDBA_COMPLETE; + txtid->state &= ~AGGR_ADDBA_PROGRESS; + ath_tx_resume_tid(sc, txtid); + } +} + +bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno) +{ + struct ath_atx_tid *txtid; + + if (!(sc->sc_flags & SC_OP_TXAGGR)) + return false; + + txtid = ATH_AN_2_TID(an, tidno); + + if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS))) + return true; + return false; +} + +/********************/ +/* Queue Management */ +/********************/ + +static void ath_txq_drain_pending_buffers(struct ath_softc *sc, + struct ath_txq *txq) +{ + struct ath_atx_ac *ac, *ac_tmp; + struct ath_atx_tid *tid, *tid_tmp; + + list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { + list_del(&ac->list); + ac->sched = false; + list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { + list_del(&tid->list); + tid->sched = false; + ath_tid_drain(sc, txq, tid); + } + } +} + +struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_tx_queue_info qi; + int qnum; + + memset(&qi, 0, sizeof(qi)); + qi.tqi_subtype = subtype; + qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; + qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; + qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; + qi.tqi_physCompBuf = 0; + + /* + * Enable interrupts only for EOL and DESC conditions. + * We mark tx descriptors to receive a DESC interrupt + * when a tx queue gets deep; otherwise waiting for the + * EOL to reap descriptors. Note that this is done to + * reduce interrupt load and this only defers reaping + * descriptors, never transmitting frames. Aside from + * reducing interrupts this also permits more concurrency. + * The only potential downside is if the tx queue backs + * up in which case the top half of the kernel may backup + * due to a lack of tx descriptors. + * + * The UAPSD queue is an exception, since we take a desc- + * based intr on the EOSP frames. 
+	 */
+	if (qtype == ATH9K_TX_QUEUE_UAPSD)
+		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
+	else
+		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
+				TXQ_FLAG_TXDESCINT_ENABLE;
+	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
+	if (qnum == -1) {
+		/*
+		 * NB: don't print a message, this happens
+		 * normally on parts with too few tx queues
+		 */
+		return NULL;
+	}
+	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
+		ath_print(common, ATH_DBG_FATAL,
+			  "qnum %u out of range, max %u!\n",
+			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
+		ath9k_hw_releasetxqueue(ah, qnum);
+		return NULL;
+	}
+	if (!ATH_TXQ_SETUP(sc, qnum)) {
+		struct ath_txq *txq = &sc->tx.txq[qnum];
+
+		txq->axq_qnum = qnum;
+		txq->axq_link = NULL;
+		INIT_LIST_HEAD(&txq->axq_q);
+		INIT_LIST_HEAD(&txq->axq_acq);
+		spin_lock_init(&txq->axq_lock);
+		txq->axq_depth = 0;
+		txq->axq_tx_inprogress = false;
+		sc->tx.txqsetup |= 1 << qnum;
+	}
+	return &sc->tx.txq[qnum];
+}
+
+int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
+{
+	int qnum;
+
+	switch (qtype) {
+	case ATH9K_TX_QUEUE_DATA:
+		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
+			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+				  "HAL AC %u out of range, max %zu!\n",
+				  haltype, ARRAY_SIZE(sc->tx.hwq_map));
+			return -1;
+		}
+		qnum = sc->tx.hwq_map[haltype];
+		break;
+	case ATH9K_TX_QUEUE_BEACON:
+		qnum = sc->beacon.beaconq;
+		break;
+	case ATH9K_TX_QUEUE_CAB:
+		qnum = sc->beacon.cabq->axq_qnum;
+		break;
+	default:
+		qnum = -1;
+	}
+	return qnum;
+}
+
+struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
+{
+	struct ath_txq *txq = NULL;
+	u16 skb_queue = skb_get_queue_mapping(skb);
+	int qnum;
+
+	qnum = ath_get_hal_qnum(skb_queue, sc);
+	txq = &sc->tx.txq[qnum];
+
+	spin_lock_bh(&txq->axq_lock);
+
+	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
+			  "TX queue: %d is full, depth: %d\n",
+			  qnum, txq->axq_depth);
+		ath_mac80211_stop_queue(sc, skb_queue);
+		txq->stopped = 1;
+		spin_unlock_bh(&txq->axq_lock);
+		return NULL;
+	}
+
+	spin_unlock_bh(&txq->axq_lock);
+
+	return txq;
+}
+
+int ath_txq_update(struct ath_softc *sc, int qnum,
+		   struct ath9k_tx_queue_info *qinfo)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	int error = 0;
+	struct ath9k_tx_queue_info qi;
+
+	if (qnum == sc->beacon.beaconq) {
+		/*
+		 * XXX: for beacon queue, we just save the parameter.
+		 * It will be picked up by ath_beaconq_config when
+		 * it's necessary.
+		 */
+		sc->beacon.beacon_qi = *qinfo;
+		return 0;
+	}
+
+	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
+
+	ath9k_hw_get_txq_props(ah, qnum, &qi);
+	qi.tqi_aifs = qinfo->tqi_aifs;
+	qi.tqi_cwmin = qinfo->tqi_cwmin;
+	qi.tqi_cwmax = qinfo->tqi_cwmax;
+	qi.tqi_burstTime = qinfo->tqi_burstTime;
+	qi.tqi_readyTime = qinfo->tqi_readyTime;
+
+	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+			  "Unable to update hardware queue %u!\n", qnum);
+		error = -EIO;
+	} else {
+		ath9k_hw_resettxqueue(ah, qnum);
+	}
+
+	return error;
+}
+
+int ath_cabq_update(struct ath_softc *sc)
+{
+	struct ath9k_tx_queue_info qi;
+	int qnum = sc->beacon.cabq->axq_qnum;
+
+	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
+	/*
+	 * Ensure the readytime % is within the bounds.
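A minimal sketch of the ready-time handling that follows: clamp the percentage, then convert it to an absolute fraction of the beacon interval. The bounds and beacon interval here are assumed example values.

#include <stdio.h>

#define READY_TIME_LO_BOUND	50	/* percent (assumed) */
#define READY_TIME_HI_BOUND	80	/* percent (assumed) */

int main(void)
{
	int beacon_interval = 100;	/* TU */
	int readytime_pct = 95;		/* requested percentage */

	/* keep the percentage inside the allowed window */
	if (readytime_pct < READY_TIME_LO_BOUND)
		readytime_pct = READY_TIME_LO_BOUND;
	else if (readytime_pct > READY_TIME_HI_BOUND)
		readytime_pct = READY_TIME_HI_BOUND;

	printf("CAB ready time: %d TU\n",
	       (beacon_interval * readytime_pct) / 100);
	return 0;
}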
+ */ + if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND) + sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND; + else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) + sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; + + qi.tqi_readyTime = (sc->beacon_interval * + sc->config.cabqReadytime) / 100; + ath_txq_update(sc, qnum, &qi); + + return 0; +} + +/* + * Drain a given TX queue (could be Beacon or Data) + * + * This assumes output has been stopped and + * we do not need to block ath_tx_tasklet. + */ +void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) +{ + struct ath_buf *bf, *lastbf; + struct list_head bf_head; + + INIT_LIST_HEAD(&bf_head); + + for (;;) { + spin_lock_bh(&txq->axq_lock); + + if (list_empty(&txq->axq_q)) { + txq->axq_link = NULL; + spin_unlock_bh(&txq->axq_lock); + break; + } + + bf = list_first_entry(&txq->axq_q, struct ath_buf, list); + + if (bf->bf_stale) { + list_del(&bf->list); + spin_unlock_bh(&txq->axq_lock); + + spin_lock_bh(&sc->tx.txbuflock); + list_add_tail(&bf->list, &sc->tx.txbuf); + spin_unlock_bh(&sc->tx.txbuflock); + continue; + } + + lastbf = bf->bf_lastbf; + if (!retry_tx) + lastbf->bf_desc->ds_txstat.ts_flags = + ATH9K_TX_SW_ABORTED; + + /* remove ath_buf's of the same mpdu from txq */ + list_cut_position(&bf_head, &txq->axq_q, &lastbf->list); + txq->axq_depth--; + + spin_unlock_bh(&txq->axq_lock); + + if (bf_isampdu(bf)) + ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0); + else + ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); + } + + spin_lock_bh(&txq->axq_lock); + txq->axq_tx_inprogress = false; + spin_unlock_bh(&txq->axq_lock); + + /* flush any pending frames if aggregation is enabled */ + if (sc->sc_flags & SC_OP_TXAGGR) { + if (!retry_tx) { + spin_lock_bh(&txq->axq_lock); + ath_txq_drain_pending_buffers(sc, txq); + spin_unlock_bh(&txq->axq_lock); + } + } +} + +void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_txq *txq; + int i, npend = 0; + + if (sc->sc_flags & SC_OP_INVALID) + return; + + /* Stop beacon queue */ + ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); + + /* Stop data queues */ + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (ATH_TXQ_SETUP(sc, i)) { + txq = &sc->tx.txq[i]; + ath9k_hw_stoptxdma(ah, txq->axq_qnum); + npend += ath9k_hw_numtxpending(ah, txq->axq_qnum); + } + } + + if (npend) { + int r; + + ath_print(common, ATH_DBG_FATAL, + "Unable to stop TxDMA. 
Reset HAL!\n");
+
+		spin_lock_bh(&sc->sc_resetlock);
+		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
+		if (r)
+			ath_print(common, ATH_DBG_FATAL,
+				  "Unable to reset hardware; reset status %d\n",
+				  r);
+		spin_unlock_bh(&sc->sc_resetlock);
+	}
+
+	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+		if (ATH_TXQ_SETUP(sc, i))
+			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
+	}
+}
+
+void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
+{
+	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
+	sc->tx.txqsetup &= ~(1 << txq->axq_qnum);
+}
+
+void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
+{
+	struct ath_atx_ac *ac;
+	struct ath_atx_tid *tid;
+
+	if (list_empty(&txq->axq_acq))
+		return;
+
+	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
+	list_del(&ac->list);
+	ac->sched = false;
+
+	do {
+		if (list_empty(&ac->tid_q))
+			return;
+
+		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
+		list_del(&tid->list);
+		tid->sched = false;
+
+		if (tid->paused)
+			continue;
+
+		ath_tx_sched_aggr(sc, txq, tid);
+
+		/*
+		 * add tid to round-robin queue if more frames
+		 * are pending for the tid
+		 */
+		if (!list_empty(&tid->buf_q))
+			ath_tx_queue_tid(txq, tid);
+
+		break;
+	} while (!list_empty(&ac->tid_q));
+
+	if (!list_empty(&ac->tid_q)) {
+		if (!ac->sched) {
+			ac->sched = true;
+			list_add_tail(&ac->list, &txq->axq_acq);
+		}
+	}
+}
+
+int ath_tx_setup(struct ath_softc *sc, int haltype)
+{
+	struct ath_txq *txq;
+
+	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
+		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+			  "HAL AC %u out of range, max %zu!\n",
+			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
+		return 0;
+	}
+	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
+	if (txq != NULL) {
+		sc->tx.hwq_map[haltype] = txq->axq_qnum;
+		return 1;
+	} else
+		return 0;
+}
+
+/***********/
+/* TX, DMA */
+/***********/
+
+/*
+ * Insert a chain of ath_buf (descriptors) on a txq and
+ * assume the descriptors are already chained together by caller.
+ */
+static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
+			     struct list_head *head)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath_buf *bf;
+
+	/*
+	 * Insert the frame on the outbound list and
+	 * pass it on to the hardware.
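The "program TXDP or patch the previous tail's link pointer" decision made below can be illustrated with a toy queue; every type and name in this sketch is invented for illustration and stands outside the driver.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct toy_desc {
	uint32_t daddr;		/* DMA address of this descriptor */
	uint32_t link;		/* DMA address of the next descriptor */
};

struct toy_txq {
	uint32_t txdp;		/* what we'd write to the hw TXDP register */
	uint32_t *tail_link;	/* &last_desc->link of the chain in flight */
};

static void toy_addbuf(struct toy_txq *q, struct toy_desc *first,
		       struct toy_desc *last)
{
	if (q->tail_link == NULL)
		q->txdp = first->daddr;		/* queue was idle: start here */
	else
		*q->tail_link = first->daddr;	/* append to the in-flight chain */
	q->tail_link = &last->link;
}

int main(void)
{
	struct toy_txq q = { 0, NULL };
	struct toy_desc a = { 0x1000, 0 }, b = { 0x2000, 0 };

	toy_addbuf(&q, &a, &a);
	toy_addbuf(&q, &b, &b);
	printf("TXDP=0x%x, a.link=0x%x\n", q.txdp, a.link);
	return 0;
}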
+ */ + + if (list_empty(head)) + return; + + bf = list_first_entry(head, struct ath_buf, list); + + list_splice_tail_init(head, &txq->axq_q); + txq->axq_depth++; + + ath_print(common, ATH_DBG_QUEUE, + "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); + + if (txq->axq_link == NULL) { + ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); + ath_print(common, ATH_DBG_XMIT, + "TXDP[%u] = %llx (%p)\n", + txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); + } else { + *txq->axq_link = bf->bf_daddr; + ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n", + txq->axq_qnum, txq->axq_link, + ito64(bf->bf_daddr), bf->bf_desc); + } + txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link); + ath9k_hw_txstart(ah, txq->axq_qnum); +} + +static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) +{ + struct ath_buf *bf = NULL; + + spin_lock_bh(&sc->tx.txbuflock); + + if (unlikely(list_empty(&sc->tx.txbuf))) { + spin_unlock_bh(&sc->tx.txbuflock); + return NULL; + } + + bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); + list_del(&bf->list); + + spin_unlock_bh(&sc->tx.txbuflock); + + return bf; +} + +static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, + struct list_head *bf_head, + struct ath_tx_control *txctl) +{ + struct ath_buf *bf; + + bf = list_first_entry(bf_head, struct ath_buf, list); + bf->bf_state.bf_type |= BUF_AMPDU; + TX_STAT_INC(txctl->txq->axq_qnum, a_queued); + + /* + * Do not queue to h/w when any of the following conditions is true: + * - there are pending frames in software queue + * - the TID is currently paused for ADDBA/BAR request + * - seqno is not within block-ack window + * - h/w queue depth exceeds low water mark + */ + if (!list_empty(&tid->buf_q) || tid->paused || + !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) || + txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) { + /* + * Add this frame to software queue for scheduling later + * for aggregation. 
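The four conditions above can be condensed into a stand-alone predicate. The window arithmetic assumes 12-bit sequence numbers; the low-water mark is an assumed value.

#include <stdbool.h>
#include <stdio.h>

#define SEQ_MAX		4096
#define MIN_QDEPTH	2	/* assumed hw low-water mark */

static bool within_baw(int start, int bufsize, int seqno)
{
	/* distance from the window start, modulo the sequence space */
	return ((seqno - start) & (SEQ_MAX - 1)) < bufsize;
}

static bool must_defer(int swq_len, bool paused, int baw_start, int baw_size,
		       int seqno, int hwq_depth)
{
	return swq_len > 0 || paused ||
	       !within_baw(baw_start, baw_size, seqno) ||
	       hwq_depth >= MIN_QDEPTH;
}

int main(void)
{
	/* frame outside the block-ack window: must go to the software queue */
	printf("defer? %d\n", must_defer(0, false, 100, 64, 170, 1));
	return 0;
}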
+ */ + list_move_tail(&bf->list, &tid->buf_q); + ath_tx_queue_tid(txctl->txq, tid); + return; + } + + /* Add sub-frame to BAW */ + ath_tx_addto_baw(sc, tid, bf); + + /* Queue to h/w without aggregation */ + bf->bf_nframes = 1; + bf->bf_lastbf = bf; + ath_buf_set_rate(sc, bf); + ath_tx_txqaddbuf(sc, txctl->txq, bf_head); +} + +static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid, + struct list_head *bf_head) +{ + struct ath_buf *bf; + + bf = list_first_entry(bf_head, struct ath_buf, list); + bf->bf_state.bf_type &= ~BUF_AMPDU; + + /* update starting sequence number for subsequent ADDBA request */ + INCR(tid->seq_start, IEEE80211_SEQ_MAX); + + bf->bf_nframes = 1; + bf->bf_lastbf = bf; + ath_buf_set_rate(sc, bf); + ath_tx_txqaddbuf(sc, txq, bf_head); + TX_STAT_INC(txq->axq_qnum, queued); +} + +static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, + struct list_head *bf_head) +{ + struct ath_buf *bf; + + bf = list_first_entry(bf_head, struct ath_buf, list); + + bf->bf_lastbf = bf; + bf->bf_nframes = 1; + ath_buf_set_rate(sc, bf); + ath_tx_txqaddbuf(sc, txq, bf_head); + TX_STAT_INC(txq->axq_qnum, queued); +} + +static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + enum ath9k_pkt_type htype; + __le16 fc; + + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + + if (ieee80211_is_beacon(fc)) + htype = ATH9K_PKT_TYPE_BEACON; + else if (ieee80211_is_probe_resp(fc)) + htype = ATH9K_PKT_TYPE_PROBE_RESP; + else if (ieee80211_is_atim(fc)) + htype = ATH9K_PKT_TYPE_ATIM; + else if (ieee80211_is_pspoll(fc)) + htype = ATH9K_PKT_TYPE_PSPOLL; + else + htype = ATH9K_PKT_TYPE_NORMAL; + + return htype; +} + +static bool is_pae(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + __le16 fc; + + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + + if (ieee80211_is_data(fc)) { + if (ieee80211_is_nullfunc(fc) || + /* Port Access Entity (IEEE 802.1X) */ + (skb->protocol == cpu_to_be16(ETH_P_PAE))) { + return true; + } + } + + return false; +} + +static int get_hw_crypto_keytype(struct sk_buff *skb) +{ + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + + if (tx_info->control.hw_key) { + if (tx_info->control.hw_key->alg == ALG_WEP) + return ATH9K_KEY_TYPE_WEP; + else if (tx_info->control.hw_key->alg == ALG_TKIP) + return ATH9K_KEY_TYPE_TKIP; + else if (tx_info->control.hw_key->alg == ALG_CCMP) + return ATH9K_KEY_TYPE_AES; + } + + return ATH9K_KEY_TYPE_CLEAR; +} + +static void assign_aggr_tid_seqno(struct sk_buff *skb, + struct ath_buf *bf) +{ + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr; + struct ath_node *an; + struct ath_atx_tid *tid; + __le16 fc; + u8 *qc; + + if (!tx_info->control.sta) + return; + + an = (struct ath_node *)tx_info->control.sta->drv_priv; + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + bf->bf_tidno = qc[0] & 0xf; + } + + /* + * For HT capable stations, we save tidno for later use. + * We also override seqno set by upper layer with the one + * in tx aggregation state. 
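A small sketch of the TID extraction and per-TID sequence-number stamping described above; field layout follows 802.11 (4-bit fragment number, 12-bit sequence number), and the starting values are made up.

#include <stdint.h>
#include <stdio.h>

#define SEQ_MAX		4096
#define SEQ_SEQ_SHIFT	4

int main(void)
{
	uint8_t qos_ctl = 0x05;			/* first byte of the QoS control field */
	int tidno = qos_ctl & 0xf;		/* TID 5 */

	uint16_t tid_seq_next = 2047;		/* driver's per-TID counter */
	uint16_t seq_ctrl;

	/* stamp the frame and advance the counter modulo 2^12 */
	seq_ctrl = (uint16_t)(tid_seq_next << SEQ_SEQ_SHIFT);
	tid_seq_next = (tid_seq_next + 1) % SEQ_MAX;

	printf("tid=%d seq_ctrl=0x%04x next=%u\n", tidno, seq_ctrl, tid_seq_next);
	return 0;
}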
+ */ + tid = ATH_AN_2_TID(an, bf->bf_tidno); + hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); + bf->bf_seqno = tid->seq_next; + INCR(tid->seq_next, IEEE80211_SEQ_MAX); +} + +static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb, + struct ath_txq *txq) +{ + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + int flags = 0; + + flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */ + flags |= ATH9K_TXDESC_INTREQ; + + if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) + flags |= ATH9K_TXDESC_NOACK; + + return flags; +} + +/* + * rix - rate index + * pktlen - total bytes (delims + data + fcs + pads + pad delims) + * width - 0 for 20 MHz, 1 for 40 MHz + * half_gi - to use 4us v/s 3.6 us for symbol time + */ +static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf, + int width, int half_gi, bool shortPreamble) +{ + u32 nbits, nsymbits, duration, nsymbols; + int streams, pktlen; + + pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen; + + /* find number of symbols: PLCP + data */ + nbits = (pktlen << 3) + OFDM_PLCP_BITS; + nsymbits = bits_per_symbol[rix][width]; + nsymbols = (nbits + nsymbits - 1) / nsymbits; + + if (!half_gi) + duration = SYMBOL_TIME(nsymbols); + else + duration = SYMBOL_TIME_HALFGI(nsymbols); + + /* addup duration for legacy/ht training and signal fields */ + streams = HT_RC_2_STREAMS(rix); + duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); + + return duration; +} + +static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath9k_11n_rate_series series[4]; + struct sk_buff *skb; + struct ieee80211_tx_info *tx_info; + struct ieee80211_tx_rate *rates; + const struct ieee80211_rate *rate; + struct ieee80211_hdr *hdr; + int i, flags = 0; + u8 rix = 0, ctsrate = 0; + bool is_pspoll; + + memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); + + skb = bf->bf_mpdu; + tx_info = IEEE80211_SKB_CB(skb); + rates = tx_info->control.rates; + hdr = (struct ieee80211_hdr *)skb->data; + is_pspoll = ieee80211_is_pspoll(hdr->frame_control); + + /* + * We check if Short Preamble is needed for the CTS rate by + * checking the BSS's global flag. + * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. 
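The duration arithmetic in ath_pkt_duration() above boils down to the following stand-alone calculation; the bits-per-symbol figure, symbol times and preamble field durations here are assumed round numbers, not taken from the hardware tables.

#include <stdio.h>

int main(void)
{
	int pktlen = 1500;	/* bytes, incl. delimiters/FCS/padding */
	int nsymbits = 260;	/* bits per symbol for the assumed MCS/width */
	int half_gi = 0;
	int streams = 1;

	int nbits = (pktlen << 3) + 22;				/* payload + PLCP bits */
	int nsymbols = (nbits + nsymbits - 1) / nsymbits;	/* round up */
	int duration = half_gi ? (nsymbols * 36 + 9) / 10	/* ~3.6 us per symbol */
			       : nsymbols * 4;			/* 4 us per symbol */

	/* legacy + HT training and signal fields, roughly L-STF/L-LTF/L-SIG/HT-SIG/HT-STF + HT-LTFs */
	duration += 8 + 8 + 4 + 8 + 4 + 4 * streams;

	printf("airtime ~ %d us\n", duration);
	return 0;
}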
+ */ + rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info); + ctsrate = rate->hw_value; + if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) + ctsrate |= rate->hw_value_short; + + for (i = 0; i < 4; i++) { + bool is_40, is_sgi, is_sp; + int phy; + + if (!rates[i].count || (rates[i].idx < 0)) + continue; + + rix = rates[i].idx; + series[i].Tries = rates[i].count; + series[i].ChSel = common->tx_chainmask; + + if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) || + (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) { + series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; + flags |= ATH9K_TXDESC_RTSENA; + } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; + flags |= ATH9K_TXDESC_CTSENA; + } + + if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + series[i].RateFlags |= ATH9K_RATESERIES_2040; + if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) + series[i].RateFlags |= ATH9K_RATESERIES_HALFGI; + + is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI); + is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH); + is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); + + if (rates[i].flags & IEEE80211_TX_RC_MCS) { + /* MCS rates */ + series[i].Rate = rix | 0x80; + series[i].PktDuration = ath_pkt_duration(sc, rix, bf, + is_40, is_sgi, is_sp); + continue; + } + + /* legcay rates */ + if ((tx_info->band == IEEE80211_BAND_2GHZ) && + !(rate->flags & IEEE80211_RATE_ERP_G)) + phy = WLAN_RC_PHY_CCK; + else + phy = WLAN_RC_PHY_OFDM; + + rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; + series[i].Rate = rate->hw_value; + if (rate->hw_value_short) { + if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + series[i].Rate |= rate->hw_value_short; + } else { + is_sp = false; + } + + series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, + phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp); + } + + /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ + if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit)) + flags &= ~ATH9K_TXDESC_RTSENA; + + /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. 
*/ + if (flags & ATH9K_TXDESC_RTSENA) + flags &= ~ATH9K_TXDESC_CTSENA; + + /* set dur_update_en for l-sig computation except for PS-Poll frames */ + ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc, + bf->bf_lastbf->bf_desc, + !is_pspoll, ctsrate, + 0, series, 4, flags); + + if (sc->config.ath_aggr_prot && flags) + ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192); +} + +static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, + struct sk_buff *skb, + struct ath_tx_control *txctl) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int hdrlen; + __le16 fc; + int padpos, padsize; + + tx_info->pad[0] = 0; + switch (txctl->frame_type) { + case ATH9K_NOT_INTERNAL: + break; + case ATH9K_INT_PAUSE: + tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE; + /* fall through */ + case ATH9K_INT_UNPAUSE: + tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL; + break; + } + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + fc = hdr->frame_control; + + ATH_TXBUF_RESET(bf); + + bf->aphy = aphy; + bf->bf_frmlen = skb->len + FCS_LEN; + /* Remove the padding size from bf_frmlen, if any */ + padpos = ath9k_cmn_padpos(hdr->frame_control); + padsize = padpos & 3; + if (padsize && skb->len>padpos+padsize) { + bf->bf_frmlen -= padsize; + } + + if (conf_is_ht(&hw->conf)) + bf->bf_state.bf_type |= BUF_HT; + + bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); + + bf->bf_keytype = get_hw_crypto_keytype(skb); + if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) { + bf->bf_frmlen += tx_info->control.hw_key->icv_len; + bf->bf_keyix = tx_info->control.hw_key->hw_key_idx; + } else { + bf->bf_keyix = ATH9K_TXKEYIX_INVALID; + } + + if (ieee80211_is_data_qos(fc) && bf_isht(bf) && + (sc->sc_flags & SC_OP_TXAGGR)) + assign_aggr_tid_seqno(skb, bf); + + bf->bf_mpdu = skb; + + bf->bf_dmacontext = dma_map_single(sc->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) { + bf->bf_mpdu = NULL; + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, + "dma_mapping_error() on TX\n"); + return -ENOMEM; + } + + bf->bf_buf_addr = bf->bf_dmacontext; + + /* tag if this is a nullfunc frame to enable PS when AP acks it */ + if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) { + bf->bf_isnullfunc = true; + sc->ps_flags &= ~PS_NULLFUNC_COMPLETED; + } else + bf->bf_isnullfunc = false; + + return 0; +} + +/* FIXME: tx power */ +static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, + struct ath_tx_control *txctl) +{ + struct sk_buff *skb = bf->bf_mpdu; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ath_node *an = NULL; + struct list_head bf_head; + struct ath_desc *ds; + struct ath_atx_tid *tid; + struct ath_hw *ah = sc->sc_ah; + int frm_type; + __le16 fc; + + frm_type = get_hw_packet_type(skb); + fc = hdr->frame_control; + + INIT_LIST_HEAD(&bf_head); + list_add_tail(&bf->list, &bf_head); + + ds = bf->bf_desc; + ds->ds_link = 0; + ds->ds_data = bf->bf_buf_addr; + + ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER, + bf->bf_keyix, bf->bf_keytype, bf->bf_flags); + + ath9k_hw_filltxdesc(ah, ds, + skb->len, /* segment length */ + true, /* first segment */ + true, /* last segment */ + ds); /* first descriptor */ + + spin_lock_bh(&txctl->txq->axq_lock); + + if (bf_isht(bf) && (sc->sc_flags & 
SC_OP_TXAGGR) && + tx_info->control.sta) { + an = (struct ath_node *)tx_info->control.sta->drv_priv; + tid = ATH_AN_2_TID(an, bf->bf_tidno); + + if (!ieee80211_is_data_qos(fc)) { + ath_tx_send_normal(sc, txctl->txq, &bf_head); + goto tx_done; + } + + if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) { + /* + * Try aggregation if it's a unicast data frame + * and the destination is HT capable. + */ + ath_tx_send_ampdu(sc, tid, &bf_head, txctl); + } else { + /* + * Send this frame as regular when ADDBA + * exchange is neither complete nor pending. + */ + ath_tx_send_ht_normal(sc, txctl->txq, + tid, &bf_head); + } + } else { + ath_tx_send_normal(sc, txctl->txq, &bf_head); + } + +tx_done: + spin_unlock_bh(&txctl->txq->axq_lock); +} + +/* Upon failure caller should free skb */ +int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ath_tx_control *txctl) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_buf *bf; + int r; + + bf = ath_tx_get_buffer(sc); + if (!bf) { + ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n"); + return -1; + } + + r = ath_tx_setup_buffer(hw, bf, skb, txctl); + if (unlikely(r)) { + struct ath_txq *txq = txctl->txq; + + ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n"); + + /* upon ath_tx_processq() this TX queue will be resumed, we + * guarantee this will happen by knowing beforehand that + * we will at least have to run TX completionon one buffer + * on the queue */ + spin_lock_bh(&txq->axq_lock); + if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) { + ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb)); + txq->stopped = 1; + } + spin_unlock_bh(&txq->axq_lock); + + spin_lock_bh(&sc->tx.txbuflock); + list_add_tail(&bf->list, &sc->tx.txbuf); + spin_unlock_bh(&sc->tx.txbuflock); + + return r; + } + + ath_tx_start_dma(sc, bf, txctl); + + return 0; +} + +void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + int padpos, padsize; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ath_tx_control txctl; + + memset(&txctl, 0, sizeof(struct ath_tx_control)); + + /* + * As a temporary workaround, assign seq# here; this will likely need + * to be cleaned up to work better with Beacon transmission and virtual + * BSSes. 
+ */ + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { + if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) + sc->tx.seq_no += 0x10; + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); + } + + /* Add the padding after the header if this is not already done */ + padpos = ath9k_cmn_padpos(hdr->frame_control); + padsize = padpos & 3; + if (padsize && skb->len>padpos) { + if (skb_headroom(skb) < padsize) { + ath_print(common, ATH_DBG_XMIT, + "TX CABQ padding failed\n"); + dev_kfree_skb_any(skb); + return; + } + skb_push(skb, padsize); + memmove(skb->data, skb->data + padsize, padpos); + } + + txctl.txq = sc->beacon.cabq; + + ath_print(common, ATH_DBG_XMIT, + "transmitting CABQ packet, skb: %p\n", skb); + + if (ath_tx_start(hw, skb, &txctl) != 0) { + ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n"); + goto exit; + } + + return; +exit: + dev_kfree_skb_any(skb); +} + +/*****************/ +/* TX Completion */ +/*****************/ + +static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, + struct ath_wiphy *aphy, int tx_flags) +{ + struct ieee80211_hw *hw = sc->hw; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; + int padpos, padsize; + + ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); + + if (aphy) + hw = aphy->hw; + + if (tx_flags & ATH_TX_BAR) + tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + + if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) { + /* Frame was ACKed */ + tx_info->flags |= IEEE80211_TX_STAT_ACK; + } + + padpos = ath9k_cmn_padpos(hdr->frame_control); + padsize = padpos & 3; + if (padsize && skb->len>padpos+padsize) { + /* + * Remove MAC header padding before giving the frame back to + * mac80211. 
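The padding removal below amounts to the following stand-alone operation; buffer contents and the header length are made up for illustration, and the memmove mirrors what the driver does before pulling the pad bytes off the front.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[64];
	int len = 40;			/* header + pad + payload bytes in buf */
	int padpos = 26;		/* QoS data header length (assumed) */
	int padsize = padpos & 3;	/* 2 pad bytes were inserted after it */

	memset(buf, 'H', padpos);				/* 802.11 header */
	memset(buf + padpos, 0, padsize);			/* alignment padding */
	memset(buf + padpos + padsize, 'D', len - padpos - padsize);

	if (padsize && len > padpos + padsize) {
		/* slide the header over the pad; frame then starts at buf + padsize */
		memmove(buf + padsize, buf, padpos);
		len -= padsize;
	}
	printf("frame length after stripping: %d\n", len);
	return 0;
}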
+ */ + memmove(skb->data + padsize, skb->data, padpos); + skb_pull(skb, padsize); + } + + if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) { + sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; + ath_print(common, ATH_DBG_PS, + "Going back to sleep after having " + "received TX status (0x%lx)\n", + sc->ps_flags & (PS_WAIT_FOR_BEACON | + PS_WAIT_FOR_CAB | + PS_WAIT_FOR_PSPOLL_DATA | + PS_WAIT_FOR_TX_ACK)); + } + + if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) + ath9k_tx_status(hw, skb); + else + ieee80211_tx_status(hw, skb); +} + +static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, + struct ath_txq *txq, + struct list_head *bf_q, + int txok, int sendbar) +{ + struct sk_buff *skb = bf->bf_mpdu; + unsigned long flags; + int tx_flags = 0; + + if (sendbar) + tx_flags = ATH_TX_BAR; + + if (!txok) { + tx_flags |= ATH_TX_ERROR; + + if (bf_isxretried(bf)) + tx_flags |= ATH_TX_XRETRY; + } + + dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); + ath_tx_complete(sc, skb, bf->aphy, tx_flags); + ath_debug_stat_tx(sc, txq, bf); + + /* + * Return the list of ath_buf of this mpdu to free queue + */ + spin_lock_irqsave(&sc->tx.txbuflock, flags); + list_splice_tail_init(bf_q, &sc->tx.txbuf); + spin_unlock_irqrestore(&sc->tx.txbuflock, flags); +} + +static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, + int txok) +{ + struct ath_buf *bf_last = bf->bf_lastbf; + struct ath_desc *ds = bf_last->bf_desc; + u16 seq_st = 0; + u32 ba[WME_BA_BMP_SIZE >> 5]; + int ba_index; + int nbad = 0; + int isaggr = 0; + + if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) + return 0; + + isaggr = bf_isaggr(bf); + if (isaggr) { + seq_st = ATH_DS_BA_SEQ(ds); + memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); + } + + while (bf) { + ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno); + if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) + nbad++; + + bf = bf->bf_next; + } + + return nbad; +} + +static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, + int nbad, int txok, bool update_rc) +{ + struct sk_buff *skb = bf->bf_mpdu; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_hw *hw = bf->aphy->hw; + u8 i, tx_rateindex; + + if (txok) + tx_info->status.ack_signal = ds->ds_txstat.ts_rssi; + + tx_rateindex = ds->ds_txstat.ts_rateindex; + WARN_ON(tx_rateindex >= hw->max_rates); + + if (update_rc) + tx_info->pad[0] |= ATH_TX_INFO_UPDATE_RC; + if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) + tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + + if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && + (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { + if (ieee80211_is_data(hdr->frame_control)) { + if (ds->ds_txstat.ts_flags & + (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN)) + tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN; + if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) || + (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO)) + tx_info->pad[0] |= ATH_TX_INFO_XRETRY; + tx_info->status.ampdu_len = bf->bf_nframes; + tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad; + } + } + + for (i = tx_rateindex + 1; i < hw->max_rates; i++) { + tx_info->status.rates[i].count = 0; + tx_info->status.rates[i].idx = -1; + } + + tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1; +} + +static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) +{ + int qnum; + + spin_lock_bh(&txq->axq_lock); + if (txq->stopped && + sc->tx.txq[txq->axq_qnum].axq_depth <= 
(ATH_TXBUF - 20)) { + qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc); + if (qnum != -1) { + ath_mac80211_start_queue(sc, qnum); + txq->stopped = 0; + } + } + spin_unlock_bh(&txq->axq_lock); +} + +static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath_buf *bf, *lastbf, *bf_held = NULL; + struct list_head bf_head; + struct ath_desc *ds; + int txok; + int status; + + ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", + txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), + txq->axq_link); + + for (;;) { + spin_lock_bh(&txq->axq_lock); + if (list_empty(&txq->axq_q)) { + txq->axq_link = NULL; + spin_unlock_bh(&txq->axq_lock); + break; + } + bf = list_first_entry(&txq->axq_q, struct ath_buf, list); + + /* + * There is a race condition that a BH gets scheduled + * after sw writes TxE and before hw re-load the last + * descriptor to get the newly chained one. + * Software must keep the last DONE descriptor as a + * holding descriptor - software does so by marking + * it with the STALE flag. + */ + bf_held = NULL; + if (bf->bf_stale) { + bf_held = bf; + if (list_is_last(&bf_held->list, &txq->axq_q)) { + spin_unlock_bh(&txq->axq_lock); + break; + } else { + bf = list_entry(bf_held->list.next, + struct ath_buf, list); + } + } + + lastbf = bf->bf_lastbf; + ds = lastbf->bf_desc; + + status = ath9k_hw_txprocdesc(ah, ds); + if (status == -EINPROGRESS) { + spin_unlock_bh(&txq->axq_lock); + break; + } + + /* + * We now know the nullfunc frame has been ACKed so we + * can disable RX. + */ + if (bf->bf_isnullfunc && + (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) { + if ((sc->ps_flags & PS_ENABLED)) + ath9k_enable_ps(sc); + else + sc->ps_flags |= PS_NULLFUNC_COMPLETED; + } + + /* + * Remove ath_buf's of the same transmit unit from txq, + * however leave the last descriptor back as the holding + * descriptor for hw. + */ + lastbf->bf_stale = true; + INIT_LIST_HEAD(&bf_head); + if (!list_is_singular(&lastbf->list)) + list_cut_position(&bf_head, + &txq->axq_q, lastbf->list.prev); + + txq->axq_depth--; + txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK); + txq->axq_tx_inprogress = false; + spin_unlock_bh(&txq->axq_lock); + + if (bf_held) { + spin_lock_bh(&sc->tx.txbuflock); + list_move_tail(&bf_held->list, &sc->tx.txbuf); + spin_unlock_bh(&sc->tx.txbuflock); + } + + if (!bf_isampdu(bf)) { + /* + * This frame is sent out as a single frame. + * Use hardware retry status for this frame. 
+ */ + bf->bf_retries = ds->ds_txstat.ts_longretry; + if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) + bf->bf_state.bf_type |= BUF_XRETRY; + ath_tx_rc_status(bf, ds, 0, txok, true); + } + + if (bf_isampdu(bf)) + ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok); + else + ath_tx_complete_buf(sc, bf, txq, &bf_head, txok, 0); + + ath_wake_mac80211_queue(sc, txq); + + spin_lock_bh(&txq->axq_lock); + if (sc->sc_flags & SC_OP_TXAGGR) + ath_txq_schedule(sc, txq); + spin_unlock_bh(&txq->axq_lock); + } +} + +static void ath_tx_complete_poll_work(struct work_struct *work) +{ + struct ath_softc *sc = container_of(work, struct ath_softc, + tx_complete_work.work); + struct ath_txq *txq; + int i; + bool needreset = false; + + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) + if (ATH_TXQ_SETUP(sc, i)) { + txq = &sc->tx.txq[i]; + spin_lock_bh(&txq->axq_lock); + if (txq->axq_depth) { + if (txq->axq_tx_inprogress) { + needreset = true; + spin_unlock_bh(&txq->axq_lock); + break; + } else { + txq->axq_tx_inprogress = true; + } + } + spin_unlock_bh(&txq->axq_lock); + } + + if (needreset) { + ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, + "tx hung, resetting the chip\n"); + ath9k_ps_wakeup(sc); + ath_reset(sc, false); + ath9k_ps_restore(sc); + } + + ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, + msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT)); +} + + + +void ath_tx_tasklet(struct ath_softc *sc) +{ + int i; + u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1); + + ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask); + + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) + ath_tx_processq(sc, &sc->tx.txq[i]); + } +} + +/*****************/ +/* Init, Cleanup */ +/*****************/ + +int ath_tx_init(struct ath_softc *sc, int nbufs) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + int error = 0; + + spin_lock_init(&sc->tx.txbuflock); + + error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, + "tx", nbufs, 1); + if (error != 0) { + ath_print(common, ATH_DBG_FATAL, + "Failed to allocate tx descriptors: %d\n", error); + goto err; + } + + error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, + "beacon", ATH_BCBUF, 1); + if (error != 0) { + ath_print(common, ATH_DBG_FATAL, + "Failed to allocate beacon descriptors: %d\n", error); + goto err; + } + + INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); + +err: + if (error != 0) + ath_tx_cleanup(sc); + + return error; +} + +void ath_tx_cleanup(struct ath_softc *sc) +{ + if (sc->beacon.bdma.dd_desc_len != 0) + ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf); + + if (sc->tx.txdma.dd_desc_len != 0) + ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf); +} + +void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) +{ + struct ath_atx_tid *tid; + struct ath_atx_ac *ac; + int tidno, acno; + + for (tidno = 0, tid = &an->tid[tidno]; + tidno < WME_NUM_TID; + tidno++, tid++) { + tid->an = an; + tid->tidno = tidno; + tid->seq_start = tid->seq_next = 0; + tid->baw_size = WME_MAX_BA; + tid->baw_head = tid->baw_tail = 0; + tid->sched = false; + tid->paused = false; + tid->state &= ~AGGR_CLEANUP; + INIT_LIST_HEAD(&tid->buf_q); + acno = TID_TO_WME_AC(tidno); + tid->ac = &an->ac[acno]; + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_ADDBA_PROGRESS; + } + + for (acno = 0, ac = &an->ac[acno]; + acno < WME_NUM_AC; acno++, ac++) { + ac->sched = false; + INIT_LIST_HEAD(&ac->tid_q); + + switch (acno) { + case WME_AC_BE: + ac->qnum = ath_tx_get_qnum(sc, + 
ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); + break; + case WME_AC_BK: + ac->qnum = ath_tx_get_qnum(sc, + ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK); + break; + case WME_AC_VI: + ac->qnum = ath_tx_get_qnum(sc, + ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI); + break; + case WME_AC_VO: + ac->qnum = ath_tx_get_qnum(sc, + ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO); + break; + } + } +} + +void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) +{ + int i; + struct ath_atx_ac *ac, *ac_tmp; + struct ath_atx_tid *tid, *tid_tmp; + struct ath_txq *txq; + + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (ATH_TXQ_SETUP(sc, i)) { + txq = &sc->tx.txq[i]; + + spin_lock_bh(&txq->axq_lock); + + list_for_each_entry_safe(ac, + ac_tmp, &txq->axq_acq, list) { + tid = list_first_entry(&ac->tid_q, + struct ath_atx_tid, list); + if (tid && tid->an != an) + continue; + list_del(&ac->list); + ac->sched = false; + + list_for_each_entry_safe(tid, + tid_tmp, &ac->tid_q, list) { + list_del(&tid->list); + tid->sched = false; + ath_tid_drain(sc, txq, tid); + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_CLEANUP; + } + } + + spin_unlock_bh(&txq->axq_lock); + } + } +} diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c new file mode 100644 index 0000000..53e77bd --- /dev/null +++ b/drivers/net/wireless/ath/debug.c @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "ath.h" +#include "debug.h" + +void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...) +{ + va_list args; + + if (likely(!(common->debug_mask & dbg_mask))) + return; + + va_start(args, fmt); + printk(KERN_DEBUG "ath: "); + vprintk(fmt, args); + va_end(args); +} +EXPORT_SYMBOL(ath_print); diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h new file mode 100644 index 0000000..8263633 --- /dev/null +++ b/drivers/net/wireless/ath/debug.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+#ifndef ATH_DEBUG_H
+#define ATH_DEBUG_H
+
+#include "ath.h"
+
+/**
+ * enum ath_debug_level - atheros wireless debug level
+ *
+ * @ATH_DBG_RESET: reset processing
+ * @ATH_DBG_QUEUE: hardware queue management
+ * @ATH_DBG_EEPROM: eeprom processing
+ * @ATH_DBG_CALIBRATE: periodic calibration
+ * @ATH_DBG_INTERRUPT: interrupt processing
+ * @ATH_DBG_REGULATORY: regulatory processing
+ * @ATH_DBG_ANI: adaptive noise immunity processing
+ * @ATH_DBG_XMIT: basic xmit operation
+ * @ATH_DBG_BEACON: beacon handling
+ * @ATH_DBG_CONFIG: configuration of the hardware
+ * @ATH_DBG_FATAL: fatal errors, this is the default, DBG_DEFAULT
+ * @ATH_DBG_PS: power save processing
+ * @ATH_DBG_HWTIMER: hardware timer handling
+ * @ATH_DBG_BTCOEX: bluetooth coexistence
+ * @ATH_DBG_ANY: enable all debugging
+ *
+ * The debug level is used to control the amount and type of debugging output
+ * we want to see. Each driver has its own method for enabling debugging and
+ * modifying debug level states -- but this is typically done through a
+ * module parameter 'debug' along with a respective 'debug' debugfs file
+ * entry.
+ */
+enum ATH_DEBUG {
+	ATH_DBG_RESET		= 0x00000001,
+	ATH_DBG_QUEUE		= 0x00000002,
+	ATH_DBG_EEPROM		= 0x00000004,
+	ATH_DBG_CALIBRATE	= 0x00000008,
+	ATH_DBG_INTERRUPT	= 0x00000010,
+	ATH_DBG_REGULATORY	= 0x00000020,
+	ATH_DBG_ANI		= 0x00000040,
+	ATH_DBG_XMIT		= 0x00000080,
+	ATH_DBG_BEACON		= 0x00000100,
+	ATH_DBG_CONFIG		= 0x00000200,
+	ATH_DBG_FATAL		= 0x00000400,
+	ATH_DBG_PS		= 0x00000800,
+	ATH_DBG_HWTIMER		= 0x00001000,
+	ATH_DBG_BTCOEX		= 0x00002000,
+	ATH_DBG_ANY		= 0xffffffff
+};
+
+#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
+
+#ifdef CONFIG_ATH_DEBUG
+void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
+	__attribute__ ((format (printf, 3, 4)));
+#else
+static inline void __attribute__ ((format (printf, 3, 4)))
+ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
+{
+}
+#endif /* CONFIG_ATH_DEBUG */
+
+#endif /* ATH_DEBUG_H */
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
new file mode 100644
index 0000000..ecc9eb0
--- /dev/null
+++ b/drivers/net/wireless/ath/hw.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+
+#include "ath.h"
+#include "reg.h"
+
+#define REG_READ	common->ops->read
+#define REG_WRITE	common->ops->write
+
+/**
+ * ath_hw_set_bssid_mask - filter out bssids we listen to
+ *
+ * @common: the ath_common struct for the device.
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * NOTE: This is a simple filter and does *not* filter out all
+ * relevant frames. Some frames that are not for us might get ACKed from us
+ * by PCU because they just match the mask.
+ *
+ * When handling multiple BSSes you can get the BSSID mask by computing the
+ * set of ~ ( MAC XOR BSSID ) for all bssids we handle.
+ *
+ * When you do this you are essentially computing the common bits of all your
+ * BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
+ * the MAC address to obtain the relevant bits and compare the result with
+ * (frame's BSSID & mask) to see if they match.
+ *
+ * Simple example: on your card you have two BSSes you have created with
+ * BSSID-01 and BSSID-02. Let's assume BSSID-01 will not use the MAC address.
+ * There is another BSSID-03 but you are not part of it. For simplicity's sake,
+ * assuming only 4 bits for a mac address and for BSSIDs you can then have:
+ *
+ *                  \
+ * MAC:        0001 |
+ * BSSID-01:   0100 | --> Belongs to us
+ * BSSID-02:   1001 |
+ *                  /
+ * -------------------
+ * BSSID-03:   0110 | --> External
+ * -------------------
+ *
+ * Our bssid_mask would then be:
+ *
+ *             On loop iteration for BSSID-01:
+ *             ~(0001 ^ 0100)  -> ~(0101)
+ *                             ->   1010
+ *             bssid_mask = 1010
+ *
+ *             On loop iteration for BSSID-02:
+ *             bssid_mask &= ~(0001   ^   1001)
+ *             bssid_mask =   (1010)  & ~(0001 ^ 1001)
+ *             bssid_mask =   (1010)  & ~(1001)
+ *             bssid_mask =   (1010)  &  (0110)
+ *             bssid_mask =   0010
+ *
+ * A bssid_mask of 0010 means "only pay attention to the second least
+ * significant bit". This is because it's the only bit common
+ * amongst the MAC and all BSSIDs we support. To find out what the real
+ * common bit is we can simply "&" the bssid_mask now with any BSSID we have
+ * or our MAC address (we assume the hardware uses the MAC address).
+ *
+ * Now, suppose there's an incoming frame for BSSID-03:
+ *
+ * IFRAME-01:  0110
+ *
+ * An easy eye-inspection of this should already tell you that this frame
+ * will not pass our check. This is because the bssid_mask tells the
+ * hardware to only look at the second least significant bit and the
+ * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
+ * as 1, which does not match 0.
+ *
+ * So with IFRAME-01 we *assume* the hardware will do:
+ *
+ *     allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
+ *  --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
+ *  --> allow = (0010) == 0000 ? 1 : 0;
+ *  --> allow = 0
+ *
+ * Let's now test a frame that should work:
+ *
+ * IFRAME-02:  0001 (we should allow)
+ *
+ *     allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
+ *  --> allow = (0001 & 0010) == (0010 & 0001) ? 1 : 0;
+ *  --> allow = (0000) == (0000)
+ *  --> allow = 1
+ *
+ * Other examples:
+ *
+ * IFRAME-03:  0100 --> allowed
+ * IFRAME-04:  1001 --> allowed
+ * IFRAME-05:  1101 --> allowed but it's not for us!!!
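The computation and check walked through above can be reproduced with a few lines of stand-alone C (4-bit toy addresses as in the example; this is an illustration, not driver code).

#include <stdio.h>

int main(void)
{
	unsigned mac = 0x1;			/* 0001 */
	unsigned bssids[] = { 0x4, 0x9 };	/* 0100, 1001 */
	unsigned mask = 0xf;

	/* AND of ~(MAC ^ BSSID) over all local BSSIDs */
	for (int i = 0; i < 2; i++)
		mask &= ~(mac ^ bssids[i]) & 0xf;

	printf("bssid_mask = 0x%x\n", mask);	/* 0x2, as in the example */

	/* the check the hardware is assumed to perform */
	unsigned frame = 0x6;			/* IFRAME-01 from the example */
	int allow = (frame & mask) == (mac & mask);
	printf("frame 0x%x %s\n", frame, allow ? "ACKed" : "ignored");
	return 0;
}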
+ * + */ +void ath_hw_setbssidmask(struct ath_common *common) +{ + void *ah = common->ah; + + REG_WRITE(ah, get_unaligned_le32(common->bssidmask), AR_BSSMSKL); + REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU); +} +EXPORT_SYMBOL(ath_hw_setbssidmask); diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c new file mode 100644 index 0000000..487193f --- /dev/null +++ b/drivers/net/wireless/ath/main.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include + +#include "ath.h" + +MODULE_AUTHOR("Atheros Communications"); +MODULE_DESCRIPTION("Shared library for Atheros wireless LAN cards."); +MODULE_LICENSE("Dual BSD/GPL"); + +struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, + u32 len, + gfp_t gfp_mask) +{ + struct sk_buff *skb; + u32 off; + + /* + * Cache-line-align. This is important (for the + * 5210 at least) as not doing so causes bogus data + * in rx'd frames. + */ + + /* Note: the kernel can allocate a value greater than + * what we ask it to give us. We really only need 4 KB as that + * is this hardware supports and in fact we need at least 3849 + * as that is the MAX AMSDU size this hardware supports. + * Unfortunately this means we may get 8 KB here from the + * kernel... and that is actually what is observed on some + * systems :( */ + skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask); + if (skb != NULL) { + off = ((unsigned long) skb->data) % common->cachelsz; + if (off != 0) + skb_reserve(skb, common->cachelsz - off); + } else { + printk(KERN_ERR "skbuff alloc of size %u failed\n", len); + return NULL; + } + + return skb; +} +EXPORT_SYMBOL(ath_rxbuf_alloc); diff --git a/drivers/net/wireless/ath/reg.h b/drivers/net/wireless/ath/reg.h new file mode 100644 index 0000000..dfe1fbe --- /dev/null +++ b/drivers/net/wireless/ath/reg.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ATH_REGISTERS_H +#define ATH_REGISTERS_H + +/* + * BSSID mask registers. 
See ath_hw_set_bssid_mask() + * for detailed documentation about these registers. + */ +#define AR_BSSMSKL 0x80e0 +#define AR_BSSMSKU 0x80e4 + +#endif /* ATH_REGISTERS_H */ diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c new file mode 100644 index 0000000..04abd1f --- /dev/null +++ b/drivers/net/wireless/ath/regd.c @@ -0,0 +1,590 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include "regd.h" +#include "regd_common.h" + +/* + * This is a set of common rules used by our world regulatory domains. + * We have 12 world regulatory domains. To save space we consolidate + * the regulatory domains in 5 structures by frequency and change + * the flags on our reg_notifier() on a case by case basis. + */ + +/* Only these channels all allow active scan on all world regulatory domains */ +#define ATH9K_2GHZ_CH01_11 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0) + +/* We enable active scan on these a case by case basis by regulatory domain */ +#define ATH9K_2GHZ_CH12_13 REG_RULE(2467-10, 2472+10, 40, 0, 20,\ + NL80211_RRF_PASSIVE_SCAN) +#define ATH9K_2GHZ_CH14 REG_RULE(2484-10, 2484+10, 40, 0, 20,\ + NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM) + +/* We allow IBSS on these on a case by case basis by regulatory domain */ +#define ATH9K_5GHZ_5150_5350 REG_RULE(5150-10, 5350+10, 40, 0, 30,\ + NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) +#define ATH9K_5GHZ_5470_5850 REG_RULE(5470-10, 5850+10, 40, 0, 30,\ + NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) +#define ATH9K_5GHZ_5725_5850 REG_RULE(5725-10, 5850+10, 40, 0, 30,\ + NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) + +#define ATH9K_2GHZ_ALL ATH9K_2GHZ_CH01_11, \ + ATH9K_2GHZ_CH12_13, \ + ATH9K_2GHZ_CH14 + +#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \ + ATH9K_5GHZ_5470_5850 +/* This one skips what we call "mid band" */ +#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \ + ATH9K_5GHZ_5725_5850 + +/* Can be used for: + * 0x60, 0x61, 0x62 */ +static const struct ieee80211_regdomain ath_world_regdom_60_61_62 = { + .n_reg_rules = 5, + .alpha2 = "99", + .reg_rules = { + ATH9K_2GHZ_ALL, + ATH9K_5GHZ_ALL, + } +}; + +/* Can be used by 0x63 and 0x65 */ +static const struct ieee80211_regdomain ath_world_regdom_63_65 = { + .n_reg_rules = 4, + .alpha2 = "99", + .reg_rules = { + ATH9K_2GHZ_CH01_11, + ATH9K_2GHZ_CH12_13, + ATH9K_5GHZ_NO_MIDBAND, + } +}; + +/* Can be used by 0x64 only */ +static const struct ieee80211_regdomain ath_world_regdom_64 = { + .n_reg_rules = 3, + .alpha2 = "99", + .reg_rules = { + ATH9K_2GHZ_CH01_11, + ATH9K_5GHZ_NO_MIDBAND, + } +}; + +/* Can be used by 0x66 and 0x69 */ +static const struct ieee80211_regdomain ath_world_regdom_66_69 = { + .n_reg_rules = 3, + .alpha2 = "99", + .reg_rules = { + ATH9K_2GHZ_CH01_11, 
+ ATH9K_5GHZ_ALL,
+ }
+};
+
+/* Can be used by 0x67, 0x6A and 0x68 */
+static const struct ieee80211_regdomain ath_world_regdom_67_68_6A = {
+ .n_reg_rules = 4,
+ .alpha2 = "99",
+ .reg_rules = {
+ ATH9K_2GHZ_CH01_11,
+ ATH9K_2GHZ_CH12_13,
+ ATH9K_5GHZ_ALL,
+ }
+};
+
+static inline bool is_wwr_sku(u16 regd)
+{
+ return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
+ (((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
+ (regd == WORLD));
+}
+
+static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg)
+{
+ return reg->current_rd & ~WORLDWIDE_ROAMING_FLAG;
+}
+
+bool ath_is_world_regd(struct ath_regulatory *reg)
+{
+ return is_wwr_sku(ath_regd_get_eepromRD(reg));
+}
+EXPORT_SYMBOL(ath_is_world_regd);
+
+static const struct ieee80211_regdomain *ath_default_world_regdomain(void)
+{
+ /* this is the most restrictive */
+ return &ath_world_regdom_64;
+}
+
+static const struct
+ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
+{
+ switch (reg->regpair->regDmnEnum) {
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ return &ath_world_regdom_60_61_62;
+ case 0x63:
+ case 0x65:
+ return &ath_world_regdom_63_65;
+ case 0x64:
+ return &ath_world_regdom_64;
+ case 0x66:
+ case 0x69:
+ return &ath_world_regdom_66_69;
+ case 0x67:
+ case 0x68:
+ case 0x6A:
+ return &ath_world_regdom_67_68_6A;
+ default:
+ WARN_ON(1);
+ return ath_default_world_regdomain();
+ }
+}
+
+/* Frequency is one where radar detection is required */
+static bool ath_is_radar_freq(u16 center_freq)
+{
+ return (center_freq >= 5260 && center_freq <= 5700);
+}
+
+/*
+ * N.B: These exception rules do not apply to radar freqs.
+ *
+ * - We enable adhoc (or beaconing) if allowed by 11d
+ * - We enable active scan if the channel is allowed by 11d
+ * - If no country IE has been processed and we determine we have
+ * received a beacon on a channel we can enable active scan and
+ * adhoc (or beaconing).
+ */
+static void
+ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_reg_rule *reg_rule;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+ u32 bandwidth = 0;
+ int r;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+
+ if (!wiphy->bands[band])
+ continue;
+
+ sband = wiphy->bands[band];
+
+ for (i = 0; i < sband->n_channels; i++) {
+
+ ch = &sband->channels[i];
+
+ if (ath_is_radar_freq(ch->center_freq) ||
+ (ch->flags & IEEE80211_CHAN_RADAR))
+ continue;
+
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+ r = freq_reg_info(wiphy,
+ ch->center_freq,
+ bandwidth,
+ &reg_rule);
+ if (r)
+ continue;
+ /*
+ * If 11d had a rule for this channel ensure
+ * we enable adhoc/beaconing if it allows us to
+ * use it. Note that we would have disabled it
+ * by applying our static world regdomain by
+ * default during init, prior to calling our
+ * regulatory_hint().
+ */
+ if (!(reg_rule->flags &
+ NL80211_RRF_NO_IBSS))
+ ch->flags &=
+ ~IEEE80211_CHAN_NO_IBSS;
+ if (!(reg_rule->flags &
+ NL80211_RRF_PASSIVE_SCAN))
+ ch->flags &=
+ ~IEEE80211_CHAN_PASSIVE_SCAN;
+ } else {
+ if (ch->beacon_found)
+ ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN);
+ }
+ }
+ }
+
+}
+
+/* Allows active scan on Ch 12 and 13 */
+static void
+ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ const struct ieee80211_reg_rule *reg_rule;
+ u32 bandwidth = 0;
+ int r;
+
+ sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+
+ /*
+ * If no country IE has been received always enable active scan
+ * on these channels. This is only done for specific regulatory SKUs
+ */
+ if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+ ch = &sband->channels[11]; /* CH 12 */
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ ch = &sband->channels[12]; /* CH 13 */
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ return;
+ }
+
+ /*
+ * If a country IE has been received check its rule for this
+ * channel first before enabling active scan. The passive scan
+ * would have been enforced by the initial processing of our
+ * custom regulatory domain.
+ */
+
+ ch = &sband->channels[11]; /* CH 12 */
+ r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
+ if (!r) {
+ if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+
+ ch = &sband->channels[12]; /* CH 13 */
+ r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
+ if (!r) {
+ if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+}
+
+/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */
+static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+ return;
+
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (!ath_is_radar_freq(ch->center_freq))
+ continue;
+ /* We always enable radar detection/DFS on this
+ * frequency range. Additionally we also apply on
+ * this frequency range:
+ * - If STA mode does not yet have DFS support, disable
+ * active scanning
+ * - If adhoc mode does not support DFS yet then
+ * disable adhoc in the frequency.
+ * - If AP mode does not yet support radar detection/DFS
+ * do not allow AP mode
+ */
+ if (!(ch->flags & IEEE80211_CHAN_DISABLED))
+ ch->flags |= IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+}
+
+static void ath_reg_apply_world_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator,
+ struct ath_regulatory *reg)
+{
+ switch (reg->regpair->regDmnEnum) {
+ case 0x60:
+ case 0x63:
+ case 0x66:
+ case 0x67:
+ ath_reg_apply_beaconing_flags(wiphy, initiator);
+ break;
+ case 0x68:
+ ath_reg_apply_beaconing_flags(wiphy, initiator);
+ ath_reg_apply_active_scan_flags(wiphy, initiator);
+ break;
+ }
+ return;
+}
+
+int ath_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct ath_regulatory *reg)
+{
+ /* We always apply this */
+ ath_reg_apply_radar_flags(wiphy);
+
+ switch (request->initiator) {
+ case NL80211_REGDOM_SET_BY_DRIVER:
+ case NL80211_REGDOM_SET_BY_CORE:
+ case NL80211_REGDOM_SET_BY_USER:
+ break;
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ if (ath_is_world_regd(reg))
+ ath_reg_apply_world_flags(wiphy, request->initiator,
+ reg);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath_reg_notifier_apply);
+
+static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
+{
+ u16 rd = ath_regd_get_eepromRD(reg);
+ int i;
+
+ if (rd & COUNTRY_ERD_FLAG) {
+ /* EEPROM value is a country code */
+ u16 cc = rd & ~COUNTRY_ERD_FLAG;
+ printk(KERN_DEBUG
+ "ath: EEPROM indicates we should expect "
+ "a country code\n");
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++)
+ if (allCountries[i].countryCode == cc)
+ return true;
+ } else {
+ /* EEPROM value is a regpair value */
+ if (rd != CTRY_DEFAULT)
+ printk(KERN_DEBUG "ath: EEPROM indicates we "
+ "should expect a direct regpair map\n");
+ for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
+ if (regDomainPairs[i].regDmnEnum == rd)
+ return true;
+ }
+ printk(KERN_DEBUG
+ "ath: invalid regulatory domain/country code 0x%x\n", rd);
+ return false;
+}
+
+/* EEPROM country code to regpair mapping */
+static struct country_code_to_enum_rd*
+ath_regd_find_country(u16 countryCode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].countryCode == countryCode)
+ return &allCountries[i];
+ }
+ return NULL;
+}
+
+/* EEPROM rd code to regpair mapping */
+static struct country_code_to_enum_rd*
+ath_regd_find_country_by_rd(int regdmn)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].regDmnEnum == regdmn)
+ return &allCountries[i];
+ }
+ return NULL;
+}
+
+/* Returns the map of the EEPROM set RD to a country code */
+static u16 ath_regd_get_default_country(u16 rd)
+{
+ if (rd & COUNTRY_ERD_FLAG) {
+ struct country_code_to_enum_rd *country = NULL;
+ u16 cc = rd & ~COUNTRY_ERD_FLAG;
+
+ country = ath_regd_find_country(cc);
+ if (country != NULL)
+ return cc;
+ }
+
+ return CTRY_DEFAULT;
+}
+
+static struct reg_dmn_pair_mapping*
+ath_get_regpair(int regdmn)
+{
+ int i;
+
+ if (regdmn == NO_ENUMRD)
+ return NULL;
+ for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
+ if (regDomainPairs[i].regDmnEnum == regdmn)
+ return &regDomainPairs[i];
+ }
+ return NULL;
+}
+
+static int
+ath_regd_init_wiphy(struct ath_regulatory *reg,
+ struct wiphy *wiphy,
+ int (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ const struct ieee80211_regdomain *regd;
+
+ wiphy->reg_notifier = reg_notifier;
+ wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
+
+ if (ath_is_world_regd(reg)) {
+ /*
+ * Anything applied
here (prior to wiphy registration) gets + * saved on the wiphy orig_* parameters + */ + regd = ath_world_regdomain(reg); + wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + } else { + /* + * This gets applied in the case of the absense of CRDA, + * it's our own custom world regulatory domain, similar to + * cfg80211's but we enable passive scanning. + */ + regd = ath_default_world_regdomain(); + } + wiphy_apply_custom_regulatory(wiphy, regd); + ath_reg_apply_radar_flags(wiphy); + ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg); + return 0; +} + +/* + * Some users have reported their EEPROM programmed with + * 0x8000 set, this is not a supported regulatory domain + * but since we have more than one user with it we need + * a solution for them. We default to 0x64, which is the + * default Atheros world regulatory domain. + */ +static void ath_regd_sanitize(struct ath_regulatory *reg) +{ + if (reg->current_rd != COUNTRY_ERD_FLAG) + return; + printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n"); + reg->current_rd = 0x64; +} + +int +ath_regd_init(struct ath_regulatory *reg, + struct wiphy *wiphy, + int (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request)) +{ + struct country_code_to_enum_rd *country = NULL; + u16 regdmn; + + if (!reg) + return -EINVAL; + + ath_regd_sanitize(reg); + + printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd); + + if (!ath_regd_is_eeprom_valid(reg)) { + printk(KERN_ERR "ath: Invalid EEPROM contents\n"); + return -EINVAL; + } + + regdmn = ath_regd_get_eepromRD(reg); + reg->country_code = ath_regd_get_default_country(regdmn); + + if (reg->country_code == CTRY_DEFAULT && + regdmn == CTRY_DEFAULT) { + printk(KERN_DEBUG "ath: EEPROM indicates default " + "country code should be used\n"); + reg->country_code = CTRY_UNITED_STATES; + } + + if (reg->country_code == CTRY_DEFAULT) { + country = NULL; + } else { + printk(KERN_DEBUG "ath: doing EEPROM country->regdmn " + "map search\n"); + country = ath_regd_find_country(reg->country_code); + if (country == NULL) { + printk(KERN_DEBUG + "ath: no valid country maps found for " + "country code: 0x%0x\n", + reg->country_code); + return -EINVAL; + } else { + regdmn = country->regDmnEnum; + printk(KERN_DEBUG "ath: country maps to " + "regdmn code: 0x%0x\n", + regdmn); + } + } + + reg->regpair = ath_get_regpair(regdmn); + + if (!reg->regpair) { + printk(KERN_DEBUG "ath: " + "No regulatory domain pair found, cannot continue\n"); + return -EINVAL; + } + + if (!country) + country = ath_regd_find_country_by_rd(regdmn); + + if (country) { + reg->alpha2[0] = country->isoName[0]; + reg->alpha2[1] = country->isoName[1]; + } else { + reg->alpha2[0] = '0'; + reg->alpha2[1] = '0'; + } + + printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n", + reg->alpha2[0], reg->alpha2[1]); + printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n", + reg->regpair->regDmnEnum); + + ath_regd_init_wiphy(reg, wiphy, reg_notifier); + return 0; +} +EXPORT_SYMBOL(ath_regd_init); + +u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, + enum ieee80211_band band) +{ + if (!reg->regpair || + (reg->country_code == CTRY_DEFAULT && + is_wwr_sku(ath_regd_get_eepromRD(reg)))) { + return SD_NO_CTL; + } + + switch (band) { + case IEEE80211_BAND_2GHZ: + return reg->regpair->reg_2ghz_ctl; + case IEEE80211_BAND_5GHZ: + return reg->regpair->reg_5ghz_ctl; + default: + return NO_CTL; + } +} +EXPORT_SYMBOL(ath_regd_get_band_ctl); diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h new file mode 100644 
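For orientation, everything in regd.c above keys off the 16-bit regulatory value read from EEPROM: bit 15 (COUNTRY_ERD_FLAG) marks the value as an ISO numeric country code, bit 14 (WORLDWIDE_ROAMING_FLAG) is masked off before any lookup, and values matching the WORLD_SKU_PREFIX pattern select one of the world-roaming regdomains returned by ath_world_regdomain(); anything else is looked up directly in regDomainPairs[]. The stand-alone sketch below is an editor's illustration, not part of the patch; it mirrors the constants declared in regd.h/regd_common.h further down, and the sample value 0x8348 is hypothetical.

/* Illustrative decoder, assuming only the regd.h constants; not driver code. */
#include <stdio.h>
#include <stdint.h>

#define COUNTRY_ERD_FLAG       0x8000
#define WORLDWIDE_ROAMING_FLAG 0x4000
#define WORLD_SKU_MASK         0x00F0
#define WORLD_SKU_PREFIX       0x0060

int main(void)
{
	uint16_t eeprom_rd = 0x8348; /* hypothetical sample EEPROM value */
	uint16_t rd = eeprom_rd & ~WORLDWIDE_ROAMING_FLAG;

	if (rd & COUNTRY_ERD_FLAG)
		/* low bits hold an ISO country code: 0x0348 == 840 == United States */
		printf("country code %u\n", rd & ~COUNTRY_ERD_FLAG);
	else if ((rd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)
		/* 0x60..0x6A style world SKU, handled by ath_world_regdomain() */
		printf("world-roaming SKU 0x%02x\n", rd);
	else
		/* direct regDomainPairs[] entry, e.g. FCC3_FCCA (0x3A) */
		printf("direct regpair 0x%02x\n", rd);
	return 0;
}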
index 0000000..a1c3952 --- /dev/null +++ b/drivers/net/wireless/ath/regd.h @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REGD_H +#define REGD_H + +#include +#include + +#include "ath.h" + +enum ctl_group { + CTL_FCC = 0x10, + CTL_MKK = 0x40, + CTL_ETSI = 0x30, +}; + +#define NO_CTL 0xff +#define SD_NO_CTL 0xE0 +#define NO_CTL 0xff +#define CTL_MODE_M 7 +#define CTL_11A 0 +#define CTL_11B 1 +#define CTL_11G 2 +#define CTL_2GHT20 5 +#define CTL_5GHT20 6 +#define CTL_2GHT40 7 +#define CTL_5GHT40 8 + +#define CTRY_DEBUG 0x1ff +#define CTRY_DEFAULT 0 + +#define COUNTRY_ERD_FLAG 0x8000 +#define WORLDWIDE_ROAMING_FLAG 0x4000 + +#define MULTI_DOMAIN_MASK 0xFF00 + +#define WORLD_SKU_MASK 0x00F0 +#define WORLD_SKU_PREFIX 0x0060 + +#define CHANNEL_HALF_BW 10 +#define CHANNEL_QUARTER_BW 5 + +struct country_code_to_enum_rd { + u16 countryCode; + u16 regDmnEnum; + const char *isoName; +}; + +enum CountryCode { + CTRY_ALBANIA = 8, + CTRY_ALGERIA = 12, + CTRY_ARGENTINA = 32, + CTRY_ARMENIA = 51, + CTRY_ARUBA = 533, + CTRY_AUSTRALIA = 36, + CTRY_AUSTRIA = 40, + CTRY_AZERBAIJAN = 31, + CTRY_BAHRAIN = 48, + CTRY_BANGLADESH = 50, + CTRY_BARBADOS = 52, + CTRY_BELARUS = 112, + CTRY_BELGIUM = 56, + CTRY_BELIZE = 84, + CTRY_BOLIVIA = 68, + CTRY_BOSNIA_HERZ = 70, + CTRY_BRAZIL = 76, + CTRY_BRUNEI_DARUSSALAM = 96, + CTRY_BULGARIA = 100, + CTRY_CAMBODIA = 116, + CTRY_CANADA = 124, + CTRY_CHILE = 152, + CTRY_CHINA = 156, + CTRY_COLOMBIA = 170, + CTRY_COSTA_RICA = 188, + CTRY_CROATIA = 191, + CTRY_CYPRUS = 196, + CTRY_CZECH = 203, + CTRY_DENMARK = 208, + CTRY_DOMINICAN_REPUBLIC = 214, + CTRY_ECUADOR = 218, + CTRY_EGYPT = 818, + CTRY_EL_SALVADOR = 222, + CTRY_ESTONIA = 233, + CTRY_FAEROE_ISLANDS = 234, + CTRY_FINLAND = 246, + CTRY_FRANCE = 250, + CTRY_GEORGIA = 268, + CTRY_GERMANY = 276, + CTRY_GREECE = 300, + CTRY_GREENLAND = 304, + CTRY_GRENEDA = 308, + CTRY_GUAM = 316, + CTRY_GUATEMALA = 320, + CTRY_HAITI = 332, + CTRY_HONDURAS = 340, + CTRY_HONG_KONG = 344, + CTRY_HUNGARY = 348, + CTRY_ICELAND = 352, + CTRY_INDIA = 356, + CTRY_INDONESIA = 360, + CTRY_IRAN = 364, + CTRY_IRAQ = 368, + CTRY_IRELAND = 372, + CTRY_ISRAEL = 376, + CTRY_ITALY = 380, + CTRY_JAMAICA = 388, + CTRY_JAPAN = 392, + CTRY_JORDAN = 400, + CTRY_KAZAKHSTAN = 398, + CTRY_KENYA = 404, + CTRY_KOREA_NORTH = 408, + CTRY_KOREA_ROC = 410, + CTRY_KOREA_ROC2 = 411, + CTRY_KOREA_ROC3 = 412, + CTRY_KUWAIT = 414, + CTRY_LATVIA = 428, + CTRY_LEBANON = 422, + CTRY_LIBYA = 434, + CTRY_LIECHTENSTEIN = 438, + CTRY_LITHUANIA = 440, + CTRY_LUXEMBOURG = 442, + CTRY_MACAU = 446, + CTRY_MACEDONIA = 807, + CTRY_MALAYSIA = 458, + CTRY_MALTA = 470, + CTRY_MEXICO = 484, + CTRY_MONACO = 492, + CTRY_MOROCCO = 504, + CTRY_NEPAL = 524, + CTRY_NETHERLANDS = 528, + CTRY_NETHERLANDS_ANTILLES = 530, + 
CTRY_NEW_ZEALAND = 554, + CTRY_NICARAGUA = 558, + CTRY_NORWAY = 578, + CTRY_OMAN = 512, + CTRY_PAKISTAN = 586, + CTRY_PANAMA = 591, + CTRY_PAPUA_NEW_GUINEA = 598, + CTRY_PARAGUAY = 600, + CTRY_PERU = 604, + CTRY_PHILIPPINES = 608, + CTRY_POLAND = 616, + CTRY_PORTUGAL = 620, + CTRY_PUERTO_RICO = 630, + CTRY_QATAR = 634, + CTRY_ROMANIA = 642, + CTRY_RUSSIA = 643, + CTRY_SAUDI_ARABIA = 682, + CTRY_SERBIA_MONTENEGRO = 891, + CTRY_SINGAPORE = 702, + CTRY_SLOVAKIA = 703, + CTRY_SLOVENIA = 705, + CTRY_SOUTH_AFRICA = 710, + CTRY_SPAIN = 724, + CTRY_SRI_LANKA = 144, + CTRY_SWEDEN = 752, + CTRY_SWITZERLAND = 756, + CTRY_SYRIA = 760, + CTRY_TAIWAN = 158, + CTRY_THAILAND = 764, + CTRY_TRINIDAD_Y_TOBAGO = 780, + CTRY_TUNISIA = 788, + CTRY_TURKEY = 792, + CTRY_UAE = 784, + CTRY_UKRAINE = 804, + CTRY_UNITED_KINGDOM = 826, + CTRY_UNITED_STATES = 840, + CTRY_UNITED_STATES_FCC49 = 842, + CTRY_URUGUAY = 858, + CTRY_UZBEKISTAN = 860, + CTRY_VENEZUELA = 862, + CTRY_VIET_NAM = 704, + CTRY_YEMEN = 887, + CTRY_ZIMBABWE = 716, + CTRY_JAPAN1 = 393, + CTRY_JAPAN2 = 394, + CTRY_JAPAN3 = 395, + CTRY_JAPAN4 = 396, + CTRY_JAPAN5 = 397, + CTRY_JAPAN6 = 4006, + CTRY_JAPAN7 = 4007, + CTRY_JAPAN8 = 4008, + CTRY_JAPAN9 = 4009, + CTRY_JAPAN10 = 4010, + CTRY_JAPAN11 = 4011, + CTRY_JAPAN12 = 4012, + CTRY_JAPAN13 = 4013, + CTRY_JAPAN14 = 4014, + CTRY_JAPAN15 = 4015, + CTRY_JAPAN16 = 4016, + CTRY_JAPAN17 = 4017, + CTRY_JAPAN18 = 4018, + CTRY_JAPAN19 = 4019, + CTRY_JAPAN20 = 4020, + CTRY_JAPAN21 = 4021, + CTRY_JAPAN22 = 4022, + CTRY_JAPAN23 = 4023, + CTRY_JAPAN24 = 4024, + CTRY_JAPAN25 = 4025, + CTRY_JAPAN26 = 4026, + CTRY_JAPAN27 = 4027, + CTRY_JAPAN28 = 4028, + CTRY_JAPAN29 = 4029, + CTRY_JAPAN30 = 4030, + CTRY_JAPAN31 = 4031, + CTRY_JAPAN32 = 4032, + CTRY_JAPAN33 = 4033, + CTRY_JAPAN34 = 4034, + CTRY_JAPAN35 = 4035, + CTRY_JAPAN36 = 4036, + CTRY_JAPAN37 = 4037, + CTRY_JAPAN38 = 4038, + CTRY_JAPAN39 = 4039, + CTRY_JAPAN40 = 4040, + CTRY_JAPAN41 = 4041, + CTRY_JAPAN42 = 4042, + CTRY_JAPAN43 = 4043, + CTRY_JAPAN44 = 4044, + CTRY_JAPAN45 = 4045, + CTRY_JAPAN46 = 4046, + CTRY_JAPAN47 = 4047, + CTRY_JAPAN48 = 4048, + CTRY_JAPAN49 = 4049, + CTRY_JAPAN50 = 4050, + CTRY_JAPAN51 = 4051, + CTRY_JAPAN52 = 4052, + CTRY_JAPAN53 = 4053, + CTRY_JAPAN54 = 4054, + CTRY_JAPAN55 = 4055, + CTRY_JAPAN56 = 4056, + CTRY_JAPAN57 = 4057, + CTRY_JAPAN58 = 4058, + CTRY_JAPAN59 = 4059, + CTRY_AUSTRALIA2 = 5000, + CTRY_CANADA2 = 5001, + CTRY_BELGIUM2 = 5002 +}; + +bool ath_is_world_regd(struct ath_regulatory *reg); +int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy, + int (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request)); +u32 ath_regd_get_band_ctl(struct ath_regulatory *reg, + enum ieee80211_band band); +int ath_reg_notifier_apply(struct wiphy *wiphy, + struct regulatory_request *request, + struct ath_regulatory *reg); + +#endif diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h new file mode 100644 index 0000000..248c670 --- /dev/null +++ b/drivers/net/wireless/ath/regd_common.h @@ -0,0 +1,475 @@ +/* + * Copyright (c) 2008-2009 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REGD_COMMON_H +#define REGD_COMMON_H + +enum EnumRd { + NO_ENUMRD = 0x00, + NULL1_WORLD = 0x03, + NULL1_ETSIB = 0x07, + NULL1_ETSIC = 0x08, + FCC1_FCCA = 0x10, + FCC1_WORLD = 0x11, + FCC4_FCCA = 0x12, + FCC5_FCCA = 0x13, + FCC6_FCCA = 0x14, + + FCC2_FCCA = 0x20, + FCC2_WORLD = 0x21, + FCC2_ETSIC = 0x22, + FCC6_WORLD = 0x23, + FRANCE_RES = 0x31, + FCC3_FCCA = 0x3A, + FCC3_WORLD = 0x3B, + + ETSI1_WORLD = 0x37, + ETSI3_ETSIA = 0x32, + ETSI2_WORLD = 0x35, + ETSI3_WORLD = 0x36, + ETSI4_WORLD = 0x30, + ETSI4_ETSIC = 0x38, + ETSI5_WORLD = 0x39, + ETSI6_WORLD = 0x34, + ETSI_RESERVED = 0x33, + + MKK1_MKKA = 0x40, + MKK1_MKKB = 0x41, + APL4_WORLD = 0x42, + MKK2_MKKA = 0x43, + APL_RESERVED = 0x44, + APL2_WORLD = 0x45, + APL2_APLC = 0x46, + APL3_WORLD = 0x47, + MKK1_FCCA = 0x48, + APL2_APLD = 0x49, + MKK1_MKKA1 = 0x4A, + MKK1_MKKA2 = 0x4B, + MKK1_MKKC = 0x4C, + + APL3_FCCA = 0x50, + APL1_WORLD = 0x52, + APL1_FCCA = 0x53, + APL1_APLA = 0x54, + APL1_ETSIC = 0x55, + APL2_ETSIC = 0x56, + APL5_WORLD = 0x58, + APL6_WORLD = 0x5B, + APL7_FCCA = 0x5C, + APL8_WORLD = 0x5D, + APL9_WORLD = 0x5E, + + WOR0_WORLD = 0x60, + WOR1_WORLD = 0x61, + WOR2_WORLD = 0x62, + WOR3_WORLD = 0x63, + WOR4_WORLD = 0x64, + WOR5_ETSIC = 0x65, + + WOR01_WORLD = 0x66, + WOR02_WORLD = 0x67, + EU1_WORLD = 0x68, + + WOR9_WORLD = 0x69, + WORA_WORLD = 0x6A, + WORB_WORLD = 0x6B, + + MKK3_MKKB = 0x80, + MKK3_MKKA2 = 0x81, + MKK3_MKKC = 0x82, + + MKK4_MKKB = 0x83, + MKK4_MKKA2 = 0x84, + MKK4_MKKC = 0x85, + + MKK5_MKKB = 0x86, + MKK5_MKKA2 = 0x87, + MKK5_MKKC = 0x88, + + MKK6_MKKB = 0x89, + MKK6_MKKA2 = 0x8A, + MKK6_MKKC = 0x8B, + + MKK7_MKKB = 0x8C, + MKK7_MKKA2 = 0x8D, + MKK7_MKKC = 0x8E, + + MKK8_MKKB = 0x8F, + MKK8_MKKA2 = 0x90, + MKK8_MKKC = 0x91, + + MKK14_MKKA1 = 0x92, + MKK15_MKKA1 = 0x93, + + MKK10_FCCA = 0xD0, + MKK10_MKKA1 = 0xD1, + MKK10_MKKC = 0xD2, + MKK10_MKKA2 = 0xD3, + + MKK11_MKKA = 0xD4, + MKK11_FCCA = 0xD5, + MKK11_MKKA1 = 0xD6, + MKK11_MKKC = 0xD7, + MKK11_MKKA2 = 0xD8, + + MKK12_MKKA = 0xD9, + MKK12_FCCA = 0xDA, + MKK12_MKKA1 = 0xDB, + MKK12_MKKC = 0xDC, + MKK12_MKKA2 = 0xDD, + + MKK13_MKKB = 0xDE, + + MKK3_MKKA = 0xF0, + MKK3_MKKA1 = 0xF1, + MKK3_FCCA = 0xF2, + MKK4_MKKA = 0xF3, + MKK4_MKKA1 = 0xF4, + MKK4_FCCA = 0xF5, + MKK9_MKKA = 0xF6, + MKK10_MKKA = 0xF7, + MKK6_MKKA1 = 0xF8, + MKK6_FCCA = 0xF9, + MKK7_MKKA1 = 0xFA, + MKK7_FCCA = 0xFB, + MKK9_FCCA = 0xFC, + MKK9_MKKA1 = 0xFD, + MKK9_MKKC = 0xFE, + MKK9_MKKA2 = 0xFF, + + WORLD = 0x0199, + DEBUG_REG_DMN = 0x01ff, +}; + +/* Regpair to CTL band mapping */ +static struct reg_dmn_pair_mapping regDomainPairs[] = { + /* regpair, 5 GHz CTL, 2 GHz CTL */ + {NO_ENUMRD, DEBUG_REG_DMN, DEBUG_REG_DMN}, + {NULL1_WORLD, NO_CTL, CTL_ETSI}, + {NULL1_ETSIB, NO_CTL, CTL_ETSI}, + {NULL1_ETSIC, NO_CTL, CTL_ETSI}, + + {FCC2_FCCA, CTL_FCC, CTL_FCC}, + {FCC2_WORLD, CTL_FCC, CTL_ETSI}, + {FCC2_ETSIC, CTL_FCC, CTL_ETSI}, + {FCC3_FCCA, CTL_FCC, CTL_FCC}, + {FCC3_WORLD, CTL_FCC, CTL_ETSI}, + {FCC4_FCCA, CTL_FCC, CTL_FCC}, + {FCC5_FCCA, CTL_FCC, CTL_FCC}, + {FCC6_FCCA, CTL_FCC, CTL_FCC}, + {FCC6_WORLD, CTL_FCC, CTL_ETSI}, + + {ETSI1_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI2_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI3_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI4_WORLD, 
CTL_ETSI, CTL_ETSI}, + {ETSI5_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI6_WORLD, CTL_ETSI, CTL_ETSI}, + + /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */ + {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI}, + {FRANCE_RES, CTL_ETSI, CTL_ETSI}, + + {FCC1_WORLD, CTL_FCC, CTL_ETSI}, + {FCC1_FCCA, CTL_FCC, CTL_FCC}, + {APL1_WORLD, CTL_FCC, CTL_ETSI}, + {APL2_WORLD, CTL_FCC, CTL_ETSI}, + {APL3_WORLD, CTL_FCC, CTL_ETSI}, + {APL4_WORLD, CTL_FCC, CTL_ETSI}, + {APL5_WORLD, CTL_FCC, CTL_ETSI}, + {APL6_WORLD, CTL_ETSI, CTL_ETSI}, + {APL8_WORLD, CTL_ETSI, CTL_ETSI}, + {APL9_WORLD, CTL_ETSI, CTL_ETSI}, + + {APL3_FCCA, CTL_FCC, CTL_FCC}, + {APL1_ETSIC, CTL_FCC, CTL_ETSI}, + {APL2_ETSIC, CTL_FCC, CTL_ETSI}, + {APL2_APLD, CTL_FCC, NO_CTL}, + + {MKK1_MKKA, CTL_MKK, CTL_MKK}, + {MKK1_MKKB, CTL_MKK, CTL_MKK}, + {MKK1_FCCA, CTL_MKK, CTL_FCC}, + {MKK1_MKKA1, CTL_MKK, CTL_MKK}, + {MKK1_MKKA2, CTL_MKK, CTL_MKK}, + {MKK1_MKKC, CTL_MKK, CTL_MKK}, + + {MKK2_MKKA, CTL_MKK, CTL_MKK}, + {MKK3_MKKA, CTL_MKK, CTL_MKK}, + {MKK3_MKKB, CTL_MKK, CTL_MKK}, + {MKK3_MKKA1, CTL_MKK, CTL_MKK}, + {MKK3_MKKA2, CTL_MKK, CTL_MKK}, + {MKK3_MKKC, CTL_MKK, CTL_MKK}, + {MKK3_FCCA, CTL_MKK, CTL_FCC}, + + {MKK4_MKKA, CTL_MKK, CTL_MKK}, + {MKK4_MKKB, CTL_MKK, CTL_MKK}, + {MKK4_MKKA1, CTL_MKK, CTL_MKK}, + {MKK4_MKKA2, CTL_MKK, CTL_MKK}, + {MKK4_MKKC, CTL_MKK, CTL_MKK}, + {MKK4_FCCA, CTL_MKK, CTL_FCC}, + + {MKK5_MKKB, CTL_MKK, CTL_MKK}, + {MKK5_MKKA2, CTL_MKK, CTL_MKK}, + {MKK5_MKKC, CTL_MKK, CTL_MKK}, + + {MKK6_MKKB, CTL_MKK, CTL_MKK}, + {MKK6_MKKA1, CTL_MKK, CTL_MKK}, + {MKK6_MKKA2, CTL_MKK, CTL_MKK}, + {MKK6_MKKC, CTL_MKK, CTL_MKK}, + {MKK6_FCCA, CTL_MKK, CTL_FCC}, + + {MKK7_MKKB, CTL_MKK, CTL_MKK}, + {MKK7_MKKA1, CTL_MKK, CTL_MKK}, + {MKK7_MKKA2, CTL_MKK, CTL_MKK}, + {MKK7_MKKC, CTL_MKK, CTL_MKK}, + {MKK7_FCCA, CTL_MKK, CTL_FCC}, + + {MKK8_MKKB, CTL_MKK, CTL_MKK}, + {MKK8_MKKA2, CTL_MKK, CTL_MKK}, + {MKK8_MKKC, CTL_MKK, CTL_MKK}, + + {MKK9_MKKA, CTL_MKK, CTL_MKK}, + {MKK9_FCCA, CTL_MKK, CTL_FCC}, + {MKK9_MKKA1, CTL_MKK, CTL_MKK}, + {MKK9_MKKA2, CTL_MKK, CTL_MKK}, + {MKK9_MKKC, CTL_MKK, CTL_MKK}, + + {MKK10_MKKA, CTL_MKK, CTL_MKK}, + {MKK10_FCCA, CTL_MKK, CTL_FCC}, + {MKK10_MKKA1, CTL_MKK, CTL_MKK}, + {MKK10_MKKA2, CTL_MKK, CTL_MKK}, + {MKK10_MKKC, CTL_MKK, CTL_MKK}, + + {MKK11_MKKA, CTL_MKK, CTL_MKK}, + {MKK11_FCCA, CTL_MKK, CTL_FCC}, + {MKK11_MKKA1, CTL_MKK, CTL_MKK}, + {MKK11_MKKA2, CTL_MKK, CTL_MKK}, + {MKK11_MKKC, CTL_MKK, CTL_MKK}, + + {MKK12_MKKA, CTL_MKK, CTL_MKK}, + {MKK12_FCCA, CTL_MKK, CTL_FCC}, + {MKK12_MKKA1, CTL_MKK, CTL_MKK}, + {MKK12_MKKA2, CTL_MKK, CTL_MKK}, + {MKK12_MKKC, CTL_MKK, CTL_MKK}, + + {MKK13_MKKB, CTL_MKK, CTL_MKK}, + {MKK14_MKKA1, CTL_MKK, CTL_MKK}, + {MKK15_MKKA1, CTL_MKK, CTL_MKK}, + + {WOR0_WORLD, NO_CTL, NO_CTL}, + {WOR1_WORLD, NO_CTL, NO_CTL}, + {WOR2_WORLD, NO_CTL, NO_CTL}, + {WOR3_WORLD, NO_CTL, NO_CTL}, + {WOR4_WORLD, NO_CTL, NO_CTL}, + {WOR5_ETSIC, NO_CTL, NO_CTL}, + {WOR01_WORLD, NO_CTL, NO_CTL}, + {WOR02_WORLD, NO_CTL, NO_CTL}, + {EU1_WORLD, NO_CTL, NO_CTL}, + {WOR9_WORLD, NO_CTL, NO_CTL}, + {WORA_WORLD, NO_CTL, NO_CTL}, + {WORB_WORLD, NO_CTL, NO_CTL}, +}; + +static struct country_code_to_enum_rd allCountries[] = { + {CTRY_DEBUG, NO_ENUMRD, "DB"}, + {CTRY_DEFAULT, FCC1_FCCA, "CO"}, + {CTRY_ALBANIA, NULL1_WORLD, "AL"}, + {CTRY_ALGERIA, NULL1_WORLD, "DZ"}, + {CTRY_ARGENTINA, FCC3_WORLD, "AR"}, + {CTRY_ARMENIA, ETSI4_WORLD, "AM"}, + {CTRY_ARUBA, ETSI1_WORLD, "AW"}, + {CTRY_AUSTRALIA, FCC2_WORLD, "AU"}, + {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, + {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, + {CTRY_AZERBAIJAN, 
ETSI4_WORLD, "AZ"}, + {CTRY_BAHRAIN, APL6_WORLD, "BH"}, + {CTRY_BANGLADESH, NULL1_WORLD, "BD"}, + {CTRY_BARBADOS, FCC2_WORLD, "BB"}, + {CTRY_BELARUS, ETSI1_WORLD, "BY"}, + {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, + {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, + {CTRY_BELIZE, APL1_ETSIC, "BZ"}, + {CTRY_BOLIVIA, APL1_ETSIC, "BO"}, + {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"}, + {CTRY_BRAZIL, FCC3_WORLD, "BR"}, + {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"}, + {CTRY_BULGARIA, ETSI6_WORLD, "BG"}, + {CTRY_CAMBODIA, ETSI1_WORLD, "KH"}, + {CTRY_CANADA, FCC3_FCCA, "CA"}, + {CTRY_CANADA2, FCC6_FCCA, "CA"}, + {CTRY_CHILE, APL6_WORLD, "CL"}, + {CTRY_CHINA, APL1_WORLD, "CN"}, + {CTRY_COLOMBIA, FCC1_FCCA, "CO"}, + {CTRY_COSTA_RICA, FCC1_WORLD, "CR"}, + {CTRY_CROATIA, ETSI1_WORLD, "HR"}, + {CTRY_CYPRUS, ETSI1_WORLD, "CY"}, + {CTRY_CZECH, ETSI3_WORLD, "CZ"}, + {CTRY_DENMARK, ETSI1_WORLD, "DK"}, + {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO"}, + {CTRY_ECUADOR, FCC1_WORLD, "EC"}, + {CTRY_EGYPT, ETSI3_WORLD, "EG"}, + {CTRY_EL_SALVADOR, FCC1_WORLD, "SV"}, + {CTRY_ESTONIA, ETSI1_WORLD, "EE"}, + {CTRY_FINLAND, ETSI1_WORLD, "FI"}, + {CTRY_FRANCE, ETSI1_WORLD, "FR"}, + {CTRY_GEORGIA, ETSI4_WORLD, "GE"}, + {CTRY_GERMANY, ETSI1_WORLD, "DE"}, + {CTRY_GREECE, ETSI1_WORLD, "GR"}, + {CTRY_GREENLAND, ETSI1_WORLD, "GL"}, + {CTRY_GRENEDA, FCC3_FCCA, "GD"}, + {CTRY_GUAM, FCC1_FCCA, "GU"}, + {CTRY_GUATEMALA, FCC1_FCCA, "GT"}, + {CTRY_HAITI, ETSI1_WORLD, "HT"}, + {CTRY_HONDURAS, NULL1_WORLD, "HN"}, + {CTRY_HONG_KONG, FCC3_WORLD, "HK"}, + {CTRY_HUNGARY, ETSI1_WORLD, "HU"}, + {CTRY_ICELAND, ETSI1_WORLD, "IS"}, + {CTRY_INDIA, APL6_WORLD, "IN"}, + {CTRY_INDONESIA, NULL1_WORLD, "ID"}, + {CTRY_IRAN, APL1_WORLD, "IR"}, + {CTRY_IRELAND, ETSI1_WORLD, "IE"}, + {CTRY_ISRAEL, NULL1_WORLD, "IL"}, + {CTRY_ITALY, ETSI1_WORLD, "IT"}, + {CTRY_JAMAICA, FCC3_WORLD, "JM"}, + + {CTRY_JAPAN, MKK1_MKKA, "JP"}, + {CTRY_JAPAN1, MKK1_MKKB, "JP"}, + {CTRY_JAPAN2, MKK1_FCCA, "JP"}, + {CTRY_JAPAN3, MKK2_MKKA, "JP"}, + {CTRY_JAPAN4, MKK1_MKKA1, "JP"}, + {CTRY_JAPAN5, MKK1_MKKA2, "JP"}, + {CTRY_JAPAN6, MKK1_MKKC, "JP"}, + {CTRY_JAPAN7, MKK3_MKKB, "JP"}, + {CTRY_JAPAN8, MKK3_MKKA2, "JP"}, + {CTRY_JAPAN9, MKK3_MKKC, "JP"}, + {CTRY_JAPAN10, MKK4_MKKB, "JP"}, + {CTRY_JAPAN11, MKK4_MKKA2, "JP"}, + {CTRY_JAPAN12, MKK4_MKKC, "JP"}, + {CTRY_JAPAN13, MKK5_MKKB, "JP"}, + {CTRY_JAPAN14, MKK5_MKKA2, "JP"}, + {CTRY_JAPAN15, MKK5_MKKC, "JP"}, + {CTRY_JAPAN16, MKK6_MKKB, "JP"}, + {CTRY_JAPAN17, MKK6_MKKA2, "JP"}, + {CTRY_JAPAN18, MKK6_MKKC, "JP"}, + {CTRY_JAPAN19, MKK7_MKKB, "JP"}, + {CTRY_JAPAN20, MKK7_MKKA2, "JP"}, + {CTRY_JAPAN21, MKK7_MKKC, "JP"}, + {CTRY_JAPAN22, MKK8_MKKB, "JP"}, + {CTRY_JAPAN23, MKK8_MKKA2, "JP"}, + {CTRY_JAPAN24, MKK8_MKKC, "JP"}, + {CTRY_JAPAN25, MKK3_MKKA, "JP"}, + {CTRY_JAPAN26, MKK3_MKKA1, "JP"}, + {CTRY_JAPAN27, MKK3_FCCA, "JP"}, + {CTRY_JAPAN28, MKK4_MKKA1, "JP"}, + {CTRY_JAPAN29, MKK4_FCCA, "JP"}, + {CTRY_JAPAN30, MKK6_MKKA1, "JP"}, + {CTRY_JAPAN31, MKK6_FCCA, "JP"}, + {CTRY_JAPAN32, MKK7_MKKA1, "JP"}, + {CTRY_JAPAN33, MKK7_FCCA, "JP"}, + {CTRY_JAPAN34, MKK9_MKKA, "JP"}, + {CTRY_JAPAN35, MKK10_MKKA, "JP"}, + {CTRY_JAPAN36, MKK4_MKKA, "JP"}, + {CTRY_JAPAN37, MKK9_FCCA, "JP"}, + {CTRY_JAPAN38, MKK9_MKKA1, "JP"}, + {CTRY_JAPAN39, MKK9_MKKC, "JP"}, + {CTRY_JAPAN40, MKK9_MKKA2, "JP"}, + {CTRY_JAPAN41, MKK10_FCCA, "JP"}, + {CTRY_JAPAN42, MKK10_MKKA1, "JP"}, + {CTRY_JAPAN43, MKK10_MKKC, "JP"}, + {CTRY_JAPAN44, MKK10_MKKA2, "JP"}, + {CTRY_JAPAN45, MKK11_MKKA, "JP"}, + {CTRY_JAPAN46, MKK11_FCCA, "JP"}, + {CTRY_JAPAN47, MKK11_MKKA1, "JP"}, + {CTRY_JAPAN48, MKK11_MKKC, 
"JP"}, + {CTRY_JAPAN49, MKK11_MKKA2, "JP"}, + {CTRY_JAPAN50, MKK12_MKKA, "JP"}, + {CTRY_JAPAN51, MKK12_FCCA, "JP"}, + {CTRY_JAPAN52, MKK12_MKKA1, "JP"}, + {CTRY_JAPAN53, MKK12_MKKC, "JP"}, + {CTRY_JAPAN54, MKK12_MKKA2, "JP"}, + {CTRY_JAPAN57, MKK13_MKKB, "JP"}, + {CTRY_JAPAN58, MKK14_MKKA1, "JP"}, + {CTRY_JAPAN59, MKK15_MKKA1, "JP"}, + + {CTRY_JORDAN, ETSI2_WORLD, "JO"}, + {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ"}, + {CTRY_KOREA_NORTH, APL9_WORLD, "KP"}, + {CTRY_KOREA_ROC, APL9_WORLD, "KR"}, + {CTRY_KOREA_ROC2, APL2_WORLD, "K2"}, + {CTRY_KOREA_ROC3, APL9_WORLD, "K3"}, + {CTRY_KUWAIT, ETSI3_WORLD, "KW"}, + {CTRY_LATVIA, ETSI1_WORLD, "LV"}, + {CTRY_LEBANON, NULL1_WORLD, "LB"}, + {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"}, + {CTRY_LITHUANIA, ETSI1_WORLD, "LT"}, + {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU"}, + {CTRY_MACAU, FCC2_WORLD, "MO"}, + {CTRY_MACEDONIA, NULL1_WORLD, "MK"}, + {CTRY_MALAYSIA, APL8_WORLD, "MY"}, + {CTRY_MALTA, ETSI1_WORLD, "MT"}, + {CTRY_MEXICO, FCC1_FCCA, "MX"}, + {CTRY_MONACO, ETSI4_WORLD, "MC"}, + {CTRY_MOROCCO, APL4_WORLD, "MA"}, + {CTRY_NEPAL, APL1_WORLD, "NP"}, + {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"}, + {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"}, + {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"}, + {CTRY_NORWAY, ETSI1_WORLD, "NO"}, + {CTRY_OMAN, FCC3_WORLD, "OM"}, + {CTRY_PAKISTAN, NULL1_WORLD, "PK"}, + {CTRY_PANAMA, FCC1_FCCA, "PA"}, + {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"}, + {CTRY_PERU, APL1_WORLD, "PE"}, + {CTRY_PHILIPPINES, APL1_WORLD, "PH"}, + {CTRY_POLAND, ETSI1_WORLD, "PL"}, + {CTRY_PORTUGAL, ETSI1_WORLD, "PT"}, + {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"}, + {CTRY_QATAR, APL1_WORLD, "QA"}, + {CTRY_ROMANIA, NULL1_WORLD, "RO"}, + {CTRY_RUSSIA, NULL1_WORLD, "RU"}, + {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, + {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"}, + {CTRY_SINGAPORE, APL6_WORLD, "SG"}, + {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"}, + {CTRY_SLOVENIA, ETSI1_WORLD, "SI"}, + {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA"}, + {CTRY_SPAIN, ETSI1_WORLD, "ES"}, + {CTRY_SRI_LANKA, FCC3_WORLD, "LK"}, + {CTRY_SWEDEN, ETSI1_WORLD, "SE"}, + {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"}, + {CTRY_SYRIA, NULL1_WORLD, "SY"}, + {CTRY_TAIWAN, APL3_FCCA, "TW"}, + {CTRY_THAILAND, FCC3_WORLD, "TH"}, + {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"}, + {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, + {CTRY_TURKEY, ETSI3_WORLD, "TR"}, + {CTRY_UKRAINE, NULL1_WORLD, "UA"}, + {CTRY_UAE, NULL1_WORLD, "AE"}, + {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"}, + {CTRY_UNITED_STATES, FCC3_FCCA, "US"}, + /* This "PS" is for US public safety actually... 
to support this we + * would need to assign new special alpha2 to CRDA db as with the world + * regdomain and use another alpha2 */ + {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"}, + {CTRY_URUGUAY, FCC3_WORLD, "UY"}, + {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"}, + {CTRY_VENEZUELA, APL2_ETSIC, "VE"}, + {CTRY_VIET_NAM, NULL1_WORLD, "VN"}, + {CTRY_YEMEN, NULL1_WORLD, "YE"}, + {CTRY_ZIMBABWE, NULL1_WORLD, "ZW"}, +}; + +#endif diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile new file mode 100644 index 0000000..5e83b6f --- /dev/null +++ b/drivers/net/wireless/b43/Makefile @@ -0,0 +1,22 @@ +b43-y += main.o +b43-y += tables.o +b43-$(CONFIG_B43_NPHY) += tables_nphy.o +b43-y += phy_common.o +b43-y += phy_g.o +b43-y += phy_a.o +b43-$(CONFIG_B43_NPHY) += phy_n.o +b43-$(CONFIG_B43_PHY_LP) += phy_lp.o +b43-$(CONFIG_B43_PHY_LP) += tables_lpphy.o +b43-y += sysfs.o +b43-y += xmit.o +b43-y += lo.o +b43-y += wa.o +b43-y += dma.o +b43-y += pio.o +b43-y += rfkill.o +b43-$(CONFIG_B43_LEDS) += leds.o +b43-$(CONFIG_B43_PCMCIA) += pcmcia.o +b43-$(CONFIG_B43_SDIO) += sdio.o +b43-$(CONFIG_B43_DEBUG) += debugfs.o + +obj-$(CONFIG_B43) += b43.o diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h new file mode 100644 index 0000000..2510fbf --- /dev/null +++ b/drivers/net/wireless/b43/b43.h @@ -0,0 +1,927 @@ +#ifndef B43_H_ +#define B43_H_ + +#include +#include +#include +#include +#include +#include + +#include "debugfs.h" +#include "leds.h" +#include "rfkill.h" +#include "lo.h" +#include "phy_common.h" + + +/* The unique identifier of the firmware that's officially supported by + * this driver version. */ +#define B43_SUPPORTED_FIRMWARE_ID "FW13" + + +#ifdef CONFIG_B43_DEBUG +# define B43_DEBUG 1 +#else +# define B43_DEBUG 0 +#endif + +/* MMIO offsets */ +#define B43_MMIO_DMA0_REASON 0x20 +#define B43_MMIO_DMA0_IRQ_MASK 0x24 +#define B43_MMIO_DMA1_REASON 0x28 +#define B43_MMIO_DMA1_IRQ_MASK 0x2C +#define B43_MMIO_DMA2_REASON 0x30 +#define B43_MMIO_DMA2_IRQ_MASK 0x34 +#define B43_MMIO_DMA3_REASON 0x38 +#define B43_MMIO_DMA3_IRQ_MASK 0x3C +#define B43_MMIO_DMA4_REASON 0x40 +#define B43_MMIO_DMA4_IRQ_MASK 0x44 +#define B43_MMIO_DMA5_REASON 0x48 +#define B43_MMIO_DMA5_IRQ_MASK 0x4C +#define B43_MMIO_MACCTL 0x120 /* MAC control */ +#define B43_MMIO_MACCMD 0x124 /* MAC command */ +#define B43_MMIO_GEN_IRQ_REASON 0x128 +#define B43_MMIO_GEN_IRQ_MASK 0x12C +#define B43_MMIO_RAM_CONTROL 0x130 +#define B43_MMIO_RAM_DATA 0x134 +#define B43_MMIO_PS_STATUS 0x140 +#define B43_MMIO_RADIO_HWENABLED_HI 0x158 +#define B43_MMIO_SHM_CONTROL 0x160 +#define B43_MMIO_SHM_DATA 0x164 +#define B43_MMIO_SHM_DATA_UNALIGNED 0x166 +#define B43_MMIO_XMITSTAT_0 0x170 +#define B43_MMIO_XMITSTAT_1 0x174 +#define B43_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */ +#define B43_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ +#define B43_MMIO_TSF_CFP_REP 0x188 +#define B43_MMIO_TSF_CFP_START 0x18C +#define B43_MMIO_TSF_CFP_MAXDUR 0x190 + +/* 32-bit DMA */ +#define B43_MMIO_DMA32_BASE0 0x200 +#define B43_MMIO_DMA32_BASE1 0x220 +#define B43_MMIO_DMA32_BASE2 0x240 +#define B43_MMIO_DMA32_BASE3 0x260 +#define B43_MMIO_DMA32_BASE4 0x280 +#define B43_MMIO_DMA32_BASE5 0x2A0 +/* 64-bit DMA */ +#define B43_MMIO_DMA64_BASE0 0x200 +#define B43_MMIO_DMA64_BASE1 0x240 +#define B43_MMIO_DMA64_BASE2 0x280 +#define B43_MMIO_DMA64_BASE3 0x2C0 +#define B43_MMIO_DMA64_BASE4 0x300 +#define B43_MMIO_DMA64_BASE5 0x340 + +/* PIO on core rev < 11 */ +#define B43_MMIO_PIO_BASE0 0x300 +#define B43_MMIO_PIO_BASE1 0x310 
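Worth noting about the register map just listed: the per-queue DMA register blocks sit at fixed strides (0x20 apart for the 32-bit engine, 0x40 apart for the 64-bit engine), so a controller's base address can be derived arithmetically rather than by switching over the individual B43_MMIO_*_BASEn constants. A minimal stand-alone sketch follows, assuming nothing beyond the constants above; the helper name is invented for illustration and is not a function from the b43 driver.

/* Editor's sketch: compute a DMA controller's MMIO base from its index. */
#include <stdint.h>

#define B43_MMIO_DMA32_BASE0 0x200 /* 32-bit DMA engines, 0x20 apart */
#define B43_MMIO_DMA64_BASE0 0x200 /* 64-bit DMA engines, 0x40 apart */

static uint16_t example_dma_base(int use_dma64, int controller_idx)
{
	if (use_dma64)
		return B43_MMIO_DMA64_BASE0 + 0x40 * controller_idx;
	return B43_MMIO_DMA32_BASE0 + 0x20 * controller_idx;
}
/* e.g. example_dma_base(1, 3) == 0x2C0, matching B43_MMIO_DMA64_BASE3 above */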
+#define B43_MMIO_PIO_BASE2 0x320 +#define B43_MMIO_PIO_BASE3 0x330 +#define B43_MMIO_PIO_BASE4 0x340 +#define B43_MMIO_PIO_BASE5 0x350 +#define B43_MMIO_PIO_BASE6 0x360 +#define B43_MMIO_PIO_BASE7 0x370 +/* PIO on core rev >= 11 */ +#define B43_MMIO_PIO11_BASE0 0x200 +#define B43_MMIO_PIO11_BASE1 0x240 +#define B43_MMIO_PIO11_BASE2 0x280 +#define B43_MMIO_PIO11_BASE3 0x2C0 +#define B43_MMIO_PIO11_BASE4 0x300 +#define B43_MMIO_PIO11_BASE5 0x340 + +#define B43_MMIO_PHY_VER 0x3E0 +#define B43_MMIO_PHY_RADIO 0x3E2 +#define B43_MMIO_PHY0 0x3E6 +#define B43_MMIO_ANTENNA 0x3E8 +#define B43_MMIO_CHANNEL 0x3F0 +#define B43_MMIO_CHANNEL_EXT 0x3F4 +#define B43_MMIO_RADIO_CONTROL 0x3F6 +#define B43_MMIO_RADIO_DATA_HIGH 0x3F8 +#define B43_MMIO_RADIO_DATA_LOW 0x3FA +#define B43_MMIO_PHY_CONTROL 0x3FC +#define B43_MMIO_PHY_DATA 0x3FE +#define B43_MMIO_MACFILTER_CONTROL 0x420 +#define B43_MMIO_MACFILTER_DATA 0x422 +#define B43_MMIO_RCMTA_COUNT 0x43C +#define B43_MMIO_RADIO_HWENABLED_LO 0x49A +#define B43_MMIO_GPIO_CONTROL 0x49C +#define B43_MMIO_GPIO_MASK 0x49E +#define B43_MMIO_TSF_CFP_START_LOW 0x604 +#define B43_MMIO_TSF_CFP_START_HIGH 0x606 +#define B43_MMIO_TSF_CFP_PRETBTT 0x612 +#define B43_MMIO_TSF_0 0x632 /* core rev < 3 only */ +#define B43_MMIO_TSF_1 0x634 /* core rev < 3 only */ +#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ +#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ +#define B43_MMIO_RNG 0x65A +#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */ +#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ +#define B43_MMIO_IFSCTL_USE_EDCF 0x0004 +#define B43_MMIO_POWERUP_DELAY 0x6A8 +#define B43_MMIO_BTCOEX_CTL 0x6B4 /* Bluetooth Coexistence Control */ +#define B43_MMIO_BTCOEX_STAT 0x6B6 /* Bluetooth Coexistence Status */ +#define B43_MMIO_BTCOEX_TXCTL 0x6B8 /* Bluetooth Coexistence Transmit Control */ + +/* SPROM boardflags_lo values */ +#define B43_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistance */ +#define B43_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */ +#define B43_BFL_AIRLINEMODE 0x0004 /* implements GPIO 13 radio disable indication */ +#define B43_BFL_RSSI 0x0008 /* software calculates nrssi slope. */ +#define B43_BFL_ENETSPI 0x0010 /* has ephy roboswitch spi */ +#define B43_BFL_XTAL_NOSLOW 0x0020 /* no slow clock available */ +#define B43_BFL_CCKHIPWR 0x0040 /* can do high power CCK transmission */ +#define B43_BFL_ENETADM 0x0080 /* has ADMtek switch */ +#define B43_BFL_ENETVLAN 0x0100 /* can do vlan */ +#define B43_BFL_AFTERBURNER 0x0200 /* supports Afterburner mode */ +#define B43_BFL_NOPCI 0x0400 /* leaves PCI floating */ +#define B43_BFL_FEM 0x0800 /* supports the Front End Module */ +#define B43_BFL_EXTLNA 0x1000 /* has an external LNA */ +#define B43_BFL_HGPA 0x2000 /* had high gain PA */ +#define B43_BFL_BTCMOD 0x4000 /* BFL_BTCOEXIST is given in alternate GPIOs */ +#define B43_BFL_ALTIQ 0x8000 /* alternate I/Q settings */ + +/* SPROM boardflags_hi values */ +#define B43_BFH_NOPA 0x0001 /* has no PA */ +#define B43_BFH_RSSIINV 0x0002 /* RSSI uses positive slope (not TSSI) */ +#define B43_BFH_PAREF 0x0004 /* uses the PARef LDO */ +#define B43_BFH_3TSWITCH 0x0008 /* uses a triple throw switch shared + * with bluetooth */ +#define B43_BFH_PHASESHIFT 0x0010 /* can support phase shifter */ +#define B43_BFH_BUCKBOOST 0x0020 /* has buck/booster */ +#define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna + * with bluetooth */ + +/* GPIO register offset, in both ChipCommon and PCI core. 
*/ +#define B43_GPIO_CONTROL 0x6c + +/* SHM Routing */ +enum { + B43_SHM_UCODE, /* Microcode memory */ + B43_SHM_SHARED, /* Shared memory */ + B43_SHM_SCRATCH, /* Scratch memory */ + B43_SHM_HW, /* Internal hardware register */ + B43_SHM_RCMTA, /* Receive match transmitter address (rev >= 5 only) */ +}; +/* SHM Routing modifiers */ +#define B43_SHM_AUTOINC_R 0x0200 /* Auto-increment address on read */ +#define B43_SHM_AUTOINC_W 0x0100 /* Auto-increment address on write */ +#define B43_SHM_AUTOINC_RW (B43_SHM_AUTOINC_R | \ + B43_SHM_AUTOINC_W) + +/* Misc SHM_SHARED offsets */ +#define B43_SHM_SH_WLCOREREV 0x0016 /* 802.11 core revision */ +#define B43_SHM_SH_PCTLWDPOS 0x0008 +#define B43_SHM_SH_RXPADOFF 0x0034 /* RX Padding data offset (PIO only) */ +#define B43_SHM_SH_FWCAPA 0x0042 /* Firmware capabilities (Opensource firmware only) */ +#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */ +#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */ +#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */ +#define B43_SHM_SH_HOSTFLO 0x005E /* Hostflags for ucode options (low) */ +#define B43_SHM_SH_HOSTFMI 0x0060 /* Hostflags for ucode options (middle) */ +#define B43_SHM_SH_HOSTFHI 0x0062 /* Hostflags for ucode options (high) */ +#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */ +#define B43_SHM_SH_RADAR 0x0066 /* Radar register */ +#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */ +#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */ +#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ +#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5Ghz channel */ +#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ +/* TSSI information */ +#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */ +#define B43_SHM_SH_TSSI_OFDM_A 0x0068 /* TSSI for last 4 OFDM frames (32bit) */ +#define B43_SHM_SH_TSSI_OFDM_G 0x0070 /* TSSI for last 4 OFDM frames (32bit) */ +#define B43_TSSI_MAX 0x7F /* Max value for one TSSI value */ +/* SHM_SHARED TX FIFO variables */ +#define B43_SHM_SH_SIZE01 0x0098 /* TX FIFO size for FIFO 0 (low) and 1 (high) */ +#define B43_SHM_SH_SIZE23 0x009A /* TX FIFO size for FIFO 2 and 3 */ +#define B43_SHM_SH_SIZE45 0x009C /* TX FIFO size for FIFO 4 and 5 */ +#define B43_SHM_SH_SIZE67 0x009E /* TX FIFO size for FIFO 6 and 7 */ +/* SHM_SHARED background noise */ +#define B43_SHM_SH_JSSI0 0x0088 /* Measure JSSI 0 */ +#define B43_SHM_SH_JSSI1 0x008A /* Measure JSSI 1 */ +#define B43_SHM_SH_JSSIAUX 0x008C /* Measure JSSI AUX */ +/* SHM_SHARED crypto engine */ +#define B43_SHM_SH_DEFAULTIV 0x003C /* Default IV location */ +#define B43_SHM_SH_NRRXTRANS 0x003E /* # of soft RX transmitter addresses (max 8) */ +#define B43_SHM_SH_KTP 0x0056 /* Key table pointer */ +#define B43_SHM_SH_TKIPTSCTTAK 0x0318 +#define B43_SHM_SH_KEYIDXBLOCK 0x05D4 /* Key index/algorithm block (v4 firmware) */ +#define B43_SHM_SH_PSM 0x05F4 /* PSM transmitter address match block (rev < 5) */ +/* SHM_SHARED WME variables */ +#define B43_SHM_SH_EDCFSTAT 0x000E /* EDCF status */ +#define B43_SHM_SH_TXFCUR 0x0030 /* TXF current index */ +#define B43_SHM_SH_EDCFQ 0x0240 /* EDCF Q info */ +/* SHM_SHARED powersave mode related */ +#define B43_SHM_SH_SLOTT 0x0010 /* Slot time */ +#define B43_SHM_SH_DTIMPER 0x0012 /* DTIM period */ +#define B43_SHM_SH_NOSLPZNATDTIM 0x004C /* NOSLPZNAT DTIM */ +/* SHM_SHARED beacon/AP variables */ +#define B43_SHM_SH_BTL0 0x0018 /* Beacon template length 0 */ +#define 
B43_SHM_SH_BTL1 0x001A /* Beacon template length 1 */ +#define B43_SHM_SH_BTSFOFF 0x001C /* Beacon TSF offset */ +#define B43_SHM_SH_TIMBPOS 0x001E /* TIM B position in beacon */ +#define B43_SHM_SH_DTIMP 0x0012 /* DTIP period */ +#define B43_SHM_SH_MCASTCOOKIE 0x00A8 /* Last bcast/mcast frame ID */ +#define B43_SHM_SH_SFFBLIM 0x0044 /* Short frame fallback retry limit */ +#define B43_SHM_SH_LFFBLIM 0x0046 /* Long frame fallback retry limit */ +#define B43_SHM_SH_BEACPHYCTL 0x0054 /* Beacon PHY TX control word (see PHY TX control) */ +#define B43_SHM_SH_EXTNPHYCTL 0x00B0 /* Extended bytes for beacon PHY control (N) */ +/* SHM_SHARED ACK/CTS control */ +#define B43_SHM_SH_ACKCTSPHYCTL 0x0022 /* ACK/CTS PHY control word (see PHY TX control) */ +/* SHM_SHARED probe response variables */ +#define B43_SHM_SH_PRSSID 0x0160 /* Probe Response SSID */ +#define B43_SHM_SH_PRSSIDLEN 0x0048 /* Probe Response SSID length */ +#define B43_SHM_SH_PRTLEN 0x004A /* Probe Response template length */ +#define B43_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */ +#define B43_SHM_SH_PRPHYCTL 0x0188 /* Probe Response PHY TX control word */ +/* SHM_SHARED rate tables */ +#define B43_SHM_SH_OFDMDIRECT 0x01C0 /* Pointer to OFDM direct map */ +#define B43_SHM_SH_OFDMBASIC 0x01E0 /* Pointer to OFDM basic rate map */ +#define B43_SHM_SH_CCKDIRECT 0x0200 /* Pointer to CCK direct map */ +#define B43_SHM_SH_CCKBASIC 0x0220 /* Pointer to CCK basic rate map */ +/* SHM_SHARED microcode soft registers */ +#define B43_SHM_SH_UCODEREV 0x0000 /* Microcode revision */ +#define B43_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */ +#define B43_SHM_SH_UCODEDATE 0x0004 /* Microcode date */ +#define B43_SHM_SH_UCODETIME 0x0006 /* Microcode time */ +#define B43_SHM_SH_UCODESTAT 0x0040 /* Microcode debug status code */ +#define B43_SHM_SH_UCODESTAT_INVALID 0 +#define B43_SHM_SH_UCODESTAT_INIT 1 +#define B43_SHM_SH_UCODESTAT_ACTIVE 2 +#define B43_SHM_SH_UCODESTAT_SUSP 3 /* suspended */ +#define B43_SHM_SH_UCODESTAT_SLEEP 4 /* asleep (PS) */ +#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */ +#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */ +#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */ +/* SHM_SHARED tx iq workarounds */ +#define B43_SHM_SH_NPHY_TXIQW0 0x0700 +#define B43_SHM_SH_NPHY_TXIQW1 0x0702 +#define B43_SHM_SH_NPHY_TXIQW2 0x0704 +#define B43_SHM_SH_NPHY_TXIQW3 0x0706 +/* SHM_SHARED tx pwr ctrl */ +#define B43_SHM_SH_NPHY_TXPWR_INDX0 0x0708 +#define B43_SHM_SH_NPHY_TXPWR_INDX1 0x070E + +/* SHM_SCRATCH offsets */ +#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */ +#define B43_SHM_SC_MAXCONT 0x0004 /* Maximum contention window */ +#define B43_SHM_SC_CURCONT 0x0005 /* Current contention window */ +#define B43_SHM_SC_SRLIMIT 0x0006 /* Short retry count limit */ +#define B43_SHM_SC_LRLIMIT 0x0007 /* Long retry count limit */ +#define B43_SHM_SC_DTIMC 0x0008 /* Current DTIM count */ +#define B43_SHM_SC_BTL0LEN 0x0015 /* Beacon 0 template length */ +#define B43_SHM_SC_BTL1LEN 0x0016 /* Beacon 1 template length */ +#define B43_SHM_SC_SCFB 0x0017 /* Short frame transmit count threshold for rate fallback */ +#define B43_SHM_SC_LCFB 0x0018 /* Long frame transmit count threshold for rate fallback */ + +/* Hardware Radio Enable masks */ +#define B43_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16) +#define B43_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4) + +/* HostFlags. 
See b43_hf_read/write() */ +#define B43_HF_ANTDIVHELP 0x000000000001ULL /* ucode antenna div helper */ +#define B43_HF_SYMW 0x000000000002ULL /* G-PHY SYM workaround */ +#define B43_HF_RXPULLW 0x000000000004ULL /* RX pullup workaround */ +#define B43_HF_CCKBOOST 0x000000000008ULL /* 4dB CCK power boost (exclusive with OFDM boost) */ +#define B43_HF_BTCOEX 0x000000000010ULL /* Bluetooth coexistance */ +#define B43_HF_GDCW 0x000000000020ULL /* G-PHY DC canceller filter bw workaround */ +#define B43_HF_OFDMPABOOST 0x000000000040ULL /* Enable PA gain boost for OFDM */ +#define B43_HF_ACPR 0x000000000080ULL /* Disable for Japan, channel 14 */ +#define B43_HF_EDCF 0x000000000100ULL /* on if WME and MAC suspended */ +#define B43_HF_TSSIRPSMW 0x000000000200ULL /* TSSI reset PSM ucode workaround */ +#define B43_HF_20IN40IQW 0x000000000200ULL /* 20 in 40 MHz I/Q workaround (rev >= 13 only) */ +#define B43_HF_DSCRQ 0x000000000400ULL /* Disable slow clock request in ucode */ +#define B43_HF_ACIW 0x000000000800ULL /* ACI workaround: shift bits by 2 on PHY CRS */ +#define B43_HF_2060W 0x000000001000ULL /* 2060 radio workaround */ +#define B43_HF_RADARW 0x000000002000ULL /* Radar workaround */ +#define B43_HF_USEDEFKEYS 0x000000004000ULL /* Enable use of default keys */ +#define B43_HF_AFTERBURNER 0x000000008000ULL /* Afterburner enabled */ +#define B43_HF_BT4PRIOCOEX 0x000000010000ULL /* Bluetooth 4-priority coexistance */ +#define B43_HF_FWKUP 0x000000020000ULL /* Fast wake-up ucode */ +#define B43_HF_VCORECALC 0x000000040000ULL /* Force VCO recalculation when powering up synthpu */ +#define B43_HF_PCISCW 0x000000080000ULL /* PCI slow clock workaround */ +#define B43_HF_4318TSSI 0x000000200000ULL /* 4318 TSSI */ +#define B43_HF_FBCMCFIFO 0x000000400000ULL /* Flush bcast/mcast FIFO immediately */ +#define B43_HF_HWPCTL 0x000000800000ULL /* Enable hardwarre power control */ +#define B43_HF_BTCOEXALT 0x000001000000ULL /* Bluetooth coexistance in alternate pins */ +#define B43_HF_TXBTCHECK 0x000002000000ULL /* Bluetooth check during transmission */ +#define B43_HF_SKCFPUP 0x000004000000ULL /* Skip CFP update */ +#define B43_HF_N40W 0x000008000000ULL /* N PHY 40 MHz workaround (rev >= 13 only) */ +#define B43_HF_ANTSEL 0x000020000000ULL /* Antenna selection (for testing antenna div.) */ +#define B43_HF_BT3COEXT 0x000020000000ULL /* Bluetooth 3-wire coexistence (rev >= 13 only) */ +#define B43_HF_BTCANT 0x000040000000ULL /* Bluetooth coexistence (antenna mode) (rev >= 13 only) */ +#define B43_HF_ANTSELEN 0x000100000000ULL /* Antenna selection enabled (rev >= 13 only) */ +#define B43_HF_ANTSELMODE 0x000200000000ULL /* Antenna selection mode (rev >= 13 only) */ +#define B43_HF_MLADVW 0x001000000000ULL /* N PHY ML ADV workaround (rev >= 13 only) */ +#define B43_HF_PR45960W 0x080000000000ULL /* PR 45960 workaround (rev >= 13 only) */ + +/* Firmware capabilities field in SHM (Opensource firmware only) */ +#define B43_FWCAPA_HWCRYPTO 0x0001 +#define B43_FWCAPA_QOS 0x0002 + +/* MacFilter offsets. 
*/ +#define B43_MACFILTER_SELF 0x0000 +#define B43_MACFILTER_BSSID 0x0003 + +/* PowerControl */ +#define B43_PCTL_IN 0xB0 +#define B43_PCTL_OUT 0xB4 +#define B43_PCTL_OUTENABLE 0xB8 +#define B43_PCTL_XTAL_POWERUP 0x40 +#define B43_PCTL_PLL_POWERDOWN 0x80 + +/* PowerControl Clock Modes */ +#define B43_PCTL_CLK_FAST 0x00 +#define B43_PCTL_CLK_SLOW 0x01 +#define B43_PCTL_CLK_DYNAMIC 0x02 + +#define B43_PCTL_FORCE_SLOW 0x0800 +#define B43_PCTL_FORCE_PLL 0x1000 +#define B43_PCTL_DYN_XTAL 0x2000 + +/* PHYVersioning */ +#define B43_PHYTYPE_A 0x00 +#define B43_PHYTYPE_B 0x01 +#define B43_PHYTYPE_G 0x02 +#define B43_PHYTYPE_N 0x04 +#define B43_PHYTYPE_LP 0x05 + +/* PHYRegisters */ +#define B43_PHY_ILT_A_CTRL 0x0072 +#define B43_PHY_ILT_A_DATA1 0x0073 +#define B43_PHY_ILT_A_DATA2 0x0074 +#define B43_PHY_G_LO_CONTROL 0x0810 +#define B43_PHY_ILT_G_CTRL 0x0472 +#define B43_PHY_ILT_G_DATA1 0x0473 +#define B43_PHY_ILT_G_DATA2 0x0474 +#define B43_PHY_A_PCTL 0x007B +#define B43_PHY_G_PCTL 0x0029 +#define B43_PHY_A_CRS 0x0029 +#define B43_PHY_RADIO_BITFIELD 0x0401 +#define B43_PHY_G_CRS 0x0429 +#define B43_PHY_NRSSILT_CTRL 0x0803 +#define B43_PHY_NRSSILT_DATA 0x0804 + +/* RadioRegisters */ +#define B43_RADIOCTL_ID 0x01 + +/* MAC Control bitfield */ +#define B43_MACCTL_ENABLED 0x00000001 /* MAC Enabled */ +#define B43_MACCTL_PSM_RUN 0x00000002 /* Run Microcode */ +#define B43_MACCTL_PSM_JMP0 0x00000004 /* Microcode jump to 0 */ +#define B43_MACCTL_SHM_ENABLED 0x00000100 /* SHM Enabled */ +#define B43_MACCTL_SHM_UPPER 0x00000200 /* SHM Upper */ +#define B43_MACCTL_IHR_ENABLED 0x00000400 /* IHR Region Enabled */ +#define B43_MACCTL_PSM_DBG 0x00002000 /* Microcode debugging enabled */ +#define B43_MACCTL_GPOUTSMSK 0x0000C000 /* GPOUT Select Mask */ +#define B43_MACCTL_BE 0x00010000 /* Big Endian mode */ +#define B43_MACCTL_INFRA 0x00020000 /* Infrastructure mode */ +#define B43_MACCTL_AP 0x00040000 /* AccessPoint mode */ +#define B43_MACCTL_RADIOLOCK 0x00080000 /* Radio lock */ +#define B43_MACCTL_BEACPROMISC 0x00100000 /* Beacon Promiscuous */ +#define B43_MACCTL_KEEP_BADPLCP 0x00200000 /* Keep frames with bad PLCP */ +#define B43_MACCTL_KEEP_CTL 0x00400000 /* Keep control frames */ +#define B43_MACCTL_KEEP_BAD 0x00800000 /* Keep bad frames (FCS) */ +#define B43_MACCTL_PROMISC 0x01000000 /* Promiscuous mode */ +#define B43_MACCTL_HWPS 0x02000000 /* Hardware Power Saving */ +#define B43_MACCTL_AWAKE 0x04000000 /* Device is awake */ +#define B43_MACCTL_CLOSEDNET 0x08000000 /* Closed net (no SSID bcast) */ +#define B43_MACCTL_TBTTHOLD 0x10000000 /* TBTT Hold */ +#define B43_MACCTL_DISCTXSTAT 0x20000000 /* Discard TX status */ +#define B43_MACCTL_DISCPMQ 0x40000000 /* Discard Power Management Queue */ +#define B43_MACCTL_GMODE 0x80000000 /* G Mode */ + +/* MAC Command bitfield */ +#define B43_MACCMD_BEACON0_VALID 0x00000001 /* Beacon 0 in template RAM is busy/valid */ +#define B43_MACCMD_BEACON1_VALID 0x00000002 /* Beacon 1 in template RAM is busy/valid */ +#define B43_MACCMD_DFQ_VALID 0x00000004 /* Directed frame queue valid (IBSS PS mode, ATIM) */ +#define B43_MACCMD_CCA 0x00000008 /* Clear channel assessment */ +#define B43_MACCMD_BGNOISE 0x00000010 /* Background noise */ + +/* 802.11 core specific TM State Low (SSB_TMSLOW) flags */ +#define B43_TMSLOW_GMODE 0x20000000 /* G Mode Enable */ +#define B43_TMSLOW_PHYCLKSPEED 0x00C00000 /* PHY clock speed mask (N-PHY only) */ +#define B43_TMSLOW_PHYCLKSPEED_40MHZ 0x00000000 /* 40 MHz PHY */ +#define B43_TMSLOW_PHYCLKSPEED_80MHZ 0x00400000 /* 80 MHz PHY */ +#define 
B43_TMSLOW_PHYCLKSPEED_160MHZ 0x00800000 /* 160 MHz PHY */ +#define B43_TMSLOW_PLLREFSEL 0x00200000 /* PLL Frequency Reference Select (rev >= 5) */ +#define B43_TMSLOW_MACPHYCLKEN 0x00100000 /* MAC PHY Clock Control Enable (rev >= 5) */ +#define B43_TMSLOW_PHYRESET 0x00080000 /* PHY Reset */ +#define B43_TMSLOW_PHYCLKEN 0x00040000 /* PHY Clock Enable */ + +/* 802.11 core specific TM State High (SSB_TMSHIGH) flags */ +#define B43_TMSHIGH_DUALBAND_PHY 0x00080000 /* Dualband PHY available */ +#define B43_TMSHIGH_FCLOCK 0x00040000 /* Fast Clock Available (rev >= 5) */ +#define B43_TMSHIGH_HAVE_5GHZ_PHY 0x00020000 /* 5 GHz PHY available (rev >= 5) */ +#define B43_TMSHIGH_HAVE_2GHZ_PHY 0x00010000 /* 2.4 GHz PHY available (rev >= 5) */ + +/* Generic-Interrupt reasons. */ +#define B43_IRQ_MAC_SUSPENDED 0x00000001 +#define B43_IRQ_BEACON 0x00000002 +#define B43_IRQ_TBTT_INDI 0x00000004 +#define B43_IRQ_BEACON_TX_OK 0x00000008 +#define B43_IRQ_BEACON_CANCEL 0x00000010 +#define B43_IRQ_ATIM_END 0x00000020 +#define B43_IRQ_PMQ 0x00000040 +#define B43_IRQ_PIO_WORKAROUND 0x00000100 +#define B43_IRQ_MAC_TXERR 0x00000200 +#define B43_IRQ_PHY_TXERR 0x00000800 +#define B43_IRQ_PMEVENT 0x00001000 +#define B43_IRQ_TIMER0 0x00002000 +#define B43_IRQ_TIMER1 0x00004000 +#define B43_IRQ_DMA 0x00008000 +#define B43_IRQ_TXFIFO_FLUSH_OK 0x00010000 +#define B43_IRQ_CCA_MEASURE_OK 0x00020000 +#define B43_IRQ_NOISESAMPLE_OK 0x00040000 +#define B43_IRQ_UCODE_DEBUG 0x08000000 +#define B43_IRQ_RFKILL 0x10000000 +#define B43_IRQ_TX_OK 0x20000000 +#define B43_IRQ_PHY_G_CHANGED 0x40000000 +#define B43_IRQ_TIMEOUT 0x80000000 + +#define B43_IRQ_ALL 0xFFFFFFFF +#define B43_IRQ_MASKTEMPLATE (B43_IRQ_TBTT_INDI | \ + B43_IRQ_ATIM_END | \ + B43_IRQ_PMQ | \ + B43_IRQ_MAC_TXERR | \ + B43_IRQ_PHY_TXERR | \ + B43_IRQ_DMA | \ + B43_IRQ_TXFIFO_FLUSH_OK | \ + B43_IRQ_NOISESAMPLE_OK | \ + B43_IRQ_UCODE_DEBUG | \ + B43_IRQ_RFKILL | \ + B43_IRQ_TX_OK) + +/* The firmware register to fetch the debug-IRQ reason from. */ +#define B43_DEBUGIRQ_REASON_REG 63 +/* Debug-IRQ reasons. */ +#define B43_DEBUGIRQ_PANIC 0 /* The firmware panic'ed */ +#define B43_DEBUGIRQ_DUMP_SHM 1 /* Dump shared SHM */ +#define B43_DEBUGIRQ_DUMP_REGS 2 /* Dump the microcode registers */ +#define B43_DEBUGIRQ_MARKER 3 /* A "marker" was thrown by the firmware. */ +#define B43_DEBUGIRQ_ACK 0xFFFF /* The host writes that to ACK the IRQ */ + +/* The firmware register that contains the "marker" line. */ +#define B43_MARKER_ID_REG 2 +#define B43_MARKER_LINE_REG 3 + +/* The firmware register to fetch the panic reason from. */ +#define B43_FWPANIC_REASON_REG 3 +/* Firmware panic reason codes */ +#define B43_FWPANIC_DIE 0 /* Firmware died. Don't auto-restart it. */ +#define B43_FWPANIC_RESTART 1 /* Firmware died. Schedule a controller reset. */ + +/* The firmware register that contains the watchdog counter. */ +#define B43_WATCHDOG_REG 1 + +/* Device specific rate values. + * The actual values defined here are (rate_in_mbps * 2). + * Some code depends on this. Don't change it. 
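+ * As a worked example (added for illustration): B43_OFDM_RATE_54MB below is
+ * 0x6C, i.e. decimal 108 = 54 * 2, and B43_RATE_TO_BASE100KBPS(0x6C) expands
+ * to (108 * 10) / 2 = 540, which is 54.0 Mb/s expressed in units of 100 kb/s.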
*/ +#define B43_CCK_RATE_1MB 0x02 +#define B43_CCK_RATE_2MB 0x04 +#define B43_CCK_RATE_5MB 0x0B +#define B43_CCK_RATE_11MB 0x16 +#define B43_OFDM_RATE_6MB 0x0C +#define B43_OFDM_RATE_9MB 0x12 +#define B43_OFDM_RATE_12MB 0x18 +#define B43_OFDM_RATE_18MB 0x24 +#define B43_OFDM_RATE_24MB 0x30 +#define B43_OFDM_RATE_36MB 0x48 +#define B43_OFDM_RATE_48MB 0x60 +#define B43_OFDM_RATE_54MB 0x6C +/* Convert a b43 rate value to a rate in 100kbps */ +#define B43_RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) + +#define B43_DEFAULT_SHORT_RETRY_LIMIT 7 +#define B43_DEFAULT_LONG_RETRY_LIMIT 4 + +#define B43_PHY_TX_BADNESS_LIMIT 1000 + +/* Max size of a security key */ +#define B43_SEC_KEYSIZE 16 +/* Max number of group keys */ +#define B43_NR_GROUP_KEYS 4 +/* Max number of pairwise keys */ +#define B43_NR_PAIRWISE_KEYS 50 +/* Security algorithms. */ +enum { + B43_SEC_ALGO_NONE = 0, /* unencrypted, as of TX header. */ + B43_SEC_ALGO_WEP40, + B43_SEC_ALGO_TKIP, + B43_SEC_ALGO_AES, + B43_SEC_ALGO_WEP104, + B43_SEC_ALGO_AES_LEGACY, +}; + +struct b43_dmaring; + +/* The firmware file header */ +#define B43_FW_TYPE_UCODE 'u' +#define B43_FW_TYPE_PCM 'p' +#define B43_FW_TYPE_IV 'i' +struct b43_fw_header { + /* File type */ + u8 type; + /* File format version */ + u8 ver; + u8 __padding[2]; + /* Size of the data. For ucode and PCM this is in bytes. + * For IV this is number-of-ivs. */ + __be32 size; +} __attribute__((__packed__)); + +/* Initial Value file format */ +#define B43_IV_OFFSET_MASK 0x7FFF +#define B43_IV_32BIT 0x8000 +struct b43_iv { + __be16 offset_size; + union { + __be16 d16; + __be32 d32; + } data __attribute__((__packed__)); +} __attribute__((__packed__)); + + +/* Data structures for DMA transmission, per 80211 core. */ +struct b43_dma { + struct b43_dmaring *tx_ring_AC_BK; /* Background */ + struct b43_dmaring *tx_ring_AC_BE; /* Best Effort */ + struct b43_dmaring *tx_ring_AC_VI; /* Video */ + struct b43_dmaring *tx_ring_AC_VO; /* Voice */ + struct b43_dmaring *tx_ring_mcast; /* Multicast */ + + struct b43_dmaring *rx_ring; +}; + +struct b43_pio_txqueue; +struct b43_pio_rxqueue; + +/* Data structures for PIO transmission, per 80211 core. */ +struct b43_pio { + struct b43_pio_txqueue *tx_queue_AC_BK; /* Background */ + struct b43_pio_txqueue *tx_queue_AC_BE; /* Best Effort */ + struct b43_pio_txqueue *tx_queue_AC_VI; /* Video */ + struct b43_pio_txqueue *tx_queue_AC_VO; /* Voice */ + struct b43_pio_txqueue *tx_queue_mcast; /* Multicast */ + + struct b43_pio_rxqueue *rx_queue; +}; + +/* Context information for a noise calculation (Link Quality). */ +struct b43_noise_calculation { + bool calculation_running; + u8 nr_samples; + s8 samples[8][4]; +}; + +struct b43_stats { + u8 link_noise; +}; + +struct b43_key { + /* If keyconf is NULL, this key is disabled. + * keyconf is a cookie. Don't derefenrence it outside of the set_key + * path, because b43 doesn't own it. */ + struct ieee80211_key_conf *keyconf; + u8 algorithm; +}; + +/* SHM offsets to the QOS data structures for the 4 different queues. */ +#define B43_QOS_PARAMS(queue) (B43_SHM_SH_EDCFQ + \ + (B43_NR_QOSPARAMS * sizeof(u16) * (queue))) +#define B43_QOS_BACKGROUND B43_QOS_PARAMS(0) +#define B43_QOS_BESTEFFORT B43_QOS_PARAMS(1) +#define B43_QOS_VIDEO B43_QOS_PARAMS(2) +#define B43_QOS_VOICE B43_QOS_PARAMS(3) + +/* QOS parameter hardware data structure offsets. 
*/ +#define B43_NR_QOSPARAMS 16 +enum { + B43_QOSPARAM_TXOP = 0, + B43_QOSPARAM_CWMIN, + B43_QOSPARAM_CWMAX, + B43_QOSPARAM_CWCUR, + B43_QOSPARAM_AIFS, + B43_QOSPARAM_BSLOTS, + B43_QOSPARAM_REGGAP, + B43_QOSPARAM_STATUS, +}; + +/* QOS parameters for a queue. */ +struct b43_qos_params { + /* The QOS parameters */ + struct ieee80211_tx_queue_params p; +}; + +struct b43_wl; + +/* The type of the firmware file. */ +enum b43_firmware_file_type { + B43_FWTYPE_PROPRIETARY, + B43_FWTYPE_OPENSOURCE, + B43_NR_FWTYPES, +}; + +/* Context data for fetching firmware. */ +struct b43_request_fw_context { + /* The device we are requesting the fw for. */ + struct b43_wldev *dev; + /* The type of firmware to request. */ + enum b43_firmware_file_type req_type; + /* Error messages for each firmware type. */ + char errors[B43_NR_FWTYPES][128]; + /* Temporary buffer for storing the firmware name. */ + char fwname[64]; + /* A fatal error occured while requesting. Firmware reqest + * can not continue, as any other reqest will also fail. */ + int fatal_failure; +}; + +/* In-memory representation of a cached microcode file. */ +struct b43_firmware_file { + const char *filename; + const struct firmware *data; + /* Type of the firmware file name. Note that this does only indicate + * the type by the firmware name. NOT the file contents. + * If you want to check for proprietary vs opensource, use (struct b43_firmware)->opensource + * instead! The (struct b43_firmware)->opensource flag is derived from the actual firmware + * binary code, not just the filename. + */ + enum b43_firmware_file_type type; +}; + +/* Pointers to the firmware data and meta information about it. */ +struct b43_firmware { + /* Microcode */ + struct b43_firmware_file ucode; + /* PCM code */ + struct b43_firmware_file pcm; + /* Initial MMIO values for the firmware */ + struct b43_firmware_file initvals; + /* Initial MMIO values for the firmware, band-specific */ + struct b43_firmware_file initvals_band; + + /* Firmware revision */ + u16 rev; + /* Firmware patchlevel */ + u16 patch; + + /* Set to true, if we are using an opensource firmware. + * Use this to check for proprietary vs opensource. */ + bool opensource; + /* Set to true, if the core needs a PCM firmware, but + * we failed to load one. This is always false for + * core rev > 10, as these don't need PCM firmware. */ + bool pcm_request_failed; +}; + +/* Device (802.11 core) initialization status. */ +enum { + B43_STAT_UNINIT = 0, /* Uninitialized. */ + B43_STAT_INITIALIZED = 1, /* Initialized, but not started, yet. */ + B43_STAT_STARTED = 2, /* Up and running. */ +}; +#define b43_status(wldev) atomic_read(&(wldev)->__init_status) +#define b43_set_status(wldev, stat) do { \ + atomic_set(&(wldev)->__init_status, (stat)); \ + smp_wmb(); \ + } while (0) + +/* Data structure for one wireless device (802.11 core) */ +struct b43_wldev { + struct ssb_device *dev; + struct b43_wl *wl; + + /* The device initialization status. + * Use b43_status() to query. */ + atomic_t __init_status; + + bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */ + bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */ + bool radio_hw_enable; /* saved state of radio hardware enabled state */ + bool qos_enabled; /* TRUE, if QoS is used. */ + bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */ + bool use_pio; /* TRUE if next init should use PIO */ + + /* PHY/Radio device. */ + struct b43_phy phy; + + union { + /* DMA engines. */ + struct b43_dma dma; + /* PIO engines. 
*/ + struct b43_pio pio; + }; + /* Use b43_using_pio_transfers() to check whether we are using + * DMA or PIO data transfers. */ + bool __using_pio_transfers; + + /* Various statistics about the physical device. */ + struct b43_stats stats; + + /* Reason code of the last interrupt. */ + u32 irq_reason; + u32 dma_reason[6]; + /* The currently active generic-interrupt mask. */ + u32 irq_mask; + + /* Link Quality calculation context. */ + struct b43_noise_calculation noisecalc; + /* if > 0 MAC is suspended. if == 0 MAC is enabled. */ + int mac_suspended; + + /* Periodic tasks */ + struct delayed_work periodic_work; + unsigned int periodic_state; + + struct work_struct restart_work; + + /* encryption/decryption */ + u16 ktp; /* Key table pointer */ + struct b43_key key[B43_NR_GROUP_KEYS * 2 + B43_NR_PAIRWISE_KEYS]; + + /* Firmware data */ + struct b43_firmware fw; + + /* Devicelist in struct b43_wl (all 802.11 cores) */ + struct list_head list; + + /* Debugging stuff follows. */ +#ifdef CONFIG_B43_DEBUG + struct b43_dfsentry *dfsentry; + unsigned int irq_count; + unsigned int irq_bit_count[32]; + unsigned int tx_count; + unsigned int rx_count; +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) + struct compat_threaded_irq irq_compat; +#endif +}; + +/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ +struct b43_wl { + /* Pointer to the active wireless device on this chip */ + struct b43_wldev *current_dev; + /* Pointer to the ieee80211 hardware data structure */ + struct ieee80211_hw *hw; + + /* Global driver mutex. Every operation must run with this mutex locked. */ + struct mutex mutex; + /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ + * handler, only. This basically is just the IRQ mask register. */ + spinlock_t hardirq_lock; + + /* The number of queues that were registered with the mac80211 subsystem + * initially. This is a backup copy of hw->queues in case hw->queues has + * to be dynamically lowered at runtime (Firmware does not support QoS). + * hw->queues has to be restored to the original value before unregistering + * from the mac80211 subsystem. */ + u16 mac80211_initially_registered_queues; + + /* We can only have one operating interface (802.11 core) + * at a time. General information about this interface follows. + */ + + struct ieee80211_vif *vif; + /* The MAC address of the operating interface. */ + u8 mac_addr[ETH_ALEN]; + /* Current BSSID */ + u8 bssid[ETH_ALEN]; + /* Interface type. (NL80211_IFTYPE_XXX) */ + int if_type; + /* Is the card operating in AP, STA or IBSS mode? */ + bool operating; + /* filter flags */ + unsigned int filter_flags; + /* Stats about the wireless interface */ + struct ieee80211_low_level_stats ieee_stats; + +#ifdef CONFIG_B43_HWRNG + struct hwrng rng; + bool rng_initialized; + char rng_name[30 + 1]; +#endif /* CONFIG_B43_HWRNG */ + + /* List of all wireless devices on this chip */ + struct list_head devlist; + u8 nr_devs; + + bool radiotap_enabled; + bool radio_enabled; + + /* The beacon we are currently using (AP or IBSS mode). */ + struct sk_buff *current_beacon; + bool beacon0_uploaded; + bool beacon1_uploaded; + bool beacon_templates_virgin; /* Never wrote the templates? */ + struct work_struct beacon_update_trigger; + + /* The current QOS parameters for the 4 queues. */ + struct b43_qos_params qos_params[4]; + + /* Work for adjustment of the transmission power. + * This is scheduled when we determine that the actual TX output + * power doesn't match what we want. 
*/ + struct work_struct txpower_adjust_work; + + /* Packet transmit work */ + struct work_struct tx_work; + /* Queue of packets to be transmitted. */ + struct sk_buff_head tx_queue; + + /* The device LEDs. */ + struct b43_leds leds; + + /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */ + u8 pio_scratchspace[110] __attribute__((__aligned__(8))); + u8 pio_tailspace[4] __attribute__((__aligned__(8))); +}; + +static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +static inline struct b43_wldev *dev_to_b43_wldev(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + return ssb_get_drvdata(ssb_dev); +} + +/* Is the device operating in a specified mode (NL80211_IFTYPE_XXX). */ +static inline int b43_is_mode(struct b43_wl *wl, int type) +{ + return (wl->operating && wl->if_type == type); +} + +/** + * b43_current_band - Returns the currently used band. + * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ. + */ +static inline enum ieee80211_band b43_current_band(struct b43_wl *wl) +{ + return wl->hw->conf.channel->band; +} + +static inline u16 b43_read16(struct b43_wldev *dev, u16 offset) +{ + return ssb_read16(dev->dev, offset); +} + +static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value) +{ + ssb_write16(dev->dev, offset, value); +} + +static inline u32 b43_read32(struct b43_wldev *dev, u16 offset) +{ + return ssb_read32(dev->dev, offset); +} + +static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value) +{ + ssb_write32(dev->dev, offset, value); +} + +static inline bool b43_using_pio_transfers(struct b43_wldev *dev) +{ + return dev->__using_pio_transfers; +} + +#ifdef CONFIG_B43_FORCE_PIO +# define B43_PIO_DEFAULT 1 +#else +# define B43_PIO_DEFAULT 0 +#endif + +/* Message printing */ +void b43info(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +void b43err(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +void b43warn(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +void b43dbg(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); + + +/* A WARN_ON variant that vanishes when b43 debugging is disabled. + * This _also_ evaluates the arg with debugging disabled. */ +#if B43_DEBUG +# define B43_WARN_ON(x) WARN_ON(x) +#else +static inline bool __b43_warn_on_dummy(bool x) { return x; } +# define B43_WARN_ON(x) __b43_warn_on_dummy(unlikely(!!(x))) +#endif + +/* Convert an integer to a Q5.2 value */ +#define INT_TO_Q52(i) ((i) << 2) +/* Convert a Q5.2 value to an integer (precision loss!) */ +#define Q52_TO_INT(q52) ((q52) >> 2) +/* Macros for printing a value in Q5.2 format */ +#define Q52_FMT "%u.%u" +#define Q52_ARG(q52) Q52_TO_INT(q52), ((((q52) & 0x3) * 100) / 4) + +#endif /* B43_H_ */ diff --git a/drivers/net/wireless/b43/b43.h.orig b/drivers/net/wireless/b43/b43.h.orig new file mode 100644 index 0000000..b8807fb --- /dev/null +++ b/drivers/net/wireless/b43/b43.h.orig @@ -0,0 +1,924 @@ +#ifndef B43_H_ +#define B43_H_ + +#include +#include +#include +#include +#include +#include + +#include "debugfs.h" +#include "leds.h" +#include "rfkill.h" +#include "lo.h" +#include "phy_common.h" + + +/* The unique identifier of the firmware that's officially supported by + * this driver version. 
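/*
 * Illustrative sketch (not part of the upstream patch): the Q5.2 helpers
 * at the end of this header store values in quarters (two fractional
 * bits). The macros are copied from above; the sample value is invented.
 */
#include <stdio.h>

#define INT_TO_Q52(i)	((i) << 2)
#define Q52_TO_INT(q52)	((q52) >> 2)
#define Q52_FMT		"%u.%u"
#define Q52_ARG(q52)	Q52_TO_INT(q52), ((((q52) & 0x3) * 100) / 4)

int main(void)
{
	unsigned int q = INT_TO_Q52(5) | 0x3;	/* 5 + 3/4 in Q5.2, i.e. 0x17 */

	/* Prints "5.75": integer part from bits 2 and up, fraction from the low 2 bits. */
	printf(Q52_FMT "\n", Q52_ARG(q));
	return 0;
}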
*/ +#define B43_SUPPORTED_FIRMWARE_ID "FW13" + + +#ifdef CONFIG_B43_DEBUG +# define B43_DEBUG 1 +#else +# define B43_DEBUG 0 +#endif + +/* MMIO offsets */ +#define B43_MMIO_DMA0_REASON 0x20 +#define B43_MMIO_DMA0_IRQ_MASK 0x24 +#define B43_MMIO_DMA1_REASON 0x28 +#define B43_MMIO_DMA1_IRQ_MASK 0x2C +#define B43_MMIO_DMA2_REASON 0x30 +#define B43_MMIO_DMA2_IRQ_MASK 0x34 +#define B43_MMIO_DMA3_REASON 0x38 +#define B43_MMIO_DMA3_IRQ_MASK 0x3C +#define B43_MMIO_DMA4_REASON 0x40 +#define B43_MMIO_DMA4_IRQ_MASK 0x44 +#define B43_MMIO_DMA5_REASON 0x48 +#define B43_MMIO_DMA5_IRQ_MASK 0x4C +#define B43_MMIO_MACCTL 0x120 /* MAC control */ +#define B43_MMIO_MACCMD 0x124 /* MAC command */ +#define B43_MMIO_GEN_IRQ_REASON 0x128 +#define B43_MMIO_GEN_IRQ_MASK 0x12C +#define B43_MMIO_RAM_CONTROL 0x130 +#define B43_MMIO_RAM_DATA 0x134 +#define B43_MMIO_PS_STATUS 0x140 +#define B43_MMIO_RADIO_HWENABLED_HI 0x158 +#define B43_MMIO_SHM_CONTROL 0x160 +#define B43_MMIO_SHM_DATA 0x164 +#define B43_MMIO_SHM_DATA_UNALIGNED 0x166 +#define B43_MMIO_XMITSTAT_0 0x170 +#define B43_MMIO_XMITSTAT_1 0x174 +#define B43_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */ +#define B43_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ +#define B43_MMIO_TSF_CFP_REP 0x188 +#define B43_MMIO_TSF_CFP_START 0x18C +#define B43_MMIO_TSF_CFP_MAXDUR 0x190 + +/* 32-bit DMA */ +#define B43_MMIO_DMA32_BASE0 0x200 +#define B43_MMIO_DMA32_BASE1 0x220 +#define B43_MMIO_DMA32_BASE2 0x240 +#define B43_MMIO_DMA32_BASE3 0x260 +#define B43_MMIO_DMA32_BASE4 0x280 +#define B43_MMIO_DMA32_BASE5 0x2A0 +/* 64-bit DMA */ +#define B43_MMIO_DMA64_BASE0 0x200 +#define B43_MMIO_DMA64_BASE1 0x240 +#define B43_MMIO_DMA64_BASE2 0x280 +#define B43_MMIO_DMA64_BASE3 0x2C0 +#define B43_MMIO_DMA64_BASE4 0x300 +#define B43_MMIO_DMA64_BASE5 0x340 + +/* PIO on core rev < 11 */ +#define B43_MMIO_PIO_BASE0 0x300 +#define B43_MMIO_PIO_BASE1 0x310 +#define B43_MMIO_PIO_BASE2 0x320 +#define B43_MMIO_PIO_BASE3 0x330 +#define B43_MMIO_PIO_BASE4 0x340 +#define B43_MMIO_PIO_BASE5 0x350 +#define B43_MMIO_PIO_BASE6 0x360 +#define B43_MMIO_PIO_BASE7 0x370 +/* PIO on core rev >= 11 */ +#define B43_MMIO_PIO11_BASE0 0x200 +#define B43_MMIO_PIO11_BASE1 0x240 +#define B43_MMIO_PIO11_BASE2 0x280 +#define B43_MMIO_PIO11_BASE3 0x2C0 +#define B43_MMIO_PIO11_BASE4 0x300 +#define B43_MMIO_PIO11_BASE5 0x340 + +#define B43_MMIO_PHY_VER 0x3E0 +#define B43_MMIO_PHY_RADIO 0x3E2 +#define B43_MMIO_PHY0 0x3E6 +#define B43_MMIO_ANTENNA 0x3E8 +#define B43_MMIO_CHANNEL 0x3F0 +#define B43_MMIO_CHANNEL_EXT 0x3F4 +#define B43_MMIO_RADIO_CONTROL 0x3F6 +#define B43_MMIO_RADIO_DATA_HIGH 0x3F8 +#define B43_MMIO_RADIO_DATA_LOW 0x3FA +#define B43_MMIO_PHY_CONTROL 0x3FC +#define B43_MMIO_PHY_DATA 0x3FE +#define B43_MMIO_MACFILTER_CONTROL 0x420 +#define B43_MMIO_MACFILTER_DATA 0x422 +#define B43_MMIO_RCMTA_COUNT 0x43C +#define B43_MMIO_RADIO_HWENABLED_LO 0x49A +#define B43_MMIO_GPIO_CONTROL 0x49C +#define B43_MMIO_GPIO_MASK 0x49E +#define B43_MMIO_TSF_CFP_START_LOW 0x604 +#define B43_MMIO_TSF_CFP_START_HIGH 0x606 +#define B43_MMIO_TSF_CFP_PRETBTT 0x612 +#define B43_MMIO_TSF_0 0x632 /* core rev < 3 only */ +#define B43_MMIO_TSF_1 0x634 /* core rev < 3 only */ +#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ +#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ +#define B43_MMIO_RNG 0x65A +#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */ +#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ +#define B43_MMIO_IFSCTL_USE_EDCF 0x0004 +#define B43_MMIO_POWERUP_DELAY 0x6A8 +#define 
B43_MMIO_BTCOEX_CTL 0x6B4 /* Bluetooth Coexistence Control */ +#define B43_MMIO_BTCOEX_STAT 0x6B6 /* Bluetooth Coexistence Status */ +#define B43_MMIO_BTCOEX_TXCTL 0x6B8 /* Bluetooth Coexistence Transmit Control */ + +/* SPROM boardflags_lo values */ +#define B43_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistance */ +#define B43_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */ +#define B43_BFL_AIRLINEMODE 0x0004 /* implements GPIO 13 radio disable indication */ +#define B43_BFL_RSSI 0x0008 /* software calculates nrssi slope. */ +#define B43_BFL_ENETSPI 0x0010 /* has ephy roboswitch spi */ +#define B43_BFL_XTAL_NOSLOW 0x0020 /* no slow clock available */ +#define B43_BFL_CCKHIPWR 0x0040 /* can do high power CCK transmission */ +#define B43_BFL_ENETADM 0x0080 /* has ADMtek switch */ +#define B43_BFL_ENETVLAN 0x0100 /* can do vlan */ +#define B43_BFL_AFTERBURNER 0x0200 /* supports Afterburner mode */ +#define B43_BFL_NOPCI 0x0400 /* leaves PCI floating */ +#define B43_BFL_FEM 0x0800 /* supports the Front End Module */ +#define B43_BFL_EXTLNA 0x1000 /* has an external LNA */ +#define B43_BFL_HGPA 0x2000 /* had high gain PA */ +#define B43_BFL_BTCMOD 0x4000 /* BFL_BTCOEXIST is given in alternate GPIOs */ +#define B43_BFL_ALTIQ 0x8000 /* alternate I/Q settings */ + +/* SPROM boardflags_hi values */ +#define B43_BFH_NOPA 0x0001 /* has no PA */ +#define B43_BFH_RSSIINV 0x0002 /* RSSI uses positive slope (not TSSI) */ +#define B43_BFH_PAREF 0x0004 /* uses the PARef LDO */ +#define B43_BFH_3TSWITCH 0x0008 /* uses a triple throw switch shared + * with bluetooth */ +#define B43_BFH_PHASESHIFT 0x0010 /* can support phase shifter */ +#define B43_BFH_BUCKBOOST 0x0020 /* has buck/booster */ +#define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna + * with bluetooth */ + +/* GPIO register offset, in both ChipCommon and PCI core. 
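/*
 * Illustrative sketch (not part of the upstream patch): the SPROM board
 * flags above are plain bit masks over the 16-bit boardflags words. The
 * two constants are copied from this header; the helper and the sample
 * value are invented (the driver reads the real word from the SSB SPROM).
 */
#include <stdio.h>
#include <stdint.h>

#define B43_BFL_PACTRL	0x0002	/* GPIO 9 controlling the PA */
#define B43_BFL_EXTLNA	0x1000	/* has an external LNA */

static void print_board_features(uint16_t boardflags_lo)
{
	if (boardflags_lo & B43_BFL_PACTRL)
		printf("PA is controlled through GPIO 9\n");
	if (boardflags_lo & B43_BFL_EXTLNA)
		printf("board has an external LNA\n");
}

int main(void)
{
	print_board_features(0x1002);	/* hypothetical SPROM value */
	return 0;
}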
*/ +#define B43_GPIO_CONTROL 0x6c + +/* SHM Routing */ +enum { + B43_SHM_UCODE, /* Microcode memory */ + B43_SHM_SHARED, /* Shared memory */ + B43_SHM_SCRATCH, /* Scratch memory */ + B43_SHM_HW, /* Internal hardware register */ + B43_SHM_RCMTA, /* Receive match transmitter address (rev >= 5 only) */ +}; +/* SHM Routing modifiers */ +#define B43_SHM_AUTOINC_R 0x0200 /* Auto-increment address on read */ +#define B43_SHM_AUTOINC_W 0x0100 /* Auto-increment address on write */ +#define B43_SHM_AUTOINC_RW (B43_SHM_AUTOINC_R | \ + B43_SHM_AUTOINC_W) + +/* Misc SHM_SHARED offsets */ +#define B43_SHM_SH_WLCOREREV 0x0016 /* 802.11 core revision */ +#define B43_SHM_SH_PCTLWDPOS 0x0008 +#define B43_SHM_SH_RXPADOFF 0x0034 /* RX Padding data offset (PIO only) */ +#define B43_SHM_SH_FWCAPA 0x0042 /* Firmware capabilities (Opensource firmware only) */ +#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */ +#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */ +#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */ +#define B43_SHM_SH_HOSTFLO 0x005E /* Hostflags for ucode options (low) */ +#define B43_SHM_SH_HOSTFMI 0x0060 /* Hostflags for ucode options (middle) */ +#define B43_SHM_SH_HOSTFHI 0x0062 /* Hostflags for ucode options (high) */ +#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */ +#define B43_SHM_SH_RADAR 0x0066 /* Radar register */ +#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */ +#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */ +#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ +#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5Ghz channel */ +#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ +/* TSSI information */ +#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */ +#define B43_SHM_SH_TSSI_OFDM_A 0x0068 /* TSSI for last 4 OFDM frames (32bit) */ +#define B43_SHM_SH_TSSI_OFDM_G 0x0070 /* TSSI for last 4 OFDM frames (32bit) */ +#define B43_TSSI_MAX 0x7F /* Max value for one TSSI value */ +/* SHM_SHARED TX FIFO variables */ +#define B43_SHM_SH_SIZE01 0x0098 /* TX FIFO size for FIFO 0 (low) and 1 (high) */ +#define B43_SHM_SH_SIZE23 0x009A /* TX FIFO size for FIFO 2 and 3 */ +#define B43_SHM_SH_SIZE45 0x009C /* TX FIFO size for FIFO 4 and 5 */ +#define B43_SHM_SH_SIZE67 0x009E /* TX FIFO size for FIFO 6 and 7 */ +/* SHM_SHARED background noise */ +#define B43_SHM_SH_JSSI0 0x0088 /* Measure JSSI 0 */ +#define B43_SHM_SH_JSSI1 0x008A /* Measure JSSI 1 */ +#define B43_SHM_SH_JSSIAUX 0x008C /* Measure JSSI AUX */ +/* SHM_SHARED crypto engine */ +#define B43_SHM_SH_DEFAULTIV 0x003C /* Default IV location */ +#define B43_SHM_SH_NRRXTRANS 0x003E /* # of soft RX transmitter addresses (max 8) */ +#define B43_SHM_SH_KTP 0x0056 /* Key table pointer */ +#define B43_SHM_SH_TKIPTSCTTAK 0x0318 +#define B43_SHM_SH_KEYIDXBLOCK 0x05D4 /* Key index/algorithm block (v4 firmware) */ +#define B43_SHM_SH_PSM 0x05F4 /* PSM transmitter address match block (rev < 5) */ +/* SHM_SHARED WME variables */ +#define B43_SHM_SH_EDCFSTAT 0x000E /* EDCF status */ +#define B43_SHM_SH_TXFCUR 0x0030 /* TXF current index */ +#define B43_SHM_SH_EDCFQ 0x0240 /* EDCF Q info */ +/* SHM_SHARED powersave mode related */ +#define B43_SHM_SH_SLOTT 0x0010 /* Slot time */ +#define B43_SHM_SH_DTIMPER 0x0012 /* DTIM period */ +#define B43_SHM_SH_NOSLPZNATDTIM 0x004C /* NOSLPZNAT DTIM */ +/* SHM_SHARED beacon/AP variables */ +#define B43_SHM_SH_BTL0 0x0018 /* Beacon template length 0 */ +#define 
B43_SHM_SH_BTL1 0x001A /* Beacon template length 1 */ +#define B43_SHM_SH_BTSFOFF 0x001C /* Beacon TSF offset */ +#define B43_SHM_SH_TIMBPOS 0x001E /* TIM B position in beacon */ +#define B43_SHM_SH_DTIMP 0x0012 /* DTIP period */ +#define B43_SHM_SH_MCASTCOOKIE 0x00A8 /* Last bcast/mcast frame ID */ +#define B43_SHM_SH_SFFBLIM 0x0044 /* Short frame fallback retry limit */ +#define B43_SHM_SH_LFFBLIM 0x0046 /* Long frame fallback retry limit */ +#define B43_SHM_SH_BEACPHYCTL 0x0054 /* Beacon PHY TX control word (see PHY TX control) */ +#define B43_SHM_SH_EXTNPHYCTL 0x00B0 /* Extended bytes for beacon PHY control (N) */ +/* SHM_SHARED ACK/CTS control */ +#define B43_SHM_SH_ACKCTSPHYCTL 0x0022 /* ACK/CTS PHY control word (see PHY TX control) */ +/* SHM_SHARED probe response variables */ +#define B43_SHM_SH_PRSSID 0x0160 /* Probe Response SSID */ +#define B43_SHM_SH_PRSSIDLEN 0x0048 /* Probe Response SSID length */ +#define B43_SHM_SH_PRTLEN 0x004A /* Probe Response template length */ +#define B43_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */ +#define B43_SHM_SH_PRPHYCTL 0x0188 /* Probe Response PHY TX control word */ +/* SHM_SHARED rate tables */ +#define B43_SHM_SH_OFDMDIRECT 0x01C0 /* Pointer to OFDM direct map */ +#define B43_SHM_SH_OFDMBASIC 0x01E0 /* Pointer to OFDM basic rate map */ +#define B43_SHM_SH_CCKDIRECT 0x0200 /* Pointer to CCK direct map */ +#define B43_SHM_SH_CCKBASIC 0x0220 /* Pointer to CCK basic rate map */ +/* SHM_SHARED microcode soft registers */ +#define B43_SHM_SH_UCODEREV 0x0000 /* Microcode revision */ +#define B43_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */ +#define B43_SHM_SH_UCODEDATE 0x0004 /* Microcode date */ +#define B43_SHM_SH_UCODETIME 0x0006 /* Microcode time */ +#define B43_SHM_SH_UCODESTAT 0x0040 /* Microcode debug status code */ +#define B43_SHM_SH_UCODESTAT_INVALID 0 +#define B43_SHM_SH_UCODESTAT_INIT 1 +#define B43_SHM_SH_UCODESTAT_ACTIVE 2 +#define B43_SHM_SH_UCODESTAT_SUSP 3 /* suspended */ +#define B43_SHM_SH_UCODESTAT_SLEEP 4 /* asleep (PS) */ +#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */ +#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */ +#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */ +/* SHM_SHARED tx iq workarounds */ +#define B43_SHM_SH_NPHY_TXIQW0 0x0700 +#define B43_SHM_SH_NPHY_TXIQW1 0x0702 +#define B43_SHM_SH_NPHY_TXIQW2 0x0704 +#define B43_SHM_SH_NPHY_TXIQW3 0x0706 +/* SHM_SHARED tx pwr ctrl */ +#define B43_SHM_SH_NPHY_TXPWR_INDX0 0x0708 +#define B43_SHM_SH_NPHY_TXPWR_INDX1 0x070E + +/* SHM_SCRATCH offsets */ +#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */ +#define B43_SHM_SC_MAXCONT 0x0004 /* Maximum contention window */ +#define B43_SHM_SC_CURCONT 0x0005 /* Current contention window */ +#define B43_SHM_SC_SRLIMIT 0x0006 /* Short retry count limit */ +#define B43_SHM_SC_LRLIMIT 0x0007 /* Long retry count limit */ +#define B43_SHM_SC_DTIMC 0x0008 /* Current DTIM count */ +#define B43_SHM_SC_BTL0LEN 0x0015 /* Beacon 0 template length */ +#define B43_SHM_SC_BTL1LEN 0x0016 /* Beacon 1 template length */ +#define B43_SHM_SC_SCFB 0x0017 /* Short frame transmit count threshold for rate fallback */ +#define B43_SHM_SC_LCFB 0x0018 /* Long frame transmit count threshold for rate fallback */ + +/* Hardware Radio Enable masks */ +#define B43_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16) +#define B43_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4) + +/* HostFlags. 
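/*
 * Illustrative sketch (not part of the upstream patch): the microcode soft
 * registers above are 16-bit words in SHM_SHARED. A driver-internal dump
 * could look roughly like this, assuming the b43_shm_read16() helper that
 * the debugfs code later in this patch also uses; the function below is
 * invented and not wired up anywhere.
 */
static void example_dump_ucode_info(struct b43_wldev *dev)
{
	u16 rev   = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEREV);
	u16 patch = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEPATCH);
	u16 date  = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEDATE);
	u16 time  = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODETIME);

	b43dbg(dev->wl, "ucode rev %u patch %u (date 0x%04X, time 0x%04X)\n",
	       rev, patch, date, time);
}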
See b43_hf_read/write() */ +#define B43_HF_ANTDIVHELP 0x000000000001ULL /* ucode antenna div helper */ +#define B43_HF_SYMW 0x000000000002ULL /* G-PHY SYM workaround */ +#define B43_HF_RXPULLW 0x000000000004ULL /* RX pullup workaround */ +#define B43_HF_CCKBOOST 0x000000000008ULL /* 4dB CCK power boost (exclusive with OFDM boost) */ +#define B43_HF_BTCOEX 0x000000000010ULL /* Bluetooth coexistance */ +#define B43_HF_GDCW 0x000000000020ULL /* G-PHY DC canceller filter bw workaround */ +#define B43_HF_OFDMPABOOST 0x000000000040ULL /* Enable PA gain boost for OFDM */ +#define B43_HF_ACPR 0x000000000080ULL /* Disable for Japan, channel 14 */ +#define B43_HF_EDCF 0x000000000100ULL /* on if WME and MAC suspended */ +#define B43_HF_TSSIRPSMW 0x000000000200ULL /* TSSI reset PSM ucode workaround */ +#define B43_HF_20IN40IQW 0x000000000200ULL /* 20 in 40 MHz I/Q workaround (rev >= 13 only) */ +#define B43_HF_DSCRQ 0x000000000400ULL /* Disable slow clock request in ucode */ +#define B43_HF_ACIW 0x000000000800ULL /* ACI workaround: shift bits by 2 on PHY CRS */ +#define B43_HF_2060W 0x000000001000ULL /* 2060 radio workaround */ +#define B43_HF_RADARW 0x000000002000ULL /* Radar workaround */ +#define B43_HF_USEDEFKEYS 0x000000004000ULL /* Enable use of default keys */ +#define B43_HF_AFTERBURNER 0x000000008000ULL /* Afterburner enabled */ +#define B43_HF_BT4PRIOCOEX 0x000000010000ULL /* Bluetooth 4-priority coexistance */ +#define B43_HF_FWKUP 0x000000020000ULL /* Fast wake-up ucode */ +#define B43_HF_VCORECALC 0x000000040000ULL /* Force VCO recalculation when powering up synthpu */ +#define B43_HF_PCISCW 0x000000080000ULL /* PCI slow clock workaround */ +#define B43_HF_4318TSSI 0x000000200000ULL /* 4318 TSSI */ +#define B43_HF_FBCMCFIFO 0x000000400000ULL /* Flush bcast/mcast FIFO immediately */ +#define B43_HF_HWPCTL 0x000000800000ULL /* Enable hardwarre power control */ +#define B43_HF_BTCOEXALT 0x000001000000ULL /* Bluetooth coexistance in alternate pins */ +#define B43_HF_TXBTCHECK 0x000002000000ULL /* Bluetooth check during transmission */ +#define B43_HF_SKCFPUP 0x000004000000ULL /* Skip CFP update */ +#define B43_HF_N40W 0x000008000000ULL /* N PHY 40 MHz workaround (rev >= 13 only) */ +#define B43_HF_ANTSEL 0x000020000000ULL /* Antenna selection (for testing antenna div.) */ +#define B43_HF_BT3COEXT 0x000020000000ULL /* Bluetooth 3-wire coexistence (rev >= 13 only) */ +#define B43_HF_BTCANT 0x000040000000ULL /* Bluetooth coexistence (antenna mode) (rev >= 13 only) */ +#define B43_HF_ANTSELEN 0x000100000000ULL /* Antenna selection enabled (rev >= 13 only) */ +#define B43_HF_ANTSELMODE 0x000200000000ULL /* Antenna selection mode (rev >= 13 only) */ +#define B43_HF_MLADVW 0x001000000000ULL /* N PHY ML ADV workaround (rev >= 13 only) */ +#define B43_HF_PR45960W 0x080000000000ULL /* PR 45960 workaround (rev >= 13 only) */ + +/* Firmware capabilities field in SHM (Opensource firmware only) */ +#define B43_FWCAPA_HWCRYPTO 0x0001 +#define B43_FWCAPA_QOS 0x0002 + +/* MacFilter offsets. 
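/*
 * Illustrative sketch (not part of the upstream patch): the B43_HF_* host
 * flags above form one value of up to 48 bits that is kept in the three
 * 16-bit SHM_SHARED words B43_SHM_SH_HOSTFLO/-FMI/-FHI (low, middle,
 * high). The real driver handles this in b43_hf_read/write(); the
 * standalone snippet below only shows the 64-bit to 3 x 16-bit split, and
 * its names are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define B43_HF_BTCOEX	0x000000000010ULL	/* Bluetooth coexistence */
#define B43_HF_HWPCTL	0x000000800000ULL	/* hardware power control */

static void split_hostflags(uint64_t hf, uint16_t out[3])
{
	out[0] = hf & 0xFFFF;		/* -> B43_SHM_SH_HOSTFLO */
	out[1] = (hf >> 16) & 0xFFFF;	/* -> B43_SHM_SH_HOSTFMI */
	out[2] = (hf >> 32) & 0xFFFF;	/* -> B43_SHM_SH_HOSTFHI */
}

int main(void)
{
	uint16_t words[3];

	split_hostflags(B43_HF_BTCOEX | B43_HF_HWPCTL, words);
	printf("lo=0x%04X mi=0x%04X hi=0x%04X\n", words[0], words[1], words[2]);
	return 0;
}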
*/ +#define B43_MACFILTER_SELF 0x0000 +#define B43_MACFILTER_BSSID 0x0003 + +/* PowerControl */ +#define B43_PCTL_IN 0xB0 +#define B43_PCTL_OUT 0xB4 +#define B43_PCTL_OUTENABLE 0xB8 +#define B43_PCTL_XTAL_POWERUP 0x40 +#define B43_PCTL_PLL_POWERDOWN 0x80 + +/* PowerControl Clock Modes */ +#define B43_PCTL_CLK_FAST 0x00 +#define B43_PCTL_CLK_SLOW 0x01 +#define B43_PCTL_CLK_DYNAMIC 0x02 + +#define B43_PCTL_FORCE_SLOW 0x0800 +#define B43_PCTL_FORCE_PLL 0x1000 +#define B43_PCTL_DYN_XTAL 0x2000 + +/* PHYVersioning */ +#define B43_PHYTYPE_A 0x00 +#define B43_PHYTYPE_B 0x01 +#define B43_PHYTYPE_G 0x02 +#define B43_PHYTYPE_N 0x04 +#define B43_PHYTYPE_LP 0x05 + +/* PHYRegisters */ +#define B43_PHY_ILT_A_CTRL 0x0072 +#define B43_PHY_ILT_A_DATA1 0x0073 +#define B43_PHY_ILT_A_DATA2 0x0074 +#define B43_PHY_G_LO_CONTROL 0x0810 +#define B43_PHY_ILT_G_CTRL 0x0472 +#define B43_PHY_ILT_G_DATA1 0x0473 +#define B43_PHY_ILT_G_DATA2 0x0474 +#define B43_PHY_A_PCTL 0x007B +#define B43_PHY_G_PCTL 0x0029 +#define B43_PHY_A_CRS 0x0029 +#define B43_PHY_RADIO_BITFIELD 0x0401 +#define B43_PHY_G_CRS 0x0429 +#define B43_PHY_NRSSILT_CTRL 0x0803 +#define B43_PHY_NRSSILT_DATA 0x0804 + +/* RadioRegisters */ +#define B43_RADIOCTL_ID 0x01 + +/* MAC Control bitfield */ +#define B43_MACCTL_ENABLED 0x00000001 /* MAC Enabled */ +#define B43_MACCTL_PSM_RUN 0x00000002 /* Run Microcode */ +#define B43_MACCTL_PSM_JMP0 0x00000004 /* Microcode jump to 0 */ +#define B43_MACCTL_SHM_ENABLED 0x00000100 /* SHM Enabled */ +#define B43_MACCTL_SHM_UPPER 0x00000200 /* SHM Upper */ +#define B43_MACCTL_IHR_ENABLED 0x00000400 /* IHR Region Enabled */ +#define B43_MACCTL_PSM_DBG 0x00002000 /* Microcode debugging enabled */ +#define B43_MACCTL_GPOUTSMSK 0x0000C000 /* GPOUT Select Mask */ +#define B43_MACCTL_BE 0x00010000 /* Big Endian mode */ +#define B43_MACCTL_INFRA 0x00020000 /* Infrastructure mode */ +#define B43_MACCTL_AP 0x00040000 /* AccessPoint mode */ +#define B43_MACCTL_RADIOLOCK 0x00080000 /* Radio lock */ +#define B43_MACCTL_BEACPROMISC 0x00100000 /* Beacon Promiscuous */ +#define B43_MACCTL_KEEP_BADPLCP 0x00200000 /* Keep frames with bad PLCP */ +#define B43_MACCTL_KEEP_CTL 0x00400000 /* Keep control frames */ +#define B43_MACCTL_KEEP_BAD 0x00800000 /* Keep bad frames (FCS) */ +#define B43_MACCTL_PROMISC 0x01000000 /* Promiscuous mode */ +#define B43_MACCTL_HWPS 0x02000000 /* Hardware Power Saving */ +#define B43_MACCTL_AWAKE 0x04000000 /* Device is awake */ +#define B43_MACCTL_CLOSEDNET 0x08000000 /* Closed net (no SSID bcast) */ +#define B43_MACCTL_TBTTHOLD 0x10000000 /* TBTT Hold */ +#define B43_MACCTL_DISCTXSTAT 0x20000000 /* Discard TX status */ +#define B43_MACCTL_DISCPMQ 0x40000000 /* Discard Power Management Queue */ +#define B43_MACCTL_GMODE 0x80000000 /* G Mode */ + +/* MAC Command bitfield */ +#define B43_MACCMD_BEACON0_VALID 0x00000001 /* Beacon 0 in template RAM is busy/valid */ +#define B43_MACCMD_BEACON1_VALID 0x00000002 /* Beacon 1 in template RAM is busy/valid */ +#define B43_MACCMD_DFQ_VALID 0x00000004 /* Directed frame queue valid (IBSS PS mode, ATIM) */ +#define B43_MACCMD_CCA 0x00000008 /* Clear channel assessment */ +#define B43_MACCMD_BGNOISE 0x00000010 /* Background noise */ + +/* 802.11 core specific TM State Low (SSB_TMSLOW) flags */ +#define B43_TMSLOW_GMODE 0x20000000 /* G Mode Enable */ +#define B43_TMSLOW_PHYCLKSPEED 0x00C00000 /* PHY clock speed mask (N-PHY only) */ +#define B43_TMSLOW_PHYCLKSPEED_40MHZ 0x00000000 /* 40 MHz PHY */ +#define B43_TMSLOW_PHYCLKSPEED_80MHZ 0x00400000 /* 80 MHz PHY */ +#define 
B43_TMSLOW_PHYCLKSPEED_160MHZ 0x00800000 /* 160 MHz PHY */ +#define B43_TMSLOW_PLLREFSEL 0x00200000 /* PLL Frequency Reference Select (rev >= 5) */ +#define B43_TMSLOW_MACPHYCLKEN 0x00100000 /* MAC PHY Clock Control Enable (rev >= 5) */ +#define B43_TMSLOW_PHYRESET 0x00080000 /* PHY Reset */ +#define B43_TMSLOW_PHYCLKEN 0x00040000 /* PHY Clock Enable */ + +/* 802.11 core specific TM State High (SSB_TMSHIGH) flags */ +#define B43_TMSHIGH_DUALBAND_PHY 0x00080000 /* Dualband PHY available */ +#define B43_TMSHIGH_FCLOCK 0x00040000 /* Fast Clock Available (rev >= 5) */ +#define B43_TMSHIGH_HAVE_5GHZ_PHY 0x00020000 /* 5 GHz PHY available (rev >= 5) */ +#define B43_TMSHIGH_HAVE_2GHZ_PHY 0x00010000 /* 2.4 GHz PHY available (rev >= 5) */ + +/* Generic-Interrupt reasons. */ +#define B43_IRQ_MAC_SUSPENDED 0x00000001 +#define B43_IRQ_BEACON 0x00000002 +#define B43_IRQ_TBTT_INDI 0x00000004 +#define B43_IRQ_BEACON_TX_OK 0x00000008 +#define B43_IRQ_BEACON_CANCEL 0x00000010 +#define B43_IRQ_ATIM_END 0x00000020 +#define B43_IRQ_PMQ 0x00000040 +#define B43_IRQ_PIO_WORKAROUND 0x00000100 +#define B43_IRQ_MAC_TXERR 0x00000200 +#define B43_IRQ_PHY_TXERR 0x00000800 +#define B43_IRQ_PMEVENT 0x00001000 +#define B43_IRQ_TIMER0 0x00002000 +#define B43_IRQ_TIMER1 0x00004000 +#define B43_IRQ_DMA 0x00008000 +#define B43_IRQ_TXFIFO_FLUSH_OK 0x00010000 +#define B43_IRQ_CCA_MEASURE_OK 0x00020000 +#define B43_IRQ_NOISESAMPLE_OK 0x00040000 +#define B43_IRQ_UCODE_DEBUG 0x08000000 +#define B43_IRQ_RFKILL 0x10000000 +#define B43_IRQ_TX_OK 0x20000000 +#define B43_IRQ_PHY_G_CHANGED 0x40000000 +#define B43_IRQ_TIMEOUT 0x80000000 + +#define B43_IRQ_ALL 0xFFFFFFFF +#define B43_IRQ_MASKTEMPLATE (B43_IRQ_TBTT_INDI | \ + B43_IRQ_ATIM_END | \ + B43_IRQ_PMQ | \ + B43_IRQ_MAC_TXERR | \ + B43_IRQ_PHY_TXERR | \ + B43_IRQ_DMA | \ + B43_IRQ_TXFIFO_FLUSH_OK | \ + B43_IRQ_NOISESAMPLE_OK | \ + B43_IRQ_UCODE_DEBUG | \ + B43_IRQ_RFKILL | \ + B43_IRQ_TX_OK) + +/* The firmware register to fetch the debug-IRQ reason from. */ +#define B43_DEBUGIRQ_REASON_REG 63 +/* Debug-IRQ reasons. */ +#define B43_DEBUGIRQ_PANIC 0 /* The firmware panic'ed */ +#define B43_DEBUGIRQ_DUMP_SHM 1 /* Dump shared SHM */ +#define B43_DEBUGIRQ_DUMP_REGS 2 /* Dump the microcode registers */ +#define B43_DEBUGIRQ_MARKER 3 /* A "marker" was thrown by the firmware. */ +#define B43_DEBUGIRQ_ACK 0xFFFF /* The host writes that to ACK the IRQ */ + +/* The firmware register that contains the "marker" line. */ +#define B43_MARKER_ID_REG 2 +#define B43_MARKER_LINE_REG 3 + +/* The firmware register to fetch the panic reason from. */ +#define B43_FWPANIC_REASON_REG 3 +/* Firmware panic reason codes */ +#define B43_FWPANIC_DIE 0 /* Firmware died. Don't auto-restart it. */ +#define B43_FWPANIC_RESTART 1 /* Firmware died. Schedule a controller reset. */ + +/* The firmware register that contains the watchdog counter. */ +#define B43_WATCHDOG_REG 1 + +/* Device specific rate values. + * The actual values defined here are (rate_in_mbps * 2). + * Some code depends on this. Don't change it. 
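/*
 * Illustrative sketch (not part of the upstream patch): the B43_MACCTL_*
 * bits above are changed with a read-modify-write of the 32-bit
 * B43_MMIO_MACCTL register, using the b43_read32()/b43_write32() inline
 * helpers from this header. The wrapper below is invented; the real driver
 * has its own helper for this.
 */
static void example_macctl_update(struct b43_wldev *dev, u32 set, u32 clear)
{
	u32 macctl = b43_read32(dev, B43_MMIO_MACCTL);

	macctl &= ~clear;
	macctl |= set;
	b43_write32(dev, B43_MMIO_MACCTL, macctl);
}

/* e.g. enable promiscuous RX: example_macctl_update(dev, B43_MACCTL_PROMISC, 0); */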
*/ +#define B43_CCK_RATE_1MB 0x02 +#define B43_CCK_RATE_2MB 0x04 +#define B43_CCK_RATE_5MB 0x0B +#define B43_CCK_RATE_11MB 0x16 +#define B43_OFDM_RATE_6MB 0x0C +#define B43_OFDM_RATE_9MB 0x12 +#define B43_OFDM_RATE_12MB 0x18 +#define B43_OFDM_RATE_18MB 0x24 +#define B43_OFDM_RATE_24MB 0x30 +#define B43_OFDM_RATE_36MB 0x48 +#define B43_OFDM_RATE_48MB 0x60 +#define B43_OFDM_RATE_54MB 0x6C +/* Convert a b43 rate value to a rate in 100kbps */ +#define B43_RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) + +#define B43_DEFAULT_SHORT_RETRY_LIMIT 7 +#define B43_DEFAULT_LONG_RETRY_LIMIT 4 + +#define B43_PHY_TX_BADNESS_LIMIT 1000 + +/* Max size of a security key */ +#define B43_SEC_KEYSIZE 16 +/* Max number of group keys */ +#define B43_NR_GROUP_KEYS 4 +/* Max number of pairwise keys */ +#define B43_NR_PAIRWISE_KEYS 50 +/* Security algorithms. */ +enum { + B43_SEC_ALGO_NONE = 0, /* unencrypted, as of TX header. */ + B43_SEC_ALGO_WEP40, + B43_SEC_ALGO_TKIP, + B43_SEC_ALGO_AES, + B43_SEC_ALGO_WEP104, + B43_SEC_ALGO_AES_LEGACY, +}; + +struct b43_dmaring; + +/* The firmware file header */ +#define B43_FW_TYPE_UCODE 'u' +#define B43_FW_TYPE_PCM 'p' +#define B43_FW_TYPE_IV 'i' +struct b43_fw_header { + /* File type */ + u8 type; + /* File format version */ + u8 ver; + u8 __padding[2]; + /* Size of the data. For ucode and PCM this is in bytes. + * For IV this is number-of-ivs. */ + __be32 size; +} __attribute__((__packed__)); + +/* Initial Value file format */ +#define B43_IV_OFFSET_MASK 0x7FFF +#define B43_IV_32BIT 0x8000 +struct b43_iv { + __be16 offset_size; + union { + __be16 d16; + __be32 d32; + } data __attribute__((__packed__)); +} __attribute__((__packed__)); + + +/* Data structures for DMA transmission, per 80211 core. */ +struct b43_dma { + struct b43_dmaring *tx_ring_AC_BK; /* Background */ + struct b43_dmaring *tx_ring_AC_BE; /* Best Effort */ + struct b43_dmaring *tx_ring_AC_VI; /* Video */ + struct b43_dmaring *tx_ring_AC_VO; /* Voice */ + struct b43_dmaring *tx_ring_mcast; /* Multicast */ + + struct b43_dmaring *rx_ring; +}; + +struct b43_pio_txqueue; +struct b43_pio_rxqueue; + +/* Data structures for PIO transmission, per 80211 core. */ +struct b43_pio { + struct b43_pio_txqueue *tx_queue_AC_BK; /* Background */ + struct b43_pio_txqueue *tx_queue_AC_BE; /* Best Effort */ + struct b43_pio_txqueue *tx_queue_AC_VI; /* Video */ + struct b43_pio_txqueue *tx_queue_AC_VO; /* Voice */ + struct b43_pio_txqueue *tx_queue_mcast; /* Multicast */ + + struct b43_pio_rxqueue *rx_queue; +}; + +/* Context information for a noise calculation (Link Quality). */ +struct b43_noise_calculation { + bool calculation_running; + u8 nr_samples; + s8 samples[8][4]; +}; + +struct b43_stats { + u8 link_noise; +}; + +struct b43_key { + /* If keyconf is NULL, this key is disabled. + * keyconf is a cookie. Don't derefenrence it outside of the set_key + * path, because b43 doesn't own it. */ + struct ieee80211_key_conf *keyconf; + u8 algorithm; +}; + +/* SHM offsets to the QOS data structures for the 4 different queues. */ +#define B43_QOS_PARAMS(queue) (B43_SHM_SH_EDCFQ + \ + (B43_NR_QOSPARAMS * sizeof(u16) * (queue))) +#define B43_QOS_BACKGROUND B43_QOS_PARAMS(0) +#define B43_QOS_BESTEFFORT B43_QOS_PARAMS(1) +#define B43_QOS_VIDEO B43_QOS_PARAMS(2) +#define B43_QOS_VOICE B43_QOS_PARAMS(3) + +/* QOS parameter hardware data structure offsets. 
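/*
 * Illustrative sketch (not part of the upstream patch): with
 * B43_SHM_SH_EDCFQ = 0x0240 and 16 16-bit parameter words per queue
 * (B43_NR_QOSPARAMS), the B43_QOS_PARAMS() macro above places the four
 * queues 0x20 bytes apart in SHM_SHARED. The constants below are copied
 * from this header (sizeof(u16) replaced by sizeof(unsigned short) to
 * keep the snippet standalone).
 */
#include <stdio.h>

#define B43_SHM_SH_EDCFQ	0x0240
#define B43_NR_QOSPARAMS	16
#define B43_QOS_PARAMS(queue)	(B43_SHM_SH_EDCFQ + \
				 (B43_NR_QOSPARAMS * sizeof(unsigned short) * (queue)))

int main(void)
{
	int q;

	/* Prints 0x0240, 0x0260, 0x0280 and 0x02A0 - the values behind
	 * B43_QOS_BACKGROUND, B43_QOS_BESTEFFORT, B43_QOS_VIDEO and
	 * B43_QOS_VOICE above. */
	for (q = 0; q < 4; q++)
		printf("queue %d -> SHM offset 0x%04X\n",
		       q, (unsigned)B43_QOS_PARAMS(q));
	return 0;
}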
*/ +#define B43_NR_QOSPARAMS 16 +enum { + B43_QOSPARAM_TXOP = 0, + B43_QOSPARAM_CWMIN, + B43_QOSPARAM_CWMAX, + B43_QOSPARAM_CWCUR, + B43_QOSPARAM_AIFS, + B43_QOSPARAM_BSLOTS, + B43_QOSPARAM_REGGAP, + B43_QOSPARAM_STATUS, +}; + +/* QOS parameters for a queue. */ +struct b43_qos_params { + /* The QOS parameters */ + struct ieee80211_tx_queue_params p; +}; + +struct b43_wl; + +/* The type of the firmware file. */ +enum b43_firmware_file_type { + B43_FWTYPE_PROPRIETARY, + B43_FWTYPE_OPENSOURCE, + B43_NR_FWTYPES, +}; + +/* Context data for fetching firmware. */ +struct b43_request_fw_context { + /* The device we are requesting the fw for. */ + struct b43_wldev *dev; + /* The type of firmware to request. */ + enum b43_firmware_file_type req_type; + /* Error messages for each firmware type. */ + char errors[B43_NR_FWTYPES][128]; + /* Temporary buffer for storing the firmware name. */ + char fwname[64]; + /* A fatal error occured while requesting. Firmware reqest + * can not continue, as any other reqest will also fail. */ + int fatal_failure; +}; + +/* In-memory representation of a cached microcode file. */ +struct b43_firmware_file { + const char *filename; + const struct firmware *data; + /* Type of the firmware file name. Note that this does only indicate + * the type by the firmware name. NOT the file contents. + * If you want to check for proprietary vs opensource, use (struct b43_firmware)->opensource + * instead! The (struct b43_firmware)->opensource flag is derived from the actual firmware + * binary code, not just the filename. + */ + enum b43_firmware_file_type type; +}; + +/* Pointers to the firmware data and meta information about it. */ +struct b43_firmware { + /* Microcode */ + struct b43_firmware_file ucode; + /* PCM code */ + struct b43_firmware_file pcm; + /* Initial MMIO values for the firmware */ + struct b43_firmware_file initvals; + /* Initial MMIO values for the firmware, band-specific */ + struct b43_firmware_file initvals_band; + + /* Firmware revision */ + u16 rev; + /* Firmware patchlevel */ + u16 patch; + + /* Set to true, if we are using an opensource firmware. + * Use this to check for proprietary vs opensource. */ + bool opensource; + /* Set to true, if the core needs a PCM firmware, but + * we failed to load one. This is always false for + * core rev > 10, as these don't need PCM firmware. */ + bool pcm_request_failed; +}; + +/* Device (802.11 core) initialization status. */ +enum { + B43_STAT_UNINIT = 0, /* Uninitialized. */ + B43_STAT_INITIALIZED = 1, /* Initialized, but not started, yet. */ + B43_STAT_STARTED = 2, /* Up and running. */ +}; +#define b43_status(wldev) atomic_read(&(wldev)->__init_status) +#define b43_set_status(wldev, stat) do { \ + atomic_set(&(wldev)->__init_status, (stat)); \ + smp_wmb(); \ + } while (0) + +/* Data structure for one wireless device (802.11 core) */ +struct b43_wldev { + struct ssb_device *dev; + struct b43_wl *wl; + + /* The device initialization status. + * Use b43_status() to query. */ + atomic_t __init_status; + + bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */ + bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */ + bool radio_hw_enable; /* saved state of radio hardware enabled state */ + bool qos_enabled; /* TRUE, if QoS is used. */ + bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */ + bool use_pio; /* TRUE if next init should use PIO */ + + /* PHY/Radio device. */ + struct b43_phy phy; + + union { + /* DMA engines. */ + struct b43_dma dma; + /* PIO engines. 
*/ + struct b43_pio pio; + }; + /* Use b43_using_pio_transfers() to check whether we are using + * DMA or PIO data transfers. */ + bool __using_pio_transfers; + + /* Various statistics about the physical device. */ + struct b43_stats stats; + + /* Reason code of the last interrupt. */ + u32 irq_reason; + u32 dma_reason[6]; + /* The currently active generic-interrupt mask. */ + u32 irq_mask; + + /* Link Quality calculation context. */ + struct b43_noise_calculation noisecalc; + /* if > 0 MAC is suspended. if == 0 MAC is enabled. */ + int mac_suspended; + + /* Periodic tasks */ + struct delayed_work periodic_work; + unsigned int periodic_state; + + struct work_struct restart_work; + + /* encryption/decryption */ + u16 ktp; /* Key table pointer */ + struct b43_key key[B43_NR_GROUP_KEYS * 2 + B43_NR_PAIRWISE_KEYS]; + + /* Firmware data */ + struct b43_firmware fw; + + /* Devicelist in struct b43_wl (all 802.11 cores) */ + struct list_head list; + + /* Debugging stuff follows. */ +#ifdef CONFIG_B43_DEBUG + struct b43_dfsentry *dfsentry; + unsigned int irq_count; + unsigned int irq_bit_count[32]; + unsigned int tx_count; + unsigned int rx_count; +#endif +}; + +/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ +struct b43_wl { + /* Pointer to the active wireless device on this chip */ + struct b43_wldev *current_dev; + /* Pointer to the ieee80211 hardware data structure */ + struct ieee80211_hw *hw; + + /* Global driver mutex. Every operation must run with this mutex locked. */ + struct mutex mutex; + /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ + * handler, only. This basically is just the IRQ mask register. */ + spinlock_t hardirq_lock; + + /* The number of queues that were registered with the mac80211 subsystem + * initially. This is a backup copy of hw->queues in case hw->queues has + * to be dynamically lowered at runtime (Firmware does not support QoS). + * hw->queues has to be restored to the original value before unregistering + * from the mac80211 subsystem. */ + u16 mac80211_initially_registered_queues; + + /* We can only have one operating interface (802.11 core) + * at a time. General information about this interface follows. + */ + + struct ieee80211_vif *vif; + /* The MAC address of the operating interface. */ + u8 mac_addr[ETH_ALEN]; + /* Current BSSID */ + u8 bssid[ETH_ALEN]; + /* Interface type. (NL80211_IFTYPE_XXX) */ + int if_type; + /* Is the card operating in AP, STA or IBSS mode? */ + bool operating; + /* filter flags */ + unsigned int filter_flags; + /* Stats about the wireless interface */ + struct ieee80211_low_level_stats ieee_stats; + +#ifdef CONFIG_B43_HWRNG + struct hwrng rng; + bool rng_initialized; + char rng_name[30 + 1]; +#endif /* CONFIG_B43_HWRNG */ + + /* List of all wireless devices on this chip */ + struct list_head devlist; + u8 nr_devs; + + bool radiotap_enabled; + bool radio_enabled; + + /* The beacon we are currently using (AP or IBSS mode). */ + struct sk_buff *current_beacon; + bool beacon0_uploaded; + bool beacon1_uploaded; + bool beacon_templates_virgin; /* Never wrote the templates? */ + struct work_struct beacon_update_trigger; + + /* The current QOS parameters for the 4 queues. */ + struct b43_qos_params qos_params[4]; + + /* Work for adjustment of the transmission power. + * This is scheduled when we determine that the actual TX output + * power doesn't match what we want. 
*/ + struct work_struct txpower_adjust_work; + + /* Packet transmit work */ + struct work_struct tx_work; + /* Queue of packets to be transmitted. */ + struct sk_buff_head tx_queue; + + /* The device LEDs. */ + struct b43_leds leds; + + /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */ + u8 pio_scratchspace[110] __attribute__((__aligned__(8))); + u8 pio_tailspace[4] __attribute__((__aligned__(8))); +}; + +static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +static inline struct b43_wldev *dev_to_b43_wldev(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + return ssb_get_drvdata(ssb_dev); +} + +/* Is the device operating in a specified mode (NL80211_IFTYPE_XXX). */ +static inline int b43_is_mode(struct b43_wl *wl, int type) +{ + return (wl->operating && wl->if_type == type); +} + +/** + * b43_current_band - Returns the currently used band. + * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ. + */ +static inline enum ieee80211_band b43_current_band(struct b43_wl *wl) +{ + return wl->hw->conf.channel->band; +} + +static inline u16 b43_read16(struct b43_wldev *dev, u16 offset) +{ + return ssb_read16(dev->dev, offset); +} + +static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value) +{ + ssb_write16(dev->dev, offset, value); +} + +static inline u32 b43_read32(struct b43_wldev *dev, u16 offset) +{ + return ssb_read32(dev->dev, offset); +} + +static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value) +{ + ssb_write32(dev->dev, offset, value); +} + +static inline bool b43_using_pio_transfers(struct b43_wldev *dev) +{ + return dev->__using_pio_transfers; +} + +#ifdef CONFIG_B43_FORCE_PIO +# define B43_PIO_DEFAULT 1 +#else +# define B43_PIO_DEFAULT 0 +#endif + +/* Message printing */ +void b43info(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +void b43err(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +void b43warn(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +void b43dbg(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); + + +/* A WARN_ON variant that vanishes when b43 debugging is disabled. + * This _also_ evaluates the arg with debugging disabled. */ +#if B43_DEBUG +# define B43_WARN_ON(x) WARN_ON(x) +#else +static inline bool __b43_warn_on_dummy(bool x) { return x; } +# define B43_WARN_ON(x) __b43_warn_on_dummy(unlikely(!!(x))) +#endif + +/* Convert an integer to a Q5.2 value */ +#define INT_TO_Q52(i) ((i) << 2) +/* Convert a Q5.2 value to an integer (precision loss!) */ +#define Q52_TO_INT(q52) ((q52) >> 2) +/* Macros for printing a value in Q5.2 format */ +#define Q52_FMT "%u.%u" +#define Q52_ARG(q52) Q52_TO_INT(q52), ((((q52) & 0x3) * 100) / 4) + +#endif /* B43_H_ */ diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c new file mode 100644 index 0000000..80b19a4 --- /dev/null +++ b/drivers/net/wireless/b43/debugfs.c @@ -0,0 +1,831 @@ +/* + + Broadcom B43 wireless driver + + debugfs driver debugging code + + Copyright (c) 2005-2007 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
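/*
 * Illustrative sketch (not part of the upstream patch): the debugfs files
 * implemented below use a small text protocol - shm16read, for example,
 * expects a "0x<routing> 0x<offset>" write to select an address and then
 * returns "0x%04X\n" on the next read. The snippet drives that interface
 * from user space; the file path is an assumption (it depends on where
 * debugfs is mounted and on the wiphy name) and may differ on a real
 * system.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/b43/phy0/shm16read"; /* assumed path */
	unsigned int val;
	FILE *f;

	/* Select SHM_SHARED (routing 1), offset 0x0016 (802.11 core revision). */
	f = fopen(path, "w");
	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "0x1 0x0016");
	fclose(f);

	/* Read back the 16-bit value at the selected address. */
	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "0x%X", &val) == 1)
		printf("SHM_SHARED[0x0016] = 0x%04X\n", val);
	fclose(f);
	return 0;
}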
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include +#include +#include +#include +#include +#include + +#include "b43.h" +#include "main.h" +#include "debugfs.h" +#include "dma.h" +#include "xmit.h" + + +/* The root directory. */ +static struct dentry *rootdir; + +struct b43_debugfs_fops { + ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize); + int (*write)(struct b43_wldev *dev, const char *buf, size_t count); + struct file_operations fops; + /* Offset of struct b43_dfs_file in struct b43_dfsentry */ + size_t file_struct_offset; +}; + +static inline +struct b43_dfs_file *fops_to_dfs_file(struct b43_wldev *dev, + const struct b43_debugfs_fops *dfops) +{ + void *p; + + p = dev->dfsentry; + p += dfops->file_struct_offset; + + return p; +} + + +#define fappend(fmt, x...) \ + do { \ + if (bufsize - count) \ + count += snprintf(buf + count, \ + bufsize - count, \ + fmt , ##x); \ + else \ + printk(KERN_ERR "b43: fappend overflow\n"); \ + } while (0) + + +/* The biggest address values for SHM access from the debugfs files. */ +#define B43_MAX_SHM_ROUTING 4 +#define B43_MAX_SHM_ADDR 0xFFFF + +static ssize_t shm16read__read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + unsigned int routing, addr; + u16 val; + + routing = dev->dfsentry->shm16read_routing_next; + addr = dev->dfsentry->shm16read_addr_next; + if ((routing > B43_MAX_SHM_ROUTING) || + (addr > B43_MAX_SHM_ADDR)) + return -EDESTADDRREQ; + + val = b43_shm_read16(dev, routing, addr); + fappend("0x%04X\n", val); + + return count; +} + +static int shm16read__write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + unsigned int routing, addr; + int res; + + res = sscanf(buf, "0x%X 0x%X", &routing, &addr); + if (res != 2) + return -EINVAL; + if (routing > B43_MAX_SHM_ROUTING) + return -EADDRNOTAVAIL; + if (addr > B43_MAX_SHM_ADDR) + return -EADDRNOTAVAIL; + if (routing == B43_SHM_SHARED) { + if ((addr % 2) != 0) + return -EADDRNOTAVAIL; + } + + dev->dfsentry->shm16read_routing_next = routing; + dev->dfsentry->shm16read_addr_next = addr; + + return 0; +} + +static int shm16write__write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + unsigned int routing, addr, mask, set; + u16 val; + int res; + + res = sscanf(buf, "0x%X 0x%X 0x%X 0x%X", + &routing, &addr, &mask, &set); + if (res != 4) + return -EINVAL; + if (routing > B43_MAX_SHM_ROUTING) + return -EADDRNOTAVAIL; + if (addr > B43_MAX_SHM_ADDR) + return -EADDRNOTAVAIL; + if (routing == B43_SHM_SHARED) { + if ((addr % 2) != 0) + return -EADDRNOTAVAIL; + } + if ((mask > 0xFFFF) || (set > 0xFFFF)) + return -E2BIG; + + if (mask == 0) + val = 0; + else + val = b43_shm_read16(dev, routing, addr); + val &= mask; + val |= set; + b43_shm_write16(dev, routing, addr, val); + + return 0; +} + +static ssize_t shm32read__read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + unsigned int routing, addr; + u32 val; + + routing = dev->dfsentry->shm32read_routing_next; + addr = dev->dfsentry->shm32read_addr_next; + if ((routing > B43_MAX_SHM_ROUTING) || + (addr 
> B43_MAX_SHM_ADDR)) + return -EDESTADDRREQ; + + val = b43_shm_read32(dev, routing, addr); + fappend("0x%08X\n", val); + + return count; +} + +static int shm32read__write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + unsigned int routing, addr; + int res; + + res = sscanf(buf, "0x%X 0x%X", &routing, &addr); + if (res != 2) + return -EINVAL; + if (routing > B43_MAX_SHM_ROUTING) + return -EADDRNOTAVAIL; + if (addr > B43_MAX_SHM_ADDR) + return -EADDRNOTAVAIL; + if (routing == B43_SHM_SHARED) { + if ((addr % 2) != 0) + return -EADDRNOTAVAIL; + } + + dev->dfsentry->shm32read_routing_next = routing; + dev->dfsentry->shm32read_addr_next = addr; + + return 0; +} + +static int shm32write__write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + unsigned int routing, addr, mask, set; + u32 val; + int res; + + res = sscanf(buf, "0x%X 0x%X 0x%X 0x%X", + &routing, &addr, &mask, &set); + if (res != 4) + return -EINVAL; + if (routing > B43_MAX_SHM_ROUTING) + return -EADDRNOTAVAIL; + if (addr > B43_MAX_SHM_ADDR) + return -EADDRNOTAVAIL; + if (routing == B43_SHM_SHARED) { + if ((addr % 2) != 0) + return -EADDRNOTAVAIL; + } + if ((mask > 0xFFFFFFFF) || (set > 0xFFFFFFFF)) + return -E2BIG; + + if (mask == 0) + val = 0; + else + val = b43_shm_read32(dev, routing, addr); + val &= mask; + val |= set; + b43_shm_write32(dev, routing, addr, val); + + return 0; +} + +/* The biggest MMIO address that we allow access to from the debugfs files. */ +#define B43_MAX_MMIO_ACCESS (0xF00 - 1) + +static ssize_t mmio16read__read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + unsigned int addr; + u16 val; + + addr = dev->dfsentry->mmio16read_next; + if (addr > B43_MAX_MMIO_ACCESS) + return -EDESTADDRREQ; + + val = b43_read16(dev, addr); + fappend("0x%04X\n", val); + + return count; +} + +static int mmio16read__write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + unsigned int addr; + int res; + + res = sscanf(buf, "0x%X", &addr); + if (res != 1) + return -EINVAL; + if (addr > B43_MAX_MMIO_ACCESS) + return -EADDRNOTAVAIL; + if ((addr % 2) != 0) + return -EINVAL; + + dev->dfsentry->mmio16read_next = addr; + + return 0; +} + +static int mmio16write__write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + unsigned int addr, mask, set; + int res; + u16 val; + + res = sscanf(buf, "0x%X 0x%X 0x%X", &addr, &mask, &set); + if (res != 3) + return -EINVAL; + if (addr > B43_MAX_MMIO_ACCESS) + return -EADDRNOTAVAIL; + if ((mask > 0xFFFF) || (set > 0xFFFF)) + return -E2BIG; + if ((addr % 2) != 0) + return -EINVAL; + + if (mask == 0) + val = 0; + else + val = b43_read16(dev, addr); + val &= mask; + val |= set; + b43_write16(dev, addr, val); + + return 0; +} + +static ssize_t mmio32read__read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + unsigned int addr; + u32 val; + + addr = dev->dfsentry->mmio32read_next; + if (addr > B43_MAX_MMIO_ACCESS) + return -EDESTADDRREQ; + + val = b43_read32(dev, addr); + fappend("0x%08X\n", val); + + return count; +} + +static int mmio32read__write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + unsigned int addr; + int res; + + res = sscanf(buf, "0x%X", &addr); + if (res != 1) + return -EINVAL; + if (addr > B43_MAX_MMIO_ACCESS) + return -EADDRNOTAVAIL; + if ((addr % 4) != 0) + return -EINVAL; + + dev->dfsentry->mmio32read_next = addr; + + return 0; +} + +static int mmio32write__write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + 
unsigned int addr, mask, set; + int res; + u32 val; + + res = sscanf(buf, "0x%X 0x%X 0x%X", &addr, &mask, &set); + if (res != 3) + return -EINVAL; + if (addr > B43_MAX_MMIO_ACCESS) + return -EADDRNOTAVAIL; + if ((mask > 0xFFFFFFFF) || (set > 0xFFFFFFFF)) + return -E2BIG; + if ((addr % 4) != 0) + return -EINVAL; + + if (mask == 0) + val = 0; + else + val = b43_read32(dev, addr); + val &= mask; + val |= set; + b43_write32(dev, addr, val); + + return 0; +} + +static ssize_t txstat_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + struct b43_txstatus_log *log = &dev->dfsentry->txstatlog; + ssize_t count = 0; + int i, idx; + struct b43_txstatus *stat; + + if (log->end < 0) { + fappend("Nothing transmitted, yet\n"); + goto out; + } + fappend("b43 TX status reports:\n\n" + "index | cookie | seq | phy_stat | frame_count | " + "rts_count | supp_reason | pm_indicated | " + "intermediate | for_ampdu | acked\n" "---\n"); + i = log->end + 1; + idx = 0; + while (1) { + if (i == B43_NR_LOGGED_TXSTATUS) + i = 0; + stat = &(log->log[i]); + if (stat->cookie) { + fappend("%03d | " + "0x%04X | 0x%04X | 0x%02X | " + "0x%X | 0x%X | " + "%u | %u | " + "%u | %u | %u\n", + idx, + stat->cookie, stat->seq, stat->phy_stat, + stat->frame_count, stat->rts_count, + stat->supp_reason, stat->pm_indicated, + stat->intermediate, stat->for_ampdu, + stat->acked); + idx++; + } + if (i == log->end) + break; + i++; + } +out: + + return count; +} + +static int restart_write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + int err = 0; + + if (count > 0 && buf[0] == '1') { + b43_controller_restart(dev, "manually restarted"); + } else + err = -EINVAL; + + return err; +} + +static unsigned long calc_expire_secs(unsigned long now, + unsigned long time, + unsigned long expire) +{ + expire = time + expire; + + if (time_after(now, expire)) + return 0; /* expired */ + if (expire < now) { + /* jiffies wrapped */ + expire -= MAX_JIFFY_OFFSET; + now -= MAX_JIFFY_OFFSET; + } + B43_WARN_ON(expire < now); + + return (expire - now) / HZ; +} + +static ssize_t loctls_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + struct b43_txpower_lo_control *lo; + int i, err = 0; + struct b43_lo_calib *cal; + unsigned long now = jiffies; + struct b43_phy *phy = &dev->phy; + + if (phy->type != B43_PHYTYPE_G) { + fappend("Device is not a G-PHY\n"); + err = -ENODEV; + goto out; + } + lo = phy->g->lo_control; + fappend("-- Local Oscillator calibration data --\n\n"); + fappend("HW-power-control enabled: %d\n", + dev->phy.hardware_power_control); + fappend("TX Bias: 0x%02X, TX Magn: 0x%02X (expire in %lu sec)\n", + lo->tx_bias, lo->tx_magn, + calc_expire_secs(now, lo->txctl_measured_time, + B43_LO_TXCTL_EXPIRE)); + fappend("Power Vector: 0x%08X%08X (expires in %lu sec)\n", + (unsigned int)((lo->power_vector & 0xFFFFFFFF00000000ULL) >> 32), + (unsigned int)(lo->power_vector & 0x00000000FFFFFFFFULL), + calc_expire_secs(now, lo->pwr_vec_read_time, + B43_LO_PWRVEC_EXPIRE)); + + fappend("\nCalibrated settings:\n"); + list_for_each_entry(cal, &lo->calib_list, list) { + bool active; + + active = (b43_compare_bbatt(&cal->bbatt, &phy->g->bbatt) && + b43_compare_rfatt(&cal->rfatt, &phy->g->rfatt)); + fappend("BB(%d), RF(%d,%d) -> I=%d, Q=%d " + "(expires in %lu sec)%s\n", + cal->bbatt.att, + cal->rfatt.att, cal->rfatt.with_padmix, + cal->ctl.i, cal->ctl.q, + calc_expire_secs(now, cal->calib_time, + B43_LO_CALIB_EXPIRE), + active ? 
" ACTIVE" : ""); + } + + fappend("\nUsed RF attenuation values: Value(WithPadmix flag)\n"); + for (i = 0; i < lo->rfatt_list.len; i++) { + fappend("%u(%d), ", + lo->rfatt_list.list[i].att, + lo->rfatt_list.list[i].with_padmix); + } + fappend("\n"); + fappend("\nUsed Baseband attenuation values:\n"); + for (i = 0; i < lo->bbatt_list.len; i++) { + fappend("%u, ", + lo->bbatt_list.list[i].att); + } + fappend("\n"); + +out: + return err ? err : count; +} + +#undef fappend + +static int b43_debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43_wldev *dev; + struct b43_debugfs_fops *dfops; + struct b43_dfs_file *dfile; + ssize_t uninitialized_var(ret); + char *buf; + const size_t bufsize = 1024 * 16; /* 16 kiB buffer */ + const size_t buforder = get_order(bufsize); + int err = 0; + + if (!count) + return 0; + dev = file->private_data; + if (!dev) + return -ENODEV; + + mutex_lock(&dev->wl->mutex); + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43_debugfs_fops, fops); + if (!dfops->read) { + err = -ENOSYS; + goto out_unlock; + } + dfile = fops_to_dfs_file(dev, dfops); + + if (!dfile->buffer) { + buf = (char *)__get_free_pages(GFP_KERNEL, buforder); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + memset(buf, 0, bufsize); + ret = dfops->read(dev, buf, bufsize); + if (ret <= 0) { + free_pages((unsigned long)buf, buforder); + err = ret; + goto out_unlock; + } + dfile->data_len = ret; + dfile->buffer = buf; + } + + ret = simple_read_from_buffer(userbuf, count, ppos, + dfile->buffer, + dfile->data_len); + if (*ppos >= dfile->data_len) { + free_pages((unsigned long)dfile->buffer, buforder); + dfile->buffer = NULL; + dfile->data_len = 0; + } +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? err : ret; +} + +static ssize_t b43_debugfs_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43_wldev *dev; + struct b43_debugfs_fops *dfops; + char *buf; + int err = 0; + + if (!count) + return 0; + if (count > PAGE_SIZE) + return -E2BIG; + dev = file->private_data; + if (!dev) + return -ENODEV; + + mutex_lock(&dev->wl->mutex); + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43_debugfs_fops, fops); + if (!dfops->write) { + err = -ENOSYS; + goto out_unlock; + } + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + if (copy_from_user(buf, userbuf, count)) { + err = -EFAULT; + goto out_freepage; + } + err = dfops->write(dev, buf, count); + if (err) + goto out_freepage; + +out_freepage: + free_page((unsigned long)buf); +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? 
err : count; +} + + +#define B43_DEBUGFS_FOPS(name, _read, _write) \ + static struct b43_debugfs_fops fops_##name = { \ + .read = _read, \ + .write = _write, \ + .fops = { \ + .open = b43_debugfs_open, \ + .read = b43_debugfs_read, \ + .write = b43_debugfs_write, \ + }, \ + .file_struct_offset = offsetof(struct b43_dfsentry, \ + file_##name), \ + } + +B43_DEBUGFS_FOPS(shm16read, shm16read__read_file, shm16read__write_file); +B43_DEBUGFS_FOPS(shm16write, NULL, shm16write__write_file); +B43_DEBUGFS_FOPS(shm32read, shm32read__read_file, shm32read__write_file); +B43_DEBUGFS_FOPS(shm32write, NULL, shm32write__write_file); +B43_DEBUGFS_FOPS(mmio16read, mmio16read__read_file, mmio16read__write_file); +B43_DEBUGFS_FOPS(mmio16write, NULL, mmio16write__write_file); +B43_DEBUGFS_FOPS(mmio32read, mmio32read__read_file, mmio32read__write_file); +B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file); +B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL); +B43_DEBUGFS_FOPS(restart, NULL, restart_write_file); +B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL); + + +bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature) +{ + bool enabled; + + enabled = (dev->dfsentry && dev->dfsentry->dyn_debug[feature]); + if (unlikely(enabled)) { + /* Force full debugging messages, if the user enabled + * some dynamic debugging feature. */ + b43_modparam_verbose = B43_VERBOSITY_MAX; + } + + return enabled; +} + +static void b43_remove_dynamic_debug(struct b43_wldev *dev) +{ + struct b43_dfsentry *e = dev->dfsentry; + int i; + + for (i = 0; i < __B43_NR_DYNDBG; i++) + debugfs_remove(e->dyn_debug_dentries[i]); +} + +static void b43_add_dynamic_debug(struct b43_wldev *dev) +{ + struct b43_dfsentry *e = dev->dfsentry; + struct dentry *d; + +#define add_dyn_dbg(name, id, initstate) do { \ + e->dyn_debug[id] = (initstate); \ + d = debugfs_create_bool(name, 0600, e->subdir, \ + &(e->dyn_debug[id])); \ + if (!IS_ERR(d)) \ + e->dyn_debug_dentries[id] = d; \ + } while (0) + + add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, 0); + add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, 0); + add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, 0); + add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, 0); + add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0); + add_dyn_dbg("debug_lo", B43_DBG_LO, 0); + add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, 0); + add_dyn_dbg("debug_keys", B43_DBG_KEYS, 0); + add_dyn_dbg("debug_verbose_stats", B43_DBG_VERBOSESTATS, 0); + +#undef add_dyn_dbg +} + +void b43_debugfs_add_device(struct b43_wldev *dev) +{ + struct b43_dfsentry *e; + struct b43_txstatus_log *log; + char devdir[16]; + + B43_WARN_ON(!dev); + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + b43err(dev->wl, "debugfs: add device OOM\n"); + return; + } + e->dev = dev; + log = &e->txstatlog; + log->log = kcalloc(B43_NR_LOGGED_TXSTATUS, + sizeof(struct b43_txstatus), GFP_KERNEL); + if (!log->log) { + b43err(dev->wl, "debugfs: add device txstatus OOM\n"); + kfree(e); + return; + } + log->end = -1; + + dev->dfsentry = e; + + snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); + e->subdir = debugfs_create_dir(devdir, rootdir); + if (!e->subdir || IS_ERR(e->subdir)) { + if (e->subdir == ERR_PTR(-ENODEV)) { + b43dbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " + "enabled in kernel config\n"); + } else { + b43err(dev->wl, "debugfs: cannot create %s directory\n", + devdir); + } + dev->dfsentry = NULL; + kfree(log->log); + kfree(e); + return; + } + + e->mmio16read_next = 0xFFFF; /* invalid address */ + 
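+ /* All of these read-cursor fields are preloaded with out-of-range
+  * sentinels, so reading one of the *read debugfs files before an address
+  * has been written to it returns -EDESTADDRREQ instead of poking the
+  * hardware at a stray offset. */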
e->mmio32read_next = 0xFFFF; /* invalid address */ + e->shm16read_routing_next = 0xFFFFFFFF; /* invalid routing */ + e->shm16read_addr_next = 0xFFFFFFFF; /* invalid address */ + e->shm32read_routing_next = 0xFFFFFFFF; /* invalid routing */ + e->shm32read_addr_next = 0xFFFFFFFF; /* invalid address */ + +#define ADD_FILE(name, mode) \ + do { \ + struct dentry *d; \ + d = debugfs_create_file(__stringify(name), \ + mode, e->subdir, dev, \ + &fops_##name.fops); \ + e->file_##name.dentry = NULL; \ + if (!IS_ERR(d)) \ + e->file_##name.dentry = d; \ + } while (0) + + + ADD_FILE(shm16read, 0600); + ADD_FILE(shm16write, 0200); + ADD_FILE(shm32read, 0600); + ADD_FILE(shm32write, 0200); + ADD_FILE(mmio16read, 0600); + ADD_FILE(mmio16write, 0200); + ADD_FILE(mmio32read, 0600); + ADD_FILE(mmio32write, 0200); + ADD_FILE(txstat, 0400); + ADD_FILE(restart, 0200); + ADD_FILE(loctls, 0400); + +#undef ADD_FILE + + b43_add_dynamic_debug(dev); +} + +void b43_debugfs_remove_device(struct b43_wldev *dev) +{ + struct b43_dfsentry *e; + + if (!dev) + return; + e = dev->dfsentry; + if (!e) + return; + b43_remove_dynamic_debug(dev); + + debugfs_remove(e->file_shm16read.dentry); + debugfs_remove(e->file_shm16write.dentry); + debugfs_remove(e->file_shm32read.dentry); + debugfs_remove(e->file_shm32write.dentry); + debugfs_remove(e->file_mmio16read.dentry); + debugfs_remove(e->file_mmio16write.dentry); + debugfs_remove(e->file_mmio32read.dentry); + debugfs_remove(e->file_mmio32write.dentry); + debugfs_remove(e->file_txstat.dentry); + debugfs_remove(e->file_restart.dentry); + debugfs_remove(e->file_loctls.dentry); + + debugfs_remove(e->subdir); + kfree(e->txstatlog.log); + kfree(e); +} + +void b43_debugfs_log_txstat(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + struct b43_dfsentry *e = dev->dfsentry; + struct b43_txstatus_log *log; + struct b43_txstatus *cur; + int i; + + if (!e) + return; + log = &e->txstatlog; + i = log->end + 1; + if (i == B43_NR_LOGGED_TXSTATUS) + i = 0; + log->end = i; + cur = &(log->log[i]); + memcpy(cur, status, sizeof(*cur)); +} + +void b43_debugfs_init(void) +{ + rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR(rootdir)) + rootdir = NULL; +} + +void b43_debugfs_exit(void) +{ + debugfs_remove(rootdir); +} diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h new file mode 100644 index 0000000..822aad8 --- /dev/null +++ b/drivers/net/wireless/b43/debugfs.h @@ -0,0 +1,111 @@ +#ifndef B43_DEBUGFS_H_ +#define B43_DEBUGFS_H_ + +struct b43_wldev; +struct b43_txstatus; + +enum b43_dyndbg { /* Dynamic debugging features */ + B43_DBG_XMITPOWER, + B43_DBG_DMAOVERFLOW, + B43_DBG_DMAVERBOSE, + B43_DBG_PWORK_FAST, + B43_DBG_PWORK_STOP, + B43_DBG_LO, + B43_DBG_FIRMWARE, + B43_DBG_KEYS, + B43_DBG_VERBOSESTATS, + __B43_NR_DYNDBG, +}; + +#ifdef CONFIG_B43_DEBUG + +struct dentry; + +#define B43_NR_LOGGED_TXSTATUS 100 + +struct b43_txstatus_log { + /* This structure is protected by wl->mutex */ + + struct b43_txstatus *log; + int end; +}; + +struct b43_dfs_file { + struct dentry *dentry; + char *buffer; + size_t data_len; +}; + +struct b43_dfsentry { + struct b43_wldev *dev; + struct dentry *subdir; + + struct b43_dfs_file file_shm16read; + struct b43_dfs_file file_shm16write; + struct b43_dfs_file file_shm32read; + struct b43_dfs_file file_shm32write; + struct b43_dfs_file file_mmio16read; + struct b43_dfs_file file_mmio16write; + struct b43_dfs_file file_mmio32read; + struct b43_dfs_file file_mmio32write; + struct b43_dfs_file file_txstat; + struct 
b43_dfs_file file_txpower_g; + struct b43_dfs_file file_restart; + struct b43_dfs_file file_loctls; + + struct b43_txstatus_log txstatlog; + + /* The cached address for the next mmio16read file read */ + u16 mmio16read_next; + /* The cached address for the next mmio32read file read */ + u16 mmio32read_next; + + /* The cached address for the next shm16read file read */ + u32 shm16read_routing_next; + u32 shm16read_addr_next; + /* The cached address for the next shm32read file read */ + u32 shm32read_routing_next; + u32 shm32read_addr_next; + + /* Enabled/Disabled list for the dynamic debugging features. */ + u32 dyn_debug[__B43_NR_DYNDBG]; + /* Dentries for the dynamic debugging entries. */ + struct dentry *dyn_debug_dentries[__B43_NR_DYNDBG]; +}; + +bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature); + +void b43_debugfs_init(void); +void b43_debugfs_exit(void); +void b43_debugfs_add_device(struct b43_wldev *dev); +void b43_debugfs_remove_device(struct b43_wldev *dev); +void b43_debugfs_log_txstat(struct b43_wldev *dev, + const struct b43_txstatus *status); + +#else /* CONFIG_B43_DEBUG */ + +static inline bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature) +{ + return 0; +} + +static inline void b43_debugfs_init(void) +{ +} +static inline void b43_debugfs_exit(void) +{ +} +static inline void b43_debugfs_add_device(struct b43_wldev *dev) +{ +} +static inline void b43_debugfs_remove_device(struct b43_wldev *dev) +{ +} +static inline void b43_debugfs_log_txstat(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ +} + +#endif /* CONFIG_B43_DEBUG */ + +#endif /* B43_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c new file mode 100644 index 0000000..be7abf8 --- /dev/null +++ b/drivers/net/wireless/b43/dma.c @@ -0,0 +1,1671 @@ +/* + + Broadcom B43 wireless driver + + DMA ringbuffer and descriptor allocation/management + + Copyright (c) 2005, 2006 Michael Buesch + + Some code in this file is derived from the b44.c driver + Copyright (C) 2002 David S. Miller + Copyright (C) Pekka Pietikainen + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "dma.h" +#include "main.h" +#include "debugfs.h" +#include "xmit.h" + +#include +#include +#include +#include +#include +#include + + +/* Required number of TX DMA slots per TX frame. + * This currently is 2, because we put the header and the ieee80211 frame + * into separate slots. */ +#define TX_SLOTS_PER_FRAME 2 + + +/* 32bit DMA ops. 
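+ * Each 32-bit descriptor is two words (control and address), and the
+ * hardware TX and RX index registers take byte offsets into the ring,
+ * which is why the poke_tx and rxslot helpers below multiply or divide
+ * the slot number by sizeof(struct b43_dmadesc32).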
*/ +static +struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring, + int slot, + struct b43_dmadesc_meta **meta) +{ + struct b43_dmadesc32 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43_dmadesc_generic *)desc; +} + +static void op32_fill_descriptor(struct b43_dmaring *ring, + struct b43_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43_dmadesc32 *descbase = ring->descbase; + int slot; + u32 ctl; + u32 addr; + u32 addrext; + + slot = (int)(&(desc->dma32) - descbase); + B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK); + addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addr |= ssb_dma_translation(ring->dev->dev); + ctl = bufsize & B43_DMA32_DCTL_BYTECNT; + if (slot == ring->nr_slots - 1) + ctl |= B43_DMA32_DCTL_DTABLEEND; + if (start) + ctl |= B43_DMA32_DCTL_FRAMESTART; + if (end) + ctl |= B43_DMA32_DCTL_FRAMEEND; + if (irq) + ctl |= B43_DMA32_DCTL_IRQ; + ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT) + & B43_DMA32_DCTL_ADDREXT_MASK; + + desc->dma32.control = cpu_to_le32(ctl); + desc->dma32.address = cpu_to_le32(addr); +} + +static void op32_poke_tx(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA32_TXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc32))); +} + +static void op32_tx_suspend(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL) + | B43_DMA32_TXSUSPEND); +} + +static void op32_tx_resume(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL) + & ~B43_DMA32_TXSUSPEND); +} + +static int op32_get_current_rxslot(struct b43_dmaring *ring) +{ + u32 val; + + val = b43_dma_read(ring, B43_DMA32_RXSTATUS); + val &= B43_DMA32_RXDPTR; + + return (val / sizeof(struct b43_dmadesc32)); +} + +static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA32_RXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc32))); +} + +static const struct b43_dma_ops dma32_ops = { + .idx2desc = op32_idx2desc, + .fill_descriptor = op32_fill_descriptor, + .poke_tx = op32_poke_tx, + .tx_suspend = op32_tx_suspend, + .tx_resume = op32_tx_resume, + .get_current_rxslot = op32_get_current_rxslot, + .set_current_rxslot = op32_set_current_rxslot, +}; + +/* 64bit DMA ops. 
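+ * The 64-bit variant uses four-word descriptors with a low/high address
+ * pair; the byte count and address-extension bits go into control1 while
+ * the frame start/end, table-end and IRQ flags go into control0, as
+ * op64_fill_descriptor below shows.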
*/ +static +struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring, + int slot, + struct b43_dmadesc_meta **meta) +{ + struct b43_dmadesc64 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43_dmadesc_generic *)desc; +} + +static void op64_fill_descriptor(struct b43_dmaring *ring, + struct b43_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43_dmadesc64 *descbase = ring->descbase; + int slot; + u32 ctl0 = 0, ctl1 = 0; + u32 addrlo, addrhi; + u32 addrext; + + slot = (int)(&(desc->dma64) - descbase); + B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addrlo = (u32) (dmaaddr & 0xFFFFFFFF); + addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); + addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addrhi |= (ssb_dma_translation(ring->dev->dev) << 1); + if (slot == ring->nr_slots - 1) + ctl0 |= B43_DMA64_DCTL0_DTABLEEND; + if (start) + ctl0 |= B43_DMA64_DCTL0_FRAMESTART; + if (end) + ctl0 |= B43_DMA64_DCTL0_FRAMEEND; + if (irq) + ctl0 |= B43_DMA64_DCTL0_IRQ; + ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT; + ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) + & B43_DMA64_DCTL1_ADDREXT_MASK; + + desc->dma64.control0 = cpu_to_le32(ctl0); + desc->dma64.control1 = cpu_to_le32(ctl1); + desc->dma64.address_low = cpu_to_le32(addrlo); + desc->dma64.address_high = cpu_to_le32(addrhi); +} + +static void op64_poke_tx(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA64_TXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc64))); +} + +static void op64_tx_suspend(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL) + | B43_DMA64_TXSUSPEND); +} + +static void op64_tx_resume(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL) + & ~B43_DMA64_TXSUSPEND); +} + +static int op64_get_current_rxslot(struct b43_dmaring *ring) +{ + u32 val; + + val = b43_dma_read(ring, B43_DMA64_RXSTATUS); + val &= B43_DMA64_RXSTATDPTR; + + return (val / sizeof(struct b43_dmadesc64)); +} + +static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA64_RXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc64))); +} + +static const struct b43_dma_ops dma64_ops = { + .idx2desc = op64_idx2desc, + .fill_descriptor = op64_fill_descriptor, + .poke_tx = op64_poke_tx, + .tx_suspend = op64_tx_suspend, + .tx_resume = op64_tx_resume, + .get_current_rxslot = op64_get_current_rxslot, + .set_current_rxslot = op64_set_current_rxslot, +}; + +static inline int free_slots(struct b43_dmaring *ring) +{ + return (ring->nr_slots - ring->used_slots); +} + +static inline int next_slot(struct b43_dmaring *ring, int slot) +{ + B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); + if (slot == ring->nr_slots - 1) + return 0; + return slot + 1; +} + +static inline int prev_slot(struct b43_dmaring *ring, int slot) +{ + B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1)); + if (slot == 0) + return ring->nr_slots - 1; + return slot - 1; +} + +#ifdef CONFIG_B43_DEBUG +static void update_max_used_slots(struct b43_dmaring *ring, + int current_used_slots) +{ + if (current_used_slots <= ring->max_used_slots) + return; + ring->max_used_slots = current_used_slots; + if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) { + b43dbg(ring->dev->wl, + "max_used_slots increased to %d on %s ring %d\n", + ring->max_used_slots, + ring->tx ? 
"TX" : "RX", ring->index); + } +} +#else +static inline + void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots) +{ +} +#endif /* DEBUG */ + +/* Request a slot for usage. */ +static inline int request_slot(struct b43_dmaring *ring) +{ + int slot; + + B43_WARN_ON(!ring->tx); + B43_WARN_ON(ring->stopped); + B43_WARN_ON(free_slots(ring) == 0); + + slot = next_slot(ring, ring->current_slot); + ring->current_slot = slot; + ring->used_slots++; + + update_max_used_slots(ring, ring->used_slots); + + return slot; +} + +static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx) +{ + static const u16 map64[] = { + B43_MMIO_DMA64_BASE0, + B43_MMIO_DMA64_BASE1, + B43_MMIO_DMA64_BASE2, + B43_MMIO_DMA64_BASE3, + B43_MMIO_DMA64_BASE4, + B43_MMIO_DMA64_BASE5, + }; + static const u16 map32[] = { + B43_MMIO_DMA32_BASE0, + B43_MMIO_DMA32_BASE1, + B43_MMIO_DMA32_BASE2, + B43_MMIO_DMA32_BASE3, + B43_MMIO_DMA32_BASE4, + B43_MMIO_DMA32_BASE5, + }; + + if (type == B43_DMA_64BIT) { + B43_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map64))); + return map64[controller_idx]; + } + B43_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map32))); + return map32[controller_idx]; +} + +static inline + dma_addr_t map_descbuffer(struct b43_dmaring *ring, + unsigned char *buf, size_t len, int tx) +{ + dma_addr_t dmaaddr; + + if (tx) { + dmaaddr = ssb_dma_map_single(ring->dev->dev, + buf, len, DMA_TO_DEVICE); + } else { + dmaaddr = ssb_dma_map_single(ring->dev->dev, + buf, len, DMA_FROM_DEVICE); + } + + return dmaaddr; +} + +static inline + void unmap_descbuffer(struct b43_dmaring *ring, + dma_addr_t addr, size_t len, int tx) +{ + if (tx) { + ssb_dma_unmap_single(ring->dev->dev, + addr, len, DMA_TO_DEVICE); + } else { + ssb_dma_unmap_single(ring->dev->dev, + addr, len, DMA_FROM_DEVICE); + } +} + +static inline + void sync_descbuffer_for_cpu(struct b43_dmaring *ring, + dma_addr_t addr, size_t len) +{ + B43_WARN_ON(ring->tx); + ssb_dma_sync_single_for_cpu(ring->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline + void sync_descbuffer_for_device(struct b43_dmaring *ring, + dma_addr_t addr, size_t len) +{ + B43_WARN_ON(ring->tx); + ssb_dma_sync_single_for_device(ring->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline + void free_descriptor_buffer(struct b43_dmaring *ring, + struct b43_dmadesc_meta *meta) +{ + if (meta->skb) { + dev_kfree_skb_any(meta->skb); + meta->skb = NULL; + } +} + +static int alloc_ringmemory(struct b43_dmaring *ring) +{ + gfp_t flags = GFP_KERNEL; + + /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K + * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing + * has shown that 4K is sufficient for the latter as long as the buffer + * does not cross an 8K boundary. + * + * For unknown reasons - possibly a hardware error - the BCM4311 rev + * 02, which uses 64-bit DMA, needs the ring buffer in very low memory, + * which accounts for the GFP_DMA flag below. + * + * The flags here must match the flags in free_ringmemory below! 
+ */ + if (ring->type == B43_DMA_64BIT) + flags |= GFP_DMA; + ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev, + B43_DMA_RINGMEMSIZE, + &(ring->dmabase), flags); + if (!ring->descbase) { + b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); + return -ENOMEM; + } + memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE); + + return 0; +} + +static void free_ringmemory(struct b43_dmaring *ring) +{ + gfp_t flags = GFP_KERNEL; + + if (ring->type == B43_DMA_64BIT) + flags |= GFP_DMA; + + ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE, + ring->descbase, ring->dmabase, flags); +} + +/* Reset the RX DMA channel */ +static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, + enum b43_dmatype type) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL; + b43_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS : + B43_DMA32_RXSTATUS; + value = b43_read32(dev, mmio_base + offset); + if (type == B43_DMA_64BIT) { + value &= B43_DMA64_RXSTAT; + if (value == B43_DMA64_RXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43_DMA32_RXSTATE; + if (value == B43_DMA32_RXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43err(dev->wl, "DMA RX reset timed out\n"); + return -ENODEV; + } + + return 0; +} + +/* Reset the TX DMA channel */ +static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, + enum b43_dmatype type) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + for (i = 0; i < 10; i++) { + offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS : + B43_DMA32_TXSTATUS; + value = b43_read32(dev, mmio_base + offset); + if (type == B43_DMA_64BIT) { + value &= B43_DMA64_TXSTAT; + if (value == B43_DMA64_TXSTAT_DISABLED || + value == B43_DMA64_TXSTAT_IDLEWAIT || + value == B43_DMA64_TXSTAT_STOPPED) + break; + } else { + value &= B43_DMA32_TXSTATE; + if (value == B43_DMA32_TXSTAT_DISABLED || + value == B43_DMA32_TXSTAT_IDLEWAIT || + value == B43_DMA32_TXSTAT_STOPPED) + break; + } + msleep(1); + } + offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL; + b43_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS : + B43_DMA32_TXSTATUS; + value = b43_read32(dev, mmio_base + offset); + if (type == B43_DMA_64BIT) { + value &= B43_DMA64_TXSTAT; + if (value == B43_DMA64_TXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43_DMA32_TXSTATE; + if (value == B43_DMA32_TXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43err(dev->wl, "DMA TX reset timed out\n"); + return -ENODEV; + } + /* ensure the reset is completed. */ + msleep(1); + + return 0; +} + +/* Check if a DMA mapping address is invalid. */ +static bool b43_dma_mapping_error(struct b43_dmaring *ring, + dma_addr_t addr, + size_t buffersize, bool dma_to_device) +{ + if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) + return 1; + + switch (ring->type) { + case B43_DMA_30BIT: + if ((u64)addr + buffersize > (1ULL << 30)) + goto address_error; + break; + case B43_DMA_32BIT: + if ((u64)addr + buffersize > (1ULL << 32)) + goto address_error; + break; + case B43_DMA_64BIT: + /* Currently we can't have addresses beyond + * 64bit in the kernel. */ + break; + } + + /* The address is OK. */ + return 0; + +address_error: + /* We can't support this address. Unmap it again. 
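+ * A 30-bit engine, for example, cannot reach a buffer mapped above the
+ * first 1 GiB of bus address space; callers handle this by retrying the
+ * allocation with GFP_DMA.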
*/ + unmap_descbuffer(ring, addr, buffersize, dma_to_device); + + return 1; +} + +static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) +{ + unsigned char *f = skb->data + ring->frameoffset; + + return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF); +} + +static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb) +{ + struct b43_rxhdr_fw4 *rxhdr; + unsigned char *frame; + + /* This poisons the RX buffer to detect DMA failures. */ + + rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); + rxhdr->frame_len = 0; + + B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2); + frame = skb->data + ring->frameoffset; + memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */); +} + +static int setup_rx_descbuffer(struct b43_dmaring *ring, + struct b43_dmadesc_generic *desc, + struct b43_dmadesc_meta *meta, gfp_t gfp_flags) +{ + dma_addr_t dmaaddr; + struct sk_buff *skb; + + B43_WARN_ON(ring->tx); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + b43_poison_rx_buffer(ring, skb); + dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); + if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { + /* ugh. try to realloc in zone_dma */ + gfp_flags |= GFP_DMA; + + dev_kfree_skb_any(skb); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + b43_poison_rx_buffer(ring, skb); + dmaaddr = map_descbuffer(ring, skb->data, + ring->rx_buffersize, 0); + if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { + b43err(ring->dev->wl, "RX DMA buffer allocation failed\n"); + dev_kfree_skb_any(skb); + return -EIO; + } + } + + meta->skb = skb; + meta->dmaaddr = dmaaddr; + ring->ops->fill_descriptor(ring, desc, dmaaddr, + ring->rx_buffersize, 0, 0, 0); + + return 0; +} + +/* Allocate the initial descbuffers. + * This is used for an RX ring only. + */ +static int alloc_initial_descbuffers(struct b43_dmaring *ring) +{ + int i, err = -ENOMEM; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); + if (err) { + b43err(ring->dev->wl, + "Failed to allocate initial descbuffers\n"); + goto err_unwind; + } + } + mb(); + ring->used_slots = ring->nr_slots; + err = 0; + out: + return err; + + err_unwind: + for (i--; i >= 0; i--) { + desc = ring->ops->idx2desc(ring, i, &meta); + + unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); + dev_kfree_skb(meta->skb); + } + goto out; +} + +/* Do initial setup of the DMA controller. + * Reset the controller, write the ring busaddress + * and switch the "enable" bit on. 
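+ * The ring bus address is not written verbatim: the SSB address-extension
+ * bits are split off into the ADDREXT field of the control register, and
+ * the core's DMA translation bits (ssb_dma_translation()) are ORed into
+ * the ring base, for both the 32-bit and 64-bit layouts below.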
+ */ +static int dmacontroller_setup(struct b43_dmaring *ring) +{ + int err = 0; + u32 value; + u32 addrext; + u32 trans = ssb_dma_translation(ring->dev->dev); + + if (ring->tx) { + if (ring->type == B43_DMA_64BIT) { + u64 ringbase = (u64) (ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43_DMA64_TXENABLE; + value |= (addrext << B43_DMA64_TXADDREXT_SHIFT) + & B43_DMA64_TXADDREXT_MASK; + b43_dma_write(ring, B43_DMA64_TXCTL, value); + b43_dma_write(ring, B43_DMA64_TXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43_dma_write(ring, B43_DMA64_TXRINGHI, + ((ringbase >> 32) & + ~SSB_DMA_TRANSLATION_MASK) + | (trans << 1)); + } else { + u32 ringbase = (u32) (ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43_DMA32_TXENABLE; + value |= (addrext << B43_DMA32_TXADDREXT_SHIFT) + & B43_DMA32_TXADDREXT_MASK; + b43_dma_write(ring, B43_DMA32_TXCTL, value); + b43_dma_write(ring, B43_DMA32_TXRING, + (ringbase & ~SSB_DMA_TRANSLATION_MASK) + | trans); + } + } else { + err = alloc_initial_descbuffers(ring); + if (err) + goto out; + if (ring->type == B43_DMA_64BIT) { + u64 ringbase = (u64) (ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT); + value |= B43_DMA64_RXENABLE; + value |= (addrext << B43_DMA64_RXADDREXT_SHIFT) + & B43_DMA64_RXADDREXT_MASK; + b43_dma_write(ring, B43_DMA64_RXCTL, value); + b43_dma_write(ring, B43_DMA64_RXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43_dma_write(ring, B43_DMA64_RXRINGHI, + ((ringbase >> 32) & + ~SSB_DMA_TRANSLATION_MASK) + | (trans << 1)); + b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots * + sizeof(struct b43_dmadesc64)); + } else { + u32 ringbase = (u32) (ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT); + value |= B43_DMA32_RXENABLE; + value |= (addrext << B43_DMA32_RXADDREXT_SHIFT) + & B43_DMA32_RXADDREXT_MASK; + b43_dma_write(ring, B43_DMA32_RXCTL, value); + b43_dma_write(ring, B43_DMA32_RXRING, + (ringbase & ~SSB_DMA_TRANSLATION_MASK) + | trans); + b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots * + sizeof(struct b43_dmadesc32)); + } + } + +out: + return err; +} + +/* Shutdown the DMA controller. 
*/ +static void dmacontroller_cleanup(struct b43_dmaring *ring) +{ + if (ring->tx) { + b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base, + ring->type); + if (ring->type == B43_DMA_64BIT) { + b43_dma_write(ring, B43_DMA64_TXRINGLO, 0); + b43_dma_write(ring, B43_DMA64_TXRINGHI, 0); + } else + b43_dma_write(ring, B43_DMA32_TXRING, 0); + } else { + b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base, + ring->type); + if (ring->type == B43_DMA_64BIT) { + b43_dma_write(ring, B43_DMA64_RXRINGLO, 0); + b43_dma_write(ring, B43_DMA64_RXRINGHI, 0); + } else + b43_dma_write(ring, B43_DMA32_RXRING, 0); + } +} + +static void free_all_descbuffers(struct b43_dmaring *ring) +{ + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + int i; + + if (!ring->used_slots) + return; + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) { + B43_WARN_ON(!ring->tx); + continue; + } + if (ring->tx) { + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); + } else { + unmap_descbuffer(ring, meta->dmaaddr, + ring->rx_buffersize, 0); + } + free_descriptor_buffer(ring, meta); + } +} + +static u64 supported_dma_mask(struct b43_wldev *dev) +{ + u32 tmp; + u16 mmio_base; + + tmp = b43_read32(dev, SSB_TMSHIGH); + if (tmp & SSB_TMSHIGH_DMA64) + return DMA_BIT_MASK(64); + mmio_base = b43_dmacontroller_base(0, 0); + b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); + tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); + if (tmp & B43_DMA32_TXADDREXT_MASK) + return DMA_BIT_MASK(32); + + return DMA_BIT_MASK(30); +} + +static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask) +{ + if (dmamask == DMA_BIT_MASK(30)) + return B43_DMA_30BIT; + if (dmamask == DMA_BIT_MASK(32)) + return B43_DMA_32BIT; + if (dmamask == DMA_BIT_MASK(64)) + return B43_DMA_64BIT; + B43_WARN_ON(1); + return B43_DMA_30BIT; +} + +/* Main initialization function. 
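+ * b43_setup_dmaring() allocates the per-slot meta array, the TX header
+ * cache for TX rings (retrying with GFP_DMA when the first allocation is
+ * not reachable by the engine), the ring descriptor memory, and finally
+ * programs the controller; it returns NULL on any failure.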
*/ +static +struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, + int controller_index, + int for_tx, + enum b43_dmatype type) +{ + struct b43_dmaring *ring; + int i, err; + dma_addr_t dma_test; + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto out; + + ring->nr_slots = B43_RXRING_SLOTS; + if (for_tx) + ring->nr_slots = B43_TXRING_SLOTS; + + ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta), + GFP_KERNEL); + if (!ring->meta) + goto err_kfree_ring; + for (i = 0; i < ring->nr_slots; i++) + ring->meta->skb = B43_DMA_PTR_POISON; + + ring->type = type; + ring->dev = dev; + ring->mmio_base = b43_dmacontroller_base(type, controller_index); + ring->index = controller_index; + if (type == B43_DMA_64BIT) + ring->ops = &dma64_ops; + else + ring->ops = &dma32_ops; + if (for_tx) { + ring->tx = 1; + ring->current_slot = -1; + } else { + if (ring->index == 0) { + ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE; + ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET; + } else + B43_WARN_ON(1); + } +#ifdef CONFIG_B43_DEBUG + ring->last_injected_overflow = jiffies; +#endif + + if (for_tx) { + /* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */ + BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0); + + ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME, + b43_txhdr_size(dev), + GFP_KERNEL); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + /* test for ability to dma to txhdr_cache */ + dma_test = ssb_dma_map_single(dev->dev, + ring->txhdr_cache, + b43_txhdr_size(dev), + DMA_TO_DEVICE); + + if (b43_dma_mapping_error(ring, dma_test, + b43_txhdr_size(dev), 1)) { + /* ugh realloc */ + kfree(ring->txhdr_cache); + ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME, + b43_txhdr_size(dev), + GFP_KERNEL | GFP_DMA); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + dma_test = ssb_dma_map_single(dev->dev, + ring->txhdr_cache, + b43_txhdr_size(dev), + DMA_TO_DEVICE); + + if (b43_dma_mapping_error(ring, dma_test, + b43_txhdr_size(dev), 1)) { + + b43err(dev->wl, + "TXHDR DMA allocation failed\n"); + goto err_kfree_txhdr_cache; + } + } + + ssb_dma_unmap_single(dev->dev, + dma_test, b43_txhdr_size(dev), + DMA_TO_DEVICE); + } + + err = alloc_ringmemory(ring); + if (err) + goto err_kfree_txhdr_cache; + err = dmacontroller_setup(ring); + if (err) + goto err_free_ringmemory; + + out: + return ring; + + err_free_ringmemory: + free_ringmemory(ring); + err_kfree_txhdr_cache: + kfree(ring->txhdr_cache); + err_kfree_meta: + kfree(ring->meta); + err_kfree_ring: + kfree(ring); + ring = NULL; + goto out; +} + +#define divide(a, b) ({ \ + typeof(a) __a = a; \ + do_div(__a, b); \ + __a; \ + }) + +#define modulo(a, b) ({ \ + typeof(a) __a = a; \ + do_div(__a, b); \ + }) + +/* Main cleanup function. */ +static void b43_destroy_dmaring(struct b43_dmaring *ring, + const char *ringname) +{ + if (!ring) + return; + +#ifdef CONFIG_B43_DEBUG + { + /* Print some statistics. 
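+ * The failure rate is computed in permille and printed with one decimal
+ * digit, so e.g. 3 failed frames out of 2000 give permille_failed = 1,
+ * printed as "0.1%"; average_tries is scaled by 100 in the same way and
+ * printed with two decimal digits.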
*/ + u64 failed_packets = ring->nr_failed_tx_packets; + u64 succeed_packets = ring->nr_succeed_tx_packets; + u64 nr_packets = failed_packets + succeed_packets; + u64 permille_failed = 0, average_tries = 0; + + if (nr_packets) + permille_failed = divide(failed_packets * 1000, nr_packets); + if (nr_packets) + average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets); + + b43dbg(ring->dev->wl, "DMA-%u %s: " + "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, " + "Average tries %llu.%02llu\n", + (unsigned int)(ring->type), ringname, + ring->max_used_slots, + ring->nr_slots, + (unsigned long long)failed_packets, + (unsigned long long)nr_packets, + (unsigned long long)divide(permille_failed, 10), + (unsigned long long)modulo(permille_failed, 10), + (unsigned long long)divide(average_tries, 100), + (unsigned long long)modulo(average_tries, 100)); + } +#endif /* DEBUG */ + + /* Device IRQs are disabled prior entering this function, + * so no need to take care of concurrency with rx handler stuff. + */ + dmacontroller_cleanup(ring); + free_all_descbuffers(ring); + free_ringmemory(ring); + + kfree(ring->txhdr_cache); + kfree(ring->meta); + kfree(ring); +} + +#define destroy_ring(dma, ring) do { \ + b43_destroy_dmaring((dma)->ring, __stringify(ring)); \ + (dma)->ring = NULL; \ + } while (0) + +void b43_dma_free(struct b43_wldev *dev) +{ + struct b43_dma *dma; + + if (b43_using_pio_transfers(dev)) + return; + dma = &dev->dma; + + destroy_ring(dma, rx_ring); + destroy_ring(dma, tx_ring_AC_BK); + destroy_ring(dma, tx_ring_AC_BE); + destroy_ring(dma, tx_ring_AC_VI); + destroy_ring(dma, tx_ring_AC_VO); + destroy_ring(dma, tx_ring_mcast); +} + +static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask) +{ + u64 orig_mask = mask; + bool fallback = 0; + int err; + + /* Try to set the DMA mask. If it fails, try falling back to a + * lower mask, as we can always also support a lower one. */ + while (1) { + err = ssb_dma_set_mask(dev->dev, mask); + if (!err) + break; + if (mask == DMA_BIT_MASK(64)) { + mask = DMA_BIT_MASK(32); + fallback = 1; + continue; + } + if (mask == DMA_BIT_MASK(32)) { + mask = DMA_BIT_MASK(30); + fallback = 1; + continue; + } + b43err(dev->wl, "The machine/kernel does not support " + "the required %u-bit DMA mask\n", + (unsigned int)dma_mask_to_engine_type(orig_mask)); + return -EOPNOTSUPP; + } + if (fallback) { + b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n", + (unsigned int)dma_mask_to_engine_type(orig_mask), + (unsigned int)dma_mask_to_engine_type(mask)); + } + + return 0; +} + +int b43_dma_init(struct b43_wldev *dev) +{ + struct b43_dma *dma = &dev->dma; + int err; + u64 dmamask; + enum b43_dmatype type; + + dmamask = supported_dma_mask(dev); + type = dma_mask_to_engine_type(dmamask); + err = b43_dma_set_mask(dev, dmamask); + if (err) + return err; + + err = -ENOMEM; + /* setup TX DMA channels. */ + dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type); + if (!dma->tx_ring_AC_BK) + goto out; + + dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type); + if (!dma->tx_ring_AC_BE) + goto err_destroy_bk; + + dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type); + if (!dma->tx_ring_AC_VI) + goto err_destroy_be; + + dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type); + if (!dma->tx_ring_AC_VO) + goto err_destroy_vi; + + dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type); + if (!dma->tx_ring_mcast) + goto err_destroy_vo; + + /* setup RX DMA channel. 
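+ * Only RX engine 0 is used for received frames.  Cores older than
+ * revision 5 deliver TX status over a separate DMA ring, which this
+ * driver does not support; hence the B43_WARN_ON on the core revision
+ * right below.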
*/ + dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type); + if (!dma->rx_ring) + goto err_destroy_mcast; + + /* No support for the TX status DMA ring. */ + B43_WARN_ON(dev->dev->id.revision < 5); + + b43dbg(dev->wl, "%u-bit DMA initialized\n", + (unsigned int)type); + err = 0; +out: + return err; + +err_destroy_mcast: + destroy_ring(dma, tx_ring_mcast); +err_destroy_vo: + destroy_ring(dma, tx_ring_AC_VO); +err_destroy_vi: + destroy_ring(dma, tx_ring_AC_VI); +err_destroy_be: + destroy_ring(dma, tx_ring_AC_BE); +err_destroy_bk: + destroy_ring(dma, tx_ring_AC_BK); + return err; +} + +/* Generate a cookie for the TX header. */ +static u16 generate_cookie(struct b43_dmaring *ring, int slot) +{ + u16 cookie; + + /* Use the upper 4 bits of the cookie as + * DMA controller ID and store the slot number + * in the lower 12 bits. + * Note that the cookie must never be 0, as this + * is a special value used in RX path. + * It can also not be 0xFFFF because that is special + * for multicast frames. + */ + cookie = (((u16)ring->index + 1) << 12); + B43_WARN_ON(slot & ~0x0FFF); + cookie |= (u16)slot; + + return cookie; +} + +/* Inspect a cookie and find out to which controller/slot it belongs. */ +static +struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot) +{ + struct b43_dma *dma = &dev->dma; + struct b43_dmaring *ring = NULL; + + switch (cookie & 0xF000) { + case 0x1000: + ring = dma->tx_ring_AC_BK; + break; + case 0x2000: + ring = dma->tx_ring_AC_BE; + break; + case 0x3000: + ring = dma->tx_ring_AC_VI; + break; + case 0x4000: + ring = dma->tx_ring_AC_VO; + break; + case 0x5000: + ring = dma->tx_ring_mcast; + break; + } + *slot = (cookie & 0x0FFF); + if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) { + b43dbg(dev->wl, "TX-status contains " + "invalid cookie: 0x%04X\n", cookie); + return NULL; + } + + return ring; +} + +static int dma_tx_fragment(struct b43_dmaring *ring, + struct sk_buff *skb) +{ + const struct b43_dma_ops *ops = ring->ops; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info); + u8 *header; + int slot, old_top_slot, old_used_slots; + int err; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + struct b43_dmadesc_meta *meta_hdr; + u16 cookie; + size_t hdrsize = b43_txhdr_size(ring->dev); + + /* Important note: If the number of used DMA slots per TX frame + * is changed here, the TX_SLOTS_PER_FRAME definition at the top of + * the file has to be updated, too! + */ + + old_top_slot = ring->current_slot; + old_used_slots = ring->used_slots; + + /* Get a slot for the header. */ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta_hdr); + memset(meta_hdr, 0, sizeof(*meta_hdr)); + + header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]); + cookie = generate_cookie(ring, slot); + err = b43_generate_txhdr(ring->dev, header, + skb, info, cookie); + if (unlikely(err)) { + ring->current_slot = old_top_slot; + ring->used_slots = old_used_slots; + return err; + } + + meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, + hdrsize, 1); + if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) { + ring->current_slot = old_top_slot; + ring->used_slots = old_used_slots; + return -EIO; + } + ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, + hdrsize, 1, 0, 0); + + /* Get a slot for the payload. 
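+ * This is the second of the TX_SLOTS_PER_FRAME slots used per frame;
+ * the first one, requested above, carries the device TX header.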
*/ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta); + memset(meta, 0, sizeof(*meta)); + + meta->skb = skb; + meta->is_last_fragment = 1; + priv_info->bouncebuffer = NULL; + + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + /* create a bounce buffer in zone_dma on mapping failure. */ + if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { + priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA); + if (!priv_info->bouncebuffer) { + ring->current_slot = old_top_slot; + ring->used_slots = old_used_slots; + err = -ENOMEM; + goto out_unmap_hdr; + } + memcpy(priv_info->bouncebuffer, skb->data, skb->len); + + meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1); + if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { + kfree(priv_info->bouncebuffer); + priv_info->bouncebuffer = NULL; + ring->current_slot = old_top_slot; + ring->used_slots = old_used_slots; + err = -EIO; + goto out_unmap_hdr; + } + } + + ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1); + + if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + /* Tell the firmware about the cookie of the last + * mcast frame, so it can clear the more-data bit in it. */ + b43_shm_write16(ring->dev, B43_SHM_SHARED, + B43_SHM_SH_MCASTCOOKIE, cookie); + } + /* Now transfer the whole frame. */ + wmb(); + ops->poke_tx(ring, next_slot(ring, slot)); + return 0; + +out_unmap_hdr: + unmap_descbuffer(ring, meta_hdr->dmaaddr, + hdrsize, 1); + return err; +} + +static inline int should_inject_overflow(struct b43_dmaring *ring) +{ +#ifdef CONFIG_B43_DEBUG + if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) { + /* Check if we should inject another ringbuffer overflow + * to test handling of this situation in the stack. */ + unsigned long next_overflow; + + next_overflow = ring->last_injected_overflow + HZ; + if (time_after(jiffies, next_overflow)) { + ring->last_injected_overflow = jiffies; + b43dbg(ring->dev->wl, + "Injecting TX ring overflow on " + "DMA controller %d\n", ring->index); + return 1; + } + } +#endif /* CONFIG_B43_DEBUG */ + return 0; +} + +/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */ +static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev, + u8 queue_prio) +{ + struct b43_dmaring *ring; + + if (dev->qos_enabled) { + /* 0 = highest priority */ + switch (queue_prio) { + default: + B43_WARN_ON(1); + /* fallthrough */ + case 0: + ring = dev->dma.tx_ring_AC_VO; + break; + case 1: + ring = dev->dma.tx_ring_AC_VI; + break; + case 2: + ring = dev->dma.tx_ring_AC_BE; + break; + case 3: + ring = dev->dma.tx_ring_AC_BK; + break; + } + } else + ring = dev->dma.tx_ring_AC_BE; + + return ring; +} + +int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) +{ + struct b43_dmaring *ring; + struct ieee80211_hdr *hdr; + int err = 0; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + hdr = (struct ieee80211_hdr *)skb->data; + if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + /* The multicast ring will be sent after the DTIM */ + ring = dev->dma.tx_ring_mcast; + /* Set the more-data bit. Ucode will clear it on + * the last frame for us. */ + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } else { + /* Decide by priority where to put this frame. */ + ring = select_ring_by_priority( + dev, skb_get_queue_mapping(skb)); + } + + B43_WARN_ON(!ring->tx); + + if (unlikely(ring->stopped)) { + /* We get here only because of a bug in mac80211. 
+ * Because of a race, one packet may be queued after + * the queue is stopped, thus we got called when we shouldn't. + * For now, just refuse the transmit. */ + if (b43_debug(dev, B43_DBG_DMAVERBOSE)) + b43err(dev->wl, "Packet after queue stopped\n"); + err = -ENOSPC; + goto out; + } + + if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) { + /* If we get here, we have a real error with the queue + * full, but queues not stopped. */ + b43err(dev->wl, "DMA queue overflow\n"); + err = -ENOSPC; + goto out; + } + + /* Assign the queue number to the ring (if not already done before) + * so TX status handling can use it. The queue to ring mapping is + * static, so we don't need to store it per frame. */ + ring->queue_prio = skb_get_queue_mapping(skb); + + err = dma_tx_fragment(ring, skb); + if (unlikely(err == -ENOKEY)) { + /* Drop this packet, as we don't have the encryption key + * anymore and must not transmit it unencrypted. */ + dev_kfree_skb_any(skb); + err = 0; + goto out; + } + if (unlikely(err)) { + b43err(dev->wl, "DMA tx mapping failure\n"); + goto out; + } + if ((free_slots(ring) < TX_SLOTS_PER_FRAME) || + should_inject_overflow(ring)) { + /* This TX ring is full. */ + ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); + ring->stopped = 1; + if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { + b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); + } + } +out: + + return err; +} + +void b43_dma_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + const struct b43_dma_ops *ops; + struct b43_dmaring *ring; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + int slot, firstused; + bool frame_succeed; + + ring = parse_cookie(dev, status->cookie, &slot); + if (unlikely(!ring)) + return; + B43_WARN_ON(!ring->tx); + + /* Sanity check: TX packets are processed in-order on one ring. + * Check if the slot deduced from the cookie really is the first + * used slot. */ + firstused = ring->current_slot - ring->used_slots + 1; + if (firstused < 0) + firstused = ring->nr_slots + firstused; + if (unlikely(slot != firstused)) { + /* This possibly is a firmware bug and will result in + * malfunction, memory leaks and/or stall of DMA functionality. */ + b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " + "Expected %d, but got %d\n", + ring->index, firstused, slot); + return; + } + + ops = ring->ops; + while (1) { + B43_WARN_ON(slot < 0 || slot >= ring->nr_slots); + desc = ops->idx2desc(ring, slot, &meta); + + if (b43_dma_ptr_is_poisoned(meta->skb)) { + b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) " + "on ring %d\n", + slot, firstused, ring->index); + break; + } + if (meta->skb) { + struct b43_private_tx_info *priv_info = + b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); + + unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); + kfree(priv_info->bouncebuffer); + priv_info->bouncebuffer = NULL; + } else { + unmap_descbuffer(ring, meta->dmaaddr, + b43_txhdr_size(dev), 1); + } + + if (meta->is_last_fragment) { + struct ieee80211_tx_info *info; + + if (unlikely(!meta->skb)) { + /* This is a scatter-gather fragment of a frame, so + * the skb pointer must not be NULL. */ + b43dbg(dev->wl, "TX status unexpected NULL skb " + "at slot %d (first=%d) on ring %d\n", + slot, firstused, ring->index); + break; + } + + info = IEEE80211_SKB_CB(meta->skb); + + /* + * Call back to inform the ieee80211 subsystem about + * the status of the transmission. 
+ */ + frame_succeed = b43_fill_txstatus_report(dev, info, status); +#ifdef CONFIG_B43_DEBUG + if (frame_succeed) + ring->nr_succeed_tx_packets++; + else + ring->nr_failed_tx_packets++; + ring->nr_total_packet_tries += status->frame_count; +#endif /* DEBUG */ + ieee80211_tx_status(dev->wl->hw, meta->skb); + + /* skb will be freed by ieee80211_tx_status(). + * Poison our pointer. */ + meta->skb = B43_DMA_PTR_POISON; + } else { + /* No need to call free_descriptor_buffer here, as + * this is only the txhdr, which is not allocated. + */ + if (unlikely(meta->skb)) { + b43dbg(dev->wl, "TX status unexpected non-NULL skb " + "at slot %d (first=%d) on ring %d\n", + slot, firstused, ring->index); + break; + } + } + + /* Everything unmapped and free'd. So it's not used anymore. */ + ring->used_slots--; + + if (meta->is_last_fragment) { + /* This is the last scatter-gather + * fragment of the frame. We are done. */ + break; + } + slot = next_slot(ring, slot); + } + if (ring->stopped) { + B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); + ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); + ring->stopped = 0; + if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { + b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); + } + } +} + +static void dma_rx(struct b43_dmaring *ring, int *slot) +{ + const struct b43_dma_ops *ops = ring->ops; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + struct b43_rxhdr_fw4 *rxhdr; + struct sk_buff *skb; + u16 len; + int err; + dma_addr_t dmaaddr; + + desc = ops->idx2desc(ring, *slot, &meta); + + sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); + skb = meta->skb; + + rxhdr = (struct b43_rxhdr_fw4 *)skb->data; + len = le16_to_cpu(rxhdr->frame_len); + if (len == 0) { + int i = 0; + + do { + udelay(2); + barrier(); + len = le16_to_cpu(rxhdr->frame_len); + } while (len == 0 && i++ < 5); + if (unlikely(len == 0)) { + dmaaddr = meta->dmaaddr; + goto drop_recycle_buffer; + } + } + if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) { + /* Something went wrong with the DMA. + * The device did not touch the buffer and did not overwrite the poison. */ + b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n"); + dmaaddr = meta->dmaaddr; + goto drop_recycle_buffer; + } + if (unlikely(len > ring->rx_buffersize)) { + /* The data did not fit into one descriptor buffer + * and is split over multiple buffers. + * This should never happen, as we try to allocate buffers + * big enough. So simply ignore this packet. + */ + int cnt = 0; + s32 tmp = len; + + while (1) { + desc = ops->idx2desc(ring, *slot, &meta); + /* recycle the descriptor buffer. */ + b43_poison_rx_buffer(ring, meta->skb); + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + *slot = next_slot(ring, *slot); + cnt++; + tmp -= ring->rx_buffersize; + if (tmp <= 0) + break; + } + b43err(ring->dev->wl, "DMA RX buffer too small " + "(len: %u, buffer: %u, nr-dropped: %d)\n", + len, ring->rx_buffersize, cnt); + goto drop; + } + + dmaaddr = meta->dmaaddr; + err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); + if (unlikely(err)) { + b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n"); + goto drop_recycle_buffer; + } + + unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); + skb_put(skb, len + ring->frameoffset); + skb_pull(skb, ring->frameoffset); + + b43_rx(ring->dev, skb, rxhdr); +drop: + return; + +drop_recycle_buffer: + /* Poison and recycle the RX buffer. 
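+ * The poison pattern (frame_len cleared, 0xFF over the PLCP area) lets a
+ * later pass through dma_rx() detect that the device never wrote into
+ * this buffer and drop it again instead of passing garbage up the stack.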
*/ + b43_poison_rx_buffer(ring, skb); + sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); +} + +void b43_dma_rx(struct b43_dmaring *ring) +{ + const struct b43_dma_ops *ops = ring->ops; + int slot, current_slot; + int used_slots = 0; + + B43_WARN_ON(ring->tx); + current_slot = ops->get_current_rxslot(ring); + B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots)); + + slot = ring->current_slot; + for (; slot != current_slot; slot = next_slot(ring, slot)) { + dma_rx(ring, &slot); + update_max_used_slots(ring, ++used_slots); + } + ops->set_current_rxslot(ring, slot); + ring->current_slot = slot; +} + +static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring) +{ + B43_WARN_ON(!ring->tx); + ring->ops->tx_suspend(ring); +} + +static void b43_dma_tx_resume_ring(struct b43_dmaring *ring) +{ + B43_WARN_ON(!ring->tx); + ring->ops->tx_resume(ring); +} + +void b43_dma_tx_suspend(struct b43_wldev *dev) +{ + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); + b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK); + b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE); + b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI); + b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO); + b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast); +} + +void b43_dma_tx_resume(struct b43_wldev *dev) +{ + b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast); + b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO); + b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI); + b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE); + b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK); + b43_power_saving_ctl_bits(dev, 0); +} + +static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type, + u16 mmio_base, bool enable) +{ + u32 ctl; + + if (type == B43_DMA_64BIT) { + ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL); + ctl &= ~B43_DMA64_RXDIRECTFIFO; + if (enable) + ctl |= B43_DMA64_RXDIRECTFIFO; + b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl); + } else { + ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL); + ctl &= ~B43_DMA32_RXDIRECTFIFO; + if (enable) + ctl |= B43_DMA32_RXDIRECTFIFO; + b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl); + } +} + +/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine. + * This is called from PIO code, so DMA structures are not available. */ +void b43_dma_direct_fifo_rx(struct b43_wldev *dev, + unsigned int engine_index, bool enable) +{ + enum b43_dmatype type; + u16 mmio_base; + + type = dma_mask_to_engine_type(supported_dma_mask(dev)); + + mmio_base = b43_dmacontroller_base(type, engine_index); + direct_fifo_rx(dev, type, mmio_base, enable); +} diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h new file mode 100644 index 0000000..dc91944 --- /dev/null +++ b/drivers/net/wireless/b43/dma.h @@ -0,0 +1,289 @@ +#ifndef B43_DMA_H_ +#define B43_DMA_H_ + +#include + +#include "b43.h" + + +/* DMA-Interrupt reasons. */ +#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ + | (1 << 14) | (1 << 15)) +#define B43_DMAIRQ_NONFATALMASK (1 << 13) +#define B43_DMAIRQ_RX_DONE (1 << 16) + +/*** 32-bit DMA Engine. ***/ + +/* 32-bit DMA controller registers. 
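+ * All register offsets below (and in the 64-bit block further down) are
+ * relative to the per-controller MMIO base picked by
+ * b43_dmacontroller_base() in dma.c.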
*/ +#define B43_DMA32_TXCTL 0x00 +#define B43_DMA32_TXENABLE 0x00000001 +#define B43_DMA32_TXSUSPEND 0x00000002 +#define B43_DMA32_TXLOOPBACK 0x00000004 +#define B43_DMA32_TXFLUSH 0x00000010 +#define B43_DMA32_TXADDREXT_MASK 0x00030000 +#define B43_DMA32_TXADDREXT_SHIFT 16 +#define B43_DMA32_TXRING 0x04 +#define B43_DMA32_TXINDEX 0x08 +#define B43_DMA32_TXSTATUS 0x0C +#define B43_DMA32_TXDPTR 0x00000FFF +#define B43_DMA32_TXSTATE 0x0000F000 +#define B43_DMA32_TXSTAT_DISABLED 0x00000000 +#define B43_DMA32_TXSTAT_ACTIVE 0x00001000 +#define B43_DMA32_TXSTAT_IDLEWAIT 0x00002000 +#define B43_DMA32_TXSTAT_STOPPED 0x00003000 +#define B43_DMA32_TXSTAT_SUSP 0x00004000 +#define B43_DMA32_TXERROR 0x000F0000 +#define B43_DMA32_TXERR_NOERR 0x00000000 +#define B43_DMA32_TXERR_PROT 0x00010000 +#define B43_DMA32_TXERR_UNDERRUN 0x00020000 +#define B43_DMA32_TXERR_BUFREAD 0x00030000 +#define B43_DMA32_TXERR_DESCREAD 0x00040000 +#define B43_DMA32_TXACTIVE 0xFFF00000 +#define B43_DMA32_RXCTL 0x10 +#define B43_DMA32_RXENABLE 0x00000001 +#define B43_DMA32_RXFROFF_MASK 0x000000FE +#define B43_DMA32_RXFROFF_SHIFT 1 +#define B43_DMA32_RXDIRECTFIFO 0x00000100 +#define B43_DMA32_RXADDREXT_MASK 0x00030000 +#define B43_DMA32_RXADDREXT_SHIFT 16 +#define B43_DMA32_RXRING 0x14 +#define B43_DMA32_RXINDEX 0x18 +#define B43_DMA32_RXSTATUS 0x1C +#define B43_DMA32_RXDPTR 0x00000FFF +#define B43_DMA32_RXSTATE 0x0000F000 +#define B43_DMA32_RXSTAT_DISABLED 0x00000000 +#define B43_DMA32_RXSTAT_ACTIVE 0x00001000 +#define B43_DMA32_RXSTAT_IDLEWAIT 0x00002000 +#define B43_DMA32_RXSTAT_STOPPED 0x00003000 +#define B43_DMA32_RXERROR 0x000F0000 +#define B43_DMA32_RXERR_NOERR 0x00000000 +#define B43_DMA32_RXERR_PROT 0x00010000 +#define B43_DMA32_RXERR_OVERFLOW 0x00020000 +#define B43_DMA32_RXERR_BUFWRITE 0x00030000 +#define B43_DMA32_RXERR_DESCREAD 0x00040000 +#define B43_DMA32_RXACTIVE 0xFFF00000 + +/* 32-bit DMA descriptor. */ +struct b43_dmadesc32 { + __le32 control; + __le32 address; +} __attribute__ ((__packed__)); +#define B43_DMA32_DCTL_BYTECNT 0x00001FFF +#define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000 +#define B43_DMA32_DCTL_ADDREXT_SHIFT 16 +#define B43_DMA32_DCTL_DTABLEEND 0x10000000 +#define B43_DMA32_DCTL_IRQ 0x20000000 +#define B43_DMA32_DCTL_FRAMEEND 0x40000000 +#define B43_DMA32_DCTL_FRAMESTART 0x80000000 + +/*** 64-bit DMA Engine. ***/ + +/* 64-bit DMA controller registers. 
*/ +#define B43_DMA64_TXCTL 0x00 +#define B43_DMA64_TXENABLE 0x00000001 +#define B43_DMA64_TXSUSPEND 0x00000002 +#define B43_DMA64_TXLOOPBACK 0x00000004 +#define B43_DMA64_TXFLUSH 0x00000010 +#define B43_DMA64_TXADDREXT_MASK 0x00030000 +#define B43_DMA64_TXADDREXT_SHIFT 16 +#define B43_DMA64_TXINDEX 0x04 +#define B43_DMA64_TXRINGLO 0x08 +#define B43_DMA64_TXRINGHI 0x0C +#define B43_DMA64_TXSTATUS 0x10 +#define B43_DMA64_TXSTATDPTR 0x00001FFF +#define B43_DMA64_TXSTAT 0xF0000000 +#define B43_DMA64_TXSTAT_DISABLED 0x00000000 +#define B43_DMA64_TXSTAT_ACTIVE 0x10000000 +#define B43_DMA64_TXSTAT_IDLEWAIT 0x20000000 +#define B43_DMA64_TXSTAT_STOPPED 0x30000000 +#define B43_DMA64_TXSTAT_SUSP 0x40000000 +#define B43_DMA64_TXERROR 0x14 +#define B43_DMA64_TXERRDPTR 0x0001FFFF +#define B43_DMA64_TXERR 0xF0000000 +#define B43_DMA64_TXERR_NOERR 0x00000000 +#define B43_DMA64_TXERR_PROT 0x10000000 +#define B43_DMA64_TXERR_UNDERRUN 0x20000000 +#define B43_DMA64_TXERR_TRANSFER 0x30000000 +#define B43_DMA64_TXERR_DESCREAD 0x40000000 +#define B43_DMA64_TXERR_CORE 0x50000000 +#define B43_DMA64_RXCTL 0x20 +#define B43_DMA64_RXENABLE 0x00000001 +#define B43_DMA64_RXFROFF_MASK 0x000000FE +#define B43_DMA64_RXFROFF_SHIFT 1 +#define B43_DMA64_RXDIRECTFIFO 0x00000100 +#define B43_DMA64_RXADDREXT_MASK 0x00030000 +#define B43_DMA64_RXADDREXT_SHIFT 16 +#define B43_DMA64_RXINDEX 0x24 +#define B43_DMA64_RXRINGLO 0x28 +#define B43_DMA64_RXRINGHI 0x2C +#define B43_DMA64_RXSTATUS 0x30 +#define B43_DMA64_RXSTATDPTR 0x00001FFF +#define B43_DMA64_RXSTAT 0xF0000000 +#define B43_DMA64_RXSTAT_DISABLED 0x00000000 +#define B43_DMA64_RXSTAT_ACTIVE 0x10000000 +#define B43_DMA64_RXSTAT_IDLEWAIT 0x20000000 +#define B43_DMA64_RXSTAT_STOPPED 0x30000000 +#define B43_DMA64_RXSTAT_SUSP 0x40000000 +#define B43_DMA64_RXERROR 0x34 +#define B43_DMA64_RXERRDPTR 0x0001FFFF +#define B43_DMA64_RXERR 0xF0000000 +#define B43_DMA64_RXERR_NOERR 0x00000000 +#define B43_DMA64_RXERR_PROT 0x10000000 +#define B43_DMA64_RXERR_UNDERRUN 0x20000000 +#define B43_DMA64_RXERR_TRANSFER 0x30000000 +#define B43_DMA64_RXERR_DESCREAD 0x40000000 +#define B43_DMA64_RXERR_CORE 0x50000000 + +/* 64-bit DMA descriptor. */ +struct b43_dmadesc64 { + __le32 control0; + __le32 control1; + __le32 address_low; + __le32 address_high; +} __attribute__ ((__packed__)); +#define B43_DMA64_DCTL0_DTABLEEND 0x10000000 +#define B43_DMA64_DCTL0_IRQ 0x20000000 +#define B43_DMA64_DCTL0_FRAMEEND 0x40000000 +#define B43_DMA64_DCTL0_FRAMESTART 0x80000000 +#define B43_DMA64_DCTL1_BYTECNT 0x00001FFF +#define B43_DMA64_DCTL1_ADDREXT_MASK 0x00030000 +#define B43_DMA64_DCTL1_ADDREXT_SHIFT 16 + +struct b43_dmadesc_generic { + union { + struct b43_dmadesc32 dma32; + struct b43_dmadesc64 dma64; + } __attribute__ ((__packed__)); +} __attribute__ ((__packed__)); + +/* Misc DMA constants */ +#define B43_DMA_RINGMEMSIZE PAGE_SIZE +#define B43_DMA0_RX_FRAMEOFFSET 30 + +/* DMA engine tuning knobs */ +#define B43_TXRING_SLOTS 256 +#define B43_RXRING_SLOTS 64 +#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN + +/* Pointer poison */ +#define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM)) +#define b43_dma_ptr_is_poisoned(ptr) (unlikely((ptr) == B43_DMA_PTR_POISON)) + + +struct sk_buff; +struct b43_private; +struct b43_txstatus; + +struct b43_dmadesc_meta { + /* The kernel DMA-able buffer. */ + struct sk_buff *skb; + /* DMA base bus-address of the descriptor buffer. */ + dma_addr_t dmaaddr; + /* ieee80211 TX status. Only used once per 802.11 frag. 
*/ + bool is_last_fragment; +}; + +struct b43_dmaring; + +/* Lowlevel DMA operations that differ between 32bit and 64bit DMA. */ +struct b43_dma_ops { + struct b43_dmadesc_generic *(*idx2desc) (struct b43_dmaring * ring, + int slot, + struct b43_dmadesc_meta ** + meta); + void (*fill_descriptor) (struct b43_dmaring * ring, + struct b43_dmadesc_generic * desc, + dma_addr_t dmaaddr, u16 bufsize, int start, + int end, int irq); + void (*poke_tx) (struct b43_dmaring * ring, int slot); + void (*tx_suspend) (struct b43_dmaring * ring); + void (*tx_resume) (struct b43_dmaring * ring); + int (*get_current_rxslot) (struct b43_dmaring * ring); + void (*set_current_rxslot) (struct b43_dmaring * ring, int slot); +}; + +enum b43_dmatype { + B43_DMA_30BIT = 30, + B43_DMA_32BIT = 32, + B43_DMA_64BIT = 64, +}; + +struct b43_dmaring { + /* Lowlevel DMA ops. */ + const struct b43_dma_ops *ops; + /* Kernel virtual base address of the ring memory. */ + void *descbase; + /* Meta data about all descriptors. */ + struct b43_dmadesc_meta *meta; + /* Cache of TX headers for each TX frame. + * This is to avoid an allocation on each TX. + * This is NULL for an RX ring. + */ + u8 *txhdr_cache; + /* (Unadjusted) DMA base bus-address of the ring memory. */ + dma_addr_t dmabase; + /* Number of descriptor slots in the ring. */ + int nr_slots; + /* Number of used descriptor slots. */ + int used_slots; + /* Currently used slot in the ring. */ + int current_slot; + /* Frameoffset in octets. */ + u32 frameoffset; + /* Descriptor buffer size. */ + u16 rx_buffersize; + /* The MMIO base register of the DMA controller. */ + u16 mmio_base; + /* DMA controller index number (0-5). */ + int index; + /* Boolean. Is this a TX ring? */ + bool tx; + /* The type of DMA engine used. */ + enum b43_dmatype type; + /* Boolean. Is this ring stopped at ieee80211 level? */ + bool stopped; + /* The QOS priority assigned to this ring. Only used for TX rings. + * This is the mac80211 "queue" value. */ + u8 queue_prio; + struct b43_wldev *dev; +#ifdef CONFIG_B43_DEBUG + /* Maximum number of used slots. */ + int max_used_slots; + /* Last time we injected a ring overflow. */ + unsigned long last_injected_overflow; + /* Statistics: Number of successfully transmitted packets */ + u64 nr_succeed_tx_packets; + /* Statistics: Number of failed TX packets */ + u64 nr_failed_tx_packets; + /* Statistics: Total number of TX plus all retries. 
*/ + u64 nr_total_packet_tries; +#endif /* CONFIG_B43_DEBUG */ +}; + +static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset) +{ + return b43_read32(ring->dev, ring->mmio_base + offset); +} + +static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value) +{ + b43_write32(ring->dev, ring->mmio_base + offset, value); +} + +int b43_dma_init(struct b43_wldev *dev); +void b43_dma_free(struct b43_wldev *dev); + +void b43_dma_tx_suspend(struct b43_wldev *dev); +void b43_dma_tx_resume(struct b43_wldev *dev); + +int b43_dma_tx(struct b43_wldev *dev, + struct sk_buff *skb); +void b43_dma_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status); + +void b43_dma_rx(struct b43_dmaring *ring); + +void b43_dma_direct_fifo_rx(struct b43_wldev *dev, + unsigned int engine_index, bool enable); + +#endif /* B43_DMA_H_ */ diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c new file mode 100644 index 0000000..c587115 --- /dev/null +++ b/drivers/net/wireless/b43/leds.c @@ -0,0 +1,360 @@ +/* + + Broadcom B43 wireless driver + LED control + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005 Stefano Brivio + Copyright (c) 2005-2007 Michael Buesch + Copyright (c) 2005 Danny van Dyk + Copyright (c) 2005 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "leds.h" +#include "rfkill.h" + + +static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index, + bool activelow) +{ + u16 ctl; + + ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL); + if (activelow) + ctl &= ~(1 << led_index); + else + ctl |= (1 << led_index); + b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl); +} + +static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index, + bool activelow) +{ + u16 ctl; + + ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL); + if (activelow) + ctl |= (1 << led_index); + else + ctl &= ~(1 << led_index); + b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl); +} + +static void b43_led_update(struct b43_wldev *dev, + struct b43_led *led) +{ + bool radio_enabled; + bool turn_on; + + if (!led->wl) + return; + + radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable); + + /* The led->state read is racy, but we don't care. In case we raced + * with the brightness_set handler, we will be called again soon + * to fixup our state. 
*/ + if (radio_enabled) + turn_on = atomic_read(&led->state) != LED_OFF; + else + turn_on = 0; + if (turn_on == led->hw_state) + return; + led->hw_state = turn_on; + + if (turn_on) + b43_led_turn_on(dev, led->index, led->activelow); + else + b43_led_turn_off(dev, led->index, led->activelow); +} + +static void b43_leds_work(struct work_struct *work) +{ + struct b43_leds *leds = container_of(work, struct b43_leds, work); + struct b43_wl *wl = container_of(leds, struct b43_wl, leds); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED)) + goto out_unlock; + + b43_led_update(dev, &wl->leds.led_tx); + b43_led_update(dev, &wl->leds.led_rx); + b43_led_update(dev, &wl->leds.led_radio); + b43_led_update(dev, &wl->leds.led_assoc); + +out_unlock: + mutex_unlock(&wl->mutex); +} + +/* Callback from the LED subsystem. */ +static void b43_led_brightness_set(struct led_classdev *led_dev, + enum led_brightness brightness) +{ + struct b43_led *led = container_of(led_dev, struct b43_led, led_dev); + struct b43_wl *wl = led->wl; + + if (likely(!wl->leds.stop)) { + atomic_set(&led->state, brightness); + ieee80211_queue_work(wl->hw, &wl->leds.work); + } +} + +static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, + const char *name, const char *default_trigger, + u8 led_index, bool activelow) +{ + int err; + + if (led->wl) + return -EEXIST; + if (!default_trigger) + return -EINVAL; + led->wl = dev->wl; + led->index = led_index; + led->activelow = activelow; + strncpy(led->name, name, sizeof(led->name)); + atomic_set(&led->state, 0); + + led->led_dev.name = led->name; + led->led_dev.default_trigger = default_trigger; + led->led_dev.brightness_set = b43_led_brightness_set; + + err = led_classdev_register(dev->dev->dev, &led->led_dev); + if (err) { + b43warn(dev->wl, "LEDs: Failed to register %s\n", name); + led->wl = NULL; + return err; + } + + return 0; +} + +static void b43_unregister_led(struct b43_led *led) +{ + if (!led->wl) + return; + led_classdev_unregister(&led->led_dev); + led->wl = NULL; +} + +static void b43_map_led(struct b43_wldev *dev, + u8 led_index, + enum b43_led_behaviour behaviour, + bool activelow) +{ + struct ieee80211_hw *hw = dev->wl->hw; + char name[B43_LED_MAX_NAME_LEN + 1]; + + /* Map the b43 specific LED behaviour value to the + * generic LED triggers. 
*/ + switch (behaviour) { + case B43_LED_INACTIVE: + case B43_LED_OFF: + case B43_LED_ON: + break; + case B43_LED_ACTIVITY: + case B43_LED_TRANSFER: + case B43_LED_APTRANSFER: + snprintf(name, sizeof(name), + "b43-%s::tx", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->wl->leds.led_tx, name, + ieee80211_get_tx_led_name(hw), + led_index, activelow); + snprintf(name, sizeof(name), + "b43-%s::rx", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->wl->leds.led_rx, name, + ieee80211_get_rx_led_name(hw), + led_index, activelow); + break; + case B43_LED_RADIO_ALL: + case B43_LED_RADIO_A: + case B43_LED_RADIO_B: + case B43_LED_MODE_BG: + snprintf(name, sizeof(name), + "b43-%s::radio", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->wl->leds.led_radio, name, + ieee80211_get_radio_led_name(hw), + led_index, activelow); + break; + case B43_LED_WEIRD: + case B43_LED_ASSOC: + snprintf(name, sizeof(name), + "b43-%s::assoc", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->wl->leds.led_assoc, name, + ieee80211_get_assoc_led_name(hw), + led_index, activelow); + break; + default: + b43warn(dev->wl, "LEDs: Unknown behaviour 0x%02X\n", + behaviour); + break; + } +} + +static void b43_led_get_sprominfo(struct b43_wldev *dev, + unsigned int led_index, + enum b43_led_behaviour *behaviour, + bool *activelow) +{ + struct ssb_bus *bus = dev->dev->bus; + u8 sprom[4]; + + sprom[0] = bus->sprom.gpio0; + sprom[1] = bus->sprom.gpio1; + sprom[2] = bus->sprom.gpio2; + sprom[3] = bus->sprom.gpio3; + + if (sprom[led_index] == 0xFF) { + /* There is no LED information in the SPROM + * for this LED. Hardcode it here. */ + *activelow = 0; + switch (led_index) { + case 0: + *behaviour = B43_LED_ACTIVITY; + *activelow = 1; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ) + *behaviour = B43_LED_RADIO_ALL; + break; + case 1: + *behaviour = B43_LED_RADIO_B; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK) + *behaviour = B43_LED_ASSOC; + break; + case 2: + *behaviour = B43_LED_RADIO_A; + break; + case 3: + *behaviour = B43_LED_OFF; + break; + default: + *behaviour = B43_LED_OFF; + B43_WARN_ON(1); + return; + } + } else { + *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR; + *activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW); + } +} + +void b43_leds_init(struct b43_wldev *dev) +{ + struct b43_led *led; + unsigned int i; + enum b43_led_behaviour behaviour; + bool activelow; + + /* Sync the RF-kill LED state (if we have one) with radio and switch states. */ + led = &dev->wl->leds.led_radio; + if (led->wl) { + if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) { + b43_led_turn_on(dev, led->index, led->activelow); + led->hw_state = 1; + atomic_set(&led->state, 1); + } else { + b43_led_turn_off(dev, led->index, led->activelow); + led->hw_state = 0; + atomic_set(&led->state, 0); + } + } + + /* Initialize TX/RX/ASSOC leds */ + led = &dev->wl->leds.led_tx; + if (led->wl) { + b43_led_turn_off(dev, led->index, led->activelow); + led->hw_state = 0; + atomic_set(&led->state, 0); + } + led = &dev->wl->leds.led_rx; + if (led->wl) { + b43_led_turn_off(dev, led->index, led->activelow); + led->hw_state = 0; + atomic_set(&led->state, 0); + } + led = &dev->wl->leds.led_assoc; + if (led->wl) { + b43_led_turn_off(dev, led->index, led->activelow); + led->hw_state = 0; + atomic_set(&led->state, 0); + } + + /* Initialize other LED states. 
*/ + for (i = 0; i < B43_MAX_NR_LEDS; i++) { + b43_led_get_sprominfo(dev, i, &behaviour, &activelow); + switch (behaviour) { + case B43_LED_OFF: + b43_led_turn_off(dev, i, activelow); + break; + case B43_LED_ON: + b43_led_turn_on(dev, i, activelow); + break; + default: + /* Leave others as-is. */ + break; + } + } + + dev->wl->leds.stop = 0; +} + +void b43_leds_exit(struct b43_wldev *dev) +{ + struct b43_leds *leds = &dev->wl->leds; + + b43_led_turn_off(dev, leds->led_tx.index, leds->led_tx.activelow); + b43_led_turn_off(dev, leds->led_rx.index, leds->led_rx.activelow); + b43_led_turn_off(dev, leds->led_assoc.index, leds->led_assoc.activelow); + b43_led_turn_off(dev, leds->led_radio.index, leds->led_radio.activelow); +} + +void b43_leds_stop(struct b43_wldev *dev) +{ + struct b43_leds *leds = &dev->wl->leds; + + leds->stop = 1; + cancel_work_sync(&leds->work); +} + +void b43_leds_register(struct b43_wldev *dev) +{ + unsigned int i; + enum b43_led_behaviour behaviour; + bool activelow; + + INIT_WORK(&dev->wl->leds.work, b43_leds_work); + + /* Register the LEDs to the LED subsystem. */ + for (i = 0; i < B43_MAX_NR_LEDS; i++) { + b43_led_get_sprominfo(dev, i, &behaviour, &activelow); + b43_map_led(dev, i, behaviour, activelow); + } +} + +void b43_leds_unregister(struct b43_wl *wl) +{ + struct b43_leds *leds = &wl->leds; + + b43_unregister_led(&leds->led_tx); + b43_unregister_led(&leds->led_rx); + b43_unregister_led(&leds->led_assoc); + b43_unregister_led(&leds->led_radio); +} diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h new file mode 100644 index 0000000..32b66d5 --- /dev/null +++ b/drivers/net/wireless/b43/leds.h @@ -0,0 +1,94 @@ +#ifndef B43_LEDS_H_ +#define B43_LEDS_H_ + +struct b43_wl; +struct b43_wldev; + +#ifdef CONFIG_B43_LEDS + +#include +#include +#include + + +#define B43_LED_MAX_NAME_LEN 31 + +struct b43_led { + struct b43_wl *wl; + /* The LED class device */ + struct led_classdev led_dev; + /* The index number of the LED. */ + u8 index; + /* If activelow is true, the LED is ON if the + * bit is switched off. */ + bool activelow; + /* The unique name string for this LED device. */ + char name[B43_LED_MAX_NAME_LEN + 1]; + /* The current status of the LED. This is updated locklessly. */ + atomic_t state; + /* The active state in hardware. 
*/ + bool hw_state; +}; + +struct b43_leds { + struct b43_led led_tx; + struct b43_led led_rx; + struct b43_led led_radio; + struct b43_led led_assoc; + + bool stop; + struct work_struct work; +}; + +#define B43_MAX_NR_LEDS 4 + +#define B43_LED_BEHAVIOUR 0x7F +#define B43_LED_ACTIVELOW 0x80 +/* LED behaviour values */ +enum b43_led_behaviour { + B43_LED_OFF, + B43_LED_ON, + B43_LED_ACTIVITY, + B43_LED_RADIO_ALL, + B43_LED_RADIO_A, + B43_LED_RADIO_B, + B43_LED_MODE_BG, + B43_LED_TRANSFER, + B43_LED_APTRANSFER, + B43_LED_WEIRD, //FIXME + B43_LED_ASSOC, + B43_LED_INACTIVE, +}; + +void b43_leds_register(struct b43_wldev *dev); +void b43_leds_unregister(struct b43_wl *wl); +void b43_leds_init(struct b43_wldev *dev); +void b43_leds_exit(struct b43_wldev *dev); +void b43_leds_stop(struct b43_wldev *dev); + + +#else /* CONFIG_B43_LEDS */ +/* LED support disabled */ + +struct b43_leds { + /* empty */ +}; + +static inline void b43_leds_register(struct b43_wldev *dev) +{ +} +static inline void b43_leds_unregister(struct b43_wl *wl) +{ +} +static inline void b43_leds_init(struct b43_wldev *dev) +{ +} +static inline void b43_leds_exit(struct b43_wldev *dev) +{ +} +static inline void b43_leds_stop(struct b43_wldev *dev) +{ +} +#endif /* CONFIG_B43_LEDS */ + +#endif /* B43_LEDS_H_ */ diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c new file mode 100644 index 0000000..976104f --- /dev/null +++ b/drivers/net/wireless/b43/lo.c @@ -0,0 +1,1017 @@ +/* + + Broadcom B43 wireless driver + + G PHY LO (LocalOscillator) Measuring and Control routines + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005, 2006 Stefano Brivio + Copyright (c) 2005-2007 Michael Buesch + Copyright (c) 2005, 2006 Danny van Dyk + Copyright (c) 2005, 2006 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "lo.h" +#include "phy_g.h" +#include "main.h" + +#include +#include + + +static struct b43_lo_calib *b43_find_lo_calib(struct b43_txpower_lo_control *lo, + const struct b43_bbatt *bbatt, + const struct b43_rfatt *rfatt) +{ + struct b43_lo_calib *c; + + list_for_each_entry(c, &lo->calib_list, list) { + if (!b43_compare_bbatt(&c->bbatt, bbatt)) + continue; + if (!b43_compare_rfatt(&c->rfatt, rfatt)) + continue; + return c; + } + + return NULL; +} + +/* Write the LocalOscillator Control (adjust) value-pair. 
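 *
 * [Editor's sketch -- not part of the original patch]  The pair is packed
 * into one 16-bit PHY register value: Q in the low byte, I in the high
 * byte, each as an 8-bit two's complement number.  For example i = -2,
 * q = 5 packs to 0xFE05.  The helper name below is illustrative only.
 */

static inline u16 sketch_pack_loctl(const struct b43_loctl *ctl)
{
        /* Mirrors the packing done by b43_lo_write() below. */
        return (((u8)ctl->i) << 8) | (u8)ctl->q;
}

/* (end of editor's sketch)  b43_lo_write() writes the value-pair to the
 * B43_PHY_LO_CTL register: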
*/ +static void b43_lo_write(struct b43_wldev *dev, struct b43_loctl *control) +{ + struct b43_phy *phy = &dev->phy; + u16 value; + + if (B43_DEBUG) { + if (unlikely(abs(control->i) > 16 || abs(control->q) > 16)) { + b43dbg(dev->wl, "Invalid LO control pair " + "(I: %d, Q: %d)\n", control->i, control->q); + dump_stack(); + return; + } + } + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + + value = (u8) (control->q); + value |= ((u8) (control->i)) << 8; + b43_phy_write(dev, B43_PHY_LO_CTL, value); +} + +static u16 lo_measure_feedthrough(struct b43_wldev *dev, + u16 lna, u16 pga, u16 trsw_rx) +{ + struct b43_phy *phy = &dev->phy; + u16 rfover; + u16 feedthrough; + + if (phy->gmode) { + lna <<= B43_PHY_RFOVERVAL_LNA_SHIFT; + pga <<= B43_PHY_RFOVERVAL_PGA_SHIFT; + + B43_WARN_ON(lna & ~B43_PHY_RFOVERVAL_LNA); + B43_WARN_ON(pga & ~B43_PHY_RFOVERVAL_PGA); +/*FIXME This assertion fails B43_WARN_ON(trsw_rx & ~(B43_PHY_RFOVERVAL_TRSWRX | + B43_PHY_RFOVERVAL_BW)); +*/ + trsw_rx &= (B43_PHY_RFOVERVAL_TRSWRX | B43_PHY_RFOVERVAL_BW); + + /* Construct the RF Override Value */ + rfover = B43_PHY_RFOVERVAL_UNK; + rfover |= pga; + rfover |= lna; + rfover |= trsw_rx; + if ((dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) + && phy->rev > 6) + rfover |= B43_PHY_RFOVERVAL_EXTLNA; + + b43_phy_write(dev, B43_PHY_PGACTL, 0xE300); + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover); + udelay(10); + rfover |= B43_PHY_RFOVERVAL_BW_LBW; + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover); + udelay(10); + rfover |= B43_PHY_RFOVERVAL_BW_LPF; + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover); + udelay(10); + b43_phy_write(dev, B43_PHY_PGACTL, 0xF300); + } else { + pga |= B43_PHY_PGACTL_UNKNOWN; + b43_phy_write(dev, B43_PHY_PGACTL, pga); + udelay(10); + pga |= B43_PHY_PGACTL_LOWBANDW; + b43_phy_write(dev, B43_PHY_PGACTL, pga); + udelay(10); + pga |= B43_PHY_PGACTL_LPF; + b43_phy_write(dev, B43_PHY_PGACTL, pga); + } + udelay(21); + feedthrough = b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + + /* This is a good place to check if we need to relax a bit, + * as this is the main function called regularly + * in the LO calibration. */ + cond_resched(); + + return feedthrough; +} + +/* TXCTL Register and Value Table. + * Returns the "TXCTL Register". + * "value" is the "TXCTL Value". + * "pad_mix_gain" is the PAD Mixer Gain. 
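 *
 * Editor's note (added): b43_calibrate_lo_setting() further down is the
 * consumer of these three outputs: it writes the "TXCTL Value" bits into
 * the returned radio register (set only when the PAD mixer is in use,
 * cleared otherwise) and subtracts the PAD mixer gain from the maximum
 * RX gain used for the loopback feedthrough measurement.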
+ */ +static u16 lo_txctl_register_table(struct b43_wldev *dev, + u16 *value, u16 *pad_mix_gain) +{ + struct b43_phy *phy = &dev->phy; + u16 reg, v, padmix; + + if (phy->type == B43_PHYTYPE_B) { + v = 0x30; + if (phy->radio_rev <= 5) { + reg = 0x43; + padmix = 0; + } else { + reg = 0x52; + padmix = 5; + } + } else { + if (phy->rev >= 2 && phy->radio_rev == 8) { + reg = 0x43; + v = 0x10; + padmix = 2; + } else { + reg = 0x52; + v = 0x30; + padmix = 5; + } + } + if (value) + *value = v; + if (pad_mix_gain) + *pad_mix_gain = padmix; + + return reg; +} + +static void lo_measure_txctl_values(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_txpower_lo_control *lo = gphy->lo_control; + u16 reg, mask; + u16 trsw_rx, pga; + u16 radio_pctl_reg; + + static const u8 tx_bias_values[] = { + 0x09, 0x08, 0x0A, 0x01, 0x00, + 0x02, 0x05, 0x04, 0x06, + }; + static const u8 tx_magn_values[] = { + 0x70, 0x40, + }; + + if (!has_loopback_gain(phy)) { + radio_pctl_reg = 6; + trsw_rx = 2; + pga = 0; + } else { + int lb_gain; /* Loopback gain (in dB) */ + + trsw_rx = 0; + lb_gain = gphy->max_lb_gain / 2; + if (lb_gain > 10) { + radio_pctl_reg = 0; + pga = abs(10 - lb_gain) / 6; + pga = clamp_val(pga, 0, 15); + } else { + int cmp_val; + int tmp; + + pga = 0; + cmp_val = 0x24; + if ((phy->rev >= 2) && + (phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) + cmp_val = 0x3C; + tmp = lb_gain; + if ((10 - lb_gain) < cmp_val) + tmp = (10 - lb_gain); + if (tmp < 0) + tmp += 6; + else + tmp += 3; + cmp_val /= 4; + tmp /= 4; + if (tmp >= cmp_val) + radio_pctl_reg = cmp_val; + else + radio_pctl_reg = tmp; + } + } + b43_radio_maskset(dev, 0x43, 0xFFF0, radio_pctl_reg); + b43_gphy_set_baseband_attenuation(dev, 2); + + reg = lo_txctl_register_table(dev, &mask, NULL); + mask = ~mask; + b43_radio_mask(dev, reg, mask); + + if (has_tx_magnification(phy)) { + int i, j; + int feedthrough; + int min_feedth = 0xFFFF; + u8 tx_magn, tx_bias; + + for (i = 0; i < ARRAY_SIZE(tx_magn_values); i++) { + tx_magn = tx_magn_values[i]; + b43_radio_maskset(dev, 0x52, 0xFF0F, tx_magn); + for (j = 0; j < ARRAY_SIZE(tx_bias_values); j++) { + tx_bias = tx_bias_values[j]; + b43_radio_maskset(dev, 0x52, 0xFFF0, tx_bias); + feedthrough = + lo_measure_feedthrough(dev, 0, pga, + trsw_rx); + if (feedthrough < min_feedth) { + lo->tx_bias = tx_bias; + lo->tx_magn = tx_magn; + min_feedth = feedthrough; + } + if (lo->tx_bias == 0) + break; + } + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) + & 0xFF00) | lo->tx_bias | lo-> + tx_magn); + } + } else { + lo->tx_magn = 0; + lo->tx_bias = 0; + b43_radio_mask(dev, 0x52, 0xFFF0); /* TX bias == 0 */ + } + lo->txctl_measured_time = jiffies; +} + +static void lo_read_power_vector(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_txpower_lo_control *lo = gphy->lo_control; + int i; + u64 tmp; + u64 power_vector = 0; + + for (i = 0; i < 8; i += 2) { + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x310 + i); + power_vector |= (tmp << (i * 8)); + /* Clear the vector on the device. 
*/ + b43_shm_write16(dev, B43_SHM_SHARED, 0x310 + i, 0); + } + if (power_vector) + lo->power_vector = power_vector; + lo->pwr_vec_read_time = jiffies; +} + +/* 802.11/LO/GPHY/MeasuringGains */ +static void lo_measure_gain_values(struct b43_wldev *dev, + s16 max_rx_gain, int use_trsw_rx) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + u16 tmp; + + if (max_rx_gain < 0) + max_rx_gain = 0; + + if (has_loopback_gain(phy)) { + int trsw_rx = 0; + int trsw_rx_gain; + + if (use_trsw_rx) { + trsw_rx_gain = gphy->trsw_rx_gain / 2; + if (max_rx_gain >= trsw_rx_gain) { + trsw_rx_gain = max_rx_gain - trsw_rx_gain; + trsw_rx = 0x20; + } + } else + trsw_rx_gain = max_rx_gain; + if (trsw_rx_gain < 9) { + gphy->lna_lod_gain = 0; + } else { + gphy->lna_lod_gain = 1; + trsw_rx_gain -= 8; + } + trsw_rx_gain = clamp_val(trsw_rx_gain, 0, 0x2D); + gphy->pga_gain = trsw_rx_gain / 3; + if (gphy->pga_gain >= 5) { + gphy->pga_gain -= 5; + gphy->lna_gain = 2; + } else + gphy->lna_gain = 0; + } else { + gphy->lna_gain = 0; + gphy->trsw_rx_gain = 0x20; + if (max_rx_gain >= 0x14) { + gphy->lna_lod_gain = 1; + gphy->pga_gain = 2; + } else if (max_rx_gain >= 0x12) { + gphy->lna_lod_gain = 1; + gphy->pga_gain = 1; + } else if (max_rx_gain >= 0xF) { + gphy->lna_lod_gain = 1; + gphy->pga_gain = 0; + } else { + gphy->lna_lod_gain = 0; + gphy->pga_gain = 0; + } + } + + tmp = b43_radio_read16(dev, 0x7A); + if (gphy->lna_lod_gain == 0) + tmp &= ~0x0008; + else + tmp |= 0x0008; + b43_radio_write16(dev, 0x7A, tmp); +} + +struct lo_g_saved_values { + u8 old_channel; + + /* Core registers */ + u16 reg_3F4; + u16 reg_3E2; + + /* PHY registers */ + u16 phy_lo_mask; + u16 phy_extg_01; + u16 phy_dacctl_hwpctl; + u16 phy_dacctl; + u16 phy_cck_14; + u16 phy_hpwr_tssictl; + u16 phy_analogover; + u16 phy_analogoverval; + u16 phy_rfover; + u16 phy_rfoverval; + u16 phy_classctl; + u16 phy_cck_3E; + u16 phy_crs0; + u16 phy_pgactl; + u16 phy_cck_2A; + u16 phy_syncctl; + u16 phy_cck_30; + u16 phy_cck_06; + + /* Radio registers */ + u16 radio_43; + u16 radio_7A; + u16 radio_52; +}; + +static void lo_measure_setup(struct b43_wldev *dev, + struct lo_g_saved_values *sav) +{ + struct ssb_sprom *sprom = &dev->dev->bus->sprom; + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_txpower_lo_control *lo = gphy->lo_control; + u16 tmp; + + if (b43_has_hardware_pctl(dev)) { + sav->phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK); + sav->phy_extg_01 = b43_phy_read(dev, B43_PHY_EXTG(0x01)); + sav->phy_dacctl_hwpctl = b43_phy_read(dev, B43_PHY_DACCTL); + sav->phy_cck_14 = b43_phy_read(dev, B43_PHY_CCK(0x14)); + sav->phy_hpwr_tssictl = b43_phy_read(dev, B43_PHY_HPWR_TSSICTL); + + b43_phy_set(dev, B43_PHY_HPWR_TSSICTL, 0x100); + b43_phy_set(dev, B43_PHY_EXTG(0x01), 0x40); + b43_phy_set(dev, B43_PHY_DACCTL, 0x40); + b43_phy_set(dev, B43_PHY_CCK(0x14), 0x200); + } + if (phy->type == B43_PHYTYPE_B && + phy->radio_ver == 0x2050 && phy->radio_rev < 6) { + b43_phy_write(dev, B43_PHY_CCK(0x16), 0x410); + b43_phy_write(dev, B43_PHY_CCK(0x17), 0x820); + } + if (phy->rev >= 2) { + sav->phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER); + sav->phy_analogoverval = + b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); + sav->phy_rfover = b43_phy_read(dev, B43_PHY_RFOVER); + sav->phy_rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); + sav->phy_classctl = b43_phy_read(dev, B43_PHY_CLASSCTL); + sav->phy_cck_3E = b43_phy_read(dev, B43_PHY_CCK(0x3E)); + sav->phy_crs0 = b43_phy_read(dev, B43_PHY_CRS0); + + b43_phy_mask(dev, 
B43_PHY_CLASSCTL, 0xFFFC); + b43_phy_mask(dev, B43_PHY_CRS0, 0x7FFF); + b43_phy_set(dev, B43_PHY_ANALOGOVER, 0x0003); + b43_phy_mask(dev, B43_PHY_ANALOGOVERVAL, 0xFFFC); + if (phy->type == B43_PHYTYPE_G) { + if ((phy->rev >= 7) && + (sprom->boardflags_lo & B43_BFL_EXTLNA)) { + b43_phy_write(dev, B43_PHY_RFOVER, 0x933); + } else { + b43_phy_write(dev, B43_PHY_RFOVER, 0x133); + } + } else { + b43_phy_write(dev, B43_PHY_RFOVER, 0); + } + b43_phy_write(dev, B43_PHY_CCK(0x3E), 0); + } + sav->reg_3F4 = b43_read16(dev, 0x3F4); + sav->reg_3E2 = b43_read16(dev, 0x3E2); + sav->radio_43 = b43_radio_read16(dev, 0x43); + sav->radio_7A = b43_radio_read16(dev, 0x7A); + sav->phy_pgactl = b43_phy_read(dev, B43_PHY_PGACTL); + sav->phy_cck_2A = b43_phy_read(dev, B43_PHY_CCK(0x2A)); + sav->phy_syncctl = b43_phy_read(dev, B43_PHY_SYNCCTL); + sav->phy_dacctl = b43_phy_read(dev, B43_PHY_DACCTL); + + if (!has_tx_magnification(phy)) { + sav->radio_52 = b43_radio_read16(dev, 0x52); + sav->radio_52 &= 0x00F0; + } + if (phy->type == B43_PHYTYPE_B) { + sav->phy_cck_30 = b43_phy_read(dev, B43_PHY_CCK(0x30)); + sav->phy_cck_06 = b43_phy_read(dev, B43_PHY_CCK(0x06)); + b43_phy_write(dev, B43_PHY_CCK(0x30), 0x00FF); + b43_phy_write(dev, B43_PHY_CCK(0x06), 0x3F3F); + } else { + b43_write16(dev, 0x3E2, b43_read16(dev, 0x3E2) + | 0x8000); + } + b43_write16(dev, 0x3F4, b43_read16(dev, 0x3F4) + & 0xF000); + + tmp = + (phy->type == B43_PHYTYPE_G) ? B43_PHY_LO_MASK : B43_PHY_CCK(0x2E); + b43_phy_write(dev, tmp, 0x007F); + + tmp = sav->phy_syncctl; + b43_phy_write(dev, B43_PHY_SYNCCTL, tmp & 0xFF7F); + tmp = sav->radio_7A; + b43_radio_write16(dev, 0x007A, tmp & 0xFFF0); + + b43_phy_write(dev, B43_PHY_CCK(0x2A), 0x8A3); + if (phy->type == B43_PHYTYPE_G || + (phy->type == B43_PHYTYPE_B && + phy->radio_ver == 0x2050 && phy->radio_rev >= 6)) { + b43_phy_write(dev, B43_PHY_CCK(0x2B), 0x1003); + } else + b43_phy_write(dev, B43_PHY_CCK(0x2B), 0x0802); + if (phy->rev >= 2) + b43_dummy_transmission(dev, false, true); + b43_gphy_channel_switch(dev, 6, 0); + b43_radio_read16(dev, 0x51); /* dummy read */ + if (phy->type == B43_PHYTYPE_G) + b43_phy_write(dev, B43_PHY_CCK(0x2F), 0); + + /* Re-measure the txctl values, if needed. 
*/ + if (time_before(lo->txctl_measured_time, + jiffies - B43_LO_TXCTL_EXPIRE)) + lo_measure_txctl_values(dev); + + if (phy->type == B43_PHYTYPE_G && phy->rev >= 3) { + b43_phy_write(dev, B43_PHY_LO_MASK, 0xC078); + } else { + if (phy->type == B43_PHYTYPE_B) + b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8078); + else + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); + } +} + +static void lo_measure_restore(struct b43_wldev *dev, + struct lo_g_saved_values *sav) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + u16 tmp; + + if (phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_PGACTL, 0xE300); + tmp = (gphy->pga_gain << 8); + b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA0); + udelay(5); + b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA2); + udelay(2); + b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA3); + } else { + tmp = (gphy->pga_gain | 0xEFA0); + b43_phy_write(dev, B43_PHY_PGACTL, tmp); + } + if (phy->type == B43_PHYTYPE_G) { + if (phy->rev >= 3) + b43_phy_write(dev, B43_PHY_CCK(0x2E), 0xC078); + else + b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8078); + if (phy->rev >= 2) + b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x0202); + else + b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x0101); + } + b43_write16(dev, 0x3F4, sav->reg_3F4); + b43_phy_write(dev, B43_PHY_PGACTL, sav->phy_pgactl); + b43_phy_write(dev, B43_PHY_CCK(0x2A), sav->phy_cck_2A); + b43_phy_write(dev, B43_PHY_SYNCCTL, sav->phy_syncctl); + b43_phy_write(dev, B43_PHY_DACCTL, sav->phy_dacctl); + b43_radio_write16(dev, 0x43, sav->radio_43); + b43_radio_write16(dev, 0x7A, sav->radio_7A); + if (!has_tx_magnification(phy)) { + tmp = sav->radio_52; + b43_radio_maskset(dev, 0x52, 0xFF0F, tmp); + } + b43_write16(dev, 0x3E2, sav->reg_3E2); + if (phy->type == B43_PHYTYPE_B && + phy->radio_ver == 0x2050 && phy->radio_rev <= 5) { + b43_phy_write(dev, B43_PHY_CCK(0x30), sav->phy_cck_30); + b43_phy_write(dev, B43_PHY_CCK(0x06), sav->phy_cck_06); + } + if (phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_ANALOGOVER, sav->phy_analogover); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + sav->phy_analogoverval); + b43_phy_write(dev, B43_PHY_CLASSCTL, sav->phy_classctl); + b43_phy_write(dev, B43_PHY_RFOVER, sav->phy_rfover); + b43_phy_write(dev, B43_PHY_RFOVERVAL, sav->phy_rfoverval); + b43_phy_write(dev, B43_PHY_CCK(0x3E), sav->phy_cck_3E); + b43_phy_write(dev, B43_PHY_CRS0, sav->phy_crs0); + } + if (b43_has_hardware_pctl(dev)) { + tmp = (sav->phy_lo_mask & 0xBFFF); + b43_phy_write(dev, B43_PHY_LO_MASK, tmp); + b43_phy_write(dev, B43_PHY_EXTG(0x01), sav->phy_extg_01); + b43_phy_write(dev, B43_PHY_DACCTL, sav->phy_dacctl_hwpctl); + b43_phy_write(dev, B43_PHY_CCK(0x14), sav->phy_cck_14); + b43_phy_write(dev, B43_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl); + } + b43_gphy_channel_switch(dev, sav->old_channel, 1); +} + +struct b43_lo_g_statemachine { + int current_state; + int nr_measured; + int state_val_multiplier; + u16 lowest_feedth; + struct b43_loctl min_loctl; +}; + +/* Loop over each possible value in this state. 
*/ +static int lo_probe_possible_loctls(struct b43_wldev *dev, + struct b43_loctl *probe_loctl, + struct b43_lo_g_statemachine *d) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_loctl test_loctl; + struct b43_loctl orig_loctl; + struct b43_loctl prev_loctl = { + .i = -100, + .q = -100, + }; + int i; + int begin, end; + int found_lower = 0; + u16 feedth; + + static const struct b43_loctl modifiers[] = { + {.i = 1,.q = 1,}, + {.i = 1,.q = 0,}, + {.i = 1,.q = -1,}, + {.i = 0,.q = -1,}, + {.i = -1,.q = -1,}, + {.i = -1,.q = 0,}, + {.i = -1,.q = 1,}, + {.i = 0,.q = 1,}, + }; + + if (d->current_state == 0) { + begin = 1; + end = 8; + } else if (d->current_state % 2 == 0) { + begin = d->current_state - 1; + end = d->current_state + 1; + } else { + begin = d->current_state - 2; + end = d->current_state + 2; + } + if (begin < 1) + begin += 8; + if (end > 8) + end -= 8; + + memcpy(&orig_loctl, probe_loctl, sizeof(struct b43_loctl)); + i = begin; + d->current_state = i; + while (1) { + B43_WARN_ON(!(i >= 1 && i <= 8)); + memcpy(&test_loctl, &orig_loctl, sizeof(struct b43_loctl)); + test_loctl.i += modifiers[i - 1].i * d->state_val_multiplier; + test_loctl.q += modifiers[i - 1].q * d->state_val_multiplier; + if ((test_loctl.i != prev_loctl.i || + test_loctl.q != prev_loctl.q) && + (abs(test_loctl.i) <= 16 && abs(test_loctl.q) <= 16)) { + b43_lo_write(dev, &test_loctl); + feedth = lo_measure_feedthrough(dev, gphy->lna_gain, + gphy->pga_gain, + gphy->trsw_rx_gain); + if (feedth < d->lowest_feedth) { + memcpy(probe_loctl, &test_loctl, + sizeof(struct b43_loctl)); + found_lower = 1; + d->lowest_feedth = feedth; + if ((d->nr_measured < 2) && + !has_loopback_gain(phy)) + break; + } + } + memcpy(&prev_loctl, &test_loctl, sizeof(prev_loctl)); + if (i == end) + break; + if (i == 8) + i = 1; + else + i++; + d->current_state = i; + } + + return found_lower; +} + +static void lo_probe_loctls_statemachine(struct b43_wldev *dev, + struct b43_loctl *loctl, + int *max_rx_gain) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_lo_g_statemachine d; + u16 feedth; + int found_lower; + struct b43_loctl probe_loctl; + int max_repeat = 1, repeat_cnt = 0; + + d.nr_measured = 0; + d.state_val_multiplier = 1; + if (has_loopback_gain(phy)) + d.state_val_multiplier = 3; + + memcpy(&d.min_loctl, loctl, sizeof(struct b43_loctl)); + if (has_loopback_gain(phy)) + max_repeat = 4; + do { + b43_lo_write(dev, &d.min_loctl); + feedth = lo_measure_feedthrough(dev, gphy->lna_gain, + gphy->pga_gain, + gphy->trsw_rx_gain); + if (feedth < 0x258) { + if (feedth >= 0x12C) + *max_rx_gain += 6; + else + *max_rx_gain += 3; + feedth = lo_measure_feedthrough(dev, gphy->lna_gain, + gphy->pga_gain, + gphy->trsw_rx_gain); + } + d.lowest_feedth = feedth; + + d.current_state = 0; + do { + B43_WARN_ON(! 
+ (d.current_state >= 0 + && d.current_state <= 8)); + memcpy(&probe_loctl, &d.min_loctl, + sizeof(struct b43_loctl)); + found_lower = + lo_probe_possible_loctls(dev, &probe_loctl, &d); + if (!found_lower) + break; + if ((probe_loctl.i == d.min_loctl.i) && + (probe_loctl.q == d.min_loctl.q)) + break; + memcpy(&d.min_loctl, &probe_loctl, + sizeof(struct b43_loctl)); + d.nr_measured++; + } while (d.nr_measured < 24); + memcpy(loctl, &d.min_loctl, sizeof(struct b43_loctl)); + + if (has_loopback_gain(phy)) { + if (d.lowest_feedth > 0x1194) + *max_rx_gain -= 6; + else if (d.lowest_feedth < 0x5DC) + *max_rx_gain += 3; + if (repeat_cnt == 0) { + if (d.lowest_feedth <= 0x5DC) { + d.state_val_multiplier = 1; + repeat_cnt++; + } else + d.state_val_multiplier = 2; + } else if (repeat_cnt == 2) + d.state_val_multiplier = 1; + } + lo_measure_gain_values(dev, *max_rx_gain, + has_loopback_gain(phy)); + } while (++repeat_cnt < max_repeat); +} + +static +struct b43_lo_calib *b43_calibrate_lo_setting(struct b43_wldev *dev, + const struct b43_bbatt *bbatt, + const struct b43_rfatt *rfatt) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_loctl loctl = { + .i = 0, + .q = 0, + }; + int max_rx_gain; + struct b43_lo_calib *cal; + struct lo_g_saved_values uninitialized_var(saved_regs); + /* Values from the "TXCTL Register and Value Table" */ + u16 txctl_reg; + u16 txctl_value; + u16 pad_mix_gain; + + saved_regs.old_channel = phy->channel; + b43_mac_suspend(dev); + lo_measure_setup(dev, &saved_regs); + + txctl_reg = lo_txctl_register_table(dev, &txctl_value, &pad_mix_gain); + + b43_radio_maskset(dev, 0x43, 0xFFF0, rfatt->att); + b43_radio_maskset(dev, txctl_reg, ~txctl_value, (rfatt->with_padmix ? txctl_value :0)); + + max_rx_gain = rfatt->att * 2; + max_rx_gain += bbatt->att / 2; + if (rfatt->with_padmix) + max_rx_gain -= pad_mix_gain; + if (has_loopback_gain(phy)) + max_rx_gain += gphy->max_lb_gain; + lo_measure_gain_values(dev, max_rx_gain, + has_loopback_gain(phy)); + + b43_gphy_set_baseband_attenuation(dev, bbatt->att); + lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain); + + lo_measure_restore(dev, &saved_regs); + b43_mac_enable(dev); + + if (b43_debug(dev, B43_DBG_LO)) { + b43dbg(dev->wl, "LO: Calibrated for BB(%u), RF(%u,%u) " + "=> I=%d Q=%d\n", + bbatt->att, rfatt->att, rfatt->with_padmix, + loctl.i, loctl.q); + } + + cal = kmalloc(sizeof(*cal), GFP_KERNEL); + if (!cal) { + b43warn(dev->wl, "LO calib: out of memory\n"); + return NULL; + } + memcpy(&cal->bbatt, bbatt, sizeof(*bbatt)); + memcpy(&cal->rfatt, rfatt, sizeof(*rfatt)); + memcpy(&cal->ctl, &loctl, sizeof(loctl)); + cal->calib_time = jiffies; + INIT_LIST_HEAD(&cal->list); + + return cal; +} + +/* Get a calibrated LO setting for the given attenuation values. + * Might return a NULL pointer under OOM! */ +static +struct b43_lo_calib *b43_get_calib_lo_settings(struct b43_wldev *dev, + const struct b43_bbatt *bbatt, + const struct b43_rfatt *rfatt) +{ + struct b43_txpower_lo_control *lo = dev->phy.g->lo_control; + struct b43_lo_calib *c; + + c = b43_find_lo_calib(lo, bbatt, rfatt); + if (c) + return c; + /* Not in the list of calibrated LO settings. + * Calibrate it now. 
*/ + c = b43_calibrate_lo_setting(dev, bbatt, rfatt); + if (!c) + return NULL; + list_add(&c->list, &lo->calib_list); + + return c; +} + +void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_txpower_lo_control *lo = gphy->lo_control; + int i; + int rf_offset, bb_offset; + const struct b43_rfatt *rfatt; + const struct b43_bbatt *bbatt; + u64 power_vector; + bool table_changed = 0; + + BUILD_BUG_ON(B43_DC_LT_SIZE != 32); + B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64); + + power_vector = lo->power_vector; + if (!update_all && !power_vector) + return; /* Nothing to do. */ + + /* Suspend the MAC now to avoid continuous suspend/enable + * cycles in the loop. */ + b43_mac_suspend(dev); + + for (i = 0; i < B43_DC_LT_SIZE * 2; i++) { + struct b43_lo_calib *cal; + int idx; + u16 val; + + if (!update_all && !(power_vector & (((u64)1ULL) << i))) + continue; + /* Update the table entry for this power_vector bit. + * The table rows are RFatt entries and columns are BBatt. */ + bb_offset = i / lo->rfatt_list.len; + rf_offset = i % lo->rfatt_list.len; + bbatt = &(lo->bbatt_list.list[bb_offset]); + rfatt = &(lo->rfatt_list.list[rf_offset]); + + cal = b43_calibrate_lo_setting(dev, bbatt, rfatt); + if (!cal) { + b43warn(dev->wl, "LO: Could not " + "calibrate DC table entry\n"); + continue; + } + /*FIXME: Is Q really in the low nibble? */ + val = (u8)(cal->ctl.q); + val |= ((u8)(cal->ctl.i)) << 4; + kfree(cal); + + /* Get the index into the hardware DC LT. */ + idx = i / 2; + /* Change the table in memory. */ + if (i % 2) { + /* Change the high byte. */ + lo->dc_lt[idx] = (lo->dc_lt[idx] & 0x00FF) + | ((val & 0x00FF) << 8); + } else { + /* Change the low byte. */ + lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00) + | (val & 0x00FF); + } + table_changed = 1; + } + if (table_changed) { + /* The table changed in memory. Update the hardware table. */ + for (i = 0; i < B43_DC_LT_SIZE; i++) + b43_phy_write(dev, 0x3A0 + i, lo->dc_lt[i]); + } + b43_mac_enable(dev); +} + +/* Fixup the RF attenuation value for the case where we are + * using the PAD mixer. 
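 *
 * [Editor's sketch -- not part of the original patch]  The DC lookup table
 * update in b43_gphy_dc_lt_init() above maps each power-vector bit to a
 * table position as sketched below; each byte then carries Q in the low
 * nibble and I in the high nibble.  Example (assuming a hypothetical RF
 * attenuation list of 9 entries): bit 13 selects bbatt[1]/rfatt[4] and
 * updates the high byte of dc_lt[6].
 */

static void sketch_dc_lt_position(unsigned int bit, unsigned int nr_rfatt,
                                  unsigned int *bb, unsigned int *rf,
                                  unsigned int *word, bool *high_byte)
{
        *bb = bit / nr_rfatt;   /* index into lo->bbatt_list */
        *rf = bit % nr_rfatt;   /* index into lo->rfatt_list */
        *word = bit / 2;        /* 16-bit word within lo->dc_lt[] */
        *high_byte = bit % 2;   /* odd bits update the high byte */
}

/* (end of editor's sketch)  Fix up the RF attenuation for the PAD mixer
 * case: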
*/ +static inline void b43_lo_fixup_rfatt(struct b43_rfatt *rf) +{ + if (!rf->with_padmix) + return; + if ((rf->att != 1) && (rf->att != 2) && (rf->att != 3)) + rf->att = 4; +} + +void b43_lo_g_adjust(struct b43_wldev *dev) +{ + struct b43_phy_g *gphy = dev->phy.g; + struct b43_lo_calib *cal; + struct b43_rfatt rf; + + memcpy(&rf, &gphy->rfatt, sizeof(rf)); + b43_lo_fixup_rfatt(&rf); + + cal = b43_get_calib_lo_settings(dev, &gphy->bbatt, &rf); + if (!cal) + return; + b43_lo_write(dev, &cal->ctl); +} + +void b43_lo_g_adjust_to(struct b43_wldev *dev, + u16 rfatt, u16 bbatt, u16 tx_control) +{ + struct b43_rfatt rf; + struct b43_bbatt bb; + struct b43_lo_calib *cal; + + memset(&rf, 0, sizeof(rf)); + memset(&bb, 0, sizeof(bb)); + rf.att = rfatt; + bb.att = bbatt; + b43_lo_fixup_rfatt(&rf); + cal = b43_get_calib_lo_settings(dev, &bb, &rf); + if (!cal) + return; + b43_lo_write(dev, &cal->ctl); +} + +/* Periodic LO maintanance work */ +void b43_lo_g_maintanance_work(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_txpower_lo_control *lo = gphy->lo_control; + unsigned long now; + unsigned long expire; + struct b43_lo_calib *cal, *tmp; + bool current_item_expired = 0; + bool hwpctl; + + if (!lo) + return; + now = jiffies; + hwpctl = b43_has_hardware_pctl(dev); + + if (hwpctl) { + /* Read the power vector and update it, if needed. */ + expire = now - B43_LO_PWRVEC_EXPIRE; + if (time_before(lo->pwr_vec_read_time, expire)) { + lo_read_power_vector(dev); + b43_gphy_dc_lt_init(dev, 0); + } + //FIXME Recalc the whole DC table from time to time? + } + + if (hwpctl) + return; + /* Search for expired LO settings. Remove them. + * Recalibrate the current setting, if expired. */ + expire = now - B43_LO_CALIB_EXPIRE; + list_for_each_entry_safe(cal, tmp, &lo->calib_list, list) { + if (!time_before(cal->calib_time, expire)) + continue; + /* This item expired. */ + if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) && + b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) { + B43_WARN_ON(current_item_expired); + current_item_expired = 1; + } + if (b43_debug(dev, B43_DBG_LO)) { + b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), " + "I=%d, Q=%d expired\n", + cal->bbatt.att, cal->rfatt.att, + cal->rfatt.with_padmix, + cal->ctl.i, cal->ctl.q); + } + list_del(&cal->list); + kfree(cal); + } + if (current_item_expired || unlikely(list_empty(&lo->calib_list))) { + /* Recalibrate currently used LO setting. 
*/ + if (b43_debug(dev, B43_DBG_LO)) + b43dbg(dev->wl, "LO: Recalibrating current LO setting\n"); + cal = b43_calibrate_lo_setting(dev, &gphy->bbatt, &gphy->rfatt); + if (cal) { + list_add(&cal->list, &lo->calib_list); + b43_lo_write(dev, &cal->ctl); + } else + b43warn(dev->wl, "Failed to recalibrate current LO setting\n"); + } +} + +void b43_lo_g_cleanup(struct b43_wldev *dev) +{ + struct b43_txpower_lo_control *lo = dev->phy.g->lo_control; + struct b43_lo_calib *cal, *tmp; + + if (!lo) + return; + list_for_each_entry_safe(cal, tmp, &lo->calib_list, list) { + list_del(&cal->list); + kfree(cal); + } +} + +/* LO Initialization */ +void b43_lo_g_init(struct b43_wldev *dev) +{ + if (b43_has_hardware_pctl(dev)) { + lo_read_power_vector(dev); + b43_gphy_dc_lt_init(dev, 1); + } +} diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h new file mode 100644 index 0000000..3b27e20 --- /dev/null +++ b/drivers/net/wireless/b43/lo.h @@ -0,0 +1,87 @@ +#ifndef B43_LO_H_ +#define B43_LO_H_ + +/* G-PHY Local Oscillator */ + +#include "phy_g.h" + +struct b43_wldev; + +/* Local Oscillator control value-pair. */ +struct b43_loctl { + /* Control values. */ + s8 i; + s8 q; +}; +/* Debugging: Poison value for i and q values. */ +#define B43_LOCTL_POISON 111 + +/* This struct holds calibrated LO settings for a set of + * Baseband and RF attenuation settings. */ +struct b43_lo_calib { + /* The set of attenuation values this set of LO + * control values is calibrated for. */ + struct b43_bbatt bbatt; + struct b43_rfatt rfatt; + /* The set of control values for the LO. */ + struct b43_loctl ctl; + /* The time when these settings were calibrated (in jiffies) */ + unsigned long calib_time; + /* List. */ + struct list_head list; +}; + +/* Size of the DC Lookup Table in 16bit words. */ +#define B43_DC_LT_SIZE 32 + +/* Local Oscillator calibration information */ +struct b43_txpower_lo_control { + /* Lists of RF and BB attenuation values for this device. + * Used for building hardware power control tables. */ + struct b43_rfatt_list rfatt_list; + struct b43_bbatt_list bbatt_list; + + /* The DC Lookup Table is cached in memory here. + * Note that this is only used for Hardware Power Control. */ + u16 dc_lt[B43_DC_LT_SIZE]; + + /* List of calibrated control values (struct b43_lo_calib). */ + struct list_head calib_list; + /* Last time the power vector was read (jiffies). */ + unsigned long pwr_vec_read_time; + /* Last time the txctl values were measured (jiffies). */ + unsigned long txctl_measured_time; + + /* Current TX Bias value */ + u8 tx_bias; + /* Current TX Magnification Value (if used by the device) */ + u8 tx_magn; + + /* Saved device PowerVector */ + u64 power_vector; +}; + +/* Calibration expire timeouts. + * Timeouts must be multiple of 15 seconds. To make sure + * the item really expired when the 15 second timer hits, we + * subtract two additional seconds from the timeout. */ +#define B43_LO_CALIB_EXPIRE (HZ * (30 - 2)) +#define B43_LO_PWRVEC_EXPIRE (HZ * (30 - 2)) +#define B43_LO_TXCTL_EXPIRE (HZ * (180 - 4)) + + +/* Adjust the Local Oscillator to the saved attenuation + * and txctl values. + */ +void b43_lo_g_adjust(struct b43_wldev *dev); +/* Adjust to specific values. 
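 *
 * [Editor's sketch -- not part of the original patch]  The expiry test
 * used by the maintenance work in lo.c boils down to the helper below.
 * It assumes <linux/jiffies.h> is reachable through the existing
 * includes; the helper name is illustrative only.
 */

static inline bool sketch_lo_calib_expired(const struct b43_lo_calib *cal)
{
        /* Same comparison b43_lo_g_maintanance_work() performs. */
        return time_before(cal->calib_time, jiffies - B43_LO_CALIB_EXPIRE);
}

/* (end of editor's sketch)  Adjust to specific attenuation values: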
*/ +void b43_lo_g_adjust_to(struct b43_wldev *dev, + u16 rfatt, u16 bbatt, u16 tx_control); + +void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all); + +void b43_lo_g_maintanance_work(struct b43_wldev *dev); +void b43_lo_g_cleanup(struct b43_wldev *dev); +void b43_lo_g_init(struct b43_wldev *dev); + +#endif /* B43_LO_H_ */ diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c new file mode 100644 index 0000000..0296be2 --- /dev/null +++ b/drivers/net/wireless/b43/main.c @@ -0,0 +1,5112 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer + Copyright (c) 2005 Stefano Brivio + Copyright (c) 2005-2009 Michael Buesch + Copyright (c) 2005 Danny van Dyk + Copyright (c) 2005 Andreas Jaggi + + SDIO support + Copyright (c) 2009 Albert Herranz + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "b43.h" +#include "main.h" +#include "debugfs.h" +#include "phy_common.h" +#include "phy_g.h" +#include "phy_n.h" +#include "dma.h" +#include "pio.h" +#include "sysfs.h" +#include "xmit.h" +#include "lo.h" +#include "pcmcia.h" +#include "sdio.h" +#include + +MODULE_DESCRIPTION("Broadcom B43 wireless driver"); +MODULE_AUTHOR("Martin Langer"); +MODULE_AUTHOR("Stefano Brivio"); +MODULE_AUTHOR("Michael Buesch"); +MODULE_AUTHOR("Gábor Stefanik"); +MODULE_LICENSE("GPL"); + +MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID); +MODULE_FIRMWARE("b43/ucode11.fw"); +MODULE_FIRMWARE("b43/ucode13.fw"); +MODULE_FIRMWARE("b43/ucode14.fw"); +MODULE_FIRMWARE("b43/ucode15.fw"); +MODULE_FIRMWARE("b43/ucode5.fw"); +MODULE_FIRMWARE("b43/ucode9.fw"); + +static int modparam_bad_frames_preempt; +module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); +MODULE_PARM_DESC(bad_frames_preempt, + "enable(1) / disable(0) Bad Frames Preemption"); + +static char modparam_fwpostfix[16]; +module_param_string(fwpostfix, modparam_fwpostfix, 16, 0444); +MODULE_PARM_DESC(fwpostfix, "Postfix for the .fw files to load."); + +static int modparam_hwpctl; +module_param_named(hwpctl, modparam_hwpctl, int, 0444); +MODULE_PARM_DESC(hwpctl, "Enable hardware-side power control (default off)"); + +static int modparam_nohwcrypt; +module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); + +static int modparam_hwtkip; +module_param_named(hwtkip, modparam_hwtkip, int, 0444); +MODULE_PARM_DESC(hwtkip, "Enable hardware tkip."); + +static int modparam_qos = 1; +module_param_named(qos, modparam_qos, int, 0444); +MODULE_PARM_DESC(qos, "Enable QOS support (default on)"); + +static int modparam_btcoex = 1; 
+module_param_named(btcoex, modparam_btcoex, int, 0444); +MODULE_PARM_DESC(btcoex, "Enable Bluetooth coexistence (default on)"); + +int b43_modparam_verbose = B43_VERBOSITY_DEFAULT; +module_param_named(verbose, b43_modparam_verbose, int, 0644); +MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug"); + +int b43_modparam_pio = B43_PIO_DEFAULT; +module_param_named(pio, b43_modparam_pio, int, 0644); +MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO"); + +static const struct ssb_device_id b43_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 6), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 7), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 12), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16), + SSB_DEVTABLE_END +}; + +MODULE_DEVICE_TABLE(ssb, b43_ssb_tbl); + +/* Channel and ratetables are shared for all devices. + * They can't be const, because ieee80211 puts some precalculated + * data in there. This data is the same for all devices, so we don't + * get concurrency issues */ +#define RATETAB_ENT(_rateid, _flags) \ + { \ + .bitrate = B43_RATE_TO_BASE100KBPS(_rateid), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } + +/* + * NOTE: When changing this, sync with xmit.c's + * b43_plcp_get_bitrate_idx_* functions! + */ +static struct ieee80211_rate __b43_ratetable[] = { + RATETAB_ENT(B43_CCK_RATE_1MB, 0), + RATETAB_ENT(B43_CCK_RATE_2MB, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(B43_CCK_RATE_5MB, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(B43_CCK_RATE_11MB, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(B43_OFDM_RATE_6MB, 0), + RATETAB_ENT(B43_OFDM_RATE_9MB, 0), + RATETAB_ENT(B43_OFDM_RATE_12MB, 0), + RATETAB_ENT(B43_OFDM_RATE_18MB, 0), + RATETAB_ENT(B43_OFDM_RATE_24MB, 0), + RATETAB_ENT(B43_OFDM_RATE_36MB, 0), + RATETAB_ENT(B43_OFDM_RATE_48MB, 0), + RATETAB_ENT(B43_OFDM_RATE_54MB, 0), +}; + +#define b43_a_ratetable (__b43_ratetable + 4) +#define b43_a_ratetable_size 8 +#define b43_b_ratetable (__b43_ratetable + 0) +#define b43_b_ratetable_size 4 +#define b43_g_ratetable (__b43_ratetable + 0) +#define b43_g_ratetable_size 12 + +#define CHAN4G(_channel, _freq, _flags) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} +static struct ieee80211_channel b43_2ghz_chantable[] = { + CHAN4G(1, 2412, 0), + CHAN4G(2, 2417, 0), + CHAN4G(3, 2422, 0), + CHAN4G(4, 2427, 0), + CHAN4G(5, 2432, 0), + CHAN4G(6, 2437, 0), + CHAN4G(7, 2442, 0), + CHAN4G(8, 2447, 0), + CHAN4G(9, 2452, 0), + CHAN4G(10, 2457, 0), + CHAN4G(11, 2462, 0), + CHAN4G(12, 2467, 0), + CHAN4G(13, 2472, 0), + CHAN4G(14, 2484, 0), +}; +#undef CHAN4G + +#define CHAN5G(_channel, _flags) { \ + .band = IEEE80211_BAND_5GHZ, \ + .center_freq = 5000 + (5 * (_channel)), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} +static struct ieee80211_channel b43_5ghz_nphy_chantable[] = { + CHAN5G(32, 0), CHAN5G(34, 0), + CHAN5G(36, 0), CHAN5G(38, 0), + CHAN5G(40, 0), CHAN5G(42, 0), + CHAN5G(44, 0), CHAN5G(46, 0), + CHAN5G(48, 0), CHAN5G(50, 0), + CHAN5G(52, 0), CHAN5G(54, 0), + CHAN5G(56, 
0), CHAN5G(58, 0), + CHAN5G(60, 0), CHAN5G(62, 0), + CHAN5G(64, 0), CHAN5G(66, 0), + CHAN5G(68, 0), CHAN5G(70, 0), + CHAN5G(72, 0), CHAN5G(74, 0), + CHAN5G(76, 0), CHAN5G(78, 0), + CHAN5G(80, 0), CHAN5G(82, 0), + CHAN5G(84, 0), CHAN5G(86, 0), + CHAN5G(88, 0), CHAN5G(90, 0), + CHAN5G(92, 0), CHAN5G(94, 0), + CHAN5G(96, 0), CHAN5G(98, 0), + CHAN5G(100, 0), CHAN5G(102, 0), + CHAN5G(104, 0), CHAN5G(106, 0), + CHAN5G(108, 0), CHAN5G(110, 0), + CHAN5G(112, 0), CHAN5G(114, 0), + CHAN5G(116, 0), CHAN5G(118, 0), + CHAN5G(120, 0), CHAN5G(122, 0), + CHAN5G(124, 0), CHAN5G(126, 0), + CHAN5G(128, 0), CHAN5G(130, 0), + CHAN5G(132, 0), CHAN5G(134, 0), + CHAN5G(136, 0), CHAN5G(138, 0), + CHAN5G(140, 0), CHAN5G(142, 0), + CHAN5G(144, 0), CHAN5G(145, 0), + CHAN5G(146, 0), CHAN5G(147, 0), + CHAN5G(148, 0), CHAN5G(149, 0), + CHAN5G(150, 0), CHAN5G(151, 0), + CHAN5G(152, 0), CHAN5G(153, 0), + CHAN5G(154, 0), CHAN5G(155, 0), + CHAN5G(156, 0), CHAN5G(157, 0), + CHAN5G(158, 0), CHAN5G(159, 0), + CHAN5G(160, 0), CHAN5G(161, 0), + CHAN5G(162, 0), CHAN5G(163, 0), + CHAN5G(164, 0), CHAN5G(165, 0), + CHAN5G(166, 0), CHAN5G(168, 0), + CHAN5G(170, 0), CHAN5G(172, 0), + CHAN5G(174, 0), CHAN5G(176, 0), + CHAN5G(178, 0), CHAN5G(180, 0), + CHAN5G(182, 0), CHAN5G(184, 0), + CHAN5G(186, 0), CHAN5G(188, 0), + CHAN5G(190, 0), CHAN5G(192, 0), + CHAN5G(194, 0), CHAN5G(196, 0), + CHAN5G(198, 0), CHAN5G(200, 0), + CHAN5G(202, 0), CHAN5G(204, 0), + CHAN5G(206, 0), CHAN5G(208, 0), + CHAN5G(210, 0), CHAN5G(212, 0), + CHAN5G(214, 0), CHAN5G(216, 0), + CHAN5G(218, 0), CHAN5G(220, 0), + CHAN5G(222, 0), CHAN5G(224, 0), + CHAN5G(226, 0), CHAN5G(228, 0), +}; + +static struct ieee80211_channel b43_5ghz_aphy_chantable[] = { + CHAN5G(34, 0), CHAN5G(36, 0), + CHAN5G(38, 0), CHAN5G(40, 0), + CHAN5G(42, 0), CHAN5G(44, 0), + CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(149, 0), + CHAN5G(153, 0), CHAN5G(157, 0), + CHAN5G(161, 0), CHAN5G(165, 0), + CHAN5G(184, 0), CHAN5G(188, 0), + CHAN5G(192, 0), CHAN5G(196, 0), + CHAN5G(200, 0), CHAN5G(204, 0), + CHAN5G(208, 0), CHAN5G(212, 0), + CHAN5G(216, 0), +}; +#undef CHAN5G + +static struct ieee80211_supported_band b43_band_5GHz_nphy = { + .band = IEEE80211_BAND_5GHZ, + .channels = b43_5ghz_nphy_chantable, + .n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable), + .bitrates = b43_a_ratetable, + .n_bitrates = b43_a_ratetable_size, +}; + +static struct ieee80211_supported_band b43_band_5GHz_aphy = { + .band = IEEE80211_BAND_5GHZ, + .channels = b43_5ghz_aphy_chantable, + .n_channels = ARRAY_SIZE(b43_5ghz_aphy_chantable), + .bitrates = b43_a_ratetable, + .n_bitrates = b43_a_ratetable_size, +}; + +static struct ieee80211_supported_band b43_band_2GHz = { + .band = IEEE80211_BAND_2GHZ, + .channels = b43_2ghz_chantable, + .n_channels = ARRAY_SIZE(b43_2ghz_chantable), + .bitrates = b43_g_ratetable, + .n_bitrates = b43_g_ratetable_size, +}; + +static void b43_wireless_core_exit(struct b43_wldev *dev); +static int b43_wireless_core_init(struct b43_wldev *dev); +static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev); +static int b43_wireless_core_start(struct b43_wldev *dev); + +static int b43_ratelimit(struct b43_wl *wl) +{ + if (!wl || !wl->current_dev) + return 1; + if (b43_status(wl->current_dev) < B43_STAT_STARTED) + return 1; + /* We are up and running. 
+ * Ratelimit the messages to avoid DoS over the net. */ + return net_ratelimit(); +} + +void b43info(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (b43_modparam_verbose < B43_VERBOSITY_INFO) + return; + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_INFO "b43-%s: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43err(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (b43_modparam_verbose < B43_VERBOSITY_ERROR) + return; + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_ERR "b43-%s ERROR: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43warn(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (b43_modparam_verbose < B43_VERBOSITY_WARN) + return; + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_WARNING "b43-%s warning: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43dbg(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (b43_modparam_verbose < B43_VERBOSITY_DEBUG) + return; + va_start(args, fmt); + printk(KERN_DEBUG "b43-%s debug: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +static void b43_ram_write(struct b43_wldev *dev, u16 offset, u32 val) +{ + u32 macctl; + + B43_WARN_ON(offset % 4 != 0); + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + if (macctl & B43_MACCTL_BE) + val = swab32(val); + + b43_write32(dev, B43_MMIO_RAM_CONTROL, offset); + mmiowb(); + b43_write32(dev, B43_MMIO_RAM_DATA, val); +} + +static inline void b43_shm_control_word(struct b43_wldev *dev, + u16 routing, u16 offset) +{ + u32 control; + + /* "offset" is the WORD offset. 
*/ + control = routing; + control <<= 16; + control |= offset; + b43_write32(dev, B43_MMIO_SHM_CONTROL, control); +} + +u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset) +{ + u32 ret; + + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + ret = b43_read16(dev, B43_MMIO_SHM_DATA_UNALIGNED); + b43_shm_control_word(dev, routing, (offset >> 2) + 1); + ret |= ((u32)b43_read16(dev, B43_MMIO_SHM_DATA)) << 16; + + goto out; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + ret = b43_read32(dev, B43_MMIO_SHM_DATA); +out: + return ret; +} + +u16 b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset) +{ + u16 ret; + + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + ret = b43_read16(dev, B43_MMIO_SHM_DATA_UNALIGNED); + + goto out; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + ret = b43_read16(dev, B43_MMIO_SHM_DATA); +out: + return ret; +} + +void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value) +{ + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + b43_write16(dev, B43_MMIO_SHM_DATA_UNALIGNED, + value & 0xFFFF); + b43_shm_control_word(dev, routing, (offset >> 2) + 1); + b43_write16(dev, B43_MMIO_SHM_DATA, + (value >> 16) & 0xFFFF); + return; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + b43_write32(dev, B43_MMIO_SHM_DATA, value); +} + +void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value) +{ + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + b43_write16(dev, B43_MMIO_SHM_DATA_UNALIGNED, value); + return; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + b43_write16(dev, B43_MMIO_SHM_DATA, value); +} + +/* Read HostFlags */ +u64 b43_hf_read(struct b43_wldev *dev) +{ + u64 ret; + + ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI); + ret <<= 16; + ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI); + ret <<= 16; + ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO); + + return ret; +} + +/* Write HostFlags */ +void b43_hf_write(struct b43_wldev *dev, u64 value) +{ + u16 lo, mi, hi; + + lo = (value & 0x00000000FFFFULL); + mi = (value & 0x0000FFFF0000ULL) >> 16; + hi = (value & 0xFFFF00000000ULL) >> 32; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi); +} + +/* Read the firmware capabilities bitmask (Opensource firmware only) */ +static u16 b43_fwcapa_read(struct b43_wldev *dev) +{ + B43_WARN_ON(!dev->fw.opensource); + return b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_FWCAPA); +} + +void b43_tsf_read(struct b43_wldev *dev, u64 *tsf) +{ + u32 low, high; + + B43_WARN_ON(dev->dev->id.revision < 3); + + /* The hardware guarantees us an atomic read, if we + * read the low register first. 
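
The b43_hf_read()/b43_hf_write() helpers above treat the firmware host flags as one 48-bit value split across three 16-bit shared-memory words (LO/MI/HI). The following is a minimal standalone sketch of that packing and reassembly, not driver code; the hf_split()/hf_join() names and the plain array standing in for the shared-memory words are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* shm[0..2] stand in for the B43_SHM_SH_HOSTFLO/FMI/FHI words. */
static void hf_split(uint64_t value, uint16_t shm[3])
{
	shm[0] = value & 0xFFFFULL;          /* LO */
	shm[1] = (value >> 16) & 0xFFFFULL;  /* MI */
	shm[2] = (value >> 32) & 0xFFFFULL;  /* HI */
}

static uint64_t hf_join(const uint16_t shm[3])
{
	return ((uint64_t)shm[2] << 32) | ((uint64_t)shm[1] << 16) | shm[0];
}

int main(void)
{
	uint16_t shm[3];
	uint64_t hf = 0x123456789ABCULL;

	hf_split(hf, shm);
	printf("round-trip ok: %d\n", hf_join(shm) == hf);
	return 0;
}
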
*/ + low = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_LOW); + high = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_HIGH); + + *tsf = high; + *tsf <<= 32; + *tsf |= low; +} + +static void b43_time_lock(struct b43_wldev *dev) +{ + u32 macctl; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl |= B43_MACCTL_TBTTHOLD; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write */ + b43_read32(dev, B43_MMIO_MACCTL); +} + +static void b43_time_unlock(struct b43_wldev *dev) +{ + u32 macctl; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_TBTTHOLD; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write */ + b43_read32(dev, B43_MMIO_MACCTL); +} + +static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf) +{ + u32 low, high; + + B43_WARN_ON(dev->dev->id.revision < 3); + + low = tsf; + high = (tsf >> 32); + /* The hardware guarantees us an atomic write, if we + * write the low register first. */ + b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, low); + mmiowb(); + b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, high); + mmiowb(); +} + +void b43_tsf_write(struct b43_wldev *dev, u64 tsf) +{ + b43_time_lock(dev); + b43_tsf_write_locked(dev, tsf); + b43_time_unlock(dev); +} + +static +void b43_macfilter_set(struct b43_wldev *dev, u16 offset, const u8 *mac) +{ + static const u8 zero_addr[ETH_ALEN] = { 0 }; + u16 data; + + if (!mac) + mac = zero_addr; + + offset |= 0x0020; + b43_write16(dev, B43_MMIO_MACFILTER_CONTROL, offset); + + data = mac[0]; + data |= mac[1] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); + data = mac[2]; + data |= mac[3] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); + data = mac[4]; + data |= mac[5] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); +} + +static void b43_write_mac_bssid_templates(struct b43_wldev *dev) +{ + const u8 *mac; + const u8 *bssid; + u8 mac_bssid[ETH_ALEN * 2]; + int i; + u32 tmp; + + bssid = dev->wl->bssid; + mac = dev->wl->mac_addr; + + b43_macfilter_set(dev, B43_MACFILTER_BSSID, bssid); + + memcpy(mac_bssid, mac, ETH_ALEN); + memcpy(mac_bssid + ETH_ALEN, bssid, ETH_ALEN); + + /* Write our MAC address and BSSID to template ram */ + for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32)) { + tmp = (u32) (mac_bssid[i + 0]); + tmp |= (u32) (mac_bssid[i + 1]) << 8; + tmp |= (u32) (mac_bssid[i + 2]) << 16; + tmp |= (u32) (mac_bssid[i + 3]) << 24; + b43_ram_write(dev, 0x20 + i, tmp); + } +} + +static void b43_upload_card_macaddress(struct b43_wldev *dev) +{ + b43_write_mac_bssid_templates(dev); + b43_macfilter_set(dev, B43_MACFILTER_SELF, dev->wl->mac_addr); +} + +static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) +{ + /* slot_time is in usec. */ + /* This test used to exit for all but a G PHY. */ + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + return; + b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); + /* Shared memory location 0x0010 is the slot time and should be + * set to slot_time; however, this register is initially 0 and changing + * the value adversely affects the transmit rate for BCM4311 + * devices. 
Until this behavior is understood, delete this step + * + * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); + */ +} + +static void b43_short_slot_timing_enable(struct b43_wldev *dev) +{ + b43_set_slot_time(dev, 9); +} + +static void b43_short_slot_timing_disable(struct b43_wldev *dev) +{ + b43_set_slot_time(dev, 20); +} + +/* DummyTransmission function, as documented on + * http://bcm-v4.sipsolutions.net/802.11/DummyTransmission + */ +void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on) +{ + struct b43_phy *phy = &dev->phy; + unsigned int i, max_loop; + u16 value; + u32 buffer[5] = { + 0x00000000, + 0x00D40000, + 0x00000000, + 0x01000000, + 0x00000000, + }; + + if (ofdm) { + max_loop = 0x1E; + buffer[0] = 0x000201CC; + } else { + max_loop = 0xFA; + buffer[0] = 0x000B846E; + } + + for (i = 0; i < 5; i++) + b43_ram_write(dev, i * 4, buffer[i]); + + b43_write16(dev, 0x0568, 0x0000); + if (dev->dev->id.revision < 11) + b43_write16(dev, 0x07C0, 0x0000); + else + b43_write16(dev, 0x07C0, 0x0100); + value = (ofdm ? 0x41 : 0x40); + b43_write16(dev, 0x050C, value); + if ((phy->type == B43_PHYTYPE_N) || (phy->type == B43_PHYTYPE_LP)) + b43_write16(dev, 0x0514, 0x1A02); + b43_write16(dev, 0x0508, 0x0000); + b43_write16(dev, 0x050A, 0x0000); + b43_write16(dev, 0x054C, 0x0000); + b43_write16(dev, 0x056A, 0x0014); + b43_write16(dev, 0x0568, 0x0826); + b43_write16(dev, 0x0500, 0x0000); + if (!pa_on && (phy->type == B43_PHYTYPE_N)) { + //SPEC TODO + } + + switch (phy->type) { + case B43_PHYTYPE_N: + b43_write16(dev, 0x0502, 0x00D0); + break; + case B43_PHYTYPE_LP: + b43_write16(dev, 0x0502, 0x0050); + break; + default: + b43_write16(dev, 0x0502, 0x0030); + } + + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43_radio_write16(dev, 0x0051, 0x0017); + for (i = 0x00; i < max_loop; i++) { + value = b43_read16(dev, 0x050E); + if (value & 0x0080) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43_read16(dev, 0x050E); + if (value & 0x0400) + break; + udelay(10); + } + for (i = 0x00; i < 0x19; i++) { + value = b43_read16(dev, 0x0690); + if (!(value & 0x0100)) + break; + udelay(10); + } + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43_radio_write16(dev, 0x0051, 0x0037); +} + +static void key_write(struct b43_wldev *dev, + u8 index, u8 algorithm, const u8 *key) +{ + unsigned int i; + u32 offset; + u16 value; + u16 kidx; + + /* Key index/algo block */ + kidx = b43_kidx_to_fw(dev, index); + value = ((kidx << 4) | algorithm); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_KEYIDXBLOCK + (kidx * 2), value); + + /* Write the key to the Key Table Pointer offset */ + offset = dev->ktp + (index * B43_SEC_KEYSIZE); + for (i = 0; i < B43_SEC_KEYSIZE; i += 2) { + value = key[i]; + value |= (u16) (key[i + 1]) << 8; + b43_shm_write16(dev, B43_SHM_SHARED, offset + i, value); + } +} + +static void keymac_write(struct b43_wldev *dev, u8 index, const u8 *addr) +{ + u32 addrtmp[2] = { 0, 0, }; + u8 pairwise_keys_start = B43_NR_GROUP_KEYS * 2; + + if (b43_new_kidx_api(dev)) + pairwise_keys_start = B43_NR_GROUP_KEYS; + + B43_WARN_ON(index < pairwise_keys_start); + /* We have four default TX keys and possibly four default RX keys. + * Physical mac 0 is mapped to physical key 4 or 8, depending + * on the firmware version. + * So we must adjust the index here. 
+ */ + index -= pairwise_keys_start; + B43_WARN_ON(index >= B43_NR_PAIRWISE_KEYS); + + if (addr) { + addrtmp[0] = addr[0]; + addrtmp[0] |= ((u32) (addr[1]) << 8); + addrtmp[0] |= ((u32) (addr[2]) << 16); + addrtmp[0] |= ((u32) (addr[3]) << 24); + addrtmp[1] = addr[4]; + addrtmp[1] |= ((u32) (addr[5]) << 8); + } + + /* Receive match transmitter address (RCMTA) mechanism */ + b43_shm_write32(dev, B43_SHM_RCMTA, + (index * 2) + 0, addrtmp[0]); + b43_shm_write16(dev, B43_SHM_RCMTA, + (index * 2) + 1, addrtmp[1]); +} + +/* The ucode will use the phase1 key with the TEK key to decrypt rx packets. + * When a packet is received, its iv32 is checked against the stored one. + * - if it doesn't match, the packet is returned without modification (and software + * decryption can be done). That's what happens when iv16 wraps. + * - if it does match, the rc4 key is computed, and decryption is tried. + * Either it succeeds and B43_RX_MAC_DEC is returned, + * or it fails and B43_RX_MAC_DEC|B43_RX_MAC_DECERR is returned + * and the packet is not usable (it got modified by the ucode). + * So in order to never have B43_RX_MAC_DECERR, we should provide + * an iv32 and phase1key that match. Because we drop packets in case of + * B43_RX_MAC_DECERR, if we have a correct iv32 but a wrong phase1key, all + * packets will be lost without the higher layer knowing (i.e. no resync is possible + * until the next wrap). + * + * NOTE: this should support 50 keys, like the RCMTA, because + * (B43_SHM_SH_KEYIDXBLOCK - B43_SHM_SH_TKIPTSCTTAK)/14 = 50 + */ +static void rx_tkip_phase1_write(struct b43_wldev *dev, u8 index, u32 iv32, + u16 *phase1key) +{ + unsigned int i; + u32 offset; + u8 pairwise_keys_start = B43_NR_GROUP_KEYS * 2; + + if (!modparam_hwtkip) + return; + + if (b43_new_kidx_api(dev)) + pairwise_keys_start = B43_NR_GROUP_KEYS; + + B43_WARN_ON(index < pairwise_keys_start); + /* We have four default TX keys and possibly four default RX keys. + * Physical mac 0 is mapped to physical key 4 or 8, depending + * on the firmware version. + * So we must adjust the index here. + */ + index -= pairwise_keys_start; + B43_WARN_ON(index >= B43_NR_PAIRWISE_KEYS); + + if (b43_debug(dev, B43_DBG_KEYS)) { + b43dbg(dev->wl, "rx_tkip_phase1_write : idx 0x%x, iv32 0x%x\n", + index, iv32); + } + /* Write the key to the RX tkip shared mem */ + offset = B43_SHM_SH_TKIPTSCTTAK + index * (10 + 4); + for (i = 0; i < 10; i += 2) { + b43_shm_write16(dev, B43_SHM_SHARED, offset + i, + phase1key ? phase1key[i / 2] : 0); + } + b43_shm_write16(dev, B43_SHM_SHARED, offset + i, iv32); + b43_shm_write16(dev, B43_SHM_SHARED, offset + i + 2, iv32 >> 16); +} + +static void b43_op_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + int index = keyconf->hw_key_idx; + + if (B43_WARN_ON(!modparam_hwtkip)) + return; + + /* This is only called from the RX path through mac80211, where + * our mutex is already locked. 
*/ + B43_WARN_ON(!mutex_is_locked(&wl->mutex)); + dev = wl->current_dev; + B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED); + + keymac_write(dev, index, NULL); /* First zero out mac to avoid race */ + + rx_tkip_phase1_write(dev, index, iv32, phase1key); + /* only pairwise TKIP keys are supported right now */ + if (WARN_ON(!sta)) + return; + keymac_write(dev, index, sta->addr); +} + +static void do_key_write(struct b43_wldev *dev, + u8 index, u8 algorithm, + const u8 *key, size_t key_len, const u8 *mac_addr) +{ + u8 buf[B43_SEC_KEYSIZE] = { 0, }; + u8 pairwise_keys_start = B43_NR_GROUP_KEYS * 2; + + if (b43_new_kidx_api(dev)) + pairwise_keys_start = B43_NR_GROUP_KEYS; + + B43_WARN_ON(index >= ARRAY_SIZE(dev->key)); + B43_WARN_ON(key_len > B43_SEC_KEYSIZE); + + if (index >= pairwise_keys_start) + keymac_write(dev, index, NULL); /* First zero out mac. */ + if (algorithm == B43_SEC_ALGO_TKIP) { + /* + * We should provide an initial iv32, phase1key pair. + * We could start with iv32=0 and compute the corresponding + * phase1key, but this means calling ieee80211_get_tkip_key + * with a fake skb (or exporting another tkip function). + * Because we are lazy we hope iv32 won't start with + * 0xffffffff and let b43_op_update_tkip_key provide a + * correct pair. + */ + rx_tkip_phase1_write(dev, index, 0xffffffff, (u16*)buf); + } else if (index >= pairwise_keys_start) /* clear it */ + rx_tkip_phase1_write(dev, index, 0, NULL); + if (key) + memcpy(buf, key, key_len); + key_write(dev, index, algorithm, buf); + if (index >= pairwise_keys_start) + keymac_write(dev, index, mac_addr); + + dev->key[index].algorithm = algorithm; +} + +static int b43_key_write(struct b43_wldev *dev, + int index, u8 algorithm, + const u8 *key, size_t key_len, + const u8 *mac_addr, + struct ieee80211_key_conf *keyconf) +{ + int i; + int pairwise_keys_start; + + /* For ALG_TKIP the key is encoded as a 256-bit (32 byte) data block: + * - Temporal Encryption Key (128 bits) + * - Temporal Authenticator Tx MIC Key (64 bits) + * - Temporal Authenticator Rx MIC Key (64 bits) + * + * The hardware only stores the TEK + */ + if (algorithm == B43_SEC_ALGO_TKIP && key_len == 32) + key_len = 16; + if (key_len > B43_SEC_KEYSIZE) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(dev->key); i++) { + /* Check that we don't already have this key. */ + B43_WARN_ON(dev->key[i].keyconf == keyconf); + } + if (index < 0) { + /* Pairwise key. Get an empty slot for the key. 
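
rx_tkip_phase1_write() above gives every pairwise key a 14-byte slot in shared memory: five 16-bit phase1 words followed by iv32 written as two 16-bit halves, at byte offset index * (10 + 4) from B43_SHM_SH_TKIPTSCTTAK. A standalone sketch of just that layout arithmetic; tkip_slot_write() and the shm16[] array are illustrative stand-ins, and 'index' is assumed to already have the group-key offset subtracted:

#include <stdint.h>
#include <stdio.h>

#define TKIP_SLOT_BYTES (10 + 4)  /* 5 phase1 words + the 4-byte iv32 */

/* shm16[] stands in for 16-bit shared-memory words starting at the
 * (hypothetical) TKIP slot base. */
static void tkip_slot_write(uint16_t *shm16, unsigned int index,
			    uint32_t iv32, const uint16_t phase1[5])
{
	unsigned int base = (index * TKIP_SLOT_BYTES) / 2; /* word offset */
	unsigned int i;

	for (i = 0; i < 5; i++)
		shm16[base + i] = phase1 ? phase1[i] : 0;
	shm16[base + 5] = iv32 & 0xFFFF;   /* low half  */
	shm16[base + 6] = iv32 >> 16;      /* high half */
}

int main(void)
{
	uint16_t shm16[7 * 8] = { 0 };
	const uint16_t p1[5] = { 1, 2, 3, 4, 5 };

	tkip_slot_write(shm16, 3, 0xAABBCCDD, p1);
	printf("slot 3 iv32 lo/hi: %04X %04X\n",
	       (unsigned)shm16[3 * 7 + 5], (unsigned)shm16[3 * 7 + 6]);
	return 0;
}
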
*/ + if (b43_new_kidx_api(dev)) + pairwise_keys_start = B43_NR_GROUP_KEYS; + else + pairwise_keys_start = B43_NR_GROUP_KEYS * 2; + for (i = pairwise_keys_start; + i < pairwise_keys_start + B43_NR_PAIRWISE_KEYS; + i++) { + B43_WARN_ON(i >= ARRAY_SIZE(dev->key)); + if (!dev->key[i].keyconf) { + /* found empty */ + index = i; + break; + } + } + if (index < 0) { + b43warn(dev->wl, "Out of hardware key memory\n"); + return -ENOSPC; + } + } else + B43_WARN_ON(index > 3); + + do_key_write(dev, index, algorithm, key, key_len, mac_addr); + if ((index <= 3) && !b43_new_kidx_api(dev)) { + /* Default RX key */ + B43_WARN_ON(mac_addr); + do_key_write(dev, index + 4, algorithm, key, key_len, NULL); + } + keyconf->hw_key_idx = index; + dev->key[index].keyconf = keyconf; + + return 0; +} + +static int b43_key_clear(struct b43_wldev *dev, int index) +{ + if (B43_WARN_ON((index < 0) || (index >= ARRAY_SIZE(dev->key)))) + return -EINVAL; + do_key_write(dev, index, B43_SEC_ALGO_NONE, + NULL, B43_SEC_KEYSIZE, NULL); + if ((index <= 3) && !b43_new_kidx_api(dev)) { + do_key_write(dev, index + 4, B43_SEC_ALGO_NONE, + NULL, B43_SEC_KEYSIZE, NULL); + } + dev->key[index].keyconf = NULL; + + return 0; +} + +static void b43_clear_keys(struct b43_wldev *dev) +{ + int i, count; + + if (b43_new_kidx_api(dev)) + count = B43_NR_GROUP_KEYS + B43_NR_PAIRWISE_KEYS; + else + count = B43_NR_GROUP_KEYS * 2 + B43_NR_PAIRWISE_KEYS; + for (i = 0; i < count; i++) + b43_key_clear(dev, i); +} + +static void b43_dump_keymemory(struct b43_wldev *dev) +{ + unsigned int i, index, count, offset, pairwise_keys_start; + u8 mac[ETH_ALEN]; + u16 algo; + u32 rcmta0; + u16 rcmta1; + u64 hf; + struct b43_key *key; + + if (!b43_debug(dev, B43_DBG_KEYS)) + return; + + hf = b43_hf_read(dev); + b43dbg(dev->wl, "Hardware key memory dump: USEDEFKEYS=%u\n", + !!(hf & B43_HF_USEDEFKEYS)); + if (b43_new_kidx_api(dev)) { + pairwise_keys_start = B43_NR_GROUP_KEYS; + count = B43_NR_GROUP_KEYS + B43_NR_PAIRWISE_KEYS; + } else { + pairwise_keys_start = B43_NR_GROUP_KEYS * 2; + count = B43_NR_GROUP_KEYS * 2 + B43_NR_PAIRWISE_KEYS; + } + for (index = 0; index < count; index++) { + key = &(dev->key[index]); + printk(KERN_DEBUG "Key slot %02u: %s", + index, (key->keyconf == NULL) ? 
" " : "*"); + offset = dev->ktp + (index * B43_SEC_KEYSIZE); + for (i = 0; i < B43_SEC_KEYSIZE; i += 2) { + u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, offset + i); + printk("%02X%02X", (tmp & 0xFF), ((tmp >> 8) & 0xFF)); + } + + algo = b43_shm_read16(dev, B43_SHM_SHARED, + B43_SHM_SH_KEYIDXBLOCK + (index * 2)); + printk(" Algo: %04X/%02X", algo, key->algorithm); + + if (index >= pairwise_keys_start) { + if (key->algorithm == B43_SEC_ALGO_TKIP) { + printk(" TKIP: "); + offset = B43_SHM_SH_TKIPTSCTTAK + (index - 4) * (10 + 4); + for (i = 0; i < 14; i += 2) { + u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, offset + i); + printk("%02X%02X", (tmp & 0xFF), ((tmp >> 8) & 0xFF)); + } + } + rcmta0 = b43_shm_read32(dev, B43_SHM_RCMTA, + ((index - pairwise_keys_start) * 2) + 0); + rcmta1 = b43_shm_read16(dev, B43_SHM_RCMTA, + ((index - pairwise_keys_start) * 2) + 1); + *((__le32 *)(&mac[0])) = cpu_to_le32(rcmta0); + *((__le16 *)(&mac[4])) = cpu_to_le16(rcmta1); + printk(" MAC: %pM", mac); + } else + printk(" DEFAULT KEY"); + printk("\n"); + } +} + +void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) +{ + u32 macctl; + u16 ucstat; + bool hwps; + bool awake; + int i; + + B43_WARN_ON((ps_flags & B43_PS_ENABLED) && + (ps_flags & B43_PS_DISABLED)); + B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP)); + + if (ps_flags & B43_PS_ENABLED) { + hwps = 1; + } else if (ps_flags & B43_PS_DISABLED) { + hwps = 0; + } else { + //TODO: If powersave is not off and FIXME is not set and we are not in adhoc + // and thus is not an AP and we are associated, set bit 25 + } + if (ps_flags & B43_PS_AWAKE) { + awake = 1; + } else if (ps_flags & B43_PS_ASLEEP) { + awake = 0; + } else { + //TODO: If the device is awake or this is an AP, or we are scanning, or FIXME, + // or we are associated, or FIXME, or the latest PS-Poll packet sent was + // successful, set bit26 + } + +/* FIXME: For now we force awake-on and hwps-off */ + hwps = 0; + awake = 1; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + if (hwps) + macctl |= B43_MACCTL_HWPS; + else + macctl &= ~B43_MACCTL_HWPS; + if (awake) + macctl |= B43_MACCTL_AWAKE; + else + macctl &= ~B43_MACCTL_AWAKE; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit write */ + b43_read32(dev, B43_MMIO_MACCTL); + if (awake && dev->dev->id.revision >= 5) { + /* Wait for the microcode to wake up. */ + for (i = 0; i < 100; i++) { + ucstat = b43_shm_read16(dev, B43_SHM_SHARED, + B43_SHM_SH_UCODESTAT); + if (ucstat != B43_SHM_SH_UCODESTAT_SLEEP) + break; + udelay(10); + } + } +} + +void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) +{ + u32 tmslow; + u32 macctl; + + flags |= B43_TMSLOW_PHYCLKEN; + flags |= B43_TMSLOW_PHYRESET; + ssb_device_enable(dev->dev, flags); + msleep(2); /* Wait for the PLL to turn on. */ + + /* Now take the PHY out of Reset again */ + tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + tmslow |= SSB_TMSLOW_FGC; + tmslow &= ~B43_TMSLOW_PHYRESET; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + tmslow &= ~SSB_TMSLOW_FGC; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + + /* Turn Analog ON, but only if we already know the PHY-type. + * This protects against very early setup where we don't know the + * PHY-type, yet. wireless_core_reset will be called once again later, + * when we know the PHY-type. 
*/ + if (dev->phy.ops) + dev->phy.ops->switch_analog(dev, 1); + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_GMODE; + if (flags & B43_TMSLOW_GMODE) + macctl |= B43_MACCTL_GMODE; + macctl |= B43_MACCTL_IHR_ENABLED; + b43_write32(dev, B43_MMIO_MACCTL, macctl); +} + +static void handle_irq_transmit_status(struct b43_wldev *dev) +{ + u32 v0, v1; + u16 tmp; + struct b43_txstatus stat; + + while (1) { + v0 = b43_read32(dev, B43_MMIO_XMITSTAT_0); + if (!(v0 & 0x00000001)) + break; + v1 = b43_read32(dev, B43_MMIO_XMITSTAT_1); + + stat.cookie = (v0 >> 16); + stat.seq = (v1 & 0x0000FFFF); + stat.phy_stat = ((v1 & 0x00FF0000) >> 16); + tmp = (v0 & 0x0000FFFF); + stat.frame_count = ((tmp & 0xF000) >> 12); + stat.rts_count = ((tmp & 0x0F00) >> 8); + stat.supp_reason = ((tmp & 0x001C) >> 2); + stat.pm_indicated = !!(tmp & 0x0080); + stat.intermediate = !!(tmp & 0x0040); + stat.for_ampdu = !!(tmp & 0x0020); + stat.acked = !!(tmp & 0x0002); + + b43_handle_txstatus(dev, &stat); + } +} + +static void drain_txstatus_queue(struct b43_wldev *dev) +{ + u32 dummy; + + if (dev->dev->id.revision < 5) + return; + /* Read all entries from the microcode TXstatus FIFO + * and throw them away. + */ + while (1) { + dummy = b43_read32(dev, B43_MMIO_XMITSTAT_0); + if (!(dummy & 0x00000001)) + break; + dummy = b43_read32(dev, B43_MMIO_XMITSTAT_1); + } +} + +static u32 b43_jssi_read(struct b43_wldev *dev) +{ + u32 val = 0; + + val = b43_shm_read16(dev, B43_SHM_SHARED, 0x08A); + val <<= 16; + val |= b43_shm_read16(dev, B43_SHM_SHARED, 0x088); + + return val; +} + +static void b43_jssi_write(struct b43_wldev *dev, u32 jssi) +{ + b43_shm_write16(dev, B43_SHM_SHARED, 0x088, (jssi & 0x0000FFFF)); + b43_shm_write16(dev, B43_SHM_SHARED, 0x08A, (jssi & 0xFFFF0000) >> 16); +} + +static void b43_generate_noise_sample(struct b43_wldev *dev) +{ + b43_jssi_write(dev, 0x7F7F7F7F); + b43_write32(dev, B43_MMIO_MACCMD, + b43_read32(dev, B43_MMIO_MACCMD) | B43_MACCMD_BGNOISE); +} + +static void b43_calculate_link_quality(struct b43_wldev *dev) +{ + /* Top half of Link Quality calculation. */ + + if (dev->phy.type != B43_PHYTYPE_G) + return; + if (dev->noisecalc.calculation_running) + return; + dev->noisecalc.calculation_running = 1; + dev->noisecalc.nr_samples = 0; + + b43_generate_noise_sample(dev); +} + +static void handle_irq_noise(struct b43_wldev *dev) +{ + struct b43_phy_g *phy = dev->phy.g; + u16 tmp; + u8 noise[4]; + u8 i, j; + s32 average; + + /* Bottom half of Link Quality calculation. */ + + if (dev->phy.type != B43_PHYTYPE_G) + return; + + /* Possible race condition: It might be possible that the user + * changed to a different channel in the meantime since we + * started the calculation. We ignore that fact, since it's + * not really that much of a problem. The background noise is + * an estimation only anyway. Slightly wrong results will get damped + * by the averaging of the 8 sample rounds. Additionally the + * value is shortlived. So it will be replaced by the next noise + * calculation round soon. */ + + B43_WARN_ON(!dev->noisecalc.calculation_running); + *((__le32 *)noise) = cpu_to_le32(b43_jssi_read(dev)); + if (noise[0] == 0x7F || noise[1] == 0x7F || + noise[2] == 0x7F || noise[3] == 0x7F) + goto generate_new; + + /* Get the noise samples. 
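
handle_irq_transmit_status() above pops entries from the TX-status FIFO and decodes them from the two XMITSTAT words: the cookie sits in the upper half of the first word and the counters/flags are packed into its lower half. A standalone sketch of that bit layout; the struct and the sample register values are made up for the demo, field names follow the driver's b43_txstatus:

#include <stdint.h>
#include <stdio.h>

struct txstat {
	uint16_t cookie, seq;
	uint8_t phy_stat, frame_count, rts_count, supp_reason;
	int pm_indicated, intermediate, for_ampdu, acked;
};

static void unpack_txstat(uint32_t v0, uint32_t v1, struct txstat *s)
{
	uint16_t tmp = v0 & 0xFFFF;

	s->cookie = v0 >> 16;
	s->seq = v1 & 0xFFFF;
	s->phy_stat = (v1 & 0x00FF0000) >> 16;
	s->frame_count = (tmp & 0xF000) >> 12;
	s->rts_count = (tmp & 0x0F00) >> 8;
	s->supp_reason = (tmp & 0x001C) >> 2;
	s->pm_indicated = !!(tmp & 0x0080);
	s->intermediate = !!(tmp & 0x0040);
	s->for_ampdu = !!(tmp & 0x0020);
	s->acked = !!(tmp & 0x0002);
}

int main(void)
{
	struct txstat s;

	unpack_txstat(0x12341082, 0x00AA0042, &s);  /* fabricated values */
	printf("cookie=%04X acked=%d frames=%u\n",
	       (unsigned)s.cookie, s.acked, (unsigned)s.frame_count);
	return 0;
}
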
*/ + B43_WARN_ON(dev->noisecalc.nr_samples >= 8); + i = dev->noisecalc.nr_samples; + noise[0] = clamp_val(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[1] = clamp_val(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[2] = clamp_val(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[3] = clamp_val(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; + dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; + dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; + dev->noisecalc.samples[i][3] = phy->nrssi_lt[noise[3]]; + dev->noisecalc.nr_samples++; + if (dev->noisecalc.nr_samples == 8) { + /* Calculate the Link Quality by the noise samples. */ + average = 0; + for (i = 0; i < 8; i++) { + for (j = 0; j < 4; j++) + average += dev->noisecalc.samples[i][j]; + } + average /= (8 * 4); + average *= 125; + average += 64; + average /= 128; + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x40C); + tmp = (tmp / 128) & 0x1F; + if (tmp >= 8) + average += 2; + else + average -= 25; + if (tmp == 8) + average -= 72; + else + average -= 48; + + dev->stats.link_noise = average; + dev->noisecalc.calculation_running = 0; + return; + } +generate_new: + b43_generate_noise_sample(dev); +} + +static void handle_irq_tbtt_indication(struct b43_wldev *dev) +{ + if (b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) { + ///TODO: PS TBTT + } else { + if (1 /*FIXME: the last PSpoll frame was sent successfully */ ) + b43_power_saving_ctl_bits(dev, 0); + } + if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) + dev->dfq_valid = 1; +} + +static void handle_irq_atim_end(struct b43_wldev *dev) +{ + if (dev->dfq_valid) { + b43_write32(dev, B43_MMIO_MACCMD, + b43_read32(dev, B43_MMIO_MACCMD) + | B43_MACCMD_DFQ_VALID); + dev->dfq_valid = 0; + } +} + +static void handle_irq_pmq(struct b43_wldev *dev) +{ + u32 tmp; + + //TODO: AP mode. + + while (1) { + tmp = b43_read32(dev, B43_MMIO_PS_STATUS); + if (!(tmp & 0x00000008)) + break; + } + /* 16bit write is odd, but correct. */ + b43_write16(dev, B43_MMIO_PS_STATUS, 0x0002); +} + +static void b43_write_template_common(struct b43_wldev *dev, + const u8 *data, u16 size, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u32 i, tmp; + struct b43_plcp_hdr4 plcp; + + plcp.data = 0; + b43_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + b43_ram_write(dev, ram_offset, le32_to_cpu(plcp.data)); + ram_offset += sizeof(u32); + /* The PLCP is 6 bytes long, but we only wrote 4 bytes, yet. + * So leave the first two bytes of the next write blank. + */ + tmp = (u32) (data[0]) << 16; + tmp |= (u32) (data[1]) << 24; + b43_ram_write(dev, ram_offset, tmp); + ram_offset += sizeof(u32); + for (i = 2; i < size; i += sizeof(u32)) { + tmp = (u32) (data[i + 0]); + if (i + 1 < size) + tmp |= (u32) (data[i + 1]) << 8; + if (i + 2 < size) + tmp |= (u32) (data[i + 2]) << 16; + if (i + 3 < size) + tmp |= (u32) (data[i + 3]) << 24; + b43_ram_write(dev, ram_offset + i - 2, tmp); + } + b43_shm_write16(dev, B43_SHM_SHARED, shm_size_offset, + size + sizeof(struct b43_plcp_hdr6)); +} + +/* Check if the use of the antenna that ieee80211 told us to + * use is possible. This will fall back to DEFAULT. + * "antenna_nr" is the antenna identifier we got from ieee80211. */ +u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev, + u8 antenna_nr) +{ + u8 antenna_mask; + + if (antenna_nr == 0) { + /* Zero means "use default antenna". That's always OK. */ + return 0; + } + + /* Get the mask of available antennas. 
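
handle_irq_noise() above turns 8 rounds of 4 JSSI samples into the link-noise estimate: it averages the 32 nRSSI-translated values, rescales by 125/128 with a +64 rounding term, and then applies two correction steps keyed off shared-memory word 0x40C. A standalone sketch of the same integer arithmetic; the sample values and the 0x40C word are invented:

#include <stdio.h>

/* Mirrors the averaging in handle_irq_noise(); 'samples' holds the 8x4
 * nRSSI-translated values and 'shm_40c' the word read from offset 0x40C. */
static int link_noise(const int samples[8][4], unsigned int shm_40c)
{
	int average = 0;
	unsigned int tmp, i, j;

	for (i = 0; i < 8; i++)
		for (j = 0; j < 4; j++)
			average += samples[i][j];
	average /= (8 * 4);
	average *= 125;
	average += 64;
	average /= 128;

	tmp = (shm_40c / 128) & 0x1F;
	if (tmp >= 8)
		average += 2;
	else
		average -= 25;
	if (tmp == 8)
		average -= 72;
	else
		average -= 48;
	return average;
}

int main(void)
{
	int samples[8][4];
	unsigned int i, j;

	for (i = 0; i < 8; i++)
		for (j = 0; j < 4; j++)
			samples[i][j] = -60;   /* fake nRSSI values */
	printf("link noise: %d\n", link_noise(samples, 0x500));
	return 0;
}
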
*/ + if (dev->phy.gmode) + antenna_mask = dev->dev->bus->sprom.ant_available_bg; + else + antenna_mask = dev->dev->bus->sprom.ant_available_a; + + if (!(antenna_mask & (1 << (antenna_nr - 1)))) { + /* This antenna is not available. Fall back to default. */ + return 0; + } + + return antenna_nr; +} + +/* Convert a b43 antenna number value to the PHY TX control value. */ +static u16 b43_antenna_to_phyctl(int antenna) +{ + switch (antenna) { + case B43_ANTENNA0: + return B43_TXH_PHY_ANT0; + case B43_ANTENNA1: + return B43_TXH_PHY_ANT1; + case B43_ANTENNA2: + return B43_TXH_PHY_ANT2; + case B43_ANTENNA3: + return B43_TXH_PHY_ANT3; + case B43_ANTENNA_AUTO0: + case B43_ANTENNA_AUTO1: + return B43_TXH_PHY_ANT01AUTO; + } + B43_WARN_ON(1); + return 0; +} + +static void b43_write_beacon_template(struct b43_wldev *dev, + u16 ram_offset, + u16 shm_size_offset) +{ + unsigned int i, len, variable_len; + const struct ieee80211_mgmt *bcn; + const u8 *ie; + bool tim_found = 0; + unsigned int rate; + u16 ctl; + int antenna; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon); + + bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data); + len = min((size_t) dev->wl->current_beacon->len, + 0x200 - sizeof(struct b43_plcp_hdr6)); + rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value; + + b43_write_template_common(dev, (const u8 *)bcn, + len, ram_offset, shm_size_offset, rate); + + /* Write the PHY TX control parameters. */ + antenna = B43_ANTENNA_DEFAULT; + antenna = b43_antenna_to_phyctl(antenna); + ctl = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL); + /* We can't send beacons with short preamble. Would get PHY errors. */ + ctl &= ~B43_TXH_PHY_SHORTPRMBL; + ctl &= ~B43_TXH_PHY_ANT; + ctl &= ~B43_TXH_PHY_ENC; + ctl |= antenna; + if (b43_is_cck_rate(rate)) + ctl |= B43_TXH_PHY_ENC_CCK; + else + ctl |= B43_TXH_PHY_ENC_OFDM; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, ctl); + + /* Find the position of the TIM and the DTIM_period value + * and write them to SHM. */ + ie = bcn->u.beacon.variable; + variable_len = len - offsetof(struct ieee80211_mgmt, u.beacon.variable); + for (i = 0; i < variable_len - 2; ) { + uint8_t ie_id, ie_len; + + ie_id = ie[i]; + ie_len = ie[i + 1]; + if (ie_id == 5) { + u16 tim_position; + u16 dtim_period; + /* This is the TIM Information Element */ + + /* Check whether the ie_len is in the beacon data range. */ + if (variable_len < ie_len + 2 + i) + break; + /* A valid TIM is at least 4 bytes long. */ + if (ie_len < 4) + break; + tim_found = 1; + + tim_position = sizeof(struct b43_plcp_hdr6); + tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable); + tim_position += i; + + dtim_period = ie[i + 3]; + + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_TIMBPOS, tim_position); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_DTIMPER, dtim_period); + break; + } + i += ie_len + 2; + } + if (!tim_found) { + /* + * If ucode wants to modify TIM do it behind the beacon, this + * will happen, for example, when doing mesh networking. 
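
b43_write_beacon_template() above scans the beacon's information elements for the TIM element (ID 5, at least 4 bytes long) and records its position and the DTIM period for the firmware. A standalone sketch of that id/length/value walk; find_tim() and the sample element buffer are fabricated for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Returns the byte offset of the TIM element within ie[], or -1.
 * *dtim_period gets byte 3 of the element, matching ie[i + 3] above. */
static int find_tim(const uint8_t *ie, size_t len, uint8_t *dtim_period)
{
	size_t i = 0;

	while (i + 2 <= len) {
		uint8_t id = ie[i];
		uint8_t elen = ie[i + 1];

		if (i + 2 + elen > len)
			break;              /* truncated element */
		if (id == 5 && elen >= 4) { /* a valid TIM needs 4+ bytes */
			*dtim_period = ie[i + 3];
			return (int)i;
		}
		i += 2 + elen;
	}
	return -1;
}

int main(void)
{
	/* SSID "x", then a TIM element: count=0, period=2, bitmap ctrl, bitmap */
	const uint8_t ies[] = { 0x00, 1, 'x',  5, 4, 0, 2, 0, 0 };
	uint8_t dtim = 0;
	int pos = find_tim(ies, sizeof(ies), &dtim);

	printf("TIM at %d, DTIM period %u\n", pos, (unsigned)dtim);
	return 0;
}
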
+ */ + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_TIMBPOS, + len + sizeof(struct b43_plcp_hdr6)); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_DTIMPER, 0); + } + b43dbg(dev->wl, "Updated beacon template at 0x%x\n", ram_offset); +} + +static void b43_upload_beacon0(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + + if (wl->beacon0_uploaded) + return; + b43_write_beacon_template(dev, 0x68, 0x18); + wl->beacon0_uploaded = 1; +} + +static void b43_upload_beacon1(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + + if (wl->beacon1_uploaded) + return; + b43_write_beacon_template(dev, 0x468, 0x1A); + wl->beacon1_uploaded = 1; +} + +static void handle_irq_beacon(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + u32 cmd, beacon0_valid, beacon1_valid; + + if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && + !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) + return; + + /* This is the bottom half of the asynchronous beacon update. */ + + /* Ignore interrupt in the future. */ + dev->irq_mask &= ~B43_IRQ_BEACON; + + cmd = b43_read32(dev, B43_MMIO_MACCMD); + beacon0_valid = (cmd & B43_MACCMD_BEACON0_VALID); + beacon1_valid = (cmd & B43_MACCMD_BEACON1_VALID); + + /* Schedule interrupt manually, if busy. */ + if (beacon0_valid && beacon1_valid) { + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_BEACON); + dev->irq_mask |= B43_IRQ_BEACON; + return; + } + + if (unlikely(wl->beacon_templates_virgin)) { + /* We never uploaded a beacon before. + * Upload both templates now, but only mark one valid. */ + wl->beacon_templates_virgin = 0; + b43_upload_beacon0(dev); + b43_upload_beacon1(dev); + cmd = b43_read32(dev, B43_MMIO_MACCMD); + cmd |= B43_MACCMD_BEACON0_VALID; + b43_write32(dev, B43_MMIO_MACCMD, cmd); + } else { + if (!beacon0_valid) { + b43_upload_beacon0(dev); + cmd = b43_read32(dev, B43_MMIO_MACCMD); + cmd |= B43_MACCMD_BEACON0_VALID; + b43_write32(dev, B43_MMIO_MACCMD, cmd); + } else if (!beacon1_valid) { + b43_upload_beacon1(dev); + cmd = b43_read32(dev, B43_MMIO_MACCMD); + cmd |= B43_MACCMD_BEACON1_VALID; + b43_write32(dev, B43_MMIO_MACCMD, cmd); + } + } +} + +static void b43_do_beacon_update_trigger_work(struct b43_wldev *dev) +{ + u32 old_irq_mask = dev->irq_mask; + + /* update beacon right away or defer to irq */ + handle_irq_beacon(dev); + if (old_irq_mask != dev->irq_mask) { + /* The handler updated the IRQ mask. */ + B43_WARN_ON(!dev->irq_mask); + if (b43_read32(dev, B43_MMIO_GEN_IRQ_MASK)) { + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask); + } else { + /* Device interrupts are currently disabled. That means + * we just ran the hardirq handler and scheduled the + * IRQ thread. The thread will write the IRQ mask when + * it finished, so there's nothing to do here. Writing + * the mask _here_ would incorrectly re-enable IRQs. */ + } + } +} + +static void b43_beacon_update_trigger_work(struct work_struct *work) +{ + struct b43_wl *wl = container_of(work, struct b43_wl, + beacon_update_trigger); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) { + if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + /* wl->mutex is enough. */ + b43_do_beacon_update_trigger_work(dev); + mmiowb(); + } else { + spin_lock_irq(&wl->hardirq_lock); + b43_do_beacon_update_trigger_work(dev); + mmiowb(); + spin_unlock_irq(&wl->hardirq_lock); + } + } + mutex_unlock(&wl->mutex); +} + +/* Asynchronously update the packet templates in template RAM. + * Locking: Requires wl->mutex to be locked. 
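
handle_irq_beacon() above treats the two beacon templates as a double buffer: each has a "valid" bit in MACCMD, the handler re-uploads whichever slot the hardware has consumed, and when both are still valid it re-arms the beacon IRQ and waits. A compact sketch of only that decision; the bit values and helper name are placeholders, not the real MACCMD layout:

#include <stdio.h>

#define BCN0_VALID 0x1  /* illustrative stand-ins for the MACCMD bits */
#define BCN1_VALID 0x2

/* Returns which template to upload (0 or 1), or -1 if both are still
 * valid and the caller should re-arm the beacon interrupt instead. */
static int pick_beacon_template(unsigned int maccmd)
{
	int b0 = !!(maccmd & BCN0_VALID);
	int b1 = !!(maccmd & BCN1_VALID);

	if (b0 && b1)
		return -1;      /* busy: retry from the next beacon IRQ */
	return b0 ? 1 : 0;      /* refill the slot the hardware used up */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_beacon_template(0),
	       pick_beacon_template(BCN0_VALID),
	       pick_beacon_template(BCN0_VALID | BCN1_VALID));
	return 0;
}
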
*/ +static void b43_update_templates(struct b43_wl *wl) +{ + struct sk_buff *beacon; + + /* This is the top half of the asynchronous beacon update. + * The bottom half is the beacon IRQ. + * Beacon update must be asynchronous to avoid sending an + * invalid beacon. This can happen, for example, if the firmware + * transmits a beacon while we are updating it. */ + + /* We could modify the existing beacon and set the aid bit in + * the TIM field, but that would probably require resizing and + * moving of data within the beacon template. + * Simply request a new beacon and let mac80211 do the hard work. */ + beacon = ieee80211_beacon_get(wl->hw, wl->vif); + if (unlikely(!beacon)) + return; + + if (wl->current_beacon) + dev_kfree_skb_any(wl->current_beacon); + wl->current_beacon = beacon; + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; + ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger); +} + +static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int) +{ + b43_time_lock(dev); + if (dev->dev->id.revision >= 3) { + b43_write32(dev, B43_MMIO_TSF_CFP_REP, (beacon_int << 16)); + b43_write32(dev, B43_MMIO_TSF_CFP_START, (beacon_int << 10)); + } else { + b43_write16(dev, 0x606, (beacon_int >> 6)); + b43_write16(dev, 0x610, beacon_int); + } + b43_time_unlock(dev); + b43dbg(dev->wl, "Set beacon interval to %u\n", beacon_int); +} + +static void b43_handle_firmware_panic(struct b43_wldev *dev) +{ + u16 reason; + + /* Read the register that contains the reason code for the panic. */ + reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_FWPANIC_REASON_REG); + b43err(dev->wl, "Whoopsy, firmware panic! Reason: %u\n", reason); + + switch (reason) { + default: + b43dbg(dev->wl, "The panic reason is unknown.\n"); + /* fallthrough */ + case B43_FWPANIC_DIE: + /* Do not restart the controller or firmware. + * The device is nonfunctional from now on. + * Restarting would cause this panic to trigger again, + * so we avoid that recursion. */ + break; + case B43_FWPANIC_RESTART: + b43_controller_restart(dev, "Microcode panic"); + break; + } +} + +static void handle_irq_ucode_debug(struct b43_wldev *dev) +{ + unsigned int i, cnt; + u16 reason, marker_id, marker_line; + __le16 *buf; + + /* The proprietary firmware doesn't have this IRQ. */ + if (!dev->fw.opensource) + return; + + /* Read the register that contains the reason code for this IRQ. */ + reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_DEBUGIRQ_REASON_REG); + + switch (reason) { + case B43_DEBUGIRQ_PANIC: + b43_handle_firmware_panic(dev); + break; + case B43_DEBUGIRQ_DUMP_SHM: + if (!B43_DEBUG) + break; /* Only with driver debugging enabled. */ + buf = kmalloc(4096, GFP_ATOMIC); + if (!buf) { + b43dbg(dev->wl, "SHM-dump: Failed to allocate memory\n"); + goto out; + } + for (i = 0; i < 4096; i += 2) { + u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, i); + buf[i / 2] = cpu_to_le16(tmp); + } + b43info(dev->wl, "Shared memory dump:\n"); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, + 16, 2, buf, 4096, 1); + kfree(buf); + break; + case B43_DEBUGIRQ_DUMP_REGS: + if (!B43_DEBUG) + break; /* Only with driver debugging enabled. */ + b43info(dev->wl, "Microcode register dump:\n"); + for (i = 0, cnt = 0; i < 64; i++) { + u16 tmp = b43_shm_read16(dev, B43_SHM_SCRATCH, i); + if (cnt == 0) + printk(KERN_INFO); + printk("r%02u: 0x%04X ", i, tmp); + cnt++; + if (cnt == 6) { + printk("\n"); + cnt = 0; + } + } + printk("\n"); + break; + case B43_DEBUGIRQ_MARKER: + if (!B43_DEBUG) + break; /* Only with driver debugging enabled. 
*/ + marker_id = b43_shm_read16(dev, B43_SHM_SCRATCH, + B43_MARKER_ID_REG); + marker_line = b43_shm_read16(dev, B43_SHM_SCRATCH, + B43_MARKER_LINE_REG); + b43info(dev->wl, "The firmware just executed the MARKER(%u) " + "at line number %u\n", + marker_id, marker_line); + break; + default: + b43dbg(dev->wl, "Debug-IRQ triggered for unknown reason: %u\n", + reason); + } +out: + /* Acknowledge the debug-IRQ, so the firmware can continue. */ + b43_shm_write16(dev, B43_SHM_SCRATCH, + B43_DEBUGIRQ_REASON_REG, B43_DEBUGIRQ_ACK); +} + +static void b43_do_interrupt_thread(struct b43_wldev *dev) +{ + u32 reason; + u32 dma_reason[ARRAY_SIZE(dev->dma_reason)]; + u32 merged_dma_reason = 0; + int i; + + if (unlikely(b43_status(dev) != B43_STAT_STARTED)) + return; + + reason = dev->irq_reason; + for (i = 0; i < ARRAY_SIZE(dma_reason); i++) { + dma_reason[i] = dev->dma_reason[i]; + merged_dma_reason |= dma_reason[i]; + } + + if (unlikely(reason & B43_IRQ_MAC_TXERR)) + b43err(dev->wl, "MAC transmission error\n"); + + if (unlikely(reason & B43_IRQ_PHY_TXERR)) { + b43err(dev->wl, "PHY transmission error\n"); + rmb(); + if (unlikely(atomic_dec_and_test(&dev->phy.txerr_cnt))) { + atomic_set(&dev->phy.txerr_cnt, + B43_PHY_TX_BADNESS_LIMIT); + b43err(dev->wl, "Too many PHY TX errors, " + "restarting the controller\n"); + b43_controller_restart(dev, "PHY TX errors"); + } + } + + if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | + B43_DMAIRQ_NONFATALMASK))) { + if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { + b43err(dev->wl, "Fatal DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43err(dev->wl, "This device does not support DMA " + "on your system. Please use PIO instead.\n"); + /* Fall back to PIO transfers if we get fatal DMA errors! */ + dev->use_pio = 1; + b43_controller_restart(dev, "DMA error"); + return; + } + if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { + b43err(dev->wl, "DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + } + } + + if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) + handle_irq_ucode_debug(dev); + if (reason & B43_IRQ_TBTT_INDI) + handle_irq_tbtt_indication(dev); + if (reason & B43_IRQ_ATIM_END) + handle_irq_atim_end(dev); + if (reason & B43_IRQ_BEACON) + handle_irq_beacon(dev); + if (reason & B43_IRQ_PMQ) + handle_irq_pmq(dev); + if (reason & B43_IRQ_TXFIFO_FLUSH_OK) + ;/* TODO */ + if (reason & B43_IRQ_NOISESAMPLE_OK) + handle_irq_noise(dev); + + /* Check the DMA reason registers for received data. */ + if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { + if (b43_using_pio_transfers(dev)) + b43_pio_rx(dev->pio.rx_queue); + else + b43_dma_rx(dev->dma.rx_ring); + } + B43_WARN_ON(dma_reason[1] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[2] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[3] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[4] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[5] & B43_DMAIRQ_RX_DONE); + + if (reason & B43_IRQ_TX_OK) + handle_irq_transmit_status(dev); + + /* Re-enable interrupts on the device by restoring the current interrupt mask. 
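
b43_do_interrupt_thread() above ORs the per-ring DMA reason words together and tests the result against a fatal and a non-fatal mask; a fatal hit makes the driver fall back to PIO and restart the controller. A tiny sketch of that classification; the mask values are placeholders rather than the real B43_DMAIRQ_* bits:

#include <stdio.h>

enum dma_verdict { DMA_OK, DMA_NONFATAL, DMA_FATAL };

/* Placeholder masks; the driver uses B43_DMAIRQ_FATALMASK/NONFATALMASK. */
#define FATALMASK    0x0000F000u
#define NONFATALMASK 0x00000F00u

static enum dma_verdict classify_dma(const unsigned int reason[6])
{
	unsigned int merged = 0;
	int i;

	for (i = 0; i < 6; i++)
		merged |= reason[i];
	if (merged & FATALMASK)
		return DMA_FATAL;       /* switch to PIO and restart */
	if (merged & NONFATALMASK)
		return DMA_NONFATAL;    /* log and carry on */
	return DMA_OK;
}

int main(void)
{
	unsigned int r[6] = { 0, 0x100, 0, 0, 0, 0 };

	printf("verdict: %d\n", classify_dma(r));
	return 0;
}
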
*/ + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask); + +#if B43_DEBUG + if (b43_debug(dev, B43_DBG_VERBOSESTATS)) { + dev->irq_count++; + for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) { + if (reason & (1 << i)) + dev->irq_bit_count[i]++; + } + } +#endif +} + +/* Interrupt thread handler. Handles device interrupts in thread context. */ +static irqreturn_t b43_interrupt_thread_handler(int irq, void *dev_id) +{ + struct b43_wldev *dev = dev_id; + + mutex_lock(&dev->wl->mutex); + b43_do_interrupt_thread(dev); + mmiowb(); + mutex_unlock(&dev->wl->mutex); + + return IRQ_HANDLED; +} + +static irqreturn_t b43_do_interrupt(struct b43_wldev *dev) +{ + u32 reason; + + /* This code runs under wl->hardirq_lock, but _only_ on non-SDIO busses. + * On SDIO, this runs under wl->mutex. */ + + reason = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (reason == 0xffffffff) /* shared IRQ */ + return IRQ_NONE; + reason &= dev->irq_mask; + if (!reason) + return IRQ_HANDLED; + + dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) + & 0x0001DC00; + dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) + & 0x0000DC00; + dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) + & 0x0000DC00; + dev->dma_reason[3] = b43_read32(dev, B43_MMIO_DMA3_REASON) + & 0x0001DC00; + dev->dma_reason[4] = b43_read32(dev, B43_MMIO_DMA4_REASON) + & 0x0000DC00; +/* Unused ring + dev->dma_reason[5] = b43_read32(dev, B43_MMIO_DMA5_REASON) + & 0x0000DC00; +*/ + + /* ACK the interrupt. */ + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, reason); + b43_write32(dev, B43_MMIO_DMA0_REASON, dev->dma_reason[0]); + b43_write32(dev, B43_MMIO_DMA1_REASON, dev->dma_reason[1]); + b43_write32(dev, B43_MMIO_DMA2_REASON, dev->dma_reason[2]); + b43_write32(dev, B43_MMIO_DMA3_REASON, dev->dma_reason[3]); + b43_write32(dev, B43_MMIO_DMA4_REASON, dev->dma_reason[4]); +/* Unused ring + b43_write32(dev, B43_MMIO_DMA5_REASON, dev->dma_reason[5]); +*/ + + /* Disable IRQs on the device. The IRQ thread handler will re-enable them. */ + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); + /* Save the reason bitmasks for the IRQ thread handler. */ + dev->irq_reason = reason; + + return IRQ_WAKE_THREAD; +} + +/* Interrupt handler top-half. This runs with interrupts disabled. */ +static irqreturn_t b43_interrupt_handler(int irq, void *dev_id) +{ + struct b43_wldev *dev = dev_id; + irqreturn_t ret; + + if (unlikely(b43_status(dev) < B43_STAT_STARTED)) + return IRQ_NONE; + + spin_lock(&dev->wl->hardirq_lock); + ret = b43_do_interrupt(dev); + mmiowb(); + spin_unlock(&dev->wl->hardirq_lock); + + return ret; +} + +/* SDIO interrupt handler. This runs in process context. */ +static void b43_sdio_interrupt_handler(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + irqreturn_t ret; + + mutex_lock(&wl->mutex); + + ret = b43_do_interrupt(dev); + if (ret == IRQ_WAKE_THREAD) + b43_do_interrupt_thread(dev); + + mutex_unlock(&wl->mutex); +} + +void b43_do_release_fw(struct b43_firmware_file *fw) +{ + release_firmware(fw->data); + fw->data = NULL; + fw->filename = NULL; +} + +static void b43_release_firmware(struct b43_wldev *dev) +{ + b43_do_release_fw(&dev->fw.ucode); + b43_do_release_fw(&dev->fw.pcm); + b43_do_release_fw(&dev->fw.initvals); + b43_do_release_fw(&dev->fw.initvals_band); +} + +static void b43_print_fw_helptext(struct b43_wl *wl, bool error) +{ + const char text[] = + "You must go to " \ + "http://wireless.kernel.org/en/users/Drivers/b43#devicefirmware " \ + "and download the correct firmware for this driver version. 
" \ + "Please carefully read all instructions on this website.\n"; + + if (error) + b43err(wl, text); + else + b43warn(wl, text); +} + +int b43_do_request_fw(struct b43_request_fw_context *ctx, + const char *name, + struct b43_firmware_file *fw) +{ + const struct firmware *blob; + struct b43_fw_header *hdr; + u32 size; + int err; + + if (!name) { + /* Don't fetch anything. Free possibly cached firmware. */ + /* FIXME: We should probably keep it anyway, to save some headache + * on suspend/resume with multiband devices. */ + b43_do_release_fw(fw); + return 0; + } + if (fw->filename) { + if ((fw->type == ctx->req_type) && + (strcmp(fw->filename, name) == 0)) + return 0; /* Already have this fw. */ + /* Free the cached firmware first. */ + /* FIXME: We should probably do this later after we successfully + * got the new fw. This could reduce headache with multiband devices. + * We could also redesign this to cache the firmware for all possible + * bands all the time. */ + b43_do_release_fw(fw); + } + + switch (ctx->req_type) { + case B43_FWTYPE_PROPRIETARY: + snprintf(ctx->fwname, sizeof(ctx->fwname), + "b43%s/%s.fw", + modparam_fwpostfix, name); + break; + case B43_FWTYPE_OPENSOURCE: + snprintf(ctx->fwname, sizeof(ctx->fwname), + "b43-open%s/%s.fw", + modparam_fwpostfix, name); + break; + default: + B43_WARN_ON(1); + return -ENOSYS; + } + err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev); + if (err == -ENOENT) { + snprintf(ctx->errors[ctx->req_type], + sizeof(ctx->errors[ctx->req_type]), + "Firmware file \"%s\" not found\n", ctx->fwname); + return err; + } else if (err) { + snprintf(ctx->errors[ctx->req_type], + sizeof(ctx->errors[ctx->req_type]), + "Firmware file \"%s\" request failed (err=%d)\n", + ctx->fwname, err); + return err; + } + if (blob->size < sizeof(struct b43_fw_header)) + goto err_format; + hdr = (struct b43_fw_header *)(blob->data); + switch (hdr->type) { + case B43_FW_TYPE_UCODE: + case B43_FW_TYPE_PCM: + size = be32_to_cpu(hdr->size); + if (size != blob->size - sizeof(struct b43_fw_header)) + goto err_format; + /* fallthrough */ + case B43_FW_TYPE_IV: + if (hdr->ver != 1) + goto err_format; + break; + default: + goto err_format; + } + + fw->data = blob; + fw->filename = name; + fw->type = ctx->req_type; + + return 0; + +err_format: + snprintf(ctx->errors[ctx->req_type], + sizeof(ctx->errors[ctx->req_type]), + "Firmware file \"%s\" format error.\n", ctx->fwname); + release_firmware(blob); + + return -EPROTO; +} + +static int b43_try_request_fw(struct b43_request_fw_context *ctx) +{ + struct b43_wldev *dev = ctx->dev; + struct b43_firmware *fw = &ctx->dev->fw; + const u8 rev = ctx->dev->dev->id.revision; + const char *filename; + u32 tmshigh; + int err; + + /* Get microcode */ + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + if ((rev >= 5) && (rev <= 10)) + filename = "ucode5"; + else if ((rev >= 11) && (rev <= 12)) + filename = "ucode11"; + else if (rev == 13) + filename = "ucode13"; + else if (rev == 14) + filename = "ucode14"; + else if (rev >= 15) + filename = "ucode15"; + else + goto err_no_ucode; + err = b43_do_request_fw(ctx, filename, &fw->ucode); + if (err) + goto err_load; + + /* Get PCM code */ + if ((rev >= 5) && (rev <= 10)) + filename = "pcm5"; + else if (rev >= 11) + filename = NULL; + else + goto err_no_pcm; + fw->pcm_request_failed = 0; + err = b43_do_request_fw(ctx, filename, &fw->pcm); + if (err == -ENOENT) { + /* We did not find a PCM file? Not fatal, but + * core rev <= 10 must do without hwcrypto then. 
*/ + fw->pcm_request_failed = 1; + } else if (err) + goto err_load; + + /* Get initvals */ + switch (dev->phy.type) { + case B43_PHYTYPE_A: + if ((rev >= 5) && (rev <= 10)) { + if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY) + filename = "a0g1initvals5"; + else + filename = "a0g0initvals5"; + } else + goto err_no_initvals; + break; + case B43_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0initvals5"; + else if (rev >= 13) + filename = "b0g0initvals13"; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_N: + if ((rev >= 11) && (rev <= 12)) + filename = "n0initvals11"; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_LP: + if (rev == 13) + filename = "lp0initvals13"; + else if (rev == 14) + filename = "lp0initvals14"; + else if (rev >= 15) + filename = "lp0initvals15"; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = b43_do_request_fw(ctx, filename, &fw->initvals); + if (err) + goto err_load; + + /* Get bandswitch initvals */ + switch (dev->phy.type) { + case B43_PHYTYPE_A: + if ((rev >= 5) && (rev <= 10)) { + if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY) + filename = "a0g1bsinitvals5"; + else + filename = "a0g0bsinitvals5"; + } else if (rev >= 11) + filename = NULL; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0bsinitvals5"; + else if (rev >= 11) + filename = NULL; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_N: + if ((rev >= 11) && (rev <= 12)) + filename = "n0bsinitvals11"; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_LP: + if (rev == 13) + filename = "lp0bsinitvals13"; + else if (rev == 14) + filename = "lp0bsinitvals14"; + else if (rev >= 15) + filename = "lp0bsinitvals15"; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = b43_do_request_fw(ctx, filename, &fw->initvals_band); + if (err) + goto err_load; + + return 0; + +err_no_ucode: + err = ctx->fatal_failure = -EOPNOTSUPP; + b43err(dev->wl, "The driver does not know which firmware (ucode) " + "is required for your device (wl-core rev %u)\n", rev); + goto error; + +err_no_pcm: + err = ctx->fatal_failure = -EOPNOTSUPP; + b43err(dev->wl, "The driver does not know which firmware (PCM) " + "is required for your device (wl-core rev %u)\n", rev); + goto error; + +err_no_initvals: + err = ctx->fatal_failure = -EOPNOTSUPP; + b43err(dev->wl, "The driver does not know which firmware (initvals) " + "is required for your device (wl-core rev %u)\n", rev); + goto error; + +err_load: + /* We failed to load this firmware image. The error message + * already is in ctx->errors. Return and let our caller decide + * what to do. */ + goto error; + +error: + b43_release_firmware(dev); + return err; +} + +static int b43_request_firmware(struct b43_wldev *dev) +{ + struct b43_request_fw_context *ctx; + unsigned int i; + int err; + const char *errmsg; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + ctx->dev = dev; + + ctx->req_type = B43_FWTYPE_PROPRIETARY; + err = b43_try_request_fw(ctx); + if (!err) + goto out; /* Successfully loaded it. */ + err = ctx->fatal_failure; + if (err) + goto out; + + ctx->req_type = B43_FWTYPE_OPENSOURCE; + err = b43_try_request_fw(ctx); + if (!err) + goto out; /* Successfully loaded it. */ + err = ctx->fatal_failure; + if (err) + goto out; + + /* Could not find a usable firmware. Print the errors. 
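
b43_do_request_fw() above validates every firmware blob before accepting it: the blob must be at least one header long, UCODE/PCM images must declare a payload size equal to what actually follows the header, and every accepted file must carry header version 1. A standalone sketch of those checks; the local fw_header struct and the type tag values are written from scratch here and are only assumptions about the on-disk layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <arpa/inet.h>   /* ntohl/htonl for the big-endian size field */

#define FW_TYPE_UCODE 'u'   /* guessed tag values, for the demo only */
#define FW_TYPE_PCM   'p'
#define FW_TYPE_IV    'i'

struct fw_header {           /* local mirror of the fields checked above */
	uint8_t type;
	uint8_t ver;
	uint8_t pad[2];
	uint32_t size;       /* big-endian payload size */
};

static int fw_header_ok(const uint8_t *blob, size_t blob_size)
{
	struct fw_header hdr;

	if (blob_size < sizeof(hdr))
		return 0;
	memcpy(&hdr, blob, sizeof(hdr));
	switch (hdr.type) {
	case FW_TYPE_UCODE:
	case FW_TYPE_PCM:
		if (ntohl(hdr.size) != blob_size - sizeof(hdr))
			return 0;
		/* fall through: all types share the version check */
	case FW_TYPE_IV:
		return hdr.ver == 1;
	default:
		return 0;
	}
}

int main(void)
{
	uint8_t blob[sizeof(struct fw_header) + 8] = { FW_TYPE_UCODE, 1 };
	uint32_t be_size = htonl(8);

	memcpy(blob + 4, &be_size, 4);   /* size field at byte offset 4 */
	printf("header ok: %d\n", fw_header_ok(blob, sizeof(blob)));
	return 0;
}
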
*/ + for (i = 0; i < B43_NR_FWTYPES; i++) { + errmsg = ctx->errors[i]; + if (strlen(errmsg)) + b43err(dev->wl, errmsg); + } + b43_print_fw_helptext(dev->wl, 1); + err = -ENOENT; + +out: + kfree(ctx); + return err; +} + +static int b43_upload_microcode(struct b43_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43_fw_header); + const __be32 *data; + unsigned int i, len; + u16 fwrev, fwpatch, fwdate, fwtime; + u32 tmp, macctl; + int err = 0; + + /* Jump the microcode PSM to offset 0 */ + macctl = b43_read32(dev, B43_MMIO_MACCTL); + B43_WARN_ON(macctl & B43_MACCTL_PSM_RUN); + macctl |= B43_MACCTL_PSM_JMP0; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Zero out all microcode PSM registers and shared memory. */ + for (i = 0; i < 64; i++) + b43_shm_write16(dev, B43_SHM_SCRATCH, i, 0); + for (i = 0; i < 4096; i += 2) + b43_shm_write16(dev, B43_SHM_SHARED, i, 0); + + /* Upload Microcode. */ + data = (__be32 *) (dev->fw.ucode.data->data + hdr_len); + len = (dev->fw.ucode.data->size - hdr_len) / sizeof(__be32); + b43_shm_control_word(dev, B43_SHM_UCODE | B43_SHM_AUTOINC_W, 0x0000); + for (i = 0; i < len; i++) { + b43_write32(dev, B43_MMIO_SHM_DATA, be32_to_cpu(data[i])); + udelay(10); + } + + if (dev->fw.pcm.data) { + /* Upload PCM data. */ + data = (__be32 *) (dev->fw.pcm.data->data + hdr_len); + len = (dev->fw.pcm.data->size - hdr_len) / sizeof(__be32); + b43_shm_control_word(dev, B43_SHM_HW, 0x01EA); + b43_write32(dev, B43_MMIO_SHM_DATA, 0x00004000); + /* No need for autoinc bit in SHM_HW */ + b43_shm_control_word(dev, B43_SHM_HW, 0x01EB); + for (i = 0; i < len; i++) { + b43_write32(dev, B43_MMIO_SHM_DATA, be32_to_cpu(data[i])); + udelay(10); + } + } + + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_ALL); + + /* Start the microcode PSM */ + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_PSM_JMP0; + macctl |= B43_MACCTL_PSM_RUN; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + + /* Wait for the microcode to load and respond */ + i = 0; + while (1) { + tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (tmp == B43_IRQ_MAC_SUSPENDED) + break; + i++; + if (i >= 20) { + b43err(dev->wl, "Microcode not responding\n"); + b43_print_fw_helptext(dev->wl, 1); + err = -ENODEV; + goto error; + } + msleep(50); + } + b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); /* dummy read */ + + /* Get and check the revisions. */ + fwrev = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEREV); + fwpatch = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEPATCH); + fwdate = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEDATE); + fwtime = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODETIME); + + if (fwrev <= 0x128) { + b43err(dev->wl, "YOUR FIRMWARE IS TOO OLD. Firmware from " + "binary drivers older than version 4.x is unsupported. " + "You must upgrade your firmware files.\n"); + b43_print_fw_helptext(dev->wl, 1); + err = -EOPNOTSUPP; + goto error; + } + dev->fw.rev = fwrev; + dev->fw.patch = fwpatch; + dev->fw.opensource = (fwdate == 0xFFFF); + + /* Default to use-all-queues. */ + dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues; + dev->qos_enabled = !!modparam_qos; + /* Default to firmware/hardware crypto acceleration. */ + dev->hwcrypto_enabled = 1; + + if (dev->fw.opensource) { + u16 fwcapa; + + /* Patchlevel info is encoded in the "time" field. 
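
b43_upload_microcode() above tells the two firmware families apart by the "date" word: 0xFFFF marks open-source firmware, whose patch level then lives in the "time" word, while proprietary firmware packs year/month/day and hour/minute/second bit fields into the two words exactly as the info message decodes them. A standalone decode of those fields; the sample values are invented:

#include <stdint.h>
#include <stdio.h>

static void print_fw_ids(uint16_t fwrev, uint16_t fwpatch,
			 uint16_t fwdate, uint16_t fwtime)
{
	if (fwdate == 0xFFFF) {
		/* open-source firmware: patch level is carried in "time" */
		printf("OpenSource firmware %u.%u\n",
		       (unsigned)fwrev, (unsigned)fwtime);
		return;
	}
	printf("firmware %u.%u (20%02u-%02u-%02u %02u:%02u:%02u)\n",
	       (unsigned)fwrev, (unsigned)fwpatch,
	       (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
	       (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F);
}

int main(void)
{
	print_fw_ids(410, 2160,
		     (9 << 12) | (7 << 8) | 0x15,     /* 2009-07-21 */
		     (13 << 11) | (37 << 5) | 9);     /* 13:37:09   */
	print_fw_ids(508, 0, 0xFFFF, 154);
	return 0;
}
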
*/ + dev->fw.patch = fwtime; + b43info(dev->wl, "Loading OpenSource firmware version %u.%u\n", + dev->fw.rev, dev->fw.patch); + + fwcapa = b43_fwcapa_read(dev); + if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) { + b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n"); + /* Disable hardware crypto and fall back to software crypto. */ + dev->hwcrypto_enabled = 0; + } + if (!(fwcapa & B43_FWCAPA_QOS)) { + b43info(dev->wl, "QoS not supported by firmware\n"); + /* Disable QoS. Tweak hw->queues to 1. It will be restored before + * ieee80211_unregister to make sure the networking core can + * properly free possible resources. */ + dev->wl->hw->queues = 1; + dev->qos_enabled = 0; + } + } else { + b43info(dev->wl, "Loading firmware version %u.%u " + "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", + fwrev, fwpatch, + (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF, + (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F); + if (dev->fw.pcm_request_failed) { + b43warn(dev->wl, "No \"pcm5.fw\" firmware file found. " + "Hardware accelerated cryptography is disabled.\n"); + b43_print_fw_helptext(dev->wl, 0); + } + } + + if (b43_is_old_txhdr_format(dev)) { + /* We're over the deadline, but we keep support for old fw + * until it turns out to be in major conflict with something new. */ + b43warn(dev->wl, "You are using an old firmware image. " + "Support for old firmware will be removed soon " + "(official deadline was July 2008).\n"); + b43_print_fw_helptext(dev->wl, 0); + } + + return 0; + +error: + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_PSM_RUN; + macctl |= B43_MACCTL_PSM_JMP0; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + + return err; +} + +static int b43_write_initvals(struct b43_wldev *dev, + const struct b43_iv *ivals, + size_t count, + size_t array_size) +{ + const struct b43_iv *iv; + u16 offset; + size_t i; + bool bit32; + + BUILD_BUG_ON(sizeof(struct b43_iv) != 6); + iv = ivals; + for (i = 0; i < count; i++) { + if (array_size < sizeof(iv->offset_size)) + goto err_format; + array_size -= sizeof(iv->offset_size); + offset = be16_to_cpu(iv->offset_size); + bit32 = !!(offset & B43_IV_32BIT); + offset &= B43_IV_OFFSET_MASK; + if (offset >= 0x1000) + goto err_format; + if (bit32) { + u32 value; + + if (array_size < sizeof(iv->data.d32)) + goto err_format; + array_size -= sizeof(iv->data.d32); + + value = get_unaligned_be32(&iv->data.d32); + b43_write32(dev, offset, value); + + iv = (const struct b43_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be32)); + } else { + u16 value; + + if (array_size < sizeof(iv->data.d16)) + goto err_format; + array_size -= sizeof(iv->data.d16); + + value = be16_to_cpu(iv->data.d16); + b43_write16(dev, offset, value); + + iv = (const struct b43_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be16)); + } + } + if (array_size) + goto err_format; + + return 0; + +err_format: + b43err(dev->wl, "Initial Values Firmware file-format error.\n"); + b43_print_fw_helptext(dev->wl, 1); + + return -EPROTO; +} + +static int b43_upload_initvals(struct b43_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43_fw_header); + const struct b43_fw_header *hdr; + struct b43_firmware *fw = &dev->fw; + const struct b43_iv *ivals; + size_t count; + int err; + + hdr = (const struct b43_fw_header *)(fw->initvals.data->data); + ivals = (const struct b43_iv *)(fw->initvals.data->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43_write_initvals(dev, ivals, count, + 
fw->initvals.data->size - hdr_len); + if (err) + goto out; + if (fw->initvals_band.data) { + hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data); + ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43_write_initvals(dev, ivals, count, + fw->initvals_band.data->size - hdr_len); + if (err) + goto out; + } +out: + + return err; +} + +/* Initialize the GPIOs + * http://bcm-specs.sipsolutions.net/GPIO + */ +static int b43_gpio_init(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + u32 mask, set; + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_GPOUTSMSK); + + b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x000F); + + mask = 0x0000001F; + set = 0x0000000F; + if (dev->dev->bus->chip_id == 0x4301) { + mask |= 0x0060; + set |= 0x0060; + } + if (0 /* FIXME: conditional unknown */ ) { + b43_write16(dev, B43_MMIO_GPIO_MASK, + b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x0100); + mask |= 0x0180; + set |= 0x0180; + } + if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) { + b43_write16(dev, B43_MMIO_GPIO_MASK, + b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x0200); + mask |= 0x0200; + set |= 0x0200; + } + if (dev->dev->id.revision >= 2) + mask |= 0x0010; /* FIXME: This is redundant. */ + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return 0; + ssb_write32(gpiodev, B43_GPIO_CONTROL, + (ssb_read32(gpiodev, B43_GPIO_CONTROL) + & mask) | set); + + return 0; +} + +/* Turn off all GPIO stuff. Call this on module unload, for example. */ +static void b43_gpio_cleanup(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return; + ssb_write32(gpiodev, B43_GPIO_CONTROL, 0); +} + +/* http://bcm-specs.sipsolutions.net/EnableMac */ +void b43_mac_enable(struct b43_wldev *dev) +{ + if (b43_debug(dev, B43_DBG_FIRMWARE)) { + u16 fwstate; + + fwstate = b43_shm_read16(dev, B43_SHM_SHARED, + B43_SHM_SH_UCODESTAT); + if ((fwstate != B43_SHM_SH_UCODESTAT_SUSP) && + (fwstate != B43_SHM_SH_UCODESTAT_SLEEP)) { + b43err(dev->wl, "b43_mac_enable(): The firmware " + "should be suspended, but current state is %u\n", + fwstate); + } + } + + dev->mac_suspended--; + B43_WARN_ON(dev->mac_suspended < 0); + if (dev->mac_suspended == 0) { + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + | B43_MACCTL_ENABLED); + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, + B43_IRQ_MAC_SUSPENDED); + /* Commit writes */ + b43_read32(dev, B43_MMIO_MACCTL); + b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + b43_power_saving_ctl_bits(dev, 0); + } +} + +/* http://bcm-specs.sipsolutions.net/SuspendMAC */ +void b43_mac_suspend(struct b43_wldev *dev) +{ + int i; + u32 tmp; + + might_sleep(); + B43_WARN_ON(dev->mac_suspended < 0); + + if (dev->mac_suspended == 0) { + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_ENABLED); + /* force pci to flush the write */ + b43_read32(dev, B43_MMIO_MACCTL); + for (i = 35; i; i--) { + tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (tmp & B43_IRQ_MAC_SUSPENDED) + goto out; + udelay(10); + } + /* Hm, it seems this will take some time. Use msleep(). 
*/ + for (i = 40; i; i--) { + tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (tmp & B43_IRQ_MAC_SUSPENDED) + goto out; + msleep(1); + } + b43err(dev->wl, "MAC suspend failed\n"); + } +out: + dev->mac_suspended++; +} + +static void b43_adjust_opmode(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + u32 ctl; + u16 cfp_pretbtt; + + ctl = b43_read32(dev, B43_MMIO_MACCTL); + /* Reset status to STA infrastructure mode. */ + ctl &= ~B43_MACCTL_AP; + ctl &= ~B43_MACCTL_KEEP_CTL; + ctl &= ~B43_MACCTL_KEEP_BADPLCP; + ctl &= ~B43_MACCTL_KEEP_BAD; + ctl &= ~B43_MACCTL_PROMISC; + ctl &= ~B43_MACCTL_BEACPROMISC; + ctl |= B43_MACCTL_INFRA; + + if (b43_is_mode(wl, NL80211_IFTYPE_AP) || + b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) + ctl |= B43_MACCTL_AP; + else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) + ctl &= ~B43_MACCTL_INFRA; + + if (wl->filter_flags & FIF_CONTROL) + ctl |= B43_MACCTL_KEEP_CTL; + if (wl->filter_flags & FIF_FCSFAIL) + ctl |= B43_MACCTL_KEEP_BAD; + if (wl->filter_flags & FIF_PLCPFAIL) + ctl |= B43_MACCTL_KEEP_BADPLCP; + if (wl->filter_flags & FIF_PROMISC_IN_BSS) + ctl |= B43_MACCTL_PROMISC; + if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC) + ctl |= B43_MACCTL_BEACPROMISC; + + /* Workaround: On old hardware the HW-MAC-address-filter + * doesn't work properly, so always run promisc and filter + * it in software. */ + if (dev->dev->id.revision <= 4) + ctl |= B43_MACCTL_PROMISC; + + b43_write32(dev, B43_MMIO_MACCTL, ctl); + + cfp_pretbtt = 2; + if ((ctl & B43_MACCTL_INFRA) && !(ctl & B43_MACCTL_AP)) { + if (dev->dev->bus->chip_id == 0x4306 && + dev->dev->bus->chip_rev == 3) + cfp_pretbtt = 100; + else + cfp_pretbtt = 50; + } + b43_write16(dev, 0x612, cfp_pretbtt); + + /* FIXME: We don't currently implement the PMQ mechanism, + * so always disable it. If we want to implement PMQ, + * we need to enable it here (clear DISCPMQ) in AP mode. + */ + if (0 /* ctl & B43_MACCTL_AP */) { + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_DISCPMQ); + } else { + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + | B43_MACCTL_DISCPMQ); + } +} + +static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm) +{ + u16 offset; + + if (is_ofdm) { + offset = 0x480; + offset += (b43_plcp_get_ratecode_ofdm(rate) & 0x000F) * 2; + } else { + offset = 0x4C0; + offset += (b43_plcp_get_ratecode_cck(rate) & 0x000F) * 2; + } + b43_shm_write16(dev, B43_SHM_SHARED, offset + 0x20, + b43_shm_read16(dev, B43_SHM_SHARED, offset)); +} + +static void b43_rate_memory_init(struct b43_wldev *dev) +{ + switch (dev->phy.type) { + case B43_PHYTYPE_A: + case B43_PHYTYPE_G: + case B43_PHYTYPE_N: + case B43_PHYTYPE_LP: + b43_rate_memory_write(dev, B43_OFDM_RATE_6MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_12MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_18MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_24MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1); + if (dev->phy.type == B43_PHYTYPE_A) + break; + /* fallthrough */ + case B43_PHYTYPE_B: + b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_2MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_5MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_11MB, 0); + break; + default: + B43_WARN_ON(1); + } +} + +/* Set the default values for the PHY TX Control Words. 
*/ +static void b43_set_phytxctl_defaults(struct b43_wldev *dev) +{ + u16 ctl = 0; + + ctl |= B43_TXH_PHY_ENC_CCK; + ctl |= B43_TXH_PHY_ANT01AUTO; + ctl |= B43_TXH_PHY_TXPWR; + + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, ctl); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL, ctl); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL, ctl); +} + +/* Set the TX-Antenna for management frames sent by firmware. */ +static void b43_mgmtframe_txantenna(struct b43_wldev *dev, int antenna) +{ + u16 ant; + u16 tmp; + + ant = b43_antenna_to_phyctl(antenna); + + /* For ACK/CTS */ + tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL); + tmp = (tmp & ~B43_TXH_PHY_ANT) | ant; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL, tmp); + /* For Probe Responses */ + tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL); + tmp = (tmp & ~B43_TXH_PHY_ANT) | ant; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL, tmp); +} + +/* This is the opposite of b43_chip_init() */ +static void b43_chip_exit(struct b43_wldev *dev) +{ + b43_phy_exit(dev); + b43_gpio_cleanup(dev); + /* firmware is released later */ +} + +/* Initialize the chip + * http://bcm-specs.sipsolutions.net/ChipInit + */ +static int b43_chip_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int err; + u32 value32, macctl; + u16 value16; + + /* Initialize the MAC control */ + macctl = B43_MACCTL_IHR_ENABLED | B43_MACCTL_SHM_ENABLED; + if (dev->phy.gmode) + macctl |= B43_MACCTL_GMODE; + macctl |= B43_MACCTL_INFRA; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + + err = b43_request_firmware(dev); + if (err) + goto out; + err = b43_upload_microcode(dev); + if (err) + goto out; /* firmware is released later */ + + err = b43_gpio_init(dev); + if (err) + goto out; /* firmware is released later */ + + err = b43_upload_initvals(dev); + if (err) + goto err_gpio_clean; + + /* Turn the Analog on and initialize the PHY. */ + phy->ops->switch_analog(dev, 1); + err = b43_phy_init(dev); + if (err) + goto err_gpio_clean; + + /* Disable Interference Mitigation. */ + if (phy->ops->interf_mitigation) + phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE); + + /* Select the antennae */ + if (phy->ops->set_rx_antenna) + phy->ops->set_rx_antenna(dev, B43_ANTENNA_DEFAULT); + b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT); + + if (phy->type == B43_PHYTYPE_B) { + value16 = b43_read16(dev, 0x005E); + value16 |= 0x0004; + b43_write16(dev, 0x005E, value16); + } + b43_write32(dev, 0x0100, 0x01000000); + if (dev->dev->id.revision < 5) + b43_write32(dev, 0x010C, 0x01000000); + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_INFRA); + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + | B43_MACCTL_INFRA); + + /* Probe Response Timeout value */ + /* FIXME: Default to 0, has to be set by ioctl probably... :-/ */ + b43_shm_write16(dev, B43_SHM_SHARED, 0x0074, 0x0000); + + /* Initially set the wireless operation mode. 
*/ + b43_adjust_opmode(dev); + + if (dev->dev->id.revision < 3) { + b43_write16(dev, 0x060E, 0x0000); + b43_write16(dev, 0x0610, 0x8000); + b43_write16(dev, 0x0604, 0x0000); + b43_write16(dev, 0x0606, 0x0200); + } else { + b43_write32(dev, 0x0188, 0x80000000); + b43_write32(dev, 0x018C, 0x02000000); + } + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); + b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA4_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA5_IRQ_MASK, 0x0000DC00); + + value32 = ssb_read32(dev->dev, SSB_TMSLOW); + value32 |= 0x00100000; + ssb_write32(dev->dev, SSB_TMSLOW, value32); + + b43_write16(dev, B43_MMIO_POWERUP_DELAY, + dev->dev->bus->chipco.fast_pwrup_delay); + + err = 0; + b43dbg(dev->wl, "Chip initialized\n"); +out: + return err; + +err_gpio_clean: + b43_gpio_cleanup(dev); + return err; +} + +static void b43_periodic_every60sec(struct b43_wldev *dev) +{ + const struct b43_phy_operations *ops = dev->phy.ops; + + if (ops->pwork_60sec) + ops->pwork_60sec(dev); + + /* Force check the TX power emission now. */ + b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME); +} + +static void b43_periodic_every30sec(struct b43_wldev *dev) +{ + /* Update device statistics. */ + b43_calculate_link_quality(dev); +} + +static void b43_periodic_every15sec(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 wdr; + + if (dev->fw.opensource) { + /* Check if the firmware is still alive. + * It will reset the watchdog counter to 0 in its idle loop. */ + wdr = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_WATCHDOG_REG); + if (unlikely(wdr)) { + b43err(dev->wl, "Firmware watchdog: The firmware died!\n"); + b43_controller_restart(dev, "Firmware watchdog"); + return; + } else { + b43_shm_write16(dev, B43_SHM_SCRATCH, + B43_WATCHDOG_REG, 1); + } + } + + if (phy->ops->pwork_15sec) + phy->ops->pwork_15sec(dev); + + atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); + wmb(); + +#if B43_DEBUG + if (b43_debug(dev, B43_DBG_VERBOSESTATS)) { + unsigned int i; + + b43dbg(dev->wl, "Stats: %7u IRQs/sec, %7u TX/sec, %7u RX/sec\n", + dev->irq_count / 15, + dev->tx_count / 15, + dev->rx_count / 15); + dev->irq_count = 0; + dev->tx_count = 0; + dev->rx_count = 0; + for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) { + if (dev->irq_bit_count[i]) { + b43dbg(dev->wl, "Stats: %7u IRQ-%02u/sec (0x%08X)\n", + dev->irq_bit_count[i] / 15, i, (1 << i)); + dev->irq_bit_count[i] = 0; + } + } + } +#endif +} + +static void do_periodic_work(struct b43_wldev *dev) +{ + unsigned int state; + + state = dev->periodic_state; + if (state % 4 == 0) + b43_periodic_every60sec(dev); + if (state % 2 == 0) + b43_periodic_every30sec(dev); + b43_periodic_every15sec(dev); +} + +/* Periodic work locking policy: + * The whole periodic work handler is protected by + * wl->mutex. If another lock is needed somewhere in the + * pwork callchain, it's acquired in-place, where it's needed. 
+ */ +static void b43_periodic_work_handler(struct work_struct *work) +{ + struct b43_wldev *dev = container_of(work, struct b43_wldev, + periodic_work.work); + struct b43_wl *wl = dev->wl; + unsigned long delay; + + mutex_lock(&wl->mutex); + + if (unlikely(b43_status(dev) != B43_STAT_STARTED)) + goto out; + if (b43_debug(dev, B43_DBG_PWORK_STOP)) + goto out_requeue; + + do_periodic_work(dev); + + dev->periodic_state++; +out_requeue: + if (b43_debug(dev, B43_DBG_PWORK_FAST)) + delay = msecs_to_jiffies(50); + else + delay = round_jiffies_relative(HZ * 15); + ieee80211_queue_delayed_work(wl->hw, &dev->periodic_work, delay); +out: + mutex_unlock(&wl->mutex); +} + +static void b43_periodic_tasks_setup(struct b43_wldev *dev) +{ + struct delayed_work *work = &dev->periodic_work; + + dev->periodic_state = 0; + INIT_DELAYED_WORK(work, b43_periodic_work_handler); + ieee80211_queue_delayed_work(dev->wl->hw, work, 0); +} + +/* Check if communication with the device works correctly. */ +static int b43_validate_chipaccess(struct b43_wldev *dev) +{ + u32 v, backup0, backup4; + + backup0 = b43_shm_read32(dev, B43_SHM_SHARED, 0); + backup4 = b43_shm_read32(dev, B43_SHM_SHARED, 4); + + /* Check for read/write and endianness problems. */ + b43_shm_write32(dev, B43_SHM_SHARED, 0, 0x55AAAA55); + if (b43_shm_read32(dev, B43_SHM_SHARED, 0) != 0x55AAAA55) + goto error; + b43_shm_write32(dev, B43_SHM_SHARED, 0, 0xAA5555AA); + if (b43_shm_read32(dev, B43_SHM_SHARED, 0) != 0xAA5555AA) + goto error; + + /* Check if unaligned 32bit SHM_SHARED access works properly. + * However, don't bail out on failure, because it's noncritical. */ + b43_shm_write16(dev, B43_SHM_SHARED, 0, 0x1122); + b43_shm_write16(dev, B43_SHM_SHARED, 2, 0x3344); + b43_shm_write16(dev, B43_SHM_SHARED, 4, 0x5566); + b43_shm_write16(dev, B43_SHM_SHARED, 6, 0x7788); + if (b43_shm_read32(dev, B43_SHM_SHARED, 2) != 0x55663344) + b43warn(dev->wl, "Unaligned 32bit SHM read access is broken\n"); + b43_shm_write32(dev, B43_SHM_SHARED, 2, 0xAABBCCDD); + if (b43_shm_read16(dev, B43_SHM_SHARED, 0) != 0x1122 || + b43_shm_read16(dev, B43_SHM_SHARED, 2) != 0xCCDD || + b43_shm_read16(dev, B43_SHM_SHARED, 4) != 0xAABB || + b43_shm_read16(dev, B43_SHM_SHARED, 6) != 0x7788) + b43warn(dev->wl, "Unaligned 32bit SHM write access is broken\n"); + + b43_shm_write32(dev, B43_SHM_SHARED, 0, backup0); + b43_shm_write32(dev, B43_SHM_SHARED, 4, backup4); + + if ((dev->dev->id.revision >= 3) && (dev->dev->id.revision <= 10)) { + /* The 32bit register shadows the two 16bit registers + * with update sideeffects. Validate this. */ + b43_write16(dev, B43_MMIO_TSF_CFP_START, 0xAAAA); + b43_write32(dev, B43_MMIO_TSF_CFP_START, 0xCCCCBBBB); + if (b43_read16(dev, B43_MMIO_TSF_CFP_START_LOW) != 0xBBBB) + goto error; + if (b43_read16(dev, B43_MMIO_TSF_CFP_START_HIGH) != 0xCCCC) + goto error; + } + b43_write32(dev, B43_MMIO_TSF_CFP_START, 0); + + v = b43_read32(dev, B43_MMIO_MACCTL); + v |= B43_MACCTL_GMODE; + if (v != (B43_MACCTL_GMODE | B43_MACCTL_IHR_ENABLED)) + goto error; + + return 0; +error: + b43err(dev->wl, "Failed to validate the chipaccess\n"); + return -ENODEV; +} + +static void b43_security_init(struct b43_wldev *dev) +{ + dev->ktp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_KTP); + /* KTP is a word address, but we address SHM bytewise. + * So multiply by two. + */ + dev->ktp *= 2; + /* Number of RCMTA address slots */ + b43_write16(dev, B43_MMIO_RCMTA_COUNT, B43_NR_PAIRWISE_KEYS); + /* Clear the key memory. 
*/ + b43_clear_keys(dev); +} + +#ifdef CONFIG_B43_HWRNG +static int b43_rng_read(struct hwrng *rng, u32 *data) +{ + struct b43_wl *wl = (struct b43_wl *)rng->priv; + struct b43_wldev *dev; + int count = -ENODEV; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (likely(dev && b43_status(dev) >= B43_STAT_INITIALIZED)) { + *data = b43_read16(dev, B43_MMIO_RNG); + count = sizeof(u16); + } + mutex_unlock(&wl->mutex); + + return count; +} +#endif /* CONFIG_B43_HWRNG */ + +static void b43_rng_exit(struct b43_wl *wl) +{ +#ifdef CONFIG_B43_HWRNG + if (wl->rng_initialized) + hwrng_unregister(&wl->rng); +#endif /* CONFIG_B43_HWRNG */ +} + +static int b43_rng_init(struct b43_wl *wl) +{ + int err = 0; + +#ifdef CONFIG_B43_HWRNG + snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name), + "%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy)); + wl->rng.name = wl->rng_name; + wl->rng.data_read = b43_rng_read; + wl->rng.priv = (unsigned long)wl; + wl->rng_initialized = 1; + err = hwrng_register(&wl->rng); + if (err) { + wl->rng_initialized = 0; + b43err(wl, "Failed to register the random " + "number generator (%d)\n", err); + } +#endif /* CONFIG_B43_HWRNG */ + + return err; +} + +static void b43_tx_work(struct work_struct *work) +{ + struct b43_wl *wl = container_of(work, struct b43_wl, tx_work); + struct b43_wldev *dev; + struct sk_buff *skb; + int err = 0; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED)) { + mutex_unlock(&wl->mutex); + return; + } + + while (skb_queue_len(&wl->tx_queue)) { + skb = skb_dequeue(&wl->tx_queue); + + if (b43_using_pio_transfers(dev)) + err = b43_pio_tx(dev, skb); + else + err = b43_dma_tx(dev, skb); + if (unlikely(err)) + dev_kfree_skb(skb); /* Drop it */ + } + +#if B43_DEBUG + dev->tx_count++; +#endif + mutex_unlock(&wl->mutex); +} + +static int b43_op_tx(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + + if (unlikely(skb->len < 2 + 2 + 6)) { + /* Too short, this can't be a valid frame. */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + B43_WARN_ON(skb_shinfo(skb)->nr_frags); + + skb_queue_tail(&wl->tx_queue, skb); + ieee80211_queue_work(wl->hw, &wl->tx_work); + + return NETDEV_TX_OK; +} + +static void b43_qos_params_upload(struct b43_wldev *dev, + const struct ieee80211_tx_queue_params *p, + u16 shm_offset) +{ + u16 params[B43_NR_QOSPARAMS]; + int bslots, tmp; + unsigned int i; + + if (!dev->qos_enabled) + return; + + bslots = b43_read16(dev, B43_MMIO_RNG) & p->cw_min; + + memset(&params, 0, sizeof(params)); + + params[B43_QOSPARAM_TXOP] = p->txop * 32; + params[B43_QOSPARAM_CWMIN] = p->cw_min; + params[B43_QOSPARAM_CWMAX] = p->cw_max; + params[B43_QOSPARAM_CWCUR] = p->cw_min; + params[B43_QOSPARAM_AIFS] = p->aifs; + params[B43_QOSPARAM_BSLOTS] = bslots; + params[B43_QOSPARAM_REGGAP] = bslots + p->aifs; + + for (i = 0; i < ARRAY_SIZE(params); i++) { + if (i == B43_QOSPARAM_STATUS) { + tmp = b43_shm_read16(dev, B43_SHM_SHARED, + shm_offset + (i * 2)); + /* Mark the parameters as updated. */ + tmp |= 0x100; + b43_shm_write16(dev, B43_SHM_SHARED, + shm_offset + (i * 2), + tmp); + } else { + b43_shm_write16(dev, B43_SHM_SHARED, + shm_offset + (i * 2), + params[i]); + } + } +} + +/* Mapping of mac80211 queue numbers to b43 QoS SHM offsets. 
*/ +static const u16 b43_qos_shm_offsets[] = { + /* [mac80211-queue-nr] = SHM_OFFSET, */ + [0] = B43_QOS_VOICE, + [1] = B43_QOS_VIDEO, + [2] = B43_QOS_BESTEFFORT, + [3] = B43_QOS_BACKGROUND, +}; + +/* Update all QOS parameters in hardware. */ +static void b43_qos_upload_all(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct b43_qos_params *params; + unsigned int i; + + if (!dev->qos_enabled) + return; + + BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) != + ARRAY_SIZE(wl->qos_params)); + + b43_mac_suspend(dev); + for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) { + params = &(wl->qos_params[i]); + b43_qos_params_upload(dev, &(params->p), + b43_qos_shm_offsets[i]); + } + b43_mac_enable(dev); +} + +static void b43_qos_clear(struct b43_wl *wl) +{ + struct b43_qos_params *params; + unsigned int i; + + /* Initialize QoS parameters to sane defaults. */ + + BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) != + ARRAY_SIZE(wl->qos_params)); + + for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) { + params = &(wl->qos_params[i]); + + switch (b43_qos_shm_offsets[i]) { + case B43_QOS_VOICE: + params->p.txop = 0; + params->p.aifs = 2; + params->p.cw_min = 0x0001; + params->p.cw_max = 0x0001; + break; + case B43_QOS_VIDEO: + params->p.txop = 0; + params->p.aifs = 2; + params->p.cw_min = 0x0001; + params->p.cw_max = 0x0001; + break; + case B43_QOS_BESTEFFORT: + params->p.txop = 0; + params->p.aifs = 3; + params->p.cw_min = 0x0001; + params->p.cw_max = 0x03FF; + break; + case B43_QOS_BACKGROUND: + params->p.txop = 0; + params->p.aifs = 7; + params->p.cw_min = 0x0001; + params->p.cw_max = 0x03FF; + break; + default: + B43_WARN_ON(1); + } + } +} + +/* Initialize the core's QOS capabilities */ +static void b43_qos_init(struct b43_wldev *dev) +{ + if (!dev->qos_enabled) { + /* Disable QOS support. */ + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_EDCF); + b43_write16(dev, B43_MMIO_IFSCTL, + b43_read16(dev, B43_MMIO_IFSCTL) + & ~B43_MMIO_IFSCTL_USE_EDCF); + b43dbg(dev->wl, "QoS disabled\n"); + return; + } + + /* Upload the current QOS parameters. */ + b43_qos_upload_all(dev); + + /* Enable QOS support. */ + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF); + b43_write16(dev, B43_MMIO_IFSCTL, + b43_read16(dev, B43_MMIO_IFSCTL) + | B43_MMIO_IFSCTL_USE_EDCF); + b43dbg(dev->wl, "QoS enabled\n"); +} + +static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue, + const struct ieee80211_tx_queue_params *params) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + unsigned int queue = (unsigned int)_queue; + int err = -ENODEV; + + if (queue >= ARRAY_SIZE(wl->qos_params)) { + /* Queue not available or don't support setting + * params on this queue. Return success to not + * confuse mac80211. 
*/ + return 0; + } + BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) != + ARRAY_SIZE(wl->qos_params)); + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (unlikely(!dev || (b43_status(dev) < B43_STAT_INITIALIZED))) + goto out_unlock; + + memcpy(&(wl->qos_params[queue].p), params, sizeof(*params)); + b43_mac_suspend(dev); + b43_qos_params_upload(dev, &(wl->qos_params[queue].p), + b43_qos_shm_offsets[queue]); + b43_mac_enable(dev); + err = 0; + +out_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static int b43_op_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + + mutex_lock(&wl->mutex); + memcpy(stats, &wl->ieee_stats, sizeof(*stats)); + mutex_unlock(&wl->mutex); + + return 0; +} + +static u64 b43_op_get_tsf(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + u64 tsf; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + + if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) + b43_tsf_read(dev, &tsf); + else + tsf = 0; + + mutex_unlock(&wl->mutex); + + return tsf; +} + +static void b43_op_set_tsf(struct ieee80211_hw *hw, u64 tsf) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + + if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) + b43_tsf_write(dev, tsf); + + mutex_unlock(&wl->mutex); +} + +static void b43_put_phy_into_reset(struct b43_wldev *dev) +{ + struct ssb_device *sdev = dev->dev; + u32 tmslow; + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~B43_TMSLOW_GMODE; + tmslow |= B43_TMSLOW_PHYRESET; + tmslow |= SSB_TMSLOW_FGC; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~SSB_TMSLOW_FGC; + tmslow |= B43_TMSLOW_PHYRESET; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); +} + +static const char *band_to_string(enum ieee80211_band band) +{ + switch (band) { + case IEEE80211_BAND_5GHZ: + return "5"; + case IEEE80211_BAND_2GHZ: + return "2.4"; + default: + break; + } + B43_WARN_ON(1); + return ""; +} + +/* Expects wl->mutex locked */ +static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan) +{ + struct b43_wldev *up_dev = NULL; + struct b43_wldev *down_dev; + struct b43_wldev *d; + int err; + bool uninitialized_var(gmode); + int prev_status; + + /* Find a device and PHY which supports the band. */ + list_for_each_entry(d, &wl->devlist, list) { + switch (chan->band) { + case IEEE80211_BAND_5GHZ: + if (d->phy.supports_5ghz) { + up_dev = d; + gmode = 0; + } + break; + case IEEE80211_BAND_2GHZ: + if (d->phy.supports_2ghz) { + up_dev = d; + gmode = 1; + } + break; + default: + B43_WARN_ON(1); + return -EINVAL; + } + if (up_dev) + break; + } + if (!up_dev) { + b43err(wl, "Could not find a device for %s-GHz band operation\n", + band_to_string(chan->band)); + return -ENODEV; + } + if ((up_dev == wl->current_dev) && + (!!wl->current_dev->phy.gmode == !!gmode)) { + /* This device is already running. */ + return 0; + } + b43dbg(wl, "Switching to %s-GHz band\n", + band_to_string(chan->band)); + down_dev = wl->current_dev; + + prev_status = b43_status(down_dev); + /* Shutdown the currently running core. */ + if (prev_status >= B43_STAT_STARTED) + down_dev = b43_wireless_core_stop(down_dev); + if (prev_status >= B43_STAT_INITIALIZED) + b43_wireless_core_exit(down_dev); + + if (down_dev != up_dev) { + /* We switch to a different core, so we put PHY into + * RESET on the old core. 
*/ + b43_put_phy_into_reset(down_dev); + } + + /* Now start the new core. */ + up_dev->phy.gmode = gmode; + if (prev_status >= B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(up_dev); + if (err) { + b43err(wl, "Fatal: Could not initialize device for " + "selected %s-GHz band\n", + band_to_string(chan->band)); + goto init_failure; + } + } + if (prev_status >= B43_STAT_STARTED) { + err = b43_wireless_core_start(up_dev); + if (err) { + b43err(wl, "Fatal: Could not start device for " + "selected %s-GHz band\n", + band_to_string(chan->band)); + b43_wireless_core_exit(up_dev); + goto init_failure; + } + } + B43_WARN_ON(b43_status(up_dev) != prev_status); + + wl->current_dev = up_dev; + + return 0; +init_failure: + /* Whoops, failed to init the new core. No core is operating now. */ + wl->current_dev = NULL; + return err; +} + +/* Write the short and long frame retry limit values. */ +static void b43_set_retry_limits(struct b43_wldev *dev, + unsigned int short_retry, + unsigned int long_retry) +{ + /* The retry limit is a 4-bit counter. Enforce this to avoid overflowing + * the chip-internal counter. */ + short_retry = min(short_retry, (unsigned int)0xF); + long_retry = min(long_retry, (unsigned int)0xF); + + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_SRLIMIT, + short_retry); + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_LRLIMIT, + long_retry); +} + +static int b43_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + struct b43_phy *phy; + struct ieee80211_conf *conf = &hw->conf; + int antenna; + int err = 0; + + mutex_lock(&wl->mutex); + + /* Switch the band (if necessary). This might change the active core. */ + err = b43_switch_band(wl, conf->channel); + if (err) + goto out_unlock_mutex; + dev = wl->current_dev; + phy = &dev->phy; + + if (conf_is_ht(conf)) + phy->is_40mhz = + (conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf)); + else + phy->is_40mhz = false; + + b43_mac_suspend(dev); + + if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) + b43_set_retry_limits(dev, conf->short_frame_max_tx_count, + conf->long_frame_max_tx_count); + changed &= ~IEEE80211_CONF_CHANGE_RETRY_LIMITS; + if (!changed) + goto out_mac_enable; + + /* Switch to the requested channel. + * The firmware takes care of races with the TX handler. */ + if (conf->channel->hw_value != phy->channel) + b43_switch_channel(dev, conf->channel->hw_value); + + dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR); + + /* Adjust the desired TX power level. */ + if (conf->power_level != 0) { + if (conf->power_level != phy->desired_txpower) { + phy->desired_txpower = conf->power_level; + b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME | + B43_TXPWR_IGNORE_TSSI); + } + } + + /* Antennas for RX and management frame TX. */ + antenna = B43_ANTENNA_DEFAULT; + b43_mgmtframe_txantenna(dev, antenna); + antenna = B43_ANTENNA_DEFAULT; + if (phy->ops->set_rx_antenna) + phy->ops->set_rx_antenna(dev, antenna); + + if (wl->radio_enabled != phy->radio_on) { + if (wl->radio_enabled) { + b43_software_rfkill(dev, false); + b43info(dev->wl, "Radio turned on by software\n"); + if (!dev->radio_hw_enable) { + b43info(dev->wl, "The hardware RF-kill button " + "still turns the radio physically off. 
" + "Press the button to turn it on.\n"); + } + } else { + b43_software_rfkill(dev, true); + b43info(dev->wl, "Radio turned off by software\n"); + } + } + +out_mac_enable: + b43_mac_enable(dev); +out_unlock_mutex: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43_update_basic_rates(struct b43_wldev *dev, u32 brates) +{ + struct ieee80211_supported_band *sband = + dev->wl->hw->wiphy->bands[b43_current_band(dev->wl)]; + struct ieee80211_rate *rate; + int i; + u16 basic, direct, offset, basic_offset, rateptr; + + for (i = 0; i < sband->n_bitrates; i++) { + rate = &sband->bitrates[i]; + + if (b43_is_cck_rate(rate->hw_value)) { + direct = B43_SHM_SH_CCKDIRECT; + basic = B43_SHM_SH_CCKBASIC; + offset = b43_plcp_get_ratecode_cck(rate->hw_value); + offset &= 0xF; + } else { + direct = B43_SHM_SH_OFDMDIRECT; + basic = B43_SHM_SH_OFDMBASIC; + offset = b43_plcp_get_ratecode_ofdm(rate->hw_value); + offset &= 0xF; + } + + rate = ieee80211_get_response_rate(sband, brates, rate->bitrate); + + if (b43_is_cck_rate(rate->hw_value)) { + basic_offset = b43_plcp_get_ratecode_cck(rate->hw_value); + basic_offset &= 0xF; + } else { + basic_offset = b43_plcp_get_ratecode_ofdm(rate->hw_value); + basic_offset &= 0xF; + } + + /* + * Get the pointer that we need to point to + * from the direct map + */ + rateptr = b43_shm_read16(dev, B43_SHM_SHARED, + direct + 2 * basic_offset); + /* and write it to the basic map */ + b43_shm_write16(dev, B43_SHM_SHARED, basic + 2 * offset, + rateptr); + } +} + +static void b43_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, + u32 changed) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + + dev = wl->current_dev; + if (!dev || b43_status(dev) < B43_STAT_STARTED) + goto out_unlock_mutex; + + B43_WARN_ON(wl->vif != vif); + + if (changed & BSS_CHANGED_BSSID) { + if (conf->bssid) + memcpy(wl->bssid, conf->bssid, ETH_ALEN); + else + memset(wl->bssid, 0, ETH_ALEN); + } + + if (b43_status(dev) >= B43_STAT_INITIALIZED) { + if (changed & BSS_CHANGED_BEACON && + (b43_is_mode(wl, NL80211_IFTYPE_AP) || + b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) || + b43_is_mode(wl, NL80211_IFTYPE_ADHOC))) + b43_update_templates(wl); + + if (changed & BSS_CHANGED_BSSID) + b43_write_mac_bssid_templates(dev); + } + + b43_mac_suspend(dev); + + /* Update templates for AP/mesh mode. 
*/ + if (changed & BSS_CHANGED_BEACON_INT && + (b43_is_mode(wl, NL80211_IFTYPE_AP) || + b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) || + b43_is_mode(wl, NL80211_IFTYPE_ADHOC))) + b43_set_beacon_int(dev, conf->beacon_int); + + if (changed & BSS_CHANGED_BASIC_RATES) + b43_update_basic_rates(dev, conf->basic_rates); + + if (changed & BSS_CHANGED_ERP_SLOT) { + if (conf->use_short_slot) + b43_short_slot_timing_enable(dev); + else + b43_short_slot_timing_disable(dev); + } + + b43_mac_enable(dev); +out_unlock_mutex: + mutex_unlock(&wl->mutex); +} + +static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + u8 algorithm; + u8 index; + int err; + static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + + if (modparam_nohwcrypt) + return -ENOSPC; /* User disabled HW-crypto */ + + mutex_lock(&wl->mutex); + + dev = wl->current_dev; + err = -ENODEV; + if (!dev || b43_status(dev) < B43_STAT_INITIALIZED) + goto out_unlock; + + if (dev->fw.pcm_request_failed || !dev->hwcrypto_enabled) { + /* We don't have firmware for the crypto engine. + * Must use software-crypto. */ + err = -EOPNOTSUPP; + goto out_unlock; + } + + err = -EINVAL; + switch (key->alg) { + case ALG_WEP: + if (key->keylen == WLAN_KEY_LEN_WEP40) + algorithm = B43_SEC_ALGO_WEP40; + else + algorithm = B43_SEC_ALGO_WEP104; + break; + case ALG_TKIP: + algorithm = B43_SEC_ALGO_TKIP; + break; + case ALG_CCMP: + algorithm = B43_SEC_ALGO_AES; + break; + default: + B43_WARN_ON(1); + goto out_unlock; + } + index = (u8) (key->keyidx); + if (index > 3) + goto out_unlock; + + switch (cmd) { + case SET_KEY: + if (algorithm == B43_SEC_ALGO_TKIP && + (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE) || + !modparam_hwtkip)) { + /* We support only pairwise key */ + err = -EOPNOTSUPP; + goto out_unlock; + } + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + if (WARN_ON(!sta)) { + err = -EOPNOTSUPP; + goto out_unlock; + } + /* Pairwise key with an assigned MAC address. */ + err = b43_key_write(dev, -1, algorithm, + key->key, key->keylen, + sta->addr, key); + } else { + /* Group key */ + err = b43_key_write(dev, index, algorithm, + key->key, key->keylen, NULL, key); + } + if (err) + goto out_unlock; + + if (algorithm == B43_SEC_ALGO_WEP40 || + algorithm == B43_SEC_ALGO_WEP104) { + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_USEDEFKEYS); + } else { + b43_hf_write(dev, + b43_hf_read(dev) & ~B43_HF_USEDEFKEYS); + } + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + if (algorithm == B43_SEC_ALGO_TKIP) + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + break; + case DISABLE_KEY: { + err = b43_key_clear(dev, key->hw_key_idx); + if (err) + goto out_unlock; + break; + } + default: + B43_WARN_ON(1); + } + +out_unlock: + if (!err) { + b43dbg(wl, "%s hardware based encryption for keyidx: %d, " + "mac: %pM\n", + cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, + sta ? 
sta->addr : bcast_addr); + b43_dump_keymemory(dev); + } + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed, unsigned int *fflags, + u64 multicast) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (!dev) { + *fflags = 0; + goto out_unlock; + } + + *fflags &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + changed &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + wl->filter_flags = *fflags; + + if (changed && b43_status(dev) >= B43_STAT_INITIALIZED) + b43_adjust_opmode(dev); + +out_unlock: + mutex_unlock(&wl->mutex); +} + +/* Locking: wl->mutex + * Returns the current dev. This might be different from the passed-in dev, + * because the core might have gone away while we unlocked the mutex. */ +static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct b43_wldev *orig_dev; + u32 mask; + +redo: + if (!dev || b43_status(dev) < B43_STAT_STARTED) + return dev; + + /* Cancel work. Unlock to avoid deadlocks. */ + mutex_unlock(&wl->mutex); + cancel_delayed_work_sync(&dev->periodic_work); + cancel_work_sync(&wl->tx_work); + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (!dev || b43_status(dev) < B43_STAT_STARTED) { + /* Whoops, aliens ate up the device while we were unlocked. */ + return dev; + } + + /* Disable interrupts on the device. */ + b43_set_status(dev, B43_STAT_INITIALIZED); + if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + /* wl->mutex is locked. That is enough. */ + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); + b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */ + } else { + spin_lock_irq(&wl->hardirq_lock); + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); + b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */ + spin_unlock_irq(&wl->hardirq_lock); + } + /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. 
*/ + orig_dev = dev; + mutex_unlock(&wl->mutex); + if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + b43_sdio_free_irq(dev); + } else { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) + compat_synchronize_threaded_irq(&dev->irq_compat); + compat_free_threaded_irq(&dev->irq_compat); +#else + synchronize_irq(dev->dev->irq); + free_irq(dev->dev->irq, dev); +#endif + } + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (!dev) + return dev; + if (dev != orig_dev) { + if (b43_status(dev) >= B43_STAT_STARTED) + goto redo; + return dev; + } + mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); + B43_WARN_ON(mask != 0xFFFFFFFF && mask); + + /* Drain the TX queue */ + while (skb_queue_len(&wl->tx_queue)) + dev_kfree_skb(skb_dequeue(&wl->tx_queue)); + + b43_mac_suspend(dev); + b43_leds_exit(dev); + b43dbg(wl, "Wireless interface stopped\n"); + + return dev; +} + +/* Locking: wl->mutex */ +static int b43_wireless_core_start(struct b43_wldev *dev) +{ + int err; + + B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED); + + drain_txstatus_queue(dev); + if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler); + if (err) { + b43err(dev->wl, "Cannot request SDIO IRQ\n"); + goto out; + } + } else { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) + err = compat_request_threaded_irq(&dev->irq_compat, + dev->dev->irq, + b43_interrupt_handler, + b43_interrupt_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); +#else + err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler, + b43_interrupt_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); +#endif + if (err) { + b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq); + goto out; + } + } + + /* We are ready to run. */ + ieee80211_wake_queues(dev->wl->hw); + b43_set_status(dev, B43_STAT_STARTED); + + /* Start data flow (TX/RX). 
*/ + b43_mac_enable(dev); + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask); + + /* Start maintenance work */ + b43_periodic_tasks_setup(dev); + + b43_leds_init(dev); + + b43dbg(dev->wl, "Wireless interface started\n"); +out: + return err; +} + +/* Get PHY and RADIO versioning numbers */ +static int b43_phy_versioning(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u32 tmp; + u8 analog_type; + u8 phy_type; + u8 phy_rev; + u16 radio_manuf; + u16 radio_ver; + u16 radio_rev; + int unsupported = 0; + + /* Get PHY versioning */ + tmp = b43_read16(dev, B43_MMIO_PHY_VER); + analog_type = (tmp & B43_PHYVER_ANALOG) >> B43_PHYVER_ANALOG_SHIFT; + phy_type = (tmp & B43_PHYVER_TYPE) >> B43_PHYVER_TYPE_SHIFT; + phy_rev = (tmp & B43_PHYVER_VERSION); + switch (phy_type) { + case B43_PHYTYPE_A: + if (phy_rev >= 4) + unsupported = 1; + break; + case B43_PHYTYPE_B: + if (phy_rev != 2 && phy_rev != 4 && phy_rev != 6 + && phy_rev != 7) + unsupported = 1; + break; + case B43_PHYTYPE_G: + if (phy_rev > 9) + unsupported = 1; + break; +#ifdef CONFIG_B43_NPHY + case B43_PHYTYPE_N: + if (phy_rev > 4) + unsupported = 1; + break; +#endif +#ifdef CONFIG_B43_PHY_LP + case B43_PHYTYPE_LP: + if (phy_rev > 2) + unsupported = 1; + break; +#endif + default: + unsupported = 1; + } + if (unsupported) { + b43err(dev->wl, "FOUND UNSUPPORTED PHY " + "(Analog %u, Type %u, Revision %u)\n", + analog_type, phy_type, phy_rev); + return -EOPNOTSUPP; + } + b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", + analog_type, phy_type, phy_rev); + + /* Get RADIO versioning */ + if (dev->dev->bus->chip_id == 0x4317) { + if (dev->dev->bus->chip_rev == 0) + tmp = 0x3205017F; + else if (dev->dev->bus->chip_rev == 1) + tmp = 0x4205017F; + else + tmp = 0x5205017F; + } else { + b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID); + tmp = b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); + b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID); + tmp |= (u32)b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH) << 16; + } + radio_manuf = (tmp & 0x00000FFF); + radio_ver = (tmp & 0x0FFFF000) >> 12; + radio_rev = (tmp & 0xF0000000) >> 28; + if (radio_manuf != 0x17F /* Broadcom */) + unsupported = 1; + switch (phy_type) { + case B43_PHYTYPE_A: + if (radio_ver != 0x2060) + unsupported = 1; + if (radio_rev != 1) + unsupported = 1; + if (radio_manuf != 0x17F) + unsupported = 1; + break; + case B43_PHYTYPE_B: + if ((radio_ver & 0xFFF0) != 0x2050) + unsupported = 1; + break; + case B43_PHYTYPE_G: + if (radio_ver != 0x2050) + unsupported = 1; + break; + case B43_PHYTYPE_N: + if (radio_ver != 0x2055 && radio_ver != 0x2056) + unsupported = 1; + break; + case B43_PHYTYPE_LP: + if (radio_ver != 0x2062 && radio_ver != 0x2063) + unsupported = 1; + break; + default: + B43_WARN_ON(1); + } + if (unsupported) { + b43err(dev->wl, "FOUND UNSUPPORTED RADIO " + "(Manuf 0x%X, Version 0x%X, Revision %u)\n", + radio_manuf, radio_ver, radio_rev); + return -EOPNOTSUPP; + } + b43dbg(dev->wl, "Found Radio: Manuf 0x%X, Version 0x%X, Revision %u\n", + radio_manuf, radio_ver, radio_rev); + + phy->radio_manuf = radio_manuf; + phy->radio_ver = radio_ver; + phy->radio_rev = radio_rev; + + phy->analog = analog_type; + phy->type = phy_type; + phy->rev = phy_rev; + + return 0; +} + +static void setup_struct_phy_for_init(struct b43_wldev *dev, + struct b43_phy *phy) +{ + phy->hardware_power_control = !!modparam_hwpctl; + phy->next_txpwr_check_time = jiffies; + /* PHY TX errors counter. 
*/ + atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); + +#if B43_DEBUG + phy->phy_locked = 0; + phy->radio_locked = 0; +#endif +} + +static void setup_struct_wldev_for_init(struct b43_wldev *dev) +{ + dev->dfq_valid = 0; + + /* Assume the radio is enabled. If it's not enabled, the state will + * immediately get fixed on the first periodic work run. */ + dev->radio_hw_enable = 1; + + /* Stats */ + memset(&dev->stats, 0, sizeof(dev->stats)); + + setup_struct_phy_for_init(dev, &dev->phy); + + /* IRQ related flags */ + dev->irq_reason = 0; + memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); + dev->irq_mask = B43_IRQ_MASKTEMPLATE; + if (b43_modparam_verbose < B43_VERBOSITY_DEBUG) + dev->irq_mask &= ~B43_IRQ_PHY_TXERR; + + dev->mac_suspended = 1; + + /* Noise calculation context */ + memset(&dev->noisecalc, 0, sizeof(dev->noisecalc)); +} + +static void b43_bluetooth_coext_enable(struct b43_wldev *dev) +{ + struct ssb_sprom *sprom = &dev->dev->bus->sprom; + u64 hf; + + if (!modparam_btcoex) + return; + if (!(sprom->boardflags_lo & B43_BFL_BTCOEXIST)) + return; + if (dev->phy.type != B43_PHYTYPE_B && !dev->phy.gmode) + return; + + hf = b43_hf_read(dev); + if (sprom->boardflags_lo & B43_BFL_BTCMOD) + hf |= B43_HF_BTCOEXALT; + else + hf |= B43_HF_BTCOEX; + b43_hf_write(dev, hf); +} + +static void b43_bluetooth_coext_disable(struct b43_wldev *dev) +{ + if (!modparam_btcoex) + return; + //TODO +} + +static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev) +{ +#ifdef CONFIG_SSB_DRIVER_PCICORE + struct ssb_bus *bus = dev->dev->bus; + u32 tmp; + + if (bus->pcicore.dev && + bus->pcicore.dev->id.coreid == SSB_DEV_PCI && + bus->pcicore.dev->id.revision <= 5) { + /* IMCFGLO timeouts workaround. */ + tmp = ssb_read32(dev->dev, SSB_IMCFGLO); + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + case SSB_BUSTYPE_PCMCIA: + tmp &= ~SSB_IMCFGLO_REQTO; + tmp &= ~SSB_IMCFGLO_SERTO; + tmp |= 0x32; + break; + case SSB_BUSTYPE_SSB: + tmp &= ~SSB_IMCFGLO_REQTO; + tmp &= ~SSB_IMCFGLO_SERTO; + tmp |= 0x53; + break; + default: + break; + } + ssb_write32(dev->dev, SSB_IMCFGLO, tmp); + } +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +} + +static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle) +{ + u16 pu_delay; + + /* The time value is in microseconds. */ + if (dev->phy.type == B43_PHYTYPE_A) + pu_delay = 3700; + else + pu_delay = 1050; + if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle) + pu_delay = 500; + if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) + pu_delay = max(pu_delay, (u16)2400); + + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_SPUWKUP, pu_delay); +} + +/* Set the TSF CFP pre-TargetBeaconTransmissionTime. */ +static void b43_set_pretbtt(struct b43_wldev *dev) +{ + u16 pretbtt; + + /* The time value is in microseconds. */ + if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) { + pretbtt = 2; + } else { + if (dev->phy.type == B43_PHYTYPE_A) + pretbtt = 120; + else + pretbtt = 250; + } + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRETBTT, pretbtt); + b43_write16(dev, B43_MMIO_TSF_CFP_PRETBTT, pretbtt); +} + +/* Shutdown a wireless core */ +/* Locking: wl->mutex */ +static void b43_wireless_core_exit(struct b43_wldev *dev) +{ + u32 macctl; + + B43_WARN_ON(dev && b43_status(dev) > B43_STAT_INITIALIZED); + if (!dev || b43_status(dev) != B43_STAT_INITIALIZED) + return; + b43_set_status(dev, B43_STAT_UNINIT); + + /* Stop the microcode PSM. 
*/ + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_PSM_RUN; + macctl |= B43_MACCTL_PSM_JMP0; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + + b43_dma_free(dev); + b43_pio_free(dev); + b43_chip_exit(dev); + dev->phy.ops->switch_analog(dev, 0); + if (dev->wl->current_beacon) { + dev_kfree_skb_any(dev->wl->current_beacon); + dev->wl->current_beacon = NULL; + } + + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(dev->dev->bus); +} + +/* Initialize a wireless core */ +static int b43_wireless_core_init(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_sprom *sprom = &bus->sprom; + struct b43_phy *phy = &dev->phy; + int err; + u64 hf; + u32 tmp; + + B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); + + err = ssb_bus_powerup(bus, 0); + if (err) + goto out; + if (!ssb_device_is_enabled(dev->dev)) { + tmp = phy->gmode ? B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + } + + /* Reset all data structures. */ + setup_struct_wldev_for_init(dev); + phy->ops->prepare_structs(dev); + + /* Enable IRQ routing to this device. */ + ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); + + b43_imcfglo_timeouts_workaround(dev); + b43_bluetooth_coext_disable(dev); + if (phy->ops->prepare_hardware) { + err = phy->ops->prepare_hardware(dev); + if (err) + goto err_busdown; + } + err = b43_chip_init(dev); + if (err) + goto err_busdown; + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_WLCOREREV, dev->dev->id.revision); + hf = b43_hf_read(dev); + if (phy->type == B43_PHYTYPE_G) { + hf |= B43_HF_SYMW; + if (phy->rev == 1) + hf |= B43_HF_GDCW; + if (sprom->boardflags_lo & B43_BFL_PACTRL) + hf |= B43_HF_OFDMPABOOST; + } + if (phy->radio_ver == 0x2050) { + if (phy->radio_rev == 6) + hf |= B43_HF_4318TSSI; + if (phy->radio_rev < 6) + hf |= B43_HF_VCORECALC; + } + if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW) + hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */ +#ifdef CONFIG_SSB_DRIVER_PCICORE + if ((bus->bustype == SSB_BUSTYPE_PCI) && + (bus->pcicore.dev->id.revision <= 10)) + hf |= B43_HF_PCISCW; /* PCI slow clock workaround. */ +#endif + hf &= ~B43_HF_SKCFPUP; + b43_hf_write(dev, hf); + + b43_set_retry_limits(dev, B43_DEFAULT_SHORT_RETRY_LIMIT, + B43_DEFAULT_LONG_RETRY_LIMIT); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_SFFBLIM, 3); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_LFFBLIM, 2); + + /* Disable sending probe responses from firmware. + * Setting the MaxTime to one usec will always trigger + * a timeout, so we never send any probe resp. + * A timeout of zero is infinite. 
*/ + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRMAXTIME, 1); + + b43_rate_memory_init(dev); + b43_set_phytxctl_defaults(dev); + + /* Minimum Contention Window */ + if (phy->type == B43_PHYTYPE_B) { + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F); + } else { + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF); + } + /* Maximum Contention Window */ + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); + + if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || + (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) || + dev->use_pio) { + dev->__using_pio_transfers = 1; + err = b43_pio_init(dev); + } else { + dev->__using_pio_transfers = 0; + err = b43_dma_init(dev); + } + if (err) + goto err_chip_exit; + b43_qos_init(dev); + b43_set_synth_pu_delay(dev, 1); + b43_bluetooth_coext_enable(dev); + + ssb_bus_powerup(bus, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW)); + b43_upload_card_macaddress(dev); + b43_security_init(dev); + + ieee80211_wake_queues(dev->wl->hw); + + b43_set_status(dev, B43_STAT_INITIALIZED); + +out: + return err; + +err_chip_exit: + b43_chip_exit(dev); +err_busdown: + ssb_bus_may_powerdown(bus); + B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); + return err; +} + +static int b43_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + int err = -EOPNOTSUPP; + + /* TODO: allow WDS/AP devices to coexist */ + + if (vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_MESH_POINT && + vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_WDS && + vif->type != NL80211_IFTYPE_ADHOC) + return -EOPNOTSUPP; + + mutex_lock(&wl->mutex); + if (wl->operating) + goto out_mutex_unlock; + + b43dbg(wl, "Adding Interface type %d\n", vif->type); + + dev = wl->current_dev; + wl->operating = 1; + wl->vif = vif; + wl->if_type = vif->type; + memcpy(wl->mac_addr, vif->addr, ETH_ALEN); + + b43_adjust_opmode(dev); + b43_set_pretbtt(dev); + b43_set_synth_pu_delay(dev, 0); + b43_upload_card_macaddress(dev); + + err = 0; + out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + + b43dbg(wl, "Removing Interface type %d\n", vif->type); + + mutex_lock(&wl->mutex); + + B43_WARN_ON(!wl->operating); + B43_WARN_ON(wl->vif != vif); + wl->vif = NULL; + + wl->operating = 0; + + b43_adjust_opmode(dev); + memset(wl->mac_addr, 0, ETH_ALEN); + b43_upload_card_macaddress(dev); + + mutex_unlock(&wl->mutex); +} + +static int b43_op_start(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + int did_init = 0; + int err = 0; + + /* Kill all old instance specific information to make sure + * the card won't use it in the short timeframe between start + * and mac80211 reconfiguring it. 
*/ + memset(wl->bssid, 0, ETH_ALEN); + memset(wl->mac_addr, 0, ETH_ALEN); + wl->filter_flags = 0; + wl->radiotap_enabled = 0; + b43_qos_clear(wl); + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; + wl->beacon_templates_virgin = 1; + wl->radio_enabled = 1; + + mutex_lock(&wl->mutex); + + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(dev); + if (err) + goto out_mutex_unlock; + did_init = 1; + } + + if (b43_status(dev) < B43_STAT_STARTED) { + err = b43_wireless_core_start(dev); + if (err) { + if (did_init) + b43_wireless_core_exit(dev); + goto out_mutex_unlock; + } + } + + /* XXX: only do if device doesn't support rfkill irq */ + wiphy_rfkill_start_polling(hw->wiphy); + + out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43_op_stop(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + + cancel_work_sync(&(wl->beacon_update_trigger)); + + mutex_lock(&wl->mutex); + if (b43_status(dev) >= B43_STAT_STARTED) { + dev = b43_wireless_core_stop(dev); + if (!dev) + goto out_unlock; + } + b43_wireless_core_exit(dev); + wl->radio_enabled = 0; + +out_unlock: + mutex_unlock(&wl->mutex); + + cancel_work_sync(&(wl->txpower_adjust_work)); +} + +static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + + /* FIXME: add locking */ + b43_update_templates(wl); + + return 0; +} + +static void b43_op_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + struct ieee80211_sta *sta) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + + B43_WARN_ON(!vif || wl->vif != vif); +} + +static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) { + /* Disable CFP update during scan on other channels. */ + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_SKCFPUP); + } + mutex_unlock(&wl->mutex); +} + +static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) { + /* Re-enable CFP update. */ + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_SKCFPUP); + } + mutex_unlock(&wl->mutex); +} + +static const struct ieee80211_ops b43_hw_ops = { + .tx = b43_op_tx, + .conf_tx = b43_op_conf_tx, + .add_interface = b43_op_add_interface, + .remove_interface = b43_op_remove_interface, + .config = b43_op_config, + .bss_info_changed = b43_op_bss_info_changed, + .configure_filter = b43_op_configure_filter, + .set_key = b43_op_set_key, + .update_tkip_key = b43_op_update_tkip_key, + .get_stats = b43_op_get_stats, + .get_tsf = b43_op_get_tsf, + .set_tsf = b43_op_set_tsf, + .start = b43_op_start, + .stop = b43_op_stop, + .set_tim = b43_op_beacon_set_tim, + .sta_notify = b43_op_sta_notify, + .sw_scan_start = b43_op_sw_scan_start_notifier, + .sw_scan_complete = b43_op_sw_scan_complete_notifier, + .rfkill_poll = b43_rfkill_poll, +}; + +/* Hard-reset the chip. Do not call this directly. 
+ * Use b43_controller_restart() + */ +static void b43_chip_reset(struct work_struct *work) +{ + struct b43_wldev *dev = + container_of(work, struct b43_wldev, restart_work); + struct b43_wl *wl = dev->wl; + int err = 0; + int prev_status; + + mutex_lock(&wl->mutex); + + prev_status = b43_status(dev); + /* Bring the device down... */ + if (prev_status >= B43_STAT_STARTED) { + dev = b43_wireless_core_stop(dev); + if (!dev) { + err = -ENODEV; + goto out; + } + } + if (prev_status >= B43_STAT_INITIALIZED) + b43_wireless_core_exit(dev); + + /* ...and up again. */ + if (prev_status >= B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(dev); + if (err) + goto out; + } + if (prev_status >= B43_STAT_STARTED) { + err = b43_wireless_core_start(dev); + if (err) { + b43_wireless_core_exit(dev); + goto out; + } + } +out: + if (err) + wl->current_dev = NULL; /* Failed to init the dev. */ + mutex_unlock(&wl->mutex); + if (err) + b43err(wl, "Controller restart FAILED\n"); + else + b43info(wl, "Controller restarted\n"); +} + +static int b43_setup_bands(struct b43_wldev *dev, + bool have_2ghz_phy, bool have_5ghz_phy) +{ + struct ieee80211_hw *hw = dev->wl->hw; + + if (have_2ghz_phy) + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &b43_band_2GHz; + if (dev->phy.type == B43_PHYTYPE_N) { + if (have_5ghz_phy) + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_nphy; + } else { + if (have_5ghz_phy) + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy; + } + + dev->phy.supports_2ghz = have_2ghz_phy; + dev->phy.supports_5ghz = have_5ghz_phy; + + return 0; +} + +static void b43_wireless_core_detach(struct b43_wldev *dev) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) + if (dev->dev->bus->bustype != SSB_BUSTYPE_SDIO) + compat_destroy_threaded_irq(&dev->irq_compat); +#endif + /* We release firmware this late so we are not required to re-request + * it all the time when we reinit the core. */ + b43_release_firmware(dev); + b43_phy_free(dev); +} + +static int b43_wireless_core_attach(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL; + int err; + bool have_2ghz_phy = 0, have_5ghz_phy = 0; + u32 tmp; + + /* Do NOT do any device initialization here. + * Do it in wireless_core_init() instead. + * This function is for gathering basic information about the HW, only. + * Also some structs may be set up here. But most likely you want to have + * that in core_init(), too. + */ + + err = ssb_bus_powerup(bus, 0); + if (err) { + b43err(wl, "Bus powerup failed\n"); + goto out; + } + /* Get the PHY type. */ + if (dev->dev->id.revision >= 5) { + u32 tmshigh; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + have_2ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY); + have_5ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_5GHZ_PHY); + } else + B43_WARN_ON(1); + + dev->phy.gmode = have_2ghz_phy; + dev->phy.radio_on = 1; + tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + + err = b43_phy_versioning(dev); + if (err) + goto err_powerdown; + /* Check if this device supports multiband. */ + if (!pdev || + (pdev->device != 0x4312 && + pdev->device != 0x4319 && pdev->device != 0x4324)) { + /* No multiband support. */ + have_2ghz_phy = 0; + have_5ghz_phy = 0; + switch (dev->phy.type) { + case B43_PHYTYPE_A: + have_5ghz_phy = 1; + break; + case B43_PHYTYPE_LP: //FIXME not always! 
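+ /* Fall through: the LP-PHY case also sets have_2ghz_phy below. */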
+#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference + have_5ghz_phy = 1; +#endif + case B43_PHYTYPE_G: + case B43_PHYTYPE_N: + have_2ghz_phy = 1; + break; + default: + B43_WARN_ON(1); + } + } + if (dev->phy.type == B43_PHYTYPE_A) { + /* FIXME */ + b43err(wl, "IEEE 802.11a devices are unsupported\n"); + err = -EOPNOTSUPP; + goto err_powerdown; + } + if (1 /* disable A-PHY */) { + /* FIXME: For now we disable the A-PHY on multi-PHY devices. */ + if (dev->phy.type != B43_PHYTYPE_N && + dev->phy.type != B43_PHYTYPE_LP) { + have_2ghz_phy = 1; + have_5ghz_phy = 0; + } + } + + err = b43_phy_allocate(dev); + if (err) + goto err_powerdown; + + dev->phy.gmode = have_2ghz_phy; + tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + + err = b43_validate_chipaccess(dev); + if (err) + goto err_phy_free; + err = b43_setup_bands(dev, have_2ghz_phy, have_5ghz_phy); + if (err) + goto err_phy_free; + + /* Now set some default "current_dev" */ + if (!wl->current_dev) + wl->current_dev = dev; + INIT_WORK(&dev->restart_work, b43_chip_reset); + + dev->phy.ops->switch_analog(dev, 0); + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(bus); + +out: + return err; + +err_phy_free: + b43_phy_free(dev); +err_powerdown: + ssb_bus_may_powerdown(bus); + return err; +} + +static void b43_one_core_detach(struct ssb_device *dev) +{ + struct b43_wldev *wldev; + struct b43_wl *wl; + + /* Do not cancel ieee80211-workqueue based work here. + * See comment in b43_remove(). */ + + wldev = ssb_get_drvdata(dev); + wl = wldev->wl; + b43_debugfs_remove_device(wldev); + b43_wireless_core_detach(wldev); + list_del(&wldev->list); + wl->nr_devs--; + ssb_set_drvdata(dev, NULL); + kfree(wldev); +} + +static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl) +{ + struct b43_wldev *wldev; + struct pci_dev *pdev; + int err = -ENOMEM; + + if (!list_empty(&wl->devlist)) { + /* We are not the first core on this chip. */ + pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL; + /* Only special chips support more than one wireless + * core, although some of the other chips have more than + * one wireless core as well. Check for this and + * bail out early. 
+ */ + if (!pdev || + ((pdev->device != 0x4321) && + (pdev->device != 0x4313) && (pdev->device != 0x431A))) { + b43dbg(wl, "Ignoring unconnected 802.11 core\n"); + return -ENODEV; + } + } + + wldev = kzalloc(sizeof(*wldev), GFP_KERNEL); + if (!wldev) + goto out; + + wldev->use_pio = b43_modparam_pio; + wldev->dev = dev; + wldev->wl = wl; + b43_set_status(wldev, B43_STAT_UNINIT); + wldev->bad_frames_preempt = modparam_bad_frames_preempt; + INIT_LIST_HEAD(&wldev->list); + + err = b43_wireless_core_attach(wldev); + if (err) + goto err_kfree_wldev; + + list_add(&wldev->list, &wl->devlist); + wl->nr_devs++; + ssb_set_drvdata(dev, wldev); + b43_debugfs_add_device(wldev); + + out: + return err; + + err_kfree_wldev: + kfree(wldev); + return err; +} + +#define IS_PDEV(pdev, _vendor, _device, _subvendor, _subdevice) ( \ + (pdev->vendor == PCI_VENDOR_ID_##_vendor) && \ + (pdev->device == _device) && \ + (pdev->subsystem_vendor == PCI_VENDOR_ID_##_subvendor) && \ + (pdev->subsystem_device == _subdevice) ) + +static void b43_sprom_fixup(struct ssb_bus *bus) +{ + struct pci_dev *pdev; + + /* boardflags workarounds */ + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_DELL && + bus->chip_id == 0x4301 && bus->boardinfo.rev == 0x74) + bus->sprom.boardflags_lo |= B43_BFL_BTCOEXIST; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE && + bus->boardinfo.type == 0x4E && bus->boardinfo.rev > 0x40) + bus->sprom.boardflags_lo |= B43_BFL_PACTRL; + if (bus->bustype == SSB_BUSTYPE_PCI) { + pdev = bus->host_pci; + if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) || + IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) || + IS_PDEV(pdev, BROADCOM, 0x4320, HP, 0x12f8) || + IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) || + IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) || + IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013) || + IS_PDEV(pdev, BROADCOM, 0x4320, MOTOROLA, 0x7010)) + bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST; + } +} + +static void b43_wireless_exit(struct ssb_device *dev, struct b43_wl *wl) +{ + struct ieee80211_hw *hw = wl->hw; + + ssb_set_devtypedata(dev, NULL); + ieee80211_free_hw(hw); +} + +static int b43_wireless_init(struct ssb_device *dev) +{ + struct ssb_sprom *sprom = &dev->bus->sprom; + struct ieee80211_hw *hw; + struct b43_wl *wl; + int err = -ENOMEM; + + b43_sprom_fixup(dev->bus); + + hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops); + if (!hw) { + b43err(NULL, "Could not allocate ieee80211 device\n"); + goto out; + } + wl = hw_to_b43_wl(hw); + + /* fill hw info */ + hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM; + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_WDS) | + BIT(NL80211_IFTYPE_ADHOC); + + hw->queues = modparam_qos ? 
4 : 1; + wl->mac80211_initially_registered_queues = hw->queues; + hw->max_rates = 2; + SET_IEEE80211_DEV(hw, dev->dev); + if (is_valid_ether_addr(sprom->et1mac)) + SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac); + else + SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac); + + /* Initialize struct b43_wl */ + wl->hw = hw; + mutex_init(&wl->mutex); + spin_lock_init(&wl->hardirq_lock); + INIT_LIST_HEAD(&wl->devlist); + INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work); + INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work); + INIT_WORK(&wl->tx_work, b43_tx_work); + skb_queue_head_init(&wl->tx_queue); + + ssb_set_devtypedata(dev, wl); + b43info(wl, "Broadcom %04X WLAN found (core revision %u)\n", + dev->bus->chip_id, dev->id.revision); + err = 0; +out: + return err; +} + +static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id) +{ + struct b43_wl *wl; + int err; + int first = 0; + + wl = ssb_get_devtypedata(dev); + if (!wl) { + /* Probing the first core. Must setup common struct b43_wl */ + first = 1; + err = b43_wireless_init(dev); + if (err) + goto out; + wl = ssb_get_devtypedata(dev); + B43_WARN_ON(!wl); + } + err = b43_one_core_attach(dev, wl); + if (err) + goto err_wireless_exit; + + if (first) { + err = ieee80211_register_hw(wl->hw); + if (err) + goto err_one_core_detach; + b43_leds_register(wl->current_dev); + b43_rng_init(wl); + } + + out: + return err; + + err_one_core_detach: + b43_one_core_detach(dev); + err_wireless_exit: + if (first) + b43_wireless_exit(dev, wl); + return err; +} + +static void b43_remove(struct ssb_device *dev) +{ + struct b43_wl *wl = ssb_get_devtypedata(dev); + struct b43_wldev *wldev = ssb_get_drvdata(dev); + + /* We must cancel any work here before unregistering from ieee80211, + * as the ieee80211 unreg will destroy the workqueue. */ + cancel_work_sync(&wldev->restart_work); + + B43_WARN_ON(!wl); + if (wl->current_dev == wldev) { + /* Restore the queues count before unregistering, because firmware detect + * might have modified it. Restoring is important, so the networking + * stack can properly free resources. */ + wl->hw->queues = wl->mac80211_initially_registered_queues; + b43_leds_stop(wldev); + ieee80211_unregister_hw(wl->hw); + } + + b43_one_core_detach(dev); + + if (list_empty(&wl->devlist)) { + b43_rng_exit(wl); + b43_leds_unregister(wl); + /* Last core on the chip unregistered. + * We can destroy common struct b43_wl. + */ + b43_wireless_exit(dev, wl); + } +} + +/* Perform a hardware reset. This can be called from any context. */ +void b43_controller_restart(struct b43_wldev *dev, const char *reason) +{ + /* Must avoid requeueing, if we are in shutdown. 
*/ + if (b43_status(dev) < B43_STAT_INITIALIZED) + return; + b43info(dev->wl, "Controller RESET (%s) ...\n", reason); + ieee80211_queue_work(dev->wl->hw, &dev->restart_work); +} + +static struct ssb_driver b43_ssb_driver = { + .name = KBUILD_MODNAME, + .id_table = b43_ssb_tbl, + .probe = b43_probe, + .remove = b43_remove, +}; + +static void b43_print_driverinfo(void) +{ + const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "", + *feat_leds = "", *feat_sdio = ""; + +#ifdef CONFIG_B43_PCI_AUTOSELECT + feat_pci = "P"; +#endif +#ifdef CONFIG_B43_PCMCIA + feat_pcmcia = "M"; +#endif +#ifdef CONFIG_B43_NPHY + feat_nphy = "N"; +#endif +#ifdef CONFIG_B43_LEDS + feat_leds = "L"; +#endif +#ifdef CONFIG_B43_SDIO + feat_sdio = "S"; +#endif + printk(KERN_INFO "Broadcom 43xx driver loaded " + "[ Features: %s%s%s%s%s, Firmware-ID: " + B43_SUPPORTED_FIRMWARE_ID " ]\n", + feat_pci, feat_pcmcia, feat_nphy, + feat_leds, feat_sdio); +} + +static int __init b43_init(void) +{ + int err; + + b43_debugfs_init(); + err = b43_pcmcia_init(); + if (err) + goto err_dfs_exit; + err = b43_sdio_init(); + if (err) + goto err_pcmcia_exit; + err = ssb_driver_register(&b43_ssb_driver); + if (err) + goto err_sdio_exit; + b43_print_driverinfo(); + + return err; + +err_sdio_exit: + b43_sdio_exit(); +err_pcmcia_exit: + b43_pcmcia_exit(); +err_dfs_exit: + b43_debugfs_exit(); + return err; +} + +static void __exit b43_exit(void) +{ + ssb_driver_unregister(&b43_ssb_driver); + b43_sdio_exit(); + b43_pcmcia_exit(); + b43_debugfs_exit(); +} + +module_init(b43_init) +module_exit(b43_exit) diff --git a/drivers/net/wireless/b43/main.c.orig b/drivers/net/wireless/b43/main.c.orig new file mode 100644 index 0000000..1521b1e --- /dev/null +++ b/drivers/net/wireless/b43/main.c.orig @@ -0,0 +1,5095 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer + Copyright (c) 2005 Stefano Brivio + Copyright (c) 2005-2009 Michael Buesch + Copyright (c) 2005 Danny van Dyk + Copyright (c) 2005 Andreas Jaggi + + SDIO support + Copyright (c) 2009 Albert Herranz + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "b43.h" +#include "main.h" +#include "debugfs.h" +#include "phy_common.h" +#include "phy_g.h" +#include "phy_n.h" +#include "dma.h" +#include "pio.h" +#include "sysfs.h" +#include "xmit.h" +#include "lo.h" +#include "pcmcia.h" +#include "sdio.h" +#include + +MODULE_DESCRIPTION("Broadcom B43 wireless driver"); +MODULE_AUTHOR("Martin Langer"); +MODULE_AUTHOR("Stefano Brivio"); +MODULE_AUTHOR("Michael Buesch"); +MODULE_AUTHOR("Gábor Stefanik"); +MODULE_LICENSE("GPL"); + +MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID); +MODULE_FIRMWARE("b43/ucode11.fw"); +MODULE_FIRMWARE("b43/ucode13.fw"); +MODULE_FIRMWARE("b43/ucode14.fw"); +MODULE_FIRMWARE("b43/ucode15.fw"); +MODULE_FIRMWARE("b43/ucode5.fw"); +MODULE_FIRMWARE("b43/ucode9.fw"); + +static int modparam_bad_frames_preempt; +module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); +MODULE_PARM_DESC(bad_frames_preempt, + "enable(1) / disable(0) Bad Frames Preemption"); + +static char modparam_fwpostfix[16]; +module_param_string(fwpostfix, modparam_fwpostfix, 16, 0444); +MODULE_PARM_DESC(fwpostfix, "Postfix for the .fw files to load."); + +static int modparam_hwpctl; +module_param_named(hwpctl, modparam_hwpctl, int, 0444); +MODULE_PARM_DESC(hwpctl, "Enable hardware-side power control (default off)"); + +static int modparam_nohwcrypt; +module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); + +static int modparam_hwtkip; +module_param_named(hwtkip, modparam_hwtkip, int, 0444); +MODULE_PARM_DESC(hwtkip, "Enable hardware tkip."); + +static int modparam_qos = 1; +module_param_named(qos, modparam_qos, int, 0444); +MODULE_PARM_DESC(qos, "Enable QOS support (default on)"); + +static int modparam_btcoex = 1; +module_param_named(btcoex, modparam_btcoex, int, 0444); +MODULE_PARM_DESC(btcoex, "Enable Bluetooth coexistence (default on)"); + +int b43_modparam_verbose = B43_VERBOSITY_DEFAULT; +module_param_named(verbose, b43_modparam_verbose, int, 0644); +MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug"); + +int b43_modparam_pio = B43_PIO_DEFAULT; +module_param_named(pio, b43_modparam_pio, int, 0644); +MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO"); + +static const struct ssb_device_id b43_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 6), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 7), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 12), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16), + SSB_DEVTABLE_END +}; + +MODULE_DEVICE_TABLE(ssb, b43_ssb_tbl); + +/* Channel and ratetables are shared for all devices. + * They can't be const, because ieee80211 puts some precalculated + * data in there. This data is the same for all devices, so we don't + * get concurrency issues */ +#define RATETAB_ENT(_rateid, _flags) \ + { \ + .bitrate = B43_RATE_TO_BASE100KBPS(_rateid), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } + +/* + * NOTE: When changing this, sync with xmit.c's + * b43_plcp_get_bitrate_idx_* functions! 
+ */ +static struct ieee80211_rate __b43_ratetable[] = { + RATETAB_ENT(B43_CCK_RATE_1MB, 0), + RATETAB_ENT(B43_CCK_RATE_2MB, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(B43_CCK_RATE_5MB, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(B43_CCK_RATE_11MB, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(B43_OFDM_RATE_6MB, 0), + RATETAB_ENT(B43_OFDM_RATE_9MB, 0), + RATETAB_ENT(B43_OFDM_RATE_12MB, 0), + RATETAB_ENT(B43_OFDM_RATE_18MB, 0), + RATETAB_ENT(B43_OFDM_RATE_24MB, 0), + RATETAB_ENT(B43_OFDM_RATE_36MB, 0), + RATETAB_ENT(B43_OFDM_RATE_48MB, 0), + RATETAB_ENT(B43_OFDM_RATE_54MB, 0), +}; + +#define b43_a_ratetable (__b43_ratetable + 4) +#define b43_a_ratetable_size 8 +#define b43_b_ratetable (__b43_ratetable + 0) +#define b43_b_ratetable_size 4 +#define b43_g_ratetable (__b43_ratetable + 0) +#define b43_g_ratetable_size 12 + +#define CHAN4G(_channel, _freq, _flags) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} +static struct ieee80211_channel b43_2ghz_chantable[] = { + CHAN4G(1, 2412, 0), + CHAN4G(2, 2417, 0), + CHAN4G(3, 2422, 0), + CHAN4G(4, 2427, 0), + CHAN4G(5, 2432, 0), + CHAN4G(6, 2437, 0), + CHAN4G(7, 2442, 0), + CHAN4G(8, 2447, 0), + CHAN4G(9, 2452, 0), + CHAN4G(10, 2457, 0), + CHAN4G(11, 2462, 0), + CHAN4G(12, 2467, 0), + CHAN4G(13, 2472, 0), + CHAN4G(14, 2484, 0), +}; +#undef CHAN4G + +#define CHAN5G(_channel, _flags) { \ + .band = IEEE80211_BAND_5GHZ, \ + .center_freq = 5000 + (5 * (_channel)), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} +static struct ieee80211_channel b43_5ghz_nphy_chantable[] = { + CHAN5G(32, 0), CHAN5G(34, 0), + CHAN5G(36, 0), CHAN5G(38, 0), + CHAN5G(40, 0), CHAN5G(42, 0), + CHAN5G(44, 0), CHAN5G(46, 0), + CHAN5G(48, 0), CHAN5G(50, 0), + CHAN5G(52, 0), CHAN5G(54, 0), + CHAN5G(56, 0), CHAN5G(58, 0), + CHAN5G(60, 0), CHAN5G(62, 0), + CHAN5G(64, 0), CHAN5G(66, 0), + CHAN5G(68, 0), CHAN5G(70, 0), + CHAN5G(72, 0), CHAN5G(74, 0), + CHAN5G(76, 0), CHAN5G(78, 0), + CHAN5G(80, 0), CHAN5G(82, 0), + CHAN5G(84, 0), CHAN5G(86, 0), + CHAN5G(88, 0), CHAN5G(90, 0), + CHAN5G(92, 0), CHAN5G(94, 0), + CHAN5G(96, 0), CHAN5G(98, 0), + CHAN5G(100, 0), CHAN5G(102, 0), + CHAN5G(104, 0), CHAN5G(106, 0), + CHAN5G(108, 0), CHAN5G(110, 0), + CHAN5G(112, 0), CHAN5G(114, 0), + CHAN5G(116, 0), CHAN5G(118, 0), + CHAN5G(120, 0), CHAN5G(122, 0), + CHAN5G(124, 0), CHAN5G(126, 0), + CHAN5G(128, 0), CHAN5G(130, 0), + CHAN5G(132, 0), CHAN5G(134, 0), + CHAN5G(136, 0), CHAN5G(138, 0), + CHAN5G(140, 0), CHAN5G(142, 0), + CHAN5G(144, 0), CHAN5G(145, 0), + CHAN5G(146, 0), CHAN5G(147, 0), + CHAN5G(148, 0), CHAN5G(149, 0), + CHAN5G(150, 0), CHAN5G(151, 0), + CHAN5G(152, 0), CHAN5G(153, 0), + CHAN5G(154, 0), CHAN5G(155, 0), + CHAN5G(156, 0), CHAN5G(157, 0), + CHAN5G(158, 0), CHAN5G(159, 0), + CHAN5G(160, 0), CHAN5G(161, 0), + CHAN5G(162, 0), CHAN5G(163, 0), + CHAN5G(164, 0), CHAN5G(165, 0), + CHAN5G(166, 0), CHAN5G(168, 0), + CHAN5G(170, 0), CHAN5G(172, 0), + CHAN5G(174, 0), CHAN5G(176, 0), + CHAN5G(178, 0), CHAN5G(180, 0), + CHAN5G(182, 0), CHAN5G(184, 0), + CHAN5G(186, 0), CHAN5G(188, 0), + CHAN5G(190, 0), CHAN5G(192, 0), + CHAN5G(194, 0), CHAN5G(196, 0), + CHAN5G(198, 0), CHAN5G(200, 0), + CHAN5G(202, 0), CHAN5G(204, 0), + CHAN5G(206, 0), CHAN5G(208, 0), + CHAN5G(210, 0), CHAN5G(212, 0), + CHAN5G(214, 0), CHAN5G(216, 0), + CHAN5G(218, 0), CHAN5G(220, 0), + CHAN5G(222, 0), CHAN5G(224, 0), + CHAN5G(226, 0), CHAN5G(228, 0), +}; 
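[Editor's aside, not part of the original patch: the CHAN5G entries above rely on the macro deriving each center frequency as 5000 + 5 * channel MHz, whereas the 2.4 GHz table passes every frequency explicitly. Below is a minimal, standalone C sketch of that 5 GHz mapping; the helper name and the test channels are chosen only for this illustration and do not exist in the driver.]

#include <assert.h>

/* 5 GHz center frequency as computed by the CHAN5G macro: 5000 + 5 * channel, in MHz. */
static inline unsigned int chan5g_center_freq_mhz(unsigned int channel)
{
	return 5000 + 5 * channel;
}

int main(void)
{
	assert(chan5g_center_freq_mhz(36) == 5180);	/* matches CHAN5G(36, 0) */
	assert(chan5g_center_freq_mhz(64) == 5320);
	assert(chan5g_center_freq_mhz(165) == 5825);
	return 0;
}

[The 2.4 GHz table cannot use such a formula because channel 14 (2484 MHz) breaks the otherwise linear 2407 + 5 * channel spacing, which is presumably why CHAN4G takes the frequency as an explicit argument.]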
+ +static struct ieee80211_channel b43_5ghz_aphy_chantable[] = { + CHAN5G(34, 0), CHAN5G(36, 0), + CHAN5G(38, 0), CHAN5G(40, 0), + CHAN5G(42, 0), CHAN5G(44, 0), + CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(149, 0), + CHAN5G(153, 0), CHAN5G(157, 0), + CHAN5G(161, 0), CHAN5G(165, 0), + CHAN5G(184, 0), CHAN5G(188, 0), + CHAN5G(192, 0), CHAN5G(196, 0), + CHAN5G(200, 0), CHAN5G(204, 0), + CHAN5G(208, 0), CHAN5G(212, 0), + CHAN5G(216, 0), +}; +#undef CHAN5G + +static struct ieee80211_supported_band b43_band_5GHz_nphy = { + .band = IEEE80211_BAND_5GHZ, + .channels = b43_5ghz_nphy_chantable, + .n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable), + .bitrates = b43_a_ratetable, + .n_bitrates = b43_a_ratetable_size, +}; + +static struct ieee80211_supported_band b43_band_5GHz_aphy = { + .band = IEEE80211_BAND_5GHZ, + .channels = b43_5ghz_aphy_chantable, + .n_channels = ARRAY_SIZE(b43_5ghz_aphy_chantable), + .bitrates = b43_a_ratetable, + .n_bitrates = b43_a_ratetable_size, +}; + +static struct ieee80211_supported_band b43_band_2GHz = { + .band = IEEE80211_BAND_2GHZ, + .channels = b43_2ghz_chantable, + .n_channels = ARRAY_SIZE(b43_2ghz_chantable), + .bitrates = b43_g_ratetable, + .n_bitrates = b43_g_ratetable_size, +}; + +static void b43_wireless_core_exit(struct b43_wldev *dev); +static int b43_wireless_core_init(struct b43_wldev *dev); +static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev); +static int b43_wireless_core_start(struct b43_wldev *dev); + +static int b43_ratelimit(struct b43_wl *wl) +{ + if (!wl || !wl->current_dev) + return 1; + if (b43_status(wl->current_dev) < B43_STAT_STARTED) + return 1; + /* We are up and running. + * Ratelimit the messages to avoid DoS over the net. */ + return net_ratelimit(); +} + +void b43info(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (b43_modparam_verbose < B43_VERBOSITY_INFO) + return; + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_INFO "b43-%s: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43err(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (b43_modparam_verbose < B43_VERBOSITY_ERROR) + return; + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_ERR "b43-%s ERROR: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43warn(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (b43_modparam_verbose < B43_VERBOSITY_WARN) + return; + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_WARNING "b43-%s warning: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43dbg(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (b43_modparam_verbose < B43_VERBOSITY_DEBUG) + return; + va_start(args, fmt); + printk(KERN_DEBUG "b43-%s debug: ", + (wl && wl->hw) ? 
wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +static void b43_ram_write(struct b43_wldev *dev, u16 offset, u32 val) +{ + u32 macctl; + + B43_WARN_ON(offset % 4 != 0); + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + if (macctl & B43_MACCTL_BE) + val = swab32(val); + + b43_write32(dev, B43_MMIO_RAM_CONTROL, offset); + mmiowb(); + b43_write32(dev, B43_MMIO_RAM_DATA, val); +} + +static inline void b43_shm_control_word(struct b43_wldev *dev, + u16 routing, u16 offset) +{ + u32 control; + + /* "offset" is the WORD offset. */ + control = routing; + control <<= 16; + control |= offset; + b43_write32(dev, B43_MMIO_SHM_CONTROL, control); +} + +u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset) +{ + u32 ret; + + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + ret = b43_read16(dev, B43_MMIO_SHM_DATA_UNALIGNED); + b43_shm_control_word(dev, routing, (offset >> 2) + 1); + ret |= ((u32)b43_read16(dev, B43_MMIO_SHM_DATA)) << 16; + + goto out; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + ret = b43_read32(dev, B43_MMIO_SHM_DATA); +out: + return ret; +} + +u16 b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset) +{ + u16 ret; + + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + ret = b43_read16(dev, B43_MMIO_SHM_DATA_UNALIGNED); + + goto out; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + ret = b43_read16(dev, B43_MMIO_SHM_DATA); +out: + return ret; +} + +void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value) +{ + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + b43_write16(dev, B43_MMIO_SHM_DATA_UNALIGNED, + value & 0xFFFF); + b43_shm_control_word(dev, routing, (offset >> 2) + 1); + b43_write16(dev, B43_MMIO_SHM_DATA, + (value >> 16) & 0xFFFF); + return; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + b43_write32(dev, B43_MMIO_SHM_DATA, value); +} + +void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value) +{ + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + b43_write16(dev, B43_MMIO_SHM_DATA_UNALIGNED, value); + return; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + b43_write16(dev, B43_MMIO_SHM_DATA, value); +} + +/* Read HostFlags */ +u64 b43_hf_read(struct b43_wldev *dev) +{ + u64 ret; + + ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI); + ret <<= 16; + ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI); + ret <<= 16; + ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO); + + return ret; +} + +/* Write HostFlags */ +void b43_hf_write(struct b43_wldev *dev, u64 value) +{ + u16 lo, mi, hi; + + lo = (value & 0x00000000FFFFULL); + mi = (value & 0x0000FFFF0000ULL) >> 16; + hi = (value & 0xFFFF00000000ULL) >> 32; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi); +} + +/* Read the firmware capabilities bitmask (Opensource firmware only) */ +static u16 
b43_fwcapa_read(struct b43_wldev *dev) +{ + B43_WARN_ON(!dev->fw.opensource); + return b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_FWCAPA); +} + +void b43_tsf_read(struct b43_wldev *dev, u64 *tsf) +{ + u32 low, high; + + B43_WARN_ON(dev->dev->id.revision < 3); + + /* The hardware guarantees us an atomic read, if we + * read the low register first. */ + low = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_LOW); + high = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_HIGH); + + *tsf = high; + *tsf <<= 32; + *tsf |= low; +} + +static void b43_time_lock(struct b43_wldev *dev) +{ + u32 macctl; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl |= B43_MACCTL_TBTTHOLD; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write */ + b43_read32(dev, B43_MMIO_MACCTL); +} + +static void b43_time_unlock(struct b43_wldev *dev) +{ + u32 macctl; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_TBTTHOLD; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write */ + b43_read32(dev, B43_MMIO_MACCTL); +} + +static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf) +{ + u32 low, high; + + B43_WARN_ON(dev->dev->id.revision < 3); + + low = tsf; + high = (tsf >> 32); + /* The hardware guarantees us an atomic write, if we + * write the low register first. */ + b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, low); + mmiowb(); + b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, high); + mmiowb(); +} + +void b43_tsf_write(struct b43_wldev *dev, u64 tsf) +{ + b43_time_lock(dev); + b43_tsf_write_locked(dev, tsf); + b43_time_unlock(dev); +} + +static +void b43_macfilter_set(struct b43_wldev *dev, u16 offset, const u8 *mac) +{ + static const u8 zero_addr[ETH_ALEN] = { 0 }; + u16 data; + + if (!mac) + mac = zero_addr; + + offset |= 0x0020; + b43_write16(dev, B43_MMIO_MACFILTER_CONTROL, offset); + + data = mac[0]; + data |= mac[1] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); + data = mac[2]; + data |= mac[3] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); + data = mac[4]; + data |= mac[5] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); +} + +static void b43_write_mac_bssid_templates(struct b43_wldev *dev) +{ + const u8 *mac; + const u8 *bssid; + u8 mac_bssid[ETH_ALEN * 2]; + int i; + u32 tmp; + + bssid = dev->wl->bssid; + mac = dev->wl->mac_addr; + + b43_macfilter_set(dev, B43_MACFILTER_BSSID, bssid); + + memcpy(mac_bssid, mac, ETH_ALEN); + memcpy(mac_bssid + ETH_ALEN, bssid, ETH_ALEN); + + /* Write our MAC address and BSSID to template ram */ + for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32)) { + tmp = (u32) (mac_bssid[i + 0]); + tmp |= (u32) (mac_bssid[i + 1]) << 8; + tmp |= (u32) (mac_bssid[i + 2]) << 16; + tmp |= (u32) (mac_bssid[i + 3]) << 24; + b43_ram_write(dev, 0x20 + i, tmp); + } +} + +static void b43_upload_card_macaddress(struct b43_wldev *dev) +{ + b43_write_mac_bssid_templates(dev); + b43_macfilter_set(dev, B43_MACFILTER_SELF, dev->wl->mac_addr); +} + +static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) +{ + /* slot_time is in usec. */ + /* This test used to exit for all but a G PHY. */ + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + return; + b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); + /* Shared memory location 0x0010 is the slot time and should be + * set to slot_time; however, this register is initially 0 and changing + * the value adversely affects the transmit rate for BCM4311 + * devices. 
Until this behavior is unterstood, delete this step + * + * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); + */ +} + +static void b43_short_slot_timing_enable(struct b43_wldev *dev) +{ + b43_set_slot_time(dev, 9); +} + +static void b43_short_slot_timing_disable(struct b43_wldev *dev) +{ + b43_set_slot_time(dev, 20); +} + +/* DummyTransmission function, as documented on + * http://bcm-v4.sipsolutions.net/802.11/DummyTransmission + */ +void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on) +{ + struct b43_phy *phy = &dev->phy; + unsigned int i, max_loop; + u16 value; + u32 buffer[5] = { + 0x00000000, + 0x00D40000, + 0x00000000, + 0x01000000, + 0x00000000, + }; + + if (ofdm) { + max_loop = 0x1E; + buffer[0] = 0x000201CC; + } else { + max_loop = 0xFA; + buffer[0] = 0x000B846E; + } + + for (i = 0; i < 5; i++) + b43_ram_write(dev, i * 4, buffer[i]); + + b43_write16(dev, 0x0568, 0x0000); + if (dev->dev->id.revision < 11) + b43_write16(dev, 0x07C0, 0x0000); + else + b43_write16(dev, 0x07C0, 0x0100); + value = (ofdm ? 0x41 : 0x40); + b43_write16(dev, 0x050C, value); + if ((phy->type == B43_PHYTYPE_N) || (phy->type == B43_PHYTYPE_LP)) + b43_write16(dev, 0x0514, 0x1A02); + b43_write16(dev, 0x0508, 0x0000); + b43_write16(dev, 0x050A, 0x0000); + b43_write16(dev, 0x054C, 0x0000); + b43_write16(dev, 0x056A, 0x0014); + b43_write16(dev, 0x0568, 0x0826); + b43_write16(dev, 0x0500, 0x0000); + if (!pa_on && (phy->type == B43_PHYTYPE_N)) { + //SPEC TODO + } + + switch (phy->type) { + case B43_PHYTYPE_N: + b43_write16(dev, 0x0502, 0x00D0); + break; + case B43_PHYTYPE_LP: + b43_write16(dev, 0x0502, 0x0050); + break; + default: + b43_write16(dev, 0x0502, 0x0030); + } + + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43_radio_write16(dev, 0x0051, 0x0017); + for (i = 0x00; i < max_loop; i++) { + value = b43_read16(dev, 0x050E); + if (value & 0x0080) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43_read16(dev, 0x050E); + if (value & 0x0400) + break; + udelay(10); + } + for (i = 0x00; i < 0x19; i++) { + value = b43_read16(dev, 0x0690); + if (!(value & 0x0100)) + break; + udelay(10); + } + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43_radio_write16(dev, 0x0051, 0x0037); +} + +static void key_write(struct b43_wldev *dev, + u8 index, u8 algorithm, const u8 *key) +{ + unsigned int i; + u32 offset; + u16 value; + u16 kidx; + + /* Key index/algo block */ + kidx = b43_kidx_to_fw(dev, index); + value = ((kidx << 4) | algorithm); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_KEYIDXBLOCK + (kidx * 2), value); + + /* Write the key to the Key Table Pointer offset */ + offset = dev->ktp + (index * B43_SEC_KEYSIZE); + for (i = 0; i < B43_SEC_KEYSIZE; i += 2) { + value = key[i]; + value |= (u16) (key[i + 1]) << 8; + b43_shm_write16(dev, B43_SHM_SHARED, offset + i, value); + } +} + +static void keymac_write(struct b43_wldev *dev, u8 index, const u8 *addr) +{ + u32 addrtmp[2] = { 0, 0, }; + u8 pairwise_keys_start = B43_NR_GROUP_KEYS * 2; + + if (b43_new_kidx_api(dev)) + pairwise_keys_start = B43_NR_GROUP_KEYS; + + B43_WARN_ON(index < pairwise_keys_start); + /* We have four default TX keys and possibly four default RX keys. + * Physical mac 0 is mapped to physical key 4 or 8, depending + * on the firmware version. + * So we must adjust the index here. 
+ */ + index -= pairwise_keys_start; + B43_WARN_ON(index >= B43_NR_PAIRWISE_KEYS); + + if (addr) { + addrtmp[0] = addr[0]; + addrtmp[0] |= ((u32) (addr[1]) << 8); + addrtmp[0] |= ((u32) (addr[2]) << 16); + addrtmp[0] |= ((u32) (addr[3]) << 24); + addrtmp[1] = addr[4]; + addrtmp[1] |= ((u32) (addr[5]) << 8); + } + + /* Receive match transmitter address (RCMTA) mechanism */ + b43_shm_write32(dev, B43_SHM_RCMTA, + (index * 2) + 0, addrtmp[0]); + b43_shm_write16(dev, B43_SHM_RCMTA, + (index * 2) + 1, addrtmp[1]); +} + +/* The ucode will use phase1 key with TEK key to decrypt rx packets. + * When a packet is received, the iv32 is checked. + * - if it doesn't the packet is returned without modification (and software + * decryption can be done). That's what happen when iv16 wrap. + * - if it does, the rc4 key is computed, and decryption is tried. + * Either it will success and B43_RX_MAC_DEC is returned, + * either it fails and B43_RX_MAC_DEC|B43_RX_MAC_DECERR is returned + * and the packet is not usable (it got modified by the ucode). + * So in order to never have B43_RX_MAC_DECERR, we should provide + * a iv32 and phase1key that match. Because we drop packets in case of + * B43_RX_MAC_DECERR, if we have a correct iv32 but a wrong phase1key, all + * packets will be lost without higher layer knowing (ie no resync possible + * until next wrap). + * + * NOTE : this should support 50 key like RCMTA because + * (B43_SHM_SH_KEYIDXBLOCK - B43_SHM_SH_TKIPTSCTTAK)/14 = 50 + */ +static void rx_tkip_phase1_write(struct b43_wldev *dev, u8 index, u32 iv32, + u16 *phase1key) +{ + unsigned int i; + u32 offset; + u8 pairwise_keys_start = B43_NR_GROUP_KEYS * 2; + + if (!modparam_hwtkip) + return; + + if (b43_new_kidx_api(dev)) + pairwise_keys_start = B43_NR_GROUP_KEYS; + + B43_WARN_ON(index < pairwise_keys_start); + /* We have four default TX keys and possibly four default RX keys. + * Physical mac 0 is mapped to physical key 4 or 8, depending + * on the firmware version. + * So we must adjust the index here. + */ + index -= pairwise_keys_start; + B43_WARN_ON(index >= B43_NR_PAIRWISE_KEYS); + + if (b43_debug(dev, B43_DBG_KEYS)) { + b43dbg(dev->wl, "rx_tkip_phase1_write : idx 0x%x, iv32 0x%x\n", + index, iv32); + } + /* Write the key to the RX tkip shared mem */ + offset = B43_SHM_SH_TKIPTSCTTAK + index * (10 + 4); + for (i = 0; i < 10; i += 2) { + b43_shm_write16(dev, B43_SHM_SHARED, offset + i, + phase1key ? phase1key[i / 2] : 0); + } + b43_shm_write16(dev, B43_SHM_SHARED, offset + i, iv32); + b43_shm_write16(dev, B43_SHM_SHARED, offset + i + 2, iv32 >> 16); +} + +static void b43_op_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + int index = keyconf->hw_key_idx; + + if (B43_WARN_ON(!modparam_hwtkip)) + return; + + /* This is only called from the RX path through mac80211, where + * our mutex is already locked. 
*/ + B43_WARN_ON(!mutex_is_locked(&wl->mutex)); + dev = wl->current_dev; + B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED); + + keymac_write(dev, index, NULL); /* First zero out mac to avoid race */ + + rx_tkip_phase1_write(dev, index, iv32, phase1key); + /* only pairwise TKIP keys are supported right now */ + if (WARN_ON(!sta)) + return; + keymac_write(dev, index, sta->addr); +} + +static void do_key_write(struct b43_wldev *dev, + u8 index, u8 algorithm, + const u8 *key, size_t key_len, const u8 *mac_addr) +{ + u8 buf[B43_SEC_KEYSIZE] = { 0, }; + u8 pairwise_keys_start = B43_NR_GROUP_KEYS * 2; + + if (b43_new_kidx_api(dev)) + pairwise_keys_start = B43_NR_GROUP_KEYS; + + B43_WARN_ON(index >= ARRAY_SIZE(dev->key)); + B43_WARN_ON(key_len > B43_SEC_KEYSIZE); + + if (index >= pairwise_keys_start) + keymac_write(dev, index, NULL); /* First zero out mac. */ + if (algorithm == B43_SEC_ALGO_TKIP) { + /* + * We should provide an initial iv32, phase1key pair. + * We could start with iv32=0 and compute the corresponding + * phase1key, but this means calling ieee80211_get_tkip_key + * with a fake skb (or export other tkip function). + * Because we are lazy we hope iv32 won't start with + * 0xffffffff and let's b43_op_update_tkip_key provide a + * correct pair. + */ + rx_tkip_phase1_write(dev, index, 0xffffffff, (u16*)buf); + } else if (index >= pairwise_keys_start) /* clear it */ + rx_tkip_phase1_write(dev, index, 0, NULL); + if (key) + memcpy(buf, key, key_len); + key_write(dev, index, algorithm, buf); + if (index >= pairwise_keys_start) + keymac_write(dev, index, mac_addr); + + dev->key[index].algorithm = algorithm; +} + +static int b43_key_write(struct b43_wldev *dev, + int index, u8 algorithm, + const u8 *key, size_t key_len, + const u8 *mac_addr, + struct ieee80211_key_conf *keyconf) +{ + int i; + int pairwise_keys_start; + + /* For ALG_TKIP the key is encoded as a 256-bit (32 byte) data block: + * - Temporal Encryption Key (128 bits) + * - Temporal Authenticator Tx MIC Key (64 bits) + * - Temporal Authenticator Rx MIC Key (64 bits) + * + * Hardware only store TEK + */ + if (algorithm == B43_SEC_ALGO_TKIP && key_len == 32) + key_len = 16; + if (key_len > B43_SEC_KEYSIZE) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(dev->key); i++) { + /* Check that we don't already have this key. */ + B43_WARN_ON(dev->key[i].keyconf == keyconf); + } + if (index < 0) { + /* Pairwise key. Get an empty slot for the key. 
*/ + if (b43_new_kidx_api(dev)) + pairwise_keys_start = B43_NR_GROUP_KEYS; + else + pairwise_keys_start = B43_NR_GROUP_KEYS * 2; + for (i = pairwise_keys_start; + i < pairwise_keys_start + B43_NR_PAIRWISE_KEYS; + i++) { + B43_WARN_ON(i >= ARRAY_SIZE(dev->key)); + if (!dev->key[i].keyconf) { + /* found empty */ + index = i; + break; + } + } + if (index < 0) { + b43warn(dev->wl, "Out of hardware key memory\n"); + return -ENOSPC; + } + } else + B43_WARN_ON(index > 3); + + do_key_write(dev, index, algorithm, key, key_len, mac_addr); + if ((index <= 3) && !b43_new_kidx_api(dev)) { + /* Default RX key */ + B43_WARN_ON(mac_addr); + do_key_write(dev, index + 4, algorithm, key, key_len, NULL); + } + keyconf->hw_key_idx = index; + dev->key[index].keyconf = keyconf; + + return 0; +} + +static int b43_key_clear(struct b43_wldev *dev, int index) +{ + if (B43_WARN_ON((index < 0) || (index >= ARRAY_SIZE(dev->key)))) + return -EINVAL; + do_key_write(dev, index, B43_SEC_ALGO_NONE, + NULL, B43_SEC_KEYSIZE, NULL); + if ((index <= 3) && !b43_new_kidx_api(dev)) { + do_key_write(dev, index + 4, B43_SEC_ALGO_NONE, + NULL, B43_SEC_KEYSIZE, NULL); + } + dev->key[index].keyconf = NULL; + + return 0; +} + +static void b43_clear_keys(struct b43_wldev *dev) +{ + int i, count; + + if (b43_new_kidx_api(dev)) + count = B43_NR_GROUP_KEYS + B43_NR_PAIRWISE_KEYS; + else + count = B43_NR_GROUP_KEYS * 2 + B43_NR_PAIRWISE_KEYS; + for (i = 0; i < count; i++) + b43_key_clear(dev, i); +} + +static void b43_dump_keymemory(struct b43_wldev *dev) +{ + unsigned int i, index, count, offset, pairwise_keys_start; + u8 mac[ETH_ALEN]; + u16 algo; + u32 rcmta0; + u16 rcmta1; + u64 hf; + struct b43_key *key; + + if (!b43_debug(dev, B43_DBG_KEYS)) + return; + + hf = b43_hf_read(dev); + b43dbg(dev->wl, "Hardware key memory dump: USEDEFKEYS=%u\n", + !!(hf & B43_HF_USEDEFKEYS)); + if (b43_new_kidx_api(dev)) { + pairwise_keys_start = B43_NR_GROUP_KEYS; + count = B43_NR_GROUP_KEYS + B43_NR_PAIRWISE_KEYS; + } else { + pairwise_keys_start = B43_NR_GROUP_KEYS * 2; + count = B43_NR_GROUP_KEYS * 2 + B43_NR_PAIRWISE_KEYS; + } + for (index = 0; index < count; index++) { + key = &(dev->key[index]); + printk(KERN_DEBUG "Key slot %02u: %s", + index, (key->keyconf == NULL) ? 
" " : "*"); + offset = dev->ktp + (index * B43_SEC_KEYSIZE); + for (i = 0; i < B43_SEC_KEYSIZE; i += 2) { + u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, offset + i); + printk("%02X%02X", (tmp & 0xFF), ((tmp >> 8) & 0xFF)); + } + + algo = b43_shm_read16(dev, B43_SHM_SHARED, + B43_SHM_SH_KEYIDXBLOCK + (index * 2)); + printk(" Algo: %04X/%02X", algo, key->algorithm); + + if (index >= pairwise_keys_start) { + if (key->algorithm == B43_SEC_ALGO_TKIP) { + printk(" TKIP: "); + offset = B43_SHM_SH_TKIPTSCTTAK + (index - 4) * (10 + 4); + for (i = 0; i < 14; i += 2) { + u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, offset + i); + printk("%02X%02X", (tmp & 0xFF), ((tmp >> 8) & 0xFF)); + } + } + rcmta0 = b43_shm_read32(dev, B43_SHM_RCMTA, + ((index - pairwise_keys_start) * 2) + 0); + rcmta1 = b43_shm_read16(dev, B43_SHM_RCMTA, + ((index - pairwise_keys_start) * 2) + 1); + *((__le32 *)(&mac[0])) = cpu_to_le32(rcmta0); + *((__le16 *)(&mac[4])) = cpu_to_le16(rcmta1); + printk(" MAC: %pM", mac); + } else + printk(" DEFAULT KEY"); + printk("\n"); + } +} + +void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) +{ + u32 macctl; + u16 ucstat; + bool hwps; + bool awake; + int i; + + B43_WARN_ON((ps_flags & B43_PS_ENABLED) && + (ps_flags & B43_PS_DISABLED)); + B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP)); + + if (ps_flags & B43_PS_ENABLED) { + hwps = 1; + } else if (ps_flags & B43_PS_DISABLED) { + hwps = 0; + } else { + //TODO: If powersave is not off and FIXME is not set and we are not in adhoc + // and thus is not an AP and we are associated, set bit 25 + } + if (ps_flags & B43_PS_AWAKE) { + awake = 1; + } else if (ps_flags & B43_PS_ASLEEP) { + awake = 0; + } else { + //TODO: If the device is awake or this is an AP, or we are scanning, or FIXME, + // or we are associated, or FIXME, or the latest PS-Poll packet sent was + // successful, set bit26 + } + +/* FIXME: For now we force awake-on and hwps-off */ + hwps = 0; + awake = 1; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + if (hwps) + macctl |= B43_MACCTL_HWPS; + else + macctl &= ~B43_MACCTL_HWPS; + if (awake) + macctl |= B43_MACCTL_AWAKE; + else + macctl &= ~B43_MACCTL_AWAKE; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit write */ + b43_read32(dev, B43_MMIO_MACCTL); + if (awake && dev->dev->id.revision >= 5) { + /* Wait for the microcode to wake up. */ + for (i = 0; i < 100; i++) { + ucstat = b43_shm_read16(dev, B43_SHM_SHARED, + B43_SHM_SH_UCODESTAT); + if (ucstat != B43_SHM_SH_UCODESTAT_SLEEP) + break; + udelay(10); + } + } +} + +void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) +{ + u32 tmslow; + u32 macctl; + + flags |= B43_TMSLOW_PHYCLKEN; + flags |= B43_TMSLOW_PHYRESET; + ssb_device_enable(dev->dev, flags); + msleep(2); /* Wait for the PLL to turn on. */ + + /* Now take the PHY out of Reset again */ + tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + tmslow |= SSB_TMSLOW_FGC; + tmslow &= ~B43_TMSLOW_PHYRESET; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + tmslow &= ~SSB_TMSLOW_FGC; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + + /* Turn Analog ON, but only if we already know the PHY-type. + * This protects against very early setup where we don't know the + * PHY-type, yet. wireless_core_reset will be called once again later, + * when we know the PHY-type. 
*/ + if (dev->phy.ops) + dev->phy.ops->switch_analog(dev, 1); + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_GMODE; + if (flags & B43_TMSLOW_GMODE) + macctl |= B43_MACCTL_GMODE; + macctl |= B43_MACCTL_IHR_ENABLED; + b43_write32(dev, B43_MMIO_MACCTL, macctl); +} + +static void handle_irq_transmit_status(struct b43_wldev *dev) +{ + u32 v0, v1; + u16 tmp; + struct b43_txstatus stat; + + while (1) { + v0 = b43_read32(dev, B43_MMIO_XMITSTAT_0); + if (!(v0 & 0x00000001)) + break; + v1 = b43_read32(dev, B43_MMIO_XMITSTAT_1); + + stat.cookie = (v0 >> 16); + stat.seq = (v1 & 0x0000FFFF); + stat.phy_stat = ((v1 & 0x00FF0000) >> 16); + tmp = (v0 & 0x0000FFFF); + stat.frame_count = ((tmp & 0xF000) >> 12); + stat.rts_count = ((tmp & 0x0F00) >> 8); + stat.supp_reason = ((tmp & 0x001C) >> 2); + stat.pm_indicated = !!(tmp & 0x0080); + stat.intermediate = !!(tmp & 0x0040); + stat.for_ampdu = !!(tmp & 0x0020); + stat.acked = !!(tmp & 0x0002); + + b43_handle_txstatus(dev, &stat); + } +} + +static void drain_txstatus_queue(struct b43_wldev *dev) +{ + u32 dummy; + + if (dev->dev->id.revision < 5) + return; + /* Read all entries from the microcode TXstatus FIFO + * and throw them away. + */ + while (1) { + dummy = b43_read32(dev, B43_MMIO_XMITSTAT_0); + if (!(dummy & 0x00000001)) + break; + dummy = b43_read32(dev, B43_MMIO_XMITSTAT_1); + } +} + +static u32 b43_jssi_read(struct b43_wldev *dev) +{ + u32 val = 0; + + val = b43_shm_read16(dev, B43_SHM_SHARED, 0x08A); + val <<= 16; + val |= b43_shm_read16(dev, B43_SHM_SHARED, 0x088); + + return val; +} + +static void b43_jssi_write(struct b43_wldev *dev, u32 jssi) +{ + b43_shm_write16(dev, B43_SHM_SHARED, 0x088, (jssi & 0x0000FFFF)); + b43_shm_write16(dev, B43_SHM_SHARED, 0x08A, (jssi & 0xFFFF0000) >> 16); +} + +static void b43_generate_noise_sample(struct b43_wldev *dev) +{ + b43_jssi_write(dev, 0x7F7F7F7F); + b43_write32(dev, B43_MMIO_MACCMD, + b43_read32(dev, B43_MMIO_MACCMD) | B43_MACCMD_BGNOISE); +} + +static void b43_calculate_link_quality(struct b43_wldev *dev) +{ + /* Top half of Link Quality calculation. */ + + if (dev->phy.type != B43_PHYTYPE_G) + return; + if (dev->noisecalc.calculation_running) + return; + dev->noisecalc.calculation_running = 1; + dev->noisecalc.nr_samples = 0; + + b43_generate_noise_sample(dev); +} + +static void handle_irq_noise(struct b43_wldev *dev) +{ + struct b43_phy_g *phy = dev->phy.g; + u16 tmp; + u8 noise[4]; + u8 i, j; + s32 average; + + /* Bottom half of Link Quality calculation. */ + + if (dev->phy.type != B43_PHYTYPE_G) + return; + + /* Possible race condition: It might be possible that the user + * changed to a different channel in the meantime since we + * started the calculation. We ignore that fact, since it's + * not really that much of a problem. The background noise is + * an estimation only anyway. Slightly wrong results will get damped + * by the averaging of the 8 sample rounds. Additionally the + * value is shortlived. So it will be replaced by the next noise + * calculation round soon. */ + + B43_WARN_ON(!dev->noisecalc.calculation_running); + *((__le32 *)noise) = cpu_to_le32(b43_jssi_read(dev)); + if (noise[0] == 0x7F || noise[1] == 0x7F || + noise[2] == 0x7F || noise[3] == 0x7F) + goto generate_new; + + /* Get the noise samples. 
*/ + B43_WARN_ON(dev->noisecalc.nr_samples >= 8); + i = dev->noisecalc.nr_samples; + noise[0] = clamp_val(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[1] = clamp_val(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[2] = clamp_val(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[3] = clamp_val(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; + dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; + dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; + dev->noisecalc.samples[i][3] = phy->nrssi_lt[noise[3]]; + dev->noisecalc.nr_samples++; + if (dev->noisecalc.nr_samples == 8) { + /* Calculate the Link Quality by the noise samples. */ + average = 0; + for (i = 0; i < 8; i++) { + for (j = 0; j < 4; j++) + average += dev->noisecalc.samples[i][j]; + } + average /= (8 * 4); + average *= 125; + average += 64; + average /= 128; + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x40C); + tmp = (tmp / 128) & 0x1F; + if (tmp >= 8) + average += 2; + else + average -= 25; + if (tmp == 8) + average -= 72; + else + average -= 48; + + dev->stats.link_noise = average; + dev->noisecalc.calculation_running = 0; + return; + } +generate_new: + b43_generate_noise_sample(dev); +} + +static void handle_irq_tbtt_indication(struct b43_wldev *dev) +{ + if (b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) { + ///TODO: PS TBTT + } else { + if (1 /*FIXME: the last PSpoll frame was sent successfully */ ) + b43_power_saving_ctl_bits(dev, 0); + } + if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) + dev->dfq_valid = 1; +} + +static void handle_irq_atim_end(struct b43_wldev *dev) +{ + if (dev->dfq_valid) { + b43_write32(dev, B43_MMIO_MACCMD, + b43_read32(dev, B43_MMIO_MACCMD) + | B43_MACCMD_DFQ_VALID); + dev->dfq_valid = 0; + } +} + +static void handle_irq_pmq(struct b43_wldev *dev) +{ + u32 tmp; + + //TODO: AP mode. + + while (1) { + tmp = b43_read32(dev, B43_MMIO_PS_STATUS); + if (!(tmp & 0x00000008)) + break; + } + /* 16bit write is odd, but correct. */ + b43_write16(dev, B43_MMIO_PS_STATUS, 0x0002); +} + +static void b43_write_template_common(struct b43_wldev *dev, + const u8 *data, u16 size, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u32 i, tmp; + struct b43_plcp_hdr4 plcp; + + plcp.data = 0; + b43_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + b43_ram_write(dev, ram_offset, le32_to_cpu(plcp.data)); + ram_offset += sizeof(u32); + /* The PLCP is 6 bytes long, but we only wrote 4 bytes, yet. + * So leave the first two bytes of the next write blank. + */ + tmp = (u32) (data[0]) << 16; + tmp |= (u32) (data[1]) << 24; + b43_ram_write(dev, ram_offset, tmp); + ram_offset += sizeof(u32); + for (i = 2; i < size; i += sizeof(u32)) { + tmp = (u32) (data[i + 0]); + if (i + 1 < size) + tmp |= (u32) (data[i + 1]) << 8; + if (i + 2 < size) + tmp |= (u32) (data[i + 2]) << 16; + if (i + 3 < size) + tmp |= (u32) (data[i + 3]) << 24; + b43_ram_write(dev, ram_offset + i - 2, tmp); + } + b43_shm_write16(dev, B43_SHM_SHARED, shm_size_offset, + size + sizeof(struct b43_plcp_hdr6)); +} + +/* Check if the use of the antenna that ieee80211 told us to + * use is possible. This will fall back to DEFAULT. + * "antenna_nr" is the antenna identifier we got from ieee80211. */ +u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev, + u8 antenna_nr) +{ + u8 antenna_mask; + + if (antenna_nr == 0) { + /* Zero means "use default antenna". That's always OK. */ + return 0; + } + + /* Get the mask of available antennas. 
*/ + if (dev->phy.gmode) + antenna_mask = dev->dev->bus->sprom.ant_available_bg; + else + antenna_mask = dev->dev->bus->sprom.ant_available_a; + + if (!(antenna_mask & (1 << (antenna_nr - 1)))) { + /* This antenna is not available. Fall back to default. */ + return 0; + } + + return antenna_nr; +} + +/* Convert a b43 antenna number value to the PHY TX control value. */ +static u16 b43_antenna_to_phyctl(int antenna) +{ + switch (antenna) { + case B43_ANTENNA0: + return B43_TXH_PHY_ANT0; + case B43_ANTENNA1: + return B43_TXH_PHY_ANT1; + case B43_ANTENNA2: + return B43_TXH_PHY_ANT2; + case B43_ANTENNA3: + return B43_TXH_PHY_ANT3; + case B43_ANTENNA_AUTO0: + case B43_ANTENNA_AUTO1: + return B43_TXH_PHY_ANT01AUTO; + } + B43_WARN_ON(1); + return 0; +} + +static void b43_write_beacon_template(struct b43_wldev *dev, + u16 ram_offset, + u16 shm_size_offset) +{ + unsigned int i, len, variable_len; + const struct ieee80211_mgmt *bcn; + const u8 *ie; + bool tim_found = 0; + unsigned int rate; + u16 ctl; + int antenna; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon); + + bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data); + len = min((size_t) dev->wl->current_beacon->len, + 0x200 - sizeof(struct b43_plcp_hdr6)); + rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value; + + b43_write_template_common(dev, (const u8 *)bcn, + len, ram_offset, shm_size_offset, rate); + + /* Write the PHY TX control parameters. */ + antenna = B43_ANTENNA_DEFAULT; + antenna = b43_antenna_to_phyctl(antenna); + ctl = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL); + /* We can't send beacons with short preamble. Would get PHY errors. */ + ctl &= ~B43_TXH_PHY_SHORTPRMBL; + ctl &= ~B43_TXH_PHY_ANT; + ctl &= ~B43_TXH_PHY_ENC; + ctl |= antenna; + if (b43_is_cck_rate(rate)) + ctl |= B43_TXH_PHY_ENC_CCK; + else + ctl |= B43_TXH_PHY_ENC_OFDM; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, ctl); + + /* Find the position of the TIM and the DTIM_period value + * and write them to SHM. */ + ie = bcn->u.beacon.variable; + variable_len = len - offsetof(struct ieee80211_mgmt, u.beacon.variable); + for (i = 0; i < variable_len - 2; ) { + uint8_t ie_id, ie_len; + + ie_id = ie[i]; + ie_len = ie[i + 1]; + if (ie_id == 5) { + u16 tim_position; + u16 dtim_period; + /* This is the TIM Information Element */ + + /* Check whether the ie_len is in the beacon data range. */ + if (variable_len < ie_len + 2 + i) + break; + /* A valid TIM is at least 4 bytes long. */ + if (ie_len < 4) + break; + tim_found = 1; + + tim_position = sizeof(struct b43_plcp_hdr6); + tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable); + tim_position += i; + + dtim_period = ie[i + 3]; + + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_TIMBPOS, tim_position); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_DTIMPER, dtim_period); + break; + } + i += ie_len + 2; + } + if (!tim_found) { + /* + * If ucode wants to modify TIM do it behind the beacon, this + * will happen, for example, when doing mesh networking. 
+ */ + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_TIMBPOS, + len + sizeof(struct b43_plcp_hdr6)); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_DTIMPER, 0); + } + b43dbg(dev->wl, "Updated beacon template at 0x%x\n", ram_offset); +} + +static void b43_upload_beacon0(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + + if (wl->beacon0_uploaded) + return; + b43_write_beacon_template(dev, 0x68, 0x18); + wl->beacon0_uploaded = 1; +} + +static void b43_upload_beacon1(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + + if (wl->beacon1_uploaded) + return; + b43_write_beacon_template(dev, 0x468, 0x1A); + wl->beacon1_uploaded = 1; +} + +static void handle_irq_beacon(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + u32 cmd, beacon0_valid, beacon1_valid; + + if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && + !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) + return; + + /* This is the bottom half of the asynchronous beacon update. */ + + /* Ignore interrupt in the future. */ + dev->irq_mask &= ~B43_IRQ_BEACON; + + cmd = b43_read32(dev, B43_MMIO_MACCMD); + beacon0_valid = (cmd & B43_MACCMD_BEACON0_VALID); + beacon1_valid = (cmd & B43_MACCMD_BEACON1_VALID); + + /* Schedule interrupt manually, if busy. */ + if (beacon0_valid && beacon1_valid) { + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_BEACON); + dev->irq_mask |= B43_IRQ_BEACON; + return; + } + + if (unlikely(wl->beacon_templates_virgin)) { + /* We never uploaded a beacon before. + * Upload both templates now, but only mark one valid. */ + wl->beacon_templates_virgin = 0; + b43_upload_beacon0(dev); + b43_upload_beacon1(dev); + cmd = b43_read32(dev, B43_MMIO_MACCMD); + cmd |= B43_MACCMD_BEACON0_VALID; + b43_write32(dev, B43_MMIO_MACCMD, cmd); + } else { + if (!beacon0_valid) { + b43_upload_beacon0(dev); + cmd = b43_read32(dev, B43_MMIO_MACCMD); + cmd |= B43_MACCMD_BEACON0_VALID; + b43_write32(dev, B43_MMIO_MACCMD, cmd); + } else if (!beacon1_valid) { + b43_upload_beacon1(dev); + cmd = b43_read32(dev, B43_MMIO_MACCMD); + cmd |= B43_MACCMD_BEACON1_VALID; + b43_write32(dev, B43_MMIO_MACCMD, cmd); + } + } +} + +static void b43_do_beacon_update_trigger_work(struct b43_wldev *dev) +{ + u32 old_irq_mask = dev->irq_mask; + + /* update beacon right away or defer to irq */ + handle_irq_beacon(dev); + if (old_irq_mask != dev->irq_mask) { + /* The handler updated the IRQ mask. */ + B43_WARN_ON(!dev->irq_mask); + if (b43_read32(dev, B43_MMIO_GEN_IRQ_MASK)) { + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask); + } else { + /* Device interrupts are currently disabled. That means + * we just ran the hardirq handler and scheduled the + * IRQ thread. The thread will write the IRQ mask when + * it finished, so there's nothing to do here. Writing + * the mask _here_ would incorrectly re-enable IRQs. */ + } + } +} + +static void b43_beacon_update_trigger_work(struct work_struct *work) +{ + struct b43_wl *wl = container_of(work, struct b43_wl, + beacon_update_trigger); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) { + if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + /* wl->mutex is enough. */ + b43_do_beacon_update_trigger_work(dev); + mmiowb(); + } else { + spin_lock_irq(&wl->hardirq_lock); + b43_do_beacon_update_trigger_work(dev); + mmiowb(); + spin_unlock_irq(&wl->hardirq_lock); + } + } + mutex_unlock(&wl->mutex); +} + +/* Asynchronously update the packet templates in template RAM. + * Locking: Requires wl->mutex to be locked. 
*/ +static void b43_update_templates(struct b43_wl *wl) +{ + struct sk_buff *beacon; + + /* This is the top half of the ansynchronous beacon update. + * The bottom half is the beacon IRQ. + * Beacon update must be asynchronous to avoid sending an + * invalid beacon. This can happen for example, if the firmware + * transmits a beacon while we are updating it. */ + + /* We could modify the existing beacon and set the aid bit in + * the TIM field, but that would probably require resizing and + * moving of data within the beacon template. + * Simply request a new beacon and let mac80211 do the hard work. */ + beacon = ieee80211_beacon_get(wl->hw, wl->vif); + if (unlikely(!beacon)) + return; + + if (wl->current_beacon) + dev_kfree_skb_any(wl->current_beacon); + wl->current_beacon = beacon; + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; + ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger); +} + +static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int) +{ + b43_time_lock(dev); + if (dev->dev->id.revision >= 3) { + b43_write32(dev, B43_MMIO_TSF_CFP_REP, (beacon_int << 16)); + b43_write32(dev, B43_MMIO_TSF_CFP_START, (beacon_int << 10)); + } else { + b43_write16(dev, 0x606, (beacon_int >> 6)); + b43_write16(dev, 0x610, beacon_int); + } + b43_time_unlock(dev); + b43dbg(dev->wl, "Set beacon interval to %u\n", beacon_int); +} + +static void b43_handle_firmware_panic(struct b43_wldev *dev) +{ + u16 reason; + + /* Read the register that contains the reason code for the panic. */ + reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_FWPANIC_REASON_REG); + b43err(dev->wl, "Whoopsy, firmware panic! Reason: %u\n", reason); + + switch (reason) { + default: + b43dbg(dev->wl, "The panic reason is unknown.\n"); + /* fallthrough */ + case B43_FWPANIC_DIE: + /* Do not restart the controller or firmware. + * The device is nonfunctional from now on. + * Restarting would result in this panic to trigger again, + * so we avoid that recursion. */ + break; + case B43_FWPANIC_RESTART: + b43_controller_restart(dev, "Microcode panic"); + break; + } +} + +static void handle_irq_ucode_debug(struct b43_wldev *dev) +{ + unsigned int i, cnt; + u16 reason, marker_id, marker_line; + __le16 *buf; + + /* The proprietary firmware doesn't have this IRQ. */ + if (!dev->fw.opensource) + return; + + /* Read the register that contains the reason code for this IRQ. */ + reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_DEBUGIRQ_REASON_REG); + + switch (reason) { + case B43_DEBUGIRQ_PANIC: + b43_handle_firmware_panic(dev); + break; + case B43_DEBUGIRQ_DUMP_SHM: + if (!B43_DEBUG) + break; /* Only with driver debugging enabled. */ + buf = kmalloc(4096, GFP_ATOMIC); + if (!buf) { + b43dbg(dev->wl, "SHM-dump: Failed to allocate memory\n"); + goto out; + } + for (i = 0; i < 4096; i += 2) { + u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, i); + buf[i / 2] = cpu_to_le16(tmp); + } + b43info(dev->wl, "Shared memory dump:\n"); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, + 16, 2, buf, 4096, 1); + kfree(buf); + break; + case B43_DEBUGIRQ_DUMP_REGS: + if (!B43_DEBUG) + break; /* Only with driver debugging enabled. */ + b43info(dev->wl, "Microcode register dump:\n"); + for (i = 0, cnt = 0; i < 64; i++) { + u16 tmp = b43_shm_read16(dev, B43_SHM_SCRATCH, i); + if (cnt == 0) + printk(KERN_INFO); + printk("r%02u: 0x%04X ", i, tmp); + cnt++; + if (cnt == 6) { + printk("\n"); + cnt = 0; + } + } + printk("\n"); + break; + case B43_DEBUGIRQ_MARKER: + if (!B43_DEBUG) + break; /* Only with driver debugging enabled. 
*/ + marker_id = b43_shm_read16(dev, B43_SHM_SCRATCH, + B43_MARKER_ID_REG); + marker_line = b43_shm_read16(dev, B43_SHM_SCRATCH, + B43_MARKER_LINE_REG); + b43info(dev->wl, "The firmware just executed the MARKER(%u) " + "at line number %u\n", + marker_id, marker_line); + break; + default: + b43dbg(dev->wl, "Debug-IRQ triggered for unknown reason: %u\n", + reason); + } +out: + /* Acknowledge the debug-IRQ, so the firmware can continue. */ + b43_shm_write16(dev, B43_SHM_SCRATCH, + B43_DEBUGIRQ_REASON_REG, B43_DEBUGIRQ_ACK); +} + +static void b43_do_interrupt_thread(struct b43_wldev *dev) +{ + u32 reason; + u32 dma_reason[ARRAY_SIZE(dev->dma_reason)]; + u32 merged_dma_reason = 0; + int i; + + if (unlikely(b43_status(dev) != B43_STAT_STARTED)) + return; + + reason = dev->irq_reason; + for (i = 0; i < ARRAY_SIZE(dma_reason); i++) { + dma_reason[i] = dev->dma_reason[i]; + merged_dma_reason |= dma_reason[i]; + } + + if (unlikely(reason & B43_IRQ_MAC_TXERR)) + b43err(dev->wl, "MAC transmission error\n"); + + if (unlikely(reason & B43_IRQ_PHY_TXERR)) { + b43err(dev->wl, "PHY transmission error\n"); + rmb(); + if (unlikely(atomic_dec_and_test(&dev->phy.txerr_cnt))) { + atomic_set(&dev->phy.txerr_cnt, + B43_PHY_TX_BADNESS_LIMIT); + b43err(dev->wl, "Too many PHY TX errors, " + "restarting the controller\n"); + b43_controller_restart(dev, "PHY TX errors"); + } + } + + if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | + B43_DMAIRQ_NONFATALMASK))) { + if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { + b43err(dev->wl, "Fatal DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43err(dev->wl, "This device does not support DMA " + "on your system. Please use PIO instead.\n"); + /* Fall back to PIO transfers if we get fatal DMA errors! */ + dev->use_pio = 1; + b43_controller_restart(dev, "DMA error"); + return; + } + if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { + b43err(dev->wl, "DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + } + } + + if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) + handle_irq_ucode_debug(dev); + if (reason & B43_IRQ_TBTT_INDI) + handle_irq_tbtt_indication(dev); + if (reason & B43_IRQ_ATIM_END) + handle_irq_atim_end(dev); + if (reason & B43_IRQ_BEACON) + handle_irq_beacon(dev); + if (reason & B43_IRQ_PMQ) + handle_irq_pmq(dev); + if (reason & B43_IRQ_TXFIFO_FLUSH_OK) + ;/* TODO */ + if (reason & B43_IRQ_NOISESAMPLE_OK) + handle_irq_noise(dev); + + /* Check the DMA reason registers for received data. */ + if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { + if (b43_using_pio_transfers(dev)) + b43_pio_rx(dev->pio.rx_queue); + else + b43_dma_rx(dev->dma.rx_ring); + } + B43_WARN_ON(dma_reason[1] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[2] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[3] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[4] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[5] & B43_DMAIRQ_RX_DONE); + + if (reason & B43_IRQ_TX_OK) + handle_irq_transmit_status(dev); + + /* Re-enable interrupts on the device by restoring the current interrupt mask. 
*/ + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask); + +#if B43_DEBUG + if (b43_debug(dev, B43_DBG_VERBOSESTATS)) { + dev->irq_count++; + for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) { + if (reason & (1 << i)) + dev->irq_bit_count[i]++; + } + } +#endif +} + +/* Interrupt thread handler. Handles device interrupts in thread context. */ +static irqreturn_t b43_interrupt_thread_handler(int irq, void *dev_id) +{ + struct b43_wldev *dev = dev_id; + + mutex_lock(&dev->wl->mutex); + b43_do_interrupt_thread(dev); + mmiowb(); + mutex_unlock(&dev->wl->mutex); + + return IRQ_HANDLED; +} + +static irqreturn_t b43_do_interrupt(struct b43_wldev *dev) +{ + u32 reason; + + /* This code runs under wl->hardirq_lock, but _only_ on non-SDIO busses. + * On SDIO, this runs under wl->mutex. */ + + reason = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (reason == 0xffffffff) /* shared IRQ */ + return IRQ_NONE; + reason &= dev->irq_mask; + if (!reason) + return IRQ_HANDLED; + + dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) + & 0x0001DC00; + dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) + & 0x0000DC00; + dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) + & 0x0000DC00; + dev->dma_reason[3] = b43_read32(dev, B43_MMIO_DMA3_REASON) + & 0x0001DC00; + dev->dma_reason[4] = b43_read32(dev, B43_MMIO_DMA4_REASON) + & 0x0000DC00; +/* Unused ring + dev->dma_reason[5] = b43_read32(dev, B43_MMIO_DMA5_REASON) + & 0x0000DC00; +*/ + + /* ACK the interrupt. */ + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, reason); + b43_write32(dev, B43_MMIO_DMA0_REASON, dev->dma_reason[0]); + b43_write32(dev, B43_MMIO_DMA1_REASON, dev->dma_reason[1]); + b43_write32(dev, B43_MMIO_DMA2_REASON, dev->dma_reason[2]); + b43_write32(dev, B43_MMIO_DMA3_REASON, dev->dma_reason[3]); + b43_write32(dev, B43_MMIO_DMA4_REASON, dev->dma_reason[4]); +/* Unused ring + b43_write32(dev, B43_MMIO_DMA5_REASON, dev->dma_reason[5]); +*/ + + /* Disable IRQs on the device. The IRQ thread handler will re-enable them. */ + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); + /* Save the reason bitmasks for the IRQ thread handler. */ + dev->irq_reason = reason; + + return IRQ_WAKE_THREAD; +} + +/* Interrupt handler top-half. This runs with interrupts disabled. */ +static irqreturn_t b43_interrupt_handler(int irq, void *dev_id) +{ + struct b43_wldev *dev = dev_id; + irqreturn_t ret; + + if (unlikely(b43_status(dev) < B43_STAT_STARTED)) + return IRQ_NONE; + + spin_lock(&dev->wl->hardirq_lock); + ret = b43_do_interrupt(dev); + mmiowb(); + spin_unlock(&dev->wl->hardirq_lock); + + return ret; +} + +/* SDIO interrupt handler. This runs in process context. */ +static void b43_sdio_interrupt_handler(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + irqreturn_t ret; + + mutex_lock(&wl->mutex); + + ret = b43_do_interrupt(dev); + if (ret == IRQ_WAKE_THREAD) + b43_do_interrupt_thread(dev); + + mutex_unlock(&wl->mutex); +} + +void b43_do_release_fw(struct b43_firmware_file *fw) +{ + release_firmware(fw->data); + fw->data = NULL; + fw->filename = NULL; +} + +static void b43_release_firmware(struct b43_wldev *dev) +{ + b43_do_release_fw(&dev->fw.ucode); + b43_do_release_fw(&dev->fw.pcm); + b43_do_release_fw(&dev->fw.initvals); + b43_do_release_fw(&dev->fw.initvals_band); +} + +static void b43_print_fw_helptext(struct b43_wl *wl, bool error) +{ + const char text[] = + "You must go to " \ + "http://wireless.kernel.org/en/users/Drivers/b43#devicefirmware " \ + "and download the correct firmware for this driver version. 
" \ + "Please carefully read all instructions on this website.\n"; + + if (error) + b43err(wl, text); + else + b43warn(wl, text); +} + +int b43_do_request_fw(struct b43_request_fw_context *ctx, + const char *name, + struct b43_firmware_file *fw) +{ + const struct firmware *blob; + struct b43_fw_header *hdr; + u32 size; + int err; + + if (!name) { + /* Don't fetch anything. Free possibly cached firmware. */ + /* FIXME: We should probably keep it anyway, to save some headache + * on suspend/resume with multiband devices. */ + b43_do_release_fw(fw); + return 0; + } + if (fw->filename) { + if ((fw->type == ctx->req_type) && + (strcmp(fw->filename, name) == 0)) + return 0; /* Already have this fw. */ + /* Free the cached firmware first. */ + /* FIXME: We should probably do this later after we successfully + * got the new fw. This could reduce headache with multiband devices. + * We could also redesign this to cache the firmware for all possible + * bands all the time. */ + b43_do_release_fw(fw); + } + + switch (ctx->req_type) { + case B43_FWTYPE_PROPRIETARY: + snprintf(ctx->fwname, sizeof(ctx->fwname), + "b43%s/%s.fw", + modparam_fwpostfix, name); + break; + case B43_FWTYPE_OPENSOURCE: + snprintf(ctx->fwname, sizeof(ctx->fwname), + "b43-open%s/%s.fw", + modparam_fwpostfix, name); + break; + default: + B43_WARN_ON(1); + return -ENOSYS; + } + err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev); + if (err == -ENOENT) { + snprintf(ctx->errors[ctx->req_type], + sizeof(ctx->errors[ctx->req_type]), + "Firmware file \"%s\" not found\n", ctx->fwname); + return err; + } else if (err) { + snprintf(ctx->errors[ctx->req_type], + sizeof(ctx->errors[ctx->req_type]), + "Firmware file \"%s\" request failed (err=%d)\n", + ctx->fwname, err); + return err; + } + if (blob->size < sizeof(struct b43_fw_header)) + goto err_format; + hdr = (struct b43_fw_header *)(blob->data); + switch (hdr->type) { + case B43_FW_TYPE_UCODE: + case B43_FW_TYPE_PCM: + size = be32_to_cpu(hdr->size); + if (size != blob->size - sizeof(struct b43_fw_header)) + goto err_format; + /* fallthrough */ + case B43_FW_TYPE_IV: + if (hdr->ver != 1) + goto err_format; + break; + default: + goto err_format; + } + + fw->data = blob; + fw->filename = name; + fw->type = ctx->req_type; + + return 0; + +err_format: + snprintf(ctx->errors[ctx->req_type], + sizeof(ctx->errors[ctx->req_type]), + "Firmware file \"%s\" format error.\n", ctx->fwname); + release_firmware(blob); + + return -EPROTO; +} + +static int b43_try_request_fw(struct b43_request_fw_context *ctx) +{ + struct b43_wldev *dev = ctx->dev; + struct b43_firmware *fw = &ctx->dev->fw; + const u8 rev = ctx->dev->dev->id.revision; + const char *filename; + u32 tmshigh; + int err; + + /* Get microcode */ + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + if ((rev >= 5) && (rev <= 10)) + filename = "ucode5"; + else if ((rev >= 11) && (rev <= 12)) + filename = "ucode11"; + else if (rev == 13) + filename = "ucode13"; + else if (rev == 14) + filename = "ucode14"; + else if (rev >= 15) + filename = "ucode15"; + else + goto err_no_ucode; + err = b43_do_request_fw(ctx, filename, &fw->ucode); + if (err) + goto err_load; + + /* Get PCM code */ + if ((rev >= 5) && (rev <= 10)) + filename = "pcm5"; + else if (rev >= 11) + filename = NULL; + else + goto err_no_pcm; + fw->pcm_request_failed = 0; + err = b43_do_request_fw(ctx, filename, &fw->pcm); + if (err == -ENOENT) { + /* We did not find a PCM file? Not fatal, but + * core rev <= 10 must do without hwcrypto then. 
*/ + fw->pcm_request_failed = 1; + } else if (err) + goto err_load; + + /* Get initvals */ + switch (dev->phy.type) { + case B43_PHYTYPE_A: + if ((rev >= 5) && (rev <= 10)) { + if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY) + filename = "a0g1initvals5"; + else + filename = "a0g0initvals5"; + } else + goto err_no_initvals; + break; + case B43_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0initvals5"; + else if (rev >= 13) + filename = "b0g0initvals13"; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_N: + if ((rev >= 11) && (rev <= 12)) + filename = "n0initvals11"; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_LP: + if (rev == 13) + filename = "lp0initvals13"; + else if (rev == 14) + filename = "lp0initvals14"; + else if (rev >= 15) + filename = "lp0initvals15"; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = b43_do_request_fw(ctx, filename, &fw->initvals); + if (err) + goto err_load; + + /* Get bandswitch initvals */ + switch (dev->phy.type) { + case B43_PHYTYPE_A: + if ((rev >= 5) && (rev <= 10)) { + if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY) + filename = "a0g1bsinitvals5"; + else + filename = "a0g0bsinitvals5"; + } else if (rev >= 11) + filename = NULL; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0bsinitvals5"; + else if (rev >= 11) + filename = NULL; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_N: + if ((rev >= 11) && (rev <= 12)) + filename = "n0bsinitvals11"; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_LP: + if (rev == 13) + filename = "lp0bsinitvals13"; + else if (rev == 14) + filename = "lp0bsinitvals14"; + else if (rev >= 15) + filename = "lp0bsinitvals15"; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = b43_do_request_fw(ctx, filename, &fw->initvals_band); + if (err) + goto err_load; + + return 0; + +err_no_ucode: + err = ctx->fatal_failure = -EOPNOTSUPP; + b43err(dev->wl, "The driver does not know which firmware (ucode) " + "is required for your device (wl-core rev %u)\n", rev); + goto error; + +err_no_pcm: + err = ctx->fatal_failure = -EOPNOTSUPP; + b43err(dev->wl, "The driver does not know which firmware (PCM) " + "is required for your device (wl-core rev %u)\n", rev); + goto error; + +err_no_initvals: + err = ctx->fatal_failure = -EOPNOTSUPP; + b43err(dev->wl, "The driver does not know which firmware (initvals) " + "is required for your device (wl-core rev %u)\n", rev); + goto error; + +err_load: + /* We failed to load this firmware image. The error message + * already is in ctx->errors. Return and let our caller decide + * what to do. */ + goto error; + +error: + b43_release_firmware(dev); + return err; +} + +static int b43_request_firmware(struct b43_wldev *dev) +{ + struct b43_request_fw_context *ctx; + unsigned int i; + int err; + const char *errmsg; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + ctx->dev = dev; + + ctx->req_type = B43_FWTYPE_PROPRIETARY; + err = b43_try_request_fw(ctx); + if (!err) + goto out; /* Successfully loaded it. */ + err = ctx->fatal_failure; + if (err) + goto out; + + ctx->req_type = B43_FWTYPE_OPENSOURCE; + err = b43_try_request_fw(ctx); + if (!err) + goto out; /* Successfully loaded it. */ + err = ctx->fatal_failure; + if (err) + goto out; + + /* Could not find a usable firmware. Print the errors. 
*/ + for (i = 0; i < B43_NR_FWTYPES; i++) { + errmsg = ctx->errors[i]; + if (strlen(errmsg)) + b43err(dev->wl, errmsg); + } + b43_print_fw_helptext(dev->wl, 1); + err = -ENOENT; + +out: + kfree(ctx); + return err; +} + +static int b43_upload_microcode(struct b43_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43_fw_header); + const __be32 *data; + unsigned int i, len; + u16 fwrev, fwpatch, fwdate, fwtime; + u32 tmp, macctl; + int err = 0; + + /* Jump the microcode PSM to offset 0 */ + macctl = b43_read32(dev, B43_MMIO_MACCTL); + B43_WARN_ON(macctl & B43_MACCTL_PSM_RUN); + macctl |= B43_MACCTL_PSM_JMP0; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Zero out all microcode PSM registers and shared memory. */ + for (i = 0; i < 64; i++) + b43_shm_write16(dev, B43_SHM_SCRATCH, i, 0); + for (i = 0; i < 4096; i += 2) + b43_shm_write16(dev, B43_SHM_SHARED, i, 0); + + /* Upload Microcode. */ + data = (__be32 *) (dev->fw.ucode.data->data + hdr_len); + len = (dev->fw.ucode.data->size - hdr_len) / sizeof(__be32); + b43_shm_control_word(dev, B43_SHM_UCODE | B43_SHM_AUTOINC_W, 0x0000); + for (i = 0; i < len; i++) { + b43_write32(dev, B43_MMIO_SHM_DATA, be32_to_cpu(data[i])); + udelay(10); + } + + if (dev->fw.pcm.data) { + /* Upload PCM data. */ + data = (__be32 *) (dev->fw.pcm.data->data + hdr_len); + len = (dev->fw.pcm.data->size - hdr_len) / sizeof(__be32); + b43_shm_control_word(dev, B43_SHM_HW, 0x01EA); + b43_write32(dev, B43_MMIO_SHM_DATA, 0x00004000); + /* No need for autoinc bit in SHM_HW */ + b43_shm_control_word(dev, B43_SHM_HW, 0x01EB); + for (i = 0; i < len; i++) { + b43_write32(dev, B43_MMIO_SHM_DATA, be32_to_cpu(data[i])); + udelay(10); + } + } + + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_ALL); + + /* Start the microcode PSM */ + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_PSM_JMP0; + macctl |= B43_MACCTL_PSM_RUN; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + + /* Wait for the microcode to load and respond */ + i = 0; + while (1) { + tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (tmp == B43_IRQ_MAC_SUSPENDED) + break; + i++; + if (i >= 20) { + b43err(dev->wl, "Microcode not responding\n"); + b43_print_fw_helptext(dev->wl, 1); + err = -ENODEV; + goto error; + } + msleep(50); + } + b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); /* dummy read */ + + /* Get and check the revisions. */ + fwrev = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEREV); + fwpatch = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEPATCH); + fwdate = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEDATE); + fwtime = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODETIME); + + if (fwrev <= 0x128) { + b43err(dev->wl, "YOUR FIRMWARE IS TOO OLD. Firmware from " + "binary drivers older than version 4.x is unsupported. " + "You must upgrade your firmware files.\n"); + b43_print_fw_helptext(dev->wl, 1); + err = -EOPNOTSUPP; + goto error; + } + dev->fw.rev = fwrev; + dev->fw.patch = fwpatch; + dev->fw.opensource = (fwdate == 0xFFFF); + + /* Default to use-all-queues. */ + dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues; + dev->qos_enabled = !!modparam_qos; + /* Default to firmware/hardware crypto acceleration. */ + dev->hwcrypto_enabled = 1; + + if (dev->fw.opensource) { + u16 fwcapa; + + /* Patchlevel info is encoded in the "time" field. 
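+ * Opensource firmware sets the date field to 0xFFFF, which is how
+ * dev->fw.opensource was detected above, so the date slot is free to
+ * carry the patchlevel instead.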
*/ + dev->fw.patch = fwtime; + b43info(dev->wl, "Loading OpenSource firmware version %u.%u\n", + dev->fw.rev, dev->fw.patch); + + fwcapa = b43_fwcapa_read(dev); + if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) { + b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n"); + /* Disable hardware crypto and fall back to software crypto. */ + dev->hwcrypto_enabled = 0; + } + if (!(fwcapa & B43_FWCAPA_QOS)) { + b43info(dev->wl, "QoS not supported by firmware\n"); + /* Disable QoS. Tweak hw->queues to 1. It will be restored before + * ieee80211_unregister to make sure the networking core can + * properly free possible resources. */ + dev->wl->hw->queues = 1; + dev->qos_enabled = 0; + } + } else { + b43info(dev->wl, "Loading firmware version %u.%u " + "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", + fwrev, fwpatch, + (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF, + (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F); + if (dev->fw.pcm_request_failed) { + b43warn(dev->wl, "No \"pcm5.fw\" firmware file found. " + "Hardware accelerated cryptography is disabled.\n"); + b43_print_fw_helptext(dev->wl, 0); + } + } + + if (b43_is_old_txhdr_format(dev)) { + /* We're over the deadline, but we keep support for old fw + * until it turns out to be in major conflict with something new. */ + b43warn(dev->wl, "You are using an old firmware image. " + "Support for old firmware will be removed soon " + "(official deadline was July 2008).\n"); + b43_print_fw_helptext(dev->wl, 0); + } + + return 0; + +error: + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_PSM_RUN; + macctl |= B43_MACCTL_PSM_JMP0; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + + return err; +} + +static int b43_write_initvals(struct b43_wldev *dev, + const struct b43_iv *ivals, + size_t count, + size_t array_size) +{ + const struct b43_iv *iv; + u16 offset; + size_t i; + bool bit32; + + BUILD_BUG_ON(sizeof(struct b43_iv) != 6); + iv = ivals; + for (i = 0; i < count; i++) { + if (array_size < sizeof(iv->offset_size)) + goto err_format; + array_size -= sizeof(iv->offset_size); + offset = be16_to_cpu(iv->offset_size); + bit32 = !!(offset & B43_IV_32BIT); + offset &= B43_IV_OFFSET_MASK; + if (offset >= 0x1000) + goto err_format; + if (bit32) { + u32 value; + + if (array_size < sizeof(iv->data.d32)) + goto err_format; + array_size -= sizeof(iv->data.d32); + + value = get_unaligned_be32(&iv->data.d32); + b43_write32(dev, offset, value); + + iv = (const struct b43_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be32)); + } else { + u16 value; + + if (array_size < sizeof(iv->data.d16)) + goto err_format; + array_size -= sizeof(iv->data.d16); + + value = be16_to_cpu(iv->data.d16); + b43_write16(dev, offset, value); + + iv = (const struct b43_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be16)); + } + } + if (array_size) + goto err_format; + + return 0; + +err_format: + b43err(dev->wl, "Initial Values Firmware file-format error.\n"); + b43_print_fw_helptext(dev->wl, 1); + + return -EPROTO; +} + +static int b43_upload_initvals(struct b43_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43_fw_header); + const struct b43_fw_header *hdr; + struct b43_firmware *fw = &dev->fw; + const struct b43_iv *ivals; + size_t count; + int err; + + hdr = (const struct b43_fw_header *)(fw->initvals.data->data); + ivals = (const struct b43_iv *)(fw->initvals.data->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43_write_initvals(dev, ivals, count, + 
fw->initvals.data->size - hdr_len); + if (err) + goto out; + if (fw->initvals_band.data) { + hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data); + ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43_write_initvals(dev, ivals, count, + fw->initvals_band.data->size - hdr_len); + if (err) + goto out; + } +out: + + return err; +} + +/* Initialize the GPIOs + * http://bcm-specs.sipsolutions.net/GPIO + */ +static int b43_gpio_init(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + u32 mask, set; + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_GPOUTSMSK); + + b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x000F); + + mask = 0x0000001F; + set = 0x0000000F; + if (dev->dev->bus->chip_id == 0x4301) { + mask |= 0x0060; + set |= 0x0060; + } + if (0 /* FIXME: conditional unknown */ ) { + b43_write16(dev, B43_MMIO_GPIO_MASK, + b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x0100); + mask |= 0x0180; + set |= 0x0180; + } + if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) { + b43_write16(dev, B43_MMIO_GPIO_MASK, + b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x0200); + mask |= 0x0200; + set |= 0x0200; + } + if (dev->dev->id.revision >= 2) + mask |= 0x0010; /* FIXME: This is redundant. */ + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return 0; + ssb_write32(gpiodev, B43_GPIO_CONTROL, + (ssb_read32(gpiodev, B43_GPIO_CONTROL) + & mask) | set); + + return 0; +} + +/* Turn off all GPIO stuff. Call this on module unload, for example. */ +static void b43_gpio_cleanup(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return; + ssb_write32(gpiodev, B43_GPIO_CONTROL, 0); +} + +/* http://bcm-specs.sipsolutions.net/EnableMac */ +void b43_mac_enable(struct b43_wldev *dev) +{ + if (b43_debug(dev, B43_DBG_FIRMWARE)) { + u16 fwstate; + + fwstate = b43_shm_read16(dev, B43_SHM_SHARED, + B43_SHM_SH_UCODESTAT); + if ((fwstate != B43_SHM_SH_UCODESTAT_SUSP) && + (fwstate != B43_SHM_SH_UCODESTAT_SLEEP)) { + b43err(dev->wl, "b43_mac_enable(): The firmware " + "should be suspended, but current state is %u\n", + fwstate); + } + } + + dev->mac_suspended--; + B43_WARN_ON(dev->mac_suspended < 0); + if (dev->mac_suspended == 0) { + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + | B43_MACCTL_ENABLED); + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, + B43_IRQ_MAC_SUSPENDED); + /* Commit writes */ + b43_read32(dev, B43_MMIO_MACCTL); + b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + b43_power_saving_ctl_bits(dev, 0); + } +} + +/* http://bcm-specs.sipsolutions.net/SuspendMAC */ +void b43_mac_suspend(struct b43_wldev *dev) +{ + int i; + u32 tmp; + + might_sleep(); + B43_WARN_ON(dev->mac_suspended < 0); + + if (dev->mac_suspended == 0) { + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_ENABLED); + /* force pci to flush the write */ + b43_read32(dev, B43_MMIO_MACCTL); + for (i = 35; i; i--) { + tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (tmp & B43_IRQ_MAC_SUSPENDED) + goto out; + udelay(10); + } + /* Hm, it seems this will take some time. Use msleep(). 
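+ * That is, after the 35 * 10us of udelay() polling above, keep
+ * polling once per millisecond for up to another 40ms before
+ * giving up.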
*/ + for (i = 40; i; i--) { + tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (tmp & B43_IRQ_MAC_SUSPENDED) + goto out; + msleep(1); + } + b43err(dev->wl, "MAC suspend failed\n"); + } +out: + dev->mac_suspended++; +} + +static void b43_adjust_opmode(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + u32 ctl; + u16 cfp_pretbtt; + + ctl = b43_read32(dev, B43_MMIO_MACCTL); + /* Reset status to STA infrastructure mode. */ + ctl &= ~B43_MACCTL_AP; + ctl &= ~B43_MACCTL_KEEP_CTL; + ctl &= ~B43_MACCTL_KEEP_BADPLCP; + ctl &= ~B43_MACCTL_KEEP_BAD; + ctl &= ~B43_MACCTL_PROMISC; + ctl &= ~B43_MACCTL_BEACPROMISC; + ctl |= B43_MACCTL_INFRA; + + if (b43_is_mode(wl, NL80211_IFTYPE_AP) || + b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) + ctl |= B43_MACCTL_AP; + else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) + ctl &= ~B43_MACCTL_INFRA; + + if (wl->filter_flags & FIF_CONTROL) + ctl |= B43_MACCTL_KEEP_CTL; + if (wl->filter_flags & FIF_FCSFAIL) + ctl |= B43_MACCTL_KEEP_BAD; + if (wl->filter_flags & FIF_PLCPFAIL) + ctl |= B43_MACCTL_KEEP_BADPLCP; + if (wl->filter_flags & FIF_PROMISC_IN_BSS) + ctl |= B43_MACCTL_PROMISC; + if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC) + ctl |= B43_MACCTL_BEACPROMISC; + + /* Workaround: On old hardware the HW-MAC-address-filter + * doesn't work properly, so always run promisc in filter + * it in software. */ + if (dev->dev->id.revision <= 4) + ctl |= B43_MACCTL_PROMISC; + + b43_write32(dev, B43_MMIO_MACCTL, ctl); + + cfp_pretbtt = 2; + if ((ctl & B43_MACCTL_INFRA) && !(ctl & B43_MACCTL_AP)) { + if (dev->dev->bus->chip_id == 0x4306 && + dev->dev->bus->chip_rev == 3) + cfp_pretbtt = 100; + else + cfp_pretbtt = 50; + } + b43_write16(dev, 0x612, cfp_pretbtt); + + /* FIXME: We don't currently implement the PMQ mechanism, + * so always disable it. If we want to implement PMQ, + * we need to enable it here (clear DISCPMQ) in AP mode. + */ + if (0 /* ctl & B43_MACCTL_AP */) { + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_DISCPMQ); + } else { + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + | B43_MACCTL_DISCPMQ); + } +} + +static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm) +{ + u16 offset; + + if (is_ofdm) { + offset = 0x480; + offset += (b43_plcp_get_ratecode_ofdm(rate) & 0x000F) * 2; + } else { + offset = 0x4C0; + offset += (b43_plcp_get_ratecode_cck(rate) & 0x000F) * 2; + } + b43_shm_write16(dev, B43_SHM_SHARED, offset + 0x20, + b43_shm_read16(dev, B43_SHM_SHARED, offset)); +} + +static void b43_rate_memory_init(struct b43_wldev *dev) +{ + switch (dev->phy.type) { + case B43_PHYTYPE_A: + case B43_PHYTYPE_G: + case B43_PHYTYPE_N: + case B43_PHYTYPE_LP: + b43_rate_memory_write(dev, B43_OFDM_RATE_6MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_12MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_18MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_24MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1); + if (dev->phy.type == B43_PHYTYPE_A) + break; + /* fallthrough */ + case B43_PHYTYPE_B: + b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_2MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_5MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_11MB, 0); + break; + default: + B43_WARN_ON(1); + } +} + +/* Set the default values for the PHY TX Control Words. 
*/ +static void b43_set_phytxctl_defaults(struct b43_wldev *dev) +{ + u16 ctl = 0; + + ctl |= B43_TXH_PHY_ENC_CCK; + ctl |= B43_TXH_PHY_ANT01AUTO; + ctl |= B43_TXH_PHY_TXPWR; + + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, ctl); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL, ctl); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL, ctl); +} + +/* Set the TX-Antenna for management frames sent by firmware. */ +static void b43_mgmtframe_txantenna(struct b43_wldev *dev, int antenna) +{ + u16 ant; + u16 tmp; + + ant = b43_antenna_to_phyctl(antenna); + + /* For ACK/CTS */ + tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL); + tmp = (tmp & ~B43_TXH_PHY_ANT) | ant; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL, tmp); + /* For Probe Resposes */ + tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL); + tmp = (tmp & ~B43_TXH_PHY_ANT) | ant; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL, tmp); +} + +/* This is the opposite of b43_chip_init() */ +static void b43_chip_exit(struct b43_wldev *dev) +{ + b43_phy_exit(dev); + b43_gpio_cleanup(dev); + /* firmware is released later */ +} + +/* Initialize the chip + * http://bcm-specs.sipsolutions.net/ChipInit + */ +static int b43_chip_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int err; + u32 value32, macctl; + u16 value16; + + /* Initialize the MAC control */ + macctl = B43_MACCTL_IHR_ENABLED | B43_MACCTL_SHM_ENABLED; + if (dev->phy.gmode) + macctl |= B43_MACCTL_GMODE; + macctl |= B43_MACCTL_INFRA; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + + err = b43_request_firmware(dev); + if (err) + goto out; + err = b43_upload_microcode(dev); + if (err) + goto out; /* firmware is released later */ + + err = b43_gpio_init(dev); + if (err) + goto out; /* firmware is released later */ + + err = b43_upload_initvals(dev); + if (err) + goto err_gpio_clean; + + /* Turn the Analog on and initialize the PHY. */ + phy->ops->switch_analog(dev, 1); + err = b43_phy_init(dev); + if (err) + goto err_gpio_clean; + + /* Disable Interference Mitigation. */ + if (phy->ops->interf_mitigation) + phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE); + + /* Select the antennae */ + if (phy->ops->set_rx_antenna) + phy->ops->set_rx_antenna(dev, B43_ANTENNA_DEFAULT); + b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT); + + if (phy->type == B43_PHYTYPE_B) { + value16 = b43_read16(dev, 0x005E); + value16 |= 0x0004; + b43_write16(dev, 0x005E, value16); + } + b43_write32(dev, 0x0100, 0x01000000); + if (dev->dev->id.revision < 5) + b43_write32(dev, 0x010C, 0x01000000); + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_INFRA); + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + | B43_MACCTL_INFRA); + + /* Probe Response Timeout value */ + /* FIXME: Default to 0, has to be set by ioctl probably... :-/ */ + b43_shm_write16(dev, B43_SHM_SHARED, 0x0074, 0x0000); + + /* Initially set the wireless operation mode. 
*/ + b43_adjust_opmode(dev); + + if (dev->dev->id.revision < 3) { + b43_write16(dev, 0x060E, 0x0000); + b43_write16(dev, 0x0610, 0x8000); + b43_write16(dev, 0x0604, 0x0000); + b43_write16(dev, 0x0606, 0x0200); + } else { + b43_write32(dev, 0x0188, 0x80000000); + b43_write32(dev, 0x018C, 0x02000000); + } + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); + b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA4_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA5_IRQ_MASK, 0x0000DC00); + + value32 = ssb_read32(dev->dev, SSB_TMSLOW); + value32 |= 0x00100000; + ssb_write32(dev->dev, SSB_TMSLOW, value32); + + b43_write16(dev, B43_MMIO_POWERUP_DELAY, + dev->dev->bus->chipco.fast_pwrup_delay); + + err = 0; + b43dbg(dev->wl, "Chip initialized\n"); +out: + return err; + +err_gpio_clean: + b43_gpio_cleanup(dev); + return err; +} + +static void b43_periodic_every60sec(struct b43_wldev *dev) +{ + const struct b43_phy_operations *ops = dev->phy.ops; + + if (ops->pwork_60sec) + ops->pwork_60sec(dev); + + /* Force check the TX power emission now. */ + b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME); +} + +static void b43_periodic_every30sec(struct b43_wldev *dev) +{ + /* Update device statistics. */ + b43_calculate_link_quality(dev); +} + +static void b43_periodic_every15sec(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 wdr; + + if (dev->fw.opensource) { + /* Check if the firmware is still alive. + * It will reset the watchdog counter to 0 in its idle loop. */ + wdr = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_WATCHDOG_REG); + if (unlikely(wdr)) { + b43err(dev->wl, "Firmware watchdog: The firmware died!\n"); + b43_controller_restart(dev, "Firmware watchdog"); + return; + } else { + b43_shm_write16(dev, B43_SHM_SCRATCH, + B43_WATCHDOG_REG, 1); + } + } + + if (phy->ops->pwork_15sec) + phy->ops->pwork_15sec(dev); + + atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); + wmb(); + +#if B43_DEBUG + if (b43_debug(dev, B43_DBG_VERBOSESTATS)) { + unsigned int i; + + b43dbg(dev->wl, "Stats: %7u IRQs/sec, %7u TX/sec, %7u RX/sec\n", + dev->irq_count / 15, + dev->tx_count / 15, + dev->rx_count / 15); + dev->irq_count = 0; + dev->tx_count = 0; + dev->rx_count = 0; + for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) { + if (dev->irq_bit_count[i]) { + b43dbg(dev->wl, "Stats: %7u IRQ-%02u/sec (0x%08X)\n", + dev->irq_bit_count[i] / 15, i, (1 << i)); + dev->irq_bit_count[i] = 0; + } + } + } +#endif +} + +static void do_periodic_work(struct b43_wldev *dev) +{ + unsigned int state; + + state = dev->periodic_state; + if (state % 4 == 0) + b43_periodic_every60sec(dev); + if (state % 2 == 0) + b43_periodic_every30sec(dev); + b43_periodic_every15sec(dev); +} + +/* Periodic work locking policy: + * The whole periodic work handler is protected by + * wl->mutex. If another lock is needed somewhere in the + * pwork callchain, it's acquired in-place, where it's needed. 
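+ * Note that the mac80211 callbacks in this file (b43_op_config,
+ * b43_op_bss_info_changed, b43_op_set_key, ...) take wl->mutex as
+ * well, so the periodic work is serialized against them.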
+ */ +static void b43_periodic_work_handler(struct work_struct *work) +{ + struct b43_wldev *dev = container_of(work, struct b43_wldev, + periodic_work.work); + struct b43_wl *wl = dev->wl; + unsigned long delay; + + mutex_lock(&wl->mutex); + + if (unlikely(b43_status(dev) != B43_STAT_STARTED)) + goto out; + if (b43_debug(dev, B43_DBG_PWORK_STOP)) + goto out_requeue; + + do_periodic_work(dev); + + dev->periodic_state++; +out_requeue: + if (b43_debug(dev, B43_DBG_PWORK_FAST)) + delay = msecs_to_jiffies(50); + else + delay = round_jiffies_relative(HZ * 15); + ieee80211_queue_delayed_work(wl->hw, &dev->periodic_work, delay); +out: + mutex_unlock(&wl->mutex); +} + +static void b43_periodic_tasks_setup(struct b43_wldev *dev) +{ + struct delayed_work *work = &dev->periodic_work; + + dev->periodic_state = 0; + INIT_DELAYED_WORK(work, b43_periodic_work_handler); + ieee80211_queue_delayed_work(dev->wl->hw, work, 0); +} + +/* Check if communication with the device works correctly. */ +static int b43_validate_chipaccess(struct b43_wldev *dev) +{ + u32 v, backup0, backup4; + + backup0 = b43_shm_read32(dev, B43_SHM_SHARED, 0); + backup4 = b43_shm_read32(dev, B43_SHM_SHARED, 4); + + /* Check for read/write and endianness problems. */ + b43_shm_write32(dev, B43_SHM_SHARED, 0, 0x55AAAA55); + if (b43_shm_read32(dev, B43_SHM_SHARED, 0) != 0x55AAAA55) + goto error; + b43_shm_write32(dev, B43_SHM_SHARED, 0, 0xAA5555AA); + if (b43_shm_read32(dev, B43_SHM_SHARED, 0) != 0xAA5555AA) + goto error; + + /* Check if unaligned 32bit SHM_SHARED access works properly. + * However, don't bail out on failure, because it's noncritical. */ + b43_shm_write16(dev, B43_SHM_SHARED, 0, 0x1122); + b43_shm_write16(dev, B43_SHM_SHARED, 2, 0x3344); + b43_shm_write16(dev, B43_SHM_SHARED, 4, 0x5566); + b43_shm_write16(dev, B43_SHM_SHARED, 6, 0x7788); + if (b43_shm_read32(dev, B43_SHM_SHARED, 2) != 0x55663344) + b43warn(dev->wl, "Unaligned 32bit SHM read access is broken\n"); + b43_shm_write32(dev, B43_SHM_SHARED, 2, 0xAABBCCDD); + if (b43_shm_read16(dev, B43_SHM_SHARED, 0) != 0x1122 || + b43_shm_read16(dev, B43_SHM_SHARED, 2) != 0xCCDD || + b43_shm_read16(dev, B43_SHM_SHARED, 4) != 0xAABB || + b43_shm_read16(dev, B43_SHM_SHARED, 6) != 0x7788) + b43warn(dev->wl, "Unaligned 32bit SHM write access is broken\n"); + + b43_shm_write32(dev, B43_SHM_SHARED, 0, backup0); + b43_shm_write32(dev, B43_SHM_SHARED, 4, backup4); + + if ((dev->dev->id.revision >= 3) && (dev->dev->id.revision <= 10)) { + /* The 32bit register shadows the two 16bit registers + * with update sideeffects. Validate this. */ + b43_write16(dev, B43_MMIO_TSF_CFP_START, 0xAAAA); + b43_write32(dev, B43_MMIO_TSF_CFP_START, 0xCCCCBBBB); + if (b43_read16(dev, B43_MMIO_TSF_CFP_START_LOW) != 0xBBBB) + goto error; + if (b43_read16(dev, B43_MMIO_TSF_CFP_START_HIGH) != 0xCCCC) + goto error; + } + b43_write32(dev, B43_MMIO_TSF_CFP_START, 0); + + v = b43_read32(dev, B43_MMIO_MACCTL); + v |= B43_MACCTL_GMODE; + if (v != (B43_MACCTL_GMODE | B43_MACCTL_IHR_ENABLED)) + goto error; + + return 0; +error: + b43err(dev->wl, "Failed to validate the chipaccess\n"); + return -ENODEV; +} + +static void b43_security_init(struct b43_wldev *dev) +{ + dev->ktp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_KTP); + /* KTP is a word address, but we address SHM bytewise. + * So multiply by two. + */ + dev->ktp *= 2; + /* Number of RCMTA address slots */ + b43_write16(dev, B43_MMIO_RCMTA_COUNT, B43_NR_PAIRWISE_KEYS); + /* Clear the key memory. 
*/ + b43_clear_keys(dev); +} + +#ifdef CONFIG_B43_HWRNG +static int b43_rng_read(struct hwrng *rng, u32 *data) +{ + struct b43_wl *wl = (struct b43_wl *)rng->priv; + struct b43_wldev *dev; + int count = -ENODEV; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (likely(dev && b43_status(dev) >= B43_STAT_INITIALIZED)) { + *data = b43_read16(dev, B43_MMIO_RNG); + count = sizeof(u16); + } + mutex_unlock(&wl->mutex); + + return count; +} +#endif /* CONFIG_B43_HWRNG */ + +static void b43_rng_exit(struct b43_wl *wl) +{ +#ifdef CONFIG_B43_HWRNG + if (wl->rng_initialized) + hwrng_unregister(&wl->rng); +#endif /* CONFIG_B43_HWRNG */ +} + +static int b43_rng_init(struct b43_wl *wl) +{ + int err = 0; + +#ifdef CONFIG_B43_HWRNG + snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name), + "%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy)); + wl->rng.name = wl->rng_name; + wl->rng.data_read = b43_rng_read; + wl->rng.priv = (unsigned long)wl; + wl->rng_initialized = 1; + err = hwrng_register(&wl->rng); + if (err) { + wl->rng_initialized = 0; + b43err(wl, "Failed to register the random " + "number generator (%d)\n", err); + } +#endif /* CONFIG_B43_HWRNG */ + + return err; +} + +static void b43_tx_work(struct work_struct *work) +{ + struct b43_wl *wl = container_of(work, struct b43_wl, tx_work); + struct b43_wldev *dev; + struct sk_buff *skb; + int err = 0; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED)) { + mutex_unlock(&wl->mutex); + return; + } + + while (skb_queue_len(&wl->tx_queue)) { + skb = skb_dequeue(&wl->tx_queue); + + if (b43_using_pio_transfers(dev)) + err = b43_pio_tx(dev, skb); + else + err = b43_dma_tx(dev, skb); + if (unlikely(err)) + dev_kfree_skb(skb); /* Drop it */ + } + +#if B43_DEBUG + dev->tx_count++; +#endif + mutex_unlock(&wl->mutex); +} + +static int b43_op_tx(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + + if (unlikely(skb->len < 2 + 2 + 6)) { + /* Too short, this can't be a valid frame. */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + B43_WARN_ON(skb_shinfo(skb)->nr_frags); + + skb_queue_tail(&wl->tx_queue, skb); + ieee80211_queue_work(wl->hw, &wl->tx_work); + + return NETDEV_TX_OK; +} + +static void b43_qos_params_upload(struct b43_wldev *dev, + const struct ieee80211_tx_queue_params *p, + u16 shm_offset) +{ + u16 params[B43_NR_QOSPARAMS]; + int bslots, tmp; + unsigned int i; + + if (!dev->qos_enabled) + return; + + bslots = b43_read16(dev, B43_MMIO_RNG) & p->cw_min; + + memset(¶ms, 0, sizeof(params)); + + params[B43_QOSPARAM_TXOP] = p->txop * 32; + params[B43_QOSPARAM_CWMIN] = p->cw_min; + params[B43_QOSPARAM_CWMAX] = p->cw_max; + params[B43_QOSPARAM_CWCUR] = p->cw_min; + params[B43_QOSPARAM_AIFS] = p->aifs; + params[B43_QOSPARAM_BSLOTS] = bslots; + params[B43_QOSPARAM_REGGAP] = bslots + p->aifs; + + for (i = 0; i < ARRAY_SIZE(params); i++) { + if (i == B43_QOSPARAM_STATUS) { + tmp = b43_shm_read16(dev, B43_SHM_SHARED, + shm_offset + (i * 2)); + /* Mark the parameters as updated. */ + tmp |= 0x100; + b43_shm_write16(dev, B43_SHM_SHARED, + shm_offset + (i * 2), + tmp); + } else { + b43_shm_write16(dev, B43_SHM_SHARED, + shm_offset + (i * 2), + params[i]); + } + } +} + +/* Mapping of mac80211 queue numbers to b43 QoS SHM offsets. 
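+ * mac80211 orders its queues from highest to lowest priority, so
+ * queue 0 (voice) maps to B43_QOS_VOICE and queue 3 (background)
+ * maps to B43_QOS_BACKGROUND.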
*/ +static const u16 b43_qos_shm_offsets[] = { + /* [mac80211-queue-nr] = SHM_OFFSET, */ + [0] = B43_QOS_VOICE, + [1] = B43_QOS_VIDEO, + [2] = B43_QOS_BESTEFFORT, + [3] = B43_QOS_BACKGROUND, +}; + +/* Update all QOS parameters in hardware. */ +static void b43_qos_upload_all(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct b43_qos_params *params; + unsigned int i; + + if (!dev->qos_enabled) + return; + + BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) != + ARRAY_SIZE(wl->qos_params)); + + b43_mac_suspend(dev); + for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) { + params = &(wl->qos_params[i]); + b43_qos_params_upload(dev, &(params->p), + b43_qos_shm_offsets[i]); + } + b43_mac_enable(dev); +} + +static void b43_qos_clear(struct b43_wl *wl) +{ + struct b43_qos_params *params; + unsigned int i; + + /* Initialize QoS parameters to sane defaults. */ + + BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) != + ARRAY_SIZE(wl->qos_params)); + + for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) { + params = &(wl->qos_params[i]); + + switch (b43_qos_shm_offsets[i]) { + case B43_QOS_VOICE: + params->p.txop = 0; + params->p.aifs = 2; + params->p.cw_min = 0x0001; + params->p.cw_max = 0x0001; + break; + case B43_QOS_VIDEO: + params->p.txop = 0; + params->p.aifs = 2; + params->p.cw_min = 0x0001; + params->p.cw_max = 0x0001; + break; + case B43_QOS_BESTEFFORT: + params->p.txop = 0; + params->p.aifs = 3; + params->p.cw_min = 0x0001; + params->p.cw_max = 0x03FF; + break; + case B43_QOS_BACKGROUND: + params->p.txop = 0; + params->p.aifs = 7; + params->p.cw_min = 0x0001; + params->p.cw_max = 0x03FF; + break; + default: + B43_WARN_ON(1); + } + } +} + +/* Initialize the core's QOS capabilities */ +static void b43_qos_init(struct b43_wldev *dev) +{ + if (!dev->qos_enabled) { + /* Disable QOS support. */ + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_EDCF); + b43_write16(dev, B43_MMIO_IFSCTL, + b43_read16(dev, B43_MMIO_IFSCTL) + & ~B43_MMIO_IFSCTL_USE_EDCF); + b43dbg(dev->wl, "QoS disabled\n"); + return; + } + + /* Upload the current QOS parameters. */ + b43_qos_upload_all(dev); + + /* Enable QOS support. */ + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF); + b43_write16(dev, B43_MMIO_IFSCTL, + b43_read16(dev, B43_MMIO_IFSCTL) + | B43_MMIO_IFSCTL_USE_EDCF); + b43dbg(dev->wl, "QoS enabled\n"); +} + +static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue, + const struct ieee80211_tx_queue_params *params) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + unsigned int queue = (unsigned int)_queue; + int err = -ENODEV; + + if (queue >= ARRAY_SIZE(wl->qos_params)) { + /* Queue not available or don't support setting + * params on this queue. Return success to not + * confuse mac80211. 
*/ + return 0; + } + BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) != + ARRAY_SIZE(wl->qos_params)); + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (unlikely(!dev || (b43_status(dev) < B43_STAT_INITIALIZED))) + goto out_unlock; + + memcpy(&(wl->qos_params[queue].p), params, sizeof(*params)); + b43_mac_suspend(dev); + b43_qos_params_upload(dev, &(wl->qos_params[queue].p), + b43_qos_shm_offsets[queue]); + b43_mac_enable(dev); + err = 0; + +out_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static int b43_op_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + + mutex_lock(&wl->mutex); + memcpy(stats, &wl->ieee_stats, sizeof(*stats)); + mutex_unlock(&wl->mutex); + + return 0; +} + +static u64 b43_op_get_tsf(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + u64 tsf; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + + if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) + b43_tsf_read(dev, &tsf); + else + tsf = 0; + + mutex_unlock(&wl->mutex); + + return tsf; +} + +static void b43_op_set_tsf(struct ieee80211_hw *hw, u64 tsf) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + + if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) + b43_tsf_write(dev, tsf); + + mutex_unlock(&wl->mutex); +} + +static void b43_put_phy_into_reset(struct b43_wldev *dev) +{ + struct ssb_device *sdev = dev->dev; + u32 tmslow; + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~B43_TMSLOW_GMODE; + tmslow |= B43_TMSLOW_PHYRESET; + tmslow |= SSB_TMSLOW_FGC; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~SSB_TMSLOW_FGC; + tmslow |= B43_TMSLOW_PHYRESET; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); +} + +static const char *band_to_string(enum ieee80211_band band) +{ + switch (band) { + case IEEE80211_BAND_5GHZ: + return "5"; + case IEEE80211_BAND_2GHZ: + return "2.4"; + default: + break; + } + B43_WARN_ON(1); + return ""; +} + +/* Expects wl->mutex locked */ +static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan) +{ + struct b43_wldev *up_dev = NULL; + struct b43_wldev *down_dev; + struct b43_wldev *d; + int err; + bool uninitialized_var(gmode); + int prev_status; + + /* Find a device and PHY which supports the band. */ + list_for_each_entry(d, &wl->devlist, list) { + switch (chan->band) { + case IEEE80211_BAND_5GHZ: + if (d->phy.supports_5ghz) { + up_dev = d; + gmode = 0; + } + break; + case IEEE80211_BAND_2GHZ: + if (d->phy.supports_2ghz) { + up_dev = d; + gmode = 1; + } + break; + default: + B43_WARN_ON(1); + return -EINVAL; + } + if (up_dev) + break; + } + if (!up_dev) { + b43err(wl, "Could not find a device for %s-GHz band operation\n", + band_to_string(chan->band)); + return -ENODEV; + } + if ((up_dev == wl->current_dev) && + (!!wl->current_dev->phy.gmode == !!gmode)) { + /* This device is already running. */ + return 0; + } + b43dbg(wl, "Switching to %s-GHz band\n", + band_to_string(chan->band)); + down_dev = wl->current_dev; + + prev_status = b43_status(down_dev); + /* Shutdown the currently running core. */ + if (prev_status >= B43_STAT_STARTED) + down_dev = b43_wireless_core_stop(down_dev); + if (prev_status >= B43_STAT_INITIALIZED) + b43_wireless_core_exit(down_dev); + + if (down_dev != up_dev) { + /* We switch to a different core, so we put PHY into + * RESET on the old core. 
*/ + b43_put_phy_into_reset(down_dev); + } + + /* Now start the new core. */ + up_dev->phy.gmode = gmode; + if (prev_status >= B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(up_dev); + if (err) { + b43err(wl, "Fatal: Could not initialize device for " + "selected %s-GHz band\n", + band_to_string(chan->band)); + goto init_failure; + } + } + if (prev_status >= B43_STAT_STARTED) { + err = b43_wireless_core_start(up_dev); + if (err) { + b43err(wl, "Fatal: Coult not start device for " + "selected %s-GHz band\n", + band_to_string(chan->band)); + b43_wireless_core_exit(up_dev); + goto init_failure; + } + } + B43_WARN_ON(b43_status(up_dev) != prev_status); + + wl->current_dev = up_dev; + + return 0; +init_failure: + /* Whoops, failed to init the new core. No core is operating now. */ + wl->current_dev = NULL; + return err; +} + +/* Write the short and long frame retry limit values. */ +static void b43_set_retry_limits(struct b43_wldev *dev, + unsigned int short_retry, + unsigned int long_retry) +{ + /* The retry limit is a 4-bit counter. Enforce this to avoid overflowing + * the chip-internal counter. */ + short_retry = min(short_retry, (unsigned int)0xF); + long_retry = min(long_retry, (unsigned int)0xF); + + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_SRLIMIT, + short_retry); + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_LRLIMIT, + long_retry); +} + +static int b43_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + struct b43_phy *phy; + struct ieee80211_conf *conf = &hw->conf; + int antenna; + int err = 0; + + mutex_lock(&wl->mutex); + + /* Switch the band (if necessary). This might change the active core. */ + err = b43_switch_band(wl, conf->channel); + if (err) + goto out_unlock_mutex; + dev = wl->current_dev; + phy = &dev->phy; + + if (conf_is_ht(conf)) + phy->is_40mhz = + (conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf)); + else + phy->is_40mhz = false; + + b43_mac_suspend(dev); + + if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) + b43_set_retry_limits(dev, conf->short_frame_max_tx_count, + conf->long_frame_max_tx_count); + changed &= ~IEEE80211_CONF_CHANGE_RETRY_LIMITS; + if (!changed) + goto out_mac_enable; + + /* Switch to the requested channel. + * The firmware takes care of races with the TX handler. */ + if (conf->channel->hw_value != phy->channel) + b43_switch_channel(dev, conf->channel->hw_value); + + dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR); + + /* Adjust the desired TX power level. */ + if (conf->power_level != 0) { + if (conf->power_level != phy->desired_txpower) { + phy->desired_txpower = conf->power_level; + b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME | + B43_TXPWR_IGNORE_TSSI); + } + } + + /* Antennas for RX and management frame TX. */ + antenna = B43_ANTENNA_DEFAULT; + b43_mgmtframe_txantenna(dev, antenna); + antenna = B43_ANTENNA_DEFAULT; + if (phy->ops->set_rx_antenna) + phy->ops->set_rx_antenna(dev, antenna); + + if (wl->radio_enabled != phy->radio_on) { + if (wl->radio_enabled) { + b43_software_rfkill(dev, false); + b43info(dev->wl, "Radio turned on by software\n"); + if (!dev->radio_hw_enable) { + b43info(dev->wl, "The hardware RF-kill button " + "still turns the radio physically off. 
" + "Press the button to turn it on.\n"); + } + } else { + b43_software_rfkill(dev, true); + b43info(dev->wl, "Radio turned off by software\n"); + } + } + +out_mac_enable: + b43_mac_enable(dev); +out_unlock_mutex: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43_update_basic_rates(struct b43_wldev *dev, u32 brates) +{ + struct ieee80211_supported_band *sband = + dev->wl->hw->wiphy->bands[b43_current_band(dev->wl)]; + struct ieee80211_rate *rate; + int i; + u16 basic, direct, offset, basic_offset, rateptr; + + for (i = 0; i < sband->n_bitrates; i++) { + rate = &sband->bitrates[i]; + + if (b43_is_cck_rate(rate->hw_value)) { + direct = B43_SHM_SH_CCKDIRECT; + basic = B43_SHM_SH_CCKBASIC; + offset = b43_plcp_get_ratecode_cck(rate->hw_value); + offset &= 0xF; + } else { + direct = B43_SHM_SH_OFDMDIRECT; + basic = B43_SHM_SH_OFDMBASIC; + offset = b43_plcp_get_ratecode_ofdm(rate->hw_value); + offset &= 0xF; + } + + rate = ieee80211_get_response_rate(sband, brates, rate->bitrate); + + if (b43_is_cck_rate(rate->hw_value)) { + basic_offset = b43_plcp_get_ratecode_cck(rate->hw_value); + basic_offset &= 0xF; + } else { + basic_offset = b43_plcp_get_ratecode_ofdm(rate->hw_value); + basic_offset &= 0xF; + } + + /* + * Get the pointer that we need to point to + * from the direct map + */ + rateptr = b43_shm_read16(dev, B43_SHM_SHARED, + direct + 2 * basic_offset); + /* and write it to the basic map */ + b43_shm_write16(dev, B43_SHM_SHARED, basic + 2 * offset, + rateptr); + } +} + +static void b43_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, + u32 changed) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + + dev = wl->current_dev; + if (!dev || b43_status(dev) < B43_STAT_STARTED) + goto out_unlock_mutex; + + B43_WARN_ON(wl->vif != vif); + + if (changed & BSS_CHANGED_BSSID) { + if (conf->bssid) + memcpy(wl->bssid, conf->bssid, ETH_ALEN); + else + memset(wl->bssid, 0, ETH_ALEN); + } + + if (b43_status(dev) >= B43_STAT_INITIALIZED) { + if (changed & BSS_CHANGED_BEACON && + (b43_is_mode(wl, NL80211_IFTYPE_AP) || + b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) || + b43_is_mode(wl, NL80211_IFTYPE_ADHOC))) + b43_update_templates(wl); + + if (changed & BSS_CHANGED_BSSID) + b43_write_mac_bssid_templates(dev); + } + + b43_mac_suspend(dev); + + /* Update templates for AP/mesh mode. 
*/ + if (changed & BSS_CHANGED_BEACON_INT && + (b43_is_mode(wl, NL80211_IFTYPE_AP) || + b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) || + b43_is_mode(wl, NL80211_IFTYPE_ADHOC))) + b43_set_beacon_int(dev, conf->beacon_int); + + if (changed & BSS_CHANGED_BASIC_RATES) + b43_update_basic_rates(dev, conf->basic_rates); + + if (changed & BSS_CHANGED_ERP_SLOT) { + if (conf->use_short_slot) + b43_short_slot_timing_enable(dev); + else + b43_short_slot_timing_disable(dev); + } + + b43_mac_enable(dev); +out_unlock_mutex: + mutex_unlock(&wl->mutex); +} + +static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + u8 algorithm; + u8 index; + int err; + static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + + if (modparam_nohwcrypt) + return -ENOSPC; /* User disabled HW-crypto */ + + mutex_lock(&wl->mutex); + + dev = wl->current_dev; + err = -ENODEV; + if (!dev || b43_status(dev) < B43_STAT_INITIALIZED) + goto out_unlock; + + if (dev->fw.pcm_request_failed || !dev->hwcrypto_enabled) { + /* We don't have firmware for the crypto engine. + * Must use software-crypto. */ + err = -EOPNOTSUPP; + goto out_unlock; + } + + err = -EINVAL; + switch (key->alg) { + case ALG_WEP: + if (key->keylen == WLAN_KEY_LEN_WEP40) + algorithm = B43_SEC_ALGO_WEP40; + else + algorithm = B43_SEC_ALGO_WEP104; + break; + case ALG_TKIP: + algorithm = B43_SEC_ALGO_TKIP; + break; + case ALG_CCMP: + algorithm = B43_SEC_ALGO_AES; + break; + default: + B43_WARN_ON(1); + goto out_unlock; + } + index = (u8) (key->keyidx); + if (index > 3) + goto out_unlock; + + switch (cmd) { + case SET_KEY: + if (algorithm == B43_SEC_ALGO_TKIP && + (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE) || + !modparam_hwtkip)) { + /* We support only pairwise key */ + err = -EOPNOTSUPP; + goto out_unlock; + } + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + if (WARN_ON(!sta)) { + err = -EOPNOTSUPP; + goto out_unlock; + } + /* Pairwise key with an assigned MAC address. */ + err = b43_key_write(dev, -1, algorithm, + key->key, key->keylen, + sta->addr, key); + } else { + /* Group key */ + err = b43_key_write(dev, index, algorithm, + key->key, key->keylen, NULL, key); + } + if (err) + goto out_unlock; + + if (algorithm == B43_SEC_ALGO_WEP40 || + algorithm == B43_SEC_ALGO_WEP104) { + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_USEDEFKEYS); + } else { + b43_hf_write(dev, + b43_hf_read(dev) & ~B43_HF_USEDEFKEYS); + } + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + if (algorithm == B43_SEC_ALGO_TKIP) + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + break; + case DISABLE_KEY: { + err = b43_key_clear(dev, key->hw_key_idx); + if (err) + goto out_unlock; + break; + } + default: + B43_WARN_ON(1); + } + +out_unlock: + if (!err) { + b43dbg(wl, "%s hardware based encryption for keyidx: %d, " + "mac: %pM\n", + cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, + sta ? 
sta->addr : bcast_addr); + b43_dump_keymemory(dev); + } + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed, unsigned int *fflags, + u64 multicast) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (!dev) { + *fflags = 0; + goto out_unlock; + } + + *fflags &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + changed &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + wl->filter_flags = *fflags; + + if (changed && b43_status(dev) >= B43_STAT_INITIALIZED) + b43_adjust_opmode(dev); + +out_unlock: + mutex_unlock(&wl->mutex); +} + +/* Locking: wl->mutex + * Returns the current dev. This might be different from the passed in dev, + * because the core might be gone away while we unlocked the mutex. */ +static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct b43_wldev *orig_dev; + u32 mask; + +redo: + if (!dev || b43_status(dev) < B43_STAT_STARTED) + return dev; + + /* Cancel work. Unlock to avoid deadlocks. */ + mutex_unlock(&wl->mutex); + cancel_delayed_work_sync(&dev->periodic_work); + cancel_work_sync(&wl->tx_work); + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (!dev || b43_status(dev) < B43_STAT_STARTED) { + /* Whoops, aliens ate up the device while we were unlocked. */ + return dev; + } + + /* Disable interrupts on the device. */ + b43_set_status(dev, B43_STAT_INITIALIZED); + if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + /* wl->mutex is locked. That is enough. */ + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); + b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */ + } else { + spin_lock_irq(&wl->hardirq_lock); + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); + b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */ + spin_unlock_irq(&wl->hardirq_lock); + } + /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */ + orig_dev = dev; + mutex_unlock(&wl->mutex); + if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + b43_sdio_free_irq(dev); + } else { + synchronize_irq(dev->dev->irq); + free_irq(dev->dev->irq, dev); + } + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (!dev) + return dev; + if (dev != orig_dev) { + if (b43_status(dev) >= B43_STAT_STARTED) + goto redo; + return dev; + } + mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); + B43_WARN_ON(mask != 0xFFFFFFFF && mask); + + /* Drain the TX queue */ + while (skb_queue_len(&wl->tx_queue)) + dev_kfree_skb(skb_dequeue(&wl->tx_queue)); + + b43_mac_suspend(dev); + b43_leds_exit(dev); + b43dbg(wl, "Wireless interface stopped\n"); + + return dev; +} + +/* Locking: wl->mutex */ +static int b43_wireless_core_start(struct b43_wldev *dev) +{ + int err; + + B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED); + + drain_txstatus_queue(dev); + if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler); + if (err) { + b43err(dev->wl, "Cannot request SDIO IRQ\n"); + goto out; + } + } else { + err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler, + b43_interrupt_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq); + goto out; + } + } + + /* We are ready to run. 
*/ + ieee80211_wake_queues(dev->wl->hw); + b43_set_status(dev, B43_STAT_STARTED); + + /* Start data flow (TX/RX). */ + b43_mac_enable(dev); + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask); + + /* Start maintainance work */ + b43_periodic_tasks_setup(dev); + + b43_leds_init(dev); + + b43dbg(dev->wl, "Wireless interface started\n"); +out: + return err; +} + +/* Get PHY and RADIO versioning numbers */ +static int b43_phy_versioning(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u32 tmp; + u8 analog_type; + u8 phy_type; + u8 phy_rev; + u16 radio_manuf; + u16 radio_ver; + u16 radio_rev; + int unsupported = 0; + + /* Get PHY versioning */ + tmp = b43_read16(dev, B43_MMIO_PHY_VER); + analog_type = (tmp & B43_PHYVER_ANALOG) >> B43_PHYVER_ANALOG_SHIFT; + phy_type = (tmp & B43_PHYVER_TYPE) >> B43_PHYVER_TYPE_SHIFT; + phy_rev = (tmp & B43_PHYVER_VERSION); + switch (phy_type) { + case B43_PHYTYPE_A: + if (phy_rev >= 4) + unsupported = 1; + break; + case B43_PHYTYPE_B: + if (phy_rev != 2 && phy_rev != 4 && phy_rev != 6 + && phy_rev != 7) + unsupported = 1; + break; + case B43_PHYTYPE_G: + if (phy_rev > 9) + unsupported = 1; + break; +#ifdef CONFIG_B43_NPHY + case B43_PHYTYPE_N: + if (phy_rev > 4) + unsupported = 1; + break; +#endif +#ifdef CONFIG_B43_PHY_LP + case B43_PHYTYPE_LP: + if (phy_rev > 2) + unsupported = 1; + break; +#endif + default: + unsupported = 1; + }; + if (unsupported) { + b43err(dev->wl, "FOUND UNSUPPORTED PHY " + "(Analog %u, Type %u, Revision %u)\n", + analog_type, phy_type, phy_rev); + return -EOPNOTSUPP; + } + b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", + analog_type, phy_type, phy_rev); + + /* Get RADIO versioning */ + if (dev->dev->bus->chip_id == 0x4317) { + if (dev->dev->bus->chip_rev == 0) + tmp = 0x3205017F; + else if (dev->dev->bus->chip_rev == 1) + tmp = 0x4205017F; + else + tmp = 0x5205017F; + } else { + b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID); + tmp = b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); + b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID); + tmp |= (u32)b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH) << 16; + } + radio_manuf = (tmp & 0x00000FFF); + radio_ver = (tmp & 0x0FFFF000) >> 12; + radio_rev = (tmp & 0xF0000000) >> 28; + if (radio_manuf != 0x17F /* Broadcom */) + unsupported = 1; + switch (phy_type) { + case B43_PHYTYPE_A: + if (radio_ver != 0x2060) + unsupported = 1; + if (radio_rev != 1) + unsupported = 1; + if (radio_manuf != 0x17F) + unsupported = 1; + break; + case B43_PHYTYPE_B: + if ((radio_ver & 0xFFF0) != 0x2050) + unsupported = 1; + break; + case B43_PHYTYPE_G: + if (radio_ver != 0x2050) + unsupported = 1; + break; + case B43_PHYTYPE_N: + if (radio_ver != 0x2055 && radio_ver != 0x2056) + unsupported = 1; + break; + case B43_PHYTYPE_LP: + if (radio_ver != 0x2062 && radio_ver != 0x2063) + unsupported = 1; + break; + default: + B43_WARN_ON(1); + } + if (unsupported) { + b43err(dev->wl, "FOUND UNSUPPORTED RADIO " + "(Manuf 0x%X, Version 0x%X, Revision %u)\n", + radio_manuf, radio_ver, radio_rev); + return -EOPNOTSUPP; + } + b43dbg(dev->wl, "Found Radio: Manuf 0x%X, Version 0x%X, Revision %u\n", + radio_manuf, radio_ver, radio_rev); + + phy->radio_manuf = radio_manuf; + phy->radio_ver = radio_ver; + phy->radio_rev = radio_rev; + + phy->analog = analog_type; + phy->type = phy_type; + phy->rev = phy_rev; + + return 0; +} + +static void setup_struct_phy_for_init(struct b43_wldev *dev, + struct b43_phy *phy) +{ + phy->hardware_power_control = !!modparam_hwpctl; + phy->next_txpwr_check_time = 
jiffies; + /* PHY TX errors counter. */ + atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); + +#if B43_DEBUG + phy->phy_locked = 0; + phy->radio_locked = 0; +#endif +} + +static void setup_struct_wldev_for_init(struct b43_wldev *dev) +{ + dev->dfq_valid = 0; + + /* Assume the radio is enabled. If it's not enabled, the state will + * immediately get fixed on the first periodic work run. */ + dev->radio_hw_enable = 1; + + /* Stats */ + memset(&dev->stats, 0, sizeof(dev->stats)); + + setup_struct_phy_for_init(dev, &dev->phy); + + /* IRQ related flags */ + dev->irq_reason = 0; + memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); + dev->irq_mask = B43_IRQ_MASKTEMPLATE; + if (b43_modparam_verbose < B43_VERBOSITY_DEBUG) + dev->irq_mask &= ~B43_IRQ_PHY_TXERR; + + dev->mac_suspended = 1; + + /* Noise calculation context */ + memset(&dev->noisecalc, 0, sizeof(dev->noisecalc)); +} + +static void b43_bluetooth_coext_enable(struct b43_wldev *dev) +{ + struct ssb_sprom *sprom = &dev->dev->bus->sprom; + u64 hf; + + if (!modparam_btcoex) + return; + if (!(sprom->boardflags_lo & B43_BFL_BTCOEXIST)) + return; + if (dev->phy.type != B43_PHYTYPE_B && !dev->phy.gmode) + return; + + hf = b43_hf_read(dev); + if (sprom->boardflags_lo & B43_BFL_BTCMOD) + hf |= B43_HF_BTCOEXALT; + else + hf |= B43_HF_BTCOEX; + b43_hf_write(dev, hf); +} + +static void b43_bluetooth_coext_disable(struct b43_wldev *dev) +{ + if (!modparam_btcoex) + return; + //TODO +} + +static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev) +{ +#ifdef CONFIG_SSB_DRIVER_PCICORE + struct ssb_bus *bus = dev->dev->bus; + u32 tmp; + + if (bus->pcicore.dev && + bus->pcicore.dev->id.coreid == SSB_DEV_PCI && + bus->pcicore.dev->id.revision <= 5) { + /* IMCFGLO timeouts workaround. */ + tmp = ssb_read32(dev->dev, SSB_IMCFGLO); + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + case SSB_BUSTYPE_PCMCIA: + tmp &= ~SSB_IMCFGLO_REQTO; + tmp &= ~SSB_IMCFGLO_SERTO; + tmp |= 0x32; + break; + case SSB_BUSTYPE_SSB: + tmp &= ~SSB_IMCFGLO_REQTO; + tmp &= ~SSB_IMCFGLO_SERTO; + tmp |= 0x53; + break; + default: + break; + } + ssb_write32(dev->dev, SSB_IMCFGLO, tmp); + } +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +} + +static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle) +{ + u16 pu_delay; + + /* The time value is in microseconds. */ + if (dev->phy.type == B43_PHYTYPE_A) + pu_delay = 3700; + else + pu_delay = 1050; + if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle) + pu_delay = 500; + if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) + pu_delay = max(pu_delay, (u16)2400); + + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_SPUWKUP, pu_delay); +} + +/* Set the TSF CFP pre-TargetBeaconTransmissionTime. */ +static void b43_set_pretbtt(struct b43_wldev *dev) +{ + u16 pretbtt; + + /* The time value is in microseconds. */ + if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) { + pretbtt = 2; + } else { + if (dev->phy.type == B43_PHYTYPE_A) + pretbtt = 120; + else + pretbtt = 250; + } + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRETBTT, pretbtt); + b43_write16(dev, B43_MMIO_TSF_CFP_PRETBTT, pretbtt); +} + +/* Shutdown a wireless core */ +/* Locking: wl->mutex */ +static void b43_wireless_core_exit(struct b43_wldev *dev) +{ + u32 macctl; + + B43_WARN_ON(dev && b43_status(dev) > B43_STAT_INITIALIZED); + if (!dev || b43_status(dev) != B43_STAT_INITIALIZED) + return; + b43_set_status(dev, B43_STAT_UNINIT); + + /* Stop the microcode PSM. 
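+ * This clears B43_MACCTL_PSM_RUN and sets B43_MACCTL_PSM_JMP0 below, halting the firmware's programmable state machine.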
*/ + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_PSM_RUN; + macctl |= B43_MACCTL_PSM_JMP0; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + + b43_dma_free(dev); + b43_pio_free(dev); + b43_chip_exit(dev); + dev->phy.ops->switch_analog(dev, 0); + if (dev->wl->current_beacon) { + dev_kfree_skb_any(dev->wl->current_beacon); + dev->wl->current_beacon = NULL; + } + + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(dev->dev->bus); +} + +/* Initialize a wireless core */ +static int b43_wireless_core_init(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_sprom *sprom = &bus->sprom; + struct b43_phy *phy = &dev->phy; + int err; + u64 hf; + u32 tmp; + + B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); + + err = ssb_bus_powerup(bus, 0); + if (err) + goto out; + if (!ssb_device_is_enabled(dev->dev)) { + tmp = phy->gmode ? B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + } + + /* Reset all data structures. */ + setup_struct_wldev_for_init(dev); + phy->ops->prepare_structs(dev); + + /* Enable IRQ routing to this device. */ + ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); + + b43_imcfglo_timeouts_workaround(dev); + b43_bluetooth_coext_disable(dev); + if (phy->ops->prepare_hardware) { + err = phy->ops->prepare_hardware(dev); + if (err) + goto err_busdown; + } + err = b43_chip_init(dev); + if (err) + goto err_busdown; + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_WLCOREREV, dev->dev->id.revision); + hf = b43_hf_read(dev); + if (phy->type == B43_PHYTYPE_G) { + hf |= B43_HF_SYMW; + if (phy->rev == 1) + hf |= B43_HF_GDCW; + if (sprom->boardflags_lo & B43_BFL_PACTRL) + hf |= B43_HF_OFDMPABOOST; + } + if (phy->radio_ver == 0x2050) { + if (phy->radio_rev == 6) + hf |= B43_HF_4318TSSI; + if (phy->radio_rev < 6) + hf |= B43_HF_VCORECALC; + } + if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW) + hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */ +#ifdef CONFIG_SSB_DRIVER_PCICORE + if ((bus->bustype == SSB_BUSTYPE_PCI) && + (bus->pcicore.dev->id.revision <= 10)) + hf |= B43_HF_PCISCW; /* PCI slow clock workaround. */ +#endif + hf &= ~B43_HF_SKCFPUP; + b43_hf_write(dev, hf); + + b43_set_retry_limits(dev, B43_DEFAULT_SHORT_RETRY_LIMIT, + B43_DEFAULT_LONG_RETRY_LIMIT); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_SFFBLIM, 3); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_LFFBLIM, 2); + + /* Disable sending probe responses from firmware. + * Setting the MaxTime to one usec will always trigger + * a timeout, so we never send any probe resp. + * A timeout of zero is infinite. 
*/ + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRMAXTIME, 1); + + b43_rate_memory_init(dev); + b43_set_phytxctl_defaults(dev); + + /* Minimum Contention Window */ + if (phy->type == B43_PHYTYPE_B) { + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F); + } else { + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF); + } + /* Maximum Contention Window */ + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); + + if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || + (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) || + dev->use_pio) { + dev->__using_pio_transfers = 1; + err = b43_pio_init(dev); + } else { + dev->__using_pio_transfers = 0; + err = b43_dma_init(dev); + } + if (err) + goto err_chip_exit; + b43_qos_init(dev); + b43_set_synth_pu_delay(dev, 1); + b43_bluetooth_coext_enable(dev); + + ssb_bus_powerup(bus, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW)); + b43_upload_card_macaddress(dev); + b43_security_init(dev); + + ieee80211_wake_queues(dev->wl->hw); + + b43_set_status(dev, B43_STAT_INITIALIZED); + +out: + return err; + +err_chip_exit: + b43_chip_exit(dev); +err_busdown: + ssb_bus_may_powerdown(bus); + B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); + return err; +} + +static int b43_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + int err = -EOPNOTSUPP; + + /* TODO: allow WDS/AP devices to coexist */ + + if (vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_MESH_POINT && + vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_WDS && + vif->type != NL80211_IFTYPE_ADHOC) + return -EOPNOTSUPP; + + mutex_lock(&wl->mutex); + if (wl->operating) + goto out_mutex_unlock; + + b43dbg(wl, "Adding Interface type %d\n", vif->type); + + dev = wl->current_dev; + wl->operating = 1; + wl->vif = vif; + wl->if_type = vif->type; + memcpy(wl->mac_addr, vif->addr, ETH_ALEN); + + b43_adjust_opmode(dev); + b43_set_pretbtt(dev); + b43_set_synth_pu_delay(dev, 0); + b43_upload_card_macaddress(dev); + + err = 0; + out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + + b43dbg(wl, "Removing Interface type %d\n", vif->type); + + mutex_lock(&wl->mutex); + + B43_WARN_ON(!wl->operating); + B43_WARN_ON(wl->vif != vif); + wl->vif = NULL; + + wl->operating = 0; + + b43_adjust_opmode(dev); + memset(wl->mac_addr, 0, ETH_ALEN); + b43_upload_card_macaddress(dev); + + mutex_unlock(&wl->mutex); +} + +static int b43_op_start(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + int did_init = 0; + int err = 0; + + /* Kill all old instance specific information to make sure + * the card won't use it in the short timeframe between start + * and mac80211 reconfiguring it. 
*/ + memset(wl->bssid, 0, ETH_ALEN); + memset(wl->mac_addr, 0, ETH_ALEN); + wl->filter_flags = 0; + wl->radiotap_enabled = 0; + b43_qos_clear(wl); + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; + wl->beacon_templates_virgin = 1; + wl->radio_enabled = 1; + + mutex_lock(&wl->mutex); + + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(dev); + if (err) + goto out_mutex_unlock; + did_init = 1; + } + + if (b43_status(dev) < B43_STAT_STARTED) { + err = b43_wireless_core_start(dev); + if (err) { + if (did_init) + b43_wireless_core_exit(dev); + goto out_mutex_unlock; + } + } + + /* XXX: only do if device doesn't support rfkill irq */ + wiphy_rfkill_start_polling(hw->wiphy); + + out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43_op_stop(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + + cancel_work_sync(&(wl->beacon_update_trigger)); + + mutex_lock(&wl->mutex); + if (b43_status(dev) >= B43_STAT_STARTED) { + dev = b43_wireless_core_stop(dev); + if (!dev) + goto out_unlock; + } + b43_wireless_core_exit(dev); + wl->radio_enabled = 0; + +out_unlock: + mutex_unlock(&wl->mutex); + + cancel_work_sync(&(wl->txpower_adjust_work)); +} + +static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + + /* FIXME: add locking */ + b43_update_templates(wl); + + return 0; +} + +static void b43_op_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + struct ieee80211_sta *sta) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + + B43_WARN_ON(!vif || wl->vif != vif); +} + +static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) { + /* Disable CFP update during scan on other channels. */ + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_SKCFPUP); + } + mutex_unlock(&wl->mutex); +} + +static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) { + /* Re-enable CFP update. */ + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_SKCFPUP); + } + mutex_unlock(&wl->mutex); +} + +static const struct ieee80211_ops b43_hw_ops = { + .tx = b43_op_tx, + .conf_tx = b43_op_conf_tx, + .add_interface = b43_op_add_interface, + .remove_interface = b43_op_remove_interface, + .config = b43_op_config, + .bss_info_changed = b43_op_bss_info_changed, + .configure_filter = b43_op_configure_filter, + .set_key = b43_op_set_key, + .update_tkip_key = b43_op_update_tkip_key, + .get_stats = b43_op_get_stats, + .get_tsf = b43_op_get_tsf, + .set_tsf = b43_op_set_tsf, + .start = b43_op_start, + .stop = b43_op_stop, + .set_tim = b43_op_beacon_set_tim, + .sta_notify = b43_op_sta_notify, + .sw_scan_start = b43_op_sw_scan_start_notifier, + .sw_scan_complete = b43_op_sw_scan_complete_notifier, + .rfkill_poll = b43_rfkill_poll, +}; + +/* Hard-reset the chip. Do not call this directly. 
+ * Use b43_controller_restart() + */ +static void b43_chip_reset(struct work_struct *work) +{ + struct b43_wldev *dev = + container_of(work, struct b43_wldev, restart_work); + struct b43_wl *wl = dev->wl; + int err = 0; + int prev_status; + + mutex_lock(&wl->mutex); + + prev_status = b43_status(dev); + /* Bring the device down... */ + if (prev_status >= B43_STAT_STARTED) { + dev = b43_wireless_core_stop(dev); + if (!dev) { + err = -ENODEV; + goto out; + } + } + if (prev_status >= B43_STAT_INITIALIZED) + b43_wireless_core_exit(dev); + + /* ...and up again. */ + if (prev_status >= B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(dev); + if (err) + goto out; + } + if (prev_status >= B43_STAT_STARTED) { + err = b43_wireless_core_start(dev); + if (err) { + b43_wireless_core_exit(dev); + goto out; + } + } +out: + if (err) + wl->current_dev = NULL; /* Failed to init the dev. */ + mutex_unlock(&wl->mutex); + if (err) + b43err(wl, "Controller restart FAILED\n"); + else + b43info(wl, "Controller restarted\n"); +} + +static int b43_setup_bands(struct b43_wldev *dev, + bool have_2ghz_phy, bool have_5ghz_phy) +{ + struct ieee80211_hw *hw = dev->wl->hw; + + if (have_2ghz_phy) + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &b43_band_2GHz; + if (dev->phy.type == B43_PHYTYPE_N) { + if (have_5ghz_phy) + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_nphy; + } else { + if (have_5ghz_phy) + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy; + } + + dev->phy.supports_2ghz = have_2ghz_phy; + dev->phy.supports_5ghz = have_5ghz_phy; + + return 0; +} + +static void b43_wireless_core_detach(struct b43_wldev *dev) +{ + /* We release firmware that late to not be required to re-request + * is all the time when we reinit the core. */ + b43_release_firmware(dev); + b43_phy_free(dev); +} + +static int b43_wireless_core_attach(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL; + int err; + bool have_2ghz_phy = 0, have_5ghz_phy = 0; + u32 tmp; + + /* Do NOT do any device initialization here. + * Do it in wireless_core_init() instead. + * This function is for gathering basic information about the HW, only. + * Also some structs may be set up here. But most likely you want to have + * that in core_init(), too. + */ + + err = ssb_bus_powerup(bus, 0); + if (err) { + b43err(wl, "Bus powerup failed\n"); + goto out; + } + /* Get the PHY type. */ + if (dev->dev->id.revision >= 5) { + u32 tmshigh; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + have_2ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY); + have_5ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_5GHZ_PHY); + } else + B43_WARN_ON(1); + + dev->phy.gmode = have_2ghz_phy; + dev->phy.radio_on = 1; + tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + + err = b43_phy_versioning(dev); + if (err) + goto err_powerdown; + /* Check if this device supports multiband. */ + if (!pdev || + (pdev->device != 0x4312 && + pdev->device != 0x4319 && pdev->device != 0x4324)) { + /* No multiband support. */ + have_2ghz_phy = 0; + have_5ghz_phy = 0; + switch (dev->phy.type) { + case B43_PHYTYPE_A: + have_5ghz_phy = 1; + break; + case B43_PHYTYPE_LP: //FIXME not always! 
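+ /* Fall through: with 5 GHz disabled below, LP-PHY is treated as 2.4 GHz only. */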
+#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference + have_5ghz_phy = 1; +#endif + case B43_PHYTYPE_G: + case B43_PHYTYPE_N: + have_2ghz_phy = 1; + break; + default: + B43_WARN_ON(1); + } + } + if (dev->phy.type == B43_PHYTYPE_A) { + /* FIXME */ + b43err(wl, "IEEE 802.11a devices are unsupported\n"); + err = -EOPNOTSUPP; + goto err_powerdown; + } + if (1 /* disable A-PHY */) { + /* FIXME: For now we disable the A-PHY on multi-PHY devices. */ + if (dev->phy.type != B43_PHYTYPE_N && + dev->phy.type != B43_PHYTYPE_LP) { + have_2ghz_phy = 1; + have_5ghz_phy = 0; + } + } + + err = b43_phy_allocate(dev); + if (err) + goto err_powerdown; + + dev->phy.gmode = have_2ghz_phy; + tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + + err = b43_validate_chipaccess(dev); + if (err) + goto err_phy_free; + err = b43_setup_bands(dev, have_2ghz_phy, have_5ghz_phy); + if (err) + goto err_phy_free; + + /* Now set some default "current_dev" */ + if (!wl->current_dev) + wl->current_dev = dev; + INIT_WORK(&dev->restart_work, b43_chip_reset); + + dev->phy.ops->switch_analog(dev, 0); + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(bus); + +out: + return err; + +err_phy_free: + b43_phy_free(dev); +err_powerdown: + ssb_bus_may_powerdown(bus); + return err; +} + +static void b43_one_core_detach(struct ssb_device *dev) +{ + struct b43_wldev *wldev; + struct b43_wl *wl; + + /* Do not cancel ieee80211-workqueue based work here. + * See comment in b43_remove(). */ + + wldev = ssb_get_drvdata(dev); + wl = wldev->wl; + b43_debugfs_remove_device(wldev); + b43_wireless_core_detach(wldev); + list_del(&wldev->list); + wl->nr_devs--; + ssb_set_drvdata(dev, NULL); + kfree(wldev); +} + +static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl) +{ + struct b43_wldev *wldev; + struct pci_dev *pdev; + int err = -ENOMEM; + + if (!list_empty(&wl->devlist)) { + /* We are not the first core on this chip. */ + pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL; + /* Only special chips support more than one wireless + * core, although some of the other chips have more than + * one wireless core as well. Check for this and + * bail out early. 
+ */ + if (!pdev || + ((pdev->device != 0x4321) && + (pdev->device != 0x4313) && (pdev->device != 0x431A))) { + b43dbg(wl, "Ignoring unconnected 802.11 core\n"); + return -ENODEV; + } + } + + wldev = kzalloc(sizeof(*wldev), GFP_KERNEL); + if (!wldev) + goto out; + + wldev->use_pio = b43_modparam_pio; + wldev->dev = dev; + wldev->wl = wl; + b43_set_status(wldev, B43_STAT_UNINIT); + wldev->bad_frames_preempt = modparam_bad_frames_preempt; + INIT_LIST_HEAD(&wldev->list); + + err = b43_wireless_core_attach(wldev); + if (err) + goto err_kfree_wldev; + + list_add(&wldev->list, &wl->devlist); + wl->nr_devs++; + ssb_set_drvdata(dev, wldev); + b43_debugfs_add_device(wldev); + + out: + return err; + + err_kfree_wldev: + kfree(wldev); + return err; +} + +#define IS_PDEV(pdev, _vendor, _device, _subvendor, _subdevice) ( \ + (pdev->vendor == PCI_VENDOR_ID_##_vendor) && \ + (pdev->device == _device) && \ + (pdev->subsystem_vendor == PCI_VENDOR_ID_##_subvendor) && \ + (pdev->subsystem_device == _subdevice) ) + +static void b43_sprom_fixup(struct ssb_bus *bus) +{ + struct pci_dev *pdev; + + /* boardflags workarounds */ + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_DELL && + bus->chip_id == 0x4301 && bus->boardinfo.rev == 0x74) + bus->sprom.boardflags_lo |= B43_BFL_BTCOEXIST; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE && + bus->boardinfo.type == 0x4E && bus->boardinfo.rev > 0x40) + bus->sprom.boardflags_lo |= B43_BFL_PACTRL; + if (bus->bustype == SSB_BUSTYPE_PCI) { + pdev = bus->host_pci; + if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) || + IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) || + IS_PDEV(pdev, BROADCOM, 0x4320, HP, 0x12f8) || + IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) || + IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) || + IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013) || + IS_PDEV(pdev, BROADCOM, 0x4320, MOTOROLA, 0x7010)) + bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST; + } +} + +static void b43_wireless_exit(struct ssb_device *dev, struct b43_wl *wl) +{ + struct ieee80211_hw *hw = wl->hw; + + ssb_set_devtypedata(dev, NULL); + ieee80211_free_hw(hw); +} + +static int b43_wireless_init(struct ssb_device *dev) +{ + struct ssb_sprom *sprom = &dev->bus->sprom; + struct ieee80211_hw *hw; + struct b43_wl *wl; + int err = -ENOMEM; + + b43_sprom_fixup(dev->bus); + + hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops); + if (!hw) { + b43err(NULL, "Could not allocate ieee80211 device\n"); + goto out; + } + wl = hw_to_b43_wl(hw); + + /* fill hw info */ + hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM; + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_WDS) | + BIT(NL80211_IFTYPE_ADHOC); + + hw->queues = modparam_qos ? 
4 : 1; + wl->mac80211_initially_registered_queues = hw->queues; + hw->max_rates = 2; + SET_IEEE80211_DEV(hw, dev->dev); + if (is_valid_ether_addr(sprom->et1mac)) + SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac); + else + SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac); + + /* Initialize struct b43_wl */ + wl->hw = hw; + mutex_init(&wl->mutex); + spin_lock_init(&wl->hardirq_lock); + INIT_LIST_HEAD(&wl->devlist); + INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work); + INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work); + INIT_WORK(&wl->tx_work, b43_tx_work); + skb_queue_head_init(&wl->tx_queue); + + ssb_set_devtypedata(dev, wl); + b43info(wl, "Broadcom %04X WLAN found (core revision %u)\n", + dev->bus->chip_id, dev->id.revision); + err = 0; +out: + return err; +} + +static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id) +{ + struct b43_wl *wl; + int err; + int first = 0; + + wl = ssb_get_devtypedata(dev); + if (!wl) { + /* Probing the first core. Must setup common struct b43_wl */ + first = 1; + err = b43_wireless_init(dev); + if (err) + goto out; + wl = ssb_get_devtypedata(dev); + B43_WARN_ON(!wl); + } + err = b43_one_core_attach(dev, wl); + if (err) + goto err_wireless_exit; + + if (first) { + err = ieee80211_register_hw(wl->hw); + if (err) + goto err_one_core_detach; + b43_leds_register(wl->current_dev); + b43_rng_init(wl); + } + + out: + return err; + + err_one_core_detach: + b43_one_core_detach(dev); + err_wireless_exit: + if (first) + b43_wireless_exit(dev, wl); + return err; +} + +static void b43_remove(struct ssb_device *dev) +{ + struct b43_wl *wl = ssb_get_devtypedata(dev); + struct b43_wldev *wldev = ssb_get_drvdata(dev); + + /* We must cancel any work here before unregistering from ieee80211, + * as the ieee80211 unreg will destroy the workqueue. */ + cancel_work_sync(&wldev->restart_work); + + B43_WARN_ON(!wl); + if (wl->current_dev == wldev) { + /* Restore the queues count before unregistering, because firmware detect + * might have modified it. Restoring is important, so the networking + * stack can properly free resources. */ + wl->hw->queues = wl->mac80211_initially_registered_queues; + b43_leds_stop(wldev); + ieee80211_unregister_hw(wl->hw); + } + + b43_one_core_detach(dev); + + if (list_empty(&wl->devlist)) { + b43_rng_exit(wl); + b43_leds_unregister(wl); + /* Last core on the chip unregistered. + * We can destroy common struct b43_wl. + */ + b43_wireless_exit(dev, wl); + } +} + +/* Perform a hardware reset. This can be called from any context. */ +void b43_controller_restart(struct b43_wldev *dev, const char *reason) +{ + /* Must avoid requeueing, if we are in shutdown. 
*/ + if (b43_status(dev) < B43_STAT_INITIALIZED) + return; + b43info(dev->wl, "Controller RESET (%s) ...\n", reason); + ieee80211_queue_work(dev->wl->hw, &dev->restart_work); +} + +static struct ssb_driver b43_ssb_driver = { + .name = KBUILD_MODNAME, + .id_table = b43_ssb_tbl, + .probe = b43_probe, + .remove = b43_remove, +}; + +static void b43_print_driverinfo(void) +{ + const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "", + *feat_leds = "", *feat_sdio = ""; + +#ifdef CONFIG_B43_PCI_AUTOSELECT + feat_pci = "P"; +#endif +#ifdef CONFIG_B43_PCMCIA + feat_pcmcia = "M"; +#endif +#ifdef CONFIG_B43_NPHY + feat_nphy = "N"; +#endif +#ifdef CONFIG_B43_LEDS + feat_leds = "L"; +#endif +#ifdef CONFIG_B43_SDIO + feat_sdio = "S"; +#endif + printk(KERN_INFO "Broadcom 43xx driver loaded " + "[ Features: %s%s%s%s%s, Firmware-ID: " + B43_SUPPORTED_FIRMWARE_ID " ]\n", + feat_pci, feat_pcmcia, feat_nphy, + feat_leds, feat_sdio); +} + +static int __init b43_init(void) +{ + int err; + + b43_debugfs_init(); + err = b43_pcmcia_init(); + if (err) + goto err_dfs_exit; + err = b43_sdio_init(); + if (err) + goto err_pcmcia_exit; + err = ssb_driver_register(&b43_ssb_driver); + if (err) + goto err_sdio_exit; + b43_print_driverinfo(); + + return err; + +err_sdio_exit: + b43_sdio_exit(); +err_pcmcia_exit: + b43_pcmcia_exit(); +err_dfs_exit: + b43_debugfs_exit(); + return err; +} + +static void __exit b43_exit(void) +{ + ssb_driver_unregister(&b43_ssb_driver); + b43_sdio_exit(); + b43_pcmcia_exit(); + b43_debugfs_exit(); +} + +module_init(b43_init) +module_exit(b43_exit) diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h new file mode 100644 index 0000000..40db036 --- /dev/null +++ b/drivers/net/wireless/b43/main.h @@ -0,0 +1,144 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#ifndef B43_MAIN_H_ +#define B43_MAIN_H_ + +#include "b43.h" + +#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes] +#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes) +/* Magic helper macro to pad structures. Ignore those above. It's magic. */ +#define PAD_BYTES(nr_bytes) P4D_BYTES( __LINE__ , (nr_bytes)) + + +extern int b43_modparam_verbose; + +/* Logmessage verbosity levels. Update the b43_modparam_verbose helptext, if + * you add or remove levels. 
*/ +enum b43_verbosity { + B43_VERBOSITY_ERROR, + B43_VERBOSITY_WARN, + B43_VERBOSITY_INFO, + B43_VERBOSITY_DEBUG, + __B43_VERBOSITY_AFTERLAST, /* keep last */ + + B43_VERBOSITY_MAX = __B43_VERBOSITY_AFTERLAST - 1, +#if B43_DEBUG + B43_VERBOSITY_DEFAULT = B43_VERBOSITY_DEBUG, +#else + B43_VERBOSITY_DEFAULT = B43_VERBOSITY_INFO, +#endif +}; + + +/* Lightweight function to convert a frequency (in Mhz) to a channel number. */ +static inline u8 b43_freq_to_channel_5ghz(int freq) +{ + return ((freq - 5000) / 5); +} +static inline u8 b43_freq_to_channel_2ghz(int freq) +{ + u8 channel; + + if (freq == 2484) + channel = 14; + else + channel = (freq - 2407) / 5; + + return channel; +} + +/* Lightweight function to convert a channel number to a frequency (in Mhz). */ +static inline int b43_channel_to_freq_5ghz(u8 channel) +{ + return (5000 + (5 * channel)); +} +static inline int b43_channel_to_freq_2ghz(u8 channel) +{ + int freq; + + if (channel == 14) + freq = 2484; + else + freq = 2407 + (5 * channel); + + return freq; +} + +static inline int b43_is_cck_rate(int rate) +{ + return (rate == B43_CCK_RATE_1MB || + rate == B43_CCK_RATE_2MB || + rate == B43_CCK_RATE_5MB || rate == B43_CCK_RATE_11MB); +} + +static inline int b43_is_ofdm_rate(int rate) +{ + return !b43_is_cck_rate(rate); +} + +u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev, + u8 antenna_nr); + +void b43_tsf_read(struct b43_wldev *dev, u64 * tsf); +void b43_tsf_write(struct b43_wldev *dev, u64 tsf); + +u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset); +u16 b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset); +void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value); +void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value); + +u64 b43_hf_read(struct b43_wldev *dev); +void b43_hf_write(struct b43_wldev *dev, u64 value); + +void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on); + +void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags); + +void b43_controller_restart(struct b43_wldev *dev, const char *reason); + +#define B43_PS_ENABLED (1 << 0) /* Force enable hardware power saving */ +#define B43_PS_DISABLED (1 << 1) /* Force disable hardware power saving */ +#define B43_PS_AWAKE (1 << 2) /* Force device awake */ +#define B43_PS_ASLEEP (1 << 3) /* Force device asleep */ +void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags); + +void b43_mac_suspend(struct b43_wldev *dev); +void b43_mac_enable(struct b43_wldev *dev); + + +struct b43_request_fw_context; +int b43_do_request_fw(struct b43_request_fw_context *ctx, + const char *name, + struct b43_firmware_file *fw); +void b43_do_release_fw(struct b43_firmware_file *fw); + +#endif /* B43_MAIN_H_ */ diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c new file mode 100644 index 0000000..984174b --- /dev/null +++ b/drivers/net/wireless/b43/pcmcia.c @@ -0,0 +1,157 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2007 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "pcmcia.h" + +#include + +#include +#include +#include +#include +#include +#include + + +static /*const */ struct pcmcia_device_id b43_pcmcia_tbl[] = { + PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448), + PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476), + PCMCIA_DEVICE_NULL, +}; + +MODULE_DEVICE_TABLE(pcmcia, b43_pcmcia_tbl); + +#ifdef CONFIG_PM +static int b43_pcmcia_suspend(struct pcmcia_device *dev) +{ + struct ssb_bus *ssb = dev->priv; + + return ssb_bus_suspend(ssb); +} + +static int b43_pcmcia_resume(struct pcmcia_device *dev) +{ + struct ssb_bus *ssb = dev->priv; + + return ssb_bus_resume(ssb); +} +#else /* CONFIG_PM */ +# define b43_pcmcia_suspend NULL +# define b43_pcmcia_resume NULL +#endif /* CONFIG_PM */ + +static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev) +{ + struct ssb_bus *ssb; + win_req_t win; + memreq_t mem; + int err = -ENOMEM; + int res = 0; + + ssb = kzalloc(sizeof(*ssb), GFP_KERNEL); + if (!ssb) + goto out_error; + + err = -ENODEV; + + dev->conf.Attributes = CONF_ENABLE_IRQ; + dev->conf.IntType = INT_MEMORY_AND_IO; + + dev->io.BasePort2 = 0; + dev->io.NumPorts2 = 0; + dev->io.Attributes2 = 0; + + win.Attributes = WIN_ADDR_SPACE_MEM | WIN_MEMORY_TYPE_CM | + WIN_ENABLE | WIN_DATA_WIDTH_16 | + WIN_USE_WAIT; + win.Base = 0; + win.Size = SSB_CORE_SIZE; + win.AccessSpeed = 250; + res = pcmcia_request_window(dev, &win, &dev->win); + if (res != 0) + goto err_kfree_ssb; + + mem.CardOffset = 0; + mem.Page = 0; + res = pcmcia_map_mem_page(dev, dev->win, &mem); + if (res != 0) + goto err_disable; + + dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; + dev->irq.Handler = NULL; /* The handler is registered later. 
*/ + res = pcmcia_request_irq(dev, &dev->irq); + if (res != 0) + goto err_disable; + + res = pcmcia_request_configuration(dev, &dev->conf); + if (res != 0) + goto err_disable; + + err = ssb_bus_pcmciabus_register(ssb, dev, win.Base); + if (err) + goto err_disable; + dev->priv = ssb; + + return 0; + +err_disable: + pcmcia_disable_device(dev); +err_kfree_ssb: + kfree(ssb); +out_error: + printk(KERN_ERR "b43-pcmcia: Initialization failed (%d, %d)\n", + res, err); + return err; +} + +static void __devexit b43_pcmcia_remove(struct pcmcia_device *dev) +{ + struct ssb_bus *ssb = dev->priv; + + ssb_bus_unregister(ssb); + pcmcia_disable_device(dev); + kfree(ssb); + dev->priv = NULL; +} + +static struct pcmcia_driver b43_pcmcia_driver = { + .owner = THIS_MODULE, + .drv = { + .name = "b43-pcmcia", + }, + .id_table = b43_pcmcia_tbl, + .probe = b43_pcmcia_probe, + .remove = __devexit_p(b43_pcmcia_remove), + .suspend = b43_pcmcia_suspend, + .resume = b43_pcmcia_resume, +}; + +int b43_pcmcia_init(void) +{ + return pcmcia_register_driver(&b43_pcmcia_driver); +} + +void b43_pcmcia_exit(void) +{ + pcmcia_unregister_driver(&b43_pcmcia_driver); +} diff --git a/drivers/net/wireless/b43/pcmcia.h b/drivers/net/wireless/b43/pcmcia.h new file mode 100644 index 0000000..85f120a --- /dev/null +++ b/drivers/net/wireless/b43/pcmcia.h @@ -0,0 +1,20 @@ +#ifndef B43_PCMCIA_H_ +#define B43_PCMCIA_H_ + +#ifdef CONFIG_B43_PCMCIA + +int b43_pcmcia_init(void); +void b43_pcmcia_exit(void); + +#else /* CONFIG_B43_PCMCIA */ + +static inline int b43_pcmcia_init(void) +{ + return 0; +} +static inline void b43_pcmcia_exit(void) +{ +} + +#endif /* CONFIG_B43_PCMCIA */ +#endif /* B43_PCMCIA_H_ */ diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c new file mode 100644 index 0000000..d90217c --- /dev/null +++ b/drivers/net/wireless/b43/phy_a.c @@ -0,0 +1,594 @@ +/* + + Broadcom B43 wireless driver + IEEE 802.11a PHY driver + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005-2007 Stefano Brivio + Copyright (c) 2005-2008 Michael Buesch + Copyright (c) 2005, 2006 Danny van Dyk + Copyright (c) 2005, 2006 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "phy_a.h" +#include "phy_common.h" +#include "wa.h" +#include "tables.h" +#include "main.h" + + +/* Get the freq, as it has to be written to the device. 
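+ * For the 5 GHz band this is simply 5000 + 5 * channel (in MHz).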
*/ +static inline u16 channel2freq_a(u8 channel) +{ + B43_WARN_ON(channel > 200); + + return (5000 + 5 * channel); +} + +static inline u16 freq_r3A_value(u16 frequency) +{ + u16 value; + + if (frequency < 5091) + value = 0x0040; + else if (frequency < 5321) + value = 0x0000; + else if (frequency < 5806) + value = 0x0080; + else + value = 0x0040; + + return value; +} + +#if 0 +/* This function converts a TSSI value to dBm in Q5.2 */ +static s8 b43_aphy_estimate_power_out(struct b43_wldev *dev, s8 tssi) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_a *aphy = phy->a; + s8 dbm = 0; + s32 tmp; + + tmp = (aphy->tgt_idle_tssi - aphy->cur_idle_tssi + tssi); + tmp += 0x80; + tmp = clamp_val(tmp, 0x00, 0xFF); + dbm = aphy->tssi2dbm[tmp]; + //TODO: There's a FIXME on the specs + + return dbm; +} +#endif + +static void b43_radio_set_tx_iq(struct b43_wldev *dev) +{ + static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 }; + static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A }; + u16 tmp = b43_radio_read16(dev, 0x001E); + int i, j; + + for (i = 0; i < 5; i++) { + for (j = 0; j < 5; j++) { + if (tmp == (data_high[i] << 4 | data_low[j])) { + b43_phy_write(dev, 0x0069, + (i - j) << 8 | 0x00C0); + return; + } + } + } +} + +static void aphy_channel_switch(struct b43_wldev *dev, unsigned int channel) +{ + u16 freq, r8, tmp; + + freq = channel2freq_a(channel); + + r8 = b43_radio_read16(dev, 0x0008); + b43_write16(dev, 0x03F0, freq); + b43_radio_write16(dev, 0x0008, r8); + + //TODO: write max channel TX power? to Radio 0x2D + tmp = b43_radio_read16(dev, 0x002E); + tmp &= 0x0080; + //TODO: OR tmp with the Power out estimation for this channel? + b43_radio_write16(dev, 0x002E, tmp); + + if (freq >= 4920 && freq <= 5500) { + /* + * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F; + * = (freq * 0.025862069 + */ + r8 = 3 * freq / 116; /* is equal to r8 = freq * 0.025862 */ + } + b43_radio_write16(dev, 0x0007, (r8 << 4) | r8); + b43_radio_write16(dev, 0x0020, (r8 << 4) | r8); + b43_radio_write16(dev, 0x0021, (r8 << 4) | r8); + b43_radio_maskset(dev, 0x0022, 0x000F, (r8 << 4)); + b43_radio_write16(dev, 0x002A, (r8 << 4)); + b43_radio_write16(dev, 0x002B, (r8 << 4)); + b43_radio_maskset(dev, 0x0008, 0x00F0, (r8 << 4)); + b43_radio_maskset(dev, 0x0029, 0xFF0F, 0x00B0); + b43_radio_write16(dev, 0x0035, 0x00AA); + b43_radio_write16(dev, 0x0036, 0x0085); + b43_radio_maskset(dev, 0x003A, 0xFF20, freq_r3A_value(freq)); + b43_radio_mask(dev, 0x003D, 0x00FF); + b43_radio_maskset(dev, 0x0081, 0xFF7F, 0x0080); + b43_radio_mask(dev, 0x0035, 0xFFEF); + b43_radio_maskset(dev, 0x0035, 0xFFEF, 0x0010); + b43_radio_set_tx_iq(dev); + //TODO: TSSI2dbm workaround +//FIXME b43_phy_xmitpower(dev); +} + +static void b43_radio_init2060(struct b43_wldev *dev) +{ + b43_radio_write16(dev, 0x0004, 0x00C0); + b43_radio_write16(dev, 0x0005, 0x0008); + b43_radio_write16(dev, 0x0009, 0x0040); + b43_radio_write16(dev, 0x0005, 0x00AA); + b43_radio_write16(dev, 0x0032, 0x008F); + b43_radio_write16(dev, 0x0006, 0x008F); + b43_radio_write16(dev, 0x0034, 0x008F); + b43_radio_write16(dev, 0x002C, 0x0007); + b43_radio_write16(dev, 0x0082, 0x0080); + b43_radio_write16(dev, 0x0080, 0x0000); + b43_radio_write16(dev, 0x003F, 0x00DA); + b43_radio_mask(dev, 0x0005, ~0x0008); + b43_radio_mask(dev, 0x0081, ~0x0010); + b43_radio_mask(dev, 0x0081, ~0x0020); + b43_radio_mask(dev, 0x0081, ~0x0020); + msleep(1); /* delay 400usec */ + + b43_radio_maskset(dev, 0x0081, ~0x0020, 0x0010); + msleep(1); /* delay 400usec */ + + 
b43_radio_maskset(dev, 0x0005, ~0x0008, 0x0008); + b43_radio_mask(dev, 0x0085, ~0x0010); + b43_radio_mask(dev, 0x0005, ~0x0008); + b43_radio_mask(dev, 0x0081, ~0x0040); + b43_radio_maskset(dev, 0x0081, ~0x0040, 0x0040); + b43_radio_write16(dev, 0x0005, + (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008); + b43_phy_write(dev, 0x0063, 0xDDC6); + b43_phy_write(dev, 0x0069, 0x07BE); + b43_phy_write(dev, 0x006A, 0x0000); + + aphy_channel_switch(dev, dev->phy.ops->get_default_chan(dev)); + + msleep(1); +} + +static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable) +{ + int i; + + if (dev->phy.rev < 3) { + if (enable) + for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) { + b43_ofdmtab_write16(dev, + B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8); + b43_ofdmtab_write16(dev, + B43_OFDMTAB_WRSSI, i, 0xFFF8); + } + else + for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) { + b43_ofdmtab_write16(dev, + B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]); + b43_ofdmtab_write16(dev, + B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]); + } + } else { + if (enable) + for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) + b43_ofdmtab_write16(dev, + B43_OFDMTAB_WRSSI, i, 0x0820); + else + for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++) + b43_ofdmtab_write16(dev, + B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]); + } +} + +static void b43_phy_ww(struct b43_wldev *dev) +{ + u16 b, curr_s, best_s = 0xFFFF; + int i; + + b43_phy_mask(dev, B43_PHY_CRS0, ~B43_PHY_CRS0_EN); + b43_phy_set(dev, B43_PHY_OFDM(0x1B), 0x1000); + b43_phy_maskset(dev, B43_PHY_OFDM(0x82), 0xF0FF, 0x0300); + b43_radio_set(dev, 0x0009, 0x0080); + b43_radio_maskset(dev, 0x0012, 0xFFFC, 0x0002); + b43_wa_initgains(dev); + b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5); + b = b43_phy_read(dev, B43_PHY_PWRDOWN); + b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005); + b43_radio_set(dev, 0x0004, 0x0004); + for (i = 0x10; i <= 0x20; i++) { + b43_radio_write16(dev, 0x0013, i); + curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF; + if (!curr_s) { + best_s = 0x0000; + break; + } else if (curr_s >= 0x0080) + curr_s = 0x0100 - curr_s; + if (curr_s < best_s) + best_s = curr_s; + } + b43_phy_write(dev, B43_PHY_PWRDOWN, b); + b43_radio_mask(dev, 0x0004, 0xFFFB); + b43_radio_write16(dev, 0x0013, best_s); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC); + b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80); + b43_phy_write(dev, B43_PHY_OFDM(0xB6), 0x1C00); + b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0); + b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0); + b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF); + b43_phy_maskset(dev, B43_PHY_OFDM(0xBB), 0xF000, 0x0053); + b43_phy_maskset(dev, B43_PHY_OFDM61, 0xFE1F, 0x0120); + b43_phy_maskset(dev, B43_PHY_OFDM(0x13), 0x0FFF, 0x3000); + b43_phy_maskset(dev, B43_PHY_OFDM(0x14), 0x0FFF, 0x3000); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017); + for (i = 0; i < 6; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013); + b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030); + b43_phy_set(dev, B43_PHY_CRS0, B43_PHY_CRS0_EN); +} + +static void hardware_pctl_init_aphy(struct b43_wldev *dev) +{ + //TODO +} + +void b43_phy_inita(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + + /* This lowlevel A-PHY init is also called from G-PHY init. + * So we must not access phy->a, if called from G-PHY code. 
+ */ + B43_WARN_ON((phy->type != B43_PHYTYPE_A) && + (phy->type != B43_PHYTYPE_G)); + + might_sleep(); + + if (phy->rev >= 6) { + if (phy->type == B43_PHYTYPE_A) + b43_phy_mask(dev, B43_PHY_OFDM(0x1B), ~0x1000); + if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) + b43_phy_set(dev, B43_PHY_ENCORE, 0x0010); + else + b43_phy_mask(dev, B43_PHY_ENCORE, ~0x1010); + } + + b43_wa_all(dev); + + if (phy->type == B43_PHYTYPE_A) { + if (phy->gmode && (phy->rev < 3)) + b43_phy_set(dev, 0x0034, 0x0001); + b43_phy_rssiagc(dev, 0); + + b43_phy_set(dev, B43_PHY_CRS0, B43_PHY_CRS0_EN); + + b43_radio_init2060(dev); + + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + ((bus->boardinfo.type == SSB_BOARD_BU4306) || + (bus->boardinfo.type == SSB_BOARD_BU4309))) { + ; //TODO: A PHY LO + } + + if (phy->rev >= 3) + b43_phy_ww(dev); + + hardware_pctl_init_aphy(dev); + + //TODO: radar detection + } + + if ((phy->type == B43_PHYTYPE_G) && + (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) { + b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF); + } +} + +/* Initialise the TSSI->dBm lookup table */ +static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_a *aphy = phy->a; + s16 pab0, pab1, pab2; + + pab0 = (s16) (dev->dev->bus->sprom.pa1b0); + pab1 = (s16) (dev->dev->bus->sprom.pa1b1); + pab2 = (s16) (dev->dev->bus->sprom.pa1b2); + + if (pab0 != 0 && pab1 != 0 && pab2 != 0 && + pab0 != -1 && pab1 != -1 && pab2 != -1) { + /* The pabX values are set in SPROM. Use them. */ + if ((s8) dev->dev->bus->sprom.itssi_a != 0 && + (s8) dev->dev->bus->sprom.itssi_a != -1) + aphy->tgt_idle_tssi = + (s8) (dev->dev->bus->sprom.itssi_a); + else + aphy->tgt_idle_tssi = 62; + aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0, + pab1, pab2); + if (!aphy->tssi2dbm) + return -ENOMEM; + } else { + /* pabX values not set in SPROM, + * but APHY needs a generated table. */ + aphy->tssi2dbm = NULL; + b43err(dev->wl, "Could not generate tssi2dBm " + "table (wrong SPROM info)!\n"); + return -ENODEV; + } + + return 0; +} + +static int b43_aphy_op_allocate(struct b43_wldev *dev) +{ + struct b43_phy_a *aphy; + int err; + + aphy = kzalloc(sizeof(*aphy), GFP_KERNEL); + if (!aphy) + return -ENOMEM; + dev->phy.a = aphy; + + err = b43_aphy_init_tssi2dbm_table(dev); + if (err) + goto err_free_aphy; + + return 0; + +err_free_aphy: + kfree(aphy); + dev->phy.a = NULL; + + return err; +} + +static void b43_aphy_op_prepare_structs(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_a *aphy = phy->a; + const void *tssi2dbm; + int tgt_idle_tssi; + + /* tssi2dbm table is constant, so it is initialized at alloc time. + * Save a copy of the pointer. */ + tssi2dbm = aphy->tssi2dbm; + tgt_idle_tssi = aphy->tgt_idle_tssi; + + /* Zero out the whole PHY structure. */ + memset(aphy, 0, sizeof(*aphy)); + + aphy->tssi2dbm = tssi2dbm; + aphy->tgt_idle_tssi = tgt_idle_tssi; + + //TODO init struct b43_phy_a + +} + +static void b43_aphy_op_free(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_a *aphy = phy->a; + + kfree(aphy->tssi2dbm); + aphy->tssi2dbm = NULL; + + kfree(aphy); + dev->phy.a = NULL; +} + +static int b43_aphy_op_init(struct b43_wldev *dev) +{ + b43_phy_inita(dev); + + return 0; +} + +static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset) +{ + /* OFDM registers are base-registers for the A-PHY. 
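+ * Accesses routed as OFDM-on-G are remapped to the base route before being issued.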
*/ + if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) { + offset &= ~B43_PHYROUTE; + offset |= B43_PHYROUTE_BASE; + } + +#if B43_DEBUG + if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) { + /* Ext-G registers are only available on G-PHYs */ + b43err(dev->wl, "Invalid EXT-G PHY access at " + "0x%04X on A-PHY\n", offset); + dump_stack(); + } + if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) { + /* N-BMODE registers are only available on N-PHYs */ + b43err(dev->wl, "Invalid N-BMODE PHY access at " + "0x%04X on A-PHY\n", offset); + dump_stack(); + } +#endif /* B43_DEBUG */ + + return offset; +} + +static u16 b43_aphy_op_read(struct b43_wldev *dev, u16 reg) +{ + reg = adjust_phyreg(dev, reg); + b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + return b43_read16(dev, B43_MMIO_PHY_DATA); +} + +static void b43_aphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) +{ + reg = adjust_phyreg(dev, reg); + b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16(dev, B43_MMIO_PHY_DATA, value); +} + +static u16 b43_aphy_op_radio_read(struct b43_wldev *dev, u16 reg) +{ + /* Register 1 is a 32-bit register. */ + B43_WARN_ON(reg == 1); + /* A-PHY needs 0x40 for read access */ + reg |= 0x40; + + b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); +} + +static void b43_aphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) +{ + /* Register 1 is a 32-bit register. */ + B43_WARN_ON(reg == 1); + + b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); +} + +static bool b43_aphy_op_supports_hwpctl(struct b43_wldev *dev) +{ + return (dev->phy.rev >= 5); +} + +static void b43_aphy_op_software_rfkill(struct b43_wldev *dev, + bool blocked) +{ + struct b43_phy *phy = &dev->phy; + + if (!blocked) { + if (phy->radio_on) + return; + b43_radio_write16(dev, 0x0004, 0x00C0); + b43_radio_write16(dev, 0x0005, 0x0008); + b43_phy_mask(dev, 0x0010, 0xFFF7); + b43_phy_mask(dev, 0x0011, 0xFFF7); + b43_radio_init2060(dev); + } else { + b43_radio_write16(dev, 0x0004, 0x00FF); + b43_radio_write16(dev, 0x0005, 0x00FB); + b43_phy_set(dev, 0x0010, 0x0008); + b43_phy_set(dev, 0x0011, 0x0008); + } +} + +static int b43_aphy_op_switch_channel(struct b43_wldev *dev, + unsigned int new_channel) +{ + if (new_channel > 200) + return -EINVAL; + aphy_channel_switch(dev, new_channel); + + return 0; +} + +static unsigned int b43_aphy_op_get_default_chan(struct b43_wldev *dev) +{ + return 36; /* Default to channel 36 */ +} + +static void b43_aphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna) +{//TODO + struct b43_phy *phy = &dev->phy; + u16 tmp; + int autodiv = 0; + + if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1) + autodiv = 1; + + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP); + + b43_phy_maskset(dev, B43_PHY_BBANDCFG, ~B43_PHY_BBANDCFG_RXANT, + (autodiv ? 
B43_ANTENNA_AUTO1 : antenna) << + B43_PHY_BBANDCFG_RXANT_SHIFT); + + if (autodiv) { + tmp = b43_phy_read(dev, B43_PHY_ANTDWELL); + if (antenna == B43_ANTENNA_AUTO1) + tmp &= ~B43_PHY_ANTDWELL_AUTODIV1; + else + tmp |= B43_PHY_ANTDWELL_AUTODIV1; + b43_phy_write(dev, B43_PHY_ANTDWELL, tmp); + } + if (phy->rev < 3) + b43_phy_maskset(dev, B43_PHY_ANTDWELL, 0xFF00, 0x24); + else { + b43_phy_set(dev, B43_PHY_OFDM61, 0x10); + if (phy->rev == 3) { + b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, 0x1D); + b43_phy_write(dev, B43_PHY_ADIVRELATED, 8); + } else { + b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, 0x3A); + b43_phy_maskset(dev, B43_PHY_ADIVRELATED, 0xFF00, 8); + } + } + + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP); +} + +static void b43_aphy_op_adjust_txpower(struct b43_wldev *dev) +{//TODO +} + +static enum b43_txpwr_result b43_aphy_op_recalc_txpower(struct b43_wldev *dev, + bool ignore_tssi) +{//TODO + return B43_TXPWR_RES_DONE; +} + +static void b43_aphy_op_pwork_15sec(struct b43_wldev *dev) +{//TODO +} + +static void b43_aphy_op_pwork_60sec(struct b43_wldev *dev) +{//TODO +} + +const struct b43_phy_operations b43_phyops_a = { + .allocate = b43_aphy_op_allocate, + .free = b43_aphy_op_free, + .prepare_structs = b43_aphy_op_prepare_structs, + .init = b43_aphy_op_init, + .phy_read = b43_aphy_op_read, + .phy_write = b43_aphy_op_write, + .radio_read = b43_aphy_op_radio_read, + .radio_write = b43_aphy_op_radio_write, + .supports_hwpctl = b43_aphy_op_supports_hwpctl, + .software_rfkill = b43_aphy_op_software_rfkill, + .switch_analog = b43_phyop_switch_analog_generic, + .switch_channel = b43_aphy_op_switch_channel, + .get_default_chan = b43_aphy_op_get_default_chan, + .set_rx_antenna = b43_aphy_op_set_rx_antenna, + .recalc_txpower = b43_aphy_op_recalc_txpower, + .adjust_txpower = b43_aphy_op_adjust_txpower, + .pwork_15sec = b43_aphy_op_pwork_15sec, + .pwork_60sec = b43_aphy_op_pwork_60sec, +}; diff --git a/drivers/net/wireless/b43/phy_a.h b/drivers/net/wireless/b43/phy_a.h new file mode 100644 index 0000000..5cfaab7 --- /dev/null +++ b/drivers/net/wireless/b43/phy_a.h @@ -0,0 +1,130 @@ +#ifndef LINUX_B43_PHY_A_H_ +#define LINUX_B43_PHY_A_H_ + +#include "phy_common.h" + + +/* OFDM (A) PHY Registers */ +#define B43_PHY_VERSION_OFDM B43_PHY_OFDM(0x00) /* Versioning register for A-PHY */ +#define B43_PHY_BBANDCFG B43_PHY_OFDM(0x01) /* Baseband config */ +#define B43_PHY_BBANDCFG_RXANT 0x180 /* RX Antenna selection */ +#define B43_PHY_BBANDCFG_RXANT_SHIFT 7 +#define B43_PHY_PWRDOWN B43_PHY_OFDM(0x03) /* Powerdown */ +#define B43_PHY_CRSTHRES1_R1 B43_PHY_OFDM(0x06) /* CRS Threshold 1 (phy.rev 1 only) */ +#define B43_PHY_LNAHPFCTL B43_PHY_OFDM(0x1C) /* LNA/HPF control */ +#define B43_PHY_LPFGAINCTL B43_PHY_OFDM(0x20) /* LPF Gain control */ +#define B43_PHY_ADIVRELATED B43_PHY_OFDM(0x27) /* FIXME rename */ +#define B43_PHY_CRS0 B43_PHY_OFDM(0x29) +#define B43_PHY_CRS0_EN 0x4000 +#define B43_PHY_PEAK_COUNT B43_PHY_OFDM(0x30) +#define B43_PHY_ANTDWELL B43_PHY_OFDM(0x2B) /* Antenna dwell */ +#define B43_PHY_ANTDWELL_AUTODIV1 0x0100 /* Automatic RX diversity start antenna */ +#define B43_PHY_ENCORE B43_PHY_OFDM(0x49) /* "Encore" (RangeMax / BroadRange) */ +#define B43_PHY_ENCORE_EN 0x0200 /* Encore enable */ +#define B43_PHY_LMS B43_PHY_OFDM(0x55) +#define B43_PHY_OFDM61 B43_PHY_OFDM(0x61) /* FIXME rename */ +#define B43_PHY_OFDM61_10 0x0010 /* FIXME rename */ +#define B43_PHY_IQBAL B43_PHY_OFDM(0x69) /* I/Q balance */ +#define B43_PHY_BBTXDC_BIAS B43_PHY_OFDM(0x6B) /* Baseband TX DC bias */ +#define 
B43_PHY_OTABLECTL B43_PHY_OFDM(0x72) /* OFDM table control (see below) */ +#define B43_PHY_OTABLEOFF 0x03FF /* OFDM table offset (see below) */ +#define B43_PHY_OTABLENR 0xFC00 /* OFDM table number (see below) */ +#define B43_PHY_OTABLENR_SHIFT 10 +#define B43_PHY_OTABLEI B43_PHY_OFDM(0x73) /* OFDM table data I */ +#define B43_PHY_OTABLEQ B43_PHY_OFDM(0x74) /* OFDM table data Q */ +#define B43_PHY_HPWR_TSSICTL B43_PHY_OFDM(0x78) /* Hardware power TSSI control */ +#define B43_PHY_ADCCTL B43_PHY_OFDM(0x7A) /* ADC control */ +#define B43_PHY_IDLE_TSSI B43_PHY_OFDM(0x7B) +#define B43_PHY_A_TEMP_SENSE B43_PHY_OFDM(0x7C) /* A PHY temperature sense */ +#define B43_PHY_NRSSITHRES B43_PHY_OFDM(0x8A) /* NRSSI threshold */ +#define B43_PHY_ANTWRSETT B43_PHY_OFDM(0x8C) /* Antenna WR settle */ +#define B43_PHY_ANTWRSETT_ARXDIV 0x2000 /* Automatic RX diversity enabled */ +#define B43_PHY_CLIPPWRDOWNT B43_PHY_OFDM(0x93) /* Clip powerdown threshold */ +#define B43_PHY_OFDM9B B43_PHY_OFDM(0x9B) /* FIXME rename */ +#define B43_PHY_N1P1GAIN B43_PHY_OFDM(0xA0) +#define B43_PHY_P1P2GAIN B43_PHY_OFDM(0xA1) +#define B43_PHY_N1N2GAIN B43_PHY_OFDM(0xA2) +#define B43_PHY_CLIPTHRES B43_PHY_OFDM(0xA3) +#define B43_PHY_CLIPN1P2THRES B43_PHY_OFDM(0xA4) +#define B43_PHY_CCKSHIFTBITS_WA B43_PHY_OFDM(0xA5) /* CCK shiftbits workaround, FIXME rename */ +#define B43_PHY_CCKSHIFTBITS B43_PHY_OFDM(0xA7) /* FIXME rename */ +#define B43_PHY_DIVSRCHIDX B43_PHY_OFDM(0xA8) /* Divider search gain/index */ +#define B43_PHY_CLIPP2THRES B43_PHY_OFDM(0xA9) +#define B43_PHY_CLIPP3THRES B43_PHY_OFDM(0xAA) +#define B43_PHY_DIVP1P2GAIN B43_PHY_OFDM(0xAB) +#define B43_PHY_DIVSRCHGAINBACK B43_PHY_OFDM(0xAD) /* Divider search gain back */ +#define B43_PHY_DIVSRCHGAINCHNG B43_PHY_OFDM(0xAE) /* Divider search gain change */ +#define B43_PHY_CRSTHRES1 B43_PHY_OFDM(0xC0) /* CRS Threshold 1 (phy.rev >= 2 only) */ +#define B43_PHY_CRSTHRES2 B43_PHY_OFDM(0xC1) /* CRS Threshold 2 (phy.rev >= 2 only) */ +#define B43_PHY_TSSIP_LTBASE B43_PHY_OFDM(0x380) /* TSSI power lookup table base */ +#define B43_PHY_DC_LTBASE B43_PHY_OFDM(0x3A0) /* DC lookup table base */ +#define B43_PHY_GAIN_LTBASE B43_PHY_OFDM(0x3C0) /* Gain lookup table base */ + +/*** OFDM table numbers ***/ +#define B43_OFDMTAB(number, offset) (((number) << B43_PHY_OTABLENR_SHIFT) | (offset)) +#define B43_OFDMTAB_AGC1 B43_OFDMTAB(0x00, 0) +#define B43_OFDMTAB_GAIN0 B43_OFDMTAB(0x00, 0) +#define B43_OFDMTAB_GAINX B43_OFDMTAB(0x01, 0) //TODO rename +#define B43_OFDMTAB_GAIN1 B43_OFDMTAB(0x01, 4) +#define B43_OFDMTAB_AGC3 B43_OFDMTAB(0x02, 0) +#define B43_OFDMTAB_GAIN2 B43_OFDMTAB(0x02, 3) +#define B43_OFDMTAB_LNAHPFGAIN1 B43_OFDMTAB(0x03, 0) +#define B43_OFDMTAB_WRSSI B43_OFDMTAB(0x04, 0) +#define B43_OFDMTAB_LNAHPFGAIN2 B43_OFDMTAB(0x04, 0) +#define B43_OFDMTAB_NOISESCALE B43_OFDMTAB(0x05, 0) +#define B43_OFDMTAB_AGC2 B43_OFDMTAB(0x06, 0) +#define B43_OFDMTAB_ROTOR B43_OFDMTAB(0x08, 0) +#define B43_OFDMTAB_ADVRETARD B43_OFDMTAB(0x09, 0) +#define B43_OFDMTAB_DAC B43_OFDMTAB(0x0C, 0) +#define B43_OFDMTAB_DC B43_OFDMTAB(0x0E, 7) +#define B43_OFDMTAB_PWRDYN2 B43_OFDMTAB(0x0E, 12) +#define B43_OFDMTAB_LNAGAIN B43_OFDMTAB(0x0E, 13) +#define B43_OFDMTAB_UNKNOWN_0F B43_OFDMTAB(0x0F, 0) //TODO rename +#define B43_OFDMTAB_UNKNOWN_APHY B43_OFDMTAB(0x0F, 7) //TODO rename +#define B43_OFDMTAB_LPFGAIN B43_OFDMTAB(0x0F, 12) +#define B43_OFDMTAB_RSSI B43_OFDMTAB(0x10, 0) +#define B43_OFDMTAB_UNKNOWN_11 B43_OFDMTAB(0x11, 4) //TODO rename +#define B43_OFDMTAB_AGC1_R1 B43_OFDMTAB(0x13, 0) +#define 
B43_OFDMTAB_GAINX_R1 B43_OFDMTAB(0x14, 0) //TODO remove! +#define B43_OFDMTAB_MINSIGSQ B43_OFDMTAB(0x14, 0) +#define B43_OFDMTAB_AGC3_R1 B43_OFDMTAB(0x15, 0) +#define B43_OFDMTAB_WRSSI_R1 B43_OFDMTAB(0x15, 4) +#define B43_OFDMTAB_TSSI B43_OFDMTAB(0x15, 0) +#define B43_OFDMTAB_DACRFPABB B43_OFDMTAB(0x16, 0) +#define B43_OFDMTAB_DACOFF B43_OFDMTAB(0x17, 0) +#define B43_OFDMTAB_DCBIAS B43_OFDMTAB(0x18, 0) + +u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset); +void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, + u16 offset, u16 value); +u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset); +void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, + u16 offset, u32 value); + + +struct b43_phy_a { + /* Pointer to the table used to convert a + * TSSI value to dBm-Q5.2 */ + const s8 *tssi2dbm; + /* Target idle TSSI */ + int tgt_idle_tssi; + /* Current idle TSSI */ + int cur_idle_tssi;//FIXME value currently not set + + /* A-PHY TX Power control value. */ + u16 txpwr_offset; + + //TODO lots of missing stuff +}; + +/** + * b43_phy_inita - Lowlevel A-PHY init routine. + * This is _only_ used by the G-PHY code. + */ +void b43_phy_inita(struct b43_wldev *dev); + + +struct b43_phy_operations; +extern const struct b43_phy_operations b43_phyops_a; + +#endif /* LINUX_B43_PHY_A_H_ */ diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c new file mode 100644 index 0000000..8f7d7ef --- /dev/null +++ b/drivers/net/wireless/b43/phy_common.c @@ -0,0 +1,468 @@ +/* + + Broadcom B43 wireless driver + Common PHY routines + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005-2007 Stefano Brivio + Copyright (c) 2005-2008 Michael Buesch + Copyright (c) 2005, 2006 Danny van Dyk + Copyright (c) 2005, 2006 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "phy_common.h" +#include "phy_g.h" +#include "phy_a.h" +#include "phy_n.h" +#include "phy_lp.h" +#include "b43.h" +#include "main.h" + + +int b43_phy_allocate(struct b43_wldev *dev) +{ + struct b43_phy *phy = &(dev->phy); + int err; + + phy->ops = NULL; + + switch (phy->type) { + case B43_PHYTYPE_A: + phy->ops = &b43_phyops_a; + break; + case B43_PHYTYPE_G: + phy->ops = &b43_phyops_g; + break; + case B43_PHYTYPE_N: +#ifdef CONFIG_B43_NPHY + phy->ops = &b43_phyops_n; +#endif + break; + case B43_PHYTYPE_LP: +#ifdef CONFIG_B43_PHY_LP + phy->ops = &b43_phyops_lp; +#endif + break; + } + if (B43_WARN_ON(!phy->ops)) + return -ENODEV; + + err = phy->ops->allocate(dev); + if (err) + phy->ops = NULL; + + return err; +} + +void b43_phy_free(struct b43_wldev *dev) +{ + dev->phy.ops->free(dev); + dev->phy.ops = NULL; +} + +int b43_phy_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + const struct b43_phy_operations *ops = phy->ops; + int err; + + phy->channel = ops->get_default_chan(dev); + + ops->software_rfkill(dev, false); + err = ops->init(dev); + if (err) { + b43err(dev->wl, "PHY init failed\n"); + goto err_block_rf; + } + /* Make sure to switch hardware and firmware (SHM) to + * the default channel. */ + err = b43_switch_channel(dev, ops->get_default_chan(dev)); + if (err) { + b43err(dev->wl, "PHY init: Channel switch to default failed\n"); + goto err_phy_exit; + } + + return 0; + +err_phy_exit: + if (ops->exit) + ops->exit(dev); +err_block_rf: + ops->software_rfkill(dev, true); + + return err; +} + +void b43_phy_exit(struct b43_wldev *dev) +{ + const struct b43_phy_operations *ops = dev->phy.ops; + + ops->software_rfkill(dev, true); + if (ops->exit) + ops->exit(dev); +} + +bool b43_has_hardware_pctl(struct b43_wldev *dev) +{ + if (!dev->phy.hardware_power_control) + return 0; + if (!dev->phy.ops->supports_hwpctl) + return 0; + return dev->phy.ops->supports_hwpctl(dev); +} + +void b43_radio_lock(struct b43_wldev *dev) +{ + u32 macctl; + +#if B43_DEBUG + B43_WARN_ON(dev->phy.radio_locked); + dev->phy.radio_locked = 1; +#endif + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl |= B43_MACCTL_RADIOLOCK; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write and wait for the firmware + * to finish any radio register access. 
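+ * The dummy MMIO read below flushes the posted write of B43_MMIO_MACCTL,
+ * and the short delay gives the microcode time to complete a radio
+ * register access that may already be in flight.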
*/ + b43_read32(dev, B43_MMIO_MACCTL); + udelay(10); +} + +void b43_radio_unlock(struct b43_wldev *dev) +{ + u32 macctl; + +#if B43_DEBUG + B43_WARN_ON(!dev->phy.radio_locked); + dev->phy.radio_locked = 0; +#endif + + /* Commit any write */ + b43_read16(dev, B43_MMIO_PHY_VER); + /* unlock */ + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_RADIOLOCK; + b43_write32(dev, B43_MMIO_MACCTL, macctl); +} + +void b43_phy_lock(struct b43_wldev *dev) +{ +#if B43_DEBUG + B43_WARN_ON(dev->phy.phy_locked); + dev->phy.phy_locked = 1; +#endif + B43_WARN_ON(dev->dev->id.revision < 3); + + if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); +} + +void b43_phy_unlock(struct b43_wldev *dev) +{ +#if B43_DEBUG + B43_WARN_ON(!dev->phy.phy_locked); + dev->phy.phy_locked = 0; +#endif + B43_WARN_ON(dev->dev->id.revision < 3); + + if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) + b43_power_saving_ctl_bits(dev, 0); +} + +static inline void assert_mac_suspended(struct b43_wldev *dev) +{ + if (!B43_DEBUG) + return; + if ((b43_status(dev) >= B43_STAT_INITIALIZED) && + (dev->mac_suspended <= 0)) { + b43dbg(dev->wl, "PHY/RADIO register access with " + "enabled MAC.\n"); + dump_stack(); + } +} + +u16 b43_radio_read(struct b43_wldev *dev, u16 reg) +{ + assert_mac_suspended(dev); + return dev->phy.ops->radio_read(dev, reg); +} + +void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value) +{ + assert_mac_suspended(dev); + dev->phy.ops->radio_write(dev, reg, value); +} + +void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask) +{ + b43_radio_write16(dev, offset, + b43_radio_read16(dev, offset) & mask); +} + +void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set) +{ + b43_radio_write16(dev, offset, + b43_radio_read16(dev, offset) | set); +} + +void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set) +{ + b43_radio_write16(dev, offset, + (b43_radio_read16(dev, offset) & mask) | set); +} + +u16 b43_phy_read(struct b43_wldev *dev, u16 reg) +{ + assert_mac_suspended(dev); + return dev->phy.ops->phy_read(dev, reg); +} + +void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value) +{ + assert_mac_suspended(dev); + dev->phy.ops->phy_write(dev, reg, value); +} + +void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg) +{ + assert_mac_suspended(dev); + dev->phy.ops->phy_write(dev, destreg, + dev->phy.ops->phy_read(dev, srcreg)); +} + +void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask) +{ + if (dev->phy.ops->phy_maskset) { + assert_mac_suspended(dev); + dev->phy.ops->phy_maskset(dev, offset, mask, 0); + } else { + b43_phy_write(dev, offset, + b43_phy_read(dev, offset) & mask); + } +} + +void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set) +{ + if (dev->phy.ops->phy_maskset) { + assert_mac_suspended(dev); + dev->phy.ops->phy_maskset(dev, offset, 0xFFFF, set); + } else { + b43_phy_write(dev, offset, + b43_phy_read(dev, offset) | set); + } +} + +void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set) +{ + if (dev->phy.ops->phy_maskset) { + assert_mac_suspended(dev); + dev->phy.ops->phy_maskset(dev, offset, mask, set); + } else { + b43_phy_write(dev, offset, + (b43_phy_read(dev, offset) & mask) | set); + } +} + +int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel) +{ + struct b43_phy *phy = &(dev->phy); + u16 channelcookie, savedcookie; + int err; + + if (new_channel == B43_DEFAULT_CHANNEL) + new_channel = phy->ops->get_default_chan(dev); + + /* First we set the channel 
radio code to prevent the + * firmware from sending ghost packets. + */ + channelcookie = new_channel; + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + channelcookie |= 0x100; + //FIXME set 40Mhz flag if required + savedcookie = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN, channelcookie); + + /* Now try to switch the PHY hardware channel. */ + err = phy->ops->switch_channel(dev, new_channel); + if (err) + goto err_restore_cookie; + + dev->phy.channel = new_channel; + /* Wait for the radio to tune to the channel and stabilize. */ + msleep(8); + + return 0; + +err_restore_cookie: + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_CHAN, savedcookie); + + return err; +} + +void b43_software_rfkill(struct b43_wldev *dev, bool blocked) +{ + struct b43_phy *phy = &dev->phy; + + b43_mac_suspend(dev); + phy->ops->software_rfkill(dev, blocked); + phy->radio_on = !blocked; + b43_mac_enable(dev); +} + +/** + * b43_phy_txpower_adjust_work - TX power workqueue. + * + * Workqueue for updating the TX power parameters in hardware. + */ +void b43_phy_txpower_adjust_work(struct work_struct *work) +{ + struct b43_wl *wl = container_of(work, struct b43_wl, + txpower_adjust_work); + struct b43_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + + if (likely(dev && (b43_status(dev) >= B43_STAT_STARTED))) + dev->phy.ops->adjust_txpower(dev); + + mutex_unlock(&wl->mutex); +} + +void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags) +{ + struct b43_phy *phy = &dev->phy; + unsigned long now = jiffies; + enum b43_txpwr_result result; + + if (!(flags & B43_TXPWR_IGNORE_TIME)) { + /* Check if it's time for a TXpower check. */ + if (time_before(now, phy->next_txpwr_check_time)) + return; /* Not yet */ + } + /* The next check will be needed in two seconds, or later. */ + phy->next_txpwr_check_time = round_jiffies(now + (HZ * 2)); + + if ((dev->dev->bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + (dev->dev->bus->boardinfo.type == SSB_BOARD_BU4306)) + return; /* No software txpower adjustment needed */ + + result = phy->ops->recalc_txpower(dev, !!(flags & B43_TXPWR_IGNORE_TSSI)); + if (result == B43_TXPWR_RES_DONE) + return; /* We are done. */ + B43_WARN_ON(result != B43_TXPWR_RES_NEED_ADJUST); + B43_WARN_ON(phy->ops->adjust_txpower == NULL); + + /* We must adjust the transmission power in hardware. + * Schedule b43_phy_txpower_adjust_work(). */ + ieee80211_queue_work(dev->wl->hw, &dev->wl->txpower_adjust_work); +} + +int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset) +{ + const bool is_ofdm = (shm_offset != B43_SHM_SH_TSSI_CCK); + unsigned int a, b, c, d; + unsigned int average; + u32 tmp; + + tmp = b43_shm_read32(dev, B43_SHM_SHARED, shm_offset); + a = tmp & 0xFF; + b = (tmp >> 8) & 0xFF; + c = (tmp >> 16) & 0xFF; + d = (tmp >> 24) & 0xFF; + if (a == 0 || a == B43_TSSI_MAX || + b == 0 || b == B43_TSSI_MAX || + c == 0 || c == B43_TSSI_MAX || + d == 0 || d == B43_TSSI_MAX) + return -ENOENT; + /* The values are OK. Clear them. */ + tmp = B43_TSSI_MAX | (B43_TSSI_MAX << 8) | + (B43_TSSI_MAX << 16) | (B43_TSSI_MAX << 24); + b43_shm_write32(dev, B43_SHM_SHARED, shm_offset, tmp); + + if (is_ofdm) { + a = (a + 32) & 0x3F; + b = (b + 32) & 0x3F; + c = (c + 32) & 0x3F; + d = (d + 32) & 0x3F; + } + + /* Get the average of the values with 0.5 added to each value. 
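+ * The "+ 2" in the sum below does that in integer arithmetic:
+ * (a + b + c + d + 2) / 4 is the mean of the four samples rounded to
+ * the nearest integer.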
*/ + average = (a + b + c + d + 2) / 4; + if (is_ofdm) { + /* Adjust for CCK-boost */ + if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO) + & B43_HF_CCKBOOST) + average = (average >= 13) ? (average - 13) : 0; + } + + return average; +} + +void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on) +{ + b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */ +struct b43_c32 b43_cordic(int theta) +{ + u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304, + 58666, 29335, 14668, 7334, 3667, 1833, 917, 458, + 229, 115, 57, 29, }; + u8 i; + s32 tmp; + s8 signx = 1; + u32 angle = 0; + struct b43_c32 ret = { .i = 39797, .q = 0, }; + + while (theta > (180 << 16)) + theta -= (360 << 16); + while (theta < -(180 << 16)) + theta += (360 << 16); + + if (theta > (90 << 16)) { + theta -= (180 << 16); + signx = -1; + } else if (theta < -(90 << 16)) { + theta += (180 << 16); + signx = -1; + } + + for (i = 0; i <= 17; i++) { + if (theta > angle) { + tmp = ret.i - (ret.q >> i); + ret.q += ret.i >> i; + ret.i = tmp; + angle += arctg[i]; + } else { + tmp = ret.i + (ret.q >> i); + ret.q -= ret.i >> i; + ret.i = tmp; + angle -= arctg[i]; + } + } + + ret.i *= signx; + ret.q *= signx; + + return ret; +} diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h new file mode 100644 index 0000000..1f58c0d --- /dev/null +++ b/drivers/net/wireless/b43/phy_common.h @@ -0,0 +1,435 @@ +#ifndef LINUX_B43_PHY_COMMON_H_ +#define LINUX_B43_PHY_COMMON_H_ + +#include +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,28)) +#include +#endif + +struct b43_wldev; + +/* Complex number using 2 32-bit signed integers */ +struct b43_c32 { s32 i, q; }; + +#define CORDIC_CONVERT(value) (((value) >= 0) ? \ + ((((value) >> 15) + 1) >> 1) : \ + -((((-(value)) >> 15) + 1) >> 1)) + +/* PHY register routing bits */ +#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */ +#define B43_PHYROUTE_BASE 0x0000 /* Base registers */ +#define B43_PHYROUTE_OFDM_GPHY 0x0400 /* OFDM register routing for G-PHYs */ +#define B43_PHYROUTE_EXT_GPHY 0x0800 /* Extended G-PHY registers */ +#define B43_PHYROUTE_N_BMODE 0x0C00 /* N-PHY BMODE registers */ + +/* CCK (B-PHY) registers. */ +#define B43_PHY_CCK(reg) ((reg) | B43_PHYROUTE_BASE) +/* N-PHY registers. */ +#define B43_PHY_N(reg) ((reg) | B43_PHYROUTE_BASE) +/* N-PHY BMODE registers. */ +#define B43_PHY_N_BMODE(reg) ((reg) | B43_PHYROUTE_N_BMODE) +/* OFDM (A-PHY) registers. */ +#define B43_PHY_OFDM(reg) ((reg) | B43_PHYROUTE_OFDM_GPHY) +/* Extended G-PHY registers. */ +#define B43_PHY_EXTG(reg) ((reg) | B43_PHYROUTE_EXT_GPHY) + + +/* Masks for the PHY versioning registers. 
*/ +#define B43_PHYVER_ANALOG 0xF000 +#define B43_PHYVER_ANALOG_SHIFT 12 +#define B43_PHYVER_TYPE 0x0F00 +#define B43_PHYVER_TYPE_SHIFT 8 +#define B43_PHYVER_VERSION 0x00FF + +/** + * enum b43_interference_mitigation - Interference Mitigation mode + * + * @B43_INTERFMODE_NONE: Disabled + * @B43_INTERFMODE_NONWLAN: Non-WLAN Interference Mitigation + * @B43_INTERFMODE_MANUALWLAN: WLAN Interference Mitigation + * @B43_INTERFMODE_AUTOWLAN: Automatic WLAN Interference Mitigation + */ +enum b43_interference_mitigation { + B43_INTERFMODE_NONE, + B43_INTERFMODE_NONWLAN, + B43_INTERFMODE_MANUALWLAN, + B43_INTERFMODE_AUTOWLAN, +}; + +/* Antenna identifiers */ +enum { + B43_ANTENNA0 = 0, /* Antenna 0 */ + B43_ANTENNA1 = 1, /* Antenna 1 */ + B43_ANTENNA_AUTO0 = 2, /* Automatic, starting with antenna 0 */ + B43_ANTENNA_AUTO1 = 3, /* Automatic, starting with antenna 1 */ + B43_ANTENNA2 = 4, + B43_ANTENNA3 = 8, + + B43_ANTENNA_AUTO = B43_ANTENNA_AUTO0, + B43_ANTENNA_DEFAULT = B43_ANTENNA_AUTO, +}; + +/** + * enum b43_txpwr_result - Return value for the recalc_txpower PHY op. + * + * @B43_TXPWR_RES_NEED_ADJUST: Values changed. Hardware adjustment is needed. + * @B43_TXPWR_RES_DONE: No more work to do. Everything is done. + */ +enum b43_txpwr_result { + B43_TXPWR_RES_NEED_ADJUST, + B43_TXPWR_RES_DONE, +}; + +/** + * struct b43_phy_operations - Function pointers for PHY ops. + * + * @allocate: Allocate and initialise the PHY data structures. + * Must not be NULL. + * @free: Destroy and free the PHY data structures. + * Must not be NULL. + * + * @prepare_structs: Prepare the PHY data structures. + * The data structures allocated in @allocate are + * initialized here. + * Must not be NULL. + * @prepare_hardware: Prepare the PHY. This is called before b43_chip_init to + * do some early early PHY hardware init. + * Can be NULL, if not required. + * @init: Initialize the PHY. + * Must not be NULL. + * @exit: Shutdown the PHY. + * Can be NULL, if not required. + * + * @phy_read: Read from a PHY register. + * Must not be NULL. + * @phy_write: Write to a PHY register. + * Must not be NULL. + * @phy_maskset: Maskset a PHY register, taking shortcuts. + * If it is NULL, a generic algorithm is used. + * @radio_read: Read from a Radio register. + * Must not be NULL. + * @radio_write: Write to a Radio register. + * Must not be NULL. + * + * @supports_hwpctl: Returns a boolean whether Hardware Power Control + * is supported or not. + * If NULL, hwpctl is assumed to be never supported. + * @software_rfkill: Turn the radio ON or OFF. + * Possible state values are + * RFKILL_STATE_SOFT_BLOCKED or + * RFKILL_STATE_UNBLOCKED + * Must not be NULL. + * @switch_analog: Turn the Analog on/off. + * Must not be NULL. + * @switch_channel: Switch the radio to another channel. + * Must not be NULL. + * @get_default_chan: Just returns the default channel number. + * Must not be NULL. + * @set_rx_antenna: Set the antenna used for RX. + * Can be NULL, if not supported. + * @interf_mitigation: Switch the Interference Mitigation mode. + * Can be NULL, if not supported. + * + * @recalc_txpower: Recalculate the transmission power parameters. + * This callback has to recalculate the TX power settings, + * but does not need to write them to the hardware, yet. + * Returns enum b43_txpwr_result to indicate whether the hardware + * needs to be adjusted. + * If B43_TXPWR_NEED_ADJUST is returned, @adjust_txpower + * will be called later. 
+ * If the parameter "ignore_tssi" is true, the TSSI values should + * be ignored and a recalculation of the power settings should be + * done even if the TSSI values did not change. + * This function may sleep, but should not. + * Must not be NULL. + * @adjust_txpower: Write the previously calculated TX power settings + * (from @recalc_txpower) to the hardware. + * This function may sleep. + * Can be NULL, if (and ONLY if) @recalc_txpower _always_ + * returns B43_TXPWR_RES_DONE. + * + * @pwork_15sec: Periodic work. Called every 15 seconds. + * Can be NULL, if not required. + * @pwork_60sec: Periodic work. Called every 60 seconds. + * Can be NULL, if not required. + */ +struct b43_phy_operations { + /* Initialisation */ + int (*allocate)(struct b43_wldev *dev); + void (*free)(struct b43_wldev *dev); + void (*prepare_structs)(struct b43_wldev *dev); + int (*prepare_hardware)(struct b43_wldev *dev); + int (*init)(struct b43_wldev *dev); + void (*exit)(struct b43_wldev *dev); + + /* Register access */ + u16 (*phy_read)(struct b43_wldev *dev, u16 reg); + void (*phy_write)(struct b43_wldev *dev, u16 reg, u16 value); + void (*phy_maskset)(struct b43_wldev *dev, u16 reg, u16 mask, u16 set); + u16 (*radio_read)(struct b43_wldev *dev, u16 reg); + void (*radio_write)(struct b43_wldev *dev, u16 reg, u16 value); + + /* Radio */ + bool (*supports_hwpctl)(struct b43_wldev *dev); + void (*software_rfkill)(struct b43_wldev *dev, bool blocked); + void (*switch_analog)(struct b43_wldev *dev, bool on); + int (*switch_channel)(struct b43_wldev *dev, unsigned int new_channel); + unsigned int (*get_default_chan)(struct b43_wldev *dev); + void (*set_rx_antenna)(struct b43_wldev *dev, int antenna); + int (*interf_mitigation)(struct b43_wldev *dev, + enum b43_interference_mitigation new_mode); + + /* Transmission power adjustment */ + enum b43_txpwr_result (*recalc_txpower)(struct b43_wldev *dev, + bool ignore_tssi); + void (*adjust_txpower)(struct b43_wldev *dev); + + /* Misc */ + void (*pwork_15sec)(struct b43_wldev *dev); + void (*pwork_60sec)(struct b43_wldev *dev); +}; + +struct b43_phy_a; +struct b43_phy_g; +struct b43_phy_n; +struct b43_phy_lp; + +struct b43_phy { + /* Hardware operation callbacks. */ + const struct b43_phy_operations *ops; + + /* Most hardware context information is stored in the standard- + * specific data structures pointed to by the pointers below. + * Only one of them is valid (the currently enabled PHY). */ +#ifdef CONFIG_B43_DEBUG + /* No union for debug build to force NULL derefs in buggy code. */ + struct { +#else + union { +#endif + /* A-PHY specific information */ + struct b43_phy_a *a; + /* G-PHY specific information */ + struct b43_phy_g *g; + /* N-PHY specific information */ + struct b43_phy_n *n; + /* LP-PHY specific information */ + struct b43_phy_lp *lp; + }; + + /* Band support flags. */ + bool supports_2ghz; + bool supports_5ghz; + + /* HT info */ + bool is_40mhz; + + /* GMODE bit enabled? */ + bool gmode; + + /* Analog Type */ + u8 analog; + /* B43_PHYTYPE_ */ + u8 type; + /* PHY revision number. */ + u8 rev; + + /* Radio versioning */ + u16 radio_manuf; /* Radio manufacturer */ + u16 radio_ver; /* Radio version */ + u8 radio_rev; /* Radio revision */ + + /* Software state of the radio */ + bool radio_on; + + /* Desired TX power level (in dBm). + * This is set by the user and adjusted in b43_phy_xmitpower(). */ + int desired_txpower; + + /* Hardware Power Control enabled? 
*/ + bool hardware_power_control; + + /* The time (in absolute jiffies) when the next TX power output + * check is needed. */ + unsigned long next_txpwr_check_time; + + /* current channel */ + unsigned int channel; + + /* PHY TX errors counter. */ + atomic_t txerr_cnt; + +#ifdef CONFIG_B43_DEBUG + /* PHY registers locked (w.r.t. firmware) */ + bool phy_locked; + /* Radio registers locked (w.r.t. firmware) */ + bool radio_locked; +#endif /* B43_DEBUG */ +}; + + +/** + * b43_phy_allocate - Allocate PHY structs + * Allocate the PHY data structures, based on the current dev->phy.type + */ +int b43_phy_allocate(struct b43_wldev *dev); + +/** + * b43_phy_free - Free PHY structs + */ +void b43_phy_free(struct b43_wldev *dev); + +/** + * b43_phy_init - Initialise the PHY + */ +int b43_phy_init(struct b43_wldev *dev); + +/** + * b43_phy_exit - Cleanup PHY + */ +void b43_phy_exit(struct b43_wldev *dev); + +/** + * b43_has_hardware_pctl - Hardware Power Control supported? + * Returns a boolean, whether hardware power control is supported. + */ +bool b43_has_hardware_pctl(struct b43_wldev *dev); + +/** + * b43_phy_read - 16bit PHY register read access + */ +u16 b43_phy_read(struct b43_wldev *dev, u16 reg); + +/** + * b43_phy_write - 16bit PHY register write access + */ +void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value); + +/** + * b43_phy_copy - copy contents of 16bit PHY register to another + */ +void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg); + +/** + * b43_phy_mask - Mask a PHY register with a mask + */ +void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask); + +/** + * b43_phy_set - OR a PHY register with a bitmap + */ +void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set); + +/** + * b43_phy_maskset - Mask and OR a PHY register with a mask and bitmap + */ +void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set); + +/** + * b43_radio_read - 16bit Radio register read access + */ +u16 b43_radio_read(struct b43_wldev *dev, u16 reg); +#define b43_radio_read16 b43_radio_read /* DEPRECATED */ + +/** + * b43_radio_write - 16bit Radio register write access + */ +void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value); +#define b43_radio_write16 b43_radio_write /* DEPRECATED */ + +/** + * b43_radio_mask - Mask a 16bit radio register with a mask + */ +void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask); + +/** + * b43_radio_set - OR a 16bit radio register with a bitmap + */ +void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set); + +/** + * b43_radio_maskset - Mask and OR a radio register with a mask and bitmap + */ +void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set); + +/** + * b43_radio_lock - Lock firmware radio register access + */ +void b43_radio_lock(struct b43_wldev *dev); + +/** + * b43_radio_unlock - Unlock firmware radio register access + */ +void b43_radio_unlock(struct b43_wldev *dev); + +/** + * b43_phy_lock - Lock firmware PHY register access + */ +void b43_phy_lock(struct b43_wldev *dev); + +/** + * b43_phy_unlock - Unlock firmware PHY register access + */ +void b43_phy_unlock(struct b43_wldev *dev); + +/** + * b43_switch_channel - Switch to another channel + */ +int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel); +/** + * B43_DEFAULT_CHANNEL - Switch to the default channel. + */ +#define B43_DEFAULT_CHANNEL UINT_MAX + +/** + * b43_software_rfkill - Turn the radio ON or OFF in software. 
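+ *
+ * The MAC is suspended around the PHY's software_rfkill callback and
+ * phy->radio_on is updated to reflect the new state.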
+ */ +void b43_software_rfkill(struct b43_wldev *dev, bool blocked); + +/** + * b43_phy_txpower_check - Check TX power output. + * + * Compare the current TX power output to the desired power emission + * and schedule an adjustment in case it mismatches. + * + * @flags: OR'ed enum b43_phy_txpower_check_flags flags. + * See the docs below. + */ +void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags); +/** + * enum b43_phy_txpower_check_flags - Flags for b43_phy_txpower_check() + * + * @B43_TXPWR_IGNORE_TIME: Ignore the schedule time and force-redo + * the check now. + * @B43_TXPWR_IGNORE_TSSI: Redo the recalculation, even if the average + * TSSI did not change. + */ +enum b43_phy_txpower_check_flags { + B43_TXPWR_IGNORE_TIME = (1 << 0), + B43_TXPWR_IGNORE_TSSI = (1 << 1), +}; + +struct work_struct; +void b43_phy_txpower_adjust_work(struct work_struct *work); + +/** + * b43_phy_shm_tssi_read - Read the average of the last 4 TSSI from SHM. + * + * @shm_offset: The SHM address to read the values from. + * + * Returns the average of the 4 TSSI values, or a negative error code. + */ +int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset); + +/** + * b43_phy_switch_analog_generic - Generic PHY operation for switching the Analog. + * + * It does the switching based on the PHY0 core register. + * Do _not_ call this directly. Only use it as a switch_analog callback + * for struct b43_phy_operations. + */ +void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on); + +struct b43_c32 b43_cordic(int theta); + +#endif /* LINUX_B43_PHY_COMMON_H_ */ diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c new file mode 100644 index 0000000..382826a --- /dev/null +++ b/drivers/net/wireless/b43/phy_g.c @@ -0,0 +1,3056 @@ +/* + + Broadcom B43 wireless driver + IEEE 802.11g PHY driver + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005-2007 Stefano Brivio + Copyright (c) 2005-2008 Michael Buesch + Copyright (c) 2005, 2006 Danny van Dyk + Copyright (c) 2005, 2006 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "phy_g.h" +#include "phy_common.h" +#include "lo.h" +#include "main.h" + +#include + + +static const s8 b43_tssi2dbm_g_table[] = { + 77, 77, 77, 76, + 76, 76, 75, 75, + 74, 74, 73, 73, + 73, 72, 72, 71, + 71, 70, 70, 69, + 68, 68, 67, 67, + 66, 65, 65, 64, + 63, 63, 62, 61, + 60, 59, 58, 57, + 56, 55, 54, 53, + 52, 50, 49, 47, + 45, 43, 40, 37, + 33, 28, 22, 14, + 5, -7, -20, -20, + -20, -20, -20, -20, + -20, -20, -20, -20, +}; + +static const u8 b43_radio_channel_codes_bg[] = { + 12, 17, 22, 27, + 32, 37, 42, 47, + 52, 57, 62, 67, + 72, 84, +}; + + +static void b43_calc_nrssi_threshold(struct b43_wldev *dev); + + +#define bitrev4(tmp) (bitrev8(tmp) >> 4) + + +/* Get the freq, as it has to be written to the device. 
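+ * The value comes from b43_radio_channel_codes_bg[] above, indexed by
+ * channel - 1: channel 1 maps to code 12, channel 6 to 37 and
+ * channel 14 to 84.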
*/ +static inline u16 channel2freq_bg(u8 channel) +{ + B43_WARN_ON(!(channel >= 1 && channel <= 14)); + + return b43_radio_channel_codes_bg[channel - 1]; +} + +static void generate_rfatt_list(struct b43_wldev *dev, + struct b43_rfatt_list *list) +{ + struct b43_phy *phy = &dev->phy; + + /* APHY.rev < 5 || GPHY.rev < 6 */ + static const struct b43_rfatt rfatt_0[] = { + {.att = 3,.with_padmix = 0,}, + {.att = 1,.with_padmix = 0,}, + {.att = 5,.with_padmix = 0,}, + {.att = 7,.with_padmix = 0,}, + {.att = 9,.with_padmix = 0,}, + {.att = 2,.with_padmix = 0,}, + {.att = 0,.with_padmix = 0,}, + {.att = 4,.with_padmix = 0,}, + {.att = 6,.with_padmix = 0,}, + {.att = 8,.with_padmix = 0,}, + {.att = 1,.with_padmix = 1,}, + {.att = 2,.with_padmix = 1,}, + {.att = 3,.with_padmix = 1,}, + {.att = 4,.with_padmix = 1,}, + }; + /* Radio.rev == 8 && Radio.version == 0x2050 */ + static const struct b43_rfatt rfatt_1[] = { + {.att = 2,.with_padmix = 1,}, + {.att = 4,.with_padmix = 1,}, + {.att = 6,.with_padmix = 1,}, + {.att = 8,.with_padmix = 1,}, + {.att = 10,.with_padmix = 1,}, + {.att = 12,.with_padmix = 1,}, + {.att = 14,.with_padmix = 1,}, + }; + /* Otherwise */ + static const struct b43_rfatt rfatt_2[] = { + {.att = 0,.with_padmix = 1,}, + {.att = 2,.with_padmix = 1,}, + {.att = 4,.with_padmix = 1,}, + {.att = 6,.with_padmix = 1,}, + {.att = 8,.with_padmix = 1,}, + {.att = 9,.with_padmix = 1,}, + {.att = 9,.with_padmix = 1,}, + }; + + if (!b43_has_hardware_pctl(dev)) { + /* Software pctl */ + list->list = rfatt_0; + list->len = ARRAY_SIZE(rfatt_0); + list->min_val = 0; + list->max_val = 9; + return; + } + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { + /* Hardware pctl */ + list->list = rfatt_1; + list->len = ARRAY_SIZE(rfatt_1); + list->min_val = 0; + list->max_val = 14; + return; + } + /* Hardware pctl */ + list->list = rfatt_2; + list->len = ARRAY_SIZE(rfatt_2); + list->min_val = 0; + list->max_val = 9; +} + +static void generate_bbatt_list(struct b43_wldev *dev, + struct b43_bbatt_list *list) +{ + static const struct b43_bbatt bbatt_0[] = { + {.att = 0,}, + {.att = 1,}, + {.att = 2,}, + {.att = 3,}, + {.att = 4,}, + {.att = 5,}, + {.att = 6,}, + {.att = 7,}, + {.att = 8,}, + }; + + list->list = bbatt_0; + list->len = ARRAY_SIZE(bbatt_0); + list->min_val = 0; + list->max_val = 8; +} + +static void b43_shm_clear_tssi(struct b43_wldev *dev) +{ + b43_shm_write16(dev, B43_SHM_SHARED, 0x0058, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x005a, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0070, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0072, 0x7F7F); +} + +/* Synthetic PU workaround */ +static void b43_synth_pu_workaround(struct b43_wldev *dev, u8 channel) +{ + struct b43_phy *phy = &dev->phy; + + might_sleep(); + + if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) { + /* We do not need the workaround. */ + return; + } + + if (channel <= 10) { + b43_write16(dev, B43_MMIO_CHANNEL, + channel2freq_bg(channel + 4)); + } else { + b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(1)); + } + msleep(1); + b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); +} + +/* Set the baseband attenuation value on chip. 
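+ * The register used depends on the analog core revision: analog 0 puts
+ * the value into the low nibble of B43_MMIO_PHY0, analog 1 shifts it
+ * left by 3 into B43_PHY_DACCTL, and later analog cores shift it left
+ * by 2 into B43_PHY_DACCTL.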
*/ +void b43_gphy_set_baseband_attenuation(struct b43_wldev *dev, + u16 baseband_attenuation) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->analog == 0) { + b43_write16(dev, B43_MMIO_PHY0, (b43_read16(dev, B43_MMIO_PHY0) + & 0xFFF0) | + baseband_attenuation); + } else if (phy->analog > 1) { + b43_phy_maskset(dev, B43_PHY_DACCTL, 0xFFC3, (baseband_attenuation << 2)); + } else { + b43_phy_maskset(dev, B43_PHY_DACCTL, 0xFF87, (baseband_attenuation << 3)); + } +} + +/* Adjust the transmission power output (G-PHY) */ +static void b43_set_txpower_g(struct b43_wldev *dev, + const struct b43_bbatt *bbatt, + const struct b43_rfatt *rfatt, u8 tx_control) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_txpower_lo_control *lo = gphy->lo_control; + u16 bb, rf; + u16 tx_bias, tx_magn; + + bb = bbatt->att; + rf = rfatt->att; + tx_bias = lo->tx_bias; + tx_magn = lo->tx_magn; + if (unlikely(tx_bias == 0xFF)) + tx_bias = 0; + + /* Save the values for later. Use memmove, because it's valid + * to pass &gphy->rfatt as rfatt pointer argument. Same for bbatt. */ + gphy->tx_control = tx_control; + memmove(&gphy->rfatt, rfatt, sizeof(*rfatt)); + gphy->rfatt.with_padmix = !!(tx_control & B43_TXCTL_TXMIX); + memmove(&gphy->bbatt, bbatt, sizeof(*bbatt)); + + if (b43_debug(dev, B43_DBG_XMITPOWER)) { + b43dbg(dev->wl, "Tuning TX-power to bbatt(%u), " + "rfatt(%u), tx_control(0x%02X), " + "tx_bias(0x%02X), tx_magn(0x%02X)\n", + bb, rf, tx_control, tx_bias, tx_magn); + } + + b43_gphy_set_baseband_attenuation(dev, bb); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RFATT, rf); + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { + b43_radio_write16(dev, 0x43, + (rf & 0x000F) | (tx_control & 0x0070)); + } else { + b43_radio_maskset(dev, 0x43, 0xFFF0, (rf & 0x000F)); + b43_radio_maskset(dev, 0x52, ~0x0070, (tx_control & 0x0070)); + } + if (has_tx_magnification(phy)) { + b43_radio_write16(dev, 0x52, tx_magn | tx_bias); + } else { + b43_radio_maskset(dev, 0x52, 0xFFF0, (tx_bias & 0x000F)); + } + b43_lo_g_adjust(dev); +} + +/* GPHY_TSSI_Power_Lookup_Table_Init */ +static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev) +{ + struct b43_phy_g *gphy = dev->phy.g; + int i; + u16 value; + + for (i = 0; i < 32; i++) + b43_ofdmtab_write16(dev, 0x3C20, i, gphy->tssi2dbm[i]); + for (i = 32; i < 64; i++) + b43_ofdmtab_write16(dev, 0x3C00, i - 32, gphy->tssi2dbm[i]); + for (i = 0; i < 64; i += 2) { + value = (u16) gphy->tssi2dbm[i]; + value |= ((u16) gphy->tssi2dbm[i + 1]) << 8; + b43_phy_write(dev, 0x380 + (i / 2), value); + } +} + +/* GPHY_Gain_Lookup_Table_Init */ +static void b43_gphy_gain_lt_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_txpower_lo_control *lo = gphy->lo_control; + u16 nr_written = 0; + u16 tmp; + u8 rf, bb; + + for (rf = 0; rf < lo->rfatt_list.len; rf++) { + for (bb = 0; bb < lo->bbatt_list.len; bb++) { + if (nr_written >= 0x40) + return; + tmp = lo->bbatt_list.list[bb].att; + tmp <<= 8; + if (phy->radio_rev == 8) + tmp |= 0x50; + else + tmp |= 0x40; + tmp |= lo->rfatt_list.list[rf].att; + b43_phy_write(dev, 0x3C0 + nr_written, tmp); + nr_written++; + } + } +} + +static void b43_set_all_gains(struct b43_wldev *dev, + s16 first, s16 second, s16 third) +{ + struct b43_phy *phy = &dev->phy; + u16 i; + u16 start = 0x08, end = 0x18; + u16 tmp; + u16 table; + + if (phy->rev <= 1) { + start = 0x10; + end = 0x20; + } + + table = B43_OFDMTAB_GAINX; + if (phy->rev <= 1) + table = B43_OFDMTAB_GAINX_R1; 
+ for (i = 0; i < 4; i++) + b43_ofdmtab_write16(dev, table, i, first); + + for (i = start; i < end; i++) + b43_ofdmtab_write16(dev, table, i, second); + + if (third != -1) { + tmp = ((u16) third << 14) | ((u16) third << 6); + b43_phy_maskset(dev, 0x04A0, 0xBFBF, tmp); + b43_phy_maskset(dev, 0x04A1, 0xBFBF, tmp); + b43_phy_maskset(dev, 0x04A2, 0xBFBF, tmp); + } + b43_dummy_transmission(dev, false, true); +} + +static void b43_set_original_gains(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 i, tmp; + u16 table; + u16 start = 0x0008, end = 0x0018; + + if (phy->rev <= 1) { + start = 0x0010; + end = 0x0020; + } + + table = B43_OFDMTAB_GAINX; + if (phy->rev <= 1) + table = B43_OFDMTAB_GAINX_R1; + for (i = 0; i < 4; i++) { + tmp = (i & 0xFFFC); + tmp |= (i & 0x0001) << 1; + tmp |= (i & 0x0002) >> 1; + + b43_ofdmtab_write16(dev, table, i, tmp); + } + + for (i = start; i < end; i++) + b43_ofdmtab_write16(dev, table, i, i - start); + + b43_phy_maskset(dev, 0x04A0, 0xBFBF, 0x4040); + b43_phy_maskset(dev, 0x04A1, 0xBFBF, 0x4040); + b43_phy_maskset(dev, 0x04A2, 0xBFBF, 0x4000); + b43_dummy_transmission(dev, false, true); +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +static void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val) +{ + b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); + b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val); +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +static s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset) +{ + u16 val; + + b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); + val = b43_phy_read(dev, B43_PHY_NRSSILT_DATA); + + return (s16) val; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +static void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val) +{ + u16 i; + s16 tmp; + + for (i = 0; i < 64; i++) { + tmp = b43_nrssi_hw_read(dev, i); + tmp -= val; + tmp = clamp_val(tmp, -32, 31); + b43_nrssi_hw_write(dev, i, tmp); + } +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +static void b43_nrssi_mem_update(struct b43_wldev *dev) +{ + struct b43_phy_g *gphy = dev->phy.g; + s16 i, delta; + s32 tmp; + + delta = 0x1F - gphy->nrssi[0]; + for (i = 0; i < 64; i++) { + tmp = (i - delta) * gphy->nrssislope; + tmp /= 0x10000; + tmp += 0x3A; + tmp = clamp_val(tmp, 0, 0x3F); + gphy->nrssi_lt[i] = tmp; + } +} + +static void b43_calc_nrssi_offset(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 backup[20] = { 0 }; + s16 v47F; + u16 i; + u16 saved = 0xFFFF; + + backup[0] = b43_phy_read(dev, 0x0001); + backup[1] = b43_phy_read(dev, 0x0811); + backup[2] = b43_phy_read(dev, 0x0812); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + backup[3] = b43_phy_read(dev, 0x0814); + backup[4] = b43_phy_read(dev, 0x0815); + } + backup[5] = b43_phy_read(dev, 0x005A); + backup[6] = b43_phy_read(dev, 0x0059); + backup[7] = b43_phy_read(dev, 0x0058); + backup[8] = b43_phy_read(dev, 0x000A); + backup[9] = b43_phy_read(dev, 0x0003); + backup[10] = b43_radio_read16(dev, 0x007A); + backup[11] = b43_radio_read16(dev, 0x0043); + + b43_phy_mask(dev, 0x0429, 0x7FFF); + b43_phy_maskset(dev, 0x0001, 0x3FFF, 0x4000); + b43_phy_set(dev, 0x0811, 0x000C); + b43_phy_maskset(dev, 0x0812, 0xFFF3, 0x0004); + b43_phy_mask(dev, 0x0802, ~(0x1 | 0x2)); + if (phy->rev >= 6) { + backup[12] = b43_phy_read(dev, 0x002E); + backup[13] = b43_phy_read(dev, 0x002F); + backup[14] = b43_phy_read(dev, 0x080F); + backup[15] = b43_phy_read(dev, 0x0810); + backup[16] = b43_phy_read(dev, 
0x0801); + backup[17] = b43_phy_read(dev, 0x0060); + backup[18] = b43_phy_read(dev, 0x0014); + backup[19] = b43_phy_read(dev, 0x0478); + + b43_phy_write(dev, 0x002E, 0); + b43_phy_write(dev, 0x002F, 0); + b43_phy_write(dev, 0x080F, 0); + b43_phy_write(dev, 0x0810, 0); + b43_phy_set(dev, 0x0478, 0x0100); + b43_phy_set(dev, 0x0801, 0x0040); + b43_phy_set(dev, 0x0060, 0x0040); + b43_phy_set(dev, 0x0014, 0x0200); + } + b43_radio_set(dev, 0x007A, 0x0070); + b43_radio_set(dev, 0x007A, 0x0080); + udelay(30); + + v47F = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == 31) { + for (i = 7; i >= 4; i--) { + b43_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = + (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F < 31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 4; + } else { + b43_radio_mask(dev, 0x007A, 0x007F); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_set(dev, 0x0814, 0x0001); + b43_phy_mask(dev, 0x0815, 0xFFFE); + } + b43_phy_set(dev, 0x0811, 0x000C); + b43_phy_set(dev, 0x0812, 0x000C); + b43_phy_set(dev, 0x0811, 0x0030); + b43_phy_set(dev, 0x0812, 0x0030); + b43_phy_write(dev, 0x005A, 0x0480); + b43_phy_write(dev, 0x0059, 0x0810); + b43_phy_write(dev, 0x0058, 0x000D); + if (phy->rev == 0) { + b43_phy_write(dev, 0x0003, 0x0122); + } else { + b43_phy_set(dev, 0x000A, 0x2000); + } + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_set(dev, 0x0814, 0x0004); + b43_phy_mask(dev, 0x0815, 0xFFFB); + } + b43_phy_maskset(dev, 0x0003, 0xFF9F, 0x0040); + b43_radio_set(dev, 0x007A, 0x000F); + b43_set_all_gains(dev, 3, 0, 1); + b43_radio_maskset(dev, 0x0043, 0x00F0, 0x000F); + udelay(30); + v47F = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == -32) { + for (i = 0; i < 4; i++) { + b43_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = + (s16) ((b43_phy_read(dev, 0x047F) >> 8) & + 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F > -31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 3; + } else + saved = 0; + } + b43_radio_write16(dev, 0x007B, saved); + + if (phy->rev >= 6) { + b43_phy_write(dev, 0x002E, backup[12]); + b43_phy_write(dev, 0x002F, backup[13]); + b43_phy_write(dev, 0x080F, backup[14]); + b43_phy_write(dev, 0x0810, backup[15]); + } + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, 0x0814, backup[3]); + b43_phy_write(dev, 0x0815, backup[4]); + } + b43_phy_write(dev, 0x005A, backup[5]); + b43_phy_write(dev, 0x0059, backup[6]); + b43_phy_write(dev, 0x0058, backup[7]); + b43_phy_write(dev, 0x000A, backup[8]); + b43_phy_write(dev, 0x0003, backup[9]); + b43_radio_write16(dev, 0x0043, backup[11]); + b43_radio_write16(dev, 0x007A, backup[10]); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x1 | 0x2); + b43_phy_set(dev, 0x0429, 0x8000); + b43_set_original_gains(dev); + if (phy->rev >= 6) { + b43_phy_write(dev, 0x0801, backup[16]); + b43_phy_write(dev, 0x0060, backup[17]); + b43_phy_write(dev, 0x0014, backup[18]); + b43_phy_write(dev, 0x0478, backup[19]); + } + b43_phy_write(dev, 0x0001, backup[0]); + b43_phy_write(dev, 0x0812, backup[2]); + b43_phy_write(dev, 0x0811, backup[1]); +} + +static void b43_calc_nrssi_slope(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + u16 backup[18] = { 0 }; + u16 
tmp; + s16 nrssi0, nrssi1; + + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + + if (phy->radio_rev >= 9) + return; + if (phy->radio_rev == 8) + b43_calc_nrssi_offset(dev); + + b43_phy_mask(dev, B43_PHY_G_CRS, 0x7FFF); + b43_phy_mask(dev, 0x0802, 0xFFFC); + backup[7] = b43_read16(dev, 0x03E2); + b43_write16(dev, 0x03E2, b43_read16(dev, 0x03E2) | 0x8000); + backup[0] = b43_radio_read16(dev, 0x007A); + backup[1] = b43_radio_read16(dev, 0x0052); + backup[2] = b43_radio_read16(dev, 0x0043); + backup[3] = b43_phy_read(dev, 0x0015); + backup[4] = b43_phy_read(dev, 0x005A); + backup[5] = b43_phy_read(dev, 0x0059); + backup[6] = b43_phy_read(dev, 0x0058); + backup[8] = b43_read16(dev, 0x03E6); + backup[9] = b43_read16(dev, B43_MMIO_CHANNEL_EXT); + if (phy->rev >= 3) { + backup[10] = b43_phy_read(dev, 0x002E); + backup[11] = b43_phy_read(dev, 0x002F); + backup[12] = b43_phy_read(dev, 0x080F); + backup[13] = b43_phy_read(dev, B43_PHY_G_LO_CONTROL); + backup[14] = b43_phy_read(dev, 0x0801); + backup[15] = b43_phy_read(dev, 0x0060); + backup[16] = b43_phy_read(dev, 0x0014); + backup[17] = b43_phy_read(dev, 0x0478); + b43_phy_write(dev, 0x002E, 0); + b43_phy_write(dev, B43_PHY_G_LO_CONTROL, 0); + switch (phy->rev) { + case 4: + case 6: + case 7: + b43_phy_set(dev, 0x0478, 0x0100); + b43_phy_set(dev, 0x0801, 0x0040); + break; + case 3: + case 5: + b43_phy_mask(dev, 0x0801, 0xFFBF); + break; + } + b43_phy_set(dev, 0x0060, 0x0040); + b43_phy_set(dev, 0x0014, 0x0200); + } + b43_radio_set(dev, 0x007A, 0x0070); + b43_set_all_gains(dev, 0, 8, 0); + b43_radio_mask(dev, 0x007A, 0x00F7); + if (phy->rev >= 2) { + b43_phy_maskset(dev, 0x0811, 0xFFCF, 0x0030); + b43_phy_maskset(dev, 0x0812, 0xFFCF, 0x0010); + } + b43_radio_set(dev, 0x007A, 0x0080); + udelay(20); + + nrssi0 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi0 >= 0x0020) + nrssi0 -= 0x0040; + + b43_radio_mask(dev, 0x007A, 0x007F); + if (phy->rev >= 2) { + b43_phy_maskset(dev, 0x0003, 0xFF9F, 0x0040); + } + + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, B43_MMIO_CHANNEL_EXT) + | 0x2000); + b43_radio_set(dev, 0x007A, 0x000F); + b43_phy_write(dev, 0x0015, 0xF330); + if (phy->rev >= 2) { + b43_phy_maskset(dev, 0x0812, 0xFFCF, 0x0020); + b43_phy_maskset(dev, 0x0811, 0xFFCF, 0x0020); + } + + b43_set_all_gains(dev, 3, 0, 1); + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x0043, 0x001F); + } else { + tmp = b43_radio_read16(dev, 0x0052) & 0xFF0F; + b43_radio_write16(dev, 0x0052, tmp | 0x0060); + tmp = b43_radio_read16(dev, 0x0043) & 0xFFF0; + b43_radio_write16(dev, 0x0043, tmp | 0x0009); + } + b43_phy_write(dev, 0x005A, 0x0480); + b43_phy_write(dev, 0x0059, 0x0810); + b43_phy_write(dev, 0x0058, 0x000D); + udelay(20); + nrssi1 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi1 >= 0x0020) + nrssi1 -= 0x0040; + if (nrssi0 == nrssi1) + gphy->nrssislope = 0x00010000; + else + gphy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + if (nrssi0 >= -4) { + gphy->nrssi[0] = nrssi1; + gphy->nrssi[1] = nrssi0; + } + if (phy->rev >= 3) { + b43_phy_write(dev, 0x002E, backup[10]); + b43_phy_write(dev, 0x002F, backup[11]); + b43_phy_write(dev, 0x080F, backup[12]); + b43_phy_write(dev, B43_PHY_G_LO_CONTROL, backup[13]); + } + if (phy->rev >= 2) { + b43_phy_mask(dev, 0x0812, 0xFFCF); + b43_phy_mask(dev, 0x0811, 0xFFCF); + } + + b43_radio_write16(dev, 0x007A, backup[0]); + b43_radio_write16(dev, 0x0052, backup[1]); + b43_radio_write16(dev, 0x0043, backup[2]); + b43_write16(dev, 0x03E2, backup[7]); + b43_write16(dev, 0x03E6, backup[8]); + 
b43_write16(dev, B43_MMIO_CHANNEL_EXT, backup[9]); + b43_phy_write(dev, 0x0015, backup[3]); + b43_phy_write(dev, 0x005A, backup[4]); + b43_phy_write(dev, 0x0059, backup[5]); + b43_phy_write(dev, 0x0058, backup[6]); + b43_synth_pu_workaround(dev, phy->channel); + b43_phy_set(dev, 0x0802, (0x0001 | 0x0002)); + b43_set_original_gains(dev); + b43_phy_set(dev, B43_PHY_G_CRS, 0x8000); + if (phy->rev >= 3) { + b43_phy_write(dev, 0x0801, backup[14]); + b43_phy_write(dev, 0x0060, backup[15]); + b43_phy_write(dev, 0x0014, backup[16]); + b43_phy_write(dev, 0x0478, backup[17]); + } + b43_nrssi_mem_update(dev); + b43_calc_nrssi_threshold(dev); +} + +static void b43_calc_nrssi_threshold(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + s32 a, b; + s16 tmp16; + u16 tmp_u16; + + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + + if (!phy->gmode || + !(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) { + tmp16 = b43_nrssi_hw_read(dev, 0x20); + if (tmp16 >= 0x20) + tmp16 -= 0x40; + if (tmp16 < 3) { + b43_phy_maskset(dev, 0x048A, 0xF000, 0x09EB); + } else { + b43_phy_maskset(dev, 0x048A, 0xF000, 0x0AED); + } + } else { + if (gphy->interfmode == B43_INTERFMODE_NONWLAN) { + a = 0xE; + b = 0xA; + } else if (!gphy->aci_wlan_automatic && gphy->aci_enable) { + a = 0x13; + b = 0x12; + } else { + a = 0xE; + b = 0x11; + } + + a = a * (gphy->nrssi[1] - gphy->nrssi[0]); + a += (gphy->nrssi[0] << 6); + if (a < 32) + a += 31; + else + a += 32; + a = a >> 6; + a = clamp_val(a, -31, 31); + + b = b * (gphy->nrssi[1] - gphy->nrssi[0]); + b += (gphy->nrssi[0] << 6); + if (b < 32) + b += 31; + else + b += 32; + b = b >> 6; + b = clamp_val(b, -31, 31); + + tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000; + tmp_u16 |= ((u32) b & 0x0000003F); + tmp_u16 |= (((u32) a & 0x0000003F) << 6); + b43_phy_write(dev, 0x048A, tmp_u16); + } +} + +/* Stack implementation to save/restore values from the + * interference mitigation code. + * It is save to restore values in random order. 
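+ * Each saved register is packed into a single u32 stack entry: the
+ * upper 16 bits hold the saved value, the lower half encodes the
+ * register offset together with a small class id that tells
+ * _stack_restore() whether the entry is a PHY, radio or OFDM-table
+ * register.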
+ */ +static void _stack_save(u32 *_stackptr, size_t *stackidx, + u8 id, u16 offset, u16 value) +{ + u32 *stackptr = &(_stackptr[*stackidx]); + + B43_WARN_ON(offset & 0xF000); + B43_WARN_ON(id & 0xF0); + *stackptr = offset; + *stackptr |= ((u32) id) << 12; + *stackptr |= ((u32) value) << 16; + (*stackidx)++; + B43_WARN_ON(*stackidx >= B43_INTERFSTACK_SIZE); +} + +static u16 _stack_restore(u32 *stackptr, u8 id, u16 offset) +{ + size_t i; + + B43_WARN_ON(offset & 0xF000); + B43_WARN_ON(id & 0xF0); + for (i = 0; i < B43_INTERFSTACK_SIZE; i++, stackptr++) { + if ((*stackptr & 0x00000FFF) != offset) + continue; + if (((*stackptr & 0x0000F000) >> 12) != id) + continue; + return ((*stackptr & 0xFFFF0000) >> 16); + } + B43_WARN_ON(1); + + return 0; +} + +#define phy_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x1, (offset), \ + b43_phy_read(dev, (offset))); \ + } while (0) +#define phy_stackrestore(offset) \ + do { \ + b43_phy_write(dev, (offset), \ + _stack_restore(stack, 0x1, \ + (offset))); \ + } while (0) +#define radio_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x2, (offset), \ + b43_radio_read16(dev, (offset))); \ + } while (0) +#define radio_stackrestore(offset) \ + do { \ + b43_radio_write16(dev, (offset), \ + _stack_restore(stack, 0x2, \ + (offset))); \ + } while (0) +#define ofdmtab_stacksave(table, offset) \ + do { \ + _stack_save(stack, &stackidx, 0x3, (offset)|(table), \ + b43_ofdmtab_read16(dev, (table), (offset))); \ + } while (0) +#define ofdmtab_stackrestore(table, offset) \ + do { \ + b43_ofdmtab_write16(dev, (table), (offset), \ + _stack_restore(stack, 0x3, \ + (offset)|(table))); \ + } while (0) + +static void +b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + u16 tmp, flipped; + size_t stackidx = 0; + u32 *stack = gphy->interfstack; + + switch (mode) { + case B43_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43_phy_set(dev, 0x042B, 0x0800); + b43_phy_mask(dev, B43_PHY_G_CRS, ~0x4000); + break; + } + radio_stacksave(0x0078); + tmp = (b43_radio_read16(dev, 0x0078) & 0x001E); + B43_WARN_ON(tmp > 15); + flipped = bitrev4(tmp); + if (flipped < 10 && flipped >= 8) + flipped = 7; + else if (flipped >= 10) + flipped -= 3; + flipped = (bitrev4(flipped) << 1) | 0x0020; + b43_radio_write16(dev, 0x0078, flipped); + + b43_calc_nrssi_threshold(dev); + + phy_stacksave(0x0406); + b43_phy_write(dev, 0x0406, 0x7E28); + + b43_phy_set(dev, 0x042B, 0x0800); + b43_phy_set(dev, B43_PHY_RADIO_BITFIELD, 0x1000); + + phy_stacksave(0x04A0); + b43_phy_maskset(dev, 0x04A0, 0xC0C0, 0x0008); + phy_stacksave(0x04A1); + b43_phy_maskset(dev, 0x04A1, 0xC0C0, 0x0605); + phy_stacksave(0x04A2); + b43_phy_maskset(dev, 0x04A2, 0xC0C0, 0x0204); + phy_stacksave(0x04A8); + b43_phy_maskset(dev, 0x04A8, 0xC0C0, 0x0803); + phy_stacksave(0x04AB); + b43_phy_maskset(dev, 0x04AB, 0xC0C0, 0x0605); + + phy_stacksave(0x04A7); + b43_phy_write(dev, 0x04A7, 0x0002); + phy_stacksave(0x04A3); + b43_phy_write(dev, 0x04A3, 0x287A); + phy_stacksave(0x04A9); + b43_phy_write(dev, 0x04A9, 0x2027); + phy_stacksave(0x0493); + b43_phy_write(dev, 0x0493, 0x32F5); + phy_stacksave(0x04AA); + b43_phy_write(dev, 0x04AA, 0x2027); + phy_stacksave(0x04AC); + b43_phy_write(dev, 0x04AC, 0x32F5); + break; + case B43_INTERFMODE_MANUALWLAN: + if (b43_phy_read(dev, 0x0033) & 0x0800) + break; + + gphy->aci_enable = 1; + + phy_stacksave(B43_PHY_RADIO_BITFIELD); + phy_stacksave(B43_PHY_G_CRS); + if (phy->rev < 2) { + 
phy_stacksave(0x0406); + } else { + phy_stacksave(0x04C0); + phy_stacksave(0x04C1); + } + phy_stacksave(0x0033); + phy_stacksave(0x04A7); + phy_stacksave(0x04A3); + phy_stacksave(0x04A9); + phy_stacksave(0x04AA); + phy_stacksave(0x04AC); + phy_stacksave(0x0493); + phy_stacksave(0x04A1); + phy_stacksave(0x04A0); + phy_stacksave(0x04A2); + phy_stacksave(0x048A); + phy_stacksave(0x04A8); + phy_stacksave(0x04AB); + if (phy->rev == 2) { + phy_stacksave(0x04AD); + phy_stacksave(0x04AE); + } else if (phy->rev >= 3) { + phy_stacksave(0x04AD); + phy_stacksave(0x0415); + phy_stacksave(0x0416); + phy_stacksave(0x0417); + ofdmtab_stacksave(0x1A00, 0x2); + ofdmtab_stacksave(0x1A00, 0x3); + } + phy_stacksave(0x042B); + phy_stacksave(0x048C); + + b43_phy_mask(dev, B43_PHY_RADIO_BITFIELD, ~0x1000); + b43_phy_maskset(dev, B43_PHY_G_CRS, 0xFFFC, 0x0002); + + b43_phy_write(dev, 0x0033, 0x0800); + b43_phy_write(dev, 0x04A3, 0x2027); + b43_phy_write(dev, 0x04A9, 0x1CA8); + b43_phy_write(dev, 0x0493, 0x287A); + b43_phy_write(dev, 0x04AA, 0x1CA8); + b43_phy_write(dev, 0x04AC, 0x287A); + + b43_phy_maskset(dev, 0x04A0, 0xFFC0, 0x001A); + b43_phy_write(dev, 0x04A7, 0x000D); + + if (phy->rev < 2) { + b43_phy_write(dev, 0x0406, 0xFF0D); + } else if (phy->rev == 2) { + b43_phy_write(dev, 0x04C0, 0xFFFF); + b43_phy_write(dev, 0x04C1, 0x00A9); + } else { + b43_phy_write(dev, 0x04C0, 0x00C1); + b43_phy_write(dev, 0x04C1, 0x0059); + } + + b43_phy_maskset(dev, 0x04A1, 0xC0FF, 0x1800); + b43_phy_maskset(dev, 0x04A1, 0xFFC0, 0x0015); + b43_phy_maskset(dev, 0x04A8, 0xCFFF, 0x1000); + b43_phy_maskset(dev, 0x04A8, 0xF0FF, 0x0A00); + b43_phy_maskset(dev, 0x04AB, 0xCFFF, 0x1000); + b43_phy_maskset(dev, 0x04AB, 0xF0FF, 0x0800); + b43_phy_maskset(dev, 0x04AB, 0xFFCF, 0x0010); + b43_phy_maskset(dev, 0x04AB, 0xFFF0, 0x0005); + b43_phy_maskset(dev, 0x04A8, 0xFFCF, 0x0010); + b43_phy_maskset(dev, 0x04A8, 0xFFF0, 0x0006); + b43_phy_maskset(dev, 0x04A2, 0xF0FF, 0x0800); + b43_phy_maskset(dev, 0x04A0, 0xF0FF, 0x0500); + b43_phy_maskset(dev, 0x04A2, 0xFFF0, 0x000B); + + if (phy->rev >= 3) { + b43_phy_mask(dev, 0x048A, (u16)~0x8000); + b43_phy_maskset(dev, 0x0415, 0x8000, 0x36D8); + b43_phy_maskset(dev, 0x0416, 0x8000, 0x36D8); + b43_phy_maskset(dev, 0x0417, 0xFE00, 0x016D); + } else { + b43_phy_set(dev, 0x048A, 0x1000); + b43_phy_maskset(dev, 0x048A, 0x9FFF, 0x2000); + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ACIW); + } + if (phy->rev >= 2) { + b43_phy_set(dev, 0x042B, 0x0800); + } + b43_phy_maskset(dev, 0x048C, 0xF0FF, 0x0200); + if (phy->rev == 2) { + b43_phy_maskset(dev, 0x04AE, 0xFF00, 0x007F); + b43_phy_maskset(dev, 0x04AD, 0x00FF, 0x1300); + } else if (phy->rev >= 6) { + b43_ofdmtab_write16(dev, 0x1A00, 0x3, 0x007F); + b43_ofdmtab_write16(dev, 0x1A00, 0x2, 0x007F); + b43_phy_mask(dev, 0x04AD, 0x00FF); + } + b43_calc_nrssi_slope(dev); + break; + default: + B43_WARN_ON(1); + } +} + +static void +b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + u32 *stack = gphy->interfstack; + + switch (mode) { + case B43_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43_phy_mask(dev, 0x042B, ~0x0800); + b43_phy_set(dev, B43_PHY_G_CRS, 0x4000); + break; + } + radio_stackrestore(0x0078); + b43_calc_nrssi_threshold(dev); + phy_stackrestore(0x0406); + b43_phy_mask(dev, 0x042B, ~0x0800); + if (!dev->bad_frames_preempt) { + b43_phy_mask(dev, B43_PHY_RADIO_BITFIELD, ~(1 << 11)); + } + b43_phy_set(dev, B43_PHY_G_CRS, 0x4000); + phy_stackrestore(0x04A0); + 
phy_stackrestore(0x04A1); + phy_stackrestore(0x04A2); + phy_stackrestore(0x04A8); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A7); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + break; + case B43_INTERFMODE_MANUALWLAN: + if (!(b43_phy_read(dev, 0x0033) & 0x0800)) + break; + + gphy->aci_enable = 0; + + phy_stackrestore(B43_PHY_RADIO_BITFIELD); + phy_stackrestore(B43_PHY_G_CRS); + phy_stackrestore(0x0033); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A7); + if (phy->rev >= 2) { + phy_stackrestore(0x04C0); + phy_stackrestore(0x04C1); + } else + phy_stackrestore(0x0406); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A8); + if (phy->rev == 2) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x04AE); + } else if (phy->rev >= 3) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x0415); + phy_stackrestore(0x0416); + phy_stackrestore(0x0417); + ofdmtab_stackrestore(0x1A00, 0x2); + ofdmtab_stackrestore(0x1A00, 0x3); + } + phy_stackrestore(0x04A2); + phy_stackrestore(0x048A); + phy_stackrestore(0x042B); + phy_stackrestore(0x048C); + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ACIW); + b43_calc_nrssi_slope(dev); + break; + default: + B43_WARN_ON(1); + } +} + +#undef phy_stacksave +#undef phy_stackrestore +#undef radio_stacksave +#undef radio_stackrestore +#undef ofdmtab_stacksave +#undef ofdmtab_stackrestore + +static u16 b43_radio_core_calibration_value(struct b43_wldev *dev) +{ + u16 reg, index, ret; + + static const u8 rcc_table[] = { + 0x02, 0x03, 0x01, 0x0F, + 0x06, 0x07, 0x05, 0x0F, + 0x0A, 0x0B, 0x09, 0x0F, + 0x0E, 0x0F, 0x0D, 0x0F, + }; + + reg = b43_radio_read16(dev, 0x60); + index = (reg & 0x001E) >> 1; + ret = rcc_table[index] << 1; + ret |= (reg & 0x0001); + ret |= 0x0020; + + return ret; +} + +#define LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0)) +static u16 radio2050_rfover_val(struct b43_wldev *dev, + u16 phy_register, unsigned int lpd) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct ssb_sprom *sprom = &(dev->dev->bus->sprom); + + if (!phy->gmode) + return 0; + + if (has_loopback_gain(phy)) { + int max_lb_gain = gphy->max_lb_gain; + u16 extlna; + u16 i; + + if (phy->radio_rev == 8) + max_lb_gain += 0x3E; + else + max_lb_gain += 0x26; + if (max_lb_gain >= 0x46) { + extlna = 0x3000; + max_lb_gain -= 0x46; + } else if (max_lb_gain >= 0x3A) { + extlna = 0x1000; + max_lb_gain -= 0x3A; + } else if (max_lb_gain >= 0x2E) { + extlna = 0x2000; + max_lb_gain -= 0x2E; + } else { + extlna = 0; + max_lb_gain -= 0x10; + } + + for (i = 0; i < 16; i++) { + max_lb_gain -= (i * 6); + if (max_lb_gain < 6) + break; + } + + if ((phy->rev < 7) || + !(sprom->boardflags_lo & B43_BFL_EXTLNA)) { + if (phy_register == B43_PHY_RFOVER) { + return 0x1B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + extlna |= (i << 8); + switch (lpd) { + case LPD(0, 1, 1): + return 0x0F92; + case LPD(0, 0, 1): + case LPD(1, 0, 1): + return (0x0092 | extlna); + case LPD(1, 0, 0): + return (0x0093 | extlna); + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } else { + if (phy_register == B43_PHY_RFOVER) { + return 0x9B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + if (extlna) + extlna |= 0x8000; + extlna |= (i << 8); + switch (lpd) { + case LPD(0, 1, 1): + return 0x8F92; + case LPD(0, 0, 1): + return (0x8092 | 
extlna); + case LPD(1, 0, 1): + return (0x2092 | extlna); + case LPD(1, 0, 0): + return (0x2093 | extlna); + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } + } else { + if ((phy->rev < 7) || + !(sprom->boardflags_lo & B43_BFL_EXTLNA)) { + if (phy_register == B43_PHY_RFOVER) { + return 0x1B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + switch (lpd) { + case LPD(0, 1, 1): + return 0x0FB2; + case LPD(0, 0, 1): + return 0x00B2; + case LPD(1, 0, 1): + return 0x30B2; + case LPD(1, 0, 0): + return 0x30B3; + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } else { + if (phy_register == B43_PHY_RFOVER) { + return 0x9B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + switch (lpd) { + case LPD(0, 1, 1): + return 0x8FB2; + case LPD(0, 0, 1): + return 0x80B2; + case LPD(1, 0, 1): + return 0x20B2; + case LPD(1, 0, 0): + return 0x20B3; + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } + } + return 0; +} + +struct init2050_saved_values { + /* Core registers */ + u16 reg_3EC; + u16 reg_3E6; + u16 reg_3F4; + /* Radio registers */ + u16 radio_43; + u16 radio_51; + u16 radio_52; + /* PHY registers */ + u16 phy_pgactl; + u16 phy_cck_5A; + u16 phy_cck_59; + u16 phy_cck_58; + u16 phy_cck_30; + u16 phy_rfover; + u16 phy_rfoverval; + u16 phy_analogover; + u16 phy_analogoverval; + u16 phy_crs0; + u16 phy_classctl; + u16 phy_lo_mask; + u16 phy_lo_ctl; + u16 phy_syncctl; +}; + +static u16 b43_radio_init2050(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct init2050_saved_values sav; + u16 rcc; + u16 radio78; + u16 ret; + u16 i, j; + u32 tmp1 = 0, tmp2 = 0; + + memset(&sav, 0, sizeof(sav)); /* get rid of "may be used uninitialized..." */ + + sav.radio_43 = b43_radio_read16(dev, 0x43); + sav.radio_51 = b43_radio_read16(dev, 0x51); + sav.radio_52 = b43_radio_read16(dev, 0x52); + sav.phy_pgactl = b43_phy_read(dev, B43_PHY_PGACTL); + sav.phy_cck_5A = b43_phy_read(dev, B43_PHY_CCK(0x5A)); + sav.phy_cck_59 = b43_phy_read(dev, B43_PHY_CCK(0x59)); + sav.phy_cck_58 = b43_phy_read(dev, B43_PHY_CCK(0x58)); + + if (phy->type == B43_PHYTYPE_B) { + sav.phy_cck_30 = b43_phy_read(dev, B43_PHY_CCK(0x30)); + sav.reg_3EC = b43_read16(dev, 0x3EC); + + b43_phy_write(dev, B43_PHY_CCK(0x30), 0xFF); + b43_write16(dev, 0x3EC, 0x3F3F); + } else if (phy->gmode || phy->rev >= 2) { + sav.phy_rfover = b43_phy_read(dev, B43_PHY_RFOVER); + sav.phy_rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); + sav.phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER); + sav.phy_analogoverval = + b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); + sav.phy_crs0 = b43_phy_read(dev, B43_PHY_CRS0); + sav.phy_classctl = b43_phy_read(dev, B43_PHY_CLASSCTL); + + b43_phy_set(dev, B43_PHY_ANALOGOVER, 0x0003); + b43_phy_mask(dev, B43_PHY_ANALOGOVERVAL, 0xFFFC); + b43_phy_mask(dev, B43_PHY_CRS0, 0x7FFF); + b43_phy_mask(dev, B43_PHY_CLASSCTL, 0xFFFC); + if (has_loopback_gain(phy)) { + sav.phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK); + sav.phy_lo_ctl = b43_phy_read(dev, B43_PHY_LO_CTL); + + if (phy->rev >= 3) + b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020); + else + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020); + b43_phy_write(dev, B43_PHY_LO_CTL, 0); + } + + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, + LPD(0, 1, 1))); + b43_phy_write(dev, B43_PHY_RFOVER, + radio2050_rfover_val(dev, B43_PHY_RFOVER, 0)); + } + b43_write16(dev, 0x3E2, b43_read16(dev, 0x3E2) | 0x8000); + + sav.phy_syncctl = b43_phy_read(dev, B43_PHY_SYNCCTL); + b43_phy_mask(dev, B43_PHY_SYNCCTL, 0xFF7F); + sav.reg_3E6 = b43_read16(dev, 0x3E6); + 
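The LPD() macro above packs three one-bit flags into a single small key so that radio2050_rfover_val() can switch on whole (L, P, D) triplets at once, and b43_radio_core_calibration_value() derives its result from bits 1-4 of radio register 0x60 via a fixed 16-entry table. The standalone C sketch below reproduces both calculations outside the driver purely for illustration; the sample register value is invented rather than read from hardware.

#include <stdio.h>
#include <stdint.h>

/* Same packing as the driver's LPD(L, P, D) helper: three 1-bit flags
 * folded into one integer so a switch can match whole triplets. */
#define LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0))

/* Mirrors the arithmetic of b43_radio_core_calibration_value():
 * bits 1..4 of radio register 0x60 index the table, bit 0 is carried
 * over unchanged, and bit 5 is always set. */
static uint16_t rcc_from_reg60(uint16_t reg60)
{
	static const uint8_t rcc_table[] = {
		0x02, 0x03, 0x01, 0x0F,
		0x06, 0x07, 0x05, 0x0F,
		0x0A, 0x0B, 0x09, 0x0F,
		0x0E, 0x0F, 0x0D, 0x0F,
	};
	uint16_t index = (reg60 & 0x001E) >> 1;
	uint16_t ret = rcc_table[index] << 1;

	ret |= (reg60 & 0x0001);
	ret |= 0x0020;
	return ret;
}

int main(void)
{
	printf("LPD(1, 0, 1) = 0x%X\n", (unsigned)LPD(1, 0, 1));             /* 0x5 */
	printf("rcc(0x0013)  = 0x%02X\n", (unsigned)rcc_from_reg60(0x0013)); /* 0x37 */
	return 0;
}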
sav.reg_3F4 = b43_read16(dev, 0x3F4); + + if (phy->analog == 0) { + b43_write16(dev, 0x03E6, 0x0122); + } else { + if (phy->analog >= 2) { + b43_phy_maskset(dev, B43_PHY_CCK(0x03), 0xFFBF, 0x40); + } + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + (b43_read16(dev, B43_MMIO_CHANNEL_EXT) | 0x2000)); + } + + rcc = b43_radio_core_calibration_value(dev); + + if (phy->type == B43_PHYTYPE_B) + b43_radio_write16(dev, 0x78, 0x26); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, + LPD(0, 1, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xBFAF); + b43_phy_write(dev, B43_PHY_CCK(0x2B), 0x1403); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, + LPD(0, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xBFA0); + b43_radio_set(dev, 0x51, 0x0004); + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x43, 0x1F); + } else { + b43_radio_write16(dev, 0x52, 0); + b43_radio_maskset(dev, 0x43, 0xFFF0, 0x0009); + } + b43_phy_write(dev, B43_PHY_CCK(0x58), 0); + + for (i = 0; i < 16; i++) { + b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0480); + b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810); + b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xEFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 0))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xFFF0); + udelay(20); + tmp1 += b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + b43_phy_write(dev, B43_PHY_CCK(0x58), 0); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + } + udelay(10); + + b43_phy_write(dev, B43_PHY_CCK(0x58), 0); + tmp1++; + tmp1 >>= 9; + + for (i = 0; i < 16; i++) { + radio78 = (bitrev4(i) << 1) | 0x0020; + b43_radio_write16(dev, 0x78, radio78); + udelay(10); + for (j = 0; j < 16; j++) { + b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0D80); + b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810); + b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xEFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 0))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xFFF0); + udelay(10); + tmp2 += b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + b43_phy_write(dev, B43_PHY_CCK(0x58), 0); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + } + tmp2++; + tmp2 >>= 8; + if (tmp1 < tmp2) + break; + 
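The calibration loop above programs radio register 0x78 with a bit-reversed loop index and stops once the averaged LO leakage for the current setting (tmp2) rises above the scaled reference measurement (tmp1). Below is a minimal sketch of the index encoding, assuming bitrev4() simply reverses the low four bits of its argument (the helper itself is not part of this hunk); the program only prints the sweep order and touches no hardware.

#include <stdio.h>
#include <stdint.h>

/* Illustrative reimplementation: reverse the low 4 bits of a nibble. */
static uint8_t bitrev4(uint8_t x)
{
	x = ((x & 0x5) << 1) | ((x & 0xA) >> 1); /* swap neighbouring bits */
	x = ((x & 0x3) << 2) | ((x & 0xC) >> 2); /* swap neighbouring pairs */
	return x;
}

int main(void)
{
	/* radio78 = (bitrev4(i) << 1) | 0x0020, as in the loop above,
	 * giving the sweep order 0x20, 0x30, 0x28, 0x38, ... */
	for (unsigned i = 0; i < 16; i++)
		printf("i=%2u -> radio78=0x%02X\n", i,
		       (unsigned)((bitrev4(i) << 1) | 0x0020));
	return 0;
}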
} + + /* Restore the registers */ + b43_phy_write(dev, B43_PHY_PGACTL, sav.phy_pgactl); + b43_radio_write16(dev, 0x51, sav.radio_51); + b43_radio_write16(dev, 0x52, sav.radio_52); + b43_radio_write16(dev, 0x43, sav.radio_43); + b43_phy_write(dev, B43_PHY_CCK(0x5A), sav.phy_cck_5A); + b43_phy_write(dev, B43_PHY_CCK(0x59), sav.phy_cck_59); + b43_phy_write(dev, B43_PHY_CCK(0x58), sav.phy_cck_58); + b43_write16(dev, 0x3E6, sav.reg_3E6); + if (phy->analog != 0) + b43_write16(dev, 0x3F4, sav.reg_3F4); + b43_phy_write(dev, B43_PHY_SYNCCTL, sav.phy_syncctl); + b43_synth_pu_workaround(dev, phy->channel); + if (phy->type == B43_PHYTYPE_B) { + b43_phy_write(dev, B43_PHY_CCK(0x30), sav.phy_cck_30); + b43_write16(dev, 0x3EC, sav.reg_3EC); + } else if (phy->gmode) { + b43_write16(dev, B43_MMIO_PHY_RADIO, + b43_read16(dev, B43_MMIO_PHY_RADIO) + & 0x7FFF); + b43_phy_write(dev, B43_PHY_RFOVER, sav.phy_rfover); + b43_phy_write(dev, B43_PHY_RFOVERVAL, sav.phy_rfoverval); + b43_phy_write(dev, B43_PHY_ANALOGOVER, sav.phy_analogover); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + sav.phy_analogoverval); + b43_phy_write(dev, B43_PHY_CRS0, sav.phy_crs0); + b43_phy_write(dev, B43_PHY_CLASSCTL, sav.phy_classctl); + if (has_loopback_gain(phy)) { + b43_phy_write(dev, B43_PHY_LO_MASK, sav.phy_lo_mask); + b43_phy_write(dev, B43_PHY_LO_CTL, sav.phy_lo_ctl); + } + } + if (i > 15) + ret = radio78; + else + ret = rcc; + + return ret; +} + +static void b43_phy_initb5(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + u16 offset, value; + u8 old_channel; + + if (phy->analog == 1) { + b43_radio_set(dev, 0x007A, 0x0050); + } + if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type != SSB_BOARD_BU4306)) { + value = 0x2120; + for (offset = 0x00A8; offset < 0x00C7; offset++) { + b43_phy_write(dev, offset, value); + value += 0x202; + } + } + b43_phy_maskset(dev, 0x0035, 0xF0FF, 0x0700); + if (phy->radio_ver == 0x2050) + b43_phy_write(dev, 0x0038, 0x0667); + + if (phy->gmode || phy->rev >= 2) { + if (phy->radio_ver == 0x2050) { + b43_radio_set(dev, 0x007A, 0x0020); + b43_radio_set(dev, 0x0051, 0x0004); + } + b43_write16(dev, B43_MMIO_PHY_RADIO, 0x0000); + + b43_phy_set(dev, 0x0802, 0x0100); + b43_phy_set(dev, 0x042B, 0x2000); + + b43_phy_write(dev, 0x001C, 0x186A); + + b43_phy_maskset(dev, 0x0013, 0x00FF, 0x1900); + b43_phy_maskset(dev, 0x0035, 0xFFC0, 0x0064); + b43_phy_maskset(dev, 0x005D, 0xFF80, 0x000A); + } + + if (dev->bad_frames_preempt) { + b43_phy_set(dev, B43_PHY_RADIO_BITFIELD, (1 << 11)); + } + + if (phy->analog == 1) { + b43_phy_write(dev, 0x0026, 0xCE00); + b43_phy_write(dev, 0x0021, 0x3763); + b43_phy_write(dev, 0x0022, 0x1BC3); + b43_phy_write(dev, 0x0023, 0x06F9); + b43_phy_write(dev, 0x0024, 0x037E); + } else + b43_phy_write(dev, 0x0026, 0xCC00); + b43_phy_write(dev, 0x0030, 0x00C6); + b43_write16(dev, 0x03EC, 0x3F22); + + if (phy->analog == 1) + b43_phy_write(dev, 0x0020, 0x3E1C); + else + b43_phy_write(dev, 0x0020, 0x301C); + + if (phy->analog == 0) + b43_write16(dev, 0x03E4, 0x3000); + + old_channel = phy->channel; + /* Force to channel 7, even if not supported. 
*/ + b43_gphy_channel_switch(dev, 7, 0); + + if (phy->radio_ver != 0x2050) { + b43_radio_write16(dev, 0x0075, 0x0080); + b43_radio_write16(dev, 0x0079, 0x0081); + } + + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x0050, 0x0023); + + if (phy->radio_ver == 0x2050) { + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x005A, 0x0070); + } + + b43_radio_write16(dev, 0x005B, 0x007B); + b43_radio_write16(dev, 0x005C, 0x00B0); + + b43_radio_set(dev, 0x007A, 0x0007); + + b43_gphy_channel_switch(dev, old_channel, 0); + + b43_phy_write(dev, 0x0014, 0x0080); + b43_phy_write(dev, 0x0032, 0x00CA); + b43_phy_write(dev, 0x002A, 0x88A3); + + b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control); + + if (phy->radio_ver == 0x2050) + b43_radio_write16(dev, 0x005D, 0x000D); + + b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004); +} + +static void b43_phy_initb6(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + u16 offset, val; + u8 old_channel; + + b43_phy_write(dev, 0x003E, 0x817A); + b43_radio_write16(dev, 0x007A, + (b43_radio_read16(dev, 0x007A) | 0x0058)); + if (phy->radio_rev == 4 || phy->radio_rev == 5) { + b43_radio_write16(dev, 0x51, 0x37); + b43_radio_write16(dev, 0x52, 0x70); + b43_radio_write16(dev, 0x53, 0xB3); + b43_radio_write16(dev, 0x54, 0x9B); + b43_radio_write16(dev, 0x5A, 0x88); + b43_radio_write16(dev, 0x5B, 0x88); + b43_radio_write16(dev, 0x5D, 0x88); + b43_radio_write16(dev, 0x5E, 0x88); + b43_radio_write16(dev, 0x7D, 0x88); + b43_hf_write(dev, b43_hf_read(dev) + | B43_HF_TSSIRPSMW); + } + B43_WARN_ON(phy->radio_rev == 6 || phy->radio_rev == 7); /* We had code for these revs here... */ + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x51, 0); + b43_radio_write16(dev, 0x52, 0x40); + b43_radio_write16(dev, 0x53, 0xB7); + b43_radio_write16(dev, 0x54, 0x98); + b43_radio_write16(dev, 0x5A, 0x88); + b43_radio_write16(dev, 0x5B, 0x6B); + b43_radio_write16(dev, 0x5C, 0x0F); + if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) { + b43_radio_write16(dev, 0x5D, 0xFA); + b43_radio_write16(dev, 0x5E, 0xD8); + } else { + b43_radio_write16(dev, 0x5D, 0xF5); + b43_radio_write16(dev, 0x5E, 0xB8); + } + b43_radio_write16(dev, 0x0073, 0x0003); + b43_radio_write16(dev, 0x007D, 0x00A8); + b43_radio_write16(dev, 0x007C, 0x0001); + b43_radio_write16(dev, 0x007E, 0x0008); + } + val = 0x1E1F; + for (offset = 0x0088; offset < 0x0098; offset++) { + b43_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x3E3F; + for (offset = 0x0098; offset < 0x00A8; offset++) { + b43_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x2120; + for (offset = 0x00A8; offset < 0x00C8; offset++) { + b43_phy_write(dev, offset, (val & 0x3F3F)); + val += 0x0202; + } + if (phy->type == B43_PHYTYPE_G) { + b43_radio_set(dev, 0x007A, 0x0020); + b43_radio_set(dev, 0x0051, 0x0004); + b43_phy_set(dev, 0x0802, 0x0100); + b43_phy_set(dev, 0x042B, 0x2000); + b43_phy_write(dev, 0x5B, 0); + b43_phy_write(dev, 0x5C, 0); + } + + old_channel = phy->channel; + if (old_channel >= 8) + b43_gphy_channel_switch(dev, 1, 0); + else + b43_gphy_channel_switch(dev, 13, 0); + + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x0050, 0x0023); + udelay(40); + if (phy->radio_rev < 6 || phy->radio_rev == 8) { + b43_radio_write16(dev, 0x7C, (b43_radio_read16(dev, 0x7C) + | 0x0002)); + b43_radio_write16(dev, 0x50, 0x20); + } + if (phy->radio_rev <= 2) { + b43_radio_write16(dev, 0x7C, 0x20); + 
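b43_phy_initb6() above fills three blocks of PHY registers with arithmetic ramps: a descending sequence starting at 0x1E1F for registers 0x88-0x97, another starting at 0x3E3F for 0x98-0xA7, and an ascending sequence starting at 0x2120 (masked with 0x3F3F) for 0xA8-0xC7. The throwaway program below only prints the register/value pairs those loops generate, to make the resulting tables easier to eyeball; it does not touch hardware.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t offset, val;

	/* Descending ramp for PHY regs 0x88..0x97 */
	val = 0x1E1F;
	for (offset = 0x0088; offset < 0x0098; offset++, val -= 0x0202)
		printf("phy[0x%04X] = 0x%04X\n", (unsigned)offset, (unsigned)val);

	/* Second descending ramp for PHY regs 0x98..0xA7 */
	val = 0x3E3F;
	for (offset = 0x0098; offset < 0x00A8; offset++, val -= 0x0202)
		printf("phy[0x%04X] = 0x%04X\n", (unsigned)offset, (unsigned)val);

	/* Ascending ramp for PHY regs 0xA8..0xC7, masked to 0x3F3F */
	val = 0x2120;
	for (offset = 0x00A8; offset < 0x00C8; offset++, val += 0x0202)
		printf("phy[0x%04X] = 0x%04X\n", (unsigned)offset,
		       (unsigned)(val & 0x3F3F));

	return 0;
}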
b43_radio_write16(dev, 0x5A, 0x70); + b43_radio_write16(dev, 0x5B, 0x7B); + b43_radio_write16(dev, 0x5C, 0xB0); + } + b43_radio_maskset(dev, 0x007A, 0x00F8, 0x0007); + + b43_gphy_channel_switch(dev, old_channel, 0); + + b43_phy_write(dev, 0x0014, 0x0200); + if (phy->radio_rev >= 6) + b43_phy_write(dev, 0x2A, 0x88C2); + else + b43_phy_write(dev, 0x2A, 0x8AC0); + b43_phy_write(dev, 0x0038, 0x0668); + b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control); + if (phy->radio_rev <= 5) { + b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003); + } + if (phy->radio_rev <= 2) + b43_radio_write16(dev, 0x005D, 0x000D); + + if (phy->analog == 4) { + b43_write16(dev, 0x3E4, 9); + b43_phy_mask(dev, 0x61, 0x0FFF); + } else { + b43_phy_maskset(dev, 0x0002, 0xFFC0, 0x0004); + } + if (phy->type == B43_PHYTYPE_B) + B43_WARN_ON(1); + else if (phy->type == B43_PHYTYPE_G) + b43_write16(dev, 0x03E6, 0x0); +} + +static void b43_calc_loopback_gain(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + u16 backup_phy[16] = { 0 }; + u16 backup_radio[3]; + u16 backup_bband; + u16 i, j, loop_i_max; + u16 trsw_rx; + u16 loop1_outer_done, loop1_inner_done; + + backup_phy[0] = b43_phy_read(dev, B43_PHY_CRS0); + backup_phy[1] = b43_phy_read(dev, B43_PHY_CCKBBANDCFG); + backup_phy[2] = b43_phy_read(dev, B43_PHY_RFOVER); + backup_phy[3] = b43_phy_read(dev, B43_PHY_RFOVERVAL); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + backup_phy[4] = b43_phy_read(dev, B43_PHY_ANALOGOVER); + backup_phy[5] = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); + } + backup_phy[6] = b43_phy_read(dev, B43_PHY_CCK(0x5A)); + backup_phy[7] = b43_phy_read(dev, B43_PHY_CCK(0x59)); + backup_phy[8] = b43_phy_read(dev, B43_PHY_CCK(0x58)); + backup_phy[9] = b43_phy_read(dev, B43_PHY_CCK(0x0A)); + backup_phy[10] = b43_phy_read(dev, B43_PHY_CCK(0x03)); + backup_phy[11] = b43_phy_read(dev, B43_PHY_LO_MASK); + backup_phy[12] = b43_phy_read(dev, B43_PHY_LO_CTL); + backup_phy[13] = b43_phy_read(dev, B43_PHY_CCK(0x2B)); + backup_phy[14] = b43_phy_read(dev, B43_PHY_PGACTL); + backup_phy[15] = b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + backup_bband = gphy->bbatt.att; + backup_radio[0] = b43_radio_read16(dev, 0x52); + backup_radio[1] = b43_radio_read16(dev, 0x43); + backup_radio[2] = b43_radio_read16(dev, 0x7A); + + b43_phy_mask(dev, B43_PHY_CRS0, 0x3FFF); + b43_phy_set(dev, B43_PHY_CCKBBANDCFG, 0x8000); + b43_phy_set(dev, B43_PHY_RFOVER, 0x0002); + b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xFFFD); + b43_phy_set(dev, B43_PHY_RFOVER, 0x0001); + b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xFFFE); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_set(dev, B43_PHY_ANALOGOVER, 0x0001); + b43_phy_mask(dev, B43_PHY_ANALOGOVERVAL, 0xFFFE); + b43_phy_set(dev, B43_PHY_ANALOGOVER, 0x0002); + b43_phy_mask(dev, B43_PHY_ANALOGOVERVAL, 0xFFFD); + } + b43_phy_set(dev, B43_PHY_RFOVER, 0x000C); + b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x000C); + b43_phy_set(dev, B43_PHY_RFOVER, 0x0030); + b43_phy_maskset(dev, B43_PHY_RFOVERVAL, 0xFFCF, 0x10); + + b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0780); + b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810); + b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D); + + b43_phy_set(dev, B43_PHY_CCK(0x0A), 0x2000); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_set(dev, B43_PHY_ANALOGOVER, 0x0004); + b43_phy_mask(dev, B43_PHY_ANALOGOVERVAL, 0xFFFB); + } + b43_phy_maskset(dev, B43_PHY_CCK(0x03), 0xFF9F, 0x40); 
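b43_calc_loopback_gain() above, like most of this file, leans on the b43_phy_set / b43_phy_mask / b43_phy_maskset helpers. As a reading aid, here is a plain-C model of the read-modify-write pattern those helpers implement, using a fake in-memory register file instead of MMIO; the real helpers are defined outside this hunk, so treat the exact signatures here as illustrative only.

#include <stdio.h>
#include <stdint.h>

static uint16_t fake_regs[0x1000];        /* stand-in for the PHY register space */

static uint16_t phy_read(uint16_t reg)            { return fake_regs[reg]; }
static void phy_write(uint16_t reg, uint16_t val) { fake_regs[reg] = val; }

/* OR bits into a register */
static void phy_set(uint16_t reg, uint16_t bits)  { phy_write(reg, phy_read(reg) | bits); }
/* AND a mask onto a register (clears the zero bits of the mask) */
static void phy_mask(uint16_t reg, uint16_t mask) { phy_write(reg, phy_read(reg) & mask); }
/* Clear a field, then set new bits within it */
static void phy_maskset(uint16_t reg, uint16_t mask, uint16_t set)
{
	phy_write(reg, (phy_read(reg) & mask) | set);
}

int main(void)
{
	phy_write(0x0011, 0x1234);          /* register number picked arbitrarily */
	phy_set(0x0011, 0x000C);
	phy_mask(0x0011, 0xFFFD);
	phy_maskset(0x0011, 0xFFCF, 0x0010);
	printf("reg 0x0011 = 0x%04X\n", (unsigned)phy_read(0x0011));
	return 0;
}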
+ + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x43, 0x000F); + } else { + b43_radio_write16(dev, 0x52, 0); + b43_radio_maskset(dev, 0x43, 0xFFF0, 0x9); + } + b43_gphy_set_baseband_attenuation(dev, 11); + + if (phy->rev >= 3) + b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020); + else + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020); + b43_phy_write(dev, B43_PHY_LO_CTL, 0); + + b43_phy_maskset(dev, B43_PHY_CCK(0x2B), 0xFFC0, 0x01); + b43_phy_maskset(dev, B43_PHY_CCK(0x2B), 0xC0FF, 0x800); + + b43_phy_set(dev, B43_PHY_RFOVER, 0x0100); + b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xCFFF); + + if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) { + if (phy->rev >= 7) { + b43_phy_set(dev, B43_PHY_RFOVER, 0x0800); + b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x8000); + } + } + b43_radio_mask(dev, 0x7A, 0x00F7); + + j = 0; + loop_i_max = (phy->radio_rev == 8) ? 15 : 9; + for (i = 0; i < loop_i_max; i++) { + for (j = 0; j < 16; j++) { + b43_radio_write16(dev, 0x43, i); + b43_phy_maskset(dev, B43_PHY_RFOVERVAL, 0xF0FF, (j << 8)); + b43_phy_maskset(dev, B43_PHY_PGACTL, 0x0FFF, 0xA000); + b43_phy_set(dev, B43_PHY_PGACTL, 0xF000); + udelay(20); + if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC) + goto exit_loop1; + } + } + exit_loop1: + loop1_outer_done = i; + loop1_inner_done = j; + if (j >= 8) { + b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x30); + trsw_rx = 0x1B; + for (j = j - 8; j < 16; j++) { + b43_phy_maskset(dev, B43_PHY_RFOVERVAL, 0xF0FF, (j << 8)); + b43_phy_maskset(dev, B43_PHY_PGACTL, 0x0FFF, 0xA000); + b43_phy_set(dev, B43_PHY_PGACTL, 0xF000); + udelay(20); + trsw_rx -= 3; + if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC) + goto exit_loop2; + } + } else + trsw_rx = 0x18; + exit_loop2: + + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, B43_PHY_ANALOGOVER, backup_phy[4]); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, backup_phy[5]); + } + b43_phy_write(dev, B43_PHY_CCK(0x5A), backup_phy[6]); + b43_phy_write(dev, B43_PHY_CCK(0x59), backup_phy[7]); + b43_phy_write(dev, B43_PHY_CCK(0x58), backup_phy[8]); + b43_phy_write(dev, B43_PHY_CCK(0x0A), backup_phy[9]); + b43_phy_write(dev, B43_PHY_CCK(0x03), backup_phy[10]); + b43_phy_write(dev, B43_PHY_LO_MASK, backup_phy[11]); + b43_phy_write(dev, B43_PHY_LO_CTL, backup_phy[12]); + b43_phy_write(dev, B43_PHY_CCK(0x2B), backup_phy[13]); + b43_phy_write(dev, B43_PHY_PGACTL, backup_phy[14]); + + b43_gphy_set_baseband_attenuation(dev, backup_bband); + + b43_radio_write16(dev, 0x52, backup_radio[0]); + b43_radio_write16(dev, 0x43, backup_radio[1]); + b43_radio_write16(dev, 0x7A, backup_radio[2]); + + b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2] | 0x0003); + udelay(10); + b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2]); + b43_phy_write(dev, B43_PHY_RFOVERVAL, backup_phy[3]); + b43_phy_write(dev, B43_PHY_CRS0, backup_phy[0]); + b43_phy_write(dev, B43_PHY_CCKBBANDCFG, backup_phy[1]); + + gphy->max_lb_gain = + ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11; + gphy->trsw_rx_gain = trsw_rx * 2; +} + +static void b43_hardware_pctl_early_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (!b43_has_hardware_pctl(dev)) { + b43_phy_write(dev, 0x047A, 0xC111); + return; + } + + b43_phy_mask(dev, 0x0036, 0xFEFF); + b43_phy_write(dev, 0x002F, 0x0202); + b43_phy_set(dev, 0x047C, 0x0002); + b43_phy_set(dev, 0x047A, 0xF000); + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { + b43_phy_maskset(dev, 0x047A, 0xFF0F, 0x0010); + b43_phy_set(dev, 0x005D, 0x8000); + b43_phy_maskset(dev, 
0x004E, 0xFFC0, 0x0010); + b43_phy_write(dev, 0x002E, 0xC07F); + b43_phy_set(dev, 0x0036, 0x0400); + } else { + b43_phy_set(dev, 0x0036, 0x0200); + b43_phy_set(dev, 0x0036, 0x0400); + b43_phy_mask(dev, 0x005D, 0x7FFF); + b43_phy_mask(dev, 0x004F, 0xFFFE); + b43_phy_maskset(dev, 0x004E, 0xFFC0, 0x0010); + b43_phy_write(dev, 0x002E, 0xC07F); + b43_phy_maskset(dev, 0x047A, 0xFF0F, 0x0010); + } +} + +/* Hardware power control for G-PHY */ +static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + + if (!b43_has_hardware_pctl(dev)) { + /* No hardware power control */ + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_HWPCTL); + return; + } + + b43_phy_maskset(dev, 0x0036, 0xFFC0, (gphy->tgt_idle_tssi - gphy->cur_idle_tssi)); + b43_phy_maskset(dev, 0x0478, 0xFF00, (gphy->tgt_idle_tssi - gphy->cur_idle_tssi)); + b43_gphy_tssi_power_lt_init(dev); + b43_gphy_gain_lt_init(dev); + b43_phy_mask(dev, 0x0060, 0xFFBF); + b43_phy_write(dev, 0x0014, 0x0000); + + B43_WARN_ON(phy->rev < 6); + b43_phy_set(dev, 0x0478, 0x0800); + b43_phy_mask(dev, 0x0478, 0xFEFF); + b43_phy_mask(dev, 0x0801, 0xFFBF); + + b43_gphy_dc_lt_init(dev, 1); + + /* Enable hardware pctl in firmware. */ + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL); +} + +/* Intialize B/G PHY power control */ +static void b43_phy_init_pctl(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_rfatt old_rfatt; + struct b43_bbatt old_bbatt; + u8 old_tx_control = 0; + + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type == SSB_BOARD_BU4306)) + return; + + b43_phy_write(dev, 0x0028, 0x8018); + + /* This does something with the Analog... */ + b43_write16(dev, B43_MMIO_PHY0, b43_read16(dev, B43_MMIO_PHY0) + & 0xFFDF); + + if (!phy->gmode) + return; + b43_hardware_pctl_early_init(dev); + if (gphy->cur_idle_tssi == 0) { + if (phy->radio_ver == 0x2050 && phy->analog == 0) { + b43_radio_maskset(dev, 0x0076, 0x00F7, 0x0084); + } else { + struct b43_rfatt rfatt; + struct b43_bbatt bbatt; + + memcpy(&old_rfatt, &gphy->rfatt, sizeof(old_rfatt)); + memcpy(&old_bbatt, &gphy->bbatt, sizeof(old_bbatt)); + old_tx_control = gphy->tx_control; + + bbatt.att = 11; + if (phy->radio_rev == 8) { + rfatt.att = 15; + rfatt.with_padmix = 1; + } else { + rfatt.att = 9; + rfatt.with_padmix = 0; + } + b43_set_txpower_g(dev, &bbatt, &rfatt, 0); + } + b43_dummy_transmission(dev, false, true); + gphy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_ITSSI); + if (B43_DEBUG) { + /* Current-Idle-TSSI sanity check. */ + if (abs(gphy->cur_idle_tssi - gphy->tgt_idle_tssi) >= 20) { + b43dbg(dev->wl, + "!WARNING! Idle-TSSI phy->cur_idle_tssi " + "measuring failed. (cur=%d, tgt=%d). 
Disabling TX power " + "adjustment.\n", gphy->cur_idle_tssi, + gphy->tgt_idle_tssi); + gphy->cur_idle_tssi = 0; + } + } + if (phy->radio_ver == 0x2050 && phy->analog == 0) { + b43_radio_mask(dev, 0x0076, 0xFF7B); + } else { + b43_set_txpower_g(dev, &old_bbatt, + &old_rfatt, old_tx_control); + } + } + b43_hardware_pctl_init_gphy(dev); + b43_shm_clear_tssi(dev); +} + +static void b43_phy_initg(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + u16 tmp; + + if (phy->rev == 1) + b43_phy_initb5(dev); + else + b43_phy_initb6(dev); + + if (phy->rev >= 2 || phy->gmode) + b43_phy_inita(dev); + + if (phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_ANALOGOVER, 0); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, 0); + } + if (phy->rev == 2) { + b43_phy_write(dev, B43_PHY_RFOVER, 0); + b43_phy_write(dev, B43_PHY_PGACTL, 0xC0); + } + if (phy->rev > 5) { + b43_phy_write(dev, B43_PHY_RFOVER, 0x400); + b43_phy_write(dev, B43_PHY_PGACTL, 0xC0); + } + if (phy->gmode || phy->rev >= 2) { + tmp = b43_phy_read(dev, B43_PHY_VERSION_OFDM); + tmp &= B43_PHYVER_VERSION; + if (tmp == 3 || tmp == 5) { + b43_phy_write(dev, B43_PHY_OFDM(0xC2), 0x1816); + b43_phy_write(dev, B43_PHY_OFDM(0xC3), 0x8006); + } + if (tmp == 5) { + b43_phy_maskset(dev, B43_PHY_OFDM(0xCC), 0x00FF, 0x1F00); + } + } + if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2) + b43_phy_write(dev, B43_PHY_OFDM(0x7E), 0x78); + if (phy->radio_rev == 8) { + b43_phy_set(dev, B43_PHY_EXTG(0x01), 0x80); + b43_phy_set(dev, B43_PHY_OFDM(0x3E), 0x4); + } + if (has_loopback_gain(phy)) + b43_calc_loopback_gain(dev); + + if (phy->radio_rev != 8) { + if (gphy->initval == 0xFFFF) + gphy->initval = b43_radio_init2050(dev); + else + b43_radio_write16(dev, 0x0078, gphy->initval); + } + b43_lo_g_init(dev); + if (has_tx_magnification(phy)) { + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) & 0xFF00) + | gphy->lo_control->tx_bias | gphy-> + lo_control->tx_magn); + } else { + b43_radio_maskset(dev, 0x52, 0xFFF0, gphy->lo_control->tx_bias); + } + if (phy->rev >= 6) { + b43_phy_maskset(dev, B43_PHY_CCK(0x36), 0x0FFF, (gphy->lo_control->tx_bias << 12)); + } + if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) + b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075); + else + b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F); + if (phy->rev < 2) + b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101); + else + b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202); + if (phy->gmode || phy->rev >= 2) { + b43_lo_g_adjust(dev); + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); + } + + if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) { + /* The specs state to update the NRSSI LT with + * the value 0x7FFFFFFF here. I think that is some weird + * compiler optimization in the original driver. + * Essentially, what we do here is resetting all NRSSI LT + * entries to -32 (see the clamp_val() in nrssi_hw_update()) + */ + b43_nrssi_hw_update(dev, 0xFFFF); //FIXME? 
+ b43_calc_nrssi_threshold(dev); + } else if (phy->gmode || phy->rev >= 2) { + if (gphy->nrssi[0] == -1000) { + B43_WARN_ON(gphy->nrssi[1] != -1000); + b43_calc_nrssi_slope(dev); + } else + b43_calc_nrssi_threshold(dev); + } + if (phy->radio_rev == 8) + b43_phy_write(dev, B43_PHY_EXTG(0x05), 0x3230); + b43_phy_init_pctl(dev); + /* FIXME: The spec says in the following if, the 0 should be replaced + 'if OFDM may not be used in the current locale' + but OFDM is legal everywhere */ + if ((dev->dev->bus->chip_id == 0x4306 + && dev->dev->bus->chip_package == 2) || 0) { + b43_phy_mask(dev, B43_PHY_CRS0, 0xBFFF); + b43_phy_mask(dev, B43_PHY_OFDM(0xC3), 0x7FFF); + } +} + +void b43_gphy_channel_switch(struct b43_wldev *dev, + unsigned int channel, + bool synthetic_pu_workaround) +{ + if (synthetic_pu_workaround) + b43_synth_pu_workaround(dev, channel); + + b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); + + if (channel == 14) { + if (dev->dev->bus->sprom.country_code == + SSB_SPROM1CCODE_JAPAN) + b43_hf_write(dev, + b43_hf_read(dev) & ~B43_HF_ACPR); + else + b43_hf_write(dev, + b43_hf_read(dev) | B43_HF_ACPR); + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, B43_MMIO_CHANNEL_EXT) + | (1 << 11)); + } else { + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, B43_MMIO_CHANNEL_EXT) + & 0xF7BF); + } +} + +static void default_baseband_attenuation(struct b43_wldev *dev, + struct b43_bbatt *bb) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->radio_ver == 0x2050 && phy->radio_rev < 6) + bb->att = 0; + else + bb->att = 2; +} + +static void default_radio_attenuation(struct b43_wldev *dev, + struct b43_rfatt *rf) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + + rf->with_padmix = 0; + + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM && + bus->boardinfo.type == SSB_BOARD_BCM4309G) { + if (bus->boardinfo.rev < 0x43) { + rf->att = 2; + return; + } else if (bus->boardinfo.rev < 0x51) { + rf->att = 3; + return; + } + } + + if (phy->type == B43_PHYTYPE_A) { + rf->att = 0x60; + return; + } + + switch (phy->radio_ver) { + case 0x2053: + switch (phy->radio_rev) { + case 1: + rf->att = 6; + return; + } + break; + case 0x2050: + switch (phy->radio_rev) { + case 0: + rf->att = 5; + return; + case 1: + if (phy->type == B43_PHYTYPE_G) { + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == SSB_BOARD_BCM4309G + && bus->boardinfo.rev >= 30) + rf->att = 3; + else if (bus->boardinfo.vendor == + SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == + SSB_BOARD_BU4306) + rf->att = 3; + else + rf->att = 1; + } else { + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == SSB_BOARD_BCM4309G + && bus->boardinfo.rev >= 30) + rf->att = 7; + else + rf->att = 6; + } + return; + case 2: + if (phy->type == B43_PHYTYPE_G) { + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == SSB_BOARD_BCM4309G + && bus->boardinfo.rev >= 30) + rf->att = 3; + else if (bus->boardinfo.vendor == + SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == + SSB_BOARD_BU4306) + rf->att = 5; + else if (bus->chip_id == 0x4320) + rf->att = 4; + else + rf->att = 3; + } else + rf->att = 6; + return; + case 3: + rf->att = 5; + return; + case 4: + case 5: + rf->att = 1; + return; + case 6: + case 7: + rf->att = 5; + return; + case 8: + rf->att = 0xA; + rf->with_padmix = 1; + return; + case 9: + default: + rf->att = 5; + return; + } + } + rf->att = 5; +} + +static u16 default_tx_control(struct b43_wldev *dev) +{ + struct b43_phy *phy = 
&dev->phy; + + if (phy->radio_ver != 0x2050) + return 0; + if (phy->radio_rev == 1) + return B43_TXCTL_PA2DB | B43_TXCTL_TXMIX; + if (phy->radio_rev < 6) + return B43_TXCTL_PA2DB; + if (phy->radio_rev == 8) + return B43_TXCTL_TXMIX; + return 0; +} + +static u8 b43_gphy_aci_detect(struct b43_wldev *dev, u8 channel) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + u8 ret = 0; + u16 saved, rssi, temp; + int i, j = 0; + + saved = b43_phy_read(dev, 0x0403); + b43_switch_channel(dev, channel); + b43_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5); + if (gphy->aci_hw_rssi) + rssi = b43_phy_read(dev, 0x048A) & 0x3F; + else + rssi = saved & 0x3F; + /* clamp temp to signed 5bit */ + if (rssi > 32) + rssi -= 64; + for (i = 0; i < 100; i++) { + temp = (b43_phy_read(dev, 0x047F) >> 8) & 0x3F; + if (temp > 32) + temp -= 64; + if (temp < rssi) + j++; + if (j >= 20) + ret = 1; + } + b43_phy_write(dev, 0x0403, saved); + + return ret; +} + +static u8 b43_gphy_aci_scan(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u8 ret[13]; + unsigned int channel = phy->channel; + unsigned int i, j, start, end; + + if (!((phy->type == B43_PHYTYPE_G) && (phy->rev > 0))) + return 0; + + b43_phy_lock(dev); + b43_radio_lock(dev); + b43_phy_mask(dev, 0x0802, 0xFFFC); + b43_phy_mask(dev, B43_PHY_G_CRS, 0x7FFF); + b43_set_all_gains(dev, 3, 8, 1); + + start = (channel - 5 > 0) ? channel - 5 : 1; + end = (channel + 5 < 14) ? channel + 5 : 13; + + for (i = start; i <= end; i++) { + if (abs(channel - i) > 2) + ret[i - 1] = b43_gphy_aci_detect(dev, i); + } + b43_switch_channel(dev, channel); + b43_phy_maskset(dev, 0x0802, 0xFFFC, 0x0003); + b43_phy_mask(dev, 0x0403, 0xFFF8); + b43_phy_set(dev, B43_PHY_G_CRS, 0x8000); + b43_set_original_gains(dev); + for (i = 0; i < 13; i++) { + if (!ret[i]) + continue; + end = (i + 5 < 13) ? 
i + 5 : 13; + for (j = i; j < end; j++) + ret[j] = 1; + } + b43_radio_unlock(dev); + b43_phy_unlock(dev); + + return ret[channel - 1]; +} + +static s32 b43_tssi2dbm_ad(s32 num, s32 den) +{ + if (num < 0) + return num / den; + else + return (num + den / 2) / den; +} + +static s8 b43_tssi2dbm_entry(s8 entry[], u8 index, + s16 pab0, s16 pab1, s16 pab2) +{ + s32 m1, m2, f = 256, q, delta; + s8 i = 0; + + m1 = b43_tssi2dbm_ad(16 * pab0 + index * pab1, 32); + m2 = max(b43_tssi2dbm_ad(32768 + index * pab2, 256), 1); + do { + if (i > 15) + return -EINVAL; + q = b43_tssi2dbm_ad(f * 4096 - + b43_tssi2dbm_ad(m2 * f, 16) * f, 2048); + delta = abs(q - f); + f = q; + i++; + } while (delta >= 2); + entry[index] = clamp_val(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128); + return 0; +} + +u8 *b43_generate_dyn_tssi2dbm_tab(struct b43_wldev *dev, + s16 pab0, s16 pab1, s16 pab2) +{ + unsigned int i; + u8 *tab; + int err; + + tab = kmalloc(64, GFP_KERNEL); + if (!tab) { + b43err(dev->wl, "Could not allocate memory " + "for tssi2dbm table\n"); + return NULL; + } + for (i = 0; i < 64; i++) { + err = b43_tssi2dbm_entry(tab, i, pab0, pab1, pab2); + if (err) { + b43err(dev->wl, "Could not generate " + "tssi2dBm table\n"); + kfree(tab); + return NULL; + } + } + + return tab; +} + +/* Initialise the TSSI->dBm lookup table */ +static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + s16 pab0, pab1, pab2; + + pab0 = (s16) (dev->dev->bus->sprom.pa0b0); + pab1 = (s16) (dev->dev->bus->sprom.pa0b1); + pab2 = (s16) (dev->dev->bus->sprom.pa0b2); + + B43_WARN_ON((dev->dev->bus->chip_id == 0x4301) && + (phy->radio_ver != 0x2050)); /* Not supported anymore */ + + gphy->dyn_tssi_tbl = 0; + + if (pab0 != 0 && pab1 != 0 && pab2 != 0 && + pab0 != -1 && pab1 != -1 && pab2 != -1) { + /* The pabX values are set in SPROM. Use them. */ + if ((s8) dev->dev->bus->sprom.itssi_bg != 0 && + (s8) dev->dev->bus->sprom.itssi_bg != -1) { + gphy->tgt_idle_tssi = + (s8) (dev->dev->bus->sprom.itssi_bg); + } else + gphy->tgt_idle_tssi = 62; + gphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0, + pab1, pab2); + if (!gphy->tssi2dbm) + return -ENOMEM; + gphy->dyn_tssi_tbl = 1; + } else { + /* pabX values not set in SPROM. */ + gphy->tgt_idle_tssi = 52; + gphy->tssi2dbm = b43_tssi2dbm_g_table; + } + + return 0; +} + +static int b43_gphy_op_allocate(struct b43_wldev *dev) +{ + struct b43_phy_g *gphy; + struct b43_txpower_lo_control *lo; + int err; + + gphy = kzalloc(sizeof(*gphy), GFP_KERNEL); + if (!gphy) { + err = -ENOMEM; + goto error; + } + dev->phy.g = gphy; + + lo = kzalloc(sizeof(*lo), GFP_KERNEL); + if (!lo) { + err = -ENOMEM; + goto err_free_gphy; + } + gphy->lo_control = lo; + + err = b43_gphy_init_tssi2dbm_table(dev); + if (err) + goto err_free_lo; + + return 0; + +err_free_lo: + kfree(lo); +err_free_gphy: + kfree(gphy); +error: + return err; +} + +static void b43_gphy_op_prepare_structs(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + const void *tssi2dbm; + int tgt_idle_tssi; + struct b43_txpower_lo_control *lo; + unsigned int i; + + /* tssi2dbm table is constant, so it is initialized at alloc time. + * Save a copy of the pointer. */ + tssi2dbm = gphy->tssi2dbm; + tgt_idle_tssi = gphy->tgt_idle_tssi; + /* Save the LO pointer. */ + lo = gphy->lo_control; + + /* Zero out the whole PHY structure. */ + memset(gphy, 0, sizeof(*gphy)); + + /* Restore pointers. 
*/ + gphy->tssi2dbm = tssi2dbm; + gphy->tgt_idle_tssi = tgt_idle_tssi; + gphy->lo_control = lo; + + memset(gphy->minlowsig, 0xFF, sizeof(gphy->minlowsig)); + + /* NRSSI */ + for (i = 0; i < ARRAY_SIZE(gphy->nrssi); i++) + gphy->nrssi[i] = -1000; + for (i = 0; i < ARRAY_SIZE(gphy->nrssi_lt); i++) + gphy->nrssi_lt[i] = i; + + gphy->lofcal = 0xFFFF; + gphy->initval = 0xFFFF; + + gphy->interfmode = B43_INTERFMODE_NONE; + + /* OFDM-table address caching. */ + gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_UNKNOWN; + + gphy->average_tssi = 0xFF; + + /* Local Osciallator structure */ + lo->tx_bias = 0xFF; + INIT_LIST_HEAD(&lo->calib_list); +} + +static void b43_gphy_op_free(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + + kfree(gphy->lo_control); + + if (gphy->dyn_tssi_tbl) + kfree(gphy->tssi2dbm); + gphy->dyn_tssi_tbl = 0; + gphy->tssi2dbm = NULL; + + kfree(gphy); + dev->phy.g = NULL; +} + +static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + struct b43_txpower_lo_control *lo = gphy->lo_control; + + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + + default_baseband_attenuation(dev, &gphy->bbatt); + default_radio_attenuation(dev, &gphy->rfatt); + gphy->tx_control = (default_tx_control(dev) << 4); + generate_rfatt_list(dev, &lo->rfatt_list); + generate_bbatt_list(dev, &lo->bbatt_list); + + /* Commit previous writes */ + b43_read32(dev, B43_MMIO_MACCTL); + + if (phy->rev == 1) { + /* Workaround: Temporarly disable gmode through the early init + * phase, as the gmode stuff is not needed for phy rev 1 */ + phy->gmode = 0; + b43_wireless_core_reset(dev, 0); + b43_phy_initg(dev); + phy->gmode = 1; + b43_wireless_core_reset(dev, B43_TMSLOW_GMODE); + } + + return 0; +} + +static int b43_gphy_op_init(struct b43_wldev *dev) +{ + b43_phy_initg(dev); + + return 0; +} + +static void b43_gphy_op_exit(struct b43_wldev *dev) +{ + b43_lo_g_cleanup(dev); +} + +static u16 b43_gphy_op_read(struct b43_wldev *dev, u16 reg) +{ + b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + return b43_read16(dev, B43_MMIO_PHY_DATA); +} + +static void b43_gphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) +{ + b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16(dev, B43_MMIO_PHY_DATA, value); +} + +static u16 b43_gphy_op_radio_read(struct b43_wldev *dev, u16 reg) +{ + /* Register 1 is a 32-bit register. */ + B43_WARN_ON(reg == 1); + /* G-PHY needs 0x80 for read access. */ + reg |= 0x80; + + b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); +} + +static void b43_gphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) +{ + /* Register 1 is a 32-bit register. */ + B43_WARN_ON(reg == 1); + + b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); +} + +static bool b43_gphy_op_supports_hwpctl(struct b43_wldev *dev) +{ + return (dev->phy.rev >= 6); +} + +static void b43_gphy_op_software_rfkill(struct b43_wldev *dev, + bool blocked) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + unsigned int channel; + + might_sleep(); + + if (!blocked) { + /* Turn radio ON */ + if (phy->radio_on) + return; + + b43_phy_write(dev, 0x0015, 0x8000); + b43_phy_write(dev, 0x0015, 0xCC00); + b43_phy_write(dev, 0x0015, (phy->gmode ? 0x00C0 : 0x0000)); + if (gphy->radio_off_context.valid) { + /* Restore the RFover values. 
*/ + b43_phy_write(dev, B43_PHY_RFOVER, + gphy->radio_off_context.rfover); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + gphy->radio_off_context.rfoverval); + gphy->radio_off_context.valid = 0; + } + channel = phy->channel; + b43_gphy_channel_switch(dev, 6, 1); + b43_gphy_channel_switch(dev, channel, 0); + } else { + /* Turn radio OFF */ + u16 rfover, rfoverval; + + rfover = b43_phy_read(dev, B43_PHY_RFOVER); + rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); + gphy->radio_off_context.rfover = rfover; + gphy->radio_off_context.rfoverval = rfoverval; + gphy->radio_off_context.valid = 1; + b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C); + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73); + } +} + +static int b43_gphy_op_switch_channel(struct b43_wldev *dev, + unsigned int new_channel) +{ + if ((new_channel < 1) || (new_channel > 14)) + return -EINVAL; + b43_gphy_channel_switch(dev, new_channel, 0); + + return 0; +} + +static unsigned int b43_gphy_op_get_default_chan(struct b43_wldev *dev) +{ + return 1; /* Default to channel 1 */ +} + +static void b43_gphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna) +{ + struct b43_phy *phy = &dev->phy; + u16 tmp; + int autodiv = 0; + + if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1) + autodiv = 1; + + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP); + + b43_phy_maskset(dev, B43_PHY_BBANDCFG, ~B43_PHY_BBANDCFG_RXANT, + (autodiv ? B43_ANTENNA_AUTO1 : antenna) << + B43_PHY_BBANDCFG_RXANT_SHIFT); + + if (autodiv) { + tmp = b43_phy_read(dev, B43_PHY_ANTDWELL); + if (antenna == B43_ANTENNA_AUTO1) + tmp &= ~B43_PHY_ANTDWELL_AUTODIV1; + else + tmp |= B43_PHY_ANTDWELL_AUTODIV1; + b43_phy_write(dev, B43_PHY_ANTDWELL, tmp); + } + + tmp = b43_phy_read(dev, B43_PHY_ANTWRSETT); + if (autodiv) + tmp |= B43_PHY_ANTWRSETT_ARXDIV; + else + tmp &= ~B43_PHY_ANTWRSETT_ARXDIV; + b43_phy_write(dev, B43_PHY_ANTWRSETT, tmp); + + if (autodiv) + b43_phy_set(dev, B43_PHY_ANTWRSETT, B43_PHY_ANTWRSETT_ARXDIV); + else { + b43_phy_mask(dev, B43_PHY_ANTWRSETT, + B43_PHY_ANTWRSETT_ARXDIV); + } + + if (phy->rev >= 2) { + b43_phy_set(dev, B43_PHY_OFDM61, B43_PHY_OFDM61_10); + b43_phy_maskset(dev, B43_PHY_DIVSRCHGAINBACK, 0xFF00, 0x15); + + if (phy->rev == 2) + b43_phy_write(dev, B43_PHY_ADIVRELATED, 8); + else + b43_phy_maskset(dev, B43_PHY_ADIVRELATED, 0xFF00, 8); + } + if (phy->rev >= 6) + b43_phy_write(dev, B43_PHY_OFDM9B, 0xDC); + + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP); +} + +static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev, + enum b43_interference_mitigation mode) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + int currentmode; + + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + if ((phy->rev == 0) || (!phy->gmode)) + return -ENODEV; + + gphy->aci_wlan_automatic = 0; + switch (mode) { + case B43_INTERFMODE_AUTOWLAN: + gphy->aci_wlan_automatic = 1; + if (gphy->aci_enable) + mode = B43_INTERFMODE_MANUALWLAN; + else + mode = B43_INTERFMODE_NONE; + break; + case B43_INTERFMODE_NONE: + case B43_INTERFMODE_NONWLAN: + case B43_INTERFMODE_MANUALWLAN: + break; + default: + return -EINVAL; + } + + currentmode = gphy->interfmode; + if (currentmode == mode) + return 0; + if (currentmode != B43_INTERFMODE_NONE) + b43_radio_interference_mitigation_disable(dev, currentmode); + + if (mode == B43_INTERFMODE_NONE) { + gphy->aci_enable = 0; + gphy->aci_hw_rssi = 0; + } else + b43_radio_interference_mitigation_enable(dev, mode); + gphy->interfmode = mode; + + return 0; +} + +/* 
http://bcm-specs.sipsolutions.net/EstimatePowerOut + * This function converts a TSSI value to dBm in Q5.2 + */ +static s8 b43_gphy_estimate_power_out(struct b43_wldev *dev, s8 tssi) +{ + struct b43_phy_g *gphy = dev->phy.g; + s8 dbm; + s32 tmp; + + tmp = (gphy->tgt_idle_tssi - gphy->cur_idle_tssi + tssi); + tmp = clamp_val(tmp, 0x00, 0x3F); + dbm = gphy->tssi2dbm[tmp]; + + return dbm; +} + +static void b43_put_attenuation_into_ranges(struct b43_wldev *dev, + int *_bbatt, int *_rfatt) +{ + int rfatt = *_rfatt; + int bbatt = *_bbatt; + struct b43_txpower_lo_control *lo = dev->phy.g->lo_control; + + /* Get baseband and radio attenuation values into their permitted ranges. + * Radio attenuation affects power level 4 times as much as baseband. */ + + /* Range constants */ + const int rf_min = lo->rfatt_list.min_val; + const int rf_max = lo->rfatt_list.max_val; + const int bb_min = lo->bbatt_list.min_val; + const int bb_max = lo->bbatt_list.max_val; + + while (1) { + if (rfatt > rf_max && bbatt > bb_max - 4) + break; /* Can not get it into ranges */ + if (rfatt < rf_min && bbatt < bb_min + 4) + break; /* Can not get it into ranges */ + if (bbatt > bb_max && rfatt > rf_max - 1) + break; /* Can not get it into ranges */ + if (bbatt < bb_min && rfatt < rf_min + 1) + break; /* Can not get it into ranges */ + + if (bbatt > bb_max) { + bbatt -= 4; + rfatt += 1; + continue; + } + if (bbatt < bb_min) { + bbatt += 4; + rfatt -= 1; + continue; + } + if (rfatt > rf_max) { + rfatt -= 1; + bbatt += 4; + continue; + } + if (rfatt < rf_min) { + rfatt += 1; + bbatt -= 4; + continue; + } + break; + } + + *_rfatt = clamp_val(rfatt, rf_min, rf_max); + *_bbatt = clamp_val(bbatt, bb_min, bb_max); +} + +static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + int rfatt, bbatt; + u8 tx_control; + + b43_mac_suspend(dev); + + /* Calculate the new attenuation values. */ + bbatt = gphy->bbatt.att; + bbatt += gphy->bbatt_delta; + rfatt = gphy->rfatt.att; + rfatt += gphy->rfatt_delta; + + b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt); + tx_control = gphy->tx_control; + if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) { + if (rfatt <= 1) { + if (tx_control == 0) { + tx_control = + B43_TXCTL_PA2DB | + B43_TXCTL_TXMIX; + rfatt += 2; + bbatt += 2; + } else if (dev->dev->bus->sprom. 
+ boardflags_lo & + B43_BFL_PACTRL) { + bbatt += 4 * (rfatt - 2); + rfatt = 2; + } + } else if (rfatt > 4 && tx_control) { + tx_control = 0; + if (bbatt < 3) { + rfatt -= 3; + bbatt += 2; + } else { + rfatt -= 2; + bbatt -= 2; + } + } + } + /* Save the control values */ + gphy->tx_control = tx_control; + b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt); + gphy->rfatt.att = rfatt; + gphy->bbatt.att = bbatt; + + if (b43_debug(dev, B43_DBG_XMITPOWER)) + b43dbg(dev->wl, "Adjusting TX power\n"); + + /* Adjust the hardware */ + b43_phy_lock(dev); + b43_radio_lock(dev); + b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, + gphy->tx_control); + b43_radio_unlock(dev); + b43_phy_unlock(dev); + + b43_mac_enable(dev); +} + +static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev, + bool ignore_tssi) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + unsigned int average_tssi; + int cck_result, ofdm_result; + int estimated_pwr, desired_pwr, pwr_adjust; + int rfatt_delta, bbatt_delta; + unsigned int max_pwr; + + /* First get the average TSSI */ + cck_result = b43_phy_shm_tssi_read(dev, B43_SHM_SH_TSSI_CCK); + ofdm_result = b43_phy_shm_tssi_read(dev, B43_SHM_SH_TSSI_OFDM_G); + if ((cck_result < 0) && (ofdm_result < 0)) { + /* No TSSI information available */ + if (!ignore_tssi) + goto no_adjustment_needed; + cck_result = 0; + ofdm_result = 0; + } + if (cck_result < 0) + average_tssi = ofdm_result; + else if (ofdm_result < 0) + average_tssi = cck_result; + else + average_tssi = (cck_result + ofdm_result) / 2; + /* Merge the average with the stored value. */ + if (likely(gphy->average_tssi != 0xFF)) + average_tssi = (average_tssi + gphy->average_tssi) / 2; + gphy->average_tssi = average_tssi; + B43_WARN_ON(average_tssi >= B43_TSSI_MAX); + + /* Estimate the TX power emission based on the TSSI */ + estimated_pwr = b43_gphy_estimate_power_out(dev, average_tssi); + + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + max_pwr = dev->dev->bus->sprom.maxpwr_bg; + if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) + max_pwr -= 3; /* minus 0.75 */ + if (unlikely(max_pwr >= INT_TO_Q52(30/*dBm*/))) { + b43warn(dev->wl, + "Invalid max-TX-power value in SPROM.\n"); + max_pwr = INT_TO_Q52(20); /* fake it */ + dev->dev->bus->sprom.maxpwr_bg = max_pwr; + } + + /* Get desired power (in Q5.2) */ + if (phy->desired_txpower < 0) + desired_pwr = INT_TO_Q52(0); + else + desired_pwr = INT_TO_Q52(phy->desired_txpower); + /* And limit it. max_pwr already is Q5.2 */ + desired_pwr = clamp_val(desired_pwr, 0, max_pwr); + if (b43_debug(dev, B43_DBG_XMITPOWER)) { + b43dbg(dev->wl, + "[TX power] current = " Q52_FMT + " dBm, desired = " Q52_FMT + " dBm, max = " Q52_FMT "\n", + Q52_ARG(estimated_pwr), + Q52_ARG(desired_pwr), + Q52_ARG(max_pwr)); + } + + /* Calculate the adjustment delta. */ + pwr_adjust = desired_pwr - estimated_pwr; + if (pwr_adjust == 0) + goto no_adjustment_needed; + + /* RF attenuation delta. */ + rfatt_delta = ((pwr_adjust + 7) / 8); + /* Lower attenuation => Bigger power output. Negate it. */ + rfatt_delta = -rfatt_delta; + + /* Baseband attenuation delta. */ + bbatt_delta = pwr_adjust / 2; + /* Lower attenuation => Bigger power output. Negate it. */ + bbatt_delta = -bbatt_delta; + /* RF att affects power level 4 times as much as + * Baseband attennuation. Subtract it. */ + bbatt_delta -= 4 * rfatt_delta; + +#if B43_DEBUG + if (b43_debug(dev, B43_DBG_XMITPOWER)) { + int dbm = pwr_adjust < 0 ? 
-pwr_adjust : pwr_adjust; + b43dbg(dev->wl, + "[TX power deltas] %s" Q52_FMT " dBm => " + "bbatt-delta = %d, rfatt-delta = %d\n", + (pwr_adjust < 0 ? "-" : ""), Q52_ARG(dbm), + bbatt_delta, rfatt_delta); + } +#endif /* DEBUG */ + + /* So do we finally need to adjust something in hardware? */ + if ((rfatt_delta == 0) && (bbatt_delta == 0)) + goto no_adjustment_needed; + + /* Save the deltas for later when we adjust the power. */ + gphy->bbatt_delta = bbatt_delta; + gphy->rfatt_delta = rfatt_delta; + + /* We need to adjust the TX power on the device. */ + return B43_TXPWR_RES_NEED_ADJUST; + +no_adjustment_needed: + return B43_TXPWR_RES_DONE; +} + +static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + + b43_mac_suspend(dev); + //TODO: update_aci_moving_average + if (gphy->aci_enable && gphy->aci_wlan_automatic) { + if (!gphy->aci_enable && 1 /*TODO: not scanning? */ ) { + if (0 /*TODO: bunch of conditions */ ) { + phy->ops->interf_mitigation(dev, + B43_INTERFMODE_MANUALWLAN); + } + } else if (0 /*TODO*/) { + if (/*(aci_average > 1000) &&*/ !b43_gphy_aci_scan(dev)) + phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE); + } + } else if (gphy->interfmode == B43_INTERFMODE_NONWLAN && + phy->rev == 1) { + //TODO: implement rev1 workaround + } + b43_lo_g_maintanance_work(dev); + b43_mac_enable(dev); +} + +static void b43_gphy_op_pwork_60sec(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) + return; + + b43_mac_suspend(dev); + b43_calc_nrssi_slope(dev); + if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) { + u8 old_chan = phy->channel; + + /* VCO Calibration */ + if (old_chan >= 8) + b43_switch_channel(dev, 1); + else + b43_switch_channel(dev, 13); + b43_switch_channel(dev, old_chan); + } + b43_mac_enable(dev); +} + +const struct b43_phy_operations b43_phyops_g = { + .allocate = b43_gphy_op_allocate, + .free = b43_gphy_op_free, + .prepare_structs = b43_gphy_op_prepare_structs, + .prepare_hardware = b43_gphy_op_prepare_hardware, + .init = b43_gphy_op_init, + .exit = b43_gphy_op_exit, + .phy_read = b43_gphy_op_read, + .phy_write = b43_gphy_op_write, + .radio_read = b43_gphy_op_radio_read, + .radio_write = b43_gphy_op_radio_write, + .supports_hwpctl = b43_gphy_op_supports_hwpctl, + .software_rfkill = b43_gphy_op_software_rfkill, + .switch_analog = b43_phyop_switch_analog_generic, + .switch_channel = b43_gphy_op_switch_channel, + .get_default_chan = b43_gphy_op_get_default_chan, + .set_rx_antenna = b43_gphy_op_set_rx_antenna, + .interf_mitigation = b43_gphy_op_interf_mitigation, + .recalc_txpower = b43_gphy_op_recalc_txpower, + .adjust_txpower = b43_gphy_op_adjust_txpower, + .pwork_15sec = b43_gphy_op_pwork_15sec, + .pwork_60sec = b43_gphy_op_pwork_60sec, +}; diff --git a/drivers/net/wireless/b43/phy_g.h b/drivers/net/wireless/b43/phy_g.h new file mode 100644 index 0000000..8569fdd --- /dev/null +++ b/drivers/net/wireless/b43/phy_g.h @@ -0,0 +1,208 @@ +#ifndef LINUX_B43_PHY_G_H_ +#define LINUX_B43_PHY_G_H_ + +/* OFDM PHY registers are defined in the A-PHY header. */ +#include "phy_a.h" + +/* CCK (B) PHY Registers */ +#define B43_PHY_VERSION_CCK B43_PHY_CCK(0x00) /* Versioning register for B-PHY */ +#define B43_PHY_CCKBBANDCFG B43_PHY_CCK(0x01) /* Contains antenna 0/1 control bit */ +#define B43_PHY_PGACTL B43_PHY_CCK(0x15) /* PGA control */ +#define B43_PHY_PGACTL_LPF 0x1000 /* Low pass filter (?) 
*/ +#define B43_PHY_PGACTL_LOWBANDW 0x0040 /* Low bandwidth flag */ +#define B43_PHY_PGACTL_UNKNOWN 0xEFA0 +#define B43_PHY_FBCTL1 B43_PHY_CCK(0x18) /* Frequency bandwidth control 1 */ +#define B43_PHY_ITSSI B43_PHY_CCK(0x29) /* Idle TSSI */ +#define B43_PHY_LO_LEAKAGE B43_PHY_CCK(0x2D) /* Measured LO leakage */ +#define B43_PHY_ENERGY B43_PHY_CCK(0x33) /* Energy */ +#define B43_PHY_SYNCCTL B43_PHY_CCK(0x35) +#define B43_PHY_FBCTL2 B43_PHY_CCK(0x38) /* Frequency bandwidth control 2 */ +#define B43_PHY_DACCTL B43_PHY_CCK(0x60) /* DAC control */ +#define B43_PHY_RCCALOVER B43_PHY_CCK(0x78) /* RC calibration override */ + +/* Extended G-PHY Registers */ +#define B43_PHY_CLASSCTL B43_PHY_EXTG(0x02) /* Classify control */ +#define B43_PHY_GTABCTL B43_PHY_EXTG(0x03) /* G-PHY table control (see below) */ +#define B43_PHY_GTABOFF 0x03FF /* G-PHY table offset (see below) */ +#define B43_PHY_GTABNR 0xFC00 /* G-PHY table number (see below) */ +#define B43_PHY_GTABNR_SHIFT 10 +#define B43_PHY_GTABDATA B43_PHY_EXTG(0x04) /* G-PHY table data */ +#define B43_PHY_LO_MASK B43_PHY_EXTG(0x0F) /* Local Oscillator control mask */ +#define B43_PHY_LO_CTL B43_PHY_EXTG(0x10) /* Local Oscillator control */ +#define B43_PHY_RFOVER B43_PHY_EXTG(0x11) /* RF override */ +#define B43_PHY_RFOVERVAL B43_PHY_EXTG(0x12) /* RF override value */ +#define B43_PHY_RFOVERVAL_EXTLNA 0x8000 +#define B43_PHY_RFOVERVAL_LNA 0x7000 +#define B43_PHY_RFOVERVAL_LNA_SHIFT 12 +#define B43_PHY_RFOVERVAL_PGA 0x0F00 +#define B43_PHY_RFOVERVAL_PGA_SHIFT 8 +#define B43_PHY_RFOVERVAL_UNK 0x0010 /* Unknown, always set. */ +#define B43_PHY_RFOVERVAL_TRSWRX 0x00E0 +#define B43_PHY_RFOVERVAL_BW 0x0003 /* Bandwidth flags */ +#define B43_PHY_RFOVERVAL_BW_LPF 0x0001 /* Low Pass Filter */ +#define B43_PHY_RFOVERVAL_BW_LBW 0x0002 /* Low Bandwidth (when set), high when unset */ +#define B43_PHY_ANALOGOVER B43_PHY_EXTG(0x14) /* Analog override */ +#define B43_PHY_ANALOGOVERVAL B43_PHY_EXTG(0x15) /* Analog override value */ + + +/*** G-PHY table numbers */ +#define B43_GTAB(number, offset) (((number) << B43_PHY_GTABNR_SHIFT) | (offset)) +#define B43_GTAB_NRSSI B43_GTAB(0x00, 0) +#define B43_GTAB_TRFEMW B43_GTAB(0x0C, 0x120) +#define B43_GTAB_ORIGTR B43_GTAB(0x2E, 0x298) + +u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset); +void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value); + + +/* Returns the boolean whether "TX Magnification" is enabled. */ +#define has_tx_magnification(phy) \ + (((phy)->rev >= 2) && \ + ((phy)->radio_ver == 0x2050) && \ + ((phy)->radio_rev == 8)) +/* Card uses the loopback gain stuff */ +#define has_loopback_gain(phy) \ + (((phy)->rev > 1) || ((phy)->gmode)) + +/* Radio Attenuation (RF Attenuation) */ +struct b43_rfatt { + u8 att; /* Attenuation value */ + bool with_padmix; /* Flag, PAD Mixer enabled. */ +}; +struct b43_rfatt_list { + /* Attenuation values list */ + const struct b43_rfatt *list; + u8 len; + /* Minimum/Maximum attenuation values */ + u8 min_val; + u8 max_val; +}; + +/* Returns true, if the values are the same. 
*/ +static inline bool b43_compare_rfatt(const struct b43_rfatt *a, + const struct b43_rfatt *b) +{ + return ((a->att == b->att) && + (a->with_padmix == b->with_padmix)); +} + +/* Baseband Attenuation */ +struct b43_bbatt { + u8 att; /* Attenuation value */ +}; +struct b43_bbatt_list { + /* Attenuation values list */ + const struct b43_bbatt *list; + u8 len; + /* Minimum/Maximum attenuation values */ + u8 min_val; + u8 max_val; +}; + +/* Returns true, if the values are the same. */ +static inline bool b43_compare_bbatt(const struct b43_bbatt *a, + const struct b43_bbatt *b) +{ + return (a->att == b->att); +} + +/* tx_control bits. */ +#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */ +#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */ +#define B43_TXCTL_TXMIX 0x10 /* TX Mixer Gain */ + +struct b43_txpower_lo_control; + +struct b43_phy_g { + /* ACI (adjacent channel interference) flags. */ + bool aci_enable; + bool aci_wlan_automatic; + bool aci_hw_rssi; + + /* Radio switched on/off */ + bool radio_on; + struct { + /* Values saved when turning the radio off. + * They are needed when turning it on again. */ + bool valid; + u16 rfover; + u16 rfoverval; + } radio_off_context; + + u16 minlowsig[2]; + u16 minlowsigpos[2]; + + /* Pointer to the table used to convert a + * TSSI value to dBm-Q5.2 */ + const s8 *tssi2dbm; + /* tssi2dbm is kmalloc()ed. Only used for free()ing. */ + bool dyn_tssi_tbl; + /* Target idle TSSI */ + int tgt_idle_tssi; + /* Current idle TSSI */ + int cur_idle_tssi; + /* The current average TSSI. */ + u8 average_tssi; + /* Current TX power level attenuation control values */ + struct b43_bbatt bbatt; + struct b43_rfatt rfatt; + u8 tx_control; /* B43_TXCTL_XXX */ + /* The calculated attenuation deltas that are used later + * when adjusting the actual power output. */ + int bbatt_delta; + int rfatt_delta; + + /* LocalOscillator control values. */ + struct b43_txpower_lo_control *lo_control; + /* Values from b43_calc_loopback_gain() */ + s16 max_lb_gain; /* Maximum Loopback gain in hdB */ + s16 trsw_rx_gain; /* TRSW RX gain in hdB */ + s16 lna_lod_gain; /* LNA lod */ + s16 lna_gain; /* LNA */ + s16 pga_gain; /* PGA */ + + /* Current Interference Mitigation mode */ + int interfmode; + /* Stack of saved values from the Interference Mitigation code. + * Each value in the stack is layed out as follows: + * bit 0-11: offset + * bit 12-15: register ID + * bit 16-32: value + * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT + */ +#define B43_INTERFSTACK_SIZE 26 + u32 interfstack[B43_INTERFSTACK_SIZE]; //FIXME: use a data structure + + /* Saved values from the NRSSI Slope calculation */ + s16 nrssi[2]; + s32 nrssislope; + /* In memory nrssi lookup table. */ + s8 nrssi_lt[64]; + + u16 lofcal; + + u16 initval; //FIXME rename? + + /* The device does address auto increment for the OFDM tables. + * We cache the previously used address here and omit the address + * write on the next table access, if possible. */ + u16 ofdmtab_addr; /* The address currently set in hardware. */ + enum { /* The last data flow direction. 
*/ + B43_OFDMTAB_DIRECTION_UNKNOWN = 0, + B43_OFDMTAB_DIRECTION_READ, + B43_OFDMTAB_DIRECTION_WRITE, + } ofdmtab_addr_direction; +}; + +void b43_gphy_set_baseband_attenuation(struct b43_wldev *dev, + u16 baseband_attenuation); +void b43_gphy_channel_switch(struct b43_wldev *dev, + unsigned int channel, + bool synthetic_pu_workaround); +u8 * b43_generate_dyn_tssi2dbm_tab(struct b43_wldev *dev, + s16 pab0, s16 pab1, s16 pab2); + +struct b43_phy_operations; +extern const struct b43_phy_operations b43_phyops_g; + +#endif /* LINUX_B43_PHY_G_H_ */ diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c new file mode 100644 index 0000000..185219e --- /dev/null +++ b/drivers/net/wireless/b43/phy_lp.c @@ -0,0 +1,2733 @@ +/* + + Broadcom B43 wireless driver + IEEE 802.11a/g LP-PHY driver + + Copyright (c) 2008-2009 Michael Buesch + Copyright (c) 2009 Gábor Stefanik + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "main.h" +#include "phy_lp.h" +#include "phy_common.h" +#include "tables_lpphy.h" + + +static inline u16 channel2freq_lp(u8 channel) +{ + if (channel < 14) + return (2407 + 5 * channel); + else if (channel == 14) + return 2484; + else if (channel < 184) + return (5000 + 5 * channel); + else + return (4000 + 5 * channel); +} + +static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev) +{ + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + return 1; + return 36; +} + +static int b43_lpphy_op_allocate(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy; + + lpphy = kzalloc(sizeof(*lpphy), GFP_KERNEL); + if (!lpphy) + return -ENOMEM; + dev->phy.lp = lpphy; + + return 0; +} + +static void b43_lpphy_op_prepare_structs(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_lp *lpphy = phy->lp; + + memset(lpphy, 0, sizeof(*lpphy)); + lpphy->antenna = B43_ANTENNA_DEFAULT; + + //TODO +} + +static void b43_lpphy_op_free(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + + kfree(lpphy); + dev->phy.lp = NULL; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */ +static void lpphy_read_band_sprom(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + struct ssb_bus *bus = dev->dev->bus; + u16 cckpo, maxpwr; + u32 ofdmpo; + int i; + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + lpphy->tx_isolation_med_band = bus->sprom.tri2g; + lpphy->bx_arch = bus->sprom.bxa2g; + lpphy->rx_pwr_offset = bus->sprom.rxpo2g; + lpphy->rssi_vf = bus->sprom.rssismf2g; + lpphy->rssi_vc = bus->sprom.rssismc2g; + lpphy->rssi_gs = bus->sprom.rssisav2g; + lpphy->txpa[0] = bus->sprom.pa0b0; + lpphy->txpa[1] = bus->sprom.pa0b1; + lpphy->txpa[2] = bus->sprom.pa0b2; + maxpwr = bus->sprom.maxpwr_bg; + lpphy->max_tx_pwr_med_band = maxpwr; + cckpo = bus->sprom.cck2gpo; + /* + * We don't read SPROM's opo 
as specs say. On rev8 SPROMs + * opo == ofdm2gpo and we don't know any SSB with LP-PHY + * and SPROM rev below 8. + */ + B43_WARN_ON(bus->sprom.revision < 8); + ofdmpo = bus->sprom.ofdm2gpo; + if (cckpo) { + for (i = 0; i < 4; i++) { + lpphy->tx_max_rate[i] = + maxpwr - (ofdmpo & 0xF) * 2; + ofdmpo >>= 4; + } + ofdmpo = bus->sprom.ofdm2gpo; + for (i = 4; i < 15; i++) { + lpphy->tx_max_rate[i] = + maxpwr - (ofdmpo & 0xF) * 2; + ofdmpo >>= 4; + } + } else { + ofdmpo &= 0xFF; + for (i = 0; i < 4; i++) + lpphy->tx_max_rate[i] = maxpwr; + for (i = 4; i < 15; i++) + lpphy->tx_max_rate[i] = maxpwr - ofdmpo; + } + } else { /* 5GHz */ + lpphy->tx_isolation_low_band = bus->sprom.tri5gl; + lpphy->tx_isolation_med_band = bus->sprom.tri5g; + lpphy->tx_isolation_hi_band = bus->sprom.tri5gh; + lpphy->bx_arch = bus->sprom.bxa5g; + lpphy->rx_pwr_offset = bus->sprom.rxpo5g; + lpphy->rssi_vf = bus->sprom.rssismf5g; + lpphy->rssi_vc = bus->sprom.rssismc5g; + lpphy->rssi_gs = bus->sprom.rssisav5g; + lpphy->txpa[0] = bus->sprom.pa1b0; + lpphy->txpa[1] = bus->sprom.pa1b1; + lpphy->txpa[2] = bus->sprom.pa1b2; + lpphy->txpal[0] = bus->sprom.pa1lob0; + lpphy->txpal[1] = bus->sprom.pa1lob1; + lpphy->txpal[2] = bus->sprom.pa1lob2; + lpphy->txpah[0] = bus->sprom.pa1hib0; + lpphy->txpah[1] = bus->sprom.pa1hib1; + lpphy->txpah[2] = bus->sprom.pa1hib2; + maxpwr = bus->sprom.maxpwr_al; + ofdmpo = bus->sprom.ofdm5glpo; + lpphy->max_tx_pwr_low_band = maxpwr; + for (i = 4; i < 12; i++) { + lpphy->tx_max_ratel[i] = maxpwr - (ofdmpo & 0xF) * 2; + ofdmpo >>= 4; + } + maxpwr = bus->sprom.maxpwr_a; + ofdmpo = bus->sprom.ofdm5gpo; + lpphy->max_tx_pwr_med_band = maxpwr; + for (i = 4; i < 12; i++) { + lpphy->tx_max_rate[i] = maxpwr - (ofdmpo & 0xF) * 2; + ofdmpo >>= 4; + } + maxpwr = bus->sprom.maxpwr_ah; + ofdmpo = bus->sprom.ofdm5ghpo; + lpphy->max_tx_pwr_hi_band = maxpwr; + for (i = 4; i < 12; i++) { + lpphy->tx_max_rateh[i] = maxpwr - (ofdmpo & 0xF) * 2; + ofdmpo >>= 4; + } + } +} + +static void lpphy_adjust_gain_table(struct b43_wldev *dev, u32 freq) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + u16 temp[3]; + u16 isolation; + + B43_WARN_ON(dev->phy.rev >= 2); + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + isolation = lpphy->tx_isolation_med_band; + else if (freq <= 5320) + isolation = lpphy->tx_isolation_low_band; + else if (freq <= 5700) + isolation = lpphy->tx_isolation_med_band; + else + isolation = lpphy->tx_isolation_hi_band; + + temp[0] = ((isolation - 26) / 12) << 12; + temp[1] = temp[0] + 0x1000; + temp[2] = temp[0] + 0x2000; + + b43_lptab_write_bulk(dev, B43_LPTAB16(13, 0), 3, temp); + b43_lptab_write_bulk(dev, B43_LPTAB16(12, 0), 3, temp); +} + +static void lpphy_table_init(struct b43_wldev *dev) +{ + u32 freq = channel2freq_lp(b43_lpphy_op_get_default_chan(dev)); + + if (dev->phy.rev < 2) + lpphy_rev0_1_table_init(dev); + else + lpphy_rev2plus_table_init(dev); + + lpphy_init_tx_gain_table(dev); + + if (dev->phy.rev < 2) + lpphy_adjust_gain_table(dev, freq); +} + +static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy_lp *lpphy = dev->phy.lp; + u16 tmp, tmp2; + + b43_phy_mask(dev, B43_LPPHY_AFE_DAC_CTL, 0xF7FF); + b43_phy_write(dev, B43_LPPHY_AFE_CTL, 0); + b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVR, 0); + b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_0, 0); + b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, 0); + b43_phy_set(dev, B43_LPPHY_AFE_DAC_CTL, 0x0004); + b43_phy_maskset(dev, B43_LPPHY_OFDMSYNCTHRESH0, 0xFF00, 0x0078); + b43_phy_maskset(dev, 
B43_LPPHY_CLIPCTRTHRESH, 0x83FF, 0x5800); + b43_phy_write(dev, B43_LPPHY_ADC_COMPENSATION_CTL, 0x0016); + b43_phy_maskset(dev, B43_LPPHY_AFE_ADC_CTL_0, 0xFFF8, 0x0004); + b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0x00FF, 0x5400); + b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0x00FF, 0x2400); + b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x2100); + b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0x0006); + b43_phy_mask(dev, B43_LPPHY_RX_RADIO_CTL, 0xFFFE); + b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFFE0, 0x0005); + b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFC1F, 0x0180); + b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0x83FF, 0x3C00); + b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xFFF0, 0x0005); + b43_phy_maskset(dev, B43_LPPHY_GAIN_MISMATCH_LIMIT, 0xFFC0, 0x001A); + b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0xFF00, 0x00B3); + b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0x00FF, 0xAD00); + b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB, + 0xFF00, lpphy->rx_pwr_offset); + if ((bus->sprom.boardflags_lo & B43_BFL_FEM) && + ((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) || + (bus->sprom.boardflags_hi & B43_BFH_PAREF))) { + ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28); + ssb_pmu_set_ldo_paref(&bus->chipco, true); + if (dev->phy.rev == 0) { + b43_phy_maskset(dev, B43_LPPHY_LP_RF_SIGNAL_LUT, + 0xFFCF, 0x0010); + } + b43_lptab_write(dev, B43_LPTAB16(11, 7), 60); + } else { + ssb_pmu_set_ldo_paref(&bus->chipco, false); + b43_phy_maskset(dev, B43_LPPHY_LP_RF_SIGNAL_LUT, + 0xFFCF, 0x0020); + b43_lptab_write(dev, B43_LPTAB16(11, 7), 100); + } + tmp = lpphy->rssi_vf | lpphy->rssi_vc << 4 | 0xA000; + b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_0, tmp); + if (bus->sprom.boardflags_hi & B43_BFH_RSSIINV) + b43_phy_maskset(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0xF000, 0x0AAA); + else + b43_phy_maskset(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0xF000, 0x02AA); + b43_lptab_write(dev, B43_LPTAB16(11, 1), 24); + b43_phy_maskset(dev, B43_LPPHY_RX_RADIO_CTL, + 0xFFF9, (lpphy->bx_arch << 1)); + if (dev->phy.rev == 1 && + (bus->sprom.boardflags_hi & B43_BFH_FEM_BT)) { + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x000A); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0x3F00, 0x0900); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x000A); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0B00); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x000A); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0400); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x000A); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0B00); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_5, 0xFFC0, 0x000A); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_5, 0xC0FF, 0x0900); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_6, 0xFFC0, 0x000A); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_6, 0xC0FF, 0x0B00); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xFFC0, 0x000A); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xC0FF, 0x0900); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00); + } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ || + (bus->boardinfo.type == 0x048A) || ((dev->phy.rev == 0) && + (bus->sprom.boardflags_lo & B43_BFL_FEM))) { + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0400); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0001); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0500); + 
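/*
 * b43_phy_maskset(dev, reg, mask, set) is a read-modify-write helper: the
 * register is ANDed with 'mask' and then ORed with 'set'.  Each pair of
 * calls in this branch therefore programs two independent fields of one
 * TR (transmit/receive switch) lookup register: mask 0xFFC0 rewrites
 * bits 0-5, mask 0xC0FF rewrites bits 8-13, leaving the rest untouched.
 */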
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x0002); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0800); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0002); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0A00); + } else if (dev->phy.rev == 1 || + (bus->sprom.boardflags_lo & B43_BFL_FEM)) { + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0004); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0800); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0004); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0C00); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x0002); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0100); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0002); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0300); + } else { + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x000A); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0900); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x000A); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0B00); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x0006); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0500); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0006); + b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0700); + } + if (dev->phy.rev == 1 && (bus->sprom.boardflags_hi & B43_BFH_PAREF)) { + b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_5, B43_LPPHY_TR_LOOKUP_1); + b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_6, B43_LPPHY_TR_LOOKUP_2); + b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_7, B43_LPPHY_TR_LOOKUP_3); + b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_8, B43_LPPHY_TR_LOOKUP_4); + } + if ((bus->sprom.boardflags_hi & B43_BFH_FEM_BT) && + (bus->chip_id == 0x5354) && + (bus->chip_package == SSB_CHIPPACK_BCM4712S)) { + b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0006); + b43_phy_write(dev, B43_LPPHY_GPIO_SELECT, 0x0005); + b43_phy_write(dev, B43_LPPHY_GPIO_OUTEN, 0xFFFF); + //FIXME the Broadcom driver caches & delays this HF write! 
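/*
 * b43_hf_read()/b43_hf_write() access the host flags that the driver
 * shares with the firmware, so the read-OR-write below sets
 * B43_HF_PR45960W without clobbering any other host flag.
 */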
+ b43_hf_write(dev, b43_hf_read(dev) | B43_HF_PR45960W); + } + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x8000); + b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0040); + b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0xA400); + b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0x0B00); + b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x0007); + b43_phy_maskset(dev, B43_LPPHY_DSSS_CONFIRM_CNT, 0xFFF8, 0x0003); + b43_phy_maskset(dev, B43_LPPHY_DSSS_CONFIRM_CNT, 0xFFC7, 0x0020); + b43_phy_mask(dev, B43_LPPHY_IDLEAFTERPKTRXTO, 0x00FF); + } else { /* 5GHz */ + b43_phy_mask(dev, B43_LPPHY_LP_PHY_CTL, 0x7FFF); + b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFBF); + } + if (dev->phy.rev == 1) { + tmp = b43_phy_read(dev, B43_LPPHY_CLIPCTRTHRESH); + tmp2 = (tmp & 0x03E0) >> 5; + tmp2 |= tmp2 << 5; + b43_phy_write(dev, B43_LPPHY_4C3, tmp2); + tmp = b43_phy_read(dev, B43_LPPHY_GAINDIRECTMISMATCH); + tmp2 = (tmp & 0x1F00) >> 8; + tmp2 |= tmp2 << 5; + b43_phy_write(dev, B43_LPPHY_4C4, tmp2); + tmp = b43_phy_read(dev, B43_LPPHY_VERYLOWGAINDB); + tmp2 = tmp & 0x00FF; + tmp2 |= tmp << 8; + b43_phy_write(dev, B43_LPPHY_4C5, tmp2); + } +} + +static void lpphy_save_dig_flt_state(struct b43_wldev *dev) +{ + static const u16 addr[] = { + B43_PHY_OFDM(0xC1), + B43_PHY_OFDM(0xC2), + B43_PHY_OFDM(0xC3), + B43_PHY_OFDM(0xC4), + B43_PHY_OFDM(0xC5), + B43_PHY_OFDM(0xC6), + B43_PHY_OFDM(0xC7), + B43_PHY_OFDM(0xC8), + B43_PHY_OFDM(0xCF), + }; + + static const u16 coefs[] = { + 0xDE5E, 0xE832, 0xE331, 0x4D26, + 0x0026, 0x1420, 0x0020, 0xFE08, + 0x0008, + }; + + struct b43_phy_lp *lpphy = dev->phy.lp; + int i; + + for (i = 0; i < ARRAY_SIZE(addr); i++) { + lpphy->dig_flt_state[i] = b43_phy_read(dev, addr[i]); + b43_phy_write(dev, addr[i], coefs[i]); + } +} + +static void lpphy_restore_dig_flt_state(struct b43_wldev *dev) +{ + static const u16 addr[] = { + B43_PHY_OFDM(0xC1), + B43_PHY_OFDM(0xC2), + B43_PHY_OFDM(0xC3), + B43_PHY_OFDM(0xC4), + B43_PHY_OFDM(0xC5), + B43_PHY_OFDM(0xC6), + B43_PHY_OFDM(0xC7), + B43_PHY_OFDM(0xC8), + B43_PHY_OFDM(0xCF), + }; + + struct b43_phy_lp *lpphy = dev->phy.lp; + int i; + + for (i = 0; i < ARRAY_SIZE(addr); i++) + b43_phy_write(dev, addr[i], lpphy->dig_flt_state[i]); +} + +static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy_lp *lpphy = dev->phy.lp; + + b43_phy_write(dev, B43_LPPHY_AFE_DAC_CTL, 0x50); + b43_phy_write(dev, B43_LPPHY_AFE_CTL, 0x8800); + b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVR, 0); + b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0); + b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_0, 0); + b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, 0); + b43_phy_write(dev, B43_PHY_OFDM(0xF9), 0); + b43_phy_write(dev, B43_LPPHY_TR_LOOKUP_1, 0); + b43_phy_set(dev, B43_LPPHY_ADC_COMPENSATION_CTL, 0x10); + b43_phy_maskset(dev, B43_LPPHY_OFDMSYNCTHRESH0, 0xFF00, 0xB4); + b43_phy_maskset(dev, B43_LPPHY_DCOFFSETTRANSIENT, 0xF8FF, 0x200); + b43_phy_maskset(dev, B43_LPPHY_DCOFFSETTRANSIENT, 0xFF00, 0x7F); + b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xFF0F, 0x40); + b43_phy_maskset(dev, B43_LPPHY_PREAMBLECONFIRMTO, 0xFF00, 0x2); + b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x4000); + b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x2000); + b43_phy_set(dev, B43_PHY_OFDM(0x10A), 0x1); + if (bus->boardinfo.rev >= 0x18) { + b43_lptab_write(dev, B43_LPTAB32(17, 65), 0xEC); + b43_phy_maskset(dev, B43_PHY_OFDM(0x10A), 0xFF01, 0x14); + } else { + b43_phy_maskset(dev, 
B43_PHY_OFDM(0x10A), 0xFF01, 0x10); + } + b43_phy_maskset(dev, B43_PHY_OFDM(0xDF), 0xFF00, 0xF4); + b43_phy_maskset(dev, B43_PHY_OFDM(0xDF), 0x00FF, 0xF100); + b43_phy_write(dev, B43_LPPHY_CLIPTHRESH, 0x48); + b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0xFF00, 0x46); + b43_phy_maskset(dev, B43_PHY_OFDM(0xE4), 0xFF00, 0x10); + b43_phy_maskset(dev, B43_LPPHY_PWR_THRESH1, 0xFFF0, 0x9); + b43_phy_mask(dev, B43_LPPHY_GAINDIRECTMISMATCH, ~0xF); + b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0x00FF, 0x5500); + b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFC1F, 0xA0); + b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xE0FF, 0x300); + b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0x00FF, 0x2A00); + if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) { + b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x2100); + b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0xA); + } else { + b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x1E00); + b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0xD); + } + b43_phy_maskset(dev, B43_PHY_OFDM(0xFE), 0xFFE0, 0x1F); + b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0xFFE0, 0xC); + b43_phy_maskset(dev, B43_PHY_OFDM(0x100), 0xFF00, 0x19); + b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0x03FF, 0x3C00); + b43_phy_maskset(dev, B43_PHY_OFDM(0xFE), 0xFC1F, 0x3E0); + b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0xFFE0, 0xC); + b43_phy_maskset(dev, B43_PHY_OFDM(0x100), 0x00FF, 0x1900); + b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0x83FF, 0x5800); + b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFFE0, 0x12); + b43_phy_maskset(dev, B43_LPPHY_GAINMISMATCH, 0x0FFF, 0x9000); + + if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) { + b43_lptab_write(dev, B43_LPTAB16(0x08, 0x14), 0); + b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40); + } + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40); + b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00); + b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6); + b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0x9D00); + b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0xFF00, 0xA1); + b43_phy_mask(dev, B43_LPPHY_IDLEAFTERPKTRXTO, 0x00FF); + } else /* 5GHz */ + b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x40); + + b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0xFF00, 0xB3); + b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0x00FF, 0xAD00); + b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB, 0xFF00, lpphy->rx_pwr_offset); + b43_phy_set(dev, B43_LPPHY_RESET_CTL, 0x44); + b43_phy_write(dev, B43_LPPHY_RESET_CTL, 0x80); + b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_0, 0xA954); + b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_1, + 0x2000 | ((u16)lpphy->rssi_gs << 10) | + ((u16)lpphy->rssi_vc << 4) | lpphy->rssi_vf); + + if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) { + b43_phy_set(dev, B43_LPPHY_AFE_ADC_CTL_0, 0x1C); + b43_phy_maskset(dev, B43_LPPHY_AFE_CTL, 0x00FF, 0x8800); + b43_phy_maskset(dev, B43_LPPHY_AFE_ADC_CTL_1, 0xFC3C, 0x0400); + } + + lpphy_save_dig_flt_state(dev); +} + +static void lpphy_baseband_init(struct b43_wldev *dev) +{ + lpphy_table_init(dev); + if (dev->phy.rev >= 2) + lpphy_baseband_rev2plus_init(dev); + else + lpphy_baseband_rev0_1_init(dev); +} + +struct b2062_freqdata { + u16 freq; + u8 data[6]; +}; + +/* Initialize the 2062 radio. 
*/ +static void lpphy_2062_init(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + struct ssb_bus *bus = dev->dev->bus; + u32 crystalfreq, tmp, ref; + unsigned int i; + const struct b2062_freqdata *fd = NULL; + + static const struct b2062_freqdata freqdata_tab[] = { + { .freq = 12000, .data[0] = 6, .data[1] = 6, .data[2] = 6, + .data[3] = 6, .data[4] = 10, .data[5] = 6, }, + { .freq = 13000, .data[0] = 4, .data[1] = 4, .data[2] = 4, + .data[3] = 4, .data[4] = 11, .data[5] = 7, }, + { .freq = 14400, .data[0] = 3, .data[1] = 3, .data[2] = 3, + .data[3] = 3, .data[4] = 12, .data[5] = 7, }, + { .freq = 16200, .data[0] = 3, .data[1] = 3, .data[2] = 3, + .data[3] = 3, .data[4] = 13, .data[5] = 8, }, + { .freq = 18000, .data[0] = 2, .data[1] = 2, .data[2] = 2, + .data[3] = 2, .data[4] = 14, .data[5] = 8, }, + { .freq = 19200, .data[0] = 1, .data[1] = 1, .data[2] = 1, + .data[3] = 1, .data[4] = 14, .data[5] = 9, }, + }; + + b2062_upload_init_table(dev); + + b43_radio_write(dev, B2062_N_TX_CTL3, 0); + b43_radio_write(dev, B2062_N_TX_CTL4, 0); + b43_radio_write(dev, B2062_N_TX_CTL5, 0); + b43_radio_write(dev, B2062_N_TX_CTL6, 0); + b43_radio_write(dev, B2062_N_PDN_CTL0, 0x40); + b43_radio_write(dev, B2062_N_PDN_CTL0, 0); + b43_radio_write(dev, B2062_N_CALIB_TS, 0x10); + b43_radio_write(dev, B2062_N_CALIB_TS, 0); + if (dev->phy.rev > 0) { + b43_radio_write(dev, B2062_S_BG_CTL1, + (b43_radio_read(dev, B2062_N_COMM2) >> 1) | 0x80); + } + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + b43_radio_set(dev, B2062_N_TSSI_CTL0, 0x1); + else + b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1); + + /* Get the crystal freq, in Hz. */ + crystalfreq = bus->chipco.pmu.crystalfreq * 1000; + + B43_WARN_ON(!(bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)); + B43_WARN_ON(crystalfreq == 0); + + if (crystalfreq <= 30000000) { + lpphy->pdiv = 1; + b43_radio_mask(dev, B2062_S_RFPLL_CTL1, 0xFFFB); + } else { + lpphy->pdiv = 2; + b43_radio_set(dev, B2062_S_RFPLL_CTL1, 0x4); + } + + tmp = (((800000000 * lpphy->pdiv + crystalfreq) / + (2 * crystalfreq)) - 8) & 0xFF; + b43_radio_write(dev, B2062_S_RFPLL_CTL7, tmp); + + tmp = (((100 * crystalfreq + 16000000 * lpphy->pdiv) / + (32000000 * lpphy->pdiv)) - 1) & 0xFF; + b43_radio_write(dev, B2062_S_RFPLL_CTL18, tmp); + + tmp = (((2 * crystalfreq + 1000000 * lpphy->pdiv) / + (2000000 * lpphy->pdiv)) - 1) & 0xFF; + b43_radio_write(dev, B2062_S_RFPLL_CTL19, tmp); + + ref = (1000 * lpphy->pdiv + 2 * crystalfreq) / (2000 * lpphy->pdiv); + ref &= 0xFFFF; + for (i = 0; i < ARRAY_SIZE(freqdata_tab); i++) { + if (ref < freqdata_tab[i].freq) { + fd = &freqdata_tab[i]; + break; + } + } + if (!fd) + fd = &freqdata_tab[ARRAY_SIZE(freqdata_tab) - 1]; + b43dbg(dev->wl, "b2062: Using crystal tab entry %u kHz.\n", + fd->freq); /* FIXME: Keep this printk until the code is fully debugged. */ + + b43_radio_write(dev, B2062_S_RFPLL_CTL8, + ((u16)(fd->data[1]) << 4) | fd->data[0]); + b43_radio_write(dev, B2062_S_RFPLL_CTL9, + ((u16)(fd->data[3]) << 4) | fd->data[2]); + b43_radio_write(dev, B2062_S_RFPLL_CTL10, fd->data[4]); + b43_radio_write(dev, B2062_S_RFPLL_CTL11, fd->data[5]); +} + +/* Initialize the 2063 radio. 
*/ +static void lpphy_2063_init(struct b43_wldev *dev) +{ + b2063_upload_init_table(dev); + b43_radio_write(dev, B2063_LOGEN_SP5, 0); + b43_radio_set(dev, B2063_COMM8, 0x38); + b43_radio_write(dev, B2063_REG_SP1, 0x56); + b43_radio_mask(dev, B2063_RX_BB_CTL2, ~0x2); + b43_radio_write(dev, B2063_PA_SP7, 0); + b43_radio_write(dev, B2063_TX_RF_SP6, 0x20); + b43_radio_write(dev, B2063_TX_RF_SP9, 0x40); + if (dev->phy.rev == 2) { + b43_radio_write(dev, B2063_PA_SP3, 0xa0); + b43_radio_write(dev, B2063_PA_SP4, 0xa0); + b43_radio_write(dev, B2063_PA_SP2, 0x18); + } else { + b43_radio_write(dev, B2063_PA_SP3, 0x20); + b43_radio_write(dev, B2063_PA_SP2, 0x20); + } +} + +struct lpphy_stx_table_entry { + u16 phy_offset; + u16 phy_shift; + u16 rf_addr; + u16 rf_shift; + u16 mask; +}; + +static const struct lpphy_stx_table_entry lpphy_stx_table[] = { + { .phy_offset = 2, .phy_shift = 6, .rf_addr = 0x3d, .rf_shift = 3, .mask = 0x01, }, + { .phy_offset = 1, .phy_shift = 12, .rf_addr = 0x4c, .rf_shift = 1, .mask = 0x01, }, + { .phy_offset = 1, .phy_shift = 8, .rf_addr = 0x50, .rf_shift = 0, .mask = 0x7f, }, + { .phy_offset = 0, .phy_shift = 8, .rf_addr = 0x44, .rf_shift = 0, .mask = 0xff, }, + { .phy_offset = 1, .phy_shift = 0, .rf_addr = 0x4a, .rf_shift = 0, .mask = 0xff, }, + { .phy_offset = 0, .phy_shift = 4, .rf_addr = 0x4d, .rf_shift = 0, .mask = 0xff, }, + { .phy_offset = 1, .phy_shift = 4, .rf_addr = 0x4e, .rf_shift = 0, .mask = 0xff, }, + { .phy_offset = 0, .phy_shift = 12, .rf_addr = 0x4f, .rf_shift = 0, .mask = 0x0f, }, + { .phy_offset = 1, .phy_shift = 0, .rf_addr = 0x4f, .rf_shift = 4, .mask = 0x0f, }, + { .phy_offset = 3, .phy_shift = 0, .rf_addr = 0x49, .rf_shift = 0, .mask = 0x0f, }, + { .phy_offset = 4, .phy_shift = 3, .rf_addr = 0x46, .rf_shift = 4, .mask = 0x07, }, + { .phy_offset = 3, .phy_shift = 15, .rf_addr = 0x46, .rf_shift = 0, .mask = 0x01, }, + { .phy_offset = 4, .phy_shift = 0, .rf_addr = 0x46, .rf_shift = 1, .mask = 0x07, }, + { .phy_offset = 3, .phy_shift = 8, .rf_addr = 0x48, .rf_shift = 4, .mask = 0x07, }, + { .phy_offset = 3, .phy_shift = 11, .rf_addr = 0x48, .rf_shift = 0, .mask = 0x0f, }, + { .phy_offset = 3, .phy_shift = 4, .rf_addr = 0x49, .rf_shift = 4, .mask = 0x0f, }, + { .phy_offset = 2, .phy_shift = 15, .rf_addr = 0x45, .rf_shift = 0, .mask = 0x01, }, + { .phy_offset = 5, .phy_shift = 13, .rf_addr = 0x52, .rf_shift = 4, .mask = 0x07, }, + { .phy_offset = 6, .phy_shift = 0, .rf_addr = 0x52, .rf_shift = 7, .mask = 0x01, }, + { .phy_offset = 5, .phy_shift = 3, .rf_addr = 0x41, .rf_shift = 5, .mask = 0x07, }, + { .phy_offset = 5, .phy_shift = 6, .rf_addr = 0x41, .rf_shift = 0, .mask = 0x0f, }, + { .phy_offset = 5, .phy_shift = 10, .rf_addr = 0x42, .rf_shift = 5, .mask = 0x07, }, + { .phy_offset = 4, .phy_shift = 15, .rf_addr = 0x42, .rf_shift = 0, .mask = 0x01, }, + { .phy_offset = 5, .phy_shift = 0, .rf_addr = 0x42, .rf_shift = 1, .mask = 0x07, }, + { .phy_offset = 4, .phy_shift = 11, .rf_addr = 0x43, .rf_shift = 4, .mask = 0x0f, }, + { .phy_offset = 4, .phy_shift = 7, .rf_addr = 0x43, .rf_shift = 0, .mask = 0x0f, }, + { .phy_offset = 4, .phy_shift = 6, .rf_addr = 0x45, .rf_shift = 1, .mask = 0x01, }, + { .phy_offset = 2, .phy_shift = 7, .rf_addr = 0x40, .rf_shift = 4, .mask = 0x0f, }, + { .phy_offset = 2, .phy_shift = 11, .rf_addr = 0x40, .rf_shift = 0, .mask = 0x0f, }, +}; + +static void lpphy_sync_stx(struct b43_wldev *dev) +{ + const struct lpphy_stx_table_entry *e; + unsigned int i; + u16 tmp; + + for (i = 0; i < ARRAY_SIZE(lpphy_stx_table); i++) { + e = 
&lpphy_stx_table[i]; + tmp = b43_radio_read(dev, e->rf_addr); + tmp >>= e->rf_shift; + tmp <<= e->phy_shift; + b43_phy_maskset(dev, B43_PHY_OFDM(0xF2 + e->phy_offset), + ~(e->mask << e->phy_shift), tmp); + } +} + +static void lpphy_radio_init(struct b43_wldev *dev) +{ + /* The radio is attached through the 4wire bus. */ + b43_phy_set(dev, B43_LPPHY_FOURWIRE_CTL, 0x2); + udelay(1); + b43_phy_mask(dev, B43_LPPHY_FOURWIRE_CTL, 0xFFFD); + udelay(1); + + if (dev->phy.radio_ver == 0x2062) { + lpphy_2062_init(dev); + } else { + lpphy_2063_init(dev); + lpphy_sync_stx(dev); + b43_phy_write(dev, B43_PHY_OFDM(0xF0), 0x5F80); + b43_phy_write(dev, B43_PHY_OFDM(0xF1), 0); + if (dev->dev->bus->chip_id == 0x4325) { + // TODO SSB PMU recalibration + } + } +} + +struct lpphy_iq_est { u32 iq_prod, i_pwr, q_pwr; }; + +static void lpphy_set_rc_cap(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + + u8 rc_cap = (lpphy->rc_cap & 0x1F) >> 1; + + if (dev->phy.rev == 1) //FIXME check channel 14! + rc_cap = min_t(u8, rc_cap + 5, 15); + + b43_radio_write(dev, B2062_N_RXBB_CALIB2, + max_t(u8, lpphy->rc_cap - 4, 0x80)); + b43_radio_write(dev, B2062_N_TX_CTL_A, rc_cap | 0x80); + b43_radio_write(dev, B2062_S_RXG_CNT16, + ((lpphy->rc_cap & 0x1F) >> 2) | 0x80); +} + +static u8 lpphy_get_bb_mult(struct b43_wldev *dev) +{ + return (b43_lptab_read(dev, B43_LPTAB16(0, 87)) & 0xFF00) >> 8; +} + +static void lpphy_set_bb_mult(struct b43_wldev *dev, u8 bb_mult) +{ + b43_lptab_write(dev, B43_LPTAB16(0, 87), (u16)bb_mult << 8); +} + +static void lpphy_set_deaf(struct b43_wldev *dev, bool user) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + + if (user) + lpphy->crs_usr_disable = 1; + else + lpphy->crs_sys_disable = 1; + b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80); +} + +static void lpphy_clear_deaf(struct b43_wldev *dev, bool user) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + + if (user) + lpphy->crs_usr_disable = 0; + else + lpphy->crs_sys_disable = 0; + + if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, + 0xFF1F, 0x60); + else + b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, + 0xFF1F, 0x20); + } +} + +static void lpphy_set_trsw_over(struct b43_wldev *dev, bool tx, bool rx) +{ + u16 trsw = (tx << 1) | rx; + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, trsw); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3); +} + +static void lpphy_disable_crs(struct b43_wldev *dev, bool user) +{ + lpphy_set_deaf(dev, user); + lpphy_set_trsw_over(dev, false, true); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFB); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x4); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFF7); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x10); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFDF); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x20); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFBF); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0x7); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0x38); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFF3F); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0x100); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFDFF); + b43_phy_write(dev, B43_LPPHY_PS_CTL_OVERRIDE_VAL0, 0); + b43_phy_write(dev, B43_LPPHY_PS_CTL_OVERRIDE_VAL1, 1); + 
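/*
 * With the TR switch forced to receive further up
 * (lpphy_set_trsw_over(dev, false, true)), the override writes in this
 * function pin the RF and gain control paths to fixed values while
 * carrier sense is disabled; lpphy_restore_crs() undoes all of it by
 * clearing the override enable bits in RF_OVERRIDE_0/2 again.
 */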
b43_phy_write(dev, B43_LPPHY_PS_CTL_OVERRIDE_VAL2, 0x20); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFBFF); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xF7FF); + b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, 0); + b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, 0x45AF); + b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, 0x3FF); +} + +static void lpphy_restore_crs(struct b43_wldev *dev, bool user) +{ + lpphy_clear_deaf(dev, user); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFF80); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFC00); +} + +struct lpphy_tx_gains { u16 gm, pga, pad, dac; }; + +static void lpphy_disable_rx_gain_override(struct b43_wldev *dev) +{ + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFE); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFEF); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF); + if (dev->phy.rev >= 2) { + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF); + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF); + b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7); + } + } else { + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFDFF); + } +} + +static void lpphy_enable_rx_gain_override(struct b43_wldev *dev) +{ + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40); + if (dev->phy.rev >= 2) { + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100); + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400); + b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8); + } + } else { + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x200); + } +} + +static void lpphy_disable_tx_gain_override(struct b43_wldev *dev) +{ + if (dev->phy.rev < 2) + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF); + else { + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFF7F); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xBFFF); + } + b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFBF); +} + +static void lpphy_enable_tx_gain_override(struct b43_wldev *dev) +{ + if (dev->phy.rev < 2) + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100); + else { + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x80); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x4000); + } + b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x40); +} + +static struct lpphy_tx_gains lpphy_get_tx_gains(struct b43_wldev *dev) +{ + struct lpphy_tx_gains gains; + u16 tmp; + + gains.dac = (b43_phy_read(dev, B43_LPPHY_AFE_DAC_CTL) & 0x380) >> 7; + if (dev->phy.rev < 2) { + tmp = b43_phy_read(dev, + B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL) & 0x7FF; + gains.gm = tmp & 0x0007; + gains.pga = (tmp & 0x0078) >> 3; + gains.pad = (tmp & 0x780) >> 7; + } else { + tmp = b43_phy_read(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL); + gains.pad = b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0xFF; + gains.gm = tmp & 0xFF; + gains.pga = (tmp >> 8) & 0xFF; + } + + return gains; +} + +static void lpphy_set_dac_gain(struct b43_wldev *dev, u16 dac) +{ + u16 ctl = b43_phy_read(dev, B43_LPPHY_AFE_DAC_CTL) & 0xC7F; + ctl |= dac << 7; + b43_phy_maskset(dev, B43_LPPHY_AFE_DAC_CTL, 0xF000, ctl); +} + +static u16 lpphy_get_pa_gain(struct b43_wldev *dev) +{ + return b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0x7F; +} + +static void lpphy_set_pa_gain(struct b43_wldev *dev, u16 gain) +{ + b43_phy_maskset(dev, B43_PHY_OFDM(0xFB), 0xE03F, gain << 6); + b43_phy_maskset(dev, B43_PHY_OFDM(0xFD), 0x80FF, gain << 8); +} + +static void lpphy_set_tx_gains(struct b43_wldev *dev, + struct 
lpphy_tx_gains gains) +{ + u16 rf_gain, pa_gain; + + if (dev->phy.rev < 2) { + rf_gain = (gains.pad << 7) | (gains.pga << 3) | gains.gm; + b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, + 0xF800, rf_gain); + } else { + pa_gain = lpphy_get_pa_gain(dev); + b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, + (gains.pga << 8) | gains.gm); + /* + * SPEC FIXME The spec calls for (pa_gain << 8) here, but that + * conflicts with the spec for set_pa_gain! Vendor driver bug? + */ + b43_phy_maskset(dev, B43_PHY_OFDM(0xFB), + 0x8000, gains.pad | (pa_gain << 6)); + b43_phy_write(dev, B43_PHY_OFDM(0xFC), + (gains.pga << 8) | gains.gm); + b43_phy_maskset(dev, B43_PHY_OFDM(0xFD), + 0x8000, gains.pad | (pa_gain << 8)); + } + lpphy_set_dac_gain(dev, gains.dac); + lpphy_enable_tx_gain_override(dev); +} + +static void lpphy_rev0_1_set_rx_gain(struct b43_wldev *dev, u32 gain) +{ + u16 trsw = gain & 0x1; + u16 lna = (gain & 0xFFFC) | ((gain & 0xC) >> 2); + u16 ext_lna = (gain & 2) >> 1; + + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFE, trsw); + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, + 0xFBFF, ext_lna << 10); + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, + 0xF7FF, ext_lna << 11); + b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, lna); +} + +static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain) +{ + u16 low_gain = gain & 0xFFFF; + u16 high_gain = (gain >> 16) & 0xF; + u16 ext_lna = (gain >> 21) & 0x1; + u16 trsw = ~(gain >> 20) & 0x1; + u16 tmp; + + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFE, trsw); + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, + 0xFDFF, ext_lna << 9); + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, + 0xFBFF, ext_lna << 10); + b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain); + b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF0, high_gain); + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + tmp = (gain >> 2) & 0x3; + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, + 0xE7FF, tmp<<11); + b43_phy_maskset(dev, B43_PHY_OFDM(0xE6), 0xFFE7, tmp << 3); + } +} + +static void lpphy_set_rx_gain(struct b43_wldev *dev, u32 gain) +{ + if (dev->phy.rev < 2) + lpphy_rev0_1_set_rx_gain(dev, gain); + else + lpphy_rev2plus_set_rx_gain(dev, gain); + lpphy_enable_rx_gain_override(dev); +} + +static void lpphy_set_rx_gain_by_index(struct b43_wldev *dev, u16 idx) +{ + u32 gain = b43_lptab_read(dev, B43_LPTAB16(12, idx)); + lpphy_set_rx_gain(dev, gain); +} + +static void lpphy_stop_ddfs(struct b43_wldev *dev) +{ + b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0xFFFD); + b43_phy_mask(dev, B43_LPPHY_LP_PHY_CTL, 0xFFDF); +} + +static void lpphy_run_ddfs(struct b43_wldev *dev, int i_on, int q_on, + int incr1, int incr2, int scale_idx) +{ + lpphy_stop_ddfs(dev); + b43_phy_mask(dev, B43_LPPHY_AFE_DDFS_POINTER_INIT, 0xFF80); + b43_phy_mask(dev, B43_LPPHY_AFE_DDFS_POINTER_INIT, 0x80FF); + b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS_INCR_INIT, 0xFF80, incr1); + b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS_INCR_INIT, 0x80FF, incr2 << 8); + b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF7, i_on << 3); + b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFEF, q_on << 4); + b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFF9F, scale_idx << 5); + b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0xFFFB); + b43_phy_set(dev, B43_LPPHY_AFE_DDFS, 0x2); + b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x20); +} + +static bool lpphy_rx_iq_est(struct b43_wldev *dev, u16 samples, u8 time, + struct lpphy_iq_est *iq_est) +{ + int i; + + b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, 
0xFFF7); + b43_phy_write(dev, B43_LPPHY_IQ_NUM_SMPLS_ADDR, samples); + b43_phy_maskset(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xFF00, time); + b43_phy_mask(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xFEFF); + b43_phy_set(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR, 0x200); + + for (i = 0; i < 500; i++) { + if (!(b43_phy_read(dev, + B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) + break; + msleep(1); + } + + if ((b43_phy_read(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) { + b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x8); + return false; + } + + iq_est->iq_prod = b43_phy_read(dev, B43_LPPHY_IQ_ACC_HI_ADDR); + iq_est->iq_prod <<= 16; + iq_est->iq_prod |= b43_phy_read(dev, B43_LPPHY_IQ_ACC_LO_ADDR); + + iq_est->i_pwr = b43_phy_read(dev, B43_LPPHY_IQ_I_PWR_ACC_HI_ADDR); + iq_est->i_pwr <<= 16; + iq_est->i_pwr |= b43_phy_read(dev, B43_LPPHY_IQ_I_PWR_ACC_LO_ADDR); + + iq_est->q_pwr = b43_phy_read(dev, B43_LPPHY_IQ_Q_PWR_ACC_HI_ADDR); + iq_est->q_pwr <<= 16; + iq_est->q_pwr |= b43_phy_read(dev, B43_LPPHY_IQ_Q_PWR_ACC_LO_ADDR); + + b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x8); + return true; +} + +static int lpphy_loopback(struct b43_wldev *dev) +{ + struct lpphy_iq_est iq_est; + int i, index = -1; + u32 tmp; + + memset(&iq_est, 0, sizeof(iq_est)); + + lpphy_set_trsw_over(dev, true, true); + b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 1); + b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x800); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x8); + b43_radio_write(dev, B2062_N_TX_CTL_A, 0x80); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x80); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x80); + for (i = 0; i < 32; i++) { + lpphy_set_rx_gain_by_index(dev, i); + lpphy_run_ddfs(dev, 1, 1, 5, 5, 0); + if (!(lpphy_rx_iq_est(dev, 1000, 32, &iq_est))) + continue; + tmp = (iq_est.i_pwr + iq_est.q_pwr) / 1000; + if ((tmp > 4000) && (tmp < 10000)) { + index = i; + break; + } + } + lpphy_stop_ddfs(dev); + return index; +} + +/* Fixed-point division algorithm using only integer math. */ +static u32 lpphy_qdiv_roundup(u32 dividend, u32 divisor, u8 precision) +{ + u32 quotient, remainder; + + if (divisor == 0) + return 0; + + quotient = dividend / divisor; + remainder = dividend % divisor; + + while (precision > 0) { + quotient <<= 1; + if (remainder << 1 >= divisor) { + quotient++; + remainder = (remainder << 1) - divisor; + } + precision--; + } + + if (remainder << 1 >= divisor) + quotient++; + + return quotient; +} + +/* Read the TX power control mode from hardware. */ +static void lpphy_read_tx_pctl_mode_from_hardware(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + u16 ctl; + + ctl = b43_phy_read(dev, B43_LPPHY_TX_PWR_CTL_CMD); + switch (ctl & B43_LPPHY_TX_PWR_CTL_CMD_MODE) { + case B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF: + lpphy->txpctl_mode = B43_LPPHY_TXPCTL_OFF; + break; + case B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW: + lpphy->txpctl_mode = B43_LPPHY_TXPCTL_SW; + break; + case B43_LPPHY_TX_PWR_CTL_CMD_MODE_HW: + lpphy->txpctl_mode = B43_LPPHY_TXPCTL_HW; + break; + default: + lpphy->txpctl_mode = B43_LPPHY_TXPCTL_UNKNOWN; + B43_WARN_ON(1); + break; + } +} + +/* Set the TX power control mode in hardware. 
*/ +static void lpphy_write_tx_pctl_mode_to_hardware(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + u16 ctl; + + switch (lpphy->txpctl_mode) { + case B43_LPPHY_TXPCTL_OFF: + ctl = B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF; + break; + case B43_LPPHY_TXPCTL_HW: + ctl = B43_LPPHY_TX_PWR_CTL_CMD_MODE_HW; + break; + case B43_LPPHY_TXPCTL_SW: + ctl = B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW; + break; + default: + ctl = 0; + B43_WARN_ON(1); + } + b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, + (u16)~B43_LPPHY_TX_PWR_CTL_CMD_MODE, ctl); +} + +static void lpphy_set_tx_power_control(struct b43_wldev *dev, + enum b43_lpphy_txpctl_mode mode) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + enum b43_lpphy_txpctl_mode oldmode; + + lpphy_read_tx_pctl_mode_from_hardware(dev); + oldmode = lpphy->txpctl_mode; + if (oldmode == mode) + return; + lpphy->txpctl_mode = mode; + + if (oldmode == B43_LPPHY_TXPCTL_HW) { + //TODO Update TX Power NPT + //TODO Clear all TX Power offsets + } else { + if (mode == B43_LPPHY_TXPCTL_HW) { + //TODO Recalculate target TX power + b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, + 0xFF80, lpphy->tssi_idx); + b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM, + 0x8FFF, ((u16)lpphy->tssi_npt << 16)); + //TODO Set "TSSI Transmit Count" variable to total transmitted frame count + lpphy_disable_tx_gain_override(dev); + lpphy->tx_pwr_idx_over = -1; + } + } + if (dev->phy.rev >= 2) { + if (mode == B43_LPPHY_TXPCTL_HW) + b43_phy_set(dev, B43_PHY_OFDM(0xD0), 0x2); + else + b43_phy_mask(dev, B43_PHY_OFDM(0xD0), 0xFFFD); + } + lpphy_write_tx_pctl_mode_to_hardware(dev); +} + +static int b43_lpphy_op_switch_channel(struct b43_wldev *dev, + unsigned int new_channel); + +static void lpphy_rev0_1_rc_calib(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + struct lpphy_iq_est iq_est; + struct lpphy_tx_gains tx_gains; + static const u32 ideal_pwr_table[21] = { + 0x10000, 0x10557, 0x10e2d, 0x113e0, 0x10f22, 0x0ff64, + 0x0eda2, 0x0e5d4, 0x0efd1, 0x0fbe8, 0x0b7b8, 0x04b35, + 0x01a5e, 0x00a0b, 0x00444, 0x001fd, 0x000ff, 0x00088, + 0x0004c, 0x0002c, 0x0001a, + }; + bool old_txg_ovr; + u8 old_bbmult; + u16 old_rf_ovr, old_rf_ovrval, old_afe_ovr, old_afe_ovrval, + old_rf2_ovr, old_rf2_ovrval, old_phy_ctl; + enum b43_lpphy_txpctl_mode old_txpctl; + u32 normal_pwr, ideal_pwr, mean_sq_pwr, tmp = 0, mean_sq_pwr_min = 0; + int loopback, i, j, inner_sum, err; + + memset(&iq_est, 0, sizeof(iq_est)); + + err = b43_lpphy_op_switch_channel(dev, 7); + if (err) { + b43dbg(dev->wl, + "RC calib: Failed to switch to channel 7, error = %d\n", + err); + } + old_txg_ovr = !!(b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40); + old_bbmult = lpphy_get_bb_mult(dev); + if (old_txg_ovr) + tx_gains = lpphy_get_tx_gains(dev); + old_rf_ovr = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_0); + old_rf_ovrval = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_VAL_0); + old_afe_ovr = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR); + old_afe_ovrval = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVRVAL); + old_rf2_ovr = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_2); + old_rf2_ovrval = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_2_VAL); + old_phy_ctl = b43_phy_read(dev, B43_LPPHY_LP_PHY_CTL); + lpphy_read_tx_pctl_mode_from_hardware(dev); + old_txpctl = lpphy->txpctl_mode; + + lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF); + lpphy_disable_crs(dev, true); + loopback = lpphy_loopback(dev); + if (loopback == -1) + goto finish; + lpphy_set_rx_gain_by_index(dev, loopback); + b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFFBF, 0x40); + 
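/*
 * The sweep below tries each RX baseband RC candidate
 * (B2062_N_RXBB_CALIB2 = 128..159): for every candidate it plays DDFS
 * test tones (j = 5..25), measures their power with lpphy_rx_iq_est(),
 * normalizes against the j == 5 baseline and accumulates the squared
 * deviation from ideal_pwr_table[]; whenever the accumulated error
 * undercuts the best seen so far, the candidate is remembered in
 * lpphy->rc_cap.  ideal_pwr_table[] is in Q16 (0x10000 == 1.0) and is
 * rescaled to Q12 before the comparison; lpphy_qdiv_roundup(x, y, n)
 * returns x/y rounded to Qn fixed point, e.g.
 * lpphy_qdiv_roundup(3, 2, 2) == 6, i.e. 1.5 in Q2.
 */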
b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFFF8, 0x1); + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFFC7, 0x8); + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFF3F, 0xC0); + for (i = 128; i <= 159; i++) { + b43_radio_write(dev, B2062_N_RXBB_CALIB2, i); + inner_sum = 0; + for (j = 5; j <= 25; j++) { + lpphy_run_ddfs(dev, 1, 1, j, j, 0); + if (!(lpphy_rx_iq_est(dev, 1000, 32, &iq_est))) + goto finish; + mean_sq_pwr = iq_est.i_pwr + iq_est.q_pwr; + if (j == 5) + tmp = mean_sq_pwr; + ideal_pwr = ((ideal_pwr_table[j-5] >> 3) + 1) >> 1; + normal_pwr = lpphy_qdiv_roundup(mean_sq_pwr, tmp, 12); + mean_sq_pwr = ideal_pwr - normal_pwr; + mean_sq_pwr *= mean_sq_pwr; + inner_sum += mean_sq_pwr; + if ((i == 128) || (inner_sum < mean_sq_pwr_min)) { + lpphy->rc_cap = i; + mean_sq_pwr_min = inner_sum; + } + } + } + lpphy_stop_ddfs(dev); + +finish: + lpphy_restore_crs(dev, true); + b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, old_rf_ovrval); + b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_0, old_rf_ovr); + b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVRVAL, old_afe_ovrval); + b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVR, old_afe_ovr); + b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, old_rf2_ovrval); + b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, old_rf2_ovr); + b43_phy_write(dev, B43_LPPHY_LP_PHY_CTL, old_phy_ctl); + + lpphy_set_bb_mult(dev, old_bbmult); + if (old_txg_ovr) { + /* + * SPEC FIXME: The specs say "get_tx_gains" here, which is + * illogical. According to lwfinger, vendor driver v4.150.10.5 + * has a Set here, while v4.174.64.19 has a Get - regression in + * the vendor driver? This should be tested this once the code + * is testable. + */ + lpphy_set_tx_gains(dev, tx_gains); + } + lpphy_set_tx_power_control(dev, old_txpctl); + if (lpphy->rc_cap) + lpphy_set_rc_cap(dev); +} + +static void lpphy_rev2plus_rc_calib(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; + u8 tmp = b43_radio_read(dev, B2063_RX_BB_SP8) & 0xFF; + int i; + + b43_radio_write(dev, B2063_RX_BB_SP8, 0x0); + b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7E); + b43_radio_mask(dev, B2063_PLL_SP1, 0xF7); + b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7C); + b43_radio_write(dev, B2063_RC_CALIB_CTL2, 0x15); + b43_radio_write(dev, B2063_RC_CALIB_CTL3, 0x70); + b43_radio_write(dev, B2063_RC_CALIB_CTL4, 0x52); + b43_radio_write(dev, B2063_RC_CALIB_CTL5, 0x1); + b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7D); + + for (i = 0; i < 10000; i++) { + if (b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2) + break; + msleep(1); + } + + if (!(b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2)) + b43_radio_write(dev, B2063_RX_BB_SP8, tmp); + + tmp = b43_radio_read(dev, B2063_TX_BB_SP3) & 0xFF; + + b43_radio_write(dev, B2063_TX_BB_SP3, 0x0); + b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7E); + b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7C); + b43_radio_write(dev, B2063_RC_CALIB_CTL2, 0x55); + b43_radio_write(dev, B2063_RC_CALIB_CTL3, 0x76); + + if (crystal_freq == 24000000) { + b43_radio_write(dev, B2063_RC_CALIB_CTL4, 0xFC); + b43_radio_write(dev, B2063_RC_CALIB_CTL5, 0x0); + } else { + b43_radio_write(dev, B2063_RC_CALIB_CTL4, 0x13); + b43_radio_write(dev, B2063_RC_CALIB_CTL5, 0x1); + } + + b43_radio_write(dev, B2063_PA_SP7, 0x7D); + + for (i = 0; i < 10000; i++) { + if (b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2) + break; + msleep(1); + } + + if (!(b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2)) + b43_radio_write(dev, B2063_TX_BB_SP3, tmp); + + b43_radio_write(dev, 
B2063_RC_CALIB_CTL1, 0x7E); +} + +static void lpphy_calibrate_rc(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + + if (dev->phy.rev >= 2) { + lpphy_rev2plus_rc_calib(dev); + } else if (!lpphy->rc_cap) { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + lpphy_rev0_1_rc_calib(dev); + } else { + lpphy_set_rc_cap(dev); + } +} + +static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna) +{ + if (dev->phy.rev >= 2) + return; // rev2+ doesn't support antenna diversity + + if (B43_WARN_ON(antenna > B43_ANTENNA_AUTO1)) + return; + + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP); + + b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFD, antenna & 0x2); + b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFE, antenna & 0x1); + + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP); + + dev->phy.lp->antenna = antenna; +} + +static void lpphy_set_tx_iqcc(struct b43_wldev *dev, u16 a, u16 b) +{ + u16 tmp[2]; + + tmp[0] = a; + tmp[1] = b; + b43_lptab_write_bulk(dev, B43_LPTAB16(0, 80), 2, tmp); +} + +static void lpphy_set_tx_power_by_index(struct b43_wldev *dev, u8 index) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + struct lpphy_tx_gains gains; + u32 iq_comp, tx_gain, coeff, rf_power; + + lpphy->tx_pwr_idx_over = index; + lpphy_read_tx_pctl_mode_from_hardware(dev); + if (lpphy->txpctl_mode != B43_LPPHY_TXPCTL_OFF) + lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_SW); + if (dev->phy.rev >= 2) { + iq_comp = b43_lptab_read(dev, B43_LPTAB32(7, index + 320)); + tx_gain = b43_lptab_read(dev, B43_LPTAB32(7, index + 192)); + gains.pad = (tx_gain >> 16) & 0xFF; + gains.gm = tx_gain & 0xFF; + gains.pga = (tx_gain >> 8) & 0xFF; + gains.dac = (iq_comp >> 28) & 0xFF; + lpphy_set_tx_gains(dev, gains); + } else { + iq_comp = b43_lptab_read(dev, B43_LPTAB32(10, index + 320)); + tx_gain = b43_lptab_read(dev, B43_LPTAB32(10, index + 192)); + b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, + 0xF800, (tx_gain >> 4) & 0x7FFF); + lpphy_set_dac_gain(dev, tx_gain & 0x7); + lpphy_set_pa_gain(dev, (tx_gain >> 24) & 0x7F); + } + lpphy_set_bb_mult(dev, (iq_comp >> 20) & 0xFF); + lpphy_set_tx_iqcc(dev, (iq_comp >> 10) & 0x3FF, iq_comp & 0x3FF); + if (dev->phy.rev >= 2) { + coeff = b43_lptab_read(dev, B43_LPTAB32(7, index + 448)); + } else { + coeff = b43_lptab_read(dev, B43_LPTAB32(10, index + 448)); + } + b43_lptab_write(dev, B43_LPTAB16(0, 85), coeff & 0xFFFF); + if (dev->phy.rev >= 2) { + rf_power = b43_lptab_read(dev, B43_LPTAB32(7, index + 576)); + b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00, + rf_power & 0xFFFF);//SPEC FIXME mask & set != 0 + } + lpphy_enable_tx_gain_override(dev); +} + +static void lpphy_btcoex_override(struct b43_wldev *dev) +{ + b43_write16(dev, B43_MMIO_BTCOEX_CTL, 0x3); + b43_write16(dev, B43_MMIO_BTCOEX_TXCTL, 0xFF); +} + +static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev, + bool blocked) +{ + //TODO check MAC control register + if (blocked) { + if (dev->phy.rev >= 2) { + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x83FF); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00); + b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0x80FF); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xDFFF); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0808); + } else { + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xE0FF); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFCFF); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0018); + } + } else { + b43_phy_mask(dev, 
B43_LPPHY_RF_OVERRIDE_0, 0xE0FF); + if (dev->phy.rev >= 2) + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xF7F7); + else + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFFE7); + } +} + +/* This was previously called lpphy_japan_filter */ +static void lpphy_set_analog_filter(struct b43_wldev *dev, int channel) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + u16 tmp = (channel == 14); //SPEC FIXME check japanwidefilter! + + if (dev->phy.rev < 2) { //SPEC FIXME Isn't this rev0/1-specific? + b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFCFF, tmp << 9); + if ((dev->phy.rev == 1) && (lpphy->rc_cap)) + lpphy_set_rc_cap(dev); + } else { + b43_radio_write(dev, B2063_TX_BB_SP3, 0x3F); + } +} + +static void lpphy_set_tssi_mux(struct b43_wldev *dev, enum tssi_mux_mode mode) +{ + if (mode != TSSI_MUX_EXT) { + b43_radio_set(dev, B2063_PA_SP1, 0x2); + b43_phy_set(dev, B43_PHY_OFDM(0xF3), 0x1000); + b43_radio_write(dev, B2063_PA_CTL10, 0x51); + if (mode == TSSI_MUX_POSTPA) { + b43_radio_mask(dev, B2063_PA_SP1, 0xFFFE); + b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFC7); + } else { + b43_radio_maskset(dev, B2063_PA_SP1, 0xFFFE, 0x1); + b43_phy_maskset(dev, B43_LPPHY_AFE_CTL_OVRVAL, + 0xFFC7, 0x20); + } + } else { + B43_WARN_ON(1); + } +} + +static void lpphy_tx_pctl_init_hw(struct b43_wldev *dev) +{ + u16 tmp; + int i; + + //SPEC TODO Call LP PHY Clear TX Power offsets + for (i = 0; i < 64; i++) { + if (dev->phy.rev >= 2) + b43_lptab_write(dev, B43_LPTAB32(7, i + 1), i); + else + b43_lptab_write(dev, B43_LPTAB32(10, i + 1), i); + } + + b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0xFF00, 0xFF); + b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0x8FFF, 0x5000); + b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_IDLETSSI, 0xFFC0, 0x1F); + if (dev->phy.rev < 2) { + b43_phy_mask(dev, B43_LPPHY_LP_PHY_CTL, 0xEFFF); + b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xDFFF, 0x2000); + } else { + b43_phy_mask(dev, B43_PHY_OFDM(0x103), 0xFFFE); + b43_phy_maskset(dev, B43_PHY_OFDM(0x103), 0xFFFB, 0x4); + b43_phy_maskset(dev, B43_PHY_OFDM(0x103), 0xFFEF, 0x10); + b43_radio_maskset(dev, B2063_IQ_CALIB_CTL2, 0xF3, 0x1); + lpphy_set_tssi_mux(dev, TSSI_MUX_POSTPA); + } + b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_IDLETSSI, 0x7FFF, 0x8000); + b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xFF); + b43_phy_write(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xA); + b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, + (u16)~B43_LPPHY_TX_PWR_CTL_CMD_MODE, + B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF); + b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0xF8FF); + b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD, + (u16)~B43_LPPHY_TX_PWR_CTL_CMD_MODE, + B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW); + + if (dev->phy.rev < 2) { + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_0, 0xEFFF, 0x1000); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xEFFF); + } else { + lpphy_set_tx_power_by_index(dev, 0x7F); + } + + b43_dummy_transmission(dev, true, true); + + tmp = b43_phy_read(dev, B43_LPPHY_TX_PWR_CTL_STAT); + if (tmp & 0x8000) { + b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_IDLETSSI, + 0xFFC0, (tmp & 0xFF) - 32); + } + + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xEFFF); + + // (SPEC?) TODO Set "Target TX frequency" variable to 0 + // SPEC FIXME "Set BB Multiplier to 0xE000" impossible - bb_mult is u8! 
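/*
 * The b43_dummy_transmission() above lets the hardware capture an idle
 * TSSI sample: if bit 15 of TX_PWR_CTL_STAT is set (apparently a
 * "measurement valid" flag), the low byte minus 32 is written back as
 * the idle-TSSI reference.
 */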
+} + +static void lpphy_tx_pctl_init_sw(struct b43_wldev *dev) +{ + struct lpphy_tx_gains gains; + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + gains.gm = 4; + gains.pad = 12; + gains.pga = 12; + gains.dac = 0; + } else { + gains.gm = 7; + gains.pad = 14; + gains.pga = 15; + gains.dac = 0; + } + lpphy_set_tx_gains(dev, gains); + lpphy_set_bb_mult(dev, 150); +} + +/* Initialize TX power control */ +static void lpphy_tx_pctl_init(struct b43_wldev *dev) +{ + if (0/*FIXME HWPCTL capable */) { + lpphy_tx_pctl_init_hw(dev); + } else { /* This device is only software TX power control capable. */ + lpphy_tx_pctl_init_sw(dev); + } +} + +static void lpphy_pr41573_workaround(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + u32 *saved_tab; + const unsigned int saved_tab_size = 256; + enum b43_lpphy_txpctl_mode txpctl_mode; + s8 tx_pwr_idx_over; + u16 tssi_npt, tssi_idx; + + saved_tab = kcalloc(saved_tab_size, sizeof(saved_tab[0]), GFP_KERNEL); + if (!saved_tab) { + b43err(dev->wl, "PR41573 failed. Out of memory!\n"); + return; + } + + lpphy_read_tx_pctl_mode_from_hardware(dev); + txpctl_mode = lpphy->txpctl_mode; + tx_pwr_idx_over = lpphy->tx_pwr_idx_over; + tssi_npt = lpphy->tssi_npt; + tssi_idx = lpphy->tssi_idx; + + if (dev->phy.rev < 2) { + b43_lptab_read_bulk(dev, B43_LPTAB32(10, 0x140), + saved_tab_size, saved_tab); + } else { + b43_lptab_read_bulk(dev, B43_LPTAB32(7, 0x140), + saved_tab_size, saved_tab); + } + //FIXME PHY reset + lpphy_table_init(dev); //FIXME is table init needed? + lpphy_baseband_init(dev); + lpphy_tx_pctl_init(dev); + b43_lpphy_op_software_rfkill(dev, false); + lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF); + if (dev->phy.rev < 2) { + b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0x140), + saved_tab_size, saved_tab); + } else { + b43_lptab_write_bulk(dev, B43_LPTAB32(7, 0x140), + saved_tab_size, saved_tab); + } + b43_write16(dev, B43_MMIO_CHANNEL, lpphy->channel); + lpphy->tssi_npt = tssi_npt; + lpphy->tssi_idx = tssi_idx; + lpphy_set_analog_filter(dev, lpphy->channel); + if (tx_pwr_idx_over != -1) + lpphy_set_tx_power_by_index(dev, tx_pwr_idx_over); + if (lpphy->rc_cap) + lpphy_set_rc_cap(dev); + b43_lpphy_op_set_rx_antenna(dev, lpphy->antenna); + lpphy_set_tx_power_control(dev, txpctl_mode); + kfree(saved_tab); +} + +struct lpphy_rx_iq_comp { u8 chan; s8 c1, c0; }; + +static const struct lpphy_rx_iq_comp lpphy_5354_iq_table[] = { + { .chan = 1, .c1 = -66, .c0 = 15, }, + { .chan = 2, .c1 = -66, .c0 = 15, }, + { .chan = 3, .c1 = -66, .c0 = 15, }, + { .chan = 4, .c1 = -66, .c0 = 15, }, + { .chan = 5, .c1 = -66, .c0 = 15, }, + { .chan = 6, .c1 = -66, .c0 = 15, }, + { .chan = 7, .c1 = -66, .c0 = 14, }, + { .chan = 8, .c1 = -66, .c0 = 14, }, + { .chan = 9, .c1 = -66, .c0 = 14, }, + { .chan = 10, .c1 = -66, .c0 = 14, }, + { .chan = 11, .c1 = -66, .c0 = 14, }, + { .chan = 12, .c1 = -66, .c0 = 13, }, + { .chan = 13, .c1 = -66, .c0 = 13, }, + { .chan = 14, .c1 = -66, .c0 = 13, }, +}; + +static const struct lpphy_rx_iq_comp lpphy_rev0_1_iq_table[] = { + { .chan = 1, .c1 = -64, .c0 = 13, }, + { .chan = 2, .c1 = -64, .c0 = 13, }, + { .chan = 3, .c1 = -64, .c0 = 13, }, + { .chan = 4, .c1 = -64, .c0 = 13, }, + { .chan = 5, .c1 = -64, .c0 = 12, }, + { .chan = 6, .c1 = -64, .c0 = 12, }, + { .chan = 7, .c1 = -64, .c0 = 12, }, + { .chan = 8, .c1 = -64, .c0 = 12, }, + { .chan = 9, .c1 = -64, .c0 = 12, }, + { .chan = 10, .c1 = -64, .c0 = 11, }, + { .chan = 11, .c1 = -64, .c0 = 11, }, + { .chan = 12, .c1 = -64, .c0 = 11, }, + { .chan = 13, .c1 = -64, .c0 
= 11, }, + { .chan = 14, .c1 = -64, .c0 = 10, }, + { .chan = 34, .c1 = -62, .c0 = 24, }, + { .chan = 38, .c1 = -62, .c0 = 24, }, + { .chan = 42, .c1 = -62, .c0 = 24, }, + { .chan = 46, .c1 = -62, .c0 = 23, }, + { .chan = 36, .c1 = -62, .c0 = 24, }, + { .chan = 40, .c1 = -62, .c0 = 24, }, + { .chan = 44, .c1 = -62, .c0 = 23, }, + { .chan = 48, .c1 = -62, .c0 = 23, }, + { .chan = 52, .c1 = -62, .c0 = 23, }, + { .chan = 56, .c1 = -62, .c0 = 22, }, + { .chan = 60, .c1 = -62, .c0 = 22, }, + { .chan = 64, .c1 = -62, .c0 = 22, }, + { .chan = 100, .c1 = -62, .c0 = 16, }, + { .chan = 104, .c1 = -62, .c0 = 16, }, + { .chan = 108, .c1 = -62, .c0 = 15, }, + { .chan = 112, .c1 = -62, .c0 = 14, }, + { .chan = 116, .c1 = -62, .c0 = 14, }, + { .chan = 120, .c1 = -62, .c0 = 13, }, + { .chan = 124, .c1 = -62, .c0 = 12, }, + { .chan = 128, .c1 = -62, .c0 = 12, }, + { .chan = 132, .c1 = -62, .c0 = 12, }, + { .chan = 136, .c1 = -62, .c0 = 11, }, + { .chan = 140, .c1 = -62, .c0 = 10, }, + { .chan = 149, .c1 = -61, .c0 = 9, }, + { .chan = 153, .c1 = -61, .c0 = 9, }, + { .chan = 157, .c1 = -61, .c0 = 9, }, + { .chan = 161, .c1 = -61, .c0 = 8, }, + { .chan = 165, .c1 = -61, .c0 = 8, }, + { .chan = 184, .c1 = -62, .c0 = 25, }, + { .chan = 188, .c1 = -62, .c0 = 25, }, + { .chan = 192, .c1 = -62, .c0 = 25, }, + { .chan = 196, .c1 = -62, .c0 = 25, }, + { .chan = 200, .c1 = -62, .c0 = 25, }, + { .chan = 204, .c1 = -62, .c0 = 25, }, + { .chan = 208, .c1 = -62, .c0 = 25, }, + { .chan = 212, .c1 = -62, .c0 = 25, }, + { .chan = 216, .c1 = -62, .c0 = 26, }, +}; + +static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = { + .chan = 0, + .c1 = -64, + .c0 = 0, +}; + +static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples) +{ + struct lpphy_iq_est iq_est; + u16 c0, c1; + int prod, ipwr, qpwr, prod_msb, q_msb, tmp1, tmp2, tmp3, tmp4, ret; + + c1 = b43_phy_read(dev, B43_LPPHY_RX_COMP_COEFF_S); + c0 = c1 >> 8; + c1 |= 0xFF; + + b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, 0x00C0); + b43_phy_mask(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF); + + ret = lpphy_rx_iq_est(dev, samples, 32, &iq_est); + if (!ret) + goto out; + + prod = iq_est.iq_prod; + ipwr = iq_est.i_pwr; + qpwr = iq_est.q_pwr; + + if (ipwr + qpwr < 2) { + ret = 0; + goto out; + } + + prod_msb = fls(abs(prod)); + q_msb = fls(abs(qpwr)); + tmp1 = prod_msb - 20; + + if (tmp1 >= 0) { + tmp3 = ((prod << (30 - prod_msb)) + (ipwr >> (1 + tmp1))) / + (ipwr >> tmp1); + } else { + tmp3 = ((prod << (30 - prod_msb)) + (ipwr << (-1 - tmp1))) / + (ipwr << -tmp1); + } + + tmp2 = q_msb - 11; + + if (tmp2 >= 0) + tmp4 = (qpwr << (31 - q_msb)) / (ipwr >> tmp2); + else + tmp4 = (qpwr << (31 - q_msb)) / (ipwr << -tmp2); + + tmp4 -= tmp3 * tmp3; + tmp4 = -int_sqrt(tmp4); + + c0 = tmp3 >> 3; + c1 = tmp4 >> 4; + +out: + b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, c1); + b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF, c0 << 8); + return ret; +} + +static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops, + u16 wait) +{ + b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL, + 0xFFC0, samples - 1); + if (loops != 0xFFFF) + loops--; + b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000, loops); + b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL, 0x3F, wait << 6); + b43_phy_set(dev, B43_LPPHY_A_PHY_CTL_ADDR, 0x1); +} + +//SPEC FIXME what does a negative freq mean? 
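+/*
+ * A sketch of the tone math used below, assuming the sample-play clock
+ * runs at 20 MHz and that b43_cordic() takes its angle in degrees with
+ * 16 fractional bits (both are assumptions, not stated in this patch):
+ * freq is given in kHz, so the phase step per sample is
+ * 360 * freq / 20000 = ((36 * freq) / 20) / 100 degrees, which is the
+ * "rotation" value computed in lpphy_start_tx_tone(). The sample-count
+ * loop then picks the smallest buffer length (at most 63 entries) for
+ * which samples * abs(freq) == 20000 * i, i.e. the buffer holds a whole
+ * number of tone periods, so that looping it via
+ * lpphy_run_samples(dev, samples, 0xFFFF, 0) plays a continuous tone
+ * with no phase jump at the wrap-around.
+ */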
+static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + u16 buf[64]; + int i, samples = 0, angle = 0; + int rotation = (((36 * freq) / 20) << 16) / 100; + struct b43_c32 sample; + + lpphy->tx_tone_freq = freq; + + if (freq) { + /* Find i for which abs(freq) integrally divides 20000 * i */ + for (i = 1; samples * abs(freq) != 20000 * i; i++) { + samples = (20000 * i) / abs(freq); + if(B43_WARN_ON(samples > 63)) + return; + } + } else { + samples = 2; + } + + for (i = 0; i < samples; i++) { + sample = b43_cordic(angle); + angle += rotation; + buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8; + buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF); + } + + b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf); + + lpphy_run_samples(dev, samples, 0xFFFF, 0); +} + +static void lpphy_stop_tx_tone(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + int i; + + lpphy->tx_tone_freq = 0; + + b43_phy_mask(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000); + for (i = 0; i < 31; i++) { + if (!(b43_phy_read(dev, B43_LPPHY_A_PHY_CTL_ADDR) & 0x1)) + break; + udelay(100); + } +} + + +static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains, + int mode, bool useindex, u8 index) +{ + //TODO +} + +static void lpphy_papd_cal_txpwr(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + struct ssb_bus *bus = dev->dev->bus; + struct lpphy_tx_gains gains, oldgains; + int old_txpctl, old_afe_ovr, old_rf, old_bbmult; + + lpphy_read_tx_pctl_mode_from_hardware(dev); + old_txpctl = lpphy->txpctl_mode; + old_afe_ovr = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40; + if (old_afe_ovr) + oldgains = lpphy_get_tx_gains(dev); + old_rf = b43_phy_read(dev, B43_LPPHY_RF_PWR_OVERRIDE) & 0xFF; + old_bbmult = lpphy_get_bb_mult(dev); + + lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF); + + if (bus->chip_id == 0x4325 && bus->chip_rev == 0) + lpphy_papd_cal(dev, gains, 0, 1, 30); + else + lpphy_papd_cal(dev, gains, 0, 1, 65); + + if (old_afe_ovr) + lpphy_set_tx_gains(dev, oldgains); + lpphy_set_bb_mult(dev, old_bbmult); + lpphy_set_tx_power_control(dev, old_txpctl); + b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00, old_rf); +} + +static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx, + bool rx, bool pa, struct lpphy_tx_gains *gains) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + struct ssb_bus *bus = dev->dev->bus; + const struct lpphy_rx_iq_comp *iqcomp = NULL; + struct lpphy_tx_gains nogains, oldgains; + u16 tmp; + int i, ret; + + memset(&nogains, 0, sizeof(nogains)); + memset(&oldgains, 0, sizeof(oldgains)); + + if (bus->chip_id == 0x5354) { + for (i = 0; i < ARRAY_SIZE(lpphy_5354_iq_table); i++) { + if (lpphy_5354_iq_table[i].chan == lpphy->channel) { + iqcomp = &lpphy_5354_iq_table[i]; + } + } + } else if (dev->phy.rev >= 2) { + iqcomp = &lpphy_rev2plus_iq_comp; + } else { + for (i = 0; i < ARRAY_SIZE(lpphy_rev0_1_iq_table); i++) { + if (lpphy_rev0_1_iq_table[i].chan == lpphy->channel) { + iqcomp = &lpphy_rev0_1_iq_table[i]; + } + } + } + + if (B43_WARN_ON(!iqcomp)) + return 0; + + b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, iqcomp->c1); + b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, + 0x00FF, iqcomp->c0 << 8); + + if (noise) { + tx = true; + rx = false; + pa = false; + } + + lpphy_set_trsw_over(dev, tx, rx); + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8); + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, + 
0xFFF7, pa << 3); + } else { + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x20); + b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, + 0xFFDF, pa << 5); + } + + tmp = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40; + + if (noise) + lpphy_set_rx_gain(dev, 0x2D5D); + else { + if (tmp) + oldgains = lpphy_get_tx_gains(dev); + if (!gains) + gains = &nogains; + lpphy_set_tx_gains(dev, *gains); + } + + b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE); + b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800); + b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x800); + lpphy_set_deaf(dev, false); + if (noise) + ret = lpphy_calc_rx_iq_comp(dev, 0xFFF0); + else { + lpphy_start_tx_tone(dev, 4000, 100); + ret = lpphy_calc_rx_iq_comp(dev, 0x4000); + lpphy_stop_tx_tone(dev); + } + lpphy_clear_deaf(dev, false); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFC); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFF7); + b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFDF); + if (!noise) { + if (tmp) + lpphy_set_tx_gains(dev, oldgains); + else + lpphy_disable_tx_gain_override(dev); + } + lpphy_disable_rx_gain_override(dev); + b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE); + b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xF7FF); + return ret; +} + +static void lpphy_calibration(struct b43_wldev *dev) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + enum b43_lpphy_txpctl_mode saved_pctl_mode; + bool full_cal = false; + + if (lpphy->full_calib_chan != lpphy->channel) { + full_cal = true; + lpphy->full_calib_chan = lpphy->channel; + } + + b43_mac_suspend(dev); + + lpphy_btcoex_override(dev); + if (dev->phy.rev >= 2) + lpphy_save_dig_flt_state(dev); + lpphy_read_tx_pctl_mode_from_hardware(dev); + saved_pctl_mode = lpphy->txpctl_mode; + lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF); + //TODO Perform transmit power table I/Q LO calibration + if ((dev->phy.rev == 0) && (saved_pctl_mode != B43_LPPHY_TXPCTL_OFF)) + lpphy_pr41573_workaround(dev); + if ((dev->phy.rev >= 2) && full_cal) { + lpphy_papd_cal_txpwr(dev); + } + lpphy_set_tx_power_control(dev, saved_pctl_mode); + if (dev->phy.rev >= 2) + lpphy_restore_dig_flt_state(dev); + lpphy_rx_iq_cal(dev, true, true, false, false, NULL); + + b43_mac_enable(dev); +} + +static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg) +{ + b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + return b43_read16(dev, B43_MMIO_PHY_DATA); +} + +static void b43_lpphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) +{ + b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16(dev, B43_MMIO_PHY_DATA, value); +} + +static void b43_lpphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, + u16 set) +{ + b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16(dev, B43_MMIO_PHY_DATA, + (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set); +} + +static u16 b43_lpphy_op_radio_read(struct b43_wldev *dev, u16 reg) +{ + /* Register 1 is a 32-bit register. */ + B43_WARN_ON(reg == 1); + /* LP-PHY needs a special bit set for read access */ + if (dev->phy.rev < 2) { + if (reg != 0x4001) + reg |= 0x100; + } else + reg |= 0x200; + + b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); +} + +static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) +{ + /* Register 1 is a 32-bit register. 
*/ + B43_WARN_ON(reg == 1); + + b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); +} + +struct b206x_channel { + u8 channel; + u16 freq; + u8 data[12]; +}; + +static const struct b206x_channel b2062_chantbl[] = { + { .channel = 1, .freq = 2412, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 2, .freq = 2417, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 3, .freq = 2422, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 4, .freq = 2427, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 5, .freq = 2432, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 6, .freq = 2437, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 7, .freq = 2442, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 8, .freq = 2447, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 9, .freq = 2452, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 10, .freq = 2457, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 11, .freq = 2462, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 12, .freq = 2467, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 13, .freq = 2472, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 14, .freq = 2484, .data[0] = 0xFF, .data[1] = 0xFF, + .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32, + .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, }, + { .channel = 34, .freq = 5170, .data[0] = 0x00, .data[1] = 0x22, + .data[2] = 0x20, .data[3] = 0x84, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 38, .freq = 5190, .data[0] = 0x00, .data[1] = 0x11, + .data[2] = 0x10, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 42, .freq = 5210, .data[0] = 0x00, .data[1] = 0x11, + .data[2] = 0x10, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 46, 
.freq = 5230, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 36, .freq = 5180, .data[0] = 0x00, .data[1] = 0x11, + .data[2] = 0x20, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 40, .freq = 5200, .data[0] = 0x00, .data[1] = 0x11, + .data[2] = 0x10, .data[3] = 0x84, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 44, .freq = 5220, .data[0] = 0x00, .data[1] = 0x11, + .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 48, .freq = 5240, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 52, .freq = 5260, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 56, .freq = 5280, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 60, .freq = 5300, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x63, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 64, .freq = 5320, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x62, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 100, .freq = 5500, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x30, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 104, .freq = 5520, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x20, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 108, .freq = 5540, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x20, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 112, .freq = 5560, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x20, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 116, .freq = 5580, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x10, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 120, .freq = 5600, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 124, .freq = 5620, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 128, .freq = 5640, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 132, .freq = 5660, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 136, .freq = 5680, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, 
.data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 140, .freq = 5700, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 149, .freq = 5745, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 153, .freq = 5765, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 157, .freq = 5785, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 161, .freq = 5805, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 165, .freq = 5825, .data[0] = 0x00, .data[1] = 0x00, + .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 184, .freq = 4920, .data[0] = 0x55, .data[1] = 0x77, + .data[2] = 0x90, .data[3] = 0xF7, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, + { .channel = 188, .freq = 4940, .data[0] = 0x44, .data[1] = 0x77, + .data[2] = 0x80, .data[3] = 0xE7, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, + { .channel = 192, .freq = 4960, .data[0] = 0x44, .data[1] = 0x66, + .data[2] = 0x80, .data[3] = 0xE7, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, + { .channel = 196, .freq = 4980, .data[0] = 0x33, .data[1] = 0x66, + .data[2] = 0x70, .data[3] = 0xC7, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, + { .channel = 200, .freq = 5000, .data[0] = 0x22, .data[1] = 0x55, + .data[2] = 0x60, .data[3] = 0xD7, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, + { .channel = 204, .freq = 5020, .data[0] = 0x22, .data[1] = 0x55, + .data[2] = 0x60, .data[3] = 0xC7, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, + { .channel = 208, .freq = 5040, .data[0] = 0x22, .data[1] = 0x44, + .data[2] = 0x50, .data[3] = 0xC7, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, }, + { .channel = 212, .freq = 5060, .data[0] = 0x11, .data[1] = 0x44, + .data[2] = 0x50, .data[3] = 0xA5, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, + { .channel = 216, .freq = 5080, .data[0] = 0x00, .data[1] = 0x44, + .data[2] = 0x40, .data[3] = 0xB6, .data[4] = 0x3C, .data[5] = 0x77, + .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, }, +}; + +static const struct b206x_channel b2063_chantbl[] = { + { .channel = 1, .freq = 2412, .data[0] = 0x6F, .data[1] = 0x3C, + .data[2] = 0x3C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 2, .freq = 2417, .data[0] = 0x6F, .data[1] = 0x3C, + .data[2] = 0x3C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, 
.data[11] = 0x70, }, + { .channel = 3, .freq = 2422, .data[0] = 0x6F, .data[1] = 0x3C, + .data[2] = 0x3C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 4, .freq = 2427, .data[0] = 0x6F, .data[1] = 0x2C, + .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 5, .freq = 2432, .data[0] = 0x6F, .data[1] = 0x2C, + .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 6, .freq = 2437, .data[0] = 0x6F, .data[1] = 0x2C, + .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 7, .freq = 2442, .data[0] = 0x6F, .data[1] = 0x2C, + .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 8, .freq = 2447, .data[0] = 0x6F, .data[1] = 0x2C, + .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 9, .freq = 2452, .data[0] = 0x6F, .data[1] = 0x1C, + .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 10, .freq = 2457, .data[0] = 0x6F, .data[1] = 0x1C, + .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 11, .freq = 2462, .data[0] = 0x6E, .data[1] = 0x1C, + .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 12, .freq = 2467, .data[0] = 0x6E, .data[1] = 0x1C, + .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 13, .freq = 2472, .data[0] = 0x6E, .data[1] = 0x1C, + .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 14, .freq = 2484, .data[0] = 0x6E, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05, + .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x80, .data[11] = 0x70, }, + { .channel = 34, .freq = 5170, .data[0] = 0x6A, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x02, .data[5] = 0x05, + .data[6] = 0x0D, .data[7] = 0x0D, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x20, .data[11] = 0x00, }, + { .channel = 36, .freq = 5180, .data[0] = 0x6A, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x05, + .data[6] = 0x0D, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x20, .data[11] = 0x00, }, + { .channel = 38, .freq = 5190, .data[0] = 0x6A, .data[1] = 0x0C, + 
.data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x04, + .data[6] = 0x0C, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x80, + .data[10] = 0x20, .data[11] = 0x00, }, + { .channel = 40, .freq = 5200, .data[0] = 0x69, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x04, + .data[6] = 0x0C, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x70, + .data[10] = 0x20, .data[11] = 0x00, }, + { .channel = 42, .freq = 5210, .data[0] = 0x69, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x04, + .data[6] = 0x0B, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x70, + .data[10] = 0x20, .data[11] = 0x00, }, + { .channel = 44, .freq = 5220, .data[0] = 0x69, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x04, + .data[6] = 0x0B, .data[7] = 0x0B, .data[8] = 0x77, .data[9] = 0x60, + .data[10] = 0x20, .data[11] = 0x00, }, + { .channel = 46, .freq = 5230, .data[0] = 0x69, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x03, + .data[6] = 0x0A, .data[7] = 0x0B, .data[8] = 0x77, .data[9] = 0x60, + .data[10] = 0x20, .data[11] = 0x00, }, + { .channel = 48, .freq = 5240, .data[0] = 0x69, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x03, + .data[6] = 0x0A, .data[7] = 0x0A, .data[8] = 0x77, .data[9] = 0x60, + .data[10] = 0x20, .data[11] = 0x00, }, + { .channel = 52, .freq = 5260, .data[0] = 0x68, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x02, + .data[6] = 0x09, .data[7] = 0x09, .data[8] = 0x77, .data[9] = 0x60, + .data[10] = 0x20, .data[11] = 0x00, }, + { .channel = 56, .freq = 5280, .data[0] = 0x68, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x01, + .data[6] = 0x08, .data[7] = 0x08, .data[8] = 0x77, .data[9] = 0x50, + .data[10] = 0x10, .data[11] = 0x00, }, + { .channel = 60, .freq = 5300, .data[0] = 0x68, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x01, + .data[6] = 0x08, .data[7] = 0x08, .data[8] = 0x77, .data[9] = 0x50, + .data[10] = 0x10, .data[11] = 0x00, }, + { .channel = 64, .freq = 5320, .data[0] = 0x67, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x08, .data[7] = 0x08, .data[8] = 0x77, .data[9] = 0x50, + .data[10] = 0x10, .data[11] = 0x00, }, + { .channel = 100, .freq = 5500, .data[0] = 0x64, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x02, .data[7] = 0x01, .data[8] = 0x77, .data[9] = 0x20, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 104, .freq = 5520, .data[0] = 0x64, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x01, .data[7] = 0x01, .data[8] = 0x77, .data[9] = 0x20, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 108, .freq = 5540, .data[0] = 0x63, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x01, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x10, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 112, .freq = 5560, .data[0] = 0x63, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x10, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 116, .freq = 5580, .data[0] = 0x62, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + 
.data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x10, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 120, .freq = 5600, .data[0] = 0x62, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 124, .freq = 5620, .data[0] = 0x62, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 128, .freq = 5640, .data[0] = 0x61, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 132, .freq = 5660, .data[0] = 0x61, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 136, .freq = 5680, .data[0] = 0x61, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 140, .freq = 5700, .data[0] = 0x60, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 149, .freq = 5745, .data[0] = 0x60, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 153, .freq = 5765, .data[0] = 0x60, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 157, .freq = 5785, .data[0] = 0x60, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 161, .freq = 5805, .data[0] = 0x60, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 165, .freq = 5825, .data[0] = 0x60, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00, + .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00, + .data[10] = 0x00, .data[11] = 0x00, }, + { .channel = 184, .freq = 4920, .data[0] = 0x6E, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x09, .data[5] = 0x0E, + .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xC0, + .data[10] = 0x50, .data[11] = 0x00, }, + { .channel = 188, .freq = 4940, .data[0] = 0x6E, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x09, .data[5] = 0x0D, + .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xB0, + .data[10] = 0x50, .data[11] = 0x00, }, + { .channel = 192, .freq = 4960, .data[0] = 0x6E, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0C, + .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xB0, 
+ .data[10] = 0x50, .data[11] = 0x00, }, + { .channel = 196, .freq = 4980, .data[0] = 0x6D, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0C, + .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xA0, + .data[10] = 0x40, .data[11] = 0x00, }, + { .channel = 200, .freq = 5000, .data[0] = 0x6D, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0B, + .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xA0, + .data[10] = 0x40, .data[11] = 0x00, }, + { .channel = 204, .freq = 5020, .data[0] = 0x6D, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0A, + .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xA0, + .data[10] = 0x40, .data[11] = 0x00, }, + { .channel = 208, .freq = 5040, .data[0] = 0x6C, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x07, .data[5] = 0x09, + .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0x90, + .data[10] = 0x40, .data[11] = 0x00, }, + { .channel = 212, .freq = 5060, .data[0] = 0x6C, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x06, .data[5] = 0x08, + .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0x90, + .data[10] = 0x40, .data[11] = 0x00, }, + { .channel = 216, .freq = 5080, .data[0] = 0x6C, .data[1] = 0x0C, + .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x05, .data[5] = 0x08, + .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0x90, + .data[10] = 0x40, .data[11] = 0x00, }, +}; + +static void lpphy_b2062_reset_pll_bias(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + + b43_radio_write(dev, B2062_S_RFPLL_CTL2, 0xFF); + udelay(20); + if (bus->chip_id == 0x5354) { + b43_radio_write(dev, B2062_N_COMM1, 4); + b43_radio_write(dev, B2062_S_RFPLL_CTL2, 4); + } else { + b43_radio_write(dev, B2062_S_RFPLL_CTL2, 0); + } + udelay(5); +} + +static void lpphy_b2062_vco_calib(struct b43_wldev *dev) +{ + b43_radio_write(dev, B2062_S_RFPLL_CTL21, 0x42); + b43_radio_write(dev, B2062_S_RFPLL_CTL21, 0x62); + udelay(200); +} + +static int lpphy_b2062_tune(struct b43_wldev *dev, + unsigned int channel) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + struct ssb_bus *bus = dev->dev->bus; + const struct b206x_channel *chandata = NULL; + u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; + u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9; + int i, err = 0; + + for (i = 0; i < ARRAY_SIZE(b2062_chantbl); i++) { + if (b2062_chantbl[i].channel == channel) { + chandata = &b2062_chantbl[i]; + break; + } + } + + if (B43_WARN_ON(!chandata)) + return -EINVAL; + + b43_radio_set(dev, B2062_S_RFPLL_CTL14, 0x04); + b43_radio_write(dev, B2062_N_LGENA_TUNE0, chandata->data[0]); + b43_radio_write(dev, B2062_N_LGENA_TUNE2, chandata->data[1]); + b43_radio_write(dev, B2062_N_LGENA_TUNE3, chandata->data[2]); + b43_radio_write(dev, B2062_N_TX_TUNE, chandata->data[3]); + b43_radio_write(dev, B2062_S_LGENG_CTL1, chandata->data[4]); + b43_radio_write(dev, B2062_N_LGENA_CTL5, chandata->data[5]); + b43_radio_write(dev, B2062_N_LGENA_CTL6, chandata->data[6]); + b43_radio_write(dev, B2062_N_TX_PGA, chandata->data[7]); + b43_radio_write(dev, B2062_N_TX_PAD, chandata->data[8]); + + tmp1 = crystal_freq / 1000; + tmp2 = lpphy->pdiv * 1000; + b43_radio_write(dev, B2062_S_RFPLL_CTL33, 0xCC); + b43_radio_write(dev, B2062_S_RFPLL_CTL34, 0x07); + lpphy_b2062_reset_pll_bias(dev); + tmp3 = tmp2 * channel2freq_lp(channel); + if (channel2freq_lp(channel) < 4000) + tmp3 *= 2; + tmp4 = 48 * 
tmp1; + tmp6 = tmp3 / tmp4; + tmp7 = tmp3 % tmp4; + b43_radio_write(dev, B2062_S_RFPLL_CTL26, tmp6); + tmp5 = tmp7 * 0x100; + tmp6 = tmp5 / tmp4; + tmp7 = tmp5 % tmp4; + b43_radio_write(dev, B2062_S_RFPLL_CTL27, tmp6); + tmp5 = tmp7 * 0x100; + tmp6 = tmp5 / tmp4; + tmp7 = tmp5 % tmp4; + b43_radio_write(dev, B2062_S_RFPLL_CTL28, tmp6); + tmp5 = tmp7 * 0x100; + tmp6 = tmp5 / tmp4; + tmp7 = tmp5 % tmp4; + b43_radio_write(dev, B2062_S_RFPLL_CTL29, tmp6 + ((2 * tmp7) / tmp4)); + tmp8 = b43_radio_read(dev, B2062_S_RFPLL_CTL19); + tmp9 = ((2 * tmp3 * (tmp8 + 1)) + (3 * tmp1)) / (6 * tmp1); + b43_radio_write(dev, B2062_S_RFPLL_CTL23, (tmp9 >> 8) + 16); + b43_radio_write(dev, B2062_S_RFPLL_CTL24, tmp9 & 0xFF); + + lpphy_b2062_vco_calib(dev); + if (b43_radio_read(dev, B2062_S_RFPLL_CTL3) & 0x10) { + b43_radio_write(dev, B2062_S_RFPLL_CTL33, 0xFC); + b43_radio_write(dev, B2062_S_RFPLL_CTL34, 0); + lpphy_b2062_reset_pll_bias(dev); + lpphy_b2062_vco_calib(dev); + if (b43_radio_read(dev, B2062_S_RFPLL_CTL3) & 0x10) + err = -EIO; + } + + b43_radio_mask(dev, B2062_S_RFPLL_CTL14, ~0x04); + return err; +} + +static void lpphy_b2063_vco_calib(struct b43_wldev *dev) +{ + u16 tmp; + + b43_radio_mask(dev, B2063_PLL_SP1, ~0x40); + tmp = b43_radio_read(dev, B2063_PLL_JTAG_CALNRST) & 0xF8; + b43_radio_write(dev, B2063_PLL_JTAG_CALNRST, tmp); + udelay(1); + b43_radio_write(dev, B2063_PLL_JTAG_CALNRST, tmp | 0x4); + udelay(1); + b43_radio_write(dev, B2063_PLL_JTAG_CALNRST, tmp | 0x6); + udelay(1); + b43_radio_write(dev, B2063_PLL_JTAG_CALNRST, tmp | 0x7); + udelay(300); + b43_radio_set(dev, B2063_PLL_SP1, 0x40); +} + +static int lpphy_b2063_tune(struct b43_wldev *dev, + unsigned int channel) +{ + struct ssb_bus *bus = dev->dev->bus; + + static const struct b206x_channel *chandata = NULL; + u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; + u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count; + u16 old_comm15, scale; + u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; + int i, div = (crystal_freq <= 26000000 ? 
1 : 2); + + for (i = 0; i < ARRAY_SIZE(b2063_chantbl); i++) { + if (b2063_chantbl[i].channel == channel) { + chandata = &b2063_chantbl[i]; + break; + } + } + + if (B43_WARN_ON(!chandata)) + return -EINVAL; + + b43_radio_write(dev, B2063_LOGEN_VCOBUF1, chandata->data[0]); + b43_radio_write(dev, B2063_LOGEN_MIXER2, chandata->data[1]); + b43_radio_write(dev, B2063_LOGEN_BUF2, chandata->data[2]); + b43_radio_write(dev, B2063_LOGEN_RCCR1, chandata->data[3]); + b43_radio_write(dev, B2063_A_RX_1ST3, chandata->data[4]); + b43_radio_write(dev, B2063_A_RX_2ND1, chandata->data[5]); + b43_radio_write(dev, B2063_A_RX_2ND4, chandata->data[6]); + b43_radio_write(dev, B2063_A_RX_2ND7, chandata->data[7]); + b43_radio_write(dev, B2063_A_RX_PS6, chandata->data[8]); + b43_radio_write(dev, B2063_TX_RF_CTL2, chandata->data[9]); + b43_radio_write(dev, B2063_TX_RF_CTL5, chandata->data[10]); + b43_radio_write(dev, B2063_PA_CTL11, chandata->data[11]); + + old_comm15 = b43_radio_read(dev, B2063_COMM15); + b43_radio_set(dev, B2063_COMM15, 0x1E); + + if (chandata->freq > 4000) /* spec says 2484, but 4000 is safer */ + vco_freq = chandata->freq << 1; + else + vco_freq = chandata->freq << 2; + + freqref = crystal_freq * 3; + val1 = lpphy_qdiv_roundup(crystal_freq, 1000000, 16); + val2 = lpphy_qdiv_roundup(crystal_freq, 1000000 * div, 16); + val3 = lpphy_qdiv_roundup(vco_freq, 3, 16); + timeout = ((((8 * crystal_freq) / (div * 5000000)) + 1) >> 1) - 1; + b43_radio_write(dev, B2063_PLL_JTAG_PLL_VCO_CALIB3, 0x2); + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_VCO_CALIB6, + 0xFFF8, timeout >> 2); + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_VCO_CALIB7, + 0xFF9F,timeout << 5); + + timeoutref = ((((8 * crystal_freq) / (div * (timeout + 1))) + + 999999) / 1000000) + 1; + b43_radio_write(dev, B2063_PLL_JTAG_PLL_VCO_CALIB5, timeoutref); + + count = lpphy_qdiv_roundup(val3, val2 + 16, 16); + count *= (timeout + 1) * (timeoutref + 1); + count--; + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_VCO_CALIB7, + 0xF0, count >> 8); + b43_radio_write(dev, B2063_PLL_JTAG_PLL_VCO_CALIB8, count & 0xFF); + + tmp1 = ((val3 * 62500) / freqref) << 4; + tmp2 = ((val3 * 62500) % freqref) << 4; + while (tmp2 >= freqref) { + tmp1++; + tmp2 -= freqref; + } + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_SG1, 0xFFE0, tmp1 >> 4); + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_SG2, 0xFE0F, tmp1 << 4); + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_SG2, 0xFFF0, tmp1 >> 16); + b43_radio_write(dev, B2063_PLL_JTAG_PLL_SG3, (tmp2 >> 8) & 0xFF); + b43_radio_write(dev, B2063_PLL_JTAG_PLL_SG4, tmp2 & 0xFF); + + b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF1, 0xB9); + b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF2, 0x88); + b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF3, 0x28); + b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF4, 0x63); + + tmp3 = ((41 * (val3 - 3000)) /1200) + 27; + tmp4 = lpphy_qdiv_roundup(132000 * tmp1, 8451, 16); + + if ((tmp4 + tmp3 - 1) / tmp3 > 60) { + scale = 1; + tmp5 = ((tmp4 + tmp3) / (tmp3 << 1)) - 8; + } else { + scale = 0; + tmp5 = ((tmp4 + (tmp3 >> 1)) / tmp3) - 8; + } + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP2, 0xFFC0, tmp5); + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP2, 0xFFBF, scale << 6); + + tmp6 = lpphy_qdiv_roundup(100 * val1, val3, 16); + tmp6 *= (tmp5 * 8) * (scale + 1); + if (tmp6 > 150) + tmp6 = 0; + + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP3, 0xFFE0, tmp6); + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP3, 0xFFDF, scale << 5); + + b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_XTAL_12, 0xFFFB, 0x4); + if (crystal_freq > 26000000) + 
b43_radio_set(dev, B2063_PLL_JTAG_PLL_XTAL_12, 0x2); + else + b43_radio_mask(dev, B2063_PLL_JTAG_PLL_XTAL_12, 0xFD); + + if (val1 == 45) + b43_radio_set(dev, B2063_PLL_JTAG_PLL_VCO1, 0x2); + else + b43_radio_mask(dev, B2063_PLL_JTAG_PLL_VCO1, 0xFD); + + b43_radio_set(dev, B2063_PLL_SP2, 0x3); + udelay(1); + b43_radio_mask(dev, B2063_PLL_SP2, 0xFFFC); + lpphy_b2063_vco_calib(dev); + b43_radio_write(dev, B2063_COMM15, old_comm15); + + return 0; +} + +static int b43_lpphy_op_switch_channel(struct b43_wldev *dev, + unsigned int new_channel) +{ + struct b43_phy_lp *lpphy = dev->phy.lp; + int err; + + if (dev->phy.radio_ver == 0x2063) { + err = lpphy_b2063_tune(dev, new_channel); + if (err) + return err; + } else { + err = lpphy_b2062_tune(dev, new_channel); + if (err) + return err; + lpphy_set_analog_filter(dev, new_channel); + lpphy_adjust_gain_table(dev, channel2freq_lp(new_channel)); + } + + lpphy->channel = new_channel; + b43_write16(dev, B43_MMIO_CHANNEL, new_channel); + + return 0; +} + +static int b43_lpphy_op_init(struct b43_wldev *dev) +{ + int err; + + lpphy_read_band_sprom(dev); //FIXME should this be in prepare_structs? + lpphy_baseband_init(dev); + lpphy_radio_init(dev); + lpphy_calibrate_rc(dev); + err = b43_lpphy_op_switch_channel(dev, 7); + if (err) { + b43dbg(dev->wl, "Switch to channel 7 failed, error = %d.\n", + err); + } + lpphy_tx_pctl_init(dev); + lpphy_calibration(dev); + //TODO ACI init + + return 0; +} + +static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev) +{ + //TODO +} + +static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev, + bool ignore_tssi) +{ + //TODO + return B43_TXPWR_RES_DONE; +} + +void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on) +{ + if (on) { + b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xfff8); + } else { + b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0x0007); + b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x0007); + } +} + +static void b43_lpphy_op_pwork_15sec(struct b43_wldev *dev) +{ + //TODO +} + +const struct b43_phy_operations b43_phyops_lp = { + .allocate = b43_lpphy_op_allocate, + .free = b43_lpphy_op_free, + .prepare_structs = b43_lpphy_op_prepare_structs, + .init = b43_lpphy_op_init, + .phy_read = b43_lpphy_op_read, + .phy_write = b43_lpphy_op_write, + .phy_maskset = b43_lpphy_op_maskset, + .radio_read = b43_lpphy_op_radio_read, + .radio_write = b43_lpphy_op_radio_write, + .software_rfkill = b43_lpphy_op_software_rfkill, + .switch_analog = b43_lpphy_op_switch_analog, + .switch_channel = b43_lpphy_op_switch_channel, + .get_default_chan = b43_lpphy_op_get_default_chan, + .set_rx_antenna = b43_lpphy_op_set_rx_antenna, + .recalc_txpower = b43_lpphy_op_recalc_txpower, + .adjust_txpower = b43_lpphy_op_adjust_txpower, + .pwork_15sec = b43_lpphy_op_pwork_15sec, + .pwork_60sec = lpphy_calibration, +}; diff --git a/drivers/net/wireless/b43/phy_lp.h b/drivers/net/wireless/b43/phy_lp.h new file mode 100644 index 0000000..62737f7 --- /dev/null +++ b/drivers/net/wireless/b43/phy_lp.h @@ -0,0 +1,912 @@ +#ifndef LINUX_B43_PHY_LP_H_ +#define LINUX_B43_PHY_LP_H_ + +/* Definitions for the LP-PHY */ + + +/* The CCK PHY register range. 
*/ +#define B43_LPPHY_B_VERSION B43_PHY_CCK(0x00) /* B PHY version */ +#define B43_LPPHY_B_BBCONFIG B43_PHY_CCK(0x01) /* B PHY BBConfig */ +#define B43_LPPHY_B_RX_STAT0 B43_PHY_CCK(0x04) /* B PHY RX Status0 */ +#define B43_LPPHY_B_RX_STAT1 B43_PHY_CCK(0x05) /* B PHY RX Status1 */ +#define B43_LPPHY_B_CRS_THRESH B43_PHY_CCK(0x06) /* B PHY CRS Thresh */ +#define B43_LPPHY_B_TXERROR B43_PHY_CCK(0x07) /* B PHY TxError */ +#define B43_LPPHY_B_CHANNEL B43_PHY_CCK(0x08) /* B PHY Channel */ +#define B43_LPPHY_B_WORKAROUND B43_PHY_CCK(0x09) /* B PHY workaround */ +#define B43_LPPHY_B_TEST B43_PHY_CCK(0x0A) /* B PHY Test */ +#define B43_LPPHY_B_FOURWIRE_ADDR B43_PHY_CCK(0x0B) /* B PHY Fourwire Address */ +#define B43_LPPHY_B_FOURWIRE_DATA_HI B43_PHY_CCK(0x0C) /* B PHY Fourwire Data Hi */ +#define B43_LPPHY_B_FOURWIRE_DATA_LO B43_PHY_CCK(0x0D) /* B PHY Fourwire Data Lo */ +#define B43_LPPHY_B_BIST_STAT B43_PHY_CCK(0x0E) /* B PHY Bist Status */ +#define B43_LPPHY_PA_RAMP_TX_TO B43_PHY_CCK(0x10) /* PA Ramp TX Timeout */ +#define B43_LPPHY_RF_SYNTH_DC_TIMER B43_PHY_CCK(0x11) /* RF Synth DC Timer */ +#define B43_LPPHY_PA_RAMP_TX_TIME_IN B43_PHY_CCK(0x12) /* PA ramp TX Time in */ +#define B43_LPPHY_RX_FILTER_TIME_IN B43_PHY_CCK(0x13) /* RX Filter Time in */ +#define B43_LPPHY_PLL_COEFF_S B43_PHY_CCK(0x18) /* PLL Coefficient(s) */ +#define B43_LPPHY_PLL_OUT B43_PHY_CCK(0x19) /* PLL Out */ +#define B43_LPPHY_RSSI_THRES B43_PHY_CCK(0x20) /* RSSI Threshold */ +#define B43_LPPHY_IQ_THRES_HH B43_PHY_CCK(0x21) /* IQ Threshold HH */ +#define B43_LPPHY_IQ_THRES_H B43_PHY_CCK(0x22) /* IQ Threshold H */ +#define B43_LPPHY_IQ_THRES_L B43_PHY_CCK(0x23) /* IQ Threshold L */ +#define B43_LPPHY_IQ_THRES_LL B43_PHY_CCK(0x24) /* IQ Threshold LL */ +#define B43_LPPHY_AGC_GAIN B43_PHY_CCK(0x25) /* AGC Gain */ +#define B43_LPPHY_LNA_GAIN_RANGE B43_PHY_CCK(0x26) /* LNA Gain Range */ +#define B43_LPPHY_JSSI B43_PHY_CCK(0x27) /* JSSI */ +#define B43_LPPHY_TSSI_CTL B43_PHY_CCK(0x28) /* TSSI Control */ +#define B43_LPPHY_TSSI B43_PHY_CCK(0x29) /* TSSI */ +#define B43_LPPHY_TR_LOSS B43_PHY_CCK(0x2A) /* TR Loss */ +#define B43_LPPHY_LO_LEAKAGE B43_PHY_CCK(0x2B) /* LO Leakage */ +#define B43_LPPHY_LO_RSSIACC B43_PHY_CCK(0x2C) /* LO RSSIAcc */ +#define B43_LPPHY_LO_IQ_MAG_ACC B43_PHY_CCK(0x2D) /* LO IQ Mag Acc */ +#define B43_LPPHY_TX_DCOFFSET1 B43_PHY_CCK(0x2E) /* TX DCOffset1 */ +#define B43_LPPHY_TX_DCOFFSET2 B43_PHY_CCK(0x2F) /* TX DCOffset2 */ +#define B43_LPPHY_SYNCPEAKCNT B43_PHY_CCK(0x30) /* SyncPeakCnt */ +#define B43_LPPHY_SYNCFREQ B43_PHY_CCK(0x31) /* SyncFreq */ +#define B43_LPPHY_SYNCDIVERSITYCTL B43_PHY_CCK(0x32) /* SyncDiversityControl */ +#define B43_LPPHY_PEAKENERGYL B43_PHY_CCK(0x33) /* PeakEnergyL */ +#define B43_LPPHY_PEAKENERGYH B43_PHY_CCK(0x34) /* PeakEnergyH */ +#define B43_LPPHY_SYNCCTL B43_PHY_CCK(0x35) /* SyncControl */ +#define B43_LPPHY_DSSSSTEP B43_PHY_CCK(0x38) /* DsssStep */ +#define B43_LPPHY_DSSSWARMUP B43_PHY_CCK(0x39) /* DsssWarmup */ +#define B43_LPPHY_DSSSSIGPOW B43_PHY_CCK(0x3D) /* DsssSigPow */ +#define B43_LPPHY_SFDDETECTBLOCKTIME B43_PHY_CCK(0x40) /* SfdDetectBlockTIme */ +#define B43_LPPHY_SFDTO B43_PHY_CCK(0x41) /* SFDTimeOut */ +#define B43_LPPHY_SFDCTL B43_PHY_CCK(0x42) /* SFDControl */ +#define B43_LPPHY_RXDBG B43_PHY_CCK(0x43) /* rxDebug */ +#define B43_LPPHY_RX_DELAYCOMP B43_PHY_CCK(0x44) /* RX DelayComp */ +#define B43_LPPHY_CRSDROPOUTTO B43_PHY_CCK(0x45) /* CRSDropoutTimeout */ +#define B43_LPPHY_PSEUDOSHORTTO B43_PHY_CCK(0x46) /* PseudoShortTimeout */ +#define B43_LPPHY_PR3931 
B43_PHY_CCK(0x47) /* PR3931 */ +#define B43_LPPHY_DSSSCOEFF1 B43_PHY_CCK(0x48) /* DSSSCoeff1 */ +#define B43_LPPHY_DSSSCOEFF2 B43_PHY_CCK(0x49) /* DSSSCoeff2 */ +#define B43_LPPHY_CCKCOEFF1 B43_PHY_CCK(0x4A) /* CCKCoeff1 */ +#define B43_LPPHY_CCKCOEFF2 B43_PHY_CCK(0x4B) /* CCKCoeff2 */ +#define B43_LPPHY_TRCORR B43_PHY_CCK(0x4C) /* TRCorr */ +#define B43_LPPHY_ANGLESCALE B43_PHY_CCK(0x4D) /* AngleScale */ +#define B43_LPPHY_OPTIONALMODES2 B43_PHY_CCK(0x4F) /* OptionalModes2 */ +#define B43_LPPHY_CCKLMSSTEPSIZE B43_PHY_CCK(0x50) /* CCKLMSStepSize */ +#define B43_LPPHY_DFEBYPASS B43_PHY_CCK(0x51) /* DFEBypass */ +#define B43_LPPHY_CCKSTARTDELAYLONG B43_PHY_CCK(0x52) /* CCKStartDelayLong */ +#define B43_LPPHY_CCKSTARTDELAYSHORT B43_PHY_CCK(0x53) /* CCKStartDelayShort */ +#define B43_LPPHY_PPROCCHDELAY B43_PHY_CCK(0x54) /* PprocChDelay */ +#define B43_LPPHY_PPROCONOFF B43_PHY_CCK(0x55) /* PProcOnOff */ +#define B43_LPPHY_LNAGAINTWOBIT10 B43_PHY_CCK(0x5B) /* LNAGainTwoBit10 */ +#define B43_LPPHY_LNAGAINTWOBIT32 B43_PHY_CCK(0x5C) /* LNAGainTwoBit32 */ +#define B43_LPPHY_OPTIONALMODES B43_PHY_CCK(0x5D) /* OptionalModes */ +#define B43_LPPHY_B_RX_STAT2 B43_PHY_CCK(0x5E) /* B PHY RX Status2 */ +#define B43_LPPHY_B_RX_STAT3 B43_PHY_CCK(0x5F) /* B PHY RX Status3 */ +#define B43_LPPHY_PWDNDACDELAY B43_PHY_CCK(0x63) /* pwdnDacDelay */ +#define B43_LPPHY_FINEDIGIGAIN_CTL B43_PHY_CCK(0x67) /* FineDigiGain Control */ +#define B43_LPPHY_LG2GAINTBLLNA8 B43_PHY_CCK(0x68) /* Lg2GainTblLNA8 */ +#define B43_LPPHY_LG2GAINTBLLNA28 B43_PHY_CCK(0x69) /* Lg2GainTblLNA28 */ +#define B43_LPPHY_GAINTBLLNATRSW B43_PHY_CCK(0x6A) /* GainTblLNATrSw */ +#define B43_LPPHY_PEAKENERGY B43_PHY_CCK(0x6B) /* PeakEnergy */ +#define B43_LPPHY_LG2INITGAIN B43_PHY_CCK(0x6C) /* lg2InitGain */ +#define B43_LPPHY_BLANKCOUNTLNAPGA B43_PHY_CCK(0x6D) /* BlankCountLnaPga */ +#define B43_LPPHY_LNAGAINTWOBIT54 B43_PHY_CCK(0x6E) /* LNAGainTwoBit54 */ +#define B43_LPPHY_LNAGAINTWOBIT76 B43_PHY_CCK(0x6F) /* LNAGainTwoBit76 */ +#define B43_LPPHY_JSSICTL B43_PHY_CCK(0x70) /* JSSIControl */ +#define B43_LPPHY_LG2GAINTBLLNA44 B43_PHY_CCK(0x71) /* Lg2GainTblLNA44 */ +#define B43_LPPHY_LG2GAINTBLLNA62 B43_PHY_CCK(0x72) /* Lg2GainTblLNA62 */ + +/* The OFDM PHY register range. 
*/ +#define B43_LPPHY_VERSION B43_PHY_OFDM(0x00) /* Version */ +#define B43_LPPHY_BBCONFIG B43_PHY_OFDM(0x01) /* BBConfig */ +#define B43_LPPHY_RX_STAT0 B43_PHY_OFDM(0x04) /* RX Status0 */ +#define B43_LPPHY_RX_STAT1 B43_PHY_OFDM(0x05) /* RX Status1 */ +#define B43_LPPHY_TX_ERROR B43_PHY_OFDM(0x07) /* TX Error */ +#define B43_LPPHY_CHANNEL B43_PHY_OFDM(0x08) /* Channel */ +#define B43_LPPHY_WORKAROUND B43_PHY_OFDM(0x09) /* workaround */ +#define B43_LPPHY_FOURWIRE_ADDR B43_PHY_OFDM(0x0B) /* Fourwire Address */ +#define B43_LPPHY_FOURWIREDATAHI B43_PHY_OFDM(0x0C) /* FourwireDataHi */ +#define B43_LPPHY_FOURWIREDATALO B43_PHY_OFDM(0x0D) /* FourwireDataLo */ +#define B43_LPPHY_BISTSTAT0 B43_PHY_OFDM(0x0E) /* BistStatus0 */ +#define B43_LPPHY_BISTSTAT1 B43_PHY_OFDM(0x0F) /* BistStatus1 */ +#define B43_LPPHY_CRSGAIN_CTL B43_PHY_OFDM(0x10) /* crsgain Control */ +#define B43_LPPHY_OFDMPWR_THRESH0 B43_PHY_OFDM(0x11) /* ofdmPower Thresh0 */ +#define B43_LPPHY_OFDMPWR_THRESH1 B43_PHY_OFDM(0x12) /* ofdmPower Thresh1 */ +#define B43_LPPHY_OFDMPWR_THRESH2 B43_PHY_OFDM(0x13) /* ofdmPower Thresh2 */ +#define B43_LPPHY_DSSSPWR_THRESH0 B43_PHY_OFDM(0x14) /* dsssPower Thresh0 */ +#define B43_LPPHY_DSSSPWR_THRESH1 B43_PHY_OFDM(0x15) /* dsssPower Thresh1 */ +#define B43_LPPHY_MINPWR_LEVEL B43_PHY_OFDM(0x16) /* MinPower Level */ +#define B43_LPPHY_OFDMSYNCTHRESH0 B43_PHY_OFDM(0x17) /* ofdmSyncThresh0 */ +#define B43_LPPHY_OFDMSYNCTHRESH1 B43_PHY_OFDM(0x18) /* ofdmSyncThresh1 */ +#define B43_LPPHY_FINEFREQEST B43_PHY_OFDM(0x19) /* FineFreqEst */ +#define B43_LPPHY_IDLEAFTERPKTRXTO B43_PHY_OFDM(0x1A) /* IDLEafterPktRXTimeout */ +#define B43_LPPHY_LTRN_CTL B43_PHY_OFDM(0x1B) /* LTRN Control */ +#define B43_LPPHY_DCOFFSETTRANSIENT B43_PHY_OFDM(0x1C) /* DCOffsetTransient */ +#define B43_LPPHY_PREAMBLEINTO B43_PHY_OFDM(0x1D) /* PreambleInTimeout */ +#define B43_LPPHY_PREAMBLECONFIRMTO B43_PHY_OFDM(0x1E) /* PreambleConfirmTimeout */ +#define B43_LPPHY_CLIPTHRESH B43_PHY_OFDM(0x1F) /* ClipThresh */ +#define B43_LPPHY_CLIPCTRTHRESH B43_PHY_OFDM(0x20) /* ClipCtrThresh */ +#define B43_LPPHY_OFDMSYNCTIMER_CTL B43_PHY_OFDM(0x21) /* ofdmSyncTimer Control */ +#define B43_LPPHY_WAITFORPHYSELTO B43_PHY_OFDM(0x22) /* WaitforPHYSelTimeout */ +#define B43_LPPHY_HIGAINDB B43_PHY_OFDM(0x23) /* HiGainDB */ +#define B43_LPPHY_LOWGAINDB B43_PHY_OFDM(0x24) /* LowGainDB */ +#define B43_LPPHY_VERYLOWGAINDB B43_PHY_OFDM(0x25) /* VeryLowGainDB */ +#define B43_LPPHY_GAINMISMATCH B43_PHY_OFDM(0x26) /* gainMismatch */ +#define B43_LPPHY_GAINDIRECTMISMATCH B43_PHY_OFDM(0x27) /* gaindirectMismatch */ +#define B43_LPPHY_PWR_THRESH0 B43_PHY_OFDM(0x28) /* Power Thresh0 */ +#define B43_LPPHY_PWR_THRESH1 B43_PHY_OFDM(0x29) /* Power Thresh1 */ +#define B43_LPPHY_DETECTOR_DELAY_ADJUST B43_PHY_OFDM(0x2A) /* Detector Delay Adjust */ +#define B43_LPPHY_REDUCED_DETECTOR_DELAY B43_PHY_OFDM(0x2B) /* Reduced Detector Delay */ +#define B43_LPPHY_DATA_TO B43_PHY_OFDM(0x2C) /* data Timeout */ +#define B43_LPPHY_CORRELATOR_DIS_DELAY B43_PHY_OFDM(0x2D) /* correlator Dis Delay */ +#define B43_LPPHY_DIVERSITY_GAINBACK B43_PHY_OFDM(0x2E) /* Diversity GainBack */ +#define B43_LPPHY_DSSS_CONFIRM_CNT B43_PHY_OFDM(0x2F) /* DSSS Confirm Cnt */ +#define B43_LPPHY_DC_BLANK_INT B43_PHY_OFDM(0x30) /* DC Blank Interval */ +#define B43_LPPHY_GAIN_MISMATCH_LIMIT B43_PHY_OFDM(0x31) /* gain Mismatch Limit */ +#define B43_LPPHY_CRS_ED_THRESH B43_PHY_OFDM(0x32) /* crs ed thresh */ +#define B43_LPPHY_PHASE_SHIFT_CTL B43_PHY_OFDM(0x33) /* phase shift Control */ +#define 
B43_LPPHY_INPUT_PWRDB B43_PHY_OFDM(0x34) /* Input PowerDB */ +#define B43_LPPHY_OFDM_SYNC_CTL B43_PHY_OFDM(0x35) /* ofdm sync Control */ +#define B43_LPPHY_AFE_ADC_CTL_0 B43_PHY_OFDM(0x36) /* Afe ADC Control 0 */ +#define B43_LPPHY_AFE_ADC_CTL_1 B43_PHY_OFDM(0x37) /* Afe ADC Control 1 */ +#define B43_LPPHY_AFE_ADC_CTL_2 B43_PHY_OFDM(0x38) /* Afe ADC Control 2 */ +#define B43_LPPHY_AFE_DAC_CTL B43_PHY_OFDM(0x39) /* Afe DAC Control */ +#define B43_LPPHY_AFE_CTL B43_PHY_OFDM(0x3A) /* Afe Control */ +#define B43_LPPHY_AFE_CTL_OVR B43_PHY_OFDM(0x3B) /* Afe Control Ovr */ +#define B43_LPPHY_AFE_CTL_OVRVAL B43_PHY_OFDM(0x3C) /* Afe Control OvrVal */ +#define B43_LPPHY_AFE_RSSI_CTL_0 B43_PHY_OFDM(0x3D) /* Afe RSSI Control 0 */ +#define B43_LPPHY_AFE_RSSI_CTL_1 B43_PHY_OFDM(0x3E) /* Afe RSSI Control 1 */ +#define B43_LPPHY_AFE_RSSI_SEL B43_PHY_OFDM(0x3F) /* Afe RSSI Sel */ +#define B43_LPPHY_RADAR_THRESH B43_PHY_OFDM(0x40) /* Radar Thresh */ +#define B43_LPPHY_RADAR_BLANK_INT B43_PHY_OFDM(0x41) /* Radar blank Interval */ +#define B43_LPPHY_RADAR_MIN_FM_INT B43_PHY_OFDM(0x42) /* Radar min fm Interval */ +#define B43_LPPHY_RADAR_GAIN_TO B43_PHY_OFDM(0x43) /* Radar gain timeout */ +#define B43_LPPHY_RADAR_PULSE_TO B43_PHY_OFDM(0x44) /* Radar pulse timeout */ +#define B43_LPPHY_RADAR_DETECT_FM_CTL B43_PHY_OFDM(0x45) /* Radar detect FM Control */ +#define B43_LPPHY_RADAR_DETECT_EN B43_PHY_OFDM(0x46) /* Radar detect En */ +#define B43_LPPHY_RADAR_RD_DATA_REG B43_PHY_OFDM(0x47) /* Radar Rd Data Reg */ +#define B43_LPPHY_LP_PHY_CTL B43_PHY_OFDM(0x48) /* LP PHY Control */ +#define B43_LPPHY_CLASSIFIER_CTL B43_PHY_OFDM(0x49) /* classifier Control */ +#define B43_LPPHY_RESET_CTL B43_PHY_OFDM(0x4A) /* reset Control */ +#define B43_LPPHY_CLKEN_CTL B43_PHY_OFDM(0x4B) /* ClkEn Control */ +#define B43_LPPHY_RF_OVERRIDE_0 B43_PHY_OFDM(0x4C) /* RF Override 0 */ +#define B43_LPPHY_RF_OVERRIDE_VAL_0 B43_PHY_OFDM(0x4D) /* RF Override Val 0 */ +#define B43_LPPHY_TR_LOOKUP_1 B43_PHY_OFDM(0x4E) /* TR Lookup 1 */ +#define B43_LPPHY_TR_LOOKUP_2 B43_PHY_OFDM(0x4F) /* TR Lookup 2 */ +#define B43_LPPHY_RSSISELLOOKUP1 B43_PHY_OFDM(0x50) /* RssiSelLookup1 */ +#define B43_LPPHY_IQLO_CAL_CMD B43_PHY_OFDM(0x51) /* iqlo Cal Cmd */ +#define B43_LPPHY_IQLO_CAL_CMD_N_NUM B43_PHY_OFDM(0x52) /* iqlo Cal Cmd N num */ +#define B43_LPPHY_IQLO_CAL_CMD_G_CTL B43_PHY_OFDM(0x53) /* iqlo Cal Cmd G control */ +#define B43_LPPHY_MACINT_DBG_REGISTER B43_PHY_OFDM(0x54) /* macint Debug Register */ +#define B43_LPPHY_TABLE_ADDR B43_PHY_OFDM(0x55) /* Table Address */ +#define B43_LPPHY_TABLEDATALO B43_PHY_OFDM(0x56) /* TabledataLo */ +#define B43_LPPHY_TABLEDATAHI B43_PHY_OFDM(0x57) /* TabledataHi */ +#define B43_LPPHY_PHY_CRS_ENABLE_ADDR B43_PHY_OFDM(0x58) /* phy CRS Enable Address */ +#define B43_LPPHY_IDLETIME_CTL B43_PHY_OFDM(0x59) /* Idletime Control */ +#define B43_LPPHY_IDLETIME_CRS_ON_LO B43_PHY_OFDM(0x5A) /* Idletime CRS On Lo */ +#define B43_LPPHY_IDLETIME_CRS_ON_HI B43_PHY_OFDM(0x5B) /* Idletime CRS On Hi */ +#define B43_LPPHY_IDLETIME_MEAS_TIME_LO B43_PHY_OFDM(0x5C) /* Idletime Meas Time Lo */ +#define B43_LPPHY_IDLETIME_MEAS_TIME_HI B43_PHY_OFDM(0x5D) /* Idletime Meas Time Hi */ +#define B43_LPPHY_RESET_LEN_OFDM_TX_ADDR B43_PHY_OFDM(0x5E) /* Reset len Ofdm TX Address */ +#define B43_LPPHY_RESET_LEN_OFDM_RX_ADDR B43_PHY_OFDM(0x5F) /* Reset len Ofdm RX Address */ +#define B43_LPPHY_REG_CRS_ENABLE B43_PHY_OFDM(0x60) /* reg crs enable */ +#define B43_LPPHY_PLCP_TMT_STR0_CTR_MIN B43_PHY_OFDM(0x61) /* PLCP Tmt Str0 Ctr Min */ +#define 
B43_LPPHY_PKT_FSM_RESET_LEN_VAL B43_PHY_OFDM(0x62) /* Pkt fsm Reset Len Value */ +#define B43_LPPHY_READSYM2RESET_CTL B43_PHY_OFDM(0x63) /* readsym2reset Control */ +#define B43_LPPHY_DC_FILTER_DELAY1 B43_PHY_OFDM(0x64) /* Dc filter delay1 */ +#define B43_LPPHY_PACKET_RX_ACTIVE_TO B43_PHY_OFDM(0x65) /* packet rx Active timeout */ +#define B43_LPPHY_ED_TOVAL B43_PHY_OFDM(0x66) /* ed timeoutValue */ +#define B43_LPPHY_HOLD_CRS_ON_VAL B43_PHY_OFDM(0x67) /* hold CRS On Value */ +#define B43_LPPHY_OFDM_TX_PHY_CRS_DELAY_VAL B43_PHY_OFDM(0x69) /* ofdm tx phy CRS Delay Value */ +#define B43_LPPHY_CCK_TX_PHY_CRS_DELAY_VAL B43_PHY_OFDM(0x6A) /* cck tx phy CRS Delay Value */ +#define B43_LPPHY_ED_ON_CONFIRM_TIMER_VAL B43_PHY_OFDM(0x6B) /* Ed on confirm Timer Value */ +#define B43_LPPHY_ED_OFFSET_CONFIRM_TIMER_VAL B43_PHY_OFDM(0x6C) /* Ed offset confirm Timer Value */ +#define B43_LPPHY_PHY_CRS_OFFSET_TIMER_VAL B43_PHY_OFDM(0x6D) /* phy CRS offset Timer Value */ +#define B43_LPPHY_ADC_COMPENSATION_CTL B43_PHY_OFDM(0x70) /* ADC Compensation Control */ +#define B43_LPPHY_LOG2_RBPSK_ADDR B43_PHY_OFDM(0x71) /* log2 RBPSK Address */ +#define B43_LPPHY_LOG2_RQPSK_ADDR B43_PHY_OFDM(0x72) /* log2 RQPSK Address */ +#define B43_LPPHY_LOG2_R16QAM_ADDR B43_PHY_OFDM(0x73) /* log2 R16QAM Address */ +#define B43_LPPHY_LOG2_R64QAM_ADDR B43_PHY_OFDM(0x74) /* log2 R64QAM Address */ +#define B43_LPPHY_OFFSET_BPSK_ADDR B43_PHY_OFDM(0x75) /* offset BPSK Address */ +#define B43_LPPHY_OFFSET_QPSK_ADDR B43_PHY_OFDM(0x76) /* offset QPSK Address */ +#define B43_LPPHY_OFFSET_16QAM_ADDR B43_PHY_OFDM(0x77) /* offset 16QAM Address */ +#define B43_LPPHY_OFFSET_64QAM_ADDR B43_PHY_OFDM(0x78) /* offset 64QAM Address */ +#define B43_LPPHY_ALPHA1 B43_PHY_OFDM(0x79) /* Alpha1 */ +#define B43_LPPHY_ALPHA2 B43_PHY_OFDM(0x7A) /* Alpha2 */ +#define B43_LPPHY_BETA1 B43_PHY_OFDM(0x7B) /* Beta1 */ +#define B43_LPPHY_BETA2 B43_PHY_OFDM(0x7C) /* Beta2 */ +#define B43_LPPHY_LOOP_NUM_ADDR B43_PHY_OFDM(0x7D) /* Loop Num Address */ +#define B43_LPPHY_STR_COLLMAX_SMPL_ADDR B43_PHY_OFDM(0x7E) /* Str Collmax Sample Address */ +#define B43_LPPHY_MAX_SMPL_COARSE_FINE_ADDR B43_PHY_OFDM(0x7F) /* Max Sample Coarse/Fine Address */ +#define B43_LPPHY_MAX_SMPL_COARSE_STR0CTR_ADDR B43_PHY_OFDM(0x80) /* Max Sample Coarse/Str0Ctr Address */ +#define B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR B43_PHY_OFDM(0x81) /* IQ Enable Wait Time Address */ +#define B43_LPPHY_IQ_NUM_SMPLS_ADDR B43_PHY_OFDM(0x82) /* IQ Num Samples Address */ +#define B43_LPPHY_IQ_ACC_HI_ADDR B43_PHY_OFDM(0x83) /* IQ Acc Hi Address */ +#define B43_LPPHY_IQ_ACC_LO_ADDR B43_PHY_OFDM(0x84) /* IQ Acc Lo Address */ +#define B43_LPPHY_IQ_I_PWR_ACC_HI_ADDR B43_PHY_OFDM(0x85) /* IQ I PWR Acc Hi Address */ +#define B43_LPPHY_IQ_I_PWR_ACC_LO_ADDR B43_PHY_OFDM(0x86) /* IQ I PWR Acc Lo Address */ +#define B43_LPPHY_IQ_Q_PWR_ACC_HI_ADDR B43_PHY_OFDM(0x87) /* IQ Q PWR Acc Hi Address */ +#define B43_LPPHY_IQ_Q_PWR_ACC_LO_ADDR B43_PHY_OFDM(0x88) /* IQ Q PWR Acc Lo Address */ +#define B43_LPPHY_MAXNUMSTEPS B43_PHY_OFDM(0x89) /* MaxNumsteps */ +#define B43_LPPHY_ROTORPHASE_ADDR B43_PHY_OFDM(0x8A) /* RotorPhase Address */ +#define B43_LPPHY_ADVANCEDRETARDROTOR_ADDR B43_PHY_OFDM(0x8B) /* AdvancedRetardRotor Address */ +#define B43_LPPHY_RSSIADCDELAY_CTL_ADDR B43_PHY_OFDM(0x8D) /* rssiAdcdelay Control Address */ +#define B43_LPPHY_TSSISTAT_ADDR B43_PHY_OFDM(0x8E) /* tssiStatus Address */ +#define B43_LPPHY_TEMPSENSESTAT_ADDR B43_PHY_OFDM(0x8F) /* tempsenseStatus Address */ +#define B43_LPPHY_TEMPSENSE_CTL_ADDR 
B43_PHY_OFDM(0x90) /* tempsense Control Address */ +#define B43_LPPHY_WRSSISTAT_ADDR B43_PHY_OFDM(0x91) /* wrssistatus Address */ +#define B43_LPPHY_MUFACTORADDR B43_PHY_OFDM(0x92) /* mufactoraddr */ +#define B43_LPPHY_SCRAMSTATE_ADDR B43_PHY_OFDM(0x93) /* scramstate Address */ +#define B43_LPPHY_TXHOLDOFFADDR B43_PHY_OFDM(0x94) /* txholdoffaddr */ +#define B43_LPPHY_PKTGAINVAL_ADDR B43_PHY_OFDM(0x95) /* pktgainval Address */ +#define B43_LPPHY_COARSEESTIM_ADDR B43_PHY_OFDM(0x96) /* Coarseestim Address */ +#define B43_LPPHY_STATE_TRANSITION_ADDR B43_PHY_OFDM(0x97) /* state Transition Address */ +#define B43_LPPHY_TRN_OFFSET_ADDR B43_PHY_OFDM(0x98) /* TRN offset Address */ +#define B43_LPPHY_NUM_ROTOR_ADDR B43_PHY_OFDM(0x99) /* Num Rotor Address */ +#define B43_LPPHY_VITERBI_OFFSET_ADDR B43_PHY_OFDM(0x9A) /* Viterbi Offset Address */ +#define B43_LPPHY_SMPL_COLLECT_WAIT_ADDR B43_PHY_OFDM(0x9B) /* Sample collect wait Address */ +#define B43_LPPHY_A_PHY_CTL_ADDR B43_PHY_OFDM(0x9C) /* A PHY Control Address */ +#define B43_LPPHY_NUM_PASS_THROUGH_ADDR B43_PHY_OFDM(0x9D) /* Num Pass Through Address */ +#define B43_LPPHY_RX_COMP_COEFF_S B43_PHY_OFDM(0x9E) /* RX Comp coefficient(s) */ +#define B43_LPPHY_CPAROTATEVAL B43_PHY_OFDM(0x9F) /* cpaRotateValue */ +#define B43_LPPHY_SMPL_PLAY_COUNT B43_PHY_OFDM(0xA0) /* Sample play count */ +#define B43_LPPHY_SMPL_PLAY_BUFFER_CTL B43_PHY_OFDM(0xA1) /* Sample play Buffer Control */ +#define B43_LPPHY_FOURWIRE_CTL B43_PHY_OFDM(0xA2) /* fourwire Control */ +#define B43_LPPHY_CPA_TAILCOUNT_VAL B43_PHY_OFDM(0xA3) /* CPA TailCount Value */ +#define B43_LPPHY_TX_PWR_CTL_CMD B43_PHY_OFDM(0xA4) /* TX Power Control Cmd */ +#define B43_LPPHY_TX_PWR_CTL_CMD_MODE 0xE000 /* TX power control mode mask */ +#define B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF 0x0000 /* TX power control is OFF */ +#define B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW 0x8000 /* TX power control is SOFTWARE */ +#define B43_LPPHY_TX_PWR_CTL_CMD_MODE_HW 0xE000 /* TX power control is HARDWARE */ +#define B43_LPPHY_TX_PWR_CTL_NNUM B43_PHY_OFDM(0xA5) /* TX Power Control Nnum */ +#define B43_LPPHY_TX_PWR_CTL_IDLETSSI B43_PHY_OFDM(0xA6) /* TX Power Control IdleTssi */ +#define B43_LPPHY_TX_PWR_CTL_TARGETPWR B43_PHY_OFDM(0xA7) /* TX Power Control TargetPower */ +#define B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT B43_PHY_OFDM(0xA8) /* TX Power Control DeltaPower Limit */ +#define B43_LPPHY_TX_PWR_CTL_BASEINDEX B43_PHY_OFDM(0xA9) /* TX Power Control BaseIndex */ +#define B43_LPPHY_TX_PWR_CTL_PWR_INDEX B43_PHY_OFDM(0xAA) /* TX Power Control Power Index */ +#define B43_LPPHY_TX_PWR_CTL_STAT B43_PHY_OFDM(0xAB) /* TX Power Control Status */ +#define B43_LPPHY_LP_RF_SIGNAL_LUT B43_PHY_OFDM(0xAC) /* LP RF signal LUT */ +#define B43_LPPHY_RX_RADIO_CTL_FILTER_STATE B43_PHY_OFDM(0xAD) /* RX Radio Control Filter State */ +#define B43_LPPHY_RX_RADIO_CTL B43_PHY_OFDM(0xAE) /* RX Radio Control */ +#define B43_LPPHY_NRSSI_STAT_ADDR B43_PHY_OFDM(0xAF) /* NRSSI status Address */ +#define B43_LPPHY_RF_OVERRIDE_2 B43_PHY_OFDM(0xB0) /* RF override 2 */ +#define B43_LPPHY_RF_OVERRIDE_2_VAL B43_PHY_OFDM(0xB1) /* RF override 2 val */ +#define B43_LPPHY_PS_CTL_OVERRIDE_VAL0 B43_PHY_OFDM(0xB2) /* PS Control override val0 */ +#define B43_LPPHY_PS_CTL_OVERRIDE_VAL1 B43_PHY_OFDM(0xB3) /* PS Control override val1 */ +#define B43_LPPHY_PS_CTL_OVERRIDE_VAL2 B43_PHY_OFDM(0xB4) /* PS Control override val2 */ +#define B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL B43_PHY_OFDM(0xB5) /* TX gain Control override val */ +#define B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL 
B43_PHY_OFDM(0xB6) /* RX gain Control override val */ +#define B43_LPPHY_AFE_DDFS B43_PHY_OFDM(0xB7) /* AFE DDFS */ +#define B43_LPPHY_AFE_DDFS_POINTER_INIT B43_PHY_OFDM(0xB8) /* AFE DDFS pointer init */ +#define B43_LPPHY_AFE_DDFS_INCR_INIT B43_PHY_OFDM(0xB9) /* AFE DDFS incr init */ +#define B43_LPPHY_MRCNOISEREDUCTION B43_PHY_OFDM(0xBA) /* mrcNoiseReduction */ +#define B43_LPPHY_TR_LOOKUP_3 B43_PHY_OFDM(0xBB) /* TR Lookup 3 */ +#define B43_LPPHY_TR_LOOKUP_4 B43_PHY_OFDM(0xBC) /* TR Lookup 4 */ +#define B43_LPPHY_RADAR_FIFO_STAT B43_PHY_OFDM(0xBD) /* Radar FIFO Status */ +#define B43_LPPHY_GPIO_OUTEN B43_PHY_OFDM(0xBE) /* GPIO Out enable */ +#define B43_LPPHY_GPIO_SELECT B43_PHY_OFDM(0xBF) /* GPIO Select */ +#define B43_LPPHY_GPIO_OUT B43_PHY_OFDM(0xC0) /* GPIO Out */ +#define B43_LPPHY_4C3 B43_PHY_OFDM(0xC3) /* unknown, used during BB init */ +#define B43_LPPHY_4C4 B43_PHY_OFDM(0xC4) /* unknown, used during BB init */ +#define B43_LPPHY_4C5 B43_PHY_OFDM(0xC5) /* unknown, used during BB init */ +#define B43_LPPHY_TR_LOOKUP_5 B43_PHY_OFDM(0xC7) /* TR Lookup 5 */ +#define B43_LPPHY_TR_LOOKUP_6 B43_PHY_OFDM(0xC8) /* TR Lookup 6 */ +#define B43_LPPHY_TR_LOOKUP_7 B43_PHY_OFDM(0xC9) /* TR Lookup 7 */ +#define B43_LPPHY_TR_LOOKUP_8 B43_PHY_OFDM(0xCA) /* TR Lookup 8 */ +#define B43_LPPHY_RF_PWR_OVERRIDE B43_PHY_OFDM(0xD3) /* RF power override */ + + + +/* Radio register access decorators. */ +#define B43_LP_RADIO(radio_reg) (radio_reg) +#define B43_LP_NORTH(radio_reg) B43_LP_RADIO(radio_reg) +#define B43_LP_SOUTH(radio_reg) B43_LP_RADIO((radio_reg) | 0x4000) + + +/*** Broadcom 2062 NORTH radio registers ***/ +#define B2062_N_COMM1 B43_LP_NORTH(0x000) /* Common 01 (north) */ +#define B2062_N_COMM2 B43_LP_NORTH(0x002) /* Common 02 (north) */ +#define B2062_N_COMM3 B43_LP_NORTH(0x003) /* Common 03 (north) */ +#define B2062_N_COMM4 B43_LP_NORTH(0x004) /* Common 04 (north) */ +#define B2062_N_COMM5 B43_LP_NORTH(0x005) /* Common 05 (north) */ +#define B2062_N_COMM6 B43_LP_NORTH(0x006) /* Common 06 (north) */ +#define B2062_N_COMM7 B43_LP_NORTH(0x007) /* Common 07 (north) */ +#define B2062_N_COMM8 B43_LP_NORTH(0x008) /* Common 08 (north) */ +#define B2062_N_COMM9 B43_LP_NORTH(0x009) /* Common 09 (north) */ +#define B2062_N_COMM10 B43_LP_NORTH(0x00A) /* Common 10 (north) */ +#define B2062_N_COMM11 B43_LP_NORTH(0x00B) /* Common 11 (north) */ +#define B2062_N_COMM12 B43_LP_NORTH(0x00C) /* Common 12 (north) */ +#define B2062_N_COMM13 B43_LP_NORTH(0x00D) /* Common 13 (north) */ +#define B2062_N_COMM14 B43_LP_NORTH(0x00E) /* Common 14 (north) */ +#define B2062_N_COMM15 B43_LP_NORTH(0x00F) /* Common 15 (north) */ +#define B2062_N_PDN_CTL0 B43_LP_NORTH(0x010) /* PDN Control 0 (north) */ +#define B2062_N_PDN_CTL1 B43_LP_NORTH(0x011) /* PDN Control 1 (north) */ +#define B2062_N_PDN_CTL2 B43_LP_NORTH(0x012) /* PDN Control 2 (north) */ +#define B2062_N_PDN_CTL3 B43_LP_NORTH(0x013) /* PDN Control 3 (north) */ +#define B2062_N_PDN_CTL4 B43_LP_NORTH(0x014) /* PDN Control 4 (north) */ +#define B2062_N_GEN_CTL0 B43_LP_NORTH(0x015) /* GEN Control 0 (north) */ +#define B2062_N_IQ_CALIB B43_LP_NORTH(0x016) /* IQ Calibration (north) */ +#define B2062_N_LGENC B43_LP_NORTH(0x017) /* LGENC (north) */ +#define B2062_N_LGENA_LPF B43_LP_NORTH(0x018) /* LGENA LPF (north) */ +#define B2062_N_LGENA_BIAS0 B43_LP_NORTH(0x019) /* LGENA Bias 0 (north) */ +#define B2062_N_LGNEA_BIAS1 B43_LP_NORTH(0x01A) /* LGNEA Bias 1 (north) */ +#define B2062_N_LGENA_CTL0 B43_LP_NORTH(0x01B) /* LGENA Control 0 (north) */ +#define B2062_N_LGENA_CTL1 
B43_LP_NORTH(0x01C) /* LGENA Control 1 (north) */ +#define B2062_N_LGENA_CTL2 B43_LP_NORTH(0x01D) /* LGENA Control 2 (north) */ +#define B2062_N_LGENA_TUNE0 B43_LP_NORTH(0x01E) /* LGENA Tune 0 (north) */ +#define B2062_N_LGENA_TUNE1 B43_LP_NORTH(0x01F) /* LGENA Tune 1 (north) */ +#define B2062_N_LGENA_TUNE2 B43_LP_NORTH(0x020) /* LGENA Tune 2 (north) */ +#define B2062_N_LGENA_TUNE3 B43_LP_NORTH(0x021) /* LGENA Tune 3 (north) */ +#define B2062_N_LGENA_CTL3 B43_LP_NORTH(0x022) /* LGENA Control 3 (north) */ +#define B2062_N_LGENA_CTL4 B43_LP_NORTH(0x023) /* LGENA Control 4 (north) */ +#define B2062_N_LGENA_CTL5 B43_LP_NORTH(0x024) /* LGENA Control 5 (north) */ +#define B2062_N_LGENA_CTL6 B43_LP_NORTH(0x025) /* LGENA Control 6 (north) */ +#define B2062_N_LGENA_CTL7 B43_LP_NORTH(0x026) /* LGENA Control 7 (north) */ +#define B2062_N_RXA_CTL0 B43_LP_NORTH(0x027) /* RXA Control 0 (north) */ +#define B2062_N_RXA_CTL1 B43_LP_NORTH(0x028) /* RXA Control 1 (north) */ +#define B2062_N_RXA_CTL2 B43_LP_NORTH(0x029) /* RXA Control 2 (north) */ +#define B2062_N_RXA_CTL3 B43_LP_NORTH(0x02A) /* RXA Control 3 (north) */ +#define B2062_N_RXA_CTL4 B43_LP_NORTH(0x02B) /* RXA Control 4 (north) */ +#define B2062_N_RXA_CTL5 B43_LP_NORTH(0x02C) /* RXA Control 5 (north) */ +#define B2062_N_RXA_CTL6 B43_LP_NORTH(0x02D) /* RXA Control 6 (north) */ +#define B2062_N_RXA_CTL7 B43_LP_NORTH(0x02E) /* RXA Control 7 (north) */ +#define B2062_N_RXBB_CTL0 B43_LP_NORTH(0x02F) /* RXBB Control 0 (north) */ +#define B2062_N_RXBB_CTL1 B43_LP_NORTH(0x030) /* RXBB Control 1 (north) */ +#define B2062_N_RXBB_CTL2 B43_LP_NORTH(0x031) /* RXBB Control 2 (north) */ +#define B2062_N_RXBB_GAIN0 B43_LP_NORTH(0x032) /* RXBB Gain 0 (north) */ +#define B2062_N_RXBB_GAIN1 B43_LP_NORTH(0x033) /* RXBB Gain 1 (north) */ +#define B2062_N_RXBB_GAIN2 B43_LP_NORTH(0x034) /* RXBB Gain 2 (north) */ +#define B2062_N_RXBB_GAIN3 B43_LP_NORTH(0x035) /* RXBB Gain 3 (north) */ +#define B2062_N_RXBB_RSSI0 B43_LP_NORTH(0x036) /* RXBB RSSI 0 (north) */ +#define B2062_N_RXBB_RSSI1 B43_LP_NORTH(0x037) /* RXBB RSSI 1 (north) */ +#define B2062_N_RXBB_CALIB0 B43_LP_NORTH(0x038) /* RXBB Calibration0 (north) */ +#define B2062_N_RXBB_CALIB1 B43_LP_NORTH(0x039) /* RXBB Calibration1 (north) */ +#define B2062_N_RXBB_CALIB2 B43_LP_NORTH(0x03A) /* RXBB Calibration2 (north) */ +#define B2062_N_RXBB_BIAS0 B43_LP_NORTH(0x03B) /* RXBB Bias 0 (north) */ +#define B2062_N_RXBB_BIAS1 B43_LP_NORTH(0x03C) /* RXBB Bias 1 (north) */ +#define B2062_N_RXBB_BIAS2 B43_LP_NORTH(0x03D) /* RXBB Bias 2 (north) */ +#define B2062_N_RXBB_BIAS3 B43_LP_NORTH(0x03E) /* RXBB Bias 3 (north) */ +#define B2062_N_RXBB_BIAS4 B43_LP_NORTH(0x03F) /* RXBB Bias 4 (north) */ +#define B2062_N_RXBB_BIAS5 B43_LP_NORTH(0x040) /* RXBB Bias 5 (north) */ +#define B2062_N_RXBB_RSSI2 B43_LP_NORTH(0x041) /* RXBB RSSI 2 (north) */ +#define B2062_N_RXBB_RSSI3 B43_LP_NORTH(0x042) /* RXBB RSSI 3 (north) */ +#define B2062_N_RXBB_RSSI4 B43_LP_NORTH(0x043) /* RXBB RSSI 4 (north) */ +#define B2062_N_RXBB_RSSI5 B43_LP_NORTH(0x044) /* RXBB RSSI 5 (north) */ +#define B2062_N_TX_CTL0 B43_LP_NORTH(0x045) /* TX Control 0 (north) */ +#define B2062_N_TX_CTL1 B43_LP_NORTH(0x046) /* TX Control 1 (north) */ +#define B2062_N_TX_CTL2 B43_LP_NORTH(0x047) /* TX Control 2 (north) */ +#define B2062_N_TX_CTL3 B43_LP_NORTH(0x048) /* TX Control 3 (north) */ +#define B2062_N_TX_CTL4 B43_LP_NORTH(0x049) /* TX Control 4 (north) */ +#define B2062_N_TX_CTL5 B43_LP_NORTH(0x04A) /* TX Control 5 (north) */ +#define B2062_N_TX_CTL6 B43_LP_NORTH(0x04B) /* TX 
Control 6 (north) */ +#define B2062_N_TX_CTL7 B43_LP_NORTH(0x04C) /* TX Control 7 (north) */ +#define B2062_N_TX_CTL8 B43_LP_NORTH(0x04D) /* TX Control 8 (north) */ +#define B2062_N_TX_CTL9 B43_LP_NORTH(0x04E) /* TX Control 9 (north) */ +#define B2062_N_TX_CTL_A B43_LP_NORTH(0x04F) /* TX Control A (north) */ +#define B2062_N_TX_GC2G B43_LP_NORTH(0x050) /* TX GC2G (north) */ +#define B2062_N_TX_GC5G B43_LP_NORTH(0x051) /* TX GC5G (north) */ +#define B2062_N_TX_TUNE B43_LP_NORTH(0x052) /* TX Tune (north) */ +#define B2062_N_TX_PAD B43_LP_NORTH(0x053) /* TX PAD (north) */ +#define B2062_N_TX_PGA B43_LP_NORTH(0x054) /* TX PGA (north) */ +#define B2062_N_TX_PADAUX B43_LP_NORTH(0x055) /* TX PADAUX (north) */ +#define B2062_N_TX_PGAAUX B43_LP_NORTH(0x056) /* TX PGAAUX (north) */ +#define B2062_N_TSSI_CTL0 B43_LP_NORTH(0x057) /* TSSI Control 0 (north) */ +#define B2062_N_TSSI_CTL1 B43_LP_NORTH(0x058) /* TSSI Control 1 (north) */ +#define B2062_N_TSSI_CTL2 B43_LP_NORTH(0x059) /* TSSI Control 2 (north) */ +#define B2062_N_IQ_CALIB_CTL0 B43_LP_NORTH(0x05A) /* IQ Calibration Control 0 (north) */ +#define B2062_N_IQ_CALIB_CTL1 B43_LP_NORTH(0x05B) /* IQ Calibration Control 1 (north) */ +#define B2062_N_IQ_CALIB_CTL2 B43_LP_NORTH(0x05C) /* IQ Calibration Control 2 (north) */ +#define B2062_N_CALIB_TS B43_LP_NORTH(0x05D) /* Calibration TS (north) */ +#define B2062_N_CALIB_CTL0 B43_LP_NORTH(0x05E) /* Calibration Control 0 (north) */ +#define B2062_N_CALIB_CTL1 B43_LP_NORTH(0x05F) /* Calibration Control 1 (north) */ +#define B2062_N_CALIB_CTL2 B43_LP_NORTH(0x060) /* Calibration Control 2 (north) */ +#define B2062_N_CALIB_CTL3 B43_LP_NORTH(0x061) /* Calibration Control 3 (north) */ +#define B2062_N_CALIB_CTL4 B43_LP_NORTH(0x062) /* Calibration Control 4 (north) */ +#define B2062_N_CALIB_DBG0 B43_LP_NORTH(0x063) /* Calibration Debug 0 (north) */ +#define B2062_N_CALIB_DBG1 B43_LP_NORTH(0x064) /* Calibration Debug 1 (north) */ +#define B2062_N_CALIB_DBG2 B43_LP_NORTH(0x065) /* Calibration Debug 2 (north) */ +#define B2062_N_CALIB_DBG3 B43_LP_NORTH(0x066) /* Calibration Debug 3 (north) */ +#define B2062_N_PSENSE_CTL0 B43_LP_NORTH(0x069) /* PSENSE Control 0 (north) */ +#define B2062_N_PSENSE_CTL1 B43_LP_NORTH(0x06A) /* PSENSE Control 1 (north) */ +#define B2062_N_PSENSE_CTL2 B43_LP_NORTH(0x06B) /* PSENSE Control 2 (north) */ +#define B2062_N_TEST_BUF0 B43_LP_NORTH(0x06C) /* TEST BUF0 (north) */ + +/*** Broadcom 2062 SOUTH radio registers ***/ +#define B2062_S_COMM1 B43_LP_SOUTH(0x000) /* Common 01 (south) */ +#define B2062_S_RADIO_ID_CODE B43_LP_SOUTH(0x001) /* Radio ID code (south) */ +#define B2062_S_COMM2 B43_LP_SOUTH(0x002) /* Common 02 (south) */ +#define B2062_S_COMM3 B43_LP_SOUTH(0x003) /* Common 03 (south) */ +#define B2062_S_COMM4 B43_LP_SOUTH(0x004) /* Common 04 (south) */ +#define B2062_S_COMM5 B43_LP_SOUTH(0x005) /* Common 05 (south) */ +#define B2062_S_COMM6 B43_LP_SOUTH(0x006) /* Common 06 (south) */ +#define B2062_S_COMM7 B43_LP_SOUTH(0x007) /* Common 07 (south) */ +#define B2062_S_COMM8 B43_LP_SOUTH(0x008) /* Common 08 (south) */ +#define B2062_S_COMM9 B43_LP_SOUTH(0x009) /* Common 09 (south) */ +#define B2062_S_COMM10 B43_LP_SOUTH(0x00A) /* Common 10 (south) */ +#define B2062_S_COMM11 B43_LP_SOUTH(0x00B) /* Common 11 (south) */ +#define B2062_S_COMM12 B43_LP_SOUTH(0x00C) /* Common 12 (south) */ +#define B2062_S_COMM13 B43_LP_SOUTH(0x00D) /* Common 13 (south) */ +#define B2062_S_COMM14 B43_LP_SOUTH(0x00E) /* Common 14 (south) */ +#define B2062_S_COMM15 B43_LP_SOUTH(0x00F) /* Common 15 (south) */ 
+#define B2062_S_PDS_CTL0 B43_LP_SOUTH(0x010) /* PDS Control 0 (south) */ +#define B2062_S_PDS_CTL1 B43_LP_SOUTH(0x011) /* PDS Control 1 (south) */ +#define B2062_S_PDS_CTL2 B43_LP_SOUTH(0x012) /* PDS Control 2 (south) */ +#define B2062_S_PDS_CTL3 B43_LP_SOUTH(0x013) /* PDS Control 3 (south) */ +#define B2062_S_BG_CTL0 B43_LP_SOUTH(0x014) /* BG Control 0 (south) */ +#define B2062_S_BG_CTL1 B43_LP_SOUTH(0x015) /* BG Control 1 (south) */ +#define B2062_S_BG_CTL2 B43_LP_SOUTH(0x016) /* BG Control 2 (south) */ +#define B2062_S_LGENG_CTL0 B43_LP_SOUTH(0x017) /* LGENG Control 00 (south) */ +#define B2062_S_LGENG_CTL1 B43_LP_SOUTH(0x018) /* LGENG Control 01 (south) */ +#define B2062_S_LGENG_CTL2 B43_LP_SOUTH(0x019) /* LGENG Control 02 (south) */ +#define B2062_S_LGENG_CTL3 B43_LP_SOUTH(0x01A) /* LGENG Control 03 (south) */ +#define B2062_S_LGENG_CTL4 B43_LP_SOUTH(0x01B) /* LGENG Control 04 (south) */ +#define B2062_S_LGENG_CTL5 B43_LP_SOUTH(0x01C) /* LGENG Control 05 (south) */ +#define B2062_S_LGENG_CTL6 B43_LP_SOUTH(0x01D) /* LGENG Control 06 (south) */ +#define B2062_S_LGENG_CTL7 B43_LP_SOUTH(0x01E) /* LGENG Control 07 (south) */ +#define B2062_S_LGENG_CTL8 B43_LP_SOUTH(0x01F) /* LGENG Control 08 (south) */ +#define B2062_S_LGENG_CTL9 B43_LP_SOUTH(0x020) /* LGENG Control 09 (south) */ +#define B2062_S_LGENG_CTL10 B43_LP_SOUTH(0x021) /* LGENG Control 10 (south) */ +#define B2062_S_LGENG_CTL11 B43_LP_SOUTH(0x022) /* LGENG Control 11 (south) */ +#define B2062_S_REFPLL_CTL0 B43_LP_SOUTH(0x023) /* REFPLL Control 00 (south) */ +#define B2062_S_REFPLL_CTL1 B43_LP_SOUTH(0x024) /* REFPLL Control 01 (south) */ +#define B2062_S_REFPLL_CTL2 B43_LP_SOUTH(0x025) /* REFPLL Control 02 (south) */ +#define B2062_S_REFPLL_CTL3 B43_LP_SOUTH(0x026) /* REFPLL Control 03 (south) */ +#define B2062_S_REFPLL_CTL4 B43_LP_SOUTH(0x027) /* REFPLL Control 04 (south) */ +#define B2062_S_REFPLL_CTL5 B43_LP_SOUTH(0x028) /* REFPLL Control 05 (south) */ +#define B2062_S_REFPLL_CTL6 B43_LP_SOUTH(0x029) /* REFPLL Control 06 (south) */ +#define B2062_S_REFPLL_CTL7 B43_LP_SOUTH(0x02A) /* REFPLL Control 07 (south) */ +#define B2062_S_REFPLL_CTL8 B43_LP_SOUTH(0x02B) /* REFPLL Control 08 (south) */ +#define B2062_S_REFPLL_CTL9 B43_LP_SOUTH(0x02C) /* REFPLL Control 09 (south) */ +#define B2062_S_REFPLL_CTL10 B43_LP_SOUTH(0x02D) /* REFPLL Control 10 (south) */ +#define B2062_S_REFPLL_CTL11 B43_LP_SOUTH(0x02E) /* REFPLL Control 11 (south) */ +#define B2062_S_REFPLL_CTL12 B43_LP_SOUTH(0x02F) /* REFPLL Control 12 (south) */ +#define B2062_S_REFPLL_CTL13 B43_LP_SOUTH(0x030) /* REFPLL Control 13 (south) */ +#define B2062_S_REFPLL_CTL14 B43_LP_SOUTH(0x031) /* REFPLL Control 14 (south) */ +#define B2062_S_REFPLL_CTL15 B43_LP_SOUTH(0x032) /* REFPLL Control 15 (south) */ +#define B2062_S_REFPLL_CTL16 B43_LP_SOUTH(0x033) /* REFPLL Control 16 (south) */ +#define B2062_S_RFPLL_CTL0 B43_LP_SOUTH(0x034) /* RFPLL Control 00 (south) */ +#define B2062_S_RFPLL_CTL1 B43_LP_SOUTH(0x035) /* RFPLL Control 01 (south) */ +#define B2062_S_RFPLL_CTL2 B43_LP_SOUTH(0x036) /* RFPLL Control 02 (south) */ +#define B2062_S_RFPLL_CTL3 B43_LP_SOUTH(0x037) /* RFPLL Control 03 (south) */ +#define B2062_S_RFPLL_CTL4 B43_LP_SOUTH(0x038) /* RFPLL Control 04 (south) */ +#define B2062_S_RFPLL_CTL5 B43_LP_SOUTH(0x039) /* RFPLL Control 05 (south) */ +#define B2062_S_RFPLL_CTL6 B43_LP_SOUTH(0x03A) /* RFPLL Control 06 (south) */ +#define B2062_S_RFPLL_CTL7 B43_LP_SOUTH(0x03B) /* RFPLL Control 07 (south) */ +#define B2062_S_RFPLL_CTL8 B43_LP_SOUTH(0x03C) /* RFPLL Control 08 (south) 
*/ +#define B2062_S_RFPLL_CTL9 B43_LP_SOUTH(0x03D) /* RFPLL Control 09 (south) */ +#define B2062_S_RFPLL_CTL10 B43_LP_SOUTH(0x03E) /* RFPLL Control 10 (south) */ +#define B2062_S_RFPLL_CTL11 B43_LP_SOUTH(0x03F) /* RFPLL Control 11 (south) */ +#define B2062_S_RFPLL_CTL12 B43_LP_SOUTH(0x040) /* RFPLL Control 12 (south) */ +#define B2062_S_RFPLL_CTL13 B43_LP_SOUTH(0x041) /* RFPLL Control 13 (south) */ +#define B2062_S_RFPLL_CTL14 B43_LP_SOUTH(0x042) /* RFPLL Control 14 (south) */ +#define B2062_S_RFPLL_CTL15 B43_LP_SOUTH(0x043) /* RFPLL Control 15 (south) */ +#define B2062_S_RFPLL_CTL16 B43_LP_SOUTH(0x044) /* RFPLL Control 16 (south) */ +#define B2062_S_RFPLL_CTL17 B43_LP_SOUTH(0x045) /* RFPLL Control 17 (south) */ +#define B2062_S_RFPLL_CTL18 B43_LP_SOUTH(0x046) /* RFPLL Control 18 (south) */ +#define B2062_S_RFPLL_CTL19 B43_LP_SOUTH(0x047) /* RFPLL Control 19 (south) */ +#define B2062_S_RFPLL_CTL20 B43_LP_SOUTH(0x048) /* RFPLL Control 20 (south) */ +#define B2062_S_RFPLL_CTL21 B43_LP_SOUTH(0x049) /* RFPLL Control 21 (south) */ +#define B2062_S_RFPLL_CTL22 B43_LP_SOUTH(0x04A) /* RFPLL Control 22 (south) */ +#define B2062_S_RFPLL_CTL23 B43_LP_SOUTH(0x04B) /* RFPLL Control 23 (south) */ +#define B2062_S_RFPLL_CTL24 B43_LP_SOUTH(0x04C) /* RFPLL Control 24 (south) */ +#define B2062_S_RFPLL_CTL25 B43_LP_SOUTH(0x04D) /* RFPLL Control 25 (south) */ +#define B2062_S_RFPLL_CTL26 B43_LP_SOUTH(0x04E) /* RFPLL Control 26 (south) */ +#define B2062_S_RFPLL_CTL27 B43_LP_SOUTH(0x04F) /* RFPLL Control 27 (south) */ +#define B2062_S_RFPLL_CTL28 B43_LP_SOUTH(0x050) /* RFPLL Control 28 (south) */ +#define B2062_S_RFPLL_CTL29 B43_LP_SOUTH(0x051) /* RFPLL Control 29 (south) */ +#define B2062_S_RFPLL_CTL30 B43_LP_SOUTH(0x052) /* RFPLL Control 30 (south) */ +#define B2062_S_RFPLL_CTL31 B43_LP_SOUTH(0x053) /* RFPLL Control 31 (south) */ +#define B2062_S_RFPLL_CTL32 B43_LP_SOUTH(0x054) /* RFPLL Control 32 (south) */ +#define B2062_S_RFPLL_CTL33 B43_LP_SOUTH(0x055) /* RFPLL Control 33 (south) */ +#define B2062_S_RFPLL_CTL34 B43_LP_SOUTH(0x056) /* RFPLL Control 34 (south) */ +#define B2062_S_RXG_CNT0 B43_LP_SOUTH(0x057) /* RXG Counter 00 (south) */ +#define B2062_S_RXG_CNT1 B43_LP_SOUTH(0x058) /* RXG Counter 01 (south) */ +#define B2062_S_RXG_CNT2 B43_LP_SOUTH(0x059) /* RXG Counter 02 (south) */ +#define B2062_S_RXG_CNT3 B43_LP_SOUTH(0x05A) /* RXG Counter 03 (south) */ +#define B2062_S_RXG_CNT4 B43_LP_SOUTH(0x05B) /* RXG Counter 04 (south) */ +#define B2062_S_RXG_CNT5 B43_LP_SOUTH(0x05C) /* RXG Counter 05 (south) */ +#define B2062_S_RXG_CNT6 B43_LP_SOUTH(0x05D) /* RXG Counter 06 (south) */ +#define B2062_S_RXG_CNT7 B43_LP_SOUTH(0x05E) /* RXG Counter 07 (south) */ +#define B2062_S_RXG_CNT8 B43_LP_SOUTH(0x05F) /* RXG Counter 08 (south) */ +#define B2062_S_RXG_CNT9 B43_LP_SOUTH(0x060) /* RXG Counter 09 (south) */ +#define B2062_S_RXG_CNT10 B43_LP_SOUTH(0x061) /* RXG Counter 10 (south) */ +#define B2062_S_RXG_CNT11 B43_LP_SOUTH(0x062) /* RXG Counter 11 (south) */ +#define B2062_S_RXG_CNT12 B43_LP_SOUTH(0x063) /* RXG Counter 12 (south) */ +#define B2062_S_RXG_CNT13 B43_LP_SOUTH(0x064) /* RXG Counter 13 (south) */ +#define B2062_S_RXG_CNT14 B43_LP_SOUTH(0x065) /* RXG Counter 14 (south) */ +#define B2062_S_RXG_CNT15 B43_LP_SOUTH(0x066) /* RXG Counter 15 (south) */ +#define B2062_S_RXG_CNT16 B43_LP_SOUTH(0x067) /* RXG Counter 16 (south) */ +#define B2062_S_RXG_CNT17 B43_LP_SOUTH(0x068) /* RXG Counter 17 (south) */ + + + +/*** Broadcom 2063 radio registers ***/ +#define B2063_RADIO_ID_CODE B43_LP_RADIO(0x001) /* Radio ID code 
*/ +#define B2063_COMM1 B43_LP_RADIO(0x000) /* Common 01 */ +#define B2063_COMM2 B43_LP_RADIO(0x002) /* Common 02 */ +#define B2063_COMM3 B43_LP_RADIO(0x003) /* Common 03 */ +#define B2063_COMM4 B43_LP_RADIO(0x004) /* Common 04 */ +#define B2063_COMM5 B43_LP_RADIO(0x005) /* Common 05 */ +#define B2063_COMM6 B43_LP_RADIO(0x006) /* Common 06 */ +#define B2063_COMM7 B43_LP_RADIO(0x007) /* Common 07 */ +#define B2063_COMM8 B43_LP_RADIO(0x008) /* Common 08 */ +#define B2063_COMM9 B43_LP_RADIO(0x009) /* Common 09 */ +#define B2063_COMM10 B43_LP_RADIO(0x00A) /* Common 10 */ +#define B2063_COMM11 B43_LP_RADIO(0x00B) /* Common 11 */ +#define B2063_COMM12 B43_LP_RADIO(0x00C) /* Common 12 */ +#define B2063_COMM13 B43_LP_RADIO(0x00D) /* Common 13 */ +#define B2063_COMM14 B43_LP_RADIO(0x00E) /* Common 14 */ +#define B2063_COMM15 B43_LP_RADIO(0x00F) /* Common 15 */ +#define B2063_COMM16 B43_LP_RADIO(0x010) /* Common 16 */ +#define B2063_COMM17 B43_LP_RADIO(0x011) /* Common 17 */ +#define B2063_COMM18 B43_LP_RADIO(0x012) /* Common 18 */ +#define B2063_COMM19 B43_LP_RADIO(0x013) /* Common 19 */ +#define B2063_COMM20 B43_LP_RADIO(0x014) /* Common 20 */ +#define B2063_COMM21 B43_LP_RADIO(0x015) /* Common 21 */ +#define B2063_COMM22 B43_LP_RADIO(0x016) /* Common 22 */ +#define B2063_COMM23 B43_LP_RADIO(0x017) /* Common 23 */ +#define B2063_COMM24 B43_LP_RADIO(0x018) /* Common 24 */ +#define B2063_PWR_SWITCH_CTL B43_LP_RADIO(0x019) /* POWER SWITCH Control */ +#define B2063_PLL_SP1 B43_LP_RADIO(0x01A) /* PLL SP 1 */ +#define B2063_PLL_SP2 B43_LP_RADIO(0x01B) /* PLL SP 2 */ +#define B2063_LOGEN_SP1 B43_LP_RADIO(0x01C) /* LOGEN SP 1 */ +#define B2063_LOGEN_SP2 B43_LP_RADIO(0x01D) /* LOGEN SP 2 */ +#define B2063_LOGEN_SP3 B43_LP_RADIO(0x01E) /* LOGEN SP 3 */ +#define B2063_LOGEN_SP4 B43_LP_RADIO(0x01F) /* LOGEN SP 4 */ +#define B2063_LOGEN_SP5 B43_LP_RADIO(0x020) /* LOGEN SP 5 */ +#define B2063_G_RX_SP1 B43_LP_RADIO(0x021) /* G RX SP 1 */ +#define B2063_G_RX_SP2 B43_LP_RADIO(0x022) /* G RX SP 2 */ +#define B2063_G_RX_SP3 B43_LP_RADIO(0x023) /* G RX SP 3 */ +#define B2063_G_RX_SP4 B43_LP_RADIO(0x024) /* G RX SP 4 */ +#define B2063_G_RX_SP5 B43_LP_RADIO(0x025) /* G RX SP 5 */ +#define B2063_G_RX_SP6 B43_LP_RADIO(0x026) /* G RX SP 6 */ +#define B2063_G_RX_SP7 B43_LP_RADIO(0x027) /* G RX SP 7 */ +#define B2063_G_RX_SP8 B43_LP_RADIO(0x028) /* G RX SP 8 */ +#define B2063_G_RX_SP9 B43_LP_RADIO(0x029) /* G RX SP 9 */ +#define B2063_G_RX_SP10 B43_LP_RADIO(0x02A) /* G RX SP 10 */ +#define B2063_G_RX_SP11 B43_LP_RADIO(0x02B) /* G RX SP 11 */ +#define B2063_A_RX_SP1 B43_LP_RADIO(0x02C) /* A RX SP 1 */ +#define B2063_A_RX_SP2 B43_LP_RADIO(0x02D) /* A RX SP 2 */ +#define B2063_A_RX_SP3 B43_LP_RADIO(0x02E) /* A RX SP 3 */ +#define B2063_A_RX_SP4 B43_LP_RADIO(0x02F) /* A RX SP 4 */ +#define B2063_A_RX_SP5 B43_LP_RADIO(0x030) /* A RX SP 5 */ +#define B2063_A_RX_SP6 B43_LP_RADIO(0x031) /* A RX SP 6 */ +#define B2063_A_RX_SP7 B43_LP_RADIO(0x032) /* A RX SP 7 */ +#define B2063_RX_BB_SP1 B43_LP_RADIO(0x033) /* RX BB SP 1 */ +#define B2063_RX_BB_SP2 B43_LP_RADIO(0x034) /* RX BB SP 2 */ +#define B2063_RX_BB_SP3 B43_LP_RADIO(0x035) /* RX BB SP 3 */ +#define B2063_RX_BB_SP4 B43_LP_RADIO(0x036) /* RX BB SP 4 */ +#define B2063_RX_BB_SP5 B43_LP_RADIO(0x037) /* RX BB SP 5 */ +#define B2063_RX_BB_SP6 B43_LP_RADIO(0x038) /* RX BB SP 6 */ +#define B2063_RX_BB_SP7 B43_LP_RADIO(0x039) /* RX BB SP 7 */ +#define B2063_RX_BB_SP8 B43_LP_RADIO(0x03A) /* RX BB SP 8 */ +#define B2063_TX_RF_SP1 B43_LP_RADIO(0x03B) /* TX RF SP 1 */ +#define 
B2063_TX_RF_SP2 B43_LP_RADIO(0x03C) /* TX RF SP 2 */ +#define B2063_TX_RF_SP3 B43_LP_RADIO(0x03D) /* TX RF SP 3 */ +#define B2063_TX_RF_SP4 B43_LP_RADIO(0x03E) /* TX RF SP 4 */ +#define B2063_TX_RF_SP5 B43_LP_RADIO(0x03F) /* TX RF SP 5 */ +#define B2063_TX_RF_SP6 B43_LP_RADIO(0x040) /* TX RF SP 6 */ +#define B2063_TX_RF_SP7 B43_LP_RADIO(0x041) /* TX RF SP 7 */ +#define B2063_TX_RF_SP8 B43_LP_RADIO(0x042) /* TX RF SP 8 */ +#define B2063_TX_RF_SP9 B43_LP_RADIO(0x043) /* TX RF SP 9 */ +#define B2063_TX_RF_SP10 B43_LP_RADIO(0x044) /* TX RF SP 10 */ +#define B2063_TX_RF_SP11 B43_LP_RADIO(0x045) /* TX RF SP 11 */ +#define B2063_TX_RF_SP12 B43_LP_RADIO(0x046) /* TX RF SP 12 */ +#define B2063_TX_RF_SP13 B43_LP_RADIO(0x047) /* TX RF SP 13 */ +#define B2063_TX_RF_SP14 B43_LP_RADIO(0x048) /* TX RF SP 14 */ +#define B2063_TX_RF_SP15 B43_LP_RADIO(0x049) /* TX RF SP 15 */ +#define B2063_TX_RF_SP16 B43_LP_RADIO(0x04A) /* TX RF SP 16 */ +#define B2063_TX_RF_SP17 B43_LP_RADIO(0x04B) /* TX RF SP 17 */ +#define B2063_PA_SP1 B43_LP_RADIO(0x04C) /* PA SP 1 */ +#define B2063_PA_SP2 B43_LP_RADIO(0x04D) /* PA SP 2 */ +#define B2063_PA_SP3 B43_LP_RADIO(0x04E) /* PA SP 3 */ +#define B2063_PA_SP4 B43_LP_RADIO(0x04F) /* PA SP 4 */ +#define B2063_PA_SP5 B43_LP_RADIO(0x050) /* PA SP 5 */ +#define B2063_PA_SP6 B43_LP_RADIO(0x051) /* PA SP 6 */ +#define B2063_PA_SP7 B43_LP_RADIO(0x052) /* PA SP 7 */ +#define B2063_TX_BB_SP1 B43_LP_RADIO(0x053) /* TX BB SP 1 */ +#define B2063_TX_BB_SP2 B43_LP_RADIO(0x054) /* TX BB SP 2 */ +#define B2063_TX_BB_SP3 B43_LP_RADIO(0x055) /* TX BB SP 3 */ +#define B2063_REG_SP1 B43_LP_RADIO(0x056) /* REG SP 1 */ +#define B2063_BANDGAP_CTL1 B43_LP_RADIO(0x057) /* BANDGAP Control 1 */ +#define B2063_BANDGAP_CTL2 B43_LP_RADIO(0x058) /* BANDGAP Control 2 */ +#define B2063_LPO_CTL1 B43_LP_RADIO(0x059) /* LPO Control 1 */ +#define B2063_RC_CALIB_CTL1 B43_LP_RADIO(0x05A) /* RC Calibration Control 1 */ +#define B2063_RC_CALIB_CTL2 B43_LP_RADIO(0x05B) /* RC Calibration Control 2 */ +#define B2063_RC_CALIB_CTL3 B43_LP_RADIO(0x05C) /* RC Calibration Control 3 */ +#define B2063_RC_CALIB_CTL4 B43_LP_RADIO(0x05D) /* RC Calibration Control 4 */ +#define B2063_RC_CALIB_CTL5 B43_LP_RADIO(0x05E) /* RC Calibration Control 5 */ +#define B2063_RC_CALIB_CTL6 B43_LP_RADIO(0x05F) /* RC Calibration Control 6 */ +#define B2063_RC_CALIB_CTL7 B43_LP_RADIO(0x060) /* RC Calibration Control 7 */ +#define B2063_RC_CALIB_CTL8 B43_LP_RADIO(0x061) /* RC Calibration Control 8 */ +#define B2063_RC_CALIB_CTL9 B43_LP_RADIO(0x062) /* RC Calibration Control 9 */ +#define B2063_RC_CALIB_CTL10 B43_LP_RADIO(0x063) /* RC Calibration Control 10 */ +#define B2063_PLL_JTAG_CALNRST B43_LP_RADIO(0x064) /* PLL JTAG CALNRST */ +#define B2063_PLL_JTAG_IN_PLL1 B43_LP_RADIO(0x065) /* PLL JTAG IN PLL 1 */ +#define B2063_PLL_JTAG_IN_PLL2 B43_LP_RADIO(0x066) /* PLL JTAG IN PLL 2 */ +#define B2063_PLL_JTAG_PLL_CP1 B43_LP_RADIO(0x067) /* PLL JTAG PLL CP 1 */ +#define B2063_PLL_JTAG_PLL_CP2 B43_LP_RADIO(0x068) /* PLL JTAG PLL CP 2 */ +#define B2063_PLL_JTAG_PLL_CP3 B43_LP_RADIO(0x069) /* PLL JTAG PLL CP 3 */ +#define B2063_PLL_JTAG_PLL_CP4 B43_LP_RADIO(0x06A) /* PLL JTAG PLL CP 4 */ +#define B2063_PLL_JTAG_PLL_CTL1 B43_LP_RADIO(0x06B) /* PLL JTAG PLL Control 1 */ +#define B2063_PLL_JTAG_PLL_LF1 B43_LP_RADIO(0x06C) /* PLL JTAG PLL LF 1 */ +#define B2063_PLL_JTAG_PLL_LF2 B43_LP_RADIO(0x06D) /* PLL JTAG PLL LF 2 */ +#define B2063_PLL_JTAG_PLL_LF3 B43_LP_RADIO(0x06E) /* PLL JTAG PLL LF 3 */ +#define B2063_PLL_JTAG_PLL_LF4 B43_LP_RADIO(0x06F) /* PLL JTAG PLL 
LF 4 */ +#define B2063_PLL_JTAG_PLL_SG1 B43_LP_RADIO(0x070) /* PLL JTAG PLL SG 1 */ +#define B2063_PLL_JTAG_PLL_SG2 B43_LP_RADIO(0x071) /* PLL JTAG PLL SG 2 */ +#define B2063_PLL_JTAG_PLL_SG3 B43_LP_RADIO(0x072) /* PLL JTAG PLL SG 3 */ +#define B2063_PLL_JTAG_PLL_SG4 B43_LP_RADIO(0x073) /* PLL JTAG PLL SG 4 */ +#define B2063_PLL_JTAG_PLL_SG5 B43_LP_RADIO(0x074) /* PLL JTAG PLL SG 5 */ +#define B2063_PLL_JTAG_PLL_VCO1 B43_LP_RADIO(0x075) /* PLL JTAG PLL VCO 1 */ +#define B2063_PLL_JTAG_PLL_VCO2 B43_LP_RADIO(0x076) /* PLL JTAG PLL VCO 2 */ +#define B2063_PLL_JTAG_PLL_VCO_CALIB1 B43_LP_RADIO(0x077) /* PLL JTAG PLL VCO Calibration 1 */ +#define B2063_PLL_JTAG_PLL_VCO_CALIB2 B43_LP_RADIO(0x078) /* PLL JTAG PLL VCO Calibration 2 */ +#define B2063_PLL_JTAG_PLL_VCO_CALIB3 B43_LP_RADIO(0x079) /* PLL JTAG PLL VCO Calibration 3 */ +#define B2063_PLL_JTAG_PLL_VCO_CALIB4 B43_LP_RADIO(0x07A) /* PLL JTAG PLL VCO Calibration 4 */ +#define B2063_PLL_JTAG_PLL_VCO_CALIB5 B43_LP_RADIO(0x07B) /* PLL JTAG PLL VCO Calibration 5 */ +#define B2063_PLL_JTAG_PLL_VCO_CALIB6 B43_LP_RADIO(0x07C) /* PLL JTAG PLL VCO Calibration 6 */ +#define B2063_PLL_JTAG_PLL_VCO_CALIB7 B43_LP_RADIO(0x07D) /* PLL JTAG PLL VCO Calibration 7 */ +#define B2063_PLL_JTAG_PLL_VCO_CALIB8 B43_LP_RADIO(0x07E) /* PLL JTAG PLL VCO Calibration 8 */ +#define B2063_PLL_JTAG_PLL_VCO_CALIB9 B43_LP_RADIO(0x07F) /* PLL JTAG PLL VCO Calibration 9 */ +#define B2063_PLL_JTAG_PLL_VCO_CALIB10 B43_LP_RADIO(0x080) /* PLL JTAG PLL VCO Calibration 10 */ +#define B2063_PLL_JTAG_PLL_XTAL_12 B43_LP_RADIO(0x081) /* PLL JTAG PLL XTAL 1 2 */ +#define B2063_PLL_JTAG_PLL_XTAL3 B43_LP_RADIO(0x082) /* PLL JTAG PLL XTAL 3 */ +#define B2063_LOGEN_ACL1 B43_LP_RADIO(0x083) /* LOGEN ACL 1 */ +#define B2063_LOGEN_ACL2 B43_LP_RADIO(0x084) /* LOGEN ACL 2 */ +#define B2063_LOGEN_ACL3 B43_LP_RADIO(0x085) /* LOGEN ACL 3 */ +#define B2063_LOGEN_ACL4 B43_LP_RADIO(0x086) /* LOGEN ACL 4 */ +#define B2063_LOGEN_ACL5 B43_LP_RADIO(0x087) /* LOGEN ACL 5 */ +#define B2063_LO_CALIB_INPUTS B43_LP_RADIO(0x088) /* LO Calibration INPUTS */ +#define B2063_LO_CALIB_CTL1 B43_LP_RADIO(0x089) /* LO Calibration Control 1 */ +#define B2063_LO_CALIB_CTL2 B43_LP_RADIO(0x08A) /* LO Calibration Control 2 */ +#define B2063_LO_CALIB_CTL3 B43_LP_RADIO(0x08B) /* LO Calibration Control 3 */ +#define B2063_LO_CALIB_WAITCNT B43_LP_RADIO(0x08C) /* LO Calibration WAITCNT */ +#define B2063_LO_CALIB_OVR1 B43_LP_RADIO(0x08D) /* LO Calibration OVR 1 */ +#define B2063_LO_CALIB_OVR2 B43_LP_RADIO(0x08E) /* LO Calibration OVR 2 */ +#define B2063_LO_CALIB_OVAL1 B43_LP_RADIO(0x08F) /* LO Calibration OVAL 1 */ +#define B2063_LO_CALIB_OVAL2 B43_LP_RADIO(0x090) /* LO Calibration OVAL 2 */ +#define B2063_LO_CALIB_OVAL3 B43_LP_RADIO(0x091) /* LO Calibration OVAL 3 */ +#define B2063_LO_CALIB_OVAL4 B43_LP_RADIO(0x092) /* LO Calibration OVAL 4 */ +#define B2063_LO_CALIB_OVAL5 B43_LP_RADIO(0x093) /* LO Calibration OVAL 5 */ +#define B2063_LO_CALIB_OVAL6 B43_LP_RADIO(0x094) /* LO Calibration OVAL 6 */ +#define B2063_LO_CALIB_OVAL7 B43_LP_RADIO(0x095) /* LO Calibration OVAL 7 */ +#define B2063_LO_CALIB_CALVLD1 B43_LP_RADIO(0x096) /* LO Calibration CALVLD 1 */ +#define B2063_LO_CALIB_CALVLD2 B43_LP_RADIO(0x097) /* LO Calibration CALVLD 2 */ +#define B2063_LO_CALIB_CVAL1 B43_LP_RADIO(0x098) /* LO Calibration CVAL 1 */ +#define B2063_LO_CALIB_CVAL2 B43_LP_RADIO(0x099) /* LO Calibration CVAL 2 */ +#define B2063_LO_CALIB_CVAL3 B43_LP_RADIO(0x09A) /* LO Calibration CVAL 3 */ +#define B2063_LO_CALIB_CVAL4 B43_LP_RADIO(0x09B) /* LO Calibration 
CVAL 4 */ +#define B2063_LO_CALIB_CVAL5 B43_LP_RADIO(0x09C) /* LO Calibration CVAL 5 */ +#define B2063_LO_CALIB_CVAL6 B43_LP_RADIO(0x09D) /* LO Calibration CVAL 6 */ +#define B2063_LO_CALIB_CVAL7 B43_LP_RADIO(0x09E) /* LO Calibration CVAL 7 */ +#define B2063_LOGEN_CALIB_EN B43_LP_RADIO(0x09F) /* LOGEN Calibration EN */ +#define B2063_LOGEN_PEAKDET1 B43_LP_RADIO(0x0A0) /* LOGEN PEAKDET 1 */ +#define B2063_LOGEN_RCCR1 B43_LP_RADIO(0x0A1) /* LOGEN RCCR 1 */ +#define B2063_LOGEN_VCOBUF1 B43_LP_RADIO(0x0A2) /* LOGEN VCOBUF 1 */ +#define B2063_LOGEN_MIXER1 B43_LP_RADIO(0x0A3) /* LOGEN MIXER 1 */ +#define B2063_LOGEN_MIXER2 B43_LP_RADIO(0x0A4) /* LOGEN MIXER 2 */ +#define B2063_LOGEN_BUF1 B43_LP_RADIO(0x0A5) /* LOGEN BUF 1 */ +#define B2063_LOGEN_BUF2 B43_LP_RADIO(0x0A6) /* LOGEN BUF 2 */ +#define B2063_LOGEN_DIV1 B43_LP_RADIO(0x0A7) /* LOGEN DIV 1 */ +#define B2063_LOGEN_DIV2 B43_LP_RADIO(0x0A8) /* LOGEN DIV 2 */ +#define B2063_LOGEN_DIV3 B43_LP_RADIO(0x0A9) /* LOGEN DIV 3 */ +#define B2063_LOGEN_CBUFRX1 B43_LP_RADIO(0x0AA) /* LOGEN CBUFRX 1 */ +#define B2063_LOGEN_CBUFRX2 B43_LP_RADIO(0x0AB) /* LOGEN CBUFRX 2 */ +#define B2063_LOGEN_CBUFTX1 B43_LP_RADIO(0x0AC) /* LOGEN CBUFTX 1 */ +#define B2063_LOGEN_CBUFTX2 B43_LP_RADIO(0x0AD) /* LOGEN CBUFTX 2 */ +#define B2063_LOGEN_IDAC1 B43_LP_RADIO(0x0AE) /* LOGEN IDAC 1 */ +#define B2063_LOGEN_SPARE1 B43_LP_RADIO(0x0AF) /* LOGEN SPARE 1 */ +#define B2063_LOGEN_SPARE2 B43_LP_RADIO(0x0B0) /* LOGEN SPARE 2 */ +#define B2063_LOGEN_SPARE3 B43_LP_RADIO(0x0B1) /* LOGEN SPARE 3 */ +#define B2063_G_RX_1ST1 B43_LP_RADIO(0x0B2) /* G RX 1ST 1 */ +#define B2063_G_RX_1ST2 B43_LP_RADIO(0x0B3) /* G RX 1ST 2 */ +#define B2063_G_RX_1ST3 B43_LP_RADIO(0x0B4) /* G RX 1ST 3 */ +#define B2063_G_RX_2ND1 B43_LP_RADIO(0x0B5) /* G RX 2ND 1 */ +#define B2063_G_RX_2ND2 B43_LP_RADIO(0x0B6) /* G RX 2ND 2 */ +#define B2063_G_RX_2ND3 B43_LP_RADIO(0x0B7) /* G RX 2ND 3 */ +#define B2063_G_RX_2ND4 B43_LP_RADIO(0x0B8) /* G RX 2ND 4 */ +#define B2063_G_RX_2ND5 B43_LP_RADIO(0x0B9) /* G RX 2ND 5 */ +#define B2063_G_RX_2ND6 B43_LP_RADIO(0x0BA) /* G RX 2ND 6 */ +#define B2063_G_RX_2ND7 B43_LP_RADIO(0x0BB) /* G RX 2ND 7 */ +#define B2063_G_RX_2ND8 B43_LP_RADIO(0x0BC) /* G RX 2ND 8 */ +#define B2063_G_RX_PS1 B43_LP_RADIO(0x0BD) /* G RX PS 1 */ +#define B2063_G_RX_PS2 B43_LP_RADIO(0x0BE) /* G RX PS 2 */ +#define B2063_G_RX_PS3 B43_LP_RADIO(0x0BF) /* G RX PS 3 */ +#define B2063_G_RX_PS4 B43_LP_RADIO(0x0C0) /* G RX PS 4 */ +#define B2063_G_RX_PS5 B43_LP_RADIO(0x0C1) /* G RX PS 5 */ +#define B2063_G_RX_MIX1 B43_LP_RADIO(0x0C2) /* G RX MIX 1 */ +#define B2063_G_RX_MIX2 B43_LP_RADIO(0x0C3) /* G RX MIX 2 */ +#define B2063_G_RX_MIX3 B43_LP_RADIO(0x0C4) /* G RX MIX 3 */ +#define B2063_G_RX_MIX4 B43_LP_RADIO(0x0C5) /* G RX MIX 4 */ +#define B2063_G_RX_MIX5 B43_LP_RADIO(0x0C6) /* G RX MIX 5 */ +#define B2063_G_RX_MIX6 B43_LP_RADIO(0x0C7) /* G RX MIX 6 */ +#define B2063_G_RX_MIX7 B43_LP_RADIO(0x0C8) /* G RX MIX 7 */ +#define B2063_G_RX_MIX8 B43_LP_RADIO(0x0C9) /* G RX MIX 8 */ +#define B2063_G_RX_PDET1 B43_LP_RADIO(0x0CA) /* G RX PDET 1 */ +#define B2063_G_RX_SPARES1 B43_LP_RADIO(0x0CB) /* G RX SPARES 1 */ +#define B2063_G_RX_SPARES2 B43_LP_RADIO(0x0CC) /* G RX SPARES 2 */ +#define B2063_G_RX_SPARES3 B43_LP_RADIO(0x0CD) /* G RX SPARES 3 */ +#define B2063_A_RX_1ST1 B43_LP_RADIO(0x0CE) /* A RX 1ST 1 */ +#define B2063_A_RX_1ST2 B43_LP_RADIO(0x0CF) /* A RX 1ST 2 */ +#define B2063_A_RX_1ST3 B43_LP_RADIO(0x0D0) /* A RX 1ST 3 */ +#define B2063_A_RX_1ST4 B43_LP_RADIO(0x0D1) /* A RX 1ST 4 */ +#define B2063_A_RX_1ST5 
B43_LP_RADIO(0x0D2) /* A RX 1ST 5 */ +#define B2063_A_RX_2ND1 B43_LP_RADIO(0x0D3) /* A RX 2ND 1 */ +#define B2063_A_RX_2ND2 B43_LP_RADIO(0x0D4) /* A RX 2ND 2 */ +#define B2063_A_RX_2ND3 B43_LP_RADIO(0x0D5) /* A RX 2ND 3 */ +#define B2063_A_RX_2ND4 B43_LP_RADIO(0x0D6) /* A RX 2ND 4 */ +#define B2063_A_RX_2ND5 B43_LP_RADIO(0x0D7) /* A RX 2ND 5 */ +#define B2063_A_RX_2ND6 B43_LP_RADIO(0x0D8) /* A RX 2ND 6 */ +#define B2063_A_RX_2ND7 B43_LP_RADIO(0x0D9) /* A RX 2ND 7 */ +#define B2063_A_RX_PS1 B43_LP_RADIO(0x0DA) /* A RX PS 1 */ +#define B2063_A_RX_PS2 B43_LP_RADIO(0x0DB) /* A RX PS 2 */ +#define B2063_A_RX_PS3 B43_LP_RADIO(0x0DC) /* A RX PS 3 */ +#define B2063_A_RX_PS4 B43_LP_RADIO(0x0DD) /* A RX PS 4 */ +#define B2063_A_RX_PS5 B43_LP_RADIO(0x0DE) /* A RX PS 5 */ +#define B2063_A_RX_PS6 B43_LP_RADIO(0x0DF) /* A RX PS 6 */ +#define B2063_A_RX_MIX1 B43_LP_RADIO(0x0E0) /* A RX MIX 1 */ +#define B2063_A_RX_MIX2 B43_LP_RADIO(0x0E1) /* A RX MIX 2 */ +#define B2063_A_RX_MIX3 B43_LP_RADIO(0x0E2) /* A RX MIX 3 */ +#define B2063_A_RX_MIX4 B43_LP_RADIO(0x0E3) /* A RX MIX 4 */ +#define B2063_A_RX_MIX5 B43_LP_RADIO(0x0E4) /* A RX MIX 5 */ +#define B2063_A_RX_MIX6 B43_LP_RADIO(0x0E5) /* A RX MIX 6 */ +#define B2063_A_RX_MIX7 B43_LP_RADIO(0x0E6) /* A RX MIX 7 */ +#define B2063_A_RX_MIX8 B43_LP_RADIO(0x0E7) /* A RX MIX 8 */ +#define B2063_A_RX_PWRDET1 B43_LP_RADIO(0x0E8) /* A RX PWRDET 1 */ +#define B2063_A_RX_SPARE1 B43_LP_RADIO(0x0E9) /* A RX SPARE 1 */ +#define B2063_A_RX_SPARE2 B43_LP_RADIO(0x0EA) /* A RX SPARE 2 */ +#define B2063_A_RX_SPARE3 B43_LP_RADIO(0x0EB) /* A RX SPARE 3 */ +#define B2063_RX_TIA_CTL1 B43_LP_RADIO(0x0EC) /* RX TIA Control 1 */ +#define B2063_RX_TIA_CTL2 B43_LP_RADIO(0x0ED) /* RX TIA Control 2 */ +#define B2063_RX_TIA_CTL3 B43_LP_RADIO(0x0EE) /* RX TIA Control 3 */ +#define B2063_RX_TIA_CTL4 B43_LP_RADIO(0x0EF) /* RX TIA Control 4 */ +#define B2063_RX_TIA_CTL5 B43_LP_RADIO(0x0F0) /* RX TIA Control 5 */ +#define B2063_RX_TIA_CTL6 B43_LP_RADIO(0x0F1) /* RX TIA Control 6 */ +#define B2063_RX_BB_CTL1 B43_LP_RADIO(0x0F2) /* RX BB Control 1 */ +#define B2063_RX_BB_CTL2 B43_LP_RADIO(0x0F3) /* RX BB Control 2 */ +#define B2063_RX_BB_CTL3 B43_LP_RADIO(0x0F4) /* RX BB Control 3 */ +#define B2063_RX_BB_CTL4 B43_LP_RADIO(0x0F5) /* RX BB Control 4 */ +#define B2063_RX_BB_CTL5 B43_LP_RADIO(0x0F6) /* RX BB Control 5 */ +#define B2063_RX_BB_CTL6 B43_LP_RADIO(0x0F7) /* RX BB Control 6 */ +#define B2063_RX_BB_CTL7 B43_LP_RADIO(0x0F8) /* RX BB Control 7 */ +#define B2063_RX_BB_CTL8 B43_LP_RADIO(0x0F9) /* RX BB Control 8 */ +#define B2063_RX_BB_CTL9 B43_LP_RADIO(0x0FA) /* RX BB Control 9 */ +#define B2063_TX_RF_CTL1 B43_LP_RADIO(0x0FB) /* TX RF Control 1 */ +#define B2063_TX_RF_IDAC_LO_RF_I B43_LP_RADIO(0x0FC) /* TX RF IDAC LO RF I */ +#define B2063_TX_RF_IDAC_LO_RF_Q B43_LP_RADIO(0x0FD) /* TX RF IDAC LO RF Q */ +#define B2063_TX_RF_IDAC_LO_BB_I B43_LP_RADIO(0x0FE) /* TX RF IDAC LO BB I */ +#define B2063_TX_RF_IDAC_LO_BB_Q B43_LP_RADIO(0x0FF) /* TX RF IDAC LO BB Q */ +#define B2063_TX_RF_CTL2 B43_LP_RADIO(0x100) /* TX RF Control 2 */ +#define B2063_TX_RF_CTL3 B43_LP_RADIO(0x101) /* TX RF Control 3 */ +#define B2063_TX_RF_CTL4 B43_LP_RADIO(0x102) /* TX RF Control 4 */ +#define B2063_TX_RF_CTL5 B43_LP_RADIO(0x103) /* TX RF Control 5 */ +#define B2063_TX_RF_CTL6 B43_LP_RADIO(0x104) /* TX RF Control 6 */ +#define B2063_TX_RF_CTL7 B43_LP_RADIO(0x105) /* TX RF Control 7 */ +#define B2063_TX_RF_CTL8 B43_LP_RADIO(0x106) /* TX RF Control 8 */ +#define B2063_TX_RF_CTL9 B43_LP_RADIO(0x107) /* TX RF Control 9 */ 
+#define B2063_TX_RF_CTL10 B43_LP_RADIO(0x108) /* TX RF Control 10 */ +#define B2063_TX_RF_CTL14 B43_LP_RADIO(0x109) /* TX RF Control 14 */ +#define B2063_TX_RF_CTL15 B43_LP_RADIO(0x10A) /* TX RF Control 15 */ +#define B2063_PA_CTL1 B43_LP_RADIO(0x10B) /* PA Control 1 */ +#define B2063_PA_CTL2 B43_LP_RADIO(0x10C) /* PA Control 2 */ +#define B2063_PA_CTL3 B43_LP_RADIO(0x10D) /* PA Control 3 */ +#define B2063_PA_CTL4 B43_LP_RADIO(0x10E) /* PA Control 4 */ +#define B2063_PA_CTL5 B43_LP_RADIO(0x10F) /* PA Control 5 */ +#define B2063_PA_CTL6 B43_LP_RADIO(0x110) /* PA Control 6 */ +#define B2063_PA_CTL7 B43_LP_RADIO(0x111) /* PA Control 7 */ +#define B2063_PA_CTL8 B43_LP_RADIO(0x112) /* PA Control 8 */ +#define B2063_PA_CTL9 B43_LP_RADIO(0x113) /* PA Control 9 */ +#define B2063_PA_CTL10 B43_LP_RADIO(0x114) /* PA Control 10 */ +#define B2063_PA_CTL11 B43_LP_RADIO(0x115) /* PA Control 11 */ +#define B2063_PA_CTL12 B43_LP_RADIO(0x116) /* PA Control 12 */ +#define B2063_PA_CTL13 B43_LP_RADIO(0x117) /* PA Control 13 */ +#define B2063_TX_BB_CTL1 B43_LP_RADIO(0x118) /* TX BB Control 1 */ +#define B2063_TX_BB_CTL2 B43_LP_RADIO(0x119) /* TX BB Control 2 */ +#define B2063_TX_BB_CTL3 B43_LP_RADIO(0x11A) /* TX BB Control 3 */ +#define B2063_TX_BB_CTL4 B43_LP_RADIO(0x11B) /* TX BB Control 4 */ +#define B2063_GPIO_CTL1 B43_LP_RADIO(0x11C) /* GPIO Control 1 */ +#define B2063_VREG_CTL1 B43_LP_RADIO(0x11D) /* VREG Control 1 */ +#define B2063_AMUX_CTL1 B43_LP_RADIO(0x11E) /* AMUX Control 1 */ +#define B2063_IQ_CALIB_GVAR B43_LP_RADIO(0x11F) /* IQ Calibration GVAR */ +#define B2063_IQ_CALIB_CTL1 B43_LP_RADIO(0x120) /* IQ Calibration Control 1 */ +#define B2063_IQ_CALIB_CTL2 B43_LP_RADIO(0x121) /* IQ Calibration Control 2 */ +#define B2063_TEMPSENSE_CTL1 B43_LP_RADIO(0x122) /* TEMPSENSE Control 1 */ +#define B2063_TEMPSENSE_CTL2 B43_LP_RADIO(0x123) /* TEMPSENSE Control 2 */ +#define B2063_TX_RX_LOOPBACK1 B43_LP_RADIO(0x124) /* TX/RX LOOPBACK 1 */ +#define B2063_TX_RX_LOOPBACK2 B43_LP_RADIO(0x125) /* TX/RX LOOPBACK 2 */ +#define B2063_EXT_TSSI_CTL1 B43_LP_RADIO(0x126) /* EXT TSSI Control 1 */ +#define B2063_EXT_TSSI_CTL2 B43_LP_RADIO(0x127) /* EXT TSSI Control 2 */ +#define B2063_AFE_CTL B43_LP_RADIO(0x128) /* AFE Control */ + + + +enum b43_lpphy_txpctl_mode { + B43_LPPHY_TXPCTL_UNKNOWN = 0, + B43_LPPHY_TXPCTL_OFF, /* TX power control is OFF */ + B43_LPPHY_TXPCTL_SW, /* TX power control is set to Software */ + B43_LPPHY_TXPCTL_HW, /* TX power control is set to Hardware */ +}; + +struct b43_phy_lp { + /* Current TX power control mode. */ + enum b43_lpphy_txpctl_mode txpctl_mode; + + /* Transmit isolation medium band */ + u8 tx_isolation_med_band; + /* Transmit isolation low band */ + u8 tx_isolation_low_band; + /* Transmit isolation high band */ + u8 tx_isolation_hi_band; + + /* Max transmit power medium band */ + u16 max_tx_pwr_med_band; + /* Max transmit power low band */ + u16 max_tx_pwr_low_band; + /* Max transmit power high band */ + u16 max_tx_pwr_hi_band; + + /* FIXME What are these used for? */ + /* FIXME Is 15 the correct array size? */ + u16 tx_max_rate[15]; + u16 tx_max_ratel[15]; + u16 tx_max_rateh[15]; + + /* Transmit power arrays */ + s16 txpa[3], txpal[3], txpah[3]; + + /* Receive power offset */ + u8 rx_pwr_offset; + + /* TSSI transmit count */ + u16 tssi_tx_count; + /* TSSI index */ + u16 tssi_idx; /* FIXME initial value? */ + /* TSSI npt */ + u16 tssi_npt; /* FIXME initial value? */ + + /* Target TX frequency */ + u16 tgt_tx_freq; /* FIXME initial value? 
*/ + + /* Transmit power index override */ + s8 tx_pwr_idx_over; /* FIXME initial value? */ + + /* RSSI vf */ + u8 rssi_vf; + /* RSSI vc */ + u8 rssi_vc; + /* RSSI gs */ + u8 rssi_gs; + + /* RC cap */ + u8 rc_cap; + /* BX arch */ + u8 bx_arch; + + /* Full calibration channel */ + u8 full_calib_chan; + + /* Transmit iqlocal best coeffs */ + bool tx_iqloc_best_coeffs_valid; + u8 tx_iqloc_best_coeffs[11]; + + /* Used for "Save/Restore Dig Filt State" */ + u16 dig_flt_state[9]; + + bool crs_usr_disable, crs_sys_disable; + + unsigned int pdiv; + + /* The channel we are tuned to */ + u8 channel; + + /* The active antenna diversity mode */ + int antenna; + + /* Frequency of the active TX tone */ + int tx_tone_freq; +}; + +enum tssi_mux_mode { + TSSI_MUX_PREPA, + TSSI_MUX_POSTPA, + TSSI_MUX_EXT, +}; + +struct b43_phy_operations; +extern const struct b43_phy_operations b43_phyops_lp; + +#endif /* LINUX_B43_PHY_LP_H_ */ diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c new file mode 100644 index 0000000..795bb1e --- /dev/null +++ b/drivers/net/wireless/b43/phy_n.c @@ -0,0 +1,3292 @@ +/* + + Broadcom B43 wireless driver + IEEE 802.11n PHY support + + Copyright (c) 2008 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+
+*/
+
+#include <linux/delay.h>
+#include <linux/types.h>
+
+#include "b43.h"
+#include "phy_n.h"
+#include "tables_nphy.h"
+#include "main.h"
+
+struct nphy_txgains {
+	u16 txgm[2];
+	u16 pga[2];
+	u16 pad[2];
+	u16 ipa[2];
+};
+
+struct nphy_iqcal_params {
+	u16 txgm;
+	u16 pga;
+	u16 pad;
+	u16 ipa;
+	u16 cal_gain;
+	u16 ncorr[5];
+};
+
+struct nphy_iq_est {
+	s32 iq0_prod;
+	u32 i0_pwr;
+	u32 q0_pwr;
+	s32 iq1_prod;
+	u32 i1_pwr;
+	u32 q1_pwr;
+};
+
+enum b43_nphy_rf_sequence {
+	B43_RFSEQ_RX2TX,
+	B43_RFSEQ_TX2RX,
+	B43_RFSEQ_RESET2RX,
+	B43_RFSEQ_UPDATE_GAINH,
+	B43_RFSEQ_UPDATE_GAINL,
+	B43_RFSEQ_UPDATE_GAINU,
+};
+
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+					u8 *events, u8 *delays, u8 length);
+static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
+					enum b43_nphy_rf_sequence seq);
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+						u16 value, u8 core, bool off);
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+						u16 value, u8 core);
+
+void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
+{//TODO
+}
+
+static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
+{//TODO
+}
+
+static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
+							bool ignore_tssi)
+{//TODO
+	return B43_TXPWR_RES_DONE;
+}
+
+static void b43_chantab_radio_upload(struct b43_wldev *dev,
+				const struct b43_nphy_channeltab_entry *e)
+{
+	b43_radio_write16(dev, B2055_PLL_REF, e->radio_pll_ref);
+	b43_radio_write16(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
+	b43_radio_write16(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
+	b43_radio_write16(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
+	b43_radio_write16(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
+	b43_radio_write16(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
+	b43_radio_write16(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
+	b43_radio_write16(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
+	b43_radio_write16(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
+	b43_radio_write16(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
+	b43_radio_write16(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
+	b43_radio_write16(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
+	b43_radio_write16(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
+	b43_radio_write16(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
+	b43_radio_write16(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
+	b43_radio_write16(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
+	b43_radio_write16(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
+	b43_radio_write16(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
+	b43_radio_write16(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
+	b43_radio_write16(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
+	b43_radio_write16(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
+	b43_radio_write16(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
+}
+
+static void b43_chantab_phy_upload(struct b43_wldev *dev,
+				const struct b43_nphy_channeltab_entry *e)
+{
+	b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
+	b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
+	b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3);
+	b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4);
+	b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5);
+	b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
+}
+
+static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
+{
+	//TODO
+}
+
+/* Tune the hardware to a new channel. */
+static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
+{
+	const struct b43_nphy_channeltab_entry *tabent;
+
+	tabent = b43_nphy_get_chantabent(dev, channel);
+	if (!tabent)
+		return -ESRCH;
+
+	//FIXME enable/disable band select upper20 in RXCTL
+	if (0 /*FIXME 5Ghz*/)
+		b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x20);
+	else
+		b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x50);
+	b43_chantab_radio_upload(dev, tabent);
+	udelay(50);
+	b43_radio_write16(dev, B2055_VCO_CAL10, 5);
+	b43_radio_write16(dev, B2055_VCO_CAL10, 45);
+	b43_radio_write16(dev, B2055_VCO_CAL10, 65);
+	udelay(300);
+	if (0 /*FIXME 5Ghz*/)
+		b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
+	else
+		b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
+	b43_chantab_phy_upload(dev, tabent);
+	b43_nphy_tx_power_fix(dev);
+
+	return 0;
+}
+
+static void b43_radio_init2055_pre(struct b43_wldev *dev)
+{
+	b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+		     ~B43_NPHY_RFCTL_CMD_PORFORCE);
+	b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+		    B43_NPHY_RFCTL_CMD_CHIP0PU |
+		    B43_NPHY_RFCTL_CMD_OEPORFORCE);
+	b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+		    B43_NPHY_RFCTL_CMD_PORFORCE);
+}
+
+static void b43_radio_init2055_post(struct b43_wldev *dev)
+{
+	struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
+	struct ssb_boardinfo *binfo = &(dev->dev->bus->boardinfo);
+	int i;
+	u16 val;
+
+	b43_radio_mask(dev, B2055_MASTER1, 0xFFF3);
+	msleep(1);
+	if ((sprom->revision != 4) ||
+	    !(sprom->boardflags_hi & B43_BFH_RSSIINV)) {
+		if ((binfo->vendor != PCI_VENDOR_ID_BROADCOM) ||
+		    (binfo->type != 0x46D) ||
+		    (binfo->rev < 0x41)) {
+			b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
+			b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
+			msleep(1);
+		}
+	}
+	b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0x3F, 0x2C);
+	msleep(1);
+	b43_radio_write16(dev, B2055_CAL_MISC, 0x3C);
+	msleep(1);
+	b43_radio_mask(dev, B2055_CAL_MISC, 0xFFBE);
+	msleep(1);
+	b43_radio_set(dev, B2055_CAL_LPOCTL, 0x80);
+	msleep(1);
+	b43_radio_set(dev, B2055_CAL_MISC, 0x1);
+	msleep(1);
+	b43_radio_set(dev, B2055_CAL_MISC, 0x40);
+	msleep(1);
+	for (i = 0; i < 100; i++) {
+		val = b43_radio_read16(dev, B2055_CAL_COUT2);
+		if (val & 0x80)
+			break;
+		udelay(10);
+	}
+	msleep(1);
+	b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
+	msleep(1);
+	nphy_channel_switch(dev, dev->phy.channel);
+	b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9);
+	b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9);
+	b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
+	b43_radio_write16(dev, B2055_C2_RX_BB_MIDACHP, 0x83);
+}
+
+/* Initialize a Broadcom 2055 N-radio */
+static void b43_radio_init2055(struct b43_wldev *dev)
+{
+	b43_radio_init2055_pre(dev);
+	if (b43_status(dev) < B43_STAT_INITIALIZED)
+		b2055_upload_inittab(dev, 0, 1);
+	else
+		b2055_upload_inittab(dev, 0/*FIXME on 5ghz band*/, 0);
+	b43_radio_init2055_post(dev);
+}
+
+void b43_nphy_radio_turn_on(struct b43_wldev *dev)
+{
+	b43_radio_init2055(dev);
+}
+
+void b43_nphy_radio_turn_off(struct b43_wldev *dev)
+{
+	b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+		     ~B43_NPHY_RFCTL_CMD_EN);
+}
+
+/*
+ * Upload the N-PHY tables.
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables + */ +static void b43_nphy_tables_init(struct b43_wldev *dev) +{ + if (dev->phy.rev < 3) + b43_nphy_rev0_1_2_tables_init(dev); + else + b43_nphy_rev3plus_tables_init(dev); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */ +static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) +{ + struct b43_phy_n *nphy = dev->phy.n; + enum ieee80211_band band; + u16 tmp; + + if (!enable) { + nphy->rfctrl_intc1_save = b43_phy_read(dev, + B43_NPHY_RFCTL_INTC1); + nphy->rfctrl_intc2_save = b43_phy_read(dev, + B43_NPHY_RFCTL_INTC2); + band = b43_current_band(dev->wl); + if (dev->phy.rev >= 3) { + if (band == IEEE80211_BAND_5GHZ) + tmp = 0x600; + else + tmp = 0x480; + } else { + if (band == IEEE80211_BAND_5GHZ) + tmp = 0x180; + else + tmp = 0x120; + } + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp); + } else { + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, + nphy->rfctrl_intc1_save); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, + nphy->rfctrl_intc2_save); + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */ +static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + u16 tmp; + enum ieee80211_band band = b43_current_band(dev->wl); + bool ipa = (nphy->ipa2g_on && band == IEEE80211_BAND_2GHZ) || + (nphy->ipa5g_on && band == IEEE80211_BAND_5GHZ); + + if (dev->phy.rev >= 3) { + if (ipa) { + tmp = 4; + b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2, + (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp); + } + + tmp = 1; + b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2, + (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp); + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */ +static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force) +{ + u32 tmslow; + + if (dev->phy.type != B43_PHYTYPE_N) + return; + + tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + if (force) + tmslow |= SSB_TMSLOW_FGC; + else + tmslow &= ~SSB_TMSLOW_FGC; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ +static void b43_nphy_reset_cca(struct b43_wldev *dev) +{ + u16 bbcfg; + + b43_nphy_bmac_clock_fgc(dev, 1); + bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG); + b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA); + udelay(1); + b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA); + b43_nphy_bmac_clock_fgc(dev, 0); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ +static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble) +{ + u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG); + + mimocfg |= B43_NPHY_MIMOCFG_AUTO; + if (preamble == 1) + mimocfg |= B43_NPHY_MIMOCFG_GFMIX; + else + mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX; + + b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */ +static void b43_nphy_update_txrx_chain(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + + bool override = false; + u16 chain = 0x33; + + if (nphy->txrx_chain == 0) { + chain = 0x11; + override = true; + } else if (nphy->txrx_chain == 1) { + chain = 0x22; + override = true; + } + + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, + ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN), + chain); + + if (override) + b43_phy_set(dev, B43_NPHY_RFSEQMODE, + B43_NPHY_RFSEQMODE_CAOVER); + else + b43_phy_mask(dev, B43_NPHY_RFSEQMODE, + 
~B43_NPHY_RFSEQMODE_CAOVER); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */ +static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est, + u16 samps, u8 time, bool wait) +{ + int i; + u16 tmp; + + b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps); + b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time); + if (wait) + b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE); + else + b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE); + + b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START); + + for (i = 1000; i; i--) { + tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD); + if (!(tmp & B43_NPHY_IQEST_CMD_START)) { + est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0); + est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0); + est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0); + + est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1); + est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1); + est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) | + b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1); + return; + } + udelay(10); + } + memset(est, 0, sizeof(*est)); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */ +static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write, + struct b43_phy_n_iq_comp *pcomp) +{ + if (write) { + b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0); + b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0); + b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1); + b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1); + } else { + pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0); + pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0); + pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1); + pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1); + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */ +static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core) +{ + u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; + + b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]); + if (core == 0) { + b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]); + } else { + b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]); + } + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]); + b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]); + b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]); + b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]); + b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]); + b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]); + b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */ +static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core) +{ + u8 rxval, txval; + u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; + + regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA); + if (core == 0) { + regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); + regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); + } else { + regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); + regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); + 
} + regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); + regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); + regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); + regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); + regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1); + regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); + regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0); + regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1); + + b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001); + b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001); + + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, (u16)~B43_NPHY_RFSEQCA_RXDIS, + ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, + ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT)); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN, + (core << B43_NPHY_RFSEQCA_RXEN_SHIFT)); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS, + (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT)); + + if (core == 0) { + b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007); + } else { + b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007); + } + + b43_nphy_rf_control_intc_override(dev, 2, 0, 3); + b43_nphy_rf_control_override(dev, 8, 0, 3, false); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); + + if (core == 0) { + rxval = 1; + txval = 8; + } else { + rxval = 4; + txval = 2; + } + b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1)); + b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core)); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */ +static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask) +{ + int i; + s32 iq; + u32 ii; + u32 qq; + int iq_nbits, qq_nbits; + int arsh, brsh; + u16 tmp, a, b; + + struct nphy_iq_est est; + struct b43_phy_n_iq_comp old; + struct b43_phy_n_iq_comp new = { }; + bool error = false; + + if (mask == 0) + return; + + b43_nphy_rx_iq_coeffs(dev, false, &old); + b43_nphy_rx_iq_coeffs(dev, true, &new); + b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false); + new = old; + + for (i = 0; i < 2; i++) { + if (i == 0 && (mask & 1)) { + iq = est.iq0_prod; + ii = est.i0_pwr; + qq = est.q0_pwr; + } else if (i == 1 && (mask & 2)) { + iq = est.iq1_prod; + ii = est.i1_pwr; + qq = est.q1_pwr; + } else { + B43_WARN_ON(1); + continue; + } + + if (ii + qq < 2) { + error = true; + break; + } + + iq_nbits = fls(abs(iq)); + qq_nbits = fls(qq); + + arsh = iq_nbits - 20; + if (arsh >= 0) { + a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh))); + tmp = ii >> arsh; + } else { + a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh))); + tmp = ii << -arsh; + } + if (tmp == 0) { + error = true; + break; + } + a /= tmp; + + brsh = qq_nbits - 11; + if (brsh >= 0) { + b = (qq << (31 - qq_nbits)); + tmp = ii >> brsh; + } else { + b = (qq << (31 - qq_nbits)); + tmp = ii << -brsh; + } + if (tmp == 0) { + error = true; + break; + } + b = int_sqrt(b / tmp - a * a) - (1 << 10); + + if (i == 0 && (mask & 0x1)) { + if (dev->phy.rev >= 3) { + new.a0 = a & 0x3FF; + new.b0 = b & 0x3FF; + } else { + new.a0 = b & 0x3FF; + new.b0 = a & 0x3FF; + } + } else if (i == 1 && (mask & 0x2)) { + if (dev->phy.rev >= 3) { + new.a1 = a & 0x3FF; + new.b1 = b & 0x3FF; + } else { + new.a1 = b & 0x3FF; + new.b1 = a & 0x3FF; + } + } + } + + if (error) + new = old; + + b43_nphy_rx_iq_coeffs(dev, true, &new); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */ +static void b43_nphy_tx_iq_workaround(struct b43_wldev 
*dev) +{ + u16 array[4]; + int i; + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C50); + for (i = 0; i < 4; i++) + array[i] = b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ +static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st) +{ + b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]); + b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ +static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) +{ + clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES); + clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ +static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) +{ + u16 tmp; + + if (dev->dev->id.revision == 16) + b43_mac_suspend(dev); + + tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL); + tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN | + B43_NPHY_CLASSCTL_WAITEDEN); + tmp &= ~mask; + tmp |= (val & mask); + b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp); + + if (dev->dev->id.revision == 16) + b43_mac_enable(dev); + + return tmp; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */ +static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = phy->n; + + if (enable) { + u16 clip[] = { 0xFFFF, 0xFFFF }; + if (nphy->deaf_count++ == 0) { + nphy->classifier_state = b43_nphy_classifier(dev, 0, 0); + b43_nphy_classifier(dev, 0x7, 0); + b43_nphy_read_clip_detection(dev, nphy->clip_state); + b43_nphy_write_clip_detection(dev, clip); + } + b43_nphy_reset_cca(dev); + } else { + if (--nphy->deaf_count == 0) { + b43_nphy_classifier(dev, 0x7, nphy->classifier_state); + b43_nphy_write_clip_detection(dev, nphy->clip_state); + } + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */ +static void b43_nphy_stop_playback(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + u16 tmp; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT); + if (tmp & 0x1) + b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP); + else if (tmp & 0x2) + b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, (u16)~0x8000); + + b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004); + + if (nphy->bb_mult_save & 0x80000000) { + tmp = nphy->bb_mult_save & 0xFFFF; + b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); + nphy->bb_mult_save = 0; + } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */ +static void b43_nphy_spur_workaround(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + + unsigned int channel; + int tone[2] = { 57, 58 }; + u32 noise[2] = { 0x3FF, 0x3FF }; + + B43_WARN_ON(dev->phy.rev < 3); + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + /* FIXME: channel = radio_chanspec */ + + if (nphy->gband_spurwar_en) { + /* TODO: N PHY Adjust Analog Pfbw (7) */ + if (channel == 11 && dev->phy.is_40mhz) + ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/ 
+ else + ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/ + /* TODO: N PHY Adjust CRS Min Power (0x1E) */ + } + + if (nphy->aband_spurwar_en) { + if (channel == 54) { + tone[0] = 0x20; + noise[0] = 0x25F; + } else if (channel == 38 || channel == 102 || channel == 118) { + if (0 /* FIXME */) { + tone[0] = 0x20; + noise[0] = 0x21F; + } else { + tone[0] = 0; + noise[0] = 0; + } + } else if (channel == 134) { + tone[0] = 0x20; + noise[0] = 0x21F; + } else if (channel == 151) { + tone[0] = 0x10; + noise[0] = 0x23F; + } else if (channel == 153 || channel == 161) { + tone[0] = 0x30; + noise[0] = 0x23F; + } else { + tone[0] = 0; + noise[0] = 0; + } + + if (!tone[0] && !noise[0]) + ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/ + else + ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/ + } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ +static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + u8 i, j; + u8 code; + + /* TODO: for PHY >= 3 + s8 *lna1_gain, *lna2_gain; + u8 *gain_db, *gain_bits; + u16 *rfseq_init; + u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; + u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; + */ + + u8 rfseq_events[3] = { 6, 8, 7 }; + u8 rfseq_delays[3] = { 10, 30, 1 }; + + if (dev->phy.rev >= 3) { + /* TODO */ + } else { + /* Set Clip 2 detect */ + b43_phy_set(dev, B43_NPHY_C1_CGAINI, + B43_NPHY_C1_CGAINI_CL2DETECT); + b43_phy_set(dev, B43_NPHY_C2_CGAINI, + B43_NPHY_C2_CGAINI_CL2DETECT); + + /* Set narrowband clip threshold */ + b43_phy_set(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84); + b43_phy_set(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84); + + if (!dev->phy.is_40mhz) { + /* Set dwell lengths */ + b43_phy_set(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B); + b43_phy_set(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B); + b43_phy_set(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009); + b43_phy_set(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009); + } + + /* Set wideband clip 2 threshold */ + b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, + ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, + 21); + b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, + ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, + 21); + + if (!dev->phy.is_40mhz) { + b43_phy_maskset(dev, B43_NPHY_C1_CGAINI, + ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1); + b43_phy_maskset(dev, B43_NPHY_C2_CGAINI, + ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1); + b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI, + ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1); + b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI, + ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1); + } + + b43_phy_set(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); + + if (nphy->gain_boost) { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ && + dev->phy.is_40mhz) + code = 4; + else + code = 5; + } else { + code = dev->phy.is_40mhz ? 
6 : 7; + } + + /* Set HPVGA2 index */ + b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, + ~B43_NPHY_C1_INITGAIN_HPVGA2, + code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT); + b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, + ~B43_NPHY_C2_INITGAIN_HPVGA2, + code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (code << 8 | 0x7C)); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (code << 8 | 0x7C)); + + /* TODO: b43_nphy_adjust_lna_gain_table(dev); */ + + if (nphy->elna_gain_config) { + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (code << 8 | 0x74)); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (code << 8 | 0x74)); + } + + if (dev->phy.rev == 2) { + for (i = 0; i < 4; i++) { + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, + (0x0400 * i) + 0x0020); + for (j = 0; j < 21; j++) + b43_phy_write(dev, + B43_NPHY_TABLE_DATALO, 3 * j); + } + + b43_nphy_set_rf_sequence(dev, 5, + rfseq_events, rfseq_delays, 3); + b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, + (u16)~B43_NPHY_OVER_DGAIN_CCKDGECV, + 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + b43_phy_maskset(dev, B43_PHY_N(0xC5D), + 0xFF80, 4); + } + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */ +static void b43_nphy_workarounds(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = phy->n; + + u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 }; + u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 }; + + u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 }; + u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + b43_nphy_classifier(dev, 1, 0); + else + b43_nphy_classifier(dev, 1, 1); + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + b43_phy_set(dev, B43_NPHY_IQFLIP, + B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); + + if (dev->phy.rev >= 3) { + /* TODO */ + } else { + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && + nphy->band5g_pwrgain) { + b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8); + b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8); + } else { + b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8); + b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8); + } + + /* TODO: convert to b43_ntab_write? 
*/ + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2000); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2010); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2002); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2012); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA); + + if (dev->phy.rev < 2) { + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2008); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2018); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2007); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2017); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2006); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2016); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800); + } + + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); + + if (bus->sprom.boardflags2_lo & 0x100 && + bus->boardinfo.type == 0x8B) { + delays1[0] = 0x1; + delays1[5] = 0x14; + } + b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7); + b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7); + + b43_nphy_gain_crtl_workarounds(dev); + + if (dev->phy.rev < 2) { + if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2) + ; /*TODO: b43_mhf(dev, 2, 0x0010, 0x0010, 3);*/ + } else if (dev->phy.rev == 2) { + b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0); + b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0); + } + + if (dev->phy.rev < 2) + b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL, + ~B43_NPHY_SCRAM_SIGCTL_SCM); + + /* Set phase track alpha and beta */ + b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125); + b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3); + b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105); + b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E); + b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD); + b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20); + + b43_phy_mask(dev, B43_NPHY_PIL_DW1, + (u16)~B43_NPHY_PIL_DW_64QAM); + b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5); + b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4); + b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00); + + if (dev->phy.rev == 2) + b43_phy_set(dev, B43_NPHY_FINERX2_CGC, + B43_NPHY_FINERX2_CGC_DECGC); + } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */ +static int b43_nphy_load_samples(struct b43_wldev *dev, + struct b43_c32 *samples, u16 len) { + struct b43_phy_n *nphy = dev->phy.n; + u16 i; + u32 *data; + + data = kzalloc(len * sizeof(u32), GFP_KERNEL); + if (!data) { + b43err(dev->wl, "allocation for samples loading failed\n"); + return -ENOMEM; + } + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + for (i = 0; i < len; i++) { + data[i] = (samples[i].i & 0x3FF << 10); + data[i] |= samples[i].q & 0x3FF; + } + b43_ntab_write_bulk(dev, B43_NTAB32(17, 0), len, data); + + kfree(data); + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); + return 0; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */ +static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, + bool test) 
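+/*
+ * Generates `len` complex tone samples with the CORDIC helper (len = bw * 8
+ * in the normal case, or derived from the DAC-test bandwidth when `test` is
+ * set), advances the phase by `rot` per sample, scales I/Q by `max` and
+ * loads the result through b43_nphy_load_samples() above.  The sample-table
+ * packing intended by the specs appears to be ((I & 0x3FF) << 10) |
+ * (Q & 0x3FF); note that in b43_nphy_load_samples() the shift binds tighter
+ * than the mask, so as written the I part is masked with 0xFFC00 and not
+ * shifted.
+ */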
+{ + int i; + u16 bw, len, rot, angle; + struct b43_c32 *samples; + + + bw = (dev->phy.is_40mhz) ? 40 : 20; + len = bw << 3; + + if (test) { + if (b43_phy_read(dev, B43_NPHY_BBCFG) & B43_NPHY_BBCFG_RSTRX) + bw = 82; + else + bw = 80; + + if (dev->phy.is_40mhz) + bw <<= 1; + + len = bw << 1; + } + + samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL); + if (!samples) { + b43err(dev->wl, "allocation for samples generation failed\n"); + return 0; + } + rot = (((freq * 36) / bw) << 16) / 100; + angle = 0; + + for (i = 0; i < len; i++) { + samples[i] = b43_cordic(angle); + angle += rot; + samples[i].q = CORDIC_CONVERT(samples[i].q * max); + samples[i].i = CORDIC_CONVERT(samples[i].i * max); + } + + i = b43_nphy_load_samples(dev, samples, len); + kfree(samples); + return (i < 0) ? 0 : len; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */ +static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops, + u16 wait, bool iqmode, bool dac_test) +{ + struct b43_phy_n *nphy = dev->phy.n; + int i; + u16 seq_mode; + u32 tmp; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, true); + + if ((nphy->bb_mult_save & 0x80000000) == 0) { + tmp = b43_ntab_read(dev, B43_NTAB16(15, 87)); + nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000; + } + + if (!dev->phy.is_40mhz) + tmp = 0x6464; + else + tmp = 0x4747; + b43_ntab_write(dev, B43_NTAB16(15, 87), tmp); + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, false); + + b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1)); + + if (loops != 0xFFFF) + b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, (loops - 1)); + else + b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, loops); + + b43_phy_write(dev, B43_NPHY_SAMP_WAITCNT, wait); + + seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); + + b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER); + if (iqmode) { + b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF); + b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000); + } else { + if (dac_test) + b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5); + else + b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1); + } + for (i = 0; i < 100; i++) { + if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) { + i = 0; + break; + } + udelay(10); + } + if (i) + b43err(dev->wl, "run samples timeout\n"); + + b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); +} + +/* + * Transmits a known value for LO calibration + * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone + */ +static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val, + bool iqmode, bool dac_test) +{ + u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test); + if (samp == 0) + return -1; + b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test); + return 0; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */ +static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + int i, j; + u32 tmp; + u32 cur_real, cur_imag, real_part, imag_part; + + u16 buffer[7]; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, true); + + b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer); + + for (i = 0; i < 2; i++) { + tmp = ((buffer[i * 2] & 0x3FF) << 10) | + (buffer[i * 2 + 1] & 0x3FF); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, + (((i + 26) << 10) | 320)); + for (j = 0; j < 128; j++) { + b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, + ((tmp >> 16) & 0xFFFF)); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (tmp & 0xFFFF)); + } + } + + for (i = 0; i < 2; i++) { + tmp = buffer[5 + i]; + real_part = 
(tmp >> 8) & 0xFF; + imag_part = (tmp & 0xFF); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, + (((i + 26) << 10) | 448)); + + if (dev->phy.rev >= 3) { + cur_real = real_part; + cur_imag = imag_part; + tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF); + } + + for (j = 0; j < 128; j++) { + if (dev->phy.rev < 3) { + cur_real = (real_part * loscale[j] + 128) >> 8; + cur_imag = (imag_part * loscale[j] + 128) >> 8; + tmp = ((cur_real & 0xFF) << 8) | + (cur_imag & 0xFF); + } + b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, + ((tmp >> 16) & 0xFFFF)); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + (tmp & 0xFFFF)); + } + } + + if (dev->phy.rev >= 3) { + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF); + } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, false); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */ +static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, + u8 *events, u8 *delays, u8 length) +{ + struct b43_phy_n *nphy = dev->phy.n; + u8 i; + u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F; + u16 offset1 = cmd << 4; + u16 offset2 = offset1 + 0x80; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, true); + + b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events); + b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays); + + for (i = length; i < 16; i++) { + b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end); + b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1); + } + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, false); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ +static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, + enum b43_nphy_rf_sequence seq) +{ + static const u16 trigger[] = { + [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX, + [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX, + [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX, + [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH, + [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL, + [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU, + }; + int i; + u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE); + + B43_WARN_ON(seq >= ARRAY_SIZE(trigger)); + + b43_phy_set(dev, B43_NPHY_RFSEQMODE, + B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER); + b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]); + for (i = 0; i < 200; i++) { + if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq])) + goto ok; + msleep(1); + } + b43err(dev->wl, "RF sequence status timeout\n"); +ok: + b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ +static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, + u16 value, u8 core, bool off) +{ + int i; + u8 index = fls(field); + u8 addr, en_addr, val_addr; + /* we expect only one bit set */ + B43_WARN_ON(field & (~(1 << (index - 1)))); + + if (dev->phy.rev >= 3) { + const struct nphy_rf_control_override_rev3 *rf_ctrl; + for (i = 0; i < 2; i++) { + if (index == 0 || index == 16) { + b43err(dev->wl, + "Unsupported RF Ctrl Override call\n"); + return; + } + + rf_ctrl = &tbl_rf_control_override_rev3[index - 1]; + en_addr = B43_PHY_N((i == 0) ? + rf_ctrl->en_addr0 : rf_ctrl->en_addr1); + val_addr = B43_PHY_N((i == 0) ? 
+ rf_ctrl->val_addr0 : rf_ctrl->val_addr1); + + if (off) { + b43_phy_mask(dev, en_addr, ~(field)); + b43_phy_mask(dev, val_addr, + ~(rf_ctrl->val_mask)); + } else { + if (core == 0 || ((1 << core) & i) != 0) { + b43_phy_set(dev, en_addr, field); + b43_phy_maskset(dev, val_addr, + ~(rf_ctrl->val_mask), + (value << rf_ctrl->val_shift)); + } + } + } + } else { + const struct nphy_rf_control_override_rev2 *rf_ctrl; + if (off) { + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field)); + value = 0; + } else { + b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field); + } + + for (i = 0; i < 2; i++) { + if (index <= 1 || index == 16) { + b43err(dev->wl, + "Unsupported RF Ctrl Override call\n"); + return; + } + + if (index == 2 || index == 10 || + (index >= 13 && index <= 15)) { + core = 1; + } + + rf_ctrl = &tbl_rf_control_override_rev2[index - 2]; + addr = B43_PHY_N((i == 0) ? + rf_ctrl->addr0 : rf_ctrl->addr1); + + if ((core & (1 << i)) != 0) + b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask), + (value << rf_ctrl->shift)); + + b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_START); + udelay(1); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE); + } + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ +static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, + u16 value, u8 core) +{ + u8 i, j; + u16 reg, tmp, val; + + B43_WARN_ON(dev->phy.rev < 3); + B43_WARN_ON(field > 4); + + for (i = 0; i < 2; i++) { + if ((core == 1 && i == 1) || (core == 2 && !i)) + continue; + + reg = (i == 0) ? + B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2; + b43_phy_mask(dev, reg, 0xFBFF); + + switch (field) { + case 0: + b43_phy_write(dev, reg, 0); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); + break; + case 1: + if (!i) { + b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1, + 0xFC3F, (value << 6)); + b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1, + 0xFFFE, 1); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_START); + for (j = 0; j < 100; j++) { + if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) { + j = 0; + break; + } + udelay(10); + } + if (j) + b43err(dev->wl, + "intc override timeout\n"); + b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, + 0xFFFE); + } else { + b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2, + 0xFC3F, (value << 6)); + b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, + 0xFFFE, 1); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, + B43_NPHY_RFCTL_CMD_RXTX); + for (j = 0; j < 100; j++) { + if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) { + j = 0; + break; + } + udelay(10); + } + if (j) + b43err(dev->wl, + "intc override timeout\n"); + b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, + 0xFFFE); + } + break; + case 2: + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + tmp = 0x0020; + val = value << 5; + } else { + tmp = 0x0010; + val = value << 4; + } + b43_phy_maskset(dev, reg, ~tmp, val); + break; + case 3: + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + tmp = 0x0001; + val = value; + } else { + tmp = 0x0004; + val = value << 2; + } + b43_phy_maskset(dev, reg, ~tmp, val); + break; + case 4: + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + tmp = 0x0002; + val = value << 1; + } else { + tmp = 0x0008; + val = value << 3; + } + b43_phy_maskset(dev, reg, ~tmp, val); + break; + } + } +} + +static void b43_nphy_bphy_init(struct b43_wldev *dev) +{ + unsigned int i; + u16 val; + + val = 0x1E1F; + for (i = 0; i < 14; i++) { + b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val); + val 
-= 0x202; + } + val = 0x3E3F; + for (i = 0; i < 16; i++) { + b43_phy_write(dev, B43_PHY_N_BMODE(0x97 + i), val); + val -= 0x202; + } + b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */ +static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale, + s8 offset, u8 core, u8 rail, u8 type) +{ + u16 tmp; + bool core1or5 = (core == 1) || (core == 5); + bool core2or5 = (core == 2) || (core == 5); + + offset = clamp_val(offset, -32, 31); + tmp = ((scale & 0x3F) << 8) | (offset & 0x3F); + + if (core1or5 && (rail == 0) && (type == 2)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp); + if (core1or5 && (rail == 1) && (type == 2)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp); + if (core2or5 && (rail == 0) && (type == 2)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp); + if (core2or5 && (rail == 1) && (type == 2)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp); + if (core1or5 && (rail == 0) && (type == 0)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp); + if (core1or5 && (rail == 1) && (type == 0)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp); + if (core2or5 && (rail == 0) && (type == 0)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp); + if (core2or5 && (rail == 1) && (type == 0)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp); + if (core1or5 && (rail == 0) && (type == 1)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp); + if (core1or5 && (rail == 1) && (type == 1)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp); + if (core2or5 && (rail == 0) && (type == 1)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp); + if (core2or5 && (rail == 1) && (type == 1)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp); + if (core1or5 && (rail == 0) && (type == 6)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp); + if (core1or5 && (rail == 1) && (type == 6)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp); + if (core2or5 && (rail == 0) && (type == 6)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp); + if (core2or5 && (rail == 1) && (type == 6)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp); + if (core1or5 && (rail == 0) && (type == 3)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp); + if (core1or5 && (rail == 1) && (type == 3)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp); + if (core2or5 && (rail == 0) && (type == 3)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp); + if (core2or5 && (rail == 1) && (type == 3)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp); + if (core1or5 && (type == 4)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp); + if (core2or5 && (type == 4)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp); + if (core1or5 && (type == 5)) + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp); + if (core2or5 && (type == 5)) + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp); +} + +static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type) +{ + u16 val; + + if (type < 3) + val = 0; + else if (type == 6) + val = 1; + else if (type == 3) + val = 2; + else + val = 3; + + val = (val << 12) | (val << 14); + b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val); + b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val); + + if (type < 3) { + b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF, + (type + 1) << 4); + b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF, + (type + 1) << 4); + } + + /* TODO use some definitions */ + if (code == 0) { + b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 0); + if (type < 3) { 
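+ /*
+  * RSSI `type` encoding used by these helpers (cf. the register writes in
+  * b43_nphy_scale_offset_rssi() above): 0/1/2 select the RSSI X/Y/Z paths,
+  * 3 the power detector, 4/5 TSSI I/Q, 6 the "TBD" input.  Only types
+  * below 3 need the RFCTL command/override sequencing done here.
+  */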
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFEC7, 0); + b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xEFDC, 0); + b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0); + udelay(20); + b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0); + } + } else { + b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, + 0x3000); + if (type < 3) { + b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, + 0xFEC7, 0x0180); + b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, + 0xEFDC, (code << 1 | 0x1021)); + b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0x1); + udelay(20); + b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0); + } + } +} + +static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type) +{ + struct b43_phy_n *nphy = dev->phy.n; + u8 i; + u16 reg, val; + + if (code == 0) { + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, 0xFDFF); + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, 0xFDFF); + b43_phy_mask(dev, B43_NPHY_AFECTL_C1, 0xFCFF); + b43_phy_mask(dev, B43_NPHY_AFECTL_C2, 0xFCFF); + b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S0, 0xFFDF); + b43_phy_mask(dev, B43_NPHY_TXF_40CO_B32S1, 0xFFDF); + b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3); + b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3); + } else { + for (i = 0; i < 2; i++) { + if ((code == 1 && i == 1) || (code == 2 && !i)) + continue; + + reg = (i == 0) ? + B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER; + b43_phy_maskset(dev, reg, 0xFDFF, 0x0200); + + if (type < 3) { + reg = (i == 0) ? + B43_NPHY_AFECTL_C1 : + B43_NPHY_AFECTL_C2; + b43_phy_maskset(dev, reg, 0xFCFF, 0); + + reg = (i == 0) ? + B43_NPHY_RFCTL_LUT_TRSW_UP1 : + B43_NPHY_RFCTL_LUT_TRSW_UP2; + b43_phy_maskset(dev, reg, 0xFFC3, 0); + + if (type == 0) + val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8; + else if (type == 1) + val = 16; + else + val = 32; + b43_phy_set(dev, reg, val); + + reg = (i == 0) ? + B43_NPHY_TXF_40CO_B1S0 : + B43_NPHY_TXF_40CO_B32S1; + b43_phy_set(dev, reg, 0x0020); + } else { + if (type == 6) + val = 0x0100; + else if (type == 3) + val = 0x0200; + else + val = 0x0300; + + reg = (i == 0) ? + B43_NPHY_AFECTL_C1 : + B43_NPHY_AFECTL_C2; + + b43_phy_maskset(dev, reg, 0xFCFF, val); + b43_phy_maskset(dev, reg, 0xF3FF, val << 2); + + if (type != 3 && type != 6) { + enum ieee80211_band band = + b43_current_band(dev->wl); + + if ((nphy->ipa2g_on && + band == IEEE80211_BAND_2GHZ) || + (nphy->ipa5g_on && + band == IEEE80211_BAND_5GHZ)) + val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE; + else + val = 0x11; + reg = (i == 0) ? 0x2000 : 0x3000; + reg |= B2055_PADDRV; + b43_radio_write16(dev, reg, val); + + reg = (i == 0) ? 
+ B43_NPHY_AFECTL_OVER1 : + B43_NPHY_AFECTL_OVER; + b43_phy_set(dev, reg, 0x0200); + } + } + } + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */ +static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type) +{ + if (dev->phy.rev >= 3) + b43_nphy_rev3_rssi_select(dev, code, type); + else + b43_nphy_rev2_rssi_select(dev, code, type); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */ +static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf) +{ + int i; + for (i = 0; i < 2; i++) { + if (type == 2) { + if (i == 0) { + b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM, + 0xFC, buf[0]); + b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5, + 0xFC, buf[1]); + } else { + b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM, + 0xFC, buf[2 * i]); + b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5, + 0xFC, buf[2 * i + 1]); + } + } else { + if (i == 0) + b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5, + 0xF3, buf[0] << 2); + else + b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5, + 0xF3, buf[2 * i + 1] << 2); + } + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */ +static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf, + u8 nsamp) +{ + int i; + int out; + u16 save_regs_phy[9]; + u16 s[2]; + + if (dev->phy.rev >= 3) { + save_regs_phy[0] = b43_phy_read(dev, + B43_NPHY_RFCTL_LUT_TRSW_UP1); + save_regs_phy[1] = b43_phy_read(dev, + B43_NPHY_RFCTL_LUT_TRSW_UP2); + save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); + save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); + save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); + save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); + save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); + save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); + } + + b43_nphy_rssi_select(dev, 5, type); + + if (dev->phy.rev < 2) { + save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL); + b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5); + } + + for (i = 0; i < 4; i++) + buf[i] = 0; + + for (i = 0; i < nsamp; i++) { + if (dev->phy.rev < 2) { + s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT); + s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT); + } else { + s[0] = b43_phy_read(dev, B43_NPHY_RSSI1); + s[1] = b43_phy_read(dev, B43_NPHY_RSSI2); + } + + buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2; + buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2; + buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2; + buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2; + } + out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 | + (buf[2] & 0xFF) << 8 | (buf[3] & 0xFF); + + if (dev->phy.rev < 2) + b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]); + + if (dev->phy.rev >= 3) { + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, + save_regs_phy[0]); + b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, + save_regs_phy[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]); + b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]); + b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]); + b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]); + } + + return out; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ +static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) +{ + int i, j; + u8 state[4]; + u8 code, val; + u16 class, override; + u8 regs_save_radio[2]; + u16 regs_save_phy[2]; + s8 offset[4]; + + u16 clip_state[2]; + u16 
clip_off[2] = { 0xFFFF, 0xFFFF }; + s32 results_min[4] = { }; + u8 vcm_final[4] = { }; + s32 results[4][4] = { }; + s32 miniq[4][2] = { }; + + if (type == 2) { + code = 0; + val = 6; + } else if (type < 2) { + code = 25; + val = 4; + } else { + B43_WARN_ON(1); + return; + } + + class = b43_nphy_classifier(dev, 0, 0); + b43_nphy_classifier(dev, 7, 4); + b43_nphy_read_clip_detection(dev, clip_state); + b43_nphy_write_clip_detection(dev, clip_off); + + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + override = 0x140; + else + override = 0x110; + + regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); + regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override); + b43_radio_write16(dev, B2055_C1_PD_RXTX, val); + + regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); + regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override); + b43_radio_write16(dev, B2055_C2_PD_RXTX, val); + + state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07; + state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07; + b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8); + b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8); + state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07; + state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07; + + b43_nphy_rssi_select(dev, 5, type); + b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type); + b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type); + + for (i = 0; i < 4; i++) { + u8 tmp[4]; + for (j = 0; j < 4; j++) + tmp[j] = i; + if (type != 1) + b43_nphy_set_rssi_2055_vcm(dev, type, tmp); + b43_nphy_poll_rssi(dev, type, results[i], 8); + if (type < 2) + for (j = 0; j < 2; j++) + miniq[i][j] = min(results[i][2 * j], + results[i][2 * j + 1]); + } + + for (i = 0; i < 4; i++) { + s32 mind = 40; + u8 minvcm = 0; + s32 minpoll = 249; + s32 curr; + for (j = 0; j < 4; j++) { + if (type == 2) + curr = abs(results[j][i]); + else + curr = abs(miniq[j][i / 2] - code * 8); + + if (curr < mind) { + mind = curr; + minvcm = j; + } + + if (results[j][i] < minpoll) + minpoll = results[j][i]; + } + results_min[i] = minpoll; + vcm_final[i] = minvcm; + } + + if (type != 1) + b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final); + + for (i = 0; i < 4; i++) { + offset[i] = (code * 8) - results[vcm_final[i]][i]; + + if (offset[i] < 0) + offset[i] = -((abs(offset[i]) + 4) / 8); + else + offset[i] = (offset[i] + 4) / 8; + + if (results_min[i] == 248) + offset[i] = code - 32; + + if (i % 2 == 0) + b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1, 0, + type); + else + b43_nphy_scale_offset_rssi(dev, 0, offset[i], 2, 1, + type); + } + + b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]); + b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[1]); + + switch (state[2]) { + case 1: + b43_nphy_rssi_select(dev, 1, 2); + break; + case 4: + b43_nphy_rssi_select(dev, 1, 0); + break; + case 2: + b43_nphy_rssi_select(dev, 1, 1); + break; + default: + b43_nphy_rssi_select(dev, 1, 1); + break; + } + + switch (state[3]) { + case 1: + b43_nphy_rssi_select(dev, 2, 2); + break; + case 4: + b43_nphy_rssi_select(dev, 2, 0); + break; + default: + b43_nphy_rssi_select(dev, 2, 1); + break; + } + + b43_nphy_rssi_select(dev, 0, type); + + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]); + b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]); + b43_radio_write16(dev, B2055_C2_PD_RXTX, 
regs_save_radio[1]); + + b43_nphy_classifier(dev, 7, class); + b43_nphy_write_clip_detection(dev, clip_state); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */ +static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) +{ + /* TODO */ +} + +/* + * RSSI Calibration + * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal + */ +static void b43_nphy_rssi_cal(struct b43_wldev *dev) +{ + if (dev->phy.rev >= 3) { + b43_nphy_rev3_rssi_cal(dev); + } else { + b43_nphy_rev2_rssi_cal(dev, 2); + b43_nphy_rev2_rssi_cal(dev, 0); + b43_nphy_rev2_rssi_cal(dev, 1); + } +} + +/* + * Restore RSSI Calibration + * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal + */ +static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + + u16 *rssical_radio_regs = NULL; + u16 *rssical_phy_regs = NULL; + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (!nphy->rssical_chanspec_2G) + return; + rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G; + rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G; + } else { + if (!nphy->rssical_chanspec_5G) + return; + rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G; + rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G; + } + + /* TODO use some definitions */ + b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]); + b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]); + + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]); + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]); + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]); + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]); + + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]); + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]); + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]); + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]); + + b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]); + b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, rssical_phy_regs[9]); + b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]); + b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */ +static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev) +{ + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (dev->phy.rev >= 6) { + /* TODO If the chip is 47162 + return txpwrctrl_tx_gain_ipa_rev5 */ + return txpwrctrl_tx_gain_ipa_rev6; + } else if (dev->phy.rev >= 5) { + return txpwrctrl_tx_gain_ipa_rev5; + } else { + return txpwrctrl_tx_gain_ipa; + } + } else { + return txpwrctrl_tx_gain_ipa_5g; + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */ +static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + u16 *save = nphy->tx_rx_cal_radio_saveregs; + u16 tmp; + u8 offset, i; + + if (dev->phy.rev >= 3) { + for (i = 0; i < 2; i++) { + tmp = (i == 0) ? 
0x2000 : 0x3000; + offset = i * 11; + + save[offset + 0] = b43_radio_read16(dev, B2055_CAL_RVARCTL); + save[offset + 1] = b43_radio_read16(dev, B2055_CAL_LPOCTL); + save[offset + 2] = b43_radio_read16(dev, B2055_CAL_TS); + save[offset + 3] = b43_radio_read16(dev, B2055_CAL_RCCALRTS); + save[offset + 4] = b43_radio_read16(dev, B2055_CAL_RCALRTS); + save[offset + 5] = b43_radio_read16(dev, B2055_PADDRV); + save[offset + 6] = b43_radio_read16(dev, B2055_XOCTL1); + save[offset + 7] = b43_radio_read16(dev, B2055_XOCTL2); + save[offset + 8] = b43_radio_read16(dev, B2055_XOREGUL); + save[offset + 9] = b43_radio_read16(dev, B2055_XOMISC); + save[offset + 10] = b43_radio_read16(dev, B2055_PLL_LFC1); + + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x0A); + b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40); + b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55); + b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0); + b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0); + if (nphy->ipa5g_on) { + b43_radio_write16(dev, tmp | B2055_PADDRV, 4); + b43_radio_write16(dev, tmp | B2055_XOCTL1, 1); + } else { + b43_radio_write16(dev, tmp | B2055_PADDRV, 0); + b43_radio_write16(dev, tmp | B2055_XOCTL1, 0x2F); + } + b43_radio_write16(dev, tmp | B2055_XOCTL2, 0); + } else { + b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x06); + b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40); + b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55); + b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0); + b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0); + b43_radio_write16(dev, tmp | B2055_XOCTL1, 0); + if (nphy->ipa2g_on) { + b43_radio_write16(dev, tmp | B2055_PADDRV, 6); + b43_radio_write16(dev, tmp | B2055_XOCTL2, + (dev->phy.rev < 5) ? 
0x11 : 0x01); + } else { + b43_radio_write16(dev, tmp | B2055_PADDRV, 0); + b43_radio_write16(dev, tmp | B2055_XOCTL2, 0); + } + } + b43_radio_write16(dev, tmp | B2055_XOREGUL, 0); + b43_radio_write16(dev, tmp | B2055_XOMISC, 0); + b43_radio_write16(dev, tmp | B2055_PLL_LFC1, 0); + } + } else { + save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1); + b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29); + + save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2); + b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54); + + save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1); + b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29); + + save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2); + b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54); + + save[3] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX); + save[4] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX); + + if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) & + B43_NPHY_BANDCTL_5GHZ)) { + b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04); + b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04); + } else { + b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20); + b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20); + } + + if (dev->phy.rev < 2) { + b43_radio_set(dev, B2055_C1_TX_BB_MXGM, 0x20); + b43_radio_set(dev, B2055_C2_TX_BB_MXGM, 0x20); + } else { + b43_radio_mask(dev, B2055_C1_TX_BB_MXGM, ~0x20); + b43_radio_mask(dev, B2055_C2_TX_BB_MXGM, ~0x20); + } + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */ +static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, + struct nphy_txgains target, + struct nphy_iqcal_params *params) +{ + int i, j, indx; + u16 gain; + + if (dev->phy.rev >= 3) { + params->txgm = target.txgm[core]; + params->pga = target.pga[core]; + params->pad = target.pad[core]; + params->ipa = target.ipa[core]; + params->cal_gain = (params->txgm << 12) | (params->pga << 8) | + (params->pad << 4) | (params->ipa); + for (j = 0; j < 5; j++) + params->ncorr[j] = 0x79; + } else { + gain = (target.pad[core]) | (target.pga[core] << 4) | + (target.txgm[core] << 8); + + indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 
+ 1 : 0; + for (i = 0; i < 9; i++) + if (tbl_iqcal_gainparams[indx][i][0] == gain) + break; + i = min(i, 8); + + params->txgm = tbl_iqcal_gainparams[indx][i][1]; + params->pga = tbl_iqcal_gainparams[indx][i][2]; + params->pad = tbl_iqcal_gainparams[indx][i][3]; + params->cal_gain = (params->txgm << 7) | (params->pga << 4) | + (params->pad << 2); + for (j = 0; j < 4; j++) + params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j]; + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */ +static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core) +{ + struct b43_phy_n *nphy = dev->phy.n; + int i; + u16 scale, entry; + + u16 tmp = nphy->txcal_bbmult; + if (core == 0) + tmp >>= 8; + tmp &= 0xff; + + for (i = 0; i < 18; i++) { + scale = (ladder_lo[i].percent * tmp) / 100; + entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env; + b43_ntab_write(dev, B43_NTAB16(15, i), entry); + + scale = (ladder_iq[i].percent * tmp) / 100; + entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env; + b43_ntab_write(dev, B43_NTAB16(15, i + 32), entry); + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */ +static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev) +{ + int i; + for (i = 0; i < 15; i++) + b43_phy_write(dev, B43_PHY_N(0x2C5 + i), + tbl_tx_filter_coef_rev4[2][i]); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */ +static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev) +{ + int i, j; + /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */ + u16 offset[] = { 0x186, 0x195, 0x2C5 }; + + for (i = 0; i < 3; i++) + for (j = 0; j < 15; j++) + b43_phy_write(dev, B43_PHY_N(offset[i] + j), + tbl_tx_filter_coef_rev4[i][j]); + + if (dev->phy.is_40mhz) { + for (j = 0; j < 15; j++) + b43_phy_write(dev, B43_PHY_N(offset[0] + j), + tbl_tx_filter_coef_rev4[3][j]); + } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + for (j = 0; j < 15; j++) + b43_phy_write(dev, B43_PHY_N(offset[0] + j), + tbl_tx_filter_coef_rev4[5][j]); + } + + if (dev->phy.channel == 14) + for (j = 0; j < 15; j++) + b43_phy_write(dev, B43_PHY_N(offset[0] + j), + tbl_tx_filter_coef_rev4[6][j]); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */ +static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + + u16 curr_gain[2]; + struct nphy_txgains target; + const u32 *table = NULL; + + if (nphy->txpwrctrl == 0) { + int i; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, true); + b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, curr_gain); + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, false); + + for (i = 0; i < 2; ++i) { + if (dev->phy.rev >= 3) { + target.ipa[i] = curr_gain[i] & 0x000F; + target.pad[i] = (curr_gain[i] & 0x00F0) >> 4; + target.pga[i] = (curr_gain[i] & 0x0F00) >> 8; + target.txgm[i] = (curr_gain[i] & 0x7000) >> 12; + } else { + target.ipa[i] = curr_gain[i] & 0x0003; + target.pad[i] = (curr_gain[i] & 0x000C) >> 2; + target.pga[i] = (curr_gain[i] & 0x0070) >> 4; + target.txgm[i] = (curr_gain[i] & 0x0380) >> 7; + } + } + } else { + int i; + u16 index[2]; + index[0] = (b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT) & + B43_NPHY_TXPCTL_STAT_BIDX) >> + B43_NPHY_TXPCTL_STAT_BIDX_SHIFT; + index[1] = (b43_phy_read(dev, B43_NPHY_C2_TXPCTL_STAT) & + B43_NPHY_TXPCTL_STAT_BIDX) >> + B43_NPHY_TXPCTL_STAT_BIDX_SHIFT; + + for (i = 0; i < 2; ++i) { + if (dev->phy.rev >= 3) { + enum ieee80211_band band = + b43_current_band(dev->wl); 
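+ /*
+  * Each 32-bit gain table entry packs the per-chain gains in its upper
+  * half: rev3+ uses 4-bit ipa/pad/pga/txgm fields at bits 16/20/24/28,
+  * while older revs use 2/2/3/3-bit fields at bits 16/18/20/23 (see the
+  * unpacking below).  The row index comes from the base-index field of the
+  * TXPCTL status registers read above.
+  */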
+ + if ((nphy->ipa2g_on && + band == IEEE80211_BAND_2GHZ) || + (nphy->ipa5g_on && + band == IEEE80211_BAND_5GHZ)) { + table = b43_nphy_get_ipa_gain_table(dev); + } else { + if (band == IEEE80211_BAND_5GHZ) { + if (dev->phy.rev == 3) + table = b43_ntab_tx_gain_rev3_5ghz; + else if (dev->phy.rev == 4) + table = b43_ntab_tx_gain_rev4_5ghz; + else + table = b43_ntab_tx_gain_rev5plus_5ghz; + } else { + table = b43_ntab_tx_gain_rev3plus_2ghz; + } + } + + target.ipa[i] = (table[index[i]] >> 16) & 0xF; + target.pad[i] = (table[index[i]] >> 20) & 0xF; + target.pga[i] = (table[index[i]] >> 24) & 0xF; + target.txgm[i] = (table[index[i]] >> 28) & 0xF; + } else { + table = b43_ntab_tx_gain_rev0_1_2; + + target.ipa[i] = (table[index[i]] >> 16) & 0x3; + target.pad[i] = (table[index[i]] >> 18) & 0x3; + target.pga[i] = (table[index[i]] >> 20) & 0x7; + target.txgm[i] = (table[index[i]] >> 23) & 0x7; + } + } + } + + return target; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */ +static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev) +{ + u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; + + if (dev->phy.rev >= 3) { + b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[0]); + b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[3]); + b43_phy_write(dev, B43_NPHY_BBCFG, regs[4]); + b43_ntab_write(dev, B43_NTAB16(8, 3), regs[5]); + b43_ntab_write(dev, B43_NTAB16(8, 19), regs[6]); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[7]); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[8]); + b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]); + b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]); + b43_nphy_reset_cca(dev); + } else { + b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, regs[0]); + b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, regs[1]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]); + b43_ntab_write(dev, B43_NTAB16(8, 2), regs[3]); + b43_ntab_write(dev, B43_NTAB16(8, 18), regs[4]); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[5]); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[6]); + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */ +static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev) +{ + u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; + u16 tmp; + + regs[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); + regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); + if (dev->phy.rev >= 3) { + b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0xF0FF, 0x0A00); + b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0xF0FF, 0x0A00); + + tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1); + regs[2] = tmp; + b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, tmp | 0x0600); + + tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); + regs[3] = tmp; + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600); + + regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG); + b43_phy_mask(dev, B43_NPHY_BBCFG, (u16)~B43_NPHY_BBCFG_RSTRX); + + tmp = b43_ntab_read(dev, B43_NTAB16(8, 3)); + regs[5] = tmp; + b43_ntab_write(dev, B43_NTAB16(8, 3), 0); + + tmp = b43_ntab_read(dev, B43_NTAB16(8, 19)); + regs[6] = tmp; + b43_ntab_write(dev, B43_NTAB16(8, 19), 0); + regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); + regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); + + b43_nphy_rf_control_intc_override(dev, 2, 1, 3); + b43_nphy_rf_control_intc_override(dev, 1, 2, 1); + b43_nphy_rf_control_intc_override(dev, 1, 8, 2); + + regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0); + regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1); + b43_phy_mask(dev, 
B43_NPHY_PAPD_EN0, ~0x0001); + b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001); + } else { + b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, 0xA000); + b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, 0xA000); + tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); + regs[2] = tmp; + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x3000); + tmp = b43_ntab_read(dev, B43_NTAB16(8, 2)); + regs[3] = tmp; + tmp |= 0x2000; + b43_ntab_write(dev, B43_NTAB16(8, 2), tmp); + tmp = b43_ntab_read(dev, B43_NTAB16(8, 18)); + regs[4] = tmp; + tmp |= 0x2000; + b43_ntab_write(dev, B43_NTAB16(8, 18), tmp); + regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1); + regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2); + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) + tmp = 0x0180; + else + tmp = 0x0120; + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp); + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */ +static void b43_nphy_save_cal(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + + struct b43_phy_n_iq_comp *rxcal_coeffs = NULL; + u16 *txcal_radio_regs = NULL; + u8 *iqcal_chanspec; + u16 *table = NULL; + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 1); + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G; + txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G; + iqcal_chanspec = &nphy->iqcal_chanspec_2G; + table = nphy->cal_cache.txcal_coeffs_2G; + } else { + rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G; + txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G; + iqcal_chanspec = &nphy->iqcal_chanspec_5G; + table = nphy->cal_cache.txcal_coeffs_5G; + } + + b43_nphy_rx_iq_coeffs(dev, false, rxcal_coeffs); + /* TODO use some definitions */ + if (dev->phy.rev >= 3) { + txcal_radio_regs[0] = b43_radio_read(dev, 0x2021); + txcal_radio_regs[1] = b43_radio_read(dev, 0x2022); + txcal_radio_regs[2] = b43_radio_read(dev, 0x3021); + txcal_radio_regs[3] = b43_radio_read(dev, 0x3022); + txcal_radio_regs[4] = b43_radio_read(dev, 0x2023); + txcal_radio_regs[5] = b43_radio_read(dev, 0x2024); + txcal_radio_regs[6] = b43_radio_read(dev, 0x3023); + txcal_radio_regs[7] = b43_radio_read(dev, 0x3024); + } else { + txcal_radio_regs[0] = b43_radio_read(dev, 0x8B); + txcal_radio_regs[1] = b43_radio_read(dev, 0xBA); + txcal_radio_regs[2] = b43_radio_read(dev, 0x8D); + txcal_radio_regs[3] = b43_radio_read(dev, 0xBC); + } + *iqcal_chanspec = nphy->radio_chanspec; + b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 8, table); + + if (nphy->hang_avoid) + b43_nphy_stay_in_carrier_search(dev, 0); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */ +static void b43_nphy_restore_cal(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + + u16 coef[4]; + u16 *loft = NULL; + u16 *table = NULL; + + int i; + u16 *txcal_radio_regs = NULL; + struct b43_phy_n_iq_comp *rxcal_coeffs = NULL; + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (nphy->iqcal_chanspec_2G == 0) + return; + table = nphy->cal_cache.txcal_coeffs_2G; + loft = &nphy->cal_cache.txcal_coeffs_2G[5]; + } else { + if (nphy->iqcal_chanspec_5G == 0) + return; + table = nphy->cal_cache.txcal_coeffs_5G; + loft = &nphy->cal_cache.txcal_coeffs_5G[5]; + } + + b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, table); + + for (i = 0; i < 4; i++) { + if (dev->phy.rev >= 3) + table[i] = coef[i]; + else + coef[i] = 0; + } + + b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, coef); + b43_ntab_write_bulk(dev, 
B43_NTAB16(15, 85), 2, loft); + b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, loft); + + if (dev->phy.rev < 2) + b43_nphy_tx_iq_workaround(dev); + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G; + rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G; + } else { + txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G; + rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G; + } + + /* TODO use some definitions */ + if (dev->phy.rev >= 3) { + b43_radio_write(dev, 0x2021, txcal_radio_regs[0]); + b43_radio_write(dev, 0x2022, txcal_radio_regs[1]); + b43_radio_write(dev, 0x3021, txcal_radio_regs[2]); + b43_radio_write(dev, 0x3022, txcal_radio_regs[3]); + b43_radio_write(dev, 0x2023, txcal_radio_regs[4]); + b43_radio_write(dev, 0x2024, txcal_radio_regs[5]); + b43_radio_write(dev, 0x3023, txcal_radio_regs[6]); + b43_radio_write(dev, 0x3024, txcal_radio_regs[7]); + } else { + b43_radio_write(dev, 0x8B, txcal_radio_regs[0]); + b43_radio_write(dev, 0xBA, txcal_radio_regs[1]); + b43_radio_write(dev, 0x8D, txcal_radio_regs[2]); + b43_radio_write(dev, 0xBC, txcal_radio_regs[3]); + } + b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs); +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */ +static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev, + struct nphy_txgains target, + bool full, bool mphase) +{ + struct b43_phy_n *nphy = dev->phy.n; + int i; + int error = 0; + int freq; + bool avoid = false; + u8 length; + u16 tmp, core, type, count, max, numb, last, cmd; + const u16 *table; + bool phy6or5x; + + u16 buffer[11]; + u16 diq_start = 0; + u16 save[2]; + u16 gain[2]; + struct nphy_iqcal_params params[2]; + bool updated[2] = { }; + + b43_nphy_stay_in_carrier_search(dev, true); + + if (dev->phy.rev >= 4) { + avoid = nphy->hang_avoid; + nphy->hang_avoid = 0; + } + + b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save); + + for (i = 0; i < 2; i++) { + b43_nphy_iq_cal_gain_params(dev, i, target, &params[i]); + gain[i] = params[i].cal_gain; + } + + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain); + + b43_nphy_tx_cal_radio_setup(dev); + b43_nphy_tx_cal_phy_setup(dev); + + phy6or5x = dev->phy.rev >= 6 || + (dev->phy.rev == 5 && nphy->ipa2g_on && + b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ); + if (phy6or5x) { + if (dev->phy.is_40mhz) { + b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18, + tbl_tx_iqlo_cal_loft_ladder_40); + b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18, + tbl_tx_iqlo_cal_iqimb_ladder_40); + } else { + b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18, + tbl_tx_iqlo_cal_loft_ladder_20); + b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18, + tbl_tx_iqlo_cal_iqimb_ladder_20); + } + } + + b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9); + + if (!dev->phy.is_40mhz) + freq = 2500; + else + freq = 5000; + + if (nphy->mphase_cal_phase_id > 2) + b43_nphy_run_samples(dev, (dev->phy.is_40mhz ? 
40 : 20) * 8, + 0xFFFF, 0, true, false); + else + error = b43_nphy_tx_tone(dev, freq, 250, true, false); + + if (error == 0) { + if (nphy->mphase_cal_phase_id > 2) { + table = nphy->mphase_txcal_bestcoeffs; + length = 11; + if (dev->phy.rev < 3) + length -= 2; + } else { + if (!full && nphy->txiqlocal_coeffsvalid) { + table = nphy->txiqlocal_bestc; + length = 11; + if (dev->phy.rev < 3) + length -= 2; + } else { + full = true; + if (dev->phy.rev >= 3) { + table = tbl_tx_iqlo_cal_startcoefs_nphyrev3; + length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3; + } else { + table = tbl_tx_iqlo_cal_startcoefs; + length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS; + } + } + } + + b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, table); + + if (full) { + if (dev->phy.rev >= 3) + max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3; + else + max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL; + } else { + if (dev->phy.rev >= 3) + max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3; + else + max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL; + } + + if (mphase) { + count = nphy->mphase_txcal_cmdidx; + numb = min(max, + (u16)(count + nphy->mphase_txcal_numcmds)); + } else { + count = 0; + numb = max; + } + + for (; count < numb; count++) { + if (full) { + if (dev->phy.rev >= 3) + cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count]; + else + cmd = tbl_tx_iqlo_cal_cmds_fullcal[count]; + } else { + if (dev->phy.rev >= 3) + cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count]; + else + cmd = tbl_tx_iqlo_cal_cmds_recal[count]; + } + + core = (cmd & 0x3000) >> 12; + type = (cmd & 0x0F00) >> 8; + + if (phy6or5x && updated[core] == 0) { + b43_nphy_update_tx_cal_ladder(dev, core); + updated[core] = 1; + } + + tmp = (params[core].ncorr[type] << 8) | 0x66; + b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDNNUM, tmp); + + if (type == 1 || type == 3 || type == 4) { + buffer[0] = b43_ntab_read(dev, + B43_NTAB16(15, 69 + core)); + diq_start = buffer[0]; + buffer[0] = 0; + b43_ntab_write(dev, B43_NTAB16(15, 69 + core), + 0); + } + + b43_phy_write(dev, B43_NPHY_IQLOCAL_CMD, cmd); + for (i = 0; i < 2000; i++) { + tmp = b43_phy_read(dev, B43_NPHY_IQLOCAL_CMD); + if (tmp & 0xC000) + break; + udelay(10); + } + + b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length, + buffer); + b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, + buffer); + + if (type == 1 || type == 3 || type == 4) + buffer[0] = diq_start; + } + + if (mphase) + nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb; + + last = (dev->phy.rev < 3) ? 
6 : 7; + + if (!mphase || nphy->mphase_cal_phase_id == last) { + b43_ntab_write_bulk(dev, B43_NTAB16(15, 96), 4, buffer); + b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 4, buffer); + if (dev->phy.rev < 3) { + buffer[0] = 0; + buffer[1] = 0; + buffer[2] = 0; + buffer[3] = 0; + } + b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, + buffer); + b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2, + buffer); + b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, + buffer); + b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, + buffer); + length = 11; + if (dev->phy.rev < 3) + length -= 2; + b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length, + nphy->txiqlocal_bestc); + nphy->txiqlocal_coeffsvalid = true; + /* TODO: Set nphy->txiqlocal_chanspec to + the current channel */ + } else { + length = 11; + if (dev->phy.rev < 3) + length -= 2; + b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length, + nphy->mphase_txcal_bestcoeffs); + } + + b43_nphy_stop_playback(dev); + b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0); + } + + b43_nphy_tx_cal_phy_cleanup(dev); + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, save); + + if (dev->phy.rev < 2 && (!mphase || nphy->mphase_cal_phase_id == last)) + b43_nphy_tx_iq_workaround(dev); + + if (dev->phy.rev >= 4) + nphy->hang_avoid = avoid; + + b43_nphy_stay_in_carrier_search(dev, false); + + return error; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */ +static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy = dev->phy.n; + u8 i; + u16 buffer[7]; + bool equal = true; + + if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */) + return; + + b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer); + for (i = 0; i < 4; i++) { + if (buffer[i] != nphy->txiqlocal_bestc[i]) { + equal = false; + break; + } + } + + if (!equal) { + b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, + nphy->txiqlocal_bestc); + for (i = 0; i < 4; i++) + buffer[i] = 0; + b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, + buffer); + b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, + &nphy->txiqlocal_bestc[5]); + b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, + &nphy->txiqlocal_bestc[5]); + } +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */ +static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev, + struct nphy_txgains target, u8 type, bool debug) +{ + struct b43_phy_n *nphy = dev->phy.n; + int i, j, index; + u8 rfctl[2]; + u8 afectl_core; + u16 tmp[6]; + u16 cur_hpf1, cur_hpf2, cur_lna; + u32 real, imag; + enum ieee80211_band band; + + u8 use; + u16 cur_hpf; + u16 lna[3] = { 3, 3, 1 }; + u16 hpf1[3] = { 7, 2, 0 }; + u16 hpf2[3] = { 2, 0, 0 }; + u32 power[3] = { }; + u16 gain_save[2]; + u16 cal_gain[2]; + struct nphy_iqcal_params cal_params[2]; + struct nphy_iq_est est; + int ret = 0; + bool playtone = true; + int desired = 13; + + b43_nphy_stay_in_carrier_search(dev, 1); + + if (dev->phy.rev < 2) + b43_nphy_reapply_tx_cal_coeffs(dev); + b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save); + for (i = 0; i < 2; i++) { + b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]); + cal_gain[i] = cal_params[i].cal_gain; + } + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, cal_gain); + + for (i = 0; i < 2; i++) { + if (i == 0) { + rfctl[0] = B43_NPHY_RFCTL_INTC1; + rfctl[1] = B43_NPHY_RFCTL_INTC2; + afectl_core = B43_NPHY_AFECTL_C1; + } else { + rfctl[0] = B43_NPHY_RFCTL_INTC2; + rfctl[1] = B43_NPHY_RFCTL_INTC1; + afectl_core = B43_NPHY_AFECTL_C2; + } + + tmp[1] = b43_phy_read(dev, B43_NPHY_RFSEQCA); + tmp[2] = b43_phy_read(dev, 
afectl_core); + tmp[3] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); + tmp[4] = b43_phy_read(dev, rfctl[0]); + tmp[5] = b43_phy_read(dev, rfctl[1]); + + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, + (u16)~B43_NPHY_RFSEQCA_RXDIS, + ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT)); + b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN, + (1 - i)); + b43_phy_set(dev, afectl_core, 0x0006); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0006); + + band = b43_current_band(dev->wl); + + if (nphy->rxcalparams & 0xFF000000) { + if (band == IEEE80211_BAND_5GHZ) + b43_phy_write(dev, rfctl[0], 0x140); + else + b43_phy_write(dev, rfctl[0], 0x110); + } else { + if (band == IEEE80211_BAND_5GHZ) + b43_phy_write(dev, rfctl[0], 0x180); + else + b43_phy_write(dev, rfctl[0], 0x120); + } + + if (band == IEEE80211_BAND_5GHZ) + b43_phy_write(dev, rfctl[1], 0x148); + else + b43_phy_write(dev, rfctl[1], 0x114); + + if (nphy->rxcalparams & 0x10000) { + b43_radio_maskset(dev, B2055_C1_GENSPARE2, 0xFC, + (i + 1)); + b43_radio_maskset(dev, B2055_C2_GENSPARE2, 0xFC, + (2 - i)); + } + + for (j = 0; j < 4; j++) { + if (j < 3) { + cur_lna = lna[j]; + cur_hpf1 = hpf1[j]; + cur_hpf2 = hpf2[j]; + } else { + if (power[1] > 10000) { + use = 1; + cur_hpf = cur_hpf1; + index = 2; + } else { + if (power[0] > 10000) { + use = 1; + cur_hpf = cur_hpf1; + index = 1; + } else { + index = 0; + use = 2; + cur_hpf = cur_hpf2; + } + } + cur_lna = lna[index]; + cur_hpf1 = hpf1[index]; + cur_hpf2 = hpf2[index]; + cur_hpf += desired - hweight32(power[index]); + cur_hpf = clamp_val(cur_hpf, 0, 10); + if (use == 1) + cur_hpf1 = cur_hpf; + else + cur_hpf2 = cur_hpf; + } + + tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) | + (cur_lna << 2)); + b43_nphy_rf_control_override(dev, 0x400, tmp[0], 3, + false); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); + b43_nphy_stop_playback(dev); + + if (playtone) { + ret = b43_nphy_tx_tone(dev, 4000, + (nphy->rxcalparams & 0xFFFF), + false, false); + playtone = false; + } else { + b43_nphy_run_samples(dev, 160, 0xFFFF, 0, + false, false); + } + + if (ret == 0) { + if (j < 3) { + b43_nphy_rx_iq_est(dev, &est, 1024, 32, + false); + if (i == 0) { + real = est.i0_pwr; + imag = est.q0_pwr; + } else { + real = est.i1_pwr; + imag = est.q1_pwr; + } + power[i] = ((real + imag) / 1024) + 1; + } else { + b43_nphy_calc_rx_iq_comp(dev, 1 << i); + } + b43_nphy_stop_playback(dev); + } + + if (ret != 0) + break; + } + + b43_radio_mask(dev, B2055_C1_GENSPARE2, 0xFC); + b43_radio_mask(dev, B2055_C2_GENSPARE2, 0xFC); + b43_phy_write(dev, rfctl[1], tmp[5]); + b43_phy_write(dev, rfctl[0], tmp[4]); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp[3]); + b43_phy_write(dev, afectl_core, tmp[2]); + b43_phy_write(dev, B43_NPHY_RFSEQCA, tmp[1]); + + if (ret != 0) + break; + } + + b43_nphy_rf_control_override(dev, 0x400, 0, 3, true); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save); + + b43_nphy_stay_in_carrier_search(dev, 0); + + return ret; +} + +static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev, + struct nphy_txgains target, u8 type, bool debug) +{ + return -1; +} + +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */ +static int b43_nphy_cal_rx_iq(struct b43_wldev *dev, + struct nphy_txgains target, u8 type, bool debug) +{ + if (dev->phy.rev >= 3) + return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug); + else + return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug); +} + +/* + * Init N-PHY + * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N + */ +int 
b43_phy_initn(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = phy->n; + u8 tx_pwr_state; + struct nphy_txgains target; + u16 tmp; + enum ieee80211_band tmp2; + bool do_rssi_cal; + + u16 clip[2]; + bool do_cal = false; + + if ((dev->phy.rev >= 3) && + (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) && + (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) { + chipco_set32(&dev->dev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40); + } + nphy->deaf_count = 0; + b43_nphy_tables_init(dev); + nphy->crsminpwr_adjusted = false; + nphy->noisevars_adjusted = false; + + /* Clear all overrides */ + if (dev->phy.rev >= 3) { + b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0); + b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0); + b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0); + b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0); + } else { + b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0); + } + b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0); + if (dev->phy.rev < 6) { + b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0); + b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0); + } + b43_phy_mask(dev, B43_NPHY_RFSEQMODE, + ~(B43_NPHY_RFSEQMODE_CAOVER | + B43_NPHY_RFSEQMODE_TROVER)); + if (dev->phy.rev >= 3) + b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, 0); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0); + + if (dev->phy.rev <= 2) { + tmp = (dev->phy.rev == 2) ? 0x3B : 0x40; + b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, + ~B43_NPHY_BPHY_CTL3_SCALE, + tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT); + } + b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20); + b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20); + + if (bus->sprom.boardflags2_lo & 0x100 || + (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE && + bus->boardinfo.type == 0x8B)) + b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0); + else + b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8); + b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 0xC8); + b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50); + b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30); + + b43_nphy_update_mimo_config(dev, nphy->preamble_override); + b43_nphy_update_txrx_chain(dev); + + if (phy->rev < 2) { + b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8); + b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4); + } + + tmp2 = b43_current_band(dev->wl); + if ((nphy->ipa2g_on && tmp2 == IEEE80211_BAND_2GHZ) || + (nphy->ipa5g_on && tmp2 == IEEE80211_BAND_5GHZ)) { + b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1); + b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F, + nphy->papd_epsilon_offset[0] << 7); + b43_phy_set(dev, B43_NPHY_PAPD_EN1, 0x1); + b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ1, 0x007F, + nphy->papd_epsilon_offset[1] << 7); + b43_nphy_int_pa_set_tx_dig_filters(dev); + } else if (phy->rev >= 5) { + b43_nphy_ext_pa_set_tx_dig_filters(dev); + } + + b43_nphy_workarounds(dev); + + /* Reset CCA, in init code it differs a little from standard way */ + b43_nphy_bmac_clock_fgc(dev, 1); + tmp = b43_phy_read(dev, B43_NPHY_BBCFG); + b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA); + b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA); + b43_nphy_bmac_clock_fgc(dev, 0); + + /* TODO N PHY MAC PHY Clock Set with argument 1 */ + + b43_nphy_pa_override(dev, false); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); + b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); + b43_nphy_pa_override(dev, true); + + b43_nphy_classifier(dev, 0, 0); + b43_nphy_read_clip_detection(dev, clip); + tx_pwr_state = nphy->txpwrctrl; + /* 
TODO N PHY TX power control with argument 0 + (turning off power control) */ + /* TODO Fix the TX Power Settings */ + /* TODO N PHY TX Power Control Idle TSSI */ + /* TODO N PHY TX Power Control Setup */ + + if (phy->rev >= 3) { + /* TODO */ + } else { + b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, + b43_ntab_tx_gain_rev0_1_2); + b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, + b43_ntab_tx_gain_rev0_1_2); + } + + if (nphy->phyrxchain != 3) + ;/* TODO N PHY RX Core Set State with phyrxchain as argument */ + if (nphy->mphase_cal_phase_id > 0) + ;/* TODO PHY Periodic Calibration Multi-Phase Restart */ + + do_rssi_cal = false; + if (phy->rev >= 3) { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + do_rssi_cal = (nphy->rssical_chanspec_2G == 0); + else + do_rssi_cal = (nphy->rssical_chanspec_5G == 0); + + if (do_rssi_cal) + b43_nphy_rssi_cal(dev); + else + b43_nphy_restore_rssi_cal(dev); + } else { + b43_nphy_rssi_cal(dev); + } + + if (!((nphy->measure_hold & 0x6) != 0)) { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + do_cal = (nphy->iqcal_chanspec_2G == 0); + else + do_cal = (nphy->iqcal_chanspec_5G == 0); + + if (nphy->mute) + do_cal = false; + + if (do_cal) { + target = b43_nphy_get_tx_gains(dev); + + if (nphy->antsel_type == 2) + ;/*TODO NPHY Superswitch Init with argument 1*/ + if (nphy->perical != 2) { + b43_nphy_rssi_cal(dev); + if (phy->rev >= 3) { + nphy->cal_orig_pwr_idx[0] = + nphy->txpwrindex[0].index_internal; + nphy->cal_orig_pwr_idx[1] = + nphy->txpwrindex[1].index_internal; + /* TODO N PHY Pre Calibrate TX Gain */ + target = b43_nphy_get_tx_gains(dev); + } + } + } + } + + if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) { + if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0) + b43_nphy_save_cal(dev); + else if (nphy->mphase_cal_phase_id == 0) + ;/* N PHY Periodic Calibration with argument 3 */ + } else { + b43_nphy_restore_cal(dev); + } + + b43_nphy_tx_pwr_ctrl_coef_setup(dev); + /* TODO N PHY TX Power Control Enable with argument tx_pwr_state */ + b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015); + b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320); + if (phy->rev >= 3 && phy->rev <= 6) + b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014); + b43_nphy_tx_lp_fbw(dev); + if (phy->rev >= 3) + b43_nphy_spur_workaround(dev); + + b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n"); + return 0; +} + +static int b43_nphy_op_allocate(struct b43_wldev *dev) +{ + struct b43_phy_n *nphy; + + nphy = kzalloc(sizeof(*nphy), GFP_KERNEL); + if (!nphy) + return -ENOMEM; + dev->phy.n = nphy; + + return 0; +} + +static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = phy->n; + + memset(nphy, 0, sizeof(*nphy)); + + //TODO init struct b43_phy_n +} + +static void b43_nphy_op_free(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = phy->n; + + kfree(nphy); + phy->n = NULL; +} + +static int b43_nphy_op_init(struct b43_wldev *dev) +{ + return b43_phy_initn(dev); +} + +static inline void check_phyreg(struct b43_wldev *dev, u16 offset) +{ +#if B43_DEBUG + if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) { + /* OFDM registers are only available on A/G-PHYs */ + b43err(dev->wl, "Invalid OFDM PHY access at " + "0x%04X on N-PHY\n", offset); + dump_stack(); + } + if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) { + /* Ext-G registers are only available on G-PHYs */ + b43err(dev->wl, "Invalid EXT-G PHY access at " + "0x%04X on N-PHY\n", 
offset); + dump_stack(); + } +#endif /* B43_DEBUG */ +} + +static u16 b43_nphy_op_read(struct b43_wldev *dev, u16 reg) +{ + check_phyreg(dev, reg); + b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + return b43_read16(dev, B43_MMIO_PHY_DATA); +} + +static void b43_nphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) +{ + check_phyreg(dev, reg); + b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16(dev, B43_MMIO_PHY_DATA, value); +} + +static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg) +{ + /* Register 1 is a 32-bit register. */ + B43_WARN_ON(reg == 1); + /* N-PHY needs 0x100 for read access */ + reg |= 0x100; + + b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); +} + +static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) +{ + /* Register 1 is a 32-bit register. */ + B43_WARN_ON(reg == 1); + + b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); +} + +static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, + bool blocked) +{//TODO +} + +static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on) +{ + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, + on ? 0 : 0x7FFF); +} + +static int b43_nphy_op_switch_channel(struct b43_wldev *dev, + unsigned int new_channel) +{ + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if ((new_channel < 1) || (new_channel > 14)) + return -EINVAL; + } else { + if (new_channel > 200) + return -EINVAL; + } + + return nphy_channel_switch(dev, new_channel); +} + +static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev) +{ + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + return 1; + return 36; +} + +const struct b43_phy_operations b43_phyops_n = { + .allocate = b43_nphy_op_allocate, + .free = b43_nphy_op_free, + .prepare_structs = b43_nphy_op_prepare_structs, + .init = b43_nphy_op_init, + .phy_read = b43_nphy_op_read, + .phy_write = b43_nphy_op_write, + .radio_read = b43_nphy_op_radio_read, + .radio_write = b43_nphy_op_radio_write, + .software_rfkill = b43_nphy_op_software_rfkill, + .switch_analog = b43_nphy_op_switch_analog, + .switch_channel = b43_nphy_op_switch_channel, + .get_default_chan = b43_nphy_op_get_default_chan, + .recalc_txpower = b43_nphy_op_recalc_txpower, + .adjust_txpower = b43_nphy_op_adjust_txpower, +}; diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h new file mode 100644 index 0000000..403aad3 --- /dev/null +++ b/drivers/net/wireless/b43/phy_n.h @@ -0,0 +1,1026 @@ +#ifndef B43_NPHY_H_ +#define B43_NPHY_H_ + +#include "phy_common.h" + + +/* N-PHY registers. 
*/ + +#define B43_NPHY_BBCFG B43_PHY_N(0x001) /* BB config */ +#define B43_NPHY_BBCFG_RSTCCA 0x4000 /* Reset CCA */ +#define B43_NPHY_BBCFG_RSTRX 0x8000 /* Reset RX */ +#define B43_NPHY_CHANNEL B43_PHY_N(0x005) /* Channel */ +#define B43_NPHY_TXERR B43_PHY_N(0x007) /* TX error */ +#define B43_NPHY_BANDCTL B43_PHY_N(0x009) /* Band control */ +#define B43_NPHY_BANDCTL_5GHZ 0x0001 /* Use the 5GHz band */ +#define B43_NPHY_4WI_ADDR B43_PHY_N(0x00B) /* Four-wire bus address */ +#define B43_NPHY_4WI_DATAHI B43_PHY_N(0x00C) /* Four-wire bus data high */ +#define B43_NPHY_4WI_DATALO B43_PHY_N(0x00D) /* Four-wire bus data low */ +#define B43_NPHY_BIST_STAT0 B43_PHY_N(0x00E) /* Built-in self test status 0 */ +#define B43_NPHY_BIST_STAT1 B43_PHY_N(0x00F) /* Built-in self test status 1 */ + +#define B43_NPHY_C1_DESPWR B43_PHY_N(0x018) /* Core 1 desired power */ +#define B43_NPHY_C1_CCK_DESPWR B43_PHY_N(0x019) /* Core 1 CCK desired power */ +#define B43_NPHY_C1_BCLIPBKOFF B43_PHY_N(0x01A) /* Core 1 barely clip backoff */ +#define B43_NPHY_C1_CCK_BCLIPBKOFF B43_PHY_N(0x01B) /* Core 1 CCK barely clip backoff */ +#define B43_NPHY_C1_CGAINI B43_PHY_N(0x01C) /* Core 1 compute gain info */ +#define B43_NPHY_C1_CGAINI_GAINBKOFF 0x001F /* Gain backoff */ +#define B43_NPHY_C1_CGAINI_GAINBKOFF_SHIFT 0 +#define B43_NPHY_C1_CGAINI_CLIPGBKOFF 0x03E0 /* Clip gain backoff */ +#define B43_NPHY_C1_CGAINI_CLIPGBKOFF_SHIFT 5 +#define B43_NPHY_C1_CGAINI_GAINSTEP 0x1C00 /* Gain step */ +#define B43_NPHY_C1_CGAINI_GAINSTEP_SHIFT 10 +#define B43_NPHY_C1_CGAINI_CL2DETECT 0x2000 /* Clip 2 detect mask */ +#define B43_NPHY_C1_CCK_CGAINI B43_PHY_N(0x01D) /* Core 1 CCK compute gain info */ +#define B43_NPHY_C1_CCK_CGAINI_GAINBKOFF 0x001F /* Gain backoff */ +#define B43_NPHY_C1_CCK_CGAINI_CLIPGBKOFF 0x01E0 /* CCK barely clip gain backoff */ +#define B43_NPHY_C1_MINMAX_GAIN B43_PHY_N(0x01E) /* Core 1 min/max gain */ +#define B43_NPHY_C1_MINGAIN 0x00FF /* Minimum gain */ +#define B43_NPHY_C1_MINGAIN_SHIFT 0 +#define B43_NPHY_C1_MAXGAIN 0xFF00 /* Maximum gain */ +#define B43_NPHY_C1_MAXGAIN_SHIFT 8 +#define B43_NPHY_C1_CCK_MINMAX_GAIN B43_PHY_N(0x01F) /* Core 1 CCK min/max gain */ +#define B43_NPHY_C1_CCK_MINGAIN 0x00FF /* Minimum gain */ +#define B43_NPHY_C1_CCK_MINGAIN_SHIFT 0 +#define B43_NPHY_C1_CCK_MAXGAIN 0xFF00 /* Maximum gain */ +#define B43_NPHY_C1_CCK_MAXGAIN_SHIFT 8 +#define B43_NPHY_C1_INITGAIN B43_PHY_N(0x020) /* Core 1 initial gain code */ +#define B43_NPHY_C1_INITGAIN_EXTLNA 0x0001 /* External LNA index */ +#define B43_NPHY_C1_INITGAIN_LNA 0x0006 /* LNA index */ +#define B43_NPHY_C1_INITGAIN_LNAIDX_SHIFT 1 +#define B43_NPHY_C1_INITGAIN_HPVGA1 0x0078 /* HPVGA1 index */ +#define B43_NPHY_C1_INITGAIN_HPVGA1_SHIFT 3 +#define B43_NPHY_C1_INITGAIN_HPVGA2 0x0F80 /* HPVGA2 index */ +#define B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT 7 +#define B43_NPHY_C1_INITGAIN_TRRX 0x1000 /* TR RX index */ +#define B43_NPHY_C1_INITGAIN_TRTX 0x2000 /* TR TX index */ +#define B43_NPHY_C1_CLIP1_HIGAIN B43_PHY_N(0x021) /* Core 1 clip1 high gain code */ +#define B43_NPHY_C1_CLIP1_MEDGAIN B43_PHY_N(0x022) /* Core 1 clip1 medium gain code */ +#define B43_NPHY_C1_CLIP1_LOGAIN B43_PHY_N(0x023) /* Core 1 clip1 low gain code */ +#define B43_NPHY_C1_CLIP2_GAIN B43_PHY_N(0x024) /* Core 1 clip2 gain code */ +#define B43_NPHY_C1_FILTERGAIN B43_PHY_N(0x025) /* Core 1 filter gain */ +#define B43_NPHY_C1_LPF_QHPF_BW B43_PHY_N(0x026) /* Core 1 LPF Q HP F bandwidth */ +#define B43_NPHY_C1_CLIPWBTHRES B43_PHY_N(0x027) /* Core 1 clip wideband threshold */ +#define 
B43_NPHY_C1_CLIPWBTHRES_CLIP2 0x003F /* Clip 2 */ +#define B43_NPHY_C1_CLIPWBTHRES_CLIP2_SHIFT 0 +#define B43_NPHY_C1_CLIPWBTHRES_CLIP1 0x0FC0 /* Clip 1 */ +#define B43_NPHY_C1_CLIPWBTHRES_CLIP1_SHIFT 6 +#define B43_NPHY_C1_W1THRES B43_PHY_N(0x028) /* Core 1 W1 threshold */ +#define B43_NPHY_C1_EDTHRES B43_PHY_N(0x029) /* Core 1 ED threshold */ +#define B43_NPHY_C1_SMSIGTHRES B43_PHY_N(0x02A) /* Core 1 small sig threshold */ +#define B43_NPHY_C1_NBCLIPTHRES B43_PHY_N(0x02B) /* Core 1 NB clip threshold */ +#define B43_NPHY_C1_CLIP1THRES B43_PHY_N(0x02C) /* Core 1 clip1 threshold */ +#define B43_NPHY_C1_CLIP2THRES B43_PHY_N(0x02D) /* Core 1 clip2 threshold */ + +#define B43_NPHY_C2_DESPWR B43_PHY_N(0x02E) /* Core 2 desired power */ +#define B43_NPHY_C2_CCK_DESPWR B43_PHY_N(0x02F) /* Core 2 CCK desired power */ +#define B43_NPHY_C2_BCLIPBKOFF B43_PHY_N(0x030) /* Core 2 barely clip backoff */ +#define B43_NPHY_C2_CCK_BCLIPBKOFF B43_PHY_N(0x031) /* Core 2 CCK barely clip backoff */ +#define B43_NPHY_C2_CGAINI B43_PHY_N(0x032) /* Core 2 compute gain info */ +#define B43_NPHY_C2_CGAINI_GAINBKOFF 0x001F /* Gain backoff */ +#define B43_NPHY_C2_CGAINI_GAINBKOFF_SHIFT 0 +#define B43_NPHY_C2_CGAINI_CLIPGBKOFF 0x03E0 /* Clip gain backoff */ +#define B43_NPHY_C2_CGAINI_CLIPGBKOFF_SHIFT 5 +#define B43_NPHY_C2_CGAINI_GAINSTEP 0x1C00 /* Gain step */ +#define B43_NPHY_C2_CGAINI_GAINSTEP_SHIFT 10 +#define B43_NPHY_C2_CGAINI_CL2DETECT 0x2000 /* Clip 2 detect mask */ +#define B43_NPHY_C2_CCK_CGAINI B43_PHY_N(0x033) /* Core 2 CCK compute gain info */ +#define B43_NPHY_C2_CCK_CGAINI_GAINBKOFF 0x001F /* Gain backoff */ +#define B43_NPHY_C2_CCK_CGAINI_CLIPGBKOFF 0x01E0 /* CCK barely clip gain backoff */ +#define B43_NPHY_C2_MINMAX_GAIN B43_PHY_N(0x034) /* Core 2 min/max gain */ +#define B43_NPHY_C2_MINGAIN 0x00FF /* Minimum gain */ +#define B43_NPHY_C2_MINGAIN_SHIFT 0 +#define B43_NPHY_C2_MAXGAIN 0xFF00 /* Maximum gain */ +#define B43_NPHY_C2_MAXGAIN_SHIFT 8 +#define B43_NPHY_C2_CCK_MINMAX_GAIN B43_PHY_N(0x035) /* Core 2 CCK min/max gain */ +#define B43_NPHY_C2_CCK_MINGAIN 0x00FF /* Minimum gain */ +#define B43_NPHY_C2_CCK_MINGAIN_SHIFT 0 +#define B43_NPHY_C2_CCK_MAXGAIN 0xFF00 /* Maximum gain */ +#define B43_NPHY_C2_CCK_MAXGAIN_SHIFT 8 +#define B43_NPHY_C2_INITGAIN B43_PHY_N(0x036) /* Core 2 initial gain code */ +#define B43_NPHY_C2_INITGAIN_EXTLNA 0x0001 /* External LNA index */ +#define B43_NPHY_C2_INITGAIN_LNA 0x0006 /* LNA index */ +#define B43_NPHY_C2_INITGAIN_LNAIDX_SHIFT 1 +#define B43_NPHY_C2_INITGAIN_HPVGA1 0x0078 /* HPVGA1 index */ +#define B43_NPHY_C2_INITGAIN_HPVGA1_SHIFT 3 +#define B43_NPHY_C2_INITGAIN_HPVGA2 0x0F80 /* HPVGA2 index */ +#define B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT 7 +#define B43_NPHY_C2_INITGAIN_TRRX 0x1000 /* TR RX index */ +#define B43_NPHY_C2_INITGAIN_TRTX 0x2000 /* TR TX index */ +#define B43_NPHY_C2_CLIP1_HIGAIN B43_PHY_N(0x037) /* Core 2 clip1 high gain code */ +#define B43_NPHY_C2_CLIP1_MEDGAIN B43_PHY_N(0x038) /* Core 2 clip1 medium gain code */ +#define B43_NPHY_C2_CLIP1_LOGAIN B43_PHY_N(0x039) /* Core 2 clip1 low gain code */ +#define B43_NPHY_C2_CLIP2_GAIN B43_PHY_N(0x03A) /* Core 2 clip2 gain code */ +#define B43_NPHY_C2_FILTERGAIN B43_PHY_N(0x03B) /* Core 2 filter gain */ +#define B43_NPHY_C2_LPF_QHPF_BW B43_PHY_N(0x03C) /* Core 2 LPF Q HP F bandwidth */ +#define B43_NPHY_C2_CLIPWBTHRES B43_PHY_N(0x03D) /* Core 2 clip wideband threshold */ +#define B43_NPHY_C2_CLIPWBTHRES_CLIP2 0x003F /* Clip 2 */ +#define B43_NPHY_C2_CLIPWBTHRES_CLIP2_SHIFT 0 +#define 
B43_NPHY_C2_CLIPWBTHRES_CLIP1 0x0FC0 /* Clip 1 */ +#define B43_NPHY_C2_CLIPWBTHRES_CLIP1_SHIFT 6 +#define B43_NPHY_C2_W1THRES B43_PHY_N(0x03E) /* Core 2 W1 threshold */ +#define B43_NPHY_C2_EDTHRES B43_PHY_N(0x03F) /* Core 2 ED threshold */ +#define B43_NPHY_C2_SMSIGTHRES B43_PHY_N(0x040) /* Core 2 small sig threshold */ +#define B43_NPHY_C2_NBCLIPTHRES B43_PHY_N(0x041) /* Core 2 NB clip threshold */ +#define B43_NPHY_C2_CLIP1THRES B43_PHY_N(0x042) /* Core 2 clip1 threshold */ +#define B43_NPHY_C2_CLIP2THRES B43_PHY_N(0x043) /* Core 2 clip2 threshold */ + +#define B43_NPHY_CRS_THRES1 B43_PHY_N(0x044) /* CRS threshold 1 */ +#define B43_NPHY_CRS_THRES2 B43_PHY_N(0x045) /* CRS threshold 2 */ +#define B43_NPHY_CRS_THRES3 B43_PHY_N(0x046) /* CRS threshold 3 */ +#define B43_NPHY_CRSCTL B43_PHY_N(0x047) /* CRS control */ +#define B43_NPHY_DCFADDR B43_PHY_N(0x048) /* DC filter address */ +#define B43_NPHY_RXF20_NUM0 B43_PHY_N(0x049) /* RX filter 20 numerator 0 */ +#define B43_NPHY_RXF20_NUM1 B43_PHY_N(0x04A) /* RX filter 20 numerator 1 */ +#define B43_NPHY_RXF20_NUM2 B43_PHY_N(0x04B) /* RX filter 20 numerator 2 */ +#define B43_NPHY_RXF20_DENOM0 B43_PHY_N(0x04C) /* RX filter 20 denominator 0 */ +#define B43_NPHY_RXF20_DENOM1 B43_PHY_N(0x04D) /* RX filter 20 denominator 1 */ +#define B43_NPHY_RXF20_NUM10 B43_PHY_N(0x04E) /* RX filter 20 numerator 10 */ +#define B43_NPHY_RXF20_NUM11 B43_PHY_N(0x04F) /* RX filter 20 numerator 11 */ +#define B43_NPHY_RXF20_NUM12 B43_PHY_N(0x050) /* RX filter 20 numerator 12 */ +#define B43_NPHY_RXF20_DENOM10 B43_PHY_N(0x051) /* RX filter 20 denominator 10 */ +#define B43_NPHY_RXF20_DENOM11 B43_PHY_N(0x052) /* RX filter 20 denominator 11 */ +#define B43_NPHY_RXF40_NUM0 B43_PHY_N(0x053) /* RX filter 40 numerator 0 */ +#define B43_NPHY_RXF40_NUM1 B43_PHY_N(0x054) /* RX filter 40 numerator 1 */ +#define B43_NPHY_RXF40_NUM2 B43_PHY_N(0x055) /* RX filter 40 numerator 2 */ +#define B43_NPHY_RXF40_DENOM0 B43_PHY_N(0x056) /* RX filter 40 denominator 0 */ +#define B43_NPHY_RXF40_DENOM1 B43_PHY_N(0x057) /* RX filter 40 denominator 1 */ +#define B43_NPHY_RXF40_NUM10 B43_PHY_N(0x058) /* RX filter 40 numerator 10 */ +#define B43_NPHY_RXF40_NUM11 B43_PHY_N(0x059) /* RX filter 40 numerator 11 */ +#define B43_NPHY_RXF40_NUM12 B43_PHY_N(0x05A) /* RX filter 40 numerator 12 */ +#define B43_NPHY_RXF40_DENOM10 B43_PHY_N(0x05B) /* RX filter 40 denominator 10 */ +#define B43_NPHY_RXF40_DENOM11 B43_PHY_N(0x05C) /* RX filter 40 denominator 11 */ +#define B43_NPHY_PPROC_RSTLEN B43_PHY_N(0x060) /* Packet processing reset length */ +#define B43_NPHY_INITCARR_DLEN B43_PHY_N(0x061) /* Initial carrier detection length */ +#define B43_NPHY_CLIP1CARR_DLEN B43_PHY_N(0x062) /* Clip1 carrier detection length */ +#define B43_NPHY_CLIP2CARR_DLEN B43_PHY_N(0x063) /* Clip2 carrier detection length */ +#define B43_NPHY_INITGAIN_SLEN B43_PHY_N(0x064) /* Initial gain settle length */ +#define B43_NPHY_CLIP1GAIN_SLEN B43_PHY_N(0x065) /* Clip1 gain settle length */ +#define B43_NPHY_CLIP2GAIN_SLEN B43_PHY_N(0x066) /* Clip2 gain settle length */ +#define B43_NPHY_PACKGAIN_SLEN B43_PHY_N(0x067) /* Packet gain settle length */ +#define B43_NPHY_CARRSRC_TLEN B43_PHY_N(0x068) /* Carrier search timeout length */ +#define B43_NPHY_TISRC_TLEN B43_PHY_N(0x069) /* Timing search timeout length */ +#define B43_NPHY_ENDROP_TLEN B43_PHY_N(0x06A) /* Energy drop timeout length */ +#define B43_NPHY_CLIP1_NBDWELL_LEN B43_PHY_N(0x06B) /* Clip1 NB dwell length */ +#define B43_NPHY_CLIP2_NBDWELL_LEN B43_PHY_N(0x06C) /* Clip2 NB dwell 
length */ +#define B43_NPHY_W1CLIP1_DWELL_LEN B43_PHY_N(0x06D) /* W1 clip1 dwell length */ +#define B43_NPHY_W1CLIP2_DWELL_LEN B43_PHY_N(0x06E) /* W1 clip2 dwell length */ +#define B43_NPHY_W2CLIP1_DWELL_LEN B43_PHY_N(0x06F) /* W2 clip1 dwell length */ +#define B43_NPHY_PLOAD_CSENSE_EXTLEN B43_PHY_N(0x070) /* Payload carrier sense extension length */ +#define B43_NPHY_EDROP_CSENSE_EXTLEN B43_PHY_N(0x071) /* Energy drop carrier sense extension length */ +#define B43_NPHY_TABLE_ADDR B43_PHY_N(0x072) /* Table address */ +#define B43_NPHY_TABLE_DATALO B43_PHY_N(0x073) /* Table data low */ +#define B43_NPHY_TABLE_DATAHI B43_PHY_N(0x074) /* Table data high */ +#define B43_NPHY_WWISE_LENIDX B43_PHY_N(0x075) /* WWiSE length index */ +#define B43_NPHY_TGNSYNC_LENIDX B43_PHY_N(0x076) /* TGNsync length index */ +#define B43_NPHY_TXMACIF_HOLDOFF B43_PHY_N(0x077) /* TX MAC IF Hold off */ +#define B43_NPHY_RFCTL_CMD B43_PHY_N(0x078) /* RF control (command) */ +#define B43_NPHY_RFCTL_CMD_START 0x0001 /* Start sequence */ +#define B43_NPHY_RFCTL_CMD_RXTX 0x0002 /* RX/TX */ +#define B43_NPHY_RFCTL_CMD_CORESEL 0x0038 /* Core select */ +#define B43_NPHY_RFCTL_CMD_CORESEL_SHIFT 3 +#define B43_NPHY_RFCTL_CMD_PORFORCE 0x0040 /* POR force */ +#define B43_NPHY_RFCTL_CMD_OEPORFORCE 0x0080 /* OE POR force */ +#define B43_NPHY_RFCTL_CMD_RXEN 0x0100 /* RX enable */ +#define B43_NPHY_RFCTL_CMD_TXEN 0x0200 /* TX enable */ +#define B43_NPHY_RFCTL_CMD_CHIP0PU 0x0400 /* Chip0 PU */ +#define B43_NPHY_RFCTL_CMD_EN 0x0800 /* Radio enabled */ +#define B43_NPHY_RFCTL_CMD_SEQENCORE 0xF000 /* Seq en core */ +#define B43_NPHY_RFCTL_CMD_SEQENCORE_SHIFT 12 +#define B43_NPHY_RFCTL_RSSIO1 B43_PHY_N(0x07A) /* RF control (RSSI others 1) */ +#define B43_NPHY_RFCTL_RSSIO1_RXPD 0x0001 /* RX PD */ +#define B43_NPHY_RFCTL_RSSIO1_TXPD 0x0002 /* TX PD */ +#define B43_NPHY_RFCTL_RSSIO1_PAPD 0x0004 /* PA PD */ +#define B43_NPHY_RFCTL_RSSIO1_RSSICTL 0x0030 /* RSSI control */ +#define B43_NPHY_RFCTL_RSSIO1_LPFBW 0x00C0 /* LPF bandwidth */ +#define B43_NPHY_RFCTL_RSSIO1_HPFBWHI 0x0100 /* HPF bandwidth high */ +#define B43_NPHY_RFCTL_RSSIO1_HIQDISCO 0x0200 /* HIQ dis core */ +#define B43_NPHY_RFCTL_RXG1 B43_PHY_N(0x07B) /* RF control (RX gain 1) */ +#define B43_NPHY_RFCTL_TXG1 B43_PHY_N(0x07C) /* RF control (TX gain 1) */ +#define B43_NPHY_RFCTL_RSSIO2 B43_PHY_N(0x07D) /* RF control (RSSI others 2) */ +#define B43_NPHY_RFCTL_RSSIO2_RXPD 0x0001 /* RX PD */ +#define B43_NPHY_RFCTL_RSSIO2_TXPD 0x0002 /* TX PD */ +#define B43_NPHY_RFCTL_RSSIO2_PAPD 0x0004 /* PA PD */ +#define B43_NPHY_RFCTL_RSSIO2_RSSICTL 0x0030 /* RSSI control */ +#define B43_NPHY_RFCTL_RSSIO2_LPFBW 0x00C0 /* LPF bandwidth */ +#define B43_NPHY_RFCTL_RSSIO2_HPFBWHI 0x0100 /* HPF bandwidth high */ +#define B43_NPHY_RFCTL_RSSIO2_HIQDISCO 0x0200 /* HIQ dis core */ +#define B43_NPHY_RFCTL_RXG2 B43_PHY_N(0x07E) /* RF control (RX gain 2) */ +#define B43_NPHY_RFCTL_TXG2 B43_PHY_N(0x07F) /* RF control (TX gain 2) */ +#define B43_NPHY_RFCTL_RSSIO3 B43_PHY_N(0x080) /* RF control (RSSI others 3) */ +#define B43_NPHY_RFCTL_RSSIO3_RXPD 0x0001 /* RX PD */ +#define B43_NPHY_RFCTL_RSSIO3_TXPD 0x0002 /* TX PD */ +#define B43_NPHY_RFCTL_RSSIO3_PAPD 0x0004 /* PA PD */ +#define B43_NPHY_RFCTL_RSSIO3_RSSICTL 0x0030 /* RSSI control */ +#define B43_NPHY_RFCTL_RSSIO3_LPFBW 0x00C0 /* LPF bandwidth */ +#define B43_NPHY_RFCTL_RSSIO3_HPFBWHI 0x0100 /* HPF bandwidth high */ +#define B43_NPHY_RFCTL_RSSIO3_HIQDISCO 0x0200 /* HIQ dis core */ +#define B43_NPHY_RFCTL_RXG3 B43_PHY_N(0x081) /* RF control (RX gain 3) */ 
+#define B43_NPHY_RFCTL_TXG3 B43_PHY_N(0x082) /* RF control (TX gain 3) */ +#define B43_NPHY_RFCTL_RSSIO4 B43_PHY_N(0x083) /* RF control (RSSI others 4) */ +#define B43_NPHY_RFCTL_RSSIO4_RXPD 0x0001 /* RX PD */ +#define B43_NPHY_RFCTL_RSSIO4_TXPD 0x0002 /* TX PD */ +#define B43_NPHY_RFCTL_RSSIO4_PAPD 0x0004 /* PA PD */ +#define B43_NPHY_RFCTL_RSSIO4_RSSICTL 0x0030 /* RSSI control */ +#define B43_NPHY_RFCTL_RSSIO4_LPFBW 0x00C0 /* LPF bandwidth */ +#define B43_NPHY_RFCTL_RSSIO4_HPFBWHI 0x0100 /* HPF bandwidth high */ +#define B43_NPHY_RFCTL_RSSIO4_HIQDISCO 0x0200 /* HIQ dis core */ +#define B43_NPHY_RFCTL_RXG4 B43_PHY_N(0x084) /* RF control (RX gain 4) */ +#define B43_NPHY_RFCTL_TXG4 B43_PHY_N(0x085) /* RF control (TX gain 4) */ +#define B43_NPHY_C1_TXIQ_COMP_OFF B43_PHY_N(0x087) /* Core 1 TX I/Q comp offset */ +#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */ +#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */ +#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */ +#define B43_NPHY_AFECTL_OVER1 B43_PHY_N(0x08F) /* AFE control override 1 */ +#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */ +#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */ +#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0 +#define B43_NPHY_SCRAM_SIGCTL_SCM 0x0080 /* Scram control mode */ +#define B43_NPHY_SCRAM_SIGCTL_SICE 0x0100 /* Scram index control enable */ +#define B43_NPHY_SCRAM_SIGCTL_START 0xFE00 /* Scram start bit */ +#define B43_NPHY_SCRAM_SIGCTL_START_SHIFT 9 +#define B43_NPHY_RFCTL_INTC1 B43_PHY_N(0x091) /* RF control (intc 1) */ +#define B43_NPHY_RFCTL_INTC2 B43_PHY_N(0x092) /* RF control (intc 2) */ +#define B43_NPHY_RFCTL_INTC3 B43_PHY_N(0x093) /* RF control (intc 3) */ +#define B43_NPHY_RFCTL_INTC4 B43_PHY_N(0x094) /* RF control (intc 4) */ +#define B43_NPHY_NRDTO_WWISE B43_PHY_N(0x095) /* # datatones WWiSE */ +#define B43_NPHY_NRDTO_TGNSYNC B43_PHY_N(0x096) /* # datatones TGNsync */ +#define B43_NPHY_SIGFMOD_WWISE B43_PHY_N(0x097) /* Signal field mod WWiSE */ +#define B43_NPHY_LEG_SIGFMOD_11N B43_PHY_N(0x098) /* Legacy signal field mod 11n */ +#define B43_NPHY_HT_SIGFMOD_11N B43_PHY_N(0x099) /* HT signal field mod 11n */ +#define B43_NPHY_C1_RXIQ_COMPA0 B43_PHY_N(0x09A) /* Core 1 RX I/Q comp A0 */ +#define B43_NPHY_C1_RXIQ_COMPB0 B43_PHY_N(0x09B) /* Core 1 RX I/Q comp B0 */ +#define B43_NPHY_C2_RXIQ_COMPA1 B43_PHY_N(0x09C) /* Core 2 RX I/Q comp A1 */ +#define B43_NPHY_C2_RXIQ_COMPB1 B43_PHY_N(0x09D) /* Core 2 RX I/Q comp B1 */ +#define B43_NPHY_RXCTL B43_PHY_N(0x0A0) /* RX control */ +#define B43_NPHY_RXCTL_BSELU20 0x0010 /* Band select upper 20 */ +#define B43_NPHY_RXCTL_RIFSEN 0x0080 /* RIFS enable */ +#define B43_NPHY_RFSEQMODE B43_PHY_N(0x0A1) /* RF seq mode */ +#define B43_NPHY_RFSEQMODE_CAOVER 0x0001 /* Core active override */ +#define B43_NPHY_RFSEQMODE_TROVER 0x0002 /* Trigger override */ +#define B43_NPHY_RFSEQCA B43_PHY_N(0x0A2) /* RF seq core active */ +#define B43_NPHY_RFSEQCA_TXEN 0x000F /* TX enable */ +#define B43_NPHY_RFSEQCA_TXEN_SHIFT 0 +#define B43_NPHY_RFSEQCA_RXEN 0x00F0 /* RX enable */ +#define B43_NPHY_RFSEQCA_RXEN_SHIFT 4 +#define B43_NPHY_RFSEQCA_TXDIS 0x0F00 /* TX disable */ +#define B43_NPHY_RFSEQCA_TXDIS_SHIFT 8 +#define B43_NPHY_RFSEQCA_RXDIS 0xF000 /* RX disable */ +#define B43_NPHY_RFSEQCA_RXDIS_SHIFT 12 +#define B43_NPHY_RFSEQTR B43_PHY_N(0x0A3) /* RF seq trigger */ +#define B43_NPHY_RFSEQTR_RX2TX 0x0001 /* RX2TX */ +#define B43_NPHY_RFSEQTR_TX2RX 0x0002 /* TX2RX */ 
+#define B43_NPHY_RFSEQTR_UPGH 0x0004 /* Update gain H */ +#define B43_NPHY_RFSEQTR_UPGL 0x0008 /* Update gain L */ +#define B43_NPHY_RFSEQTR_UPGU 0x0010 /* Update gain U */ +#define B43_NPHY_RFSEQTR_RST2RX 0x0020 /* Reset to RX */ +#define B43_NPHY_RFSEQST B43_PHY_N(0x0A4) /* RF seq status. Values same as trigger. */ +#define B43_NPHY_AFECTL_OVER B43_PHY_N(0x0A5) /* AFE control override */ +#define B43_NPHY_AFECTL_C1 B43_PHY_N(0x0A6) /* AFE control core 1 */ +#define B43_NPHY_AFECTL_C2 B43_PHY_N(0x0A7) /* AFE control core 2 */ +#define B43_NPHY_AFECTL_C3 B43_PHY_N(0x0A8) /* AFE control core 3 */ +#define B43_NPHY_AFECTL_C4 B43_PHY_N(0x0A9) /* AFE control core 4 */ +#define B43_NPHY_AFECTL_DACGAIN1 B43_PHY_N(0x0AA) /* AFE control DAC gain 1 */ +#define B43_NPHY_AFECTL_DACGAIN2 B43_PHY_N(0x0AB) /* AFE control DAC gain 2 */ +#define B43_NPHY_AFECTL_DACGAIN3 B43_PHY_N(0x0AC) /* AFE control DAC gain 3 */ +#define B43_NPHY_AFECTL_DACGAIN4 B43_PHY_N(0x0AD) /* AFE control DAC gain 4 */ +#define B43_NPHY_STR_ADDR1 B43_PHY_N(0x0AE) /* STR address 1 */ +#define B43_NPHY_STR_ADDR2 B43_PHY_N(0x0AF) /* STR address 2 */ +#define B43_NPHY_CLASSCTL B43_PHY_N(0x0B0) /* Classifier control */ +#define B43_NPHY_CLASSCTL_CCKEN 0x0001 /* CCK enable */ +#define B43_NPHY_CLASSCTL_OFDMEN 0x0002 /* OFDM enable */ +#define B43_NPHY_CLASSCTL_WAITEDEN 0x0004 /* Waited enable */ +#define B43_NPHY_IQFLIP B43_PHY_N(0x0B1) /* I/Q flip */ +#define B43_NPHY_IQFLIP_ADC1 0x0001 /* ADC1 */ +#define B43_NPHY_IQFLIP_ADC2 0x0010 /* ADC2 */ +#define B43_NPHY_SISO_SNR_THRES B43_PHY_N(0x0B2) /* SISO SNR threshold */ +#define B43_NPHY_SIGMA_N_MULT B43_PHY_N(0x0B3) /* Sigma N multiplier */ +#define B43_NPHY_TXMACDELAY B43_PHY_N(0x0B4) /* TX MAC delay */ +#define B43_NPHY_TXFRAMEDELAY B43_PHY_N(0x0B5) /* TX frame delay */ +#define B43_NPHY_MLPARM B43_PHY_N(0x0B6) /* ML parameters */ +#define B43_NPHY_MLCTL B43_PHY_N(0x0B7) /* ML control */ +#define B43_NPHY_WWISE_20NCYCDAT B43_PHY_N(0x0B8) /* WWiSE 20 N cyc data */ +#define B43_NPHY_WWISE_40NCYCDAT B43_PHY_N(0x0B9) /* WWiSE 40 N cyc data */ +#define B43_NPHY_TGNSYNC_20NCYCDAT B43_PHY_N(0x0BA) /* TGNsync 20 N cyc data */ +#define B43_NPHY_TGNSYNC_40NCYCDAT B43_PHY_N(0x0BB) /* TGNsync 40 N cyc data */ +#define B43_NPHY_INITSWIZP B43_PHY_N(0x0BC) /* Initial swizzle pattern */ +#define B43_NPHY_TXTAILCNT B43_PHY_N(0x0BD) /* TX tail count value */ +#define B43_NPHY_BPHY_CTL1 B43_PHY_N(0x0BE) /* B PHY control 1 */ +#define B43_NPHY_BPHY_CTL2 B43_PHY_N(0x0BF) /* B PHY control 2 */ +#define B43_NPHY_BPHY_CTL2_LUT 0x001F /* LUT index */ +#define B43_NPHY_BPHY_CTL2_LUT_SHIFT 0 +#define B43_NPHY_BPHY_CTL2_MACDEL 0x7FE0 /* MAC delay */ +#define B43_NPHY_BPHY_CTL2_MACDEL_SHIFT 5 +#define B43_NPHY_IQLOCAL_CMD B43_PHY_N(0x0C0) /* I/Q LO cal command */ +#define B43_NPHY_IQLOCAL_CMD_EN 0x8000 +#define B43_NPHY_IQLOCAL_CMDNNUM B43_PHY_N(0x0C1) /* I/Q LO cal command N num */ +#define B43_NPHY_IQLOCAL_CMDGCTL B43_PHY_N(0x0C2) /* I/Q LO cal command G control */ +#define B43_NPHY_SAMP_CMD B43_PHY_N(0x0C3) /* Sample command */ +#define B43_NPHY_SAMP_CMD_STOP 0x0002 /* Stop */ +#define B43_NPHY_SAMP_LOOPCNT B43_PHY_N(0x0C4) /* Sample loop count */ +#define B43_NPHY_SAMP_WAITCNT B43_PHY_N(0x0C5) /* Sample wait count */ +#define B43_NPHY_SAMP_DEPCNT B43_PHY_N(0x0C6) /* Sample depth count */ +#define B43_NPHY_SAMP_STAT B43_PHY_N(0x0C7) /* Sample status */ +#define B43_NPHY_GPIO_LOOEN B43_PHY_N(0x0C8) /* GPIO low out enable */ +#define B43_NPHY_GPIO_HIOEN B43_PHY_N(0x0C9) /* GPIO high out enable */ +#define 
B43_NPHY_GPIO_SEL B43_PHY_N(0x0CA) /* GPIO select */ +#define B43_NPHY_GPIO_CLKCTL B43_PHY_N(0x0CB) /* GPIO clock control */ +#define B43_NPHY_TXF_20CO_AS0 B43_PHY_N(0x0CC) /* TX filter 20 coeff A stage 0 */ +#define B43_NPHY_TXF_20CO_AS1 B43_PHY_N(0x0CD) /* TX filter 20 coeff A stage 1 */ +#define B43_NPHY_TXF_20CO_AS2 B43_PHY_N(0x0CE) /* TX filter 20 coeff A stage 2 */ +#define B43_NPHY_TXF_20CO_B32S0 B43_PHY_N(0x0CF) /* TX filter 20 coeff B32 stage 0 */ +#define B43_NPHY_TXF_20CO_B1S0 B43_PHY_N(0x0D0) /* TX filter 20 coeff B1 stage 0 */ +#define B43_NPHY_TXF_20CO_B32S1 B43_PHY_N(0x0D1) /* TX filter 20 coeff B32 stage 1 */ +#define B43_NPHY_TXF_20CO_B1S1 B43_PHY_N(0x0D2) /* TX filter 20 coeff B1 stage 1 */ +#define B43_NPHY_TXF_20CO_B32S2 B43_PHY_N(0x0D3) /* TX filter 20 coeff B32 stage 2 */ +#define B43_NPHY_TXF_20CO_B1S2 B43_PHY_N(0x0D4) /* TX filter 20 coeff B1 stage 2 */ +#define B43_NPHY_SIGFLDTOL B43_PHY_N(0x0D5) /* Signal fld tolerance */ +#define B43_NPHY_TXSERFLD B43_PHY_N(0x0D6) /* TX service field */ +#define B43_NPHY_AFESEQ_RX2TX_PUD B43_PHY_N(0x0D7) /* AFE seq RX2TX power up/down delay */ +#define B43_NPHY_AFESEQ_TX2RX_PUD B43_PHY_N(0x0D8) /* AFE seq TX2RX power up/down delay */ +#define B43_NPHY_TGNSYNC_SCRAMI0 B43_PHY_N(0x0D9) /* TGNsync scram init 0 */ +#define B43_NPHY_TGNSYNC_SCRAMI1 B43_PHY_N(0x0DA) /* TGNsync scram init 1 */ +#define B43_NPHY_INITSWIZPATTLEG B43_PHY_N(0x0DB) /* Initial swizzle pattern leg */ +#define B43_NPHY_BPHY_CTL3 B43_PHY_N(0x0DC) /* B PHY control 3 */ +#define B43_NPHY_BPHY_CTL3_SCALE 0x00FF /* Scale */ +#define B43_NPHY_BPHY_CTL3_SCALE_SHIFT 0 +#define B43_NPHY_BPHY_CTL3_FSC 0xFF00 /* Frame start count value */ +#define B43_NPHY_BPHY_CTL3_FSC_SHIFT 8 +#define B43_NPHY_BPHY_CTL4 B43_PHY_N(0x0DD) /* B PHY control 4 */ +#define B43_NPHY_C1_TXBBMULT B43_PHY_N(0x0DE) /* Core 1 TX BB multiplier */ +#define B43_NPHY_C2_TXBBMULT B43_PHY_N(0x0DF) /* Core 2 TX BB multiplier */ +#define B43_NPHY_TXF_40CO_AS0 B43_PHY_N(0x0E1) /* TX filter 40 coeff A stage 0 */ +#define B43_NPHY_TXF_40CO_AS1 B43_PHY_N(0x0E2) /* TX filter 40 coeff A stage 1 */ +#define B43_NPHY_TXF_40CO_AS2 B43_PHY_N(0x0E3) /* TX filter 40 coeff A stage 2 */ +#define B43_NPHY_TXF_40CO_B32S0 B43_PHY_N(0x0E4) /* TX filter 40 coeff B32 stage 0 */ +#define B43_NPHY_TXF_40CO_B1S0 B43_PHY_N(0x0E5) /* TX filter 40 coeff B1 stage 0 */ +#define B43_NPHY_TXF_40CO_B32S1 B43_PHY_N(0x0E6) /* TX filter 40 coeff B32 stage 1 */ +#define B43_NPHY_TXF_40CO_B1S1 B43_PHY_N(0x0E7) /* TX filter 40 coeff B1 stage 1 */ +#define B43_NPHY_TXF_40CO_B32S2 B43_PHY_N(0x0E8) /* TX filter 40 coeff B32 stage 2 */ +#define B43_NPHY_TXF_40CO_B1S2 B43_PHY_N(0x0E9) /* TX filter 40 coeff B1 stage 2 */ +#define B43_NPHY_BIST_STAT2 B43_PHY_N(0x0EA) /* BIST status 2 */ +#define B43_NPHY_BIST_STAT3 B43_PHY_N(0x0EB) /* BIST status 3 */ +#define B43_NPHY_RFCTL_OVER B43_PHY_N(0x0EC) /* RF control override */ +#define B43_NPHY_MIMOCFG B43_PHY_N(0x0ED) /* MIMO config */ +#define B43_NPHY_MIMOCFG_GFMIX 0x0004 /* Greenfield or mixed mode */ +#define B43_NPHY_MIMOCFG_AUTO 0x0100 /* Greenfield/mixed mode auto */ +#define B43_NPHY_RADAR_BLNKCTL B43_PHY_N(0x0EE) /* Radar blank control */ +#define B43_NPHY_A0RADAR_FIFOCTL B43_PHY_N(0x0EF) /* Antenna 0 radar FIFO control */ +#define B43_NPHY_A1RADAR_FIFOCTL B43_PHY_N(0x0F0) /* Antenna 1 radar FIFO control */ +#define B43_NPHY_A0RADAR_FIFODAT B43_PHY_N(0x0F1) /* Antenna 0 radar FIFO data */ +#define B43_NPHY_A1RADAR_FIFODAT B43_PHY_N(0x0F2) /* Antenna 1 radar FIFO data */ +#define 
B43_NPHY_RADAR_THRES0 B43_PHY_N(0x0F3) /* Radar threshold 0 */ +#define B43_NPHY_RADAR_THRES1 B43_PHY_N(0x0F4) /* Radar threshold 1 */ +#define B43_NPHY_RADAR_THRES0R B43_PHY_N(0x0F5) /* Radar threshold 0R */ +#define B43_NPHY_RADAR_THRES1R B43_PHY_N(0x0F6) /* Radar threshold 1R */ +#define B43_NPHY_CSEN_20IN40_DLEN B43_PHY_N(0x0F7) /* Carrier sense 20 in 40 dwell length */ +#define B43_NPHY_RFCTL_LUT_TRSW_LO1 B43_PHY_N(0x0F8) /* RF control LUT TRSW lower 1 */ +#define B43_NPHY_RFCTL_LUT_TRSW_UP1 B43_PHY_N(0x0F9) /* RF control LUT TRSW upper 1 */ +#define B43_NPHY_RFCTL_LUT_TRSW_LO2 B43_PHY_N(0x0FA) /* RF control LUT TRSW lower 2 */ +#define B43_NPHY_RFCTL_LUT_TRSW_UP2 B43_PHY_N(0x0FB) /* RF control LUT TRSW upper 2 */ +#define B43_NPHY_RFCTL_LUT_TRSW_LO3 B43_PHY_N(0x0FC) /* RF control LUT TRSW lower 3 */ +#define B43_NPHY_RFCTL_LUT_TRSW_UP3 B43_PHY_N(0x0FD) /* RF control LUT TRSW upper 3 */ +#define B43_NPHY_RFCTL_LUT_TRSW_LO4 B43_PHY_N(0x0FE) /* RF control LUT TRSW lower 4 */ +#define B43_NPHY_RFCTL_LUT_TRSW_UP4 B43_PHY_N(0x0FF) /* RF control LUT TRSW upper 4 */ +#define B43_NPHY_RFCTL_LUT_LNAPA1 B43_PHY_N(0x100) /* RF control LUT LNA PA 1 */ +#define B43_NPHY_RFCTL_LUT_LNAPA2 B43_PHY_N(0x101) /* RF control LUT LNA PA 2 */ +#define B43_NPHY_RFCTL_LUT_LNAPA3 B43_PHY_N(0x102) /* RF control LUT LNA PA 3 */ +#define B43_NPHY_RFCTL_LUT_LNAPA4 B43_PHY_N(0x103) /* RF control LUT LNA PA 4 */ +#define B43_NPHY_TGNSYNC_CRCM0 B43_PHY_N(0x104) /* TGNsync CRC mask 0 */ +#define B43_NPHY_TGNSYNC_CRCM1 B43_PHY_N(0x105) /* TGNsync CRC mask 1 */ +#define B43_NPHY_TGNSYNC_CRCM2 B43_PHY_N(0x106) /* TGNsync CRC mask 2 */ +#define B43_NPHY_TGNSYNC_CRCM3 B43_PHY_N(0x107) /* TGNsync CRC mask 3 */ +#define B43_NPHY_TGNSYNC_CRCM4 B43_PHY_N(0x108) /* TGNsync CRC mask 4 */ +#define B43_NPHY_CRCPOLY B43_PHY_N(0x109) /* CRC polynomial */ +#define B43_NPHY_SIGCNT B43_PHY_N(0x10A) /* # sig count */ +#define B43_NPHY_SIGSTARTBIT_CTL B43_PHY_N(0x10B) /* Sig start bit control */ +#define B43_NPHY_CRCPOLY_ORDER B43_PHY_N(0x10C) /* CRC polynomial order */ +#define B43_NPHY_RFCTL_CST0 B43_PHY_N(0x10D) /* RF control core swap table 0 */ +#define B43_NPHY_RFCTL_CST1 B43_PHY_N(0x10E) /* RF control core swap table 1 */ +#define B43_NPHY_RFCTL_CST2O B43_PHY_N(0x10F) /* RF control core swap table 2 + others */ +#define B43_NPHY_BPHY_CTL5 B43_PHY_N(0x111) /* B PHY control 5 */ +#define B43_NPHY_RFSEQ_LPFBW B43_PHY_N(0x112) /* RF seq LPF bandwidth */ +#define B43_NPHY_TSSIBIAS1 B43_PHY_N(0x114) /* TSSI bias val 1 */ +#define B43_NPHY_TSSIBIAS2 B43_PHY_N(0x115) /* TSSI bias val 2 */ +#define B43_NPHY_TSSIBIAS_BIAS 0x00FF /* Bias */ +#define B43_NPHY_TSSIBIAS_BIAS_SHIFT 0 +#define B43_NPHY_TSSIBIAS_VAL 0xFF00 /* Value */ +#define B43_NPHY_TSSIBIAS_VAL_SHIFT 8 +#define B43_NPHY_ESTPWR1 B43_PHY_N(0x118) /* Estimated power 1 */ +#define B43_NPHY_ESTPWR2 B43_PHY_N(0x119) /* Estimated power 2 */ +#define B43_NPHY_ESTPWR_PWR 0x00FF /* Estimated power */ +#define B43_NPHY_ESTPWR_PWR_SHIFT 0 +#define B43_NPHY_ESTPWR_VALID 0x0100 /* Estimated power valid */ +#define B43_NPHY_TSSI_MAXTXFDT B43_PHY_N(0x11C) /* TSSI max TX frame delay time */ +#define B43_NPHY_TSSI_MAXTXFDT_VAL 0x00FF /* max TX frame delay time */ +#define B43_NPHY_TSSI_MAXTXFDT_VAL_SHIFT 0 +#define B43_NPHY_TSSI_MAXTDT B43_PHY_N(0x11D) /* TSSI max TSSI delay time */ +#define B43_NPHY_TSSI_MAXTDT_VAL 0x00FF /* max TSSI delay time */ +#define B43_NPHY_TSSI_MAXTDT_VAL_SHIFT 0 +#define B43_NPHY_ITSSI1 B43_PHY_N(0x11E) /* TSSI idle 1 */ +#define B43_NPHY_ITSSI2 B43_PHY_N(0x11F) /* 
TSSI idle 2 */ +#define B43_NPHY_ITSSI_VAL 0x00FF /* Idle TSSI */ +#define B43_NPHY_ITSSI_VAL_SHIFT 0 +#define B43_NPHY_TSSIMODE B43_PHY_N(0x122) /* TSSI mode */ +#define B43_NPHY_TSSIMODE_EN 0x0001 /* TSSI enable */ +#define B43_NPHY_TSSIMODE_PDEN 0x0002 /* Power det enable */ +#define B43_NPHY_RXMACIFM B43_PHY_N(0x123) /* RX Macif mode */ +#define B43_NPHY_CRSIT_COCNT_LO B43_PHY_N(0x124) /* CRS idle time CRS-on count (low) */ +#define B43_NPHY_CRSIT_COCNT_HI B43_PHY_N(0x125) /* CRS idle time CRS-on count (high) */ +#define B43_NPHY_CRSIT_MTCNT_LO B43_PHY_N(0x126) /* CRS idle time measure time count (low) */ +#define B43_NPHY_CRSIT_MTCNT_HI B43_PHY_N(0x127) /* CRS idle time measure time count (high) */ +#define B43_NPHY_SAMTWC B43_PHY_N(0x128) /* Sample tail wait count */ +#define B43_NPHY_IQEST_CMD B43_PHY_N(0x129) /* I/Q estimate command */ +#define B43_NPHY_IQEST_CMD_START 0x0001 /* Start */ +#define B43_NPHY_IQEST_CMD_MODE 0x0002 /* Mode */ +#define B43_NPHY_IQEST_WT B43_PHY_N(0x12A) /* I/Q estimate wait time */ +#define B43_NPHY_IQEST_WT_VAL 0x00FF /* Wait time */ +#define B43_NPHY_IQEST_WT_VAL_SHIFT 0 +#define B43_NPHY_IQEST_SAMCNT B43_PHY_N(0x12B) /* I/Q estimate sample count */ +#define B43_NPHY_IQEST_IQACC_LO0 B43_PHY_N(0x12C) /* I/Q estimate I/Q acc lo 0 */ +#define B43_NPHY_IQEST_IQACC_HI0 B43_PHY_N(0x12D) /* I/Q estimate I/Q acc hi 0 */ +#define B43_NPHY_IQEST_IPACC_LO0 B43_PHY_N(0x12E) /* I/Q estimate I power acc lo 0 */ +#define B43_NPHY_IQEST_IPACC_HI0 B43_PHY_N(0x12F) /* I/Q estimate I power acc hi 0 */ +#define B43_NPHY_IQEST_QPACC_LO0 B43_PHY_N(0x130) /* I/Q estimate Q power acc lo 0 */ +#define B43_NPHY_IQEST_QPACC_HI0 B43_PHY_N(0x131) /* I/Q estimate Q power acc hi 0 */ +#define B43_NPHY_IQEST_IQACC_LO1 B43_PHY_N(0x134) /* I/Q estimate I/Q acc lo 1 */ +#define B43_NPHY_IQEST_IQACC_HI1 B43_PHY_N(0x135) /* I/Q estimate I/Q acc hi 1 */ +#define B43_NPHY_IQEST_IPACC_LO1 B43_PHY_N(0x136) /* I/Q estimate I power acc lo 1 */ +#define B43_NPHY_IQEST_IPACC_HI1 B43_PHY_N(0x137) /* I/Q estimate I power acc hi 1 */ +#define B43_NPHY_IQEST_QPACC_LO1 B43_PHY_N(0x138) /* I/Q estimate Q power acc lo 1 */ +#define B43_NPHY_IQEST_QPACC_HI1 B43_PHY_N(0x139) /* I/Q estimate Q power acc hi 1 */ +#define B43_NPHY_MIMO_CRSTXEXT B43_PHY_N(0x13A) /* MIMO PHY CRS TX extension */ +#define B43_NPHY_PWRDET1 B43_PHY_N(0x13B) /* Power det 1 */ +#define B43_NPHY_PWRDET2 B43_PHY_N(0x13C) /* Power det 2 */ +#define B43_NPHY_MAXRSSI_DTIME B43_PHY_N(0x13F) /* RSSI max RSSI delay time */ +#define B43_NPHY_PIL_DW0 B43_PHY_N(0x141) /* Pilot data weight 0 */ +#define B43_NPHY_PIL_DW1 B43_PHY_N(0x142) /* Pilot data weight 1 */ +#define B43_NPHY_PIL_DW2 B43_PHY_N(0x143) /* Pilot data weight 2 */ +#define B43_NPHY_PIL_DW_BPSK 0x000F /* BPSK */ +#define B43_NPHY_PIL_DW_BPSK_SHIFT 0 +#define B43_NPHY_PIL_DW_QPSK 0x00F0 /* QPSK */ +#define B43_NPHY_PIL_DW_QPSK_SHIFT 4 +#define B43_NPHY_PIL_DW_16QAM 0x0F00 /* 16-QAM */ +#define B43_NPHY_PIL_DW_16QAM_SHIFT 8 +#define B43_NPHY_PIL_DW_64QAM 0xF000 /* 64-QAM */ +#define B43_NPHY_PIL_DW_64QAM_SHIFT 12 +#define B43_NPHY_FMDEM_CFG B43_PHY_N(0x144) /* FM demodulation config */ +#define B43_NPHY_PHASETR_A0 B43_PHY_N(0x145) /* Phase track alpha 0 */ +#define B43_NPHY_PHASETR_A1 B43_PHY_N(0x146) /* Phase track alpha 1 */ +#define B43_NPHY_PHASETR_A2 B43_PHY_N(0x147) /* Phase track alpha 2 */ +#define B43_NPHY_PHASETR_B0 B43_PHY_N(0x148) /* Phase track beta 0 */ +#define B43_NPHY_PHASETR_B1 B43_PHY_N(0x149) /* Phase track beta 1 */ +#define B43_NPHY_PHASETR_B2 B43_PHY_N(0x14A) 
/* Phase track beta 2 */ +#define B43_NPHY_PHASETR_CHG0 B43_PHY_N(0x14B) /* Phase track change 0 */ +#define B43_NPHY_PHASETR_CHG1 B43_PHY_N(0x14C) /* Phase track change 1 */ +#define B43_NPHY_PHASETW_OFF B43_PHY_N(0x14D) /* Phase track offset */ +#define B43_NPHY_RFCTL_DBG B43_PHY_N(0x14E) /* RF control debug */ +#define B43_NPHY_CCK_SHIFTB_REF B43_PHY_N(0x150) /* CCK shiftbits reference var */ +#define B43_NPHY_OVER_DGAIN0 B43_PHY_N(0x152) /* Override digital gain 0 */ +#define B43_NPHY_OVER_DGAIN1 B43_PHY_N(0x153) /* Override digital gain 1 */ +#define B43_NPHY_OVER_DGAIN_FDGV 0x0007 /* Force digital gain value */ +#define B43_NPHY_OVER_DGAIN_FDGV_SHIFT 0 +#define B43_NPHY_OVER_DGAIN_FDGEN 0x0008 /* Force digital gain enable */ +#define B43_NPHY_OVER_DGAIN_CCKDGECV 0xFF00 /* CCK digital gain enable count value */ +#define B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT 8 +#define B43_NPHY_BIST_STAT4 B43_PHY_N(0x156) /* BIST status 4 */ +#define B43_NPHY_RADAR_MAL B43_PHY_N(0x157) /* Radar MA length */ +#define B43_NPHY_RADAR_SRCCTL B43_PHY_N(0x158) /* Radar search control */ +#define B43_NPHY_VLD_DTSIG B43_PHY_N(0x159) /* VLD data tones sig */ +#define B43_NPHY_VLD_DTDAT B43_PHY_N(0x15A) /* VLD data tones data */ +#define B43_NPHY_C1_BPHY_RXIQCA0 B43_PHY_N(0x15B) /* Core 1 B PHY RX I/Q comp A0 */ +#define B43_NPHY_C1_BPHY_RXIQCB0 B43_PHY_N(0x15C) /* Core 1 B PHY RX I/Q comp B0 */ +#define B43_NPHY_C2_BPHY_RXIQCA1 B43_PHY_N(0x15D) /* Core 2 B PHY RX I/Q comp A1 */ +#define B43_NPHY_C2_BPHY_RXIQCB1 B43_PHY_N(0x15E) /* Core 2 B PHY RX I/Q comp B1 */ +#define B43_NPHY_FREQGAIN0 B43_PHY_N(0x160) /* Frequency gain 0 */ +#define B43_NPHY_FREQGAIN1 B43_PHY_N(0x161) /* Frequency gain 1 */ +#define B43_NPHY_FREQGAIN2 B43_PHY_N(0x162) /* Frequency gain 2 */ +#define B43_NPHY_FREQGAIN3 B43_PHY_N(0x163) /* Frequency gain 3 */ +#define B43_NPHY_FREQGAIN4 B43_PHY_N(0x164) /* Frequency gain 4 */ +#define B43_NPHY_FREQGAIN5 B43_PHY_N(0x165) /* Frequency gain 5 */ +#define B43_NPHY_FREQGAIN6 B43_PHY_N(0x166) /* Frequency gain 6 */ +#define B43_NPHY_FREQGAIN7 B43_PHY_N(0x167) /* Frequency gain 7 */ +#define B43_NPHY_FREQGAIN_BYPASS B43_PHY_N(0x168) /* Frequency gain bypass */ +#define B43_NPHY_TRLOSS B43_PHY_N(0x169) /* TR loss value */ +#define B43_NPHY_C1_ADCCLIP B43_PHY_N(0x16A) /* Core 1 ADC clip */ +#define B43_NPHY_C2_ADCCLIP B43_PHY_N(0x16B) /* Core 2 ADC clip */ +#define B43_NPHY_LTRN_OFFGAIN B43_PHY_N(0x16F) /* LTRN offset gain */ +#define B43_NPHY_LTRN_OFF B43_PHY_N(0x170) /* LTRN offset */ +#define B43_NPHY_NRDATAT_WWISE20SIG B43_PHY_N(0x171) /* # data tones WWiSE 20 sig */ +#define B43_NPHY_NRDATAT_WWISE40SIG B43_PHY_N(0x172) /* # data tones WWiSE 40 sig */ +#define B43_NPHY_NRDATAT_TGNSYNC20SIG B43_PHY_N(0x173) /* # data tones TGNsync 20 sig */ +#define B43_NPHY_NRDATAT_TGNSYNC40SIG B43_PHY_N(0x174) /* # data tones TGNsync 40 sig */ +#define B43_NPHY_WWISE_CRCM0 B43_PHY_N(0x175) /* WWiSE CRC mask 0 */ +#define B43_NPHY_WWISE_CRCM1 B43_PHY_N(0x176) /* WWiSE CRC mask 1 */ +#define B43_NPHY_WWISE_CRCM2 B43_PHY_N(0x177) /* WWiSE CRC mask 2 */ +#define B43_NPHY_WWISE_CRCM3 B43_PHY_N(0x178) /* WWiSE CRC mask 3 */ +#define B43_NPHY_WWISE_CRCM4 B43_PHY_N(0x179) /* WWiSE CRC mask 4 */ +#define B43_NPHY_CHANEST_CDDSH B43_PHY_N(0x17A) /* Channel estimate CDD shift */ +#define B43_NPHY_HTAGC_WCNT B43_PHY_N(0x17B) /* HT ADC wait counters */ +#define B43_NPHY_SQPARM B43_PHY_N(0x17C) /* SQ params */ +#define B43_NPHY_MCSDUP6M B43_PHY_N(0x17D) /* MCS dup 6M */ +#define B43_NPHY_NDATAT_DUP40 B43_PHY_N(0x17E) /* # data 
tones dup 40 */ +#define B43_NPHY_DUP40_TGNSYNC_CYCD B43_PHY_N(0x17F) /* Dup40 TGNsync cycle data */ +#define B43_NPHY_DUP40_GFBL B43_PHY_N(0x180) /* Dup40 GF format BL address */ +#define B43_NPHY_DUP40_BL B43_PHY_N(0x181) /* Dup40 format BL address */ +#define B43_NPHY_LEGDUP_FTA B43_PHY_N(0x182) /* Legacy dup frm table address */ +#define B43_NPHY_PACPROC_DBG B43_PHY_N(0x183) /* Packet processing debug */ +#define B43_NPHY_PIL_CYC1 B43_PHY_N(0x184) /* Pilot cycle counter 1 */ +#define B43_NPHY_PIL_CYC2 B43_PHY_N(0x185) /* Pilot cycle counter 2 */ +#define B43_NPHY_TXF_20CO_S0A1 B43_PHY_N(0x186) /* TX filter 20 coeff stage 0 A1 */ +#define B43_NPHY_TXF_20CO_S0A2 B43_PHY_N(0x187) /* TX filter 20 coeff stage 0 A2 */ +#define B43_NPHY_TXF_20CO_S1A1 B43_PHY_N(0x188) /* TX filter 20 coeff stage 1 A1 */ +#define B43_NPHY_TXF_20CO_S1A2 B43_PHY_N(0x189) /* TX filter 20 coeff stage 1 A2 */ +#define B43_NPHY_TXF_20CO_S2A1 B43_PHY_N(0x18A) /* TX filter 20 coeff stage 2 A1 */ +#define B43_NPHY_TXF_20CO_S2A2 B43_PHY_N(0x18B) /* TX filter 20 coeff stage 2 A2 */ +#define B43_NPHY_TXF_20CO_S0B1 B43_PHY_N(0x18C) /* TX filter 20 coeff stage 0 B1 */ +#define B43_NPHY_TXF_20CO_S0B2 B43_PHY_N(0x18D) /* TX filter 20 coeff stage 0 B2 */ +#define B43_NPHY_TXF_20CO_S0B3 B43_PHY_N(0x18E) /* TX filter 20 coeff stage 0 B3 */ +#define B43_NPHY_TXF_20CO_S1B1 B43_PHY_N(0x18F) /* TX filter 20 coeff stage 1 B1 */ +#define B43_NPHY_TXF_20CO_S1B2 B43_PHY_N(0x190) /* TX filter 20 coeff stage 1 B2 */ +#define B43_NPHY_TXF_20CO_S1B3 B43_PHY_N(0x191) /* TX filter 20 coeff stage 1 B3 */ +#define B43_NPHY_TXF_20CO_S2B1 B43_PHY_N(0x192) /* TX filter 20 coeff stage 2 B1 */ +#define B43_NPHY_TXF_20CO_S2B2 B43_PHY_N(0x193) /* TX filter 20 coeff stage 2 B2 */ +#define B43_NPHY_TXF_20CO_S2B3 B43_PHY_N(0x194) /* TX filter 20 coeff stage 2 B3 */ +#define B43_NPHY_TXF_40CO_S0A1 B43_PHY_N(0x195) /* TX filter 40 coeff stage 0 A1 */ +#define B43_NPHY_TXF_40CO_S0A2 B43_PHY_N(0x196) /* TX filter 40 coeff stage 0 A2 */ +#define B43_NPHY_TXF_40CO_S1A1 B43_PHY_N(0x197) /* TX filter 40 coeff stage 1 A1 */ +#define B43_NPHY_TXF_40CO_S1A2 B43_PHY_N(0x198) /* TX filter 40 coeff stage 1 A2 */ +#define B43_NPHY_TXF_40CO_S2A1 B43_PHY_N(0x199) /* TX filter 40 coeff stage 2 A1 */ +#define B43_NPHY_TXF_40CO_S2A2 B43_PHY_N(0x19A) /* TX filter 40 coeff stage 2 A2 */ +#define B43_NPHY_TXF_40CO_S0B1 B43_PHY_N(0x19B) /* TX filter 40 coeff stage 0 B1 */ +#define B43_NPHY_TXF_40CO_S0B2 B43_PHY_N(0x19C) /* TX filter 40 coeff stage 0 B2 */ +#define B43_NPHY_TXF_40CO_S0B3 B43_PHY_N(0x19D) /* TX filter 40 coeff stage 0 B3 */ +#define B43_NPHY_TXF_40CO_S1B1 B43_PHY_N(0x19E) /* TX filter 40 coeff stage 1 B1 */ +#define B43_NPHY_TXF_40CO_S1B2 B43_PHY_N(0x19F) /* TX filter 40 coeff stage 1 B2 */ +#define B43_NPHY_TXF_40CO_S1B3 B43_PHY_N(0x1A0) /* TX filter 40 coeff stage 1 B3 */ +#define B43_NPHY_TXF_40CO_S2B1 B43_PHY_N(0x1A1) /* TX filter 40 coeff stage 2 B1 */ +#define B43_NPHY_TXF_40CO_S2B2 B43_PHY_N(0x1A2) /* TX filter 40 coeff stage 2 B2 */ +#define B43_NPHY_TXF_40CO_S2B3 B43_PHY_N(0x1A3) /* TX filter 40 coeff stage 2 B3 */ +#define B43_NPHY_RSSIMC_0I_RSSI_X B43_PHY_N(0x1A4) /* RSSI multiplication coefficient 0 I RSSI X */ +#define B43_NPHY_RSSIMC_0I_RSSI_Y B43_PHY_N(0x1A5) /* RSSI multiplication coefficient 0 I RSSI Y */ +#define B43_NPHY_RSSIMC_0I_RSSI_Z B43_PHY_N(0x1A6) /* RSSI multiplication coefficient 0 I RSSI Z */ +#define B43_NPHY_RSSIMC_0I_TBD B43_PHY_N(0x1A7) /* RSSI multiplication coefficient 0 I TBD */ +#define B43_NPHY_RSSIMC_0I_PWRDET 
B43_PHY_N(0x1A8) /* RSSI multiplication coefficient 0 I power det */ +#define B43_NPHY_RSSIMC_0I_TSSI B43_PHY_N(0x1A9) /* RSSI multiplication coefficient 0 I TSSI */ +#define B43_NPHY_RSSIMC_0Q_RSSI_X B43_PHY_N(0x1AA) /* RSSI multiplication coefficient 0 Q RSSI X */ +#define B43_NPHY_RSSIMC_0Q_RSSI_Y B43_PHY_N(0x1AB) /* RSSI multiplication coefficient 0 Q RSSI Y */ +#define B43_NPHY_RSSIMC_0Q_RSSI_Z B43_PHY_N(0x1AC) /* RSSI multiplication coefficient 0 Q RSSI Z */ +#define B43_NPHY_RSSIMC_0Q_TBD B43_PHY_N(0x1AD) /* RSSI multiplication coefficient 0 Q TBD */ +#define B43_NPHY_RSSIMC_0Q_PWRDET B43_PHY_N(0x1AE) /* RSSI multiplication coefficient 0 Q power det */ +#define B43_NPHY_RSSIMC_0Q_TSSI B43_PHY_N(0x1AF) /* RSSI multiplication coefficient 0 Q TSSI */ +#define B43_NPHY_RSSIMC_1I_RSSI_X B43_PHY_N(0x1B0) /* RSSI multiplication coefficient 1 I RSSI X */ +#define B43_NPHY_RSSIMC_1I_RSSI_Y B43_PHY_N(0x1B1) /* RSSI multiplication coefficient 1 I RSSI Y */ +#define B43_NPHY_RSSIMC_1I_RSSI_Z B43_PHY_N(0x1B2) /* RSSI multiplication coefficient 1 I RSSI Z */ +#define B43_NPHY_RSSIMC_1I_TBD B43_PHY_N(0x1B3) /* RSSI multiplication coefficient 1 I TBD */ +#define B43_NPHY_RSSIMC_1I_PWRDET B43_PHY_N(0x1B4) /* RSSI multiplication coefficient 1 I power det */ +#define B43_NPHY_RSSIMC_1I_TSSI B43_PHY_N(0x1B5) /* RSSI multiplication coefficient 1 I TSSI */ +#define B43_NPHY_RSSIMC_1Q_RSSI_X B43_PHY_N(0x1B6) /* RSSI multiplication coefficient 1 Q RSSI X */ +#define B43_NPHY_RSSIMC_1Q_RSSI_Y B43_PHY_N(0x1B7) /* RSSI multiplication coefficient 1 Q RSSI Y */ +#define B43_NPHY_RSSIMC_1Q_RSSI_Z B43_PHY_N(0x1B8) /* RSSI multiplication coefficient 1 Q RSSI Z */ +#define B43_NPHY_RSSIMC_1Q_TBD B43_PHY_N(0x1B9) /* RSSI multiplication coefficient 1 Q TBD */ +#define B43_NPHY_RSSIMC_1Q_PWRDET B43_PHY_N(0x1BA) /* RSSI multiplication coefficient 1 Q power det */ +#define B43_NPHY_RSSIMC_1Q_TSSI B43_PHY_N(0x1BB) /* RSSI multiplication coefficient 1 Q TSSI */ +#define B43_NPHY_SAMC_WCNT B43_PHY_N(0x1BC) /* Sample collect wait counter */ +#define B43_NPHY_PTHROUGH_CNT B43_PHY_N(0x1BD) /* Pass-through counter */ +#define B43_NPHY_LTRN_OFF_G20L B43_PHY_N(0x1C4) /* LTRN offset gain 20L */ +#define B43_NPHY_LTRN_OFF_20L B43_PHY_N(0x1C5) /* LTRN offset 20L */ +#define B43_NPHY_LTRN_OFF_G20U B43_PHY_N(0x1C6) /* LTRN offset gain 20U */ +#define B43_NPHY_LTRN_OFF_20U B43_PHY_N(0x1C7) /* LTRN offset 20U */ +#define B43_NPHY_DSSSCCK_GAINSL B43_PHY_N(0x1C8) /* DSSS/CCK gain settle length */ +#define B43_NPHY_GPIO_LOOUT B43_PHY_N(0x1C9) /* GPIO low out */ +#define B43_NPHY_GPIO_HIOUT B43_PHY_N(0x1CA) /* GPIO high out */ +#define B43_NPHY_CRS_CHECK B43_PHY_N(0x1CB) /* CRS check */ +#define B43_NPHY_ML_LOGSS_RAT B43_PHY_N(0x1CC) /* ML/logss ratio */ +#define B43_NPHY_DUPSCALE B43_PHY_N(0x1CD) /* Dup scale */ +#define B43_NPHY_BW1A B43_PHY_N(0x1CE) /* BW 1A */ +#define B43_NPHY_BW2 B43_PHY_N(0x1CF) /* BW 2 */ +#define B43_NPHY_BW3 B43_PHY_N(0x1D0) /* BW 3 */ +#define B43_NPHY_BW4 B43_PHY_N(0x1D1) /* BW 4 */ +#define B43_NPHY_BW5 B43_PHY_N(0x1D2) /* BW 5 */ +#define B43_NPHY_BW6 B43_PHY_N(0x1D3) /* BW 6 */ +#define B43_NPHY_COALEN0 B43_PHY_N(0x1D4) /* Coarse length 0 */ +#define B43_NPHY_COALEN1 B43_PHY_N(0x1D5) /* Coarse length 1 */ +#define B43_NPHY_CRSTHRES_1U B43_PHY_N(0x1D6) /* CRS threshold 1 U */ +#define B43_NPHY_CRSTHRES_2U B43_PHY_N(0x1D7) /* CRS threshold 2 U */ +#define B43_NPHY_CRSTHRES_3U B43_PHY_N(0x1D8) /* CRS threshold 3 U */ +#define B43_NPHY_CRSCTL_U B43_PHY_N(0x1D9) /* CRS control U */ +#define B43_NPHY_CRSTHRES_1L 
B43_PHY_N(0x1DA) /* CRS threshold 1 L */ +#define B43_NPHY_CRSTHRES_2L B43_PHY_N(0x1DB) /* CRS threshold 2 L */ +#define B43_NPHY_CRSTHRES_3L B43_PHY_N(0x1DC) /* CRS threshold 3 L */ +#define B43_NPHY_CRSCTL_L B43_PHY_N(0x1DD) /* CRS control L */ +#define B43_NPHY_STRA_1U B43_PHY_N(0x1DE) /* STR address 1 U */ +#define B43_NPHY_STRA_2U B43_PHY_N(0x1DF) /* STR address 2 U */ +#define B43_NPHY_STRA_1L B43_PHY_N(0x1E0) /* STR address 1 L */ +#define B43_NPHY_STRA_2L B43_PHY_N(0x1E1) /* STR address 2 L */ +#define B43_NPHY_CRSCHECK1 B43_PHY_N(0x1E2) /* CRS check 1 */ +#define B43_NPHY_CRSCHECK2 B43_PHY_N(0x1E3) /* CRS check 2 */ +#define B43_NPHY_CRSCHECK3 B43_PHY_N(0x1E4) /* CRS check 3 */ +#define B43_NPHY_JMPSTP0 B43_PHY_N(0x1E5) /* Jump step 0 */ +#define B43_NPHY_JMPSTP1 B43_PHY_N(0x1E6) /* Jump step 1 */ +#define B43_NPHY_TXPCTL_CMD B43_PHY_N(0x1E7) /* TX power control command */ +#define B43_NPHY_TXPCTL_CMD_INIT 0x007F /* Init */ +#define B43_NPHY_TXPCTL_CMD_INIT_SHIFT 0 +#define B43_NPHY_TXPCTL_CMD_COEFF 0x2000 /* Power control coefficients */ +#define B43_NPHY_TXPCTL_CMD_HWPCTLEN 0x4000 /* Hardware TX power control enable */ +#define B43_NPHY_TXPCTL_CMD_PCTLEN 0x8000 /* TX power control enable */ +#define B43_NPHY_TXPCTL_N B43_PHY_N(0x1E8) /* TX power control N num */ +#define B43_NPHY_TXPCTL_N_TSSID 0x00FF /* N TSSI delay */ +#define B43_NPHY_TXPCTL_N_TSSID_SHIFT 0 +#define B43_NPHY_TXPCTL_N_NPTIL2 0x0700 /* N PT integer log2 */ +#define B43_NPHY_TXPCTL_N_NPTIL2_SHIFT 8 +#define B43_NPHY_TXPCTL_ITSSI B43_PHY_N(0x1E9) /* TX power control idle TSSI */ +#define B43_NPHY_TXPCTL_ITSSI_0 0x003F /* Idle TSSI 0 */ +#define B43_NPHY_TXPCTL_ITSSI_0_SHIFT 0 +#define B43_NPHY_TXPCTL_ITSSI_1 0x3F00 /* Idle TSSI 1 */ +#define B43_NPHY_TXPCTL_ITSSI_1_SHIFT 8 +#define B43_NPHY_TXPCTL_ITSSI_BINF 0x8000 /* Raw TSSI offset bin format */ +#define B43_NPHY_TXPCTL_TPWR B43_PHY_N(0x1EA) /* TX power control target power */ +#define B43_NPHY_TXPCTL_TPWR_0 0x00FF /* Power 0 */ +#define B43_NPHY_TXPCTL_TPWR_0_SHIFT 0 +#define B43_NPHY_TXPCTL_TPWR_1 0xFF00 /* Power 1 */ +#define B43_NPHY_TXPCTL_TPWR_1_SHIFT 8 +#define B43_NPHY_TXPCTL_BIDX B43_PHY_N(0x1EB) /* TX power control base index */ +#define B43_NPHY_TXPCTL_BIDX_0 0x007F /* uC base index 0 */ +#define B43_NPHY_TXPCTL_BIDX_0_SHIFT 0 +#define B43_NPHY_TXPCTL_BIDX_1 0x7F00 /* uC base index 1 */ +#define B43_NPHY_TXPCTL_BIDX_1_SHIFT 8 +#define B43_NPHY_TXPCTL_BIDX_LOAD 0x8000 /* Load base index */ +#define B43_NPHY_TXPCTL_PIDX B43_PHY_N(0x1EC) /* TX power control power index */ +#define B43_NPHY_TXPCTL_PIDX_0 0x007F /* uC power index 0 */ +#define B43_NPHY_TXPCTL_PIDX_0_SHIFT 0 +#define B43_NPHY_TXPCTL_PIDX_1 0x7F00 /* uC power index 1 */ +#define B43_NPHY_TXPCTL_PIDX_1_SHIFT 8 +#define B43_NPHY_C1_TXPCTL_STAT B43_PHY_N(0x1ED) /* Core 1 TX power control status */ +#define B43_NPHY_C2_TXPCTL_STAT B43_PHY_N(0x1EE) /* Core 2 TX power control status */ +#define B43_NPHY_TXPCTL_STAT_EST 0x00FF /* Estimated power */ +#define B43_NPHY_TXPCTL_STAT_EST_SHIFT 0 +#define B43_NPHY_TXPCTL_STAT_BIDX 0x7F00 /* Base index */ +#define B43_NPHY_TXPCTL_STAT_BIDX_SHIFT 8 +#define B43_NPHY_TXPCTL_STAT_ESTVALID 0x8000 /* Estimated power valid */ +#define B43_NPHY_SMALLSGS_LEN B43_PHY_N(0x1EF) /* Small sig gain settle length */ +#define B43_NPHY_PHYSTAT_GAIN0 B43_PHY_N(0x1F0) /* PHY stats gain info 0 */ +#define B43_NPHY_PHYSTAT_GAIN1 B43_PHY_N(0x1F1) /* PHY stats gain info 1 */ +#define B43_NPHY_PHYSTAT_FREQEST B43_PHY_N(0x1F2) /* PHY stats frequency estimate */ +#define 
B43_NPHY_PHYSTAT_ADVRET B43_PHY_N(0x1F3) /* PHY stats ADV retard */ +#define B43_NPHY_PHYLB_MODE B43_PHY_N(0x1F4) /* PHY loopback mode */ +#define B43_NPHY_TONE_MIDX20_1 B43_PHY_N(0x1F5) /* Tone map index 20/1 */ +#define B43_NPHY_TONE_MIDX20_2 B43_PHY_N(0x1F6) /* Tone map index 20/2 */ +#define B43_NPHY_TONE_MIDX20_3 B43_PHY_N(0x1F7) /* Tone map index 20/3 */ +#define B43_NPHY_TONE_MIDX40_1 B43_PHY_N(0x1F8) /* Tone map index 40/1 */ +#define B43_NPHY_TONE_MIDX40_2 B43_PHY_N(0x1F9) /* Tone map index 40/2 */ +#define B43_NPHY_TONE_MIDX40_3 B43_PHY_N(0x1FA) /* Tone map index 40/3 */ +#define B43_NPHY_TONE_MIDX40_4 B43_PHY_N(0x1FB) /* Tone map index 40/4 */ +#define B43_NPHY_PILTONE_MIDX1 B43_PHY_N(0x1FC) /* Pilot tone map index 1 */ +#define B43_NPHY_PILTONE_MIDX2 B43_PHY_N(0x1FD) /* Pilot tone map index 2 */ +#define B43_NPHY_PILTONE_MIDX3 B43_PHY_N(0x1FE) /* Pilot tone map index 3 */ +#define B43_NPHY_TXRIFS_FRDEL B43_PHY_N(0x1FF) /* TX RIFS frame delay */ +#define B43_NPHY_AFESEQ_RX2TX_PUD_40M B43_PHY_N(0x200) /* AFE seq rx2tx power up/down delay 40M */ +#define B43_NPHY_AFESEQ_TX2RX_PUD_40M B43_PHY_N(0x201) /* AFE seq tx2rx power up/down delay 40M */ +#define B43_NPHY_AFESEQ_RX2TX_PUD_20M B43_PHY_N(0x202) /* AFE seq rx2tx power up/down delay 20M */ +#define B43_NPHY_AFESEQ_TX2RX_PUD_20M B43_PHY_N(0x203) /* AFE seq tx2rx power up/down delay 20M */ +#define B43_NPHY_RX_SIGCTL B43_PHY_N(0x204) /* RX signal control */ +#define B43_NPHY_RXPIL_CYCNT0 B43_PHY_N(0x205) /* RX pilot cycle counter 0 */ +#define B43_NPHY_RXPIL_CYCNT1 B43_PHY_N(0x206) /* RX pilot cycle counter 1 */ +#define B43_NPHY_RXPIL_CYCNT2 B43_PHY_N(0x207) /* RX pilot cycle counter 2 */ +#define B43_NPHY_AFESEQ_RX2TX_PUD_10M B43_PHY_N(0x208) /* AFE seq rx2tx power up/down delay 10M */ +#define B43_NPHY_AFESEQ_TX2RX_PUD_10M B43_PHY_N(0x209) /* AFE seq tx2rx power up/down delay 10M */ +#define B43_NPHY_DSSSCCK_CRSEXTL B43_PHY_N(0x20A) /* DSSS/CCK CRS extension length */ +#define B43_NPHY_ML_LOGSS_RATSLOPE B43_PHY_N(0x20B) /* ML/logss ratio slope */ +#define B43_NPHY_RIFS_SRCTL B43_PHY_N(0x20C) /* RIFS search timeout length */ +#define B43_NPHY_TXREALFD B43_PHY_N(0x20D) /* TX real frame delay */ +#define B43_NPHY_HPANT_SWTHRES B43_PHY_N(0x20E) /* High power antenna switch threshold */ +#define B43_NPHY_EDCRS_ASSTHRES0 B43_PHY_N(0x210) /* ED CRS assert threshold 0 */ +#define B43_NPHY_EDCRS_ASSTHRES1 B43_PHY_N(0x211) /* ED CRS assert threshold 1 */ +#define B43_NPHY_EDCRS_DEASSTHRES0 B43_PHY_N(0x212) /* ED CRS deassert threshold 0 */ +#define B43_NPHY_EDCRS_DEASSTHRES1 B43_PHY_N(0x213) /* ED CRS deassert threshold 1 */ +#define B43_NPHY_STR_WTIME20U B43_PHY_N(0x214) /* STR wait time 20U */ +#define B43_NPHY_STR_WTIME20L B43_PHY_N(0x215) /* STR wait time 20L */ +#define B43_NPHY_TONE_MIDX657M B43_PHY_N(0x216) /* Tone map index 657M */ +#define B43_NPHY_HTSIGTONES B43_PHY_N(0x217) /* HT signal tones */ +#define B43_NPHY_RSSI1 B43_PHY_N(0x219) /* RSSI value 1 */ +#define B43_NPHY_RSSI2 B43_PHY_N(0x21A) /* RSSI value 2 */ +#define B43_NPHY_CHAN_ESTHANG B43_PHY_N(0x21D) /* Channel estimate hang */ +#define B43_NPHY_FINERX2_CGC B43_PHY_N(0x221) /* Fine RX 2 clock gate control */ +#define B43_NPHY_FINERX2_CGC_DECGC 0x0008 /* Decode gated clocks */ +#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */ +#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */ +#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0 +#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */ +#define B43_NPHY_EPS_TABLE_ADJ0 
B43_PHY_N(0x298) /* EPS Table Adj0 TBD */ +#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */ +#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */ + + + +/* Broadcom 2055 radio registers */ + +#define B2055_GEN_SPARE 0x00 /* GEN spare */ +#define B2055_SP_PINPD 0x02 /* SP PIN PD */ +#define B2055_C1_SP_RSSI 0x03 /* SP RSSI Core 1 */ +#define B2055_C1_SP_PDMISC 0x04 /* SP PD MISC Core 1 */ +#define B2055_C2_SP_RSSI 0x05 /* SP RSSI Core 2 */ +#define B2055_C2_SP_PDMISC 0x06 /* SP PD MISC Core 2 */ +#define B2055_C1_SP_RXGC1 0x07 /* SP RX GC1 Core 1 */ +#define B2055_C1_SP_RXGC2 0x08 /* SP RX GC2 Core 1 */ +#define B2055_C2_SP_RXGC1 0x09 /* SP RX GC1 Core 2 */ +#define B2055_C2_SP_RXGC2 0x0A /* SP RX GC2 Core 2 */ +#define B2055_C1_SP_LPFBWSEL 0x0B /* SP LPF BW select Core 1 */ +#define B2055_C2_SP_LPFBWSEL 0x0C /* SP LPF BW select Core 2 */ +#define B2055_C1_SP_TXGC1 0x0D /* SP TX GC1 Core 1 */ +#define B2055_C1_SP_TXGC2 0x0E /* SP TX GC2 Core 1 */ +#define B2055_C2_SP_TXGC1 0x0F /* SP TX GC1 Core 2 */ +#define B2055_C2_SP_TXGC2 0x10 /* SP TX GC2 Core 2 */ +#define B2055_MASTER1 0x11 /* Master control 1 */ +#define B2055_MASTER2 0x12 /* Master control 2 */ +#define B2055_PD_LGEN 0x13 /* PD LGEN */ +#define B2055_PD_PLLTS 0x14 /* PD PLL TS */ +#define B2055_C1_PD_LGBUF 0x15 /* PD Core 1 LGBUF */ +#define B2055_C1_PD_TX 0x16 /* PD Core 1 TX */ +#define B2055_C1_PD_RXTX 0x17 /* PD Core 1 RXTX */ +#define B2055_C1_PD_RSSIMISC 0x18 /* PD Core 1 RSSI MISC */ +#define B2055_C2_PD_LGBUF 0x19 /* PD Core 2 LGBUF */ +#define B2055_C2_PD_TX 0x1A /* PD Core 2 TX */ +#define B2055_C2_PD_RXTX 0x1B /* PD Core 2 RXTX */ +#define B2055_C2_PD_RSSIMISC 0x1C /* PD Core 2 RSSI MISC */ +#define B2055_PWRDET_LGEN 0x1D /* PWRDET LGEN */ +#define B2055_C1_PWRDET_LGBUF 0x1E /* PWRDET LGBUF Core 1 */ +#define B2055_C1_PWRDET_RXTX 0x1F /* PWRDET RXTX Core 1 */ +#define B2055_C2_PWRDET_LGBUF 0x20 /* PWRDET LGBUF Core 2 */ +#define B2055_C2_PWRDET_RXTX 0x21 /* PWRDET RXTX Core 2 */ +#define B2055_RRCCAL_CS 0x22 /* RRCCAL Control spare */ +#define B2055_RRCCAL_NOPTSEL 0x23 /* RRCCAL N OPT SEL */ +#define B2055_CAL_MISC 0x24 /* CAL MISC */ +#define B2055_CAL_COUT 0x25 /* CAL Counter out */ +#define B2055_CAL_COUT2 0x26 /* CAL Counter out 2 */ +#define B2055_CAL_CVARCTL 0x27 /* CAL CVAR Control */ +#define B2055_CAL_RVARCTL 0x28 /* CAL RVAR Control */ +#define B2055_CAL_LPOCTL 0x29 /* CAL LPO Control */ +#define B2055_CAL_TS 0x2A /* CAL TS */ +#define B2055_CAL_RCCALRTS 0x2B /* CAL RCCAL READ TS */ +#define B2055_CAL_RCALRTS 0x2C /* CAL RCAL READ TS */ +#define B2055_PADDRV 0x2D /* PAD driver */ +#define B2055_XOCTL1 0x2E /* XO Control 1 */ +#define B2055_XOCTL2 0x2F /* XO Control 2 */ +#define B2055_XOREGUL 0x30 /* XO Regulator */ +#define B2055_XOMISC 0x31 /* XO misc */ +#define B2055_PLL_LFC1 0x32 /* PLL LF C1 */ +#define B2055_PLL_CALVTH 0x33 /* PLL CAL VTH */ +#define B2055_PLL_LFC2 0x34 /* PLL LF C2 */ +#define B2055_PLL_REF 0x35 /* PLL reference */ +#define B2055_PLL_LFR1 0x36 /* PLL LF R1 */ +#define B2055_PLL_PFDCP 0x37 /* PLL PFD CP */ +#define B2055_PLL_IDAC_CPOPAMP 0x38 /* PLL IDAC CPOPAMP */ +#define B2055_PLL_CPREG 0x39 /* PLL CP Regulator */ +#define B2055_PLL_RCAL 0x3A /* PLL RCAL */ +#define B2055_RF_PLLMOD0 0x3B /* RF PLL MOD0 */ +#define B2055_RF_PLLMOD1 0x3C /* RF PLL MOD1 */ +#define B2055_RF_MMDIDAC1 0x3D /* RF MMD IDAC 1 */ +#define B2055_RF_MMDIDAC0 0x3E /* RF MMD IDAC 0 */ +#define B2055_RF_MMDSP 0x3F /* RF MMD spare */ +#define B2055_VCO_CAL1 0x40 /* VCO cal 1 */ 
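Editor's sketch (not part of the patch): the mask/shift pairs in the N-PHY block above, for example the TX power control status fields of B43_NPHY_C1_TXPCTL_STAT, are meant to be combined with the driver's generic PHY register accessors declared elsewhere in the driver. A minimal, hedged example; the helper name itself is hypothetical and b43_phy_read() is assumed to be the usual 16-bit PHY read accessor:

static u8 nphy_read_estimated_power(struct b43_wldev *dev)
{
	u16 stat = b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT);

	if (!(stat & B43_NPHY_TXPCTL_STAT_ESTVALID))
		return 0;	/* no valid power estimate yet */
	/* Extract the packed field with its mask and shift. */
	return (stat & B43_NPHY_TXPCTL_STAT_EST) >> B43_NPHY_TXPCTL_STAT_EST_SHIFT;
}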
+#define B2055_VCO_CAL2 0x41 /* VCO cal 2 */ +#define B2055_VCO_CAL3 0x42 /* VCO cal 3 */ +#define B2055_VCO_CAL4 0x43 /* VCO cal 4 */ +#define B2055_VCO_CAL5 0x44 /* VCO cal 5 */ +#define B2055_VCO_CAL6 0x45 /* VCO cal 6 */ +#define B2055_VCO_CAL7 0x46 /* VCO cal 7 */ +#define B2055_VCO_CAL8 0x47 /* VCO cal 8 */ +#define B2055_VCO_CAL9 0x48 /* VCO cal 9 */ +#define B2055_VCO_CAL10 0x49 /* VCO cal 10 */ +#define B2055_VCO_CAL11 0x4A /* VCO cal 11 */ +#define B2055_VCO_CAL12 0x4B /* VCO cal 12 */ +#define B2055_VCO_CAL13 0x4C /* VCO cal 13 */ +#define B2055_VCO_CAL14 0x4D /* VCO cal 14 */ +#define B2055_VCO_CAL15 0x4E /* VCO cal 15 */ +#define B2055_VCO_CAL16 0x4F /* VCO cal 16 */ +#define B2055_VCO_KVCO 0x50 /* VCO KVCO */ +#define B2055_VCO_CAPTAIL 0x51 /* VCO CAP TAIL */ +#define B2055_VCO_IDACVCO 0x52 /* VCO IDAC VCO */ +#define B2055_VCO_REG 0x53 /* VCO Regulator */ +#define B2055_PLL_RFVTH 0x54 /* PLL RF VTH */ +#define B2055_LGBUF_CENBUF 0x55 /* LGBUF CEN BUF */ +#define B2055_LGEN_TUNE1 0x56 /* LGEN tune 1 */ +#define B2055_LGEN_TUNE2 0x57 /* LGEN tune 2 */ +#define B2055_LGEN_IDAC1 0x58 /* LGEN IDAC 1 */ +#define B2055_LGEN_IDAC2 0x59 /* LGEN IDAC 2 */ +#define B2055_LGEN_BIASC 0x5A /* LGEN BIAS counter */ +#define B2055_LGEN_BIASIDAC 0x5B /* LGEN BIAS IDAC */ +#define B2055_LGEN_RCAL 0x5C /* LGEN RCAL */ +#define B2055_LGEN_DIV 0x5D /* LGEN div */ +#define B2055_LGEN_SPARE2 0x5E /* LGEN spare 2 */ +#define B2055_C1_LGBUF_ATUNE 0x5F /* Core 1 LGBUF A tune */ +#define B2055_C1_LGBUF_GTUNE 0x60 /* Core 1 LGBUF G tune */ +#define B2055_C1_LGBUF_DIV 0x61 /* Core 1 LGBUF div */ +#define B2055_C1_LGBUF_AIDAC 0x62 /* Core 1 LGBUF A IDAC */ +#define B2055_C1_LGBUF_GIDAC 0x63 /* Core 1 LGBUF G IDAC */ +#define B2055_C1_LGBUF_IDACFO 0x64 /* Core 1 LGBUF IDAC filter override */ +#define B2055_C1_LGBUF_SPARE 0x65 /* Core 1 LGBUF spare */ +#define B2055_C1_RX_RFSPC1 0x66 /* Core 1 RX RF SPC1 */ +#define B2055_C1_RX_RFR1 0x67 /* Core 1 RX RF reg 1 */ +#define B2055_C1_RX_RFR2 0x68 /* Core 1 RX RF reg 2 */ +#define B2055_C1_RX_RFRCAL 0x69 /* Core 1 RX RF RCAL */ +#define B2055_C1_RX_BB_BLCMP 0x6A /* Core 1 RX Baseband BUFI LPF CMP */ +#define B2055_C1_RX_BB_LPF 0x6B /* Core 1 RX Baseband LPF */ +#define B2055_C1_RX_BB_MIDACHP 0x6C /* Core 1 RX Baseband MIDAC High-pass */ +#define B2055_C1_RX_BB_VGA1IDAC 0x6D /* Core 1 RX Baseband VGA1 IDAC */ +#define B2055_C1_RX_BB_VGA2IDAC 0x6E /* Core 1 RX Baseband VGA2 IDAC */ +#define B2055_C1_RX_BB_VGA3IDAC 0x6F /* Core 1 RX Baseband VGA3 IDAC */ +#define B2055_C1_RX_BB_BUFOCTL 0x70 /* Core 1 RX Baseband BUFO Control */ +#define B2055_C1_RX_BB_RCCALCTL 0x71 /* Core 1 RX Baseband RCCAL Control */ +#define B2055_C1_RX_BB_RSSICTL1 0x72 /* Core 1 RX Baseband RSSI Control 1 */ +#define B2055_C1_RX_BB_RSSICTL2 0x73 /* Core 1 RX Baseband RSSI Control 2 */ +#define B2055_C1_RX_BB_RSSICTL3 0x74 /* Core 1 RX Baseband RSSI Control 3 */ +#define B2055_C1_RX_BB_RSSICTL4 0x75 /* Core 1 RX Baseband RSSI Control 4 */ +#define B2055_C1_RX_BB_RSSICTL5 0x76 /* Core 1 RX Baseband RSSI Control 5 */ +#define B2055_C1_RX_BB_REG 0x77 /* Core 1 RX Baseband Regulator */ +#define B2055_C1_RX_BB_SPARE1 0x78 /* Core 1 RX Baseband spare 1 */ +#define B2055_C1_RX_TXBBRCAL 0x79 /* Core 1 RX TX BB RCAL */ +#define B2055_C1_TX_RF_SPGA 0x7A /* Core 1 TX RF SGM PGA */ +#define B2055_C1_TX_RF_SPAD 0x7B /* Core 1 TX RF SGM PAD */ +#define B2055_C1_TX_RF_CNTPGA1 0x7C /* Core 1 TX RF counter PGA 1 */ +#define B2055_C1_TX_RF_CNTPAD1 0x7D /* Core 1 TX RF counter PAD 1 */ +#define 
B2055_C1_TX_RF_PGAIDAC 0x7E /* Core 1 TX RF PGA IDAC */ +#define B2055_C1_TX_PGAPADTN 0x7F /* Core 1 TX PGA PAD TN */ +#define B2055_C1_TX_PADIDAC1 0x80 /* Core 1 TX PAD IDAC 1 */ +#define B2055_C1_TX_PADIDAC2 0x81 /* Core 1 TX PAD IDAC 2 */ +#define B2055_C1_TX_MXBGTRIM 0x82 /* Core 1 TX MX B/G TRIM */ +#define B2055_C1_TX_RF_RCAL 0x83 /* Core 1 TX RF RCAL */ +#define B2055_C1_TX_RF_PADTSSI1 0x84 /* Core 1 TX RF PAD TSSI1 */ +#define B2055_C1_TX_RF_PADTSSI2 0x85 /* Core 1 TX RF PAD TSSI2 */ +#define B2055_C1_TX_RF_SPARE 0x86 /* Core 1 TX RF spare */ +#define B2055_C1_TX_RF_IQCAL1 0x87 /* Core 1 TX RF I/Q CAL 1 */ +#define B2055_C1_TX_RF_IQCAL2 0x88 /* Core 1 TX RF I/Q CAL 2 */ +#define B2055_C1_TXBB_RCCAL 0x89 /* Core 1 TXBB RC CAL Control */ +#define B2055_C1_TXBB_LPF1 0x8A /* Core 1 TXBB LPF 1 */ +#define B2055_C1_TX_VOSCNCL 0x8B /* Core 1 TX VOS CNCL */ +#define B2055_C1_TX_LPF_MXGMIDAC 0x8C /* Core 1 TX LPF MXGM IDAC */ +#define B2055_C1_TX_BB_MXGM 0x8D /* Core 1 TX BB MXGM */ +#define B2055_C2_LGBUF_ATUNE 0x8E /* Core 2 LGBUF A tune */ +#define B2055_C2_LGBUF_GTUNE 0x8F /* Core 2 LGBUF G tune */ +#define B2055_C2_LGBUF_DIV 0x90 /* Core 2 LGBUF div */ +#define B2055_C2_LGBUF_AIDAC 0x91 /* Core 2 LGBUF A IDAC */ +#define B2055_C2_LGBUF_GIDAC 0x92 /* Core 2 LGBUF G IDAC */ +#define B2055_C2_LGBUF_IDACFO 0x93 /* Core 2 LGBUF IDAC filter override */ +#define B2055_C2_LGBUF_SPARE 0x94 /* Core 2 LGBUF spare */ +#define B2055_C2_RX_RFSPC1 0x95 /* Core 2 RX RF SPC1 */ +#define B2055_C2_RX_RFR1 0x96 /* Core 2 RX RF reg 1 */ +#define B2055_C2_RX_RFR2 0x97 /* Core 2 RX RF reg 2 */ +#define B2055_C2_RX_RFRCAL 0x98 /* Core 2 RX RF RCAL */ +#define B2055_C2_RX_BB_BLCMP 0x99 /* Core 2 RX Baseband BUFI LPF CMP */ +#define B2055_C2_RX_BB_LPF 0x9A /* Core 2 RX Baseband LPF */ +#define B2055_C2_RX_BB_MIDACHP 0x9B /* Core 2 RX Baseband MIDAC High-pass */ +#define B2055_C2_RX_BB_VGA1IDAC 0x9C /* Core 2 RX Baseband VGA1 IDAC */ +#define B2055_C2_RX_BB_VGA2IDAC 0x9D /* Core 2 RX Baseband VGA2 IDAC */ +#define B2055_C2_RX_BB_VGA3IDAC 0x9E /* Core 2 RX Baseband VGA3 IDAC */ +#define B2055_C2_RX_BB_BUFOCTL 0x9F /* Core 2 RX Baseband BUFO Control */ +#define B2055_C2_RX_BB_RCCALCTL 0xA0 /* Core 2 RX Baseband RCCAL Control */ +#define B2055_C2_RX_BB_RSSICTL1 0xA1 /* Core 2 RX Baseband RSSI Control 1 */ +#define B2055_C2_RX_BB_RSSICTL2 0xA2 /* Core 2 RX Baseband RSSI Control 2 */ +#define B2055_C2_RX_BB_RSSICTL3 0xA3 /* Core 2 RX Baseband RSSI Control 3 */ +#define B2055_C2_RX_BB_RSSICTL4 0xA4 /* Core 2 RX Baseband RSSI Control 4 */ +#define B2055_C2_RX_BB_RSSICTL5 0xA5 /* Core 2 RX Baseband RSSI Control 5 */ +#define B2055_C2_RX_BB_REG 0xA6 /* Core 2 RX Baseband Regulator */ +#define B2055_C2_RX_BB_SPARE1 0xA7 /* Core 2 RX Baseband spare 1 */ +#define B2055_C2_RX_TXBBRCAL 0xA8 /* Core 2 RX TX BB RCAL */ +#define B2055_C2_TX_RF_SPGA 0xA9 /* Core 2 TX RF SGM PGA */ +#define B2055_C2_TX_RF_SPAD 0xAA /* Core 2 TX RF SGM PAD */ +#define B2055_C2_TX_RF_CNTPGA1 0xAB /* Core 2 TX RF counter PGA 1 */ +#define B2055_C2_TX_RF_CNTPAD1 0xAC /* Core 2 TX RF counter PAD 1 */ +#define B2055_C2_TX_RF_PGAIDAC 0xAD /* Core 2 TX RF PGA IDAC */ +#define B2055_C2_TX_PGAPADTN 0xAE /* Core 2 TX PGA PAD TN */ +#define B2055_C2_TX_PADIDAC1 0xAF /* Core 2 TX PAD IDAC 1 */ +#define B2055_C2_TX_PADIDAC2 0xB0 /* Core 2 TX PAD IDAC 2 */ +#define B2055_C2_TX_MXBGTRIM 0xB1 /* Core 2 TX MX B/G TRIM */ +#define B2055_C2_TX_RF_RCAL 0xB2 /* Core 2 TX RF RCAL */ +#define B2055_C2_TX_RF_PADTSSI1 0xB3 /* Core 2 TX RF PAD TSSI1 */ +#define 
B2055_C2_TX_RF_PADTSSI2 0xB4 /* Core 2 TX RF PAD TSSI2 */ +#define B2055_C2_TX_RF_SPARE 0xB5 /* Core 2 TX RF spare */ +#define B2055_C2_TX_RF_IQCAL1 0xB6 /* Core 2 TX RF I/Q CAL 1 */ +#define B2055_C2_TX_RF_IQCAL2 0xB7 /* Core 2 TX RF I/Q CAL 2 */ +#define B2055_C2_TXBB_RCCAL 0xB8 /* Core 2 TXBB RC CAL Control */ +#define B2055_C2_TXBB_LPF1 0xB9 /* Core 2 TXBB LPF 1 */ +#define B2055_C2_TX_VOSCNCL 0xBA /* Core 2 TX VOS CNCL */ +#define B2055_C2_TX_LPF_MXGMIDAC 0xBB /* Core 2 TX LPF MXGM IDAC */ +#define B2055_C2_TX_BB_MXGM 0xBC /* Core 2 TX BB MXGM */ +#define B2055_PRG_GCHP21 0xBD /* PRG GC HPVGA23 21 */ +#define B2055_PRG_GCHP22 0xBE /* PRG GC HPVGA23 22 */ +#define B2055_PRG_GCHP23 0xBF /* PRG GC HPVGA23 23 */ +#define B2055_PRG_GCHP24 0xC0 /* PRG GC HPVGA23 24 */ +#define B2055_PRG_GCHP25 0xC1 /* PRG GC HPVGA23 25 */ +#define B2055_PRG_GCHP26 0xC2 /* PRG GC HPVGA23 26 */ +#define B2055_PRG_GCHP27 0xC3 /* PRG GC HPVGA23 27 */ +#define B2055_PRG_GCHP28 0xC4 /* PRG GC HPVGA23 28 */ +#define B2055_PRG_GCHP29 0xC5 /* PRG GC HPVGA23 29 */ +#define B2055_PRG_GCHP30 0xC6 /* PRG GC HPVGA23 30 */ +#define B2055_C1_LNA_GAINBST 0xCD /* Core 1 LNA GAINBST */ +#define B2055_C1_B0NB_RSSIVCM 0xD2 /* Core 1 B0 narrow-band RSSI VCM */ +#define B2055_C1_GENSPARE2 0xD6 /* Core 1 GEN spare 2 */ +#define B2055_C2_LNA_GAINBST 0xD9 /* Core 2 LNA GAINBST */ +#define B2055_C2_B0NB_RSSIVCM 0xDE /* Core 2 B0 narrow-band RSSI VCM */ +#define B2055_C2_GENSPARE2 0xE2 /* Core 2 GEN spare 2 */ + + + +struct b43_wldev; + +struct b43_phy_n_iq_comp { + s16 a0; + s16 b0; + s16 a1; + s16 b1; +}; + +struct b43_phy_n_rssical_cache { + u16 rssical_radio_regs_2G[2]; + u16 rssical_phy_regs_2G[12]; + + u16 rssical_radio_regs_5G[2]; + u16 rssical_phy_regs_5G[12]; +}; + +struct b43_phy_n_cal_cache { + u16 txcal_radio_regs_2G[8]; + u16 txcal_coeffs_2G[8]; + struct b43_phy_n_iq_comp rxcal_coeffs_2G; + + u16 txcal_radio_regs_5G[8]; + u16 txcal_coeffs_5G[8]; + struct b43_phy_n_iq_comp rxcal_coeffs_5G; +}; + +struct b43_phy_n_txpwrindex { + s8 index; + s8 index_internal; + s8 index_internal_save; + u16 AfectrlOverride; + u16 AfeCtrlDacGain; + u16 rad_gain; + u8 bbmult; + u16 iqcomp_a; + u16 iqcomp_b; + u16 locomp; +}; + +struct b43_phy_n { + u8 antsel_type; + u8 cal_orig_pwr_idx[2]; + u8 measure_hold; + u8 phyrxchain; + u8 perical; + u32 deaf_count; + u32 rxcalparams; + bool hang_avoid; + bool mute; + u16 papd_epsilon_offset[2]; + s32 preamble_override; + u32 bb_mult_save; + u16 radio_chanspec; + + bool gain_boost; + bool elna_gain_config; + bool band5g_pwrgain; + + u8 mphase_cal_phase_id; + u16 mphase_txcal_cmdidx; + u16 mphase_txcal_numcmds; + u16 mphase_txcal_bestcoeffs[11]; + + u8 txpwrctrl; + u16 txcal_bbmult; + u16 txiqlocal_bestc[11]; + bool txiqlocal_coeffsvalid; + struct b43_phy_n_txpwrindex txpwrindex[2]; + + u8 txrx_chain; + u16 tx_rx_cal_phy_saveregs[11]; + u16 tx_rx_cal_radio_saveregs[22]; + + u16 rfctrl_intc1_save; + u16 rfctrl_intc2_save; + + u16 classifier_state; + u16 clip_state[2]; + + bool aband_spurwar_en; + bool gband_spurwar_en; + + bool ipa2g_on; + u8 iqcal_chanspec_2G; + u8 rssical_chanspec_2G; + + bool ipa5g_on; + u8 iqcal_chanspec_5G; + u8 rssical_chanspec_5G; + + struct b43_phy_n_rssical_cache rssical_cache; + struct b43_phy_n_cal_cache cal_cache; + bool crsminpwr_adjusted; + bool noisevars_adjusted; +}; + + +struct b43_phy_operations; +extern const struct b43_phy_operations b43_phyops_n; + +#endif /* B43_NPHY_H_ */ diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c new file mode 
100644 index 0000000..a6062c3 --- /dev/null +++ b/drivers/net/wireless/b43/pio.c @@ -0,0 +1,815 @@ +/* + + Broadcom B43 wireless driver + + PIO data transfer + + Copyright (c) 2005-2008 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "pio.h" +#include "dma.h" +#include "main.h" +#include "xmit.h" + +#include +#include + + +static u16 generate_cookie(struct b43_pio_txqueue *q, + struct b43_pio_txpacket *pack) +{ + u16 cookie; + + /* Use the upper 4 bits of the cookie as + * PIO controller ID and store the packet index number + * in the lower 12 bits. + * Note that the cookie must never be 0, as this + * is a special value used in RX path. + * It can also not be 0xFFFF because that is special + * for multicast frames. + */ + cookie = (((u16)q->index + 1) << 12); + cookie |= pack->index; + + return cookie; +} + +static +struct b43_pio_txqueue *parse_cookie(struct b43_wldev *dev, + u16 cookie, + struct b43_pio_txpacket **pack) +{ + struct b43_pio *pio = &dev->pio; + struct b43_pio_txqueue *q = NULL; + unsigned int pack_index; + + switch (cookie & 0xF000) { + case 0x1000: + q = pio->tx_queue_AC_BK; + break; + case 0x2000: + q = pio->tx_queue_AC_BE; + break; + case 0x3000: + q = pio->tx_queue_AC_VI; + break; + case 0x4000: + q = pio->tx_queue_AC_VO; + break; + case 0x5000: + q = pio->tx_queue_mcast; + break; + } + if (B43_WARN_ON(!q)) + return NULL; + pack_index = (cookie & 0x0FFF); + if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets))) + return NULL; + *pack = &q->packets[pack_index]; + + return q; +} + +static u16 index_to_pioqueue_base(struct b43_wldev *dev, + unsigned int index) +{ + static const u16 bases[] = { + B43_MMIO_PIO_BASE0, + B43_MMIO_PIO_BASE1, + B43_MMIO_PIO_BASE2, + B43_MMIO_PIO_BASE3, + B43_MMIO_PIO_BASE4, + B43_MMIO_PIO_BASE5, + B43_MMIO_PIO_BASE6, + B43_MMIO_PIO_BASE7, + }; + static const u16 bases_rev11[] = { + B43_MMIO_PIO11_BASE0, + B43_MMIO_PIO11_BASE1, + B43_MMIO_PIO11_BASE2, + B43_MMIO_PIO11_BASE3, + B43_MMIO_PIO11_BASE4, + B43_MMIO_PIO11_BASE5, + }; + + if (dev->dev->id.revision >= 11) { + B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11)); + return bases_rev11[index]; + } + B43_WARN_ON(index >= ARRAY_SIZE(bases)); + return bases[index]; +} + +static u16 pio_txqueue_offset(struct b43_wldev *dev) +{ + if (dev->dev->id.revision >= 11) + return 0x18; + return 0; +} + +static u16 pio_rxqueue_offset(struct b43_wldev *dev) +{ + if (dev->dev->id.revision >= 11) + return 0x38; + return 8; +} + +static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev, + unsigned int index) +{ + struct b43_pio_txqueue *q; + struct b43_pio_txpacket *p; + unsigned int i; + + q = kzalloc(sizeof(*q), GFP_KERNEL); + if (!q) + return NULL; + q->dev = dev; + q->rev = dev->dev->id.revision; + q->mmio_base = index_to_pioqueue_base(dev, index) + + pio_txqueue_offset(dev); 
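	/*
	 * (Editor's note, not part of the patch: a worked example of the
	 * cookie layout used by generate_cookie()/parse_cookie() above,
	 * assuming the queue numbering chosen in b43_pio_init() below.
	 * With q->index = 1 (AC_BE) and pack->index = 5:
	 *     cookie = ((1 + 1) << 12) | 5 = 0x2005
	 * and parse_cookie(0x2005) maps 0x2000 back to pio->tx_queue_AC_BE
	 * and 0x005 to &q->packets[5]. Storing index + 1 guarantees a
	 * non-zero cookie, and with at most five queues and packet indices
	 * below 0x1000 the reserved multicast value 0xFFFF can never occur.)
	 */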
+ q->index = index; + + q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS; + if (q->rev >= 8) { + q->buffer_size = 1920; //FIXME this constant is wrong. + } else { + q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE); + q->buffer_size -= 80; + } + + INIT_LIST_HEAD(&q->packets_list); + for (i = 0; i < ARRAY_SIZE(q->packets); i++) { + p = &(q->packets[i]); + INIT_LIST_HEAD(&p->list); + p->index = i; + p->queue = q; + list_add(&p->list, &q->packets_list); + } + + return q; +} + +static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev, + unsigned int index) +{ + struct b43_pio_rxqueue *q; + + q = kzalloc(sizeof(*q), GFP_KERNEL); + if (!q) + return NULL; + q->dev = dev; + q->rev = dev->dev->id.revision; + q->mmio_base = index_to_pioqueue_base(dev, index) + + pio_rxqueue_offset(dev); + + /* Enable Direct FIFO RX (PIO) on the engine. */ + b43_dma_direct_fifo_rx(dev, index, 1); + + return q; +} + +static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q) +{ + struct b43_pio_txpacket *pack; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(q->packets); i++) { + pack = &(q->packets[i]); + if (pack->skb) { + dev_kfree_skb_any(pack->skb); + pack->skb = NULL; + } + } +} + +static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q, + const char *name) +{ + if (!q) + return; + b43_pio_cancel_tx_packets(q); + kfree(q); +} + +static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q, + const char *name) +{ + if (!q) + return; + kfree(q); +} + +#define destroy_queue_tx(pio, queue) do { \ + b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue)); \ + (pio)->queue = NULL; \ + } while (0) + +#define destroy_queue_rx(pio, queue) do { \ + b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue)); \ + (pio)->queue = NULL; \ + } while (0) + +void b43_pio_free(struct b43_wldev *dev) +{ + struct b43_pio *pio; + + if (!b43_using_pio_transfers(dev)) + return; + pio = &dev->pio; + + destroy_queue_rx(pio, rx_queue); + destroy_queue_tx(pio, tx_queue_mcast); + destroy_queue_tx(pio, tx_queue_AC_VO); + destroy_queue_tx(pio, tx_queue_AC_VI); + destroy_queue_tx(pio, tx_queue_AC_BE); + destroy_queue_tx(pio, tx_queue_AC_BK); +} + +int b43_pio_init(struct b43_wldev *dev) +{ + struct b43_pio *pio = &dev->pio; + int err = -ENOMEM; + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_BE); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0); + + pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0); + if (!pio->tx_queue_AC_BK) + goto out; + + pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1); + if (!pio->tx_queue_AC_BE) + goto err_destroy_bk; + + pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2); + if (!pio->tx_queue_AC_VI) + goto err_destroy_be; + + pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3); + if (!pio->tx_queue_AC_VO) + goto err_destroy_vi; + + pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4); + if (!pio->tx_queue_mcast) + goto err_destroy_vo; + + pio->rx_queue = b43_setup_pioqueue_rx(dev, 0); + if (!pio->rx_queue) + goto err_destroy_mcast; + + b43dbg(dev->wl, "PIO initialized\n"); + err = 0; +out: + return err; + +err_destroy_mcast: + destroy_queue_tx(pio, tx_queue_mcast); +err_destroy_vo: + destroy_queue_tx(pio, tx_queue_AC_VO); +err_destroy_vi: + destroy_queue_tx(pio, tx_queue_AC_VI); +err_destroy_be: + destroy_queue_tx(pio, tx_queue_AC_BE); +err_destroy_bk: + destroy_queue_tx(pio, tx_queue_AC_BK); + return err; +} + +/* Static mapping of mac80211's queues (priorities) to b43 PIO queues. 
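 * (Editor's note, not part of the patch: with QoS enabled the switch below
 * maps mac80211 queue 0 -> AC_VO, 1 -> AC_VI, 2 -> AC_BE and 3 -> AC_BK,
 * 0 being the highest priority; with QoS disabled every frame is funneled
 * into AC_BE.)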
*/ +static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev, + u8 queue_prio) +{ + struct b43_pio_txqueue *q; + + if (dev->qos_enabled) { + /* 0 = highest priority */ + switch (queue_prio) { + default: + B43_WARN_ON(1); + /* fallthrough */ + case 0: + q = dev->pio.tx_queue_AC_VO; + break; + case 1: + q = dev->pio.tx_queue_AC_VI; + break; + case 2: + q = dev->pio.tx_queue_AC_BE; + break; + case 3: + q = dev->pio.tx_queue_AC_BK; + break; + } + } else + q = dev->pio.tx_queue_AC_BE; + + return q; +} + +static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, + u16 ctl, + const void *_data, + unsigned int data_len) +{ + struct b43_wldev *dev = q->dev; + struct b43_wl *wl = dev->wl; + const u8 *data = _data; + + ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI; + b43_piotx_write16(q, B43_PIO_TXCTL, ctl); + + ssb_block_write(dev->dev, data, (data_len & ~1), + q->mmio_base + B43_PIO_TXDATA, + sizeof(u16)); + if (data_len & 1) { + u8 *tail = wl->pio_tailspace; + BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2); + + /* Write the last byte. */ + ctl &= ~B43_PIO_TXCTL_WRITEHI; + b43_piotx_write16(q, B43_PIO_TXCTL, ctl); + tail[0] = data[data_len - 1]; + tail[1] = 0; + ssb_block_write(dev->dev, tail, 2, + q->mmio_base + B43_PIO_TXDATA, + sizeof(u16)); + } + + return ctl; +} + +static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack, + const u8 *hdr, unsigned int hdrlen) +{ + struct b43_pio_txqueue *q = pack->queue; + const char *frame = pack->skb->data; + unsigned int frame_len = pack->skb->len; + u16 ctl; + + ctl = b43_piotx_read16(q, B43_PIO_TXCTL); + ctl |= B43_PIO_TXCTL_FREADY; + ctl &= ~B43_PIO_TXCTL_EOF; + + /* Transfer the header data. */ + ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen); + /* Transfer the frame data. */ + ctl = tx_write_2byte_queue(q, ctl, frame, frame_len); + + ctl |= B43_PIO_TXCTL_EOF; + b43_piotx_write16(q, B43_PIO_TXCTL, ctl); +} + +static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q, + u32 ctl, + const void *_data, + unsigned int data_len) +{ + struct b43_wldev *dev = q->dev; + struct b43_wl *wl = dev->wl; + const u8 *data = _data; + + ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 | + B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31; + b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); + + ssb_block_write(dev->dev, data, (data_len & ~3), + q->mmio_base + B43_PIO8_TXDATA, + sizeof(u32)); + if (data_len & 3) { + u8 *tail = wl->pio_tailspace; + BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4); + + memset(tail, 0, 4); + /* Write the last few bytes. */ + ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | + B43_PIO8_TXCTL_24_31); + switch (data_len & 3) { + case 3: + ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15; + tail[0] = data[data_len - 3]; + tail[1] = data[data_len - 2]; + tail[2] = data[data_len - 1]; + break; + case 2: + ctl |= B43_PIO8_TXCTL_8_15; + tail[0] = data[data_len - 2]; + tail[1] = data[data_len - 1]; + break; + case 1: + tail[0] = data[data_len - 1]; + break; + } + b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); + ssb_block_write(dev->dev, tail, 4, + q->mmio_base + B43_PIO8_TXDATA, + sizeof(u32)); + } + + return ctl; +} + +static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack, + const u8 *hdr, unsigned int hdrlen) +{ + struct b43_pio_txqueue *q = pack->queue; + const char *frame = pack->skb->data; + unsigned int frame_len = pack->skb->len; + u32 ctl; + + ctl = b43_piotx_read32(q, B43_PIO8_TXCTL); + ctl |= B43_PIO8_TXCTL_FREADY; + ctl &= ~B43_PIO8_TXCTL_EOF; + + /* Transfer the header data. 
*/ + ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen); + /* Transfer the frame data. */ + ctl = tx_write_4byte_queue(q, ctl, frame, frame_len); + + ctl |= B43_PIO8_TXCTL_EOF; + b43_piotx_write32(q, B43_PIO_TXCTL, ctl); +} + +static int pio_tx_frame(struct b43_pio_txqueue *q, + struct sk_buff *skb) +{ + struct b43_wldev *dev = q->dev; + struct b43_wl *wl = dev->wl; + struct b43_pio_txpacket *pack; + u16 cookie; + int err; + unsigned int hdrlen; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace; + + B43_WARN_ON(list_empty(&q->packets_list)); + pack = list_entry(q->packets_list.next, + struct b43_pio_txpacket, list); + + cookie = generate_cookie(q, pack); + hdrlen = b43_txhdr_size(dev); + BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr)); + B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen); + err = b43_generate_txhdr(dev, (u8 *)txhdr, skb, + info, cookie); + if (err) + return err; + + if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + /* Tell the firmware about the cookie of the last + * mcast frame, so it can clear the more-data bit in it. */ + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_MCASTCOOKIE, cookie); + } + + pack->skb = skb; + if (q->rev >= 8) + pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen); + else + pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen); + + /* Remove it from the list of available packet slots. + * It will be put back when we receive the status report. */ + list_del(&pack->list); + + /* Update the queue statistics. */ + q->buffer_used += roundup(skb->len + hdrlen, 4); + q->free_packet_slots -= 1; + + return 0; +} + +int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) +{ + struct b43_pio_txqueue *q; + struct ieee80211_hdr *hdr; + unsigned int hdrlen, total_len; + int err = 0; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + hdr = (struct ieee80211_hdr *)skb->data; + + if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + /* The multicast queue will be sent after the DTIM. */ + q = dev->pio.tx_queue_mcast; + /* Set the frame More-Data bit. Ucode will clear it + * for us on the last frame. */ + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } else { + /* Decide by priority where to put this frame. */ + q = select_queue_by_priority(dev, skb_get_queue_mapping(skb)); + } + + hdrlen = b43_txhdr_size(dev); + total_len = roundup(skb->len + hdrlen, 4); + + if (unlikely(total_len > q->buffer_size)) { + err = -ENOBUFS; + b43dbg(dev->wl, "PIO: TX packet longer than queue.\n"); + goto out; + } + if (unlikely(q->free_packet_slots == 0)) { + err = -ENOBUFS; + b43warn(dev->wl, "PIO: TX packet overflow.\n"); + goto out; + } + B43_WARN_ON(q->buffer_used > q->buffer_size); + + if (total_len > (q->buffer_size - q->buffer_used)) { + /* Not enough memory on the queue. */ + err = -EBUSY; + ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); + q->stopped = 1; + goto out; + } + + /* Assign the queue number to the ring (if not already done before) + * so TX status handling can use it. The mac80211-queue to b43-queue + * mapping is static, so we don't need to store it per frame. */ + q->queue_prio = skb_get_queue_mapping(skb); + + err = pio_tx_frame(q, skb); + if (unlikely(err == -ENOKEY)) { + /* Drop this packet, as we don't have the encryption key + * anymore and must not transmit it unencrypted. 
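 * (Editor's note, not part of the patch: -ENOKEY is believed to be returned
 * by b43_generate_txhdr() when the hardware key slot for this frame has
 * already been invalidated; dropping the skb here and reporting success
 * keeps mac80211 from re-queueing a frame that could only go out
 * unencrypted.)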
*/ + dev_kfree_skb_any(skb); + err = 0; + goto out; + } + if (unlikely(err)) { + b43err(dev->wl, "PIO transmission failure\n"); + goto out; + } + + B43_WARN_ON(q->buffer_used > q->buffer_size); + if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) || + (q->free_packet_slots == 0)) { + /* The queue is full. */ + ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); + q->stopped = 1; + } + +out: + return err; +} + +void b43_pio_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + struct b43_pio_txqueue *q; + struct b43_pio_txpacket *pack = NULL; + unsigned int total_len; + struct ieee80211_tx_info *info; + + q = parse_cookie(dev, status->cookie, &pack); + if (unlikely(!q)) + return; + B43_WARN_ON(!pack); + + info = IEEE80211_SKB_CB(pack->skb); + + b43_fill_txstatus_report(dev, info, status); + + total_len = pack->skb->len + b43_txhdr_size(dev); + total_len = roundup(total_len, 4); + q->buffer_used -= total_len; + q->free_packet_slots += 1; + + ieee80211_tx_status(dev->wl->hw, pack->skb); + pack->skb = NULL; + list_add(&pack->list, &q->packets_list); + + if (q->stopped) { + ieee80211_wake_queue(dev->wl->hw, q->queue_prio); + q->stopped = 0; + } +} + +/* Returns whether we should fetch another frame. */ +static bool pio_rx_frame(struct b43_pio_rxqueue *q) +{ + struct b43_wldev *dev = q->dev; + struct b43_wl *wl = dev->wl; + u16 len; + u32 macstat; + unsigned int i, padding; + struct sk_buff *skb; + const char *err_msg = NULL; + struct b43_rxhdr_fw4 *rxhdr = + (struct b43_rxhdr_fw4 *)wl->pio_scratchspace; + + BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr)); + memset(rxhdr, 0, sizeof(*rxhdr)); + + /* Check if we have data and wait for it to get ready. */ + if (q->rev >= 8) { + u32 ctl; + + ctl = b43_piorx_read32(q, B43_PIO8_RXCTL); + if (!(ctl & B43_PIO8_RXCTL_FRAMERDY)) + return 0; + b43_piorx_write32(q, B43_PIO8_RXCTL, + B43_PIO8_RXCTL_FRAMERDY); + for (i = 0; i < 10; i++) { + ctl = b43_piorx_read32(q, B43_PIO8_RXCTL); + if (ctl & B43_PIO8_RXCTL_DATARDY) + goto data_ready; + udelay(10); + } + } else { + u16 ctl; + + ctl = b43_piorx_read16(q, B43_PIO_RXCTL); + if (!(ctl & B43_PIO_RXCTL_FRAMERDY)) + return 0; + b43_piorx_write16(q, B43_PIO_RXCTL, + B43_PIO_RXCTL_FRAMERDY); + for (i = 0; i < 10; i++) { + ctl = b43_piorx_read16(q, B43_PIO_RXCTL); + if (ctl & B43_PIO_RXCTL_DATARDY) + goto data_ready; + udelay(10); + } + } + b43dbg(q->dev->wl, "PIO RX timed out\n"); + return 1; +data_ready: + + /* Get the preamble (RX header) */ + if (q->rev >= 8) { + ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr), + q->mmio_base + B43_PIO8_RXDATA, + sizeof(u32)); + } else { + ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr), + q->mmio_base + B43_PIO_RXDATA, + sizeof(u16)); + } + /* Sanity checks. */ + len = le16_to_cpu(rxhdr->frame_len); + if (unlikely(len > 0x700)) { + err_msg = "len > 0x700"; + goto rx_error; + } + if (unlikely(len == 0)) { + err_msg = "len == 0"; + goto rx_error; + } + + macstat = le32_to_cpu(rxhdr->mac_status); + if (macstat & B43_RX_MAC_FCSERR) { + if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) { + /* Drop frames with failed FCS. */ + err_msg = "Frame FCS error"; + goto rx_error; + } + } + + /* We always pad 2 bytes, as that's what upstream code expects + * due to the RX-header being 30 bytes. In case the frame is + * unaligned, we pad another 2 bytes. */ + padding = (macstat & B43_RX_MAC_PADDING) ? 
2 : 0; + skb = dev_alloc_skb(len + padding + 2); + if (unlikely(!skb)) { + err_msg = "Out of memory"; + goto rx_error; + } + skb_reserve(skb, 2); + skb_put(skb, len + padding); + if (q->rev >= 8) { + ssb_block_read(dev->dev, skb->data + padding, (len & ~3), + q->mmio_base + B43_PIO8_RXDATA, + sizeof(u32)); + if (len & 3) { + u8 *tail = wl->pio_tailspace; + BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4); + + /* Read the last few bytes. */ + ssb_block_read(dev->dev, tail, 4, + q->mmio_base + B43_PIO8_RXDATA, + sizeof(u32)); + switch (len & 3) { + case 3: + skb->data[len + padding - 3] = tail[0]; + skb->data[len + padding - 2] = tail[1]; + skb->data[len + padding - 1] = tail[2]; + break; + case 2: + skb->data[len + padding - 2] = tail[0]; + skb->data[len + padding - 1] = tail[1]; + break; + case 1: + skb->data[len + padding - 1] = tail[0]; + break; + } + } + } else { + ssb_block_read(dev->dev, skb->data + padding, (len & ~1), + q->mmio_base + B43_PIO_RXDATA, + sizeof(u16)); + if (len & 1) { + u8 *tail = wl->pio_tailspace; + BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2); + + /* Read the last byte. */ + ssb_block_read(dev->dev, tail, 2, + q->mmio_base + B43_PIO_RXDATA, + sizeof(u16)); + skb->data[len + padding - 1] = tail[0]; + } + } + + b43_rx(q->dev, skb, rxhdr); + + return 1; + +rx_error: + if (err_msg) + b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg); + if (q->rev >= 8) + b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY); + else + b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY); + + return 1; +} + +void b43_pio_rx(struct b43_pio_rxqueue *q) +{ + unsigned int count = 0; + bool stop; + + while (1) { + stop = (pio_rx_frame(q) == 0); + if (stop) + break; + cond_resched(); + if (WARN_ON_ONCE(++count > 10000)) + break; + } +} + +static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q) +{ + if (q->rev >= 8) { + b43_piotx_write32(q, B43_PIO8_TXCTL, + b43_piotx_read32(q, B43_PIO8_TXCTL) + | B43_PIO8_TXCTL_SUSPREQ); + } else { + b43_piotx_write16(q, B43_PIO_TXCTL, + b43_piotx_read16(q, B43_PIO_TXCTL) + | B43_PIO_TXCTL_SUSPREQ); + } +} + +static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q) +{ + if (q->rev >= 8) { + b43_piotx_write32(q, B43_PIO8_TXCTL, + b43_piotx_read32(q, B43_PIO8_TXCTL) + & ~B43_PIO8_TXCTL_SUSPREQ); + } else { + b43_piotx_write16(q, B43_PIO_TXCTL, + b43_piotx_read16(q, B43_PIO_TXCTL) + & ~B43_PIO_TXCTL_SUSPREQ); + } +} + +void b43_pio_tx_suspend(struct b43_wldev *dev) +{ + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); + b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK); + b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE); + b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI); + b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO); + b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast); +} + +void b43_pio_tx_resume(struct b43_wldev *dev) +{ + b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast); + b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO); + b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI); + b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE); + b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK); + b43_power_saving_ctl_bits(dev, 0); +} diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h new file mode 100644 index 0000000..1e51614 --- /dev/null +++ b/drivers/net/wireless/b43/pio.h @@ -0,0 +1,165 @@ +#ifndef B43_PIO_H_ +#define B43_PIO_H_ + +#include "b43.h" + +#include +#include +#include +#include + + +/*** Registers for PIO queues up to revision 7. ***/ +/* TX queue. 
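 * (Editor's note, not part of the patch: these 16-bit registers, with the
 * data port at offset 0x02, apply to 802.11 core revisions below 8; newer
 * cores use the 32-bit B43_PIO8_* layout further down, with the data port
 * at offset 0x04, which is why pio.c checks q->rev >= 8 whenever it
 * touches a queue.)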
*/ +#define B43_PIO_TXCTL 0x00 +#define B43_PIO_TXCTL_WRITELO 0x0001 +#define B43_PIO_TXCTL_WRITEHI 0x0002 +#define B43_PIO_TXCTL_EOF 0x0004 +#define B43_PIO_TXCTL_FREADY 0x0008 +#define B43_PIO_TXCTL_FLUSHREQ 0x0020 +#define B43_PIO_TXCTL_FLUSHPEND 0x0040 +#define B43_PIO_TXCTL_SUSPREQ 0x0080 +#define B43_PIO_TXCTL_QSUSP 0x0100 +#define B43_PIO_TXCTL_COMMCNT 0xFC00 +#define B43_PIO_TXCTL_COMMCNT_SHIFT 10 +#define B43_PIO_TXDATA 0x02 +#define B43_PIO_TXQBUFSIZE 0x04 +/* RX queue. */ +#define B43_PIO_RXCTL 0x00 +#define B43_PIO_RXCTL_FRAMERDY 0x0001 +#define B43_PIO_RXCTL_DATARDY 0x0002 +#define B43_PIO_RXDATA 0x02 + +/*** Registers for PIO queues revision 8 and later. ***/ +/* TX queue */ +#define B43_PIO8_TXCTL 0x00 +#define B43_PIO8_TXCTL_0_7 0x00000001 +#define B43_PIO8_TXCTL_8_15 0x00000002 +#define B43_PIO8_TXCTL_16_23 0x00000004 +#define B43_PIO8_TXCTL_24_31 0x00000008 +#define B43_PIO8_TXCTL_EOF 0x00000010 +#define B43_PIO8_TXCTL_FREADY 0x00000080 +#define B43_PIO8_TXCTL_SUSPREQ 0x00000100 +#define B43_PIO8_TXCTL_QSUSP 0x00000200 +#define B43_PIO8_TXCTL_FLUSHREQ 0x00000400 +#define B43_PIO8_TXCTL_FLUSHPEND 0x00000800 +#define B43_PIO8_TXDATA 0x04 +/* RX queue */ +#define B43_PIO8_RXCTL 0x00 +#define B43_PIO8_RXCTL_FRAMERDY 0x00000001 +#define B43_PIO8_RXCTL_DATARDY 0x00000002 +#define B43_PIO8_RXDATA 0x04 + + +/* The maximum number of TX-packets the HW can handle. */ +#define B43_PIO_MAX_NR_TXPACKETS 32 + + +struct b43_pio_txpacket { + /* Pointer to the TX queue we belong to. */ + struct b43_pio_txqueue *queue; + /* The TX data packet. */ + struct sk_buff *skb; + /* Index in the (struct b43_pio_txqueue)->packets array. */ + u8 index; + + struct list_head list; +}; + +struct b43_pio_txqueue { + struct b43_wldev *dev; + u16 mmio_base; + + /* The device queue buffer size in bytes. */ + u16 buffer_size; + /* The number of used bytes in the device queue buffer. */ + u16 buffer_used; + /* The number of packets that can still get queued. + * This is decremented on queueing a packet and incremented + * after receiving the transmit status. */ + u16 free_packet_slots; + + /* True, if the mac80211 queue was stopped due to overflow at TX. */ + bool stopped; + /* Our b43 queue index number */ + u8 index; + /* The mac80211 QoS queue priority. */ + u8 queue_prio; + + /* Buffer for TX packet meta data. */ + struct b43_pio_txpacket packets[B43_PIO_MAX_NR_TXPACKETS]; + struct list_head packets_list; + + /* Shortcut to the 802.11 core revision. This is to + * avoid horrible pointer dereferencing in the fastpaths. */ + u8 rev; +}; + +struct b43_pio_rxqueue { + struct b43_wldev *dev; + u16 mmio_base; + + /* Shortcut to the 802.11 core revision. This is to + * avoid horrible pointer dereferencing in the fastpaths. 
*/ + u8 rev; +}; + + +static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset) +{ + return b43_read16(q->dev, q->mmio_base + offset); +} + +static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset) +{ + return b43_read32(q->dev, q->mmio_base + offset); +} + +static inline void b43_piotx_write16(struct b43_pio_txqueue *q, + u16 offset, u16 value) +{ + b43_write16(q->dev, q->mmio_base + offset, value); +} + +static inline void b43_piotx_write32(struct b43_pio_txqueue *q, + u16 offset, u32 value) +{ + b43_write32(q->dev, q->mmio_base + offset, value); +} + + +static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset) +{ + return b43_read16(q->dev, q->mmio_base + offset); +} + +static inline u32 b43_piorx_read32(struct b43_pio_rxqueue *q, u16 offset) +{ + return b43_read32(q->dev, q->mmio_base + offset); +} + +static inline void b43_piorx_write16(struct b43_pio_rxqueue *q, + u16 offset, u16 value) +{ + b43_write16(q->dev, q->mmio_base + offset, value); +} + +static inline void b43_piorx_write32(struct b43_pio_rxqueue *q, + u16 offset, u32 value) +{ + b43_write32(q->dev, q->mmio_base + offset, value); +} + + +int b43_pio_init(struct b43_wldev *dev); +void b43_pio_free(struct b43_wldev *dev); + +int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb); +void b43_pio_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status); +void b43_pio_rx(struct b43_pio_rxqueue *q); + +void b43_pio_tx_suspend(struct b43_wldev *dev); +void b43_pio_tx_resume(struct b43_wldev *dev); + +#endif /* B43_PIO_H_ */ diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c new file mode 100644 index 0000000..78016ae --- /dev/null +++ b/drivers/net/wireless/b43/rfkill.c @@ -0,0 +1,86 @@ +/* + + Broadcom B43 wireless driver + RFKILL support + + Copyright (c) 2007 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" + + +/* Returns TRUE, if the radio is enabled in hardware. */ +bool b43_is_hw_radio_enabled(struct b43_wldev *dev) +{ + if (dev->phy.rev >= 3 || dev->phy.type == B43_PHYTYPE_LP) { + if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI) + & B43_MMIO_RADIO_HWENABLED_HI_MASK)) + return 1; + } else { + /* To prevent CPU fault on PPC, do not read a register + * unless the interface is started; however, on resume + * for hibernation, this routine is entered early. When + * that happens, unconditionally return TRUE. + */ + if (b43_status(dev) < B43_STAT_STARTED) + return 1; + if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) + & B43_MMIO_RADIO_HWENABLED_LO_MASK) + return 1; + } + return 0; +} + +/* The poll callback for the hardware button. 
*/ +void b43_rfkill_poll(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + struct ssb_bus *bus = dev->dev->bus; + bool enabled; + bool brought_up = false; + + mutex_lock(&wl->mutex); + if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED)) { + if (ssb_bus_powerup(bus, 0)) { + mutex_unlock(&wl->mutex); + return; + } + ssb_device_enable(dev->dev, 0); + brought_up = true; + } + + enabled = b43_is_hw_radio_enabled(dev); + + if (unlikely(enabled != dev->radio_hw_enable)) { + dev->radio_hw_enable = enabled; + b43info(wl, "Radio hardware status changed to %s\n", + enabled ? "ENABLED" : "DISABLED"); + wiphy_rfkill_set_hw_state(hw->wiphy, !enabled); + if (enabled != dev->phy.radio_on) + b43_software_rfkill(dev, !enabled); + } + + if (brought_up) { + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(bus); + } + + mutex_unlock(&wl->mutex); +} diff --git a/drivers/net/wireless/b43/rfkill.h b/drivers/net/wireless/b43/rfkill.h new file mode 100644 index 0000000..f046c3c --- /dev/null +++ b/drivers/net/wireless/b43/rfkill.h @@ -0,0 +1,11 @@ +#ifndef B43_RFKILL_H_ +#define B43_RFKILL_H_ + +struct ieee80211_hw; +struct b43_wldev; + +void b43_rfkill_poll(struct ieee80211_hw *hw); + +bool b43_is_hw_radio_enabled(struct b43_wldev *dev); + +#endif /* B43_RFKILL_H_ */ diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c new file mode 100644 index 0000000..0d3ac64 --- /dev/null +++ b/drivers/net/wireless/b43/sdio.c @@ -0,0 +1,202 @@ +/* + * Broadcom B43 wireless driver + * + * SDIO over Sonics Silicon Backplane bus glue for b43. + * + * Copyright (C) 2009 Albert Herranz + * Copyright (C) 2009 Michael Buesch + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/mmc/card.h> +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h> +#include <linux/ssb/ssb.h> + +#include "sdio.h" +#include "b43.h" + + +#define HNBU_CHIPID 0x01 /* vendor & device id */ + +#define B43_SDIO_BLOCK_SIZE 64 /* rx fifo max size in bytes */ + + +static const struct b43_sdio_quirk { + u16 vendor; + u16 device; + unsigned int quirks; +} b43_sdio_quirks[] = { + { 0x14E4, 0x4318, SSB_QUIRK_SDIO_READ_AFTER_WRITE32, }, + { }, +}; + + +static unsigned int b43_sdio_get_quirks(u16 vendor, u16 device) +{ + const struct b43_sdio_quirk *q; + + for (q = b43_sdio_quirks; q->quirks; q++) { + if (vendor == q->vendor && device == q->device) + return q->quirks; + } + + return 0; +} + +static void b43_sdio_interrupt_dispatcher(struct sdio_func *func) +{ + struct b43_sdio *sdio = sdio_get_drvdata(func); + struct b43_wldev *dev = sdio->irq_handler_opaque; + + if (unlikely(b43_status(dev) < B43_STAT_STARTED)) + return; + + sdio_release_host(func); + sdio->irq_handler(dev); + sdio_claim_host(func); +} + +int b43_sdio_request_irq(struct b43_wldev *dev, + void (*handler)(struct b43_wldev *dev)) +{ + struct ssb_bus *bus = dev->dev->bus; + struct sdio_func *func = bus->host_sdio; + struct b43_sdio *sdio = sdio_get_drvdata(func); + int err; + + sdio->irq_handler_opaque = dev; + sdio->irq_handler = handler; + sdio_claim_host(func); + err = sdio_claim_irq(func, b43_sdio_interrupt_dispatcher); + sdio_release_host(func); + + return err; +} + +void b43_sdio_free_irq(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct sdio_func *func = bus->host_sdio; + struct b43_sdio *sdio = sdio_get_drvdata(func); + + sdio_claim_host(func); + sdio_release_irq(func); + sdio_release_host(func); + sdio->irq_handler_opaque = NULL; + sdio->irq_handler = NULL; +} + +static int b43_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct b43_sdio *sdio; + struct sdio_func_tuple *tuple; + u16 vendor = 0, device = 0; + int error; + + /* Look for the card chip identifier.
*/ + tuple = func->tuples; + while (tuple) { + switch (tuple->code) { + case 0x80: + switch (tuple->data[0]) { + case HNBU_CHIPID: + if (tuple->size != 5) + break; + vendor = tuple->data[1] | (tuple->data[2]<<8); + device = tuple->data[3] | (tuple->data[4]<<8); + dev_info(&func->dev, "Chip ID %04x:%04x\n", + vendor, device); + break; + default: + break; + } + break; + default: + break; + } + tuple = tuple->next; + } + if (!vendor || !device) { + error = -ENODEV; + goto out; + } + + sdio_claim_host(func); + error = sdio_set_block_size(func, B43_SDIO_BLOCK_SIZE); + if (error) { + dev_err(&func->dev, "failed to set block size to %u bytes," + " error %d\n", B43_SDIO_BLOCK_SIZE, error); + goto err_release_host; + } + error = sdio_enable_func(func); + if (error) { + dev_err(&func->dev, "failed to enable func, error %d\n", error); + goto err_release_host; + } + sdio_release_host(func); + + sdio = kzalloc(sizeof(*sdio), GFP_KERNEL); + if (!sdio) { + error = -ENOMEM; + dev_err(&func->dev, "failed to allocate ssb bus\n"); + goto err_disable_func; + } + error = ssb_bus_sdiobus_register(&sdio->ssb, func, + b43_sdio_get_quirks(vendor, device)); + if (error) { + dev_err(&func->dev, "failed to register ssb sdio bus," + " error %d\n", error); + goto err_free_ssb; + } + sdio_set_drvdata(func, sdio); + + return 0; + +err_free_ssb: + kfree(sdio); +err_disable_func: + sdio_disable_func(func); +err_release_host: + sdio_release_host(func); +out: + return error; +} + +static void b43_sdio_remove(struct sdio_func *func) +{ + struct b43_sdio *sdio = sdio_get_drvdata(func); + + ssb_bus_unregister(&sdio->ssb); + sdio_disable_func(func); + kfree(sdio); + sdio_set_drvdata(func, NULL); +} + +static const struct sdio_device_id b43_sdio_ids[] = { + { SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */ + { }, +}; + +static struct sdio_driver b43_sdio_driver = { + .name = "b43-sdio", + .id_table = b43_sdio_ids, + .probe = b43_sdio_probe, + .remove = b43_sdio_remove, +}; + +int b43_sdio_init(void) +{ + return sdio_register_driver(&b43_sdio_driver); +} + +void b43_sdio_exit(void) +{ + sdio_unregister_driver(&b43_sdio_driver); +} diff --git a/drivers/net/wireless/b43/sdio.h b/drivers/net/wireless/b43/sdio.h new file mode 100644 index 0000000..fb63309 --- /dev/null +++ b/drivers/net/wireless/b43/sdio.h @@ -0,0 +1,45 @@ +#ifndef B43_SDIO_H_ +#define B43_SDIO_H_ + +#include <linux/ssb/ssb.h> + +struct b43_wldev; + + +#ifdef CONFIG_B43_SDIO + +struct b43_sdio { + struct ssb_bus ssb; + void *irq_handler_opaque; + void (*irq_handler)(struct b43_wldev *dev); +}; + +int b43_sdio_request_irq(struct b43_wldev *dev, + void (*handler)(struct b43_wldev *dev)); +void b43_sdio_free_irq(struct b43_wldev *dev); + +int b43_sdio_init(void); +void b43_sdio_exit(void); + + +#else /* CONFIG_B43_SDIO */ + + +static inline int b43_sdio_request_irq(struct b43_wldev *dev, + void (*handler)(struct b43_wldev *dev)) +{ + return -ENODEV; +} +static inline void b43_sdio_free_irq(struct b43_wldev *dev) +{ +} +static inline int b43_sdio_init(void) +{ + return 0; +} +static inline void b43_sdio_exit(void) +{ +} + +#endif /* CONFIG_B43_SDIO */ +#endif /* B43_SDIO_H_ */ diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c new file mode 100644 index 0000000..f1ae4e0 --- /dev/null +++ b/drivers/net/wireless/b43/sysfs.c @@ -0,0 +1,155 @@ +/* + + Broadcom B43 wireless driver + + SYSFS support routines + + Copyright (c) 2006 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License
as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include <linux/capability.h> +#include <linux/io.h> + +#include "b43.h" +#include "sysfs.h" +#include "main.h" +#include "phy_common.h" + +#define GENERIC_FILESIZE 64 + +static int get_integer(const char *buf, size_t count) +{ + char tmp[10 + 1] = { 0 }; + int ret = -EINVAL; + + if (count == 0) + goto out; + count = min(count, (size_t) 10); + memcpy(tmp, buf, count); + ret = simple_strtol(tmp, NULL, 10); + out: + return ret; +} + +static ssize_t b43_attr_interfmode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct b43_wldev *wldev = dev_to_b43_wldev(dev); + ssize_t count = 0; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + if (wldev->phy.type != B43_PHYTYPE_G) { + mutex_unlock(&wldev->wl->mutex); + return -ENOSYS; + } + + switch (wldev->phy.g->interfmode) { + case B43_INTERFMODE_NONE: + count = + snprintf(buf, PAGE_SIZE, + "0 (No Interference Mitigation)\n"); + break; + case B43_INTERFMODE_NONWLAN: + count = + snprintf(buf, PAGE_SIZE, + "1 (Non-WLAN Interference Mitigation)\n"); + break; + case B43_INTERFMODE_MANUALWLAN: + count = + snprintf(buf, PAGE_SIZE, + "2 (WLAN Interference Mitigation)\n"); + break; + default: + B43_WARN_ON(1); + } + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43_attr_interfmode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43_wldev *wldev = dev_to_b43_wldev(dev); + int err; + int mode; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mode = get_integer(buf, count); + switch (mode) { + case 0: + mode = B43_INTERFMODE_NONE; + break; + case 1: + mode = B43_INTERFMODE_NONWLAN; + break; + case 2: + mode = B43_INTERFMODE_MANUALWLAN; + break; + case 3: + mode = B43_INTERFMODE_AUTOWLAN; + break; + default: + return -EINVAL; + } + + mutex_lock(&wldev->wl->mutex); + + if (wldev->phy.ops->interf_mitigation) { + err = wldev->phy.ops->interf_mitigation(wldev, mode); + if (err) { + b43err(wldev->wl, "Interference Mitigation not " + "supported by device\n"); + } + } else + err = -ENOSYS; + + mmiowb(); + mutex_unlock(&wldev->wl->mutex); + + return err ?
err : count; +} + +static DEVICE_ATTR(interference, 0644, + b43_attr_interfmode_show, b43_attr_interfmode_store); + +int b43_sysfs_register(struct b43_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + + B43_WARN_ON(b43_status(wldev) != B43_STAT_INITIALIZED); + + return device_create_file(dev, &dev_attr_interference); +} + +void b43_sysfs_unregister(struct b43_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + + device_remove_file(dev, &dev_attr_interference); +} diff --git a/drivers/net/wireless/b43/sysfs.h b/drivers/net/wireless/b43/sysfs.h new file mode 100644 index 0000000..12bda9e --- /dev/null +++ b/drivers/net/wireless/b43/sysfs.h @@ -0,0 +1,9 @@ +#ifndef B43_SYSFS_H_ +#define B43_SYSFS_H_ + +struct b43_wldev; + +int b43_sysfs_register(struct b43_wldev *dev); +void b43_sysfs_unregister(struct b43_wldev *dev); + +#endif /* B43_SYSFS_H_ */ diff --git a/drivers/net/wireless/b43/tables.c b/drivers/net/wireless/b43/tables.c new file mode 100644 index 0000000..1ef9a64 --- /dev/null +++ b/drivers/net/wireless/b43/tables.c @@ -0,0 +1,466 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005-2007 Stefano Brivio + Copyright (c) 2006, 2006 Michael Buesch + Copyright (c) 2005 Danny van Dyk + Copyright (c) 2005 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43.h" +#include "tables.h" +#include "phy_g.h" + + +const u32 b43_tab_rotor[] = { + 0xFEB93FFD, 0xFEC63FFD, /* 0 */ + 0xFED23FFD, 0xFEDF3FFD, + 0xFEEC3FFE, 0xFEF83FFE, + 0xFF053FFE, 0xFF113FFE, + 0xFF1E3FFE, 0xFF2A3FFF, /* 8 */ + 0xFF373FFF, 0xFF443FFF, + 0xFF503FFF, 0xFF5D3FFF, + 0xFF693FFF, 0xFF763FFF, + 0xFF824000, 0xFF8F4000, /* 16 */ + 0xFF9B4000, 0xFFA84000, + 0xFFB54000, 0xFFC14000, + 0xFFCE4000, 0xFFDA4000, + 0xFFE74000, 0xFFF34000, /* 24 */ + 0x00004000, 0x000D4000, + 0x00194000, 0x00264000, + 0x00324000, 0x003F4000, + 0x004B4000, 0x00584000, /* 32 */ + 0x00654000, 0x00714000, + 0x007E4000, 0x008A3FFF, + 0x00973FFF, 0x00A33FFF, + 0x00B03FFF, 0x00BC3FFF, /* 40 */ + 0x00C93FFF, 0x00D63FFF, + 0x00E23FFE, 0x00EF3FFE, + 0x00FB3FFE, 0x01083FFE, + 0x01143FFE, 0x01213FFD, /* 48 */ + 0x012E3FFD, 0x013A3FFD, + 0x01473FFD, +}; + +const u32 b43_tab_retard[] = { + 0xDB93CB87, 0xD666CF64, /* 0 */ + 0xD1FDD358, 0xCDA6D826, + 0xCA38DD9F, 0xC729E2B4, + 0xC469E88E, 0xC26AEE2B, + 0xC0DEF46C, 0xC073FA62, /* 8 */ + 0xC01D00D5, 0xC0760743, + 0xC1560D1E, 0xC2E51369, + 0xC4ED18FF, 0xC7AC1ED7, + 0xCB2823B2, 0xCEFA28D9, /* 16 */ + 0xD2F62D3F, 0xD7BB3197, + 0xDCE53568, 0xE1FE3875, + 0xE7D13B35, 0xED663D35, + 0xF39B3EC4, 0xF98E3FA7, /* 24 */ + 0x00004000, 0x06723FA7, + 0x0C653EC4, 0x129A3D35, + 0x182F3B35, 0x1E023875, + 0x231B3568, 0x28453197, /* 32 */ + 0x2D0A2D3F, 0x310628D9, + 0x34D823B2, 0x38541ED7, + 0x3B1318FF, 0x3D1B1369, + 0x3EAA0D1E, 0x3F8A0743, /* 40 */ + 0x3FE300D5, 0x3F8DFA62, + 0x3F22F46C, 0x3D96EE2B, + 0x3B97E88E, 0x38D7E2B4, + 0x35C8DD9F, 0x325AD826, /* 48 */ + 0x2E03D358, 0x299ACF64, + 0x246DCB87, +}; + +const u16 b43_tab_finefreqa[] = { + 0x0082, 0x0082, 0x0102, 0x0182, /* 0 */ + 0x0202, 0x0282, 0x0302, 0x0382, + 0x0402, 0x0482, 0x0502, 0x0582, + 0x05E2, 0x0662, 0x06E2, 0x0762, + 0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */ + 0x09C2, 0x0A22, 0x0AA2, 0x0B02, + 0x0B82, 0x0BE2, 0x0C62, 0x0CC2, + 0x0D42, 0x0DA2, 0x0E02, 0x0E62, + 0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */ + 0x1062, 0x10C2, 0x1122, 0x1182, + 0x11E2, 0x1242, 0x12A2, 0x12E2, + 0x1342, 0x13A2, 0x1402, 0x1442, + 0x14A2, 0x14E2, 0x1542, 0x1582, /* 48 */ + 0x15E2, 0x1622, 0x1662, 0x16C1, + 0x1701, 0x1741, 0x1781, 0x17E1, + 0x1821, 0x1861, 0x18A1, 0x18E1, + 0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */ + 0x1A21, 0x1A61, 0x1AA1, 0x1AC1, + 0x1B01, 0x1B41, 0x1B81, 0x1BA1, + 0x1BE1, 0x1C21, 0x1C41, 0x1C81, + 0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */ + 0x1D61, 0x1DA1, 0x1DC1, 0x1E01, + 0x1E21, 0x1E61, 0x1E81, 0x1EA1, + 0x1EE1, 0x1F01, 0x1F21, 0x1F41, + 0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */ + 0x2001, 0x2041, 0x2061, 0x2081, + 0x20A1, 0x20C1, 0x20E1, 0x2101, + 0x2121, 0x2141, 0x2161, 0x2181, + 0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */ + 0x2221, 0x2241, 0x2261, 0x2281, + 0x22A1, 0x22C1, 0x22C1, 0x22E1, + 0x2301, 0x2321, 0x2341, 0x2361, + 0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */ + 0x23E1, 0x23E1, 0x2401, 0x2421, + 0x2441, 0x2441, 0x2461, 0x2481, + 0x2481, 0x24A1, 0x24C1, 0x24C1, + 0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */ + 0x2541, 0x2541, 0x2561, 0x2561, + 0x2581, 0x25A1, 0x25A1, 0x25C1, + 0x25C1, 0x25E1, 0x2601, 0x2601, + 0x2621, 0x2621, 0x2641, 0x2641, /* 160 */ + 0x2661, 0x2661, 0x2681, 0x2681, + 0x26A1, 0x26A1, 0x26C1, 0x26C1, + 0x26E1, 0x26E1, 0x2701, 0x2701, + 0x2721, 0x2721, 0x2740, 0x2740, /* 176 */ + 0x2760, 0x2760, 0x2780, 0x2780, + 0x2780, 0x27A0, 0x27A0, 0x27C0, + 0x27C0, 0x27E0, 0x27E0, 0x27E0, + 0x2800, 0x2800, 0x2820, 0x2820, /* 192 */ + 0x2820, 0x2840, 0x2840, 0x2840, + 0x2860, 0x2860, 0x2880, 0x2880, + 0x2880, 
0x28A0, 0x28A0, 0x28A0, + 0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */ + 0x28E0, 0x28E0, 0x2900, 0x2900, + 0x2900, 0x2920, 0x2920, 0x2920, + 0x2940, 0x2940, 0x2940, 0x2960, + 0x2960, 0x2960, 0x2960, 0x2980, /* 224 */ + 0x2980, 0x2980, 0x29A0, 0x29A0, + 0x29A0, 0x29A0, 0x29C0, 0x29C0, + 0x29C0, 0x29E0, 0x29E0, 0x29E0, + 0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */ + 0x2A00, 0x2A20, 0x2A20, 0x2A20, + 0x2A20, 0x2A40, 0x2A40, 0x2A40, + 0x2A40, 0x2A60, 0x2A60, 0x2A60, +}; + +const u16 b43_tab_finefreqg[] = { + 0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */ + 0x05A9, 0x0669, 0x0709, 0x0789, + 0x0829, 0x08A9, 0x0929, 0x0989, + 0x0A09, 0x0A69, 0x0AC9, 0x0B29, + 0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */ + 0x0D09, 0x0D69, 0x0DA9, 0x0E09, + 0x0E69, 0x0EA9, 0x0F09, 0x0F49, + 0x0FA9, 0x0FE9, 0x1029, 0x1089, + 0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */ + 0x11E9, 0x1229, 0x1289, 0x12C9, + 0x1309, 0x1349, 0x1389, 0x13C9, + 0x1409, 0x1449, 0x14A9, 0x14E9, + 0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */ + 0x1629, 0x1669, 0x16A9, 0x16E8, + 0x1728, 0x1768, 0x17A8, 0x17E8, + 0x1828, 0x1868, 0x18A8, 0x18E8, + 0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */ + 0x1A28, 0x1A68, 0x1AA8, 0x1AE8, + 0x1B28, 0x1B68, 0x1BA8, 0x1BE8, + 0x1C28, 0x1C68, 0x1CA8, 0x1CE8, + 0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */ + 0x1E48, 0x1E88, 0x1EC8, 0x1F08, + 0x1F48, 0x1F88, 0x1FE8, 0x2028, + 0x2068, 0x20A8, 0x2108, 0x2148, + 0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */ + 0x22C8, 0x2308, 0x2348, 0x23A8, + 0x23E8, 0x2448, 0x24A8, 0x24E8, + 0x2548, 0x25A8, 0x2608, 0x2668, + 0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */ + 0x2847, 0x28C7, 0x2947, 0x29A7, + 0x2A27, 0x2AC7, 0x2B47, 0x2BE7, + 0x2CA7, 0x2D67, 0x2E47, 0x2F67, + 0x3247, 0x3526, 0x3646, 0x3726, /* 128 */ + 0x3806, 0x38A6, 0x3946, 0x39E6, + 0x3A66, 0x3AE6, 0x3B66, 0x3BC6, + 0x3C45, 0x3CA5, 0x3D05, 0x3D85, + 0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */ + 0x3F45, 0x3FA5, 0x4005, 0x4045, + 0x40A5, 0x40E5, 0x4145, 0x4185, + 0x41E5, 0x4225, 0x4265, 0x42C5, + 0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */ + 0x4424, 0x4464, 0x44C4, 0x4504, + 0x4544, 0x4584, 0x45C4, 0x4604, + 0x4644, 0x46A4, 0x46E4, 0x4724, + 0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */ + 0x4864, 0x48A4, 0x48E4, 0x4924, + 0x4964, 0x49A4, 0x49E4, 0x4A24, + 0x4A64, 0x4AA4, 0x4AE4, 0x4B23, + 0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */ + 0x4C63, 0x4CA3, 0x4CE3, 0x4D23, + 0x4D63, 0x4DA3, 0x4DE3, 0x4E23, + 0x4E63, 0x4EA3, 0x4EE3, 0x4F23, + 0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */ + 0x5083, 0x50C3, 0x5103, 0x5143, + 0x5183, 0x51E2, 0x5222, 0x5262, + 0x52A2, 0x52E2, 0x5342, 0x5382, + 0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */ + 0x5502, 0x5542, 0x55A2, 0x55E2, + 0x5642, 0x5682, 0x56E2, 0x5722, + 0x5782, 0x57E1, 0x5841, 0x58A1, + 0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */ + 0x5AA1, 0x5B01, 0x5B81, 0x5BE1, + 0x5C61, 0x5D01, 0x5D80, 0x5E20, + 0x5EE0, 0x5FA0, 0x6080, 0x61C0, +}; + +const u16 b43_tab_noisea2[] = { + 0x0001, 0x0001, 0x0001, 0xFFFE, + 0xFFFE, 0x3FFF, 0x1000, 0x0393, +}; + +const u16 b43_tab_noisea3[] = { + 0x5E5E, 0x5E5E, 0x5E5E, 0x3F48, + 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, +}; + +const u16 b43_tab_noiseg1[] = { + 0x013C, 0x01F5, 0x031A, 0x0631, + 0x0001, 0x0001, 0x0001, 0x0001, +}; + +const u16 b43_tab_noiseg2[] = { + 0x5484, 0x3C40, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, +}; + +const u16 b43_tab_noisescalea2[] = { + 0x6767, 0x6767, 0x6767, 0x6767, /* 0 */ + 0x6767, 0x6767, 0x6767, 0x6767, + 0x6767, 0x6767, 0x6767, 0x6767, + 0x6767, 0x6700, 0x6767, 0x6767, + 0x6767, 0x6767, 0x6767, 0x6767, /* 16 */ + 0x6767, 0x6767, 0x6767, 0x6767, + 0x6767, 0x6767, 0x0067, +}; 
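The tables in this hunk are raw register images; nothing in tables.c itself writes them to the hardware. They are consumed by the G-PHY setup code elsewhere in the driver, which streams them into the chip's OFDM table memory through the accessors added near the end of this file (b43_ofdmtab_write16() and friends). Because those accessors cache the last programmed address, a run of consecutive offsets reprograms B43_PHY_OTABLECTL only once and then pushes values through B43_PHY_OTABLEI. The sketch below is not part of the patch; it is a minimal illustration of that upload pattern in which example_upload_table() and B43_OFDMTAB_EXAMPLE are made-up names, and the guess that phy_g.h carries the accessor prototypes is exactly that, a guess.

/* Illustrative sketch only -- not part of this patch. */
#include "b43.h"
#include "tables.h"
#include "phy_g.h"	/* assumed home of the b43_ofdmtab_*() prototypes */

#define B43_OFDMTAB_EXAMPLE	0x1400	/* placeholder table base, not a real driver constant */

static void example_upload_table(struct b43_wldev *dev,
				 const u16 *tab, u16 len)
{
	u16 i;

	/* Consecutive offsets let b43_ofdmtab_write16() program
	 * B43_PHY_OTABLECTL once and then stream the remaining values
	 * straight into B43_PHY_OTABLEI. */
	for (i = 0; i < len; i++)
		b43_ofdmtab_write16(dev, B43_OFDMTAB_EXAMPLE, i, tab[i]);
}

/* Typical call:
 *	example_upload_table(dev, b43_tab_noisescaleg1,
 *			     B43_TAB_NOISESCALE_SIZE);
 */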
+ +const u16 b43_tab_noisescalea3[] = { + 0x2323, 0x2323, 0x2323, 0x2323, /* 0 */ + 0x2323, 0x2323, 0x2323, 0x2323, + 0x2323, 0x2323, 0x2323, 0x2323, + 0x2323, 0x2300, 0x2323, 0x2323, + 0x2323, 0x2323, 0x2323, 0x2323, /* 16 */ + 0x2323, 0x2323, 0x2323, 0x2323, + 0x2323, 0x2323, 0x0023, +}; + +const u16 b43_tab_noisescaleg1[] = { + 0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */ + 0x2F2D, 0x2A2A, 0x2527, 0x1F21, + 0x1A1D, 0x1719, 0x1616, 0x1414, + 0x1414, 0x1400, 0x1414, 0x1614, + 0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */ + 0x2A27, 0x2F2A, 0x332D, 0x3B35, + 0x5140, 0x6C62, 0x0077, +}; + +const u16 b43_tab_noisescaleg2[] = { + 0xD8DD, 0xCBD4, 0xBCC0, 0xB6B7, /* 0 */ + 0xB2B0, 0xADAD, 0xA7A9, 0x9FA1, + 0x969B, 0x9195, 0x8F8F, 0x8A8A, + 0x8A8A, 0x8A00, 0x8A8A, 0x8F8A, + 0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */ + 0xADA9, 0xB2AD, 0xB6B0, 0xBCB7, + 0xCBC0, 0xD8D4, 0x00DD, +}; + +const u16 b43_tab_noisescaleg3[] = { + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA400, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0x00A4, +}; + +const u16 b43_tab_sigmasqr1[] = { + 0x007A, 0x0075, 0x0071, 0x006C, /* 0 */ + 0x0067, 0x0063, 0x005E, 0x0059, + 0x0054, 0x0050, 0x004B, 0x0046, + 0x0042, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 16 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x0000, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 32 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x0042, 0x0046, 0x004B, 0x0050, + 0x0054, 0x0059, 0x005E, 0x0063, + 0x0067, 0x006C, 0x0071, 0x0075, /* 48 */ + 0x007A, +}; + +const u16 b43_tab_sigmasqr2[] = { + 0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */ + 0x00D6, 0x00D4, 0x00D2, 0x00CF, + 0x00CD, 0x00CA, 0x00C7, 0x00C4, + 0x00C1, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x0000, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00C1, 0x00C4, 0x00C7, 0x00CA, + 0x00CD, 0x00CF, 0x00D2, 0x00D4, + 0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */ + 0x00DE, +}; + +const u16 b43_tab_rssiagc1[] = { + 0xFFF8, 0xFFF8, 0xFFF8, 0xFFF8, /* 0 */ + 0xFFF8, 0xFFF9, 0xFFFC, 0xFFFE, + 0xFFF8, 0xFFF8, 0xFFF8, 0xFFF8, + 0xFFF8, 0xFFF8, 0xFFF8, 0xFFF8, +}; + +const u16 b43_tab_rssiagc2[] = { + 0x0820, 0x0820, 0x0920, 0x0C38, /* 0 */ + 0x0820, 0x0820, 0x0820, 0x0820, + 0x0820, 0x0820, 0x0920, 0x0A38, + 0x0820, 0x0820, 0x0820, 0x0820, + 0x0820, 0x0820, 0x0920, 0x0A38, /* 16 */ + 0x0820, 0x0820, 0x0820, 0x0820, + 0x0820, 0x0820, 0x0920, 0x0A38, + 0x0820, 0x0820, 0x0820, 0x0820, + 0x0820, 0x0820, 0x0920, 0x0A38, /* 32 */ + 0x0820, 0x0820, 0x0820, 0x0820, + 0x0820, 0x0820, 0x0920, 0x0A38, + 0x0820, 0x0820, 0x0820, 0x0820, +}; + +static inline void assert_sizes(void) +{ + BUILD_BUG_ON(B43_TAB_ROTOR_SIZE != ARRAY_SIZE(b43_tab_rotor)); + BUILD_BUG_ON(B43_TAB_RETARD_SIZE != ARRAY_SIZE(b43_tab_retard)); + BUILD_BUG_ON(B43_TAB_FINEFREQA_SIZE != ARRAY_SIZE(b43_tab_finefreqa)); + BUILD_BUG_ON(B43_TAB_FINEFREQG_SIZE != ARRAY_SIZE(b43_tab_finefreqg)); + BUILD_BUG_ON(B43_TAB_NOISEA2_SIZE != ARRAY_SIZE(b43_tab_noisea2)); + BUILD_BUG_ON(B43_TAB_NOISEA3_SIZE != ARRAY_SIZE(b43_tab_noisea3)); + BUILD_BUG_ON(B43_TAB_NOISEG1_SIZE != ARRAY_SIZE(b43_tab_noiseg1)); + BUILD_BUG_ON(B43_TAB_NOISEG2_SIZE != ARRAY_SIZE(b43_tab_noiseg2)); + BUILD_BUG_ON(B43_TAB_NOISESCALE_SIZE != + ARRAY_SIZE(b43_tab_noisescalea2)); 
+ BUILD_BUG_ON(B43_TAB_NOISESCALE_SIZE != + ARRAY_SIZE(b43_tab_noisescalea3)); + BUILD_BUG_ON(B43_TAB_NOISESCALE_SIZE != + ARRAY_SIZE(b43_tab_noisescaleg1)); + BUILD_BUG_ON(B43_TAB_NOISESCALE_SIZE != + ARRAY_SIZE(b43_tab_noisescaleg2)); + BUILD_BUG_ON(B43_TAB_NOISESCALE_SIZE != + ARRAY_SIZE(b43_tab_noisescaleg3)); + BUILD_BUG_ON(B43_TAB_SIGMASQR_SIZE != ARRAY_SIZE(b43_tab_sigmasqr1)); + BUILD_BUG_ON(B43_TAB_SIGMASQR_SIZE != ARRAY_SIZE(b43_tab_sigmasqr2)); + BUILD_BUG_ON(B43_TAB_RSSIAGC1_SIZE != ARRAY_SIZE(b43_tab_rssiagc1)); + BUILD_BUG_ON(B43_TAB_RSSIAGC2_SIZE != ARRAY_SIZE(b43_tab_rssiagc2)); +} + +u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset) +{ + struct b43_phy_g *gphy = dev->phy.g; + u16 addr; + + addr = table + offset; + if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) || + (addr - 1 != gphy->ofdmtab_addr)) { + /* The hardware has a different address in memory. Update it. */ + b43_phy_write(dev, B43_PHY_OTABLECTL, addr); + gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ; + } + gphy->ofdmtab_addr = addr; + + return b43_phy_read(dev, B43_PHY_OTABLEI); + + /* Some compiletime assertions... */ + assert_sizes(); +} + +void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, + u16 offset, u16 value) +{ + struct b43_phy_g *gphy = dev->phy.g; + u16 addr; + + addr = table + offset; + if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) || + (addr - 1 != gphy->ofdmtab_addr)) { + /* The hardware has a different address in memory. Update it. */ + b43_phy_write(dev, B43_PHY_OTABLECTL, addr); + gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE; + } + gphy->ofdmtab_addr = addr; + b43_phy_write(dev, B43_PHY_OTABLEI, value); +} + +u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset) +{ + struct b43_phy_g *gphy = dev->phy.g; + u32 ret; + u16 addr; + + addr = table + offset; + if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) || + (addr - 1 != gphy->ofdmtab_addr)) { + /* The hardware has a different address in memory. Update it. */ + b43_phy_write(dev, B43_PHY_OTABLECTL, addr); + gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ; + } + gphy->ofdmtab_addr = addr; + ret = b43_phy_read(dev, B43_PHY_OTABLEQ); + ret <<= 16; + ret |= b43_phy_read(dev, B43_PHY_OTABLEI); + + return ret; +} + +void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, + u16 offset, u32 value) +{ + struct b43_phy_g *gphy = dev->phy.g; + u16 addr; + + addr = table + offset; + if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) || + (addr - 1 != gphy->ofdmtab_addr)) { + /* The hardware has a different address in memory. Update it. 
*/ + b43_phy_write(dev, B43_PHY_OTABLECTL, addr); + gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE; + } + gphy->ofdmtab_addr = addr; + + b43_phy_write(dev, B43_PHY_OTABLEI, value); + b43_phy_write(dev, B43_PHY_OTABLEQ, (value >> 16)); +} + +u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset) +{ + b43_phy_write(dev, B43_PHY_GTABCTL, table + offset); + return b43_phy_read(dev, B43_PHY_GTABDATA); +} + +void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value) +{ + b43_phy_write(dev, B43_PHY_GTABCTL, table + offset); + b43_phy_write(dev, B43_PHY_GTABDATA, value); +} diff --git a/drivers/net/wireless/b43/tables.h b/drivers/net/wireless/b43/tables.h new file mode 100644 index 0000000..80e73c7 --- /dev/null +++ b/drivers/net/wireless/b43/tables.h @@ -0,0 +1,34 @@ +#ifndef B43_TABLES_H_ +#define B43_TABLES_H_ + +#define B43_TAB_ROTOR_SIZE 53 +extern const u32 b43_tab_rotor[]; +#define B43_TAB_RETARD_SIZE 53 +extern const u32 b43_tab_retard[]; +#define B43_TAB_FINEFREQA_SIZE 256 +extern const u16 b43_tab_finefreqa[]; +#define B43_TAB_FINEFREQG_SIZE 256 +extern const u16 b43_tab_finefreqg[]; +#define B43_TAB_NOISEA2_SIZE 8 +extern const u16 b43_tab_noisea2[]; +#define B43_TAB_NOISEA3_SIZE 8 +extern const u16 b43_tab_noisea3[]; +#define B43_TAB_NOISEG1_SIZE 8 +extern const u16 b43_tab_noiseg1[]; +#define B43_TAB_NOISEG2_SIZE 8 +extern const u16 b43_tab_noiseg2[]; +#define B43_TAB_NOISESCALE_SIZE 27 +extern const u16 b43_tab_noisescalea2[]; +extern const u16 b43_tab_noisescalea3[]; +extern const u16 b43_tab_noisescaleg1[]; +extern const u16 b43_tab_noisescaleg2[]; +extern const u16 b43_tab_noisescaleg3[]; +#define B43_TAB_SIGMASQR_SIZE 53 +extern const u16 b43_tab_sigmasqr1[]; +extern const u16 b43_tab_sigmasqr2[]; +#define B43_TAB_RSSIAGC1_SIZE 16 +extern const u16 b43_tab_rssiagc1[]; +#define B43_TAB_RSSIAGC2_SIZE 48 +extern const u16 b43_tab_rssiagc2[]; + +#endif /* B43_TABLES_H_ */ diff --git a/drivers/net/wireless/b43/tables_lpphy.c b/drivers/net/wireless/b43/tables_lpphy.c new file mode 100644 index 0000000..61027ee --- /dev/null +++ b/drivers/net/wireless/b43/tables_lpphy.c @@ -0,0 +1,2457 @@ +/* + + Broadcom B43 wireless driver + IEEE 802.11a/g LP-PHY and radio device data tables + + Copyright (c) 2009 Michael Buesch + Copyright (c) 2009 Gábor Stefanik + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43.h" +#include "tables_lpphy.h" +#include "phy_common.h" +#include "phy_lp.h" + + +/* Entry of the 2062/2063 radio init table */ +struct b206x_init_tab_entry { + u16 offset; + u16 value_a; + u16 value_g; + u8 flags; +}; +#define B206X_FLAG_A 0x01 /* Flag: Init in A mode */ +#define B206X_FLAG_G 0x02 /* Flag: Init in G mode */ + +static const struct b206x_init_tab_entry b2062_init_tab[] = { + /* { .offset = B2062_N_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = 0x0001, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_N_COMM4, .value_a = 0x0001, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_N_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_COMM15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_PDN_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_N_PDN_CTL1, .value_a = 0x0000, .value_g = 0x00CA, .flags = B206X_FLAG_G, }, + /* { .offset = B2062_N_PDN_CTL2, .value_a = 0x0018, .value_g = 0x0018, .flags = 0, }, */ + { .offset = B2062_N_PDN_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_N_PDN_CTL4, .value_a = 0x0015, .value_g = 0x002A, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_N_GEN_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_IQ_CALIB, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + { .offset = B2062_N_LGENC, .value_a = 0x00DB, .value_g = 0x00FF, .flags = B206X_FLAG_A, }, + /* { .offset = B2062_N_LGENA_LPF, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + /* { .offset = B2062_N_LGENA_BIAS0, .value_a = 0x0041, .value_g = 0x0041, .flags = 0, }, */ + /* { .offset = B2062_N_LGNEA_BIAS1, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */ + /* { .offset = B2062_N_LGENA_CTL0, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */ + /* { .offset = B2062_N_LGENA_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_LGENA_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_N_LGENA_TUNE0, .value_a = 0x00DD, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_N_LGENA_TUNE1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_N_LGENA_TUNE2, .value_a = 0x00DD, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_N_LGENA_TUNE3, .value_a = 0x0077, 
.value_g = 0x00B5, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_N_LGENA_CTL3, .value_a = 0x0000, .value_g = 0x00FF, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_N_LGENA_CTL4, .value_a = 0x001F, .value_g = 0x001F, .flags = 0, }, */ + /* { .offset = B2062_N_LGENA_CTL5, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */ + /* { .offset = B2062_N_LGENA_CTL6, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */ + { .offset = B2062_N_LGENA_CTL7, .value_a = 0x0033, .value_g = 0x0033, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_N_RXA_CTL0, .value_a = 0x0009, .value_g = 0x0009, .flags = 0, }, */ + { .offset = B2062_N_RXA_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + /* { .offset = B2062_N_RXA_CTL2, .value_a = 0x0018, .value_g = 0x0018, .flags = 0, }, */ + /* { .offset = B2062_N_RXA_CTL3, .value_a = 0x0027, .value_g = 0x0027, .flags = 0, }, */ + /* { .offset = B2062_N_RXA_CTL4, .value_a = 0x0028, .value_g = 0x0028, .flags = 0, }, */ + /* { .offset = B2062_N_RXA_CTL5, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */ + /* { .offset = B2062_N_RXA_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_RXA_CTL7, .value_a = 0x0008, .value_g = 0x0008, .flags = 0, }, */ + { .offset = B2062_N_RXBB_CTL0, .value_a = 0x0082, .value_g = 0x0080, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_N_RXBB_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_GAIN0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_N_RXBB_GAIN1, .value_a = 0x0004, .value_g = 0x0004, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_N_RXBB_GAIN2, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_N_RXBB_GAIN3, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_RSSI0, .value_a = 0x0043, .value_g = 0x0043, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_RSSI1, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_CALIB0, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_CALIB1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_CALIB2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_BIAS0, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_BIAS1, .value_a = 0x002A, .value_g = 0x002A, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_BIAS2, .value_a = 0x00AA, .value_g = 0x00AA, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_BIAS3, .value_a = 0x0021, .value_g = 0x0021, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_BIAS4, .value_a = 0x00AA, .value_g = 0x00AA, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_BIAS5, .value_a = 0x0022, .value_g = 0x0022, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_RSSI2, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_RSSI3, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_RSSI4, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + /* { .offset = B2062_N_RXBB_RSSI5, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ + /* { .offset = B2062_N_TX_CTL0, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + /* { .offset = B2062_N_TX_CTL1, .value_a = 0x0000, 
.value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_TX_CTL2, .value_a = 0x0084, .value_g = 0x0084, .flags = 0, }, */ + /* { .offset = B2062_N_TX_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_N_TX_CTL4, .value_a = 0x0003, .value_g = 0x0003, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_N_TX_CTL5, .value_a = 0x0002, .value_g = 0x0002, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_N_TX_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_TX_CTL7, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */ + /* { .offset = B2062_N_TX_CTL8, .value_a = 0x0082, .value_g = 0x0082, .flags = 0, }, */ + /* { .offset = B2062_N_TX_CTL9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_TX_CTL_A, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_TX_GC2G, .value_a = 0x00FF, .value_g = 0x00FF, .flags = 0, }, */ + /* { .offset = B2062_N_TX_GC5G, .value_a = 0x00FF, .value_g = 0x00FF, .flags = 0, }, */ + { .offset = B2062_N_TX_TUNE, .value_a = 0x0088, .value_g = 0x001B, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_N_TX_PAD, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ + /* { .offset = B2062_N_TX_PGA, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ + /* { .offset = B2062_N_TX_PADAUX, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2062_N_TX_PGAAUX, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2062_N_TSSI_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_TSSI_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_TSSI_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_IQ_CALIB_CTL0, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2062_N_IQ_CALIB_CTL1, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ + /* { .offset = B2062_N_IQ_CALIB_CTL2, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */ + /* { .offset = B2062_N_CALIB_TS, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_CALIB_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_CALIB_CTL1, .value_a = 0x0015, .value_g = 0x0015, .flags = 0, }, */ + /* { .offset = B2062_N_CALIB_CTL2, .value_a = 0x000F, .value_g = 0x000F, .flags = 0, }, */ + /* { .offset = B2062_N_CALIB_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_CALIB_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_CALIB_DBG0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_CALIB_DBG1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_CALIB_DBG2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_CALIB_DBG3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_PSENSE_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_PSENSE_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_PSENSE_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_N_TEST_BUF0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RADIO_ID_CODE, .value_a = 0x0000, 
.value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_S_COMM4, .value_a = 0x0001, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_COMM15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_S_PDS_CTL0, .value_a = 0x00FF, .value_g = 0x00FF, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_PDS_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_PDS_CTL2, .value_a = 0x008E, .value_g = 0x008E, .flags = 0, }, */ + /* { .offset = B2062_S_PDS_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_BG_CTL0, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */ + /* { .offset = B2062_S_BG_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_BG_CTL2, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */ + { .offset = B2062_S_LGENG_CTL0, .value_a = 0x00F8, .value_g = 0x00D8, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_LGENG_CTL1, .value_a = 0x003C, .value_g = 0x0024, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_LGENG_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_LGENG_CTL3, .value_a = 0x0041, .value_g = 0x0041, .flags = 0, }, */ + /* { .offset = B2062_S_LGENG_CTL4, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */ + /* { .offset = B2062_S_LGENG_CTL5, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2062_S_LGENG_CTL6, .value_a = 0x0022, .value_g = 0x0022, .flags = 0, }, */ + /* { .offset = B2062_S_LGENG_CTL7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_S_LGENG_CTL8, .value_a = 0x0088, .value_g = 0x0080, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_LGENG_CTL9, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ + { .offset = B2062_S_LGENG_CTL10, .value_a = 0x0088, .value_g = 0x0080, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_LGENG_CTL11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL1, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL2, .value_a = 0x00AF, .value_g = 0x00AF, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL3, .value_a = 0x0012, .value_g = 0x0012, .flags = 0, }, */ + /* { .offset = 
B2062_S_REFPLL_CTL4, .value_a = 0x000B, .value_g = 0x000B, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL5, .value_a = 0x005F, .value_g = 0x005F, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL7, .value_a = 0x0040, .value_g = 0x0040, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL8, .value_a = 0x0052, .value_g = 0x0052, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL9, .value_a = 0x0026, .value_g = 0x0026, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL10, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL11, .value_a = 0x0036, .value_g = 0x0036, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL12, .value_a = 0x0057, .value_g = 0x0057, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL13, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL14, .value_a = 0x0075, .value_g = 0x0075, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL15, .value_a = 0x00B4, .value_g = 0x00B4, .flags = 0, }, */ + /* { .offset = B2062_S_REFPLL_CTL16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_S_RFPLL_CTL0, .value_a = 0x0098, .value_g = 0x0098, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL1, .value_a = 0x0010, .value_g = 0x0010, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_RFPLL_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RFPLL_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RFPLL_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_S_RFPLL_CTL5, .value_a = 0x0043, .value_g = 0x0043, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL6, .value_a = 0x0047, .value_g = 0x0047, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL7, .value_a = 0x000C, .value_g = 0x000C, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL8, .value_a = 0x0011, .value_g = 0x0011, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL9, .value_a = 0x0011, .value_g = 0x0011, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL10, .value_a = 0x000E, .value_g = 0x000E, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL11, .value_a = 0x0008, .value_g = 0x0008, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL12, .value_a = 0x0033, .value_g = 0x0033, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL13, .value_a = 0x000A, .value_g = 0x000A, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL14, .value_a = 0x0006, .value_g = 0x0006, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_RFPLL_CTL15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RFPLL_CTL16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RFPLL_CTL17, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_S_RFPLL_CTL18, .value_a = 0x003E, .value_g = 0x003E, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL19, .value_a = 0x0013, .value_g = 0x0013, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_RFPLL_CTL20, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_S_RFPLL_CTL21, .value_a = 0x0062, .value_g = 0x0062, .flags = B206X_FLAG_A | 
B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL22, .value_a = 0x0007, .value_g = 0x0007, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL23, .value_a = 0x0016, .value_g = 0x0016, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL24, .value_a = 0x005C, .value_g = 0x005C, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL25, .value_a = 0x0095, .value_g = 0x0095, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_RFPLL_CTL26, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RFPLL_CTL27, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RFPLL_CTL28, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RFPLL_CTL29, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_S_RFPLL_CTL30, .value_a = 0x00A0, .value_g = 0x00A0, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL31, .value_a = 0x0004, .value_g = 0x0004, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_RFPLL_CTL32, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2062_S_RFPLL_CTL33, .value_a = 0x00CC, .value_g = 0x00CC, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2062_S_RFPLL_CTL34, .value_a = 0x0007, .value_g = 0x0007, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2062_S_RXG_CNT0, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT5, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT6, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT7, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ + { .offset = B2062_S_RXG_CNT8, .value_a = 0x000F, .value_g = 0x000F, .flags = B206X_FLAG_A, }, + /* { .offset = B2062_S_RXG_CNT9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT10, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT11, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT12, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT13, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT14, .value_a = 0x00A0, .value_g = 0x00A0, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT15, .value_a = 0x0004, .value_g = 0x0004, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2062_S_RXG_CNT17, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ +}; + +static const struct b206x_init_tab_entry b2063_init_tab[] = { + { .offset = B2063_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + /* { .offset = B2063_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_COMM4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ 
+ /* { .offset = B2063_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_COMM8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_COMM10, .value_a = 0x0001, .value_g = 0x0000, .flags = B206X_FLAG_A, }, + /* { .offset = B2063_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_COMM14, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */ + /* { .offset = B2063_COMM15, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */ + { .offset = B2063_COMM16, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + { .offset = B2063_COMM17, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + { .offset = B2063_COMM18, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + { .offset = B2063_COMM19, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + { .offset = B2063_COMM20, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + { .offset = B2063_COMM21, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + { .offset = B2063_COMM22, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + { .offset = B2063_COMM23, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + { .offset = B2063_COMM24, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, }, + /* { .offset = B2063_PWR_SWITCH_CTL, .value_a = 0x007f, .value_g = 0x007f, .flags = 0, }, */ + /* { .offset = B2063_PLL_SP1, .value_a = 0x003f, .value_g = 0x003f, .flags = 0, }, */ + /* { .offset = B2063_PLL_SP2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_LOGEN_SP1, .value_a = 0x00e8, .value_g = 0x00d4, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2063_LOGEN_SP2, .value_a = 0x00a7, .value_g = 0x0053, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_LOGEN_SP3, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ + { .offset = B2063_LOGEN_SP4, .value_a = 0x00f0, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_LOGEN_SP5, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + { .offset = B2063_G_RX_SP1, .value_a = 0x001f, .value_g = 0x005e, .flags = B206X_FLAG_G, }, + { .offset = B2063_G_RX_SP2, .value_a = 0x007f, .value_g = 0x007e, .flags = B206X_FLAG_G, }, + { .offset = B2063_G_RX_SP3, .value_a = 0x0030, .value_g = 0x00f0, .flags = B206X_FLAG_G, }, + /* { .offset = B2063_G_RX_SP4, .value_a = 0x0035, .value_g = 0x0035, .flags = 0, }, */ + /* { .offset = B2063_G_RX_SP5, .value_a = 0x003f, .value_g = 0x003f, .flags = 0, }, */ + /* { .offset = B2063_G_RX_SP6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_G_RX_SP7, .value_a = 0x007f, .value_g = 0x007f, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_G_RX_SP8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_SP9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_G_RX_SP10, .value_a = 0x000c, .value_g = 0x000c, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_G_RX_SP11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_A_RX_SP1, .value_a 
= 0x003c, .value_g = 0x003f, .flags = B206X_FLAG_A, }, + { .offset = B2063_A_RX_SP2, .value_a = 0x00fc, .value_g = 0x00fe, .flags = B206X_FLAG_A, }, + /* { .offset = B2063_A_RX_SP3, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ + /* { .offset = B2063_A_RX_SP4, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ + /* { .offset = B2063_A_RX_SP5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_SP6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_A_RX_SP7, .value_a = 0x0008, .value_g = 0x0008, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_RX_BB_SP1, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_SP2, .value_a = 0x0022, .value_g = 0x0022, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_SP3, .value_a = 0x00a8, .value_g = 0x00a8, .flags = 0, }, */ + { .offset = B2063_RX_BB_SP4, .value_a = 0x0060, .value_g = 0x0060, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_RX_BB_SP5, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_SP6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_SP7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_RX_BB_SP8, .value_a = 0x0030, .value_g = 0x0030, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_TX_RF_SP1, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP2, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */ + { .offset = B2063_TX_RF_SP3, .value_a = 0x000c, .value_g = 0x000b, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2063_TX_RF_SP4, .value_a = 0x0010, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_TX_RF_SP5, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP6, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP7, .value_a = 0x0068, .value_g = 0x0068, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP8, .value_a = 0x0068, .value_g = 0x0068, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP9, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP10, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP11, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP12, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP13, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP14, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP15, .value_a = 0x00c0, .value_g = 0x00c0, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP16, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_SP17, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ + { .offset = B2063_PA_SP1, .value_a = 0x003d, .value_g = 0x00fd, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_PA_SP2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */ + /* { .offset = B2063_PA_SP3, .value_a = 0x0096, .value_g = 0x0096, .flags = 0, }, */ + /* { .offset = B2063_PA_SP4, .value_a = 0x005a, .value_g = 0x005a, .flags = 0, }, */ + /* { .offset = B2063_PA_SP5, .value_a = 0x007f, .value_g = 0x007f, .flags = 0, }, */ + /* { .offset = B2063_PA_SP6, .value_a = 0x007f, .value_g = 0x007f, .flags = 0, }, */ + /* { .offset = B2063_PA_SP7, .value_a = 0x0033, .value_g = 
0x0033, .flags = 0, }, */ + { .offset = B2063_TX_BB_SP1, .value_a = 0x0002, .value_g = 0x0002, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_TX_BB_SP2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_TX_BB_SP3, .value_a = 0x0030, .value_g = 0x0030, .flags = 0, }, */ + /* { .offset = B2063_REG_SP1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_BANDGAP_CTL1, .value_a = 0x0056, .value_g = 0x0056, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_BANDGAP_CTL2, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */ + /* { .offset = B2063_LPO_CTL1, .value_a = 0x000e, .value_g = 0x000e, .flags = 0, }, */ + /* { .offset = B2063_RC_CALIB_CTL1, .value_a = 0x007e, .value_g = 0x007e, .flags = 0, }, */ + /* { .offset = B2063_RC_CALIB_CTL2, .value_a = 0x0015, .value_g = 0x0015, .flags = 0, }, */ + /* { .offset = B2063_RC_CALIB_CTL3, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */ + /* { .offset = B2063_RC_CALIB_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_RC_CALIB_CTL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_RC_CALIB_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_RC_CALIB_CTL7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_RC_CALIB_CTL8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_RC_CALIB_CTL9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_RC_CALIB_CTL10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_CALNRST, .value_a = 0x0004, .value_g = 0x0004, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_IN_PLL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_IN_PLL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_CP1, .value_a = 0x00cf, .value_g = 0x00cf, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_CP2, .value_a = 0x0059, .value_g = 0x0059, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_CP3, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_CP4, .value_a = 0x0042, .value_g = 0x0042, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_LF1, .value_a = 0x00db, .value_g = 0x00db, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_LF2, .value_a = 0x0094, .value_g = 0x0094, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_LF3, .value_a = 0x0028, .value_g = 0x0028, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_LF4, .value_a = 0x0063, .value_g = 0x0063, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_SG1, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_SG2, .value_a = 0x00d3, .value_g = 0x00d3, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_SG3, .value_a = 0x00b1, .value_g = 0x00b1, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_SG4, .value_a = 0x003b, .value_g = 0x003b, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_SG5, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_VCO1, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */ + { .offset = B2063_PLL_JTAG_PLL_VCO2, .value_a = 0x00f7, .value_g = 0x00f7, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB1, 
.value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB3, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB5, .value_a = 0x0009, .value_g = 0x0009, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB6, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB7, .value_a = 0x0016, .value_g = 0x0016, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB8, .value_a = 0x006b, .value_g = 0x006b, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB10, .value_a = 0x00b3, .value_g = 0x00b3, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_XTAL_12, .value_a = 0x0004, .value_g = 0x0004, .flags = 0, }, */ + /* { .offset = B2063_PLL_JTAG_PLL_XTAL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_ACL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_ACL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_ACL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_ACL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_ACL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_INPUTS, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_WAITCNT, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_OVR1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_OVR2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_OVAL1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_OVAL2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_OVAL3, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_OVAL4, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_OVAL5, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_OVAL6, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_OVAL7, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CALVLD1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CALVLD2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CVAL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CVAL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CVAL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CVAL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* 
{ .offset = B2063_LO_CALIB_CVAL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CVAL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LO_CALIB_CVAL7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_CALIB_EN, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_PEAKDET1, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_RCCR1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_VCOBUF1, .value_a = 0x0060, .value_g = 0x0060, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_MIXER1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_MIXER2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_BUF1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_BUF2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_DIV1, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_DIV2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_DIV3, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_CBUFRX1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_CBUFRX2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_CBUFTX1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_CBUFTX2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_IDAC1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_SPARE1, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_SPARE2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_LOGEN_SPARE3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_1ST1, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2063_G_RX_1ST2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_1ST3, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ + /* { .offset = B2063_G_RX_2ND1, .value_a = 0x0030, .value_g = 0x0030, .flags = 0, }, */ + /* { .offset = B2063_G_RX_2ND2, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ + /* { .offset = B2063_G_RX_2ND3, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2063_G_RX_2ND4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_2ND5, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2063_G_RX_2ND6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_2ND7, .value_a = 0x0035, .value_g = 0x0035, .flags = 0, }, */ + /* { .offset = B2063_G_RX_2ND8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_PS1, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2063_G_RX_PS2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_PS3, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2063_G_RX_PS4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_PS5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_MIX1, .value_a = 0x0044, .value_g = 0x0044, 
.flags = 0, }, */ + /* { .offset = B2063_G_RX_MIX2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_G_RX_MIX3, .value_a = 0x0071, .value_g = 0x0071, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2063_G_RX_MIX4, .value_a = 0x0071, .value_g = 0x0071, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_G_RX_MIX5, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */ + /* { .offset = B2063_G_RX_MIX6, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ + /* { .offset = B2063_G_RX_MIX7, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */ + /* { .offset = B2063_G_RX_MIX8, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + /* { .offset = B2063_G_RX_PDET1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_SPARES1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_SPARES2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_G_RX_SPARES3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_1ST1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_A_RX_1ST2, .value_a = 0x00f0, .value_g = 0x0030, .flags = B206X_FLAG_A, }, + /* { .offset = B2063_A_RX_1ST3, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ + /* { .offset = B2063_A_RX_1ST4, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2063_A_RX_1ST5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_2ND1, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ + /* { .offset = B2063_A_RX_2ND2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_2ND3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_2ND4, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ + /* { .offset = B2063_A_RX_2ND5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_2ND6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_2ND7, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */ + /* { .offset = B2063_A_RX_PS1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_PS2, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2063_A_RX_PS3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_PS4, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */ + /* { .offset = B2063_A_RX_PS5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_A_RX_PS6, .value_a = 0x0077, .value_g = 0x0077, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_A_RX_MIX1, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ + /* { .offset = B2063_A_RX_MIX2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_MIX3, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */ + { .offset = B2063_A_RX_MIX4, .value_a = 0x0003, .value_g = 0x0003, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2063_A_RX_MIX5, .value_a = 0x000f, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + { .offset = B2063_A_RX_MIX6, .value_a = 0x000f, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_A_RX_MIX7, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */ + /* { .offset = B2063_A_RX_MIX8, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */ + /* { .offset = B2063_A_RX_PWRDET1, .value_a = 0x0000, .value_g = 0x0000, 
.flags = 0, }, */ + /* { .offset = B2063_A_RX_SPARE1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_SPARE2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_A_RX_SPARE3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_RX_TIA_CTL1, .value_a = 0x0077, .value_g = 0x0077, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_RX_TIA_CTL2, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */ + { .offset = B2063_RX_TIA_CTL3, .value_a = 0x0077, .value_g = 0x0077, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_RX_TIA_CTL4, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */ + /* { .offset = B2063_RX_TIA_CTL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_RX_TIA_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_CTL1, .value_a = 0x0074, .value_g = 0x0074, .flags = 0, }, */ + { .offset = B2063_RX_BB_CTL2, .value_a = 0x0004, .value_g = 0x0004, .flags = B206X_FLAG_A | B206X_FLAG_G, }, + /* { .offset = B2063_RX_BB_CTL3, .value_a = 0x00a2, .value_g = 0x00a2, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_CTL4, .value_a = 0x00aa, .value_g = 0x00aa, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_CTL5, .value_a = 0x0024, .value_g = 0x0024, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_CTL6, .value_a = 0x00a9, .value_g = 0x00a9, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_CTL7, .value_a = 0x0028, .value_g = 0x0028, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_CTL8, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */ + /* { .offset = B2063_RX_BB_CTL9, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL1, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_IDAC_LO_RF_I, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_IDAC_LO_RF_Q, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_IDAC_LO_BB_I, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_IDAC_LO_BB_Q, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL2, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL3, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL4, .value_a = 0x00b8, .value_g = 0x00b8, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL5, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL6, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL7, .value_a = 0x0078, .value_g = 0x0078, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL8, .value_a = 0x00c0, .value_g = 0x00c0, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL9, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_TX_RF_CTL15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + { .offset = B2063_PA_CTL1, .value_a = 0x0000, .value_g = 0x0004, .flags = B206X_FLAG_A, }, + /* { .offset = B2063_PA_CTL2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */ + /* { .offset = B2063_PA_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */ + /* { .offset = B2063_PA_CTL4, .value_a = 0x0000, .value_g = 0x0000, 
.flags = 0, }, */
+	/* { .offset = B2063_PA_CTL5, .value_a = 0x0096, .value_g = 0x0096,
+		.flags = 0, }, */
+	/* { .offset = B2063_PA_CTL6, .value_a = 0x0077, .value_g = 0x0077,
+		.flags = 0, }, */
+	/* { .offset = B2063_PA_CTL7, .value_a = 0x005a, .value_g = 0x005a,
+		.flags = 0, }, */
+	/* { .offset = B2063_PA_CTL8, .value_a = 0x0000, .value_g = 0x0000,
+		.flags = 0, }, */
+	/* { .offset = B2063_PA_CTL9, .value_a = 0x0000, .value_g = 0x0000,
+		.flags = 0, }, */
+	/* { .offset = B2063_PA_CTL10, .value_a = 0x0021, .value_g = 0x0021,
+		.flags = 0, }, */
+	/* { .offset = B2063_PA_CTL11, .value_a = 0x0070, .value_g = 0x0070,
+		.flags = 0, }, */
+	/* { .offset = B2063_PA_CTL12, .value_a = 0x0000, .value_g = 0x0000,
+		.flags = 0, }, */
+	/* { .offset = B2063_PA_CTL13, .value_a = 0x0000, .value_g = 0x0000,
+		.flags = 0, }, */
+	/* { .offset = B2063_TX_BB_CTL1, .value_a = 0x0000, .value_g = 0x0000,
+		.flags = 0, }, */
+	/* { .offset = B2063_TX_BB_CTL2, .value_a = 0x00b3, .value_g = 0x00b3,
+		.flags = 0, }, */
+	/* { .offset = B2063_TX_BB_CTL3, .value_a = 0x0055, .value_g = 0x0055,
+		.flags = 0, }, */
+	/* { .offset = B2063_TX_BB_CTL4, .value_a = 0x000b, .value_g = 0x000b,
+		.flags = 0, }, */
+	/* { .offset = B2063_GPIO_CTL1, .value_a = 0x0000, .value_g = 0x0000,
+		.flags = 0, }, */
+	{ .offset = B2063_VREG_CTL1, .value_a = 0x0003, .value_g = 0x0003,
+		.flags = B206X_FLAG_A | B206X_FLAG_G, },
+	/* { .offset = B2063_AMUX_CTL1, .value_a = 0x0000, .value_g = 0x0000,
+		.flags = 0, }, */
+	/* { .offset = B2063_IQ_CALIB_GVAR, .value_a = 0x00b3, .value_g = 0x00b3,
+		.flags = 0, }, */
+	/* { .offset = B2063_IQ_CALIB_CTL1, .value_a = 0x0055, .value_g = 0x0055,
+		.flags = 0, }, */
+	/* { .offset = B2063_IQ_CALIB_CTL2, .value_a = 0x0030, .value_g = 0x0030,
+		.flags = 0, }, */
+	/* { .offset = B2063_TEMPSENSE_CTL1, .value_a = 0x0046, .value_g = 0x0046,
+		.flags = 0, }, */
+	/* { .offset = B2063_TEMPSENSE_CTL2, .value_a = 0x0000, .value_g = 0x0000,
+		.flags = 0, }, */
+	/* { .offset = B2063_TX_RX_LOOPBACK1, .value_a = 0x0000, .value_g = 0x0000,
+		.flags = 0, }, */
+	/* { .offset = B2063_TX_RX_LOOPBACK2, .value_a = 0x0000, .value_g = 0x0000,
+		.flags = 0, }, */
+	/* { .offset = B2063_EXT_TSSI_CTL1, .value_a = 0x0021, .value_g = 0x0021,
+		.flags = 0, }, */
+	/* { .offset = B2063_EXT_TSSI_CTL2, .value_a = 0x0023, .value_g = 0x0023,
+		.flags = 0, }, */
+	/* { .offset = B2063_AFE_CTL, .value_a = 0x0002, .value_g = 0x0002,
+		.flags = 0, }, */
+};
+
+void b2062_upload_init_table(struct b43_wldev *dev)
+{
+	const struct b206x_init_tab_entry *e;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(b2062_init_tab); i++) {
+		e = &b2062_init_tab[i];
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+			if (!(e->flags & B206X_FLAG_G))
+				continue;
+			b43_radio_write(dev, e->offset, e->value_g);
+		} else {
+			if (!(e->flags & B206X_FLAG_A))
+				continue;
+			b43_radio_write(dev, e->offset, e->value_a);
+		}
+	}
+}
+
+void b2063_upload_init_table(struct b43_wldev *dev)
+{
+	const struct b206x_init_tab_entry *e;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(b2063_init_tab); i++) {
+		e = &b2063_init_tab[i];
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+			if (!(e->flags & B206X_FLAG_G))
+				continue;
+			b43_radio_write(dev, e->offset, e->value_g);
+		} else {
+			if (!(e->flags & B206X_FLAG_A))
+				continue;
+			b43_radio_write(dev, e->offset, e->value_a);
+		}
+	}
+}
+
+u32 b43_lptab_read(struct b43_wldev *dev, u32 offset)
+{
+	u32 type, value;
+
+	type = offset & B43_LPTAB_TYPEMASK;
+	offset &= ~B43_LPTAB_TYPEMASK;
+	B43_WARN_ON(offset > 0xFFFF);
+
+	switch (type) {
+	case B43_LPTAB_8BIT:
+		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
+		value = b43_phy_read(dev, B43_LPPHY_TABLEDATALO) & 0xFF;
+		break;
+	case B43_LPTAB_16BIT:
+		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
+		value = b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
+		break;
+	case B43_LPTAB_32BIT:
+		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
+		value = b43_phy_read(dev, B43_LPPHY_TABLEDATAHI);
+		value <<= 16;
+		value |= b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
+		break;
+	default:
+		B43_WARN_ON(1);
+		value = 0;
+	}
+
+	return value;
+}
+
+void b43_lptab_read_bulk(struct b43_wldev *dev, u32 offset,
+			 unsigned int nr_elements, void *_data)
+{
+	u32 type;
+	u8 *data = _data;
+	unsigned int i;
+
+	type = offset & B43_LPTAB_TYPEMASK;
+	offset &= ~B43_LPTAB_TYPEMASK;
+	B43_WARN_ON(offset > 0xFFFF);
+
+	b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
+
+	for (i = 0; i < nr_elements; i++) {
+		switch (type) {
+		case B43_LPTAB_8BIT:
+			*data = b43_phy_read(dev, B43_LPPHY_TABLEDATALO) & 0xFF;
+			data++;
+			break;
+		case B43_LPTAB_16BIT:
+			*((u16 *)data) = b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
+			data += 2;
+			break;
+		case B43_LPTAB_32BIT:
+			*((u32 *)data) = b43_phy_read(dev, B43_LPPHY_TABLEDATAHI);
+			*((u32 *)data) <<= 16;
+			*((u32 *)data) |= b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
+			data += 4;
+			break;
+		default:
+			B43_WARN_ON(1);
+		}
+	}
+}
+
+void b43_lptab_write(struct b43_wldev *dev, u32 offset, u32 value)
+{
+	u32 type;
+
+	type = offset & B43_LPTAB_TYPEMASK;
+	offset &= ~B43_LPTAB_TYPEMASK;
+	B43_WARN_ON(offset > 0xFFFF);
+
+	switch (type) {
+	case B43_LPTAB_8BIT:
+		B43_WARN_ON(value & ~0xFF);
+		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
+		b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
+		break;
+	case B43_LPTAB_16BIT:
+		B43_WARN_ON(value & ~0xFFFF);
+		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
+		b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
+		break;
+	case B43_LPTAB_32BIT:
+		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
+		b43_phy_write(dev, B43_LPPHY_TABLEDATAHI, value >> 16);
+		b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
+		break;
+	default:
+		B43_WARN_ON(1);
+	}
+}
+
+void b43_lptab_write_bulk(struct b43_wldev *dev, u32 offset,
+			  unsigned int nr_elements, const void *_data)
+{
+	u32 type, value;
+	const u8 *data = _data;
+	unsigned int i;
+
+	type = offset & B43_LPTAB_TYPEMASK;
+	offset &= ~B43_LPTAB_TYPEMASK;
+	B43_WARN_ON(offset > 0xFFFF);
+
+	b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
+
+	for (i = 0; i < nr_elements; i++) {
+		switch (type) {
+		case B43_LPTAB_8BIT:
+			value = *data;
+			data++;
+			B43_WARN_ON(value & ~0xFF);
+			b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
+			break;
+		case B43_LPTAB_16BIT:
+			value = *((u16 *)data);
+			data += 2;
+			B43_WARN_ON(value & ~0xFFFF);
+			b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
+			break;
+		case B43_LPTAB_32BIT:
+			value = *((u32 *)data);
+			data += 4;
+			b43_phy_write(dev, B43_LPPHY_TABLEDATAHI, value >> 16);
+			b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
+			break;
+		default:
+			B43_WARN_ON(1);
+		}
+	}
+}
+
+static const u8 lpphy_min_sig_sq_table[] = {
+	0xde, 0xdc, 0xda, 0xd8, 0xd6, 0xd4, 0xd2, 0xcf, 0xcd,
+	0xca, 0xc7, 0xc4, 0xc1, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
+	0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0x00,
+	0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
+	0xbe, 0xbe, 0xbe, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd,
+	0xcf, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
+};
+
+static const u16 lpphy_rev01_noise_scale_table[] = {
+	0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
0xa4a4, + 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa400, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, + 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0x00a4, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4c00, 0x2d36, + 0x0000, 0x0000, 0x4c00, 0x2d36, +}; + +static const u16 lpphy_rev2plus_noise_scale_table[] = { + 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, + 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, + 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x0000, + 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, + 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, + 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, + 0x00a4, +}; + +static const u16 lpphy_crs_gain_nft_table[] = { + 0x0366, 0x036a, 0x036f, 0x0364, 0x0367, 0x036d, 0x0374, 0x037f, 0x036f, + 0x037b, 0x038a, 0x0378, 0x0367, 0x036d, 0x0375, 0x0381, 0x0374, 0x0381, + 0x0392, 0x03a9, 0x03c4, 0x03e1, 0x0001, 0x001f, 0x0040, 0x005e, 0x007f, + 0x009e, 0x00bd, 0x00dd, 0x00fd, 0x011d, 0x013d, +}; + +static const u16 lpphy_rev01_filter_control_table[] = { + 0xa0fc, 0x10fc, 0x10db, 0x20b7, 0xff93, 0x10bf, 0x109b, 0x2077, 0xff53, + 0x0127, +}; + +static const u32 lpphy_rev2plus_filter_control_table[] = { + 0x000141fc, 0x000021fc, 0x000021b7, 0x0000416f, 0x0001ff27, 0x0000217f, + 0x00002137, 0x000040ef, 0x0001fea7, 0x0000024f, +}; + +static const u32 lpphy_rev01_ps_control_table[] = { + 0x00010000, 0x000000a0, 0x00040000, 0x00000048, 0x08080101, 0x00000080, + 0x08080101, 0x00000040, 0x08080101, 0x000000c0, 0x08a81501, 0x000000c0, + 0x0fe8fd01, 0x000000c0, 0x08300105, 0x000000c0, 0x08080201, 0x000000c0, + 0x08280205, 0x000000c0, 0xe80802fe, 0x000000c7, 0x28080206, 0x000000c0, + 0x08080202, 0x000000c0, 0x0ba87602, 0x000000c0, 0x1068013d, 0x000000c0, + 0x10280105, 0x000000c0, 0x08880102, 0x000000c0, 0x08280106, 0x000000c0, + 0xe80801fd, 0x000000c7, 0xa8080115, 0x000000c0, +}; + +static const u32 lpphy_rev2plus_ps_control_table[] = { + 0x00e38e08, 0x00e08e38, 0x00000000, 0x00000000, 0x00000000, 0x00002080, + 0x00006180, 0x00003002, 0x00000040, 0x00002042, 0x00180047, 0x00080043, + 0x00000041, 0x000020c1, 0x00046006, 0x00042002, 0x00040000, 0x00002003, + 0x00180006, 0x00080002, +}; + +static const u8 lpphy_pll_fraction_table[] = { + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, +}; + +static const u16 lpphy_iqlo_cal_table[] = { + 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, + 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600, + 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, + 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, +}; + +static const u16 lpphy_rev0_ofdm_cck_gain_table[] = { + 0x0001, 0x0001, 0x0001, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, + 0x6001, 0x7001, 0x7011, 0x7021, 
0x2035, 0x2045, 0x2055, 0x2065, 0x2075, + 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, + 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, +}; + +static const u16 lpphy_rev1_ofdm_cck_gain_table[] = { + 0x5000, 0x6000, 0x7000, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, + 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075, + 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, + 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, +}; + +static const u16 lpphy_gain_delta_table[] = { + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, +}; + +static const u32 lpphy_tx_power_control_table[] = { + 0x00000050, 0x0000004f, 0x0000004e, 0x0000004d, 0x0000004c, 0x0000004b, + 0x0000004a, 0x00000049, 0x00000048, 0x00000047, 0x00000046, 0x00000045, + 0x00000044, 0x00000043, 0x00000042, 0x00000041, 0x00000040, 0x0000003f, + 0x0000003e, 0x0000003d, 0x0000003c, 0x0000003b, 0x0000003a, 0x00000039, + 0x00000038, 0x00000037, 0x00000036, 0x00000035, 0x00000034, 0x00000033, + 0x00000032, 0x00000031, 0x00000030, 0x0000002f, 0x0000002e, 0x0000002d, + 0x0000002c, 0x0000002b, 0x0000002a, 0x00000029, 0x00000028, 0x00000027, + 0x00000026, 0x00000025, 0x00000024, 0x00000023, 0x00000022, 0x00000021, + 0x00000020, 0x0000001f, 0x0000001e, 0x0000001d, 0x0000001c, 0x0000001b, + 0x0000001a, 0x00000019, 0x00000018, 0x00000017, 0x00000016, 0x00000015, + 0x00000014, 0x00000013, 0x00000012, 0x00000011, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x000075a0, 0x000075a0, 0x000075a1, 0x000075a1, 0x000075a2, 0x000075a2, + 0x000075a3, 0x000075a3, 0x000074b0, 0x000074b0, 0x000074b1, 0x000074b1, + 0x000074b2, 0x000074b2, 0x000074b3, 0x000074b3, 0x00006d20, 0x00006d20, + 0x00006d21, 0x00006d21, 0x00006d22, 0x00006d22, 0x00006d23, 0x00006d23, + 0x00004660, 0x00004660, 0x00004661, 0x00004661, 0x00004662, 0x00004662, + 0x00004663, 0x00004663, 0x00003e60, 0x00003e60, 0x00003e61, 0x00003e61, + 0x00003e62, 0x00003e62, 0x00003e63, 0x00003e63, 
0x00003660, 0x00003660, + 0x00003661, 0x00003661, 0x00003662, 0x00003662, 0x00003663, 0x00003663, + 0x00002e60, 0x00002e60, 0x00002e61, 0x00002e61, 0x00002e62, 0x00002e62, + 0x00002e63, 0x00002e63, 0x00002660, 0x00002660, 0x00002661, 0x00002661, + 0x00002662, 0x00002662, 0x00002663, 0x00002663, 0x000025e0, 0x000025e0, + 0x000025e1, 0x000025e1, 0x000025e2, 0x000025e2, 0x000025e3, 0x000025e3, + 0x00001de0, 0x00001de0, 0x00001de1, 0x00001de1, 0x00001de2, 0x00001de2, + 0x00001de3, 0x00001de3, 0x00001d60, 0x00001d60, 0x00001d61, 0x00001d61, + 0x00001d62, 0x00001d62, 0x00001d63, 0x00001d63, 0x00001560, 0x00001560, + 0x00001561, 0x00001561, 0x00001562, 0x00001562, 0x00001563, 0x00001563, + 0x00000d60, 0x00000d60, 0x00000d61, 0x00000d61, 0x00000d62, 0x00000d62, + 0x00000d63, 0x00000d63, 0x00000ce0, 0x00000ce0, 0x00000ce1, 0x00000ce1, + 0x00000ce2, 0x00000ce2, 0x00000ce3, 0x00000ce3, 0x00000e10, 0x00000e10, + 0x00000e11, 0x00000e11, 0x00000e12, 0x00000e12, 0x00000e13, 0x00000e13, + 0x00000bf0, 0x00000bf0, 0x00000bf1, 0x00000bf1, 0x00000bf2, 0x00000bf2, + 0x00000bf3, 0x00000bf3, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, + 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x000000ff, 0x000002fc, + 0x0000fa08, 0x00000305, 0x00000206, 0x00000304, 0x0000fb04, 0x0000fcff, + 0x000005fb, 0x0000fd01, 0x00000401, 0x00000006, 0x0000ff03, 0x000007fc, + 0x0000fc08, 0x00000203, 0x0000fffb, 0x00000600, 0x0000fa01, 0x0000fc03, + 0x0000fe06, 0x0000fe00, 0x00000102, 0x000007fd, 0x000004fb, 0x000006ff, + 0x000004fd, 0x0000fdfa, 0x000007fb, 0x0000fdfa, 0x0000fa06, 0x00000500, + 0x0000f902, 0x000007fa, 0x0000fafa, 0x00000500, 0x000007fa, 0x00000700, + 0x00000305, 0x000004ff, 0x00000801, 0x00000503, 0x000005f9, 0x00000404, + 0x0000fb08, 0x000005fd, 0x00000501, 0x00000405, 0x0000fb03, 0x000007fc, + 0x00000403, 0x00000303, 0x00000402, 0x0000faff, 0x0000fe05, 0x000005fd, + 0x0000fe01, 0x000007fa, 0x00000202, 0x00000504, 0x00000102, 0x000008fe, + 0x0000fa04, 0x0000fafc, 0x0000fe08, 0x000000f9, 0x000002fa, 0x000003fe, + 0x00000304, 0x000004f9, 0x00000100, 0x0000fd06, 
0x000008fc, 0x00000701, + 0x00000504, 0x0000fdfe, 0x0000fdfc, 0x000003fe, 0x00000704, 0x000002fc, + 0x000004f9, 0x0000fdfd, 0x0000fa07, 0x00000205, 0x000003fd, 0x000005fb, + 0x000004f9, 0x00000804, 0x0000fc06, 0x0000fcf9, 0x00000100, 0x0000fe05, + 0x00000408, 0x0000fb02, 0x00000304, 0x000006fe, 0x000004fa, 0x00000305, + 0x000008fc, 0x00000102, 0x000001fd, 0x000004fc, 0x0000fe03, 0x00000701, + 0x000001fb, 0x000001f9, 0x00000206, 0x000006fd, 0x00000508, 0x00000700, + 0x00000304, 0x000005fe, 0x000005ff, 0x0000fa04, 0x00000303, 0x0000fefb, + 0x000007f9, 0x0000fefc, 0x000004fd, 0x000005fc, 0x0000fffd, 0x0000fc08, + 0x0000fbf9, 0x0000fd07, 0x000008fb, 0x0000fe02, 0x000006fb, 0x00000702, +}; + +static const u32 lpphy_gain_idx_table[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, + 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, + 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000, + 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, + 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, + 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, + 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, + 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, + 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, + 0x64ca55ad, 0x0000001a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, + 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, + 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, + 0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, + 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, + 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, + 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, + 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, + 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, + 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a, +}; + +static const u16 lpphy_aux_gain_idx_table[] = { + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0001, 0x0002, 0x0004, 0x0016, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016, +}; + +static const u32 lpphy_gain_value_table[] = { + 0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004, + 0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a, + 0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006, + 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000, + 0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 
+ 0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000009, 0x000000f1, + 0x00000000, 0x00000000, +}; + +static const u16 lpphy_gain_table[] = { + 0x0000, 0x0400, 0x0800, 0x0802, 0x0804, 0x0806, 0x0807, 0x0808, 0x080a, + 0x080b, 0x080c, 0x080e, 0x080f, 0x0810, 0x0812, 0x0813, 0x0814, 0x0816, + 0x0817, 0x081a, 0x081b, 0x081f, 0x0820, 0x0824, 0x0830, 0x0834, 0x0837, + 0x083b, 0x083f, 0x0840, 0x0844, 0x0857, 0x085b, 0x085f, 0x08d7, 0x08db, + 0x08df, 0x0957, 0x095b, 0x095f, 0x0b57, 0x0b5b, 0x0b5f, 0x0f5f, 0x135f, + 0x175f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, +}; + +static const u32 lpphy_a0_gain_idx_table[] = { + 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, + 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, + 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, + 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, + 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, + 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, + 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, + 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, + 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, + 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, + 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28, +}; + +static const u16 lpphy_a0_aux_gain_idx_table[] = { + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0002, 0x0014, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014, +}; + +static const u32 lpphy_a0_gain_value_table[] = { + 0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004, + 0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a, + 0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006, + 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000, + 0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f, + 0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000f, 0x000000f7, + 0x00000000, 0x00000000, +}; + +static const u16 lpphy_a0_gain_table[] = { + 0x0000, 0x0002, 0x0004, 0x0006, 0x0007, 0x0008, 0x000a, 0x000b, 0x000c, + 0x000e, 0x000f, 0x0010, 0x0012, 0x0013, 0x0014, 0x0016, 0x0017, 0x001a, + 0x001b, 0x001f, 0x0020, 0x0024, 0x0030, 0x0034, 0x0037, 0x003b, 0x003f, + 0x0040, 0x0044, 0x0057, 0x005b, 0x005f, 0x00d7, 0x00db, 0x00df, 0x0157, + 0x015b, 0x015f, 0x0357, 0x035b, 0x035f, 0x075f, 0x0b5f, 0x0f5f, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, +}; + +static const u16 lpphy_sw_control_table[] = { + 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, + 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, + 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, + 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0128, 0x0128, 0x0009, 0x0009, + 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, + 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, + 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, + 0x0018, +}; + +static const u8 lpphy_hf_table[] = { + 0x4b, 0x36, 0x24, 0x18, 0x49, 0x34, 0x23, 0x17, 0x48, + 0x33, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17, +}; + +static const u32 lpphy_papd_eps_table[] = { + 0x00000000, 0x00013ffc, 0x0001dff3, 0x0001bff0, 0x00023fe9, 0x00021fdf, + 0x00028fdf, 0x00033fd2, 0x00039fcb, 0x00043fc7, 0x0004efc2, 0x00055fb5, + 0x0005cfb0, 0x00063fa8, 0x00068fa3, 0x00071f98, 0x0007ef92, 0x00084f8b, + 0x0008df82, 0x00097f77, 0x0009df69, 0x000a3f62, 0x000adf57, 0x000b6f4c, + 0x000bff41, 0x000c9f39, 0x000cff30, 0x000dbf27, 0x000e4f1e, 0x000edf16, + 0x000f7f13, 0x00102f11, 0x00110f10, 0x0011df11, 0x0012ef15, 0x00143f1c, + 0x00158f27, 0x00172f35, 0x00193f47, 0x001baf5f, 0x001e6f7e, 0x0021cfa4, + 0x0025bfd2, 0x002a2008, 0x002fb047, 0x00360090, 0x003d40e0, 0x0045c135, + 0x004fb189, 0x005ae1d7, 0x0067221d, 0x0075025a, 0x007ff291, 0x007ff2bf, + 0x007ff2e3, 0x007ff2ff, 0x007ff315, 0x007ff329, 0x007ff33f, 0x007ff356, + 0x007ff36e, 0x007ff39c, 0x007ff441, 0x007ff506, +}; + +static const u32 lpphy_papd_mult_table[] = { + 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, + 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, + 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, + 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, + 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, + 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, + 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, + 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, + 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, + 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, + 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28, +}; + +static struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = { + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 139, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 135, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 131, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 128, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 124, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 121, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 117, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 114, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, 
.bb_mult = 111, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 107, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 104, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 101, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 99, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 96, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 93, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 90, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 88, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 85, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 83, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 81, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 78, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 76, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 74, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, }, + { 
.gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 56, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 
11, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, }, +}; + +static struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = { + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 
11, .pad = 6, .dac = 0, .bb_mult = 73, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 71, }, + { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 69, }, + { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 67, }, + { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 65, }, + { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 58, }, + { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 65, }, + { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 67, }, + { .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 65, }, + { .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 65, }, + { .gm = 4, 
.pga = 6, .pad = 3, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 58, }, + { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 57, }, + { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 83, }, + { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 81, }, + { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 78, }, + { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 76, }, + { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 74, }, + { .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 72, }, +}; + +static struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = { + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 83, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 81, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 78, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 76, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 74, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 55, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 56, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 55, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult 
= 58, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 56, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 73, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 56, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, 
.bb_mult = 60, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, }, +}; + +static struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = { + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 139, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 135, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 131, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 128, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 124, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 121, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 117, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 114, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 111, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 107, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 104, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 101, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 99, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 96, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 93, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 90, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 88, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 85, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 83, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 81, }, + { 
.gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 78, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 76, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 74, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 56, }, + { .gm = 7, .pga = 15, .pad = 
9, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, 
.bb_mult = 71, }, +}; + +static struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = { + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 83, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 81, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 78, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 76, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 74, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 15, .pad = 10, .dac = 0, 
.bb_mult = 60, }, + { .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, + 
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 61, }, + { .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 72, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 70, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 68, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 66, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 64, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 62, }, + { .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 60, }, +}; + +static struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = { + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 83, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 81, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 78, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 76, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 74, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 55, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 56, }, + { .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 55, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, 
.pad = 13, .dac = 0, .bb_mult = 56, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 72, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 73, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 56, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 58, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, }, + { .gm = 7, .pga 
= 13, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, }, + { .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 57, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 62, }, + { .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, }, +}; + +static struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = { + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 152, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 147, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 143, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 139, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 135, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 131, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 128, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 124, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 121, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 117, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 114, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 111, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 107, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 104, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 101, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 99, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 96, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 93, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 90, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 88, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 85, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 83, }, + { .gm = 255, 
.pga = 255, .pad = 203, .dac = 0, .bb_mult = 81, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 78, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 76, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 74, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 72, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 70, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 68, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 66, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 197, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 192, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 186, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 181, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 176, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 171, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 166, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 161, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 157, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 152, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 148, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 144, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 140, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 136, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 132, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 128, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 124, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 121, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 117, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 114, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 111, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 108, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 105, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 102, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 99, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 96, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 93, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 91, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 88, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 86, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 83, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 81, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 79, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 76, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 74, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 72, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 70, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 68, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 66, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 64, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 248, .pad = 64, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 248, .pad = 62, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 241, .pad = 62, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 241, .pad = 60, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 234, .pad = 60, 
.dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 234, .pad = 59, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 227, .pad = 59, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 227, .pad = 57, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 221, .pad = 57, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 221, .pad = 55, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 215, .pad = 55, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 215, .pad = 54, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 208, .pad = 54, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 208, .pad = 52, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 203, .pad = 52, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 203, .pad = 51, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 197, .pad = 51, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 197, .pad = 49, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 191, .pad = 49, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 191, .pad = 48, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 186, .pad = 48, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 186, .pad = 47, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 181, .pad = 47, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 181, .pad = 45, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 175, .pad = 45, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 175, .pad = 44, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 170, .pad = 44, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 170, .pad = 43, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 166, .pad = 43, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 166, .pad = 42, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 161, .pad = 42, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 161, .pad = 40, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 156, .pad = 40, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 156, .pad = 39, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 152, .pad = 39, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 152, .pad = 38, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 148, .pad = 38, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 148, .pad = 37, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 143, .pad = 37, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 143, .pad = 36, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 139, .pad = 36, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 139, .pad = 35, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 135, .pad = 35, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 135, .pad = 34, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 132, .pad = 34, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 132, .pad = 33, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 128, .pad = 33, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 128, .pad = 32, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 124, .pad = 32, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 124, .pad = 31, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 121, .pad = 31, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 121, .pad = 30, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 117, .pad = 30, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 117, .pad = 29, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 114, .pad = 29, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 114, .pad = 29, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 111, .pad = 29, .dac = 0, .bb_mult = 64, }, +}; + +static struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = { + { .gm = 
7, .pga = 99, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 96, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 93, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 90, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 88, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 85, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 83, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 81, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 78, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 76, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 74, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 72, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 70, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 68, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 66, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 64, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 64, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 62, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 62, .pad = 248, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 60, .pad = 248, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 60, .pad = 241, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 59, .pad = 241, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 59, .pad = 234, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 57, .pad = 234, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 57, .pad = 227, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 55, .pad = 227, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 55, .pad = 221, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 54, .pad = 221, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 54, .pad = 215, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 52, .pad = 215, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 52, .pad = 208, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 51, .pad = 208, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 51, .pad = 203, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 49, .pad = 203, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 49, .pad = 197, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 48, .pad = 197, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 48, .pad = 191, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 47, .pad = 191, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 47, .pad = 186, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 45, .pad = 186, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 45, .pad = 181, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 44, .pad = 181, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 44, .pad = 175, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 43, .pad = 175, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 43, .pad = 170, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 42, .pad = 170, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 42, .pad = 166, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 40, .pad = 166, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 40, .pad = 161, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 39, .pad = 161, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 39, .pad = 156, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 38, .pad = 156, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 38, .pad = 152, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 37, .pad = 152, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 37, .pad = 148, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 36, .pad = 148, .dac = 
0, .bb_mult = 64, }, + { .gm = 7, .pga = 36, .pad = 143, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 35, .pad = 143, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 35, .pad = 139, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 34, .pad = 139, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 34, .pad = 135, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 33, .pad = 135, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 33, .pad = 132, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 32, .pad = 132, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 32, .pad = 128, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 31, .pad = 128, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 31, .pad = 124, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 30, .pad = 124, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 30, .pad = 121, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 29, .pad = 121, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 29, .pad = 117, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 29, .pad = 117, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 29, .pad = 114, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 28, .pad = 114, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 28, .pad = 111, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 27, .pad = 111, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 27, .pad = 108, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 26, .pad = 108, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 26, .pad = 104, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 25, .pad = 104, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 25, .pad = 102, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 25, .pad = 102, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 25, .pad = 99, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 24, .pad = 99, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 24, .pad = 96, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 23, .pad = 96, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 23, .pad = 93, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 23, .pad = 93, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 23, .pad = 90, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 22, .pad = 90, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 22, .pad = 88, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 21, .pad = 88, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 21, .pad = 85, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 21, .pad = 85, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 21, .pad = 83, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 20, .pad = 83, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 20, .pad = 81, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 20, .pad = 81, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 20, .pad = 78, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 19, .pad = 78, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 19, .pad = 76, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 19, .pad = 76, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 19, .pad = 74, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 18, .pad = 74, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 18, .pad = 72, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 18, .pad = 72, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 18, .pad = 70, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 17, .pad = 70, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 17, .pad = 68, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 17, .pad = 68, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 17, .pad = 66, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 16, .pad = 66, .dac = 
0, .bb_mult = 64, }, + { .gm = 7, .pga = 16, .pad = 64, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 16, .pad = 64, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 16, .pad = 62, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 62, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 60, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 60, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 15, .pad = 59, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 14, .pad = 59, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 14, .pad = 57, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 14, .pad = 57, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 14, .pad = 55, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 14, .pad = 55, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 14, .pad = 54, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 54, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 52, .dac = 0, .bb_mult = 64, }, + { .gm = 7, .pga = 13, .pad = 52, .dac = 0, .bb_mult = 64, }, +}; + +static struct lpphy_tx_gain_table_entry lpphy_rev2_5ghz_tx_gain_table[] = { + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 152, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 147, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 143, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 139, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 135, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 131, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 128, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 124, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 121, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 117, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 114, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 111, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 107, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 104, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 101, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 99, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 96, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 93, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 90, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 88, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 85, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 83, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 81, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 78, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 76, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 74, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 72, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 70, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 68, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 66, }, + { .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 248, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 241, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 234, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 227, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 221, .dac = 0, .bb_mult = 64, }, + { .gm = 255, 
.pga = 255, .pad = 215, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 208, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 197, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 191, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 186, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 181, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 175, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 170, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 166, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 161, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 156, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 152, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 148, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 143, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 139, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 135, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 132, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 128, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 124, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 121, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 117, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 114, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 111, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 108, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 104, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 102, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 99, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 96, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 93, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 90, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 88, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 85, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 83, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 81, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 78, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 76, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 74, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 72, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 70, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 68, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 66, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 64, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 64, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 255, .pad = 62, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 248, .pad = 62, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 248, .pad = 60, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 241, .pad = 60, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 241, .pad = 59, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 234, .pad = 59, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 234, .pad = 57, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 227, .pad = 57, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 227, .pad = 55, .dac = 0, .bb_mult = 64, }, + { .gm = 255, .pga = 221, .pad = 55, .dac = 
0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 221, .pad = 54, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 215, .pad = 54, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 215, .pad = 52, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 208, .pad = 52, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 208, .pad = 51, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 203, .pad = 51, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 203, .pad = 49, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 197, .pad = 49, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 197, .pad = 48, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 191, .pad = 48, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 191, .pad = 47, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 186, .pad = 47, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 186, .pad = 45, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 181, .pad = 45, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 181, .pad = 44, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 175, .pad = 44, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 175, .pad = 43, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 170, .pad = 43, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 170, .pad = 42, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 166, .pad = 42, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 166, .pad = 40, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 161, .pad = 40, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 161, .pad = 39, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 156, .pad = 39, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 156, .pad = 38, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 152, .pad = 38, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 152, .pad = 37, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 148, .pad = 37, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 148, .pad = 36, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 143, .pad = 36, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 143, .pad = 35, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 139, .pad = 35, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 139, .pad = 34, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 135, .pad = 34, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 135, .pad = 33, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 132, .pad = 33, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 132, .pad = 32, .dac = 0, .bb_mult = 64, },
+	{ .gm = 255, .pga = 128, .pad = 32, .dac = 0, .bb_mult = 64, },
+};
+
+void lpphy_rev0_1_table_init(struct b43_wldev *dev)
+{
+	B43_WARN_ON(dev->phy.rev >= 2);
+
+	b43_lptab_write_bulk(dev, B43_LPTAB8(2, 0),
+		ARRAY_SIZE(lpphy_min_sig_sq_table), lpphy_min_sig_sq_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB16(1, 0),
+		ARRAY_SIZE(lpphy_rev01_noise_scale_table), lpphy_rev01_noise_scale_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
+		ARRAY_SIZE(lpphy_crs_gain_nft_table), lpphy_crs_gain_nft_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB16(8, 0),
+		ARRAY_SIZE(lpphy_rev01_filter_control_table), lpphy_rev01_filter_control_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB32(9, 0),
+		ARRAY_SIZE(lpphy_rev01_ps_control_table), lpphy_rev01_ps_control_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB8(6, 0),
+		ARRAY_SIZE(lpphy_pll_fraction_table), lpphy_pll_fraction_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB16(0, 0),
+		ARRAY_SIZE(lpphy_iqlo_cal_table), lpphy_iqlo_cal_table);
+	if (dev->phy.rev == 0) {
+		b43_lptab_write_bulk(dev, B43_LPTAB16(13, 0),
+			ARRAY_SIZE(lpphy_rev0_ofdm_cck_gain_table), lpphy_rev0_ofdm_cck_gain_table);
+		b43_lptab_write_bulk(dev, B43_LPTAB16(12, 0),
+			ARRAY_SIZE(lpphy_rev0_ofdm_cck_gain_table), lpphy_rev0_ofdm_cck_gain_table);
+	} else {
+		b43_lptab_write_bulk(dev, B43_LPTAB16(13, 0),
+			ARRAY_SIZE(lpphy_rev1_ofdm_cck_gain_table), lpphy_rev1_ofdm_cck_gain_table);
+		b43_lptab_write_bulk(dev, B43_LPTAB16(12, 0),
+			ARRAY_SIZE(lpphy_rev1_ofdm_cck_gain_table), lpphy_rev1_ofdm_cck_gain_table);
+	}
+	b43_lptab_write_bulk(dev, B43_LPTAB16(15, 0),
+		ARRAY_SIZE(lpphy_gain_delta_table), lpphy_gain_delta_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0),
+		ARRAY_SIZE(lpphy_tx_power_control_table), lpphy_tx_power_control_table);
+}
+
+void lpphy_rev2plus_table_init(struct b43_wldev *dev)
+{
+	struct ssb_bus *bus = dev->dev->bus;
+	int i;
+
+	B43_WARN_ON(dev->phy.rev < 2);
+
+	for (i = 0; i < 704; i++)
+		b43_lptab_write(dev, B43_LPTAB32(7, i), 0);
+
+	b43_lptab_write_bulk(dev, B43_LPTAB8(2, 0),
+		ARRAY_SIZE(lpphy_min_sig_sq_table), lpphy_min_sig_sq_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB16(1, 0),
+		ARRAY_SIZE(lpphy_rev2plus_noise_scale_table), lpphy_rev2plus_noise_scale_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB32(11, 0),
+		ARRAY_SIZE(lpphy_rev2plus_filter_control_table), lpphy_rev2plus_filter_control_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB32(12, 0),
+		ARRAY_SIZE(lpphy_rev2plus_ps_control_table), lpphy_rev2plus_ps_control_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB32(13, 0),
+		ARRAY_SIZE(lpphy_gain_idx_table), lpphy_gain_idx_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
+		ARRAY_SIZE(lpphy_aux_gain_idx_table), lpphy_aux_gain_idx_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB16(15, 0),
+		ARRAY_SIZE(lpphy_sw_control_table), lpphy_sw_control_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB8(16, 0),
+		ARRAY_SIZE(lpphy_hf_table), lpphy_hf_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB32(17, 0),
+		ARRAY_SIZE(lpphy_gain_value_table), lpphy_gain_value_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB16(18, 0),
+		ARRAY_SIZE(lpphy_gain_table), lpphy_gain_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB8(6, 0),
+		ARRAY_SIZE(lpphy_pll_fraction_table), lpphy_pll_fraction_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB16(0, 0),
+		ARRAY_SIZE(lpphy_iqlo_cal_table), lpphy_iqlo_cal_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB32(9, 0),
+		ARRAY_SIZE(lpphy_papd_eps_table), lpphy_papd_eps_table);
+	b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0),
+		ARRAY_SIZE(lpphy_papd_mult_table), lpphy_papd_mult_table);
+
+	if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
+		b43_lptab_write_bulk(dev, B43_LPTAB32(13, 0),
+			ARRAY_SIZE(lpphy_a0_gain_idx_table), lpphy_a0_gain_idx_table);
+		b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
+			ARRAY_SIZE(lpphy_a0_aux_gain_idx_table), lpphy_a0_aux_gain_idx_table);
+		b43_lptab_write_bulk(dev, B43_LPTAB32(17, 0),
+			ARRAY_SIZE(lpphy_a0_gain_value_table), lpphy_a0_gain_value_table);
+		b43_lptab_write_bulk(dev, B43_LPTAB16(18, 0),
+			ARRAY_SIZE(lpphy_a0_gain_table), lpphy_a0_gain_table);
+	}
+}
+
+static void lpphy_rev0_1_write_gain_table(struct b43_wldev *dev, int offset,
+					  struct lpphy_tx_gain_table_entry data)
+{
+	u32 tmp;
+
+	B43_WARN_ON(dev->phy.rev >= 2);
+
+	tmp = data.pad << 11;
+	tmp |= data.pga << 7;
+	tmp |= data.gm << 4;
+	tmp |= data.dac;
+	b43_lptab_write(dev, B43_LPTAB32(10, 0xC0 + offset), tmp);
+	tmp = data.bb_mult << 20;
+	b43_lptab_write(dev, B43_LPTAB32(10, 0x140 + offset), tmp);
+}
+
+static void lpphy_rev2plus_write_gain_table(struct b43_wldev *dev, int offset,
+					    struct lpphy_tx_gain_table_entry data)
+{
+	u32 tmp;
+
+	B43_WARN_ON(dev->phy.rev < 2);
+
+	tmp = data.pad << 16;
+	tmp |= data.pga << 8;
+	tmp |= data.gm;
+	if (dev->phy.rev >= 3) {
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+			tmp |= 0x10 << 24;
+		else
+			tmp |= 0x70 << 24;
+	} else {
+		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+			tmp |= 0x14 << 24;
+		else
+			tmp |= 0x7F << 24;
+	}
+	b43_lptab_write(dev, B43_LPTAB32(7, 0xC0 + offset), tmp);
+	tmp = data.bb_mult << 20;
+	tmp |= data.dac << 28;
+	b43_lptab_write(dev, B43_LPTAB32(7, 0x140 + offset), tmp);
+}
+
+void lpphy_write_gain_table(struct b43_wldev *dev, int offset,
+			    struct lpphy_tx_gain_table_entry data)
+{
+	if (dev->phy.rev >= 2)
+		lpphy_rev2plus_write_gain_table(dev, offset, data);
+	else
+		lpphy_rev0_1_write_gain_table(dev, offset, data);
+}
+
+void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count,
+				 struct lpphy_tx_gain_table_entry *table)
+{
+	int i;
+
+	for (i = offset; i < count; i++)
+		lpphy_write_gain_table(dev, i, table[i]);
+}
+
+void lpphy_init_tx_gain_table(struct b43_wldev *dev)
+{
+	struct ssb_bus *bus = dev->dev->bus;
+
+	switch (dev->phy.rev) {
+	case 0:
+		if ((bus->sprom.boardflags_hi & B43_BFH_NOPA) ||
+		    (bus->sprom.boardflags_lo & B43_BFL_HGPA))
+			lpphy_write_gain_table_bulk(dev, 0, 128,
+					lpphy_rev0_nopa_tx_gain_table);
+		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+			lpphy_write_gain_table_bulk(dev, 0, 128,
+					lpphy_rev0_2ghz_tx_gain_table);
+		else
+			lpphy_write_gain_table_bulk(dev, 0, 128,
+					lpphy_rev0_5ghz_tx_gain_table);
+		break;
+	case 1:
+		if ((bus->sprom.boardflags_hi & B43_BFH_NOPA) ||
+		    (bus->sprom.boardflags_lo & B43_BFL_HGPA))
+			lpphy_write_gain_table_bulk(dev, 0, 128,
+					lpphy_rev1_nopa_tx_gain_table);
+		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+			lpphy_write_gain_table_bulk(dev, 0, 128,
+					lpphy_rev1_2ghz_tx_gain_table);
+		else
+			lpphy_write_gain_table_bulk(dev, 0, 128,
+					lpphy_rev1_5ghz_tx_gain_table);
+		break;
+	default:
+		if (bus->sprom.boardflags_hi & B43_BFH_NOPA)
+			lpphy_write_gain_table_bulk(dev, 0, 128,
+					lpphy_rev2_nopa_tx_gain_table);
+		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+			lpphy_write_gain_table_bulk(dev, 0, 128,
+					lpphy_rev2_2ghz_tx_gain_table);
+		else
+			lpphy_write_gain_table_bulk(dev, 0, 128,
+					lpphy_rev2_5ghz_tx_gain_table);
+	}
+}
diff --git a/drivers/net/wireless/b43/tables_lpphy.h b/drivers/net/wireless/b43/tables_lpphy.h
new file mode 100644
index 0000000..84f1d26
--- /dev/null
+++ b/drivers/net/wireless/b43/tables_lpphy.h
@@ -0,0 +1,44 @@
+#ifndef B43_TABLES_LPPHY_H_
+#define B43_TABLES_LPPHY_H_
+
+
+#define B43_LPTAB_TYPEMASK	0xF0000000
+#define B43_LPTAB_8BIT		0x10000000
+#define B43_LPTAB_16BIT		0x20000000
+#define B43_LPTAB_32BIT		0x30000000
+#define B43_LPTAB8(table, offset)	(((table) << 10) | (offset) | B43_LPTAB_8BIT)
+#define B43_LPTAB16(table, offset)	(((table) << 10) | (offset) | B43_LPTAB_16BIT)
+#define B43_LPTAB32(table, offset)	(((table) << 10) | (offset) | B43_LPTAB_32BIT)
+
+/* Table definitions */
+#define B43_LPTAB_TXPWR_R2PLUS	B43_LPTAB32(0x07, 0) /* TX power lookup table (rev >= 2) */
+#define B43_LPTAB_TXPWR_R0_1	B43_LPTAB32(0xA0, 0) /* TX power lookup table (rev < 2) */
+
+u32 b43_lptab_read(struct b43_wldev *dev, u32 offset);
+void b43_lptab_write(struct b43_wldev *dev, u32 offset, u32 value);
+
+/* Bulk table access. Note that these functions return the bulk data in
+ * host endianness! The returned data is _not_ a bytearray, but an array
+ * consisting of nr_elements of the data type. */
+void b43_lptab_read_bulk(struct b43_wldev *dev, u32 offset,
+			 unsigned int nr_elements, void *data);
+void b43_lptab_write_bulk(struct b43_wldev *dev, u32 offset,
+			  unsigned int nr_elements, const void *data);
+
+void b2062_upload_init_table(struct b43_wldev *dev);
+void b2063_upload_init_table(struct b43_wldev *dev);
+
+struct lpphy_tx_gain_table_entry {
+	u8 gm, pga, pad, dac, bb_mult;
+};
+
+void lpphy_write_gain_table(struct b43_wldev *dev, int offset,
+			    struct lpphy_tx_gain_table_entry data);
+void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count,
+				 struct lpphy_tx_gain_table_entry *table);
+
+void lpphy_rev0_1_table_init(struct b43_wldev *dev);
+void lpphy_rev2plus_table_init(struct b43_wldev *dev);
+void lpphy_init_tx_gain_table(struct b43_wldev *dev);
+
+#endif /* B43_TABLES_LPPHY_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
new file mode 100644
index 0000000..a00d509
--- /dev/null
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -0,0 +1,3168 @@
+/*
+
+  Broadcom B43 wireless driver
+  IEEE 802.11n PHY and radio device data tables
+
+  Copyright (c) 2008 Michael Buesch
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING. If not, write to
+  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+  Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "tables_nphy.h"
+#include "phy_common.h"
+#include "phy_n.h"
+
+
+struct b2055_inittab_entry {
+	/* Value to write if we use the 5GHz band. */
+	u16 ghz5;
+	/* Value to write if we use the 2.4GHz band.
*/ + u16 ghz2; + /* Flags */ + u8 flags; +#define B2055_INITTAB_ENTRY_OK 0x01 +#define B2055_INITTAB_UPLOAD 0x02 +}; +#define UPLOAD .flags = B2055_INITTAB_ENTRY_OK | B2055_INITTAB_UPLOAD +#define NOUPLOAD .flags = B2055_INITTAB_ENTRY_OK + +static const struct b2055_inittab_entry b2055_inittab [] = { + [B2055_SP_PINPD] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, }, + [B2055_C1_SP_RSSI] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C1_SP_PDMISC] = { .ghz5 = 0x0027, .ghz2 = 0x0027, NOUPLOAD, }, + [B2055_C2_SP_RSSI] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C2_SP_PDMISC] = { .ghz5 = 0x0027, .ghz2 = 0x0027, NOUPLOAD, }, + [B2055_C1_SP_RXGC1] = { .ghz5 = 0x007F, .ghz2 = 0x007F, UPLOAD, }, + [B2055_C1_SP_RXGC2] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, }, + [B2055_C2_SP_RXGC1] = { .ghz5 = 0x007F, .ghz2 = 0x007F, UPLOAD, }, + [B2055_C2_SP_RXGC2] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, }, + [B2055_C1_SP_LPFBWSEL] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, }, + [B2055_C2_SP_LPFBWSEL] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, }, + [B2055_C1_SP_TXGC1] = { .ghz5 = 0x004F, .ghz2 = 0x004F, UPLOAD, }, + [B2055_C1_SP_TXGC2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, }, + [B2055_C2_SP_TXGC1] = { .ghz5 = 0x004F, .ghz2 = 0x004F, UPLOAD, }, + [B2055_C2_SP_TXGC2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, }, + [B2055_MASTER1] = { .ghz5 = 0x00D0, .ghz2 = 0x00D0, NOUPLOAD, }, + [B2055_MASTER2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, }, + [B2055_PD_LGEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_PD_PLLTS] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, }, + [B2055_C1_PD_LGBUF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C1_PD_TX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C1_PD_RXTX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C1_PD_RSSIMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C2_PD_LGBUF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C2_PD_TX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C2_PD_RXTX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C2_PD_RSSIMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_PWRDET_LGEN] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, }, + [B2055_C1_PWRDET_LGBUF] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, }, + [B2055_C1_PWRDET_RXTX] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, }, + [B2055_C2_PWRDET_LGBUF] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, }, + [B2055_C2_PWRDET_RXTX] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, }, + [B2055_RRCCAL_CS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_RRCCAL_NOPTSEL] = { .ghz5 = 0x002C, .ghz2 = 0x002C, NOUPLOAD, }, + [B2055_CAL_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_CAL_COUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_CAL_COUT2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_CAL_CVARCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_CAL_RVARCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_CAL_LPOCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_CAL_TS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_CAL_RCCALRTS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_CAL_RCALRTS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_PADDRV] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, }, + [B2055_XOCTL1] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, }, + [B2055_XOCTL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_XOREGUL] = { .ghz5 = 
0x0004, .ghz2 = 0x0004, UPLOAD, }, + [B2055_XOMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_PLL_LFC1] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, }, + [B2055_PLL_CALVTH] = { .ghz5 = 0x0087, .ghz2 = 0x0087, NOUPLOAD, }, + [B2055_PLL_LFC2] = { .ghz5 = 0x0009, .ghz2 = 0x0009, NOUPLOAD, }, + [B2055_PLL_REF] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, }, + [B2055_PLL_LFR1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, }, + [B2055_PLL_PFDCP] = { .ghz5 = 0x0018, .ghz2 = 0x0018, UPLOAD, }, + [B2055_PLL_IDAC_CPOPAMP] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2055_PLL_CPREG] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, }, + [B2055_PLL_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2055_RF_PLLMOD0] = { .ghz5 = 0x009E, .ghz2 = 0x009E, NOUPLOAD, }, + [B2055_RF_PLLMOD1] = { .ghz5 = 0x0009, .ghz2 = 0x0009, NOUPLOAD, }, + [B2055_RF_MMDIDAC1] = { .ghz5 = 0x00C8, .ghz2 = 0x00C8, UPLOAD, }, + [B2055_RF_MMDIDAC0] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [B2055_RF_MMDSP] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_VCO_CAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_VCO_CAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_VCO_CAL3] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, }, + [B2055_VCO_CAL4] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, }, + [B2055_VCO_CAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, }, + [B2055_VCO_CAL6] = { .ghz5 = 0x003E, .ghz2 = 0x003E, NOUPLOAD, }, + [B2055_VCO_CAL7] = { .ghz5 = 0x003E, .ghz2 = 0x003E, NOUPLOAD, }, + [B2055_VCO_CAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, }, + [B2055_VCO_CAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, }, + [B2055_VCO_CAL10] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, }, + [B2055_VCO_CAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, }, + [B2055_VCO_CAL12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_VCO_CAL13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_VCO_CAL14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_VCO_CAL15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_VCO_CAL16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_VCO_KVCO] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, }, + [B2055_VCO_CAPTAIL] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, }, + [B2055_VCO_IDACVCO] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2055_VCO_REG] = { .ghz5 = 0x0084, .ghz2 = 0x0084, UPLOAD, }, + [B2055_PLL_RFVTH] = { .ghz5 = 0x00C3, .ghz2 = 0x00C3, NOUPLOAD, }, + [B2055_LGBUF_CENBUF] = { .ghz5 = 0x008F, .ghz2 = 0x008F, NOUPLOAD, }, + [B2055_LGEN_TUNE1] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, }, + [B2055_LGEN_TUNE2] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, }, + [B2055_LGEN_IDAC1] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [B2055_LGEN_IDAC2] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [B2055_LGEN_BIASC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_LGEN_BIASIDAC] = { .ghz5 = 0x00CC, .ghz2 = 0x00CC, NOUPLOAD, }, + [B2055_LGEN_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2055_LGEN_DIV] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, }, + [B2055_LGEN_SPARE2] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, }, + [B2055_C1_LGBUF_ATUNE] = { .ghz5 = 0x00F8, .ghz2 = 0x00F8, NOUPLOAD, }, + [B2055_C1_LGBUF_GTUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [B2055_C1_LGBUF_DIV] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [B2055_C1_LGBUF_AIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0008, UPLOAD, }, + [B2055_C1_LGBUF_GIDAC] = { .ghz5 = 0x0088, .ghz2 = 
0x0088, NOUPLOAD, }, + [B2055_C1_LGBUF_IDACFO] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C1_LGBUF_SPARE] = { .ghz5 = 0x0001, .ghz2 = 0x0001, UPLOAD, }, + [B2055_C1_RX_RFSPC1] = { .ghz5 = 0x008A, .ghz2 = 0x008A, NOUPLOAD, }, + [B2055_C1_RX_RFR1] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, }, + [B2055_C1_RX_RFR2] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, }, + [B2055_C1_RX_RFRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2055_C1_RX_BB_BLCMP] = { .ghz5 = 0x00A0, .ghz2 = 0x00A0, NOUPLOAD, }, + [B2055_C1_RX_BB_LPF] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, }, + [B2055_C1_RX_BB_MIDACHP] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, }, + [B2055_C1_RX_BB_VGA1IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, }, + [B2055_C1_RX_BB_VGA2IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, }, + [B2055_C1_RX_BB_VGA3IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, }, + [B2055_C1_RX_BB_BUFOCTL] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, }, + [B2055_C1_RX_BB_RCCALCTL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, }, + [B2055_C1_RX_BB_RSSICTL1] = { .ghz5 = 0x006A, .ghz2 = 0x006A, UPLOAD, }, + [B2055_C1_RX_BB_RSSICTL2] = { .ghz5 = 0x00AB, .ghz2 = 0x00AB, UPLOAD, }, + [B2055_C1_RX_BB_RSSICTL3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, UPLOAD, }, + [B2055_C1_RX_BB_RSSICTL4] = { .ghz5 = 0x00C1, .ghz2 = 0x00C1, UPLOAD, }, + [B2055_C1_RX_BB_RSSICTL5] = { .ghz5 = 0x00AA, .ghz2 = 0x00AA, UPLOAD, }, + [B2055_C1_RX_BB_REG] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, }, + [B2055_C1_RX_BB_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C1_RX_TXBBRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2055_C1_TX_RF_SPGA] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, }, + [B2055_C1_TX_RF_SPAD] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, }, + [B2055_C1_TX_RF_CNTPGA1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, }, + [B2055_C1_TX_RF_CNTPAD1] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, }, + [B2055_C1_TX_RF_PGAIDAC] = { .ghz5 = 0x0097, .ghz2 = 0x0097, UPLOAD, }, + [B2055_C1_TX_PGAPADTN] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, }, + [B2055_C1_TX_PADIDAC1] = { .ghz5 = 0x0014, .ghz2 = 0x0014, UPLOAD, }, + [B2055_C1_TX_PADIDAC2] = { .ghz5 = 0x0033, .ghz2 = 0x0033, NOUPLOAD, }, + [B2055_C1_TX_MXBGTRIM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [B2055_C1_TX_RF_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2055_C1_TX_RF_PADTSSI1] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, }, + [B2055_C1_TX_RF_PADTSSI2] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, }, + [B2055_C1_TX_RF_SPARE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, }, + [B2055_C1_TX_RF_IQCAL1] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, }, + [B2055_C1_TX_RF_IQCAL2] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, }, + [B2055_C1_TXBB_RCCAL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, }, + [B2055_C1_TXBB_LPF1] = { .ghz5 = 0x0028, .ghz2 = 0x0028, NOUPLOAD, }, + [B2055_C1_TX_VOSCNCL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C1_TX_LPF_MXGMIDAC] = { .ghz5 = 0x004A, .ghz2 = 0x004A, NOUPLOAD, }, + [B2055_C1_TX_BB_MXGM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C2_LGBUF_ATUNE] = { .ghz5 = 0x00F8, .ghz2 = 0x00F8, NOUPLOAD, }, + [B2055_C2_LGBUF_GTUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [B2055_C2_LGBUF_DIV] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [B2055_C2_LGBUF_AIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0008, UPLOAD, }, + [B2055_C2_LGBUF_GIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [B2055_C2_LGBUF_IDACFO] = { .ghz5 = 0x0000, .ghz2 = 
0x0000, NOUPLOAD, }, + [B2055_C2_LGBUF_SPARE] = { .ghz5 = 0x0001, .ghz2 = 0x0001, UPLOAD, }, + [B2055_C2_RX_RFSPC1] = { .ghz5 = 0x008A, .ghz2 = 0x008A, NOUPLOAD, }, + [B2055_C2_RX_RFR1] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, }, + [B2055_C2_RX_RFR2] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, }, + [B2055_C2_RX_RFRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2055_C2_RX_BB_BLCMP] = { .ghz5 = 0x00A0, .ghz2 = 0x00A0, NOUPLOAD, }, + [B2055_C2_RX_BB_LPF] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, }, + [B2055_C2_RX_BB_MIDACHP] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, }, + [B2055_C2_RX_BB_VGA1IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, }, + [B2055_C2_RX_BB_VGA2IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, }, + [B2055_C2_RX_BB_VGA3IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, }, + [B2055_C2_RX_BB_BUFOCTL] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, }, + [B2055_C2_RX_BB_RCCALCTL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, }, + [B2055_C2_RX_BB_RSSICTL1] = { .ghz5 = 0x006A, .ghz2 = 0x006A, UPLOAD, }, + [B2055_C2_RX_BB_RSSICTL2] = { .ghz5 = 0x00AB, .ghz2 = 0x00AB, UPLOAD, }, + [B2055_C2_RX_BB_RSSICTL3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, UPLOAD, }, + [B2055_C2_RX_BB_RSSICTL4] = { .ghz5 = 0x00C1, .ghz2 = 0x00C1, UPLOAD, }, + [B2055_C2_RX_BB_RSSICTL5] = { .ghz5 = 0x00AA, .ghz2 = 0x00AA, UPLOAD, }, + [B2055_C2_RX_BB_REG] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, }, + [B2055_C2_RX_BB_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C2_RX_TXBBRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2055_C2_TX_RF_SPGA] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, }, + [B2055_C2_TX_RF_SPAD] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, }, + [B2055_C2_TX_RF_CNTPGA1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, }, + [B2055_C2_TX_RF_CNTPAD1] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, }, + [B2055_C2_TX_RF_PGAIDAC] = { .ghz5 = 0x0097, .ghz2 = 0x0097, UPLOAD, }, + [B2055_C2_TX_PGAPADTN] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, }, + [B2055_C2_TX_PADIDAC1] = { .ghz5 = 0x0014, .ghz2 = 0x0014, UPLOAD, }, + [B2055_C2_TX_PADIDAC2] = { .ghz5 = 0x0033, .ghz2 = 0x0033, NOUPLOAD, }, + [B2055_C2_TX_MXBGTRIM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [B2055_C2_TX_RF_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, + [B2055_C2_TX_RF_PADTSSI1] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, }, + [B2055_C2_TX_RF_PADTSSI2] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, }, + [B2055_C2_TX_RF_SPARE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, }, + [B2055_C2_TX_RF_IQCAL1] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, }, + [B2055_C2_TX_RF_IQCAL2] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, }, + [B2055_C2_TXBB_RCCAL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, }, + [B2055_C2_TXBB_LPF1] = { .ghz5 = 0x0028, .ghz2 = 0x0028, NOUPLOAD, }, + [B2055_C2_TX_VOSCNCL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C2_TX_LPF_MXGMIDAC] = { .ghz5 = 0x004A, .ghz2 = 0x004A, NOUPLOAD, }, + [B2055_C2_TX_BB_MXGM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_PRG_GCHP21] = { .ghz5 = 0x0071, .ghz2 = 0x0071, NOUPLOAD, }, + [B2055_PRG_GCHP22] = { .ghz5 = 0x0072, .ghz2 = 0x0072, NOUPLOAD, }, + [B2055_PRG_GCHP23] = { .ghz5 = 0x0073, .ghz2 = 0x0073, NOUPLOAD, }, + [B2055_PRG_GCHP24] = { .ghz5 = 0x0074, .ghz2 = 0x0074, NOUPLOAD, }, + [B2055_PRG_GCHP25] = { .ghz5 = 0x0075, .ghz2 = 0x0075, NOUPLOAD, }, + [B2055_PRG_GCHP26] = { .ghz5 = 0x0076, .ghz2 = 0x0076, NOUPLOAD, }, + [B2055_PRG_GCHP27] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, }, + 
[B2055_PRG_GCHP28] = { .ghz5 = 0x0078, .ghz2 = 0x0078, NOUPLOAD, }, + [B2055_PRG_GCHP29] = { .ghz5 = 0x0079, .ghz2 = 0x0079, NOUPLOAD, }, + [B2055_PRG_GCHP30] = { .ghz5 = 0x007A, .ghz2 = 0x007A, NOUPLOAD, }, + [0xC7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xC8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xC9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xCA] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xCB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xCC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C1_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xCE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xCF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xD0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xD1] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, }, + [B2055_C1_B0NB_RSSIVCM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [0xD3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xD4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xD5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C1_GENSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xD7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xD8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C2_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xDA] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xDB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xDC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xDD] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, }, + [B2055_C2_B0NB_RSSIVCM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, }, + [0xDF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xE0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [0xE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, + [B2055_C2_GENSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, }, +}; + + +void b2055_upload_inittab(struct b43_wldev *dev, + bool ghz5, bool ignore_uploadflag) +{ + const struct b2055_inittab_entry *e; + unsigned int i; + u16 value; + + for (i = 0; i < ARRAY_SIZE(b2055_inittab); i++) { + e = &(b2055_inittab[i]); + if (!(e->flags & B2055_INITTAB_ENTRY_OK)) + continue; + if ((e->flags & B2055_INITTAB_UPLOAD) || ignore_uploadflag) { + if (ghz5) + value = e->ghz5; + else + value = e->ghz2; + b43_radio_write16(dev, i, value); + } + } +} + + +#define RADIOREGS(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, \ + r12, r13, r14, r15, r16, r17, r18, r19, r20, r21) \ + .radio_pll_ref = r0, \ + .radio_rf_pllmod0 = r1, \ + .radio_rf_pllmod1 = r2, \ + .radio_vco_captail = r3, \ + .radio_vco_cal1 = r4, \ + .radio_vco_cal2 = r5, \ + .radio_pll_lfc1 = r6, \ + .radio_pll_lfr1 = r7, \ + .radio_pll_lfc2 = r8, \ + .radio_lgbuf_cenbuf = r9, \ + .radio_lgen_tune1 = r10, \ + .radio_lgen_tune2 = r11, \ + .radio_c1_lgbuf_atune = r12, \ + .radio_c1_lgbuf_gtune = r13, \ + .radio_c1_rx_rfr1 = r14, \ + .radio_c1_tx_pgapadtn = r15, \ + .radio_c1_tx_mxbgtrim = r16, \ + .radio_c2_lgbuf_atune = r17, \ + .radio_c2_lgbuf_gtune = r18, \ + .radio_c2_rx_rfr1 = r19, \ + .radio_c2_tx_pgapadtn = r20, \ + .radio_c2_tx_mxbgtrim = r21 + +#define PHYREGS(r0, r1, r2, r3, r4, r5) \ + .phy_bw1a = r0, \ + .phy_bw2 = r1, \ + .phy_bw3 = r2, \ + .phy_bw4 = r3, \ + .phy_bw5 = r4, \ + .phy_bw6 = r5 + +static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = { + { .channel = 184, + .freq = 4920, /* MHz */ + .unk2 = 3280, + RADIOREGS(0x71, 0x01, 0xEC, 0x0F, 0xFF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xFF, 0xFF, 
0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xB407, 0xB007, 0xAC07, 0x1402, 0x1502, 0x1602), + }, + { .channel = 186, + .freq = 4930, /* MHz */ + .unk2 = 3287, + RADIOREGS(0x71, 0x01, 0xED, 0x0F, 0xFF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xB807, 0xB407, 0xB007, 0x1302, 0x1402, 0x1502), + }, + { .channel = 188, + .freq = 4940, /* MHz */ + .unk2 = 3293, + RADIOREGS(0x71, 0x01, 0xEE, 0x0F, 0xFF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xBC07, 0xB807, 0xB407, 0x1202, 0x1302, 0x1402), + }, + { .channel = 190, + .freq = 4950, /* MHz */ + .unk2 = 3300, + RADIOREGS(0x71, 0x01, 0xEF, 0x0F, 0xFF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xC007, 0xBC07, 0xB807, 0x1102, 0x1202, 0x1302), + }, + { .channel = 192, + .freq = 4960, /* MHz */ + .unk2 = 3307, + RADIOREGS(0x71, 0x01, 0xF0, 0x0F, 0xFF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xC407, 0xC007, 0xBC07, 0x0F02, 0x1102, 0x1202), + }, + { .channel = 194, + .freq = 4970, /* MHz */ + .unk2 = 3313, + RADIOREGS(0x71, 0x01, 0xF1, 0x0F, 0xFF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xC807, 0xC407, 0xC007, 0x0E02, 0x0F02, 0x1102), + }, + { .channel = 196, + .freq = 4980, /* MHz */ + .unk2 = 3320, + RADIOREGS(0x71, 0x01, 0xF2, 0x0E, 0xFF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xCC07, 0xC807, 0xC407, 0x0D02, 0x0E02, 0x0F02), + }, + { .channel = 198, + .freq = 4990, /* MHz */ + .unk2 = 3327, + RADIOREGS(0x71, 0x01, 0xF3, 0x0E, 0xFF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xD007, 0xCC07, 0xC807, 0x0C02, 0x0D02, 0x0E02), + }, + { .channel = 200, + .freq = 5000, /* MHz */ + .unk2 = 3333, + RADIOREGS(0x71, 0x01, 0xF4, 0x0E, 0xFF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xD407, 0xD007, 0xCC07, 0x0B02, 0x0C02, 0x0D02), + }, + { .channel = 202, + .freq = 5010, /* MHz */ + .unk2 = 3340, + RADIOREGS(0x71, 0x01, 0xF5, 0x0E, 0xFF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xD807, 0xD407, 0xD007, 0x0A02, 0x0B02, 0x0C02), + }, + { .channel = 204, + .freq = 5020, /* MHz */ + .unk2 = 3347, + RADIOREGS(0x71, 0x01, 0xF6, 0x0E, 0xF7, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xDC07, 0xD807, 0xD407, 0x0902, 0x0A02, 0x0B02), + }, + { .channel = 206, + .freq = 5030, /* MHz */ + .unk2 = 3353, + RADIOREGS(0x71, 0x01, 0xF7, 0x0E, 0xF7, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xE007, 0xDC07, 0xD807, 0x0802, 0x0902, 0x0A02), + }, + { .channel = 208, + .freq = 5040, /* MHz */ + .unk2 = 3360, + RADIOREGS(0x71, 0x01, 0xF8, 0x0D, 0xEF, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xE407, 0xE007, 0xDC07, 0x0702, 0x0802, 0x0902), + }, + { .channel = 210, + .freq = 5050, /* MHz */ + .unk2 = 3367, + RADIOREGS(0x71, 0x01, 0xF9, 0x0D, 0xEF, 0x01, 0x04, 0x0A, + 
0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, + 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), + PHYREGS(0xE807, 0xE407, 0xE007, 0x0602, 0x0702, 0x0802), + }, + { .channel = 212, + .freq = 5060, /* MHz */ + .unk2 = 3373, + RADIOREGS(0x71, 0x01, 0xFA, 0x0D, 0xE6, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F, + 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E), + PHYREGS(0xEC07, 0xE807, 0xE407, 0x0502, 0x0602, 0x0702), + }, + { .channel = 214, + .freq = 5070, /* MHz */ + .unk2 = 3380, + RADIOREGS(0x71, 0x01, 0xFB, 0x0D, 0xE6, 0x01, 0x04, 0x0A, + 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F, + 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E), + PHYREGS(0xF007, 0xEC07, 0xE807, 0x0402, 0x0502, 0x0602), + }, + { .channel = 216, + .freq = 5080, /* MHz */ + .unk2 = 3387, + RADIOREGS(0x71, 0x01, 0xFC, 0x0D, 0xDE, 0x01, 0x04, 0x0A, + 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F, + 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D), + PHYREGS(0xF407, 0xF007, 0xEC07, 0x0302, 0x0402, 0x0502), + }, + { .channel = 218, + .freq = 5090, /* MHz */ + .unk2 = 3393, + RADIOREGS(0x71, 0x01, 0xFD, 0x0D, 0xDE, 0x01, 0x04, 0x0A, + 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F, + 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D), + PHYREGS(0xF807, 0xF407, 0xF007, 0x0202, 0x0302, 0x0402), + }, + { .channel = 220, + .freq = 5100, /* MHz */ + .unk2 = 3400, + RADIOREGS(0x71, 0x01, 0xFE, 0x0C, 0xD6, 0x01, 0x04, 0x0A, + 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F, + 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D), + PHYREGS(0xFC07, 0xF807, 0xF407, 0x0102, 0x0202, 0x0302), + }, + { .channel = 222, + .freq = 5110, /* MHz */ + .unk2 = 3407, + RADIOREGS(0x71, 0x01, 0xFF, 0x0C, 0xD6, 0x01, 0x04, 0x0A, + 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F, + 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D), + PHYREGS(0x0008, 0xFC07, 0xF807, 0x0002, 0x0102, 0x0202), + }, + { .channel = 224, + .freq = 5120, /* MHz */ + .unk2 = 3413, + RADIOREGS(0x71, 0x02, 0x00, 0x0C, 0xCE, 0x01, 0x04, 0x0A, + 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F, + 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C), + PHYREGS(0x0408, 0x0008, 0xFC07, 0xFF01, 0x0002, 0x0102), + }, + { .channel = 226, + .freq = 5130, /* MHz */ + .unk2 = 3420, + RADIOREGS(0x71, 0x02, 0x01, 0x0C, 0xCE, 0x01, 0x04, 0x0A, + 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F, + 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C), + PHYREGS(0x0808, 0x0408, 0x0008, 0xFE01, 0xFF01, 0x0002), + }, + { .channel = 228, + .freq = 5140, /* MHz */ + .unk2 = 3427, + RADIOREGS(0x71, 0x02, 0x02, 0x0C, 0xC6, 0x01, 0x04, 0x0A, + 0x00, 0x8D, 0x99, 0x99, 0xDD, 0x00, 0x0C, 0x0E, + 0x8B, 0xDD, 0x00, 0x0C, 0x0E, 0x8B), + PHYREGS(0x0C08, 0x0808, 0x0408, 0xFD01, 0xFE01, 0xFF01), + }, + { .channel = 32, + .freq = 5160, /* MHz */ + .unk2 = 3440, + RADIOREGS(0x71, 0x02, 0x04, 0x0B, 0xBE, 0x01, 0x04, 0x0A, + 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D, + 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A), + PHYREGS(0x1408, 0x1008, 0x0C08, 0xFB01, 0xFC01, 0xFD01), + }, + { .channel = 34, + .freq = 5170, /* MHz */ + .unk2 = 3447, + RADIOREGS(0x71, 0x02, 0x05, 0x0B, 0xBE, 0x01, 0x04, 0x0A, + 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D, + 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A), + PHYREGS(0x1808, 0x1408, 0x1008, 0xFA01, 0xFB01, 0xFC01), + }, + { .channel = 36, + .freq = 5180, /* MHz */ + .unk2 = 3453, + RADIOREGS(0x71, 0x02, 0x06, 0x0B, 0xB6, 0x01, 0x04, 0x0A, + 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C, + 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89), + PHYREGS(0x1C08, 0x1808, 0x1408, 0xF901, 0xFA01, 0xFB01), + }, + { .channel = 38, + .freq = 5190, /* MHz */ + .unk2 = 3460, + RADIOREGS(0x71, 0x02, 0x07, 0x0B, 0xB6, 
0x01, 0x04, 0x0A, + 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C, + 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89), + PHYREGS(0x2008, 0x1C08, 0x1808, 0xF801, 0xF901, 0xFA01), + }, + { .channel = 40, + .freq = 5200, /* MHz */ + .unk2 = 3467, + RADIOREGS(0x71, 0x02, 0x08, 0x0B, 0xAF, 0x01, 0x04, 0x0A, + 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B, + 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89), + PHYREGS(0x2408, 0x2008, 0x1C08, 0xF701, 0xF801, 0xF901), + }, + { .channel = 42, + .freq = 5210, /* MHz */ + .unk2 = 3473, + RADIOREGS(0x71, 0x02, 0x09, 0x0B, 0xAF, 0x01, 0x04, 0x0A, + 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B, + 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89), + PHYREGS(0x2808, 0x2408, 0x2008, 0xF601, 0xF701, 0xF801), + }, + { .channel = 44, + .freq = 5220, /* MHz */ + .unk2 = 3480, + RADIOREGS(0x71, 0x02, 0x0A, 0x0A, 0xA7, 0x01, 0x04, 0x0A, + 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A, + 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88), + PHYREGS(0x2C08, 0x2808, 0x2408, 0xF501, 0xF601, 0xF701), + }, + { .channel = 46, + .freq = 5230, /* MHz */ + .unk2 = 3487, + RADIOREGS(0x71, 0x02, 0x0B, 0x0A, 0xA7, 0x01, 0x04, 0x0A, + 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A, + 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88), + PHYREGS(0x3008, 0x2C08, 0x2808, 0xF401, 0xF501, 0xF601), + }, + { .channel = 48, + .freq = 5240, /* MHz */ + .unk2 = 3493, + RADIOREGS(0x71, 0x02, 0x0C, 0x0A, 0xA0, 0x01, 0x04, 0x0A, + 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A, + 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87), + PHYREGS(0x3408, 0x3008, 0x2C08, 0xF301, 0xF401, 0xF501), + }, + { .channel = 50, + .freq = 5250, /* MHz */ + .unk2 = 3500, + RADIOREGS(0x71, 0x02, 0x0D, 0x0A, 0xA0, 0x01, 0x04, 0x0A, + 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A, + 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87), + PHYREGS(0x3808, 0x3408, 0x3008, 0xF201, 0xF301, 0xF401), + }, + { .channel = 52, + .freq = 5260, /* MHz */ + .unk2 = 3507, + RADIOREGS(0x71, 0x02, 0x0E, 0x0A, 0x98, 0x01, 0x04, 0x0A, + 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09, + 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87), + PHYREGS(0x3C08, 0x3808, 0x3408, 0xF101, 0xF201, 0xF301), + }, + { .channel = 54, + .freq = 5270, /* MHz */ + .unk2 = 3513, + RADIOREGS(0x71, 0x02, 0x0F, 0x0A, 0x98, 0x01, 0x04, 0x0A, + 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09, + 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87), + PHYREGS(0x4008, 0x3C08, 0x3808, 0xF001, 0xF101, 0xF201), + }, + { .channel = 56, + .freq = 5280, /* MHz */ + .unk2 = 3520, + RADIOREGS(0x71, 0x02, 0x10, 0x09, 0x91, 0x01, 0x04, 0x0A, + 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08, + 0x86, 0x99, 0x00, 0x08, 0x08, 0x86), + PHYREGS(0x4408, 0x4008, 0x3C08, 0xF001, 0xF001, 0xF101), + }, + { .channel = 58, + .freq = 5290, /* MHz */ + .unk2 = 3527, + RADIOREGS(0x71, 0x02, 0x11, 0x09, 0x91, 0x01, 0x04, 0x0A, + 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08, + 0x86, 0x99, 0x00, 0x08, 0x08, 0x86), + PHYREGS(0x4808, 0x4408, 0x4008, 0xEF01, 0xF001, 0xF001), + }, + { .channel = 60, + .freq = 5300, /* MHz */ + .unk2 = 3533, + RADIOREGS(0x71, 0x02, 0x12, 0x09, 0x8A, 0x01, 0x04, 0x0A, + 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07, + 0x85, 0x99, 0x00, 0x08, 0x07, 0x85), + PHYREGS(0x4C08, 0x4808, 0x4408, 0xEE01, 0xEF01, 0xF001), + }, + { .channel = 62, + .freq = 5310, /* MHz */ + .unk2 = 3540, + RADIOREGS(0x71, 0x02, 0x13, 0x09, 0x8A, 0x01, 0x04, 0x0A, + 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07, + 0x85, 0x99, 0x00, 0x08, 0x07, 0x85), + PHYREGS(0x5008, 0x4C08, 0x4808, 0xED01, 0xEE01, 0xEF01), + }, + { .channel = 64, + .freq = 5320, /* MHz */ + .unk2 = 3547, + RADIOREGS(0x71, 0x02, 0x14, 
0x09, 0x83, 0x01, 0x04, 0x0A, + 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07, + 0x84, 0x88, 0x00, 0x07, 0x07, 0x84), + PHYREGS(0x5408, 0x5008, 0x4C08, 0xEC01, 0xED01, 0xEE01), + }, + { .channel = 66, + .freq = 5330, /* MHz */ + .unk2 = 3553, + RADIOREGS(0x71, 0x02, 0x15, 0x09, 0x83, 0x01, 0x04, 0x0A, + 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07, + 0x84, 0x88, 0x00, 0x07, 0x07, 0x84), + PHYREGS(0x5808, 0x5408, 0x5008, 0xEB01, 0xEC01, 0xED01), + }, + { .channel = 68, + .freq = 5340, /* MHz */ + .unk2 = 3560, + RADIOREGS(0x71, 0x02, 0x16, 0x08, 0x7C, 0x01, 0x04, 0x0A, + 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06, + 0x84, 0x88, 0x00, 0x07, 0x06, 0x84), + PHYREGS(0x5C08, 0x5808, 0x5408, 0xEA01, 0xEB01, 0xEC01), + }, + { .channel = 70, + .freq = 5350, /* MHz */ + .unk2 = 3567, + RADIOREGS(0x71, 0x02, 0x17, 0x08, 0x7C, 0x01, 0x04, 0x0A, + 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06, + 0x84, 0x88, 0x00, 0x07, 0x06, 0x84), + PHYREGS(0x6008, 0x5C08, 0x5808, 0xE901, 0xEA01, 0xEB01), + }, + { .channel = 72, + .freq = 5360, /* MHz */ + .unk2 = 3573, + RADIOREGS(0x71, 0x02, 0x18, 0x08, 0x75, 0x01, 0x04, 0x0A, + 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05, + 0x83, 0x77, 0x00, 0x06, 0x05, 0x83), + PHYREGS(0x6408, 0x6008, 0x5C08, 0xE801, 0xE901, 0xEA01), + }, + { .channel = 74, + .freq = 5370, /* MHz */ + .unk2 = 3580, + RADIOREGS(0x71, 0x02, 0x19, 0x08, 0x75, 0x01, 0x04, 0x0A, + 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05, + 0x83, 0x77, 0x00, 0x06, 0x05, 0x83), + PHYREGS(0x6808, 0x6408, 0x6008, 0xE701, 0xE801, 0xE901), + }, + { .channel = 76, + .freq = 5380, /* MHz */ + .unk2 = 3587, + RADIOREGS(0x71, 0x02, 0x1A, 0x08, 0x6E, 0x01, 0x04, 0x0A, + 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04, + 0x82, 0x77, 0x00, 0x06, 0x04, 0x82), + PHYREGS(0x6C08, 0x6808, 0x6408, 0xE601, 0xE701, 0xE801), + }, + { .channel = 78, + .freq = 5390, /* MHz */ + .unk2 = 3593, + RADIOREGS(0x71, 0x02, 0x1B, 0x08, 0x6E, 0x01, 0x04, 0x0A, + 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04, + 0x82, 0x77, 0x00, 0x06, 0x04, 0x82), + PHYREGS(0x7008, 0x6C08, 0x6808, 0xE501, 0xE601, 0xE701), + }, + { .channel = 80, + .freq = 5400, /* MHz */ + .unk2 = 3600, + RADIOREGS(0x71, 0x02, 0x1C, 0x07, 0x67, 0x01, 0x04, 0x0A, + 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04, + 0x81, 0x66, 0x00, 0x05, 0x04, 0x81), + PHYREGS(0x7408, 0x7008, 0x6C08, 0xE501, 0xE501, 0xE601), + }, + { .channel = 82, + .freq = 5410, /* MHz */ + .unk2 = 3607, + RADIOREGS(0x71, 0x02, 0x1D, 0x07, 0x67, 0x01, 0x04, 0x0A, + 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04, + 0x81, 0x66, 0x00, 0x05, 0x04, 0x81), + PHYREGS(0x7808, 0x7408, 0x7008, 0xE401, 0xE501, 0xE501), + }, + { .channel = 84, + .freq = 5420, /* MHz */ + .unk2 = 3613, + RADIOREGS(0x71, 0x02, 0x1E, 0x07, 0x61, 0x01, 0x04, 0x0A, + 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03, + 0x80, 0x66, 0x00, 0x05, 0x03, 0x80), + PHYREGS(0x7C08, 0x7808, 0x7408, 0xE301, 0xE401, 0xE501), + }, + { .channel = 86, + .freq = 5430, /* MHz */ + .unk2 = 3620, + RADIOREGS(0x71, 0x02, 0x1F, 0x07, 0x61, 0x01, 0x04, 0x0A, + 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03, + 0x80, 0x66, 0x00, 0x05, 0x03, 0x80), + PHYREGS(0x8008, 0x7C08, 0x7808, 0xE201, 0xE301, 0xE401), + }, + { .channel = 88, + .freq = 5440, /* MHz */ + .unk2 = 3627, + RADIOREGS(0x71, 0x02, 0x20, 0x07, 0x5A, 0x01, 0x04, 0x0A, + 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02, + 0x80, 0x55, 0x00, 0x04, 0x02, 0x80), + PHYREGS(0x8408, 0x8008, 0x7C08, 0xE101, 0xE201, 0xE301), + }, + { .channel = 90, + .freq = 5450, /* MHz */ + .unk2 = 3633, + RADIOREGS(0x71, 
0x02, 0x21, 0x07, 0x5A, 0x01, 0x04, 0x0A, + 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02, + 0x80, 0x55, 0x00, 0x04, 0x02, 0x80), + PHYREGS(0x8808, 0x8408, 0x8008, 0xE001, 0xE101, 0xE201), + }, + { .channel = 92, + .freq = 5460, /* MHz */ + .unk2 = 3640, + RADIOREGS(0x71, 0x02, 0x22, 0x06, 0x53, 0x01, 0x04, 0x0A, + 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01, + 0x80, 0x55, 0x00, 0x04, 0x01, 0x80), + PHYREGS(0x8C08, 0x8808, 0x8408, 0xDF01, 0xE001, 0xE101), + }, + { .channel = 94, + .freq = 5470, /* MHz */ + .unk2 = 3647, + RADIOREGS(0x71, 0x02, 0x23, 0x06, 0x53, 0x01, 0x04, 0x0A, + 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01, + 0x80, 0x55, 0x00, 0x04, 0x01, 0x80), + PHYREGS(0x9008, 0x8C08, 0x8808, 0xDE01, 0xDF01, 0xE001), + }, + { .channel = 96, + .freq = 5480, /* MHz */ + .unk2 = 3653, + RADIOREGS(0x71, 0x02, 0x24, 0x06, 0x4D, 0x01, 0x04, 0x0A, + 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00, + 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), + PHYREGS(0x9408, 0x9008, 0x8C08, 0xDD01, 0xDE01, 0xDF01), + }, + { .channel = 98, + .freq = 5490, /* MHz */ + .unk2 = 3660, + RADIOREGS(0x71, 0x02, 0x25, 0x06, 0x4D, 0x01, 0x04, 0x0A, + 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00, + 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), + PHYREGS(0x9808, 0x9408, 0x9008, 0xDD01, 0xDD01, 0xDE01), + }, + { .channel = 100, + .freq = 5500, /* MHz */ + .unk2 = 3667, + RADIOREGS(0x71, 0x02, 0x26, 0x06, 0x47, 0x01, 0x04, 0x0A, + 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00, + 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), + PHYREGS(0x9C08, 0x9808, 0x9408, 0xDC01, 0xDD01, 0xDD01), + }, + { .channel = 102, + .freq = 5510, /* MHz */ + .unk2 = 3673, + RADIOREGS(0x71, 0x02, 0x27, 0x06, 0x47, 0x01, 0x04, 0x0A, + 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00, + 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), + PHYREGS(0xA008, 0x9C08, 0x9808, 0xDB01, 0xDC01, 0xDD01), + }, + { .channel = 104, + .freq = 5520, /* MHz */ + .unk2 = 3680, + RADIOREGS(0x71, 0x02, 0x28, 0x05, 0x40, 0x01, 0x04, 0x0A, + 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, + 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), + PHYREGS(0xA408, 0xA008, 0x9C08, 0xDA01, 0xDB01, 0xDC01), + }, + { .channel = 106, + .freq = 5530, /* MHz */ + .unk2 = 3687, + RADIOREGS(0x71, 0x02, 0x29, 0x05, 0x40, 0x01, 0x04, 0x0A, + 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, + 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), + PHYREGS(0xA808, 0xA408, 0xA008, 0xD901, 0xDA01, 0xDB01), + }, + { .channel = 108, + .freq = 5540, /* MHz */ + .unk2 = 3693, + RADIOREGS(0x71, 0x02, 0x2A, 0x05, 0x3A, 0x01, 0x04, 0x0A, + 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, + 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), + PHYREGS(0xAC08, 0xA808, 0xA408, 0xD801, 0xD901, 0xDA01), + }, + { .channel = 110, + .freq = 5550, /* MHz */ + .unk2 = 3700, + RADIOREGS(0x71, 0x02, 0x2B, 0x05, 0x3A, 0x01, 0x04, 0x0A, + 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, + 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), + PHYREGS(0xB008, 0xAC08, 0xA808, 0xD701, 0xD801, 0xD901), + }, + { .channel = 112, + .freq = 5560, /* MHz */ + .unk2 = 3707, + RADIOREGS(0x71, 0x02, 0x2C, 0x05, 0x34, 0x01, 0x04, 0x0A, + 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, + 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), + PHYREGS(0xB408, 0xB008, 0xAC08, 0xD701, 0xD701, 0xD801), + }, + { .channel = 114, + .freq = 5570, /* MHz */ + .unk2 = 3713, + RADIOREGS(0x71, 0x02, 0x2D, 0x05, 0x34, 0x01, 0x04, 0x0A, + 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, + 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), + PHYREGS(0xB808, 0xB408, 0xB008, 0xD601, 0xD701, 0xD701), + }, + { .channel = 116, + .freq = 5580, /* MHz */ + .unk2 = 3720, + 
RADIOREGS(0x71, 0x02, 0x2E, 0x04, 0x2E, 0x01, 0x04, 0x0A, + 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, + 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), + PHYREGS(0xBC08, 0xB808, 0xB408, 0xD501, 0xD601, 0xD701), + }, + { .channel = 118, + .freq = 5590, /* MHz */ + .unk2 = 3727, + RADIOREGS(0x71, 0x02, 0x2F, 0x04, 0x2E, 0x01, 0x04, 0x0A, + 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, + 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), + PHYREGS(0xC008, 0xBC08, 0xB808, 0xD401, 0xD501, 0xD601), + }, + { .channel = 120, + .freq = 5600, /* MHz */ + .unk2 = 3733, + RADIOREGS(0x71, 0x02, 0x30, 0x04, 0x28, 0x01, 0x04, 0x0A, + 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00, + 0x80, 0x11, 0x00, 0x01, 0x00, 0x80), + PHYREGS(0xC408, 0xC008, 0xBC08, 0xD301, 0xD401, 0xD501), + }, + { .channel = 122, + .freq = 5610, /* MHz */ + .unk2 = 3740, + RADIOREGS(0x71, 0x02, 0x31, 0x04, 0x28, 0x01, 0x04, 0x0A, + 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00, + 0x80, 0x11, 0x00, 0x01, 0x00, 0x80), + PHYREGS(0xC808, 0xC408, 0xC008, 0xD201, 0xD301, 0xD401), + }, + { .channel = 124, + .freq = 5620, /* MHz */ + .unk2 = 3747, + RADIOREGS(0x71, 0x02, 0x32, 0x04, 0x21, 0x01, 0x04, 0x0A, + 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, + 0x80, 0x11, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xCC08, 0xC808, 0xC408, 0xD201, 0xD201, 0xD301), + }, + { .channel = 126, + .freq = 5630, /* MHz */ + .unk2 = 3753, + RADIOREGS(0x71, 0x02, 0x33, 0x04, 0x21, 0x01, 0x04, 0x0A, + 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, + 0x80, 0x11, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xD008, 0xCC08, 0xC808, 0xD101, 0xD201, 0xD201), + }, + { .channel = 128, + .freq = 5640, /* MHz */ + .unk2 = 3760, + RADIOREGS(0x71, 0x02, 0x34, 0x03, 0x1C, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xD408, 0xD008, 0xCC08, 0xD001, 0xD101, 0xD201), + }, + { .channel = 130, + .freq = 5650, /* MHz */ + .unk2 = 3767, + RADIOREGS(0x71, 0x02, 0x35, 0x03, 0x1C, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xD808, 0xD408, 0xD008, 0xCF01, 0xD001, 0xD101), + }, + { .channel = 132, + .freq = 5660, /* MHz */ + .unk2 = 3773, + RADIOREGS(0x71, 0x02, 0x36, 0x03, 0x16, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xDC08, 0xD808, 0xD408, 0xCE01, 0xCF01, 0xD001), + }, + { .channel = 134, + .freq = 5670, /* MHz */ + .unk2 = 3780, + RADIOREGS(0x71, 0x02, 0x37, 0x03, 0x16, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xE008, 0xDC08, 0xD808, 0xCE01, 0xCE01, 0xCF01), + }, + { .channel = 136, + .freq = 5680, /* MHz */ + .unk2 = 3787, + RADIOREGS(0x71, 0x02, 0x38, 0x03, 0x10, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xE408, 0xE008, 0xDC08, 0xCD01, 0xCE01, 0xCE01), + }, + { .channel = 138, + .freq = 5690, /* MHz */ + .unk2 = 3793, + RADIOREGS(0x71, 0x02, 0x39, 0x03, 0x10, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xE808, 0xE408, 0xE008, 0xCC01, 0xCD01, 0xCE01), + }, + { .channel = 140, + .freq = 5700, /* MHz */ + .unk2 = 3800, + RADIOREGS(0x71, 0x02, 0x3A, 0x02, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xEC08, 0xE808, 0xE408, 0xCB01, 0xCC01, 0xCD01), + }, + { .channel = 142, + .freq = 5710, /* MHz 
*/ + .unk2 = 3807, + RADIOREGS(0x71, 0x02, 0x3B, 0x02, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xF008, 0xEC08, 0xE808, 0xCA01, 0xCB01, 0xCC01), + }, + { .channel = 144, + .freq = 5720, /* MHz */ + .unk2 = 3813, + RADIOREGS(0x71, 0x02, 0x3C, 0x02, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xF408, 0xF008, 0xEC08, 0xC901, 0xCA01, 0xCB01), + }, + { .channel = 145, + .freq = 5725, /* MHz */ + .unk2 = 3817, + RADIOREGS(0x72, 0x04, 0x79, 0x02, 0x03, 0x01, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xF608, 0xF208, 0xEE08, 0xC901, 0xCA01, 0xCB01), + }, + { .channel = 146, + .freq = 5730, /* MHz */ + .unk2 = 3820, + RADIOREGS(0x71, 0x02, 0x3D, 0x02, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xF808, 0xF408, 0xF008, 0xC901, 0xC901, 0xCA01), + }, + { .channel = 147, + .freq = 5735, /* MHz */ + .unk2 = 3823, + RADIOREGS(0x72, 0x04, 0x7B, 0x02, 0x03, 0x01, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xFA08, 0xF608, 0xF208, 0xC801, 0xC901, 0xCA01), + }, + { .channel = 148, + .freq = 5740, /* MHz */ + .unk2 = 3827, + RADIOREGS(0x71, 0x02, 0x3E, 0x02, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xFC08, 0xF808, 0xF408, 0xC801, 0xC901, 0xC901), + }, + { .channel = 149, + .freq = 5745, /* MHz */ + .unk2 = 3830, + RADIOREGS(0x72, 0x04, 0x7D, 0x02, 0xFE, 0x00, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0xFE08, 0xFA08, 0xF608, 0xC801, 0xC801, 0xC901), + }, + { .channel = 150, + .freq = 5750, /* MHz */ + .unk2 = 3833, + RADIOREGS(0x71, 0x02, 0x3F, 0x02, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x0009, 0xFC08, 0xF808, 0xC701, 0xC801, 0xC901), + }, + { .channel = 151, + .freq = 5755, /* MHz */ + .unk2 = 3837, + RADIOREGS(0x72, 0x04, 0x7F, 0x02, 0xFE, 0x00, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x0209, 0xFE08, 0xFA08, 0xC701, 0xC801, 0xC801), + }, + { .channel = 152, + .freq = 5760, /* MHz */ + .unk2 = 3840, + RADIOREGS(0x71, 0x02, 0x40, 0x02, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x0409, 0x0009, 0xFC08, 0xC601, 0xC701, 0xC801), + }, + { .channel = 153, + .freq = 5765, /* MHz */ + .unk2 = 3843, + RADIOREGS(0x72, 0x04, 0x81, 0x02, 0xF8, 0x00, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x0609, 0x0209, 0xFE08, 0xC601, 0xC701, 0xC801), + }, + { .channel = 154, + .freq = 5770, /* MHz */ + .unk2 = 3847, + RADIOREGS(0x71, 0x02, 0x41, 0x02, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x0809, 0x0409, 0x0009, 0xC601, 0xC601, 0xC701), + }, + { .channel = 155, + .freq = 5775, /* MHz */ + .unk2 = 3850, + RADIOREGS(0x72, 0x04, 0x83, 0x02, 0xF8, 0x00, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x0A09, 0x0609, 0x0209, 0xC501, 0xC601, 0xC701), + }, + { .channel = 156, + 
.freq = 5780, /* MHz */ + .unk2 = 3853, + RADIOREGS(0x71, 0x02, 0x42, 0x02, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x0C09, 0x0809, 0x0409, 0xC501, 0xC601, 0xC601), + }, + { .channel = 157, + .freq = 5785, /* MHz */ + .unk2 = 3857, + RADIOREGS(0x72, 0x04, 0x85, 0x02, 0xF2, 0x00, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x0E09, 0x0A09, 0x0609, 0xC401, 0xC501, 0xC601), + }, + { .channel = 158, + .freq = 5790, /* MHz */ + .unk2 = 3860, + RADIOREGS(0x71, 0x02, 0x43, 0x02, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x1009, 0x0C09, 0x0809, 0xC401, 0xC501, 0xC601), + }, + { .channel = 159, + .freq = 5795, /* MHz */ + .unk2 = 3863, + RADIOREGS(0x72, 0x04, 0x87, 0x02, 0xF2, 0x00, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x1209, 0x0E09, 0x0A09, 0xC401, 0xC401, 0xC501), + }, + { .channel = 160, + .freq = 5800, /* MHz */ + .unk2 = 3867, + RADIOREGS(0x71, 0x02, 0x44, 0x01, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x1409, 0x1009, 0x0C09, 0xC301, 0xC401, 0xC501), + }, + { .channel = 161, + .freq = 5805, /* MHz */ + .unk2 = 3870, + RADIOREGS(0x72, 0x04, 0x89, 0x01, 0xED, 0x00, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x1609, 0x1209, 0x0E09, 0xC301, 0xC401, 0xC401), + }, + { .channel = 162, + .freq = 5810, /* MHz */ + .unk2 = 3873, + RADIOREGS(0x71, 0x02, 0x45, 0x01, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x1809, 0x1409, 0x1009, 0xC201, 0xC301, 0xC401), + }, + { .channel = 163, + .freq = 5815, /* MHz */ + .unk2 = 3877, + RADIOREGS(0x72, 0x04, 0x8B, 0x01, 0xED, 0x00, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x1A09, 0x1609, 0x1209, 0xC201, 0xC301, 0xC401), + }, + { .channel = 164, + .freq = 5820, /* MHz */ + .unk2 = 3880, + RADIOREGS(0x71, 0x02, 0x46, 0x01, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x1C09, 0x1809, 0x1409, 0xC201, 0xC201, 0xC301), + }, + { .channel = 165, + .freq = 5825, /* MHz */ + .unk2 = 3883, + RADIOREGS(0x72, 0x04, 0x8D, 0x01, 0xED, 0x00, 0x03, 0x14, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x1E09, 0x1A09, 0x1609, 0xC101, 0xC201, 0xC301), + }, + { .channel = 166, + .freq = 5830, /* MHz */ + .unk2 = 3887, + RADIOREGS(0x71, 0x02, 0x47, 0x01, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x2009, 0x1C09, 0x1809, 0xC101, 0xC201, 0xC201), + }, + { .channel = 168, + .freq = 5840, /* MHz */ + .unk2 = 3893, + RADIOREGS(0x71, 0x02, 0x48, 0x01, 0x0A, 0x01, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x2409, 0x2009, 0x1C09, 0xC001, 0xC101, 0xC201), + }, + { .channel = 170, + .freq = 5850, /* MHz */ + .unk2 = 3900, + RADIOREGS(0x71, 0x02, 0x49, 0x01, 0xE0, 0x00, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x2809, 0x2409, 0x2009, 0xBF01, 0xC001, 0xC101), + }, + 
{ .channel = 172, + .freq = 5860, /* MHz */ + .unk2 = 3907, + RADIOREGS(0x71, 0x02, 0x4A, 0x01, 0xDE, 0x00, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x2C09, 0x2809, 0x2409, 0xBF01, 0xBF01, 0xC001), + }, + { .channel = 174, + .freq = 5870, /* MHz */ + .unk2 = 3913, + RADIOREGS(0x71, 0x02, 0x4B, 0x00, 0xDB, 0x00, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x3009, 0x2C09, 0x2809, 0xBE01, 0xBF01, 0xBF01), + }, + { .channel = 176, + .freq = 5880, /* MHz */ + .unk2 = 3920, + RADIOREGS(0x71, 0x02, 0x4C, 0x00, 0xD8, 0x00, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x3409, 0x3009, 0x2C09, 0xBD01, 0xBE01, 0xBF01), + }, + { .channel = 178, + .freq = 5890, /* MHz */ + .unk2 = 3927, + RADIOREGS(0x71, 0x02, 0x4D, 0x00, 0xD6, 0x00, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x3809, 0x3409, 0x3009, 0xBC01, 0xBD01, 0xBE01), + }, + { .channel = 180, + .freq = 5900, /* MHz */ + .unk2 = 3933, + RADIOREGS(0x71, 0x02, 0x4E, 0x00, 0xD3, 0x00, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x3C09, 0x3809, 0x3409, 0xBC01, 0xBC01, 0xBD01), + }, + { .channel = 182, + .freq = 5910, /* MHz */ + .unk2 = 3940, + RADIOREGS(0x71, 0x02, 0x4F, 0x00, 0xD6, 0x00, 0x04, 0x0A, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), + PHYREGS(0x4009, 0x3C09, 0x3809, 0xBB01, 0xBC01, 0xBC01), + }, + { .channel = 1, + .freq = 2412, /* MHz */ + .unk2 = 3216, + RADIOREGS(0x73, 0x09, 0x6C, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0D, 0x0C, + 0x80, 0xFF, 0x88, 0x0D, 0x0C, 0x80), + PHYREGS(0xC903, 0xC503, 0xC103, 0x3A04, 0x3F04, 0x4304), + }, + { .channel = 2, + .freq = 2417, /* MHz */ + .unk2 = 3223, + RADIOREGS(0x73, 0x09, 0x71, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0B, + 0x80, 0xFF, 0x88, 0x0C, 0x0B, 0x80), + PHYREGS(0xCB03, 0xC703, 0xC303, 0x3804, 0x3D04, 0x4104), + }, + { .channel = 3, + .freq = 2422, /* MHz */ + .unk2 = 3229, + RADIOREGS(0x73, 0x09, 0x76, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A, + 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80), + PHYREGS(0xCD03, 0xC903, 0xC503, 0x3604, 0x3A04, 0x3F04), + }, + { .channel = 4, + .freq = 2427, /* MHz */ + .unk2 = 3236, + RADIOREGS(0x73, 0x09, 0x7B, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A, + 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80), + PHYREGS(0xCF03, 0xCB03, 0xC703, 0x3404, 0x3804, 0x3D04), + }, + { .channel = 5, + .freq = 2432, /* MHz */ + .unk2 = 3243, + RADIOREGS(0x73, 0x09, 0x80, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x09, + 0x80, 0xFF, 0x88, 0x0C, 0x09, 0x80), + PHYREGS(0xD103, 0xCD03, 0xC903, 0x3104, 0x3604, 0x3A04), + }, + { .channel = 6, + .freq = 2437, /* MHz */ + .unk2 = 3249, + RADIOREGS(0x73, 0x09, 0x85, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0B, 0x08, + 0x80, 0xFF, 0x88, 0x0B, 0x08, 0x80), + PHYREGS(0xD303, 0xCF03, 0xCB03, 0x2F04, 0x3404, 0x3804), + }, + { .channel = 7, + .freq = 2442, /* MHz */ + .unk2 = 3256, + RADIOREGS(0x73, 0x09, 0x8A, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x07, + 0x80, 0xFF, 0x88, 0x0A, 0x07, 0x80), + PHYREGS(0xD503, 0xD103, 0xCD03, 0x2D04, 0x3104, 0x3604), 
+ }, + { .channel = 8, + .freq = 2447, /* MHz */ + .unk2 = 3263, + RADIOREGS(0x73, 0x09, 0x8F, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x06, + 0x80, 0xFF, 0x88, 0x0A, 0x06, 0x80), + PHYREGS(0xD703, 0xD303, 0xCF03, 0x2B04, 0x2F04, 0x3404), + }, + { .channel = 9, + .freq = 2452, /* MHz */ + .unk2 = 3269, + RADIOREGS(0x73, 0x09, 0x94, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x09, 0x06, + 0x80, 0xFF, 0x88, 0x09, 0x06, 0x80), + PHYREGS(0xD903, 0xD503, 0xD103, 0x2904, 0x2D04, 0x3104), + }, + { .channel = 10, + .freq = 2457, /* MHz */ + .unk2 = 3276, + RADIOREGS(0x73, 0x09, 0x99, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x05, + 0x80, 0xFF, 0x88, 0x08, 0x05, 0x80), + PHYREGS(0xDB03, 0xD703, 0xD303, 0x2704, 0x2B04, 0x2F04), + }, + { .channel = 11, + .freq = 2462, /* MHz */ + .unk2 = 3283, + RADIOREGS(0x73, 0x09, 0x9E, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x04, + 0x80, 0xFF, 0x88, 0x08, 0x04, 0x80), + PHYREGS(0xDD03, 0xD903, 0xD503, 0x2404, 0x2904, 0x2D04), + }, + { .channel = 12, + .freq = 2467, /* MHz */ + .unk2 = 3289, + RADIOREGS(0x73, 0x09, 0xA3, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x03, + 0x80, 0xFF, 0x88, 0x08, 0x03, 0x80), + PHYREGS(0xDF03, 0xDB03, 0xD703, 0x2204, 0x2704, 0x2B04), + }, + { .channel = 13, + .freq = 2472, /* MHz */ + .unk2 = 3296, + RADIOREGS(0x73, 0x09, 0xA8, 0x0F, 0x00, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x03, + 0x80, 0xFF, 0x88, 0x07, 0x03, 0x80), + PHYREGS(0xE103, 0xDD03, 0xD903, 0x2004, 0x2404, 0x2904), + }, + { .channel = 14, + .freq = 2484, /* MHz */ + .unk2 = 3312, + RADIOREGS(0x73, 0x09, 0xB4, 0x0F, 0xFF, 0x01, 0x07, 0x15, + 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x01, + 0x80, 0xFF, 0x88, 0x07, 0x01, 0x80), + PHYREGS(0xE603, 0xE203, 0xDE03, 0x1B04, 0x1F04, 0x2404), + }, +}; + +const struct b43_nphy_channeltab_entry * +b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel) +{ + const struct b43_nphy_channeltab_entry *e; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab); i++) { + e = &(b43_nphy_channeltab[i]); + if (e->channel == channel) + return e; + } + + return NULL; +} + + +static const u8 b43_ntab_adjustpower0[] = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, + 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, + 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, + 0x08, 0x08, 0x08, 0x08, 0x09, 0x09, 0x09, 0x09, + 0x0A, 0x0A, 0x0A, 0x0A, 0x0B, 0x0B, 0x0B, 0x0B, + 0x0C, 0x0C, 0x0C, 0x0C, 0x0D, 0x0D, 0x0D, 0x0D, + 0x0E, 0x0E, 0x0E, 0x0E, 0x0F, 0x0F, 0x0F, 0x0F, + 0x10, 0x10, 0x10, 0x10, 0x11, 0x11, 0x11, 0x11, + 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13, + 0x14, 0x14, 0x14, 0x14, 0x15, 0x15, 0x15, 0x15, + 0x16, 0x16, 0x16, 0x16, 0x17, 0x17, 0x17, 0x17, + 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, 0x19, 0x19, + 0x1A, 0x1A, 0x1A, 0x1A, 0x1B, 0x1B, 0x1B, 0x1B, + 0x1C, 0x1C, 0x1C, 0x1C, 0x1D, 0x1D, 0x1D, 0x1D, + 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F, +}; + +static const u8 b43_ntab_adjustpower1[] = { + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, + 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, + 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, + 0x08, 0x08, 0x08, 0x08, 0x09, 0x09, 0x09, 0x09, + 0x0A, 0x0A, 0x0A, 0x0A, 0x0B, 0x0B, 0x0B, 0x0B, + 0x0C, 0x0C, 0x0C, 0x0C, 0x0D, 0x0D, 0x0D, 0x0D, + 0x0E, 0x0E, 0x0E, 0x0E, 0x0F, 0x0F, 0x0F, 0x0F, + 
0x10, 0x10, 0x10, 0x10, 0x11, 0x11, 0x11, 0x11, + 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13, + 0x14, 0x14, 0x14, 0x14, 0x15, 0x15, 0x15, 0x15, + 0x16, 0x16, 0x16, 0x16, 0x17, 0x17, 0x17, 0x17, + 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, 0x19, 0x19, + 0x1A, 0x1A, 0x1A, 0x1A, 0x1B, 0x1B, 0x1B, 0x1B, + 0x1C, 0x1C, 0x1C, 0x1C, 0x1D, 0x1D, 0x1D, 0x1D, + 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F, +}; + +static const u16 b43_ntab_bdi[] = { + 0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2, +}; + +static const u32 b43_ntab_channelest[] = { + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, +}; + +static const u8 b43_ntab_estimatepowerlt0[] = { + 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, + 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, + 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, + 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, + 0x30, 0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, + 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, + 0x20, 0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, + 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, +}; + +static const u8 b43_ntab_estimatepowerlt1[] = { + 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, + 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, + 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, + 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, + 0x30, 0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, + 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, + 0x20, 0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, + 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, +}; + +static const u8 b43_ntab_framelookup[] = { + 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16, + 0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E, + 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A, + 0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A, +}; + +static const u32 b43_ntab_framestruct[] = { + 0x08004A04, 0x00100000, 0x01000A05, 0x00100020, + 0x09804506, 0x00100030, 0x09804507, 0x00100030, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004A0C, 0x00100008, 0x01000A0D, 0x00100028, + 0x0980450E, 0x00100038, 0x0980450F, 0x00100038, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000A04, 0x00100000, 0x11008A05, 0x00100020, + 0x1980C506, 0x00100030, 0x21810506, 0x00100030, + 0x21810506, 0x00100030, 0x01800504, 0x00100030, + 0x11808505, 0x00100030, 0x29814507, 0x01100030, + 
0x00000A04, 0x00100000, 0x11008A05, 0x00100020, + 0x21810506, 0x00100030, 0x21810506, 0x00100030, + 0x29814507, 0x01100030, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000A0C, 0x00100008, 0x11008A0D, 0x00100028, + 0x1980C50E, 0x00100038, 0x2181050E, 0x00100038, + 0x2181050E, 0x00100038, 0x0180050C, 0x00100038, + 0x1180850D, 0x00100038, 0x2981450F, 0x01100038, + 0x00000A0C, 0x00100008, 0x11008A0D, 0x00100028, + 0x2181050E, 0x00100038, 0x2181050E, 0x00100038, + 0x2981450F, 0x01100038, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004A04, 0x00100000, 0x01000A05, 0x00100020, + 0x1980C506, 0x00100030, 0x1980C506, 0x00100030, + 0x11808504, 0x00100030, 0x3981CA05, 0x00100030, + 0x29814507, 0x01100030, 0x00000000, 0x00000000, + 0x10008A04, 0x00100000, 0x3981CA05, 0x00100030, + 0x1980C506, 0x00100030, 0x29814507, 0x01100030, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004A0C, 0x00100008, 0x01000A0D, 0x00100028, + 0x1980C50E, 0x00100038, 0x1980C50E, 0x00100038, + 0x1180850C, 0x00100038, 0x3981CA0D, 0x00100038, + 0x2981450F, 0x01100038, 0x00000000, 0x00000000, + 0x10008A0C, 0x00100008, 0x3981CA0D, 0x00100038, + 0x1980C50E, 0x00100038, 0x2981450F, 0x01100038, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x40021404, 0x00100000, 0x02001405, 0x00100040, + 0x0B004A06, 0x01900060, 0x13008A06, 0x01900060, + 0x13008A06, 0x01900060, 0x43020A04, 0x00100060, + 0x1B00CA05, 0x00100060, 0x23010A07, 0x01500060, + 0x40021404, 0x00100000, 0x1A00D405, 0x00100040, + 0x13008A06, 0x01900060, 0x13008A06, 0x01900060, + 0x23010A07, 0x01500060, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140C, 0x00100010, 0x0200140D, 0x00100050, + 0x0B004A0E, 0x01900070, 0x13008A0E, 0x01900070, + 0x13008A0E, 0x01900070, 0x43020A0C, 0x00100070, + 0x1B00CA0D, 0x00100070, 0x23010A0F, 0x01500070, + 0x4002140C, 0x00100010, 0x1A00D40D, 0x00100050, + 0x13008A0E, 0x01900070, 0x13008A0E, 0x01900070, + 0x23010A0F, 0x01500070, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x50029404, 0x00100000, 0x32019405, 0x00100040, + 0x0B004A06, 0x01900060, 0x0B004A06, 0x01900060, + 0x5B02CA04, 0x00100060, 0x3B01D405, 0x00100060, + 0x23010A07, 0x01500060, 0x00000000, 0x00000000, + 0x5802D404, 0x00100000, 0x3B01D405, 0x00100060, + 0x0B004A06, 0x01900060, 0x23010A07, 0x01500060, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x5002940C, 0x00100010, 0x3201940D, 0x00100050, + 0x0B004A0E, 0x01900070, 0x0B004A0E, 0x01900070, + 0x5B02CA0C, 0x00100070, 0x3B01D40D, 0x00100070, + 0x23010A0F, 0x01500070, 0x00000000, 0x00000000, + 0x5802D40C, 0x00100010, 0x3B01D40D, 0x00100070, + 0x0B004A0E, 0x01900070, 0x23010A0F, 0x01500070, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x40021404, 0x000F4800, 0x62031405, 0x00100040, + 0x53028A06, 0x01900060, 0x53028A07, 0x01900060, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140C, 0x000F4810, 0x6203140D, 0x00100050, + 0x53028A0E, 0x01900070, 0x53028A0F, 0x01900070, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000A0C, 0x00100008, 0x11008A0D, 0x00100028, + 0x1980C50E, 0x00100038, 0x2181050E, 0x00100038, + 0x2181050E, 0x00100038, 0x0180050C, 0x00100038, + 
0x1180850D, 0x00100038, 0x1181850D, 0x00100038, + 0x2981450F, 0x01100038, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000A0C, 0x00100008, 0x11008A0D, 0x00100028, + 0x2181050E, 0x00100038, 0x2181050E, 0x00100038, + 0x1181850D, 0x00100038, 0x2981450F, 0x01100038, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004A04, 0x00100000, 0x01000A05, 0x00100020, + 0x0180C506, 0x00100030, 0x0180C506, 0x00100030, + 0x2180C50C, 0x00100030, 0x49820A0D, 0x0016A130, + 0x41824A0D, 0x0016A130, 0x2981450F, 0x01100030, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x2000CA0C, 0x00100000, 0x49820A0D, 0x0016A130, + 0x1980C50E, 0x00100030, 0x41824A0D, 0x0016A130, + 0x2981450F, 0x01100030, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140C, 0x00100010, 0x0200140D, 0x00100050, + 0x0B004A0E, 0x01900070, 0x13008A0E, 0x01900070, + 0x13008A0E, 0x01900070, 0x43020A0C, 0x00100070, + 0x1B00CA0D, 0x00100070, 0x1B014A0D, 0x00100070, + 0x23010A0F, 0x01500070, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140C, 0x00100010, 0x1A00D40D, 0x00100050, + 0x13008A0E, 0x01900070, 0x13008A0E, 0x01900070, + 0x1B014A0D, 0x00100070, 0x23010A0F, 0x01500070, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x50029404, 0x00100000, 0x32019405, 0x00100040, + 0x03004A06, 0x01900060, 0x03004A06, 0x01900060, + 0x6B030A0C, 0x00100060, 0x4B02140D, 0x0016A160, + 0x4302540D, 0x0016A160, 0x23010A0F, 0x01500060, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x6B03140C, 0x00100060, 0x4B02140D, 0x0016A160, + 0x0B004A0E, 0x01900060, 0x4302540D, 0x0016A160, + 0x23010A0F, 0x01500060, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x40021404, 0x00100000, 0x1A00D405, 0x00100040, + 0x53028A06, 0x01900060, 0x5B02CA06, 0x01900060, + 0x5B02CA06, 0x01900060, 0x43020A04, 0x00100060, + 0x1B00CA05, 0x00100060, 0x53028A07, 0x0190C060, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140C, 0x00100010, 0x1A00D40D, 0x00100050, + 0x53028A0E, 0x01900070, 0x5B02CA0E, 0x01900070, + 
0x5B02CA0E, 0x01900070, 0x43020A0C, 0x00100070, + 0x1B00CA0D, 0x00100070, 0x53028A0F, 0x0190C070, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x40021404, 0x00100000, 0x1A00D405, 0x00100040, + 0x5B02CA06, 0x01900060, 0x5B02CA06, 0x01900060, + 0x53028A07, 0x0190C060, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140C, 0x00100010, 0x1A00D40D, 0x00100050, + 0x5B02CA0E, 0x01900070, 0x5B02CA0E, 0x01900070, + 0x53028A0F, 0x0190C070, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_gainctl0[] = { + 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E, + 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C, + 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A, + 0x00730C39, 0x00720D39, 0x00710E38, 0x00700F38, + 0x006F0037, 0x006E0137, 0x006D0236, 0x006C0336, + 0x006B0435, 0x006A0535, 0x00690634, 0x00680734, + 0x00670833, 0x00660933, 0x00650A32, 0x00640B32, + 0x00630C31, 0x00620D31, 0x00610E30, 0x00600F30, + 0x005F002F, 0x005E012F, 0x005D022E, 0x005C032E, + 0x005B042D, 0x005A052D, 0x0059062C, 0x0058072C, + 0x0057082B, 0x0056092B, 0x00550A2A, 0x00540B2A, + 0x00530C29, 0x00520D29, 0x00510E28, 0x00500F28, + 0x004F0027, 0x004E0127, 0x004D0226, 0x004C0326, + 0x004B0425, 0x004A0525, 0x00490624, 0x00480724, + 0x00470823, 0x00460923, 0x00450A22, 0x00440B22, + 0x00430C21, 0x00420D21, 0x00410E20, 0x00400F20, + 
0x003F001F, 0x003E011F, 0x003D021E, 0x003C031E, + 0x003B041D, 0x003A051D, 0x0039061C, 0x0038071C, + 0x0037081B, 0x0036091B, 0x00350A1A, 0x00340B1A, + 0x00330C19, 0x00320D19, 0x00310E18, 0x00300F18, + 0x002F0017, 0x002E0117, 0x002D0216, 0x002C0316, + 0x002B0415, 0x002A0515, 0x00290614, 0x00280714, + 0x00270813, 0x00260913, 0x00250A12, 0x00240B12, + 0x00230C11, 0x00220D11, 0x00210E10, 0x00200F10, + 0x001F000F, 0x001E010F, 0x001D020E, 0x001C030E, + 0x001B040D, 0x001A050D, 0x0019060C, 0x0018070C, + 0x0017080B, 0x0016090B, 0x00150A0A, 0x00140B0A, + 0x00130C09, 0x00120D09, 0x00110E08, 0x00100F08, + 0x000F0007, 0x000E0107, 0x000D0206, 0x000C0306, + 0x000B0405, 0x000A0505, 0x00090604, 0x00080704, + 0x00070803, 0x00060903, 0x00050A02, 0x00040B02, + 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00, +}; + +static const u32 b43_ntab_gainctl1[] = { + 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E, + 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C, + 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A, + 0x00730C39, 0x00720D39, 0x00710E38, 0x00700F38, + 0x006F0037, 0x006E0137, 0x006D0236, 0x006C0336, + 0x006B0435, 0x006A0535, 0x00690634, 0x00680734, + 0x00670833, 0x00660933, 0x00650A32, 0x00640B32, + 0x00630C31, 0x00620D31, 0x00610E30, 0x00600F30, + 0x005F002F, 0x005E012F, 0x005D022E, 0x005C032E, + 0x005B042D, 0x005A052D, 0x0059062C, 0x0058072C, + 0x0057082B, 0x0056092B, 0x00550A2A, 0x00540B2A, + 0x00530C29, 0x00520D29, 0x00510E28, 0x00500F28, + 0x004F0027, 0x004E0127, 0x004D0226, 0x004C0326, + 0x004B0425, 0x004A0525, 0x00490624, 0x00480724, + 0x00470823, 0x00460923, 0x00450A22, 0x00440B22, + 0x00430C21, 0x00420D21, 0x00410E20, 0x00400F20, + 0x003F001F, 0x003E011F, 0x003D021E, 0x003C031E, + 0x003B041D, 0x003A051D, 0x0039061C, 0x0038071C, + 0x0037081B, 0x0036091B, 0x00350A1A, 0x00340B1A, + 0x00330C19, 0x00320D19, 0x00310E18, 0x00300F18, + 0x002F0017, 0x002E0117, 0x002D0216, 0x002C0316, + 0x002B0415, 0x002A0515, 0x00290614, 0x00280714, + 0x00270813, 0x00260913, 0x00250A12, 0x00240B12, + 0x00230C11, 0x00220D11, 0x00210E10, 0x00200F10, + 0x001F000F, 0x001E010F, 0x001D020E, 0x001C030E, + 0x001B040D, 0x001A050D, 0x0019060C, 0x0018070C, + 0x0017080B, 0x0016090B, 0x00150A0A, 0x00140B0A, + 0x00130C09, 0x00120D09, 0x00110E08, 0x00100F08, + 0x000F0007, 0x000E0107, 0x000D0206, 0x000C0306, + 0x000B0405, 0x000A0505, 0x00090604, 0x00080704, + 0x00070803, 0x00060903, 0x00050A02, 0x00040B02, + 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00, +}; + +static const u32 b43_ntab_intlevel[] = { + 0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46, + 0x00C1188D, 0x080024D2, 0x00000070, +}; + +static const u32 b43_ntab_iqlt0[] = { + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 
0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, +}; + +static const u32 b43_ntab_iqlt1[] = { + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, + 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, +}; + +static const u16 b43_ntab_loftlt0[] = { + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, +}; + +static const u16 b43_ntab_loftlt1[] = { + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 
0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, + 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, + 0x0002, 0x0103, +}; + +static const u8 b43_ntab_mcs[] = { + 0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C, + 0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C, + 0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C, + 0xC0, 0xC8, 0xCA, 0xD0, 0xD2, 0xD9, 0xDA, 0xDC, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x02, 0x04, 0x08, 0x09, 0x0A, 0x0C, + 0x10, 0x11, 0x12, 0x14, 0x18, 0x19, 0x1A, 0x1C, + 0x20, 0x21, 0x22, 0x24, 0x40, 0x41, 0x42, 0x44, + 0x48, 0x49, 0x4A, 0x4C, 0x50, 0x51, 0x52, 0x54, + 0x58, 0x59, 0x5A, 0x5C, 0x60, 0x61, 0x62, 0x64, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u32 b43_ntab_noisevar10[] = { + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, +}; + +static const u32 b43_ntab_noisevar11[] = { + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, + 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, +}; + +static const u16 b43_ntab_pilot[] = { + 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, + 0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5, + 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82, + 0xFFA0, 0xFF28, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFF82, 0xFFA0, 0xFF28, 0xFF0A, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xF83F, 0xFA1F, 0xFA97, 0xFAB5, + 0xF2BD, 0xF0BF, 0xFFFF, 0xFFFF, 0xF017, 0xF815, + 0xF215, 0xF095, 0xF035, 0xF01D, 0xFFFF, 0xFFFF, + 0xFF08, 0xFF02, 0xFF80, 0xFF20, 0xFF08, 0xFF02, + 0xFF80, 0xFF20, 0xF01F, 0xF817, 0xFA15, 0xF295, + 0xF0B5, 0xF03D, 0xFFFF, 0xFFFF, 0xF82A, 0xFA0A, + 0xFA82, 0xFAA0, 0xF2A8, 0xF0AA, 0xFFFF, 0xFFFF, + 0xF002, 0xF800, 0xF200, 0xF080, 0xF020, 0xF008, + 0xFFFF, 0xFFFF, 0xF00A, 0xF802, 0xFA00, 0xF280, + 0xF0A0, 0xF028, 0xFFFF, 0xFFFF, +}; + +static const u32 b43_ntab_pilotlt[] = { + 0x76540123, 0x62407351, 0x76543201, 0x76540213, + 0x76540123, 0x76430521, +}; + +static const u32 b43_ntab_tdi20a0[] = { + 0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0, + 0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D, + 0x00020301, 0x00030504, 0x00040708, 0x0005090B, + 0x00064B8E, 0x00095291, 0x000A5494, 0x000B9718, + 0x000C9927, 0x000D9B2A, 0x000EDD2E, 0x000FDF31, + 0x000101B4, 0x000243B7, 0x000345BB, 0x000447BE, + 0x00058982, 0x00068C05, 0x00099309, 0x000A950C, + 0x000BD78F, 0x000CD992, 0x000DDB96, 0x000F1D99, + 0x00005FA8, 0x0001422C, 0x0002842F, 0x00038632, + 0x00048835, 0x0005CA38, 0x0006CCBC, 0x0009D3BF, + 0x000B1603, 0x000C1806, 0x000D1A0A, 0x000E1C0D, + 0x000F5E10, 0x00008093, 0x00018297, 0x0002C49A, + 0x0003C680, 0x0004C880, 0x00060B00, 0x00070D00, + 0x00000000, 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_tdi20a1[] = { + 0x00014B26, 0x00028D29, 0x000393AD, 0x00049630, + 0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D, + 0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B, + 0x0000488E, 0x00018B91, 0x0002D214, 0x0003D418, + 0x0004D6A7, 0x000618AA, 0x00071AAE, 0x0009DCB1, + 0x000B1EB4, 0x000C0137, 0x000D033B, 0x000E053E, + 0x000F4702, 0x00008905, 0x00020C09, 0x0003128C, + 0x0004148F, 0x00051712, 0x00065916, 0x00091B19, + 0x000A1D28, 0x000B5F2C, 0x000C41AF, 0x000D43B2, + 0x000E85B5, 0x000F87B8, 0x0000C9BC, 0x00024CBF, + 0x00035303, 0x00045506, 0x0005978A, 0x0006998D, + 0x00095B90, 0x000A5D93, 0x000B9F97, 0x000C821A, + 0x000D8400, 
0x000EC600, 0x000FC800, 0x00010A00, + 0x00000000, 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_tdi40a0[] = { + 0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2, + 0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C, + 0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2, + 0x00056447, 0x00072DD0, 0x0008B6DA, 0x000A02E3, + 0x000B8C6C, 0x000D15F6, 0x0011E484, 0x0013AE0D, + 0x00153717, 0x00168320, 0x00180CA9, 0x00199633, + 0x001B6548, 0x001CEED1, 0x001EB7DB, 0x0000C3E4, + 0x00024D6D, 0x000416F7, 0x0005A585, 0x00076F0F, + 0x0008F818, 0x000A4421, 0x000BCDAB, 0x000D9734, + 0x00122649, 0x0013EFD2, 0x001578DC, 0x0016C4E5, + 0x00184E6E, 0x001A17F8, 0x001BA686, 0x001D3010, + 0x001EF999, 0x00010522, 0x00028EAC, 0x00045835, + 0x0005E74A, 0x0007B0D3, 0x00093A5D, 0x000A85E6, + 0x000C0F6F, 0x000DD8F9, 0x00126787, 0x00143111, + 0x0015BA9A, 0x00170623, 0x00188FAD, 0x001A5936, + 0x001BE84B, 0x001DB1D4, 0x001F3B5E, 0x000146E7, + 0x00031070, 0x000499FA, 0x00062888, 0x0007F212, + 0x00097B9B, 0x000AC7A4, 0x000C50AE, 0x000E1A37, + 0x0012A94C, 0x001472D5, 0x0015FC5F, 0x00174868, + 0x0018D171, 0x001A9AFB, 0x001C2989, 0x001DF313, + 0x001F7C9C, 0x000188A5, 0x000351AF, 0x0004DB38, + 0x0006AA4D, 0x000833D7, 0x0009BD60, 0x000B0969, + 0x000C9273, 0x000E5BFC, 0x00132A8A, 0x0014B414, + 0x00163D9D, 0x001789A6, 0x001912B0, 0x001ADC39, + 0x001C6BCE, 0x001E34D8, 0x001FBE61, 0x0001CA6A, + 0x00039374, 0x00051CFD, 0x0006EC0B, 0x00087515, + 0x0009FE9E, 0x000B4AA7, 0x000CD3B1, 0x000E9D3A, + 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_tdi40a1[] = { + 0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD, + 0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07, + 0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D, + 0x00155C37, 0x0016EACB, 0x00187454, 0x001A3DDE, + 0x001B89E7, 0x001D12F0, 0x001F1CFA, 0x00016B88, + 0x00033492, 0x0004BE1B, 0x00060A24, 0x0007D32E, + 0x00095D38, 0x000AEC4C, 0x000C7555, 0x000E3EDF, + 0x00124AE8, 0x001413F1, 0x0015A37B, 0x00172C89, + 0x0018B593, 0x001A419C, 0x001BCB25, 0x001D942F, + 0x001F63B9, 0x0001AD4D, 0x00037657, 0x0004C260, + 0x00068BE9, 0x000814F3, 0x0009A47C, 0x000B2D8A, + 0x000CB694, 0x000E429D, 0x00128C26, 0x001455B0, + 0x0015E4BA, 0x00176E4E, 0x0018F758, 0x001A8361, + 0x001C0CEA, 0x001DD674, 0x001FA57D, 0x0001EE8B, + 0x0003B795, 0x0005039E, 0x0006CD27, 0x000856B1, + 0x0009E5C6, 0x000B6F4F, 0x000CF859, 0x000E8462, + 0x00130DEB, 0x00149775, 0x00162603, 0x0017AF8C, + 0x00193896, 0x001AC49F, 0x001C4E28, 0x001E17B2, + 0x0000A6C7, 0x00023050, 0x0003F9DA, 0x00054563, + 0x00070EEC, 0x00089876, 0x000A2704, 0x000BB08D, + 0x000D3A17, 0x001185A0, 0x00134F29, 0x0014D8B3, + 0x001667C8, 0x0017F151, 0x00197ADB, 0x001B0664, + 0x001C8FED, 0x001E5977, 0x0000E805, 0x0002718F, + 0x00043B18, 0x000586A1, 0x0007502B, 0x0008D9B4, + 0x000A68C9, 0x000BF252, 0x000DBBDC, 0x0011C7E5, + 0x001390EE, 0x00151A78, 0x0016A906, 0x00183290, + 0x0019BC19, 0x001B4822, 0x001CD12C, 0x001E9AB5, + 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_tdtrn[] = { + 0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6, + 0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68, + 0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52, + 0x0C380000, 0x12F6FE52, 0xFE36F592, 0xEE680050, + 0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6, + 0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68, + 0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52, + 0x0C380000, 0x12F6FE52, 0xFE36F592, 0xEE680050, + 0x05E305E3, 0x004DEF0C, 0xF5F3FE47, 0xFE611246, + 0x00000BC7, 0xFE611246, 0xF5F3FE47, 0x004DEF0C, + 0x05E305E3, 0xEF0C004D, 0xFE47F5F3, 0x1246FE61, + 0x0BC70000, 0x1246FE61, 0xFE47F5F3, 
0xEF0C004D, + 0x05E305E3, 0x004DEF0C, 0xF5F3FE47, 0xFE611246, + 0x00000BC7, 0xFE611246, 0xF5F3FE47, 0x004DEF0C, + 0x05E305E3, 0xEF0C004D, 0xFE47F5F3, 0x1246FE61, + 0x0BC70000, 0x1246FE61, 0xFE47F5F3, 0xEF0C004D, + 0xFA58FA58, 0xF895043B, 0xFF4C09C0, 0xFBC6FFA8, + 0xFB84F384, 0x0798F6F9, 0x05760122, 0x058409F6, + 0x0B500000, 0x05B7F542, 0x08860432, 0x06DDFEE7, + 0xFB84F384, 0xF9D90664, 0xF7E8025C, 0x00FFF7BD, + 0x05A805A8, 0xF7BD00FF, 0x025CF7E8, 0x0664F9D9, + 0xF384FB84, 0xFEE706DD, 0x04320886, 0xF54205B7, + 0x00000B50, 0x09F60584, 0x01220576, 0xF6F90798, + 0xF384FB84, 0xFFA8FBC6, 0x09C0FF4C, 0x043BF895, + 0x02D402D4, 0x07DE0270, 0xFC96079C, 0xF90AFE94, + 0xFE00FF2C, 0x02D4065D, 0x092A0096, 0x0014FBB8, + 0xFD2CFD2C, 0x076AFB3C, 0x0096F752, 0xF991FD87, + 0xFB2C0200, 0xFEB8F960, 0x08E0FC96, 0x049802A8, + 0xFD2CFD2C, 0x02A80498, 0xFC9608E0, 0xF960FEB8, + 0x0200FB2C, 0xFD87F991, 0xF7520096, 0xFB3C076A, + 0xFD2CFD2C, 0xFBB80014, 0x0096092A, 0x065D02D4, + 0xFF2CFE00, 0xFE94F90A, 0x079CFC96, 0x027007DE, + 0x02D402D4, 0x027007DE, 0x079CFC96, 0xFE94F90A, + 0xFF2CFE00, 0x065D02D4, 0x0096092A, 0xFBB80014, + 0xFD2CFD2C, 0xFB3C076A, 0xF7520096, 0xFD87F991, + 0x0200FB2C, 0xF960FEB8, 0xFC9608E0, 0x02A80498, + 0xFD2CFD2C, 0x049802A8, 0x08E0FC96, 0xFEB8F960, + 0xFB2C0200, 0xF991FD87, 0x0096F752, 0x076AFB3C, + 0xFD2CFD2C, 0x0014FBB8, 0x092A0096, 0x02D4065D, + 0xFE00FF2C, 0xF90AFE94, 0xFC96079C, 0x07DE0270, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x062A0000, 0xFEFA0759, 0x08B80908, 0xF396FC2D, + 0xF9D6045C, 0xFC4EF608, 0xF748F596, 0x07B207BF, + 0x062A062A, 0xF84EF841, 0xF748F596, 0x03B209F8, + 0xF9D6045C, 0x0C6A03D3, 0x08B80908, 0x0106F8A7, + 0x062A0000, 0xFEFAF8A7, 0x08B8F6F8, 0xF39603D3, + 0xF9D6FBA4, 0xFC4E09F8, 0xF7480A6A, 0x07B2F841, + 0x062AF9D6, 0xF84E07BF, 0xF7480A6A, 0x03B2F608, + 0xF9D6FBA4, 0x0C6AFC2D, 0x08B8F6F8, 0x01060759, + 0x062A0000, 0xFEFA0759, 0x08B80908, 0xF396FC2D, + 0xF9D6045C, 0xFC4EF608, 0xF748F596, 0x07B207BF, + 0x062A062A, 0xF84EF841, 0xF748F596, 0x03B209F8, + 0xF9D6045C, 0x0C6A03D3, 0x08B80908, 0x0106F8A7, + 0x062A0000, 0xFEFAF8A7, 0x08B8F6F8, 0xF39603D3, + 0xF9D6FBA4, 0xFC4E09F8, 0xF7480A6A, 0x07B2F841, + 0x062AF9D6, 0xF84E07BF, 0xF7480A6A, 0x03B2F608, + 0xF9D6FBA4, 0x0C6AFC2D, 0x08B8F6F8, 0x01060759, + 0x061C061C, 0xFF30009D, 0xFFB21141, 0xFD87FB54, + 0xF65DFE59, 0x02EEF99E, 0x0166F03C, 0xFFF809B6, + 0x000008A4, 0x000AF42B, 0x00EFF577, 
0xFA840BF2, + 0xFC02FF51, 0x08260F67, 0xFFF0036F, 0x0842F9C3, + 0x00000000, 0x063DF7BE, 0xFC910010, 0xF099F7DA, + 0x00AF03FE, 0xF40E057C, 0x0A89FF11, 0x0BD5FFF6, + 0xF75C0000, 0xF64A0008, 0x0FC4FE9A, 0x0662FD12, + 0x01A709A3, 0x04AC0279, 0xEEBF004E, 0xFF6300D0, + 0xF9E4F9E4, 0x00D0FF63, 0x004EEEBF, 0x027904AC, + 0x09A301A7, 0xFD120662, 0xFE9A0FC4, 0x0008F64A, + 0x0000F75C, 0xFFF60BD5, 0xFF110A89, 0x057CF40E, + 0x03FE00AF, 0xF7DAF099, 0x0010FC91, 0xF7BE063D, + 0x00000000, 0xF9C30842, 0x036FFFF0, 0x0F670826, + 0xFF51FC02, 0x0BF2FA84, 0xF57700EF, 0xF42B000A, + 0x08A40000, 0x09B6FFF8, 0xF03C0166, 0xF99E02EE, + 0xFE59F65D, 0xFB54FD87, 0x1141FFB2, 0x009DFF30, + 0x05E30000, 0xFF060705, 0x085408A0, 0xF425FC59, + 0xFA1D042A, 0xFC78F67A, 0xF7ACF60E, 0x075A0766, + 0x05E305E3, 0xF8A6F89A, 0xF7ACF60E, 0x03880986, + 0xFA1D042A, 0x0BDB03A7, 0x085408A0, 0x00FAF8FB, + 0x05E30000, 0xFF06F8FB, 0x0854F760, 0xF42503A7, + 0xFA1DFBD6, 0xFC780986, 0xF7AC09F2, 0x075AF89A, + 0x05E3FA1D, 0xF8A60766, 0xF7AC09F2, 0x0388F67A, + 0xFA1DFBD6, 0x0BDBFC59, 0x0854F760, 0x00FA0705, + 0x05E30000, 0xFF060705, 0x085408A0, 0xF425FC59, + 0xFA1D042A, 0xFC78F67A, 0xF7ACF60E, 0x075A0766, + 0x05E305E3, 0xF8A6F89A, 0xF7ACF60E, 0x03880986, + 0xFA1D042A, 0x0BDB03A7, 0x085408A0, 0x00FAF8FB, + 0x05E30000, 0xFF06F8FB, 0x0854F760, 0xF42503A7, + 0xFA1DFBD6, 0xFC780986, 0xF7AC09F2, 0x075AF89A, + 0x05E3FA1D, 0xF8A60766, 0xF7AC09F2, 0x0388F67A, + 0xFA1DFBD6, 0x0BDBFC59, 0x0854F760, 0x00FA0705, + 0xFA58FA58, 0xF8F0FE00, 0x0448073D, 0xFDC9FE46, + 0xF9910258, 0x089D0407, 0xFD5CF71A, 0x02AFFDE0, + 0x083E0496, 0xFF5A0740, 0xFF7AFD97, 0x00FE01F1, + 0x0009082E, 0xFA94FF75, 0xFECDF8EA, 0xFFB0F693, + 0xFD2CFA58, 0x0433FF16, 0xFBA405DD, 0xFA610341, + 0x06A606CB, 0x0039FD2D, 0x0677FA97, 0x01FA05E0, + 0xF896003E, 0x075A068B, 0x012CFC3E, 0xFA23F98D, + 0xFC7CFD43, 0xFF90FC0D, 0x01C10982, 0x00C601D6, + 0xFD2CFD2C, 0x01D600C6, 0x098201C1, 0xFC0DFF90, + 0xFD43FC7C, 0xF98DFA23, 0xFC3E012C, 0x068B075A, + 0x003EF896, 0x05E001FA, 0xFA970677, 0xFD2D0039, + 0x06CB06A6, 0x0341FA61, 0x05DDFBA4, 0xFF160433, + 0xFA58FD2C, 0xF693FFB0, 0xF8EAFECD, 0xFF75FA94, + 0x082E0009, 0x01F100FE, 0xFD97FF7A, 0x0740FF5A, + 0x0496083E, 0xFDE002AF, 0xF71AFD5C, 0x0407089D, + 0x0258F991, 0xFE46FDC9, 0x073D0448, 0xFE00F8F0, + 0xFD2CFD2C, 0xFCE00500, 0xFC09FDDC, 0xFE680157, + 0x04C70571, 0xFC3AFF21, 0xFCD70228, 0x056D0277, + 0x0200FE00, 0x0022F927, 0xFE3C032B, 0xFC44FF3C, + 0x03E9FBDB, 0x04570313, 0x04C9FF5C, 0x000D03B8, + 0xFA580000, 0xFBE900D2, 0xF9D0FE0B, 0x0125FDF9, + 0x042501BF, 0x0328FA2B, 0xFFA902F0, 0xFA250157, + 0x0200FE00, 0x03740438, 0xFF0405FD, 0x030CFE52, + 0x0037FB39, 0xFF6904C5, 0x04F8FD23, 0xFD31FC1B, + 0xFD2CFD2C, 0xFC1BFD31, 0xFD2304F8, 0x04C5FF69, + 0xFB390037, 0xFE52030C, 0x05FDFF04, 0x04380374, + 0xFE000200, 0x0157FA25, 0x02F0FFA9, 0xFA2B0328, + 0x01BF0425, 0xFDF90125, 0xFE0BF9D0, 0x00D2FBE9, + 0x0000FA58, 0x03B8000D, 0xFF5C04C9, 0x03130457, + 0xFBDB03E9, 0xFF3CFC44, 0x032BFE3C, 0xF9270022, + 0xFE000200, 0x0277056D, 0x0228FCD7, 0xFF21FC3A, + 0x057104C7, 0x0157FE68, 0xFDDCFC09, 0x0500FCE0, + 0xFD2CFD2C, 0x0500FCE0, 0xFDDCFC09, 0x0157FE68, + 0x057104C7, 0xFF21FC3A, 0x0228FCD7, 0x0277056D, + 0xFE000200, 0xF9270022, 0x032BFE3C, 0xFF3CFC44, + 0xFBDB03E9, 0x03130457, 0xFF5C04C9, 0x03B8000D, + 0x0000FA58, 0x00D2FBE9, 0xFE0BF9D0, 0xFDF90125, + 0x01BF0425, 0xFA2B0328, 0x02F0FFA9, 0x0157FA25, + 0xFE000200, 0x04380374, 0x05FDFF04, 0xFE52030C, + 0xFB390037, 0x04C5FF69, 0xFD2304F8, 0xFC1BFD31, + 0xFD2CFD2C, 0xFD31FC1B, 0x04F8FD23, 0xFF6904C5, + 0x0037FB39, 0x030CFE52, 0xFF0405FD, 
0x03740438, + 0x0200FE00, 0xFA250157, 0xFFA902F0, 0x0328FA2B, + 0x042501BF, 0x0125FDF9, 0xF9D0FE0B, 0xFBE900D2, + 0xFA580000, 0x000D03B8, 0x04C9FF5C, 0x04570313, + 0x03E9FBDB, 0xFC44FF3C, 0xFE3C032B, 0x0022F927, + 0x0200FE00, 0x056D0277, 0xFCD70228, 0xFC3AFF21, + 0x04C70571, 0xFE680157, 0xFC09FDDC, 0xFCE00500, + 0x05A80000, 0xFF1006BE, 0x0800084A, 0xF49CFC7E, + 0xFA580400, 0xFC9CF6DA, 0xF800F672, 0x0710071C, + 0x05A805A8, 0xF8F0F8E4, 0xF800F672, 0x03640926, + 0xFA580400, 0x0B640382, 0x0800084A, 0x00F0F942, + 0x05A80000, 0xFF10F942, 0x0800F7B6, 0xF49C0382, + 0xFA58FC00, 0xFC9C0926, 0xF800098E, 0x0710F8E4, + 0x05A8FA58, 0xF8F0071C, 0xF800098E, 0x0364F6DA, + 0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE, + 0x05A80000, 0xFF1006BE, 0x0800084A, 0xF49CFC7E, + 0xFA580400, 0xFC9CF6DA, 0xF800F672, 0x0710071C, + 0x05A805A8, 0xF8F0F8E4, 0xF800F672, 0x03640926, + 0xFA580400, 0x0B640382, 0x0800084A, 0x00F0F942, + 0x05A80000, 0xFF10F942, 0x0800F7B6, 0xF49C0382, + 0xFA58FC00, 0xFC9C0926, 0xF800098E, 0x0710F8E4, + 0x05A8FA58, 0xF8F0071C, 0xF800098E, 0x0364F6DA, + 0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE, +}; + +static const u32 b43_ntab_tmap[] = { + 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, + 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0xF1111110, 0x11111111, 0x11F11111, 0x00000111, + 0x11000000, 0x1111F111, 0x11111111, 0x111111F1, + 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x000AA888, + 0x88880000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0xA1111110, 0x11111111, 0x11C11111, 0x00000111, + 0x11000000, 0x1111A111, 0x11111111, 0x111111A1, + 0xA2222220, 0x22222222, 0x22C22222, 0x00000222, + 0x22000000, 0x2222A222, 0x22222222, 0x222222A2, + 0xF1111110, 0x11111111, 0x11F11111, 0x00011111, + 0x11110000, 0x1111F111, 0x11111111, 0x111111F1, + 0xA8AA88A0, 0xA88888A8, 0xA8A8A88A, 0x00088AAA, + 0xAAAA0000, 0xA8A8AA88, 0xA88AAAAA, 0xAAAA8A8A, + 0xAAA8AAA0, 0x8AAA8AAA, 0xAA8A8A8A, 0x000AAA88, + 0x8AAA0000, 0xAAA8A888, 0x8AA88A8A, 0x8A88A888, + 0x08080A00, 0x0A08080A, 0x080A0A08, 0x00080808, + 0x080A0000, 0x080A0808, 0x080A0808, 0x0A0A0A08, + 0xA0A0A0A0, 0x80A0A080, 0x8080A0A0, 0x00008080, + 0x80A00000, 0x80A080A0, 0xA080A0A0, 0x8080A0A0, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x99999000, 0x9B9B99BB, 0x9BB99999, 0x9999B9B9, + 0x9B99BB90, 0x9BBBBB9B, 0x9B9B9BB9, 0x00000999, + 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00AAA888, + 0x22000000, 0x2222B222, 0x22222222, 0x222222B2, + 0xB2222220, 0x22222222, 0x22D22222, 0x00000222, + 0x11000000, 0x1111A111, 0x11111111, 0x111111A1, + 0xA1111110, 0x11111111, 0x11C11111, 0x00000111, + 0x33000000, 0x3333B333, 0x33333333, 0x333333B3, + 0xB3333330, 0x33333333, 0x33D33333, 0x00000333, + 0x22000000, 0x2222A222, 0x22222222, 0x222222A2, + 0xA2222220, 0x22222222, 0x22C22222, 0x00000222, + 0x99B99B00, 0x9B9B99BB, 0x9BB99999, 0x9999B9B9, + 0x9B99BB99, 0x9BBBBB9B, 0x9B9B9BB9, 0x00000999, + 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0x8A88AA88, 0x8AAAAA8A, 0x8A8A8AA8, 
0x08AAA888, + 0x22222200, 0x2222F222, 0x22222222, 0x222222F2, + 0x22222222, 0x22222222, 0x22F22222, 0x00000222, + 0x11000000, 0x1111F111, 0x11111111, 0x11111111, + 0xF1111111, 0x11111111, 0x11F11111, 0x01111111, + 0xBB9BB900, 0xB9B9BB99, 0xB99BBBBB, 0xBBBB9B9B, + 0xB9BB99BB, 0xB99999B9, 0xB9B9B99B, 0x00000BBB, + 0xAA000000, 0xA8A8AA88, 0xA88AAAAA, 0xAAAA8A8A, + 0xA8AA88AA, 0xA88888A8, 0xA8A8A88A, 0x0A888AAA, + 0xAA000000, 0xA8A8AA88, 0xA88AAAAA, 0xAAAA8A8A, + 0xA8AA88A0, 0xA88888A8, 0xA8A8A88A, 0x00000AAA, + 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, + 0xBBBBBB00, 0x999BBBBB, 0x9BB99B9B, 0xB9B9B9BB, + 0xB9B99BBB, 0xB9B9B9BB, 0xB9BB9B99, 0x00000999, + 0x8A000000, 0xAA88A888, 0xA88888AA, 0xA88A8A88, + 0xA88AA88A, 0x88A8AAAA, 0xA8AA8AAA, 0x0888A88A, + 0x0B0B0B00, 0x090B0B0B, 0x0B090B0B, 0x0909090B, + 0x09090B0B, 0x09090B0B, 0x09090B09, 0x00000909, + 0x0A000000, 0x0A080808, 0x080A080A, 0x080A0A08, + 0x080A080A, 0x0808080A, 0x0A0A0A08, 0x0808080A, + 0xB0B0B000, 0x9090B0B0, 0x90B09090, 0xB0B0B090, + 0xB0B090B0, 0x90B0B0B0, 0xB0B09090, 0x00000090, + 0x80000000, 0xA080A080, 0xA08080A0, 0xA0808080, + 0xA080A080, 0x80A0A0A0, 0xA0A080A0, 0x00A0A0A0, + 0x22000000, 0x2222F222, 0x22222222, 0x222222F2, + 0xF2222220, 0x22222222, 0x22F22222, 0x00000222, + 0x11000000, 0x1111F111, 0x11111111, 0x111111F1, + 0xF1111110, 0x11111111, 0x11F11111, 0x00000111, + 0x33000000, 0x3333F333, 0x33333333, 0x333333F3, + 0xF3333330, 0x33333333, 0x33F33333, 0x00000333, + 0x22000000, 0x2222F222, 0x22222222, 0x222222F2, + 0xF2222220, 0x22222222, 0x22F22222, 0x00000222, + 0x99000000, 0x9B9B99BB, 0x9BB99999, 0x9999B9B9, + 0x9B99BB90, 0x9BBBBB9B, 0x9B9B9BB9, 0x00000999, + 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, + 0x88888000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, + 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00AAA888, + 0x88A88A00, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0x8A88AA88, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, + 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0x8A88AA88, 0x8AAAAA8A, 0x8A8A8AA8, 0x08AAA888, + 0x11000000, 0x1111A111, 0x11111111, 0x111111A1, + 0xA1111110, 0x11111111, 0x11C11111, 0x00000111, + 0x11000000, 0x1111A111, 0x11111111, 0x111111A1, + 0xA1111110, 0x11111111, 0x11C11111, 0x00000111, + 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, + 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, + 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +const u32 b43_ntab_tx_gain_rev0_1_2[] = { + 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42, + 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44, + 0x03c82a42, 0x03c82944, 0x03c82942, 0x03c82844, + 0x03c82842, 0x03c42b44, 0x03c42b42, 0x03c42a44, + 0x03c42a42, 0x03c42944, 0x03c42942, 0x03c42844, + 0x03c42842, 0x03c42744, 0x03c42742, 
0x03c42644, + 0x03c42642, 0x03c42544, 0x03c42542, 0x03c42444, + 0x03c42442, 0x03c02b44, 0x03c02b42, 0x03c02a44, + 0x03c02a42, 0x03c02944, 0x03c02942, 0x03c02844, + 0x03c02842, 0x03c02744, 0x03c02742, 0x03b02b44, + 0x03b02b42, 0x03b02a44, 0x03b02a42, 0x03b02944, + 0x03b02942, 0x03b02844, 0x03b02842, 0x03b02744, + 0x03b02742, 0x03b02644, 0x03b02642, 0x03b02544, + 0x03b02542, 0x03a02b44, 0x03a02b42, 0x03a02a44, + 0x03a02a42, 0x03a02944, 0x03a02942, 0x03a02844, + 0x03a02842, 0x03a02744, 0x03a02742, 0x03902b44, + 0x03902b42, 0x03902a44, 0x03902a42, 0x03902944, + 0x03902942, 0x03902844, 0x03902842, 0x03902744, + 0x03902742, 0x03902644, 0x03902642, 0x03902544, + 0x03902542, 0x03802b44, 0x03802b42, 0x03802a44, + 0x03802a42, 0x03802944, 0x03802942, 0x03802844, + 0x03802842, 0x03802744, 0x03802742, 0x03802644, + 0x03802642, 0x03802544, 0x03802542, 0x03802444, + 0x03802442, 0x03802344, 0x03802342, 0x03802244, + 0x03802242, 0x03802144, 0x03802142, 0x03802044, + 0x03802042, 0x03801f44, 0x03801f42, 0x03801e44, + 0x03801e42, 0x03801d44, 0x03801d42, 0x03801c44, + 0x03801c42, 0x03801b44, 0x03801b42, 0x03801a44, + 0x03801a42, 0x03801944, 0x03801942, 0x03801844, + 0x03801842, 0x03801744, 0x03801742, 0x03801644, + 0x03801642, 0x03801544, 0x03801542, 0x03801444, + 0x03801442, 0x03801344, 0x03801342, 0x00002b00, +}; + +const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = { + 0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e, + 0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037, + 0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e, + 0x1e41003c, 0x1e41003b, 0x1e410039, 0x1e410037, + 0x1d410044, 0x1d410042, 0x1d410040, 0x1d41003e, + 0x1d41003c, 0x1d41003b, 0x1d410039, 0x1d410037, + 0x1c410044, 0x1c410042, 0x1c410040, 0x1c41003e, + 0x1c41003c, 0x1c41003b, 0x1c410039, 0x1c410037, + 0x1b410044, 0x1b410042, 0x1b410040, 0x1b41003e, + 0x1b41003c, 0x1b41003b, 0x1b410039, 0x1b410037, + 0x1a410044, 0x1a410042, 0x1a410040, 0x1a41003e, + 0x1a41003c, 0x1a41003b, 0x1a410039, 0x1a410037, + 0x19410044, 0x19410042, 0x19410040, 0x1941003e, + 0x1941003c, 0x1941003b, 0x19410039, 0x19410037, + 0x18410044, 0x18410042, 0x18410040, 0x1841003e, + 0x1841003c, 0x1841003b, 0x18410039, 0x18410037, + 0x17410044, 0x17410042, 0x17410040, 0x1741003e, + 0x1741003c, 0x1741003b, 0x17410039, 0x17410037, + 0x16410044, 0x16410042, 0x16410040, 0x1641003e, + 0x1641003c, 0x1641003b, 0x16410039, 0x16410037, + 0x15410044, 0x15410042, 0x15410040, 0x1541003e, + 0x1541003c, 0x1541003b, 0x15410039, 0x15410037, + 0x14410044, 0x14410042, 0x14410040, 0x1441003e, + 0x1441003c, 0x1441003b, 0x14410039, 0x14410037, + 0x13410044, 0x13410042, 0x13410040, 0x1341003e, + 0x1341003c, 0x1341003b, 0x13410039, 0x13410037, + 0x12410044, 0x12410042, 0x12410040, 0x1241003e, + 0x1241003c, 0x1241003b, 0x12410039, 0x12410037, + 0x11410044, 0x11410042, 0x11410040, 0x1141003e, + 0x1141003c, 0x1141003b, 0x11410039, 0x11410037, + 0x10410044, 0x10410042, 0x10410040, 0x1041003e, + 0x1041003c, 0x1041003b, 0x10410039, 0x10410037, +}; + +const u32 b43_ntab_tx_gain_rev3_5ghz[] = { + 0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e, + 0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037, + 0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e, + 0xcef7003c, 0xcef7003b, 0xcef70039, 0xcef70037, + 0xcdf70044, 0xcdf70042, 0xcdf70040, 0xcdf7003e, + 0xcdf7003c, 0xcdf7003b, 0xcdf70039, 0xcdf70037, + 0xccf70044, 0xccf70042, 0xccf70040, 0xccf7003e, + 0xccf7003c, 0xccf7003b, 0xccf70039, 0xccf70037, + 0xcbf70044, 0xcbf70042, 0xcbf70040, 0xcbf7003e, + 0xcbf7003c, 0xcbf7003b, 0xcbf70039, 0xcbf70037, + 0xcaf70044, 0xcaf70042, 0xcaf70040, 
0xcaf7003e, + 0xcaf7003c, 0xcaf7003b, 0xcaf70039, 0xcaf70037, + 0xc9f70044, 0xc9f70042, 0xc9f70040, 0xc9f7003e, + 0xc9f7003c, 0xc9f7003b, 0xc9f70039, 0xc9f70037, + 0xc8f70044, 0xc8f70042, 0xc8f70040, 0xc8f7003e, + 0xc8f7003c, 0xc8f7003b, 0xc8f70039, 0xc8f70037, + 0xc7f70044, 0xc7f70042, 0xc7f70040, 0xc7f7003e, + 0xc7f7003c, 0xc7f7003b, 0xc7f70039, 0xc7f70037, + 0xc6f70044, 0xc6f70042, 0xc6f70040, 0xc6f7003e, + 0xc6f7003c, 0xc6f7003b, 0xc6f70039, 0xc6f70037, + 0xc5f70044, 0xc5f70042, 0xc5f70040, 0xc5f7003e, + 0xc5f7003c, 0xc5f7003b, 0xc5f70039, 0xc5f70037, + 0xc4f70044, 0xc4f70042, 0xc4f70040, 0xc4f7003e, + 0xc4f7003c, 0xc4f7003b, 0xc4f70039, 0xc4f70037, + 0xc3f70044, 0xc3f70042, 0xc3f70040, 0xc3f7003e, + 0xc3f7003c, 0xc3f7003b, 0xc3f70039, 0xc3f70037, + 0xc2f70044, 0xc2f70042, 0xc2f70040, 0xc2f7003e, + 0xc2f7003c, 0xc2f7003b, 0xc2f70039, 0xc2f70037, + 0xc1f70044, 0xc1f70042, 0xc1f70040, 0xc1f7003e, + 0xc1f7003c, 0xc1f7003b, 0xc1f70039, 0xc1f70037, + 0xc0f70044, 0xc0f70042, 0xc0f70040, 0xc0f7003e, + 0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037, +}; + +const u32 b43_ntab_tx_gain_rev4_5ghz[] = { + 0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e, + 0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037, + 0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e, + 0x2ef2003c, 0x2ef2003b, 0x2ef20039, 0x2ef20037, + 0x2df20044, 0x2df20042, 0x2df20040, 0x2df2003e, + 0x2df2003c, 0x2df2003b, 0x2df20039, 0x2df20037, + 0x2cf20044, 0x2cf20042, 0x2cf20040, 0x2cf2003e, + 0x2cf2003c, 0x2cf2003b, 0x2cf20039, 0x2cf20037, + 0x2bf20044, 0x2bf20042, 0x2bf20040, 0x2bf2003e, + 0x2bf2003c, 0x2bf2003b, 0x2bf20039, 0x2bf20037, + 0x2af20044, 0x2af20042, 0x2af20040, 0x2af2003e, + 0x2af2003c, 0x2af2003b, 0x2af20039, 0x2af20037, + 0x29f20044, 0x29f20042, 0x29f20040, 0x29f2003e, + 0x29f2003c, 0x29f2003b, 0x29f20039, 0x29f20037, + 0x28f20044, 0x28f20042, 0x28f20040, 0x28f2003e, + 0x28f2003c, 0x28f2003b, 0x28f20039, 0x28f20037, + 0x27f20044, 0x27f20042, 0x27f20040, 0x27f2003e, + 0x27f2003c, 0x27f2003b, 0x27f20039, 0x27f20037, + 0x26f20044, 0x26f20042, 0x26f20040, 0x26f2003e, + 0x26f2003c, 0x26f2003b, 0x26f20039, 0x26f20037, + 0x25f20044, 0x25f20042, 0x25f20040, 0x25f2003e, + 0x25f2003c, 0x25f2003b, 0x25f20039, 0x25f20037, + 0x24f20044, 0x24f20042, 0x24f20040, 0x24f2003e, + 0x24f2003c, 0x24f2003b, 0x24f20039, 0x24f20038, + 0x23f20041, 0x23f20040, 0x23f2003f, 0x23f2003e, + 0x23f2003c, 0x23f2003b, 0x23f20039, 0x23f20037, + 0x22f20044, 0x22f20042, 0x22f20040, 0x22f2003e, + 0x22f2003c, 0x22f2003b, 0x22f20039, 0x22f20037, + 0x21f20044, 0x21f20042, 0x21f20040, 0x21f2003e, + 0x21f2003c, 0x21f2003b, 0x21f20039, 0x21f20037, + 0x20d20043, 0x20d20041, 0x20d2003e, 0x20d2003c, + 0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034, +}; + +const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = { + 0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044, + 0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c, + 0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e, + 0x0e62003c, 0x0e62003d, 0x0e62003b, 0x0e62003a, + 0x0d620043, 0x0d620041, 0x0d620040, 0x0d62003e, + 0x0d62003d, 0x0d62003c, 0x0d62003b, 0x0d62003a, + 0x0c620041, 0x0c620040, 0x0c62003f, 0x0c62003e, + 0x0c62003c, 0x0c62003b, 0x0c620039, 0x0c620037, + 0x0b620046, 0x0b620044, 0x0b620042, 0x0b620040, + 0x0b62003e, 0x0b62003c, 0x0b62003b, 0x0b62003a, + 0x0a620041, 0x0a620040, 0x0a62003e, 0x0a62003c, + 0x0a62003b, 0x0a62003a, 0x0a620039, 0x0a620038, + 0x0962003e, 0x0962003d, 0x0962003c, 0x0962003b, + 0x09620039, 0x09620037, 0x09620035, 0x09620033, + 0x08620044, 0x08620042, 0x08620040, 0x0862003e, + 0x0862003c, 0x0862003b, 0x0862003a, 
0x08620039, + 0x07620043, 0x07620042, 0x07620040, 0x0762003f, + 0x0762003d, 0x0762003b, 0x0762003a, 0x07620039, + 0x0662003e, 0x0662003d, 0x0662003c, 0x0662003b, + 0x06620039, 0x06620037, 0x06620035, 0x06620033, + 0x05620046, 0x05620044, 0x05620042, 0x05620040, + 0x0562003e, 0x0562003c, 0x0562003b, 0x05620039, + 0x04620044, 0x04620042, 0x04620040, 0x0462003e, + 0x0462003c, 0x0462003b, 0x04620039, 0x04620038, + 0x0362003c, 0x0362003b, 0x0362003a, 0x03620039, + 0x03620038, 0x03620037, 0x03620035, 0x03620033, + 0x0262004c, 0x0262004a, 0x02620048, 0x02620047, + 0x02620046, 0x02620044, 0x02620043, 0x02620042, + 0x0162004a, 0x01620048, 0x01620046, 0x01620044, + 0x01620043, 0x01620042, 0x01620041, 0x01620040, + 0x00620042, 0x00620040, 0x0062003e, 0x0062003c, + 0x0062003b, 0x00620039, 0x00620037, 0x00620035, +}; + +const u32 txpwrctrl_tx_gain_ipa[] = { + 0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029, + 0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025, + 0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029, + 0x5ef70028, 0x5ef70027, 0x5ef70026, 0x5ef70025, + 0x5df7002d, 0x5df7002b, 0x5df7002a, 0x5df70029, + 0x5df70028, 0x5df70027, 0x5df70026, 0x5df70025, + 0x5cf7002d, 0x5cf7002b, 0x5cf7002a, 0x5cf70029, + 0x5cf70028, 0x5cf70027, 0x5cf70026, 0x5cf70025, + 0x5bf7002d, 0x5bf7002b, 0x5bf7002a, 0x5bf70029, + 0x5bf70028, 0x5bf70027, 0x5bf70026, 0x5bf70025, + 0x5af7002d, 0x5af7002b, 0x5af7002a, 0x5af70029, + 0x5af70028, 0x5af70027, 0x5af70026, 0x5af70025, + 0x59f7002d, 0x59f7002b, 0x59f7002a, 0x59f70029, + 0x59f70028, 0x59f70027, 0x59f70026, 0x59f70025, + 0x58f7002d, 0x58f7002b, 0x58f7002a, 0x58f70029, + 0x58f70028, 0x58f70027, 0x58f70026, 0x58f70025, + 0x57f7002d, 0x57f7002b, 0x57f7002a, 0x57f70029, + 0x57f70028, 0x57f70027, 0x57f70026, 0x57f70025, + 0x56f7002d, 0x56f7002b, 0x56f7002a, 0x56f70029, + 0x56f70028, 0x56f70027, 0x56f70026, 0x56f70025, + 0x55f7002d, 0x55f7002b, 0x55f7002a, 0x55f70029, + 0x55f70028, 0x55f70027, 0x55f70026, 0x55f70025, + 0x54f7002d, 0x54f7002b, 0x54f7002a, 0x54f70029, + 0x54f70028, 0x54f70027, 0x54f70026, 0x54f70025, + 0x53f7002d, 0x53f7002b, 0x53f7002a, 0x53f70029, + 0x53f70028, 0x53f70027, 0x53f70026, 0x53f70025, + 0x52f7002d, 0x52f7002b, 0x52f7002a, 0x52f70029, + 0x52f70028, 0x52f70027, 0x52f70026, 0x52f70025, + 0x51f7002d, 0x51f7002b, 0x51f7002a, 0x51f70029, + 0x51f70028, 0x51f70027, 0x51f70026, 0x51f70025, + 0x50f7002d, 0x50f7002b, 0x50f7002a, 0x50f70029, + 0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025, +}; + +const u32 txpwrctrl_tx_gain_ipa_rev5[] = { + 0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029, + 0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025, + 0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029, + 0x1ef70028, 0x1ef70027, 0x1ef70026, 0x1ef70025, + 0x1df7002d, 0x1df7002b, 0x1df7002a, 0x1df70029, + 0x1df70028, 0x1df70027, 0x1df70026, 0x1df70025, + 0x1cf7002d, 0x1cf7002b, 0x1cf7002a, 0x1cf70029, + 0x1cf70028, 0x1cf70027, 0x1cf70026, 0x1cf70025, + 0x1bf7002d, 0x1bf7002b, 0x1bf7002a, 0x1bf70029, + 0x1bf70028, 0x1bf70027, 0x1bf70026, 0x1bf70025, + 0x1af7002d, 0x1af7002b, 0x1af7002a, 0x1af70029, + 0x1af70028, 0x1af70027, 0x1af70026, 0x1af70025, + 0x19f7002d, 0x19f7002b, 0x19f7002a, 0x19f70029, + 0x19f70028, 0x19f70027, 0x19f70026, 0x19f70025, + 0x18f7002d, 0x18f7002b, 0x18f7002a, 0x18f70029, + 0x18f70028, 0x18f70027, 0x18f70026, 0x18f70025, + 0x17f7002d, 0x17f7002b, 0x17f7002a, 0x17f70029, + 0x17f70028, 0x17f70027, 0x17f70026, 0x17f70025, + 0x16f7002d, 0x16f7002b, 0x16f7002a, 0x16f70029, + 0x16f70028, 0x16f70027, 0x16f70026, 0x16f70025, + 0x15f7002d, 0x15f7002b, 0x15f7002a, 
0x15f70029, + 0x15f70028, 0x15f70027, 0x15f70026, 0x15f70025, + 0x14f7002d, 0x14f7002b, 0x14f7002a, 0x14f70029, + 0x14f70028, 0x14f70027, 0x14f70026, 0x14f70025, + 0x13f7002d, 0x13f7002b, 0x13f7002a, 0x13f70029, + 0x13f70028, 0x13f70027, 0x13f70026, 0x13f70025, + 0x12f7002d, 0x12f7002b, 0x12f7002a, 0x12f70029, + 0x12f70028, 0x12f70027, 0x12f70026, 0x12f70025, + 0x11f7002d, 0x11f7002b, 0x11f7002a, 0x11f70029, + 0x11f70028, 0x11f70027, 0x11f70026, 0x11f70025, + 0x10f7002d, 0x10f7002b, 0x10f7002a, 0x10f70029, + 0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025, +}; + +const u32 txpwrctrl_tx_gain_ipa_rev6[] = { + 0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029, + 0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025, + 0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029, + 0x0ef70028, 0x0ef70027, 0x0ef70026, 0x0ef70025, + 0x0df7002d, 0x0df7002b, 0x0df7002a, 0x0df70029, + 0x0df70028, 0x0df70027, 0x0df70026, 0x0df70025, + 0x0cf7002d, 0x0cf7002b, 0x0cf7002a, 0x0cf70029, + 0x0cf70028, 0x0cf70027, 0x0cf70026, 0x0cf70025, + 0x0bf7002d, 0x0bf7002b, 0x0bf7002a, 0x0bf70029, + 0x0bf70028, 0x0bf70027, 0x0bf70026, 0x0bf70025, + 0x0af7002d, 0x0af7002b, 0x0af7002a, 0x0af70029, + 0x0af70028, 0x0af70027, 0x0af70026, 0x0af70025, + 0x09f7002d, 0x09f7002b, 0x09f7002a, 0x09f70029, + 0x09f70028, 0x09f70027, 0x09f70026, 0x09f70025, + 0x08f7002d, 0x08f7002b, 0x08f7002a, 0x08f70029, + 0x08f70028, 0x08f70027, 0x08f70026, 0x08f70025, + 0x07f7002d, 0x07f7002b, 0x07f7002a, 0x07f70029, + 0x07f70028, 0x07f70027, 0x07f70026, 0x07f70025, + 0x06f7002d, 0x06f7002b, 0x06f7002a, 0x06f70029, + 0x06f70028, 0x06f70027, 0x06f70026, 0x06f70025, + 0x05f7002d, 0x05f7002b, 0x05f7002a, 0x05f70029, + 0x05f70028, 0x05f70027, 0x05f70026, 0x05f70025, + 0x04f7002d, 0x04f7002b, 0x04f7002a, 0x04f70029, + 0x04f70028, 0x04f70027, 0x04f70026, 0x04f70025, + 0x03f7002d, 0x03f7002b, 0x03f7002a, 0x03f70029, + 0x03f70028, 0x03f70027, 0x03f70026, 0x03f70025, + 0x02f7002d, 0x02f7002b, 0x02f7002a, 0x02f70029, + 0x02f70028, 0x02f70027, 0x02f70026, 0x02f70025, + 0x01f7002d, 0x01f7002b, 0x01f7002a, 0x01f70029, + 0x01f70028, 0x01f70027, 0x01f70026, 0x01f70025, + 0x00f7002d, 0x00f7002b, 0x00f7002a, 0x00f70029, + 0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025, +}; + +const u32 txpwrctrl_tx_gain_ipa_5g[] = { + 0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031, + 0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b, + 0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027, + 0x7ff70026, 0x7ff70024, 0x7ff70023, 0x7ff70022, + 0x7ef70028, 0x7ef70027, 0x7ef70026, 0x7ef70025, + 0x7ef70024, 0x7ef70023, 0x7df70028, 0x7df70027, + 0x7df70026, 0x7df70025, 0x7df70024, 0x7df70023, + 0x7df70022, 0x7cf70029, 0x7cf70028, 0x7cf70027, + 0x7cf70026, 0x7cf70025, 0x7cf70023, 0x7cf70022, + 0x7bf70029, 0x7bf70028, 0x7bf70026, 0x7bf70025, + 0x7bf70024, 0x7bf70023, 0x7bf70022, 0x7bf70021, + 0x7af70029, 0x7af70028, 0x7af70027, 0x7af70026, + 0x7af70025, 0x7af70024, 0x7af70023, 0x7af70022, + 0x79f70029, 0x79f70028, 0x79f70027, 0x79f70026, + 0x79f70025, 0x79f70024, 0x79f70023, 0x79f70022, + 0x78f70029, 0x78f70028, 0x78f70027, 0x78f70026, + 0x78f70025, 0x78f70024, 0x78f70023, 0x78f70022, + 0x77f70029, 0x77f70028, 0x77f70027, 0x77f70026, + 0x77f70025, 0x77f70024, 0x77f70023, 0x77f70022, + 0x76f70029, 0x76f70028, 0x76f70027, 0x76f70026, + 0x76f70024, 0x76f70023, 0x76f70022, 0x76f70021, + 0x75f70029, 0x75f70028, 0x75f70027, 0x75f70026, + 0x75f70025, 0x75f70024, 0x75f70023, 0x74f70029, + 0x74f70028, 0x74f70026, 0x74f70025, 0x74f70024, + 0x74f70023, 0x74f70022, 0x73f70029, 0x73f70027, + 0x73f70026, 0x73f70025, 0x73f70024, 
0x73f70023, + 0x73f70022, 0x72f70028, 0x72f70027, 0x72f70026, + 0x72f70025, 0x72f70024, 0x72f70023, 0x72f70022, + 0x71f70028, 0x71f70027, 0x71f70026, 0x71f70025, + 0x71f70024, 0x71f70023, 0x70f70028, 0x70f70027, + 0x70f70026, 0x70f70024, 0x70f70023, 0x70f70022, + 0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f, +}; + +const u16 tbl_iqcal_gainparams[2][9][8] = { + { + { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 }, + { 0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69 }, + { 0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68 }, + { 0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67 }, + { 0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66 }, + { 0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65 }, + { 0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65 }, + { 0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65 }, + { 0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65 } + }, + { + { 0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 }, + { 0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 }, + { 0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79 }, + { 0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78 }, + { 0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78 }, + { 0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78 }, + { 0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78 }, + { 0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78 }, + { 0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78 } + } +}; + +const struct nphy_txiqcal_ladder ladder_lo[] = { + { 3, 0 }, + { 4, 0 }, + { 6, 0 }, + { 9, 0 }, + { 13, 0 }, + { 18, 0 }, + { 25, 0 }, + { 25, 1 }, + { 25, 2 }, + { 25, 3 }, + { 25, 4 }, + { 25, 5 }, + { 25, 6 }, + { 25, 7 }, + { 35, 7 }, + { 50, 7 }, + { 71, 7 }, + { 100, 7 } +}; + +const struct nphy_txiqcal_ladder ladder_iq[] = { + { 3, 0 }, + { 4, 0 }, + { 6, 0 }, + { 9, 0 }, + { 13, 0 }, + { 18, 0 }, + { 25, 0 }, + { 35, 0 }, + { 50, 0 }, + { 71, 0 }, + { 100, 0 }, + { 100, 1 }, + { 100, 2 }, + { 100, 3 }, + { 100, 4 }, + { 100, 5 }, + { 100, 6 }, + { 100, 7 } +}; + +const u16 loscale[] = { + 256, 256, 271, 271, + 287, 256, 256, 271, + 271, 287, 287, 304, + 304, 256, 256, 271, + 271, 287, 287, 304, + 304, 322, 322, 341, + 341, 362, 362, 383, + 383, 256, 256, 271, + 271, 287, 287, 304, + 304, 322, 322, 256, + 256, 271, 271, 287, + 287, 304, 304, 322, + 322, 341, 341, 362, + 362, 256, 256, 271, + 271, 287, 287, 304, + 304, 322, 322, 256, + 256, 271, 271, 287, + 287, 304, 304, 322, + 322, 341, 341, 362, + 362, 256, 256, 271, + 271, 287, 287, 304, + 304, 322, 322, 341, + 341, 362, 362, 383, + 383, 406, 406, 430, + 430, 455, 455, 482, + 482, 511, 511, 541, + 541, 573, 573, 607, + 607, 643, 643, 681, + 681, 722, 722, 764, + 764, 810, 810, 858, + 858, 908, 908, 962, + 962, 1019, 1019, 256 +}; + +const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = { + 0x0200, 0x0300, 0x0400, 0x0700, + 0x0900, 0x0c00, 0x1200, 0x1201, + 0x1202, 0x1203, 0x1204, 0x1205, + 0x1206, 0x1207, 0x1907, 0x2307, + 0x3207, 0x4707 +}; + +const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = { + 0x0300, 0x0500, 0x0700, 0x0900, + 0x0d00, 0x1100, 0x1900, 0x1901, + 0x1902, 0x1903, 0x1904, 0x1905, + 0x1906, 0x1907, 0x2407, 0x3207, + 0x4607, 0x6407 +}; + +const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = { + 0x0100, 0x0200, 0x0400, 0x0700, + 0x0900, 0x0c00, 0x1200, 0x1900, + 0x2300, 0x3200, 0x4700, 0x4701, + 0x4702, 0x4703, 0x4704, 0x4705, + 0x4706, 0x4707 +}; + +const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = { + 0x0200, 0x0300, 0x0600, 0x0900, + 0x0d00, 0x1100, 0x1900, 0x2400, + 0x3200, 0x4600, 0x6400, 0x6401, + 0x6402, 0x6403, 0x6404, 0x6405, + 0x6406, 0x6407 +}; + +const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3] = { }; + +const u16 tbl_tx_iqlo_cal_startcoefs[B43_NTAB_TX_IQLO_CAL_STARTCOEFS] = { }; + +const u16 
tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = { + 0x8423, 0x8323, 0x8073, 0x8256, + 0x8045, 0x8223, 0x9423, 0x9323, + 0x9073, 0x9256, 0x9045, 0x9223 +}; + +const u16 tbl_tx_iqlo_cal_cmds_recal[] = { + 0x8101, 0x8253, 0x8053, 0x8234, + 0x8034, 0x9101, 0x9253, 0x9053, + 0x9234, 0x9034 +}; + +const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = { + 0x8123, 0x8264, 0x8086, 0x8245, + 0x8056, 0x9123, 0x9264, 0x9086, + 0x9245, 0x9056 +}; + +const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = { + 0x8434, 0x8334, 0x8084, 0x8267, + 0x8056, 0x8234, 0x9434, 0x9334, + 0x9084, 0x9267, 0x9056, 0x9234 +}; + +const s16 tbl_tx_filter_coef_rev4[7][15] = { + { -377, 137, -407, 208, -1527, + 956, 93, 186, 93, 230, + -44, 230, 20, -191, 201 }, + { -77, 20, -98, 49, -93, + 60, 56, 111, 56, 26, + -5, 26, 34, -32, 34 }, + { -360, 164, -376, 164, -1533, + 576, 308, -314, 308, 121, + -73, 121, 91, 124, 91 }, + { -295, 200, -363, 142, -1391, + 826, 151, 301, 151, 151, + 301, 151, 602, -752, 602 }, + { -92, 58, -96, 49, -104, + 44, 17, 35, 17, 12, + 25, 12, 13, 27, 13 }, + { -375, 136, -399, 209, -1479, + 949, 130, 260, 130, 230, + -44, 230, 201, -191, 201 }, + { 0xed9, 0xc8, 0xe95, 0x8e, 0xa91, + 0x33a, 0x97, 0x12d, 0x97, 0x97, + 0x12d, 0x97, 0x25a, 0xd10, 0x25a } +}; + +/* addr0, addr1, bmask, shift */ +const struct nphy_rf_control_override_rev2 tbl_rf_control_override_rev2[] = { + { 0x78, 0x78, 0x0038, 3 }, /* for field == 0x0002 (fls == 2) */ + { 0x7A, 0x7D, 0x0001, 0 }, /* for field == 0x0004 (fls == 3) */ + { 0x7A, 0x7D, 0x0002, 1 }, /* for field == 0x0008 (fls == 4) */ + { 0x7A, 0x7D, 0x0004, 2 }, /* for field == 0x0010 (fls == 5) */ + { 0x7A, 0x7D, 0x0030, 4 }, /* for field == 0x0020 (fls == 6) */ + { 0x7A, 0x7D, 0x00C0, 6 }, /* for field == 0x0040 (fls == 7) */ + { 0x7A, 0x7D, 0x0100, 8 }, /* for field == 0x0080 (fls == 8) */ + { 0x7A, 0x7D, 0x0200, 9 }, /* for field == 0x0100 (fls == 9) */ + { 0x78, 0x78, 0x0004, 2 }, /* for field == 0x0200 (fls == 10) */ + { 0x7B, 0x7E, 0x01FF, 0 }, /* for field == 0x0400 (fls == 11) */ + { 0x7C, 0x7F, 0x01FF, 0 }, /* for field == 0x0800 (fls == 12) */ + { 0x78, 0x78, 0x0100, 8 }, /* for field == 0x1000 (fls == 13) */ + { 0x78, 0x78, 0x0200, 9 }, /* for field == 0x2000 (fls == 14) */ + { 0x78, 0x78, 0xF000, 12 } /* for field == 0x4000 (fls == 15) */ +}; + +/* val_mask, val_shift, en_addr0, val_addr0, en_addr1, val_addr1 */ +const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = { + { 0x8000, 15, 0xE5, 0xF9, 0xE6, 0xFB }, /* field == 0x0001 (fls 1) */ + { 0x0001, 0, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0002 (fls 2) */ + { 0x0002, 1, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0004 (fls 3) */ + { 0x0004, 2, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0008 (fls 4) */ + { 0x0016, 4, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0010 (fls 5) */ + { 0x0020, 5, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0020 (fls 6) */ + { 0x0040, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0040 (fls 7) */ + { 0x0080, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0080 (fls 8) */ + { 0x0100, 7, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0100 (fls 9) */ + { 0x0007, 0, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0200 (fls 10) */ + { 0x0070, 4, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0400 (fls 11) */ + { 0xE000, 13, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0800 (fls 12) */ + { 0xFFFF, 0, 0xE7, 0x7B, 0xEC, 0x7E }, /* field == 0x1000 (fls 13) */ + { 0xFFFF, 0, 0xE7, 0x7C, 0xEC, 0x7F }, /* field == 0x2000 (fls 14) */ + { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */ +}; + +static inline void 
assert_ntab_array_sizes(void) +{ +#undef check +#define check(table, size) \ + BUILD_BUG_ON(ARRAY_SIZE(b43_ntab_##table) != B43_NTAB_##size##_SIZE) + + check(adjustpower0, C0_ADJPLT); + check(adjustpower1, C1_ADJPLT); + check(bdi, BDI); + check(channelest, CHANEST); + check(estimatepowerlt0, C0_ESTPLT); + check(estimatepowerlt1, C1_ESTPLT); + check(framelookup, FRAMELT); + check(framestruct, FRAMESTRUCT); + check(gainctl0, C0_GAINCTL); + check(gainctl1, C1_GAINCTL); + check(intlevel, INTLEVEL); + check(iqlt0, C0_IQLT); + check(iqlt1, C1_IQLT); + check(loftlt0, C0_LOFEEDTH); + check(loftlt1, C1_LOFEEDTH); + check(mcs, MCS); + check(noisevar10, NOISEVAR10); + check(noisevar11, NOISEVAR11); + check(pilot, PILOT); + check(pilotlt, PILOTLT); + check(tdi20a0, TDI20A0); + check(tdi20a1, TDI20A1); + check(tdi40a0, TDI40A0); + check(tdi40a1, TDI40A1); + check(tdtrn, TDTRN); + check(tmap, TMAP); + +#undef check +} + +u32 b43_ntab_read(struct b43_wldev *dev, u32 offset) +{ + u32 type, value; + + type = offset & B43_NTAB_TYPEMASK; + offset &= ~B43_NTAB_TYPEMASK; + B43_WARN_ON(offset > 0xFFFF); + + switch (type) { + case B43_NTAB_8BIT: + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); + value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF; + break; + case B43_NTAB_16BIT: + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); + value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + break; + case B43_NTAB_32BIT: + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); + value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI); + value <<= 16; + value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + break; + default: + B43_WARN_ON(1); + value = 0; + } + + return value; +} + +void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset, + unsigned int nr_elements, void *_data) +{ + u32 type; + u8 *data = _data; + unsigned int i; + + type = offset & B43_NTAB_TYPEMASK; + offset &= ~B43_NTAB_TYPEMASK; + B43_WARN_ON(offset > 0xFFFF); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); + + for (i = 0; i < nr_elements; i++) { + switch (type) { + case B43_NTAB_8BIT: + *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF; + data++; + break; + case B43_NTAB_16BIT: + *((u16 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + data += 2; + break; + case B43_NTAB_32BIT: + *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI); + *((u32 *)data) <<= 16; + *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO); + data += 4; + break; + default: + B43_WARN_ON(1); + } + } +} + +void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value) +{ + u32 type; + + type = offset & B43_NTAB_TYPEMASK; + offset &= 0xFFFF; + + switch (type) { + case B43_NTAB_8BIT: + B43_WARN_ON(value & ~0xFF); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value); + break; + case B43_NTAB_16BIT: + B43_WARN_ON(value & ~0xFFFF); + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value); + break; + case B43_NTAB_32BIT: + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); + b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, value >> 16); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value & 0xFFFF); + break; + default: + B43_WARN_ON(1); + } + + return; + + /* Some compiletime assertions... 
*/ + assert_ntab_array_sizes(); +} + +void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset, + unsigned int nr_elements, const void *_data) +{ + u32 type, value; + const u8 *data = _data; + unsigned int i; + + type = offset & B43_NTAB_TYPEMASK; + offset &= ~B43_NTAB_TYPEMASK; + B43_WARN_ON(offset > 0xFFFF); + + b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); + + for (i = 0; i < nr_elements; i++) { + switch (type) { + case B43_NTAB_8BIT: + value = *data; + data++; + B43_WARN_ON(value & ~0xFF); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value); + break; + case B43_NTAB_16BIT: + value = *((u16 *)data); + data += 2; + B43_WARN_ON(value & ~0xFFFF); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value); + break; + case B43_NTAB_32BIT: + value = *((u32 *)data); + data += 4; + b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, value >> 16); + b43_phy_write(dev, B43_NPHY_TABLE_DATALO, + value & 0xFFFF); + break; + default: + B43_WARN_ON(1); + } + } +} + +#define ntab_upload(dev, offset, data) do { \ + unsigned int i; \ + for (i = 0; i < (offset##_SIZE); i++) \ + b43_ntab_write(dev, (offset) + i, (data)[i]); \ + } while (0) + +void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev) +{ + /* Static tables */ + ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct); + ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup); + ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap); + ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn); + ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel); + ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot); + ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt); + ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0); + ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1); + ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0); + ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1); + ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi); + ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest); + ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs); + + /* Volatile tables */ + ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10); + ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11); + ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0); + ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1); + ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0); + ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1); + ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0); + ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1); + ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0); + ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1); + ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0); + ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1); +} + +void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) +{ + /* Static tables */ + /* TODO */ + + /* Volatile tables */ + /* TODO */ +} diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h new file mode 100644 index 0000000..9c1c6ec --- /dev/null +++ b/drivers/net/wireless/b43/tables_nphy.h @@ -0,0 +1,203 @@ +#ifndef B43_TABLES_NPHY_H_ +#define B43_TABLES_NPHY_H_ + +#include <linux/types.h> + + +struct b43_nphy_channeltab_entry { + /* The channel number */ + u8 channel; + /* Radio register values on channelswitch */ + u8 radio_pll_ref; + u8 radio_rf_pllmod0; + u8 radio_rf_pllmod1; + u8 radio_vco_captail; + u8 radio_vco_cal1; + u8 radio_vco_cal2; + u8 radio_pll_lfc1; + u8 radio_pll_lfr1; + u8 radio_pll_lfc2; + u8 radio_lgbuf_cenbuf; + u8 radio_lgen_tune1; + u8
radio_lgen_tune2; + u8 radio_c1_lgbuf_atune; + u8 radio_c1_lgbuf_gtune; + u8 radio_c1_rx_rfr1; + u8 radio_c1_tx_pgapadtn; + u8 radio_c1_tx_mxbgtrim; + u8 radio_c2_lgbuf_atune; + u8 radio_c2_lgbuf_gtune; + u8 radio_c2_rx_rfr1; + u8 radio_c2_tx_pgapadtn; + u8 radio_c2_tx_mxbgtrim; + /* PHY register values on channelswitch */ + u16 phy_bw1a; + u16 phy_bw2; + u16 phy_bw3; + u16 phy_bw4; + u16 phy_bw5; + u16 phy_bw6; + /* The channel frequency in MHz */ + u16 freq; + /* An unknown value */ + u16 unk2; +}; + + +struct b43_wldev; + +struct nphy_txiqcal_ladder { + u8 percent; + u8 g_env; +}; + +struct nphy_rf_control_override_rev2 { + u8 addr0; + u8 addr1; + u16 bmask; + u8 shift; +}; + +struct nphy_rf_control_override_rev3 { + u16 val_mask; + u8 val_shift; + u8 en_addr0; + u8 val_addr0; + u8 en_addr1; + u8 val_addr1; +}; + +/* Upload the default register value table. + * If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz + * table is uploaded. If "ignore_uploadflag" is true, we upload any value + * and ignore the "UPLOAD" flag. */ +void b2055_upload_inittab(struct b43_wldev *dev, + bool ghz5, bool ignore_uploadflag); + + +/* Get the NPHY Channel Switch Table entry for a channel number. + * Returns NULL on failure to find an entry. */ +const struct b43_nphy_channeltab_entry * +b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel); + + +/* The N-PHY tables. */ + +#define B43_NTAB_TYPEMASK 0xF0000000 +#define B43_NTAB_8BIT 0x10000000 +#define B43_NTAB_16BIT 0x20000000 +#define B43_NTAB_32BIT 0x30000000 +#define B43_NTAB8(table, offset) (((table) << 10) | (offset) | B43_NTAB_8BIT) +#define B43_NTAB16(table, offset) (((table) << 10) | (offset) | B43_NTAB_16BIT) +#define B43_NTAB32(table, offset) (((table) << 10) | (offset) | B43_NTAB_32BIT) + +/* Static N-PHY tables */ +#define B43_NTAB_FRAMESTRUCT B43_NTAB32(0x0A, 0x000) /* Frame Struct Table */ +#define B43_NTAB_FRAMESTRUCT_SIZE 832 +#define B43_NTAB_FRAMELT B43_NTAB8 (0x18, 0x000) /* Frame Lookup Table */ +#define B43_NTAB_FRAMELT_SIZE 32 +#define B43_NTAB_TMAP B43_NTAB32(0x0C, 0x000) /* T Map Table */ +#define B43_NTAB_TMAP_SIZE 448 +#define B43_NTAB_TDTRN B43_NTAB32(0x0E, 0x000) /* TDTRN Table */ +#define B43_NTAB_TDTRN_SIZE 704 +#define B43_NTAB_INTLEVEL B43_NTAB32(0x0D, 0x000) /* Int Level Table */ +#define B43_NTAB_INTLEVEL_SIZE 7 +#define B43_NTAB_PILOT B43_NTAB16(0x0B, 0x000) /* Pilot Table */ +#define B43_NTAB_PILOT_SIZE 88 +#define B43_NTAB_PILOTLT B43_NTAB32(0x14, 0x000) /* Pilot Lookup Table */ +#define B43_NTAB_PILOTLT_SIZE 6 +#define B43_NTAB_TDI20A0 B43_NTAB32(0x13, 0x080) /* TDI Table 20 Antenna 0 */ +#define B43_NTAB_TDI20A0_SIZE 55 +#define B43_NTAB_TDI20A1 B43_NTAB32(0x13, 0x100) /* TDI Table 20 Antenna 1 */ +#define B43_NTAB_TDI20A1_SIZE 55 +#define B43_NTAB_TDI40A0 B43_NTAB32(0x13, 0x280) /* TDI Table 40 Antenna 0 */ +#define B43_NTAB_TDI40A0_SIZE 110 +#define B43_NTAB_TDI40A1 B43_NTAB32(0x13, 0x300) /* TDI Table 40 Antenna 1 */ +#define B43_NTAB_TDI40A1_SIZE 110 +#define B43_NTAB_BDI B43_NTAB16(0x15, 0x000) /* BDI Table */ +#define B43_NTAB_BDI_SIZE 6 +#define B43_NTAB_CHANEST B43_NTAB32(0x16, 0x000) /* Channel Estimate Table */ +#define B43_NTAB_CHANEST_SIZE 96 +#define B43_NTAB_MCS B43_NTAB8 (0x12, 0x000) /* MCS Table */ +#define B43_NTAB_MCS_SIZE 128 + +/* Volatile N-PHY tables */ +#define B43_NTAB_NOISEVAR10 B43_NTAB32(0x10, 0x000) /* Noise Var Table 10 */ +#define B43_NTAB_NOISEVAR10_SIZE 256 +#define B43_NTAB_NOISEVAR11 B43_NTAB32(0x10, 0x080) /* Noise Var Table 11 */ +#define B43_NTAB_NOISEVAR11_SIZE 
256 +#define B43_NTAB_C0_ESTPLT B43_NTAB8 (0x1A, 0x000) /* Estimate Power Lookup Table Core 0 */ +#define B43_NTAB_C0_ESTPLT_SIZE 64 +#define B43_NTAB_C1_ESTPLT B43_NTAB8 (0x1B, 0x000) /* Estimate Power Lookup Table Core 1 */ +#define B43_NTAB_C1_ESTPLT_SIZE 64 +#define B43_NTAB_C0_ADJPLT B43_NTAB8 (0x1A, 0x040) /* Adjust Power Lookup Table Core 0 */ +#define B43_NTAB_C0_ADJPLT_SIZE 128 +#define B43_NTAB_C1_ADJPLT B43_NTAB8 (0x1B, 0x040) /* Adjust Power Lookup Table Core 1 */ +#define B43_NTAB_C1_ADJPLT_SIZE 128 +#define B43_NTAB_C0_GAINCTL B43_NTAB32(0x1A, 0x0C0) /* Gain Control Lookup Table Core 0 */ +#define B43_NTAB_C0_GAINCTL_SIZE 128 +#define B43_NTAB_C1_GAINCTL B43_NTAB32(0x1B, 0x0C0) /* Gain Control Lookup Table Core 1 */ +#define B43_NTAB_C1_GAINCTL_SIZE 128 +#define B43_NTAB_C0_IQLT B43_NTAB32(0x1A, 0x140) /* IQ Lookup Table Core 0 */ +#define B43_NTAB_C0_IQLT_SIZE 128 +#define B43_NTAB_C1_IQLT B43_NTAB32(0x1B, 0x140) /* IQ Lookup Table Core 1 */ +#define B43_NTAB_C1_IQLT_SIZE 128 +#define B43_NTAB_C0_LOFEEDTH B43_NTAB16(0x1A, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 0 */ +#define B43_NTAB_C0_LOFEEDTH_SIZE 128 +#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ +#define B43_NTAB_C1_LOFEEDTH_SIZE 128 + +#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18 +#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18 +#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18 +#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_20_SIZE 18 +#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3 11 +#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS 9 +#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3 12 +#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL 10 +#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL 10 +#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3 12 + +u32 b43_ntab_read(struct b43_wldev *dev, u32 offset); +void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset, + unsigned int nr_elements, void *_data); +void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value); +void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset, + unsigned int nr_elements, const void *_data); + +void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev); +void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev); + +extern const u32 b43_ntab_tx_gain_rev0_1_2[]; +extern const u32 b43_ntab_tx_gain_rev3plus_2ghz[]; +extern const u32 b43_ntab_tx_gain_rev3_5ghz[]; +extern const u32 b43_ntab_tx_gain_rev4_5ghz[]; +extern const u32 b43_ntab_tx_gain_rev5plus_5ghz[]; + +extern const u32 txpwrctrl_tx_gain_ipa[]; +extern const u32 txpwrctrl_tx_gain_ipa_rev5[]; +extern const u32 txpwrctrl_tx_gain_ipa_rev6[]; +extern const u32 txpwrctrl_tx_gain_ipa_5g[]; +extern const u16 tbl_iqcal_gainparams[2][9][8]; +extern const struct nphy_txiqcal_ladder ladder_lo[]; +extern const struct nphy_txiqcal_ladder ladder_iq[]; +extern const u16 loscale[]; + +extern const u16 tbl_tx_iqlo_cal_loft_ladder_40[]; +extern const u16 tbl_tx_iqlo_cal_loft_ladder_20[]; +extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[]; +extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[]; +extern const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[]; +extern const u16 tbl_tx_iqlo_cal_startcoefs[]; +extern const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[]; +extern const u16 tbl_tx_iqlo_cal_cmds_recal[]; +extern const u16 tbl_tx_iqlo_cal_cmds_fullcal[]; +extern const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[]; +extern const s16 tbl_tx_filter_coef_rev4[7][15]; + +extern const struct nphy_rf_control_override_rev2 + tbl_rf_control_override_rev2[]; 
+extern const struct nphy_rf_control_override_rev3 + tbl_rf_control_override_rev3[]; + +#endif /* B43_TABLES_NPHY_H_ */ diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c new file mode 100644 index 0000000..97c7916 --- /dev/null +++ b/drivers/net/wireless/b43/wa.c @@ -0,0 +1,636 @@ +/* + + Broadcom B43 wireless driver + + PHY workarounds. + + Copyright (c) 2005-2007 Stefano Brivio + Copyright (c) 2005-2007 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "main.h" +#include "tables.h" +#include "phy_common.h" +#include "wa.h" + +static void b43_wa_papd(struct b43_wldev *dev) +{ + u16 backup; + + backup = b43_ofdmtab_read16(dev, B43_OFDMTAB_PWRDYN2, 0); + b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, 7); + b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 0, 0); + b43_dummy_transmission(dev, true, true); + b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, backup); +} + +static void b43_wa_auxclipthr(struct b43_wldev *dev) +{ + b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x3800); +} + +static void b43_wa_afcdac(struct b43_wldev *dev) +{ + b43_phy_write(dev, 0x0035, 0x03FF); + b43_phy_write(dev, 0x0036, 0x0400); +} + +static void b43_wa_txdc_offset(struct b43_wldev *dev) +{ + b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 0, 0x0051); +} + +void b43_wa_initgains(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + b43_phy_write(dev, B43_PHY_LNAHPFCTL, 0x1FF9); + b43_phy_mask(dev, B43_PHY_LPFGAINCTL, 0xFF0F); + if (phy->rev <= 2) + b43_ofdmtab_write16(dev, B43_OFDMTAB_LPFGAIN, 0, 0x1FBF); + b43_radio_write16(dev, 0x0002, 0x1FBF); + + b43_phy_write(dev, 0x0024, 0x4680); + b43_phy_write(dev, 0x0020, 0x0003); + b43_phy_write(dev, 0x001D, 0x0F40); + b43_phy_write(dev, 0x001F, 0x1C00); + if (phy->rev <= 3) + b43_phy_maskset(dev, 0x002A, 0x00FF, 0x0400); + else if (phy->rev == 5) { + b43_phy_maskset(dev, 0x002A, 0x00FF, 0x1A00); + b43_phy_write(dev, 0x00CC, 0x2121); + } + if (phy->rev >= 3) + b43_phy_write(dev, 0x00BA, 0x3ED5); +} + +static void b43_wa_divider(struct b43_wldev *dev) +{ + b43_phy_mask(dev, 0x002B, ~0x0100); + b43_phy_write(dev, 0x008E, 0x58C1); +} + +static void b43_wa_gt(struct b43_wldev *dev) /* Gain table. 
*/ +{ + if (dev->phy.rev <= 2) { + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 0, 15); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 1, 31); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 2, 42); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 3, 48); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 4, 58); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 0, 3); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 1, 3); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 2, 7); + } else { + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25); + } +} + +static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */ +{ + int i; + + if (0 /* FIXME: For APHY.rev=2 this might be needed */) { + for (i = 0; i < 8; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_RSSI, i, i + 8); + for (i = 8; i < 16; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_RSSI, i, i - 8); + } else { + for (i = 0; i < 64; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_RSSI, i, i); + } +} + +static void b43_wa_analog(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 ofdmrev; + + ofdmrev = b43_phy_read(dev, B43_PHY_VERSION_OFDM) & B43_PHYVER_VERSION; + if (ofdmrev > 2) { + if (phy->type == B43_PHYTYPE_A) + b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1808); + else + b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000); + } else { + b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 3, 0x1044); + b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 4, 0x7201); + b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 6, 0x0040); + } +} + +static void b43_wa_dac(struct b43_wldev *dev) +{ + if (dev->phy.analog == 1) + b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, + (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0034) | 0x0008); + else + b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, + (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0078) | 0x0010); +} + +static void b43_wa_fft(struct b43_wldev *dev) /* Fine frequency table */ +{ + int i; + + if (dev->phy.type == B43_PHYTYPE_A) + for (i = 0; i < B43_TAB_FINEFREQA_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqa[i]); + else + for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqg[i]); +} + +static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */ +{ + struct b43_phy *phy = &dev->phy; + int i; + + if (phy->type == B43_PHYTYPE_A) { + if (phy->rev == 2) + for (i = 0; i < B43_TAB_NOISEA2_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea2[i]); + else + for (i = 0; i < B43_TAB_NOISEA3_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea3[i]); + } else { + if (phy->rev == 1) + for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg1[i]); + else + for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, 
b43_tab_noiseg2[i]); + } +} + +static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */ +{ + int i; + + for (i = 0; i < B43_TAB_ROTOR_SIZE; i++) + b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]); +} + +static void b43_write_null_nst(struct b43_wldev *dev) +{ + int i; + + for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, 0); +} + +static void b43_write_nst(struct b43_wldev *dev, const u16 *nst) +{ + int i; + + for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, nst[i]); +} + +static void b43_wa_nst(struct b43_wldev *dev) /* Noise scale table */ +{ + struct b43_phy *phy = &dev->phy; + + if (phy->type == B43_PHYTYPE_A) { + if (phy->rev <= 1) + b43_write_null_nst(dev); + else if (phy->rev == 2) + b43_write_nst(dev, b43_tab_noisescalea2); + else if (phy->rev == 3) + b43_write_nst(dev, b43_tab_noisescalea3); + else + b43_write_nst(dev, b43_tab_noisescaleg3); + } else { + if (phy->rev >= 6) { + if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) + b43_write_nst(dev, b43_tab_noisescaleg3); + else + b43_write_nst(dev, b43_tab_noisescaleg2); + } else { + b43_write_nst(dev, b43_tab_noisescaleg1); + } + } +} + +static void b43_wa_art(struct b43_wldev *dev) /* ADV retard table */ +{ + int i; + + for (i = 0; i < B43_TAB_RETARD_SIZE; i++) + b43_ofdmtab_write32(dev, B43_OFDMTAB_ADVRETARD, + i, b43_tab_retard[i]); +} + +static void b43_wa_txlna_gain(struct b43_wldev *dev) +{ + b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 13, 0x0000); +} + +static void b43_wa_crs_reset(struct b43_wldev *dev) +{ + b43_phy_write(dev, 0x002C, 0x0064); +} + +static void b43_wa_2060txlna_gain(struct b43_wldev *dev) +{ + b43_hf_write(dev, b43_hf_read(dev) | + B43_HF_2060W); +} + +static void b43_wa_lms(struct b43_wldev *dev) +{ + b43_phy_maskset(dev, 0x0055, 0xFFC0, 0x0004); +} + +static void b43_wa_mixedsignal(struct b43_wldev *dev) +{ + b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, 3); +} + +static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */ +{ + struct b43_phy *phy = &dev->phy; + int i; + const u16 *tab; + + if (phy->type == B43_PHYTYPE_A) { + tab = b43_tab_sigmasqr1; + } else if (phy->type == B43_PHYTYPE_G) { + tab = b43_tab_sigmasqr2; + } else { + B43_WARN_ON(1); + return; + } + + for (i = 0; i < B43_TAB_SIGMASQR_SIZE; i++) { + b43_ofdmtab_write16(dev, B43_OFDMTAB_MINSIGSQ, + i, tab[i]); + } +} + +static void b43_wa_iqadc(struct b43_wldev *dev) +{ + if (dev->phy.analog == 4) + b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 0, + b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 0) & ~0xF000); +} + +static void b43_wa_crs_ed(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->rev == 1) { + b43_phy_write(dev, B43_PHY_CRSTHRES1_R1, 0x4F19); + } else if (phy->rev == 2) { + b43_phy_write(dev, B43_PHY_CRSTHRES1, 0x1861); + b43_phy_write(dev, B43_PHY_CRSTHRES2, 0x0271); + b43_phy_set(dev, B43_PHY_ANTDWELL, 0x0800); + } else { + b43_phy_write(dev, B43_PHY_CRSTHRES1, 0x0098); + b43_phy_write(dev, B43_PHY_CRSTHRES2, 0x0070); + b43_phy_write(dev, B43_PHY_OFDM(0xC9), 0x0080); + b43_phy_set(dev, B43_PHY_ANTDWELL, 0x0800); + } +} + +static void b43_wa_crs_thr(struct b43_wldev *dev) +{ + b43_phy_maskset(dev, B43_PHY_CRS0, ~0x03C0, 0xD000); +} + +static void b43_wa_crs_blank(struct b43_wldev *dev) +{ + b43_phy_write(dev, B43_PHY_OFDM(0x2C), 0x005A); +} + +static void b43_wa_cck_shiftbits(struct b43_wldev *dev) +{ + b43_phy_write(dev, B43_PHY_CCKSHIFTBITS, 0x0026); +} + +static void 
b43_wa_wrssi_offset(struct b43_wldev *dev) +{ + int i; + + if (dev->phy.rev == 1) { + for (i = 0; i < 16; i++) { + b43_ofdmtab_write16(dev, B43_OFDMTAB_WRSSI_R1, + i, 0x0020); + } + } else { + for (i = 0; i < 32; i++) { + b43_ofdmtab_write16(dev, B43_OFDMTAB_WRSSI, + i, 0x0820); + } + } +} + +static void b43_wa_txpuoff_rxpuon(struct b43_wldev *dev) +{ + b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_0F, 2, 15); + b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_0F, 3, 20); +} + +static void b43_wa_altagc(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->rev == 1) { + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 254); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 1, 13); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 2, 19); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 3, 25); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 0, 0x2710); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 1, 0x9B83); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 2, 0x9B83); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, 3, 0x0F8D); + b43_phy_write(dev, B43_PHY_LMS, 4); + } else { + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0, 254); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 1, 13); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 2, 19); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 3, 25); + } + + b43_phy_maskset(dev, B43_PHY_CCKSHIFTBITS_WA, (u16)~0xFF00, 0x5700); + b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x007F, 0x000F); + b43_phy_maskset(dev, B43_PHY_OFDM(0x1A), ~0x3F80, 0x2B80); + b43_phy_maskset(dev, B43_PHY_ANTWRSETT, 0xF0FF, 0x0300); + b43_radio_set(dev, 0x7A, 0x0008); + b43_phy_maskset(dev, B43_PHY_N1P1GAIN, ~0x000F, 0x0008); + b43_phy_maskset(dev, B43_PHY_P1P2GAIN, ~0x0F00, 0x0600); + b43_phy_maskset(dev, B43_PHY_N1N2GAIN, ~0x0F00, 0x0700); + b43_phy_maskset(dev, B43_PHY_N1P1GAIN, ~0x0F00, 0x0100); + if (phy->rev == 1) { + b43_phy_maskset(dev, B43_PHY_N1N2GAIN, ~0x000F, 0x0007); + } + b43_phy_maskset(dev, B43_PHY_OFDM(0x88), ~0x00FF, 0x001C); + b43_phy_maskset(dev, B43_PHY_OFDM(0x88), ~0x3F00, 0x0200); + b43_phy_maskset(dev, B43_PHY_OFDM(0x96), ~0x00FF, 0x001C); + b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x00FF, 0x0020); + b43_phy_maskset(dev, B43_PHY_OFDM(0x89), ~0x3F00, 0x0200); + b43_phy_maskset(dev, B43_PHY_OFDM(0x82), ~0x00FF, 0x002E); + b43_phy_maskset(dev, B43_PHY_OFDM(0x96), (u16)~0xFF00, 0x1A00); + b43_phy_maskset(dev, B43_PHY_OFDM(0x81), ~0x00FF, 0x0028); + b43_phy_maskset(dev, B43_PHY_OFDM(0x81), (u16)~0xFF00, 0x2C00); + if (phy->rev == 1) { + b43_phy_write(dev, B43_PHY_PEAK_COUNT, 0x092B); + b43_phy_maskset(dev, B43_PHY_OFDM(0x1B), ~0x001E, 0x0002); + } else { + b43_phy_mask(dev, B43_PHY_OFDM(0x1B), ~0x001E); + b43_phy_write(dev, B43_PHY_OFDM(0x1F), 0x287A); + b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, ~0x000F, 0x0004); + if (phy->rev >= 6) { + b43_phy_write(dev, B43_PHY_OFDM(0x22), 0x287A); + b43_phy_maskset(dev, B43_PHY_LPFGAINCTL, (u16)~0xF000, 0x3000); + } + } + b43_phy_maskset(dev, B43_PHY_DIVSRCHIDX, 0x8080, 0x7874); + b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x1C00); + if (phy->rev == 1) { + b43_phy_maskset(dev, B43_PHY_DIVP1P2GAIN, ~0x0F00, 0x0600); + b43_phy_write(dev, B43_PHY_OFDM(0x8B), 0x005E); + b43_phy_maskset(dev, B43_PHY_ANTWRSETT, ~0x00FF, 0x001E); + b43_phy_write(dev, B43_PHY_OFDM(0x8D), 0x0002); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 0, 0); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 1, 7); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 2, 16); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3_R1, 3, 28); + } else { + b43_ofdmtab_write16(dev, 
B43_OFDMTAB_AGC3, 0, 0); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 1, 7); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 2, 16); + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC3, 3, 28); + } + if (phy->rev >= 6) { + b43_phy_mask(dev, B43_PHY_OFDM(0x26), ~0x0003); + b43_phy_mask(dev, B43_PHY_OFDM(0x26), ~0x1000); + } + b43_phy_read(dev, B43_PHY_VERSION_OFDM); /* Dummy read */ +} + +static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */ +{ + b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480); +} + +static void b43_wa_cpll_nonpilot(struct b43_wldev *dev) +{ + b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 0, 0); + b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 1, 0); +} + +static void b43_wa_rssi_adc(struct b43_wldev *dev) +{ + if (dev->phy.analog == 4) + b43_phy_write(dev, 0x00DC, 0x7454); +} + +static void b43_wa_boards_a(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM && + bus->boardinfo.type == SSB_BOARD_BU4306 && + bus->boardinfo.rev < 0x30) { + b43_phy_write(dev, 0x0010, 0xE000); + b43_phy_write(dev, 0x0013, 0x0140); + b43_phy_write(dev, 0x0014, 0x0280); + } else { + if (bus->boardinfo.type == SSB_BOARD_MP4318 && + bus->boardinfo.rev < 0x20) { + b43_phy_write(dev, 0x0013, 0x0210); + b43_phy_write(dev, 0x0014, 0x0840); + } else { + b43_phy_write(dev, 0x0013, 0x0140); + b43_phy_write(dev, 0x0014, 0x0280); + } + if (dev->phy.rev <= 4) + b43_phy_write(dev, 0x0010, 0xE000); + else + b43_phy_write(dev, 0x0010, 0x2000); + b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 1, 0x0039); + b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 7, 0x0040); + } +} + +static void b43_wa_boards_g(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + + if (bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM || + bus->boardinfo.type != SSB_BOARD_BU4306 || + bus->boardinfo.rev != 0x17) { + if (phy->rev < 2) { + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX_R1, 1, 0x0002); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX_R1, 2, 0x0001); + } else { + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 1, 0x0002); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 2, 0x0001); + if ((bus->sprom.boardflags_lo & B43_BFL_EXTLNA) && + (phy->rev >= 7)) { + b43_phy_mask(dev, B43_PHY_EXTG(0x11), 0xF7FF); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0020, 0x0001); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0021, 0x0001); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0022, 0x0001); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0023, 0x0000); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0000, 0x0000); + b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0003, 0x0002); + } + } + } + if (bus->sprom.boardflags_lo & B43_BFL_FEM) { + b43_phy_write(dev, B43_PHY_GTABCTL, 0x3120); + b43_phy_write(dev, B43_PHY_GTABDATA, 0xC480); + } +} + +void b43_wa_all(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->type == B43_PHYTYPE_A) { + switch (phy->rev) { + case 2: + b43_wa_papd(dev); + b43_wa_auxclipthr(dev); + b43_wa_afcdac(dev); + b43_wa_txdc_offset(dev); + b43_wa_initgains(dev); + b43_wa_divider(dev); + b43_wa_gt(dev); + b43_wa_rssi_lt(dev); + b43_wa_analog(dev); + b43_wa_dac(dev); + b43_wa_fft(dev); + b43_wa_nft(dev); + b43_wa_rt(dev); + b43_wa_nst(dev); + b43_wa_art(dev); + b43_wa_txlna_gain(dev); + b43_wa_crs_reset(dev); + b43_wa_2060txlna_gain(dev); + b43_wa_lms(dev); + break; + case 3: + b43_wa_papd(dev); + b43_wa_mixedsignal(dev); + b43_wa_rssi_lt(dev); + b43_wa_txdc_offset(dev); + 
b43_wa_initgains(dev); + b43_wa_dac(dev); + b43_wa_nft(dev); + b43_wa_nst(dev); + b43_wa_msst(dev); + b43_wa_analog(dev); + b43_wa_gt(dev); + b43_wa_txpuoff_rxpuon(dev); + b43_wa_txlna_gain(dev); + break; + case 5: + b43_wa_iqadc(dev); + case 6: + b43_wa_papd(dev); + b43_wa_rssi_lt(dev); + b43_wa_txdc_offset(dev); + b43_wa_initgains(dev); + b43_wa_dac(dev); + b43_wa_nft(dev); + b43_wa_nst(dev); + b43_wa_msst(dev); + b43_wa_analog(dev); + b43_wa_gt(dev); + b43_wa_txpuoff_rxpuon(dev); + b43_wa_txlna_gain(dev); + break; + case 7: + b43_wa_iqadc(dev); + b43_wa_papd(dev); + b43_wa_rssi_lt(dev); + b43_wa_txdc_offset(dev); + b43_wa_initgains(dev); + b43_wa_dac(dev); + b43_wa_nft(dev); + b43_wa_nst(dev); + b43_wa_msst(dev); + b43_wa_analog(dev); + b43_wa_gt(dev); + b43_wa_txpuoff_rxpuon(dev); + b43_wa_txlna_gain(dev); + b43_wa_rssi_adc(dev); + default: + B43_WARN_ON(1); + } + b43_wa_boards_a(dev); + } else if (phy->type == B43_PHYTYPE_G) { + switch (phy->rev) { + case 1://XXX review rev1 + b43_wa_crs_ed(dev); + b43_wa_crs_thr(dev); + b43_wa_crs_blank(dev); + b43_wa_cck_shiftbits(dev); + b43_wa_fft(dev); + b43_wa_nft(dev); + b43_wa_rt(dev); + b43_wa_nst(dev); + b43_wa_art(dev); + b43_wa_wrssi_offset(dev); + b43_wa_altagc(dev); + break; + case 2: + case 6: + case 7: + case 8: + case 9: + b43_wa_tr_ltov(dev); + b43_wa_crs_ed(dev); + b43_wa_rssi_lt(dev); + b43_wa_nft(dev); + b43_wa_nst(dev); + b43_wa_msst(dev); + b43_wa_wrssi_offset(dev); + b43_wa_altagc(dev); + b43_wa_analog(dev); + b43_wa_txpuoff_rxpuon(dev); + break; + default: + B43_WARN_ON(1); + } + b43_wa_boards_g(dev); + } else { /* No N PHY support so far, LP PHY is in phy_lp.c */ + B43_WARN_ON(1); + } + + b43_wa_cpll_nonpilot(dev); +} diff --git a/drivers/net/wireless/b43/wa.h b/drivers/net/wireless/b43/wa.h new file mode 100644 index 0000000..e163c5e --- /dev/null +++ b/drivers/net/wireless/b43/wa.h @@ -0,0 +1,7 @@ +#ifndef B43_WA_H_ +#define B43_WA_H_ + +void b43_wa_initgains(struct b43_wldev *dev); +void b43_wa_all(struct b43_wldev *dev); + +#endif /* B43_WA_H_ */ diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c new file mode 100644 index 0000000..eda0652 --- /dev/null +++ b/drivers/net/wireless/b43/xmit.c @@ -0,0 +1,799 @@ +/* + + Broadcom B43 wireless driver + + Transmission (TX/RX) related functions. + + Copyright (C) 2005 Martin Langer + Copyright (C) 2005 Stefano Brivio + Copyright (C) 2005, 2006 Michael Buesch + Copyright (C) 2005 Danny van Dyk + Copyright (C) 2005 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "xmit.h" +#include "phy_common.h" +#include "dma.h" +#include "pio.h" + + +/* Extract the bitrate index out of a CCK PLCP header. 
*/ +static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp) +{ + switch (plcp->raw[0]) { + case 0x0A: + return 0; + case 0x14: + return 1; + case 0x37: + return 2; + case 0x6E: + return 3; + } + return -1; +} + +/* Extract the bitrate index out of an OFDM PLCP header. */ +static int b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy) +{ + int base = aphy ? 0 : 4; + + switch (plcp->raw[0] & 0xF) { + case 0xB: + return base + 0; + case 0xF: + return base + 1; + case 0xA: + return base + 2; + case 0xE: + return base + 3; + case 0x9: + return base + 4; + case 0xD: + return base + 5; + case 0x8: + return base + 6; + case 0xC: + return base + 7; + } + return -1; +} + +u8 b43_plcp_get_ratecode_cck(const u8 bitrate) +{ + switch (bitrate) { + case B43_CCK_RATE_1MB: + return 0x0A; + case B43_CCK_RATE_2MB: + return 0x14; + case B43_CCK_RATE_5MB: + return 0x37; + case B43_CCK_RATE_11MB: + return 0x6E; + } + B43_WARN_ON(1); + return 0; +} + +u8 b43_plcp_get_ratecode_ofdm(const u8 bitrate) +{ + switch (bitrate) { + case B43_OFDM_RATE_6MB: + return 0xB; + case B43_OFDM_RATE_9MB: + return 0xF; + case B43_OFDM_RATE_12MB: + return 0xA; + case B43_OFDM_RATE_18MB: + return 0xE; + case B43_OFDM_RATE_24MB: + return 0x9; + case B43_OFDM_RATE_36MB: + return 0xD; + case B43_OFDM_RATE_48MB: + return 0x8; + case B43_OFDM_RATE_54MB: + return 0xC; + } + B43_WARN_ON(1); + return 0; +} + +void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate) +{ + __u8 *raw = plcp->raw; + + if (b43_is_ofdm_rate(bitrate)) { + u32 d; + + d = b43_plcp_get_ratecode_ofdm(bitrate); + B43_WARN_ON(octets & 0xF000); + d |= (octets << 5); + plcp->data = cpu_to_le32(d); + } else { + u32 plen; + + plen = octets * 16 / bitrate; + if ((octets * 16 % bitrate) > 0) { + plen++; + if ((bitrate == B43_CCK_RATE_11MB) + && ((octets * 8 % 11) < 4)) { + raw[1] = 0x84; + } else + raw[1] = 0x04; + } else + raw[1] = 0x04; + plcp->data |= cpu_to_le32(plen << 16); + raw[0] = b43_plcp_get_ratecode_cck(bitrate); + } +} + +static u8 b43_calc_fallback_rate(u8 bitrate) +{ + switch (bitrate) { + case B43_CCK_RATE_1MB: + return B43_CCK_RATE_1MB; + case B43_CCK_RATE_2MB: + return B43_CCK_RATE_1MB; + case B43_CCK_RATE_5MB: + return B43_CCK_RATE_2MB; + case B43_CCK_RATE_11MB: + return B43_CCK_RATE_5MB; + case B43_OFDM_RATE_6MB: + return B43_CCK_RATE_5MB; + case B43_OFDM_RATE_9MB: + return B43_OFDM_RATE_6MB; + case B43_OFDM_RATE_12MB: + return B43_OFDM_RATE_9MB; + case B43_OFDM_RATE_18MB: + return B43_OFDM_RATE_12MB; + case B43_OFDM_RATE_24MB: + return B43_OFDM_RATE_18MB; + case B43_OFDM_RATE_36MB: + return B43_OFDM_RATE_24MB; + case B43_OFDM_RATE_48MB: + return B43_OFDM_RATE_36MB; + case B43_OFDM_RATE_54MB: + return B43_OFDM_RATE_48MB; + } + B43_WARN_ON(1); + return 0; +} + +/* Generate a TX data header. 
*/ +int b43_generate_txhdr(struct b43_wldev *dev, + u8 *_txhdr, + struct sk_buff *skb_frag, + struct ieee80211_tx_info *info, + u16 cookie) +{ + const unsigned char *fragment_data = skb_frag->data; + unsigned int fragment_len = skb_frag->len; + struct b43_txhdr *txhdr = (struct b43_txhdr *)_txhdr; + const struct b43_phy *phy = &dev->phy; + const struct ieee80211_hdr *wlhdr = + (const struct ieee80211_hdr *)fragment_data; + int use_encryption = !!info->control.hw_key; + __le16 fctl = wlhdr->frame_control; + struct ieee80211_rate *fbrate; + u8 rate, rate_fb; + int rate_ofdm, rate_fb_ofdm; + unsigned int plcp_fragment_len; + u32 mac_ctl = 0; + u16 phy_ctl = 0; + u8 extra_ft = 0; + struct ieee80211_rate *txrate; + struct ieee80211_tx_rate *rates; + + memset(txhdr, 0, sizeof(*txhdr)); + + txrate = ieee80211_get_tx_rate(dev->wl->hw, info); + rate = txrate ? txrate->hw_value : B43_CCK_RATE_1MB; + rate_ofdm = b43_is_ofdm_rate(rate); + fbrate = ieee80211_get_alt_retry_rate(dev->wl->hw, info, 0) ? : txrate; + rate_fb = fbrate->hw_value; + rate_fb_ofdm = b43_is_ofdm_rate(rate_fb); + + if (rate_ofdm) + txhdr->phy_rate = b43_plcp_get_ratecode_ofdm(rate); + else + txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate); + txhdr->mac_frame_ctl = wlhdr->frame_control; + memcpy(txhdr->tx_receiver, wlhdr->addr1, 6); + + /* Calculate duration for fallback rate */ + if ((rate_fb == rate) || + (wlhdr->duration_id & cpu_to_le16(0x8000)) || + (wlhdr->duration_id == cpu_to_le16(0))) { + /* If the fallback rate equals the normal rate or the + * dur_id field contains an AID, CFP magic or 0, + * use the original dur_id field. */ + txhdr->dur_fb = wlhdr->duration_id; + } else { + txhdr->dur_fb = ieee80211_generic_frame_duration( + dev->wl->hw, info->control.vif, fragment_len, fbrate); + } + + plcp_fragment_len = fragment_len + FCS_LEN; + if (use_encryption) { + u8 key_idx = info->control.hw_key->hw_key_idx; + struct b43_key *key; + int wlhdr_len; + size_t iv_len; + + B43_WARN_ON(key_idx >= ARRAY_SIZE(dev->key)); + key = &(dev->key[key_idx]); + + if (unlikely(!key->keyconf)) { + /* This key is invalid. This might only happen + * in a short timeframe after machine resume before + * we were able to reconfigure keys. + * Drop this packet completely. Do not transmit it + * unencrypted to avoid leaking information. */ + return -ENOKEY; + } + + /* Hardware appends ICV. */ + plcp_fragment_len += info->control.hw_key->icv_len; + + key_idx = b43_kidx_to_fw(dev, key_idx); + mac_ctl |= (key_idx << B43_TXH_MAC_KEYIDX_SHIFT) & + B43_TXH_MAC_KEYIDX; + mac_ctl |= (key->algorithm << B43_TXH_MAC_KEYALG_SHIFT) & + B43_TXH_MAC_KEYALG; + wlhdr_len = ieee80211_hdrlen(fctl); + if (key->algorithm == B43_SEC_ALGO_TKIP) { + u16 phase1key[5]; + int i; + /* we give the phase1key and iv16 here, the key is stored in + * shm. With that the hardware can do phase 2 and encryption. + */ + ieee80211_get_tkip_key(info->control.hw_key, skb_frag, + IEEE80211_TKIP_P1_KEY, (u8*)phase1key); + /* phase1key is in host endian. Copy to little-endian txhdr->iv. 
*/ + for (i = 0; i < 5; i++) { + txhdr->iv[i * 2 + 0] = phase1key[i]; + txhdr->iv[i * 2 + 1] = phase1key[i] >> 8; + } + /* iv16 */ + memcpy(txhdr->iv + 10, ((u8 *) wlhdr) + wlhdr_len, 3); + } else { + iv_len = min((size_t) info->control.hw_key->iv_len, + ARRAY_SIZE(txhdr->iv)); + memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len); + } + } + if (b43_is_old_txhdr_format(dev)) { + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->old_format.plcp), + plcp_fragment_len, rate); + } else { + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->new_format.plcp), + plcp_fragment_len, rate); + } + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->plcp_fb), + plcp_fragment_len, rate_fb); + + /* Extra Frame Types */ + if (rate_fb_ofdm) + extra_ft |= B43_TXH_EFT_FB_OFDM; + else + extra_ft |= B43_TXH_EFT_FB_CCK; + + /* Set channel radio code. Note that the micrcode ORs 0x100 to + * this value before comparing it to the value in SHM, if this + * is a 5Ghz packet. + */ + txhdr->chan_radio_code = phy->channel; + + /* PHY TX Control word */ + if (rate_ofdm) + phy_ctl |= B43_TXH_PHY_ENC_OFDM; + else + phy_ctl |= B43_TXH_PHY_ENC_CCK; + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + phy_ctl |= B43_TXH_PHY_SHORTPRMBL; + + switch (b43_ieee80211_antenna_sanitize(dev, info->antenna_sel_tx)) { + case 0: /* Default */ + phy_ctl |= B43_TXH_PHY_ANT01AUTO; + break; + case 1: /* Antenna 0 */ + phy_ctl |= B43_TXH_PHY_ANT0; + break; + case 2: /* Antenna 1 */ + phy_ctl |= B43_TXH_PHY_ANT1; + break; + case 3: /* Antenna 2 */ + phy_ctl |= B43_TXH_PHY_ANT2; + break; + case 4: /* Antenna 3 */ + phy_ctl |= B43_TXH_PHY_ANT3; + break; + default: + B43_WARN_ON(1); + } + + rates = info->control.rates; + /* MAC control */ + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + mac_ctl |= B43_TXH_MAC_ACK; + /* use hardware sequence counter as the non-TID counter */ + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + mac_ctl |= B43_TXH_MAC_HWSEQ; + if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) + mac_ctl |= B43_TXH_MAC_STMSDU; + if (phy->type == B43_PHYTYPE_A) + mac_ctl |= B43_TXH_MAC_5GHZ; + + /* Overwrite rates[0].count to make the retry calculation + * in the tx status easier. need the actual retry limit to + * detect whether the fallback rate was used. + */ + if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || + (rates[0].count <= dev->wl->hw->conf.long_frame_max_tx_count)) { + rates[0].count = dev->wl->hw->conf.long_frame_max_tx_count; + mac_ctl |= B43_TXH_MAC_LONGFRAME; + } else { + rates[0].count = dev->wl->hw->conf.short_frame_max_tx_count; + } + + /* Generate the RTS or CTS-to-self frame */ + if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || + (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) { + unsigned int len; + struct ieee80211_hdr *hdr; + int rts_rate, rts_rate_fb; + int rts_rate_ofdm, rts_rate_fb_ofdm; + struct b43_plcp_hdr6 *plcp; + struct ieee80211_rate *rts_cts_rate; + + rts_cts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info); + + rts_rate = rts_cts_rate ? 
rts_cts_rate->hw_value : B43_CCK_RATE_1MB; + rts_rate_ofdm = b43_is_ofdm_rate(rts_rate); + rts_rate_fb = b43_calc_fallback_rate(rts_rate); + rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); + + if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + struct ieee80211_cts *cts; + + if (b43_is_old_txhdr_format(dev)) { + cts = (struct ieee80211_cts *) + (txhdr->old_format.rts_frame); + } else { + cts = (struct ieee80211_cts *) + (txhdr->new_format.rts_frame); + } + ieee80211_ctstoself_get(dev->wl->hw, info->control.vif, + fragment_data, fragment_len, + info, cts); + mac_ctl |= B43_TXH_MAC_SENDCTS; + len = sizeof(struct ieee80211_cts); + } else { + struct ieee80211_rts *rts; + + if (b43_is_old_txhdr_format(dev)) { + rts = (struct ieee80211_rts *) + (txhdr->old_format.rts_frame); + } else { + rts = (struct ieee80211_rts *) + (txhdr->new_format.rts_frame); + } + ieee80211_rts_get(dev->wl->hw, info->control.vif, + fragment_data, fragment_len, + info, rts); + mac_ctl |= B43_TXH_MAC_SENDRTS; + len = sizeof(struct ieee80211_rts); + } + len += FCS_LEN; + + /* Generate the PLCP headers for the RTS/CTS frame */ + if (b43_is_old_txhdr_format(dev)) + plcp = &txhdr->old_format.rts_plcp; + else + plcp = &txhdr->new_format.rts_plcp; + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)plcp, + len, rts_rate); + plcp = &txhdr->rts_plcp_fb; + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)plcp, + len, rts_rate_fb); + + if (b43_is_old_txhdr_format(dev)) { + hdr = (struct ieee80211_hdr *) + (&txhdr->old_format.rts_frame); + } else { + hdr = (struct ieee80211_hdr *) + (&txhdr->new_format.rts_frame); + } + txhdr->rts_dur_fb = hdr->duration_id; + + if (rts_rate_ofdm) { + extra_ft |= B43_TXH_EFT_RTS_OFDM; + txhdr->phy_rate_rts = + b43_plcp_get_ratecode_ofdm(rts_rate); + } else { + extra_ft |= B43_TXH_EFT_RTS_CCK; + txhdr->phy_rate_rts = + b43_plcp_get_ratecode_cck(rts_rate); + } + if (rts_rate_fb_ofdm) + extra_ft |= B43_TXH_EFT_RTSFB_OFDM; + else + extra_ft |= B43_TXH_EFT_RTSFB_CCK; + } + + /* Magic cookie */ + if (b43_is_old_txhdr_format(dev)) + txhdr->old_format.cookie = cpu_to_le16(cookie); + else + txhdr->new_format.cookie = cpu_to_le16(cookie); + + /* Apply the bitfields */ + txhdr->mac_ctl = cpu_to_le32(mac_ctl); + txhdr->phy_ctl = cpu_to_le16(phy_ctl); + txhdr->extra_ft = extra_ft; + + return 0; +} + +static s8 b43_rssi_postprocess(struct b43_wldev *dev, + u8 in_rssi, int ofdm, + int adjust_2053, int adjust_2050) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_g *gphy = phy->g; + s32 tmp; + + switch (phy->radio_ver) { + case 0x2050: + if (ofdm) { + tmp = in_rssi; + if (tmp > 127) + tmp -= 256; + tmp *= 73; + tmp /= 64; + if (adjust_2050) + tmp += 25; + else + tmp -= 3; + } else { + if (dev->dev->bus->sprom. 
+ boardflags_lo & B43_BFL_RSSI) { + if (in_rssi > 63) + in_rssi = 63; + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + tmp = gphy->nrssi_lt[in_rssi]; + tmp = 31 - tmp; + tmp *= -131; + tmp /= 128; + tmp -= 57; + } else { + tmp = in_rssi; + tmp = 31 - tmp; + tmp *= -149; + tmp /= 128; + tmp -= 68; + } + if (phy->type == B43_PHYTYPE_G && adjust_2050) + tmp += 25; + } + break; + case 0x2060: + if (in_rssi > 127) + tmp = in_rssi - 256; + else + tmp = in_rssi; + break; + default: + tmp = in_rssi; + tmp -= 11; + tmp *= 103; + tmp /= 64; + if (adjust_2053) + tmp -= 109; + else + tmp -= 83; + } + + return (s8) tmp; +} + +//TODO +#if 0 +static s8 b43_rssinoise_postprocess(struct b43_wldev *dev, u8 in_rssi) +{ + struct b43_phy *phy = &dev->phy; + s8 ret; + + if (phy->type == B43_PHYTYPE_A) { + //TODO: Incomplete specs. + ret = 0; + } else + ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1); + + return ret; +} +#endif + +void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) +{ + struct ieee80211_rx_status status; + struct b43_plcp_hdr6 *plcp; + struct ieee80211_hdr *wlhdr; + const struct b43_rxhdr_fw4 *rxhdr = _rxhdr; + __le16 fctl; + u16 phystat0, phystat3, chanstat, mactime; + u32 macstat; + u16 chanid; + u16 phytype; + int padding; + + memset(&status, 0, sizeof(status)); + + /* Get metadata about the frame from the header. */ + phystat0 = le16_to_cpu(rxhdr->phy_status0); + phystat3 = le16_to_cpu(rxhdr->phy_status3); + macstat = le32_to_cpu(rxhdr->mac_status); + mactime = le16_to_cpu(rxhdr->mac_time); + chanstat = le16_to_cpu(rxhdr->channel); + phytype = chanstat & B43_RX_CHAN_PHYTYPE; + + if (unlikely(macstat & B43_RX_MAC_FCSERR)) { + dev->wl->ieee_stats.dot11FCSErrorCount++; + status.flag |= RX_FLAG_FAILED_FCS_CRC; + } + if (unlikely(phystat0 & (B43_RX_PHYST0_PLCPHCF | B43_RX_PHYST0_PLCPFV))) + status.flag |= RX_FLAG_FAILED_PLCP_CRC; + if (phystat0 & B43_RX_PHYST0_SHORTPRMBL) + status.flag |= RX_FLAG_SHORTPRE; + if (macstat & B43_RX_MAC_DECERR) { + /* Decryption with the given key failed. + * Drop the packet. We also won't be able to decrypt it with + * the key in software. */ + goto drop; + } + + /* Skip PLCP and padding */ + padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0; + if (unlikely(skb->len < (sizeof(struct b43_plcp_hdr6) + padding))) { + b43dbg(dev->wl, "RX: Packet size underrun (1)\n"); + goto drop; + } + plcp = (struct b43_plcp_hdr6 *)(skb->data + padding); + skb_pull(skb, sizeof(struct b43_plcp_hdr6) + padding); + /* The skb contains the Wireless Header + payload data now */ + if (unlikely(skb->len < (2 + 2 + 6 /*minimum hdr */ + FCS_LEN))) { + b43dbg(dev->wl, "RX: Packet size underrun (2)\n"); + goto drop; + } + wlhdr = (struct ieee80211_hdr *)(skb->data); + fctl = wlhdr->frame_control; + + if (macstat & B43_RX_MAC_DEC) { + unsigned int keyidx; + int wlhdr_len; + + keyidx = ((macstat & B43_RX_MAC_KEYIDX) + >> B43_RX_MAC_KEYIDX_SHIFT); + /* We must adjust the key index here. We want the "physical" + * key index, but the ucode passed it slightly different. 
+ */ + keyidx = b43_kidx_to_raw(dev, keyidx); + B43_WARN_ON(keyidx >= ARRAY_SIZE(dev->key)); + + if (dev->key[keyidx].algorithm != B43_SEC_ALGO_NONE) { + wlhdr_len = ieee80211_hdrlen(fctl); + if (unlikely(skb->len < (wlhdr_len + 3))) { + b43dbg(dev->wl, + "RX: Packet size underrun (3)\n"); + goto drop; + } + status.flag |= RX_FLAG_DECRYPTED; + } + } + + /* Link quality statistics */ + status.noise = dev->stats.link_noise; + if ((chanstat & B43_RX_CHAN_PHYTYPE) == B43_PHYTYPE_N) { +// s8 rssi = max(rxhdr->power0, rxhdr->power1); + //TODO: Find out what the rssi value is (dBm or percentage?) + // and also find out what the maximum possible value is. + // Fill status.ssi and status.signal fields. + } else { + status.signal = b43_rssi_postprocess(dev, rxhdr->jssi, + (phystat0 & B43_RX_PHYST0_OFDM), + (phystat0 & B43_RX_PHYST0_GAINCTL), + (phystat3 & B43_RX_PHYST3_TRSTATE)); + } + + if (phystat0 & B43_RX_PHYST0_OFDM) + status.rate_idx = b43_plcp_get_bitrate_idx_ofdm(plcp, + phytype == B43_PHYTYPE_A); + else + status.rate_idx = b43_plcp_get_bitrate_idx_cck(plcp); + if (unlikely(status.rate_idx == -1)) { + /* PLCP seems to be corrupted. + * Drop the frame, if we are not interested in corrupted frames. */ + if (!(dev->wl->filter_flags & FIF_PLCPFAIL)) + goto drop; + } + status.antenna = !!(phystat0 & B43_RX_PHYST0_ANT); + + /* + * All frames on monitor interfaces and beacons always need a full + * 64-bit timestamp. Monitor interfaces need it for diagnostic + * purposes and beacons for IBSS merging. + * This code assumes we get to process the packet within 16 bits + * of timestamp, i.e. about 65 milliseconds after the PHY received + * the first symbol. + */ + if (ieee80211_is_beacon(fctl) || dev->wl->radiotap_enabled) { + u16 low_mactime_now; + + b43_tsf_read(dev, &status.mactime); + low_mactime_now = status.mactime; + status.mactime = status.mactime & ~0xFFFFULL; + status.mactime += mactime; + if (low_mactime_now <= mactime) + status.mactime -= 0x10000; + status.flag |= RX_FLAG_TSFT; + } + + chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; + switch (chanstat & B43_RX_CHAN_PHYTYPE) { + case B43_PHYTYPE_A: + status.band = IEEE80211_BAND_5GHZ; + B43_WARN_ON(1); + /* FIXME: We don't really know which value the "chanid" contains. + * So the following assignment might be wrong. */ + status.freq = b43_channel_to_freq_5ghz(chanid); + break; + case B43_PHYTYPE_G: + status.band = IEEE80211_BAND_2GHZ; + /* chanid is the radio channel cookie value as used + * to tune the radio. */ + status.freq = chanid + 2400; + break; + case B43_PHYTYPE_N: + case B43_PHYTYPE_LP: + /* chanid is the SHM channel cookie. Which is the plain + * channel number in b43. 
*/ + if (chanstat & B43_RX_CHAN_5GHZ) { + status.band = IEEE80211_BAND_5GHZ; + status.freq = b43_freq_to_channel_5ghz(chanid); + } else { + status.band = IEEE80211_BAND_2GHZ; + status.freq = b43_freq_to_channel_2ghz(chanid); + } + break; + default: + B43_WARN_ON(1); + goto drop; + } + + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); + ieee80211_rx_ni(dev->wl->hw, skb); + +#if B43_DEBUG + dev->rx_count++; +#endif + return; +drop: + b43dbg(dev->wl, "RX: Packet dropped\n"); + dev_kfree_skb_any(skb); +} + +void b43_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + b43_debugfs_log_txstat(dev, status); + + if (status->intermediate) + return; + if (status->for_ampdu) + return; + if (!status->acked) + dev->wl->ieee_stats.dot11ACKFailureCount++; + if (status->rts_count) { + if (status->rts_count == 0xF) //FIXME + dev->wl->ieee_stats.dot11RTSFailureCount++; + else + dev->wl->ieee_stats.dot11RTSSuccessCount++; + } + + if (b43_using_pio_transfers(dev)) + b43_pio_handle_txstatus(dev, status); + else + b43_dma_handle_txstatus(dev, status); + + b43_phy_txpower_check(dev, 0); +} + +/* Fill out the mac80211 TXstatus report based on the b43-specific + * txstatus report data. This returns a boolean indicating whether the frame + * was successfully transmitted. */ +bool b43_fill_txstatus_report(struct b43_wldev *dev, + struct ieee80211_tx_info *report, + const struct b43_txstatus *status) +{ + bool frame_success = 1; + int retry_limit; + + /* Preserve the configured retry limit before clearing the status. + * The xmit function has overwritten the rc's value with the actual + * retry limit done by the hardware. */ + retry_limit = report->status.rates[0].count; + ieee80211_tx_info_clear_status(report); + + if (status->acked) { + /* The frame was ACKed. */ + report->flags |= IEEE80211_TX_STAT_ACK; + } else { + /* The frame was not ACKed... */ + if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) { + /* ...but we expected an ACK. */ + frame_success = 0; + } + } + if (status->frame_count == 0) { + /* The frame was not transmitted at all. */ + report->status.rates[0].count = 0; + } else if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) { + /* + * If the short retries (RTS, not data frame) have exceeded + * the limit, the hw will not have tried the selected rate, + * but will have used the fallback rate instead. + * Don't let the rate control count attempts for the selected + * rate in this case, otherwise the statistics will be off.
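+ * (Worked example with made-up numbers: if short_frame_max_tx_count is 4 and the hardware reports rts_count == 7 with frame_count == 2, the selected rate is credited with zero attempts and the fallback rate with two, as done below.)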
+ */ + report->status.rates[0].count = 0; + report->status.rates[1].count = status->frame_count; + } else { + if (status->frame_count > retry_limit) { + report->status.rates[0].count = retry_limit; + report->status.rates[1].count = status->frame_count - + retry_limit; + + } else { + report->status.rates[0].count = status->frame_count; + report->status.rates[1].idx = -1; + } + } + + return frame_success; +} + +/* Stop any TX operation on the device (suspend the hardware queues) */ +void b43_tx_suspend(struct b43_wldev *dev) +{ + if (b43_using_pio_transfers(dev)) + b43_pio_tx_suspend(dev); + else + b43_dma_tx_suspend(dev); +} + +/* Resume any TX operation on the device (resume the hardware queues) */ +void b43_tx_resume(struct b43_wldev *dev) +{ + if (b43_using_pio_transfers(dev)) + b43_pio_tx_resume(dev); + else + b43_dma_tx_resume(dev); +} diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h new file mode 100644 index 0000000..d23ff9f --- /dev/null +++ b/drivers/net/wireless/b43/xmit.h @@ -0,0 +1,354 @@ +#ifndef B43_XMIT_H_ +#define B43_XMIT_H_ + +#include "main.h" +#include + + +#define _b43_declare_plcp_hdr(size) \ + struct b43_plcp_hdr##size { \ + union { \ + __le32 data; \ + __u8 raw[size]; \ + } __attribute__((__packed__)); \ + } __attribute__((__packed__)) + +/* struct b43_plcp_hdr4 */ +_b43_declare_plcp_hdr(4); +/* struct b43_plcp_hdr6 */ +_b43_declare_plcp_hdr(6); + +#undef _b43_declare_plcp_hdr + +/* TX header for v4 firmware */ +struct b43_txhdr { + __le32 mac_ctl; /* MAC TX control */ + __le16 mac_frame_ctl; /* Copy of the FrameControl field */ + __le16 tx_fes_time_norm; /* TX FES Time Normal */ + __le16 phy_ctl; /* PHY TX control */ + __le16 phy_ctl1; /* PHY TX control word 1 */ + __le16 phy_ctl1_fb; /* PHY TX control word 1 for fallback rates */ + __le16 phy_ctl1_rts; /* PHY TX control word 1 RTS */ + __le16 phy_ctl1_rts_fb; /* PHY TX control word 1 RTS for fallback rates */ + __u8 phy_rate; /* PHY rate */ + __u8 phy_rate_rts; /* PHY rate for RTS/CTS */ + __u8 extra_ft; /* Extra Frame Types */ + __u8 chan_radio_code; /* Channel Radio Code */ + __u8 iv[16]; /* Encryption IV */ + __u8 tx_receiver[6]; /* TX Frame Receiver address */ + __le16 tx_fes_time_fb; /* TX FES Time Fallback */ + struct b43_plcp_hdr6 rts_plcp_fb; /* RTS fallback PLCP header */ + __le16 rts_dur_fb; /* RTS fallback duration */ + struct b43_plcp_hdr6 plcp_fb; /* Fallback PLCP header */ + __le16 dur_fb; /* Fallback duration */ + __le16 mimo_modelen; /* MIMO mode length */ + __le16 mimo_ratelen_fb; /* MIMO fallback rate length */ + __le32 timeout; /* Timeout */ + + union { + /* The new r410 format. */ + struct { + __le16 mimo_antenna; /* MIMO antenna select */ + __le16 preload_size; /* Preload size */ + PAD_BYTES(2); + __le16 cookie; /* TX frame cookie */ + __le16 tx_status; /* TX status */ + struct b43_plcp_hdr6 rts_plcp; /* RTS PLCP header */ + __u8 rts_frame[16]; /* The RTS frame (if used) */ + PAD_BYTES(2); + struct b43_plcp_hdr6 plcp; /* Main PLCP header */ + } new_format __attribute__ ((__packed__)); + + /* The old r351 format. 
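+ * It is selected by b43_is_old_txhdr_format() for firmware rev <= 351 and lacks the leading mimo_antenna/preload_size words of the r410 layout above, which is why b43_txhdr_size() returns a header four bytes shorter for it.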
*/ + struct { + PAD_BYTES(2); + __le16 cookie; /* TX frame cookie */ + __le16 tx_status; /* TX status */ + struct b43_plcp_hdr6 rts_plcp; /* RTS PLCP header */ + __u8 rts_frame[16]; /* The RTS frame (if used) */ + PAD_BYTES(2); + struct b43_plcp_hdr6 plcp; /* Main PLCP header */ + } old_format __attribute__ ((__packed__)); + + } __attribute__ ((__packed__)); +} __attribute__ ((__packed__)); + +/* MAC TX control */ +#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */ +#define B43_TXH_MAC_KEYIDX 0x0FF00000 /* Security key index */ +#define B43_TXH_MAC_KEYIDX_SHIFT 20 +#define B43_TXH_MAC_KEYALG 0x00070000 /* Security key algorithm */ +#define B43_TXH_MAC_KEYALG_SHIFT 16 +#define B43_TXH_MAC_AMIC 0x00008000 /* AMIC */ +#define B43_TXH_MAC_RIFS 0x00004000 /* Use RIFS */ +#define B43_TXH_MAC_LIFETIME 0x00002000 /* Lifetime */ +#define B43_TXH_MAC_FRAMEBURST 0x00001000 /* Frameburst */ +#define B43_TXH_MAC_SENDCTS 0x00000800 /* Send CTS-to-self */ +#define B43_TXH_MAC_AMPDU 0x00000600 /* AMPDU status */ +#define B43_TXH_MAC_AMPDU_MPDU 0x00000000 /* Regular MPDU, not an AMPDU */ +#define B43_TXH_MAC_AMPDU_FIRST 0x00000200 /* First MPDU or AMPDU */ +#define B43_TXH_MAC_AMPDU_INTER 0x00000400 /* Intermediate MPDU or AMPDU */ +#define B43_TXH_MAC_AMPDU_LAST 0x00000600 /* Last (or only) MPDU of AMPDU */ +#define B43_TXH_MAC_40MHZ 0x00000100 /* Use 40 MHz bandwidth */ +#define B43_TXH_MAC_5GHZ 0x00000080 /* 5GHz band */ +#define B43_TXH_MAC_DFCS 0x00000040 /* DFCS */ +#define B43_TXH_MAC_IGNPMQ 0x00000020 /* Ignore PMQ */ +#define B43_TXH_MAC_HWSEQ 0x00000010 /* Use Hardware Sequence Number */ +#define B43_TXH_MAC_STMSDU 0x00000008 /* Start MSDU */ +#define B43_TXH_MAC_SENDRTS 0x00000004 /* Send RTS */ +#define B43_TXH_MAC_LONGFRAME 0x00000002 /* Long frame */ +#define B43_TXH_MAC_ACK 0x00000001 /* Immediate ACK */ + +/* Extra Frame Types */ +#define B43_TXH_EFT_FB 0x03 /* Data frame fallback encoding */ +#define B43_TXH_EFT_FB_CCK 0x00 /* CCK */ +#define B43_TXH_EFT_FB_OFDM 0x01 /* OFDM */ +#define B43_TXH_EFT_FB_EWC 0x02 /* EWC */ +#define B43_TXH_EFT_FB_N 0x03 /* N */ +#define B43_TXH_EFT_RTS 0x0C /* RTS/CTS encoding */ +#define B43_TXH_EFT_RTS_CCK 0x00 /* CCK */ +#define B43_TXH_EFT_RTS_OFDM 0x04 /* OFDM */ +#define B43_TXH_EFT_RTS_EWC 0x08 /* EWC */ +#define B43_TXH_EFT_RTS_N 0x0C /* N */ +#define B43_TXH_EFT_RTSFB 0x30 /* RTS/CTS fallback encoding */ +#define B43_TXH_EFT_RTSFB_CCK 0x00 /* CCK */ +#define B43_TXH_EFT_RTSFB_OFDM 0x10 /* OFDM */ +#define B43_TXH_EFT_RTSFB_EWC 0x20 /* EWC */ +#define B43_TXH_EFT_RTSFB_N 0x30 /* N */ + +/* PHY TX control word */ +#define B43_TXH_PHY_ENC 0x0003 /* Data frame encoding */ +#define B43_TXH_PHY_ENC_CCK 0x0000 /* CCK */ +#define B43_TXH_PHY_ENC_OFDM 0x0001 /* OFDM */ +#define B43_TXH_PHY_ENC_EWC 0x0002 /* EWC */ +#define B43_TXH_PHY_ENC_N 0x0003 /* N */ +#define B43_TXH_PHY_SHORTPRMBL 0x0010 /* Use short preamble */ +#define B43_TXH_PHY_ANT 0x03C0 /* Antenna selection */ +#define B43_TXH_PHY_ANT0 0x0000 /* Use antenna 0 */ +#define B43_TXH_PHY_ANT1 0x0040 /* Use antenna 1 */ +#define B43_TXH_PHY_ANT01AUTO 0x00C0 /* Use antenna 0/1 auto */ +#define B43_TXH_PHY_ANT2 0x0100 /* Use antenna 2 */ +#define B43_TXH_PHY_ANT3 0x0200 /* Use antenna 3 */ +#define B43_TXH_PHY_TXPWR 0xFC00 /* TX power */ +#define B43_TXH_PHY_TXPWR_SHIFT 10 + +/* PHY TX control word 1 */ +#define B43_TXH_PHY1_BW 0x0007 /* Bandwidth */ +#define B43_TXH_PHY1_BW_10 0x0000 /* 10 MHz */ +#define B43_TXH_PHY1_BW_10U 0x0001 /* 10 MHz upper */ +#define B43_TXH_PHY1_BW_20 
0x0002 /* 20 MHz */ +#define B43_TXH_PHY1_BW_20U 0x0003 /* 20 MHz upper */ +#define B43_TXH_PHY1_BW_40 0x0004 /* 40 MHz */ +#define B43_TXH_PHY1_BW_40DUP 0x0005 /* 40 MHz duplicate */ +#define B43_TXH_PHY1_MODE 0x0038 /* Mode */ +#define B43_TXH_PHY1_MODE_SISO 0x0000 /* SISO */ +#define B43_TXH_PHY1_MODE_CDD 0x0008 /* CDD */ +#define B43_TXH_PHY1_MODE_STBC 0x0010 /* STBC */ +#define B43_TXH_PHY1_MODE_SDM 0x0018 /* SDM */ +#define B43_TXH_PHY1_CRATE 0x0700 /* Coding rate */ +#define B43_TXH_PHY1_CRATE_1_2 0x0000 /* 1/2 */ +#define B43_TXH_PHY1_CRATE_2_3 0x0100 /* 2/3 */ +#define B43_TXH_PHY1_CRATE_3_4 0x0200 /* 3/4 */ +#define B43_TXH_PHY1_CRATE_4_5 0x0300 /* 4/5 */ +#define B43_TXH_PHY1_CRATE_5_6 0x0400 /* 5/6 */ +#define B43_TXH_PHY1_CRATE_7_8 0x0600 /* 7/8 */ +#define B43_TXH_PHY1_MODUL 0x3800 /* Modulation scheme */ +#define B43_TXH_PHY1_MODUL_BPSK 0x0000 /* BPSK */ +#define B43_TXH_PHY1_MODUL_QPSK 0x0800 /* QPSK */ +#define B43_TXH_PHY1_MODUL_QAM16 0x1000 /* QAM16 */ +#define B43_TXH_PHY1_MODUL_QAM64 0x1800 /* QAM64 */ +#define B43_TXH_PHY1_MODUL_QAM256 0x2000 /* QAM256 */ + + +/* r351 firmware compatibility stuff. */ +static inline +bool b43_is_old_txhdr_format(struct b43_wldev *dev) +{ + return (dev->fw.rev <= 351); +} + +static inline +size_t b43_txhdr_size(struct b43_wldev *dev) +{ + if (b43_is_old_txhdr_format(dev)) + return 100 + sizeof(struct b43_plcp_hdr6); + return 104 + sizeof(struct b43_plcp_hdr6); +} + + +int b43_generate_txhdr(struct b43_wldev *dev, + u8 * txhdr, + struct sk_buff *skb_frag, + struct ieee80211_tx_info *txctl, u16 cookie); + +/* Transmit Status */ +struct b43_txstatus { + u16 cookie; /* The cookie from the txhdr */ + u16 seq; /* Sequence number */ + u8 phy_stat; /* PHY TX status */ + u8 frame_count; /* Frame transmit count */ + u8 rts_count; /* RTS transmit count */ + u8 supp_reason; /* Suppression reason */ + /* flags */ + u8 pm_indicated; /* PM mode indicated to AP */ + u8 intermediate; /* Intermediate status notification (not final) */ + u8 for_ampdu; /* Status is for an AMPDU (afterburner) */ + u8 acked; /* Wireless ACK received */ +}; + +/* txstatus supp_reason values */ +enum { + B43_TXST_SUPP_NONE, /* Not suppressed */ + B43_TXST_SUPP_PMQ, /* Suppressed due to PMQ entry */ + B43_TXST_SUPP_FLUSH, /* Suppressed due to flush request */ + B43_TXST_SUPP_PREV, /* Previous fragment failed */ + B43_TXST_SUPP_CHAN, /* Channel mismatch */ + B43_TXST_SUPP_LIFE, /* Lifetime expired */ + B43_TXST_SUPP_UNDER, /* Buffer underflow */ + B43_TXST_SUPP_ABNACK, /* Afterburner NACK */ +}; + +/* Receive header for v4 firmware.
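+ * The RSSI union below is interpreted according to the PHY type reported in the channel field: A/B/G firmware fills in jssi/sig_qual, while N-PHY firmware reports two power values (power0/power1) that b43_rx() does not evaluate yet (see the TODO there).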
*/ +struct b43_rxhdr_fw4 { + __le16 frame_len; /* Frame length */ + PAD_BYTES(2); + __le16 phy_status0; /* PHY RX Status 0 */ + union { + /* RSSI for A/B/G-PHYs */ + struct { + __u8 jssi; /* PHY RX Status 1: JSSI */ + __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ + } __attribute__ ((__packed__)); + + /* RSSI for N-PHYs */ + struct { + __s8 power0; /* PHY RX Status 1: Power 0 */ + __s8 power1; /* PHY RX Status 1: Power 1 */ + } __attribute__ ((__packed__)); + } __attribute__ ((__packed__)); + __le16 phy_status2; /* PHY RX Status 2 */ + __le16 phy_status3; /* PHY RX Status 3 */ + __le32 mac_status; /* MAC RX status */ + __le16 mac_time; + __le16 channel; +} __attribute__ ((__packed__)); + +/* PHY RX Status 0 */ +#define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ +#define B43_RX_PHYST0_PLCPHCF 0x0200 +#define B43_RX_PHYST0_PLCPFV 0x0100 +#define B43_RX_PHYST0_SHORTPRMBL 0x0080 /* Received with Short Preamble */ +#define B43_RX_PHYST0_LCRS 0x0040 +#define B43_RX_PHYST0_ANT 0x0020 /* Antenna */ +#define B43_RX_PHYST0_UNSRATE 0x0010 +#define B43_RX_PHYST0_CLIP 0x000C +#define B43_RX_PHYST0_CLIP_SHIFT 2 +#define B43_RX_PHYST0_FTYPE 0x0003 /* Frame type */ +#define B43_RX_PHYST0_CCK 0x0000 /* Frame type: CCK */ +#define B43_RX_PHYST0_OFDM 0x0001 /* Frame type: OFDM */ +#define B43_RX_PHYST0_PRE_N 0x0002 /* Pre-standard N-PHY frame */ +#define B43_RX_PHYST0_STD_N 0x0003 /* Standard N-PHY frame */ + +/* PHY RX Status 2 */ +#define B43_RX_PHYST2_LNAG 0xC000 /* LNA Gain */ +#define B43_RX_PHYST2_LNAG_SHIFT 14 +#define B43_RX_PHYST2_PNAG 0x3C00 /* PNA Gain */ +#define B43_RX_PHYST2_PNAG_SHIFT 10 +#define B43_RX_PHYST2_FOFF 0x03FF /* F offset */ + +/* PHY RX Status 3 */ +#define B43_RX_PHYST3_DIGG 0x1800 /* DIG Gain */ +#define B43_RX_PHYST3_DIGG_SHIFT 11 +#define B43_RX_PHYST3_TRSTATE 0x0400 /* TR state */ + +/* MAC RX Status */ +#define B43_RX_MAC_RXST_VALID 0x01000000 /* PHY RXST valid */ +#define B43_RX_MAC_TKIP_MICERR 0x00100000 /* TKIP MIC error */ +#define B43_RX_MAC_TKIP_MICATT 0x00080000 /* TKIP MIC attempted */ +#define B43_RX_MAC_AGGTYPE 0x00060000 /* Aggregation type */ +#define B43_RX_MAC_AGGTYPE_SHIFT 17 +#define B43_RX_MAC_AMSDU 0x00010000 /* A-MSDU mask */ +#define B43_RX_MAC_BEACONSENT 0x00008000 /* Beacon sent flag */ +#define B43_RX_MAC_KEYIDX 0x000007E0 /* Key index */ +#define B43_RX_MAC_KEYIDX_SHIFT 5 +#define B43_RX_MAC_DECERR 0x00000010 /* Decrypt error */ +#define B43_RX_MAC_DEC 0x00000008 /* Decryption attempted */ +#define B43_RX_MAC_PADDING 0x00000004 /* Pad bytes present */ +#define B43_RX_MAC_RESP 0x00000002 /* Response frame transmitted */ +#define B43_RX_MAC_FCSERR 0x00000001 /* FCS error */ + +/* RX channel */ +#define B43_RX_CHAN_40MHZ 0x1000 /* 40 Mhz channel width */ +#define B43_RX_CHAN_5GHZ 0x0800 /* 5 Ghz band */ +#define B43_RX_CHAN_ID 0x07F8 /* Channel ID */ +#define B43_RX_CHAN_ID_SHIFT 3 +#define B43_RX_CHAN_PHYTYPE 0x0007 /* PHY type */ + + +u8 b43_plcp_get_ratecode_cck(const u8 bitrate); +u8 b43_plcp_get_ratecode_ofdm(const u8 bitrate); + +void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate); + +void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr); + +void b43_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status); +bool b43_fill_txstatus_report(struct b43_wldev *dev, + struct ieee80211_tx_info *report, + const struct b43_txstatus *status); + +void b43_tx_suspend(struct b43_wldev *dev); +void b43_tx_resume(struct b43_wldev *dev); + + +/* Helper functions for 
converting the key-table index from "firmware-format" + * to "raw-format" and back. The firmware API changed for this at some revision. + * We need to account for that here. */ +static inline int b43_new_kidx_api(struct b43_wldev *dev) +{ + /* FIXME: Not sure the change was at rev 351 */ + return (dev->fw.rev >= 351); +} +static inline u8 b43_kidx_to_fw(struct b43_wldev *dev, u8 raw_kidx) +{ + u8 firmware_kidx; + if (b43_new_kidx_api(dev)) { + firmware_kidx = raw_kidx; + } else { + if (raw_kidx >= 4) /* Is per STA key? */ + firmware_kidx = raw_kidx - 4; + else + firmware_kidx = raw_kidx; /* TX default key */ + } + return firmware_kidx; +} +static inline u8 b43_kidx_to_raw(struct b43_wldev *dev, u8 firmware_kidx) +{ + u8 raw_kidx; + if (b43_new_kidx_api(dev)) + raw_kidx = firmware_kidx; + else + raw_kidx = firmware_kidx + 4; /* RX default keys or per STA keys */ + return raw_kidx; +} + +/* struct b43_private_tx_info - TX info private to b43. + * The structure is placed in (struct ieee80211_tx_info *)->rate_driver_data + * + * @bouncebuffer: DMA Bouncebuffer (if used) + */ +struct b43_private_tx_info { + void *bouncebuffer; +}; + +static inline struct b43_private_tx_info * +b43_get_priv_tx_info(struct ieee80211_tx_info *info) +{ + BUILD_BUG_ON(sizeof(struct b43_private_tx_info) > + sizeof(info->rate_driver_data)); + return (struct b43_private_tx_info *)info->rate_driver_data; +} + +#endif /* B43_XMIT_H_ */ diff --git a/drivers/net/wireless/b43legacy/Makefile b/drivers/net/wireless/b43legacy/Makefile new file mode 100644 index 0000000..227a77e --- /dev/null +++ b/drivers/net/wireless/b43legacy/Makefile @@ -0,0 +1,19 @@ +# b43legacy core +b43legacy-y += main.o +b43legacy-y += ilt.o +b43legacy-y += phy.o +b43legacy-y += radio.o +b43legacy-y += sysfs.o +b43legacy-y += xmit.o +# b43 RFKILL button support +b43legacy-y += rfkill.o +# b43legacy LED support +b43legacy-$(CONFIG_B43LEGACY_LEDS) += leds.o +# b43legacy debugging +b43legacy-$(CONFIG_B43LEGACY_DEBUG) += debugfs.o +# b43legacy DMA and PIO +b43legacy-$(CONFIG_B43LEGACY_DMA) += dma.o +b43legacy-$(CONFIG_B43LEGACY_PIO) += pio.o + +obj-$(CONFIG_B43LEGACY) += b43legacy.o + diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h new file mode 100644 index 0000000..89fe2f9 --- /dev/null +++ b/drivers/net/wireless/b43legacy/b43legacy.h @@ -0,0 +1,833 @@ +#ifndef B43legacy_H_ +#define B43legacy_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "debugfs.h" +#include "leds.h" +#include "rfkill.h" +#include "phy.h" + + +/* The unique identifier of the firmware that's officially supported by this + * driver version. 
*/ +#define B43legacy_SUPPORTED_FIRMWARE_ID "FW10" + +#define B43legacy_IRQWAIT_MAX_RETRIES 20 + +/* MMIO offsets */ +#define B43legacy_MMIO_DMA0_REASON 0x20 +#define B43legacy_MMIO_DMA0_IRQ_MASK 0x24 +#define B43legacy_MMIO_DMA1_REASON 0x28 +#define B43legacy_MMIO_DMA1_IRQ_MASK 0x2C +#define B43legacy_MMIO_DMA2_REASON 0x30 +#define B43legacy_MMIO_DMA2_IRQ_MASK 0x34 +#define B43legacy_MMIO_DMA3_REASON 0x38 +#define B43legacy_MMIO_DMA3_IRQ_MASK 0x3C +#define B43legacy_MMIO_DMA4_REASON 0x40 +#define B43legacy_MMIO_DMA4_IRQ_MASK 0x44 +#define B43legacy_MMIO_DMA5_REASON 0x48 +#define B43legacy_MMIO_DMA5_IRQ_MASK 0x4C +#define B43legacy_MMIO_MACCTL 0x120 /* MAC control */ +#define B43legacy_MMIO_MACCMD 0x124 /* MAC command */ +#define B43legacy_MMIO_GEN_IRQ_REASON 0x128 +#define B43legacy_MMIO_GEN_IRQ_MASK 0x12C +#define B43legacy_MMIO_RAM_CONTROL 0x130 +#define B43legacy_MMIO_RAM_DATA 0x134 +#define B43legacy_MMIO_PS_STATUS 0x140 +#define B43legacy_MMIO_RADIO_HWENABLED_HI 0x158 +#define B43legacy_MMIO_SHM_CONTROL 0x160 +#define B43legacy_MMIO_SHM_DATA 0x164 +#define B43legacy_MMIO_SHM_DATA_UNALIGNED 0x166 +#define B43legacy_MMIO_XMITSTAT_0 0x170 +#define B43legacy_MMIO_XMITSTAT_1 0x174 +#define B43legacy_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */ +#define B43legacy_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ +#define B43legacy_MMIO_TSF_CFP_REP 0x188 +#define B43legacy_MMIO_TSF_CFP_START 0x18C +/* 32-bit DMA */ +#define B43legacy_MMIO_DMA32_BASE0 0x200 +#define B43legacy_MMIO_DMA32_BASE1 0x220 +#define B43legacy_MMIO_DMA32_BASE2 0x240 +#define B43legacy_MMIO_DMA32_BASE3 0x260 +#define B43legacy_MMIO_DMA32_BASE4 0x280 +#define B43legacy_MMIO_DMA32_BASE5 0x2A0 +/* 64-bit DMA */ +#define B43legacy_MMIO_DMA64_BASE0 0x200 +#define B43legacy_MMIO_DMA64_BASE1 0x240 +#define B43legacy_MMIO_DMA64_BASE2 0x280 +#define B43legacy_MMIO_DMA64_BASE3 0x2C0 +#define B43legacy_MMIO_DMA64_BASE4 0x300 +#define B43legacy_MMIO_DMA64_BASE5 0x340 +/* PIO */ +#define B43legacy_MMIO_PIO1_BASE 0x300 +#define B43legacy_MMIO_PIO2_BASE 0x310 +#define B43legacy_MMIO_PIO3_BASE 0x320 +#define B43legacy_MMIO_PIO4_BASE 0x330 + +#define B43legacy_MMIO_PHY_VER 0x3E0 +#define B43legacy_MMIO_PHY_RADIO 0x3E2 +#define B43legacy_MMIO_PHY0 0x3E6 +#define B43legacy_MMIO_ANTENNA 0x3E8 +#define B43legacy_MMIO_CHANNEL 0x3F0 +#define B43legacy_MMIO_CHANNEL_EXT 0x3F4 +#define B43legacy_MMIO_RADIO_CONTROL 0x3F6 +#define B43legacy_MMIO_RADIO_DATA_HIGH 0x3F8 +#define B43legacy_MMIO_RADIO_DATA_LOW 0x3FA +#define B43legacy_MMIO_PHY_CONTROL 0x3FC +#define B43legacy_MMIO_PHY_DATA 0x3FE +#define B43legacy_MMIO_MACFILTER_CONTROL 0x420 +#define B43legacy_MMIO_MACFILTER_DATA 0x422 +#define B43legacy_MMIO_RCMTA_COUNT 0x43C /* Receive Match Transmitter Addr */ +#define B43legacy_MMIO_RADIO_HWENABLED_LO 0x49A +#define B43legacy_MMIO_GPIO_CONTROL 0x49C +#define B43legacy_MMIO_GPIO_MASK 0x49E +#define B43legacy_MMIO_TSF_CFP_PRETBTT 0x612 +#define B43legacy_MMIO_TSF_0 0x632 /* core rev < 3 only */ +#define B43legacy_MMIO_TSF_1 0x634 /* core rev < 3 only */ +#define B43legacy_MMIO_TSF_2 0x636 /* core rev < 3 only */ +#define B43legacy_MMIO_TSF_3 0x638 /* core rev < 3 only */ +#define B43legacy_MMIO_RNG 0x65A +#define B43legacy_MMIO_POWERUP_DELAY 0x6A8 + +/* SPROM boardflags_lo values */ +#define B43legacy_BFL_PACTRL 0x0002 +#define B43legacy_BFL_RSSI 0x0008 +#define B43legacy_BFL_EXTLNA 0x1000 + +/* GPIO register offset, in both ChipCommon and PCI core. 
*/ +#define B43legacy_GPIO_CONTROL 0x6c + +/* SHM Routing */ +#define B43legacy_SHM_SHARED 0x0001 +#define B43legacy_SHM_WIRELESS 0x0002 +#define B43legacy_SHM_HW 0x0004 +#define B43legacy_SHM_UCODE 0x0300 + +/* SHM Routing modifiers */ +#define B43legacy_SHM_AUTOINC_R 0x0200 /* Read Auto-increment */ +#define B43legacy_SHM_AUTOINC_W 0x0100 /* Write Auto-increment */ +#define B43legacy_SHM_AUTOINC_RW (B43legacy_SHM_AUTOINC_R | \ + B43legacy_SHM_AUTOINC_W) + +/* Misc SHM_SHARED offsets */ +#define B43legacy_SHM_SH_WLCOREREV 0x0016 /* 802.11 core revision */ +#define B43legacy_SHM_SH_HOSTFLO 0x005E /* Hostflags ucode opts (low) */ +#define B43legacy_SHM_SH_HOSTFHI 0x0060 /* Hostflags ucode opts (high) */ +/* SHM_SHARED crypto engine */ +#define B43legacy_SHM_SH_KEYIDXBLOCK 0x05D4 /* Key index/algorithm block */ +/* SHM_SHARED beacon/AP variables */ +#define B43legacy_SHM_SH_DTIMP 0x0012 /* DTIM period */ +#define B43legacy_SHM_SH_BTL0 0x0018 /* Beacon template length 0 */ +#define B43legacy_SHM_SH_BTL1 0x001A /* Beacon template length 1 */ +#define B43legacy_SHM_SH_BTSFOFF 0x001C /* Beacon TSF offset */ +#define B43legacy_SHM_SH_TIMPOS 0x001E /* TIM position in beacon */ +#define B43legacy_SHM_SH_BEACPHYCTL 0x0054 /* Beacon PHY TX control word */ +/* SHM_SHARED ACK/CTS control */ +#define B43legacy_SHM_SH_ACKCTSPHYCTL 0x0022 /* ACK/CTS PHY control word */ +/* SHM_SHARED probe response variables */ +#define B43legacy_SHM_SH_PRTLEN 0x004A /* Probe Response template length */ +#define B43legacy_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */ +#define B43legacy_SHM_SH_PRPHYCTL 0x0188 /* Probe Resp PHY TX control */ +/* SHM_SHARED rate tables */ +#define B43legacy_SHM_SH_OFDMDIRECT 0x0480 /* Pointer to OFDM direct map */ +#define B43legacy_SHM_SH_OFDMBASIC 0x04A0 /* Pointer to OFDM basic rate map */ +#define B43legacy_SHM_SH_CCKDIRECT 0x04C0 /* Pointer to CCK direct map */ +#define B43legacy_SHM_SH_CCKBASIC 0x04E0 /* Pointer to CCK basic rate map */ +/* SHM_SHARED microcode soft registers */ +#define B43legacy_SHM_SH_UCODEREV 0x0000 /* Microcode revision */ +#define B43legacy_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */ +#define B43legacy_SHM_SH_UCODEDATE 0x0004 /* Microcode date */ +#define B43legacy_SHM_SH_UCODETIME 0x0006 /* Microcode time */ +#define B43legacy_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */ +#define B43legacy_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */ + +#define B43legacy_UCODEFLAGS_OFFSET 0x005E + +/* Hardware Radio Enable masks */ +#define B43legacy_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16) +#define B43legacy_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4) + +/* HostFlags. See b43legacy_hf_read/write() */ +#define B43legacy_HF_SYMW 0x00000002 /* G-PHY SYM workaround */ +#define B43legacy_HF_GDCW 0x00000020 /* G-PHY DV cancel filter */ +#define B43legacy_HF_OFDMPABOOST 0x00000040 /* Enable PA boost OFDM */ +#define B43legacy_HF_EDCF 0x00000100 /* on if WME/MAC suspended */ + +/* MacFilter offsets. 
*/ +#define B43legacy_MACFILTER_SELF 0x0000 +#define B43legacy_MACFILTER_BSSID 0x0003 +#define B43legacy_MACFILTER_MAC 0x0010 + +/* PHYVersioning */ +#define B43legacy_PHYTYPE_B 0x01 +#define B43legacy_PHYTYPE_G 0x02 + +/* PHYRegisters */ +#define B43legacy_PHY_G_LO_CONTROL 0x0810 +#define B43legacy_PHY_ILT_G_CTRL 0x0472 +#define B43legacy_PHY_ILT_G_DATA1 0x0473 +#define B43legacy_PHY_ILT_G_DATA2 0x0474 +#define B43legacy_PHY_G_PCTL 0x0029 +#define B43legacy_PHY_RADIO_BITFIELD 0x0401 +#define B43legacy_PHY_G_CRS 0x0429 +#define B43legacy_PHY_NRSSILT_CTRL 0x0803 +#define B43legacy_PHY_NRSSILT_DATA 0x0804 + +/* RadioRegisters */ +#define B43legacy_RADIOCTL_ID 0x01 + +/* MAC Control bitfield */ +#define B43legacy_MACCTL_ENABLED 0x00000001 /* MAC Enabled */ +#define B43legacy_MACCTL_PSM_RUN 0x00000002 /* Run Microcode */ +#define B43legacy_MACCTL_PSM_JMP0 0x00000004 /* Microcode jump to 0 */ +#define B43legacy_MACCTL_SHM_ENABLED 0x00000100 /* SHM Enabled */ +#define B43legacy_MACCTL_IHR_ENABLED 0x00000400 /* IHR Region Enabled */ +#define B43legacy_MACCTL_BE 0x00010000 /* Big Endian mode */ +#define B43legacy_MACCTL_INFRA 0x00020000 /* Infrastructure mode */ +#define B43legacy_MACCTL_AP 0x00040000 /* AccessPoint mode */ +#define B43legacy_MACCTL_RADIOLOCK 0x00080000 /* Radio lock */ +#define B43legacy_MACCTL_BEACPROMISC 0x00100000 /* Beacon Promiscuous */ +#define B43legacy_MACCTL_KEEP_BADPLCP 0x00200000 /* Keep bad PLCP frames */ +#define B43legacy_MACCTL_KEEP_CTL 0x00400000 /* Keep control frames */ +#define B43legacy_MACCTL_KEEP_BAD 0x00800000 /* Keep bad frames (FCS) */ +#define B43legacy_MACCTL_PROMISC 0x01000000 /* Promiscuous mode */ +#define B43legacy_MACCTL_HWPS 0x02000000 /* Hardware Power Saving */ +#define B43legacy_MACCTL_AWAKE 0x04000000 /* Device is awake */ +#define B43legacy_MACCTL_TBTTHOLD 0x10000000 /* TBTT Hold */ +#define B43legacy_MACCTL_GMODE 0x80000000 /* G Mode */ + +/* MAC Command bitfield */ +#define B43legacy_MACCMD_BEACON0_VALID 0x00000001 /* Beacon 0 in template RAM is busy/valid */ +#define B43legacy_MACCMD_BEACON1_VALID 0x00000002 /* Beacon 1 in template RAM is busy/valid */ +#define B43legacy_MACCMD_DFQ_VALID 0x00000004 /* Directed frame queue valid (IBSS PS mode, ATIM) */ +#define B43legacy_MACCMD_CCA 0x00000008 /* Clear channel assessment */ +#define B43legacy_MACCMD_BGNOISE 0x00000010 /* Background noise */ + +/* 802.11 core specific TM State Low flags */ +#define B43legacy_TMSLOW_GMODE 0x20000000 /* G Mode Enable */ +#define B43legacy_TMSLOW_PLLREFSEL 0x00200000 /* PLL Freq Ref Select */ +#define B43legacy_TMSLOW_MACPHYCLKEN 0x00100000 /* MAC PHY Clock Ctrl Enbl */ +#define B43legacy_TMSLOW_PHYRESET 0x00080000 /* PHY Reset */ +#define B43legacy_TMSLOW_PHYCLKEN 0x00040000 /* PHY Clock Enable */ + +/* 802.11 core specific TM State High flags */ +#define B43legacy_TMSHIGH_FCLOCK 0x00040000 /* Fast Clock Available */ +#define B43legacy_TMSHIGH_GPHY 0x00010000 /* G-PHY avail (rev >= 5) */ + +#define B43legacy_UCODEFLAG_AUTODIV 0x0001 + +/* Generic-Interrupt reasons. 
*/ +#define B43legacy_IRQ_MAC_SUSPENDED 0x00000001 +#define B43legacy_IRQ_BEACON 0x00000002 +#define B43legacy_IRQ_TBTT_INDI 0x00000004 /* Target Beacon Transmit Time */ +#define B43legacy_IRQ_BEACON_TX_OK 0x00000008 +#define B43legacy_IRQ_BEACON_CANCEL 0x00000010 +#define B43legacy_IRQ_ATIM_END 0x00000020 +#define B43legacy_IRQ_PMQ 0x00000040 +#define B43legacy_IRQ_PIO_WORKAROUND 0x00000100 +#define B43legacy_IRQ_MAC_TXERR 0x00000200 +#define B43legacy_IRQ_PHY_TXERR 0x00000800 +#define B43legacy_IRQ_PMEVENT 0x00001000 +#define B43legacy_IRQ_TIMER0 0x00002000 +#define B43legacy_IRQ_TIMER1 0x00004000 +#define B43legacy_IRQ_DMA 0x00008000 +#define B43legacy_IRQ_TXFIFO_FLUSH_OK 0x00010000 +#define B43legacy_IRQ_CCA_MEASURE_OK 0x00020000 +#define B43legacy_IRQ_NOISESAMPLE_OK 0x00040000 +#define B43legacy_IRQ_UCODE_DEBUG 0x08000000 +#define B43legacy_IRQ_RFKILL 0x10000000 +#define B43legacy_IRQ_TX_OK 0x20000000 +#define B43legacy_IRQ_PHY_G_CHANGED 0x40000000 +#define B43legacy_IRQ_TIMEOUT 0x80000000 + +#define B43legacy_IRQ_ALL 0xFFFFFFFF +#define B43legacy_IRQ_MASKTEMPLATE (B43legacy_IRQ_MAC_SUSPENDED | \ + B43legacy_IRQ_TBTT_INDI | \ + B43legacy_IRQ_ATIM_END | \ + B43legacy_IRQ_PMQ | \ + B43legacy_IRQ_MAC_TXERR | \ + B43legacy_IRQ_PHY_TXERR | \ + B43legacy_IRQ_DMA | \ + B43legacy_IRQ_TXFIFO_FLUSH_OK | \ + B43legacy_IRQ_NOISESAMPLE_OK | \ + B43legacy_IRQ_UCODE_DEBUG | \ + B43legacy_IRQ_RFKILL | \ + B43legacy_IRQ_TX_OK) + +/* Device specific rate values. + * The actual values defined here are (rate_in_mbps * 2). + * Some code depends on this. Don't change it. */ +#define B43legacy_CCK_RATE_1MB 2 +#define B43legacy_CCK_RATE_2MB 4 +#define B43legacy_CCK_RATE_5MB 11 +#define B43legacy_CCK_RATE_11MB 22 +#define B43legacy_OFDM_RATE_6MB 12 +#define B43legacy_OFDM_RATE_9MB 18 +#define B43legacy_OFDM_RATE_12MB 24 +#define B43legacy_OFDM_RATE_18MB 36 +#define B43legacy_OFDM_RATE_24MB 48 +#define B43legacy_OFDM_RATE_36MB 72 +#define B43legacy_OFDM_RATE_48MB 96 +#define B43legacy_OFDM_RATE_54MB 108 +/* Convert a b43legacy rate value to a rate in 100kbps */ +#define B43legacy_RATE_TO_100KBPS(rate) (((rate) * 10) / 2) + + +#define B43legacy_DEFAULT_SHORT_RETRY_LIMIT 7 +#define B43legacy_DEFAULT_LONG_RETRY_LIMIT 4 + +#define B43legacy_PHY_TX_BADNESS_LIMIT 1000 + +/* Max size of a security key */ +#define B43legacy_SEC_KEYSIZE 16 +/* Security algorithms. */ +enum { + B43legacy_SEC_ALGO_NONE = 0, /* unencrypted, as of TX header. 
*/ + B43legacy_SEC_ALGO_WEP40, + B43legacy_SEC_ALGO_TKIP, + B43legacy_SEC_ALGO_AES, + B43legacy_SEC_ALGO_WEP104, + B43legacy_SEC_ALGO_AES_LEGACY, +}; + +/* Core Information Registers */ +#define B43legacy_CIR_BASE 0xf00 +#define B43legacy_CIR_SBTPSFLAG (B43legacy_CIR_BASE + 0x18) +#define B43legacy_CIR_SBIMSTATE (B43legacy_CIR_BASE + 0x90) +#define B43legacy_CIR_SBINTVEC (B43legacy_CIR_BASE + 0x94) +#define B43legacy_CIR_SBTMSTATELOW (B43legacy_CIR_BASE + 0x98) +#define B43legacy_CIR_SBTMSTATEHIGH (B43legacy_CIR_BASE + 0x9c) +#define B43legacy_CIR_SBIMCONFIGLOW (B43legacy_CIR_BASE + 0xa8) +#define B43legacy_CIR_SB_ID_HI (B43legacy_CIR_BASE + 0xfc) + +/* sbtmstatehigh state flags */ +#define B43legacy_SBTMSTATEHIGH_SERROR 0x00000001 +#define B43legacy_SBTMSTATEHIGH_BUSY 0x00000004 +#define B43legacy_SBTMSTATEHIGH_TIMEOUT 0x00000020 +#define B43legacy_SBTMSTATEHIGH_G_PHY_AVAIL 0x00010000 +#define B43legacy_SBTMSTATEHIGH_COREFLAGS 0x1FFF0000 +#define B43legacy_SBTMSTATEHIGH_DMA64BIT 0x10000000 +#define B43legacy_SBTMSTATEHIGH_GATEDCLK 0x20000000 +#define B43legacy_SBTMSTATEHIGH_BISTFAILED 0x40000000 +#define B43legacy_SBTMSTATEHIGH_BISTCOMPLETE 0x80000000 + +/* sbimstate flags */ +#define B43legacy_SBIMSTATE_IB_ERROR 0x20000 +#define B43legacy_SBIMSTATE_TIMEOUT 0x40000 + +#define PFX KBUILD_MODNAME ": " +#ifdef assert +# undef assert +#endif +#ifdef CONFIG_B43LEGACY_DEBUG +# define B43legacy_WARN_ON(x) WARN_ON(x) +# define B43legacy_BUG_ON(expr) \ + do { \ + if (unlikely((expr))) { \ + printk(KERN_INFO PFX "Test (%s) failed\n", \ + #expr); \ + BUG_ON(expr); \ + } \ + } while (0) +# define B43legacy_DEBUG 1 +#else +/* This will evaluate the argument even if debugging is disabled. */ +static inline bool __b43legacy_warn_on_dummy(bool x) { return x; } +# define B43legacy_WARN_ON(x) __b43legacy_warn_on_dummy(unlikely(!!(x))) +# define B43legacy_BUG_ON(x) do { /* nothing */ } while (0) +# define B43legacy_DEBUG 0 +#endif + + +struct net_device; +struct pci_dev; +struct b43legacy_dmaring; +struct b43legacy_pioqueue; + +/* The firmware file header */ +#define B43legacy_FW_TYPE_UCODE 'u' +#define B43legacy_FW_TYPE_PCM 'p' +#define B43legacy_FW_TYPE_IV 'i' +struct b43legacy_fw_header { + /* File type */ + u8 type; + /* File format version */ + u8 ver; + u8 __padding[2]; + /* Size of the data. For ucode and PCM this is in bytes. + * For IV this is number-of-ivs. */ + __be32 size; +} __attribute__((__packed__)); + +/* Initial Value file format */ +#define B43legacy_IV_OFFSET_MASK 0x7FFF +#define B43legacy_IV_32BIT 0x8000 +struct b43legacy_iv { + __be16 offset_size; + union { + __be16 d16; + __be32 d32; + } data __attribute__((__packed__)); +} __attribute__((__packed__)); + +#define B43legacy_PHYMODE(phytype) (1 << (phytype)) +#define B43legacy_PHYMODE_B B43legacy_PHYMODE \ + ((B43legacy_PHYTYPE_B)) +#define B43legacy_PHYMODE_G B43legacy_PHYMODE \ + ((B43legacy_PHYTYPE_G)) + +/* Value pair to measure the LocalOscillator. */ +struct b43legacy_lopair { + s8 low; + s8 high; + u8 used:1; +}; +#define B43legacy_LO_COUNT (14*4) + +struct b43legacy_phy { + /* Possible PHYMODEs on this PHY */ + u8 possible_phymodes; + /* GMODE bit enabled in MACCTL? */ + bool gmode; + + /* Analog Type */ + u8 analog; + /* B43legacy_PHYTYPE_ */ + u8 type; + /* PHY revision number. 
*/ + u8 rev; + + u16 antenna_diversity; + u16 savedpctlreg; + /* Radio versioning */ + u16 radio_manuf; /* Radio manufacturer */ + u16 radio_ver; /* Radio version */ + u8 calibrated:1; + u8 radio_rev; /* Radio revision */ + + bool dyn_tssi_tbl; /* tssi2dbm is kmalloc()ed. */ + + /* ACI (adjacent channel interference) flags. */ + bool aci_enable; + bool aci_wlan_automatic; + bool aci_hw_rssi; + + /* Radio switched on/off */ + bool radio_on; + struct { + /* Values saved when turning the radio off. + * They are needed when turning it on again. */ + bool valid; + u16 rfover; + u16 rfoverval; + } radio_off_context; + + u16 minlowsig[2]; + u16 minlowsigpos[2]; + + /* LO Measurement Data. + * Use b43legacy_get_lopair() to get a value. + */ + struct b43legacy_lopair *_lo_pairs; + /* TSSI to dBm table in use */ + const s8 *tssi2dbm; + /* idle TSSI value */ + s8 idle_tssi; + /* Target idle TSSI */ + int tgt_idle_tssi; + /* Current idle TSSI */ + int cur_idle_tssi; + + /* LocalOscillator control values. */ + struct b43legacy_txpower_lo_control *lo_control; + /* Values from b43legacy_calc_loopback_gain() */ + s16 max_lb_gain; /* Maximum Loopback gain in hdB */ + s16 trsw_rx_gain; /* TRSW RX gain in hdB */ + s16 lna_lod_gain; /* LNA lod */ + s16 lna_gain; /* LNA */ + s16 pga_gain; /* PGA */ + + /* Desired TX power level (in dBm). This is set by the user and + * adjusted in b43legacy_phy_xmitpower(). */ + u8 power_level; + + /* Values from b43legacy_calc_loopback_gain() */ + u16 loopback_gain[2]; + + /* TX Power control values. */ + /* B/G PHY */ + struct { + /* Current Radio Attenuation for TXpower recalculation. */ + u16 rfatt; + /* Current Baseband Attenuation for TXpower recalculation. */ + u16 bbatt; + /* Current TXpower control value for TXpower recalculation. */ + u16 txctl1; + u16 txctl2; + }; + /* A PHY */ + struct { + u16 txpwr_offset; + }; + + /* Current Interference Mitigation mode */ + int interfmode; + /* Stack of saved values from the Interference Mitigation code. + * Each value in the stack is layed out as follows: + * bit 0-11: offset + * bit 12-15: register ID + * bit 16-32: value + * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT + */ +#define B43legacy_INTERFSTACK_SIZE 26 + u32 interfstack[B43legacy_INTERFSTACK_SIZE]; + + /* Saved values from the NRSSI Slope calculation */ + s16 nrssi[2]; + s32 nrssislope; + /* In memory nrssi lookup table. */ + s8 nrssi_lt[64]; + + /* current channel */ + u8 channel; + + u16 lofcal; + + u16 initval; + + /* PHY TX errors counter. */ + atomic_t txerr_cnt; + +#if B43legacy_DEBUG + /* Manual TX-power control enabled? */ + bool manual_txpower_control; + /* PHY registers locked by b43legacy_phy_lock()? */ + bool phy_locked; +#endif /* B43legacy_DEBUG */ +}; + +/* Data structures for DMA transmission, per 80211 core. */ +struct b43legacy_dma { + struct b43legacy_dmaring *tx_ring0; + struct b43legacy_dmaring *tx_ring1; + struct b43legacy_dmaring *tx_ring2; + struct b43legacy_dmaring *tx_ring3; + struct b43legacy_dmaring *tx_ring4; + struct b43legacy_dmaring *tx_ring5; + + struct b43legacy_dmaring *rx_ring0; + struct b43legacy_dmaring *rx_ring3; /* only on core.rev < 5 */ +}; + +/* Data structures for PIO transmission, per 80211 core. */ +struct b43legacy_pio { + struct b43legacy_pioqueue *queue0; + struct b43legacy_pioqueue *queue1; + struct b43legacy_pioqueue *queue2; + struct b43legacy_pioqueue *queue3; +}; + +/* Context information for a noise calculation (Link Quality). 
*/ +struct b43legacy_noise_calculation { + u8 channel_at_start; + bool calculation_running; + u8 nr_samples; + s8 samples[8][4]; +}; + +struct b43legacy_stats { + u8 link_noise; + /* Store the last TX/RX times here for updating the leds. */ + unsigned long last_tx; + unsigned long last_rx; +}; + +struct b43legacy_key { + void *keyconf; + bool enabled; + u8 algorithm; +}; + +struct b43legacy_wldev; + +/* Data structure for the WLAN parts (802.11 cores) of the b43legacy chip. */ +struct b43legacy_wl { + /* Pointer to the active wireless device on this chip */ + struct b43legacy_wldev *current_dev; + /* Pointer to the ieee80211 hardware data structure */ + struct ieee80211_hw *hw; + + spinlock_t irq_lock; /* locks IRQ */ + struct mutex mutex; /* locks wireless core state */ + spinlock_t leds_lock; /* lock for leds */ + + /* We can only have one operating interface (802.11 core) + * at a time. General information about this interface follows. + */ + + struct ieee80211_vif *vif; + /* MAC address (can be NULL). */ + u8 mac_addr[ETH_ALEN]; + /* Current BSSID (can be NULL). */ + u8 bssid[ETH_ALEN]; + /* Interface type. (IEEE80211_IF_TYPE_XXX) */ + int if_type; + /* Is the card operating in AP, STA or IBSS mode? */ + bool operating; + /* filter flags */ + unsigned int filter_flags; + /* Stats about the wireless interface */ + struct ieee80211_low_level_stats ieee_stats; + +#ifdef CONFIG_B43LEGACY_HWRNG + struct hwrng rng; + u8 rng_initialized; + char rng_name[30 + 1]; +#endif + + /* List of all wireless devices on this chip */ + struct list_head devlist; + u8 nr_devs; + + bool radiotap_enabled; + bool radio_enabled; + + /* The beacon we are currently using (AP or IBSS mode). + * This beacon stuff is protected by the irq_lock. */ + struct sk_buff *current_beacon; + bool beacon0_uploaded; + bool beacon1_uploaded; + bool beacon_templates_virgin; /* Never wrote the templates? */ + struct work_struct beacon_update_trigger; +}; + +/* Pointers to the firmware data and meta information about it. */ +struct b43legacy_firmware { + /* Microcode */ + const struct firmware *ucode; + /* PCM code */ + const struct firmware *pcm; + /* Initial MMIO values for the firmware */ + const struct firmware *initvals; + /* Initial MMIO values for the firmware, band-specific */ + const struct firmware *initvals_band; + /* Firmware revision */ + u16 rev; + /* Firmware patchlevel */ + u16 patch; +}; + +/* Device (802.11 core) initialization status. */ +enum { + B43legacy_STAT_UNINIT = 0, /* Uninitialized. */ + B43legacy_STAT_INITIALIZED = 1, /* Initialized, not yet started. */ + B43legacy_STAT_STARTED = 2, /* Up and running. */ +}; +#define b43legacy_status(wldev) atomic_read(&(wldev)->__init_status) +#define b43legacy_set_status(wldev, stat) do { \ + atomic_set(&(wldev)->__init_status, (stat)); \ + smp_wmb(); \ + } while (0) + +/* *** --- HOW LOCKING WORKS IN B43legacy --- *** + * + * You should always acquire both, wl->mutex and wl->irq_lock unless: + * - You don't need to acquire wl->irq_lock, if the interface is stopped. + * - You don't need to acquire wl->mutex in the IRQ handler, IRQ tasklet + * and packet TX path (and _ONLY_ there.) + */ + +/* Data structure for one wireless device (802.11 core) */ +struct b43legacy_wldev { + struct ssb_device *dev; + struct b43legacy_wl *wl; + + /* The device initialization status. + * Use b43legacy_status() to query. */ + atomic_t __init_status; + /* Saved init status for handling suspend. */ + int suspend_init_status; + + bool __using_pio; /* Using pio rather than dma. 
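+ * Queried through b43legacy_using_pio() further down in this header, which collapses to a compile-time constant when only one of CONFIG_B43LEGACY_DMA / CONFIG_B43LEGACY_PIO is built in.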
*/ + bool bad_frames_preempt;/* Use "Bad Frames Preemption". */ + bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM). */ + bool short_preamble; /* TRUE if using short preamble. */ + bool radio_hw_enable; /* State of radio hardware enable bit. */ + + /* PHY/Radio device. */ + struct b43legacy_phy phy; + union { + /* DMA engines. */ + struct b43legacy_dma dma; + /* PIO engines. */ + struct b43legacy_pio pio; + }; + + /* Various statistics about the physical device. */ + struct b43legacy_stats stats; + + /* The device LEDs. */ + struct b43legacy_led led_tx; + struct b43legacy_led led_rx; + struct b43legacy_led led_assoc; + struct b43legacy_led led_radio; + + /* Reason code of the last interrupt. */ + u32 irq_reason; + u32 dma_reason[6]; + /* The currently active generic-interrupt mask. */ + u32 irq_mask; + /* Link Quality calculation context. */ + struct b43legacy_noise_calculation noisecalc; + /* if > 0 MAC is suspended. if == 0 MAC is enabled. */ + int mac_suspended; + + /* Interrupt Service Routine tasklet (bottom-half) */ + struct tasklet_struct isr_tasklet; + + /* Periodic tasks */ + struct delayed_work periodic_work; + unsigned int periodic_state; + + struct work_struct restart_work; + + /* encryption/decryption */ + u16 ktp; /* Key table pointer */ + u8 max_nr_keys; + struct b43legacy_key key[58]; + + /* Firmware data */ + struct b43legacy_firmware fw; + + /* Devicelist in struct b43legacy_wl (all 802.11 cores) */ + struct list_head list; + + /* Debugging stuff follows. */ +#ifdef CONFIG_B43LEGACY_DEBUG + struct b43legacy_dfsentry *dfsentry; +#endif +}; + + +static inline +struct b43legacy_wl *hw_to_b43legacy_wl(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +/* Helper function, which returns a boolean. + * TRUE, if PIO is used; FALSE, if DMA is used. + */ +#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO) +static inline +int b43legacy_using_pio(struct b43legacy_wldev *dev) +{ + return dev->__using_pio; +} +#elif defined(CONFIG_B43LEGACY_DMA) +static inline +int b43legacy_using_pio(struct b43legacy_wldev *dev) +{ + return 0; +} +#elif defined(CONFIG_B43LEGACY_PIO) +static inline +int b43legacy_using_pio(struct b43legacy_wldev *dev) +{ + return 1; +} +#else +# error "Using neither DMA nor PIO? Confused..." +#endif + + +static inline +struct b43legacy_wldev *dev_to_b43legacy_wldev(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + return ssb_get_drvdata(ssb_dev); +} + +/* Is the device operating in a specified mode (IEEE80211_IF_TYPE_XXX). 
*/ +static inline +int b43legacy_is_mode(struct b43legacy_wl *wl, int type) +{ + return (wl->operating && + wl->if_type == type); +} + +static inline +bool is_bcm_board_vendor(struct b43legacy_wldev *dev) +{ + return (dev->dev->bus->boardinfo.vendor == PCI_VENDOR_ID_BROADCOM); +} + +static inline +u16 b43legacy_read16(struct b43legacy_wldev *dev, u16 offset) +{ + return ssb_read16(dev->dev, offset); +} + +static inline +void b43legacy_write16(struct b43legacy_wldev *dev, u16 offset, u16 value) +{ + ssb_write16(dev->dev, offset, value); +} + +static inline +u32 b43legacy_read32(struct b43legacy_wldev *dev, u16 offset) +{ + return ssb_read32(dev->dev, offset); +} + +static inline +void b43legacy_write32(struct b43legacy_wldev *dev, u16 offset, u32 value) +{ + ssb_write32(dev->dev, offset, value); +} + +static inline +struct b43legacy_lopair *b43legacy_get_lopair(struct b43legacy_phy *phy, + u16 radio_attenuation, + u16 baseband_attenuation) +{ + return phy->_lo_pairs + (radio_attenuation + + 14 * (baseband_attenuation / 2)); +} + + + +/* Message printing */ +void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +#if B43legacy_DEBUG +void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +#else /* DEBUG */ +# define b43legacydbg(wl, fmt...) do { /* nothing */ } while (0) +#endif /* DEBUG */ + +/* Macros for printing a value in Q5.2 format */ +#define Q52_FMT "%u.%u" +#define Q52_ARG(q52) ((q52) / 4), (((q52) & 3) * 100 / 4) + +#endif /* B43legacy_H_ */ diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c new file mode 100644 index 0000000..1f85ac5 --- /dev/null +++ b/drivers/net/wireless/b43legacy/debugfs.c @@ -0,0 +1,505 @@ +/* + + Broadcom B43legacy wireless driver + + debugfs driver debugging code + + Copyright (c) 2005-2007 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include +#include +#include +#include +#include +#include + +#include "b43legacy.h" +#include "main.h" +#include "debugfs.h" +#include "dma.h" +#include "pio.h" +#include "xmit.h" + + +/* The root directory. */ +static struct dentry *rootdir; + +struct b43legacy_debugfs_fops { + ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize); + int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count); + struct file_operations fops; + /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */ + size_t file_struct_offset; + /* Take wl->irq_lock before calling read/write? 
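+ * Set below for the tsf, ucode_regs, shm and restart files, whose handlers must run with the IRQ lock held because they touch the device directly; the txstat log is protected by its own spinlock and does not need it.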
*/ + bool take_irqlock; +}; + +static inline +struct b43legacy_dfs_file * fops_to_dfs_file(struct b43legacy_wldev *dev, + const struct b43legacy_debugfs_fops *dfops) +{ + void *p; + + p = dev->dfsentry; + p += dfops->file_struct_offset; + + return p; +} + + +#define fappend(fmt, x...) \ + do { \ + if (bufsize - count) \ + count += snprintf(buf + count, \ + bufsize - count, \ + fmt , ##x); \ + else \ + printk(KERN_ERR "b43legacy: fappend overflow\n"); \ + } while (0) + + +/* wl->irq_lock is locked */ +static ssize_t tsf_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + ssize_t count = 0; + u64 tsf; + + b43legacy_tsf_read(dev, &tsf); + fappend("0x%08x%08x\n", + (unsigned int)((tsf & 0xFFFFFFFF00000000ULL) >> 32), + (unsigned int)(tsf & 0xFFFFFFFFULL)); + + return count; +} + +/* wl->irq_lock is locked */ +static int tsf_write_file(struct b43legacy_wldev *dev, const char *buf, size_t count) +{ + u64 tsf; + + if (sscanf(buf, "%llu", (unsigned long long *)(&tsf)) != 1) + return -EINVAL; + b43legacy_tsf_write(dev, tsf); + + return 0; +} + +/* wl->irq_lock is locked */ +static ssize_t ucode_regs_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + ssize_t count = 0; + int i; + + for (i = 0; i < 64; i++) { + fappend("r%d = 0x%04x\n", i, + b43legacy_shm_read16(dev, B43legacy_SHM_WIRELESS, i)); + } + + return count; +} + +/* wl->irq_lock is locked */ +static ssize_t shm_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + ssize_t count = 0; + int i; + u16 tmp; + __le16 *le16buf = (__le16 *)buf; + + for (i = 0; i < 0x1000; i++) { + if (bufsize < sizeof(tmp)) + break; + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 2 * i); + le16buf[i] = cpu_to_le16(tmp); + count += sizeof(tmp); + bufsize -= sizeof(tmp); + } + + return count; +} + +static ssize_t txstat_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + struct b43legacy_txstatus_log *log = &dev->dfsentry->txstatlog; + ssize_t count = 0; + unsigned long flags; + int i, idx; + struct b43legacy_txstatus *stat; + + spin_lock_irqsave(&log->lock, flags); + if (log->end < 0) { + fappend("Nothing transmitted, yet\n"); + goto out_unlock; + } + fappend("b43legacy TX status reports:\n\n" + "index | cookie | seq | phy_stat | frame_count | " + "rts_count | supp_reason | pm_indicated | " + "intermediate | for_ampdu | acked\n" "---\n"); + i = log->end + 1; + idx = 0; + while (1) { + if (i == B43legacy_NR_LOGGED_TXSTATUS) + i = 0; + stat = &(log->log[i]); + if (stat->cookie) { + fappend("%03d | " + "0x%04X | 0x%04X | 0x%02X | " + "0x%X | 0x%X | " + "%u | %u | " + "%u | %u | %u\n", + idx, + stat->cookie, stat->seq, stat->phy_stat, + stat->frame_count, stat->rts_count, + stat->supp_reason, stat->pm_indicated, + stat->intermediate, stat->for_ampdu, + stat->acked); + idx++; + } + if (i == log->end) + break; + i++; + } +out_unlock: + spin_unlock_irqrestore(&log->lock, flags); + + return count; +} + +/* wl->irq_lock is locked */ +static int restart_write_file(struct b43legacy_wldev *dev, const char *buf, size_t count) +{ + int err = 0; + + if (count > 0 && buf[0] == '1') { + b43legacy_controller_restart(dev, "manually restarted"); + } else + err = -EINVAL; + + return err; +} + +#undef fappend + +static int b43legacy_debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43legacy_wldev *dev; + struct 
b43legacy_debugfs_fops *dfops; + struct b43legacy_dfs_file *dfile; + ssize_t uninitialized_var(ret); + char *buf; + const size_t bufsize = 1024 * 16; /* 16 KiB buffer */ + const size_t buforder = get_order(bufsize); + int err = 0; + + if (!count) + return 0; + dev = file->private_data; + if (!dev) + return -ENODEV; + + mutex_lock(&dev->wl->mutex); + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops); + if (!dfops->read) { + err = -ENOSYS; + goto out_unlock; + } + dfile = fops_to_dfs_file(dev, dfops); + + if (!dfile->buffer) { + buf = (char *)__get_free_pages(GFP_KERNEL, buforder); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + memset(buf, 0, bufsize); + if (dfops->take_irqlock) { + spin_lock_irq(&dev->wl->irq_lock); + ret = dfops->read(dev, buf, bufsize); + spin_unlock_irq(&dev->wl->irq_lock); + } else + ret = dfops->read(dev, buf, bufsize); + if (ret <= 0) { + free_pages((unsigned long)buf, buforder); + err = ret; + goto out_unlock; + } + dfile->data_len = ret; + dfile->buffer = buf; + } + + ret = simple_read_from_buffer(userbuf, count, ppos, + dfile->buffer, + dfile->data_len); + if (*ppos >= dfile->data_len) { + free_pages((unsigned long)dfile->buffer, buforder); + dfile->buffer = NULL; + dfile->data_len = 0; + } +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? err : ret; +} + +static ssize_t b43legacy_debugfs_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43legacy_wldev *dev; + struct b43legacy_debugfs_fops *dfops; + char *buf; + int err = 0; + + if (!count) + return 0; + if (count > PAGE_SIZE) + return -E2BIG; + dev = file->private_data; + if (!dev) + return -ENODEV; + + mutex_lock(&dev->wl->mutex); + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops); + if (!dfops->write) { + err = -ENOSYS; + goto out_unlock; + } + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + if (copy_from_user(buf, userbuf, count)) { + err = -EFAULT; + goto out_freepage; + } + if (dfops->take_irqlock) { + spin_lock_irq(&dev->wl->irq_lock); + err = dfops->write(dev, buf, count); + spin_unlock_irq(&dev->wl->irq_lock); + } else + err = dfops->write(dev, buf, count); + if (err) + goto out_freepage; + +out_freepage: + free_page((unsigned long)buf); +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? 
err : count; +} + + +#define B43legacy_DEBUGFS_FOPS(name, _read, _write, _take_irqlock) \ + static struct b43legacy_debugfs_fops fops_##name = { \ + .read = _read, \ + .write = _write, \ + .fops = { \ + .open = b43legacy_debugfs_open, \ + .read = b43legacy_debugfs_read, \ + .write = b43legacy_debugfs_write, \ + }, \ + .file_struct_offset = offsetof(struct b43legacy_dfsentry, \ + file_##name), \ + .take_irqlock = _take_irqlock, \ + } + +B43legacy_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1); +B43legacy_DEBUGFS_FOPS(ucode_regs, ucode_regs_read_file, NULL, 1); +B43legacy_DEBUGFS_FOPS(shm, shm_read_file, NULL, 1); +B43legacy_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0); +B43legacy_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1); + + +int b43legacy_debug(struct b43legacy_wldev *dev, enum b43legacy_dyndbg feature) +{ + return !!(dev->dfsentry && dev->dfsentry->dyn_debug[feature]); +} + +static void b43legacy_remove_dynamic_debug(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e = dev->dfsentry; + int i; + + for (i = 0; i < __B43legacy_NR_DYNDBG; i++) + debugfs_remove(e->dyn_debug_dentries[i]); +} + +static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e = dev->dfsentry; + struct dentry *d; + +#define add_dyn_dbg(name, id, initstate) do { \ + e->dyn_debug[id] = (initstate); \ + d = debugfs_create_bool(name, 0600, e->subdir, \ + &(e->dyn_debug[id])); \ + if (!IS_ERR(d)) \ + e->dyn_debug_dentries[id] = d; \ + } while (0) + + add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, 0); + add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, 0); + add_dyn_dbg("debug_dmaverbose", B43legacy_DBG_DMAVERBOSE, 0); + add_dyn_dbg("debug_pwork_fast", B43legacy_DBG_PWORK_FAST, 0); + add_dyn_dbg("debug_pwork_stop", B43legacy_DBG_PWORK_STOP, 0); + +#undef add_dyn_dbg +} + +void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e; + struct b43legacy_txstatus_log *log; + char devdir[16]; + + B43legacy_WARN_ON(!dev); + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + b43legacyerr(dev->wl, "debugfs: add device OOM\n"); + return; + } + e->dev = dev; + log = &e->txstatlog; + log->log = kcalloc(B43legacy_NR_LOGGED_TXSTATUS, + sizeof(struct b43legacy_txstatus), GFP_KERNEL); + if (!log->log) { + b43legacyerr(dev->wl, "debugfs: add device txstatus OOM\n"); + kfree(e); + return; + } + log->end = -1; + spin_lock_init(&log->lock); + + dev->dfsentry = e; + + snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); + e->subdir = debugfs_create_dir(devdir, rootdir); + if (!e->subdir || IS_ERR(e->subdir)) { + if (e->subdir == ERR_PTR(-ENODEV)) { + b43legacydbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " + "enabled in kernel config\n"); + } else { + b43legacyerr(dev->wl, "debugfs: cannot create %s directory\n", + devdir); + } + dev->dfsentry = NULL; + kfree(log->log); + kfree(e); + return; + } + +#define ADD_FILE(name, mode) \ + do { \ + struct dentry *d; \ + d = debugfs_create_file(__stringify(name), \ + mode, e->subdir, dev, \ + &fops_##name.fops); \ + e->file_##name.dentry = NULL; \ + if (!IS_ERR(d)) \ + e->file_##name.dentry = d; \ + } while (0) + + + ADD_FILE(tsf, 0600); + ADD_FILE(ucode_regs, 0400); + ADD_FILE(shm, 0400); + ADD_FILE(txstat, 0400); + ADD_FILE(restart, 0200); + +#undef ADD_FILE + + b43legacy_add_dynamic_debug(dev); +} + +void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e; + + if (!dev) + return; + e = dev->dfsentry; + if (!e) + 
return; + b43legacy_remove_dynamic_debug(dev); + + debugfs_remove(e->file_tsf.dentry); + debugfs_remove(e->file_ucode_regs.dentry); + debugfs_remove(e->file_shm.dentry); + debugfs_remove(e->file_txstat.dentry); + debugfs_remove(e->file_restart.dentry); + + debugfs_remove(e->subdir); + kfree(e->txstatlog.log); + kfree(e); +} + +void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + struct b43legacy_dfsentry *e = dev->dfsentry; + struct b43legacy_txstatus_log *log; + struct b43legacy_txstatus *cur; + int i; + + if (!e) + return; + log = &e->txstatlog; + B43legacy_WARN_ON(!irqs_disabled()); + spin_lock(&log->lock); + i = log->end + 1; + if (i == B43legacy_NR_LOGGED_TXSTATUS) + i = 0; + log->end = i; + cur = &(log->log[i]); + memcpy(cur, status, sizeof(*cur)); + spin_unlock(&log->lock); +} + +void b43legacy_debugfs_init(void) +{ + rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR(rootdir)) + rootdir = NULL; +} + +void b43legacy_debugfs_exit(void) +{ + debugfs_remove(rootdir); +} diff --git a/drivers/net/wireless/b43legacy/debugfs.h b/drivers/net/wireless/b43legacy/debugfs.h new file mode 100644 index 0000000..ae3b0d0 --- /dev/null +++ b/drivers/net/wireless/b43legacy/debugfs.h @@ -0,0 +1,89 @@ +#ifndef B43legacy_DEBUGFS_H_ +#define B43legacy_DEBUGFS_H_ + +struct b43legacy_wldev; +struct b43legacy_txstatus; + +enum b43legacy_dyndbg { /* Dynamic debugging features */ + B43legacy_DBG_XMITPOWER, + B43legacy_DBG_DMAOVERFLOW, + B43legacy_DBG_DMAVERBOSE, + B43legacy_DBG_PWORK_FAST, + B43legacy_DBG_PWORK_STOP, + __B43legacy_NR_DYNDBG, +}; + + +#ifdef CONFIG_B43LEGACY_DEBUG + +struct dentry; + +#define B43legacy_NR_LOGGED_TXSTATUS 100 + +struct b43legacy_txstatus_log { + struct b43legacy_txstatus *log; + int end; + spinlock_t lock; /* lock for debugging */ +}; + +struct b43legacy_dfs_file { + struct dentry *dentry; + char *buffer; + size_t data_len; +}; + +struct b43legacy_dfsentry { + struct b43legacy_wldev *dev; + struct dentry *subdir; + + struct b43legacy_dfs_file file_tsf; + struct b43legacy_dfs_file file_ucode_regs; + struct b43legacy_dfs_file file_shm; + struct b43legacy_dfs_file file_txstat; + struct b43legacy_dfs_file file_txpower_g; + struct b43legacy_dfs_file file_restart; + struct b43legacy_dfs_file file_loctls; + + struct b43legacy_txstatus_log txstatlog; + + /* Enabled/Disabled list for the dynamic debugging features. */ + u32 dyn_debug[__B43legacy_NR_DYNDBG]; + /* Dentries for the dynamic debugging entries. 
*/ + struct dentry *dyn_debug_dentries[__B43legacy_NR_DYNDBG]; +}; + +int b43legacy_debug(struct b43legacy_wldev *dev, + enum b43legacy_dyndbg feature); + +void b43legacy_debugfs_init(void); +void b43legacy_debugfs_exit(void); +void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev); +void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev); +void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); + +#else /* CONFIG_B43LEGACY_DEBUG*/ + +static inline +int b43legacy_debug(struct b43legacy_wldev *dev, + enum b43legacy_dyndbg feature) +{ + return 0; +} + +static inline +void b43legacy_debugfs_init(void) { } +static inline +void b43legacy_debugfs_exit(void) { } +static inline +void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev) { } +static inline +void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev) { } +static inline +void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) + { } + +#endif /* CONFIG_B43LEGACY_DEBUG*/ + +#endif /* B43legacy_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c new file mode 100644 index 0000000..8b9387c --- /dev/null +++ b/drivers/net/wireless/b43legacy/dma.c @@ -0,0 +1,1689 @@ +/* + + Broadcom B43legacy wireless driver + + DMA ringbuffer and descriptor allocation/management + + Copyright (c) 2005, 2006 Michael Buesch + + Some code in this file is derived from the b44.c driver + Copyright (C) 2002 David S. Miller + Copyright (C) Pekka Pietikainen + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43legacy.h" +#include "dma.h" +#include "main.h" +#include "debugfs.h" +#include "xmit.h" + +#include <linux/dma-mapping.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <net/dst.h> + +/* 32bit DMA ops.
*/ +static +struct b43legacy_dmadesc_generic *op32_idx2desc( + struct b43legacy_dmaring *ring, + int slot, + struct b43legacy_dmadesc_meta **meta) +{ + struct b43legacy_dmadesc32 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43legacy_dmadesc_generic *)desc; +} + +static void op32_fill_descriptor(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43legacy_dmadesc32 *descbase = ring->descbase; + int slot; + u32 ctl; + u32 addr; + u32 addrext; + + slot = (int)(&(desc->dma32) - descbase); + B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK); + addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addr |= ssb_dma_translation(ring->dev->dev); + ctl = (bufsize - ring->frameoffset) + & B43legacy_DMA32_DCTL_BYTECNT; + if (slot == ring->nr_slots - 1) + ctl |= B43legacy_DMA32_DCTL_DTABLEEND; + if (start) + ctl |= B43legacy_DMA32_DCTL_FRAMESTART; + if (end) + ctl |= B43legacy_DMA32_DCTL_FRAMEEND; + if (irq) + ctl |= B43legacy_DMA32_DCTL_IRQ; + ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT) + & B43legacy_DMA32_DCTL_ADDREXT_MASK; + + desc->dma32.control = cpu_to_le32(ctl); + desc->dma32.address = cpu_to_le32(addr); +} + +static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc32))); +} + +static void op32_tx_suspend(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL) + | B43legacy_DMA32_TXSUSPEND); +} + +static void op32_tx_resume(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL) + & ~B43legacy_DMA32_TXSUSPEND); +} + +static int op32_get_current_rxslot(struct b43legacy_dmaring *ring) +{ + u32 val; + + val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS); + val &= B43legacy_DMA32_RXDPTR; + + return (val / sizeof(struct b43legacy_dmadesc32)); +} + +static void op32_set_current_rxslot(struct b43legacy_dmaring *ring, + int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc32))); +} + +static const struct b43legacy_dma_ops dma32_ops = { + .idx2desc = op32_idx2desc, + .fill_descriptor = op32_fill_descriptor, + .poke_tx = op32_poke_tx, + .tx_suspend = op32_tx_suspend, + .tx_resume = op32_tx_resume, + .get_current_rxslot = op32_get_current_rxslot, + .set_current_rxslot = op32_set_current_rxslot, +}; + +/* 64bit DMA ops. 
*/ +static +struct b43legacy_dmadesc_generic *op64_idx2desc( + struct b43legacy_dmaring *ring, + int slot, + struct b43legacy_dmadesc_meta + **meta) +{ + struct b43legacy_dmadesc64 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43legacy_dmadesc_generic *)desc; +} + +static void op64_fill_descriptor(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43legacy_dmadesc64 *descbase = ring->descbase; + int slot; + u32 ctl0 = 0; + u32 ctl1 = 0; + u32 addrlo; + u32 addrhi; + u32 addrext; + + slot = (int)(&(desc->dma64) - descbase); + B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addrlo = (u32)(dmaaddr & 0xFFFFFFFF); + addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); + addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addrhi |= ssb_dma_translation(ring->dev->dev); + if (slot == ring->nr_slots - 1) + ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND; + if (start) + ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART; + if (end) + ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND; + if (irq) + ctl0 |= B43legacy_DMA64_DCTL0_IRQ; + ctl1 |= (bufsize - ring->frameoffset) + & B43legacy_DMA64_DCTL1_BYTECNT; + ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT) + & B43legacy_DMA64_DCTL1_ADDREXT_MASK; + + desc->dma64.control0 = cpu_to_le32(ctl0); + desc->dma64.control1 = cpu_to_le32(ctl1); + desc->dma64.address_low = cpu_to_le32(addrlo); + desc->dma64.address_high = cpu_to_le32(addrhi); +} + +static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc64))); +} + +static void op64_tx_suspend(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL) + | B43legacy_DMA64_TXSUSPEND); +} + +static void op64_tx_resume(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL) + & ~B43legacy_DMA64_TXSUSPEND); +} + +static int op64_get_current_rxslot(struct b43legacy_dmaring *ring) +{ + u32 val; + + val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS); + val &= B43legacy_DMA64_RXSTATDPTR; + + return (val / sizeof(struct b43legacy_dmadesc64)); +} + +static void op64_set_current_rxslot(struct b43legacy_dmaring *ring, + int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc64))); +} + +static const struct b43legacy_dma_ops dma64_ops = { + .idx2desc = op64_idx2desc, + .fill_descriptor = op64_fill_descriptor, + .poke_tx = op64_poke_tx, + .tx_suspend = op64_tx_suspend, + .tx_resume = op64_tx_resume, + .get_current_rxslot = op64_get_current_rxslot, + .set_current_rxslot = op64_set_current_rxslot, +}; + + +static inline int free_slots(struct b43legacy_dmaring *ring) +{ + return (ring->nr_slots - ring->used_slots); +} + +static inline int next_slot(struct b43legacy_dmaring *ring, int slot) +{ + B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); + if (slot == ring->nr_slots - 1) + return 0; + return slot + 1; +} + +static inline int prev_slot(struct b43legacy_dmaring *ring, int slot) +{ + B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1)); + if (slot == 0) + return ring->nr_slots - 1; + return slot - 1; +} + +#ifdef CONFIG_B43LEGACY_DEBUG +static void 
update_max_used_slots(struct b43legacy_dmaring *ring, + int current_used_slots) +{ + if (current_used_slots <= ring->max_used_slots) + return; + ring->max_used_slots = current_used_slots; + if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE)) + b43legacydbg(ring->dev->wl, + "max_used_slots increased to %d on %s ring %d\n", + ring->max_used_slots, + ring->tx ? "TX" : "RX", + ring->index); +} +#else +static inline +void update_max_used_slots(struct b43legacy_dmaring *ring, + int current_used_slots) +{ } +#endif /* DEBUG */ + +/* Request a slot for usage. */ +static inline +int request_slot(struct b43legacy_dmaring *ring) +{ + int slot; + + B43legacy_WARN_ON(!ring->tx); + B43legacy_WARN_ON(ring->stopped); + B43legacy_WARN_ON(free_slots(ring) == 0); + + slot = next_slot(ring, ring->current_slot); + ring->current_slot = slot; + ring->used_slots++; + + update_max_used_slots(ring, ring->used_slots); + + return slot; +} + +/* Mac80211-queue to b43legacy-ring mapping */ +static struct b43legacy_dmaring *priority_to_txring( + struct b43legacy_wldev *dev, + int queue_priority) +{ + struct b43legacy_dmaring *ring; + +/*FIXME: For now we always run on TX-ring-1 */ +return dev->dma.tx_ring1; + + /* 0 = highest priority */ + switch (queue_priority) { + default: + B43legacy_WARN_ON(1); + /* fallthrough */ + case 0: + ring = dev->dma.tx_ring3; + break; + case 1: + ring = dev->dma.tx_ring2; + break; + case 2: + ring = dev->dma.tx_ring1; + break; + case 3: + ring = dev->dma.tx_ring0; + break; + case 4: + ring = dev->dma.tx_ring4; + break; + case 5: + ring = dev->dma.tx_ring5; + break; + } + + return ring; +} + +/* Bcm4301-ring to mac80211-queue mapping */ +static inline int txring_to_priority(struct b43legacy_dmaring *ring) +{ + static const u8 idx_to_prio[] = + { 3, 2, 1, 0, 4, 5, }; + +/*FIXME: have only one queue, for now */ +return 0; + + return idx_to_prio[ring->index]; +} + + +static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type, + int controller_idx) +{ + static const u16 map64[] = { + B43legacy_MMIO_DMA64_BASE0, + B43legacy_MMIO_DMA64_BASE1, + B43legacy_MMIO_DMA64_BASE2, + B43legacy_MMIO_DMA64_BASE3, + B43legacy_MMIO_DMA64_BASE4, + B43legacy_MMIO_DMA64_BASE5, + }; + static const u16 map32[] = { + B43legacy_MMIO_DMA32_BASE0, + B43legacy_MMIO_DMA32_BASE1, + B43legacy_MMIO_DMA32_BASE2, + B43legacy_MMIO_DMA32_BASE3, + B43legacy_MMIO_DMA32_BASE4, + B43legacy_MMIO_DMA32_BASE5, + }; + + if (type == B43legacy_DMA_64BIT) { + B43legacy_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map64))); + return map64[controller_idx]; + } + B43legacy_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map32))); + return map32[controller_idx]; +} + +static inline +dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring, + unsigned char *buf, + size_t len, + int tx) +{ + dma_addr_t dmaaddr; + + if (tx) + dmaaddr = ssb_dma_map_single(ring->dev->dev, + buf, len, + DMA_TO_DEVICE); + else + dmaaddr = ssb_dma_map_single(ring->dev->dev, + buf, len, + DMA_FROM_DEVICE); + + return dmaaddr; +} + +static inline +void unmap_descbuffer(struct b43legacy_dmaring *ring, + dma_addr_t addr, + size_t len, + int tx) +{ + if (tx) + ssb_dma_unmap_single(ring->dev->dev, + addr, len, + DMA_TO_DEVICE); + else + ssb_dma_unmap_single(ring->dev->dev, + addr, len, + DMA_FROM_DEVICE); +} + +static inline +void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring, + dma_addr_t addr, + size_t len) +{ + B43legacy_WARN_ON(ring->tx); + + ssb_dma_sync_single_for_cpu(ring->dev->dev, + addr, len, 
DMA_FROM_DEVICE); +} + +static inline +void sync_descbuffer_for_device(struct b43legacy_dmaring *ring, + dma_addr_t addr, + size_t len) +{ + B43legacy_WARN_ON(ring->tx); + + ssb_dma_sync_single_for_device(ring->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline +void free_descriptor_buffer(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_meta *meta, + int irq_context) +{ + if (meta->skb) { + if (irq_context) + dev_kfree_skb_irq(meta->skb); + else + dev_kfree_skb(meta->skb); + meta->skb = NULL; + } +} + +static int alloc_ringmemory(struct b43legacy_dmaring *ring) +{ + /* GFP flags must match the flags in free_ringmemory()! */ + ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev, + B43legacy_DMA_RINGMEMSIZE, + &(ring->dmabase), + GFP_KERNEL); + if (!ring->descbase) { + b43legacyerr(ring->dev->wl, "DMA ringmemory allocation" + " failed\n"); + return -ENOMEM; + } + memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE); + + return 0; +} + +static void free_ringmemory(struct b43legacy_dmaring *ring) +{ + ssb_dma_free_consistent(ring->dev->dev, B43legacy_DMA_RINGMEMSIZE, + ring->descbase, ring->dmabase, GFP_KERNEL); +} + +/* Reset the RX DMA channel */ +static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev, + u16 mmio_base, + enum b43legacy_dmatype type) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + offset = (type == B43legacy_DMA_64BIT) ? + B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL; + b43legacy_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = (type == B43legacy_DMA_64BIT) ? + B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS; + value = b43legacy_read32(dev, mmio_base + offset); + if (type == B43legacy_DMA_64BIT) { + value &= B43legacy_DMA64_RXSTAT; + if (value == B43legacy_DMA64_RXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43legacy_DMA32_RXSTATE; + if (value == B43legacy_DMA32_RXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43legacyerr(dev->wl, "DMA RX reset timed out\n"); + return -ENODEV; + } + + return 0; +} + +/* Reset the TX DMA channel */ +static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev, + u16 mmio_base, + enum b43legacy_dmatype type) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + for (i = 0; i < 10; i++) { + offset = (type == B43legacy_DMA_64BIT) ? + B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS; + value = b43legacy_read32(dev, mmio_base + offset); + if (type == B43legacy_DMA_64BIT) { + value &= B43legacy_DMA64_TXSTAT; + if (value == B43legacy_DMA64_TXSTAT_DISABLED || + value == B43legacy_DMA64_TXSTAT_IDLEWAIT || + value == B43legacy_DMA64_TXSTAT_STOPPED) + break; + } else { + value &= B43legacy_DMA32_TXSTATE; + if (value == B43legacy_DMA32_TXSTAT_DISABLED || + value == B43legacy_DMA32_TXSTAT_IDLEWAIT || + value == B43legacy_DMA32_TXSTAT_STOPPED) + break; + } + msleep(1); + } + offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL : + B43legacy_DMA32_TXCTL; + b43legacy_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = (type == B43legacy_DMA_64BIT) ?
+ B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS; + value = b43legacy_read32(dev, mmio_base + offset); + if (type == B43legacy_DMA_64BIT) { + value &= B43legacy_DMA64_TXSTAT; + if (value == B43legacy_DMA64_TXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43legacy_DMA32_TXSTATE; + if (value == B43legacy_DMA32_TXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43legacyerr(dev->wl, "DMA TX reset timed out\n"); + return -ENODEV; + } + /* ensure the reset is completed. */ + msleep(1); + + return 0; +} + +/* Check if a DMA mapping address is invalid. */ +static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring, + dma_addr_t addr, + size_t buffersize, + bool dma_to_device) +{ + if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) + return 1; + + switch (ring->type) { + case B43legacy_DMA_30BIT: + if ((u64)addr + buffersize > (1ULL << 30)) + goto address_error; + break; + case B43legacy_DMA_32BIT: + if ((u64)addr + buffersize > (1ULL << 32)) + goto address_error; + break; + case B43legacy_DMA_64BIT: + /* Currently we can't have addresses beyond 64 bits in the kernel. */ + break; + } + + /* The address is OK. */ + return 0; + +address_error: + /* We can't support this address. Unmap it again. */ + unmap_descbuffer(ring, addr, buffersize, dma_to_device); + + return 1; +} + +static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + struct b43legacy_dmadesc_meta *meta, + gfp_t gfp_flags) +{ + struct b43legacy_rxhdr_fw3 *rxhdr; + struct b43legacy_hwtxstatus *txstat; + dma_addr_t dmaaddr; + struct sk_buff *skb; + + B43legacy_WARN_ON(ring->tx); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + dmaaddr = map_descbuffer(ring, skb->data, + ring->rx_buffersize, 0); + if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { + /* ugh. try to realloc in zone_dma */ + gfp_flags |= GFP_DMA; + + dev_kfree_skb_any(skb); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + dmaaddr = map_descbuffer(ring, skb->data, + ring->rx_buffersize, 0); + } + + if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { + dev_kfree_skb_any(skb); + return -EIO; + } + + meta->skb = skb; + meta->dmaaddr = dmaaddr; + ring->ops->fill_descriptor(ring, desc, dmaaddr, + ring->rx_buffersize, 0, 0, 0); + + rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data); + rxhdr->frame_len = 0; + txstat = (struct b43legacy_hwtxstatus *)(skb->data); + txstat->cookie = 0; + + return 0; +} + +/* Allocate the initial descbuffers. + * This is used for an RX ring only. + */ +static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring) +{ + int i; + int err = -ENOMEM; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); + if (err) { + b43legacyerr(ring->dev->wl, + "Failed to allocate initial descbuffers\n"); + goto err_unwind; + } + } + mb(); /* all descbuffer setup before next line */ + ring->used_slots = ring->nr_slots; + err = 0; +out: + return err; + +err_unwind: + for (i--; i >= 0; i--) { + desc = ring->ops->idx2desc(ring, i, &meta); + + unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); + dev_kfree_skb(meta->skb); + } + goto out; +} + +/* Do initial setup of the DMA controller. 
+ * Reset the controller, write the ring busaddress + * and switch the "enable" bit on. + */ +static int dmacontroller_setup(struct b43legacy_dmaring *ring) +{ + int err = 0; + u32 value; + u32 addrext; + u32 trans = ssb_dma_translation(ring->dev->dev); + + if (ring->tx) { + if (ring->type == B43legacy_DMA_64BIT) { + u64 ringbase = (u64)(ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43legacy_DMA64_TXENABLE; + value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT) + & B43legacy_DMA64_TXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, + ((ringbase >> 32) + & ~SSB_DMA_TRANSLATION_MASK) + | trans); + } else { + u32 ringbase = (u32)(ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43legacy_DMA32_TXENABLE; + value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT) + & B43legacy_DMA32_TXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, + (ringbase & + ~SSB_DMA_TRANSLATION_MASK) + | trans); + } + } else { + err = alloc_initial_descbuffers(ring); + if (err) + goto out; + if (ring->type == B43legacy_DMA_64BIT) { + u64 ringbase = (u64)(ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << + B43legacy_DMA64_RXFROFF_SHIFT); + value |= B43legacy_DMA64_RXENABLE; + value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT) + & B43legacy_DMA64_RXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, + ((ringbase >> 32) & + ~SSB_DMA_TRANSLATION_MASK) | + trans); + b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX, + 200); + } else { + u32 ringbase = (u32)(ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << + B43legacy_DMA32_RXFROFF_SHIFT); + value |= B43legacy_DMA32_RXENABLE; + value |= (addrext << + B43legacy_DMA32_RXADDREXT_SHIFT) + & B43legacy_DMA32_RXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, + (ringbase & + ~SSB_DMA_TRANSLATION_MASK) + | trans); + b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, + 200); + } + } + +out: + return err; +} + +/* Shutdown the DMA controller. 
*/ +static void dmacontroller_cleanup(struct b43legacy_dmaring *ring) +{ + if (ring->tx) { + b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base, + ring->type); + if (ring->type == B43legacy_DMA_64BIT) { + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0); + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0); + } else + b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0); + } else { + b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base, + ring->type); + if (ring->type == B43legacy_DMA_64BIT) { + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0); + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0); + } else + b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0); + } +} + +static void free_all_descbuffers(struct b43legacy_dmaring *ring) +{ + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + int i; + + if (!ring->used_slots) + return; + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + if (!meta->skb) { + B43legacy_WARN_ON(!ring->tx); + continue; + } + if (ring->tx) + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); + else + unmap_descbuffer(ring, meta->dmaaddr, + ring->rx_buffersize, 0); + free_descriptor_buffer(ring, meta, 0); + } +} + +static u64 supported_dma_mask(struct b43legacy_wldev *dev) +{ + u32 tmp; + u16 mmio_base; + + tmp = b43legacy_read32(dev, SSB_TMSHIGH); + if (tmp & SSB_TMSHIGH_DMA64) + return DMA_BIT_MASK(64); + mmio_base = b43legacy_dmacontroller_base(0, 0); + b43legacy_write32(dev, + mmio_base + B43legacy_DMA32_TXCTL, + B43legacy_DMA32_TXADDREXT_MASK); + tmp = b43legacy_read32(dev, mmio_base + + B43legacy_DMA32_TXCTL); + if (tmp & B43legacy_DMA32_TXADDREXT_MASK) + return DMA_BIT_MASK(32); + + return DMA_BIT_MASK(30); +} + +static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask) +{ + if (dmamask == DMA_BIT_MASK(30)) + return B43legacy_DMA_30BIT; + if (dmamask == DMA_BIT_MASK(32)) + return B43legacy_DMA_32BIT; + if (dmamask == DMA_BIT_MASK(64)) + return B43legacy_DMA_64BIT; + B43legacy_WARN_ON(1); + return B43legacy_DMA_30BIT; +} + +/* Main initialization function. 
*/ +static +struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, + int controller_index, + int for_tx, + enum b43legacy_dmatype type) +{ + struct b43legacy_dmaring *ring; + int err; + int nr_slots; + dma_addr_t dma_test; + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto out; + ring->type = type; + ring->dev = dev; + + nr_slots = B43legacy_RXRING_SLOTS; + if (for_tx) + nr_slots = B43legacy_TXRING_SLOTS; + + ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta), + GFP_KERNEL); + if (!ring->meta) + goto err_kfree_ring; + if (for_tx) { + ring->txhdr_cache = kcalloc(nr_slots, + sizeof(struct b43legacy_txhdr_fw3), + GFP_KERNEL); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + /* test for ability to dma to txhdr_cache */ + dma_test = ssb_dma_map_single(dev->dev, ring->txhdr_cache, + sizeof(struct b43legacy_txhdr_fw3), + DMA_TO_DEVICE); + + if (b43legacy_dma_mapping_error(ring, dma_test, + sizeof(struct b43legacy_txhdr_fw3), 1)) { + /* ugh realloc */ + kfree(ring->txhdr_cache); + ring->txhdr_cache = kcalloc(nr_slots, + sizeof(struct b43legacy_txhdr_fw3), + GFP_KERNEL | GFP_DMA); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + dma_test = ssb_dma_map_single(dev->dev, + ring->txhdr_cache, + sizeof(struct b43legacy_txhdr_fw3), + DMA_TO_DEVICE); + + if (b43legacy_dma_mapping_error(ring, dma_test, + sizeof(struct b43legacy_txhdr_fw3), 1)) + goto err_kfree_txhdr_cache; + } + + ssb_dma_unmap_single(dev->dev, dma_test, + sizeof(struct b43legacy_txhdr_fw3), + DMA_TO_DEVICE); + } + + ring->nr_slots = nr_slots; + ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index); + ring->index = controller_index; + if (type == B43legacy_DMA_64BIT) + ring->ops = &dma64_ops; + else + ring->ops = &dma32_ops; + if (for_tx) { + ring->tx = 1; + ring->current_slot = -1; + } else { + if (ring->index == 0) { + ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE; + ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET; + } else if (ring->index == 3) { + ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE; + ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET; + } else + B43legacy_WARN_ON(1); + } + spin_lock_init(&ring->lock); +#ifdef CONFIG_B43LEGACY_DEBUG + ring->last_injected_overflow = jiffies; +#endif + + err = alloc_ringmemory(ring); + if (err) + goto err_kfree_txhdr_cache; + err = dmacontroller_setup(ring); + if (err) + goto err_free_ringmemory; + +out: + return ring; + +err_free_ringmemory: + free_ringmemory(ring); +err_kfree_txhdr_cache: + kfree(ring->txhdr_cache); +err_kfree_meta: + kfree(ring->meta); +err_kfree_ring: + kfree(ring); + ring = NULL; + goto out; +} + +/* Main cleanup function. */ +static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring) +{ + if (!ring) + return; + + b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:" + " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base, + (ring->tx) ? "TX" : "RX", ring->max_used_slots, + ring->nr_slots); + /* Device IRQs are disabled prior entering this function, + * so no need to take care of concurrency with rx handler stuff. 
+ */ + dmacontroller_cleanup(ring); + free_all_descbuffers(ring); + free_ringmemory(ring); + + kfree(ring->txhdr_cache); + kfree(ring->meta); + kfree(ring); +} + +void b43legacy_dma_free(struct b43legacy_wldev *dev) +{ + struct b43legacy_dma *dma; + + if (b43legacy_using_pio(dev)) + return; + dma = &dev->dma; + + b43legacy_destroy_dmaring(dma->rx_ring3); + dma->rx_ring3 = NULL; + b43legacy_destroy_dmaring(dma->rx_ring0); + dma->rx_ring0 = NULL; + + b43legacy_destroy_dmaring(dma->tx_ring5); + dma->tx_ring5 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring4); + dma->tx_ring4 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring3); + dma->tx_ring3 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring2); + dma->tx_ring2 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring1); + dma->tx_ring1 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring0); + dma->tx_ring0 = NULL; +} + +static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask) +{ + u64 orig_mask = mask; + bool fallback = 0; + int err; + + /* Try to set the DMA mask. If it fails, try falling back to a + * lower mask, as we can always also support a lower one. */ + while (1) { + err = ssb_dma_set_mask(dev->dev, mask); + if (!err) + break; + if (mask == DMA_BIT_MASK(64)) { + mask = DMA_BIT_MASK(32); + fallback = 1; + continue; + } + if (mask == DMA_BIT_MASK(32)) { + mask = DMA_BIT_MASK(30); + fallback = 1; + continue; + } + b43legacyerr(dev->wl, "The machine/kernel does not support " + "the required %u-bit DMA mask\n", + (unsigned int)dma_mask_to_engine_type(orig_mask)); + return -EOPNOTSUPP; + } + if (fallback) { + b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-" + "bit\n", + (unsigned int)dma_mask_to_engine_type(orig_mask), + (unsigned int)dma_mask_to_engine_type(mask)); + } + + return 0; +} + +int b43legacy_dma_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_dma *dma = &dev->dma; + struct b43legacy_dmaring *ring; + int err; + u64 dmamask; + enum b43legacy_dmatype type; + + dmamask = supported_dma_mask(dev); + type = dma_mask_to_engine_type(dmamask); + err = b43legacy_dma_set_mask(dev, dmamask); + if (err) { +#ifdef CONFIG_B43LEGACY_PIO + b43legacywarn(dev->wl, "DMA for this device not supported. " + "Falling back to PIO\n"); + dev->__using_pio = 1; + return -EAGAIN; +#else + b43legacyerr(dev->wl, "DMA for this device not supported and " + "no PIO support compiled in\n"); + return -EOPNOTSUPP; +#endif + } + + err = -ENOMEM; + /* setup TX DMA channels. */ + ring = b43legacy_setup_dmaring(dev, 0, 1, type); + if (!ring) + goto out; + dma->tx_ring0 = ring; + + ring = b43legacy_setup_dmaring(dev, 1, 1, type); + if (!ring) + goto err_destroy_tx0; + dma->tx_ring1 = ring; + + ring = b43legacy_setup_dmaring(dev, 2, 1, type); + if (!ring) + goto err_destroy_tx1; + dma->tx_ring2 = ring; + + ring = b43legacy_setup_dmaring(dev, 3, 1, type); + if (!ring) + goto err_destroy_tx2; + dma->tx_ring3 = ring; + + ring = b43legacy_setup_dmaring(dev, 4, 1, type); + if (!ring) + goto err_destroy_tx3; + dma->tx_ring4 = ring; + + ring = b43legacy_setup_dmaring(dev, 5, 1, type); + if (!ring) + goto err_destroy_tx4; + dma->tx_ring5 = ring; + + /* setup RX DMA channels. 
*/ + ring = b43legacy_setup_dmaring(dev, 0, 0, type); + if (!ring) + goto err_destroy_tx5; + dma->rx_ring0 = ring; + + if (dev->dev->id.revision < 5) { + ring = b43legacy_setup_dmaring(dev, 3, 0, type); + if (!ring) + goto err_destroy_rx0; + dma->rx_ring3 = ring; + } + + b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type); + err = 0; +out: + return err; + +err_destroy_rx0: + b43legacy_destroy_dmaring(dma->rx_ring0); + dma->rx_ring0 = NULL; +err_destroy_tx5: + b43legacy_destroy_dmaring(dma->tx_ring5); + dma->tx_ring5 = NULL; +err_destroy_tx4: + b43legacy_destroy_dmaring(dma->tx_ring4); + dma->tx_ring4 = NULL; +err_destroy_tx3: + b43legacy_destroy_dmaring(dma->tx_ring3); + dma->tx_ring3 = NULL; +err_destroy_tx2: + b43legacy_destroy_dmaring(dma->tx_ring2); + dma->tx_ring2 = NULL; +err_destroy_tx1: + b43legacy_destroy_dmaring(dma->tx_ring1); + dma->tx_ring1 = NULL; +err_destroy_tx0: + b43legacy_destroy_dmaring(dma->tx_ring0); + dma->tx_ring0 = NULL; + goto out; +} + +/* Generate a cookie for the TX header. */ +static u16 generate_cookie(struct b43legacy_dmaring *ring, + int slot) +{ + u16 cookie = 0x1000; + + /* Use the upper 4 bits of the cookie as + * DMA controller ID and store the slot number + * in the lower 12 bits. + * Note that the cookie must never be 0, as this + * is a special value used in RX path. + */ + switch (ring->index) { + case 0: + cookie = 0xA000; + break; + case 1: + cookie = 0xB000; + break; + case 2: + cookie = 0xC000; + break; + case 3: + cookie = 0xD000; + break; + case 4: + cookie = 0xE000; + break; + case 5: + cookie = 0xF000; + break; + } + B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000)); + cookie |= (u16)slot; + + return cookie; +} + +/* Inspect a cookie and find out to which controller/slot it belongs. */ +static +struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev, + u16 cookie, int *slot) +{ + struct b43legacy_dma *dma = &dev->dma; + struct b43legacy_dmaring *ring = NULL; + + switch (cookie & 0xF000) { + case 0xA000: + ring = dma->tx_ring0; + break; + case 0xB000: + ring = dma->tx_ring1; + break; + case 0xC000: + ring = dma->tx_ring2; + break; + case 0xD000: + ring = dma->tx_ring3; + break; + case 0xE000: + ring = dma->tx_ring4; + break; + case 0xF000: + ring = dma->tx_ring5; + break; + default: + B43legacy_WARN_ON(1); + } + *slot = (cookie & 0x0FFF); + B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots)); + + return ring; +} + +static int dma_tx_fragment(struct b43legacy_dmaring *ring, + struct sk_buff **in_skb) +{ + struct sk_buff *skb = *in_skb; + const struct b43legacy_dma_ops *ops = ring->ops; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u8 *header; + int slot, old_top_slot, old_used_slots; + int err; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + struct b43legacy_dmadesc_meta *meta_hdr; + struct sk_buff *bounce_skb; + +#define SLOTS_PER_PACKET 2 + B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); + + old_top_slot = ring->current_slot; + old_used_slots = ring->used_slots; + + /* Get a slot for the header. 
*/ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta_hdr); + memset(meta_hdr, 0, sizeof(*meta_hdr)); + + header = &(ring->txhdr_cache[slot * sizeof( + struct b43legacy_txhdr_fw3)]); + err = b43legacy_generate_txhdr(ring->dev, header, + skb->data, skb->len, info, + generate_cookie(ring, slot)); + if (unlikely(err)) { + ring->current_slot = old_top_slot; + ring->used_slots = old_used_slots; + return err; + } + + meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, + sizeof(struct b43legacy_txhdr_fw3), 1); + if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr, + sizeof(struct b43legacy_txhdr_fw3), 1)) { + ring->current_slot = old_top_slot; + ring->used_slots = old_used_slots; + return -EIO; + } + ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, + sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0); + + /* Get a slot for the payload. */ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta); + memset(meta, 0, sizeof(*meta)); + + meta->skb = skb; + meta->is_last_fragment = 1; + + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + /* create a bounce buffer in zone_dma on mapping failure. */ + if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { + bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + if (!bounce_skb) { + ring->current_slot = old_top_slot; + ring->used_slots = old_used_slots; + err = -ENOMEM; + goto out_unmap_hdr; + } + + memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); + memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb)); + bounce_skb->dev = skb->dev; + skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb)); + info = IEEE80211_SKB_CB(bounce_skb); + + dev_kfree_skb_any(skb); + skb = bounce_skb; + *in_skb = bounce_skb; + meta->skb = skb; + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { + ring->current_slot = old_top_slot; + ring->used_slots = old_used_slots; + err = -EIO; + goto out_free_bounce; + } + } + + ops->fill_descriptor(ring, desc, meta->dmaaddr, + skb->len, 0, 1, 1); + + wmb(); /* previous stuff MUST be done */ + /* Now transfer the whole frame. */ + ops->poke_tx(ring, next_slot(ring, slot)); + return 0; + +out_free_bounce: + dev_kfree_skb_any(skb); +out_unmap_hdr: + unmap_descbuffer(ring, meta_hdr->dmaaddr, + sizeof(struct b43legacy_txhdr_fw3), 1); + return err; +} + +static inline +int should_inject_overflow(struct b43legacy_dmaring *ring) +{ +#ifdef CONFIG_B43LEGACY_DEBUG + if (unlikely(b43legacy_debug(ring->dev, + B43legacy_DBG_DMAOVERFLOW))) { + /* Check if we should inject another ringbuffer overflow + * to test handling of this situation in the stack. */ + unsigned long next_overflow; + + next_overflow = ring->last_injected_overflow + HZ; + if (time_after(jiffies, next_overflow)) { + ring->last_injected_overflow = jiffies; + b43legacydbg(ring->dev->wl, + "Injecting TX ring overflow on " + "DMA controller %d\n", ring->index); + return 1; + } + } +#endif /* CONFIG_B43LEGACY_DEBUG */ + return 0; +} + +int b43legacy_dma_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb) +{ + struct b43legacy_dmaring *ring; + struct ieee80211_hdr *hdr; + int err = 0; + unsigned long flags; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + ring = priority_to_txring(dev, skb_get_queue_mapping(skb)); + spin_lock_irqsave(&ring->lock, flags); + B43legacy_WARN_ON(!ring->tx); + + if (unlikely(ring->stopped)) { + /* We get here only because of a bug in mac80211. 
+ * Because of a race, one packet may be queued after + * the queue is stopped, thus we got called when we shouldn't. + * For now, just refuse the transmit. */ + if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) + b43legacyerr(dev->wl, "Packet after queue stopped\n"); + err = -ENOSPC; + goto out_unlock; + } + + if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) { + /* If we get here, we have a real error with the queue + * full, but queues not stopped. */ + b43legacyerr(dev->wl, "DMA queue overflow\n"); + err = -ENOSPC; + goto out_unlock; + } + + /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing + * into the skb data or cb now. */ + hdr = NULL; + info = NULL; + err = dma_tx_fragment(ring, &skb); + if (unlikely(err == -ENOKEY)) { + /* Drop this packet, as we don't have the encryption key + * anymore and must not transmit it unencrypted. */ + dev_kfree_skb_any(skb); + err = 0; + goto out_unlock; + } + if (unlikely(err)) { + b43legacyerr(dev->wl, "DMA tx mapping failure\n"); + goto out_unlock; + } + if ((free_slots(ring) < SLOTS_PER_PACKET) || + should_inject_overflow(ring)) { + /* This TX ring is full. */ + ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 1; + if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) + b43legacydbg(dev->wl, "Stopped TX ring %d\n", + ring->index); + } +out_unlock: + spin_unlock_irqrestore(&ring->lock, flags); + + return err; +} + +void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + const struct b43legacy_dma_ops *ops; + struct b43legacy_dmaring *ring; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + int retry_limit; + int slot; + + ring = parse_cookie(dev, status->cookie, &slot); + if (unlikely(!ring)) + return; + B43legacy_WARN_ON(!irqs_disabled()); + spin_lock(&ring->lock); + + B43legacy_WARN_ON(!ring->tx); + ops = ring->ops; + while (1) { + B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + desc = ops->idx2desc(ring, slot, &meta); + + if (meta->skb) + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); + else + unmap_descbuffer(ring, meta->dmaaddr, + sizeof(struct b43legacy_txhdr_fw3), + 1); + + if (meta->is_last_fragment) { + struct ieee80211_tx_info *info; + BUG_ON(!meta->skb); + info = IEEE80211_SKB_CB(meta->skb); + + /* preserve the configured retry limit before clearing the status. + * The xmit function has overwritten the rc's value with the actual + * retry limit done by the hardware. */ + retry_limit = info->status.rates[0].count; + ieee80211_tx_info_clear_status(info); + + if (status->acked) + info->flags |= IEEE80211_TX_STAT_ACK; + + if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) { + /* + * If the short retries (RTS, not data frame) have exceeded + * the limit, the hw will not have tried the selected rate, + * but will have used the fallback rate instead. + * Don't let the rate control count attempts for the selected + * rate in this case, otherwise the statistics will be off. + */ + info->status.rates[0].count = 0; + info->status.rates[1].count = status->frame_count; + } else { + if (status->frame_count > retry_limit) { + info->status.rates[0].count = retry_limit; + info->status.rates[1].count = status->frame_count - + retry_limit; + + } else { + info->status.rates[0].count = status->frame_count; + info->status.rates[1].idx = -1; + } + } + + /* Call back to inform the ieee80211 subsystem about the + * status of the transmission.
+ * Some fields of txstat are already filled in dma_tx(). + */ + ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb); + /* skb is freed by ieee80211_tx_status_irqsafe() */ + meta->skb = NULL; + } else { + /* No need to call free_descriptor_buffer here, as + * this is only the txhdr, which is not allocated. + */ + B43legacy_WARN_ON(meta->skb != NULL); + } + + /* Everything unmapped and free'd. So it's not used anymore. */ + ring->used_slots--; + + if (meta->is_last_fragment) + break; + slot = next_slot(ring, slot); + } + dev->stats.last_tx = jiffies; + if (ring->stopped) { + B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET); + ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 0; + if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) + b43legacydbg(dev->wl, "Woke up TX ring %d\n", + ring->index); + } + + spin_unlock(&ring->lock); +} + +static void dma_rx(struct b43legacy_dmaring *ring, + int *slot) +{ + const struct b43legacy_dma_ops *ops = ring->ops; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + struct b43legacy_rxhdr_fw3 *rxhdr; + struct sk_buff *skb; + u16 len; + int err; + dma_addr_t dmaaddr; + + desc = ops->idx2desc(ring, *slot, &meta); + + sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); + skb = meta->skb; + + if (ring->index == 3) { + /* We received an xmit status. */ + struct b43legacy_hwtxstatus *hw = + (struct b43legacy_hwtxstatus *)skb->data; + int i = 0; + + while (hw->cookie == 0) { + if (i > 100) + break; + i++; + udelay(2); + barrier(); + } + b43legacy_handle_hwtxstatus(ring->dev, hw); + /* recycle the descriptor buffer. */ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + + return; + } + rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data; + len = le16_to_cpu(rxhdr->frame_len); + if (len == 0) { + int i = 0; + + do { + udelay(2); + barrier(); + len = le16_to_cpu(rxhdr->frame_len); + } while (len == 0 && i++ < 5); + if (unlikely(len == 0)) { + /* recycle the descriptor buffer. */ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + goto drop; + } + } + if (unlikely(len > ring->rx_buffersize)) { + /* The data did not fit into one descriptor buffer + * and is split over multiple buffers. + * This should never happen, as we try to allocate buffers + * big enough. So simply ignore this packet. + */ + int cnt = 0; + s32 tmp = len; + + while (1) { + desc = ops->idx2desc(ring, *slot, &meta); + /* recycle the descriptor buffer. 
*/ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + *slot = next_slot(ring, *slot); + cnt++; + tmp -= ring->rx_buffersize; + if (tmp <= 0) + break; + } + b43legacyerr(ring->dev->wl, "DMA RX buffer too small " + "(len: %u, buffer: %u, nr-dropped: %d)\n", + len, ring->rx_buffersize, cnt); + goto drop; + } + + dmaaddr = meta->dmaaddr; + err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); + if (unlikely(err)) { + b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()" + " failed\n"); + sync_descbuffer_for_device(ring, dmaaddr, + ring->rx_buffersize); + goto drop; + } + + unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); + skb_put(skb, len + ring->frameoffset); + skb_pull(skb, ring->frameoffset); + + b43legacy_rx(ring->dev, skb, rxhdr); +drop: + return; +} + +void b43legacy_dma_rx(struct b43legacy_dmaring *ring) +{ + const struct b43legacy_dma_ops *ops = ring->ops; + int slot; + int current_slot; + int used_slots = 0; + + B43legacy_WARN_ON(ring->tx); + current_slot = ops->get_current_rxslot(ring); + B43legacy_WARN_ON(!(current_slot >= 0 && current_slot < + ring->nr_slots)); + + slot = ring->current_slot; + for (; slot != current_slot; slot = next_slot(ring, slot)) { + dma_rx(ring, &slot); + update_max_used_slots(ring, ++used_slots); + } + ops->set_current_rxslot(ring, slot); + ring->current_slot = slot; +} + +static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + B43legacy_WARN_ON(!ring->tx); + ring->ops->tx_suspend(ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + B43legacy_WARN_ON(!ring->tx); + ring->ops->tx_resume(ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev) +{ + b43legacy_power_saving_ctl_bits(dev, -1, 1); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5); +} + +void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev) +{ + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0); + b43legacy_power_saving_ctl_bits(dev, -1, -1); +} diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h new file mode 100644 index 0000000..f968104 --- /dev/null +++ b/drivers/net/wireless/b43legacy/dma.h @@ -0,0 +1,337 @@ +#ifndef B43legacy_DMA_H_ +#define B43legacy_DMA_H_ + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/linkage.h> +#include <asm/atomic.h> + +#include "b43legacy.h" + + +/* DMA-Interrupt reasons. */ +#define B43legacy_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ + | (1 << 14) | (1 << 15)) +#define B43legacy_DMAIRQ_NONFATALMASK (1 << 13) +#define B43legacy_DMAIRQ_RX_DONE (1 << 16) + + +/*** 32-bit DMA Engine. ***/ + +/* 32-bit DMA controller registers.
*/ +#define B43legacy_DMA32_TXCTL 0x00 +#define B43legacy_DMA32_TXENABLE 0x00000001 +#define B43legacy_DMA32_TXSUSPEND 0x00000002 +#define B43legacy_DMA32_TXLOOPBACK 0x00000004 +#define B43legacy_DMA32_TXFLUSH 0x00000010 +#define B43legacy_DMA32_TXADDREXT_MASK 0x00030000 +#define B43legacy_DMA32_TXADDREXT_SHIFT 16 +#define B43legacy_DMA32_TXRING 0x04 +#define B43legacy_DMA32_TXINDEX 0x08 +#define B43legacy_DMA32_TXSTATUS 0x0C +#define B43legacy_DMA32_TXDPTR 0x00000FFF +#define B43legacy_DMA32_TXSTATE 0x0000F000 +#define B43legacy_DMA32_TXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA32_TXSTAT_ACTIVE 0x00001000 +#define B43legacy_DMA32_TXSTAT_IDLEWAIT 0x00002000 +#define B43legacy_DMA32_TXSTAT_STOPPED 0x00003000 +#define B43legacy_DMA32_TXSTAT_SUSP 0x00004000 +#define B43legacy_DMA32_TXERROR 0x000F0000 +#define B43legacy_DMA32_TXERR_NOERR 0x00000000 +#define B43legacy_DMA32_TXERR_PROT 0x00010000 +#define B43legacy_DMA32_TXERR_UNDERRUN 0x00020000 +#define B43legacy_DMA32_TXERR_BUFREAD 0x00030000 +#define B43legacy_DMA32_TXERR_DESCREAD 0x00040000 +#define B43legacy_DMA32_TXACTIVE 0xFFF00000 +#define B43legacy_DMA32_RXCTL 0x10 +#define B43legacy_DMA32_RXENABLE 0x00000001 +#define B43legacy_DMA32_RXFROFF_MASK 0x000000FE +#define B43legacy_DMA32_RXFROFF_SHIFT 1 +#define B43legacy_DMA32_RXDIRECTFIFO 0x00000100 +#define B43legacy_DMA32_RXADDREXT_MASK 0x00030000 +#define B43legacy_DMA32_RXADDREXT_SHIFT 16 +#define B43legacy_DMA32_RXRING 0x14 +#define B43legacy_DMA32_RXINDEX 0x18 +#define B43legacy_DMA32_RXSTATUS 0x1C +#define B43legacy_DMA32_RXDPTR 0x00000FFF +#define B43legacy_DMA32_RXSTATE 0x0000F000 +#define B43legacy_DMA32_RXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA32_RXSTAT_ACTIVE 0x00001000 +#define B43legacy_DMA32_RXSTAT_IDLEWAIT 0x00002000 +#define B43legacy_DMA32_RXSTAT_STOPPED 0x00003000 +#define B43legacy_DMA32_RXERROR 0x000F0000 +#define B43legacy_DMA32_RXERR_NOERR 0x00000000 +#define B43legacy_DMA32_RXERR_PROT 0x00010000 +#define B43legacy_DMA32_RXERR_OVERFLOW 0x00020000 +#define B43legacy_DMA32_RXERR_BUFWRITE 0x00030000 +#define B43legacy_DMA32_RXERR_DESCREAD 0x00040000 +#define B43legacy_DMA32_RXACTIVE 0xFFF00000 + +/* 32-bit DMA descriptor. */ +struct b43legacy_dmadesc32 { + __le32 control; + __le32 address; +} __attribute__((__packed__)); +#define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF +#define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000 +#define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16 +#define B43legacy_DMA32_DCTL_DTABLEEND 0x10000000 +#define B43legacy_DMA32_DCTL_IRQ 0x20000000 +#define B43legacy_DMA32_DCTL_FRAMEEND 0x40000000 +#define B43legacy_DMA32_DCTL_FRAMESTART 0x80000000 + + + +/*** 64-bit DMA Engine. ***/ + +/* 64-bit DMA controller registers. 
*/ +#define B43legacy_DMA64_TXCTL 0x00 +#define B43legacy_DMA64_TXENABLE 0x00000001 +#define B43legacy_DMA64_TXSUSPEND 0x00000002 +#define B43legacy_DMA64_TXLOOPBACK 0x00000004 +#define B43legacy_DMA64_TXFLUSH 0x00000010 +#define B43legacy_DMA64_TXADDREXT_MASK 0x00030000 +#define B43legacy_DMA64_TXADDREXT_SHIFT 16 +#define B43legacy_DMA64_TXINDEX 0x04 +#define B43legacy_DMA64_TXRINGLO 0x08 +#define B43legacy_DMA64_TXRINGHI 0x0C +#define B43legacy_DMA64_TXSTATUS 0x10 +#define B43legacy_DMA64_TXSTATDPTR 0x00001FFF +#define B43legacy_DMA64_TXSTAT 0xF0000000 +#define B43legacy_DMA64_TXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA64_TXSTAT_ACTIVE 0x10000000 +#define B43legacy_DMA64_TXSTAT_IDLEWAIT 0x20000000 +#define B43legacy_DMA64_TXSTAT_STOPPED 0x30000000 +#define B43legacy_DMA64_TXSTAT_SUSP 0x40000000 +#define B43legacy_DMA64_TXERROR 0x14 +#define B43legacy_DMA64_TXERRDPTR 0x0001FFFF +#define B43legacy_DMA64_TXERR 0xF0000000 +#define B43legacy_DMA64_TXERR_NOERR 0x00000000 +#define B43legacy_DMA64_TXERR_PROT 0x10000000 +#define B43legacy_DMA64_TXERR_UNDERRUN 0x20000000 +#define B43legacy_DMA64_TXERR_TRANSFER 0x30000000 +#define B43legacy_DMA64_TXERR_DESCREAD 0x40000000 +#define B43legacy_DMA64_TXERR_CORE 0x50000000 +#define B43legacy_DMA64_RXCTL 0x20 +#define B43legacy_DMA64_RXENABLE 0x00000001 +#define B43legacy_DMA64_RXFROFF_MASK 0x000000FE +#define B43legacy_DMA64_RXFROFF_SHIFT 1 +#define B43legacy_DMA64_RXDIRECTFIFO 0x00000100 +#define B43legacy_DMA64_RXADDREXT_MASK 0x00030000 +#define B43legacy_DMA64_RXADDREXT_SHIFT 16 +#define B43legacy_DMA64_RXINDEX 0x24 +#define B43legacy_DMA64_RXRINGLO 0x28 +#define B43legacy_DMA64_RXRINGHI 0x2C +#define B43legacy_DMA64_RXSTATUS 0x30 +#define B43legacy_DMA64_RXSTATDPTR 0x00001FFF +#define B43legacy_DMA64_RXSTAT 0xF0000000 +#define B43legacy_DMA64_RXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA64_RXSTAT_ACTIVE 0x10000000 +#define B43legacy_DMA64_RXSTAT_IDLEWAIT 0x20000000 +#define B43legacy_DMA64_RXSTAT_STOPPED 0x30000000 +#define B43legacy_DMA64_RXSTAT_SUSP 0x40000000 +#define B43legacy_DMA64_RXERROR 0x34 +#define B43legacy_DMA64_RXERRDPTR 0x0001FFFF +#define B43legacy_DMA64_RXERR 0xF0000000 +#define B43legacy_DMA64_RXERR_NOERR 0x00000000 +#define B43legacy_DMA64_RXERR_PROT 0x10000000 +#define B43legacy_DMA64_RXERR_UNDERRUN 0x20000000 +#define B43legacy_DMA64_RXERR_TRANSFER 0x30000000 +#define B43legacy_DMA64_RXERR_DESCREAD 0x40000000 +#define B43legacy_DMA64_RXERR_CORE 0x50000000 + +/* 64-bit DMA descriptor. 
*/ +struct b43legacy_dmadesc64 { + __le32 control0; + __le32 control1; + __le32 address_low; + __le32 address_high; +} __attribute__((__packed__)); +#define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000 +#define B43legacy_DMA64_DCTL0_IRQ 0x20000000 +#define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000 +#define B43legacy_DMA64_DCTL0_FRAMESTART 0x80000000 +#define B43legacy_DMA64_DCTL1_BYTECNT 0x00001FFF +#define B43legacy_DMA64_DCTL1_ADDREXT_MASK 0x00030000 +#define B43legacy_DMA64_DCTL1_ADDREXT_SHIFT 16 + + + +struct b43legacy_dmadesc_generic { + union { + struct b43legacy_dmadesc32 dma32; + struct b43legacy_dmadesc64 dma64; + } __attribute__((__packed__)); +} __attribute__((__packed__)); + + +/* Misc DMA constants */ +#define B43legacy_DMA_RINGMEMSIZE PAGE_SIZE +#define B43legacy_DMA0_RX_FRAMEOFFSET 30 +#define B43legacy_DMA3_RX_FRAMEOFFSET 0 + + +/* DMA engine tuning knobs */ +#define B43legacy_TXRING_SLOTS 128 +#define B43legacy_RXRING_SLOTS 64 +#define B43legacy_DMA0_RX_BUFFERSIZE (2304 + 100) +#define B43legacy_DMA3_RX_BUFFERSIZE 16 + + + +#ifdef CONFIG_B43LEGACY_DMA + + +struct sk_buff; +struct b43legacy_private; +struct b43legacy_txstatus; + + +struct b43legacy_dmadesc_meta { + /* The kernel DMA-able buffer. */ + struct sk_buff *skb; + /* DMA base bus-address of the descriptor buffer. */ + dma_addr_t dmaaddr; + /* ieee80211 TX status. Only used once per 802.11 frag. */ + bool is_last_fragment; +}; + +struct b43legacy_dmaring; + +/* Lowlevel DMA operations that differ between 32bit and 64bit DMA. */ +struct b43legacy_dma_ops { + struct b43legacy_dmadesc_generic * (*idx2desc) + (struct b43legacy_dmaring *ring, + int slot, + struct b43legacy_dmadesc_meta + **meta); + void (*fill_descriptor)(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq); + void (*poke_tx)(struct b43legacy_dmaring *ring, int slot); + void (*tx_suspend)(struct b43legacy_dmaring *ring); + void (*tx_resume)(struct b43legacy_dmaring *ring); + int (*get_current_rxslot)(struct b43legacy_dmaring *ring); + void (*set_current_rxslot)(struct b43legacy_dmaring *ring, int slot); +}; + +enum b43legacy_dmatype { + B43legacy_DMA_30BIT = 30, + B43legacy_DMA_32BIT = 32, + B43legacy_DMA_64BIT = 64, +}; + +struct b43legacy_dmaring { + /* Lowlevel DMA ops. */ + const struct b43legacy_dma_ops *ops; + /* Kernel virtual base address of the ring memory. */ + void *descbase; + /* Meta data about all descriptors. */ + struct b43legacy_dmadesc_meta *meta; + /* Cache of TX headers for each slot. + * This is to avoid an allocation on each TX. + * This is NULL for an RX ring. + */ + u8 *txhdr_cache; + /* (Unadjusted) DMA base bus-address of the ring memory. */ + dma_addr_t dmabase; + /* Number of descriptor slots in the ring. */ + int nr_slots; + /* Number of used descriptor slots. */ + int used_slots; + /* Currently used slot in the ring. */ + int current_slot; + /* Frameoffset in octets. */ + u32 frameoffset; + /* Descriptor buffer size. */ + u16 rx_buffersize; + /* The MMIO base register of the DMA controller. */ + u16 mmio_base; + /* DMA controller index number (0-5). */ + int index; + /* Boolean. Is this a TX ring? */ + bool tx; + /* The type of DMA engine used. */ + enum b43legacy_dmatype type; + /* Boolean. Is this ring stopped at ieee80211 level? */ + bool stopped; + /* Lock, only used for TX. */ + spinlock_t lock; + struct b43legacy_wldev *dev; +#ifdef CONFIG_B43LEGACY_DEBUG + /* Maximum number of used slots. 
*/ + int max_used_slots; + /* Last time we injected a ring overflow. */ + unsigned long last_injected_overflow; +#endif /* CONFIG_B43LEGACY_DEBUG*/ +}; + + +static inline +u32 b43legacy_dma_read(struct b43legacy_dmaring *ring, + u16 offset) +{ + return b43legacy_read32(ring->dev, ring->mmio_base + offset); +} + +static inline +void b43legacy_dma_write(struct b43legacy_dmaring *ring, + u16 offset, u32 value) +{ + b43legacy_write32(ring->dev, ring->mmio_base + offset, value); +} + + +int b43legacy_dma_init(struct b43legacy_wldev *dev); +void b43legacy_dma_free(struct b43legacy_wldev *dev); + +void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev); +void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev); + +int b43legacy_dma_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb); +void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); + +void b43legacy_dma_rx(struct b43legacy_dmaring *ring); + +#else /* CONFIG_B43LEGACY_DMA */ + + +static inline +int b43legacy_dma_init(struct b43legacy_wldev *dev) +{ + return 0; +} +static inline +void b43legacy_dma_free(struct b43legacy_wldev *dev) +{ +} +static inline +int b43legacy_dma_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb) +{ + return 0; +} +static inline +void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ +} +static inline +void b43legacy_dma_rx(struct b43legacy_dmaring *ring) +{ +} +static inline +void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev) +{ +} +static inline +void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev) +{ +} + +#endif /* CONFIG_B43LEGACY_DMA */ +#endif /* B43legacy_DMA_H_ */ diff --git a/drivers/net/wireless/b43legacy/ilt.c b/drivers/net/wireless/b43legacy/ilt.c new file mode 100644 index 0000000..a849078 --- /dev/null +++ b/drivers/net/wireless/b43legacy/ilt.c @@ -0,0 +1,336 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43legacy.h" +#include "ilt.h" +#include "phy.h" + + +/**** Initial Internal Lookup Tables ****/ + +const u32 b43legacy_ilt_rotor[B43legacy_ILT_ROTOR_SIZE] = { + 0xFEB93FFD, 0xFEC63FFD, /* 0 */ + 0xFED23FFD, 0xFEDF3FFD, + 0xFEEC3FFE, 0xFEF83FFE, + 0xFF053FFE, 0xFF113FFE, + 0xFF1E3FFE, 0xFF2A3FFF, /* 8 */ + 0xFF373FFF, 0xFF443FFF, + 0xFF503FFF, 0xFF5D3FFF, + 0xFF693FFF, 0xFF763FFF, + 0xFF824000, 0xFF8F4000, /* 16 */ + 0xFF9B4000, 0xFFA84000, + 0xFFB54000, 0xFFC14000, + 0xFFCE4000, 0xFFDA4000, + 0xFFE74000, 0xFFF34000, /* 24 */ + 0x00004000, 0x000D4000, + 0x00194000, 0x00264000, + 0x00324000, 0x003F4000, + 0x004B4000, 0x00584000, /* 32 */ + 0x00654000, 0x00714000, + 0x007E4000, 0x008A3FFF, + 0x00973FFF, 0x00A33FFF, + 0x00B03FFF, 0x00BC3FFF, /* 40 */ + 0x00C93FFF, 0x00D63FFF, + 0x00E23FFE, 0x00EF3FFE, + 0x00FB3FFE, 0x01083FFE, + 0x01143FFE, 0x01213FFD, /* 48 */ + 0x012E3FFD, 0x013A3FFD, + 0x01473FFD, +}; + +const u32 b43legacy_ilt_retard[B43legacy_ILT_RETARD_SIZE] = { + 0xDB93CB87, 0xD666CF64, /* 0 */ + 0xD1FDD358, 0xCDA6D826, + 0xCA38DD9F, 0xC729E2B4, + 0xC469E88E, 0xC26AEE2B, + 0xC0DEF46C, 0xC073FA62, /* 8 */ + 0xC01D00D5, 0xC0760743, + 0xC1560D1E, 0xC2E51369, + 0xC4ED18FF, 0xC7AC1ED7, + 0xCB2823B2, 0xCEFA28D9, /* 16 */ + 0xD2F62D3F, 0xD7BB3197, + 0xDCE53568, 0xE1FE3875, + 0xE7D13B35, 0xED663D35, + 0xF39B3EC4, 0xF98E3FA7, /* 24 */ + 0x00004000, 0x06723FA7, + 0x0C653EC4, 0x129A3D35, + 0x182F3B35, 0x1E023875, + 0x231B3568, 0x28453197, /* 32 */ + 0x2D0A2D3F, 0x310628D9, + 0x34D823B2, 0x38541ED7, + 0x3B1318FF, 0x3D1B1369, + 0x3EAA0D1E, 0x3F8A0743, /* 40 */ + 0x3FE300D5, 0x3F8DFA62, + 0x3F22F46C, 0x3D96EE2B, + 0x3B97E88E, 0x38D7E2B4, + 0x35C8DD9F, 0x325AD826, /* 48 */ + 0x2E03D358, 0x299ACF64, + 0x246DCB87, +}; + +const u16 b43legacy_ilt_finefreqa[B43legacy_ILT_FINEFREQA_SIZE] = { + 0x0082, 0x0082, 0x0102, 0x0182, /* 0 */ + 0x0202, 0x0282, 0x0302, 0x0382, + 0x0402, 0x0482, 0x0502, 0x0582, + 0x05E2, 0x0662, 0x06E2, 0x0762, + 0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */ + 0x09C2, 0x0A22, 0x0AA2, 0x0B02, + 0x0B82, 0x0BE2, 0x0C62, 0x0CC2, + 0x0D42, 0x0DA2, 0x0E02, 0x0E62, + 0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */ + 0x1062, 0x10C2, 0x1122, 0x1182, + 0x11E2, 0x1242, 0x12A2, 0x12E2, + 0x1342, 0x13A2, 0x1402, 0x1442, + 0x14A2, 0x14E2, 0x1542, 0x1582, /* 48 */ + 0x15E2, 0x1622, 0x1662, 0x16C1, + 0x1701, 0x1741, 0x1781, 0x17E1, + 0x1821, 0x1861, 0x18A1, 0x18E1, + 0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */ + 0x1A21, 0x1A61, 0x1AA1, 0x1AC1, + 0x1B01, 0x1B41, 0x1B81, 0x1BA1, + 0x1BE1, 0x1C21, 0x1C41, 0x1C81, + 0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */ + 0x1D61, 0x1DA1, 0x1DC1, 0x1E01, + 0x1E21, 0x1E61, 0x1E81, 0x1EA1, + 0x1EE1, 0x1F01, 0x1F21, 0x1F41, + 0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */ + 0x2001, 0x2041, 0x2061, 0x2081, + 0x20A1, 0x20C1, 0x20E1, 0x2101, + 0x2121, 0x2141, 0x2161, 0x2181, + 0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */ + 0x2221, 0x2241, 0x2261, 0x2281, + 0x22A1, 0x22C1, 0x22C1, 0x22E1, + 0x2301, 0x2321, 0x2341, 0x2361, + 0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */ + 0x23E1, 0x23E1, 0x2401, 0x2421, + 0x2441, 0x2441, 0x2461, 0x2481, + 0x2481, 0x24A1, 0x24C1, 0x24C1, + 0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */ + 0x2541, 0x2541, 0x2561, 0x2561, + 0x2581, 0x25A1, 0x25A1, 0x25C1, + 0x25C1, 0x25E1, 0x2601, 0x2601, + 0x2621, 0x2621, 0x2641, 0x2641, /* 160 */ + 0x2661, 0x2661, 0x2681, 0x2681, + 0x26A1, 0x26A1, 0x26C1, 0x26C1, + 0x26E1, 0x26E1, 0x2701, 0x2701, + 0x2721, 0x2721, 0x2740, 0x2740, /* 176 */ + 0x2760, 0x2760, 0x2780, 0x2780, + 0x2780, 0x27A0, 0x27A0, 0x27C0, + 0x27C0, 0x27E0, 
0x27E0, 0x27E0, + 0x2800, 0x2800, 0x2820, 0x2820, /* 192 */ + 0x2820, 0x2840, 0x2840, 0x2840, + 0x2860, 0x2860, 0x2880, 0x2880, + 0x2880, 0x28A0, 0x28A0, 0x28A0, + 0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */ + 0x28E0, 0x28E0, 0x2900, 0x2900, + 0x2900, 0x2920, 0x2920, 0x2920, + 0x2940, 0x2940, 0x2940, 0x2960, + 0x2960, 0x2960, 0x2960, 0x2980, /* 224 */ + 0x2980, 0x2980, 0x29A0, 0x29A0, + 0x29A0, 0x29A0, 0x29C0, 0x29C0, + 0x29C0, 0x29E0, 0x29E0, 0x29E0, + 0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */ + 0x2A00, 0x2A20, 0x2A20, 0x2A20, + 0x2A20, 0x2A40, 0x2A40, 0x2A40, + 0x2A40, 0x2A60, 0x2A60, 0x2A60, +}; + +const u16 b43legacy_ilt_finefreqg[B43legacy_ILT_FINEFREQG_SIZE] = { + 0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */ + 0x05A9, 0x0669, 0x0709, 0x0789, + 0x0829, 0x08A9, 0x0929, 0x0989, + 0x0A09, 0x0A69, 0x0AC9, 0x0B29, + 0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */ + 0x0D09, 0x0D69, 0x0DA9, 0x0E09, + 0x0E69, 0x0EA9, 0x0F09, 0x0F49, + 0x0FA9, 0x0FE9, 0x1029, 0x1089, + 0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */ + 0x11E9, 0x1229, 0x1289, 0x12C9, + 0x1309, 0x1349, 0x1389, 0x13C9, + 0x1409, 0x1449, 0x14A9, 0x14E9, + 0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */ + 0x1629, 0x1669, 0x16A9, 0x16E8, + 0x1728, 0x1768, 0x17A8, 0x17E8, + 0x1828, 0x1868, 0x18A8, 0x18E8, + 0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */ + 0x1A28, 0x1A68, 0x1AA8, 0x1AE8, + 0x1B28, 0x1B68, 0x1BA8, 0x1BE8, + 0x1C28, 0x1C68, 0x1CA8, 0x1CE8, + 0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */ + 0x1E48, 0x1E88, 0x1EC8, 0x1F08, + 0x1F48, 0x1F88, 0x1FE8, 0x2028, + 0x2068, 0x20A8, 0x2108, 0x2148, + 0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */ + 0x22C8, 0x2308, 0x2348, 0x23A8, + 0x23E8, 0x2448, 0x24A8, 0x24E8, + 0x2548, 0x25A8, 0x2608, 0x2668, + 0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */ + 0x2847, 0x28C7, 0x2947, 0x29A7, + 0x2A27, 0x2AC7, 0x2B47, 0x2BE7, + 0x2CA7, 0x2D67, 0x2E47, 0x2F67, + 0x3247, 0x3526, 0x3646, 0x3726, /* 128 */ + 0x3806, 0x38A6, 0x3946, 0x39E6, + 0x3A66, 0x3AE6, 0x3B66, 0x3BC6, + 0x3C45, 0x3CA5, 0x3D05, 0x3D85, + 0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */ + 0x3F45, 0x3FA5, 0x4005, 0x4045, + 0x40A5, 0x40E5, 0x4145, 0x4185, + 0x41E5, 0x4225, 0x4265, 0x42C5, + 0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */ + 0x4424, 0x4464, 0x44C4, 0x4504, + 0x4544, 0x4584, 0x45C4, 0x4604, + 0x4644, 0x46A4, 0x46E4, 0x4724, + 0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */ + 0x4864, 0x48A4, 0x48E4, 0x4924, + 0x4964, 0x49A4, 0x49E4, 0x4A24, + 0x4A64, 0x4AA4, 0x4AE4, 0x4B23, + 0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */ + 0x4C63, 0x4CA3, 0x4CE3, 0x4D23, + 0x4D63, 0x4DA3, 0x4DE3, 0x4E23, + 0x4E63, 0x4EA3, 0x4EE3, 0x4F23, + 0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */ + 0x5083, 0x50C3, 0x5103, 0x5143, + 0x5183, 0x51E2, 0x5222, 0x5262, + 0x52A2, 0x52E2, 0x5342, 0x5382, + 0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */ + 0x5502, 0x5542, 0x55A2, 0x55E2, + 0x5642, 0x5682, 0x56E2, 0x5722, + 0x5782, 0x57E1, 0x5841, 0x58A1, + 0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */ + 0x5AA1, 0x5B01, 0x5B81, 0x5BE1, + 0x5C61, 0x5D01, 0x5D80, 0x5E20, + 0x5EE0, 0x5FA0, 0x6080, 0x61C0, +}; + +const u16 b43legacy_ilt_noisea2[B43legacy_ILT_NOISEA2_SIZE] = { + 0x0001, 0x0001, 0x0001, 0xFFFE, + 0xFFFE, 0x3FFF, 0x1000, 0x0393, +}; + +const u16 b43legacy_ilt_noisea3[B43legacy_ILT_NOISEA3_SIZE] = { + 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, + 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, +}; + +const u16 b43legacy_ilt_noiseg1[B43legacy_ILT_NOISEG1_SIZE] = { + 0x013C, 0x01F5, 0x031A, 0x0631, + 0x0001, 0x0001, 0x0001, 0x0001, +}; + +const u16 b43legacy_ilt_noiseg2[B43legacy_ILT_NOISEG2_SIZE] = { + 0x5484, 0x3C40, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 
0x0000, +}; + +const u16 b43legacy_ilt_noisescaleg1[B43legacy_ILT_NOISESCALEG_SIZE] = { + 0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */ + 0x2F2D, 0x2A2A, 0x2527, 0x1F21, + 0x1A1D, 0x1719, 0x1616, 0x1414, + 0x1414, 0x1400, 0x1414, 0x1614, + 0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */ + 0x2A27, 0x2F2A, 0x332D, 0x3B35, + 0x5140, 0x6C62, 0x0077, +}; + +const u16 b43legacy_ilt_noisescaleg2[B43legacy_ILT_NOISESCALEG_SIZE] = { + 0xD8DD, 0xCBD4, 0xBCC0, 0XB6B7, /* 0 */ + 0xB2B0, 0xADAD, 0xA7A9, 0x9FA1, + 0x969B, 0x9195, 0x8F8F, 0x8A8A, + 0x8A8A, 0x8A00, 0x8A8A, 0x8F8A, + 0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */ + 0xADA9, 0xB2AD, 0xB6B0, 0xBCB7, + 0xCBC0, 0xD8D4, 0x00DD, +}; + +const u16 b43legacy_ilt_noisescaleg3[B43legacy_ILT_NOISESCALEG_SIZE] = { + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA400, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0x00A4, +}; + +const u16 b43legacy_ilt_sigmasqr1[B43legacy_ILT_SIGMASQR_SIZE] = { + 0x007A, 0x0075, 0x0071, 0x006C, /* 0 */ + 0x0067, 0x0063, 0x005E, 0x0059, + 0x0054, 0x0050, 0x004B, 0x0046, + 0x0042, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 16 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x0000, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 32 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x0042, 0x0046, 0x004B, 0x0050, + 0x0054, 0x0059, 0x005E, 0x0063, + 0x0067, 0x006C, 0x0071, 0x0075, /* 48 */ + 0x007A, +}; + +const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE] = { + 0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */ + 0x00D6, 0x00D4, 0x00D2, 0x00CF, + 0x00CD, 0x00CA, 0x00C7, 0x00C4, + 0x00C1, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x0000, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00C1, 0x00C4, 0x00C7, 0x00CA, + 0x00CD, 0x00CF, 0x00D2, 0x00D4, + 0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */ + 0x00DE, +}; + +/**** Helper functions to access the device Internal Lookup Tables ****/ + +void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val) +{ + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); + mmiowb(); + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val); +} + +void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, u32 val) +{ + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); + mmiowb(); + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA2, + (val & 0xFFFF0000) >> 16); + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, + val & 0x0000FFFF); +} + +u16 b43legacy_ilt_read(struct b43legacy_wldev *dev, u16 offset) +{ + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); + return b43legacy_phy_read(dev, B43legacy_PHY_ILT_G_DATA1); +} diff --git a/drivers/net/wireless/b43legacy/ilt.h b/drivers/net/wireless/b43legacy/ilt.h new file mode 100644 index 0000000..48bcf37 --- /dev/null +++ b/drivers/net/wireless/b43legacy/ilt.h @@ -0,0 +1,34 @@ +#ifndef B43legacy_ILT_H_ +#define B43legacy_ILT_H_ + +#define B43legacy_ILT_ROTOR_SIZE 53 +extern const u32 b43legacy_ilt_rotor[B43legacy_ILT_ROTOR_SIZE]; +#define B43legacy_ILT_RETARD_SIZE 53 +extern const u32 b43legacy_ilt_retard[B43legacy_ILT_RETARD_SIZE]; +#define B43legacy_ILT_FINEFREQA_SIZE 256 +extern const u16 b43legacy_ilt_finefreqa[B43legacy_ILT_FINEFREQA_SIZE]; +#define B43legacy_ILT_FINEFREQG_SIZE 256 
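+/* Usage sketch: the PHY setup code uploads these tables through the ILT
+ * access helpers declared at the bottom of this header, roughly like
+ *
+ *	for (i = 0; i < B43legacy_ILT_FINEFREQG_SIZE; i++)
+ *		b43legacy_ilt_write(dev, 0x5800 + i, b43legacy_ilt_finefreqg[i]);
+ *
+ * (illustrative only; the 0x5800 base offset is an assumption here, the
+ * authoritative offsets live in phy.c)
+ */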
+extern const u16 b43legacy_ilt_finefreqg[B43legacy_ILT_FINEFREQG_SIZE]; +#define B43legacy_ILT_NOISEA2_SIZE 8 +extern const u16 b43legacy_ilt_noisea2[B43legacy_ILT_NOISEA2_SIZE]; +#define B43legacy_ILT_NOISEA3_SIZE 8 +extern const u16 b43legacy_ilt_noisea3[B43legacy_ILT_NOISEA3_SIZE]; +#define B43legacy_ILT_NOISEG1_SIZE 8 +extern const u16 b43legacy_ilt_noiseg1[B43legacy_ILT_NOISEG1_SIZE]; +#define B43legacy_ILT_NOISEG2_SIZE 8 +extern const u16 b43legacy_ilt_noiseg2[B43legacy_ILT_NOISEG2_SIZE]; +#define B43legacy_ILT_NOISESCALEG_SIZE 27 +extern const u16 b43legacy_ilt_noisescaleg1[B43legacy_ILT_NOISESCALEG_SIZE]; +extern const u16 b43legacy_ilt_noisescaleg2[B43legacy_ILT_NOISESCALEG_SIZE]; +extern const u16 b43legacy_ilt_noisescaleg3[B43legacy_ILT_NOISESCALEG_SIZE]; +#define B43legacy_ILT_SIGMASQR_SIZE 53 +extern const u16 b43legacy_ilt_sigmasqr1[B43legacy_ILT_SIGMASQR_SIZE]; +extern const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE]; + + +void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val); +void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, + u32 val); +u16 b43legacy_ilt_read(struct b43legacy_wldev *dev, u16 offset); + +#endif /* B43legacy_ILT_H_ */ diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c new file mode 100644 index 0000000..37e9be8 --- /dev/null +++ b/drivers/net/wireless/b43legacy/leds.c @@ -0,0 +1,243 @@ +/* + + Broadcom B43 wireless driver + LED control + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005 Stefano Brivio + Copyright (c) 2005-2007 Michael Buesch + Copyright (c) 2005 Danny van Dyk + Copyright (c) 2005 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43legacy.h" +#include "leds.h" +#include "rfkill.h" + + +static void b43legacy_led_turn_on(struct b43legacy_wldev *dev, u8 led_index, + bool activelow) +{ + struct b43legacy_wl *wl = dev->wl; + unsigned long flags; + u16 ctl; + + spin_lock_irqsave(&wl->leds_lock, flags); + ctl = b43legacy_read16(dev, B43legacy_MMIO_GPIO_CONTROL); + if (activelow) + ctl &= ~(1 << led_index); + else + ctl |= (1 << led_index); + b43legacy_write16(dev, B43legacy_MMIO_GPIO_CONTROL, ctl); + spin_unlock_irqrestore(&wl->leds_lock, flags); +} + +static void b43legacy_led_turn_off(struct b43legacy_wldev *dev, u8 led_index, + bool activelow) +{ + struct b43legacy_wl *wl = dev->wl; + unsigned long flags; + u16 ctl; + + spin_lock_irqsave(&wl->leds_lock, flags); + ctl = b43legacy_read16(dev, B43legacy_MMIO_GPIO_CONTROL); + if (activelow) + ctl |= (1 << led_index); + else + ctl &= ~(1 << led_index); + b43legacy_write16(dev, B43legacy_MMIO_GPIO_CONTROL, ctl); + spin_unlock_irqrestore(&wl->leds_lock, flags); +} + +/* Callback from the LED subsystem. 
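+ * The LED core calls this to change brightness; any value other than LED_OFF maps to a plain GPIO on/off toggle, and the LED is forced off while the radio is disabled.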
*/ +static void b43legacy_led_brightness_set(struct led_classdev *led_dev, + enum led_brightness brightness) +{ + struct b43legacy_led *led = container_of(led_dev, struct b43legacy_led, + led_dev); + struct b43legacy_wldev *dev = led->dev; + bool radio_enabled; + + /* Checking the radio-enabled status here is slightly racy, + * but we want to avoid the locking overhead and we don't care + * whether the LED has the wrong state for a second. */ + radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable); + + if (brightness == LED_OFF || !radio_enabled) + b43legacy_led_turn_off(dev, led->index, led->activelow); + else + b43legacy_led_turn_on(dev, led->index, led->activelow); +} + +static int b43legacy_register_led(struct b43legacy_wldev *dev, + struct b43legacy_led *led, + const char *name, + const char *default_trigger, + u8 led_index, bool activelow) +{ + int err; + + b43legacy_led_turn_off(dev, led_index, activelow); + if (led->dev) + return -EEXIST; + if (!default_trigger) + return -EINVAL; + led->dev = dev; + led->index = led_index; + led->activelow = activelow; + strncpy(led->name, name, sizeof(led->name)); + + led->led_dev.name = led->name; + led->led_dev.default_trigger = default_trigger; + led->led_dev.brightness_set = b43legacy_led_brightness_set; + + err = led_classdev_register(dev->dev->dev, &led->led_dev); + if (err) { + b43legacywarn(dev->wl, "LEDs: Failed to register %s\n", name); + led->dev = NULL; + return err; + } + return 0; +} + +static void b43legacy_unregister_led(struct b43legacy_led *led) +{ + if (!led->dev) + return; + led_classdev_unregister(&led->led_dev); + b43legacy_led_turn_off(led->dev, led->index, led->activelow); + led->dev = NULL; +} + +static void b43legacy_map_led(struct b43legacy_wldev *dev, + u8 led_index, + enum b43legacy_led_behaviour behaviour, + bool activelow) +{ + struct ieee80211_hw *hw = dev->wl->hw; + char name[B43legacy_LED_MAX_NAME_LEN + 1]; + + /* Map the b43 specific LED behaviour value to the + * generic LED triggers. */ + switch (behaviour) { + case B43legacy_LED_INACTIVE: + break; + case B43legacy_LED_OFF: + b43legacy_led_turn_off(dev, led_index, activelow); + break; + case B43legacy_LED_ON: + b43legacy_led_turn_on(dev, led_index, activelow); + break; + case B43legacy_LED_ACTIVITY: + case B43legacy_LED_TRANSFER: + case B43legacy_LED_APTRANSFER: + snprintf(name, sizeof(name), + "b43legacy-%s::tx", wiphy_name(hw->wiphy)); + b43legacy_register_led(dev, &dev->led_tx, name, + ieee80211_get_tx_led_name(hw), + led_index, activelow); + snprintf(name, sizeof(name), + "b43legacy-%s::rx", wiphy_name(hw->wiphy)); + b43legacy_register_led(dev, &dev->led_rx, name, + ieee80211_get_rx_led_name(hw), + led_index, activelow); + break; + case B43legacy_LED_RADIO_ALL: + case B43legacy_LED_RADIO_A: + case B43legacy_LED_RADIO_B: + case B43legacy_LED_MODE_BG: + snprintf(name, sizeof(name), + "b43legacy-%s::radio", wiphy_name(hw->wiphy)); + b43legacy_register_led(dev, &dev->led_radio, name, + ieee80211_get_radio_led_name(hw), + led_index, activelow); + /* Sync the RF-kill LED state with radio and switch states. 
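+ * (turn it on only if both the software radio state and the hardware rfkill switch say the radio is enabled)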
*/ + if (dev->phy.radio_on && b43legacy_is_hw_radio_enabled(dev)) + b43legacy_led_turn_on(dev, led_index, activelow); + break; + case B43legacy_LED_WEIRD: + case B43legacy_LED_ASSOC: + snprintf(name, sizeof(name), + "b43legacy-%s::assoc", wiphy_name(hw->wiphy)); + b43legacy_register_led(dev, &dev->led_assoc, name, + ieee80211_get_assoc_led_name(hw), + led_index, activelow); + break; + default: + b43legacywarn(dev->wl, "LEDs: Unknown behaviour 0x%02X\n", + behaviour); + break; + } +} + +void b43legacy_leds_init(struct b43legacy_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + u8 sprom[4]; + int i; + enum b43legacy_led_behaviour behaviour; + bool activelow; + + sprom[0] = bus->sprom.gpio0; + sprom[1] = bus->sprom.gpio1; + sprom[2] = bus->sprom.gpio2; + sprom[3] = bus->sprom.gpio3; + + for (i = 0; i < 4; i++) { + if (sprom[i] == 0xFF) { + /* There is no LED information in the SPROM + * for this LED. Hardcode it here. */ + activelow = 0; + switch (i) { + case 0: + behaviour = B43legacy_LED_ACTIVITY; + activelow = 1; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ) + behaviour = B43legacy_LED_RADIO_ALL; + break; + case 1: + behaviour = B43legacy_LED_RADIO_B; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK) + behaviour = B43legacy_LED_ASSOC; + break; + case 2: + behaviour = B43legacy_LED_RADIO_A; + break; + case 3: + behaviour = B43legacy_LED_OFF; + break; + default: + B43legacy_WARN_ON(1); + return; + } + } else { + behaviour = sprom[i] & B43legacy_LED_BEHAVIOUR; + activelow = !!(sprom[i] & B43legacy_LED_ACTIVELOW); + } + b43legacy_map_led(dev, i, behaviour, activelow); + } +} + +void b43legacy_leds_exit(struct b43legacy_wldev *dev) +{ + b43legacy_unregister_led(&dev->led_tx); + b43legacy_unregister_led(&dev->led_rx); + b43legacy_unregister_led(&dev->led_assoc); + b43legacy_unregister_led(&dev->led_radio); +} diff --git a/drivers/net/wireless/b43legacy/leds.h b/drivers/net/wireless/b43legacy/leds.h new file mode 100644 index 0000000..9ff6750 --- /dev/null +++ b/drivers/net/wireless/b43legacy/leds.h @@ -0,0 +1,63 @@ +#ifndef B43legacy_LEDS_H_ +#define B43legacy_LEDS_H_ + +struct b43legacy_wldev; + +#ifdef CONFIG_B43LEGACY_LEDS + +#include +#include + + +#define B43legacy_LED_MAX_NAME_LEN 31 + +struct b43legacy_led { + struct b43legacy_wldev *dev; + /* The LED class device */ + struct led_classdev led_dev; + /* The index number of the LED. */ + u8 index; + /* If activelow is true, the LED is ON if the + * bit is switched off. */ + bool activelow; + /* The unique name string for this LED device. 
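+ * Also referenced by led_dev.name, so it must stay valid for the lifetime of the classdev.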
*/ + char name[B43legacy_LED_MAX_NAME_LEN + 1]; +}; + +#define B43legacy_LED_BEHAVIOUR 0x7F +#define B43legacy_LED_ACTIVELOW 0x80 +/* LED behaviour values */ +enum b43legacy_led_behaviour { + B43legacy_LED_OFF, + B43legacy_LED_ON, + B43legacy_LED_ACTIVITY, + B43legacy_LED_RADIO_ALL, + B43legacy_LED_RADIO_A, + B43legacy_LED_RADIO_B, + B43legacy_LED_MODE_BG, + B43legacy_LED_TRANSFER, + B43legacy_LED_APTRANSFER, + B43legacy_LED_WEIRD, + B43legacy_LED_ASSOC, + B43legacy_LED_INACTIVE, +}; + +void b43legacy_leds_init(struct b43legacy_wldev *dev); +void b43legacy_leds_exit(struct b43legacy_wldev *dev); + +#else /* CONFIG_B43LEGACY_LEDS */ +/* LED support disabled */ + +struct b43legacy_led { + /* empty */ +}; + +static inline void b43legacy_leds_init(struct b43legacy_wldev *dev) +{ +} +static inline void b43legacy_leds_exit(struct b43legacy_wldev *dev) +{ +} +#endif /* CONFIG_B43LEGACY_LEDS */ + +#endif /* B43legacy_LEDS_H_ */ diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c new file mode 100644 index 0000000..1d070be --- /dev/null +++ b/drivers/net/wireless/b43legacy/main.c @@ -0,0 +1,3990 @@ +/* + * + * Broadcom B43legacy wireless driver + * + * Copyright (c) 2005 Martin Langer + * Copyright (c) 2005-2008 Stefano Brivio + * Copyright (c) 2005, 2006 Michael Buesch + * Copyright (c) 2005 Danny van Dyk + * Copyright (c) 2005 Andreas Jaggi + * Copyright (c) 2007 Larry Finger + * + * Some parts of the code in this file are derived from the ipw2200 + * driver Copyright(c) 2003 - 2004 Intel Corporation. + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "b43legacy.h" +#include "main.h" +#include "debugfs.h" +#include "phy.h" +#include "dma.h" +#include "pio.h" +#include "sysfs.h" +#include "xmit.h" +#include "radio.h" + + +MODULE_DESCRIPTION("Broadcom B43legacy wireless driver"); +MODULE_AUTHOR("Martin Langer"); +MODULE_AUTHOR("Stefano Brivio"); +MODULE_AUTHOR("Michael Buesch"); +MODULE_LICENSE("GPL"); + +MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID); +MODULE_FIRMWARE("b43legacy/ucode2.fw"); +MODULE_FIRMWARE("b43legacy/ucode4.fw"); + +#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO) +static int modparam_pio; +module_param_named(pio, modparam_pio, int, 0444); +MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode"); +#elif defined(CONFIG_B43LEGACY_DMA) +# define modparam_pio 0 +#elif defined(CONFIG_B43LEGACY_PIO) +# define modparam_pio 1 +#endif + +static int modparam_bad_frames_preempt; +module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); +MODULE_PARM_DESC(bad_frames_preempt, "enable(1) / disable(0) Bad Frames" + " Preemption"); + +static char modparam_fwpostfix[16]; +module_param_string(fwpostfix, modparam_fwpostfix, 16, 0444); +MODULE_PARM_DESC(fwpostfix, "Postfix for the firmware files to load."); + +/* The following table supports BCM4301, BCM4303 and BCM4306/2 devices. */ +static const struct ssb_device_id b43legacy_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 2), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 4), + SSB_DEVTABLE_END +}; +MODULE_DEVICE_TABLE(ssb, b43legacy_ssb_tbl); + + +/* Channel and ratetables are shared for all devices. + * They can't be const, because ieee80211 puts some precalculated + * data in there. This data is the same for all devices, so we don't + * get concurrency issues */ +#define RATETAB_ENT(_rateid, _flags) \ + { \ + .bitrate = B43legacy_RATE_TO_100KBPS(_rateid), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } +/* + * NOTE: When changing this, sync with xmit.c's + * b43legacy_plcp_get_bitrate_idx_* functions! 
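+ * The four CCK rates come first, which lets the B and G subtables below share the same base pointer and differ only in length.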
+ */ +static struct ieee80211_rate __b43legacy_ratetable[] = { + RATETAB_ENT(B43legacy_CCK_RATE_1MB, 0), + RATETAB_ENT(B43legacy_CCK_RATE_2MB, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(B43legacy_CCK_RATE_5MB, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(B43legacy_CCK_RATE_11MB, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(B43legacy_OFDM_RATE_6MB, 0), + RATETAB_ENT(B43legacy_OFDM_RATE_9MB, 0), + RATETAB_ENT(B43legacy_OFDM_RATE_12MB, 0), + RATETAB_ENT(B43legacy_OFDM_RATE_18MB, 0), + RATETAB_ENT(B43legacy_OFDM_RATE_24MB, 0), + RATETAB_ENT(B43legacy_OFDM_RATE_36MB, 0), + RATETAB_ENT(B43legacy_OFDM_RATE_48MB, 0), + RATETAB_ENT(B43legacy_OFDM_RATE_54MB, 0), +}; +#define b43legacy_b_ratetable (__b43legacy_ratetable + 0) +#define b43legacy_b_ratetable_size 4 +#define b43legacy_g_ratetable (__b43legacy_ratetable + 0) +#define b43legacy_g_ratetable_size 12 + +#define CHANTAB_ENT(_chanid, _freq) \ + { \ + .center_freq = (_freq), \ + .hw_value = (_chanid), \ + } +static struct ieee80211_channel b43legacy_bg_chantable[] = { + CHANTAB_ENT(1, 2412), + CHANTAB_ENT(2, 2417), + CHANTAB_ENT(3, 2422), + CHANTAB_ENT(4, 2427), + CHANTAB_ENT(5, 2432), + CHANTAB_ENT(6, 2437), + CHANTAB_ENT(7, 2442), + CHANTAB_ENT(8, 2447), + CHANTAB_ENT(9, 2452), + CHANTAB_ENT(10, 2457), + CHANTAB_ENT(11, 2462), + CHANTAB_ENT(12, 2467), + CHANTAB_ENT(13, 2472), + CHANTAB_ENT(14, 2484), +}; + +static struct ieee80211_supported_band b43legacy_band_2GHz_BPHY = { + .channels = b43legacy_bg_chantable, + .n_channels = ARRAY_SIZE(b43legacy_bg_chantable), + .bitrates = b43legacy_b_ratetable, + .n_bitrates = b43legacy_b_ratetable_size, +}; + +static struct ieee80211_supported_band b43legacy_band_2GHz_GPHY = { + .channels = b43legacy_bg_chantable, + .n_channels = ARRAY_SIZE(b43legacy_bg_chantable), + .bitrates = b43legacy_g_ratetable, + .n_bitrates = b43legacy_g_ratetable_size, +}; + +static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev); +static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev); +static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev); +static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev); + + +static int b43legacy_ratelimit(struct b43legacy_wl *wl) +{ + if (!wl || !wl->current_dev) + return 1; + if (b43legacy_status(wl->current_dev) < B43legacy_STAT_STARTED) + return 1; + /* We are up and running. + * Ratelimit the messages to avoid DoS over the net. */ + return net_ratelimit(); +} + +void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43legacy_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_INFO "b43legacy-%s: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43legacy_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_ERR "b43legacy-%s ERROR: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43legacy_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_WARNING "b43legacy-%s warning: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +#if B43legacy_DEBUG +void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + printk(KERN_DEBUG "b43legacy-%s debug: ", + (wl && wl->hw) ? 
wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} +#endif /* DEBUG */ + +static void b43legacy_ram_write(struct b43legacy_wldev *dev, u16 offset, + u32 val) +{ + u32 status; + + B43legacy_WARN_ON(offset % 4 != 0); + + status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + if (status & B43legacy_MACCTL_BE) + val = swab32(val); + + b43legacy_write32(dev, B43legacy_MMIO_RAM_CONTROL, offset); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_RAM_DATA, val); +} + +static inline +void b43legacy_shm_control_word(struct b43legacy_wldev *dev, + u16 routing, u16 offset) +{ + u32 control; + + /* "offset" is the WORD offset. */ + + control = routing; + control <<= 16; + control |= offset; + b43legacy_write32(dev, B43legacy_MMIO_SHM_CONTROL, control); +} + +u32 b43legacy_shm_read32(struct b43legacy_wldev *dev, + u16 routing, u16 offset) +{ + u32 ret; + + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + ret = b43legacy_read16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED); + ret <<= 16; + b43legacy_shm_control_word(dev, routing, + (offset >> 2) + 1); + ret |= b43legacy_read16(dev, B43legacy_MMIO_SHM_DATA); + + return ret; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + ret = b43legacy_read32(dev, B43legacy_MMIO_SHM_DATA); + + return ret; +} + +u16 b43legacy_shm_read16(struct b43legacy_wldev *dev, + u16 routing, u16 offset) +{ + u16 ret; + + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + ret = b43legacy_read16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED); + + return ret; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + ret = b43legacy_read16(dev, B43legacy_MMIO_SHM_DATA); + + return ret; +} + +void b43legacy_shm_write32(struct b43legacy_wldev *dev, + u16 routing, u16 offset, + u32 value) +{ + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + mmiowb(); + b43legacy_write16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED, + (value >> 16) & 0xffff); + mmiowb(); + b43legacy_shm_control_word(dev, routing, + (offset >> 2) + 1); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, + value & 0xffff); + return; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, value); +} + +void b43legacy_shm_write16(struct b43legacy_wldev *dev, u16 routing, u16 offset, + u16 value) +{ + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + mmiowb(); + b43legacy_write16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED, + value); + return; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, value); +} + +/* Read HostFlags */ +u32 b43legacy_hf_read(struct b43legacy_wldev *dev) +{ + u32 ret; + + ret = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_HOSTFHI); + ret <<= 16; + ret |= b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_HOSTFLO); + + return ret; +} + +/* Write HostFlags */ +void 
b43legacy_hf_write(struct b43legacy_wldev *dev, u32 value) +{ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_HOSTFLO, + (value & 0x0000FFFF)); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_HOSTFHI, + ((value & 0xFFFF0000) >> 16)); +} + +void b43legacy_tsf_read(struct b43legacy_wldev *dev, u64 *tsf) +{ + /* We need to be careful. As we read the TSF from multiple + * registers, we should take care of register overflows. + * In theory, the whole tsf read process should be atomic. + * We try to be atomic here, by restaring the read process, + * if any of the high registers changed (overflew). + */ + if (dev->dev->id.revision >= 3) { + u32 low; + u32 high; + u32 high2; + + do { + high = b43legacy_read32(dev, + B43legacy_MMIO_REV3PLUS_TSF_HIGH); + low = b43legacy_read32(dev, + B43legacy_MMIO_REV3PLUS_TSF_LOW); + high2 = b43legacy_read32(dev, + B43legacy_MMIO_REV3PLUS_TSF_HIGH); + } while (unlikely(high != high2)); + + *tsf = high; + *tsf <<= 32; + *tsf |= low; + } else { + u64 tmp; + u16 v0; + u16 v1; + u16 v2; + u16 v3; + u16 test1; + u16 test2; + u16 test3; + + do { + v3 = b43legacy_read16(dev, B43legacy_MMIO_TSF_3); + v2 = b43legacy_read16(dev, B43legacy_MMIO_TSF_2); + v1 = b43legacy_read16(dev, B43legacy_MMIO_TSF_1); + v0 = b43legacy_read16(dev, B43legacy_MMIO_TSF_0); + + test3 = b43legacy_read16(dev, B43legacy_MMIO_TSF_3); + test2 = b43legacy_read16(dev, B43legacy_MMIO_TSF_2); + test1 = b43legacy_read16(dev, B43legacy_MMIO_TSF_1); + } while (v3 != test3 || v2 != test2 || v1 != test1); + + *tsf = v3; + *tsf <<= 48; + tmp = v2; + tmp <<= 32; + *tsf |= tmp; + tmp = v1; + tmp <<= 16; + *tsf |= tmp; + *tsf |= v0; + } +} + +static void b43legacy_time_lock(struct b43legacy_wldev *dev) +{ + u32 status; + + status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + status |= B43legacy_MACCTL_TBTTHOLD; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status); + mmiowb(); +} + +static void b43legacy_time_unlock(struct b43legacy_wldev *dev) +{ + u32 status; + + status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + status &= ~B43legacy_MACCTL_TBTTHOLD; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status); +} + +static void b43legacy_tsf_write_locked(struct b43legacy_wldev *dev, u64 tsf) +{ + /* Be careful with the in-progress timer. + * First zero out the low register, so we have a full + * register-overflow duration to complete the operation. 
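+ * (zeroing the low word gives roughly a full 32-bit rollover period to write the high half without racing a carry; the real low word is written last)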
+ */ + if (dev->dev->id.revision >= 3) { + u32 lo = (tsf & 0x00000000FFFFFFFFULL); + u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32; + + b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, 0); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_HIGH, + hi); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, + lo); + } else { + u16 v0 = (tsf & 0x000000000000FFFFULL); + u16 v1 = (tsf & 0x00000000FFFF0000ULL) >> 16; + u16 v2 = (tsf & 0x0000FFFF00000000ULL) >> 32; + u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48; + + b43legacy_write16(dev, B43legacy_MMIO_TSF_0, 0); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_3, v3); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_2, v2); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_1, v1); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_0, v0); + } +} + +void b43legacy_tsf_write(struct b43legacy_wldev *dev, u64 tsf) +{ + b43legacy_time_lock(dev); + b43legacy_tsf_write_locked(dev, tsf); + b43legacy_time_unlock(dev); +} + +static +void b43legacy_macfilter_set(struct b43legacy_wldev *dev, + u16 offset, const u8 *mac) +{ + static const u8 zero_addr[ETH_ALEN] = { 0 }; + u16 data; + + if (!mac) + mac = zero_addr; + + offset |= 0x0020; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_CONTROL, offset); + + data = mac[0]; + data |= mac[1] << 8; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data); + data = mac[2]; + data |= mac[3] << 8; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data); + data = mac[4]; + data |= mac[5] << 8; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data); +} + +static void b43legacy_write_mac_bssid_templates(struct b43legacy_wldev *dev) +{ + static const u8 zero_addr[ETH_ALEN] = { 0 }; + const u8 *mac = dev->wl->mac_addr; + const u8 *bssid = dev->wl->bssid; + u8 mac_bssid[ETH_ALEN * 2]; + int i; + u32 tmp; + + if (!bssid) + bssid = zero_addr; + if (!mac) + mac = zero_addr; + + b43legacy_macfilter_set(dev, B43legacy_MACFILTER_BSSID, bssid); + + memcpy(mac_bssid, mac, ETH_ALEN); + memcpy(mac_bssid + ETH_ALEN, bssid, ETH_ALEN); + + /* Write our MAC address and BSSID to template ram */ + for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32)) { + tmp = (u32)(mac_bssid[i + 0]); + tmp |= (u32)(mac_bssid[i + 1]) << 8; + tmp |= (u32)(mac_bssid[i + 2]) << 16; + tmp |= (u32)(mac_bssid[i + 3]) << 24; + b43legacy_ram_write(dev, 0x20 + i, tmp); + b43legacy_ram_write(dev, 0x78 + i, tmp); + b43legacy_ram_write(dev, 0x478 + i, tmp); + } +} + +static void b43legacy_upload_card_macaddress(struct b43legacy_wldev *dev) +{ + b43legacy_write_mac_bssid_templates(dev); + b43legacy_macfilter_set(dev, B43legacy_MACFILTER_SELF, + dev->wl->mac_addr); +} + +static void b43legacy_set_slot_time(struct b43legacy_wldev *dev, + u16 slot_time) +{ + /* slot_time is in usec. */ + if (dev->phy.type != B43legacy_PHYTYPE_G) + return; + b43legacy_write16(dev, 0x684, 510 + slot_time); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0010, + slot_time); +} + +static void b43legacy_short_slot_timing_enable(struct b43legacy_wldev *dev) +{ + b43legacy_set_slot_time(dev, 9); +} + +static void b43legacy_short_slot_timing_disable(struct b43legacy_wldev *dev) +{ + b43legacy_set_slot_time(dev, 20); +} + +/* Synchronize IRQ top- and bottom-half. + * IRQs must be masked before calling this. + * This must not be called with the irq_lock held. 
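+ * (it waits for the IRQ handler and the bottom-half tasklet to finish; the tasklet takes irq_lock itself, so holding it here would deadlock)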
+ */ +static void b43legacy_synchronize_irq(struct b43legacy_wldev *dev) +{ + synchronize_irq(dev->dev->irq); + tasklet_kill(&dev->isr_tasklet); +} + +/* DummyTransmission function, as documented on + * http://bcm-specs.sipsolutions.net/DummyTransmission + */ +void b43legacy_dummy_transmission(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + unsigned int i; + unsigned int max_loop; + u16 value; + u32 buffer[5] = { + 0x00000000, + 0x00D40000, + 0x00000000, + 0x01000000, + 0x00000000, + }; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + max_loop = 0xFA; + buffer[0] = 0x000B846E; + break; + default: + B43legacy_BUG_ON(1); + return; + } + + for (i = 0; i < 5; i++) + b43legacy_ram_write(dev, i * 4, buffer[i]); + + /* dummy read follows */ + b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + + b43legacy_write16(dev, 0x0568, 0x0000); + b43legacy_write16(dev, 0x07C0, 0x0000); + b43legacy_write16(dev, 0x050C, 0x0000); + b43legacy_write16(dev, 0x0508, 0x0000); + b43legacy_write16(dev, 0x050A, 0x0000); + b43legacy_write16(dev, 0x054C, 0x0000); + b43legacy_write16(dev, 0x056A, 0x0014); + b43legacy_write16(dev, 0x0568, 0x0826); + b43legacy_write16(dev, 0x0500, 0x0000); + b43legacy_write16(dev, 0x0502, 0x0030); + + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43legacy_radio_write16(dev, 0x0051, 0x0017); + for (i = 0x00; i < max_loop; i++) { + value = b43legacy_read16(dev, 0x050E); + if (value & 0x0080) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43legacy_read16(dev, 0x050E); + if (value & 0x0400) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43legacy_read16(dev, 0x0690); + if (!(value & 0x0100)) + break; + udelay(10); + } + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43legacy_radio_write16(dev, 0x0051, 0x0037); +} + +/* Turn the Analog ON/OFF */ +static void b43legacy_switch_analog(struct b43legacy_wldev *dev, int on) +{ + b43legacy_write16(dev, B43legacy_MMIO_PHY0, on ? 0 : 0xF4); +} + +void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags) +{ + u32 tmslow; + u32 macctl; + + flags |= B43legacy_TMSLOW_PHYCLKEN; + flags |= B43legacy_TMSLOW_PHYRESET; + ssb_device_enable(dev->dev, flags); + msleep(2); /* Wait for the PLL to turn on. 
*/ + + /* Now take the PHY out of Reset again */ + tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + tmslow |= SSB_TMSLOW_FGC; + tmslow &= ~B43legacy_TMSLOW_PHYRESET; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + tmslow &= ~SSB_TMSLOW_FGC; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + + /* Turn Analog ON */ + b43legacy_switch_analog(dev, 1); + + macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + macctl &= ~B43legacy_MACCTL_GMODE; + if (flags & B43legacy_TMSLOW_GMODE) { + macctl |= B43legacy_MACCTL_GMODE; + dev->phy.gmode = 1; + } else + dev->phy.gmode = 0; + macctl |= B43legacy_MACCTL_IHR_ENABLED; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); +} + +static void handle_irq_transmit_status(struct b43legacy_wldev *dev) +{ + u32 v0; + u32 v1; + u16 tmp; + struct b43legacy_txstatus stat; + + while (1) { + v0 = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_0); + if (!(v0 & 0x00000001)) + break; + v1 = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_1); + + stat.cookie = (v0 >> 16); + stat.seq = (v1 & 0x0000FFFF); + stat.phy_stat = ((v1 & 0x00FF0000) >> 16); + tmp = (v0 & 0x0000FFFF); + stat.frame_count = ((tmp & 0xF000) >> 12); + stat.rts_count = ((tmp & 0x0F00) >> 8); + stat.supp_reason = ((tmp & 0x001C) >> 2); + stat.pm_indicated = !!(tmp & 0x0080); + stat.intermediate = !!(tmp & 0x0040); + stat.for_ampdu = !!(tmp & 0x0020); + stat.acked = !!(tmp & 0x0002); + + b43legacy_handle_txstatus(dev, &stat); + } +} + +static void drain_txstatus_queue(struct b43legacy_wldev *dev) +{ + u32 dummy; + + if (dev->dev->id.revision < 5) + return; + /* Read all entries from the microcode TXstatus FIFO + * and throw them away. + */ + while (1) { + dummy = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_0); + if (!(dummy & 0x00000001)) + break; + dummy = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_1); + } +} + +static u32 b43legacy_jssi_read(struct b43legacy_wldev *dev) +{ + u32 val = 0; + + val = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x40A); + val <<= 16; + val |= b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x408); + + return val; +} + +static void b43legacy_jssi_write(struct b43legacy_wldev *dev, u32 jssi) +{ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x408, + (jssi & 0x0000FFFF)); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x40A, + (jssi & 0xFFFF0000) >> 16); +} + +static void b43legacy_generate_noise_sample(struct b43legacy_wldev *dev) +{ + b43legacy_jssi_write(dev, 0x7F7F7F7F); + b43legacy_write32(dev, B43legacy_MMIO_MACCMD, + b43legacy_read32(dev, B43legacy_MMIO_MACCMD) + | B43legacy_MACCMD_BGNOISE); + B43legacy_WARN_ON(dev->noisecalc.channel_at_start != + dev->phy.channel); +} + +static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev) +{ + /* Top half of Link Quality calculation. */ + + if (dev->noisecalc.calculation_running) + return; + dev->noisecalc.channel_at_start = dev->phy.channel; + dev->noisecalc.calculation_running = 1; + dev->noisecalc.nr_samples = 0; + + b43legacy_generate_noise_sample(dev); +} + +static void handle_irq_noise(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + u8 noise[4]; + u8 i; + u8 j; + s32 average; + + /* Bottom half of Link Quality calculation. 
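+ * Reads the four JSSI samples the firmware left in SHM and, once eight sample sets have been collected, folds the average into dev->stats.link_noise.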
*/ + + B43legacy_WARN_ON(!dev->noisecalc.calculation_running); + if (dev->noisecalc.channel_at_start != phy->channel) + goto drop_calculation; + *((__le32 *)noise) = cpu_to_le32(b43legacy_jssi_read(dev)); + if (noise[0] == 0x7F || noise[1] == 0x7F || + noise[2] == 0x7F || noise[3] == 0x7F) + goto generate_new; + + /* Get the noise samples. */ + B43legacy_WARN_ON(dev->noisecalc.nr_samples >= 8); + i = dev->noisecalc.nr_samples; + noise[0] = clamp_val(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[1] = clamp_val(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[2] = clamp_val(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[3] = clamp_val(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; + dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; + dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; + dev->noisecalc.samples[i][3] = phy->nrssi_lt[noise[3]]; + dev->noisecalc.nr_samples++; + if (dev->noisecalc.nr_samples == 8) { + /* Calculate the Link Quality by the noise samples. */ + average = 0; + for (i = 0; i < 8; i++) { + for (j = 0; j < 4; j++) + average += dev->noisecalc.samples[i][j]; + } + average /= (8 * 4); + average *= 125; + average += 64; + average /= 128; + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x40C); + tmp = (tmp / 128) & 0x1F; + if (tmp >= 8) + average += 2; + else + average -= 25; + if (tmp == 8) + average -= 72; + else + average -= 48; + + dev->stats.link_noise = average; +drop_calculation: + dev->noisecalc.calculation_running = 0; + return; + } +generate_new: + b43legacy_generate_noise_sample(dev); +} + +static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev) +{ + if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP)) { + /* TODO: PS TBTT */ + } else { + if (1/*FIXME: the last PSpoll frame was sent successfully */) + b43legacy_power_saving_ctl_bits(dev, -1, -1); + } + if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) + dev->dfq_valid = 1; +} + +static void handle_irq_atim_end(struct b43legacy_wldev *dev) +{ + if (dev->dfq_valid) { + b43legacy_write32(dev, B43legacy_MMIO_MACCMD, + b43legacy_read32(dev, B43legacy_MMIO_MACCMD) + | B43legacy_MACCMD_DFQ_VALID); + dev->dfq_valid = 0; + } +} + +static void handle_irq_pmq(struct b43legacy_wldev *dev) +{ + u32 tmp; + + /* TODO: AP mode. */ + + while (1) { + tmp = b43legacy_read32(dev, B43legacy_MMIO_PS_STATUS); + if (!(tmp & 0x00000008)) + break; + } + /* 16bit write is odd, but correct. */ + b43legacy_write16(dev, B43legacy_MMIO_PS_STATUS, 0x0002); +} + +static void b43legacy_write_template_common(struct b43legacy_wldev *dev, + const u8 *data, u16 size, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u32 i; + u32 tmp; + struct b43legacy_plcp_hdr4 plcp; + + plcp.data = 0; + b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + b43legacy_ram_write(dev, ram_offset, le32_to_cpu(plcp.data)); + ram_offset += sizeof(u32); + /* The PLCP is 6 bytes long, but we only wrote 4 bytes, yet. + * So leave the first two bytes of the next write blank. 
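+ * (the frame data therefore starts in the upper two bytes of the second RAM word, which the copy loop below compensates for with the i - 2 offset)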
+ */ + tmp = (u32)(data[0]) << 16; + tmp |= (u32)(data[1]) << 24; + b43legacy_ram_write(dev, ram_offset, tmp); + ram_offset += sizeof(u32); + for (i = 2; i < size; i += sizeof(u32)) { + tmp = (u32)(data[i + 0]); + if (i + 1 < size) + tmp |= (u32)(data[i + 1]) << 8; + if (i + 2 < size) + tmp |= (u32)(data[i + 2]) << 16; + if (i + 3 < size) + tmp |= (u32)(data[i + 3]) << 24; + b43legacy_ram_write(dev, ram_offset + i - 2, tmp); + } + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_size_offset, + size + sizeof(struct b43legacy_plcp_hdr6)); +} + +/* Convert a b43legacy antenna number value to the PHY TX control value. */ +static u16 b43legacy_antenna_to_phyctl(int antenna) +{ + switch (antenna) { + case B43legacy_ANTENNA0: + return B43legacy_TX4_PHY_ANT0; + case B43legacy_ANTENNA1: + return B43legacy_TX4_PHY_ANT1; + } + return B43legacy_TX4_PHY_ANTLAST; +} + +static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev, + u16 ram_offset, + u16 shm_size_offset) +{ + + unsigned int i, len, variable_len; + const struct ieee80211_mgmt *bcn; + const u8 *ie; + bool tim_found = 0; + unsigned int rate; + u16 ctl; + int antenna; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon); + + bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data); + len = min((size_t)dev->wl->current_beacon->len, + 0x200 - sizeof(struct b43legacy_plcp_hdr6)); + rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value; + + b43legacy_write_template_common(dev, (const u8 *)bcn, len, ram_offset, + shm_size_offset, rate); + + /* Write the PHY TX control parameters. */ + antenna = B43legacy_ANTENNA_DEFAULT; + antenna = b43legacy_antenna_to_phyctl(antenna); + ctl = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_BEACPHYCTL); + /* We can't send beacons with short preamble. Would get PHY errors. */ + ctl &= ~B43legacy_TX4_PHY_SHORTPRMBL; + ctl &= ~B43legacy_TX4_PHY_ANT; + ctl &= ~B43legacy_TX4_PHY_ENC; + ctl |= antenna; + ctl |= B43legacy_TX4_PHY_ENC_CCK; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_BEACPHYCTL, ctl); + + /* Find the position of the TIM and the DTIM_period value + * and write them to SHM. */ + ie = bcn->u.beacon.variable; + variable_len = len - offsetof(struct ieee80211_mgmt, u.beacon.variable); + for (i = 0; i < variable_len - 2; ) { + uint8_t ie_id, ie_len; + + ie_id = ie[i]; + ie_len = ie[i + 1]; + if (ie_id == 5) { + u16 tim_position; + u16 dtim_period; + /* This is the TIM Information Element */ + + /* Check whether the ie_len is in the beacon data range. */ + if (variable_len < ie_len + 2 + i) + break; + /* A valid TIM is at least 4 bytes long. */ + if (ie_len < 4) + break; + tim_found = 1; + + tim_position = sizeof(struct b43legacy_plcp_hdr6); + tim_position += offsetof(struct ieee80211_mgmt, + u.beacon.variable); + tim_position += i; + + dtim_period = ie[i + 3]; + + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_TIMPOS, tim_position); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_DTIMP, dtim_period); + break; + } + i += ie_len + 2; + } + if (!tim_found) { + b43legacywarn(dev->wl, "Did not find a valid TIM IE in the " + "beacon template packet. 
AP or IBSS operation " + "may be broken.\n"); + } else + b43legacydbg(dev->wl, "Updated beacon template\n"); +} + +static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev, + u16 shm_offset, u16 size, + struct ieee80211_rate *rate) +{ + struct b43legacy_plcp_hdr4 plcp; + u32 tmp; + __le16 dur; + + plcp.data = 0; + b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value); + dur = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->vif, + size, + rate); + /* Write PLCP in two parts and timing for packet transfer */ + tmp = le32_to_cpu(plcp.data); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset, + tmp & 0xFFFF); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset + 2, + tmp >> 16); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset + 6, + le16_to_cpu(dur)); +} + +/* Instead of using custom probe response template, this function + * just patches custom beacon template by: + * 1) Changing packet type + * 2) Patching duration field + * 3) Stripping TIM + */ +static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev, + u16 *dest_size, + struct ieee80211_rate *rate) +{ + const u8 *src_data; + u8 *dest_data; + u16 src_size, elem_size, src_pos, dest_pos; + __le16 dur; + struct ieee80211_hdr *hdr; + size_t ie_start; + + src_size = dev->wl->current_beacon->len; + src_data = (const u8 *)dev->wl->current_beacon->data; + + /* Get the start offset of the variable IEs in the packet. */ + ie_start = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + B43legacy_WARN_ON(ie_start != offsetof(struct ieee80211_mgmt, + u.beacon.variable)); + + if (B43legacy_WARN_ON(src_size < ie_start)) + return NULL; + + dest_data = kmalloc(src_size, GFP_ATOMIC); + if (unlikely(!dest_data)) + return NULL; + + /* Copy the static data and all Information Elements, except the TIM. */ + memcpy(dest_data, src_data, ie_start); + src_pos = ie_start; + dest_pos = ie_start; + for ( ; src_pos < src_size - 2; src_pos += elem_size) { + elem_size = src_data[src_pos + 1] + 2; + if (src_data[src_pos] == 5) { + /* This is the TIM. */ + continue; + } + memcpy(dest_data + dest_pos, src_data + src_pos, elem_size); + dest_pos += elem_size; + } + *dest_size = dest_pos; + hdr = (struct ieee80211_hdr *)dest_data; + + /* Set the frame control. 
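+ * Retag the copied beacon as a probe response and recompute the duration field for the chosen rate.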
*/ + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + dur = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->vif, + *dest_size, + rate); + hdr->duration_id = dur; + + return dest_data; +} + +static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev, + u16 ram_offset, + u16 shm_size_offset, + struct ieee80211_rate *rate) +{ + const u8 *probe_resp_data; + u16 size; + + size = dev->wl->current_beacon->len; + probe_resp_data = b43legacy_generate_probe_resp(dev, &size, rate); + if (unlikely(!probe_resp_data)) + return; + + /* Looks like PLCP headers plus packet timings are stored for + * all possible basic rates + */ + b43legacy_write_probe_resp_plcp(dev, 0x31A, size, + &b43legacy_b_ratetable[0]); + b43legacy_write_probe_resp_plcp(dev, 0x32C, size, + &b43legacy_b_ratetable[1]); + b43legacy_write_probe_resp_plcp(dev, 0x33E, size, + &b43legacy_b_ratetable[2]); + b43legacy_write_probe_resp_plcp(dev, 0x350, size, + &b43legacy_b_ratetable[3]); + + size = min((size_t)size, + 0x200 - sizeof(struct b43legacy_plcp_hdr6)); + b43legacy_write_template_common(dev, probe_resp_data, + size, ram_offset, + shm_size_offset, rate->hw_value); + kfree(probe_resp_data); +} + +static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + + if (wl->beacon0_uploaded) + return; + b43legacy_write_beacon_template(dev, 0x68, 0x18); + /* FIXME: Probe resp upload doesn't really belong here, + * but we don't use that feature anyway. */ + b43legacy_write_probe_resp_template(dev, 0x268, 0x4A, + &__b43legacy_ratetable[3]); + wl->beacon0_uploaded = 1; +} + +static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + + if (wl->beacon1_uploaded) + return; + b43legacy_write_beacon_template(dev, 0x468, 0x1A); + wl->beacon1_uploaded = 1; +} + +static void handle_irq_beacon(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + u32 cmd, beacon0_valid, beacon1_valid; + + if (!b43legacy_is_mode(wl, NL80211_IFTYPE_AP)) + return; + + /* This is the bottom half of the asynchronous beacon update. */ + + /* Ignore interrupt in the future. */ + dev->irq_mask &= ~B43legacy_IRQ_BEACON; + + cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD); + beacon0_valid = (cmd & B43legacy_MACCMD_BEACON0_VALID); + beacon1_valid = (cmd & B43legacy_MACCMD_BEACON1_VALID); + + /* Schedule interrupt manually, if busy. */ + if (beacon0_valid && beacon1_valid) { + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, B43legacy_IRQ_BEACON); + dev->irq_mask |= B43legacy_IRQ_BEACON; + return; + } + + if (unlikely(wl->beacon_templates_virgin)) { + /* We never uploaded a beacon before. + * Upload both templates now, but only mark one valid. 
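+ * (the second template is marked valid on a later pass through this handler)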
*/ + wl->beacon_templates_virgin = 0; + b43legacy_upload_beacon0(dev); + b43legacy_upload_beacon1(dev); + cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD); + cmd |= B43legacy_MACCMD_BEACON0_VALID; + b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd); + } else { + if (!beacon0_valid) { + b43legacy_upload_beacon0(dev); + cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD); + cmd |= B43legacy_MACCMD_BEACON0_VALID; + b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd); + } else if (!beacon1_valid) { + b43legacy_upload_beacon1(dev); + cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD); + cmd |= B43legacy_MACCMD_BEACON1_VALID; + b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd); + } + } +} + +static void b43legacy_beacon_update_trigger_work(struct work_struct *work) +{ + struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl, + beacon_update_trigger); + struct b43legacy_wldev *dev; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + if (likely(dev && (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED))) { + spin_lock_irq(&wl->irq_lock); + /* Update beacon right away or defer to IRQ. */ + handle_irq_beacon(dev); + /* The handler might have updated the IRQ mask. */ + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, + dev->irq_mask); + mmiowb(); + spin_unlock_irq(&wl->irq_lock); + } + mutex_unlock(&wl->mutex); +} + +/* Asynchronously update the packet templates in template RAM. + * Locking: Requires wl->irq_lock to be locked. */ +static void b43legacy_update_templates(struct b43legacy_wl *wl) +{ + struct sk_buff *beacon; + /* This is the top half of the asynchronous beacon update. The bottom + * half is the beacon IRQ. Beacon update must be asynchronous to avoid + * sending an invalid beacon. This can happen, for example, if the + * firmware transmits a beacon while we are updating it. */ + + /* We could modify the existing beacon and set the aid bit in the TIM + * field, but that would probably require resizing and moving of data + * within the beacon template. Simply request a new beacon and let + * mac80211 do the hard work.
*/ + beacon = ieee80211_beacon_get(wl->hw, wl->vif); + if (unlikely(!beacon)) + return; + + if (wl->current_beacon) + dev_kfree_skb_any(wl->current_beacon); + wl->current_beacon = beacon; + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; + ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger); +} + +static void b43legacy_set_beacon_int(struct b43legacy_wldev *dev, + u16 beacon_int) +{ + b43legacy_time_lock(dev); + if (dev->dev->id.revision >= 3) { + b43legacy_write32(dev, B43legacy_MMIO_TSF_CFP_REP, + (beacon_int << 16)); + b43legacy_write32(dev, B43legacy_MMIO_TSF_CFP_START, + (beacon_int << 10)); + } else { + b43legacy_write16(dev, 0x606, (beacon_int >> 6)); + b43legacy_write16(dev, 0x610, beacon_int); + } + b43legacy_time_unlock(dev); + b43legacydbg(dev->wl, "Set beacon interval to %u\n", beacon_int); +} + +static void handle_irq_ucode_debug(struct b43legacy_wldev *dev) +{ +} + +/* Interrupt handler bottom-half */ +static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev) +{ + u32 reason; + u32 dma_reason[ARRAY_SIZE(dev->dma_reason)]; + u32 merged_dma_reason = 0; + int i; + unsigned long flags; + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + + B43legacy_WARN_ON(b43legacy_status(dev) < + B43legacy_STAT_INITIALIZED); + + reason = dev->irq_reason; + for (i = 0; i < ARRAY_SIZE(dma_reason); i++) { + dma_reason[i] = dev->dma_reason[i]; + merged_dma_reason |= dma_reason[i]; + } + + if (unlikely(reason & B43legacy_IRQ_MAC_TXERR)) + b43legacyerr(dev->wl, "MAC transmission error\n"); + + if (unlikely(reason & B43legacy_IRQ_PHY_TXERR)) { + b43legacyerr(dev->wl, "PHY transmission error\n"); + rmb(); + if (unlikely(atomic_dec_and_test(&dev->phy.txerr_cnt))) { + b43legacyerr(dev->wl, "Too many PHY TX errors, " + "restarting the controller\n"); + b43legacy_controller_restart(dev, "PHY TX errors"); + } + } + + if (unlikely(merged_dma_reason & (B43legacy_DMAIRQ_FATALMASK | + B43legacy_DMAIRQ_NONFATALMASK))) { + if (merged_dma_reason & B43legacy_DMAIRQ_FATALMASK) { + b43legacyerr(dev->wl, "Fatal DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43legacy_controller_restart(dev, "DMA error"); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + return; + } + if (merged_dma_reason & B43legacy_DMAIRQ_NONFATALMASK) + b43legacyerr(dev->wl, "DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + } + + if (unlikely(reason & B43legacy_IRQ_UCODE_DEBUG)) + handle_irq_ucode_debug(dev); + if (reason & B43legacy_IRQ_TBTT_INDI) + handle_irq_tbtt_indication(dev); + if (reason & B43legacy_IRQ_ATIM_END) + handle_irq_atim_end(dev); + if (reason & B43legacy_IRQ_BEACON) + handle_irq_beacon(dev); + if (reason & B43legacy_IRQ_PMQ) + handle_irq_pmq(dev); + if (reason & B43legacy_IRQ_TXFIFO_FLUSH_OK) + ;/*TODO*/ + if (reason & B43legacy_IRQ_NOISESAMPLE_OK) + handle_irq_noise(dev); + + /* Check the DMA reason registers for received data. 
*/ + if (dma_reason[0] & B43legacy_DMAIRQ_RX_DONE) { + if (b43legacy_using_pio(dev)) + b43legacy_pio_rx(dev->pio.queue0); + else + b43legacy_dma_rx(dev->dma.rx_ring0); + } + B43legacy_WARN_ON(dma_reason[1] & B43legacy_DMAIRQ_RX_DONE); + B43legacy_WARN_ON(dma_reason[2] & B43legacy_DMAIRQ_RX_DONE); + if (dma_reason[3] & B43legacy_DMAIRQ_RX_DONE) { + if (b43legacy_using_pio(dev)) + b43legacy_pio_rx(dev->pio.queue3); + else + b43legacy_dma_rx(dev->dma.rx_ring3); + } + B43legacy_WARN_ON(dma_reason[4] & B43legacy_DMAIRQ_RX_DONE); + B43legacy_WARN_ON(dma_reason[5] & B43legacy_DMAIRQ_RX_DONE); + + if (reason & B43legacy_IRQ_TX_OK) + handle_irq_transmit_status(dev); + + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); +} + +static void pio_irq_workaround(struct b43legacy_wldev *dev, + u16 base, int queueidx) +{ + u16 rxctl; + + rxctl = b43legacy_read16(dev, base + B43legacy_PIO_RXCTL); + if (rxctl & B43legacy_PIO_RXCTL_DATAAVAILABLE) + dev->dma_reason[queueidx] |= B43legacy_DMAIRQ_RX_DONE; + else + dev->dma_reason[queueidx] &= ~B43legacy_DMAIRQ_RX_DONE; +} + +static void b43legacy_interrupt_ack(struct b43legacy_wldev *dev, u32 reason) +{ + if (b43legacy_using_pio(dev) && + (dev->dev->id.revision < 3) && + (!(reason & B43legacy_IRQ_PIO_WORKAROUND))) { + /* Apply a PIO specific workaround to the dma_reasons */ + pio_irq_workaround(dev, B43legacy_MMIO_PIO1_BASE, 0); + pio_irq_workaround(dev, B43legacy_MMIO_PIO2_BASE, 1); + pio_irq_workaround(dev, B43legacy_MMIO_PIO3_BASE, 2); + pio_irq_workaround(dev, B43legacy_MMIO_PIO4_BASE, 3); + } + + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, reason); + + b43legacy_write32(dev, B43legacy_MMIO_DMA0_REASON, + dev->dma_reason[0]); + b43legacy_write32(dev, B43legacy_MMIO_DMA1_REASON, + dev->dma_reason[1]); + b43legacy_write32(dev, B43legacy_MMIO_DMA2_REASON, + dev->dma_reason[2]); + b43legacy_write32(dev, B43legacy_MMIO_DMA3_REASON, + dev->dma_reason[3]); + b43legacy_write32(dev, B43legacy_MMIO_DMA4_REASON, + dev->dma_reason[4]); + b43legacy_write32(dev, B43legacy_MMIO_DMA5_REASON, + dev->dma_reason[5]); +} + +/* Interrupt handler top-half */ +static irqreturn_t b43legacy_interrupt_handler(int irq, void *dev_id) +{ + irqreturn_t ret = IRQ_NONE; + struct b43legacy_wldev *dev = dev_id; + u32 reason; + + B43legacy_WARN_ON(!dev); + + spin_lock(&dev->wl->irq_lock); + + if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED)) + /* This can only happen on shared IRQ lines. */ + goto out; + reason = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + if (reason == 0xffffffff) /* shared IRQ */ + goto out; + ret = IRQ_HANDLED; + reason &= dev->irq_mask; + if (!reason) + goto out; + + dev->dma_reason[0] = b43legacy_read32(dev, + B43legacy_MMIO_DMA0_REASON) + & 0x0001DC00; + dev->dma_reason[1] = b43legacy_read32(dev, + B43legacy_MMIO_DMA1_REASON) + & 0x0000DC00; + dev->dma_reason[2] = b43legacy_read32(dev, + B43legacy_MMIO_DMA2_REASON) + & 0x0000DC00; + dev->dma_reason[3] = b43legacy_read32(dev, + B43legacy_MMIO_DMA3_REASON) + & 0x0001DC00; + dev->dma_reason[4] = b43legacy_read32(dev, + B43legacy_MMIO_DMA4_REASON) + & 0x0000DC00; + dev->dma_reason[5] = b43legacy_read32(dev, + B43legacy_MMIO_DMA5_REASON) + & 0x0000DC00; + + b43legacy_interrupt_ack(dev, reason); + /* Disable all IRQs. They are enabled again in the bottom half. */ + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0); + /* Save the reason code and call our bottom half. 
*/ + dev->irq_reason = reason; + tasklet_schedule(&dev->isr_tasklet); +out: + mmiowb(); + spin_unlock(&dev->wl->irq_lock); + + return ret; +} + +static void b43legacy_release_firmware(struct b43legacy_wldev *dev) +{ + release_firmware(dev->fw.ucode); + dev->fw.ucode = NULL; + release_firmware(dev->fw.pcm); + dev->fw.pcm = NULL; + release_firmware(dev->fw.initvals); + dev->fw.initvals = NULL; + release_firmware(dev->fw.initvals_band); + dev->fw.initvals_band = NULL; +} + +static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl) +{ + b43legacyerr(wl, "You must go to http://linuxwireless.org/en/users/" + "Drivers/b43#devicefirmware " + "and download the correct firmware (version 3).\n"); +} + +static int do_request_fw(struct b43legacy_wldev *dev, + const char *name, + const struct firmware **fw) +{ + char path[sizeof(modparam_fwpostfix) + 32]; + struct b43legacy_fw_header *hdr; + u32 size; + int err; + + if (!name) + return 0; + + snprintf(path, ARRAY_SIZE(path), + "b43legacy%s/%s.fw", + modparam_fwpostfix, name); + err = request_firmware(fw, path, dev->dev->dev); + if (err) { + b43legacyerr(dev->wl, "Firmware file \"%s\" not found " + "or load failed.\n", path); + return err; + } + if ((*fw)->size < sizeof(struct b43legacy_fw_header)) + goto err_format; + hdr = (struct b43legacy_fw_header *)((*fw)->data); + switch (hdr->type) { + case B43legacy_FW_TYPE_UCODE: + case B43legacy_FW_TYPE_PCM: + size = be32_to_cpu(hdr->size); + if (size != (*fw)->size - sizeof(struct b43legacy_fw_header)) + goto err_format; + /* fallthrough */ + case B43legacy_FW_TYPE_IV: + if (hdr->ver != 1) + goto err_format; + break; + default: + goto err_format; + } + + return err; + +err_format: + b43legacyerr(dev->wl, "Firmware file \"%s\" format error.\n", path); + return -EPROTO; +} + +static int b43legacy_request_firmware(struct b43legacy_wldev *dev) +{ + struct b43legacy_firmware *fw = &dev->fw; + const u8 rev = dev->dev->id.revision; + const char *filename; + u32 tmshigh; + int err; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + if (!fw->ucode) { + if (rev == 2) + filename = "ucode2"; + else if (rev == 4) + filename = "ucode4"; + else + filename = "ucode5"; + err = do_request_fw(dev, filename, &fw->ucode); + if (err) + goto err_load; + } + if (!fw->pcm) { + if (rev < 5) + filename = "pcm4"; + else + filename = "pcm5"; + err = do_request_fw(dev, filename, &fw->pcm); + if (err) + goto err_load; + } + if (!fw->initvals) { + switch (dev->phy.type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0initvals5"; + else if (rev == 2 || rev == 4) + filename = "b0g0initvals2"; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = do_request_fw(dev, filename, &fw->initvals); + if (err) + goto err_load; + } + if (!fw->initvals_band) { + switch (dev->phy.type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0bsinitvals5"; + else if (rev >= 11) + filename = NULL; + else if (rev == 2 || rev == 4) + filename = NULL; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = do_request_fw(dev, filename, &fw->initvals_band); + if (err) + goto err_load; + } + + return 0; + +err_load: + b43legacy_print_fw_helptext(dev->wl); + goto error; + +err_no_initvals: + err = -ENODEV; + b43legacyerr(dev->wl, "No Initial Values firmware file for PHY %u, " + "core rev %u\n", dev->phy.type, rev); + goto error; + +error: + b43legacy_release_firmware(dev); + return 
err; +} + +static int b43legacy_upload_microcode(struct b43legacy_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43legacy_fw_header); + const __be32 *data; + unsigned int i; + unsigned int len; + u16 fwrev; + u16 fwpatch; + u16 fwdate; + u16 fwtime; + u32 tmp, macctl; + int err = 0; + + /* Jump the microcode PSM to offset 0 */ + macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + B43legacy_WARN_ON(macctl & B43legacy_MACCTL_PSM_RUN); + macctl |= B43legacy_MACCTL_PSM_JMP0; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); + /* Zero out all microcode PSM registers and shared memory. */ + for (i = 0; i < 64; i++) + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, i, 0); + for (i = 0; i < 4096; i += 2) + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, i, 0); + + /* Upload Microcode. */ + data = (__be32 *) (dev->fw.ucode->data + hdr_len); + len = (dev->fw.ucode->size - hdr_len) / sizeof(__be32); + b43legacy_shm_control_word(dev, + B43legacy_SHM_UCODE | + B43legacy_SHM_AUTOINC_W, + 0x0000); + for (i = 0; i < len; i++) { + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, + be32_to_cpu(data[i])); + udelay(10); + } + + if (dev->fw.pcm) { + /* Upload PCM data. */ + data = (__be32 *) (dev->fw.pcm->data + hdr_len); + len = (dev->fw.pcm->size - hdr_len) / sizeof(__be32); + b43legacy_shm_control_word(dev, B43legacy_SHM_HW, 0x01EA); + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, 0x00004000); + /* No need for autoinc bit in SHM_HW */ + b43legacy_shm_control_word(dev, B43legacy_SHM_HW, 0x01EB); + for (i = 0; i < len; i++) { + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, + be32_to_cpu(data[i])); + udelay(10); + } + } + + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, + B43legacy_IRQ_ALL); + + /* Start the microcode PSM */ + macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + macctl &= ~B43legacy_MACCTL_PSM_JMP0; + macctl |= B43legacy_MACCTL_PSM_RUN; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); + + /* Wait for the microcode to load and respond */ + i = 0; + while (1) { + tmp = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + if (tmp == B43legacy_IRQ_MAC_SUSPENDED) + break; + i++; + if (i >= B43legacy_IRQWAIT_MAX_RETRIES) { + b43legacyerr(dev->wl, "Microcode not responding\n"); + b43legacy_print_fw_helptext(dev->wl); + err = -ENODEV; + goto error; + } + msleep_interruptible(50); + if (signal_pending(current)) { + err = -EINTR; + goto error; + } + } + /* dummy read follows */ + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + + /* Get and check the revisions. */ + fwrev = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODEREV); + fwpatch = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODEPATCH); + fwdate = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODEDATE); + fwtime = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODETIME); + + if (fwrev > 0x128) { + b43legacyerr(dev->wl, "YOU ARE TRYING TO LOAD V4 FIRMWARE." + " Only firmware from binary drivers version 3.x" + " is supported. 
You must change your firmware" + " files.\n"); + b43legacy_print_fw_helptext(dev->wl); + err = -EOPNOTSUPP; + goto error; + } + b43legacyinfo(dev->wl, "Loading firmware version 0x%X, patch level %u " + "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", fwrev, fwpatch, + (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF, + (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, + fwtime & 0x1F); + + dev->fw.rev = fwrev; + dev->fw.patch = fwpatch; + + return 0; + +error: + macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + macctl &= ~B43legacy_MACCTL_PSM_RUN; + macctl |= B43legacy_MACCTL_PSM_JMP0; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); + + return err; +} + +static int b43legacy_write_initvals(struct b43legacy_wldev *dev, + const struct b43legacy_iv *ivals, + size_t count, + size_t array_size) +{ + const struct b43legacy_iv *iv; + u16 offset; + size_t i; + bool bit32; + + BUILD_BUG_ON(sizeof(struct b43legacy_iv) != 6); + iv = ivals; + for (i = 0; i < count; i++) { + if (array_size < sizeof(iv->offset_size)) + goto err_format; + array_size -= sizeof(iv->offset_size); + offset = be16_to_cpu(iv->offset_size); + bit32 = !!(offset & B43legacy_IV_32BIT); + offset &= B43legacy_IV_OFFSET_MASK; + if (offset >= 0x1000) + goto err_format; + if (bit32) { + u32 value; + + if (array_size < sizeof(iv->data.d32)) + goto err_format; + array_size -= sizeof(iv->data.d32); + + value = get_unaligned_be32(&iv->data.d32); + b43legacy_write32(dev, offset, value); + + iv = (const struct b43legacy_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be32)); + } else { + u16 value; + + if (array_size < sizeof(iv->data.d16)) + goto err_format; + array_size -= sizeof(iv->data.d16); + + value = be16_to_cpu(iv->data.d16); + b43legacy_write16(dev, offset, value); + + iv = (const struct b43legacy_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be16)); + } + } + if (array_size) + goto err_format; + + return 0; + +err_format: + b43legacyerr(dev->wl, "Initial Values Firmware file-format error.\n"); + b43legacy_print_fw_helptext(dev->wl); + + return -EPROTO; +} + +static int b43legacy_upload_initvals(struct b43legacy_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43legacy_fw_header); + const struct b43legacy_fw_header *hdr; + struct b43legacy_firmware *fw = &dev->fw; + const struct b43legacy_iv *ivals; + size_t count; + int err; + + hdr = (const struct b43legacy_fw_header *)(fw->initvals->data); + ivals = (const struct b43legacy_iv *)(fw->initvals->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43legacy_write_initvals(dev, ivals, count, + fw->initvals->size - hdr_len); + if (err) + goto out; + if (fw->initvals_band) { + hdr = (const struct b43legacy_fw_header *) + (fw->initvals_band->data); + ivals = (const struct b43legacy_iv *)(fw->initvals_band->data + + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43legacy_write_initvals(dev, ivals, count, + fw->initvals_band->size - hdr_len); + if (err) + goto out; + } +out: + + return err; +} + +/* Initialize the GPIOs + * http://bcm-specs.sipsolutions.net/GPIO + */ +static int b43legacy_gpio_init(struct b43legacy_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + u32 mask; + u32 set; + + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, + b43legacy_read32(dev, + B43legacy_MMIO_MACCTL) + & 0xFFFF3FFF); + + b43legacy_write16(dev, B43legacy_MMIO_GPIO_MASK, + b43legacy_read16(dev, + B43legacy_MMIO_GPIO_MASK) + | 0x000F); + + mask = 0x0000001F; + set = 0x0000000F; + if (dev->dev->bus->chip_id == 
0x4301) { + mask |= 0x0060; + set |= 0x0060; + } + if (dev->dev->bus->sprom.boardflags_lo & B43legacy_BFL_PACTRL) { + b43legacy_write16(dev, B43legacy_MMIO_GPIO_MASK, + b43legacy_read16(dev, + B43legacy_MMIO_GPIO_MASK) + | 0x0200); + mask |= 0x0200; + set |= 0x0200; + } + if (dev->dev->id.revision >= 2) + mask |= 0x0010; /* FIXME: This is redundant. */ + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return 0; + ssb_write32(gpiodev, B43legacy_GPIO_CONTROL, + (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL) + & mask) | set); + + return 0; +} + +/* Turn off all GPIO stuff. Call this on module unload, for example. */ +static void b43legacy_gpio_cleanup(struct b43legacy_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return; + ssb_write32(gpiodev, B43legacy_GPIO_CONTROL, 0); +} + +/* http://bcm-specs.sipsolutions.net/EnableMac */ +void b43legacy_mac_enable(struct b43legacy_wldev *dev) +{ + dev->mac_suspended--; + B43legacy_WARN_ON(dev->mac_suspended < 0); + B43legacy_WARN_ON(irqs_disabled()); + if (dev->mac_suspended == 0) { + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, + b43legacy_read32(dev, + B43legacy_MMIO_MACCTL) + | B43legacy_MACCTL_ENABLED); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, + B43legacy_IRQ_MAC_SUSPENDED); + /* the next two are dummy reads */ + b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + b43legacy_power_saving_ctl_bits(dev, -1, -1); + + /* Re-enable IRQs. */ + spin_lock_irq(&dev->wl->irq_lock); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, + dev->irq_mask); + spin_unlock_irq(&dev->wl->irq_lock); + } +} + +/* http://bcm-specs.sipsolutions.net/SuspendMAC */ +void b43legacy_mac_suspend(struct b43legacy_wldev *dev) +{ + int i; + u32 tmp; + + might_sleep(); + B43legacy_WARN_ON(irqs_disabled()); + B43legacy_WARN_ON(dev->mac_suspended < 0); + + if (dev->mac_suspended == 0) { + /* Mask IRQs before suspending MAC. Otherwise + * the MAC stays busy and won't suspend. */ + spin_lock_irq(&dev->wl->irq_lock); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0); + spin_unlock_irq(&dev->wl->irq_lock); + b43legacy_synchronize_irq(dev); + + b43legacy_power_saving_ctl_bits(dev, -1, 1); + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, + b43legacy_read32(dev, + B43legacy_MMIO_MACCTL) + & ~B43legacy_MACCTL_ENABLED); + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + for (i = 40; i; i--) { + tmp = b43legacy_read32(dev, + B43legacy_MMIO_GEN_IRQ_REASON); + if (tmp & B43legacy_IRQ_MAC_SUSPENDED) + goto out; + msleep(1); + } + b43legacyerr(dev->wl, "MAC suspend failed\n"); + } +out: + dev->mac_suspended++; +} + +static void b43legacy_adjust_opmode(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + u32 ctl; + u16 cfp_pretbtt; + + ctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + /* Reset status to STA infrastructure mode. 
*/ + ctl &= ~B43legacy_MACCTL_AP; + ctl &= ~B43legacy_MACCTL_KEEP_CTL; + ctl &= ~B43legacy_MACCTL_KEEP_BADPLCP; + ctl &= ~B43legacy_MACCTL_KEEP_BAD; + ctl &= ~B43legacy_MACCTL_PROMISC; + ctl &= ~B43legacy_MACCTL_BEACPROMISC; + ctl |= B43legacy_MACCTL_INFRA; + + if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP)) + ctl |= B43legacy_MACCTL_AP; + else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)) + ctl &= ~B43legacy_MACCTL_INFRA; + + if (wl->filter_flags & FIF_CONTROL) + ctl |= B43legacy_MACCTL_KEEP_CTL; + if (wl->filter_flags & FIF_FCSFAIL) + ctl |= B43legacy_MACCTL_KEEP_BAD; + if (wl->filter_flags & FIF_PLCPFAIL) + ctl |= B43legacy_MACCTL_KEEP_BADPLCP; + if (wl->filter_flags & FIF_PROMISC_IN_BSS) + ctl |= B43legacy_MACCTL_PROMISC; + if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC) + ctl |= B43legacy_MACCTL_BEACPROMISC; + + /* Workaround: On old hardware the HW-MAC-address-filter + * doesn't work properly, so always run promisc and filter + * it in software. */ + if (dev->dev->id.revision <= 4) + ctl |= B43legacy_MACCTL_PROMISC; + + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, ctl); + + cfp_pretbtt = 2; + if ((ctl & B43legacy_MACCTL_INFRA) && + !(ctl & B43legacy_MACCTL_AP)) { + if (dev->dev->bus->chip_id == 0x4306 && + dev->dev->bus->chip_rev == 3) + cfp_pretbtt = 100; + else + cfp_pretbtt = 50; + } + b43legacy_write16(dev, 0x612, cfp_pretbtt); +} + +static void b43legacy_rate_memory_write(struct b43legacy_wldev *dev, + u16 rate, + int is_ofdm) +{ + u16 offset; + + if (is_ofdm) { + offset = 0x480; + offset += (b43legacy_plcp_get_ratecode_ofdm(rate) & 0x000F) * 2; + } else { + offset = 0x4C0; + offset += (b43legacy_plcp_get_ratecode_cck(rate) & 0x000F) * 2; + } + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, offset + 0x20, + b43legacy_shm_read16(dev, + B43legacy_SHM_SHARED, offset)); +} + +static void b43legacy_rate_memory_init(struct b43legacy_wldev *dev) +{ + switch (dev->phy.type) { + case B43legacy_PHYTYPE_G: + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_6MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_12MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_18MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_24MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_36MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_48MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_54MB, 1); + /* fallthrough */ + case B43legacy_PHYTYPE_B: + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_1MB, 0); + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_2MB, 0); + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_5MB, 0); + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_11MB, 0); + break; + default: + B43legacy_BUG_ON(1); + } +} + +/* Set the TX-Antenna for management frames sent by firmware. */ +static void b43legacy_mgmtframe_txantenna(struct b43legacy_wldev *dev, + int antenna) +{ + u16 ant = 0; + u16 tmp; + + switch (antenna) { + case B43legacy_ANTENNA0: + ant |= B43legacy_TX4_PHY_ANT0; + break; + case B43legacy_ANTENNA1: + ant |= B43legacy_TX4_PHY_ANT1; + break; + case B43legacy_ANTENNA_AUTO: + ant |= B43legacy_TX4_PHY_ANTLAST; + break; + default: + B43legacy_BUG_ON(1); + } + + /* FIXME: We also need to set the other flags of the PHY control + * field somewhere.
*/ + + /* For Beacons */ + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_BEACPHYCTL); + tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_BEACPHYCTL, tmp); + /* For ACK/CTS */ + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_ACKCTSPHYCTL); + tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_ACKCTSPHYCTL, tmp); + /* For Probe Resposes */ + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_PRPHYCTL); + tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_PRPHYCTL, tmp); +} + +/* This is the opposite of b43legacy_chip_init() */ +static void b43legacy_chip_exit(struct b43legacy_wldev *dev) +{ + b43legacy_radio_turn_off(dev, 1); + b43legacy_gpio_cleanup(dev); + /* firmware is released later */ +} + +/* Initialize the chip + * http://bcm-specs.sipsolutions.net/ChipInit + */ +static int b43legacy_chip_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int err; + int tmp; + u32 value32, macctl; + u16 value16; + + /* Initialize the MAC control */ + macctl = B43legacy_MACCTL_IHR_ENABLED | B43legacy_MACCTL_SHM_ENABLED; + if (dev->phy.gmode) + macctl |= B43legacy_MACCTL_GMODE; + macctl |= B43legacy_MACCTL_INFRA; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); + + err = b43legacy_request_firmware(dev); + if (err) + goto out; + err = b43legacy_upload_microcode(dev); + if (err) + goto out; /* firmware is released later */ + + err = b43legacy_gpio_init(dev); + if (err) + goto out; /* firmware is released later */ + + err = b43legacy_upload_initvals(dev); + if (err) + goto err_gpio_clean; + b43legacy_radio_turn_on(dev); + + b43legacy_write16(dev, 0x03E6, 0x0000); + err = b43legacy_phy_init(dev); + if (err) + goto err_radio_off; + + /* Select initial Interference Mitigation. */ + tmp = phy->interfmode; + phy->interfmode = B43legacy_INTERFMODE_NONE; + b43legacy_radio_set_interference_mitigation(dev, tmp); + + b43legacy_phy_set_antenna_diversity(dev); + b43legacy_mgmtframe_txantenna(dev, B43legacy_ANTENNA_DEFAULT); + + if (phy->type == B43legacy_PHYTYPE_B) { + value16 = b43legacy_read16(dev, 0x005E); + value16 |= 0x0004; + b43legacy_write16(dev, 0x005E, value16); + } + b43legacy_write32(dev, 0x0100, 0x01000000); + if (dev->dev->id.revision < 5) + b43legacy_write32(dev, 0x010C, 0x01000000); + + value32 = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + value32 &= ~B43legacy_MACCTL_INFRA; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, value32); + value32 = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + value32 |= B43legacy_MACCTL_INFRA; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, value32); + + if (b43legacy_using_pio(dev)) { + b43legacy_write32(dev, 0x0210, 0x00000100); + b43legacy_write32(dev, 0x0230, 0x00000100); + b43legacy_write32(dev, 0x0250, 0x00000100); + b43legacy_write32(dev, 0x0270, 0x00000100); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0034, + 0x0000); + } + + /* Probe Response Timeout value */ + /* FIXME: Default to 0, has to be set by ioctl probably... :-/ */ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0074, 0x0000); + + /* Initially set the wireless operation mode. 
*/ + b43legacy_adjust_opmode(dev); + + if (dev->dev->id.revision < 3) { + b43legacy_write16(dev, 0x060E, 0x0000); + b43legacy_write16(dev, 0x0610, 0x8000); + b43legacy_write16(dev, 0x0604, 0x0000); + b43legacy_write16(dev, 0x0606, 0x0200); + } else { + b43legacy_write32(dev, 0x0188, 0x80000000); + b43legacy_write32(dev, 0x018C, 0x02000000); + } + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, 0x00004000); + b43legacy_write32(dev, B43legacy_MMIO_DMA0_IRQ_MASK, 0x0001DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA1_IRQ_MASK, 0x0000DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA2_IRQ_MASK, 0x0000DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA3_IRQ_MASK, 0x0001DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA4_IRQ_MASK, 0x0000DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA5_IRQ_MASK, 0x0000DC00); + + value32 = ssb_read32(dev->dev, SSB_TMSLOW); + value32 |= 0x00100000; + ssb_write32(dev->dev, SSB_TMSLOW, value32); + + b43legacy_write16(dev, B43legacy_MMIO_POWERUP_DELAY, + dev->dev->bus->chipco.fast_pwrup_delay); + + /* PHY TX errors counter. */ + atomic_set(&phy->txerr_cnt, B43legacy_PHY_TX_BADNESS_LIMIT); + + B43legacy_WARN_ON(err != 0); + b43legacydbg(dev->wl, "Chip initialized\n"); +out: + return err; + +err_radio_off: + b43legacy_radio_turn_off(dev, 1); +err_gpio_clean: + b43legacy_gpio_cleanup(dev); + goto out; +} + +static void b43legacy_periodic_every120sec(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->type != B43legacy_PHYTYPE_G || phy->rev < 2) + return; + + b43legacy_mac_suspend(dev); + b43legacy_phy_lo_g_measure(dev); + b43legacy_mac_enable(dev); +} + +static void b43legacy_periodic_every60sec(struct b43legacy_wldev *dev) +{ + b43legacy_phy_lo_mark_all_unused(dev); + if (dev->dev->bus->sprom.boardflags_lo & B43legacy_BFL_RSSI) { + b43legacy_mac_suspend(dev); + b43legacy_calc_nrssi_slope(dev); + b43legacy_mac_enable(dev); + } +} + +static void b43legacy_periodic_every30sec(struct b43legacy_wldev *dev) +{ + /* Update device statistics. */ + b43legacy_calculate_link_quality(dev); +} + +static void b43legacy_periodic_every15sec(struct b43legacy_wldev *dev) +{ + b43legacy_phy_xmitpower(dev); /* FIXME: unless scanning? */ + + atomic_set(&dev->phy.txerr_cnt, B43legacy_PHY_TX_BADNESS_LIMIT); + wmb(); +} + +static void do_periodic_work(struct b43legacy_wldev *dev) +{ + unsigned int state; + + state = dev->periodic_state; + if (state % 8 == 0) + b43legacy_periodic_every120sec(dev); + if (state % 4 == 0) + b43legacy_periodic_every60sec(dev); + if (state % 2 == 0) + b43legacy_periodic_every30sec(dev); + b43legacy_periodic_every15sec(dev); +} + +/* Periodic work locking policy: + * The whole periodic work handler is protected by + * wl->mutex. If another lock is needed somewhere in the + * pwork callchain, it's acquired in-place, where it's needed. 
+ */ +static void b43legacy_periodic_work_handler(struct work_struct *work) +{ + struct b43legacy_wldev *dev = container_of(work, struct b43legacy_wldev, + periodic_work.work); + struct b43legacy_wl *wl = dev->wl; + unsigned long delay; + + mutex_lock(&wl->mutex); + + if (unlikely(b43legacy_status(dev) != B43legacy_STAT_STARTED)) + goto out; + if (b43legacy_debug(dev, B43legacy_DBG_PWORK_STOP)) + goto out_requeue; + + do_periodic_work(dev); + + dev->periodic_state++; +out_requeue: + if (b43legacy_debug(dev, B43legacy_DBG_PWORK_FAST)) + delay = msecs_to_jiffies(50); + else + delay = round_jiffies_relative(HZ * 15); + ieee80211_queue_delayed_work(wl->hw, &dev->periodic_work, delay); +out: + mutex_unlock(&wl->mutex); +} + +static void b43legacy_periodic_tasks_setup(struct b43legacy_wldev *dev) +{ + struct delayed_work *work = &dev->periodic_work; + + dev->periodic_state = 0; + INIT_DELAYED_WORK(work, b43legacy_periodic_work_handler); + ieee80211_queue_delayed_work(dev->wl->hw, work, 0); +} + +/* Validate access to the chip (SHM) */ +static int b43legacy_validate_chipaccess(struct b43legacy_wldev *dev) +{ + u32 value; + u32 shm_backup; + + shm_backup = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, 0xAA5555AA); + if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0) != + 0xAA5555AA) + goto error; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, 0x55AAAA55); + if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0) != + 0x55AAAA55) + goto error; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, shm_backup); + + value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + if ((value | B43legacy_MACCTL_GMODE) != + (B43legacy_MACCTL_GMODE | B43legacy_MACCTL_IHR_ENABLED)) + goto error; + + value = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + if (value) + goto error; + + return 0; +error: + b43legacyerr(dev->wl, "Failed to validate the chipaccess\n"); + return -ENODEV; +} + +static void b43legacy_security_init(struct b43legacy_wldev *dev) +{ + dev->max_nr_keys = (dev->dev->id.revision >= 5) ? 58 : 20; + B43legacy_WARN_ON(dev->max_nr_keys > ARRAY_SIZE(dev->key)); + dev->ktp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x0056); + /* KTP is a word address, but we address SHM bytewise. + * So multiply by two. + */ + dev->ktp *= 2; + if (dev->dev->id.revision >= 5) + /* Number of RCMTA address slots */ + b43legacy_write16(dev, B43legacy_MMIO_RCMTA_COUNT, + dev->max_nr_keys - 8); +} + +#ifdef CONFIG_B43LEGACY_HWRNG +static int b43legacy_rng_read(struct hwrng *rng, u32 *data) +{ + struct b43legacy_wl *wl = (struct b43legacy_wl *)rng->priv; + unsigned long flags; + + /* Don't take wl->mutex here, as it could deadlock with + * hwrng internal locking. It's not needed to take + * wl->mutex here, anyway. 
*/ + + spin_lock_irqsave(&wl->irq_lock, flags); + *data = b43legacy_read16(wl->current_dev, B43legacy_MMIO_RNG); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return (sizeof(u16)); +} +#endif + +static void b43legacy_rng_exit(struct b43legacy_wl *wl) +{ +#ifdef CONFIG_B43LEGACY_HWRNG + if (wl->rng_initialized) + hwrng_unregister(&wl->rng); +#endif +} + +static int b43legacy_rng_init(struct b43legacy_wl *wl) +{ + int err = 0; + +#ifdef CONFIG_B43LEGACY_HWRNG + snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name), + "%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy)); + wl->rng.name = wl->rng_name; + wl->rng.data_read = b43legacy_rng_read; + wl->rng.priv = (unsigned long)wl; + wl->rng_initialized = 1; + err = hwrng_register(&wl->rng); + if (err) { + wl->rng_initialized = 0; + b43legacyerr(wl, "Failed to register the random " + "number generator (%d)\n", err); + } + +#endif + return err; +} + +static int b43legacy_op_tx(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + int err = -ENODEV; + unsigned long flags; + + if (unlikely(!dev)) + goto out; + if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED)) + goto out; + /* DMA-TX is done without a global lock. */ + if (b43legacy_using_pio(dev)) { + spin_lock_irqsave(&wl->irq_lock, flags); + err = b43legacy_pio_tx(dev, skb); + spin_unlock_irqrestore(&wl->irq_lock, flags); + } else + err = b43legacy_dma_tx(dev, skb); +out: + if (unlikely(err)) { + /* Drop the packet. */ + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; +} + +static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + return 0; +} + +static int b43legacy_op_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + unsigned long flags; + + spin_lock_irqsave(&wl->irq_lock, flags); + memcpy(stats, &wl->ieee_stats, sizeof(*stats)); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return 0; +} + +static const char *phymode_to_string(unsigned int phymode) +{ + switch (phymode) { + case B43legacy_PHYMODE_B: + return "B"; + case B43legacy_PHYMODE_G: + return "G"; + default: + B43legacy_BUG_ON(1); + } + return ""; +} + +static int find_wldev_for_phymode(struct b43legacy_wl *wl, + unsigned int phymode, + struct b43legacy_wldev **dev, + bool *gmode) +{ + struct b43legacy_wldev *d; + + list_for_each_entry(d, &wl->devlist, list) { + if (d->phy.possible_phymodes & phymode) { + /* Ok, this device supports the PHY-mode. + * Set the gmode bit. 
*/ + *gmode = 1; + *dev = d; + + return 0; + } + } + + return -ESRCH; +} + +static void b43legacy_put_phy_into_reset(struct b43legacy_wldev *dev) +{ + struct ssb_device *sdev = dev->dev; + u32 tmslow; + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~B43legacy_TMSLOW_GMODE; + tmslow |= B43legacy_TMSLOW_PHYRESET; + tmslow |= SSB_TMSLOW_FGC; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~SSB_TMSLOW_FGC; + tmslow |= B43legacy_TMSLOW_PHYRESET; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); +} + +/* Expects wl->mutex locked */ +static int b43legacy_switch_phymode(struct b43legacy_wl *wl, + unsigned int new_mode) +{ + struct b43legacy_wldev *uninitialized_var(up_dev); + struct b43legacy_wldev *down_dev; + int err; + bool gmode = 0; + int prev_status; + + err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode); + if (err) { + b43legacyerr(wl, "Could not find a device for %s-PHY mode\n", + phymode_to_string(new_mode)); + return err; + } + if ((up_dev == wl->current_dev) && + (!!wl->current_dev->phy.gmode == !!gmode)) + /* This device is already running. */ + return 0; + b43legacydbg(wl, "Reconfiguring PHYmode to %s-PHY\n", + phymode_to_string(new_mode)); + down_dev = wl->current_dev; + + prev_status = b43legacy_status(down_dev); + /* Shutdown the currently running core. */ + if (prev_status >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(down_dev); + if (prev_status >= B43legacy_STAT_INITIALIZED) + b43legacy_wireless_core_exit(down_dev); + + if (down_dev != up_dev) + /* We switch to a different core, so we put PHY into + * RESET on the old core. */ + b43legacy_put_phy_into_reset(down_dev); + + /* Now start the new core. */ + up_dev->phy.gmode = gmode; + if (prev_status >= B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(up_dev); + if (err) { + b43legacyerr(wl, "Fatal: Could not initialize device" + " for newly selected %s-PHY mode\n", + phymode_to_string(new_mode)); + goto init_failure; + } + } + if (prev_status >= B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(up_dev); + if (err) { + b43legacyerr(wl, "Fatal: Could not start device for " + "newly selected %s-PHY mode\n", + phymode_to_string(new_mode)); + b43legacy_wireless_core_exit(up_dev); + goto init_failure; + } + } + B43legacy_WARN_ON(b43legacy_status(up_dev) != prev_status); + + b43legacy_shm_write32(up_dev, B43legacy_SHM_SHARED, 0x003E, 0); + + wl->current_dev = up_dev; + + return 0; +init_failure: + /* Whoops, failed to init the new core. No core is operating now. */ + wl->current_dev = NULL; + return err; +} + +/* Write the short and long frame retry limit values. */ +static void b43legacy_set_retry_limits(struct b43legacy_wldev *dev, + unsigned int short_retry, + unsigned int long_retry) +{ + /* The retry limit is a 4-bit counter. Enforce this to avoid overflowing + * the chip-internal counter.
*/ + short_retry = min(short_retry, (unsigned int)0xF); + long_retry = min(long_retry, (unsigned int)0xF); + + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0006, short_retry); + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0007, long_retry); +} + +static int b43legacy_op_dev_config(struct ieee80211_hw *hw, + u32 changed) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev; + struct b43legacy_phy *phy; + struct ieee80211_conf *conf = &hw->conf; + unsigned long flags; + unsigned int new_phymode = 0xFFFF; + int antenna_tx; + int antenna_rx; + int err = 0; + + antenna_tx = B43legacy_ANTENNA_DEFAULT; + antenna_rx = B43legacy_ANTENNA_DEFAULT; + + mutex_lock(&wl->mutex); + dev = wl->current_dev; + phy = &dev->phy; + + if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) + b43legacy_set_retry_limits(dev, + conf->short_frame_max_tx_count, + conf->long_frame_max_tx_count); + changed &= ~IEEE80211_CONF_CHANGE_RETRY_LIMITS; + if (!changed) + goto out_unlock_mutex; + + /* Switch the PHY mode (if necessary). */ + switch (conf->channel->band) { + case IEEE80211_BAND_2GHZ: + if (phy->type == B43legacy_PHYTYPE_B) + new_phymode = B43legacy_PHYMODE_B; + else + new_phymode = B43legacy_PHYMODE_G; + break; + default: + B43legacy_WARN_ON(1); + } + err = b43legacy_switch_phymode(wl, new_phymode); + if (err) + goto out_unlock_mutex; + + /* Disable IRQs while reconfiguring the device. + * This makes it possible to drop the spinlock throughout + * the reconfiguration process. */ + spin_lock_irqsave(&wl->irq_lock, flags); + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) { + spin_unlock_irqrestore(&wl->irq_lock, flags); + goto out_unlock_mutex; + } + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0); + spin_unlock_irqrestore(&wl->irq_lock, flags); + b43legacy_synchronize_irq(dev); + + /* Switch to the requested channel. + * The firmware takes care of races with the TX handler. */ + if (conf->channel->hw_value != phy->channel) + b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0); + + dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR); + + /* Adjust the desired TX power level. */ + if (conf->power_level != 0) { + if (conf->power_level != phy->power_level) { + phy->power_level = conf->power_level; + b43legacy_phy_xmitpower(dev); + } + } + + /* Antennas for RX and management frame TX. */ + b43legacy_mgmtframe_txantenna(dev, antenna_tx); + + if (wl->radio_enabled != phy->radio_on) { + if (wl->radio_enabled) { + b43legacy_radio_turn_on(dev); + b43legacyinfo(dev->wl, "Radio turned on by software\n"); + if (!dev->radio_hw_enable) + b43legacyinfo(dev->wl, "The hardware RF-kill" + " button still turns the radio" + " physically off. 
Press the" + " button to turn it on.\n"); + } else { + b43legacy_radio_turn_off(dev, 0); + b43legacyinfo(dev->wl, "Radio turned off by" + " software\n"); + } + } + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask); + mmiowb(); + spin_unlock_irqrestore(&wl->irq_lock, flags); +out_unlock_mutex: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates) +{ + struct ieee80211_supported_band *sband = + dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ]; + struct ieee80211_rate *rate; + int i; + u16 basic, direct, offset, basic_offset, rateptr; + + for (i = 0; i < sband->n_bitrates; i++) { + rate = &sband->bitrates[i]; + + if (b43legacy_is_cck_rate(rate->hw_value)) { + direct = B43legacy_SHM_SH_CCKDIRECT; + basic = B43legacy_SHM_SH_CCKBASIC; + offset = b43legacy_plcp_get_ratecode_cck(rate->hw_value); + offset &= 0xF; + } else { + direct = B43legacy_SHM_SH_OFDMDIRECT; + basic = B43legacy_SHM_SH_OFDMBASIC; + offset = b43legacy_plcp_get_ratecode_ofdm(rate->hw_value); + offset &= 0xF; + } + + rate = ieee80211_get_response_rate(sband, brates, rate->bitrate); + + if (b43legacy_is_cck_rate(rate->hw_value)) { + basic_offset = b43legacy_plcp_get_ratecode_cck(rate->hw_value); + basic_offset &= 0xF; + } else { + basic_offset = b43legacy_plcp_get_ratecode_ofdm(rate->hw_value); + basic_offset &= 0xF; + } + + /* + * Get the pointer that we need to point to + * from the direct map + */ + rateptr = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + direct + 2 * basic_offset); + /* and write it to the basic map */ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + basic + 2 * offset, rateptr); + } +} + +static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, + u32 changed) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev; + struct b43legacy_phy *phy; + unsigned long flags; + + mutex_lock(&wl->mutex); + B43legacy_WARN_ON(wl->vif != vif); + + dev = wl->current_dev; + phy = &dev->phy; + + /* Disable IRQs while reconfiguring the device. + * This makes it possible to drop the spinlock throughout + * the reconfiguration process. 
*/ + spin_lock_irqsave(&wl->irq_lock, flags); + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) { + spin_unlock_irqrestore(&wl->irq_lock, flags); + goto out_unlock_mutex; + } + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0); + + if (changed & BSS_CHANGED_BSSID) { + b43legacy_synchronize_irq(dev); + + if (conf->bssid) + memcpy(wl->bssid, conf->bssid, ETH_ALEN); + else + memset(wl->bssid, 0, ETH_ALEN); + } + + if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) { + if (changed & BSS_CHANGED_BEACON && + (b43legacy_is_mode(wl, NL80211_IFTYPE_AP) || + b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC))) + b43legacy_update_templates(wl); + + if (changed & BSS_CHANGED_BSSID) + b43legacy_write_mac_bssid_templates(dev); + } + spin_unlock_irqrestore(&wl->irq_lock, flags); + + b43legacy_mac_suspend(dev); + + if (changed & BSS_CHANGED_BEACON_INT && + (b43legacy_is_mode(wl, NL80211_IFTYPE_AP) || + b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC))) + b43legacy_set_beacon_int(dev, conf->beacon_int); + + if (changed & BSS_CHANGED_BASIC_RATES) + b43legacy_update_basic_rates(dev, conf->basic_rates); + + if (changed & BSS_CHANGED_ERP_SLOT) { + if (conf->use_short_slot) + b43legacy_short_slot_timing_enable(dev); + else + b43legacy_short_slot_timing_disable(dev); + } + + b43legacy_mac_enable(dev); + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask); + /* XXX: why? */ + mmiowb(); + spin_unlock_irqrestore(&wl->irq_lock, flags); + out_unlock_mutex: + mutex_unlock(&wl->mutex); +} + +static void b43legacy_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed, + unsigned int *fflags,u64 multicast) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + + if (!dev) { + *fflags = 0; + return; + } + + spin_lock_irqsave(&wl->irq_lock, flags); + *fflags &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + changed &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + wl->filter_flags = *fflags; + + if (changed && b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) + b43legacy_adjust_opmode(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); +} + +/* Locking: wl->mutex */ +static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + unsigned long flags; + + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) + return; + + /* Disable and sync interrupts. We must do this before than + * setting the status to INITIALIZED, as the interrupt handler + * won't care about IRQs then. */ + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0); + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); /* flush */ + spin_unlock_irqrestore(&wl->irq_lock, flags); + b43legacy_synchronize_irq(dev); + + b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED); + + mutex_unlock(&wl->mutex); + /* Must unlock as it would otherwise deadlock. No races here. + * Cancel the possibly running self-rearming periodic work. 
*/ + cancel_delayed_work_sync(&dev->periodic_work); + mutex_lock(&wl->mutex); + + ieee80211_stop_queues(wl->hw); /* FIXME this could cause a deadlock */ + + b43legacy_mac_suspend(dev); + free_irq(dev->dev->irq, dev); + b43legacydbg(wl, "Wireless interface stopped\n"); +} + +/* Locking: wl->mutex */ +static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev) +{ + int err; + + B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_INITIALIZED); + + drain_txstatus_queue(dev); + err = request_irq(dev->dev->irq, b43legacy_interrupt_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + b43legacyerr(dev->wl, "Cannot request IRQ-%d\n", + dev->dev->irq); + goto out; + } + /* We are ready to run. */ + ieee80211_wake_queues(dev->wl->hw); + b43legacy_set_status(dev, B43legacy_STAT_STARTED); + + /* Start data flow (TX/RX) */ + b43legacy_mac_enable(dev); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask); + + /* Start maintenance work */ + b43legacy_periodic_tasks_setup(dev); + + b43legacydbg(dev->wl, "Wireless interface started\n"); +out: + return err; +} + +/* Get PHY and RADIO versioning numbers */ +static int b43legacy_phy_versioning(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u32 tmp; + u8 analog_type; + u8 phy_type; + u8 phy_rev; + u16 radio_manuf; + u16 radio_ver; + u16 radio_rev; + int unsupported = 0; + + /* Get PHY versioning */ + tmp = b43legacy_read16(dev, B43legacy_MMIO_PHY_VER); + analog_type = (tmp & B43legacy_PHYVER_ANALOG) + >> B43legacy_PHYVER_ANALOG_SHIFT; + phy_type = (tmp & B43legacy_PHYVER_TYPE) >> B43legacy_PHYVER_TYPE_SHIFT; + phy_rev = (tmp & B43legacy_PHYVER_VERSION); + switch (phy_type) { + case B43legacy_PHYTYPE_B: + if (phy_rev != 2 && phy_rev != 4 + && phy_rev != 6 && phy_rev != 7) + unsupported = 1; + break; + case B43legacy_PHYTYPE_G: + if (phy_rev > 8) + unsupported = 1; + break; + default: + unsupported = 1; + }; + if (unsupported) { + b43legacyerr(dev->wl, "FOUND UNSUPPORTED PHY " + "(Analog %u, Type %u, Revision %u)\n", + analog_type, phy_type, phy_rev); + return -EOPNOTSUPP; + } + b43legacydbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", + analog_type, phy_type, phy_rev); + + + /* Get RADIO versioning */ + if (dev->dev->bus->chip_id == 0x4317) { + if (dev->dev->bus->chip_rev == 0) + tmp = 0x3205017F; + else if (dev->dev->bus->chip_rev == 1) + tmp = 0x4205017F; + else + tmp = 0x5205017F; + } else { + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, + B43legacy_RADIOCTL_ID); + tmp = b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_HIGH); + tmp <<= 16; + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, + B43legacy_RADIOCTL_ID); + tmp |= b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_LOW); + } + radio_manuf = (tmp & 0x00000FFF); + radio_ver = (tmp & 0x0FFFF000) >> 12; + radio_rev = (tmp & 0xF0000000) >> 28; + switch (phy_type) { + case B43legacy_PHYTYPE_B: + if ((radio_ver & 0xFFF0) != 0x2050) + unsupported = 1; + break; + case B43legacy_PHYTYPE_G: + if (radio_ver != 0x2050) + unsupported = 1; + break; + default: + B43legacy_BUG_ON(1); + } + if (unsupported) { + b43legacyerr(dev->wl, "FOUND UNSUPPORTED RADIO " + "(Manuf 0x%X, Version 0x%X, Revision %u)\n", + radio_manuf, radio_ver, radio_rev); + return -EOPNOTSUPP; + } + b43legacydbg(dev->wl, "Found Radio: Manuf 0x%X, Version 0x%X," + " Revision %u\n", radio_manuf, radio_ver, radio_rev); + + + phy->radio_manuf = radio_manuf; + phy->radio_ver = radio_ver; + phy->radio_rev = radio_rev; + + phy->analog = analog_type; + phy->type = 
phy_type; + phy->rev = phy_rev; + + return 0; +} + +static void setup_struct_phy_for_init(struct b43legacy_wldev *dev, + struct b43legacy_phy *phy) +{ + struct b43legacy_lopair *lo; + int i; + + memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig)); + memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos)); + + /* Assume the radio is enabled. If it's not enabled, the state will + * immediately get fixed on the first periodic work run. */ + dev->radio_hw_enable = 1; + + phy->savedpctlreg = 0xFFFF; + phy->aci_enable = 0; + phy->aci_wlan_automatic = 0; + phy->aci_hw_rssi = 0; + + lo = phy->_lo_pairs; + if (lo) + memset(lo, 0, sizeof(struct b43legacy_lopair) * + B43legacy_LO_COUNT); + phy->max_lb_gain = 0; + phy->trsw_rx_gain = 0; + + /* Set default attenuation values. */ + phy->bbatt = b43legacy_default_baseband_attenuation(dev); + phy->rfatt = b43legacy_default_radio_attenuation(dev); + phy->txctl1 = b43legacy_default_txctl1(dev); + phy->txpwr_offset = 0; + + /* NRSSI */ + phy->nrssislope = 0; + for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++) + phy->nrssi[i] = -1000; + for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++) + phy->nrssi_lt[i] = i; + + phy->lofcal = 0xFFFF; + phy->initval = 0xFFFF; + + phy->interfmode = B43legacy_INTERFMODE_NONE; + phy->channel = 0xFF; +} + +static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev) +{ + /* Flags */ + dev->dfq_valid = 0; + + /* Stats */ + memset(&dev->stats, 0, sizeof(dev->stats)); + + setup_struct_phy_for_init(dev, &dev->phy); + + /* IRQ related flags */ + dev->irq_reason = 0; + memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); + dev->irq_mask = B43legacy_IRQ_MASKTEMPLATE; + + dev->mac_suspended = 1; + + /* Noise calculation context */ + memset(&dev->noisecalc, 0, sizeof(dev->noisecalc)); +} + +static void b43legacy_imcfglo_timeouts_workaround(struct b43legacy_wldev *dev) +{ +#ifdef CONFIG_SSB_DRIVER_PCICORE + struct ssb_bus *bus = dev->dev->bus; + u32 tmp; + + if (bus->pcicore.dev && + bus->pcicore.dev->id.coreid == SSB_DEV_PCI && + bus->pcicore.dev->id.revision <= 5) { + /* IMCFGLO timeouts workaround. */ + tmp = ssb_read32(dev->dev, SSB_IMCFGLO); + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + case SSB_BUSTYPE_PCMCIA: + tmp &= ~SSB_IMCFGLO_REQTO; + tmp &= ~SSB_IMCFGLO_SERTO; + tmp |= 0x32; + break; + case SSB_BUSTYPE_SSB: + tmp &= ~SSB_IMCFGLO_REQTO; + tmp &= ~SSB_IMCFGLO_SERTO; + tmp |= 0x53; + break; + default: + break; + } + ssb_write32(dev->dev, SSB_IMCFGLO, tmp); + } +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +} + +static void b43legacy_set_synth_pu_delay(struct b43legacy_wldev *dev, + bool idle) { + u16 pu_delay = 1050; + + if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle) + pu_delay = 500; + if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) + pu_delay = max(pu_delay, (u16)2400); + + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_SPUWKUP, pu_delay); +} + +/* Set the TSF CFP pre-TargetBeaconTransmissionTime. */ +static void b43legacy_set_pretbtt(struct b43legacy_wldev *dev) +{ + u16 pretbtt; + + /* The time value is in microseconds. 
*/ + if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) + pretbtt = 2; + else + pretbtt = 250; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_PRETBTT, pretbtt); + b43legacy_write16(dev, B43legacy_MMIO_TSF_CFP_PRETBTT, pretbtt); +} + +/* Shutdown a wireless core */ +/* Locking: wl->mutex */ +static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u32 macctl; + + B43legacy_WARN_ON(b43legacy_status(dev) > B43legacy_STAT_INITIALIZED); + if (b43legacy_status(dev) != B43legacy_STAT_INITIALIZED) + return; + b43legacy_set_status(dev, B43legacy_STAT_UNINIT); + + /* Stop the microcode PSM. */ + macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + macctl &= ~B43legacy_MACCTL_PSM_RUN; + macctl |= B43legacy_MACCTL_PSM_JMP0; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); + + b43legacy_leds_exit(dev); + b43legacy_rng_exit(dev->wl); + b43legacy_pio_free(dev); + b43legacy_dma_free(dev); + b43legacy_chip_exit(dev); + b43legacy_radio_turn_off(dev, 1); + b43legacy_switch_analog(dev, 0); + if (phy->dyn_tssi_tbl) + kfree(phy->tssi2dbm); + kfree(phy->lo_control); + phy->lo_control = NULL; + if (dev->wl->current_beacon) { + dev_kfree_skb_any(dev->wl->current_beacon); + dev->wl->current_beacon = NULL; + } + + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(dev->dev->bus); +} + +static void prepare_phy_data_for_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int i; + + /* Set default attenuation values. */ + phy->bbatt = b43legacy_default_baseband_attenuation(dev); + phy->rfatt = b43legacy_default_radio_attenuation(dev); + phy->txctl1 = b43legacy_default_txctl1(dev); + phy->txctl2 = 0xFFFF; + phy->txpwr_offset = 0; + + /* NRSSI */ + phy->nrssislope = 0; + for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++) + phy->nrssi[i] = -1000; + for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++) + phy->nrssi_lt[i] = i; + + phy->lofcal = 0xFFFF; + phy->initval = 0xFFFF; + + phy->aci_enable = 0; + phy->aci_wlan_automatic = 0; + phy->aci_hw_rssi = 0; + + phy->antenna_diversity = 0xFFFF; + memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig)); + memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos)); + + /* Flags */ + phy->calibrated = 0; + + if (phy->_lo_pairs) + memset(phy->_lo_pairs, 0, + sizeof(struct b43legacy_lopair) * B43legacy_LO_COUNT); + memset(phy->loopback_gain, 0, sizeof(phy->loopback_gain)); +} + +/* Initialize a wireless core */ +static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct b43legacy_phy *phy = &dev->phy; + struct ssb_sprom *sprom = &dev->dev->bus->sprom; + int err; + u32 hf; + u32 tmp; + + B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_UNINIT); + + err = ssb_bus_powerup(bus, 0); + if (err) + goto out; + if (!ssb_device_is_enabled(dev->dev)) { + tmp = phy->gmode ? B43legacy_TMSLOW_GMODE : 0; + b43legacy_wireless_core_reset(dev, tmp); + } + + if ((phy->type == B43legacy_PHYTYPE_B) || + (phy->type == B43legacy_PHYTYPE_G)) { + phy->_lo_pairs = kzalloc(sizeof(struct b43legacy_lopair) + * B43legacy_LO_COUNT, + GFP_KERNEL); + if (!phy->_lo_pairs) + return -ENOMEM; + } + setup_struct_wldev_for_init(dev); + + err = b43legacy_phy_init_tssi2dbm_table(dev); + if (err) + goto err_kfree_lo_control; + + /* Enable IRQ routing to this device. 
*/ + ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); + + b43legacy_imcfglo_timeouts_workaround(dev); + prepare_phy_data_for_init(dev); + b43legacy_phy_calibrate(dev); + err = b43legacy_chip_init(dev); + if (err) + goto err_kfree_tssitbl; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_WLCOREREV, + dev->dev->id.revision); + hf = b43legacy_hf_read(dev); + if (phy->type == B43legacy_PHYTYPE_G) { + hf |= B43legacy_HF_SYMW; + if (phy->rev == 1) + hf |= B43legacy_HF_GDCW; + if (sprom->boardflags_lo & B43legacy_BFL_PACTRL) + hf |= B43legacy_HF_OFDMPABOOST; + } else if (phy->type == B43legacy_PHYTYPE_B) { + hf |= B43legacy_HF_SYMW; + if (phy->rev >= 2 && phy->radio_ver == 0x2050) + hf &= ~B43legacy_HF_GDCW; + } + b43legacy_hf_write(dev, hf); + + b43legacy_set_retry_limits(dev, + B43legacy_DEFAULT_SHORT_RETRY_LIMIT, + B43legacy_DEFAULT_LONG_RETRY_LIMIT); + + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + 0x0044, 3); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + 0x0046, 2); + + /* Disable sending probe responses from firmware. + * Setting the MaxTime to one usec will always trigger + * a timeout, so we never send any probe resp. + * A timeout of zero is infinite. */ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_PRMAXTIME, 1); + + b43legacy_rate_memory_init(dev); + + /* Minimum Contention Window */ + if (phy->type == B43legacy_PHYTYPE_B) + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0003, 31); + else + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0003, 15); + /* Maximum Contention Window */ + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0004, 1023); + + do { + if (b43legacy_using_pio(dev)) + err = b43legacy_pio_init(dev); + else { + err = b43legacy_dma_init(dev); + if (!err) + b43legacy_qos_init(dev); + } + } while (err == -EAGAIN); + if (err) + goto err_chip_exit; + + b43legacy_set_synth_pu_delay(dev, 1); + + ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ + b43legacy_upload_card_macaddress(dev); + b43legacy_security_init(dev); + b43legacy_rng_init(wl); + + ieee80211_wake_queues(dev->wl->hw); + b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED); + + b43legacy_leds_init(dev); +out: + return err; + +err_chip_exit: + b43legacy_chip_exit(dev); +err_kfree_tssitbl: + if (phy->dyn_tssi_tbl) + kfree(phy->tssi2dbm); +err_kfree_lo_control: + kfree(phy->lo_control); + phy->lo_control = NULL; + ssb_bus_may_powerdown(bus); + B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_UNINIT); + return err; +} + +static int b43legacy_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev; + unsigned long flags; + int err = -EOPNOTSUPP; + + /* TODO: allow WDS/AP devices to coexist */ + + if (vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_STATION && + vif->type != NL80211_IFTYPE_WDS && + vif->type != NL80211_IFTYPE_ADHOC) + return -EOPNOTSUPP; + + mutex_lock(&wl->mutex); + if (wl->operating) + goto out_mutex_unlock; + + b43legacydbg(wl, "Adding Interface type %d\n", vif->type); + + dev = wl->current_dev; + wl->operating = 1; + wl->vif = vif; + wl->if_type = vif->type; + memcpy(wl->mac_addr, vif->addr, ETH_ALEN); + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_adjust_opmode(dev); + b43legacy_set_pretbtt(dev); + b43legacy_set_synth_pu_delay(dev, 0); + b43legacy_upload_card_macaddress(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + err = 0; + out_mutex_unlock: + 
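+	/* Reached both on success (err == 0) and, via the goto above, when an
+	 * interface is already operating (err is then still -EOPNOTSUPP). */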
mutex_unlock(&wl->mutex); + + return err; +} + +static void b43legacy_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + + b43legacydbg(wl, "Removing Interface type %d\n", vif->type); + + mutex_lock(&wl->mutex); + + B43legacy_WARN_ON(!wl->operating); + B43legacy_WARN_ON(wl->vif != vif); + wl->vif = NULL; + + wl->operating = 0; + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_adjust_opmode(dev); + memset(wl->mac_addr, 0, ETH_ALEN); + b43legacy_upload_card_macaddress(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + mutex_unlock(&wl->mutex); +} + +static int b43legacy_op_start(struct ieee80211_hw *hw) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + int did_init = 0; + int err = 0; + + /* Kill all old instance specific information to make sure + * the card won't use it in the short timeframe between start + * and mac80211 reconfiguring it. */ + memset(wl->bssid, 0, ETH_ALEN); + memset(wl->mac_addr, 0, ETH_ALEN); + wl->filter_flags = 0; + wl->beacon0_uploaded = 0; + wl->beacon1_uploaded = 0; + wl->beacon_templates_virgin = 1; + wl->radio_enabled = 1; + + mutex_lock(&wl->mutex); + + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(dev); + if (err) + goto out_mutex_unlock; + did_init = 1; + } + + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(dev); + if (err) { + if (did_init) + b43legacy_wireless_core_exit(dev); + goto out_mutex_unlock; + } + } + + wiphy_rfkill_start_polling(hw->wiphy); + +out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43legacy_op_stop(struct ieee80211_hw *hw) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + + cancel_work_sync(&(wl->beacon_update_trigger)); + + mutex_lock(&wl->mutex); + if (b43legacy_status(dev) >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(dev); + b43legacy_wireless_core_exit(dev); + wl->radio_enabled = 0; + mutex_unlock(&wl->mutex); +} + +static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + unsigned long flags; + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_update_templates(wl); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return 0; +} + +static const struct ieee80211_ops b43legacy_hw_ops = { + .tx = b43legacy_op_tx, + .conf_tx = b43legacy_op_conf_tx, + .add_interface = b43legacy_op_add_interface, + .remove_interface = b43legacy_op_remove_interface, + .config = b43legacy_op_dev_config, + .bss_info_changed = b43legacy_op_bss_info_changed, + .configure_filter = b43legacy_op_configure_filter, + .get_stats = b43legacy_op_get_stats, + .start = b43legacy_op_start, + .stop = b43legacy_op_stop, + .set_tim = b43legacy_op_beacon_set_tim, + .rfkill_poll = b43legacy_rfkill_poll, +}; + +/* Hard-reset the chip. Do not call this directly. + * Use b43legacy_controller_restart() + */ +static void b43legacy_chip_reset(struct work_struct *work) +{ + struct b43legacy_wldev *dev = + container_of(work, struct b43legacy_wldev, restart_work); + struct b43legacy_wl *wl = dev->wl; + int err = 0; + int prev_status; + + mutex_lock(&wl->mutex); + + prev_status = b43legacy_status(dev); + /* Bring the device down... 
*/ + if (prev_status >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(dev); + if (prev_status >= B43legacy_STAT_INITIALIZED) + b43legacy_wireless_core_exit(dev); + + /* ...and up again. */ + if (prev_status >= B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(dev); + if (err) + goto out; + } + if (prev_status >= B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(dev); + if (err) { + b43legacy_wireless_core_exit(dev); + goto out; + } + } +out: + if (err) + wl->current_dev = NULL; /* Failed to init the dev. */ + mutex_unlock(&wl->mutex); + if (err) + b43legacyerr(wl, "Controller restart FAILED\n"); + else + b43legacyinfo(wl, "Controller restarted\n"); +} + +static int b43legacy_setup_modes(struct b43legacy_wldev *dev, + int have_bphy, + int have_gphy) +{ + struct ieee80211_hw *hw = dev->wl->hw; + struct b43legacy_phy *phy = &dev->phy; + + phy->possible_phymodes = 0; + if (have_bphy) { + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &b43legacy_band_2GHz_BPHY; + phy->possible_phymodes |= B43legacy_PHYMODE_B; + } + + if (have_gphy) { + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &b43legacy_band_2GHz_GPHY; + phy->possible_phymodes |= B43legacy_PHYMODE_G; + } + + return 0; +} + +static void b43legacy_wireless_core_detach(struct b43legacy_wldev *dev) +{ + /* We release firmware that late to not be required to re-request + * is all the time when we reinit the core. */ + b43legacy_release_firmware(dev); +} + +static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL; + int err; + int have_bphy = 0; + int have_gphy = 0; + u32 tmp; + + /* Do NOT do any device initialization here. + * Do it in wireless_core_init() instead. + * This function is for gathering basic information about the HW, only. + * Also some structs may be set up here. But most likely you want to + * have that in core_init(), too. + */ + + err = ssb_bus_powerup(bus, 0); + if (err) { + b43legacyerr(wl, "Bus powerup failed\n"); + goto out; + } + /* Get the PHY type. */ + if (dev->dev->id.revision >= 5) { + u32 tmshigh; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + have_gphy = !!(tmshigh & B43legacy_TMSHIGH_GPHY); + if (!have_gphy) + have_bphy = 1; + } else if (dev->dev->id.revision == 4) + have_gphy = 1; + else + have_bphy = 1; + + dev->phy.gmode = (have_gphy || have_bphy); + dev->phy.radio_on = 1; + tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0; + b43legacy_wireless_core_reset(dev, tmp); + + err = b43legacy_phy_versioning(dev); + if (err) + goto err_powerdown; + /* Check if this device supports multiband. */ + if (!pdev || + (pdev->device != 0x4312 && + pdev->device != 0x4319 && + pdev->device != 0x4324)) { + /* No multiband support. */ + have_bphy = 0; + have_gphy = 0; + switch (dev->phy.type) { + case B43legacy_PHYTYPE_B: + have_bphy = 1; + break; + case B43legacy_PHYTYPE_G: + have_gphy = 1; + break; + default: + B43legacy_BUG_ON(1); + } + } + dev->phy.gmode = (have_gphy || have_bphy); + tmp = dev->phy.gmode ? 
B43legacy_TMSLOW_GMODE : 0; + b43legacy_wireless_core_reset(dev, tmp); + + err = b43legacy_validate_chipaccess(dev); + if (err) + goto err_powerdown; + err = b43legacy_setup_modes(dev, have_bphy, have_gphy); + if (err) + goto err_powerdown; + + /* Now set some default "current_dev" */ + if (!wl->current_dev) + wl->current_dev = dev; + INIT_WORK(&dev->restart_work, b43legacy_chip_reset); + + b43legacy_radio_turn_off(dev, 1); + b43legacy_switch_analog(dev, 0); + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(bus); + +out: + return err; + +err_powerdown: + ssb_bus_may_powerdown(bus); + return err; +} + +static void b43legacy_one_core_detach(struct ssb_device *dev) +{ + struct b43legacy_wldev *wldev; + struct b43legacy_wl *wl; + + /* Do not cancel ieee80211-workqueue based work here. + * See comment in b43legacy_remove(). */ + + wldev = ssb_get_drvdata(dev); + wl = wldev->wl; + b43legacy_debugfs_remove_device(wldev); + b43legacy_wireless_core_detach(wldev); + list_del(&wldev->list); + wl->nr_devs--; + ssb_set_drvdata(dev, NULL); + kfree(wldev); +} + +static int b43legacy_one_core_attach(struct ssb_device *dev, + struct b43legacy_wl *wl) +{ + struct b43legacy_wldev *wldev; + struct pci_dev *pdev; + int err = -ENOMEM; + + if (!list_empty(&wl->devlist)) { + /* We are not the first core on this chip. */ + pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL; + /* Only special chips support more than one wireless + * core, although some of the other chips have more than + * one wireless core as well. Check for this and + * bail out early. + */ + if (!pdev || + ((pdev->device != 0x4321) && + (pdev->device != 0x4313) && + (pdev->device != 0x431A))) { + b43legacydbg(wl, "Ignoring unconnected 802.11 core\n"); + return -ENODEV; + } + } + + wldev = kzalloc(sizeof(*wldev), GFP_KERNEL); + if (!wldev) + goto out; + + wldev->dev = dev; + wldev->wl = wl; + b43legacy_set_status(wldev, B43legacy_STAT_UNINIT); + wldev->bad_frames_preempt = modparam_bad_frames_preempt; + tasklet_init(&wldev->isr_tasklet, + (void (*)(unsigned long))b43legacy_interrupt_tasklet, + (unsigned long)wldev); + if (modparam_pio) + wldev->__using_pio = 1; + INIT_LIST_HEAD(&wldev->list); + + err = b43legacy_wireless_core_attach(wldev); + if (err) + goto err_kfree_wldev; + + list_add(&wldev->list, &wl->devlist); + wl->nr_devs++; + ssb_set_drvdata(dev, wldev); + b43legacy_debugfs_add_device(wldev); +out: + return err; + +err_kfree_wldev: + kfree(wldev); + return err; +} + +static void b43legacy_sprom_fixup(struct ssb_bus *bus) +{ + /* boardflags workarounds */ + if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE && + bus->boardinfo.type == 0x4E && + bus->boardinfo.rev > 0x40) + bus->sprom.boardflags_lo |= B43legacy_BFL_PACTRL; +} + +static void b43legacy_wireless_exit(struct ssb_device *dev, + struct b43legacy_wl *wl) +{ + struct ieee80211_hw *hw = wl->hw; + + ssb_set_devtypedata(dev, NULL); + ieee80211_free_hw(hw); +} + +static int b43legacy_wireless_init(struct ssb_device *dev) +{ + struct ssb_sprom *sprom = &dev->bus->sprom; + struct ieee80211_hw *hw; + struct b43legacy_wl *wl; + int err = -ENOMEM; + + b43legacy_sprom_fixup(dev->bus); + + hw = ieee80211_alloc_hw(sizeof(*wl), &b43legacy_hw_ops); + if (!hw) { + b43legacyerr(NULL, "Could not allocate ieee80211 device\n"); + goto out; + } + + /* fill hw info */ + hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM; + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_STATION) | + 
BIT(NL80211_IFTYPE_WDS) | + BIT(NL80211_IFTYPE_ADHOC); + hw->queues = 1; /* FIXME: hardware has more queues */ + hw->max_rates = 2; + SET_IEEE80211_DEV(hw, dev->dev); + if (is_valid_ether_addr(sprom->et1mac)) + SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac); + else + SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac); + + /* Get and initialize struct b43legacy_wl */ + wl = hw_to_b43legacy_wl(hw); + memset(wl, 0, sizeof(*wl)); + wl->hw = hw; + spin_lock_init(&wl->irq_lock); + spin_lock_init(&wl->leds_lock); + mutex_init(&wl->mutex); + INIT_LIST_HEAD(&wl->devlist); + INIT_WORK(&wl->beacon_update_trigger, b43legacy_beacon_update_trigger_work); + + ssb_set_devtypedata(dev, wl); + b43legacyinfo(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id); + err = 0; +out: + return err; +} + +static int b43legacy_probe(struct ssb_device *dev, + const struct ssb_device_id *id) +{ + struct b43legacy_wl *wl; + int err; + int first = 0; + + wl = ssb_get_devtypedata(dev); + if (!wl) { + /* Probing the first core - setup common struct b43legacy_wl */ + first = 1; + err = b43legacy_wireless_init(dev); + if (err) + goto out; + wl = ssb_get_devtypedata(dev); + B43legacy_WARN_ON(!wl); + } + err = b43legacy_one_core_attach(dev, wl); + if (err) + goto err_wireless_exit; + + if (first) { + err = ieee80211_register_hw(wl->hw); + if (err) + goto err_one_core_detach; + } + +out: + return err; + +err_one_core_detach: + b43legacy_one_core_detach(dev); +err_wireless_exit: + if (first) + b43legacy_wireless_exit(dev, wl); + return err; +} + +static void b43legacy_remove(struct ssb_device *dev) +{ + struct b43legacy_wl *wl = ssb_get_devtypedata(dev); + struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); + + /* We must cancel any work here before unregistering from ieee80211, + * as the ieee80211 unreg will destroy the workqueue. */ + cancel_work_sync(&wldev->restart_work); + + B43legacy_WARN_ON(!wl); + if (wl->current_dev == wldev) + ieee80211_unregister_hw(wl->hw); + + b43legacy_one_core_detach(dev); + + if (list_empty(&wl->devlist)) + /* Last core on the chip unregistered. + * We can destroy common struct b43legacy_wl. + */ + b43legacy_wireless_exit(dev, wl); +} + +/* Perform a hardware reset. This can be called from any context. */ +void b43legacy_controller_restart(struct b43legacy_wldev *dev, + const char *reason) +{ + /* Must avoid requeueing, if we are in shutdown. 
*/ + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) + return; + b43legacyinfo(dev->wl, "Controller RESET (%s) ...\n", reason); + ieee80211_queue_work(dev->wl->hw, &dev->restart_work); +} + +#ifdef CONFIG_PM + +static int b43legacy_suspend(struct ssb_device *dev, pm_message_t state) +{ + struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); + struct b43legacy_wl *wl = wldev->wl; + + b43legacydbg(wl, "Suspending...\n"); + + mutex_lock(&wl->mutex); + wldev->suspend_init_status = b43legacy_status(wldev); + if (wldev->suspend_init_status >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(wldev); + if (wldev->suspend_init_status >= B43legacy_STAT_INITIALIZED) + b43legacy_wireless_core_exit(wldev); + mutex_unlock(&wl->mutex); + + b43legacydbg(wl, "Device suspended.\n"); + + return 0; +} + +static int b43legacy_resume(struct ssb_device *dev) +{ + struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); + struct b43legacy_wl *wl = wldev->wl; + int err = 0; + + b43legacydbg(wl, "Resuming...\n"); + + mutex_lock(&wl->mutex); + if (wldev->suspend_init_status >= B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(wldev); + if (err) { + b43legacyerr(wl, "Resume failed at core init\n"); + goto out; + } + } + if (wldev->suspend_init_status >= B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(wldev); + if (err) { + b43legacy_wireless_core_exit(wldev); + b43legacyerr(wl, "Resume failed at core start\n"); + goto out; + } + } + + b43legacydbg(wl, "Device resumed.\n"); +out: + mutex_unlock(&wl->mutex); + return err; +} + +#else /* CONFIG_PM */ +# define b43legacy_suspend NULL +# define b43legacy_resume NULL +#endif /* CONFIG_PM */ + +static struct ssb_driver b43legacy_ssb_driver = { + .name = KBUILD_MODNAME, + .id_table = b43legacy_ssb_tbl, + .probe = b43legacy_probe, + .remove = b43legacy_remove, + .suspend = b43legacy_suspend, + .resume = b43legacy_resume, +}; + +static void b43legacy_print_driverinfo(void) +{ + const char *feat_pci = "", *feat_leds = "", + *feat_pio = "", *feat_dma = ""; + +#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT + feat_pci = "P"; +#endif +#ifdef CONFIG_B43LEGACY_LEDS + feat_leds = "L"; +#endif +#ifdef CONFIG_B43LEGACY_PIO + feat_pio = "I"; +#endif +#ifdef CONFIG_B43LEGACY_DMA + feat_dma = "D"; +#endif + printk(KERN_INFO "Broadcom 43xx-legacy driver loaded " + "[ Features: %s%s%s%s, Firmware-ID: " + B43legacy_SUPPORTED_FIRMWARE_ID " ]\n", + feat_pci, feat_leds, feat_pio, feat_dma); +} + +static int __init b43legacy_init(void) +{ + int err; + + b43legacy_debugfs_init(); + + err = ssb_driver_register(&b43legacy_ssb_driver); + if (err) + goto err_dfs_exit; + + b43legacy_print_driverinfo(); + + return err; + +err_dfs_exit: + b43legacy_debugfs_exit(); + return err; +} + +static void __exit b43legacy_exit(void) +{ + ssb_driver_unregister(&b43legacy_ssb_driver); + b43legacy_debugfs_exit(); +} + +module_init(b43legacy_init) +module_exit(b43legacy_exit) diff --git a/drivers/net/wireless/b43legacy/main.h b/drivers/net/wireless/b43legacy/main.h new file mode 100644 index 0000000..1f0e2e3 --- /dev/null +++ b/drivers/net/wireless/b43legacy/main.h @@ -0,0 +1,127 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005 Stefano Brivio + Copyright (c) 2005, 2006 Michael Buesch + Copyright (c) 2005 Danny van Dyk + Copyright (c) 2005 Andreas Jaggi + Copyright (c) 2007 Larry Finger + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#ifndef B43legacy_MAIN_H_ +#define B43legacy_MAIN_H_ + +#include "b43legacy.h" + + +#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes] +#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes) +/* Magic helper macro to pad structures. Ignore those above. It's magic. */ +#define PAD_BYTES(nr_bytes) P4D_BYTES(__LINE__ , (nr_bytes)) + + +/* Lightweight function to convert a frequency (in Mhz) to a channel number. */ +static inline +u8 b43legacy_freq_to_channel_bg(int freq) +{ + u8 channel; + + if (freq == 2484) + channel = 14; + else + channel = (freq - 2407) / 5; + + return channel; +} +static inline +u8 b43legacy_freq_to_channel(struct b43legacy_wldev *dev, + int freq) +{ + return b43legacy_freq_to_channel_bg(freq); +} + +/* Lightweight function to convert a channel number to a frequency (in Mhz). */ +static inline +int b43legacy_channel_to_freq_bg(u8 channel) +{ + int freq; + + if (channel == 14) + freq = 2484; + else + freq = 2407 + (5 * channel); + + return freq; +} + +static inline +int b43legacy_channel_to_freq(struct b43legacy_wldev *dev, + u8 channel) +{ + return b43legacy_channel_to_freq_bg(channel); +} + +static inline +int b43legacy_is_cck_rate(int rate) +{ + return (rate == B43legacy_CCK_RATE_1MB || + rate == B43legacy_CCK_RATE_2MB || + rate == B43legacy_CCK_RATE_5MB || + rate == B43legacy_CCK_RATE_11MB); +} + +static inline +int b43legacy_is_ofdm_rate(int rate) +{ + return !b43legacy_is_cck_rate(rate); +} + +void b43legacy_tsf_read(struct b43legacy_wldev *dev, u64 *tsf); +void b43legacy_tsf_write(struct b43legacy_wldev *dev, u64 tsf); + +u32 b43legacy_shm_read32(struct b43legacy_wldev *dev, + u16 routing, u16 offset); +u16 b43legacy_shm_read16(struct b43legacy_wldev *dev, + u16 routing, u16 offset); +void b43legacy_shm_write32(struct b43legacy_wldev *dev, + u16 routing, u16 offset, + u32 value); +void b43legacy_shm_write16(struct b43legacy_wldev *dev, + u16 routing, u16 offset, + u16 value); + +u32 b43legacy_hf_read(struct b43legacy_wldev *dev); +void b43legacy_hf_write(struct b43legacy_wldev *dev, u32 value); + +void b43legacy_dummy_transmission(struct b43legacy_wldev *dev); + +void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags); + +void b43legacy_mac_suspend(struct b43legacy_wldev *dev); +void b43legacy_mac_enable(struct b43legacy_wldev *dev); + +void b43legacy_controller_restart(struct b43legacy_wldev *dev, + const char *reason); + +#endif /* B43legacy_MAIN_H_ */ diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c new file mode 100644 index 0000000..aaf2272 --- /dev/null +++ b/drivers/net/wireless/b43legacy/phy.c @@ -0,0 +1,2257 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi 
+ Copyright (c) 2007 Larry Finger + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include +#include +#include +#include + +#include "b43legacy.h" +#include "phy.h" +#include "main.h" +#include "radio.h" +#include "ilt.h" + + +static const s8 b43legacy_tssi2dbm_b_table[] = { + 0x4D, 0x4C, 0x4B, 0x4A, + 0x4A, 0x49, 0x48, 0x47, + 0x47, 0x46, 0x45, 0x45, + 0x44, 0x43, 0x42, 0x42, + 0x41, 0x40, 0x3F, 0x3E, + 0x3D, 0x3C, 0x3B, 0x3A, + 0x39, 0x38, 0x37, 0x36, + 0x35, 0x34, 0x32, 0x31, + 0x30, 0x2F, 0x2D, 0x2C, + 0x2B, 0x29, 0x28, 0x26, + 0x25, 0x23, 0x21, 0x1F, + 0x1D, 0x1A, 0x17, 0x14, + 0x10, 0x0C, 0x06, 0x00, + -7, -7, -7, -7, + -7, -7, -7, -7, + -7, -7, -7, -7, +}; + +static const s8 b43legacy_tssi2dbm_g_table[] = { + 77, 77, 77, 76, + 76, 76, 75, 75, + 74, 74, 73, 73, + 73, 72, 72, 71, + 71, 70, 70, 69, + 68, 68, 67, 67, + 66, 65, 65, 64, + 63, 63, 62, 61, + 60, 59, 58, 57, + 56, 55, 54, 53, + 52, 50, 49, 47, + 45, 43, 40, 37, + 33, 28, 22, 14, + 5, -7, -20, -20, + -20, -20, -20, -20, + -20, -20, -20, -20, +}; + +static void b43legacy_phy_initg(struct b43legacy_wldev *dev); + + +static inline +void b43legacy_voluntary_preempt(void) +{ + B43legacy_BUG_ON(!(!in_atomic() && !in_irq() && + !in_interrupt() && !irqs_disabled())); +#ifndef CONFIG_PREEMPT + cond_resched(); +#endif /* CONFIG_PREEMPT */ +} + +/* Lock the PHY registers against concurrent access from the microcode. + * This lock is nonrecursive. */ +void b43legacy_phy_lock(struct b43legacy_wldev *dev) +{ +#if B43legacy_DEBUG + B43legacy_WARN_ON(dev->phy.phy_locked); + dev->phy.phy_locked = 1; +#endif + + if (dev->dev->id.revision < 3) { + b43legacy_mac_suspend(dev); + } else { + if (!b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP)) + b43legacy_power_saving_ctl_bits(dev, -1, 1); + } +} + +void b43legacy_phy_unlock(struct b43legacy_wldev *dev) +{ +#if B43legacy_DEBUG + B43legacy_WARN_ON(!dev->phy.phy_locked); + dev->phy.phy_locked = 0; +#endif + + if (dev->dev->id.revision < 3) { + b43legacy_mac_enable(dev); + } else { + if (!b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP)) + b43legacy_power_saving_ctl_bits(dev, -1, -1); + } +} + +u16 b43legacy_phy_read(struct b43legacy_wldev *dev, u16 offset) +{ + b43legacy_write16(dev, B43legacy_MMIO_PHY_CONTROL, offset); + return b43legacy_read16(dev, B43legacy_MMIO_PHY_DATA); +} + +void b43legacy_phy_write(struct b43legacy_wldev *dev, u16 offset, u16 val) +{ + b43legacy_write16(dev, B43legacy_MMIO_PHY_CONTROL, offset); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_PHY_DATA, val); +} + +void b43legacy_phy_calibrate(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + b43legacy_read32(dev, B43legacy_MMIO_MACCTL); /* Dummy read. 
*/ + if (phy->calibrated) + return; + if (phy->type == B43legacy_PHYTYPE_G && phy->rev == 1) { + b43legacy_wireless_core_reset(dev, 0); + b43legacy_phy_initg(dev); + b43legacy_wireless_core_reset(dev, B43legacy_TMSLOW_GMODE); + } + phy->calibrated = 1; +} + +/* intialize B PHY power control + * as described in http://bcm-specs.sipsolutions.net/InitPowerControl + */ +static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 saved_batt = 0; + u16 saved_ratt = 0; + u16 saved_txctl1 = 0; + int must_reset_txpower = 0; + + B43legacy_BUG_ON(!(phy->type == B43legacy_PHYTYPE_B || + phy->type == B43legacy_PHYTYPE_G)); + if (is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type == 0x0416)) + return; + + b43legacy_phy_write(dev, 0x0028, 0x8018); + b43legacy_write16(dev, 0x03E6, b43legacy_read16(dev, 0x03E6) & 0xFFDF); + + if (phy->type == B43legacy_PHYTYPE_G) { + if (!phy->gmode) + return; + b43legacy_phy_write(dev, 0x047A, 0xC111); + } + if (phy->savedpctlreg != 0xFFFF) + return; +#ifdef CONFIG_B43LEGACY_DEBUG + if (phy->manual_txpower_control) + return; +#endif + + if (phy->type == B43legacy_PHYTYPE_B && + phy->rev >= 2 && + phy->radio_ver == 0x2050) + b43legacy_radio_write16(dev, 0x0076, + b43legacy_radio_read16(dev, 0x0076) + | 0x0084); + else { + saved_batt = phy->bbatt; + saved_ratt = phy->rfatt; + saved_txctl1 = phy->txctl1; + if ((phy->radio_rev >= 6) && (phy->radio_rev <= 8) + && /*FIXME: incomplete specs for 5 < revision < 9 */ 0) + b43legacy_radio_set_txpower_bg(dev, 0xB, 0x1F, 0); + else + b43legacy_radio_set_txpower_bg(dev, 0xB, 9, 0); + must_reset_txpower = 1; + } + b43legacy_dummy_transmission(dev); + + phy->savedpctlreg = b43legacy_phy_read(dev, B43legacy_PHY_G_PCTL); + + if (must_reset_txpower) + b43legacy_radio_set_txpower_bg(dev, saved_batt, saved_ratt, + saved_txctl1); + else + b43legacy_radio_write16(dev, 0x0076, b43legacy_radio_read16(dev, + 0x0076) & 0xFF7B); + b43legacy_radio_clear_tssi(dev); +} + +static void b43legacy_phy_agcsetup(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset = 0x0000; + + if (phy->rev == 1) + offset = 0x4C00; + + b43legacy_ilt_write(dev, offset, 0x00FE); + b43legacy_ilt_write(dev, offset + 1, 0x000D); + b43legacy_ilt_write(dev, offset + 2, 0x0013); + b43legacy_ilt_write(dev, offset + 3, 0x0019); + + if (phy->rev == 1) { + b43legacy_ilt_write(dev, 0x1800, 0x2710); + b43legacy_ilt_write(dev, 0x1801, 0x9B83); + b43legacy_ilt_write(dev, 0x1802, 0x9B83); + b43legacy_ilt_write(dev, 0x1803, 0x0F8D); + b43legacy_phy_write(dev, 0x0455, 0x0004); + } + + b43legacy_phy_write(dev, 0x04A5, (b43legacy_phy_read(dev, 0x04A5) + & 0x00FF) | 0x5700); + b43legacy_phy_write(dev, 0x041A, (b43legacy_phy_read(dev, 0x041A) + & 0xFF80) | 0x000F); + b43legacy_phy_write(dev, 0x041A, (b43legacy_phy_read(dev, 0x041A) + & 0xC07F) | 0x2B80); + b43legacy_phy_write(dev, 0x048C, (b43legacy_phy_read(dev, 0x048C) + & 0xF0FF) | 0x0300); + + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0008); + + b43legacy_phy_write(dev, 0x04A0, (b43legacy_phy_read(dev, 0x04A0) + & 0xFFF0) | 0x0008); + b43legacy_phy_write(dev, 0x04A1, (b43legacy_phy_read(dev, 0x04A1) + & 0xF0FF) | 0x0600); + b43legacy_phy_write(dev, 0x04A2, (b43legacy_phy_read(dev, 0x04A2) + & 0xF0FF) | 0x0700); + b43legacy_phy_write(dev, 0x04A0, (b43legacy_phy_read(dev, 0x04A0) + & 0xF0FF) | 0x0100); + + if (phy->rev == 1) + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) + & 0xFFF0) | 
0x0007); + + b43legacy_phy_write(dev, 0x0488, (b43legacy_phy_read(dev, 0x0488) + & 0xFF00) | 0x001C); + b43legacy_phy_write(dev, 0x0488, (b43legacy_phy_read(dev, 0x0488) + & 0xC0FF) | 0x0200); + b43legacy_phy_write(dev, 0x0496, (b43legacy_phy_read(dev, 0x0496) + & 0xFF00) | 0x001C); + b43legacy_phy_write(dev, 0x0489, (b43legacy_phy_read(dev, 0x0489) + & 0xFF00) | 0x0020); + b43legacy_phy_write(dev, 0x0489, (b43legacy_phy_read(dev, 0x0489) + & 0xC0FF) | 0x0200); + b43legacy_phy_write(dev, 0x0482, (b43legacy_phy_read(dev, 0x0482) + & 0xFF00) | 0x002E); + b43legacy_phy_write(dev, 0x0496, (b43legacy_phy_read(dev, 0x0496) + & 0x00FF) | 0x1A00); + b43legacy_phy_write(dev, 0x0481, (b43legacy_phy_read(dev, 0x0481) + & 0xFF00) | 0x0028); + b43legacy_phy_write(dev, 0x0481, (b43legacy_phy_read(dev, 0x0481) + & 0x00FF) | 0x2C00); + + if (phy->rev == 1) { + b43legacy_phy_write(dev, 0x0430, 0x092B); + b43legacy_phy_write(dev, 0x041B, + (b43legacy_phy_read(dev, 0x041B) + & 0xFFE1) | 0x0002); + } else { + b43legacy_phy_write(dev, 0x041B, + b43legacy_phy_read(dev, 0x041B) & 0xFFE1); + b43legacy_phy_write(dev, 0x041F, 0x287A); + b43legacy_phy_write(dev, 0x0420, + (b43legacy_phy_read(dev, 0x0420) + & 0xFFF0) | 0x0004); + } + + if (phy->rev > 2) { + b43legacy_phy_write(dev, 0x0422, 0x287A); + b43legacy_phy_write(dev, 0x0420, + (b43legacy_phy_read(dev, 0x0420) + & 0x0FFF) | 0x3000); + } + + b43legacy_phy_write(dev, 0x04A8, (b43legacy_phy_read(dev, 0x04A8) + & 0x8080) | 0x7874); + b43legacy_phy_write(dev, 0x048E, 0x1C00); + + if (phy->rev == 1) { + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xF0FF) | 0x0600); + b43legacy_phy_write(dev, 0x048B, 0x005E); + b43legacy_phy_write(dev, 0x048C, + (b43legacy_phy_read(dev, 0x048C) & 0xFF00) + | 0x001E); + b43legacy_phy_write(dev, 0x048D, 0x0002); + } + + b43legacy_ilt_write(dev, offset + 0x0800, 0); + b43legacy_ilt_write(dev, offset + 0x0801, 7); + b43legacy_ilt_write(dev, offset + 0x0802, 16); + b43legacy_ilt_write(dev, offset + 0x0803, 28); + + if (phy->rev >= 6) { + b43legacy_phy_write(dev, 0x0426, + (b43legacy_phy_read(dev, 0x0426) & 0xFFFC)); + b43legacy_phy_write(dev, 0x0426, + (b43legacy_phy_read(dev, 0x0426) & 0xEFFF)); + } +} + +static void b43legacy_phy_setupg(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 i; + + B43legacy_BUG_ON(phy->type != B43legacy_PHYTYPE_G); + if (phy->rev == 1) { + b43legacy_phy_write(dev, 0x0406, 0x4F19); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + (b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) & 0xFC3F) | 0x0340); + b43legacy_phy_write(dev, 0x042C, 0x005A); + b43legacy_phy_write(dev, 0x0427, 0x001A); + + for (i = 0; i < B43legacy_ILT_FINEFREQG_SIZE; i++) + b43legacy_ilt_write(dev, 0x5800 + i, + b43legacy_ilt_finefreqg[i]); + for (i = 0; i < B43legacy_ILT_NOISEG1_SIZE; i++) + b43legacy_ilt_write(dev, 0x1800 + i, + b43legacy_ilt_noiseg1[i]); + for (i = 0; i < B43legacy_ILT_ROTOR_SIZE; i++) + b43legacy_ilt_write32(dev, 0x2000 + i, + b43legacy_ilt_rotor[i]); + } else { + /* nrssi values are signed 6-bit values. Why 0x7654 here? 
*/ + b43legacy_nrssi_hw_write(dev, 0xBA98, (s16)0x7654); + + if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x04C0, 0x1861); + b43legacy_phy_write(dev, 0x04C1, 0x0271); + } else if (phy->rev > 2) { + b43legacy_phy_write(dev, 0x04C0, 0x0098); + b43legacy_phy_write(dev, 0x04C1, 0x0070); + b43legacy_phy_write(dev, 0x04C9, 0x0080); + } + b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, + 0x042B) | 0x800); + + for (i = 0; i < 64; i++) + b43legacy_ilt_write(dev, 0x4000 + i, i); + for (i = 0; i < B43legacy_ILT_NOISEG2_SIZE; i++) + b43legacy_ilt_write(dev, 0x1800 + i, + b43legacy_ilt_noiseg2[i]); + } + + if (phy->rev <= 2) + for (i = 0; i < B43legacy_ILT_NOISESCALEG_SIZE; i++) + b43legacy_ilt_write(dev, 0x1400 + i, + b43legacy_ilt_noisescaleg1[i]); + else if ((phy->rev >= 7) && (b43legacy_phy_read(dev, 0x0449) & 0x0200)) + for (i = 0; i < B43legacy_ILT_NOISESCALEG_SIZE; i++) + b43legacy_ilt_write(dev, 0x1400 + i, + b43legacy_ilt_noisescaleg3[i]); + else + for (i = 0; i < B43legacy_ILT_NOISESCALEG_SIZE; i++) + b43legacy_ilt_write(dev, 0x1400 + i, + b43legacy_ilt_noisescaleg2[i]); + + if (phy->rev == 2) + for (i = 0; i < B43legacy_ILT_SIGMASQR_SIZE; i++) + b43legacy_ilt_write(dev, 0x5000 + i, + b43legacy_ilt_sigmasqr1[i]); + else if ((phy->rev > 2) && (phy->rev <= 8)) + for (i = 0; i < B43legacy_ILT_SIGMASQR_SIZE; i++) + b43legacy_ilt_write(dev, 0x5000 + i, + b43legacy_ilt_sigmasqr2[i]); + + if (phy->rev == 1) { + for (i = 0; i < B43legacy_ILT_RETARD_SIZE; i++) + b43legacy_ilt_write32(dev, 0x2400 + i, + b43legacy_ilt_retard[i]); + for (i = 4; i < 20; i++) + b43legacy_ilt_write(dev, 0x5400 + i, 0x0020); + b43legacy_phy_agcsetup(dev); + + if (is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type == 0x0416) && + (dev->dev->bus->boardinfo.rev == 0x0017)) + return; + + b43legacy_ilt_write(dev, 0x5001, 0x0002); + b43legacy_ilt_write(dev, 0x5002, 0x0001); + } else { + for (i = 0; i <= 0x20; i++) + b43legacy_ilt_write(dev, 0x1000 + i, 0x0820); + b43legacy_phy_agcsetup(dev); + b43legacy_phy_read(dev, 0x0400); /* dummy read */ + b43legacy_phy_write(dev, 0x0403, 0x1000); + b43legacy_ilt_write(dev, 0x3C02, 0x000F); + b43legacy_ilt_write(dev, 0x3C03, 0x0014); + + if (is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type == 0x0416) && + (dev->dev->bus->boardinfo.rev == 0x0017)) + return; + + b43legacy_ilt_write(dev, 0x0401, 0x0002); + b43legacy_ilt_write(dev, 0x0402, 0x0001); + } +} + +/* Initialize the APHY portion of a GPHY. 
*/ +static void b43legacy_phy_inita(struct b43legacy_wldev *dev) +{ + + might_sleep(); + + b43legacy_phy_setupg(dev); + if (dev->dev->bus->sprom.boardflags_lo & B43legacy_BFL_PACTRL) + b43legacy_phy_write(dev, 0x046E, 0x03CF); +} + +static void b43legacy_phy_initb2(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + int val; + + b43legacy_write16(dev, 0x03EC, 0x3F22); + b43legacy_phy_write(dev, 0x0020, 0x301C); + b43legacy_phy_write(dev, 0x0026, 0x0000); + b43legacy_phy_write(dev, 0x0030, 0x00C6); + b43legacy_phy_write(dev, 0x0088, 0x3E00); + val = 0x3C3D; + for (offset = 0x0089; offset < 0x00A7; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + b43legacy_phy_write(dev, 0x03E4, 0x3000); + b43legacy_radio_selectchannel(dev, phy->channel, 0); + if (phy->radio_ver != 0x2050) { + b43legacy_radio_write16(dev, 0x0075, 0x0080); + b43legacy_radio_write16(dev, 0x0079, 0x0081); + } + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + b43legacy_radio_write16(dev, 0x007A, 0x000F); + b43legacy_phy_write(dev, 0x0038, 0x0677); + b43legacy_radio_init2050(dev); + } + b43legacy_phy_write(dev, 0x0014, 0x0080); + b43legacy_phy_write(dev, 0x0032, 0x00CA); + b43legacy_phy_write(dev, 0x0032, 0x00CC); + b43legacy_phy_write(dev, 0x0035, 0x07C2); + b43legacy_phy_lo_b_measure(dev); + b43legacy_phy_write(dev, 0x0026, 0xCC00); + if (phy->radio_ver != 0x2050) + b43legacy_phy_write(dev, 0x0026, 0xCE00); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, 0x1000); + b43legacy_phy_write(dev, 0x002A, 0x88A3); + if (phy->radio_ver != 0x2050) + b43legacy_phy_write(dev, 0x002A, 0x88C2); + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + b43legacy_phy_init_pctl(dev); +} + +static void b43legacy_phy_initb4(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + u16 val; + + b43legacy_write16(dev, 0x03EC, 0x3F22); + b43legacy_phy_write(dev, 0x0020, 0x301C); + b43legacy_phy_write(dev, 0x0026, 0x0000); + b43legacy_phy_write(dev, 0x0030, 0x00C6); + b43legacy_phy_write(dev, 0x0088, 0x3E00); + val = 0x3C3D; + for (offset = 0x0089; offset < 0x00A7; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + b43legacy_phy_write(dev, 0x03E4, 0x3000); + b43legacy_radio_selectchannel(dev, phy->channel, 0); + if (phy->radio_ver != 0x2050) { + b43legacy_radio_write16(dev, 0x0075, 0x0080); + b43legacy_radio_write16(dev, 0x0079, 0x0081); + } + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + b43legacy_radio_write16(dev, 0x007A, 0x000F); + b43legacy_phy_write(dev, 0x0038, 0x0677); + b43legacy_radio_init2050(dev); + } + b43legacy_phy_write(dev, 0x0014, 0x0080); + b43legacy_phy_write(dev, 0x0032, 0x00CA); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x0032, 0x00E0); + b43legacy_phy_write(dev, 0x0035, 0x07C2); + + b43legacy_phy_lo_b_measure(dev); + + b43legacy_phy_write(dev, 0x0026, 0xCC00); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x0026, 0xCE00); + 
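+	/* Remaining initb4 steps below: program the extended-channel register,
+	 * write the TX power settings, run the optional NRSSI slope/threshold
+	 * calibration (only on boards that set B43legacy_BFL_RSSI) and finish
+	 * with the B PHY power-control init. */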
b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, 0x1100); + b43legacy_phy_write(dev, 0x002A, 0x88A3); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x002A, 0x88C2); + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + if (dev->dev->bus->sprom.boardflags_lo & B43legacy_BFL_RSSI) { + b43legacy_calc_nrssi_slope(dev); + b43legacy_calc_nrssi_threshold(dev); + } + b43legacy_phy_init_pctl(dev); +} + +static void b43legacy_phy_initb5(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + u16 value; + u8 old_channel; + + if (phy->analog == 1) + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0050); + if (!is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type != 0x0416)) { + value = 0x2120; + for (offset = 0x00A8 ; offset < 0x00C7; offset++) { + b43legacy_phy_write(dev, offset, value); + value += 0x0202; + } + } + b43legacy_phy_write(dev, 0x0035, + (b43legacy_phy_read(dev, 0x0035) & 0xF0FF) + | 0x0700); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x0038, 0x0667); + + if (phy->gmode) { + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0020); + b43legacy_radio_write16(dev, 0x0051, + b43legacy_radio_read16(dev, 0x0051) + | 0x0004); + } + b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO, 0x0000); + + b43legacy_phy_write(dev, 0x0802, b43legacy_phy_read(dev, 0x0802) + | 0x0100); + b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, 0x042B) + | 0x2000); + + b43legacy_phy_write(dev, 0x001C, 0x186A); + + b43legacy_phy_write(dev, 0x0013, (b43legacy_phy_read(dev, + 0x0013) & 0x00FF) | 0x1900); + b43legacy_phy_write(dev, 0x0035, (b43legacy_phy_read(dev, + 0x0035) & 0xFFC0) | 0x0064); + b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev, + 0x005D) & 0xFF80) | 0x000A); + b43legacy_phy_write(dev, 0x5B, 0x0000); + b43legacy_phy_write(dev, 0x5C, 0x0000); + } + + if (dev->bad_frames_preempt) + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) | (1 << 12)); + + if (phy->analog == 1) { + b43legacy_phy_write(dev, 0x0026, 0xCE00); + b43legacy_phy_write(dev, 0x0021, 0x3763); + b43legacy_phy_write(dev, 0x0022, 0x1BC3); + b43legacy_phy_write(dev, 0x0023, 0x06F9); + b43legacy_phy_write(dev, 0x0024, 0x037E); + } else + b43legacy_phy_write(dev, 0x0026, 0xCC00); + b43legacy_phy_write(dev, 0x0030, 0x00C6); + b43legacy_write16(dev, 0x03EC, 0x3F22); + + if (phy->analog == 1) + b43legacy_phy_write(dev, 0x0020, 0x3E1C); + else + b43legacy_phy_write(dev, 0x0020, 0x301C); + + if (phy->analog == 0) + b43legacy_write16(dev, 0x03E4, 0x3000); + + old_channel = (phy->channel == 0xFF) ? 1 : phy->channel; + /* Force to channel 7, even if not supported. 
*/ + b43legacy_radio_selectchannel(dev, 7, 0); + + if (phy->radio_ver != 0x2050) { + b43legacy_radio_write16(dev, 0x0075, 0x0080); + b43legacy_radio_write16(dev, 0x0079, 0x0081); + } + + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + } + + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + + b43legacy_radio_write16(dev, 0x007A, b43legacy_radio_read16(dev, + 0x007A) | 0x0007); + + b43legacy_radio_selectchannel(dev, old_channel, 0); + + b43legacy_phy_write(dev, 0x0014, 0x0080); + b43legacy_phy_write(dev, 0x0032, 0x00CA); + b43legacy_phy_write(dev, 0x002A, 0x88A3); + + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + + if (phy->radio_ver == 0x2050) + b43legacy_radio_write16(dev, 0x005D, 0x000D); + + b43legacy_write16(dev, 0x03E4, (b43legacy_read16(dev, 0x03E4) & + 0xFFC0) | 0x0004); +} + +static void b43legacy_phy_initb6(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + u16 val; + u8 old_channel; + + b43legacy_phy_write(dev, 0x003E, 0x817A); + b43legacy_radio_write16(dev, 0x007A, + (b43legacy_radio_read16(dev, 0x007A) | 0x0058)); + if (phy->radio_rev == 4 || + phy->radio_rev == 5) { + b43legacy_radio_write16(dev, 0x0051, 0x0037); + b43legacy_radio_write16(dev, 0x0052, 0x0070); + b43legacy_radio_write16(dev, 0x0053, 0x00B3); + b43legacy_radio_write16(dev, 0x0054, 0x009B); + b43legacy_radio_write16(dev, 0x005A, 0x0088); + b43legacy_radio_write16(dev, 0x005B, 0x0088); + b43legacy_radio_write16(dev, 0x005D, 0x0088); + b43legacy_radio_write16(dev, 0x005E, 0x0088); + b43legacy_radio_write16(dev, 0x007D, 0x0088); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + (b43legacy_shm_read32(dev, + B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET) + | 0x00000200)); + } + if (phy->radio_rev == 8) { + b43legacy_radio_write16(dev, 0x0051, 0x0000); + b43legacy_radio_write16(dev, 0x0052, 0x0040); + b43legacy_radio_write16(dev, 0x0053, 0x00B7); + b43legacy_radio_write16(dev, 0x0054, 0x0098); + b43legacy_radio_write16(dev, 0x005A, 0x0088); + b43legacy_radio_write16(dev, 0x005B, 0x006B); + b43legacy_radio_write16(dev, 0x005C, 0x000F); + if (dev->dev->bus->sprom.boardflags_lo & 0x8000) { + b43legacy_radio_write16(dev, 0x005D, 0x00FA); + b43legacy_radio_write16(dev, 0x005E, 0x00D8); + } else { + b43legacy_radio_write16(dev, 0x005D, 0x00F5); + b43legacy_radio_write16(dev, 0x005E, 0x00B8); + } + b43legacy_radio_write16(dev, 0x0073, 0x0003); + b43legacy_radio_write16(dev, 0x007D, 0x00A8); + b43legacy_radio_write16(dev, 0x007C, 0x0001); + b43legacy_radio_write16(dev, 0x007E, 0x0008); + } + val = 0x1E1F; + for (offset = 0x0088; offset < 0x0098; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x3E3F; + for (offset = 0x0098; offset < 0x00A8; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x2120; + for (offset = 0x00A8; offset < 0x00C8; offset++) { + b43legacy_phy_write(dev, offset, (val & 0x3F3F)); + val += 0x0202; + } + if (phy->type == B43legacy_PHYTYPE_G) { + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) | + 0x0020); + b43legacy_radio_write16(dev, 0x0051, + b43legacy_radio_read16(dev, 0x0051) | + 0x0004); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) | 0x0100); + b43legacy_phy_write(dev, 
0x042B, + b43legacy_phy_read(dev, 0x042B) | 0x2000); + b43legacy_phy_write(dev, 0x5B, 0x0000); + b43legacy_phy_write(dev, 0x5C, 0x0000); + } + + old_channel = phy->channel; + if (old_channel >= 8) + b43legacy_radio_selectchannel(dev, 1, 0); + else + b43legacy_radio_selectchannel(dev, 13, 0); + + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + udelay(40); + if (phy->radio_rev < 6 || phy->radio_rev == 8) { + b43legacy_radio_write16(dev, 0x007C, + (b43legacy_radio_read16(dev, 0x007C) + | 0x0002)); + b43legacy_radio_write16(dev, 0x0050, 0x0020); + } + if (phy->radio_rev <= 2) { + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + } + b43legacy_radio_write16(dev, 0x007A, + (b43legacy_radio_read16(dev, + 0x007A) & 0x00F8) | 0x0007); + + b43legacy_radio_selectchannel(dev, old_channel, 0); + + b43legacy_phy_write(dev, 0x0014, 0x0200); + if (phy->radio_rev >= 6) + b43legacy_phy_write(dev, 0x002A, 0x88C2); + else + b43legacy_phy_write(dev, 0x002A, 0x8AC0); + b43legacy_phy_write(dev, 0x0038, 0x0668); + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + if (phy->radio_rev == 4 || phy->radio_rev == 5) + b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev, + 0x005D) & 0xFF80) | 0x0003); + if (phy->radio_rev <= 2) + b43legacy_radio_write16(dev, 0x005D, 0x000D); + + if (phy->analog == 4) { + b43legacy_write16(dev, 0x03E4, 0x0009); + b43legacy_phy_write(dev, 0x61, b43legacy_phy_read(dev, 0x61) + & 0xFFF); + } else + b43legacy_phy_write(dev, 0x0002, (b43legacy_phy_read(dev, + 0x0002) & 0xFFC0) | 0x0004); + if (phy->type == B43legacy_PHYTYPE_G) + b43legacy_write16(dev, 0x03E6, 0x0); + if (phy->type == B43legacy_PHYTYPE_B) { + b43legacy_write16(dev, 0x03E6, 0x8140); + b43legacy_phy_write(dev, 0x0016, 0x0410); + b43legacy_phy_write(dev, 0x0017, 0x0820); + b43legacy_phy_write(dev, 0x0062, 0x0007); + b43legacy_radio_init2050(dev); + b43legacy_phy_lo_g_measure(dev); + if (dev->dev->bus->sprom.boardflags_lo & + B43legacy_BFL_RSSI) { + b43legacy_calc_nrssi_slope(dev); + b43legacy_calc_nrssi_threshold(dev); + } + b43legacy_phy_init_pctl(dev); + } +} + +static void b43legacy_calc_loopback_gain(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup_phy[15] = {0}; + u16 backup_radio[3]; + u16 backup_bband; + u16 i; + u16 loop1_cnt; + u16 loop1_done; + u16 loop1_omitted; + u16 loop2_done; + + backup_phy[0] = b43legacy_phy_read(dev, 0x0429); + backup_phy[1] = b43legacy_phy_read(dev, 0x0001); + backup_phy[2] = b43legacy_phy_read(dev, 0x0811); + backup_phy[3] = b43legacy_phy_read(dev, 0x0812); + if (phy->rev != 1) { + backup_phy[4] = b43legacy_phy_read(dev, 0x0814); + backup_phy[5] = b43legacy_phy_read(dev, 0x0815); + } + backup_phy[6] = b43legacy_phy_read(dev, 0x005A); + backup_phy[7] = b43legacy_phy_read(dev, 0x0059); + backup_phy[8] = b43legacy_phy_read(dev, 0x0058); + backup_phy[9] = b43legacy_phy_read(dev, 0x000A); + backup_phy[10] = b43legacy_phy_read(dev, 0x0003); + backup_phy[11] = b43legacy_phy_read(dev, 0x080F); + backup_phy[12] = b43legacy_phy_read(dev, 0x0810); + backup_phy[13] = b43legacy_phy_read(dev, 0x002B); + backup_phy[14] = b43legacy_phy_read(dev, 0x0015); + b43legacy_phy_read(dev, 0x002D); /* dummy read */ + backup_bband = phy->bbatt; + backup_radio[0] = b43legacy_radio_read16(dev, 0x0052); + backup_radio[1] = b43legacy_radio_read16(dev, 0x0043); + backup_radio[2] = 
b43legacy_radio_read16(dev, 0x007A); + + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) & 0x3FFF); + b43legacy_phy_write(dev, 0x0001, + b43legacy_phy_read(dev, 0x0001) & 0x8000); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0002); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) & 0xFFFD); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0001); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) & 0xFFFE); + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0001); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFE); + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0002); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFD); + } + b43legacy_phy_write(dev, 0x0811, b43legacy_phy_read(dev, 0x0811) | + 0x000C); + b43legacy_phy_write(dev, 0x0812, b43legacy_phy_read(dev, 0x0812) | + 0x000C); + + b43legacy_phy_write(dev, 0x0811, (b43legacy_phy_read(dev, 0x0811) + & 0xFFCF) | 0x0030); + b43legacy_phy_write(dev, 0x0812, (b43legacy_phy_read(dev, 0x0812) + & 0xFFCF) | 0x0010); + + b43legacy_phy_write(dev, 0x005A, 0x0780); + b43legacy_phy_write(dev, 0x0059, 0xC810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->analog == 0) + b43legacy_phy_write(dev, 0x0003, 0x0122); + else + b43legacy_phy_write(dev, 0x000A, + b43legacy_phy_read(dev, 0x000A) + | 0x2000); + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0004); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFB); + } + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) + & 0xFF9F) | 0x0040); + if (phy->radio_ver == 0x2050 && phy->radio_rev == 2) { + b43legacy_radio_write16(dev, 0x0052, 0x0000); + b43legacy_radio_write16(dev, 0x0043, + (b43legacy_radio_read16(dev, 0x0043) + & 0xFFF0) | 0x0009); + loop1_cnt = 9; + } else if (phy->radio_rev == 8) { + b43legacy_radio_write16(dev, 0x0043, 0x000F); + loop1_cnt = 15; + } else + loop1_cnt = 0; + + b43legacy_phy_set_baseband_attenuation(dev, 11); + + if (phy->rev >= 3) + b43legacy_phy_write(dev, 0x080F, 0xC020); + else + b43legacy_phy_write(dev, 0x080F, 0x8020); + b43legacy_phy_write(dev, 0x0810, 0x0000); + + b43legacy_phy_write(dev, 0x002B, + (b43legacy_phy_read(dev, 0x002B) + & 0xFFC0) | 0x0001); + b43legacy_phy_write(dev, 0x002B, + (b43legacy_phy_read(dev, 0x002B) + & 0xC0FF) | 0x0800); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0100); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) & 0xCFFF); + if (dev->dev->bus->sprom.boardflags_lo & B43legacy_BFL_EXTLNA) { + if (phy->rev >= 7) { + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) + | 0x0800); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) + | 0x8000); + } + } + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x00F7); + + for (i = 0; i < loop1_cnt; i++) { + b43legacy_radio_write16(dev, 0x0043, loop1_cnt); + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xF0FF) | (i << 8)); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xA000); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xF000); + udelay(20); + if (b43legacy_phy_read(dev, 0x002D) >= 0x0DFC) + break; + } + loop1_done = i; + loop1_omitted = loop1_cnt - loop1_done; + 
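+	/* The sweep above stepped the gain field in PHY register 0x812
+	 * (bits 8-11) until the level read back from PHY 0x2D reached 0x0DFC.
+	 * If eight or more steps were needed, the second sweep below repeats
+	 * the measurement with bits 0x0030 set in 0x812; both loop counts go
+	 * into the loopback_gain[] values computed at the end of this
+	 * function. */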
+ loop2_done = 0; + if (loop1_done >= 8) { + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) + | 0x0030); + for (i = loop1_done - 8; i < 16; i++) { + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xF0FF) | (i << 8)); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xA000); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xF000); + udelay(20); + if (b43legacy_phy_read(dev, 0x002D) >= 0x0DFC) + break; + } + } + + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x0814, backup_phy[4]); + b43legacy_phy_write(dev, 0x0815, backup_phy[5]); + } + b43legacy_phy_write(dev, 0x005A, backup_phy[6]); + b43legacy_phy_write(dev, 0x0059, backup_phy[7]); + b43legacy_phy_write(dev, 0x0058, backup_phy[8]); + b43legacy_phy_write(dev, 0x000A, backup_phy[9]); + b43legacy_phy_write(dev, 0x0003, backup_phy[10]); + b43legacy_phy_write(dev, 0x080F, backup_phy[11]); + b43legacy_phy_write(dev, 0x0810, backup_phy[12]); + b43legacy_phy_write(dev, 0x002B, backup_phy[13]); + b43legacy_phy_write(dev, 0x0015, backup_phy[14]); + + b43legacy_phy_set_baseband_attenuation(dev, backup_bband); + + b43legacy_radio_write16(dev, 0x0052, backup_radio[0]); + b43legacy_radio_write16(dev, 0x0043, backup_radio[1]); + b43legacy_radio_write16(dev, 0x007A, backup_radio[2]); + + b43legacy_phy_write(dev, 0x0811, backup_phy[2] | 0x0003); + udelay(10); + b43legacy_phy_write(dev, 0x0811, backup_phy[2]); + b43legacy_phy_write(dev, 0x0812, backup_phy[3]); + b43legacy_phy_write(dev, 0x0429, backup_phy[0]); + b43legacy_phy_write(dev, 0x0001, backup_phy[1]); + + phy->loopback_gain[0] = ((loop1_done * 6) - (loop1_omitted * 4)) - 11; + phy->loopback_gain[1] = (24 - (3 * loop2_done)) * 2; +} + +static void b43legacy_phy_initg(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + + if (phy->rev == 1) + b43legacy_phy_initb5(dev); + else + b43legacy_phy_initb6(dev); + if (phy->rev >= 2 && phy->gmode) + b43legacy_phy_inita(dev); + + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0814, 0x0000); + b43legacy_phy_write(dev, 0x0815, 0x0000); + } + if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x0811, 0x0000); + b43legacy_phy_write(dev, 0x0015, 0x00C0); + } + if (phy->rev > 5) { + b43legacy_phy_write(dev, 0x0811, 0x0400); + b43legacy_phy_write(dev, 0x0015, 0x00C0); + } + if (phy->gmode) { + tmp = b43legacy_phy_read(dev, 0x0400) & 0xFF; + if (tmp == 3) { + b43legacy_phy_write(dev, 0x04C2, 0x1816); + b43legacy_phy_write(dev, 0x04C3, 0x8606); + } + if (tmp == 4 || tmp == 5) { + b43legacy_phy_write(dev, 0x04C2, 0x1816); + b43legacy_phy_write(dev, 0x04C3, 0x8006); + b43legacy_phy_write(dev, 0x04CC, + (b43legacy_phy_read(dev, + 0x04CC) & 0x00FF) | + 0x1F00); + } + if (phy->rev >= 2) + b43legacy_phy_write(dev, 0x047E, 0x0078); + } + if (phy->radio_rev == 8) { + b43legacy_phy_write(dev, 0x0801, b43legacy_phy_read(dev, 0x0801) + | 0x0080); + b43legacy_phy_write(dev, 0x043E, b43legacy_phy_read(dev, 0x043E) + | 0x0004); + } + if (phy->rev >= 2 && phy->gmode) + b43legacy_calc_loopback_gain(dev); + if (phy->radio_rev != 8) { + if (phy->initval == 0xFFFF) + phy->initval = b43legacy_radio_init2050(dev); + else + b43legacy_radio_write16(dev, 0x0078, phy->initval); + } + if (phy->txctl2 == 0xFFFF) + b43legacy_phy_lo_g_measure(dev); + else { + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) + b43legacy_radio_write16(dev, 0x0052, + (phy->txctl1 << 4) | + phy->txctl2); + else + b43legacy_radio_write16(dev, 0x0052, + 
(b43legacy_radio_read16(dev, + 0x0052) & 0xFFF0) | + phy->txctl1); + if (phy->rev >= 6) + b43legacy_phy_write(dev, 0x0036, + (b43legacy_phy_read(dev, 0x0036) + & 0x0FFF) | (phy->txctl2 << 12)); + if (dev->dev->bus->sprom.boardflags_lo & + B43legacy_BFL_PACTRL) + b43legacy_phy_write(dev, 0x002E, 0x8075); + else + b43legacy_phy_write(dev, 0x002E, 0x807F); + if (phy->rev < 2) + b43legacy_phy_write(dev, 0x002F, 0x0101); + else + b43legacy_phy_write(dev, 0x002F, 0x0202); + } + if (phy->gmode) { + b43legacy_phy_lo_adjust(dev, 0); + b43legacy_phy_write(dev, 0x080F, 0x8078); + } + + if (!(dev->dev->bus->sprom.boardflags_lo & B43legacy_BFL_RSSI)) { + /* The specs state to update the NRSSI LT with + * the value 0x7FFFFFFF here. I think that is some weird + * compiler optimization in the original driver. + * Essentially, what we do here is resetting all NRSSI LT + * entries to -32 (see the clamp_val() in nrssi_hw_update()) + */ + b43legacy_nrssi_hw_update(dev, 0xFFFF); + b43legacy_calc_nrssi_threshold(dev); + } else if (phy->gmode || phy->rev >= 2) { + if (phy->nrssi[0] == -1000) { + B43legacy_WARN_ON(phy->nrssi[1] != -1000); + b43legacy_calc_nrssi_slope(dev); + } else { + B43legacy_WARN_ON(phy->nrssi[1] == -1000); + b43legacy_calc_nrssi_threshold(dev); + } + } + if (phy->radio_rev == 8) + b43legacy_phy_write(dev, 0x0805, 0x3230); + b43legacy_phy_init_pctl(dev); + if (dev->dev->bus->chip_id == 0x4306 + && dev->dev->bus->chip_package == 2) { + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) & 0xBFFF); + b43legacy_phy_write(dev, 0x04C3, + b43legacy_phy_read(dev, 0x04C3) & 0x7FFF); + } +} + +static u16 b43legacy_phy_lo_b_r15_loop(struct b43legacy_wldev *dev) +{ + int i; + u16 ret = 0; + unsigned long flags; + + local_irq_save(flags); + for (i = 0; i < 10; i++) { + b43legacy_phy_write(dev, 0x0015, 0xAFA0); + udelay(1); + b43legacy_phy_write(dev, 0x0015, 0xEFA0); + udelay(10); + b43legacy_phy_write(dev, 0x0015, 0xFFA0); + udelay(40); + ret += b43legacy_phy_read(dev, 0x002C); + } + local_irq_restore(flags); + b43legacy_voluntary_preempt(); + + return ret; +} + +void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 regstack[12] = { 0 }; + u16 mls; + u16 fval; + int i; + int j; + + regstack[0] = b43legacy_phy_read(dev, 0x0015); + regstack[1] = b43legacy_radio_read16(dev, 0x0052) & 0xFFF0; + + if (phy->radio_ver == 0x2053) { + regstack[2] = b43legacy_phy_read(dev, 0x000A); + regstack[3] = b43legacy_phy_read(dev, 0x002A); + regstack[4] = b43legacy_phy_read(dev, 0x0035); + regstack[5] = b43legacy_phy_read(dev, 0x0003); + regstack[6] = b43legacy_phy_read(dev, 0x0001); + regstack[7] = b43legacy_phy_read(dev, 0x0030); + + regstack[8] = b43legacy_radio_read16(dev, 0x0043); + regstack[9] = b43legacy_radio_read16(dev, 0x007A); + regstack[10] = b43legacy_read16(dev, 0x03EC); + regstack[11] = b43legacy_radio_read16(dev, 0x0052) & 0x00F0; + + b43legacy_phy_write(dev, 0x0030, 0x00FF); + b43legacy_write16(dev, 0x03EC, 0x3F3F); + b43legacy_phy_write(dev, 0x0035, regstack[4] & 0xFF7F); + b43legacy_radio_write16(dev, 0x007A, regstack[9] & 0xFFF0); + } + b43legacy_phy_write(dev, 0x0015, 0xB000); + b43legacy_phy_write(dev, 0x002B, 0x0004); + + if (phy->radio_ver == 0x2053) { + b43legacy_phy_write(dev, 0x002B, 0x0203); + b43legacy_phy_write(dev, 0x002A, 0x08A3); + } + + phy->minlowsig[0] = 0xFFFF; + + for (i = 0; i < 4; i++) { + b43legacy_radio_write16(dev, 0x0052, regstack[1] | i); + b43legacy_phy_lo_b_r15_loop(dev); + } + for (i = 0; i < 10; i++) { + 
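+		/* Sweep the low nibble of radio register 0x52 and remember the
+		 * value that gives the smallest averaged reading from the R15
+		 * measurement loop; that value is written back after the loop. */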
b43legacy_radio_write16(dev, 0x0052, regstack[1] | i); + mls = b43legacy_phy_lo_b_r15_loop(dev) / 10; + if (mls < phy->minlowsig[0]) { + phy->minlowsig[0] = mls; + phy->minlowsigpos[0] = i; + } + } + b43legacy_radio_write16(dev, 0x0052, regstack[1] + | phy->minlowsigpos[0]); + + phy->minlowsig[1] = 0xFFFF; + + for (i = -4; i < 5; i += 2) { + for (j = -4; j < 5; j += 2) { + if (j < 0) + fval = (0x0100 * i) + j + 0x0100; + else + fval = (0x0100 * i) + j; + b43legacy_phy_write(dev, 0x002F, fval); + mls = b43legacy_phy_lo_b_r15_loop(dev) / 10; + if (mls < phy->minlowsig[1]) { + phy->minlowsig[1] = mls; + phy->minlowsigpos[1] = fval; + } + } + } + phy->minlowsigpos[1] += 0x0101; + + b43legacy_phy_write(dev, 0x002F, phy->minlowsigpos[1]); + if (phy->radio_ver == 0x2053) { + b43legacy_phy_write(dev, 0x000A, regstack[2]); + b43legacy_phy_write(dev, 0x002A, regstack[3]); + b43legacy_phy_write(dev, 0x0035, regstack[4]); + b43legacy_phy_write(dev, 0x0003, regstack[5]); + b43legacy_phy_write(dev, 0x0001, regstack[6]); + b43legacy_phy_write(dev, 0x0030, regstack[7]); + + b43legacy_radio_write16(dev, 0x0043, regstack[8]); + b43legacy_radio_write16(dev, 0x007A, regstack[9]); + + b43legacy_radio_write16(dev, 0x0052, + (b43legacy_radio_read16(dev, 0x0052) + & 0x000F) | regstack[11]); + + b43legacy_write16(dev, 0x03EC, regstack[10]); + } + b43legacy_phy_write(dev, 0x0015, regstack[0]); +} + +static inline +u16 b43legacy_phy_lo_g_deviation_subval(struct b43legacy_wldev *dev, + u16 control) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 ret; + unsigned long flags; + + local_irq_save(flags); + if (phy->gmode) { + b43legacy_phy_write(dev, 0x15, 0xE300); + control <<= 8; + b43legacy_phy_write(dev, 0x0812, control | 0x00B0); + udelay(5); + b43legacy_phy_write(dev, 0x0812, control | 0x00B2); + udelay(2); + b43legacy_phy_write(dev, 0x0812, control | 0x00B3); + udelay(4); + b43legacy_phy_write(dev, 0x0015, 0xF300); + udelay(8); + } else { + b43legacy_phy_write(dev, 0x0015, control | 0xEFA0); + udelay(2); + b43legacy_phy_write(dev, 0x0015, control | 0xEFE0); + udelay(4); + b43legacy_phy_write(dev, 0x0015, control | 0xFFE0); + udelay(8); + } + ret = b43legacy_phy_read(dev, 0x002D); + local_irq_restore(flags); + b43legacy_voluntary_preempt(); + + return ret; +} + +static u32 b43legacy_phy_lo_g_singledeviation(struct b43legacy_wldev *dev, + u16 control) +{ + int i; + u32 ret = 0; + + for (i = 0; i < 8; i++) + ret += b43legacy_phy_lo_g_deviation_subval(dev, control); + + return ret; +} + +/* Write the LocalOscillator CONTROL */ +static inline +void b43legacy_lo_write(struct b43legacy_wldev *dev, + struct b43legacy_lopair *pair) +{ + u16 value; + + value = (u8)(pair->low); + value |= ((u8)(pair->high)) << 8; + +#ifdef CONFIG_B43LEGACY_DEBUG + /* Sanity check. 
*/ + if (pair->low < -8 || pair->low > 8 || + pair->high < -8 || pair->high > 8) { + b43legacydbg(dev->wl, + "WARNING: Writing invalid LOpair " + "(low: %d, high: %d)\n", + pair->low, pair->high); + dump_stack(); + } +#endif + + b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL, value); +} + +static inline +struct b43legacy_lopair *b43legacy_find_lopair(struct b43legacy_wldev *dev, + u16 bbatt, + u16 rfatt, + u16 tx) +{ + static const u8 dict[10] = { 11, 10, 11, 12, 13, 12, 13, 12, 13, 12 }; + struct b43legacy_phy *phy = &dev->phy; + + if (bbatt > 6) + bbatt = 6; + B43legacy_WARN_ON(rfatt >= 10); + + if (tx == 3) + return b43legacy_get_lopair(phy, rfatt, bbatt); + return b43legacy_get_lopair(phy, dict[rfatt], bbatt); +} + +static inline +struct b43legacy_lopair *b43legacy_current_lopair(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + return b43legacy_find_lopair(dev, phy->bbatt, + phy->rfatt, phy->txctl1); +} + +/* Adjust B/G LO */ +void b43legacy_phy_lo_adjust(struct b43legacy_wldev *dev, int fixed) +{ + struct b43legacy_lopair *pair; + + if (fixed) { + /* Use fixed values. Only for initialization. */ + pair = b43legacy_find_lopair(dev, 2, 3, 0); + } else + pair = b43legacy_current_lopair(dev); + b43legacy_lo_write(dev, pair); +} + +static void b43legacy_phy_lo_g_measure_txctl2(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 txctl2 = 0; + u16 i; + u32 smallest; + u32 tmp; + + b43legacy_radio_write16(dev, 0x0052, 0x0000); + udelay(10); + smallest = b43legacy_phy_lo_g_singledeviation(dev, 0); + for (i = 0; i < 16; i++) { + b43legacy_radio_write16(dev, 0x0052, i); + udelay(10); + tmp = b43legacy_phy_lo_g_singledeviation(dev, 0); + if (tmp < smallest) { + smallest = tmp; + txctl2 = i; + } + } + phy->txctl2 = txctl2; +} + +static +void b43legacy_phy_lo_g_state(struct b43legacy_wldev *dev, + const struct b43legacy_lopair *in_pair, + struct b43legacy_lopair *out_pair, + u16 r27) +{ + static const struct b43legacy_lopair transitions[8] = { + { .high = 1, .low = 1, }, + { .high = 1, .low = 0, }, + { .high = 1, .low = -1, }, + { .high = 0, .low = -1, }, + { .high = -1, .low = -1, }, + { .high = -1, .low = 0, }, + { .high = -1, .low = 1, }, + { .high = 0, .low = 1, }, + }; + struct b43legacy_lopair lowest_transition = { + .high = in_pair->high, + .low = in_pair->low, + }; + struct b43legacy_lopair tmp_pair; + struct b43legacy_lopair transition; + int i = 12; + int state = 0; + int found_lower; + int j; + int begin; + int end; + u32 lowest_deviation; + u32 tmp; + + /* Note that in_pair and out_pair can point to the same pair. + * Be careful. 
*/ + + b43legacy_lo_write(dev, &lowest_transition); + lowest_deviation = b43legacy_phy_lo_g_singledeviation(dev, r27); + do { + found_lower = 0; + B43legacy_WARN_ON(!(state >= 0 && state <= 8)); + if (state == 0) { + begin = 1; + end = 8; + } else if (state % 2 == 0) { + begin = state - 1; + end = state + 1; + } else { + begin = state - 2; + end = state + 2; + } + if (begin < 1) + begin += 8; + if (end > 8) + end -= 8; + + j = begin; + tmp_pair.high = lowest_transition.high; + tmp_pair.low = lowest_transition.low; + while (1) { + B43legacy_WARN_ON(!(j >= 1 && j <= 8)); + transition.high = tmp_pair.high + + transitions[j - 1].high; + transition.low = tmp_pair.low + transitions[j - 1].low; + if ((abs(transition.low) < 9) + && (abs(transition.high) < 9)) { + b43legacy_lo_write(dev, &transition); + tmp = b43legacy_phy_lo_g_singledeviation(dev, + r27); + if (tmp < lowest_deviation) { + lowest_deviation = tmp; + state = j; + found_lower = 1; + + lowest_transition.high = + transition.high; + lowest_transition.low = transition.low; + } + } + if (j == end) + break; + if (j == 8) + j = 1; + else + j++; + } + } while (i-- && found_lower); + + out_pair->high = lowest_transition.high; + out_pair->low = lowest_transition.low; +} + +/* Set the baseband attenuation value on chip. */ +void b43legacy_phy_set_baseband_attenuation(struct b43legacy_wldev *dev, + u16 bbatt) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 value; + + if (phy->analog == 0) { + value = (b43legacy_read16(dev, 0x03E6) & 0xFFF0); + value |= (bbatt & 0x000F); + b43legacy_write16(dev, 0x03E6, value); + return; + } + + if (phy->analog > 1) { + value = b43legacy_phy_read(dev, 0x0060) & 0xFFC3; + value |= (bbatt << 2) & 0x003C; + } else { + value = b43legacy_phy_read(dev, 0x0060) & 0xFF87; + value |= (bbatt << 3) & 0x0078; + } + b43legacy_phy_write(dev, 0x0060, value); +} + +/* http://bcm-specs.sipsolutions.net/LocalOscillator/Measure */ +void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev) +{ + static const u8 pairorder[10] = { 3, 1, 5, 7, 9, 2, 0, 4, 6, 8 }; + const int is_initializing = (b43legacy_status(dev) + < B43legacy_STAT_STARTED); + struct b43legacy_phy *phy = &dev->phy; + u16 h; + u16 i; + u16 oldi = 0; + u16 j; + struct b43legacy_lopair control; + struct b43legacy_lopair *tmp_control; + u16 tmp; + u16 regstack[16] = { 0 }; + u8 oldchannel; + + /* XXX: What are these? 
*/ + u8 r27 = 0; + u16 r31; + + oldchannel = phy->channel; + /* Setup */ + if (phy->gmode) { + regstack[0] = b43legacy_phy_read(dev, B43legacy_PHY_G_CRS); + regstack[1] = b43legacy_phy_read(dev, 0x0802); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, regstack[0] + & 0x7FFF); + b43legacy_phy_write(dev, 0x0802, regstack[1] & 0xFFFC); + } + regstack[3] = b43legacy_read16(dev, 0x03E2); + b43legacy_write16(dev, 0x03E2, regstack[3] | 0x8000); + regstack[4] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + regstack[5] = b43legacy_phy_read(dev, 0x15); + regstack[6] = b43legacy_phy_read(dev, 0x2A); + regstack[7] = b43legacy_phy_read(dev, 0x35); + regstack[8] = b43legacy_phy_read(dev, 0x60); + regstack[9] = b43legacy_radio_read16(dev, 0x43); + regstack[10] = b43legacy_radio_read16(dev, 0x7A); + regstack[11] = b43legacy_radio_read16(dev, 0x52); + if (phy->gmode) { + regstack[12] = b43legacy_phy_read(dev, 0x0811); + regstack[13] = b43legacy_phy_read(dev, 0x0812); + regstack[14] = b43legacy_phy_read(dev, 0x0814); + regstack[15] = b43legacy_phy_read(dev, 0x0815); + } + b43legacy_radio_selectchannel(dev, 6, 0); + if (phy->gmode) { + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, regstack[0] + & 0x7FFF); + b43legacy_phy_write(dev, 0x0802, regstack[1] & 0xFFFC); + b43legacy_dummy_transmission(dev); + } + b43legacy_radio_write16(dev, 0x0043, 0x0006); + + b43legacy_phy_set_baseband_attenuation(dev, 2); + + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, 0x0000); + b43legacy_phy_write(dev, 0x002E, 0x007F); + b43legacy_phy_write(dev, 0x080F, 0x0078); + b43legacy_phy_write(dev, 0x0035, regstack[7] & ~(1 << 7)); + b43legacy_radio_write16(dev, 0x007A, regstack[10] & 0xFFF0); + b43legacy_phy_write(dev, 0x002B, 0x0203); + b43legacy_phy_write(dev, 0x002A, 0x08A3); + if (phy->gmode) { + b43legacy_phy_write(dev, 0x0814, regstack[14] | 0x0003); + b43legacy_phy_write(dev, 0x0815, regstack[15] & 0xFFFC); + b43legacy_phy_write(dev, 0x0811, 0x01B3); + b43legacy_phy_write(dev, 0x0812, 0x00B2); + } + if (is_initializing) + b43legacy_phy_lo_g_measure_txctl2(dev); + b43legacy_phy_write(dev, 0x080F, 0x8078); + + /* Measure */ + control.low = 0; + control.high = 0; + for (h = 0; h < 10; h++) { + /* Loop over each possible RadioAttenuation (0-9) */ + i = pairorder[h]; + if (is_initializing) { + if (i == 3) { + control.low = 0; + control.high = 0; + } else if (((i % 2 == 1) && (oldi % 2 == 1)) || + ((i % 2 == 0) && (oldi % 2 == 0))) { + tmp_control = b43legacy_get_lopair(phy, oldi, + 0); + memcpy(&control, tmp_control, sizeof(control)); + } else { + tmp_control = b43legacy_get_lopair(phy, 3, 0); + memcpy(&control, tmp_control, sizeof(control)); + } + } + /* Loop over each possible BasebandAttenuation/2 */ + for (j = 0; j < 4; j++) { + if (is_initializing) { + tmp = i * 2 + j; + r27 = 0; + r31 = 0; + if (tmp > 14) { + r31 = 1; + if (tmp > 17) + r27 = 1; + if (tmp > 19) + r27 = 2; + } + } else { + tmp_control = b43legacy_get_lopair(phy, i, + j * 2); + if (!tmp_control->used) + continue; + memcpy(&control, tmp_control, sizeof(control)); + r27 = 3; + r31 = 0; + } + b43legacy_radio_write16(dev, 0x43, i); + b43legacy_radio_write16(dev, 0x52, phy->txctl2); + udelay(10); + b43legacy_voluntary_preempt(); + + b43legacy_phy_set_baseband_attenuation(dev, j * 2); + + tmp = (regstack[10] & 0xFFF0); + if (r31) + tmp |= 0x0008; + b43legacy_radio_write16(dev, 0x007A, tmp); + + tmp_control = b43legacy_get_lopair(phy, i, j * 2); + b43legacy_phy_lo_g_state(dev, &control, tmp_control, + r27); + } + oldi = i; + } + /* Loop over each possible 
RadioAttenuation (10-13) */ + for (i = 10; i < 14; i++) { + /* Loop over each possible BasebandAttenuation/2 */ + for (j = 0; j < 4; j++) { + if (is_initializing) { + tmp_control = b43legacy_get_lopair(phy, i - 9, + j * 2); + memcpy(&control, tmp_control, sizeof(control)); + /* FIXME: The next line is wrong, as the + * following if statement can never trigger. */ + tmp = (i - 9) * 2 + j - 5; + r27 = 0; + r31 = 0; + if (tmp > 14) { + r31 = 1; + if (tmp > 17) + r27 = 1; + if (tmp > 19) + r27 = 2; + } + } else { + tmp_control = b43legacy_get_lopair(phy, i - 9, + j * 2); + if (!tmp_control->used) + continue; + memcpy(&control, tmp_control, sizeof(control)); + r27 = 3; + r31 = 0; + } + b43legacy_radio_write16(dev, 0x43, i - 9); + /* FIXME: shouldn't txctl1 be zero in the next line + * and 3 in the loop above? */ + b43legacy_radio_write16(dev, 0x52, + phy->txctl2 + | (3/*txctl1*/ << 4)); + udelay(10); + b43legacy_voluntary_preempt(); + + b43legacy_phy_set_baseband_attenuation(dev, j * 2); + + tmp = (regstack[10] & 0xFFF0); + if (r31) + tmp |= 0x0008; + b43legacy_radio_write16(dev, 0x7A, tmp); + + tmp_control = b43legacy_get_lopair(phy, i, j * 2); + b43legacy_phy_lo_g_state(dev, &control, tmp_control, + r27); + } + } + + /* Restoration */ + if (phy->gmode) { + b43legacy_phy_write(dev, 0x0015, 0xE300); + b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA0); + udelay(5); + b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA2); + udelay(2); + b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA3); + b43legacy_voluntary_preempt(); + } else + b43legacy_phy_write(dev, 0x0015, r27 | 0xEFA0); + b43legacy_phy_lo_adjust(dev, is_initializing); + b43legacy_phy_write(dev, 0x002E, 0x807F); + if (phy->gmode) + b43legacy_phy_write(dev, 0x002F, 0x0202); + else + b43legacy_phy_write(dev, 0x002F, 0x0101); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, regstack[4]); + b43legacy_phy_write(dev, 0x0015, regstack[5]); + b43legacy_phy_write(dev, 0x002A, regstack[6]); + b43legacy_phy_write(dev, 0x0035, regstack[7]); + b43legacy_phy_write(dev, 0x0060, regstack[8]); + b43legacy_radio_write16(dev, 0x0043, regstack[9]); + b43legacy_radio_write16(dev, 0x007A, regstack[10]); + regstack[11] &= 0x00F0; + regstack[11] |= (b43legacy_radio_read16(dev, 0x52) & 0x000F); + b43legacy_radio_write16(dev, 0x52, regstack[11]); + b43legacy_write16(dev, 0x03E2, regstack[3]); + if (phy->gmode) { + b43legacy_phy_write(dev, 0x0811, regstack[12]); + b43legacy_phy_write(dev, 0x0812, regstack[13]); + b43legacy_phy_write(dev, 0x0814, regstack[14]); + b43legacy_phy_write(dev, 0x0815, regstack[15]); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, regstack[0]); + b43legacy_phy_write(dev, 0x0802, regstack[1]); + } + b43legacy_radio_selectchannel(dev, oldchannel, 1); + +#ifdef CONFIG_B43LEGACY_DEBUG + { + /* Sanity check for all lopairs. 
*/ + for (i = 0; i < B43legacy_LO_COUNT; i++) { + tmp_control = phy->_lo_pairs + i; + if (tmp_control->low < -8 || tmp_control->low > 8 || + tmp_control->high < -8 || tmp_control->high > 8) + b43legacywarn(dev->wl, + "WARNING: Invalid LOpair (low: %d, high:" + " %d, index: %d)\n", + tmp_control->low, tmp_control->high, i); + } + } +#endif /* CONFIG_B43LEGACY_DEBUG */ +} + +static +void b43legacy_phy_lo_mark_current_used(struct b43legacy_wldev *dev) +{ + struct b43legacy_lopair *pair; + + pair = b43legacy_current_lopair(dev); + pair->used = 1; +} + +void b43legacy_phy_lo_mark_all_unused(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + struct b43legacy_lopair *pair; + int i; + + for (i = 0; i < B43legacy_LO_COUNT; i++) { + pair = phy->_lo_pairs + i; + pair->used = 0; + } +} + +/* http://bcm-specs.sipsolutions.net/EstimatePowerOut + * This function converts a TSSI value to dBm in Q5.2 + */ +static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi) +{ + struct b43legacy_phy *phy = &dev->phy; + s8 dbm = 0; + s32 tmp; + + tmp = phy->idle_tssi; + tmp += tssi; + tmp -= phy->savedpctlreg; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + tmp = clamp_val(tmp, 0x00, 0x3F); + dbm = phy->tssi2dbm[tmp]; + break; + default: + B43legacy_BUG_ON(1); + } + + return dbm; +} + +/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ +void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + u16 txpower; + s8 v0; + s8 v1; + s8 v2; + s8 v3; + s8 average; + int max_pwr; + s16 desired_pwr; + s16 estimated_pwr; + s16 pwr_adjust; + s16 radio_att_delta; + s16 baseband_att_delta; + s16 radio_attenuation; + s16 baseband_attenuation; + + if (phy->savedpctlreg == 0xFFFF) + return; + if ((dev->dev->bus->boardinfo.type == 0x0416) && + is_bcm_board_vendor(dev)) + return; +#ifdef CONFIG_B43LEGACY_DEBUG + if (phy->manual_txpower_control) + return; +#endif + + B43legacy_BUG_ON(!(phy->type == B43legacy_PHYTYPE_B || + phy->type == B43legacy_PHYTYPE_G)); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0058); + v0 = (s8)(tmp & 0x00FF); + v1 = (s8)((tmp & 0xFF00) >> 8); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x005A); + v2 = (s8)(tmp & 0x00FF); + v3 = (s8)((tmp & 0xFF00) >> 8); + tmp = 0; + + if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F || v3 == 0x7F) { + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x0070); + v0 = (s8)(tmp & 0x00FF); + v1 = (s8)((tmp & 0xFF00) >> 8); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x0072); + v2 = (s8)(tmp & 0x00FF); + v3 = (s8)((tmp & 0xFF00) >> 8); + if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F || v3 == 0x7F) + return; + v0 = (v0 + 0x20) & 0x3F; + v1 = (v1 + 0x20) & 0x3F; + v2 = (v2 + 0x20) & 0x3F; + v3 = (v3 + 0x20) & 0x3F; + tmp = 1; + } + b43legacy_radio_clear_tssi(dev); + + average = (v0 + v1 + v2 + v3 + 2) / 4; + + if (tmp && (b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x005E) + & 0x8)) + average -= 13; + + estimated_pwr = b43legacy_phy_estimate_power_out(dev, average); + + max_pwr = dev->dev->bus->sprom.maxpwr_bg; + + if ((dev->dev->bus->sprom.boardflags_lo + & B43legacy_BFL_PACTRL) && + (phy->type == B43legacy_PHYTYPE_G)) + max_pwr -= 0x3; + if (unlikely(max_pwr <= 0)) { + b43legacywarn(dev->wl, "Invalid max-TX-power value in SPROM." + "\n"); + max_pwr = 74; /* fake it */ + dev->dev->bus->sprom.maxpwr_bg = max_pwr; + } + + /* Use regulatory information to get the maximum power. 
+ * In the absence of such data from mac80211, we will use 20 dBm, which + * is the value for the EU, US, Canada, and most of the world. + * The regulatory maximum is reduced by the antenna gain (from sprom) + * and 1.5 dBm (a safety factor??). The result is in Q5.2 format + * which accounts for the factor of 4 */ +#define REG_MAX_PWR 20 + max_pwr = min(REG_MAX_PWR * 4 + - dev->dev->bus->sprom.antenna_gain.ghz24.a0 + - 0x6, max_pwr); + + /* find the desired power in Q5.2 - power_level is in dBm + * and limit it - max_pwr is already in Q5.2 */ + desired_pwr = clamp_val(phy->power_level << 2, 0, max_pwr); + if (b43legacy_debug(dev, B43legacy_DBG_XMITPOWER)) + b43legacydbg(dev->wl, "Current TX power output: " Q52_FMT + " dBm, Desired TX power output: " Q52_FMT + " dBm\n", Q52_ARG(estimated_pwr), + Q52_ARG(desired_pwr)); + /* Check if we need to adjust the current power. The factor of 2 is + * for damping */ + pwr_adjust = (desired_pwr - estimated_pwr) / 2; + /* RF attenuation delta + * The minus sign is because lower attenuation => more power */ + radio_att_delta = -(pwr_adjust + 7) >> 3; + /* Baseband attenuation delta */ + baseband_att_delta = -(pwr_adjust >> 1) - (4 * radio_att_delta); + /* Do we need to adjust anything? */ + if ((radio_att_delta == 0) && (baseband_att_delta == 0)) { + b43legacy_phy_lo_mark_current_used(dev); + return; + } + + /* Calculate the new attenuation values. */ + baseband_attenuation = phy->bbatt; + baseband_attenuation += baseband_att_delta; + radio_attenuation = phy->rfatt; + radio_attenuation += radio_att_delta; + + /* Get baseband and radio attenuation values into permitted ranges. + * baseband 0-11, radio 0-9. + * Radio attenuation affects power level 4 times as much as baseband. + */ + if (radio_attenuation < 0) { + baseband_attenuation -= (4 * -radio_attenuation); + radio_attenuation = 0; + } else if (radio_attenuation > 9) { + baseband_attenuation += (4 * (radio_attenuation - 9)); + radio_attenuation = 9; + } else { + while (baseband_attenuation < 0 && radio_attenuation > 0) { + baseband_attenuation += 4; + radio_attenuation--; + } + while (baseband_attenuation > 11 && radio_attenuation < 9) { + baseband_attenuation -= 4; + radio_attenuation++; + } + } + baseband_attenuation = clamp_val(baseband_attenuation, 0, 11); + + txpower = phy->txctl1; + if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) { + if (radio_attenuation <= 1) { + if (txpower == 0) { + txpower = 3; + radio_attenuation += 2; + baseband_attenuation += 2; + } else if (dev->dev->bus->sprom.boardflags_lo + & B43legacy_BFL_PACTRL) { + baseband_attenuation += 4 * + (radio_attenuation - 2); + radio_attenuation = 2; + } + } else if (radio_attenuation > 4 && txpower != 0) { + txpower = 0; + if (baseband_attenuation < 3) { + radio_attenuation -= 3; + baseband_attenuation += 2; + } else { + radio_attenuation -= 2; + baseband_attenuation -= 2; + } + } + } + /* Save the control values */ + phy->txctl1 = txpower; + baseband_attenuation = clamp_val(baseband_attenuation, 0, 11); + radio_attenuation = clamp_val(radio_attenuation, 0, 9); + phy->rfatt = radio_attenuation; + phy->bbatt = baseband_attenuation; + + /* Adjust the hardware */ + b43legacy_phy_lock(dev); + b43legacy_radio_lock(dev); + b43legacy_radio_set_txpower_bg(dev, baseband_attenuation, + radio_attenuation, txpower); + b43legacy_phy_lo_mark_current_used(dev); + b43legacy_radio_unlock(dev); + b43legacy_phy_unlock(dev); +} + +static inline +s32 b43legacy_tssi2dbm_ad(s32 num, s32 den) +{ + if (num < 0) + return num/den; + else + return 
(num+den/2)/den; +} + +static inline +s8 b43legacy_tssi2dbm_entry(s8 entry [], u8 index, s16 pab0, s16 pab1, s16 pab2) +{ + s32 m1; + s32 m2; + s32 f = 256; + s32 q; + s32 delta; + s8 i = 0; + + m1 = b43legacy_tssi2dbm_ad(16 * pab0 + index * pab1, 32); + m2 = max(b43legacy_tssi2dbm_ad(32768 + index * pab2, 256), 1); + do { + if (i > 15) + return -EINVAL; + q = b43legacy_tssi2dbm_ad(f * 4096 - + b43legacy_tssi2dbm_ad(m2 * f, 16) * + f, 2048); + delta = abs(q - f); + f = q; + i++; + } while (delta >= 2); + entry[index] = clamp_val(b43legacy_tssi2dbm_ad(m1 * f, 8192), + -127, 128); + return 0; +} + +/* http://bcm-specs.sipsolutions.net/TSSI_to_DBM_Table */ +int b43legacy_phy_init_tssi2dbm_table(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + s16 pab0; + s16 pab1; + s16 pab2; + u8 idx; + s8 *dyn_tssi2dbm; + + B43legacy_WARN_ON(!(phy->type == B43legacy_PHYTYPE_B || + phy->type == B43legacy_PHYTYPE_G)); + pab0 = (s16)(dev->dev->bus->sprom.pa0b0); + pab1 = (s16)(dev->dev->bus->sprom.pa0b1); + pab2 = (s16)(dev->dev->bus->sprom.pa0b2); + + if ((dev->dev->bus->chip_id == 0x4301) && (phy->radio_ver != 0x2050)) { + phy->idle_tssi = 0x34; + phy->tssi2dbm = b43legacy_tssi2dbm_b_table; + return 0; + } + + if (pab0 != 0 && pab1 != 0 && pab2 != 0 && + pab0 != -1 && pab1 != -1 && pab2 != -1) { + /* The pabX values are set in SPROM. Use them. */ + if ((s8)dev->dev->bus->sprom.itssi_bg != 0 && + (s8)dev->dev->bus->sprom.itssi_bg != -1) + phy->idle_tssi = (s8)(dev->dev->bus->sprom. + itssi_bg); + else + phy->idle_tssi = 62; + dyn_tssi2dbm = kmalloc(64, GFP_KERNEL); + if (dyn_tssi2dbm == NULL) { + b43legacyerr(dev->wl, "Could not allocate memory " + "for tssi2dbm table\n"); + return -ENOMEM; + } + for (idx = 0; idx < 64; idx++) + if (b43legacy_tssi2dbm_entry(dyn_tssi2dbm, idx, pab0, + pab1, pab2)) { + phy->tssi2dbm = NULL; + b43legacyerr(dev->wl, "Could not generate " + "tssi2dBm table\n"); + kfree(dyn_tssi2dbm); + return -ENODEV; + } + phy->tssi2dbm = dyn_tssi2dbm; + phy->dyn_tssi_tbl = 1; + } else { + /* pabX values not set in SPROM. 
*/ + switch (phy->type) { + case B43legacy_PHYTYPE_B: + phy->idle_tssi = 0x34; + phy->tssi2dbm = b43legacy_tssi2dbm_b_table; + break; + case B43legacy_PHYTYPE_G: + phy->idle_tssi = 0x34; + phy->tssi2dbm = b43legacy_tssi2dbm_g_table; + break; + } + } + + return 0; +} + +int b43legacy_phy_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int err = -ENODEV; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + switch (phy->rev) { + case 2: + b43legacy_phy_initb2(dev); + err = 0; + break; + case 4: + b43legacy_phy_initb4(dev); + err = 0; + break; + case 5: + b43legacy_phy_initb5(dev); + err = 0; + break; + case 6: + b43legacy_phy_initb6(dev); + err = 0; + break; + } + break; + case B43legacy_PHYTYPE_G: + b43legacy_phy_initg(dev); + err = 0; + break; + } + if (err) + b43legacyerr(dev->wl, "Unknown PHYTYPE found\n"); + + return err; +} + +void b43legacy_phy_set_antenna_diversity(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 antennadiv; + u16 offset; + u16 value; + u32 ucodeflags; + + antennadiv = phy->antenna_diversity; + + if (antennadiv == 0xFFFF) + antennadiv = 3; + B43legacy_WARN_ON(antennadiv > 3); + + ucodeflags = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + ucodeflags & ~B43legacy_UCODEFLAG_AUTODIV); + + switch (phy->type) { + case B43legacy_PHYTYPE_G: + offset = 0x0400; + + if (antennadiv == 2) + value = (3/*automatic*/ << 7); + else + value = (antennadiv << 7); + b43legacy_phy_write(dev, offset + 1, + (b43legacy_phy_read(dev, offset + 1) + & 0x7E7F) | value); + + if (antennadiv >= 2) { + if (antennadiv == 2) + value = (antennadiv << 7); + else + value = (0/*force0*/ << 7); + b43legacy_phy_write(dev, offset + 0x2B, + (b43legacy_phy_read(dev, + offset + 0x2B) + & 0xFEFF) | value); + } + + if (phy->type == B43legacy_PHYTYPE_G) { + if (antennadiv >= 2) + b43legacy_phy_write(dev, 0x048C, + b43legacy_phy_read(dev, + 0x048C) | 0x2000); + else + b43legacy_phy_write(dev, 0x048C, + b43legacy_phy_read(dev, + 0x048C) & ~0x2000); + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0461, + b43legacy_phy_read(dev, + 0x0461) | 0x0010); + b43legacy_phy_write(dev, 0x04AD, + (b43legacy_phy_read(dev, + 0x04AD) + & 0x00FF) | 0x0015); + if (phy->rev == 2) + b43legacy_phy_write(dev, 0x0427, + 0x0008); + else + b43legacy_phy_write(dev, 0x0427, + (b43legacy_phy_read(dev, 0x0427) + & 0x00FF) | 0x0008); + } else if (phy->rev >= 6) + b43legacy_phy_write(dev, 0x049B, 0x00DC); + } else { + if (phy->rev < 3) + b43legacy_phy_write(dev, 0x002B, + (b43legacy_phy_read(dev, + 0x002B) & 0x00FF) + | 0x0024); + else { + b43legacy_phy_write(dev, 0x0061, + b43legacy_phy_read(dev, + 0x0061) | 0x0010); + if (phy->rev == 3) { + b43legacy_phy_write(dev, 0x0093, + 0x001D); + b43legacy_phy_write(dev, 0x0027, + 0x0008); + } else { + b43legacy_phy_write(dev, 0x0093, + 0x003A); + b43legacy_phy_write(dev, 0x0027, + (b43legacy_phy_read(dev, 0x0027) + & 0x00FF) | 0x0008); + } + } + } + break; + case B43legacy_PHYTYPE_B: + if (dev->dev->id.revision == 2) + value = (3/*automatic*/ << 7); + else + value = (antennadiv << 7); + b43legacy_phy_write(dev, 0x03E2, + (b43legacy_phy_read(dev, 0x03E2) + & 0xFE7F) | value); + break; + default: + B43legacy_WARN_ON(1); + } + + if (antennadiv >= 2) { + ucodeflags = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, 
+ ucodeflags | B43legacy_UCODEFLAG_AUTODIV); + } + + phy->antenna_diversity = antennadiv; +} + +/* Set the PowerSavingControlBits. + * Bitvalues: + * 0 => unset the bit + * 1 => set the bit + * -1 => calculate the bit + */ +void b43legacy_power_saving_ctl_bits(struct b43legacy_wldev *dev, + int bit25, int bit26) +{ + int i; + u32 status; + +/* FIXME: Force 25 to off and 26 to on for now: */ +bit25 = 0; +bit26 = 1; + + if (bit25 == -1) { + /* TODO: If powersave is not off and FIXME is not set and we + * are not in adhoc and thus is not an AP and we arei + * associated, set bit 25 */ + } + if (bit26 == -1) { + /* TODO: If the device is awake or this is an AP, or we are + * scanning, or FIXME, or we are associated, or FIXME, + * or the latest PS-Poll packet sent was successful, + * set bit26 */ + } + status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + if (bit25) + status |= B43legacy_MACCTL_HWPS; + else + status &= ~B43legacy_MACCTL_HWPS; + if (bit26) + status |= B43legacy_MACCTL_AWAKE; + else + status &= ~B43legacy_MACCTL_AWAKE; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status); + if (bit26 && dev->dev->id.revision >= 5) { + for (i = 0; i < 100; i++) { + if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + 0x0040) != 4) + break; + udelay(10); + } + } +} diff --git a/drivers/net/wireless/b43legacy/phy.h b/drivers/net/wireless/b43legacy/phy.h new file mode 100644 index 0000000..ecbe409 --- /dev/null +++ b/drivers/net/wireless/b43legacy/phy.h @@ -0,0 +1,209 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + Copyright (c) 2007 Larry Finger + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#ifndef B43legacy_PHY_H_ +#define B43legacy_PHY_H_ + +#include + +enum { + B43legacy_ANTENNA0, /* Antenna 0 */ + B43legacy_ANTENNA1, /* Antenna 0 */ + B43legacy_ANTENNA_AUTO1, /* Automatic, starting with antenna 1 */ + B43legacy_ANTENNA_AUTO0, /* Automatic, starting with antenna 0 */ + + B43legacy_ANTENNA_AUTO = B43legacy_ANTENNA_AUTO0, + B43legacy_ANTENNA_DEFAULT = B43legacy_ANTENNA_AUTO, +}; + +enum { + B43legacy_INTERFMODE_NONE, + B43legacy_INTERFMODE_NONWLAN, + B43legacy_INTERFMODE_MANUALWLAN, + B43legacy_INTERFMODE_AUTOWLAN, +}; + +/*** PHY Registers ***/ + +/* Routing */ +#define B43legacy_PHYROUTE_OFDM_GPHY 0x400 +#define B43legacy_PHYROUTE_EXT_GPHY 0x800 + +/* Base registers. 
*/ +#define B43legacy_PHY_BASE(reg) (reg) +/* OFDM (A) registers of a G-PHY */ +#define B43legacy_PHY_OFDM(reg) ((reg) | B43legacy_PHYROUTE_OFDM_GPHY) +/* Extended G-PHY registers */ +#define B43legacy_PHY_EXTG(reg) ((reg) | B43legacy_PHYROUTE_EXT_GPHY) + + +/* Extended G-PHY Registers */ +#define B43legacy_PHY_CLASSCTL B43legacy_PHY_EXTG(0x02) /* Classify control */ +#define B43legacy_PHY_GTABCTL B43legacy_PHY_EXTG(0x03) /* G-PHY table control (see below) */ +#define B43legacy_PHY_GTABOFF 0x03FF /* G-PHY table offset (see below) */ +#define B43legacy_PHY_GTABNR 0xFC00 /* G-PHY table number (see below) */ +#define B43legacy_PHY_GTABNR_SHIFT 10 +#define B43legacy_PHY_GTABDATA B43legacy_PHY_EXTG(0x04) /* G-PHY table data */ +#define B43legacy_PHY_LO_MASK B43legacy_PHY_EXTG(0x0F) /* Local Oscillator control mask */ +#define B43legacy_PHY_LO_CTL B43legacy_PHY_EXTG(0x10) /* Local Oscillator control */ +#define B43legacy_PHY_RFOVER B43legacy_PHY_EXTG(0x11) /* RF override */ +#define B43legacy_PHY_RFOVERVAL B43legacy_PHY_EXTG(0x12) /* RF override value */ +/*** OFDM table numbers ***/ +#define B43legacy_OFDMTAB(number, offset) \ + (((number) << B43legacy_PHY_OTABLENR_SHIFT) \ + | (offset)) +#define B43legacy_OFDMTAB_AGC1 B43legacy_OFDMTAB(0x00, 0) +#define B43legacy_OFDMTAB_GAIN0 B43legacy_OFDMTAB(0x00, 0) +#define B43legacy_OFDMTAB_GAINX B43legacy_OFDMTAB(0x01, 0) +#define B43legacy_OFDMTAB_GAIN1 B43legacy_OFDMTAB(0x01, 4) +#define B43legacy_OFDMTAB_AGC3 B43legacy_OFDMTAB(0x02, 0) +#define B43legacy_OFDMTAB_GAIN2 B43legacy_OFDMTAB(0x02, 3) +#define B43legacy_OFDMTAB_LNAHPFGAIN1 B43legacy_OFDMTAB(0x03, 0) +#define B43legacy_OFDMTAB_WRSSI B43legacy_OFDMTAB(0x04, 0) +#define B43legacy_OFDMTAB_LNAHPFGAIN2 B43legacy_OFDMTAB(0x04, 0) +#define B43legacy_OFDMTAB_NOISESCALE B43legacy_OFDMTAB(0x05, 0) +#define B43legacy_OFDMTAB_AGC2 B43legacy_OFDMTAB(0x06, 0) +#define B43legacy_OFDMTAB_ROTOR B43legacy_OFDMTAB(0x08, 0) +#define B43legacy_OFDMTAB_ADVRETARD B43legacy_OFDMTAB(0x09, 0) +#define B43legacy_OFDMTAB_DAC B43legacy_OFDMTAB(0x0C, 0) +#define B43legacy_OFDMTAB_DC B43legacy_OFDMTAB(0x0E, 7) +#define B43legacy_OFDMTAB_PWRDYN2 B43legacy_OFDMTAB(0x0E, 12) +#define B43legacy_OFDMTAB_LNAGAIN B43legacy_OFDMTAB(0x0E, 13) + +#define B43legacy_OFDMTAB_LPFGAIN B43legacy_OFDMTAB(0x0F, 12) +#define B43legacy_OFDMTAB_RSSI B43legacy_OFDMTAB(0x10, 0) + +#define B43legacy_OFDMTAB_AGC1_R1 B43legacy_OFDMTAB(0x13, 0) +#define B43legacy_OFDMTAB_GAINX_R1 B43legacy_OFDMTAB(0x14, 0) +#define B43legacy_OFDMTAB_MINSIGSQ B43legacy_OFDMTAB(0x14, 1) +#define B43legacy_OFDMTAB_AGC3_R1 B43legacy_OFDMTAB(0x15, 0) +#define B43legacy_OFDMTAB_WRSSI_R1 B43legacy_OFDMTAB(0x15, 4) +#define B43legacy_OFDMTAB_TSSI B43legacy_OFDMTAB(0x15, 0) +#define B43legacy_OFDMTAB_DACRFPABB B43legacy_OFDMTAB(0x16, 0) +#define B43legacy_OFDMTAB_DACOFF B43legacy_OFDMTAB(0x17, 0) +#define B43legacy_OFDMTAB_DCBIAS B43legacy_OFDMTAB(0x18, 0) + +void b43legacy_put_attenuation_into_ranges(int *_bbatt, int *_rfatt); + +/* OFDM (A) PHY Registers */ +#define B43legacy_PHY_VERSION_OFDM B43legacy_PHY_OFDM(0x00) /* Versioning register for A-PHY */ +#define B43legacy_PHY_BBANDCFG B43legacy_PHY_OFDM(0x01) /* Baseband config */ +#define B43legacy_PHY_BBANDCFG_RXANT 0x180 /* RX Antenna selection */ +#define B43legacy_PHY_BBANDCFG_RXANT_SHIFT 7 +#define B43legacy_PHY_PWRDOWN B43legacy_PHY_OFDM(0x03) /* Powerdown */ +#define B43legacy_PHY_CRSTHRES1 B43legacy_PHY_OFDM(0x06) /* CRS Threshold 1 */ +#define B43legacy_PHY_LNAHPFCTL B43legacy_PHY_OFDM(0x1C) /* LNA/HPF control */ 
+#define B43legacy_PHY_ADIVRELATED B43legacy_PHY_OFDM(0x27) /* FIXME rename */ +#define B43legacy_PHY_CRS0 B43legacy_PHY_OFDM(0x29) +#define B43legacy_PHY_ANTDWELL B43legacy_PHY_OFDM(0x2B) /* Antenna dwell */ +#define B43legacy_PHY_ANTDWELL_AUTODIV1 0x0100 /* Automatic RX diversity start antenna */ +#define B43legacy_PHY_ENCORE B43legacy_PHY_OFDM(0x49) /* "Encore" (RangeMax / BroadRange) */ +#define B43legacy_PHY_ENCORE_EN 0x0200 /* Encore enable */ +#define B43legacy_PHY_LMS B43legacy_PHY_OFDM(0x55) +#define B43legacy_PHY_OFDM61 B43legacy_PHY_OFDM(0x61) /* FIXME rename */ +#define B43legacy_PHY_OFDM61_10 0x0010 /* FIXME rename */ +#define B43legacy_PHY_IQBAL B43legacy_PHY_OFDM(0x69) /* I/Q balance */ +#define B43legacy_PHY_OTABLECTL B43legacy_PHY_OFDM(0x72) /* OFDM table control (see below) */ +#define B43legacy_PHY_OTABLEOFF 0x03FF /* OFDM table offset (see below) */ +#define B43legacy_PHY_OTABLENR 0xFC00 /* OFDM table number (see below) */ +#define B43legacy_PHY_OTABLENR_SHIFT 10 +#define B43legacy_PHY_OTABLEI B43legacy_PHY_OFDM(0x73) /* OFDM table data I */ +#define B43legacy_PHY_OTABLEQ B43legacy_PHY_OFDM(0x74) /* OFDM table data Q */ +#define B43legacy_PHY_HPWR_TSSICTL B43legacy_PHY_OFDM(0x78) /* Hardware power TSSI control */ +#define B43legacy_PHY_NRSSITHRES B43legacy_PHY_OFDM(0x8A) /* NRSSI threshold */ +#define B43legacy_PHY_ANTWRSETT B43legacy_PHY_OFDM(0x8C) /* Antenna WR settle */ +#define B43legacy_PHY_ANTWRSETT_ARXDIV 0x2000 /* Automatic RX diversity enabled */ +#define B43legacy_PHY_CLIPPWRDOWNT B43legacy_PHY_OFDM(0x93) /* Clip powerdown threshold */ +#define B43legacy_PHY_OFDM9B B43legacy_PHY_OFDM(0x9B) /* FIXME rename */ +#define B43legacy_PHY_N1P1GAIN B43legacy_PHY_OFDM(0xA0) +#define B43legacy_PHY_P1P2GAIN B43legacy_PHY_OFDM(0xA1) +#define B43legacy_PHY_N1N2GAIN B43legacy_PHY_OFDM(0xA2) +#define B43legacy_PHY_CLIPTHRES B43legacy_PHY_OFDM(0xA3) +#define B43legacy_PHY_CLIPN1P2THRES B43legacy_PHY_OFDM(0xA4) +#define B43legacy_PHY_DIVSRCHIDX B43legacy_PHY_OFDM(0xA8) /* Divider search gain/index */ +#define B43legacy_PHY_CLIPP2THRES B43legacy_PHY_OFDM(0xA9) +#define B43legacy_PHY_CLIPP3THRES B43legacy_PHY_OFDM(0xAA) +#define B43legacy_PHY_DIVP1P2GAIN B43legacy_PHY_OFDM(0xAB) +#define B43legacy_PHY_DIVSRCHGAINBACK B43legacy_PHY_OFDM(0xAD) /* Divider search gain back */ +#define B43legacy_PHY_DIVSRCHGAINCHNG B43legacy_PHY_OFDM(0xAE) /* Divider search gain change */ +#define B43legacy_PHY_CRSTHRES1_R1 B43legacy_PHY_OFDM(0xC0) /* CRS Threshold 1 (rev 1 only) */ +#define B43legacy_PHY_CRSTHRES2_R1 B43legacy_PHY_OFDM(0xC1) /* CRS Threshold 2 (rev 1 only) */ +#define B43legacy_PHY_TSSIP_LTBASE B43legacy_PHY_OFDM(0x380) /* TSSI power lookup table base */ +#define B43legacy_PHY_DC_LTBASE B43legacy_PHY_OFDM(0x3A0) /* DC lookup table base */ +#define B43legacy_PHY_GAIN_LTBASE B43legacy_PHY_OFDM(0x3C0) /* Gain lookup table base */ + +void b43legacy_put_attenuation_into_ranges(int *_bbatt, int *_rfatt); + +/* Masks for the different PHY versioning registers. 
*/ +#define B43legacy_PHYVER_ANALOG 0xF000 +#define B43legacy_PHYVER_ANALOG_SHIFT 12 +#define B43legacy_PHYVER_TYPE 0x0F00 +#define B43legacy_PHYVER_TYPE_SHIFT 8 +#define B43legacy_PHYVER_VERSION 0x00FF + +struct b43legacy_wldev; + +void b43legacy_phy_lock(struct b43legacy_wldev *dev); +void b43legacy_phy_unlock(struct b43legacy_wldev *dev); + +/* Card uses the loopback gain stuff */ +#define has_loopback_gain(phy) \ + (((phy)->rev > 1) || ((phy)->gmode)) + +u16 b43legacy_phy_read(struct b43legacy_wldev *dev, u16 offset); +void b43legacy_phy_write(struct b43legacy_wldev *dev, u16 offset, u16 val); + +int b43legacy_phy_init_tssi2dbm_table(struct b43legacy_wldev *dev); +int b43legacy_phy_init(struct b43legacy_wldev *dev); + +void b43legacy_set_rx_antenna(struct b43legacy_wldev *dev, int antenna); + +void b43legacy_phy_set_antenna_diversity(struct b43legacy_wldev *dev); +void b43legacy_phy_calibrate(struct b43legacy_wldev *dev); +int b43legacy_phy_connect(struct b43legacy_wldev *dev, int connect); + +void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev); +void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev); +void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev); + +/* Adjust the LocalOscillator to the saved values. + * "fixed" is only set to 1 once in initialization. Set to 0 otherwise. + */ +void b43legacy_phy_lo_adjust(struct b43legacy_wldev *dev, int fixed); +void b43legacy_phy_lo_mark_all_unused(struct b43legacy_wldev *dev); + +void b43legacy_phy_set_baseband_attenuation(struct b43legacy_wldev *dev, + u16 baseband_attenuation); + +void b43legacy_power_saving_ctl_bits(struct b43legacy_wldev *dev, + int bit25, int bit26); + +#endif /* B43legacy_PHY_H_ */ diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c new file mode 100644 index 0000000..017c0e9 --- /dev/null +++ b/drivers/net/wireless/b43legacy/pio.c @@ -0,0 +1,694 @@ +/* + + Broadcom B43legacy wireless driver + + PIO Transmission + + Copyright (c) 2005 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43legacy.h" +#include "pio.h" +#include "main.h" +#include "xmit.h" + +#include + + +static void tx_start(struct b43legacy_pioqueue *queue) +{ + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_INIT); +} + +static void tx_octet(struct b43legacy_pioqueue *queue, + u8 octet) +{ + if (queue->need_workarounds) { + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO); + } else { + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO); + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); + } +} + +static u16 tx_get_next_word(const u8 *txhdr, + const u8 *packet, + size_t txhdr_size, + unsigned int *pos) +{ + const u8 *source; + unsigned int i = *pos; + u16 ret; + + if (i < txhdr_size) + source = txhdr; + else { + source = packet; + i -= txhdr_size; + } + ret = le16_to_cpu(*((__le16 *)(source + i))); + *pos += 2; + + return ret; +} + +static void tx_data(struct b43legacy_pioqueue *queue, + u8 *txhdr, + const u8 *packet, + unsigned int octets) +{ + u16 data; + unsigned int i = 0; + + if (queue->need_workarounds) { + data = tx_get_next_word(txhdr, packet, + sizeof(struct b43legacy_txhdr_fw3), &i); + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data); + } + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO | + B43legacy_PIO_TXCTL_WRITEHI); + while (i < octets - 1) { + data = tx_get_next_word(txhdr, packet, + sizeof(struct b43legacy_txhdr_fw3), &i); + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data); + } + if (octets % 2) + tx_octet(queue, packet[octets - + sizeof(struct b43legacy_txhdr_fw3) - 1]); +} + +static void tx_complete(struct b43legacy_pioqueue *queue, + struct sk_buff *skb) +{ + if (queue->need_workarounds) { + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, + skb->data[skb->len - 1]); + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO | + B43legacy_PIO_TXCTL_COMPLETE); + } else + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_COMPLETE); +} + +static u16 generate_cookie(struct b43legacy_pioqueue *queue, + struct b43legacy_pio_txpacket *packet) +{ + u16 cookie = 0x0000; + int packetindex; + + /* We use the upper 4 bits for the PIO + * controller ID and the lower 12 bits + * for the packet index (in the cache). 
+ */ + switch (queue->mmio_base) { + case B43legacy_MMIO_PIO1_BASE: + break; + case B43legacy_MMIO_PIO2_BASE: + cookie = 0x1000; + break; + case B43legacy_MMIO_PIO3_BASE: + cookie = 0x2000; + break; + case B43legacy_MMIO_PIO4_BASE: + cookie = 0x3000; + break; + default: + B43legacy_WARN_ON(1); + } + packetindex = pio_txpacket_getindex(packet); + B43legacy_WARN_ON(!(((u16)packetindex & 0xF000) == 0x0000)); + cookie |= (u16)packetindex; + + return cookie; +} + +static +struct b43legacy_pioqueue *parse_cookie(struct b43legacy_wldev *dev, + u16 cookie, + struct b43legacy_pio_txpacket **packet) +{ + struct b43legacy_pio *pio = &dev->pio; + struct b43legacy_pioqueue *queue = NULL; + int packetindex; + + switch (cookie & 0xF000) { + case 0x0000: + queue = pio->queue0; + break; + case 0x1000: + queue = pio->queue1; + break; + case 0x2000: + queue = pio->queue2; + break; + case 0x3000: + queue = pio->queue3; + break; + default: + B43legacy_WARN_ON(1); + } + packetindex = (cookie & 0x0FFF); + B43legacy_WARN_ON(!(packetindex >= 0 && packetindex + < B43legacy_PIO_MAXTXPACKETS)); + *packet = &(queue->tx_packets_cache[packetindex]); + + return queue; +} + +union txhdr_union { + struct b43legacy_txhdr_fw3 txhdr_fw3; +}; + +static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue, + struct sk_buff *skb, + struct b43legacy_pio_txpacket *packet, + size_t txhdr_size) +{ + union txhdr_union txhdr_data; + u8 *txhdr = NULL; + unsigned int octets; + int err; + + txhdr = (u8 *)(&txhdr_data.txhdr_fw3); + + B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); + err = b43legacy_generate_txhdr(queue->dev, + txhdr, skb->data, skb->len, + IEEE80211_SKB_CB(skb), + generate_cookie(queue, packet)); + if (err) + return err; + + tx_start(queue); + octets = skb->len + txhdr_size; + if (queue->need_workarounds) + octets--; + tx_data(queue, txhdr, (u8 *)skb->data, octets); + tx_complete(queue, skb); + + return 0; +} + +static void free_txpacket(struct b43legacy_pio_txpacket *packet, + int irq_context) +{ + struct b43legacy_pioqueue *queue = packet->queue; + + if (packet->skb) { + if (irq_context) + dev_kfree_skb_irq(packet->skb); + else + dev_kfree_skb(packet->skb); + } + list_move(&packet->list, &queue->txfree); + queue->nr_txfree++; +} + +static int pio_tx_packet(struct b43legacy_pio_txpacket *packet) +{ + struct b43legacy_pioqueue *queue = packet->queue; + struct sk_buff *skb = packet->skb; + u16 octets; + int err; + + octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3); + if (queue->tx_devq_size < octets) { + b43legacywarn(queue->dev->wl, "PIO queue too small. " + "Dropping packet.\n"); + /* Drop it silently (return success) */ + free_txpacket(packet, 1); + return 0; + } + B43legacy_WARN_ON(queue->tx_devq_packets > + B43legacy_PIO_MAXTXDEVQPACKETS); + B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size); + /* Check if there is sufficient free space on the device + * TX queue. If not, return and let the TX tasklet + * retry later. + */ + if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS) + return -EBUSY; + if (queue->tx_devq_used + octets > queue->tx_devq_size) + return -EBUSY; + /* Now poke the device. */ + err = pio_tx_write_fragment(queue, skb, packet, + sizeof(struct b43legacy_txhdr_fw3)); + if (unlikely(err == -ENOKEY)) { + /* Drop this packet, as we don't have the encryption key + * anymore and must not transmit it unencrypted. */ + free_txpacket(packet, 1); + return 0; + } + + /* Account for the packet size. 
+ * (We must not overflow the device TX queue) + */ + queue->tx_devq_packets++; + queue->tx_devq_used += octets; + + /* Transmission started, everything ok, move the + * packet to the txrunning list. + */ + list_move_tail(&packet->list, &queue->txrunning); + + return 0; +} + +static void tx_tasklet(unsigned long d) +{ + struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d; + struct b43legacy_wldev *dev = queue->dev; + unsigned long flags; + struct b43legacy_pio_txpacket *packet, *tmp_packet; + int err; + u16 txctl; + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + if (queue->tx_frozen) + goto out_unlock; + txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL); + if (txctl & B43legacy_PIO_TXCTL_SUSPEND) + goto out_unlock; + + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) { + /* Try to transmit the packet. This can fail, if + * the device queue is full. In case of failure, the + * packet is left in the txqueue. + * If transmission succeed, the packet is moved to txrunning. + * If it is impossible to transmit the packet, it + * is dropped. + */ + err = pio_tx_packet(packet); + if (err) + break; + } +out_unlock: + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); +} + +static void setup_txqueues(struct b43legacy_pioqueue *queue) +{ + struct b43legacy_pio_txpacket *packet; + int i; + + queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS; + for (i = 0; i < B43legacy_PIO_MAXTXPACKETS; i++) { + packet = &(queue->tx_packets_cache[i]); + + packet->queue = queue; + INIT_LIST_HEAD(&packet->list); + + list_add(&packet->list, &queue->txfree); + } +} + +static +struct b43legacy_pioqueue *b43legacy_setup_pioqueue(struct b43legacy_wldev *dev, + u16 pio_mmio_base) +{ + struct b43legacy_pioqueue *queue; + u32 value; + u16 qsize; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + goto out; + + queue->dev = dev; + queue->mmio_base = pio_mmio_base; + queue->need_workarounds = (dev->dev->id.revision < 3); + + INIT_LIST_HEAD(&queue->txfree); + INIT_LIST_HEAD(&queue->txqueue); + INIT_LIST_HEAD(&queue->txrunning); + tasklet_init(&queue->txtask, tx_tasklet, + (unsigned long)queue); + + value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + value &= ~B43legacy_MACCTL_BE; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, value); + + qsize = b43legacy_read16(dev, queue->mmio_base + + B43legacy_PIO_TXQBUFSIZE); + if (qsize == 0) { + b43legacyerr(dev->wl, "This card does not support PIO " + "operation mode. 
Please use DMA mode " + "(module parameter pio=0).\n"); + goto err_freequeue; + } + if (qsize <= B43legacy_PIO_TXQADJUST) { + b43legacyerr(dev->wl, "PIO tx device-queue too small (%u)\n", + qsize); + goto err_freequeue; + } + qsize -= B43legacy_PIO_TXQADJUST; + queue->tx_devq_size = qsize; + + setup_txqueues(queue); + +out: + return queue; + +err_freequeue: + kfree(queue); + queue = NULL; + goto out; +} + +static void cancel_transfers(struct b43legacy_pioqueue *queue) +{ + struct b43legacy_pio_txpacket *packet, *tmp_packet; + + tasklet_disable(&queue->txtask); + + list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) + free_txpacket(packet, 0); + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) + free_txpacket(packet, 0); +} + +static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue) +{ + if (!queue) + return; + + cancel_transfers(queue); + kfree(queue); +} + +void b43legacy_pio_free(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio; + + if (!b43legacy_using_pio(dev)) + return; + pio = &dev->pio; + + b43legacy_destroy_pioqueue(pio->queue3); + pio->queue3 = NULL; + b43legacy_destroy_pioqueue(pio->queue2); + pio->queue2 = NULL; + b43legacy_destroy_pioqueue(pio->queue1); + pio->queue1 = NULL; + b43legacy_destroy_pioqueue(pio->queue0); + pio->queue0 = NULL; +} + +int b43legacy_pio_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio = &dev->pio; + struct b43legacy_pioqueue *queue; + int err = -ENOMEM; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE); + if (!queue) + goto out; + pio->queue0 = queue; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE); + if (!queue) + goto err_destroy0; + pio->queue1 = queue; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE); + if (!queue) + goto err_destroy1; + pio->queue2 = queue; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE); + if (!queue) + goto err_destroy2; + pio->queue3 = queue; + + if (dev->dev->id.revision < 3) + dev->irq_mask |= B43legacy_IRQ_PIO_WORKAROUND; + + b43legacydbg(dev->wl, "PIO initialized\n"); + err = 0; +out: + return err; + +err_destroy2: + b43legacy_destroy_pioqueue(pio->queue2); + pio->queue2 = NULL; +err_destroy1: + b43legacy_destroy_pioqueue(pio->queue1); + pio->queue1 = NULL; +err_destroy0: + b43legacy_destroy_pioqueue(pio->queue0); + pio->queue0 = NULL; + goto out; +} + +int b43legacy_pio_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb) +{ + struct b43legacy_pioqueue *queue = dev->pio.queue1; + struct b43legacy_pio_txpacket *packet; + + B43legacy_WARN_ON(queue->tx_suspended); + B43legacy_WARN_ON(list_empty(&queue->txfree)); + + packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket, + list); + packet->skb = skb; + + list_move_tail(&packet->list, &queue->txqueue); + queue->nr_txfree--; + B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS); + + tasklet_schedule(&queue->txtask); + + return 0; +} + +void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + struct b43legacy_pioqueue *queue; + struct b43legacy_pio_txpacket *packet; + struct ieee80211_tx_info *info; + int retry_limit; + + queue = parse_cookie(dev, status->cookie, &packet); + B43legacy_WARN_ON(!queue); + + if (!packet->skb) + return; + + queue->tx_devq_packets--; + queue->tx_devq_used -= (packet->skb->len + + sizeof(struct b43legacy_txhdr_fw3)); + + info = IEEE80211_SKB_CB(packet->skb); + + /* preserve the confiured retry limit before 
clearing the status + * The xmit function has overwritten the rc's value with the actual + * retry limit done by the hardware */ + retry_limit = info->status.rates[0].count; + ieee80211_tx_info_clear_status(info); + + if (status->acked) + info->flags |= IEEE80211_TX_STAT_ACK; + + if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) { + /* + * If the short retries (RTS, not data frame) have exceeded + * the limit, the hw will not have tried the selected rate, + * but will have used the fallback rate instead. + * Don't let the rate control count attempts for the selected + * rate in this case, otherwise the statistics will be off. + */ + info->status.rates[0].count = 0; + info->status.rates[1].count = status->frame_count; + } else { + if (status->frame_count > retry_limit) { + info->status.rates[0].count = retry_limit; + info->status.rates[1].count = status->frame_count - + retry_limit; + + } else { + info->status.rates[0].count = status->frame_count; + info->status.rates[1].idx = -1; + } + } + ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb); + packet->skb = NULL; + + free_txpacket(packet, 1); + /* If there are packets on the txqueue, poke the tasklet + * to transmit them. + */ + if (!list_empty(&queue->txqueue)) + tasklet_schedule(&queue->txtask); +} + +static void pio_rx_error(struct b43legacy_pioqueue *queue, + int clear_buffers, + const char *error) +{ + int i; + + b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error); + b43legacy_pio_write(queue, B43legacy_PIO_RXCTL, + B43legacy_PIO_RXCTL_READY); + if (clear_buffers) { + B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE); + for (i = 0; i < 15; i++) { + /* Dummy read. */ + b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + } + } +} + +void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) +{ + __le16 preamble[21] = { 0 }; + struct b43legacy_rxhdr_fw3 *rxhdr; + u16 tmp; + u16 len; + u16 macstat; + int i; + int preamble_readwords; + struct sk_buff *skb; + + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL); + if (!(tmp & B43legacy_PIO_RXCTL_DATAAVAILABLE)) + return; + b43legacy_pio_write(queue, B43legacy_PIO_RXCTL, + B43legacy_PIO_RXCTL_DATAAVAILABLE); + + for (i = 0; i < 10; i++) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL); + if (tmp & B43legacy_PIO_RXCTL_READY) + goto data_ready; + udelay(10); + } + b43legacydbg(queue->dev->wl, "PIO RX timed out\n"); + return; +data_ready: + + len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + if (unlikely(len > 0x700)) { + pio_rx_error(queue, 0, "len > 0x700"); + return; + } + if (unlikely(len == 0 && queue->mmio_base != + B43legacy_MMIO_PIO4_BASE)) { + pio_rx_error(queue, 0, "len == 0"); + return; + } + preamble[0] = cpu_to_le16(len); + if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) + preamble_readwords = 14 / sizeof(u16); + else + preamble_readwords = 18 / sizeof(u16); + for (i = 0; i < preamble_readwords; i++) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + preamble[i + 1] = cpu_to_le16(tmp); + } + rxhdr = (struct b43legacy_rxhdr_fw3 *)preamble; + macstat = le16_to_cpu(rxhdr->mac_status); + if (macstat & B43legacy_RX_MAC_FCSERR) { + pio_rx_error(queue, + (queue->mmio_base == B43legacy_MMIO_PIO1_BASE), + "Frame FCS error"); + return; + } + if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) { + /* We received an xmit status. 
*/ + struct b43legacy_hwtxstatus *hw; + + hw = (struct b43legacy_hwtxstatus *)(preamble + 1); + b43legacy_handle_hwtxstatus(queue->dev, hw); + + return; + } + + skb = dev_alloc_skb(len); + if (unlikely(!skb)) { + pio_rx_error(queue, 1, "OOM"); + return; + } + skb_put(skb, len); + for (i = 0; i < len - 1; i += 2) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + *((__le16 *)(skb->data + i)) = cpu_to_le16(tmp); + } + if (len % 2) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + skb->data[len - 1] = (tmp & 0x00FF); + } + b43legacy_rx(queue->dev, skb, rxhdr); +} + +void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue) +{ + b43legacy_power_saving_ctl_bits(queue->dev, -1, 1); + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + b43legacy_pio_read(queue, B43legacy_PIO_TXCTL) + | B43legacy_PIO_TXCTL_SUSPEND); +} + +void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue) +{ + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + b43legacy_pio_read(queue, B43legacy_PIO_TXCTL) + & ~B43legacy_PIO_TXCTL_SUSPEND); + b43legacy_power_saving_ctl_bits(queue->dev, -1, -1); + tasklet_schedule(&queue->txtask); +} + +void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio; + + B43legacy_WARN_ON(!b43legacy_using_pio(dev)); + pio = &dev->pio; + pio->queue0->tx_frozen = 1; + pio->queue1->tx_frozen = 1; + pio->queue2->tx_frozen = 1; + pio->queue3->tx_frozen = 1; +} + +void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio; + + B43legacy_WARN_ON(!b43legacy_using_pio(dev)); + pio = &dev->pio; + pio->queue0->tx_frozen = 0; + pio->queue1->tx_frozen = 0; + pio->queue2->tx_frozen = 0; + pio->queue3->tx_frozen = 0; + if (!list_empty(&pio->queue0->txqueue)) + tasklet_schedule(&pio->queue0->txtask); + if (!list_empty(&pio->queue1->txqueue)) + tasklet_schedule(&pio->queue1->txtask); + if (!list_empty(&pio->queue2->txqueue)) + tasklet_schedule(&pio->queue2->txtask); + if (!list_empty(&pio->queue3->txqueue)) + tasklet_schedule(&pio->queue3->txtask); +} diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/b43legacy/pio.h new file mode 100644 index 0000000..8e6773e --- /dev/null +++ b/drivers/net/wireless/b43legacy/pio.h @@ -0,0 +1,158 @@ +#ifndef B43legacy_PIO_H_ +#define B43legacy_PIO_H_ + +#include "b43legacy.h" + +#include +#include +#include + + +#define B43legacy_PIO_TXCTL 0x00 +#define B43legacy_PIO_TXDATA 0x02 +#define B43legacy_PIO_TXQBUFSIZE 0x04 +#define B43legacy_PIO_RXCTL 0x08 +#define B43legacy_PIO_RXDATA 0x0A + +#define B43legacy_PIO_TXCTL_WRITELO (1 << 0) +#define B43legacy_PIO_TXCTL_WRITEHI (1 << 1) +#define B43legacy_PIO_TXCTL_COMPLETE (1 << 2) +#define B43legacy_PIO_TXCTL_INIT (1 << 3) +#define B43legacy_PIO_TXCTL_SUSPEND (1 << 7) + +#define B43legacy_PIO_RXCTL_DATAAVAILABLE (1 << 0) +#define B43legacy_PIO_RXCTL_READY (1 << 1) + +/* PIO constants */ +#define B43legacy_PIO_MAXTXDEVQPACKETS 31 +#define B43legacy_PIO_TXQADJUST 80 + +/* PIO tuning knobs */ +#define B43legacy_PIO_MAXTXPACKETS 256 + + + +#ifdef CONFIG_B43LEGACY_PIO + + +struct b43legacy_pioqueue; +struct b43legacy_xmitstatus; + +struct b43legacy_pio_txpacket { + struct b43legacy_pioqueue *queue; + struct sk_buff *skb; + struct list_head list; +}; + +#define pio_txpacket_getindex(packet) ((int)((packet) - \ + (packet)->queue->tx_packets_cache)) + +struct b43legacy_pioqueue { + struct b43legacy_wldev *dev; + u16 mmio_base; + + bool tx_suspended; + bool tx_frozen; + bool need_workarounds; /* Workarounds needed for core.rev 
< 3 */ + + /* Adjusted size of the device internal TX buffer. */ + u16 tx_devq_size; + /* Used octets of the device internal TX buffer. */ + u16 tx_devq_used; + /* Used packet slots in the device internal TX buffer. */ + u8 tx_devq_packets; + /* Packets from the txfree list can + * be taken on incoming TX requests. + */ + struct list_head txfree; + unsigned int nr_txfree; + /* Packets on the txqueue are queued, + * but not completely written to the chip, yet. + */ + struct list_head txqueue; + /* Packets on the txrunning queue are completely + * posted to the device. We are waiting for the txstatus. + */ + struct list_head txrunning; + struct tasklet_struct txtask; + struct b43legacy_pio_txpacket + tx_packets_cache[B43legacy_PIO_MAXTXPACKETS]; +}; + +static inline +u16 b43legacy_pio_read(struct b43legacy_pioqueue *queue, + u16 offset) +{ + return b43legacy_read16(queue->dev, queue->mmio_base + offset); +} + +static inline +void b43legacy_pio_write(struct b43legacy_pioqueue *queue, + u16 offset, u16 value) +{ + b43legacy_write16(queue->dev, queue->mmio_base + offset, value); + mmiowb(); +} + + +int b43legacy_pio_init(struct b43legacy_wldev *dev); +void b43legacy_pio_free(struct b43legacy_wldev *dev); + +int b43legacy_pio_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb); +void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); +void b43legacy_pio_rx(struct b43legacy_pioqueue *queue); + +/* Suspend TX queue in hardware. */ +void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue); +void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue); +/* Suspend (freeze) the TX tasklet (software level). */ +void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev); +void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev); + +#else /* CONFIG_B43LEGACY_PIO */ + +static inline +int b43legacy_pio_init(struct b43legacy_wldev *dev) +{ + return 0; +} +static inline +void b43legacy_pio_free(struct b43legacy_wldev *dev) +{ +} +static inline +int b43legacy_pio_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb) +{ + return 0; +} +static inline +void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ +} +static inline +void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) +{ +} +static inline +void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue) +{ +} +static inline +void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue) +{ +} +static inline +void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev) +{ +} +static inline +void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev) +{ +} + +#endif /* CONFIG_B43LEGACY_PIO */ +#endif /* B43legacy_PIO_H_ */ diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c new file mode 100644 index 0000000..2df545c --- /dev/null +++ b/drivers/net/wireless/b43legacy/radio.c @@ -0,0 +1,2162 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + Copyright (c) 2007 Larry Finger + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include + +#include "b43legacy.h" +#include "main.h" +#include "phy.h" +#include "radio.h" +#include "ilt.h" + + +/* Table for b43legacy_radio_calibrationvalue() */ +static const u16 rcc_table[16] = { + 0x0002, 0x0003, 0x0001, 0x000F, + 0x0006, 0x0007, 0x0005, 0x000F, + 0x000A, 0x000B, 0x0009, 0x000F, + 0x000E, 0x000F, 0x000D, 0x000F, +}; + +/* Reverse the bits of a 4bit value. + * Example: 1101 is flipped 1011 + */ +static u16 flip_4bit(u16 value) +{ + u16 flipped = 0x0000; + + B43legacy_BUG_ON(!((value & ~0x000F) == 0x0000)); + + flipped |= (value & 0x0001) << 3; + flipped |= (value & 0x0002) << 1; + flipped |= (value & 0x0004) >> 1; + flipped |= (value & 0x0008) >> 3; + + return flipped; +} + +/* Get the freq, as it has to be written to the device. */ +static inline +u16 channel2freq_bg(u8 channel) +{ + /* Frequencies are given as frequencies_bg[index] + 2.4GHz + * Starting with channel 1 + */ + static const u16 frequencies_bg[14] = { + 12, 17, 22, 27, + 32, 37, 42, 47, + 52, 57, 62, 67, + 72, 84, + }; + + if (unlikely(channel < 1 || channel > 14)) { + printk(KERN_INFO "b43legacy: Channel %d is out of range\n", + channel); + dump_stack(); + return 2412; + } + + return frequencies_bg[channel - 1]; +} + +void b43legacy_radio_lock(struct b43legacy_wldev *dev) +{ + u32 status; + + status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + B43legacy_WARN_ON(status & B43legacy_MACCTL_RADIOLOCK); + status |= B43legacy_MACCTL_RADIOLOCK; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status); + mmiowb(); + udelay(10); +} + +void b43legacy_radio_unlock(struct b43legacy_wldev *dev) +{ + u32 status; + + b43legacy_read16(dev, B43legacy_MMIO_PHY_VER); /* dummy read */ + status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + B43legacy_WARN_ON(!(status & B43legacy_MACCTL_RADIOLOCK)); + status &= ~B43legacy_MACCTL_RADIOLOCK; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status); + mmiowb(); +} + +u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset) +{ + struct b43legacy_phy *phy = &dev->phy; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + if (phy->radio_ver == 0x2053) { + if (offset < 0x70) + offset += 0x80; + else if (offset < 0x80) + offset += 0x70; + } else if (phy->radio_ver == 0x2050) + offset |= 0x80; + else + B43legacy_WARN_ON(1); + break; + case B43legacy_PHYTYPE_G: + offset |= 0x80; + break; + default: + B43legacy_BUG_ON(1); + } + + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset); + return b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_LOW); +} + +void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val) +{ + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_RADIO_DATA_LOW, val); +} + +static void b43legacy_set_all_gains(struct b43legacy_wldev *dev, + s16 first, s16 second, s16 third) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 i; + u16 start = 0x08; + u16 end = 0x18; + u16 offset = 0x0400; + u16 tmp; + + if (phy->rev <= 1) { + offset = 0x5000; + start = 0x10; + end = 0x20; + } + + for 
(i = 0; i < 4; i++) + b43legacy_ilt_write(dev, offset + i, first); + + for (i = start; i < end; i++) + b43legacy_ilt_write(dev, offset + i, second); + + if (third != -1) { + tmp = ((u16)third << 14) | ((u16)third << 6); + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) & 0xBFBF) + | tmp); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) & 0xBFBF) + | tmp); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) & 0xBFBF) + | tmp); + } + b43legacy_dummy_transmission(dev); +} + +static void b43legacy_set_original_gains(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 i; + u16 tmp; + u16 offset = 0x0400; + u16 start = 0x0008; + u16 end = 0x0018; + + if (phy->rev <= 1) { + offset = 0x5000; + start = 0x0010; + end = 0x0020; + } + + for (i = 0; i < 4; i++) { + tmp = (i & 0xFFFC); + tmp |= (i & 0x0001) << 1; + tmp |= (i & 0x0002) >> 1; + + b43legacy_ilt_write(dev, offset + i, tmp); + } + + for (i = start; i < end; i++) + b43legacy_ilt_write(dev, offset + i, i - start); + + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) & 0xBFBF) + | 0x4040); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) & 0xBFBF) + | 0x4040); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) & 0xBFBF) + | 0x4000); + b43legacy_dummy_transmission(dev); +} + +/* Synthetic PU workaround */ +static void b43legacy_synth_pu_workaround(struct b43legacy_wldev *dev, + u8 channel) +{ + struct b43legacy_phy *phy = &dev->phy; + + might_sleep(); + + if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) + /* We do not need the workaround. */ + return; + + if (channel <= 10) + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel + 4)); + else + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel)); + msleep(1); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel)); +} + +u8 b43legacy_radio_aci_detect(struct b43legacy_wldev *dev, u8 channel) +{ + struct b43legacy_phy *phy = &dev->phy; + u8 ret = 0; + u16 saved; + u16 rssi; + u16 temp; + int i; + int j = 0; + + saved = b43legacy_phy_read(dev, 0x0403); + b43legacy_radio_selectchannel(dev, channel, 0); + b43legacy_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5); + if (phy->aci_hw_rssi) + rssi = b43legacy_phy_read(dev, 0x048A) & 0x3F; + else + rssi = saved & 0x3F; + /* clamp temp to signed 5bit */ + if (rssi > 32) + rssi -= 64; + for (i = 0; i < 100; i++) { + temp = (b43legacy_phy_read(dev, 0x047F) >> 8) & 0x3F; + if (temp > 32) + temp -= 64; + if (temp < rssi) + j++; + if (j >= 20) + ret = 1; + } + b43legacy_phy_write(dev, 0x0403, saved); + + return ret; +} + +u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u8 ret[13]; + unsigned int channel = phy->channel; + unsigned int i; + unsigned int j; + unsigned int start; + unsigned int end; + + if (!((phy->type == B43legacy_PHYTYPE_G) && (phy->rev > 0))) + return 0; + + b43legacy_phy_lock(dev); + b43legacy_radio_lock(dev); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) & 0xFFFC); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + & 0x7FFF); + b43legacy_set_all_gains(dev, 3, 8, 1); + + start = (channel - 5 > 0) ? channel - 5 : 1; + end = (channel + 5 < 14) ? 
channel + 5 : 13; + + for (i = start; i <= end; i++) { + if (abs(channel - i) > 2) + ret[i-1] = b43legacy_radio_aci_detect(dev, i); + } + b43legacy_radio_selectchannel(dev, channel, 0); + b43legacy_phy_write(dev, 0x0802, + (b43legacy_phy_read(dev, 0x0802) & 0xFFFC) + | 0x0003); + b43legacy_phy_write(dev, 0x0403, + b43legacy_phy_read(dev, 0x0403) & 0xFFF8); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + | 0x8000); + b43legacy_set_original_gains(dev); + for (i = 0; i < 13; i++) { + if (!ret[i]) + continue; + end = (i + 5 < 13) ? i + 5 : 13; + for (j = i; j < end; j++) + ret[j] = 1; + } + b43legacy_radio_unlock(dev); + b43legacy_phy_unlock(dev); + + return ret[channel - 1]; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val) +{ + b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); + mmiowb(); + b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val); +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset) +{ + u16 val; + + b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); + val = b43legacy_phy_read(dev, B43legacy_PHY_NRSSILT_DATA); + + return (s16)val; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val) +{ + u16 i; + s16 tmp; + + for (i = 0; i < 64; i++) { + tmp = b43legacy_nrssi_hw_read(dev, i); + tmp -= val; + tmp = clamp_val(tmp, -32, 31); + b43legacy_nrssi_hw_write(dev, i, tmp); + } +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + s16 i; + s16 delta; + s32 tmp; + + delta = 0x1F - phy->nrssi[0]; + for (i = 0; i < 64; i++) { + tmp = (i - delta) * phy->nrssislope; + tmp /= 0x10000; + tmp += 0x3A; + tmp = clamp_val(tmp, 0, 0x3F); + phy->nrssi_lt[i] = tmp; + } +} + +static void b43legacy_calc_nrssi_offset(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup[20] = { 0 }; + s16 v47F; + u16 i; + u16 saved = 0xFFFF; + + backup[0] = b43legacy_phy_read(dev, 0x0001); + backup[1] = b43legacy_phy_read(dev, 0x0811); + backup[2] = b43legacy_phy_read(dev, 0x0812); + backup[3] = b43legacy_phy_read(dev, 0x0814); + backup[4] = b43legacy_phy_read(dev, 0x0815); + backup[5] = b43legacy_phy_read(dev, 0x005A); + backup[6] = b43legacy_phy_read(dev, 0x0059); + backup[7] = b43legacy_phy_read(dev, 0x0058); + backup[8] = b43legacy_phy_read(dev, 0x000A); + backup[9] = b43legacy_phy_read(dev, 0x0003); + backup[10] = b43legacy_radio_read16(dev, 0x007A); + backup[11] = b43legacy_radio_read16(dev, 0x0043); + + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) & 0x7FFF); + b43legacy_phy_write(dev, 0x0001, + (b43legacy_phy_read(dev, 0x0001) & 0x3FFF) + | 0x4000); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x000C); + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) & 0xFFF3) + | 0x0004); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) & ~(0x1 | 0x2)); + if (phy->rev >= 6) { + backup[12] = b43legacy_phy_read(dev, 0x002E); + backup[13] = b43legacy_phy_read(dev, 0x002F); + backup[14] = b43legacy_phy_read(dev, 0x080F); + backup[15] = b43legacy_phy_read(dev, 0x0810); + backup[16] = b43legacy_phy_read(dev, 0x0801); + backup[17] = b43legacy_phy_read(dev, 0x0060); + 
backup[18] = b43legacy_phy_read(dev, 0x0014); + backup[19] = b43legacy_phy_read(dev, 0x0478); + + b43legacy_phy_write(dev, 0x002E, 0); + b43legacy_phy_write(dev, 0x002F, 0); + b43legacy_phy_write(dev, 0x080F, 0); + b43legacy_phy_write(dev, 0x0810, 0); + b43legacy_phy_write(dev, 0x0478, + b43legacy_phy_read(dev, 0x0478) | 0x0100); + b43legacy_phy_write(dev, 0x0801, + b43legacy_phy_read(dev, 0x0801) | 0x0040); + b43legacy_phy_write(dev, 0x0060, + b43legacy_phy_read(dev, 0x0060) | 0x0040); + b43legacy_phy_write(dev, 0x0014, + b43legacy_phy_read(dev, 0x0014) | 0x0200); + } + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) | 0x0070); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) | 0x0080); + udelay(30); + + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == 31) { + for (i = 7; i >= 4; i--) { + b43legacy_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) + & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F < 31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 4; + } else { + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x007F); + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0001); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFE); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x000C); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) | 0x000C); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0030); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) | 0x0030); + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0x0810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->analog == 0) + b43legacy_phy_write(dev, 0x0003, 0x0122); + else + b43legacy_phy_write(dev, 0x000A, + b43legacy_phy_read(dev, 0x000A) + | 0x2000); + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0004); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFB); + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) & 0xFF9F) + | 0x0040); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x000F); + b43legacy_set_all_gains(dev, 3, 0, 1); + b43legacy_radio_write16(dev, 0x0043, + (b43legacy_radio_read16(dev, 0x0043) + & 0x00F0) | 0x000F); + udelay(30); + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == -32) { + for (i = 0; i < 4; i++) { + b43legacy_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> + 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F > -31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 3; + } else + saved = 0; + } + b43legacy_radio_write16(dev, 0x007B, saved); + + if (phy->rev >= 6) { + b43legacy_phy_write(dev, 0x002E, backup[12]); + b43legacy_phy_write(dev, 0x002F, backup[13]); + b43legacy_phy_write(dev, 0x080F, backup[14]); + b43legacy_phy_write(dev, 0x0810, backup[15]); + } + b43legacy_phy_write(dev, 0x0814, backup[3]); + b43legacy_phy_write(dev, 0x0815, backup[4]); + b43legacy_phy_write(dev, 0x005A, backup[5]); + b43legacy_phy_write(dev, 0x0059, backup[6]); + b43legacy_phy_write(dev, 0x0058, backup[7]); + b43legacy_phy_write(dev, 0x000A, backup[8]); + b43legacy_phy_write(dev, 0x0003, 
backup[9]); + b43legacy_radio_write16(dev, 0x0043, backup[11]); + b43legacy_radio_write16(dev, 0x007A, backup[10]); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) | 0x1 | 0x2); + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) | 0x8000); + b43legacy_set_original_gains(dev); + if (phy->rev >= 6) { + b43legacy_phy_write(dev, 0x0801, backup[16]); + b43legacy_phy_write(dev, 0x0060, backup[17]); + b43legacy_phy_write(dev, 0x0014, backup[18]); + b43legacy_phy_write(dev, 0x0478, backup[19]); + } + b43legacy_phy_write(dev, 0x0001, backup[0]); + b43legacy_phy_write(dev, 0x0812, backup[2]); + b43legacy_phy_write(dev, 0x0811, backup[1]); +} + +void b43legacy_calc_nrssi_slope(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup[18] = { 0 }; + u16 tmp; + s16 nrssi0; + s16 nrssi1; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + backup[0] = b43legacy_radio_read16(dev, 0x007A); + backup[1] = b43legacy_radio_read16(dev, 0x0052); + backup[2] = b43legacy_radio_read16(dev, 0x0043); + backup[3] = b43legacy_phy_read(dev, 0x0030); + backup[4] = b43legacy_phy_read(dev, 0x0026); + backup[5] = b43legacy_phy_read(dev, 0x0015); + backup[6] = b43legacy_phy_read(dev, 0x002A); + backup[7] = b43legacy_phy_read(dev, 0x0020); + backup[8] = b43legacy_phy_read(dev, 0x005A); + backup[9] = b43legacy_phy_read(dev, 0x0059); + backup[10] = b43legacy_phy_read(dev, 0x0058); + backup[11] = b43legacy_read16(dev, 0x03E2); + backup[12] = b43legacy_read16(dev, 0x03E6); + backup[13] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + + tmp = b43legacy_radio_read16(dev, 0x007A); + tmp &= (phy->rev >= 5) ? 0x007F : 0x000F; + b43legacy_radio_write16(dev, 0x007A, tmp); + b43legacy_phy_write(dev, 0x0030, 0x00FF); + b43legacy_write16(dev, 0x03EC, 0x7F7F); + b43legacy_phy_write(dev, 0x0026, 0x0000); + b43legacy_phy_write(dev, 0x0015, + b43legacy_phy_read(dev, 0x0015) | 0x0020); + b43legacy_phy_write(dev, 0x002A, 0x08A3); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0080); + + nrssi0 = (s16)b43legacy_phy_read(dev, 0x0027); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x007F); + if (phy->analog >= 2) + b43legacy_write16(dev, 0x03E6, 0x0040); + else if (phy->analog == 0) + b43legacy_write16(dev, 0x03E6, 0x0122); + else + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) & 0x2000); + b43legacy_phy_write(dev, 0x0020, 0x3F3F); + b43legacy_phy_write(dev, 0x0015, 0xF330); + b43legacy_radio_write16(dev, 0x005A, 0x0060); + b43legacy_radio_write16(dev, 0x0043, + b43legacy_radio_read16(dev, 0x0043) + & 0x00F0); + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0x0810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + udelay(20); + + nrssi1 = (s16)b43legacy_phy_read(dev, 0x0027); + b43legacy_phy_write(dev, 0x0030, backup[3]); + b43legacy_radio_write16(dev, 0x007A, backup[0]); + b43legacy_write16(dev, 0x03E2, backup[11]); + b43legacy_phy_write(dev, 0x0026, backup[4]); + b43legacy_phy_write(dev, 0x0015, backup[5]); + b43legacy_phy_write(dev, 0x002A, backup[6]); + b43legacy_synth_pu_workaround(dev, phy->channel); + if (phy->analog != 0) + b43legacy_write16(dev, 0x03F4, backup[13]); + + b43legacy_phy_write(dev, 0x0020, backup[7]); + b43legacy_phy_write(dev, 0x005A, backup[8]); + b43legacy_phy_write(dev, 0x0059, backup[9]); + b43legacy_phy_write(dev, 0x0058, backup[10]); + b43legacy_radio_write16(dev, 0x0052, backup[1]); + 
b43legacy_radio_write16(dev, 0x0043, backup[2]); + + if (nrssi0 == nrssi1) + phy->nrssislope = 0x00010000; + else + phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + + if (nrssi0 <= -4) { + phy->nrssi[0] = nrssi0; + phy->nrssi[1] = nrssi1; + } + break; + case B43legacy_PHYTYPE_G: + if (phy->radio_rev >= 9) + return; + if (phy->radio_rev == 8) + b43legacy_calc_nrssi_offset(dev); + + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + & 0x7FFF); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) & 0xFFFC); + backup[7] = b43legacy_read16(dev, 0x03E2); + b43legacy_write16(dev, 0x03E2, + b43legacy_read16(dev, 0x03E2) | 0x8000); + backup[0] = b43legacy_radio_read16(dev, 0x007A); + backup[1] = b43legacy_radio_read16(dev, 0x0052); + backup[2] = b43legacy_radio_read16(dev, 0x0043); + backup[3] = b43legacy_phy_read(dev, 0x0015); + backup[4] = b43legacy_phy_read(dev, 0x005A); + backup[5] = b43legacy_phy_read(dev, 0x0059); + backup[6] = b43legacy_phy_read(dev, 0x0058); + backup[8] = b43legacy_read16(dev, 0x03E6); + backup[9] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + if (phy->rev >= 3) { + backup[10] = b43legacy_phy_read(dev, 0x002E); + backup[11] = b43legacy_phy_read(dev, 0x002F); + backup[12] = b43legacy_phy_read(dev, 0x080F); + backup[13] = b43legacy_phy_read(dev, + B43legacy_PHY_G_LO_CONTROL); + backup[14] = b43legacy_phy_read(dev, 0x0801); + backup[15] = b43legacy_phy_read(dev, 0x0060); + backup[16] = b43legacy_phy_read(dev, 0x0014); + backup[17] = b43legacy_phy_read(dev, 0x0478); + b43legacy_phy_write(dev, 0x002E, 0); + b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL, 0); + switch (phy->rev) { + case 4: case 6: case 7: + b43legacy_phy_write(dev, 0x0478, + b43legacy_phy_read(dev, + 0x0478) | 0x0100); + b43legacy_phy_write(dev, 0x0801, + b43legacy_phy_read(dev, + 0x0801) | 0x0040); + break; + case 3: case 5: + b43legacy_phy_write(dev, 0x0801, + b43legacy_phy_read(dev, + 0x0801) & 0xFFBF); + break; + } + b43legacy_phy_write(dev, 0x0060, + b43legacy_phy_read(dev, 0x0060) + | 0x0040); + b43legacy_phy_write(dev, 0x0014, + b43legacy_phy_read(dev, 0x0014) + | 0x0200); + } + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0070); + b43legacy_set_all_gains(dev, 0, 8, 0); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x00F7); + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0811, + (b43legacy_phy_read(dev, 0x0811) + & 0xFFCF) | 0x0030); + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xFFCF) | 0x0010); + } + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0080); + udelay(20); + + nrssi0 = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi0 >= 0x0020) + nrssi0 -= 0x0040; + + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x007F); + if (phy->analog >= 2) + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) + & 0xFF9F) | 0x0040); + + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) | 0x2000); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x000F); + b43legacy_phy_write(dev, 0x0015, 0xF330); + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xFFCF) | 0x0020); + b43legacy_phy_write(dev, 0x0811, + (b43legacy_phy_read(dev, 0x0811) + & 0xFFCF) | 0x0020); + } + + b43legacy_set_all_gains(dev, 3, 0, 1); + if 
(phy->radio_rev == 8) + b43legacy_radio_write16(dev, 0x0043, 0x001F); + else { + tmp = b43legacy_radio_read16(dev, 0x0052) & 0xFF0F; + b43legacy_radio_write16(dev, 0x0052, tmp | 0x0060); + tmp = b43legacy_radio_read16(dev, 0x0043) & 0xFFF0; + b43legacy_radio_write16(dev, 0x0043, tmp | 0x0009); + } + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0x0810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + udelay(20); + nrssi1 = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi1 >= 0x0020) + nrssi1 -= 0x0040; + if (nrssi0 == nrssi1) + phy->nrssislope = 0x00010000; + else + phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + if (nrssi0 >= -4) { + phy->nrssi[0] = nrssi1; + phy->nrssi[1] = nrssi0; + } + if (phy->rev >= 3) { + b43legacy_phy_write(dev, 0x002E, backup[10]); + b43legacy_phy_write(dev, 0x002F, backup[11]); + b43legacy_phy_write(dev, 0x080F, backup[12]); + b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL, + backup[13]); + } + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) + & 0xFFCF); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) + & 0xFFCF); + } + + b43legacy_radio_write16(dev, 0x007A, backup[0]); + b43legacy_radio_write16(dev, 0x0052, backup[1]); + b43legacy_radio_write16(dev, 0x0043, backup[2]); + b43legacy_write16(dev, 0x03E2, backup[7]); + b43legacy_write16(dev, 0x03E6, backup[8]); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, backup[9]); + b43legacy_phy_write(dev, 0x0015, backup[3]); + b43legacy_phy_write(dev, 0x005A, backup[4]); + b43legacy_phy_write(dev, 0x0059, backup[5]); + b43legacy_phy_write(dev, 0x0058, backup[6]); + b43legacy_synth_pu_workaround(dev, phy->channel); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) | 0x0003); + b43legacy_set_original_gains(dev); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + | 0x8000); + if (phy->rev >= 3) { + b43legacy_phy_write(dev, 0x0801, backup[14]); + b43legacy_phy_write(dev, 0x0060, backup[15]); + b43legacy_phy_write(dev, 0x0014, backup[16]); + b43legacy_phy_write(dev, 0x0478, backup[17]); + } + b43legacy_nrssi_mem_update(dev); + b43legacy_calc_nrssi_threshold(dev); + break; + default: + B43legacy_BUG_ON(1); + } +} + +void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + s32 threshold; + s32 a; + s32 b; + s16 tmp16; + u16 tmp_u16; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: { + if (phy->radio_ver != 0x2050) + return; + if (!(dev->dev->bus->sprom.boardflags_lo & + B43legacy_BFL_RSSI)) + return; + + if (phy->radio_rev >= 6) { + threshold = (phy->nrssi[1] - phy->nrssi[0]) * 32; + threshold += 20 * (phy->nrssi[0] + 1); + threshold /= 40; + } else + threshold = phy->nrssi[1] - 5; + + threshold = clamp_val(threshold, 0, 0x3E); + b43legacy_phy_read(dev, 0x0020); /* dummy read */ + b43legacy_phy_write(dev, 0x0020, (((u16)threshold) << 8) + | 0x001C); + + if (phy->radio_rev >= 6) { + b43legacy_phy_write(dev, 0x0087, 0x0E0D); + b43legacy_phy_write(dev, 0x0086, 0x0C0B); + b43legacy_phy_write(dev, 0x0085, 0x0A09); + b43legacy_phy_write(dev, 0x0084, 0x0808); + b43legacy_phy_write(dev, 0x0083, 0x0808); + b43legacy_phy_write(dev, 0x0082, 0x0604); + b43legacy_phy_write(dev, 0x0081, 0x0302); + b43legacy_phy_write(dev, 0x0080, 0x0100); + } + break; + } + case B43legacy_PHYTYPE_G: + if (!phy->gmode || + !(dev->dev->bus->sprom.boardflags_lo & + B43legacy_BFL_RSSI)) { + tmp16 = 
b43legacy_nrssi_hw_read(dev, 0x20); + if (tmp16 >= 0x20) + tmp16 -= 0x40; + if (tmp16 < 3) + b43legacy_phy_write(dev, 0x048A, + (b43legacy_phy_read(dev, + 0x048A) & 0xF000) | 0x09EB); + else + b43legacy_phy_write(dev, 0x048A, + (b43legacy_phy_read(dev, + 0x048A) & 0xF000) | 0x0AED); + } else { + if (phy->interfmode == + B43legacy_RADIO_INTERFMODE_NONWLAN) { + a = 0xE; + b = 0xA; + } else if (!phy->aci_wlan_automatic && + phy->aci_enable) { + a = 0x13; + b = 0x12; + } else { + a = 0xE; + b = 0x11; + } + + a = a * (phy->nrssi[1] - phy->nrssi[0]); + a += (phy->nrssi[0] << 6); + if (a < 32) + a += 31; + else + a += 32; + a = a >> 6; + a = clamp_val(a, -31, 31); + + b = b * (phy->nrssi[1] - phy->nrssi[0]); + b += (phy->nrssi[0] << 6); + if (b < 32) + b += 31; + else + b += 32; + b = b >> 6; + b = clamp_val(b, -31, 31); + + tmp_u16 = b43legacy_phy_read(dev, 0x048A) & 0xF000; + tmp_u16 |= ((u32)b & 0x0000003F); + tmp_u16 |= (((u32)a & 0x0000003F) << 6); + b43legacy_phy_write(dev, 0x048A, tmp_u16); + } + break; + default: + B43legacy_BUG_ON(1); + } +} + +/* Stack implementation to save/restore values from the + * interference mitigation code. + * It is save to restore values in random order. + */ +static void _stack_save(u32 *_stackptr, size_t *stackidx, + u8 id, u16 offset, u16 value) +{ + u32 *stackptr = &(_stackptr[*stackidx]); + + B43legacy_WARN_ON(!((offset & 0xE000) == 0x0000)); + B43legacy_WARN_ON(!((id & 0xF8) == 0x00)); + *stackptr = offset; + *stackptr |= ((u32)id) << 13; + *stackptr |= ((u32)value) << 16; + (*stackidx)++; + B43legacy_WARN_ON(!(*stackidx < B43legacy_INTERFSTACK_SIZE)); +} + +static u16 _stack_restore(u32 *stackptr, + u8 id, u16 offset) +{ + size_t i; + + B43legacy_WARN_ON(!((offset & 0xE000) == 0x0000)); + B43legacy_WARN_ON(!((id & 0xF8) == 0x00)); + for (i = 0; i < B43legacy_INTERFSTACK_SIZE; i++, stackptr++) { + if ((*stackptr & 0x00001FFF) != offset) + continue; + if (((*stackptr & 0x00007000) >> 13) != id) + continue; + return ((*stackptr & 0xFFFF0000) >> 16); + } + B43legacy_BUG_ON(1); + + return 0; +} + +#define phy_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x1, (offset), \ + b43legacy_phy_read(dev, (offset))); \ + } while (0) +#define phy_stackrestore(offset) \ + do { \ + b43legacy_phy_write(dev, (offset), \ + _stack_restore(stack, 0x1, \ + (offset))); \ + } while (0) +#define radio_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x2, (offset), \ + b43legacy_radio_read16(dev, (offset))); \ + } while (0) +#define radio_stackrestore(offset) \ + do { \ + b43legacy_radio_write16(dev, (offset), \ + _stack_restore(stack, 0x2, \ + (offset))); \ + } while (0) +#define ilt_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x3, (offset), \ + b43legacy_ilt_read(dev, (offset))); \ + } while (0) +#define ilt_stackrestore(offset) \ + do { \ + b43legacy_ilt_write(dev, (offset), \ + _stack_restore(stack, 0x3, \ + (offset))); \ + } while (0) + +static void +b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev, + int mode) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + u16 flipped; + u32 tmp32; + size_t stackidx = 0; + u32 *stack = phy->interfstack; + + switch (mode) { + case B43legacy_RADIO_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) + | 0x0800); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) & ~0x4000); + break; + } + radio_stacksave(0x0078); + tmp = (b43legacy_radio_read16(dev, 0x0078) & 
0x001E); + flipped = flip_4bit(tmp); + if (flipped < 10 && flipped >= 8) + flipped = 7; + else if (flipped >= 10) + flipped -= 3; + flipped = flip_4bit(flipped); + flipped = (flipped << 1) | 0x0020; + b43legacy_radio_write16(dev, 0x0078, flipped); + + b43legacy_calc_nrssi_threshold(dev); + + phy_stacksave(0x0406); + b43legacy_phy_write(dev, 0x0406, 0x7E28); + + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) | 0x0800); + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) | 0x1000); + + phy_stacksave(0x04A0); + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) & 0xC0C0) + | 0x0008); + phy_stacksave(0x04A1); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) & 0xC0C0) + | 0x0605); + phy_stacksave(0x04A2); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) & 0xC0C0) + | 0x0204); + phy_stacksave(0x04A8); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) & 0xC0C0) + | 0x0803); + phy_stacksave(0x04AB); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) & 0xC0C0) + | 0x0605); + + phy_stacksave(0x04A7); + b43legacy_phy_write(dev, 0x04A7, 0x0002); + phy_stacksave(0x04A3); + b43legacy_phy_write(dev, 0x04A3, 0x287A); + phy_stacksave(0x04A9); + b43legacy_phy_write(dev, 0x04A9, 0x2027); + phy_stacksave(0x0493); + b43legacy_phy_write(dev, 0x0493, 0x32F5); + phy_stacksave(0x04AA); + b43legacy_phy_write(dev, 0x04AA, 0x2027); + phy_stacksave(0x04AC); + b43legacy_phy_write(dev, 0x04AC, 0x32F5); + break; + case B43legacy_RADIO_INTERFMODE_MANUALWLAN: + if (b43legacy_phy_read(dev, 0x0033) & 0x0800) + break; + + phy->aci_enable = 1; + + phy_stacksave(B43legacy_PHY_RADIO_BITFIELD); + phy_stacksave(B43legacy_PHY_G_CRS); + if (phy->rev < 2) + phy_stacksave(0x0406); + else { + phy_stacksave(0x04C0); + phy_stacksave(0x04C1); + } + phy_stacksave(0x0033); + phy_stacksave(0x04A7); + phy_stacksave(0x04A3); + phy_stacksave(0x04A9); + phy_stacksave(0x04AA); + phy_stacksave(0x04AC); + phy_stacksave(0x0493); + phy_stacksave(0x04A1); + phy_stacksave(0x04A0); + phy_stacksave(0x04A2); + phy_stacksave(0x048A); + phy_stacksave(0x04A8); + phy_stacksave(0x04AB); + if (phy->rev == 2) { + phy_stacksave(0x04AD); + phy_stacksave(0x04AE); + } else if (phy->rev >= 3) { + phy_stacksave(0x04AD); + phy_stacksave(0x0415); + phy_stacksave(0x0416); + phy_stacksave(0x0417); + ilt_stacksave(0x1A00 + 0x2); + ilt_stacksave(0x1A00 + 0x3); + } + phy_stacksave(0x042B); + phy_stacksave(0x048C); + + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) & ~0x1000); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + (b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) + & 0xFFFC) | 0x0002); + + b43legacy_phy_write(dev, 0x0033, 0x0800); + b43legacy_phy_write(dev, 0x04A3, 0x2027); + b43legacy_phy_write(dev, 0x04A9, 0x1CA8); + b43legacy_phy_write(dev, 0x0493, 0x287A); + b43legacy_phy_write(dev, 0x04AA, 0x1CA8); + b43legacy_phy_write(dev, 0x04AC, 0x287A); + + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) + & 0xFFC0) | 0x001A); + b43legacy_phy_write(dev, 0x04A7, 0x000D); + + if (phy->rev < 2) + b43legacy_phy_write(dev, 0x0406, 0xFF0D); + else if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x04C0, 0xFFFF); + b43legacy_phy_write(dev, 0x04C1, 0x00A9); + } else { + b43legacy_phy_write(dev, 0x04C0, 0x00C1); + b43legacy_phy_write(dev, 0x04C1, 0x0059); + } + + b43legacy_phy_write(dev, 0x04A1, + 
(b43legacy_phy_read(dev, 0x04A1) + & 0xC0FF) | 0x1800); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) + & 0xFFC0) | 0x0015); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xCFFF) | 0x1000); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xF0FF) | 0x0A00); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xCFFF) | 0x1000); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xF0FF) | 0x0800); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xFFCF) | 0x0010); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xFFF0) | 0x0005); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xFFCF) | 0x0010); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xFFF0) | 0x0006); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) + & 0xF0FF) | 0x0800); + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) + & 0xF0FF) | 0x0500); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) + & 0xFFF0) | 0x000B); + + if (phy->rev >= 3) { + b43legacy_phy_write(dev, 0x048A, + b43legacy_phy_read(dev, 0x048A) + & ~0x8000); + b43legacy_phy_write(dev, 0x0415, + (b43legacy_phy_read(dev, 0x0415) + & 0x8000) | 0x36D8); + b43legacy_phy_write(dev, 0x0416, + (b43legacy_phy_read(dev, 0x0416) + & 0x8000) | 0x36D8); + b43legacy_phy_write(dev, 0x0417, + (b43legacy_phy_read(dev, 0x0417) + & 0xFE00) | 0x016D); + } else { + b43legacy_phy_write(dev, 0x048A, + b43legacy_phy_read(dev, 0x048A) + | 0x1000); + b43legacy_phy_write(dev, 0x048A, + (b43legacy_phy_read(dev, 0x048A) + & 0x9FFF) | 0x2000); + tmp32 = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + if (!(tmp32 & 0x800)) { + tmp32 |= 0x800; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + tmp32); + } + } + if (phy->rev >= 2) + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) + | 0x0800); + b43legacy_phy_write(dev, 0x048C, + (b43legacy_phy_read(dev, 0x048C) + & 0xF0FF) | 0x0200); + if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x04AE, + (b43legacy_phy_read(dev, 0x04AE) + & 0xFF00) | 0x007F); + b43legacy_phy_write(dev, 0x04AD, + (b43legacy_phy_read(dev, 0x04AD) + & 0x00FF) | 0x1300); + } else if (phy->rev >= 6) { + b43legacy_ilt_write(dev, 0x1A00 + 0x3, 0x007F); + b43legacy_ilt_write(dev, 0x1A00 + 0x2, 0x007F); + b43legacy_phy_write(dev, 0x04AD, + b43legacy_phy_read(dev, 0x04AD) + & 0x00FF); + } + b43legacy_calc_nrssi_slope(dev); + break; + default: + B43legacy_BUG_ON(1); + } +} + +static void +b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev, + int mode) +{ + struct b43legacy_phy *phy = &dev->phy; + u32 tmp32; + u32 *stack = phy->interfstack; + + switch (mode) { + case B43legacy_RADIO_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) + & ~0x0800); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) | 0x4000); + break; + } + phy_stackrestore(0x0078); + b43legacy_calc_nrssi_threshold(dev); + phy_stackrestore(0x0406); + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) & ~0x0800); + if (!dev->bad_frames_preempt) + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) + & ~(1 << 11)); + b43legacy_phy_write(dev, 
B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + | 0x4000); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04A2); + phy_stackrestore(0x04A8); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A7); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + break; + case B43legacy_RADIO_INTERFMODE_MANUALWLAN: + if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800)) + break; + + phy->aci_enable = 0; + + phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD); + phy_stackrestore(B43legacy_PHY_G_CRS); + phy_stackrestore(0x0033); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A7); + if (phy->rev >= 2) { + phy_stackrestore(0x04C0); + phy_stackrestore(0x04C1); + } else + phy_stackrestore(0x0406); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A8); + if (phy->rev == 2) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x04AE); + } else if (phy->rev >= 3) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x0415); + phy_stackrestore(0x0416); + phy_stackrestore(0x0417); + ilt_stackrestore(0x1A00 + 0x2); + ilt_stackrestore(0x1A00 + 0x3); + } + phy_stackrestore(0x04A2); + phy_stackrestore(0x04A8); + phy_stackrestore(0x042B); + phy_stackrestore(0x048C); + tmp32 = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + if (tmp32 & 0x800) { + tmp32 &= ~0x800; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + tmp32); + } + b43legacy_calc_nrssi_slope(dev); + break; + default: + B43legacy_BUG_ON(1); + } +} + +#undef phy_stacksave +#undef phy_stackrestore +#undef radio_stacksave +#undef radio_stackrestore +#undef ilt_stacksave +#undef ilt_stackrestore + +int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, + int mode) +{ + struct b43legacy_phy *phy = &dev->phy; + int currentmode; + + if ((phy->type != B43legacy_PHYTYPE_G) || + (phy->rev == 0) || (!phy->gmode)) + return -ENODEV; + + phy->aci_wlan_automatic = 0; + switch (mode) { + case B43legacy_RADIO_INTERFMODE_AUTOWLAN: + phy->aci_wlan_automatic = 1; + if (phy->aci_enable) + mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN; + else + mode = B43legacy_RADIO_INTERFMODE_NONE; + break; + case B43legacy_RADIO_INTERFMODE_NONE: + case B43legacy_RADIO_INTERFMODE_NONWLAN: + case B43legacy_RADIO_INTERFMODE_MANUALWLAN: + break; + default: + return -EINVAL; + } + + currentmode = phy->interfmode; + if (currentmode == mode) + return 0; + if (currentmode != B43legacy_RADIO_INTERFMODE_NONE) + b43legacy_radio_interference_mitigation_disable(dev, + currentmode); + + if (mode == B43legacy_RADIO_INTERFMODE_NONE) { + phy->aci_enable = 0; + phy->aci_hw_rssi = 0; + } else + b43legacy_radio_interference_mitigation_enable(dev, mode); + phy->interfmode = mode; + + return 0; +} + +u16 b43legacy_radio_calibrationvalue(struct b43legacy_wldev *dev) +{ + u16 reg; + u16 index; + u16 ret; + + reg = b43legacy_radio_read16(dev, 0x0060); + index = (reg & 0x001E) >> 1; + ret = rcc_table[index] << 1; + ret |= (reg & 0x0001); + ret |= 0x0020; + + return ret; +} + +#define LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0)) +static u16 b43legacy_get_812_value(struct b43legacy_wldev *dev, u8 lpd) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 loop_or = 0; + u16 adj_loopback_gain = phy->loopback_gain[0]; + u8 loop; + u16 
extern_lna_control; + + if (!phy->gmode) + return 0; + if (!has_loopback_gain(phy)) { + if (phy->rev < 7 || !(dev->dev->bus->sprom.boardflags_lo + & B43legacy_BFL_EXTLNA)) { + switch (lpd) { + case LPD(0, 1, 1): + return 0x0FB2; + case LPD(0, 0, 1): + return 0x00B2; + case LPD(1, 0, 1): + return 0x30B2; + case LPD(1, 0, 0): + return 0x30B3; + default: + B43legacy_BUG_ON(1); + } + } else { + switch (lpd) { + case LPD(0, 1, 1): + return 0x8FB2; + case LPD(0, 0, 1): + return 0x80B2; + case LPD(1, 0, 1): + return 0x20B2; + case LPD(1, 0, 0): + return 0x20B3; + default: + B43legacy_BUG_ON(1); + } + } + } else { + if (phy->radio_rev == 8) + adj_loopback_gain += 0x003E; + else + adj_loopback_gain += 0x0026; + if (adj_loopback_gain >= 0x46) { + adj_loopback_gain -= 0x46; + extern_lna_control = 0x3000; + } else if (adj_loopback_gain >= 0x3A) { + adj_loopback_gain -= 0x3A; + extern_lna_control = 0x2000; + } else if (adj_loopback_gain >= 0x2E) { + adj_loopback_gain -= 0x2E; + extern_lna_control = 0x1000; + } else { + adj_loopback_gain -= 0x10; + extern_lna_control = 0x0000; + } + for (loop = 0; loop < 16; loop++) { + u16 tmp = adj_loopback_gain - 6 * loop; + if (tmp < 6) + break; + } + + loop_or = (loop << 8) | extern_lna_control; + if (phy->rev >= 7 && dev->dev->bus->sprom.boardflags_lo + & B43legacy_BFL_EXTLNA) { + if (extern_lna_control) + loop_or |= 0x8000; + switch (lpd) { + case LPD(0, 1, 1): + return 0x8F92; + case LPD(0, 0, 1): + return (0x8092 | loop_or); + case LPD(1, 0, 1): + return (0x2092 | loop_or); + case LPD(1, 0, 0): + return (0x2093 | loop_or); + default: + B43legacy_BUG_ON(1); + } + } else { + switch (lpd) { + case LPD(0, 1, 1): + return 0x0F92; + case LPD(0, 0, 1): + case LPD(1, 0, 1): + return (0x0092 | loop_or); + case LPD(1, 0, 0): + return (0x0093 | loop_or); + default: + B43legacy_BUG_ON(1); + } + } + } + return 0; +} + +u16 b43legacy_radio_init2050(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup[21] = { 0 }; + u16 ret; + u16 i; + u16 j; + u32 tmp1 = 0; + u32 tmp2 = 0; + + backup[0] = b43legacy_radio_read16(dev, 0x0043); + backup[14] = b43legacy_radio_read16(dev, 0x0051); + backup[15] = b43legacy_radio_read16(dev, 0x0052); + backup[1] = b43legacy_phy_read(dev, 0x0015); + backup[16] = b43legacy_phy_read(dev, 0x005A); + backup[17] = b43legacy_phy_read(dev, 0x0059); + backup[18] = b43legacy_phy_read(dev, 0x0058); + if (phy->type == B43legacy_PHYTYPE_B) { + backup[2] = b43legacy_phy_read(dev, 0x0030); + backup[3] = b43legacy_read16(dev, 0x03EC); + b43legacy_phy_write(dev, 0x0030, 0x00FF); + b43legacy_write16(dev, 0x03EC, 0x3F3F); + } else { + if (phy->gmode) { + backup[4] = b43legacy_phy_read(dev, 0x0811); + backup[5] = b43legacy_phy_read(dev, 0x0812); + backup[6] = b43legacy_phy_read(dev, 0x0814); + backup[7] = b43legacy_phy_read(dev, 0x0815); + backup[8] = b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS); + backup[9] = b43legacy_phy_read(dev, 0x0802); + b43legacy_phy_write(dev, 0x0814, + (b43legacy_phy_read(dev, 0x0814) + | 0x0003)); + b43legacy_phy_write(dev, 0x0815, + (b43legacy_phy_read(dev, 0x0815) + & 0xFFFC)); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + (b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) & 0x7FFF)); + b43legacy_phy_write(dev, 0x0802, + (b43legacy_phy_read(dev, 0x0802) + & 0xFFFC)); + if (phy->rev > 1) { /* loopback gain enabled */ + backup[19] = b43legacy_phy_read(dev, 0x080F); + backup[20] = b43legacy_phy_read(dev, 0x0810); + if (phy->rev >= 3) + b43legacy_phy_write(dev, 0x080F, + 0xC020); + else + 
b43legacy_phy_write(dev, 0x080F, + 0x8020); + b43legacy_phy_write(dev, 0x0810, 0x0000); + } + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(0, 1, 1))); + if (phy->rev < 7 || + !(dev->dev->bus->sprom.boardflags_lo + & B43legacy_BFL_EXTLNA)) + b43legacy_phy_write(dev, 0x0811, 0x01B3); + else + b43legacy_phy_write(dev, 0x0811, 0x09B3); + } + } + b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO, + (b43legacy_read16(dev, B43legacy_MMIO_PHY_RADIO) + | 0x8000)); + backup[10] = b43legacy_phy_read(dev, 0x0035); + b43legacy_phy_write(dev, 0x0035, + (b43legacy_phy_read(dev, 0x0035) & 0xFF7F)); + backup[11] = b43legacy_read16(dev, 0x03E6); + backup[12] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + + /* Initialization */ + if (phy->analog == 0) + b43legacy_write16(dev, 0x03E6, 0x0122); + else { + if (phy->analog >= 2) + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) + & 0xFFBF) | 0x0040); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + (b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) | 0x2000)); + } + + ret = b43legacy_radio_calibrationvalue(dev); + + if (phy->type == B43legacy_PHYTYPE_B) + b43legacy_radio_write16(dev, 0x0078, 0x0026); + + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(0, 1, 1))); + b43legacy_phy_write(dev, 0x0015, 0xBFAF); + b43legacy_phy_write(dev, 0x002B, 0x1403); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(0, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xBFA0); + b43legacy_radio_write16(dev, 0x0051, + (b43legacy_radio_read16(dev, 0x0051) + | 0x0004)); + if (phy->radio_rev == 8) + b43legacy_radio_write16(dev, 0x0043, 0x001F); + else { + b43legacy_radio_write16(dev, 0x0052, 0x0000); + b43legacy_radio_write16(dev, 0x0043, + (b43legacy_radio_read16(dev, 0x0043) + & 0xFFF0) | 0x0009); + } + b43legacy_phy_write(dev, 0x0058, 0x0000); + + for (i = 0; i < 16; i++) { + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0xC810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xEFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 0))); + b43legacy_phy_write(dev, 0x0015, 0xFFF0); + udelay(20); + tmp1 += b43legacy_phy_read(dev, 0x002D); + b43legacy_phy_write(dev, 0x0058, 0x0000); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + } + + tmp1++; + tmp1 >>= 9; + udelay(10); + b43legacy_phy_write(dev, 0x0058, 0x0000); + + for (i = 0; i < 16; i++) { + b43legacy_radio_write16(dev, 0x0078, (flip_4bit(i) << 1) + | 0x0020); + backup[13] = b43legacy_radio_read16(dev, 0x0078); + udelay(10); + for (j = 0; j < 16; j++) { + b43legacy_phy_write(dev, 0x005A, 0x0D80); + b43legacy_phy_write(dev, 0x0059, 0xC810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xEFB0); + udelay(10); + if (phy->gmode) + 
b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 0))); + b43legacy_phy_write(dev, 0x0015, 0xFFF0); + udelay(10); + tmp2 += b43legacy_phy_read(dev, 0x002D); + b43legacy_phy_write(dev, 0x0058, 0x0000); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + } + tmp2++; + tmp2 >>= 8; + if (tmp1 < tmp2) + break; + } + + /* Restore the registers */ + b43legacy_phy_write(dev, 0x0015, backup[1]); + b43legacy_radio_write16(dev, 0x0051, backup[14]); + b43legacy_radio_write16(dev, 0x0052, backup[15]); + b43legacy_radio_write16(dev, 0x0043, backup[0]); + b43legacy_phy_write(dev, 0x005A, backup[16]); + b43legacy_phy_write(dev, 0x0059, backup[17]); + b43legacy_phy_write(dev, 0x0058, backup[18]); + b43legacy_write16(dev, 0x03E6, backup[11]); + if (phy->analog != 0) + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, backup[12]); + b43legacy_phy_write(dev, 0x0035, backup[10]); + b43legacy_radio_selectchannel(dev, phy->channel, 1); + if (phy->type == B43legacy_PHYTYPE_B) { + b43legacy_phy_write(dev, 0x0030, backup[2]); + b43legacy_write16(dev, 0x03EC, backup[3]); + } else { + if (phy->gmode) { + b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO, + (b43legacy_read16(dev, + B43legacy_MMIO_PHY_RADIO) & 0x7FFF)); + b43legacy_phy_write(dev, 0x0811, backup[4]); + b43legacy_phy_write(dev, 0x0812, backup[5]); + b43legacy_phy_write(dev, 0x0814, backup[6]); + b43legacy_phy_write(dev, 0x0815, backup[7]); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + backup[8]); + b43legacy_phy_write(dev, 0x0802, backup[9]); + if (phy->rev > 1) { + b43legacy_phy_write(dev, 0x080F, backup[19]); + b43legacy_phy_write(dev, 0x0810, backup[20]); + } + } + } + if (i >= 15) + ret = backup[13]; + + return ret; +} + +static inline +u16 freq_r3A_value(u16 frequency) +{ + u16 value; + + if (frequency < 5091) + value = 0x0040; + else if (frequency < 5321) + value = 0x0000; + else if (frequency < 5806) + value = 0x0080; + else + value = 0x0040; + + return value; +} + +void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev) +{ + static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 }; + static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A }; + u16 tmp = b43legacy_radio_read16(dev, 0x001E); + int i; + int j; + + for (i = 0; i < 5; i++) { + for (j = 0; j < 5; j++) { + if (tmp == (data_high[i] | data_low[j])) { + b43legacy_phy_write(dev, 0x0069, (i - j) << 8 | + 0x00C0); + return; + } + } + } +} + +int b43legacy_radio_selectchannel(struct b43legacy_wldev *dev, + u8 channel, + int synthetic_pu_workaround) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (channel == 0xFF) { + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + channel = B43legacy_RADIO_DEFAULT_CHANNEL_BG; + break; + default: + B43legacy_WARN_ON(1); + } + } + +/* TODO: Check if channel is valid - return -EINVAL if not */ + if (synthetic_pu_workaround) + b43legacy_synth_pu_workaround(dev, channel); + + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel)); + + if (channel == 14) { + if (dev->dev->bus->sprom.country_code == 5) /* JAPAN) */ + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + b43legacy_shm_read32(dev, + B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET) + & ~(1 << 7)); + else + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + b43legacy_shm_read32(dev, + B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET) + | (1 << 7)); 
+ b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) | (1 << 11)); + } else + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) & 0xF7BF); + + phy->channel = channel; + /*XXX: Using the longer of 2 timeouts (8000 vs 2000 usecs). Specs states + * that 2000 usecs might suffice. */ + msleep(8); + + return 0; +} + +void b43legacy_radio_set_txantenna(struct b43legacy_wldev *dev, u32 val) +{ + u16 tmp; + + val <<= 8; + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0022) & 0xFCFF; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0022, tmp | val); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x03A8) & 0xFCFF; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x03A8, tmp | val); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0054) & 0xFCFF; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0054, tmp | val); +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Base_Band */ +static u16 b43legacy_get_txgain_base_band(u16 txpower) +{ + u16 ret; + + B43legacy_WARN_ON(txpower > 63); + + if (txpower >= 54) + ret = 2; + else if (txpower >= 49) + ret = 4; + else if (txpower >= 44) + ret = 5; + else + ret = 6; + + return ret; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Radio_Frequency_Power_Amplifier */ +static u16 b43legacy_get_txgain_freq_power_amp(u16 txpower) +{ + u16 ret; + + B43legacy_WARN_ON(txpower > 63); + + if (txpower >= 32) + ret = 0; + else if (txpower >= 25) + ret = 1; + else if (txpower >= 20) + ret = 2; + else if (txpower >= 12) + ret = 3; + else + ret = 4; + + return ret; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Digital_Analog_Converter */ +static u16 b43legacy_get_txgain_dac(u16 txpower) +{ + u16 ret; + + B43legacy_WARN_ON(txpower > 63); + + if (txpower >= 54) + ret = txpower - 53; + else if (txpower >= 49) + ret = txpower - 42; + else if (txpower >= 44) + ret = txpower - 37; + else if (txpower >= 32) + ret = txpower - 32; + else if (txpower >= 25) + ret = txpower - 20; + else if (txpower >= 20) + ret = txpower - 13; + else if (txpower >= 12) + ret = txpower - 8; + else + ret = txpower; + + return ret; +} + +void b43legacy_radio_set_txpower_a(struct b43legacy_wldev *dev, u16 txpower) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 pamp; + u16 base; + u16 dac; + u16 ilt; + + txpower = clamp_val(txpower, 0, 63); + + pamp = b43legacy_get_txgain_freq_power_amp(txpower); + pamp <<= 5; + pamp &= 0x00E0; + b43legacy_phy_write(dev, 0x0019, pamp); + + base = b43legacy_get_txgain_base_band(txpower); + base &= 0x000F; + b43legacy_phy_write(dev, 0x0017, base | 0x0020); + + ilt = b43legacy_ilt_read(dev, 0x3001); + ilt &= 0x0007; + + dac = b43legacy_get_txgain_dac(txpower); + dac <<= 3; + dac |= ilt; + + b43legacy_ilt_write(dev, 0x3001, dac); + + phy->txpwr_offset = txpower; + + /* TODO: FuncPlaceholder (Adjust BB loft cancel) */ +} + +void b43legacy_radio_set_txpower_bg(struct b43legacy_wldev *dev, + u16 baseband_attenuation, + u16 radio_attenuation, + u16 txpower) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (baseband_attenuation == 0xFFFF) + baseband_attenuation = phy->bbatt; + if (radio_attenuation == 0xFFFF) + radio_attenuation = phy->rfatt; + if (txpower == 0xFFFF) + txpower = phy->txctl1; + phy->bbatt = baseband_attenuation; + phy->rfatt = radio_attenuation; + phy->txctl1 = txpower; + + B43legacy_WARN_ON(baseband_attenuation > 11); + if (phy->radio_rev < 6) + B43legacy_WARN_ON(radio_attenuation > 9); + else + 
B43legacy_WARN_ON(radio_attenuation > 31); + B43legacy_WARN_ON(txpower > 7); + + b43legacy_phy_set_baseband_attenuation(dev, baseband_attenuation); + b43legacy_radio_write16(dev, 0x0043, radio_attenuation); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0064, + radio_attenuation); + if (phy->radio_ver == 0x2050) + b43legacy_radio_write16(dev, 0x0052, + (b43legacy_radio_read16(dev, 0x0052) + & ~0x0070) | ((txpower << 4) & 0x0070)); + /* FIXME: The spec is very weird and unclear here. */ + if (phy->type == B43legacy_PHYTYPE_G) + b43legacy_phy_lo_adjust(dev, 0); +} + +u16 b43legacy_default_baseband_attenuation(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->radio_ver == 0x2050 && phy->radio_rev < 6) + return 0; + return 2; +} + +u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 att = 0xFFFF; + + switch (phy->radio_ver) { + case 0x2053: + switch (phy->radio_rev) { + case 1: + att = 6; + break; + } + break; + case 0x2050: + switch (phy->radio_rev) { + case 0: + att = 5; + break; + case 1: + if (phy->type == B43legacy_PHYTYPE_G) { + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421 && + dev->dev->bus->boardinfo.rev >= 30) + att = 3; + else if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x416) + att = 3; + else + att = 1; + } else { + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421 && + dev->dev->bus->boardinfo.rev >= 30) + att = 7; + else + att = 6; + } + break; + case 2: + if (phy->type == B43legacy_PHYTYPE_G) { + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421 && + dev->dev->bus->boardinfo.rev >= 30) + att = 3; + else if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == + 0x416) + att = 5; + else if (dev->dev->bus->chip_id == 0x4320) + att = 4; + else + att = 3; + } else + att = 6; + break; + case 3: + att = 5; + break; + case 4: + case 5: + att = 1; + break; + case 6: + case 7: + att = 5; + break; + case 8: + att = 0x1A; + break; + case 9: + default: + att = 5; + } + } + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421) { + if (dev->dev->bus->boardinfo.rev < 0x43) + att = 2; + else if (dev->dev->bus->boardinfo.rev < 0x51) + att = 3; + } + if (att == 0xFFFF) + att = 5; + + return att; +} + +u16 b43legacy_default_txctl1(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->radio_ver != 0x2050) + return 0; + if (phy->radio_rev == 1) + return 3; + if (phy->radio_rev < 6) + return 2; + if (phy->radio_rev == 8) + return 1; + return 0; +} + +void b43legacy_radio_turn_on(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int err; + u8 channel; + + might_sleep(); + + if (phy->radio_on) + return; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + b43legacy_phy_write(dev, 0x0015, 0x8000); + b43legacy_phy_write(dev, 0x0015, 0xCC00); + b43legacy_phy_write(dev, 0x0015, + (phy->gmode ? 0x00C0 : 0x0000)); + if (phy->radio_off_context.valid) { + /* Restore the RFover values. 
*/ + b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, + phy->radio_off_context.rfover); + b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, + phy->radio_off_context.rfoverval); + phy->radio_off_context.valid = 0; + } + channel = phy->channel; + err = b43legacy_radio_selectchannel(dev, + B43legacy_RADIO_DEFAULT_CHANNEL_BG, 1); + err |= b43legacy_radio_selectchannel(dev, channel, 0); + B43legacy_WARN_ON(err); + break; + default: + B43legacy_BUG_ON(1); + } + phy->radio_on = 1; +} + +void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (!phy->radio_on && !force) + return; + + if (phy->type == B43legacy_PHYTYPE_G && dev->dev->id.revision >= 5) { + u16 rfover, rfoverval; + + rfover = b43legacy_phy_read(dev, B43legacy_PHY_RFOVER); + rfoverval = b43legacy_phy_read(dev, B43legacy_PHY_RFOVERVAL); + if (!force) { + phy->radio_off_context.rfover = rfover; + phy->radio_off_context.rfoverval = rfoverval; + phy->radio_off_context.valid = 1; + } + b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C); + b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, + rfoverval & 0xFF73); + } else + b43legacy_phy_write(dev, 0x0015, 0xAA00); + phy->radio_on = 0; + b43legacydbg(dev->wl, "Radio initialized\n"); +} + +void b43legacy_radio_clear_tssi(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0058, + 0x7F7F); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x005a, + 0x7F7F); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0070, + 0x7F7F); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0072, + 0x7F7F); + break; + } +} diff --git a/drivers/net/wireless/b43legacy/radio.h b/drivers/net/wireless/b43legacy/radio.h new file mode 100644 index 0000000..ec4de28 --- /dev/null +++ b/drivers/net/wireless/b43legacy/radio.h @@ -0,0 +1,98 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#ifndef B43legacy_RADIO_H_ +#define B43legacy_RADIO_H_ + +#include "b43legacy.h" + + +#define B43legacy_RADIO_DEFAULT_CHANNEL_BG 6 + +/* Force antenna 0. */ +#define B43legacy_RADIO_TXANTENNA_0 0 +/* Force antenna 1. */ +#define B43legacy_RADIO_TXANTENNA_1 1 +/* Use the RX antenna, that was selected for the most recently + * received good PLCP header. 
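+ * b43legacy_radio_set_txantenna() (radio.c) programs the selected value
+ * into bits 8-9 of the shared-memory words at offsets 0x0022, 0x03A8 and
+ * 0x0054.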
+ */ +#define B43legacy_RADIO_TXANTENNA_LASTPLCP 3 +#define B43legacy_RADIO_TXANTENNA_DEFAULT B43legacy_RADIO_TXANTENNA_LASTPLCP + +#define B43legacy_RADIO_INTERFMODE_NONE 0 +#define B43legacy_RADIO_INTERFMODE_NONWLAN 1 +#define B43legacy_RADIO_INTERFMODE_MANUALWLAN 2 +#define B43legacy_RADIO_INTERFMODE_AUTOWLAN 3 + + +void b43legacy_radio_lock(struct b43legacy_wldev *dev); +void b43legacy_radio_unlock(struct b43legacy_wldev *dev); + +u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset); +void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val); + +u16 b43legacy_radio_init2050(struct b43legacy_wldev *dev); + +void b43legacy_radio_turn_on(struct b43legacy_wldev *dev); +void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force); + +int b43legacy_radio_selectchannel(struct b43legacy_wldev *dev, u8 channel, + int synthetic_pu_workaround); + +void b43legacy_radio_set_txpower_a(struct b43legacy_wldev *dev, u16 txpower); +void b43legacy_radio_set_txpower_bg(struct b43legacy_wldev *dev, + u16 baseband_attenuation, u16 attenuation, + u16 txpower); + +u16 b43legacy_default_baseband_attenuation(struct b43legacy_wldev *dev); +u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev); +u16 b43legacy_default_txctl1(struct b43legacy_wldev *dev); + +void b43legacy_radio_set_txantenna(struct b43legacy_wldev *dev, u32 val); + +void b43legacy_radio_clear_tssi(struct b43legacy_wldev *dev); + +u8 b43legacy_radio_aci_detect(struct b43legacy_wldev *dev, u8 channel); +u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev); + +int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, + int mode); + +void b43legacy_calc_nrssi_slope(struct b43legacy_wldev *dev); +void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev); +s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset); +void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val); +void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val); +void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev); + +void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev); +u16 b43legacy_radio_calibrationvalue(struct b43legacy_wldev *dev); + +#endif /* B43legacy_RADIO_H_ */ diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c new file mode 100644 index 0000000..d579df7 --- /dev/null +++ b/drivers/net/wireless/b43legacy/rfkill.c @@ -0,0 +1,91 @@ +/* + + Broadcom B43 wireless driver + RFKILL support + + Copyright (c) 2007 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "radio.h" +#include "b43legacy.h" + + +/* Returns TRUE, if the radio is enabled in hardware. 
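+ * On PHY revision 3 and later the enable bit read from
+ * B43legacy_MMIO_RADIO_HWENABLED_HI is active-low; older revisions
+ * report it active-high in B43legacy_MMIO_RADIO_HWENABLED_LO.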
*/ +bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev) +{ + if (dev->phy.rev >= 3) { + if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI) + & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK)) + return 1; + } else { + /* To prevent CPU fault on PPC, do not read a register + * unless the interface is started; however, on resume + * for hibernation, this routine is entered early. When + * that happens, unconditionally return TRUE. + */ + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) + return 1; + if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO) + & B43legacy_MMIO_RADIO_HWENABLED_LO_MASK) + return 1; + } + return 0; +} + +/* The poll callback for the hardware button. */ +void b43legacy_rfkill_poll(struct ieee80211_hw *hw) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + struct ssb_bus *bus = dev->dev->bus; + bool enabled; + bool brought_up = false; + + mutex_lock(&wl->mutex); + if (unlikely(b43legacy_status(dev) < B43legacy_STAT_INITIALIZED)) { + if (ssb_bus_powerup(bus, 0)) { + mutex_unlock(&wl->mutex); + return; + } + ssb_device_enable(dev->dev, 0); + brought_up = true; + } + + enabled = b43legacy_is_hw_radio_enabled(dev); + + if (unlikely(enabled != dev->radio_hw_enable)) { + dev->radio_hw_enable = enabled; + b43legacyinfo(wl, "Radio hardware status changed to %s\n", + enabled ? "ENABLED" : "DISABLED"); + wiphy_rfkill_set_hw_state(hw->wiphy, !enabled); + if (enabled != dev->phy.radio_on) { + if (enabled) + b43legacy_radio_turn_on(dev); + else + b43legacy_radio_turn_off(dev, 0); + } + } + + if (brought_up) { + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(bus); + } + + mutex_unlock(&wl->mutex); +} diff --git a/drivers/net/wireless/b43legacy/rfkill.h b/drivers/net/wireless/b43legacy/rfkill.h new file mode 100644 index 0000000..7558557 --- /dev/null +++ b/drivers/net/wireless/b43legacy/rfkill.h @@ -0,0 +1,11 @@ +#ifndef B43legacy_RFKILL_H_ +#define B43legacy_RFKILL_H_ + +struct ieee80211_hw; +struct b43legacy_wldev; + +void b43legacy_rfkill_poll(struct ieee80211_hw *hw); + +bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev); + +#endif /* B43legacy_RFKILL_H_ */ diff --git a/drivers/net/wireless/b43legacy/sysfs.c b/drivers/net/wireless/b43legacy/sysfs.c new file mode 100644 index 0000000..56c384f --- /dev/null +++ b/drivers/net/wireless/b43legacy/sysfs.c @@ -0,0 +1,238 @@ +/* + + Broadcom B43legacy wireless driver + + SYSFS support routines + + Copyright (c) 2006 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "sysfs.h" +#include "b43legacy.h" +#include "main.h" +#include "phy.h" +#include "radio.h" + +#include + + +#define GENERIC_FILESIZE 64 + + +static int get_integer(const char *buf, size_t count) +{ + char tmp[10 + 1] = { 0 }; + int ret = -EINVAL; + + if (count == 0) + goto out; + count = min(count, (size_t)10); + memcpy(tmp, buf, count); + ret = simple_strtol(tmp, NULL, 10); +out: + return ret; +} + +static int get_boolean(const char *buf, size_t count) +{ + if (count != 0) { + if (buf[0] == '1') + return 1; + if (buf[0] == '0') + return 0; + if (count >= 4 && memcmp(buf, "true", 4) == 0) + return 1; + if (count >= 5 && memcmp(buf, "false", 5) == 0) + return 0; + if (count >= 3 && memcmp(buf, "yes", 3) == 0) + return 1; + if (count >= 2 && memcmp(buf, "no", 2) == 0) + return 0; + if (count >= 2 && memcmp(buf, "on", 2) == 0) + return 1; + if (count >= 3 && memcmp(buf, "off", 3) == 0) + return 0; + } + return -EINVAL; +} + +static ssize_t b43legacy_attr_interfmode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + ssize_t count = 0; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + switch (wldev->phy.interfmode) { + case B43legacy_INTERFMODE_NONE: + count = snprintf(buf, PAGE_SIZE, "0 (No Interference" + " Mitigation)\n"); + break; + case B43legacy_INTERFMODE_NONWLAN: + count = snprintf(buf, PAGE_SIZE, "1 (Non-WLAN Interference" + " Mitigation)\n"); + break; + case B43legacy_INTERFMODE_MANUALWLAN: + count = snprintf(buf, PAGE_SIZE, "2 (WLAN Interference" + " Mitigation)\n"); + break; + default: + B43legacy_WARN_ON(1); + } + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43legacy_attr_interfmode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + unsigned long flags; + int err; + int mode; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mode = get_integer(buf, count); + switch (mode) { + case 0: + mode = B43legacy_INTERFMODE_NONE; + break; + case 1: + mode = B43legacy_INTERFMODE_NONWLAN; + break; + case 2: + mode = B43legacy_INTERFMODE_MANUALWLAN; + break; + case 3: + mode = B43legacy_INTERFMODE_AUTOWLAN; + break; + default: + return -EINVAL; + } + + mutex_lock(&wldev->wl->mutex); + spin_lock_irqsave(&wldev->wl->irq_lock, flags); + + err = b43legacy_radio_set_interference_mitigation(wldev, mode); + if (err) + b43legacyerr(wldev->wl, "Interference Mitigation not " + "supported by device\n"); + mmiowb(); + spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); + mutex_unlock(&wldev->wl->mutex); + + return err ? 
err : count; +} + +static DEVICE_ATTR(interference, 0644, + b43legacy_attr_interfmode_show, + b43legacy_attr_interfmode_store); + +static ssize_t b43legacy_attr_preamble_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + ssize_t count; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + if (wldev->short_preamble) + count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble" + " enabled)\n"); + else + count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble" + " disabled)\n"); + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43legacy_attr_preamble_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + unsigned long flags; + int value; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + value = get_boolean(buf, count); + if (value < 0) + return value; + mutex_lock(&wldev->wl->mutex); + spin_lock_irqsave(&wldev->wl->irq_lock, flags); + + wldev->short_preamble = !!value; + + spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static DEVICE_ATTR(shortpreamble, 0644, + b43legacy_attr_preamble_show, + b43legacy_attr_preamble_store); + +int b43legacy_sysfs_register(struct b43legacy_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + int err; + + B43legacy_WARN_ON(b43legacy_status(wldev) != + B43legacy_STAT_INITIALIZED); + + err = device_create_file(dev, &dev_attr_interference); + if (err) + goto out; + err = device_create_file(dev, &dev_attr_shortpreamble); + if (err) + goto err_remove_interfmode; + +out: + return err; +err_remove_interfmode: + device_remove_file(dev, &dev_attr_interference); + goto out; +} + +void b43legacy_sysfs_unregister(struct b43legacy_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + + device_remove_file(dev, &dev_attr_shortpreamble); + device_remove_file(dev, &dev_attr_interference); +} diff --git a/drivers/net/wireless/b43legacy/sysfs.h b/drivers/net/wireless/b43legacy/sysfs.h new file mode 100644 index 0000000..417d509 --- /dev/null +++ b/drivers/net/wireless/b43legacy/sysfs.h @@ -0,0 +1,9 @@ +#ifndef B43legacy_SYSFS_H_ +#define B43legacy_SYSFS_H_ + +struct b43legacy_wldev; + +int b43legacy_sysfs_register(struct b43legacy_wldev *dev); +void b43legacy_sysfs_unregister(struct b43legacy_wldev *dev); + +#endif /* B43legacy_SYSFS_H_ */ diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c new file mode 100644 index 0000000..9c8882d --- /dev/null +++ b/drivers/net/wireless/b43legacy/xmit.c @@ -0,0 +1,680 @@ +/* + + Broadcom B43legacy wireless driver + + Transmission (TX/RX) related functions. + + Copyright (C) 2005 Martin Langer + Copyright (C) 2005 Stefano Brivio + Copyright (C) 2005, 2006 Michael Buesch + Copyright (C) 2005 Danny van Dyk + Copyright (C) 2005 Andreas Jaggi + Copyright (C) 2007 Larry Finger + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include + +#include "xmit.h" +#include "phy.h" +#include "dma.h" +#include "pio.h" + + +/* Extract the bitrate out of a CCK PLCP header. */ +static u8 b43legacy_plcp_get_bitrate_idx_cck(struct b43legacy_plcp_hdr6 *plcp) +{ + switch (plcp->raw[0]) { + case 0x0A: + return 0; + case 0x14: + return 1; + case 0x37: + return 2; + case 0x6E: + return 3; + } + B43legacy_BUG_ON(1); + return -1; +} + +/* Extract the bitrate out of an OFDM PLCP header. */ +static u8 b43legacy_plcp_get_bitrate_idx_ofdm(struct b43legacy_plcp_hdr6 *plcp, + bool aphy) +{ + int base = aphy ? 0 : 4; + + switch (plcp->raw[0] & 0xF) { + case 0xB: + return base + 0; + case 0xF: + return base + 1; + case 0xA: + return base + 2; + case 0xE: + return base + 3; + case 0x9: + return base + 4; + case 0xD: + return base + 5; + case 0x8: + return base + 6; + case 0xC: + return base + 7; + } + B43legacy_BUG_ON(1); + return -1; +} + +u8 b43legacy_plcp_get_ratecode_cck(const u8 bitrate) +{ + switch (bitrate) { + case B43legacy_CCK_RATE_1MB: + return 0x0A; + case B43legacy_CCK_RATE_2MB: + return 0x14; + case B43legacy_CCK_RATE_5MB: + return 0x37; + case B43legacy_CCK_RATE_11MB: + return 0x6E; + } + B43legacy_BUG_ON(1); + return 0; +} + +u8 b43legacy_plcp_get_ratecode_ofdm(const u8 bitrate) +{ + switch (bitrate) { + case B43legacy_OFDM_RATE_6MB: + return 0xB; + case B43legacy_OFDM_RATE_9MB: + return 0xF; + case B43legacy_OFDM_RATE_12MB: + return 0xA; + case B43legacy_OFDM_RATE_18MB: + return 0xE; + case B43legacy_OFDM_RATE_24MB: + return 0x9; + case B43legacy_OFDM_RATE_36MB: + return 0xD; + case B43legacy_OFDM_RATE_48MB: + return 0x8; + case B43legacy_OFDM_RATE_54MB: + return 0xC; + } + B43legacy_BUG_ON(1); + return 0; +} + +void b43legacy_generate_plcp_hdr(struct b43legacy_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate) +{ + __le32 *data = &(plcp->data); + __u8 *raw = plcp->raw; + + if (b43legacy_is_ofdm_rate(bitrate)) { + u16 d; + + d = b43legacy_plcp_get_ratecode_ofdm(bitrate); + B43legacy_WARN_ON(octets & 0xF000); + d |= (octets << 5); + *data = cpu_to_le32(d); + } else { + u32 plen; + + plen = octets * 16 / bitrate; + if ((octets * 16 % bitrate) > 0) { + plen++; + if ((bitrate == B43legacy_CCK_RATE_11MB) + && ((octets * 8 % 11) < 4)) + raw[1] = 0x84; + else + raw[1] = 0x04; + } else + raw[1] = 0x04; + *data |= cpu_to_le32(plen << 16); + raw[0] = b43legacy_plcp_get_ratecode_cck(bitrate); + } +} + +static u8 b43legacy_calc_fallback_rate(u8 bitrate) +{ + switch (bitrate) { + case B43legacy_CCK_RATE_1MB: + return B43legacy_CCK_RATE_1MB; + case B43legacy_CCK_RATE_2MB: + return B43legacy_CCK_RATE_1MB; + case B43legacy_CCK_RATE_5MB: + return B43legacy_CCK_RATE_2MB; + case B43legacy_CCK_RATE_11MB: + return B43legacy_CCK_RATE_5MB; + case B43legacy_OFDM_RATE_6MB: + return B43legacy_CCK_RATE_5MB; + case B43legacy_OFDM_RATE_9MB: + return B43legacy_OFDM_RATE_6MB; + case B43legacy_OFDM_RATE_12MB: + return B43legacy_OFDM_RATE_9MB; + case B43legacy_OFDM_RATE_18MB: + return B43legacy_OFDM_RATE_12MB; + case B43legacy_OFDM_RATE_24MB: + return B43legacy_OFDM_RATE_18MB; + case B43legacy_OFDM_RATE_36MB: + return B43legacy_OFDM_RATE_24MB; + case B43legacy_OFDM_RATE_48MB: + return B43legacy_OFDM_RATE_36MB; + case B43legacy_OFDM_RATE_54MB: + return B43legacy_OFDM_RATE_48MB; + } + B43legacy_BUG_ON(1); + return 
0; +} + +static int generate_txhdr_fw3(struct b43legacy_wldev *dev, + struct b43legacy_txhdr_fw3 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + struct ieee80211_tx_info *info, + u16 cookie) +{ + const struct ieee80211_hdr *wlhdr; + int use_encryption = !!info->control.hw_key; + u8 rate; + struct ieee80211_rate *rate_fb; + int rate_ofdm; + int rate_fb_ofdm; + unsigned int plcp_fragment_len; + u32 mac_ctl = 0; + u16 phy_ctl = 0; + struct ieee80211_rate *tx_rate; + struct ieee80211_tx_rate *rates; + + wlhdr = (const struct ieee80211_hdr *)fragment_data; + + memset(txhdr, 0, sizeof(*txhdr)); + + tx_rate = ieee80211_get_tx_rate(dev->wl->hw, info); + + rate = tx_rate->hw_value; + rate_ofdm = b43legacy_is_ofdm_rate(rate); + rate_fb = ieee80211_get_alt_retry_rate(dev->wl->hw, info, 0) ? : tx_rate; + rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value); + + txhdr->mac_frame_ctl = wlhdr->frame_control; + memcpy(txhdr->tx_receiver, wlhdr->addr1, 6); + + /* Calculate duration for fallback rate */ + if ((rate_fb->hw_value == rate) || + (wlhdr->duration_id & cpu_to_le16(0x8000)) || + (wlhdr->duration_id == cpu_to_le16(0))) { + /* If the fallback rate equals the normal rate or the + * dur_id field contains an AID, CFP magic or 0, + * use the original dur_id field. */ + txhdr->dur_fb = wlhdr->duration_id; + } else { + txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, + info->control.vif, + fragment_len, + rate_fb); + } + + plcp_fragment_len = fragment_len + FCS_LEN; + if (use_encryption) { + u8 key_idx = info->control.hw_key->hw_key_idx; + struct b43legacy_key *key; + int wlhdr_len; + size_t iv_len; + + B43legacy_WARN_ON(key_idx >= dev->max_nr_keys); + key = &(dev->key[key_idx]); + + if (key->enabled) { + /* Hardware appends ICV. */ + plcp_fragment_len += info->control.hw_key->icv_len; + + key_idx = b43legacy_kidx_to_fw(dev, key_idx); + mac_ctl |= (key_idx << B43legacy_TX4_MAC_KEYIDX_SHIFT) & + B43legacy_TX4_MAC_KEYIDX; + mac_ctl |= (key->algorithm << + B43legacy_TX4_MAC_KEYALG_SHIFT) & + B43legacy_TX4_MAC_KEYALG; + wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control); + iv_len = min((size_t)info->control.hw_key->iv_len, + ARRAY_SIZE(txhdr->iv)); + memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len); + } else { + /* This key is invalid. This might only happen + * in a short timeframe after machine resume before + * we were able to reconfigure keys. + * Drop this packet completely. Do not transmit it + * unencrypted to avoid leaking information. 
*/ + return -ENOKEY; + } + } + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->plcp), plcp_fragment_len, + rate); + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->plcp_fb), plcp_fragment_len, + rate_fb->hw_value); + + /* PHY TX Control word */ + if (rate_ofdm) + phy_ctl |= B43legacy_TX4_PHY_ENC_OFDM; + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL; + switch (info->antenna_sel_tx) { + case 0: + phy_ctl |= B43legacy_TX4_PHY_ANTLAST; + break; + case 1: + phy_ctl |= B43legacy_TX4_PHY_ANT0; + break; + case 2: + phy_ctl |= B43legacy_TX4_PHY_ANT1; + break; + default: + B43legacy_BUG_ON(1); + } + + /* MAC control */ + rates = info->control.rates; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + mac_ctl |= B43legacy_TX4_MAC_ACK; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + mac_ctl |= B43legacy_TX4_MAC_HWSEQ; + if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) + mac_ctl |= B43legacy_TX4_MAC_STMSDU; + if (rate_fb_ofdm) + mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM; + + /* Overwrite rates[0].count to make the retry calculation + * in the tx status easier. need the actual retry limit to + * detect whether the fallback rate was used. + */ + if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || + (rates[0].count <= dev->wl->hw->conf.long_frame_max_tx_count)) { + rates[0].count = dev->wl->hw->conf.long_frame_max_tx_count; + mac_ctl |= B43legacy_TX4_MAC_LONGFRAME; + } else { + rates[0].count = dev->wl->hw->conf.short_frame_max_tx_count; + } + + /* Generate the RTS or CTS-to-self frame */ + if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || + (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) { + unsigned int len; + struct ieee80211_hdr *hdr; + int rts_rate; + int rts_rate_fb; + int rts_rate_ofdm; + int rts_rate_fb_ofdm; + + rts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info)->hw_value; + rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate); + rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate); + rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb); + if (rts_rate_fb_ofdm) + mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM; + + if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + ieee80211_ctstoself_get(dev->wl->hw, + info->control.vif, + fragment_data, + fragment_len, info, + (struct ieee80211_cts *) + (txhdr->rts_frame)); + mac_ctl |= B43legacy_TX4_MAC_SENDCTS; + len = sizeof(struct ieee80211_cts); + } else { + ieee80211_rts_get(dev->wl->hw, + info->control.vif, + fragment_data, fragment_len, info, + (struct ieee80211_rts *) + (txhdr->rts_frame)); + mac_ctl |= B43legacy_TX4_MAC_SENDRTS; + len = sizeof(struct ieee80211_rts); + } + len += FCS_LEN; + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->rts_plcp), + len, rts_rate); + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->rts_plcp_fb), + len, rts_rate_fb); + hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame); + txhdr->rts_dur_fb = hdr->duration_id; + } + + /* Magic cookie */ + txhdr->cookie = cpu_to_le16(cookie); + + /* Apply the bitfields */ + txhdr->mac_ctl = cpu_to_le32(mac_ctl); + txhdr->phy_ctl = cpu_to_le16(phy_ctl); + + return 0; +} + +int b43legacy_generate_txhdr(struct b43legacy_wldev *dev, + u8 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + struct ieee80211_tx_info *info, + u16 cookie) +{ + return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr, + fragment_data, fragment_len, + info, cookie); +} + +static s8 
b43legacy_rssi_postprocess(struct b43legacy_wldev *dev, + u8 in_rssi, int ofdm, + int adjust_2053, int adjust_2050) +{ + struct b43legacy_phy *phy = &dev->phy; + s32 tmp; + + switch (phy->radio_ver) { + case 0x2050: + if (ofdm) { + tmp = in_rssi; + if (tmp > 127) + tmp -= 256; + tmp *= 73; + tmp /= 64; + if (adjust_2050) + tmp += 25; + else + tmp -= 3; + } else { + if (dev->dev->bus->sprom.boardflags_lo + & B43legacy_BFL_RSSI) { + if (in_rssi > 63) + in_rssi = 63; + tmp = phy->nrssi_lt[in_rssi]; + tmp = 31 - tmp; + tmp *= -131; + tmp /= 128; + tmp -= 57; + } else { + tmp = in_rssi; + tmp = 31 - tmp; + tmp *= -149; + tmp /= 128; + tmp -= 68; + } + if (phy->type == B43legacy_PHYTYPE_G && + adjust_2050) + tmp += 25; + } + break; + case 0x2060: + if (in_rssi > 127) + tmp = in_rssi - 256; + else + tmp = in_rssi; + break; + default: + tmp = in_rssi; + tmp -= 11; + tmp *= 103; + tmp /= 64; + if (adjust_2053) + tmp -= 109; + else + tmp -= 83; + } + + return (s8)tmp; +} + +void b43legacy_rx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + const void *_rxhdr) +{ + struct ieee80211_rx_status status; + struct b43legacy_plcp_hdr6 *plcp; + struct ieee80211_hdr *wlhdr; + const struct b43legacy_rxhdr_fw3 *rxhdr = _rxhdr; + __le16 fctl; + u16 phystat0; + u16 phystat3; + u16 chanstat; + u16 mactime; + u32 macstat; + u16 chanid; + u8 jssi; + int padding; + + memset(&status, 0, sizeof(status)); + + /* Get metadata about the frame from the header. */ + phystat0 = le16_to_cpu(rxhdr->phy_status0); + phystat3 = le16_to_cpu(rxhdr->phy_status3); + jssi = rxhdr->jssi; + macstat = le16_to_cpu(rxhdr->mac_status); + mactime = le16_to_cpu(rxhdr->mac_time); + chanstat = le16_to_cpu(rxhdr->channel); + + if (macstat & B43legacy_RX_MAC_FCSERR) + dev->wl->ieee_stats.dot11FCSErrorCount++; + + /* Skip PLCP and padding */ + padding = (macstat & B43legacy_RX_MAC_PADDING) ? 2 : 0; + if (unlikely(skb->len < (sizeof(struct b43legacy_plcp_hdr6) + + padding))) { + b43legacydbg(dev->wl, "RX: Packet size underrun (1)\n"); + goto drop; + } + plcp = (struct b43legacy_plcp_hdr6 *)(skb->data + padding); + skb_pull(skb, sizeof(struct b43legacy_plcp_hdr6) + padding); + /* The skb contains the Wireless Header + payload data now */ + if (unlikely(skb->len < (2+2+6/*minimum hdr*/ + FCS_LEN))) { + b43legacydbg(dev->wl, "RX: Packet size underrun (2)\n"); + goto drop; + } + wlhdr = (struct ieee80211_hdr *)(skb->data); + fctl = wlhdr->frame_control; + + if ((macstat & B43legacy_RX_MAC_DEC) && + !(macstat & B43legacy_RX_MAC_DECERR)) { + unsigned int keyidx; + int wlhdr_len; + int iv_len; + int icv_len; + + keyidx = ((macstat & B43legacy_RX_MAC_KEYIDX) + >> B43legacy_RX_MAC_KEYIDX_SHIFT); + /* We must adjust the key index here. We want the "physical" + * key index, but the ucode passed it slightly different. + */ + keyidx = b43legacy_kidx_to_raw(dev, keyidx); + B43legacy_WARN_ON(keyidx >= dev->max_nr_keys); + + if (dev->key[keyidx].algorithm != B43legacy_SEC_ALGO_NONE) { + /* Remove PROTECTED flag to mark it as decrypted. */ + B43legacy_WARN_ON(!ieee80211_has_protected(fctl)); + fctl &= ~cpu_to_le16(IEEE80211_FCTL_PROTECTED); + wlhdr->frame_control = fctl; + + wlhdr_len = ieee80211_hdrlen(fctl); + if (unlikely(skb->len < (wlhdr_len + 3))) { + b43legacydbg(dev->wl, "RX: Packet size" + " underrun3\n"); + goto drop; + } + if (skb->data[wlhdr_len + 3] & (1 << 5)) { + /* The Ext-IV Bit is set in the "KeyID" + * octet of the IV. 
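+ * Ext-IV is bit 5 of the KeyID octet (the fourth IV byte); when it is
+ * set the frame carries the extended 8-byte IV used by TKIP and CCMP,
+ * so strip 8 bytes of IV and 8 bytes of trailing ICV/MIC instead of
+ * WEP's 4 + 4.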
+ */ + iv_len = 8; + icv_len = 8; + } else { + iv_len = 4; + icv_len = 4; + } + if (unlikely(skb->len < (wlhdr_len + iv_len + + icv_len))) { + b43legacydbg(dev->wl, "RX: Packet size" + " underrun4\n"); + goto drop; + } + /* Remove the IV */ + memmove(skb->data + iv_len, skb->data, wlhdr_len); + skb_pull(skb, iv_len); + /* Remove the ICV */ + skb_trim(skb, skb->len - icv_len); + + status.flag |= RX_FLAG_DECRYPTED; + } + } + + status.signal = b43legacy_rssi_postprocess(dev, jssi, + (phystat0 & B43legacy_RX_PHYST0_OFDM), + (phystat0 & B43legacy_RX_PHYST0_GAINCTL), + (phystat3 & B43legacy_RX_PHYST3_TRSTATE)); + status.noise = dev->stats.link_noise; + /* change to support A PHY */ + if (phystat0 & B43legacy_RX_PHYST0_OFDM) + status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false); + else + status.rate_idx = b43legacy_plcp_get_bitrate_idx_cck(plcp); + status.antenna = !!(phystat0 & B43legacy_RX_PHYST0_ANT); + + /* + * All frames on monitor interfaces and beacons always need a full + * 64-bit timestamp. Monitor interfaces need it for diagnostic + * purposes and beacons for IBSS merging. + * This code assumes we get to process the packet within 16 bits + * of timestamp, i.e. about 65 milliseconds after the PHY received + * the first symbol. + */ + if (ieee80211_is_beacon(fctl) || dev->wl->radiotap_enabled) { + u16 low_mactime_now; + + b43legacy_tsf_read(dev, &status.mactime); + low_mactime_now = status.mactime; + status.mactime = status.mactime & ~0xFFFFULL; + status.mactime += mactime; + if (low_mactime_now <= mactime) + status.mactime -= 0x10000; + status.flag |= RX_FLAG_TSFT; + } + + chanid = (chanstat & B43legacy_RX_CHAN_ID) >> + B43legacy_RX_CHAN_ID_SHIFT; + switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + status.band = IEEE80211_BAND_2GHZ; + status.freq = chanid + 2400; + break; + default: + b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", + chanstat); + } + + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); + ieee80211_rx_irqsafe(dev->wl->hw, skb); + + return; +drop: + b43legacydbg(dev->wl, "RX: Packet dropped\n"); + dev_kfree_skb_any(skb); +} + +void b43legacy_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + b43legacy_debugfs_log_txstat(dev, status); + + if (status->intermediate) + return; + if (status->for_ampdu) + return; + if (!status->acked) + dev->wl->ieee_stats.dot11ACKFailureCount++; + if (status->rts_count) { + if (status->rts_count == 0xF) /* FIXME */ + dev->wl->ieee_stats.dot11RTSFailureCount++; + else + dev->wl->ieee_stats.dot11RTSSuccessCount++; + } + + if (b43legacy_using_pio(dev)) + b43legacy_pio_handle_txstatus(dev, status); + else + b43legacy_dma_handle_txstatus(dev, status); +} + +/* Handle TX status report as received through DMA/PIO queues */ +void b43legacy_handle_hwtxstatus(struct b43legacy_wldev *dev, + const struct b43legacy_hwtxstatus *hw) +{ + struct b43legacy_txstatus status; + u8 tmp; + + status.cookie = le16_to_cpu(hw->cookie); + status.seq = le16_to_cpu(hw->seq); + status.phy_stat = hw->phy_stat; + tmp = hw->count; + status.frame_count = (tmp >> 4); + status.rts_count = (tmp & 0x0F); + tmp = hw->flags << 1; + status.supp_reason = ((tmp & 0x1C) >> 2); + status.pm_indicated = !!(tmp & 0x80); + status.intermediate = !!(tmp & 0x40); + status.for_ampdu = !!(tmp & 0x20); + status.acked = !!(tmp & 0x02); + + b43legacy_handle_txstatus(dev, &status); +} + +/* Stop any TX operation on the device (suspend the hardware queues) */ +void 
b43legacy_tx_suspend(struct b43legacy_wldev *dev) +{ + if (b43legacy_using_pio(dev)) + b43legacy_pio_freeze_txqueues(dev); + else + b43legacy_dma_tx_suspend(dev); +} + +/* Resume any TX operation on the device (resume the hardware queues) */ +void b43legacy_tx_resume(struct b43legacy_wldev *dev) +{ + if (b43legacy_using_pio(dev)) + b43legacy_pio_thaw_txqueues(dev); + else + b43legacy_dma_tx_resume(dev); +} + +/* Initialize the QoS parameters */ +void b43legacy_qos_init(struct b43legacy_wldev *dev) +{ + /* FIXME: This function must probably be called from the mac80211 + * config callback. */ +return; + + b43legacy_hf_write(dev, b43legacy_hf_read(dev) | B43legacy_HF_EDCF); + /* FIXME kill magic */ + b43legacy_write16(dev, 0x688, + b43legacy_read16(dev, 0x688) | 0x4); + + + /*TODO: We might need some stack support here to get the values. */ +} diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h new file mode 100644 index 0000000..9163308 --- /dev/null +++ b/drivers/net/wireless/b43legacy/xmit.h @@ -0,0 +1,261 @@ +#ifndef B43legacy_XMIT_H_ +#define B43legacy_XMIT_H_ + +#include "main.h" + + +#define _b43legacy_declare_plcp_hdr(size) \ + struct b43legacy_plcp_hdr##size { \ + union { \ + __le32 data; \ + __u8 raw[size]; \ + } __attribute__((__packed__)); \ + } __attribute__((__packed__)) + +/* struct b43legacy_plcp_hdr4 */ +_b43legacy_declare_plcp_hdr(4); +/* struct b43legacy_plcp_hdr6 */ +_b43legacy_declare_plcp_hdr(6); + +#undef _b43legacy_declare_plcp_hdr + + +/* TX header for v3 firmware */ +struct b43legacy_txhdr_fw3 { + __le32 mac_ctl; /* MAC TX control */ + __le16 mac_frame_ctl; /* Copy of the FrameControl */ + __le16 tx_fes_time_norm; /* TX FES Time Normal */ + __le16 phy_ctl; /* PHY TX control */ + __u8 iv[16]; /* Encryption IV */ + __u8 tx_receiver[6]; /* TX Frame Receiver address */ + __le16 tx_fes_time_fb; /* TX FES Time Fallback */ + struct b43legacy_plcp_hdr4 rts_plcp_fb; /* RTS fallback PLCP */ + __le16 rts_dur_fb; /* RTS fallback duration */ + struct b43legacy_plcp_hdr4 plcp_fb; /* Fallback PLCP */ + __le16 dur_fb; /* Fallback duration */ + PAD_BYTES(2); + __le16 cookie; + __le16 unknown_scb_stuff; + struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */ + __u8 rts_frame[18]; /* The RTS frame (if used) */ + struct b43legacy_plcp_hdr6 plcp; +} __attribute__((__packed__)); + +/* MAC TX control */ +#define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */ +#define B43legacy_TX4_MAC_KEYIDX_SHIFT 20 +#define B43legacy_TX4_MAC_KEYALG 0x00070000 /* Security key algorithm */ +#define B43legacy_TX4_MAC_KEYALG_SHIFT 16 +#define B43legacy_TX4_MAC_LIFETIME 0x00001000 +#define B43legacy_TX4_MAC_FRAMEBURST 0x00000800 +#define B43legacy_TX4_MAC_SENDCTS 0x00000400 +#define B43legacy_TX4_MAC_AMPDU 0x00000300 +#define B43legacy_TX4_MAC_AMPDU_SHIFT 8 +#define B43legacy_TX4_MAC_CTSFALLBACKOFDM 0x00000200 +#define B43legacy_TX4_MAC_FALLBACKOFDM 0x00000100 +#define B43legacy_TX4_MAC_5GHZ 0x00000080 +#define B43legacy_TX4_MAC_IGNPMQ 0x00000020 +#define B43legacy_TX4_MAC_HWSEQ 0x00000010 /* Use Hardware Seq No */ +#define B43legacy_TX4_MAC_STMSDU 0x00000008 /* Start MSDU */ +#define B43legacy_TX4_MAC_SENDRTS 0x00000004 +#define B43legacy_TX4_MAC_LONGFRAME 0x00000002 +#define B43legacy_TX4_MAC_ACK 0x00000001 + +/* Extra Frame Types */ +#define B43legacy_TX4_EFT_FBOFDM 0x0001 /* Data frame fb rate type */ +#define B43legacy_TX4_EFT_RTSOFDM 0x0004 /* RTS/CTS rate type */ +#define B43legacy_TX4_EFT_RTSFBOFDM 0x0010 /* RTS/CTS fallback rate type */ + +/* PHY TX 
control word */ +#define B43legacy_TX4_PHY_ENC 0x0003 /* Data frame encoding */ +#define B43legacy_TX4_PHY_ENC_CCK 0x0000 /* CCK */ +#define B43legacy_TX4_PHY_ENC_OFDM 0x0001 /* Data frame rate type */ +#define B43legacy_TX4_PHY_SHORTPRMBL 0x0010 /* Use short preamble */ +#define B43legacy_TX4_PHY_ANT 0x03C0 /* Antenna selection */ +#define B43legacy_TX4_PHY_ANT0 0x0000 /* Use antenna 0 */ +#define B43legacy_TX4_PHY_ANT1 0x0100 /* Use antenna 1 */ +#define B43legacy_TX4_PHY_ANTLAST 0x0300 /* Use last used antenna */ + + + +int b43legacy_generate_txhdr(struct b43legacy_wldev *dev, + u8 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + struct ieee80211_tx_info *info, + u16 cookie); + + +/* Transmit Status */ +struct b43legacy_txstatus { + u16 cookie; /* The cookie from the txhdr */ + u16 seq; /* Sequence number */ + u8 phy_stat; /* PHY TX status */ + u8 frame_count; /* Frame transmit count */ + u8 rts_count; /* RTS transmit count */ + u8 supp_reason; /* Suppression reason */ + /* flags */ + u8 pm_indicated;/* PM mode indicated to AP */ + u8 intermediate;/* Intermediate status notification */ + u8 for_ampdu; /* Status is for an AMPDU (afterburner) */ + u8 acked; /* Wireless ACK received */ +}; + +/* txstatus supp_reason values */ +enum { + B43legacy_TXST_SUPP_NONE, /* Not suppressed */ + B43legacy_TXST_SUPP_PMQ, /* Suppressed due to PMQ entry */ + B43legacy_TXST_SUPP_FLUSH, /* Suppressed due to flush request */ + B43legacy_TXST_SUPP_PREV, /* Previous fragment failed */ + B43legacy_TXST_SUPP_CHAN, /* Channel mismatch */ + B43legacy_TXST_SUPP_LIFE, /* Lifetime expired */ + B43legacy_TXST_SUPP_UNDER, /* Buffer underflow */ + B43legacy_TXST_SUPP_ABNACK, /* Afterburner NACK */ +}; + +/* Transmit Status as received through DMA/PIO on old chips */ +struct b43legacy_hwtxstatus { + PAD_BYTES(4); + __le16 cookie; + u8 flags; + u8 count; + PAD_BYTES(2); + __le16 seq; + u8 phy_stat; + PAD_BYTES(1); +} __attribute__((__packed__)); + + +/* Receive header for v3 firmware. 
*/ +struct b43legacy_rxhdr_fw3 { + __le16 frame_len; /* Frame length */ + PAD_BYTES(2); + __le16 phy_status0; /* PHY RX Status 0 */ + __u8 jssi; /* PHY RX Status 1: JSSI */ + __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ + PAD_BYTES(2); /* PHY RX Status 2 */ + __le16 phy_status3; /* PHY RX Status 3 */ + __le16 mac_status; /* MAC RX status */ + __le16 mac_time; + __le16 channel; +} __attribute__((__packed__)); + + +/* PHY RX Status 0 */ +#define B43legacy_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ +#define B43legacy_RX_PHYST0_PLCPHCF 0x0200 +#define B43legacy_RX_PHYST0_PLCPFV 0x0100 +#define B43legacy_RX_PHYST0_SHORTPRMBL 0x0080 /* Recvd with Short Preamble */ +#define B43legacy_RX_PHYST0_LCRS 0x0040 +#define B43legacy_RX_PHYST0_ANT 0x0020 /* Antenna */ +#define B43legacy_RX_PHYST0_UNSRATE 0x0010 +#define B43legacy_RX_PHYST0_CLIP 0x000C +#define B43legacy_RX_PHYST0_CLIP_SHIFT 2 +#define B43legacy_RX_PHYST0_FTYPE 0x0003 /* Frame type */ +#define B43legacy_RX_PHYST0_CCK 0x0000 /* Frame type: CCK */ +#define B43legacy_RX_PHYST0_OFDM 0x0001 /* Frame type: OFDM */ +#define B43legacy_RX_PHYST0_PRE_N 0x0002 /* Pre-standard N-PHY frame */ +#define B43legacy_RX_PHYST0_STD_N 0x0003 /* Standard N-PHY frame */ + +/* PHY RX Status 2 */ +#define B43legacy_RX_PHYST2_LNAG 0xC000 /* LNA Gain */ +#define B43legacy_RX_PHYST2_LNAG_SHIFT 14 +#define B43legacy_RX_PHYST2_PNAG 0x3C00 /* PNA Gain */ +#define B43legacy_RX_PHYST2_PNAG_SHIFT 10 +#define B43legacy_RX_PHYST2_FOFF 0x03FF /* F offset */ + +/* PHY RX Status 3 */ +#define B43legacy_RX_PHYST3_DIGG 0x1800 /* DIG Gain */ +#define B43legacy_RX_PHYST3_DIGG_SHIFT 11 +#define B43legacy_RX_PHYST3_TRSTATE 0x0400 /* TR state */ + +/* MAC RX Status */ +#define B43legacy_RX_MAC_BEACONSENT 0x00008000 /* Beacon send flag */ +#define B43legacy_RX_MAC_KEYIDX 0x000007E0 /* Key index */ +#define B43legacy_RX_MAC_KEYIDX_SHIFT 5 +#define B43legacy_RX_MAC_DECERR 0x00000010 /* Decrypt error */ +#define B43legacy_RX_MAC_DEC 0x00000008 /* Decryption attempted */ +#define B43legacy_RX_MAC_PADDING 0x00000004 /* Pad bytes present */ +#define B43legacy_RX_MAC_RESP 0x00000002 /* Response frame xmitted */ +#define B43legacy_RX_MAC_FCSERR 0x00000001 /* FCS error */ + +/* RX channel */ +#define B43legacy_RX_CHAN_GAIN 0xFC00 /* Gain */ +#define B43legacy_RX_CHAN_GAIN_SHIFT 10 +#define B43legacy_RX_CHAN_ID 0x03FC /* Channel ID */ +#define B43legacy_RX_CHAN_ID_SHIFT 2 +#define B43legacy_RX_CHAN_PHYTYPE 0x0003 /* PHY type */ + + + +u8 b43legacy_plcp_get_ratecode_cck(const u8 bitrate); +u8 b43legacy_plcp_get_ratecode_ofdm(const u8 bitrate); + +void b43legacy_generate_plcp_hdr(struct b43legacy_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate); + +void b43legacy_rx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + const void *_rxhdr); + +void b43legacy_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); + +void b43legacy_handle_hwtxstatus(struct b43legacy_wldev *dev, + const struct b43legacy_hwtxstatus *hw); + +void b43legacy_tx_suspend(struct b43legacy_wldev *dev); +void b43legacy_tx_resume(struct b43legacy_wldev *dev); + + +#define B43legacy_NR_QOSPARMS 22 +enum { + B43legacy_QOSPARM_TXOP = 0, + B43legacy_QOSPARM_CWMIN, + B43legacy_QOSPARM_CWMAX, + B43legacy_QOSPARM_CWCUR, + B43legacy_QOSPARM_AIFS, + B43legacy_QOSPARM_BSLOTS, + B43legacy_QOSPARM_REGGAP, + B43legacy_QOSPARM_STATUS, +}; + +void b43legacy_qos_init(struct b43legacy_wldev *dev); + + +/* Helper functions for converting the key-table index from "firmware-format" + * to 
"raw-format" and back. The firmware API changed for this at some revision. + * We need to account for that here. */ +static inline +int b43legacy_new_kidx_api(struct b43legacy_wldev *dev) +{ + /* FIXME: Not sure the change was at rev 351 */ + return (dev->fw.rev >= 351); +} +static inline +u8 b43legacy_kidx_to_fw(struct b43legacy_wldev *dev, u8 raw_kidx) +{ + u8 firmware_kidx; + if (b43legacy_new_kidx_api(dev)) + firmware_kidx = raw_kidx; + else { + if (raw_kidx >= 4) /* Is per STA key? */ + firmware_kidx = raw_kidx - 4; + else + firmware_kidx = raw_kidx; /* TX default key */ + } + return firmware_kidx; +} +static inline +u8 b43legacy_kidx_to_raw(struct b43legacy_wldev *dev, u8 firmware_kidx) +{ + u8 raw_kidx; + if (b43legacy_new_kidx_api(dev)) + raw_kidx = firmware_kidx; + else + /* RX default keys or per STA keys */ + raw_kidx = firmware_kidx + 4; + return raw_kidx; +} + +#endif /* B43legacy_XMIT_H_ */ diff --git a/drivers/net/wireless/ipw2x00/Makefile b/drivers/net/wireless/ipw2x00/Makefile new file mode 100644 index 0000000..aecd2cf --- /dev/null +++ b/drivers/net/wireless/ipw2x00/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the Intel Centrino wireless drivers +# + +obj-$(CONFIG_IPW2100) += ipw2100.o +obj-$(CONFIG_IPW2200) += ipw2200.o + +obj-$(CONFIG_LIBIPW) += libipw.o +libipw-objs := \ + libipw_module.o \ + libipw_tx.o \ + libipw_rx.o \ + libipw_wx.o \ + libipw_geo.o diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c new file mode 100644 index 0000000..8f56dea --- /dev/null +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -0,0 +1,8769 @@ +/****************************************************************************** + + Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + Intel Linux Wireless + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + + Portions of this file are based on the sample_* files provided by Wireless + Extensions 0.26 package and copyright (c) 1997-2003 Jean Tourrilhes + + + Portions of this file are based on the Host AP project, + Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen + + Copyright (c) 2002-2003, Jouni Malinen + + Portions of ipw2100_mod_firmware_load, ipw2100_do_mod_firmware_load, and + ipw2100_fw_load are loosely based on drivers/sound/sound_firmware.c + available in the 2.4.25 kernel sources, and are copyright (c) Alan Cox + +******************************************************************************/ +/* + + Initial driver on which this is based was developed by Janusz Gorycki, + Maciej Urbaniak, and Maciej Sosnowski. + + Promiscuous mode support added by Jacek Wysoczynski and Maciej Urbaniak. 
+ +Theory of Operation + +Tx - Commands and Data + +Firmware and host share a circular queue of Transmit Buffer Descriptors (TBDs) +Each TBD contains a pointer to the physical (dma_addr_t) address of data being +sent to the firmware as well as the length of the data. + +The host writes to the TBD queue at the WRITE index. The WRITE index points +to the _next_ packet to be written and is advanced when after the TBD has been +filled. + +The firmware pulls from the TBD queue at the READ index. The READ index points +to the currently being read entry, and is advanced once the firmware is +done with a packet. + +When data is sent to the firmware, the first TBD is used to indicate to the +firmware if a Command or Data is being sent. If it is Command, all of the +command information is contained within the physical address referred to by the +TBD. If it is Data, the first TBD indicates the type of data packet, number +of fragments, etc. The next TBD then referrs to the actual packet location. + +The Tx flow cycle is as follows: + +1) ipw2100_tx() is called by kernel with SKB to transmit +2) Packet is move from the tx_free_list and appended to the transmit pending + list (tx_pend_list) +3) work is scheduled to move pending packets into the shared circular queue. +4) when placing packet in the circular queue, the incoming SKB is DMA mapped + to a physical address. That address is entered into a TBD. Two TBDs are + filled out. The first indicating a data packet, the second referring to the + actual payload data. +5) the packet is removed from tx_pend_list and placed on the end of the + firmware pending list (fw_pend_list) +6) firmware is notified that the WRITE index has +7) Once the firmware has processed the TBD, INTA is triggered. +8) For each Tx interrupt received from the firmware, the READ index is checked + to see which TBDs are done being processed. +9) For each TBD that has been processed, the ISR pulls the oldest packet + from the fw_pend_list. +10)The packet structure contained in the fw_pend_list is then used + to unmap the DMA address and to free the SKB originally passed to the driver + from the kernel. +11)The packet structure is placed onto the tx_free_list + +The above steps are the same for commands, only the msg_free_list/msg_pend_list +are used instead of tx_free_list/tx_pend_list + +... + +Critical Sections / Locking : + +There are two locks utilized. The first is the low level lock (priv->low_lock) +that protects the following: + +- Access to the Tx/Rx queue lists via priv->low_lock. The lists are as follows: + + tx_free_list : Holds pre-allocated Tx buffers. + TAIL modified in __ipw2100_tx_process() + HEAD modified in ipw2100_tx() + + tx_pend_list : Holds used Tx buffers waiting to go into the TBD ring + TAIL modified ipw2100_tx() + HEAD modified by ipw2100_tx_send_data() + + msg_free_list : Holds pre-allocated Msg (Command) buffers + TAIL modified in __ipw2100_tx_process() + HEAD modified in ipw2100_hw_send_command() + + msg_pend_list : Holds used Msg buffers waiting to go into the TBD ring + TAIL modified in ipw2100_hw_send_command() + HEAD modified in ipw2100_tx_send_commands() + + The flow of data on the TX side is as follows: + + MSG_FREE_LIST + COMMAND => MSG_PEND_LIST => TBD => MSG_FREE_LIST + TX_FREE_LIST + DATA => TX_PEND_LIST => TBD => TX_FREE_LIST + + The methods that work on the TBD ring are protected via priv->low_lock. 
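+As a minimal sketch of the index arithmetic described above (a hypothetical
+helper for illustration only; the names and the kept-empty-slot convention
+are not taken from this driver), the number of TBD slots the host may still
+fill in a ring of 'entries' slots is:
+
+	static inline u32 tbd_slots_free(u32 write_idx, u32 read_idx,
+					 u32 entries)
+	{
+		if (write_idx >= read_idx)
+			return entries - (write_idx - read_idx) - 1;
+		return read_idx - write_idx - 1;
+	}
+
+One slot stays unused so that write_idx == read_idx can only mean "ring
+empty", never "ring full".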
+ +- The internal data state of the device itself +- Access to the firmware read/write indexes for the BD queues + and associated logic + +All external entry functions are locked with the priv->action_lock to ensure +that only one external action is invoked at a time. + + +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "ipw2100.h" + +#define IPW2100_VERSION "git-1.2.2" + +#define DRV_NAME "ipw2100" +#define DRV_VERSION IPW2100_VERSION +#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" +#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" + +/* Debugging stuff */ +#ifdef CONFIG_IPW2100_DEBUG +#define IPW2100_RX_DEBUG /* Reception debugging */ +#endif + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL"); + +static int debug = 0; +static int network_mode = 0; +static int channel = 0; +static int associate = 0; +static int disable = 0; +#ifdef CONFIG_PM +static struct ipw2100_fw ipw2100_firmware; +#endif + +#include +module_param(debug, int, 0444); +module_param_named(mode, network_mode, int, 0444); +module_param(channel, int, 0444); +module_param(associate, int, 0444); +module_param(disable, int, 0444); + +MODULE_PARM_DESC(debug, "debug level"); +MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)"); +MODULE_PARM_DESC(channel, "channel"); +MODULE_PARM_DESC(associate, "auto associate when scanning (default off)"); +MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); + +static u32 ipw2100_debug_level = IPW_DL_NONE; + +#ifdef CONFIG_IPW2100_DEBUG +#define IPW_DEBUG(level, message...) \ +do { \ + if (ipw2100_debug_level & (level)) { \ + printk(KERN_DEBUG "ipw2100: %c %s ", \ + in_interrupt() ? 'I' : 'U', __func__); \ + printk(message); \ + } \ +} while (0) +#else +#define IPW_DEBUG(level, message...) 
do {} while (0) +#endif /* CONFIG_IPW2100_DEBUG */ + +#ifdef CONFIG_IPW2100_DEBUG +static const char *command_types[] = { + "undefined", + "unused", /* HOST_ATTENTION */ + "HOST_COMPLETE", + "unused", /* SLEEP */ + "unused", /* HOST_POWER_DOWN */ + "unused", + "SYSTEM_CONFIG", + "unused", /* SET_IMR */ + "SSID", + "MANDATORY_BSSID", + "AUTHENTICATION_TYPE", + "ADAPTER_ADDRESS", + "PORT_TYPE", + "INTERNATIONAL_MODE", + "CHANNEL", + "RTS_THRESHOLD", + "FRAG_THRESHOLD", + "POWER_MODE", + "TX_RATES", + "BASIC_TX_RATES", + "WEP_KEY_INFO", + "unused", + "unused", + "unused", + "unused", + "WEP_KEY_INDEX", + "WEP_FLAGS", + "ADD_MULTICAST", + "CLEAR_ALL_MULTICAST", + "BEACON_INTERVAL", + "ATIM_WINDOW", + "CLEAR_STATISTICS", + "undefined", + "undefined", + "undefined", + "undefined", + "TX_POWER_INDEX", + "undefined", + "undefined", + "undefined", + "undefined", + "undefined", + "undefined", + "BROADCAST_SCAN", + "CARD_DISABLE", + "PREFERRED_BSSID", + "SET_SCAN_OPTIONS", + "SCAN_DWELL_TIME", + "SWEEP_TABLE", + "AP_OR_STATION_TABLE", + "GROUP_ORDINALS", + "SHORT_RETRY_LIMIT", + "LONG_RETRY_LIMIT", + "unused", /* SAVE_CALIBRATION */ + "unused", /* RESTORE_CALIBRATION */ + "undefined", + "undefined", + "undefined", + "HOST_PRE_POWER_DOWN", + "unused", /* HOST_INTERRUPT_COALESCING */ + "undefined", + "CARD_DISABLE_PHY_OFF", + "MSDU_TX_RATES" "undefined", + "undefined", + "SET_STATION_STAT_BITS", + "CLEAR_STATIONS_STAT_BITS", + "LEAP_ROGUE_MODE", + "SET_SECURITY_INFORMATION", + "DISASSOCIATION_BSSID", + "SET_WPA_ASS_IE" +}; +#endif + +#define WEXT_USECHANNELS 1 + +static const long ipw2100_frequencies[] = { + 2412, 2417, 2422, 2427, + 2432, 2437, 2442, 2447, + 2452, 2457, 2462, 2467, + 2472, 2484 +}; + +#define FREQ_COUNT ARRAY_SIZE(ipw2100_frequencies) + +static const long ipw2100_rates_11b[] = { + 1000000, + 2000000, + 5500000, + 11000000 +}; + +static struct ieee80211_rate ipw2100_bg_rates[] = { + { .bitrate = 10 }, + { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, +}; + +#define RATE_COUNT ARRAY_SIZE(ipw2100_rates_11b) + +/* Pre-decl until we get the code solid and then we can clean it up */ +static void ipw2100_tx_send_commands(struct ipw2100_priv *priv); +static void ipw2100_tx_send_data(struct ipw2100_priv *priv); +static int ipw2100_adapter_setup(struct ipw2100_priv *priv); + +static void ipw2100_queues_initialize(struct ipw2100_priv *priv); +static void ipw2100_queues_free(struct ipw2100_priv *priv); +static int ipw2100_queues_allocate(struct ipw2100_priv *priv); + +static int ipw2100_fw_download(struct ipw2100_priv *priv, + struct ipw2100_fw *fw); +static int ipw2100_get_firmware(struct ipw2100_priv *priv, + struct ipw2100_fw *fw); +static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, + size_t max); +static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, + size_t max); +static void ipw2100_release_firmware(struct ipw2100_priv *priv, + struct ipw2100_fw *fw); +static int ipw2100_ucode_download(struct ipw2100_priv *priv, + struct ipw2100_fw *fw); +static void ipw2100_wx_event_work(struct work_struct *work); +static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); +static struct iw_handler_def ipw2100_wx_handler_def; + +static inline void read_register(struct net_device *dev, u32 reg, u32 * val) +{ + *val = readl((void __iomem *)(dev->base_addr + reg)); + IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val); 
+} + +static inline void write_register(struct net_device *dev, u32 reg, u32 val) +{ + writel(val, (void __iomem *)(dev->base_addr + reg)); + IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val); +} + +static inline void read_register_word(struct net_device *dev, u32 reg, + u16 * val) +{ + *val = readw((void __iomem *)(dev->base_addr + reg)); + IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val); +} + +static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val) +{ + *val = readb((void __iomem *)(dev->base_addr + reg)); + IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val); +} + +static inline void write_register_word(struct net_device *dev, u32 reg, u16 val) +{ + writew(val, (void __iomem *)(dev->base_addr + reg)); + IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val); +} + +static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val) +{ + writeb(val, (void __iomem *)(dev->base_addr + reg)); + IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val); +} + +static inline void read_nic_dword(struct net_device *dev, u32 addr, u32 * val) +{ + write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, + addr & IPW_REG_INDIRECT_ADDR_MASK); + read_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val); +} + +static inline void write_nic_dword(struct net_device *dev, u32 addr, u32 val) +{ + write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, + addr & IPW_REG_INDIRECT_ADDR_MASK); + write_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val); +} + +static inline void read_nic_word(struct net_device *dev, u32 addr, u16 * val) +{ + write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, + addr & IPW_REG_INDIRECT_ADDR_MASK); + read_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val); +} + +static inline void write_nic_word(struct net_device *dev, u32 addr, u16 val) +{ + write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, + addr & IPW_REG_INDIRECT_ADDR_MASK); + write_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val); +} + +static inline void read_nic_byte(struct net_device *dev, u32 addr, u8 * val) +{ + write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, + addr & IPW_REG_INDIRECT_ADDR_MASK); + read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val); +} + +static inline void write_nic_byte(struct net_device *dev, u32 addr, u8 val) +{ + write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, + addr & IPW_REG_INDIRECT_ADDR_MASK); + write_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val); +} + +static inline void write_nic_auto_inc_address(struct net_device *dev, u32 addr) +{ + write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS, + addr & IPW_REG_INDIRECT_ADDR_MASK); +} + +static inline void write_nic_dword_auto_inc(struct net_device *dev, u32 val) +{ + write_register(dev, IPW_REG_AUTOINCREMENT_DATA, val); +} + +static void write_nic_memory(struct net_device *dev, u32 addr, u32 len, + const u8 * buf) +{ + u32 aligned_addr; + u32 aligned_len; + u32 dif_len; + u32 i; + + /* read first nibble byte by byte */ + aligned_addr = addr & (~0x3); + dif_len = addr - aligned_addr; + if (dif_len) { + /* Start reading at aligned_addr + dif_len */ + write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, + aligned_addr); + for (i = dif_len; i < 4; i++, buf++) + write_register_byte(dev, + IPW_REG_INDIRECT_ACCESS_DATA + i, + *buf); + + len -= dif_len; + aligned_addr += 4; + } + + /* read DWs through autoincrement registers */ + write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS, aligned_addr); + aligned_len = len & (~0x3); + for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4) + write_register(dev, 
IPW_REG_AUTOINCREMENT_DATA, *(u32 *) buf); + + /* copy the last nibble */ + dif_len = len - aligned_len; + write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr); + for (i = 0; i < dif_len; i++, buf++) + write_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, + *buf); +} + +static void read_nic_memory(struct net_device *dev, u32 addr, u32 len, + u8 * buf) +{ + u32 aligned_addr; + u32 aligned_len; + u32 dif_len; + u32 i; + + /* read first nibble byte by byte */ + aligned_addr = addr & (~0x3); + dif_len = addr - aligned_addr; + if (dif_len) { + /* Start reading at aligned_addr + dif_len */ + write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, + aligned_addr); + for (i = dif_len; i < 4; i++, buf++) + read_register_byte(dev, + IPW_REG_INDIRECT_ACCESS_DATA + i, + buf); + + len -= dif_len; + aligned_addr += 4; + } + + /* read DWs through autoincrement registers */ + write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS, aligned_addr); + aligned_len = len & (~0x3); + for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4) + read_register(dev, IPW_REG_AUTOINCREMENT_DATA, (u32 *) buf); + + /* copy the last nibble */ + dif_len = len - aligned_len; + write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr); + for (i = 0; i < dif_len; i++, buf++) + read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf); +} + +static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev) +{ + return (dev->base_addr && + (readl + ((void __iomem *)(dev->base_addr + + IPW_REG_DOA_DEBUG_AREA_START)) + == IPW_DATA_DOA_DEBUG_VALUE)); +} + +static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord, + void *val, u32 * len) +{ + struct ipw2100_ordinals *ordinals = &priv->ordinals; + u32 addr; + u32 field_info; + u16 field_len; + u16 field_count; + u32 total_length; + + if (ordinals->table1_addr == 0) { + printk(KERN_WARNING DRV_NAME ": attempt to use fw ordinals " + "before they have been loaded.\n"); + return -EINVAL; + } + + if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) { + if (*len < IPW_ORD_TAB_1_ENTRY_SIZE) { + *len = IPW_ORD_TAB_1_ENTRY_SIZE; + + printk(KERN_WARNING DRV_NAME + ": ordinal buffer length too small, need %zd\n", + IPW_ORD_TAB_1_ENTRY_SIZE); + + return -EINVAL; + } + + read_nic_dword(priv->net_dev, + ordinals->table1_addr + (ord << 2), &addr); + read_nic_dword(priv->net_dev, addr, val); + + *len = IPW_ORD_TAB_1_ENTRY_SIZE; + + return 0; + } + + if (IS_ORDINAL_TABLE_TWO(ordinals, ord)) { + + ord -= IPW_START_ORD_TAB_2; + + /* get the address of statistic */ + read_nic_dword(priv->net_dev, + ordinals->table2_addr + (ord << 3), &addr); + + /* get the second DW of statistics ; + * two 16-bit words - first is length, second is count */ + read_nic_dword(priv->net_dev, + ordinals->table2_addr + (ord << 3) + sizeof(u32), + &field_info); + + /* get each entry length */ + field_len = *((u16 *) & field_info); + + /* get number of entries */ + field_count = *(((u16 *) & field_info) + 1); + + /* abort if no enough memory */ + total_length = field_len * field_count; + if (total_length > *len) { + *len = total_length; + return -EINVAL; + } + + *len = total_length; + if (!total_length) + return 0; + + /* read the ordinal data from the SRAM */ + read_nic_memory(priv->net_dev, addr, total_length, val); + + return 0; + } + + printk(KERN_WARNING DRV_NAME ": ordinal %d neither in table 1 nor " + "in table 2\n", ord); + + return -EINVAL; +} + +static int ipw2100_set_ordinal(struct ipw2100_priv *priv, u32 ord, u32 * val, + u32 * len) +{ + struct ipw2100_ordinals *ordinals = 
&priv->ordinals; + u32 addr; + + if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) { + if (*len != IPW_ORD_TAB_1_ENTRY_SIZE) { + *len = IPW_ORD_TAB_1_ENTRY_SIZE; + IPW_DEBUG_INFO("wrong size\n"); + return -EINVAL; + } + + read_nic_dword(priv->net_dev, + ordinals->table1_addr + (ord << 2), &addr); + + write_nic_dword(priv->net_dev, addr, *val); + + *len = IPW_ORD_TAB_1_ENTRY_SIZE; + + return 0; + } + + IPW_DEBUG_INFO("wrong table\n"); + if (IS_ORDINAL_TABLE_TWO(ordinals, ord)) + return -EINVAL; + + return -EINVAL; +} + +static char *snprint_line(char *buf, size_t count, + const u8 * data, u32 len, u32 ofs) +{ + int out, i, j, l; + char c; + + out = snprintf(buf, count, "%08X", ofs); + + for (l = 0, i = 0; i < 2; i++) { + out += snprintf(buf + out, count - out, " "); + for (j = 0; j < 8 && l < len; j++, l++) + out += snprintf(buf + out, count - out, "%02X ", + data[(i * 8 + j)]); + for (; j < 8; j++) + out += snprintf(buf + out, count - out, " "); + } + + out += snprintf(buf + out, count - out, " "); + for (l = 0, i = 0; i < 2; i++) { + out += snprintf(buf + out, count - out, " "); + for (j = 0; j < 8 && l < len; j++, l++) { + c = data[(i * 8 + j)]; + if (!isascii(c) || !isprint(c)) + c = '.'; + + out += snprintf(buf + out, count - out, "%c", c); + } + + for (; j < 8; j++) + out += snprintf(buf + out, count - out, " "); + } + + return buf; +} + +static void printk_buf(int level, const u8 * data, u32 len) +{ + char line[81]; + u32 ofs = 0; + if (!(ipw2100_debug_level & level)) + return; + + while (len) { + printk(KERN_DEBUG "%s\n", + snprint_line(line, sizeof(line), &data[ofs], + min(len, 16U), ofs)); + ofs += 16; + len -= min(len, 16U); + } +} + +#define MAX_RESET_BACKOFF 10 + +static void schedule_reset(struct ipw2100_priv *priv) +{ + unsigned long now = get_seconds(); + + /* If we haven't received a reset request within the backoff period, + * then we can reset the backoff interval so this reset occurs + * immediately */ + if (priv->reset_backoff && + (now - priv->last_reset > priv->reset_backoff)) + priv->reset_backoff = 0; + + priv->last_reset = get_seconds(); + + if (!(priv->status & STATUS_RESET_PENDING)) { + IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n", + priv->net_dev->name, priv->reset_backoff); + netif_carrier_off(priv->net_dev); + netif_stop_queue(priv->net_dev); + priv->status |= STATUS_RESET_PENDING; + if (priv->reset_backoff) + queue_delayed_work(priv->workqueue, &priv->reset_work, + priv->reset_backoff * HZ); + else + queue_delayed_work(priv->workqueue, &priv->reset_work, + 0); + + if (priv->reset_backoff < MAX_RESET_BACKOFF) + priv->reset_backoff++; + + wake_up_interruptible(&priv->wait_command_queue); + } else + IPW_DEBUG_INFO("%s: Firmware restart already in progress.\n", + priv->net_dev->name); + +} + +#define HOST_COMPLETE_TIMEOUT (2 * HZ) +static int ipw2100_hw_send_command(struct ipw2100_priv *priv, + struct host_command *cmd) +{ + struct list_head *element; + struct ipw2100_tx_packet *packet; + unsigned long flags; + int err = 0; + + IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n", + command_types[cmd->host_command], cmd->host_command, + cmd->host_command_length); + printk_buf(IPW_DL_HC, (u8 *) cmd->host_command_parameters, + cmd->host_command_length); + + spin_lock_irqsave(&priv->low_lock, flags); + + if (priv->fatal_error) { + IPW_DEBUG_INFO + ("Attempt to send command while hardware in fatal error condition.\n"); + err = -EIO; + goto fail_unlock; + } + + if (!(priv->status & STATUS_RUNNING)) { + IPW_DEBUG_INFO + ("Attempt to send command while hardware is 
not running.\n"); + err = -EIO; + goto fail_unlock; + } + + if (priv->status & STATUS_CMD_ACTIVE) { + IPW_DEBUG_INFO + ("Attempt to send command while another command is pending.\n"); + err = -EBUSY; + goto fail_unlock; + } + + if (list_empty(&priv->msg_free_list)) { + IPW_DEBUG_INFO("no available msg buffers\n"); + goto fail_unlock; + } + + priv->status |= STATUS_CMD_ACTIVE; + priv->messages_sent++; + + element = priv->msg_free_list.next; + + packet = list_entry(element, struct ipw2100_tx_packet, list); + packet->jiffy_start = jiffies; + + /* initialize the firmware command packet */ + packet->info.c_struct.cmd->host_command_reg = cmd->host_command; + packet->info.c_struct.cmd->host_command_reg1 = cmd->host_command1; + packet->info.c_struct.cmd->host_command_len_reg = + cmd->host_command_length; + packet->info.c_struct.cmd->sequence = cmd->host_command_sequence; + + memcpy(packet->info.c_struct.cmd->host_command_params_reg, + cmd->host_command_parameters, + sizeof(packet->info.c_struct.cmd->host_command_params_reg)); + + list_del(element); + DEC_STAT(&priv->msg_free_stat); + + list_add_tail(element, &priv->msg_pend_list); + INC_STAT(&priv->msg_pend_stat); + + ipw2100_tx_send_commands(priv); + ipw2100_tx_send_data(priv); + + spin_unlock_irqrestore(&priv->low_lock, flags); + + /* + * We must wait for this command to complete before another + * command can be sent... but if we wait more than 3 seconds + * then there is a problem. + */ + + err = + wait_event_interruptible_timeout(priv->wait_command_queue, + !(priv-> + status & STATUS_CMD_ACTIVE), + HOST_COMPLETE_TIMEOUT); + + if (err == 0) { + IPW_DEBUG_INFO("Command completion failed out after %dms.\n", + 1000 * (HOST_COMPLETE_TIMEOUT / HZ)); + priv->fatal_error = IPW2100_ERR_MSG_TIMEOUT; + priv->status &= ~STATUS_CMD_ACTIVE; + schedule_reset(priv); + return -EIO; + } + + if (priv->fatal_error) { + printk(KERN_WARNING DRV_NAME ": %s: firmware fatal error\n", + priv->net_dev->name); + return -EIO; + } + + /* !!!!! HACK TEST !!!!! + * When lots of debug trace statements are enabled, the driver + * doesn't seem to have as many firmware restart cycles... + * + * As a test, we're sticking in a 1/100s delay here */ + schedule_timeout_uninterruptible(msecs_to_jiffies(10)); + + return 0; + + fail_unlock: + spin_unlock_irqrestore(&priv->low_lock, flags); + + return err; +} + +/* + * Verify the values and data access of the hardware + * No locks needed or used. No functions called. 
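+ * Domain 0 is checked against the fixed IPW_DATA_DOA_DEBUG_VALUE pattern and
+ * Domain 1 with a write/read-back of two scratch words.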
+ */ +static int ipw2100_verify(struct ipw2100_priv *priv) +{ + u32 data1, data2; + u32 address; + + u32 val1 = 0x76543210; + u32 val2 = 0xFEDCBA98; + + /* Domain 0 check - all values should be DOA_DEBUG */ + for (address = IPW_REG_DOA_DEBUG_AREA_START; + address < IPW_REG_DOA_DEBUG_AREA_END; address += sizeof(u32)) { + read_register(priv->net_dev, address, &data1); + if (data1 != IPW_DATA_DOA_DEBUG_VALUE) + return -EIO; + } + + /* Domain 1 check - use arbitrary read/write compare */ + for (address = 0; address < 5; address++) { + /* The memory area is not used now */ + write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32, + val1); + write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36, + val2); + read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32, + &data1); + read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36, + &data2); + if (val1 == data1 && val2 == data2) + return 0; + } + + return -EIO; +} + +/* + * + * Loop until the CARD_DISABLED bit is the same value as the + * supplied parameter + * + * TODO: See if it would be more efficient to do a wait/wake + * cycle and have the completion event trigger the wakeup + * + */ +#define IPW_CARD_DISABLE_COMPLETE_WAIT 100 // 100 milli +static int ipw2100_wait_for_card_state(struct ipw2100_priv *priv, int state) +{ + int i; + u32 card_state; + u32 len = sizeof(card_state); + int err; + + for (i = 0; i <= IPW_CARD_DISABLE_COMPLETE_WAIT * 1000; i += 50) { + err = ipw2100_get_ordinal(priv, IPW_ORD_CARD_DISABLED, + &card_state, &len); + if (err) { + IPW_DEBUG_INFO("Query of CARD_DISABLED ordinal " + "failed.\n"); + return 0; + } + + /* We'll break out if either the HW state says it is + * in the state we want, or if HOST_COMPLETE command + * finishes */ + if ((card_state == state) || + ((priv->status & STATUS_ENABLED) ? + IPW_HW_STATE_ENABLED : IPW_HW_STATE_DISABLED) == state) { + if (state == IPW_HW_STATE_ENABLED) + priv->status |= STATUS_ENABLED; + else + priv->status &= ~STATUS_ENABLED; + + return 0; + } + + udelay(50); + } + + IPW_DEBUG_INFO("ipw2100_wait_for_card_state to %s state timed out\n", + state ? 
"DISABLED" : "ENABLED"); + return -EIO; +} + +/********************************************************************* + Procedure : sw_reset_and_clock + Purpose : Asserts s/w reset, asserts clock initialization + and waits for clock stabilization + ********************************************************************/ +static int sw_reset_and_clock(struct ipw2100_priv *priv) +{ + int i; + u32 r; + + // assert s/w reset + write_register(priv->net_dev, IPW_REG_RESET_REG, + IPW_AUX_HOST_RESET_REG_SW_RESET); + + // wait for clock stabilization + for (i = 0; i < 1000; i++) { + udelay(IPW_WAIT_RESET_ARC_COMPLETE_DELAY); + + // check clock ready bit + read_register(priv->net_dev, IPW_REG_RESET_REG, &r); + if (r & IPW_AUX_HOST_RESET_REG_PRINCETON_RESET) + break; + } + + if (i == 1000) + return -EIO; // TODO: better error value + + /* set "initialization complete" bit to move adapter to + * D0 state */ + write_register(priv->net_dev, IPW_REG_GP_CNTRL, + IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE); + + /* wait for clock stabilization */ + for (i = 0; i < 10000; i++) { + udelay(IPW_WAIT_CLOCK_STABILIZATION_DELAY * 4); + + /* check clock ready bit */ + read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r); + if (r & IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY) + break; + } + + if (i == 10000) + return -EIO; /* TODO: better error value */ + + /* set D0 standby bit */ + read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r); + write_register(priv->net_dev, IPW_REG_GP_CNTRL, + r | IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY); + + return 0; +} + +/********************************************************************* + Procedure : ipw2100_download_firmware + Purpose : Initiaze adapter after power on. + The sequence is: + 1. assert s/w reset first! + 2. awake clocks & wait for clock stabilization + 3. hold ARC (don't ask me why...) + 4. load Dino ucode and reset/clock init again + 5. zero-out shared mem + 6. download f/w + *******************************************************************/ +static int ipw2100_download_firmware(struct ipw2100_priv *priv) +{ + u32 address; + int err; + +#ifndef CONFIG_PM + /* Fetch the firmware and microcode */ + struct ipw2100_fw ipw2100_firmware; +#endif + + if (priv->fatal_error) { + IPW_DEBUG_ERROR("%s: ipw2100_download_firmware called after " + "fatal error %d. 
Interface must be brought down.\n", + priv->net_dev->name, priv->fatal_error); + return -EINVAL; + } +#ifdef CONFIG_PM + if (!ipw2100_firmware.version) { + err = ipw2100_get_firmware(priv, &ipw2100_firmware); + if (err) { + IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n", + priv->net_dev->name, err); + priv->fatal_error = IPW2100_ERR_FW_LOAD; + goto fail; + } + } +#else + err = ipw2100_get_firmware(priv, &ipw2100_firmware); + if (err) { + IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n", + priv->net_dev->name, err); + priv->fatal_error = IPW2100_ERR_FW_LOAD; + goto fail; + } +#endif + priv->firmware_version = ipw2100_firmware.version; + + /* s/w reset and clock stabilization */ + err = sw_reset_and_clock(priv); + if (err) { + IPW_DEBUG_ERROR("%s: sw_reset_and_clock failed: %d\n", + priv->net_dev->name, err); + goto fail; + } + + err = ipw2100_verify(priv); + if (err) { + IPW_DEBUG_ERROR("%s: ipw2100_verify failed: %d\n", + priv->net_dev->name, err); + goto fail; + } + + /* Hold ARC */ + write_nic_dword(priv->net_dev, + IPW_INTERNAL_REGISTER_HALT_AND_RESET, 0x80000000); + + /* allow ARC to run */ + write_register(priv->net_dev, IPW_REG_RESET_REG, 0); + + /* load microcode */ + err = ipw2100_ucode_download(priv, &ipw2100_firmware); + if (err) { + printk(KERN_ERR DRV_NAME ": %s: Error loading microcode: %d\n", + priv->net_dev->name, err); + goto fail; + } + + /* release ARC */ + write_nic_dword(priv->net_dev, + IPW_INTERNAL_REGISTER_HALT_AND_RESET, 0x00000000); + + /* s/w reset and clock stabilization (again!!!) */ + err = sw_reset_and_clock(priv); + if (err) { + printk(KERN_ERR DRV_NAME + ": %s: sw_reset_and_clock failed: %d\n", + priv->net_dev->name, err); + goto fail; + } + + /* load f/w */ + err = ipw2100_fw_download(priv, &ipw2100_firmware); + if (err) { + IPW_DEBUG_ERROR("%s: Error loading firmware: %d\n", + priv->net_dev->name, err); + goto fail; + } +#ifndef CONFIG_PM + /* + * When the .resume method of the driver is called, the other + * part of the system, i.e. the ide driver could still stay in + * the suspend stage. This prevents us from loading the firmware + * from the disk. 
--YZ
+	 */
+
+	/* free any storage allocated for firmware image */
+	ipw2100_release_firmware(priv, &ipw2100_firmware);
+#endif
+
+	/* zero out Domain 1 area indirectly (Si requirement) */
+	for (address = IPW_HOST_FW_SHARED_AREA0;
+	     address < IPW_HOST_FW_SHARED_AREA0_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_SHARED_AREA1;
+	     address < IPW_HOST_FW_SHARED_AREA1_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_SHARED_AREA2;
+	     address < IPW_HOST_FW_SHARED_AREA2_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_SHARED_AREA3;
+	     address < IPW_HOST_FW_SHARED_AREA3_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_INTERRUPT_AREA;
+	     address < IPW_HOST_FW_INTERRUPT_AREA_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+
+	return 0;
+
+      fail:
+	ipw2100_release_firmware(priv, &ipw2100_firmware);
+	return err;
+}
+
+static inline void ipw2100_enable_interrupts(struct ipw2100_priv *priv)
+{
+	if (priv->status & STATUS_INT_ENABLED)
+		return;
+	priv->status |= STATUS_INT_ENABLED;
+	write_register(priv->net_dev, IPW_REG_INTA_MASK, IPW_INTERRUPT_MASK);
+}
+
+static inline void ipw2100_disable_interrupts(struct ipw2100_priv *priv)
+{
+	if (!(priv->status & STATUS_INT_ENABLED))
+		return;
+	priv->status &= ~STATUS_INT_ENABLED;
+	write_register(priv->net_dev, IPW_REG_INTA_MASK, 0x0);
+}
+
+static void ipw2100_initialize_ordinals(struct ipw2100_priv *priv)
+{
+	struct ipw2100_ordinals *ord = &priv->ordinals;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1,
+		      &ord->table1_addr);
+
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2,
+		      &ord->table2_addr);
+
+	read_nic_dword(priv->net_dev, ord->table1_addr, &ord->table1_size);
+	read_nic_dword(priv->net_dev, ord->table2_addr, &ord->table2_size);
+
+	ord->table2_size &= 0x0000FFFF;
+
+	IPW_DEBUG_INFO("table 1 size: %d\n", ord->table1_size);
+	IPW_DEBUG_INFO("table 2 size: %d\n", ord->table2_size);
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static inline void ipw2100_hw_set_gpio(struct ipw2100_priv *priv)
+{
+	u32 reg = 0;
+	/*
+	 * Set GPIO 3 writable by FW; GPIO 1 writable
+	 * by driver and enable clock
+	 */
+	reg = (IPW_BIT_GPIO_GPIO3_MASK | IPW_BIT_GPIO_GPIO1_ENABLE |
+	       IPW_BIT_GPIO_LED_OFF);
+	write_register(priv->net_dev, IPW_REG_GPIO, reg);
+}
+
+static int rf_kill_active(struct ipw2100_priv *priv)
+{
+#define MAX_RF_KILL_CHECKS 5
+#define RF_KILL_CHECK_DELAY 40
+
+	unsigned short value = 0;
+	u32 reg = 0;
+	int i;
+
+	if (!(priv->hw_features & HW_FEATURE_RFKILL)) {
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
+		priv->status &= ~STATUS_RF_KILL_HW;
+		return 0;
+	}
+
+	for (i = 0; i < MAX_RF_KILL_CHECKS; i++) {
+		udelay(RF_KILL_CHECK_DELAY);
+		read_register(priv->net_dev, IPW_REG_GPIO, &reg);
+		value = (value << 1) | ((reg & IPW_BIT_GPIO_RF_KILL) ?
0 : 1); + } + + if (value == 0) { + wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true); + priv->status |= STATUS_RF_KILL_HW; + } else { + wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false); + priv->status &= ~STATUS_RF_KILL_HW; + } + + return (value == 0); +} + +static int ipw2100_get_hw_features(struct ipw2100_priv *priv) +{ + u32 addr, len; + u32 val; + + /* + * EEPROM_SRAM_DB_START_ADDRESS using ordinal in ordinal table 1 + */ + len = sizeof(addr); + if (ipw2100_get_ordinal + (priv, IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS, &addr, &len)) { + IPW_DEBUG_INFO("failed querying ordinals at line %d\n", + __LINE__); + return -EIO; + } + + IPW_DEBUG_INFO("EEPROM address: %08X\n", addr); + + /* + * EEPROM version is the byte at offset 0xfd in firmware + * We read 4 bytes, then shift out the byte we actually want */ + read_nic_dword(priv->net_dev, addr + 0xFC, &val); + priv->eeprom_version = (val >> 24) & 0xFF; + IPW_DEBUG_INFO("EEPROM version: %d\n", priv->eeprom_version); + + /* + * HW RF Kill enable is bit 0 in byte at offset 0x21 in firmware + * + * notice that the EEPROM bit is reverse polarity, i.e. + * bit = 0 signifies HW RF kill switch is supported + * bit = 1 signifies HW RF kill switch is NOT supported + */ + read_nic_dword(priv->net_dev, addr + 0x20, &val); + if (!((val >> 24) & 0x01)) + priv->hw_features |= HW_FEATURE_RFKILL; + + IPW_DEBUG_INFO("HW RF Kill: %ssupported.\n", + (priv->hw_features & HW_FEATURE_RFKILL) ? "" : "not "); + + return 0; +} + +/* + * Start firmware execution after power on and intialization + * The sequence is: + * 1. Release ARC + * 2. Wait for f/w initialization completes; + */ +static int ipw2100_start_adapter(struct ipw2100_priv *priv) +{ + int i; + u32 inta, inta_mask, gpio; + + IPW_DEBUG_INFO("enter\n"); + + if (priv->status & STATUS_RUNNING) + return 0; + + /* + * Initialize the hw - drive adapter to DO state by setting + * init_done bit. Wait for clk_ready bit and Download + * fw & dino ucode + */ + if (ipw2100_download_firmware(priv)) { + printk(KERN_ERR DRV_NAME + ": %s: Failed to power on the adapter.\n", + priv->net_dev->name); + return -EIO; + } + + /* Clear the Tx, Rx and Msg queues and the r/w indexes + * in the firmware RBD and TBD ring queue */ + ipw2100_queues_initialize(priv); + + ipw2100_hw_set_gpio(priv); + + /* TODO -- Look at disabling interrupts here to make sure none + * get fired during FW initialization */ + + /* Release ARC - clear reset bit */ + write_register(priv->net_dev, IPW_REG_RESET_REG, 0); + + /* wait for f/w intialization complete */ + IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n"); + i = 5000; + do { + schedule_timeout_uninterruptible(msecs_to_jiffies(40)); + /* Todo... wait for sync command ... */ + + read_register(priv->net_dev, IPW_REG_INTA, &inta); + + /* check "init done" bit */ + if (inta & IPW2100_INTA_FW_INIT_DONE) { + /* reset "init done" bit */ + write_register(priv->net_dev, IPW_REG_INTA, + IPW2100_INTA_FW_INIT_DONE); + break; + } + + /* check error conditions : we check these after the firmware + * check so that if there is an error, the interrupt handler + * will see it and the adapter will be reset */ + if (inta & + (IPW2100_INTA_FATAL_ERROR | IPW2100_INTA_PARITY_ERROR)) { + /* clear error conditions */ + write_register(priv->net_dev, IPW_REG_INTA, + IPW2100_INTA_FATAL_ERROR | + IPW2100_INTA_PARITY_ERROR); + } + } while (--i); + + /* Clear out any pending INTAs since we aren't supposed to have + * interrupts enabled at this point... 
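+	 * (they are only unmasked later, from ipw2100_up(), via
+	 * ipw2100_enable_interrupts())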
 */
+	read_register(priv->net_dev, IPW_REG_INTA, &inta);
+	read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
+	inta &= IPW_INTERRUPT_MASK;
+	/* Clear out any pending interrupts */
+	if (inta & inta_mask)
+		write_register(priv->net_dev, IPW_REG_INTA, inta);
+
+	IPW_DEBUG_FW("f/w initialization complete: %s\n",
+		     i ? "SUCCESS" : "FAILED");
+
+	if (!i) {
+		printk(KERN_WARNING DRV_NAME
+		       ": %s: Firmware did not initialize.\n",
+		       priv->net_dev->name);
+		return -EIO;
+	}
+
+	/* allow firmware to write to GPIO1 & GPIO3 */
+	read_register(priv->net_dev, IPW_REG_GPIO, &gpio);
+
+	gpio |= (IPW_BIT_GPIO_GPIO1_MASK | IPW_BIT_GPIO_GPIO3_MASK);
+
+	write_register(priv->net_dev, IPW_REG_GPIO, gpio);
+
+	/* Ready to receive commands */
+	priv->status |= STATUS_RUNNING;
+
+	/* The adapter has been reset; we are not associated */
+	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+static inline void ipw2100_reset_fatalerror(struct ipw2100_priv *priv)
+{
+	if (!priv->fatal_error)
+		return;
+
+	priv->fatal_errors[priv->fatal_index++] = priv->fatal_error;
+	priv->fatal_index %= IPW2100_ERROR_QUEUE;
+	priv->fatal_error = 0;
+}
+
+/* NOTE: Our interrupt is disabled when this method is called */
+static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
+{
+	u32 reg;
+	int i;
+
+	IPW_DEBUG_INFO("Power cycling the hardware.\n");
+
+	ipw2100_hw_set_gpio(priv);
+
+	/* Step 1. Stop Master Assert */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);
+
+	/* Step 2. Wait for stop Master Assert
+	 * (not more than 50us, otherwise return an error) */
+	i = 5;
+	do {
+		udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
+
+		if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
+			break;
+	} while (--i);
+
+	priv->status &= ~STATUS_RESET_PENDING;
+
+	if (!i) {
+		IPW_DEBUG_INFO
+		    ("exit - waited too long for master assert stop\n");
+		return -EIO;
+	}
+
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_SW_RESET);
+
+	/* Reset any fatal_error conditions */
+	ipw2100_reset_fatalerror(priv);
+
+	/* At this point, the adapter is now stopped and disabled */
+	priv->status &= ~(STATUS_RUNNING | STATUS_ASSOCIATING |
+			  STATUS_ASSOCIATED | STATUS_ENABLED);
+
+	return 0;
+}
+
+/*
+ * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
+ *
+ * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
+ *
+ * STATUS_CARD_DISABLE_NOTIFICATION will be sent regardless of
+ * whether STATUS_ASSN_LOST is sent.
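+ *
+ * The loop below polls IPW2100_CONTROL_REG and IPW2100_COMMAND until both
+ * report the PHY-off state, sleeping HW_PHY_OFF_LOOP_DELAY between reads.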
+ */ +static int ipw2100_hw_phy_off(struct ipw2100_priv *priv) +{ + +#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000) + + struct host_command cmd = { + .host_command = CARD_DISABLE_PHY_OFF, + .host_command_sequence = 0, + .host_command_length = 0, + }; + int err, i; + u32 val1, val2; + + IPW_DEBUG_HC("CARD_DISABLE_PHY_OFF\n"); + + /* Turn off the radio */ + err = ipw2100_hw_send_command(priv, &cmd); + if (err) + return err; + + for (i = 0; i < 2500; i++) { + read_nic_dword(priv->net_dev, IPW2100_CONTROL_REG, &val1); + read_nic_dword(priv->net_dev, IPW2100_COMMAND, &val2); + + if ((val1 & IPW2100_CONTROL_PHY_OFF) && + (val2 & IPW2100_COMMAND_PHY_OFF)) + return 0; + + schedule_timeout_uninterruptible(HW_PHY_OFF_LOOP_DELAY); + } + + return -EIO; +} + +static int ipw2100_enable_adapter(struct ipw2100_priv *priv) +{ + struct host_command cmd = { + .host_command = HOST_COMPLETE, + .host_command_sequence = 0, + .host_command_length = 0 + }; + int err = 0; + + IPW_DEBUG_HC("HOST_COMPLETE\n"); + + if (priv->status & STATUS_ENABLED) + return 0; + + mutex_lock(&priv->adapter_mutex); + + if (rf_kill_active(priv)) { + IPW_DEBUG_HC("Command aborted due to RF kill active.\n"); + goto fail_up; + } + + err = ipw2100_hw_send_command(priv, &cmd); + if (err) { + IPW_DEBUG_INFO("Failed to send HOST_COMPLETE command\n"); + goto fail_up; + } + + err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_ENABLED); + if (err) { + IPW_DEBUG_INFO("%s: card not responding to init command.\n", + priv->net_dev->name); + goto fail_up; + } + + if (priv->stop_hang_check) { + priv->stop_hang_check = 0; + queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2); + } + + fail_up: + mutex_unlock(&priv->adapter_mutex); + return err; +} + +static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv) +{ +#define HW_POWER_DOWN_DELAY (msecs_to_jiffies(100)) + + struct host_command cmd = { + .host_command = HOST_PRE_POWER_DOWN, + .host_command_sequence = 0, + .host_command_length = 0, + }; + int err, i; + u32 reg; + + if (!(priv->status & STATUS_RUNNING)) + return 0; + + priv->status |= STATUS_STOPPING; + + /* We can only shut down the card if the firmware is operational. So, + * if we haven't reset since a fatal_error, then we can not send the + * shutdown commands. */ + if (!priv->fatal_error) { + /* First, make sure the adapter is enabled so that the PHY_OFF + * command can shut it down */ + ipw2100_enable_adapter(priv); + + err = ipw2100_hw_phy_off(priv); + if (err) + printk(KERN_WARNING DRV_NAME + ": Error disabling radio %d\n", err); + + /* + * If in D0-standby mode going directly to D3 may cause a + * PCI bus violation. Therefore we must change out of the D0 + * state. + * + * Sending the PREPARE_FOR_POWER_DOWN will restrict the + * hardware from going into standby mode and will transition + * out of D0-standby if it is already in that state. + * + * STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the + * driver upon completion. Once received, the driver can + * proceed to the D3 state. + * + * Prepare for power down command to fw. This command would + * take HW out of D0-standby and prepare it for D3 state. + * + * Currently FW does not support event notification for this + * event. Therefore, skip waiting for it. 
Just wait a fixed
+		 * 100ms
+		 */
+		IPW_DEBUG_HC("HOST_PRE_POWER_DOWN\n");
+
+		err = ipw2100_hw_send_command(priv, &cmd);
+		if (err)
+			printk(KERN_WARNING DRV_NAME ": "
+			       "%s: Power down command failed: Error %d\n",
+			       priv->net_dev->name, err);
+		else
+			schedule_timeout_uninterruptible(HW_POWER_DOWN_DELAY);
+	}
+
+	priv->status &= ~STATUS_ENABLED;
+
+	/*
+	 * Set GPIO 3 writable by FW; GPIO 1 writable
+	 * by driver and enable clock
+	 */
+	ipw2100_hw_set_gpio(priv);
+
+	/*
+	 * Power down adapter. Sequence:
+	 * 1. Stop master assert (RESET_REG[9]=1)
+	 * 2. Wait for stop master (RESET_REG[8]==1)
+	 * 3. S/w reset assert (RESET_REG[7] = 1)
+	 */
+
+	/* Stop master assert */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);
+
+	/* wait stop master not more than 50 usec.
+	 * Otherwise return error. */
+	for (i = 5; i > 0; i--) {
+		udelay(10);
+
+		/* Check master stop bit */
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
+
+		if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
+			break;
+	}
+
+	if (i == 0)
+		printk(KERN_WARNING DRV_NAME
+		       ": %s: Could not power down adapter.\n",
+		       priv->net_dev->name);
+
+	/* assert s/w reset */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_SW_RESET);
+
+	priv->status &= ~(STATUS_RUNNING | STATUS_STOPPING);
+
+	return 0;
+}
+
+static int ipw2100_disable_adapter(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = CARD_DISABLE,
+		.host_command_sequence = 0,
+		.host_command_length = 0
+	};
+	int err = 0;
+
+	IPW_DEBUG_HC("CARD_DISABLE\n");
+
+	if (!(priv->status & STATUS_ENABLED))
+		return 0;
+
+	/* Make sure we clear the associated state */
+	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
+
+	if (!priv->stop_hang_check) {
+		priv->stop_hang_check = 1;
+		cancel_delayed_work(&priv->hang_check);
+	}
+
+	mutex_lock(&priv->adapter_mutex);
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       ": exit - failed to send CARD_DISABLE command\n");
+		goto fail_up;
+	}
+
+	err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_DISABLED);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       ": exit - card failed to change to DISABLED\n");
+		goto fail_up;
+	}
+
+	IPW_DEBUG_INFO("TODO: implement scan state machine\n");
+
+      fail_up:
+	mutex_unlock(&priv->adapter_mutex);
+	return err;
+}
+
+static int ipw2100_set_scan_options(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = SET_SCAN_OPTIONS,
+		.host_command_sequence = 0,
+		.host_command_length = 8
+	};
+	int err;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	IPW_DEBUG_SCAN("setting scan options\n");
+
+	cmd.host_command_parameters[0] = 0;
+
+	if (!(priv->config & CFG_ASSOCIATE))
+		cmd.host_command_parameters[0] |= IPW_SCAN_NOASSOCIATE;
+	if ((priv->ieee->sec.flags & SEC_ENABLED) && priv->ieee->sec.enabled)
+		cmd.host_command_parameters[0] |= IPW_SCAN_MIXED_CELL;
+	if (priv->config & CFG_PASSIVE_SCAN)
+		cmd.host_command_parameters[0] |= IPW_SCAN_PASSIVE;
+
+	cmd.host_command_parameters[1] = priv->channel_mask;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	IPW_DEBUG_HC("SET_SCAN_OPTIONS 0x%04X\n",
+		     cmd.host_command_parameters[0]);
+
+	return err;
+}
+
+static int ipw2100_start_scan(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = BROADCAST_SCAN,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	IPW_DEBUG_HC("START_SCAN\n");
+
+	cmd.host_command_parameters[0] = 0;
+
+	/* No scanning if in monitor mode */
+	if
(priv->ieee->iw_mode == IW_MODE_MONITOR) + return 1; + + if (priv->status & STATUS_SCANNING) { + IPW_DEBUG_SCAN("Scan requested while already in scan...\n"); + return 0; + } + + IPW_DEBUG_INFO("enter\n"); + + /* Not clearing here; doing so makes iwlist always return nothing... + * + * We should modify the table logic to use aging tables vs. clearing + * the table on each scan start. + */ + IPW_DEBUG_SCAN("starting scan\n"); + + priv->status |= STATUS_SCANNING; + err = ipw2100_hw_send_command(priv, &cmd); + if (err) + priv->status &= ~STATUS_SCANNING; + + IPW_DEBUG_INFO("exit\n"); + + return err; +} + +static const struct libipw_geo ipw_geos[] = { + { /* Restricted */ + "---", + .bg_channels = 14, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}, {2467, 12}, + {2472, 13}, {2484, 14}}, + }, +}; + +static int ipw2100_up(struct ipw2100_priv *priv, int deferred) +{ + unsigned long flags; + int rc = 0; + u32 lock; + u32 ord_len = sizeof(lock); + + /* Age scan list entries found before suspend */ + if (priv->suspend_time) { + libipw_networks_age(priv->ieee, priv->suspend_time); + priv->suspend_time = 0; + } + + /* Quiet if manually disabled. */ + if (priv->status & STATUS_RF_KILL_SW) { + IPW_DEBUG_INFO("%s: Radio is disabled by Manual Disable " + "switch\n", priv->net_dev->name); + return 0; + } + + /* the ipw2100 hardware really doesn't want power management delays + * longer than 175usec + */ + pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175); + + /* If the interrupt is enabled, turn it off... */ + spin_lock_irqsave(&priv->low_lock, flags); + ipw2100_disable_interrupts(priv); + + /* Reset any fatal_error conditions */ + ipw2100_reset_fatalerror(priv); + spin_unlock_irqrestore(&priv->low_lock, flags); + + if (priv->status & STATUS_POWERED || + (priv->status & STATUS_RESET_PENDING)) { + /* Power cycle the card ... */ + if (ipw2100_power_cycle_adapter(priv)) { + printk(KERN_WARNING DRV_NAME + ": %s: Could not cycle adapter.\n", + priv->net_dev->name); + rc = 1; + goto exit; + } + } else + priv->status |= STATUS_POWERED; + + /* Load the firmware, start the clocks, etc. 
*/ + if (ipw2100_start_adapter(priv)) { + printk(KERN_ERR DRV_NAME + ": %s: Failed to start the firmware.\n", + priv->net_dev->name); + rc = 1; + goto exit; + } + + ipw2100_initialize_ordinals(priv); + + /* Determine capabilities of this particular HW configuration */ + if (ipw2100_get_hw_features(priv)) { + printk(KERN_ERR DRV_NAME + ": %s: Failed to determine HW features.\n", + priv->net_dev->name); + rc = 1; + goto exit; + } + + /* Initialize the geo */ + if (libipw_set_geo(priv->ieee, &ipw_geos[0])) { + printk(KERN_WARNING DRV_NAME "Could not set geo\n"); + return 0; + } + priv->ieee->freq_band = LIBIPW_24GHZ_BAND; + + lock = LOCK_NONE; + if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) { + printk(KERN_ERR DRV_NAME + ": %s: Failed to clear ordinal lock.\n", + priv->net_dev->name); + rc = 1; + goto exit; + } + + priv->status &= ~STATUS_SCANNING; + + if (rf_kill_active(priv)) { + printk(KERN_INFO "%s: Radio is disabled by RF switch.\n", + priv->net_dev->name); + + if (priv->stop_rf_kill) { + priv->stop_rf_kill = 0; + queue_delayed_work(priv->workqueue, &priv->rf_kill, + round_jiffies_relative(HZ)); + } + + deferred = 1; + } + + /* Turn on the interrupt so that commands can be processed */ + ipw2100_enable_interrupts(priv); + + /* Send all of the commands that must be sent prior to + * HOST_COMPLETE */ + if (ipw2100_adapter_setup(priv)) { + printk(KERN_ERR DRV_NAME ": %s: Failed to start the card.\n", + priv->net_dev->name); + rc = 1; + goto exit; + } + + if (!deferred) { + /* Enable the adapter - sends HOST_COMPLETE */ + if (ipw2100_enable_adapter(priv)) { + printk(KERN_ERR DRV_NAME ": " + "%s: failed in call to enable adapter.\n", + priv->net_dev->name); + ipw2100_hw_stop_adapter(priv); + rc = 1; + goto exit; + } + + /* Start a scan . . . */ + ipw2100_set_scan_options(priv); + ipw2100_start_scan(priv); + } + + exit: + return rc; +} + +static void ipw2100_down(struct ipw2100_priv *priv) +{ + unsigned long flags; + union iwreq_data wrqu = { + .ap_addr = { + .sa_family = ARPHRD_ETHER} + }; + int associated = priv->status & STATUS_ASSOCIATED; + + /* Kill the RF switch timer */ + if (!priv->stop_rf_kill) { + priv->stop_rf_kill = 1; + cancel_delayed_work(&priv->rf_kill); + } + + /* Kill the firmware hang check timer */ + if (!priv->stop_hang_check) { + priv->stop_hang_check = 1; + cancel_delayed_work(&priv->hang_check); + } + + /* Kill any pending resets */ + if (priv->status & STATUS_RESET_PENDING) + cancel_delayed_work(&priv->reset_work); + + /* Make sure the interrupt is on so that FW commands will be + * processed correctly */ + spin_lock_irqsave(&priv->low_lock, flags); + ipw2100_enable_interrupts(priv); + spin_unlock_irqrestore(&priv->low_lock, flags); + + if (ipw2100_hw_stop_adapter(priv)) + printk(KERN_ERR DRV_NAME ": %s: Error stopping adapter.\n", + priv->net_dev->name); + + /* Do not disable the interrupt until _after_ we disable + * the adaptor. 
Otherwise the CARD_DISABLE command will never + * be ack'd by the firmware */ + spin_lock_irqsave(&priv->low_lock, flags); + ipw2100_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->low_lock, flags); + + pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", + PM_QOS_DEFAULT_VALUE); + + /* We have to signal any supplicant if we are disassociating */ + if (associated) + wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); + + priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); + netif_carrier_off(priv->net_dev); + netif_stop_queue(priv->net_dev); +} + +/* Called by register_netdev() */ +static int ipw2100_net_init(struct net_device *dev) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + const struct libipw_geo *geo = libipw_get_geo(priv->ieee); + struct wireless_dev *wdev = &priv->ieee->wdev; + int ret; + int i; + + ret = ipw2100_up(priv, 1); + if (ret) + return ret; + + memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); + + /* fill-out priv->ieee->bg_band */ + if (geo->bg_channels) { + struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band; + + bg_band->band = IEEE80211_BAND_2GHZ; + bg_band->n_channels = geo->bg_channels; + bg_band->channels = + kzalloc(geo->bg_channels * + sizeof(struct ieee80211_channel), GFP_KERNEL); + /* translate geo->bg to bg_band.channels */ + for (i = 0; i < geo->bg_channels; i++) { + bg_band->channels[i].band = IEEE80211_BAND_2GHZ; + bg_band->channels[i].center_freq = geo->bg[i].freq; + bg_band->channels[i].hw_value = geo->bg[i].channel; + bg_band->channels[i].max_power = geo->bg[i].max_power; + if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) + bg_band->channels[i].flags |= + IEEE80211_CHAN_PASSIVE_SCAN; + if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS) + bg_band->channels[i].flags |= + IEEE80211_CHAN_NO_IBSS; + if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT) + bg_band->channels[i].flags |= + IEEE80211_CHAN_RADAR; + /* No equivalent for LIBIPW_CH_80211H_RULES, + LIBIPW_CH_UNIFORM_SPREADING, or + LIBIPW_CH_B_ONLY... 
*/ + } + /* point at bitrate info */ + bg_band->bitrates = ipw2100_bg_rates; + bg_band->n_bitrates = RATE_COUNT; + + wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; + } + + set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); + if (wiphy_register(wdev->wiphy)) { + ipw2100_down(priv); + return -EIO; + } + return 0; +} + +static void ipw2100_reset_adapter(struct work_struct *work) +{ + struct ipw2100_priv *priv = + container_of(work, struct ipw2100_priv, reset_work.work); + unsigned long flags; + union iwreq_data wrqu = { + .ap_addr = { + .sa_family = ARPHRD_ETHER} + }; + int associated = priv->status & STATUS_ASSOCIATED; + + spin_lock_irqsave(&priv->low_lock, flags); + IPW_DEBUG_INFO(": %s: Restarting adapter.\n", priv->net_dev->name); + priv->resets++; + priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); + priv->status |= STATUS_SECURITY_UPDATED; + + /* Force a power cycle even if interface hasn't been opened + * yet */ + cancel_delayed_work(&priv->reset_work); + priv->status |= STATUS_RESET_PENDING; + spin_unlock_irqrestore(&priv->low_lock, flags); + + mutex_lock(&priv->action_mutex); + /* stop timed checks so that they don't interfere with reset */ + priv->stop_hang_check = 1; + cancel_delayed_work(&priv->hang_check); + + /* We have to signal any supplicant if we are disassociating */ + if (associated) + wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); + + ipw2100_up(priv, 0); + mutex_unlock(&priv->action_mutex); + +} + +static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) +{ + +#define MAC_ASSOCIATION_READ_DELAY (HZ) + int ret; + unsigned int len, essid_len; + char essid[IW_ESSID_MAX_SIZE]; + u32 txrate; + u32 chan; + char *txratename; + u8 bssid[ETH_ALEN]; + DECLARE_SSID_BUF(ssid); + + /* + * TBD: BSSID is usually 00:00:00:00:00:00 here and not + * an actual MAC of the AP. Seems like FW sets this + * address too late. 
Read it later and expose through
+	 * /proc or schedule a later task to query and update
+	 */
+
+	essid_len = IW_ESSID_MAX_SIZE;
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID,
+				  essid, &essid_len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+		return;
+	}
+
+	len = sizeof(u32);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &txrate, &len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+		return;
+	}
+
+	len = sizeof(u32);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+		return;
+	}
+	len = ETH_ALEN;
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid,
+				  &len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+		return;
+	}
+	memcpy(priv->ieee->bssid, bssid, ETH_ALEN);
+
+	switch (txrate) {
+	case TX_RATE_1_MBIT:
+		txratename = "1Mbps";
+		break;
+	case TX_RATE_2_MBIT:
+		txratename = "2Mbps";
+		break;
+	case TX_RATE_5_5_MBIT:
+		txratename = "5.5Mbps";
+		break;
+	case TX_RATE_11_MBIT:
+		txratename = "11Mbps";
+		break;
+	default:
+		IPW_DEBUG_INFO("Unknown rate: %d\n", txrate);
+		txratename = "unknown rate";
+		break;
+	}
+
+	IPW_DEBUG_INFO("%s: Associated with '%s' at %s, channel %d (BSSID=%pM)\n",
+		       priv->net_dev->name, print_ssid(ssid, essid, essid_len),
+		       txratename, chan, bssid);
+
+	/* now we copy read ssid into dev */
+	if (!(priv->config & CFG_STATIC_ESSID)) {
+		priv->essid_len = min((u8) essid_len, (u8) IW_ESSID_MAX_SIZE);
+		memcpy(priv->essid, essid, priv->essid_len);
+	}
+	priv->channel = chan;
+	memcpy(priv->bssid, bssid, ETH_ALEN);
+
+	priv->status |= STATUS_ASSOCIATING;
+	priv->connect_start = get_seconds();
+
+	queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10);
+}
+
+static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
+			     int length, int batch_mode)
+{
+	int ssid_len = min(length, IW_ESSID_MAX_SIZE);
+	struct host_command cmd = {
+		.host_command = SSID,
+		.host_command_sequence = 0,
+		.host_command_length = ssid_len
+	};
+	int err;
+	DECLARE_SSID_BUF(ssid);
+
+	IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
+
+	if (ssid_len)
+		memcpy(cmd.host_command_parameters, essid, ssid_len);
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	/* Bug in FW currently doesn't honor bit 0 in SET_SCAN_OPTIONS to
+	 * disable auto association -- so we cheat by setting a bogus SSID */
+	if (!ssid_len && !(priv->config & CFG_ASSOCIATE)) {
+		int i;
+		u8 *bogus = (u8 *) cmd.host_command_parameters;
+		for (i = 0; i < IW_ESSID_MAX_SIZE; i++)
+			bogus[i] = 0x18 + i;
+		cmd.host_command_length = IW_ESSID_MAX_SIZE;
+	}
+
+	/* NOTE: We always send the SSID command even if the provided ESSID is
+	 * the same as what we currently think is set.
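+	 * (priv->essid and priv->essid_len are only updated below once the
+	 * firmware has accepted the command.)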
*/ + + err = ipw2100_hw_send_command(priv, &cmd); + if (!err) { + memset(priv->essid + ssid_len, 0, IW_ESSID_MAX_SIZE - ssid_len); + memcpy(priv->essid, essid, ssid_len); + priv->essid_len = ssid_len; + } + + if (!batch_mode) { + if (ipw2100_enable_adapter(priv)) + err = -EIO; + } + + return err; +} + +static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) +{ + DECLARE_SSID_BUF(ssid); + + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, + "disassociated: '%s' %pM \n", + print_ssid(ssid, priv->essid, priv->essid_len), + priv->bssid); + + priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); + + if (priv->status & STATUS_STOPPING) { + IPW_DEBUG_INFO("Card is stopping itself, discard ASSN_LOST.\n"); + return; + } + + memset(priv->bssid, 0, ETH_ALEN); + memset(priv->ieee->bssid, 0, ETH_ALEN); + + netif_carrier_off(priv->net_dev); + netif_stop_queue(priv->net_dev); + + if (!(priv->status & STATUS_RUNNING)) + return; + + if (priv->status & STATUS_SECURITY_UPDATED) + queue_delayed_work(priv->workqueue, &priv->security_work, 0); + + queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0); +} + +static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) +{ + IPW_DEBUG_INFO("%s: RF Kill state changed to radio OFF.\n", + priv->net_dev->name); + + /* RF_KILL is now enabled (else we wouldn't be here) */ + wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true); + priv->status |= STATUS_RF_KILL_HW; + + /* Make sure the RF Kill check timer is running */ + priv->stop_rf_kill = 0; + cancel_delayed_work(&priv->rf_kill); + queue_delayed_work(priv->workqueue, &priv->rf_kill, + round_jiffies_relative(HZ)); +} + +static void send_scan_event(void *data) +{ + struct ipw2100_priv *priv = data; + union iwreq_data wrqu; + + wrqu.data.length = 0; + wrqu.data.flags = 0; + wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); +} + +static void ipw2100_scan_event_later(struct work_struct *work) +{ + send_scan_event(container_of(work, struct ipw2100_priv, + scan_event_later.work)); +} + +static void ipw2100_scan_event_now(struct work_struct *work) +{ + send_scan_event(container_of(work, struct ipw2100_priv, + scan_event_now)); +} + +static void isr_scan_complete(struct ipw2100_priv *priv, u32 status) +{ + IPW_DEBUG_SCAN("scan complete\n"); + /* Age the scan results... 
*/ + priv->ieee->scans++; + priv->status &= ~STATUS_SCANNING; + + /* Only userspace-requested scan completion events go out immediately */ + if (!priv->user_requested_scan) { + if (!delayed_work_pending(&priv->scan_event_later)) + queue_delayed_work(priv->workqueue, + &priv->scan_event_later, + round_jiffies_relative(msecs_to_jiffies(4000))); + } else { + priv->user_requested_scan = 0; + cancel_delayed_work(&priv->scan_event_later); + queue_work(priv->workqueue, &priv->scan_event_now); + } +} + +#ifdef CONFIG_IPW2100_DEBUG +#define IPW2100_HANDLER(v, f) { v, f, # v } +struct ipw2100_status_indicator { + int status; + void (*cb) (struct ipw2100_priv * priv, u32 status); + char *name; +}; +#else +#define IPW2100_HANDLER(v, f) { v, f } +struct ipw2100_status_indicator { + int status; + void (*cb) (struct ipw2100_priv * priv, u32 status); +}; +#endif /* CONFIG_IPW2100_DEBUG */ + +static void isr_indicate_scanning(struct ipw2100_priv *priv, u32 status) +{ + IPW_DEBUG_SCAN("Scanning...\n"); + priv->status |= STATUS_SCANNING; +} + +static const struct ipw2100_status_indicator status_handlers[] = { + IPW2100_HANDLER(IPW_STATE_INITIALIZED, NULL), + IPW2100_HANDLER(IPW_STATE_COUNTRY_FOUND, NULL), + IPW2100_HANDLER(IPW_STATE_ASSOCIATED, isr_indicate_associated), + IPW2100_HANDLER(IPW_STATE_ASSN_LOST, isr_indicate_association_lost), + IPW2100_HANDLER(IPW_STATE_ASSN_CHANGED, NULL), + IPW2100_HANDLER(IPW_STATE_SCAN_COMPLETE, isr_scan_complete), + IPW2100_HANDLER(IPW_STATE_ENTERED_PSP, NULL), + IPW2100_HANDLER(IPW_STATE_LEFT_PSP, NULL), + IPW2100_HANDLER(IPW_STATE_RF_KILL, isr_indicate_rf_kill), + IPW2100_HANDLER(IPW_STATE_DISABLED, NULL), + IPW2100_HANDLER(IPW_STATE_POWER_DOWN, NULL), + IPW2100_HANDLER(IPW_STATE_SCANNING, isr_indicate_scanning), + IPW2100_HANDLER(-1, NULL) +}; + +static void isr_status_change(struct ipw2100_priv *priv, int status) +{ + int i; + + if (status == IPW_STATE_SCANNING && + priv->status & STATUS_ASSOCIATED && + !(priv->status & STATUS_SCANNING)) { + IPW_DEBUG_INFO("Scan detected while associated, with " + "no scan request. 
Restarting firmware.\n"); + + /* Wake up any sleeping jobs */ + schedule_reset(priv); + } + + for (i = 0; status_handlers[i].status != -1; i++) { + if (status == status_handlers[i].status) { + IPW_DEBUG_NOTIF("Status change: %s\n", + status_handlers[i].name); + if (status_handlers[i].cb) + status_handlers[i].cb(priv, status); + priv->wstats.status = status; + return; + } + } + + IPW_DEBUG_NOTIF("unknown status received: %04x\n", status); +} + +static void isr_rx_complete_command(struct ipw2100_priv *priv, + struct ipw2100_cmd_header *cmd) +{ +#ifdef CONFIG_IPW2100_DEBUG + if (cmd->host_command_reg < ARRAY_SIZE(command_types)) { + IPW_DEBUG_HC("Command completed '%s (%d)'\n", + command_types[cmd->host_command_reg], + cmd->host_command_reg); + } +#endif + if (cmd->host_command_reg == HOST_COMPLETE) + priv->status |= STATUS_ENABLED; + + if (cmd->host_command_reg == CARD_DISABLE) + priv->status &= ~STATUS_ENABLED; + + priv->status &= ~STATUS_CMD_ACTIVE; + + wake_up_interruptible(&priv->wait_command_queue); +} + +#ifdef CONFIG_IPW2100_DEBUG +static const char *frame_types[] = { + "COMMAND_STATUS_VAL", + "STATUS_CHANGE_VAL", + "P80211_DATA_VAL", + "P8023_DATA_VAL", + "HOST_NOTIFICATION_VAL" +}; +#endif + +static int ipw2100_alloc_skb(struct ipw2100_priv *priv, + struct ipw2100_rx_packet *packet) +{ + packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx)); + if (!packet->skb) + return -ENOMEM; + + packet->rxp = (struct ipw2100_rx *)packet->skb->data; + packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data, + sizeof(struct ipw2100_rx), + PCI_DMA_FROMDEVICE); + /* NOTE: pci_map_single does not return an error code, and 0 is a valid + * dma_addr */ + + return 0; +} + +#define SEARCH_ERROR 0xffffffff +#define SEARCH_FAIL 0xfffffffe +#define SEARCH_SUCCESS 0xfffffff0 +#define SEARCH_DISCARD 0 +#define SEARCH_SNAPSHOT 1 + +#define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff)) +static void ipw2100_snapshot_free(struct ipw2100_priv *priv) +{ + int i; + if (!priv->snapshot[0]) + return; + for (i = 0; i < 0x30; i++) + kfree(priv->snapshot[i]); + priv->snapshot[0] = NULL; +} + +#ifdef IPW2100_DEBUG_C3 +static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv) +{ + int i; + if (priv->snapshot[0]) + return 1; + for (i = 0; i < 0x30; i++) { + priv->snapshot[i] = kmalloc(0x1000, GFP_ATOMIC); + if (!priv->snapshot[i]) { + IPW_DEBUG_INFO("%s: Error allocating snapshot " + "buffer %d\n", priv->net_dev->name, i); + while (i > 0) + kfree(priv->snapshot[--i]); + priv->snapshot[0] = NULL; + return 0; + } + } + + return 1; +} + +static u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf, + size_t len, int mode) +{ + u32 i, j; + u32 tmp; + u8 *s, *d; + u32 ret; + + s = in_buf; + if (mode == SEARCH_SNAPSHOT) { + if (!ipw2100_snapshot_alloc(priv)) + mode = SEARCH_DISCARD; + } + + for (ret = SEARCH_FAIL, i = 0; i < 0x30000; i += 4) { + read_nic_dword(priv->net_dev, i, &tmp); + if (mode == SEARCH_SNAPSHOT) + *(u32 *) SNAPSHOT_ADDR(i) = tmp; + if (ret == SEARCH_FAIL) { + d = (u8 *) & tmp; + for (j = 0; j < 4; j++) { + if (*s != *d) { + s = in_buf; + continue; + } + + s++; + d++; + + if ((s - in_buf) == len) + ret = (i + j) - len + 1; + } + } else if (mode == SEARCH_DISCARD) + return ret; + } + + return ret; +} +#endif + +/* + * + * 0) Disconnect the SKB from the firmware (just unmap) + * 1) Pack the ETH header into the SKB + * 2) Pass the SKB to the network stack + * + * When packet is provided by the firmware, it contains the following: + * + * . libipw_hdr + * . 
libipw_snap_hdr
+ *
+ * The size of the constructed ethernet
+ *
+ */
+#ifdef IPW2100_RX_DEBUG
+static u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH];
+#endif
+
+static void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i)
+{
+#ifdef IPW2100_DEBUG_C3
+	struct ipw2100_status *status = &priv->status_queue.drv[i];
+	u32 match, reg;
+	int j;
+#endif
+
+	IPW_DEBUG_INFO(": PCI latency error detected at 0x%04zX.\n",
+		       i * sizeof(struct ipw2100_status));
+
+#ifdef IPW2100_DEBUG_C3
+	/* Halt the firmware so we can get a good image */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);
+	j = 5;
+	do {
+		udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
+
+		if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
+			break;
+	} while (j--);
+
+	match = ipw2100_match_buf(priv, (u8 *) status,
+				  sizeof(struct ipw2100_status),
+				  SEARCH_SNAPSHOT);
+	if (match < SEARCH_SUCCESS)
+		IPW_DEBUG_INFO("%s: DMA status match in Firmware at "
+			       "offset 0x%06X, length %d:\n",
+			       priv->net_dev->name, match,
+			       sizeof(struct ipw2100_status));
+	else
+		IPW_DEBUG_INFO("%s: No DMA status match in "
+			       "Firmware.\n", priv->net_dev->name);
+
+	printk_buf((u8 *) priv->status_queue.drv,
+		   sizeof(struct ipw2100_status) * RX_QUEUE_LENGTH);
+#endif
+
+	priv->fatal_error = IPW2100_ERR_C3_CORRUPTION;
+	priv->net_dev->stats.rx_errors++;
+	schedule_reset(priv);
+}
+
+static void isr_rx(struct ipw2100_priv *priv, int i,
+		   struct libipw_rx_stats *stats)
+{
+	struct net_device *dev = priv->net_dev;
+	struct ipw2100_status *status = &priv->status_queue.drv[i];
+	struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
+
+	IPW_DEBUG_RX("Handler...\n");
+
+	if (unlikely(status->frame_size > skb_tailroom(packet->skb))) {
+		IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!"
+			       " Dropping.\n",
+			       dev->name,
+			       status->frame_size, skb_tailroom(packet->skb));
+		dev->stats.rx_errors++;
+		return;
+	}
+
+	if (unlikely(!netif_running(dev))) {
+		dev->stats.rx_errors++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
+		return;
+	}
+
+	if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR &&
+		     !(priv->status & STATUS_ASSOCIATED))) {
+		IPW_DEBUG_DROP("Dropping packet while not associated.\n");
+		priv->wstats.discard.misc++;
+		return;
+	}
+
+	pci_unmap_single(priv->pci_dev,
+			 packet->dma_addr,
+			 sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
+
+	skb_put(packet->skb, status->frame_size);
+
+#ifdef IPW2100_RX_DEBUG
+	/* Make a copy of the frame so we can dump it to the logs if
+	 * libipw_rx fails */
+	skb_copy_from_linear_data(packet->skb, packet_data,
+				  min_t(u32, status->frame_size,
+					IPW_RX_NIC_BUFFER_LENGTH));
+#endif
+
+	if (!libipw_rx(priv->ieee, packet->skb, stats)) {
+#ifdef IPW2100_RX_DEBUG
+		IPW_DEBUG_DROP("%s: Non consumed packet:\n",
+			       dev->name);
+		printk_buf(IPW_DL_DROP, packet_data, status->frame_size);
+#endif
+		dev->stats.rx_errors++;
+
+		/* libipw_rx failed, so it didn't free the SKB */
+		dev_kfree_skb_any(packet->skb);
+		packet->skb = NULL;
+	}
+
+	/* We need to allocate a new SKB and attach it to the RDB.
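+	 * If that allocation fails, the driver only logs the problem; the
+	 * adapter shutdown mentioned in the message is still a TODO below.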
*/ + if (unlikely(ipw2100_alloc_skb(priv, packet))) { + printk(KERN_WARNING DRV_NAME ": " + "%s: Unable to allocate SKB onto RBD ring - disabling " + "adapter.\n", dev->name); + /* TODO: schedule adapter shutdown */ + IPW_DEBUG_INFO("TODO: Shutdown adapter...\n"); + } + + /* Update the RDB entry */ + priv->rx_queue.drv[i].host_addr = packet->dma_addr; +} + +#ifdef CONFIG_IPW2100_MONITOR + +static void isr_rx_monitor(struct ipw2100_priv *priv, int i, + struct libipw_rx_stats *stats) +{ + struct net_device *dev = priv->net_dev; + struct ipw2100_status *status = &priv->status_queue.drv[i]; + struct ipw2100_rx_packet *packet = &priv->rx_buffers[i]; + + /* Magic struct that slots into the radiotap header -- no reason + * to build this manually element by element, we can write it much + * more efficiently than we can parse it. ORDER MATTERS HERE */ + struct ipw_rt_hdr { + struct ieee80211_radiotap_header rt_hdr; + s8 rt_dbmsignal; /* signal in dbM, kluged to signed */ + } *ipw_rt; + + IPW_DEBUG_RX("Handler...\n"); + + if (unlikely(status->frame_size > skb_tailroom(packet->skb) - + sizeof(struct ipw_rt_hdr))) { + IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!" + " Dropping.\n", + dev->name, + status->frame_size, + skb_tailroom(packet->skb)); + dev->stats.rx_errors++; + return; + } + + if (unlikely(!netif_running(dev))) { + dev->stats.rx_errors++; + priv->wstats.discard.misc++; + IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); + return; + } + + if (unlikely(priv->config & CFG_CRC_CHECK && + status->flags & IPW_STATUS_FLAG_CRC_ERROR)) { + IPW_DEBUG_RX("CRC error in packet. Dropping.\n"); + dev->stats.rx_errors++; + return; + } + + pci_unmap_single(priv->pci_dev, packet->dma_addr, + sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); + memmove(packet->skb->data + sizeof(struct ipw_rt_hdr), + packet->skb->data, status->frame_size); + + ipw_rt = (struct ipw_rt_hdr *) packet->skb->data; + + ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; + ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ + ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total hdr+data */ + + ipw_rt->rt_hdr.it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); + + ipw_rt->rt_dbmsignal = status->rssi + IPW2100_RSSI_TO_DBM; + + skb_put(packet->skb, status->frame_size + sizeof(struct ipw_rt_hdr)); + + if (!libipw_rx(priv->ieee, packet->skb, stats)) { + dev->stats.rx_errors++; + + /* libipw_rx failed, so it didn't free the SKB */ + dev_kfree_skb_any(packet->skb); + packet->skb = NULL; + } + + /* We need to allocate a new SKB and attach it to the RDB. 
*/ + if (unlikely(ipw2100_alloc_skb(priv, packet))) { + IPW_DEBUG_WARNING( + "%s: Unable to allocate SKB onto RBD ring - disabling " + "adapter.\n", dev->name); + /* TODO: schedule adapter shutdown */ + IPW_DEBUG_INFO("TODO: Shutdown adapter...\n"); + } + + /* Update the RDB entry */ + priv->rx_queue.drv[i].host_addr = packet->dma_addr; +} + +#endif + +static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i) +{ + struct ipw2100_status *status = &priv->status_queue.drv[i]; + struct ipw2100_rx *u = priv->rx_buffers[i].rxp; + u16 frame_type = status->status_fields & STATUS_TYPE_MASK; + + switch (frame_type) { + case COMMAND_STATUS_VAL: + return (status->frame_size != sizeof(u->rx_data.command)); + case STATUS_CHANGE_VAL: + return (status->frame_size != sizeof(u->rx_data.status)); + case HOST_NOTIFICATION_VAL: + return (status->frame_size < sizeof(u->rx_data.notification)); + case P80211_DATA_VAL: + case P8023_DATA_VAL: +#ifdef CONFIG_IPW2100_MONITOR + return 0; +#else + switch (WLAN_FC_GET_TYPE(le16_to_cpu(u->rx_data.header.frame_ctl))) { + case IEEE80211_FTYPE_MGMT: + case IEEE80211_FTYPE_CTL: + return 0; + case IEEE80211_FTYPE_DATA: + return (status->frame_size > + IPW_MAX_802_11_PAYLOAD_LENGTH); + } +#endif + } + + return 1; +} + +/* + * ipw2100 interrupts are disabled at this point, and the ISR + * is the only code that calls this method. So, we do not need + * to play with any locks. + * + * RX Queue works as follows: + * + * Read index - firmware places packet in entry identified by the + * Read index and advances Read index. In this manner, + * Read index will always point to the next packet to + * be filled--but not yet valid. + * + * Write index - driver fills this entry with an unused RBD entry. + * This entry has not filled by the firmware yet. + * + * In between the W and R indexes are the RBDs that have been received + * but not yet processed. + * + * The process of handling packets will start at WRITE + 1 and advance + * until it reaches the READ index. + * + * The WRITE index is cached in the variable 'priv->rx_queue.next'. 
+ * + */ +static void __ipw2100_rx_process(struct ipw2100_priv *priv) +{ + struct ipw2100_bd_queue *rxq = &priv->rx_queue; + struct ipw2100_status_queue *sq = &priv->status_queue; + struct ipw2100_rx_packet *packet; + u16 frame_type; + u32 r, w, i, s; + struct ipw2100_rx *u; + struct libipw_rx_stats stats = { + .mac_time = jiffies, + }; + + read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_READ_INDEX, &r); + read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, &w); + + if (r >= rxq->entries) { + IPW_DEBUG_RX("exit - bad read index\n"); + return; + } + + i = (rxq->next + 1) % rxq->entries; + s = i; + while (i != r) { + /* IPW_DEBUG_RX("r = %d : w = %d : processing = %d\n", + r, rxq->next, i); */ + + packet = &priv->rx_buffers[i]; + + /* Sync the DMA for the STATUS buffer so CPU is sure to get + * the correct values */ + pci_dma_sync_single_for_cpu(priv->pci_dev, + sq->nic + + sizeof(struct ipw2100_status) * i, + sizeof(struct ipw2100_status), + PCI_DMA_FROMDEVICE); + + /* Sync the DMA for the RX buffer so CPU is sure to get + * the correct values */ + pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr, + sizeof(struct ipw2100_rx), + PCI_DMA_FROMDEVICE); + + if (unlikely(ipw2100_corruption_check(priv, i))) { + ipw2100_corruption_detected(priv, i); + goto increment; + } + + u = packet->rxp; + frame_type = sq->drv[i].status_fields & STATUS_TYPE_MASK; + stats.rssi = sq->drv[i].rssi + IPW2100_RSSI_TO_DBM; + stats.len = sq->drv[i].frame_size; + + stats.mask = 0; + if (stats.rssi != 0) + stats.mask |= LIBIPW_STATMASK_RSSI; + stats.freq = LIBIPW_24GHZ_BAND; + + IPW_DEBUG_RX("%s: '%s' frame type received (%d).\n", + priv->net_dev->name, frame_types[frame_type], + stats.len); + + switch (frame_type) { + case COMMAND_STATUS_VAL: + /* Reset Rx watchdog */ + isr_rx_complete_command(priv, &u->rx_data.command); + break; + + case STATUS_CHANGE_VAL: + isr_status_change(priv, u->rx_data.status); + break; + + case P80211_DATA_VAL: + case P8023_DATA_VAL: +#ifdef CONFIG_IPW2100_MONITOR + if (priv->ieee->iw_mode == IW_MODE_MONITOR) { + isr_rx_monitor(priv, i, &stats); + break; + } +#endif + if (stats.len < sizeof(struct libipw_hdr_3addr)) + break; + switch (WLAN_FC_GET_TYPE(le16_to_cpu(u->rx_data.header.frame_ctl))) { + case IEEE80211_FTYPE_MGMT: + libipw_rx_mgt(priv->ieee, + &u->rx_data.header, &stats); + break; + + case IEEE80211_FTYPE_CTL: + break; + + case IEEE80211_FTYPE_DATA: + isr_rx(priv, i, &stats); + break; + + } + break; + } + + increment: + /* clear status field associated with this RBD */ + rxq->drv[i].status.info.field = 0; + + i = (i + 1) % rxq->entries; + } + + if (i != s) { + /* backtrack one entry, wrapping to end if at 0 */ + rxq->next = (i ? i : rxq->entries) - 1; + + write_register(priv->net_dev, + IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, rxq->next); + } +} + +/* + * __ipw2100_tx_process + * + * This routine will determine whether the next packet on + * the fw_pend_list has been processed by the firmware yet. + * + * If not, then it does nothing and returns. + * + * If so, then it removes the item from the fw_pend_list, frees + * any associated storage, and places the item back on the + * free list of its source (either msg_free_list or tx_free_list) + * + * TX Queue works as follows: + * + * Read index - points to the next TBD that the firmware will + * process. The firmware will read the data, and once + * done processing, it will advance the Read index. + * + * Write index - driver fills this entry with an constructed TBD + * entry. 
The Write index is not advanced until the + * packet has been configured. + * + * In between the W and R indexes are the TBDs that have NOT been + * processed. Lagging behind the R index are packets that have + * been processed but have not been freed by the driver. + * + * In order to free old storage, an internal index will be maintained + * that points to the next packet to be freed. When all used + * packets have been freed, the oldest index will be the same as the + * firmware's read index. + * + * The OLDEST index is cached in the variable 'priv->tx_queue.oldest' + * + * Because the TBD structure can not contain arbitrary data, the + * driver must keep an internal queue of cached allocations such that + * it can put that data back into the tx_free_list and msg_free_list + * for use by future command and data packets. + * + */ +static int __ipw2100_tx_process(struct ipw2100_priv *priv) +{ + struct ipw2100_bd_queue *txq = &priv->tx_queue; + struct ipw2100_bd *tbd; + struct list_head *element; + struct ipw2100_tx_packet *packet; + int descriptors_used; + int e, i; + u32 r, w, frag_num = 0; + + if (list_empty(&priv->fw_pend_list)) + return 0; + + element = priv->fw_pend_list.next; + + packet = list_entry(element, struct ipw2100_tx_packet, list); + tbd = &txq->drv[packet->index]; + + /* Determine how many TBD entries must be finished... */ + switch (packet->type) { + case COMMAND: + /* COMMAND uses only one slot; don't advance */ + descriptors_used = 1; + e = txq->oldest; + break; + + case DATA: + /* DATA uses two slots; advance and loop position. */ + descriptors_used = tbd->num_fragments; + frag_num = tbd->num_fragments - 1; + e = txq->oldest + frag_num; + e %= txq->entries; + break; + + default: + printk(KERN_WARNING DRV_NAME ": %s: Bad fw_pend_list entry!\n", + priv->net_dev->name); + return 0; + } + + /* if the last TBD is not done by NIC yet, then packet is + * not ready to be released. + * + */ + read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX, + &r); + read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX, + &w); + if (w != txq->next) + printk(KERN_WARNING DRV_NAME ": %s: write index mismatch\n", + priv->net_dev->name); + + /* + * txq->next is the index of the last packet written txq->oldest is + * the index of the r is the index of the next packet to be read by + * firmware + */ + + /* + * Quick graphic to help you visualize the following + * if / else statement + * + * ===>| s---->|=============== + * e>| + * | a | b | c | d | e | f | g | h | i | j | k | l + * r---->| + * w + * + * w - updated by driver + * r - updated by firmware + * s - start of oldest BD entry (txq->oldest) + * e - end of oldest BD entry + * + */ + if (!((r <= w && (e < r || e >= w)) || (e < r && e >= w))) { + IPW_DEBUG_TX("exit - no processed packets ready to release.\n"); + return 0; + } + + list_del(element); + DEC_STAT(&priv->fw_pend_stat); + +#ifdef CONFIG_IPW2100_DEBUG + { + i = txq->oldest; + IPW_DEBUG_TX("TX%d V=%p P=%04X T=%04X L=%d\n", i, + &txq->drv[i], + (u32) (txq->nic + i * sizeof(struct ipw2100_bd)), + txq->drv[i].host_addr, txq->drv[i].buf_length); + + if (packet->type == DATA) { + i = (i + 1) % txq->entries; + + IPW_DEBUG_TX("TX%d V=%p P=%04X T=%04X L=%d\n", i, + &txq->drv[i], + (u32) (txq->nic + i * + sizeof(struct ipw2100_bd)), + (u32) txq->drv[i].host_addr, + txq->drv[i].buf_length); + } + } +#endif + + switch (packet->type) { + case DATA: + if (txq->drv[txq->oldest].status.info.fields.txType != 0) + printk(KERN_WARNING DRV_NAME ": %s: Queue mismatch. 
" + "Expecting DATA TBD but pulled " + "something else: ids %d=%d.\n", + priv->net_dev->name, txq->oldest, packet->index); + + /* DATA packet; we have to unmap and free the SKB */ + for (i = 0; i < frag_num; i++) { + tbd = &txq->drv[(packet->index + 1 + i) % txq->entries]; + + IPW_DEBUG_TX("TX%d P=%08x L=%d\n", + (packet->index + 1 + i) % txq->entries, + tbd->host_addr, tbd->buf_length); + + pci_unmap_single(priv->pci_dev, + tbd->host_addr, + tbd->buf_length, PCI_DMA_TODEVICE); + } + + libipw_txb_free(packet->info.d_struct.txb); + packet->info.d_struct.txb = NULL; + + list_add_tail(element, &priv->tx_free_list); + INC_STAT(&priv->tx_free_stat); + + /* We have a free slot in the Tx queue, so wake up the + * transmit layer if it is stopped. */ + if (priv->status & STATUS_ASSOCIATED) + netif_wake_queue(priv->net_dev); + + /* A packet was processed by the hardware, so update the + * watchdog */ + priv->net_dev->trans_start = jiffies; + + break; + + case COMMAND: + if (txq->drv[txq->oldest].status.info.fields.txType != 1) + printk(KERN_WARNING DRV_NAME ": %s: Queue mismatch. " + "Expecting COMMAND TBD but pulled " + "something else: ids %d=%d.\n", + priv->net_dev->name, txq->oldest, packet->index); + +#ifdef CONFIG_IPW2100_DEBUG + if (packet->info.c_struct.cmd->host_command_reg < + ARRAY_SIZE(command_types)) + IPW_DEBUG_TX("Command '%s (%d)' processed: %d.\n", + command_types[packet->info.c_struct.cmd-> + host_command_reg], + packet->info.c_struct.cmd-> + host_command_reg, + packet->info.c_struct.cmd->cmd_status_reg); +#endif + + list_add_tail(element, &priv->msg_free_list); + INC_STAT(&priv->msg_free_stat); + break; + } + + /* advance oldest used TBD pointer to start of next entry */ + txq->oldest = (e + 1) % txq->entries; + /* increase available TBDs number */ + txq->available += descriptors_used; + SET_STAT(&priv->txq_stat, txq->available); + + IPW_DEBUG_TX("packet latency (send to process) %ld jiffies\n", + jiffies - packet->jiffy_start); + + return (!list_empty(&priv->fw_pend_list)); +} + +static inline void __ipw2100_tx_complete(struct ipw2100_priv *priv) +{ + int i = 0; + + while (__ipw2100_tx_process(priv) && i < 200) + i++; + + if (i == 200) { + printk(KERN_WARNING DRV_NAME ": " + "%s: Driver is running slow (%d iters).\n", + priv->net_dev->name, i); + } +} + +static void ipw2100_tx_send_commands(struct ipw2100_priv *priv) +{ + struct list_head *element; + struct ipw2100_tx_packet *packet; + struct ipw2100_bd_queue *txq = &priv->tx_queue; + struct ipw2100_bd *tbd; + int next = txq->next; + + while (!list_empty(&priv->msg_pend_list)) { + /* if there isn't enough space in TBD queue, then + * don't stuff a new one in. 
+ * NOTE: 3 are needed as a command will take one, + * and there is a minimum of 2 that must be + * maintained between the r and w indexes + */ + if (txq->available <= 3) { + IPW_DEBUG_TX("no room in tx_queue\n"); + break; + } + + element = priv->msg_pend_list.next; + list_del(element); + DEC_STAT(&priv->msg_pend_stat); + + packet = list_entry(element, struct ipw2100_tx_packet, list); + + IPW_DEBUG_TX("using TBD at virt=%p, phys=%p\n", + &txq->drv[txq->next], + (void *)(txq->nic + txq->next * + sizeof(struct ipw2100_bd))); + + packet->index = txq->next; + + tbd = &txq->drv[txq->next]; + + /* initialize TBD */ + tbd->host_addr = packet->info.c_struct.cmd_phys; + tbd->buf_length = sizeof(struct ipw2100_cmd_header); + /* not marking number of fragments causes problems + * with f/w debug version */ + tbd->num_fragments = 1; + tbd->status.info.field = + IPW_BD_STATUS_TX_FRAME_COMMAND | + IPW_BD_STATUS_TX_INTERRUPT_ENABLE; + + /* update TBD queue counters */ + txq->next++; + txq->next %= txq->entries; + txq->available--; + DEC_STAT(&priv->txq_stat); + + list_add_tail(element, &priv->fw_pend_list); + INC_STAT(&priv->fw_pend_stat); + } + + if (txq->next != next) { + /* kick off the DMA by notifying firmware the + * write index has moved; make sure TBD stores are sync'd */ + wmb(); + write_register(priv->net_dev, + IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX, + txq->next); + } +} + +/* + * ipw2100_tx_send_data + * + */ +static void ipw2100_tx_send_data(struct ipw2100_priv *priv) +{ + struct list_head *element; + struct ipw2100_tx_packet *packet; + struct ipw2100_bd_queue *txq = &priv->tx_queue; + struct ipw2100_bd *tbd; + int next = txq->next; + int i = 0; + struct ipw2100_data_header *ipw_hdr; + struct libipw_hdr_3addr *hdr; + + while (!list_empty(&priv->tx_pend_list)) { + /* if there isn't enough space in TBD queue, then + * don't stuff a new one in. + * NOTE: 4 are needed as a data will take two, + * and there is a minimum of 2 that must be + * maintained between the r and w indexes + */ + element = priv->tx_pend_list.next; + packet = list_entry(element, struct ipw2100_tx_packet, list); + + if (unlikely(1 + packet->info.d_struct.txb->nr_frags > + IPW_MAX_BDS)) { + /* TODO: Support merging buffers if more than + * IPW_MAX_BDS are used */ + IPW_DEBUG_INFO("%s: Maximum BD threshold exceeded. 
" + "Increase fragmentation level.\n", + priv->net_dev->name); + } + + if (txq->available <= 3 + packet->info.d_struct.txb->nr_frags) { + IPW_DEBUG_TX("no room in tx_queue\n"); + break; + } + + list_del(element); + DEC_STAT(&priv->tx_pend_stat); + + tbd = &txq->drv[txq->next]; + + packet->index = txq->next; + + ipw_hdr = packet->info.d_struct.data; + hdr = (struct libipw_hdr_3addr *)packet->info.d_struct.txb-> + fragments[0]->data; + + if (priv->ieee->iw_mode == IW_MODE_INFRA) { + /* To DS: Addr1 = BSSID, Addr2 = SA, + Addr3 = DA */ + memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN); + memcpy(ipw_hdr->dst_addr, hdr->addr3, ETH_ALEN); + } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { + /* not From/To DS: Addr1 = DA, Addr2 = SA, + Addr3 = BSSID */ + memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN); + memcpy(ipw_hdr->dst_addr, hdr->addr1, ETH_ALEN); + } + + ipw_hdr->host_command_reg = SEND; + ipw_hdr->host_command_reg1 = 0; + + /* For now we only support host based encryption */ + ipw_hdr->needs_encryption = 0; + ipw_hdr->encrypted = packet->info.d_struct.txb->encrypted; + if (packet->info.d_struct.txb->nr_frags > 1) + ipw_hdr->fragment_size = + packet->info.d_struct.txb->frag_size - + LIBIPW_3ADDR_LEN; + else + ipw_hdr->fragment_size = 0; + + tbd->host_addr = packet->info.d_struct.data_phys; + tbd->buf_length = sizeof(struct ipw2100_data_header); + tbd->num_fragments = 1 + packet->info.d_struct.txb->nr_frags; + tbd->status.info.field = + IPW_BD_STATUS_TX_FRAME_802_3 | + IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT; + txq->next++; + txq->next %= txq->entries; + + IPW_DEBUG_TX("data header tbd TX%d P=%08x L=%d\n", + packet->index, tbd->host_addr, tbd->buf_length); +#ifdef CONFIG_IPW2100_DEBUG + if (packet->info.d_struct.txb->nr_frags > 1) + IPW_DEBUG_FRAG("fragment Tx: %d frames\n", + packet->info.d_struct.txb->nr_frags); +#endif + + for (i = 0; i < packet->info.d_struct.txb->nr_frags; i++) { + tbd = &txq->drv[txq->next]; + if (i == packet->info.d_struct.txb->nr_frags - 1) + tbd->status.info.field = + IPW_BD_STATUS_TX_FRAME_802_3 | + IPW_BD_STATUS_TX_INTERRUPT_ENABLE; + else + tbd->status.info.field = + IPW_BD_STATUS_TX_FRAME_802_3 | + IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT; + + tbd->buf_length = packet->info.d_struct.txb-> + fragments[i]->len - LIBIPW_3ADDR_LEN; + + tbd->host_addr = pci_map_single(priv->pci_dev, + packet->info.d_struct. 
+ txb->fragments[i]-> + data + + LIBIPW_3ADDR_LEN, + tbd->buf_length, + PCI_DMA_TODEVICE); + + IPW_DEBUG_TX("data frag tbd TX%d P=%08x L=%d\n", + txq->next, tbd->host_addr, + tbd->buf_length); + + pci_dma_sync_single_for_device(priv->pci_dev, + tbd->host_addr, + tbd->buf_length, + PCI_DMA_TODEVICE); + + txq->next++; + txq->next %= txq->entries; + } + + txq->available -= 1 + packet->info.d_struct.txb->nr_frags; + SET_STAT(&priv->txq_stat, txq->available); + + list_add_tail(element, &priv->fw_pend_list); + INC_STAT(&priv->fw_pend_stat); + } + + if (txq->next != next) { + /* kick off the DMA by notifying firmware the + * write index has moved; make sure TBD stores are sync'd */ + write_register(priv->net_dev, + IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX, + txq->next); + } + return; +} + +static void ipw2100_irq_tasklet(struct ipw2100_priv *priv) +{ + struct net_device *dev = priv->net_dev; + unsigned long flags; + u32 inta, tmp; + + spin_lock_irqsave(&priv->low_lock, flags); + ipw2100_disable_interrupts(priv); + + read_register(dev, IPW_REG_INTA, &inta); + + IPW_DEBUG_ISR("enter - INTA: 0x%08lX\n", + (unsigned long)inta & IPW_INTERRUPT_MASK); + + priv->in_isr++; + priv->interrupts++; + + /* We do not loop and keep polling for more interrupts as this + * is frowned upon and doesn't play nicely with other potentially + * chained IRQs */ + IPW_DEBUG_ISR("INTA: 0x%08lX\n", + (unsigned long)inta & IPW_INTERRUPT_MASK); + + if (inta & IPW2100_INTA_FATAL_ERROR) { + printk(KERN_WARNING DRV_NAME + ": Fatal interrupt. Scheduling firmware restart.\n"); + priv->inta_other++; + write_register(dev, IPW_REG_INTA, IPW2100_INTA_FATAL_ERROR); + + read_nic_dword(dev, IPW_NIC_FATAL_ERROR, &priv->fatal_error); + IPW_DEBUG_INFO("%s: Fatal error value: 0x%08X\n", + priv->net_dev->name, priv->fatal_error); + + read_nic_dword(dev, IPW_ERROR_ADDR(priv->fatal_error), &tmp); + IPW_DEBUG_INFO("%s: Fatal error address value: 0x%08X\n", + priv->net_dev->name, tmp); + + /* Wake up any sleeping jobs */ + schedule_reset(priv); + } + + if (inta & IPW2100_INTA_PARITY_ERROR) { + printk(KERN_ERR DRV_NAME + ": ***** PARITY ERROR INTERRUPT !!!! 
\n"); + priv->inta_other++; + write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR); + } + + if (inta & IPW2100_INTA_RX_TRANSFER) { + IPW_DEBUG_ISR("RX interrupt\n"); + + priv->rx_interrupts++; + + write_register(dev, IPW_REG_INTA, IPW2100_INTA_RX_TRANSFER); + + __ipw2100_rx_process(priv); + __ipw2100_tx_complete(priv); + } + + if (inta & IPW2100_INTA_TX_TRANSFER) { + IPW_DEBUG_ISR("TX interrupt\n"); + + priv->tx_interrupts++; + + write_register(dev, IPW_REG_INTA, IPW2100_INTA_TX_TRANSFER); + + __ipw2100_tx_complete(priv); + ipw2100_tx_send_commands(priv); + ipw2100_tx_send_data(priv); + } + + if (inta & IPW2100_INTA_TX_COMPLETE) { + IPW_DEBUG_ISR("TX complete\n"); + priv->inta_other++; + write_register(dev, IPW_REG_INTA, IPW2100_INTA_TX_COMPLETE); + + __ipw2100_tx_complete(priv); + } + + if (inta & IPW2100_INTA_EVENT_INTERRUPT) { + /* ipw2100_handle_event(dev); */ + priv->inta_other++; + write_register(dev, IPW_REG_INTA, IPW2100_INTA_EVENT_INTERRUPT); + } + + if (inta & IPW2100_INTA_FW_INIT_DONE) { + IPW_DEBUG_ISR("FW init done interrupt\n"); + priv->inta_other++; + + read_register(dev, IPW_REG_INTA, &tmp); + if (tmp & (IPW2100_INTA_FATAL_ERROR | + IPW2100_INTA_PARITY_ERROR)) { + write_register(dev, IPW_REG_INTA, + IPW2100_INTA_FATAL_ERROR | + IPW2100_INTA_PARITY_ERROR); + } + + write_register(dev, IPW_REG_INTA, IPW2100_INTA_FW_INIT_DONE); + } + + if (inta & IPW2100_INTA_STATUS_CHANGE) { + IPW_DEBUG_ISR("Status change interrupt\n"); + priv->inta_other++; + write_register(dev, IPW_REG_INTA, IPW2100_INTA_STATUS_CHANGE); + } + + if (inta & IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE) { + IPW_DEBUG_ISR("slave host mode interrupt\n"); + priv->inta_other++; + write_register(dev, IPW_REG_INTA, + IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE); + } + + priv->in_isr--; + ipw2100_enable_interrupts(priv); + + spin_unlock_irqrestore(&priv->low_lock, flags); + + IPW_DEBUG_ISR("exit\n"); +} + +static irqreturn_t ipw2100_interrupt(int irq, void *data) +{ + struct ipw2100_priv *priv = data; + u32 inta, inta_mask; + + if (!data) + return IRQ_NONE; + + spin_lock(&priv->low_lock); + + /* We check to see if we should be ignoring interrupts before + * we touch the hardware. During ucode load if we try and handle + * an interrupt we can cause keyboard problems as well as cause + * the ucode to fail to initialize */ + if (!(priv->status & STATUS_INT_ENABLED)) { + /* Shared IRQ */ + goto none; + } + + read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask); + read_register(priv->net_dev, IPW_REG_INTA, &inta); + + if (inta == 0xFFFFFFFF) { + /* Hardware disappeared */ + printk(KERN_WARNING DRV_NAME ": IRQ INTA == 0xFFFFFFFF\n"); + goto none; + } + + inta &= IPW_INTERRUPT_MASK; + + if (!(inta & inta_mask)) { + /* Shared interrupt */ + goto none; + } + + /* We disable the hardware interrupt here just to prevent unneeded + * calls to be made. 
We disable this again within the actual + * work tasklet, so if another part of the code re-enables the + * interrupt, that is fine */ + ipw2100_disable_interrupts(priv); + + tasklet_schedule(&priv->irq_tasklet); + spin_unlock(&priv->low_lock); + + return IRQ_HANDLED; + none: + spin_unlock(&priv->low_lock); + return IRQ_NONE; +} + +static netdev_tx_t ipw2100_tx(struct libipw_txb *txb, + struct net_device *dev, int pri) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + struct list_head *element; + struct ipw2100_tx_packet *packet; + unsigned long flags; + + spin_lock_irqsave(&priv->low_lock, flags); + + if (!(priv->status & STATUS_ASSOCIATED)) { + IPW_DEBUG_INFO("Can not transmit when not connected.\n"); + priv->net_dev->stats.tx_carrier_errors++; + netif_stop_queue(dev); + goto fail_unlock; + } + + if (list_empty(&priv->tx_free_list)) + goto fail_unlock; + + element = priv->tx_free_list.next; + packet = list_entry(element, struct ipw2100_tx_packet, list); + + packet->info.d_struct.txb = txb; + + IPW_DEBUG_TX("Sending fragment (%d bytes):\n", txb->fragments[0]->len); + printk_buf(IPW_DL_TX, txb->fragments[0]->data, txb->fragments[0]->len); + + packet->jiffy_start = jiffies; + + list_del(element); + DEC_STAT(&priv->tx_free_stat); + + list_add_tail(element, &priv->tx_pend_list); + INC_STAT(&priv->tx_pend_stat); + + ipw2100_tx_send_data(priv); + + spin_unlock_irqrestore(&priv->low_lock, flags); + return NETDEV_TX_OK; + +fail_unlock: + netif_stop_queue(dev); + spin_unlock_irqrestore(&priv->low_lock, flags); + return NETDEV_TX_BUSY; +} + +static int ipw2100_msg_allocate(struct ipw2100_priv *priv) +{ + int i, j, err = -EINVAL; + void *v; + dma_addr_t p; + + priv->msg_buffers = + (struct ipw2100_tx_packet *)kmalloc(IPW_COMMAND_POOL_SIZE * + sizeof(struct + ipw2100_tx_packet), + GFP_KERNEL); + if (!priv->msg_buffers) { + printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for msg " + "buffers.\n", priv->net_dev->name); + return -ENOMEM; + } + + for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) { + v = pci_alloc_consistent(priv->pci_dev, + sizeof(struct ipw2100_cmd_header), &p); + if (!v) { + printk(KERN_ERR DRV_NAME ": " + "%s: PCI alloc failed for msg " + "buffers.\n", priv->net_dev->name); + err = -ENOMEM; + break; + } + + memset(v, 0, sizeof(struct ipw2100_cmd_header)); + + priv->msg_buffers[i].type = COMMAND; + priv->msg_buffers[i].info.c_struct.cmd = + (struct ipw2100_cmd_header *)v; + priv->msg_buffers[i].info.c_struct.cmd_phys = p; + } + + if (i == IPW_COMMAND_POOL_SIZE) + return 0; + + for (j = 0; j < i; j++) { + pci_free_consistent(priv->pci_dev, + sizeof(struct ipw2100_cmd_header), + priv->msg_buffers[j].info.c_struct.cmd, + priv->msg_buffers[j].info.c_struct. + cmd_phys); + } + + kfree(priv->msg_buffers); + priv->msg_buffers = NULL; + + return err; +} + +static int ipw2100_msg_initialize(struct ipw2100_priv *priv) +{ + int i; + + INIT_LIST_HEAD(&priv->msg_free_list); + INIT_LIST_HEAD(&priv->msg_pend_list); + + for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) + list_add_tail(&priv->msg_buffers[i].list, &priv->msg_free_list); + SET_STAT(&priv->msg_free_stat, i); + + return 0; +} + +static void ipw2100_msg_free(struct ipw2100_priv *priv) +{ + int i; + + if (!priv->msg_buffers) + return; + + for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) { + pci_free_consistent(priv->pci_dev, + sizeof(struct ipw2100_cmd_header), + priv->msg_buffers[i].info.c_struct.cmd, + priv->msg_buffers[i].info.c_struct. 
+ cmd_phys); + } + + kfree(priv->msg_buffers); + priv->msg_buffers = NULL; +} + +static ssize_t show_pci(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev); + char *out = buf; + int i, j; + u32 val; + + for (i = 0; i < 16; i++) { + out += sprintf(out, "[%08X] ", i * 16); + for (j = 0; j < 16; j += 4) { + pci_read_config_dword(pci_dev, i * 16 + j, &val); + out += sprintf(out, "%08X ", val); + } + out += sprintf(out, "\n"); + } + + return out - buf; +} + +static DEVICE_ATTR(pci, S_IRUGO, show_pci, NULL); + +static ssize_t show_cfg(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw2100_priv *p = dev_get_drvdata(d); + return sprintf(buf, "0x%08x\n", (int)p->config); +} + +static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL); + +static ssize_t show_status(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw2100_priv *p = dev_get_drvdata(d); + return sprintf(buf, "0x%08x\n", (int)p->status); +} + +static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); + +static ssize_t show_capability(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw2100_priv *p = dev_get_drvdata(d); + return sprintf(buf, "0x%08x\n", (int)p->capability); +} + +static DEVICE_ATTR(capability, S_IRUGO, show_capability, NULL); + +#define IPW2100_REG(x) { IPW_ ##x, #x } +static const struct { + u32 addr; + const char *name; +} hw_data[] = { +IPW2100_REG(REG_GP_CNTRL), + IPW2100_REG(REG_GPIO), + IPW2100_REG(REG_INTA), + IPW2100_REG(REG_INTA_MASK), IPW2100_REG(REG_RESET_REG),}; +#define IPW2100_NIC(x, s) { x, #x, s } +static const struct { + u32 addr; + const char *name; + size_t size; +} nic_data[] = { +IPW2100_NIC(IPW2100_CONTROL_REG, 2), + IPW2100_NIC(0x210014, 1), IPW2100_NIC(0x210000, 1),}; +#define IPW2100_ORD(x, d) { IPW_ORD_ ##x, #x, d } +static const struct { + u8 index; + const char *name; + const char *desc; +} ord_data[] = { +IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"), + IPW2100_ORD(STAT_TX_HOST_COMPLETE, + "successful Host Tx's (MSDU)"), + IPW2100_ORD(STAT_TX_DIR_DATA, + "successful Directed Tx's (MSDU)"), + IPW2100_ORD(STAT_TX_DIR_DATA1, + "successful Directed Tx's (MSDU) @ 1MB"), + IPW2100_ORD(STAT_TX_DIR_DATA2, + "successful Directed Tx's (MSDU) @ 2MB"), + IPW2100_ORD(STAT_TX_DIR_DATA5_5, + "successful Directed Tx's (MSDU) @ 5_5MB"), + IPW2100_ORD(STAT_TX_DIR_DATA11, + "successful Directed Tx's (MSDU) @ 11MB"), + IPW2100_ORD(STAT_TX_NODIR_DATA1, + "successful Non_Directed Tx's (MSDU) @ 1MB"), + IPW2100_ORD(STAT_TX_NODIR_DATA2, + "successful Non_Directed Tx's (MSDU) @ 2MB"), + IPW2100_ORD(STAT_TX_NODIR_DATA5_5, + "successful Non_Directed Tx's (MSDU) @ 5.5MB"), + IPW2100_ORD(STAT_TX_NODIR_DATA11, + "successful Non_Directed Tx's (MSDU) @ 11MB"), + IPW2100_ORD(STAT_NULL_DATA, "successful NULL data Tx's"), + IPW2100_ORD(STAT_TX_RTS, "successful Tx RTS"), + IPW2100_ORD(STAT_TX_CTS, "successful Tx CTS"), + IPW2100_ORD(STAT_TX_ACK, "successful Tx ACK"), + IPW2100_ORD(STAT_TX_ASSN, "successful Association Tx's"), + IPW2100_ORD(STAT_TX_ASSN_RESP, + "successful Association response Tx's"), + IPW2100_ORD(STAT_TX_REASSN, + "successful Reassociation Tx's"), + IPW2100_ORD(STAT_TX_REASSN_RESP, + "successful Reassociation response Tx's"), + IPW2100_ORD(STAT_TX_PROBE, + "probes successfully transmitted"), + IPW2100_ORD(STAT_TX_PROBE_RESP, + "probe responses successfully transmitted"), + IPW2100_ORD(STAT_TX_BEACON, "tx beacon"), + IPW2100_ORD(STAT_TX_ATIM, "Tx 
ATIM"), + IPW2100_ORD(STAT_TX_DISASSN, + "successful Disassociation TX"), + IPW2100_ORD(STAT_TX_AUTH, "successful Authentication Tx"), + IPW2100_ORD(STAT_TX_DEAUTH, + "successful Deauthentication TX"), + IPW2100_ORD(STAT_TX_TOTAL_BYTES, + "Total successful Tx data bytes"), + IPW2100_ORD(STAT_TX_RETRIES, "Tx retries"), + IPW2100_ORD(STAT_TX_RETRY1, "Tx retries at 1MBPS"), + IPW2100_ORD(STAT_TX_RETRY2, "Tx retries at 2MBPS"), + IPW2100_ORD(STAT_TX_RETRY5_5, "Tx retries at 5.5MBPS"), + IPW2100_ORD(STAT_TX_RETRY11, "Tx retries at 11MBPS"), + IPW2100_ORD(STAT_TX_FAILURES, "Tx Failures"), + IPW2100_ORD(STAT_TX_MAX_TRIES_IN_HOP, + "times max tries in a hop failed"), + IPW2100_ORD(STAT_TX_DISASSN_FAIL, + "times disassociation failed"), + IPW2100_ORD(STAT_TX_ERR_CTS, "missed/bad CTS frames"), + IPW2100_ORD(STAT_TX_ERR_ACK, "tx err due to acks"), + IPW2100_ORD(STAT_RX_HOST, "packets passed to host"), + IPW2100_ORD(STAT_RX_DIR_DATA, "directed packets"), + IPW2100_ORD(STAT_RX_DIR_DATA1, "directed packets at 1MB"), + IPW2100_ORD(STAT_RX_DIR_DATA2, "directed packets at 2MB"), + IPW2100_ORD(STAT_RX_DIR_DATA5_5, + "directed packets at 5.5MB"), + IPW2100_ORD(STAT_RX_DIR_DATA11, "directed packets at 11MB"), + IPW2100_ORD(STAT_RX_NODIR_DATA, "nondirected packets"), + IPW2100_ORD(STAT_RX_NODIR_DATA1, + "nondirected packets at 1MB"), + IPW2100_ORD(STAT_RX_NODIR_DATA2, + "nondirected packets at 2MB"), + IPW2100_ORD(STAT_RX_NODIR_DATA5_5, + "nondirected packets at 5.5MB"), + IPW2100_ORD(STAT_RX_NODIR_DATA11, + "nondirected packets at 11MB"), + IPW2100_ORD(STAT_RX_NULL_DATA, "null data rx's"), + IPW2100_ORD(STAT_RX_RTS, "Rx RTS"), IPW2100_ORD(STAT_RX_CTS, + "Rx CTS"), + IPW2100_ORD(STAT_RX_ACK, "Rx ACK"), + IPW2100_ORD(STAT_RX_CFEND, "Rx CF End"), + IPW2100_ORD(STAT_RX_CFEND_ACK, "Rx CF End + CF Ack"), + IPW2100_ORD(STAT_RX_ASSN, "Association Rx's"), + IPW2100_ORD(STAT_RX_ASSN_RESP, "Association response Rx's"), + IPW2100_ORD(STAT_RX_REASSN, "Reassociation Rx's"), + IPW2100_ORD(STAT_RX_REASSN_RESP, + "Reassociation response Rx's"), + IPW2100_ORD(STAT_RX_PROBE, "probe Rx's"), + IPW2100_ORD(STAT_RX_PROBE_RESP, "probe response Rx's"), + IPW2100_ORD(STAT_RX_BEACON, "Rx beacon"), + IPW2100_ORD(STAT_RX_ATIM, "Rx ATIM"), + IPW2100_ORD(STAT_RX_DISASSN, "disassociation Rx"), + IPW2100_ORD(STAT_RX_AUTH, "authentication Rx"), + IPW2100_ORD(STAT_RX_DEAUTH, "deauthentication Rx"), + IPW2100_ORD(STAT_RX_TOTAL_BYTES, + "Total rx data bytes received"), + IPW2100_ORD(STAT_RX_ERR_CRC, "packets with Rx CRC error"), + IPW2100_ORD(STAT_RX_ERR_CRC1, "Rx CRC errors at 1MB"), + IPW2100_ORD(STAT_RX_ERR_CRC2, "Rx CRC errors at 2MB"), + IPW2100_ORD(STAT_RX_ERR_CRC5_5, "Rx CRC errors at 5.5MB"), + IPW2100_ORD(STAT_RX_ERR_CRC11, "Rx CRC errors at 11MB"), + IPW2100_ORD(STAT_RX_DUPLICATE1, + "duplicate rx packets at 1MB"), + IPW2100_ORD(STAT_RX_DUPLICATE2, + "duplicate rx packets at 2MB"), + IPW2100_ORD(STAT_RX_DUPLICATE5_5, + "duplicate rx packets at 5.5MB"), + IPW2100_ORD(STAT_RX_DUPLICATE11, + "duplicate rx packets at 11MB"), + IPW2100_ORD(STAT_RX_DUPLICATE, "duplicate rx packets"), + IPW2100_ORD(PERS_DB_LOCK, "locking fw permanent db"), + IPW2100_ORD(PERS_DB_SIZE, "size of fw permanent db"), + IPW2100_ORD(PERS_DB_ADDR, "address of fw permanent db"), + IPW2100_ORD(STAT_RX_INVALID_PROTOCOL, + "rx frames with invalid protocol"), + IPW2100_ORD(SYS_BOOT_TIME, "Boot time"), + IPW2100_ORD(STAT_RX_NO_BUFFER, + "rx frames rejected due to no buffer"), + IPW2100_ORD(STAT_RX_MISSING_FRAG, + "rx frames dropped due to missing fragment"), + 
IPW2100_ORD(STAT_RX_ORPHAN_FRAG, + "rx frames dropped due to non-sequential fragment"), + IPW2100_ORD(STAT_RX_ORPHAN_FRAME, + "rx frames dropped due to unmatched 1st frame"), + IPW2100_ORD(STAT_RX_FRAG_AGEOUT, + "rx frames dropped due to uncompleted frame"), + IPW2100_ORD(STAT_RX_ICV_ERRORS, + "ICV errors during decryption"), + IPW2100_ORD(STAT_PSP_SUSPENSION, "times adapter suspended"), + IPW2100_ORD(STAT_PSP_BCN_TIMEOUT, "beacon timeout"), + IPW2100_ORD(STAT_PSP_POLL_TIMEOUT, + "poll response timeouts"), + IPW2100_ORD(STAT_PSP_NONDIR_TIMEOUT, + "timeouts waiting for last {broad,multi}cast pkt"), + IPW2100_ORD(STAT_PSP_RX_DTIMS, "PSP DTIMs received"), + IPW2100_ORD(STAT_PSP_RX_TIMS, "PSP TIMs received"), + IPW2100_ORD(STAT_PSP_STATION_ID, "PSP Station ID"), + IPW2100_ORD(LAST_ASSN_TIME, "RTC time of last association"), + IPW2100_ORD(STAT_PERCENT_MISSED_BCNS, + "current calculation of % missed beacons"), + IPW2100_ORD(STAT_PERCENT_RETRIES, + "current calculation of % missed tx retries"), + IPW2100_ORD(ASSOCIATED_AP_PTR, + "0 if not associated, else pointer to AP table entry"), + IPW2100_ORD(AVAILABLE_AP_CNT, + "AP's decsribed in the AP table"), + IPW2100_ORD(AP_LIST_PTR, "Ptr to list of available APs"), + IPW2100_ORD(STAT_AP_ASSNS, "associations"), + IPW2100_ORD(STAT_ASSN_FAIL, "association failures"), + IPW2100_ORD(STAT_ASSN_RESP_FAIL, + "failures due to response fail"), + IPW2100_ORD(STAT_FULL_SCANS, "full scans"), + IPW2100_ORD(CARD_DISABLED, "Card Disabled"), + IPW2100_ORD(STAT_ROAM_INHIBIT, + "times roaming was inhibited due to activity"), + IPW2100_ORD(RSSI_AT_ASSN, + "RSSI of associated AP at time of association"), + IPW2100_ORD(STAT_ASSN_CAUSE1, + "reassociation: no probe response or TX on hop"), + IPW2100_ORD(STAT_ASSN_CAUSE2, + "reassociation: poor tx/rx quality"), + IPW2100_ORD(STAT_ASSN_CAUSE3, + "reassociation: tx/rx quality (excessive AP load"), + IPW2100_ORD(STAT_ASSN_CAUSE4, + "reassociation: AP RSSI level"), + IPW2100_ORD(STAT_ASSN_CAUSE5, + "reassociations due to load leveling"), + IPW2100_ORD(STAT_AUTH_FAIL, "times authentication failed"), + IPW2100_ORD(STAT_AUTH_RESP_FAIL, + "times authentication response failed"), + IPW2100_ORD(STATION_TABLE_CNT, + "entries in association table"), + IPW2100_ORD(RSSI_AVG_CURR, "Current avg RSSI"), + IPW2100_ORD(POWER_MGMT_MODE, "Power mode - 0=CAM, 1=PSP"), + IPW2100_ORD(COUNTRY_CODE, + "IEEE country code as recv'd from beacon"), + IPW2100_ORD(COUNTRY_CHANNELS, + "channels suported by country"), + IPW2100_ORD(RESET_CNT, "adapter resets (warm)"), + IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"), + IPW2100_ORD(ANTENNA_DIVERSITY, + "TRUE if antenna diversity is disabled"), + IPW2100_ORD(DTIM_PERIOD, "beacon intervals between DTIMs"), + IPW2100_ORD(OUR_FREQ, + "current radio freq lower digits - channel ID"), + IPW2100_ORD(RTC_TIME, "current RTC time"), + IPW2100_ORD(PORT_TYPE, "operating mode"), + IPW2100_ORD(CURRENT_TX_RATE, "current tx rate"), + IPW2100_ORD(SUPPORTED_RATES, "supported tx rates"), + IPW2100_ORD(ATIM_WINDOW, "current ATIM Window"), + IPW2100_ORD(BASIC_RATES, "basic tx rates"), + IPW2100_ORD(NIC_HIGHEST_RATE, "NIC highest tx rate"), + IPW2100_ORD(AP_HIGHEST_RATE, "AP highest tx rate"), + IPW2100_ORD(CAPABILITIES, + "Management frame capability field"), + IPW2100_ORD(AUTH_TYPE, "Type of authentication"), + IPW2100_ORD(RADIO_TYPE, "Adapter card platform type"), + IPW2100_ORD(RTS_THRESHOLD, + "Min packet length for RTS handshaking"), + IPW2100_ORD(INT_MODE, "International mode"), + IPW2100_ORD(FRAGMENTATION_THRESHOLD, + 
"protocol frag threshold"), + IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_START_ADDRESS, + "EEPROM offset in SRAM"), + IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_SIZE, + "EEPROM size in SRAM"), + IPW2100_ORD(EEPROM_SKU_CAPABILITY, "EEPROM SKU Capability"), + IPW2100_ORD(EEPROM_IBSS_11B_CHANNELS, + "EEPROM IBSS 11b channel set"), + IPW2100_ORD(MAC_VERSION, "MAC Version"), + IPW2100_ORD(MAC_REVISION, "MAC Revision"), + IPW2100_ORD(RADIO_VERSION, "Radio Version"), + IPW2100_ORD(NIC_MANF_DATE_TIME, "MANF Date/Time STAMP"), + IPW2100_ORD(UCODE_VERSION, "Ucode Version"),}; + +static ssize_t show_registers(struct device *d, struct device_attribute *attr, + char *buf) +{ + int i; + struct ipw2100_priv *priv = dev_get_drvdata(d); + struct net_device *dev = priv->net_dev; + char *out = buf; + u32 val = 0; + + out += sprintf(out, "%30s [Address ] : Hex\n", "Register"); + + for (i = 0; i < ARRAY_SIZE(hw_data); i++) { + read_register(dev, hw_data[i].addr, &val); + out += sprintf(out, "%30s [%08X] : %08X\n", + hw_data[i].name, hw_data[i].addr, val); + } + + return out - buf; +} + +static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL); + +static ssize_t show_hardware(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + struct net_device *dev = priv->net_dev; + char *out = buf; + int i; + + out += sprintf(out, "%30s [Address ] : Hex\n", "NIC entry"); + + for (i = 0; i < ARRAY_SIZE(nic_data); i++) { + u8 tmp8; + u16 tmp16; + u32 tmp32; + + switch (nic_data[i].size) { + case 1: + read_nic_byte(dev, nic_data[i].addr, &tmp8); + out += sprintf(out, "%30s [%08X] : %02X\n", + nic_data[i].name, nic_data[i].addr, + tmp8); + break; + case 2: + read_nic_word(dev, nic_data[i].addr, &tmp16); + out += sprintf(out, "%30s [%08X] : %04X\n", + nic_data[i].name, nic_data[i].addr, + tmp16); + break; + case 4: + read_nic_dword(dev, nic_data[i].addr, &tmp32); + out += sprintf(out, "%30s [%08X] : %08X\n", + nic_data[i].name, nic_data[i].addr, + tmp32); + break; + } + } + return out - buf; +} + +static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL); + +static ssize_t show_memory(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + struct net_device *dev = priv->net_dev; + static unsigned long loop = 0; + int len = 0; + u32 buffer[4]; + int i; + char line[81]; + + if (loop >= 0x30000) + loop = 0; + + /* sysfs provides us PAGE_SIZE buffer */ + while (len < PAGE_SIZE - 128 && loop < 0x30000) { + + if (priv->snapshot[0]) + for (i = 0; i < 4; i++) + buffer[i] = + *(u32 *) SNAPSHOT_ADDR(loop + i * 4); + else + for (i = 0; i < 4; i++) + read_nic_dword(dev, loop + i * 4, &buffer[i]); + + if (priv->dump_raw) + len += sprintf(buf + len, + "%c%c%c%c" + "%c%c%c%c" + "%c%c%c%c" + "%c%c%c%c", + ((u8 *) buffer)[0x0], + ((u8 *) buffer)[0x1], + ((u8 *) buffer)[0x2], + ((u8 *) buffer)[0x3], + ((u8 *) buffer)[0x4], + ((u8 *) buffer)[0x5], + ((u8 *) buffer)[0x6], + ((u8 *) buffer)[0x7], + ((u8 *) buffer)[0x8], + ((u8 *) buffer)[0x9], + ((u8 *) buffer)[0xa], + ((u8 *) buffer)[0xb], + ((u8 *) buffer)[0xc], + ((u8 *) buffer)[0xd], + ((u8 *) buffer)[0xe], + ((u8 *) buffer)[0xf]); + else + len += sprintf(buf + len, "%s\n", + snprint_line(line, sizeof(line), + (u8 *) buffer, 16, loop)); + loop += 16; + } + + return len; +} + +static ssize_t store_memory(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + struct net_device *dev = priv->net_dev; + const 
char *p = buf; + + (void)dev; /* kill unused-var warning for debug-only code */ + + if (count < 1) + return count; + + if (p[0] == '1' || + (count >= 2 && tolower(p[0]) == 'o' && tolower(p[1]) == 'n')) { + IPW_DEBUG_INFO("%s: Setting memory dump to RAW mode.\n", + dev->name); + priv->dump_raw = 1; + + } else if (p[0] == '0' || (count >= 2 && tolower(p[0]) == 'o' && + tolower(p[1]) == 'f')) { + IPW_DEBUG_INFO("%s: Setting memory dump to HEX mode.\n", + dev->name); + priv->dump_raw = 0; + + } else if (tolower(p[0]) == 'r') { + IPW_DEBUG_INFO("%s: Resetting firmware snapshot.\n", dev->name); + ipw2100_snapshot_free(priv); + + } else + IPW_DEBUG_INFO("%s: Usage: 0|on = HEX, 1|off = RAW, " + "reset = clear memory snapshot\n", dev->name); + + return count; +} + +static DEVICE_ATTR(memory, S_IWUSR | S_IRUGO, show_memory, store_memory); + +static ssize_t show_ordinals(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + u32 val = 0; + int len = 0; + u32 val_len; + static int loop = 0; + + if (priv->status & STATUS_RF_KILL_MASK) + return 0; + + if (loop >= ARRAY_SIZE(ord_data)) + loop = 0; + + /* sysfs provides us PAGE_SIZE buffer */ + while (len < PAGE_SIZE - 128 && loop < ARRAY_SIZE(ord_data)) { + val_len = sizeof(u32); + + if (ipw2100_get_ordinal(priv, ord_data[loop].index, &val, + &val_len)) + len += sprintf(buf + len, "[0x%02X] = ERROR %s\n", + ord_data[loop].index, + ord_data[loop].desc); + else + len += sprintf(buf + len, "[0x%02X] = 0x%08X %s\n", + ord_data[loop].index, val, + ord_data[loop].desc); + loop++; + } + + return len; +} + +static DEVICE_ATTR(ordinals, S_IRUGO, show_ordinals, NULL); + +static ssize_t show_stats(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + char *out = buf; + + out += sprintf(out, "interrupts: %d {tx: %d, rx: %d, other: %d}\n", + priv->interrupts, priv->tx_interrupts, + priv->rx_interrupts, priv->inta_other); + out += sprintf(out, "firmware resets: %d\n", priv->resets); + out += sprintf(out, "firmware hangs: %d\n", priv->hangs); +#ifdef CONFIG_IPW2100_DEBUG + out += sprintf(out, "packet mismatch image: %s\n", + priv->snapshot[0] ? "YES" : "NO"); +#endif + + return out - buf; +} + +static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL); + +static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode) +{ + int err; + + if (mode == priv->ieee->iw_mode) + return 0; + + err = ipw2100_disable_adapter(priv); + if (err) { + printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n", + priv->net_dev->name, err); + return err; + } + + switch (mode) { + case IW_MODE_INFRA: + priv->net_dev->type = ARPHRD_ETHER; + break; + case IW_MODE_ADHOC: + priv->net_dev->type = ARPHRD_ETHER; + break; +#ifdef CONFIG_IPW2100_MONITOR + case IW_MODE_MONITOR: + priv->last_mode = priv->ieee->iw_mode; + priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; + break; +#endif /* CONFIG_IPW2100_MONITOR */ + } + + priv->ieee->iw_mode = mode; + +#ifdef CONFIG_PM + /* Indicate ipw2100_download_firmware download firmware + * from disk instead of memory. 
*/ + ipw2100_firmware.version = 0; +#endif + + printk(KERN_INFO "%s: Reseting on mode change.\n", priv->net_dev->name); + priv->reset_backoff = 0; + schedule_reset(priv); + + return 0; +} + +static ssize_t show_internals(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + int len = 0; + +#define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" y "\n", priv-> x) + + if (priv->status & STATUS_ASSOCIATED) + len += sprintf(buf + len, "connected: %lu\n", + get_seconds() - priv->connect_start); + else + len += sprintf(buf + len, "not connected\n"); + + DUMP_VAR(ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx], "p"); + DUMP_VAR(status, "08lx"); + DUMP_VAR(config, "08lx"); + DUMP_VAR(capability, "08lx"); + + len += + sprintf(buf + len, "last_rtc: %lu\n", + (unsigned long)priv->last_rtc); + + DUMP_VAR(fatal_error, "d"); + DUMP_VAR(stop_hang_check, "d"); + DUMP_VAR(stop_rf_kill, "d"); + DUMP_VAR(messages_sent, "d"); + + DUMP_VAR(tx_pend_stat.value, "d"); + DUMP_VAR(tx_pend_stat.hi, "d"); + + DUMP_VAR(tx_free_stat.value, "d"); + DUMP_VAR(tx_free_stat.lo, "d"); + + DUMP_VAR(msg_free_stat.value, "d"); + DUMP_VAR(msg_free_stat.lo, "d"); + + DUMP_VAR(msg_pend_stat.value, "d"); + DUMP_VAR(msg_pend_stat.hi, "d"); + + DUMP_VAR(fw_pend_stat.value, "d"); + DUMP_VAR(fw_pend_stat.hi, "d"); + + DUMP_VAR(txq_stat.value, "d"); + DUMP_VAR(txq_stat.lo, "d"); + + DUMP_VAR(ieee->scans, "d"); + DUMP_VAR(reset_backoff, "d"); + + return len; +} + +static DEVICE_ATTR(internals, S_IRUGO, show_internals, NULL); + +static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + char essid[IW_ESSID_MAX_SIZE + 1]; + u8 bssid[ETH_ALEN]; + u32 chan = 0; + char *out = buf; + unsigned int length; + int ret; + + if (priv->status & STATUS_RF_KILL_MASK) + return 0; + + memset(essid, 0, sizeof(essid)); + memset(bssid, 0, sizeof(bssid)); + + length = IW_ESSID_MAX_SIZE; + ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID, essid, &length); + if (ret) + IPW_DEBUG_INFO("failed querying ordinals at line %d\n", + __LINE__); + + length = sizeof(bssid); + ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, + bssid, &length); + if (ret) + IPW_DEBUG_INFO("failed querying ordinals at line %d\n", + __LINE__); + + length = sizeof(u32); + ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &length); + if (ret) + IPW_DEBUG_INFO("failed querying ordinals at line %d\n", + __LINE__); + + out += sprintf(out, "ESSID: %s\n", essid); + out += sprintf(out, "BSSID: %pM\n", bssid); + out += sprintf(out, "Channel: %d\n", chan); + + return out - buf; +} + +static DEVICE_ATTR(bssinfo, S_IRUGO, show_bssinfo, NULL); + +#ifdef CONFIG_IPW2100_DEBUG +static ssize_t show_debug_level(struct device_driver *d, char *buf) +{ + return sprintf(buf, "0x%08X\n", ipw2100_debug_level); +} + +static ssize_t store_debug_level(struct device_driver *d, + const char *buf, size_t count) +{ + char *p = (char *)buf; + u32 val; + + if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { + p++; + if (p[0] == 'x' || p[0] == 'X') + p++; + val = simple_strtoul(p, &p, 16); + } else + val = simple_strtoul(p, &p, 10); + if (p == buf) + IPW_DEBUG_INFO(": %s is not in hex or decimal form.\n", buf); + else + ipw2100_debug_level = val; + + return strnlen(buf, count); +} + +static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, show_debug_level, + store_debug_level); +#endif /* CONFIG_IPW2100_DEBUG */ + +static ssize_t 
show_fatal_error(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + char *out = buf; + int i; + + if (priv->fatal_error) + out += sprintf(out, "0x%08X\n", priv->fatal_error); + else + out += sprintf(out, "0\n"); + + for (i = 1; i <= IPW2100_ERROR_QUEUE; i++) { + if (!priv->fatal_errors[(priv->fatal_index - i) % + IPW2100_ERROR_QUEUE]) + continue; + + out += sprintf(out, "%d. 0x%08X\n", i, + priv->fatal_errors[(priv->fatal_index - i) % + IPW2100_ERROR_QUEUE]); + } + + return out - buf; +} + +static ssize_t store_fatal_error(struct device *d, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + schedule_reset(priv); + return count; +} + +static DEVICE_ATTR(fatal_error, S_IWUSR | S_IRUGO, show_fatal_error, + store_fatal_error); + +static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d\n", priv->ieee->scan_age); +} + +static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + struct net_device *dev = priv->net_dev; + char buffer[] = "00000000"; + unsigned long len = + (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1; + unsigned long val; + char *p = buffer; + + (void)dev; /* kill unused-var warning for debug-only code */ + + IPW_DEBUG_INFO("enter\n"); + + strncpy(buffer, buf, len); + buffer[len] = 0; + + if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { + p++; + if (p[0] == 'x' || p[0] == 'X') + p++; + val = simple_strtoul(p, &p, 16); + } else + val = simple_strtoul(p, &p, 10); + if (p == buffer) { + IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name); + } else { + priv->ieee->scan_age = val; + IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age); + } + + IPW_DEBUG_INFO("exit\n"); + return len; +} + +static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age); + +static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr, + char *buf) +{ + /* 0 - RF kill not enabled + 1 - SW based RF kill active (sysfs) + 2 - HW based RF kill active + 3 - Both HW and SW baed RF kill active */ + struct ipw2100_priv *priv = dev_get_drvdata(d); + int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) | + (rf_kill_active(priv) ? 0x2 : 0x0); + return sprintf(buf, "%i\n", val); +} + +static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) +{ + if ((disable_radio ? 1 : 0) == + (priv->status & STATUS_RF_KILL_SW ? 1 : 0)) + return 0; + + IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n", + disable_radio ? 
"OFF" : "ON"); + + mutex_lock(&priv->action_mutex); + + if (disable_radio) { + priv->status |= STATUS_RF_KILL_SW; + ipw2100_down(priv); + } else { + priv->status &= ~STATUS_RF_KILL_SW; + if (rf_kill_active(priv)) { + IPW_DEBUG_RF_KILL("Can not turn radio back on - " + "disabled by HW switch\n"); + /* Make sure the RF_KILL check timer is running */ + priv->stop_rf_kill = 0; + cancel_delayed_work(&priv->rf_kill); + queue_delayed_work(priv->workqueue, &priv->rf_kill, + round_jiffies_relative(HZ)); + } else + schedule_reset(priv); + } + + mutex_unlock(&priv->action_mutex); + return 1; +} + +static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw2100_priv *priv = dev_get_drvdata(d); + ipw_radio_kill_sw(priv, buf[0] == '1'); + return count; +} + +static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); + +static struct attribute *ipw2100_sysfs_entries[] = { + &dev_attr_hardware.attr, + &dev_attr_registers.attr, + &dev_attr_ordinals.attr, + &dev_attr_pci.attr, + &dev_attr_stats.attr, + &dev_attr_internals.attr, + &dev_attr_bssinfo.attr, + &dev_attr_memory.attr, + &dev_attr_scan_age.attr, + &dev_attr_fatal_error.attr, + &dev_attr_rf_kill.attr, + &dev_attr_cfg.attr, + &dev_attr_status.attr, + &dev_attr_capability.attr, + NULL, +}; + +static struct attribute_group ipw2100_attribute_group = { + .attrs = ipw2100_sysfs_entries, +}; + +static int status_queue_allocate(struct ipw2100_priv *priv, int entries) +{ + struct ipw2100_status_queue *q = &priv->status_queue; + + IPW_DEBUG_INFO("enter\n"); + + q->size = entries * sizeof(struct ipw2100_status); + q->drv = + (struct ipw2100_status *)pci_alloc_consistent(priv->pci_dev, + q->size, &q->nic); + if (!q->drv) { + IPW_DEBUG_WARNING("Can not allocate status queue.\n"); + return -ENOMEM; + } + + memset(q->drv, 0, q->size); + + IPW_DEBUG_INFO("exit\n"); + + return 0; +} + +static void status_queue_free(struct ipw2100_priv *priv) +{ + IPW_DEBUG_INFO("enter\n"); + + if (priv->status_queue.drv) { + pci_free_consistent(priv->pci_dev, priv->status_queue.size, + priv->status_queue.drv, + priv->status_queue.nic); + priv->status_queue.drv = NULL; + } + + IPW_DEBUG_INFO("exit\n"); +} + +static int bd_queue_allocate(struct ipw2100_priv *priv, + struct ipw2100_bd_queue *q, int entries) +{ + IPW_DEBUG_INFO("enter\n"); + + memset(q, 0, sizeof(struct ipw2100_bd_queue)); + + q->entries = entries; + q->size = entries * sizeof(struct ipw2100_bd); + q->drv = pci_alloc_consistent(priv->pci_dev, q->size, &q->nic); + if (!q->drv) { + IPW_DEBUG_INFO + ("can't allocate shared memory for buffer descriptors\n"); + return -ENOMEM; + } + memset(q->drv, 0, q->size); + + IPW_DEBUG_INFO("exit\n"); + + return 0; +} + +static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q) +{ + IPW_DEBUG_INFO("enter\n"); + + if (!q) + return; + + if (q->drv) { + pci_free_consistent(priv->pci_dev, q->size, q->drv, q->nic); + q->drv = NULL; + } + + IPW_DEBUG_INFO("exit\n"); +} + +static void bd_queue_initialize(struct ipw2100_priv *priv, + struct ipw2100_bd_queue *q, u32 base, u32 size, + u32 r, u32 w) +{ + IPW_DEBUG_INFO("enter\n"); + + IPW_DEBUG_INFO("initializing bd queue at virt=%p, phys=%08x\n", q->drv, + (u32) q->nic); + + write_register(priv->net_dev, base, q->nic); + write_register(priv->net_dev, size, q->entries); + write_register(priv->net_dev, r, q->oldest); + write_register(priv->net_dev, w, q->next); + + IPW_DEBUG_INFO("exit\n"); +} + +static void ipw2100_kill_workqueue(struct 
ipw2100_priv *priv) +{ + if (priv->workqueue) { + priv->stop_rf_kill = 1; + priv->stop_hang_check = 1; + cancel_delayed_work(&priv->reset_work); + cancel_delayed_work(&priv->security_work); + cancel_delayed_work(&priv->wx_event_work); + cancel_delayed_work(&priv->hang_check); + cancel_delayed_work(&priv->rf_kill); + cancel_delayed_work(&priv->scan_event_later); + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + } +} + +static int ipw2100_tx_allocate(struct ipw2100_priv *priv) +{ + int i, j, err = -EINVAL; + void *v; + dma_addr_t p; + + IPW_DEBUG_INFO("enter\n"); + + err = bd_queue_allocate(priv, &priv->tx_queue, TX_QUEUE_LENGTH); + if (err) { + IPW_DEBUG_ERROR("%s: failed bd_queue_allocate\n", + priv->net_dev->name); + return err; + } + + priv->tx_buffers = + (struct ipw2100_tx_packet *)kmalloc(TX_PENDED_QUEUE_LENGTH * + sizeof(struct + ipw2100_tx_packet), + GFP_ATOMIC); + if (!priv->tx_buffers) { + printk(KERN_ERR DRV_NAME + ": %s: alloc failed form tx buffers.\n", + priv->net_dev->name); + bd_queue_free(priv, &priv->tx_queue); + return -ENOMEM; + } + + for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) { + v = pci_alloc_consistent(priv->pci_dev, + sizeof(struct ipw2100_data_header), + &p); + if (!v) { + printk(KERN_ERR DRV_NAME + ": %s: PCI alloc failed for tx " "buffers.\n", + priv->net_dev->name); + err = -ENOMEM; + break; + } + + priv->tx_buffers[i].type = DATA; + priv->tx_buffers[i].info.d_struct.data = + (struct ipw2100_data_header *)v; + priv->tx_buffers[i].info.d_struct.data_phys = p; + priv->tx_buffers[i].info.d_struct.txb = NULL; + } + + if (i == TX_PENDED_QUEUE_LENGTH) + return 0; + + for (j = 0; j < i; j++) { + pci_free_consistent(priv->pci_dev, + sizeof(struct ipw2100_data_header), + priv->tx_buffers[j].info.d_struct.data, + priv->tx_buffers[j].info.d_struct. + data_phys); + } + + kfree(priv->tx_buffers); + priv->tx_buffers = NULL; + + return err; +} + +static void ipw2100_tx_initialize(struct ipw2100_priv *priv) +{ + int i; + + IPW_DEBUG_INFO("enter\n"); + + /* + * reinitialize packet info lists + */ + INIT_LIST_HEAD(&priv->fw_pend_list); + INIT_STAT(&priv->fw_pend_stat); + + /* + * reinitialize lists + */ + INIT_LIST_HEAD(&priv->tx_pend_list); + INIT_LIST_HEAD(&priv->tx_free_list); + INIT_STAT(&priv->tx_pend_stat); + INIT_STAT(&priv->tx_free_stat); + + for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) { + /* We simply drop any SKBs that have been queued for + * transmit */ + if (priv->tx_buffers[i].info.d_struct.txb) { + libipw_txb_free(priv->tx_buffers[i].info.d_struct. + txb); + priv->tx_buffers[i].info.d_struct.txb = NULL; + } + + list_add_tail(&priv->tx_buffers[i].list, &priv->tx_free_list); + } + + SET_STAT(&priv->tx_free_stat, i); + + priv->tx_queue.oldest = 0; + priv->tx_queue.available = priv->tx_queue.entries; + priv->tx_queue.next = 0; + INIT_STAT(&priv->txq_stat); + SET_STAT(&priv->txq_stat, priv->tx_queue.available); + + bd_queue_initialize(priv, &priv->tx_queue, + IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE, + IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE, + IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX, + IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX); + + IPW_DEBUG_INFO("exit\n"); + +} + +static void ipw2100_tx_free(struct ipw2100_priv *priv) +{ + int i; + + IPW_DEBUG_INFO("enter\n"); + + bd_queue_free(priv, &priv->tx_queue); + + if (!priv->tx_buffers) + return; + + for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) { + if (priv->tx_buffers[i].info.d_struct.txb) { + libipw_txb_free(priv->tx_buffers[i].info.d_struct. 
+ txb); + priv->tx_buffers[i].info.d_struct.txb = NULL; + } + if (priv->tx_buffers[i].info.d_struct.data) + pci_free_consistent(priv->pci_dev, + sizeof(struct ipw2100_data_header), + priv->tx_buffers[i].info.d_struct. + data, + priv->tx_buffers[i].info.d_struct. + data_phys); + } + + kfree(priv->tx_buffers); + priv->tx_buffers = NULL; + + IPW_DEBUG_INFO("exit\n"); +} + +static int ipw2100_rx_allocate(struct ipw2100_priv *priv) +{ + int i, j, err = -EINVAL; + + IPW_DEBUG_INFO("enter\n"); + + err = bd_queue_allocate(priv, &priv->rx_queue, RX_QUEUE_LENGTH); + if (err) { + IPW_DEBUG_INFO("failed bd_queue_allocate\n"); + return err; + } + + err = status_queue_allocate(priv, RX_QUEUE_LENGTH); + if (err) { + IPW_DEBUG_INFO("failed status_queue_allocate\n"); + bd_queue_free(priv, &priv->rx_queue); + return err; + } + + /* + * allocate packets + */ + priv->rx_buffers = (struct ipw2100_rx_packet *) + kmalloc(RX_QUEUE_LENGTH * sizeof(struct ipw2100_rx_packet), + GFP_KERNEL); + if (!priv->rx_buffers) { + IPW_DEBUG_INFO("can't allocate rx packet buffer table\n"); + + bd_queue_free(priv, &priv->rx_queue); + + status_queue_free(priv); + + return -ENOMEM; + } + + for (i = 0; i < RX_QUEUE_LENGTH; i++) { + struct ipw2100_rx_packet *packet = &priv->rx_buffers[i]; + + err = ipw2100_alloc_skb(priv, packet); + if (unlikely(err)) { + err = -ENOMEM; + break; + } + + /* The BD holds the cache aligned address */ + priv->rx_queue.drv[i].host_addr = packet->dma_addr; + priv->rx_queue.drv[i].buf_length = IPW_RX_NIC_BUFFER_LENGTH; + priv->status_queue.drv[i].status_fields = 0; + } + + if (i == RX_QUEUE_LENGTH) + return 0; + + for (j = 0; j < i; j++) { + pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr, + sizeof(struct ipw2100_rx_packet), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(priv->rx_buffers[j].skb); + } + + kfree(priv->rx_buffers); + priv->rx_buffers = NULL; + + bd_queue_free(priv, &priv->rx_queue); + + status_queue_free(priv); + + return err; +} + +static void ipw2100_rx_initialize(struct ipw2100_priv *priv) +{ + IPW_DEBUG_INFO("enter\n"); + + priv->rx_queue.oldest = 0; + priv->rx_queue.available = priv->rx_queue.entries - 1; + priv->rx_queue.next = priv->rx_queue.entries - 1; + + INIT_STAT(&priv->rxq_stat); + SET_STAT(&priv->rxq_stat, priv->rx_queue.available); + + bd_queue_initialize(priv, &priv->rx_queue, + IPW_MEM_HOST_SHARED_RX_BD_BASE, + IPW_MEM_HOST_SHARED_RX_BD_SIZE, + IPW_MEM_HOST_SHARED_RX_READ_INDEX, + IPW_MEM_HOST_SHARED_RX_WRITE_INDEX); + + /* set up the status queue */ + write_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_STATUS_BASE, + priv->status_queue.nic); + + IPW_DEBUG_INFO("exit\n"); +} + +static void ipw2100_rx_free(struct ipw2100_priv *priv) +{ + int i; + + IPW_DEBUG_INFO("enter\n"); + + bd_queue_free(priv, &priv->rx_queue); + status_queue_free(priv); + + if (!priv->rx_buffers) + return; + + for (i = 0; i < RX_QUEUE_LENGTH; i++) { + if (priv->rx_buffers[i].rxp) { + pci_unmap_single(priv->pci_dev, + priv->rx_buffers[i].dma_addr, + sizeof(struct ipw2100_rx), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(priv->rx_buffers[i].skb); + } + } + + kfree(priv->rx_buffers); + priv->rx_buffers = NULL; + + IPW_DEBUG_INFO("exit\n"); +} + +static int ipw2100_read_mac_address(struct ipw2100_priv *priv) +{ + u32 length = ETH_ALEN; + u8 addr[ETH_ALEN]; + + int err; + + err = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ADAPTER_MAC, addr, &length); + if (err) { + IPW_DEBUG_INFO("MAC address read failed\n"); + return -EIO; + } + + memcpy(priv->net_dev->dev_addr, addr, ETH_ALEN); + IPW_DEBUG_INFO("card MAC is 
%pM\n", priv->net_dev->dev_addr); + + return 0; +} + +/******************************************************************** + * + * Firmware Commands + * + ********************************************************************/ + +static int ipw2100_set_mac_address(struct ipw2100_priv *priv, int batch_mode) +{ + struct host_command cmd = { + .host_command = ADAPTER_ADDRESS, + .host_command_sequence = 0, + .host_command_length = ETH_ALEN + }; + int err; + + IPW_DEBUG_HC("SET_MAC_ADDRESS\n"); + + IPW_DEBUG_INFO("enter\n"); + + if (priv->config & CFG_CUSTOM_MAC) { + memcpy(cmd.host_command_parameters, priv->mac_addr, ETH_ALEN); + memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); + } else + memcpy(cmd.host_command_parameters, priv->net_dev->dev_addr, + ETH_ALEN); + + err = ipw2100_hw_send_command(priv, &cmd); + + IPW_DEBUG_INFO("exit\n"); + return err; +} + +static int ipw2100_set_port_type(struct ipw2100_priv *priv, u32 port_type, + int batch_mode) +{ + struct host_command cmd = { + .host_command = PORT_TYPE, + .host_command_sequence = 0, + .host_command_length = sizeof(u32) + }; + int err; + + switch (port_type) { + case IW_MODE_INFRA: + cmd.host_command_parameters[0] = IPW_BSS; + break; + case IW_MODE_ADHOC: + cmd.host_command_parameters[0] = IPW_IBSS; + break; + } + + IPW_DEBUG_HC("PORT_TYPE: %s\n", + port_type == IPW_IBSS ? "Ad-Hoc" : "Managed"); + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) { + printk(KERN_ERR DRV_NAME + ": %s: Could not disable adapter %d\n", + priv->net_dev->name, err); + return err; + } + } + + /* send cmd to firmware */ + err = ipw2100_hw_send_command(priv, &cmd); + + if (!batch_mode) + ipw2100_enable_adapter(priv); + + return err; +} + +static int ipw2100_set_channel(struct ipw2100_priv *priv, u32 channel, + int batch_mode) +{ + struct host_command cmd = { + .host_command = CHANNEL, + .host_command_sequence = 0, + .host_command_length = sizeof(u32) + }; + int err; + + cmd.host_command_parameters[0] = channel; + + IPW_DEBUG_HC("CHANNEL: %d\n", channel); + + /* If BSS then we don't support channel selection */ + if (priv->ieee->iw_mode == IW_MODE_INFRA) + return 0; + + if ((channel != 0) && + ((channel < REG_MIN_CHANNEL) || (channel > REG_MAX_CHANNEL))) + return -EINVAL; + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) + return err; + } + + err = ipw2100_hw_send_command(priv, &cmd); + if (err) { + IPW_DEBUG_INFO("Failed to set channel to %d", channel); + return err; + } + + if (channel) + priv->config |= CFG_STATIC_CHANNEL; + else + priv->config &= ~CFG_STATIC_CHANNEL; + + priv->channel = channel; + + if (!batch_mode) { + err = ipw2100_enable_adapter(priv); + if (err) + return err; + } + + return 0; +} + +static int ipw2100_system_config(struct ipw2100_priv *priv, int batch_mode) +{ + struct host_command cmd = { + .host_command = SYSTEM_CONFIG, + .host_command_sequence = 0, + .host_command_length = 12, + }; + u32 ibss_mask, len = sizeof(u32); + int err; + + /* Set system configuration */ + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) + return err; + } + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) + cmd.host_command_parameters[0] |= IPW_CFG_IBSS_AUTO_START; + + cmd.host_command_parameters[0] |= IPW_CFG_IBSS_MASK | + IPW_CFG_BSS_MASK | IPW_CFG_802_1x_ENABLE; + + if (!(priv->config & CFG_LONG_PREAMBLE)) + cmd.host_command_parameters[0] |= IPW_CFG_PREAMBLE_AUTO; + + err = ipw2100_get_ordinal(priv, + IPW_ORD_EEPROM_IBSS_11B_CHANNELS, + &ibss_mask, &len); + if (err) + ibss_mask = 
IPW_IBSS_11B_DEFAULT_MASK; + + cmd.host_command_parameters[1] = REG_CHANNEL_MASK; + cmd.host_command_parameters[2] = REG_CHANNEL_MASK & ibss_mask; + + /* 11b only */ + /*cmd.host_command_parameters[0] |= DIVERSITY_ANTENNA_A; */ + + err = ipw2100_hw_send_command(priv, &cmd); + if (err) + return err; + +/* If IPv6 is configured in the kernel then we don't want to filter out all + * of the multicast packets as IPv6 needs some. */ +#if !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) + cmd.host_command = ADD_MULTICAST; + cmd.host_command_sequence = 0; + cmd.host_command_length = 0; + + ipw2100_hw_send_command(priv, &cmd); +#endif + if (!batch_mode) { + err = ipw2100_enable_adapter(priv); + if (err) + return err; + } + + return 0; +} + +static int ipw2100_set_tx_rates(struct ipw2100_priv *priv, u32 rate, + int batch_mode) +{ + struct host_command cmd = { + .host_command = BASIC_TX_RATES, + .host_command_sequence = 0, + .host_command_length = 4 + }; + int err; + + cmd.host_command_parameters[0] = rate & TX_RATE_MASK; + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) + return err; + } + + /* Set BASIC TX Rate first */ + ipw2100_hw_send_command(priv, &cmd); + + /* Set TX Rate */ + cmd.host_command = TX_RATES; + ipw2100_hw_send_command(priv, &cmd); + + /* Set MSDU TX Rate */ + cmd.host_command = MSDU_TX_RATES; + ipw2100_hw_send_command(priv, &cmd); + + if (!batch_mode) { + err = ipw2100_enable_adapter(priv); + if (err) + return err; + } + + priv->tx_rates = rate; + + return 0; +} + +static int ipw2100_set_power_mode(struct ipw2100_priv *priv, int power_level) +{ + struct host_command cmd = { + .host_command = POWER_MODE, + .host_command_sequence = 0, + .host_command_length = 4 + }; + int err; + + cmd.host_command_parameters[0] = power_level; + + err = ipw2100_hw_send_command(priv, &cmd); + if (err) + return err; + + if (power_level == IPW_POWER_MODE_CAM) + priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); + else + priv->power_mode = IPW_POWER_ENABLED | power_level; + +#ifdef IPW2100_TX_POWER + if (priv->port_type == IBSS && priv->adhoc_power != DFTL_IBSS_TX_POWER) { + /* Set beacon interval */ + cmd.host_command = TX_POWER_INDEX; + cmd.host_command_parameters[0] = (u32) priv->adhoc_power; + + err = ipw2100_hw_send_command(priv, &cmd); + if (err) + return err; + } +#endif + + return 0; +} + +static int ipw2100_set_rts_threshold(struct ipw2100_priv *priv, u32 threshold) +{ + struct host_command cmd = { + .host_command = RTS_THRESHOLD, + .host_command_sequence = 0, + .host_command_length = 4 + }; + int err; + + if (threshold & RTS_DISABLED) + cmd.host_command_parameters[0] = MAX_RTS_THRESHOLD; + else + cmd.host_command_parameters[0] = threshold & ~RTS_DISABLED; + + err = ipw2100_hw_send_command(priv, &cmd); + if (err) + return err; + + priv->rts_threshold = threshold; + + return 0; +} + +#if 0 +int ipw2100_set_fragmentation_threshold(struct ipw2100_priv *priv, + u32 threshold, int batch_mode) +{ + struct host_command cmd = { + .host_command = FRAG_THRESHOLD, + .host_command_sequence = 0, + .host_command_length = 4, + .host_command_parameters[0] = 0, + }; + int err; + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) + return err; + } + + if (threshold == 0) + threshold = DEFAULT_FRAG_THRESHOLD; + else { + threshold = max(threshold, MIN_FRAG_THRESHOLD); + threshold = min(threshold, MAX_FRAG_THRESHOLD); + } + + cmd.host_command_parameters[0] = threshold; + + IPW_DEBUG_HC("FRAG_THRESHOLD: %u\n", threshold); + + err = ipw2100_hw_send_command(priv, 
&cmd); + + if (!batch_mode) + ipw2100_enable_adapter(priv); + + if (!err) + priv->frag_threshold = threshold; + + return err; +} +#endif + +static int ipw2100_set_short_retry(struct ipw2100_priv *priv, u32 retry) +{ + struct host_command cmd = { + .host_command = SHORT_RETRY_LIMIT, + .host_command_sequence = 0, + .host_command_length = 4 + }; + int err; + + cmd.host_command_parameters[0] = retry; + + err = ipw2100_hw_send_command(priv, &cmd); + if (err) + return err; + + priv->short_retry_limit = retry; + + return 0; +} + +static int ipw2100_set_long_retry(struct ipw2100_priv *priv, u32 retry) +{ + struct host_command cmd = { + .host_command = LONG_RETRY_LIMIT, + .host_command_sequence = 0, + .host_command_length = 4 + }; + int err; + + cmd.host_command_parameters[0] = retry; + + err = ipw2100_hw_send_command(priv, &cmd); + if (err) + return err; + + priv->long_retry_limit = retry; + + return 0; +} + +static int ipw2100_set_mandatory_bssid(struct ipw2100_priv *priv, u8 * bssid, + int batch_mode) +{ + struct host_command cmd = { + .host_command = MANDATORY_BSSID, + .host_command_sequence = 0, + .host_command_length = (bssid == NULL) ? 0 : ETH_ALEN + }; + int err; + +#ifdef CONFIG_IPW2100_DEBUG + if (bssid != NULL) + IPW_DEBUG_HC("MANDATORY_BSSID: %pM\n", bssid); + else + IPW_DEBUG_HC("MANDATORY_BSSID: \n"); +#endif + /* if BSSID is empty then we disable mandatory bssid mode */ + if (bssid != NULL) + memcpy(cmd.host_command_parameters, bssid, ETH_ALEN); + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) + return err; + } + + err = ipw2100_hw_send_command(priv, &cmd); + + if (!batch_mode) + ipw2100_enable_adapter(priv); + + return err; +} + +static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv) +{ + struct host_command cmd = { + .host_command = DISASSOCIATION_BSSID, + .host_command_sequence = 0, + .host_command_length = ETH_ALEN + }; + int err; + int len; + + IPW_DEBUG_HC("DISASSOCIATION_BSSID\n"); + + len = ETH_ALEN; + /* The Firmware currently ignores the BSSID and just disassociates from + * the currently associated AP -- but in the off chance that a future + * firmware does use the BSSID provided here, we go ahead and try and + * set it to the currently associated AP's BSSID */ + memcpy(cmd.host_command_parameters, priv->bssid, ETH_ALEN); + + err = ipw2100_hw_send_command(priv, &cmd); + + return err; +} + +static int ipw2100_set_wpa_ie(struct ipw2100_priv *, + struct ipw2100_wpa_assoc_frame *, int) + __attribute__ ((unused)); + +static int ipw2100_set_wpa_ie(struct ipw2100_priv *priv, + struct ipw2100_wpa_assoc_frame *wpa_frame, + int batch_mode) +{ + struct host_command cmd = { + .host_command = SET_WPA_IE, + .host_command_sequence = 0, + .host_command_length = sizeof(struct ipw2100_wpa_assoc_frame), + }; + int err; + + IPW_DEBUG_HC("SET_WPA_IE\n"); + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) + return err; + } + + memcpy(cmd.host_command_parameters, wpa_frame, + sizeof(struct ipw2100_wpa_assoc_frame)); + + err = ipw2100_hw_send_command(priv, &cmd); + + if (!batch_mode) { + if (ipw2100_enable_adapter(priv)) + err = -EIO; + } + + return err; +} + +struct security_info_params { + u32 allowed_ciphers; + u16 version; + u8 auth_mode; + u8 replay_counters_number; + u8 unicast_using_group; +} __attribute__ ((packed)); + +static int ipw2100_set_security_information(struct ipw2100_priv *priv, + int auth_mode, + int security_level, + int unicast_using_group, + int batch_mode) +{ + struct host_command cmd = { + .host_command = 
SET_SECURITY_INFORMATION, + .host_command_sequence = 0, + .host_command_length = sizeof(struct security_info_params) + }; + struct security_info_params *security = + (struct security_info_params *)&cmd.host_command_parameters; + int err; + memset(security, 0, sizeof(*security)); + + /* If shared key AP authentication is turned on, then we need to + * configure the firmware to try and use it. + * + * Actual data encryption/decryption is handled by the host. */ + security->auth_mode = auth_mode; + security->unicast_using_group = unicast_using_group; + + switch (security_level) { + default: + case SEC_LEVEL_0: + security->allowed_ciphers = IPW_NONE_CIPHER; + break; + case SEC_LEVEL_1: + security->allowed_ciphers = IPW_WEP40_CIPHER | + IPW_WEP104_CIPHER; + break; + case SEC_LEVEL_2: + security->allowed_ciphers = IPW_WEP40_CIPHER | + IPW_WEP104_CIPHER | IPW_TKIP_CIPHER; + break; + case SEC_LEVEL_2_CKIP: + security->allowed_ciphers = IPW_WEP40_CIPHER | + IPW_WEP104_CIPHER | IPW_CKIP_CIPHER; + break; + case SEC_LEVEL_3: + security->allowed_ciphers = IPW_WEP40_CIPHER | + IPW_WEP104_CIPHER | IPW_TKIP_CIPHER | IPW_CCMP_CIPHER; + break; + } + + IPW_DEBUG_HC + ("SET_SECURITY_INFORMATION: auth:%d cipher:0x%02X (level %d)\n", + security->auth_mode, security->allowed_ciphers, security_level); + + security->replay_counters_number = 0; + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) + return err; + } + + err = ipw2100_hw_send_command(priv, &cmd); + + if (!batch_mode) + ipw2100_enable_adapter(priv); + + return err; +} + +static int ipw2100_set_tx_power(struct ipw2100_priv *priv, u32 tx_power) +{ + struct host_command cmd = { + .host_command = TX_POWER_INDEX, + .host_command_sequence = 0, + .host_command_length = 4 + }; + int err = 0; + u32 tmp = tx_power; + + if (tx_power != IPW_TX_POWER_DEFAULT) + tmp = (tx_power - IPW_TX_POWER_MIN_DBM) * 16 / + (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM); + + cmd.host_command_parameters[0] = tmp; + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) + err = ipw2100_hw_send_command(priv, &cmd); + if (!err) + priv->tx_power = tx_power; + + return 0; +} + +static int ipw2100_set_ibss_beacon_interval(struct ipw2100_priv *priv, + u32 interval, int batch_mode) +{ + struct host_command cmd = { + .host_command = BEACON_INTERVAL, + .host_command_sequence = 0, + .host_command_length = 4 + }; + int err; + + cmd.host_command_parameters[0] = interval; + + IPW_DEBUG_INFO("enter\n"); + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) { + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) + return err; + } + + ipw2100_hw_send_command(priv, &cmd); + + if (!batch_mode) { + err = ipw2100_enable_adapter(priv); + if (err) + return err; + } + } + + IPW_DEBUG_INFO("exit\n"); + + return 0; +} + +static void ipw2100_queues_initialize(struct ipw2100_priv *priv) +{ + ipw2100_tx_initialize(priv); + ipw2100_rx_initialize(priv); + ipw2100_msg_initialize(priv); +} + +static void ipw2100_queues_free(struct ipw2100_priv *priv) +{ + ipw2100_tx_free(priv); + ipw2100_rx_free(priv); + ipw2100_msg_free(priv); +} + +static int ipw2100_queues_allocate(struct ipw2100_priv *priv) +{ + if (ipw2100_tx_allocate(priv) || + ipw2100_rx_allocate(priv) || ipw2100_msg_allocate(priv)) + goto fail; + + return 0; + + fail: + ipw2100_tx_free(priv); + ipw2100_rx_free(priv); + ipw2100_msg_free(priv); + return -ENOMEM; +} + +#define IPW_PRIVACY_CAPABLE 0x0008 + +static int ipw2100_set_wep_flags(struct ipw2100_priv *priv, u32 flags, + int batch_mode) +{ + struct host_command cmd = { + .host_command 
= WEP_FLAGS, + .host_command_sequence = 0, + .host_command_length = 4 + }; + int err; + + cmd.host_command_parameters[0] = flags; + + IPW_DEBUG_HC("WEP_FLAGS: flags = 0x%08X\n", flags); + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) { + printk(KERN_ERR DRV_NAME + ": %s: Could not disable adapter %d\n", + priv->net_dev->name, err); + return err; + } + } + + /* send cmd to firmware */ + err = ipw2100_hw_send_command(priv, &cmd); + + if (!batch_mode) + ipw2100_enable_adapter(priv); + + return err; +} + +struct ipw2100_wep_key { + u8 idx; + u8 len; + u8 key[13]; +}; + +/* Macros to ease up priting WEP keys */ +#define WEP_FMT_64 "%02X%02X%02X%02X-%02X" +#define WEP_FMT_128 "%02X%02X%02X%02X-%02X%02X%02X%02X-%02X%02X%02X" +#define WEP_STR_64(x) x[0],x[1],x[2],x[3],x[4] +#define WEP_STR_128(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10] + +/** + * Set a the wep key + * + * @priv: struct to work on + * @idx: index of the key we want to set + * @key: ptr to the key data to set + * @len: length of the buffer at @key + * @batch_mode: FIXME perform the operation in batch mode, not + * disabling the device. + * + * @returns 0 if OK, < 0 errno code on error. + * + * Fill out a command structure with the new wep key, length an + * index and send it down the wire. + */ +static int ipw2100_set_key(struct ipw2100_priv *priv, + int idx, char *key, int len, int batch_mode) +{ + int keylen = len ? (len <= 5 ? 5 : 13) : 0; + struct host_command cmd = { + .host_command = WEP_KEY_INFO, + .host_command_sequence = 0, + .host_command_length = sizeof(struct ipw2100_wep_key), + }; + struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters; + int err; + + IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n", + idx, keylen, len); + + /* NOTE: We don't check cached values in case the firmware was reset + * or some other problem is occurring. If the user is setting the key, + * then we push the change */ + + wep_key->idx = idx; + wep_key->len = keylen; + + if (keylen) { + memcpy(wep_key->key, key, len); + memset(wep_key->key + len, 0, keylen - len); + } + + /* Will be optimized out on debug not being configured in */ + if (keylen == 0) + IPW_DEBUG_WEP("%s: Clearing key %d\n", + priv->net_dev->name, wep_key->idx); + else if (keylen == 5) + IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_64 "\n", + priv->net_dev->name, wep_key->idx, wep_key->len, + WEP_STR_64(wep_key->key)); + else + IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_128 + "\n", + priv->net_dev->name, wep_key->idx, wep_key->len, + WEP_STR_128(wep_key->key)); + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + /* FIXME: IPG: shouldn't this prink be in _disable_adapter()? 
*/ + if (err) { + printk(KERN_ERR DRV_NAME + ": %s: Could not disable adapter %d\n", + priv->net_dev->name, err); + return err; + } + } + + /* send cmd to firmware */ + err = ipw2100_hw_send_command(priv, &cmd); + + if (!batch_mode) { + int err2 = ipw2100_enable_adapter(priv); + if (err == 0) + err = err2; + } + return err; +} + +static int ipw2100_set_key_index(struct ipw2100_priv *priv, + int idx, int batch_mode) +{ + struct host_command cmd = { + .host_command = WEP_KEY_INDEX, + .host_command_sequence = 0, + .host_command_length = 4, + .host_command_parameters = {idx}, + }; + int err; + + IPW_DEBUG_HC("WEP_KEY_INDEX: index = %d\n", idx); + + if (idx < 0 || idx > 3) + return -EINVAL; + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) { + printk(KERN_ERR DRV_NAME + ": %s: Could not disable adapter %d\n", + priv->net_dev->name, err); + return err; + } + } + + /* send cmd to firmware */ + err = ipw2100_hw_send_command(priv, &cmd); + + if (!batch_mode) + ipw2100_enable_adapter(priv); + + return err; +} + +static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode) +{ + int i, err, auth_mode, sec_level, use_group; + + if (!(priv->status & STATUS_RUNNING)) + return 0; + + if (!batch_mode) { + err = ipw2100_disable_adapter(priv); + if (err) + return err; + } + + if (!priv->ieee->sec.enabled) { + err = + ipw2100_set_security_information(priv, IPW_AUTH_OPEN, + SEC_LEVEL_0, 0, 1); + } else { + auth_mode = IPW_AUTH_OPEN; + if (priv->ieee->sec.flags & SEC_AUTH_MODE) { + if (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY) + auth_mode = IPW_AUTH_SHARED; + else if (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP) + auth_mode = IPW_AUTH_LEAP_CISCO_ID; + } + + sec_level = SEC_LEVEL_0; + if (priv->ieee->sec.flags & SEC_LEVEL) + sec_level = priv->ieee->sec.level; + + use_group = 0; + if (priv->ieee->sec.flags & SEC_UNICAST_GROUP) + use_group = priv->ieee->sec.unicast_uses_group; + + err = + ipw2100_set_security_information(priv, auth_mode, sec_level, + use_group, 1); + } + + if (err) + goto exit; + + if (priv->ieee->sec.enabled) { + for (i = 0; i < 4; i++) { + if (!(priv->ieee->sec.flags & (1 << i))) { + memset(priv->ieee->sec.keys[i], 0, WEP_KEY_LEN); + priv->ieee->sec.key_sizes[i] = 0; + } else { + err = ipw2100_set_key(priv, i, + priv->ieee->sec.keys[i], + priv->ieee->sec. + key_sizes[i], 1); + if (err) + goto exit; + } + } + + ipw2100_set_key_index(priv, priv->ieee->crypt_info.tx_keyidx, 1); + } + + /* Always enable privacy so the Host can filter WEP packets if + * encrypted data is sent up */ + err = + ipw2100_set_wep_flags(priv, + priv->ieee->sec. + enabled ? 
IPW_PRIVACY_CAPABLE : 0, 1); + if (err) + goto exit; + + priv->status &= ~STATUS_SECURITY_UPDATED; + + exit: + if (!batch_mode) + ipw2100_enable_adapter(priv); + + return err; +} + +static void ipw2100_security_work(struct work_struct *work) +{ + struct ipw2100_priv *priv = + container_of(work, struct ipw2100_priv, security_work.work); + + /* If we happen to have reconnected before we get a chance to + * process this, then update the security settings--which causes + * a disassociation to occur */ + if (!(priv->status & STATUS_ASSOCIATED) && + priv->status & STATUS_SECURITY_UPDATED) + ipw2100_configure_security(priv, 0); +} + +static void shim__set_security(struct net_device *dev, + struct libipw_security *sec) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int i, force_update = 0; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) + goto done; + + for (i = 0; i < 4; i++) { + if (sec->flags & (1 << i)) { + priv->ieee->sec.key_sizes[i] = sec->key_sizes[i]; + if (sec->key_sizes[i] == 0) + priv->ieee->sec.flags &= ~(1 << i); + else + memcpy(priv->ieee->sec.keys[i], sec->keys[i], + sec->key_sizes[i]); + if (sec->level == SEC_LEVEL_1) { + priv->ieee->sec.flags |= (1 << i); + priv->status |= STATUS_SECURITY_UPDATED; + } else + priv->ieee->sec.flags &= ~(1 << i); + } + } + + if ((sec->flags & SEC_ACTIVE_KEY) && + priv->ieee->sec.active_key != sec->active_key) { + if (sec->active_key <= 3) { + priv->ieee->sec.active_key = sec->active_key; + priv->ieee->sec.flags |= SEC_ACTIVE_KEY; + } else + priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; + + priv->status |= STATUS_SECURITY_UPDATED; + } + + if ((sec->flags & SEC_AUTH_MODE) && + (priv->ieee->sec.auth_mode != sec->auth_mode)) { + priv->ieee->sec.auth_mode = sec->auth_mode; + priv->ieee->sec.flags |= SEC_AUTH_MODE; + priv->status |= STATUS_SECURITY_UPDATED; + } + + if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) { + priv->ieee->sec.flags |= SEC_ENABLED; + priv->ieee->sec.enabled = sec->enabled; + priv->status |= STATUS_SECURITY_UPDATED; + force_update = 1; + } + + if (sec->flags & SEC_ENCRYPT) + priv->ieee->sec.encrypt = sec->encrypt; + + if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) { + priv->ieee->sec.level = sec->level; + priv->ieee->sec.flags |= SEC_LEVEL; + priv->status |= STATUS_SECURITY_UPDATED; + } + + IPW_DEBUG_WEP("Security flags: %c %c%c%c%c %c%c%c%c\n", + priv->ieee->sec.flags & (1 << 8) ? '1' : '0', + priv->ieee->sec.flags & (1 << 7) ? '1' : '0', + priv->ieee->sec.flags & (1 << 6) ? '1' : '0', + priv->ieee->sec.flags & (1 << 5) ? '1' : '0', + priv->ieee->sec.flags & (1 << 4) ? '1' : '0', + priv->ieee->sec.flags & (1 << 3) ? '1' : '0', + priv->ieee->sec.flags & (1 << 2) ? '1' : '0', + priv->ieee->sec.flags & (1 << 1) ? '1' : '0', + priv->ieee->sec.flags & (1 << 0) ? '1' : '0'); + +/* As a temporary work around to enable WPA until we figure out why + * wpa_supplicant toggles the security capability of the driver, which + * forces a disassocation with force_update... 
+ * + * if (force_update || !(priv->status & STATUS_ASSOCIATED))*/ + if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) + ipw2100_configure_security(priv, 0); + done: + mutex_unlock(&priv->action_mutex); +} + +static int ipw2100_adapter_setup(struct ipw2100_priv *priv) +{ + int err; + int batch_mode = 1; + u8 *bssid; + + IPW_DEBUG_INFO("enter\n"); + + err = ipw2100_disable_adapter(priv); + if (err) + return err; +#ifdef CONFIG_IPW2100_MONITOR + if (priv->ieee->iw_mode == IW_MODE_MONITOR) { + err = ipw2100_set_channel(priv, priv->channel, batch_mode); + if (err) + return err; + + IPW_DEBUG_INFO("exit\n"); + + return 0; + } +#endif /* CONFIG_IPW2100_MONITOR */ + + err = ipw2100_read_mac_address(priv); + if (err) + return -EIO; + + err = ipw2100_set_mac_address(priv, batch_mode); + if (err) + return err; + + err = ipw2100_set_port_type(priv, priv->ieee->iw_mode, batch_mode); + if (err) + return err; + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) { + err = ipw2100_set_channel(priv, priv->channel, batch_mode); + if (err) + return err; + } + + err = ipw2100_system_config(priv, batch_mode); + if (err) + return err; + + err = ipw2100_set_tx_rates(priv, priv->tx_rates, batch_mode); + if (err) + return err; + + /* Default to power mode OFF */ + err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM); + if (err) + return err; + + err = ipw2100_set_rts_threshold(priv, priv->rts_threshold); + if (err) + return err; + + if (priv->config & CFG_STATIC_BSSID) + bssid = priv->bssid; + else + bssid = NULL; + err = ipw2100_set_mandatory_bssid(priv, bssid, batch_mode); + if (err) + return err; + + if (priv->config & CFG_STATIC_ESSID) + err = ipw2100_set_essid(priv, priv->essid, priv->essid_len, + batch_mode); + else + err = ipw2100_set_essid(priv, NULL, 0, batch_mode); + if (err) + return err; + + err = ipw2100_configure_security(priv, batch_mode); + if (err) + return err; + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) { + err = + ipw2100_set_ibss_beacon_interval(priv, + priv->beacon_interval, + batch_mode); + if (err) + return err; + + err = ipw2100_set_tx_power(priv, priv->tx_power); + if (err) + return err; + } + + /* + err = ipw2100_set_fragmentation_threshold( + priv, priv->frag_threshold, batch_mode); + if (err) + return err; + */ + + IPW_DEBUG_INFO("exit\n"); + + return 0; +} + +/************************************************************************* + * + * EXTERNALLY CALLED METHODS + * + *************************************************************************/ + +/* This method is called by the network layer -- not to be confused with + * ipw2100_set_mac_address() declared above called by this driver (and this + * method as well) to talk to the firmware */ +static int ipw2100_set_address(struct net_device *dev, void *p) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + struct sockaddr *addr = p; + int err = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + mutex_lock(&priv->action_mutex); + + priv->config |= CFG_CUSTOM_MAC; + memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); + + err = ipw2100_set_mac_address(priv, 0); + if (err) + goto done; + + priv->reset_backoff = 0; + mutex_unlock(&priv->action_mutex); + ipw2100_reset_adapter(&priv->reset_work.work); + return 0; + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_open(struct net_device *dev) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + unsigned long flags; + IPW_DEBUG_INFO("dev->open\n"); + + spin_lock_irqsave(&priv->low_lock, flags); + if (priv->status & 
STATUS_ASSOCIATED) { + netif_carrier_on(dev); + netif_start_queue(dev); + } + spin_unlock_irqrestore(&priv->low_lock, flags); + + return 0; +} + +static int ipw2100_close(struct net_device *dev) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + unsigned long flags; + struct list_head *element; + struct ipw2100_tx_packet *packet; + + IPW_DEBUG_INFO("enter\n"); + + spin_lock_irqsave(&priv->low_lock, flags); + + if (priv->status & STATUS_ASSOCIATED) + netif_carrier_off(dev); + netif_stop_queue(dev); + + /* Flush the TX queue ... */ + while (!list_empty(&priv->tx_pend_list)) { + element = priv->tx_pend_list.next; + packet = list_entry(element, struct ipw2100_tx_packet, list); + + list_del(element); + DEC_STAT(&priv->tx_pend_stat); + + libipw_txb_free(packet->info.d_struct.txb); + packet->info.d_struct.txb = NULL; + + list_add_tail(element, &priv->tx_free_list); + INC_STAT(&priv->tx_free_stat); + } + spin_unlock_irqrestore(&priv->low_lock, flags); + + IPW_DEBUG_INFO("exit\n"); + + return 0; +} + +/* + * TODO: Fix this function... its just wrong + */ +static void ipw2100_tx_timeout(struct net_device *dev) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + + dev->stats.tx_errors++; + +#ifdef CONFIG_IPW2100_MONITOR + if (priv->ieee->iw_mode == IW_MODE_MONITOR) + return; +#endif + + IPW_DEBUG_INFO("%s: TX timed out. Scheduling firmware restart.\n", + dev->name); + schedule_reset(priv); +} + +static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value) +{ + /* This is called when wpa_supplicant loads and closes the driver + * interface. */ + priv->ieee->wpa_enabled = value; + return 0; +} + +static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value) +{ + + struct libipw_device *ieee = priv->ieee; + struct libipw_security sec = { + .flags = SEC_AUTH_MODE, + }; + int ret = 0; + + if (value & IW_AUTH_ALG_SHARED_KEY) { + sec.auth_mode = WLAN_AUTH_SHARED_KEY; + ieee->open_wep = 0; + } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { + sec.auth_mode = WLAN_AUTH_OPEN; + ieee->open_wep = 1; + } else if (value & IW_AUTH_ALG_LEAP) { + sec.auth_mode = WLAN_AUTH_LEAP; + ieee->open_wep = 1; + } else + return -EINVAL; + + if (ieee->set_security) + ieee->set_security(ieee->dev, &sec); + else + ret = -EOPNOTSUPP; + + return ret; +} + +static void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv, + char *wpa_ie, int wpa_ie_len) +{ + + struct ipw2100_wpa_assoc_frame frame; + + frame.fixed_ie_mask = 0; + + /* copy WPA IE */ + memcpy(frame.var_ie, wpa_ie, wpa_ie_len); + frame.var_ie_len = wpa_ie_len; + + /* make sure WPA is enabled */ + ipw2100_wpa_enable(priv, 1); + ipw2100_set_wpa_ie(priv, &frame, 0); +} + +static void ipw_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + char fw_ver[64], ucode_ver[64]; + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + + ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver)); + ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver)); + + snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s", + fw_ver, priv->eeprom_version, ucode_ver); + + strcpy(info->bus_info, pci_name(priv->pci_dev)); +} + +static u32 ipw2100_ethtool_get_link(struct net_device *dev) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + return (priv->status & STATUS_ASSOCIATED) ? 
1 : 0; +} + +static const struct ethtool_ops ipw2100_ethtool_ops = { + .get_link = ipw2100_ethtool_get_link, + .get_drvinfo = ipw_ethtool_get_drvinfo, +}; + +static void ipw2100_hang_check(struct work_struct *work) +{ + struct ipw2100_priv *priv = + container_of(work, struct ipw2100_priv, hang_check.work); + unsigned long flags; + u32 rtc = 0xa5a5a5a5; + u32 len = sizeof(rtc); + int restart = 0; + + spin_lock_irqsave(&priv->low_lock, flags); + + if (priv->fatal_error != 0) { + /* If fatal_error is set then we need to restart */ + IPW_DEBUG_INFO("%s: Hardware fatal error detected.\n", + priv->net_dev->name); + + restart = 1; + } else if (ipw2100_get_ordinal(priv, IPW_ORD_RTC_TIME, &rtc, &len) || + (rtc == priv->last_rtc)) { + /* Check if firmware is hung */ + IPW_DEBUG_INFO("%s: Firmware RTC stalled.\n", + priv->net_dev->name); + + restart = 1; + } + + if (restart) { + /* Kill timer */ + priv->stop_hang_check = 1; + priv->hangs++; + + /* Restart the NIC */ + schedule_reset(priv); + } + + priv->last_rtc = rtc; + + if (!priv->stop_hang_check) + queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2); + + spin_unlock_irqrestore(&priv->low_lock, flags); +} + +static void ipw2100_rf_kill(struct work_struct *work) +{ + struct ipw2100_priv *priv = + container_of(work, struct ipw2100_priv, rf_kill.work); + unsigned long flags; + + spin_lock_irqsave(&priv->low_lock, flags); + + if (rf_kill_active(priv)) { + IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); + if (!priv->stop_rf_kill) + queue_delayed_work(priv->workqueue, &priv->rf_kill, + round_jiffies_relative(HZ)); + goto exit_unlock; + } + + /* RF Kill is now disabled, so bring the device back up */ + + if (!(priv->status & STATUS_RF_KILL_MASK)) { + IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting " + "device\n"); + schedule_reset(priv); + } else + IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " + "enabled\n"); + + exit_unlock: + spin_unlock_irqrestore(&priv->low_lock, flags); +} + +static void ipw2100_irq_tasklet(struct ipw2100_priv *priv); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops ipw2100_netdev_ops = { + .ndo_open = ipw2100_open, + .ndo_stop = ipw2100_close, + .ndo_start_xmit = libipw_xmit, + .ndo_change_mtu = libipw_change_mtu, + .ndo_init = ipw2100_net_init, + .ndo_tx_timeout = ipw2100_tx_timeout, + .ndo_set_mac_address = ipw2100_set_address, + .ndo_validate_addr = eth_validate_addr, +}; +#endif + +/* Look into using netdev destructor to shutdown ieee80211? 
*/ + +static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, + void __iomem * base_addr, + unsigned long mem_start, + unsigned long mem_len) +{ + struct ipw2100_priv *priv; + struct net_device *dev; + + dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0); + if (!dev) + return NULL; + priv = libipw_priv(dev); + priv->ieee = netdev_priv(dev); + priv->pci_dev = pci_dev; + priv->net_dev = dev; + + priv->ieee->hard_start_xmit = ipw2100_tx; + priv->ieee->set_security = shim__set_security; + + priv->ieee->perfect_rssi = -20; + priv->ieee->worst_rssi = -85; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + dev->netdev_ops = &ipw2100_netdev_ops; +#else + dev->open = ipw2100_open; + dev->stop = ipw2100_close; + dev->init = ipw2100_net_init; + dev->tx_timeout = ipw2100_tx_timeout; + dev->set_mac_address = ipw2100_set_address; +#endif + + dev->ethtool_ops = &ipw2100_ethtool_ops; + dev->wireless_handlers = &ipw2100_wx_handler_def; +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) + priv->wireless_data.libipw = priv->ieee; +#else + priv->wireless_data.ieee80211 = (struct ieee80211_device *) priv->ieee; +#endif + dev->wireless_data = &priv->wireless_data; + dev->watchdog_timeo = 3 * HZ; + dev->irq = 0; + + dev->base_addr = (unsigned long)base_addr; + dev->mem_start = mem_start; + dev->mem_end = dev->mem_start + mem_len - 1; + + /* NOTE: We don't use the wireless_handlers hook + * in dev as the system will start throwing WX requests + * to us before we're actually initialized and it just + * ends up causing problems. So, we just handle + * the WX extensions through the ipw2100_ioctl interface */ + + /* memset() puts everything to 0, so we only have explicitly set + * those values that need to be something else */ + + /* If power management is turned on, default to AUTO mode */ + priv->power_mode = IPW_POWER_AUTO; + +#ifdef CONFIG_IPW2100_MONITOR + priv->config |= CFG_CRC_CHECK; +#endif + priv->ieee->wpa_enabled = 0; + priv->ieee->drop_unencrypted = 0; + priv->ieee->privacy_invoked = 0; + priv->ieee->ieee802_1x = 1; + + /* Set module parameters */ + switch (network_mode) { + case 1: + priv->ieee->iw_mode = IW_MODE_ADHOC; + break; +#ifdef CONFIG_IPW2100_MONITOR + case 2: + priv->ieee->iw_mode = IW_MODE_MONITOR; + break; +#endif + default: + case 0: + priv->ieee->iw_mode = IW_MODE_INFRA; + break; + } + + if (disable == 1) + priv->status |= STATUS_RF_KILL_SW; + + if (channel != 0 && + ((channel >= REG_MIN_CHANNEL) && (channel <= REG_MAX_CHANNEL))) { + priv->config |= CFG_STATIC_CHANNEL; + priv->channel = channel; + } + + if (associate) + priv->config |= CFG_ASSOCIATE; + + priv->beacon_interval = DEFAULT_BEACON_INTERVAL; + priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT; + priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT; + priv->rts_threshold = DEFAULT_RTS_THRESHOLD | RTS_DISABLED; + priv->frag_threshold = DEFAULT_FTS | FRAG_DISABLED; + priv->tx_power = IPW_TX_POWER_DEFAULT; + priv->tx_rates = DEFAULT_TX_RATES; + + strcpy(priv->nick, "ipw2100"); + + spin_lock_init(&priv->low_lock); + mutex_init(&priv->action_mutex); + mutex_init(&priv->adapter_mutex); + + init_waitqueue_head(&priv->wait_command_queue); + + netif_carrier_off(dev); + + INIT_LIST_HEAD(&priv->msg_free_list); + INIT_LIST_HEAD(&priv->msg_pend_list); + INIT_STAT(&priv->msg_free_stat); + INIT_STAT(&priv->msg_pend_stat); + + INIT_LIST_HEAD(&priv->tx_free_list); + INIT_LIST_HEAD(&priv->tx_pend_list); + INIT_STAT(&priv->tx_free_stat); + INIT_STAT(&priv->tx_pend_stat); + + INIT_LIST_HEAD(&priv->fw_pend_list); + 
INIT_STAT(&priv->fw_pend_stat); + + priv->workqueue = create_workqueue(DRV_NAME); + + INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter); + INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work); + INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); + INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check); + INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill); + INIT_WORK(&priv->scan_event_now, ipw2100_scan_event_now); + INIT_DELAYED_WORK(&priv->scan_event_later, ipw2100_scan_event_later); + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + ipw2100_irq_tasklet, (unsigned long)priv); + + /* NOTE: We do not start the deferred work for status checks yet */ + priv->stop_rf_kill = 1; + priv->stop_hang_check = 1; + + return dev; +} + +static int ipw2100_pci_init_one(struct pci_dev *pci_dev, + const struct pci_device_id *ent) +{ + unsigned long mem_start, mem_len, mem_flags; + void __iomem *base_addr = NULL; + struct net_device *dev = NULL; + struct ipw2100_priv *priv = NULL; + int err = 0; + int registered = 0; + u32 val; + + IPW_DEBUG_INFO("enter\n"); + + mem_start = pci_resource_start(pci_dev, 0); + mem_len = pci_resource_len(pci_dev, 0); + mem_flags = pci_resource_flags(pci_dev, 0); + + if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) { + IPW_DEBUG_INFO("weird - resource type is not memory\n"); + err = -ENODEV; + goto fail; + } + + base_addr = ioremap_nocache(mem_start, mem_len); + if (!base_addr) { + printk(KERN_WARNING DRV_NAME + "Error calling ioremap_nocache.\n"); + err = -EIO; + goto fail; + } + + /* allocate and initialize our net_device */ + dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len); + if (!dev) { + printk(KERN_WARNING DRV_NAME + "Error calling ipw2100_alloc_device.\n"); + err = -ENOMEM; + goto fail; + } + + /* set up PCI mappings for device */ + err = pci_enable_device(pci_dev); + if (err) { + printk(KERN_WARNING DRV_NAME + "Error calling pci_enable_device.\n"); + return err; + } + + priv = libipw_priv(dev); + + pci_set_master(pci_dev); + pci_set_drvdata(pci_dev, priv); + + err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); + if (err) { + printk(KERN_WARNING DRV_NAME + "Error calling pci_set_dma_mask.\n"); + pci_disable_device(pci_dev); + return err; + } + + err = pci_request_regions(pci_dev, DRV_NAME); + if (err) { + printk(KERN_WARNING DRV_NAME + "Error calling pci_request_regions.\n"); + pci_disable_device(pci_dev); + return err; + } + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_read_config_dword(pci_dev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff); + + pci_set_power_state(pci_dev, PCI_D0); + + if (!ipw2100_hw_is_adapter_in_system(dev)) { + printk(KERN_WARNING DRV_NAME + "Device not found via register read.\n"); + err = -ENODEV; + goto fail; + } + + SET_NETDEV_DEV(dev, &pci_dev->dev); + + /* Force interrupts to be shut off on the device */ + priv->status |= STATUS_INT_ENABLED; + ipw2100_disable_interrupts(priv); + + /* Allocate and initialize the Tx/Rx queues and lists */ + if (ipw2100_queues_allocate(priv)) { + printk(KERN_WARNING DRV_NAME + "Error calling ipw2100_queues_allocate.\n"); + err = -ENOMEM; + goto fail; + } + ipw2100_queues_initialize(priv); + + err = request_irq(pci_dev->irq, + ipw2100_interrupt, IRQF_SHARED, dev->name, priv); + if (err) { + printk(KERN_WARNING DRV_NAME + "Error calling request_irq: %d.\n", pci_dev->irq); + goto fail; + } + dev->irq = pci_dev->irq; 
+ + IPW_DEBUG_INFO("Attempting to register device...\n"); + + printk(KERN_INFO DRV_NAME + ": Detected Intel PRO/Wireless 2100 Network Connection\n"); + + /* Bring up the interface. Pre 0.46, after we registered the + * network device we would call ipw2100_up. This introduced a race + * condition with newer hotplug configurations (network was coming + * up and making calls before the device was initialized). + * + * If we called ipw2100_up before we registered the device, then the + * device name wasn't registered. So, we instead use the net_dev->init + * member to call a function that then just turns and calls ipw2100_up. + * net_dev->init is called after name allocation but before the + * notifier chain is called */ + err = register_netdev(dev); + if (err) { + printk(KERN_WARNING DRV_NAME + "Error calling register_netdev.\n"); + goto fail; + } + + mutex_lock(&priv->action_mutex); + registered = 1; + + IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); + + /* perform this after register_netdev so that dev->name is set */ + err = sysfs_create_group(&pci_dev->dev.kobj, &ipw2100_attribute_group); + if (err) + goto fail_unlock; + + /* If the RF Kill switch is disabled, go ahead and complete the + * startup sequence */ + if (!(priv->status & STATUS_RF_KILL_MASK)) { + /* Enable the adapter - sends HOST_COMPLETE */ + if (ipw2100_enable_adapter(priv)) { + printk(KERN_WARNING DRV_NAME + ": %s: failed in call to enable adapter.\n", + priv->net_dev->name); + ipw2100_hw_stop_adapter(priv); + err = -EIO; + goto fail_unlock; + } + + /* Start a scan . . . */ + ipw2100_set_scan_options(priv); + ipw2100_start_scan(priv); + } + + IPW_DEBUG_INFO("exit\n"); + + priv->status |= STATUS_INITIALIZED; + + mutex_unlock(&priv->action_mutex); + + return 0; + + fail_unlock: + mutex_unlock(&priv->action_mutex); + + fail: + if (dev) { + if (registered) + unregister_netdev(dev); + + ipw2100_hw_stop_adapter(priv); + + ipw2100_disable_interrupts(priv); + + if (dev->irq) + free_irq(dev->irq, priv); + + ipw2100_kill_workqueue(priv); + + /* These are safe to call even if they weren't allocated */ + ipw2100_queues_free(priv); + sysfs_remove_group(&pci_dev->dev.kobj, + &ipw2100_attribute_group); + + free_ieee80211(dev, 0); + pci_set_drvdata(pci_dev, NULL); + } + + if (base_addr) + iounmap(base_addr); + + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); + + return err; +} + +static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) +{ + struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); + struct net_device *dev; + + if (priv) { + mutex_lock(&priv->action_mutex); + + priv->status &= ~STATUS_INITIALIZED; + + dev = priv->net_dev; + sysfs_remove_group(&pci_dev->dev.kobj, + &ipw2100_attribute_group); + +#ifdef CONFIG_PM + if (ipw2100_firmware.version) + ipw2100_release_firmware(priv, &ipw2100_firmware); +#endif + /* Take down the hardware */ + ipw2100_down(priv); + + /* Release the mutex so that the network subsystem can + * complete any needed calls into the driver... */ + mutex_unlock(&priv->action_mutex); + + /* Unregister the device first - this results in close() + * being called if the device is open. If we free storage + * first, then close() will crash. */ + unregister_netdev(dev); + + /* ipw2100_down will ensure that there is no more pending work + * in the workqueue's, so we can safely remove them now. 
*/ + ipw2100_kill_workqueue(priv); + + ipw2100_queues_free(priv); + + /* Free potential debugging firmware snapshot */ + ipw2100_snapshot_free(priv); + + if (dev->irq) + free_irq(dev->irq, priv); + + if (dev->base_addr) + iounmap((void __iomem *)dev->base_addr); + + /* wiphy_unregister needs to be here, before free_ieee80211 */ + wiphy_unregister(priv->ieee->wdev.wiphy); + kfree(priv->ieee->bg_band.channels); + free_ieee80211(dev, 0); + } + + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); + + IPW_DEBUG_INFO("exit\n"); +} + +#ifdef CONFIG_PM +static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) +{ + struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); + struct net_device *dev = priv->net_dev; + + IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name); + + mutex_lock(&priv->action_mutex); + if (priv->status & STATUS_INITIALIZED) { + /* Take down the device; powers it off, etc. */ + ipw2100_down(priv); + } + + /* Remove the PRESENT state of the device */ + netif_device_detach(dev); + + pci_save_state(pci_dev); + pci_disable_device(pci_dev); + pci_set_power_state(pci_dev, PCI_D3hot); + + priv->suspend_at = get_seconds(); + + mutex_unlock(&priv->action_mutex); + + return 0; +} + +static int ipw2100_resume(struct pci_dev *pci_dev) +{ + struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); + struct net_device *dev = priv->net_dev; + int err; + u32 val; + + if (IPW2100_PM_DISABLED) + return 0; + + mutex_lock(&priv->action_mutex); + + IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name); + + pci_set_power_state(pci_dev, PCI_D0); + err = pci_enable_device(pci_dev); + if (err) { + printk(KERN_ERR "%s: pci_enable_device failed on resume\n", + dev->name); + mutex_unlock(&priv->action_mutex); + return err; + } + pci_restore_state(pci_dev); + + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries + * from interfering with C3 CPU state. pci_restore_state won't help + * here since it only restores the first 64 bytes pci config header. + */ + pci_read_config_dword(pci_dev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff); + + /* Set the device back into the PRESENT state; this will also wake + * the queue of needed */ + netif_device_attach(dev); + + priv->suspend_time = get_seconds() - priv->suspend_at; + + /* Bring the device back up */ + if (!(priv->status & STATUS_RF_KILL_SW)) + ipw2100_up(priv, 0); + + mutex_unlock(&priv->action_mutex); + + return 0; +} +#endif + +static void ipw2100_shutdown(struct pci_dev *pci_dev) +{ + struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); + + /* Take down the device; powers it off, etc. 
*/ + ipw2100_down(priv); + + pci_disable_device(pci_dev); +} + +#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x } + +static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = { + IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */ + IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */ + IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */ + IPW2100_DEV_ID(0x2525), /* IN 2100A mPCI 3B */ + IPW2100_DEV_ID(0x2526), /* IN 2100A mPCI Gen A3 */ + IPW2100_DEV_ID(0x2522), /* IN 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2523), /* IN 2100 mPCI 3A */ + IPW2100_DEV_ID(0x2527), /* IN 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2528), /* IN 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2529), /* IN 2100 mPCI 3B */ + IPW2100_DEV_ID(0x252B), /* IN 2100 mPCI 3A */ + IPW2100_DEV_ID(0x252C), /* IN 2100 mPCI 3A */ + IPW2100_DEV_ID(0x252D), /* IN 2100 mPCI 3A */ + + IPW2100_DEV_ID(0x2550), /* IB 2100A mPCI 3B */ + IPW2100_DEV_ID(0x2551), /* IB 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2553), /* IB 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2554), /* IB 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2555), /* IB 2100 mPCI 3B */ + + IPW2100_DEV_ID(0x2560), /* DE 2100A mPCI 3A */ + IPW2100_DEV_ID(0x2562), /* DE 2100A mPCI 3A */ + IPW2100_DEV_ID(0x2563), /* DE 2100A mPCI 3A */ + IPW2100_DEV_ID(0x2561), /* DE 2100 mPCI 3A */ + IPW2100_DEV_ID(0x2565), /* DE 2100 mPCI 3A */ + IPW2100_DEV_ID(0x2566), /* DE 2100 mPCI 3A */ + IPW2100_DEV_ID(0x2567), /* DE 2100 mPCI 3A */ + + IPW2100_DEV_ID(0x2570), /* GA 2100 mPCI 3B */ + + IPW2100_DEV_ID(0x2580), /* TO 2100A mPCI 3B */ + IPW2100_DEV_ID(0x2582), /* TO 2100A mPCI 3B */ + IPW2100_DEV_ID(0x2583), /* TO 2100A mPCI 3B */ + IPW2100_DEV_ID(0x2581), /* TO 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2585), /* TO 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2586), /* TO 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2587), /* TO 2100 mPCI 3B */ + + IPW2100_DEV_ID(0x2590), /* SO 2100A mPCI 3B */ + IPW2100_DEV_ID(0x2592), /* SO 2100A mPCI 3B */ + IPW2100_DEV_ID(0x2591), /* SO 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2593), /* SO 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2596), /* SO 2100 mPCI 3B */ + IPW2100_DEV_ID(0x2598), /* SO 2100 mPCI 3B */ + + IPW2100_DEV_ID(0x25A0), /* HP 2100 mPCI 3B */ + {0,}, +}; + +MODULE_DEVICE_TABLE(pci, ipw2100_pci_id_table); + +static struct pci_driver ipw2100_pci_driver = { + .name = DRV_NAME, + .id_table = ipw2100_pci_id_table, + .probe = ipw2100_pci_init_one, + .remove = __devexit_p(ipw2100_pci_remove_one), +#ifdef CONFIG_PM + .suspend = ipw2100_suspend, + .resume = ipw2100_resume, +#endif + .shutdown = ipw2100_shutdown, +}; + +/** + * Initialize the ipw2100 driver/module + * + * @returns 0 if ok, < 0 errno node con error. + * + * Note: we cannot init the /proc stuff until the PCI driver is there, + * or we risk an unlikely race condition on someone accessing + * uninitialized data in the PCI dev struct through /proc. 
+ */ +static int __init ipw2100_init(void) +{ + int ret; + + printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); + printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT); + + ret = pci_register_driver(&ipw2100_pci_driver); + if (ret) + goto out; + + pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", + PM_QOS_DEFAULT_VALUE); +#ifdef CONFIG_IPW2100_DEBUG + ipw2100_debug_level = debug; + ret = driver_create_file(&ipw2100_pci_driver.driver, + &driver_attr_debug_level); +#endif + +out: + return ret; +} + +/** + * Cleanup ipw2100 driver registration + */ +static void __exit ipw2100_exit(void) +{ + /* FIXME: IPG: check that we have no instances of the devices open */ +#ifdef CONFIG_IPW2100_DEBUG + driver_remove_file(&ipw2100_pci_driver.driver, + &driver_attr_debug_level); +#endif + pci_unregister_driver(&ipw2100_pci_driver); + pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100"); +} + +module_init(ipw2100_init); +module_exit(ipw2100_exit); + +static int ipw2100_wx_get_name(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + if (!(priv->status & STATUS_ASSOCIATED)) + strcpy(wrqu->name, "unassociated"); + else + snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b"); + + IPW_DEBUG_WX("Name: %s\n", wrqu->name); + return 0; +} + +static int ipw2100_wx_set_freq(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + struct iw_freq *fwrq = &wrqu->freq; + int err = 0; + + if (priv->ieee->iw_mode == IW_MODE_INFRA) + return -EOPNOTSUPP; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + /* if setting by freq convert to channel */ + if (fwrq->e == 1) { + if ((fwrq->m >= (int)2.412e8 && fwrq->m <= (int)2.487e8)) { + int f = fwrq->m / 100000; + int c = 0; + + while ((c < REG_MAX_CHANNEL) && + (f != ipw2100_frequencies[c])) + c++; + + /* hack to fall through */ + fwrq->e = 0; + fwrq->m = c + 1; + } + } + + if (fwrq->e > 0 || fwrq->m > 1000) { + err = -EOPNOTSUPP; + goto done; + } else { /* Set the channel */ + IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); + err = ipw2100_set_channel(priv, fwrq->m, 0); + } + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_freq(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. 
No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + wrqu->freq.e = 0; + + /* If we are associated, trying to associate, or have a statically + * configured CHANNEL then return that; otherwise return ANY */ + if (priv->config & CFG_STATIC_CHANNEL || + priv->status & STATUS_ASSOCIATED) + wrqu->freq.m = priv->channel; + else + wrqu->freq.m = 0; + + IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); + return 0; + +} + +static int ipw2100_wx_set_mode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int err = 0; + + IPW_DEBUG_WX("SET Mode -> %d \n", wrqu->mode); + + if (wrqu->mode == priv->ieee->iw_mode) + return 0; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + switch (wrqu->mode) { +#ifdef CONFIG_IPW2100_MONITOR + case IW_MODE_MONITOR: + err = ipw2100_switch_mode(priv, IW_MODE_MONITOR); + break; +#endif /* CONFIG_IPW2100_MONITOR */ + case IW_MODE_ADHOC: + err = ipw2100_switch_mode(priv, IW_MODE_ADHOC); + break; + case IW_MODE_INFRA: + case IW_MODE_AUTO: + default: + err = ipw2100_switch_mode(priv, IW_MODE_INFRA); + break; + } + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_mode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + wrqu->mode = priv->ieee->iw_mode; + IPW_DEBUG_WX("GET Mode -> %d\n", wrqu->mode); + + return 0; +} + +#define POWER_MODES 5 + +/* Values are in microsecond */ +static const s32 timeout_duration[POWER_MODES] = { + 350000, + 250000, + 75000, + 37000, + 25000, +}; + +static const s32 period_duration[POWER_MODES] = { + 400000, + 700000, + 1000000, + 1000000, + 1000000 +}; + +static int ipw2100_wx_get_range(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + struct iw_range *range = (struct iw_range *)extra; + u16 val; + int i, level; + + wrqu->data.length = sizeof(*range); + memset(range, 0, sizeof(*range)); + + /* Let's try to keep this struct in the same order as in + * linux/include/wireless.h + */ + + /* TODO: See what values we can set, and remove the ones we can't + * set, or fill them with some default data. 
+ */ + + /* ~5 Mb/s real (802.11b) */ + range->throughput = 5 * 1000 * 1000; + +// range->sensitivity; /* signal level threshold range */ + + range->max_qual.qual = 100; + /* TODO: Find real max RSSI and stick here */ + range->max_qual.level = 0; + range->max_qual.noise = 0; + range->max_qual.updated = 7; /* Updated all three */ + + range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */ + /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ + range->avg_qual.level = 20 + IPW2100_RSSI_TO_DBM; + range->avg_qual.noise = 0; + range->avg_qual.updated = 7; /* Updated all three */ + + range->num_bitrates = RATE_COUNT; + + for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) { + range->bitrate[i] = ipw2100_rates_11b[i]; + } + + range->min_rts = MIN_RTS_THRESHOLD; + range->max_rts = MAX_RTS_THRESHOLD; + range->min_frag = MIN_FRAG_THRESHOLD; + range->max_frag = MAX_FRAG_THRESHOLD; + + range->min_pmp = period_duration[0]; /* Minimal PM period */ + range->max_pmp = period_duration[POWER_MODES - 1]; /* Maximal PM period */ + range->min_pmt = timeout_duration[POWER_MODES - 1]; /* Minimal PM timeout */ + range->max_pmt = timeout_duration[0]; /* Maximal PM timeout */ + + /* How to decode max/min PM period */ + range->pmp_flags = IW_POWER_PERIOD; + /* How to decode max/min PM period */ + range->pmt_flags = IW_POWER_TIMEOUT; + /* What PM options are supported */ + range->pm_capa = IW_POWER_TIMEOUT | IW_POWER_PERIOD; + + range->encoding_size[0] = 5; + range->encoding_size[1] = 13; /* Different token sizes */ + range->num_encoding_sizes = 2; /* Number of entry in the list */ + range->max_encoding_tokens = WEP_KEYS; /* Max number of tokens */ +// range->encoding_login_index; /* token index for login token */ + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) { + range->txpower_capa = IW_TXPOW_DBM; + range->num_txpower = IW_MAX_TXPOWER; + for (i = 0, level = (IPW_TX_POWER_MAX_DBM * 16); + i < IW_MAX_TXPOWER; + i++, level -= + ((IPW_TX_POWER_MAX_DBM - + IPW_TX_POWER_MIN_DBM) * 16) / (IW_MAX_TXPOWER - 1)) + range->txpower[i] = level / 16; + } else { + range->txpower_capa = 0; + range->num_txpower = 0; + } + + /* Set the Wireless Extension versions */ + range->we_version_compiled = WIRELESS_EXT; + range->we_version_source = 18; + +// range->retry_capa; /* What retry options are supported */ +// range->retry_flags; /* How to decode max/min retry limit */ +// range->r_time_flags; /* How to decode max/min retry life */ +// range->min_retry; /* Minimal number of retries */ +// range->max_retry; /* Maximal number of retries */ +// range->min_r_time; /* Minimal retry lifetime */ +// range->max_r_time; /* Maximal retry lifetime */ + + range->num_channels = FREQ_COUNT; + + val = 0; + for (i = 0; i < FREQ_COUNT; i++) { + // TODO: Include only legal frequencies for some countries +// if (local->channel_mask & (1 << i)) { + range->freq[val].i = i + 1; + range->freq[val].m = ipw2100_frequencies[i] * 100000; + range->freq[val].e = 1; + val++; +// } + if (val == IW_MAX_FREQUENCIES) + break; + } + range->num_frequency = val; + + /* Event capability (kernel + driver) */ + range->event_capa[0] = (IW_EVENT_CAPA_K_0 | + IW_EVENT_CAPA_MASK(SIOCGIWAP)); + range->event_capa[1] = IW_EVENT_CAPA_K_1; + + range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | + IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; + + IPW_DEBUG_WX("GET Range\n"); + + return 0; +} + +static int ipw2100_wx_set_wap(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = 
libipw_priv(dev); + int err = 0; + + static const unsigned char any[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + static const unsigned char off[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + }; + + // sanity checks + if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) + return -EINVAL; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || + !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { + /* we disable mandatory BSSID association */ + IPW_DEBUG_WX("exit - disable mandatory BSSID\n"); + priv->config &= ~CFG_STATIC_BSSID; + err = ipw2100_set_mandatory_bssid(priv, NULL, 0); + goto done; + } + + priv->config |= CFG_STATIC_BSSID; + memcpy(priv->mandatory_bssid_mac, wrqu->ap_addr.sa_data, ETH_ALEN); + + err = ipw2100_set_mandatory_bssid(priv, wrqu->ap_addr.sa_data, 0); + + IPW_DEBUG_WX("SET BSSID -> %pM\n", wrqu->ap_addr.sa_data); + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_wap(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + /* If we are associated, trying to associate, or have a statically + * configured BSSID then return that; otherwise return ANY */ + if (priv->config & CFG_STATIC_BSSID || priv->status & STATUS_ASSOCIATED) { + wrqu->ap_addr.sa_family = ARPHRD_ETHER; + memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); + } else + memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); + + IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data); + return 0; +} + +static int ipw2100_wx_set_essid(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + char *essid = ""; /* ANY */ + int length = 0; + int err = 0; + DECLARE_SSID_BUF(ssid); + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + if (wrqu->essid.flags && wrqu->essid.length) { + length = wrqu->essid.length; + essid = extra; + } + + if (length == 0) { + IPW_DEBUG_WX("Setting ESSID to ANY\n"); + priv->config &= ~CFG_STATIC_ESSID; + err = ipw2100_set_essid(priv, NULL, 0, 0); + goto done; + } + + length = min(length, IW_ESSID_MAX_SIZE); + + priv->config |= CFG_STATIC_ESSID; + + if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) { + IPW_DEBUG_WX("ESSID set to current ESSID.\n"); + err = 0; + goto done; + } + + IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", + print_ssid(ssid, essid, length), length); + + priv->essid_len = length; + memcpy(priv->essid, essid, priv->essid_len); + + err = ipw2100_set_essid(priv, essid, length, 0); + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_essid(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. 
No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + DECLARE_SSID_BUF(ssid); + + /* If we are associated, trying to associate, or have a statically + * configured ESSID then return that; otherwise return ANY */ + if (priv->config & CFG_STATIC_ESSID || priv->status & STATUS_ASSOCIATED) { + IPW_DEBUG_WX("Getting essid: '%s'\n", + print_ssid(ssid, priv->essid, priv->essid_len)); + memcpy(extra, priv->essid, priv->essid_len); + wrqu->essid.length = priv->essid_len; + wrqu->essid.flags = 1; /* active */ + } else { + IPW_DEBUG_WX("Getting essid: ANY\n"); + wrqu->essid.length = 0; + wrqu->essid.flags = 0; /* active */ + } + + return 0; +} + +static int ipw2100_wx_set_nick(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + if (wrqu->data.length > IW_ESSID_MAX_SIZE) + return -E2BIG; + + wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); + memset(priv->nick, 0, sizeof(priv->nick)); + memcpy(priv->nick, extra, wrqu->data.length); + + IPW_DEBUG_WX("SET Nickname -> %s \n", priv->nick); + + return 0; +} + +static int ipw2100_wx_get_nick(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + wrqu->data.length = strlen(priv->nick); + memcpy(extra, priv->nick, wrqu->data.length); + wrqu->data.flags = 1; /* active */ + + IPW_DEBUG_WX("GET Nickname -> %s \n", extra); + + return 0; +} + +static int ipw2100_wx_set_rate(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + u32 target_rate = wrqu->bitrate.value; + u32 rate; + int err = 0; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + rate = 0; + + if (target_rate == 1000000 || + (!wrqu->bitrate.fixed && target_rate > 1000000)) + rate |= TX_RATE_1_MBIT; + if (target_rate == 2000000 || + (!wrqu->bitrate.fixed && target_rate > 2000000)) + rate |= TX_RATE_2_MBIT; + if (target_rate == 5500000 || + (!wrqu->bitrate.fixed && target_rate > 5500000)) + rate |= TX_RATE_5_5_MBIT; + if (target_rate == 11000000 || + (!wrqu->bitrate.fixed && target_rate > 11000000)) + rate |= TX_RATE_11_MBIT; + if (rate == 0) + rate = DEFAULT_TX_RATES; + + err = ipw2100_set_tx_rates(priv, rate, 0); + + IPW_DEBUG_WX("SET Rate -> %04X \n", rate); + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_rate(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int val; + unsigned int len = sizeof(val); + int err = 0; + + if (!(priv->status & STATUS_ENABLED) || + priv->status & STATUS_RF_KILL_MASK || + !(priv->status & STATUS_ASSOCIATED)) { + wrqu->bitrate.value = 0; + return 0; + } + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + err = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &val, &len); + if (err) { + IPW_DEBUG_WX("failed querying ordinals.\n"); + goto done; + } + + switch (val & TX_RATE_MASK) { + case TX_RATE_1_MBIT: + wrqu->bitrate.value = 1000000; + break; + case TX_RATE_2_MBIT: + wrqu->bitrate.value = 2000000; + break; + case TX_RATE_5_5_MBIT: 
+ wrqu->bitrate.value = 5500000; + break; + case TX_RATE_11_MBIT: + wrqu->bitrate.value = 11000000; + break; + default: + wrqu->bitrate.value = 0; + } + + IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_set_rts(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int value, err; + + /* Auto RTS not yet supported */ + if (wrqu->rts.fixed == 0) + return -EINVAL; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + if (wrqu->rts.disabled) + value = priv->rts_threshold | RTS_DISABLED; + else { + if (wrqu->rts.value < 1 || wrqu->rts.value > 2304) { + err = -EINVAL; + goto done; + } + value = wrqu->rts.value; + } + + err = ipw2100_set_rts_threshold(priv, value); + + IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value); + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_rts(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + wrqu->rts.value = priv->rts_threshold & ~RTS_DISABLED; + wrqu->rts.fixed = 1; /* no auto select */ + + /* If RTS is set to the default value, then it is disabled */ + wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0; + + IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X \n", wrqu->rts.value); + + return 0; +} + +static int ipw2100_wx_set_txpow(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int err = 0, value; + + if (ipw_radio_kill_sw(priv, wrqu->txpower.disabled)) + return -EINPROGRESS; + + if (priv->ieee->iw_mode != IW_MODE_ADHOC) + return 0; + + if ((wrqu->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) + return -EINVAL; + + if (wrqu->txpower.fixed == 0) + value = IPW_TX_POWER_DEFAULT; + else { + if (wrqu->txpower.value < IPW_TX_POWER_MIN_DBM || + wrqu->txpower.value > IPW_TX_POWER_MAX_DBM) + return -EINVAL; + + value = wrqu->txpower.value; + } + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + err = ipw2100_set_tx_power(priv, value); + + IPW_DEBUG_WX("SET TX Power -> %d \n", value); + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_txpow(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + wrqu->txpower.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; + + if (priv->tx_power == IPW_TX_POWER_DEFAULT) { + wrqu->txpower.fixed = 0; + wrqu->txpower.value = IPW_TX_POWER_MAX_DBM; + } else { + wrqu->txpower.fixed = 1; + wrqu->txpower.value = priv->tx_power; + } + + wrqu->txpower.flags = IW_TXPOW_DBM; + + IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->txpower.value); + + return 0; +} + +static int ipw2100_wx_set_frag(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. 
No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + if (!wrqu->frag.fixed) + return -EINVAL; + + if (wrqu->frag.disabled) { + priv->frag_threshold |= FRAG_DISABLED; + priv->ieee->fts = DEFAULT_FTS; + } else { + if (wrqu->frag.value < MIN_FRAG_THRESHOLD || + wrqu->frag.value > MAX_FRAG_THRESHOLD) + return -EINVAL; + + priv->ieee->fts = wrqu->frag.value & ~0x1; + priv->frag_threshold = priv->ieee->fts; + } + + IPW_DEBUG_WX("SET Frag Threshold -> %d \n", priv->ieee->fts); + + return 0; +} + +static int ipw2100_wx_get_frag(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + wrqu->frag.value = priv->frag_threshold & ~FRAG_DISABLED; + wrqu->frag.fixed = 0; /* no auto select */ + wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0; + + IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); + + return 0; +} + +static int ipw2100_wx_set_retry(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int err = 0; + + if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled) + return -EINVAL; + + if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) + return 0; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + if (wrqu->retry.flags & IW_RETRY_SHORT) { + err = ipw2100_set_short_retry(priv, wrqu->retry.value); + IPW_DEBUG_WX("SET Short Retry Limit -> %d \n", + wrqu->retry.value); + goto done; + } + + if (wrqu->retry.flags & IW_RETRY_LONG) { + err = ipw2100_set_long_retry(priv, wrqu->retry.value); + IPW_DEBUG_WX("SET Long Retry Limit -> %d \n", + wrqu->retry.value); + goto done; + } + + err = ipw2100_set_short_retry(priv, wrqu->retry.value); + if (!err) + err = ipw2100_set_long_retry(priv, wrqu->retry.value); + + IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value); + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_retry(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + wrqu->retry.disabled = 0; /* can't be disabled */ + + if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) + return -EINVAL; + + if (wrqu->retry.flags & IW_RETRY_LONG) { + wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG; + wrqu->retry.value = priv->long_retry_limit; + } else { + wrqu->retry.flags = + (priv->short_retry_limit != + priv->long_retry_limit) ? 
+ IW_RETRY_LIMIT | IW_RETRY_SHORT : IW_RETRY_LIMIT; + + wrqu->retry.value = priv->short_retry_limit; + } + + IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value); + + return 0; +} + +static int ipw2100_wx_set_scan(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int err = 0; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + IPW_DEBUG_WX("Initiating scan...\n"); + + priv->user_requested_scan = 1; + if (ipw2100_set_scan_options(priv) || ipw2100_start_scan(priv)) { + IPW_DEBUG_WX("Start scan failed.\n"); + + /* TODO: Mark a scan as pending so when hardware initialized + * a scan starts */ + } + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_scan(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + return libipw_wx_get_scan(priv->ieee, info, wrqu, extra); +} + +/* + * Implementation based on code in hostap-driver v0.1.3 hostap_ioctl.c + */ +static int ipw2100_wx_set_encode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *key) +{ + /* + * No check of STATUS_INITIALIZED required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + return libipw_wx_set_encode(priv->ieee, info, wrqu, key); +} + +static int ipw2100_wx_get_encode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *key) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + return libipw_wx_get_encode(priv->ieee, info, wrqu, key); +} + +static int ipw2100_wx_set_power(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int err = 0; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + if (wrqu->power.disabled) { + priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); + err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM); + IPW_DEBUG_WX("SET Power Management Mode -> off\n"); + goto done; + } + + switch (wrqu->power.flags & IW_POWER_MODE) { + case IW_POWER_ON: /* If not specified */ + case IW_POWER_MODE: /* If set all mask */ + case IW_POWER_ALL_R: /* If explicitly state all */ + break; + default: /* Otherwise we don't support it */ + IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", + wrqu->power.flags); + err = -EOPNOTSUPP; + goto done; + } + + /* If the user hasn't specified a power management mode yet, default + * to BATTERY */ + priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; + err = ipw2100_set_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); + + IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); + + done: + mutex_unlock(&priv->action_mutex); + return err; + +} + +static int ipw2100_wx_get_power(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. 
No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + if (!(priv->power_mode & IPW_POWER_ENABLED)) + wrqu->power.disabled = 1; + else { + wrqu->power.disabled = 0; + wrqu->power.flags = 0; + } + + IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); + + return 0; +} + +/* + * WE-18 WPA support + */ + +/* SIOCSIWGENIE */ +static int ipw2100_wx_set_genie(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + + struct ipw2100_priv *priv = libipw_priv(dev); + struct libipw_device *ieee = priv->ieee; + u8 *buf; + + if (!ieee->wpa_enabled) + return -EOPNOTSUPP; + + if (wrqu->data.length > MAX_WPA_IE_LEN || + (wrqu->data.length && extra == NULL)) + return -EINVAL; + + if (wrqu->data.length) { + buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + kfree(ieee->wpa_ie); + ieee->wpa_ie = buf; + ieee->wpa_ie_len = wrqu->data.length; + } else { + kfree(ieee->wpa_ie); + ieee->wpa_ie = NULL; + ieee->wpa_ie_len = 0; + } + + ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); + + return 0; +} + +/* SIOCGIWGENIE */ +static int ipw2100_wx_get_genie(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + struct libipw_device *ieee = priv->ieee; + + if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { + wrqu->data.length = 0; + return 0; + } + + if (wrqu->data.length < ieee->wpa_ie_len) + return -E2BIG; + + wrqu->data.length = ieee->wpa_ie_len; + memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); + + return 0; +} + +/* SIOCSIWAUTH */ +static int ipw2100_wx_set_auth(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + struct libipw_device *ieee = priv->ieee; + struct iw_param *param = &wrqu->param; + struct lib80211_crypt_data *crypt; + unsigned long flags; + int ret = 0; + + switch (param->flags & IW_AUTH_INDEX) { + case IW_AUTH_WPA_VERSION: + case IW_AUTH_CIPHER_PAIRWISE: + case IW_AUTH_CIPHER_GROUP: + case IW_AUTH_KEY_MGMT: + /* + * ipw2200 does not use these parameters + */ + break; + + case IW_AUTH_TKIP_COUNTERMEASURES: + crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; + if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) + break; + + flags = crypt->ops->get_flags(crypt->priv); + + if (param->value) + flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; + else + flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; + + crypt->ops->set_flags(flags, crypt->priv); + + break; + + case IW_AUTH_DROP_UNENCRYPTED:{ + /* HACK: + * + * wpa_supplicant calls set_wpa_enabled when the driver + * is loaded and unloaded, regardless of if WPA is being + * used. No other calls are made which can be used to + * determine if encryption will be used or not prior to + * association being expected. If encryption is not being + * used, drop_unencrypted is set to false, else true -- we + * can use this to determine if the CAP_PRIVACY_ON bit should + * be set. + */ + struct libipw_security sec = { + .flags = SEC_ENABLED, + .enabled = param->value, + }; + priv->ieee->drop_unencrypted = param->value; + /* We only change SEC_LEVEL for open mode. Others + * are set by ipw_wpa_set_encryption. 
+ */ + if (!param->value) { + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_0; + } else { + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_1; + } + if (priv->ieee->set_security) + priv->ieee->set_security(priv->ieee->dev, &sec); + break; + } + + case IW_AUTH_80211_AUTH_ALG: + ret = ipw2100_wpa_set_auth_algs(priv, param->value); + break; + + case IW_AUTH_WPA_ENABLED: + ret = ipw2100_wpa_enable(priv, param->value); + break; + + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + ieee->ieee802_1x = param->value; + break; + + //case IW_AUTH_ROAMING_CONTROL: + case IW_AUTH_PRIVACY_INVOKED: + ieee->privacy_invoked = param->value; + break; + + default: + return -EOPNOTSUPP; + } + return ret; +} + +/* SIOCGIWAUTH */ +static int ipw2100_wx_get_auth(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + struct libipw_device *ieee = priv->ieee; + struct lib80211_crypt_data *crypt; + struct iw_param *param = &wrqu->param; + int ret = 0; + + switch (param->flags & IW_AUTH_INDEX) { + case IW_AUTH_WPA_VERSION: + case IW_AUTH_CIPHER_PAIRWISE: + case IW_AUTH_CIPHER_GROUP: + case IW_AUTH_KEY_MGMT: + /* + * wpa_supplicant will control these internally + */ + ret = -EOPNOTSUPP; + break; + + case IW_AUTH_TKIP_COUNTERMEASURES: + crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; + if (!crypt || !crypt->ops->get_flags) { + IPW_DEBUG_WARNING("Can't get TKIP countermeasures: " + "crypt not set!\n"); + break; + } + + param->value = (crypt->ops->get_flags(crypt->priv) & + IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0; + + break; + + case IW_AUTH_DROP_UNENCRYPTED: + param->value = ieee->drop_unencrypted; + break; + + case IW_AUTH_80211_AUTH_ALG: + param->value = priv->ieee->sec.auth_mode; + break; + + case IW_AUTH_WPA_ENABLED: + param->value = ieee->wpa_enabled; + break; + + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + param->value = ieee->ieee802_1x; + break; + + case IW_AUTH_ROAMING_CONTROL: + case IW_AUTH_PRIVACY_INVOKED: + param->value = ieee->privacy_invoked; + break; + + default: + return -EOPNOTSUPP; + } + return 0; +} + +/* SIOCSIWENCODEEXT */ +static int ipw2100_wx_set_encodeext(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra); +} + +/* SIOCGIWENCODEEXT */ +static int ipw2100_wx_get_encodeext(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra); +} + +/* SIOCSIWMLME */ +static int ipw2100_wx_set_mlme(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + struct iw_mlme *mlme = (struct iw_mlme *)extra; + __le16 reason; + + reason = cpu_to_le16(mlme->reason_code); + + switch (mlme->cmd) { + case IW_MLME_DEAUTH: + // silently ignore + break; + + case IW_MLME_DISASSOC: + ipw2100_disassociate_bssid(priv); + break; + + default: + return -EOPNOTSUPP; + } + return 0; +} + +/* + * + * IWPRIV handlers + * + */ +#ifdef CONFIG_IPW2100_MONITOR +static int ipw2100_wx_set_promisc(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int *parms = (int *)extra; + int enable = (parms[0] > 0); + int err = 0; + + 
mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + if (enable) { + if (priv->ieee->iw_mode == IW_MODE_MONITOR) { + err = ipw2100_set_channel(priv, parms[1], 0); + goto done; + } + priv->channel = parms[1]; + err = ipw2100_switch_mode(priv, IW_MODE_MONITOR); + } else { + if (priv->ieee->iw_mode == IW_MODE_MONITOR) + err = ipw2100_switch_mode(priv, priv->last_mode); + } + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_reset(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + if (priv->status & STATUS_INITIALIZED) + schedule_reset(priv); + return 0; +} + +#endif + +static int ipw2100_wx_set_powermode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int err = 0, mode = *(int *)extra; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + if ((mode < 0) || (mode > POWER_MODES)) + mode = IPW_POWER_AUTO; + + if (IPW_POWER_LEVEL(priv->power_mode) != mode) + err = ipw2100_set_power_mode(priv, mode); + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +#define MAX_POWER_STRING 80 +static int ipw2100_wx_get_powermode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + int level = IPW_POWER_LEVEL(priv->power_mode); + s32 timeout, period; + + if (!(priv->power_mode & IPW_POWER_ENABLED)) { + snprintf(extra, MAX_POWER_STRING, + "Power save level: %d (Off)", level); + } else { + switch (level) { + case IPW_POWER_MODE_CAM: + snprintf(extra, MAX_POWER_STRING, + "Power save level: %d (None)", level); + break; + case IPW_POWER_AUTO: + snprintf(extra, MAX_POWER_STRING, + "Power save level: %d (Auto)", level); + break; + default: + timeout = timeout_duration[level - 1] / 1000; + period = period_duration[level - 1] / 1000; + snprintf(extra, MAX_POWER_STRING, + "Power save level: %d " + "(Timeout %dms, Period %dms)", + level, timeout, period); + } + } + + wrqu->data.length = strlen(extra) + 1; + + return 0; +} + +static int ipw2100_wx_set_preamble(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int err, mode = *(int *)extra; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + if (mode == 1) + priv->config |= CFG_LONG_PREAMBLE; + else if (mode == 0) + priv->config &= ~CFG_LONG_PREAMBLE; + else { + err = -EINVAL; + goto done; + } + + err = ipw2100_system_config(priv, 0); + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_preamble(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. 
No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + if (priv->config & CFG_LONG_PREAMBLE) + snprintf(wrqu->name, IFNAMSIZ, "long (1)"); + else + snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); + + return 0; +} + +#ifdef CONFIG_IPW2100_MONITOR +static int ipw2100_wx_set_crc_check(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw2100_priv *priv = libipw_priv(dev); + int err, mode = *(int *)extra; + + mutex_lock(&priv->action_mutex); + if (!(priv->status & STATUS_INITIALIZED)) { + err = -EIO; + goto done; + } + + if (mode == 1) + priv->config |= CFG_CRC_CHECK; + else if (mode == 0) + priv->config &= ~CFG_CRC_CHECK; + else { + err = -EINVAL; + goto done; + } + err = 0; + + done: + mutex_unlock(&priv->action_mutex); + return err; +} + +static int ipw2100_wx_get_crc_check(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* + * This can be called at any time. No action lock required + */ + + struct ipw2100_priv *priv = libipw_priv(dev); + + if (priv->config & CFG_CRC_CHECK) + snprintf(wrqu->name, IFNAMSIZ, "CRC checked (1)"); + else + snprintf(wrqu->name, IFNAMSIZ, "CRC ignored (0)"); + + return 0; +} +#endif /* CONFIG_IPW2100_MONITOR */ + +static iw_handler ipw2100_wx_handlers[] = { + NULL, /* SIOCSIWCOMMIT */ + ipw2100_wx_get_name, /* SIOCGIWNAME */ + NULL, /* SIOCSIWNWID */ + NULL, /* SIOCGIWNWID */ + ipw2100_wx_set_freq, /* SIOCSIWFREQ */ + ipw2100_wx_get_freq, /* SIOCGIWFREQ */ + ipw2100_wx_set_mode, /* SIOCSIWMODE */ + ipw2100_wx_get_mode, /* SIOCGIWMODE */ + NULL, /* SIOCSIWSENS */ + NULL, /* SIOCGIWSENS */ + NULL, /* SIOCSIWRANGE */ + ipw2100_wx_get_range, /* SIOCGIWRANGE */ + NULL, /* SIOCSIWPRIV */ + NULL, /* SIOCGIWPRIV */ + NULL, /* SIOCSIWSTATS */ + NULL, /* SIOCGIWSTATS */ + NULL, /* SIOCSIWSPY */ + NULL, /* SIOCGIWSPY */ + NULL, /* SIOCGIWTHRSPY */ + NULL, /* SIOCWIWTHRSPY */ + ipw2100_wx_set_wap, /* SIOCSIWAP */ + ipw2100_wx_get_wap, /* SIOCGIWAP */ + ipw2100_wx_set_mlme, /* SIOCSIWMLME */ + NULL, /* SIOCGIWAPLIST -- deprecated */ + ipw2100_wx_set_scan, /* SIOCSIWSCAN */ + ipw2100_wx_get_scan, /* SIOCGIWSCAN */ + ipw2100_wx_set_essid, /* SIOCSIWESSID */ + ipw2100_wx_get_essid, /* SIOCGIWESSID */ + ipw2100_wx_set_nick, /* SIOCSIWNICKN */ + ipw2100_wx_get_nick, /* SIOCGIWNICKN */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + ipw2100_wx_set_rate, /* SIOCSIWRATE */ + ipw2100_wx_get_rate, /* SIOCGIWRATE */ + ipw2100_wx_set_rts, /* SIOCSIWRTS */ + ipw2100_wx_get_rts, /* SIOCGIWRTS */ + ipw2100_wx_set_frag, /* SIOCSIWFRAG */ + ipw2100_wx_get_frag, /* SIOCGIWFRAG */ + ipw2100_wx_set_txpow, /* SIOCSIWTXPOW */ + ipw2100_wx_get_txpow, /* SIOCGIWTXPOW */ + ipw2100_wx_set_retry, /* SIOCSIWRETRY */ + ipw2100_wx_get_retry, /* SIOCGIWRETRY */ + ipw2100_wx_set_encode, /* SIOCSIWENCODE */ + ipw2100_wx_get_encode, /* SIOCGIWENCODE */ + ipw2100_wx_set_power, /* SIOCSIWPOWER */ + ipw2100_wx_get_power, /* SIOCGIWPOWER */ + NULL, /* -- hole -- */ + NULL, /* -- hole -- */ + ipw2100_wx_set_genie, /* SIOCSIWGENIE */ + ipw2100_wx_get_genie, /* SIOCGIWGENIE */ + ipw2100_wx_set_auth, /* SIOCSIWAUTH */ + ipw2100_wx_get_auth, /* SIOCGIWAUTH */ + ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */ + ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */ + NULL, /* SIOCSIWPMKSA */ +}; + +#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV +#define IPW2100_PRIV_RESET SIOCIWFIRSTPRIV+1 +#define IPW2100_PRIV_SET_POWER SIOCIWFIRSTPRIV+2 +#define IPW2100_PRIV_GET_POWER SIOCIWFIRSTPRIV+3 
+#define IPW2100_PRIV_SET_LONGPREAMBLE SIOCIWFIRSTPRIV+4 +#define IPW2100_PRIV_GET_LONGPREAMBLE SIOCIWFIRSTPRIV+5 +#define IPW2100_PRIV_SET_CRC_CHECK SIOCIWFIRSTPRIV+6 +#define IPW2100_PRIV_GET_CRC_CHECK SIOCIWFIRSTPRIV+7 + +static const struct iw_priv_args ipw2100_private_args[] = { + +#ifdef CONFIG_IPW2100_MONITOR + { + IPW2100_PRIV_SET_MONITOR, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"}, + { + IPW2100_PRIV_RESET, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"}, +#endif /* CONFIG_IPW2100_MONITOR */ + + { + IPW2100_PRIV_SET_POWER, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_power"}, + { + IPW2100_PRIV_GET_POWER, + 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_POWER_STRING, + "get_power"}, + { + IPW2100_PRIV_SET_LONGPREAMBLE, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble"}, + { + IPW2100_PRIV_GET_LONGPREAMBLE, + 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "get_preamble"}, +#ifdef CONFIG_IPW2100_MONITOR + { + IPW2100_PRIV_SET_CRC_CHECK, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_crc_check"}, + { + IPW2100_PRIV_GET_CRC_CHECK, + 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "get_crc_check"}, +#endif /* CONFIG_IPW2100_MONITOR */ +}; + +static iw_handler ipw2100_private_handler[] = { +#ifdef CONFIG_IPW2100_MONITOR + ipw2100_wx_set_promisc, + ipw2100_wx_reset, +#else /* CONFIG_IPW2100_MONITOR */ + NULL, + NULL, +#endif /* CONFIG_IPW2100_MONITOR */ + ipw2100_wx_set_powermode, + ipw2100_wx_get_powermode, + ipw2100_wx_set_preamble, + ipw2100_wx_get_preamble, +#ifdef CONFIG_IPW2100_MONITOR + ipw2100_wx_set_crc_check, + ipw2100_wx_get_crc_check, +#else /* CONFIG_IPW2100_MONITOR */ + NULL, + NULL, +#endif /* CONFIG_IPW2100_MONITOR */ +}; + +/* + * Get wireless statistics. + * Called by /proc/net/wireless + * Also called by SIOCGIWSTATS + */ +static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev) +{ + enum { + POOR = 30, + FAIR = 60, + GOOD = 80, + VERY_GOOD = 90, + EXCELLENT = 95, + PERFECT = 100 + }; + int rssi_qual; + int tx_qual; + int beacon_qual; + int quality; + + struct ipw2100_priv *priv = libipw_priv(dev); + struct iw_statistics *wstats; + u32 rssi, tx_retries, missed_beacons, tx_failures; + u32 ord_len = sizeof(u32); + + if (!priv) + return (struct iw_statistics *)NULL; + + wstats = &priv->wstats; + + /* if hw is disabled, then ipw2100_get_ordinal() can't be called. + * ipw2100_wx_wireless_stats seems to be called before fw is + * initialized. 
STATUS_ASSOCIATED will only be set if the hw is up + * and associated; if not associated, the values are all meaningless + * anyway, so set them all to NULL and INVALID */ + if (!(priv->status & STATUS_ASSOCIATED)) { + wstats->miss.beacon = 0; + wstats->discard.retries = 0; + wstats->qual.qual = 0; + wstats->qual.level = 0; + wstats->qual.noise = 0; + wstats->qual.updated = 7; + wstats->qual.updated |= IW_QUAL_NOISE_INVALID | + IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; + return wstats; + } + + if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_MISSED_BCNS, + &missed_beacons, &ord_len)) + goto fail_get_ordinal; + + /* If we don't have a connection the quality and level are 0 */ + if (!(priv->status & STATUS_ASSOCIATED)) { + wstats->qual.qual = 0; + wstats->qual.level = 0; + } else { + if (ipw2100_get_ordinal(priv, IPW_ORD_RSSI_AVG_CURR, + &rssi, &ord_len)) + goto fail_get_ordinal; + wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM; + if (rssi < 10) + rssi_qual = rssi * POOR / 10; + else if (rssi < 15) + rssi_qual = (rssi - 10) * (FAIR - POOR) / 5 + POOR; + else if (rssi < 20) + rssi_qual = (rssi - 15) * (GOOD - FAIR) / 5 + FAIR; + else if (rssi < 30) + rssi_qual = (rssi - 20) * (VERY_GOOD - GOOD) / + 10 + GOOD; + else + rssi_qual = (rssi - 30) * (PERFECT - VERY_GOOD) / + 10 + VERY_GOOD; + + if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_RETRIES, + &tx_retries, &ord_len)) + goto fail_get_ordinal; + + if (tx_retries > 75) + tx_qual = (90 - tx_retries) * POOR / 15; + else if (tx_retries > 70) + tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR; + else if (tx_retries > 65) + tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR; + else if (tx_retries > 50) + tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) / + 15 + GOOD; + else + tx_qual = (50 - tx_retries) * + (PERFECT - VERY_GOOD) / 50 + VERY_GOOD; + + if (missed_beacons > 50) + beacon_qual = (60 - missed_beacons) * POOR / 10; + else if (missed_beacons > 40) + beacon_qual = (50 - missed_beacons) * (FAIR - POOR) / + 10 + POOR; + else if (missed_beacons > 32) + beacon_qual = (40 - missed_beacons) * (GOOD - FAIR) / + 18 + FAIR; + else if (missed_beacons > 20) + beacon_qual = (32 - missed_beacons) * + (VERY_GOOD - GOOD) / 20 + GOOD; + else + beacon_qual = (20 - missed_beacons) * + (PERFECT - VERY_GOOD) / 20 + VERY_GOOD; + + quality = min(tx_qual, rssi_qual); + quality = min(beacon_qual, quality); + +#ifdef CONFIG_IPW2100_DEBUG + if (beacon_qual == quality) + IPW_DEBUG_WX("Quality clamped by Missed Beacons\n"); + else if (tx_qual == quality) + IPW_DEBUG_WX("Quality clamped by Tx Retries\n"); + else if (quality != 100) + IPW_DEBUG_WX("Quality clamped by Signal Strength\n"); + else + IPW_DEBUG_WX("Quality not clamped.\n"); +#endif + + wstats->qual.qual = quality; + wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM; + } + + wstats->qual.noise = 0; + wstats->qual.updated = 7; + wstats->qual.updated |= IW_QUAL_NOISE_INVALID; + + /* FIXME: this is percent and not a # */ + wstats->miss.beacon = missed_beacons; + + if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURES, + &tx_failures, &ord_len)) + goto fail_get_ordinal; + wstats->discard.retries = tx_failures; + + return wstats; + + fail_get_ordinal: + IPW_DEBUG_WX("failed querying ordinals.\n"); + + return (struct iw_statistics *)NULL; +} + +static struct iw_handler_def ipw2100_wx_handler_def = { + .standard = ipw2100_wx_handlers, + .num_standard = ARRAY_SIZE(ipw2100_wx_handlers), + .num_private = ARRAY_SIZE(ipw2100_private_handler), + .num_private_args = ARRAY_SIZE(ipw2100_private_args), + 
.private = (iw_handler *) ipw2100_private_handler, + .private_args = (struct iw_priv_args *)ipw2100_private_args, + .get_wireless_stats = ipw2100_wx_wireless_stats, +}; + +static void ipw2100_wx_event_work(struct work_struct *work) +{ + struct ipw2100_priv *priv = + container_of(work, struct ipw2100_priv, wx_event_work.work); + union iwreq_data wrqu; + unsigned int len = ETH_ALEN; + + if (priv->status & STATUS_STOPPING) + return; + + mutex_lock(&priv->action_mutex); + + IPW_DEBUG_WX("enter\n"); + + mutex_unlock(&priv->action_mutex); + + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + + /* Fetch BSSID from the hardware */ + if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) || + priv->status & STATUS_RF_KILL_MASK || + ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, + &priv->bssid, &len)) { + memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + } else { + /* We now have the BSSID, so can finish setting to the full + * associated state */ + memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); + memcpy(priv->ieee->bssid, priv->bssid, ETH_ALEN); + priv->status &= ~STATUS_ASSOCIATING; + priv->status |= STATUS_ASSOCIATED; + netif_carrier_on(priv->net_dev); + netif_wake_queue(priv->net_dev); + } + + if (!(priv->status & STATUS_ASSOCIATED)) { + IPW_DEBUG_WX("Configuring ESSID\n"); + mutex_lock(&priv->action_mutex); + /* This is a disassociation event, so kick the firmware to + * look for another AP */ + if (priv->config & CFG_STATIC_ESSID) + ipw2100_set_essid(priv, priv->essid, priv->essid_len, + 0); + else + ipw2100_set_essid(priv, NULL, 0, 0); + mutex_unlock(&priv->action_mutex); + } + + wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); +} + +#define IPW2100_FW_MAJOR_VERSION 1 +#define IPW2100_FW_MINOR_VERSION 3 + +#define IPW2100_FW_MINOR(x) ((x & 0xff) >> 8) +#define IPW2100_FW_MAJOR(x) (x & 0xff) + +#define IPW2100_FW_VERSION ((IPW2100_FW_MINOR_VERSION << 8) | \ + IPW2100_FW_MAJOR_VERSION) + +#define IPW2100_FW_PREFIX "ipw2100-" __stringify(IPW2100_FW_MAJOR_VERSION) \ +"." __stringify(IPW2100_FW_MINOR_VERSION) + +#define IPW2100_FW_NAME(x) IPW2100_FW_PREFIX "" x ".fw" + +/* + +BINARY FIRMWARE HEADER FORMAT + +offset length desc +0 2 version +2 2 mode == 0:BSS,1:IBSS,2:MONITOR +4 4 fw_len +8 4 uc_len +C fw_len firmware data +12 + fw_len uc_len microcode data + +*/ + +struct ipw2100_fw_header { + short version; + short mode; + unsigned int fw_size; + unsigned int uc_size; +} __attribute__ ((packed)); + +static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw) +{ + struct ipw2100_fw_header *h = + (struct ipw2100_fw_header *)fw->fw_entry->data; + + if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) { + printk(KERN_WARNING DRV_NAME ": Firmware image not compatible " + "(detected version id of %u). 
" + "See Documentation/networking/README.ipw2100\n", + h->version); + return 1; + } + + fw->version = h->version; + fw->fw.data = fw->fw_entry->data + sizeof(struct ipw2100_fw_header); + fw->fw.size = h->fw_size; + fw->uc.data = fw->fw.data + h->fw_size; + fw->uc.size = h->uc_size; + + return 0; +} + +static int ipw2100_get_firmware(struct ipw2100_priv *priv, + struct ipw2100_fw *fw) +{ + char *fw_name; + int rc; + + IPW_DEBUG_INFO("%s: Using hotplug firmware load.\n", + priv->net_dev->name); + + switch (priv->ieee->iw_mode) { + case IW_MODE_ADHOC: + fw_name = IPW2100_FW_NAME("-i"); + break; +#ifdef CONFIG_IPW2100_MONITOR + case IW_MODE_MONITOR: + fw_name = IPW2100_FW_NAME("-p"); + break; +#endif + case IW_MODE_INFRA: + default: + fw_name = IPW2100_FW_NAME(""); + break; + } + + rc = request_firmware(&fw->fw_entry, fw_name, &priv->pci_dev->dev); + + if (rc < 0) { + printk(KERN_ERR DRV_NAME ": " + "%s: Firmware '%s' not available or load failed.\n", + priv->net_dev->name, fw_name); + return rc; + } + IPW_DEBUG_INFO("firmware data %p size %zd\n", fw->fw_entry->data, + fw->fw_entry->size); + + ipw2100_mod_firmware_load(fw); + + return 0; +} + +MODULE_FIRMWARE(IPW2100_FW_NAME("-i")); +#ifdef CONFIG_IPW2100_MONITOR +MODULE_FIRMWARE(IPW2100_FW_NAME("-p")); +#endif +MODULE_FIRMWARE(IPW2100_FW_NAME("")); + +static void ipw2100_release_firmware(struct ipw2100_priv *priv, + struct ipw2100_fw *fw) +{ + fw->version = 0; + if (fw->fw_entry) + release_firmware(fw->fw_entry); + fw->fw_entry = NULL; +} + +static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, + size_t max) +{ + char ver[MAX_FW_VERSION_LEN]; + u32 len = MAX_FW_VERSION_LEN; + u32 tmp; + int i; + /* firmware version is an ascii string (max len of 14) */ + if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_FW_VER_NUM, ver, &len)) + return -EIO; + tmp = max; + if (len >= max) + len = max - 1; + for (i = 0; i < len; i++) + buf[i] = ver[i]; + buf[i] = '\0'; + return tmp; +} + +static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, + size_t max) +{ + u32 ver; + u32 len = sizeof(ver); + /* microcode version is a 32 bit integer */ + if (ipw2100_get_ordinal(priv, IPW_ORD_UCODE_VERSION, &ver, &len)) + return -EIO; + return snprintf(buf, max, "%08X", ver); +} + +/* + * On exit, the firmware will have been freed from the fw list + */ +static int ipw2100_fw_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw) +{ + /* firmware is constructed of N contiguous entries, each entry is + * structured as: + * + * offset sie desc + * 0 4 address to write to + * 4 2 length of data run + * 6 length data + */ + unsigned int addr; + unsigned short len; + + const unsigned char *firmware_data = fw->fw.data; + unsigned int firmware_data_left = fw->fw.size; + + while (firmware_data_left > 0) { + addr = *(u32 *) (firmware_data); + firmware_data += 4; + firmware_data_left -= 4; + + len = *(u16 *) (firmware_data); + firmware_data += 2; + firmware_data_left -= 2; + + if (len > 32) { + printk(KERN_ERR DRV_NAME ": " + "Invalid firmware run-length of %d bytes\n", + len); + return -EINVAL; + } + + write_nic_memory(priv->net_dev, addr, len, firmware_data); + firmware_data += len; + firmware_data_left -= len; + } + + return 0; +} + +struct symbol_alive_response { + u8 cmd_id; + u8 seq_num; + u8 ucode_rev; + u8 eeprom_valid; + u16 valid_flags; + u8 IEEE_addr[6]; + u16 flags; + u16 pcb_rev; + u16 clock_settle_time; // 1us LSB + u16 powerup_settle_time; // 1us LSB + u16 hop_settle_time; // 1us LSB + u8 date[3]; // month, day, year + u8 time[2]; // 
hours, minutes + u8 ucode_valid; +}; + +static int ipw2100_ucode_download(struct ipw2100_priv *priv, + struct ipw2100_fw *fw) +{ + struct net_device *dev = priv->net_dev; + const unsigned char *microcode_data = fw->uc.data; + unsigned int microcode_data_left = fw->uc.size; + void __iomem *reg = (void __iomem *)dev->base_addr; + + struct symbol_alive_response response; + int i, j; + u8 data; + + /* Symbol control */ + write_nic_word(dev, IPW2100_CONTROL_REG, 0x703); + readl(reg); + write_nic_word(dev, IPW2100_CONTROL_REG, 0x707); + readl(reg); + + /* HW config */ + write_nic_byte(dev, 0x210014, 0x72); /* fifo width =16 */ + readl(reg); + write_nic_byte(dev, 0x210014, 0x72); /* fifo width =16 */ + readl(reg); + + /* EN_CS_ACCESS bit to reset control store pointer */ + write_nic_byte(dev, 0x210000, 0x40); + readl(reg); + write_nic_byte(dev, 0x210000, 0x0); + readl(reg); + write_nic_byte(dev, 0x210000, 0x40); + readl(reg); + + /* copy microcode from buffer into Symbol */ + + while (microcode_data_left > 0) { + write_nic_byte(dev, 0x210010, *microcode_data++); + write_nic_byte(dev, 0x210010, *microcode_data++); + microcode_data_left -= 2; + } + + /* EN_CS_ACCESS bit to reset the control store pointer */ + write_nic_byte(dev, 0x210000, 0x0); + readl(reg); + + /* Enable System (Reg 0) + * first enable causes garbage in RX FIFO */ + write_nic_byte(dev, 0x210000, 0x0); + readl(reg); + write_nic_byte(dev, 0x210000, 0x80); + readl(reg); + + /* Reset External Baseband Reg */ + write_nic_word(dev, IPW2100_CONTROL_REG, 0x703); + readl(reg); + write_nic_word(dev, IPW2100_CONTROL_REG, 0x707); + readl(reg); + + /* HW Config (Reg 5) */ + write_nic_byte(dev, 0x210014, 0x72); // fifo width =16 + readl(reg); + write_nic_byte(dev, 0x210014, 0x72); // fifo width =16 + readl(reg); + + /* Enable System (Reg 0) + * second enable should be OK */ + write_nic_byte(dev, 0x210000, 0x00); // clear enable system + readl(reg); + write_nic_byte(dev, 0x210000, 0x80); // set enable system + + /* check Symbol is enabled - upped this from 5 as it wasn't always + * catching the update */ + for (i = 0; i < 10; i++) { + udelay(10); + + /* check Dino is enabled bit */ + read_nic_byte(dev, 0x210000, &data); + if (data & 0x1) + break; + } + + if (i == 10) { + printk(KERN_ERR DRV_NAME ": %s: Error initializing Symbol\n", + dev->name); + return -EIO; + } + + /* Get Symbol alive response */ + for (i = 0; i < 30; i++) { + /* Read alive response structure */ + for (j = 0; + j < (sizeof(struct symbol_alive_response) >> 1); j++) + read_nic_word(dev, 0x210004, ((u16 *) & response) + j); + + if ((response.cmd_id == 1) && (response.ucode_valid == 0x1)) + break; + udelay(10); + } + + if (i == 30) { + printk(KERN_ERR DRV_NAME + ": %s: No response from Symbol - hw not alive\n", + dev->name); + printk_buf(IPW_DL_ERROR, (u8 *) & response, sizeof(response)); + return -EIO; + } + + return 0; +} diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h new file mode 100644 index 0000000..1eab0d6 --- /dev/null +++ b/drivers/net/wireless/ipw2x00/ipw2100.h @@ -0,0 +1,1166 @@ +/****************************************************************************** + + Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + Intel Linux Wireless + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +******************************************************************************/ +#ifndef _IPW2100_H +#define _IPW2100_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // new driver API + +#ifdef CONFIG_IPW2100_MONITOR +#include +#endif + +#include +#include + +#include "libipw.h" + +struct ipw2100_priv; +struct ipw2100_tx_packet; +struct ipw2100_rx_packet; + +#define IPW_DL_UNINIT 0x80000000 +#define IPW_DL_NONE 0x00000000 +#define IPW_DL_ALL 0x7FFFFFFF + +/* + * To use the debug system; + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of: + * + * #define IPW_DL_xxxx VALUE + * + * shifting value to the left one bit from the previous entry. xxxx should be + * the name of the classification (for example, WEP) + * + * You then need to either add a IPW2100_xxxx_DEBUG() macro definition for your + * classification, or use IPW_DEBUG(IPW_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * To add your debug level to the list of levels seen when you perform + * + * % cat /proc/net/ipw2100/debug_level + * + * you simply need to add your entry to the ipw2100_debug_levels array. + * + * If you do not see debug_level in /proc/net/ipw2100 then you do not have + * CONFIG_IPW2100_DEBUG defined in your kernel configuration + * + */ + +#define IPW_DL_ERROR (1<<0) +#define IPW_DL_WARNING (1<<1) +#define IPW_DL_INFO (1<<2) +#define IPW_DL_WX (1<<3) +#define IPW_DL_HC (1<<5) +#define IPW_DL_STATE (1<<6) + +#define IPW_DL_NOTIF (1<<10) +#define IPW_DL_SCAN (1<<11) +#define IPW_DL_ASSOC (1<<12) +#define IPW_DL_DROP (1<<13) + +#define IPW_DL_IOCTL (1<<14) +#define IPW_DL_RF_KILL (1<<17) + +#define IPW_DL_MANAGE (1<<15) +#define IPW_DL_FW (1<<16) + +#define IPW_DL_FRAG (1<<21) +#define IPW_DL_WEP (1<<22) +#define IPW_DL_TX (1<<23) +#define IPW_DL_RX (1<<24) +#define IPW_DL_ISR (1<<25) +#define IPW_DL_IO (1<<26) +#define IPW_DL_TRACE (1<<28) + +#define IPW_DEBUG_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a) +#define IPW_DEBUG_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a) +#define IPW_DEBUG_INFO(f...) IPW_DEBUG(IPW_DL_INFO, ## f) +#define IPW_DEBUG_WX(f...) IPW_DEBUG(IPW_DL_WX, ## f) +#define IPW_DEBUG_SCAN(f...) IPW_DEBUG(IPW_DL_SCAN, ## f) +#define IPW_DEBUG_NOTIF(f...) IPW_DEBUG(IPW_DL_NOTIF, ## f) +#define IPW_DEBUG_TRACE(f...) IPW_DEBUG(IPW_DL_TRACE, ## f) +#define IPW_DEBUG_RX(f...) IPW_DEBUG(IPW_DL_RX, ## f) +#define IPW_DEBUG_TX(f...) IPW_DEBUG(IPW_DL_TX, ## f) +#define IPW_DEBUG_ISR(f...) IPW_DEBUG(IPW_DL_ISR, ## f) +#define IPW_DEBUG_MANAGEMENT(f...) IPW_DEBUG(IPW_DL_MANAGE, ## f) +#define IPW_DEBUG_WEP(f...) IPW_DEBUG(IPW_DL_WEP, ## f) +#define IPW_DEBUG_HC(f...) IPW_DEBUG(IPW_DL_HC, ## f) +#define IPW_DEBUG_FRAG(f...) 
IPW_DEBUG(IPW_DL_FRAG, ## f) +#define IPW_DEBUG_FW(f...) IPW_DEBUG(IPW_DL_FW, ## f) +#define IPW_DEBUG_RF_KILL(f...) IPW_DEBUG(IPW_DL_RF_KILL, ## f) +#define IPW_DEBUG_DROP(f...) IPW_DEBUG(IPW_DL_DROP, ## f) +#define IPW_DEBUG_IO(f...) IPW_DEBUG(IPW_DL_IO, ## f) +#define IPW_DEBUG_IOCTL(f...) IPW_DEBUG(IPW_DL_IOCTL, ## f) +#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a) +#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a) + +enum { + IPW_HW_STATE_DISABLED = 1, + IPW_HW_STATE_ENABLED = 0 +}; + +struct ssid_context { + char ssid[IW_ESSID_MAX_SIZE + 1]; + int ssid_len; + unsigned char bssid[ETH_ALEN]; + int port_type; + int channel; + +}; + +extern const char *port_type_str[]; +extern const char *band_str[]; + +#define NUMBER_OF_BD_PER_COMMAND_PACKET 1 +#define NUMBER_OF_BD_PER_DATA_PACKET 2 + +#define IPW_MAX_BDS 6 +#define NUMBER_OF_OVERHEAD_BDS_PER_PACKETR 2 +#define NUMBER_OF_BDS_TO_LEAVE_FOR_COMMANDS 1 + +#define REQUIRED_SPACE_IN_RING_FOR_COMMAND_PACKET \ + (IPW_BD_QUEUE_W_R_MIN_SPARE + NUMBER_OF_BD_PER_COMMAND_PACKET) + +struct bd_status { + union { + struct { + u8 nlf:1, txType:2, intEnabled:1, reserved:4; + } fields; + u8 field; + } info; +} __attribute__ ((packed)); + +struct ipw2100_bd { + u32 host_addr; + u32 buf_length; + struct bd_status status; + /* number of fragments for frame (should be set only for + * 1st TBD) */ + u8 num_fragments; + u8 reserved[6]; +} __attribute__ ((packed)); + +#define IPW_BD_QUEUE_LENGTH(n) (1<value = (x)->hi = 0; \ + (x)->lo = 0x7fffffff; \ +} while (0) +#define SET_STAT(x,y) do { \ + (x)->value = y; \ + if ((x)->value > (x)->hi) (x)->hi = (x)->value; \ + if ((x)->value < (x)->lo) (x)->lo = (x)->value; \ +} while (0) +#define INC_STAT(x) do { if (++(x)->value > (x)->hi) (x)->hi = (x)->value; } \ +while (0) +#define DEC_STAT(x) do { if (--(x)->value < (x)->lo) (x)->lo = (x)->value; } \ +while (0) + +#define IPW2100_ERROR_QUEUE 5 + +/* Power management code: enable or disable? 
*/ +enum { +#ifdef CONFIG_PM + IPW2100_PM_DISABLED = 0, + PM_STATE_SIZE = 16, +#else + IPW2100_PM_DISABLED = 1, + PM_STATE_SIZE = 0, +#endif +}; + +#define STATUS_POWERED (1<<0) +#define STATUS_CMD_ACTIVE (1<<1) /**< host command in progress */ +#define STATUS_RUNNING (1<<2) /* Card initialized, but not enabled */ +#define STATUS_ENABLED (1<<3) /* Card enabled -- can scan,Tx,Rx */ +#define STATUS_STOPPING (1<<4) /* Card is in shutdown phase */ +#define STATUS_INITIALIZED (1<<5) /* Card is ready for external calls */ +#define STATUS_ASSOCIATING (1<<9) /* Associated, but no BSSID yet */ +#define STATUS_ASSOCIATED (1<<10) /* Associated and BSSID valid */ +#define STATUS_INT_ENABLED (1<<11) +#define STATUS_RF_KILL_HW (1<<12) +#define STATUS_RF_KILL_SW (1<<13) +#define STATUS_RF_KILL_MASK (STATUS_RF_KILL_HW | STATUS_RF_KILL_SW) +#define STATUS_EXIT_PENDING (1<<14) + +#define STATUS_SCAN_PENDING (1<<23) +#define STATUS_SCANNING (1<<24) +#define STATUS_SCAN_ABORTING (1<<25) +#define STATUS_SCAN_COMPLETE (1<<26) +#define STATUS_WX_EVENT_PENDING (1<<27) +#define STATUS_RESET_PENDING (1<<29) +#define STATUS_SECURITY_UPDATED (1<<30) /* Security sync needed */ + +/* Internal NIC states */ +#define IPW_STATE_INITIALIZED (1<<0) +#define IPW_STATE_COUNTRY_FOUND (1<<1) +#define IPW_STATE_ASSOCIATED (1<<2) +#define IPW_STATE_ASSN_LOST (1<<3) +#define IPW_STATE_ASSN_CHANGED (1<<4) +#define IPW_STATE_SCAN_COMPLETE (1<<5) +#define IPW_STATE_ENTERED_PSP (1<<6) +#define IPW_STATE_LEFT_PSP (1<<7) +#define IPW_STATE_RF_KILL (1<<8) +#define IPW_STATE_DISABLED (1<<9) +#define IPW_STATE_POWER_DOWN (1<<10) +#define IPW_STATE_SCANNING (1<<11) + +#define CFG_STATIC_CHANNEL (1<<0) /* Restrict assoc. to single channel */ +#define CFG_STATIC_ESSID (1<<1) /* Restrict assoc. to single SSID */ +#define CFG_STATIC_BSSID (1<<2) /* Restrict assoc. 
to single BSSID */ +#define CFG_CUSTOM_MAC (1<<3) +#define CFG_LONG_PREAMBLE (1<<4) +#define CFG_ASSOCIATE (1<<6) +#define CFG_FIXED_RATE (1<<7) +#define CFG_ADHOC_CREATE (1<<8) +#define CFG_PASSIVE_SCAN (1<<10) +#ifdef CONFIG_IPW2100_MONITOR +#define CFG_CRC_CHECK (1<<11) +#endif + +#define CAP_SHARED_KEY (1<<0) /* Off = OPEN */ +#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */ + +struct ipw2100_priv { + + int stop_hang_check; /* Set 1 when shutting down to kill hang_check */ + int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */ + + struct libipw_device *ieee; + unsigned long status; + unsigned long config; + unsigned long capability; + + /* Statistics */ + int resets; + int reset_backoff; + + /* Context */ + u8 essid[IW_ESSID_MAX_SIZE]; + u8 essid_len; + u8 bssid[ETH_ALEN]; + u8 channel; + int last_mode; + + unsigned long connect_start; + unsigned long last_reset; + + u32 channel_mask; + u32 fatal_error; + u32 fatal_errors[IPW2100_ERROR_QUEUE]; + u32 fatal_index; + int eeprom_version; + int firmware_version; + unsigned long hw_features; + int hangs; + u32 last_rtc; + int dump_raw; /* 1 to dump raw bytes in /sys/.../memory */ + u8 *snapshot[0x30]; + + u8 mandatory_bssid_mac[ETH_ALEN]; + u8 mac_addr[ETH_ALEN]; + + int power_mode; + + int messages_sent; + + int short_retry_limit; + int long_retry_limit; + + u32 rts_threshold; + u32 frag_threshold; + + int in_isr; + + u32 tx_rates; + int tx_power; + u32 beacon_interval; + + char nick[IW_ESSID_MAX_SIZE + 1]; + + struct ipw2100_status_queue status_queue; + + struct statistic txq_stat; + struct statistic rxq_stat; + struct ipw2100_bd_queue rx_queue; + struct ipw2100_bd_queue tx_queue; + struct ipw2100_rx_packet *rx_buffers; + + struct statistic fw_pend_stat; + struct list_head fw_pend_list; + + struct statistic msg_free_stat; + struct statistic msg_pend_stat; + struct list_head msg_free_list; + struct list_head msg_pend_list; + struct ipw2100_tx_packet *msg_buffers; + + struct statistic tx_free_stat; + struct statistic tx_pend_stat; + struct list_head tx_free_list; + struct list_head tx_pend_list; + struct ipw2100_tx_packet *tx_buffers; + + struct ipw2100_ordinals ordinals; + + struct pci_dev *pci_dev; + + struct proc_dir_entry *dir_dev; + + struct net_device *net_dev; + struct iw_statistics wstats; + + struct iw_public_data wireless_data; + + struct tasklet_struct irq_tasklet; + + struct workqueue_struct *workqueue; + struct delayed_work reset_work; + struct delayed_work security_work; + struct delayed_work wx_event_work; + struct delayed_work hang_check; + struct delayed_work rf_kill; + struct work_struct scan_event_now; + struct delayed_work scan_event_later; + + int user_requested_scan; + + /* Track time in suspend */ + unsigned long suspend_at; + unsigned long suspend_time; + + u32 interrupts; + int tx_interrupts; + int rx_interrupts; + int inta_other; + + spinlock_t low_lock; + struct mutex action_mutex; + struct mutex adapter_mutex; + + wait_queue_head_t wait_command_queue; +}; + +/********************************************************* + * Host Command -> From Driver to FW + *********************************************************/ + +/** + * Host command identifiers + */ +#define HOST_COMPLETE 2 +#define SYSTEM_CONFIG 6 +#define SSID 8 +#define MANDATORY_BSSID 9 +#define AUTHENTICATION_TYPE 10 +#define ADAPTER_ADDRESS 11 +#define PORT_TYPE 12 +#define INTERNATIONAL_MODE 13 +#define CHANNEL 14 +#define RTS_THRESHOLD 15 +#define FRAG_THRESHOLD 16 +#define POWER_MODE 17 +#define TX_RATES 18 +#define BASIC_TX_RATES 19 
+#define WEP_KEY_INFO 20 +#define WEP_KEY_INDEX 25 +#define WEP_FLAGS 26 +#define ADD_MULTICAST 27 +#define CLEAR_ALL_MULTICAST 28 +#define BEACON_INTERVAL 29 +#define ATIM_WINDOW 30 +#define CLEAR_STATISTICS 31 +#define SEND 33 +#define TX_POWER_INDEX 36 +#define BROADCAST_SCAN 43 +#define CARD_DISABLE 44 +#define PREFERRED_BSSID 45 +#define SET_SCAN_OPTIONS 46 +#define SCAN_DWELL_TIME 47 +#define SWEEP_TABLE 48 +#define AP_OR_STATION_TABLE 49 +#define GROUP_ORDINALS 50 +#define SHORT_RETRY_LIMIT 51 +#define LONG_RETRY_LIMIT 52 + +#define HOST_PRE_POWER_DOWN 58 +#define CARD_DISABLE_PHY_OFF 61 +#define MSDU_TX_RATES 62 + +/* Rogue AP Detection */ +#define SET_STATION_STAT_BITS 64 +#define CLEAR_STATIONS_STAT_BITS 65 +#define LEAP_ROGUE_MODE 66 //TODO tbw replaced by CFG_LEAP_ROGUE_AP +#define SET_SECURITY_INFORMATION 67 +#define DISASSOCIATION_BSSID 68 +#define SET_WPA_IE 69 + +/* system configuration bit mask: */ +#define IPW_CFG_MONITOR 0x00004 +#define IPW_CFG_PREAMBLE_AUTO 0x00010 +#define IPW_CFG_IBSS_AUTO_START 0x00020 +#define IPW_CFG_LOOPBACK 0x00100 +#define IPW_CFG_ANSWER_BCSSID_PROBE 0x00800 +#define IPW_CFG_BT_SIDEBAND_SIGNAL 0x02000 +#define IPW_CFG_802_1x_ENABLE 0x04000 +#define IPW_CFG_BSS_MASK 0x08000 +#define IPW_CFG_IBSS_MASK 0x10000 + +#define IPW_SCAN_NOASSOCIATE (1<<0) +#define IPW_SCAN_MIXED_CELL (1<<1) +/* RESERVED (1<<2) */ +#define IPW_SCAN_PASSIVE (1<<3) + +#define IPW_NIC_FATAL_ERROR 0x2A7F0 +#define IPW_ERROR_ADDR(x) (x & 0x3FFFF) +#define IPW_ERROR_CODE(x) ((x & 0xFF000000) >> 24) +#define IPW2100_ERR_C3_CORRUPTION (0x10 << 24) +#define IPW2100_ERR_MSG_TIMEOUT (0x11 << 24) +#define IPW2100_ERR_FW_LOAD (0x12 << 24) + +#define IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND 0x200 +#define IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x0D80 + +#define IPW_MEM_HOST_SHARED_RX_BD_BASE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x40) +#define IPW_MEM_HOST_SHARED_RX_STATUS_BASE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x44) +#define IPW_MEM_HOST_SHARED_RX_BD_SIZE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x48) +#define IPW_MEM_HOST_SHARED_RX_READ_INDEX (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0xa0) + +#define IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x00) +#define IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x04) +#define IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x80) + +#define IPW_MEM_HOST_SHARED_RX_WRITE_INDEX \ + (IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND + 0x20) + +#define IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX \ + (IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND) + +#define IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1 (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x180) +#define IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2 (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x184) + +#define IPW2100_INTA_TX_TRANSFER (0x00000001) // Bit 0 (LSB) +#define IPW2100_INTA_RX_TRANSFER (0x00000002) // Bit 1 +#define IPW2100_INTA_TX_COMPLETE (0x00000004) // Bit 2 +#define IPW2100_INTA_EVENT_INTERRUPT (0x00000008) // Bit 3 +#define IPW2100_INTA_STATUS_CHANGE (0x00000010) // Bit 4 +#define IPW2100_INTA_BEACON_PERIOD_EXPIRED (0x00000020) // Bit 5 +#define IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE (0x00010000) // Bit 16 +#define IPW2100_INTA_FW_INIT_DONE (0x01000000) // Bit 24 +#define IPW2100_INTA_FW_CALIBRATION_CALC (0x02000000) // Bit 25 +#define IPW2100_INTA_FATAL_ERROR (0x40000000) // Bit 30 +#define IPW2100_INTA_PARITY_ERROR (0x80000000) // Bit 31 (MSB) + +#define 
IPW_AUX_HOST_RESET_REG_PRINCETON_RESET (0x00000001) +#define IPW_AUX_HOST_RESET_REG_FORCE_NMI (0x00000002) +#define IPW_AUX_HOST_RESET_REG_PCI_HOST_CLUSTER_FATAL_NMI (0x00000004) +#define IPW_AUX_HOST_RESET_REG_CORE_FATAL_NMI (0x00000008) +#define IPW_AUX_HOST_RESET_REG_SW_RESET (0x00000080) +#define IPW_AUX_HOST_RESET_REG_MASTER_DISABLED (0x00000100) +#define IPW_AUX_HOST_RESET_REG_STOP_MASTER (0x00000200) + +#define IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY (0x00000001) // Bit 0 (LSB) +#define IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY (0x00000002) // Bit 1 +#define IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE (0x00000004) // Bit 2 +#define IPW_AUX_HOST_GP_CNTRL_BITS_SYS_CONFIG (0x000007c0) // Bits 6-10 +#define IPW_AUX_HOST_GP_CNTRL_BIT_BUS_TYPE (0x00000200) // Bit 9 +#define IPW_AUX_HOST_GP_CNTRL_BIT_BAR0_BLOCK_SIZE (0x00000400) // Bit 10 +#define IPW_AUX_HOST_GP_CNTRL_BIT_USB_MODE (0x20000000) // Bit 29 +#define IPW_AUX_HOST_GP_CNTRL_BIT_HOST_FORCES_SYS_CLK (0x40000000) // Bit 30 +#define IPW_AUX_HOST_GP_CNTRL_BIT_FW_FORCES_SYS_CLK (0x80000000) // Bit 31 (MSB) + +#define IPW_BIT_GPIO_GPIO1_MASK 0x0000000C +#define IPW_BIT_GPIO_GPIO3_MASK 0x000000C0 +#define IPW_BIT_GPIO_GPIO1_ENABLE 0x00000008 +#define IPW_BIT_GPIO_RF_KILL 0x00010000 + +#define IPW_BIT_GPIO_LED_OFF 0x00002000 // Bit 13 = 1 + +#define IPW_REG_DOMAIN_0_OFFSET 0x0000 +#define IPW_REG_DOMAIN_1_OFFSET IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + +#define IPW_REG_INTA IPW_REG_DOMAIN_0_OFFSET + 0x0008 +#define IPW_REG_INTA_MASK IPW_REG_DOMAIN_0_OFFSET + 0x000C +#define IPW_REG_INDIRECT_ACCESS_ADDRESS IPW_REG_DOMAIN_0_OFFSET + 0x0010 +#define IPW_REG_INDIRECT_ACCESS_DATA IPW_REG_DOMAIN_0_OFFSET + 0x0014 +#define IPW_REG_AUTOINCREMENT_ADDRESS IPW_REG_DOMAIN_0_OFFSET + 0x0018 +#define IPW_REG_AUTOINCREMENT_DATA IPW_REG_DOMAIN_0_OFFSET + 0x001C +#define IPW_REG_RESET_REG IPW_REG_DOMAIN_0_OFFSET + 0x0020 +#define IPW_REG_GP_CNTRL IPW_REG_DOMAIN_0_OFFSET + 0x0024 +#define IPW_REG_GPIO IPW_REG_DOMAIN_0_OFFSET + 0x0030 +#define IPW_REG_FW_TYPE IPW_REG_DOMAIN_1_OFFSET + 0x0188 +#define IPW_REG_FW_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x018C +#define IPW_REG_FW_COMPATABILITY_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x0190 + +#define IPW_REG_INDIRECT_ADDR_MASK 0x00FFFFFC + +#define IPW_INTERRUPT_MASK 0xC1010013 + +#define IPW2100_CONTROL_REG 0x220000 +#define IPW2100_CONTROL_PHY_OFF 0x8 + +#define IPW2100_COMMAND 0x00300004 +#define IPW2100_COMMAND_PHY_ON 0x0 +#define IPW2100_COMMAND_PHY_OFF 0x1 + +/* in DEBUG_AREA, values of memory always 0xd55555d5 */ +#define IPW_REG_DOA_DEBUG_AREA_START IPW_REG_DOMAIN_0_OFFSET + 0x0090 +#define IPW_REG_DOA_DEBUG_AREA_END IPW_REG_DOMAIN_0_OFFSET + 0x00FF +#define IPW_DATA_DOA_DEBUG_VALUE 0xd55555d5 + +#define IPW_INTERNAL_REGISTER_HALT_AND_RESET 0x003000e0 + +#define IPW_WAIT_CLOCK_STABILIZATION_DELAY 50 // micro seconds +#define IPW_WAIT_RESET_ARC_COMPLETE_DELAY 10 // micro seconds +#define IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY 10 // micro seconds + +// BD ring queue read/write difference +#define IPW_BD_QUEUE_W_R_MIN_SPARE 2 + +#define IPW_CACHE_LINE_LENGTH_DEFAULT 0x80 + +#define IPW_CARD_DISABLE_PHY_OFF_COMPLETE_WAIT 100 // 100 milli +#define IPW_PREPARE_POWER_DOWN_COMPLETE_WAIT 100 // 100 milli + +#define IPW_HEADER_802_11_SIZE sizeof(struct libipw_hdr_3addr) +#define IPW_MAX_80211_PAYLOAD_SIZE 2304U +#define IPW_MAX_802_11_PAYLOAD_LENGTH 2312 +#define IPW_MAX_ACCEPTABLE_TX_FRAME_LENGTH 1536 +#define IPW_MIN_ACCEPTABLE_RX_FRAME_LENGTH 60 +#define IPW_MAX_ACCEPTABLE_RX_FRAME_LENGTH \ + 
(IPW_MAX_ACCEPTABLE_TX_FRAME_LENGTH + IPW_HEADER_802_11_SIZE - \ + sizeof(struct ethhdr)) + +#define IPW_802_11_FCS_LENGTH 4 +#define IPW_RX_NIC_BUFFER_LENGTH \ + (IPW_MAX_802_11_PAYLOAD_LENGTH + IPW_HEADER_802_11_SIZE + \ + IPW_802_11_FCS_LENGTH) + +#define IPW_802_11_PAYLOAD_OFFSET \ + (sizeof(struct libipw_hdr_3addr) + \ + sizeof(struct libipw_snap_hdr)) + +struct ipw2100_rx { + union { + unsigned char payload[IPW_RX_NIC_BUFFER_LENGTH]; + struct libipw_hdr_4addr header; + u32 status; + struct ipw2100_notification notification; + struct ipw2100_cmd_header command; + } rx_data; +} __attribute__ ((packed)); + +/* Bit 0-7 are for 802.11b tx rates - . Bit 5-7 are reserved */ +#define TX_RATE_1_MBIT 0x0001 +#define TX_RATE_2_MBIT 0x0002 +#define TX_RATE_5_5_MBIT 0x0004 +#define TX_RATE_11_MBIT 0x0008 +#define TX_RATE_MASK 0x000F +#define DEFAULT_TX_RATES 0x000F + +#define IPW_POWER_MODE_CAM 0x00 //(always on) +#define IPW_POWER_INDEX_1 0x01 +#define IPW_POWER_INDEX_2 0x02 +#define IPW_POWER_INDEX_3 0x03 +#define IPW_POWER_INDEX_4 0x04 +#define IPW_POWER_INDEX_5 0x05 +#define IPW_POWER_AUTO 0x06 +#define IPW_POWER_MASK 0x0F +#define IPW_POWER_ENABLED 0x10 +#define IPW_POWER_LEVEL(x) ((x) & IPW_POWER_MASK) + +#define IPW_TX_POWER_AUTO 0 +#define IPW_TX_POWER_ENHANCED 1 + +#define IPW_TX_POWER_DEFAULT 32 +#define IPW_TX_POWER_MIN 0 +#define IPW_TX_POWER_MAX 16 +#define IPW_TX_POWER_MIN_DBM (-12) +#define IPW_TX_POWER_MAX_DBM 16 + +#define FW_SCAN_DONOT_ASSOCIATE 0x0001 // Dont Attempt to Associate after Scan +#define FW_SCAN_PASSIVE 0x0008 // Force PASSSIVE Scan + +#define REG_MIN_CHANNEL 0 +#define REG_MAX_CHANNEL 14 + +#define REG_CHANNEL_MASK 0x00003FFF +#define IPW_IBSS_11B_DEFAULT_MASK 0x87ff + +#define DIVERSITY_EITHER 0 // Use both antennas +#define DIVERSITY_ANTENNA_A 1 // Use antenna A +#define DIVERSITY_ANTENNA_B 2 // Use antenna B + +#define HOST_COMMAND_WAIT 0 +#define HOST_COMMAND_NO_WAIT 1 + +#define LOCK_NONE 0 +#define LOCK_DRIVER 1 +#define LOCK_FW 2 + +#define TYPE_SWEEP_ORD 0x000D +#define TYPE_IBSS_STTN_ORD 0x000E +#define TYPE_BSS_AP_ORD 0x000F +#define TYPE_RAW_BEACON_ENTRY 0x0010 +#define TYPE_CALIBRATION_DATA 0x0011 +#define TYPE_ROGUE_AP_DATA 0x0012 +#define TYPE_ASSOCIATION_REQUEST 0x0013 +#define TYPE_REASSOCIATION_REQUEST 0x0014 + +#define HW_FEATURE_RFKILL 0x0001 +#define RF_KILLSWITCH_OFF 1 +#define RF_KILLSWITCH_ON 0 + +#define IPW_COMMAND_POOL_SIZE 40 + +#define IPW_START_ORD_TAB_1 1 +#define IPW_START_ORD_TAB_2 1000 + +#define IPW_ORD_TAB_1_ENTRY_SIZE sizeof(u32) + +#define IS_ORDINAL_TABLE_ONE(mgr,id) \ + ((id >= IPW_START_ORD_TAB_1) && (id < mgr->table1_size)) +#define IS_ORDINAL_TABLE_TWO(mgr,id) \ + ((id >= IPW_START_ORD_TAB_2) && (id < (mgr->table2_size + IPW_START_ORD_TAB_2))) + +#define BSS_ID_LENGTH 6 + +// Fixed size data: Ordinal Table 1 +typedef enum _ORDINAL_TABLE_1 { // NS - means Not Supported by FW +// Transmit statistics + IPW_ORD_STAT_TX_HOST_REQUESTS = 1, // # of requested Host Tx's (MSDU) + IPW_ORD_STAT_TX_HOST_COMPLETE, // # of successful Host Tx's (MSDU) + IPW_ORD_STAT_TX_DIR_DATA, // # of successful Directed Tx's (MSDU) + + IPW_ORD_STAT_TX_DIR_DATA1 = 4, // # of successful Directed Tx's (MSDU) @ 1MB + IPW_ORD_STAT_TX_DIR_DATA2, // # of successful Directed Tx's (MSDU) @ 2MB + IPW_ORD_STAT_TX_DIR_DATA5_5, // # of successful Directed Tx's (MSDU) @ 5_5MB + IPW_ORD_STAT_TX_DIR_DATA11, // # of successful Directed Tx's (MSDU) @ 11MB + IPW_ORD_STAT_TX_DIR_DATA22, // # of successful Directed Tx's (MSDU) @ 22MB + + IPW_ORD_STAT_TX_NODIR_DATA1 = 13, // 
# of successful Non_Directed Tx's (MSDU) @ 1MB + IPW_ORD_STAT_TX_NODIR_DATA2, // # of successful Non_Directed Tx's (MSDU) @ 2MB + IPW_ORD_STAT_TX_NODIR_DATA5_5, // # of successful Non_Directed Tx's (MSDU) @ 5.5MB + IPW_ORD_STAT_TX_NODIR_DATA11, // # of successful Non_Directed Tx's (MSDU) @ 11MB + + IPW_ORD_STAT_NULL_DATA = 21, // # of successful NULL data Tx's + IPW_ORD_STAT_TX_RTS, // # of successful Tx RTS + IPW_ORD_STAT_TX_CTS, // # of successful Tx CTS + IPW_ORD_STAT_TX_ACK, // # of successful Tx ACK + IPW_ORD_STAT_TX_ASSN, // # of successful Association Tx's + IPW_ORD_STAT_TX_ASSN_RESP, // # of successful Association response Tx's + IPW_ORD_STAT_TX_REASSN, // # of successful Reassociation Tx's + IPW_ORD_STAT_TX_REASSN_RESP, // # of successful Reassociation response Tx's + IPW_ORD_STAT_TX_PROBE, // # of probes successfully transmitted + IPW_ORD_STAT_TX_PROBE_RESP, // # of probe responses successfully transmitted + IPW_ORD_STAT_TX_BEACON, // # of tx beacon + IPW_ORD_STAT_TX_ATIM, // # of Tx ATIM + IPW_ORD_STAT_TX_DISASSN, // # of successful Disassociation TX + IPW_ORD_STAT_TX_AUTH, // # of successful Authentication Tx + IPW_ORD_STAT_TX_DEAUTH, // # of successful Deauthentication TX + + IPW_ORD_STAT_TX_TOTAL_BYTES = 41, // Total successful Tx data bytes + IPW_ORD_STAT_TX_RETRIES, // # of Tx retries + IPW_ORD_STAT_TX_RETRY1, // # of Tx retries at 1MBPS + IPW_ORD_STAT_TX_RETRY2, // # of Tx retries at 2MBPS + IPW_ORD_STAT_TX_RETRY5_5, // # of Tx retries at 5.5MBPS + IPW_ORD_STAT_TX_RETRY11, // # of Tx retries at 11MBPS + + IPW_ORD_STAT_TX_FAILURES = 51, // # of Tx Failures + IPW_ORD_STAT_TX_ABORT_AT_HOP, //NS // # of Tx's aborted at hop time + IPW_ORD_STAT_TX_MAX_TRIES_IN_HOP, // # of times max tries in a hop failed + IPW_ORD_STAT_TX_ABORT_LATE_DMA, //NS // # of times tx aborted due to late dma setup + IPW_ORD_STAT_TX_ABORT_STX, //NS // # of times backoff aborted + IPW_ORD_STAT_TX_DISASSN_FAIL, // # of times disassociation failed + IPW_ORD_STAT_TX_ERR_CTS, // # of missed/bad CTS frames + IPW_ORD_STAT_TX_BPDU, //NS // # of spanning tree BPDUs sent + IPW_ORD_STAT_TX_ERR_ACK, // # of tx err due to acks + + // Receive statistics + IPW_ORD_STAT_RX_HOST = 61, // # of packets passed to host + IPW_ORD_STAT_RX_DIR_DATA, // # of directed packets + IPW_ORD_STAT_RX_DIR_DATA1, // # of directed packets at 1MB + IPW_ORD_STAT_RX_DIR_DATA2, // # of directed packets at 2MB + IPW_ORD_STAT_RX_DIR_DATA5_5, // # of directed packets at 5.5MB + IPW_ORD_STAT_RX_DIR_DATA11, // # of directed packets at 11MB + IPW_ORD_STAT_RX_DIR_DATA22, // # of directed packets at 22MB + + IPW_ORD_STAT_RX_NODIR_DATA = 71, // # of nondirected packets + IPW_ORD_STAT_RX_NODIR_DATA1, // # of nondirected packets at 1MB + IPW_ORD_STAT_RX_NODIR_DATA2, // # of nondirected packets at 2MB + IPW_ORD_STAT_RX_NODIR_DATA5_5, // # of nondirected packets at 5.5MB + IPW_ORD_STAT_RX_NODIR_DATA11, // # of nondirected packets at 11MB + + IPW_ORD_STAT_RX_NULL_DATA = 80, // # of null data rx's + IPW_ORD_STAT_RX_POLL, //NS // # of poll rx + IPW_ORD_STAT_RX_RTS, // # of Rx RTS + IPW_ORD_STAT_RX_CTS, // # of Rx CTS + IPW_ORD_STAT_RX_ACK, // # of Rx ACK + IPW_ORD_STAT_RX_CFEND, // # of Rx CF End + IPW_ORD_STAT_RX_CFEND_ACK, // # of Rx CF End + CF Ack + IPW_ORD_STAT_RX_ASSN, // # of Association Rx's + IPW_ORD_STAT_RX_ASSN_RESP, // # of Association response Rx's + IPW_ORD_STAT_RX_REASSN, // # of Reassociation Rx's + IPW_ORD_STAT_RX_REASSN_RESP, // # of Reassociation response Rx's + IPW_ORD_STAT_RX_PROBE, // # of probe Rx's + IPW_ORD_STAT_RX_PROBE_RESP, // # of 
probe response Rx's + IPW_ORD_STAT_RX_BEACON, // # of Rx beacon + IPW_ORD_STAT_RX_ATIM, // # of Rx ATIM + IPW_ORD_STAT_RX_DISASSN, // # of disassociation Rx + IPW_ORD_STAT_RX_AUTH, // # of authentication Rx + IPW_ORD_STAT_RX_DEAUTH, // # of deauthentication Rx + + IPW_ORD_STAT_RX_TOTAL_BYTES = 101, // Total rx data bytes received + IPW_ORD_STAT_RX_ERR_CRC, // # of packets with Rx CRC error + IPW_ORD_STAT_RX_ERR_CRC1, // # of Rx CRC errors at 1MB + IPW_ORD_STAT_RX_ERR_CRC2, // # of Rx CRC errors at 2MB + IPW_ORD_STAT_RX_ERR_CRC5_5, // # of Rx CRC errors at 5.5MB + IPW_ORD_STAT_RX_ERR_CRC11, // # of Rx CRC errors at 11MB + + IPW_ORD_STAT_RX_DUPLICATE1 = 112, // # of duplicate rx packets at 1MB + IPW_ORD_STAT_RX_DUPLICATE2, // # of duplicate rx packets at 2MB + IPW_ORD_STAT_RX_DUPLICATE5_5, // # of duplicate rx packets at 5.5MB + IPW_ORD_STAT_RX_DUPLICATE11, // # of duplicate rx packets at 11MB + IPW_ORD_STAT_RX_DUPLICATE = 119, // # of duplicate rx packets + + IPW_ORD_PERS_DB_LOCK = 120, // # locking fw permanent db + IPW_ORD_PERS_DB_SIZE, // # size of fw permanent db + IPW_ORD_PERS_DB_ADDR, // # address of fw permanent db + IPW_ORD_STAT_RX_INVALID_PROTOCOL, // # of rx frames with invalid protocol + IPW_ORD_SYS_BOOT_TIME, // # Boot time + IPW_ORD_STAT_RX_NO_BUFFER, // # of rx frames rejected due to no buffer + IPW_ORD_STAT_RX_ABORT_LATE_DMA, //NS // # of rx frames rejected due to dma setup too late + IPW_ORD_STAT_RX_ABORT_AT_HOP, //NS // # of rx frames aborted due to hop + IPW_ORD_STAT_RX_MISSING_FRAG, // # of rx frames dropped due to missing fragment + IPW_ORD_STAT_RX_ORPHAN_FRAG, // # of rx frames dropped due to non-sequential fragment + IPW_ORD_STAT_RX_ORPHAN_FRAME, // # of rx frames dropped due to unmatched 1st frame + IPW_ORD_STAT_RX_FRAG_AGEOUT, // # of rx frames dropped due to uncompleted frame + IPW_ORD_STAT_RX_BAD_SSID, //NS // Bad SSID (unused) + IPW_ORD_STAT_RX_ICV_ERRORS, // # of ICV errors during decryption + +// PSP Statistics + IPW_ORD_STAT_PSP_SUSPENSION = 137, // # of times adapter suspended + IPW_ORD_STAT_PSP_BCN_TIMEOUT, // # of beacon timeout + IPW_ORD_STAT_PSP_POLL_TIMEOUT, // # of poll response timeouts + IPW_ORD_STAT_PSP_NONDIR_TIMEOUT, // # of timeouts waiting for last broadcast/muticast pkt + IPW_ORD_STAT_PSP_RX_DTIMS, // # of PSP DTIMs received + IPW_ORD_STAT_PSP_RX_TIMS, // # of PSP TIMs received + IPW_ORD_STAT_PSP_STATION_ID, // PSP Station ID + +// Association and roaming + IPW_ORD_LAST_ASSN_TIME = 147, // RTC time of last association + IPW_ORD_STAT_PERCENT_MISSED_BCNS, // current calculation of % missed beacons + IPW_ORD_STAT_PERCENT_RETRIES, // current calculation of % missed tx retries + IPW_ORD_ASSOCIATED_AP_PTR, // If associated, this is ptr to the associated + // AP table entry. 
set to 0 if not associated + IPW_ORD_AVAILABLE_AP_CNT, // # of AP's decsribed in the AP table + IPW_ORD_AP_LIST_PTR, // Ptr to list of available APs + IPW_ORD_STAT_AP_ASSNS, // # of associations + IPW_ORD_STAT_ASSN_FAIL, // # of association failures + IPW_ORD_STAT_ASSN_RESP_FAIL, // # of failuresdue to response fail + IPW_ORD_STAT_FULL_SCANS, // # of full scans + + IPW_ORD_CARD_DISABLED, // # Card Disabled + IPW_ORD_STAT_ROAM_INHIBIT, // # of times roaming was inhibited due to ongoing activity + IPW_FILLER_40, + IPW_ORD_RSSI_AT_ASSN = 160, // RSSI of associated AP at time of association + IPW_ORD_STAT_ASSN_CAUSE1, // # of reassociations due to no tx from AP in last N + // hops or no prob_ responses in last 3 minutes + IPW_ORD_STAT_ASSN_CAUSE2, // # of reassociations due to poor tx/rx quality + IPW_ORD_STAT_ASSN_CAUSE3, // # of reassociations due to tx/rx quality with excessive + // load at the AP + IPW_ORD_STAT_ASSN_CAUSE4, // # of reassociations due to AP RSSI level fell below + // eligible group + IPW_ORD_STAT_ASSN_CAUSE5, // # of reassociations due to load leveling + IPW_ORD_STAT_ASSN_CAUSE6, //NS // # of reassociations due to dropped by Ap + IPW_FILLER_41, + IPW_FILLER_42, + IPW_FILLER_43, + IPW_ORD_STAT_AUTH_FAIL, // # of times authentication failed + IPW_ORD_STAT_AUTH_RESP_FAIL, // # of times authentication response failed + IPW_ORD_STATION_TABLE_CNT, // # of entries in association table + +// Other statistics + IPW_ORD_RSSI_AVG_CURR = 173, // Current avg RSSI + IPW_ORD_STEST_RESULTS_CURR, //NS // Current self test results word + IPW_ORD_STEST_RESULTS_CUM, //NS // Cummulative self test results word + IPW_ORD_SELF_TEST_STATUS, //NS // + IPW_ORD_POWER_MGMT_MODE, // Power mode - 0=CAM, 1=PSP + IPW_ORD_POWER_MGMT_INDEX, //NS // + IPW_ORD_COUNTRY_CODE, // IEEE country code as recv'd from beacon + IPW_ORD_COUNTRY_CHANNELS, // channels suported by country +// IPW_ORD_COUNTRY_CHANNELS: +// For 11b the lower 2-byte are used for channels from 1-14 +// and the higher 2-byte are not used. 
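// Illustrative note (an assumption, not taken from a firmware spec): if the
// low 16 bits map bit 0 to channel 1, a driver-side support check for this
// ordinal could look like
//
//	supported = !!(country_channels & BIT(channel - 1));
//
// where `country_channels` is the u32 read back for IPW_ORD_COUNTRY_CHANNELS
// and `channel` is in the 1..REG_MAX_CHANNEL range covered by
// REG_CHANNEL_MASK above.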
+ IPW_ORD_RESET_CNT, // # of adapter resets (warm) + IPW_ORD_BEACON_INTERVAL, // Beacon interval + + IPW_ORD_PRINCETON_VERSION = 184, //NS // Princeton Version + IPW_ORD_ANTENNA_DIVERSITY, // TRUE if antenna diversity is disabled + IPW_ORD_CCA_RSSI, //NS // CCA RSSI value (factory programmed) + IPW_ORD_STAT_EEPROM_UPDATE, //NS // # of times config EEPROM updated + IPW_ORD_DTIM_PERIOD, // # of beacon intervals between DTIMs + IPW_ORD_OUR_FREQ, // current radio freq lower digits - channel ID + + IPW_ORD_RTC_TIME = 190, // current RTC time + IPW_ORD_PORT_TYPE, // operating mode + IPW_ORD_CURRENT_TX_RATE, // current tx rate + IPW_ORD_SUPPORTED_RATES, // Bitmap of supported tx rates + IPW_ORD_ATIM_WINDOW, // current ATIM Window + IPW_ORD_BASIC_RATES, // bitmap of basic tx rates + IPW_ORD_NIC_HIGHEST_RATE, // bitmap of basic tx rates + IPW_ORD_AP_HIGHEST_RATE, // bitmap of basic tx rates + IPW_ORD_CAPABILITIES, // Management frame capability field + IPW_ORD_AUTH_TYPE, // Type of authentication + IPW_ORD_RADIO_TYPE, // Adapter card platform type + IPW_ORD_RTS_THRESHOLD = 201, // Min length of packet after which RTS handshaking is used + IPW_ORD_INT_MODE, // International mode + IPW_ORD_FRAGMENTATION_THRESHOLD, // protocol frag threshold + IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS, // EEPROM offset in SRAM + IPW_ORD_EEPROM_SRAM_DB_BLOCK_SIZE, // EEPROM size in SRAM + IPW_ORD_EEPROM_SKU_CAPABILITY, // EEPROM SKU Capability 206 = + IPW_ORD_EEPROM_IBSS_11B_CHANNELS, // EEPROM IBSS 11b channel set + + IPW_ORD_MAC_VERSION = 209, // MAC Version + IPW_ORD_MAC_REVISION, // MAC Revision + IPW_ORD_RADIO_VERSION, // Radio Version + IPW_ORD_NIC_MANF_DATE_TIME, // MANF Date/Time STAMP + IPW_ORD_UCODE_VERSION, // Ucode Version + IPW_ORD_HW_RF_SWITCH_STATE = 214, // HW RF Kill Switch State +} ORDINALTABLE1; + +// ordinal table 2 +// Variable length data: +#define IPW_FIRST_VARIABLE_LENGTH_ORDINAL 1001 + +typedef enum _ORDINAL_TABLE_2 { // NS - means Not Supported by FW + IPW_ORD_STAT_BASE = 1000, // contains number of variable ORDs + IPW_ORD_STAT_ADAPTER_MAC = 1001, // 6 bytes: our adapter MAC address + IPW_ORD_STAT_PREFERRED_BSSID = 1002, // 6 bytes: BSSID of the preferred AP + IPW_ORD_STAT_MANDATORY_BSSID = 1003, // 6 bytes: BSSID of the mandatory AP + IPW_FILL_1, //NS // + IPW_ORD_STAT_COUNTRY_TEXT = 1005, // 36 bytes: Country name text, First two bytes are Country code + IPW_ORD_STAT_ASSN_SSID = 1006, // 32 bytes: ESSID String + IPW_ORD_STATION_TABLE = 1007, // ? bytes: Station/AP table (via Direct SSID Scans) + IPW_ORD_STAT_SWEEP_TABLE = 1008, // ? bytes: Sweep/Host Table table (via Broadcast Scans) + IPW_ORD_STAT_ROAM_LOG = 1009, // ? bytes: Roaming log + IPW_ORD_STAT_RATE_LOG = 1010, //NS // 0 bytes: Rate log + IPW_ORD_STAT_FIFO = 1011, //NS // 0 bytes: Fifo buffer data structures + IPW_ORD_STAT_FW_VER_NUM = 1012, // 14 bytes: fw version ID string as in (a.bb.ccc; "0.08.011") + IPW_ORD_STAT_FW_DATE = 1013, // 14 bytes: fw date string (mmm dd yyyy; "Mar 13 2002") + IPW_ORD_STAT_ASSN_AP_BSSID = 1014, // 6 bytes: MAC address of associated AP + IPW_ORD_STAT_DEBUG = 1015, //NS // ? 
bytes: + IPW_ORD_STAT_NIC_BPA_NUM = 1016, // 11 bytes: NIC BPA number in ASCII + IPW_ORD_STAT_UCODE_DATE = 1017, // 5 bytes: uCode date + IPW_ORD_SECURITY_NGOTIATION_RESULT = 1018, +} ORDINALTABLE2; // NS - means Not Supported by FW + +#define IPW_LAST_VARIABLE_LENGTH_ORDINAL 1018 + +#ifndef WIRELESS_SPY +#define WIRELESS_SPY // enable iwspy support +#endif + +#define IPW_HOST_FW_SHARED_AREA0 0x0002f200 +#define IPW_HOST_FW_SHARED_AREA0_END 0x0002f510 // 0x310 bytes + +#define IPW_HOST_FW_SHARED_AREA1 0x0002f610 +#define IPW_HOST_FW_SHARED_AREA1_END 0x0002f630 // 0x20 bytes + +#define IPW_HOST_FW_SHARED_AREA2 0x0002fa00 +#define IPW_HOST_FW_SHARED_AREA2_END 0x0002fa20 // 0x20 bytes + +#define IPW_HOST_FW_SHARED_AREA3 0x0002fc00 +#define IPW_HOST_FW_SHARED_AREA3_END 0x0002fc10 // 0x10 bytes + +#define IPW_HOST_FW_INTERRUPT_AREA 0x0002ff80 +#define IPW_HOST_FW_INTERRUPT_AREA_END 0x00030000 // 0x80 bytes + +struct ipw2100_fw_chunk { + unsigned char *buf; + long len; + long pos; + struct list_head list; +}; + +struct ipw2100_fw_chunk_set { + const void *data; + unsigned long size; +}; + +struct ipw2100_fw { + int version; + struct ipw2100_fw_chunk_set fw; + struct ipw2100_fw_chunk_set uc; + const struct firmware *fw_entry; +}; + +#define MAX_FW_VERSION_LEN 14 + +#endif /* _IPW2100_H */ diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c new file mode 100644 index 0000000..551fad1 --- /dev/null +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -0,0 +1,12137 @@ +/****************************************************************************** + + Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. + + 802.11 status code portion of this file from ethereal-0.10.6: + Copyright 2000, Axis Communications AB + Ethereal - Network traffic analyzer + By Gerald Combs + Copyright 1998 Gerald Combs + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + Intel Linux Wireless + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +******************************************************************************/ + +#include +#include "ipw2200.h" + + +#ifndef KBUILD_EXTMOD +#define VK "k" +#else +#define VK +#endif + +#ifdef CONFIG_IPW2200_DEBUG +#define VD "d" +#else +#define VD +#endif + +#ifdef CONFIG_IPW2200_MONITOR +#define VM "m" +#else +#define VM +#endif + +#ifdef CONFIG_IPW2200_PROMISCUOUS +#define VP "p" +#else +#define VP +#endif + +#ifdef CONFIG_IPW2200_RADIOTAP +#define VR "r" +#else +#define VR +#endif + +#ifdef CONFIG_IPW2200_QOS +#define VQ "q" +#else +#define VQ +#endif + +#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ +#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" +#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" +#define DRV_VERSION IPW2200_VERSION + +#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1) + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("ipw2200-ibss.fw"); +#ifdef CONFIG_IPW2200_MONITOR +MODULE_FIRMWARE("ipw2200-sniffer.fw"); +#endif +MODULE_FIRMWARE("ipw2200-bss.fw"); + +static int cmdlog = 0; +static int debug = 0; +static int default_channel = 0; +static int network_mode = 0; + +static u32 ipw_debug_level; +static int associate; +static int auto_create = 1; +static int led_support = 0; +static int disable = 0; +static int bt_coexist = 0; +static int hwcrypto = 0; +static int roaming = 1; +static const char ipw_modes[] = { + 'a', 'b', 'g', '?' +}; +static int antenna = CFG_SYS_ANTENNA_BOTH; + +#ifdef CONFIG_IPW2200_PROMISCUOUS +static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */ +#endif + +static struct ieee80211_rate ipw2200_rates[] = { + { .bitrate = 10 }, + { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60 }, + { .bitrate = 90 }, + { .bitrate = 120 }, + { .bitrate = 180 }, + { .bitrate = 240 }, + { .bitrate = 360 }, + { .bitrate = 480 }, + { .bitrate = 540 } +}; + +#define ipw2200_a_rates (ipw2200_rates + 4) +#define ipw2200_num_a_rates 8 +#define ipw2200_bg_rates (ipw2200_rates + 0) +#define ipw2200_num_bg_rates 12 + +#ifdef CONFIG_IPW2200_QOS +static int qos_enable = 0; +static int qos_burst_enable = 0; +static int qos_no_ack_mask = 0; +static int burst_duration_CCK = 0; +static int burst_duration_OFDM = 0; + +static struct libipw_qos_parameters def_qos_parameters_OFDM = { + {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM, + QOS_TX3_CW_MIN_OFDM}, + {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM, + QOS_TX3_CW_MAX_OFDM}, + {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS}, + {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM}, + {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM, + QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM} +}; + +static struct libipw_qos_parameters def_qos_parameters_CCK = { + {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK, + QOS_TX3_CW_MIN_CCK}, + {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK, + QOS_TX3_CW_MAX_CCK}, + {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS}, + {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM}, + {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK, + QOS_TX3_TXOP_LIMIT_CCK} +}; + +static struct libipw_qos_parameters def_parameters_OFDM = { + {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, 
DEF_TX2_CW_MIN_OFDM, + DEF_TX3_CW_MIN_OFDM}, + {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM, + DEF_TX3_CW_MAX_OFDM}, + {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS}, + {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM}, + {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM, + DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM} +}; + +static struct libipw_qos_parameters def_parameters_CCK = { + {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK, + DEF_TX3_CW_MIN_CCK}, + {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK, + DEF_TX3_CW_MAX_CCK}, + {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS}, + {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM}, + {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK, + DEF_TX3_TXOP_LIMIT_CCK} +}; + +static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; + +static int from_priority_to_tx_queue[] = { + IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1, + IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4 +}; + +static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv); + +static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters + *qos_param); +static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element + *qos_param); +#endif /* CONFIG_IPW2200_QOS */ + +static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev); +static void ipw_remove_current_network(struct ipw_priv *priv); +static void ipw_rx(struct ipw_priv *priv); +static int ipw_queue_tx_reclaim(struct ipw_priv *priv, + struct clx2_tx_queue *txq, int qindex); +static int ipw_queue_reset(struct ipw_priv *priv); + +static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, + int len, int sync); + +static void ipw_tx_queue_free(struct ipw_priv *); + +static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); +static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); +static void ipw_rx_queue_replenish(void *); +static int ipw_up(struct ipw_priv *); +static void ipw_bg_up(struct work_struct *work); +static void ipw_down(struct ipw_priv *); +static void ipw_bg_down(struct work_struct *work); +static int ipw_config(struct ipw_priv *); +static int init_supported_rates(struct ipw_priv *priv, + struct ipw_supported_rates *prates); +static void ipw_set_hwcrypto_keys(struct ipw_priv *); +static void ipw_send_wep_keys(struct ipw_priv *, int); + +static int snprint_line(char *buf, size_t count, + const u8 * data, u32 len, u32 ofs) +{ + int out, i, j, l; + char c; + + out = snprintf(buf, count, "%08X", ofs); + + for (l = 0, i = 0; i < 2; i++) { + out += snprintf(buf + out, count - out, " "); + for (j = 0; j < 8 && l < len; j++, l++) + out += snprintf(buf + out, count - out, "%02X ", + data[(i * 8 + j)]); + for (; j < 8; j++) + out += snprintf(buf + out, count - out, " "); + } + + out += snprintf(buf + out, count - out, " "); + for (l = 0, i = 0; i < 2; i++) { + out += snprintf(buf + out, count - out, " "); + for (j = 0; j < 8 && l < len; j++, l++) { + c = data[(i * 8 + j)]; + if (!isascii(c) || !isprint(c)) + c = '.'; + + out += snprintf(buf + out, count - out, "%c", c); + } + + for (; j < 8; j++) + out += snprintf(buf + out, count - out, " "); + } + + return out; +} + +static void printk_buf(int level, const u8 * data, u32 len) +{ + char line[81]; + u32 ofs = 0; + if (!(ipw_debug_level & level)) + return; + + while (len) { + snprint_line(line, sizeof(line), &data[ofs], + min(len, 16U), ofs); 
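	/*
	 * For reference, each line produced by snprint_line() above looks
	 * roughly like this (illustrative bytes, spacing per the snprintf
	 * calls):
	 *
	 *   00000010  41 42 43 00 07 61 62 63  64 65 66 67 68 69 6A 6B   ABC..abc defghijk
	 *
	 * i.e. an 8-digit hex offset, two groups of eight hex bytes, then the
	 * same bytes rendered as ASCII with '.' substituted for any
	 * non-printable character.
	 */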
+ printk(KERN_DEBUG "%s\n", line); + ofs += 16; + len -= min(len, 16U); + } +} + +static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len) +{ + size_t out = size; + u32 ofs = 0; + int total = 0; + + while (size && len) { + out = snprint_line(output, size, &data[ofs], + min_t(size_t, len, 16U), ofs); + + ofs += 16; + output += out; + size -= out; + len -= min_t(size_t, len, 16U); + total += out; + } + return total; +} + +/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ +static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); +#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) + +/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ +static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg); +#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) + +/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ +static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); +static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) +{ + IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__, + __LINE__, (u32) (b), (u32) (c)); + _ipw_write_reg8(a, b, c); +} + +/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ +static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); +static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) +{ + IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__, + __LINE__, (u32) (b), (u32) (c)); + _ipw_write_reg16(a, b, c); +} + +/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ +static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); +static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) +{ + IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__, + __LINE__, (u32) (b), (u32) (c)); + _ipw_write_reg32(a, b, c); +} + +/* 8-bit direct write (low 4K) */ +static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs, + u8 val) +{ + writeb(val, ipw->hw_base + ofs); +} + +/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ +#define ipw_write8(ipw, ofs, val) do { \ + IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \ + __LINE__, (u32)(ofs), (u32)(val)); \ + _ipw_write8(ipw, ofs, val); \ +} while (0) + +/* 16-bit direct write (low 4K) */ +static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs, + u16 val) +{ + writew(val, ipw->hw_base + ofs); +} + +/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ +#define ipw_write16(ipw, ofs, val) do { \ + IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \ + __LINE__, (u32)(ofs), (u32)(val)); \ + _ipw_write16(ipw, ofs, val); \ +} while (0) + +/* 32-bit direct write (low 4K) */ +static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs, + u32 val) +{ + writel(val, ipw->hw_base + ofs); +} + +/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ +#define ipw_write32(ipw, ofs, val) do { \ + IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \ + __LINE__, (u32)(ofs), (u32)(val)); \ + _ipw_write32(ipw, ofs, val); \ +} while (0) + +/* 8-bit direct read (low 4K) */ +static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs) +{ + return readb(ipw->hw_base + ofs); +} + +/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */ +#define ipw_read8(ipw, ofs) ({ \ + IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \ + (u32)(ofs)); \ + 
_ipw_read8(ipw, ofs); \ +}) + +/* 16-bit direct read (low 4K) */ +static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs) +{ + return readw(ipw->hw_base + ofs); +} + +/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */ +#define ipw_read16(ipw, ofs) ({ \ + IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \ + (u32)(ofs)); \ + _ipw_read16(ipw, ofs); \ +}) + +/* 32-bit direct read (low 4K) */ +static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs) +{ + return readl(ipw->hw_base + ofs); +} + +/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */ +#define ipw_read32(ipw, ofs) ({ \ + IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \ + (u32)(ofs)); \ + _ipw_read32(ipw, ofs); \ +}) + +static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); +/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ +#define ipw_read_indirect(a, b, c, d) ({ \ + IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \ + __LINE__, (u32)(b), (u32)(d)); \ + _ipw_read_indirect(a, b, c, d); \ +}) + +/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ +static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data, + int num); +#define ipw_write_indirect(a, b, c, d) do { \ + IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \ + __LINE__, (u32)(b), (u32)(d)); \ + _ipw_write_indirect(a, b, c, d); \ +} while (0) + +/* 32-bit indirect write (above 4K) */ +static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) +{ + IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value); + _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); + _ipw_write32(priv, IPW_INDIRECT_DATA, value); +} + +/* 8-bit indirect write (above 4K) */ +static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) +{ + u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ + u32 dif_len = reg - aligned_addr; + + IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); + _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); + _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value); +} + +/* 16-bit indirect write (above 4K) */ +static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value) +{ + u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ + u32 dif_len = (reg - aligned_addr) & (~0x1ul); + + IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); + _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); + _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value); +} + +/* 8-bit indirect read (above 4K) */ +static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) +{ + u32 word; + _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); + IPW_DEBUG_IO(" reg = 0x%8X : \n", reg); + word = _ipw_read32(priv, IPW_INDIRECT_DATA); + return (word >> ((reg & 0x3) * 8)) & 0xff; +} + +/* 32-bit indirect read (above 4K) */ +static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) +{ + u32 value; + + IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg); + + _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); + value = _ipw_read32(priv, IPW_INDIRECT_DATA); + IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value); + return value; +} + +/* General purpose, no alignment requirement, iterative (multi-byte) read, */ +/* for area above 1st 4K of SRAM/reg space */ +static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, + int num) +{ + u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ + 
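	/*
	 * Worked example (illustrative address): a 10-byte read starting at
	 * addr = 0x32001 gives aligned_addr = 0x32000 and dif_len = 1, so
	 * bytes 0x32001-0x32003 are fetched one at a time through
	 * IPW_INDIRECT_ADDR/IPW_INDIRECT_DATA, the dword at 0x32004 via the
	 * auto-increment pair IPW_AUTOINC_ADDR/IPW_AUTOINC_DATA, and the
	 * trailing bytes 0x32008-0x3200A again byte by byte, mirroring the
	 * three stages coded below.
	 */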
u32 dif_len = addr - aligned_addr; + u32 i; + + IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); + + if (num <= 0) { + return; + } + + /* Read the first dword (or portion) byte by byte */ + if (unlikely(dif_len)) { + _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); + /* Start reading at aligned_addr + dif_len */ + for (i = dif_len; ((i < 4) && (num > 0)); i++, num--) + *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i); + aligned_addr += 4; + } + + /* Read all of the middle dwords as dwords, with auto-increment */ + _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); + for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) + *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA); + + /* Read the last dword (or portion) byte by byte */ + if (unlikely(num)) { + _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); + for (i = 0; num > 0; i++, num--) + *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i); + } +} + +/* General purpose, no alignment requirement, iterative (multi-byte) write, */ +/* for area above 1st 4K of SRAM/reg space */ +static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, + int num) +{ + u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ + u32 dif_len = addr - aligned_addr; + u32 i; + + IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); + + if (num <= 0) { + return; + } + + /* Write the first dword (or portion) byte by byte */ + if (unlikely(dif_len)) { + _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); + /* Start writing at aligned_addr + dif_len */ + for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++) + _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); + aligned_addr += 4; + } + + /* Write all of the middle dwords as dwords, with auto-increment */ + _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); + for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) + _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf); + + /* Write the last dword (or portion) byte by byte */ + if (unlikely(num)) { + _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); + for (i = 0; num > 0; i++, num--, buf++) + _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); + } +} + +/* General purpose, no alignment requirement, iterative (multi-byte) write, */ +/* for 1st 4K of SRAM/regs space */ +static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, + int num) +{ + memcpy_toio((priv->hw_base + addr), buf, num); +} + +/* Set bit(s) in low 4K of SRAM/regs */ +static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) +{ + ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); +} + +/* Clear bit(s) in low 4K of SRAM/regs */ +static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) +{ + ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); +} + +static inline void __ipw_enable_interrupts(struct ipw_priv *priv) +{ + if (priv->status & STATUS_INT_ENABLED) + return; + priv->status |= STATUS_INT_ENABLED; + ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL); +} + +static inline void __ipw_disable_interrupts(struct ipw_priv *priv) +{ + if (!(priv->status & STATUS_INT_ENABLED)) + return; + priv->status &= ~STATUS_INT_ENABLED; + ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); +} + +static inline void ipw_enable_interrupts(struct ipw_priv *priv) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->irq_lock, flags); + __ipw_enable_interrupts(priv); + spin_unlock_irqrestore(&priv->irq_lock, flags); +} + +static inline void ipw_disable_interrupts(struct ipw_priv *priv) +{ + unsigned long 
flags; + + spin_lock_irqsave(&priv->irq_lock, flags); + __ipw_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->irq_lock, flags); +} + +static char *ipw_error_desc(u32 val) +{ + switch (val) { + case IPW_FW_ERROR_OK: + return "ERROR_OK"; + case IPW_FW_ERROR_FAIL: + return "ERROR_FAIL"; + case IPW_FW_ERROR_MEMORY_UNDERFLOW: + return "MEMORY_UNDERFLOW"; + case IPW_FW_ERROR_MEMORY_OVERFLOW: + return "MEMORY_OVERFLOW"; + case IPW_FW_ERROR_BAD_PARAM: + return "BAD_PARAM"; + case IPW_FW_ERROR_BAD_CHECKSUM: + return "BAD_CHECKSUM"; + case IPW_FW_ERROR_NMI_INTERRUPT: + return "NMI_INTERRUPT"; + case IPW_FW_ERROR_BAD_DATABASE: + return "BAD_DATABASE"; + case IPW_FW_ERROR_ALLOC_FAIL: + return "ALLOC_FAIL"; + case IPW_FW_ERROR_DMA_UNDERRUN: + return "DMA_UNDERRUN"; + case IPW_FW_ERROR_DMA_STATUS: + return "DMA_STATUS"; + case IPW_FW_ERROR_DINO_ERROR: + return "DINO_ERROR"; + case IPW_FW_ERROR_EEPROM_ERROR: + return "EEPROM_ERROR"; + case IPW_FW_ERROR_SYSASSERT: + return "SYSASSERT"; + case IPW_FW_ERROR_FATAL_ERROR: + return "FATAL_ERROR"; + default: + return "UNKNOWN_ERROR"; + } +} + +static void ipw_dump_error_log(struct ipw_priv *priv, + struct ipw_fw_error *error) +{ + u32 i; + + if (!error) { + IPW_ERROR("Error allocating and capturing error log. " + "Nothing to dump.\n"); + return; + } + + IPW_ERROR("Start IPW Error Log Dump:\n"); + IPW_ERROR("Status: 0x%08X, Config: %08X\n", + error->status, error->config); + + for (i = 0; i < error->elem_len; i++) + IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + ipw_error_desc(error->elem[i].desc), + error->elem[i].time, + error->elem[i].blink1, + error->elem[i].blink2, + error->elem[i].link1, + error->elem[i].link2, error->elem[i].data); + for (i = 0; i < error->log_len; i++) + IPW_ERROR("%i\t0x%08x\t%i\n", + error->log[i].time, + error->log[i].data, error->log[i].event); +} + +static inline int ipw_is_init(struct ipw_priv *priv) +{ + return (priv->status & STATUS_INIT) ? 
1 : 0; +} + +static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len) +{ + u32 addr, field_info, field_len, field_count, total_len; + + IPW_DEBUG_ORD("ordinal = %i\n", ord); + + if (!priv || !val || !len) { + IPW_DEBUG_ORD("Invalid argument\n"); + return -EINVAL; + } + + /* verify device ordinal tables have been initialized */ + if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) { + IPW_DEBUG_ORD("Access ordinals before initialization\n"); + return -EINVAL; + } + + switch (IPW_ORD_TABLE_ID_MASK & ord) { + case IPW_ORD_TABLE_0_MASK: + /* + * TABLE 0: Direct access to a table of 32 bit values + * + * This is a very simple table with the data directly + * read from the table + */ + + /* remove the table id from the ordinal */ + ord &= IPW_ORD_TABLE_VALUE_MASK; + + /* boundary check */ + if (ord > priv->table0_len) { + IPW_DEBUG_ORD("ordinal value (%i) longer then " + "max (%i)\n", ord, priv->table0_len); + return -EINVAL; + } + + /* verify we have enough room to store the value */ + if (*len < sizeof(u32)) { + IPW_DEBUG_ORD("ordinal buffer length too small, " + "need %zd\n", sizeof(u32)); + return -EINVAL; + } + + IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n", + ord, priv->table0_addr + (ord << 2)); + + *len = sizeof(u32); + ord <<= 2; + *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord); + break; + + case IPW_ORD_TABLE_1_MASK: + /* + * TABLE 1: Indirect access to a table of 32 bit values + * + * This is a fairly large table of u32 values each + * representing starting addr for the data (which is + * also a u32) + */ + + /* remove the table id from the ordinal */ + ord &= IPW_ORD_TABLE_VALUE_MASK; + + /* boundary check */ + if (ord > priv->table1_len) { + IPW_DEBUG_ORD("ordinal value too long\n"); + return -EINVAL; + } + + /* verify we have enough room to store the value */ + if (*len < sizeof(u32)) { + IPW_DEBUG_ORD("ordinal buffer length too small, " + "need %zd\n", sizeof(u32)); + return -EINVAL; + } + + *((u32 *) val) = + ipw_read_reg32(priv, (priv->table1_addr + (ord << 2))); + *len = sizeof(u32); + break; + + case IPW_ORD_TABLE_2_MASK: + /* + * TABLE 2: Indirect access to a table of variable sized values + * + * This table consist of six values, each containing + * - dword containing the starting offset of the data + * - dword containing the lengh in the first 16bits + * and the count in the second 16bits + */ + + /* remove the table id from the ordinal */ + ord &= IPW_ORD_TABLE_VALUE_MASK; + + /* boundary check */ + if (ord > priv->table2_len) { + IPW_DEBUG_ORD("ordinal value too long\n"); + return -EINVAL; + } + + /* get the address of statistic */ + addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3)); + + /* get the second DW of statistics ; + * two 16-bit words - first is length, second is count */ + field_info = + ipw_read_reg32(priv, + priv->table2_addr + (ord << 3) + + sizeof(u32)); + + /* get each entry length */ + field_len = *((u16 *) & field_info); + + /* get number of entries */ + field_count = *(((u16 *) & field_info) + 1); + + /* abort if not enough memory */ + total_len = field_len * field_count; + if (total_len > *len) { + *len = total_len; + return -EINVAL; + } + + *len = total_len; + if (!total_len) + return 0; + + IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, " + "field_info = 0x%08x\n", + addr, total_len, field_info); + ipw_read_indirect(priv, addr, val, total_len); + break; + + default: + IPW_DEBUG_ORD("Invalid ordinal!\n"); + return -EINVAL; + + } + + return 0; +} + +static void 
ipw_init_ordinals(struct ipw_priv *priv) +{ + priv->table0_addr = IPW_ORDINALS_TABLE_LOWER; + priv->table0_len = ipw_read32(priv, priv->table0_addr); + + IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n", + priv->table0_addr, priv->table0_len); + + priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1); + priv->table1_len = ipw_read_reg32(priv, priv->table1_addr); + + IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n", + priv->table1_addr, priv->table1_len); + + priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2); + priv->table2_len = ipw_read_reg32(priv, priv->table2_addr); + priv->table2_len &= 0x0000ffff; /* use first two bytes */ + + IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n", + priv->table2_addr, priv->table2_len); + +} + +static u32 ipw_register_toggle(u32 reg) +{ + reg &= ~IPW_START_STANDBY; + if (reg & IPW_GATE_ODMA) + reg &= ~IPW_GATE_ODMA; + if (reg & IPW_GATE_IDMA) + reg &= ~IPW_GATE_IDMA; + if (reg & IPW_GATE_ADMA) + reg &= ~IPW_GATE_ADMA; + return reg; +} + +/* + * LED behavior: + * - On radio ON, turn on any LEDs that require to be on during start + * - On initialization, start unassociated blink + * - On association, disable unassociated blink + * - On disassociation, start unassociated blink + * - On radio OFF, turn off any LEDs started during radio on + * + */ +#define LD_TIME_LINK_ON msecs_to_jiffies(300) +#define LD_TIME_LINK_OFF msecs_to_jiffies(2700) +#define LD_TIME_ACT_ON msecs_to_jiffies(250) + +static void ipw_led_link_on(struct ipw_priv *priv) +{ + unsigned long flags; + u32 led; + + /* If configured to not use LEDs, or nic_type is 1, + * then we don't toggle a LINK led */ + if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1) + return; + + spin_lock_irqsave(&priv->lock, flags); + + if (!(priv->status & STATUS_RF_KILL_MASK) && + !(priv->status & STATUS_LED_LINK_ON)) { + IPW_DEBUG_LED("Link LED On\n"); + led = ipw_read_reg32(priv, IPW_EVENT_REG); + led |= priv->led_association_on; + + led = ipw_register_toggle(led); + + IPW_DEBUG_LED("Reg: 0x%08X\n", led); + ipw_write_reg32(priv, IPW_EVENT_REG, led); + + priv->status |= STATUS_LED_LINK_ON; + + /* If we aren't associated, schedule turning the LED off */ + if (!(priv->status & STATUS_ASSOCIATED)) + queue_delayed_work(priv->workqueue, + &priv->led_link_off, + LD_TIME_LINK_ON); + } + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void ipw_bg_led_link_on(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, led_link_on.work); + mutex_lock(&priv->mutex); + ipw_led_link_on(priv); + mutex_unlock(&priv->mutex); +} + +static void ipw_led_link_off(struct ipw_priv *priv) +{ + unsigned long flags; + u32 led; + + /* If configured not to use LEDs, or nic type is 1, + * then we don't goggle the LINK led. 
*/ + if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1) + return; + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->status & STATUS_LED_LINK_ON) { + led = ipw_read_reg32(priv, IPW_EVENT_REG); + led &= priv->led_association_off; + led = ipw_register_toggle(led); + + IPW_DEBUG_LED("Reg: 0x%08X\n", led); + ipw_write_reg32(priv, IPW_EVENT_REG, led); + + IPW_DEBUG_LED("Link LED Off\n"); + + priv->status &= ~STATUS_LED_LINK_ON; + + /* If we aren't associated and the radio is on, schedule + * turning the LED on (blink while unassociated) */ + if (!(priv->status & STATUS_RF_KILL_MASK) && + !(priv->status & STATUS_ASSOCIATED)) + queue_delayed_work(priv->workqueue, &priv->led_link_on, + LD_TIME_LINK_OFF); + + } + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void ipw_bg_led_link_off(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, led_link_off.work); + mutex_lock(&priv->mutex); + ipw_led_link_off(priv); + mutex_unlock(&priv->mutex); +} + +static void __ipw_led_activity_on(struct ipw_priv *priv) +{ + u32 led; + + if (priv->config & CFG_NO_LED) + return; + + if (priv->status & STATUS_RF_KILL_MASK) + return; + + if (!(priv->status & STATUS_LED_ACT_ON)) { + led = ipw_read_reg32(priv, IPW_EVENT_REG); + led |= priv->led_activity_on; + + led = ipw_register_toggle(led); + + IPW_DEBUG_LED("Reg: 0x%08X\n", led); + ipw_write_reg32(priv, IPW_EVENT_REG, led); + + IPW_DEBUG_LED("Activity LED On\n"); + + priv->status |= STATUS_LED_ACT_ON; + + cancel_delayed_work(&priv->led_act_off); + queue_delayed_work(priv->workqueue, &priv->led_act_off, + LD_TIME_ACT_ON); + } else { + /* Reschedule LED off for full time period */ + cancel_delayed_work(&priv->led_act_off); + queue_delayed_work(priv->workqueue, &priv->led_act_off, + LD_TIME_ACT_ON); + } +} + +#if 0 +void ipw_led_activity_on(struct ipw_priv *priv) +{ + unsigned long flags; + spin_lock_irqsave(&priv->lock, flags); + __ipw_led_activity_on(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} +#endif /* 0 */ + +static void ipw_led_activity_off(struct ipw_priv *priv) +{ + unsigned long flags; + u32 led; + + if (priv->config & CFG_NO_LED) + return; + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->status & STATUS_LED_ACT_ON) { + led = ipw_read_reg32(priv, IPW_EVENT_REG); + led &= priv->led_activity_off; + + led = ipw_register_toggle(led); + + IPW_DEBUG_LED("Reg: 0x%08X\n", led); + ipw_write_reg32(priv, IPW_EVENT_REG, led); + + IPW_DEBUG_LED("Activity LED Off\n"); + + priv->status &= ~STATUS_LED_ACT_ON; + } + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void ipw_bg_led_activity_off(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, led_act_off.work); + mutex_lock(&priv->mutex); + ipw_led_activity_off(priv); + mutex_unlock(&priv->mutex); +} + +static void ipw_led_band_on(struct ipw_priv *priv) +{ + unsigned long flags; + u32 led; + + /* Only nic type 1 supports mode LEDs */ + if (priv->config & CFG_NO_LED || + priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network) + return; + + spin_lock_irqsave(&priv->lock, flags); + + led = ipw_read_reg32(priv, IPW_EVENT_REG); + if (priv->assoc_network->mode == IEEE_A) { + led |= priv->led_ofdm_on; + led &= priv->led_association_off; + IPW_DEBUG_LED("Mode LED On: 802.11a\n"); + } else if (priv->assoc_network->mode == IEEE_G) { + led |= priv->led_ofdm_on; + led |= priv->led_association_on; + IPW_DEBUG_LED("Mode LED On: 802.11g\n"); + } else { + led &= priv->led_ofdm_off; + led |= 
priv->led_association_on; + IPW_DEBUG_LED("Mode LED On: 802.11b\n"); + } + + led = ipw_register_toggle(led); + + IPW_DEBUG_LED("Reg: 0x%08X\n", led); + ipw_write_reg32(priv, IPW_EVENT_REG, led); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void ipw_led_band_off(struct ipw_priv *priv) +{ + unsigned long flags; + u32 led; + + /* Only nic type 1 supports mode LEDs */ + if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1) + return; + + spin_lock_irqsave(&priv->lock, flags); + + led = ipw_read_reg32(priv, IPW_EVENT_REG); + led &= priv->led_ofdm_off; + led &= priv->led_association_off; + + led = ipw_register_toggle(led); + + IPW_DEBUG_LED("Reg: 0x%08X\n", led); + ipw_write_reg32(priv, IPW_EVENT_REG, led); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void ipw_led_radio_on(struct ipw_priv *priv) +{ + ipw_led_link_on(priv); +} + +static void ipw_led_radio_off(struct ipw_priv *priv) +{ + ipw_led_activity_off(priv); + ipw_led_link_off(priv); +} + +static void ipw_led_link_up(struct ipw_priv *priv) +{ + /* Set the Link Led on for all nic types */ + ipw_led_link_on(priv); +} + +static void ipw_led_link_down(struct ipw_priv *priv) +{ + ipw_led_activity_off(priv); + ipw_led_link_off(priv); + + if (priv->status & STATUS_RF_KILL_MASK) + ipw_led_radio_off(priv); +} + +static void ipw_led_init(struct ipw_priv *priv) +{ + priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE]; + + /* Set the default PINs for the link and activity leds */ + priv->led_activity_on = IPW_ACTIVITY_LED; + priv->led_activity_off = ~(IPW_ACTIVITY_LED); + + priv->led_association_on = IPW_ASSOCIATED_LED; + priv->led_association_off = ~(IPW_ASSOCIATED_LED); + + /* Set the default PINs for the OFDM leds */ + priv->led_ofdm_on = IPW_OFDM_LED; + priv->led_ofdm_off = ~(IPW_OFDM_LED); + + switch (priv->nic_type) { + case EEPROM_NIC_TYPE_1: + /* In this NIC type, the LEDs are reversed.... */ + priv->led_activity_on = IPW_ASSOCIATED_LED; + priv->led_activity_off = ~(IPW_ASSOCIATED_LED); + priv->led_association_on = IPW_ACTIVITY_LED; + priv->led_association_off = ~(IPW_ACTIVITY_LED); + + if (!(priv->config & CFG_NO_LED)) + ipw_led_band_on(priv); + + /* And we don't blink link LEDs for this nic, so + * just return here */ + return; + + case EEPROM_NIC_TYPE_3: + case EEPROM_NIC_TYPE_2: + case EEPROM_NIC_TYPE_4: + case EEPROM_NIC_TYPE_0: + break; + + default: + IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n", + priv->nic_type); + priv->nic_type = EEPROM_NIC_TYPE_0; + break; + } + + if (!(priv->config & CFG_NO_LED)) { + if (priv->status & STATUS_ASSOCIATED) + ipw_led_link_on(priv); + else + ipw_led_link_off(priv); + } +} + +static void ipw_led_shutdown(struct ipw_priv *priv) +{ + ipw_led_activity_off(priv); + ipw_led_link_off(priv); + ipw_led_band_off(priv); + cancel_delayed_work(&priv->led_link_on); + cancel_delayed_work(&priv->led_link_off); + cancel_delayed_work(&priv->led_act_off); +} + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/) + * used for controling the debug level. + * + * See the level definitions in ipw for details. 
+ */ +static ssize_t show_debug_level(struct device_driver *d, char *buf) +{ + return sprintf(buf, "0x%08X\n", ipw_debug_level); +} + +static ssize_t store_debug_level(struct device_driver *d, const char *buf, + size_t count) +{ + char *p = (char *)buf; + u32 val; + + if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { + p++; + if (p[0] == 'x' || p[0] == 'X') + p++; + val = simple_strtoul(p, &p, 16); + } else + val = simple_strtoul(p, &p, 10); + if (p == buf) + printk(KERN_INFO DRV_NAME + ": %s is not in hex or decimal form.\n", buf); + else + ipw_debug_level = val; + + return strnlen(buf, count); +} + +static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, + show_debug_level, store_debug_level); + +static inline u32 ipw_get_event_log_len(struct ipw_priv *priv) +{ + /* length = 1st dword in log */ + return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG)); +} + +static void ipw_capture_event_log(struct ipw_priv *priv, + u32 log_len, struct ipw_event *log) +{ + u32 base; + + if (log_len) { + base = ipw_read32(priv, IPW_EVENT_LOG); + ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32), + (u8 *) log, sizeof(*log) * log_len); + } +} + +static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv) +{ + struct ipw_fw_error *error; + u32 log_len = ipw_get_event_log_len(priv); + u32 base = ipw_read32(priv, IPW_ERROR_LOG); + u32 elem_len = ipw_read_reg32(priv, base); + + error = kmalloc(sizeof(*error) + + sizeof(*error->elem) * elem_len + + sizeof(*error->log) * log_len, GFP_ATOMIC); + if (!error) { + IPW_ERROR("Memory allocation for firmware error log " + "failed.\n"); + return NULL; + } + error->jiffies = jiffies; + error->status = priv->status; + error->config = priv->config; + error->elem_len = elem_len; + error->log_len = log_len; + error->elem = (struct ipw_error_elem *)error->payload; + error->log = (struct ipw_event *)(error->elem + elem_len); + + ipw_capture_event_log(priv, log_len, error->log); + + if (elem_len) + ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem, + sizeof(*error->elem) * elem_len); + + return error; +} + +static ssize_t show_event_log(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + u32 log_len = ipw_get_event_log_len(priv); + u32 log_size; + struct ipw_event *log; + u32 len = 0, i; + + /* not using min() because of its strict type checking */ + log_size = PAGE_SIZE / sizeof(*log) > log_len ? 
+ sizeof(*log) * log_len : PAGE_SIZE; + log = kzalloc(log_size, GFP_KERNEL); + if (!log) { + IPW_ERROR("Unable to allocate memory for log\n"); + return 0; + } + log_len = log_size / sizeof(*log); + ipw_capture_event_log(priv, log_len, log); + + len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len); + for (i = 0; i < log_len; i++) + len += snprintf(buf + len, PAGE_SIZE - len, + "\n%08X%08X%08X", + log[i].time, log[i].event, log[i].data); + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + kfree(log); + return len; +} + +static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL); + +static ssize_t show_error(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + u32 len = 0, i; + if (!priv->error) + return 0; + len += snprintf(buf + len, PAGE_SIZE - len, + "%08lX%08X%08X%08X", + priv->error->jiffies, + priv->error->status, + priv->error->config, priv->error->elem_len); + for (i = 0; i < priv->error->elem_len; i++) + len += snprintf(buf + len, PAGE_SIZE - len, + "\n%08X%08X%08X%08X%08X%08X%08X", + priv->error->elem[i].time, + priv->error->elem[i].desc, + priv->error->elem[i].blink1, + priv->error->elem[i].blink2, + priv->error->elem[i].link1, + priv->error->elem[i].link2, + priv->error->elem[i].data); + + len += snprintf(buf + len, PAGE_SIZE - len, + "\n%08X", priv->error->log_len); + for (i = 0; i < priv->error->log_len; i++) + len += snprintf(buf + len, PAGE_SIZE - len, + "\n%08X%08X%08X", + priv->error->log[i].time, + priv->error->log[i].event, + priv->error->log[i].data); + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + return len; +} + +static ssize_t clear_error(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + + kfree(priv->error); + priv->error = NULL; + return count; +} + +static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error); + +static ssize_t show_cmd_log(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + u32 len = 0, i; + if (!priv->cmdlog) + return 0; + for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len; + (i != priv->cmdlog_pos) && (PAGE_SIZE - len); + i = (i + 1) % priv->cmdlog_len) { + len += + snprintf(buf + len, PAGE_SIZE - len, + "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies, + priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd, + priv->cmdlog[i].cmd.len); + len += + snprintk_buf(buf + len, PAGE_SIZE - len, + (u8 *) priv->cmdlog[i].cmd.param, + priv->cmdlog[i].cmd.len); + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + } + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + return len; +} + +static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL); + +#ifdef CONFIG_IPW2200_PROMISCUOUS +static void ipw_prom_free(struct ipw_priv *priv); +static int ipw_prom_alloc(struct ipw_priv *priv); +static ssize_t store_rtap_iface(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + int rc = 0; + + if (count < 1) + return -EINVAL; + + switch (buf[0]) { + case '0': + if (!rtap_iface) + return count; + + if (netif_running(priv->prom_net_dev)) { + IPW_WARNING("Interface is up. 
Cannot unregister.\n"); + return count; + } + + ipw_prom_free(priv); + rtap_iface = 0; + break; + + case '1': + if (rtap_iface) + return count; + + rc = ipw_prom_alloc(priv); + if (!rc) + rtap_iface = 1; + break; + + default: + return -EINVAL; + } + + if (rc) { + IPW_ERROR("Failed to register promiscuous network " + "device (error %d).\n", rc); + } + + return count; +} + +static ssize_t show_rtap_iface(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + if (rtap_iface) + return sprintf(buf, "%s", priv->prom_net_dev->name); + else { + buf[0] = '-'; + buf[1] = '1'; + buf[2] = '\0'; + return 3; + } +} + +static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface, + store_rtap_iface); + +static ssize_t store_rtap_filter(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + + if (!priv->prom_priv) { + IPW_ERROR("Attempting to set filter without " + "rtap_iface enabled.\n"); + return -EPERM; + } + + priv->prom_priv->filter = simple_strtol(buf, NULL, 0); + + IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n", + BIT_ARG16(priv->prom_priv->filter)); + + return count; +} + +static ssize_t show_rtap_filter(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "0x%04X", + priv->prom_priv ? priv->prom_priv->filter : 0); +} + +static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter, + store_rtap_filter); +#endif + +static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d\n", priv->ieee->scan_age); +} + +static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + struct net_device *dev = priv->net_dev; + char buffer[] = "00000000"; + unsigned long len = + (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1; + unsigned long val; + char *p = buffer; + + IPW_DEBUG_INFO("enter\n"); + + strncpy(buffer, buf, len); + buffer[len] = 0; + + if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { + p++; + if (p[0] == 'x' || p[0] == 'X') + p++; + val = simple_strtoul(p, &p, 16); + } else + val = simple_strtoul(p, &p, 10); + if (p == buffer) { + IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name); + } else { + priv->ieee->scan_age = val; + IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age); + } + + IPW_DEBUG_INFO("exit\n"); + return len; +} + +static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age); + +static ssize_t show_led(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 
0 : 1); +} + +static ssize_t store_led(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + + IPW_DEBUG_INFO("enter\n"); + + if (count == 0) + return 0; + + if (*buf == 0) { + IPW_DEBUG_LED("Disabling LED control.\n"); + priv->config |= CFG_NO_LED; + ipw_led_shutdown(priv); + } else { + IPW_DEBUG_LED("Enabling LED control.\n"); + priv->config &= ~CFG_NO_LED; + ipw_led_init(priv); + } + + IPW_DEBUG_INFO("exit\n"); + return count; +} + +static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led); + +static ssize_t show_status(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct ipw_priv *p = dev_get_drvdata(d); + return sprintf(buf, "0x%08x\n", (int)p->status); +} + +static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); + +static ssize_t show_cfg(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw_priv *p = dev_get_drvdata(d); + return sprintf(buf, "0x%08x\n", (int)p->config); +} + +static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL); + +static ssize_t show_nic_type(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "TYPE: %d\n", priv->nic_type); +} + +static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL); + +static ssize_t show_ucode_version(struct device *d, + struct device_attribute *attr, char *buf) +{ + u32 len = sizeof(u32), tmp = 0; + struct ipw_priv *p = dev_get_drvdata(d); + + if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len)) + return 0; + + return sprintf(buf, "0x%08x\n", tmp); +} + +static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL); + +static ssize_t show_rtc(struct device *d, struct device_attribute *attr, + char *buf) +{ + u32 len = sizeof(u32), tmp = 0; + struct ipw_priv *p = dev_get_drvdata(d); + + if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len)) + return 0; + + return sprintf(buf, "0x%08x\n", tmp); +} + +static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL); + +/* + * Add a device attribute to view/control the delay between eeprom + * operations. 
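+ * The delay is in microseconds: eeprom_write_reg() passes this value to
+ * udelay() after every write to the EEPROM access register.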
+ */ +static ssize_t show_eeprom_delay(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct ipw_priv *p = dev_get_drvdata(d); + int n = p->eeprom_delay; + return sprintf(buf, "%i\n", n); +} +static ssize_t store_eeprom_delay(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *p = dev_get_drvdata(d); + sscanf(buf, "%i", &p->eeprom_delay); + return strnlen(buf, count); +} + +static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO, + show_eeprom_delay, store_eeprom_delay); + +static ssize_t show_command_event_reg(struct device *d, + struct device_attribute *attr, char *buf) +{ + u32 reg = 0; + struct ipw_priv *p = dev_get_drvdata(d); + + reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT); + return sprintf(buf, "0x%08x\n", reg); +} +static ssize_t store_command_event_reg(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 reg; + struct ipw_priv *p = dev_get_drvdata(d); + + sscanf(buf, "%x", &reg); + ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg); + return strnlen(buf, count); +} + +static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO, + show_command_event_reg, store_command_event_reg); + +static ssize_t show_mem_gpio_reg(struct device *d, + struct device_attribute *attr, char *buf) +{ + u32 reg = 0; + struct ipw_priv *p = dev_get_drvdata(d); + + reg = ipw_read_reg32(p, 0x301100); + return sprintf(buf, "0x%08x\n", reg); +} +static ssize_t store_mem_gpio_reg(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 reg; + struct ipw_priv *p = dev_get_drvdata(d); + + sscanf(buf, "%x", &reg); + ipw_write_reg32(p, 0x301100, reg); + return strnlen(buf, count); +} + +static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO, + show_mem_gpio_reg, store_mem_gpio_reg); + +static ssize_t show_indirect_dword(struct device *d, + struct device_attribute *attr, char *buf) +{ + u32 reg = 0; + struct ipw_priv *priv = dev_get_drvdata(d); + + if (priv->status & STATUS_INDIRECT_DWORD) + reg = ipw_read_reg32(priv, priv->indirect_dword); + else + reg = 0; + + return sprintf(buf, "0x%08x\n", reg); +} +static ssize_t store_indirect_dword(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + + sscanf(buf, "%x", &priv->indirect_dword); + priv->status |= STATUS_INDIRECT_DWORD; + return strnlen(buf, count); +} + +static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO, + show_indirect_dword, store_indirect_dword); + +static ssize_t show_indirect_byte(struct device *d, + struct device_attribute *attr, char *buf) +{ + u8 reg = 0; + struct ipw_priv *priv = dev_get_drvdata(d); + + if (priv->status & STATUS_INDIRECT_BYTE) + reg = ipw_read_reg8(priv, priv->indirect_byte); + else + reg = 0; + + return sprintf(buf, "0x%02x\n", reg); +} +static ssize_t store_indirect_byte(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + + sscanf(buf, "%x", &priv->indirect_byte); + priv->status |= STATUS_INDIRECT_BYTE; + return strnlen(buf, count); +} + +static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO, + show_indirect_byte, store_indirect_byte); + +static ssize_t show_direct_dword(struct device *d, + struct device_attribute *attr, char *buf) +{ + u32 reg = 0; + struct ipw_priv *priv = dev_get_drvdata(d); + + if (priv->status & STATUS_DIRECT_DWORD) + reg = ipw_read32(priv, priv->direct_dword); + else + reg = 0; + + return sprintf(buf, "0x%08x\n", reg);
+} +static ssize_t store_direct_dword(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + + sscanf(buf, "%x", &priv->direct_dword); + priv->status |= STATUS_DIRECT_DWORD; + return strnlen(buf, count); +} + +static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO, + show_direct_dword, store_direct_dword); + +static int rf_kill_active(struct ipw_priv *priv) +{ + if (0 == (ipw_read32(priv, 0x30) & 0x10000)) { + priv->status |= STATUS_RF_KILL_HW; + wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true); + } else { + priv->status &= ~STATUS_RF_KILL_HW; + wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false); + } + + return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0; +} + +static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr, + char *buf) +{ + /* 0 - RF kill not enabled + 1 - SW based RF kill active (sysfs) + 2 - HW based RF kill active + 3 - Both HW and SW baed RF kill active */ + struct ipw_priv *priv = dev_get_drvdata(d); + int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) | + (rf_kill_active(priv) ? 0x2 : 0x0); + return sprintf(buf, "%i\n", val); +} + +static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) +{ + if ((disable_radio ? 1 : 0) == + ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0)) + return 0; + + IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n", + disable_radio ? "OFF" : "ON"); + + if (disable_radio) { + priv->status |= STATUS_RF_KILL_SW; + + if (priv->workqueue) { + cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); + cancel_delayed_work(&priv->scan_event); + } + queue_work(priv->workqueue, &priv->down); + } else { + priv->status &= ~STATUS_RF_KILL_SW; + if (rf_kill_active(priv)) { + IPW_DEBUG_RF_KILL("Can not turn radio back on - " + "disabled by HW switch\n"); + /* Make sure the RF_KILL check timer is running */ + cancel_delayed_work(&priv->rf_kill); + queue_delayed_work(priv->workqueue, &priv->rf_kill, + round_jiffies_relative(2 * HZ)); + } else + queue_work(priv->workqueue, &priv->up); + } + + return 1; +} + +static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + + ipw_radio_kill_sw(priv, buf[0] == '1'); + + return count; +} + +static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); + +static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + int pos = 0, len = 0; + if (priv->config & CFG_SPEED_SCAN) { + while (priv->speed_scan[pos] != 0) + len += sprintf(&buf[len], "%d ", + priv->speed_scan[pos++]); + return len + sprintf(&buf[len], "\n"); + } + + return sprintf(buf, "0\n"); +} + +static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + int channel, pos = 0; + const char *p = buf; + + /* list of space separated channels to scan, optionally ending with 0 */ + while ((channel = simple_strtol(p, NULL, 0))) { + if (pos == MAX_SPEED_SCAN - 1) { + priv->speed_scan[pos] = 0; + break; + } + + if (libipw_is_valid_channel(priv->ieee, channel)) + priv->speed_scan[pos++] = channel; + else + IPW_WARNING("Skipping invalid channel request: %d\n", + channel); + p = strchr(p, ' '); + if (!p) + break; + while (*p == ' ' || *p == '\t') + p++; + } + + if 
(pos == 0) + priv->config &= ~CFG_SPEED_SCAN; + else { + priv->speed_scan_pos = 0; + priv->config |= CFG_SPEED_SCAN; + } + + return count; +} + +static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan, + store_speed_scan); + +static ssize_t show_net_stats(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0'); +} + +static ssize_t store_net_stats(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + if (buf[0] == '1') + priv->config |= CFG_NET_STATS; + else + priv->config &= ~CFG_NET_STATS; + + return count; +} + +static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO, + show_net_stats, store_net_stats); + +static ssize_t show_channels(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct ipw_priv *priv = dev_get_drvdata(d); + const struct libipw_geo *geo = libipw_get_geo(priv->ieee); + int len = 0, i; + + len = sprintf(&buf[len], + "Displaying %d channels in 2.4Ghz band " + "(802.11bg):\n", geo->bg_channels); + + for (i = 0; i < geo->bg_channels; i++) { + len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n", + geo->bg[i].channel, + geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ? + " (radar spectrum)" : "", + ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) || + (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)) + ? "" : ", IBSS", + geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ? + "passive only" : "active/passive", + geo->bg[i].flags & LIBIPW_CH_B_ONLY ? + "B" : "B/G"); + } + + len += sprintf(&buf[len], + "Displaying %d channels in 5.2Ghz band " + "(802.11a):\n", geo->a_channels); + for (i = 0; i < geo->a_channels; i++) { + len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n", + geo->a[i].channel, + geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ? + " (radar spectrum)" : "", + ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) || + (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)) + ? "" : ", IBSS", + geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ? 
+ "passive only" : "active/passive"); + } + + return len; +} + +static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); + +static void notify_wx_assoc_event(struct ipw_priv *priv) +{ + union iwreq_data wrqu; + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + if (priv->status & STATUS_ASSOCIATED) + memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); + else + memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); + wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); +} + +static void ipw_irq_tasklet(struct ipw_priv *priv) +{ + u32 inta, inta_mask, handled = 0; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&priv->irq_lock, flags); + + inta = ipw_read32(priv, IPW_INTA_RW); + inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); + inta &= (IPW_INTA_MASK_ALL & inta_mask); + + /* Add any cached INTA values that need to be handled */ + inta |= priv->isr_inta; + + spin_unlock_irqrestore(&priv->irq_lock, flags); + + spin_lock_irqsave(&priv->lock, flags); + + /* handle all the justifications for the interrupt */ + if (inta & IPW_INTA_BIT_RX_TRANSFER) { + ipw_rx(priv); + handled |= IPW_INTA_BIT_RX_TRANSFER; + } + + if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) { + IPW_DEBUG_HC("Command completed.\n"); + rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1); + priv->status &= ~STATUS_HCMD_ACTIVE; + wake_up_interruptible(&priv->wait_command_queue); + handled |= IPW_INTA_BIT_TX_CMD_QUEUE; + } + + if (inta & IPW_INTA_BIT_TX_QUEUE_1) { + IPW_DEBUG_TX("TX_QUEUE_1\n"); + rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0); + handled |= IPW_INTA_BIT_TX_QUEUE_1; + } + + if (inta & IPW_INTA_BIT_TX_QUEUE_2) { + IPW_DEBUG_TX("TX_QUEUE_2\n"); + rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1); + handled |= IPW_INTA_BIT_TX_QUEUE_2; + } + + if (inta & IPW_INTA_BIT_TX_QUEUE_3) { + IPW_DEBUG_TX("TX_QUEUE_3\n"); + rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2); + handled |= IPW_INTA_BIT_TX_QUEUE_3; + } + + if (inta & IPW_INTA_BIT_TX_QUEUE_4) { + IPW_DEBUG_TX("TX_QUEUE_4\n"); + rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3); + handled |= IPW_INTA_BIT_TX_QUEUE_4; + } + + if (inta & IPW_INTA_BIT_STATUS_CHANGE) { + IPW_WARNING("STATUS_CHANGE\n"); + handled |= IPW_INTA_BIT_STATUS_CHANGE; + } + + if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) { + IPW_WARNING("TX_PERIOD_EXPIRED\n"); + handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED; + } + + if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) { + IPW_WARNING("HOST_CMD_DONE\n"); + handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE; + } + + if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) { + IPW_WARNING("FW_INITIALIZATION_DONE\n"); + handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE; + } + + if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) { + IPW_WARNING("PHY_OFF_DONE\n"); + handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE; + } + + if (inta & IPW_INTA_BIT_RF_KILL_DONE) { + IPW_DEBUG_RF_KILL("RF_KILL_DONE\n"); + priv->status |= STATUS_RF_KILL_HW; + wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true); + wake_up_interruptible(&priv->wait_command_queue); + priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); + cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); + cancel_delayed_work(&priv->scan_event); + schedule_work(&priv->link_down); + queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); + handled |= IPW_INTA_BIT_RF_KILL_DONE; + } + + if (inta & IPW_INTA_BIT_FATAL_ERROR) { + IPW_WARNING("Firmware error detected. 
Restarting.\n"); + if (priv->error) { + IPW_DEBUG_FW("Sysfs 'error' log already exists.\n"); + if (ipw_debug_level & IPW_DL_FW_ERRORS) { + struct ipw_fw_error *error = + ipw_alloc_error_log(priv); + ipw_dump_error_log(priv, error); + kfree(error); + } + } else { + priv->error = ipw_alloc_error_log(priv); + if (priv->error) + IPW_DEBUG_FW("Sysfs 'error' log captured.\n"); + else + IPW_DEBUG_FW("Error allocating sysfs 'error' " + "log.\n"); + if (ipw_debug_level & IPW_DL_FW_ERRORS) + ipw_dump_error_log(priv, priv->error); + } + + /* XXX: If hardware encryption is for WPA/WPA2, + * we have to notify the supplicant. */ + if (priv->ieee->sec.encrypt) { + priv->status &= ~STATUS_ASSOCIATED; + notify_wx_assoc_event(priv); + } + + /* Keep the restart process from trying to send host + * commands by clearing the INIT status bit */ + priv->status &= ~STATUS_INIT; + + /* Cancel currently queued command. */ + priv->status &= ~STATUS_HCMD_ACTIVE; + wake_up_interruptible(&priv->wait_command_queue); + + queue_work(priv->workqueue, &priv->adapter_restart); + handled |= IPW_INTA_BIT_FATAL_ERROR; + } + + if (inta & IPW_INTA_BIT_PARITY_ERROR) { + IPW_ERROR("Parity error\n"); + handled |= IPW_INTA_BIT_PARITY_ERROR; + } + + if (handled != inta) { + IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + /* enable all interrupts */ + ipw_enable_interrupts(priv); +} + +#define IPW_CMD(x) case IPW_CMD_ ## x : return #x +static char *get_cmd_string(u8 cmd) +{ + switch (cmd) { + IPW_CMD(HOST_COMPLETE); + IPW_CMD(POWER_DOWN); + IPW_CMD(SYSTEM_CONFIG); + IPW_CMD(MULTICAST_ADDRESS); + IPW_CMD(SSID); + IPW_CMD(ADAPTER_ADDRESS); + IPW_CMD(PORT_TYPE); + IPW_CMD(RTS_THRESHOLD); + IPW_CMD(FRAG_THRESHOLD); + IPW_CMD(POWER_MODE); + IPW_CMD(WEP_KEY); + IPW_CMD(TGI_TX_KEY); + IPW_CMD(SCAN_REQUEST); + IPW_CMD(SCAN_REQUEST_EXT); + IPW_CMD(ASSOCIATE); + IPW_CMD(SUPPORTED_RATES); + IPW_CMD(SCAN_ABORT); + IPW_CMD(TX_FLUSH); + IPW_CMD(QOS_PARAMETERS); + IPW_CMD(DINO_CONFIG); + IPW_CMD(RSN_CAPABILITIES); + IPW_CMD(RX_KEY); + IPW_CMD(CARD_DISABLE); + IPW_CMD(SEED_NUMBER); + IPW_CMD(TX_POWER); + IPW_CMD(COUNTRY_INFO); + IPW_CMD(AIRONET_INFO); + IPW_CMD(AP_TX_POWER); + IPW_CMD(CCKM_INFO); + IPW_CMD(CCX_VER_INFO); + IPW_CMD(SET_CALIBRATION); + IPW_CMD(SENSITIVITY_CALIB); + IPW_CMD(RETRY_LIMIT); + IPW_CMD(IPW_PRE_POWER_DOWN); + IPW_CMD(VAP_BEACON_TEMPLATE); + IPW_CMD(VAP_DTIM_PERIOD); + IPW_CMD(EXT_SUPPORTED_RATES); + IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT); + IPW_CMD(VAP_QUIET_INTERVALS); + IPW_CMD(VAP_CHANNEL_SWITCH); + IPW_CMD(VAP_MANDATORY_CHANNELS); + IPW_CMD(VAP_CELL_PWR_LIMIT); + IPW_CMD(VAP_CF_PARAM_SET); + IPW_CMD(VAP_SET_BEACONING_STATE); + IPW_CMD(MEASUREMENT); + IPW_CMD(POWER_CAPABILITY); + IPW_CMD(SUPPORTED_CHANNELS); + IPW_CMD(TPC_REPORT); + IPW_CMD(WME_INFO); + IPW_CMD(PRODUCTION_COMMAND); + default: + return "UNKNOWN"; + } +} + +#define HOST_COMPLETE_TIMEOUT HZ + +static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) +{ + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (priv->status & STATUS_HCMD_ACTIVE) { + IPW_ERROR("Failed to send %s: Already sending a command.\n", + get_cmd_string(cmd->cmd)); + spin_unlock_irqrestore(&priv->lock, flags); + return -EAGAIN; + } + + priv->status |= STATUS_HCMD_ACTIVE; + + if (priv->cmdlog) { + priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies; + priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd; + priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len; + 
memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param, + cmd->len); + priv->cmdlog[priv->cmdlog_pos].retcode = -1; + } + + IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n", + get_cmd_string(cmd->cmd), cmd->cmd, cmd->len, + priv->status); + +#ifndef DEBUG_CMD_WEP_KEY + if (cmd->cmd == IPW_CMD_WEP_KEY) + IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n"); + else +#endif + printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len); + + rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0); + if (rc) { + priv->status &= ~STATUS_HCMD_ACTIVE; + IPW_ERROR("Failed to send %s: Reason %d\n", + get_cmd_string(cmd->cmd), rc); + spin_unlock_irqrestore(&priv->lock, flags); + goto exit; + } + spin_unlock_irqrestore(&priv->lock, flags); + + rc = wait_event_interruptible_timeout(priv->wait_command_queue, + !(priv-> + status & STATUS_HCMD_ACTIVE), + HOST_COMPLETE_TIMEOUT); + if (rc == 0) { + spin_lock_irqsave(&priv->lock, flags); + if (priv->status & STATUS_HCMD_ACTIVE) { + IPW_ERROR("Failed to send %s: Command timed out.\n", + get_cmd_string(cmd->cmd)); + priv->status &= ~STATUS_HCMD_ACTIVE; + spin_unlock_irqrestore(&priv->lock, flags); + rc = -EIO; + goto exit; + } + spin_unlock_irqrestore(&priv->lock, flags); + } else + rc = 0; + + if (priv->status & STATUS_RF_KILL_HW) { + IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n", + get_cmd_string(cmd->cmd)); + rc = -EIO; + goto exit; + } + + exit: + if (priv->cmdlog) { + priv->cmdlog[priv->cmdlog_pos++].retcode = rc; + priv->cmdlog_pos %= priv->cmdlog_len; + } + return rc; +} + +static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command) +{ + struct host_cmd cmd = { + .cmd = command, + }; + + return __ipw_send_cmd(priv, &cmd); +} + +static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len, + void *data) +{ + struct host_cmd cmd = { + .cmd = command, + .len = len, + .param = data, + }; + + return __ipw_send_cmd(priv, &cmd); +} + +static int ipw_send_host_complete(struct ipw_priv *priv) +{ + if (!priv) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE); +} + +static int ipw_send_system_config(struct ipw_priv *priv) +{ + return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, + sizeof(priv->sys_config), + &priv->sys_config); +} + +static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) +{ + if (!priv || !ssid) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE), + ssid); +} + +static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) +{ + if (!priv || !mac) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + IPW_DEBUG_INFO("%s: Setting MAC to %pM\n", + priv->net_dev->name, mac); + + return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); +} + +/* + * NOTE: This must be executed from our workqueue as it results in udelay + * being called which may corrupt the keyboard if executed on default + * workqueue + */ +static void ipw_adapter_restart(void *adapter) +{ + struct ipw_priv *priv = adapter; + + if (priv->status & STATUS_RF_KILL_MASK) + return; + + ipw_down(priv); + + if (priv->assoc_network && + (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS)) + ipw_remove_current_network(priv); + + if (ipw_up(priv)) { + IPW_ERROR("Failed to up device\n"); + return; + } +} + +static void ipw_bg_adapter_restart(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, adapter_restart); + 
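+ /* Serialize with the other ipw_bg_* work handlers, which all run
+ * under priv->mutex. */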
mutex_lock(&priv->mutex); + ipw_adapter_restart(priv); + mutex_unlock(&priv->mutex); +} + +#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) + +static void ipw_scan_check(void *data) +{ + struct ipw_priv *priv = data; + if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { + IPW_DEBUG_SCAN("Scan completion watchdog resetting " + "adapter after (%dms).\n", + jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); + queue_work(priv->workqueue, &priv->adapter_restart); + } +} + +static void ipw_bg_scan_check(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, scan_check.work); + mutex_lock(&priv->mutex); + ipw_scan_check(priv); + mutex_unlock(&priv->mutex); +} + +static int ipw_send_scan_request_ext(struct ipw_priv *priv, + struct ipw_scan_request_ext *request) +{ + return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT, + sizeof(*request), request); +} + +static int ipw_send_scan_abort(struct ipw_priv *priv) +{ + if (!priv) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT); +} + +static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) +{ + struct ipw_sensitivity_calib calib = { + .beacon_rssi_raw = cpu_to_le16(sens), + }; + + return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib), + &calib); +} + +static int ipw_send_associate(struct ipw_priv *priv, + struct ipw_associate *associate) +{ + if (!priv || !associate) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate), + associate); +} + +static int ipw_send_supported_rates(struct ipw_priv *priv, + struct ipw_supported_rates *rates) +{ + if (!priv || !rates) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates), + rates); +} + +static int ipw_set_random_seed(struct ipw_priv *priv) +{ + u32 val; + + if (!priv) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + get_random_bytes(&val, sizeof(val)); + + return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val); +} + +static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) +{ + __le32 v = cpu_to_le32(phy_off); + if (!priv) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v); +} + +static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power) +{ + if (!priv || !power) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power); +} + +static int ipw_set_tx_power(struct ipw_priv *priv) +{ + const struct libipw_geo *geo = libipw_get_geo(priv->ieee); + struct ipw_tx_power tx_power; + s8 max_power; + int i; + + memset(&tx_power, 0, sizeof(tx_power)); + + /* configure device for 'G' band */ + tx_power.ieee_mode = IPW_G_MODE; + tx_power.num_channels = geo->bg_channels; + for (i = 0; i < geo->bg_channels; i++) { + max_power = geo->bg[i].max_power; + tx_power.channels_tx_power[i].channel_number = + geo->bg[i].channel; + tx_power.channels_tx_power[i].tx_power = max_power ? 
+ min(max_power, priv->tx_power) : priv->tx_power; + } + if (ipw_send_tx_power(priv, &tx_power)) + return -EIO; + + /* configure device to also handle 'B' band */ + tx_power.ieee_mode = IPW_B_MODE; + if (ipw_send_tx_power(priv, &tx_power)) + return -EIO; + + /* configure device to also handle 'A' band */ + if (priv->ieee->abg_true) { + tx_power.ieee_mode = IPW_A_MODE; + tx_power.num_channels = geo->a_channels; + for (i = 0; i < tx_power.num_channels; i++) { + max_power = geo->a[i].max_power; + tx_power.channels_tx_power[i].channel_number = + geo->a[i].channel; + tx_power.channels_tx_power[i].tx_power = max_power ? + min(max_power, priv->tx_power) : priv->tx_power; + } + if (ipw_send_tx_power(priv, &tx_power)) + return -EIO; + } + return 0; +} + +static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) +{ + struct ipw_rts_threshold rts_threshold = { + .rts_threshold = cpu_to_le16(rts), + }; + + if (!priv) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD, + sizeof(rts_threshold), &rts_threshold); +} + +static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) +{ + struct ipw_frag_threshold frag_threshold = { + .frag_threshold = cpu_to_le16(frag), + }; + + if (!priv) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD, + sizeof(frag_threshold), &frag_threshold); +} + +static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) +{ + __le32 param; + + if (!priv) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + /* If on battery, set to 3, if AC set to CAM, else user + * level */ + switch (mode) { + case IPW_POWER_BATTERY: + param = cpu_to_le32(IPW_POWER_INDEX_3); + break; + case IPW_POWER_AC: + param = cpu_to_le32(IPW_POWER_MODE_CAM); + break; + default: + param = cpu_to_le32(mode); + break; + } + + return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), + &param); +} + +static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) +{ + struct ipw_retry_limit retry_limit = { + .short_retry_limit = slimit, + .long_retry_limit = llimit + }; + + if (!priv) { + IPW_ERROR("Invalid args\n"); + return -1; + } + + return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit), + &retry_limit); +} + +/* + * The IPW device contains a Microwire compatible EEPROM that stores + * various data like the MAC address. Usually the firmware has exclusive + * access to the eeprom, but during device initialization (before the + * device driver has sent the HostComplete command to the firmware) the + * device driver has read access to the EEPROM by way of indirect addressing + * through a couple of memory mapped registers. + * + * The following is a simplified implementation for pulling data out of + * the eeprom, along with some helper functions to find information in + * the per device private data's copy of the eeprom. + * + * NOTE: To better understand how these functions work (i.e. what is a chip + * select and why do we have to keep driving the eeprom clock?), read + * just about any data sheet for a Microwire compatible EEPROM.
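+ *
+ * As implemented below, a read transaction is bit-banged through
+ * eeprom_write_reg(): eeprom_op() raises chip select via eeprom_cs(),
+ * then shifts out a start bit, two opcode bits and eight address bits
+ * (MSB first); eeprom_read_u16() clocks in sixteen data bits from the
+ * EEPROM_BIT_DO line before eeprom_disable_cs() drops chip select again.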
+ */ + +/* write a 32 bit value into the indirect accessor register */ +static inline void eeprom_write_reg(struct ipw_priv *p, u32 data) +{ + ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data); + + /* the eeprom requires some time to complete the operation */ + udelay(p->eeprom_delay); + + return; +} + +/* perform a chip select operation */ +static void eeprom_cs(struct ipw_priv *priv) +{ + eeprom_write_reg(priv, 0); + eeprom_write_reg(priv, EEPROM_BIT_CS); + eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK); + eeprom_write_reg(priv, EEPROM_BIT_CS); +} + +/* perform a chip select operation */ +static void eeprom_disable_cs(struct ipw_priv *priv) +{ + eeprom_write_reg(priv, EEPROM_BIT_CS); + eeprom_write_reg(priv, 0); + eeprom_write_reg(priv, EEPROM_BIT_SK); +} + +/* push a single bit down to the eeprom */ +static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit) +{ + int d = (bit ? EEPROM_BIT_DI : 0); + eeprom_write_reg(p, EEPROM_BIT_CS | d); + eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK); +} + +/* push an opcode followed by an address down to the eeprom */ +static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr) +{ + int i; + + eeprom_cs(priv); + eeprom_write_bit(priv, 1); + eeprom_write_bit(priv, op & 2); + eeprom_write_bit(priv, op & 1); + for (i = 7; i >= 0; i--) { + eeprom_write_bit(priv, addr & (1 << i)); + } +} + +/* pull 16 bits off the eeprom, one bit at a time */ +static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr) +{ + int i; + u16 r = 0; + + /* Send READ Opcode */ + eeprom_op(priv, EEPROM_CMD_READ, addr); + + /* Send dummy bit */ + eeprom_write_reg(priv, EEPROM_BIT_CS); + + /* Read the byte off the eeprom one bit at a time */ + for (i = 0; i < 16; i++) { + u32 data = 0; + eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK); + eeprom_write_reg(priv, EEPROM_BIT_CS); + data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS); + r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0); + } + + /* Send another dummy bit */ + eeprom_write_reg(priv, 0); + eeprom_disable_cs(priv); + + return r; +} + +/* helper function for pulling the mac address out of the private */ +/* data's copy of the eeprom data */ +static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac) +{ + memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6); +} + +/* + * Either the device driver (i.e. the host) or the firmware can + * load eeprom data into the designated region in SRAM. If neither + * happens then the FW will shutdown with a fatal error. + * + * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE + * bit needs region of shared SRAM needs to be non-zero. + */ +static void ipw_eeprom_init_sram(struct ipw_priv *priv) +{ + int i; + __le16 *eeprom = (__le16 *) priv->eeprom; + + IPW_DEBUG_TRACE(">>\n"); + + /* read entire contents of eeprom into private buffer */ + for (i = 0; i < 128; i++) + eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i)); + + /* + If the data looks correct, then copy it to our private + copy. Otherwise let the firmware know to perform the operation + on its own. 
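+ The check below keys off priv->eeprom[EEPROM_VERSION]: a version of zero
+ is treated as an invalid host-side copy, and IPW_EEPROM_LOAD_DISABLE is
+ written as 1 so the firmware fills the SRAM region itself.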
+ */ + if (priv->eeprom[EEPROM_VERSION] != 0) { + IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); + + /* write the eeprom data to sram */ + for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) + ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]); + + /* Do not load eeprom data on fatal error or suspend */ + ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); + } else { + IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n"); + + /* Load eeprom data on fatal error or suspend */ + ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1); + } + + IPW_DEBUG_TRACE("<<\n"); +} + +static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count) +{ + count >>= 2; + if (!count) + return; + _ipw_write32(priv, IPW_AUTOINC_ADDR, start); + while (count--) + _ipw_write32(priv, IPW_AUTOINC_DATA, 0); +} + +static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv) +{ + ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL, + CB_NUMBER_OF_ELEMENTS_SMALL * + sizeof(struct command_block)); +} + +static int ipw_fw_dma_enable(struct ipw_priv *priv) +{ /* start dma engine but no transfers yet */ + + IPW_DEBUG_FW(">> : \n"); + + /* Start the dma */ + ipw_fw_dma_reset_command_blocks(priv); + + /* Write CB base address */ + ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL); + + IPW_DEBUG_FW("<< : \n"); + return 0; +} + +static void ipw_fw_dma_abort(struct ipw_priv *priv) +{ + u32 control = 0; + + IPW_DEBUG_FW(">> :\n"); + + /* set the Stop and Abort bit */ + control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT; + ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); + priv->sram_desc.last_cb_index = 0; + + IPW_DEBUG_FW("<< \n"); +} + +static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, + struct command_block *cb) +{ + u32 address = + IPW_SHARED_SRAM_DMA_CONTROL + + (sizeof(struct command_block) * index); + IPW_DEBUG_FW(">> :\n"); + + ipw_write_indirect(priv, address, (u8 *) cb, + (int)sizeof(struct command_block)); + + IPW_DEBUG_FW("<< :\n"); + return 0; + +} + +static int ipw_fw_dma_kick(struct ipw_priv *priv) +{ + u32 control = 0; + u32 index = 0; + + IPW_DEBUG_FW(">> :\n"); + + for (index = 0; index < priv->sram_desc.last_cb_index; index++) + ipw_fw_dma_write_command_block(priv, index, + &priv->sram_desc.cb_list[index]); + + /* Enable the DMA in the CSR register */ + ipw_clear_bit(priv, IPW_RESET_REG, + IPW_RESET_REG_MASTER_DISABLED | + IPW_RESET_REG_STOP_MASTER); + + /* Set the Start bit. 
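The write to IPW_DMA_I_DMA_CONTROL below starts the engine on the command blocks written by the loop above.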
*/ + control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START; + ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); + + IPW_DEBUG_FW("<< :\n"); + return 0; +} + +static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv) +{ + u32 address; + u32 register_value = 0; + u32 cb_fields_address = 0; + + IPW_DEBUG_FW(">> :\n"); + address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); + IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address); + + /* Read the DMA Controlor register */ + register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL); + IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value); + + /* Print the CB values */ + cb_fields_address = address; + register_value = ipw_read_reg32(priv, cb_fields_address); + IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value); + + cb_fields_address += sizeof(u32); + register_value = ipw_read_reg32(priv, cb_fields_address); + IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value); + + cb_fields_address += sizeof(u32); + register_value = ipw_read_reg32(priv, cb_fields_address); + IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n", + register_value); + + cb_fields_address += sizeof(u32); + register_value = ipw_read_reg32(priv, cb_fields_address); + IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value); + + IPW_DEBUG_FW(">> :\n"); +} + +static int ipw_fw_dma_command_block_index(struct ipw_priv *priv) +{ + u32 current_cb_address = 0; + u32 current_cb_index = 0; + + IPW_DEBUG_FW("<< :\n"); + current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); + + current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) / + sizeof(struct command_block); + + IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n", + current_cb_index, current_cb_address); + + IPW_DEBUG_FW(">> :\n"); + return current_cb_index; + +} + +static int ipw_fw_dma_add_command_block(struct ipw_priv *priv, + u32 src_address, + u32 dest_address, + u32 length, + int interrupt_enabled, int is_last) +{ + + u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC | + CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG | + CB_DEST_SIZE_LONG; + struct command_block *cb; + u32 last_cb_element = 0; + + IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n", + src_address, dest_address, length); + + if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL) + return -1; + + last_cb_element = priv->sram_desc.last_cb_index; + cb = &priv->sram_desc.cb_list[last_cb_element]; + priv->sram_desc.last_cb_index++; + + /* Calculate the new CB control word */ + if (interrupt_enabled) + control |= CB_INT_ENABLED; + + if (is_last) + control |= CB_LAST_VALID; + + control |= length; + + /* Calculate the CB Element's checksum value */ + cb->status = control ^ src_address ^ dest_address; + + /* Copy the Source and Destination addresses */ + cb->dest_addr = dest_address; + cb->source_addr = src_address; + + /* Copy the Control Word last */ + cb->control = control; + + return 0; +} + +static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address, + int nr, u32 dest_address, u32 len) +{ + int ret, i; + u32 size; + + IPW_DEBUG_FW(">> \n"); + IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n", + nr, dest_address, len); + + for (i = 0; i < nr; i++) { + size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH); + ret = ipw_fw_dma_add_command_block(priv, src_address[i], + dest_address + + i * CB_MAX_LENGTH, size, + 0, 0); + if (ret) { + IPW_DEBUG_FW_INFO(": Failed\n"); + 
return -1; + } else + IPW_DEBUG_FW_INFO(": Added new cb\n"); + } + + IPW_DEBUG_FW("<< \n"); + return 0; +} + +static int ipw_fw_dma_wait(struct ipw_priv *priv) +{ + u32 current_index = 0, previous_index; + u32 watchdog = 0; + + IPW_DEBUG_FW(">> : \n"); + + current_index = ipw_fw_dma_command_block_index(priv); + IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n", + (int)priv->sram_desc.last_cb_index); + + while (current_index < priv->sram_desc.last_cb_index) { + udelay(50); + previous_index = current_index; + current_index = ipw_fw_dma_command_block_index(priv); + + if (previous_index < current_index) { + watchdog = 0; + continue; + } + if (++watchdog > 400) { + IPW_DEBUG_FW_INFO("Timeout\n"); + ipw_fw_dma_dump_command_block(priv); + ipw_fw_dma_abort(priv); + return -1; + } + } + + ipw_fw_dma_abort(priv); + + /*Disable the DMA in the CSR register */ + ipw_set_bit(priv, IPW_RESET_REG, + IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER); + + IPW_DEBUG_FW("<< dmaWaitSync \n"); + return 0; +} + +static void ipw_remove_current_network(struct ipw_priv *priv) +{ + struct list_head *element, *safe; + struct libipw_network *network = NULL; + unsigned long flags; + + spin_lock_irqsave(&priv->ieee->lock, flags); + list_for_each_safe(element, safe, &priv->ieee->network_list) { + network = list_entry(element, struct libipw_network, list); + if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) { + list_del(element); + list_add_tail(&network->list, + &priv->ieee->network_free_list); + } + } + spin_unlock_irqrestore(&priv->ieee->lock, flags); +} + +/** + * Check that card is still alive. + * Reads debug register from domain0. + * If card is present, pre-defined value should + * be found there. + * + * @param priv + * @return 1 if card is present, 0 otherwise + */ +static inline int ipw_alive(struct ipw_priv *priv) +{ + return ipw_read32(priv, 0x90) == 0xd55555d5; +} + +/* timeout in msec, attempted in 10-msec quanta */ +static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, + int timeout) +{ + int i = 0; + + do { + if ((ipw_read32(priv, addr) & mask) == mask) + return i; + mdelay(10); + i += 10; + } while (i < timeout); + + return -ETIME; +} + +/* These functions load the firmware and micro code for the operation of + * the ipw hardware. It assumes the buffer has all the bits for the + * image and the caller is handling the memory allocation and clean up. + */ + +static int ipw_stop_master(struct ipw_priv *priv) +{ + int rc; + + IPW_DEBUG_TRACE(">> \n"); + /* stop master. 
typical delay - 0 */ + ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); + + /* timeout is in msec, polled in 10-msec quanta */ + rc = ipw_poll_bit(priv, IPW_RESET_REG, + IPW_RESET_REG_MASTER_DISABLED, 100); + if (rc < 0) { + IPW_ERROR("wait for stop master failed after 100ms\n"); + return -1; + } + + IPW_DEBUG_INFO("stop master %dms\n", rc); + + return rc; +} + +static void ipw_arc_release(struct ipw_priv *priv) +{ + IPW_DEBUG_TRACE(">> \n"); + mdelay(5); + + ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); + + /* no one knows timing, for safety add some delay */ + mdelay(5); +} + +struct fw_chunk { + __le32 address; + __le32 length; +}; + +static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) +{ + int rc = 0, i, addr; + u8 cr = 0; + __le16 *image; + + image = (__le16 *) data; + + IPW_DEBUG_TRACE(">> \n"); + + rc = ipw_stop_master(priv); + + if (rc < 0) + return rc; + + for (addr = IPW_SHARED_LOWER_BOUND; + addr < IPW_REGISTER_DOMAIN1_END; addr += 4) { + ipw_write32(priv, addr, 0); + } + + /* no ucode (yet) */ + memset(&priv->dino_alive, 0, sizeof(priv->dino_alive)); + /* destroy DMA queues */ + /* reset sequence */ + + ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON); + ipw_arc_release(priv); + ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF); + mdelay(1); + + /* reset PHY */ + ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN); + mdelay(1); + + ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0); + mdelay(1); + + /* enable ucode store */ + ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0); + ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS); + mdelay(1); + + /* write ucode */ + /** + * @bug + * Do NOT set indirect address register once and then + * store data to indirect data register in the loop. + * It seems very reasonable, but in this case DINO do not + * accept ucode. It is essential to set address each time. + */ + /* load new ipw uCode */ + for (i = 0; i < len / 2; i++) + ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE, + le16_to_cpu(image[i])); + + /* enable DINO */ + ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); + ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM); + + /* this is where the igx / win driver deveates from the VAP driver. */ + + /* wait for alive response */ + for (i = 0; i < 100; i++) { + /* poll for incoming data */ + cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS); + if (cr & DINO_RXFIFO_DATA) + break; + mdelay(1); + } + + if (cr & DINO_RXFIFO_DATA) { + /* alive_command_responce size is NOT multiple of 4 */ + __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4]; + + for (i = 0; i < ARRAY_SIZE(response_buffer); i++) + response_buffer[i] = + cpu_to_le32(ipw_read_reg32(priv, + IPW_BASEBAND_RX_FIFO_READ)); + memcpy(&priv->dino_alive, response_buffer, + sizeof(priv->dino_alive)); + if (priv->dino_alive.alive_command == 1 + && priv->dino_alive.ucode_valid == 1) { + rc = 0; + IPW_DEBUG_INFO + ("Microcode OK, rev. %d (0x%x) dev. 
%d (0x%x) " + "of %02d/%02d/%02d %02d:%02d\n", + priv->dino_alive.software_revision, + priv->dino_alive.software_revision, + priv->dino_alive.device_identifier, + priv->dino_alive.device_identifier, + priv->dino_alive.time_stamp[0], + priv->dino_alive.time_stamp[1], + priv->dino_alive.time_stamp[2], + priv->dino_alive.time_stamp[3], + priv->dino_alive.time_stamp[4]); + } else { + IPW_DEBUG_INFO("Microcode is not alive\n"); + rc = -EINVAL; + } + } else { + IPW_DEBUG_INFO("No alive response from DINO\n"); + rc = -ETIME; + } + + /* disable DINO, otherwise for some reason + firmware have problem getting alive resp. */ + ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); + + return rc; +} + +static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) +{ + int ret = -1; + int offset = 0; + struct fw_chunk *chunk; + int total_nr = 0; + int i; + struct pci_pool *pool; + u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL]; + dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL]; + + IPW_DEBUG_TRACE("<< : \n"); + + pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0); + if (!pool) { + IPW_ERROR("pci_pool_create failed\n"); + return -ENOMEM; + } + + /* Start the Dma */ + ret = ipw_fw_dma_enable(priv); + + /* the DMA is already ready this would be a bug. */ + BUG_ON(priv->sram_desc.last_cb_index > 0); + + do { + u32 chunk_len; + u8 *start; + int size; + int nr = 0; + + chunk = (struct fw_chunk *)(data + offset); + offset += sizeof(struct fw_chunk); + chunk_len = le32_to_cpu(chunk->length); + start = data + offset; + + nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH; + for (i = 0; i < nr; i++) { + virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL, + &phys[total_nr]); + if (!virts[total_nr]) { + ret = -ENOMEM; + goto out; + } + size = min_t(u32, chunk_len - i * CB_MAX_LENGTH, + CB_MAX_LENGTH); + memcpy(virts[total_nr], start, size); + start += size; + total_nr++; + /* We don't support fw chunk larger than 64*8K */ + BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL); + } + + /* build DMA packet and queue up for sending */ + /* dma to chunk->address, the chunk->length bytes from data + + * offeset*/ + /* Dma loading */ + ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr], + nr, le32_to_cpu(chunk->address), + chunk_len); + if (ret) { + IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); + goto out; + } + + offset += chunk_len; + } while (offset < len); + + /* Run the DMA and wait for the answer */ + ret = ipw_fw_dma_kick(priv); + if (ret) { + IPW_ERROR("dmaKick Failed\n"); + goto out; + } + + ret = ipw_fw_dma_wait(priv); + if (ret) { + IPW_ERROR("dmaWaitSync Failed\n"); + goto out; + } + out: + for (i = 0; i < total_nr; i++) + pci_pool_free(pool, virts[i], phys[i]); + + pci_pool_destroy(pool); + + return ret; +} + +/* stop nic */ +static int ipw_stop_nic(struct ipw_priv *priv) +{ + int rc = 0; + + /* stop */ + ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); + + rc = ipw_poll_bit(priv, IPW_RESET_REG, + IPW_RESET_REG_MASTER_DISABLED, 500); + if (rc < 0) { + IPW_ERROR("wait for reg master disabled failed after 500ms\n"); + return rc; + } + + ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); + + return rc; +} + +static void ipw_start_nic(struct ipw_priv *priv) +{ + IPW_DEBUG_TRACE(">>\n"); + + /* prvHwStartNic release ARC */ + ipw_clear_bit(priv, IPW_RESET_REG, + IPW_RESET_REG_MASTER_DISABLED | + IPW_RESET_REG_STOP_MASTER | + CBD_RESET_REG_PRINCETON_RESET); + + /* enable power management */ + ipw_set_bit(priv, IPW_GP_CNTRL_RW, + IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY); + + 
IPW_DEBUG_TRACE("<<\n"); +} + +static int ipw_init_nic(struct ipw_priv *priv) +{ + int rc; + + IPW_DEBUG_TRACE(">>\n"); + /* reset */ + /*prvHwInitNic */ + /* set "initialization complete" bit to move adapter to D0 state */ + ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE); + + /* low-level PLL activation */ + ipw_write32(priv, IPW_READ_INT_REGISTER, + IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER); + + /* wait for clock stabilization */ + rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW, + IPW_GP_CNTRL_BIT_CLOCK_READY, 250); + if (rc < 0) + IPW_DEBUG_INFO("FAILED wait for clock stablization\n"); + + /* assert SW reset */ + ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET); + + udelay(10); + + /* set "initialization complete" bit to move adapter to D0 state */ + ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE); + + IPW_DEBUG_TRACE(">>\n"); + return 0; +} + +/* Call this function from process context, it will sleep in request_firmware. + * Probe is an ok place to call this from. + */ +static int ipw_reset_nic(struct ipw_priv *priv) +{ + int rc = 0; + unsigned long flags; + + IPW_DEBUG_TRACE(">>\n"); + + rc = ipw_init_nic(priv); + + spin_lock_irqsave(&priv->lock, flags); + /* Clear the 'host command active' bit... */ + priv->status &= ~STATUS_HCMD_ACTIVE; + wake_up_interruptible(&priv->wait_command_queue); + priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING); + wake_up_interruptible(&priv->wait_state); + spin_unlock_irqrestore(&priv->lock, flags); + + IPW_DEBUG_TRACE("<<\n"); + return rc; +} + + +struct ipw_fw { + __le32 ver; + __le32 boot_size; + __le32 ucode_size; + __le32 fw_size; + u8 data[0]; +}; + +static int ipw_get_fw(struct ipw_priv *priv, + const struct firmware **raw, const char *name) +{ + struct ipw_fw *fw; + int rc; + + /* ask firmware_class module to get the boot firmware off disk */ + rc = request_firmware(raw, name, &priv->pci_dev->dev); + if (rc < 0) { + IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc); + return rc; + } + + if ((*raw)->size < sizeof(*fw)) { + IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size); + return -EINVAL; + } + + fw = (void *)(*raw)->data; + + if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) + + le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) { + IPW_ERROR("%s is too small or corrupt (%zd)\n", + name, (*raw)->size); + return -EINVAL; + } + + IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n", + name, + le32_to_cpu(fw->ver) >> 16, + le32_to_cpu(fw->ver) & 0xff, + (*raw)->size - sizeof(*fw)); + return 0; +} + +#define IPW_RX_BUF_SIZE (3000) + +static void ipw_rx_queue_reset(struct ipw_priv *priv, + struct ipw_rx_queue *rxq) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&rxq->lock, flags); + + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, + IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxq->pool[i].skb); + rxq->pool[i].skb = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, 
flags); +} + +#ifdef CONFIG_PM +static int fw_loaded = 0; +static const struct firmware *raw = NULL; + +static void free_firmware(void) +{ + if (fw_loaded) { + release_firmware(raw); + raw = NULL; + fw_loaded = 0; + } +} +#else +#define free_firmware() do {} while (0) +#endif + +static int ipw_load(struct ipw_priv *priv) +{ +#ifndef CONFIG_PM + const struct firmware *raw = NULL; +#endif + struct ipw_fw *fw; + u8 *boot_img, *ucode_img, *fw_img; + u8 *name = NULL; + int rc = 0, retries = 3; + + switch (priv->ieee->iw_mode) { + case IW_MODE_ADHOC: + name = "ipw2200-ibss.fw"; + break; +#ifdef CONFIG_IPW2200_MONITOR + case IW_MODE_MONITOR: + name = "ipw2200-sniffer.fw"; + break; +#endif + case IW_MODE_INFRA: + name = "ipw2200-bss.fw"; + break; + } + + if (!name) { + rc = -EINVAL; + goto error; + } + +#ifdef CONFIG_PM + if (!fw_loaded) { +#endif + rc = ipw_get_fw(priv, &raw, name); + if (rc < 0) + goto error; +#ifdef CONFIG_PM + } +#endif + + fw = (void *)raw->data; + boot_img = &fw->data[0]; + ucode_img = &fw->data[le32_to_cpu(fw->boot_size)]; + fw_img = &fw->data[le32_to_cpu(fw->boot_size) + + le32_to_cpu(fw->ucode_size)]; + + if (rc < 0) + goto error; + + if (!priv->rxq) + priv->rxq = ipw_rx_queue_alloc(priv); + else + ipw_rx_queue_reset(priv, priv->rxq); + if (!priv->rxq) { + IPW_ERROR("Unable to initialize Rx queue\n"); + goto error; + } + + retry: + /* Ensure interrupts are disabled */ + ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); + priv->status &= ~STATUS_INT_ENABLED; + + /* ack pending interrupts */ + ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); + + ipw_stop_nic(priv); + + rc = ipw_reset_nic(priv); + if (rc < 0) { + IPW_ERROR("Unable to reset NIC\n"); + goto error; + } + + ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND, + IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); + + /* DMA the initial boot firmware into the device */ + rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size)); + if (rc < 0) { + IPW_ERROR("Unable to load boot firmware: %d\n", rc); + goto error; + } + + /* kick start the device */ + ipw_start_nic(priv); + + /* wait for the device to finish its initial startup sequence */ + rc = ipw_poll_bit(priv, IPW_INTA_RW, + IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); + if (rc < 0) { + IPW_ERROR("device failed to boot initial fw image\n"); + goto error; + } + IPW_DEBUG_INFO("initial device response after %dms\n", rc); + + /* ack fw init done interrupt */ + ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); + + /* DMA the ucode into the device */ + rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size)); + if (rc < 0) { + IPW_ERROR("Unable to load ucode: %d\n", rc); + goto error; + } + + /* stop nic */ + ipw_stop_nic(priv); + + /* DMA bss firmware into the device */ + rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size)); + if (rc < 0) { + IPW_ERROR("Unable to load firmware: %d\n", rc); + goto error; + } +#ifdef CONFIG_PM + fw_loaded = 1; +#endif + + ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); + + rc = ipw_queue_reset(priv); + if (rc < 0) { + IPW_ERROR("Unable to initialize queues\n"); + goto error; + } + + /* Ensure interrupts are disabled */ + ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); + /* ack pending interrupts */ + ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); + + /* kick start the device */ + ipw_start_nic(priv); + + if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) { + if (retries > 0) { + IPW_WARNING("Parity error. 
Retrying init.\n"); + retries--; + goto retry; + } + + IPW_ERROR("TODO: Handle parity error -- schedule restart?\n"); + rc = -EIO; + goto error; + } + + /* wait for the device */ + rc = ipw_poll_bit(priv, IPW_INTA_RW, + IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); + if (rc < 0) { + IPW_ERROR("device failed to start within 500ms\n"); + goto error; + } + IPW_DEBUG_INFO("device response after %dms\n", rc); + + /* ack fw init done interrupt */ + ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); + + /* read eeprom data and initialize the eeprom region of sram */ + priv->eeprom_delay = 1; + ipw_eeprom_init_sram(priv); + + /* enable interrupts */ + ipw_enable_interrupts(priv); + + /* Ensure our queue has valid packets */ + ipw_rx_queue_replenish(priv); + + ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read); + + /* ack pending interrupts */ + ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); + +#ifndef CONFIG_PM + release_firmware(raw); +#endif + return 0; + + error: + if (priv->rxq) { + ipw_rx_queue_free(priv, priv->rxq); + priv->rxq = NULL; + } + ipw_tx_queue_free(priv); + if (raw) + release_firmware(raw); +#ifdef CONFIG_PM + fw_loaded = 0; + raw = NULL; +#endif + + return rc; +} + +/** + * DMA services + * + * Theory of operation + * + * A queue is a circular buffers with 'Read' and 'Write' pointers. + * 2 empty entries always kept in the buffer to protect from overflow. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + * The IPW operates with six queues, one receive queue in the device's + * sram, one transmit queue for sending commands to the device firmware, + * and four transmit queues for data. + * + * The four transmit queues allow for performing quality of service (qos) + * transmissions as per the 802.11 protocol. Currently Linux does not + * provide a mechanism to the user for utilizing prioritized queues, so + * we only utilize the first data transmit queue (queue1). + */ + +/** + * Driver allocates buffers of this size for Rx + */ + +/** + * ipw_rx_queue_space - Return number of free slots available in queue. + */ +static int ipw_rx_queue_space(const struct ipw_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} + +static inline int ipw_tx_queue_space(const struct clx2_queue *q) +{ + int s = q->last_used - q->first_empty; + if (s <= 0) + s += q->n_bd; + s -= 2; /* keep some reserve to not confuse empty and full situations */ + if (s < 0) + s = 0; + return s; +} + +static inline int ipw_queue_inc_wrap(int index, int n_bd) +{ + return (++index == n_bd) ? 0 : index; +} + +/** + * Initialize common DMA queue structure + * + * @param q queue to init + * @param count Number of BD's to allocate. 
Should be power of 2 + * @param read_register Address for 'read' register + * (not offset within BAR, full address) + * @param write_register Address for 'write' register + * (not offset within BAR, full address) + * @param base_register Address for 'base' register + * (not offset within BAR, full address) + * @param size Address for 'size' register + * (not offset within BAR, full address) + */ +static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q, + int count, u32 read, u32 write, u32 base, u32 size) +{ + q->n_bd = count; + + q->low_mark = q->n_bd / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_bd / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->first_empty = q->last_used = 0; + q->reg_r = read; + q->reg_w = write; + + ipw_write32(priv, base, q->dma_addr); + ipw_write32(priv, size, count); + ipw_write32(priv, read, 0); + ipw_write32(priv, write, 0); + + _ipw_read32(priv, 0x90); +} + +static int ipw_queue_tx_init(struct ipw_priv *priv, + struct clx2_tx_queue *q, + int count, u32 read, u32 write, u32 base, u32 size) +{ + struct pci_dev *dev = priv->pci_dev; + + q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL); + if (!q->txb) { + IPW_ERROR("vmalloc for auxilary BD structures failed\n"); + return -ENOMEM; + } + + q->bd = + pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr); + if (!q->bd) { + IPW_ERROR("pci_alloc_consistent(%zd) failed\n", + sizeof(q->bd[0]) * count); + kfree(q->txb); + q->txb = NULL; + return -ENOMEM; + } + + ipw_queue_init(priv, &q->q, count, read, write, base, size); + return 0; +} + +/** + * Free one TFD, those at index [txq->q.last_used]. + * Do NOT advance any indexes + * + * @param dev + * @param txq + */ +static void ipw_queue_tx_free_tfd(struct ipw_priv *priv, + struct clx2_tx_queue *txq) +{ + struct tfd_frame *bd = &txq->bd[txq->q.last_used]; + struct pci_dev *dev = priv->pci_dev; + int i; + + /* classify bd */ + if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE) + /* nothing to cleanup after for host commands */ + return; + + /* sanity check */ + if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) { + IPW_ERROR("Too many chunks: %i\n", + le32_to_cpu(bd->u.data.num_chunks)); + /** @todo issue fatal error, it is quite serious situation */ + return; + } + + /* unmap chunks if any */ + for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) { + pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]), + le16_to_cpu(bd->u.data.chunk_len[i]), + PCI_DMA_TODEVICE); + if (txq->txb[txq->q.last_used]) { + libipw_txb_free(txq->txb[txq->q.last_used]); + txq->txb[txq->q.last_used] = NULL; + } + } +} + +/** + * Deallocate DMA queue. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. 
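+ * Safe to call on a queue that was never allocated: a zero n_bd is
+ * treated as nothing to do.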
+ * + * @param dev + * @param q + */ +static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq) +{ + struct clx2_queue *q = &txq->q; + struct pci_dev *dev = priv->pci_dev; + + if (q->n_bd == 0) + return; + + /* first, empty all BD's */ + for (; q->first_empty != q->last_used; + q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { + ipw_queue_tx_free_tfd(priv, txq); + } + + /* free buffers belonging to queue itself */ + pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, + q->dma_addr); + kfree(txq->txb); + + /* 0 fill whole structure */ + memset(txq, 0, sizeof(*txq)); +} + +/** + * Destroy all DMA queues and structures + * + * @param priv + */ +static void ipw_tx_queue_free(struct ipw_priv *priv) +{ + /* Tx CMD queue */ + ipw_queue_tx_free(priv, &priv->txq_cmd); + + /* Tx queues */ + ipw_queue_tx_free(priv, &priv->txq[0]); + ipw_queue_tx_free(priv, &priv->txq[1]); + ipw_queue_tx_free(priv, &priv->txq[2]); + ipw_queue_tx_free(priv, &priv->txq[3]); +} + +static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid) +{ + /* First 3 bytes are manufacturer */ + bssid[0] = priv->mac_addr[0]; + bssid[1] = priv->mac_addr[1]; + bssid[2] = priv->mac_addr[2]; + + /* Last bytes are random */ + get_random_bytes(&bssid[3], ETH_ALEN - 3); + + bssid[0] &= 0xfe; /* clear multicast bit */ + bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */ +} + +static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) +{ + struct ipw_station_entry entry; + int i; + + for (i = 0; i < priv->num_stations; i++) { + if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) { + /* Another node is active in network */ + priv->missed_adhoc_beacons = 0; + if (!(priv->config & CFG_STATIC_CHANNEL)) + /* when other nodes drop out, we drop out */ + priv->config &= ~CFG_ADHOC_PERSIST; + + return i; + } + } + + if (i == MAX_STATIONS) + return IPW_INVALID_STATION; + + IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid); + + entry.reserved = 0; + entry.support_mode = 0; + memcpy(entry.mac_addr, bssid, ETH_ALEN); + memcpy(priv->stations[i], bssid, ETH_ALEN); + ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry), + &entry, sizeof(entry)); + priv->num_stations++; + + return i; +} + +static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid) +{ + int i; + + for (i = 0; i < priv->num_stations; i++) + if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) + return i; + + return IPW_INVALID_STATION; +} + +static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) +{ + int err; + + if (priv->status & STATUS_ASSOCIATING) { + IPW_DEBUG_ASSOC("Disassociating while associating.\n"); + queue_work(priv->workqueue, &priv->disassociate); + return; + } + + if (!(priv->status & STATUS_ASSOCIATED)) { + IPW_DEBUG_ASSOC("Disassociating while not associated.\n"); + return; + } + + IPW_DEBUG_ASSOC("Disassocation attempt from %pM " + "on channel %d.\n", + priv->assoc_request.bssid, + priv->assoc_request.channel); + + priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED); + priv->status |= STATUS_DISASSOCIATING; + + if (quiet) + priv->assoc_request.assoc_type = HC_DISASSOC_QUIET; + else + priv->assoc_request.assoc_type = HC_DISASSOCIATE; + + err = ipw_send_associate(priv, &priv->assoc_request); + if (err) { + IPW_DEBUG_HC("Attempt to send [dis]associate command " + "failed.\n"); + return; + } + +} + +static int ipw_disassociate(void *data) +{ + struct ipw_priv *priv = data; + if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) + return 0; + ipw_send_disassociate(data, 0); + 
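+	/* report loss of link to the network stack */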
netif_carrier_off(priv->net_dev); + return 1; +} + +static void ipw_bg_disassociate(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, disassociate); + mutex_lock(&priv->mutex); + ipw_disassociate(priv); + mutex_unlock(&priv->mutex); +} + +static void ipw_system_config(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, system_config); + +#ifdef CONFIG_IPW2200_PROMISCUOUS + if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { + priv->sys_config.accept_all_data_frames = 1; + priv->sys_config.accept_non_directed_frames = 1; + priv->sys_config.accept_all_mgmt_bcpr = 1; + priv->sys_config.accept_all_mgmt_frames = 1; + } +#endif + + ipw_send_system_config(priv); +} + +struct ipw_status_code { + u16 status; + const char *reason; +}; + +static const struct ipw_status_code ipw_status_codes[] = { + {0x00, "Successful"}, + {0x01, "Unspecified failure"}, + {0x0A, "Cannot support all requested capabilities in the " + "Capability information field"}, + {0x0B, "Reassociation denied due to inability to confirm that " + "association exists"}, + {0x0C, "Association denied due to reason outside the scope of this " + "standard"}, + {0x0D, + "Responding station does not support the specified authentication " + "algorithm"}, + {0x0E, + "Received an Authentication frame with authentication sequence " + "transaction sequence number out of expected sequence"}, + {0x0F, "Authentication rejected because of challenge failure"}, + {0x10, "Authentication rejected due to timeout waiting for next " + "frame in sequence"}, + {0x11, "Association denied because AP is unable to handle additional " + "associated stations"}, + {0x12, + "Association denied due to requesting station not supporting all " + "of the datarates in the BSSBasicServiceSet Parameter"}, + {0x13, + "Association denied due to requesting station not supporting " + "short preamble operation"}, + {0x14, + "Association denied due to requesting station not supporting " + "PBCC encoding"}, + {0x15, + "Association denied due to requesting station not supporting " + "channel agility"}, + {0x19, + "Association denied due to requesting station not supporting " + "short slot operation"}, + {0x1A, + "Association denied due to requesting station not supporting " + "DSSS-OFDM operation"}, + {0x28, "Invalid Information Element"}, + {0x29, "Group Cipher is not valid"}, + {0x2A, "Pairwise Cipher is not valid"}, + {0x2B, "AKMP is not valid"}, + {0x2C, "Unsupported RSN IE version"}, + {0x2D, "Invalid RSN IE Capabilities"}, + {0x2E, "Cipher suite is rejected per security policy"}, +}; + +static const char *ipw_get_status_code(u16 status) +{ + int i; + for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++) + if (ipw_status_codes[i].status == (status & 0xff)) + return ipw_status_codes[i].reason; + return "Unknown status value."; +} + +static void inline average_init(struct average *avg) +{ + memset(avg, 0, sizeof(*avg)); +} + +#define DEPTH_RSSI 8 +#define DEPTH_NOISE 16 +static s16 exponential_average(s16 prev_avg, s16 val, u8 depth) +{ + return ((depth-1)*prev_avg + val)/depth; +} + +static void average_add(struct average *avg, s16 val) +{ + avg->sum -= avg->entries[avg->pos]; + avg->sum += val; + avg->entries[avg->pos++] = val; + if (unlikely(avg->pos == AVG_ENTRIES)) { + avg->init = 1; + avg->pos = 0; + } +} + +static s16 average_value(struct average *avg) +{ + if (!unlikely(avg->init)) { + if (avg->pos) + return avg->sum / avg->pos; + return 0; + } + + return avg->sum / AVG_ENTRIES; 
+} + +static void ipw_reset_stats(struct ipw_priv *priv) +{ + u32 len = sizeof(u32); + + priv->quality = 0; + + average_init(&priv->average_missed_beacons); + priv->exp_avg_rssi = -60; + priv->exp_avg_noise = -85 + 0x100; + + priv->last_rate = 0; + priv->last_missed_beacons = 0; + priv->last_rx_packets = 0; + priv->last_tx_packets = 0; + priv->last_tx_failures = 0; + + /* Firmware managed, reset only when NIC is restarted, so we have to + * normalize on the current value */ + ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, + &priv->last_rx_err, &len); + ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, + &priv->last_tx_failures, &len); + + /* Driver managed, reset with each association */ + priv->missed_adhoc_beacons = 0; + priv->missed_beacons = 0; + priv->tx_packets = 0; + priv->rx_packets = 0; + +} + +static u32 ipw_get_max_rate(struct ipw_priv *priv) +{ + u32 i = 0x80000000; + u32 mask = priv->rates_mask; + /* If currently associated in B mode, restrict the maximum + * rate match to B rates */ + if (priv->assoc_request.ieee_mode == IPW_B_MODE) + mask &= LIBIPW_CCK_RATES_MASK; + + /* TODO: Verify that the rate is supported by the current rates + * list. */ + + while (i && !(mask & i)) + i >>= 1; + switch (i) { + case LIBIPW_CCK_RATE_1MB_MASK: + return 1000000; + case LIBIPW_CCK_RATE_2MB_MASK: + return 2000000; + case LIBIPW_CCK_RATE_5MB_MASK: + return 5500000; + case LIBIPW_OFDM_RATE_6MB_MASK: + return 6000000; + case LIBIPW_OFDM_RATE_9MB_MASK: + return 9000000; + case LIBIPW_CCK_RATE_11MB_MASK: + return 11000000; + case LIBIPW_OFDM_RATE_12MB_MASK: + return 12000000; + case LIBIPW_OFDM_RATE_18MB_MASK: + return 18000000; + case LIBIPW_OFDM_RATE_24MB_MASK: + return 24000000; + case LIBIPW_OFDM_RATE_36MB_MASK: + return 36000000; + case LIBIPW_OFDM_RATE_48MB_MASK: + return 48000000; + case LIBIPW_OFDM_RATE_54MB_MASK: + return 54000000; + } + + if (priv->ieee->mode == IEEE_B) + return 11000000; + else + return 54000000; +} + +static u32 ipw_get_current_rate(struct ipw_priv *priv) +{ + u32 rate, len = sizeof(rate); + int err; + + if (!(priv->status & STATUS_ASSOCIATED)) + return 0; + + if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) { + err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate, + &len); + if (err) { + IPW_DEBUG_INFO("failed querying ordinals.\n"); + return 0; + } + } else + return ipw_get_max_rate(priv); + + switch (rate) { + case IPW_TX_RATE_1MB: + return 1000000; + case IPW_TX_RATE_2MB: + return 2000000; + case IPW_TX_RATE_5MB: + return 5500000; + case IPW_TX_RATE_6MB: + return 6000000; + case IPW_TX_RATE_9MB: + return 9000000; + case IPW_TX_RATE_11MB: + return 11000000; + case IPW_TX_RATE_12MB: + return 12000000; + case IPW_TX_RATE_18MB: + return 18000000; + case IPW_TX_RATE_24MB: + return 24000000; + case IPW_TX_RATE_36MB: + return 36000000; + case IPW_TX_RATE_48MB: + return 48000000; + case IPW_TX_RATE_54MB: + return 54000000; + } + + return 0; +} + +#define IPW_STATS_INTERVAL (2 * HZ) +static void ipw_gather_stats(struct ipw_priv *priv) +{ + u32 rx_err, rx_err_delta, rx_packets_delta; + u32 tx_failures, tx_failures_delta, tx_packets_delta; + u32 missed_beacons_percent, missed_beacons_delta; + u32 quality = 0; + u32 len = sizeof(u32); + s16 rssi; + u32 beacon_quality, signal_quality, tx_quality, rx_quality, + rate_quality; + u32 max_rate; + + if (!(priv->status & STATUS_ASSOCIATED)) { + priv->quality = 0; + return; + } + + /* Update the statistics */ + ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS, + &priv->missed_beacons, &len); + missed_beacons_delta = 
priv->missed_beacons - priv->last_missed_beacons; + priv->last_missed_beacons = priv->missed_beacons; + if (priv->assoc_request.beacon_interval) { + missed_beacons_percent = missed_beacons_delta * + (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) / + (IPW_STATS_INTERVAL * 10); + } else { + missed_beacons_percent = 0; + } + average_add(&priv->average_missed_beacons, missed_beacons_percent); + + ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len); + rx_err_delta = rx_err - priv->last_rx_err; + priv->last_rx_err = rx_err; + + ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len); + tx_failures_delta = tx_failures - priv->last_tx_failures; + priv->last_tx_failures = tx_failures; + + rx_packets_delta = priv->rx_packets - priv->last_rx_packets; + priv->last_rx_packets = priv->rx_packets; + + tx_packets_delta = priv->tx_packets - priv->last_tx_packets; + priv->last_tx_packets = priv->tx_packets; + + /* Calculate quality based on the following: + * + * Missed beacon: 100% = 0, 0% = 70% missed + * Rate: 60% = 1Mbs, 100% = Max + * Rx and Tx errors represent a straight % of total Rx/Tx + * RSSI: 100% = > -50, 0% = < -80 + * Rx errors: 100% = 0, 0% = 50% missed + * + * The lowest computed quality is used. + * + */ +#define BEACON_THRESHOLD 5 + beacon_quality = 100 - missed_beacons_percent; + if (beacon_quality < BEACON_THRESHOLD) + beacon_quality = 0; + else + beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 / + (100 - BEACON_THRESHOLD); + IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n", + beacon_quality, missed_beacons_percent); + + priv->last_rate = ipw_get_current_rate(priv); + max_rate = ipw_get_max_rate(priv); + rate_quality = priv->last_rate * 40 / max_rate + 60; + IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n", + rate_quality, priv->last_rate / 1000000); + + if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta) + rx_quality = 100 - (rx_err_delta * 100) / + (rx_packets_delta + rx_err_delta); + else + rx_quality = 100; + IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n", + rx_quality, rx_err_delta, rx_packets_delta); + + if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta) + tx_quality = 100 - (tx_failures_delta * 100) / + (tx_packets_delta + tx_failures_delta); + else + tx_quality = 100; + IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n", + tx_quality, tx_failures_delta, tx_packets_delta); + + rssi = priv->exp_avg_rssi; + signal_quality = + (100 * + (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * + (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) - + (priv->ieee->perfect_rssi - rssi) * + (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) + + 62 * (priv->ieee->perfect_rssi - rssi))) / + ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * + (priv->ieee->perfect_rssi - priv->ieee->worst_rssi)); + if (signal_quality > 100) + signal_quality = 100; + else if (signal_quality < 1) + signal_quality = 0; + + IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n", + signal_quality, rssi); + + quality = min(rx_quality, signal_quality); + quality = min(tx_quality, quality); + quality = min(rate_quality, quality); + quality = min(beacon_quality, quality); + if (quality == beacon_quality) + IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n", + quality); + if (quality == rate_quality) + IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n", + quality); + if (quality == tx_quality) + IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n", + quality); + if (quality == rx_quality) + 
IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n", + quality); + if (quality == signal_quality) + IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n", + quality); + + priv->quality = quality; + + queue_delayed_work(priv->workqueue, &priv->gather_stats, + IPW_STATS_INTERVAL); +} + +static void ipw_bg_gather_stats(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, gather_stats.work); + mutex_lock(&priv->mutex); + ipw_gather_stats(priv); + mutex_unlock(&priv->mutex); +} + +/* Missed beacon behavior: + * 1st missed -> roaming_threshold, just wait, don't do any scan/roam. + * roaming_threshold -> disassociate_threshold, scan and roam for better signal. + * Above disassociate threshold, give up and stop scanning. + * Roaming is disabled if disassociate_threshold <= roaming_threshold */ +static void ipw_handle_missed_beacon(struct ipw_priv *priv, + int missed_count) +{ + priv->notif_missed_beacons = missed_count; + + if (missed_count > priv->disassociate_threshold && + priv->status & STATUS_ASSOCIATED) { + /* If associated and we've hit the missed + * beacon threshold, disassociate, turn + * off roaming, and abort any active scans */ + IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | + IPW_DL_STATE | IPW_DL_ASSOC, + "Missed beacon: %d - disassociate\n", missed_count); + priv->status &= ~STATUS_ROAMING; + if (priv->status & STATUS_SCANNING) { + IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | + IPW_DL_STATE, + "Aborting scan with missed beacon.\n"); + queue_work(priv->workqueue, &priv->abort_scan); + } + + queue_work(priv->workqueue, &priv->disassociate); + return; + } + + if (priv->status & STATUS_ROAMING) { + /* If we are currently roaming, then just + * print a debug statement... */ + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, + "Missed beacon: %d - roam in progress\n", + missed_count); + return; + } + + if (roaming && + (missed_count > priv->roaming_threshold && + missed_count <= priv->disassociate_threshold)) { + /* If we are not already roaming, set the ROAM + * bit in the status and kick off a scan. + * This can happen several times before we reach + * disassociate_threshold. */ + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, + "Missed beacon: %d - initiate " + "roaming\n", missed_count); + if (!(priv->status & STATUS_ROAMING)) { + priv->status |= STATUS_ROAMING; + if (!(priv->status & STATUS_SCANNING)) + queue_delayed_work(priv->workqueue, + &priv->request_scan, 0); + } + return; + } + + if (priv->status & STATUS_SCANNING && + missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) { + /* Stop scan to keep fw from getting + * stuck (only if we aren't roaming -- + * otherwise we'll never scan more than 2 or 3 + * channels..) 
*/ + IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, + "Aborting scan with missed beacon.\n"); + queue_work(priv->workqueue, &priv->abort_scan); + } + + IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); +} + +static void ipw_scan_event(struct work_struct *work) +{ + union iwreq_data wrqu; + + struct ipw_priv *priv = + container_of(work, struct ipw_priv, scan_event.work); + + wrqu.data.length = 0; + wrqu.data.flags = 0; + wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); +} + +static void handle_scan_event(struct ipw_priv *priv) +{ + /* Only userspace-requested scan completion events go out immediately */ + if (!priv->user_requested_scan) { + if (!delayed_work_pending(&priv->scan_event)) + queue_delayed_work(priv->workqueue, &priv->scan_event, + round_jiffies_relative(msecs_to_jiffies(4000))); + } else { + union iwreq_data wrqu; + + priv->user_requested_scan = 0; + cancel_delayed_work(&priv->scan_event); + + wrqu.data.length = 0; + wrqu.data.flags = 0; + wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); + } +} + +/** + * Handle host notification packet. + * Called from interrupt routine + */ +static void ipw_rx_notification(struct ipw_priv *priv, + struct ipw_rx_notification *notif) +{ + DECLARE_SSID_BUF(ssid); + u16 size = le16_to_cpu(notif->size); + + IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size); + + switch (notif->subtype) { + case HOST_NOTIFICATION_STATUS_ASSOCIATED:{ + struct notif_association *assoc = ¬if->u.assoc; + + switch (assoc->state) { + case CMAS_ASSOCIATED:{ + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, + "associated: '%s' %pM \n", + print_ssid(ssid, priv->essid, + priv->essid_len), + priv->bssid); + + switch (priv->ieee->iw_mode) { + case IW_MODE_INFRA: + memcpy(priv->ieee->bssid, + priv->bssid, ETH_ALEN); + break; + + case IW_MODE_ADHOC: + memcpy(priv->ieee->bssid, + priv->bssid, ETH_ALEN); + + /* clear out the station table */ + priv->num_stations = 0; + + IPW_DEBUG_ASSOC + ("queueing adhoc check\n"); + queue_delayed_work(priv-> + workqueue, + &priv-> + adhoc_check, + le16_to_cpu(priv-> + assoc_request. 
+ beacon_interval)); + break; + } + + priv->status &= ~STATUS_ASSOCIATING; + priv->status |= STATUS_ASSOCIATED; + queue_work(priv->workqueue, + &priv->system_config); + +#ifdef CONFIG_IPW2200_QOS +#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ + le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control)) + if ((priv->status & STATUS_AUTH) && + (IPW_GET_PACKET_STYPE(¬if->u.raw) + == IEEE80211_STYPE_ASSOC_RESP)) { + if ((sizeof + (struct + libipw_assoc_response) + <= size) + && (size <= 2314)) { + struct + libipw_rx_stats + stats = { + .len = size - 1, + }; + + IPW_DEBUG_QOS + ("QoS Associate " + "size %d\n", size); + libipw_rx_mgt(priv-> + ieee, + (struct + libipw_hdr_4addr + *) + ¬if->u.raw, &stats); + } + } +#endif + + schedule_work(&priv->link_up); + + break; + } + + case CMAS_AUTHENTICATED:{ + if (priv-> + status & (STATUS_ASSOCIATED | + STATUS_AUTH)) { + struct notif_authenticate *auth + = ¬if->u.auth; + IPW_DEBUG(IPW_DL_NOTIF | + IPW_DL_STATE | + IPW_DL_ASSOC, + "deauthenticated: '%s' " + "%pM" + ": (0x%04X) - %s \n", + print_ssid(ssid, + priv-> + essid, + priv-> + essid_len), + priv->bssid, + le16_to_cpu(auth->status), + ipw_get_status_code + (le16_to_cpu + (auth->status))); + + priv->status &= + ~(STATUS_ASSOCIATING | + STATUS_AUTH | + STATUS_ASSOCIATED); + + schedule_work(&priv->link_down); + break; + } + + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, + "authenticated: '%s' %pM\n", + print_ssid(ssid, priv->essid, + priv->essid_len), + priv->bssid); + break; + } + + case CMAS_INIT:{ + if (priv->status & STATUS_AUTH) { + struct + libipw_assoc_response + *resp; + resp = + (struct + libipw_assoc_response + *)¬if->u.raw; + IPW_DEBUG(IPW_DL_NOTIF | + IPW_DL_STATE | + IPW_DL_ASSOC, + "association failed (0x%04X): %s\n", + le16_to_cpu(resp->status), + ipw_get_status_code + (le16_to_cpu + (resp->status))); + } + + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, + "disassociated: '%s' %pM \n", + print_ssid(ssid, priv->essid, + priv->essid_len), + priv->bssid); + + priv->status &= + ~(STATUS_DISASSOCIATING | + STATUS_ASSOCIATING | + STATUS_ASSOCIATED | STATUS_AUTH); + if (priv->assoc_network + && (priv->assoc_network-> + capability & + WLAN_CAPABILITY_IBSS)) + ipw_remove_current_network + (priv); + + schedule_work(&priv->link_down); + + break; + } + + case CMAS_RX_ASSOC_RESP: + break; + + default: + IPW_ERROR("assoc: unknown (%d)\n", + assoc->state); + break; + } + + break; + } + + case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{ + struct notif_authenticate *auth = ¬if->u.auth; + switch (auth->state) { + case CMAS_AUTHENTICATED: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, + "authenticated: '%s' %pM \n", + print_ssid(ssid, priv->essid, + priv->essid_len), + priv->bssid); + priv->status |= STATUS_AUTH; + break; + + case CMAS_INIT: + if (priv->status & STATUS_AUTH) { + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, + "authentication failed (0x%04X): %s\n", + le16_to_cpu(auth->status), + ipw_get_status_code(le16_to_cpu + (auth-> + status))); + } + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, + "deauthenticated: '%s' %pM\n", + print_ssid(ssid, priv->essid, + priv->essid_len), + priv->bssid); + + priv->status &= ~(STATUS_ASSOCIATING | + STATUS_AUTH | + STATUS_ASSOCIATED); + + schedule_work(&priv->link_down); + break; + + case CMAS_TX_AUTH_SEQ_1: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "AUTH_SEQ_1\n"); + break; + case CMAS_RX_AUTH_SEQ_2: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "AUTH_SEQ_2\n"); + break; + case 
CMAS_AUTH_SEQ_1_PASS: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n"); + break; + case CMAS_AUTH_SEQ_1_FAIL: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n"); + break; + case CMAS_TX_AUTH_SEQ_3: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "AUTH_SEQ_3\n"); + break; + case CMAS_RX_AUTH_SEQ_4: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n"); + break; + case CMAS_AUTH_SEQ_2_PASS: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n"); + break; + case CMAS_AUTH_SEQ_2_FAIL: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n"); + break; + case CMAS_TX_ASSOC: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "TX_ASSOC\n"); + break; + case CMAS_RX_ASSOC_RESP: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "RX_ASSOC_RESP\n"); + + break; + case CMAS_ASSOCIATED: + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | + IPW_DL_ASSOC, "ASSOCIATED\n"); + break; + default: + IPW_DEBUG_NOTIF("auth: failure - %d\n", + auth->state); + break; + } + break; + } + + case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{ + struct notif_channel_result *x = + ¬if->u.channel_result; + + if (size == sizeof(*x)) { + IPW_DEBUG_SCAN("Scan result for channel %d\n", + x->channel_num); + } else { + IPW_DEBUG_SCAN("Scan result of wrong size %d " + "(should be %zd)\n", + size, sizeof(*x)); + } + break; + } + + case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{ + struct notif_scan_complete *x = ¬if->u.scan_complete; + if (size == sizeof(*x)) { + IPW_DEBUG_SCAN + ("Scan completed: type %d, %d channels, " + "%d status\n", x->scan_type, + x->num_channels, x->status); + } else { + IPW_ERROR("Scan completed of wrong size %d " + "(should be %zd)\n", + size, sizeof(*x)); + } + + priv->status &= + ~(STATUS_SCANNING | STATUS_SCAN_ABORTING); + + wake_up_interruptible(&priv->wait_state); + cancel_delayed_work(&priv->scan_check); + + if (priv->status & STATUS_EXIT_PENDING) + break; + + priv->ieee->scans++; + +#ifdef CONFIG_IPW2200_MONITOR + if (priv->ieee->iw_mode == IW_MODE_MONITOR) { + priv->status |= STATUS_SCAN_FORCED; + queue_delayed_work(priv->workqueue, + &priv->request_scan, 0); + break; + } + priv->status &= ~STATUS_SCAN_FORCED; +#endif /* CONFIG_IPW2200_MONITOR */ + + /* Do queued direct scans first */ + if (priv->status & STATUS_DIRECT_SCAN_PENDING) { + queue_delayed_work(priv->workqueue, + &priv->request_direct_scan, 0); + } + + if (!(priv->status & (STATUS_ASSOCIATED | + STATUS_ASSOCIATING | + STATUS_ROAMING | + STATUS_DISASSOCIATING))) + queue_work(priv->workqueue, &priv->associate); + else if (priv->status & STATUS_ROAMING) { + if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) + /* If a scan completed and we are in roam mode, then + * the scan that completed was the one requested as a + * result of entering roam... so, schedule the + * roam work */ + queue_work(priv->workqueue, + &priv->roam); + else + /* Don't schedule if we aborted the scan */ + priv->status &= ~STATUS_ROAMING; + } else if (priv->status & STATUS_SCAN_PENDING) + queue_delayed_work(priv->workqueue, + &priv->request_scan, 0); + else if (priv->config & CFG_BACKGROUND_SCAN + && priv->status & STATUS_ASSOCIATED) + queue_delayed_work(priv->workqueue, + &priv->request_scan, + round_jiffies_relative(HZ)); + + /* Send an empty event to user space. 
+ * We don't send the received data on the event because + * it would require us to do complex transcoding, and + * we want to minimise the work done in the irq handler + * Use a request to extract the data. + * Also, we generate this even for any scan, regardless + * on how the scan was initiated. User space can just + * sync on periodic scan to get fresh data... + * Jean II */ + if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) + handle_scan_event(priv); + break; + } + + case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{ + struct notif_frag_length *x = ¬if->u.frag_len; + + if (size == sizeof(*x)) + IPW_ERROR("Frag length: %d\n", + le16_to_cpu(x->frag_length)); + else + IPW_ERROR("Frag length of wrong size %d " + "(should be %zd)\n", + size, sizeof(*x)); + break; + } + + case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{ + struct notif_link_deterioration *x = + ¬if->u.link_deterioration; + + if (size == sizeof(*x)) { + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, + "link deterioration: type %d, cnt %d\n", + x->silence_notification_type, + x->silence_count); + memcpy(&priv->last_link_deterioration, x, + sizeof(*x)); + } else { + IPW_ERROR("Link Deterioration of wrong size %d " + "(should be %zd)\n", + size, sizeof(*x)); + } + break; + } + + case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{ + IPW_ERROR("Dino config\n"); + if (priv->hcmd + && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG) + IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n"); + + break; + } + + case HOST_NOTIFICATION_STATUS_BEACON_STATE:{ + struct notif_beacon_state *x = ¬if->u.beacon_state; + if (size != sizeof(*x)) { + IPW_ERROR + ("Beacon state of wrong size %d (should " + "be %zd)\n", size, sizeof(*x)); + break; + } + + if (le32_to_cpu(x->state) == + HOST_NOTIFICATION_STATUS_BEACON_MISSING) + ipw_handle_missed_beacon(priv, + le32_to_cpu(x-> + number)); + + break; + } + + case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{ + struct notif_tgi_tx_key *x = ¬if->u.tgi_tx_key; + if (size == sizeof(*x)) { + IPW_ERROR("TGi Tx Key: state 0x%02x sec type " + "0x%02x station %d\n", + x->key_state, x->security_type, + x->station_index); + break; + } + + IPW_ERROR + ("TGi Tx Key of wrong size %d (should be %zd)\n", + size, sizeof(*x)); + break; + } + + case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{ + struct notif_calibration *x = ¬if->u.calibration; + + if (size == sizeof(*x)) { + memcpy(&priv->calib, x, sizeof(*x)); + IPW_DEBUG_INFO("TODO: Calibration\n"); + break; + } + + IPW_ERROR + ("Calibration of wrong size %d (should be %zd)\n", + size, sizeof(*x)); + break; + } + + case HOST_NOTIFICATION_NOISE_STATS:{ + if (size == sizeof(u32)) { + priv->exp_avg_noise = + exponential_average(priv->exp_avg_noise, + (u8) (le32_to_cpu(notif->u.noise.value) & 0xff), + DEPTH_NOISE); + break; + } + + IPW_ERROR + ("Noise stat is wrong size %d (should be %zd)\n", + size, sizeof(u32)); + break; + } + + default: + IPW_DEBUG_NOTIF("Unknown notification: " + "subtype=%d,flags=0x%2x,size=%d\n", + notif->subtype, notif->flags, size); + } +} + +/** + * Destroys all DMA structures and initialise them again + * + * @param priv + * @return error code + */ +static int ipw_queue_reset(struct ipw_priv *priv) +{ + int rc = 0; + /** @todo customize queue sizes */ + int nTx = 64, nTxCmd = 8; + ipw_tx_queue_free(priv); + /* Tx CMD queue */ + rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd, + IPW_TX_CMD_QUEUE_READ_INDEX, + IPW_TX_CMD_QUEUE_WRITE_INDEX, + IPW_TX_CMD_QUEUE_BD_BASE, + IPW_TX_CMD_QUEUE_BD_SIZE); + if (rc) { + IPW_ERROR("Tx Cmd queue init failed\n"); + goto error; + } + /* Tx queue(s) */ + rc 
= ipw_queue_tx_init(priv, &priv->txq[0], nTx, + IPW_TX_QUEUE_0_READ_INDEX, + IPW_TX_QUEUE_0_WRITE_INDEX, + IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE); + if (rc) { + IPW_ERROR("Tx 0 queue init failed\n"); + goto error; + } + rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx, + IPW_TX_QUEUE_1_READ_INDEX, + IPW_TX_QUEUE_1_WRITE_INDEX, + IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE); + if (rc) { + IPW_ERROR("Tx 1 queue init failed\n"); + goto error; + } + rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx, + IPW_TX_QUEUE_2_READ_INDEX, + IPW_TX_QUEUE_2_WRITE_INDEX, + IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE); + if (rc) { + IPW_ERROR("Tx 2 queue init failed\n"); + goto error; + } + rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx, + IPW_TX_QUEUE_3_READ_INDEX, + IPW_TX_QUEUE_3_WRITE_INDEX, + IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE); + if (rc) { + IPW_ERROR("Tx 3 queue init failed\n"); + goto error; + } + /* statistics */ + priv->rx_bufs_min = 0; + priv->rx_pend_max = 0; + return rc; + + error: + ipw_tx_queue_free(priv); + return rc; +} + +/** + * Reclaim Tx queue entries no more used by NIC. + * + * When FW advances 'R' index, all entries between old and + * new 'R' index need to be reclaimed. As result, some free space + * forms. If there is enough free space (> low mark), wake Tx queue. + * + * @note Need to protect against garbage in 'R' index + * @param priv + * @param txq + * @param qindex + * @return Number of used entries remains in the queue + */ +static int ipw_queue_tx_reclaim(struct ipw_priv *priv, + struct clx2_tx_queue *txq, int qindex) +{ + u32 hw_tail; + int used; + struct clx2_queue *q = &txq->q; + + hw_tail = ipw_read32(priv, q->reg_r); + if (hw_tail >= q->n_bd) { + IPW_ERROR + ("Read index for DMA queue (%d) is out of range [0-%d)\n", + hw_tail, q->n_bd); + goto done; + } + for (; q->last_used != hw_tail; + q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { + ipw_queue_tx_free_tfd(priv, txq); + priv->tx_packets++; + } + done: + if ((ipw_tx_queue_space(q) > q->low_mark) && + (qindex >= 0)) + netif_wake_queue(priv->net_dev); + used = q->first_empty - q->last_used; + if (used < 0) + used += q->n_bd; + + return used; +} + +static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, + int len, int sync) +{ + struct clx2_tx_queue *txq = &priv->txq_cmd; + struct clx2_queue *q = &txq->q; + struct tfd_frame *tfd; + + if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) { + IPW_ERROR("No space for Tx\n"); + return -EBUSY; + } + + tfd = &txq->bd[q->first_empty]; + txq->txb[q->first_empty] = NULL; + + memset(tfd, 0, sizeof(*tfd)); + tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE; + tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; + priv->hcmd_seq++; + tfd->u.cmd.index = hcmd; + tfd->u.cmd.length = len; + memcpy(tfd->u.cmd.payload, buf, len); + q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); + ipw_write32(priv, q->reg_w, q->first_empty); + _ipw_read32(priv, 0x90); + + return 0; +} + +/* + * Rx theory of operation + * + * The host allocates 32 DMA target addresses and passes the host address + * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is + * 0 to 31 + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. 
+ * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When + * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replensish the ipw->rxq->rx_free. + * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the + * ipw->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the ipw->rxq. The driver 'processed' index is updated. + * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ + * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * ipw_rx_queue_alloc() Allocates rx_free + * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls + * ipw_rx_queue_restock + * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules ipw_rx_queue_replenish + * + * -- enable interrupts -- + * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls ipw_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/* + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. 
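+ *
+ * A worked example (illustrative numbers only, using the 32-entry RFD
+ * ring described above): ipw_rx_queue_space() is read - write modulo
+ * RX_QUEUE_SIZE minus a 2-slot reserve, clamped at 0.  With read = 10
+ * and write = 9 there are 0 restockable slots (the ring is fully
+ * stocked); with read = write = 10 there are 30.  The reserve keeps a
+ * completely full ring distinguishable from an empty one.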
+ */ +static void ipw_rx_queue_restock(struct ipw_priv *priv) +{ + struct ipw_rx_queue *rxq = priv->rxq; + struct list_head *element; + struct ipw_rx_mem_buffer *rxb; + unsigned long flags; + int write; + + spin_lock_irqsave(&rxq->lock, flags); + write = rxq->write; + while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + element = rxq->rx_free.next; + rxb = list_entry(element, struct ipw_rx_mem_buffer, list); + list_del(element); + + ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE, + rxb->dma_addr); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + /* If we've added more space for the firmware to place data, tell it */ + if (write != rxq->write) + ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write); +} + +/* + * Move all used packet from rx_used to rx_free, allocating a new SKB for each. + * Also restock the Rx queue via ipw_rx_queue_restock. + * + * This is called as a scheduled work item (except for during intialization) + */ +static void ipw_rx_queue_replenish(void *data) +{ + struct ipw_priv *priv = data; + struct ipw_rx_queue *rxq = priv->rxq; + struct list_head *element; + struct ipw_rx_mem_buffer *rxb; + unsigned long flags; + + spin_lock_irqsave(&rxq->lock, flags); + while (!list_empty(&rxq->rx_used)) { + element = rxq->rx_used.next; + rxb = list_entry(element, struct ipw_rx_mem_buffer, list); + rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC); + if (!rxb->skb) { + printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n", + priv->net_dev->name); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + break; + } + list_del(element); + + rxb->dma_addr = + pci_map_single(priv->pci_dev, rxb->skb->data, + IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + ipw_rx_queue_restock(priv); +} + +static void ipw_bg_rx_queue_replenish(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, rx_replenish); + mutex_lock(&priv->mutex); + ipw_rx_queue_replenish(priv); + mutex_unlock(&priv->mutex); +} + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq) +{ + int i; + + if (!rxq) + return; + + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, + IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxq->pool[i].skb); + } + } + + kfree(rxq); +} + +static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv) +{ + struct ipw_rx_queue *rxq; + int i; + + rxq = kzalloc(sizeof(*rxq), GFP_KERNEL); + if (unlikely(!rxq)) { + IPW_ERROR("memory allocation failed\n"); + return NULL; + } + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + + return rxq; +} + +static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate) +{ + rate &= ~LIBIPW_BASIC_RATE_MASK; + if (ieee_mode == IEEE_A) { + switch (rate) { + case LIBIPW_OFDM_RATE_6MB: + return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? + 1 : 0; + case LIBIPW_OFDM_RATE_9MB: + return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? + 1 : 0; + case LIBIPW_OFDM_RATE_12MB: + return priv-> + rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_18MB: + return priv-> + rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_24MB: + return priv-> + rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_36MB: + return priv-> + rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_48MB: + return priv-> + rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_54MB: + return priv-> + rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0; + default: + return 0; + } + } + + /* B and G mixed */ + switch (rate) { + case LIBIPW_CCK_RATE_1MB: + return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0; + case LIBIPW_CCK_RATE_2MB: + return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0; + case LIBIPW_CCK_RATE_5MB: + return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0; + case LIBIPW_CCK_RATE_11MB: + return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0; + } + + /* If we are limited to B modulations, bail at this point */ + if (ieee_mode == IEEE_B) + return 0; + + /* G */ + switch (rate) { + case LIBIPW_OFDM_RATE_6MB: + return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_9MB: + return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_12MB: + return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_18MB: + return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_24MB: + return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_36MB: + return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_48MB: + return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0; + case LIBIPW_OFDM_RATE_54MB: + return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 
1 : 0; + } + + return 0; +} + +static int ipw_compatible_rates(struct ipw_priv *priv, + const struct libipw_network *network, + struct ipw_supported_rates *rates) +{ + int num_rates, i; + + memset(rates, 0, sizeof(*rates)); + num_rates = min(network->rates_len, (u8) IPW_MAX_RATES); + rates->num_rates = 0; + for (i = 0; i < num_rates; i++) { + if (!ipw_is_rate_in_mask(priv, network->mode, + network->rates[i])) { + + if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) { + IPW_DEBUG_SCAN("Adding masked mandatory " + "rate %02X\n", + network->rates[i]); + rates->supported_rates[rates->num_rates++] = + network->rates[i]; + continue; + } + + IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", + network->rates[i], priv->rates_mask); + continue; + } + + rates->supported_rates[rates->num_rates++] = network->rates[i]; + } + + num_rates = min(network->rates_ex_len, + (u8) (IPW_MAX_RATES - num_rates)); + for (i = 0; i < num_rates; i++) { + if (!ipw_is_rate_in_mask(priv, network->mode, + network->rates_ex[i])) { + if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) { + IPW_DEBUG_SCAN("Adding masked mandatory " + "rate %02X\n", + network->rates_ex[i]); + rates->supported_rates[rates->num_rates++] = + network->rates[i]; + continue; + } + + IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", + network->rates_ex[i], priv->rates_mask); + continue; + } + + rates->supported_rates[rates->num_rates++] = + network->rates_ex[i]; + } + + return 1; +} + +static void ipw_copy_rates(struct ipw_supported_rates *dest, + const struct ipw_supported_rates *src) +{ + u8 i; + for (i = 0; i < src->num_rates; i++) + dest->supported_rates[i] = src->supported_rates[i]; + dest->num_rates = src->num_rates; +} + +/* TODO: Look at sniffed packets in the air to determine if the basic rate + * mask should ever be used -- right now all callers to add the scan rates are + * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */ +static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates, + u8 modulation, u32 rate_mask) +{ + u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ? + LIBIPW_BASIC_RATE_MASK : 0; + + if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK) + rates->supported_rates[rates->num_rates++] = + LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB; + + if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK) + rates->supported_rates[rates->num_rates++] = + LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB; + + if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK) + rates->supported_rates[rates->num_rates++] = basic_mask | + LIBIPW_CCK_RATE_5MB; + + if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK) + rates->supported_rates[rates->num_rates++] = basic_mask | + LIBIPW_CCK_RATE_11MB; +} + +static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates, + u8 modulation, u32 rate_mask) +{ + u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ? 
+ LIBIPW_BASIC_RATE_MASK : 0; + + if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK) + rates->supported_rates[rates->num_rates++] = basic_mask | + LIBIPW_OFDM_RATE_6MB; + + if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK) + rates->supported_rates[rates->num_rates++] = + LIBIPW_OFDM_RATE_9MB; + + if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK) + rates->supported_rates[rates->num_rates++] = basic_mask | + LIBIPW_OFDM_RATE_12MB; + + if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK) + rates->supported_rates[rates->num_rates++] = + LIBIPW_OFDM_RATE_18MB; + + if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK) + rates->supported_rates[rates->num_rates++] = basic_mask | + LIBIPW_OFDM_RATE_24MB; + + if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK) + rates->supported_rates[rates->num_rates++] = + LIBIPW_OFDM_RATE_36MB; + + if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK) + rates->supported_rates[rates->num_rates++] = + LIBIPW_OFDM_RATE_48MB; + + if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK) + rates->supported_rates[rates->num_rates++] = + LIBIPW_OFDM_RATE_54MB; +} + +struct ipw_network_match { + struct libipw_network *network; + struct ipw_supported_rates rates; +}; + +static int ipw_find_adhoc_network(struct ipw_priv *priv, + struct ipw_network_match *match, + struct libipw_network *network, + int roaming) +{ + struct ipw_supported_rates rates; + DECLARE_SSID_BUF(ssid); + + /* Verify that this network's capability is compatible with the + * current mode (AdHoc or Infrastructure) */ + if ((priv->ieee->iw_mode == IW_MODE_ADHOC && + !(network->capability & WLAN_CAPABILITY_IBSS))) { + IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to " + "capability mismatch.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + + if (unlikely(roaming)) { + /* If we are roaming, then ensure check if this is a valid + * network to try and roam to */ + if ((network->ssid_len != match->network->ssid_len) || + memcmp(network->ssid, match->network->ssid, + network->ssid_len)) { + IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " + "because of non-network ESSID.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + } else { + /* If an ESSID has been configured then compare the broadcast + * ESSID to ours */ + if ((priv->config & CFG_STATIC_ESSID) && + ((network->ssid_len != priv->essid_len) || + memcmp(network->ssid, priv->essid, + min(network->ssid_len, priv->essid_len)))) { + char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; + + strncpy(escaped, + print_ssid(ssid, network->ssid, + network->ssid_len), + sizeof(escaped)); + IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " + "because of ESSID mismatch: '%s'.\n", + escaped, network->bssid, + print_ssid(ssid, priv->essid, + priv->essid_len)); + return 0; + } + } + + /* If the old network rate is better than this one, don't bother + * testing everything else. */ + + if (network->time_stamp[0] < match->network->time_stamp[0]) { + IPW_DEBUG_MERGE("Network '%s excluded because newer than " + "current network.\n", + print_ssid(ssid, match->network->ssid, + match->network->ssid_len)); + return 0; + } else if (network->time_stamp[1] < match->network->time_stamp[1]) { + IPW_DEBUG_MERGE("Network '%s excluded because newer than " + "current network.\n", + print_ssid(ssid, match->network->ssid, + match->network->ssid_len)); + return 0; + } + + /* Now go through and see if the requested network is valid... 
*/ + if (priv->ieee->scan_age != 0 && + time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { + IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " + "because of age: %ums.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid, + jiffies_to_msecs(jiffies - + network->last_scanned)); + return 0; + } + + if ((priv->config & CFG_STATIC_CHANNEL) && + (network->channel != priv->channel)) { + IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " + "because of channel mismatch: %d != %d.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid, + network->channel, priv->channel); + return 0; + } + + /* Verify privacy compatability */ + if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != + ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { + IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " + "because of privacy mismatch: %s != %s.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid, + priv-> + capability & CAP_PRIVACY_ON ? "on" : "off", + network-> + capability & WLAN_CAPABILITY_PRIVACY ? "on" : + "off"); + return 0; + } + + if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) { + IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " + "because of the same BSSID match: %pM" + ".\n", print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid, + priv->bssid); + return 0; + } + + /* Filter out any incompatible freq / mode combinations */ + if (!libipw_is_valid_mode(priv->ieee, network->mode)) { + IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " + "because of invalid frequency/mode " + "combination.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + + /* Ensure that the rates supported by the driver are compatible with + * this AP, including verification of basic rates (mandatory) */ + if (!ipw_compatible_rates(priv, network, &rates)) { + IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " + "because configured rate mask excludes " + "AP mandatory rate.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + + if (rates.num_rates == 0) { + IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " + "because of no compatible rates.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + + /* TODO: Perform any further minimal comparititive tests. We do not + * want to put too much policy logic here; intelligent scan selection + * should occur within a generic IEEE 802.11 user space tool. 
*/ + + /* Set up 'new' AP to this network */ + ipw_copy_rates(&match->rates, &rates); + match->network = network; + IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n", + print_ssid(ssid, network->ssid, network->ssid_len), + network->bssid); + + return 1; +} + +static void ipw_merge_adhoc_network(struct work_struct *work) +{ + DECLARE_SSID_BUF(ssid); + struct ipw_priv *priv = + container_of(work, struct ipw_priv, merge_networks); + struct libipw_network *network = NULL; + struct ipw_network_match match = { + .network = priv->assoc_network + }; + + if ((priv->status & STATUS_ASSOCIATED) && + (priv->ieee->iw_mode == IW_MODE_ADHOC)) { + /* First pass through ROAM process -- look for a better + * network */ + unsigned long flags; + + spin_lock_irqsave(&priv->ieee->lock, flags); + list_for_each_entry(network, &priv->ieee->network_list, list) { + if (network != priv->assoc_network) + ipw_find_adhoc_network(priv, &match, network, + 1); + } + spin_unlock_irqrestore(&priv->ieee->lock, flags); + + if (match.network == priv->assoc_network) { + IPW_DEBUG_MERGE("No better ADHOC in this network to " + "merge to.\n"); + return; + } + + mutex_lock(&priv->mutex); + if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { + IPW_DEBUG_MERGE("remove network %s\n", + print_ssid(ssid, priv->essid, + priv->essid_len)); + ipw_remove_current_network(priv); + } + + ipw_disassociate(priv); + priv->assoc_network = match.network; + mutex_unlock(&priv->mutex); + return; + } +} + +static int ipw_best_network(struct ipw_priv *priv, + struct ipw_network_match *match, + struct libipw_network *network, int roaming) +{ + struct ipw_supported_rates rates; + DECLARE_SSID_BUF(ssid); + + /* Verify that this network's capability is compatible with the + * current mode (AdHoc or Infrastructure) */ + if ((priv->ieee->iw_mode == IW_MODE_INFRA && + !(network->capability & WLAN_CAPABILITY_ESS)) || + (priv->ieee->iw_mode == IW_MODE_ADHOC && + !(network->capability & WLAN_CAPABILITY_IBSS))) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to " + "capability mismatch.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + + if (unlikely(roaming)) { + /* If we are roaming, then ensure check if this is a valid + * network to try and roam to */ + if ((network->ssid_len != match->network->ssid_len) || + memcmp(network->ssid, match->network->ssid, + network->ssid_len)) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because of non-network ESSID.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + } else { + /* If an ESSID has been configured then compare the broadcast + * ESSID to ours */ + if ((priv->config & CFG_STATIC_ESSID) && + ((network->ssid_len != priv->essid_len) || + memcmp(network->ssid, priv->essid, + min(network->ssid_len, priv->essid_len)))) { + char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; + strncpy(escaped, + print_ssid(ssid, network->ssid, + network->ssid_len), + sizeof(escaped)); + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because of ESSID mismatch: '%s'.\n", + escaped, network->bssid, + print_ssid(ssid, priv->essid, + priv->essid_len)); + return 0; + } + } + + /* If the old network rate is better than this one, don't bother + * testing everything else. 
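+	 * (Note that the comparison below is on reported signal strength,
+	 * stats.rssi, rather than on rate.)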
*/ + if (match->network && match->network->stats.rssi > network->stats.rssi) { + char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; + strncpy(escaped, + print_ssid(ssid, network->ssid, network->ssid_len), + sizeof(escaped)); + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because " + "'%s (%pM)' has a stronger signal.\n", + escaped, network->bssid, + print_ssid(ssid, match->network->ssid, + match->network->ssid_len), + match->network->bssid); + return 0; + } + + /* If this network has already had an association attempt within the + * last 3 seconds, do not try and associate again... */ + if (network->last_associate && + time_after(network->last_associate + (HZ * 3UL), jiffies)) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because of storming (%ums since last " + "assoc attempt).\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid, + jiffies_to_msecs(jiffies - + network->last_associate)); + return 0; + } + + /* Now go through and see if the requested network is valid... */ + if (priv->ieee->scan_age != 0 && + time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because of age: %ums.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid, + jiffies_to_msecs(jiffies - + network->last_scanned)); + return 0; + } + + if ((priv->config & CFG_STATIC_CHANNEL) && + (network->channel != priv->channel)) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because of channel mismatch: %d != %d.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid, + network->channel, priv->channel); + return 0; + } + + /* Verify privacy compatability */ + if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != + ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because of privacy mismatch: %s != %s.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid, + priv->capability & CAP_PRIVACY_ON ? "on" : + "off", + network->capability & + WLAN_CAPABILITY_PRIVACY ? 
"on" : "off"); + return 0; + } + + if ((priv->config & CFG_STATIC_BSSID) && + memcmp(network->bssid, priv->bssid, ETH_ALEN)) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because of BSSID mismatch: %pM.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid, priv->bssid); + return 0; + } + + /* Filter out any incompatible freq / mode combinations */ + if (!libipw_is_valid_mode(priv->ieee, network->mode)) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because of invalid frequency/mode " + "combination.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + + /* Filter out invalid channel in current GEO */ + if (!libipw_is_valid_channel(priv->ieee, network->channel)) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because of invalid channel in current GEO\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + + /* Ensure that the rates supported by the driver are compatible with + * this AP, including verification of basic rates (mandatory) */ + if (!ipw_compatible_rates(priv, network, &rates)) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because configured rate mask excludes " + "AP mandatory rate.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + + if (rates.num_rates == 0) { + IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " + "because of no compatible rates.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 0; + } + + /* TODO: Perform any further minimal comparititive tests. We do not + * want to put too much policy logic here; intelligent scan selection + * should occur within a generic IEEE 802.11 user space tool. */ + + /* Set up 'new' AP to this network */ + ipw_copy_rates(&match->rates, &rates); + match->network = network; + + IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n", + print_ssid(ssid, network->ssid, network->ssid_len), + network->bssid); + + return 1; +} + +static void ipw_adhoc_create(struct ipw_priv *priv, + struct libipw_network *network) +{ + const struct libipw_geo *geo = libipw_get_geo(priv->ieee); + int i; + + /* + * For the purposes of scanning, we can set our wireless mode + * to trigger scans across combinations of bands, but when it + * comes to creating a new ad-hoc network, we have tell the FW + * exactly which band to use. + * + * We also have the possibility of an invalid channel for the + * chossen band. Attempting to create a new ad-hoc network + * with an invalid channel for wireless mode will trigger a + * FW fatal error. 
+ * + */ + switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { + case LIBIPW_52GHZ_BAND: + network->mode = IEEE_A; + i = libipw_channel_to_index(priv->ieee, priv->channel); + BUG_ON(i == -1); + if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) { + IPW_WARNING("Overriding invalid channel\n"); + priv->channel = geo->a[0].channel; + } + break; + + case LIBIPW_24GHZ_BAND: + if (priv->ieee->mode & IEEE_G) + network->mode = IEEE_G; + else + network->mode = IEEE_B; + i = libipw_channel_to_index(priv->ieee, priv->channel); + BUG_ON(i == -1); + if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) { + IPW_WARNING("Overriding invalid channel\n"); + priv->channel = geo->bg[0].channel; + } + break; + + default: + IPW_WARNING("Overriding invalid channel\n"); + if (priv->ieee->mode & IEEE_A) { + network->mode = IEEE_A; + priv->channel = geo->a[0].channel; + } else if (priv->ieee->mode & IEEE_G) { + network->mode = IEEE_G; + priv->channel = geo->bg[0].channel; + } else { + network->mode = IEEE_B; + priv->channel = geo->bg[0].channel; + } + break; + } + + network->channel = priv->channel; + priv->config |= CFG_ADHOC_PERSIST; + ipw_create_bssid(priv, network->bssid); + network->ssid_len = priv->essid_len; + memcpy(network->ssid, priv->essid, priv->essid_len); + memset(&network->stats, 0, sizeof(network->stats)); + network->capability = WLAN_CAPABILITY_IBSS; + if (!(priv->config & CFG_PREAMBLE_LONG)) + network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE; + if (priv->capability & CAP_PRIVACY_ON) + network->capability |= WLAN_CAPABILITY_PRIVACY; + network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH); + memcpy(network->rates, priv->rates.supported_rates, network->rates_len); + network->rates_ex_len = priv->rates.num_rates - network->rates_len; + memcpy(network->rates_ex, + &priv->rates.supported_rates[network->rates_len], + network->rates_ex_len); + network->last_scanned = 0; + network->flags = 0; + network->last_associate = 0; + network->time_stamp[0] = 0; + network->time_stamp[1] = 0; + network->beacon_interval = 100; /* Default */ + network->listen_interval = 10; /* Default */ + network->atim_window = 0; /* Default */ + network->wpa_ie_len = 0; + network->rsn_ie_len = 0; +} + +static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index) +{ + struct ipw_tgi_tx_key key; + + if (!(priv->ieee->sec.flags & (1 << index))) + return; + + key.key_id = index; + memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH); + key.security_type = type; + key.station_index = 0; /* always 0 for BSS */ + key.flags = 0; + /* 0 for new key; previous value of counter (after fatal error) */ + key.tx_counter[0] = cpu_to_le32(0); + key.tx_counter[1] = cpu_to_le32(0); + + ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key); +} + +static void ipw_send_wep_keys(struct ipw_priv *priv, int type) +{ + struct ipw_wep_key key; + int i; + + key.cmd_id = DINO_CMD_WEP_KEY; + key.seq_num = 0; + + /* Note: AES keys cannot be set for multiple times. + * Only set it at the first time. 
*/ + for (i = 0; i < 4; i++) { + key.key_index = i | type; + if (!(priv->ieee->sec.flags & (1 << i))) { + key.key_size = 0; + continue; + } + + key.key_size = priv->ieee->sec.key_sizes[i]; + memcpy(key.key, priv->ieee->sec.keys[i], key.key_size); + + ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key); + } +} + +static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level) +{ + if (priv->ieee->host_encrypt) + return; + + switch (level) { + case SEC_LEVEL_3: + priv->sys_config.disable_unicast_decryption = 0; + priv->ieee->host_decrypt = 0; + break; + case SEC_LEVEL_2: + priv->sys_config.disable_unicast_decryption = 1; + priv->ieee->host_decrypt = 1; + break; + case SEC_LEVEL_1: + priv->sys_config.disable_unicast_decryption = 0; + priv->ieee->host_decrypt = 0; + break; + case SEC_LEVEL_0: + priv->sys_config.disable_unicast_decryption = 1; + break; + default: + break; + } +} + +static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level) +{ + if (priv->ieee->host_encrypt) + return; + + switch (level) { + case SEC_LEVEL_3: + priv->sys_config.disable_multicast_decryption = 0; + break; + case SEC_LEVEL_2: + priv->sys_config.disable_multicast_decryption = 1; + break; + case SEC_LEVEL_1: + priv->sys_config.disable_multicast_decryption = 0; + break; + case SEC_LEVEL_0: + priv->sys_config.disable_multicast_decryption = 1; + break; + default: + break; + } +} + +static void ipw_set_hwcrypto_keys(struct ipw_priv *priv) +{ + switch (priv->ieee->sec.level) { + case SEC_LEVEL_3: + if (priv->ieee->sec.flags & SEC_ACTIVE_KEY) + ipw_send_tgi_tx_key(priv, + DCT_FLAG_EXT_SECURITY_CCM, + priv->ieee->sec.active_key); + + if (!priv->ieee->host_mc_decrypt) + ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM); + break; + case SEC_LEVEL_2: + if (priv->ieee->sec.flags & SEC_ACTIVE_KEY) + ipw_send_tgi_tx_key(priv, + DCT_FLAG_EXT_SECURITY_TKIP, + priv->ieee->sec.active_key); + break; + case SEC_LEVEL_1: + ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); + ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level); + ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level); + break; + case SEC_LEVEL_0: + default: + break; + } +} + +static void ipw_adhoc_check(void *data) +{ + struct ipw_priv *priv = data; + + if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold && + !(priv->config & CFG_ADHOC_PERSIST)) { + IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | + IPW_DL_STATE | IPW_DL_ASSOC, + "Missed beacon: %d - disassociate\n", + priv->missed_adhoc_beacons); + ipw_remove_current_network(priv); + ipw_disassociate(priv); + return; + } + + queue_delayed_work(priv->workqueue, &priv->adhoc_check, + le16_to_cpu(priv->assoc_request.beacon_interval)); +} + +static void ipw_bg_adhoc_check(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, adhoc_check.work); + mutex_lock(&priv->mutex); + ipw_adhoc_check(priv); + mutex_unlock(&priv->mutex); +} + +static void ipw_debug_config(struct ipw_priv *priv) +{ + DECLARE_SSID_BUF(ssid); + IPW_DEBUG_INFO("Scan completed, no valid APs matched " + "[CFG 0x%08X]\n", priv->config); + if (priv->config & CFG_STATIC_CHANNEL) + IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel); + else + IPW_DEBUG_INFO("Channel unlocked.\n"); + if (priv->config & CFG_STATIC_ESSID) + IPW_DEBUG_INFO("ESSID locked to '%s'\n", + print_ssid(ssid, priv->essid, priv->essid_len)); + else + IPW_DEBUG_INFO("ESSID unlocked.\n"); + if (priv->config & CFG_STATIC_BSSID) + IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid); + else + IPW_DEBUG_INFO("BSSID 
unlocked.\n"); + if (priv->capability & CAP_PRIVACY_ON) + IPW_DEBUG_INFO("PRIVACY on\n"); + else + IPW_DEBUG_INFO("PRIVACY off\n"); + IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask); +} + +static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) +{ + /* TODO: Verify that this works... */ + struct ipw_fixed_rate fr; + u32 reg; + u16 mask = 0; + u16 new_tx_rates = priv->rates_mask; + + /* Identify 'current FW band' and match it with the fixed + * Tx rates */ + + switch (priv->ieee->freq_band) { + case LIBIPW_52GHZ_BAND: /* A only */ + /* IEEE_A */ + if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) { + /* Invalid fixed rate mask */ + IPW_DEBUG_WX + ("invalid fixed rate mask in ipw_set_fixed_rate\n"); + new_tx_rates = 0; + break; + } + + new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A; + break; + + default: /* 2.4Ghz or Mixed */ + /* IEEE_B */ + if (mode == IEEE_B) { + if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) { + /* Invalid fixed rate mask */ + IPW_DEBUG_WX + ("invalid fixed rate mask in ipw_set_fixed_rate\n"); + new_tx_rates = 0; + } + break; + } + + /* IEEE_G */ + if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK | + LIBIPW_OFDM_RATES_MASK)) { + /* Invalid fixed rate mask */ + IPW_DEBUG_WX + ("invalid fixed rate mask in ipw_set_fixed_rate\n"); + new_tx_rates = 0; + break; + } + + if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) { + mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1); + new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK; + } + + if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) { + mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1); + new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK; + } + + if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) { + mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1); + new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK; + } + + new_tx_rates |= mask; + break; + } + + fr.tx_rates = cpu_to_le16(new_tx_rates); + + reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE); + ipw_write_reg32(priv, reg, *(u32 *) & fr); +} + +static void ipw_abort_scan(struct ipw_priv *priv) +{ + int err; + + if (priv->status & STATUS_SCAN_ABORTING) { + IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n"); + return; + } + priv->status |= STATUS_SCAN_ABORTING; + + err = ipw_send_scan_abort(priv); + if (err) + IPW_DEBUG_HC("Request to abort scan failed.\n"); +} + +static void ipw_add_scan_channels(struct ipw_priv *priv, + struct ipw_scan_request_ext *scan, + int scan_type) +{ + int channel_index = 0; + const struct libipw_geo *geo; + int i; + + geo = libipw_get_geo(priv->ieee); + + if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) { + int start = channel_index; + for (i = 0; i < geo->a_channels; i++) { + if ((priv->status & STATUS_ASSOCIATED) && + geo->a[i].channel == priv->channel) + continue; + channel_index++; + scan->channels_list[channel_index] = geo->a[i].channel; + ipw_set_scan_type(scan, channel_index, + geo->a[i]. + flags & LIBIPW_CH_PASSIVE_ONLY ? 
+ IPW_SCAN_PASSIVE_FULL_DWELL_SCAN : + scan_type); + } + + if (start != channel_index) { + scan->channels_list[start] = (u8) (IPW_A_MODE << 6) | + (channel_index - start); + channel_index++; + } + } + + if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) { + int start = channel_index; + if (priv->config & CFG_SPEED_SCAN) { + int index; + u8 channels[LIBIPW_24GHZ_CHANNELS] = { + /* nop out the list */ + [0] = 0 + }; + + u8 channel; + while (channel_index < IPW_SCAN_CHANNELS - 1) { + channel = + priv->speed_scan[priv->speed_scan_pos]; + if (channel == 0) { + priv->speed_scan_pos = 0; + channel = priv->speed_scan[0]; + } + if ((priv->status & STATUS_ASSOCIATED) && + channel == priv->channel) { + priv->speed_scan_pos++; + continue; + } + + /* If this channel has already been + * added in scan, break from loop + * and this will be the first channel + * in the next scan. + */ + if (channels[channel - 1] != 0) + break; + + channels[channel - 1] = 1; + priv->speed_scan_pos++; + channel_index++; + scan->channels_list[channel_index] = channel; + index = + libipw_channel_to_index(priv->ieee, channel); + ipw_set_scan_type(scan, channel_index, + geo->bg[index]. + flags & + LIBIPW_CH_PASSIVE_ONLY ? + IPW_SCAN_PASSIVE_FULL_DWELL_SCAN + : scan_type); + } + } else { + for (i = 0; i < geo->bg_channels; i++) { + if ((priv->status & STATUS_ASSOCIATED) && + geo->bg[i].channel == priv->channel) + continue; + channel_index++; + scan->channels_list[channel_index] = + geo->bg[i].channel; + ipw_set_scan_type(scan, channel_index, + geo->bg[i]. + flags & + LIBIPW_CH_PASSIVE_ONLY ? + IPW_SCAN_PASSIVE_FULL_DWELL_SCAN + : scan_type); + } + } + + if (start != channel_index) { + scan->channels_list[start] = (u8) (IPW_B_MODE << 6) | + (channel_index - start); + } + } +} + +static int ipw_passive_dwell_time(struct ipw_priv *priv) +{ + /* staying on passive channels longer than the DTIM interval during a + * scan, while associated, causes the firmware to cancel the scan + * without notification. Hence, don't stay on passive channels longer + * than the beacon interval. + */ + if (priv->status & STATUS_ASSOCIATED + && priv->assoc_network->beacon_interval > 10) + return priv->assoc_network->beacon_interval - 10; + else + return 120; +} + +static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct) +{ + struct ipw_scan_request_ext scan; + int err = 0, scan_type; + + if (!(priv->status & STATUS_INIT) || + (priv->status & STATUS_EXIT_PENDING)) + return 0; + + mutex_lock(&priv->mutex); + + if (direct && (priv->direct_scan_ssid_len == 0)) { + IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n"); + priv->status &= ~STATUS_DIRECT_SCAN_PENDING; + goto done; + } + + if (priv->status & STATUS_SCANNING) { + IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n"); + priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : + STATUS_SCAN_PENDING; + goto done; + } + + if (!(priv->status & STATUS_SCAN_FORCED) && + priv->status & STATUS_SCAN_ABORTING) { + IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); + priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : + STATUS_SCAN_PENDING; + goto done; + } + + if (priv->status & STATUS_RF_KILL_MASK) { + IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n"); + priv->status |= direct ? 
STATUS_DIRECT_SCAN_PENDING : + STATUS_SCAN_PENDING; + goto done; + } + + memset(&scan, 0, sizeof(scan)); + scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee)); + + if (type == IW_SCAN_TYPE_PASSIVE) { + IPW_DEBUG_WX("use passive scanning\n"); + scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN; + scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = + cpu_to_le16(ipw_passive_dwell_time(priv)); + ipw_add_scan_channels(priv, &scan, scan_type); + goto send_request; + } + + /* Use active scan by default. */ + if (priv->config & CFG_SPEED_SCAN) + scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = + cpu_to_le16(30); + else + scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = + cpu_to_le16(20); + + scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = + cpu_to_le16(20); + + scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = + cpu_to_le16(ipw_passive_dwell_time(priv)); + scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); + +#ifdef CONFIG_IPW2200_MONITOR + if (priv->ieee->iw_mode == IW_MODE_MONITOR) { + u8 channel; + u8 band = 0; + + switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { + case LIBIPW_52GHZ_BAND: + band = (u8) (IPW_A_MODE << 6) | 1; + channel = priv->channel; + break; + + case LIBIPW_24GHZ_BAND: + band = (u8) (IPW_B_MODE << 6) | 1; + channel = priv->channel; + break; + + default: + band = (u8) (IPW_B_MODE << 6) | 1; + channel = 9; + break; + } + + scan.channels_list[0] = band; + scan.channels_list[1] = channel; + ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN); + + /* NOTE: The card will sit on this channel for this time + * period. Scan aborts are timing sensitive and frequently + * result in firmware restarts. As such, it is best to + * set a small dwell_time here and just keep re-issuing + * scans. Otherwise fast channel hopping will not actually + * hop channels. + * + * TODO: Move SPEED SCAN support to all modes and bands */ + scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = + cpu_to_le16(2000); + } else { +#endif /* CONFIG_IPW2200_MONITOR */ + /* Honor direct scans first, otherwise if we are roaming make + * this a direct scan for the current network. 
Finally, + * ensure that every other scan is a fast channel hop scan */ + if (direct) { + err = ipw_send_ssid(priv, priv->direct_scan_ssid, + priv->direct_scan_ssid_len); + if (err) { + IPW_DEBUG_HC("Attempt to send SSID command " + "failed\n"); + goto done; + } + + scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; + } else if ((priv->status & STATUS_ROAMING) + || (!(priv->status & STATUS_ASSOCIATED) + && (priv->config & CFG_STATIC_ESSID) + && (le32_to_cpu(scan.full_scan_index) % 2))) { + err = ipw_send_ssid(priv, priv->essid, priv->essid_len); + if (err) { + IPW_DEBUG_HC("Attempt to send SSID command " + "failed.\n"); + goto done; + } + + scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; + } else + scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN; + + ipw_add_scan_channels(priv, &scan, scan_type); +#ifdef CONFIG_IPW2200_MONITOR + } +#endif + +send_request: + err = ipw_send_scan_request_ext(priv, &scan); + if (err) { + IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); + goto done; + } + + priv->status |= STATUS_SCANNING; + if (direct) { + priv->status &= ~STATUS_DIRECT_SCAN_PENDING; + priv->direct_scan_ssid_len = 0; + } else + priv->status &= ~STATUS_SCAN_PENDING; + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IPW_SCAN_CHECK_WATCHDOG); +done: + mutex_unlock(&priv->mutex); + return err; +} + +static void ipw_request_passive_scan(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, request_passive_scan.work); + ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0); +} + +static void ipw_request_scan(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, request_scan.work); + ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0); +} + +static void ipw_request_direct_scan(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, request_direct_scan.work); + ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1); +} + +static void ipw_bg_abort_scan(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, abort_scan); + mutex_lock(&priv->mutex); + ipw_abort_scan(priv); + mutex_unlock(&priv->mutex); +} + +static int ipw_wpa_enable(struct ipw_priv *priv, int value) +{ + /* This is called when wpa_supplicant loads and closes the driver + * interface. 
*/ + priv->ieee->wpa_enabled = value; + return 0; +} + +static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value) +{ + struct libipw_device *ieee = priv->ieee; + struct libipw_security sec = { + .flags = SEC_AUTH_MODE, + }; + int ret = 0; + + if (value & IW_AUTH_ALG_SHARED_KEY) { + sec.auth_mode = WLAN_AUTH_SHARED_KEY; + ieee->open_wep = 0; + } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { + sec.auth_mode = WLAN_AUTH_OPEN; + ieee->open_wep = 1; + } else if (value & IW_AUTH_ALG_LEAP) { + sec.auth_mode = WLAN_AUTH_LEAP; + ieee->open_wep = 1; + } else + return -EINVAL; + + if (ieee->set_security) + ieee->set_security(ieee->dev, &sec); + else + ret = -EOPNOTSUPP; + + return ret; +} + +static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, + int wpa_ie_len) +{ + /* make sure WPA is enabled */ + ipw_wpa_enable(priv, 1); +} + +static int ipw_set_rsn_capa(struct ipw_priv *priv, + char *capabilities, int length) +{ + IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n"); + + return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length, + capabilities); +} + +/* + * WE-18 support + */ + +/* SIOCSIWGENIE */ +static int ipw_wx_set_genie(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct libipw_device *ieee = priv->ieee; + u8 *buf; + int err = 0; + + if (wrqu->data.length > MAX_WPA_IE_LEN || + (wrqu->data.length && extra == NULL)) + return -EINVAL; + + if (wrqu->data.length) { + buf = kmalloc(wrqu->data.length, GFP_KERNEL); + if (buf == NULL) { + err = -ENOMEM; + goto out; + } + + memcpy(buf, extra, wrqu->data.length); + kfree(ieee->wpa_ie); + ieee->wpa_ie = buf; + ieee->wpa_ie_len = wrqu->data.length; + } else { + kfree(ieee->wpa_ie); + ieee->wpa_ie = NULL; + ieee->wpa_ie_len = 0; + } + + ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); + out: + return err; +} + +/* SIOCGIWGENIE */ +static int ipw_wx_get_genie(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct libipw_device *ieee = priv->ieee; + int err = 0; + + if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { + wrqu->data.length = 0; + goto out; + } + + if (wrqu->data.length < ieee->wpa_ie_len) { + err = -E2BIG; + goto out; + } + + wrqu->data.length = ieee->wpa_ie_len; + memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); + + out: + return err; +} + +static int wext_cipher2level(int cipher) +{ + switch (cipher) { + case IW_AUTH_CIPHER_NONE: + return SEC_LEVEL_0; + case IW_AUTH_CIPHER_WEP40: + case IW_AUTH_CIPHER_WEP104: + return SEC_LEVEL_1; + case IW_AUTH_CIPHER_TKIP: + return SEC_LEVEL_2; + case IW_AUTH_CIPHER_CCMP: + return SEC_LEVEL_3; + default: + return -1; + } +} + +/* SIOCSIWAUTH */ +static int ipw_wx_set_auth(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct libipw_device *ieee = priv->ieee; + struct iw_param *param = &wrqu->param; + struct lib80211_crypt_data *crypt; + unsigned long flags; + int ret = 0; + + switch (param->flags & IW_AUTH_INDEX) { + case IW_AUTH_WPA_VERSION: + break; + case IW_AUTH_CIPHER_PAIRWISE: + ipw_set_hw_decrypt_unicast(priv, + wext_cipher2level(param->value)); + break; + case IW_AUTH_CIPHER_GROUP: + ipw_set_hw_decrypt_multicast(priv, + wext_cipher2level(param->value)); + break; + case IW_AUTH_KEY_MGMT: + /* + * ipw2200 does not use these parameters + */ + break; + + case 
IW_AUTH_TKIP_COUNTERMEASURES: + crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; + if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) + break; + + flags = crypt->ops->get_flags(crypt->priv); + + if (param->value) + flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; + else + flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; + + crypt->ops->set_flags(flags, crypt->priv); + + break; + + case IW_AUTH_DROP_UNENCRYPTED:{ + /* HACK: + * + * wpa_supplicant calls set_wpa_enabled when the driver + * is loaded and unloaded, regardless of if WPA is being + * used. No other calls are made which can be used to + * determine if encryption will be used or not prior to + * association being expected. If encryption is not being + * used, drop_unencrypted is set to false, else true -- we + * can use this to determine if the CAP_PRIVACY_ON bit should + * be set. + */ + struct libipw_security sec = { + .flags = SEC_ENABLED, + .enabled = param->value, + }; + priv->ieee->drop_unencrypted = param->value; + /* We only change SEC_LEVEL for open mode. Others + * are set by ipw_wpa_set_encryption. + */ + if (!param->value) { + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_0; + } else { + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_1; + } + if (priv->ieee->set_security) + priv->ieee->set_security(priv->ieee->dev, &sec); + break; + } + + case IW_AUTH_80211_AUTH_ALG: + ret = ipw_wpa_set_auth_algs(priv, param->value); + break; + + case IW_AUTH_WPA_ENABLED: + ret = ipw_wpa_enable(priv, param->value); + ipw_disassociate(priv); + break; + + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + ieee->ieee802_1x = param->value; + break; + + case IW_AUTH_PRIVACY_INVOKED: + ieee->privacy_invoked = param->value; + break; + + default: + return -EOPNOTSUPP; + } + return ret; +} + +/* SIOCGIWAUTH */ +static int ipw_wx_get_auth(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct libipw_device *ieee = priv->ieee; + struct lib80211_crypt_data *crypt; + struct iw_param *param = &wrqu->param; + int ret = 0; + + switch (param->flags & IW_AUTH_INDEX) { + case IW_AUTH_WPA_VERSION: + case IW_AUTH_CIPHER_PAIRWISE: + case IW_AUTH_CIPHER_GROUP: + case IW_AUTH_KEY_MGMT: + /* + * wpa_supplicant will control these internally + */ + ret = -EOPNOTSUPP; + break; + + case IW_AUTH_TKIP_COUNTERMEASURES: + crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; + if (!crypt || !crypt->ops->get_flags) + break; + + param->value = (crypt->ops->get_flags(crypt->priv) & + IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 
1 : 0; + + break; + + case IW_AUTH_DROP_UNENCRYPTED: + param->value = ieee->drop_unencrypted; + break; + + case IW_AUTH_80211_AUTH_ALG: + param->value = ieee->sec.auth_mode; + break; + + case IW_AUTH_WPA_ENABLED: + param->value = ieee->wpa_enabled; + break; + + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + param->value = ieee->ieee802_1x; + break; + + case IW_AUTH_ROAMING_CONTROL: + case IW_AUTH_PRIVACY_INVOKED: + param->value = ieee->privacy_invoked; + break; + + default: + return -EOPNOTSUPP; + } + return 0; +} + +/* SIOCSIWENCODEEXT */ +static int ipw_wx_set_encodeext(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + + if (hwcrypto) { + if (ext->alg == IW_ENCODE_ALG_TKIP) { + /* IPW HW can't build TKIP MIC, + host decryption still needed */ + if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) + priv->ieee->host_mc_decrypt = 1; + else { + priv->ieee->host_encrypt = 0; + priv->ieee->host_encrypt_msdu = 1; + priv->ieee->host_decrypt = 1; + } + } else { + priv->ieee->host_encrypt = 0; + priv->ieee->host_encrypt_msdu = 0; + priv->ieee->host_decrypt = 0; + priv->ieee->host_mc_decrypt = 0; + } + } + + return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra); +} + +/* SIOCGIWENCODEEXT */ +static int ipw_wx_get_encodeext(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra); +} + +/* SIOCSIWMLME */ +static int ipw_wx_set_mlme(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct iw_mlme *mlme = (struct iw_mlme *)extra; + __le16 reason; + + reason = cpu_to_le16(mlme->reason_code); + + switch (mlme->cmd) { + case IW_MLME_DEAUTH: + /* silently ignore */ + break; + + case IW_MLME_DISASSOC: + ipw_disassociate(priv); + break; + + default: + return -EOPNOTSUPP; + } + return 0; +} + +#ifdef CONFIG_IPW2200_QOS + +/* QoS */ +/* +* get the modulation type of the current network or +* the card current mode +*/ +static u8 ipw_qos_current_mode(struct ipw_priv * priv) +{ + u8 mode = 0; + + if (priv->status & STATUS_ASSOCIATED) { + unsigned long flags; + + spin_lock_irqsave(&priv->ieee->lock, flags); + mode = priv->assoc_network->mode; + spin_unlock_irqrestore(&priv->ieee->lock, flags); + } else { + mode = priv->ieee->mode; + } + IPW_DEBUG_QOS("QoS network/card mode %d \n", mode); + return mode; +} + +/* +* Handle management frame beacon and probe response +*/ +static int ipw_qos_handle_probe_response(struct ipw_priv *priv, + int active_network, + struct libipw_network *network) +{ + u32 size = sizeof(struct libipw_qos_parameters); + + if (network->capability & WLAN_CAPABILITY_IBSS) + network->qos_data.active = network->qos_data.supported; + + if (network->flags & NETWORK_HAS_QOS_MASK) { + if (active_network && + (network->flags & NETWORK_HAS_QOS_PARAMETERS)) + network->qos_data.active = network->qos_data.supported; + + if ((network->qos_data.active == 1) && (active_network == 1) && + (network->flags & NETWORK_HAS_QOS_PARAMETERS) && + (network->qos_data.old_param_count != + network->qos_data.param_count)) { + network->qos_data.old_param_count = + network->qos_data.param_count; + schedule_work(&priv->qos_activate); + IPW_DEBUG_QOS("QoS parameters change call " + "qos_activate\n"); + } + } else { + if ((priv->ieee->mode == 
IEEE_B) || (network->mode == IEEE_B)) + memcpy(&network->qos_data.parameters, + &def_parameters_CCK, size); + else + memcpy(&network->qos_data.parameters, + &def_parameters_OFDM, size); + + if ((network->qos_data.active == 1) && (active_network == 1)) { + IPW_DEBUG_QOS("QoS was disabled call qos_activate \n"); + schedule_work(&priv->qos_activate); + } + + network->qos_data.active = 0; + network->qos_data.supported = 0; + } + if ((priv->status & STATUS_ASSOCIATED) && + (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) { + if (memcmp(network->bssid, priv->bssid, ETH_ALEN)) + if (network->capability & WLAN_CAPABILITY_IBSS) + if ((network->ssid_len == + priv->assoc_network->ssid_len) && + !memcmp(network->ssid, + priv->assoc_network->ssid, + network->ssid_len)) { + queue_work(priv->workqueue, + &priv->merge_networks); + } + } + + return 0; +} + +/* +* This function set up the firmware to support QoS. It sends +* IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO +*/ +static int ipw_qos_activate(struct ipw_priv *priv, + struct libipw_qos_data *qos_network_data) +{ + int err; + struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS]; + struct libipw_qos_parameters *active_one = NULL; + u32 size = sizeof(struct libipw_qos_parameters); + u32 burst_duration; + int i; + u8 type; + + type = ipw_qos_current_mode(priv); + + active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]); + memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size); + active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]); + memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size); + + if (qos_network_data == NULL) { + if (type == IEEE_B) { + IPW_DEBUG_QOS("QoS activate network mode %d\n", type); + active_one = &def_parameters_CCK; + } else + active_one = &def_parameters_OFDM; + + memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); + burst_duration = ipw_qos_get_burst_duration(priv); + for (i = 0; i < QOS_QUEUE_NUM; i++) + qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] = + cpu_to_le16(burst_duration); + } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { + if (type == IEEE_B) { + IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n", + type); + if (priv->qos_data.qos_enable == 0) + active_one = &def_parameters_CCK; + else + active_one = priv->qos_data.def_qos_parm_CCK; + } else { + if (priv->qos_data.qos_enable == 0) + active_one = &def_parameters_OFDM; + else + active_one = priv->qos_data.def_qos_parm_OFDM; + } + memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); + } else { + unsigned long flags; + int active; + + spin_lock_irqsave(&priv->ieee->lock, flags); + active_one = &(qos_network_data->parameters); + qos_network_data->old_param_count = + qos_network_data->param_count; + memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); + active = qos_network_data->supported; + spin_unlock_irqrestore(&priv->ieee->lock, flags); + + if (active == 0) { + burst_duration = ipw_qos_get_burst_duration(priv); + for (i = 0; i < QOS_QUEUE_NUM; i++) + qos_parameters[QOS_PARAM_SET_ACTIVE]. 
+ tx_op_limit[i] = cpu_to_le16(burst_duration); + } + } + + IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n"); + err = ipw_send_qos_params_command(priv, + (struct libipw_qos_parameters *) + &(qos_parameters[0])); + if (err) + IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n"); + + return err; +} + +/* +* send IPW_CMD_WME_INFO to the firmware +*/ +static int ipw_qos_set_info_element(struct ipw_priv *priv) +{ + int ret = 0; + struct libipw_qos_information_element qos_info; + + if (priv == NULL) + return -1; + + qos_info.elementID = QOS_ELEMENT_ID; + qos_info.length = sizeof(struct libipw_qos_information_element) - 2; + + qos_info.version = QOS_VERSION_1; + qos_info.ac_info = 0; + + memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN); + qos_info.qui_type = QOS_OUI_TYPE; + qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE; + + ret = ipw_send_qos_info_command(priv, &qos_info); + if (ret != 0) { + IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n"); + } + return ret; +} + +/* +* Set the QoS parameter with the association request structure +*/ +static int ipw_qos_association(struct ipw_priv *priv, + struct libipw_network *network) +{ + int err = 0; + struct libipw_qos_data *qos_data = NULL; + struct libipw_qos_data ibss_data = { + .supported = 1, + .active = 1, + }; + + switch (priv->ieee->iw_mode) { + case IW_MODE_ADHOC: + BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS)); + + qos_data = &ibss_data; + break; + + case IW_MODE_INFRA: + qos_data = &network->qos_data; + break; + + default: + BUG(); + break; + } + + err = ipw_qos_activate(priv, qos_data); + if (err) { + priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC; + return err; + } + + if (priv->qos_data.qos_enable && qos_data->supported) { + IPW_DEBUG_QOS("QoS will be enabled for this association\n"); + priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC; + return ipw_qos_set_info_element(priv); + } + + return 0; +} + +/* +* handling the beaconing responses. 
if we get different QoS setting +* off the network from the associated setting, adjust the QoS +* setting +*/ +static int ipw_qos_association_resp(struct ipw_priv *priv, + struct libipw_network *network) +{ + int ret = 0; + unsigned long flags; + u32 size = sizeof(struct libipw_qos_parameters); + int set_qos_param = 0; + + if ((priv == NULL) || (network == NULL) || + (priv->assoc_network == NULL)) + return ret; + + if (!(priv->status & STATUS_ASSOCIATED)) + return ret; + + if ((priv->ieee->iw_mode != IW_MODE_INFRA)) + return ret; + + spin_lock_irqsave(&priv->ieee->lock, flags); + if (network->flags & NETWORK_HAS_QOS_PARAMETERS) { + memcpy(&priv->assoc_network->qos_data, &network->qos_data, + sizeof(struct libipw_qos_data)); + priv->assoc_network->qos_data.active = 1; + if ((network->qos_data.old_param_count != + network->qos_data.param_count)) { + set_qos_param = 1; + network->qos_data.old_param_count = + network->qos_data.param_count; + } + + } else { + if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B)) + memcpy(&priv->assoc_network->qos_data.parameters, + &def_parameters_CCK, size); + else + memcpy(&priv->assoc_network->qos_data.parameters, + &def_parameters_OFDM, size); + priv->assoc_network->qos_data.active = 0; + priv->assoc_network->qos_data.supported = 0; + set_qos_param = 1; + } + + spin_unlock_irqrestore(&priv->ieee->lock, flags); + + if (set_qos_param == 1) + schedule_work(&priv->qos_activate); + + return ret; +} + +static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv) +{ + u32 ret = 0; + + if ((priv == NULL)) + return 0; + + if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION)) + ret = priv->qos_data.burst_duration_CCK; + else + ret = priv->qos_data.burst_duration_OFDM; + + return ret; +} + +/* +* Initialize the setting of QoS global +*/ +static void ipw_qos_init(struct ipw_priv *priv, int enable, + int burst_enable, u32 burst_duration_CCK, + u32 burst_duration_OFDM) +{ + priv->qos_data.qos_enable = enable; + + if (priv->qos_data.qos_enable) { + priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK; + priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM; + IPW_DEBUG_QOS("QoS is enabled\n"); + } else { + priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK; + priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM; + IPW_DEBUG_QOS("QoS is not enabled\n"); + } + + priv->qos_data.burst_enable = burst_enable; + + if (burst_enable) { + priv->qos_data.burst_duration_CCK = burst_duration_CCK; + priv->qos_data.burst_duration_OFDM = burst_duration_OFDM; + } else { + priv->qos_data.burst_duration_CCK = 0; + priv->qos_data.burst_duration_OFDM = 0; + } +} + +/* +* map the packet priority to the right TX Queue +*/ +static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority) +{ + if (priority > 7 || !priv->qos_data.qos_enable) + priority = 0; + + return from_priority_to_tx_queue[priority] - 1; +} + +static int ipw_is_qos_active(struct net_device *dev, + struct sk_buff *skb) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct libipw_qos_data *qos_data = NULL; + int active, supported; + u8 *daddr = skb->data + ETH_ALEN; + int unicast = !is_multicast_ether_addr(daddr); + + if (!(priv->status & STATUS_ASSOCIATED)) + return 0; + + qos_data = &priv->assoc_network->qos_data; + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) { + if (unicast == 0) + qos_data->active = 0; + else + qos_data->active = qos_data->supported; + } + active = qos_data->active; + supported = qos_data->supported; + IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d " + 
"unicast %d\n", + priv->qos_data.qos_enable, active, supported, unicast); + if (active && priv->qos_data.qos_enable) + return 1; + + return 0; + +} +/* +* add QoS parameter to the TX command +*/ +static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, + u16 priority, + struct tfd_data *tfd) +{ + int tx_queue_id = 0; + + + tx_queue_id = from_priority_to_tx_queue[priority] - 1; + tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED; + + if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) { + tfd->tx_flags &= ~DCT_FLAG_ACK_REQD; + tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK); + } + return 0; +} + +/* +* background support to run QoS activate functionality +*/ +static void ipw_bg_qos_activate(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, qos_activate); + + mutex_lock(&priv->mutex); + + if (priv->status & STATUS_ASSOCIATED) + ipw_qos_activate(priv, &(priv->assoc_network->qos_data)); + + mutex_unlock(&priv->mutex); +} + +static int ipw_handle_probe_response(struct net_device *dev, + struct libipw_probe_response *resp, + struct libipw_network *network) +{ + struct ipw_priv *priv = libipw_priv(dev); + int active_network = ((priv->status & STATUS_ASSOCIATED) && + (network == priv->assoc_network)); + + ipw_qos_handle_probe_response(priv, active_network, network); + + return 0; +} + +static int ipw_handle_beacon(struct net_device *dev, + struct libipw_beacon *resp, + struct libipw_network *network) +{ + struct ipw_priv *priv = libipw_priv(dev); + int active_network = ((priv->status & STATUS_ASSOCIATED) && + (network == priv->assoc_network)); + + ipw_qos_handle_probe_response(priv, active_network, network); + + return 0; +} + +static int ipw_handle_assoc_response(struct net_device *dev, + struct libipw_assoc_response *resp, + struct libipw_network *network) +{ + struct ipw_priv *priv = libipw_priv(dev); + ipw_qos_association_resp(priv, network); + return 0; +} + +static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters + *qos_param) +{ + return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS, + sizeof(*qos_param) * 3, qos_param); +} + +static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element + *qos_param) +{ + return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param), + qos_param); +} + +#endif /* CONFIG_IPW2200_QOS */ + +static int ipw_associate_network(struct ipw_priv *priv, + struct libipw_network *network, + struct ipw_supported_rates *rates, int roaming) +{ + int err; + DECLARE_SSID_BUF(ssid); + + if (priv->config & CFG_FIXED_RATE) + ipw_set_fixed_rate(priv, network->mode); + + if (!(priv->config & CFG_STATIC_ESSID)) { + priv->essid_len = min(network->ssid_len, + (u8) IW_ESSID_MAX_SIZE); + memcpy(priv->essid, network->ssid, priv->essid_len); + } + + network->last_associate = jiffies; + + memset(&priv->assoc_request, 0, sizeof(priv->assoc_request)); + priv->assoc_request.channel = network->channel; + priv->assoc_request.auth_key = 0; + + if ((priv->capability & CAP_PRIVACY_ON) && + (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) { + priv->assoc_request.auth_type = AUTH_SHARED_KEY; + priv->assoc_request.auth_key = priv->ieee->sec.active_key; + + if (priv->ieee->sec.level == SEC_LEVEL_1) + ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); + + } else if ((priv->capability & CAP_PRIVACY_ON) && + (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP)) + priv->assoc_request.auth_type = AUTH_LEAP; + else + priv->assoc_request.auth_type = AUTH_OPEN; + + if 
(priv->ieee->wpa_ie_len) { + priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */ + ipw_set_rsn_capa(priv, priv->ieee->wpa_ie, + priv->ieee->wpa_ie_len); + } + + /* + * It is valid for our ieee device to support multiple modes, but + * when it comes to associating to a given network we have to choose + * just one mode. + */ + if (network->mode & priv->ieee->mode & IEEE_A) + priv->assoc_request.ieee_mode = IPW_A_MODE; + else if (network->mode & priv->ieee->mode & IEEE_G) + priv->assoc_request.ieee_mode = IPW_G_MODE; + else if (network->mode & priv->ieee->mode & IEEE_B) + priv->assoc_request.ieee_mode = IPW_B_MODE; + + priv->assoc_request.capability = cpu_to_le16(network->capability); + if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + && !(priv->config & CFG_PREAMBLE_LONG)) { + priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE; + } else { + priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE; + + /* Clear the short preamble if we won't be supporting it */ + priv->assoc_request.capability &= + ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE); + } + + /* Clear capability bits that aren't used in Ad Hoc */ + if (priv->ieee->iw_mode == IW_MODE_ADHOC) + priv->assoc_request.capability &= + ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME); + + IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, " + "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n", + roaming ? "Rea" : "A", + print_ssid(ssid, priv->essid, priv->essid_len), + network->channel, + ipw_modes[priv->assoc_request.ieee_mode], + rates->num_rates, + (priv->assoc_request.preamble_length == + DCT_FLAG_LONG_PREAMBLE) ? "long" : "short", + network->capability & + WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long", + priv->capability & CAP_PRIVACY_ON ? "on " : "off", + priv->capability & CAP_PRIVACY_ON ? + (priv->capability & CAP_SHARED_KEY ? "(shared)" : + "(open)") : "", + priv->capability & CAP_PRIVACY_ON ? " key=" : "", + priv->capability & CAP_PRIVACY_ON ? + '1' + priv->ieee->sec.active_key : '.', + priv->capability & CAP_PRIVACY_ON ? '.' 
: ' '); + + priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval); + if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && + (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) { + priv->assoc_request.assoc_type = HC_IBSS_START; + priv->assoc_request.assoc_tsf_msw = 0; + priv->assoc_request.assoc_tsf_lsw = 0; + } else { + if (unlikely(roaming)) + priv->assoc_request.assoc_type = HC_REASSOCIATE; + else + priv->assoc_request.assoc_type = HC_ASSOCIATE; + priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]); + priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]); + } + + memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN); + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) { + memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN); + priv->assoc_request.atim_window = cpu_to_le16(network->atim_window); + } else { + memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN); + priv->assoc_request.atim_window = 0; + } + + priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval); + + err = ipw_send_ssid(priv, priv->essid, priv->essid_len); + if (err) { + IPW_DEBUG_HC("Attempt to send SSID command failed.\n"); + return err; + } + + rates->ieee_mode = priv->assoc_request.ieee_mode; + rates->purpose = IPW_RATE_CONNECT; + ipw_send_supported_rates(priv, rates); + + if (priv->assoc_request.ieee_mode == IPW_G_MODE) + priv->sys_config.dot11g_auto_detection = 1; + else + priv->sys_config.dot11g_auto_detection = 0; + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) + priv->sys_config.answer_broadcast_ssid_probe = 1; + else + priv->sys_config.answer_broadcast_ssid_probe = 0; + + err = ipw_send_system_config(priv); + if (err) { + IPW_DEBUG_HC("Attempt to send sys config command failed.\n"); + return err; + } + + IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi); + err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM); + if (err) { + IPW_DEBUG_HC("Attempt to send associate command failed.\n"); + return err; + } + + /* + * If preemption is enabled, it is possible for the association + * to complete before we return from ipw_send_associate. Therefore + * we have to be sure to update our private data first. + */ + priv->channel = network->channel; + memcpy(priv->bssid, network->bssid, ETH_ALEN); + priv->status |= STATUS_ASSOCIATING; + priv->status &= ~STATUS_SECURITY_UPDATED; + + priv->assoc_network = network; + +#ifdef CONFIG_IPW2200_QOS + ipw_qos_association(priv, network); +#endif + + err = ipw_send_associate(priv, &priv->assoc_request); + if (err) { + IPW_DEBUG_HC("Attempt to send associate command failed.\n"); + return err; + } + + IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n", + print_ssid(ssid, priv->essid, priv->essid_len), + priv->bssid); + + return 0; +} + +static void ipw_roam(void *data) +{ + struct ipw_priv *priv = data; + struct libipw_network *network = NULL; + struct ipw_network_match match = { + .network = priv->assoc_network + }; + + /* The roaming process is as follows: + * + * 1. Missed beacon threshold triggers the roaming process by + * setting the status ROAM bit and requesting a scan. + * 2. When the scan completes, it schedules the ROAM work + * 3. The ROAM work looks at all of the known networks for one that + * is a better network than the currently associated. If none + * found, the ROAM process is over (ROAM bit cleared) + * 4. If a better network is found, a disassociation request is + * sent. + * 5.
When the disassociation completes, the roam work is again + * scheduled. The second time through, the driver is no longer + * associated, and the newly selected network is sent an + * association request. + * 6. At this point ,the roaming process is complete and the ROAM + * status bit is cleared. + */ + + /* If we are no longer associated, and the roaming bit is no longer + * set, then we are not actively roaming, so just return */ + if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING))) + return; + + if (priv->status & STATUS_ASSOCIATED) { + /* First pass through ROAM process -- look for a better + * network */ + unsigned long flags; + u8 rssi = priv->assoc_network->stats.rssi; + priv->assoc_network->stats.rssi = -128; + spin_lock_irqsave(&priv->ieee->lock, flags); + list_for_each_entry(network, &priv->ieee->network_list, list) { + if (network != priv->assoc_network) + ipw_best_network(priv, &match, network, 1); + } + spin_unlock_irqrestore(&priv->ieee->lock, flags); + priv->assoc_network->stats.rssi = rssi; + + if (match.network == priv->assoc_network) { + IPW_DEBUG_ASSOC("No better APs in this network to " + "roam to.\n"); + priv->status &= ~STATUS_ROAMING; + ipw_debug_config(priv); + return; + } + + ipw_send_disassociate(priv, 1); + priv->assoc_network = match.network; + + return; + } + + /* Second pass through ROAM process -- request association */ + ipw_compatible_rates(priv, priv->assoc_network, &match.rates); + ipw_associate_network(priv, priv->assoc_network, &match.rates, 1); + priv->status &= ~STATUS_ROAMING; +} + +static void ipw_bg_roam(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, roam); + mutex_lock(&priv->mutex); + ipw_roam(priv); + mutex_unlock(&priv->mutex); +} + +static int ipw_associate(void *data) +{ + struct ipw_priv *priv = data; + + struct libipw_network *network = NULL; + struct ipw_network_match match = { + .network = NULL + }; + struct ipw_supported_rates *rates; + struct list_head *element; + unsigned long flags; + DECLARE_SSID_BUF(ssid); + + if (priv->ieee->iw_mode == IW_MODE_MONITOR) { + IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n"); + return 0; + } + + if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { + IPW_DEBUG_ASSOC("Not attempting association (already in " + "progress)\n"); + return 0; + } + + if (priv->status & STATUS_DISASSOCIATING) { + IPW_DEBUG_ASSOC("Not attempting association (in " + "disassociating)\n "); + queue_work(priv->workqueue, &priv->associate); + return 0; + } + + if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) { + IPW_DEBUG_ASSOC("Not attempting association (scanning or not " + "initialized)\n"); + return 0; + } + + if (!(priv->config & CFG_ASSOCIATE) && + !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) { + IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n"); + return 0; + } + + /* Protect our use of the network_list */ + spin_lock_irqsave(&priv->ieee->lock, flags); + list_for_each_entry(network, &priv->ieee->network_list, list) + ipw_best_network(priv, &match, network, 0); + + network = match.network; + rates = &match.rates; + + if (network == NULL && + priv->ieee->iw_mode == IW_MODE_ADHOC && + priv->config & CFG_ADHOC_CREATE && + priv->config & CFG_STATIC_ESSID && + priv->config & CFG_STATIC_CHANNEL) { + /* Use oldest network if the free list is empty */ + if (list_empty(&priv->ieee->network_free_list)) { + struct libipw_network *oldest = NULL; + struct libipw_network *target; + + list_for_each_entry(target, 
&priv->ieee->network_list, list) { + if ((oldest == NULL) || + (target->last_scanned < oldest->last_scanned)) + oldest = target; + } + + /* If there are no more slots, expire the oldest */ + list_del(&oldest->list); + target = oldest; + IPW_DEBUG_ASSOC("Expired '%s' (%pM) from " + "network list.\n", + print_ssid(ssid, target->ssid, + target->ssid_len), + target->bssid); + list_add_tail(&target->list, + &priv->ieee->network_free_list); + } + + element = priv->ieee->network_free_list.next; + network = list_entry(element, struct libipw_network, list); + ipw_adhoc_create(priv, network); + rates = &priv->rates; + list_del(element); + list_add_tail(&network->list, &priv->ieee->network_list); + } + spin_unlock_irqrestore(&priv->ieee->lock, flags); + + /* If we reached the end of the list, then we don't have any valid + * matching APs */ + if (!network) { + ipw_debug_config(priv); + + if (!(priv->status & STATUS_SCANNING)) { + if (!(priv->config & CFG_SPEED_SCAN)) + queue_delayed_work(priv->workqueue, + &priv->request_scan, + SCAN_INTERVAL); + else + queue_delayed_work(priv->workqueue, + &priv->request_scan, 0); + } + + return 0; + } + + ipw_associate_network(priv, network, rates, 0); + + return 1; +} + +static void ipw_bg_associate(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, associate); + mutex_lock(&priv->mutex); + ipw_associate(priv); + mutex_unlock(&priv->mutex); +} + +static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + u16 fc; + + hdr = (struct ieee80211_hdr *)skb->data; + fc = le16_to_cpu(hdr->frame_control); + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return; + + fc &= ~IEEE80211_FCTL_PROTECTED; + hdr->frame_control = cpu_to_le16(fc); + switch (priv->ieee->sec.level) { + case SEC_LEVEL_3: + /* Remove CCMP HDR */ + memmove(skb->data + LIBIPW_3ADDR_LEN, + skb->data + LIBIPW_3ADDR_LEN + 8, + skb->len - LIBIPW_3ADDR_LEN - 8); + skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */ + break; + case SEC_LEVEL_2: + break; + case SEC_LEVEL_1: + /* Remove IV */ + memmove(skb->data + LIBIPW_3ADDR_LEN, + skb->data + LIBIPW_3ADDR_LEN + 4, + skb->len - LIBIPW_3ADDR_LEN - 4); + skb_trim(skb, skb->len - 8); /* IV + ICV */ + break; + case SEC_LEVEL_0: + break; + default: + printk(KERN_ERR "Unknown security level %d\n", + priv->ieee->sec.level); + break; + } +} + +static void ipw_handle_data_packet(struct ipw_priv *priv, + struct ipw_rx_mem_buffer *rxb, + struct libipw_rx_stats *stats) +{ + struct net_device *dev = priv->net_dev; + struct libipw_hdr_4addr *hdr; + struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; + + /* We received data from the HW, so stop the watchdog */ + dev->trans_start = jiffies; + + /* We only process data packets if the + * interface is open */ + if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) > + skb_tailroom(rxb->skb))) { + dev->stats.rx_errors++; + priv->wstats.discard.misc++; + IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); + return; + } else if (unlikely(!netif_running(priv->net_dev))) { + dev->stats.rx_dropped++; + priv->wstats.discard.misc++; + IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); + return; + } + + /* Advance skb->data to the start of the actual payload */ + skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data)); + + /* Set the size of the skb to the size of the frame */ + skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length)); + + IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); + + /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */ + hdr = (struct libipw_hdr_4addr *)rxb->skb->data; + if (priv->ieee->iw_mode != IW_MODE_MONITOR && + (is_multicast_ether_addr(hdr->addr1) ? + !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt)) + ipw_rebuild_decrypted_skb(priv, rxb->skb); + + if (!libipw_rx(priv->ieee, rxb->skb, stats)) + dev->stats.rx_errors++; + else { /* libipw_rx succeeded, so it now owns the SKB */ + rxb->skb = NULL; + __ipw_led_activity_on(priv); + } +} + +#ifdef CONFIG_IPW2200_RADIOTAP +static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, + struct ipw_rx_mem_buffer *rxb, + struct libipw_rx_stats *stats) +{ + struct net_device *dev = priv->net_dev; + struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; + struct ipw_rx_frame *frame = &pkt->u.frame; + + /* initial pull of some data */ + u16 received_channel = frame->received_channel; + u8 antennaAndPhy = frame->antennaAndPhy; + s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */ + u16 pktrate = frame->rate; + + /* Magic struct that slots into the radiotap header -- no reason + * to build this manually element by element, we can write it much + * more efficiently than we can parse it. ORDER MATTERS HERE */ + struct ipw_rt_hdr *ipw_rt; + + short len = le16_to_cpu(pkt->u.frame.length); + + /* We received data from the HW, so stop the watchdog */ + dev->trans_start = jiffies; + + /* We only process data packets if the + * interface is open */ + if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) > + skb_tailroom(rxb->skb))) { + dev->stats.rx_errors++; + priv->wstats.discard.misc++; + IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); + return; + } else if (unlikely(!netif_running(priv->net_dev))) { + dev->stats.rx_dropped++; + priv->wstats.discard.misc++; + IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); + return; + } + + /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use + * that now */ + if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { + /* FIXME: Should alloc bigger skb instead */ + dev->stats.rx_dropped++; + priv->wstats.discard.misc++; + IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); + return; + } + + /* copy the frame itself */ + memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr), + rxb->skb->data + IPW_RX_FRAME_SIZE, len); + + ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data; + + ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; + ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ + ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */ + + /* Big bitfield of all the fields we provide in radiotap */ + ipw_rt->rt_hdr.it_present = cpu_to_le32( + (1 << IEEE80211_RADIOTAP_TSFT) | + (1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_RATE) | + (1 << IEEE80211_RADIOTAP_CHANNEL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | + (1 << IEEE80211_RADIOTAP_ANTENNA)); + + /* Zero the flags, we'll add to them as we go */ + ipw_rt->rt_flags = 0; + ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 | + frame->parent_tsf[2] << 16 | + frame->parent_tsf[1] << 8 | + frame->parent_tsf[0]); + + /* Convert signal to DBM */ + ipw_rt->rt_dbmsignal = antsignal; + ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise); + + /* Convert the channel data and set the flags */ + ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel)); + if (received_channel > 14) { /* 802.11a */ + ipw_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); + } else if (antennaAndPhy & 32) { /* 802.11b */ + ipw_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); + } else { /* 802.11g */ + ipw_rt->rt_chbitmask = + cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); + } + + /* set the rate in multiples of 500k/s */ + switch (pktrate) { + case IPW_TX_RATE_1MB: + ipw_rt->rt_rate = 2; + break; + case IPW_TX_RATE_2MB: + ipw_rt->rt_rate = 4; + break; + case IPW_TX_RATE_5MB: + ipw_rt->rt_rate = 10; + break; + case IPW_TX_RATE_6MB: + ipw_rt->rt_rate = 12; + break; + case IPW_TX_RATE_9MB: + ipw_rt->rt_rate = 18; + break; + case IPW_TX_RATE_11MB: + ipw_rt->rt_rate = 22; + break; + case IPW_TX_RATE_12MB: + ipw_rt->rt_rate = 24; + break; + case IPW_TX_RATE_18MB: + ipw_rt->rt_rate = 36; + break; + case IPW_TX_RATE_24MB: + ipw_rt->rt_rate = 48; + break; + case IPW_TX_RATE_36MB: + ipw_rt->rt_rate = 72; + break; + case IPW_TX_RATE_48MB: + ipw_rt->rt_rate = 96; + break; + case IPW_TX_RATE_54MB: + ipw_rt->rt_rate = 108; + break; + default: + ipw_rt->rt_rate = 0; + break; + } + + /* antenna number */ + ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? 
*/ + + /* set the preamble flag if we have it */ + if ((antennaAndPhy & 64)) + ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + + /* Set the size of the skb to the size of the frame */ + skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr)); + + IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); + + if (!libipw_rx(priv->ieee, rxb->skb, stats)) + dev->stats.rx_errors++; + else { /* libipw_rx succeeded, so it now owns the SKB */ + rxb->skb = NULL; + /* no LED during capture */ + } +} +#endif + +#ifdef CONFIG_IPW2200_PROMISCUOUS +#define libipw_is_probe_response(fc) \ + ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \ + (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP ) + +#define libipw_is_management(fc) \ + ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) + +#define libipw_is_control(fc) \ + ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) + +#define libipw_is_data(fc) \ + ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) + +#define libipw_is_assoc_request(fc) \ + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ) + +#define libipw_is_reassoc_request(fc) \ + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) + +static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, + struct ipw_rx_mem_buffer *rxb, + struct libipw_rx_stats *stats) +{ + struct net_device *dev = priv->prom_net_dev; + struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; + struct ipw_rx_frame *frame = &pkt->u.frame; + struct ipw_rt_hdr *ipw_rt; + + /* First cache any information we need before we overwrite + * the information provided in the skb from the hardware */ + struct ieee80211_hdr *hdr; + u16 channel = frame->received_channel; + u8 phy_flags = frame->antennaAndPhy; + s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM; + s8 noise = (s8) le16_to_cpu(frame->noise); + u8 rate = frame->rate; + short len = le16_to_cpu(pkt->u.frame.length); + struct sk_buff *skb; + int hdr_only = 0; + u16 filter = priv->prom_priv->filter; + + /* If the filter is set to not include Rx frames then return */ + if (filter & IPW_PROM_NO_RX) + return; + + /* We received data from the HW, so stop the watchdog */ + dev->trans_start = jiffies; + + if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { + dev->stats.rx_errors++; + IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); + return; + } + + /* We only process data packets if the interface is open */ + if (unlikely(!netif_running(dev))) { + dev->stats.rx_dropped++; + IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); + return; + } + + /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use + * that now */ + if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { + /* FIXME: Should alloc bigger skb instead */ + dev->stats.rx_dropped++; + IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); + return; + } + + hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE; + if (libipw_is_management(le16_to_cpu(hdr->frame_control))) { + if (filter & IPW_PROM_NO_MGMT) + return; + if (filter & IPW_PROM_MGMT_HEADER_ONLY) + hdr_only = 1; + } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) { + if (filter & IPW_PROM_NO_CTL) + return; + if (filter & IPW_PROM_CTL_HEADER_ONLY) + hdr_only = 1; + } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) { + if (filter & IPW_PROM_NO_DATA) + return; + if (filter & IPW_PROM_DATA_HEADER_ONLY) + hdr_only = 1; + } + + /* Copy the SKB since this is for the promiscuous side */ + skb = skb_copy(rxb->skb, GFP_ATOMIC); + if (skb == NULL) { + IPW_ERROR("skb_clone failed for promiscuous copy.\n"); + return; + } + + /* copy the frame data to write after where the radiotap header goes */ + ipw_rt = (void *)skb->data; + + if (hdr_only) + len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control)); + + memcpy(ipw_rt->payload, hdr, len); + + ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; + ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ + ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */ + + /* Set the size of the skb to the size of the frame */ + skb_put(skb, sizeof(*ipw_rt) + len); + + /* Big bitfield of all the fields we provide in radiotap */ + ipw_rt->rt_hdr.it_present = cpu_to_le32( + (1 << IEEE80211_RADIOTAP_TSFT) | + (1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_RATE) | + (1 << IEEE80211_RADIOTAP_CHANNEL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | + (1 << IEEE80211_RADIOTAP_ANTENNA)); + + /* Zero the flags, we'll add to them as we go */ + ipw_rt->rt_flags = 0; + ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 | + frame->parent_tsf[2] << 16 | + frame->parent_tsf[1] << 8 | + frame->parent_tsf[0]); + + /* Convert to DBM */ + ipw_rt->rt_dbmsignal = signal; + ipw_rt->rt_dbmnoise = noise; + + /* Convert the channel data and set the flags */ + ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel)); + if (channel > 14) { /* 802.11a */ + ipw_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); + } else if (phy_flags & (1 << 5)) { /* 802.11b */ + ipw_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); + } else { /* 802.11g */ + ipw_rt->rt_chbitmask = + cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); + } + + /* set the rate in multiples of 500k/s */ + switch (rate) { + case IPW_TX_RATE_1MB: + ipw_rt->rt_rate = 2; + break; + case IPW_TX_RATE_2MB: + ipw_rt->rt_rate = 4; + break; + case IPW_TX_RATE_5MB: + ipw_rt->rt_rate = 10; + break; + case IPW_TX_RATE_6MB: + ipw_rt->rt_rate = 12; + break; + case IPW_TX_RATE_9MB: + ipw_rt->rt_rate = 18; + break; + case IPW_TX_RATE_11MB: + ipw_rt->rt_rate = 22; + break; + case IPW_TX_RATE_12MB: + ipw_rt->rt_rate = 24; + break; + case IPW_TX_RATE_18MB: + ipw_rt->rt_rate = 36; + break; + case IPW_TX_RATE_24MB: + ipw_rt->rt_rate = 48; + break; + case 
IPW_TX_RATE_36MB: + ipw_rt->rt_rate = 72; + break; + case IPW_TX_RATE_48MB: + ipw_rt->rt_rate = 96; + break; + case IPW_TX_RATE_54MB: + ipw_rt->rt_rate = 108; + break; + default: + ipw_rt->rt_rate = 0; + break; + } + + /* antenna number */ + ipw_rt->rt_antenna = (phy_flags & 3); + + /* set the preamble flag if we have it */ + if (phy_flags & (1 << 6)) + ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + + IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len); + + if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) { + dev->stats.rx_errors++; + dev_kfree_skb_any(skb); + } +} +#endif + +static int is_network_packet(struct ipw_priv *priv, + struct libipw_hdr_4addr *header) +{ + /* Filter incoming packets to determine if they are targetted toward + * this network, discarding packets coming from ourselves */ + switch (priv->ieee->iw_mode) { + case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */ + /* packets from our adapter are dropped (echo) */ + if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN)) + return 0; + + /* {broad,multi}cast packets to our BSSID go through */ + if (is_multicast_ether_addr(header->addr1)) + return !memcmp(header->addr3, priv->bssid, ETH_ALEN); + + /* packets to our adapter go through */ + return !memcmp(header->addr1, priv->net_dev->dev_addr, + ETH_ALEN); + + case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */ + /* packets from our adapter are dropped (echo) */ + if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN)) + return 0; + + /* {broad,multi}cast packets to our BSS go through */ + if (is_multicast_ether_addr(header->addr1)) + return !memcmp(header->addr2, priv->bssid, ETH_ALEN); + + /* packets to our adapter go through */ + return !memcmp(header->addr1, priv->net_dev->dev_addr, + ETH_ALEN); + } + + return 1; +} + +#define IPW_PACKET_RETRY_TIME HZ + +static int is_duplicate_packet(struct ipw_priv *priv, + struct libipw_hdr_4addr *header) +{ + u16 sc = le16_to_cpu(header->seq_ctl); + u16 seq = WLAN_GET_SEQ_SEQ(sc); + u16 frag = WLAN_GET_SEQ_FRAG(sc); + u16 *last_seq, *last_frag; + unsigned long *last_time; + + switch (priv->ieee->iw_mode) { + case IW_MODE_ADHOC: + { + struct list_head *p; + struct ipw_ibss_seq *entry = NULL; + u8 *mac = header->addr2; + int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE; + + __list_for_each(p, &priv->ibss_mac_hash[index]) { + entry = + list_entry(p, struct ipw_ibss_seq, list); + if (!memcmp(entry->mac, mac, ETH_ALEN)) + break; + } + if (p == &priv->ibss_mac_hash[index]) { + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) { + IPW_ERROR + ("Cannot malloc new mac entry\n"); + return 0; + } + memcpy(entry->mac, mac, ETH_ALEN); + entry->seq_num = seq; + entry->frag_num = frag; + entry->packet_time = jiffies; + list_add(&entry->list, + &priv->ibss_mac_hash[index]); + return 0; + } + last_seq = &entry->seq_num; + last_frag = &entry->frag_num; + last_time = &entry->packet_time; + break; + } + case IW_MODE_INFRA: + last_seq = &priv->last_seq_num; + last_frag = &priv->last_frag_num; + last_time = &priv->last_packet_time; + break; + default: + return 0; + } + if ((*last_seq == seq) && + time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) { + if (*last_frag == frag) + goto drop; + if (*last_frag + 1 != frag) + /* out-of-order fragment */ + goto drop; + } else + *last_seq = seq; + + *last_frag = frag; + *last_time = jiffies; + return 0; + + drop: + /* Comment this line now since we observed the card receives + * duplicate packets but the FCTL_RETRY bit is not set in the + * IBSS mode with fragmentation 
enabled. + BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */ + return 1; +} + +static void ipw_handle_mgmt_packet(struct ipw_priv *priv, + struct ipw_rx_mem_buffer *rxb, + struct libipw_rx_stats *stats) +{ + struct sk_buff *skb = rxb->skb; + struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data; + struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *) + (skb->data + IPW_RX_FRAME_SIZE); + + libipw_rx_mgt(priv->ieee, header, stats); + + if (priv->ieee->iw_mode == IW_MODE_ADHOC && + ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) == + IEEE80211_STYPE_PROBE_RESP) || + (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) == + IEEE80211_STYPE_BEACON))) { + if (!memcmp(header->addr3, priv->bssid, ETH_ALEN)) + ipw_add_station(priv, header->addr2); + } + + if (priv->config & CFG_NET_STATS) { + IPW_DEBUG_HC("sending stat packet\n"); + + /* Set the size of the skb to the size of the full + * ipw header and 802.11 frame */ + skb_put(skb, le16_to_cpu(pkt->u.frame.length) + + IPW_RX_FRAME_SIZE); + + /* Advance past the ipw packet header to the 802.11 frame */ + skb_pull(skb, IPW_RX_FRAME_SIZE); + + /* Push the libipw_rx_stats before the 802.11 frame */ + memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats)); + + skb->dev = priv->ieee->dev; + + /* Point raw at the libipw_stats */ + skb_reset_mac_header(skb); + + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = cpu_to_be16(ETH_P_80211_STATS); + memset(skb->cb, 0, sizeof(rxb->skb->cb)); + netif_rx(skb); + rxb->skb = NULL; + } +} + +/* + * Main entry function for recieving a packet with 80211 headers. This + * should be called when ever the FW has notified us that there is a new + * skb in the recieve queue. + */ +static void ipw_rx(struct ipw_priv *priv) +{ + struct ipw_rx_mem_buffer *rxb; + struct ipw_rx_packet *pkt; + struct libipw_hdr_4addr *header; + u32 r, w, i; + u8 network_packet; + u8 fill_rx = 0; + + r = ipw_read32(priv, IPW_RX_READ_INDEX); + w = ipw_read32(priv, IPW_RX_WRITE_INDEX); + i = priv->rxq->read; + + if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + + while (i != r) { + rxb = priv->rxq->queue[i]; + if (unlikely(rxb == NULL)) { + printk(KERN_CRIT "Queue not allocated!\n"); + break; + } + priv->rxq->queue[i] = NULL; + + pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, + IPW_RX_BUF_SIZE, + PCI_DMA_FROMDEVICE); + + pkt = (struct ipw_rx_packet *)rxb->skb->data; + IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n", + pkt->header.message_type, + pkt->header.rx_seq_num, pkt->header.control_bits); + + switch (pkt->header.message_type) { + case RX_FRAME_TYPE: /* 802.11 frame */ { + struct libipw_rx_stats stats = { + .rssi = pkt->u.frame.rssi_dbm - + IPW_RSSI_TO_DBM, + .signal = + pkt->u.frame.rssi_dbm - + IPW_RSSI_TO_DBM + 0x100, + .noise = + le16_to_cpu(pkt->u.frame.noise), + .rate = pkt->u.frame.rate, + .mac_time = jiffies, + .received_channel = + pkt->u.frame.received_channel, + .freq = + (pkt->u.frame. + control & (1 << 0)) ? 
+ LIBIPW_24GHZ_BAND : + LIBIPW_52GHZ_BAND, + .len = le16_to_cpu(pkt->u.frame.length), + }; + + if (stats.rssi != 0) + stats.mask |= LIBIPW_STATMASK_RSSI; + if (stats.signal != 0) + stats.mask |= LIBIPW_STATMASK_SIGNAL; + if (stats.noise != 0) + stats.mask |= LIBIPW_STATMASK_NOISE; + if (stats.rate != 0) + stats.mask |= LIBIPW_STATMASK_RATE; + + priv->rx_packets++; + +#ifdef CONFIG_IPW2200_PROMISCUOUS + if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) + ipw_handle_promiscuous_rx(priv, rxb, &stats); +#endif + +#ifdef CONFIG_IPW2200_MONITOR + if (priv->ieee->iw_mode == IW_MODE_MONITOR) { +#ifdef CONFIG_IPW2200_RADIOTAP + + ipw_handle_data_packet_monitor(priv, + rxb, + &stats); +#else + ipw_handle_data_packet(priv, rxb, + &stats); +#endif + break; + } +#endif + + header = + (struct libipw_hdr_4addr *)(rxb->skb-> + data + + IPW_RX_FRAME_SIZE); + /* TODO: Check Ad-Hoc dest/source and make sure + * that we are actually parsing these packets + * correctly -- we should probably use the + * frame control of the packet and disregard + * the current iw_mode */ + + network_packet = + is_network_packet(priv, header); + if (network_packet && priv->assoc_network) { + priv->assoc_network->stats.rssi = + stats.rssi; + priv->exp_avg_rssi = + exponential_average(priv->exp_avg_rssi, + stats.rssi, DEPTH_RSSI); + } + + IPW_DEBUG_RX("Frame: len=%u\n", + le16_to_cpu(pkt->u.frame.length)); + + if (le16_to_cpu(pkt->u.frame.length) < + libipw_get_hdrlen(le16_to_cpu( + header->frame_ctl))) { + IPW_DEBUG_DROP + ("Received packet is too small. " + "Dropping.\n"); + priv->net_dev->stats.rx_errors++; + priv->wstats.discard.misc++; + break; + } + + switch (WLAN_FC_GET_TYPE + (le16_to_cpu(header->frame_ctl))) { + + case IEEE80211_FTYPE_MGMT: + ipw_handle_mgmt_packet(priv, rxb, + &stats); + break; + + case IEEE80211_FTYPE_CTL: + break; + + case IEEE80211_FTYPE_DATA: + if (unlikely(!network_packet || + is_duplicate_packet(priv, + header))) + { + IPW_DEBUG_DROP("Dropping: " + "%pM, " + "%pM, " + "%pM\n", + header->addr1, + header->addr2, + header->addr3); + break; + } + + ipw_handle_data_packet(priv, rxb, + &stats); + + break; + } + break; + } + + case RX_HOST_NOTIFICATION_TYPE:{ + IPW_DEBUG_RX + ("Notification: subtype=%02X flags=%02X size=%d\n", + pkt->u.notification.subtype, + pkt->u.notification.flags, + le16_to_cpu(pkt->u.notification.size)); + ipw_rx_notification(priv, &pkt->u.notification); + break; + } + + default: + IPW_DEBUG_RX("Bad Rx packet of type %d\n", + pkt->header.message_type); + break; + } + + /* For now we just don't re-use anything. 
We can tweak this + * later to try and re-use notification packets and SKBs that + * fail to Rx correctly */ + if (rxb->skb != NULL) { + dev_kfree_skb_any(rxb->skb); + rxb->skb = NULL; + } + + pci_unmap_single(priv->pci_dev, rxb->dma_addr, + IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &priv->rxq->rx_used); + + i = (i + 1) % RX_QUEUE_SIZE; + + /* If there are a lot of unsued frames, restock the Rx queue + * so the ucode won't assert */ + if (fill_rx) { + priv->rxq->read = i; + ipw_rx_queue_replenish(priv); + } + } + + /* Backtrack one entry */ + priv->rxq->read = i; + ipw_rx_queue_restock(priv); +} + +#define DEFAULT_RTS_THRESHOLD 2304U +#define MIN_RTS_THRESHOLD 1U +#define MAX_RTS_THRESHOLD 2304U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +/** + * ipw_sw_reset + * @option: options to control different reset behaviour + * 0 = reset everything except the 'disable' module_param + * 1 = reset everything and print out driver info (for probe only) + * 2 = reset everything + */ +static int ipw_sw_reset(struct ipw_priv *priv, int option) +{ + int band, modulation; + int old_mode = priv->ieee->iw_mode; + + /* Initialize module parameter values here */ + priv->config = 0; + + /* We default to disabling the LED code as right now it causes + * too many systems to lock up... */ + if (!led_support) + priv->config |= CFG_NO_LED; + + if (associate) + priv->config |= CFG_ASSOCIATE; + else + IPW_DEBUG_INFO("Auto associate disabled.\n"); + + if (auto_create) + priv->config |= CFG_ADHOC_CREATE; + else + IPW_DEBUG_INFO("Auto adhoc creation disabled.\n"); + + priv->config &= ~CFG_STATIC_ESSID; + priv->essid_len = 0; + memset(priv->essid, 0, IW_ESSID_MAX_SIZE); + + if (disable && option) { + priv->status |= STATUS_RF_KILL_SW; + IPW_DEBUG_INFO("Radio disabled.\n"); + } + + if (default_channel != 0) { + priv->config |= CFG_STATIC_CHANNEL; + priv->channel = default_channel; + IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel); + /* TODO: Validate that provided channel is in range */ + } +#ifdef CONFIG_IPW2200_QOS + ipw_qos_init(priv, qos_enable, qos_burst_enable, + burst_duration_CCK, burst_duration_OFDM); +#endif /* CONFIG_IPW2200_QOS */ + + switch (network_mode) { + case 1: + priv->ieee->iw_mode = IW_MODE_ADHOC; + priv->net_dev->type = ARPHRD_ETHER; + + break; +#ifdef CONFIG_IPW2200_MONITOR + case 2: + priv->ieee->iw_mode = IW_MODE_MONITOR; +#ifdef CONFIG_IPW2200_RADIOTAP + priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; +#else + priv->net_dev->type = ARPHRD_IEEE80211; +#endif + break; +#endif + default: + case 0: + priv->net_dev->type = ARPHRD_ETHER; + priv->ieee->iw_mode = IW_MODE_INFRA; + break; + } + + if (hwcrypto) { + priv->ieee->host_encrypt = 0; + priv->ieee->host_encrypt_msdu = 0; + priv->ieee->host_decrypt = 0; + priv->ieee->host_mc_decrypt = 0; + } + IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off"); + + /* IPW2200/2915 is abled to do hardware fragmentation. 
*/ + priv->ieee->host_open_frag = 0; + + if ((priv->pci_dev->device == 0x4223) || + (priv->pci_dev->device == 0x4224)) { + if (option == 1) + printk(KERN_INFO DRV_NAME + ": Detected Intel PRO/Wireless 2915ABG Network " + "Connection\n"); + priv->ieee->abg_true = 1; + band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND; + modulation = LIBIPW_OFDM_MODULATION | + LIBIPW_CCK_MODULATION; + priv->adapter = IPW_2915ABG; + priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B; + } else { + if (option == 1) + printk(KERN_INFO DRV_NAME + ": Detected Intel PRO/Wireless 2200BG Network " + "Connection\n"); + + priv->ieee->abg_true = 0; + band = LIBIPW_24GHZ_BAND; + modulation = LIBIPW_OFDM_MODULATION | + LIBIPW_CCK_MODULATION; + priv->adapter = IPW_2200BG; + priv->ieee->mode = IEEE_G | IEEE_B; + } + + priv->ieee->freq_band = band; + priv->ieee->modulation = modulation; + + priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK; + + priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; + priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; + + priv->rts_threshold = DEFAULT_RTS_THRESHOLD; + priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT; + priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT; + + /* If power management is turned on, default to AC mode */ + priv->power_mode = IPW_POWER_AC; + priv->tx_power = IPW_TX_POWER_DEFAULT; + + return old_mode == priv->ieee->iw_mode; +} + +/* + * This file defines the Wireless Extension handlers. It does not + * define any methods of hardware manipulation and relies on the + * functions defined in ipw_main to provide the HW interaction. + * + * The exception to this is the use of the ipw_get_ordinal() + * function used to poll the hardware vs. making unecessary calls. + * + */ + +static int ipw_set_channel(struct ipw_priv *priv, u8 channel) +{ + if (channel == 0) { + IPW_DEBUG_INFO("Setting channel to ANY (0)\n"); + priv->config &= ~CFG_STATIC_CHANNEL; + IPW_DEBUG_ASSOC("Attempting to associate with new " + "parameters.\n"); + ipw_associate(priv); + return 0; + } + + priv->config |= CFG_STATIC_CHANNEL; + + if (priv->channel == channel) { + IPW_DEBUG_INFO("Request to set channel to current value (%d)\n", + channel); + return 0; + } + + IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel); + priv->channel = channel; + +#ifdef CONFIG_IPW2200_MONITOR + if (priv->ieee->iw_mode == IW_MODE_MONITOR) { + int i; + if (priv->status & STATUS_SCANNING) { + IPW_DEBUG_SCAN("Scan abort triggered due to " + "channel change.\n"); + ipw_abort_scan(priv); + } + + for (i = 1000; i && (priv->status & STATUS_SCANNING); i--) + udelay(10); + + if (priv->status & STATUS_SCANNING) + IPW_DEBUG_SCAN("Still scanning...\n"); + else + IPW_DEBUG_SCAN("Took %dms to abort current scan\n", + 1000 - i); + + return 0; + } +#endif /* CONFIG_IPW2200_MONITOR */ + + /* Network configuration changed -- force [re]association */ + IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n"); + if (!ipw_disassociate(priv)) + ipw_associate(priv); + + return 0; +} + +static int ipw_wx_set_freq(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + const struct libipw_geo *geo = libipw_get_geo(priv->ieee); + struct iw_freq *fwrq = &wrqu->freq; + int ret = 0, i; + u8 channel, flags; + int band; + + if (fwrq->m == 0) { + IPW_DEBUG_WX("SET Freq/Channel -> any\n"); + mutex_lock(&priv->mutex); + ret = ipw_set_channel(priv, 0); + mutex_unlock(&priv->mutex); + return ret; + } + /* if setting by freq convert to channel */ + 
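+ /* Wireless Extensions encode an iw_freq as m * 10^e Hz: here e == 1
+ * carries a frequency that is mapped back to a channel, while any
+ * other exponent is taken as a raw channel number in fwrq->m. The
+ * matching get_freq handler below reports, e.g., channel 6 (2437 MHz)
+ * as m = 2437 * 100000 with e = 1, i.e. 2.437 GHz. */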
if (fwrq->e == 1) { + channel = libipw_freq_to_channel(priv->ieee, fwrq->m); + if (channel == 0) + return -EINVAL; + } else + channel = fwrq->m; + + if (!(band = libipw_is_valid_channel(priv->ieee, channel))) + return -EINVAL; + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) { + i = libipw_channel_to_index(priv->ieee, channel); + if (i == -1) + return -EINVAL; + + flags = (band == LIBIPW_24GHZ_BAND) ? + geo->bg[i].flags : geo->a[i].flags; + if (flags & LIBIPW_CH_PASSIVE_ONLY) { + IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n"); + return -EINVAL; + } + } + + IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); + mutex_lock(&priv->mutex); + ret = ipw_set_channel(priv, channel); + mutex_unlock(&priv->mutex); + return ret; +} + +static int ipw_wx_get_freq(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + + wrqu->freq.e = 0; + + /* If we are associated, trying to associate, or have a statically + * configured CHANNEL then return that; otherwise return ANY */ + mutex_lock(&priv->mutex); + if (priv->config & CFG_STATIC_CHANNEL || + priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) { + int i; + + i = libipw_channel_to_index(priv->ieee, priv->channel); + BUG_ON(i == -1); + wrqu->freq.e = 1; + + switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { + case LIBIPW_52GHZ_BAND: + wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000; + break; + + case LIBIPW_24GHZ_BAND: + wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000; + break; + + default: + BUG(); + } + } else + wrqu->freq.m = 0; + + mutex_unlock(&priv->mutex); + IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); + return 0; +} + +static int ipw_wx_set_mode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + int err = 0; + + IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode); + + switch (wrqu->mode) { +#ifdef CONFIG_IPW2200_MONITOR + case IW_MODE_MONITOR: +#endif + case IW_MODE_ADHOC: + case IW_MODE_INFRA: + break; + case IW_MODE_AUTO: + wrqu->mode = IW_MODE_INFRA; + break; + default: + return -EINVAL; + } + if (wrqu->mode == priv->ieee->iw_mode) + return 0; + + mutex_lock(&priv->mutex); + + ipw_sw_reset(priv, 0); + +#ifdef CONFIG_IPW2200_MONITOR + if (priv->ieee->iw_mode == IW_MODE_MONITOR) + priv->net_dev->type = ARPHRD_ETHER; + + if (wrqu->mode == IW_MODE_MONITOR) +#ifdef CONFIG_IPW2200_RADIOTAP + priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; +#else + priv->net_dev->type = ARPHRD_IEEE80211; +#endif +#endif /* CONFIG_IPW2200_MONITOR */ + + /* Free the existing firmware and reset the fw_loaded + * flag so ipw_load() will bring in the new firmware */ + free_firmware(); + + priv->ieee->iw_mode = wrqu->mode; + + queue_work(priv->workqueue, &priv->adapter_restart); + mutex_unlock(&priv->mutex); + return err; +} + +static int ipw_wx_get_mode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + wrqu->mode = priv->ieee->iw_mode; + IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); + mutex_unlock(&priv->mutex); + return 0; +} + +/* Values are in microsecond */ +static const s32 timeout_duration[] = { + 350000, + 250000, + 75000, + 37000, + 25000, +}; + +static const s32 period_duration[] = { + 400000, + 700000, + 1000000, + 1000000, + 1000000 +}; + +static int ipw_wx_get_range(struct net_device *dev, + struct iw_request_info *info, + union 
iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct iw_range *range = (struct iw_range *)extra; + const struct libipw_geo *geo = libipw_get_geo(priv->ieee); + int i = 0, j; + + wrqu->data.length = sizeof(*range); + memset(range, 0, sizeof(*range)); + + /* 54Mbs == ~27 Mb/s real (802.11g) */ + range->throughput = 27 * 1000 * 1000; + + range->max_qual.qual = 100; + /* TODO: Find real max RSSI and stick here */ + range->max_qual.level = 0; + range->max_qual.noise = 0; + range->max_qual.updated = 7; /* Updated all three */ + + range->avg_qual.qual = 70; + /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ + range->avg_qual.level = 0; /* FIXME to real average level */ + range->avg_qual.noise = 0; + range->avg_qual.updated = 7; /* Updated all three */ + mutex_lock(&priv->mutex); + range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES); + + for (i = 0; i < range->num_bitrates; i++) + range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) * + 500000; + + range->max_rts = DEFAULT_RTS_THRESHOLD; + range->min_frag = MIN_FRAG_THRESHOLD; + range->max_frag = MAX_FRAG_THRESHOLD; + + range->encoding_size[0] = 5; + range->encoding_size[1] = 13; + range->num_encoding_sizes = 2; + range->max_encoding_tokens = WEP_KEYS; + + /* Set the Wireless Extension versions */ + range->we_version_compiled = WIRELESS_EXT; + range->we_version_source = 18; + + i = 0; + if (priv->ieee->mode & (IEEE_B | IEEE_G)) { + for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) { + if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && + (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY)) + continue; + + range->freq[i].i = geo->bg[j].channel; + range->freq[i].m = geo->bg[j].freq * 100000; + range->freq[i].e = 1; + i++; + } + } + + if (priv->ieee->mode & IEEE_A) { + for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) { + if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && + (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY)) + continue; + + range->freq[i].i = geo->a[j].channel; + range->freq[i].m = geo->a[j].freq * 100000; + range->freq[i].e = 1; + i++; + } + } + + range->num_channels = i; + range->num_frequency = i; + + mutex_unlock(&priv->mutex); + + /* Event capability (kernel + driver) */ + range->event_capa[0] = (IW_EVENT_CAPA_K_0 | + IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | + IW_EVENT_CAPA_MASK(SIOCGIWAP) | + IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); + range->event_capa[1] = IW_EVENT_CAPA_K_1; + + range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | + IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; + + range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE; + + IPW_DEBUG_WX("GET Range\n"); + return 0; +} + +static int ipw_wx_set_wap(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + + static const unsigned char any[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + static const unsigned char off[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + }; + + if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) + return -EINVAL; + mutex_lock(&priv->mutex); + if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || + !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { + /* we disable mandatory BSSID association */ + IPW_DEBUG_WX("Setting AP BSSID to ANY\n"); + priv->config &= ~CFG_STATIC_BSSID; + IPW_DEBUG_ASSOC("Attempting to associate with new " + "parameters.\n"); + ipw_associate(priv); + mutex_unlock(&priv->mutex); + return 0; + } + + priv->config |= CFG_STATIC_BSSID; + if (!memcmp(priv->bssid, 
wrqu->ap_addr.sa_data, ETH_ALEN)) { + IPW_DEBUG_WX("BSSID set to current BSSID.\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n", + wrqu->ap_addr.sa_data); + + memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); + + /* Network configuration changed -- force [re]association */ + IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n"); + if (!ipw_disassociate(priv)) + ipw_associate(priv); + + mutex_unlock(&priv->mutex); + return 0; +} + +static int ipw_wx_get_wap(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + + /* If we are associated, trying to associate, or have a statically + * configured BSSID then return that; otherwise return ANY */ + mutex_lock(&priv->mutex); + if (priv->config & CFG_STATIC_BSSID || + priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { + wrqu->ap_addr.sa_family = ARPHRD_ETHER; + memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); + } else + memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); + + IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", + wrqu->ap_addr.sa_data); + mutex_unlock(&priv->mutex); + return 0; +} + +static int ipw_wx_set_essid(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + int length; + DECLARE_SSID_BUF(ssid); + + mutex_lock(&priv->mutex); + + if (!wrqu->essid.flags) + { + IPW_DEBUG_WX("Setting ESSID to ANY\n"); + ipw_disassociate(priv); + priv->config &= ~CFG_STATIC_ESSID; + ipw_associate(priv); + mutex_unlock(&priv->mutex); + return 0; + } + + length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE); + + priv->config |= CFG_STATIC_ESSID; + + if (priv->essid_len == length && !memcmp(priv->essid, extra, length) + && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) { + IPW_DEBUG_WX("ESSID set to current ESSID.\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", + print_ssid(ssid, extra, length), length); + + priv->essid_len = length; + memcpy(priv->essid, extra, priv->essid_len); + + /* Network configuration changed -- force [re]association */ + IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n"); + if (!ipw_disassociate(priv)) + ipw_associate(priv); + + mutex_unlock(&priv->mutex); + return 0; +} + +static int ipw_wx_get_essid(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + DECLARE_SSID_BUF(ssid); + + /* If we are associated, trying to associate, or have a statically + * configured ESSID then return that; otherwise return ANY */ + mutex_lock(&priv->mutex); + if (priv->config & CFG_STATIC_ESSID || + priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { + IPW_DEBUG_WX("Getting essid: '%s'\n", + print_ssid(ssid, priv->essid, priv->essid_len)); + memcpy(extra, priv->essid, priv->essid_len); + wrqu->essid.length = priv->essid_len; + wrqu->essid.flags = 1; /* active */ + } else { + IPW_DEBUG_WX("Getting essid: ANY\n"); + wrqu->essid.length = 0; + wrqu->essid.flags = 0; /* active */ + } + mutex_unlock(&priv->mutex); + return 0; +} + +static int ipw_wx_set_nick(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + + IPW_DEBUG_WX("Setting nick to '%s'\n", extra); + if (wrqu->data.length > IW_ESSID_MAX_SIZE) + return -E2BIG; + mutex_lock(&priv->mutex); + 
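+ /* Clamp the nickname to the size of priv->nick and zero the buffer
+ * first, so shorter names end up NUL padded by the copy below. */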
wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); + memset(priv->nick, 0, sizeof(priv->nick)); + memcpy(priv->nick, extra, wrqu->data.length); + IPW_DEBUG_TRACE("<<\n"); + mutex_unlock(&priv->mutex); + return 0; + +} + +static int ipw_wx_get_nick(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + IPW_DEBUG_WX("Getting nick\n"); + mutex_lock(&priv->mutex); + wrqu->data.length = strlen(priv->nick); + memcpy(extra, priv->nick, wrqu->data.length); + wrqu->data.flags = 1; /* active */ + mutex_unlock(&priv->mutex); + return 0; +} + +static int ipw_wx_set_sens(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + int err = 0; + + IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value); + IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value); + mutex_lock(&priv->mutex); + + if (wrqu->sens.fixed == 0) + { + priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; + priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; + goto out; + } + if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) || + (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) { + err = -EINVAL; + goto out; + } + + priv->roaming_threshold = wrqu->sens.value; + priv->disassociate_threshold = 3*wrqu->sens.value; + out: + mutex_unlock(&priv->mutex); + return err; +} + +static int ipw_wx_get_sens(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + wrqu->sens.fixed = 1; + wrqu->sens.value = priv->roaming_threshold; + mutex_unlock(&priv->mutex); + + IPW_DEBUG_WX("GET roaming threshold -> %s %d \n", + wrqu->power.disabled ? 
"OFF" : "ON", wrqu->power.value); + + return 0; +} + +static int ipw_wx_set_rate(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + /* TODO: We should use semaphores or locks for access to priv */ + struct ipw_priv *priv = libipw_priv(dev); + u32 target_rate = wrqu->bitrate.value; + u32 fixed, mask; + + /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */ + /* value = X, fixed = 1 means only rate X */ + /* value = X, fixed = 0 means all rates lower equal X */ + + if (target_rate == -1) { + fixed = 0; + mask = LIBIPW_DEFAULT_RATES_MASK; + /* Now we should reassociate */ + goto apply; + } + + mask = 0; + fixed = wrqu->bitrate.fixed; + + if (target_rate == 1000000 || !fixed) + mask |= LIBIPW_CCK_RATE_1MB_MASK; + if (target_rate == 1000000) + goto apply; + + if (target_rate == 2000000 || !fixed) + mask |= LIBIPW_CCK_RATE_2MB_MASK; + if (target_rate == 2000000) + goto apply; + + if (target_rate == 5500000 || !fixed) + mask |= LIBIPW_CCK_RATE_5MB_MASK; + if (target_rate == 5500000) + goto apply; + + if (target_rate == 6000000 || !fixed) + mask |= LIBIPW_OFDM_RATE_6MB_MASK; + if (target_rate == 6000000) + goto apply; + + if (target_rate == 9000000 || !fixed) + mask |= LIBIPW_OFDM_RATE_9MB_MASK; + if (target_rate == 9000000) + goto apply; + + if (target_rate == 11000000 || !fixed) + mask |= LIBIPW_CCK_RATE_11MB_MASK; + if (target_rate == 11000000) + goto apply; + + if (target_rate == 12000000 || !fixed) + mask |= LIBIPW_OFDM_RATE_12MB_MASK; + if (target_rate == 12000000) + goto apply; + + if (target_rate == 18000000 || !fixed) + mask |= LIBIPW_OFDM_RATE_18MB_MASK; + if (target_rate == 18000000) + goto apply; + + if (target_rate == 24000000 || !fixed) + mask |= LIBIPW_OFDM_RATE_24MB_MASK; + if (target_rate == 24000000) + goto apply; + + if (target_rate == 36000000 || !fixed) + mask |= LIBIPW_OFDM_RATE_36MB_MASK; + if (target_rate == 36000000) + goto apply; + + if (target_rate == 48000000 || !fixed) + mask |= LIBIPW_OFDM_RATE_48MB_MASK; + if (target_rate == 48000000) + goto apply; + + if (target_rate == 54000000 || !fixed) + mask |= LIBIPW_OFDM_RATE_54MB_MASK; + if (target_rate == 54000000) + goto apply; + + IPW_DEBUG_WX("invalid rate specified, returning error\n"); + return -EINVAL; + + apply: + IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n", + mask, fixed ? "fixed" : "sub-rates"); + mutex_lock(&priv->mutex); + if (mask == LIBIPW_DEFAULT_RATES_MASK) { + priv->config &= ~CFG_FIXED_RATE; + ipw_set_fixed_rate(priv, priv->ieee->mode); + } else + priv->config |= CFG_FIXED_RATE; + + if (priv->rates_mask == mask) { + IPW_DEBUG_WX("Mask set to current mask.\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + priv->rates_mask = mask; + + /* Network configuration changed -- force [re]association */ + IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n"); + if (!ipw_disassociate(priv)) + ipw_associate(priv); + + mutex_unlock(&priv->mutex); + return 0; +} + +static int ipw_wx_get_rate(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + wrqu->bitrate.value = priv->last_rate; + wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 
1 : 0; + mutex_unlock(&priv->mutex); + IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); + return 0; +} + +static int ipw_wx_set_rts(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + if (wrqu->rts.disabled || !wrqu->rts.fixed) + priv->rts_threshold = DEFAULT_RTS_THRESHOLD; + else { + if (wrqu->rts.value < MIN_RTS_THRESHOLD || + wrqu->rts.value > MAX_RTS_THRESHOLD) { + mutex_unlock(&priv->mutex); + return -EINVAL; + } + priv->rts_threshold = wrqu->rts.value; + } + + ipw_send_rts_threshold(priv, priv->rts_threshold); + mutex_unlock(&priv->mutex); + IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); + return 0; +} + +static int ipw_wx_get_rts(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + wrqu->rts.value = priv->rts_threshold; + wrqu->rts.fixed = 0; /* no auto select */ + wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); + mutex_unlock(&priv->mutex); + IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); + return 0; +} + +static int ipw_wx_set_txpow(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + int err = 0; + + mutex_lock(&priv->mutex); + if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) { + err = -EINPROGRESS; + goto out; + } + + if (!wrqu->power.fixed) + wrqu->power.value = IPW_TX_POWER_DEFAULT; + + if (wrqu->power.flags != IW_TXPOW_DBM) { + err = -EINVAL; + goto out; + } + + if ((wrqu->power.value > IPW_TX_POWER_MAX) || + (wrqu->power.value < IPW_TX_POWER_MIN)) { + err = -EINVAL; + goto out; + } + + priv->tx_power = wrqu->power.value; + err = ipw_set_tx_power(priv); + out: + mutex_unlock(&priv->mutex); + return err; +} + +static int ipw_wx_get_txpow(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + wrqu->power.value = priv->tx_power; + wrqu->power.fixed = 1; + wrqu->power.flags = IW_TXPOW_DBM; + wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; + mutex_unlock(&priv->mutex); + + IPW_DEBUG_WX("GET TX Power -> %s %d \n", + wrqu->power.disabled ? 
"OFF" : "ON", wrqu->power.value); + + return 0; +} + +static int ipw_wx_set_frag(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + if (wrqu->frag.disabled || !wrqu->frag.fixed) + priv->ieee->fts = DEFAULT_FTS; + else { + if (wrqu->frag.value < MIN_FRAG_THRESHOLD || + wrqu->frag.value > MAX_FRAG_THRESHOLD) { + mutex_unlock(&priv->mutex); + return -EINVAL; + } + + priv->ieee->fts = wrqu->frag.value & ~0x1; + } + + ipw_send_frag_threshold(priv, wrqu->frag.value); + mutex_unlock(&priv->mutex); + IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); + return 0; +} + +static int ipw_wx_get_frag(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + wrqu->frag.value = priv->ieee->fts; + wrqu->frag.fixed = 0; /* no auto select */ + wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); + mutex_unlock(&priv->mutex); + IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); + + return 0; +} + +static int ipw_wx_set_retry(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + + if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled) + return -EINVAL; + + if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) + return 0; + + if (wrqu->retry.value < 0 || wrqu->retry.value >= 255) + return -EINVAL; + + mutex_lock(&priv->mutex); + if (wrqu->retry.flags & IW_RETRY_SHORT) + priv->short_retry_limit = (u8) wrqu->retry.value; + else if (wrqu->retry.flags & IW_RETRY_LONG) + priv->long_retry_limit = (u8) wrqu->retry.value; + else { + priv->short_retry_limit = (u8) wrqu->retry.value; + priv->long_retry_limit = (u8) wrqu->retry.value; + } + + ipw_send_retry_limit(priv, priv->short_retry_limit, + priv->long_retry_limit); + mutex_unlock(&priv->mutex); + IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n", + priv->short_retry_limit, priv->long_retry_limit); + return 0; +} + +static int ipw_wx_get_retry(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + + mutex_lock(&priv->mutex); + wrqu->retry.disabled = 0; + + if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { + mutex_unlock(&priv->mutex); + return -EINVAL; + } + + if (wrqu->retry.flags & IW_RETRY_LONG) { + wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG; + wrqu->retry.value = priv->long_retry_limit; + } else if (wrqu->retry.flags & IW_RETRY_SHORT) { + wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT; + wrqu->retry.value = priv->short_retry_limit; + } else { + wrqu->retry.flags = IW_RETRY_LIMIT; + wrqu->retry.value = priv->short_retry_limit; + } + mutex_unlock(&priv->mutex); + + IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value); + + return 0; +} + +static int ipw_wx_set_scan(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct iw_scan_req *req = (struct iw_scan_req *)extra; + struct delayed_work *work = NULL; + + mutex_lock(&priv->mutex); + + priv->user_requested_scan = 1; + + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + int len = min((int)req->essid_len, + (int)sizeof(priv->direct_scan_ssid)); + memcpy(priv->direct_scan_ssid, req->essid, len); + 
priv->direct_scan_ssid_len = len; + work = &priv->request_direct_scan; + } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { + work = &priv->request_passive_scan; + } + } else { + /* Normal active broadcast scan */ + work = &priv->request_scan; + } + + mutex_unlock(&priv->mutex); + + IPW_DEBUG_WX("Start scan\n"); + + queue_delayed_work(priv->workqueue, work, 0); + + return 0; +} + +static int ipw_wx_get_scan(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + return libipw_wx_get_scan(priv->ieee, info, wrqu, extra); +} + +static int ipw_wx_set_encode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *key) +{ + struct ipw_priv *priv = libipw_priv(dev); + int ret; + u32 cap = priv->capability; + + mutex_lock(&priv->mutex); + ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key); + + /* In IBSS mode, we need to notify the firmware to update + * the beacon info after we changed the capability. */ + if (cap != priv->capability && + priv->ieee->iw_mode == IW_MODE_ADHOC && + priv->status & STATUS_ASSOCIATED) + ipw_disassociate(priv); + + mutex_unlock(&priv->mutex); + return ret; +} + +static int ipw_wx_get_encode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *key) +{ + struct ipw_priv *priv = libipw_priv(dev); + return libipw_wx_get_encode(priv->ieee, info, wrqu, key); +} + +static int ipw_wx_set_power(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + int err; + mutex_lock(&priv->mutex); + if (wrqu->power.disabled) { + priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); + err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); + if (err) { + IPW_DEBUG_WX("failed setting power mode.\n"); + mutex_unlock(&priv->mutex); + return err; + } + IPW_DEBUG_WX("SET Power Management Mode -> off\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + switch (wrqu->power.flags & IW_POWER_MODE) { + case IW_POWER_ON: /* If not specified */ + case IW_POWER_MODE: /* If set all mask */ + case IW_POWER_ALL_R: /* If explicitly state all */ + break; + default: /* Otherwise we don't support it */ + IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", + wrqu->power.flags); + mutex_unlock(&priv->mutex); + return -EOPNOTSUPP; + } + + /* If the user hasn't specified a power management mode yet, default + * to BATTERY */ + if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC) + priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY; + else + priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; + + err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); + if (err) { + IPW_DEBUG_WX("failed setting power mode.\n"); + mutex_unlock(&priv->mutex); + return err; + } + + IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); + mutex_unlock(&priv->mutex); + return 0; +} + +static int ipw_wx_get_power(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + if (!(priv->power_mode & IPW_POWER_ENABLED)) + wrqu->power.disabled = 1; + else + wrqu->power.disabled = 0; + + mutex_unlock(&priv->mutex); + IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); + + return 0; +} + +static int ipw_wx_set_powermode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct 
ipw_priv *priv = libipw_priv(dev); + int mode = *(int *)extra; + int err; + + mutex_lock(&priv->mutex); + if ((mode < 1) || (mode > IPW_POWER_LIMIT)) + mode = IPW_POWER_AC; + + if (IPW_POWER_LEVEL(priv->power_mode) != mode) { + err = ipw_send_power_mode(priv, mode); + if (err) { + IPW_DEBUG_WX("failed setting power mode.\n"); + mutex_unlock(&priv->mutex); + return err; + } + priv->power_mode = IPW_POWER_ENABLED | mode; + } + mutex_unlock(&priv->mutex); + return 0; +} + +#define MAX_WX_STRING 80 +static int ipw_wx_get_powermode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + int level = IPW_POWER_LEVEL(priv->power_mode); + char *p = extra; + + p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level); + + switch (level) { + case IPW_POWER_AC: + p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)"); + break; + case IPW_POWER_BATTERY: + p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)"); + break; + default: + p += snprintf(p, MAX_WX_STRING - (p - extra), + "(Timeout %dms, Period %dms)", + timeout_duration[level - 1] / 1000, + period_duration[level - 1] / 1000); + } + + if (!(priv->power_mode & IPW_POWER_ENABLED)) + p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF"); + + wrqu->data.length = p - extra + 1; + + return 0; +} + +static int ipw_wx_set_wireless_mode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + int mode = *(int *)extra; + u8 band = 0, modulation = 0; + + if (mode == 0 || mode & ~IEEE_MODE_MASK) { + IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode); + return -EINVAL; + } + mutex_lock(&priv->mutex); + if (priv->adapter == IPW_2915ABG) { + priv->ieee->abg_true = 1; + if (mode & IEEE_A) { + band |= LIBIPW_52GHZ_BAND; + modulation |= LIBIPW_OFDM_MODULATION; + } else + priv->ieee->abg_true = 0; + } else { + if (mode & IEEE_A) { + IPW_WARNING("Attempt to set 2200BG into " + "802.11a mode\n"); + mutex_unlock(&priv->mutex); + return -EINVAL; + } + + priv->ieee->abg_true = 0; + } + + if (mode & IEEE_B) { + band |= LIBIPW_24GHZ_BAND; + modulation |= LIBIPW_CCK_MODULATION; + } else + priv->ieee->abg_true = 0; + + if (mode & IEEE_G) { + band |= LIBIPW_24GHZ_BAND; + modulation |= LIBIPW_OFDM_MODULATION; + } else + priv->ieee->abg_true = 0; + + priv->ieee->mode = mode; + priv->ieee->freq_band = band; + priv->ieee->modulation = modulation; + init_supported_rates(priv, &priv->rates); + + /* Network configuration changed -- force [re]association */ + IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n"); + if (!ipw_disassociate(priv)) { + ipw_send_supported_rates(priv, &priv->rates); + ipw_associate(priv); + } + + /* Update the band LEDs */ + ipw_led_band_on(priv); + + IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", + mode & IEEE_A ? 'a' : '.', + mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 
'g' : '.'); + mutex_unlock(&priv->mutex); + return 0; +} + +static int ipw_wx_get_wireless_mode(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + switch (priv->ieee->mode) { + case IEEE_A: + strncpy(extra, "802.11a (1)", MAX_WX_STRING); + break; + case IEEE_B: + strncpy(extra, "802.11b (2)", MAX_WX_STRING); + break; + case IEEE_A | IEEE_B: + strncpy(extra, "802.11ab (3)", MAX_WX_STRING); + break; + case IEEE_G: + strncpy(extra, "802.11g (4)", MAX_WX_STRING); + break; + case IEEE_A | IEEE_G: + strncpy(extra, "802.11ag (5)", MAX_WX_STRING); + break; + case IEEE_B | IEEE_G: + strncpy(extra, "802.11bg (6)", MAX_WX_STRING); + break; + case IEEE_A | IEEE_B | IEEE_G: + strncpy(extra, "802.11abg (7)", MAX_WX_STRING); + break; + default: + strncpy(extra, "unknown", MAX_WX_STRING); + break; + } + + IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); + + wrqu->data.length = strlen(extra) + 1; + mutex_unlock(&priv->mutex); + + return 0; +} + +static int ipw_wx_set_preamble(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + int mode = *(int *)extra; + mutex_lock(&priv->mutex); + /* Switching from SHORT -> LONG requires a disassociation */ + if (mode == 1) { + if (!(priv->config & CFG_PREAMBLE_LONG)) { + priv->config |= CFG_PREAMBLE_LONG; + + /* Network configuration changed -- force [re]association */ + IPW_DEBUG_ASSOC + ("[re]association triggered due to preamble change.\n"); + if (!ipw_disassociate(priv)) + ipw_associate(priv); + } + goto done; + } + + if (mode == 0) { + priv->config &= ~CFG_PREAMBLE_LONG; + goto done; + } + mutex_unlock(&priv->mutex); + return -EINVAL; + + done: + mutex_unlock(&priv->mutex); + return 0; +} + +static int ipw_wx_get_preamble(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + mutex_lock(&priv->mutex); + if (priv->config & CFG_PREAMBLE_LONG) + snprintf(wrqu->name, IFNAMSIZ, "long (1)"); + else + snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); + mutex_unlock(&priv->mutex); + return 0; +} + +#ifdef CONFIG_IPW2200_MONITOR +static int ipw_wx_set_monitor(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + int *parms = (int *)extra; + int enable = (parms[0] > 0); + mutex_lock(&priv->mutex); + IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); + if (enable) { + if (priv->ieee->iw_mode != IW_MODE_MONITOR) { +#ifdef CONFIG_IPW2200_RADIOTAP + priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; +#else + priv->net_dev->type = ARPHRD_IEEE80211; +#endif + queue_work(priv->workqueue, &priv->adapter_restart); + } + + ipw_set_channel(priv, parms[1]); + } else { + if (priv->ieee->iw_mode != IW_MODE_MONITOR) { + mutex_unlock(&priv->mutex); + return 0; + } + priv->net_dev->type = ARPHRD_ETHER; + queue_work(priv->workqueue, &priv->adapter_restart); + } + mutex_unlock(&priv->mutex); + return 0; +} + +#endif /* CONFIG_IPW2200_MONITOR */ + +static int ipw_wx_reset(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + IPW_DEBUG_WX("RESET\n"); + queue_work(priv->workqueue, &priv->adapter_restart); + return 0; +} + +static int ipw_wx_sw_reset(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, 
char *extra) +{ + struct ipw_priv *priv = libipw_priv(dev); + union iwreq_data wrqu_sec = { + .encoding = { + .flags = IW_ENCODE_DISABLED, + }, + }; + int ret; + + IPW_DEBUG_WX("SW_RESET\n"); + + mutex_lock(&priv->mutex); + + ret = ipw_sw_reset(priv, 2); + if (!ret) { + free_firmware(); + ipw_adapter_restart(priv); + } + + /* The SW reset bit might have been toggled on by the 'disable' + * module parameter, so take appropriate action */ + ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW); + + mutex_unlock(&priv->mutex); + libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL); + mutex_lock(&priv->mutex); + + if (!(priv->status & STATUS_RF_KILL_MASK)) { + /* Configuration likely changed -- force [re]association */ + IPW_DEBUG_ASSOC("[re]association triggered due to sw " + "reset.\n"); + if (!ipw_disassociate(priv)) + ipw_associate(priv); + } + + mutex_unlock(&priv->mutex); + + return 0; +} + +/* Rebase the WE IOCTLs to zero for the handler array */ +#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT] +static iw_handler ipw_wx_handlers[] = { + IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname, + IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, + IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, + IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, + IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, + IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens, + IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens, + IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, + IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, + IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, + IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan, + IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan, + IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid, + IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid, + IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick, + IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick, + IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate, + IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate, + IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts, + IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts, + IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag, + IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag, + IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow, + IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow, + IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry, + IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry, + IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode, + IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode, + IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power, + IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power, + IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy, + IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy, + IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy, + IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy, + IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie, + IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie, + IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme, + IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth, + IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth, + IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext, + IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext, +}; + +enum { + IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV, + IPW_PRIV_GET_POWER, + IPW_PRIV_SET_MODE, + IPW_PRIV_GET_MODE, + IPW_PRIV_SET_PREAMBLE, + IPW_PRIV_GET_PREAMBLE, + IPW_PRIV_RESET, + IPW_PRIV_SW_RESET, +#ifdef CONFIG_IPW2200_MONITOR + IPW_PRIV_SET_MONITOR, +#endif +}; + +static struct iw_priv_args ipw_priv_args[] = { + { + .cmd = IPW_PRIV_SET_POWER, + .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, + .name = "set_power"}, + { + .cmd = IPW_PRIV_GET_POWER, + .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + .name = "get_power"}, + { + .cmd = IPW_PRIV_SET_MODE, + .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, + 
.name = "set_mode"}, + { + .cmd = IPW_PRIV_GET_MODE, + .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + .name = "get_mode"}, + { + .cmd = IPW_PRIV_SET_PREAMBLE, + .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, + .name = "set_preamble"}, + { + .cmd = IPW_PRIV_GET_PREAMBLE, + .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, + .name = "get_preamble"}, + { + IPW_PRIV_RESET, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"}, + { + IPW_PRIV_SW_RESET, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"}, +#ifdef CONFIG_IPW2200_MONITOR + { + IPW_PRIV_SET_MONITOR, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"}, +#endif /* CONFIG_IPW2200_MONITOR */ +}; + +static iw_handler ipw_priv_handler[] = { + ipw_wx_set_powermode, + ipw_wx_get_powermode, + ipw_wx_set_wireless_mode, + ipw_wx_get_wireless_mode, + ipw_wx_set_preamble, + ipw_wx_get_preamble, + ipw_wx_reset, + ipw_wx_sw_reset, +#ifdef CONFIG_IPW2200_MONITOR + ipw_wx_set_monitor, +#endif +}; + +static struct iw_handler_def ipw_wx_handler_def = { + .standard = ipw_wx_handlers, + .num_standard = ARRAY_SIZE(ipw_wx_handlers), + .num_private = ARRAY_SIZE(ipw_priv_handler), + .num_private_args = ARRAY_SIZE(ipw_priv_args), + .private = ipw_priv_handler, + .private_args = ipw_priv_args, + .get_wireless_stats = ipw_get_wireless_stats, +}; + +/* + * Get wireless statistics. + * Called by /proc/net/wireless + * Also called by SIOCGIWSTATS + */ +static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct iw_statistics *wstats; + + wstats = &priv->wstats; + + /* if hw is disabled, then ipw_get_ordinal() can't be called. + * netdev->get_wireless_stats seems to be called before fw is + * initialized. 
STATUS_ASSOCIATED will only be set if the hw is up + * and associated; if not associcated, the values are all meaningless + * anyway, so set them all to NULL and INVALID */ + if (!(priv->status & STATUS_ASSOCIATED)) { + wstats->miss.beacon = 0; + wstats->discard.retries = 0; + wstats->qual.qual = 0; + wstats->qual.level = 0; + wstats->qual.noise = 0; + wstats->qual.updated = 7; + wstats->qual.updated |= IW_QUAL_NOISE_INVALID | + IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; + return wstats; + } + + wstats->qual.qual = priv->quality; + wstats->qual.level = priv->exp_avg_rssi; + wstats->qual.noise = priv->exp_avg_noise; + wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | + IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM; + + wstats->miss.beacon = average_value(&priv->average_missed_beacons); + wstats->discard.retries = priv->last_tx_failures; + wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable; + +/* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len)) + goto fail_get_ordinal; + wstats->discard.retries += tx_retry; */ + + return wstats; +} + +/* net device stuff */ + +static void init_sys_config(struct ipw_sys_config *sys_config) +{ + memset(sys_config, 0, sizeof(struct ipw_sys_config)); + sys_config->bt_coexistence = 0; + sys_config->answer_broadcast_ssid_probe = 0; + sys_config->accept_all_data_frames = 0; + sys_config->accept_non_directed_frames = 1; + sys_config->exclude_unicast_unencrypted = 0; + sys_config->disable_unicast_decryption = 1; + sys_config->exclude_multicast_unencrypted = 0; + sys_config->disable_multicast_decryption = 1; + if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B) + antenna = CFG_SYS_ANTENNA_BOTH; + sys_config->antenna_diversity = antenna; + sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ + sys_config->dot11g_auto_detection = 0; + sys_config->enable_cts_to_self = 0; + sys_config->bt_coexist_collision_thr = 0; + sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */ + sys_config->silence_threshold = 0x1e; +} + +static int ipw_net_open(struct net_device *dev) +{ + IPW_DEBUG_INFO("dev->open\n"); + netif_start_queue(dev); + return 0; +} + +static int ipw_net_stop(struct net_device *dev) +{ + IPW_DEBUG_INFO("dev->close\n"); + netif_stop_queue(dev); + return 0; +} + +/* +todo: + +modify to send one tfd per fragment instead of using chunking. otherwise +we need to heavily modify the libipw_skb_to_txb. 
+*/ + +static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, + int pri) +{ + struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *) + txb->fragments[0]->data; + int i = 0; + struct tfd_frame *tfd; +#ifdef CONFIG_IPW2200_QOS + int tx_id = ipw_get_tx_queue_number(priv, pri); + struct clx2_tx_queue *txq = &priv->txq[tx_id]; +#else + struct clx2_tx_queue *txq = &priv->txq[0]; +#endif + struct clx2_queue *q = &txq->q; + u8 id, hdr_len, unicast; + int fc; + + if (!(priv->status & STATUS_ASSOCIATED)) + goto drop; + + hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); + switch (priv->ieee->iw_mode) { + case IW_MODE_ADHOC: + unicast = !is_multicast_ether_addr(hdr->addr1); + id = ipw_find_station(priv, hdr->addr1); + if (id == IPW_INVALID_STATION) { + id = ipw_add_station(priv, hdr->addr1); + if (id == IPW_INVALID_STATION) { + IPW_WARNING("Attempt to send data to " + "invalid cell: %pM\n", + hdr->addr1); + goto drop; + } + } + break; + + case IW_MODE_INFRA: + default: + unicast = !is_multicast_ether_addr(hdr->addr3); + id = 0; + break; + } + + tfd = &txq->bd[q->first_empty]; + txq->txb[q->first_empty] = txb; + memset(tfd, 0, sizeof(*tfd)); + tfd->u.data.station_number = id; + + tfd->control_flags.message_type = TX_FRAME_TYPE; + tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; + + tfd->u.data.cmd_id = DINO_CMD_TX; + tfd->u.data.len = cpu_to_le16(txb->payload_size); + + if (priv->assoc_request.ieee_mode == IPW_B_MODE) + tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK; + else + tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM; + + if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE) + tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE; + + fc = le16_to_cpu(hdr->frame_ctl); + hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS); + + memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len); + + if (likely(unicast)) + tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD; + + if (txb->encrypted && !priv->ieee->host_encrypt) { + switch (priv->ieee->sec.level) { + case SEC_LEVEL_3: + tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= + cpu_to_le16(IEEE80211_FCTL_PROTECTED); + /* XXX: ACK flag must be set for CCMP even if it + * is a multicast/broadcast packet, because CCMP + * group communication encrypted by GTK is + * actually done by the AP. 
*/ + if (!unicast) + tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD; + + tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; + tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM; + tfd->u.data.key_index = 0; + tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE; + break; + case SEC_LEVEL_2: + tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= + cpu_to_le16(IEEE80211_FCTL_PROTECTED); + tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; + tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP; + tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE; + break; + case SEC_LEVEL_1: + tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= + cpu_to_le16(IEEE80211_FCTL_PROTECTED); + tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx; + if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <= + 40) + tfd->u.data.key_index |= DCT_WEP_KEY_64Bit; + else + tfd->u.data.key_index |= DCT_WEP_KEY_128Bit; + break; + case SEC_LEVEL_0: + break; + default: + printk(KERN_ERR "Unknown security level %d\n", + priv->ieee->sec.level); + break; + } + } else + /* No hardware encryption */ + tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP; + +#ifdef CONFIG_IPW2200_QOS + if (fc & IEEE80211_STYPE_QOS_DATA) + ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data)); +#endif /* CONFIG_IPW2200_QOS */ + + /* payload */ + tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2), + txb->nr_frags)); + IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n", + txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks)); + for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) { + IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n", + i, le32_to_cpu(tfd->u.data.num_chunks), + txb->fragments[i]->len - hdr_len); + IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n", + i, tfd->u.data.num_chunks, + txb->fragments[i]->len - hdr_len); + printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len, + txb->fragments[i]->len - hdr_len); + + tfd->u.data.chunk_ptr[i] = + cpu_to_le32(pci_map_single + (priv->pci_dev, + txb->fragments[i]->data + hdr_len, + txb->fragments[i]->len - hdr_len, + PCI_DMA_TODEVICE)); + tfd->u.data.chunk_len[i] = + cpu_to_le16(txb->fragments[i]->len - hdr_len); + } + + if (i != txb->nr_frags) { + struct sk_buff *skb; + u16 remaining_bytes = 0; + int j; + + for (j = i; j < txb->nr_frags; j++) + remaining_bytes += txb->fragments[j]->len - hdr_len; + + printk(KERN_INFO "Trying to reallocate for %d bytes\n", + remaining_bytes); + skb = alloc_skb(remaining_bytes, GFP_ATOMIC); + if (skb != NULL) { + tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes); + for (j = i; j < txb->nr_frags; j++) { + int size = txb->fragments[j]->len - hdr_len; + + printk(KERN_INFO "Adding frag %d %d...\n", + j, size); + memcpy(skb_put(skb, size), + txb->fragments[j]->data + hdr_len, size); + } + dev_kfree_skb_any(txb->fragments[i]); + txb->fragments[i] = skb; + tfd->u.data.chunk_ptr[i] = + cpu_to_le32(pci_map_single + (priv->pci_dev, skb->data, + remaining_bytes, + PCI_DMA_TODEVICE)); + + le32_add_cpu(&tfd->u.data.num_chunks, 1); + } + } + + /* kick DMA */ + q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); + ipw_write32(priv, q->reg_w, q->first_empty); + + if (ipw_tx_queue_space(q) < q->high_mark) + netif_stop_queue(priv->net_dev); + + return NETDEV_TX_OK; + + drop: + IPW_DEBUG_DROP("Silently dropping Tx packet.\n"); + libipw_txb_free(txb); + return NETDEV_TX_OK; +} + +static int ipw_net_is_queue_full(struct net_device *dev, int pri) +{ + struct ipw_priv *priv = libipw_priv(dev); +#ifdef CONFIG_IPW2200_QOS + int tx_id = ipw_get_tx_queue_number(priv, 
pri); + struct clx2_tx_queue *txq = &priv->txq[tx_id]; +#else + struct clx2_tx_queue *txq = &priv->txq[0]; +#endif /* CONFIG_IPW2200_QOS */ + + if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark) + return 1; + + return 0; +} + +#ifdef CONFIG_IPW2200_PROMISCUOUS +static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, + struct libipw_txb *txb) +{ + struct libipw_rx_stats dummystats; + struct ieee80211_hdr *hdr; + u8 n; + u16 filter = priv->prom_priv->filter; + int hdr_only = 0; + + if (filter & IPW_PROM_NO_TX) + return; + + memset(&dummystats, 0, sizeof(dummystats)); + + /* Filtering of fragment chains is done against the first fragment */ + hdr = (void *)txb->fragments[0]->data; + if (libipw_is_management(le16_to_cpu(hdr->frame_control))) { + if (filter & IPW_PROM_NO_MGMT) + return; + if (filter & IPW_PROM_MGMT_HEADER_ONLY) + hdr_only = 1; + } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) { + if (filter & IPW_PROM_NO_CTL) + return; + if (filter & IPW_PROM_CTL_HEADER_ONLY) + hdr_only = 1; + } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) { + if (filter & IPW_PROM_NO_DATA) + return; + if (filter & IPW_PROM_DATA_HEADER_ONLY) + hdr_only = 1; + } + + for (n = 0; n < txb->nr_frags; ++n) { + struct sk_buff *src = txb->fragments[n]; + struct sk_buff *dst; + struct ieee80211_radiotap_header *rt_hdr; + int len; + + if (hdr_only) { + hdr = (void *)src->data; + len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control)); + } else + len = src->len; + + dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC); + if (!dst) + continue; + + rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr)); + + rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION; + rt_hdr->it_pad = 0; + rt_hdr->it_present = 0; /* after all, it's just an idea */ + rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL); + + *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16( + ieee80211chan2mhz(priv->channel)); + if (priv->channel > 14) /* 802.11a */ + *(__le16*)skb_put(dst, sizeof(u16)) = + cpu_to_le16(IEEE80211_CHAN_OFDM | + IEEE80211_CHAN_5GHZ); + else if (priv->ieee->mode == IEEE_B) /* 802.11b */ + *(__le16*)skb_put(dst, sizeof(u16)) = + cpu_to_le16(IEEE80211_CHAN_CCK | + IEEE80211_CHAN_2GHZ); + else /* 802.11g */ + *(__le16*)skb_put(dst, sizeof(u16)) = + cpu_to_le16(IEEE80211_CHAN_OFDM | + IEEE80211_CHAN_2GHZ); + + rt_hdr->it_len = cpu_to_le16(dst->len); + + skb_copy_from_linear_data(src, skb_put(dst, len), len); + + if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats)) + dev_kfree_skb_any(dst); + } +} +#endif + +static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb, + struct net_device *dev, int pri) +{ + struct ipw_priv *priv = libipw_priv(dev); + unsigned long flags; + netdev_tx_t ret; + + IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size); + spin_lock_irqsave(&priv->lock, flags); + +#ifdef CONFIG_IPW2200_PROMISCUOUS + if (rtap_iface && netif_running(priv->prom_net_dev)) + ipw_handle_promiscuous_tx(priv, txb); +#endif + + ret = ipw_tx_skb(priv, txb, pri); + if (ret == NETDEV_TX_OK) + __ipw_led_activity_on(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return ret; +} + +static void ipw_net_set_multicast_list(struct net_device *dev) +{ + +} + +static int ipw_net_set_mac_address(struct net_device *dev, void *p) +{ + struct ipw_priv *priv = libipw_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + mutex_lock(&priv->mutex); + priv->config |= CFG_CUSTOM_MAC; + memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); + printk(KERN_INFO "%s: 
Setting MAC to %pM\n", + priv->net_dev->name, priv->mac_addr); + queue_work(priv->workqueue, &priv->adapter_restart); + mutex_unlock(&priv->mutex); + return 0; +} + +static void ipw_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct ipw_priv *p = libipw_priv(dev); + char vers[64]; + char date[32]; + u32 len; + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + + len = sizeof(vers); + ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); + len = sizeof(date); + ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len); + + snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", + vers, date); + strcpy(info->bus_info, pci_name(p->pci_dev)); + info->eedump_len = IPW_EEPROM_IMAGE_SIZE; +} + +static u32 ipw_ethtool_get_link(struct net_device *dev) +{ + struct ipw_priv *priv = libipw_priv(dev); + return (priv->status & STATUS_ASSOCIATED) != 0; +} + +static int ipw_ethtool_get_eeprom_len(struct net_device *dev) +{ + return IPW_EEPROM_IMAGE_SIZE; +} + +static int ipw_ethtool_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 * bytes) +{ + struct ipw_priv *p = libipw_priv(dev); + + if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) + return -EINVAL; + mutex_lock(&p->mutex); + memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len); + mutex_unlock(&p->mutex); + return 0; +} + +static int ipw_ethtool_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 * bytes) +{ + struct ipw_priv *p = libipw_priv(dev); + int i; + + if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) + return -EINVAL; + mutex_lock(&p->mutex); + memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len); + for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) + ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]); + mutex_unlock(&p->mutex); + return 0; +} + +static const struct ethtool_ops ipw_ethtool_ops = { + .get_link = ipw_ethtool_get_link, + .get_drvinfo = ipw_ethtool_get_drvinfo, + .get_eeprom_len = ipw_ethtool_get_eeprom_len, + .get_eeprom = ipw_ethtool_get_eeprom, + .set_eeprom = ipw_ethtool_set_eeprom, +}; + +static irqreturn_t ipw_isr(int irq, void *data) +{ + struct ipw_priv *priv = data; + u32 inta, inta_mask; + + if (!priv) + return IRQ_NONE; + + spin_lock(&priv->irq_lock); + + if (!(priv->status & STATUS_INT_ENABLED)) { + /* IRQ is disabled */ + goto none; + } + + inta = ipw_read32(priv, IPW_INTA_RW); + inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); + + if (inta == 0xFFFFFFFF) { + /* Hardware disappeared */ + IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n"); + goto none; + } + + if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) { + /* Shared interrupt */ + goto none; + } + + /* tell the device to stop sending interrupts */ + __ipw_disable_interrupts(priv); + + /* ack current interrupts */ + inta &= (IPW_INTA_MASK_ALL & inta_mask); + ipw_write32(priv, IPW_INTA_RW, inta); + + /* Cache INTA value for our tasklet */ + priv->isr_inta = inta; + + tasklet_schedule(&priv->irq_tasklet); + + spin_unlock(&priv->irq_lock); + + return IRQ_HANDLED; + none: + spin_unlock(&priv->irq_lock); + return IRQ_NONE; +} + +static void ipw_rf_kill(void *adapter) +{ + struct ipw_priv *priv = adapter; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + if (rf_kill_active(priv)) { + IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); + if (priv->workqueue) + queue_delayed_work(priv->workqueue, + &priv->rf_kill, 2 * HZ); + goto exit_unlock; + } + + /* RF Kill is now disabled, so bring the device back up */ + + if (!(priv->status & 
STATUS_RF_KILL_MASK)) { + IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting " + "device\n"); + + /* we can not do an adapter restart while inside an irq lock */ + queue_work(priv->workqueue, &priv->adapter_restart); + } else + IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " + "enabled\n"); + + exit_unlock: + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void ipw_bg_rf_kill(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, rf_kill.work); + mutex_lock(&priv->mutex); + ipw_rf_kill(priv); + mutex_unlock(&priv->mutex); +} + +static void ipw_link_up(struct ipw_priv *priv) +{ + priv->last_seq_num = -1; + priv->last_frag_num = -1; + priv->last_packet_time = 0; + + netif_carrier_on(priv->net_dev); + + cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); + cancel_delayed_work(&priv->scan_event); + ipw_reset_stats(priv); + /* Ensure the rate is updated immediately */ + priv->last_rate = ipw_get_current_rate(priv); + ipw_gather_stats(priv); + ipw_led_link_up(priv); + notify_wx_assoc_event(priv); + + if (priv->config & CFG_BACKGROUND_SCAN) + queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); +} + +static void ipw_bg_link_up(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, link_up); + mutex_lock(&priv->mutex); + ipw_link_up(priv); + mutex_unlock(&priv->mutex); +} + +static void ipw_link_down(struct ipw_priv *priv) +{ + ipw_led_link_down(priv); + netif_carrier_off(priv->net_dev); + notify_wx_assoc_event(priv); + + /* Cancel any queued work ... */ + cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); + cancel_delayed_work(&priv->adhoc_check); + cancel_delayed_work(&priv->gather_stats); + + ipw_reset_stats(priv); + + if (!(priv->status & STATUS_EXIT_PENDING)) { + /* Queue up another scan... 
*/ + queue_delayed_work(priv->workqueue, &priv->request_scan, 0); + } else + cancel_delayed_work(&priv->scan_event); +} + +static void ipw_bg_link_down(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, link_down); + mutex_lock(&priv->mutex); + ipw_link_down(priv); + mutex_unlock(&priv->mutex); +} + +static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv) +{ + int ret = 0; + + priv->workqueue = create_workqueue(DRV_NAME); + init_waitqueue_head(&priv->wait_command_queue); + init_waitqueue_head(&priv->wait_state); + + INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check); + INIT_WORK(&priv->associate, ipw_bg_associate); + INIT_WORK(&priv->disassociate, ipw_bg_disassociate); + INIT_WORK(&priv->system_config, ipw_system_config); + INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish); + INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart); + INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill); + INIT_WORK(&priv->up, ipw_bg_up); + INIT_WORK(&priv->down, ipw_bg_down); + INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); + INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan); + INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan); + INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); + INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); + INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); + INIT_WORK(&priv->roam, ipw_bg_roam); + INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check); + INIT_WORK(&priv->link_up, ipw_bg_link_up); + INIT_WORK(&priv->link_down, ipw_bg_link_down); + INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on); + INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off); + INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off); + INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network); + +#ifdef CONFIG_IPW2200_QOS + INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); +#endif /* CONFIG_IPW2200_QOS */ + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + ipw_irq_tasklet, (unsigned long)priv); + + return ret; +} + +static void shim__set_security(struct net_device *dev, + struct libipw_security *sec) +{ + struct ipw_priv *priv = libipw_priv(dev); + int i; + for (i = 0; i < 4; i++) { + if (sec->flags & (1 << i)) { + priv->ieee->sec.encode_alg[i] = sec->encode_alg[i]; + priv->ieee->sec.key_sizes[i] = sec->key_sizes[i]; + if (sec->key_sizes[i] == 0) + priv->ieee->sec.flags &= ~(1 << i); + else { + memcpy(priv->ieee->sec.keys[i], sec->keys[i], + sec->key_sizes[i]); + priv->ieee->sec.flags |= (1 << i); + } + priv->status |= STATUS_SECURITY_UPDATED; + } else if (sec->level != SEC_LEVEL_1) + priv->ieee->sec.flags &= ~(1 << i); + } + + if (sec->flags & SEC_ACTIVE_KEY) { + if (sec->active_key <= 3) { + priv->ieee->sec.active_key = sec->active_key; + priv->ieee->sec.flags |= SEC_ACTIVE_KEY; + } else + priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; + priv->status |= STATUS_SECURITY_UPDATED; + } else + priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; + + if ((sec->flags & SEC_AUTH_MODE) && + (priv->ieee->sec.auth_mode != sec->auth_mode)) { + priv->ieee->sec.auth_mode = sec->auth_mode; + priv->ieee->sec.flags |= SEC_AUTH_MODE; + if (sec->auth_mode == WLAN_AUTH_SHARED_KEY) + priv->capability |= CAP_SHARED_KEY; + else + priv->capability &= ~CAP_SHARED_KEY; + priv->status |= STATUS_SECURITY_UPDATED; + } + + if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) { + priv->ieee->sec.flags |= SEC_ENABLED; + priv->ieee->sec.enabled = sec->enabled; 
+ priv->status |= STATUS_SECURITY_UPDATED; + if (sec->enabled) + priv->capability |= CAP_PRIVACY_ON; + else + priv->capability &= ~CAP_PRIVACY_ON; + } + + if (sec->flags & SEC_ENCRYPT) + priv->ieee->sec.encrypt = sec->encrypt; + + if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) { + priv->ieee->sec.level = sec->level; + priv->ieee->sec.flags |= SEC_LEVEL; + priv->status |= STATUS_SECURITY_UPDATED; + } + + if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT)) + ipw_set_hwcrypto_keys(priv); + + /* To match current functionality of ipw2100 (which works well w/ + * various supplicants, we don't force a disassociate if the + * privacy capability changes ... */ +#if 0 + if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) && + (((priv->assoc_request.capability & + cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) || + (!(priv->assoc_request.capability & + cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) { + IPW_DEBUG_ASSOC("Disassociating due to capability " + "change.\n"); + ipw_disassociate(priv); + } +#endif +} + +static int init_supported_rates(struct ipw_priv *priv, + struct ipw_supported_rates *rates) +{ + /* TODO: Mask out rates based on priv->rates_mask */ + + memset(rates, 0, sizeof(*rates)); + /* configure supported rates */ + switch (priv->ieee->freq_band) { + case LIBIPW_52GHZ_BAND: + rates->ieee_mode = IPW_A_MODE; + rates->purpose = IPW_RATE_CAPABILITIES; + ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION, + LIBIPW_OFDM_DEFAULT_RATES_MASK); + break; + + default: /* Mixed or 2.4Ghz */ + rates->ieee_mode = IPW_G_MODE; + rates->purpose = IPW_RATE_CAPABILITIES; + ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION, + LIBIPW_CCK_DEFAULT_RATES_MASK); + if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) { + ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION, + LIBIPW_OFDM_DEFAULT_RATES_MASK); + } + break; + } + + return 0; +} + +static int ipw_config(struct ipw_priv *priv) +{ + /* This is only called from ipw_up, which resets/reloads the firmware + so, we don't need to first disable the card before we configure + it */ + if (ipw_set_tx_power(priv)) + goto error; + + /* initialize adapter address */ + if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr)) + goto error; + + /* set basic system config settings */ + init_sys_config(&priv->sys_config); + + /* Support Bluetooth if we have BT h/w on board, and user wants to. 
+ * Does not support BT priority yet (don't abort or defer our Tx) */ + if (bt_coexist) { + unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY]; + + if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG) + priv->sys_config.bt_coexistence + |= CFG_BT_COEXISTENCE_SIGNAL_CHNL; + if (bt_caps & EEPROM_SKU_CAP_BT_OOB) + priv->sys_config.bt_coexistence + |= CFG_BT_COEXISTENCE_OOB; + } + +#ifdef CONFIG_IPW2200_PROMISCUOUS + if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { + priv->sys_config.accept_all_data_frames = 1; + priv->sys_config.accept_non_directed_frames = 1; + priv->sys_config.accept_all_mgmt_bcpr = 1; + priv->sys_config.accept_all_mgmt_frames = 1; + } +#endif + + if (priv->ieee->iw_mode == IW_MODE_ADHOC) + priv->sys_config.answer_broadcast_ssid_probe = 1; + else + priv->sys_config.answer_broadcast_ssid_probe = 0; + + if (ipw_send_system_config(priv)) + goto error; + + init_supported_rates(priv, &priv->rates); + if (ipw_send_supported_rates(priv, &priv->rates)) + goto error; + + /* Set request-to-send threshold */ + if (priv->rts_threshold) { + if (ipw_send_rts_threshold(priv, priv->rts_threshold)) + goto error; + } +#ifdef CONFIG_IPW2200_QOS + IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n"); + ipw_qos_activate(priv, NULL); +#endif /* CONFIG_IPW2200_QOS */ + + if (ipw_set_random_seed(priv)) + goto error; + + /* final state transition to the RUN state */ + if (ipw_send_host_complete(priv)) + goto error; + + priv->status |= STATUS_INIT; + + ipw_led_init(priv); + ipw_led_radio_on(priv); + priv->notif_missed_beacons = 0; + + /* Set hardware WEP key if it is configured. */ + if ((priv->capability & CAP_PRIVACY_ON) && + (priv->ieee->sec.level == SEC_LEVEL_1) && + !(priv->ieee->host_encrypt || priv->ieee->host_decrypt)) + ipw_set_hwcrypto_keys(priv); + + return 0; + + error: + return -EIO; +} + +/* + * NOTE: + * + * These tables have been tested in conjunction with the + * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters. + * + * Altering this values, using it on other hardware, or in geographies + * not intended for resale of the above mentioned Intel adapters has + * not been tested. + * + * Remember to update the table in README.ipw2200 when changing this + * table. 
+ * + */ +static const struct libipw_geo ipw_geos[] = { + { /* Restricted */ + "---", + .bg_channels = 11, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}}, + }, + + { /* Custom US/Canada */ + "ZZF", + .bg_channels = 11, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}}, + .a_channels = 8, + .a = {{5180, 36}, + {5200, 40}, + {5220, 44}, + {5240, 48}, + {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, + {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, + {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, + {5320, 64, LIBIPW_CH_PASSIVE_ONLY}}, + }, + + { /* Rest of World */ + "ZZD", + .bg_channels = 13, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}, {2467, 12}, + {2472, 13}}, + }, + + { /* Custom USA & Europe & High */ + "ZZA", + .bg_channels = 11, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}}, + .a_channels = 13, + .a = {{5180, 36}, + {5200, 40}, + {5220, 44}, + {5240, 48}, + {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, + {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, + {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, + {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, + {5745, 149}, + {5765, 153}, + {5785, 157}, + {5805, 161}, + {5825, 165}}, + }, + + { /* Custom NA & Europe */ + "ZZB", + .bg_channels = 11, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}}, + .a_channels = 13, + .a = {{5180, 36}, + {5200, 40}, + {5220, 44}, + {5240, 48}, + {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, + {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, + {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, + {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, + {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, + {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, + {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, + {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, + {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, + }, + + { /* Custom Japan */ + "ZZC", + .bg_channels = 11, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}}, + .a_channels = 4, + .a = {{5170, 34}, {5190, 38}, + {5210, 42}, {5230, 46}}, + }, + + { /* Custom */ + "ZZM", + .bg_channels = 11, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}}, + }, + + { /* Europe */ + "ZZE", + .bg_channels = 13, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}, {2467, 12}, + {2472, 13}}, + .a_channels = 19, + .a = {{5180, 36}, + {5200, 40}, + {5220, 44}, + {5240, 48}, + {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, + {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, + {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, + {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, + {5500, 100, LIBIPW_CH_PASSIVE_ONLY}, + {5520, 104, LIBIPW_CH_PASSIVE_ONLY}, + {5540, 108, LIBIPW_CH_PASSIVE_ONLY}, + {5560, 112, LIBIPW_CH_PASSIVE_ONLY}, + {5580, 116, LIBIPW_CH_PASSIVE_ONLY}, + {5600, 120, LIBIPW_CH_PASSIVE_ONLY}, + {5620, 124, LIBIPW_CH_PASSIVE_ONLY}, + {5640, 128, LIBIPW_CH_PASSIVE_ONLY}, + {5660, 132, LIBIPW_CH_PASSIVE_ONLY}, + {5680, 136, LIBIPW_CH_PASSIVE_ONLY}, + {5700, 140, LIBIPW_CH_PASSIVE_ONLY}}, + }, + + { /* Custom Japan */ + "ZZJ", + .bg_channels = 14, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 
6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}, {2467, 12}, + {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}}, + .a_channels = 4, + .a = {{5170, 34}, {5190, 38}, + {5210, 42}, {5230, 46}}, + }, + + { /* Rest of World */ + "ZZR", + .bg_channels = 14, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}, {2467, 12}, + {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY | + LIBIPW_CH_PASSIVE_ONLY}}, + }, + + { /* High Band */ + "ZZH", + .bg_channels = 13, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}, + {2467, 12, LIBIPW_CH_PASSIVE_ONLY}, + {2472, 13, LIBIPW_CH_PASSIVE_ONLY}}, + .a_channels = 4, + .a = {{5745, 149}, {5765, 153}, + {5785, 157}, {5805, 161}}, + }, + + { /* Custom Europe */ + "ZZG", + .bg_channels = 13, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}, + {2467, 12}, {2472, 13}}, + .a_channels = 4, + .a = {{5180, 36}, {5200, 40}, + {5220, 44}, {5240, 48}}, + }, + + { /* Europe */ + "ZZK", + .bg_channels = 13, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}, + {2467, 12, LIBIPW_CH_PASSIVE_ONLY}, + {2472, 13, LIBIPW_CH_PASSIVE_ONLY}}, + .a_channels = 24, + .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY}, + {5200, 40, LIBIPW_CH_PASSIVE_ONLY}, + {5220, 44, LIBIPW_CH_PASSIVE_ONLY}, + {5240, 48, LIBIPW_CH_PASSIVE_ONLY}, + {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, + {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, + {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, + {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, + {5500, 100, LIBIPW_CH_PASSIVE_ONLY}, + {5520, 104, LIBIPW_CH_PASSIVE_ONLY}, + {5540, 108, LIBIPW_CH_PASSIVE_ONLY}, + {5560, 112, LIBIPW_CH_PASSIVE_ONLY}, + {5580, 116, LIBIPW_CH_PASSIVE_ONLY}, + {5600, 120, LIBIPW_CH_PASSIVE_ONLY}, + {5620, 124, LIBIPW_CH_PASSIVE_ONLY}, + {5640, 128, LIBIPW_CH_PASSIVE_ONLY}, + {5660, 132, LIBIPW_CH_PASSIVE_ONLY}, + {5680, 136, LIBIPW_CH_PASSIVE_ONLY}, + {5700, 140, LIBIPW_CH_PASSIVE_ONLY}, + {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, + {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, + {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, + {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, + {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, + }, + + { /* Europe */ + "ZZL", + .bg_channels = 11, + .bg = {{2412, 1}, {2417, 2}, {2422, 3}, + {2427, 4}, {2432, 5}, {2437, 6}, + {2442, 7}, {2447, 8}, {2452, 9}, + {2457, 10}, {2462, 11}}, + .a_channels = 13, + .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY}, + {5200, 40, LIBIPW_CH_PASSIVE_ONLY}, + {5220, 44, LIBIPW_CH_PASSIVE_ONLY}, + {5240, 48, LIBIPW_CH_PASSIVE_ONLY}, + {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, + {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, + {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, + {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, + {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, + {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, + {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, + {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, + {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, + } +}; + +#define MAX_HW_RESTARTS 5 +static int ipw_up(struct ipw_priv *priv) +{ + int rc, i, j; + + /* Age scan list entries found before suspend */ + if (priv->suspend_time) { + libipw_networks_age(priv->ieee, priv->suspend_time); + priv->suspend_time = 0; + } + + if (priv->status & STATUS_EXIT_PENDING) + return -EIO; + + if (cmdlog && !priv->cmdlog) { + priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog), + GFP_KERNEL); + if (priv->cmdlog == NULL) { + 
IPW_ERROR("Error allocating %d command log entries.\n", + cmdlog); + return -ENOMEM; + } else { + priv->cmdlog_len = cmdlog; + } + } + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + /* Load the microcode, firmware, and eeprom. + * Also start the clocks. */ + rc = ipw_load(priv); + if (rc) { + IPW_ERROR("Unable to load firmware: %d\n", rc); + return rc; + } + + ipw_init_ordinals(priv); + if (!(priv->config & CFG_CUSTOM_MAC)) + eeprom_parse_mac(priv, priv->mac_addr); + memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); + memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN); + + for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) { + if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE], + ipw_geos[j].name, 3)) + break; + } + if (j == ARRAY_SIZE(ipw_geos)) { + IPW_WARNING("SKU [%c%c%c] not recognized.\n", + priv->eeprom[EEPROM_COUNTRY_CODE + 0], + priv->eeprom[EEPROM_COUNTRY_CODE + 1], + priv->eeprom[EEPROM_COUNTRY_CODE + 2]); + j = 0; + } + if (libipw_set_geo(priv->ieee, &ipw_geos[j])) { + IPW_WARNING("Could not set geography."); + return 0; + } + + if (priv->status & STATUS_RF_KILL_SW) { + IPW_WARNING("Radio disabled by module parameter.\n"); + return 0; + } else if (rf_kill_active(priv)) { + IPW_WARNING("Radio Frequency Kill Switch is On:\n" + "Kill switch must be turned off for " + "wireless networking to work.\n"); + queue_delayed_work(priv->workqueue, &priv->rf_kill, + 2 * HZ); + return 0; + } + + rc = ipw_config(priv); + if (!rc) { + IPW_DEBUG_INFO("Configured device on count %i\n", i); + + /* If configured to try and auto-associate, kick + * off a scan. */ + queue_delayed_work(priv->workqueue, + &priv->request_scan, 0); + + return 0; + } + + IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc); + IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n", + i, MAX_HW_RESTARTS); + + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + ipw_down(priv); + } + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IPW_ERROR("Unable to initialize device after %d attempts.\n", i); + + return -EIO; +} + +static void ipw_bg_up(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, up); + mutex_lock(&priv->mutex); + ipw_up(priv); + mutex_unlock(&priv->mutex); +} + +static void ipw_deinit(struct ipw_priv *priv) +{ + int i; + + if (priv->status & STATUS_SCANNING) { + IPW_DEBUG_INFO("Aborting scan during shutdown.\n"); + ipw_abort_scan(priv); + } + + if (priv->status & STATUS_ASSOCIATED) { + IPW_DEBUG_INFO("Disassociating during shutdown.\n"); + ipw_disassociate(priv); + } + + ipw_led_shutdown(priv); + + /* Wait up to 1s for status to change to not scanning and not + * associated (disassociation can take a while for a full 802.11 + * exchange) */ + for (i = 1000; i && (priv->status & + (STATUS_DISASSOCIATING | + STATUS_ASSOCIATED | STATUS_SCANNING)); i--) + udelay(10); + + if (priv->status & (STATUS_DISASSOCIATING | + STATUS_ASSOCIATED | STATUS_SCANNING)) + IPW_DEBUG_INFO("Still associated or scanning...\n"); + else + IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i); + + /* Attempt to disable the card */ + ipw_send_card_disable(priv, 0); + + priv->status &= ~STATUS_INIT; +} + +static void ipw_down(struct ipw_priv *priv) +{ + int exit_pending = priv->status & STATUS_EXIT_PENDING; + + priv->status |= STATUS_EXIT_PENDING; + + if (ipw_is_init(priv)) + ipw_deinit(priv); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if 
(!exit_pending) + priv->status &= ~STATUS_EXIT_PENDING; + + /* tell the device to stop sending interrupts */ + ipw_disable_interrupts(priv); + + /* Clear all bits but the RF Kill */ + priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING; + netif_carrier_off(priv->net_dev); + + ipw_stop_nic(priv); + + ipw_led_radio_off(priv); +} + +static void ipw_bg_down(struct work_struct *work) +{ + struct ipw_priv *priv = + container_of(work, struct ipw_priv, down); + mutex_lock(&priv->mutex); + ipw_down(priv); + mutex_unlock(&priv->mutex); +} + +/* Called by register_netdev() */ +static int ipw_net_init(struct net_device *dev) +{ + int i, rc = 0; + struct ipw_priv *priv = libipw_priv(dev); + const struct libipw_geo *geo = libipw_get_geo(priv->ieee); + struct wireless_dev *wdev = &priv->ieee->wdev; + mutex_lock(&priv->mutex); + + if (ipw_up(priv)) { + rc = -EIO; + goto out; + } + + memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); + + /* fill-out priv->ieee->bg_band */ + if (geo->bg_channels) { + struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band; + + bg_band->band = IEEE80211_BAND_2GHZ; + bg_band->n_channels = geo->bg_channels; + bg_band->channels = + kzalloc(geo->bg_channels * + sizeof(struct ieee80211_channel), GFP_KERNEL); + /* translate geo->bg to bg_band.channels */ + for (i = 0; i < geo->bg_channels; i++) { + bg_band->channels[i].band = IEEE80211_BAND_2GHZ; + bg_band->channels[i].center_freq = geo->bg[i].freq; + bg_band->channels[i].hw_value = geo->bg[i].channel; + bg_band->channels[i].max_power = geo->bg[i].max_power; + if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) + bg_band->channels[i].flags |= + IEEE80211_CHAN_PASSIVE_SCAN; + if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS) + bg_band->channels[i].flags |= + IEEE80211_CHAN_NO_IBSS; + if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT) + bg_band->channels[i].flags |= + IEEE80211_CHAN_RADAR; + /* No equivalent for LIBIPW_CH_80211H_RULES, + LIBIPW_CH_UNIFORM_SPREADING, or + LIBIPW_CH_B_ONLY... */ + } + /* point at bitrate info */ + bg_band->bitrates = ipw2200_bg_rates; + bg_band->n_bitrates = ipw2200_num_bg_rates; + + wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; + } + + /* fill-out priv->ieee->a_band */ + if (geo->a_channels) { + struct ieee80211_supported_band *a_band = &priv->ieee->a_band; + + a_band->band = IEEE80211_BAND_5GHZ; + a_band->n_channels = geo->a_channels; + a_band->channels = + kzalloc(geo->a_channels * + sizeof(struct ieee80211_channel), GFP_KERNEL); + /* translate geo->a to a_band.channels */ + for (i = 0; i < geo->a_channels; i++) { + a_band->channels[i].band = IEEE80211_BAND_5GHZ; + a_band->channels[i].center_freq = geo->a[i].freq; + a_band->channels[i].hw_value = geo->a[i].channel; + a_band->channels[i].max_power = geo->a[i].max_power; + if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) + a_band->channels[i].flags |= + IEEE80211_CHAN_PASSIVE_SCAN; + if (geo->a[i].flags & LIBIPW_CH_NO_IBSS) + a_band->channels[i].flags |= + IEEE80211_CHAN_NO_IBSS; + if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT) + a_band->channels[i].flags |= + IEEE80211_CHAN_RADAR; + /* No equivalent for LIBIPW_CH_80211H_RULES, + LIBIPW_CH_UNIFORM_SPREADING, or + LIBIPW_CH_B_ONLY... */ + } + /* point at bitrate info */ + a_band->bitrates = ipw2200_a_rates; + a_band->n_bitrates = ipw2200_num_a_rates; + + wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band; + } + + set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); + + /* With that information in place, we can now register the wiphy... 
*/ + if (wiphy_register(wdev->wiphy)) { + rc = -EIO; + goto out; + } + +out: + mutex_unlock(&priv->mutex); + return rc; +} + +/* PCI driver stuff */ +static DEFINE_PCI_DEVICE_TABLE(card_ids) = { + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0}, + {PCI_VDEVICE(INTEL, 0x104f), 0}, + {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */ + {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */ + {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */ + {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */ + + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, card_ids); + +static struct attribute *ipw_sysfs_entries[] = { + &dev_attr_rf_kill.attr, + &dev_attr_direct_dword.attr, + &dev_attr_indirect_byte.attr, + &dev_attr_indirect_dword.attr, + &dev_attr_mem_gpio_reg.attr, + &dev_attr_command_event_reg.attr, + &dev_attr_nic_type.attr, + &dev_attr_status.attr, + &dev_attr_cfg.attr, + &dev_attr_error.attr, + &dev_attr_event_log.attr, + &dev_attr_cmd_log.attr, + &dev_attr_eeprom_delay.attr, + &dev_attr_ucode_version.attr, + &dev_attr_rtc.attr, + &dev_attr_scan_age.attr, + &dev_attr_led.attr, + &dev_attr_speed_scan.attr, + &dev_attr_net_stats.attr, + &dev_attr_channels.attr, +#ifdef CONFIG_IPW2200_PROMISCUOUS + &dev_attr_rtap_iface.attr, + &dev_attr_rtap_filter.attr, +#endif + NULL +}; + +static struct attribute_group ipw_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = ipw_sysfs_entries, +}; + +#ifdef CONFIG_IPW2200_PROMISCUOUS +static int ipw_prom_open(struct net_device *dev) +{ + struct ipw_prom_priv *prom_priv = libipw_priv(dev); + struct ipw_priv *priv = prom_priv->priv; + + IPW_DEBUG_INFO("prom dev->open\n"); + netif_carrier_off(dev); + + if (priv->ieee->iw_mode != IW_MODE_MONITOR) { + priv->sys_config.accept_all_data_frames = 1; + priv->sys_config.accept_non_directed_frames = 1; + priv->sys_config.accept_all_mgmt_bcpr = 1; + priv->sys_config.accept_all_mgmt_frames = 1; + + ipw_send_system_config(priv); + } + + return 0; +} + +static int ipw_prom_stop(struct net_device *dev) +{ + struct ipw_prom_priv *prom_priv = libipw_priv(dev); + struct ipw_priv *priv = prom_priv->priv; + + IPW_DEBUG_INFO("prom dev->stop\n"); + + if (priv->ieee->iw_mode != IW_MODE_MONITOR) { + priv->sys_config.accept_all_data_frames = 0; + priv->sys_config.accept_non_directed_frames = 0; + priv->sys_config.accept_all_mgmt_bcpr = 0; + priv->sys_config.accept_all_mgmt_frames = 0; + + ipw_send_system_config(priv); + } + + return 0; +} + +static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + IPW_DEBUG_INFO("prom dev->xmit\n"); + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + 
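For reference, the frames that ipw_handle_promiscuous_tx() above mirrors onto the rtap interface carry a minimal radiotap encapsulation: an 8-byte radiotap header advertising only the CHANNEL field, followed by the channel frequency and channel flags, then the 802.11 frame (or just its MAC header when one of the *_HEADER_ONLY filter bits is set). The layout sketch below is illustrative only, not driver code; the struct name and the use of stdint types are this note's own, and all multi-byte fields are little-endian on the wire.

#include <stdint.h>

/* Illustrative layout of the radiotap prefix built by
 * ipw_handle_promiscuous_tx() for each mirrored frame. */
struct ipw_rtap_channel_prefix {
	uint8_t  it_version;	/* PKTHDR_RADIOTAP_VERSION (0) */
	uint8_t  it_pad;
	uint16_t it_len;	/* length of this prefix; here 12 bytes */
	uint32_t it_present;	/* 1 << IEEE80211_RADIOTAP_CHANNEL (bit 3) */
	uint16_t chan_freq;	/* e.g. 2412 for 2.4 GHz channel 1 */
	uint16_t chan_flags;	/* CCK/OFDM and 2 GHz/5 GHz channel flags */
} __attribute__((packed));

A capture tool reading the rtap interface therefore sees each mirrored frame prefixed by these 12 bytes ahead of the 802.11 header.
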
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops ipw_prom_netdev_ops = { + .ndo_open = ipw_prom_open, + .ndo_stop = ipw_prom_stop, + .ndo_start_xmit = ipw_prom_hard_start_xmit, + .ndo_change_mtu = libipw_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; +#endif + +static int ipw_prom_alloc(struct ipw_priv *priv) +{ + int rc = 0; + + if (priv->prom_net_dev) + return -EPERM; + + priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1); + if (priv->prom_net_dev == NULL) + return -ENOMEM; + + priv->prom_priv = libipw_priv(priv->prom_net_dev); + priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev); + priv->prom_priv->priv = priv; + + strcpy(priv->prom_net_dev->name, "rtap%d"); + memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN); + + priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops; +#else + priv->prom_net_dev->open = ipw_prom_open; + priv->prom_net_dev->stop = ipw_prom_stop; + priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit; +#endif + + priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR; + SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev); + + rc = register_netdev(priv->prom_net_dev); + if (rc) { + free_ieee80211(priv->prom_net_dev, 1); + priv->prom_net_dev = NULL; + return rc; + } + + return 0; +} + +static void ipw_prom_free(struct ipw_priv *priv) +{ + if (!priv->prom_net_dev) + return; + + unregister_netdev(priv->prom_net_dev); + free_ieee80211(priv->prom_net_dev, 1); + + priv->prom_net_dev = NULL; +} + +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops ipw_netdev_ops = { + .ndo_init = ipw_net_init, + .ndo_open = ipw_net_open, + .ndo_stop = ipw_net_stop, + .ndo_set_multicast_list = ipw_net_set_multicast_list, + .ndo_set_mac_address = ipw_net_set_mac_address, + .ndo_start_xmit = libipw_xmit, + .ndo_change_mtu = libipw_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; +#endif + +static int __devinit ipw_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int err = 0; + struct net_device *net_dev; + void __iomem *base; + u32 length, val; + struct ipw_priv *priv; + int i; + + net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0); + if (net_dev == NULL) { + err = -ENOMEM; + goto out; + } + + priv = libipw_priv(net_dev); + priv->ieee = netdev_priv(net_dev); + + priv->net_dev = net_dev; + priv->pci_dev = pdev; + ipw_debug_level = debug; + spin_lock_init(&priv->irq_lock); + spin_lock_init(&priv->lock); + for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) + INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); + + mutex_init(&priv->mutex); + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_free_ieee80211; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); + goto out_pci_disable_device; + } + + pci_set_drvdata(pdev, priv); + + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_read_config_dword(pdev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + + length = pci_resource_len(pdev, 0); + priv->hw_len = 
length; + + base = pci_ioremap_bar(pdev, 0); + if (!base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + priv->hw_base = base; + IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length); + IPW_DEBUG_INFO("pci_resource_base = %p\n", base); + + err = ipw_setup_deferred_work(priv); + if (err) { + IPW_ERROR("Unable to setup deferred work\n"); + goto out_iounmap; + } + + ipw_sw_reset(priv, 1); + + err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv); + if (err) { + IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); + goto out_destroy_workqueue; + } + + SET_NETDEV_DEV(net_dev, &pdev->dev); + + mutex_lock(&priv->mutex); + + priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit; + priv->ieee->set_security = shim__set_security; + priv->ieee->is_queue_full = ipw_net_is_queue_full; + +#ifdef CONFIG_IPW2200_QOS + priv->ieee->is_qos_active = ipw_is_qos_active; + priv->ieee->handle_probe_response = ipw_handle_beacon; + priv->ieee->handle_beacon = ipw_handle_probe_response; + priv->ieee->handle_assoc_response = ipw_handle_assoc_response; +#endif /* CONFIG_IPW2200_QOS */ + + priv->ieee->perfect_rssi = -20; + priv->ieee->worst_rssi = -85; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + net_dev->netdev_ops = &ipw_netdev_ops; +#else + net_dev->open = ipw_net_open; + net_dev->stop = ipw_net_stop; + net_dev->init = ipw_net_init; + net_dev->set_multicast_list = ipw_net_set_multicast_list; + net_dev->set_mac_address = ipw_net_set_mac_address; +#endif + priv->wireless_data.spy_data = &priv->ieee->spy_data; + net_dev->wireless_data = &priv->wireless_data; + net_dev->wireless_handlers = &ipw_wx_handler_def; + net_dev->ethtool_ops = &ipw_ethtool_ops; + net_dev->irq = pdev->irq; + net_dev->base_addr = (unsigned long)priv->hw_base; + net_dev->mem_start = pci_resource_start(pdev, 0); + net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1; + + err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); + if (err) { + IPW_ERROR("failed to create sysfs device attributes\n"); + mutex_unlock(&priv->mutex); + goto out_release_irq; + } + + mutex_unlock(&priv->mutex); + err = register_netdev(net_dev); + if (err) { + IPW_ERROR("failed to register network device\n"); + goto out_remove_sysfs; + } + +#ifdef CONFIG_IPW2200_PROMISCUOUS + if (rtap_iface) { + err = ipw_prom_alloc(priv); + if (err) { + IPW_ERROR("Failed to register promiscuous network " + "device (error %d).\n", err); + unregister_netdev(priv->net_dev); + goto out_remove_sysfs; + } + } +#endif + + printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg " + "channels, %d 802.11a channels)\n", + priv->ieee->geo.name, priv->ieee->geo.bg_channels, + priv->ieee->geo.a_channels); + + return 0; + + out_remove_sysfs: + sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); + out_release_irq: + free_irq(pdev->irq, priv); + out_destroy_workqueue: + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + out_iounmap: + iounmap(priv->hw_base); + out_pci_release_regions: + pci_release_regions(pdev); + out_pci_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + out_free_ieee80211: + free_ieee80211(priv->net_dev, 0); + out: + return err; +} + +static void __devexit ipw_pci_remove(struct pci_dev *pdev) +{ + struct ipw_priv *priv = pci_get_drvdata(pdev); + struct list_head *p, *q; + int i; + + if (!priv) + return; + + mutex_lock(&priv->mutex); + + priv->status |= STATUS_EXIT_PENDING; + ipw_down(priv); + sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); + + 
mutex_unlock(&priv->mutex); + + unregister_netdev(priv->net_dev); + + if (priv->rxq) { + ipw_rx_queue_free(priv, priv->rxq); + priv->rxq = NULL; + } + ipw_tx_queue_free(priv); + + if (priv->cmdlog) { + kfree(priv->cmdlog); + priv->cmdlog = NULL; + } + /* ipw_down will ensure that there is no more pending work + * in the workqueue's, so we can safely remove them now. */ + cancel_delayed_work(&priv->adhoc_check); + cancel_delayed_work(&priv->gather_stats); + cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->request_direct_scan); + cancel_delayed_work(&priv->request_passive_scan); + cancel_delayed_work(&priv->scan_event); + cancel_delayed_work(&priv->rf_kill); + cancel_delayed_work(&priv->scan_check); + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + + /* Free MAC hash list for ADHOC */ + for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) { + list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) { + list_del(p); + kfree(list_entry(p, struct ipw_ibss_seq, list)); + } + } + + kfree(priv->error); + priv->error = NULL; + +#ifdef CONFIG_IPW2200_PROMISCUOUS + ipw_prom_free(priv); +#endif + + free_irq(pdev->irq, priv); + iounmap(priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + /* wiphy_unregister needs to be here, before free_ieee80211 */ + wiphy_unregister(priv->ieee->wdev.wiphy); + kfree(priv->ieee->a_band.channels); + kfree(priv->ieee->bg_band.channels); + free_ieee80211(priv->net_dev, 0); + free_firmware(); +} + +#ifdef CONFIG_PM +static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct ipw_priv *priv = pci_get_drvdata(pdev); + struct net_device *dev = priv->net_dev; + + printk(KERN_INFO "%s: Going into suspend...\n", dev->name); + + /* Take down the device; powers it off, etc. */ + ipw_down(priv); + + /* Remove the PRESENT state of the device */ + netif_device_detach(dev); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + priv->suspend_at = get_seconds(); + + return 0; +} + +static int ipw_pci_resume(struct pci_dev *pdev) +{ + struct ipw_priv *priv = pci_get_drvdata(pdev); + struct net_device *dev = priv->net_dev; + int err; + u32 val; + + printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name); + + pci_set_power_state(pdev, PCI_D0); + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "%s: pci_enable_device failed on resume\n", + dev->name); + return err; + } + pci_restore_state(pdev); + + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries + * from interfering with C3 CPU state. pci_restore_state won't help + * here since it only restores the first 64 bytes pci config header. + */ + pci_read_config_dword(pdev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + + /* Set the device back into the PRESENT state; this will also wake + * the queue of needed */ + netif_device_attach(dev); + + priv->suspend_time = get_seconds() - priv->suspend_at; + + /* Bring the device back up */ + queue_work(priv->workqueue, &priv->up); + + return 0; +} +#endif + +static void ipw_pci_shutdown(struct pci_dev *pdev) +{ + struct ipw_priv *priv = pci_get_drvdata(pdev); + + /* Take down the device; powers it off, etc. 
*/ + ipw_down(priv); + + pci_disable_device(pdev); +} + +/* driver initialization stuff */ +static struct pci_driver ipw_driver = { + .name = DRV_NAME, + .id_table = card_ids, + .probe = ipw_pci_probe, + .remove = __devexit_p(ipw_pci_remove), +#ifdef CONFIG_PM + .suspend = ipw_pci_suspend, + .resume = ipw_pci_resume, +#endif + .shutdown = ipw_pci_shutdown, +}; + +static int __init ipw_init(void) +{ + int ret; + + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + + ret = pci_register_driver(&ipw_driver); + if (ret) { + IPW_ERROR("Unable to initialize PCI module\n"); + return ret; + } + + ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level); + if (ret) { + IPW_ERROR("Unable to create driver sysfs file\n"); + pci_unregister_driver(&ipw_driver); + return ret; + } + + return ret; +} + +static void __exit ipw_exit(void) +{ + driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level); + pci_unregister_driver(&ipw_driver); +} + +module_param(disable, int, 0444); +MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); + +module_param(associate, int, 0444); +MODULE_PARM_DESC(associate, "auto associate when scanning (default off)"); + +module_param(auto_create, int, 0444); +MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)"); + +module_param_named(led, led_support, int, 0444); +MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)"); + +module_param(debug, int, 0444); +MODULE_PARM_DESC(debug, "debug output mask"); + +module_param_named(channel, default_channel, int, 0444); +MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); + +#ifdef CONFIG_IPW2200_PROMISCUOUS +module_param(rtap_iface, int, 0444); +MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)"); +#endif + +#ifdef CONFIG_IPW2200_QOS +module_param(qos_enable, int, 0444); +MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis"); + +module_param(qos_burst_enable, int, 0444); +MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode"); + +module_param(qos_no_ack_mask, int, 0444); +MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack"); + +module_param(burst_duration_CCK, int, 0444); +MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value"); + +module_param(burst_duration_OFDM, int, 0444); +MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value"); +#endif /* CONFIG_IPW2200_QOS */ + +#ifdef CONFIG_IPW2200_MONITOR +module_param_named(mode, network_mode, int, 0444); +MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)"); +#else +module_param_named(mode, network_mode, int, 0444); +MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)"); +#endif + +module_param(bt_coexist, int, 0444); +MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)"); + +module_param(hwcrypto, int, 0444); +MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)"); + +module_param(cmdlog, int, 0444); +MODULE_PARM_DESC(cmdlog, + "allocate a ring buffer for logging firmware commands"); + +module_param(roaming, int, 0444); +MODULE_PARM_DESC(roaming, "enable roaming support (default on)"); + +module_param(antenna, int, 0444); +MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)"); + +module_exit(ipw_exit); +module_init(ipw_init); diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h new file 
mode 100644 index 0000000..bf0eeb2 --- /dev/null +++ b/drivers/net/wireless/ipw2x00/ipw2200.h @@ -0,0 +1,2017 @@ +/****************************************************************************** + + Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + Intel Linux Wireless + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +******************************************************************************/ + +#ifndef __ipw2200_h__ +#define __ipw2200_h__ + +#define WEXT_USECHANNELS 1 + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#define DRV_NAME "ipw2200" + +#include + +#include "libipw.h" + +/* Authentication and Association States */ +enum connection_manager_assoc_states { + CMAS_INIT = 0, + CMAS_TX_AUTH_SEQ_1, + CMAS_RX_AUTH_SEQ_2, + CMAS_AUTH_SEQ_1_PASS, + CMAS_AUTH_SEQ_1_FAIL, + CMAS_TX_AUTH_SEQ_3, + CMAS_RX_AUTH_SEQ_4, + CMAS_AUTH_SEQ_2_PASS, + CMAS_AUTH_SEQ_2_FAIL, + CMAS_AUTHENTICATED, + CMAS_TX_ASSOC, + CMAS_RX_ASSOC_RESP, + CMAS_ASSOCIATED, + CMAS_LAST +}; + +#define IPW_WAIT (1<<0) +#define IPW_QUIET (1<<1) +#define IPW_ROAMING (1<<2) + +#define IPW_POWER_MODE_CAM 0x00 //(always on) +#define IPW_POWER_INDEX_1 0x01 +#define IPW_POWER_INDEX_2 0x02 +#define IPW_POWER_INDEX_3 0x03 +#define IPW_POWER_INDEX_4 0x04 +#define IPW_POWER_INDEX_5 0x05 +#define IPW_POWER_AC 0x06 +#define IPW_POWER_BATTERY 0x07 +#define IPW_POWER_LIMIT 0x07 +#define IPW_POWER_MASK 0x0F +#define IPW_POWER_ENABLED 0x10 +#define IPW_POWER_LEVEL(x) ((x) & IPW_POWER_MASK) + +#define IPW_CMD_HOST_COMPLETE 2 +#define IPW_CMD_POWER_DOWN 4 +#define IPW_CMD_SYSTEM_CONFIG 6 +#define IPW_CMD_MULTICAST_ADDRESS 7 +#define IPW_CMD_SSID 8 +#define IPW_CMD_ADAPTER_ADDRESS 11 +#define IPW_CMD_PORT_TYPE 12 +#define IPW_CMD_RTS_THRESHOLD 15 +#define IPW_CMD_FRAG_THRESHOLD 16 +#define IPW_CMD_POWER_MODE 17 +#define IPW_CMD_WEP_KEY 18 +#define IPW_CMD_TGI_TX_KEY 19 +#define IPW_CMD_SCAN_REQUEST 20 +#define IPW_CMD_ASSOCIATE 21 +#define IPW_CMD_SUPPORTED_RATES 22 +#define IPW_CMD_SCAN_ABORT 23 +#define IPW_CMD_TX_FLUSH 24 +#define IPW_CMD_QOS_PARAMETERS 25 +#define IPW_CMD_SCAN_REQUEST_EXT 26 +#define IPW_CMD_DINO_CONFIG 30 +#define IPW_CMD_RSN_CAPABILITIES 31 +#define IPW_CMD_RX_KEY 32 +#define IPW_CMD_CARD_DISABLE 33 +#define IPW_CMD_SEED_NUMBER 34 +#define IPW_CMD_TX_POWER 35 +#define IPW_CMD_COUNTRY_INFO 36 +#define IPW_CMD_AIRONET_INFO 37 +#define IPW_CMD_AP_TX_POWER 38 +#define IPW_CMD_CCKM_INFO 39 +#define IPW_CMD_CCX_VER_INFO 40 +#define IPW_CMD_SET_CALIBRATION 41 +#define IPW_CMD_SENSITIVITY_CALIB 42 +#define IPW_CMD_RETRY_LIMIT 51 +#define IPW_CMD_IPW_PRE_POWER_DOWN 58 +#define 
IPW_CMD_VAP_BEACON_TEMPLATE 60 +#define IPW_CMD_VAP_DTIM_PERIOD 61 +#define IPW_CMD_EXT_SUPPORTED_RATES 62 +#define IPW_CMD_VAP_LOCAL_TX_PWR_CONSTRAINT 63 +#define IPW_CMD_VAP_QUIET_INTERVALS 64 +#define IPW_CMD_VAP_CHANNEL_SWITCH 65 +#define IPW_CMD_VAP_MANDATORY_CHANNELS 66 +#define IPW_CMD_VAP_CELL_PWR_LIMIT 67 +#define IPW_CMD_VAP_CF_PARAM_SET 68 +#define IPW_CMD_VAP_SET_BEACONING_STATE 69 +#define IPW_CMD_MEASUREMENT 80 +#define IPW_CMD_POWER_CAPABILITY 81 +#define IPW_CMD_SUPPORTED_CHANNELS 82 +#define IPW_CMD_TPC_REPORT 83 +#define IPW_CMD_WME_INFO 84 +#define IPW_CMD_PRODUCTION_COMMAND 85 +#define IPW_CMD_LINKSYS_EOU_INFO 90 + +#define RFD_SIZE 4 +#define NUM_TFD_CHUNKS 6 + +#define TX_QUEUE_SIZE 32 +#define RX_QUEUE_SIZE 32 + +#define DINO_CMD_WEP_KEY 0x08 +#define DINO_CMD_TX 0x0B +#define DCT_ANTENNA_A 0x01 +#define DCT_ANTENNA_B 0x02 + +#define IPW_A_MODE 0 +#define IPW_B_MODE 1 +#define IPW_G_MODE 2 + +/* + * TX Queue Flag Definitions + */ + +/* tx wep key definition */ +#define DCT_WEP_KEY_NOT_IMMIDIATE 0x00 +#define DCT_WEP_KEY_64Bit 0x40 +#define DCT_WEP_KEY_128Bit 0x80 +#define DCT_WEP_KEY_128bitIV 0xC0 +#define DCT_WEP_KEY_SIZE_MASK 0xC0 + +#define DCT_WEP_KEY_INDEX_MASK 0x0F +#define DCT_WEP_INDEX_USE_IMMEDIATE 0x20 + +/* abort attempt if mgmt frame is rx'd */ +#define DCT_FLAG_ABORT_MGMT 0x01 + +/* require CTS */ +#define DCT_FLAG_CTS_REQUIRED 0x02 + +/* use short preamble */ +#define DCT_FLAG_LONG_PREAMBLE 0x00 +#define DCT_FLAG_SHORT_PREAMBLE 0x04 + +/* RTS/CTS first */ +#define DCT_FLAG_RTS_REQD 0x08 + +/* dont calculate duration field */ +#define DCT_FLAG_DUR_SET 0x10 + +/* even if MAC WEP set (allows pre-encrypt) */ +#define DCT_FLAG_NO_WEP 0x20 + +/* overwrite TSF field */ +#define DCT_FLAG_TSF_REQD 0x40 + +/* ACK rx is expected to follow */ +#define DCT_FLAG_ACK_REQD 0x80 + +/* TX flags extension */ +#define DCT_FLAG_EXT_MODE_CCK 0x01 +#define DCT_FLAG_EXT_MODE_OFDM 0x00 + +#define DCT_FLAG_EXT_SECURITY_WEP 0x00 +#define DCT_FLAG_EXT_SECURITY_NO DCT_FLAG_EXT_SECURITY_WEP +#define DCT_FLAG_EXT_SECURITY_CKIP 0x04 +#define DCT_FLAG_EXT_SECURITY_CCM 0x08 +#define DCT_FLAG_EXT_SECURITY_TKIP 0x0C +#define DCT_FLAG_EXT_SECURITY_MASK 0x0C + +#define DCT_FLAG_EXT_QOS_ENABLED 0x10 + +#define DCT_FLAG_EXT_HC_NO_SIFS_PIFS 0x00 +#define DCT_FLAG_EXT_HC_SIFS 0x20 +#define DCT_FLAG_EXT_HC_PIFS 0x40 + +#define TX_RX_TYPE_MASK 0xFF +#define TX_FRAME_TYPE 0x00 +#define TX_HOST_COMMAND_TYPE 0x01 +#define RX_FRAME_TYPE 0x09 +#define RX_HOST_NOTIFICATION_TYPE 0x03 +#define RX_HOST_CMD_RESPONSE_TYPE 0x04 +#define RX_TX_FRAME_RESPONSE_TYPE 0x05 +#define TFD_NEED_IRQ_MASK 0x04 + +#define HOST_CMD_DINO_CONFIG 30 + +#define HOST_NOTIFICATION_STATUS_ASSOCIATED 10 +#define HOST_NOTIFICATION_STATUS_AUTHENTICATE 11 +#define HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT 12 +#define HOST_NOTIFICATION_STATUS_SCAN_COMPLETED 13 +#define HOST_NOTIFICATION_STATUS_FRAG_LENGTH 14 +#define HOST_NOTIFICATION_STATUS_LINK_DETERIORATION 15 +#define HOST_NOTIFICATION_DINO_CONFIG_RESPONSE 16 +#define HOST_NOTIFICATION_STATUS_BEACON_STATE 17 +#define HOST_NOTIFICATION_STATUS_TGI_TX_KEY 18 +#define HOST_NOTIFICATION_TX_STATUS 19 +#define HOST_NOTIFICATION_CALIB_KEEP_RESULTS 20 +#define HOST_NOTIFICATION_MEASUREMENT_STARTED 21 +#define HOST_NOTIFICATION_MEASUREMENT_ENDED 22 +#define HOST_NOTIFICATION_CHANNEL_SWITCHED 23 +#define HOST_NOTIFICATION_RX_DURING_QUIET_PERIOD 24 +#define HOST_NOTIFICATION_NOISE_STATS 25 +#define HOST_NOTIFICATION_S36_MEASUREMENT_ACCEPTED 30 +#define 
HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31 + +#define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1 +#define IPW_MB_SCAN_CANCEL_THRESHOLD 3 +#define IPW_MB_ROAMING_THRESHOLD_MIN 1 +#define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8 +#define IPW_MB_ROAMING_THRESHOLD_MAX 30 +#define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT 3*IPW_MB_ROAMING_THRESHOLD_DEFAULT +#define IPW_REAL_RATE_RX_PACKET_THRESHOLD 300 + +#define MACADRR_BYTE_LEN 6 + +#define DCR_TYPE_AP 0x01 +#define DCR_TYPE_WLAP 0x02 +#define DCR_TYPE_MU_ESS 0x03 +#define DCR_TYPE_MU_IBSS 0x04 +#define DCR_TYPE_MU_PIBSS 0x05 +#define DCR_TYPE_SNIFFER 0x06 +#define DCR_TYPE_MU_BSS DCR_TYPE_MU_ESS + +/* QoS definitions */ + +#define CW_MIN_OFDM 15 +#define CW_MAX_OFDM 1023 +#define CW_MIN_CCK 31 +#define CW_MAX_CCK 1023 + +#define QOS_TX0_CW_MIN_OFDM cpu_to_le16(CW_MIN_OFDM) +#define QOS_TX1_CW_MIN_OFDM cpu_to_le16(CW_MIN_OFDM) +#define QOS_TX2_CW_MIN_OFDM cpu_to_le16((CW_MIN_OFDM + 1)/2 - 1) +#define QOS_TX3_CW_MIN_OFDM cpu_to_le16((CW_MIN_OFDM + 1)/4 - 1) + +#define QOS_TX0_CW_MIN_CCK cpu_to_le16(CW_MIN_CCK) +#define QOS_TX1_CW_MIN_CCK cpu_to_le16(CW_MIN_CCK) +#define QOS_TX2_CW_MIN_CCK cpu_to_le16((CW_MIN_CCK + 1)/2 - 1) +#define QOS_TX3_CW_MIN_CCK cpu_to_le16((CW_MIN_CCK + 1)/4 - 1) + +#define QOS_TX0_CW_MAX_OFDM cpu_to_le16(CW_MAX_OFDM) +#define QOS_TX1_CW_MAX_OFDM cpu_to_le16(CW_MAX_OFDM) +#define QOS_TX2_CW_MAX_OFDM cpu_to_le16(CW_MIN_OFDM) +#define QOS_TX3_CW_MAX_OFDM cpu_to_le16((CW_MIN_OFDM + 1)/2 - 1) + +#define QOS_TX0_CW_MAX_CCK cpu_to_le16(CW_MAX_CCK) +#define QOS_TX1_CW_MAX_CCK cpu_to_le16(CW_MAX_CCK) +#define QOS_TX2_CW_MAX_CCK cpu_to_le16(CW_MIN_CCK) +#define QOS_TX3_CW_MAX_CCK cpu_to_le16((CW_MIN_CCK + 1)/2 - 1) + +#define QOS_TX0_AIFS (3 - QOS_AIFSN_MIN_VALUE) +#define QOS_TX1_AIFS (7 - QOS_AIFSN_MIN_VALUE) +#define QOS_TX2_AIFS (2 - QOS_AIFSN_MIN_VALUE) +#define QOS_TX3_AIFS (2 - QOS_AIFSN_MIN_VALUE) + +#define QOS_TX0_ACM 0 +#define QOS_TX1_ACM 0 +#define QOS_TX2_ACM 0 +#define QOS_TX3_ACM 0 + +#define QOS_TX0_TXOP_LIMIT_CCK 0 +#define QOS_TX1_TXOP_LIMIT_CCK 0 +#define QOS_TX2_TXOP_LIMIT_CCK cpu_to_le16(6016) +#define QOS_TX3_TXOP_LIMIT_CCK cpu_to_le16(3264) + +#define QOS_TX0_TXOP_LIMIT_OFDM 0 +#define QOS_TX1_TXOP_LIMIT_OFDM 0 +#define QOS_TX2_TXOP_LIMIT_OFDM cpu_to_le16(3008) +#define QOS_TX3_TXOP_LIMIT_OFDM cpu_to_le16(1504) + +#define DEF_TX0_CW_MIN_OFDM cpu_to_le16(CW_MIN_OFDM) +#define DEF_TX1_CW_MIN_OFDM cpu_to_le16(CW_MIN_OFDM) +#define DEF_TX2_CW_MIN_OFDM cpu_to_le16(CW_MIN_OFDM) +#define DEF_TX3_CW_MIN_OFDM cpu_to_le16(CW_MIN_OFDM) + +#define DEF_TX0_CW_MIN_CCK cpu_to_le16(CW_MIN_CCK) +#define DEF_TX1_CW_MIN_CCK cpu_to_le16(CW_MIN_CCK) +#define DEF_TX2_CW_MIN_CCK cpu_to_le16(CW_MIN_CCK) +#define DEF_TX3_CW_MIN_CCK cpu_to_le16(CW_MIN_CCK) + +#define DEF_TX0_CW_MAX_OFDM cpu_to_le16(CW_MAX_OFDM) +#define DEF_TX1_CW_MAX_OFDM cpu_to_le16(CW_MAX_OFDM) +#define DEF_TX2_CW_MAX_OFDM cpu_to_le16(CW_MAX_OFDM) +#define DEF_TX3_CW_MAX_OFDM cpu_to_le16(CW_MAX_OFDM) + +#define DEF_TX0_CW_MAX_CCK cpu_to_le16(CW_MAX_CCK) +#define DEF_TX1_CW_MAX_CCK cpu_to_le16(CW_MAX_CCK) +#define DEF_TX2_CW_MAX_CCK cpu_to_le16(CW_MAX_CCK) +#define DEF_TX3_CW_MAX_CCK cpu_to_le16(CW_MAX_CCK) + +#define DEF_TX0_AIFS 0 +#define DEF_TX1_AIFS 0 +#define DEF_TX2_AIFS 0 +#define DEF_TX3_AIFS 0 + +#define DEF_TX0_ACM 0 +#define DEF_TX1_ACM 0 +#define DEF_TX2_ACM 0 +#define DEF_TX3_ACM 0 + +#define DEF_TX0_TXOP_LIMIT_CCK 0 +#define DEF_TX1_TXOP_LIMIT_CCK 0 +#define DEF_TX2_TXOP_LIMIT_CCK 0 +#define DEF_TX3_TXOP_LIMIT_CCK 0 + +#define DEF_TX0_TXOP_LIMIT_OFDM 0 
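[Editor's aside, not part of the patch: the QOS_TXn_CW_* macros above simply scale the base contention windows, so two of the queues get halved or quartered windows. A minimal stand-alone sketch of the same arithmetic in plain C, without the kernel's cpu_to_le16():]

#include <stdio.h>

#define CW_MIN_OFDM 15   /* base values as defined in ipw2200.h above */
#define CW_MIN_CCK  31

int main(void)
{
	/* e.g. QOS_TX2_CW_MIN_OFDM = (CW_MIN_OFDM + 1)/2 - 1 = 7,
	 *      QOS_TX3_CW_MIN_OFDM = (CW_MIN_OFDM + 1)/4 - 1 = 3 */
	printf("OFDM: tx2=%d tx3=%d\n",
	       (CW_MIN_OFDM + 1) / 2 - 1, (CW_MIN_OFDM + 1) / 4 - 1);
	/* CCK: (31 + 1)/2 - 1 = 15, (31 + 1)/4 - 1 = 7 */
	printf("CCK:  tx2=%d tx3=%d\n",
	       (CW_MIN_CCK + 1) / 2 - 1, (CW_MIN_CCK + 1) / 4 - 1);
	return 0;
}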
+#define DEF_TX1_TXOP_LIMIT_OFDM 0 +#define DEF_TX2_TXOP_LIMIT_OFDM 0 +#define DEF_TX3_TXOP_LIMIT_OFDM 0 + +#define QOS_QOS_SETS 3 +#define QOS_PARAM_SET_ACTIVE 0 +#define QOS_PARAM_SET_DEF_CCK 1 +#define QOS_PARAM_SET_DEF_OFDM 2 + +#define CTRL_QOS_NO_ACK (0x0020) + +#define IPW_TX_QUEUE_1 1 +#define IPW_TX_QUEUE_2 2 +#define IPW_TX_QUEUE_3 3 +#define IPW_TX_QUEUE_4 4 + +/* QoS sturctures */ +struct ipw_qos_info { + int qos_enable; + struct libipw_qos_parameters *def_qos_parm_OFDM; + struct libipw_qos_parameters *def_qos_parm_CCK; + u32 burst_duration_CCK; + u32 burst_duration_OFDM; + u16 qos_no_ack_mask; + int burst_enable; +}; + +/**************************************************************/ +/** + * Generic queue structure + * + * Contains common data for Rx and Tx queues + */ +struct clx2_queue { + int n_bd; /**< number of BDs in this queue */ + int first_empty; /**< 1-st empty entry (index) */ + int last_used; /**< last used entry (index) */ + u32 reg_w; /**< 'write' reg (queue head), addr in domain 1 */ + u32 reg_r; /**< 'read' reg (queue tail), addr in domain 1 */ + dma_addr_t dma_addr; /**< physical addr for BD's */ + int low_mark; /**< low watermark, resume queue if free space more than this */ + int high_mark; /**< high watermark, stop queue if free space less than this */ +} __attribute__ ((packed)); /* XXX */ + +struct machdr32 { + __le16 frame_ctl; + __le16 duration; // watch out for endians! + u8 addr1[MACADRR_BYTE_LEN]; + u8 addr2[MACADRR_BYTE_LEN]; + u8 addr3[MACADRR_BYTE_LEN]; + __le16 seq_ctrl; // more endians! + u8 addr4[MACADRR_BYTE_LEN]; + __le16 qos_ctrl; +} __attribute__ ((packed)); + +struct machdr30 { + __le16 frame_ctl; + __le16 duration; // watch out for endians! + u8 addr1[MACADRR_BYTE_LEN]; + u8 addr2[MACADRR_BYTE_LEN]; + u8 addr3[MACADRR_BYTE_LEN]; + __le16 seq_ctrl; // more endians! + u8 addr4[MACADRR_BYTE_LEN]; +} __attribute__ ((packed)); + +struct machdr26 { + __le16 frame_ctl; + __le16 duration; // watch out for endians! + u8 addr1[MACADRR_BYTE_LEN]; + u8 addr2[MACADRR_BYTE_LEN]; + u8 addr3[MACADRR_BYTE_LEN]; + __le16 seq_ctrl; // more endians! + __le16 qos_ctrl; +} __attribute__ ((packed)); + +struct machdr24 { + __le16 frame_ctl; + __le16 duration; // watch out for endians! + u8 addr1[MACADRR_BYTE_LEN]; + u8 addr2[MACADRR_BYTE_LEN]; + u8 addr3[MACADRR_BYTE_LEN]; + __le16 seq_ctrl; // more endians! 
+} __attribute__ ((packed)); + +// TX TFD with 32 byte MAC Header +struct tx_tfd_32 { + struct machdr32 mchdr; // 32 + __le32 uivplaceholder[2]; // 8 +} __attribute__ ((packed)); + +// TX TFD with 30 byte MAC Header +struct tx_tfd_30 { + struct machdr30 mchdr; // 30 + u8 reserved[2]; // 2 + __le32 uivplaceholder[2]; // 8 +} __attribute__ ((packed)); + +// tx tfd with 26 byte mac header +struct tx_tfd_26 { + struct machdr26 mchdr; // 26 + u8 reserved1[2]; // 2 + __le32 uivplaceholder[2]; // 8 + u8 reserved2[4]; // 4 +} __attribute__ ((packed)); + +// tx tfd with 24 byte mac header +struct tx_tfd_24 { + struct machdr24 mchdr; // 24 + __le32 uivplaceholder[2]; // 8 + u8 reserved[8]; // 8 +} __attribute__ ((packed)); + +#define DCT_WEP_KEY_FIELD_LENGTH 16 + +struct tfd_command { + u8 index; + u8 length; + __le16 reserved; + u8 payload[0]; +} __attribute__ ((packed)); + +struct tfd_data { + /* Header */ + __le32 work_area_ptr; + u8 station_number; /* 0 for BSS */ + u8 reserved1; + __le16 reserved2; + + /* Tx Parameters */ + u8 cmd_id; + u8 seq_num; + __le16 len; + u8 priority; + u8 tx_flags; + u8 tx_flags_ext; + u8 key_index; + u8 wepkey[DCT_WEP_KEY_FIELD_LENGTH]; + u8 rate; + u8 antenna; + __le16 next_packet_duration; + __le16 next_frag_len; + __le16 back_off_counter; //////txop; + u8 retrylimit; + __le16 cwcurrent; + u8 reserved3; + + /* 802.11 MAC Header */ + union { + struct tx_tfd_24 tfd_24; + struct tx_tfd_26 tfd_26; + struct tx_tfd_30 tfd_30; + struct tx_tfd_32 tfd_32; + } tfd; + + /* Payload DMA info */ + __le32 num_chunks; + __le32 chunk_ptr[NUM_TFD_CHUNKS]; + __le16 chunk_len[NUM_TFD_CHUNKS]; +} __attribute__ ((packed)); + +struct txrx_control_flags { + u8 message_type; + u8 rx_seq_num; + u8 control_bits; + u8 reserved; +} __attribute__ ((packed)); + +#define TFD_SIZE 128 +#define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags)) + +struct tfd_frame { + struct txrx_control_flags control_flags; + union { + struct tfd_data data; + struct tfd_command cmd; + u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH]; + } u; +} __attribute__ ((packed)); + +typedef void destructor_func(const void *); + +/** + * Tx Queue for DMA. Queue consists of circular buffer of + * BD's and required locking structures. 
+ */ +struct clx2_tx_queue { + struct clx2_queue q; + struct tfd_frame *bd; + struct libipw_txb **txb; +}; + +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 32 +#define RX_LOW_WATERMARK 8 + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +// Used for passing to driver number of successes and failures per rate +struct rate_histogram { + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } success; + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } failed; +} __attribute__ ((packed)); + +/* statistics command response */ +struct ipw_cmd_stats { + u8 cmd_id; + u8 seq_num; + __le16 good_sfd; + __le16 bad_plcp; + __le16 wrong_bssid; + __le16 valid_mpdu; + __le16 bad_mac_header; + __le16 reserved_frame_types; + __le16 rx_ina; + __le16 bad_crc32; + __le16 invalid_cts; + __le16 invalid_acks; + __le16 long_distance_ina_fina; + __le16 dsp_silence_unreachable; + __le16 accumulated_rssi; + __le16 rx_ovfl_frame_tossed; + __le16 rssi_silence_threshold; + __le16 rx_ovfl_frame_supplied; + __le16 last_rx_frame_signal; + __le16 last_rx_frame_noise; + __le16 rx_autodetec_no_ofdm; + __le16 rx_autodetec_no_barker; + __le16 reserved; +} __attribute__ ((packed)); + +struct notif_channel_result { + u8 channel_num; + struct ipw_cmd_stats stats; + u8 uReserved; +} __attribute__ ((packed)); + +#define SCAN_COMPLETED_STATUS_COMPLETE 1 +#define SCAN_COMPLETED_STATUS_ABORTED 2 + +struct notif_scan_complete { + u8 scan_type; + u8 num_channels; + u8 status; + u8 reserved; +} __attribute__ ((packed)); + +struct notif_frag_length { + __le16 frag_length; + __le16 reserved; +} __attribute__ ((packed)); + +struct notif_beacon_state { + __le32 state; + __le32 number; +} __attribute__ ((packed)); + +struct notif_tgi_tx_key { + u8 key_state; + u8 security_type; + u8 station_index; + u8 reserved; +} __attribute__ ((packed)); + +#define SILENCE_OVER_THRESH (1) +#define SILENCE_UNDER_THRESH (2) + +struct notif_link_deterioration { + struct ipw_cmd_stats stats; + u8 rate; + u8 modulation; + struct rate_histogram histogram; + u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */ + __le16 silence_count; +} __attribute__ ((packed)); + +struct notif_association { + u8 state; +} __attribute__ ((packed)); + +struct notif_authenticate { + u8 state; + struct machdr24 addr; + __le16 status; +} __attribute__ ((packed)); + +struct notif_calibration { + u8 data[104]; +} __attribute__ ((packed)); + +struct notif_noise { + __le32 value; +} __attribute__ ((packed)); + +struct ipw_rx_notification { + u8 reserved[8]; + u8 subtype; + u8 flags; + __le16 size; + union { + struct notif_association assoc; + struct notif_authenticate auth; + struct notif_channel_result channel_result; + struct notif_scan_complete scan_complete; + struct notif_frag_length frag_len; + struct notif_beacon_state beacon_state; + struct notif_tgi_tx_key tgi_tx_key; + struct notif_link_deterioration link_deterioration; + struct notif_calibration calibration; + struct notif_noise noise; + u8 raw[0]; + } u; +} __attribute__ ((packed)); + +struct ipw_rx_frame { + __le32 reserved1; + u8 parent_tsf[4]; // fw_use[0] is boolean for OUR_TSF_IS_GREATER + u8 received_channel; // The channel that this frame was received on. 
+ // Note that for .11b this does not have to be + // the same as the channel that it was sent. + // Filled by LMAC + u8 frameStatus; + u8 rate; + u8 rssi; + u8 agc; + u8 rssi_dbm; + __le16 signal; + __le16 noise; + u8 antennaAndPhy; + u8 control; // control bit should be on in bg + u8 rtscts_rate; // rate of rts or cts (in rts cts sequence rate + // is identical) + u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen + __le16 length; + u8 data[0]; +} __attribute__ ((packed)); + +struct ipw_rx_header { + u8 message_type; + u8 rx_seq_num; + u8 control_bits; + u8 reserved; +} __attribute__ ((packed)); + +struct ipw_rx_packet { + struct ipw_rx_header header; + union { + struct ipw_rx_frame frame; + struct ipw_rx_notification notification; + } u; +} __attribute__ ((packed)); + +#define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12 +#define IPW_RX_FRAME_SIZE (unsigned int)(sizeof(struct ipw_rx_header) + \ + sizeof(struct ipw_rx_frame)) + +struct ipw_rx_mem_buffer { + dma_addr_t dma_addr; + struct sk_buff *skb; + struct list_head list; +}; /* Not transferred over network, so not __attribute__ ((packed)) */ + +struct ipw_rx_queue { + struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct ipw_rx_mem_buffer *queue[RX_QUEUE_SIZE]; + u32 processed; /* Internal index to last handled Rx packet */ + u32 read; /* Shared index to newest available Rx buffer */ + u32 write; /* Shared index to oldest written Rx packet */ + u32 free_count; /* Number of pre-allocated buffers in rx_free */ + /* Each of these lists is used as a FIFO for ipw_rx_mem_buffers */ + struct list_head rx_free; /* Own an SKBs */ + struct list_head rx_used; /* No SKB allocated */ + spinlock_t lock; +}; /* Not transferred over network, so not __attribute__ ((packed)) */ + +struct alive_command_responce { + u8 alive_command; + u8 sequence_number; + __le16 software_revision; + u8 device_identifier; + u8 reserved1[5]; + __le16 reserved2; + __le16 reserved3; + __le16 clock_settle_time; + __le16 powerup_settle_time; + __le16 reserved4; + u8 time_stamp[5]; /* month, day, year, hours, minutes */ + u8 ucode_valid; +} __attribute__ ((packed)); + +#define IPW_MAX_RATES 12 + +struct ipw_rates { + u8 num_rates; + u8 rates[IPW_MAX_RATES]; +} __attribute__ ((packed)); + +struct command_block { + unsigned int control; + u32 source_addr; + u32 dest_addr; + unsigned int status; +} __attribute__ ((packed)); + +#define CB_NUMBER_OF_ELEMENTS_SMALL 64 +struct fw_image_desc { + unsigned long last_cb_index; + unsigned long current_cb_index; + struct command_block cb_list[CB_NUMBER_OF_ELEMENTS_SMALL]; + void *v_addr; + unsigned long p_addr; + unsigned long len; +}; + +struct ipw_sys_config { + u8 bt_coexistence; + u8 reserved1; + u8 answer_broadcast_ssid_probe; + u8 accept_all_data_frames; + u8 accept_non_directed_frames; + u8 exclude_unicast_unencrypted; + u8 disable_unicast_decryption; + u8 exclude_multicast_unencrypted; + u8 disable_multicast_decryption; + u8 antenna_diversity; + u8 pass_crc_to_host; + u8 dot11g_auto_detection; + u8 enable_cts_to_self; + u8 enable_multicast_filtering; + u8 bt_coexist_collision_thr; + u8 silence_threshold; + u8 accept_all_mgmt_bcpr; + u8 accept_all_mgmt_frames; + u8 pass_noise_stats_to_host; + u8 reserved3; +} __attribute__ ((packed)); + +struct ipw_multicast_addr { + u8 num_of_multicast_addresses; + u8 reserved[3]; + u8 mac1[6]; + u8 mac2[6]; + u8 mac3[6]; + u8 mac4[6]; +} __attribute__ ((packed)); + +#define DCW_WEP_KEY_INDEX_MASK 0x03 /* bits [0:1] */ +#define DCW_WEP_KEY_SEC_TYPE_MASK 0x30 /* bits 
[4:5] */ + +#define DCW_WEP_KEY_SEC_TYPE_WEP 0x00 +#define DCW_WEP_KEY_SEC_TYPE_CCM 0x20 +#define DCW_WEP_KEY_SEC_TYPE_TKIP 0x30 + +#define DCW_WEP_KEY_INVALID_SIZE 0x00 /* 0 = Invalid key */ +#define DCW_WEP_KEY64Bit_SIZE 0x05 /* 64-bit encryption */ +#define DCW_WEP_KEY128Bit_SIZE 0x0D /* 128-bit encryption */ +#define DCW_CCM_KEY128Bit_SIZE 0x10 /* 128-bit key */ +//#define DCW_WEP_KEY128BitIV_SIZE 0x10 /* 128-bit key and 128-bit IV */ + +struct ipw_wep_key { + u8 cmd_id; + u8 seq_num; + u8 key_index; + u8 key_size; + u8 key[16]; +} __attribute__ ((packed)); + +struct ipw_tgi_tx_key { + u8 key_id; + u8 security_type; + u8 station_index; + u8 flags; + u8 key[16]; + __le32 tx_counter[2]; +} __attribute__ ((packed)); + +#define IPW_SCAN_CHANNELS 54 + +struct ipw_scan_request { + u8 scan_type; + __le16 dwell_time; + u8 channels_list[IPW_SCAN_CHANNELS]; + u8 channels_reserved[3]; +} __attribute__ ((packed)); + +enum { + IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0, + IPW_SCAN_PASSIVE_FULL_DWELL_SCAN, + IPW_SCAN_ACTIVE_DIRECT_SCAN, + IPW_SCAN_ACTIVE_BROADCAST_SCAN, + IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN, + IPW_SCAN_TYPES +}; + +struct ipw_scan_request_ext { + __le32 full_scan_index; + u8 channels_list[IPW_SCAN_CHANNELS]; + u8 scan_type[IPW_SCAN_CHANNELS / 2]; + u8 reserved; + __le16 dwell_time[IPW_SCAN_TYPES]; +} __attribute__ ((packed)); + +static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) +{ + if (index % 2) + return scan->scan_type[index / 2] & 0x0F; + else + return (scan->scan_type[index / 2] & 0xF0) >> 4; +} + +static inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan, + u8 index, u8 scan_type) +{ + if (index % 2) + scan->scan_type[index / 2] = + (scan->scan_type[index / 2] & 0xF0) | (scan_type & 0x0F); + else + scan->scan_type[index / 2] = + (scan->scan_type[index / 2] & 0x0F) | + ((scan_type & 0x0F) << 4); +} + +struct ipw_associate { + u8 channel; +#ifdef __LITTLE_ENDIAN_BITFIELD + u8 auth_type:4, auth_key:4; +#else + u8 auth_key:4, auth_type:4; +#endif + u8 assoc_type; + u8 reserved; + __le16 policy_support; + u8 preamble_length; + u8 ieee_mode; + u8 bssid[ETH_ALEN]; + __le32 assoc_tsf_msw; + __le32 assoc_tsf_lsw; + __le16 capability; + __le16 listen_interval; + __le16 beacon_interval; + u8 dest[ETH_ALEN]; + __le16 atim_window; + u8 smr; + u8 reserved1; + __le16 reserved2; +} __attribute__ ((packed)); + +struct ipw_supported_rates { + u8 ieee_mode; + u8 num_rates; + u8 purpose; + u8 reserved; + u8 supported_rates[IPW_MAX_RATES]; +} __attribute__ ((packed)); + +struct ipw_rts_threshold { + __le16 rts_threshold; + __le16 reserved; +} __attribute__ ((packed)); + +struct ipw_frag_threshold { + __le16 frag_threshold; + __le16 reserved; +} __attribute__ ((packed)); + +struct ipw_retry_limit { + u8 short_retry_limit; + u8 long_retry_limit; + __le16 reserved; +} __attribute__ ((packed)); + +struct ipw_dino_config { + __le32 dino_config_addr; + __le16 dino_config_size; + u8 dino_response; + u8 reserved; +} __attribute__ ((packed)); + +struct ipw_aironet_info { + u8 id; + u8 length; + __le16 reserved; +} __attribute__ ((packed)); + +struct ipw_rx_key { + u8 station_index; + u8 key_type; + u8 key_id; + u8 key_flag; + u8 key[16]; + u8 station_address[6]; + u8 key_index; + u8 reserved; +} __attribute__ ((packed)); + +struct ipw_country_channel_info { + u8 first_channel; + u8 no_channels; + s8 max_tx_power; +} __attribute__ ((packed)); + +struct ipw_country_info { + u8 id; + u8 length; + u8 country_str[3]; + struct ipw_country_channel_info groups[7]; 
+} __attribute__ ((packed)); + +struct ipw_channel_tx_power { + u8 channel_number; + s8 tx_power; +} __attribute__ ((packed)); + +#define SCAN_ASSOCIATED_INTERVAL (HZ) +#define SCAN_INTERVAL (HZ / 10) +#define MAX_A_CHANNELS 37 +#define MAX_B_CHANNELS 14 + +struct ipw_tx_power { + u8 num_channels; + u8 ieee_mode; + struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS]; +} __attribute__ ((packed)); + +struct ipw_rsn_capabilities { + u8 id; + u8 length; + __le16 version; +} __attribute__ ((packed)); + +struct ipw_sensitivity_calib { + __le16 beacon_rssi_raw; + __le16 reserved; +} __attribute__ ((packed)); + +/** + * Host command structure. + * + * On input, the following fields should be filled: + * - cmd + * - len + * - status_len + * - param (if needed) + * + * On output, + * - \a status contains status; + * - \a param filled with status parameters. + */ +struct ipw_cmd { /* XXX */ + u32 cmd; /**< Host command */ + u32 status;/**< Status */ + u32 status_len; + /**< How many 32 bit parameters in the status */ + u32 len; /**< incoming parameters length, bytes */ + /** + * command parameters. + * There should be enough space for incoming and + * outcoming parameters. + * Incoming parameters listed 1-st, followed by outcoming params. + * nParams=(len+3)/4+status_len + */ + u32 param[0]; +} __attribute__ ((packed)); + +#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */ + +#define STATUS_INT_ENABLED (1<<1) +#define STATUS_RF_KILL_HW (1<<2) +#define STATUS_RF_KILL_SW (1<<3) +#define STATUS_RF_KILL_MASK (STATUS_RF_KILL_HW | STATUS_RF_KILL_SW) + +#define STATUS_INIT (1<<5) +#define STATUS_AUTH (1<<6) +#define STATUS_ASSOCIATED (1<<7) +#define STATUS_STATE_MASK (STATUS_INIT | STATUS_AUTH | STATUS_ASSOCIATED) + +#define STATUS_ASSOCIATING (1<<8) +#define STATUS_DISASSOCIATING (1<<9) +#define STATUS_ROAMING (1<<10) +#define STATUS_EXIT_PENDING (1<<11) +#define STATUS_DISASSOC_PENDING (1<<12) +#define STATUS_STATE_PENDING (1<<13) + +#define STATUS_DIRECT_SCAN_PENDING (1<<19) +#define STATUS_SCAN_PENDING (1<<20) +#define STATUS_SCANNING (1<<21) +#define STATUS_SCAN_ABORTING (1<<22) +#define STATUS_SCAN_FORCED (1<<23) + +#define STATUS_LED_LINK_ON (1<<24) +#define STATUS_LED_ACT_ON (1<<25) + +#define STATUS_INDIRECT_BYTE (1<<28) /* sysfs entry configured for access */ +#define STATUS_INDIRECT_DWORD (1<<29) /* sysfs entry configured for access */ +#define STATUS_DIRECT_DWORD (1<<30) /* sysfs entry configured for access */ + +#define STATUS_SECURITY_UPDATED (1<<31) /* Security sync needed */ + +#define CFG_STATIC_CHANNEL (1<<0) /* Restrict assoc. to single channel */ +#define CFG_STATIC_ESSID (1<<1) /* Restrict assoc. to single SSID */ +#define CFG_STATIC_BSSID (1<<2) /* Restrict assoc. 
to single BSSID */ +#define CFG_CUSTOM_MAC (1<<3) +#define CFG_PREAMBLE_LONG (1<<4) +#define CFG_ADHOC_PERSIST (1<<5) +#define CFG_ASSOCIATE (1<<6) +#define CFG_FIXED_RATE (1<<7) +#define CFG_ADHOC_CREATE (1<<8) +#define CFG_NO_LED (1<<9) +#define CFG_BACKGROUND_SCAN (1<<10) +#define CFG_SPEED_SCAN (1<<11) +#define CFG_NET_STATS (1<<12) + +#define CAP_SHARED_KEY (1<<0) /* Off = OPEN */ +#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */ + +#define MAX_STATIONS 32 +#define IPW_INVALID_STATION (0xff) + +struct ipw_station_entry { + u8 mac_addr[ETH_ALEN]; + u8 reserved; + u8 support_mode; +}; + +#define AVG_ENTRIES 8 +struct average { + s16 entries[AVG_ENTRIES]; + u8 pos; + u8 init; + s32 sum; +}; + +#define MAX_SPEED_SCAN 100 +#define IPW_IBSS_MAC_HASH_SIZE 31 + +struct ipw_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +struct ipw_error_elem { /* XXX */ + u32 desc; + u32 time; + u32 blink1; + u32 blink2; + u32 link1; + u32 link2; + u32 data; +}; + +struct ipw_event { /* XXX */ + u32 event; + u32 time; + u32 data; +} __attribute__ ((packed)); + +struct ipw_fw_error { /* XXX */ + unsigned long jiffies; + u32 status; + u32 config; + u32 elem_len; + u32 log_len; + struct ipw_error_elem *elem; + struct ipw_event *log; + u8 payload[0]; +} __attribute__ ((packed)); + +#ifdef CONFIG_IPW2200_PROMISCUOUS + +enum ipw_prom_filter { + IPW_PROM_CTL_HEADER_ONLY = (1 << 0), + IPW_PROM_MGMT_HEADER_ONLY = (1 << 1), + IPW_PROM_DATA_HEADER_ONLY = (1 << 2), + IPW_PROM_ALL_HEADER_ONLY = 0xf, /* bits 0..3 */ + IPW_PROM_NO_TX = (1 << 4), + IPW_PROM_NO_RX = (1 << 5), + IPW_PROM_NO_CTL = (1 << 6), + IPW_PROM_NO_MGMT = (1 << 7), + IPW_PROM_NO_DATA = (1 << 8), +}; + +struct ipw_priv; +struct ipw_prom_priv { + struct ipw_priv *priv; + struct libipw_device *ieee; + enum ipw_prom_filter filter; + int tx_packets; + int rx_packets; +}; +#endif + +#if defined(CONFIG_IPW2200_RADIOTAP) || defined(CONFIG_IPW2200_PROMISCUOUS) +/* Magic struct that slots into the radiotap header -- no reason + * to build this manually element by element, we can write it much + * more efficiently than we can parse it. ORDER MATTERS HERE + * + * When sent to us via the simulated Rx interface in sysfs, the entire + * structure is provided regardless of any bits unset. + */ +struct ipw_rt_hdr { + struct ieee80211_radiotap_header rt_hdr; + u64 rt_tsf; /* TSF */ /* XXX */ + u8 rt_flags; /* radiotap packet flags */ + u8 rt_rate; /* rate in 500kb/s */ + __le16 rt_channel; /* channel in mhz */ + __le16 rt_chbitmask; /* channel bitfield */ + s8 rt_dbmsignal; /* signal in dbM, kluged to signed */ + s8 rt_dbmnoise; + u8 rt_antenna; /* antenna number */ + u8 payload[0]; /* payload... 
*/ +} __attribute__ ((packed)); +#endif + +struct ipw_priv { + /* ieee device used by generic ieee processing code */ + struct libipw_device *ieee; + + spinlock_t lock; + spinlock_t irq_lock; + struct mutex mutex; + + /* basic pci-network driver stuff */ + struct pci_dev *pci_dev; + struct net_device *net_dev; + +#ifdef CONFIG_IPW2200_PROMISCUOUS + /* Promiscuous mode */ + struct ipw_prom_priv *prom_priv; + struct net_device *prom_net_dev; +#endif + + /* pci hardware address support */ + void __iomem *hw_base; + unsigned long hw_len; + + struct fw_image_desc sram_desc; + + /* result of ucode download */ + struct alive_command_responce dino_alive; + + wait_queue_head_t wait_command_queue; + wait_queue_head_t wait_state; + + /* Rx and Tx DMA processing queues */ + struct ipw_rx_queue *rxq; + struct clx2_tx_queue txq_cmd; + struct clx2_tx_queue txq[4]; + u32 status; + u32 config; + u32 capability; + + struct average average_missed_beacons; + s16 exp_avg_rssi; + s16 exp_avg_noise; + u32 port_type; + int rx_bufs_min; /**< minimum number of bufs in Rx queue */ + int rx_pend_max; /**< maximum pending buffers for one IRQ */ + u32 hcmd_seq; /**< sequence number for hcmd */ + u32 disassociate_threshold; + u32 roaming_threshold; + + struct ipw_associate assoc_request; + struct libipw_network *assoc_network; + + unsigned long ts_scan_abort; + struct ipw_supported_rates rates; + struct ipw_rates phy[3]; /**< PHY restrictions, per band */ + struct ipw_rates supp; /**< software defined */ + struct ipw_rates extended; /**< use for corresp. IE, AP only */ + + struct notif_link_deterioration last_link_deterioration; /** for statistics */ + struct ipw_cmd *hcmd; /**< host command currently executed */ + + wait_queue_head_t hcmd_wq; /**< host command waits for execution */ + u32 tsf_bcn[2]; /**< TSF from latest beacon */ + + struct notif_calibration calib; /**< last calibration */ + + /* ordinal interface with firmware */ + u32 table0_addr; + u32 table0_len; + u32 table1_addr; + u32 table1_len; + u32 table2_addr; + u32 table2_len; + + /* context information */ + u8 essid[IW_ESSID_MAX_SIZE]; + u8 essid_len; + u8 nick[IW_ESSID_MAX_SIZE]; + u16 rates_mask; + u8 channel; + struct ipw_sys_config sys_config; + u32 power_mode; + u8 bssid[ETH_ALEN]; + u16 rts_threshold; + u8 mac_addr[ETH_ALEN]; + u8 num_stations; + u8 stations[MAX_STATIONS][ETH_ALEN]; + u8 short_retry_limit; + u8 long_retry_limit; + + u32 notif_missed_beacons; + + /* Statistics and counters normalized with each association */ + u32 last_missed_beacons; + u32 last_tx_packets; + u32 last_rx_packets; + u32 last_tx_failures; + u32 last_rx_err; + u32 last_rate; + + u32 missed_adhoc_beacons; + u32 missed_beacons; + u32 rx_packets; + u32 tx_packets; + u32 quality; + + u8 speed_scan[MAX_SPEED_SCAN]; + u8 speed_scan_pos; + + u16 last_seq_num; + u16 last_frag_num; + unsigned long last_packet_time; + struct list_head ibss_mac_hash[IPW_IBSS_MAC_HASH_SIZE]; + + /* eeprom */ + u8 eeprom[0x100]; /* 256 bytes of eeprom */ + u8 country[4]; + int eeprom_delay; + + struct iw_statistics wstats; + + struct iw_public_data wireless_data; + + int user_requested_scan; + u8 direct_scan_ssid[IW_ESSID_MAX_SIZE]; + u8 direct_scan_ssid_len; + + struct workqueue_struct *workqueue; + + struct delayed_work adhoc_check; + struct work_struct associate; + struct work_struct disassociate; + struct work_struct system_config; + struct work_struct rx_replenish; + struct delayed_work request_scan; + struct delayed_work request_direct_scan; + struct delayed_work request_passive_scan; + 
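[Editor's aside, not part of the patch: the work_struct and delayed_work members of ipw_priv listed here follow the standard kernel deferred-work pattern; the driver's own handlers live in ipw2200.c earlier in this patch. A hedged sketch with hypothetical names (my_priv, my_scan_check, "my_wq") of how such an item is typically initialized and queued:]

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

struct my_priv {
	struct workqueue_struct *workqueue;
	struct delayed_work scan_check;
};

static void my_scan_check(struct work_struct *work)
{
	struct my_priv *priv =
		container_of(work, struct my_priv, scan_check.work);
	/* ... check scan progress, possibly requeue ... */
	(void)priv;
}

static void my_setup(struct my_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue("my_wq");
	INIT_DELAYED_WORK(&priv->scan_check, my_scan_check);
	/* run the check roughly 100 ms from now */
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   msecs_to_jiffies(100));
}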
struct delayed_work scan_event; + struct work_struct adapter_restart; + struct delayed_work rf_kill; + struct work_struct up; + struct work_struct down; + struct delayed_work gather_stats; + struct work_struct abort_scan; + struct work_struct roam; + struct delayed_work scan_check; + struct work_struct link_up; + struct work_struct link_down; + + struct tasklet_struct irq_tasklet; + + /* LED related variables and work_struct */ + u8 nic_type; + u32 led_activity_on; + u32 led_activity_off; + u32 led_association_on; + u32 led_association_off; + u32 led_ofdm_on; + u32 led_ofdm_off; + + struct delayed_work led_link_on; + struct delayed_work led_link_off; + struct delayed_work led_act_off; + struct work_struct merge_networks; + + struct ipw_cmd_log *cmdlog; + int cmdlog_len; + int cmdlog_pos; + +#define IPW_2200BG 1 +#define IPW_2915ABG 2 + u8 adapter; + + s8 tx_power; + + /* Track time in suspend */ + unsigned long suspend_at; + unsigned long suspend_time; + +#ifdef CONFIG_PM + u32 pm_state[16]; +#endif + + struct ipw_fw_error *error; + + /* network state */ + + /* Used to pass the current INTA value from ISR to Tasklet */ + u32 isr_inta; + + /* QoS */ + struct ipw_qos_info qos_data; + struct work_struct qos_activate; + /*********************************/ + + /* debugging info */ + u32 indirect_dword; + u32 direct_dword; + u32 indirect_byte; +}; /*ipw_priv */ + +/* debug macros */ + +/* Debug and printf string expansion helpers for printing bitfields */ +#define BIT_FMT8 "%c%c%c%c-%c%c%c%c" +#define BIT_FMT16 BIT_FMT8 ":" BIT_FMT8 +#define BIT_FMT32 BIT_FMT16 " " BIT_FMT16 + +#define BITC(x,y) (((x>>y)&1)?'1':'0') +#define BIT_ARG8(x) \ +BITC(x,7),BITC(x,6),BITC(x,5),BITC(x,4),\ +BITC(x,3),BITC(x,2),BITC(x,1),BITC(x,0) + +#define BIT_ARG16(x) \ +BITC(x,15),BITC(x,14),BITC(x,13),BITC(x,12),\ +BITC(x,11),BITC(x,10),BITC(x,9),BITC(x,8),\ +BIT_ARG8(x) + +#define BIT_ARG32(x) \ +BITC(x,31),BITC(x,30),BITC(x,29),BITC(x,28),\ +BITC(x,27),BITC(x,26),BITC(x,25),BITC(x,24),\ +BITC(x,23),BITC(x,22),BITC(x,21),BITC(x,20),\ +BITC(x,19),BITC(x,18),BITC(x,17),BITC(x,16),\ +BIT_ARG16(x) + + +#define IPW_DEBUG(level, fmt, args...) \ +do { if (ipw_debug_level & (level)) \ + printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ + in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0) + +#ifdef CONFIG_IPW2200_DEBUG +#define IPW_LL_DEBUG(level, fmt, args...) \ +do { if (ipw_debug_level & (level)) \ + printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ + in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0) +#else +#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0) +#endif /* CONFIG_IPW2200_DEBUG */ + +/* + * To use the debug system; + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of: + * + * #define IPW_DL_xxxx VALUE + * + * shifting value to the left one bit from the previous entry. xxxx should be + * the name of the classification (for example, WEP) + * + * You then need to either add a IPW_xxxx_DEBUG() macro definition for your + * classification, or use IPW_DEBUG(IPW_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * To add your debug level to the list of levels seen when you perform + * + * % cat /proc/net/ipw/debug_level + * + * you simply need to add your entry to the ipw_debug_levels array. 
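[Editor's aside, not part of the patch: following the recipe in the comment above, adding a hypothetical classification named FOO would look roughly like the sketch below. Bit 4 appears unused by the IPW_DL_* list that follows, but both the name and the bit are illustrative assumptions only.]

/* 1) pick a free bit for the new classification */
#define IPW_DL_FOO (1<<4)

/* 2) give it a convenience macro like the existing IPW_DEBUG_* ones */
#define IPW_DEBUG_FOO(f, a...) IPW_DEBUG(IPW_DL_FOO, f, ## a)

/* 3) use it in the driver, e.g.
 *	IPW_DEBUG_FOO("foo event, status=0x%08x\n", status);
 * and add IPW_DL_FOO to the ipw_debug_levels array so it shows up in
 * /proc/net/ipw/debug_level. */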
+ * + * If you do not see debug_level in /proc/net/ipw then you do not have + * CONFIG_IPW2200_DEBUG defined in your kernel configuration + * + */ + +#define IPW_DL_ERROR (1<<0) +#define IPW_DL_WARNING (1<<1) +#define IPW_DL_INFO (1<<2) +#define IPW_DL_WX (1<<3) +#define IPW_DL_HOST_COMMAND (1<<5) +#define IPW_DL_STATE (1<<6) + +#define IPW_DL_NOTIF (1<<10) +#define IPW_DL_SCAN (1<<11) +#define IPW_DL_ASSOC (1<<12) +#define IPW_DL_DROP (1<<13) +#define IPW_DL_IOCTL (1<<14) + +#define IPW_DL_MANAGE (1<<15) +#define IPW_DL_FW (1<<16) +#define IPW_DL_RF_KILL (1<<17) +#define IPW_DL_FW_ERRORS (1<<18) + +#define IPW_DL_LED (1<<19) + +#define IPW_DL_ORD (1<<20) + +#define IPW_DL_FRAG (1<<21) +#define IPW_DL_WEP (1<<22) +#define IPW_DL_TX (1<<23) +#define IPW_DL_RX (1<<24) +#define IPW_DL_ISR (1<<25) +#define IPW_DL_FW_INFO (1<<26) +#define IPW_DL_IO (1<<27) +#define IPW_DL_TRACE (1<<28) + +#define IPW_DL_STATS (1<<29) +#define IPW_DL_MERGE (1<<30) +#define IPW_DL_QOS (1<<31) + +#define IPW_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a) +#define IPW_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a) +#define IPW_DEBUG_INFO(f, a...) IPW_DEBUG(IPW_DL_INFO, f, ## a) + +#define IPW_DEBUG_WX(f, a...) IPW_DEBUG(IPW_DL_WX, f, ## a) +#define IPW_DEBUG_SCAN(f, a...) IPW_DEBUG(IPW_DL_SCAN, f, ## a) +#define IPW_DEBUG_TRACE(f, a...) IPW_LL_DEBUG(IPW_DL_TRACE, f, ## a) +#define IPW_DEBUG_RX(f, a...) IPW_LL_DEBUG(IPW_DL_RX, f, ## a) +#define IPW_DEBUG_TX(f, a...) IPW_LL_DEBUG(IPW_DL_TX, f, ## a) +#define IPW_DEBUG_ISR(f, a...) IPW_LL_DEBUG(IPW_DL_ISR, f, ## a) +#define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a) +#define IPW_DEBUG_LED(f, a...) IPW_LL_DEBUG(IPW_DL_LED, f, ## a) +#define IPW_DEBUG_WEP(f, a...) IPW_LL_DEBUG(IPW_DL_WEP, f, ## a) +#define IPW_DEBUG_HC(f, a...) IPW_LL_DEBUG(IPW_DL_HOST_COMMAND, f, ## a) +#define IPW_DEBUG_FRAG(f, a...) IPW_LL_DEBUG(IPW_DL_FRAG, f, ## a) +#define IPW_DEBUG_FW(f, a...) IPW_LL_DEBUG(IPW_DL_FW, f, ## a) +#define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a) +#define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a) +#define IPW_DEBUG_IO(f, a...) IPW_LL_DEBUG(IPW_DL_IO, f, ## a) +#define IPW_DEBUG_ORD(f, a...) IPW_LL_DEBUG(IPW_DL_ORD, f, ## a) +#define IPW_DEBUG_FW_INFO(f, a...) IPW_LL_DEBUG(IPW_DL_FW_INFO, f, ## a) +#define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a) +#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a) +#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a) +#define IPW_DEBUG_STATS(f, a...) IPW_LL_DEBUG(IPW_DL_STATS, f, ## a) +#define IPW_DEBUG_MERGE(f, a...) IPW_LL_DEBUG(IPW_DL_MERGE, f, ## a) +#define IPW_DEBUG_QOS(f, a...) 
IPW_LL_DEBUG(IPW_DL_QOS, f, ## a) + +#include + +/* +* Register bit definitions +*/ + +#define IPW_INTA_RW 0x00000008 +#define IPW_INTA_MASK_R 0x0000000C +#define IPW_INDIRECT_ADDR 0x00000010 +#define IPW_INDIRECT_DATA 0x00000014 +#define IPW_AUTOINC_ADDR 0x00000018 +#define IPW_AUTOINC_DATA 0x0000001C +#define IPW_RESET_REG 0x00000020 +#define IPW_GP_CNTRL_RW 0x00000024 + +#define IPW_READ_INT_REGISTER 0xFF4 + +#define IPW_GP_CNTRL_BIT_INIT_DONE 0x00000004 + +#define IPW_REGISTER_DOMAIN1_END 0x00001000 +#define IPW_SRAM_READ_INT_REGISTER 0x00000ff4 + +#define IPW_SHARED_LOWER_BOUND 0x00000200 +#define IPW_INTERRUPT_AREA_LOWER_BOUND 0x00000f80 + +#define IPW_NIC_SRAM_LOWER_BOUND 0x00000000 +#define IPW_NIC_SRAM_UPPER_BOUND 0x00030000 + +#define IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER (1 << 29) +#define IPW_GP_CNTRL_BIT_CLOCK_READY 0x00000001 +#define IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY 0x00000002 + +/* + * RESET Register Bit Indexes + */ +#define CBD_RESET_REG_PRINCETON_RESET (1<<0) +#define IPW_START_STANDBY (1<<2) +#define IPW_ACTIVITY_LED (1<<4) +#define IPW_ASSOCIATED_LED (1<<5) +#define IPW_OFDM_LED (1<<6) +#define IPW_RESET_REG_SW_RESET (1<<7) +#define IPW_RESET_REG_MASTER_DISABLED (1<<8) +#define IPW_RESET_REG_STOP_MASTER (1<<9) +#define IPW_GATE_ODMA (1<<25) +#define IPW_GATE_IDMA (1<<26) +#define IPW_ARC_KESHET_CONFIG (1<<27) +#define IPW_GATE_ADMA (1<<29) + +#define IPW_CSR_CIS_UPPER_BOUND 0x00000200 +#define IPW_DOMAIN_0_END 0x1000 +#define CLX_MEM_BAR_SIZE 0x1000 + +/* Dino/baseband control registers bits */ + +#define DINO_ENABLE_SYSTEM 0x80 /* 1 = baseband processor on, 0 = reset */ +#define DINO_ENABLE_CS 0x40 /* 1 = enable ucode load */ +#define DINO_RXFIFO_DATA 0x01 /* 1 = data available */ +#define IPW_BASEBAND_CONTROL_STATUS 0X00200000 +#define IPW_BASEBAND_TX_FIFO_WRITE 0X00200004 +#define IPW_BASEBAND_RX_FIFO_READ 0X00200004 +#define IPW_BASEBAND_CONTROL_STORE 0X00200010 + +#define IPW_INTERNAL_CMD_EVENT 0X00300004 +#define IPW_BASEBAND_POWER_DOWN 0x00000001 + +#define IPW_MEM_HALT_AND_RESET 0x003000e0 + +/* defgroup bits_halt_reset MEM_HALT_AND_RESET register bits */ +#define IPW_BIT_HALT_RESET_ON 0x80000000 +#define IPW_BIT_HALT_RESET_OFF 0x00000000 + +#define CB_LAST_VALID 0x20000000 +#define CB_INT_ENABLED 0x40000000 +#define CB_VALID 0x80000000 +#define CB_SRC_LE 0x08000000 +#define CB_DEST_LE 0x04000000 +#define CB_SRC_AUTOINC 0x00800000 +#define CB_SRC_IO_GATED 0x00400000 +#define CB_DEST_AUTOINC 0x00080000 +#define CB_SRC_SIZE_LONG 0x00200000 +#define CB_DEST_SIZE_LONG 0x00020000 + +/* DMA DEFINES */ + +#define DMA_CONTROL_SMALL_CB_CONST_VALUE 0x00540000 +#define DMA_CB_STOP_AND_ABORT 0x00000C00 +#define DMA_CB_START 0x00000100 + +#define IPW_SHARED_SRAM_SIZE 0x00030000 +#define IPW_SHARED_SRAM_DMA_CONTROL 0x00027000 +#define CB_MAX_LENGTH 0x1FFF + +#define IPW_HOST_EEPROM_DATA_SRAM_SIZE 0xA18 +#define IPW_EEPROM_IMAGE_SIZE 0x100 + +/* DMA defs */ +#define IPW_DMA_I_CURRENT_CB 0x003000D0 +#define IPW_DMA_O_CURRENT_CB 0x003000D4 +#define IPW_DMA_I_DMA_CONTROL 0x003000A4 +#define IPW_DMA_I_CB_BASE 0x003000A0 + +#define IPW_TX_CMD_QUEUE_BD_BASE 0x00000200 +#define IPW_TX_CMD_QUEUE_BD_SIZE 0x00000204 +#define IPW_TX_QUEUE_0_BD_BASE 0x00000208 +#define IPW_TX_QUEUE_0_BD_SIZE (0x0000020C) +#define IPW_TX_QUEUE_1_BD_BASE 0x00000210 +#define IPW_TX_QUEUE_1_BD_SIZE 0x00000214 +#define IPW_TX_QUEUE_2_BD_BASE 0x00000218 +#define IPW_TX_QUEUE_2_BD_SIZE (0x0000021C) +#define IPW_TX_QUEUE_3_BD_BASE 0x00000220 +#define IPW_TX_QUEUE_3_BD_SIZE 0x00000224 +#define 
IPW_RX_BD_BASE 0x00000240 +#define IPW_RX_BD_SIZE 0x00000244 +#define IPW_RFDS_TABLE_LOWER 0x00000500 + +#define IPW_TX_CMD_QUEUE_READ_INDEX 0x00000280 +#define IPW_TX_QUEUE_0_READ_INDEX 0x00000284 +#define IPW_TX_QUEUE_1_READ_INDEX 0x00000288 +#define IPW_TX_QUEUE_2_READ_INDEX (0x0000028C) +#define IPW_TX_QUEUE_3_READ_INDEX 0x00000290 +#define IPW_RX_READ_INDEX (0x000002A0) + +#define IPW_TX_CMD_QUEUE_WRITE_INDEX (0x00000F80) +#define IPW_TX_QUEUE_0_WRITE_INDEX (0x00000F84) +#define IPW_TX_QUEUE_1_WRITE_INDEX (0x00000F88) +#define IPW_TX_QUEUE_2_WRITE_INDEX (0x00000F8C) +#define IPW_TX_QUEUE_3_WRITE_INDEX (0x00000F90) +#define IPW_RX_WRITE_INDEX (0x00000FA0) + +/* + * EEPROM Related Definitions + */ + +#define IPW_EEPROM_DATA_SRAM_ADDRESS (IPW_SHARED_LOWER_BOUND + 0x814) +#define IPW_EEPROM_DATA_SRAM_SIZE (IPW_SHARED_LOWER_BOUND + 0x818) +#define IPW_EEPROM_LOAD_DISABLE (IPW_SHARED_LOWER_BOUND + 0x81C) +#define IPW_EEPROM_DATA (IPW_SHARED_LOWER_BOUND + 0x820) +#define IPW_EEPROM_UPPER_ADDRESS (IPW_SHARED_LOWER_BOUND + 0x9E0) + +#define IPW_STATION_TABLE_LOWER (IPW_SHARED_LOWER_BOUND + 0xA0C) +#define IPW_STATION_TABLE_UPPER (IPW_SHARED_LOWER_BOUND + 0xB0C) +#define IPW_REQUEST_ATIM (IPW_SHARED_LOWER_BOUND + 0xB0C) +#define IPW_ATIM_SENT (IPW_SHARED_LOWER_BOUND + 0xB10) +#define IPW_WHO_IS_AWAKE (IPW_SHARED_LOWER_BOUND + 0xB14) +#define IPW_DURING_ATIM_WINDOW (IPW_SHARED_LOWER_BOUND + 0xB18) + +#define MSB 1 +#define LSB 0 +#define WORD_TO_BYTE(_word) ((_word) * sizeof(u16)) + +#define GET_EEPROM_ADDR(_wordoffset,_byteoffset) \ + ( WORD_TO_BYTE(_wordoffset) + (_byteoffset) ) + +/* EEPROM access by BYTE */ +#define EEPROM_PME_CAPABILITY (GET_EEPROM_ADDR(0x09,MSB)) /* 1 byte */ +#define EEPROM_MAC_ADDRESS (GET_EEPROM_ADDR(0x21,LSB)) /* 6 byte */ +#define EEPROM_VERSION (GET_EEPROM_ADDR(0x24,MSB)) /* 1 byte */ +#define EEPROM_NIC_TYPE (GET_EEPROM_ADDR(0x25,LSB)) /* 1 byte */ +#define EEPROM_SKU_CAPABILITY (GET_EEPROM_ADDR(0x25,MSB)) /* 1 byte */ +#define EEPROM_COUNTRY_CODE (GET_EEPROM_ADDR(0x26,LSB)) /* 3 bytes */ +#define EEPROM_IBSS_CHANNELS_BG (GET_EEPROM_ADDR(0x28,LSB)) /* 2 bytes */ +#define EEPROM_IBSS_CHANNELS_A (GET_EEPROM_ADDR(0x29,MSB)) /* 5 bytes */ +#define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */ +#define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */ + +/* NIC type as found in the one byte EEPROM_NIC_TYPE offset */ +#define EEPROM_NIC_TYPE_0 0 +#define EEPROM_NIC_TYPE_1 1 +#define EEPROM_NIC_TYPE_2 2 +#define EEPROM_NIC_TYPE_3 3 +#define EEPROM_NIC_TYPE_4 4 + +/* Bluetooth Coexistence capabilities as found in EEPROM_SKU_CAPABILITY */ +#define EEPROM_SKU_CAP_BT_CHANNEL_SIG 0x01 /* we can tell BT our channel # */ +#define EEPROM_SKU_CAP_BT_PRIORITY 0x02 /* BT can take priority over us */ +#define EEPROM_SKU_CAP_BT_OOB 0x04 /* we can signal BT out-of-band */ + +#define FW_MEM_REG_LOWER_BOUND 0x00300000 +#define FW_MEM_REG_EEPROM_ACCESS (FW_MEM_REG_LOWER_BOUND + 0x40) +#define IPW_EVENT_REG (FW_MEM_REG_LOWER_BOUND + 0x04) +#define EEPROM_BIT_SK (1<<0) +#define EEPROM_BIT_CS (1<<1) +#define EEPROM_BIT_DI (1<<2) +#define EEPROM_BIT_DO (1<<4) + +#define EEPROM_CMD_READ 0x2 + +/* Interrupts masks */ +#define IPW_INTA_NONE 0x00000000 + +#define IPW_INTA_BIT_RX_TRANSFER 0x00000002 +#define IPW_INTA_BIT_STATUS_CHANGE 0x00000010 +#define IPW_INTA_BIT_BEACON_PERIOD_EXPIRED 0x00000020 + +//Inta Bits for CF +#define IPW_INTA_BIT_TX_CMD_QUEUE 0x00000800 +#define IPW_INTA_BIT_TX_QUEUE_1 0x00001000 +#define IPW_INTA_BIT_TX_QUEUE_2 0x00002000 +#define 
IPW_INTA_BIT_TX_QUEUE_3 0x00004000 +#define IPW_INTA_BIT_TX_QUEUE_4 0x00008000 + +#define IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE 0x00010000 + +#define IPW_INTA_BIT_PREPARE_FOR_POWER_DOWN 0x00100000 +#define IPW_INTA_BIT_POWER_DOWN 0x00200000 + +#define IPW_INTA_BIT_FW_INITIALIZATION_DONE 0x01000000 +#define IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE 0x02000000 +#define IPW_INTA_BIT_RF_KILL_DONE 0x04000000 +#define IPW_INTA_BIT_FATAL_ERROR 0x40000000 +#define IPW_INTA_BIT_PARITY_ERROR 0x80000000 + +/* Interrupts enabled at init time. */ +#define IPW_INTA_MASK_ALL \ + (IPW_INTA_BIT_TX_QUEUE_1 | \ + IPW_INTA_BIT_TX_QUEUE_2 | \ + IPW_INTA_BIT_TX_QUEUE_3 | \ + IPW_INTA_BIT_TX_QUEUE_4 | \ + IPW_INTA_BIT_TX_CMD_QUEUE | \ + IPW_INTA_BIT_RX_TRANSFER | \ + IPW_INTA_BIT_FATAL_ERROR | \ + IPW_INTA_BIT_PARITY_ERROR | \ + IPW_INTA_BIT_STATUS_CHANGE | \ + IPW_INTA_BIT_FW_INITIALIZATION_DONE | \ + IPW_INTA_BIT_BEACON_PERIOD_EXPIRED | \ + IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE | \ + IPW_INTA_BIT_PREPARE_FOR_POWER_DOWN | \ + IPW_INTA_BIT_POWER_DOWN | \ + IPW_INTA_BIT_RF_KILL_DONE ) + +/* FW event log definitions */ +#define EVENT_ELEM_SIZE (3 * sizeof(u32)) +#define EVENT_START_OFFSET (1 * sizeof(u32) + 2 * sizeof(u16)) + +/* FW error log definitions */ +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) +#define ERROR_START_OFFSET (1 * sizeof(u32)) + +/* TX power level (dbm) */ +#define IPW_TX_POWER_MIN -12 +#define IPW_TX_POWER_MAX 20 +#define IPW_TX_POWER_DEFAULT IPW_TX_POWER_MAX + +enum { + IPW_FW_ERROR_OK = 0, + IPW_FW_ERROR_FAIL, + IPW_FW_ERROR_MEMORY_UNDERFLOW, + IPW_FW_ERROR_MEMORY_OVERFLOW, + IPW_FW_ERROR_BAD_PARAM, + IPW_FW_ERROR_BAD_CHECKSUM, + IPW_FW_ERROR_NMI_INTERRUPT, + IPW_FW_ERROR_BAD_DATABASE, + IPW_FW_ERROR_ALLOC_FAIL, + IPW_FW_ERROR_DMA_UNDERRUN, + IPW_FW_ERROR_DMA_STATUS, + IPW_FW_ERROR_DINO_ERROR, + IPW_FW_ERROR_EEPROM_ERROR, + IPW_FW_ERROR_SYSASSERT, + IPW_FW_ERROR_FATAL_ERROR +}; + +#define AUTH_OPEN 0 +#define AUTH_SHARED_KEY 1 +#define AUTH_LEAP 2 +#define AUTH_IGNORE 3 + +#define HC_ASSOCIATE 0 +#define HC_REASSOCIATE 1 +#define HC_DISASSOCIATE 2 +#define HC_IBSS_START 3 +#define HC_IBSS_RECONF 4 +#define HC_DISASSOC_QUIET 5 + +#define HC_QOS_SUPPORT_ASSOC cpu_to_le16(0x01) + +#define IPW_RATE_CAPABILITIES 1 +#define IPW_RATE_CONNECT 0 + +/* + * Rate values and masks + */ +#define IPW_TX_RATE_1MB 0x0A +#define IPW_TX_RATE_2MB 0x14 +#define IPW_TX_RATE_5MB 0x37 +#define IPW_TX_RATE_6MB 0x0D +#define IPW_TX_RATE_9MB 0x0F +#define IPW_TX_RATE_11MB 0x6E +#define IPW_TX_RATE_12MB 0x05 +#define IPW_TX_RATE_18MB 0x07 +#define IPW_TX_RATE_24MB 0x09 +#define IPW_TX_RATE_36MB 0x0B +#define IPW_TX_RATE_48MB 0x01 +#define IPW_TX_RATE_54MB 0x03 + +#define IPW_ORD_TABLE_ID_MASK 0x0000FF00 +#define IPW_ORD_TABLE_VALUE_MASK 0x000000FF + +#define IPW_ORD_TABLE_0_MASK 0x0000F000 +#define IPW_ORD_TABLE_1_MASK 0x0000F100 +#define IPW_ORD_TABLE_2_MASK 0x0000F200 +#define IPW_ORD_TABLE_3_MASK 0x0000F300 +#define IPW_ORD_TABLE_4_MASK 0x0000F400 +#define IPW_ORD_TABLE_5_MASK 0x0000F500 +#define IPW_ORD_TABLE_6_MASK 0x0000F600 +#define IPW_ORD_TABLE_7_MASK 0x0000F700 + +/* + * Table 0 Entries (all entries are 32 bits) + */ +enum { + IPW_ORD_STAT_TX_CURR_RATE = IPW_ORD_TABLE_0_MASK + 1, + IPW_ORD_STAT_FRAG_TRESHOLD, + IPW_ORD_STAT_RTS_THRESHOLD, + IPW_ORD_STAT_TX_HOST_REQUESTS, + IPW_ORD_STAT_TX_HOST_COMPLETE, + IPW_ORD_STAT_TX_DIR_DATA, + IPW_ORD_STAT_TX_DIR_DATA_B_1, + IPW_ORD_STAT_TX_DIR_DATA_B_2, + IPW_ORD_STAT_TX_DIR_DATA_B_5_5, + IPW_ORD_STAT_TX_DIR_DATA_B_11, + /* Hole */ + + IPW_ORD_STAT_TX_DIR_DATA_G_1 = 
IPW_ORD_TABLE_0_MASK + 19, + IPW_ORD_STAT_TX_DIR_DATA_G_2, + IPW_ORD_STAT_TX_DIR_DATA_G_5_5, + IPW_ORD_STAT_TX_DIR_DATA_G_6, + IPW_ORD_STAT_TX_DIR_DATA_G_9, + IPW_ORD_STAT_TX_DIR_DATA_G_11, + IPW_ORD_STAT_TX_DIR_DATA_G_12, + IPW_ORD_STAT_TX_DIR_DATA_G_18, + IPW_ORD_STAT_TX_DIR_DATA_G_24, + IPW_ORD_STAT_TX_DIR_DATA_G_36, + IPW_ORD_STAT_TX_DIR_DATA_G_48, + IPW_ORD_STAT_TX_DIR_DATA_G_54, + IPW_ORD_STAT_TX_NON_DIR_DATA, + IPW_ORD_STAT_TX_NON_DIR_DATA_B_1, + IPW_ORD_STAT_TX_NON_DIR_DATA_B_2, + IPW_ORD_STAT_TX_NON_DIR_DATA_B_5_5, + IPW_ORD_STAT_TX_NON_DIR_DATA_B_11, + /* Hole */ + + IPW_ORD_STAT_TX_NON_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 44, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_2, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_5_5, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_6, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_9, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_11, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_12, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_18, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_24, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_36, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_48, + IPW_ORD_STAT_TX_NON_DIR_DATA_G_54, + IPW_ORD_STAT_TX_RETRY, + IPW_ORD_STAT_TX_FAILURE, + IPW_ORD_STAT_RX_ERR_CRC, + IPW_ORD_STAT_RX_ERR_ICV, + IPW_ORD_STAT_RX_NO_BUFFER, + IPW_ORD_STAT_FULL_SCANS, + IPW_ORD_STAT_PARTIAL_SCANS, + IPW_ORD_STAT_TGH_ABORTED_SCANS, + IPW_ORD_STAT_TX_TOTAL_BYTES, + IPW_ORD_STAT_CURR_RSSI_RAW, + IPW_ORD_STAT_RX_BEACON, + IPW_ORD_STAT_MISSED_BEACONS, + IPW_ORD_TABLE_0_LAST +}; + +#define IPW_RSSI_TO_DBM 112 + +/* Table 1 Entries + */ +enum { + IPW_ORD_TABLE_1_LAST = IPW_ORD_TABLE_1_MASK | 1, +}; + +/* + * Table 2 Entries + * + * FW_VERSION: 16 byte string + * FW_DATE: 16 byte string (only 14 bytes used) + * UCODE_VERSION: 4 byte version code + * UCODE_DATE: 5 bytes code code + * ADDAPTER_MAC: 6 byte MAC address + * RTC: 4 byte clock + */ +enum { + IPW_ORD_STAT_FW_VERSION = IPW_ORD_TABLE_2_MASK | 1, + IPW_ORD_STAT_FW_DATE, + IPW_ORD_STAT_UCODE_VERSION, + IPW_ORD_STAT_UCODE_DATE, + IPW_ORD_STAT_ADAPTER_MAC, + IPW_ORD_STAT_RTC, + IPW_ORD_TABLE_2_LAST +}; + +/* Table 3 */ +enum { + IPW_ORD_STAT_TX_PACKET = IPW_ORD_TABLE_3_MASK | 0, + IPW_ORD_STAT_TX_PACKET_FAILURE, + IPW_ORD_STAT_TX_PACKET_SUCCESS, + IPW_ORD_STAT_TX_PACKET_ABORTED, + IPW_ORD_TABLE_3_LAST +}; + +/* Table 4 */ +enum { + IPW_ORD_TABLE_4_LAST = IPW_ORD_TABLE_4_MASK +}; + +/* Table 5 */ +enum { + IPW_ORD_STAT_AVAILABLE_AP_COUNT = IPW_ORD_TABLE_5_MASK, + IPW_ORD_STAT_AP_ASSNS, + IPW_ORD_STAT_ROAM, + IPW_ORD_STAT_ROAM_CAUSE_MISSED_BEACONS, + IPW_ORD_STAT_ROAM_CAUSE_UNASSOC, + IPW_ORD_STAT_ROAM_CAUSE_RSSI, + IPW_ORD_STAT_ROAM_CAUSE_LINK_QUALITY, + IPW_ORD_STAT_ROAM_CAUSE_AP_LOAD_BALANCE, + IPW_ORD_STAT_ROAM_CAUSE_AP_NO_TX, + IPW_ORD_STAT_LINK_UP, + IPW_ORD_STAT_LINK_DOWN, + IPW_ORD_ANTENNA_DIVERSITY, + IPW_ORD_CURR_FREQ, + IPW_ORD_TABLE_5_LAST +}; + +/* Table 6 */ +enum { + IPW_ORD_COUNTRY_CODE = IPW_ORD_TABLE_6_MASK, + IPW_ORD_CURR_BSSID, + IPW_ORD_CURR_SSID, + IPW_ORD_TABLE_6_LAST +}; + +/* Table 7 */ +enum { + IPW_ORD_STAT_PERCENT_MISSED_BEACONS = IPW_ORD_TABLE_7_MASK, + IPW_ORD_STAT_PERCENT_TX_RETRIES, + IPW_ORD_STAT_PERCENT_LINK_QUALITY, + IPW_ORD_STAT_CURR_RSSI_DBM, + IPW_ORD_TABLE_7_LAST +}; + +#define IPW_ERROR_LOG (IPW_SHARED_LOWER_BOUND + 0x410) +#define IPW_EVENT_LOG (IPW_SHARED_LOWER_BOUND + 0x414) +#define IPW_ORDINALS_TABLE_LOWER (IPW_SHARED_LOWER_BOUND + 0x500) +#define IPW_ORDINALS_TABLE_0 (IPW_SHARED_LOWER_BOUND + 0x180) +#define IPW_ORDINALS_TABLE_1 (IPW_SHARED_LOWER_BOUND + 0x184) +#define IPW_ORDINALS_TABLE_2 (IPW_SHARED_LOWER_BOUND + 0x188) +#define IPW_MEM_FIXED_OVERRIDE 
(IPW_SHARED_LOWER_BOUND + 0x41C) + +struct ipw_fixed_rate { + __le16 tx_rates; + __le16 reserved; +} __attribute__ ((packed)); + +#define IPW_INDIRECT_ADDR_MASK (~0x3ul) + +struct host_cmd { + u8 cmd; + u8 len; + u16 reserved; + u32 *param; +} __attribute__ ((packed)); /* XXX */ + +struct cmdlog_host_cmd { + u8 cmd; + u8 len; + __le16 reserved; + char param[124]; +} __attribute__ ((packed)); + +struct ipw_cmd_log { + unsigned long jiffies; + int retcode; + struct cmdlog_host_cmd cmd; +}; + +/* SysConfig command parameters ... */ +/* bt_coexistence param */ +#define CFG_BT_COEXISTENCE_SIGNAL_CHNL 0x01 /* tell BT our chnl # */ +#define CFG_BT_COEXISTENCE_DEFER 0x02 /* defer our Tx if BT traffic */ +#define CFG_BT_COEXISTENCE_KILL 0x04 /* kill our Tx if BT traffic */ +#define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08 /* multimedia extensions */ +#define CFG_BT_COEXISTENCE_OOB 0x10 /* signal BT via out-of-band */ + +/* clear-to-send to self param */ +#define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x00 +#define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x01 +#define CFG_CTS_TO_ITSELF_ENABLED_DEF CFG_CTS_TO_ITSELF_ENABLED_MIN + +/* Antenna diversity param (h/w can select best antenna, based on signal) */ +#define CFG_SYS_ANTENNA_BOTH 0x00 /* NIC selects best antenna */ +#define CFG_SYS_ANTENNA_A 0x01 /* force antenna A */ +#define CFG_SYS_ANTENNA_B 0x03 /* force antenna B */ +#define CFG_SYS_ANTENNA_SLOW_DIV 0x02 /* consider background noise */ + +/* + * The definitions below were lifted off the ipw2100 driver, which only + * supports 'b' mode, so I'm sure these are not exactly correct. + * + * Somebody fix these!! + */ +#define REG_MIN_CHANNEL 0 +#define REG_MAX_CHANNEL 14 + +#define REG_CHANNEL_MASK 0x00003FFF +#define IPW_IBSS_11B_DEFAULT_MASK 0x87ff + +#define IPW_MAX_CONFIG_RETRIES 10 + +#endif /* __ipw2200_h__ */ diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h new file mode 100644 index 0000000..bf45391 --- /dev/null +++ b/drivers/net/wireless/ipw2x00/libipw.h @@ -0,0 +1,1092 @@ +/* + * Merged with mainline ieee80211.h in Aug 2004. Original ieee802_11 + * remains copyright by the original authors + * + * Portions of the merged code are based on Host AP (software wireless + * LAN access point) driver for Intersil Prism2/2.5/3. + * + * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen + * + * Copyright (c) 2002-2003, Jouni Malinen + * + * Adaption to a generic IEEE 802.11 stack by James Ketrenos + * + * Copyright (c) 2004-2005, Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. See README and COPYING for + * more details. + * + * API Version History + * 1.0.x -- Initial version + * 1.1.x -- Added radiotap, QoS, TIM, libipw_geo APIs, + * various structure changes, and crypto API init method + */ +#ifndef LIBIPW_H +#define LIBIPW_H +#include /* ETH_ALEN */ +#include /* ARRAY_SIZE */ +#include +#include + +#include +#include + +#define LIBIPW_VERSION "git-1.1.13" + +#define LIBIPW_DATA_LEN 2304 +/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section + 6.2.1.1.2. + + The figure in section 7.1.2 suggests a body size of up to 2312 + bytes is allowed, which is a bit confusing, I suspect this + represents the 2304 bytes of real data, plus a possible 8 bytes of + WEP IV and ICV. 
(this interpretation suggested by Ramiro Barreiro) */ + +#define LIBIPW_1ADDR_LEN 10 +#define LIBIPW_2ADDR_LEN 16 +#define LIBIPW_3ADDR_LEN 24 +#define LIBIPW_4ADDR_LEN 30 +#define LIBIPW_FCS_LEN 4 +#define LIBIPW_HLEN (LIBIPW_4ADDR_LEN) +#define LIBIPW_FRAME_LEN (LIBIPW_DATA_LEN + LIBIPW_HLEN) + +#define MIN_FRAG_THRESHOLD 256U +#define MAX_FRAG_THRESHOLD 2346U + +/* QOS control */ +#define LIBIPW_QCTL_TID 0x000F + +/* debug macros */ + +#ifdef CONFIG_LIBIPW_DEBUG +extern u32 libipw_debug_level; +#define LIBIPW_DEBUG(level, fmt, args...) \ +do { if (libipw_debug_level & (level)) \ + printk(KERN_DEBUG "ieee80211: %c %s " fmt, \ + in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0) +static inline bool libipw_ratelimit_debug(u32 level) +{ + return (libipw_debug_level & level) && net_ratelimit(); +} +#else +#define LIBIPW_DEBUG(level, fmt, args...) do {} while (0) +static inline bool libipw_ratelimit_debug(u32 level) +{ + return false; +} +#endif /* CONFIG_LIBIPW_DEBUG */ + +/* + * To use the debug system: + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of: + * + * #define LIBIPW_DL_xxxx VALUE + * + * shifting value to the left one bit from the previous entry. xxxx should be + * the name of the classification (for example, WEP) + * + * You then need to either add a LIBIPW_xxxx_DEBUG() macro definition for your + * classification, or use LIBIPW_DEBUG(LIBIPW_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * To add your debug level to the list of levels seen when you perform + * + * % cat /proc/net/ieee80211/debug_level + * + * you simply need to add your entry to the libipw_debug_level array. + * + * If you do not see debug_level in /proc/net/ieee80211 then you do not have + * CONFIG_LIBIPW_DEBUG defined in your kernel configuration + * + */ + +#define LIBIPW_DL_INFO (1<<0) +#define LIBIPW_DL_WX (1<<1) +#define LIBIPW_DL_SCAN (1<<2) +#define LIBIPW_DL_STATE (1<<3) +#define LIBIPW_DL_MGMT (1<<4) +#define LIBIPW_DL_FRAG (1<<5) +#define LIBIPW_DL_DROP (1<<7) + +#define LIBIPW_DL_TX (1<<8) +#define LIBIPW_DL_RX (1<<9) +#define LIBIPW_DL_QOS (1<<31) + +#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a) +#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a) +#define LIBIPW_DEBUG_INFO(f, a...) LIBIPW_DEBUG(LIBIPW_DL_INFO, f, ## a) + +#define LIBIPW_DEBUG_WX(f, a...) LIBIPW_DEBUG(LIBIPW_DL_WX, f, ## a) +#define LIBIPW_DEBUG_SCAN(f, a...) LIBIPW_DEBUG(LIBIPW_DL_SCAN, f, ## a) +#define LIBIPW_DEBUG_STATE(f, a...) LIBIPW_DEBUG(LIBIPW_DL_STATE, f, ## a) +#define LIBIPW_DEBUG_MGMT(f, a...) LIBIPW_DEBUG(LIBIPW_DL_MGMT, f, ## a) +#define LIBIPW_DEBUG_FRAG(f, a...) LIBIPW_DEBUG(LIBIPW_DL_FRAG, f, ## a) +#define LIBIPW_DEBUG_DROP(f, a...) LIBIPW_DEBUG(LIBIPW_DL_DROP, f, ## a) +#define LIBIPW_DEBUG_TX(f, a...) LIBIPW_DEBUG(LIBIPW_DL_TX, f, ## a) +#define LIBIPW_DEBUG_RX(f, a...) LIBIPW_DEBUG(LIBIPW_DL_RX, f, ## a) +#define LIBIPW_DEBUG_QOS(f, a...) 
LIBIPW_DEBUG(LIBIPW_DL_QOS, f, ## a) +#include +#include /* ARPHRD_ETHER */ + +#ifndef WIRELESS_SPY +#define WIRELESS_SPY /* enable iwspy support */ +#endif +#include /* new driver API */ + +#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */ + +#ifndef ETH_P_80211_RAW +#define ETH_P_80211_RAW (ETH_P_ECONET + 1) +#endif + +/* IEEE 802.11 defines */ + +#define P80211_OUI_LEN 3 + +struct libipw_snap_hdr { + + u8 dsap; /* always 0xAA */ + u8 ssap; /* always 0xAA */ + u8 ctrl; /* always 0x03 */ + u8 oui[P80211_OUI_LEN]; /* organizational universal id */ + +} __attribute__ ((packed)); + +#define SNAP_SIZE sizeof(struct libipw_snap_hdr) + +#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS) +#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE) +#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE) + +#define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG) +#define WLAN_GET_SEQ_SEQ(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) + +#define LIBIPW_STATMASK_SIGNAL (1<<0) +#define LIBIPW_STATMASK_RSSI (1<<1) +#define LIBIPW_STATMASK_NOISE (1<<2) +#define LIBIPW_STATMASK_RATE (1<<3) +#define LIBIPW_STATMASK_WEMASK 0x7 + +#define LIBIPW_CCK_MODULATION (1<<0) +#define LIBIPW_OFDM_MODULATION (1<<1) + +#define LIBIPW_24GHZ_BAND (1<<0) +#define LIBIPW_52GHZ_BAND (1<<1) + +#define LIBIPW_CCK_RATE_1MB 0x02 +#define LIBIPW_CCK_RATE_2MB 0x04 +#define LIBIPW_CCK_RATE_5MB 0x0B +#define LIBIPW_CCK_RATE_11MB 0x16 +#define LIBIPW_OFDM_RATE_6MB 0x0C +#define LIBIPW_OFDM_RATE_9MB 0x12 +#define LIBIPW_OFDM_RATE_12MB 0x18 +#define LIBIPW_OFDM_RATE_18MB 0x24 +#define LIBIPW_OFDM_RATE_24MB 0x30 +#define LIBIPW_OFDM_RATE_36MB 0x48 +#define LIBIPW_OFDM_RATE_48MB 0x60 +#define LIBIPW_OFDM_RATE_54MB 0x6C +#define LIBIPW_BASIC_RATE_MASK 0x80 + +#define LIBIPW_CCK_RATE_1MB_MASK (1<<0) +#define LIBIPW_CCK_RATE_2MB_MASK (1<<1) +#define LIBIPW_CCK_RATE_5MB_MASK (1<<2) +#define LIBIPW_CCK_RATE_11MB_MASK (1<<3) +#define LIBIPW_OFDM_RATE_6MB_MASK (1<<4) +#define LIBIPW_OFDM_RATE_9MB_MASK (1<<5) +#define LIBIPW_OFDM_RATE_12MB_MASK (1<<6) +#define LIBIPW_OFDM_RATE_18MB_MASK (1<<7) +#define LIBIPW_OFDM_RATE_24MB_MASK (1<<8) +#define LIBIPW_OFDM_RATE_36MB_MASK (1<<9) +#define LIBIPW_OFDM_RATE_48MB_MASK (1<<10) +#define LIBIPW_OFDM_RATE_54MB_MASK (1<<11) + +#define LIBIPW_CCK_RATES_MASK 0x0000000F +#define LIBIPW_CCK_BASIC_RATES_MASK (LIBIPW_CCK_RATE_1MB_MASK | \ + LIBIPW_CCK_RATE_2MB_MASK) +#define LIBIPW_CCK_DEFAULT_RATES_MASK (LIBIPW_CCK_BASIC_RATES_MASK | \ + LIBIPW_CCK_RATE_5MB_MASK | \ + LIBIPW_CCK_RATE_11MB_MASK) + +#define LIBIPW_OFDM_RATES_MASK 0x00000FF0 +#define LIBIPW_OFDM_BASIC_RATES_MASK (LIBIPW_OFDM_RATE_6MB_MASK | \ + LIBIPW_OFDM_RATE_12MB_MASK | \ + LIBIPW_OFDM_RATE_24MB_MASK) +#define LIBIPW_OFDM_DEFAULT_RATES_MASK (LIBIPW_OFDM_BASIC_RATES_MASK | \ + LIBIPW_OFDM_RATE_9MB_MASK | \ + LIBIPW_OFDM_RATE_18MB_MASK | \ + LIBIPW_OFDM_RATE_36MB_MASK | \ + LIBIPW_OFDM_RATE_48MB_MASK | \ + LIBIPW_OFDM_RATE_54MB_MASK) +#define LIBIPW_DEFAULT_RATES_MASK (LIBIPW_OFDM_DEFAULT_RATES_MASK | \ + LIBIPW_CCK_DEFAULT_RATES_MASK) + +#define LIBIPW_NUM_OFDM_RATES 8 +#define LIBIPW_NUM_CCK_RATES 4 +#define LIBIPW_OFDM_SHIFT_MASK_A 4 + +/* NOTE: This data is for statistical purposes; not all hardware provides this + * information for frames received. + * For libipw_rx_mgt, you need to set at least the 'len' parameter. 
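The comment above walks through adding a new debug classification; the sketch below shows what that looks like in practice. LIBIPW_DL_CRYPT and LIBIPW_DEBUG_CRYPT are hypothetical names chosen for illustration (bit 10 is unused in the list above), and the proc path is the one created by libipw_module.c when CONFIG_LIBIPW_DEBUG is enabled.

#define LIBIPW_DL_CRYPT			(1<<10)	/* hypothetical new classification */
#define LIBIPW_DEBUG_CRYPT(f, a...)	LIBIPW_DEBUG(LIBIPW_DL_CRYPT, f, ## a)

/*
 * Driver code can then emit messages that are filtered at run time:
 *
 *	LIBIPW_DEBUG_CRYPT("group key %d rotated\n", keyidx);
 *
 * and the classification is enabled (as root) with:
 *
 *	echo 0x00000400 > /proc/net/ieee80211/debug_level
 */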
+ */ +struct libipw_rx_stats { + u32 mac_time; + s8 rssi; + u8 signal; + u8 noise; + u16 rate; /* in 100 kbps */ + u8 received_channel; + u8 control; + u8 mask; + u8 freq; + u16 len; + u64 tsf; + u32 beacon_time; +}; + +/* IEEE 802.11 requires that STA supports concurrent reception of at least + * three fragmented frames. This define can be increased to support more + * concurrent frames, but it should be noted that each entry can consume about + * 2 kB of RAM and increasing cache size will slow down frame reassembly. */ +#define LIBIPW_FRAG_CACHE_LEN 4 + +struct libipw_frag_entry { + unsigned long first_frag_time; + unsigned int seq; + unsigned int last_frag; + struct sk_buff *skb; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; +}; + +struct libipw_stats { + unsigned int tx_unicast_frames; + unsigned int tx_multicast_frames; + unsigned int tx_fragments; + unsigned int tx_unicast_octets; + unsigned int tx_multicast_octets; + unsigned int tx_deferred_transmissions; + unsigned int tx_single_retry_frames; + unsigned int tx_multiple_retry_frames; + unsigned int tx_retry_limit_exceeded; + unsigned int tx_discards; + unsigned int rx_unicast_frames; + unsigned int rx_multicast_frames; + unsigned int rx_fragments; + unsigned int rx_unicast_octets; + unsigned int rx_multicast_octets; + unsigned int rx_fcs_errors; + unsigned int rx_discards_no_buffer; + unsigned int tx_discards_wrong_sa; + unsigned int rx_discards_undecryptable; + unsigned int rx_message_in_msg_fragments; + unsigned int rx_message_in_bad_msg_fragments; +}; + +struct libipw_device; + +#define SEC_KEY_1 (1<<0) +#define SEC_KEY_2 (1<<1) +#define SEC_KEY_3 (1<<2) +#define SEC_KEY_4 (1<<3) +#define SEC_ACTIVE_KEY (1<<4) +#define SEC_AUTH_MODE (1<<5) +#define SEC_UNICAST_GROUP (1<<6) +#define SEC_LEVEL (1<<7) +#define SEC_ENABLED (1<<8) +#define SEC_ENCRYPT (1<<9) + +#define SEC_LEVEL_0 0 /* None */ +#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */ +#define SEC_LEVEL_2 2 /* Level 1 + TKIP */ +#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */ +#define SEC_LEVEL_3 4 /* Level 2 + CCMP */ + +#define SEC_ALG_NONE 0 +#define SEC_ALG_WEP 1 +#define SEC_ALG_TKIP 2 +#define SEC_ALG_CCMP 3 + +#define WEP_KEYS 4 +#define WEP_KEY_LEN 13 +#define SCM_KEY_LEN 32 +#define SCM_TEMPORAL_KEY_LENGTH 16 + +struct libipw_security { + u16 active_key:2, enabled:1, unicast_uses_group:1, encrypt:1; + u8 auth_mode; + u8 encode_alg[WEP_KEYS]; + u8 key_sizes[WEP_KEYS]; + u8 keys[WEP_KEYS][SCM_KEY_LEN]; + u8 level; + u16 flags; +} __attribute__ ((packed)); + +/* + + 802.11 data frame from AP + + ,-------------------------------------------------------------------. +Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 | + |------|------|---------|---------|---------|------|---------|------| +Desc. 
| ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs | + | | tion | (BSSID) | | | ence | data | | + `-------------------------------------------------------------------' + +Total: 28-2340 bytes + +*/ + +#define BEACON_PROBE_SSID_ID_POSITION 12 + +struct libipw_hdr_1addr { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 payload[0]; +} __attribute__ ((packed)); + +struct libipw_hdr_2addr { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 payload[0]; +} __attribute__ ((packed)); + +struct libipw_hdr_3addr { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctl; + u8 payload[0]; +} __attribute__ ((packed)); + +struct libipw_hdr_4addr { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctl; + u8 addr4[ETH_ALEN]; + u8 payload[0]; +} __attribute__ ((packed)); + +struct libipw_hdr_3addrqos { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctl; + u8 payload[0]; + __le16 qos_ctl; +} __attribute__ ((packed)); + +struct libipw_info_element { + u8 id; + u8 len; + u8 data[0]; +} __attribute__ ((packed)); + +/* + * These are the data types that can make up management packets + * + u16 auth_algorithm; + u16 auth_sequence; + u16 beacon_interval; + u16 capability; + u8 current_ap[ETH_ALEN]; + u16 listen_interval; + struct { + u16 association_id:14, reserved:2; + } __attribute__ ((packed)); + u32 time_stamp[2]; + u16 reason; + u16 status; +*/ + +struct libipw_auth { + struct libipw_hdr_3addr header; + __le16 algorithm; + __le16 transaction; + __le16 status; + /* challenge */ + struct libipw_info_element info_element[0]; +} __attribute__ ((packed)); + +struct libipw_channel_switch { + u8 id; + u8 len; + u8 mode; + u8 channel; + u8 count; +} __attribute__ ((packed)); + +struct libipw_action { + struct libipw_hdr_3addr header; + u8 category; + u8 action; + union { + struct libipw_action_exchange { + u8 token; + struct libipw_info_element info_element[0]; + } exchange; + struct libipw_channel_switch channel_switch; + + } format; +} __attribute__ ((packed)); + +struct libipw_disassoc { + struct libipw_hdr_3addr header; + __le16 reason; +} __attribute__ ((packed)); + +/* Alias deauth for disassoc */ +#define libipw_deauth libipw_disassoc + +struct libipw_probe_request { + struct libipw_hdr_3addr header; + /* SSID, supported rates */ + struct libipw_info_element info_element[0]; +} __attribute__ ((packed)); + +struct libipw_probe_response { + struct libipw_hdr_3addr header; + __le32 time_stamp[2]; + __le16 beacon_interval; + __le16 capability; + /* SSID, supported rates, FH params, DS params, + * CF params, IBSS params, TIM (if beacon), RSN */ + struct libipw_info_element info_element[0]; +} __attribute__ ((packed)); + +/* Alias beacon for probe_response */ +#define libipw_beacon libipw_probe_response + +struct libipw_assoc_request { + struct libipw_hdr_3addr header; + __le16 capability; + __le16 listen_interval; + /* SSID, supported rates, RSN */ + struct libipw_info_element info_element[0]; +} __attribute__ ((packed)); + +struct libipw_reassoc_request { + struct libipw_hdr_3addr header; + __le16 capability; + __le16 listen_interval; + u8 current_ap[ETH_ALEN]; + struct libipw_info_element info_element[0]; +} __attribute__ ((packed)); + +struct libipw_assoc_response { + struct libipw_hdr_3addr header; + __le16 capability; + __le16 status; + 
__le16 aid; + /* supported rates */ + struct libipw_info_element info_element[0]; +} __attribute__ ((packed)); + +struct libipw_txb { + u8 nr_frags; + u8 encrypted; + u8 rts_included; + u8 reserved; + u16 frag_size; + u16 payload_size; + struct sk_buff *fragments[0]; +}; + +/* SWEEP TABLE ENTRIES NUMBER */ +#define MAX_SWEEP_TAB_ENTRIES 42 +#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7 +/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs + * only use 8, and then use extended rates for the remaining supported + * rates. Other APs, however, stick all of their supported rates on the + * main rates information element... */ +#define MAX_RATES_LENGTH ((u8)12) +#define MAX_RATES_EX_LENGTH ((u8)16) +#define MAX_NETWORK_COUNT 128 + +#define CRC_LENGTH 4U + +#define MAX_WPA_IE_LEN 64 + +#define NETWORK_HAS_OFDM (1<<1) +#define NETWORK_HAS_CCK (1<<2) + +/* QoS structure */ +#define NETWORK_HAS_QOS_PARAMETERS (1<<3) +#define NETWORK_HAS_QOS_INFORMATION (1<<4) +#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | \ + NETWORK_HAS_QOS_INFORMATION) + +/* 802.11h */ +#define NETWORK_HAS_POWER_CONSTRAINT (1<<5) +#define NETWORK_HAS_CSA (1<<6) +#define NETWORK_HAS_QUIET (1<<7) +#define NETWORK_HAS_IBSS_DFS (1<<8) +#define NETWORK_HAS_TPC_REPORT (1<<9) + +#define NETWORK_HAS_ERP_VALUE (1<<10) + +#define QOS_QUEUE_NUM 4 +#define QOS_OUI_LEN 3 +#define QOS_OUI_TYPE 2 +#define QOS_ELEMENT_ID 221 +#define QOS_OUI_INFO_SUB_TYPE 0 +#define QOS_OUI_PARAM_SUB_TYPE 1 +#define QOS_VERSION_1 1 +#define QOS_AIFSN_MIN_VALUE 2 + +struct libipw_qos_information_element { + u8 elementID; + u8 length; + u8 qui[QOS_OUI_LEN]; + u8 qui_type; + u8 qui_subtype; + u8 version; + u8 ac_info; +} __attribute__ ((packed)); + +struct libipw_qos_ac_parameter { + u8 aci_aifsn; + u8 ecw_min_max; + __le16 tx_op_limit; +} __attribute__ ((packed)); + +struct libipw_qos_parameter_info { + struct libipw_qos_information_element info_element; + u8 reserved; + struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM]; +} __attribute__ ((packed)); + +struct libipw_qos_parameters { + __le16 cw_min[QOS_QUEUE_NUM]; + __le16 cw_max[QOS_QUEUE_NUM]; + u8 aifs[QOS_QUEUE_NUM]; + u8 flag[QOS_QUEUE_NUM]; + __le16 tx_op_limit[QOS_QUEUE_NUM]; +} __attribute__ ((packed)); + +struct libipw_qos_data { + struct libipw_qos_parameters parameters; + int active; + int supported; + u8 param_count; + u8 old_param_count; +}; + +struct libipw_tim_parameters { + u8 tim_count; + u8 tim_period; +} __attribute__ ((packed)); + +/*******************************************************/ + +enum { /* libipw_basic_report.map */ + LIBIPW_BASIC_MAP_BSS = (1 << 0), + LIBIPW_BASIC_MAP_OFDM = (1 << 1), + LIBIPW_BASIC_MAP_UNIDENTIFIED = (1 << 2), + LIBIPW_BASIC_MAP_RADAR = (1 << 3), + LIBIPW_BASIC_MAP_UNMEASURED = (1 << 4), + /* Bits 5-7 are reserved */ + +}; +struct libipw_basic_report { + u8 channel; + __le64 start_time; + __le16 duration; + u8 map; +} __attribute__ ((packed)); + +enum { /* libipw_measurement_request.mode */ + /* Bit 0 is reserved */ + LIBIPW_MEASUREMENT_ENABLE = (1 << 1), + LIBIPW_MEASUREMENT_REQUEST = (1 << 2), + LIBIPW_MEASUREMENT_REPORT = (1 << 3), + /* Bits 4-7 are reserved */ +}; + +enum { + LIBIPW_REPORT_BASIC = 0, /* required */ + LIBIPW_REPORT_CCA = 1, /* optional */ + LIBIPW_REPORT_RPI = 2, /* optional */ + /* 3-255 reserved */ +}; + +struct libipw_measurement_params { + u8 channel; + __le64 start_time; + __le16 duration; +} __attribute__ ((packed)); + +struct libipw_measurement_request { + struct libipw_info_element ie; + u8 token; + u8 
mode; + u8 type; + struct libipw_measurement_params params[0]; +} __attribute__ ((packed)); + +struct libipw_measurement_report { + struct libipw_info_element ie; + u8 token; + u8 mode; + u8 type; + union { + struct libipw_basic_report basic[0]; + } u; +} __attribute__ ((packed)); + +struct libipw_tpc_report { + u8 transmit_power; + u8 link_margin; +} __attribute__ ((packed)); + +struct libipw_channel_map { + u8 channel; + u8 map; +} __attribute__ ((packed)); + +struct libipw_ibss_dfs { + struct libipw_info_element ie; + u8 owner[ETH_ALEN]; + u8 recovery_interval; + struct libipw_channel_map channel_map[0]; +}; + +struct libipw_csa { + u8 mode; + u8 channel; + u8 count; +} __attribute__ ((packed)); + +struct libipw_quiet { + u8 count; + u8 period; + u8 duration; + u8 offset; +} __attribute__ ((packed)); + +struct libipw_network { + /* These entries are used to identify a unique network */ + u8 bssid[ETH_ALEN]; + u8 channel; + /* Ensure null-terminated for any debug msgs */ + u8 ssid[IW_ESSID_MAX_SIZE + 1]; + u8 ssid_len; + + struct libipw_qos_data qos_data; + + /* These are network statistics */ + struct libipw_rx_stats stats; + u16 capability; + u8 rates[MAX_RATES_LENGTH]; + u8 rates_len; + u8 rates_ex[MAX_RATES_EX_LENGTH]; + u8 rates_ex_len; + unsigned long last_scanned; + u8 mode; + u32 flags; + u32 last_associate; + u32 time_stamp[2]; + u16 beacon_interval; + u16 listen_interval; + u16 atim_window; + u8 erp_value; + u8 wpa_ie[MAX_WPA_IE_LEN]; + size_t wpa_ie_len; + u8 rsn_ie[MAX_WPA_IE_LEN]; + size_t rsn_ie_len; + struct libipw_tim_parameters tim; + + /* 802.11h info */ + + /* Power Constraint - mandatory if spctrm mgmt required */ + u8 power_constraint; + + /* TPC Report - mandatory if spctrm mgmt required */ + struct libipw_tpc_report tpc_report; + + /* IBSS DFS - mandatory if spctrm mgmt required and IBSS + * NOTE: This is variable length and so must be allocated dynamically */ + struct libipw_ibss_dfs *ibss_dfs; + + /* Channel Switch Announcement - optional if spctrm mgmt required */ + struct libipw_csa csa; + + /* Quiet - optional if spctrm mgmt required */ + struct libipw_quiet quiet; + + struct list_head list; +}; + +enum libipw_state { + LIBIPW_UNINITIALIZED = 0, + LIBIPW_INITIALIZED, + LIBIPW_ASSOCIATING, + LIBIPW_ASSOCIATED, + LIBIPW_AUTHENTICATING, + LIBIPW_AUTHENTICATED, + LIBIPW_SHUTDOWN +}; + +#define DEFAULT_MAX_SCAN_AGE (15 * HZ) +#define DEFAULT_FTS 2346 + +#define CFG_LIBIPW_RESERVE_FCS (1<<0) +#define CFG_LIBIPW_COMPUTE_FCS (1<<1) +#define CFG_LIBIPW_RTS (1<<2) + +#define LIBIPW_24GHZ_MIN_CHANNEL 1 +#define LIBIPW_24GHZ_MAX_CHANNEL 14 +#define LIBIPW_24GHZ_CHANNELS (LIBIPW_24GHZ_MAX_CHANNEL - \ + LIBIPW_24GHZ_MIN_CHANNEL + 1) + +#define LIBIPW_52GHZ_MIN_CHANNEL 34 +#define LIBIPW_52GHZ_MAX_CHANNEL 165 +#define LIBIPW_52GHZ_CHANNELS (LIBIPW_52GHZ_MAX_CHANNEL - \ + LIBIPW_52GHZ_MIN_CHANNEL + 1) + +enum { + LIBIPW_CH_PASSIVE_ONLY = (1 << 0), + LIBIPW_CH_80211H_RULES = (1 << 1), + LIBIPW_CH_B_ONLY = (1 << 2), + LIBIPW_CH_NO_IBSS = (1 << 3), + LIBIPW_CH_UNIFORM_SPREADING = (1 << 4), + LIBIPW_CH_RADAR_DETECT = (1 << 5), + LIBIPW_CH_INVALID = (1 << 6), +}; + +struct libipw_channel { + u32 freq; /* in MHz */ + u8 channel; + u8 flags; + u8 max_power; /* in dBm */ +}; + +struct libipw_geo { + u8 name[4]; + u8 bg_channels; + u8 a_channels; + struct libipw_channel bg[LIBIPW_24GHZ_CHANNELS]; + struct libipw_channel a[LIBIPW_52GHZ_CHANNELS]; +}; + +struct libipw_device { + struct net_device *dev; + struct wireless_dev wdev; + struct libipw_security sec; + + /* Bookkeeping 
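The channel and geography structures above are what a driver feeds to libipw_set_geo() (declared further down in this header and implemented in libipw_geo.c). Below is an illustrative two-channel 2.4 GHz table; the "XX" country tag, the power values and the passive flag on channel 11 are made up for the example, and only the channel/frequency pairs are standard 802.11b/g values.

static const struct libipw_geo example_geo = {
	.name		= "XX ",	/* copied as 3 chars + NUL by libipw_set_geo() */
	.bg_channels	= 2,
	.a_channels	= 0,
	.bg		= {
		{ .freq = 2412, .channel = 1,  .flags = 0,
		  .max_power = 20 /* dBm */ },
		{ .freq = 2462, .channel = 11, .flags = LIBIPW_CH_PASSIVE_ONLY,
		  .max_power = 20 /* dBm */ },
	},
};

/* A driver would typically call, early in its setup path:
 *	libipw_set_geo(ieee, &example_geo);
 * after which libipw_is_valid_channel(), libipw_channel_to_freq() and
 * friends start returning non-zero results for channels 1 and 11. */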
structures */ + struct libipw_stats ieee_stats; + + struct libipw_geo geo; + struct ieee80211_supported_band bg_band; + struct ieee80211_supported_band a_band; + + /* Probe / Beacon management */ + struct list_head network_free_list; + struct list_head network_list; + struct libipw_network *networks; + int scans; + int scan_age; + + int iw_mode; /* operating mode (IW_MODE_*) */ + struct iw_spy_data spy_data; /* iwspy support */ + + spinlock_t lock; + + int tx_headroom; /* Set to size of any additional room needed at front + * of allocated Tx SKBs */ + u32 config; + + /* WEP and other encryption related settings at the device level */ + int open_wep; /* Set to 1 to allow unencrypted frames */ + + int reset_on_keychange; /* Set to 1 if the HW needs to be reset on + * WEP key changes */ + + /* If the host performs {en,de}cryption, then set to 1 */ + int host_encrypt; + int host_encrypt_msdu; + int host_decrypt; + /* host performs multicast decryption */ + int host_mc_decrypt; + + /* host should strip IV and ICV from protected frames */ + /* meaningful only when hardware decryption is being used */ + int host_strip_iv_icv; + + int host_open_frag; + int host_build_iv; + int ieee802_1x; /* is IEEE 802.1X used */ + + /* WPA data */ + int wpa_enabled; + int drop_unencrypted; + int privacy_invoked; + size_t wpa_ie_len; + u8 *wpa_ie; + + struct lib80211_crypt_info crypt_info; + + int bcrx_sta_key; /* use individual keys to override default keys even + * with RX of broad/multicast frames */ + + /* Fragmentation structures */ + struct libipw_frag_entry frag_cache[LIBIPW_FRAG_CACHE_LEN]; + unsigned int frag_next_idx; + u16 fts; /* Fragmentation Threshold */ + u16 rts; /* RTS threshold */ + + /* Association info */ + u8 bssid[ETH_ALEN]; + + enum libipw_state state; + + int mode; /* A, B, G */ + int modulation; /* CCK, OFDM */ + int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */ + int abg_true; /* ABG flag */ + + int perfect_rssi; + int worst_rssi; + + u16 prev_seq_ctl; /* used to drop duplicate frames */ + + /* Callback functions */ + void (*set_security) (struct net_device * dev, + struct libipw_security * sec); + netdev_tx_t (*hard_start_xmit) (struct libipw_txb * txb, + struct net_device * dev, int pri); + int (*reset_port) (struct net_device * dev); + int (*is_queue_full) (struct net_device * dev, int pri); + + int (*handle_management) (struct net_device * dev, + struct libipw_network * network, u16 type); + int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb); + + /* Typical STA methods */ + int (*handle_auth) (struct net_device * dev, + struct libipw_auth * auth); + int (*handle_deauth) (struct net_device * dev, + struct libipw_deauth * auth); + int (*handle_action) (struct net_device * dev, + struct libipw_action * action, + struct libipw_rx_stats * stats); + int (*handle_disassoc) (struct net_device * dev, + struct libipw_disassoc * assoc); + int (*handle_beacon) (struct net_device * dev, + struct libipw_beacon * beacon, + struct libipw_network * network); + int (*handle_probe_response) (struct net_device * dev, + struct libipw_probe_response * resp, + struct libipw_network * network); + int (*handle_probe_request) (struct net_device * dev, + struct libipw_probe_request * req, + struct libipw_rx_stats * stats); + int (*handle_assoc_response) (struct net_device * dev, + struct libipw_assoc_response * resp, + struct libipw_network * network); + + /* Typical AP methods */ + int (*handle_assoc_request) (struct net_device * dev); + int (*handle_reassoc_request) (struct net_device * dev, + 
struct libipw_reassoc_request * req); + + /* This must be the last item so that it points to the data + * allocated beyond this structure by alloc_ieee80211 */ + u8 priv[0]; +}; + +#define IEEE_A (1<<0) +#define IEEE_B (1<<1) +#define IEEE_G (1<<2) +#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G) + +static inline void *libipw_priv(struct net_device *dev) +{ + return ((struct libipw_device *)netdev_priv(dev))->priv; +} + +static inline int libipw_is_valid_mode(struct libipw_device *ieee, + int mode) +{ + /* + * It is possible for both access points and our device to support + * combinations of modes, so as long as there is one valid combination + * of ap/device supported modes, then return success + * + */ + if ((mode & IEEE_A) && + (ieee->modulation & LIBIPW_OFDM_MODULATION) && + (ieee->freq_band & LIBIPW_52GHZ_BAND)) + return 1; + + if ((mode & IEEE_G) && + (ieee->modulation & LIBIPW_OFDM_MODULATION) && + (ieee->freq_band & LIBIPW_24GHZ_BAND)) + return 1; + + if ((mode & IEEE_B) && + (ieee->modulation & LIBIPW_CCK_MODULATION) && + (ieee->freq_band & LIBIPW_24GHZ_BAND)) + return 1; + + return 0; +} + +static inline int libipw_get_hdrlen(u16 fc) +{ + int hdrlen = LIBIPW_3ADDR_LEN; + u16 stype = WLAN_FC_GET_STYPE(fc); + + switch (WLAN_FC_GET_TYPE(fc)) { + case IEEE80211_FTYPE_DATA: + if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS)) + hdrlen = LIBIPW_4ADDR_LEN; + if (stype & IEEE80211_STYPE_QOS_DATA) + hdrlen += 2; + break; + case IEEE80211_FTYPE_CTL: + switch (WLAN_FC_GET_STYPE(fc)) { + case IEEE80211_STYPE_CTS: + case IEEE80211_STYPE_ACK: + hdrlen = LIBIPW_1ADDR_LEN; + break; + default: + hdrlen = LIBIPW_2ADDR_LEN; + break; + } + break; + } + + return hdrlen; +} + +static inline u8 *libipw_get_payload(struct ieee80211_hdr *hdr) +{ + switch (libipw_get_hdrlen(le16_to_cpu(hdr->frame_control))) { + case LIBIPW_1ADDR_LEN: + return ((struct libipw_hdr_1addr *)hdr)->payload; + case LIBIPW_2ADDR_LEN: + return ((struct libipw_hdr_2addr *)hdr)->payload; + case LIBIPW_3ADDR_LEN: + return ((struct libipw_hdr_3addr *)hdr)->payload; + case LIBIPW_4ADDR_LEN: + return ((struct libipw_hdr_4addr *)hdr)->payload; + } + return NULL; +} + +static inline int libipw_is_ofdm_rate(u8 rate) +{ + switch (rate & ~LIBIPW_BASIC_RATE_MASK) { + case LIBIPW_OFDM_RATE_6MB: + case LIBIPW_OFDM_RATE_9MB: + case LIBIPW_OFDM_RATE_12MB: + case LIBIPW_OFDM_RATE_18MB: + case LIBIPW_OFDM_RATE_24MB: + case LIBIPW_OFDM_RATE_36MB: + case LIBIPW_OFDM_RATE_48MB: + case LIBIPW_OFDM_RATE_54MB: + return 1; + } + return 0; +} + +static inline int libipw_is_cck_rate(u8 rate) +{ + switch (rate & ~LIBIPW_BASIC_RATE_MASK) { + case LIBIPW_CCK_RATE_1MB: + case LIBIPW_CCK_RATE_2MB: + case LIBIPW_CCK_RATE_5MB: + case LIBIPW_CCK_RATE_11MB: + return 1; + } + return 0; +} + +/* ieee80211.c */ +extern void free_ieee80211(struct net_device *dev, int monitor); +extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor); +extern int libipw_change_mtu(struct net_device *dev, int new_mtu); + +extern void libipw_networks_age(struct libipw_device *ieee, + unsigned long age_secs); + +extern int libipw_set_encryption(struct libipw_device *ieee); + +/* libipw_tx.c */ +extern netdev_tx_t libipw_xmit(struct sk_buff *skb, + struct net_device *dev); +extern void libipw_txb_free(struct libipw_txb *); + +/* libipw_rx.c */ +extern void libipw_rx_any(struct libipw_device *ieee, + struct sk_buff *skb, struct libipw_rx_stats *stats); +extern int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb, + struct libipw_rx_stats *rx_stats); +/* 
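A few worked values for the inline helpers above, written as a throwaway self-check. It is purely illustrative and assumes the normal kernel build environment (WARN_ON plus the IEEE80211_* constants from <linux/ieee80211.h> that this header already relies on).

static inline void __maybe_unused libipw_helpers_example(void)
{
	u16 fc_qos = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA;
	u16 fc_wds = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS |
		     IEEE80211_FCTL_FROMDS;
	u16 fc_ack = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK;

	/* 3-address data frame plus 2 bytes of QoS control */
	WARN_ON(libipw_get_hdrlen(fc_qos) != LIBIPW_3ADDR_LEN + 2);
	/* ToDS and FromDS both set -> four-address (WDS) header */
	WARN_ON(libipw_get_hdrlen(fc_wds) != LIBIPW_4ADDR_LEN);
	/* ACK and CTS carry a single address */
	WARN_ON(libipw_get_hdrlen(fc_ack) != LIBIPW_1ADDR_LEN);

	/* Rates are in 500 kbps units; LIBIPW_BASIC_RATE_MASK (0x80) marks a
	 * basic rate and is masked off by the helpers, so 0x96 is recognized
	 * as the (basic) 11 Mb CCK rate. */
	WARN_ON(!libipw_is_cck_rate(LIBIPW_CCK_RATE_11MB |
				    LIBIPW_BASIC_RATE_MASK));
}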
make sure to set stats->len */ +extern void libipw_rx_mgt(struct libipw_device *ieee, + struct libipw_hdr_4addr *header, + struct libipw_rx_stats *stats); +extern void libipw_network_reset(struct libipw_network *network); + +/* libipw_geo.c */ +extern const struct libipw_geo *libipw_get_geo(struct libipw_device + *ieee); +extern int libipw_set_geo(struct libipw_device *ieee, + const struct libipw_geo *geo); + +extern int libipw_is_valid_channel(struct libipw_device *ieee, + u8 channel); +extern int libipw_channel_to_index(struct libipw_device *ieee, + u8 channel); +extern u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq); +extern u8 libipw_get_channel_flags(struct libipw_device *ieee, + u8 channel); +extern const struct libipw_channel *libipw_get_channel(struct + libipw_device + *ieee, u8 channel); +extern u32 libipw_channel_to_freq(struct libipw_device * ieee, + u8 channel); + +/* libipw_wx.c */ +extern int libipw_wx_get_scan(struct libipw_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *key); +extern int libipw_wx_set_encode(struct libipw_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *key); +extern int libipw_wx_get_encode(struct libipw_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *key); +extern int libipw_wx_set_encodeext(struct libipw_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); +extern int libipw_wx_get_encodeext(struct libipw_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); + +static inline void libipw_increment_scans(struct libipw_device *ieee) +{ + ieee->scans++; +} + +static inline int libipw_get_scans(struct libipw_device *ieee) +{ + return ieee->scans; +} + +#endif /* LIBIPW_H */ diff --git a/drivers/net/wireless/ipw2x00/libipw_geo.c b/drivers/net/wireless/ipw2x00/libipw_geo.c new file mode 100644 index 0000000..65e8c17 --- /dev/null +++ b/drivers/net/wireless/ipw2x00/libipw_geo.c @@ -0,0 +1,195 @@ +/****************************************************************************** + + Copyright(c) 2005 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + Intel Linux Wireless + Intel Corporation, 5200 N.E. 
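The declarations above are essentially the whole interface a driver sees; the sketch below compresses the usual life-cycle into one place. Everything prefixed foo_ is hypothetical, error handling is reduced to the minimum needed to stay correct, and the usual kernel headers are assumed.

struct foo_priv {
	struct libipw_device *ieee;
	/* ... hardware state ... */
};

static netdev_tx_t foo_tx(struct libipw_txb *txb, struct net_device *dev,
			  int pri)
{
	/* hand the fragments to the hardware here */
	libipw_txb_free(txb);
	return NETDEV_TX_OK;
}

static void foo_set_security(struct net_device *dev,
			     struct libipw_security *sec)
{
	/* program keys / security level into the hardware here */
}

static int foo_probe(void)
{
	struct net_device *dev;
	struct foo_priv *priv;
	int err;

	/* the foo_priv area is allocated behind struct libipw_device */
	dev = alloc_ieee80211(sizeof(struct foo_priv), 0 /* not monitor */);
	if (!dev)
		return -ENOMEM;

	priv = libipw_priv(dev);
	priv->ieee = netdev_priv(dev);

	priv->ieee->hard_start_xmit = foo_tx;
	priv->ieee->set_security = foo_set_security;

	err = register_netdev(dev);
	if (err)
		free_ieee80211(dev, 0);
	return err;
}

static void foo_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_ieee80211(dev, 0);
}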
Elam Young Parkway, Hillsboro, OR 97124-6497 + +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libipw.h" + +int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel) +{ + int i; + + /* Driver needs to initialize the geography map before using + * these helper functions */ + if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) + return 0; + + if (ieee->freq_band & LIBIPW_24GHZ_BAND) + for (i = 0; i < ieee->geo.bg_channels; i++) + /* NOTE: If G mode is currently supported but + * this is a B only channel, we don't see it + * as valid. */ + if ((ieee->geo.bg[i].channel == channel) && + !(ieee->geo.bg[i].flags & LIBIPW_CH_INVALID) && + (!(ieee->mode & IEEE_G) || + !(ieee->geo.bg[i].flags & LIBIPW_CH_B_ONLY))) + return LIBIPW_24GHZ_BAND; + + if (ieee->freq_band & LIBIPW_52GHZ_BAND) + for (i = 0; i < ieee->geo.a_channels; i++) + if ((ieee->geo.a[i].channel == channel) && + !(ieee->geo.a[i].flags & LIBIPW_CH_INVALID)) + return LIBIPW_52GHZ_BAND; + + return 0; +} + +int libipw_channel_to_index(struct libipw_device *ieee, u8 channel) +{ + int i; + + /* Driver needs to initialize the geography map before using + * these helper functions */ + if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) + return -1; + + if (ieee->freq_band & LIBIPW_24GHZ_BAND) + for (i = 0; i < ieee->geo.bg_channels; i++) + if (ieee->geo.bg[i].channel == channel) + return i; + + if (ieee->freq_band & LIBIPW_52GHZ_BAND) + for (i = 0; i < ieee->geo.a_channels; i++) + if (ieee->geo.a[i].channel == channel) + return i; + + return -1; +} + +u32 libipw_channel_to_freq(struct libipw_device * ieee, u8 channel) +{ + const struct libipw_channel * ch; + + /* Driver needs to initialize the geography map before using + * these helper functions */ + if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) + return 0; + + ch = libipw_get_channel(ieee, channel); + if (!ch->channel) + return 0; + return ch->freq; +} + +u8 libipw_freq_to_channel(struct libipw_device * ieee, u32 freq) +{ + int i; + + /* Driver needs to initialize the geography map before using + * these helper functions */ + if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0) + return 0; + + freq /= 100000; + + if (ieee->freq_band & LIBIPW_24GHZ_BAND) + for (i = 0; i < ieee->geo.bg_channels; i++) + if (ieee->geo.bg[i].freq == freq) + return ieee->geo.bg[i].channel; + + if (ieee->freq_band & LIBIPW_52GHZ_BAND) + for (i = 0; i < ieee->geo.a_channels; i++) + if (ieee->geo.a[i].freq == freq) + return ieee->geo.a[i].channel; + + return 0; +} + +int libipw_set_geo(struct libipw_device *ieee, + const struct libipw_geo *geo) +{ + memcpy(ieee->geo.name, geo->name, 3); + ieee->geo.name[3] = '\0'; + ieee->geo.bg_channels = geo->bg_channels; + ieee->geo.a_channels = geo->a_channels; + memcpy(ieee->geo.bg, geo->bg, geo->bg_channels * + sizeof(struct libipw_channel)); + memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels * + sizeof(struct libipw_channel)); + return 0; +} + +const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee) +{ + return &ieee->geo; +} + +u8 libipw_get_channel_flags(struct libipw_device * ieee, u8 channel) +{ + int index = libipw_channel_to_index(ieee, channel); + + if (index == -1) + return LIBIPW_CH_INVALID; + + if (channel <= LIBIPW_24GHZ_CHANNELS) + return ieee->geo.bg[index].flags; + + return 
ieee->geo.a[index].flags; +} + +static const struct libipw_channel bad_channel = { + .channel = 0, + .flags = LIBIPW_CH_INVALID, + .max_power = 0, +}; + +const struct libipw_channel *libipw_get_channel(struct libipw_device + *ieee, u8 channel) +{ + int index = libipw_channel_to_index(ieee, channel); + + if (index == -1) + return &bad_channel; + + if (channel <= LIBIPW_24GHZ_CHANNELS) + return &ieee->geo.bg[index]; + + return &ieee->geo.a[index]; +} + +EXPORT_SYMBOL(libipw_get_channel); +EXPORT_SYMBOL(libipw_get_channel_flags); +EXPORT_SYMBOL(libipw_is_valid_channel); +EXPORT_SYMBOL(libipw_freq_to_channel); +EXPORT_SYMBOL(libipw_channel_to_freq); +EXPORT_SYMBOL(libipw_channel_to_index); +EXPORT_SYMBOL(libipw_set_geo); +EXPORT_SYMBOL(libipw_get_geo); diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c new file mode 100644 index 0000000..81202c7 --- /dev/null +++ b/drivers/net/wireless/ipw2x00/libipw_module.c @@ -0,0 +1,343 @@ +/******************************************************************************* + + Copyright(c) 2004-2005 Intel Corporation. All rights reserved. + + Portions of this file are based on the WEP enablement code provided by the + Host AP project hostap-drivers v0.1.3 + Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen + + Copyright (c) 2002-2003, Jouni Malinen + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + Intel Linux Wireless + Intel Corporation, 5200 N.E. 
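Once a geography table is installed, the helpers above compose naturally; a small illustrative routine follows (the foo_ name is hypothetical and the usual kernel headers are assumed).

static void foo_show_channel(struct libipw_device *ieee, u8 channel)
{
	const struct libipw_channel *ch = libipw_get_channel(ieee, channel);

	/* unknown channels come back as the static bad_channel entry */
	if (ch->flags & LIBIPW_CH_INVALID) {
		printk(KERN_DEBUG "channel %u not in current geography\n",
		       channel);
		return;
	}

	/* libipw_is_valid_channel() returns a band mask
	 * (LIBIPW_24GHZ_BAND / LIBIPW_52GHZ_BAND), not a plain boolean,
	 * and 0 when the channel cannot be used in the current mode. */
	printk(KERN_DEBUG "channel %u -> %u MHz, band mask 0x%x\n",
	       channel, libipw_channel_to_freq(ieee, channel),
	       libipw_is_valid_channel(ieee, channel));
}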
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libipw.h" + +#define DRV_DESCRIPTION "802.11 data/management/control stack" +#define DRV_NAME "ieee80211" +#define DRV_VERSION LIBIPW_VERSION +#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation " + +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL"); + +struct cfg80211_ops libipw_config_ops = { }; +void *libipw_wiphy_privid = &libipw_wiphy_privid; + +static int libipw_networks_allocate(struct libipw_device *ieee) +{ + if (ieee->networks) + return 0; + + ieee->networks = + kzalloc(MAX_NETWORK_COUNT * sizeof(struct libipw_network), + GFP_KERNEL); + if (!ieee->networks) { + printk(KERN_WARNING "%s: Out of memory allocating beacons\n", + ieee->dev->name); + return -ENOMEM; + } + + return 0; +} + +void libipw_network_reset(struct libipw_network *network) +{ + if (!network) + return; + + if (network->ibss_dfs) { + kfree(network->ibss_dfs); + network->ibss_dfs = NULL; + } +} + +static inline void libipw_networks_free(struct libipw_device *ieee) +{ + int i; + + if (!ieee->networks) + return; + + for (i = 0; i < MAX_NETWORK_COUNT; i++) + if (ieee->networks[i].ibss_dfs) + kfree(ieee->networks[i].ibss_dfs); + + kfree(ieee->networks); + ieee->networks = NULL; +} + +void libipw_networks_age(struct libipw_device *ieee, + unsigned long age_secs) +{ + struct libipw_network *network = NULL; + unsigned long flags; + unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC); + + spin_lock_irqsave(&ieee->lock, flags); + list_for_each_entry(network, &ieee->network_list, list) { + network->last_scanned -= age_jiffies; + } + spin_unlock_irqrestore(&ieee->lock, flags); +} +EXPORT_SYMBOL(libipw_networks_age); + +static void libipw_networks_initialize(struct libipw_device *ieee) +{ + int i; + + INIT_LIST_HEAD(&ieee->network_free_list); + INIT_LIST_HEAD(&ieee->network_list); + for (i = 0; i < MAX_NETWORK_COUNT; i++) + list_add_tail(&ieee->networks[i].list, + &ieee->network_free_list); +} + +int libipw_change_mtu(struct net_device *dev, int new_mtu) +{ + if ((new_mtu < 68) || (new_mtu > LIBIPW_DATA_LEN)) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} +EXPORT_SYMBOL(libipw_change_mtu); + +struct net_device *alloc_ieee80211(int sizeof_priv, int monitor) +{ + struct libipw_device *ieee; + struct net_device *dev; + int err; + + LIBIPW_DEBUG_INFO("Initializing...\n"); + + dev = alloc_etherdev(sizeof(struct libipw_device) + sizeof_priv); + if (!dev) { + LIBIPW_ERROR("Unable to allocate network device.\n"); + goto failed; + } + ieee = netdev_priv(dev); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) + dev->hard_start_xmit = libipw_xmit; + dev->change_mtu = libipw_change_mtu; +#endif + + ieee->dev = dev; + + if (!monitor) { + ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0); + if (!ieee->wdev.wiphy) { + LIBIPW_ERROR("Unable to allocate wiphy.\n"); + goto failed_free_netdev; + } + + ieee->dev->ieee80211_ptr = &ieee->wdev; + ieee->wdev.iftype = NL80211_IFTYPE_STATION; + + /* Fill-out wiphy structure bits we know... Not enough info + here to call set_wiphy_dev or set MAC address or channel info + -- have to do that in ->ndo_init... 
*/ + ieee->wdev.wiphy->privid = libipw_wiphy_privid; + + ieee->wdev.wiphy->max_scan_ssids = 1; + ieee->wdev.wiphy->max_scan_ie_len = 0; + ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) + | BIT(NL80211_IFTYPE_ADHOC); + } + + err = libipw_networks_allocate(ieee); + if (err) { + LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err); + goto failed_free_wiphy; + } + libipw_networks_initialize(ieee); + + /* Default fragmentation threshold is maximum payload size */ + ieee->fts = DEFAULT_FTS; + ieee->rts = DEFAULT_FTS; + ieee->scan_age = DEFAULT_MAX_SCAN_AGE; + ieee->open_wep = 1; + + /* Default to enabling full open WEP with host based encrypt/decrypt */ + ieee->host_encrypt = 1; + ieee->host_decrypt = 1; + ieee->host_mc_decrypt = 1; + + /* Host fragmentation in Open mode. Default is enabled. + * Note: host fragmentation is always enabled if host encryption + * is enabled. For cards can do hardware encryption, they must do + * hardware fragmentation as well. So we don't need a variable + * like host_enc_frag. */ + ieee->host_open_frag = 1; + ieee->ieee802_1x = 1; /* Default to supporting 802.1x */ + + spin_lock_init(&ieee->lock); + + lib80211_crypt_info_init(&ieee->crypt_info, dev->name, &ieee->lock); + + ieee->wpa_enabled = 0; + ieee->drop_unencrypted = 0; + ieee->privacy_invoked = 0; + + return dev; + +failed_free_wiphy: + if (!monitor) + wiphy_free(ieee->wdev.wiphy); +failed_free_netdev: + free_netdev(dev); +failed: + return NULL; +} + +void free_ieee80211(struct net_device *dev, int monitor) +{ + struct libipw_device *ieee = netdev_priv(dev); + + lib80211_crypt_info_free(&ieee->crypt_info); + + libipw_networks_free(ieee); + + /* free cfg80211 resources */ + if (!monitor) + wiphy_free(ieee->wdev.wiphy); + + free_netdev(dev); +} + +#ifdef CONFIG_LIBIPW_DEBUG + +static int debug = 0; +u32 libipw_debug_level = 0; +EXPORT_SYMBOL_GPL(libipw_debug_level); +static struct proc_dir_entry *libipw_proc = NULL; + +static int debug_level_proc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "0x%08X\n", libipw_debug_level); + return 0; +} + +static int debug_level_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_level_proc_show, NULL); +} + +static ssize_t debug_level_proc_write(struct file *file, + const char __user *buffer, size_t count, loff_t *pos) +{ + char buf[] = "0x00000000\n"; + size_t len = min(sizeof(buf) - 1, count); + unsigned long val; + + if (copy_from_user(buf, buffer, len)) + return count; + buf[len] = 0; + if (sscanf(buf, "%li", &val) != 1) + printk(KERN_INFO DRV_NAME + ": %s is not in hex or decimal form.\n", buf); + else + libipw_debug_level = val; + + return strnlen(buf, len); +} + +static const struct file_operations debug_level_proc_fops = { + .owner = THIS_MODULE, + .open = debug_level_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = debug_level_proc_write, +}; +#endif /* CONFIG_LIBIPW_DEBUG */ + +static int __init libipw_init(void) +{ +#ifdef CONFIG_LIBIPW_DEBUG + struct proc_dir_entry *e; + + libipw_debug_level = debug; + libipw_proc = proc_mkdir(DRV_NAME, init_net.proc_net); + if (libipw_proc == NULL) { + LIBIPW_ERROR("Unable to create " DRV_NAME + " proc directory\n"); + return -EIO; + } + e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc, + &debug_level_proc_fops); + if (!e) { + remove_proc_entry(DRV_NAME, init_net.proc_net); + libipw_proc = NULL; + return -EIO; + } +#endif /* CONFIG_LIBIPW_DEBUG */ + + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " 
DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + + return 0; +} + +static void __exit libipw_exit(void) +{ +#ifdef CONFIG_LIBIPW_DEBUG + if (libipw_proc) { + remove_proc_entry("debug_level", libipw_proc); + remove_proc_entry(DRV_NAME, init_net.proc_net); + libipw_proc = NULL; + } +#endif /* CONFIG_LIBIPW_DEBUG */ +} + +#ifdef CONFIG_LIBIPW_DEBUG +#include +module_param(debug, int, 0444); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif /* CONFIG_LIBIPW_DEBUG */ + +module_exit(libipw_exit); +module_init(libipw_init); + +EXPORT_SYMBOL(alloc_ieee80211); +EXPORT_SYMBOL(free_ieee80211); diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c new file mode 100644 index 0000000..282b1f7 --- /dev/null +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c @@ -0,0 +1,1798 @@ +/* + * Original code based Host AP (software wireless LAN access point) driver + * for Intersil Prism2/2.5/3 - hostap.o module, common routines + * + * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen + * + * Copyright (c) 2002-2003, Jouni Malinen + * Copyright (c) 2004-2005, Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. See README and COPYING for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "libipw.h" + +static void libipw_monitor_rx(struct libipw_device *ieee, + struct sk_buff *skb, + struct libipw_rx_stats *rx_stats) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + u16 fc = le16_to_cpu(hdr->frame_control); + + skb->dev = ieee->dev; + skb_reset_mac_header(skb); + skb_pull(skb, libipw_get_hdrlen(fc)); + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_80211_RAW); + memset(skb->cb, 0, sizeof(skb->cb)); + netif_rx(skb); +} + +/* Called only as a tasklet (software IRQ) */ +static struct libipw_frag_entry *libipw_frag_cache_find(struct + libipw_device + *ieee, + unsigned int seq, + unsigned int frag, + u8 * src, + u8 * dst) +{ + struct libipw_frag_entry *entry; + int i; + + for (i = 0; i < LIBIPW_FRAG_CACHE_LEN; i++) { + entry = &ieee->frag_cache[i]; + if (entry->skb != NULL && + time_after(jiffies, entry->first_frag_time + 2 * HZ)) { + LIBIPW_DEBUG_FRAG("expiring fragment cache entry " + "seq=%u last_frag=%u\n", + entry->seq, entry->last_frag); + dev_kfree_skb_any(entry->skb); + entry->skb = NULL; + } + + if (entry->skb != NULL && entry->seq == seq && + (entry->last_frag + 1 == frag || frag == -1) && + !compare_ether_addr(entry->src_addr, src) && + !compare_ether_addr(entry->dst_addr, dst)) + return entry; + } + + return NULL; +} + +/* Called only as a tasklet (software IRQ) */ +static struct sk_buff *libipw_frag_cache_get(struct libipw_device *ieee, + struct libipw_hdr_4addr *hdr) +{ + struct sk_buff *skb = NULL; + u16 sc; + unsigned int frag, seq; + struct libipw_frag_entry *entry; + + sc = le16_to_cpu(hdr->seq_ctl); + frag = WLAN_GET_SEQ_FRAG(sc); + seq = WLAN_GET_SEQ_SEQ(sc); + + if (frag == 0) { + /* Reserve enough space to fit maximum frame length */ + skb = dev_alloc_skb(ieee->dev->mtu + + sizeof(struct libipw_hdr_4addr) + + 8 /* LLC */ + + 2 /* alignment */ + + 8 /* WEP */ + ETH_ALEN /* WDS */ ); + if (skb == NULL) + return NULL; + + entry = 
&ieee->frag_cache[ieee->frag_next_idx]; + ieee->frag_next_idx++; + if (ieee->frag_next_idx >= LIBIPW_FRAG_CACHE_LEN) + ieee->frag_next_idx = 0; + + if (entry->skb != NULL) + dev_kfree_skb_any(entry->skb); + + entry->first_frag_time = jiffies; + entry->seq = seq; + entry->last_frag = frag; + entry->skb = skb; + memcpy(entry->src_addr, hdr->addr2, ETH_ALEN); + memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN); + } else { + /* received a fragment of a frame for which the head fragment + * should have already been received */ + entry = libipw_frag_cache_find(ieee, seq, frag, hdr->addr2, + hdr->addr1); + if (entry != NULL) { + entry->last_frag = frag; + skb = entry->skb; + } + } + + return skb; +} + +/* Called only as a tasklet (software IRQ) */ +static int libipw_frag_cache_invalidate(struct libipw_device *ieee, + struct libipw_hdr_4addr *hdr) +{ + u16 sc; + unsigned int seq; + struct libipw_frag_entry *entry; + + sc = le16_to_cpu(hdr->seq_ctl); + seq = WLAN_GET_SEQ_SEQ(sc); + + entry = libipw_frag_cache_find(ieee, seq, -1, hdr->addr2, + hdr->addr1); + + if (entry == NULL) { + LIBIPW_DEBUG_FRAG("could not invalidate fragment cache " + "entry (seq=%u)\n", seq); + return -1; + } + + entry->skb = NULL; + return 0; +} + +#ifdef NOT_YET +/* libipw_rx_frame_mgtmt + * + * Responsible for handling management control frames + * + * Called by libipw_rx */ +static int +libipw_rx_frame_mgmt(struct libipw_device *ieee, struct sk_buff *skb, + struct libipw_rx_stats *rx_stats, u16 type, + u16 stype) +{ + if (ieee->iw_mode == IW_MODE_MASTER) { + printk(KERN_DEBUG "%s: Master mode not yet suppported.\n", + ieee->dev->name); + return 0; +/* + hostap_update_sta_ps(ieee, (struct hostap_libipw_hdr_4addr *) + skb->data);*/ + } + + if (ieee->hostapd && type == WLAN_FC_TYPE_MGMT) { + if (stype == WLAN_FC_STYPE_BEACON && + ieee->iw_mode == IW_MODE_MASTER) { + struct sk_buff *skb2; + /* Process beacon frames also in kernel driver to + * update STA(AP) table statistics */ + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) + hostap_rx(skb2->dev, skb2, rx_stats); + } + + /* send management frames to the user space daemon for + * processing */ + ieee->apdevstats.rx_packets++; + ieee->apdevstats.rx_bytes += skb->len; + prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT); + return 0; + } + + if (ieee->iw_mode == IW_MODE_MASTER) { + if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) { + printk(KERN_DEBUG "%s: unknown management frame " + "(type=0x%02x, stype=0x%02x) dropped\n", + skb->dev->name, type, stype); + return -1; + } + + hostap_rx(skb->dev, skb, rx_stats); + return 0; + } + + printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame " + "received in non-Host AP mode\n", skb->dev->name); + return -1; +} +#endif + +/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ +/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ +static unsigned char libipw_rfc1042_header[] = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; + +/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ +static unsigned char libipw_bridge_tunnel_header[] = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; +/* No encapsulation header if EtherType < 0x600 (=length) */ + +/* Called by libipw_rx_frame_decrypt */ +static int libipw_is_eapol_frame(struct libipw_device *ieee, + struct sk_buff *skb) +{ + struct net_device *dev = ieee->dev; + u16 fc, ethertype; + struct libipw_hdr_3addr *hdr; + u8 *pos; + + if (skb->len < 24) + return 0; + + hdr = (struct libipw_hdr_3addr *)skb->data; + fc = le16_to_cpu(hdr->frame_ctl); + + /* 
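The fragment cache above is keyed on the 802.11 sequence-control field; here is a short worked example of the WLAN_GET_SEQ_* macros from libipw.h (IEEE80211_SCTL_FRAG is 0x000F and IEEE80211_SCTL_SEQ is 0xFFF0 in <linux/ieee80211.h>), using an arbitrary sample value.

static void __maybe_unused libipw_seq_ctl_example(void)
{
	u16 sc = 0x01A3;				/* sample host-order seq_ctl */
	unsigned int frag = WLAN_GET_SEQ_FRAG(sc);	/* 0x01A3 & 0x000F = 3  */
	unsigned int seq = WLAN_GET_SEQ_SEQ(sc);	/* 0x01A0 >> 4     = 26 */

	/* Fragment #3 of MSDU sequence 26: libipw_frag_cache_find() will only
	 * match an entry with the same seq, the same source/destination
	 * addresses and last_frag == 2 (or any fragment when it is called
	 * with frag == -1 from libipw_frag_cache_invalidate()). */
	WARN_ON(frag != 3 || seq != 26);
}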
check that the frame is unicast frame to us */ + if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == + IEEE80211_FCTL_TODS && + !compare_ether_addr(hdr->addr1, dev->dev_addr) && + !compare_ether_addr(hdr->addr3, dev->dev_addr)) { + /* ToDS frame with own addr BSSID and DA */ + } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == + IEEE80211_FCTL_FROMDS && + !compare_ether_addr(hdr->addr1, dev->dev_addr)) { + /* FromDS frame with own addr as DA */ + } else + return 0; + + if (skb->len < 24 + 8) + return 0; + + /* check for port access entity Ethernet type */ + pos = skb->data + 24; + ethertype = (pos[6] << 8) | pos[7]; + if (ethertype == ETH_P_PAE) + return 1; + + return 0; +} + +/* Called only as a tasklet (software IRQ), by libipw_rx */ +static int +libipw_rx_frame_decrypt(struct libipw_device *ieee, struct sk_buff *skb, + struct lib80211_crypt_data *crypt) +{ + struct libipw_hdr_3addr *hdr; + int res, hdrlen; + + if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) + return 0; + + hdr = (struct libipw_hdr_3addr *)skb->data; + hdrlen = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); + + atomic_inc(&crypt->refcnt); + res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); + atomic_dec(&crypt->refcnt); + if (res < 0) { + LIBIPW_DEBUG_DROP("decryption failed (SA=%pM) res=%d\n", + hdr->addr2, res); + if (res == -2) + LIBIPW_DEBUG_DROP("Decryption failed ICV " + "mismatch (key %d)\n", + skb->data[hdrlen + 3] >> 6); + ieee->ieee_stats.rx_discards_undecryptable++; + return -1; + } + + return res; +} + +/* Called only as a tasklet (software IRQ), by libipw_rx */ +static int +libipw_rx_frame_decrypt_msdu(struct libipw_device *ieee, + struct sk_buff *skb, int keyidx, + struct lib80211_crypt_data *crypt) +{ + struct libipw_hdr_3addr *hdr; + int res, hdrlen; + + if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) + return 0; + + hdr = (struct libipw_hdr_3addr *)skb->data; + hdrlen = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); + + atomic_inc(&crypt->refcnt); + res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv); + atomic_dec(&crypt->refcnt); + if (res < 0) { + printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed" + " (SA=%pM keyidx=%d)\n", ieee->dev->name, hdr->addr2, + keyidx); + return -1; + } + + return 0; +} + +/* All received frames are sent to this function. @skb contains the frame in + * IEEE 802.11 format, i.e., in the format it was sent over air. + * This function is called only as a tasklet (software IRQ). 
*/ +int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb, + struct libipw_rx_stats *rx_stats) +{ + struct net_device *dev = ieee->dev; + struct libipw_hdr_4addr *hdr; + size_t hdrlen; + u16 fc, type, stype, sc; + unsigned int frag; + u8 *payload; + u16 ethertype; +#ifdef NOT_YET + struct net_device *wds = NULL; + struct sk_buff *skb2 = NULL; + struct net_device *wds = NULL; + int frame_authorized = 0; + int from_assoc_ap = 0; + void *sta = NULL; +#endif + u8 dst[ETH_ALEN]; + u8 src[ETH_ALEN]; + struct lib80211_crypt_data *crypt = NULL; + int keyidx = 0; + int can_be_decrypted = 0; + + hdr = (struct libipw_hdr_4addr *)skb->data; + if (skb->len < 10) { + printk(KERN_INFO "%s: SKB length < 10\n", dev->name); + goto rx_dropped; + } + + fc = le16_to_cpu(hdr->frame_ctl); + type = WLAN_FC_GET_TYPE(fc); + stype = WLAN_FC_GET_STYPE(fc); + sc = le16_to_cpu(hdr->seq_ctl); + frag = WLAN_GET_SEQ_FRAG(sc); + hdrlen = libipw_get_hdrlen(fc); + + if (skb->len < hdrlen) { + printk(KERN_INFO "%s: invalid SKB length %d\n", + dev->name, skb->len); + goto rx_dropped; + } + + /* Put this code here so that we avoid duplicating it in all + * Rx paths. - Jean II */ +#ifdef CONFIG_WIRELESS_EXT +#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ + /* If spy monitoring on */ + if (ieee->spy_data.spy_number > 0) { + struct iw_quality wstats; + + wstats.updated = 0; + if (rx_stats->mask & LIBIPW_STATMASK_RSSI) { + wstats.level = rx_stats->signal; + wstats.updated |= IW_QUAL_LEVEL_UPDATED; + } else + wstats.updated |= IW_QUAL_LEVEL_INVALID; + + if (rx_stats->mask & LIBIPW_STATMASK_NOISE) { + wstats.noise = rx_stats->noise; + wstats.updated |= IW_QUAL_NOISE_UPDATED; + } else + wstats.updated |= IW_QUAL_NOISE_INVALID; + + if (rx_stats->mask & LIBIPW_STATMASK_SIGNAL) { + wstats.qual = rx_stats->signal; + wstats.updated |= IW_QUAL_QUAL_UPDATED; + } else + wstats.updated |= IW_QUAL_QUAL_INVALID; + + /* Update spy records */ + wireless_spy_update(ieee->dev, hdr->addr2, &wstats); + } +#endif /* IW_WIRELESS_SPY */ +#endif /* CONFIG_WIRELESS_EXT */ + +#ifdef NOT_YET + hostap_update_rx_stats(local->ap, hdr, rx_stats); +#endif + + if (ieee->iw_mode == IW_MODE_MONITOR) { + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + libipw_monitor_rx(ieee, skb, rx_stats); + return 1; + } + + can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) || + is_broadcast_ether_addr(hdr->addr2)) ? + ieee->host_mc_decrypt : ieee->host_decrypt; + + if (can_be_decrypted) { + if (skb->len >= hdrlen + 3) { + /* Top two-bits of byte 3 are the key index */ + keyidx = skb->data[hdrlen + 3] >> 6; + } + + /* ieee->crypt[] is WEP_KEY (4) in length. Given that keyidx + * is only allowed 2-bits of storage, no value of keyidx can + * be provided via above code that would result in keyidx + * being out of range */ + crypt = ieee->crypt_info.crypt[keyidx]; + +#ifdef NOT_YET + sta = NULL; + + /* Use station specific key to override default keys if the + * receiver address is a unicast address ("individual RA"). If + * bcrx_sta_key parameter is set, station specific key is used + * even with broad/multicast targets (this is against IEEE + * 802.11, but makes it easier to use different keys with + * stations that do not support WEP key mapping). 
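The two-bit key index read above lives in bits 6-7 of the fourth byte following the 802.11 header (the Key ID octet of the WEP/TKIP/CCMP IV). A tiny worked example with a made-up IV:

static int __maybe_unused libipw_keyidx_example(void)
{
	/* body of a protected 3-address data frame (hdrlen == 24) starting
	 * with the hypothetical 4-byte WEP IV 24 68 ac 40 */
	u8 iv[4] = { 0x24, 0x68, 0xac, 0x40 };

	/* same shift as above: 0x40 >> 6 == 1, so the frame selects
	 * ieee->crypt_info.crypt[1]; keeping only two bits confines keyidx
	 * to 0..3, i.e. inside the WEP_KEYS-sized array */
	return iv[3] >> 6;
}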
*/ + + if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key) + (void)hostap_handle_sta_crypto(local, hdr, &crypt, + &sta); +#endif + + /* allow NULL decrypt to indicate an station specific override + * for default encryption */ + if (crypt && (crypt->ops == NULL || + crypt->ops->decrypt_mpdu == NULL)) + crypt = NULL; + + if (!crypt && (fc & IEEE80211_FCTL_PROTECTED)) { + /* This seems to be triggered by some (multicast?) + * frames from other than current BSS, so just drop the + * frames silently instead of filling system log with + * these reports. */ + LIBIPW_DEBUG_DROP("Decryption failed (not set)" + " (SA=%pM)\n", hdr->addr2); + ieee->ieee_stats.rx_discards_undecryptable++; + goto rx_dropped; + } + } +#ifdef NOT_YET + if (type != WLAN_FC_TYPE_DATA) { + if (type == WLAN_FC_TYPE_MGMT && stype == WLAN_FC_STYPE_AUTH && + fc & IEEE80211_FCTL_PROTECTED && ieee->host_decrypt && + (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0) { + printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth " + "from %pM\n", dev->name, hdr->addr2); + /* TODO: could inform hostapd about this so that it + * could send auth failure report */ + goto rx_dropped; + } + + if (libipw_rx_frame_mgmt(ieee, skb, rx_stats, type, stype)) + goto rx_dropped; + else + goto rx_exit; + } +#endif + /* drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.29) */ + if (sc == ieee->prev_seq_ctl) + goto rx_dropped; + else + ieee->prev_seq_ctl = sc; + + /* Data frame - extract src/dst addresses */ + if (skb->len < LIBIPW_3ADDR_LEN) + goto rx_dropped; + + switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { + case IEEE80211_FCTL_FROMDS: + memcpy(dst, hdr->addr1, ETH_ALEN); + memcpy(src, hdr->addr3, ETH_ALEN); + break; + case IEEE80211_FCTL_TODS: + memcpy(dst, hdr->addr3, ETH_ALEN); + memcpy(src, hdr->addr2, ETH_ALEN); + break; + case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: + if (skb->len < LIBIPW_4ADDR_LEN) + goto rx_dropped; + memcpy(dst, hdr->addr3, ETH_ALEN); + memcpy(src, hdr->addr4, ETH_ALEN); + break; + case 0: + memcpy(dst, hdr->addr1, ETH_ALEN); + memcpy(src, hdr->addr2, ETH_ALEN); + break; + } + +#ifdef NOT_YET + if (hostap_rx_frame_wds(ieee, hdr, fc, &wds)) + goto rx_dropped; + if (wds) { + skb->dev = dev = wds; + stats = hostap_get_stats(dev); + } + + if (ieee->iw_mode == IW_MODE_MASTER && !wds && + (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == + IEEE80211_FCTL_FROMDS && ieee->stadev + && !compare_ether_addr(hdr->addr2, ieee->assoc_ap_addr)) { + /* Frame from BSSID of the AP for which we are a client */ + skb->dev = dev = ieee->stadev; + stats = hostap_get_stats(dev); + from_assoc_ap = 1; + } +#endif + +#ifdef NOT_YET + if ((ieee->iw_mode == IW_MODE_MASTER || + ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) { + switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats, + wds != NULL)) { + case AP_RX_CONTINUE_NOT_AUTHORIZED: + frame_authorized = 0; + break; + case AP_RX_CONTINUE: + frame_authorized = 1; + break; + case AP_RX_DROP: + goto rx_dropped; + case AP_RX_EXIT: + goto rx_exit; + } + } +#endif + + /* Nullfunc frames may have PS-bit set, so they must be passed to + * hostap_handle_sta_rx() before being dropped here. 
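For reference, the address recovery performed by the switch above, tabulated per ToDS/FromDS combination (this restates the code, it does not change it):

/*
 *   ToDS FromDS | dst (DA) | src (SA) | typical case
 *   ------------+----------+----------+---------------------------
 *     0     0   |  addr1   |  addr2   | IBSS / ad-hoc
 *     0     1   |  addr1   |  addr3   | AP -> STA (FromDS)
 *     1     0   |  addr3   |  addr2   | STA -> AP (ToDS)
 *     1     1   |  addr3   |  addr4   | WDS, four-address frame
 */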
*/ + + stype &= ~IEEE80211_STYPE_QOS_DATA; + + if (stype != IEEE80211_STYPE_DATA && + stype != IEEE80211_STYPE_DATA_CFACK && + stype != IEEE80211_STYPE_DATA_CFPOLL && + stype != IEEE80211_STYPE_DATA_CFACKPOLL) { + if (stype != IEEE80211_STYPE_NULLFUNC) + LIBIPW_DEBUG_DROP("RX: dropped data frame " + "with no data (type=0x%02x, " + "subtype=0x%02x, len=%d)\n", + type, stype, skb->len); + goto rx_dropped; + } + + /* skb: hdr + (possibly fragmented, possibly encrypted) payload */ + + if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted && + (keyidx = libipw_rx_frame_decrypt(ieee, skb, crypt)) < 0) + goto rx_dropped; + + hdr = (struct libipw_hdr_4addr *)skb->data; + + /* skb: hdr + (possibly fragmented) plaintext payload */ + // PR: FIXME: hostap has additional conditions in the "if" below: + // ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && + if ((frag != 0) || (fc & IEEE80211_FCTL_MOREFRAGS)) { + int flen; + struct sk_buff *frag_skb = libipw_frag_cache_get(ieee, hdr); + LIBIPW_DEBUG_FRAG("Rx Fragment received (%u)\n", frag); + + if (!frag_skb) { + LIBIPW_DEBUG(LIBIPW_DL_RX | LIBIPW_DL_FRAG, + "Rx cannot get skb from fragment " + "cache (morefrag=%d seq=%u frag=%u)\n", + (fc & IEEE80211_FCTL_MOREFRAGS) != 0, + WLAN_GET_SEQ_SEQ(sc), frag); + goto rx_dropped; + } + + flen = skb->len; + if (frag != 0) + flen -= hdrlen; + + if (frag_skb->tail + flen > frag_skb->end) { + printk(KERN_WARNING "%s: host decrypted and " + "reassembled frame did not fit skb\n", + dev->name); + libipw_frag_cache_invalidate(ieee, hdr); + goto rx_dropped; + } + + if (frag == 0) { + /* copy first fragment (including full headers) into + * beginning of the fragment cache skb */ + skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), flen); + } else { + /* append frame payload to the end of the fragment + * cache skb */ + skb_copy_from_linear_data_offset(skb, hdrlen, + skb_put(frag_skb, flen), flen); + } + dev_kfree_skb_any(skb); + skb = NULL; + + if (fc & IEEE80211_FCTL_MOREFRAGS) { + /* more fragments expected - leave the skb in fragment + * cache for now; it will be delivered to upper layers + * after all fragments have been received */ + goto rx_exit; + } + + /* this was the last fragment and the frame will be + * delivered, so remove skb from fragment cache */ + skb = frag_skb; + hdr = (struct libipw_hdr_4addr *)skb->data; + libipw_frag_cache_invalidate(ieee, hdr); + } + + /* skb: hdr + (possible reassembled) full MSDU payload; possibly still + * encrypted/authenticated */ + if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted && + libipw_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) + goto rx_dropped; + + hdr = (struct libipw_hdr_4addr *)skb->data; + if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) { + if ( /*ieee->ieee802_1x && */ + libipw_is_eapol_frame(ieee, skb)) { + /* pass unencrypted EAPOL frames even if encryption is + * configured */ + } else { + LIBIPW_DEBUG_DROP("encryption configured, but RX " + "frame not encrypted (SA=%pM)\n", + hdr->addr2); + goto rx_dropped; + } + } + + if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep && + !libipw_is_eapol_frame(ieee, skb)) { + LIBIPW_DEBUG_DROP("dropped unencrypted RX data " + "frame from %pM (drop_unencrypted=1)\n", + hdr->addr2); + goto rx_dropped; + } + + /* If the frame was decrypted in hardware, we may need to strip off + * any security data (IV, ICV, etc) that was left behind */ + if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) && + ieee->host_strip_iv_icv) { + int trimlen = 0; + + /* Top two-bits of 
byte 3 are the key index */ + if (skb->len >= hdrlen + 3) + keyidx = skb->data[hdrlen + 3] >> 6; + + /* To strip off any security data which appears before the + * payload, we simply increase hdrlen (as the header gets + * chopped off immediately below). For the security data which + * appears after the payload, we use skb_trim. */ + + switch (ieee->sec.encode_alg[keyidx]) { + case SEC_ALG_WEP: + /* 4 byte IV */ + hdrlen += 4; + /* 4 byte ICV */ + trimlen = 4; + break; + case SEC_ALG_TKIP: + /* 4 byte IV, 4 byte ExtIV */ + hdrlen += 8; + /* 8 byte MIC, 4 byte ICV */ + trimlen = 12; + break; + case SEC_ALG_CCMP: + /* 8 byte CCMP header */ + hdrlen += 8; + /* 8 byte MIC */ + trimlen = 8; + break; + } + + if (skb->len < trimlen) + goto rx_dropped; + + __skb_trim(skb, skb->len - trimlen); + + if (skb->len < hdrlen) + goto rx_dropped; + } + + /* skb: hdr + (possible reassembled) full plaintext payload */ + + payload = skb->data + hdrlen; + ethertype = (payload[6] << 8) | payload[7]; + +#ifdef NOT_YET + /* If IEEE 802.1X is used, check whether the port is authorized to send + * the received frame. */ + if (ieee->ieee802_1x && ieee->iw_mode == IW_MODE_MASTER) { + if (ethertype == ETH_P_PAE) { + printk(KERN_DEBUG "%s: RX: IEEE 802.1X frame\n", + dev->name); + if (ieee->hostapd && ieee->apdev) { + /* Send IEEE 802.1X frames to the user + * space daemon for processing */ + prism2_rx_80211(ieee->apdev, skb, rx_stats, + PRISM2_RX_MGMT); + ieee->apdevstats.rx_packets++; + ieee->apdevstats.rx_bytes += skb->len; + goto rx_exit; + } + } else if (!frame_authorized) { + printk(KERN_DEBUG "%s: dropped frame from " + "unauthorized port (IEEE 802.1X): " + "ethertype=0x%04x\n", dev->name, ethertype); + goto rx_dropped; + } + } +#endif + + /* convert hdr + possible LLC headers into Ethernet header */ + if (skb->len - hdrlen >= 8 && + ((memcmp(payload, libipw_rfc1042_header, SNAP_SIZE) == 0 && + ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || + memcmp(payload, libipw_bridge_tunnel_header, SNAP_SIZE) == 0)) { + /* remove RFC1042 or Bridge-Tunnel encapsulation and + * replace EtherType */ + skb_pull(skb, hdrlen + SNAP_SIZE); + memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); + memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); + } else { + __be16 len; + /* Leave Ethernet header part of hdr and full payload */ + skb_pull(skb, hdrlen); + len = htons(skb->len); + memcpy(skb_push(skb, 2), &len, 2); + memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); + memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); + } + +#ifdef NOT_YET + if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == + IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) { + /* Non-standard frame: get addr4 from its bogus location after + * the payload */ + skb_copy_to_linear_data_offset(skb, ETH_ALEN, + skb->data + skb->len - ETH_ALEN, + ETH_ALEN); + skb_trim(skb, skb->len - ETH_ALEN); + } +#endif + + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + +#ifdef NOT_YET + if (ieee->iw_mode == IW_MODE_MASTER && !wds && ieee->ap->bridge_packets) { + if (dst[0] & 0x01) { + /* copy multicast frame both to the higher layers and + * to the wireless media */ + ieee->ap->bridged_multicast++; + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2 == NULL) + printk(KERN_DEBUG "%s: skb_clone failed for " + "multicast frame\n", dev->name); + } else if (hostap_is_sta_assoc(ieee->ap, dst)) { + /* send frame directly to the associated STA using + * wireless media and not passing to higher layers */ + ieee->ap->bridged_unicast++; + skb2 = skb; + skb = NULL; 
+ } + } + + if (skb2 != NULL) { + /* send to wireless media */ + skb2->dev = dev; + skb2->protocol = htons(ETH_P_802_3); + skb_reset_mac_header(skb2); + skb_reset_network_header(skb2); + /* skb2->network_header += ETH_HLEN; */ + dev_queue_xmit(skb2); + } +#endif + + if (skb) { + skb->protocol = eth_type_trans(skb, dev); + memset(skb->cb, 0, sizeof(skb->cb)); + skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ + if (netif_rx(skb) == NET_RX_DROP) { + /* netif_rx always succeeds, but it might drop + * the packet. If it drops the packet, we log that + * in our stats. */ + LIBIPW_DEBUG_DROP + ("RX: netif_rx dropped the packet\n"); + dev->stats.rx_dropped++; + } + } + + rx_exit: +#ifdef NOT_YET + if (sta) + hostap_handle_sta_release(sta); +#endif + return 1; + + rx_dropped: + dev->stats.rx_dropped++; + + /* Returning 0 indicates to caller that we have not handled the SKB-- + * so it is still allocated and can be used again by underlying + * hardware as a DMA target */ + return 0; +} + +/* Filter out unrelated packets, call libipw_rx[_mgt] + * This function takes over the skb, it should not be used again after calling + * this function. */ +void libipw_rx_any(struct libipw_device *ieee, + struct sk_buff *skb, struct libipw_rx_stats *stats) +{ + struct libipw_hdr_4addr *hdr; + int is_packet_for_us; + u16 fc; + + if (ieee->iw_mode == IW_MODE_MONITOR) { + if (!libipw_rx(ieee, skb, stats)) + dev_kfree_skb_irq(skb); + return; + } + + if (skb->len < sizeof(struct ieee80211_hdr)) + goto drop_free; + + hdr = (struct libipw_hdr_4addr *)skb->data; + fc = le16_to_cpu(hdr->frame_ctl); + + if ((fc & IEEE80211_FCTL_VERS) != 0) + goto drop_free; + + switch (fc & IEEE80211_FCTL_FTYPE) { + case IEEE80211_FTYPE_MGMT: + if (skb->len < sizeof(struct libipw_hdr_3addr)) + goto drop_free; + libipw_rx_mgt(ieee, hdr, stats); + dev_kfree_skb_irq(skb); + return; + case IEEE80211_FTYPE_DATA: + break; + case IEEE80211_FTYPE_CTL: + return; + default: + return; + } + + is_packet_for_us = 0; + switch (ieee->iw_mode) { + case IW_MODE_ADHOC: + /* our BSS and not from/to DS */ + if (memcmp(hdr->addr3, ieee->bssid, ETH_ALEN) == 0) + if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) { + /* promisc: get all */ + if (ieee->dev->flags & IFF_PROMISC) + is_packet_for_us = 1; + /* to us */ + else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0) + is_packet_for_us = 1; + /* mcast */ + else if (is_multicast_ether_addr(hdr->addr1)) + is_packet_for_us = 1; + } + break; + case IW_MODE_INFRA: + /* our BSS (== from our AP) and from DS */ + if (memcmp(hdr->addr2, ieee->bssid, ETH_ALEN) == 0) + if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) { + /* promisc: get all */ + if (ieee->dev->flags & IFF_PROMISC) + is_packet_for_us = 1; + /* to us */ + else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0) + is_packet_for_us = 1; + /* mcast */ + else if (is_multicast_ether_addr(hdr->addr1)) { + /* not our own packet bcasted from AP */ + if (memcmp(hdr->addr3, ieee->dev->dev_addr, ETH_ALEN)) + is_packet_for_us = 1; + } + } + break; + default: + /* ? 
*/ + break; + } + + if (is_packet_for_us) + if (!libipw_rx(ieee, skb, stats)) + dev_kfree_skb_irq(skb); + return; + +drop_free: + dev_kfree_skb_irq(skb); + ieee->dev->stats.rx_dropped++; + return; +} + +#define MGMT_FRAME_FIXED_PART_LENGTH 0x24 + +static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; + +/* +* Make ther structure we read from the beacon packet has +* the right values +*/ +static int libipw_verify_qos_info(struct libipw_qos_information_element + *info_element, int sub_type) +{ + + if (info_element->qui_subtype != sub_type) + return -1; + if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN)) + return -1; + if (info_element->qui_type != QOS_OUI_TYPE) + return -1; + if (info_element->version != QOS_VERSION_1) + return -1; + + return 0; +} + +/* + * Parse a QoS parameter element + */ +static int libipw_read_qos_param_element(struct libipw_qos_parameter_info + *element_param, struct libipw_info_element + *info_element) +{ + int ret = 0; + u16 size = sizeof(struct libipw_qos_parameter_info) - 2; + + if ((info_element == NULL) || (element_param == NULL)) + return -1; + + if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) { + memcpy(element_param->info_element.qui, info_element->data, + info_element->len); + element_param->info_element.elementID = info_element->id; + element_param->info_element.length = info_element->len; + } else + ret = -1; + if (ret == 0) + ret = libipw_verify_qos_info(&element_param->info_element, + QOS_OUI_PARAM_SUB_TYPE); + return ret; +} + +/* + * Parse a QoS information element + */ +static int libipw_read_qos_info_element(struct + libipw_qos_information_element + *element_info, struct libipw_info_element + *info_element) +{ + int ret = 0; + u16 size = sizeof(struct libipw_qos_information_element) - 2; + + if (element_info == NULL) + return -1; + if (info_element == NULL) + return -1; + + if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) { + memcpy(element_info->qui, info_element->data, + info_element->len); + element_info->elementID = info_element->id; + element_info->length = info_element->len; + } else + ret = -1; + + if (ret == 0) + ret = libipw_verify_qos_info(element_info, + QOS_OUI_INFO_SUB_TYPE); + return ret; +} + +/* + * Write QoS parameters from the ac parameters. + */ +static int libipw_qos_convert_ac_to_parameters(struct + libipw_qos_parameter_info + *param_elm, struct + libipw_qos_parameters + *qos_param) +{ + int rc = 0; + int i; + struct libipw_qos_ac_parameter *ac_params; + u32 txop; + u8 cw_min; + u8 cw_max; + + for (i = 0; i < QOS_QUEUE_NUM; i++) { + ac_params = &(param_elm->ac_params_record[i]); + + qos_param->aifs[i] = (ac_params->aci_aifsn) & 0x0F; + qos_param->aifs[i] -= (qos_param->aifs[i] < 2) ? 0 : 2; + + cw_min = ac_params->ecw_min_max & 0x0F; + qos_param->cw_min[i] = cpu_to_le16((1 << cw_min) - 1); + + cw_max = (ac_params->ecw_min_max & 0xF0) >> 4; + qos_param->cw_max[i] = cpu_to_le16((1 << cw_max) - 1); + + qos_param->flag[i] = + (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00; + + txop = le16_to_cpu(ac_params->tx_op_limit) * 32; + qos_param->tx_op_limit[i] = cpu_to_le16(txop); + } + return rc; +} + +/* + * we have a generic data element which it may contain QoS information or + * parameters element. 
check the information element length to decide + * which type to read + */ +static int libipw_parse_qos_info_param_IE(struct libipw_info_element + *info_element, + struct libipw_network *network) +{ + int rc = 0; + struct libipw_qos_parameters *qos_param = NULL; + struct libipw_qos_information_element qos_info_element; + + rc = libipw_read_qos_info_element(&qos_info_element, info_element); + + if (rc == 0) { + network->qos_data.param_count = qos_info_element.ac_info & 0x0F; + network->flags |= NETWORK_HAS_QOS_INFORMATION; + } else { + struct libipw_qos_parameter_info param_element; + + rc = libipw_read_qos_param_element(¶m_element, + info_element); + if (rc == 0) { + qos_param = &(network->qos_data.parameters); + libipw_qos_convert_ac_to_parameters(¶m_element, + qos_param); + network->flags |= NETWORK_HAS_QOS_PARAMETERS; + network->qos_data.param_count = + param_element.info_element.ac_info & 0x0F; + } + } + + if (rc == 0) { + LIBIPW_DEBUG_QOS("QoS is supported\n"); + network->qos_data.supported = 1; + } + return rc; +} + +#ifdef CONFIG_LIBIPW_DEBUG +#define MFIE_STRING(x) case WLAN_EID_ ##x: return #x + +static const char *get_info_element_string(u16 id) +{ + switch (id) { + MFIE_STRING(SSID); + MFIE_STRING(SUPP_RATES); + MFIE_STRING(FH_PARAMS); + MFIE_STRING(DS_PARAMS); + MFIE_STRING(CF_PARAMS); + MFIE_STRING(TIM); + MFIE_STRING(IBSS_PARAMS); + MFIE_STRING(COUNTRY); + MFIE_STRING(HP_PARAMS); + MFIE_STRING(HP_TABLE); + MFIE_STRING(REQUEST); + MFIE_STRING(CHALLENGE); + MFIE_STRING(PWR_CONSTRAINT); + MFIE_STRING(PWR_CAPABILITY); + MFIE_STRING(TPC_REQUEST); + MFIE_STRING(TPC_REPORT); + MFIE_STRING(SUPPORTED_CHANNELS); + MFIE_STRING(CHANNEL_SWITCH); + MFIE_STRING(MEASURE_REQUEST); + MFIE_STRING(MEASURE_REPORT); + MFIE_STRING(QUIET); + MFIE_STRING(IBSS_DFS); + MFIE_STRING(ERP_INFO); + MFIE_STRING(RSN); + MFIE_STRING(EXT_SUPP_RATES); + MFIE_STRING(GENERIC); + MFIE_STRING(QOS_PARAMETER); + default: + return "UNKNOWN"; + } +} +#endif + +static int libipw_parse_info_param(struct libipw_info_element + *info_element, u16 length, + struct libipw_network *network) +{ + DECLARE_SSID_BUF(ssid); + u8 i; +#ifdef CONFIG_LIBIPW_DEBUG + char rates_str[64]; + char *p; +#endif + + while (length >= sizeof(*info_element)) { + if (sizeof(*info_element) + info_element->len > length) { + LIBIPW_DEBUG_MGMT("Info elem: parse failed: " + "info_element->len + 2 > left : " + "info_element->len+2=%zd left=%d, id=%d.\n", + info_element->len + + sizeof(*info_element), + length, info_element->id); + /* We stop processing but don't return an error here + * because some misbehaviour APs break this rule. ie. + * Orinoco AP1000. 
*/ + break; + } + + switch (info_element->id) { + case WLAN_EID_SSID: + network->ssid_len = min(info_element->len, + (u8) IW_ESSID_MAX_SIZE); + memcpy(network->ssid, info_element->data, + network->ssid_len); + if (network->ssid_len < IW_ESSID_MAX_SIZE) + memset(network->ssid + network->ssid_len, 0, + IW_ESSID_MAX_SIZE - network->ssid_len); + + LIBIPW_DEBUG_MGMT("WLAN_EID_SSID: '%s' len=%d.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->ssid_len); + break; + + case WLAN_EID_SUPP_RATES: +#ifdef CONFIG_LIBIPW_DEBUG + p = rates_str; +#endif + network->rates_len = min(info_element->len, + MAX_RATES_LENGTH); + for (i = 0; i < network->rates_len; i++) { + network->rates[i] = info_element->data[i]; +#ifdef CONFIG_LIBIPW_DEBUG + p += snprintf(p, sizeof(rates_str) - + (p - rates_str), "%02X ", + network->rates[i]); +#endif + if (libipw_is_ofdm_rate + (info_element->data[i])) { + network->flags |= NETWORK_HAS_OFDM; + if (info_element->data[i] & + LIBIPW_BASIC_RATE_MASK) + network->flags &= + ~NETWORK_HAS_CCK; + } + } + + LIBIPW_DEBUG_MGMT("WLAN_EID_SUPP_RATES: '%s' (%d)\n", + rates_str, network->rates_len); + break; + + case WLAN_EID_EXT_SUPP_RATES: +#ifdef CONFIG_LIBIPW_DEBUG + p = rates_str; +#endif + network->rates_ex_len = min(info_element->len, + MAX_RATES_EX_LENGTH); + for (i = 0; i < network->rates_ex_len; i++) { + network->rates_ex[i] = info_element->data[i]; +#ifdef CONFIG_LIBIPW_DEBUG + p += snprintf(p, sizeof(rates_str) - + (p - rates_str), "%02X ", + network->rates[i]); +#endif + if (libipw_is_ofdm_rate + (info_element->data[i])) { + network->flags |= NETWORK_HAS_OFDM; + if (info_element->data[i] & + LIBIPW_BASIC_RATE_MASK) + network->flags &= + ~NETWORK_HAS_CCK; + } + } + + LIBIPW_DEBUG_MGMT("WLAN_EID_EXT_SUPP_RATES: '%s' (%d)\n", + rates_str, network->rates_ex_len); + break; + + case WLAN_EID_DS_PARAMS: + LIBIPW_DEBUG_MGMT("WLAN_EID_DS_PARAMS: %d\n", + info_element->data[0]); + network->channel = info_element->data[0]; + break; + + case WLAN_EID_FH_PARAMS: + LIBIPW_DEBUG_MGMT("WLAN_EID_FH_PARAMS: ignored\n"); + break; + + case WLAN_EID_CF_PARAMS: + LIBIPW_DEBUG_MGMT("WLAN_EID_CF_PARAMS: ignored\n"); + break; + + case WLAN_EID_TIM: + network->tim.tim_count = info_element->data[0]; + network->tim.tim_period = info_element->data[1]; + LIBIPW_DEBUG_MGMT("WLAN_EID_TIM: partially ignored\n"); + break; + + case WLAN_EID_ERP_INFO: + network->erp_value = info_element->data[0]; + network->flags |= NETWORK_HAS_ERP_VALUE; + LIBIPW_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n", + network->erp_value); + break; + + case WLAN_EID_IBSS_PARAMS: + network->atim_window = info_element->data[0]; + LIBIPW_DEBUG_MGMT("WLAN_EID_IBSS_PARAMS: %d\n", + network->atim_window); + break; + + case WLAN_EID_CHALLENGE: + LIBIPW_DEBUG_MGMT("WLAN_EID_CHALLENGE: ignored\n"); + break; + + case WLAN_EID_GENERIC: + LIBIPW_DEBUG_MGMT("WLAN_EID_GENERIC: %d bytes\n", + info_element->len); + if (!libipw_parse_qos_info_param_IE(info_element, + network)) + break; + + if (info_element->len >= 4 && + info_element->data[0] == 0x00 && + info_element->data[1] == 0x50 && + info_element->data[2] == 0xf2 && + info_element->data[3] == 0x01) { + network->wpa_ie_len = min(info_element->len + 2, + MAX_WPA_IE_LEN); + memcpy(network->wpa_ie, info_element, + network->wpa_ie_len); + } + break; + + case WLAN_EID_RSN: + LIBIPW_DEBUG_MGMT("WLAN_EID_RSN: %d bytes\n", + info_element->len); + network->rsn_ie_len = min(info_element->len + 2, + MAX_WPA_IE_LEN); + memcpy(network->rsn_ie, info_element, + network->rsn_ie_len); + break; + + case 
WLAN_EID_QOS_PARAMETER: + printk(KERN_ERR + "QoS Error need to parse QOS_PARAMETER IE\n"); + break; + /* 802.11h */ + case WLAN_EID_PWR_CONSTRAINT: + network->power_constraint = info_element->data[0]; + network->flags |= NETWORK_HAS_POWER_CONSTRAINT; + break; + + case WLAN_EID_CHANNEL_SWITCH: + network->power_constraint = info_element->data[0]; + network->flags |= NETWORK_HAS_CSA; + break; + + case WLAN_EID_QUIET: + network->quiet.count = info_element->data[0]; + network->quiet.period = info_element->data[1]; + network->quiet.duration = info_element->data[2]; + network->quiet.offset = info_element->data[3]; + network->flags |= NETWORK_HAS_QUIET; + break; + + case WLAN_EID_IBSS_DFS: + if (network->ibss_dfs) + break; + network->ibss_dfs = kmemdup(info_element->data, + info_element->len, + GFP_ATOMIC); + if (!network->ibss_dfs) + return 1; + network->flags |= NETWORK_HAS_IBSS_DFS; + break; + + case WLAN_EID_TPC_REPORT: + network->tpc_report.transmit_power = + info_element->data[0]; + network->tpc_report.link_margin = info_element->data[1]; + network->flags |= NETWORK_HAS_TPC_REPORT; + break; + + default: + LIBIPW_DEBUG_MGMT + ("Unsupported info element: %s (%d)\n", + get_info_element_string(info_element->id), + info_element->id); + break; + } + + length -= sizeof(*info_element) + info_element->len; + info_element = + (struct libipw_info_element *)&info_element-> + data[info_element->len]; + } + + return 0; +} + +static int libipw_handle_assoc_resp(struct libipw_device *ieee, struct libipw_assoc_response + *frame, struct libipw_rx_stats *stats) +{ + struct libipw_network network_resp = { + .ibss_dfs = NULL, + }; + struct libipw_network *network = &network_resp; + struct net_device *dev = ieee->dev; + + network->flags = 0; + network->qos_data.active = 0; + network->qos_data.supported = 0; + network->qos_data.param_count = 0; + network->qos_data.old_param_count = 0; + + //network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF); + network->atim_window = le16_to_cpu(frame->aid); + network->listen_interval = le16_to_cpu(frame->status); + memcpy(network->bssid, frame->header.addr3, ETH_ALEN); + network->capability = le16_to_cpu(frame->capability); + network->last_scanned = jiffies; + network->rates_len = network->rates_ex_len = 0; + network->last_associate = 0; + network->ssid_len = 0; + network->erp_value = + (network->capability & WLAN_CAPABILITY_IBSS) ? 
0x3 : 0x0; + + if (stats->freq == LIBIPW_52GHZ_BAND) { + /* for A band (No DS info) */ + network->channel = stats->received_channel; + } else + network->flags |= NETWORK_HAS_CCK; + + network->wpa_ie_len = 0; + network->rsn_ie_len = 0; + + if (libipw_parse_info_param + (frame->info_element, stats->len - sizeof(*frame), network)) + return 1; + + network->mode = 0; + if (stats->freq == LIBIPW_52GHZ_BAND) + network->mode = IEEE_A; + else { + if (network->flags & NETWORK_HAS_OFDM) + network->mode |= IEEE_G; + if (network->flags & NETWORK_HAS_CCK) + network->mode |= IEEE_B; + } + + memcpy(&network->stats, stats, sizeof(network->stats)); + + if (ieee->handle_assoc_response != NULL) + ieee->handle_assoc_response(dev, frame, network); + + return 0; +} + +/***************************************************/ + +static int libipw_network_init(struct libipw_device *ieee, struct libipw_probe_response + *beacon, + struct libipw_network *network, + struct libipw_rx_stats *stats) +{ + DECLARE_SSID_BUF(ssid); + + network->qos_data.active = 0; + network->qos_data.supported = 0; + network->qos_data.param_count = 0; + network->qos_data.old_param_count = 0; + + /* Pull out fixed field data */ + memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); + network->capability = le16_to_cpu(beacon->capability); + network->last_scanned = jiffies; + network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); + network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); + network->beacon_interval = le16_to_cpu(beacon->beacon_interval); + /* Where to pull this? beacon->listen_interval; */ + network->listen_interval = 0x0A; + network->rates_len = network->rates_ex_len = 0; + network->last_associate = 0; + network->ssid_len = 0; + network->flags = 0; + network->atim_window = 0; + network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? + 0x3 : 0x0; + + if (stats->freq == LIBIPW_52GHZ_BAND) { + /* for A band (No DS info) */ + network->channel = stats->received_channel; + } else + network->flags |= NETWORK_HAS_CCK; + + network->wpa_ie_len = 0; + network->rsn_ie_len = 0; + + if (libipw_parse_info_param + (beacon->info_element, stats->len - sizeof(*beacon), network)) + return 1; + + network->mode = 0; + if (stats->freq == LIBIPW_52GHZ_BAND) + network->mode = IEEE_A; + else { + if (network->flags & NETWORK_HAS_OFDM) + network->mode |= IEEE_G; + if (network->flags & NETWORK_HAS_CCK) + network->mode |= IEEE_B; + } + + if (network->mode == 0) { + LIBIPW_DEBUG_SCAN("Filtered out '%s (%pM)' " + "network.\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid); + return 1; + } + + memcpy(&network->stats, stats, sizeof(network->stats)); + + return 0; +} + +static inline int is_same_network(struct libipw_network *src, + struct libipw_network *dst) +{ + /* A network is only a duplicate if the channel, BSSID, and ESSID + * all match. We treat all with the same BSSID and channel + * as one network */ + return ((src->ssid_len == dst->ssid_len) && + (src->channel == dst->channel) && + !compare_ether_addr(src->bssid, dst->bssid) && + !memcmp(src->ssid, dst->ssid, src->ssid_len)); +} + +static void update_network(struct libipw_network *dst, + struct libipw_network *src) +{ + int qos_active; + u8 old_param; + + libipw_network_reset(dst); + dst->ibss_dfs = src->ibss_dfs; + + /* We only update the statistics if they were created by receiving + * the network information on the actual channel the network is on. 
+ * + * This keeps beacons received on neighbor channels from bringing + * down the signal level of an AP. */ + if (dst->channel == src->stats.received_channel) + memcpy(&dst->stats, &src->stats, + sizeof(struct libipw_rx_stats)); + else + LIBIPW_DEBUG_SCAN("Network %pM info received " + "off channel (%d vs. %d)\n", src->bssid, + dst->channel, src->stats.received_channel); + + dst->capability = src->capability; + memcpy(dst->rates, src->rates, src->rates_len); + dst->rates_len = src->rates_len; + memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len); + dst->rates_ex_len = src->rates_ex_len; + + dst->mode = src->mode; + dst->flags = src->flags; + dst->time_stamp[0] = src->time_stamp[0]; + dst->time_stamp[1] = src->time_stamp[1]; + + dst->beacon_interval = src->beacon_interval; + dst->listen_interval = src->listen_interval; + dst->atim_window = src->atim_window; + dst->erp_value = src->erp_value; + dst->tim = src->tim; + + memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); + dst->wpa_ie_len = src->wpa_ie_len; + memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len); + dst->rsn_ie_len = src->rsn_ie_len; + + dst->last_scanned = jiffies; + qos_active = src->qos_data.active; + old_param = dst->qos_data.old_param_count; + if (dst->flags & NETWORK_HAS_QOS_MASK) + memcpy(&dst->qos_data, &src->qos_data, + sizeof(struct libipw_qos_data)); + else { + dst->qos_data.supported = src->qos_data.supported; + dst->qos_data.param_count = src->qos_data.param_count; + } + + if (dst->qos_data.supported == 1) { + if (dst->ssid_len) + LIBIPW_DEBUG_QOS + ("QoS the network %s is QoS supported\n", + dst->ssid); + else + LIBIPW_DEBUG_QOS + ("QoS the network is QoS supported\n"); + } + dst->qos_data.active = qos_active; + dst->qos_data.old_param_count = old_param; + + /* dst->last_associate is not overwritten */ +} + +static inline int is_beacon(__le16 fc) +{ + return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); +} + +static void libipw_process_probe_response(struct libipw_device + *ieee, struct + libipw_probe_response + *beacon, struct libipw_rx_stats + *stats) +{ + struct net_device *dev = ieee->dev; + struct libipw_network network = { + .ibss_dfs = NULL, + }; + struct libipw_network *target; + struct libipw_network *oldest = NULL; +#ifdef CONFIG_LIBIPW_DEBUG + struct libipw_info_element *info_element = beacon->info_element; +#endif + unsigned long flags; + DECLARE_SSID_BUF(ssid); + + LIBIPW_DEBUG_SCAN("'%s' (%pM" + "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n", + print_ssid(ssid, info_element->data, info_element->len), + beacon->header.addr3, + (beacon->capability & cpu_to_le16(1 << 0xf)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0xe)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0xd)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0xc)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0xb)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0xa)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0x9)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0x8)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0x7)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0x6)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0x5)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0x4)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0x3)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0x2)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0x1)) ? '1' : '0', + (beacon->capability & cpu_to_le16(1 << 0x0)) ? 
'1' : '0'); + + if (libipw_network_init(ieee, beacon, &network, stats)) { + LIBIPW_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n", + print_ssid(ssid, info_element->data, + info_element->len), + beacon->header.addr3, + is_beacon(beacon->header.frame_ctl) ? + "BEACON" : "PROBE RESPONSE"); + return; + } + + /* The network parsed correctly -- so now we scan our known networks + * to see if we can find it in our list. + * + * NOTE: This search is definitely not optimized. Once its doing + * the "right thing" we'll optimize it for efficiency if + * necessary */ + + /* Search for this entry in the list and update it if it is + * already there. */ + + spin_lock_irqsave(&ieee->lock, flags); + + list_for_each_entry(target, &ieee->network_list, list) { + if (is_same_network(target, &network)) + break; + + if ((oldest == NULL) || + time_before(target->last_scanned, oldest->last_scanned)) + oldest = target; + } + + /* If we didn't find a match, then get a new network slot to initialize + * with this beacon's information */ + if (&target->list == &ieee->network_list) { + if (list_empty(&ieee->network_free_list)) { + /* If there are no more slots, expire the oldest */ + list_del(&oldest->list); + target = oldest; + LIBIPW_DEBUG_SCAN("Expired '%s' (%pM) from " + "network list.\n", + print_ssid(ssid, target->ssid, + target->ssid_len), + target->bssid); + libipw_network_reset(target); + } else { + /* Otherwise just pull from the free list */ + target = list_entry(ieee->network_free_list.next, + struct libipw_network, list); + list_del(ieee->network_free_list.next); + } + +#ifdef CONFIG_LIBIPW_DEBUG + LIBIPW_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n", + print_ssid(ssid, network.ssid, + network.ssid_len), + network.bssid, + is_beacon(beacon->header.frame_ctl) ? + "BEACON" : "PROBE RESPONSE"); +#endif + memcpy(target, &network, sizeof(*target)); + network.ibss_dfs = NULL; + list_add_tail(&target->list, &ieee->network_list); + } else { + LIBIPW_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n", + print_ssid(ssid, target->ssid, + target->ssid_len), + target->bssid, + is_beacon(beacon->header.frame_ctl) ? 
+ "BEACON" : "PROBE RESPONSE"); + update_network(target, &network); + network.ibss_dfs = NULL; + } + + spin_unlock_irqrestore(&ieee->lock, flags); + + if (is_beacon(beacon->header.frame_ctl)) { + if (ieee->handle_beacon != NULL) + ieee->handle_beacon(dev, beacon, target); + } else { + if (ieee->handle_probe_response != NULL) + ieee->handle_probe_response(dev, beacon, target); + } +} + +void libipw_rx_mgt(struct libipw_device *ieee, + struct libipw_hdr_4addr *header, + struct libipw_rx_stats *stats) +{ + switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) { + case IEEE80211_STYPE_ASSOC_RESP: + LIBIPW_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + libipw_handle_assoc_resp(ieee, + (struct libipw_assoc_response *) + header, stats); + break; + + case IEEE80211_STYPE_REASSOC_RESP: + LIBIPW_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + break; + + case IEEE80211_STYPE_PROBE_REQ: + LIBIPW_DEBUG_MGMT("received auth (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + + if (ieee->handle_probe_request != NULL) + ieee->handle_probe_request(ieee->dev, + (struct + libipw_probe_request *) + header, stats); + break; + + case IEEE80211_STYPE_PROBE_RESP: + LIBIPW_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + LIBIPW_DEBUG_SCAN("Probe response\n"); + libipw_process_probe_response(ieee, + (struct + libipw_probe_response *) + header, stats); + break; + + case IEEE80211_STYPE_BEACON: + LIBIPW_DEBUG_MGMT("received BEACON (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + LIBIPW_DEBUG_SCAN("Beacon\n"); + libipw_process_probe_response(ieee, + (struct + libipw_probe_response *) + header, stats); + break; + case IEEE80211_STYPE_AUTH: + + LIBIPW_DEBUG_MGMT("received auth (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + + if (ieee->handle_auth != NULL) + ieee->handle_auth(ieee->dev, + (struct libipw_auth *)header); + break; + + case IEEE80211_STYPE_DISASSOC: + if (ieee->handle_disassoc != NULL) + ieee->handle_disassoc(ieee->dev, + (struct libipw_disassoc *) + header); + break; + + case IEEE80211_STYPE_ACTION: + LIBIPW_DEBUG_MGMT("ACTION\n"); + if (ieee->handle_action) + ieee->handle_action(ieee->dev, + (struct libipw_action *) + header, stats); + break; + + case IEEE80211_STYPE_REASSOC_REQ: + LIBIPW_DEBUG_MGMT("received reassoc (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + + LIBIPW_DEBUG_MGMT("%s: LIBIPW_REASSOC_REQ received\n", + ieee->dev->name); + if (ieee->handle_reassoc_request != NULL) + ieee->handle_reassoc_request(ieee->dev, + (struct libipw_reassoc_request *) + header); + break; + + case IEEE80211_STYPE_ASSOC_REQ: + LIBIPW_DEBUG_MGMT("received assoc (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + + LIBIPW_DEBUG_MGMT("%s: LIBIPW_ASSOC_REQ received\n", + ieee->dev->name); + if (ieee->handle_assoc_request != NULL) + ieee->handle_assoc_request(ieee->dev); + break; + + case IEEE80211_STYPE_DEAUTH: + LIBIPW_DEBUG_MGMT("DEAUTH\n"); + if (ieee->handle_deauth != NULL) + ieee->handle_deauth(ieee->dev, + (struct libipw_deauth *) + header); + break; + default: + LIBIPW_DEBUG_MGMT("received UNKNOWN (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + LIBIPW_DEBUG_MGMT("%s: Unknown management packet: %d\n", + ieee->dev->name, + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + break; + } +} + 
+EXPORT_SYMBOL_GPL(libipw_rx_any); +EXPORT_SYMBOL(libipw_rx_mgt); +EXPORT_SYMBOL(libipw_rx); diff --git a/drivers/net/wireless/ipw2x00/libipw_tx.c b/drivers/net/wireless/ipw2x00/libipw_tx.c new file mode 100644 index 0000000..da8beac --- /dev/null +++ b/drivers/net/wireless/ipw2x00/libipw_tx.c @@ -0,0 +1,546 @@ +/****************************************************************************** + + Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + Intel Linux Wireless + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libipw.h" + +/* + +802.11 Data Frame + + ,-------------------------------------------------------------------. +Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 | + |------|------|---------|---------|---------|------|---------|------| +Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs | + | | tion | (BSSID) | | | ence | data | | + `--------------------------------------------------| |------' +Total: 28 non-data bytes `----.----' + | + .- 'Frame data' expands, if WEP enabled, to <----------' + | + V + ,-----------------------. +Bytes | 4 | 0-2296 | 4 | + |-----|-----------|-----| +Desc. | IV | Encrypted | ICV | + | | Packet | | + `-----| |-----' + `-----.-----' + | + .- 'Encrypted Packet' expands to + | + V + ,---------------------------------------------------. +Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 | + |------|------|---------|----------|------|---------| +Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP | + | DSAP | SSAP | | | | Packet | + | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | | + `---------------------------------------------------- +Total: 8 non-data bytes + +802.3 Ethernet Data Frame + + ,-----------------------------------------. +Bytes | 6 | 6 | 2 | Variable | 4 | + |-------|-------|------|-----------|------| +Desc. | Dest. | Source| Type | IP Packet | fcs | + | MAC | MAC | | | | + `-----------------------------------------' +Total: 18 non-data bytes + +In the event that fragmentation is required, the incoming payload is split into +N parts of size ieee->fts. The first fragment contains the SNAP header and the +remaining packets are just data. + +If encryption is enabled, each fragment payload size is reduced by enough space +to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP) +So if you have 1500 bytes of payload with ieee->fts set to 500 without +encryption it will take 3 frames. With WEP it will take 4 frames as the +payload of each frame is reduced to 492 bytes. 
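(Editor's note, a worked example spelling out the arithmetic in the
 paragraph above rather than adding anything new: with ieee->fts = 500 and
 WEP adding a 4-byte IV prefix plus a 4-byte ICV postfix, each fragment
 carries 500 - 8 = 492 payload bytes, so 1500 bytes of payload need
 ceil(1500 / 492) = 4 fragments -- three full ones and a last fragment of
 24 bytes -- whereas without encryption 1500 / 500 = 3 fragments suffice.)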
+ +* SKB visualization +* +* ,- skb->data +* | +* | ETHERNET HEADER ,-<-- PAYLOAD +* | | 14 bytes from skb->data +* | 2 bytes for Type --> ,T. | (sizeof ethhdr) +* | | | | +* |,-Dest.--. ,--Src.---. | | | +* | 6 bytes| | 6 bytes | | | | +* v | | | | | | +* 0 | v 1 | v | v 2 +* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 +* ^ | ^ | ^ | +* | | | | | | +* | | | | `T' <---- 2 bytes for Type +* | | | | +* | | '---SNAP--' <-------- 6 bytes for SNAP +* | | +* `-IV--' <-------------------- 4 bytes for IV (WEP) +* +* SNAP HEADER +* +*/ + +static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; +static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; + +static int libipw_copy_snap(u8 * data, __be16 h_proto) +{ + struct libipw_snap_hdr *snap; + u8 *oui; + + snap = (struct libipw_snap_hdr *)data; + snap->dsap = 0xaa; + snap->ssap = 0xaa; + snap->ctrl = 0x03; + + if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX)) + oui = P802_1H_OUI; + else + oui = RFC1042_OUI; + snap->oui[0] = oui[0]; + snap->oui[1] = oui[1]; + snap->oui[2] = oui[2]; + + memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16)); + + return SNAP_SIZE + sizeof(u16); +} + +static int libipw_encrypt_fragment(struct libipw_device *ieee, + struct sk_buff *frag, int hdr_len) +{ + struct lib80211_crypt_data *crypt = + ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; + int res; + + if (crypt == NULL) + return -1; + + /* To encrypt, frame format is: + * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ + atomic_inc(&crypt->refcnt); + res = 0; + if (crypt->ops && crypt->ops->encrypt_mpdu) + res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); + + atomic_dec(&crypt->refcnt); + if (res < 0) { + printk(KERN_INFO "%s: Encryption failed: len=%d.\n", + ieee->dev->name, frag->len); + ieee->ieee_stats.tx_discards++; + return -1; + } + + return 0; +} + +void libipw_txb_free(struct libipw_txb *txb) +{ + int i; + if (unlikely(!txb)) + return; + for (i = 0; i < txb->nr_frags; i++) + if (txb->fragments[i]) + dev_kfree_skb_any(txb->fragments[i]); + kfree(txb); +} + +static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size, + int headroom, gfp_t gfp_mask) +{ + struct libipw_txb *txb; + int i; + txb = kmalloc(sizeof(struct libipw_txb) + (sizeof(u8 *) * nr_frags), + gfp_mask); + if (!txb) + return NULL; + + memset(txb, 0, sizeof(struct libipw_txb)); + txb->nr_frags = nr_frags; + txb->frag_size = txb_size; + + for (i = 0; i < nr_frags; i++) { + txb->fragments[i] = __dev_alloc_skb(txb_size + headroom, + gfp_mask); + if (unlikely(!txb->fragments[i])) { + i--; + break; + } + skb_reserve(txb->fragments[i], headroom); + } + if (unlikely(i != nr_frags)) { + while (i >= 0) + dev_kfree_skb_any(txb->fragments[i--]); + kfree(txb); + return NULL; + } + return txb; +} + +static int libipw_classify(struct sk_buff *skb) +{ + struct ethhdr *eth; + struct iphdr *ip; + + eth = (struct ethhdr *)skb->data; + if (eth->h_proto != htons(ETH_P_IP)) + return 0; + + ip = ip_hdr(skb); + switch (ip->tos & 0xfc) { + case 0x20: + return 2; + case 0x40: + return 1; + case 0x60: + return 3; + case 0x80: + return 4; + case 0xa0: + return 5; + case 0xc0: + return 6; + case 0xe0: + return 7; + default: + return 0; + } +} + +/* Incoming skb is converted to a txb which consists of + * a block of 802.11 fragment packets (stored as skbs) */ +netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct libipw_device *ieee = netdev_priv(dev); + struct libipw_txb *txb = NULL; + struct libipw_hdr_3addrqos *frag_hdr; + int i, 
bytes_per_frag, nr_frags, bytes_last_frag, frag_size, + rts_required; + unsigned long flags; + int encrypt, host_encrypt, host_encrypt_msdu, host_build_iv; + __be16 ether_type; + int bytes, fc, hdr_len; + struct sk_buff *skb_frag; + struct libipw_hdr_3addrqos header = {/* Ensure zero initialized */ + .duration_id = 0, + .seq_ctl = 0, + .qos_ctl = 0 + }; + u8 dest[ETH_ALEN], src[ETH_ALEN]; + struct lib80211_crypt_data *crypt; + int priority = skb->priority; + int snapped = 0; + + if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority)) + return NETDEV_TX_BUSY; + + spin_lock_irqsave(&ieee->lock, flags); + + /* If there is no driver handler to take the TXB, dont' bother + * creating it... */ + if (!ieee->hard_start_xmit) { + printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name); + goto success; + } + + if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) { + printk(KERN_WARNING "%s: skb too small (%d).\n", + ieee->dev->name, skb->len); + goto success; + } + + ether_type = ((struct ethhdr *)skb->data)->h_proto; + + crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; + + encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) && + ieee->sec.encrypt; + + host_encrypt = ieee->host_encrypt && encrypt && crypt; + host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt; + host_build_iv = ieee->host_build_iv && encrypt && crypt; + + if (!encrypt && ieee->ieee802_1x && + ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) { + dev->stats.tx_dropped++; + goto success; + } + + /* Save source and destination addresses */ + skb_copy_from_linear_data(skb, dest, ETH_ALEN); + skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN); + + if (host_encrypt || host_build_iv) + fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | + IEEE80211_FCTL_PROTECTED; + else + fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; + + if (ieee->iw_mode == IW_MODE_INFRA) { + fc |= IEEE80211_FCTL_TODS; + /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ + memcpy(header.addr1, ieee->bssid, ETH_ALEN); + memcpy(header.addr2, src, ETH_ALEN); + memcpy(header.addr3, dest, ETH_ALEN); + } else if (ieee->iw_mode == IW_MODE_ADHOC) { + /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ + memcpy(header.addr1, dest, ETH_ALEN); + memcpy(header.addr2, src, ETH_ALEN); + memcpy(header.addr3, ieee->bssid, ETH_ALEN); + } + hdr_len = LIBIPW_3ADDR_LEN; + + if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) { + fc |= IEEE80211_STYPE_QOS_DATA; + hdr_len += 2; + + skb->priority = libipw_classify(skb); + header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID); + } + header.frame_ctl = cpu_to_le16(fc); + + /* Advance the SKB to the start of the payload */ + skb_pull(skb, sizeof(struct ethhdr)); + + /* Determine total amount of storage required for TXB packets */ + bytes = skb->len + SNAP_SIZE + sizeof(u16); + + /* Encrypt msdu first on the whole data packet. 
*/ + if ((host_encrypt || host_encrypt_msdu) && + crypt && crypt->ops && crypt->ops->encrypt_msdu) { + int res = 0; + int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len + + crypt->ops->extra_msdu_postfix_len; + struct sk_buff *skb_new = dev_alloc_skb(len); + + if (unlikely(!skb_new)) + goto failed; + + skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len); + memcpy(skb_put(skb_new, hdr_len), &header, hdr_len); + snapped = 1; + libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)), + ether_type); + skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len); + res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv); + if (res < 0) { + LIBIPW_ERROR("msdu encryption failed\n"); + dev_kfree_skb_any(skb_new); + goto failed; + } + dev_kfree_skb_any(skb); + skb = skb_new; + bytes += crypt->ops->extra_msdu_prefix_len + + crypt->ops->extra_msdu_postfix_len; + skb_pull(skb, hdr_len); + } + + if (host_encrypt || ieee->host_open_frag) { + /* Determine fragmentation size based on destination (multicast + * and broadcast are not fragmented) */ + if (is_multicast_ether_addr(dest) || + is_broadcast_ether_addr(dest)) + frag_size = MAX_FRAG_THRESHOLD; + else + frag_size = ieee->fts; + + /* Determine amount of payload per fragment. Regardless of if + * this stack is providing the full 802.11 header, one will + * eventually be affixed to this fragment -- so we must account + * for it when determining the amount of payload space. */ + bytes_per_frag = frag_size - hdr_len; + if (ieee->config & + (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) + bytes_per_frag -= LIBIPW_FCS_LEN; + + /* Each fragment may need to have room for encryptiong + * pre/postfix */ + if (host_encrypt) + bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + + crypt->ops->extra_mpdu_postfix_len; + + /* Number of fragments is the total + * bytes_per_frag / payload_per_fragment */ + nr_frags = bytes / bytes_per_frag; + bytes_last_frag = bytes % bytes_per_frag; + if (bytes_last_frag) + nr_frags++; + else + bytes_last_frag = bytes_per_frag; + } else { + nr_frags = 1; + bytes_per_frag = bytes_last_frag = bytes; + frag_size = bytes + hdr_len; + } + + rts_required = (frag_size > ieee->rts + && ieee->config & CFG_LIBIPW_RTS); + if (rts_required) + nr_frags++; + + /* When we allocate the TXB we allocate enough space for the reserve + * and full fragment bytes (bytes_per_frag doesn't include prefix, + * postfix, header, FCS, etc.) */ + txb = libipw_alloc_txb(nr_frags, frag_size, + ieee->tx_headroom, GFP_ATOMIC); + if (unlikely(!txb)) { + printk(KERN_WARNING "%s: Could not allocate TXB\n", + ieee->dev->name); + goto failed; + } + txb->encrypted = encrypt; + if (host_encrypt) + txb->payload_size = frag_size * (nr_frags - 1) + + bytes_last_frag; + else + txb->payload_size = bytes; + + if (rts_required) { + skb_frag = txb->fragments[0]; + frag_hdr = + (struct libipw_hdr_3addrqos *)skb_put(skb_frag, hdr_len); + + /* + * Set header frame_ctl to the RTS. + */ + header.frame_ctl = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); + memcpy(frag_hdr, &header, hdr_len); + + /* + * Restore header frame_ctl to the original data setting. 
+ */ + header.frame_ctl = cpu_to_le16(fc); + + if (ieee->config & + (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) + skb_put(skb_frag, 4); + + txb->rts_included = 1; + i = 1; + } else + i = 0; + + for (; i < nr_frags; i++) { + skb_frag = txb->fragments[i]; + + if (host_encrypt || host_build_iv) + skb_reserve(skb_frag, + crypt->ops->extra_mpdu_prefix_len); + + frag_hdr = + (struct libipw_hdr_3addrqos *)skb_put(skb_frag, hdr_len); + memcpy(frag_hdr, &header, hdr_len); + + /* If this is not the last fragment, then add the MOREFRAGS + * bit to the frame control */ + if (i != nr_frags - 1) { + frag_hdr->frame_ctl = + cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS); + bytes = bytes_per_frag; + } else { + /* The last fragment takes the remaining length */ + bytes = bytes_last_frag; + } + + if (i == 0 && !snapped) { + libipw_copy_snap(skb_put + (skb_frag, SNAP_SIZE + sizeof(u16)), + ether_type); + bytes -= SNAP_SIZE + sizeof(u16); + } + + skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes); + + /* Advance the SKB... */ + skb_pull(skb, bytes); + + /* Encryption routine will move the header forward in order + * to insert the IV between the header and the payload */ + if (host_encrypt) + libipw_encrypt_fragment(ieee, skb_frag, hdr_len); + else if (host_build_iv) { + atomic_inc(&crypt->refcnt); + if (crypt->ops->build_iv) + crypt->ops->build_iv(skb_frag, hdr_len, + ieee->sec.keys[ieee->sec.active_key], + ieee->sec.key_sizes[ieee->sec.active_key], + crypt->priv); + atomic_dec(&crypt->refcnt); + } + + if (ieee->config & + (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) + skb_put(skb_frag, 4); + } + + success: + spin_unlock_irqrestore(&ieee->lock, flags); + + dev_kfree_skb_any(skb); + + if (txb) { + netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev, priority); + if (ret == NETDEV_TX_OK) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += txb->payload_size; + return NETDEV_TX_OK; + } + + libipw_txb_free(txb); + } + + return NETDEV_TX_OK; + + failed: + spin_unlock_irqrestore(&ieee->lock, flags); + netif_stop_queue(dev); + dev->stats.tx_errors++; + return NETDEV_TX_BUSY; +} +EXPORT_SYMBOL(libipw_xmit); + +EXPORT_SYMBOL(libipw_txb_free); diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c new file mode 100644 index 0000000..4d89f66 --- /dev/null +++ b/drivers/net/wireless/ipw2x00/libipw_wx.c @@ -0,0 +1,771 @@ +/****************************************************************************** + + Copyright(c) 2004-2005 Intel Corporation. All rights reserved. + + Portions of this file are based on the WEP enablement code provided by the + Host AP project hostap-drivers v0.1.3 + Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen + + Copyright (c) 2002-2003, Jouni Malinen + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. 
+ + Contact Information: + Intel Linux Wireless + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +******************************************************************************/ + +#include +#include +#include + +#include +#include + +#include "libipw.h" + +static const char *libipw_modes[] = { + "?", "a", "b", "ab", "g", "ag", "bg", "abg" +}; + +static inline unsigned int elapsed_jiffies_msecs(unsigned long start) +{ + unsigned long end = jiffies; + + if (end >= start) + return jiffies_to_msecs(end - start); + + return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1); +} + +#define MAX_CUSTOM_LEN 64 +static char *libipw_translate_scan(struct libipw_device *ieee, + char *start, char *stop, + struct libipw_network *network, + struct iw_request_info *info) +{ + char custom[MAX_CUSTOM_LEN]; + char *p; + struct iw_event iwe; + int i, j; + char *current_val; /* For rates */ + u8 rate; + + /* First entry *MUST* be the AP MAC address */ + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN); + start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN); + + /* Remaining entries will be displayed in the order we provide them */ + + /* Add the ESSID */ + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + iwe.u.data.length = min(network->ssid_len, (u8) 32); + start = iwe_stream_add_point(info, start, stop, + &iwe, network->ssid); + + /* Add the protocol name */ + iwe.cmd = SIOCGIWNAME; + snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", + libipw_modes[network->mode]); + start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN); + + /* Add mode */ + iwe.cmd = SIOCGIWMODE; + if (network->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) { + if (network->capability & WLAN_CAPABILITY_ESS) + iwe.u.mode = IW_MODE_MASTER; + else + iwe.u.mode = IW_MODE_ADHOC; + + start = iwe_stream_add_event(info, start, stop, + &iwe, IW_EV_UINT_LEN); + } + + /* Add channel and frequency */ + /* Note : userspace automatically computes channel using iwrange */ + iwe.cmd = SIOCGIWFREQ; + iwe.u.freq.m = libipw_channel_to_freq(ieee, network->channel); + iwe.u.freq.e = 6; + iwe.u.freq.i = 0; + start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN); + + /* Add encryption capability */ + iwe.cmd = SIOCGIWENCODE; + if (network->capability & WLAN_CAPABILITY_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + start = iwe_stream_add_point(info, start, stop, + &iwe, network->ssid); + + /* Add basic and extended rates */ + /* Rate : stuffing multiple values in a single event require a bit + * more of magic - Jean II */ + current_val = start + iwe_stream_lcp_len(info); + iwe.cmd = SIOCGIWRATE; + /* Those two flags are ignored... 
*/ + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + + for (i = 0, j = 0; i < network->rates_len;) { + if (j < network->rates_ex_len && + ((network->rates_ex[j] & 0x7F) < + (network->rates[i] & 0x7F))) + rate = network->rates_ex[j++] & 0x7F; + else + rate = network->rates[i++] & 0x7F; + /* Bit rate given in 500 kb/s units (+ 0x80) */ + iwe.u.bitrate.value = ((rate & 0x7f) * 500000); + /* Add new value to event */ + current_val = iwe_stream_add_value(info, start, current_val, + stop, &iwe, IW_EV_PARAM_LEN); + } + for (; j < network->rates_ex_len; j++) { + rate = network->rates_ex[j] & 0x7F; + /* Bit rate given in 500 kb/s units (+ 0x80) */ + iwe.u.bitrate.value = ((rate & 0x7f) * 500000); + /* Add new value to event */ + current_val = iwe_stream_add_value(info, start, current_val, + stop, &iwe, IW_EV_PARAM_LEN); + } + /* Check if we added any rate */ + if ((current_val - start) > iwe_stream_lcp_len(info)) + start = current_val; + + /* Add quality statistics */ + iwe.cmd = IWEVQUAL; + iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | + IW_QUAL_NOISE_UPDATED; + + if (!(network->stats.mask & LIBIPW_STATMASK_RSSI)) { + iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID | + IW_QUAL_LEVEL_INVALID; + iwe.u.qual.qual = 0; + } else { + if (ieee->perfect_rssi == ieee->worst_rssi) + iwe.u.qual.qual = 100; + else + iwe.u.qual.qual = + (100 * + (ieee->perfect_rssi - ieee->worst_rssi) * + (ieee->perfect_rssi - ieee->worst_rssi) - + (ieee->perfect_rssi - network->stats.rssi) * + (15 * (ieee->perfect_rssi - ieee->worst_rssi) + + 62 * (ieee->perfect_rssi - + network->stats.rssi))) / + ((ieee->perfect_rssi - + ieee->worst_rssi) * (ieee->perfect_rssi - + ieee->worst_rssi)); + if (iwe.u.qual.qual > 100) + iwe.u.qual.qual = 100; + else if (iwe.u.qual.qual < 1) + iwe.u.qual.qual = 0; + } + + if (!(network->stats.mask & LIBIPW_STATMASK_NOISE)) { + iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID; + iwe.u.qual.noise = 0; + } else { + iwe.u.qual.noise = network->stats.noise; + } + + if (!(network->stats.mask & LIBIPW_STATMASK_SIGNAL)) { + iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID; + iwe.u.qual.level = 0; + } else { + iwe.u.qual.level = network->stats.signal; + } + + start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN); + + iwe.cmd = IWEVCUSTOM; + p = custom; + + iwe.u.data.length = p - custom; + if (iwe.u.data.length) + start = iwe_stream_add_point(info, start, stop, &iwe, custom); + + memset(&iwe, 0, sizeof(iwe)); + if (network->wpa_ie_len) { + char buf[MAX_WPA_IE_LEN]; + memcpy(buf, network->wpa_ie, network->wpa_ie_len); + iwe.cmd = IWEVGENIE; + iwe.u.data.length = network->wpa_ie_len; + start = iwe_stream_add_point(info, start, stop, &iwe, buf); + } + + memset(&iwe, 0, sizeof(iwe)); + if (network->rsn_ie_len) { + char buf[MAX_WPA_IE_LEN]; + memcpy(buf, network->rsn_ie, network->rsn_ie_len); + iwe.cmd = IWEVGENIE; + iwe.u.data.length = network->rsn_ie_len; + start = iwe_stream_add_point(info, start, stop, &iwe, buf); + } + + /* Add EXTRA: Age to display seconds since last beacon/probe response + * for given network. 
*/ + iwe.cmd = IWEVCUSTOM; + p = custom; + p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), + " Last beacon: %ums ago", + elapsed_jiffies_msecs(network->last_scanned)); + iwe.u.data.length = p - custom; + if (iwe.u.data.length) + start = iwe_stream_add_point(info, start, stop, &iwe, custom); + + /* Add spectrum management information */ + iwe.cmd = -1; + p = custom; + p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: "); + + if (libipw_get_channel_flags(ieee, network->channel) & + LIBIPW_CH_INVALID) { + iwe.cmd = IWEVCUSTOM; + p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID "); + } + + if (libipw_get_channel_flags(ieee, network->channel) & + LIBIPW_CH_RADAR_DETECT) { + iwe.cmd = IWEVCUSTOM; + p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS "); + } + + if (iwe.cmd == IWEVCUSTOM) { + iwe.u.data.length = p - custom; + start = iwe_stream_add_point(info, start, stop, &iwe, custom); + } + + return start; +} + +#define SCAN_ITEM_SIZE 128 + +int libipw_wx_get_scan(struct libipw_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct libipw_network *network; + unsigned long flags; + int err = 0; + + char *ev = extra; + char *stop = ev + wrqu->data.length; + int i = 0; + DECLARE_SSID_BUF(ssid); + + LIBIPW_DEBUG_WX("Getting scan\n"); + + spin_lock_irqsave(&ieee->lock, flags); + + list_for_each_entry(network, &ieee->network_list, list) { + i++; + if (stop - ev < SCAN_ITEM_SIZE) { + err = -E2BIG; + break; + } + + if (ieee->scan_age == 0 || + time_after(network->last_scanned + ieee->scan_age, jiffies)) + ev = libipw_translate_scan(ieee, ev, stop, network, + info); + else { + LIBIPW_DEBUG_SCAN("Not showing network '%s (" + "%pM)' due to age (%ums).\n", + print_ssid(ssid, network->ssid, + network->ssid_len), + network->bssid, + elapsed_jiffies_msecs( + network->last_scanned)); + } + } + + spin_unlock_irqrestore(&ieee->lock, flags); + + wrqu->data.length = ev - extra; + wrqu->data.flags = 0; + + LIBIPW_DEBUG_WX("exit: %d networks returned.\n", i); + + return err; +} + +int libipw_wx_set_encode(struct libipw_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *keybuf) +{ + struct iw_point *erq = &(wrqu->encoding); + struct net_device *dev = ieee->dev; + struct libipw_security sec = { + .flags = 0 + }; + int i, key, key_provided, len; + struct lib80211_crypt_data **crypt; + int host_crypto = ieee->host_encrypt || ieee->host_decrypt || ieee->host_build_iv; + DECLARE_SSID_BUF(ssid); + + LIBIPW_DEBUG_WX("SET_ENCODE\n"); + + key = erq->flags & IW_ENCODE_INDEX; + if (key) { + if (key > WEP_KEYS) + return -EINVAL; + key--; + key_provided = 1; + } else { + key_provided = 0; + key = ieee->crypt_info.tx_keyidx; + } + + LIBIPW_DEBUG_WX("Key: %d [%s]\n", key, key_provided ? 
+ "provided" : "default"); + + crypt = &ieee->crypt_info.crypt[key]; + + if (erq->flags & IW_ENCODE_DISABLED) { + if (key_provided && *crypt) { + LIBIPW_DEBUG_WX("Disabling encryption on key %d.\n", + key); + lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt); + } else + LIBIPW_DEBUG_WX("Disabling encryption.\n"); + + /* Check all the keys to see if any are still configured, + * and if no key index was provided, de-init them all */ + for (i = 0; i < WEP_KEYS; i++) { + if (ieee->crypt_info.crypt[i] != NULL) { + if (key_provided) + break; + lib80211_crypt_delayed_deinit(&ieee->crypt_info, + &ieee->crypt_info.crypt[i]); + } + } + + if (i == WEP_KEYS) { + sec.enabled = 0; + sec.encrypt = 0; + sec.level = SEC_LEVEL_0; + sec.flags |= SEC_ENABLED | SEC_LEVEL | SEC_ENCRYPT; + } + + goto done; + } + + sec.enabled = 1; + sec.encrypt = 1; + sec.flags |= SEC_ENABLED | SEC_ENCRYPT; + + if (*crypt != NULL && (*crypt)->ops != NULL && + strcmp((*crypt)->ops->name, "WEP") != 0) { + /* changing to use WEP; deinit previously used algorithm + * on this key */ + lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt); + } + + if (*crypt == NULL && host_crypto) { + struct lib80211_crypt_data *new_crypt; + + /* take WEP into use */ + new_crypt = kzalloc(sizeof(struct lib80211_crypt_data), + GFP_KERNEL); + if (new_crypt == NULL) + return -ENOMEM; + new_crypt->ops = lib80211_get_crypto_ops("WEP"); + if (!new_crypt->ops) { + request_module("lib80211_crypt_wep"); + new_crypt->ops = lib80211_get_crypto_ops("WEP"); + } + + if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + new_crypt->priv = new_crypt->ops->init(key); + + if (!new_crypt->ops || !new_crypt->priv) { + kfree(new_crypt); + new_crypt = NULL; + + printk(KERN_WARNING "%s: could not initialize WEP: " + "load module lib80211_crypt_wep\n", dev->name); + return -EOPNOTSUPP; + } + *crypt = new_crypt; + } + + /* If a new key was provided, set it up */ + if (erq->length > 0) { +#ifdef CONFIG_LIBIPW_DEBUG + DECLARE_SSID_BUF(ssid); +#endif + + len = erq->length <= 5 ? 5 : 13; + memcpy(sec.keys[key], keybuf, erq->length); + if (len > erq->length) + memset(sec.keys[key] + erq->length, 0, + len - erq->length); + LIBIPW_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n", + key, print_ssid(ssid, sec.keys[key], len), + erq->length, len); + sec.key_sizes[key] = len; + if (*crypt) + (*crypt)->ops->set_key(sec.keys[key], len, NULL, + (*crypt)->priv); + sec.flags |= (1 << key); + /* This ensures a key will be activated if no key is + * explicitly set */ + if (key == sec.active_key) + sec.flags |= SEC_ACTIVE_KEY; + + } else { + if (host_crypto) { + len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN, + NULL, (*crypt)->priv); + if (len == 0) { + /* Set a default key of all 0 */ + LIBIPW_DEBUG_WX("Setting key %d to all " + "zero.\n", key); + memset(sec.keys[key], 0, 13); + (*crypt)->ops->set_key(sec.keys[key], 13, NULL, + (*crypt)->priv); + sec.key_sizes[key] = 13; + sec.flags |= (1 << key); + } + } + /* No key data - just set the default TX key index */ + if (key_provided) { + LIBIPW_DEBUG_WX("Setting key %d to default Tx " + "key.\n", key); + ieee->crypt_info.tx_keyidx = key; + sec.active_key = key; + sec.flags |= SEC_ACTIVE_KEY; + } + } + if (erq->flags & (IW_ENCODE_OPEN | IW_ENCODE_RESTRICTED)) { + ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED); + sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : + WLAN_AUTH_SHARED_KEY; + sec.flags |= SEC_AUTH_MODE; + LIBIPW_DEBUG_WX("Auth: %s\n", + sec.auth_mode == WLAN_AUTH_OPEN ? 
+ "OPEN" : "SHARED KEY"); + } + + /* For now we just support WEP, so only set that security level... + * TODO: When WPA is added this is one place that needs to change */ + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */ + sec.encode_alg[key] = SEC_ALG_WEP; + + done: + if (ieee->set_security) + ieee->set_security(dev, &sec); + + /* Do not reset port if card is in Managed mode since resetting will + * generate new IEEE 802.11 authentication which may end up in looping + * with IEEE 802.1X. If your hardware requires a reset after WEP + * configuration (for example... Prism2), implement the reset_port in + * the callbacks structures used to initialize the 802.11 stack. */ + if (ieee->reset_on_keychange && + ieee->iw_mode != IW_MODE_INFRA && + ieee->reset_port && ieee->reset_port(dev)) { + printk(KERN_DEBUG "%s: reset_port failed\n", dev->name); + return -EINVAL; + } + return 0; +} + +int libipw_wx_get_encode(struct libipw_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *keybuf) +{ + struct iw_point *erq = &(wrqu->encoding); + int len, key; + struct lib80211_crypt_data *crypt; + struct libipw_security *sec = &ieee->sec; + + LIBIPW_DEBUG_WX("GET_ENCODE\n"); + + key = erq->flags & IW_ENCODE_INDEX; + if (key) { + if (key > WEP_KEYS) + return -EINVAL; + key--; + } else + key = ieee->crypt_info.tx_keyidx; + + crypt = ieee->crypt_info.crypt[key]; + erq->flags = key + 1; + + if (!sec->enabled) { + erq->length = 0; + erq->flags |= IW_ENCODE_DISABLED; + return 0; + } + + len = sec->key_sizes[key]; + memcpy(keybuf, sec->keys[key], len); + + erq->length = len; + erq->flags |= IW_ENCODE_ENABLED; + + if (ieee->open_wep) + erq->flags |= IW_ENCODE_OPEN; + else + erq->flags |= IW_ENCODE_RESTRICTED; + + return 0; +} + +int libipw_wx_set_encodeext(struct libipw_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct net_device *dev = ieee->dev; + struct iw_point *encoding = &wrqu->encoding; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + int i, idx, ret = 0; + int group_key = 0; + const char *alg, *module; + struct lib80211_crypto_ops *ops; + struct lib80211_crypt_data **crypt; + + struct libipw_security sec = { + .flags = 0, + }; + + idx = encoding->flags & IW_ENCODE_INDEX; + if (idx) { + if (idx < 1 || idx > WEP_KEYS) + return -EINVAL; + idx--; + } else + idx = ieee->crypt_info.tx_keyidx; + + if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { + crypt = &ieee->crypt_info.crypt[idx]; + group_key = 1; + } else { + /* some Cisco APs use idx>0 for unicast in dynamic WEP */ + if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP) + return -EINVAL; + if (ieee->iw_mode == IW_MODE_INFRA) + crypt = &ieee->crypt_info.crypt[idx]; + else + return -EINVAL; + } + + sec.flags |= SEC_ENABLED | SEC_ENCRYPT; + if ((encoding->flags & IW_ENCODE_DISABLED) || + ext->alg == IW_ENCODE_ALG_NONE) { + if (*crypt) + lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt); + + for (i = 0; i < WEP_KEYS; i++) + if (ieee->crypt_info.crypt[i] != NULL) + break; + + if (i == WEP_KEYS) { + sec.enabled = 0; + sec.encrypt = 0; + sec.level = SEC_LEVEL_0; + sec.flags |= SEC_LEVEL; + } + goto done; + } + + sec.enabled = 1; + sec.encrypt = 1; + + if (group_key ? 
!ieee->host_mc_decrypt : + !(ieee->host_encrypt || ieee->host_decrypt || + ieee->host_encrypt_msdu)) + goto skip_host_crypt; + + switch (ext->alg) { + case IW_ENCODE_ALG_WEP: + alg = "WEP"; + module = "lib80211_crypt_wep"; + break; + case IW_ENCODE_ALG_TKIP: + alg = "TKIP"; + module = "lib80211_crypt_tkip"; + break; + case IW_ENCODE_ALG_CCMP: + alg = "CCMP"; + module = "lib80211_crypt_ccmp"; + break; + default: + LIBIPW_DEBUG_WX("%s: unknown crypto alg %d\n", + dev->name, ext->alg); + ret = -EINVAL; + goto done; + } + + ops = lib80211_get_crypto_ops(alg); + if (ops == NULL) { + request_module(module); + ops = lib80211_get_crypto_ops(alg); + } + if (ops == NULL) { + LIBIPW_DEBUG_WX("%s: unknown crypto alg %d\n", + dev->name, ext->alg); + ret = -EINVAL; + goto done; + } + + if (*crypt == NULL || (*crypt)->ops != ops) { + struct lib80211_crypt_data *new_crypt; + + lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt); + + new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL); + if (new_crypt == NULL) { + ret = -ENOMEM; + goto done; + } + new_crypt->ops = ops; + if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + new_crypt->priv = new_crypt->ops->init(idx); + if (new_crypt->priv == NULL) { + kfree(new_crypt); + ret = -EINVAL; + goto done; + } + *crypt = new_crypt; + } + + if (ext->key_len > 0 && (*crypt)->ops->set_key && + (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq, + (*crypt)->priv) < 0) { + LIBIPW_DEBUG_WX("%s: key setting failed\n", dev->name); + ret = -EINVAL; + goto done; + } + + skip_host_crypt: + if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + ieee->crypt_info.tx_keyidx = idx; + sec.active_key = idx; + sec.flags |= SEC_ACTIVE_KEY; + } + + if (ext->alg != IW_ENCODE_ALG_NONE) { + memcpy(sec.keys[idx], ext->key, ext->key_len); + sec.key_sizes[idx] = ext->key_len; + sec.flags |= (1 << idx); + if (ext->alg == IW_ENCODE_ALG_WEP) { + sec.encode_alg[idx] = SEC_ALG_WEP; + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_1; + } else if (ext->alg == IW_ENCODE_ALG_TKIP) { + sec.encode_alg[idx] = SEC_ALG_TKIP; + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_2; + } else if (ext->alg == IW_ENCODE_ALG_CCMP) { + sec.encode_alg[idx] = SEC_ALG_CCMP; + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_3; + } + /* Don't set sec level for group keys. */ + if (group_key) + sec.flags &= ~SEC_LEVEL; + } + done: + if (ieee->set_security) + ieee->set_security(ieee->dev, &sec); + + /* + * Do not reset port if card is in Managed mode since resetting will + * generate new IEEE 802.11 authentication which may end up in looping + * with IEEE 802.1X. If your hardware requires a reset after WEP + * configuration (for example... Prism2), implement the reset_port in + * the callbacks structures used to initialize the 802.11 stack. 
+ */ + if (ieee->reset_on_keychange && + ieee->iw_mode != IW_MODE_INFRA && + ieee->reset_port && ieee->reset_port(dev)) { + LIBIPW_DEBUG_WX("%s: reset_port failed\n", dev->name); + return -EINVAL; + } + + return ret; +} + +int libipw_wx_get_encodeext(struct libipw_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct iw_point *encoding = &wrqu->encoding; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + struct libipw_security *sec = &ieee->sec; + int idx, max_key_len; + + max_key_len = encoding->length - sizeof(*ext); + if (max_key_len < 0) + return -EINVAL; + + idx = encoding->flags & IW_ENCODE_INDEX; + if (idx) { + if (idx < 1 || idx > WEP_KEYS) + return -EINVAL; + idx--; + } else + idx = ieee->crypt_info.tx_keyidx; + + if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) && + ext->alg != IW_ENCODE_ALG_WEP) + if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA) + return -EINVAL; + + encoding->flags = idx + 1; + memset(ext, 0, sizeof(*ext)); + + if (!sec->enabled) { + ext->alg = IW_ENCODE_ALG_NONE; + ext->key_len = 0; + encoding->flags |= IW_ENCODE_DISABLED; + } else { + if (sec->encode_alg[idx] == SEC_ALG_WEP) + ext->alg = IW_ENCODE_ALG_WEP; + else if (sec->encode_alg[idx] == SEC_ALG_TKIP) + ext->alg = IW_ENCODE_ALG_TKIP; + else if (sec->encode_alg[idx] == SEC_ALG_CCMP) + ext->alg = IW_ENCODE_ALG_CCMP; + else + return -EINVAL; + + ext->key_len = sec->key_sizes[idx]; + memcpy(ext->key, sec->keys[idx], ext->key_len); + encoding->flags |= IW_ENCODE_ENABLED; + if (ext->key_len && + (ext->alg == IW_ENCODE_ALG_TKIP || + ext->alg == IW_ENCODE_ALG_CCMP)) + ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID; + + } + + return 0; +} + +EXPORT_SYMBOL(libipw_wx_set_encodeext); +EXPORT_SYMBOL(libipw_wx_get_encodeext); + +EXPORT_SYMBOL(libipw_wx_get_scan); +EXPORT_SYMBOL(libipw_wx_set_encode); +EXPORT_SYMBOL(libipw_wx_get_encode); diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile new file mode 100644 index 0000000..f13ecd0 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -0,0 +1,23 @@ +obj-$(CONFIG_IWLWIFI) += iwlcore.o +iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o +iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o +iwlcore-objs += iwl-scan.o iwl-led.o +iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o +iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o + +CFLAGS_iwl-devtrace.o := -I$(src) + +# AGN +obj-$(CONFIG_IWLAGN) += iwlagn.o +iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o + +iwlagn-$(CONFIG_COMPAT_IWL4965) += iwl-4965.o +iwlagn-$(CONFIG_IWL5000) += iwl-5000.o +iwlagn-$(CONFIG_IWL5000) += iwl-6000.o +iwlagn-$(CONFIG_IWL5000) += iwl-1000.o + +# 3945 +obj-$(CONFIG_IWL3945) += iwl3945.o +iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c new file mode 100644 index 0000000..3bf2e6e --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -0,0 +1,281 @@ +/****************************************************************************** + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-sta.h" +#include "iwl-helpers.h" +#include "iwl-5000-hw.h" +#include "iwl-agn-led.h" + +/* Highest firmware API version supported */ +#define IWL1000_UCODE_API_MAX 3 + +/* Lowest firmware API version supported */ +#define IWL1000_UCODE_API_MIN 1 + +#define IWL1000_FW_PRE "iwlwifi-1000-" +#define _IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE #api ".ucode" +#define IWL1000_MODULE_FIRMWARE(api) _IWL1000_MODULE_FIRMWARE(api) + + +/* + * For 1000, use advance thermal throttling critical temperature threshold, + * but legacy thermal management implementation for now. + * This is for the reason of 1000 uCode using advance thermal throttling API + * but not implement ct_kill_exit based on ct_kill exit temperature + * so the thermal throttling will still based on legacy thermal throttling + * management. 
+ * The code here need to be modified once 1000 uCode has the advanced thermal + * throttling algorithm in place + */ +static void iwl1000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; + priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; +} + +/* NIC configuration for 1000 series */ +static void iwl1000_nic_config(struct iwl_priv *priv) +{ + /* set CSR_HW_CONFIG_REG for uCode use */ + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + /* Setting digital SVR for 1000 card to 1.32V */ + /* locking is acquired in iwl_set_bits_mask_prph() function */ + iwl_set_bits_mask_prph(priv, APMG_DIGITAL_SVR_REG, + APMG_SVR_DIGITAL_VOLTAGE_1_32, + ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK); +} + +static struct iwl_sensitivity_ranges iwl1000_sensitivity = { + .min_nrg_cck = 95, + .max_nrg_cck = 0, /* not used, set to 0 */ + .auto_corr_min_ofdm = 90, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 120, + .auto_corr_min_ofdm_mrc_x1 = 240, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + .auto_corr_max_ofdm_x1 = 155, + .auto_corr_max_ofdm_mrc_x1 = 290, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 170, + .auto_corr_max_cck_mrc = 400, + .nrg_th_cck = 95, + .nrg_th_ofdm = 95, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) +{ + if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && + priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) + priv->cfg->num_of_queues = + priv->cfg->mod_params->num_of_queues; + + priv->hw_params.max_txq_num = priv->cfg->num_of_queues; + priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; + priv->hw_params.scd_bc_tbls_size = + priv->cfg->num_of_queues * + sizeof(struct iwl5000_scd_bc_tbl); + priv->hw_params.tfd_size = sizeof(struct iwl_tfd); + priv->hw_params.max_stations = IWL5000_STATION_COUNT; + priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; + + priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE; + priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE; + + priv->hw_params.max_bsm_size = 0; + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | + BIT(IEEE80211_BAND_5GHZ); + priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; + + priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); + priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); + priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; + priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; + + if (priv->cfg->ops->lib->temp_ops.set_ct_kill) + priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); + + /* Set initial sensitivity parameters */ + /* Set initial calibration set */ + priv->hw_params.sens = &iwl1000_sensitivity; + priv->hw_params.calib_init_cfg = + BIT(IWL_CALIB_XTAL) | + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_TX_IQ_PERD) | + BIT(IWL_CALIB_BASE_BAND); + + return 0; +} + +static struct iwl_lib_ops iwl1000_lib = { + .set_hw_params = iwl1000_hw_set_hw_params, + .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, + .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, + .txq_set_sched = iwl5000_txq_set_sched, + .txq_agg_enable = iwl5000_txq_agg_enable, + .txq_agg_disable = iwl5000_txq_agg_disable, + .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl_hw_txq_free_tfd, + .txq_init = 
iwl_hw_tx_queue_init, + .rx_handler_setup = iwl5000_rx_handler_setup, + .setup_deferred_work = iwl5000_setup_deferred_work, + .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, + .load_ucode = iwl5000_load_ucode, + .dump_nic_event_log = iwl_dump_nic_event_log, + .dump_nic_error_log = iwl_dump_nic_error_log, + .dump_csr = iwl_dump_csr, + .dump_fh = iwl_dump_fh, + .init_alive_start = iwl5000_init_alive_start, + .alive_notify = iwl5000_alive_notify, + .send_tx_power = iwl5000_send_tx_power, + .update_chain_flags = iwl_update_chain_flags, + .apm_ops = { + .init = iwl_apm_init, + .stop = iwl_apm_stop, + .config = iwl1000_nic_config, + .set_pwr_src = iwl_set_pwr_src, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_5000_REG_BAND_1_CHANNELS, + EEPROM_5000_REG_BAND_2_CHANNELS, + EEPROM_5000_REG_BAND_3_CHANNELS, + EEPROM_5000_REG_BAND_4_CHANNELS, + EEPROM_5000_REG_BAND_5_CHANNELS, + EEPROM_5000_REG_BAND_24_HT40_CHANNELS, + EEPROM_5000_REG_BAND_52_HT40_CHANNELS + }, + .verify_signature = iwlcore_eeprom_verify_signature, + .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, + .release_semaphore = iwlcore_eeprom_release_semaphore, + .calib_version = iwl5000_eeprom_calib_version, + .query_addr = iwl5000_eeprom_query_addr, + }, + .post_associate = iwl_post_associate, + .isr = iwl_isr_ict, + .config_ap = iwl_config_ap, + .temp_ops = { + .temperature = iwl5000_temperature, + .set_ct_kill = iwl1000_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, +}; + +static const struct iwl_ops iwl1000_ops = { + .ucode = &iwl5000_ucode, + .lib = &iwl1000_lib, + .hcmd = &iwl5000_hcmd, + .utils = &iwl5000_hcmd_utils, + .led = &iwlagn_led_ops, +}; + +struct iwl_cfg iwl1000_bgn_cfg = { + .name = "1000 Series BGN", + .fw_name_pre = IWL1000_FW_PRE, + .ucode_api_max = IWL1000_UCODE_API_MAX, + .ucode_api_min = IWL1000_UCODE_API_MIN, + .sku = IWL_SKU_G|IWL_SKU_N, + .ops = &iwl1000_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_1000_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_A, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .set_l0s = true, + .use_bsm = false, + .max_ll_items = OTP_MAX_LL_ITEMS_1000, + .shadow_ram_support = false, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +struct iwl_cfg iwl1000_bg_cfg = { + .name = "1000 Series BG", + .fw_name_pre = IWL1000_FW_PRE, + .ucode_api_max = IWL1000_UCODE_API_MAX, + .ucode_api_min = IWL1000_UCODE_API_MIN, + .sku = IWL_SKU_G, + .ops = &iwl1000_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_1000_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_A, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .set_l0s = true, + .use_bsm = false, + .max_ll_items = OTP_MAX_LL_ITEMS_1000, + .shadow_ram_support = false, + .ht_greenfield_support = true, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .support_ct_kill_exit = true, + .plcp_delta_threshold = 
IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h new file mode 100644 index 0000000..042f6bc --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h @@ -0,0 +1,188 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#ifndef __iwl_3945_fh_h__ +#define __iwl_3945_fh_h__ + +/************************************/ +/* iwl3945 Flow Handler Definitions */ +/************************************/ + +/** + * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) + * Addresses are offsets from device's PCI hardware base address. + */ +#define FH39_MEM_LOWER_BOUND (0x0800) +#define FH39_MEM_UPPER_BOUND (0x1000) + +#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140) +#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180) +#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400) +#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0) +#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500) +#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680) + +/* TFDB (Transmit Frame Buffer Descriptor) */ +#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \ + ((_ch) * 2 + (buf)) * 0x28) +#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch)) + +/* CBCC channel is [0,2] */ +#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8) +#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00) +#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04) + +/* RCSR channel is [0,2] */ +#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40) +#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00) +#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04) +#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20) +#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24) + +#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0)) + +/* RSSR */ +#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000) +#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004) + +/* TCSR */ +#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20) +#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00) +#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04) +#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08) + +/* TSSR */ +#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000) +#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008) +#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010) + + +/* DBM */ + +#define FH39_SRVC_CHNL (6) + +#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20) +#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4) + +#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000) + +#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000) + +#define 
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005) + +#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24) +#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16) + +#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \ + (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \ + FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)) + +#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) + +struct iwl3945_tfd_tb { + __le32 addr; + __le32 len; +} __attribute__ ((packed)); + +struct iwl3945_tfd { + __le32 control_flags; + struct iwl3945_tfd_tb tbs[4]; + u8 __pad[28]; +} __attribute__ ((packed)); + + +#endif /* __iwl_3945_fh_h__ */ + diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h new file mode 100644 index 0000000..3a876a8 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h @@ -0,0 +1,296 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-3945-hw.h) only for hardware-related definitions. + * Please use iwl-3945-commands.h for uCode API definitions. + * Please use iwl-3945.h for driver implementation definitions. + */ + +#ifndef __iwl_3945_hw__ +#define __iwl_3945_hw__ + +#include "iwl-eeprom.h" + +/* Time constants */ +#define SHORT_SLOT_TIME 9 +#define LONG_SLOT_TIME 20 + +/* RSSI to dBm */ +#define IWL39_RSSI_OFFSET 95 + +/* + * EEPROM related constants, enums, and structures. + */ +#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7) + +/* + * Mapping of a Tx power level, at factory calibration temperature, + * to a radio/DSP gain table index. + * One for each of 5 "sample" power levels in each band. + * v_det is measured at the factory, using the 3945's built-in power amplifier + * (PA) output voltage detector. This same detector is used during Tx of + * long packets in normal operation to provide feedback as to proper output + * level. + * Data copied from EEPROM. + * DO NOT ALTER THIS STRUCTURE!!! + */ +struct iwl3945_eeprom_txpower_sample { + u8 gain_index; /* index into power (gain) setup table ... */ + s8 power; /* ... for this pwr level for this chnl group */ + u16 v_det; /* PA output voltage */ +} __attribute__ ((packed)); + +/* + * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. + * One for each channel group (a.k.a. "band") (1 for BG, 4 for A). + * Tx power setup code interpolates between the 5 "sample" power levels + * to determine the nominal setup for a requested power level. + * Data copied from EEPROM. + * DO NOT ALTER THIS STRUCTURE!!! + */ +struct iwl3945_eeprom_txpower_group { + struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */ + s32 a, b, c, d, e; /* coefficients for voltage->power + * formula (signed) */ + s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on + * frequency (signed) */ + s8 saturation_power; /* highest power possible by h/w in this + * band */ + u8 group_channel; /* "representative" channel # in this band */ + s16 temperature; /* h/w temperature at factory calib this band + * (signed) */ +} __attribute__ ((packed)); + +/* + * Temperature-based Tx-power compensation data, not band-specific. + * These coefficients are use to modify a/b/c/d/e coeffs based on + * difference between current temperature and factory calib temperature. + * Data copied from EEPROM. 
+ */ +struct iwl3945_eeprom_temperature_corr { + u32 Ta; + u32 Tb; + u32 Tc; + u32 Td; + u32 Te; +} __attribute__ ((packed)); + +/* + * EEPROM map + */ +struct iwl3945_eeprom { + u8 reserved0[16]; + u16 device_id; /* abs.ofs: 16 */ + u8 reserved1[2]; + u16 pmc; /* abs.ofs: 20 */ + u8 reserved2[20]; + u8 mac_address[6]; /* abs.ofs: 42 */ + u8 reserved3[58]; + u16 board_revision; /* abs.ofs: 106 */ + u8 reserved4[11]; + u8 board_pba_number[9]; /* abs.ofs: 119 */ + u8 reserved5[8]; + u16 version; /* abs.ofs: 136 */ + u8 sku_cap; /* abs.ofs: 138 */ + u8 leds_mode; /* abs.ofs: 139 */ + u16 oem_mode; + u16 wowlan_mode; /* abs.ofs: 142 */ + u16 leds_time_interval; /* abs.ofs: 144 */ + u8 leds_off_time; /* abs.ofs: 146 */ + u8 leds_on_time; /* abs.ofs: 147 */ + u8 almgor_m_version; /* abs.ofs: 148 */ + u8 antenna_switch_type; /* abs.ofs: 149 */ + u8 reserved6[42]; + u8 sku_id[4]; /* abs.ofs: 192 */ + +/* + * Per-channel regulatory data. + * + * Each channel that *might* be supported by 3945 or 4965 has a fixed location + * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory + * txpower (MSB). + * + * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) + * channels (only for 4965, not supported by 3945) appear later in the EEPROM. + * + * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + */ + u16 band_1_count; /* abs.ofs: 196 */ + struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */ + +/* + * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, + * 5.0 GHz channels 7, 8, 11, 12, 16 + * (4915-5080MHz) (none of these is ever supported) + */ + u16 band_2_count; /* abs.ofs: 226 */ + struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ + +/* + * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 + * (5170-5320MHz) + */ + u16 band_3_count; /* abs.ofs: 254 */ + struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ + +/* + * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 + * (5500-5700MHz) + */ + u16 band_4_count; /* abs.ofs: 280 */ + struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ + +/* + * 5.7 GHz channels 145, 149, 153, 157, 161, 165 + * (5725-5825MHz) + */ + u16 band_5_count; /* abs.ofs: 304 */ + struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ + + u8 reserved9[194]; + +/* + * 3945 Txpower calibration data. + */ +#define IWL_NUM_TX_CALIB_GROUPS 5 + struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS]; +/* abs.ofs: 512 */ + struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ + u8 reserved16[172]; /* fill out to full 1024 byte block */ +} __attribute__ ((packed)); + +#define IWL3945_EEPROM_IMG_SIZE 1024 + +/* End of EEPROM */ + +#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */ +#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ + +/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ +#define IWL39_NUM_QUEUES 5 +#define IWL_NUM_SCAN_RATES (2) + +#define IWL_DEFAULT_TX_RETRY 15 + +/*********************************************/ + +#define RFD_SIZE 4 +#define NUM_TFD_CHUNKS 4 + +#define RX_QUEUE_SIZE 256 +#define RX_QUEUE_MASK 255 +#define RX_QUEUE_SIZE_LOG 8 + +#define U32_PAD(n) ((4-(n))&0x3) + +#define TFD_CTL_COUNT_SET(n) (n << 24) +#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7) +#define TFD_CTL_PAD_SET(n) (n << 28) +#define TFD_CTL_PAD_GET(ctl) (ctl >> 28) + +/* Sizes and addresses for instruction and data memory (SRAM) in + * 3945's embedded processor. 
Driver access is via HBUS_TARG_MEM_* regs. */ +#define IWL39_RTC_INST_LOWER_BOUND (0x000000) +#define IWL39_RTC_INST_UPPER_BOUND (0x014000) + +#define IWL39_RTC_DATA_LOWER_BOUND (0x800000) +#define IWL39_RTC_DATA_UPPER_BOUND (0x808000) + +#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \ + IWL39_RTC_INST_LOWER_BOUND) +#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \ + IWL39_RTC_DATA_LOWER_BOUND) + +#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE +#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE + +/* Size of uCode instruction memory in bootstrap state machine */ +#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE + +static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= IWL39_RTC_DATA_LOWER_BOUND) && + (addr < IWL39_RTC_DATA_UPPER_BOUND); +} + +/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE + * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ +struct iwl3945_shared { + __le32 tx_base_ptr[8]; +} __attribute__ ((packed)); + +static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags) +{ + return le16_to_cpu(rate_n_flags) & 0xFF; +} + +static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags) +{ + return le16_to_cpu(rate_n_flags); +} + +static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags) +{ + return cpu_to_le16((u16)rate|flags); +} +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c new file mode 100644 index 0000000..abe2b73 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c @@ -0,0 +1,91 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-commands.h" +#include "iwl-3945.h" +#include "iwl-core.h" +#include "iwl-dev.h" +#include "iwl-3945-led.h" + + +/* Send led command */ +static int iwl3945_send_led_cmd(struct iwl_priv *priv, + struct iwl_led_cmd *led_cmd) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_LEDS_CMD, + .len = sizeof(struct iwl_led_cmd), + .data = led_cmd, + .flags = CMD_ASYNC, + .callback = NULL, + }; + + return iwl_send_cmd(priv, &cmd); +} + +/* Set led on command */ +static int iwl3945_led_on(struct iwl_priv *priv) +{ + struct iwl_led_cmd led_cmd = { + .id = IWL_LED_LINK, + .on = IWL_LED_SOLID, + .off = 0, + .interval = IWL_DEF_LED_INTRVL + }; + return iwl3945_send_led_cmd(priv, &led_cmd); +} + +/* Set led off command */ +static int iwl3945_led_off(struct iwl_priv *priv) +{ + struct iwl_led_cmd led_cmd = { + .id = IWL_LED_LINK, + .on = 0, + .off = 0, + .interval = IWL_DEF_LED_INTRVL + }; + IWL_DEBUG_LED(priv, "led off\n"); + return iwl3945_send_led_cmd(priv, &led_cmd); +} + +const struct iwl_led_ops iwl3945_led_ops = { + .cmd = iwl3945_send_led_cmd, + .on = iwl3945_led_on, + .off = iwl3945_led_off, +}; diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h new file mode 100644 index 0000000..ce990ad --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h @@ -0,0 +1,32 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_3945_led_h__ +#define __iwl_3945_led_h__ + +extern const struct iwl_led_ops iwl3945_led_ops; + +#endif /* __iwl_3945_led_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c new file mode 100644 index 0000000..47909f9 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c @@ -0,0 +1,973 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "iwl-commands.h" +#include "iwl-3945.h" +#include "iwl-sta.h" + +#define RS_NAME "iwl-3945-rs" + +static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202 +}; + +static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125 +}; + +static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = { + 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186 +}; + +static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0 +}; + +struct iwl3945_tpt_entry { + s8 min_rssi; + u8 index; +}; + +static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = { + {-60, IWL_RATE_54M_INDEX}, + {-64, IWL_RATE_48M_INDEX}, + {-72, IWL_RATE_36M_INDEX}, + {-80, IWL_RATE_24M_INDEX}, + {-84, IWL_RATE_18M_INDEX}, + {-85, IWL_RATE_12M_INDEX}, + {-87, IWL_RATE_9M_INDEX}, + {-89, IWL_RATE_6M_INDEX} +}; + +static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = { + {-60, IWL_RATE_54M_INDEX}, + {-64, IWL_RATE_48M_INDEX}, + {-68, IWL_RATE_36M_INDEX}, + {-80, IWL_RATE_24M_INDEX}, + {-84, IWL_RATE_18M_INDEX}, + {-85, IWL_RATE_12M_INDEX}, + {-86, IWL_RATE_11M_INDEX}, + {-88, IWL_RATE_5M_INDEX}, + {-90, IWL_RATE_2M_INDEX}, + {-92, IWL_RATE_1M_INDEX} +}; + +#define IWL_RATE_MAX_WINDOW 62 +#define IWL_RATE_FLUSH (3*HZ) +#define IWL_RATE_WIN_FLUSH (HZ/2) +#define IWL39_RATE_HIGH_TH 11520 +#define IWL_SUCCESS_UP_TH 8960 +#define IWL_SUCCESS_DOWN_TH 10880 +#define IWL_RATE_MIN_FAILURE_TH 6 +#define IWL_RATE_MIN_SUCCESS_TH 8 +#define IWL_RATE_DECREASE_TH 1920 +#define IWL_RATE_RETRY_TH 15 + +static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band) +{ + u32 index = 0; + u32 table_size = 0; + struct iwl3945_tpt_entry *tpt_table = NULL; + + if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL)) + rssi = IWL_MIN_RSSI_VAL; + + switch (band) { + case IEEE80211_BAND_2GHZ: + tpt_table = iwl3945_tpt_table_g; + table_size = ARRAY_SIZE(iwl3945_tpt_table_g); + break; + + case IEEE80211_BAND_5GHZ: + tpt_table = iwl3945_tpt_table_a; + table_size = ARRAY_SIZE(iwl3945_tpt_table_a); + break; + + default: + BUG(); + break; + } + + while ((index < table_size) && (rssi < tpt_table[index].min_rssi)) + index++; + + index = min(index, (table_size - 1)); + + return tpt_table[index].index; +} + +static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = -1; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +/** + * iwl3945_rate_scale_flush_windows - flush out the rate scale windows + * + * 
Returns the number of windows that have gathered data but were + * not flushed. If there were any that were not flushed, then + * reschedule the rate flushing routine. + */ +static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta) +{ + int unflushed = 0; + int i; + unsigned long flags; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + + /* + * For each rate, if we have collected data on that rate + * and it has been more than IWL_RATE_WIN_FLUSH + * since we flushed, clear out the gathered statistics + */ + for (i = 0; i < IWL_RATE_COUNT_3945; i++) { + if (!rs_sta->win[i].counter) + continue; + + spin_lock_irqsave(&rs_sta->lock, flags); + if (time_after(jiffies, rs_sta->win[i].stamp + + IWL_RATE_WIN_FLUSH)) { + IWL_DEBUG_RATE(priv, "flushing %d samples of rate " + "index %d\n", + rs_sta->win[i].counter, i); + iwl3945_clear_window(&rs_sta->win[i]); + } else + unflushed++; + spin_unlock_irqrestore(&rs_sta->lock, flags); + } + + return unflushed; +} + +#define IWL_RATE_FLUSH_MAX 5000 /* msec */ +#define IWL_RATE_FLUSH_MIN 50 /* msec */ +#define IWL_AVERAGE_PACKETS 1500 + +static void iwl3945_bg_rate_scale_flush(unsigned long data) +{ + struct iwl3945_rs_sta *rs_sta = (void *)data; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + int unflushed = 0; + unsigned long flags; + u32 packet_count, duration, pps; + + IWL_DEBUG_RATE(priv, "enter\n"); + + unflushed = iwl3945_rate_scale_flush_windows(rs_sta); + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* Number of packets Rx'd since last time this timer ran */ + packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1; + + rs_sta->last_tx_packets = rs_sta->tx_packets + 1; + + if (unflushed) { + duration = + jiffies_to_msecs(jiffies - rs_sta->last_partial_flush); + + IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n", + packet_count, duration); + + /* Determine packets per second */ + if (duration) + pps = (packet_count * 1000) / duration; + else + pps = 0; + + if (pps) { + duration = (IWL_AVERAGE_PACKETS * 1000) / pps; + if (duration < IWL_RATE_FLUSH_MIN) + duration = IWL_RATE_FLUSH_MIN; + else if (duration > IWL_RATE_FLUSH_MAX) + duration = IWL_RATE_FLUSH_MAX; + } else + duration = IWL_RATE_FLUSH_MAX; + + rs_sta->flush_time = msecs_to_jiffies(duration); + + IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n", + duration, packet_count); + + mod_timer(&rs_sta->rate_scale_flush, jiffies + + rs_sta->flush_time); + + rs_sta->last_partial_flush = jiffies; + } else { + rs_sta->flush_time = IWL_RATE_FLUSH; + rs_sta->flush_pending = 0; + } + /* If there weren't any unflushed entries, we don't schedule the timer + * to run again */ + + rs_sta->last_flush = jiffies; + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + IWL_DEBUG_RATE(priv, "leave\n"); +} + +/** + * iwl3945_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 64 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. + */ +static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta, + struct iwl3945_rate_scale_data *window, + int success, int retries, int index) +{ + unsigned long flags; + s32 fail_count; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + + if (!retries) { + IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n"); + return; + } + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history window; anything older isn't really relevant any more. 
+ * If we have filled up the sliding window, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + * */ + while (retries > 0) { + if (window->counter >= IWL_RATE_MAX_WINDOW) { + + /* remove earliest */ + window->counter = IWL_RATE_MAX_WINDOW - 1; + + if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) { + window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1)); + window->success_counter--; + } + } + + /* Increment frames-attempted counter */ + window->counter++; + + /* Shift bitmap by one frame (throw away oldest history), + * OR in "1", and increment "success" if this + * frame was successful. */ + window->data <<= 1; + if (success > 0) { + window->success_counter++; + window->data |= 0x1; + success--; + } + + retries--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = ((window->success_ratio * + rs_sta->expected_tpt[index] + 64) / 128); + else + window->average_tpt = IWL_INVALID_VALUE; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + spin_unlock_irqrestore(&rs_sta->lock, flags); + +} + +static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ + struct iwl3945_rs_sta *rs_sta = priv_sta; + struct iwl_priv *priv = (struct iwl_priv *)priv_r; + int i; + + IWL_DEBUG_RATE(priv, "enter\n"); + + spin_lock_init(&rs_sta->lock); + + rs_sta->priv = priv; + + rs_sta->start_rate = IWL_RATE_INVALID; + + /* default to just 802.11b */ + rs_sta->expected_tpt = iwl3945_expected_tpt_b; + + rs_sta->last_partial_flush = jiffies; + rs_sta->last_flush = jiffies; + rs_sta->flush_time = IWL_RATE_FLUSH; + rs_sta->last_tx_packets = 0; + rs_sta->ibss_sta_added = 0; + + init_timer(&rs_sta->rate_scale_flush); + rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; + rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush; + + for (i = 0; i < IWL_RATE_COUNT_3945; i++) + iwl3945_clear_window(&rs_sta->win[i]); + + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. 
*/ + + for (i = sband->n_bitrates - 1; i >= 0; i--) { + if (sta->supp_rates[sband->band] & (1 << i)) { + rs_sta->last_txrate_idx = i; + break; + } + } + + priv->sta_supp_rates = sta->supp_rates[sband->band]; + /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */ + if (sband->band == IEEE80211_BAND_5GHZ) { + rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; + priv->sta_supp_rates = priv->sta_supp_rates << + IWL_FIRST_OFDM_RATE; + } + + + IWL_DEBUG_RATE(priv, "leave\n"); +} + +static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} + +/* rate scale requires free function to be implemented */ +static void rs_free(void *priv) +{ + return; +} + +static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp) +{ + struct iwl3945_rs_sta *rs_sta; + struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; + struct iwl_priv *priv __maybe_unused = iwl_priv; + + IWL_DEBUG_RATE(priv, "enter\n"); + + rs_sta = &psta->rs_sta; + + IWL_DEBUG_RATE(priv, "leave\n"); + + return rs_sta; +} + +static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, + void *priv_sta) +{ + struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; + struct iwl3945_rs_sta *rs_sta = &psta->rs_sta; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + + IWL_DEBUG_RATE(priv, "enter\n"); + del_timer_sync(&rs_sta->rate_scale_flush); + IWL_DEBUG_RATE(priv, "leave\n"); +} + + +/** + * rs_tx_status - Update rate control values based on Tx results + * + * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by + * the hardware for each rate. + */ +static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + s8 retries = 0, current_count; + int scale_rate_index, first_index, last_index; + unsigned long flags; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct iwl3945_rs_sta *rs_sta = priv_sta; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + IWL_DEBUG_RATE(priv, "enter\n"); + + retries = info->status.rates[0].count; + /* Sanity Check for retries */ + if (retries > IWL_RATE_RETRY_TH) + retries = IWL_RATE_RETRY_TH; + + first_index = sband->bitrates[info->status.rates[0].idx].hw_value; + if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) { + IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index); + return; + } + + if (!priv_sta) { + IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n"); + return; + } + + rs_sta->tx_packets++; + + scale_rate_index = first_index; + last_index = first_index; + + /* + * Update the window for each rate. We determine which rates + * were Tx'd based on the total number of retries vs. the number + * of retries configured for each rate -- currently set to the + * priv value 'retry_rate' vs. 
rate specific + * + * On exit from this while loop last_index indicates the rate + * at which the frame was finally transmitted (or failed if no + * ACK) + */ + while (retries > 1) { + if ((retries - 1) < priv->retry_rate) { + current_count = (retries - 1); + last_index = scale_rate_index; + } else { + current_count = priv->retry_rate; + last_index = iwl3945_rs_next_rate(priv, + scale_rate_index); + } + + /* Update this rate accounting for as many retries + * as was used for it (per current_count) */ + iwl3945_collect_tx_data(rs_sta, + &rs_sta->win[scale_rate_index], + 0, current_count, scale_rate_index); + IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n", + scale_rate_index, current_count); + + retries -= current_count; + + scale_rate_index = last_index; + } + + + /* Update the last index window with success/failure based on ACK */ + IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n", + last_index, + (info->flags & IEEE80211_TX_STAT_ACK) ? + "success" : "failure"); + iwl3945_collect_tx_data(rs_sta, + &rs_sta->win[last_index], + info->flags & IEEE80211_TX_STAT_ACK, 1, last_index); + + /* We updated the rate scale window -- if its been more than + * flush_time since the last run, schedule the flush + * again */ + spin_lock_irqsave(&rs_sta->lock, flags); + + if (!rs_sta->flush_pending && + time_after(jiffies, rs_sta->last_flush + + rs_sta->flush_time)) { + + rs_sta->last_partial_flush = jiffies; + rs_sta->flush_pending = 1; + mod_timer(&rs_sta->rate_scale_flush, + jiffies + rs_sta->flush_time); + } + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + IWL_DEBUG_RATE(priv, "leave\n"); + + return; +} + +static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta, + u8 index, u16 rate_mask, enum ieee80211_band band) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + + /* 802.11A walks to the next literal adjacent rate in + * the rate table */ + if (unlikely(band == IEEE80211_BAND_5GHZ)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT_3945; + i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + if (rs_sta->tgg) + low = iwl3945_rates[low].prev_rs_tgg; + else + low = iwl3945_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + if (rs_sta->tgg) + high = iwl3945_rates[high].next_rs_tgg; + else + high = iwl3945_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +/** + * rs_get_rate - find the rate for the requested packet + * + * Returns the ieee80211_rate structure allocated by the driver. + * + * The rate control algorithm has no internal mapping between hw_mode's + * rate ordering and the rate ordering used by the rate control algorithm. + * + * The rate control algorithm uses a single table of rates that goes across + * the entire A/B/G spectrum vs. being limited to just one particular + * hw_mode. 
+ *
+ * As such, we can't convert the index obtained below into the hw_mode's
+ * rate table and must reference the driver allocated rate table
+ *
+ */
+static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
+			void *priv_sta, struct ieee80211_tx_rate_control *txrc)
+{
+	struct ieee80211_supported_band *sband = txrc->sband;
+	struct sk_buff *skb = txrc->skb;
+	u8 low = IWL_RATE_INVALID;
+	u8 high = IWL_RATE_INVALID;
+	u16 high_low;
+	int index;
+	struct iwl3945_rs_sta *rs_sta = priv_sta;
+	struct iwl3945_rate_scale_data *window = NULL;
+	int current_tpt = IWL_INVALID_VALUE;
+	int low_tpt = IWL_INVALID_VALUE;
+	int high_tpt = IWL_INVALID_VALUE;
+	u32 fail_count;
+	s8 scale_action = 0;
+	unsigned long flags;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
+	s8 max_rate_idx = -1;
+	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	IWL_DEBUG_RATE(priv, "enter\n");
+
+	if (rate_control_send_low(sta, priv_sta, txrc))
+		return;
+
+	rate_mask = sta->supp_rates[sband->band];
+
+	/* get user max rate if set */
+	max_rate_idx = txrc->max_rate_idx;
+	if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
+		max_rate_idx += IWL_FIRST_OFDM_RATE;
+	if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
+		max_rate_idx = -1;
+
+	index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
+
+	if (sband->band == IEEE80211_BAND_5GHZ)
+		rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
+
+	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
+	    !rs_sta->ibss_sta_added) {
+		u8 sta_id = iwl_find_station(priv, hdr->addr1);
+
+		if (sta_id == IWL_INVALID_STATION) {
+			IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
+				       hdr->addr1);
+			sta_id = iwl_add_station(priv, hdr->addr1, false,
+						 CMD_ASYNC, NULL);
+		}
+		if (sta_id != IWL_INVALID_STATION)
+			rs_sta->ibss_sta_added = 1;
+	}
+
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	/* for a recent assoc, choose the best rate according
+	 * to the rssi value
+	 */
+	if (rs_sta->start_rate != IWL_RATE_INVALID) {
+		if (rs_sta->start_rate < index &&
+		    (rate_mask & (1 << rs_sta->start_rate)))
+			index = rs_sta->start_rate;
+		rs_sta->start_rate = IWL_RATE_INVALID;
+	}
+
+	/* force user max rate if set by user */
+	if ((max_rate_idx != -1) && (max_rate_idx < index)) {
+		if (rate_mask & (1 << max_rate_idx))
+			index = max_rate_idx;
+	}
+
+	window = &(rs_sta->win[index]);
+
+	fail_count = window->counter - window->success_counter;
+
+	if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
+	     (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
+		spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+		IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
+			       "counter: %d, success_counter: %d, "
+			       "expected_tpt is %sNULL\n",
+			       index,
+			       window->counter,
+			       window->success_counter,
+			       rs_sta->expected_tpt ? "not " : "");
+
+		/* Can't calculate this yet; not enough history */
+		window->average_tpt = IWL_INVALID_VALUE;
+		goto out;
+
+	}
+
+	current_tpt = window->average_tpt;
+
+	high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
+					     sband->band);
+	low = high_low & 0xff;
+	high = (high_low >> 8) & 0xff;
+
+	/* If user set a max rate, don't allow a rate higher than that constraint */
+	if ((max_rate_idx != -1) && (max_rate_idx < high))
+		high = IWL_RATE_INVALID;
+
+	/* Collect measured throughputs of adjacent rates */
+	if (low != IWL_RATE_INVALID)
+		low_tpt = rs_sta->win[low].average_tpt;
+
+	if (high != IWL_RATE_INVALID)
+		high_tpt = rs_sta->win[high].average_tpt;
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+	scale_action = 0;
+
+	/* Low success ratio, need to drop the rate */
+	if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
+		IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
+		scale_action = -1;
+	/* No throughput measured yet for adjacent rates,
+	 * try increase */
+	} else if ((low_tpt == IWL_INVALID_VALUE) &&
+		   (high_tpt == IWL_INVALID_VALUE)) {
+
+		if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
+			scale_action = 1;
+		else if (low != IWL_RATE_INVALID)
+			scale_action = 0;
+
+	/* Both adjacent throughputs are measured, but neither one has
+	 * better throughput; we're using the best rate, don't change
+	 * it! */
+	} else if ((low_tpt != IWL_INVALID_VALUE) &&
+		   (high_tpt != IWL_INVALID_VALUE) &&
+		   (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
+
+		IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
+			       "current_tpt [%d]\n",
+			       low_tpt, high_tpt, current_tpt);
+		scale_action = 0;
+
+	/* At least one of the rates has better throughput */
+	} else {
+		if (high_tpt != IWL_INVALID_VALUE) {
+
+			/* High rate has better throughput, increase
+			 * rate */
+			if (high_tpt > current_tpt &&
+			    window->success_ratio >= IWL_RATE_INCREASE_TH)
+				scale_action = 1;
+			else {
+				IWL_DEBUG_RATE(priv,
+					       "decrease rate because of high tpt\n");
+				scale_action = 0;
+			}
+		} else if (low_tpt != IWL_INVALID_VALUE) {
+			if (low_tpt > current_tpt) {
+				IWL_DEBUG_RATE(priv,
+					       "decrease rate because of low tpt\n");
+				scale_action = -1;
+			} else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
+				/* The lower rate is not better and our
+				 * success ratio is good, try a higher rate */
+				scale_action = 1;
+			}
+		}
+	}
+
+	/* Sanity check; asked for decrease, but success rate or throughput
+	 * has been good at old rate. Don't change it.
*/ + if ((scale_action == -1) && (low != IWL_RATE_INVALID) && + ((window->success_ratio > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * rs_sta->expected_tpt[low])))) + scale_action = 0; + + switch (scale_action) { + case -1: + + /* Decrese rate */ + if (low != IWL_RATE_INVALID) + index = low; + break; + + case 1: + /* Increase rate */ + if (high != IWL_RATE_INVALID) + index = high; + + break; + + case 0: + default: + /* No change */ + break; + } + + IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n", + index, scale_action, low, high); + + out: + + rs_sta->last_txrate_idx = index; + if (sband->band == IEEE80211_BAND_5GHZ) + info->control.rates[0].idx = rs_sta->last_txrate_idx - + IWL_FIRST_OFDM_RATE; + else + info->control.rates[0].idx = rs_sta->last_txrate_idx; + + IWL_DEBUG_RATE(priv, "leave: %d\n", index); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static int iwl3945_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int j; + ssize_t ret; + struct iwl3945_rs_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n" + "rate=0x%X flush time %d\n", + lq_sta->tx_packets, + lq_sta->last_txrate_idx, + lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time)); + for (j = 0; j < IWL_RATE_COUNT_3945; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + lq_sta->win[j].counter, + lq_sta->win[j].success_counter, + lq_sta->win[j].success_ratio); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = iwl3945_sta_dbgfs_stats_table_read, + .open = iwl3945_open_file_generic, +}; + +static void iwl3945_add_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct iwl3945_rs_sta *lq_sta = priv_sta; + + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", 0600, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); + +} + +static void iwl3945_remove_debugfs(void *priv, void *priv_sta) +{ + struct iwl3945_rs_sta *lq_sta = priv_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); +} +#endif + +static struct rate_control_ops rs_ops = { + .module = NULL, + .name = RS_NAME, + .tx_status = rs_tx_status, + .get_rate = rs_get_rate, + .rate_init = rs_rate_init, + .alloc = rs_alloc, + .free = rs_free, + .alloc_sta = rs_alloc_sta, + .free_sta = rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = iwl3945_add_debugfs, + .remove_sta_debugfs = iwl3945_remove_debugfs, +#endif + +}; + +void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) +{ + struct iwl_priv *priv = hw->priv; + s32 rssi = 0; + unsigned long flags; + struct iwl3945_rs_sta *rs_sta; + struct ieee80211_sta *sta; + struct iwl3945_sta_priv *psta; + + IWL_DEBUG_RATE(priv, "enter\n"); + + rcu_read_lock(); + + sta = ieee80211_find_sta(priv->vif, + priv->stations[sta_id].sta.sta.addr); + if (!sta) { + rcu_read_unlock(); + return; + } + + psta = (void *) sta->drv_priv; + rs_sta = &psta->rs_sta; + + spin_lock_irqsave(&rs_sta->lock, flags); + + rs_sta->tgg = 0; + switch (priv->band) { + case IEEE80211_BAND_2GHZ: + /* TODO: this always does G, not a regression */ + if (priv->active_rxon.flags & 
RXON_FLG_TGG_PROTECT_MSK) { + rs_sta->tgg = 1; + rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot; + } else + rs_sta->expected_tpt = iwl3945_expected_tpt_g; + break; + + case IEEE80211_BAND_5GHZ: + rs_sta->expected_tpt = iwl3945_expected_tpt_a; + break; + case IEEE80211_NUM_BANDS: + BUG(); + break; + } + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + rssi = priv->last_rx_rssi; + if (rssi == 0) + rssi = IWL_MIN_RSSI_VAL; + + IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi); + + rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band); + + IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: " + "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate, + iwl3945_rates[rs_sta->start_rate].plcp); + rcu_read_unlock(); +} + +int iwl3945_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rs_ops); +} + +void iwl3945_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_ops); +} + + diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c new file mode 100644 index 0000000..303cc81 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -0,0 +1,2859 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-fh.h" +#include "iwl-3945-fh.h" +#include "iwl-commands.h" +#include "iwl-sta.h" +#include "iwl-3945.h" +#include "iwl-eeprom.h" +#include "iwl-core.h" +#include "iwl-helpers.h" +#include "iwl-led.h" +#include "iwl-3945-led.h" + +#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX, \ + IWL_RATE_##r##M_INDEX_TABLE, \ + IWL_RATE_##ip##M_INDEX_TABLE } + +/* + * Parameter order: + * rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = { + IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */ +}; + +/* 1 = enable the iwl3945_disable_events() function */ +#define IWL_EVT_DISABLE (0) +#define IWL_EVT_DISABLE_SIZE (1532/32) + +/** + * iwl3945_disable_events - Disable selected events in uCode event log + * + * Disable an event by writing "1"s into "disable" + * bitmap in SRAM. Bit position corresponds to Event # (id/type). + * Default values of 0 enable uCode events to be logged. + * Use for only special debugging. This function is just a placeholder as-is, + * you'll need to provide the special bits! ... + * ... and set IWL_EVT_DISABLE to 1. 
*/ +void iwl3945_disable_events(struct iwl_priv *priv) +{ + int i; + u32 base; /* SRAM address of event log header */ + u32 disable_ptr; /* SRAM address of event-disable bitmap array */ + u32 array_size; /* # of u32 entries in array */ + u32 evt_disable[IWL_EVT_DISABLE_SIZE] = { + 0x00000000, /* 31 - 0 Event id numbers */ + 0x00000000, /* 63 - 32 */ + 0x00000000, /* 95 - 64 */ + 0x00000000, /* 127 - 96 */ + 0x00000000, /* 159 - 128 */ + 0x00000000, /* 191 - 160 */ + 0x00000000, /* 223 - 192 */ + 0x00000000, /* 255 - 224 */ + 0x00000000, /* 287 - 256 */ + 0x00000000, /* 319 - 288 */ + 0x00000000, /* 351 - 320 */ + 0x00000000, /* 383 - 352 */ + 0x00000000, /* 415 - 384 */ + 0x00000000, /* 447 - 416 */ + 0x00000000, /* 479 - 448 */ + 0x00000000, /* 511 - 480 */ + 0x00000000, /* 543 - 512 */ + 0x00000000, /* 575 - 544 */ + 0x00000000, /* 607 - 576 */ + 0x00000000, /* 639 - 608 */ + 0x00000000, /* 671 - 640 */ + 0x00000000, /* 703 - 672 */ + 0x00000000, /* 735 - 704 */ + 0x00000000, /* 767 - 736 */ + 0x00000000, /* 799 - 768 */ + 0x00000000, /* 831 - 800 */ + 0x00000000, /* 863 - 832 */ + 0x00000000, /* 895 - 864 */ + 0x00000000, /* 927 - 896 */ + 0x00000000, /* 959 - 928 */ + 0x00000000, /* 991 - 960 */ + 0x00000000, /* 1023 - 992 */ + 0x00000000, /* 1055 - 1024 */ + 0x00000000, /* 1087 - 1056 */ + 0x00000000, /* 1119 - 1088 */ + 0x00000000, /* 1151 - 1120 */ + 0x00000000, /* 1183 - 1152 */ + 0x00000000, /* 1215 - 1184 */ + 0x00000000, /* 1247 - 1216 */ + 0x00000000, /* 1279 - 1248 */ + 0x00000000, /* 1311 - 1280 */ + 0x00000000, /* 1343 - 1312 */ + 0x00000000, /* 1375 - 1344 */ + 0x00000000, /* 1407 - 1376 */ + 0x00000000, /* 1439 - 1408 */ + 0x00000000, /* 1471 - 1440 */ + 0x00000000, /* 1503 - 1472 */ + }; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl3945_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); + return; + } + + disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32))); + array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32))); + + if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { + IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n", + disable_ptr); + for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) + iwl_write_targ_mem(priv, + disable_ptr + (i * sizeof(u32)), + evt_disable[i]); + + } else { + IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n"); + IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n"); + IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n", + disable_ptr, array_size); + } + +} + +static int iwl3945_hwrate_to_plcp_idx(u8 plcp) +{ + int idx; + + for (idx = 0; idx < IWL_RATE_COUNT; idx++) + if (iwl3945_rates[idx].plcp == plcp) + return idx; + return -1; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x + +static const char *iwl3945_get_tx_fail_reason(u32 status) +{ + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_ENTRY(SHORT_LIMIT); + TX_STATUS_ENTRY(LONG_LIMIT); + TX_STATUS_ENTRY(FIFO_UNDERRUN); + TX_STATUS_ENTRY(MGMNT_ABORT); + TX_STATUS_ENTRY(NEXT_FRAG); + TX_STATUS_ENTRY(LIFE_EXPIRE); + TX_STATUS_ENTRY(DEST_PS); + TX_STATUS_ENTRY(ABORTED); + TX_STATUS_ENTRY(BT_RETRY); + TX_STATUS_ENTRY(STA_INVALID); + TX_STATUS_ENTRY(FRAG_DROPPED); + TX_STATUS_ENTRY(TID_DISABLE); + TX_STATUS_ENTRY(FRAME_FLUSHED); + TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); + TX_STATUS_ENTRY(TX_LOCKED); + TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; +} 
+#else +static inline const char *iwl3945_get_tx_fail_reason(u32 status) +{ + return ""; +} +#endif + +/* + * get ieee prev rate from rate scale table. + * for A and B mode we need to overright prev + * value + */ +int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate) +{ + int next_rate = iwl3945_get_prev_ieee_rate(rate); + + switch (priv->band) { + case IEEE80211_BAND_5GHZ: + if (rate == IWL_RATE_12M_INDEX) + next_rate = IWL_RATE_9M_INDEX; + else if (rate == IWL_RATE_6M_INDEX) + next_rate = IWL_RATE_6M_INDEX; + break; + case IEEE80211_BAND_2GHZ: + if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) && + iwl_is_associated(priv)) { + if (rate == IWL_RATE_11M_INDEX) + next_rate = IWL_RATE_5M_INDEX; + } + break; + + default: + break; + } + + return next_rate; +} + + +/** + * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd + * + * When FW advances 'R' index, all entries between old and new 'R' index + * need to be reclaimed. As result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. + */ +static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv, + int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct iwl_tx_info *tx_info; + + BUG_ON(txq_id == IWL_CMD_QUEUE_NUM); + + for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index; + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + tx_info = &txq->txb[txq->q.read_ptr]; + ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]); + tx_info->skb[0] = NULL; + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + } + + if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && + (txq_id != IWL_CMD_QUEUE_NUM) && + priv->mac80211_registered) + iwl_wake_queue(priv, txq_id); +} + +/** + * iwl3945_rx_reply_tx - Handle Tx response + */ +static void iwl3945_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct ieee80211_tx_info *info; + struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->status); + int rate_idx; + int fail; + + if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.write_ptr, + txq->q.read_ptr); + return; + } + + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); + ieee80211_tx_info_clear_status(info); + + /* Fill the MRR chain with some info about on-chip retransmissions */ + rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate); + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx -= IWL_FIRST_OFDM_RATE; + + fail = tx_resp->failure_frame; + + info->status.rates[0].idx = rate_idx; + info->status.rates[0].count = fail + 1; /* add final attempt */ + + /* tx_status->rts_retry_count = tx_resp->failure_rts; */ + info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ? 
+ IEEE80211_TX_STAT_ACK : 0; + + IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", + txq_id, iwl3945_get_tx_fail_reason(status), status, + tx_resp->rate, tx_resp->failure_frame); + + IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index); + iwl3945_tx_queue_reclaim(priv, txq_id, index); + + if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) + IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); +} + + + +/***************************************************************************** + * + * Intel PRO/Wireless 3945ABG/BG Network Connection + * + * RX handler implementations + * + *****************************************************************************/ + +void iwl3945_hw_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl3945_notif_statistics), + le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); + + memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39)); +} + +/****************************************************************************** + * + * Misc. internal state and helper functions + * + ******************************************************************************/ +#ifdef CONFIG_IWLWIFI_DEBUG + +/** + * iwl3945_report_frame - dump frame to syslog during debug sessions + * + * You may hack this function to show different aspects of received frames, + * including selective frame dumps. + * group100 parameter selects whether to show 1 out of 100 good frames. + */ +static void _iwl3945_dbg_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, int group100) +{ + u32 to_us; + u32 print_summary = 0; + u32 print_dump = 0; /* set to 1 to dump all frames' contents */ + u32 hundred = 0; + u32 dataframe = 0; + __le16 fc; + u16 seq_ctl; + u16 channel; + u16 phy_flags; + u16 length; + u16 status; + u16 bcn_tmr; + u32 tsf_low; + u64 tsf; + u8 rssi; + u8 agc; + u16 sig_avg; + u16 noise_diff; + struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); + struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); + u8 *data = IWL_RX_DATA(pkt); + + /* MAC header */ + fc = header->frame_control; + seq_ctl = le16_to_cpu(header->seq_ctrl); + + /* metadata */ + channel = le16_to_cpu(rx_hdr->channel); + phy_flags = le16_to_cpu(rx_hdr->phy_flags); + length = le16_to_cpu(rx_hdr->len); + + /* end-of-frame status and timestamp */ + status = le32_to_cpu(rx_end->status); + bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp); + tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff; + tsf = le64_to_cpu(rx_end->timestamp); + + /* signal statistics */ + rssi = rx_stats->rssi; + agc = rx_stats->agc; + sig_avg = le16_to_cpu(rx_stats->sig_avg); + noise_diff = le16_to_cpu(rx_stats->noise_diff); + + to_us = !compare_ether_addr(header->addr1, priv->mac_addr); + + /* if data frame is to us and all is good, + * (optionally) print summary for only 1 out of every 100 */ + if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) == + cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { + dataframe = 1; + if (!group100) + print_summary = 1; /* print each frame */ + else if (priv->framecnt_to_us < 100) { + priv->framecnt_to_us++; + print_summary = 0; + } else { + priv->framecnt_to_us = 0; + print_summary = 1; + hundred = 1; + } + } else { + /* print summary for all other frames */ + print_summary = 1; + } 
+ + if (print_summary) { + char *title; + int rate; + + if (hundred) + title = "100Frames"; + else if (ieee80211_has_retry(fc)) + title = "Retry"; + else if (ieee80211_is_assoc_resp(fc)) + title = "AscRsp"; + else if (ieee80211_is_reassoc_resp(fc)) + title = "RasRsp"; + else if (ieee80211_is_probe_resp(fc)) { + title = "PrbRsp"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_beacon(fc)) { + title = "Beacon"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_atim(fc)) + title = "ATIM"; + else if (ieee80211_is_auth(fc)) + title = "Auth"; + else if (ieee80211_is_deauth(fc)) + title = "DeAuth"; + else if (ieee80211_is_disassoc(fc)) + title = "DisAssoc"; + else + title = "Frame"; + + rate = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate); + if (rate == -1) + rate = 0; + else + rate = iwl3945_rates[rate].ieee / 2; + + /* print frame summary. + * MAC addresses show just the last byte (for brevity), + * but you can hack it to show more, if you'd like to. */ + if (dataframe) + IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " + "len=%u, rssi=%d, chnl=%d, rate=%d, \n", + title, le16_to_cpu(fc), header->addr1[5], + length, rssi, channel, rate); + else { + /* src/dst addresses assume managed mode */ + IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, " + "src=0x%02x, rssi=%u, tim=%lu usec, " + "phy=0x%02x, chnl=%d\n", + title, le16_to_cpu(fc), header->addr1[5], + header->addr3[5], rssi, + tsf_low - priv->scan_start_tsf, + phy_flags, channel); + } + } + if (print_dump) + iwl_print_hex_dump(priv, IWL_DL_RX, data, length); +} + +static void iwl3945_dbg_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, int group100) +{ + if (iwl_get_debug_level(priv) & IWL_DL_RX) + _iwl3945_dbg_report_frame(priv, pkt, header, group100); +} + +#else +static inline void iwl3945_dbg_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, int group100) +{ +} +#endif + +/* This is necessary only for a number of statistics, see the caller. */ +static int iwl3945_is_network_packet(struct iwl_priv *priv, + struct ieee80211_hdr *header) +{ + /* Filter incoming packets to determine if they are targeted toward + * this network, discarding packets coming from ourselves */ + switch (priv->iw_mode) { + case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */ + /* packets to our IBSS update information */ + return !compare_ether_addr(header->addr3, priv->bssid); + case NL80211_IFTYPE_STATION: /* Header: Dest. 
| AP{BSSID} | Source */ + /* packets to our IBSS update information */ + return !compare_ether_addr(header->addr2, priv->bssid); + default: + return 1; + } +} + +static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); + struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); + u16 len = le16_to_cpu(rx_hdr->len); + struct sk_buff *skb; + int ret; + __le16 fc = hdr->frame_control; + + /* We received data from the HW, so stop the watchdog */ + if (unlikely(len + IWL39_RX_FRAME_SIZE > + PAGE_SIZE << priv->hw_params.rx_page_order)) { + IWL_DEBUG_DROP(priv, "Corruption detected!\n"); + return; + } + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT(priv, + "Dropping packet while interface is not open.\n"); + return; + } + + skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC); + if (!skb) { + IWL_ERR(priv, "alloc_skb failed\n"); + return; + } + + if (!iwl3945_mod_params.sw_crypto) + iwl_set_decrypted_flag(priv, + (struct ieee80211_hdr *)rxb_addr(rxb), + le32_to_cpu(rx_end->status), stats); + + skb_reserve(skb, IWL_LINK_HDR_MAX); + skb_add_rx_frag(skb, 0, rxb->page, + (void *)rx_hdr->payload - (void *)pkt, len); + + /* mac80211 currently doesn't support paged SKB. Convert it to + * linear SKB for management frame and data frame requires + * software decryption or software defragementation. */ + if (ieee80211_is_mgmt(fc) || + ieee80211_has_protected(fc) || + ieee80211_has_morefrags(fc) || + le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) + ret = skb_linearize(skb); + else + ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? + 0 : -ENOMEM; + + if (ret) { + kfree_skb(skb); + goto out; + } + + /* + * XXX: We cannot touch the page and its virtual memory (pkt) after + * here. It might have already been freed by the above skb change. + */ + + iwl_update_stats(priv, false, fc, len); + memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx(priv->hw, skb); + out: + priv->alloc_rxb_page--; + rxb->page = NULL; +} + +#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) + +static void iwl3945_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); + struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); + int snr; + u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg); + u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff); + u8 network_packet; + + rx_status.flag = 0; + rx_status.mactime = le64_to_cpu(rx_end->timestamp); + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel)); + rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
+			IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+	rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
+	if (rx_status.band == IEEE80211_BAND_5GHZ)
+		rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
+
+	rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
+					RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
+
+	/* set the preamble flag if appropriate */
+	if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+		rx_status.flag |= RX_FLAG_SHORTPRE;
+
+	if ((unlikely(rx_stats->phy_count > 20))) {
+		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+			       rx_stats->phy_count);
+		return;
+	}
+
+	if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
+	    || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
+		return;
+	}
+
+
+
+	/* Convert 3945's rssi indicator to dBm */
+	rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
+
+	/* Set default noise value to -127 */
+	if (priv->last_rx_noise == 0)
+		priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+
+	/* 3945 provides noise info for OFDM frames only.
+	 * sig_avg and noise_diff are measured by the 3945's digital signal
+	 * processor (DSP), and indicate linear levels of signal level and
+	 * distortion/noise within the packet preamble after
+	 * automatic gain control (AGC). sig_avg should stay fairly
+	 * constant if the radio's AGC is working well.
+	 * Since these values are linear (not dB or dBm), linear
+	 * signal-to-noise ratio (SNR) is (sig_avg / noise_diff).
+	 * Convert linear SNR to dB SNR, then subtract that from rssi dBm
+	 * to obtain the noise level in dBm.
+	 * Calculate rx_status.noise (in dBm) based on this SNR. */
+	if (rx_stats_noise_diff) {
+		snr = rx_stats_sig_avg / rx_stats_noise_diff;
+		rx_status.noise = rx_status.signal -
+					iwl3945_calc_db_from_ratio(snr);
+	} else {
+		rx_status.noise = priv->last_rx_noise;
+	}
+
+
+	IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
+			rx_status.signal, rx_status.noise,
+			rx_stats_sig_avg, rx_stats_noise_diff);
+
+	header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
+
+	network_packet = iwl3945_is_network_packet(priv, header);
+
+	IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n",
+			      network_packet ?
'*' : ' ', + le16_to_cpu(rx_hdr->channel), + rx_status.signal, rx_status.signal, + rx_status.noise, rx_status.rate_idx); + + /* Set "1" to report good data frames in groups of 100 */ + iwl3945_dbg_report_frame(priv, pkt, header, 1); + iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header); + + if (network_packet) { + priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); + priv->last_tsf = le64_to_cpu(rx_end->timestamp); + priv->last_rx_rssi = rx_status.signal; + priv->last_rx_noise = rx_status.noise; + } + + iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status); +} + +int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, u8 pad) +{ + int count; + struct iwl_queue *q; + struct iwl3945_tfd *tfd, *tfd_tmp; + + q = &txq->q; + tfd_tmp = (struct iwl3945_tfd *)txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); + + if ((count >= NUM_TFD_CHUNKS) || (count < 0)) { + IWL_ERR(priv, "Error can not send more than %d chunks\n", + NUM_TFD_CHUNKS); + return -EINVAL; + } + + tfd->tbs[count].addr = cpu_to_le32(addr); + tfd->tbs[count].len = cpu_to_le32(len); + + count++; + + tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) | + TFD_CTL_PAD_SET(pad)); + + return 0; +} + +/** + * iwl3945_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr] + * + * Does NOT advance any indexes + */ +void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds; + int index = txq->q.read_ptr; + struct iwl3945_tfd *tfd = &tfd_tmp[index]; + struct pci_dev *dev = priv->pci_dev; + int i; + int counter; + + /* sanity check */ + counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); + if (counter > NUM_TFD_CHUNKS) { + IWL_ERR(priv, "Too many chunks: %i\n", counter); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap tx_cmd */ + if (counter) + pci_unmap_single(dev, + pci_unmap_addr(&txq->meta[index], mapping), + pci_unmap_len(&txq->meta[index], len), + PCI_DMA_TODEVICE); + + /* unmap chunks if any */ + + for (i = 1; i < counter; i++) { + pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), + le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE); + if (txq->txb[txq->q.read_ptr].skb[0]) { + struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[0]; + if (txq->txb[txq->q.read_ptr].skb[0]) { + /* Can be called from interrupt context */ + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb[0] = NULL; + } + } + } + return ; +} + +/** + * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD: + * +*/ +void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + int sta_id, int tx_id) +{ + u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value; + u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT - 1); + u16 rate_mask; + int rate; + u8 rts_retry_limit; + u8 data_retry_limit; + __le32 tx_flags; + __le16 fc = hdr->frame_control; + struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; + + rate = iwl3945_rates[rate_index].plcp; + tx_flags = tx_cmd->tx_flags; + + /* We need to figure out how to get the sta->supp_rates while + * in this running context */ + rate_mask = IWL_RATES_MASK; + + + /* Set retry limit on DATA packets and Probe Responses*/ + if (ieee80211_is_probe_resp(fc)) + 
data_retry_limit = 3; + else + data_retry_limit = IWL_DEFAULT_TX_RETRY; + tx_cmd->data_retry_limit = data_retry_limit; + + if (tx_id >= IWL_CMD_QUEUE_NUM) + rts_retry_limit = 3; + else + rts_retry_limit = 7; + + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + tx_cmd->rts_retry_limit = rts_retry_limit; + + if (ieee80211_is_mgmt(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_AUTH): + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + if (tx_flags & TX_CMD_FLG_RTS_MSK) { + tx_flags &= ~TX_CMD_FLG_RTS_MSK; + tx_flags |= TX_CMD_FLG_CTS_MSK; + } + break; + default: + break; + } + } + + tx_cmd->rate = rate; + tx_cmd->tx_flags = tx_flags; + + /* OFDM */ + tx_cmd->supp_rates[0] = + ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF; + + /* CCK */ + tx_cmd->supp_rates[1] = (rate_mask & 0xF); + + IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " + "cck/ofdm mask: 0x%x/0x%x\n", sta_id, + tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags), + tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]); +} + +u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags) +{ + unsigned long flags_spin; + struct iwl_station_entry *station; + + if (sta_id == IWL_INVALID_STATION) + return IWL_INVALID_STATION; + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + station = &priv->stations[sta_id]; + + station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; + station->sta.rate_n_flags = cpu_to_le16(tx_rate); + station->sta.mode = STA_CONTROL_MODIFY_MSK; + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + iwl_send_add_sta(priv, &station->sta, flags); + IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", + sta_id, tx_rate); + return sta_id; +} + +static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src) +{ + if (src == IWL_PWR_SRC_VAUX) { + if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) { + iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + iwl_poll_bit(priv, CSR_GPIO_IN, + CSR_GPIO_IN_VAL_VAUX_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); + } + } else { + iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */ + } + + return 0; +} + +static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr); + iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); + iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0); + iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), + FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | + FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | + FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | + FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | + (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) | + FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | + (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) | + FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); + + /* fake read to flush all prev I/O */ + iwl_read_direct32(priv, FH39_RSSR_CTRL); + + return 0; +} + +static int iwl3945_tx_reset(struct iwl_priv *priv) +{ + + /* bypass mode */ + iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2); + + /* RA 0 is active */ + iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); + + /* all 6 fifo are active */ 
+ iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); + + iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); + iwl_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002); + iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); + iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); + + iwl_write_direct32(priv, FH39_TSSR_CBB_BASE, + priv->shared_phys); + + iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG, + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH); + + + return 0; +} + +/** + * iwl3945_txq_ctx_reset - Reset TX queue context + * + * Destroys all DMA structures and initialize them again + */ +static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) +{ + int rc; + int txq_id, slots_num; + + iwl3945_hw_txq_ctx_free(priv); + + /* allocate tx queue structure */ + rc = iwl_alloc_txq_mem(priv); + if (rc) + return rc; + + /* Tx CMD queue */ + rc = iwl3945_tx_reset(priv); + if (rc) + goto error; + + /* Tx queue(s) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, + txq_id); + if (rc) { + IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return rc; + + error: + iwl3945_hw_txq_ctx_free(priv); + return rc; +} + + +/* + * Start up 3945's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via iwl_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +static int iwl3945_apm_init(struct iwl_priv *priv) +{ + int ret = iwl_apm_init(priv); + + /* Clear APMG (NIC's internal power management) interrupts */ + iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0); + iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); + + /* Reset radio chip */ + iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); + + return ret; +} + +static void iwl3945_nic_config(struct iwl_priv *priv) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + unsigned long flags; + u8 rev_id = 0; + + spin_lock_irqsave(&priv->lock, flags); + + /* Determine HW type */ + pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id); + + IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); + + if (rev_id & PCI_CFG_REV_ID_BIT_RTP) + IWL_DEBUG_INFO(priv, "RTP type \n"); + else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { + IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); + } else { + IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n"); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); + } + + if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { + IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n"); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); + } else + IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n"); + + if ((eeprom->board_revision & 0xF0) == 0xD0) { + IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", + eeprom->board_revision); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } else { + 
IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", + eeprom->board_revision); + iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } + + if (eeprom->almgor_m_version <= 1) { + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); + IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n", + eeprom->almgor_m_version); + } else { + IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n", + eeprom->almgor_m_version); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n"); + + if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n"); +} + +int iwl3945_hw_nic_init(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + + spin_lock_irqsave(&priv->lock, flags); + priv->cfg->ops->lib->apm_ops.init(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); + if (rc) + return rc; + + priv->cfg->ops->lib->apm_ops.config(priv); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + rc = iwl_rx_queue_alloc(priv); + if (rc) { + IWL_ERR(priv, "Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl3945_rx_queue_reset(priv, rxq); + + iwl3945_rx_replenish(priv); + + iwl3945_rx_init(priv, rxq); + + + /* Look at using this instead: + rxq->need_update = 1; + iwl_rx_queue_update_write_ptr(priv, rxq); + */ + + iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7); + + rc = iwl3945_txq_ctx_reset(priv); + if (rc) + return rc; + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +/** + * iwl3945_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + if (priv->txq) + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; + txq_id++) + if (txq_id == IWL_CMD_QUEUE_NUM) + iwl_cmd_queue_free(priv); + else + iwl_tx_queue_free(priv, txq_id); + + /* free tx queue structure */ + iwl_free_txq_mem(priv); +} + +void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) +{ + int txq_id; + + /* stop SCD */ + iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); + iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0); + + /* reset TFD queues */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); + iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, + FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), + 1000); + } + + iwl3945_hw_txq_ctx_free(priv); +} + +/** + * iwl3945_hw_reg_adjust_power_by_temp + * return index delta into power gain settings table +*/ +static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading) +{ + return (new_reading - old_reading) * (-11) / 100; +} + +/** + * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range + */ +static inline int iwl3945_hw_reg_temp_out_of_range(int temperature) +{ + return ((temperature < -260) || (temperature > 25)) ? 
1 : 0; +} + +int iwl3945_hw_get_temperature(struct iwl_priv *priv) +{ + return iwl_read32(priv, CSR_UCODE_DRV_GP2); +} + +/** + * iwl3945_hw_reg_txpower_get_temperature + * get the current temperature by reading from NIC +*/ +static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + int temperature; + + temperature = iwl3945_hw_get_temperature(priv); + + /* driver's okay range is -260 to +25. + * human readable okay range is 0 to +285 */ + IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT); + + /* handle insane temp reading */ + if (iwl3945_hw_reg_temp_out_of_range(temperature)) { + IWL_ERR(priv, "Error bad temperature value %d\n", temperature); + + /* if really really hot(?), + * substitute the 3rd band/group's temp measured at factory */ + if (priv->last_temperature > 100) + temperature = eeprom->groups[2].temperature; + else /* else use most recent "sane" value from driver */ + temperature = priv->last_temperature; + } + + return temperature; /* raw, not "human readable" */ +} + +/* Adjust Txpower only if temperature variance is greater than threshold. + * + * Both are lower than older versions' 9 degrees */ +#define IWL_TEMPERATURE_LIMIT_TIMER 6 + +/** + * is_temp_calib_needed - determines if new calibration is needed + * + * records new temperature in tx_mgr->temperature. + * replaces tx_mgr->last_temperature *only* if calib needed + * (assumes caller will actually do the calibration!). */ +static int is_temp_calib_needed(struct iwl_priv *priv) +{ + int temp_diff; + + priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv); + temp_diff = priv->temperature - priv->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + IWL_DEBUG_POWER(priv, "Same temp,\n"); + else + IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff); + + /* if we don't need calibration, *don't* update last_temperature */ + if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) { + IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n"); + return 0; + } + + IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n"); + + /* assume that caller will actually do calib ... + * update the "last temperature" value */ + priv->last_temperature = priv->temperature; + return 1; +} + +#define IWL_MAX_GAIN_ENTRIES 78 +#define IWL_CCK_FROM_OFDM_POWER_DIFF -5 +#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10) + +/* radio and DSP power table, each step is 1/2 dB. + * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. 
*/ +static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = { + { + {251, 127}, /* 2.4 GHz, highest power */ + {251, 127}, + {251, 127}, + {251, 127}, + {251, 125}, + {251, 110}, + {251, 105}, + {251, 98}, + {187, 125}, + {187, 115}, + {187, 108}, + {187, 99}, + {243, 119}, + {243, 111}, + {243, 105}, + {243, 97}, + {243, 92}, + {211, 106}, + {211, 100}, + {179, 120}, + {179, 113}, + {179, 107}, + {147, 125}, + {147, 119}, + {147, 112}, + {147, 106}, + {147, 101}, + {147, 97}, + {147, 91}, + {115, 107}, + {235, 121}, + {235, 115}, + {235, 109}, + {203, 127}, + {203, 121}, + {203, 115}, + {203, 108}, + {203, 102}, + {203, 96}, + {203, 92}, + {171, 110}, + {171, 104}, + {171, 98}, + {139, 116}, + {227, 125}, + {227, 119}, + {227, 113}, + {227, 107}, + {227, 101}, + {227, 96}, + {195, 113}, + {195, 106}, + {195, 102}, + {195, 95}, + {163, 113}, + {163, 106}, + {163, 102}, + {163, 95}, + {131, 113}, + {131, 106}, + {131, 102}, + {131, 95}, + {99, 113}, + {99, 106}, + {99, 102}, + {99, 95}, + {67, 113}, + {67, 106}, + {67, 102}, + {67, 95}, + {35, 113}, + {35, 106}, + {35, 102}, + {35, 95}, + {3, 113}, + {3, 106}, + {3, 102}, + {3, 95} }, /* 2.4 GHz, lowest power */ + { + {251, 127}, /* 5.x GHz, highest power */ + {251, 120}, + {251, 114}, + {219, 119}, + {219, 101}, + {187, 113}, + {187, 102}, + {155, 114}, + {155, 103}, + {123, 117}, + {123, 107}, + {123, 99}, + {123, 92}, + {91, 108}, + {59, 125}, + {59, 118}, + {59, 109}, + {59, 102}, + {59, 96}, + {59, 90}, + {27, 104}, + {27, 98}, + {27, 92}, + {115, 118}, + {115, 111}, + {115, 104}, + {83, 126}, + {83, 121}, + {83, 113}, + {83, 105}, + {83, 99}, + {51, 118}, + {51, 111}, + {51, 104}, + {51, 98}, + {19, 116}, + {19, 109}, + {19, 102}, + {19, 98}, + {19, 93}, + {171, 113}, + {171, 107}, + {171, 99}, + {139, 120}, + {139, 113}, + {139, 107}, + {139, 99}, + {107, 120}, + {107, 113}, + {107, 107}, + {107, 99}, + {75, 120}, + {75, 113}, + {75, 107}, + {75, 99}, + {43, 120}, + {43, 113}, + {43, 107}, + {43, 99}, + {11, 120}, + {11, 113}, + {11, 107}, + {11, 99}, + {131, 107}, + {131, 99}, + {99, 120}, + {99, 113}, + {99, 107}, + {99, 99}, + {67, 120}, + {67, 113}, + {67, 107}, + {67, 99}, + {35, 120}, + {35, 113}, + {35, 107}, + {35, 99}, + {3, 120} } /* 5.x GHz, lowest power */ +}; + +static inline u8 iwl3945_hw_reg_fix_power_index(int index) +{ + if (index < 0) + return 0; + if (index >= IWL_MAX_GAIN_ENTRIES) + return IWL_MAX_GAIN_ENTRIES - 1; + return (u8) index; +} + +/* Kick off thermal recalibration check every 60 seconds */ +#define REG_RECALIB_PERIOD (60) + +/** + * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests + * + * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK) + * or 6 Mbit (OFDM) rates. + */ +static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index, + s32 rate_index, const s8 *clip_pwrs, + struct iwl_channel_info *ch_info, + int band_index) +{ + struct iwl3945_scan_power_info *scan_power_info; + s8 power; + u8 power_index; + + scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index]; + + /* use this channel group's 6Mbit clipping/saturation pwr, + * but cap at regulatory scan power restriction (set during init + * based on eeprom channel data) for this channel. */ + power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]); + + /* further limit to user's max power preference. + * FIXME: Other spectrum management power limitations do not + * seem to apply?? 
*/ + power = min(power, priv->tx_power_user_lmt); + scan_power_info->requested_power = power; + + /* find difference between new scan *power* and current "normal" + * Tx *power* for 6Mb. Use this difference (x2) to adjust the + * current "normal" temperature-compensated Tx power *index* for + * this rate (1Mb or 6Mb) to yield new temp-compensated scan power + * *index*. */ + power_index = ch_info->power_info[rate_index].power_table_index + - (power - ch_info->power_info + [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2; + + /* store reference index that we use when adjusting *all* scan + * powers. So we can accommodate user (all channel) or spectrum + * management (single channel) power changes "between" temperature + * feedback compensation procedures. + * don't force fit this reference index into gain table; it may be a + * negative number. This will help avoid errors when we're at + * the lower bounds (highest gains, for warmest temperatures) + * of the table. */ + + /* don't exceed table bounds for "real" setting */ + power_index = iwl3945_hw_reg_fix_power_index(power_index); + + scan_power_info->power_table_index = power_index; + scan_power_info->tpc.tx_gain = + power_gain_table[band_index][power_index].tx_gain; + scan_power_info->tpc.dsp_atten = + power_gain_table[band_index][power_index].dsp_atten; +} + +/** + * iwl3945_send_tx_power - fill in Tx Power command with gain settings + * + * Configures power settings for all rates for the current channel, + * using values from channel info struct, and send to NIC + */ +static int iwl3945_send_tx_power(struct iwl_priv *priv) +{ + int rate_idx, i; + const struct iwl_channel_info *ch_info = NULL; + struct iwl3945_txpowertable_cmd txpower = { + .channel = priv->active_rxon.channel, + }; + + txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1; + ch_info = iwl_get_channel_info(priv, + priv->band, + le16_to_cpu(priv->active_rxon.channel)); + if (!ch_info) { + IWL_ERR(priv, + "Failed to get channel info for channel %d [%d]\n", + le16_to_cpu(priv->active_rxon.channel), priv->band); + return -EINVAL; + } + + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on " + "non-Tx channel.\n"); + return 0; + } + + /* fill cmd with power settings for all rates for current channel */ + /* Fill OFDM rate */ + for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0; + rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) { + + txpower.power[i].tpc = ch_info->power_info[i].tpc; + txpower.power[i].rate = iwl3945_rates[rate_idx].plcp; + + IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n", + le16_to_cpu(txpower.channel), + txpower.band, + txpower.power[i].tpc.tx_gain, + txpower.power[i].tpc.dsp_atten, + txpower.power[i].rate); + } + /* Fill CCK rates */ + for (rate_idx = IWL_FIRST_CCK_RATE; + rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) { + txpower.power[i].tpc = ch_info->power_info[i].tpc; + txpower.power[i].rate = iwl3945_rates[rate_idx].plcp; + + IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n", + le16_to_cpu(txpower.channel), + txpower.band, + txpower.power[i].tpc.tx_gain, + txpower.power[i].tpc.dsp_atten, + txpower.power[i].rate); + } + + return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, + sizeof(struct iwl3945_txpowertable_cmd), + &txpower); + +} + +/** + * iwl3945_hw_reg_set_new_power - Configures power tables at new levels + * @ch_info: Channel to update. Uses power_info.requested_power. + * + * Replace requested_power and base_power_index ch_info fields for + * one channel. 
+ * + * Called if user or spectrum management changes power preferences. + * Takes into account h/w and modulation limitations (clip power). + * + * This does *not* send anything to NIC, just sets up ch_info for one channel. + * + * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to + * properly fill out the scan powers, and actual h/w gain settings, + * and send changes to NIC + */ +static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv, + struct iwl_channel_info *ch_info) +{ + struct iwl3945_channel_power_info *power_info; + int power_changed = 0; + int i; + const s8 *clip_pwrs; + int power; + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers; + + /* Get this channel's rate-to-current-power settings table */ + power_info = ch_info->power_info; + + /* update OFDM Txpower settings */ + for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE; + i++, ++power_info) { + int delta_idx; + + /* limit new power to be no more than h/w capability */ + power = min(ch_info->curr_txpow, clip_pwrs[i]); + if (power == power_info->requested_power) + continue; + + /* find difference between old and new requested powers, + * update base (non-temp-compensated) power index */ + delta_idx = (power - power_info->requested_power) * 2; + power_info->base_power_index -= delta_idx; + + /* save new requested power value */ + power_info->requested_power = power; + + power_changed = 1; + } + + /* update CCK Txpower settings, based on OFDM 12M setting ... + * ... all CCK power settings for a given channel are the *same*. */ + if (power_changed) { + power = + ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]. + requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF; + + /* do all CCK rates' iwl3945_channel_power_info structures */ + for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) { + power_info->requested_power = power; + power_info->base_power_index = + ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]. + base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF; + ++power_info; + } + } + + return 0; +} + +/** + * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel + * + * NOTE: Returned power limit may be less (but not more) than requested, + * based strictly on regulatory (eeprom and spectrum mgt) limitations + * (no consideration for h/w clipping limitations). + */ +static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info) +{ + s8 max_power; + +#if 0 + /* if we're using TGd limits, use lower of TGd or EEPROM */ + if (ch_info->tgd_data.max_power != 0) + max_power = min(ch_info->tgd_data.max_power, + ch_info->eeprom.max_power_avg); + + /* else just use EEPROM limits */ + else +#endif + max_power = ch_info->eeprom.max_power_avg; + + return min(max_power, ch_info->max_power_avg); +} + +/** + * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature + * + * Compensate txpower settings of *all* channels for temperature. + * This only accounts for the difference between current temperature + * and the factory calibration temperatures, and bases the new settings + * on the channel's base_power_index. + * + * If RxOn is "associated", this sends the new Txpower to NIC! 
+ */ +static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch_info = NULL; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + int delta_index; + const s8 *clip_pwrs; /* array of h/w max power levels for each rate */ + u8 a_band; + u8 rate_index; + u8 scan_tbl_index; + u8 i; + int ref_temp; + int temperature = priv->temperature; + + /* set up new Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0; i < priv->channel_count; i++) { + ch_info = &priv->channel_info[i]; + a_band = is_channel_a_band(ch_info); + + /* Get this chnlgrp's factory calibration temperature */ + ref_temp = (s16)eeprom->groups[ch_info->group_index]. + temperature; + + /* get power index adjustment based on current and factory + * temps */ + delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature, + ref_temp); + + /* set tx power value for all rates, OFDM and CCK */ + for (rate_index = 0; rate_index < IWL_RATE_COUNT; + rate_index++) { + int power_idx = + ch_info->power_info[rate_index].base_power_index; + + /* temperature compensate */ + power_idx += delta_index; + + /* stay within table range */ + power_idx = iwl3945_hw_reg_fix_power_index(power_idx); + ch_info->power_info[rate_index]. + power_table_index = (u8) power_idx; + ch_info->power_info[rate_index].tpc = + power_gain_table[a_band][power_idx]; + } + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers; + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_index = 0; + scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { + s32 actual_index = (scan_tbl_index == 0) ? + IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE; + iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index, + actual_index, clip_pwrs, + ch_info, a_band); + } + } + + /* send Txpower command for current channel to ucode */ + return priv->cfg->ops->lib->send_tx_power(priv); +} + +int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) +{ + struct iwl_channel_info *ch_info; + s8 max_power; + u8 a_band; + u8 i; + + if (priv->tx_power_user_lmt == power) { + IWL_DEBUG_POWER(priv, "Requested Tx power same as current " + "limit: %ddBm.\n", power); + return 0; + } + + IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power); + priv->tx_power_user_lmt = power; + + /* set up new Tx powers for each and every channel, 2.4 and 5.x */ + + for (i = 0; i < priv->channel_count; i++) { + ch_info = &priv->channel_info[i]; + a_band = is_channel_a_band(ch_info); + + /* find minimum power of all user and regulatory constraints + * (does not consider h/w clipping limitations) */ + max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info); + max_power = min(power, max_power); + if (max_power != ch_info->curr_txpow) { + ch_info->curr_txpow = max_power; + + /* this considers the h/w clipping limitations */ + iwl3945_hw_reg_set_new_power(priv, ch_info); + } + } + + /* update txpower settings for all channels, + * send to NIC if associated. 
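iwl3945_hw_reg_comp_txpower_temp() above repeats one small step for every rate of every channel: add the temperature delta to the factory-based index, then clamp the result to the gain table. A minimal sketch of that per-rate step, assuming a hypothetical table size (the driver clamps through iwl3945_hw_reg_fix_power_index()):

#include <stdio.h>

#define GAIN_TABLE_MAX 77              /* hypothetical last valid index */

static int fix_power_index(int idx)    /* stand-in for the driver's clamp */
{
        if (idx < 0)
                return 0;
        if (idx > GAIN_TABLE_MAX)
                return GAIN_TABLE_MAX;
        return idx;
}

int main(void)
{
        int base_power_index = 36;     /* from factory calibration (made up) */
        int delta_index = -5;          /* device warmer than at factory
                                        * calibration: lower index, more gain */

        printf("compensated index = %d\n",
               fix_power_index(base_power_index + delta_index));
        return 0;
}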
*/ + is_temp_calib_needed(priv); + iwl3945_hw_reg_comp_txpower_temp(priv); + + return 0; +} + +static int iwl3945_send_rxon_assoc(struct iwl_priv *priv) +{ + int rc = 0; + struct iwl_rx_packet *pkt; + struct iwl3945_rxon_assoc_cmd rxon_assoc; + struct iwl_host_cmd cmd = { + .id = REPLY_RXON_ASSOC, + .len = sizeof(rxon_assoc), + .flags = CMD_WANT_SKB, + .data = &rxon_assoc, + }; + const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; + const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; + + if ((rxon1->flags == rxon2->flags) && + (rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = priv->staging_rxon.flags; + rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; + rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; + rxon_assoc.reserved = 0; + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) + return rc; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n"); + rc = -EIO; + } + + iwl_free_pages(priv, cmd.reply_page); + + return rc; +} + +/** + * iwl3945_commit_rxon - commit staging_rxon to hardware + * + * The RXON command in staging_rxon is committed to the hardware and + * the active_rxon structure is updated with the new data. This + * function correctly transitions out of the RXON_ASSOC_MSK state if + * a HW tune is required based on the RXON structure changes. + */ +static int iwl3945_commit_rxon(struct iwl_priv *priv) +{ + /* cast away the const for active_rxon in this function */ + struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon; + struct iwl3945_rxon_cmd *staging_rxon = (void *)&priv->staging_rxon; + int rc = 0; + bool new_assoc = + !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK); + + if (!iwl_is_alive(priv)) + return -1; + + /* always get timestamp with Rx frame */ + staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK; + + /* select antenna */ + staging_rxon->flags &= + ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); + staging_rxon->flags |= iwl3945_get_antenna_flags(priv); + + rc = iwl_check_rxon_cmd(priv); + if (rc) { + IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* If we don't need to send a full RXON, we can use + * iwl3945_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. */ + if (!iwl_full_rxon_required(priv)) { + rc = iwl_send_rxon_assoc(priv); + if (rc) { + IWL_ERR(priv, "Error setting RXON_ASSOC " + "configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); + + return 0; + } + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_is_associated(priv) && new_assoc) { + IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + /* + * reserved4 and 5 could have been filled by the iwlcore code. + * Let's clear them before pushing to the 3945. 
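The early return in iwl3945_send_rxon_assoc() above is simply asking whether any of the four fields carried by RXON_ASSOC changed between the staging and active configurations. The same test written as a standalone predicate, with the command structure reduced to plain integers so the sketch compiles on its own (field values are invented):

#include <stdbool.h>
#include <stdio.h>

struct rxon_flags {                    /* simplified stand-in, not iwl_rxon_cmd */
        unsigned int flags, filter_flags;
        unsigned int cck_basic_rates, ofdm_basic_rates;
};

static bool rxon_assoc_unchanged(const struct rxon_flags *staging,
                                 const struct rxon_flags *active)
{
        return staging->flags == active->flags &&
               staging->filter_flags == active->filter_flags &&
               staging->cck_basic_rates == active->cck_basic_rates &&
               staging->ofdm_basic_rates == active->ofdm_basic_rates;
}

int main(void)
{
        struct rxon_flags staging = { 0x8005, 0x0020, 0x0f, 0xff };
        struct rxon_flags active = staging;     /* nothing changed */

        if (rxon_assoc_unchanged(&staging, &active))
                printf("skip REPLY_RXON_ASSOC, nothing to resend\n");
        return 0;
}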
+ */ + active_rxon->reserved4 = 0; + active_rxon->reserved5 = 0; + rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl3945_rxon_cmd), + &priv->active_rxon); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (rc) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERR(priv, "Error clearing ASSOC_MSK on current " + "configuration (%d).\n", rc); + return rc; + } + } + + IWL_DEBUG_INFO(priv, "Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %pM\n", + (new_assoc ? "" : "out"), + le16_to_cpu(staging_rxon->channel), + staging_rxon->bssid_addr); + + /* + * reserved4 and 5 could have been filled by the iwlcore code. + * Let's clear them before pushing to the 3945. + */ + staging_rxon->reserved4 = 0; + staging_rxon->reserved5 = 0; + + iwl_set_rxon_hwcrypto(priv, !iwl3945_mod_params.sw_crypto); + + /* Apply the new configuration */ + rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl3945_rxon_cmd), + staging_rxon); + if (rc) { + IWL_ERR(priv, "Error setting new configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); + + iwl_clear_stations_table(priv); + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + rc = priv->cfg->ops->lib->send_tx_power(priv); + if (rc) { + IWL_ERR(priv, "Error setting Tx power (%d).\n", rc); + return rc; + } + + /* Add the broadcast address so we can send broadcast frames */ + priv->cfg->ops->lib->add_bcast_station(priv); + + /* If we have set the ASSOC_MSK and we are in BSS mode then + * add the IWL_AP_ID to the station rate table */ + if (iwl_is_associated(priv) && + (priv->iw_mode == NL80211_IFTYPE_STATION)) + if (iwl_add_station(priv, priv->active_rxon.bssid_addr, + true, CMD_SYNC, NULL) == IWL_INVALID_STATION) { + IWL_ERR(priv, "Error adding AP address for transmit\n"); + return -EIO; + } + + /* Init the hardware's rate fallback order based on the band */ + rc = iwl3945_init_hw_rate_table(priv); + if (rc) { + IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc); + return -EIO; + } + + return 0; +} + +/** + * iwl3945_reg_txpower_periodic - called when time to check our temperature. + * + * -- reset periodic timer + * -- see if temp has changed enough to warrant re-calibration ... if so: + * -- correct coeffs for temp (can reset temp timer) + * -- save this temp as "last", + * -- send new set of gain settings to NIC + * NOTE: This should continue working, even when we're not associated, + * so we can keep our internal table of scan powers current. */ +void iwl3945_reg_txpower_periodic(struct iwl_priv *priv) +{ + /* This will kick in the "brute force" + * iwl3945_hw_reg_comp_txpower_temp() below */ + if (!is_temp_calib_needed(priv)) + goto reschedule; + + /* Set up a new set of temp-adjusted TxPowers, send to NIC. 
+ * This is based *only* on current temperature, + * ignoring any previous power measurements */ + iwl3945_hw_reg_comp_txpower_temp(priv); + + reschedule: + queue_delayed_work(priv->workqueue, + &priv->thermal_periodic, REG_RECALIB_PERIOD * HZ); +} + +static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + thermal_periodic.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_reg_txpower_periodic(priv); + mutex_unlock(&priv->mutex); +} + +/** + * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4) + * for the channel. + * + * This function is used when initializing channel-info structs. + * + * NOTE: These channel groups do *NOT* match the bands above! + * These channel groups are based on factory-tested channels; + * on A-band, EEPROM's "group frequency" entries represent the top + * channel in each group 1-4. Group 5 All B/G channels are in group 0. + */ +static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv, + const struct iwl_channel_info *ch_info) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0]; + u8 group; + u16 group_index = 0; /* based on factory calib frequencies */ + u8 grp_channel; + + /* Find the group index for the channel ... don't use index 1(?) */ + if (is_channel_a_band(ch_info)) { + for (group = 1; group < 5; group++) { + grp_channel = ch_grp[group].group_channel; + if (ch_info->channel <= grp_channel) { + group_index = group; + break; + } + } + /* group 4 has a few channels *above* its factory cal freq */ + if (group == 5) + group_index = 4; + } else + group_index = 0; /* 2.4 GHz, group 0 */ + + IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel, + group_index); + return group_index; +} + +/** + * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index + * + * Interpolate to get nominal (i.e. at factory calibration temperature) index + * into radio/DSP gain settings table for requested power. 
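A standalone illustration of the lookup in iwl3945_hw_reg_get_ch_grp_index() above: walk the four A-band groups, each identified by the top channel it was factory-calibrated for, and fall back to group 4 for channels above the last calibrated frequency (2.4 GHz channels would simply use group 0). The group-top channel numbers here are invented, not EEPROM values:

#include <stdio.h>

int main(void)
{
        int group_top[5] = { 0, 44, 64, 116, 165 };  /* index 0 unused (B/G) */
        int channel = 100;                           /* hypothetical channel */
        int group, group_index = 4;                  /* default: above group 4 */

        for (group = 1; group < 5; group++) {
                if (channel <= group_top[group]) {
                        group_index = group;
                        break;
                }
        }
        printf("channel %d -> calibration group %d\n", channel, group_index);
        return 0;
}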
+ */ +static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv, + s8 requested_power, + s32 setting_index, s32 *new_index) +{ + const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + s32 index0, index1; + s32 power = 2 * requested_power; + s32 i; + const struct iwl3945_eeprom_txpower_sample *samples; + s32 gains0, gains1; + s32 res; + s32 denominator; + + chnl_grp = &eeprom->groups[setting_index]; + samples = chnl_grp->samples; + for (i = 0; i < 5; i++) { + if (power == samples[i].power) { + *new_index = samples[i].gain_index; + return 0; + } + } + + if (power > samples[1].power) { + index0 = 0; + index1 = 1; + } else if (power > samples[2].power) { + index0 = 1; + index1 = 2; + } else if (power > samples[3].power) { + index0 = 2; + index1 = 3; + } else { + index0 = 3; + index1 = 4; + } + + denominator = (s32) samples[index1].power - (s32) samples[index0].power; + if (denominator == 0) + return -EINVAL; + gains0 = (s32) samples[index0].gain_index * (1 << 19); + gains1 = (s32) samples[index1].gain_index * (1 << 19); + res = gains0 + (gains1 - gains0) * + ((s32) power - (s32) samples[index0].power) / denominator + + (1 << 18); + *new_index = res >> 19; + return 0; +} + +static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv) +{ + u32 i; + s32 rate_index; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + const struct iwl3945_eeprom_txpower_group *group; + + IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n"); + + for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) { + s8 *clip_pwrs; /* table of power levels for each rate */ + s8 satur_pwr; /* saturation power for each chnl group */ + group = &eeprom->groups[i]; + + /* sanity check on factory saturation power value */ + if (group->saturation_power < 40) { + IWL_WARN(priv, "Error: saturation power is %d, " + "less than minimum expected 40\n", + group->saturation_power); + return; + } + + /* + * Derive requested power levels for each rate, based on + * hardware capabilities (saturation power for band). + * Basic value is 3dB down from saturation, with further + * power reductions for highest 3 data rates. These + * backoffs provide headroom for high rate modulation + * power peaks, without too much distortion (clipping). + */ + /* we'll fill in this array with h/w max power levels */ + clip_pwrs = (s8 *) priv->clip39_groups[i].clip_powers; + + /* divide factory saturation power by 2 to find -3dB level */ + satur_pwr = (s8) (group->saturation_power >> 1); + + /* fill in channel group's nominal powers for each rate */ + for (rate_index = 0; + rate_index < IWL_RATE_COUNT; rate_index++, clip_pwrs++) { + switch (rate_index) { + case IWL_RATE_36M_INDEX_TABLE: + if (i == 0) /* B/G */ + *clip_pwrs = satur_pwr; + else /* A */ + *clip_pwrs = satur_pwr - 5; + break; + case IWL_RATE_48M_INDEX_TABLE: + if (i == 0) + *clip_pwrs = satur_pwr - 7; + else + *clip_pwrs = satur_pwr - 10; + break; + case IWL_RATE_54M_INDEX_TABLE: + if (i == 0) + *clip_pwrs = satur_pwr - 9; + else + *clip_pwrs = satur_pwr - 12; + break; + default: + *clip_pwrs = satur_pwr; + break; + } + } + } +} + +/** + * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM + * + * Second pass (during init) to set up priv->channel_info + * + * Set up Tx-power settings in our channel info database for each VALID + * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values + * and current temperature. 
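The Q19 fixed-point interpolation in iwl3945_hw_reg_get_matched_power_index() above, worked through with two invented factory samples. Powers are in half-dBm units, matching the function's power = 2 * requested_power:

#include <stdio.h>

int main(void)
{
        /* two factory samples bracketing the request: (power, gain_index) */
        int p0 = 24, g0 = 30;          /* 12 dBm */
        int p1 = 16, g1 = 44;          /*  8 dBm */
        int power = 20;                /* request: 10 dBm, i.e. 2 * 10 */

        int gains0 = g0 * (1 << 19);
        int gains1 = g1 * (1 << 19);
        int res = gains0 + (gains1 - gains0) * (power - p0) / (p1 - p0)
                  + (1 << 18);         /* + 0.5 in Q19 for rounding */

        printf("interpolated gain index = %d\n", res >> 19);   /* 37 */
        return 0;
}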
+ * + * Since this is based on current temperature (at init time), these values may + * not be valid for very long, but it gives us a starting/default point, + * and allows us to active (i.e. using Tx) scan. + * + * This does *not* write values to NIC, just sets up our internal table. + */ +int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch_info = NULL; + struct iwl3945_channel_power_info *pwr_info; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + int delta_index; + u8 rate_index; + u8 scan_tbl_index; + const s8 *clip_pwrs; /* array of power levels for each rate */ + u8 gain, dsp_atten; + s8 power; + u8 pwr_index, base_pwr_index, a_band; + u8 i; + int temperature; + + /* save temperature reference, + * so we can determine next time to calibrate */ + temperature = iwl3945_hw_reg_txpower_get_temperature(priv); + priv->last_temperature = temperature; + + iwl3945_hw_reg_init_channel_groups(priv); + + /* initialize Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0, ch_info = priv->channel_info; i < priv->channel_count; + i++, ch_info++) { + a_band = is_channel_a_band(ch_info); + if (!is_channel_valid(ch_info)) + continue; + + /* find this channel's channel group (*not* "band") index */ + ch_info->group_index = + iwl3945_hw_reg_get_ch_grp_index(priv, ch_info); + + /* Get this chnlgrp's rate->max/clip-powers table */ + clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers; + + /* calculate power index *adjustment* value according to + * diff between current temperature and factory temperature */ + delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature, + eeprom->groups[ch_info->group_index]. + temperature); + + IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n", + ch_info->channel, delta_index, temperature + + IWL_TEMP_CONVERT); + + /* set tx power value for all OFDM rates */ + for (rate_index = 0; rate_index < IWL_OFDM_RATES; + rate_index++) { + s32 uninitialized_var(power_idx); + int rc; + + /* use channel group's clip-power table, + * but don't exceed channel's max power */ + s8 pwr = min(ch_info->max_power_avg, + clip_pwrs[rate_index]); + + pwr_info = &ch_info->power_info[rate_index]; + + /* get base (i.e. 
at factory-measured temperature) + * power table index for this rate's power */ + rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr, + ch_info->group_index, + &power_idx); + if (rc) { + IWL_ERR(priv, "Invalid power index\n"); + return rc; + } + pwr_info->base_power_index = (u8) power_idx; + + /* temperature compensate */ + power_idx += delta_index; + + /* stay within range of gain table */ + power_idx = iwl3945_hw_reg_fix_power_index(power_idx); + + /* fill 1 OFDM rate's iwl3945_channel_power_info struct */ + pwr_info->requested_power = pwr; + pwr_info->power_table_index = (u8) power_idx; + pwr_info->tpc.tx_gain = + power_gain_table[a_band][power_idx].tx_gain; + pwr_info->tpc.dsp_atten = + power_gain_table[a_band][power_idx].dsp_atten; + } + + /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/ + pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]; + power = pwr_info->requested_power + + IWL_CCK_FROM_OFDM_POWER_DIFF; + pwr_index = pwr_info->power_table_index + + IWL_CCK_FROM_OFDM_INDEX_DIFF; + base_pwr_index = pwr_info->base_power_index + + IWL_CCK_FROM_OFDM_INDEX_DIFF; + + /* stay within table range */ + pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index); + gain = power_gain_table[a_band][pwr_index].tx_gain; + dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten; + + /* fill each CCK rate's iwl3945_channel_power_info structure + * NOTE: All CCK-rate Txpwrs are the same for a given chnl! + * NOTE: CCK rates start at end of OFDM rates! */ + for (rate_index = 0; + rate_index < IWL_CCK_RATES; rate_index++) { + pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES]; + pwr_info->requested_power = power; + pwr_info->power_table_index = pwr_index; + pwr_info->base_power_index = base_pwr_index; + pwr_info->tpc.tx_gain = gain; + pwr_info->tpc.dsp_atten = dsp_atten; + } + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_index = 0; + scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { + s32 actual_index = (scan_tbl_index == 0) ? + IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE; + iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index, + actual_index, clip_pwrs, ch_info, a_band); + } + } + + return 0; +} + +int iwl3945_hw_rxq_stop(struct iwl_priv *priv) +{ + int rc; + + iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0); + rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS, + FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); + if (rc < 0) + IWL_ERR(priv, "Can't stop Rx DMA.\n"); + + return 0; +} + +int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + int txq_id = txq->q.id; + + struct iwl3945_shared *shared_data = priv->shared_virt; + + shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); + + iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0); + iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0); + + iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), + FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | + FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | + FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | + FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL | + FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE); + + /* fake read to flush all prev. 
writes */ + iwl_read32(priv, FH39_TSSR_CBB_BASE); + + return 0; +} + +/* + * HCMD utils + */ +static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len) +{ + switch (cmd_id) { + case REPLY_RXON: + return sizeof(struct iwl3945_rxon_cmd); + case POWER_TABLE_CMD: + return sizeof(struct iwl3945_powertable_cmd); + default: + return len; + } +} + + +static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) +{ + struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data; + addsta->mode = cmd->mode; + memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); + memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo)); + addsta->station_flags = cmd->station_flags; + addsta->station_flags_msk = cmd->station_flags_msk; + addsta->tid_disable_tx = cpu_to_le16(0); + addsta->rate_n_flags = cmd->rate_n_flags; + addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; + addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; + addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; + + return (u16)sizeof(struct iwl3945_addsta_cmd); +} + + +/** + * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table + */ +int iwl3945_init_hw_rate_table(struct iwl_priv *priv) +{ + int rc, i, index, prev_index; + struct iwl3945_rate_scaling_cmd rate_cmd = { + .reserved = {0, 0, 0}, + }; + struct iwl3945_rate_scaling_info *table = rate_cmd.table; + + for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) { + index = iwl3945_rates[i].table_rs_index; + + table[index].rate_n_flags = + iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0); + table[index].try_cnt = priv->retry_rate; + prev_index = iwl3945_get_prev_ieee_rate(i); + table[index].next_rate_index = + iwl3945_rates[prev_index].table_rs_index; + } + + switch (priv->band) { + case IEEE80211_BAND_5GHZ: + IWL_DEBUG_RATE(priv, "Select A mode rate scale\n"); + /* If one of the following CCK rates is used, + * have it fall back to the 6M OFDM rate */ + for (i = IWL_RATE_1M_INDEX_TABLE; + i <= IWL_RATE_11M_INDEX_TABLE; i++) + table[i].next_rate_index = + iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index; + + /* Don't fall back to CCK rates */ + table[IWL_RATE_12M_INDEX_TABLE].next_rate_index = + IWL_RATE_9M_INDEX_TABLE; + + /* Don't drop out of OFDM rates */ + table[IWL_RATE_6M_INDEX_TABLE].next_rate_index = + iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index; + break; + + case IEEE80211_BAND_2GHZ: + IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n"); + /* If an OFDM rate is used, have it fall back to the + * 1M CCK rates */ + + if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) && + iwl_is_associated(priv)) { + + index = IWL_FIRST_CCK_RATE; + for (i = IWL_RATE_6M_INDEX_TABLE; + i <= IWL_RATE_54M_INDEX_TABLE; i++) + table[i].next_rate_index = + iwl3945_rates[index].table_rs_index; + + index = IWL_RATE_11M_INDEX_TABLE; + /* CCK shouldn't fall back to OFDM... 
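The tables that iwl3945_init_hw_rate_table() fills in are, per rate, just a "retry next with this index" link; following the links gives the fallback chain the device walks when retries fail. A self-contained toy version (the chain below is invented for illustration, not the driver's 2.4 GHz or 5 GHz table):

#include <stdio.h>

int main(void)
{
        const char *rate[] = { "1M", "2M", "5.5M", "6M", "9M", "11M", "12M" };
        int next_rate[]    = {   0,    0,     1,    2,   3,    4,     5   };
        int idx = 6;                           /* start at 12M */

        while (idx != next_rate[idx]) {        /* walk the fallback chain */
                printf("%s -> %s\n", rate[idx], rate[next_rate[idx]]);
                idx = next_rate[idx];
        }
        return 0;
}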
*/ + table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE; + } + break; + + default: + WARN_ON(1); + break; + } + + /* Update the rate scaling for control frame Tx */ + rate_cmd.table_id = 0; + rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + &rate_cmd); + if (rc) + return rc; + + /* Update the rate scaling for data frame Tx */ + rate_cmd.table_id = 1; + return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + &rate_cmd); +} + +/* Called when initializing driver */ +int iwl3945_hw_set_hw_params(struct iwl_priv *priv) +{ + memset((void *)&priv->hw_params, 0, + sizeof(struct iwl_hw_params)); + + priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev, + sizeof(struct iwl3945_shared), + &priv->shared_phys, GFP_KERNEL); + if (!priv->shared_virt) { + IWL_ERR(priv, "failed to allocate pci memory\n"); + mutex_unlock(&priv->mutex); + return -ENOMEM; + } + + /* Assign number of Usable TX queues */ + priv->hw_params.max_txq_num = priv->cfg->num_of_queues; + + priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K); + priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; + priv->hw_params.max_stations = IWL3945_STATION_COUNT; + priv->hw_params.bcast_sta_id = IWL3945_BROADCAST_ID; + + priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR; + priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL; + + return 0; +} + +unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl3945_frame *frame, u8 rate) +{ + struct iwl3945_tx_beacon_cmd *tx_beacon_cmd; + unsigned int frame_size; + + tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + frame_size = iwl3945_fill_beacon_frame(priv, + tx_beacon_cmd->frame, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + + BUG_ON(frame_size > MAX_MPDU_SIZE); + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + + tx_beacon_cmd->tx.rate = rate; + tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK); + + /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/ + tx_beacon_cmd->tx.supp_rates[0] = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + + tx_beacon_cmd->tx.supp_rates[1] = + (IWL_CCK_BASIC_RATES_MASK & 0xF); + + return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size; +} + +void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx; + priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx; +} + +void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv) +{ + INIT_DELAYED_WORK(&priv->thermal_periodic, + iwl3945_bg_reg_txpower_periodic); +} + +void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv) +{ + cancel_delayed_work(&priv->thermal_periodic); +} + +/* check contents of special bootstrap uCode SRAM */ +static int iwl3945_verify_bsm(struct iwl_priv *priv) + { + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + u32 reg; + u32 val; + + IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; + reg < BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image++) { + val = iwl_read_prph(priv, reg); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "BSM uCode verification failed at " + "addr 
0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, + reg - BSM_SRAM_LOWER_BOUND, len, + val, le32_to_cpu(*image)); + return -EIO; + } + } + + IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n"); + + return 0; +} + + +/****************************************************************************** + * + * EEPROM related functions + * + ******************************************************************************/ + +/* + * Clear the OWNER_MSK, to establish driver (instead of uCode running on + * embedded controller) as EEPROM reader; each read is a series of pulses + * to/from the EEPROM chip, not a single event, so even reads could conflict + * if they weren't arbitrated by some ownership mechanism. Here, the driver + * simply claims ownership, which should be safe when this function is called + * (i.e. before loading uCode!). + */ +static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv) +{ + _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); + return 0; +} + + +static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv) +{ + return; +} + + /** + * iwl3945_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int iwl3945_load_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int rc; + int i; + u32 done; + u32 reg_offset; + + IWL_DEBUG_INFO(priv, "Begin load bsm\n"); + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IWL39_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... host DRAM physical address bits 31:0 for 3945. + * NOTE: iwl3945_initialize_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. 
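The NOTE above amounts to: the same four BSM_DRAM_* pointer/byte-count registers get programmed twice with different images, first at the "initialize" uCode (as iwl3945_load_bsm() does just below), later by iwl3945_initialize_alive_start() at the runtime uCode. A hedged, standalone toy of that two-phase programming; the register names, helper, and addresses here are mock values, not the driver's code:

#include <stdio.h>

/* toy stand-ins so the sketch runs on its own; in the driver these are
 * iwl_write_prph() and the BSM_DRAM_* register offsets */
enum { DRAM_INST_PTR, DRAM_DATA_PTR, DRAM_INST_BYTECOUNT, DRAM_DATA_BYTECOUNT };

static void write_prph(int reg, unsigned long val)
{
        printf("reg %d <- 0x%lx\n", reg, val);
}

static void bsm_point_at_image(unsigned long pinst, unsigned long pdata,
                               unsigned long inst_len, unsigned long data_len)
{
        write_prph(DRAM_INST_PTR, pinst);
        write_prph(DRAM_DATA_PTR, pdata);
        write_prph(DRAM_INST_BYTECOUNT, inst_len);
        write_prph(DRAM_DATA_BYTECOUNT, data_len);
}

int main(void)
{
        /* boot: point the BSM at the "initialize" image (addresses invented) */
        bsm_point_at_image(0x1000000, 0x1200000, 0x8000, 0x2000);
        /* after "initialize alive": re-point it at the runtime image */
        bsm_point_at_image(0x2000000, 0x2200000, 0x18000, 0x4000);
        return 0;
}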
*/ + pinst = priv->ucode_init.p_addr; + pdata = priv->ucode_init_data.p_addr; + inst_len = priv->ucode_init.len; + data_len = priv->ucode_init_data.len; + + iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _iwl_write_prph(priv, reg_offset, + le32_to_cpu(*image)); + + rc = iwl3945_verify_bsm(priv); + if (rc) + return rc; + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_write_prph(priv, BSM_WR_MEM_DST_REG, + IWL39_RTC_INST_LOWER_BOUND); + iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + iwl_write_prph(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = iwl_read_prph(priv, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i); + else { + IWL_ERR(priv, "BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. when powering back up after power-save shutdown) */ + iwl_write_prph(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START_EN); + + return 0; +} + +#define IWL3945_UCODE_GET(item) \ +static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode,\ + u32 api_ver) \ +{ \ + return le32_to_cpu(ucode->u.v1.item); \ +} + +static u32 iwl3945_ucode_get_header_size(u32 api_ver) +{ + return UCODE_HEADER_SIZE(1); +} +static u32 iwl3945_ucode_get_build(const struct iwl_ucode_header *ucode, + u32 api_ver) +{ + return 0; +} +static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode, + u32 api_ver) +{ + return (u8 *) ucode->u.v1.data; +} + +IWL3945_UCODE_GET(inst_size); +IWL3945_UCODE_GET(data_size); +IWL3945_UCODE_GET(init_size); +IWL3945_UCODE_GET(init_data_size); +IWL3945_UCODE_GET(boot_size); + +static struct iwl_hcmd_ops iwl3945_hcmd = { + .rxon_assoc = iwl3945_send_rxon_assoc, + .commit_rxon = iwl3945_commit_rxon, +}; + +static struct iwl_ucode_ops iwl3945_ucode = { + .get_header_size = iwl3945_ucode_get_header_size, + .get_build = iwl3945_ucode_get_build, + .get_inst_size = iwl3945_ucode_get_inst_size, + .get_data_size = iwl3945_ucode_get_data_size, + .get_init_size = iwl3945_ucode_get_init_size, + .get_init_data_size = iwl3945_ucode_get_init_data_size, + .get_boot_size = iwl3945_ucode_get_boot_size, + .get_data = iwl3945_ucode_get_data, +}; + +static struct iwl_lib_ops iwl3945_lib = { + .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl3945_hw_txq_free_tfd, + .txq_init = iwl3945_hw_tx_queue_init, + .load_ucode = iwl3945_load_bsm, + .dump_nic_event_log = iwl3945_dump_nic_event_log, + .dump_nic_error_log = iwl3945_dump_nic_error_log, + .apm_ops = { + .init = iwl3945_apm_init, + .stop = iwl_apm_stop, + .config = iwl3945_nic_config, + .set_pwr_src = iwl3945_set_pwr_src, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REGULATORY_BAND_1_CHANNELS, + EEPROM_REGULATORY_BAND_2_CHANNELS, + 
EEPROM_REGULATORY_BAND_3_CHANNELS, + EEPROM_REGULATORY_BAND_4_CHANNELS, + EEPROM_REGULATORY_BAND_5_CHANNELS, + EEPROM_REGULATORY_BAND_NO_HT40, + EEPROM_REGULATORY_BAND_NO_HT40, + }, + .verify_signature = iwlcore_eeprom_verify_signature, + .acquire_semaphore = iwl3945_eeprom_acquire_semaphore, + .release_semaphore = iwl3945_eeprom_release_semaphore, + .query_addr = iwlcore_eeprom_query_addr, + }, + .send_tx_power = iwl3945_send_tx_power, + .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, + .post_associate = iwl3945_post_associate, + .isr = iwl_isr_legacy, + .config_ap = iwl3945_config_ap, + .add_bcast_station = iwl3945_add_bcast_station, +}; + +static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { + .get_hcmd_size = iwl3945_get_hcmd_size, + .build_addsta_hcmd = iwl3945_build_addsta_hcmd, + .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, +}; + +static const struct iwl_ops iwl3945_ops = { + .ucode = &iwl3945_ucode, + .lib = &iwl3945_lib, + .hcmd = &iwl3945_hcmd, + .utils = &iwl3945_hcmd_utils, + .led = &iwl3945_led_ops, +}; + +static struct iwl_cfg iwl3945_bg_cfg = { + .name = "3945BG", + .fw_name_pre = IWL3945_FW_PRE, + .ucode_api_max = IWL3945_UCODE_API_MAX, + .ucode_api_min = IWL3945_UCODE_API_MIN, + .sku = IWL_SKU_G, + .eeprom_size = IWL3945_EEPROM_IMG_SIZE, + .eeprom_ver = EEPROM_3945_EEPROM_VERSION, + .ops = &iwl3945_ops, + .num_of_queues = IWL39_NUM_QUEUES, + .mod_params = &iwl3945_mod_params, + .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, + .set_l0s = false, + .use_bsm = true, + .use_isr_legacy = true, + .ht_greenfield_support = false, + .led_compensation = 64, + .broken_powersave = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, +}; + +static struct iwl_cfg iwl3945_abg_cfg = { + .name = "3945ABG", + .fw_name_pre = IWL3945_FW_PRE, + .ucode_api_max = IWL3945_UCODE_API_MAX, + .ucode_api_min = IWL3945_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G, + .eeprom_size = IWL3945_EEPROM_IMG_SIZE, + .eeprom_ver = EEPROM_3945_EEPROM_VERSION, + .ops = &iwl3945_ops, + .num_of_queues = IWL39_NUM_QUEUES, + .mod_params = &iwl3945_mod_params, + .use_isr_legacy = true, + .ht_greenfield_support = false, + .led_compensation = 64, + .broken_powersave = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, +}; + +DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { + {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)}, + {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)}, + {0} +}; + +MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h new file mode 100644 index 0000000..452dfd5 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945.h @@ -0,0 +1,301 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +/* + * Please use this file (iwl-3945.h) for driver implementation definitions. + * Please use iwl-3945-commands.h for uCode API definitions. + * Please use iwl-3945-hw.h for hardware-related definitions. + */ + +#ifndef __iwl_3945_h__ +#define __iwl_3945_h__ + +#include /* for struct pci_device_id */ +#include +#include + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +extern const struct pci_device_id iwl3945_hw_card_ids[]; + +#include "iwl-csr.h" +#include "iwl-prph.h" +#include "iwl-fh.h" +#include "iwl-3945-hw.h" +#include "iwl-debug.h" +#include "iwl-power.h" +#include "iwl-dev.h" +#include "iwl-led.h" + +/* Highest firmware API version supported */ +#define IWL3945_UCODE_API_MAX 2 + +/* Lowest firmware API version supported */ +#define IWL3945_UCODE_API_MIN 1 + +#define IWL3945_FW_PRE "iwlwifi-3945-" +#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode" +#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api) + +/* Default noise level to report when noise measurement is not available. + * This may be because we're: + * 1) Not associated (4965, no beacon statistics being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) + * Use default noise value of -127 ... this is below the range of measurable + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. 
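Why -127 rather than 0 as the "not available" marker: any averaging an application does stays inside the plausible negative-dBm range instead of being pulled toward zero. A small self-contained illustration with invented samples:

#include <stdio.h>

#define NOISE_NOT_AVAILABLE (-127)    /* mirrors IWL_NOISE_MEAS_NOT_AVAILABLE */

int main(void)
{
        signed char noise[4] = { -96, -95, NOISE_NOT_AVAILABLE, -97 };
        int i, sum = 0;

        for (i = 0; i < 4; i++)
                sum += noise[i];

        /* -103 here; a default of 0 would pull the average up to -72 */
        printf("average noise = %d dBm\n", sum / 4);
        return 0;
}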
*/ +#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* Module parameters accessible from iwl-*.c */ +extern struct iwl_mod_params iwl3945_mod_params; + +struct iwl3945_rate_scale_data { + u64 data; + s32 success_counter; + s32 success_ratio; + s32 counter; + s32 average_tpt; + unsigned long stamp; +}; + +struct iwl3945_rs_sta { + spinlock_t lock; + struct iwl_priv *priv; + s32 *expected_tpt; + unsigned long last_partial_flush; + unsigned long last_flush; + u32 flush_time; + u32 last_tx_packets; + u32 tx_packets; + u8 tgg; + u8 flush_pending; + u8 start_rate; + u8 ibss_sta_added; + struct timer_list rate_scale_flush; + struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945]; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_stats_table_file; +#endif + + /* used to be in sta_info */ + int last_txrate_idx; +}; + + +struct iwl3945_sta_priv { + struct iwl3945_rs_sta rs_sta; +}; + +enum iwl3945_antenna { + IWL_ANTENNA_DIVERSITY, + IWL_ANTENNA_MAIN, + IWL_ANTENNA_AUX +}; + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. + */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +#include "iwl-agn-rs.h" + +#define IWL_TX_FIFO_AC0 0 +#define IWL_TX_FIFO_AC1 1 +#define IWL_TX_FIFO_AC2 2 +#define IWL_TX_FIFO_AC3 3 +#define IWL_TX_FIFO_HCCA_1 5 +#define IWL_TX_FIFO_HCCA_2 6 +#define IWL_TX_FIFO_NONE 7 + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct iwl3945_frame { + union { + struct ieee80211_hdr frame; + struct iwl3945_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +#define IWL_SUPPORTED_RATES_IE_LEN 8 + +#define SCAN_INTERVAL 100 + +#define MAX_TID_COUNT 9 + +#define IWL_INVALID_RATE 0xFF +#define IWL_INVALID_VALUE -1 + +#define STA_PS_STATUS_WAKE 0 +#define STA_PS_STATUS_SLEEP 1 + +struct iwl3945_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\ + x->u.rx_frame.stats.payload + \ + x->u.rx_frame.stats.phy_count)) +#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\ + IWL_RX_HDR(x)->payload + \ + le16_to_cpu(IWL_RX_HDR(x)->len))) +#define IWL_RX_STATS(x) (&x->u.rx_frame.stats) +#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload) + + +/****************************************************************************** + * + * Functions implemented in iwl-base.c which are forward declared here + * for use by iwl-*.c + * + *****************************************************************************/ +extern int iwl3945_calc_db_from_ratio(int sig_ratio); +extern void iwl3945_rx_replenish(void *data); +extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, 
struct iwl_rx_queue *rxq); +extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr,int left); +extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display); +extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); + +/* + * Currently used by iwl-3945-rs... look at restructuring so that it doesn't + * call this... todo... fix that. +*/ +extern u8 iwl3945_sync_station(struct iwl_priv *priv, int sta_id, + u16 tx_rate, u8 flags); + +/****************************************************************************** + * + * Functions implemented in iwl-[34]*.c which are forward declared here + * for use by iwl-base.c + * + * NOTE: The implementation of these functions are hardware specific + * which is why they are in the hardware specific files (vs. iwl-base.c) + * + * Naming convention -- + * iwl3945_ <-- Its part of iwlwifi (should be changed to iwl3945_) + * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW) + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * iwl3945_bg_ <-- Called from work queue context + * iwl3945_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv); +extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv); +extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv); +extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv); +extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv); +extern int iwl3945_hw_nic_init(struct iwl_priv *priv); +extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv); +extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv); +extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv); +extern int iwl3945_hw_nic_reset(struct iwl_priv *priv); +extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, + u8 reset, u8 pad); +extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern int iwl3945_hw_get_temperature(struct iwl_priv *priv); +extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl3945_frame *frame, u8 rate); +void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + int sta_id, int tx_id); +extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv); +extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); +extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +extern void iwl3945_disable_events(struct iwl_priv *priv); +extern int iwl4965_get_temperature(const struct iwl_priv *priv); +extern void iwl3945_post_associate(struct iwl_priv *priv); +extern void iwl3945_config_ap(struct iwl_priv *priv); + +/** + * iwl3945_hw_find_station - Find station id for a given BSSID + * @bssid: MAC address of station ID to find + * + * NOTE: This should not be hardware specific but the code has + * not yet been merged into a single common layer for managing the + * station tables. 
+ */ +extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid); + +/* + * Forward declare iwl-3945.c functions for iwl-base.c + */ +extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv); +extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); +extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv); +extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv); +extern u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, + u16 tx_rate, u8 flags); + +extern const struct iwl_channel_info *iwl3945_get_channel_info( + const struct iwl_priv *priv, enum ieee80211_band band, u16 channel); + +extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate); + +/* Requires full declaration of iwl_priv before including */ +#include "iwl-io.h" + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h new file mode 100644 index 0000000..67ef562 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h @@ -0,0 +1,816 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-4965-hw.h) only for hardware-related definitions. + * Use iwl-commands.h for uCode API definitions. + * Use iwl-dev.h for driver implementation definitions. + */ + +#ifndef __iwl_4965_hw_h__ +#define __iwl_4965_hw_h__ + +#include "iwl-fh.h" + +/* EEPROM */ +#define IWL4965_EEPROM_IMG_SIZE 1024 + +/* + * uCode queue management definitions ... + * The first queue used for block-ack aggregation is #7 (4965 only). + * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7. + */ +#define IWL49_FIRST_AMPDU_QUEUE 7 + +/* Time constants */ +#define SHORT_SLOT_TIME 9 +#define LONG_SLOT_TIME 20 + +/* RSSI to dBm */ +#define IWL49_RSSI_OFFSET 44 + + +/* PCI registers */ +#define PCI_CFG_RETRY_TIMEOUT 0x041 + +/* PCI register values */ +#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 +#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 + +#define IWL_NUM_SCAN_RATES (2) + +#define IWL_DEFAULT_TX_RETRY 15 + + +/* Sizes and addresses for instruction and data memory (SRAM) in + * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ +#define IWL49_RTC_INST_LOWER_BOUND (0x000000) +#define IWL49_RTC_INST_UPPER_BOUND (0x018000) + +#define IWL49_RTC_DATA_LOWER_BOUND (0x800000) +#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000) + +#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \ + IWL49_RTC_INST_LOWER_BOUND) +#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \ + IWL49_RTC_DATA_LOWER_BOUND) + +#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE +#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE + +/* Size of uCode instruction memory in bootstrap state machine */ +#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE + +static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= IWL49_RTC_DATA_LOWER_BOUND) && + (addr < IWL49_RTC_DATA_UPPER_BOUND); +} + +/********************* START TEMPERATURE *************************************/ + +/** + * 4965 temperature calculation. + * + * The driver must calculate the device temperature before calculating + * a txpower setting (amplifier gain is temperature dependent). The + * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration + * values used for the life of the driver, and one of which (R4) is the + * real-time temperature indicator. + * + * uCode provides all 4 values to the driver via the "initialize alive" + * notification (see struct iwl4965_init_alive_resp). After the runtime uCode + * image loads, uCode updates the R4 value via statistics notifications + * (see STATISTICS_NOTIFICATION), which occur after each received beacon + * when associated, or can be requested via REPLY_STATISTICS_CMD. + * + * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver + * must sign-extend to 32 bits before applying formula below. 
+ * + * Formula: + * + * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 + * + * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is + * an additional correction, which should be centered around 0 degrees + * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for + * centering the 97/100 correction around 0 degrees K. + * + * Add 273 to Kelvin value to find degrees Celsius, for comparing current + * temperature with factory-measured temperatures when calculating txpower + * settings. + */ +#define TEMPERATURE_CALIB_KELVIN_OFFSET 8 +#define TEMPERATURE_CALIB_A_VAL 259 + +/* Limit range of calculated temperature to be between these Kelvin values */ +#define IWL_TX_POWER_TEMPERATURE_MIN (263) +#define IWL_TX_POWER_TEMPERATURE_MAX (410) + +#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \ + (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \ + ((t) > IWL_TX_POWER_TEMPERATURE_MAX)) + +/********************* END TEMPERATURE ***************************************/ + +/********************* START TXPOWER *****************************************/ + +/** + * 4965 txpower calculations rely on information from three sources: + * + * 1) EEPROM + * 2) "initialize" alive notification + * 3) statistics notifications + * + * EEPROM data consists of: + * + * 1) Regulatory information (max txpower and channel usage flags) is provided + * separately for each channel that can possibly supported by 4965. + * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz + * (legacy) channels. + * + * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom + * for locations in EEPROM. + * + * 2) Factory txpower calibration information is provided separately for + * sub-bands of contiguous channels. 2.4GHz has just one sub-band, + * but 5 GHz has several sub-bands. + * + * In addition, per-band (2.4 and 5 Ghz) saturation txpowers are provided. + * + * See struct iwl4965_eeprom_calib_info (and the tree of structures + * contained within it) for format, and struct iwl4965_eeprom for + * locations in EEPROM. + * + * "Initialization alive" notification (see struct iwl4965_init_alive_resp) + * consists of: + * + * 1) Temperature calculation parameters. + * + * 2) Power supply voltage measurement. + * + * 3) Tx gain compensation to balance 2 transmitters for MIMO use. + * + * Statistics notifications deliver: + * + * 1) Current values for temperature param R4. + */ + +/** + * To calculate a txpower setting for a given desired target txpower, channel, + * modulation bit rate, and transmitter chain (4965 has 2 transmitters to + * support MIMO and transmit diversity), driver must do the following: + * + * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel. + * Do not exceed regulatory limit; reduce target txpower if necessary. + * + * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31), + * 2 transmitters will be used simultaneously; driver must reduce the + * regulatory limit by 3 dB (half-power) for each transmitter, so the + * combined total output of the 2 transmitters is within regulatory limits. + * + * + * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by + * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]); + * reduce target txpower if necessary. 
+ * + * Backoff values below are in 1/2 dB units (equivalent to steps in + * txpower gain tables): + * + * OFDM 6 - 36 MBit: 10 steps (5 dB) + * OFDM 48 MBit: 15 steps (7.5 dB) + * OFDM 54 MBit: 17 steps (8.5 dB) + * OFDM 60 MBit: 20 steps (10 dB) + * CCK all rates: 10 steps (5 dB) + * + * Backoff values apply to saturation txpower on a per-transmitter basis; + * when using MIMO (2 transmitters), each transmitter uses the same + * saturation level provided in EEPROM, and the same backoff values; + * no reduction (such as with regulatory txpower limits) is required. + * + * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel + * widths and 40 Mhz (.11n HT40) channel widths; there is no separate + * factory measurement for ht40 channels. + * + * The result of this step is the final target txpower. The rest of + * the steps figure out the proper settings for the device to achieve + * that target txpower. + * + * + * 3) Determine (EEPROM) calibration sub band for the target channel, by + * comparing against first and last channels in each sub band + * (see struct iwl4965_eeprom_calib_subband_info). + * + * + * 4) Linearly interpolate (EEPROM) factory calibration measurement sets, + * referencing the 2 factory-measured (sample) channels within the sub band. + * + * Interpolation is based on difference between target channel's frequency + * and the sample channels' frequencies. Since channel numbers are based + * on frequency (5 MHz between each channel number), this is equivalent + * to interpolating based on channel number differences. + * + * Note that the sample channels may or may not be the channels at the + * edges of the sub band. The target channel may be "outside" of the + * span of the sampled channels. + * + * Driver may choose the pair (for 2 Tx chains) of measurements (see + * struct iwl4965_eeprom_calib_ch_info) for which the actual measured + * txpower comes closest to the desired txpower. Usually, though, + * the middle set of measurements is closest to the regulatory limits, + * and is therefore a good choice for all txpower calculations (this + * assumes that high accuracy is needed for maximizing legal txpower, + * while lower txpower configurations do not need as much accuracy). + * + * Driver should interpolate both members of the chosen measurement pair, + * i.e. for both Tx chains (radio transmitters), unless the driver knows + * that only one of the chains will be used (e.g. only one tx antenna + * connected, but this should be unusual). The rate scaling algorithm + * switches antennas to find best performance, so both Tx chains will + * be used (although only one at a time) even for non-MIMO transmissions. + * + * Driver should interpolate factory values for temperature, gain table + * index, and actual power. The power amplifier detector values are + * not used by the driver. + * + * Sanity check: If the target channel happens to be one of the sample + * channels, the results should agree with the sample channel's + * measurements! + * + * + * 5) Find difference between desired txpower and (interpolated) + * factory-measured txpower. Using (interpolated) factory gain table index + * (shown elsewhere) as a starting point, adjust this index lower to + * increase txpower, or higher to decrease txpower, until the target + * txpower is reached. Each step in the gain table is 1/2 dB. + * + * For example, if factory measured txpower is 16 dBm, and target txpower + * is 13 dBm, add 6 steps to the factory gain index to reduce txpower + * by 3 dB. 
+ *
+ *
+ * 6) Find difference between current device temperature and (interpolated)
+ * factory-measured temperature for sub-band. Factory values are in
+ * degrees Celsius. To calculate current temperature, see comments for
+ * "4965 temperature calculation".
+ *
+ * If current temperature is higher than factory temperature, driver must
+ * increase gain (lower gain table index), and vice versa.
+ *
+ * Temperature affects gain differently for different channels:
+ *
+ * 2.4 GHz all channels: 3.5 degrees per half-dB step
+ * 5 GHz channels 34-43: 4.5 degrees per half-dB step
+ * 5 GHz channels >= 44: 4.0 degrees per half-dB step
+ *
+ * NOTE: Temperature can increase rapidly when transmitting, especially
+ * with heavy traffic at high txpowers. Driver should update
+ * temperature calculations often under these conditions to
+ * maintain strong txpower in the face of rising temperature.
+ *
+ *
+ * 7) Find difference between current power supply voltage indicator
+ * (from "initialize alive") and factory-measured power supply voltage
+ * indicator (EEPROM).
+ *
+ * If the current voltage is higher (indicator is lower) than factory
+ * voltage, gain should be reduced (gain table index increased) by:
+ *
+ * (eeprom - current) / 7
+ *
+ * If the current voltage is lower (indicator is higher) than factory
+ * voltage, gain should be increased (gain table index decreased) by:
+ *
+ * 2 * (current - eeprom) / 7
+ *
+ * If number of index steps in either direction turns out to be > 2,
+ * something is wrong ... just use 0.
+ *
+ * NOTE: Voltage compensation is independent of band/channel.
+ *
+ * NOTE: "Initialize" uCode measures current voltage, which is assumed
+ * to be constant after this initial measurement. Voltage
+ * compensation for txpower (number of steps in gain table)
+ * may be calculated once and used until the next uCode bootload.
+ *
+ *
+ * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
+ * adjust txpower for each transmitter chain, so txpower is balanced
+ * between the two chains. There are 5 pairs of tx_atten[group][chain]
+ * values in "initialize alive", one pair for each of 5 channel ranges:
+ *
+ * Group 0: 5 GHz channel 34-43
+ * Group 1: 5 GHz channel 44-70
+ * Group 2: 5 GHz channel 71-124
+ * Group 3: 5 GHz channel 125-200
+ * Group 4: 2.4 GHz all channels
+ *
+ * Add the tx_atten[group][chain] value to the index for the target chain.
+ * The values are signed, but are in pairs of 0 and a non-negative number,
+ * so as to reduce gain (if necessary) of the "hotter" channel. This
+ * avoids any need to double-check for regulatory compliance after
+ * this step.
+ *
+ *
+ * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
+ * value to the index:
+ *
+ * Hardware rev B: 9 steps (4.5 dB)
+ * Hardware rev C: 5 steps (2.5 dB)
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ *
+ * NOTE: This compensation is in addition to any saturation backoff that
+ * might have been applied in an earlier step.
+ *
+ *
+ * 10) Select the gain table, based on band (2.4 vs 5 GHz).
+ *
+ * Limit the adjusted index to stay within the table!
+ *
+ *
+ * 11) Read gain table entries for DSP and radio gain, place into appropriate
+ * location(s) in command (struct iwl4965_txpowertable_cmd).
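Taken together, steps 5-9 reduce to one gain-table index computation in half-dB steps. The sketch below is an editorial illustration rather than part of the patch: the function name and the worked numbers are hypothetical, and it only restates the arithmetic of the steps above (the driver's real version appears later in iwl4965_fill_txpower_tbl()).

	/* Editorial sketch of steps 5-9; all names and numbers are hypothetical. */
	static int example_txpower_index(int factory_gain_idx, /* step 4: interpolated gain index     */
					 int factory_pwr,      /* step 4: measured power, half-dBm    */
					 int target_pwr,       /* steps 1-2: clamped target, half-dBm */
					 int temp_comp,        /* step 6: half-dB steps, >0 if hotter */
					 int volt_comp,        /* step 7: half-dB steps               */
					 int tx_atten,         /* step 8: per-chain MIMO balance      */
					 int cck_comp)         /* step 9: 9 (rev B) or 5 (rev C); 0 for OFDM */
	{
		/* Each step is 1/2 dB; a lower index means more output power. */
		return factory_gain_idx
			- (target_pwr - factory_pwr)	/* step 5: move toward target power  */
			- temp_comp			/* step 6: hotter -> lower index     */
			- volt_comp			/* step 7: supply-voltage difference */
			+ tx_atten			/* step 8: balance the two chains    */
			+ cck_comp;			/* step 9: extra CCK backoff         */
	}

	/* e.g. factory index 40 at 16 dBm (32 half-dBm), target 13 dBm (26),
	 * temp_comp 2, everything else 0:  40 - (26 - 32) - 2 = 44.          */

Step 10's table clamping, and the +9 shift that maps the extended 5 GHz indexes onto a 0-based array, are applied afterwards, as the later code does before reading the gain table.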
+ */ + +/* Limit range of txpower output target to be between these values */ +#define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */ +#define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */ + +/** + * When MIMO is used (2 transmitters operating simultaneously), driver should + * limit each transmitter to deliver a max of 3 dB below the regulatory limit + * for the device. That is, use half power for each transmitter, so total + * txpower is within regulatory limits. + * + * The value "6" represents number of steps in gain table to reduce power 3 dB. + * Each step is 1/2 dB. + */ +#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6) + +/** + * CCK gain compensation. + * + * When calculating txpowers for CCK, after making sure that the target power + * is within regulatory and saturation limits, driver must additionally + * back off gain by adding these values to the gain table index. + * + * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, + * bits [3:2], 1 = B, 2 = C. + */ +#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9) +#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5) + +/* + * 4965 power supply voltage compensation for txpower + */ +#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7) + +/** + * Gain tables. + * + * The following tables contain pair of values for setting txpower, i.e. + * gain settings for the output of the device's digital signal processor (DSP), + * and for the analog gain structure of the transmitter. + * + * Each entry in the gain tables represents a step of 1/2 dB. Note that these + * are *relative* steps, not indications of absolute output power. Output + * power varies with temperature, voltage, and channel frequency, and also + * requires consideration of average power (to satisfy regulatory constraints), + * and peak power (to avoid distortion of the output signal). + * + * Each entry contains two values: + * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained + * linear value that multiplies the output of the digital signal processor, + * before being sent to the analog radio. + * 2) Radio gain. This sets the analog gain of the radio Tx path. + * It is a coarser setting, and behaves in a logarithmic (dB) fashion. + * + * EEPROM contains factory calibration data for txpower. This maps actual + * measured txpower levels to gain settings in the "well known" tables + * below ("well-known" means here that both factory calibration *and* the + * driver work with the same table). + * + * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table + * has an extension (into negative indexes), in case the driver needs to + * boost power setting for high device temperatures (higher than would be + * present during factory calibration). A 5 Ghz EEPROM index of "40" + * corresponds to the 49th entry in the table used by the driver. 
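To make the last remark concrete, here is a tiny editorial helper (hypothetical, not in the patch) showing how a 5 GHz EEPROM gain index lands in the driver's 0-based table, whose first entry is logical index -9; the same shift appears later in iwl4965_fill_txpower_tbl() as a +9 adjustment for the 5 GHz band.

	/* Illustration only: 5 GHz EEPROM gain index -> driver table index,
	 * where the table's first entry is logical index -9.               */
	static int example_5ghz_table_index(int eeprom_index)
	{
		return eeprom_index + 9;	/* e.g. 40 -> 49 */
	}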
+ */ +#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */ +#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */ + +/** + * 2.4 GHz gain table + * + * Index Dsp gain Radio gain + * 0 110 0x3f (highest gain) + * 1 104 0x3f + * 2 98 0x3f + * 3 110 0x3e + * 4 104 0x3e + * 5 98 0x3e + * 6 110 0x3d + * 7 104 0x3d + * 8 98 0x3d + * 9 110 0x3c + * 10 104 0x3c + * 11 98 0x3c + * 12 110 0x3b + * 13 104 0x3b + * 14 98 0x3b + * 15 110 0x3a + * 16 104 0x3a + * 17 98 0x3a + * 18 110 0x39 + * 19 104 0x39 + * 20 98 0x39 + * 21 110 0x38 + * 22 104 0x38 + * 23 98 0x38 + * 24 110 0x37 + * 25 104 0x37 + * 26 98 0x37 + * 27 110 0x36 + * 28 104 0x36 + * 29 98 0x36 + * 30 110 0x35 + * 31 104 0x35 + * 32 98 0x35 + * 33 110 0x34 + * 34 104 0x34 + * 35 98 0x34 + * 36 110 0x33 + * 37 104 0x33 + * 38 98 0x33 + * 39 110 0x32 + * 40 104 0x32 + * 41 98 0x32 + * 42 110 0x31 + * 43 104 0x31 + * 44 98 0x31 + * 45 110 0x30 + * 46 104 0x30 + * 47 98 0x30 + * 48 110 0x6 + * 49 104 0x6 + * 50 98 0x6 + * 51 110 0x5 + * 52 104 0x5 + * 53 98 0x5 + * 54 110 0x4 + * 55 104 0x4 + * 56 98 0x4 + * 57 110 0x3 + * 58 104 0x3 + * 59 98 0x3 + * 60 110 0x2 + * 61 104 0x2 + * 62 98 0x2 + * 63 110 0x1 + * 64 104 0x1 + * 65 98 0x1 + * 66 110 0x0 + * 67 104 0x0 + * 68 98 0x0 + * 69 97 0 + * 70 96 0 + * 71 95 0 + * 72 94 0 + * 73 93 0 + * 74 92 0 + * 75 91 0 + * 76 90 0 + * 77 89 0 + * 78 88 0 + * 79 87 0 + * 80 86 0 + * 81 85 0 + * 82 84 0 + * 83 83 0 + * 84 82 0 + * 85 81 0 + * 86 80 0 + * 87 79 0 + * 88 78 0 + * 89 77 0 + * 90 76 0 + * 91 75 0 + * 92 74 0 + * 93 73 0 + * 94 72 0 + * 95 71 0 + * 96 70 0 + * 97 69 0 + * 98 68 0 + */ + +/** + * 5 GHz gain table + * + * Index Dsp gain Radio gain + * -9 123 0x3F (highest gain) + * -8 117 0x3F + * -7 110 0x3F + * -6 104 0x3F + * -5 98 0x3F + * -4 110 0x3E + * -3 104 0x3E + * -2 98 0x3E + * -1 110 0x3D + * 0 104 0x3D + * 1 98 0x3D + * 2 110 0x3C + * 3 104 0x3C + * 4 98 0x3C + * 5 110 0x3B + * 6 104 0x3B + * 7 98 0x3B + * 8 110 0x3A + * 9 104 0x3A + * 10 98 0x3A + * 11 110 0x39 + * 12 104 0x39 + * 13 98 0x39 + * 14 110 0x38 + * 15 104 0x38 + * 16 98 0x38 + * 17 110 0x37 + * 18 104 0x37 + * 19 98 0x37 + * 20 110 0x36 + * 21 104 0x36 + * 22 98 0x36 + * 23 110 0x35 + * 24 104 0x35 + * 25 98 0x35 + * 26 110 0x34 + * 27 104 0x34 + * 28 98 0x34 + * 29 110 0x33 + * 30 104 0x33 + * 31 98 0x33 + * 32 110 0x32 + * 33 104 0x32 + * 34 98 0x32 + * 35 110 0x31 + * 36 104 0x31 + * 37 98 0x31 + * 38 110 0x30 + * 39 104 0x30 + * 40 98 0x30 + * 41 110 0x25 + * 42 104 0x25 + * 43 98 0x25 + * 44 110 0x24 + * 45 104 0x24 + * 46 98 0x24 + * 47 110 0x23 + * 48 104 0x23 + * 49 98 0x23 + * 50 110 0x22 + * 51 104 0x18 + * 52 98 0x18 + * 53 110 0x17 + * 54 104 0x17 + * 55 98 0x17 + * 56 110 0x16 + * 57 104 0x16 + * 58 98 0x16 + * 59 110 0x15 + * 60 104 0x15 + * 61 98 0x15 + * 62 110 0x14 + * 63 104 0x14 + * 64 98 0x14 + * 65 110 0x13 + * 66 104 0x13 + * 67 98 0x13 + * 68 110 0x12 + * 69 104 0x08 + * 70 98 0x08 + * 71 110 0x07 + * 72 104 0x07 + * 73 98 0x07 + * 74 110 0x06 + * 75 104 0x06 + * 76 98 0x06 + * 77 110 0x05 + * 78 104 0x05 + * 79 98 0x05 + * 80 110 0x04 + * 81 104 0x04 + * 82 98 0x04 + * 83 110 0x03 + * 84 104 0x03 + * 85 98 0x03 + * 86 110 0x02 + * 87 104 0x02 + * 88 98 0x02 + * 89 110 0x01 + * 90 104 0x01 + * 91 98 0x01 + * 92 110 0x00 + * 93 104 0x00 + * 94 98 0x00 + * 95 93 0x00 + * 96 88 0x00 + * 97 83 0x00 + * 98 78 0x00 + */ + + +/** + * Sanity checks and default values for EEPROM regulatory levels. + * If EEPROM values fall outside MIN/MAX range, use default values. 
+ * + * Regulatory limits refer to the maximum average txpower allowed by + * regulatory agencies in the geographies in which the device is meant + * to be operated. These limits are SKU-specific (i.e. geography-specific), + * and channel-specific; each channel has an individual regulatory limit + * listed in the EEPROM. + * + * Units are in half-dBm (i.e. "34" means 17 dBm). + */ +#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34) +#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34) +#define IWL_TX_POWER_REGULATORY_MIN (0) +#define IWL_TX_POWER_REGULATORY_MAX (34) + +/** + * Sanity checks and default values for EEPROM saturation levels. + * If EEPROM values fall outside MIN/MAX range, use default values. + * + * Saturation is the highest level that the output power amplifier can produce + * without significant clipping distortion. This is a "peak" power level. + * Different types of modulation (i.e. various "rates", and OFDM vs. CCK) + * require differing amounts of backoff, relative to their average power output, + * in order to avoid clipping distortion. + * + * Driver must make sure that it is violating neither the saturation limit, + * nor the regulatory limit, when calculating Tx power settings for various + * rates. + * + * Units are in half-dBm (i.e. "38" means 19 dBm). + */ +#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38) +#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38) +#define IWL_TX_POWER_SATURATION_MIN (20) +#define IWL_TX_POWER_SATURATION_MAX (50) + +/** + * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance) + * and thermal Txpower calibration. + * + * When calculating txpower, driver must compensate for current device + * temperature; higher temperature requires higher gain. Driver must calculate + * current temperature (see "4965 temperature calculation"), then compare vs. + * factory calibration temperature in EEPROM; if current temperature is higher + * than factory temperature, driver must *increase* gain by proportions shown + * in table below. If current temperature is lower than factory, driver must + * *decrease* gain. + * + * Different frequency ranges require different compensation, as shown below. + */ +/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR1_FCH 34 +#define CALIB_IWL_TX_ATTEN_GR1_LCH 43 + +/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR2_FCH 44 +#define CALIB_IWL_TX_ATTEN_GR2_LCH 70 + +/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR3_FCH 71 +#define CALIB_IWL_TX_ATTEN_GR3_LCH 124 + +/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR4_FCH 125 +#define CALIB_IWL_TX_ATTEN_GR4_LCH 200 + +/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR5_FCH 1 +#define CALIB_IWL_TX_ATTEN_GR5_LCH 20 + +enum { + CALIB_CH_GROUP_1 = 0, + CALIB_CH_GROUP_2 = 1, + CALIB_CH_GROUP_3 = 2, + CALIB_CH_GROUP_4 = 3, + CALIB_CH_GROUP_5 = 4, + CALIB_CH_GROUP_MAX +}; + +/********************* END TXPOWER *****************************************/ + + +/** + * Tx/Rx Queues + * + * Most communication between driver and 4965 is via queues of data buffers. + * For example, all commands that the driver issues to device's embedded + * controller (uCode) are via the command queue (one of the Tx queues). All + * uCode command responses/replies/notifications, including Rx frames, are + * conveyed from uCode to driver via the Rx queue. 
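Before moving on to the queue machinery, a short worked example of the compensation ratios defined just above (an editorial illustration with made-up temperatures; the driver's real per-chain computation lives in iwl4965_fill_txpower_tbl() and uses the tx_power_cmp_tble ratios):

	/* Hypothetical numbers for Group 0 (5.2 GHz ch 34-43, 4.5 degrees per
	 * 1/2 dB): how many half-dB steps a temperature rise asks for.       */
	static int example_group0_temp_steps(int current_c, int factory_c)
	{
		/* ratio 9:2, i.e. 4.5 degrees C per half-dB step */
		return ((current_c - factory_c) * 2) / 9;	/* e.g. (34 - 25) -> 2 steps */
	}

	/* Being hotter than factory calls for more gain, so a positive result
	 * is subtracted from the gain-table index (here: 2 steps, 1 dB more). */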
+ * + * Most support for these queues, including handshake support, resides in + * structures in host DRAM, shared between the driver and the device. When + * allocating this memory, the driver must make sure that data written by + * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's + * cache memory), so DRAM and cache are consistent, and the device can + * immediately see changes made by the driver. + * + * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via + * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array + * in DRAM containing 256 Transmit Frame Descriptors (TFDs). + */ +#define IWL49_NUM_FIFOS 7 +#define IWL49_CMD_FIFO_NUM 4 +#define IWL49_NUM_QUEUES 16 +#define IWL49_NUM_AMPDU_QUEUES 8 + + +/** + * struct iwl4965_schedq_bc_tbl + * + * Byte Count table + * + * Each Tx queue uses a byte-count table containing 320 entries: + * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that + * duplicate the first 64 entries (to avoid wrap-around within a Tx window; + * max Tx window is 64 TFDs). + * + * When driver sets up a new TFD, it must also enter the total byte count + * of the frame to be transmitted into the corresponding entry in the byte + * count table for the chosen Tx queue. If the TFD index is 0-63, the driver + * must duplicate the byte count entry in corresponding index 256-319. + * + * padding puts each byte count table on a 1024-byte boundary; + * 4965 assumes tables are separated by 1024 bytes. + */ +struct iwl4965_scd_bc_tbl { + __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; + u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; +} __attribute__ ((packed)); + +#endif /* !__iwl_4965_hw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c new file mode 100644 index 0000000..1bd2cd8 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c @@ -0,0 +1,2274 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-calib.h" +#include "iwl-sta.h" +#include "iwl-agn-led.h" + +static int iwl4965_send_tx_power(struct iwl_priv *priv); +static int iwl4965_hw_get_temperature(struct iwl_priv *priv); + +/* Highest firmware API version supported */ +#define IWL4965_UCODE_API_MAX 2 + +/* Lowest firmware API version supported */ +#define IWL4965_UCODE_API_MIN 2 + +#define IWL4965_FW_PRE "iwlwifi-4965-" +#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode" +#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api) + + +/* module parameters */ +static struct iwl_mod_params iwl4965_mod_params = { + .amsdu_size_8K = 1, + .restart_fw = 1, + /* the rest are 0 by default */ +}; + +/* check contents of special bootstrap uCode SRAM */ +static int iwl4965_verify_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + u32 reg; + u32 val; + + IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; + reg < BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image++) { + val = iwl_read_prph(priv, reg); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, + reg - BSM_SRAM_LOWER_BOUND, len, + val, le32_to_cpu(*image)); + return -EIO; + } + } + + IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n"); + + return 0; +} + +/** + * iwl4965_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. 
+ */ +static int iwl4965_load_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int i; + u32 done; + u32 reg_offset; + int ret; + + IWL_DEBUG_INFO(priv, "Begin load bsm\n"); + + priv->ucode_type = UCODE_RT; + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IWL49_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... host DRAM physical address bits 35:4 for 4965. + * NOTE: iwl_init_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. + */ + pinst = priv->ucode_init.p_addr >> 4; + pdata = priv->ucode_init_data.p_addr >> 4; + inst_len = priv->ucode_init.len; + data_len = priv->ucode_init_data.len; + + iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image)); + + ret = iwl4965_verify_bsm(priv); + if (ret) + return ret; + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND); + iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = iwl_read_prph(priv, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i); + else { + IWL_ERR(priv, "BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. when powering back up after power-save shutdown) */ + iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); + + + return 0; +} + +/** + * iwl4965_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. + * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. 
+ */ +static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv) +{ + dma_addr_t pinst; + dma_addr_t pdata; + int ret = 0; + + /* bits 35:4 for 4965 */ + pinst = priv->ucode_code.p_addr >> 4; + pdata = priv->ucode_data_backup.p_addr >> 4; + + /* Tell bootstrap uCode where to find image to load */ + iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + priv->ucode_data.len); + + /* Inst byte count must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, + priv->ucode_code.len | BSM_DRAM_INST_LOAD); + IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); + + return ret; +} + +/** + * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received + * + * Called after REPLY_ALIVE notification received from "initialize" uCode. + * + * The 4965 "initialize" ALIVE reply contains calibration data for: + * Voltage, temperature, and MIMO tx gain correction, now stored in priv + * (3945 does not contain this data). + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. +*/ +static void iwl4965_init_alive_start(struct iwl_priv *priv) +{ + /* Check alive response for "valid" sign from uCode */ + if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); + goto restart; + } + + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Calculate temperature */ + priv->temperature = iwl4965_hw_get_temperature(priv); + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. 
*/ + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + if (iwl4965_set_ucode_ptrs(priv)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + +restart: + queue_work(priv->workqueue, &priv->restart); +} + +static bool is_ht40_channel(__le32 rxon_flags) +{ + int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) + >> RXON_FLG_CHANNEL_MODE_POS; + return ((chan_mod == CHANNEL_MODE_PURE_40) || + (chan_mod == CHANNEL_MODE_MIXED)); +} + +/* + * EEPROM handlers + */ +static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv) +{ + return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET); +} + +/* + * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask + * must be called under priv->lock and mac access + */ +static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask) +{ + iwl_write_prph(priv, IWL49_SCD_TXFACT, mask); +} + +static void iwl4965_nic_config(struct iwl_priv *priv) +{ + unsigned long flags; + u16 radio_cfg; + + spin_lock_irqsave(&priv->lock, flags); + + radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); + + /* write radio config values to register */ + if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | + EEPROM_RF_CFG_STEP_MSK(radio_cfg) | + EEPROM_RF_CFG_DASH_MSK(radio_cfg)); + + /* set CSR_HW_CONFIG_REG for uCode use */ + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + priv->calib_info = (struct iwl_eeprom_calib_info *) + iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* Reset differential Rx gains in NIC to prepare for chain noise calibration. + * Called after every association, but this runs only once! + * ... once chain noise is calibrated the first time, it's good forever. 
*/ +static void iwl4965_chain_noise_reset(struct iwl_priv *priv) +{ + struct iwl_chain_noise_data *data = &(priv->chain_noise_data); + + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { + struct iwl_calib_diff_gain_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = 0; + cmd.diff_gain_b = 0; + cmd.diff_gain_c = 0; + if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd)) + IWL_ERR(priv, + "Could not send REPLY_PHY_CALIBRATION_CMD\n"); + data->state = IWL_CHAIN_NOISE_ACCUMULATE; + IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); + } +} + +static void iwl4965_gain_computation(struct iwl_priv *priv, + u32 *average_noise, + u16 min_average_noise_antenna_i, + u32 min_average_noise, + u8 default_chain) +{ + int i, ret; + struct iwl_chain_noise_data *data = &priv->chain_noise_data; + + data->delta_gain_code[min_average_noise_antenna_i] = 0; + + for (i = default_chain; i < NUM_RX_CHAINS; i++) { + s32 delta_g = 0; + + if (!(data->disconn_array[i]) && + (data->delta_gain_code[i] == + CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { + delta_g = average_noise[i] - min_average_noise; + data->delta_gain_code[i] = (u8)((delta_g * 10) / 15); + data->delta_gain_code[i] = + min(data->delta_gain_code[i], + (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + + data->delta_gain_code[i] = + (data->delta_gain_code[i] | (1 << 2)); + } else { + data->delta_gain_code[i] = 0; + } + } + IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n", + data->delta_gain_code[0], + data->delta_gain_code[1], + data->delta_gain_code[2]); + + /* Differential gain gets sent to uCode only once */ + if (!data->radio_write) { + struct iwl_calib_diff_gain_cmd cmd; + data->radio_write = 1; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = data->delta_gain_code[0]; + cmd.diff_gain_b = data->delta_gain_code[1]; + cmd.diff_gain_c = data->delta_gain_code[2]; + ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd); + if (ret) + IWL_DEBUG_CALIB(priv, "fail sending cmd " + "REPLY_PHY_CALIBRATION_CMD \n"); + + /* TODO we might want recalculate + * rx_chain in rxon cmd */ + + /* Mark so we run this algo only once! */ + data->state = IWL_CHAIN_NOISE_CALIBRATED; + } + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; +} + +static void iwl4965_bg_txpower_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + txpower_work); + + /* If a scan happened to start before we got here + * then just return; the statistics notification will + * kick off another scheduled work to compensate for + * any temperature delta we missed here. */ + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + /* Regardless of if we are associated, we must reconfigure the + * TX power since frames can be sent on non-radar channels while + * not associated */ + iwl4965_send_tx_power(priv); + + /* Update last_temperature to keep is_calib_needed from running + * when it isn't needed... */ + priv->last_temperature = priv->temperature; + + mutex_unlock(&priv->mutex); +} + +/* + * Acquire priv->lock before calling this function ! 
+ */ +static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) +{ + iwl_write_direct32(priv, HBUS_TARG_WRPTR, + (index & 0xff) | (txq_id << 8)); + iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index); +} + +/** + * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue + * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed + * @scd_retry: (1) Indicates queue will be used in aggregation mode + * + * NOTE: Acquire priv->lock before calling this function ! + */ +static void iwl4965_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry) +{ + int txq_id = txq->q.id; + + /* Find out whether to activate Tx queue */ + int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; + + /* Set up and activate */ + iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), + (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) | + (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) | + (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) | + IWL49_SCD_QUEUE_STTS_REG_MSK); + + txq->sched_retry = scd_retry; + + IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n", + active ? "Activate" : "Deactivate", + scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); +} + +static const u16 default_queue_to_tx_fifo[] = { + IWL_TX_FIFO_AC3, + IWL_TX_FIFO_AC2, + IWL_TX_FIFO_AC1, + IWL_TX_FIFO_AC0, + IWL49_CMD_FIFO_NUM, + IWL_TX_FIFO_HCCA_1, + IWL_TX_FIFO_HCCA_2 +}; + +static int iwl4965_alive_notify(struct iwl_priv *priv) +{ + u32 a; + unsigned long flags; + int i, chan; + u32 reg_val; + + spin_lock_irqsave(&priv->lock, flags); + + /* Clear 4965's internal Tx Scheduler data base */ + priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR); + a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET; + for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) + iwl_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) + iwl_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) + iwl_write_targ_mem(priv, a, 0); + + /* Tel 4965 where to find Tx byte count tables */ + iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR, + priv->scd_bc_tbls.dma >> 10); + + /* Enable DMA channel */ + for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++) + iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); + + /* Update FH chicken bits */ + reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); + iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, + reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); + + /* Disable chain mode for all queues */ + iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0); + + /* Initialize each Tx queue (including the command queue) */ + for (i = 0; i < priv->hw_params.max_txq_num; i++) { + + /* TFD circular buffer read/write indexes */ + iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0); + iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); + + /* Max Tx Window size for Scheduler-ACK mode */ + iwl_write_targ_mem(priv, priv->scd_base_addr + + IWL49_SCD_CONTEXT_QUEUE_OFFSET(i), + (SCD_WIN_SIZE << + IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + /* Frame limit */ + iwl_write_targ_mem(priv, priv->scd_base_addr + + IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) + + sizeof(u32), + (SCD_FRAME_LIMIT << + 
IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + } + iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, + (1 << priv->hw_params.max_txq_num) - 1); + + /* Activate all Tx DMA/FIFO channels */ + priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6)); + + iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); + + /* make sure all queue are not stopped */ + memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); + for (i = 0; i < 4; i++) + atomic_set(&priv->queue_stop_count[i], 0); + + /* reset to 0 to enable all the queue first */ + priv->txq_ctx_active_msk = 0; + /* Map each Tx/cmd queue to its corresponding fifo */ + for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { + int ac = default_queue_to_tx_fifo[i]; + iwl_txq_ctx_activate(priv, i); + iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static struct iwl_sensitivity_ranges iwl4965_sensitivity = { + .min_nrg_cck = 97, + .max_nrg_cck = 0, /* not used, set to 0 */ + + .auto_corr_min_ofdm = 85, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 220, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + .auto_corr_max_ofdm_x1 = 140, + .auto_corr_max_ofdm_mrc_x1 = 270, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 200, + .auto_corr_max_cck_mrc = 400, + + .nrg_th_cck = 100, + .nrg_th_ofdm = 100, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static void iwl4965_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Kelvin */ + priv->hw_params.ct_kill_threshold = + CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY); +} + +/** + * iwl4965_hw_set_hw_params + * + * Called when initializing driver + */ +static int iwl4965_hw_set_hw_params(struct iwl_priv *priv) +{ + if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && + priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES) + priv->cfg->num_of_queues = + priv->cfg->mod_params->num_of_queues; + + priv->hw_params.max_txq_num = priv->cfg->num_of_queues; + priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; + priv->hw_params.scd_bc_tbls_size = + priv->cfg->num_of_queues * + sizeof(struct iwl4965_scd_bc_tbl); + priv->hw_params.tfd_size = sizeof(struct iwl_tfd); + priv->hw_params.max_stations = IWL4965_STATION_COUNT; + priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; + priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE; + priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE; + priv->hw_params.max_bsm_size = BSM_SRAM_SIZE; + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ); + + priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; + + priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); + priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); + priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; + priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; + if (priv->cfg->ops->lib->temp_ops.set_ct_kill) + priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); + + priv->hw_params.sens = &iwl4965_sensitivity; + + return 0; +} + +static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) +{ + s32 sign = 1; + + if (num < 0) { + sign = -sign; + num = -num; + } + if (denom < 0) { + sign = -sign; + denom = -denom; + } + *res = 1; + *res = ((num * 2 + denom) / (denom * 2)) * sign; + + return 1; +} + +/** + * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower + * + * Determines 
power supply voltage compensation for txpower calculations. + * Returns number of 1/2-dB steps to subtract from gain table index, + * to compensate for difference between power supply voltage during + * factory measurements, vs. current power supply voltage. + * + * Voltage indication is higher for lower voltage. + * Lower voltage requires more gain (lower gain table index). + */ +static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage, + s32 current_voltage) +{ + s32 comp = 0; + + if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) || + (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage)) + return 0; + + iwl4965_math_div_round(current_voltage - eeprom_voltage, + TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp); + + if (current_voltage > eeprom_voltage) + comp *= 2; + if ((comp < -2) || (comp > 2)) + comp = 0; + + return comp; +} + +static s32 iwl4965_get_tx_atten_grp(u16 channel) +{ + if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR5_LCH) + return CALIB_CH_GROUP_5; + + if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR1_LCH) + return CALIB_CH_GROUP_1; + + if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR2_LCH) + return CALIB_CH_GROUP_2; + + if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR3_LCH) + return CALIB_CH_GROUP_3; + + if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR4_LCH) + return CALIB_CH_GROUP_4; + + return -1; +} + +static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel) +{ + s32 b = -1; + + for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { + if (priv->calib_info->band_info[b].ch_from == 0) + continue; + + if ((channel >= priv->calib_info->band_info[b].ch_from) + && (channel <= priv->calib_info->band_info[b].ch_to)) + break; + } + + return b; +} + +static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) +{ + s32 val; + + if (x2 == x1) + return y1; + else { + iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val); + return val + y2; + } +} + +/** + * iwl4965_interpolate_chan - Interpolate factory measurements for one channel + * + * Interpolates factory measurements from the two sample channels within a + * sub-band, to apply to channel of interest. Interpolation is proportional to + * differences in channel frequencies, which is proportional to differences + * in channel number. + */ +static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, + struct iwl_eeprom_calib_ch_info *chan_info) +{ + s32 s = -1; + u32 c; + u32 m; + const struct iwl_eeprom_calib_measure *m1; + const struct iwl_eeprom_calib_measure *m2; + struct iwl_eeprom_calib_measure *omeas; + u32 ch_i1; + u32 ch_i2; + + s = iwl4965_get_sub_band(priv, channel); + if (s >= EEPROM_TX_POWER_BANDS) { + IWL_ERR(priv, "Tx Power can not find channel %d\n", channel); + return -1; + } + + ch_i1 = priv->calib_info->band_info[s].ch1.ch_num; + ch_i2 = priv->calib_info->band_info[s].ch2.ch_num; + chan_info->ch_num = (u8) channel; + + IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n", + channel, s, ch_i1, ch_i2); + + for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { + for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { + m1 = &(priv->calib_info->band_info[s].ch1. + measurements[c][m]); + m2 = &(priv->calib_info->band_info[s].ch2. 
+ measurements[c][m]); + omeas = &(chan_info->measurements[c][m]); + + omeas->actual_pow = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->actual_pow, + ch_i2, + m2->actual_pow); + omeas->gain_idx = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->gain_idx, ch_i2, + m2->gain_idx); + omeas->temperature = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->temperature, + ch_i2, + m2->temperature); + omeas->pa_det = + (s8) iwl4965_interpolate_value(channel, ch_i1, + m1->pa_det, ch_i2, + m2->pa_det); + + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m, + m1->actual_pow, m2->actual_pow, omeas->actual_pow); + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m, + m1->gain_idx, m2->gain_idx, omeas->gain_idx); + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m, + m1->pa_det, m2->pa_det, omeas->pa_det); + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d T1=%d T2=%d T=%d\n", c, m, + m1->temperature, m2->temperature, + omeas->temperature); + } + } + + return 0; +} + +/* bit-rate-dependent table to prevent Tx distortion, in half-dB units, + * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */ +static s32 back_off_table[] = { + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ + 10 /* CCK */ +}; + +/* Thermal compensation values for txpower for various frequency ranges ... + * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */ +static struct iwl4965_txpower_comp_entry { + s32 degrees_per_05db_a; + s32 degrees_per_05db_a_denom; +} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = { + {9, 2}, /* group 0 5.2, ch 34-43 */ + {4, 1}, /* group 1 5.2, ch 44-70 */ + {4, 1}, /* group 2 5.2, ch 71-124 */ + {4, 1}, /* group 3 5.2, ch 125-200 */ + {3, 1} /* group 4 2.4, ch all */ +}; + +static s32 get_min_power_index(s32 rate_power_index, u32 band) +{ + if (!band) { + if ((rate_power_index & 7) <= 4) + return MIN_TX_GAIN_INDEX_52GHZ_EXT; + } + return MIN_TX_GAIN_INDEX; +} + +struct gain_entry { + u8 dsp; + u8 radio; +}; + +static const struct gain_entry gain_table[2][108] = { + /* 5.2GHz power gain index table */ + { + {123, 0x3F}, /* highest txpower */ + {117, 0x3F}, + {110, 0x3F}, + {104, 0x3F}, + {98, 0x3F}, + {110, 0x3E}, + {104, 0x3E}, + {98, 0x3E}, + {110, 0x3D}, + {104, 0x3D}, + {98, 0x3D}, + {110, 0x3C}, + {104, 0x3C}, + {98, 0x3C}, + {110, 0x3B}, + {104, 0x3B}, + {98, 0x3B}, + {110, 0x3A}, + {104, 0x3A}, + {98, 0x3A}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x25}, + {104, 0x25}, + {98, 0x25}, + {110, 0x24}, + {104, 0x24}, + {98, 0x24}, + {110, 0x23}, + {104, 0x23}, + {98, 0x23}, + {110, 0x22}, + {104, 0x18}, + {98, 0x18}, + {110, 0x17}, + {104, 0x17}, + {98, 0x17}, + {110, 0x16}, + {104, 0x16}, + {98, 0x16}, + {110, 0x15}, + {104, 0x15}, + {98, 0x15}, + {110, 0x14}, + {104, 0x14}, + {98, 0x14}, + {110, 0x13}, + {104, 0x13}, + {98, 0x13}, + {110, 0x12}, + {104, 0x08}, + {98, 0x08}, + {110, 0x07}, + {104, 0x07}, + {98, 0x07}, + 
{110, 0x06}, + {104, 0x06}, + {98, 0x06}, + {110, 0x05}, + {104, 0x05}, + {98, 0x05}, + {110, 0x04}, + {104, 0x04}, + {98, 0x04}, + {110, 0x03}, + {104, 0x03}, + {98, 0x03}, + {110, 0x02}, + {104, 0x02}, + {98, 0x02}, + {110, 0x01}, + {104, 0x01}, + {98, 0x01}, + {110, 0x00}, + {104, 0x00}, + {98, 0x00}, + {93, 0x00}, + {88, 0x00}, + {83, 0x00}, + {78, 0x00}, + }, + /* 2.4GHz power gain index table */ + { + {110, 0x3f}, /* highest txpower */ + {104, 0x3f}, + {98, 0x3f}, + {110, 0x3e}, + {104, 0x3e}, + {98, 0x3e}, + {110, 0x3d}, + {104, 0x3d}, + {98, 0x3d}, + {110, 0x3c}, + {104, 0x3c}, + {98, 0x3c}, + {110, 0x3b}, + {104, 0x3b}, + {98, 0x3b}, + {110, 0x3a}, + {104, 0x3a}, + {98, 0x3a}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x6}, + {104, 0x6}, + {98, 0x6}, + {110, 0x5}, + {104, 0x5}, + {98, 0x5}, + {110, 0x4}, + {104, 0x4}, + {98, 0x4}, + {110, 0x3}, + {104, 0x3}, + {98, 0x3}, + {110, 0x2}, + {104, 0x2}, + {98, 0x2}, + {110, 0x1}, + {104, 0x1}, + {98, 0x1}, + {110, 0x0}, + {104, 0x0}, + {98, 0x0}, + {97, 0}, + {96, 0}, + {95, 0}, + {94, 0}, + {93, 0}, + {92, 0}, + {91, 0}, + {90, 0}, + {89, 0}, + {88, 0}, + {87, 0}, + {86, 0}, + {85, 0}, + {84, 0}, + {83, 0}, + {82, 0}, + {81, 0}, + {80, 0}, + {79, 0}, + {78, 0}, + {77, 0}, + {76, 0}, + {75, 0}, + {74, 0}, + {73, 0}, + {72, 0}, + {71, 0}, + {70, 0}, + {69, 0}, + {68, 0}, + {67, 0}, + {66, 0}, + {65, 0}, + {64, 0}, + {63, 0}, + {62, 0}, + {61, 0}, + {60, 0}, + {59, 0}, + } +}; + +static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, + u8 is_ht40, u8 ctrl_chan_high, + struct iwl4965_tx_power_db *tx_power_tbl) +{ + u8 saturation_power; + s32 target_power; + s32 user_target_power; + s32 power_limit; + s32 current_temp; + s32 reg_limit; + s32 current_regulatory; + s32 txatten_grp = CALIB_CH_GROUP_MAX; + int i; + int c; + const struct iwl_channel_info *ch_info = NULL; + struct iwl_eeprom_calib_ch_info ch_eeprom_info; + const struct iwl_eeprom_calib_measure *measurement; + s16 voltage; + s32 init_voltage; + s32 voltage_compensation; + s32 degrees_per_05db_num; + s32 degrees_per_05db_denom; + s32 factory_temp; + s32 temperature_comp[2]; + s32 factory_gain_index[2]; + s32 factory_actual_pwr[2]; + s32 power_index; + + /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units + * are used for indexing into txpower table) */ + user_target_power = 2 * priv->tx_power_user_lmt; + + /* Get current (RXON) channel, band, width */ + IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band, + is_ht40); + + ch_info = iwl_get_channel_info(priv, priv->band, channel); + + if (!is_channel_valid(ch_info)) + return -EINVAL; + + /* get txatten group, used to select 1) thermal txpower adjustment + * and 2) mimo txpower balance between Tx chains. 
*/
+	txatten_grp = iwl4965_get_tx_atten_grp(channel);
+	if (txatten_grp < 0) {
+		IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
+			channel);
+		return -EINVAL;
+	}
+
+	IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
+			  channel, txatten_grp);
+
+	if (is_ht40) {
+		if (ctrl_chan_high)
+			channel -= 2;
+		else
+			channel += 2;
+	}
+
+	/* hardware txpower limits ...
+	 * saturation (clipping distortion) txpowers are in half-dBm */
+	if (band)
+		saturation_power = priv->calib_info->saturation_power24;
+	else
+		saturation_power = priv->calib_info->saturation_power52;
+
+	if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
+	    saturation_power > IWL_TX_POWER_SATURATION_MAX) {
+		if (band)
+			saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
+		else
+			saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
+	}
+
+	/* regulatory txpower limits ... reg_limit values are in half-dBm,
+	 * max_power_avg values are in dBm, convert * 2 */
+	if (is_ht40)
+		reg_limit = ch_info->ht40_max_power_avg * 2;
+	else
+		reg_limit = ch_info->max_power_avg * 2;
+
+	if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
+	    (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
+		if (band)
+			reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
+		else
+			reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
+	}
+
+	/* Interpolate txpower calibration values for this channel,
+	 * based on factory calibration tests on spaced channels. */
+	iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
+
+	/* calculate tx gain adjustment based on power supply voltage */
+	voltage = le16_to_cpu(priv->calib_info->voltage);
+	init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
+	voltage_compensation =
+		iwl4965_get_voltage_compensation(voltage, init_voltage);
+
+	IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
+			  init_voltage,
+			  voltage, voltage_compensation);
+
+	/* get current temperature (Celsius) */
+	current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
+	current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
+	current_temp = KELVIN_TO_CELSIUS(current_temp);
+
+	/* select thermal txpower adjustment params, based on channel group
+	 * (same frequency group used for mimo txatten adjustment) */
+	degrees_per_05db_num =
+		tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
+	degrees_per_05db_denom =
+		tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
+
+	/* get per-chain txpower values from factory measurements */
+	for (c = 0; c < 2; c++) {
+		measurement = &ch_eeprom_info.measurements[c][1];
+
+		/* txgain adjustment (in half-dB steps) based on difference
+		 * between factory and current temperature */
+		factory_temp = measurement->temperature;
+		iwl4965_math_div_round((current_temp - factory_temp) *
+				       degrees_per_05db_denom,
+				       degrees_per_05db_num,
+				       &temperature_comp[c]);
+
+		factory_gain_index[c] = measurement->gain_idx;
+		factory_actual_pwr[c] = measurement->actual_pow;
+
+		IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
+		IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
+				  "curr tmp %d, comp %d steps\n",
+				  factory_temp, current_temp,
+				  temperature_comp[c]);
+
+		IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
+				  factory_gain_index[c],
+				  factory_actual_pwr[c]);
+	}
+
+	/* for each of 33 bit-rates (including 1 for CCK) */
+	for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
+		u8 is_mimo_rate;
+		union iwl4965_tx_power_dual_stream tx_power;
+
+		/* for mimo, reduce each chain's txpower by half
+		 * (3dB, 6 steps), so total output power is regulatory
+		 * compliant.
*/ + if (i & 0x8) { + current_regulatory = reg_limit - + IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION; + is_mimo_rate = 1; + } else { + current_regulatory = reg_limit; + is_mimo_rate = 0; + } + + /* find txpower limit, either hardware or regulatory */ + power_limit = saturation_power - back_off_table[i]; + if (power_limit > current_regulatory) + power_limit = current_regulatory; + + /* reduce user's txpower request if necessary + * for this rate on this channel */ + target_power = user_target_power; + if (target_power > power_limit) + target_power = power_limit; + + IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n", + i, saturation_power - back_off_table[i], + current_regulatory, user_target_power, + target_power); + + /* for each of 2 Tx chains (radio transmitters) */ + for (c = 0; c < 2; c++) { + s32 atten_value; + + if (is_mimo_rate) + atten_value = + (s32)le32_to_cpu(priv->card_alive_init. + tx_atten[txatten_grp][c]); + else + atten_value = 0; + + /* calculate index; higher index means lower txpower */ + power_index = (u8) (factory_gain_index[c] - + (target_power - + factory_actual_pwr[c]) - + temperature_comp[c] - + voltage_compensation + + atten_value); + +/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n", + power_index); */ + + if (power_index < get_min_power_index(i, band)) + power_index = get_min_power_index(i, band); + + /* adjust 5 GHz index to support negative indexes */ + if (!band) + power_index += 9; + + /* CCK, rate 32, reduce txpower for CCK */ + if (i == POWER_TABLE_CCK_ENTRY) + power_index += + IWL_TX_POWER_CCK_COMPENSATION_C_STEP; + + /* stay within the table! */ + if (power_index > 107) { + IWL_WARN(priv, "txpower index %d > 107\n", + power_index); + power_index = 107; + } + if (power_index < 0) { + IWL_WARN(priv, "txpower index %d < 0\n", + power_index); + power_index = 0; + } + + /* fill txpower command for this rate/chain */ + tx_power.s.radio_tx_gain[c] = + gain_table[band][power_index].radio; + tx_power.s.dsp_predis_atten[c] = + gain_table[band][power_index].dsp; + + IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d " + "gain 0x%02x dsp %d\n", + c, atten_value, power_index, + tx_power.s.radio_tx_gain[c], + tx_power.s.dsp_predis_atten[c]); + } /* for each chain */ + + tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); + + } /* for each rate */ + + return 0; +} + +/** + * iwl4965_send_tx_power - Configure the TXPOWER level user limit + * + * Uses the active RXON for channel, band, and characteristics (ht40, high) + * The power limit is taken from priv->tx_power_user_lmt. + */ +static int iwl4965_send_tx_power(struct iwl_priv *priv) +{ + struct iwl4965_txpowertable_cmd cmd = { 0 }; + int ret; + u8 band = 0; + bool is_ht40 = false; + u8 ctrl_chan_high = 0; + + if (test_bit(STATUS_SCANNING, &priv->status)) { + /* If this gets hit a lot, switch it to a BUG() and catch + * the stack trace to find out who is calling this during + * a scan. 
*/ + IWL_WARN(priv, "TX Power requested while scanning!\n"); + return -EAGAIN; + } + + band = priv->band == IEEE80211_BAND_2GHZ; + + is_ht40 = is_ht40_channel(priv->active_rxon.flags); + + if (is_ht40 && + (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.channel = priv->active_rxon.channel; + + ret = iwl4965_fill_txpower_tbl(priv, band, + le16_to_cpu(priv->active_rxon.channel), + is_ht40, ctrl_chan_high, &cmd.tx_power); + if (ret) + goto out; + + ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); + +out: + return ret; +} + +static int iwl4965_send_rxon_assoc(struct iwl_priv *priv) +{ + int ret = 0; + struct iwl4965_rxon_assoc_cmd rxon_assoc; + const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; + const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; + + if ((rxon1->flags == rxon2->flags) && + (rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_ht_single_stream_basic_rates == + rxon2->ofdm_ht_single_stream_basic_rates) && + (rxon1->ofdm_ht_dual_stream_basic_rates == + rxon2->ofdm_ht_dual_stream_basic_rates) && + (rxon1->rx_chain == rxon2->rx_chain) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = priv->staging_rxon.flags; + rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; + rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; + rxon_assoc.reserved = 0; + rxon_assoc.ofdm_ht_single_stream_basic_rates = + priv->staging_rxon.ofdm_ht_single_stream_basic_rates; + rxon_assoc.ofdm_ht_dual_stream_basic_rates = + priv->staging_rxon.ofdm_ht_dual_stream_basic_rates; + rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain; + + ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, + sizeof(rxon_assoc), &rxon_assoc, NULL); + if (ret) + return ret; + + return ret; +} + +static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) +{ + int rc; + u8 band = 0; + bool is_ht40 = false; + u8 ctrl_chan_high = 0; + struct iwl4965_channel_switch_cmd cmd; + const struct iwl_channel_info *ch_info; + + band = priv->band == IEEE80211_BAND_2GHZ; + + ch_info = iwl_get_channel_info(priv, priv->band, channel); + + is_ht40 = is_ht40_channel(priv->staging_rxon.flags); + + if (is_ht40 && + (priv->staging_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.expect_beacon = 0; + cmd.channel = cpu_to_le16(channel); + cmd.rxon_flags = priv->staging_rxon.flags; + cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + if (ch_info) + cmd.expect_beacon = is_channel_radar(ch_info); + else { + IWL_ERR(priv, "invalid channel switch from %u to %u\n", + priv->active_rxon.channel, channel); + return -EFAULT; + } + + rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40, + ctrl_chan_high, &cmd.tx_power); + if (rc) { + IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc); + return rc; + } + + priv->switch_rxon.channel = cpu_to_le16(channel); + priv->switch_rxon.switch_in_progress = true; + + return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); +} + +/** + * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array + */ +static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 
byte_cnt) +{ + struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; + int txq_id = txq->q.id; + int write_ptr = txq->q.write_ptr; + int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + __le16 bc_ent; + + WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); + + bc_ent = cpu_to_le16(len & 0xFFF); + /* Set up byte count within first 256 entries */ + scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + + /* If within first 64 entries, duplicate at end */ + if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id]. + tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; +} + +/** + * sign_extend - Sign extend a value using specified bit as sign-bit + * + * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1 + * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7. + * + * @param oper value to sign extend + * @param index 0 based bit index (0<=index<32) to sign bit + */ +static s32 sign_extend(u32 oper, int index) +{ + u8 shift = 31 - index; + + return (s32)(oper << shift) >> shift; +} + +/** + * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin) + * @statistics: Provides the temperature reading from the uCode + * + * A return of <0 indicates bogus data in the statistics + */ +static int iwl4965_hw_get_temperature(struct iwl_priv *priv) +{ + s32 temperature; + s32 vt; + s32 R1, R2, R3; + u32 R4; + + if (test_bit(STATUS_TEMPERATURE, &priv->status) && + (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { + IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n"); + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]); + R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]); + } else { + IWL_DEBUG_TEMP(priv, "Running temperature calibration\n"); + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]); + R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]); + } + + /* + * Temperature is only 23 bits, so sign extend out to 32. + * + * NOTE If we haven't received a statistics notification yet + * with an updated temperature, use R4 provided to us in the + * "initialize" ALIVE response. + */ + if (!test_bit(STATUS_TEMPERATURE, &priv->status)) + vt = sign_extend(R4, 23); + else + vt = sign_extend( + le32_to_cpu(priv->statistics.general.temperature), 23); + + IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); + + if (R3 == R1) { + IWL_ERR(priv, "Calibration conflict R1 == R3\n"); + return -1; + } + + /* Calculate temperature in degrees Kelvin, adjust by 97%. + * Add offset to center the adjustment around 0 degrees Centigrade. */ + temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); + temperature /= (R3 - R1); + temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET; + + IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n", + temperature, KELVIN_TO_CELSIUS(temperature)); + + return temperature; +} + +/* Adjust Txpower only if temperature variance is greater than threshold. */ +#define IWL_TEMPERATURE_THRESHOLD 3 + +/** + * iwl4965_is_temp_calib_needed - determines if new calibration is needed + * + * If the temperature changed has changed sufficiently, then a recalibration + * is needed. + * + * Assumes caller will replace priv->last_temperature once calibration + * executed. 
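+ * Example: with IWL_TEMPERATURE_THRESHOLD at 3, a reading of 310 K against a last calibration at 306 K requests a new calibration, while 308 K does not.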
+ */ +static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv) +{ + int temp_diff; + + if (!test_bit(STATUS_STATISTICS, &priv->status)) { + IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n"); + return 0; + } + + temp_diff = priv->temperature - priv->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + IWL_DEBUG_POWER(priv, "Getting cooler, delta %d, \n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + IWL_DEBUG_POWER(priv, "Same temp, \n"); + else + IWL_DEBUG_POWER(priv, "Getting warmer, delta %d, \n", temp_diff); + + if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { + IWL_DEBUG_POWER(priv, "Thermal txpower calib not needed\n"); + return 0; + } + + IWL_DEBUG_POWER(priv, "Thermal txpower calib needed\n"); + + return 1; +} + +static void iwl4965_temperature_calib(struct iwl_priv *priv) +{ + s32 temp; + + temp = iwl4965_hw_get_temperature(priv); + if (temp < 0) + return; + + if (priv->temperature != temp) { + if (priv->temperature) + IWL_DEBUG_TEMP(priv, "Temperature changed " + "from %dC to %dC\n", + KELVIN_TO_CELSIUS(priv->temperature), + KELVIN_TO_CELSIUS(temp)); + else + IWL_DEBUG_TEMP(priv, "Temperature " + "initialized to %dC\n", + KELVIN_TO_CELSIUS(temp)); + } + + priv->temperature = temp; + iwl_tt_handler(priv); + set_bit(STATUS_TEMPERATURE, &priv->status); + + if (!priv->disable_tx_power_cal && + unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + iwl4965_is_temp_calib_needed(priv)) + queue_work(priv->workqueue, &priv->txpower_work); +} + +/** + * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration + */ +static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv, + u16 txq_id) +{ + /* Simply stop the queue, but don't change any configuration; + * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
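+ * Writing (1 << SCD_ACT_EN) together with (0 << ACTIVE) therefore clears only the ACTIVE flag and leaves the remaining status bits untouched.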
*/ + iwl_write_prph(priv, + IWL49_SCD_QUEUE_STATUS_BITS(txq_id), + (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +/** + * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE + * priv->lock must be held by the caller + */ +static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo) +{ + if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || + (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues + <= txq_id)) { + IWL_WARN(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWL49_FIRST_AMPDU_QUEUE, + IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->num_of_ampdu_queues - 1); + return -EINVAL; + } + + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); + iwl_txq_ctx_deactivate(priv, txq_id); + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); + + return 0; +} + +/** + * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue + */ +static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, + u16 txq_id) +{ + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = priv->scd_base_addr + + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); + + return 0; +} + + +/** + * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue + * + * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE, + * i.e. it must be one of the higher queues used for aggregation + */ +static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id, + int tx_fifo, int sta_id, int tid, u16 ssn_idx) +{ + unsigned long flags; + u16 ra_tid; + + if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || + (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues + <= txq_id)) { + IWL_WARN(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWL49_FIRST_AMPDU_QUEUE, + IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->num_of_ampdu_queues - 1); + return -EINVAL; + } + + ra_tid = BUILD_RAxTID(sta_id, tid); + + /* Modify device's station table to Tx this TID */ + iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); + + spin_lock_irqsave(&priv->lock, flags); + + /* Stop this Tx queue before configuring it */ + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + /* Map receiver-address / traffic-ID to this queue */ + iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); + + /* Set this queue as a chain-building queue */ + iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + /* Place first TFD at index corresponding to start sequence number. 
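+ * Only the low byte of the SSN is used, e.g. an SSN of 0x123 starts the 256-entry ring at index 0x23.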
+ * Assumes that ssn_idx is valid (!= 0xFFF) */ + priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + /* Set up Tx window size and frame limit for this queue */ + iwl_write_targ_mem(priv, + priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), + (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + iwl_write_targ_mem(priv, priv->scd_base_addr + + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), + (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) + & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); + + /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + + +static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) +{ + switch (cmd_id) { + case REPLY_RXON: + return (u16) sizeof(struct iwl4965_rxon_cmd); + default: + return len; + } +} + +static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) +{ + struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data; + addsta->mode = cmd->mode; + memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); + memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo)); + addsta->station_flags = cmd->station_flags; + addsta->station_flags_msk = cmd->station_flags_msk; + addsta->tid_disable_tx = cmd->tid_disable_tx; + addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; + addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; + addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; + addsta->sleep_tx_count = cmd->sleep_tx_count; + addsta->reserved1 = cpu_to_le16(0); + addsta->reserved2 = cpu_to_le16(0); + + return (u16)sizeof(struct iwl4965_addsta_cmd); +} + +static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp) +{ + return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN; +} + +/** + * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue + */ +static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl4965_tx_resp *tx_resp, + int txq_id, u16 start_idx) +{ + u16 status; + struct agg_tx_status *frame_status = tx_resp->u.agg_status; + struct ieee80211_tx_info *info = NULL; + struct ieee80211_hdr *hdr = NULL; + u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); + int i, sh, idx; + u16 seq; + if (agg->wait_for_ba) + IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n"); + + agg->frame_count = tx_resp->frame_count; + agg->start_idx = start_idx; + agg->rate_n_flags = rate_n_flags; + agg->bitmap = 0; + + /* num frames attempted by Tx command */ + if (agg->frame_count == 1) { + /* Only one frame was attempted; no block-ack will arrive */ + status = le16_to_cpu(frame_status[0].status); + idx = start_idx; + + /* FIXME: code repetition */ + IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", + agg->frame_count, agg->start_idx, idx); + + info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags &= ~IEEE80211_TX_CTL_AMPDU; + info->flags |= iwl_tx_status_to_mac80211(status); + iwl_hwrate_to_tx_control(priv, rate_n_flags, info); + /* FIXME: code repetition end */ + + IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", + status & 
0xff, tx_resp->failure_frame); + IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags); + + agg->wait_for_ba = 0; + } else { + /* Two or more frames were attempted; expect block-ack */ + u64 bitmap = 0; + int start = agg->start_idx; + + /* Construct bit-map of pending frames within Tx window */ + for (i = 0; i < agg->frame_count; i++) { + u16 sc; + status = le16_to_cpu(frame_status[i].status); + seq = le16_to_cpu(frame_status[i].sequence); + idx = SEQ_TO_INDEX(seq); + txq_id = SEQ_TO_QUEUE(seq); + + if (status & (AGG_TX_STATE_FEW_BYTES_MSK | + AGG_TX_STATE_ABORT_MSK)) + continue; + + IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", + agg->frame_count, txq_id, idx); + + hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); + if (!hdr) { + IWL_ERR(priv, + "BUG_ON idx doesn't point to valid skb" + " idx=%d, txq_id=%d\n", idx, txq_id); + return -1; + } + + sc = le16_to_cpu(hdr->seq_ctrl); + if (idx != (SEQ_TO_SN(sc) & 0xff)) { + IWL_ERR(priv, + "BUG_ON idx doesn't match seq control" + " idx=%d, seq_idx=%d, seq=%d\n", + idx, SEQ_TO_SN(sc), hdr->seq_ctrl); + return -1; + } + + IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", + i, idx, SEQ_TO_SN(sc)); + + sh = idx - start; + if (sh > 64) { + sh = (start - idx) + 0xff; + bitmap = bitmap << sh; + sh = 0; + start = idx; + } else if (sh < -64) + sh = 0xff - (start - idx); + else if (sh < 0) { + sh = start - idx; + start = idx; + bitmap = bitmap << sh; + sh = 0; + } + bitmap |= 1ULL << sh; + IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", + start, (unsigned long long)bitmap); + } + + agg->bitmap = bitmap; + agg->start_idx = start; + IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", + agg->frame_count, agg->start_idx, + (unsigned long long)agg->bitmap); + + if (bitmap) + agg->wait_for_ba = 1; + } + return 0; +} + +/** + * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response + */ +static void iwl4965_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *info; + struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->u.status); + int uninitialized_var(tid); + int sta_id; + int freed; + u8 *qc = NULL; + + if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.write_ptr, + txq->q.read_ptr); + return; + } + + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); + memset(&info->status, 0, sizeof(info->status)); + + hdr = iwl_tx_queue_get_hdr(priv, txq_id, index); + if (ieee80211_is_data_qos(hdr->frame_control)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } + + sta_id = iwl_get_ra_sta_id(priv, hdr); + if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) { + IWL_ERR(priv, "Station not known\n"); + return; + } + + if (txq->sched_retry) { + const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp); + struct iwl_ht_agg *agg = NULL; + + WARN_ON(!qc); + + agg = &priv->stations[sta_id].tid[tid].agg; + + iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); + + /* check if BAR is needed */ + if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) + info->flags |= 
IEEE80211_TX_STAT_AMPDU_NO_BACK; + + if (txq->q.read_ptr != (scd_ssn & 0xff)) { + index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); + IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " + "%d index %d\n", scd_ssn , index); + freed = iwl_tx_queue_reclaim(priv, txq_id, index); + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); + + if (priv->mac80211_registered && + (iwl_queue_space(&txq->q) > txq->q.low_mark) && + (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { + if (agg->state == IWL_AGG_OFF) + iwl_wake_queue(priv, txq_id); + else + iwl_wake_queue(priv, txq->swq_id); + } + } + } else { + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags |= iwl_tx_status_to_mac80211(status); + iwl_hwrate_to_tx_control(priv, + le32_to_cpu(tx_resp->rate_n_flags), + info); + + IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " + "rate_n_flags 0x%x retries %d\n", + txq_id, + iwl_get_tx_fail_reason(status), status, + le32_to_cpu(tx_resp->rate_n_flags), + tx_resp->failure_frame); + + freed = iwl_tx_queue_reclaim(priv, txq_id, index); + if (qc && likely(sta_id != IWL_INVALID_STATION)) + priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; + + if (priv->mac80211_registered && + (iwl_queue_space(&txq->q) > txq->q.low_mark)) + iwl_wake_queue(priv, txq_id); + } + + if (qc && likely(sta_id != IWL_INVALID_STATION)) + iwl_txq_check_empty(priv, sta_id, tid, txq_id); + + if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) + IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); +} + +static int iwl4965_calc_rssi(struct iwl_priv *priv, + struct iwl_rx_phy_res *rx_resp) +{ + /* data from PHY/DSP regarding signal strength, etc., + * contents are always there, not configurable by host. */ + struct iwl4965_rx_non_cfg_phy *ncphy = + (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf; + u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK) + >> IWL49_AGC_DB_POS; + + u32 valid_antennae = + (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK) + >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET; + u8 max_rssi = 0; + u32 i; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the digital signal processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's automatic gain control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. */ + for (i = 0; i < 3; i++) + if (valid_antennae & (1 << i)) + max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); + + IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", + ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], + max_rssi, agc); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. 
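+ * Example: max_rssi of 80 dB with an AGC reading of 35 dB is reported as (80 - 35 - IWL49_RSSI_OFFSET) dBm.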
*/ + return max_rssi - agc - IWL49_RSSI_OFFSET; +} + + +/* Set up 4965-specific Rx frame reply handlers */ +static void iwl4965_rx_handler_setup(struct iwl_priv *priv) +{ + /* Legacy Rx frames */ + priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx; + /* Tx response */ + priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; +} + +static void iwl4965_setup_deferred_work(struct iwl_priv *priv) +{ + INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); +} + +static void iwl4965_cancel_deferred_work(struct iwl_priv *priv) +{ + cancel_work_sync(&priv->txpower_work); +} + +#define IWL4965_UCODE_GET(item) \ +static u32 iwl4965_ucode_get_##item(const struct iwl_ucode_header *ucode,\ + u32 api_ver) \ +{ \ + return le32_to_cpu(ucode->u.v1.item); \ +} + +static u32 iwl4965_ucode_get_header_size(u32 api_ver) +{ + return UCODE_HEADER_SIZE(1); +} +static u32 iwl4965_ucode_get_build(const struct iwl_ucode_header *ucode, + u32 api_ver) +{ + return 0; +} +static u8 *iwl4965_ucode_get_data(const struct iwl_ucode_header *ucode, + u32 api_ver) +{ + return (u8 *) ucode->u.v1.data; +} + +IWL4965_UCODE_GET(inst_size); +IWL4965_UCODE_GET(data_size); +IWL4965_UCODE_GET(init_size); +IWL4965_UCODE_GET(init_data_size); +IWL4965_UCODE_GET(boot_size); + +static struct iwl_hcmd_ops iwl4965_hcmd = { + .rxon_assoc = iwl4965_send_rxon_assoc, + .commit_rxon = iwl_commit_rxon, + .set_rxon_chain = iwl_set_rxon_chain, +}; + +static struct iwl_ucode_ops iwl4965_ucode = { + .get_header_size = iwl4965_ucode_get_header_size, + .get_build = iwl4965_ucode_get_build, + .get_inst_size = iwl4965_ucode_get_inst_size, + .get_data_size = iwl4965_ucode_get_data_size, + .get_init_size = iwl4965_ucode_get_init_size, + .get_init_data_size = iwl4965_ucode_get_init_data_size, + .get_boot_size = iwl4965_ucode_get_boot_size, + .get_data = iwl4965_ucode_get_data, +}; +static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { + .get_hcmd_size = iwl4965_get_hcmd_size, + .build_addsta_hcmd = iwl4965_build_addsta_hcmd, + .chain_noise_reset = iwl4965_chain_noise_reset, + .gain_computation = iwl4965_gain_computation, + .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, + .calc_rssi = iwl4965_calc_rssi, +}; + +static struct iwl_lib_ops iwl4965_lib = { + .set_hw_params = iwl4965_hw_set_hw_params, + .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, + .txq_set_sched = iwl4965_txq_set_sched, + .txq_agg_enable = iwl4965_txq_agg_enable, + .txq_agg_disable = iwl4965_txq_agg_disable, + .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl_hw_txq_free_tfd, + .txq_init = iwl_hw_tx_queue_init, + .rx_handler_setup = iwl4965_rx_handler_setup, + .setup_deferred_work = iwl4965_setup_deferred_work, + .cancel_deferred_work = iwl4965_cancel_deferred_work, + .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, + .alive_notify = iwl4965_alive_notify, + .init_alive_start = iwl4965_init_alive_start, + .load_ucode = iwl4965_load_bsm, + .dump_nic_event_log = iwl_dump_nic_event_log, + .dump_nic_error_log = iwl_dump_nic_error_log, + .set_channel_switch = iwl4965_hw_channel_switch, + .apm_ops = { + .init = iwl_apm_init, + .stop = iwl_apm_stop, + .config = iwl4965_nic_config, + .set_pwr_src = iwl_set_pwr_src, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REGULATORY_BAND_1_CHANNELS, + EEPROM_REGULATORY_BAND_2_CHANNELS, + EEPROM_REGULATORY_BAND_3_CHANNELS, + EEPROM_REGULATORY_BAND_4_CHANNELS, + EEPROM_REGULATORY_BAND_5_CHANNELS, + EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, + EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS + }, + .verify_signature = 
iwlcore_eeprom_verify_signature, + .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, + .release_semaphore = iwlcore_eeprom_release_semaphore, + .calib_version = iwl4965_eeprom_calib_version, + .query_addr = iwlcore_eeprom_query_addr, + }, + .send_tx_power = iwl4965_send_tx_power, + .update_chain_flags = iwl_update_chain_flags, + .post_associate = iwl_post_associate, + .config_ap = iwl_config_ap, + .isr = iwl_isr_legacy, + .temp_ops = { + .temperature = iwl4965_temperature_calib, + .set_ct_kill = iwl4965_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, +}; + +static const struct iwl_ops iwl4965_ops = { + .ucode = &iwl4965_ucode, + .lib = &iwl4965_lib, + .hcmd = &iwl4965_hcmd, + .utils = &iwl4965_hcmd_utils, + .led = &iwlagn_led_ops, +}; + +struct iwl_cfg iwl4965_agn_cfg = { + .name = "4965AGN", + .fw_name_pre = IWL4965_FW_PRE, + .ucode_api_max = IWL4965_UCODE_API_MAX, + .ucode_api_min = IWL4965_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .eeprom_size = IWL4965_EEPROM_IMG_SIZE, + .eeprom_ver = EEPROM_4965_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, + .ops = &iwl4965_ops, + .num_of_queues = IWL49_NUM_QUEUES, + .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES, + .mod_params = &iwl4965_mod_params, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_ABC, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = true, + .use_isr_legacy = true, + .ht_greenfield_support = false, + .broken_powersave = true, + .led_compensation = 61, + .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, +}; + +/* Module firmware */ +MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); + +module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO); +MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); +module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); +module_param_named( + disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO); +MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); + +module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO); +MODULE_PARM_DESC(queues_num, "number of hw queues."); +/* 11n */ +module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO); +MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); +module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, + int, S_IRUGO); +MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); + +module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error"); diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h new file mode 100644 index 0000000..714e032 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h @@ -0,0 +1,121 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-5000-hw.h) only for hardware-related definitions. + * Use iwl-5000-commands.h for uCode API definitions. 
+ */ + +#ifndef __iwl_5000_hw_h__ +#define __iwl_5000_hw_h__ + +#define IWL50_RTC_INST_LOWER_BOUND (0x000000) +#define IWL50_RTC_INST_UPPER_BOUND (0x020000) + +#define IWL50_RTC_DATA_LOWER_BOUND (0x800000) +#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000) + +#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - \ + IWL50_RTC_INST_LOWER_BOUND) +#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - \ + IWL50_RTC_DATA_LOWER_BOUND) + +/* EEPROM */ +#define IWL_5000_EEPROM_IMG_SIZE 2048 + +#define IWL50_CMD_FIFO_NUM 7 +#define IWL50_NUM_QUEUES 20 +#define IWL50_NUM_AMPDU_QUEUES 10 +#define IWL50_FIRST_AMPDU_QUEUE 10 + +/* 5150 only */ +#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) + +static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) +{ + u16 temperature, voltage; + __le16 *temp_calib = + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE); + + temperature = le16_to_cpu(temp_calib[0]); + voltage = le16_to_cpu(temp_calib[1]); + + /* offset = temp - volt / coeff */ + return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); +} + +/* Fixed (non-configurable) rx data from phy */ + +/** + * struct iwl5000_schedq_bc_tbl scheduler byte count table + * base physical address of iwl5000_shared + * is provided to SCD_DRAM_BASE_ADDR + * @tfd_offset 0-12 - tx command byte count + * 12-16 - station index + */ +struct iwl5000_scd_bc_tbl { + __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; +} __attribute__ ((packed)); + + +#endif /* __iwl_5000_hw_h__ */ + diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c new file mode 100644 index 0000000..e476acb --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -0,0 +1,1774 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-sta.h" +#include "iwl-helpers.h" +#include "iwl-agn-led.h" +#include "iwl-5000-hw.h" +#include "iwl-6000-hw.h" + +/* Highest firmware API version supported */ +#define IWL5000_UCODE_API_MAX 2 +#define IWL5150_UCODE_API_MAX 2 + +/* Lowest firmware API version supported */ +#define IWL5000_UCODE_API_MIN 1 +#define IWL5150_UCODE_API_MIN 1 + +#define IWL5000_FW_PRE "iwlwifi-5000-" +#define _IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE #api ".ucode" +#define IWL5000_MODULE_FIRMWARE(api) _IWL5000_MODULE_FIRMWARE(api) + +#define IWL5150_FW_PRE "iwlwifi-5150-" +#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode" +#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api) + +static const u16 iwl5000_default_queue_to_tx_fifo[] = { + IWL_TX_FIFO_AC3, + IWL_TX_FIFO_AC2, + IWL_TX_FIFO_AC1, + IWL_TX_FIFO_AC0, + IWL50_CMD_FIFO_NUM, + IWL_TX_FIFO_HCCA_1, + IWL_TX_FIFO_HCCA_2 +}; + +/* NIC configuration for 5000 series */ +void iwl5000_nic_config(struct iwl_priv *priv) +{ + unsigned long flags; + u16 radio_cfg; + + spin_lock_irqsave(&priv->lock, flags); + + radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); + + /* write radio config values to register */ + if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_RF_CONFIG_TYPE_MAX) + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | + EEPROM_RF_CFG_STEP_MSK(radio_cfg) | + EEPROM_RF_CFG_DASH_MSK(radio_cfg)); + + /* set CSR_HW_CONFIG_REG for uCode use */ + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + /* W/A : NIC is stuck in a reset state after Early PCIe power off + * (PCIe power is lost before PERST# is asserted), + * causing ME FW to lose ownership and not being able to obtain it back. 
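+ * The write below works around this by setting APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, so the reset on early PCIe power off is disabled.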
+ */ + iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, + ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); + + + spin_unlock_irqrestore(&priv->lock, flags); +} + + +/* + * EEPROM + */ +static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) +{ + u16 offset = 0; + + if ((address & INDIRECT_ADDRESS) == 0) + return address; + + switch (address & INDIRECT_TYPE_MSK) { + case INDIRECT_HOST: + offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST); + break; + case INDIRECT_GENERAL: + offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL); + break; + case INDIRECT_REGULATORY: + offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY); + break; + case INDIRECT_CALIBRATION: + offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION); + break; + case INDIRECT_PROCESS_ADJST: + offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST); + break; + case INDIRECT_OTHERS: + offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS); + break; + default: + IWL_ERR(priv, "illegal indirect type: 0x%X\n", + address & INDIRECT_TYPE_MSK); + break; + } + + /* translate the offset from words to byte */ + return (address & ADDRESS_MSK) + (offset << 1); +} + +u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv) +{ + struct iwl_eeprom_calib_hdr { + u8 version; + u8 pa_type; + u16 voltage; + } *hdr; + + hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, + EEPROM_5000_CALIB_ALL); + return hdr->version; + +} + +static void iwl5000_gain_computation(struct iwl_priv *priv, + u32 average_noise[NUM_RX_CHAINS], + u16 min_average_noise_antenna_i, + u32 min_average_noise, + u8 default_chain) +{ + int i; + s32 delta_g; + struct iwl_chain_noise_data *data = &priv->chain_noise_data; + + /* + * Find Gain Code for the chains based on "default chain" + */ + for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) { + if ((data->disconn_array[i])) { + data->delta_gain_code[i] = 0; + continue; + } + + delta_g = (priv->cfg->chain_noise_scale * + ((s32)average_noise[default_chain] - + (s32)average_noise[i])) / 1500; + + /* bound gain by 2 bits value max, 3rd bit is sign */ + data->delta_gain_code[i] = + min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + + if (delta_g < 0) + /* + * set negative sign ... + * note to Intel developers: This is uCode API format, + * not the format of any internal device registers. + * Do not change this format for e.g. 6050 or similar + * devices. Change format only if more resolution + * (i.e. more than 2 bits magnitude) is needed. 
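+ * Example: a delta of -2 is encoded as 0b110, i.e. magnitude 2 with bit 2 set as the sign.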
+ */ + data->delta_gain_code[i] |= (1 << 2); + } + + IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n", + data->delta_gain_code[1], data->delta_gain_code[2]); + + if (!data->radio_write) { + struct iwl_calib_chain_noise_gain_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD; + cmd.hdr.first_group = 0; + cmd.hdr.groups_num = 1; + cmd.hdr.data_valid = 1; + cmd.delta_gain_1 = data->delta_gain_code[1]; + cmd.delta_gain_2 = data->delta_gain_code[2]; + iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd, NULL); + + data->radio_write = 1; + data->state = IWL_CHAIN_NOISE_CALIBRATED; + } + + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; +} + +static void iwl5000_chain_noise_reset(struct iwl_priv *priv) +{ + struct iwl_chain_noise_data *data = &priv->chain_noise_data; + int ret; + + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { + struct iwl_calib_chain_noise_reset_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + + cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD; + cmd.hdr.first_group = 0; + cmd.hdr.groups_num = 1; + cmd.hdr.data_valid = 1; + ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(priv, + "Could not send REPLY_PHY_CALIBRATION_CMD\n"); + data->state = IWL_CHAIN_NOISE_ACCUMULATE; + IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); + } +} + +void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info, + __le32 *tx_flags) +{ + if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || + (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) + *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; + else + *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK; +} + +static struct iwl_sensitivity_ranges iwl5000_sensitivity = { + .min_nrg_cck = 95, + .max_nrg_cck = 0, /* not used, set to 0 */ + .auto_corr_min_ofdm = 90, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 120, + .auto_corr_min_ofdm_mrc_x1 = 240, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + .auto_corr_max_ofdm_x1 = 120, + .auto_corr_max_ofdm_mrc_x1 = 240, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 170, + .auto_corr_max_cck_mrc = 400, + .nrg_th_cck = 95, + .nrg_th_ofdm = 95, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static struct iwl_sensitivity_ranges iwl5150_sensitivity = { + .min_nrg_cck = 95, + .max_nrg_cck = 0, /* not used, set to 0 */ + .auto_corr_min_ofdm = 90, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 220, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + /* max = min for performance bug in 5150 DSP */ + .auto_corr_max_ofdm_x1 = 105, + .auto_corr_max_ofdm_mrc_x1 = 220, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 170, + .auto_corr_max_cck_mrc = 400, + .nrg_th_cck = 95, + .nrg_th_ofdm = 95, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, + size_t offset) +{ + u32 address = eeprom_indirect_address(priv, offset); + BUG_ON(address >= priv->cfg->eeprom_size); + return &priv->eeprom[address]; +} + +static void iwl5150_set_ct_threshold(struct iwl_priv *priv) +{ + const s32 volt2temp_coef = 
IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; + s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) - + iwl_temp_calib_to_offset(priv); + + priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef; +} + +static void iwl5000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; +} + +/* + * Calibration + */ +static int iwl5000_set_Xtal_calib(struct iwl_priv *priv) +{ + struct iwl_calib_xtal_freq_cmd cmd; + __le16 *xtal_calib = + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); + + cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; + cmd.hdr.first_group = 0; + cmd.hdr.groups_num = 1; + cmd.hdr.data_valid = 1; + cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); + cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); + return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], + (u8 *)&cmd, sizeof(cmd)); +} + +static int iwl5000_send_calib_cfg(struct iwl_priv *priv) +{ + struct iwl_calib_cfg_cmd calib_cfg_cmd; + struct iwl_host_cmd cmd = { + .id = CALIBRATION_CFG_CMD, + .len = sizeof(struct iwl_calib_cfg_cmd), + .data = &calib_cfg_cmd, + }; + + memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); + calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL; + calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL; + calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL; + calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL; + + return iwl_send_cmd(priv, &cmd); +} + +static void iwl5000_rx_calib_result(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw; + int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + int index; + + /* reduce the size of the length field itself */ + len -= 4; + + /* Define the order in which the results will be sent to the runtime + * uCode. iwl_send_calib_results sends them in a row according to their + * index. We sort them here */ + switch (hdr->op_code) { + case IWL_PHY_CALIBRATE_DC_CMD: + index = IWL_CALIB_DC; + break; + case IWL_PHY_CALIBRATE_LO_CMD: + index = IWL_CALIB_LO; + break; + case IWL_PHY_CALIBRATE_TX_IQ_CMD: + index = IWL_CALIB_TX_IQ; + break; + case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD: + index = IWL_CALIB_TX_IQ_PERD; + break; + case IWL_PHY_CALIBRATE_BASE_BAND_CMD: + index = IWL_CALIB_BASE_BAND; + break; + default: + IWL_ERR(priv, "Unknown calibration notification %d\n", + hdr->op_code); + return; + } + iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len); +} + +static void iwl5000_rx_calib_complete(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + IWL_DEBUG_INFO(priv, "Init. 
calibration is completed, restarting fw.\n"); + queue_work(priv->workqueue, &priv->restart); +} + +/* + * ucode + */ +static int iwl5000_load_section(struct iwl_priv *priv, const char *name, + struct fw_desc *image, u32 dst_addr) +{ + dma_addr_t phy_addr = image->p_addr; + u32 byte_cnt = image->len; + int ret; + + priv->ucode_write_complete = 0; + + iwl_write_direct32(priv, + FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); + + iwl_write_direct32(priv, + FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); + + iwl_write_direct32(priv, + FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), + phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); + + iwl_write_direct32(priv, + FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), + (iwl_get_dma_hi_addr(phy_addr) + << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); + + iwl_write_direct32(priv, + FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), + 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | + 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | + FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); + + iwl_write_direct32(priv, + FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | + FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); + + IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name); + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + priv->ucode_write_complete, 5 * HZ); + if (ret == -ERESTARTSYS) { + IWL_ERR(priv, "Could not load the %s uCode section due " + "to interrupt\n", name); + return ret; + } + if (!ret) { + IWL_ERR(priv, "Could not load the %s uCode section\n", + name); + return -ETIMEDOUT; + } + + return 0; +} + +static int iwl5000_load_given_ucode(struct iwl_priv *priv, + struct fw_desc *inst_image, + struct fw_desc *data_image) +{ + int ret = 0; + + ret = iwl5000_load_section(priv, "INST", inst_image, + IWL50_RTC_INST_LOWER_BOUND); + if (ret) + return ret; + + return iwl5000_load_section(priv, "DATA", data_image, + IWL50_RTC_DATA_LOWER_BOUND); +} + +int iwl5000_load_ucode(struct iwl_priv *priv) +{ + int ret = 0; + + /* check whether init ucode should be loaded, or rather runtime ucode */ + if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) { + IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n"); + ret = iwl5000_load_given_ucode(priv, + &priv->ucode_init, &priv->ucode_init_data); + if (!ret) { + IWL_DEBUG_INFO(priv, "Init ucode load complete.\n"); + priv->ucode_type = UCODE_INIT; + } + } else { + IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. " + "Loading runtime ucode...\n"); + ret = iwl5000_load_given_ucode(priv, + &priv->ucode_code, &priv->ucode_data); + if (!ret) { + IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n"); + priv->ucode_type = UCODE_RT; + } + } + + return ret; +} + +void iwl5000_init_alive_start(struct iwl_priv *priv) +{ + int ret = 0; + + /* Check alive response for "valid" sign from uCode */ + if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); + goto restart; + } + + /* initialize uCode was loaded... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. 
*/ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); + goto restart; + } + + iwl_clear_stations_table(priv); + ret = priv->cfg->ops->lib->alive_notify(priv); + if (ret) { + IWL_WARN(priv, + "Could not complete ALIVE transition: %d\n", ret); + goto restart; + } + + iwl5000_send_calib_cfg(priv); + return; + +restart: + /* real restart (first load init_ucode) */ + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl5000_set_wr_ptrs(struct iwl_priv *priv, + int txq_id, u32 index) +{ + iwl_write_direct32(priv, HBUS_TARG_WRPTR, + (index & 0xff) | (txq_id << 8)); + iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index); +} + +static void iwl5000_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry) +{ + int txq_id = txq->q.id; + int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; + + iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id), + (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) | + (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) | + IWL50_SCD_QUEUE_STTS_REG_MSK); + + txq->sched_retry = scd_retry; + + IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n", + active ? "Activate" : "Deactivate", + scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); +} + +int iwl5000_alive_notify(struct iwl_priv *priv) +{ + u32 a; + unsigned long flags; + int i, chan; + u32 reg_val; + + spin_lock_irqsave(&priv->lock, flags); + + priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR); + a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET; + for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET; + a += 4) + iwl_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET; + a += 4) + iwl_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + + IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) + iwl_write_targ_mem(priv, a, 0); + + iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR, + priv->scd_bc_tbls.dma >> 10); + + /* Enable DMA channel */ + for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++) + iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); + + /* Update FH chicken bits */ + reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); + iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, + reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); + + iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, + IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num)); + iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0); + + /* initiate the queues */ + for (i = 0; i < priv->hw_params.max_txq_num; i++) { + iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0); + iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); + iwl_write_targ_mem(priv, priv->scd_base_addr + + IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0); + iwl_write_targ_mem(priv, priv->scd_base_addr + + IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) + + sizeof(u32), + ((SCD_WIN_SIZE << + IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & + IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | + ((SCD_FRAME_LIMIT << + IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); + } + + iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK, + IWL_MASK(0, priv->hw_params.max_txq_num)); + + /* Activate all Tx DMA/FIFO channels */ + 
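/* IWL_MASK(0, 7) builds a bit mask covering Tx DMA/FIFO channels 0 through 7 */ +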
priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7)); + + iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); + + /* make sure all queue are not stopped */ + memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); + for (i = 0; i < 4; i++) + atomic_set(&priv->queue_stop_count[i], 0); + + /* reset to 0 to enable all the queue first */ + priv->txq_ctx_active_msk = 0; + /* map qos queues to fifos one-to-one */ + for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) { + int ac = iwl5000_default_queue_to_tx_fifo[i]; + iwl_txq_ctx_activate(priv, i); + iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0); + } + + /* + * TODO - need to initialize these queues and map them to FIFOs + * in the loop above, not only mark them as active. We do this + * because we want the first aggregation queue to be queue #10, + * but do not use 8 or 9 otherwise yet. + */ + iwl_txq_ctx_activate(priv, 7); + iwl_txq_ctx_activate(priv, 8); + iwl_txq_ctx_activate(priv, 9); + + spin_unlock_irqrestore(&priv->lock, flags); + + + iwl_send_wimax_coex(priv); + + iwl5000_set_Xtal_calib(priv); + iwl_send_calib_results(priv); + + return 0; +} + +int iwl5000_hw_set_hw_params(struct iwl_priv *priv) +{ + if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && + priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) + priv->cfg->num_of_queues = + priv->cfg->mod_params->num_of_queues; + + priv->hw_params.max_txq_num = priv->cfg->num_of_queues; + priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; + priv->hw_params.scd_bc_tbls_size = + priv->cfg->num_of_queues * + sizeof(struct iwl5000_scd_bc_tbl); + priv->hw_params.tfd_size = sizeof(struct iwl_tfd); + priv->hw_params.max_stations = IWL5000_STATION_COUNT; + priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; + + priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE; + priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE; + + priv->hw_params.max_bsm_size = 0; + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | + BIT(IEEE80211_BAND_5GHZ); + priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; + + priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); + priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); + priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; + priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; + + if (priv->cfg->ops->lib->temp_ops.set_ct_kill) + priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); + + /* Set initial sensitivity parameters */ + /* Set initial calibration set */ + switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { + case CSR_HW_REV_TYPE_5150: + priv->hw_params.sens = &iwl5150_sensitivity; + priv->hw_params.calib_init_cfg = + BIT(IWL_CALIB_DC) | + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_BASE_BAND); + + break; + default: + priv->hw_params.sens = &iwl5000_sensitivity; + priv->hw_params.calib_init_cfg = + BIT(IWL_CALIB_XTAL) | + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_TX_IQ_PERD) | + BIT(IWL_CALIB_BASE_BAND); + break; + } + + return 0; +} + +/** + * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array + */ +void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt) +{ + struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; + int write_ptr = txq->q.write_ptr; + int txq_id = txq->q.id; + u8 sec_ctl = 0; + u8 sta_id = 0; + u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + __le16 bc_ent; + + WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); + + if (txq_id != 
IWL_CMD_QUEUE_NUM) { + sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; + sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl; + + switch (sec_ctl & TX_CMD_SEC_MSK) { + case TX_CMD_SEC_CCM: + len += CCMP_MIC_LEN; + break; + case TX_CMD_SEC_TKIP: + len += TKIP_ICV_LEN; + break; + case TX_CMD_SEC_WEP: + len += WEP_IV_LEN + WEP_ICV_LEN; + break; + } + } + + bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); + + scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + + if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id]. + tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; +} + +void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; + int txq_id = txq->q.id; + int read_ptr = txq->q.read_ptr; + u8 sta_id = 0; + __le16 bc_ent; + + WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); + + if (txq_id != IWL_CMD_QUEUE_NUM) + sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; + + bc_ent = cpu_to_le16(1 | (sta_id << 12)); + scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; + + if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id]. + tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; +} + +static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, + u16 txq_id) +{ + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = priv->scd_base_addr + + IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); + + return 0; +} +static void iwl5000_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id) +{ + /* Simply stop the queue, but don't change any configuration; + * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
*/ + iwl_write_prph(priv, + IWL50_SCD_QUEUE_STATUS_BITS(txq_id), + (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id, + int tx_fifo, int sta_id, int tid, u16 ssn_idx) +{ + unsigned long flags; + u16 ra_tid; + + if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || + (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues + <= txq_id)) { + IWL_WARN(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWL50_FIRST_AMPDU_QUEUE, + IWL50_FIRST_AMPDU_QUEUE + + priv->cfg->num_of_ampdu_queues - 1); + return -EINVAL; + } + + ra_tid = BUILD_RAxTID(sta_id, tid); + + /* Modify device's station table to Tx this TID */ + iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); + + spin_lock_irqsave(&priv->lock, flags); + + /* Stop this Tx queue before configuring it */ + iwl5000_tx_queue_stop_scheduler(priv, txq_id); + + /* Map receiver-address / traffic-ID to this queue */ + iwl5000_tx_queue_set_q2ratid(priv, ra_tid, txq_id); + + /* Set this queue as a chain-building queue */ + iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + /* Enable aggregations for this queue */ + iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id)); + + /* Place first TFD at index corresponding to start sequence number. + * Assumes that ssn_idx is valid (!= 0xFFF) */ + priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx); + + /* Set up Tx window size and frame limit for this queue */ + iwl_write_targ_mem(priv, priv->scd_base_addr + + IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + + sizeof(u32), + ((SCD_WIN_SIZE << + IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & + IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | + ((SCD_FRAME_LIMIT << + IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); + + iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id)); + + /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ + iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo) +{ + if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || + (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues + <= txq_id)) { + IWL_ERR(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWL50_FIRST_AMPDU_QUEUE, + IWL50_FIRST_AMPDU_QUEUE + + priv->cfg->num_of_ampdu_queues - 1); + return -EINVAL; + } + + iwl5000_tx_queue_stop_scheduler(priv, txq_id); + + iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id)); + + priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx); + + iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id)); + iwl_txq_ctx_deactivate(priv, txq_id); + iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); + + return 0; +} + +u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) +{ + u16 size = (u16)sizeof(struct iwl_addsta_cmd); + struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data; + memcpy(addsta, cmd, size); + /* reserved in 5000 */ + addsta->rate_n_flags = cpu_to_le16(0); + return size; +} + + +/* + * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask; + * must be called under priv->lock and mac access + */ +void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask) +{ + iwl_write_prph(priv, IWL50_SCD_TXFACT, mask); +} + + +static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) +{ + return le32_to_cpup((__le32 
*)&tx_resp->status + + tx_resp->frame_count) & MAX_SN; +} + +static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl5000_tx_resp *tx_resp, + int txq_id, u16 start_idx) +{ + u16 status; + struct agg_tx_status *frame_status = &tx_resp->status; + struct ieee80211_tx_info *info = NULL; + struct ieee80211_hdr *hdr = NULL; + u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); + int i, sh, idx; + u16 seq; + + if (agg->wait_for_ba) + IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n"); + + agg->frame_count = tx_resp->frame_count; + agg->start_idx = start_idx; + agg->rate_n_flags = rate_n_flags; + agg->bitmap = 0; + + /* # frames attempted by Tx command */ + if (agg->frame_count == 1) { + /* Only one frame was attempted; no block-ack will arrive */ + status = le16_to_cpu(frame_status[0].status); + idx = start_idx; + + /* FIXME: code repetition */ + IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", + agg->frame_count, agg->start_idx, idx); + + info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags &= ~IEEE80211_TX_CTL_AMPDU; + info->flags |= iwl_tx_status_to_mac80211(status); + iwl_hwrate_to_tx_control(priv, rate_n_flags, info); + + /* FIXME: code repetition end */ + + IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", + status & 0xff, tx_resp->failure_frame); + IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags); + + agg->wait_for_ba = 0; + } else { + /* Two or more frames were attempted; expect block-ack */ + u64 bitmap = 0; + int start = agg->start_idx; + + /* Construct bit-map of pending frames within Tx window */ + for (i = 0; i < agg->frame_count; i++) { + u16 sc; + status = le16_to_cpu(frame_status[i].status); + seq = le16_to_cpu(frame_status[i].sequence); + idx = SEQ_TO_INDEX(seq); + txq_id = SEQ_TO_QUEUE(seq); + + if (status & (AGG_TX_STATE_FEW_BYTES_MSK | + AGG_TX_STATE_ABORT_MSK)) + continue; + + IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", + agg->frame_count, txq_id, idx); + + hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); + if (!hdr) { + IWL_ERR(priv, + "BUG_ON idx doesn't point to valid skb" + " idx=%d, txq_id=%d\n", idx, txq_id); + return -1; + } + + sc = le16_to_cpu(hdr->seq_ctrl); + if (idx != (SEQ_TO_SN(sc) & 0xff)) { + IWL_ERR(priv, + "BUG_ON idx doesn't match seq control" + " idx=%d, seq_idx=%d, seq=%d\n", + idx, SEQ_TO_SN(sc), + hdr->seq_ctrl); + return -1; + } + + IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", + i, idx, SEQ_TO_SN(sc)); + + sh = idx - start; + if (sh > 64) { + sh = (start - idx) + 0xff; + bitmap = bitmap << sh; + sh = 0; + start = idx; + } else if (sh < -64) + sh = 0xff - (start - idx); + else if (sh < 0) { + sh = start - idx; + start = idx; + bitmap = bitmap << sh; + sh = 0; + } + bitmap |= 1ULL << sh; + IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", + start, (unsigned long long)bitmap); + } + + agg->bitmap = bitmap; + agg->start_idx = start; + IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", + agg->frame_count, agg->start_idx, + (unsigned long long)agg->bitmap); + + if (bitmap) + agg->wait_for_ba = 1; + } + return 0; +} + +static void iwl5000_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct 
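A simplified picture of the block-ack bitmap assembled in iwl5000_tx_status_reply_tx() above: attempted frames sit in a ring of transmit descriptors, and the driver records each frame's offset from the first attempted frame as one bit of a 64-bit map. The sketch below keeps only the forward-offset case and assumes a 256-entry ring (inferred from the 0xff masking in the surrounding code); the window-shifting corner cases handled above are omitted.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256           /* TX descriptor ring; indices wrap at 0xff */

/*
 * Mark the frame at ring index 'idx' as awaiting block-ack, relative to the
 * first attempted frame at ring index 'start'.  Offsets wrap modulo the ring
 * size; a full implementation must also re-anchor the window when the offset
 * would exceed 64.
 */
static uint64_t agg_mark_frame(uint64_t bitmap, int start, int idx)
{
        int off = (idx - start) & (RING_SIZE - 1);  /* forward distance */

        if (off < 64)
                bitmap |= 1ULL << off;
        return bitmap;
}

int main(void)
{
        uint64_t bitmap = 0;
        int start = 250;

        bitmap = agg_mark_frame(bitmap, start, 250); /* offset 0          */
        bitmap = agg_mark_frame(bitmap, start, 254); /* offset 4          */
        bitmap = agg_mark_frame(bitmap, start, 2);   /* wraps to offset 8 */

        printf("pending bitmap = 0x%llx\n", (unsigned long long)bitmap);
        return 0;
}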
ieee80211_tx_info *info; + struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le16_to_cpu(tx_resp->status.status); + int tid; + int sta_id; + int freed; + + if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.write_ptr, + txq->q.read_ptr); + return; + } + + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); + memset(&info->status, 0, sizeof(info->status)); + + tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS; + sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS; + + if (txq->sched_retry) { + const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp); + struct iwl_ht_agg *agg = NULL; + + agg = &priv->stations[sta_id].tid[tid].agg; + + iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); + + /* check if BAR is needed */ + if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) + info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + + if (txq->q.read_ptr != (scd_ssn & 0xff)) { + index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); + IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim " + "scd_ssn=%d idx=%d txq=%d swq=%d\n", + scd_ssn , index, txq_id, txq->swq_id); + + freed = iwl_tx_queue_reclaim(priv, txq_id, index); + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); + + if (priv->mac80211_registered && + (iwl_queue_space(&txq->q) > txq->q.low_mark) && + (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { + if (agg->state == IWL_AGG_OFF) + iwl_wake_queue(priv, txq_id); + else + iwl_wake_queue(priv, txq->swq_id); + } + } + } else { + BUG_ON(txq_id != txq->swq_id); + + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags |= iwl_tx_status_to_mac80211(status); + iwl_hwrate_to_tx_control(priv, + le32_to_cpu(tx_resp->rate_n_flags), + info); + + IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags " + "0x%x retries %d\n", + txq_id, + iwl_get_tx_fail_reason(status), status, + le32_to_cpu(tx_resp->rate_n_flags), + tx_resp->failure_frame); + + freed = iwl_tx_queue_reclaim(priv, txq_id, index); + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); + + if (priv->mac80211_registered && + (iwl_queue_space(&txq->q) > txq->q.low_mark)) + iwl_wake_queue(priv, txq_id); + } + + iwl_txq_check_empty(priv, sta_id, tid, txq_id); + + if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) + IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); +} + +/* Currently 5000 is the superset of everything */ +u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len) +{ + return len; +} + +void iwl5000_setup_deferred_work(struct iwl_priv *priv) +{ + /* in 5000 the tx power calibration is done in uCode */ + priv->disable_tx_power_cal = 1; +} + +void iwl5000_rx_handler_setup(struct iwl_priv *priv) +{ + /* init calibration handlers */ + priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = + iwl5000_rx_calib_result; + priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] = + iwl5000_rx_calib_complete; + priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx; +} + + +int iwl5000_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= IWL50_RTC_DATA_LOWER_BOUND) && + (addr < IWL50_RTC_DATA_UPPER_BOUND); +} + +static int iwl5000_send_rxon_assoc(struct iwl_priv *priv) +{ + int ret = 0; + struct iwl5000_rxon_assoc_cmd rxon_assoc; + const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; + const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; + + if ((rxon1->flags == rxon2->flags) && + 
(rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_ht_single_stream_basic_rates == + rxon2->ofdm_ht_single_stream_basic_rates) && + (rxon1->ofdm_ht_dual_stream_basic_rates == + rxon2->ofdm_ht_dual_stream_basic_rates) && + (rxon1->ofdm_ht_triple_stream_basic_rates == + rxon2->ofdm_ht_triple_stream_basic_rates) && + (rxon1->acquisition_data == rxon2->acquisition_data) && + (rxon1->rx_chain == rxon2->rx_chain) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = priv->staging_rxon.flags; + rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; + rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; + rxon_assoc.reserved1 = 0; + rxon_assoc.reserved2 = 0; + rxon_assoc.reserved3 = 0; + rxon_assoc.ofdm_ht_single_stream_basic_rates = + priv->staging_rxon.ofdm_ht_single_stream_basic_rates; + rxon_assoc.ofdm_ht_dual_stream_basic_rates = + priv->staging_rxon.ofdm_ht_dual_stream_basic_rates; + rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain; + rxon_assoc.ofdm_ht_triple_stream_basic_rates = + priv->staging_rxon.ofdm_ht_triple_stream_basic_rates; + rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data; + + ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, + sizeof(rxon_assoc), &rxon_assoc, NULL); + if (ret) + return ret; + + return ret; +} +int iwl5000_send_tx_power(struct iwl_priv *priv) +{ + struct iwl5000_tx_power_dbm_cmd tx_power_cmd; + u8 tx_ant_cfg_cmd; + + /* half dBm need to multiply */ + tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); + + if (priv->tx_power_lmt_in_half_dbm && + priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) { + /* + * For the newer devices which using enhanced/extend tx power + * table in EEPROM, the format is in half dBm. driver need to + * convert to dBm format before report to mac80211. + * By doing so, there is a possibility of 1/2 dBm resolution + * lost. driver will perform "round-up" operation before + * reporting, but it will cause 1/2 dBm tx power over the + * regulatory limit. 
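Aside: the half-dBm handling described in this comment is a unit conversion plus a clamp. A standalone sketch with plain integer types and illustrative names in place of tx_power_user_lmt/tx_power_lmt_in_half_dbm:

#include <stdio.h>

/*
 * The user limit is configured in whole dBm, the enhanced EEPROM limit is
 * already in half-dBm units: convert the user limit and keep the lower one.
 */
static int tx_power_limit_half_dbm(int user_lmt_dbm, int eeprom_lmt_half_dbm)
{
        int lmt = 2 * user_lmt_dbm;                /* dBm -> half-dBm */

        if (eeprom_lmt_half_dbm && eeprom_lmt_half_dbm < lmt)
                lmt = eeprom_lmt_half_dbm;         /* EEPROM limit wins */
        return lmt;
}

int main(void)
{
        /* user asks for 16 dBm, EEPROM allows 15.5 dBm (31 half-dBm units) */
        printf("limit = %d half-dBm\n", tx_power_limit_half_dbm(16, 31));
        return 0;
}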
Perform the checking here, if the + * "tx_power_user_lmt" is higher than EEPROM value (in + * half-dBm format), lower the tx power based on EEPROM + */ + tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm; + } + tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED; + tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO; + + if (IWL_UCODE_API(priv->ucode_ver) == 1) + tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1; + else + tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; + + return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd, + sizeof(tx_power_cmd), &tx_power_cmd, + NULL); +} + +void iwl5000_temperature(struct iwl_priv *priv) +{ + /* store temperature from statistics (in Celsius) */ + priv->temperature = le32_to_cpu(priv->statistics.general.temperature); + iwl_tt_handler(priv); +} + +static void iwl5150_temperature(struct iwl_priv *priv) +{ + u32 vt = 0; + s32 offset = iwl_temp_calib_to_offset(priv); + + vt = le32_to_cpu(priv->statistics.general.temperature); + vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; + /* now vt hold the temperature in Kelvin */ + priv->temperature = KELVIN_TO_CELSIUS(vt); + iwl_tt_handler(priv); +} + +/* Calc max signal level (dBm) among 3 possible receivers */ +int iwl5000_calc_rssi(struct iwl_priv *priv, + struct iwl_rx_phy_res *rx_resp) +{ + /* data from PHY/DSP regarding signal strength, etc., + * contents are always there, not configurable by host + */ + struct iwl5000_non_cfg_phy *ncphy = + (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf; + u32 val, rssi_a, rssi_b, rssi_c, max_rssi; + u8 agc; + + val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]); + agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the digital signal processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's automatic gain control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. + */ + val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]); + rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS; + rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS; + val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]); + rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS; + + max_rssi = max_t(u32, rssi_a, rssi_b); + max_rssi = max_t(u32, max_rssi, rssi_c); + + IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", + rssi_a, rssi_b, rssi_c, max_rssi, agc); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. 
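A self-contained sketch of the conversion described in this comment: take the strongest per-chain RSSI reading and subtract the AGC gain plus a fixed calibration offset. The offset value below is a stand-in for the driver's IWL49_RSSI_OFFSET constant.

#include <stdio.h>

#define RSSI_OFFSET 50      /* placeholder for the driver's calibration constant */

static int calc_rssi_dbm(unsigned rssi_a, unsigned rssi_b, unsigned rssi_c,
                         unsigned agc)
{
        unsigned max_rssi = rssi_a;

        if (rssi_b > max_rssi)
                max_rssi = rssi_b;
        if (rssi_c > max_rssi)
                max_rssi = rssi_c;

        /* higher AGC gain means a weaker received signal */
        return (int)max_rssi - (int)agc - RSSI_OFFSET;
}

int main(void)
{
        printf("rssi = %d dBm\n", calc_rssi_dbm(97, 84, 0, 92));
        return 0;
}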
*/ + return max_rssi - agc - IWL49_RSSI_OFFSET; +} + +static int iwl5000_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) +{ + struct iwl_tx_ant_config_cmd tx_ant_cmd = { + .valid = cpu_to_le32(valid_tx_ant), + }; + + if (IWL_UCODE_API(priv->ucode_ver) > 1) { + IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); + return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, + sizeof(struct iwl_tx_ant_config_cmd), + &tx_ant_cmd); + } else { + IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n"); + return -EOPNOTSUPP; + } +} + + +#define IWL5000_UCODE_GET(item) \ +static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\ + u32 api_ver) \ +{ \ + if (api_ver <= 2) \ + return le32_to_cpu(ucode->u.v1.item); \ + return le32_to_cpu(ucode->u.v2.item); \ +} + +static u32 iwl5000_ucode_get_header_size(u32 api_ver) +{ + if (api_ver <= 2) + return UCODE_HEADER_SIZE(1); + return UCODE_HEADER_SIZE(2); +} + +static u32 iwl5000_ucode_get_build(const struct iwl_ucode_header *ucode, + u32 api_ver) +{ + if (api_ver <= 2) + return 0; + return le32_to_cpu(ucode->u.v2.build); +} + +static u8 *iwl5000_ucode_get_data(const struct iwl_ucode_header *ucode, + u32 api_ver) +{ + if (api_ver <= 2) + return (u8 *) ucode->u.v1.data; + return (u8 *) ucode->u.v2.data; +} + +IWL5000_UCODE_GET(inst_size); +IWL5000_UCODE_GET(data_size); +IWL5000_UCODE_GET(init_size); +IWL5000_UCODE_GET(init_data_size); +IWL5000_UCODE_GET(boot_size); + +static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel) +{ + struct iwl5000_channel_switch_cmd cmd; + const struct iwl_channel_info *ch_info; + struct iwl_host_cmd hcmd = { + .id = REPLY_CHANNEL_SWITCH, + .len = sizeof(cmd), + .flags = CMD_SIZE_HUGE, + .data = &cmd, + }; + + IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", + priv->active_rxon.channel, channel); + cmd.band = priv->band == IEEE80211_BAND_2GHZ; + cmd.channel = cpu_to_le16(channel); + cmd.rxon_flags = priv->staging_rxon.flags; + cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + ch_info = iwl_get_channel_info(priv, priv->band, channel); + if (ch_info) + cmd.expect_beacon = is_channel_radar(ch_info); + else { + IWL_ERR(priv, "invalid channel switch from %u to %u\n", + priv->active_rxon.channel, channel); + return -EFAULT; + } + priv->switch_rxon.channel = cpu_to_le16(channel); + priv->switch_rxon.switch_in_progress = true; + + return iwl_send_cmd_sync(priv, &hcmd); +} + +struct iwl_hcmd_ops iwl5000_hcmd = { + .rxon_assoc = iwl5000_send_rxon_assoc, + .commit_rxon = iwl_commit_rxon, + .set_rxon_chain = iwl_set_rxon_chain, + .set_tx_ant = iwl5000_send_tx_ant_config, +}; + +struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = { + .get_hcmd_size = iwl5000_get_hcmd_size, + .build_addsta_hcmd = iwl5000_build_addsta_hcmd, + .gain_computation = iwl5000_gain_computation, + .chain_noise_reset = iwl5000_chain_noise_reset, + .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag, + .calc_rssi = iwl5000_calc_rssi, +}; + +struct iwl_ucode_ops iwl5000_ucode = { + .get_header_size = iwl5000_ucode_get_header_size, + .get_build = iwl5000_ucode_get_build, + .get_inst_size = iwl5000_ucode_get_inst_size, + .get_data_size = iwl5000_ucode_get_data_size, + .get_init_size = iwl5000_ucode_get_init_size, + .get_init_data_size = iwl5000_ucode_get_init_data_size, + .get_boot_size = iwl5000_ucode_get_boot_size, + .get_data = iwl5000_ucode_get_data, +}; + +struct iwl_lib_ops iwl5000_lib = { + .set_hw_params = iwl5000_hw_set_hw_params, + 
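The IWL5000_UCODE_GET() block above is a small code-generation macro: a single definition stamps out one accessor per firmware-header field, each choosing the v1 or v2 layout from the API version. A stripped-down sketch of the same pattern, using made-up field names and host-endian integers instead of the driver's __le32 fields:

#include <stdint.h>
#include <stdio.h>

/* Two header layouts selected by the API version carried in the file. */
struct hdr_v1 { uint32_t inst_size; uint32_t data_size; };
struct hdr_v2 { uint32_t build; uint32_t inst_size; uint32_t data_size; };

struct fw_header {
        uint32_t api_ver;
        union {
                struct hdr_v1 v1;
                struct hdr_v2 v2;
        } u;
};

/* Generate one accessor per field; each switches on the header version. */
#define FW_GET(field)                                                  \
static uint32_t fw_get_##field(const struct fw_header *h)             \
{                                                                      \
        return (h->api_ver <= 2) ? h->u.v1.field : h->u.v2.field;     \
}

FW_GET(inst_size)
FW_GET(data_size)

int main(void)
{
        struct fw_header h = { .api_ver = 4, .u.v2 = { 100, 8192, 4096 } };

        printf("inst=%u data=%u\n", (unsigned)fw_get_inst_size(&h),
               (unsigned)fw_get_data_size(&h));
        return 0;
}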
.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, + .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, + .txq_set_sched = iwl5000_txq_set_sched, + .txq_agg_enable = iwl5000_txq_agg_enable, + .txq_agg_disable = iwl5000_txq_agg_disable, + .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl_hw_txq_free_tfd, + .txq_init = iwl_hw_tx_queue_init, + .rx_handler_setup = iwl5000_rx_handler_setup, + .setup_deferred_work = iwl5000_setup_deferred_work, + .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, + .dump_nic_event_log = iwl_dump_nic_event_log, + .dump_nic_error_log = iwl_dump_nic_error_log, + .dump_csr = iwl_dump_csr, + .dump_fh = iwl_dump_fh, + .load_ucode = iwl5000_load_ucode, + .init_alive_start = iwl5000_init_alive_start, + .alive_notify = iwl5000_alive_notify, + .send_tx_power = iwl5000_send_tx_power, + .update_chain_flags = iwl_update_chain_flags, + .set_channel_switch = iwl5000_hw_channel_switch, + .apm_ops = { + .init = iwl_apm_init, + .stop = iwl_apm_stop, + .config = iwl5000_nic_config, + .set_pwr_src = iwl_set_pwr_src, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_5000_REG_BAND_1_CHANNELS, + EEPROM_5000_REG_BAND_2_CHANNELS, + EEPROM_5000_REG_BAND_3_CHANNELS, + EEPROM_5000_REG_BAND_4_CHANNELS, + EEPROM_5000_REG_BAND_5_CHANNELS, + EEPROM_5000_REG_BAND_24_HT40_CHANNELS, + EEPROM_5000_REG_BAND_52_HT40_CHANNELS + }, + .verify_signature = iwlcore_eeprom_verify_signature, + .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, + .release_semaphore = iwlcore_eeprom_release_semaphore, + .calib_version = iwl5000_eeprom_calib_version, + .query_addr = iwl5000_eeprom_query_addr, + }, + .post_associate = iwl_post_associate, + .isr = iwl_isr_ict, + .config_ap = iwl_config_ap, + .temp_ops = { + .temperature = iwl5000_temperature, + .set_ct_kill = iwl5000_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, +}; + +static struct iwl_lib_ops iwl5150_lib = { + .set_hw_params = iwl5000_hw_set_hw_params, + .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, + .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, + .txq_set_sched = iwl5000_txq_set_sched, + .txq_agg_enable = iwl5000_txq_agg_enable, + .txq_agg_disable = iwl5000_txq_agg_disable, + .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl_hw_txq_free_tfd, + .txq_init = iwl_hw_tx_queue_init, + .rx_handler_setup = iwl5000_rx_handler_setup, + .setup_deferred_work = iwl5000_setup_deferred_work, + .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, + .dump_nic_event_log = iwl_dump_nic_event_log, + .dump_nic_error_log = iwl_dump_nic_error_log, + .dump_csr = iwl_dump_csr, + .load_ucode = iwl5000_load_ucode, + .init_alive_start = iwl5000_init_alive_start, + .alive_notify = iwl5000_alive_notify, + .send_tx_power = iwl5000_send_tx_power, + .update_chain_flags = iwl_update_chain_flags, + .set_channel_switch = iwl5000_hw_channel_switch, + .apm_ops = { + .init = iwl_apm_init, + .stop = iwl_apm_stop, + .config = iwl5000_nic_config, + .set_pwr_src = iwl_set_pwr_src, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_5000_REG_BAND_1_CHANNELS, + EEPROM_5000_REG_BAND_2_CHANNELS, + EEPROM_5000_REG_BAND_3_CHANNELS, + EEPROM_5000_REG_BAND_4_CHANNELS, + EEPROM_5000_REG_BAND_5_CHANNELS, + EEPROM_5000_REG_BAND_24_HT40_CHANNELS, + EEPROM_5000_REG_BAND_52_HT40_CHANNELS + }, + .verify_signature = iwlcore_eeprom_verify_signature, + .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, + .release_semaphore = iwlcore_eeprom_release_semaphore, + 
.calib_version = iwl5000_eeprom_calib_version, + .query_addr = iwl5000_eeprom_query_addr, + }, + .post_associate = iwl_post_associate, + .isr = iwl_isr_ict, + .config_ap = iwl_config_ap, + .temp_ops = { + .temperature = iwl5150_temperature, + .set_ct_kill = iwl5150_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, +}; + +static const struct iwl_ops iwl5000_ops = { + .ucode = &iwl5000_ucode, + .lib = &iwl5000_lib, + .hcmd = &iwl5000_hcmd, + .utils = &iwl5000_hcmd_utils, + .led = &iwlagn_led_ops, +}; + +static const struct iwl_ops iwl5150_ops = { + .ucode = &iwl5000_ucode, + .lib = &iwl5150_lib, + .hcmd = &iwl5000_hcmd, + .utils = &iwl5000_hcmd_utils, + .led = &iwlagn_led_ops, +}; + +struct iwl_mod_params iwl50_mod_params = { + .amsdu_size_8K = 1, + .restart_fw = 1, + /* the rest are 0 by default */ +}; + + +struct iwl_cfg iwl5300_agn_cfg = { + .name = "5300AGN", + .fw_name_pre = IWL5000_FW_PRE, + .ucode_api_max = IWL5000_UCODE_API_MAX, + .ucode_api_min = IWL5000_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .ops = &iwl5000_ops, + .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, + .eeprom_ver = EEPROM_5000_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_ABC, + .valid_rx_ant = ANT_ABC, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .set_l0s = true, + .use_bsm = false, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +struct iwl_cfg iwl5100_bgn_cfg = { + .name = "5100BGN", + .fw_name_pre = IWL5000_FW_PRE, + .ucode_api_max = IWL5000_UCODE_API_MAX, + .ucode_api_min = IWL5000_UCODE_API_MIN, + .sku = IWL_SKU_G|IWL_SKU_N, + .ops = &iwl5000_ops, + .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, + .eeprom_ver = EEPROM_5000_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_B, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .set_l0s = true, + .use_bsm = false, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +struct iwl_cfg iwl5100_abg_cfg = { + .name = "5100ABG", + .fw_name_pre = IWL5000_FW_PRE, + .ucode_api_max = IWL5000_UCODE_API_MAX, + .ucode_api_min = IWL5000_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G, + .ops = &iwl5000_ops, + .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, + .eeprom_ver = EEPROM_5000_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_B, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .set_l0s = true, + .use_bsm = false, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +struct iwl_cfg iwl5100_agn_cfg = { + .name = "5100AGN", + .fw_name_pre = IWL5000_FW_PRE, + .ucode_api_max = IWL5000_UCODE_API_MAX, + .ucode_api_min = 
IWL5000_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .ops = &iwl5000_ops, + .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, + .eeprom_ver = EEPROM_5000_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_B, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .set_l0s = true, + .use_bsm = false, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +struct iwl_cfg iwl5350_agn_cfg = { + .name = "5350AGN", + .fw_name_pre = IWL5000_FW_PRE, + .ucode_api_max = IWL5000_UCODE_API_MAX, + .ucode_api_min = IWL5000_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .ops = &iwl5000_ops, + .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, + .eeprom_ver = EEPROM_5050_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_ABC, + .valid_rx_ant = ANT_ABC, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .set_l0s = true, + .use_bsm = false, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +struct iwl_cfg iwl5150_agn_cfg = { + .name = "5150AGN", + .fw_name_pre = IWL5150_FW_PRE, + .ucode_api_max = IWL5150_UCODE_API_MAX, + .ucode_api_min = IWL5150_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .ops = &iwl5150_ops, + .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, + .eeprom_ver = EEPROM_5050_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_A, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .set_l0s = true, + .use_bsm = false, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +struct iwl_cfg iwl5150_abg_cfg = { + .name = "5150ABG", + .fw_name_pre = IWL5150_FW_PRE, + .ucode_api_max = IWL5150_UCODE_API_MAX, + .ucode_api_min = IWL5150_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G, + .ops = &iwl5150_ops, + .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, + .eeprom_ver = EEPROM_5050_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_A, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, + .set_l0s = true, + .use_bsm = false, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); + +module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, S_IRUGO); +MODULE_PARM_DESC(swcrypto50, + 
"using software crypto engine (default 0 [hardware])\n"); +module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, S_IRUGO); +MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series"); +module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, S_IRUGO); +MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality"); +module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, + int, S_IRUGO); +MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series"); +module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error"); diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h new file mode 100644 index 0000000..ddba399 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h @@ -0,0 +1,81 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-6000-hw.h) only for hardware-related definitions. + * Use iwl-5000-commands.h for uCode API definitions. + */ + +#ifndef __iwl_6000_hw_h__ +#define __iwl_6000_hw_h__ + +#define IWL60_RTC_INST_LOWER_BOUND (0x000000) +#define IWL60_RTC_INST_UPPER_BOUND (0x040000) +#define IWL60_RTC_DATA_LOWER_BOUND (0x800000) +#define IWL60_RTC_DATA_UPPER_BOUND (0x814000) +#define IWL60_RTC_INST_SIZE \ + (IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND) +#define IWL60_RTC_DATA_SIZE \ + (IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND) + +#endif /* __iwl_6000_hw_h__ */ + diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c new file mode 100644 index 0000000..c4844ad --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -0,0 +1,548 @@ +/****************************************************************************** + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-sta.h" +#include "iwl-helpers.h" +#include "iwl-5000-hw.h" +#include "iwl-6000-hw.h" +#include "iwl-agn-led.h" + +/* Highest firmware API version supported */ +#define IWL6000_UCODE_API_MAX 4 +#define IWL6050_UCODE_API_MAX 4 + +/* Lowest firmware API version supported */ +#define IWL6000_UCODE_API_MIN 4 +#define IWL6050_UCODE_API_MIN 4 + +#define IWL6000_FW_PRE "iwlwifi-6000-" +#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" +#define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api) + +#define IWL6050_FW_PRE "iwlwifi-6050-" +#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode" +#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api) + +static void iwl6000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; + priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; +} + +/* Indicate calibration version to uCode. */ +static void iwl6050_set_calib_version(struct iwl_priv *priv) +{ + if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6) + iwl_set_bit(priv, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); +} + +/* NIC configuration for 6000 series */ +static void iwl6000_nic_config(struct iwl_priv *priv) +{ + u16 radio_cfg; + + radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); + + /* write radio config values to register */ + if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | + EEPROM_RF_CFG_STEP_MSK(radio_cfg) | + EEPROM_RF_CFG_DASH_MSK(radio_cfg)); + + /* set CSR_HW_CONFIG_REG for uCode use */ + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + /* no locking required for register write */ + if (priv->cfg->pa_type == IWL_PA_INTERNAL) { + /* 2x2 IPA phy type */ + iwl_write32(priv, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); + } + /* else do nothing, uCode configured */ + if (priv->cfg->ops->lib->temp_ops.set_calib_version) + priv->cfg->ops->lib->temp_ops.set_calib_version(priv); +} + +static struct iwl_sensitivity_ranges iwl6000_sensitivity = { + .min_nrg_cck = 97, + .max_nrg_cck = 0, /* not used, set to 0 */ + .auto_corr_min_ofdm = 80, + .auto_corr_min_ofdm_mrc = 128, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 192, + + .auto_corr_max_ofdm = 145, + .auto_corr_max_ofdm_mrc = 232, + .auto_corr_max_ofdm_x1 = 110, + .auto_corr_max_ofdm_mrc_x1 = 232, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 175, + .auto_corr_min_cck_mrc = 160, + .auto_corr_max_cck_mrc = 310, + .nrg_th_cck = 97, + .nrg_th_ofdm = 100, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) +{ + if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && + priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) + priv->cfg->num_of_queues = + priv->cfg->mod_params->num_of_queues; + + priv->hw_params.max_txq_num = priv->cfg->num_of_queues; + priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; + 
priv->hw_params.scd_bc_tbls_size = + priv->cfg->num_of_queues * + sizeof(struct iwl5000_scd_bc_tbl); + priv->hw_params.tfd_size = sizeof(struct iwl_tfd); + priv->hw_params.max_stations = IWL5000_STATION_COUNT; + priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; + + priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE; + priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; + + priv->hw_params.max_bsm_size = 0; + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | + BIT(IEEE80211_BAND_5GHZ); + priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; + + priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); + priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); + priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; + priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; + + if (priv->cfg->ops->lib->temp_ops.set_ct_kill) + priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); + + /* Set initial sensitivity parameters */ + /* Set initial calibration set */ + priv->hw_params.sens = &iwl6000_sensitivity; + switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { + case CSR_HW_REV_TYPE_6x50: + priv->hw_params.calib_init_cfg = + BIT(IWL_CALIB_XTAL) | + BIT(IWL_CALIB_DC) | + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_BASE_BAND); + + break; + default: + priv->hw_params.calib_init_cfg = + BIT(IWL_CALIB_XTAL) | + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_BASE_BAND); + break; + } + + return 0; +} + +static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel) +{ + struct iwl6000_channel_switch_cmd cmd; + const struct iwl_channel_info *ch_info; + struct iwl_host_cmd hcmd = { + .id = REPLY_CHANNEL_SWITCH, + .len = sizeof(cmd), + .flags = CMD_SIZE_HUGE, + .data = &cmd, + }; + + IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", + priv->active_rxon.channel, channel); + + cmd.band = priv->band == IEEE80211_BAND_2GHZ; + cmd.channel = cpu_to_le16(channel); + cmd.rxon_flags = priv->staging_rxon.flags; + cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + ch_info = iwl_get_channel_info(priv, priv->band, channel); + if (ch_info) + cmd.expect_beacon = is_channel_radar(ch_info); + else { + IWL_ERR(priv, "invalid channel switch from %u to %u\n", + priv->active_rxon.channel, channel); + return -EFAULT; + } + priv->switch_rxon.channel = cpu_to_le16(channel); + priv->switch_rxon.switch_in_progress = true; + + return iwl_send_cmd_sync(priv, &hcmd); +} + +static struct iwl_lib_ops iwl6000_lib = { + .set_hw_params = iwl6000_hw_set_hw_params, + .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, + .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, + .txq_set_sched = iwl5000_txq_set_sched, + .txq_agg_enable = iwl5000_txq_agg_enable, + .txq_agg_disable = iwl5000_txq_agg_disable, + .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl_hw_txq_free_tfd, + .txq_init = iwl_hw_tx_queue_init, + .rx_handler_setup = iwl5000_rx_handler_setup, + .setup_deferred_work = iwl5000_setup_deferred_work, + .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, + .load_ucode = iwl5000_load_ucode, + .dump_nic_event_log = iwl_dump_nic_event_log, + .dump_nic_error_log = iwl_dump_nic_error_log, + .dump_csr = iwl_dump_csr, + .dump_fh = iwl_dump_fh, + .init_alive_start = iwl5000_init_alive_start, + .alive_notify = iwl5000_alive_notify, + .send_tx_power = iwl5000_send_tx_power, + .update_chain_flags = iwl_update_chain_flags, + .set_channel_switch = 
iwl6000_hw_channel_switch, + .apm_ops = { + .init = iwl_apm_init, + .stop = iwl_apm_stop, + .config = iwl6000_nic_config, + .set_pwr_src = iwl_set_pwr_src, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_5000_REG_BAND_1_CHANNELS, + EEPROM_5000_REG_BAND_2_CHANNELS, + EEPROM_5000_REG_BAND_3_CHANNELS, + EEPROM_5000_REG_BAND_4_CHANNELS, + EEPROM_5000_REG_BAND_5_CHANNELS, + EEPROM_5000_REG_BAND_24_HT40_CHANNELS, + EEPROM_5000_REG_BAND_52_HT40_CHANNELS + }, + .verify_signature = iwlcore_eeprom_verify_signature, + .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, + .release_semaphore = iwlcore_eeprom_release_semaphore, + .calib_version = iwl5000_eeprom_calib_version, + .query_addr = iwl5000_eeprom_query_addr, + .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, + }, + .post_associate = iwl_post_associate, + .isr = iwl_isr_ict, + .config_ap = iwl_config_ap, + .temp_ops = { + .temperature = iwl5000_temperature, + .set_ct_kill = iwl6000_set_ct_threshold, + }, + .add_bcast_station = iwl_add_bcast_station, +}; + +static const struct iwl_ops iwl6000_ops = { + .ucode = &iwl5000_ucode, + .lib = &iwl6000_lib, + .hcmd = &iwl5000_hcmd, + .utils = &iwl5000_hcmd_utils, + .led = &iwlagn_led_ops, +}; + +static struct iwl_lib_ops iwl6050_lib = { + .set_hw_params = iwl6000_hw_set_hw_params, + .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, + .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, + .txq_set_sched = iwl5000_txq_set_sched, + .txq_agg_enable = iwl5000_txq_agg_enable, + .txq_agg_disable = iwl5000_txq_agg_disable, + .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl_hw_txq_free_tfd, + .txq_init = iwl_hw_tx_queue_init, + .rx_handler_setup = iwl5000_rx_handler_setup, + .setup_deferred_work = iwl5000_setup_deferred_work, + .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, + .load_ucode = iwl5000_load_ucode, + .dump_nic_event_log = iwl_dump_nic_event_log, + .dump_nic_error_log = iwl_dump_nic_error_log, + .dump_csr = iwl_dump_csr, + .dump_fh = iwl_dump_fh, + .init_alive_start = iwl5000_init_alive_start, + .alive_notify = iwl5000_alive_notify, + .send_tx_power = iwl5000_send_tx_power, + .update_chain_flags = iwl_update_chain_flags, + .set_channel_switch = iwl6000_hw_channel_switch, + .apm_ops = { + .init = iwl_apm_init, + .stop = iwl_apm_stop, + .config = iwl6000_nic_config, + .set_pwr_src = iwl_set_pwr_src, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_5000_REG_BAND_1_CHANNELS, + EEPROM_5000_REG_BAND_2_CHANNELS, + EEPROM_5000_REG_BAND_3_CHANNELS, + EEPROM_5000_REG_BAND_4_CHANNELS, + EEPROM_5000_REG_BAND_5_CHANNELS, + EEPROM_5000_REG_BAND_24_HT40_CHANNELS, + EEPROM_5000_REG_BAND_52_HT40_CHANNELS + }, + .verify_signature = iwlcore_eeprom_verify_signature, + .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, + .release_semaphore = iwlcore_eeprom_release_semaphore, + .calib_version = iwl5000_eeprom_calib_version, + .query_addr = iwl5000_eeprom_query_addr, + .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, + }, + .post_associate = iwl_post_associate, + .isr = iwl_isr_ict, + .config_ap = iwl_config_ap, + .temp_ops = { + .temperature = iwl5000_temperature, + .set_ct_kill = iwl6000_set_ct_threshold, + .set_calib_version = iwl6050_set_calib_version, + }, + .add_bcast_station = iwl_add_bcast_station, +}; + +static const struct iwl_ops iwl6050_ops = { + .ucode = &iwl5000_ucode, + .lib = &iwl6050_lib, + .hcmd = &iwl5000_hcmd, + .utils = &iwl5000_hcmd_utils, + .led = &iwlagn_led_ops, +}; + +/* + * "i": Internal configuration, 
use internal Power Amplifier + */ +struct iwl_cfg iwl6000i_2agn_cfg = { + .name = "6000 Series 2x2 AGN", + .fw_name_pre = IWL6000_FW_PRE, + .ucode_api_max = IWL6000_UCODE_API_MAX, + .ucode_api_min = IWL6000_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .ops = &iwl6000_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_BC, + .valid_rx_ant = ANT_BC, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_INTERNAL, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +struct iwl_cfg iwl6000i_2abg_cfg = { + .name = "6000 Series 2x2 ABG", + .fw_name_pre = IWL6000_FW_PRE, + .ucode_api_max = IWL6000_UCODE_API_MAX, + .ucode_api_min = IWL6000_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G, + .ops = &iwl6000_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_BC, + .valid_rx_ant = ANT_BC, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_INTERNAL, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .ht_greenfield_support = true, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +struct iwl_cfg iwl6000i_2bg_cfg = { + .name = "6000 Series 2x2 BG", + .fw_name_pre = IWL6000_FW_PRE, + .ucode_api_max = IWL6000_UCODE_API_MAX, + .ucode_api_min = IWL6000_UCODE_API_MIN, + .sku = IWL_SKU_G, + .ops = &iwl6000_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_BC, + .valid_rx_ant = ANT_BC, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_INTERNAL, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .ht_greenfield_support = true, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +struct iwl_cfg iwl6050_2agn_cfg = { + .name = "6050 Series 2x2 AGN", + .fw_name_pre = IWL6050_FW_PRE, + .ucode_api_max = IWL6050_UCODE_API_MAX, + .ucode_api_min = IWL6050_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .ops = &iwl6050_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6050_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + 
.mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x50, + .shadow_ram_support = true, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1500, +}; + +struct iwl_cfg iwl6050_2abg_cfg = { + .name = "6050 Series 2x2 ABG", + .fw_name_pre = IWL6050_FW_PRE, + .ucode_api_max = IWL6050_UCODE_API_MAX, + .ucode_api_min = IWL6050_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G, + .ops = &iwl6050_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6050_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_AB, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x50, + .shadow_ram_support = true, + .ht_greenfield_support = true, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1500, +}; + +struct iwl_cfg iwl6000_3agn_cfg = { + .name = "6000 Series 3x3 AGN", + .fw_name_pre = IWL6000_FW_PRE, + .ucode_api_max = IWL6000_UCODE_API_MAX, + .ucode_api_min = IWL6000_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .ops = &iwl6000_ops, + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_ver = EEPROM_6000_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, + .num_of_queues = IWL50_NUM_QUEUES, + .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, + .mod_params = &iwl50_mod_params, + .valid_tx_ant = ANT_ABC, + .valid_rx_ant = ANT_ABC, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .pa_type = IWL_PA_SYSTEM, + .max_ll_items = OTP_MAX_LL_ITEMS_6x00, + .shadow_ram_support = true, + .ht_greenfield_support = true, + .led_compensation = 51, + .use_rts_for_ht = true, /* use rts/cts protection */ + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .supports_idle = true, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c new file mode 100644 index 0000000..1a24946 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c @@ -0,0 +1,85 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-agn-led.h" + +/* Send led command */ +static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_LEDS_CMD, + .len = sizeof(struct iwl_led_cmd), + .data = led_cmd, + .flags = CMD_ASYNC, + .callback = NULL, + }; + u32 reg; + + reg = iwl_read32(priv, CSR_LED_REG); + if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) + iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); + + return iwl_send_cmd(priv, &cmd); +} + +/* Set led register off */ +static int iwl_led_on_reg(struct iwl_priv *priv) +{ + IWL_DEBUG_LED(priv, "led on\n"); + iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); + return 0; +} + +/* Set led register off */ +static int iwl_led_off_reg(struct iwl_priv *priv) +{ + IWL_DEBUG_LED(priv, "LED Reg off\n"); + iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF); + return 0; +} + +const struct iwl_led_ops iwlagn_led_ops = { + .cmd = iwl_send_led_cmd, + .on = iwl_led_on_reg, + .off = iwl_led_off_reg, +}; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h new file mode 100644 index 0000000..a594e4f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h @@ -0,0 +1,32 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_agn_led_h__ +#define __iwl_agn_led_h__ + +extern const struct iwl_led_ops iwlagn_led_ops; + +#endif /* __iwl_agn_led_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c new file mode 100644 index 0000000..8bf7c20 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -0,0 +1,3029 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "iwl-dev.h" +#include "iwl-sta.h" +#include "iwl-core.h" + +#define RS_NAME "iwl-agn-rs" + +#define NUM_TRY_BEFORE_ANT_TOGGLE 1 +#define IWL_NUMBER_TRY 1 +#define IWL_HT_NUMBER_TRY 3 + +#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ +#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ +#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ + +/* max allowed rate miss before sync LQ cmd */ +#define IWL_MISSED_RATE_MAX 15 +/* max time to accum history 2 seconds */ +#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ) + +static u8 rs_ht_to_legacy[] = { + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX +}; + +static const u8 ant_toggle_lookup[] = { + /*ANT_NONE -> */ ANT_NONE, + /*ANT_A -> */ ANT_B, + /*ANT_B -> */ ANT_C, + /*ANT_AB -> */ ANT_BC, + /*ANT_C -> */ ANT_A, + /*ANT_AC -> */ ANT_AB, + /*ANT_BC -> */ ANT_AC, + /*ANT_ABC -> */ ANT_ABC, +}; + +static void rs_rate_scale_perform(struct iwl_priv *priv, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta); +static void rs_fill_link_cmd(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, u32 rate_n_flags); + + +#ifdef CONFIG_MAC80211_DEBUGFS +static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index); +#else +static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{} +#endif + +/** + * The following tables contain the expected throughput metrics for all rates + * + * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits + * + * where invalid entries are zeros. 
+ * + * CCK rates are only valid in legacy table and will only be used in G + * (2.4 GHz) band. + */ + +static s32 expected_tpt_legacy[IWL_RATE_COUNT] = { + 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 +}; + +static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */ + {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */ + {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */ + {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */ +}; + +static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ + {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ + {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */ + {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */ + {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */ + {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */ + {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/ +}; + +static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ + {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ + {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */ + {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */ + {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */ + {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */ + {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */ + {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */ + {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */ + {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */ +}; + +/* mbps, mcs */ +static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { + { "1", "BPSK DSSS"}, + { "2", "QPSK DSSS"}, + {"5.5", "BPSK CCK"}, + { "11", "QPSK CCK"}, + { "6", "BPSK 1/2"}, + { "9", "BPSK 1/2"}, + { "12", "QPSK 1/2"}, + { "18", "QPSK 3/4"}, + { "24", "16QAM 1/2"}, + { "36", "16QAM 3/4"}, + { "48", "64QAM 2/3"}, + { "54", "64QAM 3/4"}, + { "60", "64QAM 5/6"}, +}; + +#define MCS_INDEX_PER_STREAM (8) + +static inline u8 rs_extract_rate(u32 rate_n_flags) +{ + return (u8)(rate_n_flags & 0xFF); +} + +static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) +{ + return (ant_type & valid_antenna) == ant_type; +} + +/* + * removes the old data from the statistics. All data that is older than + * TID_MAX_TIME_DIFF, will be deleted. 
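The four rows of each expected-throughput table above follow the order given in their trailing comments (normal, short GI, aggregation, aggregation with short GI), so a rate-scaling pass can pick a row from two flags and then index by rate. A small sketch of that lookup; the numbers are copied from expected_tpt_siso20MHz above, and the row-selection helper is illustrative rather than the driver's own function:

#include <stdio.h>

#define RATE_COUNT 13

/* Row order matches the tables above: normal, SGI, AGG, AGG+SGI. */
static const int tpt_siso20[4][RATE_COUNT] = {
        {0, 0, 0, 0,  42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm    */
        {0, 0, 0, 0,  46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI     */
        {0, 0, 0, 0,  48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG     */
        {0, 0, 0, 0,  53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
};

/* Pick the row from the aggregation and short-guard-interval flags. */
static int expected_tpt(int is_agg, int is_sgi, int rate_idx)
{
        int row = (is_agg ? 2 : 0) + (is_sgi ? 1 : 0);

        return tpt_siso20[row][rate_idx];
}

int main(void)
{
        /* highest-rate entry (last column), aggregated with short GI */
        printf("expected throughput: %d\n", expected_tpt(1, 1, RATE_COUNT - 1));
        return 0;
}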
+ */ +static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time) +{ + /* The oldest age we want to keep */ + u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; + + while (tl->queue_count && + (tl->time_stamp < oldest_time)) { + tl->total -= tl->packet_count[tl->head]; + tl->packet_count[tl->head] = 0; + tl->time_stamp += TID_QUEUE_CELL_SPACING; + tl->queue_count--; + tl->head++; + if (tl->head >= TID_QUEUE_MAX_SIZE) + tl->head = 0; + } +} + +/* + * increment traffic load value for tid and also remove + * any old values if passed the certain time period + */ +static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data, + struct ieee80211_hdr *hdr) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + u8 tid; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } else + return MAX_TID_COUNT; + + if (unlikely(tid >= TID_MAX_LOAD_COUNT)) + return MAX_TID_COUNT; + + tl = &lq_data->load[tid]; + + curr_time -= curr_time % TID_ROUND_VALUE; + + /* Happens only for the first packet. Initialize the data */ + if (!(tl->queue_count)) { + tl->total = 1; + tl->time_stamp = curr_time; + tl->queue_count = 1; + tl->head = 0; + tl->packet_count[0] = 1; + return MAX_TID_COUNT; + } + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + rs_tl_rm_old_stats(tl, curr_time); + + index = (tl->head + index) % TID_QUEUE_MAX_SIZE; + tl->packet_count[index] = tl->packet_count[index] + 1; + tl->total = tl->total + 1; + + if ((index + 1) > tl->queue_count) + tl->queue_count = index + 1; + + return tid; +} + +/* + get the traffic load value for tid +*/ +static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + + if (tid >= TID_MAX_LOAD_COUNT) + return 0; + + tl = &(lq_data->load[tid]); + + curr_time -= curr_time % TID_ROUND_VALUE; + + if (!(tl->queue_count)) + return 0; + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + rs_tl_rm_old_stats(tl, curr_time); + + return tl->total; +} + +static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, + struct iwl_lq_sta *lq_data, u8 tid, + struct ieee80211_sta *sta) +{ + int ret; + + if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { + IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", + sta->addr, tid); + ret = ieee80211_start_tx_ba_session(sta, tid); + if (ret == -EAGAIN) { + /* + * driver and mac80211 is out of sync + * this might be cause by reloading firmware + * stop the tx ba session here + */ + IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n", + tid); + ret = ieee80211_stop_tx_ba_session(sta, tid, + WLAN_BACK_INITIATOR); + } + } +} + +static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, + struct iwl_lq_sta *lq_data, + struct ieee80211_sta *sta) +{ + if ((tid < TID_MAX_LOAD_COUNT)) + rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); + else if (tid == IWL_AGG_ALL_TID) + for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) + rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); + if (priv->cfg->use_rts_for_ht) { + /* + * switch to RTS/CTS if it 
is the prefer protection method + * for HT traffic + */ + IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n"); + priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN; + iwlcore_commit_rxon(priv); + } +} + +static inline int get_num_of_ant_from_rate(u32 rate_n_flags) +{ + return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_C_MSK); +} + +/** + * rs_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 62 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. + */ +static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, + int scale_index, s32 tpt, int attempts, + int successes) +{ + struct iwl_rate_scale_data *window = NULL; + static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); + s32 fail_count; + + if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) + return -EINVAL; + + /* Select window for current tx bit rate */ + window = &(windows[scale_index]); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history window; anything older isn't really relevant any more. + * If we have filled up the sliding window, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + */ + while (attempts > 0) { + if (window->counter >= IWL_RATE_MAX_WINDOW) { + + /* remove earliest */ + window->counter = IWL_RATE_MAX_WINDOW - 1; + + if (window->data & mask) { + window->data &= ~mask; + window->success_counter--; + } + } + + /* Increment frames-attempted counter */ + window->counter++; + + /* Shift bitmap by one frame to throw away oldest history */ + window->data <<= 1; + + /* Mark the most recent #successes attempts as successful */ + if (successes > 0) { + window->success_counter++; + window->data |= 0x1; + successes--; + } + + attempts--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = (window->success_ratio * tpt + 64) / 128; + else + window->average_tpt = IWL_INVALID_VALUE; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + return 0; +} + +/* + * Fill uCode API rate_n_flags field, based on "search" or "active" table. 
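Before moving on, the success window kept by rs_collect_tx_data() above can be modeled in isolation; struct rate_window and collect_tx() below are illustrative stand-ins for iwl_rate_scale_data and the driver routine, using the same 128-based fixed point for success_ratio and average_tpt:

#include <stdio.h>
#include <stdint.h>

#define WINDOW_SIZE     62
#define MIN_FAILURE_TH  6
#define MIN_SUCCESS_TH  8
#define INVALID        (-1)

struct rate_window {
	uint64_t data;          /* success bitmap, newest frame in bit 0 */
	int counter;            /* frames currently in the window        */
	int success_counter;    /* set bits in the bitmap                */
	int success_ratio;      /* 128 * percent, or INVALID             */
	int average_tpt;        /* expected tpt scaled by success ratio  */
};

static void collect_tx(struct rate_window *w, int tpt, int attempts, int successes)
{
	const uint64_t oldest = 1ULL << (WINDOW_SIZE - 1);

	while (attempts-- > 0) {
		if (w->counter >= WINDOW_SIZE) {        /* drop oldest frame */
			w->counter = WINDOW_SIZE - 1;
			if (w->data & oldest) {
				w->data &= ~oldest;
				w->success_counter--;
			}
		}
		w->counter++;
		w->data <<= 1;                          /* room for newest */
		if (successes > 0) {
			w->data |= 1;
			w->success_counter++;
			successes--;
		}
	}

	w->success_ratio = w->counter ?
		128 * (100 * w->success_counter) / w->counter : INVALID;

	/* Only trust the average once there is enough history. */
	if (w->counter - w->success_counter >= MIN_FAILURE_TH ||
	    w->success_counter >= MIN_SUCCESS_TH)
		w->average_tpt = (w->success_ratio * tpt + 64) / 128;
	else
		w->average_tpt = INVALID;
}

int main(void)
{
	struct rate_window w = {0};

	collect_tx(&w, 202, 10, 9);     /* 10 attempts, 9 ACKed, tpt 202 */
	printf("ratio=%d (128 * percent), avg_tpt=%d\n",
	       w.success_ratio, w.average_tpt);
	return 0;
}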
+ */ +/* FIXME:RS:remove this function and put the flags statically in the table */ +static u32 rate_n_flags_from_tbl(struct iwl_priv *priv, + struct iwl_scale_tbl_info *tbl, + int index, u8 use_green) +{ + u32 rate_n_flags = 0; + + if (is_legacy(tbl->lq_type)) { + rate_n_flags = iwl_rates[index].plcp; + if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) + rate_n_flags |= RATE_MCS_CCK_MSK; + + } else if (is_Ht(tbl->lq_type)) { + if (index > IWL_LAST_OFDM_RATE) { + IWL_ERR(priv, "Invalid HT rate index %d\n", index); + index = IWL_LAST_OFDM_RATE; + } + rate_n_flags = RATE_MCS_HT_MSK; + + if (is_siso(tbl->lq_type)) + rate_n_flags |= iwl_rates[index].plcp_siso; + else if (is_mimo2(tbl->lq_type)) + rate_n_flags |= iwl_rates[index].plcp_mimo2; + else + rate_n_flags |= iwl_rates[index].plcp_mimo3; + } else { + IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type); + } + + rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) & + RATE_MCS_ANT_ABC_MSK); + + if (is_Ht(tbl->lq_type)) { + if (tbl->is_ht40) { + if (tbl->is_dup) + rate_n_flags |= RATE_MCS_DUP_MSK; + else + rate_n_flags |= RATE_MCS_HT40_MSK; + } + if (tbl->is_SGI) + rate_n_flags |= RATE_MCS_SGI_MSK; + + if (use_green) { + rate_n_flags |= RATE_MCS_GF_MSK; + if (is_siso(tbl->lq_type) && tbl->is_SGI) { + rate_n_flags &= ~RATE_MCS_SGI_MSK; + IWL_ERR(priv, "GF was set with SGI:SISO\n"); + } + } + } + return rate_n_flags; +} + +/* + * Interpret uCode API's rate_n_flags format, + * fill "search" or "active" tx mode table. + */ +static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, + enum ieee80211_band band, + struct iwl_scale_tbl_info *tbl, + int *rate_idx) +{ + u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); + u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags); + u8 mcs; + + *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); + + if (*rate_idx == IWL_RATE_INVALID) { + *rate_idx = -1; + return -EINVAL; + } + tbl->is_SGI = 0; /* default legacy setup */ + tbl->is_ht40 = 0; + tbl->is_dup = 0; + tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); + tbl->lq_type = LQ_NONE; + tbl->max_search = IWL_MAX_SEARCH; + + /* legacy rate format */ + if (!(rate_n_flags & RATE_MCS_HT_MSK)) { + if (num_of_ant == 1) { + if (band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + } + /* HT rate format */ + } else { + if (rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if ((rate_n_flags & RATE_MCS_HT40_MSK) || + (rate_n_flags & RATE_MCS_DUP_MSK)) + tbl->is_ht40 = 1; + + if (rate_n_flags & RATE_MCS_DUP_MSK) + tbl->is_dup = 1; + + mcs = rs_extract_rate(rate_n_flags); + + /* SISO */ + if (mcs <= IWL_RATE_SISO_60M_PLCP) { + if (num_of_ant == 1) + tbl->lq_type = LQ_SISO; /*else NONE*/ + /* MIMO2 */ + } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) { + if (num_of_ant == 2) + tbl->lq_type = LQ_MIMO2; + /* MIMO3 */ + } else { + if (num_of_ant == 3) { + tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; + tbl->lq_type = LQ_MIMO3; + } + } + } + return 0; +} + +/* switch to another antenna/antennas and return 1 */ +/* if no other valid antenna found, return 0 */ +static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, + struct iwl_scale_tbl_info *tbl) +{ + u8 new_ant_type; + + if (!tbl->ant_type || tbl->ant_type > ANT_ABC) + return 0; + + if (!rs_is_valid_ant(valid_ant, tbl->ant_type)) + return 0; + + new_ant_type = ant_toggle_lookup[tbl->ant_type]; + + while ((new_ant_type != tbl->ant_type) && + !rs_is_valid_ant(valid_ant, new_ant_type)) + new_ant_type = ant_toggle_lookup[new_ant_type]; + + if (new_ant_type == 
tbl->ant_type) + return 0; + + tbl->ant_type = new_ant_type; + *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK; + *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS; + return 1; +} + +/** + * Green-field mode is valid if the station supports it and + * there are no non-GF stations present in the BSS. + */ +static inline u8 rs_use_green(struct ieee80211_sta *sta, + struct iwl_ht_config *ht_conf) +{ + return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && + !(ht_conf->non_GF_STA_present); +} + +/** + * rs_get_supported_rates - get the available rates + * + * if management frame or broadcast frame only return + * basic available rates. + * + */ +static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta, + struct ieee80211_hdr *hdr, + enum iwl_table_type rate_type) +{ + if (hdr && is_multicast_ether_addr(hdr->addr1) && + lq_sta->active_rate_basic) + return lq_sta->active_rate_basic; + + if (is_legacy(rate_type)) { + return lq_sta->active_legacy_rate; + } else { + if (is_siso(rate_type)) + return lq_sta->active_siso_rate; + else if (is_mimo2(rate_type)) + return lq_sta->active_mimo2_rate; + else + return lq_sta->active_mimo3_rate; + } +} + +static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask, + int rate_type) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A or ht walks to the next literal adjacent rate in + * the rate table */ + if (is_a_band(rate_type) || !is_legacy(rate_type)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + low = iwl_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + high = iwl_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + u8 scale_index, u8 ht_possible) +{ + s32 low; + u16 rate_mask; + u16 high_low; + u8 switch_to_legacy = 0; + u8 is_green = lq_sta->is_green; + struct iwl_priv *priv = lq_sta->drv; + + /* check if we need to switch from HT to legacy rates. 
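rs_get_adjacent_rate() above packs the nearest enabled neighbours of a rate into one 16-bit value, (high << 8) | low, with IWL_RATE_INVALID marking a missing neighbour. A standalone sketch of the OFDM/HT bitmap walk (the legacy path follows the prev_rs/next_rs links instead):

#include <stdio.h>
#include <stdint.h>

#define RATE_COUNT    13
#define RATE_INVALID  0xff

static uint16_t adjacent_rates(uint16_t rate_mask, int index)
{
	uint8_t low = RATE_INVALID, high = RATE_INVALID;
	int i;

	for (i = index - 1; i >= 0; i--)          /* nearest enabled rate below */
		if (rate_mask & (1 << i)) {
			low = i;
			break;
		}

	for (i = index + 1; i < RATE_COUNT; i++)  /* nearest enabled rate above */
		if (rate_mask & (1 << i)) {
			high = i;
			break;
		}

	return (high << 8) | low;
}

int main(void)
{
	uint16_t hl = adjacent_rates(0x0ff0, 4);  /* OFDM 6..54 Mbps enabled */
	int low = hl & 0xff, high = (hl >> 8) & 0xff;

	printf("low=%d high=%d (255 means none)\n", low, high);
	return 0;
}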
+ * assumption is that mandatory rates (1Mbps or 6Mbps) + * are always supported (spec demand) */ + if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { + switch_to_legacy = 1; + scale_index = rs_ht_to_legacy[scale_index]; + if (lq_sta->band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if (num_of_ant(tbl->ant_type) > 1) + tbl->ant_type = + first_antenna(priv->hw_params.valid_tx_ant); + + tbl->is_ht40 = 0; + tbl->is_SGI = 0; + tbl->max_search = IWL_MAX_SEARCH; + } + + rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type); + + /* Mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + /* supp_rates has no CCK bits in A mode */ + if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate_mask = (u16)(rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_mask = (u16)(rate_mask & lq_sta->supp_rates); + } + + /* If we switched from HT to legacy, check current rate */ + if (switch_to_legacy && (rate_mask & (1 << scale_index))) { + low = scale_index; + goto out; + } + + high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask, + tbl->lq_type); + low = high_low & 0xff; + + if (low == IWL_RATE_INVALID) + low = scale_index; + +out: + return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); +} + +/* + * Simple function to compare two rate scale table types + */ +static bool table_type_matches(struct iwl_scale_tbl_info *a, + struct iwl_scale_tbl_info *b) +{ + return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && + (a->is_SGI == b->is_SGI); +} +/* + * Static function to get the expected throughput from an iwl_scale_tbl_info + * that wraps a NULL pointer check + */ +static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) +{ + if (tbl->expected_tpt) + return tbl->expected_tpt[rs_index]; + return 0; +} + +/* + * mac80211 sends us Tx status + */ +static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + int legacy_success; + int retries; + int rs_index, mac_index, i; + struct iwl_lq_sta *lq_sta = priv_sta; + struct iwl_link_quality_cmd *table; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_priv *priv = (struct iwl_priv *)priv_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_rate_scale_data *window = NULL; + enum mac80211_rate_control_flags mac_flags; + u32 tx_rate; + struct iwl_scale_tbl_info tbl_type; + struct iwl_scale_tbl_info *curr_tbl, *other_tbl; + s32 tpt = 0; + + IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); + + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) && + !lq_sta->ibss_sta_added) + return; + + /* + * Ignore this Tx frame response if its initial rate doesn't match + * that of latest Link Quality command. There may be stragglers + * from a previous Link Quality command, but we're no longer interested + * in those; they're either from the "active" mode while we're trying + * to check "search" mode, or a prior "search" mode after we've moved + * to a new "search" mode (which might become the new "active" mode). 
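The mismatch handling this comment describes reduces to a counter with a resync threshold (IWL_MISSED_RATE_MAX, 15 in this file). A standalone model; the helper name is illustrative:

#include <stdio.h>
#include <stdbool.h>

#define MISSED_RATE_MAX 15

struct lq_state {
	int missed_rate_counter;
};

/* Returns true when the link-quality command should be re-sent. */
static bool status_needs_resync(struct lq_state *s, bool rate_matches)
{
	if (rate_matches) {
		s->missed_rate_counter = 0;
		return false;
	}
	if (++s->missed_rate_counter > MISSED_RATE_MAX) {
		s->missed_rate_counter = 0;     /* resync the uCode table */
		return true;
	}
	return false;                           /* ignore the stale status */
}

int main(void)
{
	struct lq_state s = {0};
	int i, resyncs = 0;

	for (i = 0; i < 40; i++)                /* 40 stale statuses in a row */
		resyncs += status_needs_resync(&s, false);
	printf("resyncs triggered: %d\n", resyncs);
	return 0;
}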
+ */ + table = &lq_sta->lq; + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index); + if (priv->band == IEEE80211_BAND_5GHZ) + rs_index -= IWL_FIRST_OFDM_RATE; + mac_flags = info->status.rates[0].flags; + mac_index = info->status.rates[0].idx; + /* For HT packets, map MCS to PLCP */ + if (mac_flags & IEEE80211_TX_RC_MCS) { + mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */ + if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE)) + mac_index++; + /* + * mac80211 HT index is always zero-indexed; we need to move + * HT OFDM rates after CCK rates in 2.4 GHz band + */ + if (priv->band == IEEE80211_BAND_2GHZ) + mac_index += IWL_FIRST_OFDM_RATE; + } + /* Here we actually compare this rate to the latest LQ command */ + if ((mac_index < 0) || + (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || + (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || + (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) || + (tbl_type.ant_type != info->antenna_sel_tx) || + (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) || + (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || + (rs_index != mac_index)) { + IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate); + /* + * Since rates mis-match, the last LQ command may have failed. + * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with + * ... driver. + */ + lq_sta->missed_rate_counter++; + if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { + lq_sta->missed_rate_counter = 0; + iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); + } + /* Regardless, ignore this status info for outdated rate */ + return; + } else + /* Rate did match, so reset the missed_rate_counter */ + lq_sta->missed_rate_counter = 0; + + /* Figure out if rate scale algorithm is in active or search table */ + if (table_type_matches(&tbl_type, + &(lq_sta->lq_info[lq_sta->active_tbl]))) { + curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + } else if (table_type_matches(&tbl_type, + &lq_sta->lq_info[1 - lq_sta->active_tbl])) { + curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + } else { + IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); + return; + } + window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]); + + /* + * Updating the frame history depends on whether packets were + * aggregated. + * + * For aggregation, all packets were transmitted at the same rate, the + * first index into rate scale table. + */ + if (info->flags & IEEE80211_TX_STAT_AMPDU) { + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, + &rs_index); + tpt = get_expected_tpt(curr_tbl, rs_index); + rs_collect_tx_data(window, rs_index, tpt, + info->status.ampdu_ack_len, + info->status.ampdu_ack_map); + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += info->status.ampdu_ack_map; + lq_sta->total_failed += (info->status.ampdu_ack_len - + info->status.ampdu_ack_map); + } + } else { + /* + * For legacy, update frame history with for each Tx retry. 
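The per-retry accounting that follows charges one attempt and zero successes to every entry of the retry chain except the last, which may carry the ACK. A standalone model; the driver additionally skips retries whose rate belongs to neither the active nor the search table, a filter omitted here:

#include <stdio.h>

#define MAX_RETRIES 15
#define RATE_COUNT  13

struct tally {
	int attempts;
	int successes;
};

static void account_retry_chain(struct tally *per_rate, const int *rate_idx,
				int retries, int acked)
{
	int i;

	if (retries > MAX_RETRIES)              /* HW reports at most 15 */
		retries = MAX_RETRIES;

	for (i = 0; i <= retries; i++) {
		per_rate[rate_idx[i]].attempts++;
		if (i == retries && acked)      /* only the last try can succeed */
			per_rate[rate_idx[i]].successes++;
	}
}

int main(void)
{
	struct tally per_rate[RATE_COUNT] = { {0, 0} };
	int chain[] = { 11, 11, 10, 9 };        /* 54, 54, 48, 36 Mbps tries */

	account_retry_chain(per_rate, chain, 3, 1);
	printf("rate 9: %d/%d, rate 11: %d/%d\n",
	       per_rate[9].successes, per_rate[9].attempts,
	       per_rate[11].successes, per_rate[11].attempts);
	return 0;
}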
+ */ + retries = info->status.rates[0].count - 1; + /* HW doesn't send more than 15 retries */ + retries = min(retries, 15); + + /* The last transmission may have been successful */ + legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); + /* Collect data for each rate used during failed TX attempts */ + for (i = 0; i <= retries; ++i) { + tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); + rs_get_tbl_info_from_mcs(tx_rate, priv->band, + &tbl_type, &rs_index); + /* + * Only collect stats if retried rate is in the same RS + * table as active/search. + */ + if (table_type_matches(&tbl_type, curr_tbl)) + tpt = get_expected_tpt(curr_tbl, rs_index); + else if (table_type_matches(&tbl_type, other_tbl)) + tpt = get_expected_tpt(other_tbl, rs_index); + else + continue; + + /* Constants mean 1 transmission, 0 successes */ + if (i < retries) + rs_collect_tx_data(window, rs_index, tpt, 1, + 0); + else + rs_collect_tx_data(window, rs_index, tpt, 1, + legacy_success); + } + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += legacy_success; + lq_sta->total_failed += retries + (1 - legacy_success); + } + } + /* The last TX rate is cached in lq_sta; it's set in if/else above */ + lq_sta->last_rate_n_flags = tx_rate; + + /* See if there's a better rate or modulation mode to try. */ + if (sta && sta->supp_rates[sband->band]) + rs_rate_scale_perform(priv, skb, sta, lq_sta); +} + +/* + * Begin a period of staying with a selected modulation mode. + * Set "stay_in_tbl" flag to prevent any mode switches. + * Set frame tx success limits according to legacy vs. high-throughput, + * and reset overall (spanning all rates) tx success history statistics. + * These control how long we stay using same modulation mode before + * searching for a new mode. 
+ */ +static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy, + struct iwl_lq_sta *lq_sta) +{ + IWL_DEBUG_RATE(priv, "we are staying in the same table\n"); + lq_sta->stay_in_tbl = 1; /* only place this gets set */ + if (is_legacy) { + lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT; + } else { + lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; + } + lq_sta->table_count = 0; + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = jiffies; + lq_sta->action_counter = 0; +} + +/* + * Find correct throughput table for given mode of modulation + */ +static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl) +{ + /* Used to choose among HT tables */ + s32 (*ht_tbl_pointer)[IWL_RATE_COUNT]; + + /* Check for invalid LQ type */ + if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Legacy rates have only one table */ + if (is_legacy(tbl->lq_type)) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Choose among many HT tables depending on number of streams + * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation + * status */ + if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_siso20MHz; + else if (is_siso(tbl->lq_type)) + ht_tbl_pointer = expected_tpt_siso40MHz; + else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_mimo2_20MHz; + else if (is_mimo2(tbl->lq_type)) + ht_tbl_pointer = expected_tpt_mimo2_40MHz; + else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_mimo3_20MHz; + else /* if (is_mimo3(tbl->lq_type)) <-- must be true */ + ht_tbl_pointer = expected_tpt_mimo3_40MHz; + + if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ + tbl->expected_tpt = ht_tbl_pointer[0]; + else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */ + tbl->expected_tpt = ht_tbl_pointer[1]; + else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */ + tbl->expected_tpt = ht_tbl_pointer[2]; + else /* AGG+SGI */ + tbl->expected_tpt = ht_tbl_pointer[3]; +} + +/* + * Find starting rate for new "search" high-throughput mode of modulation. + * Goal is to find lowest expected rate (under perfect conditions) that is + * above the current measured throughput of "active" mode, to give new mode + * a fair chance to prove itself without too many challenges. + * + * This gets called when transitioning to more aggressive modulation + * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive + * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need + * to decrease to match "active" throughput. When moving from MIMO to SISO, + * bit rate will typically need to increase, but not if performance was bad. 
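A deliberately simplified sketch of that start-rate choice: walk down the candidate table and keep the lowest rate whose expected throughput still beats the throughput measured in the current mode. The real rs_get_best_rate() below also weighs the active success ratio and can walk upward, so treat this as an approximation of the goal rather than the algorithm:

#include <stdio.h>

#define RATE_COUNT 13
#define INVALID    (-1)

static int pick_search_start_rate(const int *expected_tpt, /* candidate mode */
				  int measured_tpt,        /* active, x100   */
				  int start_index)
{
	int rate = INVALID;
	int i;

	for (i = start_index; i >= 0; i--) {
		if (!expected_tpt[i])           /* rate not valid in this mode */
			continue;
		if (100 * expected_tpt[i] <= measured_tpt)
			break;                  /* would already be a downgrade */
		rate = i;                       /* lowest acceptable so far     */
	}
	return rate;
}

int main(void)
{
	/* Copied from the MIMO2 20 MHz "Norm" row earlier in this file. */
	static const int mimo2_tpt[RATE_COUNT] =
		{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250};

	printf("start MIMO2 search at rate index %d\n",
	       pick_search_start_rate(mimo2_tpt, 150 * 100, RATE_COUNT - 1));
	return 0;
}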
+ */ +static s32 rs_get_best_rate(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, /* "search" */ + u16 rate_mask, s8 index) +{ + /* "active" values */ + struct iwl_scale_tbl_info *active_tbl = + &(lq_sta->lq_info[lq_sta->active_tbl]); + s32 active_sr = active_tbl->win[index].success_ratio; + s32 active_tpt = active_tbl->expected_tpt[index]; + + /* expected "search" throughput */ + s32 *tpt_tbl = tbl->expected_tpt; + + s32 new_rate, high, low, start_hi; + u16 high_low; + s8 rate = index; + + new_rate = high = low = start_hi = IWL_RATE_INVALID; + + for (; ;) { + high_low = rs_get_adjacent_rate(priv, rate, rate_mask, + tbl->lq_type); + + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* + * Lower the "search" bit rate, to give new "search" mode + * approximately the same throughput as "active" if: + * + * 1) "Active" mode has been working modestly well (but not + * great), and expected "search" throughput (under perfect + * conditions) at candidate rate is above the actual + * measured "active" throughput (but less than expected + * "active" throughput under perfect conditions). + * OR + * 2) "Active" mode has been working perfectly or very well + * and expected "search" throughput (under perfect + * conditions) at candidate rate is above expected + * "active" throughput (under perfect conditions). + */ + if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) && + ((active_sr > IWL_RATE_DECREASE_TH) && + (active_sr <= IWL_RATE_HIGH_TH) && + (tpt_tbl[rate] <= active_tpt))) || + ((active_sr >= IWL_RATE_SCALE_SWITCH) && + (tpt_tbl[rate] > active_tpt))) { + + /* (2nd or later pass) + * If we've already tried to raise the rate, and are + * now trying to lower it, use the higher rate. */ + if (start_hi != IWL_RATE_INVALID) { + new_rate = start_hi; + break; + } + + new_rate = rate; + + /* Loop again with lower rate */ + if (low != IWL_RATE_INVALID) + rate = low; + + /* Lower rate not available, use the original */ + else + break; + + /* Else try to raise the "search" rate to match "active" */ + } else { + /* (2nd or later pass) + * If we've already tried to lower the rate, and are + * now trying to raise it, use the lower rate. 
*/ + if (new_rate != IWL_RATE_INVALID) + break; + + /* Loop again with higher rate */ + else if (high != IWL_RATE_INVALID) { + start_hi = high; + rate = high; + + /* Higher rate not available, use the original */ + } else { + new_rate = rate; + break; + } + } + } + + return new_rate; +} + +/* + * Set up search table for MIMO2 + */ +static int rs_switch_to_mimo2(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + s32 rate; + s8 is_green = lq_sta->is_green; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) + == WLAN_HT_CAP_SM_PS_STATIC) + return -1; + + /* Need both Tx chains/antennas to support MIMO */ + if (priv->hw_params.tx_chains_num < 2) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); + + tbl->lq_type = LQ_MIMO2; + tbl->is_dup = lq_sta->is_dup; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_mimo2_rate; + + if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + rs_set_expected_tpt_table(lq_sta, tbl); + + rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green); + + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Set up search table for MIMO3 + */ +static int rs_switch_to_mimo3(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + s32 rate; + s8 is_green = lq_sta->is_green; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) + == WLAN_HT_CAP_SM_PS_STATIC) + return -1; + + /* Need both Tx chains/antennas to support MIMO */ + if (priv->hw_params.tx_chains_num < 3) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n"); + + tbl->lq_type = LQ_MIMO3; + tbl->is_dup = lq_sta->is_dup; + tbl->action = 0; + tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; + rate_mask = lq_sta->active_mimo3_rate; + + if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + rs_set_expected_tpt_table(lq_sta, tbl); + + rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: MIMO3 best rate %d mask %X\n", + rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green); + + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Set up search table for SISO + */ +static int rs_switch_to_siso(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + u8 is_green = lq_sta->is_green; + s32 rate; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return 
-1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n"); + + tbl->is_dup = lq_sta->is_dup; + tbl->lq_type = LQ_SISO; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_siso_rate; + + if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + if (is_green) + tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/ + + rs_set_expected_tpt_table(lq_sta, tbl); + rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, "can not switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green); + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Try to switch to new modulation mode from legacy + */ +static int rs_move_legacy_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + int index) +{ + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action = tbl->action; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + int ret = 0; + u8 update_search_tbl_counter = 0; + + if (!iwl_ht_enabled(priv)) + /* stay in Legacy */ + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE && + tbl->action > IWL_LEGACY_SWITCH_SISO) + tbl->action = IWL_LEGACY_SWITCH_SISO; + for (; ;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_LEGACY_SWITCH_ANTENNA1: + case IWL_LEGACY_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n"); + + if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + /* Don't change antenna if success has been great */ + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + /* Set up search table to try other antenna */ + memcpy(search_tbl, tbl, sz); + + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + rs_set_expected_tpt_table(lq_sta, search_tbl); + goto out; + } + break; + case IWL_LEGACY_SWITCH_SISO: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n"); + + /* Set up search table to try SISO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + ret = rs_switch_to_siso(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + + break; + case IWL_LEGACY_SWITCH_MIMO2_AB: + case IWL_LEGACY_SWITCH_MIMO2_AC: + case IWL_LEGACY_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n"); + + /* Set up search table to try MIMO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = 
rs_switch_to_mimo2(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + break; + + case IWL_LEGACY_SWITCH_MIMO3_ABC: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO3\n"); + + /* Set up search table to try MIMO3 */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + search_tbl->ant_type = ANT_ABC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + break; + } + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + + } + search_tbl->lq_type = LQ_NONE; + return 0; + +out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + return 0; + +} + +/* + * Try to switch to new modulation mode from SISO + */ +static int rs_move_siso_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + u8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action = tbl->action; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE && + tbl->action > IWL_SISO_SWITCH_ANTENNA2) { + /* stay in SISO */ + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + } + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_SISO_SWITCH_ANTENNA1: + case IWL_SISO_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n"); + + if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_SISO_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_SISO_SWITCH_MIMO2_AB: + case IWL_SISO_SWITCH_MIMO2_AC: + case IWL_SISO_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + break; + case IWL_SISO_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n"); + + memcpy(search_tbl, tbl, sz); + if (is_green) { + if (!tbl->is_SGI) + break; + else + IWL_ERR(priv, + "SGI was set in 
GF+SISO\n"); + } + search_tbl->is_SGI = !tbl->is_SGI; + rs_set_expected_tpt_table(lq_sta, search_tbl); + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + case IWL_SISO_SWITCH_MIMO3_ABC: + IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO3\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + search_tbl->ant_type = ANT_ABC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + break; + } + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; +} + +/* + * Try to switch to new modulation mode from MIMO2 + */ +static int rs_move_mimo2_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + s8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action = tbl->action; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) && + (tbl->action < IWL_MIMO2_SWITCH_SISO_A || + tbl->action > IWL_MIMO2_SWITCH_SISO_C)) { + /* switch in SISO */ + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + } + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_MIMO2_SWITCH_ANTENNA1: + case IWL_MIMO2_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n"); + + if (tx_chains_num <= 2) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_MIMO2_SWITCH_SISO_A: + case IWL_MIMO2_SWITCH_SISO_B: + case IWL_MIMO2_SWITCH_SISO_C: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n"); + + /* Set up new search table for SISO */ + memcpy(search_tbl, tbl, sz); + + if (tbl->action == IWL_MIMO2_SWITCH_SISO_A) + search_tbl->ant_type = ANT_A; + else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) + search_tbl->ant_type = ANT_B; + else + search_tbl->ant_type = ANT_C; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_siso(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO2_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n"); + + /* Set up new search table for MIMO2 */ + 
memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = !tbl->is_SGI; + rs_set_expected_tpt_table(lq_sta, search_tbl); + /* + * If active table already uses the fastest possible + * modulation (dual stream with short guard interval), + * and it's working well, there's no need to look + * for a better type of modulation! + */ + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + + case IWL_MIMO2_SWITCH_MIMO3_ABC: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to MIMO3\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + search_tbl->ant_type = ANT_ABC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + } + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; + +} + +/* + * Try to switch to new modulation mode from MIMO3 + */ +static int rs_move_mimo3_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + s8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action = tbl->action; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + int ret; + u8 update_search_tbl_counter = 0; + + if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) && + (tbl->action < IWL_MIMO3_SWITCH_SISO_A || + tbl->action > IWL_MIMO3_SWITCH_SISO_C)) { + /* switch in SISO */ + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + } + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_MIMO3_SWITCH_ANTENNA1: + case IWL_MIMO3_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle Antennas\n"); + + if (tx_chains_num <= 3) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) + goto out; + break; + case IWL_MIMO3_SWITCH_SISO_A: + case IWL_MIMO3_SWITCH_SISO_B: + case IWL_MIMO3_SWITCH_SISO_C: + IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to SISO\n"); + + /* Set up new search table for SISO */ + memcpy(search_tbl, tbl, sz); + + if (tbl->action == IWL_MIMO3_SWITCH_SISO_A) + search_tbl->ant_type = ANT_A; + else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B) + search_tbl->ant_type = ANT_B; + else + search_tbl->ant_type = ANT_C; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_siso(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO3_SWITCH_MIMO2_AB: + case IWL_MIMO3_SWITCH_MIMO2_AC: + case 
IWL_MIMO3_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to MIMO2\n"); + + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type)) + break; + + ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO3_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle SGI/NGI\n"); + + /* Set up new search table for MIMO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = !tbl->is_SGI; + rs_set_expected_tpt_table(lq_sta, search_tbl); + /* + * If active table already uses the fastest possible + * modulation (dual stream with short guard interval), + * and it's working well, there's no need to look + * for a better type of modulation! + */ + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + } + tbl->action++; + if (tbl->action > IWL_MIMO3_SWITCH_GI) + tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_MIMO3_SWITCH_GI) + tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; + +} + +/* + * Check whether we should continue using same modulation mode, or + * begin search for a new mode, based on: + * 1) # tx successes or failures while using this mode + * 2) # times calling this function + * 3) elapsed time in this mode (not used, for now) + */ +static void rs_stay_in_table(struct iwl_lq_sta *lq_sta) +{ + struct iwl_scale_tbl_info *tbl; + int i; + int active_tbl; + int flush_interval_passed = 0; + struct iwl_priv *priv; + + priv = lq_sta->drv; + active_tbl = lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + /* If we've been disallowing search, see if we should now allow it */ + if (lq_sta->stay_in_tbl) { + + /* Elapsed time using current modulation mode */ + if (lq_sta->flush_timer) + flush_interval_passed = + time_after(jiffies, + (unsigned long)(lq_sta->flush_timer + + IWL_RATE_SCALE_FLUSH_INTVL)); + + /* + * Check if we should allow search for new modulation mode. + * If many frames have failed or succeeded, or we've used + * this same modulation for a long time, allow search, and + * reset history stats that keep track of whether we should + * allow a new search. Also (below) reset all bitmaps and + * stats in active history. 
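A standalone model of that throttle; the limit values are illustrative stand-ins for the IWL_LEGACY_* / IWL_NONE_LEGACY_* constants, and the table_count repetition counter and the !search_better_tbl guard are folded away to keep the condition readable:

#include <stdio.h>
#include <stdbool.h>

struct stay_state {
	int total_failed, total_success;
	int max_failure_limit, max_success_limit;
	long flush_timer;                  /* time the current mode was entered */
	long flush_interval;
	bool stay_in_tbl;
};

static void enter_mode(struct stay_state *s, long now,
		       int fail_limit, int success_limit, long interval)
{
	s->stay_in_tbl = true;
	s->total_failed = s->total_success = 0;
	s->flush_timer = now;
	s->max_failure_limit = fail_limit;
	s->max_success_limit = success_limit;
	s->flush_interval = interval;
}

/* Returns true once searching for a new modulation mode is allowed again. */
static bool maybe_allow_search(struct stay_state *s, long now)
{
	if (!s->stay_in_tbl)
		return true;

	if (s->total_failed > s->max_failure_limit ||
	    s->total_success > s->max_success_limit ||
	    now - s->flush_timer > s->flush_interval) {
		s->stay_in_tbl = false;    /* search re-enabled, stats reset */
		s->total_failed = s->total_success = 0;
		return true;
	}
	return false;
}

int main(void)
{
	struct stay_state s;

	enter_mode(&s, 0, 160, 480, 3000 /* ms */);
	s.total_success = 481;             /* plenty of good frames */
	printf("allow search: %s\n", maybe_allow_search(&s, 100) ? "yes" : "no");
	return 0;
}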
+ */ + if ((lq_sta->total_failed > lq_sta->max_failure_limit) || + (lq_sta->total_success > lq_sta->max_success_limit) || + ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) + && (flush_interval_passed))) { + IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:", + lq_sta->total_failed, + lq_sta->total_success, + flush_interval_passed); + + /* Allow search for new mode */ + lq_sta->stay_in_tbl = 0; /* only place reset */ + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = 0; + + /* + * Else if we've used this modulation mode enough repetitions + * (regardless of elapsed time or success/failure), reset + * history bitmaps and rate-specific stats for all rates in + * active table. + */ + } else { + lq_sta->table_count++; + if (lq_sta->table_count >= + lq_sta->table_count_limit) { + lq_sta->table_count = 0; + + IWL_DEBUG_RATE(priv, "LQ: stay in table clear win\n"); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window( + &(tbl->win[i])); + } + } + + /* If transitioning to allow "search", reset all history + * bitmaps and stats in active table (this will become the new + * "search" table). */ + if (!lq_sta->stay_in_tbl) { + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(tbl->win[i])); + } + } +} + +/* + * setup rate table in uCode + * return rate_n_flags as used in the table + */ +static u32 rs_update_rate_tbl(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int index, u8 is_green) +{ + u32 rate; + + /* Update uCode's rate table. */ + rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); + rs_fill_link_cmd(priv, lq_sta, rate); + iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); + + return rate; +} + +/* + * Do rate scaling and search for new modulation mode. + */ +static void rs_rate_scale_perform(struct iwl_priv *priv, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int low = IWL_RATE_INVALID; + int high = IWL_RATE_INVALID; + int index; + int i; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + u16 rate_mask; + u8 update_lq = 0; + struct iwl_scale_tbl_info *tbl, *tbl1; + u16 rate_scale_index_msk = 0; + u32 rate; + u8 is_green = 0; + u8 active_tbl = 0; + u8 done_search = 0; + u16 high_low; + s32 sr; + u8 tid = MAX_TID_COUNT; + struct iwl_tid_data *tid_data; + + IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n"); + + /* Send management frames and NO_ACK data using lowest rate. */ + /* TODO: this could probably be improved.. 
*/ + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + if (!sta || !lq_sta) + return; + + lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; + + tid = rs_tl_add_packet(lq_sta, hdr); + if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) { + tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid]; + if (tid_data->agg.state == IWL_AGG_OFF) + lq_sta->is_agg = 0; + else + lq_sta->is_agg = 1; + } else + lq_sta->is_agg = 0; + + /* + * Select rate-scale / modulation-mode table to work with in + * the rest of this function: "search" if searching for better + * modulation mode, or "active" if doing rate scaling within a mode. + */ + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + if (is_legacy(tbl->lq_type)) + lq_sta->is_green = 0; + else + lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config); + is_green = lq_sta->is_green; + + /* current tx rate */ + index = lq_sta->last_txrate_idx; + + IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index, + tbl->lq_type); + + /* rates available for this association, and for modulation mode */ + rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); + + IWL_DEBUG_RATE(priv, "mask 0x%04X \n", rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_sta->band == IEEE80211_BAND_5GHZ) + /* supp_rates has no CCK bits in A mode */ + rate_scale_index_msk = (u16) (rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_scale_index_msk = (u16) (rate_mask & + lq_sta->supp_rates); + + } else + rate_scale_index_msk = rate_mask; + + if (!rate_scale_index_msk) + rate_scale_index_msk = rate_mask; + + if (!((1 << index) & rate_scale_index_msk)) { + IWL_ERR(priv, "Current Rate is not valid\n"); + if (lq_sta->search_better_tbl) { + /* revert to active table if search table is not valid*/ + tbl->lq_type = LQ_NONE; + lq_sta->search_better_tbl = 0; + tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + /* get "active" rate info */ + index = iwl_hwrate_to_plcp_idx(tbl->current_rate); + rate = rs_update_rate_tbl(priv, lq_sta, + tbl, index, is_green); + } + return; + } + + /* Get expected throughput table and history window for current rate */ + if (!tbl->expected_tpt) { + IWL_ERR(priv, "tbl->expected_tpt is NULL\n"); + return; + } + + /* force user max rate if set by user */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < index)) { + index = lq_sta->max_rate_idx; + update_lq = 1; + window = &(tbl->win[index]); + goto lq_update; + } + + window = &(tbl->win[index]); + + /* + * If there is not enough history to calculate actual average + * throughput, keep analyzing results of more tx frames, without + * changing rate or mode (bypass most of the rest of this function). + * Set up new rate table in uCode only if old rate is not supported + * in current association (use new rate found above). + */ + fail_count = window->counter - window->success_counter; + if ((fail_count < IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) { + IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d " + "for index %d\n", + window->success_counter, window->counter, index); + + /* Can't calculate this yet; not enough history */ + window->average_tpt = IWL_INVALID_VALUE; + + /* Should we stay with this modulation mode, + * or search for a new one? 
*/ + rs_stay_in_table(lq_sta); + + goto out; + } + /* Else we have enough samples; calculate estimate of + * actual average throughput */ + + /* Sanity-check TPT calculations */ + BUG_ON(window->average_tpt != ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128)); + + /* If we are searching for better modulation mode, check success. */ + if (lq_sta->search_better_tbl && + (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI)) { + /* If good success, continue using the "search" mode; + * no need to send new link quality command, since we're + * continuing to use the setup that we've been trying. */ + if (window->average_tpt > lq_sta->last_tpt) { + + IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE " + "suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + if (!is_legacy(tbl->lq_type)) + lq_sta->enable_counter = 1; + + /* Swap tables; "search" becomes "active" */ + lq_sta->active_tbl = active_tbl; + current_tpt = window->average_tpt; + + /* Else poor success; go back to mode in "active" table */ + } else { + + IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE " + "suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + /* Nullify "search" table */ + tbl->lq_type = LQ_NONE; + + /* Revert to "active" table */ + active_tbl = lq_sta->active_tbl; + tbl = &(lq_sta->lq_info[active_tbl]); + + /* Revert to "active" rate and throughput info */ + index = iwl_hwrate_to_plcp_idx(tbl->current_rate); + current_tpt = lq_sta->last_tpt; + + /* Need to set up a new rate table in uCode */ + update_lq = 1; + } + + /* Either way, we've made a decision; modulation mode + * search is done, allow rate adjustment next time. */ + lq_sta->search_better_tbl = 0; + done_search = 1; /* Don't switch modes below! */ + goto lq_update; + } + + /* (Else) not in search of better modulation mode, try for better + * starting rate, while staying in this mode. */ + high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk, + tbl->lq_type); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* If user set max rate, dont allow higher than user constrain */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < high)) + high = IWL_RATE_INVALID; + + sr = window->success_ratio; + + /* Collect measured throughputs for current and adjacent rates */ + current_tpt = window->average_tpt; + if (low != IWL_RATE_INVALID) + low_tpt = tbl->win[low].average_tpt; + if (high != IWL_RATE_INVALID) + high_tpt = tbl->win[high].average_tpt; + + scale_action = 0; + + /* Too many failures, decrease rate */ + if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) { + IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n"); + scale_action = -1; + + /* No throughput measured yet for adjacent rates; try increase. */ + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) { + + if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) + scale_action = 1; + else if (low != IWL_RATE_INVALID) + scale_action = 0; + } + + /* Both adjacent throughputs are measured, but neither one has better + * throughput; we're using the best rate, don't change it! */ + else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && + (high_tpt < current_tpt)) + scale_action = 0; + + /* At least one adjacent rate's throughput is measured, + * and may have better performance. 
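The increase/decrease decision built up across these branches reduces to one small function. A standalone model; the threshold values are illustrative percentages (the driver keeps its ratios scaled by 128), the final sanity check that cancels a decrease when the old rate was still performing well is left out, and the caller is assumed to act on a -1 only when a lower rate actually exists:

#include <stdio.h>

#define TPT_INVALID   (-1)
#define RATE_INVALID  (-1)
#define DECREASE_TH   15    /* percent: below this, always step down  */
#define INCREASE_TH   50    /* percent: need at least this to step up */

static int scale_action(int sr_percent, int current_tpt,
			int low_tpt, int high, int high_tpt)
{
	/* Too many failures at the current rate: step down. */
	if (sr_percent <= DECREASE_TH || current_tpt == 0)
		return -1;

	/* Neither neighbour measured yet: try stepping up if success allows. */
	if (low_tpt == TPT_INVALID && high_tpt == TPT_INVALID)
		return (high != RATE_INVALID && sr_percent >= INCREASE_TH) ? 1 : 0;

	/* Both neighbours measured and both worse: the current rate is best. */
	if (low_tpt != TPT_INVALID && high_tpt != TPT_INVALID &&
	    low_tpt < current_tpt && high_tpt < current_tpt)
		return 0;

	/* Otherwise follow whichever measured neighbour looks better. */
	if (high_tpt != TPT_INVALID)
		return (high_tpt > current_tpt && sr_percent >= INCREASE_TH) ? 1 : 0;
	if (low_tpt > current_tpt)
		return -1;
	return sr_percent >= INCREASE_TH ? 1 : 0;
}

int main(void)
{
	/* 80% success and the higher rate already measured better: step up. */
	printf("action = %+d\n", scale_action(80, 12000, 9000, 8, 15000));
	return 0;
}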
*/ + else { + /* Higher adjacent rate's throughput is measured */ + if (high_tpt != IWL_INVALID_VALUE) { + /* Higher rate has better throughput */ + if (high_tpt > current_tpt && + sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } else { + scale_action = 0; + } + + /* Lower adjacent rate's throughput is measured */ + } else if (low_tpt != IWL_INVALID_VALUE) { + /* Lower rate has better throughput */ + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE(priv, + "decrease rate because of low tpt\n"); + scale_action = -1; + } else if (sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } + } + } + + /* Sanity check; asked for decrease, but success rate or throughput + * has been good at old rate. Don't change it. */ + if ((scale_action == -1) && (low != IWL_RATE_INVALID) && + ((sr > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * tbl->expected_tpt[low])))) + scale_action = 0; + if (!iwl_ht_enabled(priv) && !is_legacy(tbl->lq_type)) + scale_action = -1; + if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI && + (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) + scale_action = -1; + switch (scale_action) { + case -1: + /* Decrease starting rate, update uCode's rate table */ + if (low != IWL_RATE_INVALID) { + update_lq = 1; + index = low; + } + + break; + case 1: + /* Increase starting rate, update uCode's rate table */ + if (high != IWL_RATE_INVALID) { + update_lq = 1; + index = high; + } + + break; + case 0: + /* No change */ + default: + break; + } + + IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d " + "high %d type %d\n", + index, scale_action, low, high, tbl->lq_type); + +lq_update: + /* Replace uCode's rate table for the destination station. */ + if (update_lq) + rate = rs_update_rate_tbl(priv, lq_sta, + tbl, index, is_green); + + if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) { + /* Should we stay with this modulation mode, + * or search for a new one? */ + rs_stay_in_table(lq_sta); + } + /* + * Search for new modulation mode if we're: + * 1) Not changing rates right now + * 2) Not just finishing up a search + * 3) Allowing a new search + */ + if (!update_lq && !done_search && !lq_sta->stay_in_tbl && window->counter) { + /* Save current throughput to compare with "search" throughput*/ + lq_sta->last_tpt = current_tpt; + + /* Select a new "search" modulation mode to try. + * If one is found, set up the new "search" table. */ + if (is_legacy(tbl->lq_type)) + rs_move_legacy_other(priv, lq_sta, conf, sta, index); + else if (is_siso(tbl->lq_type)) + rs_move_siso_to_other(priv, lq_sta, conf, sta, index); + else if (is_mimo2(tbl->lq_type)) + rs_move_mimo2_to_other(priv, lq_sta, conf, sta, index); + else + rs_move_mimo3_to_other(priv, lq_sta, conf, sta, index); + + /* If new "search" mode was selected, set up in uCode table */ + if (lq_sta->search_better_tbl) { + /* Access the "search" table, clear its history. 
*/ + tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(tbl->win[i])); + + /* Use new "search" start rate */ + index = iwl_hwrate_to_plcp_idx(tbl->current_rate); + + IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n", + tbl->current_rate, index); + rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); + iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); + } else + done_search = 1; + } + + if (done_search && !lq_sta->stay_in_tbl) { + /* If the "active" (non-search) mode was legacy, + * and we've tried switching antennas, + * but we haven't been able to try HT modes (not available), + * stay with best antenna legacy modulation for a while + * before next round of mode comparisons. */ + tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); + if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) && + lq_sta->action_counter > tbl1->max_search) { + IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n"); + rs_set_stay_in_table(priv, 1, lq_sta); + } + + /* If we're in an HT mode, and all 3 mode switch actions + * have been tried and compared, stay in this best modulation + * mode for a while before next round of mode comparisons. */ + if (lq_sta->enable_counter && + (lq_sta->action_counter >= tbl1->max_search) && + iwl_ht_enabled(priv)) { + if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && + (lq_sta->tx_agg_tid_en & (1 << tid)) && + (tid != MAX_TID_COUNT)) { + tid_data = + &priv->stations[lq_sta->lq.sta_id].tid[tid]; + if (tid_data->agg.state == IWL_AGG_OFF) { + IWL_DEBUG_RATE(priv, + "try to aggregate tid %d\n", + tid); + rs_tl_turn_on_agg(priv, tid, + lq_sta, sta); + } + } + rs_set_stay_in_table(priv, 0, lq_sta); + } + } + +out: + tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); + i = index; + lq_sta->last_txrate_idx = i; + + return; +} + + +static void rs_initialize_lq(struct iwl_priv *priv, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct iwl_scale_tbl_info *tbl; + int rate_idx; + int i; + u32 rate; + u8 use_green = rs_use_green(sta, &priv->current_ht_config); + u8 active_tbl = 0; + u8 valid_tx_ant; + + if (!sta || !lq_sta) + goto out; + + i = lq_sta->last_txrate_idx; + + if ((lq_sta->lq.sta_id == 0xff) && + (priv->iw_mode == NL80211_IFTYPE_ADHOC)) + goto out; + + valid_tx_ant = priv->hw_params.valid_tx_ant; + + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + if ((i < 0) || (i >= IWL_RATE_COUNT)) + i = 0; + + rate = iwl_rates[i].plcp; + tbl->ant_type = first_antenna(valid_tx_ant); + rate |= tbl->ant_type << RATE_MCS_ANT_POS; + + if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) + rate |= RATE_MCS_CCK_MSK; + + rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx); + if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) + rs_toggle_antenna(valid_tx_ant, &rate, tbl); + + rate = rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green); + tbl->current_rate = rate; + rs_set_expected_tpt_table(lq_sta, tbl); + rs_fill_link_cmd(NULL, lq_sta, rate); + iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); + out: + return; +} + +static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + + struct sk_buff *skb = txrc->skb; + struct ieee80211_supported_band *sband = txrc->sband; + struct iwl_priv *priv = (struct iwl_priv *)priv_r; + struct ieee80211_conf *conf = &priv->hw->conf; + struct ieee80211_sta_ht_cap 
*ht_cap = &sta->ht_cap; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_lq_sta *lq_sta = priv_sta; + int rate_idx; + + IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n"); + + /* Get max rate if user set max rate */ + if (lq_sta) { + lq_sta->max_rate_idx = txrc->max_rate_idx; + if ((sband->band == IEEE80211_BAND_5GHZ) && + (lq_sta->max_rate_idx != -1)) + lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; + if ((lq_sta->max_rate_idx < 0) || + (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) + lq_sta->max_rate_idx = -1; + } + + /* Send management frames and NO_ACK data using lowest rate. */ + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + rate_idx = lq_sta->last_txrate_idx; + + if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) && + !lq_sta->ibss_sta_added) { + u8 sta_id = iwl_find_station(priv, hdr->addr1); + + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", + hdr->addr1); + sta_id = iwl_add_station(priv, hdr->addr1, + false, CMD_ASYNC, ht_cap); + } + if ((sta_id != IWL_INVALID_STATION)) { + lq_sta->lq.sta_id = sta_id; + lq_sta->lq.rs_table[0].rate_n_flags = 0; + lq_sta->ibss_sta_added = 1; + rs_initialize_lq(priv, conf, sta, lq_sta); + } + } + + if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { + rate_idx -= IWL_FIRST_OFDM_RATE; + /* 6M and 9M shared same MCS index */ + rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0; + if (rs_extract_rate(lq_sta->last_rate_n_flags) >= + IWL_RATE_MIMO3_6M_PLCP) + rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM); + else if (rs_extract_rate(lq_sta->last_rate_n_flags) >= + IWL_RATE_MIMO2_6M_PLCP) + rate_idx = rate_idx + MCS_INDEX_PER_STREAM; + info->control.rates[0].flags = IEEE80211_TX_RC_MCS; + if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) + info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI; + if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK) + info->control.rates[0].flags |= IEEE80211_TX_RC_DUP_DATA; + if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK) + info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK) + info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD; + } else { + /* Check for invalid rates */ + if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) || + ((sband->band == IEEE80211_BAND_5GHZ) && + (rate_idx < IWL_FIRST_OFDM_RATE))) + rate_idx = rate_lowest_index(sband, sta); + /* On valid 5 GHz rate, adjust index */ + else if (sband->band == IEEE80211_BAND_5GHZ) + rate_idx -= IWL_FIRST_OFDM_RATE; + info->control.rates[0].flags = 0; + } + info->control.rates[0].idx = rate_idx; + +} + +static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, + gfp_t gfp) +{ + struct iwl_lq_sta *lq_sta; + struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv; + struct iwl_priv *priv; + + priv = (struct iwl_priv *)priv_rate; + IWL_DEBUG_RATE(priv, "create station rate scale window\n"); + + lq_sta = &sta_priv->lq_sta; + + return lq_sta; +} + +static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ + int i, j; + struct iwl_priv *priv = (struct iwl_priv *)priv_r; + struct ieee80211_conf *conf = &priv->hw->conf; + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct iwl_lq_sta *lq_sta = priv_sta; + + lq_sta->lq.sta_id = 0xff; + + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + 
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); + + lq_sta->flush_timer = 0; + lq_sta->supp_rates = sta->supp_rates[sband->band]; + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); + + IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init ***\n"); + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. */ + + lq_sta->ibss_sta_added = 0; + if (priv->iw_mode == NL80211_IFTYPE_AP) { + u8 sta_id = iwl_find_station(priv, + sta->addr); + + /* for IBSS the calls are from a tasklet */ + IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr); + + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr); + sta_id = iwl_add_station(priv, sta->addr, false, + CMD_ASYNC, ht_cap); + } + if ((sta_id != IWL_INVALID_STATION)) { + lq_sta->lq.sta_id = sta_id; + lq_sta->lq.rs_table[0].rate_n_flags = 0; + } + /* FIXME: this is a workaround; remove it later */ + priv->assoc_station_added = 1; + } + + lq_sta->is_dup = 0; + lq_sta->max_rate_idx = -1; + lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; + lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config); + lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); + lq_sta->active_rate_basic = priv->active_rate_basic; + lq_sta->band = priv->band; + /* + * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), + * supp_rates[] does not; shift to convert format, force 9 MBits off. + */ + lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; + lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; + lq_sta->active_siso_rate &= ~((u16)0x2); + lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; + + /* Same here */ + lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; + lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; + lq_sta->active_mimo2_rate &= ~((u16)0x2); + lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; + + lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1; + lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1; + lq_sta->active_mimo3_rate &= ~((u16)0x2); + lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE; + + IWL_DEBUG_RATE(priv, "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n", + lq_sta->active_siso_rate, + lq_sta->active_mimo2_rate, + lq_sta->active_mimo3_rate); + + /* These values will be overridden later */ + lq_sta->lq.general_params.single_stream_ant_msk = ANT_A; + lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; + + /* as default allow aggregation for all tids */ + lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; + lq_sta->drv = priv; + + /* Set last_txrate_idx to lowest rate */ + lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); + if (sband->band == IEEE80211_BAND_5GHZ) + lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; + lq_sta->is_agg = 0; + + rs_initialize_lq(priv, conf, sta, lq_sta); +} + +static void rs_fill_link_cmd(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, u32 new_rate) +{ + struct iwl_scale_tbl_info tbl_type; + int index = 0; + int rate_idx; + int repeat_rate = 0; + u8 ant_toggle_cnt = 0; + u8 use_ht_possible = 1; + u8 valid_tx_ant = 0; + struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; + + /* Override starting rate (index 0) if needed for debug purposes */ + rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Interpret new_rate (rate_n_flags) */ + memset(&tbl_type, 0,
sizeof(tbl_type)); + rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, + &tbl_type, &rate_idx); + + /* How many times should we repeat the initial rate? */ + if (is_legacy(tbl_type.lq_type)) { + ant_toggle_cnt = 1; + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = IWL_HT_NUMBER_TRY; + } + + lq_cmd->general_params.mimo_delimiter = + is_mimo(tbl_type.lq_type) ? 1 : 0; + + /* Fill 1st table entry (index 0) */ + lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); + + if (num_of_ant(tbl_type.ant_type) == 1) { + lq_cmd->general_params.single_stream_ant_msk = + tbl_type.ant_type; + } else if (num_of_ant(tbl_type.ant_type) == 2) { + lq_cmd->general_params.dual_stream_ant_msk = + tbl_type.ant_type; + } /* otherwise we don't modify the existing value */ + + index++; + repeat_rate--; + + if (priv) + valid_tx_ant = priv->hw_params.valid_tx_ant; + + /* Fill rest of rate table */ + while (index < LINK_QUAL_MAX_RETRY_NUM) { + /* Repeat initial/next rate. + * For legacy IWL_NUMBER_TRY == 1, this loop will not execute. + * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */ + while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (priv && + rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; +} + + /* Override next rate if needed for debug purposes */ + rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(new_rate); + repeat_rate--; + index++; + } + + rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type, + &rate_idx); + + /* Indicate to uCode which entries might be MIMO. + * If initial rate was MIMO, this will finally end up + * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ + if (is_mimo(tbl_type.lq_type)) + lq_cmd->general_params.mimo_delimiter = index; + + /* Get next rate */ + new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx, + use_ht_possible); + + /* How many times should we repeat the next rate? */ + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (priv && + rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = IWL_HT_NUMBER_TRY; + } + + /* Don't allow HT rates after next pass. + * rs_get_lower_rate() will change type to LQ_A or LQ_G. 
*/ + use_ht_possible = 0; + + /* Override next rate if needed for debug purposes */ + rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); + + index++; + repeat_rate--; + } + + lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + lq_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); +} + +static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} +/* rate scale requires free function to be implemented */ +static void rs_free(void *priv_rate) +{ + return; +} + +static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta, + void *priv_sta) +{ + struct iwl_priv *priv __maybe_unused = priv_r; + + IWL_DEBUG_RATE(priv, "enter\n"); + IWL_DEBUG_RATE(priv, "leave\n"); +} + + +#ifdef CONFIG_MAC80211_DEBUGFS +static int open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} +static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{ + struct iwl_priv *priv; + u8 valid_tx_ant; + u8 ant_sel_tx; + + priv = lq_sta->drv; + valid_tx_ant = priv->hw_params.valid_tx_ant; + if (lq_sta->dbg_fixed_rate) { + ant_sel_tx = + ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) + >> RATE_MCS_ANT_POS); + if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) { + *rate_n_flags = lq_sta->dbg_fixed_rate; + IWL_DEBUG_RATE(priv, "Fixed rate ON\n"); + } else { + lq_sta->dbg_fixed_rate = 0; + IWL_ERR(priv, + "Invalid antenna selection 0x%X, Valid is 0x%X\n", + ant_sel_tx, valid_tx_ant); + IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); + } + } else { + IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); + } +} + +static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + char buf[64]; + int buf_size; + u32 parsed_rate; + + priv = lq_sta->drv; + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &parsed_rate) == 1) + lq_sta->dbg_fixed_rate = parsed_rate; + else + lq_sta->dbg_fixed_rate = 0; + + lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ + lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + + IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", + lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); + + if (lq_sta->dbg_fixed_rate) { + rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); + iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC); + } + + return count; +} + +static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i = 0; + int index = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + + priv = lq_sta->drv; + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); + desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", + lq_sta->total_failed, lq_sta->total_success, + 
lq_sta->active_legacy_rate); + desc += sprintf(buff+desc, "fixed rate 0x%X\n", + lq_sta->dbg_fixed_rate); + desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", + (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", + (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", + (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); + desc += sprintf(buff+desc, "lq type %s\n", + (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); + if (is_Ht(tbl->lq_type)) { + desc += sprintf(buff+desc, " %s", + (is_siso(tbl->lq_type)) ? "SISO" : + ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3")); + desc += sprintf(buff+desc, " %s", + (tbl->is_ht40) ? "40MHz" : "20MHz"); + desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "", + (lq_sta->is_green) ? "GF enabled" : "", + (lq_sta->is_agg) ? "AGG on" : ""); + } + desc += sprintf(buff+desc, "last tx rate=0x%X\n", + lq_sta->last_rate_n_flags); + desc += sprintf(buff+desc, "general:" + "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", + lq_sta->lq.general_params.flags, + lq_sta->lq.general_params.mimo_delimiter, + lq_sta->lq.general_params.single_stream_ant_msk, + lq_sta->lq.general_params.dual_stream_ant_msk); + + desc += sprintf(buff+desc, "agg:" + "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", + le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit), + lq_sta->lq.agg_params.agg_dis_start_th, + lq_sta->lq.agg_params.agg_frame_cnt_limit); + + desc += sprintf(buff+desc, + "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", + lq_sta->lq.general_params.start_rate_index[0], + lq_sta->lq.general_params.start_rate_index[1], + lq_sta->lq.general_params.start_rate_index[2], + lq_sta->lq.general_params.start_rate_index[3]); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + index = iwl_hwrate_to_plcp_idx( + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags)); + if (is_legacy(tbl->lq_type)) { + desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n", + i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), + iwl_rate_mcs[index].mbps); + } else { + desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps (%s)\n", + i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), + iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs); + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_scale_table_ops = { + .write = rs_sta_dbgfs_scale_table_write, + .read = rs_sta_dbgfs_scale_table_read, + .open = open_file_generic, +}; +static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i, j; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + for (i = 0; i < LQ_SIZE; i++) { + desc += sprintf(buff+desc, + "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n" + "rate=0x%X\n", + lq_sta->active_tbl == i ? 
"*" : "x", + lq_sta->lq_info[i].lq_type, + lq_sta->lq_info[i].is_SGI, + lq_sta->lq_info[i].is_ht40, + lq_sta->lq_info[i].is_dup, + lq_sta->is_green, + lq_sta->lq_info[i].current_rate); + for (j = 0; j < IWL_RATE_COUNT; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + lq_sta->lq_info[i].win[j].counter, + lq_sta->lq_info[i].win[j].success_counter, + lq_sta->lq_info[i].win[j].success_ratio); + } + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = rs_sta_dbgfs_stats_table_read, + .open = open_file_generic, +}; + +static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char buff[120]; + int desc = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; + + priv = lq_sta->drv; + + if (is_Ht(tbl->lq_type)) + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + tbl->expected_tpt[lq_sta->last_txrate_idx]); + else + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); + desc += sprintf(buff+desc, + "Signal Level= %d dBm\tNoise Level= %d dBm\n", + priv->last_rx_rssi, priv->last_rx_noise); + desc += sprintf(buff+desc, + "Tsf= 0x%llx\tBeacon time= 0x%08X\n", + priv->last_tsf, priv->last_beacon_time); + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { + .read = rs_sta_dbgfs_rate_scale_data_read, + .open = open_file_generic, +}; + +static void rs_add_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + lq_sta->rs_sta_dbgfs_scale_table_file = + debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, + lq_sta, &rs_sta_dbgfs_scale_table_ops); + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); + lq_sta->rs_sta_dbgfs_rate_scale_data_file = + debugfs_create_file("rate_scale_data", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); + lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = + debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, + &lq_sta->tx_agg_tid_en); + +} + +static void rs_remove_debugfs(void *priv, void *priv_sta) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); +} +#endif + +static struct rate_control_ops rs_ops = { + .module = NULL, + .name = RS_NAME, + .tx_status = rs_tx_status, + .get_rate = rs_get_rate, + .rate_init = rs_rate_init, + .alloc = rs_alloc, + .free = rs_free, + .alloc_sta = rs_alloc_sta, + .free_sta = rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = rs_add_debugfs, + .remove_sta_debugfs = rs_remove_debugfs, +#endif +}; + +int iwlagn_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rs_ops); +} + +void iwlagn_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_ops); +} + diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h new file mode 100644 index 0000000..e719239 --- /dev/null +++ 
b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h @@ -0,0 +1,504 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_agn_rs_h__ +#define __iwl_agn_rs_h__ + +struct iwl_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ + u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ + u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +struct iwl3945_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. 
*/ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ + u8 table_rs_index; /* index in rate scale table cmd */ + u8 prev_table_rs; /* prev in rate table cmd */ +}; + + +/* + * These serve as indexes into + * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; + */ +enum { + IWL_RATE_1M_INDEX = 0, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_60M_INDEX, + IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/ + IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */ + IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1, + IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, + IWL_RATE_INVALID = IWL_RATE_COUNT, +}; + +enum { + IWL_RATE_6M_INDEX_TABLE = 0, + IWL_RATE_9M_INDEX_TABLE, + IWL_RATE_12M_INDEX_TABLE, + IWL_RATE_18M_INDEX_TABLE, + IWL_RATE_24M_INDEX_TABLE, + IWL_RATE_36M_INDEX_TABLE, + IWL_RATE_48M_INDEX_TABLE, + IWL_RATE_54M_INDEX_TABLE, + IWL_RATE_1M_INDEX_TABLE, + IWL_RATE_2M_INDEX_TABLE, + IWL_RATE_5M_INDEX_TABLE, + IWL_RATE_11M_INDEX_TABLE, + IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, +}; + +enum { + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX, + IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, +}; + +/* #define vs. enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) +#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) +#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) +#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) +#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) +#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) +#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) +#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) +#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) +#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) +#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) +#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) +#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) + +/* uCode API values for legacy bit rates, both OFDM and CCK */ +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/ + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, + /*FIXME:RS:change to IWL_RATE_LEGACY_??M_PLCP */ + /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/ +}; + +/* uCode API values for OFDM high-throughput (HT) bit rates */ +enum { + IWL_RATE_SISO_6M_PLCP = 0, + IWL_RATE_SISO_12M_PLCP = 1, + IWL_RATE_SISO_18M_PLCP = 2, + IWL_RATE_SISO_24M_PLCP = 3, + IWL_RATE_SISO_36M_PLCP = 4, + IWL_RATE_SISO_48M_PLCP = 5, + IWL_RATE_SISO_54M_PLCP = 6, + IWL_RATE_SISO_60M_PLCP = 7, + IWL_RATE_MIMO2_6M_PLCP = 0x8, + IWL_RATE_MIMO2_12M_PLCP = 0x9, + IWL_RATE_MIMO2_18M_PLCP = 0xa, + IWL_RATE_MIMO2_24M_PLCP = 0xb, + IWL_RATE_MIMO2_36M_PLCP = 0xc, + IWL_RATE_MIMO2_48M_PLCP = 0xd, + IWL_RATE_MIMO2_54M_PLCP = 0xe, + IWL_RATE_MIMO2_60M_PLCP = 
0xf, + IWL_RATE_MIMO3_6M_PLCP = 0x10, + IWL_RATE_MIMO3_12M_PLCP = 0x11, + IWL_RATE_MIMO3_18M_PLCP = 0x12, + IWL_RATE_MIMO3_24M_PLCP = 0x13, + IWL_RATE_MIMO3_36M_PLCP = 0x14, + IWL_RATE_MIMO3_48M_PLCP = 0x15, + IWL_RATE_MIMO3_54M_PLCP = 0x16, + IWL_RATE_MIMO3_60M_PLCP = 0x17, + IWL_RATE_SISO_INVM_PLCP, + IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, + IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, +}; + +/* MAC header values for bit rates */ +enum { + IWL_RATE_6M_IEEE = 12, + IWL_RATE_9M_IEEE = 18, + IWL_RATE_12M_IEEE = 24, + IWL_RATE_18M_IEEE = 36, + IWL_RATE_24M_IEEE = 48, + IWL_RATE_36M_IEEE = 72, + IWL_RATE_48M_IEEE = 96, + IWL_RATE_54M_IEEE = 108, + IWL_RATE_60M_IEEE = 120, + IWL_RATE_1M_IEEE = 2, + IWL_RATE_2M_IEEE = 4, + IWL_RATE_5M_IEEE = 11, + IWL_RATE_11M_IEEE = 22, +}; + +#define IWL_CCK_BASIC_RATES_MASK \ + (IWL_RATE_1M_MASK | \ + IWL_RATE_2M_MASK) + +#define IWL_CCK_RATES_MASK \ + (IWL_CCK_BASIC_RATES_MASK | \ + IWL_RATE_5M_MASK | \ + IWL_RATE_11M_MASK) + +#define IWL_OFDM_BASIC_RATES_MASK \ + (IWL_RATE_6M_MASK | \ + IWL_RATE_12M_MASK | \ + IWL_RATE_24M_MASK) + +#define IWL_OFDM_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_RATE_9M_MASK | \ + IWL_RATE_18M_MASK | \ + IWL_RATE_36M_MASK | \ + IWL_RATE_48M_MASK | \ + IWL_RATE_54M_MASK) + +#define IWL_BASIC_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_CCK_BASIC_RATES_MASK) + +#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) + +#define IWL_INVALID_VALUE -1 + +#define IWL_MIN_RSSI_VAL -100 +#define IWL_MAX_RSSI_VAL 0 + +/* These values specify how many Tx frame attempts before + * searching for a new modulation mode */ +#define IWL_LEGACY_FAILURE_LIMIT 160 +#define IWL_LEGACY_SUCCESS_LIMIT 480 +#define IWL_LEGACY_TABLE_COUNT 160 + +#define IWL_NONE_LEGACY_FAILURE_LIMIT 400 +#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500 +#define IWL_NONE_LEGACY_TABLE_COUNT 1500 + +/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */ +#define IWL_RS_GOOD_RATIO 12800 /* 100% */ +#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */ +#define IWL_RATE_HIGH_TH 10880 /* 85% */ +#define IWL_RATE_INCREASE_TH 6400 /* 50% */ +#define IWL_RATE_DECREASE_TH 1920 /* 15% */ + +/* possible actions when in legacy mode */ +#define IWL_LEGACY_SWITCH_ANTENNA1 0 +#define IWL_LEGACY_SWITCH_ANTENNA2 1 +#define IWL_LEGACY_SWITCH_SISO 2 +#define IWL_LEGACY_SWITCH_MIMO2_AB 3 +#define IWL_LEGACY_SWITCH_MIMO2_AC 4 +#define IWL_LEGACY_SWITCH_MIMO2_BC 5 +#define IWL_LEGACY_SWITCH_MIMO3_ABC 6 + +/* possible actions when in siso mode */ +#define IWL_SISO_SWITCH_ANTENNA1 0 +#define IWL_SISO_SWITCH_ANTENNA2 1 +#define IWL_SISO_SWITCH_MIMO2_AB 2 +#define IWL_SISO_SWITCH_MIMO2_AC 3 +#define IWL_SISO_SWITCH_MIMO2_BC 4 +#define IWL_SISO_SWITCH_GI 5 +#define IWL_SISO_SWITCH_MIMO3_ABC 6 + + +/* possible actions when in mimo mode */ +#define IWL_MIMO2_SWITCH_ANTENNA1 0 +#define IWL_MIMO2_SWITCH_ANTENNA2 1 +#define IWL_MIMO2_SWITCH_SISO_A 2 +#define IWL_MIMO2_SWITCH_SISO_B 3 +#define IWL_MIMO2_SWITCH_SISO_C 4 +#define IWL_MIMO2_SWITCH_GI 5 +#define IWL_MIMO2_SWITCH_MIMO3_ABC 6 + + +/* possible actions when in mimo3 mode */ +#define IWL_MIMO3_SWITCH_ANTENNA1 0 +#define IWL_MIMO3_SWITCH_ANTENNA2 1 +#define IWL_MIMO3_SWITCH_SISO_A 2 +#define IWL_MIMO3_SWITCH_SISO_B 3 +#define IWL_MIMO3_SWITCH_SISO_C 4 +#define IWL_MIMO3_SWITCH_MIMO2_AB 5 +#define IWL_MIMO3_SWITCH_MIMO2_AC 6 +#define IWL_MIMO3_SWITCH_MIMO2_BC 7 +#define IWL_MIMO3_SWITCH_GI 8 + + +#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI +#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC + 
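The success-ratio thresholds defined above are easiest to read against the fixed-point convention used throughout this file: success_ratio is stored as "per-cent * 128" (see struct iwl_rate_scale_data below), so a perfect run is 128 * 100 = 12800, and a window's average_tpt is that ratio applied to the table's expected throughput with rounding, as checked by the sanity BUG_ON in rs_rate_scale_perform(). The following is a minimal standalone sketch of that arithmetic only, not part of the patch; the helper names and the sample numbers are invented for illustration, while the IWL_* values are copied from the definitions above.

/* Illustration only -- not driver code. */
#include <stdio.h>

#define IWL_RATE_HIGH_TH	10880	/* 85% on the 128 * percent scale */
#define IWL_RATE_INCREASE_TH	 6400	/* 50% */
#define IWL_RATE_DECREASE_TH	 1920	/* 15% */

/* success_ratio is kept as percent * 128 ("per-cent * 128") */
static int success_ratio(int acked, int attempted)
{
	return attempted ? (128 * 100 * acked) / attempted : 0;
}

/* average_tpt = success_ratio * expected_tpt / 128, with rounding,
 * matching the sanity check in rs_rate_scale_perform() */
static int window_average_tpt(int sr, int expected_tpt)
{
	return (sr * expected_tpt + 64) / 128;
}

int main(void)
{
	int sr = success_ratio(9, 12);	/* 9 of 12 frames ACKed -> 9600, i.e. 75% */

	printf("sr=%d (%d%%) increase=%d decrease=%d avg_tpt=%d\n",
	       sr, sr / 128,
	       sr >= IWL_RATE_INCREASE_TH,	/* good enough to try a higher rate */
	       sr <= IWL_RATE_DECREASE_TH,	/* not bad enough to force a lower rate */
	       window_average_tpt(sr, 44));	/* 44 = hypothetical expected_tpt entry */
	return 0;
}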
+/*FIXME:RS:add possible actions for MIMO3*/ + +#define IWL_ACTION_LIMIT 3 /* # possible actions */ + +#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ + +/* load per tid defines for A-MPDU activation */ +#define IWL_AGG_TPT_THREHOLD 0 +#define IWL_AGG_LOAD_THRESHOLD 10 +#define IWL_AGG_ALL_TID 0xff +#define TID_QUEUE_CELL_SPACING 50 /* ms */ +#define TID_QUEUE_MAX_SIZE 20 +#define TID_ROUND_VALUE 5 /* ms */ +#define TID_MAX_LOAD_COUNT 8 + +#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) +#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) + +extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; +extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945]; + +enum iwl_table_type { + LQ_NONE, + LQ_G, /* legacy types */ + LQ_A, + LQ_SISO, /* high-throughput types */ + LQ_MIMO2, + LQ_MIMO3, + LQ_MAX, +}; + +#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) +#define is_siso(tbl) ((tbl) == LQ_SISO) +#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) +#define is_mimo3(tbl) ((tbl) == LQ_MIMO3) +#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl)) +#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) +#define is_a_band(tbl) ((tbl) == LQ_A) +#define is_g_and(tbl) ((tbl) == LQ_G) + +#define ANT_NONE 0x0 +#define ANT_A BIT(0) +#define ANT_B BIT(1) +#define ANT_AB (ANT_A | ANT_B) +#define ANT_C BIT(2) +#define ANT_AC (ANT_A | ANT_C) +#define ANT_BC (ANT_B | ANT_C) +#define ANT_ABC (ANT_AB | ANT_C) + +#define IWL_MAX_MCS_DISPLAY_SIZE 12 + +struct iwl_rate_mcs_info { + char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; + char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; +}; + +/** + * struct iwl_rate_scale_data -- tx success history for one rate + */ +struct iwl_rate_scale_data { + u64 data; /* bitmap of successful frames */ + s32 success_counter; /* number of frames successful */ + s32 success_ratio; /* per-cent * 128 */ + s32 counter; /* number of frames attempted */ + s32 average_tpt; /* success ratio * expected throughput */ + unsigned long stamp; +}; + +/** + * struct iwl_scale_tbl_info -- tx params and success history for all rates + * + * There are two of these in struct iwl_lq_sta, + * one for "active", and one for "search". + */ +struct iwl_scale_tbl_info { + enum iwl_table_type lq_type; + u8 ant_type; + u8 is_SGI; /* 1 = short guard interval */ + u8 is_ht40; /* 1 = 40 MHz channel width */ + u8 is_dup; /* 1 = duplicated data streams */ + u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ + u8 max_search; /* maximum number of tables we can search */ + s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ + u32 current_rate; /* rate_n_flags, uCode API format */ + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ +}; + +struct iwl_traffic_load { + unsigned long time_stamp; /* age of the oldest statistics */ + u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time + * slice */ + u32 total; /* total num of packets during the + * last TID_MAX_TIME_DIFF */ + u8 queue_count; /* number of queues that have + * been used since the last cleanup */ + u8 head; /* start of the circular buffer */ +}; + +/** + * struct iwl_lq_sta -- driver's rate scaling private structure + * + * Pointer to this gets passed back and forth between driver and mac80211.
+ */ +struct iwl_lq_sta { + u8 active_tbl; /* index of active table, range 0-1 */ + u8 enable_counter; /* indicates HT mode */ + u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ + u8 search_better_tbl; /* 1: currently trying alternate mode */ + s32 last_tpt; + + /* The following determine when to search for a new mode */ + u32 table_count_limit; + u32 max_failure_limit; /* # failed frames before new search */ + u32 max_success_limit; /* # successful frames before new search */ + u32 table_count; + u32 total_failed; /* total failed frames, any/all rates */ + u32 total_success; /* total successful frames, any/all rates */ + u64 flush_timer; /* time staying in mode before new search */ + + u8 action_counter; /* # mode-switch actions tried */ + u8 is_green; + u8 is_dup; + enum ieee80211_band band; + u8 ibss_sta_added; + + /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ + u32 supp_rates; + u16 active_legacy_rate; + u16 active_siso_rate; + u16 active_mimo2_rate; + u16 active_mimo3_rate; + u16 active_rate_basic; + s8 max_rate_idx; /* Max rate set by user */ + u8 missed_rate_counter; + + struct iwl_link_quality_cmd lq; + struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ + struct iwl_traffic_load load[TID_MAX_LOAD_COUNT]; + u8 tx_agg_tid_en; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_scale_table_file; + struct dentry *rs_sta_dbgfs_stats_table_file; + struct dentry *rs_sta_dbgfs_rate_scale_data_file; + struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; + u32 dbg_fixed_rate; +#endif + struct iwl_priv *drv; + + /* used to be in sta_info */ + int last_txrate_idx; + /* last tx rate_n_flags */ + u32 last_rate_n_flags; + /* packets destined for this STA are aggregated */ + u8 is_agg; +}; + +static inline u8 num_of_ant(u8 mask) +{ + return !!((mask) & ANT_A) + + !!((mask) & ANT_B) + + !!((mask) & ANT_C); +} + +static inline u8 first_antenna(u8 mask) +{ + if (mask & ANT_A) + return ANT_A; + if (mask & ANT_B) + return ANT_B; + return ANT_C; +} + + +static inline u8 iwl_get_prev_ieee_rate(u8 rate_index) +{ + u8 rate = iwl_rates[rate_index].prev_ieee; + + if (rate == IWL_RATE_INVALID) + rate = rate_index; + return rate; +} + +static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index) +{ + u8 rate = iwl3945_rates[rate_index].prev_ieee; + + if (rate == IWL_RATE_INVALID) + rate = rate_index; + return rate; +} + +/** + * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info + * + * The specific throughput table used is based on the type of network + * the STA is associated with, including A, B, G, and G w/ TGG protection + */ +extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); + +/** + * iwl_rate_control_register - Register the rate control algorithm callbacks + * + * Since the rate control algorithm is hardware-specific, there is no need + * or reason to place it as a standalone module. The driver can call + * iwl_rate_control_register in order to register the rate control callbacks + * with the mac80211 subsystem. This should be performed prior to calling + * ieee80211_register_hw + * + */ +extern int iwlagn_rate_control_register(void); +extern int iwl3945_rate_control_register(void); + +/** + * iwl_rate_control_unregister - Unregister the rate control callbacks + * + * This should be called after calling ieee80211_unregister_hw, but before + * the driver is unloaded.
+ */ +extern void iwlagn_rate_control_unregister(void); +extern void iwl3945_rate_control_unregister(void); + +#endif /* __iwl_agn__rs__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c new file mode 100644 index 0000000..29831c9 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -0,0 +1,3943 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define DRV_NAME "iwlagn" + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-sta.h" +#include "iwl-calib.h" + + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +/* + * module name, copyright, version, etc. + */ +#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" + +#ifdef CONFIG_IWLWIFI_DEBUG +#define VD "d" +#else +#define VD +#endif + +#define DRV_VERSION IWLWIFI_VERSION VD + + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("iwl4965"); + +/*************** STATION TABLE MANAGEMENT **** + * mac80211 should be examined to determine if sta_info is duplicating + * the functionality provided here + */ + +/**************************************************************/ + +/** + * iwl_commit_rxon - commit staging_rxon to hardware + * + * The RXON command in staging_rxon is committed to the hardware and + * the active_rxon structure is updated with the new data. This + * function correctly transitions out of the RXON_ASSOC_MSK state if + * a HW tune is required based on the RXON structure changes. 
+ */ +int iwl_commit_rxon(struct iwl_priv *priv) +{ + /* cast away the const for active_rxon in this function */ + struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; + int ret; + bool new_assoc = + !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK); + + if (!iwl_is_alive(priv)) + return -EBUSY; + + /* always get timestamp with Rx frame */ + priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; + + ret = iwl_check_rxon_cmd(priv); + if (ret) { + IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* + * receive commit_rxon request + * abort any previous channel switch if still in process + */ + if (priv->switch_rxon.switch_in_progress && + (priv->switch_rxon.channel != priv->staging_rxon.channel)) { + IWL_DEBUG_11H(priv, "abort channel switch on %d\n", + le16_to_cpu(priv->switch_rxon.channel)); + priv->switch_rxon.switch_in_progress = false; + } + + /* If we don't need to send a full RXON, we can use + * iwl_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. */ + if (!iwl_full_rxon_required(priv)) { + ret = iwl_send_rxon_assoc(priv); + if (ret) { + IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); + return ret; + } + + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + iwl_print_rx_config_cmd(priv); + return 0; + } + + /* station table will be cleared */ + priv->assoc_station_added = 0; + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_is_associated(priv) && new_assoc) { + IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + ret = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), + &priv->active_rxon); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (ret) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); + return ret; + } + } + + IWL_DEBUG_INFO(priv, "Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %pM\n", + (new_assoc ? "" : "out"), + le16_to_cpu(priv->staging_rxon.channel), + priv->staging_rxon.bssid_addr); + + iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto); + + /* Apply the new configuration + * RXON unassoc clears the station table in uCode, send it before + * we add the bcast station. If assoc bit is set, we will send RXON + * after having added the bcast and bssid station. 
+ */ + if (!new_assoc) { + ret = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); + if (ret) { + IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); + return ret; + } + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + } + + iwl_clear_stations_table(priv); + + priv->start_calib = 0; + + /* Add the broadcast address so we can send broadcast frames */ + priv->cfg->ops->lib->add_bcast_station(priv); + + + /* If we have set the ASSOC_MSK and we are in BSS mode then + * add the IWL_AP_ID to the station rate table */ + if (new_assoc) { + if (priv->iw_mode == NL80211_IFTYPE_STATION) { + ret = iwl_rxon_add_station(priv, + priv->active_rxon.bssid_addr, 1); + if (ret == IWL_INVALID_STATION) { + IWL_ERR(priv, + "Error adding AP address for TX.\n"); + return -EIO; + } + priv->assoc_station_added = 1; + if (priv->default_wep_key && + iwl_send_static_wepkey_cmd(priv, 0)) + IWL_ERR(priv, + "Could not send WEP static key.\n"); + } + + /* + * allow CTS-to-self if possible for new association. + * this is relevant only for 5000 series and up, + * but will not damage 4965 + */ + priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN; + + /* Apply the new configuration + * RXON assoc doesn't clear the station table in uCode, + */ + ret = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); + if (ret) { + IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); + return ret; + } + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + } + iwl_print_rx_config_cmd(priv); + + iwl_init_sensitivity(priv); + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); + if (ret) { + IWL_ERR(priv, "Error sending TX power (%d)\n", ret); + return ret; + } + + return 0; +} + +void iwl_update_chain_flags(struct iwl_priv *priv) +{ + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + iwlcore_commit_rxon(priv); +} + +static void iwl_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERR(priv, "Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl_frame, list); +} + +static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +static u32 iwl_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + int left) +{ + if (!iwl_is_associated(priv) || !priv->ibss_beacon || + ((priv->iw_mode != NL80211_IFTYPE_ADHOC) && + (priv->iw_mode != NL80211_IFTYPE_AP))) + return 0; + + if (priv->ibss_beacon->len > left) + return 0; + + memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); + + return priv->ibss_beacon->len; +} + +/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ +static void iwl_set_beacon_tim(struct iwl_priv *priv, + struct iwl_tx_beacon_cmd *tx_beacon_cmd, + u8 *beacon, u32 frame_size) +{ + u16 tim_idx; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; + + /* + * The index is relative to frame start but we start looking at the + * variable-length part of the beacon. + */ + tim_idx = mgmt->u.beacon.variable - beacon; + + /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ + while ((tim_idx < (frame_size - 2)) && + (beacon[tim_idx] != WLAN_EID_TIM)) + tim_idx += beacon[tim_idx+1] + 2; + + /* If TIM field was found, set variables */ + if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { + tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); + tx_beacon_cmd->tim_size = beacon[tim_idx+1]; + } else + IWL_WARN(priv, "Unable to find TIM Element in beacon\n"); +} + +static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame) +{ + struct iwl_tx_beacon_cmd *tx_beacon_cmd; + u32 frame_size; + u32 rate_flags; + u32 rate; + /* + * We have to set up the TX command, the TX Beacon command, and the + * beacon contents. 
+ */ + + /* Initialize memory */ + tx_beacon_cmd = &frame->u.beacon; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + /* Set up TX beacon contents */ + frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE)) + return 0; + + /* Set up TX command fields */ + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; + + /* Set up TX beacon command fields */ + iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame, + frame_size); + + /* Set up packet rate and flags */ + rate = iwl_rate_get_lowest_plcp(priv); + priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); + rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); + if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate, + rate_flags); + + return sizeof(*tx_beacon_cmd) + frame_size; +} +static int iwl_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + unsigned int frame_size; + int rc; + + frame = iwl_get_free_frame(priv); + if (!frame) { + IWL_ERR(priv, "Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + frame_size = iwl_hw_get_beacon_cmd(priv, frame); + if (!frame_size) { + IWL_ERR(priv, "Error configuring the beacon command\n"); + iwl_free_frame(priv, frame); + return -EINVAL; + } + + rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl_free_frame(priv, frame); + + return rc; +} + +static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + dma_addr_t addr = get_unaligned_le32(&tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + addr |= + ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; + + return addr; +} + +static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, + dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + hi_n_len |= ((addr >> 16) >> 16) & 0xF; + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + +static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) +{ + return tfd->num_tbs & 0x1f; +} + +/** + * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @priv - driver private data + * @txq - tx queue + * + * Does NOT advance any TFD circular buffer read/write indexes + * Does NOT free the TFD itself (which is within circular buffer) + */ +void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds; + struct iwl_tfd *tfd; + struct pci_dev *dev = priv->pci_dev; + int index = txq->q.read_ptr; + int i; + int num_tbs; + + tfd = &tfd_tmp[index]; + + /* Sanity check on number of chunks */ + num_tbs = iwl_tfd_get_num_tbs(tfd); + + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Too many chunks: %i\n", num_tbs); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap 
tx_cmd */ + if (num_tbs) + pci_unmap_single(dev, + pci_unmap_addr(&txq->meta[index], mapping), + pci_unmap_len(&txq->meta[index], len), + PCI_DMA_BIDIRECTIONAL); + + /* Unmap chunks, if any. */ + for (i = 1; i < num_tbs; i++) { + pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i), + iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE); + + if (txq->txb) { + dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]); + txq->txb[txq->q.read_ptr].skb[i - 1] = NULL; + } + } +} + +int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, + u8 reset, u8 pad) +{ + struct iwl_queue *q; + struct iwl_tfd *tfd, *tfd_tmp; + u32 num_tbs; + + q = &txq->q; + tfd_tmp = (struct iwl_tfd *)txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + num_tbs = iwl_tfd_get_num_tbs(tfd); + + /* Each TFD can point to a maximum 20 Tx buffers */ + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Error can not send more than %d chunks\n", + IWL_NUM_OF_TBS); + return -EINVAL; + } + + BUG_ON(addr & ~DMA_BIT_MASK(36)); + if (unlikely(addr & ~IWL_TX_DMA_MASK)) + IWL_ERR(priv, "Unaligned address = %llx\n", + (unsigned long long)addr); + + iwl_tfd_set_tb(tfd, num_tbs, addr, len); + + return 0; +} + +/* + * Tell nic where to find circular buffer of Tx Frame Descriptors for + * given Tx queue, and enable the DMA channel used for that queue. + * + * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA + * channels supported in hardware. + */ +int iwl_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + int txq_id = txq->q.id; + + /* Circular buffer (TFD queue in DRAM) physical base address */ + iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + + return 0; +} + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ +static void iwl_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, + &pkt->u.alive_frame, + sizeof(struct iwl_init_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... */ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARN(priv, "uCode did not respond OK.\n"); +} + +static void iwl_bg_beacon_update(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, beacon_update); + struct sk_buff *beacon; + + /* Pull updated AP beacon from mac80211. 
will fail if not in AP mode */ + beacon = ieee80211_beacon_get(priv->hw, priv->vif); + + if (!beacon) { + IWL_ERR(priv, "update beacon failed\n"); + return; + } + + mutex_lock(&priv->mutex); + /* new beacon skb is allocated every time; dispose previous.*/ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = beacon; + mutex_unlock(&priv->mutex); + + iwl_send_beacon_cmd(priv); +} + +/** + * iwl_bg_statistics_periodic - Timer callback to queue statistics + * + * This callback is provided in order to send a statistics request. + * + * This timer function is continually reset to execute within + * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION + * was received. We need to ensure we receive the statistics in order + * to update the temperature used for calibrating the TXPOWER. + */ +static void iwl_bg_statistics_periodic(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* dont send host command if rf-kill is on */ + if (!iwl_is_ready_rf(priv)) + return; + + iwl_send_statistics_request(priv, CMD_ASYNC, false); +} + + +static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base, + u32 start_idx, u32 num_events, + u32 mode) +{ + u32 i; + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (mode == 0) + ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32)); + else + ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (iwl_grab_nic_access(priv)) { + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return; + } + + /* Set starting address; reads will auto-increment */ + _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + rmb(); + + /* + * "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. 
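+ *
+ * (Added note, not from the original source:)  the continuous event log is
+ * an array of fixed-size records in device SRAM; with mode == 0 each record
+ * is two u32s, with mode != 0 it is three, matching the 2 or 3 * sizeof(u32)
+ * stride used to compute "ptr" above.  Conceptually:
+ *
+ *     mode 0:  { u32 event_id; u32 data; }
+ *     mode 1:  { u32 event_id; u32 timestamp; u32 data; }
+ *
+ * and because HBUS_TARG_MEM_RDAT auto-increments once HBUS_TARG_MEM_RADDR
+ * has been set, the loop below simply issues two or three back-to-back
+ * reads per record.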
+ */ + for (i = 0; i < num_events; i++) { + ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + trace_iwlwifi_dev_ucode_cont_event(priv, + 0, time, ev); + } else { + data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + trace_iwlwifi_dev_ucode_cont_event(priv, + time, data, ev); + } + } + /* Allow device to power down */ + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static void iwl_continuous_event_trace(struct iwl_priv *priv) +{ + u32 capacity; /* event log capacity in # entries */ + u32 base; /* SRAM byte address of event log header */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + + if (priv->ucode_type == UCODE_INIT) + base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); + else + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + capacity = iwl_read_targ_mem(priv, base); + num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); + mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); + next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); + } else + return; + + if (num_wraps == priv->event_log.num_wraps) { + iwl_print_cont_event_trace(priv, + base, priv->event_log.next_entry, + next_entry - priv->event_log.next_entry, + mode); + priv->event_log.non_wraps_count++; + } else { + if ((num_wraps - priv->event_log.num_wraps) > 1) + priv->event_log.wraps_more_count++; + else + priv->event_log.wraps_once_count++; + trace_iwlwifi_dev_ucode_wrap_event(priv, + num_wraps - priv->event_log.num_wraps, + next_entry, priv->event_log.next_entry); + if (next_entry < priv->event_log.next_entry) { + iwl_print_cont_event_trace(priv, base, + priv->event_log.next_entry, + capacity - priv->event_log.next_entry, + mode); + + iwl_print_cont_event_trace(priv, base, 0, + next_entry, mode); + } else { + iwl_print_cont_event_trace(priv, base, + next_entry, capacity - next_entry, + mode); + + iwl_print_cont_event_trace(priv, base, 0, + next_entry, mode); + } + } + priv->event_log.num_wraps = num_wraps; + priv->event_log.next_entry = next_entry; +} + +/** + * iwl_bg_ucode_trace - Timer callback to log ucode event + * + * The timer is continually set to execute every + * UCODE_TRACE_PERIOD milliseconds after the last timer expired + * this function is to perform continuous uCode event logging operation + * if enabled + */ +static void iwl_bg_ucode_trace(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (priv->event_log.ucode_trace) { + iwl_continuous_event_trace(priv); + /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */ + mod_timer(&priv->ucode_trace, + jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD)); + } +} + +static void iwl_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl4965_beacon_notif *beacon = + (struct iwl4965_beacon_notif *)pkt->u.raw; + u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + 
le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + if ((priv->iw_mode == NL80211_IFTYPE_AP) && + (!test_bit(STATUS_EXIT_PENDING, &priv->status))) + queue_work(priv->workqueue, &priv->beacon_update); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On", + (flags & CT_CARD_DISABLED) ? + "Reached" : "Not reached"); + + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | + CT_CARD_DISABLED)) { + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + iwl_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + if (!(flags & RXON_CARD_DISABLED)) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + } + if (flags & CT_CARD_DISABLED) + iwl_tt_enter_ct_kill(priv); + } + if (!(flags & CT_CARD_DISABLED)) + iwl_tt_exit_ct_kill(priv); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + if (!(flags & RXON_CARD_DISABLED)) + iwl_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status))) + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src) +{ + if (src == IWL_PWR_SRC_VAUX) { + if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) + iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + } else { + iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); + } + + return 0; +} + +/** + * iwl_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. + */ +static void iwl_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; + priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; + + /* + * The same handler is used for both the REPLY to a discrete + * statistics request from the host as well as for the periodic + * statistics notifications (after received beacons) from the uCode. 
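+ *
+ * (Added sketch, with made-up names:)  rx_handlers is a plain dispatch table
+ * indexed by the 8-bit id in pkt->hdr.cmd, so hooking a new notification is
+ * just a matter of
+ *
+ *     static void my_notif_handler(struct iwl_priv *priv,
+ *                                  struct iwl_rx_mem_buffer *rxb)
+ *     {
+ *             struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ *             // ... inspect pkt->u.raw ...
+ *     }
+ *     priv->rx_handlers[MY_NOTIFICATION] = my_notif_handler;
+ *
+ * where "my_notif_handler" and "MY_NOTIFICATION" are purely illustrative;
+ * iwl_rx_handle() below invokes whatever is registered for the received id.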
+ */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; + + iwl_setup_rx_scan_handlers(priv); + + /* status change handler */ + priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; + + priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = + iwl_rx_missed_beacon_notif; + /* Rx handlers */ + priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy; + priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx; + /* block ack */ + priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba; + /* Set up hardware specific Rx handlers */ + priv->cfg->ops->lib->rx_handler_setup(priv); +} + +/** + * iwl_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +void iwl_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty; + + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). */ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + + while (i != r) { + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_unmap_page(priv->pci_dev, rxb->page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + pkt = rxb_addr(rxb); + + trace_iwlwifi_dev_rx(priv, pkt, + le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && + (pkt->hdr.cmd != REPLY_RX) && + (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && + (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. 
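+ *
+ * (Added worked example:)  r, i and write_actual are indexes into a
+ * power-of-two ring, so the free-slot computation earlier in this function,
+ *
+ *     total_empty = r - rxq->write_actual;
+ *     if (total_empty < 0)
+ *             total_empty += RX_QUEUE_SIZE;
+ *
+ * wraps correctly: assuming RX_QUEUE_SIZE is 256, r == 5 and
+ * write_actual == 250 give 5 - 250 = -245, corrected to 11 empty slots.
+ * fill_rx (replenish every 8 handled buffers, see below) is set only once
+ * more than half the ring, 128 slots, is empty.
+ *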
See iwl_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, + i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG_RX(priv, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some rx_handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking iwl_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + iwl_tx_cmd_complete(priv, rxb); + else + IWL_WARN(priv, "Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, + 0, PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode wont assert. */ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + iwl_rx_replenish_now(priv); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + iwl_rx_replenish_now(priv); + else + iwl_rx_queue_restock(priv); +} + +/* call this function to flush any scheduled tasklet */ +static inline void iwl_synchronize_irq(struct iwl_priv *priv) +{ + /* wait to make sure we flush pending tasklet*/ + synchronize_irq(priv->pci_dev->irq); + tasklet_kill(&priv->irq_tasklet); +} + +static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; + u32 i; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. 
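+ *
+ * (Added illustration of the race:)
+ *     1. tasklet reads CSR_INT; no FH RX bit is set yet
+ *     2. an RX DMA completes and the device latches a bit in
+ *        CSR_FH_INT_STATUS
+ *     3. tasklet reads and acks CSR_FH_INT_STATUS, which now shows that RX
+ * Without folding the FH bits back into "inta" below, the work seen in
+ * step 3 would have been acked but never serviced in this pass.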
*/ + if (inta_fh & CSR49_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR49_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio" : "enable radio"); + + priv->isr_stats.rfkill++; + + /* driver only loads ucode once setting the interface up. + * the driver allows loading the ucode even if the radio + * is killed. Hence update the killswitch state here. The + * rfkill handler will care about restarting if needed. + */ + if (!test_bit(STATUS_ALIVE, &priv->status)) { + if (hw_rf_kill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); + } + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERR(priv, "Microcode CT kill error detected.\n"); + priv->isr_stats.ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. " + " Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + priv->isr_stats.sw_err = inta; + iwl_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* + * uCode wakes up after power-down sleep. + * Tell device about any new tx or host commands enqueued, + * and about any Rx buffers made available while asleep. 
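+ *
+ * (Added note, partly inferred:)  while the device is allowed to sleep, the
+ * queue write-pointer updates are not pushed to the hardware registers
+ * immediately; this WAKEUP handler is the point where the RX queue and every
+ * TX queue get their write pointers flushed out, which is why it loops over
+ * all max_txq_num queues below.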
+ */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_rx_queue_update_write_ptr(priv, &priv->rxq); + for (i = 0; i < priv->hw_params.max_txq_num; i++) + iwl_txq_update_write_ptr(priv, &priv->txq[i]); + priv->isr_stats.wakeup++; + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl_rx_handle(priv); + priv->isr_stats.rx++; + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); + priv->isr_stats.tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + priv->ucode_write_complete = 1; + wake_up_interruptible(&priv->wait_command_queue); + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~(priv->inta_mask)) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + /* only Re-enable if diabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif +} + +/* tasklet for iwlagn interrupt */ +static void iwl_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta = 0; + u32 handled = 0; + unsigned long flags; + u32 i; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + */ + iwl_write32(priv, CSR_INT, priv->inta); + + inta = priv->inta; + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ", + inta, inta_mask); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* saved interrupt in inta variable now we can reset priv->inta */ + priv->inta = 0; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio" : "enable radio"); + + priv->isr_stats.rfkill++; + + /* driver only loads ucode once setting the interface up. + * the driver allows loading the ucode even if the radio + * is killed. Hence update the killswitch state here. The + * rfkill handler will care about restarting if needed. + */ + if (!test_bit(STATUS_ALIVE, &priv->status)) { + if (hw_rf_kill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); + } + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERR(priv, "Microcode CT kill error detected.\n"); + priv->isr_stats.ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. " + " Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + priv->isr_stats.sw_err = inta; + iwl_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_rx_queue_update_write_ptr(priv, &priv->rxq); + for (i = 0; i < priv->hw_params.max_txq_num; i++) + iwl_txq_update_write_ptr(priv, &priv->txq[i]); + + priv->isr_stats.wakeup++; + + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | + CSR_INT_BIT_RX_PERIODIC)) { + IWL_DEBUG_ISR(priv, "Rx interrupt\n"); + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + iwl_write32(priv, CSR_FH_INT_STATUS, + CSR49_FH_INT_RX_MASK); + } + if (inta & CSR_INT_BIT_RX_PERIODIC) { + handled |= CSR_INT_BIT_RX_PERIODIC; + iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC); + } + /* Sending RX interrupt require many steps to be done in the + * the device: + * 1- write interrupt to current index in ICT table. + * 2- dma RX frame. + * 3- update RX shared data to indicate last write index. + * 4- send interrupt. + * This could lead to RX race, driver could receive RX interrupt + * but the shared data changes does not reflect this; + * periodic interrupt will detect any dangling Rx activity. 
+ */ + + /* Disable periodic interrupt; we use it as just a one-shot. */ + iwl_write8(priv, CSR_INT_PERIODIC_REG, + CSR_INT_PERIODIC_DIS); + iwl_rx_handle(priv); + + /* + * Enable periodic interrupt in 8 msec only if we received + * real RX interrupt (instead of just periodic int), to catch + * any dangling Rx interrupt. If it was just the periodic + * interrupt, there was no dangling Rx activity, and no need + * to extend the periodic interrupt; one-shot is enough. + */ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) + iwl_write8(priv, CSR_INT_PERIODIC_REG, + CSR_INT_PERIODIC_ENA); + + priv->isr_stats.rx++; + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK); + IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); + priv->isr_stats.tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + priv->ucode_write_complete = 1; + wake_up_interruptible(&priv->wait_command_queue); + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~(priv->inta_mask)) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + } + + /* Re-enable all interrupts */ + /* only Re-enable if diabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_enable_interrupts(priv); +} + + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) +{ + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); +} + +static void iwl_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + + +static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); +static int iwl_mac_setup_register(struct iwl_priv *priv); + +static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first) +{ + const char *name_pre = priv->cfg->fw_name_pre; + + if (first) + priv->fw_index = priv->cfg->ucode_api_max; + else + priv->fw_index--; + + if (priv->fw_index < priv->cfg->ucode_api_min) { + IWL_ERR(priv, "no suitable firmware found!\n"); + return -ENOENT; + } + + sprintf(priv->firmware_name, "%s%d%s", + name_pre, priv->fw_index, ".ucode"); + + IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n", + priv->firmware_name); + + return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name, + &priv->pci_dev->dev, GFP_KERNEL, priv, + iwl_ucode_callback); +} + +/** + * iwl_ucode_callback - callback when firmware was loaded + * + * If loaded successfully, copies the firmware into buffers + * for the card to fetch (via DMA). 
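+ *
+ * Added sketch of the file layout implied by the parsing below (field names
+ * are indicative only; the authoritative definition is struct
+ * iwl_ucode_header and its per-API accessors):
+ *
+ *     [ header: ver, (build,) inst_size, data_size,
+ *               init_size, init_data_size, boot_size ]
+ *     [ runtime instructions   : inst_size bytes      ]
+ *     [ runtime data           : data_size bytes      ]
+ *     [ init instructions      : init_size bytes      ]
+ *     [ init data              : init_data_size bytes ]
+ *     [ bootstrap instructions : boot_size bytes      ]
+ *
+ * The size check below verifies that these six pieces add up to
+ * ucode_raw->size before anything is copied.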
+ */ +static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) +{ + struct iwl_priv *priv = context; + struct iwl_ucode_header *ucode; + const unsigned int api_max = priv->cfg->ucode_api_max; + const unsigned int api_min = priv->cfg->ucode_api_min; + u8 *src; + size_t len; + u32 api_ver, build; + u32 inst_size, data_size, init_size, init_data_size, boot_size; + int err; + u16 eeprom_ver; + + if (!ucode_raw) { + IWL_ERR(priv, "request for firmware file '%s' failed.\n", + priv->firmware_name); + goto try_again; + } + + IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n", + priv->firmware_name, ucode_raw->size); + + /* Make sure that we got at least the v1 header! */ + if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) { + IWL_ERR(priv, "File size way too small!\n"); + goto try_again; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct iwl_ucode_header *)ucode_raw->data; + + priv->ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IWL_UCODE_API(priv->ucode_ver); + build = priv->cfg->ops->ucode->get_build(ucode, api_ver); + inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver); + data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver); + init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver); + init_data_size = + priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver); + boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver); + src = priv->cfg->ops->ucode->get_data(ucode, api_ver); + + /* api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward */ + + if (api_ver < api_min || api_ver > api_max) { + IWL_ERR(priv, "Driver unable to support your firmware API. " + "Driver supports v%u, firmware is v%u.\n", + api_max, api_ver); + goto try_again; + } + + if (api_ver != api_max) + IWL_ERR(priv, "Firmware has old API version. Expected v%u, " + "got v%u. New firmware can be obtained " + "from http://www.intellinuxwireless.org.\n", + api_max, api_ver); + + IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + snprintf(priv->hw->wiphy->fw_version, + sizeof(priv->hw->wiphy->fw_version), + "%u.%u.%u.%u", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + if (build) + IWL_DEBUG_INFO(priv, "Build %u\n", build); + + eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); + IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n", + (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) + ? "OTP" : "EEPROM", eeprom_ver); + + IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", + priv->ucode_ver); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", + inst_size); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n", + data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n", + init_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n", + init_data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n", + boot_size); + + /* + * For any of the failures below (before allocating pci memory) + * we will try to load a version with a smaller API -- maybe the + * user just got a corrupted version of the latest API. 
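+ *
+ * (Added example:)  "try_again" re-enters iwl_request_firmware(priv, false),
+ * which decrements priv->fw_index.  With a hypothetical cfg->fw_name_pre of
+ * "iwlwifi-5000-", ucode_api_max == 2 and ucode_api_min == 1, the driver
+ * would ask for "iwlwifi-5000-2.ucode" first and fall back to
+ * "iwlwifi-5000-1.ucode" before giving up with -ENOENT.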
+ */ + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size != + priv->cfg->ops->ucode->get_header_size(api_ver) + + inst_size + data_size + init_size + + init_data_size + boot_size) { + + IWL_DEBUG_INFO(priv, + "uCode file size %d does not match expected size\n", + (int)ucode_raw->size); + goto try_again; + } + + /* Verify that uCode images will fit in card's SRAM */ + if (inst_size > priv->hw_params.max_inst_size) { + IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n", + inst_size); + goto try_again; + } + + if (data_size > priv->hw_params.max_data_size) { + IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n", + data_size); + goto try_again; + } + if (init_size > priv->hw_params.max_inst_size) { + IWL_INFO(priv, "uCode init instr len %d too large to fit in\n", + init_size); + goto try_again; + } + if (init_data_size > priv->hw_params.max_data_size) { + IWL_INFO(priv, "uCode init data len %d too large to fit in\n", + init_data_size); + goto try_again; + } + if (boot_size > priv->hw_params.max_bsm_size) { + IWL_INFO(priv, "uCode boot instr len %d too large to fit in\n", + boot_size); + goto try_again; + } + + /* Allocate ucode buffers for card's bus-master loading ... */ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = inst_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); + + priv->ucode_data.len = data_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); + + priv->ucode_data_backup.len = data_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Initialization instructions and data */ + if (init_size && init_data_size) { + priv->ucode_init.len = init_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); + + priv->ucode_init_data.len = init_data_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); + + if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) + goto err_pci_alloc; + } + + /* Bootstrap (instructions only, no data) */ + if (boot_size) { + priv->ucode_boot.len = boot_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); + + if (!priv->ucode_boot.v_addr) + goto err_pci_alloc; + } + + /* Copy images into buffers for card's bus-master reads ... 
*/ + + /* Runtime instructions (first block of data in file) */ + len = inst_size; + IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", len); + memcpy(priv->ucode_code.v_addr, src, len); + src += len; + + IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* Runtime data (2nd block) + * NOTE: Copy into backup buffer will be done in iwl_up() */ + len = data_size; + IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", len); + memcpy(priv->ucode_data.v_addr, src, len); + memcpy(priv->ucode_data_backup.v_addr, src, len); + src += len; + + /* Initialization instructions (3rd block) */ + if (init_size) { + len = init_size; + IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n", + len); + memcpy(priv->ucode_init.v_addr, src, len); + src += len; + } + + /* Initialization data (4th block) */ + if (init_data_size) { + len = init_data_size; + IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n", + len); + memcpy(priv->ucode_init_data.v_addr, src, len); + src += len; + } + + /* Bootstrap instructions (5th block) */ + len = boot_size; + IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", len); + memcpy(priv->ucode_boot.v_addr, src, len); + + /************************************************** + * This is still part of probe() in a sense... + * + * 9. Setup and register with mac80211 and debugfs + **************************************************/ + err = iwl_mac_setup_register(priv); + if (err) + goto out_unbind; + + err = iwl_dbgfs_register(priv, DRV_NAME); + if (err) + IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + return; + + try_again: + /* try next, if any */ + if (iwl_request_firmware(priv, false)) + goto out_unbind; + release_firmware(ucode_raw); + return; + + err_pci_alloc: + IWL_ERR(priv, "failed to allocate pci memory\n"); + iwl_dealloc_ucode_pci(priv); + out_unbind: + device_release_driver(&priv->pci_dev->dev); + release_firmware(ucode_raw); +} + +static const char *desc_lookup_text[] = { + "OK", + "FAIL", + "BAD_PARAM", + "BAD_CHECKSUM", + "NMI_INTERRUPT_WDG", + "SYSASSERT", + "FATAL_ERROR", + "BAD_COMMAND", + "HW_ERROR_TUNE_LOCK", + "HW_ERROR_TEMPERATURE", + "ILLEGAL_CHAN_FREQ", + "VCC_NOT_STABLE", + "FH_ERROR", + "NMI_INTERRUPT_HOST", + "NMI_INTERRUPT_ACTION_PT", + "NMI_INTERRUPT_UNKNOWN", + "UCODE_VERSION_MISMATCH", + "HW_ERROR_ABS_LOCK", + "HW_ERROR_CAL_LOCK_FAIL", + "NMI_INTERRUPT_INST_ACTION_PT", + "NMI_INTERRUPT_DATA_ACTION_PT", + "NMI_TRM_HW_ER", + "NMI_INTERRUPT_TRM", + "NMI_INTERRUPT_BREAK_POINT" + "DEBUG_0", + "DEBUG_1", + "DEBUG_2", + "DEBUG_3", + "ADVANCED SYSASSERT" +}; + +static const char *desc_lookup(int i) +{ + int max = ARRAY_SIZE(desc_lookup_text) - 1; + + if (i < 0 || i > max) + i = max; + + return desc_lookup_text[i]; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +void iwl_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 data2, line; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + + if (priv->ucode_type == UCODE_INIT) + base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); + else + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + + if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Not valid error log pointer 0x%08X for %s uCode\n", + base, 
(priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); + return; + } + + count = iwl_read_targ_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERR(priv, "Start IWL Error Log Dump:\n"); + IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", + priv->status, count); + } + + desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); + blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); + blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); + ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32)); + ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32)); + data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32)); + data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32)); + line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); + time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); + + trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, + blink1, blink2, ilink1, ilink2); + + IWL_ERR(priv, "Desc Time " + "data1 data2 line\n"); + IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", + desc_lookup(desc), desc, time, data1, data2, line); + IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n"); + IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, + ilink1, ilink2); + +} + +#define EVENT_START_OFFSET (4 * sizeof(u32)) + +/** + * iwl_print_event_log - Dump error event log to syslog + * + */ +static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode, + int pos, char **buf, size_t bufsz) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (num_events == 0) + return pos; + if (priv->ucode_type == UCODE_INIT) + base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); + else + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + + /* Set starting address; reads will auto-increment */ + _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + rmb(); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. 
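+ *
+ * (Added sample of the resulting lines, with made-up values:)
+ *
+ *     EVT_LOG:0x00000032:0007              mode 0: data, event id
+ *     EVT_LOGT:0000123456:0x00000032:0007  mode 1: timestamp, data, event id
+ *
+ * Depending on bufsz the same strings are either scnprintf()'d into *buf
+ * (debugfs read path) or emitted via IWL_ERR() plus the ucode_event
+ * tracepoint.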
*/ + for (i = 0; i < num_events; i++) { + ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + /* data, ev */ + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOG:0x%08x:%04u\n", + time, ev); + } else { + trace_iwlwifi_dev_ucode_event(priv, 0, + time, ev); + IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", + time, ev); + } + } else { + data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + } else { + IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + trace_iwlwifi_dev_ucode_event(priv, time, + data, ev); + } + } + } + + /* Allow device to power down */ + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return pos; +} + +/** + * iwl_print_last_event_logs - Dump the newest # of event log to syslog + */ +static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity, + u32 num_wraps, u32 next_entry, + u32 size, u32 mode, + int pos, char **buf, size_t bufsz) +{ + /* + * display the newest DEFAULT_LOG_ENTRIES entries + * i.e the entries just before the next ont that uCode would fill. + */ + if (num_wraps) { + if (next_entry < size) { + pos = iwl_print_event_log(priv, + capacity - (size - next_entry), + size - next_entry, mode, + pos, buf, bufsz); + pos = iwl_print_event_log(priv, 0, + next_entry, mode, + pos, buf, bufsz); + } else + pos = iwl_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } else { + if (next_entry < size) { + pos = iwl_print_event_log(priv, 0, next_entry, + mode, pos, buf, bufsz); + } else { + pos = iwl_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } + } + return pos; +} + +/* For sanity check only. Actual size is determined by uCode, typ. 512 */ +#define MAX_EVENT_LOG_SIZE (512) + +#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) + +int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display) +{ + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + int pos = 0; + size_t bufsz = 0; + + if (priv->ucode_type == UCODE_INIT) + base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); + else + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + + if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Invalid event log pointer 0x%08X for %s uCode\n", + base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); + return -EINVAL; + } + + /* event log header */ + capacity = iwl_read_targ_mem(priv, base); + mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); + + if (capacity > MAX_EVENT_LOG_SIZE) { + IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", + capacity, MAX_EVENT_LOG_SIZE); + capacity = MAX_EVENT_LOG_SIZE; + } + + if (next_entry > MAX_EVENT_LOG_SIZE) { + IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", + next_entry, MAX_EVENT_LOG_SIZE); + next_entry = MAX_EVENT_LOG_SIZE; + } + + size = num_wraps ? 
capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); + return pos; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; +#else + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; +#endif + IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", + size); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (display) { + if (full_log) + bufsz = capacity * 48; + else + bufsz = size * 48; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + } + if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { + /* + * if uCode has wrapped back to top of log, + * start at the oldest entry, + * i.e the next one that uCode would fill. + */ + if (num_wraps) + pos = iwl_print_event_log(priv, next_entry, + capacity - next_entry, mode, + pos, buf, bufsz); + /* (then/else) start at top of log */ + pos = iwl_print_event_log(priv, 0, + next_entry, mode, pos, buf, bufsz); + } else + pos = iwl_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#else + pos = iwl_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#endif + return pos; +} + +/** + * iwl_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl_init_alive_start()). + */ +static void iwl_alive_start(struct iwl_priv *priv) +{ + int ret = 0; + + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. 
*/ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); + goto restart; + } + + iwl_clear_stations_table(priv); + ret = priv->cfg->ops->lib->alive_notify(priv); + if (ret) { + IWL_WARN(priv, + "Could not complete ALIVE transition [ntf]: %d\n", ret); + goto restart; + } + + /* After the ALIVE response, we can send host commands to the uCode */ + set_bit(STATUS_ALIVE, &priv->status); + + if (iwl_is_rfkill(priv)) + return; + + ieee80211_wake_queues(priv->hw); + + priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + /* Configure Tx antenna selection based on H/W config */ + if (priv->cfg->ops->hcmd->set_tx_ant) + priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant); + + if (iwl_is_associated(priv)) { + struct iwl_rxon_cmd *active_rxon = + (struct iwl_rxon_cmd *)&priv->active_rxon; + /* apply any changes in staging */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + iwl_connection_init_rx_config(priv, priv->iw_mode); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + } + + /* Configure Bluetooth device coexistence support */ + iwl_send_bt_config(priv); + + iwl_reset_run_time_calib(priv); + + /* Configure the adapter for unassociated operation */ + iwlcore_commit_rxon(priv); + + /* At this point, the NIC is initialized and operational */ + iwl_rf_kill_ct_config(priv); + + iwl_leds_init(priv); + + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); + set_bit(STATUS_READY, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + + iwl_power_update_mode(priv, true); + + /* reassociate for ADHOC mode */ + if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) { + struct sk_buff *beacon = ieee80211_beacon_get(priv->hw, + priv->vif); + if (beacon) + iwl_mac_beacon_update(priv->hw, beacon); + } + + + if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status)) + iwl_set_mode(priv, priv->iw_mode); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); + + IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); + + if (!exit_pending) + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_clear_stations_table(priv); + + /* Unblock any waiting calls */ + wake_up_interruptible_all(&priv->wait_command_queue); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + iwl_synchronize_irq(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called iwl_init() then + * clear all bits but the RF Kill bit and return */ + if (!iwl_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + 
STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill + * bit and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + + /* device going down, Stop using ICT table */ + iwl_disable_ict(priv); + + iwl_txq_ctx_stop(priv); + iwl_rxq_stop(priv); + + /* Power-down device's busmaster DMA clocks */ + iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + + /* Make sure (redundant) we've released our request to stay awake */ + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* Stop the device, and put it in low power state */ + priv->cfg->ops->lib->apm_ops.stop(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + priv->ibss_beacon = NULL; + + /* clear out any free frames */ + iwl_clear_free_frames(priv); +} + +static void iwl_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl_down(priv); + mutex_unlock(&priv->mutex); + + iwl_cancel_deferred_work(priv); +} + +#define HW_READY_TIMEOUT (50) + +static int iwl_set_hw_ready(struct iwl_priv *priv) +{ + int ret = 0; + + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); + + /* See if we got it */ + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + HW_READY_TIMEOUT); + if (ret != -ETIMEDOUT) + priv->hw_ready = true; + else + priv->hw_ready = false; + + IWL_DEBUG_INFO(priv, "hardware %s\n", + (priv->hw_ready == 1) ? "ready" : "not ready"); + return ret; +} + +static int iwl_prepare_card_hw(struct iwl_priv *priv) +{ + int ret = 0; + + IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n"); + + ret = iwl_set_hw_ready(priv); + if (priv->hw_ready) + return ret; + + /* If HW is not ready, prepare the conditions to check again */ + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE); + + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, + CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); + + /* HW should be ready by now, check again. 
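+ *
+ * (Added summary of the handshake:)
+ *     1. iwl_set_hw_ready() sets CSR_HW_IF_CONFIG_REG_BIT_NIC_READY and
+ *        polls for it within HW_READY_TIMEOUT; success marks priv->hw_ready.
+ *     2. if that fails, CSR_HW_IF_CONFIG_REG_PREPARE is set here and the
+ *        NIC_PREPARE_DONE bit is polled, after which step 1 is retried once.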
*/ + if (ret != -ETIMEDOUT) + iwl_set_hw_ready(priv); + + return ret; +} + +#define MAX_HW_RESTARTS 5 + +static int __iwl_up(struct iwl_priv *priv) +{ + int i; + int ret; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { + IWL_ERR(priv, "ucode not available for device bringup\n"); + return -EIO; + } + + iwl_prepare_card_hw(priv); + + if (!priv->hw_ready) { + IWL_WARN(priv, "Exit HW not ready\n"); + return -EIO; + } + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + if (iwl_is_rfkill(priv)) { + wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); + + iwl_enable_interrupts(priv); + IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); + return 0; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + ret = iwl_hw_nic_init(priv); + if (ret) { + IWL_ERR(priv, "Unable to init nic\n"); + return ret; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. 
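+ *
+ * (Added note:)  two data buffers were allocated in iwl_ucode_callback() for
+ * exactly this purpose: priv->ucode_data always mirrors the file, while
+ * ucode_data_backup is the copy used across power-downs.  Refreshing it
+ * here, each time the interface is brought up, means the on-board data SRAM
+ * is initialized from a clean image rather than from whatever a previous
+ * run left behind, without another round-trip through request_firmware().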
*/ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + iwl_clear_stations_table(priv); + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + ret = priv->cfg->ops->lib->load_ucode(priv); + + if (ret) { + IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n", + ret); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl_nic_start(priv); + + IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + priv->cfg->ops->lib->init_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* enable dram interrupt */ + iwl_reset_ict(priv); + + mutex_lock(&priv->mutex); + iwl_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_run_time_calib_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + run_time_calib_work); + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) { + mutex_unlock(&priv->mutex); + return; + } + + if (priv->start_calib) { + iwl_chain_noise_calibration(priv, &priv->statistics); + + iwl_sensitivity_calibration(priv, &priv->statistics); + } + + mutex_unlock(&priv->mutex); + return; +} + +static void iwl_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { + mutex_lock(&priv->mutex); + priv->vif = NULL; + priv->is_open = 0; + mutex_unlock(&priv->mutex); + iwl_down(priv); + ieee80211_restart_hw(priv->hw); + } else { + iwl_down(priv); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + __iwl_up(priv); + mutex_unlock(&priv->mutex); + } +} + +static void iwl_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +#define IWL_DELAY_NEXT_SCAN (HZ*2) + +void iwl_post_associate(struct iwl_priv *priv) +{ + struct ieee80211_conf *conf = NULL; + int ret = 0; + unsigned long flags; + + if (priv->iw_mode == NL80211_IFTYPE_AP) { + IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); + return; + } + + IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", + 
priv->assoc_id, priv->active_rxon.bssid_addr); + + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + + if (!priv->vif || !priv->is_open) + return; + + iwl_scan_cancel_timeout(priv, 200); + + conf = ieee80211_get_hw_conf(priv->hw); + + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + + iwl_setup_rxon_timing(priv); + ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (ret) + IWL_WARN(priv, "REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + + iwl_set_rxon_ht(priv, &priv->current_ht_config); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + + IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", + priv->assoc_id, priv->beacon_int); + + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + } + + iwlcore_commit_rxon(priv); + + switch (priv->iw_mode) { + case NL80211_IFTYPE_STATION: + break; + + case NL80211_IFTYPE_ADHOC: + + /* assume default assoc id */ + priv->assoc_id = 1; + + iwl_rxon_add_station(priv, priv->bssid, 0); + iwl_send_beacon_cmd(priv); + + break; + + default: + IWL_ERR(priv, "%s Should not be called in %d mode\n", + __func__, priv->iw_mode); + break; + } + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC) + priv->assoc_station_added = 1; + + spin_lock_irqsave(&priv->lock, flags); + iwl_activate_qos(priv, 0); + spin_unlock_irqrestore(&priv->lock, flags); + + /* the chain noise calibration will enabled PM upon completion + * If chain noise has already been run, then we need to enable + * power management here */ + if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) + iwl_power_update_mode(priv, false); + + /* Enable Rx differential gain and sensitivity calibrations */ + iwl_chain_noise_reset(priv); + priv->start_calib = 1; + +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +#define UCODE_READY_TIMEOUT (4 * HZ) + +/* + * Not a mac80211 entry point function, but it fits in with all the + * other mac80211 functions grouped here. 
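+ *
+ * (Added sketch, not taken from this file:)  the callbacks defined in this
+ * block (iwl_mac_start, iwl_mac_stop, iwl_mac_tx, ...) reach mac80211 via a
+ * struct ieee80211_ops table passed to ieee80211_alloc_hw() in the probe
+ * path, along the lines of
+ *
+ *     static struct ieee80211_ops iwl_example_ops = {
+ *             .tx    = iwl_mac_tx,
+ *             .start = iwl_mac_start,
+ *             .stop  = iwl_mac_stop,
+ *             // plus the other mandatory callbacks (.config, .add_interface, ...)
+ *     };
+ *     hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl_example_ops);
+ *
+ * ("iwl_example_ops" is an illustrative name; the real table lives
+ * elsewhere.)  This helper only fills in the advertised capabilities and
+ * calls ieee80211_register_hw().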
+ */ +static int iwl_mac_setup_register(struct iwl_priv *priv) +{ + int ret; + struct ieee80211_hw *hw = priv->hw; + hw->rate_control_algorithm = "iwl-agn-rs"; + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_SPECTRUM_MGMT; + + if (!priv->cfg->broken_powersave) + hw->flags |= IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS; + + if (priv->cfg->sku & IWL_SKU_N) + hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | + IEEE80211_HW_SUPPORTS_STATIC_SMPS; + + hw->sta_data_size = sizeof(struct iwl_station_priv); + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + + hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS; + + /* + * For now, disable PS by default because it affects + * RX performance significantly. + */ + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX + 1; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->bands[IEEE80211_BAND_2GHZ]; + if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->bands[IEEE80211_BAND_5GHZ]; + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + return ret; + } + priv->mac80211_registered = 1; + + return 0; +} + + +static int iwl_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + ret = __iwl_up(priv); + mutex_unlock(&priv->mutex); + + if (ret) + return ret; + + if (iwl_is_rfkill(priv)) + goto out; + + IWL_DEBUG_INFO(priv, "Start UP work done.\n"); + + /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from + * mac80211 will not be run successfully. 
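+ * The wait below blocks for up to UCODE_READY_TIMEOUT (4 * HZ) waiting
+ * for STATUS_READY to be set.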
*/ + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + test_bit(STATUS_READY, &priv->status), + UCODE_READY_TIMEOUT); + if (!ret) { + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_ERR(priv, "START_ALIVE timeout after %dms.\n", + jiffies_to_msecs(UCODE_READY_TIMEOUT)); + return -ETIMEDOUT; + } + } + + iwl_led_start(priv); + +out: + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +static void iwl_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!priv->is_open) + return; + + priv->is_open = 0; + + if (iwl_is_ready_rf(priv) || test_bit(STATUS_SCAN_HW, &priv->status)) { + /* stop mac, cancel any scan request and clear + * RXON_FILTER_ASSOC_MSK BIT + */ + mutex_lock(&priv->mutex); + iwl_scan_cancel_timeout(priv, 100); + mutex_unlock(&priv->mutex); + } + + iwl_down(priv); + + flush_workqueue(priv->workqueue); + + /* enable interrupts again in order to receive rfkill changes */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(priv); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MACDUMP(priv, "enter\n"); + + IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (iwl_tx_skb(priv, skb)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MACDUMP(priv, "leave\n"); + return NETDEV_TX_OK; +} + +void iwl_config_ap(struct iwl_priv *priv) +{ + int ret = 0; + unsigned long flags; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* The following should be done only at AP bring up */ + if (!iwl_is_associated(priv)) { + + /* RXON - unassoc (to set timing command) */ + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + + /* RXON Timing */ + iwl_setup_rxon_timing(priv); + ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (ret) + IWL_WARN(priv, "REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + /* AP has all antennas */ + priv->chain_noise_data.active_chains = + priv->hw_params.valid_rx_ant; + iwl_set_rxon_ht(priv, &priv->current_ht_config); + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + + /* FIXME: what should be the assoc_id for AP? */ + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & + WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC) + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* restore RXON assoc */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + iwl_reset_qos(priv); + spin_lock_irqsave(&priv->lock, flags); + iwl_activate_qos(priv, 1); + spin_unlock_irqrestore(&priv->lock, flags); + iwl_add_bcast_station(priv); + } + iwl_send_beacon_cmd(priv); + + /* FIXME - we need to add code here to detect a totally new + * configuration, reset the AP, unassoc, rxon timing, assoc, + * clear sta table, add BCAST sta... 
*/ +} + +static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + + struct iwl_priv *priv = hw->priv; + IWL_DEBUG_MAC80211(priv, "enter\n"); + + iwl_update_tkip_key(priv, keyconf, + sta ? sta->addr : iwl_bcast_addr, + iv32, phase1key); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + const u8 *addr; + int ret; + u8 sta_id; + bool is_default_wep_key = false; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (priv->cfg->mod_params->sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + addr = sta ? sta->addr : iwl_bcast_addr; + sta_id = iwl_find_station(priv, addr); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", + addr); + return -EINVAL; + + } + + mutex_lock(&priv->mutex); + iwl_scan_cancel_timeout(priv, 100); + mutex_unlock(&priv->mutex); + + /* If we are getting WEP group key and we didn't receive any key mapping + * so far, we are in legacy wep mode (group key only), otherwise we are + * in 1X mode. + * In legacy wep mode, we use another host command to the uCode */ + if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id && + priv->iw_mode != NL80211_IFTYPE_AP) { + if (cmd == SET_KEY) + is_default_wep_key = !priv->key_mapping_key; + else + is_default_wep_key = + (key->hw_key_idx == HW_KEY_DEFAULT); + } + + switch (cmd) { + case SET_KEY: + if (is_default_wep_key) + ret = iwl_set_default_wep_key(priv, key); + else + ret = iwl_set_dynamic_key(priv, key, sta_id); + + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (is_default_wep_key) + ret = iwl_remove_default_wep_key(priv, key); + else + ret = iwl_remove_dynamic_key(priv, key, sta_id); + + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", + sta->addr, tid); + + if (!(priv->cfg->sku & IWL_SKU_N)) + return -EACCES; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + IWL_DEBUG_HT(priv, "start Rx\n"); + return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn); + case IEEE80211_AMPDU_RX_STOP: + IWL_DEBUG_HT(priv, "stop Rx\n"); + ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return 0; + else + return ret; + case IEEE80211_AMPDU_TX_START: + IWL_DEBUG_HT(priv, "start Tx\n"); + return iwl_tx_agg_start(priv, sta->addr, tid, ssn); + case IEEE80211_AMPDU_TX_STOP: + IWL_DEBUG_HT(priv, "stop Tx\n"); + ret = iwl_tx_agg_stop(priv, sta->addr, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return 0; + else + return ret; + case IEEE80211_AMPDU_TX_OPERATIONAL: + /* do nothing */ + return -EOPNOTSUPP; + default: + IWL_DEBUG_HT(priv, "unknown\n"); + return -EINVAL; + break; + } + return 0; +} + +static int iwl_mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct iwl_priv *priv = 
hw->priv; + + priv = hw->priv; + IWL_DEBUG_MAC80211(priv, "enter\n"); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return 0; +} + +static void iwl_mac_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + int sta_id; + + /* + * TODO: We really should use this callback to + * actually maintain the station table in + * the device. + */ + + switch (cmd) { + case STA_NOTIFY_ADD: + atomic_set(&sta_priv->pending_frames, 0); + if (vif->type == NL80211_IFTYPE_AP) + sta_priv->client = true; + break; + case STA_NOTIFY_SLEEP: + WARN_ON(!sta_priv->client); + sta_priv->asleep = true; + if (atomic_read(&sta_priv->pending_frames) > 0) + ieee80211_sta_block_awake(hw, sta, true); + break; + case STA_NOTIFY_AWAKE: + WARN_ON(!sta_priv->client); + if (!sta_priv->asleep) + break; + sta_priv->asleep = false; + sta_id = iwl_find_station(priv, sta->addr); + if (sta_id != IWL_INVALID_STATION) + iwl_sta_modify_ps_wake(priv, sta_id); + break; + default: + break; + } +} + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + * + * The debug_level being managed using sysfs below is a per device debug + * level that is used instead of the global debug level if it (the per + * device debug level) is set. + */ +static ssize_t show_debug_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); +} +static ssize_t store_debug_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 0, &val); + if (ret) + IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf); + else { + priv->debug_level = val; + if (iwl_alloc_traffic_mem(priv)) + IWL_ERR(priv, + "Not enough memory to generate traffic log\n"); + } + return strnlen(buf, count); +} + +static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, + show_debug_level, store_debug_level); + + +#endif /* CONFIG_IWLWIFI_DEBUG */ + + +static ssize_t show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", priv->temperature); +} + +static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); + +static ssize_t show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_is_ready_rf(priv)) + return sprintf(buf, "off\n"); + else + return sprintf(buf, "%d\n", priv->tx_power_user_lmt); +} + +static ssize_t store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + IWL_INFO(priv, "%s is not in decimal form.\n", buf); + else { + ret = iwl_set_tx_power(priv, val, 
false); + if (ret) + IWL_ERR(priv, "failed setting tx power (0x%d).\n", + ret); + else + ret = count; + } + return ret; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); + +static ssize_t show_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); +} + +static ssize_t store_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + u32 flags; + int ret = strict_strtoul(buf, 0, &val); + if (ret) + return ret; + flags = (u32)val; + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.flags) != flags) { + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARN(priv, "Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags); + priv->staging_rxon.flags = cpu_to_le32(flags); + iwlcore_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); + +static ssize_t show_filter_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + return sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->active_rxon.filter_flags)); +} + +static ssize_t store_filter_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + u32 filter_flags; + int ret = strict_strtoul(buf, 0, &val); + if (ret) + return ret; + filter_flags = (u32)val; + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARN(priv, "Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " + "0x%04X\n", filter_flags); + priv->staging_rxon.filter_flags = + cpu_to_le32(filter_flags); + iwlcore_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, + store_filter_flags); + + +static ssize_t show_statistics(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 size = sizeof(struct iwl_notif_statistics); + u32 len = 0, ofs = 0; + u8 *data = (u8 *)&priv->statistics; + int rc = 0; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + mutex_lock(&priv->mutex); + rc = iwl_send_statistics_request(priv, CMD_SYNC, false); + mutex_unlock(&priv->mutex); + + if (rc) { + len = sprintf(buf, + "Error sending statistics request: 0x%08X\n", rc); + return len; + } + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); + +static ssize_t show_rts_ht_protection(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + return sprintf(buf, "%s\n", + priv->cfg->use_rts_for_ht ? 
"RTS/CTS" : "CTS-to-self"); +} + +static ssize_t store_rts_ht_protection(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + IWL_INFO(priv, "Input is not in decimal form.\n"); + else { + if (!iwl_is_associated(priv)) + priv->cfg->use_rts_for_ht = val ? true : false; + else + IWL_ERR(priv, "Sta associated with AP - " + "Change protection mechanism is not allowed\n"); + ret = count; + } + return ret; +} + +static DEVICE_ATTR(rts_ht_protection, S_IWUSR | S_IRUGO, + show_rts_ht_protection, store_rts_ht_protection); + + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->restart, iwl_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); + INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); + INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); + + iwl_setup_scan_deferred_work(priv); + + if (priv->cfg->ops->lib->setup_deferred_work) + priv->cfg->ops->lib->setup_deferred_work(priv); + + init_timer(&priv->statistics_periodic); + priv->statistics_periodic.data = (unsigned long)priv; + priv->statistics_periodic.function = iwl_bg_statistics_periodic; + + init_timer(&priv->ucode_trace); + priv->ucode_trace.data = (unsigned long)priv; + priv->ucode_trace.function = iwl_bg_ucode_trace; + + if (!priv->cfg->use_isr_legacy) + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet, (unsigned long)priv); + else + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet_legacy, (unsigned long)priv); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv) +{ + if (priv->cfg->ops->lib->cancel_deferred_work) + priv->cfg->ops->lib->cancel_deferred_work(priv); + + cancel_delayed_work_sync(&priv->init_alive_start); + cancel_delayed_work(&priv->scan_check); + cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->beacon_update); + del_timer_sync(&priv->statistics_periodic); + del_timer_sync(&priv->ucode_trace); +} + +static void iwl_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { + rates[i].bitrate = iwl_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on indexes */ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) { + /* + * If CCK != 1M then set short preamble rate flag. + */ + rates[i].flags |= + (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ? 
+ 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} + +static int iwl_init_drv(struct iwl_priv *priv) +{ + int ret; + + priv->ibss_beacon = NULL; + + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + mutex_init(&priv->sync_cmd_mutex); + + /* Clear the driver's (not device's) station table */ + iwl_clear_stations_table(priv); + + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->band = IEEE80211_BAND_2GHZ; + + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + + /* initialize force reset */ + priv->force_reset[IWL_RF_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_RF_RESET; + priv->force_reset[IWL_FW_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_FW_RELOAD; + + /* Choose which receivers/antennas to use */ + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + + iwl_init_scan_params(priv); + + iwl_reset_qos(priv); + + priv->qos_data.qos_active = 0; + priv->qos_data.qos_cap.val = 0; + + priv->rates_mask = IWL_RATES_MASK; + /* Set the tx_power_user_lmt to the lowest power level + * this value will get overwritten by channel max power avg + * from eeprom */ + priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN; + + ret = iwl_init_channel_map(priv); + if (ret) { + IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); + goto err; + } + + ret = iwlcore_init_geos(priv); + if (ret) { + IWL_ERR(priv, "initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + iwl_init_hw_rates(priv, priv->ieee_rates); + + return 0; + +err_free_channel_map: + iwl_free_channel_map(priv); +err: + return ret; +} + +static void iwl_uninit_drv(struct iwl_priv *priv) +{ + iwl_calib_free_results(priv); + iwlcore_free_geos(priv); + iwl_free_channel_map(priv); + kfree(priv->scan); +} + +static struct attribute *iwl_sysfs_entries[] = { + &dev_attr_flags.attr, + &dev_attr_filter_flags.attr, + &dev_attr_statistics.attr, + &dev_attr_temperature.attr, + &dev_attr_tx_power.attr, + &dev_attr_rts_ht_protection.attr, +#ifdef CONFIG_IWLWIFI_DEBUG + &dev_attr_debug_level.attr, +#endif + NULL +}; + +static struct attribute_group iwl_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl_sysfs_entries, +}; + +static struct ieee80211_ops iwl_hw_ops = { + .tx = iwl_mac_tx, + .start = iwl_mac_start, + .stop = iwl_mac_stop, + .add_interface = iwl_mac_add_interface, + .remove_interface = iwl_mac_remove_interface, + .config = iwl_mac_config, + .configure_filter = iwl_configure_filter, + .set_key = iwl_mac_set_key, + .update_tkip_key = iwl_mac_update_tkip_key, + .get_stats = iwl_mac_get_stats, + .conf_tx = iwl_mac_conf_tx, + .reset_tsf = iwl_mac_reset_tsf, + .bss_info_changed = iwl_bss_info_changed, + .ampdu_action = iwl_mac_ampdu_action, + .hw_scan = iwl_mac_hw_scan, + .sta_notify = iwl_mac_sta_notify, +}; + +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + unsigned long flags; + u16 pci_cmd; + + /************************ + * 1. Allocating HW data + ************************/ + + /* Disabling hardware scan means that mac80211 will perform scans + * "the hard way", rather than using device's scan. 
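+ * When the disable_hw_scan module parameter is set, the hw_scan op is
+ * cleared below so mac80211 uses its software scan implementation.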
*/ + if (cfg->mod_params->disable_hw_scan) { + if (iwl_debug_level & IWL_DL_INFO) + dev_printk(KERN_DEBUG, &(pdev->dev), + "Disabling hw_scan\n"); + iwl_hw_ops.hw_scan = NULL; + } + + hw = iwl_alloc_all(cfg, &iwl_hw_ops); + if (!hw) { + err = -ENOMEM; + goto out; + } + priv = hw->priv; + /* At this point both hw and priv are allocated. */ + + SET_IEEE80211_DEV(hw, &pdev->dev); + + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); + priv->cfg = cfg; + priv->pci_dev = pdev; + priv->inta_mask = CSR_INI_SET_MASK; + +#ifdef CONFIG_IWLWIFI_DEBUG + atomic_set(&priv->restrict_refcnt, 0); +#endif + if (iwl_alloc_traffic_mem(priv)) + IWL_ERR(priv, "Not enough memory to generate traffic log\n"); + + /************************** + * 2. Initializing PCI bus + **************************/ + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + /* both attempts failed: */ + if (err) { + IWL_WARN(priv, "No suitable DMA available.\n"); + goto out_pci_disable_device; + } + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + pci_set_drvdata(pdev, priv); + + + /*********************** + * 3. Read REV register + ***********************/ + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&priv->reg_lock); + spin_lock_init(&priv->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + iwl_hw_detect(priv); + IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", + priv->cfg->name, priv->hw_rev); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + iwl_prepare_card_hw(priv); + if (!priv->hw_ready) { + IWL_WARN(priv, "Failed, HW not ready\n"); + goto out_iounmap; + } + + /***************** + * 4. Read EEPROM + *****************/ + /* Read the EEPROM */ + err = iwl_eeprom_init(priv); + if (err) { + IWL_ERR(priv, "Unable to init EEPROM\n"); + goto out_iounmap; + } + err = iwl_eeprom_check_version(priv); + if (err) + goto out_free_eeprom; + + /* extract MAC Address */ + iwl_eeprom_get_mac(priv, priv->mac_addr); + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr); + SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); + + /************************ + * 5. Setup HW constants + ************************/ + if (iwl_set_hw_params(priv)) { + IWL_ERR(priv, "failed to set hw parameters\n"); + goto out_free_eeprom; + } + + /******************* + * 6. Setup priv + *******************/ + + err = iwl_init_drv(priv); + if (err) + goto out_free_eeprom; + /* At this point both hw and priv are initialized. */ + + /******************** + * 7. 
Setup services + ********************/ + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + pci_enable_msi(priv->pci_dev); + + iwl_alloc_isr_ict(priv); + err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr, + IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); + goto out_disable_msi; + } + err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group); + if (err) { + IWL_ERR(priv, "failed to create sysfs device attributes\n"); + goto out_free_irq; + } + + iwl_setup_deferred_work(priv); + iwl_setup_rx_handlers(priv); + + /********************************************* + * 8. Enable interrupts and read RFKILL state + *********************************************/ + + /* enable interrupts if needed: hw bug w/a */ + pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); + } + + iwl_enable_interrupts(priv); + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + + iwl_power_initialize(priv); + iwl_tt_initialize(priv); + + err = iwl_request_firmware(priv, true); + if (err) + goto out_remove_sysfs; + + return 0; + + out_remove_sysfs: + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + out_free_irq: + free_irq(priv->pci_dev->irq, priv); + iwl_free_isr_ict(priv); + out_disable_msi: + pci_disable_msi(priv->pci_dev); + iwl_uninit_drv(priv); + out_free_eeprom: + iwl_eeprom_free(priv); + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + out_pci_disable_device: + pci_disable_device(pdev); + out_ieee80211_free_hw: + iwl_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void __devexit iwl_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + unsigned long flags; + + if (!priv) + return; + + IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); + + iwl_dbgfs_unregister(priv); + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + /* ieee80211_unregister_hw call wil cause iwl_mac_stop to + * to be called and iwl_down since we are removing the device + * we need to set STATUS_EXIT_PENDING bit. + */ + set_bit(STATUS_EXIT_PENDING, &priv->status); + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; + } else { + iwl_down(priv); + } + + /* + * Make sure device is reset to low power before unloading driver. + * This may be redundant with iwl_down(), but there are paths to + * run iwl_down() without calling apm_ops.stop(), and there are + * paths to avoid running iwl_down() at all before leaving driver. + * This (inexpensive) call *makes sure* device is reset. 
+ */ + priv->cfg->ops->lib->apm_ops.stop(priv); + + iwl_tt_exit(priv); + + /* make sure we flush any pending irq or + * tasklet for the driver + */ + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_synchronize_irq(priv); + + iwl_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl_rx_queue_free(priv, &priv->rxq); + iwl_hw_txq_ctx_free(priv); + + iwl_clear_stations_table(priv); + iwl_eeprom_free(priv); + + + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + + /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... */ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_free_traffic_mem(priv); + + free_irq(priv->pci_dev->irq, priv); + pci_disable_msi(priv->pci_dev); + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + iwl_uninit_drv(priv); + + iwl_free_isr_ict(priv); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + ieee80211_free_hw(priv->hw); +} + + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { +#ifdef CONFIG_COMPAT_IWL4965 + {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, + {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, +#endif /* CONFIG_COMPAT_IWL4965 */ +#ifdef CONFIG_IWL5000 +/* 5100 Series WiFi */ + {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ + +/* 5300 Series WiFi */ + {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 
0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ + +/* 5350 Series WiFi/WiMax */ + {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ + +/* 5150 Series Wifi/WiMax */ + {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ + + {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ + +/* 6x00 Series */ + {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, + +/* 6x50 WiFi/WiMax Series */ + {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, + +/* 1000 Series WiFi */ + {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, +#endif /* CONFIG_IWL5000 */ + + {0} +}; +MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); + +static struct pci_driver iwl_driver = { + .name = DRV_NAME, + .id_table = iwl_hw_card_ids, + .probe = iwl_pci_probe, + .remove = 
__devexit_p(iwl_pci_remove), +#ifdef CONFIG_PM + .suspend = iwl_pci_suspend, + .resume = iwl_pci_resume, +#endif +}; + +static int __init iwl_init(void) +{ + + int ret; + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + + ret = iwlagn_rate_control_register(); + if (ret) { + printk(KERN_ERR DRV_NAME + "Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&iwl_driver); + if (ret) { + printk(KERN_ERR DRV_NAME "Unable to initialize PCI module\n"); + goto error_register; + } + + return ret; + +error_register: + iwlagn_rate_control_unregister(); + return ret; +} + +static void __exit iwl_exit(void) +{ + pci_unregister_driver(&iwl_driver); + iwlagn_rate_control_unregister(); +} + +module_exit(iwl_exit); +module_init(iwl_init); + +#ifdef CONFIG_IWLWIFI_DEBUG +module_param_named(debug50, iwl_debug_level, uint, S_IRUGO); +MODULE_PARM_DESC(debug50, "50XX debug output mask (deprecated)"); +module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif + diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c.orig b/drivers/net/wireless/iwlwifi/iwl-agn.c.orig new file mode 100644 index 0000000..47b0214 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c.orig @@ -0,0 +1,3943 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define DRV_NAME "iwlagn" + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-sta.h" +#include "iwl-calib.h" + + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +/* + * module name, copyright, version, etc. 
+ */ +#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" + +#ifdef CONFIG_IWLWIFI_DEBUG +#define VD "d" +#else +#define VD +#endif + +#define DRV_VERSION IWLWIFI_VERSION VD + + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("iwl4965"); + +/*************** STATION TABLE MANAGEMENT **** + * mac80211 should be examined to determine if sta_info is duplicating + * the functionality provided here + */ + +/**************************************************************/ + +/** + * iwl_commit_rxon - commit staging_rxon to hardware + * + * The RXON command in staging_rxon is committed to the hardware and + * the active_rxon structure is updated with the new data. This + * function correctly transitions out of the RXON_ASSOC_MSK state if + * a HW tune is required based on the RXON structure changes. + */ +int iwl_commit_rxon(struct iwl_priv *priv) +{ + /* cast away the const for active_rxon in this function */ + struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; + int ret; + bool new_assoc = + !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK); + + if (!iwl_is_alive(priv)) + return -EBUSY; + + /* always get timestamp with Rx frame */ + priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; + + ret = iwl_check_rxon_cmd(priv); + if (ret) { + IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* + * receive commit_rxon request + * abort any previous channel switch if still in process + */ + if (priv->switch_rxon.switch_in_progress && + (priv->switch_rxon.channel != priv->staging_rxon.channel)) { + IWL_DEBUG_11H(priv, "abort channel switch on %d\n", + le16_to_cpu(priv->switch_rxon.channel)); + priv->switch_rxon.switch_in_progress = false; + } + + /* If we don't need to send a full RXON, we can use + * iwl_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. */ + if (!iwl_full_rxon_required(priv)) { + ret = iwl_send_rxon_assoc(priv); + if (ret) { + IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); + return ret; + } + + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + iwl_print_rx_config_cmd(priv); + return 0; + } + + /* station table will be cleared */ + priv->assoc_station_added = 0; + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_is_associated(priv) && new_assoc) { + IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + ret = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), + &priv->active_rxon); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (ret) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); + return ret; + } + } + + IWL_DEBUG_INFO(priv, "Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %pM\n", + (new_assoc ? "" : "out"), + le16_to_cpu(priv->staging_rxon.channel), + priv->staging_rxon.bssid_addr); + + iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto); + + /* Apply the new configuration + * RXON unassoc clears the station table in uCode, send it before + * we add the bcast station. 
If assoc bit is set, we will send RXON + * after having added the bcast and bssid station. + */ + if (!new_assoc) { + ret = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); + if (ret) { + IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); + return ret; + } + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + } + + iwl_clear_stations_table(priv); + + priv->start_calib = 0; + + /* Add the broadcast address so we can send broadcast frames */ + priv->cfg->ops->lib->add_bcast_station(priv); + + + /* If we have set the ASSOC_MSK and we are in BSS mode then + * add the IWL_AP_ID to the station rate table */ + if (new_assoc) { + if (priv->iw_mode == NL80211_IFTYPE_STATION) { + ret = iwl_rxon_add_station(priv, + priv->active_rxon.bssid_addr, 1); + if (ret == IWL_INVALID_STATION) { + IWL_ERR(priv, + "Error adding AP address for TX.\n"); + return -EIO; + } + priv->assoc_station_added = 1; + if (priv->default_wep_key && + iwl_send_static_wepkey_cmd(priv, 0)) + IWL_ERR(priv, + "Could not send WEP static key.\n"); + } + + /* + * allow CTS-to-self if possible for new association. + * this is relevant only for 5000 series and up, + * but will not damage 4965 + */ + priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN; + + /* Apply the new configuration + * RXON assoc doesn't clear the station table in uCode, + */ + ret = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); + if (ret) { + IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); + return ret; + } + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + } + iwl_print_rx_config_cmd(priv); + + iwl_init_sensitivity(priv); + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); + if (ret) { + IWL_ERR(priv, "Error sending TX power (%d)\n", ret); + return ret; + } + + return 0; +} + +void iwl_update_chain_flags(struct iwl_priv *priv) +{ + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + iwlcore_commit_rxon(priv); +} + +static void iwl_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERR(priv, "Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl_frame, list); +} + +static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +static u32 iwl_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + int left) +{ + if (!iwl_is_associated(priv) || !priv->ibss_beacon || + ((priv->iw_mode != NL80211_IFTYPE_ADHOC) && + (priv->iw_mode != NL80211_IFTYPE_AP))) + return 0; + + if (priv->ibss_beacon->len > left) + return 0; + + memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); + + return priv->ibss_beacon->len; +} + +/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ +static void iwl_set_beacon_tim(struct iwl_priv *priv, + struct iwl_tx_beacon_cmd *tx_beacon_cmd, + u8 *beacon, u32 frame_size) +{ + u16 tim_idx; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; + + /* + * The index is relative to frame start but we start looking at the + * variable-length part of the beacon. + */ + tim_idx = mgmt->u.beacon.variable - beacon; + + /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ + while ((tim_idx < (frame_size - 2)) && + (beacon[tim_idx] != WLAN_EID_TIM)) + tim_idx += beacon[tim_idx+1] + 2; + + /* If TIM field was found, set variables */ + if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { + tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); + tx_beacon_cmd->tim_size = beacon[tim_idx+1]; + } else + IWL_WARN(priv, "Unable to find TIM Element in beacon\n"); +} + +static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame) +{ + struct iwl_tx_beacon_cmd *tx_beacon_cmd; + u32 frame_size; + u32 rate_flags; + u32 rate; + /* + * We have to set up the TX command, the TX Beacon command, and the + * beacon contents. 
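+ * The size returned below (beacon command plus the copied beacon frame)
+ * is used by the caller as the REPLY_TX_BEACON command length.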
+ */ + + /* Initialize memory */ + tx_beacon_cmd = &frame->u.beacon; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + /* Set up TX beacon contents */ + frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE)) + return 0; + + /* Set up TX command fields */ + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; + + /* Set up TX beacon command fields */ + iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame, + frame_size); + + /* Set up packet rate and flags */ + rate = iwl_rate_get_lowest_plcp(priv); + priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); + rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); + if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate, + rate_flags); + + return sizeof(*tx_beacon_cmd) + frame_size; +} +static int iwl_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + unsigned int frame_size; + int rc; + + frame = iwl_get_free_frame(priv); + if (!frame) { + IWL_ERR(priv, "Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + frame_size = iwl_hw_get_beacon_cmd(priv, frame); + if (!frame_size) { + IWL_ERR(priv, "Error configuring the beacon command\n"); + iwl_free_frame(priv, frame); + return -EINVAL; + } + + rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl_free_frame(priv, frame); + + return rc; +} + +static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + dma_addr_t addr = get_unaligned_le32(&tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + addr |= + ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; + + return addr; +} + +static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, + dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + hi_n_len |= ((addr >> 16) >> 16) & 0xF; + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + +static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) +{ + return tfd->num_tbs & 0x1f; +} + +/** + * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @priv - driver private data + * @txq - tx queue + * + * Does NOT advance any TFD circular buffer read/write indexes + * Does NOT free the TFD itself (which is within circular buffer) + */ +void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds; + struct iwl_tfd *tfd; + struct pci_dev *dev = priv->pci_dev; + int index = txq->q.read_ptr; + int i; + int num_tbs; + + tfd = &tfd_tmp[index]; + + /* Sanity check on number of chunks */ + num_tbs = iwl_tfd_get_num_tbs(tfd); + + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Too many chunks: %i\n", num_tbs); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap 
tx_cmd */ + if (num_tbs) + pci_unmap_single(dev, + pci_unmap_addr(&txq->meta[index], mapping), + pci_unmap_len(&txq->meta[index], len), + PCI_DMA_BIDIRECTIONAL); + + /* Unmap chunks, if any. */ + for (i = 1; i < num_tbs; i++) { + pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i), + iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE); + + if (txq->txb) { + dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]); + txq->txb[txq->q.read_ptr].skb[i - 1] = NULL; + } + } +} + +int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, + u8 reset, u8 pad) +{ + struct iwl_queue *q; + struct iwl_tfd *tfd, *tfd_tmp; + u32 num_tbs; + + q = &txq->q; + tfd_tmp = (struct iwl_tfd *)txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + num_tbs = iwl_tfd_get_num_tbs(tfd); + + /* Each TFD can point to a maximum 20 Tx buffers */ + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Error can not send more than %d chunks\n", + IWL_NUM_OF_TBS); + return -EINVAL; + } + + BUG_ON(addr & ~DMA_BIT_MASK(36)); + if (unlikely(addr & ~IWL_TX_DMA_MASK)) + IWL_ERR(priv, "Unaligned address = %llx\n", + (unsigned long long)addr); + + iwl_tfd_set_tb(tfd, num_tbs, addr, len); + + return 0; +} + +/* + * Tell nic where to find circular buffer of Tx Frame Descriptors for + * given Tx queue, and enable the DMA channel used for that queue. + * + * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA + * channels supported in hardware. + */ +int iwl_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + int txq_id = txq->q.id; + + /* Circular buffer (TFD queue in DRAM) physical base address */ + iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + + return 0; +} + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ +static void iwl_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, + &pkt->u.alive_frame, + sizeof(struct iwl_init_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... */ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARN(priv, "uCode did not respond OK.\n"); +} + +static void iwl_bg_beacon_update(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, beacon_update); + struct sk_buff *beacon; + + /* Pull updated AP beacon from mac80211. 
will fail if not in AP mode */ + beacon = ieee80211_beacon_get(priv->hw, priv->vif); + + if (!beacon) { + IWL_ERR(priv, "update beacon failed\n"); + return; + } + + mutex_lock(&priv->mutex); + /* new beacon skb is allocated every time; dispose previous.*/ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = beacon; + mutex_unlock(&priv->mutex); + + iwl_send_beacon_cmd(priv); +} + +/** + * iwl_bg_statistics_periodic - Timer callback to queue statistics + * + * This callback is provided in order to send a statistics request. + * + * This timer function is continually reset to execute within + * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION + * was received. We need to ensure we receive the statistics in order + * to update the temperature used for calibrating the TXPOWER. + */ +static void iwl_bg_statistics_periodic(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* dont send host command if rf-kill is on */ + if (!iwl_is_ready_rf(priv)) + return; + + iwl_send_statistics_request(priv, CMD_ASYNC, false); +} + + +static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base, + u32 start_idx, u32 num_events, + u32 mode) +{ + u32 i; + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (mode == 0) + ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32)); + else + ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (iwl_grab_nic_access(priv)) { + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return; + } + + /* Set starting address; reads will auto-increment */ + _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + rmb(); + + /* + * "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. 
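+ * Each entry is read as two 32-bit words in mode 0, or three words
+ * (event id, timestamp, data) otherwise.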
+ */ + for (i = 0; i < num_events; i++) { + ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + trace_iwlwifi_dev_ucode_cont_event(priv, + 0, time, ev); + } else { + data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + trace_iwlwifi_dev_ucode_cont_event(priv, + time, data, ev); + } + } + /* Allow device to power down */ + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static void iwl_continuous_event_trace(struct iwl_priv *priv) +{ + u32 capacity; /* event log capacity in # entries */ + u32 base; /* SRAM byte address of event log header */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + + if (priv->ucode_type == UCODE_INIT) + base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); + else + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + capacity = iwl_read_targ_mem(priv, base); + num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); + mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); + next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); + } else + return; + + if (num_wraps == priv->event_log.num_wraps) { + iwl_print_cont_event_trace(priv, + base, priv->event_log.next_entry, + next_entry - priv->event_log.next_entry, + mode); + priv->event_log.non_wraps_count++; + } else { + if ((num_wraps - priv->event_log.num_wraps) > 1) + priv->event_log.wraps_more_count++; + else + priv->event_log.wraps_once_count++; + trace_iwlwifi_dev_ucode_wrap_event(priv, + num_wraps - priv->event_log.num_wraps, + next_entry, priv->event_log.next_entry); + if (next_entry < priv->event_log.next_entry) { + iwl_print_cont_event_trace(priv, base, + priv->event_log.next_entry, + capacity - priv->event_log.next_entry, + mode); + + iwl_print_cont_event_trace(priv, base, 0, + next_entry, mode); + } else { + iwl_print_cont_event_trace(priv, base, + next_entry, capacity - next_entry, + mode); + + iwl_print_cont_event_trace(priv, base, 0, + next_entry, mode); + } + } + priv->event_log.num_wraps = num_wraps; + priv->event_log.next_entry = next_entry; +} + +/** + * iwl_bg_ucode_trace - Timer callback to log ucode event + * + * The timer is continually set to execute every + * UCODE_TRACE_PERIOD milliseconds after the last timer expired + * this function is to perform continuous uCode event logging operation + * if enabled + */ +static void iwl_bg_ucode_trace(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (priv->event_log.ucode_trace) { + iwl_continuous_event_trace(priv); + /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */ + mod_timer(&priv->ucode_trace, + jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD)); + } +} + +static void iwl_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl4965_beacon_notif *beacon = + (struct iwl4965_beacon_notif *)pkt->u.raw; + u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + 
le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + if ((priv->iw_mode == NL80211_IFTYPE_AP) && + (!test_bit(STATUS_EXIT_PENDING, &priv->status))) + queue_work(priv->workqueue, &priv->beacon_update); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On", + (flags & CT_CARD_DISABLED) ? + "Reached" : "Not reached"); + + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | + CT_CARD_DISABLED)) { + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + iwl_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + if (!(flags & RXON_CARD_DISABLED)) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + } + if (flags & CT_CARD_DISABLED) + iwl_tt_enter_ct_kill(priv); + } + if (!(flags & CT_CARD_DISABLED)) + iwl_tt_exit_ct_kill(priv); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + if (!(flags & RXON_CARD_DISABLED)) + iwl_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status))) + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src) +{ + if (src == IWL_PWR_SRC_VAUX) { + if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) + iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + } else { + iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); + } + + return 0; +} + +/** + * iwl_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. + */ +static void iwl_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; + priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; + + /* + * The same handler is used for both the REPLY to a discrete + * statistics request from the host as well as for the periodic + * statistics notifications (after received beacons) from the uCode. 
+ */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; + + iwl_setup_rx_scan_handlers(priv); + + /* status change handler */ + priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; + + priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = + iwl_rx_missed_beacon_notif; + /* Rx handlers */ + priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy; + priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx; + /* block ack */ + priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba; + /* Set up hardware specific Rx handlers */ + priv->cfg->ops->lib->rx_handler_setup(priv); +} + +/** + * iwl_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +void iwl_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty; + + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). */ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + + while (i != r) { + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_unmap_page(priv->pci_dev, rxb->page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + pkt = rxb_addr(rxb); + + trace_iwlwifi_dev_rx(priv, pkt, + le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && + (pkt->hdr.cmd != REPLY_RX) && + (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && + (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. 
See iwl_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, + i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG_RX(priv, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some rx_handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking iwl_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + iwl_tx_cmd_complete(priv, rxb); + else + IWL_WARN(priv, "Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, + 0, PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode wont assert. */ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + iwl_rx_replenish_now(priv); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + iwl_rx_replenish_now(priv); + else + iwl_rx_queue_restock(priv); +} + +/* call this function to flush any scheduled tasklet */ +static inline void iwl_synchronize_irq(struct iwl_priv *priv) +{ + /* wait to make sure we flush pending tasklet*/ + synchronize_irq(priv->pci_dev->irq); + tasklet_kill(&priv->irq_tasklet); +} + +static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; + u32 i; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. 
*/ + if (inta_fh & CSR49_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR49_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio" : "enable radio"); + + priv->isr_stats.rfkill++; + + /* driver only loads ucode once setting the interface up. + * the driver allows loading the ucode even if the radio + * is killed. Hence update the killswitch state here. The + * rfkill handler will care about restarting if needed. + */ + if (!test_bit(STATUS_ALIVE, &priv->status)) { + if (hw_rf_kill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); + } + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERR(priv, "Microcode CT kill error detected.\n"); + priv->isr_stats.ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. " + " Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + priv->isr_stats.sw_err = inta; + iwl_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* + * uCode wakes up after power-down sleep. + * Tell device about any new tx or host commands enqueued, + * and about any Rx buffers made available while asleep. 
+ */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_rx_queue_update_write_ptr(priv, &priv->rxq); + for (i = 0; i < priv->hw_params.max_txq_num; i++) + iwl_txq_update_write_ptr(priv, &priv->txq[i]); + priv->isr_stats.wakeup++; + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl_rx_handle(priv); + priv->isr_stats.rx++; + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); + priv->isr_stats.tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + priv->ucode_write_complete = 1; + wake_up_interruptible(&priv->wait_command_queue); + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~(priv->inta_mask)) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + /* only Re-enable if diabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif +} + +/* tasklet for iwlagn interrupt */ +static void iwl_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta = 0; + u32 handled = 0; + unsigned long flags; + u32 i; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + */ + iwl_write32(priv, CSR_INT, priv->inta); + + inta = priv->inta; + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ", + inta, inta_mask); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* saved interrupt in inta variable now we can reset priv->inta */ + priv->inta = 0; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio" : "enable radio"); + + priv->isr_stats.rfkill++; + + /* driver only loads ucode once setting the interface up. + * the driver allows loading the ucode even if the radio + * is killed. Hence update the killswitch state here. The + * rfkill handler will care about restarting if needed. + */ + if (!test_bit(STATUS_ALIVE, &priv->status)) { + if (hw_rf_kill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); + } + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERR(priv, "Microcode CT kill error detected.\n"); + priv->isr_stats.ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. " + " Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + priv->isr_stats.sw_err = inta; + iwl_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_rx_queue_update_write_ptr(priv, &priv->rxq); + for (i = 0; i < priv->hw_params.max_txq_num; i++) + iwl_txq_update_write_ptr(priv, &priv->txq[i]); + + priv->isr_stats.wakeup++; + + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | + CSR_INT_BIT_RX_PERIODIC)) { + IWL_DEBUG_ISR(priv, "Rx interrupt\n"); + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + iwl_write32(priv, CSR_FH_INT_STATUS, + CSR49_FH_INT_RX_MASK); + } + if (inta & CSR_INT_BIT_RX_PERIODIC) { + handled |= CSR_INT_BIT_RX_PERIODIC; + iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC); + } + /* Sending RX interrupt require many steps to be done in the + * the device: + * 1- write interrupt to current index in ICT table. + * 2- dma RX frame. + * 3- update RX shared data to indicate last write index. + * 4- send interrupt. + * This could lead to RX race, driver could receive RX interrupt + * but the shared data changes does not reflect this; + * periodic interrupt will detect any dangling Rx activity. 
+ */ + + /* Disable periodic interrupt; we use it as just a one-shot. */ + iwl_write8(priv, CSR_INT_PERIODIC_REG, + CSR_INT_PERIODIC_DIS); + iwl_rx_handle(priv); + + /* + * Enable periodic interrupt in 8 msec only if we received + * real RX interrupt (instead of just periodic int), to catch + * any dangling Rx interrupt. If it was just the periodic + * interrupt, there was no dangling Rx activity, and no need + * to extend the periodic interrupt; one-shot is enough. + */ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) + iwl_write8(priv, CSR_INT_PERIODIC_REG, + CSR_INT_PERIODIC_ENA); + + priv->isr_stats.rx++; + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK); + IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); + priv->isr_stats.tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + priv->ucode_write_complete = 1; + wake_up_interruptible(&priv->wait_command_queue); + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~(priv->inta_mask)) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + } + + /* Re-enable all interrupts */ + /* only Re-enable if diabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_enable_interrupts(priv); +} + + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) +{ + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); +} + +static void iwl_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + + +static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); +static int iwl_mac_setup_register(struct iwl_priv *priv); + +static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first) +{ + const char *name_pre = priv->cfg->fw_name_pre; + + if (first) + priv->fw_index = priv->cfg->ucode_api_max; + else + priv->fw_index--; + + if (priv->fw_index < priv->cfg->ucode_api_min) { + IWL_ERR(priv, "no suitable firmware found!\n"); + return -ENOENT; + } + + sprintf(priv->firmware_name, "%s%d%s", + name_pre, priv->fw_index, ".ucode"); + + IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n", + priv->firmware_name); + + return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name, + &priv->pci_dev->dev, GFP_KERNEL, priv, + iwl_ucode_callback); +} + +/** + * iwl_ucode_callback - callback when firmware was loaded + * + * If loaded successfully, copies the firmware into buffers + * for the card to fetch (via DMA). 
+ */ +static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) +{ + struct iwl_priv *priv = context; + struct iwl_ucode_header *ucode; + const unsigned int api_max = priv->cfg->ucode_api_max; + const unsigned int api_min = priv->cfg->ucode_api_min; + u8 *src; + size_t len; + u32 api_ver, build; + u32 inst_size, data_size, init_size, init_data_size, boot_size; + int err; + u16 eeprom_ver; + + if (!ucode_raw) { + IWL_ERR(priv, "request for firmware file '%s' failed.\n", + priv->firmware_name); + goto try_again; + } + + IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n", + priv->firmware_name, ucode_raw->size); + + /* Make sure that we got at least the v1 header! */ + if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) { + IWL_ERR(priv, "File size way too small!\n"); + goto try_again; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct iwl_ucode_header *)ucode_raw->data; + + priv->ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IWL_UCODE_API(priv->ucode_ver); + build = priv->cfg->ops->ucode->get_build(ucode, api_ver); + inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver); + data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver); + init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver); + init_data_size = + priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver); + boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver); + src = priv->cfg->ops->ucode->get_data(ucode, api_ver); + + /* api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward */ + + if (api_ver < api_min || api_ver > api_max) { + IWL_ERR(priv, "Driver unable to support your firmware API. " + "Driver supports v%u, firmware is v%u.\n", + api_max, api_ver); + goto try_again; + } + + if (api_ver != api_max) + IWL_ERR(priv, "Firmware has old API version. Expected v%u, " + "got v%u. New firmware can be obtained " + "from http://www.intellinuxwireless.org.\n", + api_max, api_ver); + + IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + snprintf(priv->hw->wiphy->fw_version, + sizeof(priv->hw->wiphy->fw_version), + "%u.%u.%u.%u", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + if (build) + IWL_DEBUG_INFO(priv, "Build %u\n", build); + + eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); + IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n", + (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) + ? "OTP" : "EEPROM", eeprom_ver); + + IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", + priv->ucode_ver); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", + inst_size); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n", + data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n", + init_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n", + init_data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n", + boot_size); + + /* + * For any of the failures below (before allocating pci memory) + * we will try to load a version with a smaller API -- maybe the + * user just got a corrupted version of the latest API. 
+ */ + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size != + priv->cfg->ops->ucode->get_header_size(api_ver) + + inst_size + data_size + init_size + + init_data_size + boot_size) { + + IWL_DEBUG_INFO(priv, + "uCode file size %d does not match expected size\n", + (int)ucode_raw->size); + goto try_again; + } + + /* Verify that uCode images will fit in card's SRAM */ + if (inst_size > priv->hw_params.max_inst_size) { + IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n", + inst_size); + goto try_again; + } + + if (data_size > priv->hw_params.max_data_size) { + IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n", + data_size); + goto try_again; + } + if (init_size > priv->hw_params.max_inst_size) { + IWL_INFO(priv, "uCode init instr len %d too large to fit in\n", + init_size); + goto try_again; + } + if (init_data_size > priv->hw_params.max_data_size) { + IWL_INFO(priv, "uCode init data len %d too large to fit in\n", + init_data_size); + goto try_again; + } + if (boot_size > priv->hw_params.max_bsm_size) { + IWL_INFO(priv, "uCode boot instr len %d too large to fit in\n", + boot_size); + goto try_again; + } + + /* Allocate ucode buffers for card's bus-master loading ... */ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = inst_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); + + priv->ucode_data.len = data_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); + + priv->ucode_data_backup.len = data_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Initialization instructions and data */ + if (init_size && init_data_size) { + priv->ucode_init.len = init_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); + + priv->ucode_init_data.len = init_data_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); + + if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) + goto err_pci_alloc; + } + + /* Bootstrap (instructions only, no data) */ + if (boot_size) { + priv->ucode_boot.len = boot_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); + + if (!priv->ucode_boot.v_addr) + goto err_pci_alloc; + } + + /* Copy images into buffers for card's bus-master reads ... 
*/ + + /* Runtime instructions (first block of data in file) */ + len = inst_size; + IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", len); + memcpy(priv->ucode_code.v_addr, src, len); + src += len; + + IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* Runtime data (2nd block) + * NOTE: Copy into backup buffer will be done in iwl_up() */ + len = data_size; + IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", len); + memcpy(priv->ucode_data.v_addr, src, len); + memcpy(priv->ucode_data_backup.v_addr, src, len); + src += len; + + /* Initialization instructions (3rd block) */ + if (init_size) { + len = init_size; + IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n", + len); + memcpy(priv->ucode_init.v_addr, src, len); + src += len; + } + + /* Initialization data (4th block) */ + if (init_data_size) { + len = init_data_size; + IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n", + len); + memcpy(priv->ucode_init_data.v_addr, src, len); + src += len; + } + + /* Bootstrap instructions (5th block) */ + len = boot_size; + IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", len); + memcpy(priv->ucode_boot.v_addr, src, len); + + /************************************************** + * This is still part of probe() in a sense... + * + * 9. Setup and register with mac80211 and debugfs + **************************************************/ + err = iwl_mac_setup_register(priv); + if (err) + goto out_unbind; + + err = iwl_dbgfs_register(priv, DRV_NAME); + if (err) + IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + return; + + try_again: + /* try next, if any */ + if (iwl_request_firmware(priv, false)) + goto out_unbind; + release_firmware(ucode_raw); + return; + + err_pci_alloc: + IWL_ERR(priv, "failed to allocate pci memory\n"); + iwl_dealloc_ucode_pci(priv); + out_unbind: + device_release_driver(&priv->pci_dev->dev); + release_firmware(ucode_raw); +} + +static const char *desc_lookup_text[] = { + "OK", + "FAIL", + "BAD_PARAM", + "BAD_CHECKSUM", + "NMI_INTERRUPT_WDG", + "SYSASSERT", + "FATAL_ERROR", + "BAD_COMMAND", + "HW_ERROR_TUNE_LOCK", + "HW_ERROR_TEMPERATURE", + "ILLEGAL_CHAN_FREQ", + "VCC_NOT_STABLE", + "FH_ERROR", + "NMI_INTERRUPT_HOST", + "NMI_INTERRUPT_ACTION_PT", + "NMI_INTERRUPT_UNKNOWN", + "UCODE_VERSION_MISMATCH", + "HW_ERROR_ABS_LOCK", + "HW_ERROR_CAL_LOCK_FAIL", + "NMI_INTERRUPT_INST_ACTION_PT", + "NMI_INTERRUPT_DATA_ACTION_PT", + "NMI_TRM_HW_ER", + "NMI_INTERRUPT_TRM", + "NMI_INTERRUPT_BREAK_POINT" + "DEBUG_0", + "DEBUG_1", + "DEBUG_2", + "DEBUG_3", + "ADVANCED SYSASSERT" +}; + +static const char *desc_lookup(int i) +{ + int max = ARRAY_SIZE(desc_lookup_text) - 1; + + if (i < 0 || i > max) + i = max; + + return desc_lookup_text[i]; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +void iwl_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 data2, line; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + + if (priv->ucode_type == UCODE_INIT) + base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); + else + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + + if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Not valid error log pointer 0x%08X for %s uCode\n", + base, 
(priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); + return; + } + + count = iwl_read_targ_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERR(priv, "Start IWL Error Log Dump:\n"); + IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", + priv->status, count); + } + + desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); + blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); + blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); + ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32)); + ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32)); + data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32)); + data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32)); + line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); + time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); + + trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, + blink1, blink2, ilink1, ilink2); + + IWL_ERR(priv, "Desc Time " + "data1 data2 line\n"); + IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", + desc_lookup(desc), desc, time, data1, data2, line); + IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n"); + IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, + ilink1, ilink2); + +} + +#define EVENT_START_OFFSET (4 * sizeof(u32)) + +/** + * iwl_print_event_log - Dump error event log to syslog + * + */ +static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode, + int pos, char **buf, size_t bufsz) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (num_events == 0) + return pos; + if (priv->ucode_type == UCODE_INIT) + base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); + else + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + + /* Set starting address; reads will auto-increment */ + _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + rmb(); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. 
*/ + for (i = 0; i < num_events; i++) { + ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + /* data, ev */ + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOG:0x%08x:%04u\n", + time, ev); + } else { + trace_iwlwifi_dev_ucode_event(priv, 0, + time, ev); + IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", + time, ev); + } + } else { + data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + } else { + IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + trace_iwlwifi_dev_ucode_event(priv, time, + data, ev); + } + } + } + + /* Allow device to power down */ + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return pos; +} + +/** + * iwl_print_last_event_logs - Dump the newest # of event log to syslog + */ +static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity, + u32 num_wraps, u32 next_entry, + u32 size, u32 mode, + int pos, char **buf, size_t bufsz) +{ + /* + * display the newest DEFAULT_LOG_ENTRIES entries + * i.e the entries just before the next ont that uCode would fill. + */ + if (num_wraps) { + if (next_entry < size) { + pos = iwl_print_event_log(priv, + capacity - (size - next_entry), + size - next_entry, mode, + pos, buf, bufsz); + pos = iwl_print_event_log(priv, 0, + next_entry, mode, + pos, buf, bufsz); + } else + pos = iwl_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } else { + if (next_entry < size) { + pos = iwl_print_event_log(priv, 0, next_entry, + mode, pos, buf, bufsz); + } else { + pos = iwl_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } + } + return pos; +} + +/* For sanity check only. Actual size is determined by uCode, typ. 512 */ +#define MAX_EVENT_LOG_SIZE (512) + +#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) + +int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display) +{ + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + int pos = 0; + size_t bufsz = 0; + + if (priv->ucode_type == UCODE_INIT) + base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); + else + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + + if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Invalid event log pointer 0x%08X for %s uCode\n", + base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); + return -EINVAL; + } + + /* event log header */ + capacity = iwl_read_targ_mem(priv, base); + mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); + + if (capacity > MAX_EVENT_LOG_SIZE) { + IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", + capacity, MAX_EVENT_LOG_SIZE); + capacity = MAX_EVENT_LOG_SIZE; + } + + if (next_entry > MAX_EVENT_LOG_SIZE) { + IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", + next_entry, MAX_EVENT_LOG_SIZE); + next_entry = MAX_EVENT_LOG_SIZE; + } + + size = num_wraps ? 
capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); + return pos; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; +#else + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; +#endif + IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", + size); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (display) { + if (full_log) + bufsz = capacity * 48; + else + bufsz = size * 48; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + } + if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { + /* + * if uCode has wrapped back to top of log, + * start at the oldest entry, + * i.e the next one that uCode would fill. + */ + if (num_wraps) + pos = iwl_print_event_log(priv, next_entry, + capacity - next_entry, mode, + pos, buf, bufsz); + /* (then/else) start at top of log */ + pos = iwl_print_event_log(priv, 0, + next_entry, mode, pos, buf, bufsz); + } else + pos = iwl_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#else + pos = iwl_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#endif + return pos; +} + +/** + * iwl_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl_init_alive_start()). + */ +static void iwl_alive_start(struct iwl_priv *priv) +{ + int ret = 0; + + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. 
*/ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); + goto restart; + } + + iwl_clear_stations_table(priv); + ret = priv->cfg->ops->lib->alive_notify(priv); + if (ret) { + IWL_WARN(priv, + "Could not complete ALIVE transition [ntf]: %d\n", ret); + goto restart; + } + + /* After the ALIVE response, we can send host commands to the uCode */ + set_bit(STATUS_ALIVE, &priv->status); + + if (iwl_is_rfkill(priv)) + return; + + ieee80211_wake_queues(priv->hw); + + priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + /* Configure Tx antenna selection based on H/W config */ + if (priv->cfg->ops->hcmd->set_tx_ant) + priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant); + + if (iwl_is_associated(priv)) { + struct iwl_rxon_cmd *active_rxon = + (struct iwl_rxon_cmd *)&priv->active_rxon; + /* apply any changes in staging */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + iwl_connection_init_rx_config(priv, priv->iw_mode); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + } + + /* Configure Bluetooth device coexistence support */ + iwl_send_bt_config(priv); + + iwl_reset_run_time_calib(priv); + + /* Configure the adapter for unassociated operation */ + iwlcore_commit_rxon(priv); + + /* At this point, the NIC is initialized and operational */ + iwl_rf_kill_ct_config(priv); + + iwl_leds_init(priv); + + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); + set_bit(STATUS_READY, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + + iwl_power_update_mode(priv, true); + + /* reassociate for ADHOC mode */ + if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) { + struct sk_buff *beacon = ieee80211_beacon_get(priv->hw, + priv->vif); + if (beacon) + iwl_mac_beacon_update(priv->hw, beacon); + } + + + if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status)) + iwl_set_mode(priv, priv->iw_mode); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); + + IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); + + if (!exit_pending) + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_clear_stations_table(priv); + + /* Unblock any waiting calls */ + wake_up_interruptible_all(&priv->wait_command_queue); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + iwl_synchronize_irq(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called iwl_init() then + * clear all bits but the RF Kill bit and return */ + if (!iwl_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + 
STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill + * bit and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + + /* device going down, Stop using ICT table */ + iwl_disable_ict(priv); + + iwl_txq_ctx_stop(priv); + iwl_rxq_stop(priv); + + /* Power-down device's busmaster DMA clocks */ + iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + + /* Make sure (redundant) we've released our request to stay awake */ + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* Stop the device, and put it in low power state */ + priv->cfg->ops->lib->apm_ops.stop(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + priv->ibss_beacon = NULL; + + /* clear out any free frames */ + iwl_clear_free_frames(priv); +} + +static void iwl_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl_down(priv); + mutex_unlock(&priv->mutex); + + iwl_cancel_deferred_work(priv); +} + +#define HW_READY_TIMEOUT (50) + +static int iwl_set_hw_ready(struct iwl_priv *priv) +{ + int ret = 0; + + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); + + /* See if we got it */ + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + HW_READY_TIMEOUT); + if (ret != -ETIMEDOUT) + priv->hw_ready = true; + else + priv->hw_ready = false; + + IWL_DEBUG_INFO(priv, "hardware %s\n", + (priv->hw_ready == 1) ? "ready" : "not ready"); + return ret; +} + +static int iwl_prepare_card_hw(struct iwl_priv *priv) +{ + int ret = 0; + + IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n"); + + ret = iwl_set_hw_ready(priv); + if (priv->hw_ready) + return ret; + + /* If HW is not ready, prepare the conditions to check again */ + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE); + + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, + CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); + + /* HW should be ready by now, check again. 
*/ + if (ret != -ETIMEDOUT) + iwl_set_hw_ready(priv); + + return ret; +} + +#define MAX_HW_RESTARTS 5 + +static int __iwl_up(struct iwl_priv *priv) +{ + int i; + int ret; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { + IWL_ERR(priv, "ucode not available for device bringup\n"); + return -EIO; + } + + iwl_prepare_card_hw(priv); + + if (!priv->hw_ready) { + IWL_WARN(priv, "Exit HW not ready\n"); + return -EIO; + } + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + if (iwl_is_rfkill(priv)) { + wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); + + iwl_enable_interrupts(priv); + IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); + return 0; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + ret = iwl_hw_nic_init(priv); + if (ret) { + IWL_ERR(priv, "Unable to init nic\n"); + return ret; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. 
*/ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + iwl_clear_stations_table(priv); + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + ret = priv->cfg->ops->lib->load_ucode(priv); + + if (ret) { + IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n", + ret); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl_nic_start(priv); + + IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + priv->cfg->ops->lib->init_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* enable dram interrupt */ + iwl_reset_ict(priv); + + mutex_lock(&priv->mutex); + iwl_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_run_time_calib_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + run_time_calib_work); + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) { + mutex_unlock(&priv->mutex); + return; + } + + if (priv->start_calib) { + iwl_chain_noise_calibration(priv, &priv->statistics); + + iwl_sensitivity_calibration(priv, &priv->statistics); + } + + mutex_unlock(&priv->mutex); + return; +} + +static void iwl_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { + mutex_lock(&priv->mutex); + priv->vif = NULL; + priv->is_open = 0; + mutex_unlock(&priv->mutex); + iwl_down(priv); + ieee80211_restart_hw(priv->hw); + } else { + iwl_down(priv); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + __iwl_up(priv); + mutex_unlock(&priv->mutex); + } +} + +static void iwl_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +#define IWL_DELAY_NEXT_SCAN (HZ*2) + +void iwl_post_associate(struct iwl_priv *priv) +{ + struct ieee80211_conf *conf = NULL; + int ret = 0; + unsigned long flags; + + if (priv->iw_mode == NL80211_IFTYPE_AP) { + IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); + return; + } + + IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", + 
priv->assoc_id, priv->active_rxon.bssid_addr); + + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + + if (!priv->vif || !priv->is_open) + return; + + iwl_scan_cancel_timeout(priv, 200); + + conf = ieee80211_get_hw_conf(priv->hw); + + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + + iwl_setup_rxon_timing(priv); + ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (ret) + IWL_WARN(priv, "REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + + iwl_set_rxon_ht(priv, &priv->current_ht_config); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + + IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", + priv->assoc_id, priv->beacon_int); + + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + } + + iwlcore_commit_rxon(priv); + + switch (priv->iw_mode) { + case NL80211_IFTYPE_STATION: + break; + + case NL80211_IFTYPE_ADHOC: + + /* assume default assoc id */ + priv->assoc_id = 1; + + iwl_rxon_add_station(priv, priv->bssid, 0); + iwl_send_beacon_cmd(priv); + + break; + + default: + IWL_ERR(priv, "%s Should not be called in %d mode\n", + __func__, priv->iw_mode); + break; + } + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC) + priv->assoc_station_added = 1; + + spin_lock_irqsave(&priv->lock, flags); + iwl_activate_qos(priv, 0); + spin_unlock_irqrestore(&priv->lock, flags); + + /* the chain noise calibration will enabled PM upon completion + * If chain noise has already been run, then we need to enable + * power management here */ + if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) + iwl_power_update_mode(priv, false); + + /* Enable Rx differential gain and sensitivity calibrations */ + iwl_chain_noise_reset(priv); + priv->start_calib = 1; + +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +#define UCODE_READY_TIMEOUT (4 * HZ) + +/* + * Not a mac80211 entry point function, but it fits in with all the + * other mac80211 functions grouped here. 
+ */ +static int iwl_mac_setup_register(struct iwl_priv *priv) +{ + int ret; + struct ieee80211_hw *hw = priv->hw; + hw->rate_control_algorithm = "iwl-agn-rs"; + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_SPECTRUM_MGMT; + + if (!priv->cfg->broken_powersave) + hw->flags |= IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS; + + if (priv->cfg->sku & IWL_SKU_N) + hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | + IEEE80211_HW_SUPPORTS_STATIC_SMPS; + + hw->sta_data_size = sizeof(struct iwl_station_priv); + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + + hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS; + + /* + * For now, disable PS by default because it affects + * RX performance significantly. + */ + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX + 1; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->bands[IEEE80211_BAND_2GHZ]; + if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->bands[IEEE80211_BAND_5GHZ]; + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + return ret; + } + priv->mac80211_registered = 1; + + return 0; +} + + +static int iwl_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + ret = __iwl_up(priv); + mutex_unlock(&priv->mutex); + + if (ret) + return ret; + + if (iwl_is_rfkill(priv)) + goto out; + + IWL_DEBUG_INFO(priv, "Start UP work done.\n"); + + /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from + * mac80211 will not be run successfully. 
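+ * (The wait below sleeps on priv->wait_command_queue for up to
+ * UCODE_READY_TIMEOUT (4 * HZ, about four seconds); if it expires without
+ * STATUS_READY being set, the start is failed with -ETIMEDOUT.)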
*/ + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + test_bit(STATUS_READY, &priv->status), + UCODE_READY_TIMEOUT); + if (!ret) { + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_ERR(priv, "START_ALIVE timeout after %dms.\n", + jiffies_to_msecs(UCODE_READY_TIMEOUT)); + return -ETIMEDOUT; + } + } + + iwl_led_start(priv); + +out: + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +static void iwl_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!priv->is_open) + return; + + priv->is_open = 0; + + if (iwl_is_ready_rf(priv) || test_bit(STATUS_SCAN_HW, &priv->status)) { + /* stop mac, cancel any scan request and clear + * RXON_FILTER_ASSOC_MSK BIT + */ + mutex_lock(&priv->mutex); + iwl_scan_cancel_timeout(priv, 100); + mutex_unlock(&priv->mutex); + } + + iwl_down(priv); + + flush_workqueue(priv->workqueue); + + /* enable interrupts again in order to receive rfkill changes */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(priv); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MACDUMP(priv, "enter\n"); + + IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (iwl_tx_skb(priv, skb)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MACDUMP(priv, "leave\n"); + return NETDEV_TX_OK; +} + +void iwl_config_ap(struct iwl_priv *priv) +{ + int ret = 0; + unsigned long flags; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* The following should be done only at AP bring up */ + if (!iwl_is_associated(priv)) { + + /* RXON - unassoc (to set timing command) */ + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + + /* RXON Timing */ + iwl_setup_rxon_timing(priv); + ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (ret) + IWL_WARN(priv, "REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + /* AP has all antennas */ + priv->chain_noise_data.active_chains = + priv->hw_params.valid_rx_ant; + iwl_set_rxon_ht(priv, &priv->current_ht_config); + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + + /* FIXME: what should be the assoc_id for AP? */ + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & + WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC) + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* restore RXON assoc */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + iwl_reset_qos(priv); + spin_lock_irqsave(&priv->lock, flags); + iwl_activate_qos(priv, 1); + spin_unlock_irqrestore(&priv->lock, flags); + iwl_add_bcast_station(priv); + } + iwl_send_beacon_cmd(priv); + + /* FIXME - we need to add code here to detect a totally new + * configuration, reset the AP, unassoc, rxon timing, assoc, + * clear sta table, add BCAST sta... 
*/ +} + +static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + + struct iwl_priv *priv = hw->priv; + IWL_DEBUG_MAC80211(priv, "enter\n"); + + iwl_update_tkip_key(priv, keyconf, + sta ? sta->addr : iwl_bcast_addr, + iv32, phase1key); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + const u8 *addr; + int ret; + u8 sta_id; + bool is_default_wep_key = false; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (priv->cfg->mod_params->sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + addr = sta ? sta->addr : iwl_bcast_addr; + sta_id = iwl_find_station(priv, addr); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", + addr); + return -EINVAL; + + } + + mutex_lock(&priv->mutex); + iwl_scan_cancel_timeout(priv, 100); + mutex_unlock(&priv->mutex); + + /* If we are getting WEP group key and we didn't receive any key mapping + * so far, we are in legacy wep mode (group key only), otherwise we are + * in 1X mode. + * In legacy wep mode, we use another host command to the uCode */ + if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id && + priv->iw_mode != NL80211_IFTYPE_AP) { + if (cmd == SET_KEY) + is_default_wep_key = !priv->key_mapping_key; + else + is_default_wep_key = + (key->hw_key_idx == HW_KEY_DEFAULT); + } + + switch (cmd) { + case SET_KEY: + if (is_default_wep_key) + ret = iwl_set_default_wep_key(priv, key); + else + ret = iwl_set_dynamic_key(priv, key, sta_id); + + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (is_default_wep_key) + ret = iwl_remove_default_wep_key(priv, key); + else + ret = iwl_remove_dynamic_key(priv, key, sta_id); + + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", + sta->addr, tid); + + if (!(priv->cfg->sku & IWL_SKU_N)) + return -EACCES; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + IWL_DEBUG_HT(priv, "start Rx\n"); + return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn); + case IEEE80211_AMPDU_RX_STOP: + IWL_DEBUG_HT(priv, "stop Rx\n"); + ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return 0; + else + return ret; + case IEEE80211_AMPDU_TX_START: + IWL_DEBUG_HT(priv, "start Tx\n"); + return iwl_tx_agg_start(priv, sta->addr, tid, ssn); + case IEEE80211_AMPDU_TX_STOP: + IWL_DEBUG_HT(priv, "stop Tx\n"); + ret = iwl_tx_agg_stop(priv, sta->addr, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return 0; + else + return ret; + case IEEE80211_AMPDU_TX_OPERATIONAL: + /* do nothing */ + return -EOPNOTSUPP; + default: + IWL_DEBUG_HT(priv, "unknown\n"); + return -EINVAL; + break; + } + return 0; +} + +static int iwl_mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct iwl_priv *priv = 
hw->priv; + + priv = hw->priv; + IWL_DEBUG_MAC80211(priv, "enter\n"); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return 0; +} + +static void iwl_mac_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + int sta_id; + + /* + * TODO: We really should use this callback to + * actually maintain the station table in + * the device. + */ + + switch (cmd) { + case STA_NOTIFY_ADD: + atomic_set(&sta_priv->pending_frames, 0); + if (vif->type == NL80211_IFTYPE_AP) + sta_priv->client = true; + break; + case STA_NOTIFY_SLEEP: + WARN_ON(!sta_priv->client); + sta_priv->asleep = true; + if (atomic_read(&sta_priv->pending_frames) > 0) + ieee80211_sta_block_awake(hw, sta, true); + break; + case STA_NOTIFY_AWAKE: + WARN_ON(!sta_priv->client); + if (!sta_priv->asleep) + break; + sta_priv->asleep = false; + sta_id = iwl_find_station(priv, sta->addr); + if (sta_id != IWL_INVALID_STATION) + iwl_sta_modify_ps_wake(priv, sta_id); + break; + default: + break; + } +} + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + * + * The debug_level being managed using sysfs below is a per device debug + * level that is used instead of the global debug level if it (the per + * device debug level) is set. + */ +static ssize_t show_debug_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); +} +static ssize_t store_debug_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 0, &val); + if (ret) + IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf); + else { + priv->debug_level = val; + if (iwl_alloc_traffic_mem(priv)) + IWL_ERR(priv, + "Not enough memory to generate traffic log\n"); + } + return strnlen(buf, count); +} + +static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, + show_debug_level, store_debug_level); + + +#endif /* CONFIG_IWLWIFI_DEBUG */ + + +static ssize_t show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", priv->temperature); +} + +static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); + +static ssize_t show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_is_ready_rf(priv)) + return sprintf(buf, "off\n"); + else + return sprintf(buf, "%d\n", priv->tx_power_user_lmt); +} + +static ssize_t store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + IWL_INFO(priv, "%s is not in decimal form.\n", buf); + else { + ret = iwl_set_tx_power(priv, val, 
false); + if (ret) + IWL_ERR(priv, "failed setting tx power (0x%d).\n", + ret); + else + ret = count; + } + return ret; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); + +static ssize_t show_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); +} + +static ssize_t store_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + u32 flags; + int ret = strict_strtoul(buf, 0, &val); + if (ret) + return ret; + flags = (u32)val; + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.flags) != flags) { + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARN(priv, "Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags); + priv->staging_rxon.flags = cpu_to_le32(flags); + iwlcore_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); + +static ssize_t show_filter_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + return sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->active_rxon.filter_flags)); +} + +static ssize_t store_filter_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + u32 filter_flags; + int ret = strict_strtoul(buf, 0, &val); + if (ret) + return ret; + filter_flags = (u32)val; + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARN(priv, "Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " + "0x%04X\n", filter_flags); + priv->staging_rxon.filter_flags = + cpu_to_le32(filter_flags); + iwlcore_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, + store_filter_flags); + + +static ssize_t show_statistics(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 size = sizeof(struct iwl_notif_statistics); + u32 len = 0, ofs = 0; + u8 *data = (u8 *)&priv->statistics; + int rc = 0; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + mutex_lock(&priv->mutex); + rc = iwl_send_statistics_request(priv, CMD_SYNC, false); + mutex_unlock(&priv->mutex); + + if (rc) { + len = sprintf(buf, + "Error sending statistics request: 0x%08X\n", rc); + return len; + } + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); + +static ssize_t show_rts_ht_protection(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + return sprintf(buf, "%s\n", + priv->cfg->use_rts_for_ht ? 
"RTS/CTS" : "CTS-to-self"); +} + +static ssize_t store_rts_ht_protection(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + IWL_INFO(priv, "Input is not in decimal form.\n"); + else { + if (!iwl_is_associated(priv)) + priv->cfg->use_rts_for_ht = val ? true : false; + else + IWL_ERR(priv, "Sta associated with AP - " + "Change protection mechanism is not allowed\n"); + ret = count; + } + return ret; +} + +static DEVICE_ATTR(rts_ht_protection, S_IWUSR | S_IRUGO, + show_rts_ht_protection, store_rts_ht_protection); + + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->restart, iwl_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); + INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); + INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); + + iwl_setup_scan_deferred_work(priv); + + if (priv->cfg->ops->lib->setup_deferred_work) + priv->cfg->ops->lib->setup_deferred_work(priv); + + init_timer(&priv->statistics_periodic); + priv->statistics_periodic.data = (unsigned long)priv; + priv->statistics_periodic.function = iwl_bg_statistics_periodic; + + init_timer(&priv->ucode_trace); + priv->ucode_trace.data = (unsigned long)priv; + priv->ucode_trace.function = iwl_bg_ucode_trace; + + if (!priv->cfg->use_isr_legacy) + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet, (unsigned long)priv); + else + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet_legacy, (unsigned long)priv); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv) +{ + if (priv->cfg->ops->lib->cancel_deferred_work) + priv->cfg->ops->lib->cancel_deferred_work(priv); + + cancel_delayed_work_sync(&priv->init_alive_start); + cancel_delayed_work(&priv->scan_check); + cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->beacon_update); + del_timer_sync(&priv->statistics_periodic); + del_timer_sync(&priv->ucode_trace); +} + +static void iwl_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { + rates[i].bitrate = iwl_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on indexes */ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) { + /* + * If CCK != 1M then set short preamble rate flag. + */ + rates[i].flags |= + (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ? 
+ 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} + +static int iwl_init_drv(struct iwl_priv *priv) +{ + int ret; + + priv->ibss_beacon = NULL; + + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + mutex_init(&priv->sync_cmd_mutex); + + /* Clear the driver's (not device's) station table */ + iwl_clear_stations_table(priv); + + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->band = IEEE80211_BAND_2GHZ; + + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + + /* initialize force reset */ + priv->force_reset[IWL_RF_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_RF_RESET; + priv->force_reset[IWL_FW_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_FW_RELOAD; + + /* Choose which receivers/antennas to use */ + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + + iwl_init_scan_params(priv); + + iwl_reset_qos(priv); + + priv->qos_data.qos_active = 0; + priv->qos_data.qos_cap.val = 0; + + priv->rates_mask = IWL_RATES_MASK; + /* Set the tx_power_user_lmt to the lowest power level + * this value will get overwritten by channel max power avg + * from eeprom */ + priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN; + + ret = iwl_init_channel_map(priv); + if (ret) { + IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); + goto err; + } + + ret = iwlcore_init_geos(priv); + if (ret) { + IWL_ERR(priv, "initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + iwl_init_hw_rates(priv, priv->ieee_rates); + + return 0; + +err_free_channel_map: + iwl_free_channel_map(priv); +err: + return ret; +} + +static void iwl_uninit_drv(struct iwl_priv *priv) +{ + iwl_calib_free_results(priv); + iwlcore_free_geos(priv); + iwl_free_channel_map(priv); + kfree(priv->scan); +} + +static struct attribute *iwl_sysfs_entries[] = { + &dev_attr_flags.attr, + &dev_attr_filter_flags.attr, + &dev_attr_statistics.attr, + &dev_attr_temperature.attr, + &dev_attr_tx_power.attr, + &dev_attr_rts_ht_protection.attr, +#ifdef CONFIG_IWLWIFI_DEBUG + &dev_attr_debug_level.attr, +#endif + NULL +}; + +static struct attribute_group iwl_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl_sysfs_entries, +}; + +static struct ieee80211_ops iwl_hw_ops = { + .tx = iwl_mac_tx, + .start = iwl_mac_start, + .stop = iwl_mac_stop, + .add_interface = iwl_mac_add_interface, + .remove_interface = iwl_mac_remove_interface, + .config = iwl_mac_config, + .configure_filter = iwl_configure_filter, + .set_key = iwl_mac_set_key, + .update_tkip_key = iwl_mac_update_tkip_key, + .get_stats = iwl_mac_get_stats, + .conf_tx = iwl_mac_conf_tx, + .reset_tsf = iwl_mac_reset_tsf, + .bss_info_changed = iwl_bss_info_changed, + .ampdu_action = iwl_mac_ampdu_action, + .hw_scan = iwl_mac_hw_scan, + .sta_notify = iwl_mac_sta_notify, +}; + +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + unsigned long flags; + u16 pci_cmd; + + /************************ + * 1. Allocating HW data + ************************/ + + /* Disabling hardware scan means that mac80211 will perform scans + * "the hard way", rather than using device's scan. 
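+ * (Concretely, hw_scan is simply cleared from iwl_hw_ops before the ops
+ * table is handed to mac80211 through iwl_alloc_all(), so mac80211 falls
+ * back to its own software scan implementation.)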
*/ + if (cfg->mod_params->disable_hw_scan) { + if (iwl_debug_level & IWL_DL_INFO) + dev_printk(KERN_DEBUG, &(pdev->dev), + "Disabling hw_scan\n"); + iwl_hw_ops.hw_scan = NULL; + } + + hw = iwl_alloc_all(cfg, &iwl_hw_ops); + if (!hw) { + err = -ENOMEM; + goto out; + } + priv = hw->priv; + /* At this point both hw and priv are allocated. */ + + SET_IEEE80211_DEV(hw, &pdev->dev); + + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); + priv->cfg = cfg; + priv->pci_dev = pdev; + priv->inta_mask = CSR_INI_SET_MASK; + +#ifdef CONFIG_IWLWIFI_DEBUG + atomic_set(&priv->restrict_refcnt, 0); +#endif + if (iwl_alloc_traffic_mem(priv)) + IWL_ERR(priv, "Not enough memory to generate traffic log\n"); + + /************************** + * 2. Initializing PCI bus + **************************/ + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + /* both attempts failed: */ + if (err) { + IWL_WARN(priv, "No suitable DMA available.\n"); + goto out_pci_disable_device; + } + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + pci_set_drvdata(pdev, priv); + + + /*********************** + * 3. Read REV register + ***********************/ + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&priv->reg_lock); + spin_lock_init(&priv->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + iwl_hw_detect(priv); + IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", + priv->cfg->name, priv->hw_rev); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + iwl_prepare_card_hw(priv); + if (!priv->hw_ready) { + IWL_WARN(priv, "Failed, HW not ready\n"); + goto out_iounmap; + } + + /***************** + * 4. Read EEPROM + *****************/ + /* Read the EEPROM */ + err = iwl_eeprom_init(priv); + if (err) { + IWL_ERR(priv, "Unable to init EEPROM\n"); + goto out_iounmap; + } + err = iwl_eeprom_check_version(priv); + if (err) + goto out_free_eeprom; + + /* extract MAC Address */ + iwl_eeprom_get_mac(priv, priv->mac_addr); + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr); + SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); + + /************************ + * 5. Setup HW constants + ************************/ + if (iwl_set_hw_params(priv)) { + IWL_ERR(priv, "failed to set hw parameters\n"); + goto out_free_eeprom; + } + + /******************* + * 6. Setup priv + *******************/ + + err = iwl_init_drv(priv); + if (err) + goto out_free_eeprom; + /* At this point both hw and priv are initialized. */ + + /******************** + * 7. 
Setup services + ********************/ + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + pci_enable_msi(priv->pci_dev); + + iwl_alloc_isr_ict(priv); + err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr, + IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); + goto out_disable_msi; + } + err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group); + if (err) { + IWL_ERR(priv, "failed to create sysfs device attributes\n"); + goto out_free_irq; + } + + iwl_setup_deferred_work(priv); + iwl_setup_rx_handlers(priv); + + /********************************************* + * 8. Enable interrupts and read RFKILL state + *********************************************/ + + /* enable interrupts if needed: hw bug w/a */ + pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); + } + + iwl_enable_interrupts(priv); + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + + iwl_power_initialize(priv); + iwl_tt_initialize(priv); + + err = iwl_request_firmware(priv, true); + if (err) + goto out_remove_sysfs; + + return 0; + + out_remove_sysfs: + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + out_free_irq: + free_irq(priv->pci_dev->irq, priv); + iwl_free_isr_ict(priv); + out_disable_msi: + pci_disable_msi(priv->pci_dev); + iwl_uninit_drv(priv); + out_free_eeprom: + iwl_eeprom_free(priv); + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + out_pci_disable_device: + pci_disable_device(pdev); + out_ieee80211_free_hw: + iwl_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void __devexit iwl_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + unsigned long flags; + + if (!priv) + return; + + IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); + + iwl_dbgfs_unregister(priv); + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + /* ieee80211_unregister_hw call wil cause iwl_mac_stop to + * to be called and iwl_down since we are removing the device + * we need to set STATUS_EXIT_PENDING bit. + */ + set_bit(STATUS_EXIT_PENDING, &priv->status); + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; + } else { + iwl_down(priv); + } + + /* + * Make sure device is reset to low power before unloading driver. + * This may be redundant with iwl_down(), but there are paths to + * run iwl_down() without calling apm_ops.stop(), and there are + * paths to avoid running iwl_down() at all before leaving driver. + * This (inexpensive) call *makes sure* device is reset. 
+ */ + priv->cfg->ops->lib->apm_ops.stop(priv); + + iwl_tt_exit(priv); + + /* make sure we flush any pending irq or + * tasklet for the driver + */ + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_synchronize_irq(priv); + + iwl_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl_rx_queue_free(priv, &priv->rxq); + iwl_hw_txq_ctx_free(priv); + + iwl_clear_stations_table(priv); + iwl_eeprom_free(priv); + + + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + + /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... */ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_free_traffic_mem(priv); + + free_irq(priv->pci_dev->irq, priv); + pci_disable_msi(priv->pci_dev); + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + iwl_uninit_drv(priv); + + iwl_free_isr_ict(priv); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + ieee80211_free_hw(priv->hw); +} + + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { +#ifdef CONFIG_IWL4965 + {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, + {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, +#endif /* CONFIG_IWL4965 */ +#ifdef CONFIG_IWL5000 +/* 5100 Series WiFi */ + {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ + +/* 5300 Series WiFi */ + {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1121, 
iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ + +/* 5350 Series WiFi/WiMax */ + {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ + +/* 5150 Series Wifi/WiMax */ + {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ + + {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ + +/* 6x00 Series */ + {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, + +/* 6x50 WiFi/WiMax Series */ + {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, + +/* 1000 Series WiFi */ + {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, +#endif /* CONFIG_IWL5000 */ + + {0} +}; +MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); + +static struct pci_driver iwl_driver = { + .name = DRV_NAME, + .id_table = iwl_hw_card_ids, + .probe = iwl_pci_probe, + .remove = 
__devexit_p(iwl_pci_remove), +#ifdef CONFIG_PM + .suspend = iwl_pci_suspend, + .resume = iwl_pci_resume, +#endif +}; + +static int __init iwl_init(void) +{ + + int ret; + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + + ret = iwlagn_rate_control_register(); + if (ret) { + printk(KERN_ERR DRV_NAME + "Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&iwl_driver); + if (ret) { + printk(KERN_ERR DRV_NAME "Unable to initialize PCI module\n"); + goto error_register; + } + + return ret; + +error_register: + iwlagn_rate_control_unregister(); + return ret; +} + +static void __exit iwl_exit(void) +{ + pci_unregister_driver(&iwl_driver); + iwlagn_rate_control_unregister(); +} + +module_exit(iwl_exit); +module_init(iwl_init); + +#ifdef CONFIG_IWLWIFI_DEBUG +module_param_named(debug50, iwl_debug_level, uint, S_IRUGO); +MODULE_PARM_DESC(debug50, "50XX debug output mask (deprecated)"); +module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif + diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c new file mode 100644 index 0000000..845831a --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-calib.c @@ -0,0 +1,901 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-calib.h" + +/***************************************************************************** + * INIT calibrations framework + *****************************************************************************/ + +struct statistics_general_data { + u32 beacon_silence_rssi_a; + u32 beacon_silence_rssi_b; + u32 beacon_silence_rssi_c; + u32 beacon_energy_a; + u32 beacon_energy_b; + u32 beacon_energy_c; +}; + +int iwl_send_calib_results(struct iwl_priv *priv) +{ + int ret = 0; + int i = 0; + + struct iwl_host_cmd hcmd = { + .id = REPLY_PHY_CALIBRATION_CMD, + .flags = CMD_SIZE_HUGE, + }; + + for (i = 0; i < IWL_CALIB_MAX; i++) { + if ((BIT(i) & priv->hw_params.calib_init_cfg) && + priv->calib_results[i].buf) { + hcmd.len = priv->calib_results[i].buf_len; + hcmd.data = priv->calib_results[i].buf; + ret = iwl_send_cmd_sync(priv, &hcmd); + if (ret) + goto err; + } + } + + return 0; +err: + IWL_ERR(priv, "Error %d iteration %d\n", ret, i); + return ret; +} +EXPORT_SYMBOL(iwl_send_calib_results); + +int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len) +{ + if (res->buf_len != len) { + kfree(res->buf); + res->buf = kzalloc(len, GFP_ATOMIC); + } + if (unlikely(res->buf == NULL)) + return -ENOMEM; + + res->buf_len = len; + memcpy(res->buf, buf, len); + return 0; +} +EXPORT_SYMBOL(iwl_calib_set); + +void iwl_calib_free_results(struct iwl_priv *priv) +{ + int i; + + for (i = 0; i < IWL_CALIB_MAX; i++) { + kfree(priv->calib_results[i].buf); + priv->calib_results[i].buf = NULL; + priv->calib_results[i].buf_len = 0; + } +} +EXPORT_SYMBOL(iwl_calib_free_results); + +/***************************************************************************** + * RUNTIME calibrations framework + *****************************************************************************/ + +/* "false alarms" are signals that our DSP tries to lock onto, + * but then determines that they are either noise, or transmissions + * from a distant wireless network (also "noise", really) that get + * "stepped on" by stronger transmissions within our own network. + * This algorithm attempts to set a sensitivity level that is high + * enough to receive all of our own network traffic, but not so + * high that our DSP gets too busy trying to lock onto non-network + * activity/noise. 
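+ *
+ * Roughly, the CCK handling implemented below boils down to the following
+ * simplified sketch (the real code additionally gates each step on the
+ * auto-correlation state and silence history, and nudges the auto_corr_cck
+ * thresholds in the same direction):
+ *
+ *   if (false_alarms > max_false_alarms)
+ *           nrg_th_cck -= NRG_STEP_CCK;      reduce sensitivity
+ *   else if (false_alarms < min_false_alarms)
+ *           nrg_th_cck += NRG_STEP_CCK;      increase sensitivity
+ *   else
+ *           keep the current threshold       healthy range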
*/ +static int iwl_sens_energy_cck(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time, + struct statistics_general_data *rx_info) +{ + u32 max_nrg_cck = 0; + int i = 0; + u8 max_silence_rssi = 0; + u32 silence_ref = 0; + u8 silence_rssi_a = 0; + u8 silence_rssi_b = 0; + u8 silence_rssi_c = 0; + u32 val; + + /* "false_alarms" values below are cross-multiplications to assess the + * numbers of false alarms within the measured period of actual Rx + * (Rx is off when we're txing), vs the min/max expected false alarms + * (some should be expected if rx is sensitive enough) in a + * hypothetical listening period of 200 time units (TU), 204.8 msec: + * + * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time + * + * */ + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; + u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + data = &(priv->sensitivity_data); + + data->nrg_auto_corr_silence_diff = 0; + + /* Find max silence rssi among all 3 receivers. + * This is background noise, which may include transmissions from other + * networks, measured during silence before our network's beacon */ + silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & + ALL_BAND_FILTER) >> 8); + silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & + ALL_BAND_FILTER) >> 8); + silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & + ALL_BAND_FILTER) >> 8); + + val = max(silence_rssi_b, silence_rssi_c); + max_silence_rssi = max(silence_rssi_a, (u8) val); + + /* Store silence rssi in 20-beacon history table */ + data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi; + data->nrg_silence_idx++; + if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L) + data->nrg_silence_idx = 0; + + /* Find max silence rssi across 20 beacon history */ + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) { + val = data->nrg_silence_rssi[i]; + silence_ref = max(silence_ref, val); + } + IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n", + silence_rssi_a, silence_rssi_b, silence_rssi_c, + silence_ref); + + /* Find max rx energy (min value!) among all 3 receivers, + * measured during beacon frame. + * Save it in 10-beacon history table. */ + i = data->nrg_energy_idx; + val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c); + data->nrg_value[i] = min(rx_info->beacon_energy_a, val); + + data->nrg_energy_idx++; + if (data->nrg_energy_idx >= 10) + data->nrg_energy_idx = 0; + + /* Find min rx energy (max value) across 10 beacon history. + * This is the minimum signal level that we want to receive well. + * Add backoff (margin so we don't miss slightly lower energy frames). + * This establishes an upper bound (min value) for energy threshold. */ + max_nrg_cck = data->nrg_value[0]; + for (i = 1; i < 10; i++) + max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); + max_nrg_cck += 6; + + IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", + rx_info->beacon_energy_a, rx_info->beacon_energy_b, + rx_info->beacon_energy_c, max_nrg_cck - 6); + + /* Count number of consecutive beacons with fewer-than-desired + * false alarms. 
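+ * (num_in_cck_no_fa below; once it exceeds MAX_NUMBER_CCK_NO_FA, i.e. 100
+ * beacons, the large-silence-difference test is no longer required before
+ * sensitivity is stepped back up.)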
*/ + if (false_alarms < min_false_alarms) + data->num_in_cck_no_fa++; + else + data->num_in_cck_no_fa = 0; + IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n", + data->num_in_cck_no_fa); + + /* If we got too many false alarms this time, reduce sensitivity */ + if ((false_alarms > max_false_alarms) && + (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { + IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n", + false_alarms, max_false_alarms); + IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n"); + data->nrg_curr_state = IWL_FA_TOO_MANY; + /* Store for "fewer than desired" on later beacon */ + data->nrg_silence_ref = silence_ref; + + /* increase energy threshold (reduce nrg value) + * to decrease sensitivity */ + data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK; + /* Else if we got fewer than desired, increase sensitivity */ + } else if (false_alarms < min_false_alarms) { + data->nrg_curr_state = IWL_FA_TOO_FEW; + + /* Compare silence level with silence level for most recent + * healthy number or too many false alarms */ + data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - + (s32)silence_ref; + + IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u, silence diff %d\n", + false_alarms, min_false_alarms, + data->nrg_auto_corr_silence_diff); + + /* Increase value to increase sensitivity, but only if: + * 1a) previous beacon did *not* have *too many* false alarms + * 1b) AND there's a significant difference in Rx levels + * from a previous beacon with too many, or healthy # FAs + * OR 2) We've seen a lot of beacons (100) with too few + * false alarms */ + if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + + IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n"); + /* Increase nrg value to increase sensitivity */ + val = data->nrg_th_cck + NRG_STEP_CCK; + data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); + } else { + IWL_DEBUG_CALIB(priv, "... but not changing sensitivity\n"); + } + + /* Else we got a healthy number of false alarms, keep status quo */ + } else { + IWL_DEBUG_CALIB(priv, " FA in safe zone\n"); + data->nrg_curr_state = IWL_FA_GOOD_RANGE; + + /* Store for use in "fewer than desired" with later beacon */ + data->nrg_silence_ref = silence_ref; + + /* If previous beacon had too many false alarms, + * give it some extra margin by reducing sensitivity again + * (but don't go below measured energy of desired Rx) */ + if (IWL_FA_TOO_MANY == data->nrg_prev_state) { + IWL_DEBUG_CALIB(priv, "... increasing margin\n"); + if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) + data->nrg_th_cck -= NRG_MARGIN; + else + data->nrg_th_cck = max_nrg_cck; + } + } + + /* Make sure the energy threshold does not go above the measured + * energy of the desired Rx signals (reduced by backoff margin), + * or else we might start missing Rx frames. + * Lower value is higher energy, so we use max()! 
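+ * For example (illustrative numbers): with max_nrg_cck = 96 after backoff
+ * and nrg_th_cck stepped down to 90 (a higher energy threshold), the max()
+ * below restores 96 so that our own beacons remain receivable.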
+ */ + data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); + IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck); + + data->nrg_prev_state = data->nrg_curr_state; + + /* Auto-correlation CCK algorithm */ + if (false_alarms > min_false_alarms) { + + /* increase auto_corr values to decrease sensitivity + * so the DSP won't be disturbed by the noise + */ + if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK) + data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1; + else { + val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; + data->auto_corr_cck = + min((u32)ranges->auto_corr_max_cck, val); + } + val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = + min((u32)ranges->auto_corr_max_cck_mrc, val); + } else if ((false_alarms < min_false_alarms) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + + /* Decrease auto_corr values to increase sensitivity */ + val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; + data->auto_corr_cck = + max((u32)ranges->auto_corr_min_cck, val); + val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = + max((u32)ranges->auto_corr_min_cck_mrc, val); + } + + return 0; +} + + +static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time) +{ + u32 val; + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; + u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + data = &(priv->sensitivity_data); + + /* If we got too many false alarms this time, reduce sensitivity */ + if (false_alarms > max_false_alarms) { + + IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n", + false_alarms, max_false_alarms); + + val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + min((u32)ranges->auto_corr_max_ofdm, val); + + val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + min((u32)ranges->auto_corr_max_ofdm_mrc, val); + + val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + min((u32)ranges->auto_corr_max_ofdm_x1, val); + + val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val); + } + + /* Else if we got fewer than desired, increase sensitivity */ + else if (false_alarms < min_false_alarms) { + + IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n", + false_alarms, min_false_alarms); + + val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + max((u32)ranges->auto_corr_min_ofdm, val); + + val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + max((u32)ranges->auto_corr_min_ofdm_mrc, val); + + val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + max((u32)ranges->auto_corr_min_ofdm_x1, val); + + val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); + } else { + IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n", + min_false_alarms, false_alarms, max_false_alarms); + } + return 0; +} + +/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ +static int iwl_sensitivity_write(struct iwl_priv *priv) +{ + struct iwl_sensitivity_cmd cmd ; + struct iwl_sensitivity_data *data = NULL; + struct iwl_host_cmd cmd_out = { + .id = SENSITIVITY_CMD, + .len = 
sizeof(struct iwl_sensitivity_cmd), + .flags = CMD_ASYNC, + .data = &cmd, + }; + + data = &(priv->sensitivity_data); + + memset(&cmd, 0, sizeof(cmd)); + + cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm); + cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc); + cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_x1); + cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); + + cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck); + cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck_mrc); + + cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_cck); + cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_ofdm); + + cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = + cpu_to_le16(data->barker_corr_th_min); + cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16(data->barker_corr_th_min_mrc); + cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] = + cpu_to_le16(data->nrg_th_cca); + + IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", + data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, + data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, + data->nrg_th_ofdm); + + IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n", + data->auto_corr_cck, data->auto_corr_cck_mrc, + data->nrg_th_cck); + + /* Update uCode's "work" table, and copy it to DSP */ + cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; + + /* Don't send command to uCode if nothing has changed */ + if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), + sizeof(u16)*HD_TABLE_SIZE)) { + IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); + return 0; + } + + /* Copy table for comparison next time */ + memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), + sizeof(u16)*HD_TABLE_SIZE); + + return iwl_send_cmd(priv, &cmd_out); +} + +void iwl_init_sensitivity(struct iwl_priv *priv) +{ + int ret = 0; + int i; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + if (priv->disable_sens_cal) + return; + + IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n"); + + /* Clear driver's sensitivity algo data */ + data = &(priv->sensitivity_data); + + if (ranges == NULL) + return; + + memset(data, 0, sizeof(struct iwl_sensitivity_data)); + + data->num_in_cck_no_fa = 0; + data->nrg_curr_state = IWL_FA_TOO_MANY; + data->nrg_prev_state = IWL_FA_TOO_MANY; + data->nrg_silence_ref = 0; + data->nrg_silence_idx = 0; + data->nrg_energy_idx = 0; + + for (i = 0; i < 10; i++) + data->nrg_value[i] = 0; + + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) + data->nrg_silence_rssi[i] = 0; + + data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; + data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; + data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; + data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; + data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; + data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; + data->nrg_th_cck = ranges->nrg_th_cck; + data->nrg_th_ofdm = ranges->nrg_th_ofdm; + data->barker_corr_th_min = ranges->barker_corr_th_min; + data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc; + data->nrg_th_cca = ranges->nrg_th_cca; + + data->last_bad_plcp_cnt_ofdm = 0; + data->last_fa_cnt_ofdm = 0; + data->last_bad_plcp_cnt_cck = 0; + data->last_fa_cnt_cck = 0; + + ret |= 
iwl_sensitivity_write(priv); + IWL_DEBUG_CALIB(priv, "<rx.general); + struct statistics_rx *statistics = &(resp->rx); + unsigned long flags; + struct statistics_general_data statis; + + if (priv->disable_sens_cal) + return; + + data = &(priv->sensitivity_data); + + if (!iwl_is_associated(priv)) { + IWL_DEBUG_CALIB(priv, "<< - not associated\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + /* Extract Statistics: */ + rx_enable_time = le32_to_cpu(rx_info->channel_load); + fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt); + fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt); + bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err); + bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err); + + statis.beacon_silence_rssi_a = + le32_to_cpu(statistics->general.beacon_silence_rssi_a); + statis.beacon_silence_rssi_b = + le32_to_cpu(statistics->general.beacon_silence_rssi_b); + statis.beacon_silence_rssi_c = + le32_to_cpu(statistics->general.beacon_silence_rssi_c); + statis.beacon_energy_a = + le32_to_cpu(statistics->general.beacon_energy_a); + statis.beacon_energy_b = + le32_to_cpu(statistics->general.beacon_energy_b); + statis.beacon_energy_c = + le32_to_cpu(statistics->general.beacon_energy_c); + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); + + if (!rx_enable_time) { + IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0! \n"); + return; + } + + /* These statistics increase monotonically, and do not reset + * at each beacon. Calculate difference from last value, or just + * use the new statistics value if it has reset or wrapped around. */ + if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) + data->last_bad_plcp_cnt_cck = bad_plcp_cck; + else { + bad_plcp_cck -= data->last_bad_plcp_cnt_cck; + data->last_bad_plcp_cnt_cck += bad_plcp_cck; + } + + if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) + data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; + else { + bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; + data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm; + } + + if (data->last_fa_cnt_ofdm > fa_ofdm) + data->last_fa_cnt_ofdm = fa_ofdm; + else { + fa_ofdm -= data->last_fa_cnt_ofdm; + data->last_fa_cnt_ofdm += fa_ofdm; + } + + if (data->last_fa_cnt_cck > fa_cck) + data->last_fa_cnt_cck = fa_cck; + else { + fa_cck -= data->last_fa_cnt_cck; + data->last_fa_cnt_cck += fa_cck; + } + + /* Total aborted signal locks */ + norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; + norm_fa_cck = fa_cck + bad_plcp_cck; + + IWL_DEBUG_CALIB(priv, "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, + bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); + + iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); + iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); + iwl_sensitivity_write(priv); + + return; +} +EXPORT_SYMBOL(iwl_sensitivity_calibration); + +static inline u8 find_first_chain(u8 mask) +{ + if (mask & ANT_A) + return CHAIN_A; + if (mask & ANT_B) + return CHAIN_B; + return CHAIN_C; +} + +/* + * Accumulate 20 beacons of signal and noise statistics for each of + * 3 receivers/antennas/rx-chains, then figure out: + * 1) Which antennas are connected. + * 2) Differential rx gain settings to balance the 3 receivers. 
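+ *
+ * Flow of iwl_chain_noise_calibration() below, in brief: per-chain signal
+ * and silence (noise) readings are summed from each beacon until
+ * beacon_count reaches cfg->chain_noise_num_beacons, the sums are averaged,
+ * the chain with the strongest average signal becomes the reference, and
+ * each remaining chain is judged by its rssi_delta from that reference.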
+ */ +void iwl_chain_noise_calibration(struct iwl_priv *priv, + struct iwl_notif_statistics *stat_resp) +{ + struct iwl_chain_noise_data *data = NULL; + + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_sig_a; + u32 chain_sig_b; + u32 chain_sig_c; + u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 max_average_sig; + u16 max_average_sig_antenna_i; + u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; + u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; + u16 i = 0; + u16 rxon_chnum = INITIALIZATION_VALUE; + u16 stat_chnum = INITIALIZATION_VALUE; + u8 rxon_band24; + u8 stat_band24; + u32 active_chains = 0; + u8 num_tx_chains; + unsigned long flags; + struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general); + u8 first_chain; + + if (priv->disable_chain_noise_cal) + return; + + data = &(priv->chain_noise_data); + + /* + * Accumulate just the first "chain_noise_num_beacons" after + * the first association, then we're done forever. + */ + if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { + if (data->state == IWL_CHAIN_NOISE_ALIVE) + IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + rxon_band24 = !!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK); + rxon_chnum = le16_to_cpu(priv->staging_rxon.channel); + stat_band24 = !!(stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK); + stat_chnum = le32_to_cpu(stat_resp->flag) >> 16; + + /* Make sure we accumulate data for just the associated channel + * (even if scanning). 
*/ + if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { + IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n", + rxon_chnum, rxon_band24); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + /* + * Accumulate beacon statistics values across + * "chain_noise_num_beacons" + */ + chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & + IN_BAND_FILTER; + chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & + IN_BAND_FILTER; + chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & + IN_BAND_FILTER; + + chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; + chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; + chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; + + spin_unlock_irqrestore(&priv->lock, flags); + + data->beacon_count++; + + data->chain_noise_a = (chain_noise_a + data->chain_noise_a); + data->chain_noise_b = (chain_noise_b + data->chain_noise_b); + data->chain_noise_c = (chain_noise_c + data->chain_noise_c); + + data->chain_signal_a = (chain_sig_a + data->chain_signal_a); + data->chain_signal_b = (chain_sig_b + data->chain_signal_b); + data->chain_signal_c = (chain_sig_c + data->chain_signal_c); + + IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n", + rxon_chnum, rxon_band24, data->beacon_count); + IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n", + chain_sig_a, chain_sig_b, chain_sig_c); + IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", + chain_noise_a, chain_noise_b, chain_noise_c); + + /* If this is the "chain_noise_num_beacons", determine: + * 1) Disconnected antennas (using signal strengths) + * 2) Differential gain (using silence noise) to balance receivers */ + if (data->beacon_count != priv->cfg->chain_noise_num_beacons) + return; + + /* Analyze signal for disconnected antenna */ + average_sig[0] = + (data->chain_signal_a) / priv->cfg->chain_noise_num_beacons; + average_sig[1] = + (data->chain_signal_b) / priv->cfg->chain_noise_num_beacons; + average_sig[2] = + (data->chain_signal_c) / priv->cfg->chain_noise_num_beacons; + + if (average_sig[0] >= average_sig[1]) { + max_average_sig = average_sig[0]; + max_average_sig_antenna_i = 0; + active_chains = (1 << max_average_sig_antenna_i); + } else { + max_average_sig = average_sig[1]; + max_average_sig_antenna_i = 1; + active_chains = (1 << max_average_sig_antenna_i); + } + + if (average_sig[2] >= max_average_sig) { + max_average_sig = average_sig[2]; + max_average_sig_antenna_i = 2; + active_chains = (1 << max_average_sig_antenna_i); + } + + IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n", + average_sig[0], average_sig[1], average_sig[2]); + IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n", + max_average_sig, max_average_sig_antenna_i); + + /* Compare signal strengths for all 3 receivers. */ + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (i != max_average_sig_antenna_i) { + s32 rssi_delta = (max_average_sig - average_sig[i]); + + /* If signal is very weak, compared with + * strongest, mark it as disconnected. 
*/ + if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS) + data->disconn_array[i] = 1; + else + active_chains |= (1 << i); + IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d " + "disconn_array[i] = %d\n", + i, rssi_delta, data->disconn_array[i]); + } + } + + num_tx_chains = 0; + for (i = 0; i < NUM_RX_CHAINS; i++) { + /* loops on all the bits of + * priv->hw_setting.valid_tx_ant */ + u8 ant_msk = (1 << i); + if (!(priv->hw_params.valid_tx_ant & ant_msk)) + continue; + + num_tx_chains++; + if (data->disconn_array[i] == 0) + /* there is a Tx antenna connected */ + break; + if (num_tx_chains == priv->hw_params.tx_chains_num && + data->disconn_array[i]) { + /* + * If all chains are disconnected + * connect the first valid tx chain + */ + first_chain = + find_first_chain(priv->cfg->valid_tx_ant); + data->disconn_array[first_chain] = 0; + active_chains |= BIT(first_chain); + IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - declare %d as connected\n", + first_chain); + break; + } + } + + /* Save for use within RXON, TX, SCAN commands, etc. */ + priv->chain_noise_data.active_chains = active_chains; + IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", + active_chains); + + /* Analyze noise for rx balance */ + average_noise[0] = + ((data->chain_noise_a) / priv->cfg->chain_noise_num_beacons); + average_noise[1] = + ((data->chain_noise_b) / priv->cfg->chain_noise_num_beacons); + average_noise[2] = + ((data->chain_noise_c) / priv->cfg->chain_noise_num_beacons); + + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (!(data->disconn_array[i]) && + (average_noise[i] <= min_average_noise)) { + /* This means that chain i is active and has + * lower noise values so far: */ + min_average_noise = average_noise[i]; + min_average_noise_antenna_i = i; + } + } + + IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n", + average_noise[0], average_noise[1], + average_noise[2]); + + IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", + min_average_noise, min_average_noise_antenna_i); + + if (priv->cfg->ops->utils->gain_computation) + priv->cfg->ops->utils->gain_computation(priv, average_noise, + min_average_noise_antenna_i, min_average_noise, + find_first_chain(priv->cfg->valid_rx_ant)); + + /* Some power changes may have been made during the calibration. + * Update and commit the RXON + */ + if (priv->cfg->ops->lib->update_chain_flags) + priv->cfg->ops->lib->update_chain_flags(priv); + + data->state = IWL_CHAIN_NOISE_DONE; + iwl_power_update_mode(priv, false); +} +EXPORT_SYMBOL(iwl_chain_noise_calibration); + + +void iwl_reset_run_time_calib(struct iwl_priv *priv) +{ + int i; + memset(&(priv->sensitivity_data), 0, + sizeof(struct iwl_sensitivity_data)); + memset(&(priv->chain_noise_data), 0, + sizeof(struct iwl_chain_noise_data)); + for (i = 0; i < NUM_RX_CHAINS; i++) + priv->chain_noise_data.delta_gain_code[i] = + CHAIN_NOISE_DELTA_GAIN_INIT_VAL; + + /* Ask for statistics now, the uCode will send notification + * periodically after association */ + iwl_send_statistics_request(priv, CMD_ASYNC, true); +} +EXPORT_SYMBOL(iwl_reset_run_time_calib); + diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h new file mode 100644 index 0000000..2b7b1df --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-calib.h @@ -0,0 +1,84 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ +#ifndef __iwl_calib_h__ +#define __iwl_calib_h__ + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-commands.h" + +void iwl_chain_noise_calibration(struct iwl_priv *priv, + struct iwl_notif_statistics *stat_resp); +void iwl_sensitivity_calibration(struct iwl_priv *priv, + struct iwl_notif_statistics *resp); + +void iwl_init_sensitivity(struct iwl_priv *priv); +void iwl_reset_run_time_calib(struct iwl_priv *priv); +static inline void iwl_chain_noise_reset(struct iwl_priv *priv) +{ + + if (!priv->disable_chain_noise_cal && + priv->cfg->ops->utils->chain_noise_reset) + priv->cfg->ops->utils->chain_noise_reset(priv); +} + +#endif /* __iwl_calib_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h new file mode 100644 index 0000000..6383d9f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -0,0 +1,3817 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-commands.h) only for uCode API definitions. + * Please use iwl-4965-hw.h for hardware-related definitions. + * Please use iwl-dev.h for driver implementation definitions. + */ + +#ifndef __iwl_commands_h__ +#define __iwl_commands_h__ + +struct iwl_priv; + +/* uCode version contains 4 values: Major/Minor/API/Serial */ +#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) +#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) +#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) +#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) + + +/* Tx rates */ +#define IWL_CCK_RATES 4 +#define IWL_OFDM_RATES 8 +#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES) + +enum { + REPLY_ALIVE = 0x1, + REPLY_ERROR = 0x2, + + /* RXON and QOS commands */ + REPLY_RXON = 0x10, + REPLY_RXON_ASSOC = 0x11, + REPLY_QOS_PARAM = 0x13, + REPLY_RXON_TIMING = 0x14, + + /* Multi-Station support */ + REPLY_ADD_STA = 0x18, + REPLY_REMOVE_STA = 0x19, /* not used */ + REPLY_REMOVE_ALL_STA = 0x1a, /* not used */ + + /* Security */ + REPLY_WEPKEY = 0x20, + + /* RX, TX, LEDs */ + REPLY_3945_RX = 0x1b, /* 3945 only */ + REPLY_TX = 0x1c, + REPLY_RATE_SCALE = 0x47, /* 3945 only */ + REPLY_LEDS_CMD = 0x48, + REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ + + /* WiMAX coexistence */ + COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */ + COEX_MEDIUM_NOTIFICATION = 0x5b, + COEX_EVENT_CMD = 0x5c, + + /* Calibration */ + TEMPERATURE_NOTIFICATION = 0x62, + CALIBRATION_CFG_CMD = 0x65, + CALIBRATION_RES_NOTIFICATION = 0x66, + CALIBRATION_COMPLETE_NOTIFICATION = 0x67, + + /* 802.11h related */ + REPLY_QUIET_CMD = 0x71, /* not used */ + REPLY_CHANNEL_SWITCH = 0x72, + CHANNEL_SWITCH_NOTIFICATION = 0x73, + REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, + SPECTRUM_MEASURE_NOTIFICATION = 0x75, + + /* Power Management */ + POWER_TABLE_CMD = 0x77, + PM_SLEEP_NOTIFICATION = 0x7A, + PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, + + /* Scan commands and notifications */ + REPLY_SCAN_CMD = 0x80, + REPLY_SCAN_ABORT_CMD = 0x81, + SCAN_START_NOTIFICATION = 0x82, + SCAN_RESULTS_NOTIFICATION = 0x83, + SCAN_COMPLETE_NOTIFICATION = 0x84, + + /* IBSS/AP commands */ + BEACON_NOTIFICATION = 0x90, + REPLY_TX_BEACON = 0x91, + WHO_IS_AWAKE_NOTIFICATION = 0x94, /* not used */ + + /* Miscellaneous commands */ + REPLY_TX_POWER_DBM_CMD = 0x95, + QUIET_NOTIFICATION = 0x96, /* not used */ + REPLY_TX_PWR_TABLE_CMD = 0x97, + REPLY_TX_POWER_DBM_CMD_V1 = 0x98, /* old version of API */ + TX_ANT_CONFIGURATION_CMD = 0x98, + MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */ + + /* Bluetooth device coexistence config command */ + REPLY_BT_CONFIG = 0x9b, + + /* Statistics */ + REPLY_STATISTICS_CMD = 0x9c, + STATISTICS_NOTIFICATION = 0x9d, + + /* RF-KILL commands and notifications */ + REPLY_CARD_STATE_CMD = 0xa0, + CARD_STATE_NOTIFICATION = 0xa1, + + /* Missed beacons notification */ + MISSED_BEACONS_NOTIFICATION = 0xa2, + + 
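+ /* Thermal (CT kill), sensitivity/PHY calibration, Rx and block-ack */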
REPLY_CT_KILL_CONFIG_CMD = 0xa4, + SENSITIVITY_CMD = 0xa8, + REPLY_PHY_CALIBRATION_CMD = 0xb0, + REPLY_RX_PHY_CMD = 0xc0, + REPLY_RX_MPDU_CMD = 0xc1, + REPLY_RX = 0xc3, + REPLY_COMPRESSED_BA = 0xc5, + REPLY_MAX = 0xff +}; + +/****************************************************************************** + * (0) + * Commonly used structures and definitions: + * Command header, rate_n_flags, txpower + * + *****************************************************************************/ + +/* iwl_cmd_header flags value */ +#define IWL_CMD_FAILED_MSK 0x40 + +#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) +#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) +#define SEQ_TO_INDEX(s) ((s) & 0xff) +#define INDEX_TO_SEQ(i) ((i) & 0xff) +#define SEQ_HUGE_FRAME cpu_to_le16(0x4000) +#define SEQ_RX_FRAME cpu_to_le16(0x8000) + +/** + * struct iwl_cmd_header + * + * This header format appears in the beginning of each command sent from the + * driver, and each response/notification received from uCode. + */ +struct iwl_cmd_header { + u8 cmd; /* Command ID: REPLY_RXON, etc. */ + u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ + /* + * The driver sets up the sequence number to values of its choosing. + * uCode does not use this value, but passes it back to the driver + * when sending the response to each driver-originated command, so + * the driver can match the response to the command. Since the values + * don't get used by uCode, the driver may set up an arbitrary format. + * + * There is one exception: uCode sets bit 15 when it originates + * the response/notification, i.e. when the response/notification + * is not a direct response to a command sent by the driver. For + * example, uCode issues REPLY_3945_RX when it sends a received frame + * to the driver; it is not a direct response to any driver command. + * + * The Linux driver uses the following format: + * + * 0:7 tfd index - position within TX queue + * 8:12 TX queue id + * 13 reserved + * 14 huge - driver sets this to indicate command is in the + * 'huge' storage at the end of the command buffers + * 15 unsolicited RX or uCode-originated notification + */ + __le16 sequence; + + /* command or response/notification data follows immediately */ + u8 data[0]; +} __attribute__ ((packed)); + + +/** + * struct iwl3945_tx_power + * + * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH + * + * Each entry contains two values: + * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained + * linear value that multiplies the output of the digital signal processor, + * before being sent to the analog radio. + * 2) Radio gain. This sets the analog gain of the radio Tx path. + * It is a coarser setting, and behaves in a logarithmic (dB) fashion. + * + * Driver obtains values from struct iwl3945_tx_power power_gain_table[][]. 
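+ *
+ * A single entry would be filled in along these lines (the values here
+ * are made up purely for illustration, not real calibration data):
+ *
+ *	struct iwl3945_tx_power tpc = {
+ *		.tx_gain = 0x28,	-- coarse analog radio gain
+ *		.dsp_atten = 0x60,	-- fine-grained DSP attenuation
+ *	};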
+ */ +struct iwl3945_tx_power { + u8 tx_gain; /* gain for analog radio */ + u8 dsp_atten; /* gain for DSP */ +} __attribute__ ((packed)); + +/** + * struct iwl3945_power_per_rate + * + * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + */ +struct iwl3945_power_per_rate { + u8 rate; /* plcp */ + struct iwl3945_tx_power tpc; + u8 reserved; +} __attribute__ ((packed)); + +/** + * iwlagn rate_n_flags bit fields + * + * rate_n_flags format is used in following iwlagn commands: + * REPLY_RX (response only) + * REPLY_RX_MPDU (response only) + * REPLY_TX (both command and response) + * REPLY_TX_LINK_QUALITY_CMD + * + * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): + * 2-0: 0) 6 Mbps + * 1) 12 Mbps + * 2) 18 Mbps + * 3) 24 Mbps + * 4) 36 Mbps + * 5) 48 Mbps + * 6) 54 Mbps + * 7) 60 Mbps + * + * 4-3: 0) Single stream (SISO) + * 1) Dual stream (MIMO) + * 2) Triple stream (MIMO) + * + * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data + * + * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"): + * 3-0: 0xD) 6 Mbps + * 0xF) 9 Mbps + * 0x5) 12 Mbps + * 0x7) 18 Mbps + * 0x9) 24 Mbps + * 0xB) 36 Mbps + * 0x1) 48 Mbps + * 0x3) 54 Mbps + * + * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"): + * 6-0: 10) 1 Mbps + * 20) 2 Mbps + * 55) 5.5 Mbps + * 110) 11 Mbps + */ +#define RATE_MCS_CODE_MSK 0x7 +#define RATE_MCS_SPATIAL_POS 3 +#define RATE_MCS_SPATIAL_MSK 0x18 +#define RATE_MCS_HT_DUP_POS 5 +#define RATE_MCS_HT_DUP_MSK 0x20 + +/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */ +#define RATE_MCS_FLAGS_POS 8 +#define RATE_MCS_HT_POS 8 +#define RATE_MCS_HT_MSK 0x100 + +/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ +#define RATE_MCS_CCK_POS 9 +#define RATE_MCS_CCK_MSK 0x200 + +/* Bit 10: (1) Use Green Field preamble */ +#define RATE_MCS_GF_POS 10 +#define RATE_MCS_GF_MSK 0x400 + +/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */ +#define RATE_MCS_HT40_POS 11 +#define RATE_MCS_HT40_MSK 0x800 + +/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */ +#define RATE_MCS_DUP_POS 12 +#define RATE_MCS_DUP_MSK 0x1000 + +/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ +#define RATE_MCS_SGI_POS 13 +#define RATE_MCS_SGI_MSK 0x2000 + +/** + * rate_n_flags Tx antenna masks + * 4965 has 2 transmitters + * 5100 has 1 transmitter B + * 5150 has 1 transmitter A + * 5300 has 3 transmitters + * 5350 has 3 transmitters + * bit14:16 + */ +#define RATE_MCS_ANT_POS 14 +#define RATE_MCS_ANT_A_MSK 0x04000 +#define RATE_MCS_ANT_B_MSK 0x08000 +#define RATE_MCS_ANT_C_MSK 0x10000 +#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK) +#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK) +#define RATE_ANT_NUM 3 + +#define POWER_TABLE_NUM_ENTRIES 33 +#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 +#define POWER_TABLE_CCK_ENTRY 32 + +#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 +#define IWL_PWR_CCK_ENTRIES 2 + +/** + * union iwl4965_tx_power_dual_stream + * + * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * Use __le32 version (struct tx_power_dual_stream) when building command. + * + * Driver provides radio gain and DSP attenuation settings to device in pairs, + * one value for each transmitter chain. The first value is for transmitter A, + * second for transmitter B. + * + * For SISO bit rates, both values in a pair should be identical. 
+ * For MIMO rates, one value may be different from the other, + * in order to balance the Tx output between the two transmitters. + * + * See more details in doc for TXPOWER in iwl-4965-hw.h. + */ +union iwl4965_tx_power_dual_stream { + struct { + u8 radio_tx_gain[2]; + u8 dsp_predis_atten[2]; + } s; + u32 dw; +}; + +/** + * struct tx_power_dual_stream + * + * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * + * Same format as iwl_tx_power_dual_stream, but __le32 + */ +struct tx_power_dual_stream { + __le32 dw; +} __attribute__ ((packed)); + +/** + * struct iwl4965_tx_power_db + * + * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + */ +struct iwl4965_tx_power_db { + struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; +} __attribute__ ((packed)); + +/** + * Command REPLY_TX_POWER_DBM_CMD = 0x98 + * struct iwl5000_tx_power_dbm_cmd + */ +#define IWL50_TX_POWER_AUTO 0x7f +#define IWL50_TX_POWER_NO_CLOSED (0x1 << 6) + +struct iwl5000_tx_power_dbm_cmd { + s8 global_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ + u8 flags; + s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ + u8 reserved; +} __attribute__ ((packed)); + +/** + * Command TX_ANT_CONFIGURATION_CMD = 0x98 + * This command is used to configure valid Tx antenna. + * By default uCode concludes the valid antenna according to the radio flavor. + * This command enables the driver to override/modify this conclusion. + */ +struct iwl_tx_ant_config_cmd { + __le32 valid; +} __attribute__ ((packed)); + +/****************************************************************************** + * (0a) + * Alive and Error Commands & Responses: + * + *****************************************************************************/ + +#define UCODE_VALID_OK cpu_to_le32(0x1) +#define INITIALIZE_SUBTYPE (9) + +/* + * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command) + * + * uCode issues this "initialize alive" notification once the initialization + * uCode image has completed its work, and is ready to load the runtime image. + * This is the *first* "alive" notification that the driver will receive after + * rebooting uCode; the "initialize" alive is indicated by subtype field == 9. + * + * See comments documenting "BSM" (bootstrap state machine). + * + * For 4965, this notification contains important calibration data for + * calculating txpower settings: + * + * 1) Power supply voltage indication. The voltage sensor outputs higher + * values for lower voltage, and vice verse. + * + * 2) Temperature measurement parameters, for each of two channel widths + * (20 MHz and 40 MHz) supported by the radios. Temperature sensing + * is done via one of the receiver chains, and channel width influences + * the results. + * + * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, + * for each of 5 frequency ranges. 
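+ *
+ * The two "alive" notifications are told apart by the subtype; a minimal
+ * driver-side check (illustrative only, "payload" stands in for the
+ * received notification buffer):
+ *
+ *	struct iwl_init_alive_resp *resp = payload;
+ *	bool is_init_alive = (resp->ver_subtype == INITIALIZE_SUBTYPE);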
+ */ +struct iwl_init_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; /* "9" for initialize alive */ + __le16 reserved2; + __le32 log_event_table_ptr; + __le32 error_event_table_ptr; + __le32 timestamp; + __le32 is_valid; + + /* calibration values from "initialize" uCode */ + __le32 voltage; /* signed, higher value is lower voltage */ + __le32 therm_r1[2]; /* signed, 1st for normal, 2nd for HT40 */ + __le32 therm_r2[2]; /* signed */ + __le32 therm_r3[2]; /* signed */ + __le32 therm_r4[2]; /* signed */ + __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups, + * 2 Tx chains */ +} __attribute__ ((packed)); + + +/** + * REPLY_ALIVE = 0x1 (response only, not a command) + * + * uCode issues this "alive" notification once the runtime image is ready + * to receive commands from the driver. This is the *second* "alive" + * notification that the driver will receive after rebooting uCode; + * this "alive" is indicated by subtype field != 9. + * + * See comments documenting "BSM" (bootstrap state machine). + * + * This response includes two pointers to structures within the device's + * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging: + * + * 1) log_event_table_ptr indicates base of the event log. This traces + * a 256-entry history of uCode execution within a circular buffer. + * Its header format is: + * + * __le32 log_size; log capacity (in number of entries) + * __le32 type; (1) timestamp with each entry, (0) no timestamp + * __le32 wraps; # times uCode has wrapped to top of circular buffer + * __le32 write_index; next circular buffer entry that uCode would fill + * + * The header is followed by the circular buffer of log entries. Entries + * with timestamps have the following format: + * + * __le32 event_id; range 0 - 1500 + * __le32 timestamp; low 32 bits of TSF (of network, if associated) + * __le32 data; event_id-specific data value + * + * Entries without timestamps contain only event_id and data. + * + * 2) error_event_table_ptr indicates base of the error log. This contains + * information about any uCode error that occurs. For 4965, the format + * of the error log is: + * + * __le32 valid; (nonzero) valid, (0) log is empty + * __le32 error_id; type of error + * __le32 pc; program counter + * __le32 blink1; branch link + * __le32 blink2; branch link + * __le32 ilink1; interrupt link + * __le32 ilink2; interrupt link + * __le32 data1; error-specific data + * __le32 data2; error-specific data + * __le32 line; source code line of error + * __le32 bcon_time; beacon timer + * __le32 tsf_low; network timestamp function timer + * __le32 tsf_hi; network timestamp function timer + * + * The Linux driver can print both logs to the system log when a uCode error + * occurs. 
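+ *
+ * The event log header described above, written out as a structure for
+ * reference (illustrative; the driver actually reads these words one at
+ * a time through the HBUS_TARG_MEM_* registers):
+ *
+ *	struct {
+ *		__le32 log_size;	-- capacity, in entries
+ *		__le32 type;		-- 1: entries carry timestamps
+ *		__le32 wraps;		-- times the circular buffer wrapped
+ *		__le32 write_index;	-- next entry uCode will fill
+ *	} log_header;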
+ */ +struct iwl_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; /* not "9" for runtime alive */ + __le16 reserved2; + __le32 log_event_table_ptr; /* SRAM address for event log */ + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 timestamp; + __le32 is_valid; +} __attribute__ ((packed)); + +/* + * REPLY_ERROR = 0x2 (response only, not a command) + */ +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; + __le32 error_info; + __le64 timestamp; +} __attribute__ ((packed)); + +/****************************************************************************** + * (1) + * RXON Commands & Responses: + * + *****************************************************************************/ + +/* + * Rx config defines & structure + */ +/* rx_config device types */ +enum { + RXON_DEV_TYPE_AP = 1, + RXON_DEV_TYPE_ESS = 3, + RXON_DEV_TYPE_IBSS = 4, + RXON_DEV_TYPE_SNIFFER = 6, +}; + + +#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) +#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0) +#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) +#define RXON_RX_CHAIN_VALID_POS (1) +#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4) +#define RXON_RX_CHAIN_FORCE_SEL_POS (4) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10) +#define RXON_RX_CHAIN_CNT_POS (10) +#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12) +#define RXON_RX_CHAIN_MIMO_CNT_POS (12) +#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14) +#define RXON_RX_CHAIN_MIMO_FORCE_POS (14) + +/* rx_config flags */ +/* band & modulation selection */ +#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0) +#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1) +/* auto detection enable */ +#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2) +/* TGg protection when tx */ +#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3) +/* cck short slot & preamble */ +#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4) +#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5) +/* antenna selection */ +#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7) +#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00) +#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8) +#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9) +/* radar detection enable */ +#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12) +#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13) +/* rx response to host with 8-byte TSF +* (according to ON_AIR deassertion) */ +#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15) + + +/* HT flags */ +#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) +#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22) + +#define RXON_FLG_HT_OPERATING_MODE_POS (23) + +#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23) +#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23) + +#define RXON_FLG_CHANNEL_MODE_POS (25) +#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25) + +/* channel mode */ +enum { + CHANNEL_MODE_LEGACY = 0, + CHANNEL_MODE_PURE_40 = 1, + CHANNEL_MODE_MIXED = 2, + CHANNEL_MODE_RESERVED = 3, +}; +#define RXON_FLG_CHANNEL_MODE_LEGACY cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS) +#define RXON_FLG_CHANNEL_MODE_PURE_40 cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS) +#define RXON_FLG_CHANNEL_MODE_MIXED cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS) + +/* CTS to self (if spec allows) 
flag */ +#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30) + +/* rx_config filter flags */ +/* accept all data frames */ +#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0) +/* pass control & management to host */ +#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1) +/* accept multi-cast */ +#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2) +/* don't decrypt uni-cast frames */ +#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3) +/* don't decrypt multi-cast frames */ +#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4) +/* STA is associated */ +#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5) +/* transfer to host non bssid beacons in associated state */ +#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) + +/** + * REPLY_RXON = 0x10 (command, has simple generic response) + * + * RXON tunes the radio tuner to a service channel, and sets up a number + * of parameters that are used primarily for Rx, but also for Tx operations. + * + * NOTE: When tuning to a new channel, driver must set the + * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent + * info within the device, including the station tables, tx retry + * rate tables, and txpower tables. Driver must build a new station + * table and txpower table before transmitting anything on the RXON + * channel. + * + * NOTE: All RXONs wipe clean the internal txpower table. Driver must + * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), + * regardless of whether RXON_FILTER_ASSOC_MSK is set. + */ + +struct iwl3945_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 reserved4; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + __le16 reserved5; +} __attribute__ ((packed)); + +struct iwl4965_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 rx_chain; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; +} __attribute__ ((packed)); + +/* 5000 HW just extend this command */ +struct iwl_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 rx_chain; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + u8 ofdm_ht_triple_stream_basic_rates; + u8 reserved5; + __le16 acquisition_data; + __le16 reserved6; +} __attribute__ ((packed)); + +/* + * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) + */ +struct iwl3945_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 reserved; +} __attribute__ ((packed)); + +struct iwl4965_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + __le16 rx_chain_select_flags; + __le16 reserved; +} __attribute__ ((packed)); + +struct iwl5000_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + 
__le16 reserved1; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + u8 ofdm_ht_triple_stream_basic_rates; + u8 reserved2; + __le16 rx_chain_select_flags; + __le16 acquisition_data; + __le32 reserved3; +} __attribute__ ((packed)); + +#define IWL_CONN_MAX_LISTEN_INTERVAL 10 +#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ +#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */ + +/* + * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) + */ +struct iwl_rxon_time_cmd { + __le64 timestamp; + __le16 beacon_interval; + __le16 atim_window; + __le32 beacon_init_val; + __le16 listen_interval; + __le16 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) + */ +struct iwl3945_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + struct iwl3945_power_per_rate power[IWL_MAX_RATES]; +} __attribute__ ((packed)); + +struct iwl4965_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + struct iwl4965_tx_power_db tx_power; +} __attribute__ ((packed)); + +/** + * struct iwl5000_channel_switch_cmd + * @band: 0- 5.2GHz, 1- 2.4GHz + * @expect_beacon: 0- resume transmits after channel switch + * 1- wait for beacon to resume transmits + * @channel: new channel number + * @rxon_flags: Rx on flags + * @rxon_filter_flags: filtering parameters + * @switch_time: switch time in extended beacon format + * @reserved: reserved bytes + */ +struct iwl5000_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; +} __attribute__ ((packed)); + +/** + * struct iwl6000_channel_switch_cmd + * @band: 0- 5.2GHz, 1- 2.4GHz + * @expect_beacon: 0- resume transmits after channel switch + * 1- wait for beacon to resume transmits + * @channel: new channel number + * @rxon_flags: Rx on flags + * @rxon_filter_flags: filtering parameters + * @switch_time: switch time in extended beacon format + * @reserved: reserved bytes + */ +struct iwl6000_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; +} __attribute__ ((packed)); + +/* + * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) + */ +struct iwl_csa_notification { + __le16 band; + __le16 channel; + __le32 status; /* 0 - OK, 1 - fail */ +} __attribute__ ((packed)); + +/****************************************************************************** + * (2) + * Quality-of-Service (QOS) Commands & Responses: + * + *****************************************************************************/ + +/** + * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM + * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd + * + * @cw_min: Contention window, start value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x0f. + * @cw_max: Contention window, max value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x3f. + * @aifsn: Number of slots in Arbitration Interframe Space (before + * performing random backoff timing prior to Tx). Device default 1. + * @edca_txop: Length of Tx opportunity, in uSecs. 
Device default is 0. + * + * Device will automatically increase contention window by (2*CW) + 1 for each + * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW + * value, to cap the CW value. + */ +struct iwl_ac_qos { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 reserved1; + __le16 edca_txop; +} __attribute__ ((packed)); + +/* QoS flags defines */ +#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01) +#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02) +#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10) + +/* Number of Access Categories (AC) (EDCA), queues 0..3 */ +#define AC_NUM 4 + +/* + * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) + * + * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs + * 0: Background, 1: Best Effort, 2: Video, 3: Voice. + */ +struct iwl_qosparam_cmd { + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM]; +} __attribute__ ((packed)); + +/****************************************************************************** + * (3) + * Add/Modify Stations Commands & Responses: + * + *****************************************************************************/ +/* + * Multi station support + */ + +/* Special, dedicated locations within device's station table */ +#define IWL_AP_ID 0 +#define IWL_MULTICAST_ID 1 +#define IWL_STA_ID 2 +#define IWL3945_BROADCAST_ID 24 +#define IWL3945_STATION_COUNT 25 +#define IWL4965_BROADCAST_ID 31 +#define IWL4965_STATION_COUNT 32 +#define IWL5000_BROADCAST_ID 15 +#define IWL5000_STATION_COUNT 16 + +#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ +#define IWL_INVALID_STATION 255 + +#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2); +#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8); +#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17) +#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18) +#define STA_FLG_MAX_AGG_SIZE_POS (19) +#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19) +#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21) +#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22) +#define STA_FLG_AGG_MPDU_DENSITY_POS (23) +#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23) + +/* Use in mode field. 1: modify existing entry, 0: add new station entry */ +#define STA_CONTROL_MODIFY_MSK 0x01 + +/* key flags __le16*/ +#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007) +#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000) +#define STA_KEY_FLG_WEP cpu_to_le16(0x0001) +#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002) +#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003) + +#define STA_KEY_FLG_KEYID_POS 8 +#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800) +/* wep key is either from global key (0) or from station info array (1) */ +#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008) + +/* wep key in STA: 5-bytes (0) or 13-bytes (1) */ +#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000) +#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000) +#define STA_KEY_MAX_NUM 8 + +/* Flags indicate whether to modify vs. 
don't change various station params */ +#define STA_MODIFY_KEY_MASK 0x01 +#define STA_MODIFY_TID_DISABLE_TX 0x02 +#define STA_MODIFY_TX_RATE_MSK 0x04 +#define STA_MODIFY_ADDBA_TID_MSK 0x08 +#define STA_MODIFY_DELBA_TID_MSK 0x10 +#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 + +/* Receiver address (actually, Rx station's index into station table), + * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ +#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + +struct iwl4965_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ + u8 reserved1; + __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */ + u8 key_offset; + u8 reserved2; + u8 key[16]; /* 16-byte unicast decryption key */ +} __attribute__ ((packed)); + +/* 5000 */ +struct iwl_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ + u8 reserved1; + __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */ + u8 key_offset; + u8 reserved2; + u8 key[16]; /* 16-byte unicast decryption key */ + __le64 tx_secur_seq_cnt; + __le64 hw_tkip_mic_rx_key; + __le64 hw_tkip_mic_tx_key; +} __attribute__ ((packed)); + +/** + * struct sta_id_modify + * @addr[ETH_ALEN]: station's MAC address + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change + * + * Driver selects unused table index when adding new station, + * or the index to a pre-existing station entry when modifying that station. + * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP). + * + * modify_mask flags select which parameters to modify vs. leave alone. + */ +struct sta_id_modify { + u8 addr[ETH_ALEN]; + __le16 reserved1; + u8 sta_id; + u8 modify_mask; + __le16 reserved2; +} __attribute__ ((packed)); + +/* + * REPLY_ADD_STA = 0x18 (command) + * + * The device contains an internal table of per-station information, + * with info on security keys, aggregation parameters, and Tx rates for + * initial Tx attempt and any retries (4965 uses REPLY_TX_LINK_QUALITY_CMD, + * 3945 uses REPLY_RATE_SCALE to set up rate tables). + * + * REPLY_ADD_STA sets up the table entry for one station, either creating + * a new entry, or modifying a pre-existing one. + * + * NOTE: RXON command (without "associated" bit set) wipes the station table + * clean. Moving into RF_KILL state does this also. Driver must set up + * new station table before transmitting anything on the RXON channel + * (except active scans or active measurements; those commands carry + * their own txpower/rate setup data). + * + * When getting started on a new channel, driver must set up the + * IWL_BROADCAST_ID entry (last entry in the table). For a client + * station in a BSS, once an AP is selected, driver sets up the AP STA + * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP + * are all that are needed for a BSS client station. If the device is + * used as AP, or in an IBSS network, driver must set up station table + * entries for all STAs in network, starting with index IWL_STA_ID. + */ + +struct iwl3945_addsta_cmd { + u8 mode; /* 1: modify existing, 0: add new station */ + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ + __le32 station_flags_msk; /* STA_FLG_* */ + + /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) + * corresponding to bit (e.g. bit 5 controls TID 5). + * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. 
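+ * e.g. tid_disable_tx = cpu_to_le16(1 << 5) disables Tx for TID 5 only;
+ * a value of 0 leaves Tx enabled for every TID.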
*/ + __le16 tid_disable_tx; + + __le16 rate_n_flags; + + /* TID for which to add block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + u8 add_immediate_ba_tid; + + /* TID for which to remove block-ack support. + * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ + u8 remove_immediate_ba_tid; + + /* Starting Sequence Number for added block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + __le16 add_immediate_ba_ssn; +} __attribute__ ((packed)); + +struct iwl4965_addsta_cmd { + u8 mode; /* 1: modify existing, 0: add new station */ + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ + __le32 station_flags_msk; /* STA_FLG_* */ + + /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) + * corresponding to bit (e.g. bit 5 controls TID 5). + * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ + __le16 tid_disable_tx; + + __le16 reserved1; + + /* TID for which to add block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + u8 add_immediate_ba_tid; + + /* TID for which to remove block-ack support. + * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ + u8 remove_immediate_ba_tid; + + /* Starting Sequence Number for added block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + __le16 add_immediate_ba_ssn; + + /* + * Number of packets OK to transmit to station even though + * it is asleep -- used to synchronise PS-poll and u-APSD + * responses while ucode keeps track of STA sleep state. + */ + __le16 sleep_tx_count; + + __le16 reserved2; +} __attribute__ ((packed)); + +/* 5000 */ +struct iwl_addsta_cmd { + u8 mode; /* 1: modify existing, 0: add new station */ + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ + __le32 station_flags_msk; /* STA_FLG_* */ + + /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) + * corresponding to bit (e.g. bit 5 controls TID 5). + * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ + __le16 tid_disable_tx; + + __le16 rate_n_flags; /* 3945 only */ + + /* TID for which to add block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + u8 add_immediate_ba_tid; + + /* TID for which to remove block-ack support. + * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ + u8 remove_immediate_ba_tid; + + /* Starting Sequence Number for added block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + __le16 add_immediate_ba_ssn; + + /* + * Number of packets OK to transmit to station even though + * it is asleep -- used to synchronise PS-poll and u-APSD + * responses while ucode keeps track of STA sleep state. 
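+ * (Typical use, stated loosely: after a PS-Poll from a dozing station,
+ * the driver can let one frame out by modifying the entry with
+ * sleep_tx_count = cpu_to_le16(1) and STA_MODIFY_SLEEP_TX_COUNT_MSK set
+ * in modify_mask.)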
+ */ + __le16 sleep_tx_count; + + __le16 reserved2; +} __attribute__ ((packed)); + + +#define ADD_STA_SUCCESS_MSK 0x1 +#define ADD_STA_NO_ROOM_IN_TABLE 0x2 +#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 +#define ADD_STA_MODIFY_NON_EXIST_STA 0x8 +/* + * REPLY_ADD_STA = 0x18 (response) + */ +struct iwl_add_sta_resp { + u8 status; /* ADD_STA_* */ +} __attribute__ ((packed)); + +#define REM_STA_SUCCESS_MSK 0x1 +/* + * REPLY_REM_STA = 0x19 (response) + */ +struct iwl_rem_sta_resp { + u8 status; +} __attribute__ ((packed)); + +/* + * REPLY_REM_STA = 0x19 (command) + */ +struct iwl_rem_sta_cmd { + u8 num_sta; /* number of removed stations */ + u8 reserved[3]; + u8 addr[ETH_ALEN]; /* MAC addr of the first station */ + u8 reserved2[2]; +} __attribute__ ((packed)); + +/* + * REPLY_WEP_KEY = 0x20 + */ +struct iwl_wep_key { + u8 key_index; + u8 key_offset; + u8 reserved1[2]; + u8 key_size; + u8 reserved2[3]; + u8 key[16]; +} __attribute__ ((packed)); + +struct iwl_wep_cmd { + u8 num_keys; + u8 global_key_type; + u8 flags; + u8 reserved; + struct iwl_wep_key key[0]; +} __attribute__ ((packed)); + +#define WEP_KEY_WEP_TYPE 1 +#define WEP_KEYS_MAX 4 +#define WEP_INVALID_OFFSET 0xff +#define WEP_KEY_LEN_64 5 +#define WEP_KEY_LEN_128 13 + +/****************************************************************************** + * (4) + * Rx Responses: + * + *****************************************************************************/ + +#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0) +#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1) + +#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0) +#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) +#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) +#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) +#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0 +#define RX_RES_PHY_FLAGS_ANTENNA_POS 4 + +#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) +#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) +#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8) +#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8) +#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8) +#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8) + +#define RX_RES_STATUS_STATION_FOUND (1<<6) +#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7) + +#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11) +#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11) +#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11) +#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11) +#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11) + +#define RX_MPDU_RES_STATUS_ICV_OK (0x20) +#define RX_MPDU_RES_STATUS_MIC_OK (0x40) +#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) +#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) + + +struct iwl3945_rx_frame_stats { + u8 phy_count; + u8 id; + u8 rssi; + u8 agc; + __le16 sig_avg; + __le16 noise_diff; + u8 payload[0]; +} __attribute__ ((packed)); + +struct iwl3945_rx_frame_hdr { + __le16 channel; + __le16 phy_flags; + u8 reserved1; + u8 rate; + __le16 len; + u8 payload[0]; +} __attribute__ ((packed)); + +struct iwl3945_rx_frame_end { + __le32 status; + __le64 timestamp; + __le32 beacon_timestamp; +} __attribute__ ((packed)); + +/* + * REPLY_3945_RX = 0x1b (response only, not a command) + * + * NOTE: DO NOT dereference from casts to this structure + * It is provided only for calculating minimum data set size. 
+ * The actual offsets of the hdr and end are dynamic based on + * stats.phy_count + */ +struct iwl3945_rx_frame { + struct iwl3945_rx_frame_stats stats; + struct iwl3945_rx_frame_hdr hdr; + struct iwl3945_rx_frame_end end; +} __attribute__ ((packed)); + +#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) + +/* Fixed (non-configurable) rx data from phy */ + +#define IWL49_RX_RES_PHY_CNT 14 +#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4) +#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70) +#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ +#define IWL49_AGC_DB_POS (7) +struct iwl4965_rx_non_cfg_phy { + __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ + __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ + u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ + u8 pad[0]; +} __attribute__ ((packed)); + + +#define IWL50_RX_RES_PHY_CNT 8 +#define IWL50_RX_RES_AGC_IDX 1 +#define IWL50_RX_RES_RSSI_AB_IDX 2 +#define IWL50_RX_RES_RSSI_C_IDX 3 +#define IWL50_OFDM_AGC_MSK 0xfe00 +#define IWL50_OFDM_AGC_BIT_POS 9 +#define IWL50_OFDM_RSSI_A_MSK 0x00ff +#define IWL50_OFDM_RSSI_A_BIT_POS 0 +#define IWL50_OFDM_RSSI_B_MSK 0xff0000 +#define IWL50_OFDM_RSSI_B_BIT_POS 16 +#define IWL50_OFDM_RSSI_C_MSK 0x00ff +#define IWL50_OFDM_RSSI_C_BIT_POS 0 + +struct iwl5000_non_cfg_phy { + __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */ +} __attribute__ ((packed)); + + +/* + * REPLY_RX = 0xc3 (response only, not a command) + * Used only for legacy (non 11n) frames. + */ +struct iwl_rx_phy_res { + u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ + u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ + u8 stat_id; /* configurable DSP phy data set ID */ + u8 reserved1; + __le64 timestamp; /* TSF at on air rise */ + __le32 beacon_time_stamp; /* beacon at on-air rise */ + __le16 phy_flags; /* general phy flags: band, modulation, ... */ + __le16 channel; /* channel number */ + u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ + __le32 rate_n_flags; /* RATE_MCS_* */ + __le16 byte_count; /* frame's byte-count */ + __le16 reserved3; +} __attribute__ ((packed)); + +struct iwl4965_rx_mpdu_res_start { + __le16 byte_count; + __le16 reserved; +} __attribute__ ((packed)); + + +/****************************************************************************** + * (5) + * Tx Commands & Responses: + * + * Driver must place each REPLY_TX command into one of the prioritized Tx + * queues in host DRAM, shared between driver and device (see comments for + * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode + * are preparing to transmit, the device pulls the Tx command over the PCI + * bus via one of the device's Tx DMA channels, to fill an internal FIFO + * from which data will be transmitted. + * + * uCode handles all timing and protocol related to control frames + * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler + * handle reception of block-acks; uCode updates the host driver via + * REPLY_COMPRESSED_BA (4965). + * + * uCode handles retrying Tx when an ACK is expected but not received. + * This includes trying lower data rates than the one requested in the Tx + * command, as set up by the REPLY_RATE_SCALE (for 3945) or + * REPLY_TX_LINK_QUALITY_CMD (4965). + * + * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. + * This command must be executed after every RXON command, before Tx can occur. 
+ *****************************************************************************/ + +/* REPLY_TX Tx flags field */ + +/* 1: Use RTS/CTS protocol or CTS-to-self if spec allows it + * before this frame. if CTS-to-self required check + * RXON_FLG_SELF_CTS_EN status. */ +#define TX_CMD_FLG_RTS_CTS_MSK cpu_to_le32(1 << 0) + +/* 1: Use Request-To-Send protocol before this frame. + * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */ +#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1) + +/* 1: Transmit Clear-To-Send to self before this frame. + * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames. + * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. */ +#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2) + +/* 1: Expect ACK from receiving station + * 0: Don't expect ACK (MAC header's duration field s/b 0) + * Set this for unicast frames, but not broadcast/multicast. */ +#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) + +/* For 4965: + * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). + * Tx command's initial_rate_index indicates first rate to try; + * uCode walks through table for additional Tx attempts. + * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. + * This rate will be used for all Tx attempts; it will not be scaled. */ +#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4) + +/* 1: Expect immediate block-ack. + * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ +#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6) + +/* 1: Frame requires full Tx-Op protection. + * Set this if either RTS or CTS Tx Flag gets set. */ +#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7) + +/* Tx antenna selection field; used only for 3945, reserved (0) for 4965. + * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */ +#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00) +#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8) +#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9) + +/* 1: Ignore Bluetooth priority for this frame. + * 0: Delay Tx until Bluetooth device is done (normal usage). */ +#define TX_CMD_FLG_BT_DIS_MSK cpu_to_le32(1 << 12) + +/* 1: uCode overrides sequence control field in MAC header. + * 0: Driver provides sequence control field in MAC header. + * Set this for management frames, non-QOS data frames, non-unicast frames, + * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ +#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13) + +/* 1: This frame is non-last MPDU; more fragments are coming. + * 0: Last fragment, or not using fragmentation. */ +#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14) + +/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame. + * 0: No TSF required in outgoing frame. + * Set this for transmitting beacons and probe responses. */ +#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16) + +/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword + * alignment of frame's payload data field. + * 0: No pad + * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4 + * field (but not both). Driver must align frame data (i.e. data following + * MAC header) to DWORD boundary. */ +#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20) + +/* accelerate aggregation support + * 0 - no CCMP encryption; 1 - CCMP encryption */ +#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22) + +/* HCCA-AP - disable duration overwriting. 
*/ +#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25) + + +/* + * TX command security control + */ +#define TX_CMD_SEC_WEP 0x01 +#define TX_CMD_SEC_CCM 0x02 +#define TX_CMD_SEC_TKIP 0x03 +#define TX_CMD_SEC_MSK 0x03 +#define TX_CMD_SEC_SHIFT 6 +#define TX_CMD_SEC_KEY128 0x08 + +/* + * security overhead sizes + */ +#define WEP_IV_LEN 4 +#define WEP_ICV_LEN 4 +#define CCMP_MIC_LEN 8 +#define TKIP_ICV_LEN 4 + +/* + * REPLY_TX = 0x1c (command) + */ + +struct iwl3945_tx_cmd { + /* + * MPDU byte count: + * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * NOTE: Does not include Tx command bytes, post-MAC pad bytes, + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i + * Range: 14-2342 bytes. + */ + __le16 len; + + /* + * MPDU or MSDU byte count for next frame. + * Used for fragmentation and bursting, but not 11n aggregation. + * Same as "len", but for next frame. Set to 0 if not applicable. + */ + __le16 next_frame_len; + + __le32 tx_flags; /* TX_CMD_FLG_* */ + + u8 rate; + + /* Index of recipient station in uCode's station table */ + u8 sta_id; + u8 tid_tspec; + u8 sec_ctl; + u8 key[16]; + union { + u8 byte[8]; + __le16 word[4]; + __le32 dw[2]; + } tkip_mic; + __le32 next_frame_info; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + u8 supp_rates[2]; + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + + /* + * Duration of EDCA burst Tx Opportunity, in 32-usec units. + * Set this if txop time is not specified by HCCA protocol (e.g. by AP). + */ + __le16 driver_txop; + + /* + * MAC header goes here, followed by 2 bytes padding if MAC header + * length is 26 or 30 bytes, followed by payload data + */ + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __attribute__ ((packed)); + +/* + * REPLY_TX = 0x1c (response) + */ +struct iwl3945_tx_resp { + u8 failure_rts; + u8 failure_frame; + u8 bt_kill_count; + u8 rate; + __le32 wireless_media_time; + __le32 status; /* TX status */ +} __attribute__ ((packed)); + + +/* + * 4965 uCode updates these Tx attempt count values in host DRAM. + * Used for managing Tx retries when expecting block-acks. + * Driver should set these fields to 0. + */ +struct iwl_dram_scratch { + u8 try_cnt; /* Tx attempts */ + u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ + __le16 reserved; +} __attribute__ ((packed)); + +struct iwl_tx_cmd { + /* + * MPDU byte count: + * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * NOTE: Does not include Tx command bytes, post-MAC pad bytes, + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i + * Range: 14-2342 bytes. + */ + __le16 len; + + /* + * MPDU or MSDU byte count for next frame. + * Used for fragmentation and bursting, but not 11n aggregation. + * Same as "len", but for next frame. Set to 0 if not applicable. + */ + __le16 next_frame_len; + + __le32 tx_flags; /* TX_CMD_FLG_* */ + + /* uCode may modify this field of the Tx command (in host DRAM!). + * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ + struct iwl_dram_scratch scratch; + + /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. 
*/ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* Index of destination station in uCode's station table */ + u8 sta_id; + + /* Type of security encryption: CCM or TKIP */ + u8 sec_ctl; /* TX_CMD_SEC_* */ + + /* + * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial + * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for + * data frames, this field may be used to selectively reduce initial + * rate (via non-0 value) for special frames (e.g. management), while + * still supporting rate scaling for all frames. + */ + u8 initial_rate_index; + u8 reserved; + u8 key[16]; + __le16 next_frame_flags; + __le16 reserved2; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + + /* Host DRAM physical address pointer to "scratch" in this command. + * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */ + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ + u8 tid_tspec; + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + + /* + * Duration of EDCA burst Tx Opportunity, in 32-usec units. + * Set this if txop time is not specified by HCCA protocol (e.g. by AP). + */ + __le16 driver_txop; + + /* + * MAC header goes here, followed by 2 bytes padding if MAC header + * length is 26 or 30 bytes, followed by payload data + */ + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __attribute__ ((packed)); + +/* TX command response is sent after *all* transmission attempts. + * + * NOTES: + * + * TX_STATUS_FAIL_NEXT_FRAG + * + * If the fragment flag in the MAC header for the frame being transmitted + * is set and there is insufficient time to transmit the next frame, the + * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'. + * + * TX_STATUS_FIFO_UNDERRUN + * + * Indicates the host did not provide bytes to the FIFO fast enough while + * a TX was in progress. + * + * TX_STATUS_FAIL_MGMNT_ABORT + * + * This status is only possible if the ABORT ON MGMT RX parameter was + * set to true with the TX command. + * + * If the MSB of the status parameter is set then an abort sequence is + * required. This sequence consists of the host activating the TX Abort + * control line, and then waiting for the TX Abort command response. This + * indicates that a the device is no longer in a transmit state, and that the + * command FIFO has been cleared. The host must then deactivate the TX Abort + * control line. Receiving is still allowed in this case. 
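+ *
+ * Illustrative sketch only (uses the masks and helper defined below;
+ * "tx_resp" and "info" are hypothetical locals, a struct iwl4965_tx_resp
+ * pointer and a struct ieee80211_tx_info pointer):
+ *
+ *     u32 status = le32_to_cpu(tx_resp->u.status);
+ *
+ *     if (status & TX_ABORT_REQUIRED_MSK)
+ *             ... perform the TX Abort handshake described above ...
+ *     info->flags |= iwl_tx_status_to_mac80211(status);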
+ */ +enum { + TX_STATUS_SUCCESS = 0x01, + TX_STATUS_DIRECT_DONE = 0x02, + TX_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, + TX_STATUS_FAIL_MGMNT_ABORT = 0x85, + TX_STATUS_FAIL_NEXT_FRAG = 0x86, + TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_STATUS_FAIL_DEST_PS = 0x88, + TX_STATUS_FAIL_ABORTED = 0x89, + TX_STATUS_FAIL_BT_RETRY = 0x8a, + TX_STATUS_FAIL_STA_INVALID = 0x8b, + TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e, + TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, + TX_STATUS_FAIL_TX_LOCKED = 0x90, + TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, +}; + +#define TX_PACKET_MODE_REGULAR 0x0000 +#define TX_PACKET_MODE_BURST_SEQ 0x0100 +#define TX_PACKET_MODE_BURST_FIRST 0x0200 + +enum { + TX_POWER_PA_NOT_ACTIVE = 0x0, +}; + +enum { + TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ + TX_STATUS_DELAY_MSK = 0x00000040, + TX_STATUS_ABORT_MSK = 0x00000080, + TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ + TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ + TX_RESERVED = 0x00780000, /* bits 19:22 */ + TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ + TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ +}; + +static inline u32 iwl_tx_status_to_mac80211(u32 status) +{ + status &= TX_STATUS_MSK; + + switch (status) { + case TX_STATUS_SUCCESS: + case TX_STATUS_DIRECT_DONE: + return IEEE80211_TX_STAT_ACK; + case TX_STATUS_FAIL_DEST_PS: + return IEEE80211_TX_STAT_TX_FILTERED; + default: + return 0; + } +} + +static inline bool iwl_is_tx_success(u32 status) +{ + status &= TX_STATUS_MSK; + return (status == TX_STATUS_SUCCESS) || + (status == TX_STATUS_DIRECT_DONE); +} + + + +/* ******************************* + * TX aggregation status + ******************************* */ + +enum { + AGG_TX_STATE_TRANSMITTED = 0x00, + AGG_TX_STATE_UNDERRUN_MSK = 0x01, + AGG_TX_STATE_BT_PRIO_MSK = 0x02, + AGG_TX_STATE_FEW_BYTES_MSK = 0x04, + AGG_TX_STATE_ABORT_MSK = 0x08, + AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10, + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20, + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 0x40, + AGG_TX_STATE_SCD_QUERY_MSK = 0x80, + AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100, + AGG_TX_STATE_RESPONSE_MSK = 0x1ff, + AGG_TX_STATE_DUMP_TX_MSK = 0x200, + AGG_TX_STATE_DELAY_TX_MSK = 0x400 +}; + +#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \ + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \ + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK) + +/* # tx attempts for first frame in aggregation */ +#define AGG_TX_STATE_TRY_CNT_POS 12 +#define AGG_TX_STATE_TRY_CNT_MSK 0xf000 + +/* Command ID and sequence number of Tx command for this frame */ +#define AGG_TX_STATE_SEQ_NUM_POS 16 +#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 + +/* + * REPLY_TX = 0x1c (response) + * + * This response may be in one of two slightly different formats, indicated + * by the frame_count field: + * + * 1) No aggregation (frame_count == 1). This reports Tx results for + * a single frame. Multiple attempts, at various bit rates, may have + * been made for this frame. + * + * 2) Aggregation (frame_count > 1). This reports Tx results for + * 2 or more frames that used block-acknowledge. All frames were + * transmitted at same rate. Rate scaling may have been used if first + * frame in this new agg block failed in previous agg block(s). + * + * Note that, for aggregation, ACK (block-ack) status is not delivered here; + * block-ack has not been received by the time the 4965 records this status. 
+ * This status relates to reasons the tx might have been blocked or aborted + * within the sending station (this 4965), rather than whether it was + * received successfully by the destination station. + */ +struct agg_tx_status { + __le16 status; + __le16 sequence; +} __attribute__ ((packed)); + +struct iwl4965_tx_resp { + u8 frame_count; /* 1 no aggregation, >1 aggregation */ + u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ + u8 failure_rts; /* # failures due to unsuccessful RTS */ + u8 failure_frame; /* # failures due to no ACK (unused for agg) */ + + /* For non-agg: Rate at which frame was successful. + * For agg: Rate at which all frames were transmitted. */ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* For non-agg: RTS + CTS + frame tx attempts time + ACK. + * For agg: RTS + CTS + aggregation tx time + block-ack time. */ + __le16 wireless_media_time; /* uSecs */ + + __le16 reserved; + __le32 pa_power1; /* RF power amplifier measurement (not used) */ + __le32 pa_power2; + + /* + * For non-agg: frame status TX_STATUS_* + * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status + * fields follow this one, up to frame_count. + * Bit fields: + * 11- 0: AGG_TX_STATE_* status code + * 15-12: Retry count for 1st frame in aggregation (retries + * occur if tx failed for this frame when it was a + * member of a previous aggregation block). If rate + * scaling is used, retry count indicates the rate + * table entry used for all frames in the new agg. + * 31-16: Sequence # for this frame's Tx cmd (not SSN!) + */ + union { + __le32 status; + struct agg_tx_status agg_status[0]; /* for each agg frame */ + } u; +} __attribute__ ((packed)); + +/* + * definitions for initial rate index field + * bits [3:0] initial rate index + * bits [6:4] rate table color, used for the initial rate + * bit-7 invalid rate indication + * i.e. rate was not chosen from rate table + * or rate table color was changed during frame retries + * refer tlc rate info + */ + +#define IWL50_TX_RES_INIT_RATE_INDEX_POS 0 +#define IWL50_TX_RES_INIT_RATE_INDEX_MSK 0x0f +#define IWL50_TX_RES_RATE_TABLE_COLOR_POS 4 +#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK 0x70 +#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80 + +/* refer to ra_tid */ +#define IWL50_TX_RES_TID_POS 0 +#define IWL50_TX_RES_TID_MSK 0x0f +#define IWL50_TX_RES_RA_POS 4 +#define IWL50_TX_RES_RA_MSK 0xf0 + +struct iwl5000_tx_resp { + u8 frame_count; /* 1 no aggregation, >1 aggregation */ + u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ + u8 failure_rts; /* # failures due to unsuccessful RTS */ + u8 failure_frame; /* # failures due to no ACK (unused for agg) */ + + /* For non-agg: Rate at which frame was successful. + * For agg: Rate at which all frames were transmitted. */ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* For non-agg: RTS + CTS + frame tx attempts time + ACK. + * For agg: RTS + CTS + aggregation tx time + block-ack time. */ + __le16 wireless_media_time; /* uSecs */ + + u8 pa_status; /* RF power amplifier measurement (not used) */ + u8 pa_integ_res_a[3]; + u8 pa_integ_res_b[3]; + u8 pa_integ_res_C[3]; + + __le32 tfd_info; + __le16 seq_ctl; + __le16 byte_cnt; + u8 tlc_info; + u8 ra_tid; /* tid (0:3), sta_id (4:7) */ + __le16 frame_ctrl; + /* + * For non-agg: frame status TX_STATUS_* + * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status + * fields follow this one, up to frame_count. 
+ * Bit fields: + * 11- 0: AGG_TX_STATE_* status code + * 15-12: Retry count for 1st frame in aggregation (retries + * occur if tx failed for this frame when it was a + * member of a previous aggregation block). If rate + * scaling is used, retry count indicates the rate + * table entry used for all frames in the new agg. + * 31-16: Sequence # for this frame's Tx cmd (not SSN!) + */ + struct agg_tx_status status; /* TX status (in aggregation - + * status of 1st frame) */ +} __attribute__ ((packed)); +/* + * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) + * + * Reports Block-Acknowledge from recipient station + */ +struct iwl_compressed_ba_resp { + __le32 sta_addr_lo32; + __le16 sta_addr_hi16; + __le16 reserved; + + /* Index of recipient (BA-sending) station in uCode's station table */ + u8 sta_id; + u8 tid; + __le16 seq_ctl; + __le64 bitmap; + __le16 scd_flow; + __le16 scd_ssn; +} __attribute__ ((packed)); + +/* + * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) + * + * See details under "TXPOWER" in iwl-4965-hw.h. + */ + +struct iwl3945_txpowertable_cmd { + u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ + u8 reserved; + __le16 channel; + struct iwl3945_power_per_rate power[IWL_MAX_RATES]; +} __attribute__ ((packed)); + +struct iwl4965_txpowertable_cmd { + u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ + u8 reserved; + __le16 channel; + struct iwl4965_tx_power_db tx_power; +} __attribute__ ((packed)); + + +/** + * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response + * + * REPLY_RATE_SCALE = 0x47 (command, has simple generic response) + * + * NOTE: The table of rates passed to the uCode via the + * RATE_SCALE command sets up the corresponding order of + * rates used for all related commands, including rate + * masks, etc. + * + * For example, if you set 9MB (PLCP 0x0f) as the first + * rate in the rate table, the bit mask for that rate + * when passed through ofdm_basic_rates on the REPLY_RXON + * command would be bit 0 (1 << 0) + */ +struct iwl3945_rate_scaling_info { + __le16 rate_n_flags; + u8 try_cnt; + u8 next_rate_index; +} __attribute__ ((packed)); + +struct iwl3945_rate_scaling_cmd { + u8 table_id; + u8 reserved[3]; + struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; +} __attribute__ ((packed)); + + +/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ +#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) + +/* # of EDCA prioritized tx fifos */ +#define LINK_QUAL_AC_NUM AC_NUM + +/* # entries in rate scale table to support Tx retries */ +#define LINK_QUAL_MAX_RETRY_NUM 16 + +/* Tx antenna selection values */ +#define LINK_QUAL_ANT_A_MSK (1 << 0) +#define LINK_QUAL_ANT_B_MSK (1 << 1) +#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) + + +/** + * struct iwl_link_qual_general_params + * + * Used in REPLY_TX_LINK_QUALITY_CMD + */ +struct iwl_link_qual_general_params { + u8 flags; + + /* No entries at or above this (driver chosen) index contain MIMO */ + u8 mimo_delimiter; + + /* Best single antenna to use for single stream (legacy, SISO). */ + u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ + + /* Best antennas to use for MIMO (unused for 4965, assumes both). */ + u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ + + /* + * If driver needs to use different initial rates for different + * EDCA QOS access categories (as implemented by tx fifos 0-3), + * this table will set that up, by indicating the indexes in the + * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start. + * Otherwise, driver should set all entries to 0. 
+ * + * Entry usage: + * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice + * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. + */ + u8 start_rate_index[LINK_QUAL_AC_NUM]; +} __attribute__ ((packed)); + +#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ +#define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535) +#define LINK_QUAL_AGG_TIME_LIMIT_MIN (0) + +#define LINK_QUAL_AGG_DISABLE_START_DEF (3) +#define LINK_QUAL_AGG_DISABLE_START_MAX (255) +#define LINK_QUAL_AGG_DISABLE_START_MIN (0) + +#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31) +#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) +#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) + +/** + * struct iwl_link_qual_agg_params + * + * Used in REPLY_TX_LINK_QUALITY_CMD + */ +struct iwl_link_qual_agg_params { + + /* Maximum number of uSec in aggregation. + * Driver should set this to 4000 (4 milliseconds). */ + __le16 agg_time_limit; + + /* + * Number of Tx retries allowed for a frame, before that frame will + * no longer be considered for the start of an aggregation sequence + * (scheduler will then try to tx it as single frame). + * Driver should set this to 3. + */ + u8 agg_dis_start_th; + + /* + * Maximum number of frames in aggregation. + * 0 = no limit (default). 1 = no aggregation. + * Other values = max # frames in aggregation. + */ + u8 agg_frame_cnt_limit; + + __le32 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) + * + * For 4965 only; 3945 uses REPLY_RATE_SCALE. + * + * Each station in the 4965's internal station table has its own table of 16 + * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when + * an ACK is not received. This command replaces the entire table for + * one station. + * + * NOTE: Station must already be in 4965's station table. Use REPLY_ADD_STA. + * + * The rate scaling procedures described below work well. Of course, other + * procedures are possible, and may work better for particular environments. + * + * + * FILLING THE RATE TABLE + * + * Given a particular initial rate and mode, as determined by the rate + * scaling algorithm described below, the Linux driver uses the following + * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the + * Link Quality command: + * + * + * 1) If using High-throughput (HT) (SISO or MIMO) initial rate: + * a) Use this same initial rate for first 3 entries. + * b) Find next lower available rate using same mode (SISO or MIMO), + * use for next 3 entries. If no lower rate available, switch to + * legacy mode (no HT40 channel, no MIMO, no short guard interval). + * c) If using MIMO, set command's mimo_delimiter to number of entries + * using MIMO (3 or 6). + * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel, + * no MIMO, no short guard interval), at the next lower bit rate + * (e.g. if second HT bit rate was 54, try 48 legacy), and follow + * legacy procedure for remaining table entries. + * + * 2) If using legacy initial rate: + * a) Use the initial rate for only one entry. + * b) For each following entry, reduce the rate to next lower available + * rate, until reaching the lowest available rate. + * c) When reducing rate, also switch antenna selection. + * d) Once lowest available rate is reached, repeat this rate until + * rate table is filled (16 entries), switching antenna each entry. 
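+ *
+ * A minimal sketch of rule 2) above (next_lower_rate(), toggle_antenna()
+ * and "lq" are hypothetical; "lq" stands for a zeroed struct
+ * iwl_link_quality_cmd, defined further below):
+ *
+ *     for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ *             lq.rs_table[i].rate_n_flags = cpu_to_le32(rate | ant);
+ *             rate = next_lower_rate(rate);
+ *             ant = toggle_antenna(ant);
+ *     }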
+ * + * + * ACCUMULATING HISTORY + * + * The rate scaling algorithm for 4965, as implemented in Linux driver, uses + * two sets of frame Tx success history: One for the current/active modulation + * mode, and one for a speculative/search mode that is being attempted. If the + * speculative mode turns out to be more effective (i.e. actual transfer + * rate is better), then the driver continues to use the speculative mode + * as the new current active mode. + * + * Each history set contains, separately for each possible rate, data for a + * sliding window of the 62 most recent tx attempts at that rate. The data + * includes a shifting bitmap of success(1)/failure(0), and sums of successful + * and attempted frames, from which the driver can additionally calculate a + * success ratio (success / attempted) and number of failures + * (attempted - success), and control the size of the window (attempted). + * The driver uses the bit map to remove successes from the success sum, as + * the oldest tx attempts fall out of the window. + * + * When the 4965 makes multiple tx attempts for a given frame, each attempt + * might be at a different rate, and have different modulation characteristics + * (e.g. antenna, fat channel, short guard interval), as set up in the rate + * scaling table in the Link Quality command. The driver must determine + * which rate table entry was used for each tx attempt, to determine which + * rate-specific history to update, and record only those attempts that + * match the modulation characteristics of the history set. + * + * When using block-ack (aggregation), all frames are transmitted at the same + * rate, since there is no per-attempt acknowledgment from the destination + * station. The Tx response struct iwl_tx_resp indicates the Tx rate in + * rate_n_flags field. After receiving a block-ack, the driver can update + * history for the entire block all at once. + * + * + * FINDING BEST STARTING RATE: + * + * When working with a selected initial modulation mode (see below), the + * driver attempts to find a best initial rate. The initial rate is the + * first entry in the Link Quality command's rate table. + * + * 1) Calculate actual throughput (success ratio * expected throughput, see + * table below) for current initial rate. Do this only if enough frames + * have been attempted to make the value meaningful: at least 6 failed + * tx attempts, or at least 8 successes. If not enough, don't try rate + * scaling yet. + * + * 2) Find available rates adjacent to current initial rate. Available means: + * a) supported by hardware && + * b) supported by association && + * c) within any constraints selected by user + * + * 3) Gather measured throughputs for adjacent rates. These might not have + * enough history to calculate a throughput. That's okay, we might try + * using one of them anyway! + * + * 4) Try decreasing rate if, for current rate: + * a) success ratio is < 15% || + * b) lower adjacent rate has better measured throughput || + * c) higher adjacent rate has worse throughput, and lower is unmeasured + * + * As a sanity check, if decrease was determined above, leave rate + * unchanged if: + * a) lower rate unavailable + * b) success ratio at current rate > 85% (very good) + * c) current measured throughput is better than expected throughput + * of lower rate (under perfect 100% tx conditions, see table below) + * + * 5) Try increasing rate if, for current rate: + * a) success ratio is < 15% || + * b) both adjacent rates' throughputs are unmeasured (try it!) 
|| + * b) higher adjacent rate has better measured throughput || + * c) lower adjacent rate has worse throughput, and higher is unmeasured + * + * As a sanity check, if increase was determined above, leave rate + * unchanged if: + * a) success ratio at current rate < 70%. This is not particularly + * good performance; higher rate is sure to have poorer success. + * + * 6) Re-evaluate the rate after each tx frame. If working with block- + * acknowledge, history and statistics may be calculated for the entire + * block (including prior history that fits within the history windows), + * before re-evaluation. + * + * FINDING BEST STARTING MODULATION MODE: + * + * After working with a modulation mode for a "while" (and doing rate scaling), + * the driver searches for a new initial mode in an attempt to improve + * throughput. The "while" is measured by numbers of attempted frames: + * + * For legacy mode, search for new mode after: + * 480 successful frames, or 160 failed frames + * For high-throughput modes (SISO or MIMO), search for new mode after: + * 4500 successful frames, or 400 failed frames + * + * Mode switch possibilities are (3 for each mode): + * + * For legacy: + * Change antenna, try SISO (if HT association), try MIMO (if HT association) + * For SISO: + * Change antenna, try MIMO, try shortened guard interval (SGI) + * For MIMO: + * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI) + * + * When trying a new mode, use the same bit rate as the old/current mode when + * trying antenna switches and shortened guard interval. When switching to + * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate + * for which the expected throughput (under perfect conditions) is about the + * same or slightly better than the actual measured throughput delivered by + * the old/current mode. + * + * Actual throughput can be estimated by multiplying the expected throughput + * by the success ratio (successful / attempted tx frames). Frame size is + * not considered in this calculation; it assumes that frame size will average + * out to be fairly consistent over several samples. The following are + * metric values for expected throughput assuming 100% success ratio. + * Only G band has support for CCK rates: + * + * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60 + * + * G: 7 13 35 58 40 57 72 98 121 154 177 186 186 + * A: 0 0 0 0 40 57 72 98 121 154 177 186 186 + * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202 + * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211 + * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251 + * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257 + * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257 + * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264 + * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289 + * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293 + * + * After the new mode has been tried for a short while (minimum of 6 failed + * frames or 8 successful frames), compare success ratio and actual throughput + * estimate of the new mode with the old. If either is better with the new + * mode, continue to use the new mode. + * + * Continue comparing modes until all 3 possibilities have been tried. + * If moving from legacy to HT, try all 3 possibilities from the new HT + * mode. After trying all 3, a best mode is found. Continue to use this mode + * for the longer "while" described above (e.g. 480 successful frames for + * legacy), and then repeat the search process. 
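+ *
+ * In other words, the throughput estimate used throughout the procedure
+ * above is simply (names here are illustrative, not part of this header):
+ *
+ *     actual_tpt = expected_tpt[rate] * success_cnt / attempted_cnt;
+ *
+ * with expected_tpt[] taken from the table above for the mode in use.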
+ * + */ +struct iwl_link_quality_cmd { + + /* Index of destination/recipient station in uCode's station table */ + u8 sta_id; + u8 reserved1; + __le16 control; /* not used */ + struct iwl_link_qual_general_params general_params; + struct iwl_link_qual_agg_params agg_params; + + /* + * Rate info; when using rate-scaling, Tx command's initial_rate_index + * specifies 1st Tx rate attempted, via index into this table. + * 4965 works its way through table when retrying Tx. + */ + struct { + __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ + } rs_table[LINK_QUAL_MAX_RETRY_NUM]; + __le32 reserved2; +} __attribute__ ((packed)); + +/* + * BT configuration enable flags: + * bit 0 - 1: BT channel announcement enabled + * 0: disable + * bit 1 - 1: priority of BT device enabled + * 0: disable + * bit 2 - 1: BT 2 wire support enabled + * 0: disable + */ +#define BT_COEX_DISABLE (0x0) +#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0) +#define BT_ENABLE_PRIORITY BIT(1) +#define BT_ENABLE_2_WIRE BIT(2) + +#define BT_COEX_DISABLE (0x0) +#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY) + +#define BT_LEAD_TIME_MIN (0x0) +#define BT_LEAD_TIME_DEF (0x1E) +#define BT_LEAD_TIME_MAX (0xFF) + +#define BT_MAX_KILL_MIN (0x1) +#define BT_MAX_KILL_DEF (0x5) +#define BT_MAX_KILL_MAX (0xFF) + +/* + * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) + * + * 3945 and 4965 support hardware handshake with Bluetooth device on + * same platform. Bluetooth device alerts wireless device when it will Tx; + * wireless device can delay or kill its own Tx to accommodate. + */ +struct iwl_bt_cmd { + u8 flags; + u8 lead_time; + u8 max_kill; + u8 reserved; + __le32 kill_ack_mask; + __le32 kill_cts_mask; +} __attribute__ ((packed)); + +/****************************************************************************** + * (6) + * Spectrum Management (802.11h) Commands, Responses, Notifications: + * + *****************************************************************************/ + +/* + * Spectrum Management + */ +#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \ + RXON_FILTER_CTL2HOST_MSK | \ + RXON_FILTER_ACCEPT_GRP_MSK | \ + RXON_FILTER_DIS_DECRYPT_MSK | \ + RXON_FILTER_DIS_GRP_DECRYPT_MSK | \ + RXON_FILTER_ASSOC_MSK | \ + RXON_FILTER_BCON_AWARE_MSK) + +struct iwl_measure_channel { + __le32 duration; /* measurement duration in extended beacon + * format */ + u8 channel; /* channel to measure */ + u8 type; /* see enum iwl_measure_type */ + __le16 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) + */ +struct iwl_spectrum_cmd { + __le16 len; /* number of bytes starting from token */ + u8 token; /* token id */ + u8 id; /* measurement id -- 0 or 1 */ + u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */ + u8 periodic; /* 1 = periodic */ + __le16 path_loss_timeout; + __le32 start_time; /* start time in extended beacon format */ + __le32 reserved2; + __le32 flags; /* rxon flags */ + __le32 filter_flags; /* rxon filter flags */ + __le16 channel_count; /* minimum 1, maximum 10 */ + __le16 reserved3; + struct iwl_measure_channel channels[10]; +} __attribute__ ((packed)); + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) + */ +struct iwl_spectrum_resp { + u8 token; + u8 id; /* id of the prior command replaced, or 0xff */ + __le16 status; /* 0 - command will be handled + * 1 - cannot handle (conflicts with another + * measurement) */ +} __attribute__ ((packed)); + +enum iwl_measurement_state { + IWL_MEASUREMENT_START = 0, + IWL_MEASUREMENT_STOP = 1, +}; + 
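+/*
+ * Illustrative helper only -- not part of the original command set.  It
+ * shows how one entry of the REPLY_SPECTRUM_MEASUREMENT_CMD channel table
+ * above might be filled; the function name is an assumption, and "type"
+ * should be one of enum iwl_measure_type (defined below).
+ */
+static inline void iwl_fill_measure_channel(struct iwl_measure_channel *chan,
+					    u8 channel, u8 type, u32 duration)
+{
+	chan->duration = cpu_to_le32(duration);	/* extended beacon format */
+	chan->channel = channel;
+	chan->type = type;
+	chan->reserved = 0;
+}
+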
+enum iwl_measurement_status { + IWL_MEASUREMENT_OK = 0, + IWL_MEASUREMENT_CONCURRENT = 1, + IWL_MEASUREMENT_CSA_CONFLICT = 2, + IWL_MEASUREMENT_TGH_CONFLICT = 3, + /* 4-5 reserved */ + IWL_MEASUREMENT_STOPPED = 6, + IWL_MEASUREMENT_TIMEOUT = 7, + IWL_MEASUREMENT_PERIODIC_FAILED = 8, +}; + +#define NUM_ELEMENTS_IN_HISTOGRAM 8 + +struct iwl_measurement_histogram { + __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ + __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ +} __attribute__ ((packed)); + +/* clear channel availability counters */ +struct iwl_measurement_cca_counters { + __le32 ofdm; + __le32 cck; +} __attribute__ ((packed)); + +enum iwl_measure_type { + IWL_MEASURE_BASIC = (1 << 0), + IWL_MEASURE_CHANNEL_LOAD = (1 << 1), + IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), + IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), + IWL_MEASURE_FRAME = (1 << 4), + /* bits 5:6 are reserved */ + IWL_MEASURE_IDLE = (1 << 7), +}; + +/* + * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) + */ +struct iwl_spectrum_notification { + u8 id; /* measurement id -- 0 or 1 */ + u8 token; + u8 channel_index; /* index in measurement channel list */ + u8 state; /* 0 - start, 1 - stop */ + __le32 start_time; /* lower 32-bits of TSF */ + u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ + u8 channel; + u8 type; /* see enum iwl_measurement_type */ + u8 reserved1; + /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only + * valid if applicable for measurement type requested. */ + __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */ + __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */ + __le32 cca_time; /* channel load time in usecs */ + u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - + * unidentified */ + u8 reserved2[3]; + struct iwl_measurement_histogram histogram; + __le32 stop_time; /* lower 32-bits of TSF */ + __le32 status; /* see iwl_measurement_status */ +} __attribute__ ((packed)); + +/****************************************************************************** + * (7) + * Power Management Commands, Responses, Notifications: + * + *****************************************************************************/ + +/** + * struct iwl_powertable_cmd - Power Table Command + * @flags: See below: + * + * POWER_TABLE_CMD = 0x77 (command, has simple generic response) + * + * PM allow: + * bit 0 - '0' Driver not allow power management + * '1' Driver allow PM (use rest of parameters) + * + * uCode send sleep notifications: + * bit 1 - '0' Don't send sleep notification + * '1' send sleep notification (SEND_PM_NOTIFICATION) + * + * Sleep over DTIM + * bit 2 - '0' PM have to walk up every DTIM + * '1' PM could sleep over DTIM till listen Interval. + * + * PCI power managed + * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1) + * '1' !(PCI_CFG_LINK_CTRL & 0x1) + * + * Fast PD + * bit 4 - '1' Put radio to sleep when receiving frame for others + * + * Force sleep Modes + * bit 31/30- '00' use both mac/xtal sleeps + * '01' force Mac sleep + * '10' force xtal sleep + * '11' Illegal set + * + * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then + * ucode assume sleep over DTIM is allowed and we don't need to wake up + * for every DTIM. 
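+ *
+ * As an illustration only ("cmd" is a hypothetical local of type struct
+ * iwl_powertable_cmd, defined below), enabling power management with sleep
+ * over DTIM and PCI power management combines the flag bits defined below:
+ *
+ *     cmd.flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
+ *                 IWL_POWER_SLEEP_OVER_DTIM_MSK |
+ *                 IWL_POWER_PCI_PM_MSK;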
+ */ +#define IWL_POWER_VEC_SIZE 5 + +#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) +#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2)) +#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) +#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4)) + +struct iwl3945_powertable_cmd { + __le16 flags; + u8 reserved[2]; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; +} __attribute__ ((packed)); + +struct iwl_powertable_cmd { + __le16 flags; + u8 keep_alive_seconds; /* 3945 reserved */ + u8 debug_flags; /* 3945 reserved */ + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; + __le32 keep_alive_beacons; +} __attribute__ ((packed)); + +/* + * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) + * 3945 and 4965 identical. + */ +struct iwl_sleep_notification { + u8 pm_sleep_mode; + u8 pm_wakeup_src; + __le16 reserved; + __le32 sleep_time; + __le32 tsf_low; + __le32 bcon_timer; +} __attribute__ ((packed)); + +/* Sleep states. 3945 and 4965 identical. */ +enum { + IWL_PM_NO_SLEEP = 0, + IWL_PM_SLP_MAC = 1, + IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, + IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, + IWL_PM_SLP_PHY = 4, + IWL_PM_SLP_REPENT = 5, + IWL_PM_WAKEUP_BY_TIMER = 6, + IWL_PM_WAKEUP_BY_DRIVER = 7, + IWL_PM_WAKEUP_BY_RFKILL = 8, + /* 3 reserved */ + IWL_PM_NUM_OF_MODES = 12, +}; + +/* + * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response) + */ +#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */ +#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */ +#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ +struct iwl_card_state_cmd { + __le32 status; /* CARD_STATE_CMD_* request new power state */ +} __attribute__ ((packed)); + +/* + * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) + */ +struct iwl_card_state_notif { + __le32 flags; +} __attribute__ ((packed)); + +#define HW_CARD_DISABLED 0x01 +#define SW_CARD_DISABLED 0x02 +#define CT_CARD_DISABLED 0x04 +#define RXON_CARD_DISABLED 0x10 + +struct iwl_ct_kill_config { + __le32 reserved; + __le32 critical_temperature_M; + __le32 critical_temperature_R; +} __attribute__ ((packed)); + +/* 1000, and 6x00 */ +struct iwl_ct_kill_throttling_config { + __le32 critical_temperature_exit; + __le32 reserved; + __le32 critical_temperature_enter; +} __attribute__ ((packed)); + +/****************************************************************************** + * (8) + * Scan Commands, Responses, Notifications: + * + *****************************************************************************/ + +#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0) +#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) + +/** + * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table + * + * One for each channel in the scan list. + * Each channel can independently select: + * 1) SSID for directed active scans + * 2) Txpower setting (for rate specified within Tx command) + * 3) How long to stay on-channel (behavior may be modified by quiet_time, + * quiet_plcp_th, good_CRC_th) + * + * To avoid uCode errors, make sure the following are true (see comments + * under struct iwl_scan_cmd about max_out_time and quiet_time): + * 1) If using passive_dwell (i.e. passive_dwell != 0): + * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) + * 2) quiet_time <= active_dwell + * 3) If restricting off-channel time (i.e. 
max_out_time !=0): + * passive_dwell < max_out_time + * active_dwell < max_out_time + */ + +/* FIXME: rename to AP1, remove tpc */ +struct iwl3945_scan_channel { + /* + * type is defined as: + * 0:0 1 = active, 0 = passive + * 1:4 SSID direct bit map; if a bit is set, then corresponding + * SSID IE is transmitted in probe request. + * 5:7 reserved + */ + u8 type; + u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */ + struct iwl3945_tx_power tpc; + __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ + __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ +} __attribute__ ((packed)); + +/* set number of direct probes u8 type */ +#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) + +struct iwl_scan_channel { + /* + * type is defined as: + * 0:0 1 = active, 0 = passive + * 1:20 SSID direct bit map; if a bit is set, then corresponding + * SSID IE is transmitted in probe request. + * 21:31 reserved + */ + __le32 type; + __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */ + u8 tx_gain; /* gain for analog radio */ + u8 dsp_atten; /* gain for DSP */ + __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ + __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ +} __attribute__ ((packed)); + +/* set number of direct probes __le32 type */ +#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) + +/** + * struct iwl_ssid_ie - directed scan network information element + * + * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in + * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; + * each channel may select different ssids from among the 20 (4) entries. + * SSID IEs get transmitted in reverse order of entry. + */ +struct iwl_ssid_ie { + u8 id; + u8 len; + u8 ssid[32]; +} __attribute__ ((packed)); + +#define PROBE_OPTION_MAX_3945 4 +#define PROBE_OPTION_MAX 20 +#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) +#define IWL_GOOD_CRC_TH cpu_to_le16(1) +#define IWL_MAX_SCAN_SIZE 1024 +#define IWL_MAX_CMD_SIZE 4096 +#define IWL_MAX_PROBE_REQUEST 200 + +/* + * REPLY_SCAN_CMD = 0x80 (command) + * + * The hardware scan command is very powerful; the driver can set it up to + * maintain (relatively) normal network traffic while doing a scan in the + * background. The max_out_time and suspend_time control the ratio of how + * long the device stays on an associated network channel ("service channel") + * vs. how long it's away from the service channel, i.e. tuned to other channels + * for scanning. + * + * max_out_time is the max time off-channel (in usec), and suspend_time + * is how long (in "extended beacon" format) that the scan is "suspended" + * after returning to the service channel. That is, suspend_time is the + * time that we stay on the service channel, doing normal work, between + * scan segments. The driver may set these parameters differently to support + * scanning when associated vs. not associated, and light vs. heavy traffic + * loads when associated. 
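+ *
+ * For example (illustrative only; "quiet_time_tu" and "beacon_int" are
+ * hypothetical names), the 4965 "extended beacon" encoding described below
+ * would pack a pause of quiet_time_tu TU roughly as:
+ *
+ *     suspend_time = cpu_to_le32(((quiet_time_tu / beacon_int) << 22) |
+ *                                ((quiet_time_tu % beacon_int) * 1024));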
+ * + * After receiving this command, the device's scan engine does the following; + * + * 1) Sends SCAN_START notification to driver + * 2) Checks to see if it has time to do scan for one channel + * 3) Sends NULL packet, with power-save (PS) bit set to 1, + * to tell AP that we're going off-channel + * 4) Tunes to first channel in scan list, does active or passive scan + * 5) Sends SCAN_RESULT notification to driver + * 6) Checks to see if it has time to do scan on *next* channel in list + * 7) Repeats 4-6 until it no longer has time to scan the next channel + * before max_out_time expires + * 8) Returns to service channel + * 9) Sends NULL packet with PS=0 to tell AP that we're back + * 10) Stays on service channel until suspend_time expires + * 11) Repeats entire process 2-10 until list is complete + * 12) Sends SCAN_COMPLETE notification + * + * For fast, efficient scans, the scan command also has support for staying on + * a channel for just a short time, if doing active scanning and getting no + * responses to the transmitted probe request. This time is controlled by + * quiet_time, and the number of received packets below which a channel is + * considered "quiet" is controlled by quiet_plcp_threshold. + * + * For active scanning on channels that have regulatory restrictions against + * blindly transmitting, the scan can listen before transmitting, to make sure + * that there is already legitimate activity on the channel. If enough + * packets are cleanly received on the channel (controlled by good_CRC_th, + * typical value 1), the scan engine starts transmitting probe requests. + * + * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. + * + * To avoid uCode errors, see timing restrictions described under + * struct iwl_scan_channel. + */ + +struct iwl3945_scan_cmd { + __le16 len; + u8 reserved0; + u8 channel_count; /* # channels in channel list */ + __le16 quiet_time; /* dwell only this # millisecs on quiet channel + * (only for active scan) */ + __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */ + __le16 good_CRC_th; /* passive -> active promotion threshold */ + __le16 reserved1; + __le32 max_out_time; /* max usec to be away from associated (service) + * channel */ + __le32 suspend_time; /* pause scan this long (in "extended beacon + * format") when returning to service channel: + * 3945; 31:24 # beacons, 19:0 additional usec, + * 4965; 31:22 # beacons, 21:0 additional usec. + */ + __le32 flags; /* RXON_FLG_* */ + __le32 filter_flags; /* RXON_FILTER_* */ + + /* For active scans (set to all-0s for passive scans). + * Does not include payload. Must specify Tx rate; no rate scaling. */ + struct iwl3945_tx_cmd tx_cmd; + + /* For directed active scans (set to all-0s otherwise) */ + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; + + /* + * Probe request frame, followed by channel list. + * + * Size of probe request frame is specified by byte count in tx_cmd. + * Channel list follows immediately after probe request frame. + * Number of channels in list is specified by channel_count. + * Each channel in list is of type: + * + * struct iwl3945_scan_channel channels[0]; + * + * NOTE: Only one band of channels can be scanned per pass. You + * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait + * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) + * before requesting another scan. 
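+ *
+ * Illustratively ("probe_req_len" is a hypothetical name for the byte
+ * count given in tx_cmd), the total command length is then roughly:
+ *
+ *     len = sizeof(struct iwl3945_scan_cmd) + probe_req_len +
+ *           channel_count * sizeof(struct iwl3945_scan_channel);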
+ */ + u8 data[0]; +} __attribute__ ((packed)); + +struct iwl_scan_cmd { + __le16 len; + u8 reserved0; + u8 channel_count; /* # channels in channel list */ + __le16 quiet_time; /* dwell only this # millisecs on quiet channel + * (only for active scan) */ + __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */ + __le16 good_CRC_th; /* passive -> active promotion threshold */ + __le16 rx_chain; /* RXON_RX_CHAIN_* */ + __le32 max_out_time; /* max usec to be away from associated (service) + * channel */ + __le32 suspend_time; /* pause scan this long (in "extended beacon + * format") when returning to service chnl: + * 3945; 31:24 # beacons, 19:0 additional usec, + * 4965; 31:22 # beacons, 21:0 additional usec. + */ + __le32 flags; /* RXON_FLG_* */ + __le32 filter_flags; /* RXON_FILTER_* */ + + /* For active scans (set to all-0s for passive scans). + * Does not include payload. Must specify Tx rate; no rate scaling. */ + struct iwl_tx_cmd tx_cmd; + + /* For directed active scans (set to all-0s otherwise) */ + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + + /* + * Probe request frame, followed by channel list. + * + * Size of probe request frame is specified by byte count in tx_cmd. + * Channel list follows immediately after probe request frame. + * Number of channels in list is specified by channel_count. + * Each channel in list is of type: + * + * struct iwl_scan_channel channels[0]; + * + * NOTE: Only one band of channels can be scanned per pass. You + * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait + * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) + * before requesting another scan. + */ + u8 data[0]; +} __attribute__ ((packed)); + +/* Can abort will notify by complete notification with abort status. 
*/ +#define CAN_ABORT_STATUS cpu_to_le32(0x1) +/* complete notification statuses */ +#define ABORT_STATUS 0x2 + +/* + * REPLY_SCAN_CMD = 0x80 (response) + */ +struct iwl_scanreq_notification { + __le32 status; /* 1: okay, 2: cannot fulfill request */ +} __attribute__ ((packed)); + +/* + * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) + */ +struct iwl_scanstart_notification { + __le32 tsf_low; + __le32 tsf_high; + __le32 beacon_timer; + u8 channel; + u8 band; + u8 reserved[2]; + __le32 status; +} __attribute__ ((packed)); + +#define SCAN_OWNER_STATUS 0x1; +#define MEASURE_OWNER_STATUS 0x2; + +#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ +/* + * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) + */ +struct iwl_scanresults_notification { + u8 channel; + u8 band; + u8 reserved[2]; + __le32 tsf_low; + __le32 tsf_high; + __le32 statistics[NUMBER_OF_STATISTICS]; +} __attribute__ ((packed)); + +/* + * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) + */ +struct iwl_scancomplete_notification { + u8 scanned_channels; + u8 status; + u8 reserved; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; +} __attribute__ ((packed)); + + +/****************************************************************************** + * (9) + * IBSS/AP Commands and Notifications: + * + *****************************************************************************/ + +/* + * BEACON_NOTIFICATION = 0x90 (notification only, not a command) + */ + +struct iwl3945_beacon_notif { + struct iwl3945_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +} __attribute__ ((packed)); + +struct iwl4965_beacon_notif { + struct iwl4965_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +} __attribute__ ((packed)); + +/* + * REPLY_TX_BEACON = 0x91 (command, has simple generic response) + */ + +struct iwl3945_tx_beacon_cmd { + struct iwl3945_tx_cmd tx; + __le16 tim_idx; + u8 tim_size; + u8 reserved1; + struct ieee80211_hdr frame[0]; /* beacon frame */ +} __attribute__ ((packed)); + +struct iwl_tx_beacon_cmd { + struct iwl_tx_cmd tx; + __le16 tim_idx; + u8 tim_size; + u8 reserved1; + struct ieee80211_hdr frame[0]; /* beacon frame */ +} __attribute__ ((packed)); + +/****************************************************************************** + * (10) + * Statistics Commands and Notifications: + * + *****************************************************************************/ + +#define IWL_TEMP_CONVERT 260 + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/* Used for passing to driver number of successes and failures per rate */ +struct rate_histogram { + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } success; + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } failed; +} __attribute__ ((packed)); + +/* statistics command response */ + +struct iwl39_statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; +} __attribute__ 
((packed)); + +struct iwl39_statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ +} __attribute__ ((packed)); + +struct iwl39_statistics_rx { + struct iwl39_statistics_rx_phy ofdm; + struct iwl39_statistics_rx_phy cck; + struct iwl39_statistics_rx_non_phy general; +} __attribute__ ((packed)); + +struct iwl39_statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; +} __attribute__ ((packed)); + +struct statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 reserved[4]; +} __attribute__ ((packed)); + +struct iwl39_statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; +} __attribute__ ((packed)); + +struct iwl39_statistics_general { + __le32 temperature; + struct statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct iwl39_statistics_div div; +} __attribute__ ((packed)); + +struct statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved3; +} __attribute__ ((packed)); + +struct statistics_rx_ht_phy { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +} __attribute__ ((packed)); + +#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) + +struct statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ + __le32 channel_beacons; /* beacons with our bss id and in our + * serving channel */ + __le32 num_missed_bcon; /* number of missed beacons */ + __le32 adc_rx_saturation_time; /* count in 0.8us units the time the + * ADC was in saturation */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ + __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ + __le32 interference_data_flag; /* flag for interference data + * availability. 1 when data is + * available. 
*/ + __le32 channel_load; /* counts RX Enable time in uSec */ + __le32 dsp_false_alarms; /* DSP false alarm (both OFDM + * and CCK) counter */ + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; +} __attribute__ ((packed)); + +struct statistics_rx { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy general; + struct statistics_rx_ht_phy ofdm_ht; +} __attribute__ ((packed)); + +/** + * struct statistics_tx_power - current tx power + * + * @ant_a: current tx power on chain a in 1/2 dB step + * @ant_b: current tx power on chain b in 1/2 dB step + * @ant_c: current tx power on chain c in 1/2 dB step + */ +struct statistics_tx_power { + u8 ant_a; + u8 ant_b; + u8 ant_c; + u8 reserved; +} __attribute__ ((packed)); + +struct statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; +} __attribute__ ((packed)); + +struct statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; + struct statistics_tx_non_phy_agg agg; + struct statistics_tx_power tx_power; + __le32 reserved1; +} __attribute__ ((packed)); + + +struct statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; + __le32 reserved1; + __le32 reserved2; +} __attribute__ ((packed)); + +struct statistics_general { + __le32 temperature; /* radio temperature */ + __le32 temperature_m; /* for 5000 and up, this is radio voltage */ + struct statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct statistics_div div; + __le32 rx_enable_counter; + /* + * num_of_sos_states: + * count the number of times we have to re-tune + * in order to get out of bad PHY status + */ + __le32 num_of_sos_states; + __le32 reserved2; + __le32 reserved3; +} __attribute__ ((packed)); + +#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) +#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) +#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2) + +/* + * REPLY_STATISTICS_CMD = 0x9c, + * 3945 and 4965 identical. + * + * This command triggers an immediate response containing uCode statistics. + * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. + * + * If the CLEAR_STATS configuration flag is set, uCode will clear its + * internal copy of the statistics (counters) after issuing the response. + * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). + * + * If the DISABLE_NOTIF configuration flag is set, uCode will not issue + * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag + * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. 
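+ *
+ * As an illustration only ("cmd" is a hypothetical local of type struct
+ * iwl_statistics_cmd, defined below), a one-shot dump that also clears the
+ * counters and silences the per-beacon notifications would set:
+ *
+ *     cmd.configuration_flags = IWL_STATS_CONF_CLEAR_STATS |
+ *                               IWL_STATS_CONF_DISABLE_NOTIF;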
+ */ +#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ +#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ +struct iwl_statistics_cmd { + __le32 configuration_flags; /* IWL_STATS_CONF_* */ +} __attribute__ ((packed)); + +/* + * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) + * + * By default, uCode issues this notification after receiving a beacon + * while associated. To disable this behavior, set DISABLE_NOTIF flag in the + * REPLY_STATISTICS_CMD 0x9c, above. + * + * Statistics counters continue to increment beacon after beacon, but are + * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD + * 0x9c with CLEAR_STATS bit set (see above). + * + * uCode also issues this notification during scans. uCode clears statistics + * appropriately so that each notification contains statistics for only the + * one channel that has just been scanned. + */ +#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) +#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) + +struct iwl3945_notif_statistics { + __le32 flag; + struct iwl39_statistics_rx rx; + struct iwl39_statistics_tx tx; + struct iwl39_statistics_general general; +} __attribute__ ((packed)); + +struct iwl_notif_statistics { + __le32 flag; + struct statistics_rx rx; + struct statistics_tx tx; + struct statistics_general general; +} __attribute__ ((packed)); + + +/* + * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) + * + * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed + * in regardless of how many missed beacons, which mean when driver receive the + * notification, inside the command, it can find all the beacons information + * which include number of total missed beacons, number of consecutive missed + * beacons, number of beacons received and number of beacons expected to + * receive. + * + * If uCode detected consecutive_missed_beacons > 5, it will reset the radio + * in order to bring the radio/PHY back to working state; which has no relation + * to when driver will perform sensitivity calibration. + * + * Driver should set it own missed_beacon_threshold to decide when to perform + * sensitivity calibration based on number of consecutive missed beacons in + * order to improve overall performance, especially in noisy environment. + * + */ + +#define IWL_MISSED_BEACON_THRESHOLD_MIN (1) +#define IWL_MISSED_BEACON_THRESHOLD_DEF (5) +#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF + +struct iwl_missed_beacon_notif { + __le32 consecutive_missed_beacons; + __le32 total_missed_becons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __attribute__ ((packed)); + + +/****************************************************************************** + * (11) + * Rx Calibration Commands: + * + * With the uCode used for open source drivers, most Tx calibration (except + * for Tx Power) and most Rx calibration is done by uCode during the + * "initialize" phase of uCode boot. 
Driver must calibrate only: + * + * 1) Tx power (depends on temperature), described elsewhere + * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas) + * 3) Receiver sensitivity (to optimize signal detection) + * + *****************************************************************************/ + +/** + * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) + * + * This command sets up the Rx signal detector for a sensitivity level that + * is high enough to lock onto all signals within the associated network, + * but low enough to ignore signals that are below a certain threshold, so as + * not to have too many "false alarms". False alarms are signals that the + * Rx DSP tries to lock onto, but then discards after determining that they + * are noise. + * + * The optimum number of false alarms is between 5 and 50 per 200 TUs + * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e. + * time listening, not transmitting). Driver must adjust sensitivity so that + * the ratio of actual false alarms to actual Rx time falls within this range. + * + * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each + * received beacon. These provide information to the driver to analyze the + * sensitivity. Don't analyze statistics that come in from scanning, or any + * other non-associated-network source. Pertinent statistics include: + * + * From "general" statistics (struct statistics_rx_non_phy): + * + * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level) + * Measure of energy of desired signal. Used for establishing a level + * below which the device does not detect signals. + * + * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB) + * Measure of background noise in silent period after beacon. + * + * channel_load + * uSecs of actual Rx time during beacon period (varies according to + * how much time was spent transmitting). + * + * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately: + * + * false_alarm_cnt + * Signal locks abandoned early (before phy-level header). + * + * plcp_err + * Signal locks abandoned late (during phy-level header). + * + * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from + * beacon to beacon, i.e. each value is an accumulation of all errors + * before and including the latest beacon. Values will wrap around to 0 + * after counting up to 2^32 - 1. Driver must differentiate vs. + * previous beacon's values to determine # false alarms in the current + * beacon period. + * + * Total number of false alarms = false_alarms + plcp_errs + * + * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd + * (notice that the start points for OFDM are at or close to settings for + * maximum sensitivity): + * + * START / MIN / MAX + * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120 + * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270 + * + * If actual rate of OFDM false alarms (+ plcp_errors) is too high + * (greater than 50 for each 204.8 msecs listening), reduce sensitivity + * by *adding* 1 to all 4 of the table entries above, up to the max for + * each entry. Conversely, if false alarm rate is too low (less than 5 + * for each 204.8 msecs listening), *subtract* 1 from each entry to + * increase sensitivity. + * + * For CCK sensitivity, keep track of the following: + * + * 1). 
20-beacon history of maximum background noise, indicated by + * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the + * 3 receivers. For any given beacon, the "silence reference" is + * the maximum of last 60 samples (20 beacons * 3 receivers). + * + * 2). 10-beacon history of strongest signal level, as indicated + * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers, + * i.e. the strength of the signal through the best receiver at the + * moment. These measurements are "upside down", with lower values + * for stronger signals, so max energy will be *minimum* value. + * + * Then for any given beacon, the driver must determine the *weakest* + * of the strongest signals; this is the minimum level that needs to be + * successfully detected, when using the best receiver at the moment. + * "Max cck energy" is the maximum (higher value means lower energy!) + * of the last 10 minima. Once this is determined, driver must add + * a little margin by adding "6" to it. + * + * 3). Number of consecutive beacon periods with too few false alarms. + * Reset this to 0 at the first beacon period that falls within the + * "good" range (5 to 50 false alarms per 204.8 milliseconds rx). + * + * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd + * (notice that the start points for CCK are at maximum sensitivity): + * + * START / MIN / MAX + * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200 + * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400 + * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100 + * + * If actual rate of CCK false alarms (+ plcp_errors) is too high + * (greater than 50 for each 204.8 msecs listening), method for reducing + * sensitivity is: + * + * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, + * up to max 400. + * + * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160, + * sensitivity has been reduced a significant amount; bring it up to + * a moderate 161. Otherwise, *add* 3, up to max 200. + * + * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160, + * sensitivity has been reduced only a moderate or small amount; + * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX, + * down to min 0. Otherwise (if gain has been significantly reduced), + * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value. + * + * b) Save a snapshot of the "silence reference". + * + * If actual rate of CCK false alarms (+ plcp_errors) is too low + * (less than 5 for each 204.8 msecs listening), method for increasing + * sensitivity is used only if: + * + * 1a) Previous beacon did not have too many false alarms + * 1b) AND difference between previous "silence reference" and current + * "silence reference" (prev - current) is 2 or more, + * OR 2) 100 or more consecutive beacon periods have had rate of + * less than 5 false alarms per 204.8 milliseconds rx time. + * + * Method for increasing sensitivity: + * + * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX, + * down to min 125. + * + * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, + * down to min 200. + * + * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100. + * + * If actual rate of CCK false alarms (+ plcp_errors) is within good range + * (between 5 and 50 for each 204.8 msecs listening): + * + * 1) Save a snapshot of the silence reference. 
+ * + * 2) If previous beacon had too many CCK false alarms (+ plcp_errors), + * give some extra margin to energy threshold by *subtracting* 8 + * from value in HD_MIN_ENERGY_CCK_DET_INDEX. + * + * For all cases (too few, too many, good range), make sure that the CCK + * detection threshold (energy) is below the energy level for robust + * detection over the past 10 beacon periods, the "Max cck energy". + * Lower values mean higher energy; this means making sure that the value + * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". + * + */ + +/* + * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd) + */ +#define HD_TABLE_SIZE (11) /* number of entries */ +#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ +#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) +#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) +#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) +#define HD_OFDM_ENERGY_TH_IN_INDEX (10) + +/* Control field in struct iwl_sensitivity_cmd */ +#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) +#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) + +/** + * struct iwl_sensitivity_cmd + * @control: (1) updates working table, (0) updates default table + * @table: energy threshold values, use HD_* as index into table + * + * Always use "1" in "control" to update uCode's working table and DSP. + */ +struct iwl_sensitivity_cmd { + __le16 control; /* always use "1" */ + __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ +} __attribute__ ((packed)); + + +/** + * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) + * + * This command sets the relative gains of 4965's 3 radio receiver chains. + * + * After the first association, driver should accumulate signal and noise + * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 + * beacons from the associated network (don't collect statistics that come + * in from scanning, or any other non-network source). + * + * DISCONNECTED ANTENNA: + * + * Driver should determine which antennas are actually connected, by comparing + * average beacon signal levels for the 3 Rx chains. Accumulate (add) the + * following values over 20 beacons, one accumulator for each of the chains + * a/b/c, from struct statistics_rx_non_phy: + * + * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB) + * + * Find the strongest signal from among a/b/c. Compare the other two to the + * strongest. If any signal is more than 15 dB (times 20, unless you + * divide the accumulated values by 20) below the strongest, the driver + * considers that antenna to be disconnected, and should not try to use that + * antenna/chain for Rx or Tx. If both A and B seem to be disconnected, + * driver should declare the stronger one as connected, and attempt to use it + * (A and B are the only 2 Tx chains!). + * + * + * RX BALANCE: + * + * Driver should balance the 3 receivers (but just the ones that are connected + * to antennas, see above) for gain, by comparing the average signal levels + * detected during the silence after each beacon (background noise). 
+ * Accumulate (add) the following values over 20 beacons, one accumulator for + * each of the chains a/b/c, from struct statistics_rx_non_phy: + * + * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB) + * + * Find the weakest background noise level from among a/b/c. This Rx chain + * will be the reference, with 0 gain adjustment. Attenuate other channels by + * finding noise difference: + * + * (accum_noise[i] - accum_noise[reference]) / 30 + * + * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB. + * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the + * driver should limit the difference results to a range of 0-3 (0-4.5 dB), + * and set bit 2 to indicate "reduce gain". The value for the reference + * (weakest) chain should be "0". + * + * diff_gain_[abc] bit fields: + * 2: (1) reduce gain, (0) increase gain + * 1-0: amount of gain, units of 1.5 dB + */ + +/* Phy calibration command for series */ + +enum { + IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, + IWL_PHY_CALIBRATE_DC_CMD = 8, + IWL_PHY_CALIBRATE_LO_CMD = 9, + IWL_PHY_CALIBRATE_TX_IQ_CMD = 11, + IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15, + IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16, + IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17, + IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD = 18, + IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD = 19, +}; + + +#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(0xffffffff) + +struct iwl_calib_cfg_elmnt_s { + __le32 is_enable; + __le32 start; + __le32 send_res; + __le32 apply_res; + __le32 reserved; +} __attribute__ ((packed)); + +struct iwl_calib_cfg_status_s { + struct iwl_calib_cfg_elmnt_s once; + struct iwl_calib_cfg_elmnt_s perd; + __le32 flags; +} __attribute__ ((packed)); + +struct iwl_calib_cfg_cmd { + struct iwl_calib_cfg_status_s ucd_calib_cfg; + struct iwl_calib_cfg_status_s drv_calib_cfg; + __le32 reserved1; +} __attribute__ ((packed)); + +struct iwl_calib_hdr { + u8 op_code; + u8 first_group; + u8 groups_num; + u8 data_valid; +} __attribute__ ((packed)); + +struct iwl_calib_cmd { + struct iwl_calib_hdr hdr; + u8 data[0]; +} __attribute__ ((packed)); + +/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ +struct iwl_calib_diff_gain_cmd { + struct iwl_calib_hdr hdr; + s8 diff_gain_a; /* see above */ + s8 diff_gain_b; + s8 diff_gain_c; + u8 reserved1; +} __attribute__ ((packed)); + +struct iwl_calib_xtal_freq_cmd { + struct iwl_calib_hdr hdr; + u8 cap_pin1; + u8 cap_pin2; + u8 pad[2]; +} __attribute__ ((packed)); + +/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ +struct iwl_calib_chain_noise_reset_cmd { + struct iwl_calib_hdr hdr; + u8 data[0]; +}; + +/* IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */ +struct iwl_calib_chain_noise_gain_cmd { + struct iwl_calib_hdr hdr; + u8 delta_gain_1; + u8 delta_gain_2; + u8 pad[2]; +} __attribute__ ((packed)); + +/****************************************************************************** + * (12) + * Miscellaneous Commands: + * + *****************************************************************************/ + +/* + * LEDs Command & Response + * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) + * + * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), + * this command turns it on or off, or sets up a periodic blinking cycle. 
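+ *
+ * Illustrative sketch (example values only; the "interval"/"id"/"on"/"off"
+ * semantics are those documented in struct iwl_led_cmd below, and
+ * iwl_send_cmd_pdu() is the driver's generic host-command helper):
+ *
+ *	struct iwl_led_cmd led_cmd = {
+ *		.interval = cpu_to_le32(1000),
+ *		.id = 2,
+ *		.on = 10,
+ *		.off = 10,
+ *	};
+ *	iwl_send_cmd_pdu(priv, REPLY_LEDS_CMD, sizeof(led_cmd), &led_cmd);
+ *
+ * Here id 2 selects the Link LED and equal on/off counts give a 50%
+ * duty-cycle blink; on = 10 with off = 0 would leave the LED solid on.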
+ */ +struct iwl_led_cmd { + __le32 interval; /* "interval" in uSec */ + u8 id; /* 1: Activity, 2: Link, 3: Tech */ + u8 off; /* # intervals off while blinking; + * "0", with >0 "on" value, turns LED on */ + u8 on; /* # intervals on while blinking; + * "0", regardless of "off", turns LED off */ + u8 reserved; +} __attribute__ ((packed)); + +/* + * station priority table entries + * also used as potential "events" value for both + * COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD + */ + +/* + * COEX events entry flag masks + * RP - Requested Priority + * WP - Win Medium Priority: priority assigned when the contention has been won + */ +#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG (0x1) +#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG (0x2) +#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG (0x4) + +#define COEX_CU_UNASSOC_IDLE_RP 4 +#define COEX_CU_UNASSOC_MANUAL_SCAN_RP 4 +#define COEX_CU_UNASSOC_AUTO_SCAN_RP 4 +#define COEX_CU_CALIBRATION_RP 4 +#define COEX_CU_PERIODIC_CALIBRATION_RP 4 +#define COEX_CU_CONNECTION_ESTAB_RP 4 +#define COEX_CU_ASSOCIATED_IDLE_RP 4 +#define COEX_CU_ASSOC_MANUAL_SCAN_RP 4 +#define COEX_CU_ASSOC_AUTO_SCAN_RP 4 +#define COEX_CU_ASSOC_ACTIVE_LEVEL_RP 4 +#define COEX_CU_RF_ON_RP 6 +#define COEX_CU_RF_OFF_RP 4 +#define COEX_CU_STAND_ALONE_DEBUG_RP 6 +#define COEX_CU_IPAN_ASSOC_LEVEL_RP 4 +#define COEX_CU_RSRVD1_RP 4 +#define COEX_CU_RSRVD2_RP 4 + +#define COEX_CU_UNASSOC_IDLE_WP 3 +#define COEX_CU_UNASSOC_MANUAL_SCAN_WP 3 +#define COEX_CU_UNASSOC_AUTO_SCAN_WP 3 +#define COEX_CU_CALIBRATION_WP 3 +#define COEX_CU_PERIODIC_CALIBRATION_WP 3 +#define COEX_CU_CONNECTION_ESTAB_WP 3 +#define COEX_CU_ASSOCIATED_IDLE_WP 3 +#define COEX_CU_ASSOC_MANUAL_SCAN_WP 3 +#define COEX_CU_ASSOC_AUTO_SCAN_WP 3 +#define COEX_CU_ASSOC_ACTIVE_LEVEL_WP 3 +#define COEX_CU_RF_ON_WP 3 +#define COEX_CU_RF_OFF_WP 3 +#define COEX_CU_STAND_ALONE_DEBUG_WP 6 +#define COEX_CU_IPAN_ASSOC_LEVEL_WP 3 +#define COEX_CU_RSRVD1_WP 3 +#define COEX_CU_RSRVD2_WP 3 + +#define COEX_UNASSOC_IDLE_FLAGS 0 +#define COEX_UNASSOC_MANUAL_SCAN_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_UNASSOC_AUTO_SCAN_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_CALIBRATION_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_PERIODIC_CALIBRATION_FLAGS 0 +/* + * COEX_CONNECTION_ESTAB: + * we need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network. + */ +#define COEX_CONNECTION_ESTAB_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \ + COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG) +#define COEX_ASSOCIATED_IDLE_FLAGS 0 +#define COEX_ASSOC_MANUAL_SCAN_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_ASSOC_AUTO_SCAN_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0 +#define COEX_RF_ON_FLAGS 0 +#define COEX_RF_OFF_FLAGS 0 +#define COEX_STAND_ALONE_DEBUG_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG) +#define COEX_IPAN_ASSOC_LEVEL_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \ + COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG) +#define COEX_RSRVD1_FLAGS 0 +#define COEX_RSRVD2_FLAGS 0 +/* + * COEX_CU_RF_ON is the event wrapping all radio ownership. + * We need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network. 
+ */ +#define COEX_CU_RF_ON_FLAGS \ + (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \ + COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG) + + +enum { + /* un-association part */ + COEX_UNASSOC_IDLE = 0, + COEX_UNASSOC_MANUAL_SCAN = 1, + COEX_UNASSOC_AUTO_SCAN = 2, + /* calibration */ + COEX_CALIBRATION = 3, + COEX_PERIODIC_CALIBRATION = 4, + /* connection */ + COEX_CONNECTION_ESTAB = 5, + /* association part */ + COEX_ASSOCIATED_IDLE = 6, + COEX_ASSOC_MANUAL_SCAN = 7, + COEX_ASSOC_AUTO_SCAN = 8, + COEX_ASSOC_ACTIVE_LEVEL = 9, + /* RF ON/OFF */ + COEX_RF_ON = 10, + COEX_RF_OFF = 11, + COEX_STAND_ALONE_DEBUG = 12, + /* IPAN */ + COEX_IPAN_ASSOC_LEVEL = 13, + /* reserved */ + COEX_RSRVD1 = 14, + COEX_RSRVD2 = 15, + COEX_NUM_OF_EVENTS = 16 +}; + +/* + * Coexistence WIFI/WIMAX Command + * COEX_PRIORITY_TABLE_CMD = 0x5a + * + */ +struct iwl_wimax_coex_event_entry { + u8 request_prio; + u8 win_medium_prio; + u8 reserved; + u8 flags; +} __attribute__ ((packed)); + +/* COEX flag masks */ + +/* Station table is valid */ +#define COEX_FLAGS_STA_TABLE_VALID_MSK (0x1) +/* UnMask wake up src at unassociated sleep */ +#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK (0x4) +/* UnMask wake up src at associated sleep */ +#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK (0x8) +/* Enable CoEx feature. */ +#define COEX_FLAGS_COEX_ENABLE_MSK (0x80) + +struct iwl_wimax_coex_cmd { + u8 flags; + u8 reserved[3]; + struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS]; +} __attribute__ ((packed)); + +/* + * Coexistence MEDIUM NOTIFICATION + * COEX_MEDIUM_NOTIFICATION = 0x5b + * + * notification from uCode to host to indicate medium changes + * + */ +/* + * status field + * bit 0 - 2: medium status + * bit 3: medium change indication + * bit 4 - 31: reserved + */ +/* status option values, (0 - 2 bits) */ +#define COEX_MEDIUM_BUSY (0x0) /* radio belongs to WiMAX */ +#define COEX_MEDIUM_ACTIVE (0x1) /* radio belongs to WiFi */ +#define COEX_MEDIUM_PRE_RELEASE (0x2) /* received radio release */ +#define COEX_MEDIUM_MSK (0x7) + +/* send notification status (1 bit) */ +#define COEX_MEDIUM_CHANGED (0x8) +#define COEX_MEDIUM_CHANGED_MSK (0x8) +#define COEX_MEDIUM_SHIFT (3) + +struct iwl_coex_medium_notification { + __le32 status; + __le32 events; +} __attribute__ ((packed)); + +/* + * Coexistence EVENT Command + * COEX_EVENT_CMD = 0x5c + * + * send from host to uCode for coex event request. + */ +/* flags options */ +#define COEX_EVENT_REQUEST_MSK (0x1) + +struct iwl_coex_event_cmd { + u8 flags; + u8 event; + __le16 reserved; +} __attribute__ ((packed)); + +struct iwl_coex_event_resp { + __le32 status; +} __attribute__ ((packed)); + + +/****************************************************************************** + * (13) + * Union of all expected notifications/responses: + * + *****************************************************************************/ + +struct iwl_rx_packet { + /* + * The first 4 bytes of the RX frame header contain both the RX frame + * size and some flags. 
+ * Bit fields: + * 31: flag flush RB request + * 30: flag ignore TC (terminal counter) request + * 29: flag fast IRQ request + * 28-14: Reserved + * 13-00: RX frame size + */ + __le32 len_n_flags; + struct iwl_cmd_header hdr; + union { + struct iwl3945_rx_frame rx_frame; + struct iwl3945_tx_resp tx_resp; + struct iwl3945_beacon_notif beacon_status; + + struct iwl_alive_resp alive_frame; + struct iwl_spectrum_notification spectrum_notif; + struct iwl_csa_notification csa_notif; + struct iwl_error_resp err_resp; + struct iwl_card_state_notif card_state_notif; + struct iwl_add_sta_resp add_sta; + struct iwl_rem_sta_resp rem_sta; + struct iwl_sleep_notification sleep_notif; + struct iwl_spectrum_resp spectrum; + struct iwl_notif_statistics stats; + struct iwl_compressed_ba_resp compressed_ba; + struct iwl_missed_beacon_notif missed_beacon; + struct iwl_coex_medium_notification coex_medium_notif; + struct iwl_coex_event_resp coex_event; + __le32 status; + u8 raw[0]; + } u; +} __attribute__ ((packed)); + +int iwl_agn_check_rxon_cmd(struct iwl_priv *priv); + +#endif /* __iwl_commands_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c new file mode 100644 index 0000000..112149e --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -0,0 +1,3443 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include +#include +#include +#include +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" /* FIXME: remove */ +#include "iwl-debug.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-power.h" +#include "iwl-sta.h" +#include "iwl-helpers.h" + + +MODULE_DESCRIPTION("iwl core"); +MODULE_VERSION(IWLWIFI_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + +/* + * set bt_coex_active to true, uCode will do kill/defer + * every time the priority line is asserted (BT is sending signals on the + * priority line in the PCIx). + * set bt_coex_active to false, uCode will ignore the BT activity and + * perform the normal operation + * + * User might experience transmit issue on some platform due to WiFi/BT + * co-exist problem. 
The possible behaviors are: + * Able to scan and finding all the available AP + * Not able to associate with any AP + * On those platforms, WiFi communication can be restored by set + * "bt_coex_active" module parameter to "false" + * + * default: bt_coex_active = true (BT_COEX_ENABLE) + */ +static bool bt_coex_active = true; +module_param(bt_coex_active, bool, S_IRUGO); +MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n"); + +static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { + {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP, + 0, COEX_UNASSOC_IDLE_FLAGS}, + {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP, + 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS}, + {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP, + 0, COEX_UNASSOC_AUTO_SCAN_FLAGS}, + {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP, + 0, COEX_CALIBRATION_FLAGS}, + {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP, + 0, COEX_PERIODIC_CALIBRATION_FLAGS}, + {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP, + 0, COEX_CONNECTION_ESTAB_FLAGS}, + {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP, + 0, COEX_ASSOCIATED_IDLE_FLAGS}, + {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP, + 0, COEX_ASSOC_MANUAL_SCAN_FLAGS}, + {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP, + 0, COEX_ASSOC_AUTO_SCAN_FLAGS}, + {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP, + 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS}, + {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS}, + {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS}, + {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP, + 0, COEX_STAND_ALONE_DEBUG_FLAGS}, + {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP, + 0, COEX_IPAN_ASSOC_LEVEL_FLAGS}, + {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS}, + {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS} +}; + +#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_SISO_##s##M_PLCP, \ + IWL_RATE_MIMO2_##s##M_PLCP,\ + IWL_RATE_MIMO3_##s##M_PLCP,\ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX } + +u32 iwl_debug_level; +EXPORT_SYMBOL(iwl_debug_level); + +static irqreturn_t iwl_isr(int irq, void *data); + +/* + * Parameter order: + * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + 
IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ + /* FIXME:RS: ^^ should be INV (legacy) */ +}; +EXPORT_SYMBOL(iwl_rates); + +/** + * translate ucode response to mac80211 tx status control values + */ +void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_rate *r = &info->control.rates[0]; + + info->antenna_sel_tx = + ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); + if (rate_n_flags & RATE_MCS_HT_MSK) + r->flags |= IEEE80211_TX_RC_MCS; + if (rate_n_flags & RATE_MCS_GF_MSK) + r->flags |= IEEE80211_TX_RC_GREEN_FIELD; + if (rate_n_flags & RATE_MCS_HT40_MSK) + r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (rate_n_flags & RATE_MCS_DUP_MSK) + r->flags |= IEEE80211_TX_RC_DUP_DATA; + if (rate_n_flags & RATE_MCS_SGI_MSK) + r->flags |= IEEE80211_TX_RC_SHORT_GI; + r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band); +} +EXPORT_SYMBOL(iwl_hwrate_to_tx_control); + +int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) +{ + int idx = 0; + + /* HT rate format */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + + if (idx >= IWL_RATE_MIMO3_6M_PLCP) + idx = idx - IWL_RATE_MIMO3_6M_PLCP; + else if (idx >= IWL_RATE_MIMO2_6M_PLCP) + idx = idx - IWL_RATE_MIMO2_6M_PLCP; + + idx += IWL_FIRST_OFDM_RATE; + /* skip 9M not supported in ht*/ + if (idx >= IWL_RATE_9M_INDEX) + idx += 1; + if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) + return idx; + + /* legacy rate format, search for match in table */ + } else { + for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++) + if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx; + } + + return -1; +} +EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx); + +int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) +{ + int idx = 0; + int band_offset = 0; + + /* HT rate format: mac80211 wants an MCS number, which is just LSB */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + return idx; + /* Legacy rate format, search for match in table */ + } else { + if (band == IEEE80211_BAND_5GHZ) + band_offset = IWL_FIRST_OFDM_RATE; + for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) + if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx - band_offset; + } + + return -1; +} + +u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant) +{ + int i; + u8 ind = ant; + for (i = 0; i < RATE_ANT_NUM - 1; i++) { + ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0; + if (priv->hw_params.valid_tx_ant & BIT(ind)) + return ind; + } + return ant; +} +EXPORT_SYMBOL(iwl_toggle_tx_ant); + +const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; +EXPORT_SYMBOL(iwl_bcast_addr); + + +/* This function both allocates and initializes hw and priv. 
*/ +struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, + struct ieee80211_ops *hw_ops) +{ + struct iwl_priv *priv; + + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + struct ieee80211_hw *hw = + ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops); + if (hw == NULL) { + printk(KERN_ERR "%s: Can not allocate network device\n", + cfg->name); + goto out; + } + + priv = hw->priv; + priv->hw = hw; + +out: + return hw; +} +EXPORT_SYMBOL(iwl_alloc_all); + +void iwl_hw_detect(struct iwl_priv *priv) +{ + priv->hw_rev = _iwl_read32(priv, CSR_HW_REV); + priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG); + pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id); +} +EXPORT_SYMBOL(iwl_hw_detect); + +int iwl_hw_nic_init(struct iwl_priv *priv) +{ + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + int ret; + + /* nic_init */ + spin_lock_irqsave(&priv->lock, flags); + priv->cfg->ops->lib->apm_ops.init(priv); + + /* Set interrupt coalescing calibration timer to default (512 usecs) */ + iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); + + spin_unlock_irqrestore(&priv->lock, flags); + + ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); + + priv->cfg->ops->lib->apm_ops.config(priv); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + ret = iwl_rx_queue_alloc(priv); + if (ret) { + IWL_ERR(priv, "Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl_rx_queue_reset(priv, rxq); + + iwl_rx_replenish(priv); + + iwl_rx_init(priv, rxq); + + spin_lock_irqsave(&priv->lock, flags); + + rxq->need_update = 1; + iwl_rx_queue_update_write_ptr(priv, rxq); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Allocate and init all Tx and Command queues */ + ret = iwl_txq_ctx_reset(priv); + if (ret) + return ret; + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} +EXPORT_SYMBOL(iwl_hw_nic_init); + +/* + * QoS support +*/ +void iwl_activate_qos(struct iwl_priv *priv, u8 force) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + priv->qos_data.def_qos_parm.qos_flags = 0; + + if (priv->qos_data.qos_cap.q_AP.queue_request && + !priv->qos_data.qos_cap.q_AP.txop_request) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_TXOP_TYPE_MSK; + if (priv->qos_data.qos_active) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; + + if (priv->current_ht_config.is_ht) + priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; + + if (force || iwl_is_associated(priv)) { + IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", + priv->qos_data.qos_active, + priv->qos_data.def_qos_parm.qos_flags); + + iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM, + sizeof(struct iwl_qosparam_cmd), + &priv->qos_data.def_qos_parm, NULL); + } +} +EXPORT_SYMBOL(iwl_activate_qos); + +/* + * AC CWmin CW max AIFSN TXOP Limit TXOP Limit + * (802.11b) (802.11a/g) + * AC_BK 15 1023 7 0 0 + * AC_BE 15 1023 3 0 0 + * AC_VI 7 15 2 6.016ms 3.008ms + * AC_VO 3 7 2 3.264ms 1.504ms + */ +void iwl_reset_qos(struct iwl_priv *priv) +{ + u16 cw_min = 15; + u16 cw_max = 1023; + u8 aifs = 2; + bool is_legacy = false; + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->lock, flags); + /* QoS always active in AP and ADHOC mode + * In STA mode wait for association + */ + if (priv->iw_mode == NL80211_IFTYPE_ADHOC || + priv->iw_mode == NL80211_IFTYPE_AP) + priv->qos_data.qos_active = 1; + else + 
priv->qos_data.qos_active = 0; + + /* check for legacy mode */ + if ((priv->iw_mode == NL80211_IFTYPE_ADHOC && + (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) || + (priv->iw_mode == NL80211_IFTYPE_STATION && + (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) { + cw_min = 31; + is_legacy = 1; + } + + if (priv->qos_data.qos_active) + aifs = 3; + + /* AC_BE */ + priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; + priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; + + if (priv->qos_data.qos_active) { + /* AC_BK */ + i = 1; + priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = 7; + priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + + /* AC_VI */ + i = 2; + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16((cw_min + 1) / 2 - 1); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].aifsn = 2; + if (is_legacy) + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(6016); + else + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(3008); + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + + /* AC_VO */ + i = 3; + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16((cw_min + 1) / 4 - 1); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16((cw_min + 1) / 2 - 1); + priv->qos_data.def_qos_parm.ac[i].aifsn = 2; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + if (is_legacy) + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(3264); + else + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(1504); + } else { + for (i = 1; i < 4; i++) { + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; + priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + } + } + IWL_DEBUG_QOS(priv, "set QoS to default \n"); + + spin_unlock_irqrestore(&priv->lock, flags); +} +EXPORT_SYMBOL(iwl_reset_qos); + +#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ +#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ +static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv, + struct ieee80211_sta_ht_cap *ht_info, + enum ieee80211_band band) +{ + u16 max_bit_rate = 0; + u8 rx_chains_num = priv->hw_params.rx_chains_num; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + + ht_info->cap = 0; + memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); + + ht_info->ht_supported = true; + + if (priv->cfg->ht_greenfield_support) + ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + max_bit_rate = MAX_BIT_RATE_20_MHZ; + if (priv->hw_params.ht40_channel & BIT(band)) { + ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_info->cap |= IEEE80211_HT_CAP_SGI_40; + ht_info->mcs.rx_mask[4] = 0x01; + max_bit_rate = MAX_BIT_RATE_40_MHZ; + } + + if (priv->cfg->mod_params->amsdu_size_8K) + ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; + ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; + + ht_info->mcs.rx_mask[0] = 0xFF; + if (rx_chains_num >= 2) + ht_info->mcs.rx_mask[1] = 0xFF; + if (rx_chains_num >= 3) + ht_info->mcs.rx_mask[2] = 0xFF; + 
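+	/*
+	 * Note: rx_mask[] follows the ieee80211_mcs_info layout -- bytes 0-2,
+	 * filled above, advertise MCS 0-23 (one byte of eight MCS values per
+	 * RX chain), and bit 0 of byte 4, set in the HT40 branch above,
+	 * advertises MCS 32 (the 40 MHz duplicate format).
+	 */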
+ /* Highest supported Rx data rate */ + max_bit_rate *= rx_chains_num; + WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); + ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); + + /* Tx MCS capabilities */ + ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (tx_chains_num != rx_chains_num) { + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= ((tx_chains_num - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } +} + +/** + * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom + */ +int iwlcore_init_geos(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *channels; + struct ieee80211_channel *geo_ch; + struct ieee80211_rate *rates; + int i = 0; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || + priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { + IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n"); + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + return 0; + } + + channels = kzalloc(sizeof(struct ieee80211_channel) * + priv->channel_count, GFP_KERNEL); + if (!channels) + return -ENOMEM; + + rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY), + GFP_KERNEL); + if (!rates) { + kfree(channels); + return -ENOMEM; + } + + /* 5.2GHz channels start after the 2.4GHz channels */ + sband = &priv->bands[IEEE80211_BAND_5GHZ]; + sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; + /* just OFDM */ + sband->bitrates = &rates[IWL_FIRST_OFDM_RATE]; + sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; + + if (priv->cfg->sku & IWL_SKU_N) + iwlcore_init_ht_hw_capab(priv, &sband->ht_cap, + IEEE80211_BAND_5GHZ); + + sband = &priv->bands[IEEE80211_BAND_2GHZ]; + sband->channels = channels; + /* OFDM & CCK */ + sband->bitrates = rates; + sband->n_bitrates = IWL_RATE_COUNT_LEGACY; + + if (priv->cfg->sku & IWL_SKU_N) + iwlcore_init_ht_hw_capab(priv, &sband->ht_cap, + IEEE80211_BAND_2GHZ); + + priv->ieee_channels = channels; + priv->ieee_rates = rates; + + for (i = 0; i < priv->channel_count; i++) { + ch = &priv->channel_info[i]; + + /* FIXME: might be removed if scan is OK */ + if (!is_channel_valid(ch)) + continue; + + if (is_channel_a_band(ch)) + sband = &priv->bands[IEEE80211_BAND_5GHZ]; + else + sband = &priv->bands[IEEE80211_BAND_2GHZ]; + + geo_ch = &sband->channels[sband->n_channels++]; + + geo_ch->center_freq = + ieee80211_channel_to_frequency(ch->channel); + geo_ch->max_power = ch->max_power_avg; + geo_ch->max_antenna_gain = 0xff; + geo_ch->hw_value = ch->channel; + + if (is_channel_valid(ch)) { + if (!(ch->flags & EEPROM_CHANNEL_IBSS)) + geo_ch->flags |= IEEE80211_CHAN_NO_IBSS; + + if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) + geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN; + + if (ch->flags & EEPROM_CHANNEL_RADAR) + geo_ch->flags |= IEEE80211_CHAN_RADAR; + + geo_ch->flags |= ch->ht40_extension_channel; + + if (ch->max_power_avg > priv->tx_power_device_lmt) + priv->tx_power_device_lmt = ch->max_power_avg; + } else { + geo_ch->flags |= IEEE80211_CHAN_DISABLED; + } + + IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", + ch->channel, geo_ch->center_freq, + is_channel_a_band(ch) ? "5.2" : "2.4", + geo_ch->flags & IEEE80211_CHAN_DISABLED ? + "restricted" : "valid", + geo_ch->flags); + } + + if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && + priv->cfg->sku & IWL_SKU_A) { + IWL_INFO(priv, "Incorrectly detected BG card as ABG. 
" + "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", + priv->pci_dev->device, + priv->pci_dev->subsystem_device); + priv->cfg->sku &= ~IWL_SKU_A; + } + + IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n", + priv->bands[IEEE80211_BAND_2GHZ].n_channels, + priv->bands[IEEE80211_BAND_5GHZ].n_channels); + + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + + return 0; +} +EXPORT_SYMBOL(iwlcore_init_geos); + +/* + * iwlcore_free_geos - undo allocations in iwlcore_init_geos + */ +void iwlcore_free_geos(struct iwl_priv *priv) +{ + kfree(priv->ieee_channels); + kfree(priv->ieee_rates); + clear_bit(STATUS_GEO_CONFIGURED, &priv->status); +} +EXPORT_SYMBOL(iwlcore_free_geos); + +/* + * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this + * function. + */ +void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info, + __le32 *tx_flags) +{ + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { + *tx_flags |= TX_CMD_FLG_RTS_MSK; + *tx_flags &= ~TX_CMD_FLG_CTS_MSK; + } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; + *tx_flags |= TX_CMD_FLG_CTS_MSK; + } +} +EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag); + +static bool is_single_rx_stream(struct iwl_priv *priv) +{ + return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC || + priv->current_ht_config.single_chain_sufficient; +} + +static u8 iwl_is_channel_extension(struct iwl_priv *priv, + enum ieee80211_band band, + u16 channel, u8 extension_chan_offset) +{ + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, band, channel); + if (!is_channel_valid(ch_info)) + return 0; + + if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) + return !(ch_info->ht40_extension_channel & + IEEE80211_CHAN_NO_HT40PLUS); + else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) + return !(ch_info->ht40_extension_channel & + IEEE80211_CHAN_NO_HT40MINUS); + + return 0; +} + +u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv, + struct ieee80211_sta_ht_cap *sta_ht_inf) +{ + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + + if (!ht_conf->is_ht || !ht_conf->is_40mhz) + return 0; + + /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 + * the bit will not set if it is pure 40MHz case + */ + if (sta_ht_inf) { + if (!sta_ht_inf->ht_supported) + return 0; + } +#ifdef CONFIG_IWLWIFI_DEBUG + if (priv->disable_ht40) + return 0; +#endif + return iwl_is_channel_extension(priv, priv->band, + le16_to_cpu(priv->staging_rxon.channel), + ht_conf->extension_chan_offset); +} +EXPORT_SYMBOL(iwl_is_ht40_tx_allowed); + +static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) +{ + u16 new_val = 0; + u16 beacon_factor = 0; + + beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; + new_val = beacon_val / beacon_factor; + + if (!new_val) + new_val = max_beacon_val; + + return new_val; +} + +void iwl_setup_rxon_timing(struct iwl_priv *priv) +{ + u64 tsf; + s32 interval_tm, rem; + unsigned long flags; + struct ieee80211_conf *conf = NULL; + u16 beacon_int; + + conf = ieee80211_get_hw_conf(priv->hw); + + spin_lock_irqsave(&priv->lock, flags); + priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp); + priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval); + + if (priv->iw_mode == NL80211_IFTYPE_STATION) { + beacon_int = priv->beacon_int; + priv->rxon_timing.atim_window = 0; + } else { + beacon_int = priv->vif->bss_conf.beacon_int; + + /* TODO: we need to get atim_window from 
upper stack + * for now we set to 0 */ + priv->rxon_timing.atim_window = 0; + } + + beacon_int = iwl_adjust_beacon_interval(beacon_int, + priv->hw_params.max_beacon_itrvl * 1024); + priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int); + + tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ + interval_tm = beacon_int * 1024; + rem = do_div(tsf, interval_tm); + priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem); + + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_ASSOC(priv, + "beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(priv->rxon_timing.beacon_interval), + le32_to_cpu(priv->rxon_timing.beacon_init_val), + le16_to_cpu(priv->rxon_timing.atim_window)); +} +EXPORT_SYMBOL(iwl_setup_rxon_timing); + +void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) +{ + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + +} +EXPORT_SYMBOL(iwl_set_rxon_hwcrypto); + +/** + * iwl_check_rxon_cmd - validate RXON structure is valid + * + * NOTE: This is really only useful during development and can eventually + * be #ifdef'd out once the driver is stable and folks aren't actively + * making changes + */ +int iwl_check_rxon_cmd(struct iwl_priv *priv) +{ + int error = 0; + int counter = 1; + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + error |= le32_to_cpu(rxon->flags & + (RXON_FLG_TGJ_NARROW_BAND_MSK | + RXON_FLG_RADAR_DETECT_MSK)); + if (error) + IWL_WARN(priv, "check 24G fields %d | %d\n", + counter++, error); + } else { + error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ? + 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK); + if (error) + IWL_WARN(priv, "check 52 fields %d | %d\n", + counter++, error); + error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK); + if (error) + IWL_WARN(priv, "check 52 CCK %d | %d\n", + counter++, error); + } + error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1; + if (error) + IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error); + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) && + ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0)); + if (error) + IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error); + + error |= (le16_to_cpu(rxon->assoc_id) > 2007); + if (error) + IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error); + + error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)); + if (error) + IWL_WARN(priv, "check CCK and short slot %d | %d\n", + counter++, error); + + error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)); + if (error) + IWL_WARN(priv, "check CCK & auto detect %d | %d\n", + counter++, error); + + error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK); + if (error) + IWL_WARN(priv, "check TGG and auto detect %d | %d\n", + counter++, error); + + if (error) + IWL_WARN(priv, "Tuning to channel %d\n", + le16_to_cpu(rxon->channel)); + + if (error) { + IWL_ERR(priv, "Not a valid iwl_rxon_assoc_cmd field values\n"); + return -1; + } + return 0; +} +EXPORT_SYMBOL(iwl_check_rxon_cmd); + +/** + * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed + * @priv: staging_rxon is compared to active_rxon + * + * If 
the RXON structure is changing enough to require a new tune, + * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that + * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. + */ +int iwl_full_rxon_required(struct iwl_priv *priv) +{ + + /* These items are only settable from the full RXON command */ + if (!(iwl_is_associated(priv)) || + compare_ether_addr(priv->staging_rxon.bssid_addr, + priv->active_rxon.bssid_addr) || + compare_ether_addr(priv->staging_rxon.node_addr, + priv->active_rxon.node_addr) || + compare_ether_addr(priv->staging_rxon.wlap_bssid_addr, + priv->active_rxon.wlap_bssid_addr) || + (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) || + (priv->staging_rxon.channel != priv->active_rxon.channel) || + (priv->staging_rxon.air_propagation != + priv->active_rxon.air_propagation) || + (priv->staging_rxon.ofdm_ht_single_stream_basic_rates != + priv->active_rxon.ofdm_ht_single_stream_basic_rates) || + (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates != + priv->active_rxon.ofdm_ht_dual_stream_basic_rates) || + (priv->staging_rxon.ofdm_ht_triple_stream_basic_rates != + priv->active_rxon.ofdm_ht_triple_stream_basic_rates) || + (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id)) + return 1; + + /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can + * be updated with the RXON_ASSOC command -- however only some + * flag transitions are allowed using RXON_ASSOC */ + + /* Check if we are not switching bands */ + if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) != + (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)) + return 1; + + /* Check if we are switching association toggle */ + if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) != + (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) + return 1; + + return 0; +} +EXPORT_SYMBOL(iwl_full_rxon_required); + +u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv) +{ + int i; + int rate_mask; + + /* Set rate mask*/ + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) + rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK; + else + rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK; + + /* Find lowest valid rate */ + for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; + i = iwl_rates[i].next_ieee) { + if (rate_mask & (1 << i)) + return iwl_rates[i].plcp; + } + + /* No valid rate was found. 
Assign the lowest one */ + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) + return IWL_RATE_1M_PLCP; + else + return IWL_RATE_6M_PLCP; +} +EXPORT_SYMBOL(iwl_rate_get_lowest_plcp); + +void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) +{ + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + + if (!ht_conf->is_ht) { + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | + RXON_FLG_HT40_PROT_MSK | + RXON_FLG_HT_PROT_MSK); + return; + } + + /* FIXME: if the definition of ht_protection changed, the "translation" + * will be needed for rxon->flags + */ + rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS); + + /* Set up channel bandwidth: + * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ + /* clear the HT channel mode before set the mode */ + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + if (iwl_is_ht40_tx_allowed(priv, NULL)) { + /* pure ht40 */ + if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { + rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; + /* Note: control channel is opposite of extension channel */ + switch (ht_conf->extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + } + } else { + /* Note: control channel is opposite of extension channel */ + switch (ht_conf->extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + default: + /* channel location only valid if in Mixed mode */ + IWL_ERR(priv, "invalid extension channel offset\n"); + break; + } + } + } else { + rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; + } + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + + IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " + "extension channel offset 0x%x\n", + le32_to_cpu(rxon->flags), ht_conf->ht_protection, + ht_conf->extension_chan_offset); + return; +} +EXPORT_SYMBOL(iwl_set_rxon_ht); + +#define IWL_NUM_RX_CHAINS_MULTIPLE 3 +#define IWL_NUM_RX_CHAINS_SINGLE 2 +#define IWL_NUM_IDLE_CHAINS_DUAL 2 +#define IWL_NUM_IDLE_CHAINS_SINGLE 1 + +/* + * Determine how many receiver/antenna chains to use. + * + * More provides better reception via diversity. Fewer saves power + * at the expense of throughput, but only when not in powersave to + * start with. + * + * MIMO (dual stream) requires at least 2, but works better with 3. + * This does not determine *which* chains to use, just how many. + */ +static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) +{ + /* # of Rx chains to use when expecting MIMO. */ + if (is_single_rx_stream(priv)) + return IWL_NUM_RX_CHAINS_SINGLE; + else + return IWL_NUM_RX_CHAINS_MULTIPLE; +} + +/* + * When we are in power saving mode, unless device support spatial + * multiplexing power save, use the active count for rx chain count. 
+ */ +static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) +{ + /* # Rx chains when idling, depending on SMPS mode */ + switch (priv->current_ht_config.smps) { + case IEEE80211_SMPS_STATIC: + case IEEE80211_SMPS_DYNAMIC: + return IWL_NUM_IDLE_CHAINS_SINGLE; + case IEEE80211_SMPS_OFF: + return active_cnt; + default: + WARN(1, "invalid SMPS mode %d", + priv->current_ht_config.smps); + return active_cnt; + } +} + +/* up to 4 chains */ +static u8 iwl_count_chain_bitmap(u32 chain_bitmap) +{ + u8 res; + res = (chain_bitmap & BIT(0)) >> 0; + res += (chain_bitmap & BIT(1)) >> 1; + res += (chain_bitmap & BIT(2)) >> 2; + res += (chain_bitmap & BIT(3)) >> 3; + return res; +} + +/** + * iwl_is_monitor_mode - Determine if interface in monitor mode + * + * priv->iw_mode is set in add_interface, but add_interface is + * never called for monitor mode. The only way mac80211 informs us about + * monitor mode is through configuring filters (call to configure_filter). + */ +bool iwl_is_monitor_mode(struct iwl_priv *priv) +{ + return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK); +} +EXPORT_SYMBOL(iwl_is_monitor_mode); + +/** + * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image + * + * Selects how many and which Rx receivers/antennas/chains to use. + * This should not be used for scan command ... it puts data in wrong place. + */ +void iwl_set_rxon_chain(struct iwl_priv *priv) +{ + bool is_single = is_single_rx_stream(priv); + bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); + u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; + u32 active_chains; + u16 rx_chain; + + /* Tell uCode which antennas are actually connected. + * Before first association, we assume all antennas are connected. + * Just after first association, iwl_chain_noise_calibration() + * checks which antennas actually *are* connected. */ + if (priv->chain_noise_data.active_chains) + active_chains = priv->chain_noise_data.active_chains; + else + active_chains = priv->hw_params.valid_rx_ant; + + rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; + + /* How many receivers should we use? */ + active_rx_cnt = iwl_get_active_rx_chain_count(priv); + idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt); + + + /* correct rx chain count according hw settings + * and chain noise calibration + */ + valid_rx_cnt = iwl_count_chain_bitmap(active_chains); + if (valid_rx_cnt < active_rx_cnt) + active_rx_cnt = valid_rx_cnt; + + if (valid_rx_cnt < idle_rx_cnt) + idle_rx_cnt = valid_rx_cnt; + + rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; + rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; + + /* copied from 'iwl_bg_request_scan()' */ + /* Force use of chains B and C (0x6) for Rx for 4965 + * Avoid A (0x1) because of its off-channel reception on A-band. 
+ * MIMO is not used here, but value is required */ + if (iwl_is_monitor_mode(priv) && + !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) && + ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) { + rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS; + rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS; + rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; + rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; + } + + priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain); + + if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) + priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; + else + priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; + + IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n", + priv->staging_rxon.rx_chain, + active_rx_cnt, idle_rx_cnt); + + WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || + active_rx_cnt < idle_rx_cnt); +} +EXPORT_SYMBOL(iwl_set_rxon_chain); + +/** + * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON + * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz + * @channel: Any channel valid for the requested phymode + + * In addition to setting the staging RXON, priv->phymode is also set. + * + * NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the staging RXON flag structure based on the phymode + */ +int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch) +{ + enum ieee80211_band band = ch->band; + u16 channel = ieee80211_frequency_to_channel(ch->center_freq); + + if (!iwl_get_channel_info(priv, band, channel)) { + IWL_DEBUG_INFO(priv, "Could not set channel to %d [%d]\n", + channel, band); + return -EINVAL; + } + + if ((le16_to_cpu(priv->staging_rxon.channel) == channel) && + (priv->band == band)) + return 0; + + priv->staging_rxon.channel = cpu_to_le16(channel); + if (band == IEEE80211_BAND_5GHZ) + priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK; + else + priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; + + priv->band = band; + + IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band); + + return 0; +} +EXPORT_SYMBOL(iwl_set_rxon_channel); + +void iwl_set_flags_for_band(struct iwl_priv *priv, + enum ieee80211_band band) +{ + if (band == IEEE80211_BAND_5GHZ) { + priv->staging_rxon.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK + | RXON_FLG_CCK_MSK); + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from iwl_post_associate() */ + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; + priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK; + priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK; + } +} + +/* + * initialize rxon structure with default values from eeprom + */ +void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode) +{ + const struct iwl_channel_info *ch_info; + + memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); + + switch (mode) { + case NL80211_IFTYPE_AP: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; + break; + + case NL80211_IFTYPE_STATION: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; + priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case NL80211_IFTYPE_ADHOC: + priv->staging_rxon.dev_type = 
RXON_DEV_TYPE_IBSS; + priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + default: + IWL_ERR(priv, "Unsupported interface type %d\n", mode); + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(priv->hw)->short_preamble) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ch_info = iwl_get_channel_info(priv, priv->band, + le16_to_cpu(priv->active_rxon.channel)); + + if (!ch_info) + ch_info = &priv->channel_info[0]; + + /* + * in some case A channels are all non IBSS + * in this case force B/G channel + */ + if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) && + !(is_channel_ibss(ch_info))) + ch_info = &priv->channel_info[0]; + + priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); + priv->band = ch_info->band; + + iwl_set_flags_for_band(priv, priv->band); + + priv->staging_rxon.ofdm_basic_rates = + (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + priv->staging_rxon.cck_basic_rates = + (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + /* clear both MIX and PURE40 mode flag */ + priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | + RXON_FLG_CHANNEL_MODE_PURE_40); + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN); + priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff; + priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff; + priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff; +} +EXPORT_SYMBOL(iwl_connection_init_rx_config); + +static void iwl_set_rate(struct iwl_priv *priv) +{ + const struct ieee80211_supported_band *hw = NULL; + struct ieee80211_rate *rate; + int i; + + hw = iwl_get_hw_mode(priv, priv->band); + if (!hw) { + IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n"); + return; + } + + priv->active_rate = 0; + priv->active_rate_basic = 0; + + for (i = 0; i < hw->n_bitrates; i++) { + rate = &(hw->bitrates[i]); + if (rate->hw_value < IWL_RATE_COUNT_LEGACY) + priv->active_rate |= (1 << rate->hw_value); + } + + IWL_DEBUG_RATE(priv, "Set active_rate = %0x, active_rate_basic = %0x\n", + priv->active_rate, priv->active_rate_basic); + + /* + * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK) + * otherwise set it to the default of all CCK rates and 6, 12, 24 for + * OFDM + */ + if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK) + priv->staging_rxon.cck_basic_rates = + ((priv->active_rate_basic & + IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF; + else + priv->staging_rxon.cck_basic_rates = + (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK) + priv->staging_rxon.ofdm_basic_rates = + ((priv->active_rate_basic & + (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >> + IWL_FIRST_OFDM_RATE) & 0xFF; + else + priv->staging_rxon.ofdm_basic_rates = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; +} + +void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; + struct iwl_csa_notification *csa = &(pkt->u.csa_notif); + + if (priv->switch_rxon.switch_in_progress) { + if (!le32_to_cpu(csa->status) && + (csa->channel == priv->switch_rxon.channel)) { + rxon->channel = csa->channel; + 
priv->staging_rxon.channel = csa->channel; + IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", + le16_to_cpu(csa->channel)); + } else + IWL_ERR(priv, "CSA notif (fail) : channel %d\n", + le16_to_cpu(csa->channel)); + + priv->switch_rxon.switch_in_progress = false; + } +} +EXPORT_SYMBOL(iwl_rx_csa); + +#ifdef CONFIG_IWLWIFI_DEBUG +void iwl_print_rx_config_cmd(struct iwl_priv *priv) +{ + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + + IWL_DEBUG_RADIO(priv, "RX CONFIG:\n"); + iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); + IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); + IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n", + le32_to_cpu(rxon->filter_flags)); + IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type); + IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n", + rxon->ofdm_basic_rates); + IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); + IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr); + IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); + IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); +} +EXPORT_SYMBOL(iwl_print_rx_config_cmd); +#endif +/** + * iwl_irq_handle_error - called for HW or SW error interrupt from card + */ +void iwl_irq_handle_error(struct iwl_priv *priv) +{ + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + /* Cancel currently queued command. */ + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + + priv->cfg->ops->lib->dump_nic_error_log(priv); + if (priv->cfg->ops->lib->dump_csr) + priv->cfg->ops->lib->dump_csr(priv); + if (priv->cfg->ops->lib->dump_fh) + priv->cfg->ops->lib->dump_fh(priv, NULL, false); + priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false); +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) + iwl_print_rx_config_cmd(priv); +#endif + + wake_up_interruptible(&priv->wait_command_queue); + + /* Keep the restart process from trying to send host + * commands by clearing the INIT status bit */ + clear_bit(STATUS_READY, &priv->status); + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG(priv, IWL_DL_FW_ERRORS, + "Restarting adapter due to uCode error.\n"); + + if (priv->cfg->mod_params->restart_fw) + queue_work(priv->workqueue, &priv->restart); + } +} +EXPORT_SYMBOL(iwl_irq_handle_error); + +int iwl_apm_stop_master(struct iwl_priv *priv) +{ + int ret = 0; + + /* stop device's busmaster DMA activity */ + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (ret) + IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n"); + + IWL_DEBUG_INFO(priv, "stop master\n"); + + return ret; +} +EXPORT_SYMBOL(iwl_apm_stop_master); + +void iwl_apm_stop(struct iwl_priv *priv) +{ + IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n"); + + /* Stop device's DMA activity */ + iwl_apm_stop_master(priv); + + /* Reset the entire device */ + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + udelay(10); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
+	 */
+	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(iwl_apm_stop);
+
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+int iwl_apm_init(struct iwl_priv *priv)
+{
+	int ret = 0;
+	u16 lctl;
+
+	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
+
+	/*
+	 * Use "set_bit" below rather than "write", to preserve any hardware
+	 * bits already set by default after reset.
+	 */
+
+	/* Disable L0S exit timer (platform NMI Work/Around) */
+	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+	/*
+	 * Disable L0s without affecting L1;
+	 * don't wait for ICH L0s (ICH bug W/A)
+	 */
+	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+	/* Set FH wait threshold to maximum (HW error during stress W/A) */
+	iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+	/*
+	 * Enable HAP INTA (interrupt from management bus) to
+	 * wake device's PCI Express link L1a -> L0s
+	 * NOTE: This is a no-op for 3945 (non-existent bit)
+	 */
+	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+	/*
+	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
+	 * If so (likely), disable L0S, so device moves directly L0->L1;
+	 *    costs negligible amount of power savings.
+	 * If not (unlikely), enable L0S, so there is at least some
+	 *    power savings, even without L1.
+	 */
+	if (priv->cfg->set_l0s) {
+		lctl = iwl_pcie_link_ctl(priv);
+		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+			/* L1-ASPM enabled; disable(!) L0S */
+			iwl_set_bit(priv, CSR_GIO_REG,
+					CSR_GIO_REG_VAL_L0S_ENABLED);
+			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
+		} else {
+			/* L1-ASPM disabled; enable(!) L0S */
+			iwl_clear_bit(priv, CSR_GIO_REG,
+					CSR_GIO_REG_VAL_L0S_ENABLED);
+			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
+		}
+	}
+
+	/* Configure analog phase-lock-loop before activating to D0A */
+	if (priv->cfg->pll_cfg_val)
+		iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is supported, e.g. iwl_write_prph()
+	 * and accesses to uCode SRAM.
+	 */
+	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+	if (ret < 0) {
+		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
+		goto out;
+	}
+
+	/*
+	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+	 * BSM (Bootstrap State Machine) is only in 3945 and 4965;
+	 * later devices (i.e. 5000 and later) have non-volatile SRAM,
+	 * and don't need BSM to restore data after power-saving sleep.
+	 *
+	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+	 * do not disable clocks.  This preserves any hardware bits already
+	 * set by default in "CLK_CTRL_REG" after reset.
+ */ + if (priv->cfg->use_bsm) + iwl_write_prph(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); + else + iwl_write_prph(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + udelay(20); + + /* Disable L1-Active */ + iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + +out: + return ret; +} +EXPORT_SYMBOL(iwl_apm_init); + + + +void iwl_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 *filter_flags = &priv->staging_rxon.filter_flags; + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) { + if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) + *filter_flags |= RXON_FILTER_PROMISC_MSK; + else + *filter_flags &= ~RXON_FILTER_PROMISC_MSK; + } + if (changed_flags & FIF_ALLMULTI) { + if (*total_flags & FIF_ALLMULTI) + *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK; + else + *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK; + } + if (changed_flags & FIF_CONTROL) { + if (*total_flags & FIF_CONTROL) + *filter_flags |= RXON_FILTER_CTL2HOST_MSK; + else + *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK; + } + if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + *filter_flags |= RXON_FILTER_BCON_AWARE_MSK; + else + *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK; + } + + /* We avoid iwl_commit_rxon here to commit the new filter flags + * since mac80211 will call ieee80211_hw_config immediately. + * (mc_list is not supported at this time). Otherwise, we need to + * queue a background iwl_commit_rxon work. + */ + + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} +EXPORT_SYMBOL(iwl_configure_filter); + +int iwl_set_hw_params(struct iwl_priv *priv) +{ + priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; + if (priv->cfg->mod_params->amsdu_size_8K) + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K); + else + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K); + + priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL; + + if (priv->cfg->mod_params->disable_11n) + priv->cfg->sku &= ~IWL_SKU_N; + + /* Device-specific setup */ + return priv->cfg->ops->lib->set_hw_params(priv); +} +EXPORT_SYMBOL(iwl_set_hw_params); + +int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) +{ + int ret = 0; + s8 prev_tx_power = priv->tx_power_user_lmt; + + if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) { + IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n", + tx_power, + IWL_TX_POWER_TARGET_POWER_MIN); + return -EINVAL; + } + + if (tx_power > priv->tx_power_device_lmt) { + IWL_WARN(priv, + "Requested user TXPOWER %d above upper limit %d.\n", + tx_power, priv->tx_power_device_lmt); + return -EINVAL; + } + + if (priv->tx_power_user_lmt != tx_power) + force = true; + + /* if nic is not up don't send command */ + if (iwl_is_ready_rf(priv)) { + priv->tx_power_user_lmt = tx_power; + if (force && priv->cfg->ops->lib->send_tx_power) + ret = priv->cfg->ops->lib->send_tx_power(priv); + else if (!priv->cfg->ops->lib->send_tx_power) + ret = -EOPNOTSUPP; + /* + * if fail to set tx_power, restore the orig. 
tx power
+		 */
+		if (ret)
+			priv->tx_power_user_lmt = prev_tx_power;
+	}
+
+	/*
+	 * Even though this is an async host command, the command
+	 * will always report success from the uCode.
+	 * So once the driver has successfully placed the command into
+	 * the queue, it can use priv->tx_power_user_lmt to reflect
+	 * the current tx power.
+	 */
+	return ret;
+}
+EXPORT_SYMBOL(iwl_set_tx_power);
+
+#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* Free dram table */
+void iwl_free_isr_ict(struct iwl_priv *priv)
+{
+	if (priv->ict_tbl_vir) {
+		dma_free_coherent(&priv->pci_dev->dev,
+				  (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+				  priv->ict_tbl_vir, priv->ict_tbl_dma);
+		priv->ict_tbl_vir = NULL;
+	}
+}
+EXPORT_SYMBOL(iwl_free_isr_ict);
+
+
+/* Allocate the DRAM shared table; it is PAGE_SIZE aligned.
+ * Also reset all data related to the ICT table interrupt.
+ */
+int iwl_alloc_isr_ict(struct iwl_priv *priv)
+{
+
+	if (priv->cfg->use_isr_legacy)
+		return 0;
+	/* allocate shared data table */
+	priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
+					(sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+					&priv->ict_tbl_dma, GFP_KERNEL);
+	if (!priv->ict_tbl_vir)
+		return -ENOMEM;
+
+	/* align table to PAGE_SIZE boundary */
+	priv->aligned_ict_tbl_dma = ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
+
+	IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
+		      (unsigned long long)priv->ict_tbl_dma,
+		      (unsigned long long)priv->aligned_ict_tbl_dma,
+		      (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
+
+	priv->ict_tbl = priv->ict_tbl_vir +
+			(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma);
+
+	IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
+		      priv->ict_tbl, priv->ict_tbl_vir,
+		      (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
+
+	/* reset table and index to all 0 */
+	memset(priv->ict_tbl_vir, 0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+	priv->ict_index = 0;
+
+	/* add periodic RX interrupt */
+	priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
+	return 0;
+}
+EXPORT_SYMBOL(iwl_alloc_isr_ict);
+
+/* Device is going up: inform it about using the ICT interrupt table,
+ * and tell the driver to start using the ICT interrupt.
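+ * (Added note, derived from the code below: the page-aligned table address,
+ * aligned_ict_tbl_dma >> PAGE_SHIFT, is written to CSR_DRAM_INT_TBL_REG
+ * together with the enable and wrap-check bits.)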
+ */
+int iwl_reset_ict(struct iwl_priv *priv)
+{
+	u32 val;
+	unsigned long flags;
+
+	if (!priv->ict_tbl_vir)
+		return 0;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	iwl_disable_interrupts(priv);
+
+	memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+
+	val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
+
+	val |= CSR_DRAM_INT_TBL_ENABLE;
+	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+
+	IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
+			"aligned dma address %Lx\n",
+			val, (unsigned long long)priv->aligned_ict_tbl_dma);
+
+	iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
+	priv->use_ict = true;
+	priv->ict_index = 0;
+	iwl_write32(priv, CSR_INT, priv->inta_mask);
+	iwl_enable_interrupts(priv);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(iwl_reset_ict);
+
+/* Device is going down; disable ICT interrupt usage */
+void iwl_disable_ict(struct iwl_priv *priv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->use_ict = false;
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+EXPORT_SYMBOL(iwl_disable_ict);
+
+/* Interrupt handler using the ICT table.  With this handler the driver no
+ * longer reads the INTA register to learn about the device's interrupts,
+ * since reading that register is expensive.  Instead, the device writes its
+ * interrupts into the ICT DRAM table, increments the index and fires an
+ * interrupt to the driver.  The driver ORs all ICT table entries from the
+ * current index up to the first entry with value 0; the result is the
+ * interrupt bitmap that needs to be serviced.  The driver then sets those
+ * entries back to 0 and updates the index.
+ */
+irqreturn_t iwl_isr_ict(int irq, void *data)
+{
+	struct iwl_priv *priv = data;
+	u32 inta, inta_mask;
+	u32 val = 0;
+
+	if (!priv)
+		return IRQ_NONE;
+
+	/* dram interrupt table not set yet,
+	 * use legacy interrupt.
+	 */
+	if (!priv->use_ict)
+		return iwl_isr(irq, data);
+
+	spin_lock(&priv->lock);
+
+	/* Disable (but don't clear!) interrupts here to avoid
+	 * back-to-back ISRs and sporadic interrupts from our NIC.
+	 * If we have something to service, the tasklet will re-enable ints.
+	 * If we *don't* have something, we'll re-enable before leaving here.
+	 */
+	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
+	iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	if (!priv->ict_tbl[priv->ict_index]) {
+		IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+		goto none;
+	}
+
+	/* read all non-zero entries, starting at ict_index */
+	while (priv->ict_tbl[priv->ict_index]) {
+
+		val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
+		IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
+				priv->ict_index,
+				le32_to_cpu(priv->ict_tbl[priv->ict_index]));
+		priv->ict_tbl[priv->ict_index] = 0;
+		priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
+						     ICT_COUNT);
+
+	}
+
+	/* We should not get this value, just ignore it. */
+	if (val == 0xffffffff)
+		val = 0;
+
+	/*
+	 * This is a w/a for a h/w bug.  The h/w bug may cause the Rx bit
+	 * (bit 15 before shifting it to 31) to clear when using interrupt
+	 * coalescing.  Fortunately, bits 18 and 19 stay set when this
+	 * happens, so we use them to decide on the real state of the Rx bit.
+	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
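+	 * (Added worked example of the reconstruction below: a table value of
+	 *  val = 0x00008001 yields inta = 0x80000001, i.e. bit 15 of the ICT
+	 *  value ends up as bit 31 of inta.)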
+	 */
+	if (val & 0xC0000)
+		val |= 0x8000;
+
+	inta = (0xff & val) | ((0xff00 & val) << 16);
+	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
+			inta, inta_mask, val);
+
+	inta &= priv->inta_mask;
+	priv->inta |= inta;
+
+	/* iwl_irq_tasklet() will service interrupts and re-enable them */
+	if (likely(inta))
+		tasklet_schedule(&priv->irq_tasklet);
+	else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) {
+		/* Re-enable interrupts here only if they were disabled by
+		 * this handler and no tasklet was scheduled; if a tasklet
+		 * was scheduled, it will re-enable them itself.
+		 */
+		iwl_enable_interrupts(priv);
+	}
+
+	spin_unlock(&priv->lock);
+	return IRQ_HANDLED;
+
+ none:
+	/* re-enable interrupts here since we don't have anything to service.
+	 * only re-enable if disabled by irq.
+	 */
+	if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
+		iwl_enable_interrupts(priv);
+
+	spin_unlock(&priv->lock);
+	return IRQ_NONE;
+}
+EXPORT_SYMBOL(iwl_isr_ict);
+
+
+static irqreturn_t iwl_isr(int irq, void *data)
+{
+	struct iwl_priv *priv = data;
+	u32 inta, inta_mask;
+#ifdef CONFIG_IWLWIFI_DEBUG
+	u32 inta_fh;
+#endif
+	if (!priv)
+		return IRQ_NONE;
+
+	spin_lock(&priv->lock);
+
+	/* Disable (but don't clear!) interrupts here to avoid
+	 * back-to-back ISRs and sporadic interrupts from our NIC.
+	 * If we have something to service, the tasklet will re-enable ints.
+	 * If we *don't* have something, we'll re-enable before leaving here. */
+	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
+	iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+	/* Discover which interrupts are active/pending */
+	inta = iwl_read32(priv, CSR_INT);
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	if (!inta) {
+		IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+		goto none;
+	}
+
+	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+		/* Hardware disappeared. It might have already raised
+		 * an interrupt */
+		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+		goto unplugged;
+	}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+		IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
+			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
+	}
+#endif
+
+	priv->inta |= inta;
+	/* iwl_irq_tasklet() will service interrupts and re-enable them */
+	if (likely(inta))
+		tasklet_schedule(&priv->irq_tasklet);
+	else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
+		iwl_enable_interrupts(priv);
+
+ unplugged:
+	spin_unlock(&priv->lock);
+	return IRQ_HANDLED;
+
+ none:
+	/* re-enable interrupts here since we don't have anything to service. */
+	/* only re-enable if disabled by irq and no tasklet was scheduled. */
+	if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
+		iwl_enable_interrupts(priv);
+
+	spin_unlock(&priv->lock);
+	return IRQ_NONE;
+}
+
+irqreturn_t iwl_isr_legacy(int irq, void *data)
+{
+	struct iwl_priv *priv = data;
+	u32 inta, inta_mask;
+	u32 inta_fh;
+	if (!priv)
+		return IRQ_NONE;
+
+	spin_lock(&priv->lock);
+
+	/* Disable (but don't clear!) interrupts here to avoid
+	 * back-to-back ISRs and sporadic interrupts from our NIC.
+	 * If we have something to service, the tasklet will re-enable ints.
+	 * If we *don't* have something, we'll re-enable before leaving here. */
+	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
+	iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+	/* Discover which interrupts are active/pending */
+	inta = iwl_read32(priv, CSR_INT);
+	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	if (!inta && !inta_fh) {
+		IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
+		goto none;
+	}
+
+	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+		/* Hardware disappeared. It might have already raised
+		 * an interrupt */
+		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+		goto unplugged;
+	}
+
+	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+		      inta, inta_mask, inta_fh);
+
+	inta &= ~CSR_INT_BIT_SCD;
+
+	/* iwl_irq_tasklet() will service interrupts and re-enable them */
+	if (likely(inta || inta_fh))
+		tasklet_schedule(&priv->irq_tasklet);
+
+ unplugged:
+	spin_unlock(&priv->lock);
+	return IRQ_HANDLED;
+
+ none:
+	/* re-enable interrupts here since we don't have anything to service. */
+	/* only re-enable if disabled by irq */
+	if (test_bit(STATUS_INT_ENABLED, &priv->status))
+		iwl_enable_interrupts(priv);
+	spin_unlock(&priv->lock);
+	return IRQ_NONE;
+}
+EXPORT_SYMBOL(iwl_isr_legacy);
+
+int iwl_send_bt_config(struct iwl_priv *priv)
+{
+	struct iwl_bt_cmd bt_cmd = {
+		.lead_time = BT_LEAD_TIME_DEF,
+		.max_kill = BT_MAX_KILL_DEF,
+		.kill_ack_mask = 0,
+		.kill_cts_mask = 0,
+	};
+
+	if (!bt_coex_active)
+		bt_cmd.flags = BT_COEX_DISABLE;
+	else
+		bt_cmd.flags = BT_COEX_ENABLE;
+
+	IWL_DEBUG_INFO(priv, "BT coex %s\n",
+		(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+	return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+				sizeof(struct iwl_bt_cmd), &bt_cmd);
+}
+EXPORT_SYMBOL(iwl_send_bt_config);
+
+int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
+{
+	struct iwl_statistics_cmd statistics_cmd = {
+		.configuration_flags =
+			clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
+	};
+
+	if (flags & CMD_ASYNC)
+		return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
+					      sizeof(struct iwl_statistics_cmd),
+					      &statistics_cmd, NULL);
+	else
+		return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+					sizeof(struct iwl_statistics_cmd),
+					&statistics_cmd);
+}
+EXPORT_SYMBOL(iwl_send_statistics_request);
+
+/**
+ * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
+{
+	u32 val;
+	int ret = 0;
+	u32 errcnt = 0;
+	u32 i;
+
+	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IWL_DL_IO is set */
+		iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+			i + IWL49_RTC_INST_LOWER_BOUND);
+		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+			ret = -EIO;
+			errcnt++;
+			if (errcnt >= 3)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
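+ *     (Added note, derived from the code below: returns 0 if the entire
+ *     image matches, -EIO if any word differs; the comparison stops after
+ *     20 mismatched words.)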
+ */ +static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image, + u32 len) +{ + u32 val; + u32 save_len = len; + int ret = 0; + u32 errcnt; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, + IWL49_RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + ret = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + if (!errcnt) + IWL_DEBUG_INFO(priv, + "ucode image in INSTRUCTION memory is good\n"); + + return ret; +} + +/** + * iwl_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +int iwl_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int ret; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + ret = iwlcore_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + ret = iwlcore_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + ret = iwlcore_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Since nothing seems to match, show first several data entries in + * instruction SRAM, so maybe visual inspection will give a clue. + * Selection of bootstrap image (vs. other images) is arbitrary. 
*/ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + ret = iwl_verify_inst_full(priv, image, len); + + return ret; +} +EXPORT_SYMBOL(iwl_verify_ucode); + + +void iwl_rf_kill_ct_config(struct iwl_priv *priv) +{ + struct iwl_ct_kill_config cmd; + struct iwl_ct_kill_throttling_config adv_cmd; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + spin_unlock_irqrestore(&priv->lock, flags); + priv->thermal_throttle.ct_kill_toggle = false; + + if (priv->cfg->support_ct_kill_exit) { + adv_cmd.critical_temperature_enter = + cpu_to_le32(priv->hw_params.ct_kill_threshold); + adv_cmd.critical_temperature_exit = + cpu_to_le32(priv->hw_params.ct_kill_exit_threshold); + + ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, + sizeof(adv_cmd), &adv_cmd); + if (ret) + IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); + else + IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " + "succeeded, " + "critical temperature enter is %d," + "exit is %d\n", + priv->hw_params.ct_kill_threshold, + priv->hw_params.ct_kill_exit_threshold); + } else { + cmd.critical_temperature_R = + cpu_to_le32(priv->hw_params.ct_kill_threshold); + + ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); + else + IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " + "succeeded, " + "critical temperature is %d\n", + priv->hw_params.ct_kill_threshold); + } +} +EXPORT_SYMBOL(iwl_rf_kill_ct_config); + + +/* + * CARD_STATE_CMD + * + * Use: Sets the device's internal card state to enable, disable, or halt + * + * When in the 'enable' state the card operates as normal. + * When in the 'disable' state, the card enters into a low power mode. + * When in the 'halt' state, the card is shut down and must be fully + * restarted to come back on. 
+ */ +int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_CARD_STATE_CMD, + .len = sizeof(u32), + .data = &flags, + .flags = meta_flag, + }; + + return iwl_send_cmd(priv, &cmd); +} + +void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); + IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} +EXPORT_SYMBOL(iwl_rx_pm_sleep_notif); + +void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " + "notification for %s:\n", len, + get_cmd_string(pkt->hdr.cmd)); + iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); +} +EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif); + +void iwl_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(pkt->u.err_resp.error_type), + get_cmd_string(pkt->u.err_resp.cmd_id), + pkt->u.err_resp.cmd_id, + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), + le32_to_cpu(pkt->u.err_resp.error_info)); +} +EXPORT_SYMBOL(iwl_rx_reply_error); + +void iwl_clear_isr_stats(struct iwl_priv *priv) +{ + memset(&priv->isr_stats, 0, sizeof(priv->isr_stats)); +} + +int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + int q; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); + return 0; + } + + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&priv->lock, flags); + + priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min); + priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max); + priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + priv->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->txop * 32)); + + priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; + priv->qos_data.qos_active = 1; + + if (priv->iw_mode == NL80211_IFTYPE_AP) + iwl_activate_qos(priv, 1); + else if (priv->assoc_id && iwl_is_associated(priv)) + iwl_activate_qos(priv, 0); + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} +EXPORT_SYMBOL(iwl_mac_conf_tx); + +static void iwl_ht_conf(struct iwl_priv *priv, + struct ieee80211_bss_conf *bss_conf) +{ + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + struct ieee80211_sta *sta; + + IWL_DEBUG_MAC80211(priv, "enter: \n"); + + if (!ht_conf->is_ht) + return; + + ht_conf->ht_protection = + bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; + ht_conf->non_GF_STA_present = + !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + + ht_conf->single_chain_sufficient = false; + + switch (priv->iw_mode) { + case NL80211_IFTYPE_STATION: + rcu_read_lock(); + sta = ieee80211_find_sta(priv->vif, priv->bssid); + if (sta) { + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + int maxstreams; + + 
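+			/* Added note: the MAX_STREAMS field of tx_params
+			 * encodes the supported number of TX spatial streams
+			 * minus one, hence the "+ 1" after extracting it
+			 * below. */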
maxstreams = (ht_cap->mcs.tx_params & + IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) + >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; + maxstreams += 1; + + if ((ht_cap->mcs.rx_mask[1] == 0) && + (ht_cap->mcs.rx_mask[2] == 0)) + ht_conf->single_chain_sufficient = true; + if (maxstreams <= 1) + ht_conf->single_chain_sufficient = true; + } else { + /* + * If at all, this can only happen through a race + * when the AP disconnects us while we're still + * setting up the connection, in that case mac80211 + * will soon tell us about that. + */ + ht_conf->single_chain_sufficient = true; + } + rcu_read_unlock(); + break; + case NL80211_IFTYPE_ADHOC: + ht_conf->single_chain_sufficient = true; + break; + default: + break; + } + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static inline void iwl_set_no_assoc(struct iwl_priv *priv) +{ + priv->assoc_id = 0; + iwl_led_disassociate(priv); + /* + * inform the ucode that there is no longer an + * association and that no more packets should be + * sent + */ + priv->staging_rxon.filter_flags &= + ~RXON_FILTER_ASSOC_MSK; + priv->staging_rxon.assoc_id = 0; + iwlcore_commit_rxon(priv); +} + +#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) +void iwl_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes); + + if (!iwl_is_alive(priv)) + return; + + mutex_lock(&priv->mutex); + + if (changes & BSS_CHANGED_BEACON && + priv->iw_mode == NL80211_IFTYPE_AP) { + dev_kfree_skb(priv->ibss_beacon); + priv->ibss_beacon = ieee80211_beacon_get(hw, vif); + } + + if (changes & BSS_CHANGED_BEACON_INT) { + priv->beacon_int = bss_conf->beacon_int; + /* TODO: in AP mode, do something to make this take effect */ + } + + if (changes & BSS_CHANGED_BSSID) { + IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid); + + /* + * If there is currently a HW scan going on in the + * background then we need to cancel it else the RXON + * below/in post_associate will fail. + */ + if (iwl_scan_cancel_timeout(priv, 100)) { + IWL_WARN(priv, "Aborted scan still in progress after 100ms\n"); + IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n"); + mutex_unlock(&priv->mutex); + return; + } + + /* mac80211 only sets assoc when in STATION mode */ + if (priv->iw_mode == NL80211_IFTYPE_ADHOC || + bss_conf->assoc) { + memcpy(priv->staging_rxon.bssid_addr, + bss_conf->bssid, ETH_ALEN); + + /* currently needed in a few places */ + memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); + } else { + priv->staging_rxon.filter_flags &= + ~RXON_FILTER_ASSOC_MSK; + } + + } + + /* + * This needs to be after setting the BSSID in case + * mac80211 decides to do both changes at once because + * it will invoke post_associate. 
+ */ + if (priv->iw_mode == NL80211_IFTYPE_ADHOC && + changes & BSS_CHANGED_BEACON) { + struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); + + if (beacon) + iwl_mac_beacon_update(hw, beacon); + } + + if (changes & BSS_CHANGED_ERP_PREAMBLE) { + IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n", + bss_conf->use_short_preamble); + if (bss_conf->use_short_preamble) + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + } + + if (changes & BSS_CHANGED_ERP_CTS_PROT) { + IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot); + if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) + priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK; + } + + if (changes & BSS_CHANGED_BASIC_RATES) { + /* XXX use this information + * + * To do that, remove code from iwl_set_rate() and put something + * like this here: + * + if (A-band) + priv->staging_rxon.ofdm_basic_rates = + bss_conf->basic_rates; + else + priv->staging_rxon.ofdm_basic_rates = + bss_conf->basic_rates >> 4; + priv->staging_rxon.cck_basic_rates = + bss_conf->basic_rates & 0xF; + */ + } + + if (changes & BSS_CHANGED_HT) { + iwl_ht_conf(priv, bss_conf); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + } + + if (changes & BSS_CHANGED_ASSOC) { + IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc); + if (bss_conf->assoc) { + priv->assoc_id = bss_conf->aid; + priv->beacon_int = bss_conf->beacon_int; + priv->timestamp = bss_conf->timestamp; + priv->assoc_capability = bss_conf->assoc_capability; + + iwl_led_associate(priv); + + /* + * We have just associated, don't start scan too early + * leave time for EAPOL exchange to complete. + * + * XXX: do this in mac80211 + */ + priv->next_scan_jiffies = jiffies + + IWL_DELAY_NEXT_SCAN_AFTER_ASSOC; + if (!iwl_is_rfkill(priv)) + priv->cfg->ops->lib->post_associate(priv); + } else + iwl_set_no_assoc(priv); + } + + if (changes && iwl_is_associated(priv) && priv->assoc_id) { + IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n", + changes); + ret = iwl_send_rxon_assoc(priv); + if (!ret) { + /* Sync active_rxon with latest change. 
*/ + memcpy((void *)&priv->active_rxon, + &priv->staging_rxon, + sizeof(struct iwl_rxon_cmd)); + } + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + if (vif->bss_conf.enable_beacon) { + memcpy(priv->staging_rxon.bssid_addr, + bss_conf->bssid, ETH_ALEN); + memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); + iwlcore_config_ap(priv); + } else + iwl_set_no_assoc(priv); + } + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} +EXPORT_SYMBOL(iwl_bss_info_changed); + +int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + __le64 timestamp; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return -EIO; + } + + if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { + IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n"); + return -EIO; + } + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = skb; + + priv->assoc_id = 0; + timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; + priv->timestamp = le64_to_cpu(timestamp); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_reset_qos(priv); + + priv->cfg->ops->lib->post_associate(priv); + + + return 0; +} +EXPORT_SYMBOL(iwl_mac_beacon_update); + +int iwl_set_mode(struct iwl_priv *priv, int mode) +{ + if (mode == NL80211_IFTYPE_ADHOC) { + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, + priv->band, + le16_to_cpu(priv->staging_rxon.channel)); + + if (!ch_info || !is_channel_ibss(ch_info)) { + IWL_ERR(priv, "channel %d not IBSS channel\n", + le16_to_cpu(priv->staging_rxon.channel)); + return -EINVAL; + } + } + + iwl_connection_init_rx_config(priv, mode); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + + iwl_clear_stations_table(priv); + + /* dont commit rxon if rf-kill is on*/ + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + + iwlcore_commit_rxon(priv); + + return 0; +} +EXPORT_SYMBOL(iwl_set_mode); + +int iwl_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + int err = 0; + + IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type); + + mutex_lock(&priv->mutex); + + if (priv->vif) { + IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n"); + err = -EOPNOTSUPP; + goto out; + } + + priv->vif = vif; + priv->iw_mode = vif->type; + + if (vif->addr) { + IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr); + memcpy(priv->mac_addr, vif->addr, ETH_ALEN); + } + + if (iwl_set_mode(priv, vif->type) == -EAGAIN) + /* we are not ready, will run again when ready */ + set_bit(STATUS_MODE_PENDING, &priv->status); + + out: + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return err; +} +EXPORT_SYMBOL(iwl_mac_add_interface); + +void iwl_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->mutex); + + if (iwl_is_ready_rf(priv)) { + iwl_scan_cancel_timeout(priv, 100); + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + } + if (priv->vif == vif) { + priv->vif = NULL; + memset(priv->bssid, 0, ETH_ALEN); + } + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +} 
+EXPORT_SYMBOL(iwl_mac_remove_interface); + +/** + * iwl_mac_config - mac80211 config callback + * + * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to + * be set inappropriately and the driver currently sets the hardware up to + * use it whenever needed. + */ +int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + unsigned long flags = 0; + int ret = 0; + u16 ch; + int scan_active = 0; + + mutex_lock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n", + conf->channel->hw_value, changed); + + if (unlikely(!priv->cfg->mod_params->disable_hw_scan && + test_bit(STATUS_SCANNING, &priv->status))) { + scan_active = 1; + IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); + } + + if (changed & (IEEE80211_CONF_CHANGE_SMPS | + IEEE80211_CONF_CHANGE_CHANNEL)) { + /* mac80211 uses static for non-HT which is what we want */ + priv->current_ht_config.smps = conf->smps_mode; + + /* + * Recalculate chain counts. + * + * If monitor mode is enabled then mac80211 will + * set up the SM PS mode to OFF if an HT channel is + * configured. + */ + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv); + } + + /* during scanning mac80211 will delay channel setting until + * scan finish with changed = 0 + */ + if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { + if (scan_active) + goto set_ch_out; + + ch = ieee80211_frequency_to_channel(conf->channel->center_freq); + ch_info = iwl_get_channel_info(priv, conf->channel->band, ch); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n"); + ret = -EINVAL; + goto set_ch_out; + } + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC && + !is_channel_ibss(ch_info)) { + IWL_ERR(priv, "channel %d in band %d not " + "IBSS channel\n", + conf->channel->hw_value, conf->channel->band); + ret = -EINVAL; + goto set_ch_out; + } + + spin_lock_irqsave(&priv->lock, flags); + + /* Configure HT40 channels */ + ht_conf->is_ht = conf_is_ht(conf); + if (ht_conf->is_ht) { + if (conf_is_ht40_minus(conf)) { + ht_conf->extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ht_conf->is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ht_conf->extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ht_conf->is_40mhz = true; + } else { + ht_conf->extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ht_conf->is_40mhz = false; + } + } else + ht_conf->is_40mhz = false; + /* Default to no protection. 
Protection mode will later be set + * from BSS config in iwl_ht_conf */ + ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; + + /* if we are switching from ht to 2.4 clear flags + * from any ht related info since 2.4 does not + * support ht */ + if ((le16_to_cpu(priv->staging_rxon.channel) != ch)) + priv->staging_rxon.flags = 0; + + iwl_set_rxon_channel(priv, conf->channel); + iwl_set_rxon_ht(priv, ht_conf); + + iwl_set_flags_for_band(priv, conf->channel->band); + spin_unlock_irqrestore(&priv->lock, flags); + if (iwl_is_associated(priv) && + (le16_to_cpu(priv->active_rxon.channel) != ch) && + priv->cfg->ops->lib->set_channel_switch) { + iwl_set_rate(priv); + /* + * at this point, staging_rxon has the + * configuration for channel switch + */ + ret = priv->cfg->ops->lib->set_channel_switch(priv, + ch); + if (!ret) { + iwl_print_rx_config_cmd(priv); + goto out; + } + priv->switch_rxon.switch_in_progress = false; + } + set_ch_out: + /* The list of supported rates and rate mask can be different + * for each band; since the band may have changed, reset + * the rate mask to what mac80211 lists */ + iwl_set_rate(priv); + } + + if (changed & (IEEE80211_CONF_CHANGE_PS | + IEEE80211_CONF_CHANGE_IDLE)) { + ret = iwl_power_update_mode(priv, false); + if (ret) + IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", + priv->tx_power_user_lmt, conf->power_level); + + iwl_set_tx_power(priv, conf->power_level, false); + } + + if (!iwl_is_ready(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + goto out; + } + + if (scan_active) + goto out; + + if (memcmp(&priv->active_rxon, + &priv->staging_rxon, sizeof(priv->staging_rxon))) + iwlcore_commit_rxon(priv); + else + IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n"); + + +out: + IWL_DEBUG_MAC80211(priv, "leave\n"); + mutex_unlock(&priv->mutex); + return ret; +} +EXPORT_SYMBOL(iwl_mac_config); + +void iwl_mac_reset_tsf(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "enter\n"); + + spin_lock_irqsave(&priv->lock, flags); + memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_reset_qos(priv); + + spin_lock_irqsave(&priv->lock, flags); + priv->assoc_id = 0; + priv->assoc_capability = 0; + priv->assoc_station_added = 0; + + /* new association get rid of ibss beacon skb */ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = NULL; + + priv->beacon_int = priv->vif->bss_conf.beacon_int; + priv->timestamp = 0; + if ((priv->iw_mode == NL80211_IFTYPE_STATION)) + priv->beacon_int = 0; + + spin_unlock_irqrestore(&priv->lock, flags); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + mutex_unlock(&priv->mutex); + return; + } + + /* we are restarting association process + * clear RXON_FILTER_ASSOC_MSK bit + */ + if (priv->iw_mode != NL80211_IFTYPE_AP) { + iwl_scan_cancel_timeout(priv, 100); + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + } + + if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { + IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n"); + mutex_unlock(&priv->mutex); + return; + } + + iwl_set_rate(priv); + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} +EXPORT_SYMBOL(iwl_mac_reset_tsf); + +int iwl_alloc_txq_mem(struct iwl_priv 
*priv) +{ + if (!priv->txq) + priv->txq = kzalloc( + sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues, + GFP_KERNEL); + if (!priv->txq) { + IWL_ERR(priv, "Not enough memory for txq \n"); + return -ENOMEM; + } + return 0; +} +EXPORT_SYMBOL(iwl_alloc_txq_mem); + +void iwl_free_txq_mem(struct iwl_priv *priv) +{ + kfree(priv->txq); + priv->txq = NULL; +} +EXPORT_SYMBOL(iwl_free_txq_mem); + +int iwl_send_wimax_coex(struct iwl_priv *priv) +{ + struct iwl_wimax_coex_cmd uninitialized_var(coex_cmd); + + if (priv->cfg->support_wimax_coexist) { + /* UnMask wake up src at associated sleep */ + coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK; + + /* UnMask wake up src at unassociated sleep */ + coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK; + memcpy(coex_cmd.sta_prio, cu_priorities, + sizeof(struct iwl_wimax_coex_event_entry) * + COEX_NUM_OF_EVENTS); + + /* enabling the coexistence feature */ + coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK; + + /* enabling the priorities tables */ + coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK; + } else { + /* coexistence is disabled */ + memset(&coex_cmd, 0, sizeof(coex_cmd)); + } + return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD, + sizeof(coex_cmd), &coex_cmd); +} +EXPORT_SYMBOL(iwl_send_wimax_coex); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + +#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) + +void iwl_reset_traffic_log(struct iwl_priv *priv) +{ + priv->tx_traffic_idx = 0; + priv->rx_traffic_idx = 0; + if (priv->tx_traffic) + memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); + if (priv->rx_traffic) + memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); +} + +int iwl_alloc_traffic_mem(struct iwl_priv *priv) +{ + u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE; + + if (iwl_debug_level & IWL_DL_TX) { + if (!priv->tx_traffic) { + priv->tx_traffic = + kzalloc(traffic_size, GFP_KERNEL); + if (!priv->tx_traffic) + return -ENOMEM; + } + } + if (iwl_debug_level & IWL_DL_RX) { + if (!priv->rx_traffic) { + priv->rx_traffic = + kzalloc(traffic_size, GFP_KERNEL); + if (!priv->rx_traffic) + return -ENOMEM; + } + } + iwl_reset_traffic_log(priv); + return 0; +} +EXPORT_SYMBOL(iwl_alloc_traffic_mem); + +void iwl_free_traffic_mem(struct iwl_priv *priv) +{ + kfree(priv->tx_traffic); + priv->tx_traffic = NULL; + + kfree(priv->rx_traffic); + priv->rx_traffic = NULL; +} +EXPORT_SYMBOL(iwl_free_traffic_mem); + +void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ + __le16 fc; + u16 len; + + if (likely(!(iwl_debug_level & IWL_DL_TX))) + return; + + if (!priv->tx_traffic) + return; + + fc = header->frame_control; + if (ieee80211_is_data(fc)) { + len = (length > IWL_TRAFFIC_ENTRY_SIZE) + ? IWL_TRAFFIC_ENTRY_SIZE : length; + memcpy((priv->tx_traffic + + (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), + header, len); + priv->tx_traffic_idx = + (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; + } +} +EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame); + +void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ + __le16 fc; + u16 len; + + if (likely(!(iwl_debug_level & IWL_DL_RX))) + return; + + if (!priv->rx_traffic) + return; + + fc = header->frame_control; + if (ieee80211_is_data(fc)) { + len = (length > IWL_TRAFFIC_ENTRY_SIZE) + ? 
IWL_TRAFFIC_ENTRY_SIZE : length; + memcpy((priv->rx_traffic + + (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), + header, len); + priv->rx_traffic_idx = + (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; + } +} +EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame); + +const char *get_mgmt_string(int cmd) +{ + switch (cmd) { + IWL_CMD(MANAGEMENT_ASSOC_REQ); + IWL_CMD(MANAGEMENT_ASSOC_RESP); + IWL_CMD(MANAGEMENT_REASSOC_REQ); + IWL_CMD(MANAGEMENT_REASSOC_RESP); + IWL_CMD(MANAGEMENT_PROBE_REQ); + IWL_CMD(MANAGEMENT_PROBE_RESP); + IWL_CMD(MANAGEMENT_BEACON); + IWL_CMD(MANAGEMENT_ATIM); + IWL_CMD(MANAGEMENT_DISASSOC); + IWL_CMD(MANAGEMENT_AUTH); + IWL_CMD(MANAGEMENT_DEAUTH); + IWL_CMD(MANAGEMENT_ACTION); + default: + return "UNKNOWN"; + + } +} + +const char *get_ctrl_string(int cmd) +{ + switch (cmd) { + IWL_CMD(CONTROL_BACK_REQ); + IWL_CMD(CONTROL_BACK); + IWL_CMD(CONTROL_PSPOLL); + IWL_CMD(CONTROL_RTS); + IWL_CMD(CONTROL_CTS); + IWL_CMD(CONTROL_ACK); + IWL_CMD(CONTROL_CFEND); + IWL_CMD(CONTROL_CFENDACK); + default: + return "UNKNOWN"; + + } +} + +void iwl_clear_traffic_stats(struct iwl_priv *priv) +{ + memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); + memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); + priv->led_tpt = 0; +} + +/* + * if CONFIG_IWLWIFI_DEBUGFS defined, iwl_update_stats function will + * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass. + * Use debugFs to display the rx/rx_statistics + * if CONFIG_IWLWIFI_DEBUGFS not being defined, then no MGMT and CTRL + * information will be recorded, but DATA pkt still will be recorded + * for the reason of iwl_led.c need to control the led blinking based on + * number of tx and rx data. + * + */ +void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) +{ + struct traffic_stats *stats; + + if (is_tx) + stats = &priv->tx_stats; + else + stats = &priv->rx_stats; + + if (ieee80211_is_mgmt(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + stats->mgmt[MANAGEMENT_ASSOC_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): + stats->mgmt[MANAGEMENT_ASSOC_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + stats->mgmt[MANAGEMENT_REASSOC_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): + stats->mgmt[MANAGEMENT_REASSOC_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): + stats->mgmt[MANAGEMENT_PROBE_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): + stats->mgmt[MANAGEMENT_PROBE_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_BEACON): + stats->mgmt[MANAGEMENT_BEACON]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ATIM): + stats->mgmt[MANAGEMENT_ATIM]++; + break; + case cpu_to_le16(IEEE80211_STYPE_DISASSOC): + stats->mgmt[MANAGEMENT_DISASSOC]++; + break; + case cpu_to_le16(IEEE80211_STYPE_AUTH): + stats->mgmt[MANAGEMENT_AUTH]++; + break; + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + stats->mgmt[MANAGEMENT_DEAUTH]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ACTION): + stats->mgmt[MANAGEMENT_ACTION]++; + break; + } + } else if (ieee80211_is_ctl(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_BACK_REQ): + stats->ctrl[CONTROL_BACK_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_BACK): + stats->ctrl[CONTROL_BACK]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PSPOLL): + stats->ctrl[CONTROL_PSPOLL]++; + break; + case cpu_to_le16(IEEE80211_STYPE_RTS): + stats->ctrl[CONTROL_RTS]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CTS): 
+ stats->ctrl[CONTROL_CTS]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ACK): + stats->ctrl[CONTROL_ACK]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CFEND): + stats->ctrl[CONTROL_CFEND]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CFENDACK): + stats->ctrl[CONTROL_CFENDACK]++; + break; + } + } else { + /* data */ + stats->data_cnt++; + stats->data_bytes += len; + } + iwl_leds_background(priv); +} +EXPORT_SYMBOL(iwl_update_stats); +#endif + +const static char *get_csr_string(int cmd) +{ + switch (cmd) { + IWL_CMD(CSR_HW_IF_CONFIG_REG); + IWL_CMD(CSR_INT_COALESCING); + IWL_CMD(CSR_INT); + IWL_CMD(CSR_INT_MASK); + IWL_CMD(CSR_FH_INT_STATUS); + IWL_CMD(CSR_GPIO_IN); + IWL_CMD(CSR_RESET); + IWL_CMD(CSR_GP_CNTRL); + IWL_CMD(CSR_HW_REV); + IWL_CMD(CSR_EEPROM_REG); + IWL_CMD(CSR_EEPROM_GP); + IWL_CMD(CSR_OTP_GP_REG); + IWL_CMD(CSR_GIO_REG); + IWL_CMD(CSR_GP_UCODE_REG); + IWL_CMD(CSR_GP_DRIVER_REG); + IWL_CMD(CSR_UCODE_DRV_GP1); + IWL_CMD(CSR_UCODE_DRV_GP2); + IWL_CMD(CSR_LED_REG); + IWL_CMD(CSR_DRAM_INT_TBL_REG); + IWL_CMD(CSR_GIO_CHICKEN_BITS); + IWL_CMD(CSR_ANA_PLL_CFG); + IWL_CMD(CSR_HW_REV_WA_REG); + IWL_CMD(CSR_DBG_HPET_MEM_REG); + default: + return "UNKNOWN"; + + } +} + +void iwl_dump_csr(struct iwl_priv *priv) +{ + int i; + u32 csr_tbl[] = { + CSR_HW_IF_CONFIG_REG, + CSR_INT_COALESCING, + CSR_INT, + CSR_INT_MASK, + CSR_FH_INT_STATUS, + CSR_GPIO_IN, + CSR_RESET, + CSR_GP_CNTRL, + CSR_HW_REV, + CSR_EEPROM_REG, + CSR_EEPROM_GP, + CSR_OTP_GP_REG, + CSR_GIO_REG, + CSR_GP_UCODE_REG, + CSR_GP_DRIVER_REG, + CSR_UCODE_DRV_GP1, + CSR_UCODE_DRV_GP2, + CSR_LED_REG, + CSR_DRAM_INT_TBL_REG, + CSR_GIO_CHICKEN_BITS, + CSR_ANA_PLL_CFG, + CSR_HW_REV_WA_REG, + CSR_DBG_HPET_MEM_REG + }; + IWL_ERR(priv, "CSR values:\n"); + IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is " + "CSR_INT_PERIODIC_REG)\n"); + for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { + IWL_ERR(priv, " %25s: 0X%08x\n", + get_csr_string(csr_tbl[i]), + iwl_read32(priv, csr_tbl[i])); + } +} +EXPORT_SYMBOL(iwl_dump_csr); + +const static char *get_fh_string(int cmd) +{ + switch (cmd) { + IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); + IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); + IWL_CMD(FH_RSCSR_CHNL0_WPTR); + IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); + IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); + IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); + IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); + IWL_CMD(FH_TSSR_TX_STATUS_REG); + IWL_CMD(FH_TSSR_TX_ERROR_REG); + default: + return "UNKNOWN"; + + } +} + +int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display) +{ + int i; +#ifdef CONFIG_IWLWIFI_DEBUG + int pos = 0; + size_t bufsz = 0; +#endif + u32 fh_tbl[] = { + FH_RSCSR_CHNL0_STTS_WPTR_REG, + FH_RSCSR_CHNL0_RBDCB_BASE_REG, + FH_RSCSR_CHNL0_WPTR, + FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_MEM_RSSR_SHARED_CTRL_REG, + FH_MEM_RSSR_RX_STATUS_REG, + FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, + FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_ERROR_REG + }; +#ifdef CONFIG_IWLWIFI_DEBUG + if (display) { + bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + pos += scnprintf(*buf + pos, bufsz - pos, + "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + pos += scnprintf(*buf + pos, bufsz - pos, + " %34s: 0X%08x\n", + get_fh_string(fh_tbl[i]), + iwl_read_direct32(priv, fh_tbl[i])); + } + return pos; + } +#endif + IWL_ERR(priv, "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + IWL_ERR(priv, " %34s: 0X%08x\n", + get_fh_string(fh_tbl[i]), + iwl_read_direct32(priv, fh_tbl[i])); + } + return 0; +} 
+EXPORT_SYMBOL(iwl_dump_fh); + +static void iwl_force_rf_reset(struct iwl_priv *priv) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!iwl_is_associated(priv)) { + IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n"); + return; + } + /* + * There is no easy and better way to force reset the radio, + * the only known method is switching channel which will force to + * reset and tune the radio. + * Use internal short scan (single channel) operation to should + * achieve this objective. + * Driver should reset the radio when number of consecutive missed + * beacon, or any other uCode error condition detected. + */ + IWL_DEBUG_INFO(priv, "perform radio reset.\n"); + iwl_internal_short_hw_scan(priv); + return; +} + + +int iwl_force_reset(struct iwl_priv *priv, int mode) +{ + struct iwl_force_reset *force_reset; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EINVAL; + + if (mode >= IWL_MAX_FORCE_RESET) { + IWL_DEBUG_INFO(priv, "invalid reset request.\n"); + return -EINVAL; + } + force_reset = &priv->force_reset[mode]; + force_reset->reset_request_count++; + if (force_reset->last_force_reset_jiffies && + time_after(force_reset->last_force_reset_jiffies + + force_reset->reset_duration, jiffies)) { + IWL_DEBUG_INFO(priv, "force reset rejected\n"); + force_reset->reset_reject_count++; + return -EAGAIN; + } + force_reset->reset_success_count++; + force_reset->last_force_reset_jiffies = jiffies; + IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode); + switch (mode) { + case IWL_RF_RESET: + iwl_force_rf_reset(priv); + break; + case IWL_FW_RESET: + IWL_ERR(priv, "On demand firmware reload\n"); + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + /* + * Keep the restart process from trying to send host + * commands by clearing the INIT status bit + */ + clear_bit(STATUS_READY, &priv->status); + queue_work(priv->workqueue, &priv->restart); + break; + } + return 0; +} + +#ifdef CONFIG_PM + +int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + + /* + * This function is called when system goes into suspend state + * mac80211 will call iwl_mac_stop() from the mac80211 suspend function + * first but since iwl_mac_stop() has no knowledge of who the caller is, + * it will not call apm_ops.stop() to stop the DMA operation. + * Calling apm_ops.stop here to make sure we stop the DMA. + */ + priv->cfg->ops->lib->apm_ops.stop(priv); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} +EXPORT_SYMBOL(iwl_pci_suspend); + +int iwl_pci_resume(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + int ret; + + pci_set_power_state(pdev, PCI_D0); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_restore_state(pdev); + iwl_enable_interrupts(priv); + + return 0; +} +EXPORT_SYMBOL(iwl_pci_resume); + +#endif /* CONFIG_PM */ diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h new file mode 100644 index 0000000..4ef7739 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -0,0 +1,706 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __iwl_core_h__ +#define __iwl_core_h__ + +/************************ + * forward declarations * + ************************/ +struct iwl_host_cmd; +struct iwl_cmd; + + +#define IWLWIFI_VERSION "in-tree:" +#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation" +#define DRV_AUTHOR "" + +#define IWL_PCI_DEVICE(dev, subdev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ + .driver_data = (kernel_ulong_t)&(cfg) + +#define IWL_SKU_G 0x1 +#define IWL_SKU_A 0x2 +#define IWL_SKU_N 0x8 + +#define IWL_CMD(x) case x: return #x + +struct iwl_hcmd_ops { + int (*rxon_assoc)(struct iwl_priv *priv); + int (*commit_rxon)(struct iwl_priv *priv); + void (*set_rxon_chain)(struct iwl_priv *priv); + int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant); +}; + +struct iwl_hcmd_utils_ops { + u16 (*get_hcmd_size)(u8 cmd_id, u16 len); + u16 (*build_addsta_hcmd)(const struct iwl_addsta_cmd *cmd, u8 *data); + void (*gain_computation)(struct iwl_priv *priv, + u32 *average_noise, + u16 min_average_noise_antennat_i, + u32 min_average_noise, + u8 default_chain); + void (*chain_noise_reset)(struct iwl_priv *priv); + void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info, + __le32 *tx_flags); + int (*calc_rssi)(struct iwl_priv *priv, + struct iwl_rx_phy_res *rx_resp); +}; + +struct iwl_apm_ops { + int (*init)(struct iwl_priv *priv); + void (*stop)(struct iwl_priv *priv); + void (*config)(struct iwl_priv *priv); + int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src); +}; + +struct iwl_temp_ops { + void (*temperature)(struct iwl_priv *priv); + void (*set_ct_kill)(struct iwl_priv *priv); + void (*set_calib_version)(struct iwl_priv *priv); +}; + +struct iwl_ucode_ops { + u32 (*get_header_size)(u32); + u32 (*get_build)(const struct iwl_ucode_header *, u32); + u32 (*get_inst_size)(const struct iwl_ucode_header *, u32); + u32 (*get_data_size)(const struct iwl_ucode_header *, u32); + u32 (*get_init_size)(const struct iwl_ucode_header *, u32); + u32 (*get_init_data_size)(const struct iwl_ucode_header *, u32); + u32 (*get_boot_size)(const struct iwl_ucode_header *, u32); + u8 * (*get_data)(const struct iwl_ucode_header *, u32); +}; + +struct iwl_lib_ops { + /* set hw dependent parameters */ + int (*set_hw_params)(struct iwl_priv *priv); + /* Handling TX */ + void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt); + void (*txq_inval_byte_cnt_tbl)(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + void (*txq_set_sched)(struct iwl_priv *priv, u32 mask); + int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, + u16 len, u8 reset, u8 pad); + void (*txq_free_tfd)(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + int (*txq_init)(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + /* aggregations */ + int (*txq_agg_enable)(struct iwl_priv *priv, int txq_id, int tx_fifo, + int sta_id, int tid, u16 ssn_idx); + int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id, u16 ssn_idx, + u8 tx_fifo); + /* setup Rx handler */ + void (*rx_handler_setup)(struct iwl_priv *priv); + /* setup deferred work */ + void (*setup_deferred_work)(struct iwl_priv *priv); + /* cancel deferred work */ + void (*cancel_deferred_work)(struct iwl_priv *priv); + /* alive notification after init uCode load */ + void (*init_alive_start)(struct iwl_priv *priv); + /* alive notification */ + int 
(*alive_notify)(struct iwl_priv *priv); + /* check validity of rtc data address */ + int (*is_valid_rtc_data_addr)(u32 addr); + /* 1st ucode load */ + int (*load_ucode)(struct iwl_priv *priv); + int (*dump_nic_event_log)(struct iwl_priv *priv, + bool full_log, char **buf, bool display); + void (*dump_nic_error_log)(struct iwl_priv *priv); + void (*dump_csr)(struct iwl_priv *priv); + int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display); + int (*set_channel_switch)(struct iwl_priv *priv, u16 channel); + /* power management */ + struct iwl_apm_ops apm_ops; + + /* power */ + int (*send_tx_power) (struct iwl_priv *priv); + void (*update_chain_flags)(struct iwl_priv *priv); + void (*post_associate) (struct iwl_priv *priv); + void (*config_ap) (struct iwl_priv *priv); + irqreturn_t (*isr) (int irq, void *data); + + /* eeprom operations (as defined in iwl-eeprom.h) */ + struct iwl_eeprom_ops eeprom_ops; + + /* temperature */ + struct iwl_temp_ops temp_ops; + /* station management */ + void (*add_bcast_station)(struct iwl_priv *priv); +}; + +struct iwl_led_ops { + int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd); + int (*on)(struct iwl_priv *priv); + int (*off)(struct iwl_priv *priv); +}; + +struct iwl_ops { + const struct iwl_ucode_ops *ucode; + const struct iwl_lib_ops *lib; + const struct iwl_hcmd_ops *hcmd; + const struct iwl_hcmd_utils_ops *utils; + const struct iwl_led_ops *led; +}; + +struct iwl_mod_params { + int sw_crypto; /* def: 0 = using hardware encryption */ + int disable_hw_scan; /* def: 0 = use h/w scan */ + int num_of_queues; /* def: HW dependent */ + int disable_11n; /* def: 0 = 11n capabilities enabled */ + int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ + int antenna; /* def: 0 = both antennas (use diversity) */ + int restart_fw; /* def: 1 = restart firmware */ +}; + +/** + * struct iwl_cfg + * @fw_name_pre: Firmware filename prefix. The api version and extension + * (.ucode) will be added to filename before loading from disk. The + * filename is constructed as fw_name_pre.ucode. + * @ucode_api_max: Highest version of uCode API supported by driver. + * @ucode_api_min: Lowest version of uCode API supported by driver. + * @pa_type: used by 6000 series only to identify the type of Power Amplifier + * @max_ll_items: max number of OTP blocks + * @shadow_ram_support: shadow support for OTP memory + * @led_compensation: compensate on the led on/off time per HW according + * to the deviation to achieve the desired led frequency. + * The detail algorithm is described in iwl-led.c + * @use_rts_for_ht: use rts/cts protection for HT traffic + * @chain_noise_num_beacons: number of beacons used to compute chain noise + * @adv_thermal_throttle: support advance thermal throttle + * @support_ct_kill_exit: support ct kill exit condition + * @support_wimax_coexist: support wimax/wifi co-exist + * @plcp_delta_threshold: plcp error rate threshold used to trigger + * radio tuning when there is a high receiving plcp error rate + * + * We enable the driver to be backward compatible wrt API version. The + * driver specifies which APIs it supports (with @ucode_api_max being the + * highest and @ucode_api_min the lowest). Firmware will only be loaded if + * it has a supported API version. The firmware's API version will be + * stored in @iwl_priv, enabling the driver to make runtime changes based + * on firmware version used. + * + * For example, + * if (IWL_UCODE_API(priv->ucode_ver) >= 2) { + * Driver interacts with Firmware API version >= 2. 
+ * } else { + * Driver interacts with Firmware API version 1. + * } + * + * The ideal usage of this infrastructure is to treat a new ucode API + * release as a new hardware revision. That is, through utilizing the + * iwl_hcmd_utils_ops etc. we accommodate different command structures + * and flows between hardware versions (4965/5000) as well as their API + * versions. + * + */ +struct iwl_cfg { + const char *name; + const char *fw_name_pre; + const unsigned int ucode_api_max; + const unsigned int ucode_api_min; + unsigned int sku; + int eeprom_size; + u16 eeprom_ver; + u16 eeprom_calib_ver; + int num_of_queues; /* def: HW dependent */ + int num_of_ampdu_queues;/* def: HW dependent */ + const struct iwl_ops *ops; + const struct iwl_mod_params *mod_params; + u8 valid_tx_ant; + u8 valid_rx_ant; + + /* for iwl_apm_init() */ + u32 pll_cfg_val; + bool set_l0s; + bool use_bsm; + + bool use_isr_legacy; + enum iwl_pa_type pa_type; + const u16 max_ll_items; + const bool shadow_ram_support; + const bool ht_greenfield_support; + u16 led_compensation; + const bool broken_powersave; + bool use_rts_for_ht; + int chain_noise_num_beacons; + const bool supports_idle; + bool adv_thermal_throttle; + bool support_ct_kill_exit; + const bool support_wimax_coexist; + u8 plcp_delta_threshold; + s32 chain_noise_scale; +}; + +/*************************** + * L i b * + ***************************/ + +struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, + struct ieee80211_ops *hw_ops); +void iwl_hw_detect(struct iwl_priv *priv); +void iwl_reset_qos(struct iwl_priv *priv); +void iwl_activate_qos(struct iwl_priv *priv, u8 force); +int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params); +void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt); +int iwl_check_rxon_cmd(struct iwl_priv *priv); +int iwl_full_rxon_required(struct iwl_priv *priv); +void iwl_set_rxon_chain(struct iwl_priv *priv); +int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch); +void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf); +u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv, + struct ieee80211_sta_ht_cap *sta_ht_inf); +void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band); +void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode); +int iwl_set_decrypted_flag(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u32 decrypt_res, + struct ieee80211_rx_status *stats); +void iwl_irq_handle_error(struct iwl_priv *priv); +void iwl_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast); +int iwl_hw_nic_init(struct iwl_priv *priv); +int iwl_set_hw_params(struct iwl_priv *priv); +bool iwl_is_monitor_mode(struct iwl_priv *priv); +void iwl_post_associate(struct iwl_priv *priv); +void iwl_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes); +int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb); +int iwl_commit_rxon(struct iwl_priv *priv); +int iwl_set_mode(struct iwl_priv *priv, int mode); +int iwl_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwl_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int iwl_mac_config(struct ieee80211_hw *hw, u32 changed); +void iwl_config_ap(struct iwl_priv *priv); +void iwl_mac_reset_tsf(struct ieee80211_hw *hw); +int iwl_alloc_txq_mem(struct iwl_priv *priv); +void 
iwl_free_txq_mem(struct iwl_priv *priv); +void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info, + __le32 *tx_flags); +int iwl_send_wimax_coex(struct iwl_priv *priv); +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_alloc_traffic_mem(struct iwl_priv *priv); +void iwl_free_traffic_mem(struct iwl_priv *priv); +void iwl_reset_traffic_log(struct iwl_priv *priv); +void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header); +void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header); +const char *get_mgmt_string(int cmd); +const char *get_ctrl_string(int cmd); +void iwl_clear_traffic_stats(struct iwl_priv *priv); +void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, + u16 len); +#else +static inline int iwl_alloc_traffic_mem(struct iwl_priv *priv) +{ + return 0; +} +static inline void iwl_free_traffic_mem(struct iwl_priv *priv) +{ +} +static inline void iwl_reset_traffic_log(struct iwl_priv *priv) +{ +} +static inline void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ +} +static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ +} +static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx, + __le16 fc, u16 len) +{ + struct traffic_stats *stats; + + if (is_tx) + stats = &priv->tx_stats; + else + stats = &priv->rx_stats; + + if (ieee80211_is_data(fc)) { + /* data */ + stats->data_bytes += len; + } + iwl_leds_background(priv); +} +#endif +/***************************************************** + * RX handlers. + * **************************************************/ +void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + +/***************************************************** +* RX +******************************************************/ +void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +void iwl_cmd_queue_free(struct iwl_priv *priv); +int iwl_rx_queue_alloc(struct iwl_priv *priv); +void iwl_rx_handle(struct iwl_priv *priv); +void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q); +void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +void iwl_rx_replenish(struct iwl_priv *priv); +void iwl_rx_replenish_now(struct iwl_priv *priv); +int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +void iwl_rx_queue_restock(struct iwl_priv *priv); +int iwl_rx_queue_space(const struct iwl_rx_queue *q); +void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority); +void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); +int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); +/* Handlers */ +void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); + +/* TX helpers */ + +/***************************************************** +* TX +******************************************************/ +int iwl_txq_ctx_reset(struct iwl_priv 
*priv); +void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); +int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, u8 pad); +int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); +void iwl_hw_txq_ctx_free(struct iwl_priv *priv); +int iwl_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +void iwl_free_tfds_in_queue(struct iwl_priv *priv, + int sta_id, int tid, int freed); +void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); +int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id); +void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); +int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); +int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); +int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id); +/***************************************************** + * TX power + ****************************************************/ +int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force); + +/******************************************************************************* + * Rate + ******************************************************************************/ + +void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, + struct ieee80211_tx_info *info); +int iwl_hwrate_to_plcp_idx(u32 rate_n_flags); +int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); + +u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv); + +u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx); + +static inline u32 iwl_ant_idx_to_flags(u8 ant_idx) +{ + return BIT(ant_idx) << RATE_MCS_ANT_POS; +} + +static inline u8 iwl_hw_get_rate(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & 0xFF; +} +static inline u32 iwl_hw_get_rate_n_flags(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & 0x1FFFF; +} +static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags) +{ + return cpu_to_le32(flags|(u32)rate); +} + +/******************************************************************************* + * Scanning + ******************************************************************************/ +void iwl_init_scan_params(struct iwl_priv *priv); +int iwl_scan_cancel(struct iwl_priv *priv); +int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); +int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); +int iwl_internal_short_hw_scan(struct iwl_priv *priv); +int iwl_force_reset(struct iwl_priv *priv, int mode); +u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, + const u8 *ie, int ie_len, int left); +void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); +u16 iwl_get_active_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + u8 n_probes); +u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band); +void iwl_bg_scan_check(struct work_struct *data); +void iwl_bg_abort_scan(struct work_struct *work); +void iwl_bg_scan_completed(struct work_struct *work); +void iwl_setup_scan_deferred_work(struct iwl_priv *priv); + +/* For faster active scanning, scan will move to the next channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). 
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ +#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ + + +/******************************************************************************* + * Calibrations - implemented in iwl-calib.c + ******************************************************************************/ +int iwl_send_calib_results(struct iwl_priv *priv); +int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len); +void iwl_calib_free_results(struct iwl_priv *priv); + +/***************************************************** + * S e n d i n g H o s t C o m m a n d s * + *****************************************************/ + +const char *get_cmd_string(u8 cmd); +int __must_check iwl_send_cmd_sync(struct iwl_priv *priv, + struct iwl_host_cmd *cmd); +int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); +int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, + u16 len, const void *data); +int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len, + const void *data, + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt)); + +int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); + +int iwl_send_card_state(struct iwl_priv *priv, u32 flags, + u8 meta_flag); + +/***************************************************** + * PCI * + *****************************************************/ +irqreturn_t iwl_isr_legacy(int irq, void *data); +int iwl_reset_ict(struct iwl_priv *priv); +void iwl_disable_ict(struct iwl_priv *priv); +int iwl_alloc_isr_ict(struct iwl_priv *priv); +void iwl_free_isr_ict(struct iwl_priv *priv); +irqreturn_t iwl_isr_ict(int irq, void *data); + +static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv) +{ + int pos; + u16 pci_lnk_ctl; + pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP); + pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + return pci_lnk_ctl; +} +#ifdef CONFIG_PM +int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state); +int iwl_pci_resume(struct pci_dev *pdev); +#endif /* CONFIG_PM */ + +/***************************************************** +* Error Handling Debugging +******************************************************/ +void iwl_dump_nic_error_log(struct iwl_priv *priv); +int iwl_dump_nic_event_log(struct iwl_priv *priv, + bool full_log, char **buf, bool display); +void iwl_dump_csr(struct iwl_priv *priv); +int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display); +#ifdef CONFIG_IWLWIFI_DEBUG +void iwl_print_rx_config_cmd(struct iwl_priv *priv); +#else +static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv) +{ +} +#endif + +void iwl_clear_isr_stats(struct iwl_priv *priv); + +/***************************************************** +* GEOS +******************************************************/ +int iwlcore_init_geos(struct iwl_priv *priv); +void iwlcore_free_geos(struct iwl_priv *priv); + +/*************** DRIVER STATUS FUNCTIONS *****/ + +#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ +/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */ +#define STATUS_INT_ENABLED 2 +#define STATUS_RF_KILL_HW 3 +#define STATUS_CT_KILL 4 +#define STATUS_INIT 5 +#define STATUS_ALIVE 6 +#define STATUS_READY 7 +#define STATUS_TEMPERATURE 8 +#define STATUS_GEO_CONFIGURED 9 +#define STATUS_EXIT_PENDING 10 +#define STATUS_STATISTICS 12 +#define STATUS_SCANNING 13 +#define STATUS_SCAN_ABORTING 14 +#define STATUS_SCAN_HW 
15 +#define STATUS_POWER_PMI 16 +#define STATUS_FW_ERROR 17 +#define STATUS_MODE_PENDING 18 + + +static inline int iwl_is_ready(struct iwl_priv *priv) +{ + /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are + * set but EXIT_PENDING is not */ + return test_bit(STATUS_READY, &priv->status) && + test_bit(STATUS_GEO_CONFIGURED, &priv->status) && + !test_bit(STATUS_EXIT_PENDING, &priv->status); +} + +static inline int iwl_is_alive(struct iwl_priv *priv) +{ + return test_bit(STATUS_ALIVE, &priv->status); +} + +static inline int iwl_is_init(struct iwl_priv *priv) +{ + return test_bit(STATUS_INIT, &priv->status); +} + +static inline int iwl_is_rfkill_hw(struct iwl_priv *priv) +{ + return test_bit(STATUS_RF_KILL_HW, &priv->status); +} + +static inline int iwl_is_rfkill(struct iwl_priv *priv) +{ + return iwl_is_rfkill_hw(priv); +} + +static inline int iwl_is_ctkill(struct iwl_priv *priv) +{ + return test_bit(STATUS_CT_KILL, &priv->status); +} + +static inline int iwl_is_ready_rf(struct iwl_priv *priv) +{ + + if (iwl_is_rfkill(priv)) + return 0; + + return iwl_is_ready(priv); +} + +extern void iwl_rf_kill_ct_config(struct iwl_priv *priv); +extern int iwl_send_bt_config(struct iwl_priv *priv); +extern int iwl_send_statistics_request(struct iwl_priv *priv, + u8 flags, bool clear); +extern int iwl_verify_ucode(struct iwl_priv *priv); +extern int iwl_send_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq, u8 flags); +extern void iwl_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_apm_stop(struct iwl_priv *priv); +int iwl_apm_stop_master(struct iwl_priv *priv); +int iwl_apm_init(struct iwl_priv *priv); + +void iwl_setup_rxon_timing(struct iwl_priv *priv); +static inline int iwl_send_rxon_assoc(struct iwl_priv *priv) +{ + return priv->cfg->ops->hcmd->rxon_assoc(priv); +} +static inline int iwlcore_commit_rxon(struct iwl_priv *priv) +{ + return priv->cfg->ops->hcmd->commit_rxon(priv); +} +static inline void iwlcore_config_ap(struct iwl_priv *priv) +{ + priv->cfg->ops->lib->config_ap(priv); +} +static inline const struct ieee80211_supported_band *iwl_get_hw_mode( + struct iwl_priv *priv, enum ieee80211_band band) +{ + return priv->hw->wiphy->bands[band]; +} +#endif /* __iwl_core_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h new file mode 100644 index 0000000..808b714 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -0,0 +1,451 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_csr_h__ +#define __iwl_csr_h__ +/* + * CSR (control and status registers) + * + * CSR registers are mapped directly into PCI bus space, and are accessible + * whenever platform supplies power to device, even when device is in + * low power states due to driver-invoked device resets + * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. + * + * Use iwl_write32() and iwl_read32() family to access these registers; + * these provide simple PCI bus access, without waking up the MAC. + * Do not use iwl_write_direct32() family for these registers; + * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. + * The MAC (uCode processor, etc.) does not need to be powered up for accessing + * the CSR registers. 
+ * + * NOTE: Device does need to be awake in order to read this memory + * via CSR_EEPROM and CSR_OTP registers + */ +#define CSR_BASE (0x000) + +#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ +#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ +#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ +#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ +#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ +#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ +#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ +#define CSR_GP_CNTRL (CSR_BASE+0x024) + +/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ +#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) + +/* + * Hardware revision info + * Bit fields: + * 31-8: Reserved + * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions + * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D + * 1-0: "Dash" (-) value, as in A-1, etc. + * + * NOTE: Revision step affects calculation of CCK txpower for 4965. + * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965). + */ +#define CSR_HW_REV (CSR_BASE+0x028) + +/* + * EEPROM and OTP (one-time-programmable) memory reads + * + * NOTE: Device must be awake, initialized via apm_ops.init(), + * in order to read. + */ +#define CSR_EEPROM_REG (CSR_BASE+0x02c) +#define CSR_EEPROM_GP (CSR_BASE+0x030) +#define CSR_OTP_GP_REG (CSR_BASE+0x034) + +#define CSR_GIO_REG (CSR_BASE+0x03C) +#define CSR_GP_UCODE_REG (CSR_BASE+0x048) +#define CSR_GP_DRIVER_REG (CSR_BASE+0x050) + +/* + * UCODE-DRIVER GP (general purpose) mailbox registers. + * SET/CLR registers set/clear bit(s) if "1" is written. + */ +#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) +#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) +#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) +#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) + +#define CSR_LED_REG (CSR_BASE+0x094) +#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) + +/* GIO Chicken Bits (PCI Express bus link power management) */ +#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) + +/* Analog phase-lock-loop configuration */ +#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) + +/* + * CSR Hardware Revision Workaround Register. Indicates hardware rev; + * "step" determines CCK backoff for txpower calculation. Used for 4965 only. + * See also CSR_HW_REV register. + * Bit fields: + * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step + * 1-0: "Dash" (-) value, as in C-1, etc. 
+ */ +#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) + +#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) +#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) + +/* Bits for CSR_HW_IF_CONFIG_REG */ +#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) +#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) +#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) +#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) + +#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100) +#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200) +#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400) +#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800) +#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000) +#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000) + +#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) +#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ +#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ + +#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ +#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ + +/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), + * acknowledged (reset) by host writing "1" to flagged bits. */ +#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ +#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ +#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ +#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ +#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ +#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ +#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ +#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ +#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ +#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ +#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ + +#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ + CSR_INT_BIT_HW_ERR | \ + CSR_INT_BIT_FH_TX | \ + CSR_INT_BIT_SW_ERR | \ + CSR_INT_BIT_RF_KILL | \ + CSR_INT_BIT_SW_RX | \ + CSR_INT_BIT_WAKEUP | \ + CSR_INT_BIT_ALIVE) + +/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ +#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ +#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ +#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */ +#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ +#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ +#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */ +#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ +#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ + +#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ + CSR39_FH_INT_BIT_RX_CHNL2 | \ + CSR_FH_INT_BIT_RX_CHNL1 | \ + CSR_FH_INT_BIT_RX_CHNL0) + + +#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \ + CSR_FH_INT_BIT_TX_CHNL1 | \ + CSR_FH_INT_BIT_TX_CHNL0) + +#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ + CSR_FH_INT_BIT_RX_CHNL1 | \ + CSR_FH_INT_BIT_RX_CHNL0) + +#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ + CSR_FH_INT_BIT_TX_CHNL0) + +/* GPIO */ +#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) +#define 
CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000) +#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200) + +/* RESET */ +#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) +#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) +#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) +#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) +#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) +#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000) + +/* + * GP (general purpose) CONTROL REGISTER + * Bit fields: + * 27: HW_RF_KILL_SW + * Indicates state of (platform's) hardware RF-Kill switch + * 26-24: POWER_SAVE_TYPE + * Indicates current power-saving mode: + * 000 -- No power saving + * 001 -- MAC power-down + * 010 -- PHY (radio) power-down + * 011 -- Error + * 9-6: SYS_CONFIG + * Indicates current system configuration, reflecting pins on chip + * as forced high/low by device circuit board. + * 4: GOING_TO_SLEEP + * Indicates MAC is entering a power-saving sleep power-down. + * Not a good time to access device-internal resources. + * 3: MAC_ACCESS_REQ + * Host sets this to request and maintain MAC wakeup, to allow host + * access to device-internal resources. Host must wait for + * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR + * device registers. + * 2: INIT_DONE + * Host sets this to put device into fully operational D0 power mode. + * Host resets this after SW_RESET to put device into low power mode. + * 0: MAC_CLOCK_READY + * Indicates MAC (ucode processor, etc.) is powered up and can run. + * Internal resources are accessible. + * NOTE: This does not indicate that the processor is actually running. + * NOTE: This does not indicate that 4965 or 3945 has completed + * init or post-power-down restore of internal SRAM memory. + * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that + * SRAM is restored and uCode is in normal operation mode. + * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and + * do not need to save/restore it. 
+ * NOTE: After device reset, this bit remains "0" until host sets + * INIT_DONE + */ +#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) +#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) +#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) +#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) + +#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) + +#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) +#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) +#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) + + +/* HW REV */ +#define CSR_HW_REV_TYPE_MSK (0x00000F0) +#define CSR_HW_REV_TYPE_3945 (0x00000D0) +#define CSR_HW_REV_TYPE_4965 (0x0000000) +#define CSR_HW_REV_TYPE_5300 (0x0000020) +#define CSR_HW_REV_TYPE_5350 (0x0000030) +#define CSR_HW_REV_TYPE_5100 (0x0000050) +#define CSR_HW_REV_TYPE_5150 (0x0000040) +#define CSR_HW_REV_TYPE_1000 (0x0000060) +#define CSR_HW_REV_TYPE_6x00 (0x0000070) +#define CSR_HW_REV_TYPE_6x50 (0x0000080) +#define CSR_HW_REV_TYPE_NONE (0x00000F0) + +/* EEPROM REG */ +#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) +#define CSR_EEPROM_REG_BIT_CMD (0x00000002) +#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC) +#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) + +/* EEPROM GP */ +#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ +#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) +#define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000) +#define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP (0x00000001) +#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) +#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) + +/* One-time-programmable memory general purpose reg */ +#define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */ +#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */ +#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */ +#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */ + +/* GP REG */ +#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ +#define CSR_GP_REG_NO_POWER_SAVE (0x00000000) +#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) +#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) +#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) + + +/* CSR GIO */ +#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) + +/* + * UCODE-DRIVER GP (general purpose) mailbox register 1 + * Host driver and uCode write and/or read this register to communicate with + * each other. + * Bit fields: + * 4: UCODE_DISABLE + * Host sets this to request permanent halt of uCode, same as + * sending CARD_STATE command with "halt" bit set. + * 3: CT_KILL_EXIT + * Host sets this to request exit from CT_KILL state, i.e. host thinks + * device temperature is low enough to continue normal operation. + * 2: CMD_BLOCKED + * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL) + * to release uCode to clear all Tx and command queues, enter + * unassociated mode, and power down. + * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit. + * 1: SW_BIT_RFKILL + * Host sets this when issuing CARD_STATE command to request + * device sleep. + * 0: MAC_SLEEP + * uCode sets this when preparing a power-saving power-down. + * uCode resets this when power-up is complete and SRAM is sane. + * NOTE: 3945/4965 saves internal SRAM data to host when powering down, + * and must restore this data after powering back up. + * MAC_SLEEP is the best indication that restore is complete. 
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and + * do not need to save/restore it. + */ +#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) +#define CSR_UCODE_SW_BIT_RFKILL (0x00000002) +#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) +#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) + +/* GP Driver */ +#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003) +#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000) +#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001) +#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002) +#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004) + +/* GIO Chicken Bits (PCI Express bus link power management) */ +#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) +#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) + +/* LED */ +#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) +#define CSR_LED_REG_TRUN_ON (0x78) +#define CSR_LED_REG_TRUN_OFF (0x38) + +/* ANA_PLL */ +#define CSR39_ANA_PLL_CFG_VAL (0x01000000) +#define CSR50_ANA_PLL_CFG_VAL (0x00880300) + +/* HPET MEM debug */ +#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) + +/* DRAM INT TABLE */ +#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) +#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) + +/* + * HBUS (Host-side Bus) + * + * HBUS registers are mapped directly into PCI bus space, but are used + * to indirectly access device's internal memory or registers that + * may be powered-down. + * + * Use iwl_write_direct32()/iwl_read_direct32() family for these registers; + * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ + * to make sure the MAC (uCode processor, etc.) is powered up for accessing + * internal resources. + * + * Do not use iwl_write32()/iwl_read32() family to access these registers; + * these provide only simple PCI bus access, without waking up the MAC. + */ +#define HBUS_BASE (0x400) + +/* + * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM + * structures, error log, event log, verifying uCode load). + * First write to address register, then read from or write to data register + * to complete the job. Once the address register is set up, accesses to + * data registers auto-increment the address by one dword. + * Bit usage for address registers (read or write): + * 0-31: memory address within device + */ +#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c) +#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010) +#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) +#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) + +/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */ +#define HBUS_TARG_MBX_C (HBUS_BASE+0x030) +#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) + +/* + * Registers for accessing device's internal peripheral registers + * (e.g. SCD, BSM, etc.). First write to address register, + * then read from or write to data register to complete the job. + * Bit usage for address registers (read or write): + * 0-15: register address (offset) within device + * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword) + */ +#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044) +#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048) +#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) +#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) + +/* + * Per-Tx-queue write pointer (index, really!) + * Indicates index to next TFD that driver will fill (1 past latest filled). 
+ * Bit usage: + * 0-7: queue write index + * 11-8: queue selector + */ +#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) + +#endif /* !__iwl_csr_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h new file mode 100644 index 0000000..1c7b53d --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -0,0 +1,194 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_debug_h__ +#define __iwl_debug_h__ + +struct iwl_priv; +extern u32 iwl_debug_level; + +#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a) +#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a) +#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a) +#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a) + +#define iwl_print_hex_error(priv, p, len) \ +do { \ + print_hex_dump(KERN_ERR, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#ifdef CONFIG_IWLWIFI_DEBUG +#define IWL_DEBUG(__priv, level, fmt, args...) \ +do { \ + if (iwl_get_debug_level(__priv) & (level)) \ + dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ + "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ + __func__ , ## args); \ +} while (0) + +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \ +do { \ + if ((iwl_get_debug_level(__priv) & (level)) && net_ratelimit()) \ + dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ + "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ + __func__ , ## args); \ +} while (0) + +#define iwl_print_hex_dump(priv, level, p, len) \ +do { \ + if (iwl_get_debug_level(priv) & level) \ + print_hex_dump(KERN_DEBUG, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#else +#define IWL_DEBUG(__priv, level, fmt, args...) +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) 
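/*
 * Illustrative sketch, not part of the patch: intended usage of the
 * IWL_DEBUG()/IWL_DEBUG_LIMIT() macros defined above.  With
 * CONFIG_IWLWIFI_DEBUG enabled a message is emitted only when the matching
 * IWL_DL_* classification bit (listed further down in this header) is set
 * in the active debug level, and the _LIMIT variant is additionally gated
 * by net_ratelimit(); without it both macros compile away to nothing.
 * The function and its arguments are hypothetical.
 */
static inline void example_debug_usage(struct iwl_priv *priv, u32 channel)
{
	/* printed only when IWL_DL_SCAN is set in the debug level */
	IWL_DEBUG(priv, IWL_DL_SCAN, "starting scan on channel %u\n", channel);

	/* rate-limited variant for hot per-frame paths */
	IWL_DEBUG_LIMIT(priv, IWL_DL_RX, "replenishing RX buffers\n");
}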
+static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, + void *p, u32 len) +{} +#endif /* CONFIG_IWLWIFI_DEBUG */ + +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_dbgfs_register(struct iwl_priv *priv, const char *name); +void iwl_dbgfs_unregister(struct iwl_priv *priv); +#else +static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) +{ + return 0; +} +static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) +{ +} +#endif /* CONFIG_IWLWIFI_DEBUGFS */ + +/* + * To use the debug system: + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of + * + * #define IWL_DL_xxxx VALUE + * + * where xxxx should be the name of the classification (for example, WEP). + * + * You then need to either add a IWL_xxxx_DEBUG() macro definition for your + * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * The active debug levels can be accessed via files + * + * /sys/module/iwlagn/parameters/debug{50} + * /sys/class/net/wlan0/device/debug_level + * + * when CONFIG_IWLWIFI_DEBUG=y. + */ + +/* 0x0000000F - 0x00000001 */ +#define IWL_DL_INFO (1 << 0) +#define IWL_DL_MAC80211 (1 << 1) +#define IWL_DL_HCMD (1 << 2) +#define IWL_DL_STATE (1 << 3) +/* 0x000000F0 - 0x00000010 */ +#define IWL_DL_MACDUMP (1 << 4) +#define IWL_DL_HCMD_DUMP (1 << 5) +#define IWL_DL_RADIO (1 << 7) +/* 0x00000F00 - 0x00000100 */ +#define IWL_DL_POWER (1 << 8) +#define IWL_DL_TEMP (1 << 9) +#define IWL_DL_NOTIF (1 << 10) +#define IWL_DL_SCAN (1 << 11) +/* 0x0000F000 - 0x00001000 */ +#define IWL_DL_ASSOC (1 << 12) +#define IWL_DL_DROP (1 << 13) +#define IWL_DL_TXPOWER (1 << 14) +#define IWL_DL_AP (1 << 15) +/* 0x000F0000 - 0x00010000 */ +#define IWL_DL_FW (1 << 16) +#define IWL_DL_RF_KILL (1 << 17) +#define IWL_DL_FW_ERRORS (1 << 18) +#define IWL_DL_LED (1 << 19) +/* 0x00F00000 - 0x00100000 */ +#define IWL_DL_RATE (1 << 20) +#define IWL_DL_CALIB (1 << 21) +#define IWL_DL_WEP (1 << 22) +#define IWL_DL_TX (1 << 23) +/* 0x0F000000 - 0x01000000 */ +#define IWL_DL_RX (1 << 24) +#define IWL_DL_ISR (1 << 25) +#define IWL_DL_HT (1 << 26) +#define IWL_DL_IO (1 << 27) +/* 0xF0000000 - 0x10000000 */ +#define IWL_DL_11H (1 << 28) +#define IWL_DL_STATS (1 << 29) +#define IWL_DL_TX_REPLY (1 << 30) +#define IWL_DL_QOS (1 << 31) + +#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) +#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a) +#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) +#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) +#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) +#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a) +#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a) +#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) +#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) +#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) +#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a) +#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) +#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) +#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) +#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_DROP_LIMIT(p, f, a...) 
\ + IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a) +#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a) +#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a) +#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a) +#define IWL_DEBUG_ASSOC(p, f, a...) \ + IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a) +#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a) +#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) +#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) +#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c new file mode 100644 index 0000000..7bf44f1 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -0,0 +1,2394 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include +#include +#include + +#include +#include + + +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-calib.h" + +/* create and remove of files */ +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, priv, \ + &iwl_dbgfs_##name##_ops)) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +/* file operation */ +#define DEBUGFS_READ_FUNC(name) \ +static ssize_t iwl_dbgfs_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos); + +#define DEBUGFS_WRITE_FUNC(name) \ +static ssize_t iwl_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos); + + +static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define DEBUGFS_READ_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .read = iwl_dbgfs_##name##_read, \ + .open = iwl_dbgfs_open_file_generic, \ +}; + +#define DEBUGFS_WRITE_FILE_OPS(name) \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .open = iwl_dbgfs_open_file_generic, \ +}; + + +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = iwl_dbgfs_##name##_write, \ + .read = iwl_dbgfs_##name##_read, \ + .open = iwl_dbgfs_open_file_generic, \ +}; + + +static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + + int cnt; + ssize_t ret; + const size_t bufsz = 100 + + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); + for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + get_mgmt_string(cnt), + priv->tx_stats.mgmt[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Control\n"); + for (cnt = 0; cnt < CONTROL_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + get_ctrl_string(cnt), + priv->tx_stats.ctrl[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); + pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", + priv->tx_stats.data_cnt); + pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", + priv->tx_stats.data_bytes); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_clear_traffic_statistics_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + u32 clear_flag; + char buf[8]; + int buf_size; + + 
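[Illustrative note, not part of the patch] The DEBUGFS_* helper macros above hide a fairly rigid pattern; a hypothetical read-only file named "example" would be wired up roughly like this (the handler body and the registration call are assumptions shown only to explain the macros):

/* Hypothetical debugfs file, for illustration only. */
static ssize_t iwl_dbgfs_example_read(struct file *file,
				      char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	/* private_data was set to priv by iwl_dbgfs_open_file_generic() */
	struct iwl_priv *priv = file->private_data;
	char buf[32];
	int pos;

	pos = scnprintf(buf, sizeof(buf), "stations: %d\n",
			priv->num_stations);
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

/* Declares the prototype and defines iwl_dbgfs_example_ops with
 * .read and .open filled in. */
DEBUGFS_READ_FILE_OPS(example);

/* In the registration code the file would then be created with
 * something like DEBUGFS_ADD_FILE(example, dir, S_IRUSR), which jumps
 * to the local err label if debugfs_create_file() fails. */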
memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &clear_flag) != 1) + return -EFAULT; + iwl_clear_traffic_stats(priv); + + return count; +} + +static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + int cnt; + ssize_t ret; + const size_t bufsz = 100 + + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); + for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + get_mgmt_string(cnt), + priv->rx_stats.mgmt[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Control:\n"); + for (cnt = 0; cnt < CONTROL_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + get_ctrl_string(cnt), + priv->rx_stats.ctrl[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); + pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", + priv->rx_stats.data_cnt); + pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", + priv->rx_stats.data_bytes); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +#define BYTE1_MASK 0x000000ff; +#define BYTE2_MASK 0x0000ffff; +#define BYTE3_MASK 0x00ffffff; +static ssize_t iwl_dbgfs_sram_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + u32 val; + char *buf; + ssize_t ret; + int i; + int pos = 0; + struct iwl_priv *priv = file->private_data; + size_t bufsz; + + /* default is to dump the entire data segment */ + if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { + priv->dbgfs_sram_offset = 0x800000; + if (priv->ucode_type == UCODE_INIT) + priv->dbgfs_sram_len = priv->ucode_init_data.len; + else + priv->dbgfs_sram_len = priv->ucode_data.len; + } + bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10; + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", + priv->dbgfs_sram_len); + pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", + priv->dbgfs_sram_offset); + for (i = priv->dbgfs_sram_len; i > 0; i -= 4) { + val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \ + priv->dbgfs_sram_len - i); + if (i < 4) { + switch (i) { + case 1: + val &= BYTE1_MASK; + break; + case 2: + val &= BYTE2_MASK; + break; + case 3: + val &= BYTE3_MASK; + break; + } + } + if (!(i % 16)) + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_sram_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[64]; + int buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x,%x", &offset, &len) == 2) { + priv->dbgfs_sram_offset = offset; + priv->dbgfs_sram_len = len; + } else { + priv->dbgfs_sram_offset = 0; + priv->dbgfs_sram_len = 0; + } + + return count; +} + +static ssize_t iwl_dbgfs_stations_read(struct file 
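[Illustrative note, not part of the patch] All of the *_write handlers in this file share the same bounded-copy-and-parse skeleton; written out once with a hypothetical control value (only the sscanf format and what is done with the parsed value differ between the real handlers):

static ssize_t iwl_dbgfs_example_write(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	char buf[8];
	int buf_size;
	u32 value;

	/* Copy at most sizeof(buf) - 1 bytes so the buffer stays
	 * NUL-terminated for sscanf(). */
	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &value) != 1)
		return -EFAULT;

	IWL_DEBUG_INFO(priv, "example control set to 0x%x\n", value);
	/* ... act on 'value' here ... */

	return count;	/* consume the whole write */
}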
*file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_station_entry *station; + int max_sta = priv->hw_params.max_stations; + char *buf; + int i, j, pos = 0; + ssize_t ret; + /* Add 30 for initial string */ + const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations); + + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", + priv->num_stations); + + for (i = 0; i < max_sta; i++) { + station = &priv->stations[i]; + if (station->used) { + pos += scnprintf(buf + pos, bufsz - pos, + "station %d:\ngeneral data:\n", i+1); + pos += scnprintf(buf + pos, bufsz - pos, "id: %u\n", + station->sta.sta.sta_id); + pos += scnprintf(buf + pos, bufsz - pos, "mode: %u\n", + station->sta.mode); + pos += scnprintf(buf + pos, bufsz - pos, + "flags: 0x%x\n", + station->sta.station_flags_msk); + pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "seq_num\t\ttxq_id"); + pos += scnprintf(buf + pos, bufsz - pos, + "\tframe_count\twait_for_ba\t"); + pos += scnprintf(buf + pos, bufsz - pos, + "start_idx\tbitmap0\t"); + pos += scnprintf(buf + pos, bufsz - pos, + "bitmap1\trate_n_flags"); + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + + for (j = 0; j < MAX_TID_COUNT; j++) { + pos += scnprintf(buf + pos, bufsz - pos, + "[%d]:\t\t%u", j, + station->tid[j].seq_number); + pos += scnprintf(buf + pos, bufsz - pos, + "\t%u\t\t%u\t\t%u\t\t", + station->tid[j].agg.txq_id, + station->tid[j].agg.frame_count, + station->tid[j].agg.wait_for_ba); + pos += scnprintf(buf + pos, bufsz - pos, + "%u\t%llu\t%u", + station->tid[j].agg.start_idx, + (unsigned long long)station->tid[j].agg.bitmap, + station->tid[j].agg.rate_n_flags); + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_nvm_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + ssize_t ret; + struct iwl_priv *priv = file->private_data; + int pos = 0, ofs = 0, buf_size = 0; + const u8 *ptr; + char *buf; + u16 eeprom_ver; + size_t eeprom_len = priv->cfg->eeprom_size; + buf_size = 4 * eeprom_len + 256; + + if (eeprom_len % 16) { + IWL_ERR(priv, "NVM size is not multiple of 16.\n"); + return -ENODATA; + } + + ptr = priv->eeprom; + if (!ptr) { + IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); + return -ENOMEM; + } + + /* 4 characters for byte 0xYY */ + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); + pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, " + "version: 0x%x\n", + (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) + ? 
"OTP" : "EEPROM", eeprom_ver); + for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, + buf_size - pos, 0); + pos += strlen(buf + pos); + if (buf_size - pos > 0) + buf[pos++] = '\n'; + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_log_event_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + ssize_t ret = -ENOMEM; + + ret = pos = priv->cfg->ops->lib->dump_nic_event_log( + priv, true, &buf, true); + if (buf) { + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + } + return ret; +} + +static ssize_t iwl_dbgfs_log_event_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &event_log_flag) != 1) + return -EFAULT; + if (event_log_flag == 1) + priv->cfg->ops->lib->dump_nic_event_log(priv, true, + NULL, false); + + return count; +} + + + +static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_supported_band *supp_band = NULL; + int pos = 0, i, bufsz = PAGE_SIZE; + char *buf; + ssize_t ret; + + if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 2.4GHz band 802.11bg):\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + ieee80211_frequency_to_channel( + channels[i].center_freq), + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_PASSIVE_SCAN ? + "passive only" : "active/passive"); + } + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 5.2GHz band (802.11a)\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + ieee80211_frequency_to_channel( + channels[i].center_freq), + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_PASSIVE_SCAN ? 
+ "passive only" : "active/passive"); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", + test_bit(STATUS_HCMD_ACTIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n", + test_bit(STATUS_INT_ENABLED, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", + test_bit(STATUS_RF_KILL_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", + test_bit(STATUS_CT_KILL, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n", + test_bit(STATUS_INIT, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", + test_bit(STATUS_ALIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", + test_bit(STATUS_READY, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n", + test_bit(STATUS_TEMPERATURE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n", + test_bit(STATUS_GEO_CONFIGURED, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", + test_bit(STATUS_EXIT_PENDING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", + test_bit(STATUS_STATISTICS, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n", + test_bit(STATUS_SCANNING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n", + test_bit(STATUS_SCAN_ABORTING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", + test_bit(STATUS_SCAN_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", + test_bit(STATUS_POWER_PMI, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", + test_bit(STATUS_FW_ERROR, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_MODE_PENDING:\t %d\n", + test_bit(STATUS_MODE_PENDING, &priv->status)); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_interrupt_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = 24 * 64; /* 24 items * 64 char per item */ + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, + "Interrupt Statistics Report:\n"); + + pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", + priv->isr_stats.hw); + pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", + priv->isr_stats.sw); + if (priv->isr_stats.sw > 0) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tLast Restarting Code: 0x%X\n", + priv->isr_stats.sw_err); + } +#ifdef CONFIG_IWLWIFI_DEBUG + pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", + priv->isr_stats.sch); + pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", + priv->isr_stats.alive); +#endif + pos += scnprintf(buf + pos, bufsz - pos, + "HW RF KILL switch 
toggled:\t %u\n", + priv->isr_stats.rfkill); + + pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", + priv->isr_stats.ctkill); + + pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", + priv->isr_stats.wakeup); + + pos += scnprintf(buf + pos, bufsz - pos, + "Rx command responses:\t\t %u\n", + priv->isr_stats.rx); + for (cnt = 0; cnt < REPLY_MAX; cnt++) { + if (priv->isr_stats.rx_handlers[cnt] > 0) + pos += scnprintf(buf + pos, bufsz - pos, + "\tRx handler[%36s]:\t\t %u\n", + get_cmd_string(cnt), + priv->isr_stats.rx_handlers[cnt]); + } + + pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", + priv->isr_stats.tx); + + pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", + priv->isr_stats.unhandled); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_interrupt_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &reset_flag) != 1) + return -EFAULT; + if (reset_flag == 0) + iwl_clear_isr_stats(priv); + + return count; +} + +static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0, i; + char buf[256]; + const size_t bufsz = sizeof(buf); + ssize_t ret; + + for (i = 0; i < AC_NUM; i++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tcw_min\tcw_max\taifsn\ttxop\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "AC[%d]\t%u\t%u\t%u\t%u\n", i, + priv->qos_data.def_qos_parm.ac[i].cw_min, + priv->qos_data.def_qos_parm.ac[i].cw_max, + priv->qos_data.def_qos_parm.ac[i].aifsn, + priv->qos_data.def_qos_parm.ac[i].edca_txop); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + return ret; +} + +static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[256]; + const size_t bufsz = sizeof(buf); + ssize_t ret; + + pos += scnprintf(buf + pos, bufsz - pos, + "allow blinking: %s\n", + (priv->allow_blinking) ? "True" : "False"); + if (priv->allow_blinking) { + pos += scnprintf(buf + pos, bufsz - pos, + "Led blinking rate: %u\n", + priv->last_blink_rate); + pos += scnprintf(buf + pos, bufsz - pos, + "Last blink time: %lu\n", + priv->last_blink_time); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + return ret; +} + +static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + struct iwl_tt_restriction *restriction; + char buf[100]; + int pos = 0; + const size_t bufsz = sizeof(buf); + ssize_t ret; + + pos += scnprintf(buf + pos, bufsz - pos, + "Thermal Throttling Mode: %s\n", + tt->advanced_tt ? 
"Advance" : "Legacy"); + pos += scnprintf(buf + pos, bufsz - pos, + "Thermal Throttling State: %d\n", + tt->state); + if (tt->advanced_tt) { + restriction = tt->restriction + tt->state; + pos += scnprintf(buf + pos, bufsz - pos, + "Tx mode: %d\n", + restriction->tx_stream); + pos += scnprintf(buf + pos, bufsz - pos, + "Rx mode: %d\n", + restriction->rx_stream); + pos += scnprintf(buf + pos, bufsz - pos, + "HT mode: %d\n", + restriction->is_ht); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + return ret; +} + +static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &ht40) != 1) + return -EFAULT; + if (!iwl_is_associated(priv)) + priv->disable_ht40 = ht40 ? true : false; + else { + IWL_ERR(priv, "Sta associated with AP - " + "Change to 40MHz channel support is not allowed\n"); + return -EINVAL; + } + + return count; +} + +static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[100]; + int pos = 0; + const size_t bufsz = sizeof(buf); + ssize_t ret; + + pos += scnprintf(buf + pos, bufsz - pos, + "11n 40MHz Mode: %s\n", + priv->disable_ht40 ? "Disabled" : "Enabled"); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + return ret; +} + +static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int value; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%d", &value) != 1) + return -EINVAL; + + /* + * Our users expect 0 to be "CAM", but 0 isn't actually + * valid here. However, let's not confuse them and present + * IWL_POWER_INDEX_1 as "1", not "0". 
+ */ + if (value == 0) + return -EINVAL; + else if (value > 0) + value -= 1; + + if (value != -1 && (value < 0 || value >= IWL_POWER_NUM)) + return -EINVAL; + + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + + priv->power_data.debug_sleep_level_override = value; + + mutex_lock(&priv->mutex); + iwl_power_update_mode(priv, true); + mutex_unlock(&priv->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[10]; + int pos, value; + const size_t bufsz = sizeof(buf); + + /* see the write function */ + value = priv->power_data.debug_sleep_level_override; + if (value >= 0) + value += 1; + + pos = scnprintf(buf, bufsz, "%d\n", value); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[200]; + int pos = 0, i; + const size_t bufsz = sizeof(buf); + struct iwl_powertable_cmd *cmd = &priv->power_data.sleep_cmd; + + pos += scnprintf(buf + pos, bufsz - pos, + "flags: %#.2x\n", le16_to_cpu(cmd->flags)); + pos += scnprintf(buf + pos, bufsz - pos, + "RX/TX timeout: %d/%d usec\n", + le32_to_cpu(cmd->rx_data_timeout), + le32_to_cpu(cmd->tx_data_timeout)); + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "sleep_interval[%d]: %d\n", i, + le32_to_cpu(cmd->sleep_interval[i])); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +DEBUGFS_READ_WRITE_FILE_OPS(sram); +DEBUGFS_READ_WRITE_FILE_OPS(log_event); +DEBUGFS_READ_FILE_OPS(nvm); +DEBUGFS_READ_FILE_OPS(stations); +DEBUGFS_READ_FILE_OPS(channels); +DEBUGFS_READ_FILE_OPS(status); +DEBUGFS_READ_WRITE_FILE_OPS(interrupt); +DEBUGFS_READ_FILE_OPS(qos); +DEBUGFS_READ_FILE_OPS(led); +DEBUGFS_READ_FILE_OPS(thermal_throttling); +DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); +DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); +DEBUGFS_READ_FILE_OPS(current_sleep_command); + +static ssize_t iwl_dbgfs_traffic_log_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0, ofs = 0; + int cnt = 0, entry; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + struct iwl_rx_queue *rxq = &priv->rxq; + char *buf; + int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + + (priv->cfg->num_of_queues * 32 * 8) + 400; + const u8 *ptr; + ssize_t ret; + + if (!priv->txq) { + IWL_ERR(priv, "txq not ready\n"); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate buffer\n"); + return -ENOMEM; + } + pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n"); + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + txq = &priv->txq[cnt]; + q = &txq->q; + pos += scnprintf(buf + pos, bufsz - pos, + "q[%d]: read_ptr: %u, write_ptr: %u\n", + cnt, q->read_ptr, q->write_ptr); + } + if (priv->tx_traffic && (iwl_debug_level & IWL_DL_TX)) { + ptr = priv->tx_traffic; + pos += scnprintf(buf + pos, bufsz - pos, + "Tx Traffic idx: %u\n", priv->tx_traffic_idx); + for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { + for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; + entry++, ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, + buf + pos, bufsz - pos, 0); + pos 
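[Illustrative note, not part of the patch] The comment above describes an off-by-one mapping between what the user writes and the internal power index; a standalone sketch of just that mapping (IWL_POWER_NUM is assumed to be 5 here purely for illustration):

#include <errno.h>
#include <stdio.h>

#define IWL_POWER_NUM 5	/* assumed value, for illustration only */

/* Mirrors iwl_dbgfs_sleep_level_override_write(): user-visible levels
 * 1..IWL_POWER_NUM map to internal indices 0..IWL_POWER_NUM-1, -1
 * clears the override, and 0 is rejected. */
static int map_sleep_override(int value)
{
	if (value == 0)
		return -EINVAL;
	else if (value > 0)
		value -= 1;

	if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
		return -EINVAL;

	return value;
}

int main(void)
{
	int inputs[] = { -1, 0, 1, 3, 5, 6 };
	unsigned int i;

	for (i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++)
		printf("write %2d -> %d\n", inputs[i],
		       map_sleep_override(inputs[i]));
	return 0;
}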
+= strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } + } + + pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "read: %u, write: %u\n", + rxq->read, rxq->write); + + if (priv->rx_traffic && (iwl_debug_level & IWL_DL_RX)) { + ptr = priv->rx_traffic; + pos += scnprintf(buf + pos, bufsz - pos, + "Rx Traffic idx: %u\n", priv->rx_traffic_idx); + for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { + for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; + entry++, ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, + buf + pos, bufsz - pos, 0); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_traffic_log_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int traffic_log; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &traffic_log) != 1) + return -EFAULT; + if (traffic_log == 0) + iwl_reset_traffic_log(priv); + + return count; +} + +static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + char *buf; + int pos = 0; + int cnt; + int ret; + const size_t bufsz = sizeof(char) * 64 * priv->cfg->num_of_queues; + + if (!priv->txq) { + IWL_ERR(priv, "txq not ready\n"); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + txq = &priv->txq[cnt]; + q = &txq->q; + pos += scnprintf(buf + pos, bufsz - pos, + "hwq %.2d: read=%u write=%u stop=%d" + " swq_id=%#.2x (ac %d/hwq %d)\n", + cnt, q->read_ptr, q->write_ptr, + !!test_bit(cnt, priv->queue_stopped), + txq->swq_id, + txq->swq_id & 0x80 ? txq->swq_id & 3 : + txq->swq_id, + txq->swq_id & 0x80 ? 
(txq->swq_id >> 2) & + 0x1f : txq->swq_id); + if (cnt >= 4) + continue; + /* for the ACs, display the stop count too */ + pos += scnprintf(buf + pos, bufsz - pos, + " stop-count: %d\n", + atomic_read(&priv->queue_stop_count[cnt])); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + struct iwl_rx_queue *rxq = &priv->rxq; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", + rxq->read); + pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", + rxq->write); + pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", + rxq->free_count); + pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", + le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, + int bufsz) +{ + int p = 0; + + p += scnprintf(buf + p, bufsz - p, + "Statistics Flag(0x%X):\n", + le32_to_cpu(priv->statistics.flag)); + if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, + "\tOperational Frequency: %s\n", + (le32_to_cpu(priv->statistics.flag) & + UCODE_STATISTICS_FREQUENCY_MSK) + ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, + "\tTGj Narrow Band: %s\n", + (le32_to_cpu(priv->statistics.flag) & + UCODE_STATISTICS_NARROW_BAND_MSK) + ? "enabled" : "disabled"); + return p; +} + +static const char ucode_stats_header[] = + "%-32s current acumulative delta max\n"; +static const char ucode_stats_short_format[] = + " %-30s %10u\n"; +static const char ucode_stats_format[] = + " %-30s %10u %10u %10u %10u\n"; + +static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_rx_phy) * 40 + + sizeof(struct statistics_rx_non_phy) * 40 + + sizeof(struct statistics_rx_ht_phy) * 40 + 400; + ssize_t ret; + struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; + struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; + struct statistics_rx_non_phy *general, *accum_general; + struct statistics_rx_non_phy *delta_general, *max_general; + struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + ofdm = &priv->statistics.rx.ofdm; + cck = &priv->statistics.rx.cck; + general = &priv->statistics.rx.general; + ht = &priv->statistics.rx.ofdm_ht; + accum_ofdm = &priv->accum_statistics.rx.ofdm; + accum_cck = &priv->accum_statistics.rx.cck; + accum_general = &priv->accum_statistics.rx.general; + accum_ht = &priv->accum_statistics.rx.ofdm_ht; + delta_ofdm = &priv->delta_statistics.rx.ofdm; + delta_cck = &priv->delta_statistics.rx.cck; + delta_general = &priv->delta_statistics.rx.general; + delta_ht = &priv->delta_statistics.rx.ofdm_ht; + max_ofdm = 
&priv->max_delta.rx.ofdm; + max_cck = &priv->max_delta.rx.cck; + max_general = &priv->max_delta.rx.general; + max_ht = &priv->max_delta.rx.ofdm_ht; + + pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header, + "Statistics_Rx - OFDM:"); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "ina_cnt:", le32_to_cpu(ofdm->ina_cnt), + accum_ofdm->ina_cnt, + delta_ofdm->ina_cnt, max_ofdm->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "fina_cnt:", + le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, + delta_ofdm->fina_cnt, max_ofdm->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "plcp_err:", + le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, + delta_ofdm->plcp_err, max_ofdm->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "crc32_err:", + le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, + delta_ofdm->crc32_err, max_ofdm->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "overrun_err:", + le32_to_cpu(ofdm->overrun_err), + accum_ofdm->overrun_err, + delta_ofdm->overrun_err, max_ofdm->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "early_overrun_err:", + le32_to_cpu(ofdm->early_overrun_err), + accum_ofdm->early_overrun_err, + delta_ofdm->early_overrun_err, + max_ofdm->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "crc32_good:", + le32_to_cpu(ofdm->crc32_good), + accum_ofdm->crc32_good, + delta_ofdm->crc32_good, max_ofdm->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "false_alarm_cnt:", + le32_to_cpu(ofdm->false_alarm_cnt), + accum_ofdm->false_alarm_cnt, + delta_ofdm->false_alarm_cnt, + max_ofdm->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "fina_sync_err_cnt:", + le32_to_cpu(ofdm->fina_sync_err_cnt), + accum_ofdm->fina_sync_err_cnt, + delta_ofdm->fina_sync_err_cnt, + max_ofdm->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "sfd_timeout:", + le32_to_cpu(ofdm->sfd_timeout), + accum_ofdm->sfd_timeout, + delta_ofdm->sfd_timeout, + max_ofdm->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "fina_timeout:", + le32_to_cpu(ofdm->fina_timeout), + accum_ofdm->fina_timeout, + delta_ofdm->fina_timeout, + max_ofdm->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "unresponded_rts:", + le32_to_cpu(ofdm->unresponded_rts), + accum_ofdm->unresponded_rts, + delta_ofdm->unresponded_rts, + max_ofdm->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "rxe_frame_lmt_ovrun:", + le32_to_cpu(ofdm->rxe_frame_limit_overrun), + accum_ofdm->rxe_frame_limit_overrun, + delta_ofdm->rxe_frame_limit_overrun, + max_ofdm->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "sent_ack_cnt:", + le32_to_cpu(ofdm->sent_ack_cnt), + accum_ofdm->sent_ack_cnt, + delta_ofdm->sent_ack_cnt, + max_ofdm->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "sent_cts_cnt:", + le32_to_cpu(ofdm->sent_cts_cnt), + accum_ofdm->sent_cts_cnt, + delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "sent_ba_rsp_cnt:", + le32_to_cpu(ofdm->sent_ba_rsp_cnt), + accum_ofdm->sent_ba_rsp_cnt, + delta_ofdm->sent_ba_rsp_cnt, + max_ofdm->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz 
- pos, ucode_stats_format, + "dsp_self_kill:", + le32_to_cpu(ofdm->dsp_self_kill), + accum_ofdm->dsp_self_kill, + delta_ofdm->dsp_self_kill, + max_ofdm->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "mh_format_err:", + le32_to_cpu(ofdm->mh_format_err), + accum_ofdm->mh_format_err, + delta_ofdm->mh_format_err, + max_ofdm->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "re_acq_main_rssi_sum:", + le32_to_cpu(ofdm->re_acq_main_rssi_sum), + accum_ofdm->re_acq_main_rssi_sum, + delta_ofdm->re_acq_main_rssi_sum, + max_ofdm->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header, + "Statistics_Rx - CCK:"); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "ina_cnt:", + le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, + delta_cck->ina_cnt, max_cck->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "fina_cnt:", + le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, + delta_cck->fina_cnt, max_cck->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "plcp_err:", + le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, + delta_cck->plcp_err, max_cck->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "crc32_err:", + le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, + delta_cck->crc32_err, max_cck->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "overrun_err:", + le32_to_cpu(cck->overrun_err), + accum_cck->overrun_err, + delta_cck->overrun_err, max_cck->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "early_overrun_err:", + le32_to_cpu(cck->early_overrun_err), + accum_cck->early_overrun_err, + delta_cck->early_overrun_err, + max_cck->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "crc32_good:", + le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, + delta_cck->crc32_good, + max_cck->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "false_alarm_cnt:", + le32_to_cpu(cck->false_alarm_cnt), + accum_cck->false_alarm_cnt, + delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "fina_sync_err_cnt:", + le32_to_cpu(cck->fina_sync_err_cnt), + accum_cck->fina_sync_err_cnt, + delta_cck->fina_sync_err_cnt, + max_cck->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "sfd_timeout:", + le32_to_cpu(cck->sfd_timeout), + accum_cck->sfd_timeout, + delta_cck->sfd_timeout, max_cck->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "fina_timeout:", + le32_to_cpu(cck->fina_timeout), + accum_cck->fina_timeout, + delta_cck->fina_timeout, max_cck->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "unresponded_rts:", + le32_to_cpu(cck->unresponded_rts), + accum_cck->unresponded_rts, + delta_cck->unresponded_rts, + max_cck->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "rxe_frame_lmt_ovrun:", + le32_to_cpu(cck->rxe_frame_limit_overrun), + accum_cck->rxe_frame_limit_overrun, + delta_cck->rxe_frame_limit_overrun, + max_cck->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "sent_ack_cnt:", + le32_to_cpu(cck->sent_ack_cnt), + accum_cck->sent_ack_cnt, + delta_cck->sent_ack_cnt, + max_cck->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "sent_cts_cnt:", + 
le32_to_cpu(cck->sent_cts_cnt), + accum_cck->sent_cts_cnt, + delta_cck->sent_cts_cnt, + max_cck->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "sent_ba_rsp_cnt:", + le32_to_cpu(cck->sent_ba_rsp_cnt), + accum_cck->sent_ba_rsp_cnt, + delta_cck->sent_ba_rsp_cnt, + max_cck->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "dsp_self_kill:", + le32_to_cpu(cck->dsp_self_kill), + accum_cck->dsp_self_kill, + delta_cck->dsp_self_kill, + max_cck->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "mh_format_err:", + le32_to_cpu(cck->mh_format_err), + accum_cck->mh_format_err, + delta_cck->mh_format_err, max_cck->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "re_acq_main_rssi_sum:", + le32_to_cpu(cck->re_acq_main_rssi_sum), + accum_cck->re_acq_main_rssi_sum, + delta_cck->re_acq_main_rssi_sum, + max_cck->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header, + "Statistics_Rx - GENERAL:"); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "bogus_cts:", + le32_to_cpu(general->bogus_cts), + accum_general->bogus_cts, + delta_general->bogus_cts, max_general->bogus_cts); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "bogus_ack:", + le32_to_cpu(general->bogus_ack), + accum_general->bogus_ack, + delta_general->bogus_ack, max_general->bogus_ack); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "non_bssid_frames:", + le32_to_cpu(general->non_bssid_frames), + accum_general->non_bssid_frames, + delta_general->non_bssid_frames, + max_general->non_bssid_frames); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "filtered_frames:", + le32_to_cpu(general->filtered_frames), + accum_general->filtered_frames, + delta_general->filtered_frames, + max_general->filtered_frames); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "non_channel_beacons:", + le32_to_cpu(general->non_channel_beacons), + accum_general->non_channel_beacons, + delta_general->non_channel_beacons, + max_general->non_channel_beacons); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "channel_beacons:", + le32_to_cpu(general->channel_beacons), + accum_general->channel_beacons, + delta_general->channel_beacons, + max_general->channel_beacons); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "num_missed_bcon:", + le32_to_cpu(general->num_missed_bcon), + accum_general->num_missed_bcon, + delta_general->num_missed_bcon, + max_general->num_missed_bcon); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "adc_rx_saturation_time:", + le32_to_cpu(general->adc_rx_saturation_time), + accum_general->adc_rx_saturation_time, + delta_general->adc_rx_saturation_time, + max_general->adc_rx_saturation_time); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "ina_detect_search_tm:", + le32_to_cpu(general->ina_detection_search_time), + accum_general->ina_detection_search_time, + delta_general->ina_detection_search_time, + max_general->ina_detection_search_time); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "beacon_silence_rssi_a:", + le32_to_cpu(general->beacon_silence_rssi_a), + accum_general->beacon_silence_rssi_a, + delta_general->beacon_silence_rssi_a, + max_general->beacon_silence_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "beacon_silence_rssi_b:", + le32_to_cpu(general->beacon_silence_rssi_b), + 
accum_general->beacon_silence_rssi_b, + delta_general->beacon_silence_rssi_b, + max_general->beacon_silence_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "beacon_silence_rssi_c:", + le32_to_cpu(general->beacon_silence_rssi_c), + accum_general->beacon_silence_rssi_c, + delta_general->beacon_silence_rssi_c, + max_general->beacon_silence_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "interference_data_flag:", + le32_to_cpu(general->interference_data_flag), + accum_general->interference_data_flag, + delta_general->interference_data_flag, + max_general->interference_data_flag); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "channel_load:", + le32_to_cpu(general->channel_load), + accum_general->channel_load, + delta_general->channel_load, + max_general->channel_load); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "dsp_false_alarms:", + le32_to_cpu(general->dsp_false_alarms), + accum_general->dsp_false_alarms, + delta_general->dsp_false_alarms, + max_general->dsp_false_alarms); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "beacon_rssi_a:", + le32_to_cpu(general->beacon_rssi_a), + accum_general->beacon_rssi_a, + delta_general->beacon_rssi_a, + max_general->beacon_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "beacon_rssi_b:", + le32_to_cpu(general->beacon_rssi_b), + accum_general->beacon_rssi_b, + delta_general->beacon_rssi_b, + max_general->beacon_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "beacon_rssi_c:", + le32_to_cpu(general->beacon_rssi_c), + accum_general->beacon_rssi_c, + delta_general->beacon_rssi_c, + max_general->beacon_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "beacon_energy_a:", + le32_to_cpu(general->beacon_energy_a), + accum_general->beacon_energy_a, + delta_general->beacon_energy_a, + max_general->beacon_energy_a); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "beacon_energy_b:", + le32_to_cpu(general->beacon_energy_b), + accum_general->beacon_energy_b, + delta_general->beacon_energy_b, + max_general->beacon_energy_b); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "beacon_energy_c:", + le32_to_cpu(general->beacon_energy_c), + accum_general->beacon_energy_c, + delta_general->beacon_energy_c, + max_general->beacon_energy_c); + + pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n"); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header, + "Statistics_Rx - OFDM_HT:"); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "plcp_err:", + le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, + delta_ht->plcp_err, max_ht->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "overrun_err:", + le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, + delta_ht->overrun_err, max_ht->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "early_overrun_err:", + le32_to_cpu(ht->early_overrun_err), + accum_ht->early_overrun_err, + delta_ht->early_overrun_err, + max_ht->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "crc32_good:", + le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, + delta_ht->crc32_good, max_ht->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "crc32_err:", + le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, + delta_ht->crc32_err, max_ht->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, 
ucode_stats_format, + "mh_format_err:", + le32_to_cpu(ht->mh_format_err), + accum_ht->mh_format_err, + delta_ht->mh_format_err, max_ht->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg_crc32_good:", + le32_to_cpu(ht->agg_crc32_good), + accum_ht->agg_crc32_good, + delta_ht->agg_crc32_good, max_ht->agg_crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg_mpdu_cnt:", + le32_to_cpu(ht->agg_mpdu_cnt), + accum_ht->agg_mpdu_cnt, + delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg_cnt:", + le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, + delta_ht->agg_cnt, max_ht->agg_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "unsupport_mcs:", + le32_to_cpu(ht->unsupport_mcs), + accum_ht->unsupport_mcs, + delta_ht->unsupport_mcs, max_ht->unsupport_mcs); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct statistics_tx) * 48) + 250; + ssize_t ret; + struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + tx = &priv->statistics.tx; + accum_tx = &priv->accum_statistics.tx; + delta_tx = &priv->delta_statistics.tx; + max_tx = &priv->max_delta.tx; + pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header, + "Statistics_Tx:"); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "preamble:", + le32_to_cpu(tx->preamble_cnt), + accum_tx->preamble_cnt, + delta_tx->preamble_cnt, max_tx->preamble_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "rx_detected_cnt:", + le32_to_cpu(tx->rx_detected_cnt), + accum_tx->rx_detected_cnt, + delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "bt_prio_defer_cnt:", + le32_to_cpu(tx->bt_prio_defer_cnt), + accum_tx->bt_prio_defer_cnt, + delta_tx->bt_prio_defer_cnt, + max_tx->bt_prio_defer_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "bt_prio_kill_cnt:", + le32_to_cpu(tx->bt_prio_kill_cnt), + accum_tx->bt_prio_kill_cnt, + delta_tx->bt_prio_kill_cnt, + max_tx->bt_prio_kill_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "few_bytes_cnt:", + le32_to_cpu(tx->few_bytes_cnt), + accum_tx->few_bytes_cnt, + delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "cts_timeout:", + le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, + delta_tx->cts_timeout, max_tx->cts_timeout); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "ack_timeout:", + le32_to_cpu(tx->ack_timeout), + accum_tx->ack_timeout, + delta_tx->ack_timeout, max_tx->ack_timeout); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "expected_ack_cnt:", + le32_to_cpu(tx->expected_ack_cnt), + accum_tx->expected_ack_cnt, + delta_tx->expected_ack_cnt, + max_tx->expected_ack_cnt); + pos += scnprintf(buf 
+ pos, bufsz - pos, ucode_stats_format, + "actual_ack_cnt:", + le32_to_cpu(tx->actual_ack_cnt), + accum_tx->actual_ack_cnt, + delta_tx->actual_ack_cnt, + max_tx->actual_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "dump_msdu_cnt:", + le32_to_cpu(tx->dump_msdu_cnt), + accum_tx->dump_msdu_cnt, + delta_tx->dump_msdu_cnt, + max_tx->dump_msdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "abort_nxt_frame_mismatch:", + le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), + accum_tx->burst_abort_next_frame_mismatch_cnt, + delta_tx->burst_abort_next_frame_mismatch_cnt, + max_tx->burst_abort_next_frame_mismatch_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "abort_missing_nxt_frame:", + le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), + accum_tx->burst_abort_missing_next_frame_cnt, + delta_tx->burst_abort_missing_next_frame_cnt, + max_tx->burst_abort_missing_next_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "cts_timeout_collision:", + le32_to_cpu(tx->cts_timeout_collision), + accum_tx->cts_timeout_collision, + delta_tx->cts_timeout_collision, + max_tx->cts_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "ack_ba_timeout_collision:", + le32_to_cpu(tx->ack_or_ba_timeout_collision), + accum_tx->ack_or_ba_timeout_collision, + delta_tx->ack_or_ba_timeout_collision, + max_tx->ack_or_ba_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg ba_timeout:", + le32_to_cpu(tx->agg.ba_timeout), + accum_tx->agg.ba_timeout, + delta_tx->agg.ba_timeout, + max_tx->agg.ba_timeout); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg ba_resched_frames:", + le32_to_cpu(tx->agg.ba_reschedule_frames), + accum_tx->agg.ba_reschedule_frames, + delta_tx->agg.ba_reschedule_frames, + max_tx->agg.ba_reschedule_frames); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg scd_query_agg_frame:", + le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), + accum_tx->agg.scd_query_agg_frame_cnt, + delta_tx->agg.scd_query_agg_frame_cnt, + max_tx->agg.scd_query_agg_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg scd_query_no_agg:", + le32_to_cpu(tx->agg.scd_query_no_agg), + accum_tx->agg.scd_query_no_agg, + delta_tx->agg.scd_query_no_agg, + max_tx->agg.scd_query_no_agg); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg scd_query_agg:", + le32_to_cpu(tx->agg.scd_query_agg), + accum_tx->agg.scd_query_agg, + delta_tx->agg.scd_query_agg, + max_tx->agg.scd_query_agg); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg scd_query_mismatch:", + le32_to_cpu(tx->agg.scd_query_mismatch), + accum_tx->agg.scd_query_mismatch, + delta_tx->agg.scd_query_mismatch, + max_tx->agg.scd_query_mismatch); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg frame_not_ready:", + le32_to_cpu(tx->agg.frame_not_ready), + accum_tx->agg.frame_not_ready, + delta_tx->agg.frame_not_ready, + max_tx->agg.frame_not_ready); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg underrun:", + le32_to_cpu(tx->agg.underrun), + accum_tx->agg.underrun, + delta_tx->agg.underrun, max_tx->agg.underrun); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "agg bt_prio_kill:", + le32_to_cpu(tx->agg.bt_prio_kill), + accum_tx->agg.bt_prio_kill, + delta_tx->agg.bt_prio_kill, + max_tx->agg.bt_prio_kill); + pos += scnprintf(buf + pos, bufsz - pos, 
ucode_stats_format, + "agg rx_ba_rsp_cnt:", + le32_to_cpu(tx->agg.rx_ba_rsp_cnt), + accum_tx->agg.rx_ba_rsp_cnt, + delta_tx->agg.rx_ba_rsp_cnt, + max_tx->agg.rx_ba_rsp_cnt); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_general) * 10 + 300; + ssize_t ret; + struct statistics_general *general, *accum_general; + struct statistics_general *delta_general, *max_general; + struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; + struct statistics_div *div, *accum_div, *delta_div, *max_div; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + general = &priv->statistics.general; + dbg = &priv->statistics.general.dbg; + div = &priv->statistics.general.div; + accum_general = &priv->accum_statistics.general; + delta_general = &priv->delta_statistics.general; + max_general = &priv->max_delta.general; + accum_dbg = &priv->accum_statistics.general.dbg; + delta_dbg = &priv->delta_statistics.general.dbg; + max_dbg = &priv->max_delta.general.dbg; + accum_div = &priv->accum_statistics.general.div; + delta_div = &priv->delta_statistics.general.div; + max_div = &priv->max_delta.general.div; + pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header, + "Statistics_General:"); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format, + "temperature:", + le32_to_cpu(general->temperature)); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format, + "temperature_m:", + le32_to_cpu(general->temperature_m)); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "burst_check:", + le32_to_cpu(dbg->burst_check), + accum_dbg->burst_check, + delta_dbg->burst_check, max_dbg->burst_check); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "burst_count:", + le32_to_cpu(dbg->burst_count), + accum_dbg->burst_count, + delta_dbg->burst_count, max_dbg->burst_count); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "sleep_time:", + le32_to_cpu(general->sleep_time), + accum_general->sleep_time, + delta_general->sleep_time, max_general->sleep_time); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "slots_out:", + le32_to_cpu(general->slots_out), + accum_general->slots_out, + delta_general->slots_out, max_general->slots_out); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "slots_idle:", + le32_to_cpu(general->slots_idle), + accum_general->slots_idle, + delta_general->slots_idle, max_general->slots_idle); + pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n", + le32_to_cpu(general->ttl_timestamp)); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "tx_on_a:", + le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, + delta_div->tx_on_a, max_div->tx_on_a); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "tx_on_b:", + le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, + delta_div->tx_on_b, max_div->tx_on_b); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + 
"exec_time:", + le32_to_cpu(div->exec_time), accum_div->exec_time, + delta_div->exec_time, max_div->exec_time); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "probe_time:", + le32_to_cpu(div->probe_time), accum_div->probe_time, + delta_div->probe_time, max_div->probe_time); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "rx_enable_counter:", + le32_to_cpu(general->rx_enable_counter), + accum_general->rx_enable_counter, + delta_general->rx_enable_counter, + max_general->rx_enable_counter); + pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format, + "num_of_sos_states:", + le32_to_cpu(general->num_of_sos_states), + accum_general->num_of_sos_states, + delta_general->num_of_sos_states, + max_general->num_of_sos_states); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_sensitivity_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100; + ssize_t ret; + struct iwl_sensitivity_data *data; + + data = &priv->sensitivity_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", + data->auto_corr_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc:\t\t %u\n", + data->auto_corr_ofdm_mrc); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", + data->auto_corr_ofdm_x1); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc_x1:\t\t %u\n", + data->auto_corr_ofdm_mrc_x1); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", + data->auto_corr_cck); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", + data->auto_corr_cck_mrc); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_ofdm:\t\t %u\n", + data->last_bad_plcp_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", + data->last_fa_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_cck:\t\t %u\n", + data->last_bad_plcp_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", + data->last_fa_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", + data->nrg_curr_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", + data->nrg_prev_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); + for (cnt = 0; cnt < 10; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_value[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); + for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_silence_rssi[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", + data->nrg_silence_ref); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", + data->nrg_energy_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", + data->nrg_silence_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", + data->nrg_th_cck); + pos += scnprintf(buf + pos, bufsz - pos, + "nrg_auto_corr_silence_diff:\t %u\n", + 
data->nrg_auto_corr_silence_diff); + pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", + data->num_in_cck_no_fa); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", + data->nrg_th_ofdm); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + + +static ssize_t iwl_dbgfs_chain_noise_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100; + ssize_t ret; + struct iwl_chain_noise_data *data; + + data = &priv->chain_noise_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", + data->active_chains); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", + data->chain_noise_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", + data->chain_noise_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", + data->chain_noise_c); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", + data->chain_signal_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", + data->chain_signal_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", + data->chain_signal_c); + pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", + data->beacon_count); + + pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->disconn_array[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->delta_gain_code[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", + data->radio_write); + pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", + data->state); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_tx_power_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[128]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct statistics_tx *tx; + + if (!iwl_is_alive(priv)) + pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); + else { + tx = &priv->statistics.tx; + if (tx->tx_power.ant_a || + tx->tx_power.ant_b || + tx->tx_power.ant_c) { + pos += scnprintf(buf + pos, bufsz - pos, + "tx power: (1/2 dB step)\n"); + if ((priv->cfg->valid_tx_ant & ANT_A) && + tx->tx_power.ant_a) + pos += scnprintf(buf + pos, bufsz - pos, + "\tantenna A: 0x%X\n", + tx->tx_power.ant_a); + if ((priv->cfg->valid_tx_ant & ANT_B) && + tx->tx_power.ant_b) + pos += scnprintf(buf + pos, bufsz - pos, + "\tantenna B: 0x%X\n", + tx->tx_power.ant_b); + if ((priv->cfg->valid_tx_ant & ANT_C) && + tx->tx_power.ant_c) + pos += scnprintf(buf + pos, bufsz - pos, + "\tantenna C: 0x%X\n", + tx->tx_power.ant_c); + } else + pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_power_save_status_read(struct file 
*file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[60]; + int pos = 0; + const size_t bufsz = sizeof(buf); + u32 pwrsave_status; + + pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_REG_POWER_SAVE_STATUS_MSK; + + pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); + pos += scnprintf(buf + pos, bufsz - pos, "%s\n", + (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" : + (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" : + (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" : + "error"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &clear) != 1) + return -EFAULT; + + /* make request to uCode to retrieve statistics information */ + mutex_lock(&priv->mutex); + iwl_send_statistics_request(priv, CMD_SYNC, true); + mutex_unlock(&priv->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_csr_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int csr; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &csr) != 1) + return -EFAULT; + + if (priv->cfg->ops->lib->dump_csr) + priv->cfg->ops->lib->dump_csr(priv); + + return count; +} + +static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + int pos = 0; + char buf[128]; + const size_t bufsz = sizeof(buf); + ssize_t ret; + + pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n", + priv->event_log.ucode_trace ? 
"On" : "Off"); + pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n", + priv->event_log.non_wraps_count); + pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n", + priv->event_log.wraps_once_count); + pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n", + priv->event_log.wraps_more_count); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + return ret; +} + +static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int trace; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &trace) != 1) + return -EFAULT; + + if (trace) { + priv->event_log.ucode_trace = true; + /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */ + mod_timer(&priv->ucode_trace, + jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD)); + } else { + priv->event_log.ucode_trace = false; + del_timer_sync(&priv->ucode_trace); + } + + return count; +} + +static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + char *buf; + int pos = 0; + ssize_t ret = -EFAULT; + + if (priv->cfg->ops->lib->dump_fh) { + ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true); + if (buf) { + ret = simple_read_from_buffer(user_buf, + count, ppos, buf, pos); + kfree(buf); + } + } + + return ret; +} + +static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + ssize_t ret; + + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", + priv->missed_beacon_threshold); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + return ret; +} + +static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int missed; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &missed) != 1) + return -EINVAL; + + if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN || + missed > IWL_MISSED_BEACON_THRESHOLD_MAX) + priv->missed_beacon_threshold = + IWL_MISSED_BEACON_THRESHOLD_DEF; + else + priv->missed_beacon_threshold = missed; + + return count; +} + +static ssize_t iwl_dbgfs_internal_scan_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int scan; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &scan) != 1) + return -EINVAL; + + iwl_internal_short_hw_scan(priv); + + return count; +} + +static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = (struct iwl_priv *)file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + ssize_t ret; + + pos += scnprintf(buf + pos, bufsz - pos, "%u\n", + priv->cfg->plcp_delta_threshold); + + ret = 
simple_read_from_buffer(user_buf, count, ppos, buf, pos); + return ret; +} + +static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int plcp; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &plcp) != 1) + return -EINVAL; + if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || + (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) + priv->cfg->plcp_delta_threshold = + IWL_MAX_PLCP_ERR_THRESHOLD_DEF; + else + priv->cfg->plcp_delta_threshold = plcp; + return count; +} + +static ssize_t iwl_dbgfs_force_reset_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int i, pos = 0; + char buf[300]; + const size_t bufsz = sizeof(buf); + struct iwl_force_reset *force_reset; + + for (i = 0; i < IWL_MAX_FORCE_RESET; i++) { + force_reset = &priv->force_reset[i]; + pos += scnprintf(buf + pos, bufsz - pos, + "Force reset method %d\n", i); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request: %d\n", + force_reset->reset_request_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request success: %d\n", + force_reset->reset_success_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request reject: %d\n", + force_reset->reset_reject_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\treset duration: %lu\n", + force_reset->reset_duration); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_force_reset_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int reset, ret; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &reset) != 1) + return -EINVAL; + switch (reset) { + case IWL_RF_RESET: + case IWL_FW_RESET: + ret = iwl_force_reset(priv, reset); + break; + default: + return -EINVAL; + } + return ret ? 
ret : count; +} + +DEBUGFS_READ_FILE_OPS(rx_statistics); +DEBUGFS_READ_FILE_OPS(tx_statistics); +DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); +DEBUGFS_READ_FILE_OPS(rx_queue); +DEBUGFS_READ_FILE_OPS(tx_queue); +DEBUGFS_READ_FILE_OPS(ucode_rx_stats); +DEBUGFS_READ_FILE_OPS(ucode_tx_stats); +DEBUGFS_READ_FILE_OPS(ucode_general_stats); +DEBUGFS_READ_FILE_OPS(sensitivity); +DEBUGFS_READ_FILE_OPS(chain_noise); +DEBUGFS_READ_FILE_OPS(tx_power); +DEBUGFS_READ_FILE_OPS(power_save_status); +DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); +DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); +DEBUGFS_WRITE_FILE_OPS(csr); +DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); +DEBUGFS_READ_FILE_OPS(fh_reg); +DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); +DEBUGFS_WRITE_FILE_OPS(internal_scan); +DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); +DEBUGFS_READ_WRITE_FILE_OPS(force_reset); + +/* + * Create the debugfs files and directories + * + */ +int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) +{ + struct dentry *phyd = priv->hw->wiphy->debugfsdir; + struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; + + dir_drv = debugfs_create_dir(name, phyd); + if (!dir_drv) + return -ENOMEM; + + priv->debugfs_dir = dir_drv; + + dir_data = debugfs_create_dir("data", dir_drv); + if (!dir_data) + goto err; + dir_rf = debugfs_create_dir("rf", dir_drv); + if (!dir_rf) + goto err; + dir_debug = debugfs_create_dir("debug", dir_drv); + if (!dir_debug) + goto err; + + DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_power, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); + if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { + DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); + } + DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal); + DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, + 
&priv->disable_chain_noise_cal); + if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) || + ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945)) + DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, + &priv->disable_tx_power_cal); + return 0; + +err: + IWL_ERR(priv, "Can't create the debugfs directory\n"); + iwl_dbgfs_unregister(priv); + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_dbgfs_register); + +/** + * Remove the debugfs files and directories + * + */ +void iwl_dbgfs_unregister(struct iwl_priv *priv) +{ + if (!priv->debugfs_dir) + return; + + debugfs_remove_recursive(priv->debugfs_dir); + priv->debugfs_dir = NULL; +} +EXPORT_SYMBOL(iwl_dbgfs_unregister); + + + diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h new file mode 100644 index 0000000..6054c5f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -0,0 +1,1446 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +/* + * Please use this file (iwl-dev.h) for driver implementation definitions. + * Please use iwl-commands.h for uCode API definitions. + * Please use iwl-4965-hw.h for hardware-related definitions. 
+ */ + +#ifndef __iwl_dev_h__ +#define __iwl_dev_h__ + +#include <linux/pci.h> /* for struct pci_device_id */ +#include <linux/kernel.h> +#include <net/ieee80211_radiotap.h> + +#include "iwl-eeprom.h" +#include "iwl-csr.h" +#include "iwl-prph.h" +#include "iwl-fh.h" +#include "iwl-debug.h" +#include "iwl-4965-hw.h" +#include "iwl-3945-hw.h" +#include "iwl-led.h" +#include "iwl-power.h" +#include "iwl-agn-rs.h" + +/* configuration for the iwl4965 */ +extern struct iwl_cfg iwl4965_agn_cfg; +extern struct iwl_cfg iwl5300_agn_cfg; +extern struct iwl_cfg iwl5100_agn_cfg; +extern struct iwl_cfg iwl5350_agn_cfg; +extern struct iwl_cfg iwl5100_bgn_cfg; +extern struct iwl_cfg iwl5100_abg_cfg; +extern struct iwl_cfg iwl5150_agn_cfg; +extern struct iwl_cfg iwl5150_abg_cfg; +extern struct iwl_cfg iwl6000i_2agn_cfg; +extern struct iwl_cfg iwl6000i_2abg_cfg; +extern struct iwl_cfg iwl6000i_2bg_cfg; +extern struct iwl_cfg iwl6000_3agn_cfg; +extern struct iwl_cfg iwl6050_2agn_cfg; +extern struct iwl_cfg iwl6050_2abg_cfg; +extern struct iwl_cfg iwl1000_bgn_cfg; +extern struct iwl_cfg iwl1000_bg_cfg; + +struct iwl_tx_queue; + +/* shared structures from iwl-5000.c */ +extern struct iwl_mod_params iwl50_mod_params; +extern struct iwl_ucode_ops iwl5000_ucode; +extern struct iwl_lib_ops iwl5000_lib; +extern struct iwl_hcmd_ops iwl5000_hcmd; +extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils; + +/* shared functions from iwl-5000.c */ +extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len); +extern u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, + u8 *data); +extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info, + __le32 *tx_flags); +extern int iwl5000_calc_rssi(struct iwl_priv *priv, + struct iwl_rx_phy_res *rx_resp); +extern void iwl5000_nic_config(struct iwl_priv *priv); +extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv); +extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, + size_t offset); +extern void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt); +extern void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern int iwl5000_load_ucode(struct iwl_priv *priv); +extern void iwl5000_init_alive_start(struct iwl_priv *priv); +extern int iwl5000_alive_notify(struct iwl_priv *priv); +extern int iwl5000_hw_set_hw_params(struct iwl_priv *priv); +extern int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id, + int tx_fifo, int sta_id, int tid, u16 ssn_idx); +extern int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo); +extern void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask); +extern void iwl5000_setup_deferred_work(struct iwl_priv *priv); +extern void iwl5000_rx_handler_setup(struct iwl_priv *priv); +extern int iwl5000_hw_valid_rtc_data_addr(u32 addr); +extern int iwl5000_send_tx_power(struct iwl_priv *priv); +extern void iwl5000_temperature(struct iwl_priv *priv); + +/* CT-KILL constants */ +#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ +#define CT_KILL_THRESHOLD 114 /* in Celsius */ +#define CT_KILL_EXIT_THRESHOLD 95 /* in Celsius */ + +/* Default noise level to report when noise measurement is not available. + * This may be because we're: + * 1) Not associated (4965, no beacon statistics being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) + * Use default noise value of -127 ...
this is below the range of measurable + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. */ +#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. + */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +struct iwl_rx_mem_buffer { + dma_addr_t page_dma; + struct page *page; + struct list_head list; +}; + +#define rxb_addr(r) page_address(r->page) + +/* defined below */ +struct iwl_device_cmd; + +struct iwl_cmd_meta { + /* only for SYNC commands, iff the reply skb is wanted */ + struct iwl_host_cmd *source; + /* + * only for ASYNC commands + * (which is somewhat stupid -- look at iwl-sta.c for instance + * which duplicates a bunch of code because the callback isn't + * invoked for SYNC commands, if it were and its result passed + * through it would be simpler...) + */ + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt); + + /* The CMD_SIZE_HUGE flag bit indicates that the command + * structure is stored at the end of the shared queue memory. */ + u32 flags; + + DECLARE_PCI_UNMAP_ADDR(mapping) + DECLARE_PCI_UNMAP_LEN(len) +}; + +/* + * Generic queue structure + * + * Contains common data for Rx and Tx queues + */ +struct iwl_queue { + int n_bd; /* number of BDs in this queue */ + int write_ptr; /* 1-st empty entry (index) host_w*/ + int read_ptr; /* last used entry (index) host_r*/ + dma_addr_t dma_addr; /* physical addr for BD's */ + int n_window; /* safe queue window */ + u32 id; + int low_mark; /* low watermark, resume queue if free + * space more than this */ + int high_mark; /* high watermark, stop queue if free + * space less than this */ +} __attribute__ ((packed)); + +/* One for each TFD */ +struct iwl_tx_info { + struct sk_buff *skb[IWL_NUM_OF_TBS - 1]; +}; + +/** + * struct iwl_tx_queue - Tx Queue for DMA + * @q: generic Rx/Tx queue descriptor + * @bd: base of circular buffer of TFDs + * @cmd: array of command/TX buffer pointers + * @meta: array of meta data for each command/tx buffer + * @dma_addr_cmd: physical address of cmd/tx buffer array + * @txb: array of per-TFD driver data + * @need_update: indicates need to update read/write index + * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled + * + * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame + * descriptors) and required locking structures. 
+ */ +#define TFD_TX_CMD_SLOTS 256 +#define TFD_CMD_SLOTS 32 + +struct iwl_tx_queue { + struct iwl_queue q; + void *tfds; + struct iwl_device_cmd **cmd; + struct iwl_cmd_meta *meta; + struct iwl_tx_info *txb; + u8 need_update; + u8 sched_retry; + u8 active; + u8 swq_id; +}; + +#define IWL_NUM_SCAN_RATES (2) + +struct iwl4965_channel_tgd_info { + u8 type; + s8 max_power; +}; + +struct iwl4965_channel_tgh_info { + s64 last_radar_time; +}; + +#define IWL4965_MAX_RATE (33) + +struct iwl3945_clip_group { + /* maximum power level to prevent clipping for each rate, derived by + * us from this band's saturation power in EEPROM */ + const s8 clip_powers[IWL_MAX_RATES]; +}; + +/* current Tx power values to use, one for each rate for each channel. + * requested power is limited by: + * -- regulatory EEPROM limits for this channel + * -- hardware capabilities (clip-powers) + * -- spectrum management + * -- user preference (e.g. iwconfig) + * when requested power is set, base power index must also be set. */ +struct iwl3945_channel_power_info { + struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 base_power_index; /* gain index for power at factory temp. */ + s8 requested_power; /* power (dBm) requested for this chnl/rate */ +}; + +/* current scan Tx power values to use, one for each scan rate for each + * channel. */ +struct iwl3945_scan_power_info { + struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */ +}; + +/* + * One for each channel, holds all channel setup data + * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant + * with one another! + */ +struct iwl_channel_info { + struct iwl4965_channel_tgd_info tgd; + struct iwl4965_channel_tgh_info tgh; + struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */ + struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for + * HT40 channel */ + + u8 channel; /* channel number */ + u8 flags; /* flags copied from EEPROM */ + s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */ + s8 min_power; /* always 0 */ + s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */ + + u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */ + u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */ + enum ieee80211_band band; + + /* HT40 channel info */ + s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + u8 ht40_flags; /* flags copied from EEPROM */ + u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */ + + /* Radio/DSP gain settings for each "normal" data Tx rate. + * These include, in addition to RF and DSP gain, a few fields for + * remembering/modifying gain settings (indexes). */ + struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE]; + + /* Radio/DSP gain settings for each scan rate, for directed scans. */ + struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES]; +}; + +#define IWL_TX_FIFO_AC0 0 +#define IWL_TX_FIFO_AC1 1 +#define IWL_TX_FIFO_AC2 2 +#define IWL_TX_FIFO_AC3 3 +#define IWL_TX_FIFO_HCCA_1 5 +#define IWL_TX_FIFO_HCCA_2 6 +#define IWL_TX_FIFO_NONE 7 + +/* Minimum number of queues. MAX_NUM is defined in hw specific files. 
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command + * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ +#define IWL_MIN_NUM_QUEUES 10 + +/* + * Queue #4 is the command queue for 3945/4965/5x00/1000/6x00, + * the driver maps it into the appropriate device FIFO for the + * uCode. + */ +#define IWL_CMD_QUEUE_NUM 4 + +/* Power management (not Tx power) structures */ + +enum iwl_pwr_src { + IWL_PWR_SRC_VMAIN, + IWL_PWR_SRC_VAUX, +}; + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct iwl_frame { + union { + struct ieee80211_hdr frame; + struct iwl_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +enum { + CMD_SYNC = 0, + CMD_SIZE_NORMAL = 0, + CMD_NO_SKB = 0, + CMD_SIZE_HUGE = (1 << 0), + CMD_ASYNC = (1 << 1), + CMD_WANT_SKB = (1 << 2), +}; + +#define DEF_CMD_PAYLOAD_SIZE 320 + +/* + * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header, + * SNAP header and alignment. It should also be big enough for 802.11 + * control frames. + */ +#define IWL_LINK_HDR_MAX 64 + +/** + * struct iwl_device_cmd + * + * For allocation of the command and tx queues, this establishes the overall + * size of the largest command we send to uCode, except for a scan command + * (which is relatively huge; space is allocated separately). + */ +struct iwl_device_cmd { + struct iwl_cmd_header hdr; /* uCode API */ + union { + u32 flags; + u8 val8; + u16 val16; + u32 val32; + struct iwl_tx_cmd tx; + struct iwl6000_channel_switch_cmd chswitch; + u8 payload[DEF_CMD_PAYLOAD_SIZE]; + } __attribute__ ((packed)) cmd; +} __attribute__ ((packed)); + +#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) + + +struct iwl_host_cmd { + const void *data; + unsigned long reply_page; + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt); + u32 flags; + u16 len; + u8 id; +}; + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/** + * struct iwl_rx_queue - Rx queue + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) + * @dma_addr: bus address of buffer of receive buffer descriptors (rbd) + * @read: Shared index to newest available Rx buffer + * @write: Shared index to oldest written Rx packet + * @free_count: Number of pre-allocated buffers in rx_free + * @rx_free: list of free SKBs for use + * @rx_used: List of Rx buffers with no SKB + * @need_update: flag to indicate we need to update read/write index + * @rb_stts: driver's pointer to receive buffer status + * @rb_stts_dma: bus address of receive buffer status + * + * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers + */ +struct iwl_rx_queue { + __le32 *bd; + dma_addr_t dma_addr; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; + u32 read; + u32 write; + u32 free_count; + u32 write_actual; + struct list_head rx_free; + struct list_head rx_used; + int need_update; + struct iwl_rb_status *rb_stts; + dma_addr_t rb_stts_dma; + spinlock_t lock; +}; + +#define IWL_SUPPORTED_RATES_IE_LEN 8 + +#define MAX_TID_COUNT 9 + +#define IWL_INVALID_RATE 0xFF +#define 
IWL_INVALID_VALUE -1 + +/** + * struct iwl_ht_agg -- aggregation status while waiting for block-ack + * @txq_id: Tx queue used for Tx attempt + * @frame_count: # frames attempted by Tx command + * @wait_for_ba: Expect block-ack before next Tx reply + * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window + * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window + * @bitmap1: High order, one bit for each frame pending ACK in Tx window + * @rate_n_flags: Rate at which Tx was attempted + * + * If REPLY_TX indicates that aggregation was attempted, driver must wait + * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info + * until block ack arrives. + */ +struct iwl_ht_agg { + u16 txq_id; + u16 frame_count; + u16 wait_for_ba; + u16 start_idx; + u64 bitmap; + u32 rate_n_flags; +#define IWL_AGG_OFF 0 +#define IWL_AGG_ON 1 +#define IWL_EMPTYING_HW_QUEUE_ADDBA 2 +#define IWL_EMPTYING_HW_QUEUE_DELBA 3 + u8 state; +}; + + +struct iwl_tid_data { + u16 seq_number; + u16 tfds_in_queue; + struct iwl_ht_agg agg; +}; + +struct iwl_hw_key { + enum ieee80211_key_alg alg; + int keylen; + u8 keyidx; + u8 key[32]; +}; + +union iwl_ht_rate_supp { + u16 rates; + struct { + u8 siso_rate; + u8 mimo_rate; + }; +}; + +#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3) + +/* + * Maximal MPDU density for TX aggregation + * 4 - 2us density + * 5 - 4us density + * 6 - 8us density + * 7 - 16us density + */ +#define CFG_HT_MPDU_DENSITY_4USEC (0x5) +#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC + +struct iwl_ht_config { + /* self configuration data */ + bool is_ht; + bool is_40mhz; + bool single_chain_sufficient; + enum ieee80211_smps_mode smps; /* current smps mode */ + /* BSS related data */ + u8 extension_chan_offset; + u8 ht_protection; + u8 non_GF_STA_present; +}; + +union iwl_qos_capabity { + struct { + u8 edca_count:4; /* bit 0-3 */ + u8 q_ack:1; /* bit 4 */ + u8 queue_request:1; /* bit 5 */ + u8 txop_request:1; /* bit 6 */ + u8 reserved:1; /* bit 7 */ + } q_AP; + struct { + u8 acvo_APSD:1; /* bit 0 */ + u8 acvi_APSD:1; /* bit 1 */ + u8 ac_bk_APSD:1; /* bit 2 */ + u8 ac_be_APSD:1; /* bit 3 */ + u8 q_ack:1; /* bit 4 */ + u8 max_len:2; /* bit 5-6 */ + u8 more_data_ack:1; /* bit 7 */ + } q_STA; + u8 val; +}; + +/* QoS structures */ +struct iwl_qos_info { + int qos_active; + union iwl_qos_capabity qos_cap; + struct iwl_qosparam_cmd def_qos_parm; +}; + +struct iwl_station_entry { + struct iwl_addsta_cmd sta; + struct iwl_tid_data tid[MAX_TID_COUNT]; + u8 used; + struct iwl_hw_key keyinfo; +}; + +/* + * iwl_station_priv: Driver's private station information + * + * When mac80211 creates a station it reserves some space (hw->sta_data_size) + * in the structure for use by driver. This structure is places in that + * space. 
+ */ +struct iwl_station_priv { + struct iwl_lq_sta lq_sta; + atomic_t pending_frames; + bool client; + bool asleep; +}; + +/* one for each uCode image (inst/data, boot/init/runtime) */ +struct fw_desc { + void *v_addr; /* access by driver */ + dma_addr_t p_addr; /* access by card's busmaster DMA */ + u32 len; /* bytes */ +}; + +/* uCode file layout */ +struct iwl_ucode_header { + __le32 ver; /* major/minor/API/serial */ + union { + struct { + __le32 inst_size; /* bytes of runtime code */ + __le32 data_size; /* bytes of runtime data */ + __le32 init_size; /* bytes of init code */ + __le32 init_data_size; /* bytes of init data */ + __le32 boot_size; /* bytes of bootstrap code */ + u8 data[0]; /* in same order as sizes */ + } v1; + struct { + __le32 build; /* build number */ + __le32 inst_size; /* bytes of runtime code */ + __le32 data_size; /* bytes of runtime data */ + __le32 init_size; /* bytes of init code */ + __le32 init_data_size; /* bytes of init data */ + __le32 boot_size; /* bytes of bootstrap code */ + u8 data[0]; /* in same order as sizes */ + } v2; + } u; +}; +#define UCODE_HEADER_SIZE(ver) ((ver) == 1 ? 24 : 28) + +struct iwl4965_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +struct iwl_sensitivity_ranges { + u16 min_nrg_cck; + u16 max_nrg_cck; + + u16 nrg_th_cck; + u16 nrg_th_ofdm; + + u16 auto_corr_min_ofdm; + u16 auto_corr_min_ofdm_mrc; + u16 auto_corr_min_ofdm_x1; + u16 auto_corr_min_ofdm_mrc_x1; + + u16 auto_corr_max_ofdm; + u16 auto_corr_max_ofdm_mrc; + u16 auto_corr_max_ofdm_x1; + u16 auto_corr_max_ofdm_mrc_x1; + + u16 auto_corr_max_cck; + u16 auto_corr_max_cck_mrc; + u16 auto_corr_min_cck; + u16 auto_corr_min_cck_mrc; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + + +#define KELVIN_TO_CELSIUS(x) ((x)-273) +#define CELSIUS_TO_KELVIN(x) ((x)+273) + + +/** + * struct iwl_hw_params + * @max_txq_num: Max # Tx queues supported + * @dma_chnl_num: Number of Tx DMA/FIFO channels + * @scd_bc_tbls_size: size of scheduler byte count tables + * @tfd_size: TFD size + * @tx/rx_chains_num: Number of TX/RX chains + * @valid_tx/rx_ant: usable antennas + * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) + * @max_rxq_log: Log-base-2 of max_rxq_size + * @rx_page_order: Rx buffer page order + * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR + * @max_stations: + * @bcast_sta_id: + * @ht40_channel: is 40MHz width possible in band 2.4 + * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ) + * @sw_crypto: 0 for hw, 1 for sw + * @max_xxx_size: for ucode uses + * @ct_kill_threshold: temperature threshold + * @calib_init_cfg: setup initial calibrations for the hw + * @struct iwl_sensitivity_ranges: range of sensitivity values + */ +struct iwl_hw_params { + u8 max_txq_num; + u8 dma_chnl_num; + u16 scd_bc_tbls_size; + u32 tfd_size; + u8 tx_chains_num; + u8 rx_chains_num; + u8 valid_tx_ant; + u8 valid_rx_ant; + u16 max_rxq_size; + u16 max_rxq_log; + u32 rx_page_order; + u32 rx_wrt_ptr_reg; + u8 max_stations; + u8 bcast_sta_id; + u8 ht40_channel; + u8 max_beacon_itrvl; /* in 1024 ms */ + u32 max_inst_size; + u32 max_data_size; + u32 max_bsm_size; + u32 ct_kill_threshold; /* value in hw-dependent units */ + u32 ct_kill_exit_threshold; /* value in hw-dependent units */ + /* for 1000, 6000 series and up */ + u32 calib_init_cfg; + const struct iwl_sensitivity_ranges *sens; +}; + + +/****************************************************************************** + * + * Functions 
implemented in core module which are forward declared here + * for use by iwl-[4-5].c + * + * NOTE: The implementation of these functions are not hardware specific + * which is why they are in the core module files. + * + * Naming convention -- + * iwl_ <-- Is part of iwlwifi + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * iwl4965_bg_ <-- Called from work queue context + * iwl4965_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void iwl_update_chain_flags(struct iwl_priv *priv); +extern int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src); +extern const u8 iwl_bcast_addr[ETH_ALEN]; +extern int iwl_rxq_stop(struct iwl_priv *priv); +extern void iwl_txq_ctx_stop(struct iwl_priv *priv); +extern int iwl_queue_space(const struct iwl_queue *q); +static inline int iwl_queue_used(const struct iwl_queue *q, int i) +{ + return q->write_ptr >= q->read_ptr ? + (i >= q->read_ptr && i < q->write_ptr) : + !(i < q->read_ptr && i >= q->write_ptr); +} + + +static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) +{ + /* + * This is for init calibration result and scan command which + * required buffer > TFD_MAX_PAYLOAD_SIZE, + * the big buffer at end of command array + */ + if (is_huge) + return q->n_window; /* must be power of 2 */ + + /* Otherwise, use normal size buffers */ + return index & (q->n_window - 1); +} + + +struct iwl_dma_ptr { + dma_addr_t dma; + void *addr; + size_t size; +}; + +#define IWL_OPERATION_MODE_AUTO 0 +#define IWL_OPERATION_MODE_HT_ONLY 1 +#define IWL_OPERATION_MODE_MIXED 2 +#define IWL_OPERATION_MODE_20MHZ 3 + +#define IWL_TX_CRC_SIZE 4 +#define IWL_TX_DELIMITER_SIZE 4 + +#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 + +/* Sensitivity and chain noise calibration */ +#define INITIALIZATION_VALUE 0xFFFF +#define IWL4965_CAL_NUM_BEACONS 20 +#define IWL_CAL_NUM_BEACONS 16 +#define MAXIMUM_ALLOWED_PATHLOSS 15 + +#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 + +#define MAX_FA_OFDM 50 +#define MIN_FA_OFDM 5 +#define MAX_FA_CCK 50 +#define MIN_FA_CCK 5 + +#define AUTO_CORR_STEP_OFDM 1 + +#define AUTO_CORR_STEP_CCK 3 +#define AUTO_CORR_MAX_TH_CCK 160 + +#define NRG_DIFF 2 +#define NRG_STEP_CCK 2 +#define NRG_MARGIN 8 +#define MAX_NUMBER_CCK_NO_FA 100 + +#define AUTO_CORR_CCK_MIN_VAL_DEF (125) + +#define CHAIN_A 0 +#define CHAIN_B 1 +#define CHAIN_C 2 +#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 +#define ALL_BAND_FILTER 0xFF00 +#define IN_BAND_FILTER 0xFF +#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF + +#define NRG_NUM_PREV_STAT_L 20 +#define NUM_RX_CHAINS 3 + +enum iwl4965_false_alarm_state { + IWL_FA_TOO_MANY = 0, + IWL_FA_TOO_FEW = 1, + IWL_FA_GOOD_RANGE = 2, +}; + +enum iwl4965_chain_noise_state { + IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ + IWL_CHAIN_NOISE_ACCUMULATE, + IWL_CHAIN_NOISE_CALIBRATED, + IWL_CHAIN_NOISE_DONE, +}; + +enum iwl4965_calib_enabled_state { + IWL_CALIB_DISABLED = 0, /* must be 0 */ + IWL_CALIB_ENABLED = 1, +}; + + +/* + * enum iwl_calib + * defines the order in which results of initial calibrations + * should be sent to the runtime uCode + */ +enum iwl_calib { + IWL_CALIB_XTAL, + IWL_CALIB_DC, + IWL_CALIB_LO, + IWL_CALIB_TX_IQ, + IWL_CALIB_TX_IQ_PERD, + IWL_CALIB_BASE_BAND, + IWL_CALIB_MAX +}; + +/* Opaque calibration results */ +struct iwl_calib_result { + void *buf; + size_t buf_len; +}; + +enum ucode_type { + UCODE_NONE = 0, + UCODE_INIT, + UCODE_RT +}; + +/* Sensitivity calib data */ +struct iwl_sensitivity_data { + u32 auto_corr_ofdm; + 
u32 auto_corr_ofdm_mrc; + u32 auto_corr_ofdm_x1; + u32 auto_corr_ofdm_mrc_x1; + u32 auto_corr_cck; + u32 auto_corr_cck_mrc; + + u32 last_bad_plcp_cnt_ofdm; + u32 last_fa_cnt_ofdm; + u32 last_bad_plcp_cnt_cck; + u32 last_fa_cnt_cck; + + u32 nrg_curr_state; + u32 nrg_prev_state; + u32 nrg_value[10]; + u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; + u32 nrg_silence_ref; + u32 nrg_energy_idx; + u32 nrg_silence_idx; + u32 nrg_th_cck; + s32 nrg_auto_corr_silence_diff; + u32 num_in_cck_no_fa; + u32 nrg_th_ofdm; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + +/* Chain noise (differential Rx gain) calib data */ +struct iwl_chain_noise_data { + u32 active_chains; + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_signal_a; + u32 chain_signal_b; + u32 chain_signal_c; + u16 beacon_count; + u8 disconn_array[NUM_RX_CHAINS]; + u8 delta_gain_code[NUM_RX_CHAINS]; + u8 radio_write; + u8 state; +}; + +#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ +#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + +#define IWL_TRAFFIC_ENTRIES (256) +#define IWL_TRAFFIC_ENTRY_SIZE (64) + +enum { + MEASUREMENT_READY = (1 << 0), + MEASUREMENT_ACTIVE = (1 << 1), +}; + +enum iwl_nvm_type { + NVM_DEVICE_TYPE_EEPROM = 0, + NVM_DEVICE_TYPE_OTP, +}; + +/* + * Two types of OTP memory access modes + * IWL_OTP_ACCESS_ABSOLUTE - absolute address mode, + * based on physical memory addressing + * IWL_OTP_ACCESS_RELATIVE - relative address mode, + * based on logical memory addressing + */ +enum iwl_access_mode { + IWL_OTP_ACCESS_ABSOLUTE, + IWL_OTP_ACCESS_RELATIVE, +}; + +/** + * enum iwl_pa_type - Power Amplifier type + * @IWL_PA_SYSTEM: based on uCode configuration + * @IWL_PA_INTERNAL: use Internal only + */ +enum iwl_pa_type { + IWL_PA_SYSTEM = 0, + IWL_PA_INTERNAL = 1, +}; + +/* interrupt statistics */ +struct isr_statistics { + u32 hw; + u32 sw; + u32 sw_err; + u32 sch; + u32 alive; + u32 rfkill; + u32 ctkill; + u32 wakeup; + u32 rx; + u32 rx_handlers[REPLY_MAX]; + u32 tx; + u32 unhandled; +}; + +#ifdef CONFIG_IWLWIFI_DEBUGFS +/* management statistics */ +enum iwl_mgmt_stats { + MANAGEMENT_ASSOC_REQ = 0, + MANAGEMENT_ASSOC_RESP, + MANAGEMENT_REASSOC_REQ, + MANAGEMENT_REASSOC_RESP, + MANAGEMENT_PROBE_REQ, + MANAGEMENT_PROBE_RESP, + MANAGEMENT_BEACON, + MANAGEMENT_ATIM, + MANAGEMENT_DISASSOC, + MANAGEMENT_AUTH, + MANAGEMENT_DEAUTH, + MANAGEMENT_ACTION, + MANAGEMENT_MAX, +}; +/* control statistics */ +enum iwl_ctrl_stats { + CONTROL_BACK_REQ = 0, + CONTROL_BACK, + CONTROL_PSPOLL, + CONTROL_RTS, + CONTROL_CTS, + CONTROL_ACK, + CONTROL_CFEND, + CONTROL_CFENDACK, + CONTROL_MAX, +}; + +struct traffic_stats { + u32 mgmt[MANAGEMENT_MAX]; + u32 ctrl[CONTROL_MAX]; + u32 data_cnt; + u64 data_bytes; +}; +#else +struct traffic_stats { + u64 data_bytes; +}; +#endif + +/* + * iwl_switch_rxon: "channel switch" structure + * + * @ switch_in_progress: channel switch in progress + * @ channel: new channel + */ +struct iwl_switch_rxon { + bool switch_in_progress; + __le16 channel; +}; + +/* + * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds + * to perform continuous uCode event logging operation if enabled + */ +#define UCODE_TRACE_PERIOD (100) + +/* + * iwl_event_log: current uCode event log position + * + * @ucode_trace: enable/disable ucode continuous trace timer + * @num_wraps: how many times the event buffer wraps + * @next_entry: the entry just before the next one that uCode would fill + * @non_wraps_count: counter for no wrap detected when dump 
ucode events + * @wraps_once_count: counter for wrap once detected when dump ucode events + * @wraps_more_count: counter for wrap more than once detected + * when dump ucode events + */ +struct iwl_event_log { + bool ucode_trace; + u32 num_wraps; + u32 next_entry; + int non_wraps_count; + int wraps_once_count; + int wraps_more_count; +}; + +/* + * host interrupt timeout value + * used with setting interrupt coalescing timer + * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit + * + * default interrupt coalescing timer is 64 x 32 = 2048 usecs + * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs + */ +#define IWL_HOST_INT_TIMEOUT_MAX (0xFF) +#define IWL_HOST_INT_TIMEOUT_DEF (0x40) +#define IWL_HOST_INT_TIMEOUT_MIN (0x0) +#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) +#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) +#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) + +/* + * This is the threshold value of plcp error rate per 100mSecs. It is + * used to set and check for the validity of plcp_delta. + */ +#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0) +#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50) +#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100) +#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200) +#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) + +#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) +#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) + +enum iwl_reset { + IWL_RF_RESET = 0, + IWL_FW_RESET, + IWL_MAX_FORCE_RESET, +}; + +struct iwl_force_reset { + int reset_request_count; + int reset_success_count; + int reset_reject_count; + unsigned long reset_duration; + unsigned long last_force_reset_jiffies; +}; + +struct iwl_priv { + + /* ieee device used by generic ieee processing code */ + struct ieee80211_hw *hw; + struct ieee80211_channel *ieee_channels; + struct ieee80211_rate *ieee_rates; + struct iwl_cfg *cfg; + + /* temporary frame storage list */ + struct list_head free_frames; + int frames_count; + + enum ieee80211_band band; + int alloc_rxb_page; + + void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + + struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + + /* spectrum measurement report caching */ + struct iwl_spectrum_notification measure_report; + u8 measurement_status; + + /* ucode beacon time */ + u32 ucode_beacon_time; + int missed_beacon_threshold; + + /* storing the jiffies when the plcp error rate is received */ + unsigned long plcp_jiffies; + + /* force reset */ + struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; + + /* we allocate array of iwl4965_channel_info for NIC's valid channels. + * Access via channel # using indirect index array */ + struct iwl_channel_info *channel_info; /* channel info array */ + u8 channel_count; /* # of channels */ + + /* each calibration channel group in the EEPROM has a derived + * clip setting for each rate. 
3945 only.*/ + const struct iwl3945_clip_group clip39_groups[5]; + + /* thermal calibration */ + s32 temperature; /* degrees Kelvin */ + s32 last_temperature; + + /* init calibration results */ + struct iwl_calib_result calib_results[IWL_CALIB_MAX]; + + /* Scan related variables */ + unsigned long next_scan_jiffies; + unsigned long scan_start; + unsigned long scan_pass_start; + unsigned long scan_start_tsf; + void *scan; + int scan_bands; + struct cfg80211_scan_request *scan_request; + bool is_internal_short_scan; + u8 scan_tx_ant[IEEE80211_NUM_BANDS]; + u8 mgmt_tx_ant; + + /* spinlock */ + spinlock_t lock; /* protect general shared data */ + spinlock_t hcmd_lock; /* protect hcmd */ + spinlock_t reg_lock; /* protect hw register access */ + struct mutex mutex; + struct mutex sync_cmd_mutex; /* enable serialization of sync commands */ + + /* basic pci-network driver stuff */ + struct pci_dev *pci_dev; + + /* pci hardware address support */ + void __iomem *hw_base; + u32 hw_rev; + u32 hw_wa_rev; + u8 rev_id; + + /* uCode images, save to reload in case of failure */ + int fw_index; /* firmware we're trying to load */ + u32 ucode_ver; /* version of ucode, copy of + iwl_ucode.ver */ + struct fw_desc ucode_code; /* runtime inst */ + struct fw_desc ucode_data; /* runtime data original */ + struct fw_desc ucode_data_backup; /* runtime data save/restore */ + struct fw_desc ucode_init; /* initialization inst */ + struct fw_desc ucode_init_data; /* initialization data */ + struct fw_desc ucode_boot; /* bootstrap inst */ + enum ucode_type ucode_type; + u8 ucode_write_complete; /* the image write is complete */ + char firmware_name[25]; + + + struct iwl_rxon_time_cmd rxon_timing; + + /* We declare this const so it can only be + * changed via explicit cast within the + * routines that actually update the physical + * hardware */ + const struct iwl_rxon_cmd active_rxon; + struct iwl_rxon_cmd staging_rxon; + + struct iwl_switch_rxon switch_rxon; + + /* 1st responses from initialize and runtime uCode images. + * 4965's initialize alive response contains some calibration data. 
*/ + struct iwl_init_alive_resp card_alive_init; + struct iwl_alive_resp card_alive; + + unsigned long last_blink_time; + u8 last_blink_rate; + u8 allow_blinking; + u64 led_tpt; + + u16 active_rate; + u16 active_rate_basic; + + u8 assoc_station_added; + u8 start_calib; + struct iwl_sensitivity_data sensitivity_data; + struct iwl_chain_noise_data chain_noise_data; + __le16 sensitivity_tbl[HD_TABLE_SIZE]; + + struct iwl_ht_config current_ht_config; + u8 last_phy_res[100]; + + /* Rate scaling data */ + u8 retry_rate; + + wait_queue_head_t wait_command_queue; + + int activity_timer_active; + + /* Rx and Tx DMA processing queues */ + struct iwl_rx_queue rxq; + struct iwl_tx_queue *txq; + unsigned long txq_ctx_active_msk; + struct iwl_dma_ptr kw; /* keep warm address */ + struct iwl_dma_ptr scd_bc_tbls; + + u32 scd_base_addr; /* scheduler sram base address */ + + unsigned long status; + + int last_rx_rssi; /* From Rx packet statistics */ + int last_rx_noise; /* From beacon statistics */ + + /* counts mgmt, ctl, and data packets */ + struct traffic_stats tx_stats; + struct traffic_stats rx_stats; + + /* counts interrupts */ + struct isr_statistics isr_stats; + + struct iwl_power_mgr power_data; + struct iwl_tt_mgmt thermal_throttle; + + struct iwl_notif_statistics statistics; +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_notif_statistics accum_statistics; + struct iwl_notif_statistics delta_statistics; + struct iwl_notif_statistics max_delta; +#endif + + /* context information */ + u16 rates_mask; + + u8 bssid[ETH_ALEN]; + u16 rts_threshold; + u8 mac_addr[ETH_ALEN]; + + /*station table variables */ + spinlock_t sta_lock; + int num_stations; + struct iwl_station_entry stations[IWL_STATION_COUNT]; + struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; + u8 default_wep_key; + u8 key_mapping_key; + unsigned long ucode_key_table; + + /* queue refcounts */ +#define IWL_MAX_HW_QUEUES 32 + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + /* for each AC */ + atomic_t queue_stop_count[4]; + + /* Indication if ieee80211_ops->open has been called */ + u8 is_open; + + u8 mac80211_registered; + + /* Rx'd packet timing information */ + u32 last_beacon_time; + u64 last_tsf; + + /* eeprom -- this is in the card's little endian byte order */ + u8 *eeprom; + int nvm_device_type; + struct iwl_eeprom_calib_info *calib_info; + + enum nl80211_iftype iw_mode; + + struct sk_buff *ibss_beacon; + + /* Last Rx'd beacon timestamp */ + u64 timestamp; + u16 beacon_int; + struct ieee80211_vif *vif; + + /*Added for 3945 */ + void *shared_virt; + dma_addr_t shared_phys; + /*End*/ + struct iwl_hw_params hw_params; + + /* INT ICT Table */ + __le32 *ict_tbl; + dma_addr_t ict_tbl_dma; + dma_addr_t aligned_ict_tbl_dma; + int ict_index; + void *ict_tbl_vir; + u32 inta; + bool use_ict; + + u32 inta_mask; + /* Current association information needed to configure the + * hardware */ + u16 assoc_id; + u16 assoc_capability; + + struct iwl_qos_info qos_data; + + struct workqueue_struct *workqueue; + + struct work_struct restart; + struct work_struct scan_completed; + struct work_struct rx_replenish; + struct work_struct abort_scan; + struct work_struct request_scan; + struct work_struct beacon_update; + struct work_struct tt_work; + struct work_struct ct_enter; + struct work_struct ct_exit; + + struct tasklet_struct irq_tasklet; + + struct delayed_work init_alive_start; + struct delayed_work alive_start; + struct delayed_work scan_check; + + /*For 3945 only*/ + struct delayed_work thermal_periodic; + struct delayed_work rfkill_poll; + + /* 
TX Power */ + s8 tx_power_user_lmt; + s8 tx_power_device_lmt; + s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */ + + +#ifdef CONFIG_IWLWIFI_DEBUG + /* debugging info */ + u32 debug_level; /* per device debugging will override global + iwl_debug_level if set */ + u32 framecnt_to_us; + atomic_t restrict_refcnt; + bool disable_ht40; +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* debugfs */ + u16 tx_traffic_idx; + u16 rx_traffic_idx; + u8 *tx_traffic; + u8 *rx_traffic; + struct dentry *debugfs_dir; + u32 dbgfs_sram_offset, dbgfs_sram_len; +#endif /* CONFIG_IWLWIFI_DEBUGFS */ +#endif /* CONFIG_IWLWIFI_DEBUG */ + + struct work_struct txpower_work; + u32 disable_sens_cal; + u32 disable_chain_noise_cal; + u32 disable_tx_power_cal; + struct work_struct run_time_calib_work; + struct timer_list statistics_periodic; + struct timer_list ucode_trace; + bool hw_ready; + /*For 3945*/ +#define IWL_DEFAULT_TX_POWER 0x0F + + struct iwl3945_notif_statistics statistics_39; + + u32 sta_supp_rates; + + struct iwl_event_log event_log; +}; /*iwl_priv */ + +static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) +{ + set_bit(txq_id, &priv->txq_ctx_active_msk); +} + +static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id) +{ + clear_bit(txq_id, &priv->txq_ctx_active_msk); +} + +#ifdef CONFIG_IWLWIFI_DEBUG +const char *iwl_get_tx_fail_reason(u32 status); +/* + * iwl_get_debug_level: Return active debug level for device + * + * Using sysfs it is possible to set per device debug level. This debug + * level will be used if set, otherwise the global debug level which can be + * set via module parameter is used. + */ +static inline u32 iwl_get_debug_level(struct iwl_priv *priv) +{ + if (priv->debug_level) + return priv->debug_level; + else + return iwl_debug_level; +} +#else +static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; } + +static inline u32 iwl_get_debug_level(struct iwl_priv *priv) +{ + return iwl_debug_level; +} +#endif + + +static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv, + int txq_id, int idx) +{ + if (priv->txq[txq_id].txb[idx].skb[0]) + return (struct ieee80211_hdr *)priv->txq[txq_id]. + txb[idx].skb[0]->data; + return NULL; +} + + +static inline int iwl_is_associated(struct iwl_priv *priv) +{ + return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; +} + +static inline int is_channel_valid(const struct iwl_channel_info *ch_info) +{ + if (ch_info == NULL) + return 0; + return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; +} + +static inline int is_channel_radar(const struct iwl_channel_info *ch_info) +{ + return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; +} + +static inline u8 is_channel_a_band(const struct iwl_channel_info *ch_info) +{ + return ch_info->band == IEEE80211_BAND_5GHZ; +} + +static inline u8 is_channel_bg_band(const struct iwl_channel_info *ch_info) +{ + return ch_info->band == IEEE80211_BAND_2GHZ; +} + +static inline int is_channel_passive(const struct iwl_channel_info *ch) +{ + return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; +} + +static inline int is_channel_ibss(const struct iwl_channel_info *ch) +{ + return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 
1 : 0; +} + +static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page) +{ + __free_pages(page, priv->hw_params.rx_page_order); + priv->alloc_rxb_page--; +} + +static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page) +{ + free_pages(page, priv->hw_params.rx_page_order); + priv->alloc_rxb_page--; +} +#endif /* __iwl_dev_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c new file mode 100644 index 0000000..36580d8 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c @@ -0,0 +1,42 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/module.h> + +/* sparse doesn't like tracepoint macros */ +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "iwl-devtrace.h" + +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event); +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h new file mode 100644 index 0000000..790c742 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -0,0 +1,271 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_DEVICE_TRACE + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#include +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) */ +#include "iwl-dev.h" + +#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#endif + +#define PRIV_ENTRY __field(struct iwl_priv *, priv) +#define PRIV_ASSIGN __entry->priv = priv + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_io + +TRACE_EVENT(iwlwifi_dev_ioread32, + TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] read io[%#x] = %#x", __entry->priv, __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_dev_iowrite8, + TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u8, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_dev_iowrite32, + TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val) +); + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_ucode + +TRACE_EVENT(iwlwifi_dev_ucode_cont_event, + TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev), + TP_ARGS(priv, time, data, ev), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(u32, time) + __field(u32, data) + __field(u32, ev) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->time = time; + __entry->data = data; + __entry->ev = ev; + ), + TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u", + __entry->priv, __entry->time, __entry->data, __entry->ev) +); + +TRACE_EVENT(iwlwifi_dev_ucode_wrap_event, + TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry), + TP_ARGS(priv, wraps, n_entry, p_entry), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(u32, wraps) + __field(u32, n_entry) + __field(u32, p_entry) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->wraps = wraps; + __entry->n_entry = n_entry; + __entry->p_entry = p_entry; + ), + TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X", + __entry->priv, __entry->wraps, __entry->n_entry, + __entry->p_entry) +); + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi + +TRACE_EVENT(iwlwifi_dev_hcmd, + TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags), + TP_ARGS(priv, hcmd, len, flags), + TP_STRUCT__entry( + PRIV_ENTRY + __dynamic_array(u8, hcmd, len) + __field(u32, flags) + ), + TP_fast_assign( + PRIV_ASSIGN; + memcpy(__get_dynamic_array(hcmd), hcmd, len); + __entry->flags = flags; + ), + TP_printk("[%p] hcmd %#.2x (%ssync)", + __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0], + __entry->flags & CMD_ASYNC ? 
"a" : "") +); + +TRACE_EVENT(iwlwifi_dev_rx, + TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len), + TP_ARGS(priv, rxbuf, len), + TP_STRUCT__entry( + PRIV_ENTRY + __dynamic_array(u8, rxbuf, len) + ), + TP_fast_assign( + PRIV_ASSIGN; + memcpy(__get_dynamic_array(rxbuf), rxbuf, len); + ), + TP_printk("[%p] RX cmd %#.2x", + __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4]) +); + +TRACE_EVENT(iwlwifi_dev_tx, + TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen, + void *buf0, size_t buf0_len, + void *buf1, size_t buf1_len), + TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(size_t, framelen) + __dynamic_array(u8, tfd, tfdlen) + + /* + * Do not insert between or below these items, + * we want to keep the frame together (except + * for the possible padding). + */ + __dynamic_array(u8, buf0, buf0_len) + __dynamic_array(u8, buf1, buf1_len) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->framelen = buf0_len + buf1_len; + memcpy(__get_dynamic_array(tfd), tfd, tfdlen); + memcpy(__get_dynamic_array(buf0), buf0, buf0_len); + memcpy(__get_dynamic_array(buf1), buf1, buf0_len); + ), + TP_printk("[%p] TX %.2x (%zu bytes)", + __entry->priv, + ((u8 *)__get_dynamic_array(buf0))[0], + __entry->framelen) +); + +TRACE_EVENT(iwlwifi_dev_ucode_error, + TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time, + u32 data1, u32 data2, u32 line, u32 blink1, + u32 blink2, u32 ilink1, u32 ilink2), + TP_ARGS(priv, desc, time, data1, data2, line, + blink1, blink2, ilink1, ilink2), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, desc) + __field(u32, time) + __field(u32, data1) + __field(u32, data2) + __field(u32, line) + __field(u32, blink1) + __field(u32, blink2) + __field(u32, ilink1) + __field(u32, ilink2) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->desc = desc; + __entry->time = time; + __entry->data1 = data1; + __entry->data2 = data2; + __entry->line = line; + __entry->blink1 = blink1; + __entry->blink2 = blink2; + __entry->ilink1 = ilink1; + __entry->ilink2 = ilink2; + ), + TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, " + "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X", + __entry->priv, __entry->desc, __entry->time, __entry->data1, + __entry->data2, __entry->line, __entry->blink1, + __entry->blink2, __entry->ilink1, __entry->ilink2) +); + +TRACE_EVENT(iwlwifi_dev_ucode_event, + TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev), + TP_ARGS(priv, time, data, ev), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(u32, time) + __field(u32, data) + __field(u32, ev) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->time = time; + __entry->data = data; + __entry->ev = ev; + ), + TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u", + __entry->priv, __entry->time, __entry->data, __entry->ev) +); +#endif /* __IWLWIFI_DEVICE_TRACE */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE iwl-devtrace +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) +#include +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c new file mode 100644 index 0000000..fd37152 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -0,0 +1,1154 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2010 Intel Corporation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + + +#include +#include +#include + +#include + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" +#include "iwl-eeprom.h" +#include "iwl-io.h" + +/************************** EEPROM BANDS **************************** + * + * The iwl_eeprom_band definitions below provide the mapping from the + * EEPROM contents to the specific channel number supported for each + * band. + * + * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 + * definition below maps to physical channel 42 in the 5.2GHz spectrum. + * The specific geography and calibration information for that channel + * is contained in the eeprom map itself. 
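+ *
+ * Concrete illustration (editorial sketch, not part of the EEPROM layout
+ * itself): iwl_eeprom_band_3[4] below is physical channel 42, so once
+ * iwl_init_channel_map() has built the map, that channel's run-time state
+ * can be fetched with
+ *
+ *	const struct iwl_channel_info *ci =
+ *		iwl_get_channel_info(priv, IEEE80211_BAND_5GHZ,
+ *				     iwl_eeprom_band_3[4]);
+ *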
+ * + * During init, we copy the eeprom information and channel map + * information into priv->channel_info_24/52 and priv->channel_map_24/52 + * + * channel_map_24/52 provides the index in the channel_info array for a + * given channel. We have to have two separate maps as there is channel + * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and + * band_2 + * + * A value of 0xff stored in the channel_map indicates that the channel + * is not supported by the hardware at all. + * + * A value of 0xfe in the channel_map indicates that the channel is not + * valid for Tx with the current hardware. This means that + * while the system can tune and receive on a given channel, it may not + * be able to associate or transmit any frames on that + * channel. There is no corresponding channel information for that + * entry. + * + *********************************************************************/ + +/* 2.4 GHz */ +const u8 iwl_eeprom_band_1[14] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 +}; + +/* 5.2 GHz bands */ +static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */ + 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 +}; + +static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */ + 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 +}; + +static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ + 145, 149, 153, 157, 161, 165 +}; + +static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */ + 1, 2, 3, 4, 5, 6, 7 +}; + +static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */ + 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 +}; + +/** + * struct iwl_txpwr_section: eeprom section information + * @offset: indirect address into eeprom image + * @count: number of "struct iwl_eeprom_enhanced_txpwr" in this section + * @band: band type for the section + * @is_common - true: common section, false: channel section + * @is_cck - true: cck section, false: not cck section + * @is_ht_40 - true: all channel in the section are HT40 channel, + * false: legacy or HT 20 MHz + * ignore if it is common section + * @iwl_eeprom_section_channel: channel array in the section, + * ignore if common section + */ +struct iwl_txpwr_section { + u32 offset; + u8 count; + enum ieee80211_band band; + bool is_common; + bool is_cck; + bool is_ht40; + u8 iwl_eeprom_section_channel[EEPROM_MAX_TXPOWER_SECTION_ELEMENTS]; +}; + +/** + * section 1 - 3 are regulatory tx power apply to all channels based on + * modulation: CCK, OFDM + * Band: 2.4GHz, 5.2GHz + * section 4 - 10 are regulatory tx power apply to specified channels + * For example: + * 1L - Channel 1 Legacy + * 1HT - Channel 1 HT + * (1,+1) - Channel 1 HT40 "_above_" + * + * Section 1: all CCK channels + * Section 2: all 2.4 GHz OFDM (Legacy, HT and HT40) channels + * Section 3: all 5.2 GHz OFDM (Legacy, HT and HT40) channels + * Section 4: 2.4 GHz 20MHz channels: 1L, 1HT, 2L, 2HT, 10L, 10HT, 11L, 11HT + * Section 5: 2.4 GHz 40MHz channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) + * Section 6: 5.2 GHz 20MHz channels: 36L, 64L, 100L, 36HT, 64HT, 100HT + * Section 7: 5.2 GHz 40MHz channels: (36,+1) (60,+1) (100,+1) + * Section 8: 2.4 GHz channel: 13L, 13HT + * Section 9: 2.4 GHz channel: 140L, 140HT + * Section 10: 2.4 GHz 40MHz channels: (132,+1) (44,+1) + * + */ +static const struct iwl_txpwr_section enhinfo[] = { + { EEPROM_LB_CCK_20_COMMON, 1, IEEE80211_BAND_2GHZ, true, true, false }, + { EEPROM_LB_OFDM_COMMON, 
3, IEEE80211_BAND_2GHZ, true, false, false }, + { EEPROM_HB_OFDM_COMMON, 3, IEEE80211_BAND_5GHZ, true, false, false }, + { EEPROM_LB_OFDM_20_BAND, 8, IEEE80211_BAND_2GHZ, + false, false, false, + {1, 1, 2, 2, 10, 10, 11, 11 } }, + { EEPROM_LB_OFDM_HT40_BAND, 5, IEEE80211_BAND_2GHZ, + false, false, true, + { 1, 2, 6, 7, 9 } }, + { EEPROM_HB_OFDM_20_BAND, 6, IEEE80211_BAND_5GHZ, + false, false, false, + { 36, 64, 100, 36, 64, 100 } }, + { EEPROM_HB_OFDM_HT40_BAND, 3, IEEE80211_BAND_5GHZ, + false, false, true, + { 36, 60, 100 } }, + { EEPROM_LB_OFDM_20_CHANNEL_13, 2, IEEE80211_BAND_2GHZ, + false, false, false, + { 13, 13 } }, + { EEPROM_HB_OFDM_20_CHANNEL_140, 2, IEEE80211_BAND_5GHZ, + false, false, false, + { 140, 140 } }, + { EEPROM_HB_OFDM_HT40_BAND_1, 2, IEEE80211_BAND_5GHZ, + false, false, true, + { 132, 44 } }, +}; + +/****************************************************************************** + * + * EEPROM related functions + * +******************************************************************************/ + +int iwlcore_eeprom_verify_signature(struct iwl_priv *priv) +{ + u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; + int ret = 0; + + IWL_DEBUG_INFO(priv, "EEPROM signature=0x%08x\n", gp); + switch (gp) { + case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: + if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) { + IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n", + gp); + ret = -ENOENT; + } + break; + case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: + case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: + if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) { + IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp); + ret = -ENOENT; + } + break; + case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: + default: + IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, " + "EEPROM_GP=0x%08x\n", + (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) + ? "OTP" : "EEPROM", gp); + ret = -ENOENT; + break; + } + return ret; +} +EXPORT_SYMBOL(iwlcore_eeprom_verify_signature); + +static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode) +{ + u32 otpgp; + + otpgp = iwl_read32(priv, CSR_OTP_GP_REG); + if (mode == IWL_OTP_ACCESS_ABSOLUTE) + iwl_clear_bit(priv, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_OTP_ACCESS_MODE); + else + iwl_set_bit(priv, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_OTP_ACCESS_MODE); +} + +static int iwlcore_get_nvm_type(struct iwl_priv *priv) +{ + u32 otpgp; + int nvm_type; + + /* OTP only valid for CP/PP and after */ + switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { + case CSR_HW_REV_TYPE_NONE: + IWL_ERR(priv, "Unknown hardware type\n"); + return -ENOENT; + case CSR_HW_REV_TYPE_3945: + case CSR_HW_REV_TYPE_4965: + case CSR_HW_REV_TYPE_5300: + case CSR_HW_REV_TYPE_5350: + case CSR_HW_REV_TYPE_5100: + case CSR_HW_REV_TYPE_5150: + nvm_type = NVM_DEVICE_TYPE_EEPROM; + break; + default: + otpgp = iwl_read32(priv, CSR_OTP_GP_REG); + if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) + nvm_type = NVM_DEVICE_TYPE_OTP; + else + nvm_type = NVM_DEVICE_TYPE_EEPROM; + break; + } + return nvm_type; +} + +/* + * The device's EEPROM semaphore prevents conflicts between driver and uCode + * when accessing the EEPROM; each access is a series of pulses to/from the + * EEPROM chip, not a single event, so even reads could conflict if they + * weren't arbitrated by the semaphore. 
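+ *
+ * Typical usage (editorial sketch of the pattern iwl_eeprom_init() below
+ * follows; error handling abbreviated):
+ *
+ *	if (iwlcore_eeprom_acquire_semaphore(priv) < 0)
+ *		return -ENOENT;
+ *	... read 16-bit words through CSR_EEPROM_REG ...
+ *	iwlcore_eeprom_release_semaphore(priv);
+ *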
+ */ +int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv) +{ + u16 count; + int ret; + + for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { + /* Request semaphore */ + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + + /* See if we got it */ + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + EEPROM_SEM_TIMEOUT); + if (ret >= 0) { + IWL_DEBUG_IO(priv, "Acquired semaphore after %d tries.\n", + count+1); + return ret; + } + } + + return ret; +} +EXPORT_SYMBOL(iwlcore_eeprom_acquire_semaphore); + +void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv) +{ + iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + +} +EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore); + +const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) +{ + BUG_ON(offset >= priv->cfg->eeprom_size); + return &priv->eeprom[offset]; +} +EXPORT_SYMBOL(iwlcore_eeprom_query_addr); + +static int iwl_init_otp_access(struct iwl_priv *priv) +{ + int ret; + + /* Enable 40MHz radio clock */ + _iwl_write32(priv, CSR_GP_CNTRL, + _iwl_read32(priv, CSR_GP_CNTRL) | + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* wait for clock to be ready */ + ret = iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + 25000); + if (ret < 0) + IWL_ERR(priv, "Time out access OTP\n"); + else { + iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + + /* + * CSR auto clock gate disable bit - + * this is only applicable for HW with OTP shadow RAM + */ + if (priv->cfg->shadow_ram_support) + iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + } + return ret; +} + +static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data) +{ + int ret = 0; + u32 r; + u32 otpgp; + + _iwl_write32(priv, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + ret = iwl_poll_bit(priv, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IWL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IWL_ERR(priv, "Time out reading OTP[%d]\n", addr); + return ret; + } + r = _iwl_read_direct32(priv, CSR_EEPROM_REG); + /* check for ECC errors: */ + otpgp = iwl_read32(priv, CSR_OTP_GP_REG); + if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { + /* stop in this case */ + /* set the uncorrectable OTP ECC bit for acknowledgement */ + iwl_set_bit(priv, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); + IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n"); + return -EINVAL; + } + if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { + /* continue in this case */ + /* set the correctable OTP ECC bit for acknowledgement */ + iwl_set_bit(priv, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); + IWL_ERR(priv, "Correctable OTP ECC error, continue read\n"); + } + *eeprom_data = cpu_to_le16(r >> 16); + return 0; +} + +/* + * iwl_is_otp_empty: check for empty OTP + */ +static bool iwl_is_otp_empty(struct iwl_priv *priv) +{ + u16 next_link_addr = 0; + __le16 link_value; + bool is_empty = false; + + /* locate the beginning of OTP link list */ + if (!iwl_read_otp_word(priv, next_link_addr, &link_value)) { + if (!link_value) { + IWL_ERR(priv, "OTP is empty\n"); + is_empty = true; + } + } else { + IWL_ERR(priv, "Unable to read first block of OTP 
list.\n"); + is_empty = true; + } + + return is_empty; +} + + +/* + * iwl_find_otp_image: find EEPROM image in OTP + * finding the OTP block that contains the EEPROM image. + * the last valid block on the link list (the block _before_ the last block) + * is the block we should read and used to configure the device. + * If all the available OTP blocks are full, the last block will be the block + * we should read and used to configure the device. + * only perform this operation if shadow RAM is disabled + */ +static int iwl_find_otp_image(struct iwl_priv *priv, + u16 *validblockaddr) +{ + u16 next_link_addr = 0, valid_addr; + __le16 link_value = 0; + int usedblocks = 0; + + /* set addressing mode to absolute to traverse the link list */ + iwl_set_otp_access(priv, IWL_OTP_ACCESS_ABSOLUTE); + + /* checking for empty OTP or error */ + if (iwl_is_otp_empty(priv)) + return -EINVAL; + + /* + * start traverse link list + * until reach the max number of OTP blocks + * different devices have different number of OTP blocks + */ + do { + /* save current valid block address + * check for more block on the link list + */ + valid_addr = next_link_addr; + next_link_addr = le16_to_cpu(link_value) * sizeof(u16); + IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", + usedblocks, next_link_addr); + if (iwl_read_otp_word(priv, next_link_addr, &link_value)) + return -EINVAL; + if (!link_value) { + /* + * reach the end of link list, return success and + * set address point to the starting address + * of the image + */ + *validblockaddr = valid_addr; + /* skip first 2 bytes (link list pointer) */ + *validblockaddr += 2; + return 0; + } + /* more in the link list, continue */ + usedblocks++; + } while (usedblocks <= priv->cfg->max_ll_items); + + /* OTP has no valid blocks */ + IWL_DEBUG_INFO(priv, "OTP has no valid blocks\n"); + return -EINVAL; +} + +/** + * iwl_eeprom_init - read EEPROM contents + * + * Load the EEPROM contents from adapter into priv->eeprom + * + * NOTE: This routine uses the non-debug IO access functions. 
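+ *
+ * A typical caller sequence (editorial sketch; the hw-specific setup code
+ * is assumed to provide priv and a mac[ETH_ALEN] buffer):
+ *
+ *	ret = iwl_eeprom_init(priv);
+ *	if (ret)
+ *		return ret;
+ *	ret = iwl_eeprom_check_version(priv);
+ *	if (!ret)
+ *		iwl_eeprom_get_mac(priv, mac);
+ *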
+ */ +int iwl_eeprom_init(struct iwl_priv *priv) +{ + __le16 *e; + u32 gp = iwl_read32(priv, CSR_EEPROM_GP); + int sz; + int ret; + u16 addr; + u16 validblockaddr = 0; + u16 cache_addr = 0; + + priv->nvm_device_type = iwlcore_get_nvm_type(priv); + if (priv->nvm_device_type == -ENOENT) + return -ENOENT; + /* allocate eeprom */ + IWL_DEBUG_INFO(priv, "NVM size = %d\n", priv->cfg->eeprom_size); + sz = priv->cfg->eeprom_size; + priv->eeprom = kzalloc(sz, GFP_KERNEL); + if (!priv->eeprom) { + ret = -ENOMEM; + goto alloc_err; + } + e = (__le16 *)priv->eeprom; + + priv->cfg->ops->lib->apm_ops.init(priv); + + ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); + if (ret < 0) { + IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); + ret = -ENOENT; + goto err; + } + + /* Make sure driver (instead of uCode) is allowed to read EEPROM */ + ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv); + if (ret < 0) { + IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n"); + ret = -ENOENT; + goto err; + } + + if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) { + + ret = iwl_init_otp_access(priv); + if (ret) { + IWL_ERR(priv, "Failed to initialize OTP access.\n"); + ret = -ENOENT; + goto done; + } + _iwl_write32(priv, CSR_EEPROM_GP, + iwl_read32(priv, CSR_EEPROM_GP) & + ~CSR_EEPROM_GP_IF_OWNER_MSK); + + iwl_set_bit(priv, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | + CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); + /* traversing the linked list if no shadow ram supported */ + if (!priv->cfg->shadow_ram_support) { + if (iwl_find_otp_image(priv, &validblockaddr)) { + ret = -ENOENT; + goto done; + } + } + for (addr = validblockaddr; addr < validblockaddr + sz; + addr += sizeof(u16)) { + __le16 eeprom_data; + + ret = iwl_read_otp_word(priv, addr, &eeprom_data); + if (ret) + goto done; + e[cache_addr / 2] = eeprom_data; + cache_addr += sizeof(u16); + } + } else { + /* eeprom is an array of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + u32 r; + + _iwl_write32(priv, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + + ret = iwl_poll_bit(priv, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IWL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr); + goto done; + } + r = _iwl_read_direct32(priv, CSR_EEPROM_REG); + e[addr / 2] = cpu_to_le16(r >> 16); + } + } + ret = 0; +done: + priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv); +err: + if (ret) + iwl_eeprom_free(priv); + /* Reset chip to save power until we load uCode during "up". 
*/ + priv->cfg->ops->lib->apm_ops.stop(priv); +alloc_err: + return ret; +} +EXPORT_SYMBOL(iwl_eeprom_init); + +void iwl_eeprom_free(struct iwl_priv *priv) +{ + kfree(priv->eeprom); + priv->eeprom = NULL; +} +EXPORT_SYMBOL(iwl_eeprom_free); + +int iwl_eeprom_check_version(struct iwl_priv *priv) +{ + u16 eeprom_ver; + u16 calib_ver; + + eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); + calib_ver = priv->cfg->ops->lib->eeprom_ops.calib_version(priv); + + if (eeprom_ver < priv->cfg->eeprom_ver || + calib_ver < priv->cfg->eeprom_calib_ver) + goto err; + + return 0; +err: + IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", + eeprom_ver, priv->cfg->eeprom_ver, + calib_ver, priv->cfg->eeprom_calib_ver); + return -EINVAL; + +} +EXPORT_SYMBOL(iwl_eeprom_check_version); + +const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) +{ + return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset); +} +EXPORT_SYMBOL(iwl_eeprom_query_addr); + +u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset) +{ + if (!priv->eeprom) + return 0; + return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); +} +EXPORT_SYMBOL(iwl_eeprom_query16); + +void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) +{ + const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv, + EEPROM_MAC_ADDRESS); + memcpy(mac, addr, ETH_ALEN); +} +EXPORT_SYMBOL(iwl_eeprom_get_mac); + +static void iwl_init_band_reference(const struct iwl_priv *priv, + int eep_band, int *eeprom_ch_count, + const struct iwl_eeprom_channel **eeprom_ch_info, + const u8 **eeprom_ch_index) +{ + u32 offset = priv->cfg->ops->lib-> + eeprom_ops.regulatory_bands[eep_band - 1]; + switch (eep_band) { + case 1: /* 2.4GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwl_eeprom_band_1; + break; + case 2: /* 4.9GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwl_eeprom_band_2; + break; + case 3: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwl_eeprom_band_3; + break; + case 4: /* 5.5GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwl_eeprom_band_4; + break; + case 5: /* 5.7GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwl_eeprom_band_5; + break; + case 6: /* 2.4GHz ht40 channels */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwl_eeprom_band_6; + break; + case 7: /* 5 GHz ht40 channels */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwl_eeprom_band_7; + break; + default: + BUG(); + return; + } +} + +#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ + ? # x " " : "") + +/** + * iwl_mod_ht40_chan_info - Copy ht40 channel info into driver's priv. + * + * Does not set up a command, or touch hardware. 
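+ *
+ * Callers invoke this twice per HT40 pair, once for each half (editorial
+ * sketch mirroring iwl_init_channel_map() further down):
+ *
+ *	iwl_mod_ht40_chan_info(priv, ieeeband, eeprom_ch_index[ch],
+ *			       &eeprom_ch_info[ch], IEEE80211_CHAN_NO_HT40PLUS);
+ *	iwl_mod_ht40_chan_info(priv, ieeeband, eeprom_ch_index[ch] + 4,
+ *			       &eeprom_ch_info[ch], IEEE80211_CHAN_NO_HT40MINUS);
+ *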
+ */ +static int iwl_mod_ht40_chan_info(struct iwl_priv *priv, + enum ieee80211_band band, u16 channel, + const struct iwl_eeprom_channel *eeprom_ch, + u8 clear_ht40_extension_channel) +{ + struct iwl_channel_info *ch_info; + + ch_info = (struct iwl_channel_info *) + iwl_get_channel_info(priv, band, channel); + + if (!is_channel_valid(ch_info)) + return -1; + + IWL_DEBUG_INFO(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" + " Ad-Hoc %ssupported\n", + ch_info->channel, + is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), + CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), + CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(DFS), + eeprom_ch->flags, + eeprom_ch->max_power_avg, + ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? + "" : "not "); + + ch_info->ht40_eeprom = *eeprom_ch; + ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; + ch_info->ht40_flags = eeprom_ch->flags; + if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) + ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; + + return 0; +} + +/** + * iwl_get_max_txpower_avg - get the highest tx power from all chains. + * find the highest tx power from all chains for the channel + */ +static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv, + struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, + int element, s8 *max_txpower_in_half_dbm) +{ + s8 max_txpower_avg = 0; /* (dBm) */ + + IWL_DEBUG_INFO(priv, "%d - " + "chain_a: %d dB chain_b: %d dB " + "chain_c: %d dB mimo2: %d dB mimo3: %d dB\n", + element, + enhanced_txpower[element].chain_a_max >> 1, + enhanced_txpower[element].chain_b_max >> 1, + enhanced_txpower[element].chain_c_max >> 1, + enhanced_txpower[element].mimo2_max >> 1, + enhanced_txpower[element].mimo3_max >> 1); + /* Take the highest tx power from any valid chains */ + if ((priv->cfg->valid_tx_ant & ANT_A) && + (enhanced_txpower[element].chain_a_max > max_txpower_avg)) + max_txpower_avg = enhanced_txpower[element].chain_a_max; + if ((priv->cfg->valid_tx_ant & ANT_B) && + (enhanced_txpower[element].chain_b_max > max_txpower_avg)) + max_txpower_avg = enhanced_txpower[element].chain_b_max; + if ((priv->cfg->valid_tx_ant & ANT_C) && + (enhanced_txpower[element].chain_c_max > max_txpower_avg)) + max_txpower_avg = enhanced_txpower[element].chain_c_max; + if (((priv->cfg->valid_tx_ant == ANT_AB) | + (priv->cfg->valid_tx_ant == ANT_BC) | + (priv->cfg->valid_tx_ant == ANT_AC)) && + (enhanced_txpower[element].mimo2_max > max_txpower_avg)) + max_txpower_avg = enhanced_txpower[element].mimo2_max; + if ((priv->cfg->valid_tx_ant == ANT_ABC) && + (enhanced_txpower[element].mimo3_max > max_txpower_avg)) + max_txpower_avg = enhanced_txpower[element].mimo3_max; + + /* + * max. tx power in EEPROM is in 1/2 dBm format + * convert from 1/2 dBm to dBm (round-up convert) + * but we also do not want to loss 1/2 dBm resolution which + * will impact performance + */ + *max_txpower_in_half_dbm = max_txpower_avg; + return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1); +} + +/** + * iwl_update_common_txpower: update channel tx power + * update tx power per band based on EEPROM enhanced tx power info. 
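+ *
+ * Editorial worked example for the 1/2-dBm rounding performed by
+ * iwl_get_max_txpower_avg() above: a stored limit of 35 half-dBm is
+ * 17.5 dBm; (35 & 0x01) + (35 >> 1) = 1 + 17 = 18, so the returned dBm
+ * value rounds up while *max_txpower_in_half_dbm keeps the full 0.5 dBm
+ * resolution.
+ *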
+ */ +static s8 iwl_update_common_txpower(struct iwl_priv *priv, + struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, + int section, int element, s8 *max_txpower_in_half_dbm) +{ + struct iwl_channel_info *ch_info; + int ch; + bool is_ht40 = false; + s8 max_txpower_avg; /* (dBm) */ + + /* it is common section, contain all type (Legacy, HT and HT40) + * based on the element in the section to determine + * is it HT 40 or not + */ + if (element == EEPROM_TXPOWER_COMMON_HT40_INDEX) + is_ht40 = true; + max_txpower_avg = + iwl_get_max_txpower_avg(priv, enhanced_txpower, + element, max_txpower_in_half_dbm); + + ch_info = priv->channel_info; + + for (ch = 0; ch < priv->channel_count; ch++) { + /* find matching band and update tx power if needed */ + if ((ch_info->band == enhinfo[section].band) && + (ch_info->max_power_avg < max_txpower_avg) && + (!is_ht40)) { + /* Update regulatory-based run-time data */ + ch_info->max_power_avg = ch_info->curr_txpow = + max_txpower_avg; + ch_info->scan_power = max_txpower_avg; + } + if ((ch_info->band == enhinfo[section].band) && is_ht40 && + (ch_info->ht40_max_power_avg < max_txpower_avg)) { + /* Update regulatory-based run-time data */ + ch_info->ht40_max_power_avg = max_txpower_avg; + } + ch_info++; + } + return max_txpower_avg; +} + +/** + * iwl_update_channel_txpower: update channel tx power + * update channel tx power based on EEPROM enhanced tx power info. + */ +static s8 iwl_update_channel_txpower(struct iwl_priv *priv, + struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, + int section, int element, s8 *max_txpower_in_half_dbm) +{ + struct iwl_channel_info *ch_info; + int ch; + u8 channel; + s8 max_txpower_avg; /* (dBm) */ + + channel = enhinfo[section].iwl_eeprom_section_channel[element]; + max_txpower_avg = + iwl_get_max_txpower_avg(priv, enhanced_txpower, + element, max_txpower_in_half_dbm); + + ch_info = priv->channel_info; + for (ch = 0; ch < priv->channel_count; ch++) { + /* find matching channel and update tx power if needed */ + if (ch_info->channel == channel) { + if ((ch_info->max_power_avg < max_txpower_avg) && + (!enhinfo[section].is_ht40)) { + /* Update regulatory-based run-time data */ + ch_info->max_power_avg = max_txpower_avg; + ch_info->curr_txpow = max_txpower_avg; + ch_info->scan_power = max_txpower_avg; + } + if ((enhinfo[section].is_ht40) && + (ch_info->ht40_max_power_avg < max_txpower_avg)) { + /* Update regulatory-based run-time data */ + ch_info->ht40_max_power_avg = max_txpower_avg; + } + break; + } + ch_info++; + } + return max_txpower_avg; +} + +/** + * iwlcore_eeprom_enhanced_txpower: process enhanced tx power info + */ +void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv) +{ + int eeprom_section_count = 0; + int section, element; + struct iwl_eeprom_enhanced_txpwr *enhanced_txpower; + u32 offset; + s8 max_txpower_avg; /* (dBm) */ + s8 max_txpower_in_half_dbm; /* (half-dBm) */ + + /* Loop through all the sections + * adjust bands and channel's max tx power + * Set the tx_power_user_lmt to the highest power + * supported by any channels and chains + */ + for (section = 0; section < ARRAY_SIZE(enhinfo); section++) { + eeprom_section_count = enhinfo[section].count; + offset = enhinfo[section].offset; + enhanced_txpower = (struct iwl_eeprom_enhanced_txpwr *) + iwl_eeprom_query_addr(priv, offset); + + /* + * check for valid entry - + * different version of EEPROM might contain different set + * of enhanced tx power table + * always check for valid entry before process + * the information + */ + if (!enhanced_txpower->common 
|| enhanced_txpower->reserved) + continue; + + for (element = 0; element < eeprom_section_count; element++) { + if (enhinfo[section].is_common) + max_txpower_avg = + iwl_update_common_txpower(priv, + enhanced_txpower, section, + element, + &max_txpower_in_half_dbm); + else + max_txpower_avg = + iwl_update_channel_txpower(priv, + enhanced_txpower, section, + element, + &max_txpower_in_half_dbm); + + /* Update the tx_power_user_lmt to the highest power + * supported by any channel */ + if (max_txpower_avg > priv->tx_power_user_lmt) + priv->tx_power_user_lmt = max_txpower_avg; + + /* + * Update the tx_power_lmt_in_half_dbm to + * the highest power supported by any channel + */ + if (max_txpower_in_half_dbm > + priv->tx_power_lmt_in_half_dbm) + priv->tx_power_lmt_in_half_dbm = + max_txpower_in_half_dbm; + } + } +} +EXPORT_SYMBOL(iwlcore_eeprom_enhanced_txpower); + +#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ + ? # x " " : "") + +/** + * iwl_init_channel_map - Set up driver's info for all possible channels + */ +int iwl_init_channel_map(struct iwl_priv *priv) +{ + int eeprom_ch_count = 0; + const u8 *eeprom_ch_index = NULL; + const struct iwl_eeprom_channel *eeprom_ch_info = NULL; + int band, ch; + struct iwl_channel_info *ch_info; + + if (priv->channel_count) { + IWL_DEBUG_INFO(priv, "Channel map already initialized.\n"); + return 0; + } + + IWL_DEBUG_INFO(priv, "Initializing regulatory info from EEPROM\n"); + + priv->channel_count = + ARRAY_SIZE(iwl_eeprom_band_1) + + ARRAY_SIZE(iwl_eeprom_band_2) + + ARRAY_SIZE(iwl_eeprom_band_3) + + ARRAY_SIZE(iwl_eeprom_band_4) + + ARRAY_SIZE(iwl_eeprom_band_5); + + IWL_DEBUG_INFO(priv, "Parsing data for %d channels.\n", priv->channel_count); + + priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * + priv->channel_count, GFP_KERNEL); + if (!priv->channel_info) { + IWL_ERR(priv, "Could not allocate channel_info\n"); + priv->channel_count = 0; + return -ENOMEM; + } + + ch_info = priv->channel_info; + + /* Loop through the 5 EEPROM bands adding them in order to the + * channel map we maintain (that contains additional information than + * what just in the EEPROM) */ + for (band = 1; band <= 5; band++) { + + iwl_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + ch_info->channel = eeprom_ch_index[ch]; + ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ : + IEEE80211_BAND_5GHZ; + + /* permanently store EEPROM's channel regulatory flags + * and max power in channel info database. */ + ch_info->eeprom = eeprom_ch_info[ch]; + + /* Copy the run-time flags so they are there even on + * invalid channels */ + ch_info->flags = eeprom_ch_info[ch].flags; + /* First write that ht40 is not enabled, and then enable + * one by one */ + ch_info->ht40_extension_channel = + IEEE80211_CHAN_NO_HT40; + + if (!(is_channel_valid(ch_info))) { + IWL_DEBUG_INFO(priv, "Ch. %d Flags %x [%sGHz] - " + "No traffic\n", + ch_info->channel, + ch_info->flags, + is_channel_a_band(ch_info) ? + "5.2" : "2.4"); + ch_info++; + continue; + } + + /* Initialize regulatory-based run-time data */ + ch_info->max_power_avg = ch_info->curr_txpow = + eeprom_ch_info[ch].max_power_avg; + ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; + ch_info->min_power = 0; + + IWL_DEBUG_INFO(priv, "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):" + " Ad-Hoc %ssupported\n", + ch_info->channel, + is_channel_a_band(ch_info) ? 
+ "5.2" : "2.4", + CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(IBSS), + CHECK_AND_PRINT_I(ACTIVE), + CHECK_AND_PRINT_I(RADAR), + CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(DFS), + eeprom_ch_info[ch].flags, + eeprom_ch_info[ch].max_power_avg, + ((eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_RADAR)) + ? "" : "not "); + + /* Set the tx_power_user_lmt to the highest power + * supported by any channel */ + if (eeprom_ch_info[ch].max_power_avg > + priv->tx_power_user_lmt) + priv->tx_power_user_lmt = + eeprom_ch_info[ch].max_power_avg; + + ch_info++; + } + } + + /* Check if we do have HT40 channels */ + if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] == + EEPROM_REGULATORY_BAND_NO_HT40 && + priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] == + EEPROM_REGULATORY_BAND_NO_HT40) + return 0; + + /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ + for (band = 6; band <= 7; band++) { + enum ieee80211_band ieeeband; + + iwl_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ + ieeeband = + (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + /* Set up driver's info for lower half */ + iwl_mod_ht40_chan_info(priv, ieeeband, + eeprom_ch_index[ch], + &eeprom_ch_info[ch], + IEEE80211_CHAN_NO_HT40PLUS); + + /* Set up driver's info for upper half */ + iwl_mod_ht40_chan_info(priv, ieeeband, + eeprom_ch_index[ch] + 4, + &eeprom_ch_info[ch], + IEEE80211_CHAN_NO_HT40MINUS); + } + } + + /* for newer device (6000 series and up) + * EEPROM contain enhanced tx power information + * driver need to process addition information + * to determine the max channel tx power limits + */ + if (priv->cfg->ops->lib->eeprom_ops.update_enhanced_txpower) + priv->cfg->ops->lib->eeprom_ops.update_enhanced_txpower(priv); + + return 0; +} +EXPORT_SYMBOL(iwl_init_channel_map); + +/* + * iwl_free_channel_map - undo allocations in iwl_init_channel_map + */ +void iwl_free_channel_map(struct iwl_priv *priv) +{ + kfree(priv->channel_info); + priv->channel_count = 0; +} +EXPORT_SYMBOL(iwl_free_channel_map); + +/** + * iwl_get_channel_info - Find driver's private channel info + * + * Based on band and channel number. + */ +const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv, + enum ieee80211_band band, u16 channel) +{ + int i; + + switch (band) { + case IEEE80211_BAND_5GHZ: + for (i = 14; i < priv->channel_count; i++) { + if (priv->channel_info[i].channel == channel) + return &priv->channel_info[i]; + } + break; + case IEEE80211_BAND_2GHZ: + if (channel >= 1 && channel <= 14) + return &priv->channel_info[channel - 1]; + break; + default: + BUG(); + } + + return NULL; +} +EXPORT_SYMBOL(iwl_get_channel_info); + diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h new file mode 100644 index 0000000..4e1ba82 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -0,0 +1,507 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_eeprom_h__ +#define __iwl_eeprom_h__ + +#include + +struct iwl_priv; + +/* + * EEPROM access time values: + * + * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. + * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). + * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. + * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. + */ +#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ + +#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ +#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + + +/* + * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags. + * + * IBSS and/or AP operation is allowed *only* on those channels with + * (VALID && IBSS && ACTIVE && !RADAR). 
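+ *
+ * Spelled out as code (editorial sketch over the flag bits defined below):
+ *
+ *	bool ibss_allowed = (flags & EEPROM_CHANNEL_VALID) &&
+ *			    (flags & EEPROM_CHANNEL_IBSS) &&
+ *			    (flags & EEPROM_CHANNEL_ACTIVE) &&
+ *			    !(flags & EEPROM_CHANNEL_RADAR);
+ *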
This restriction is in place because + * RADAR detection is not supported by the 4965 driver, but is a + * requirement for establishing a new network for legal operation on channels + * requiring RADAR detection or restricting ACTIVE scanning. + * + * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels. + * It only indicates that 20 MHz channel use is supported; HT40 channel + * usage is indicated by a separate set of regulatory flags for each + * HT40 channel pair. + * + * NOTE: Using a channel inappropriately will result in a uCode error! + */ +#define IWL_NUM_TX_CALIB_GROUPS 5 +enum { + EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */ + EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */ + /* Bit 2 Reserved */ + EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ + EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ + EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ + /* Bit 6 Reserved (was Narrow Channel) */ + EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ +}; + +/* SKU Capabilities */ +#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) +#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) + +/* *regulatory* channel data format in eeprom, one for each channel. + * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */ +struct iwl_eeprom_channel { + u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ + s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ +} __attribute__ ((packed)); + +/** + * iwl_eeprom_enhanced_txpwr structure + * This structure presents the enhanced regulatory tx power limit layout + * in eeprom image + * Enhanced regulatory tx power portion of eeprom image can be broken down + * into individual structures; each one is 8 bytes in size and contain the + * following information + * @common: (desc + channel) not used by driver, should _NOT_ be "zero" + * @chain_a_max_pwr: chain a max power in 1/2 dBm + * @chain_b_max_pwr: chain b max power in 1/2 dBm + * @chain_c_max_pwr: chain c max power in 1/2 dBm + * @reserved: not used, should be "zero" + * @mimo2_max_pwr: mimo2 max power in 1/2 dBm + * @mimo3_max_pwr: mimo3 max power in 1/2 dBm + * + */ +struct iwl_eeprom_enhanced_txpwr { + __le16 common; + s8 chain_a_max; + s8 chain_b_max; + s8 chain_c_max; + s8 reserved; + s8 mimo2_max; + s8 mimo3_max; +} __attribute__ ((packed)); + +/* 3945 Specific */ +#define EEPROM_3945_EEPROM_VERSION (0x2f) + +/* 4965 has two radio transmitters (and 3 radio receivers) */ +#define EEPROM_TX_POWER_TX_CHAINS (2) + +/* 4965 has room for up to 8 sets of txpower calibration data */ +#define EEPROM_TX_POWER_BANDS (8) + +/* 4965 factory calibration measures txpower gain settings for + * each of 3 target output levels */ +#define EEPROM_TX_POWER_MEASUREMENTS (3) + +/* 4965 Specific */ +/* 4965 driver does not work with txpower calibration version < 5 */ +#define EEPROM_4965_TX_POWER_VERSION (5) +#define EEPROM_4965_EEPROM_VERSION (0x2f) +#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */ +#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */ +#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */ +#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */ + +/* 5000 Specific */ +#define EEPROM_5000_TX_POWER_VERSION (4) +#define EEPROM_5000_EEPROM_VERSION (0x11A) + +/*5000 calibrations */ +#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION) +#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL) +#define 
EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_5000_CALIB_ALL) + +/* 5000 links */ +#define EEPROM_5000_LINK_HOST (2*0x64) +#define EEPROM_5000_LINK_GENERAL (2*0x65) +#define EEPROM_5000_LINK_REGULATORY (2*0x66) +#define EEPROM_5000_LINK_CALIBRATION (2*0x67) +#define EEPROM_5000_LINK_PROCESS_ADJST (2*0x68) +#define EEPROM_5000_LINK_OTHERS (2*0x69) + +/* 5000 regulatory - indirect access */ +#define EEPROM_5000_REG_SKU_ID ((0x02)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */ +#define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */ +#define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */ +#define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ +#define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ +#define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */ +#define EEPROM_5000_REG_BAND_24_HT40_CHANNELS ((0x82)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ +#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ + +/* 6000 and up regulatory tx power - indirect access */ +/* max. elements per section */ +#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8) +#define EEPROM_TXPOWER_COMMON_HT40_INDEX (2) + +/** + * Partition the enhanced tx power portion of eeprom image into + * 10 sections based on band, modulation, frequency and channel + * + * Section 1: all CCK channels + * Section 2: all 2.4 GHz OFDM (Legacy, HT and HT40 ) channels + * Section 3: all 5.2 GHz OFDM (Legacy, HT and HT40) channels + * Section 4: 2.4 GHz 20MHz channels: 1, 2, 10, 11. 
Both Legacy and HT + * Section 5: 2.4 GHz 40MHz channels: 1, 2, 6, 7, 9, (_above_) + * Section 6: 5.2 GHz 20MHz channels: 36, 64, 100, both Legacy and HT + * Section 7: 5.2 GHz 40MHz channels: 36, 60, 100 (_above_) + * Section 8: 2.4 GHz channel 13, Both Legacy and HT + * Section 9: 2.4 GHz channel 140, Both Legacy and HT + * Section 10: 2.4 GHz 40MHz channels: 132, 44 (_above_) + */ +/* 2.4 GHz band: CCK */ +#define EEPROM_LB_CCK_20_COMMON ((0xA8)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 8 bytes */ +/* 2.4 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ +#define EEPROM_LB_OFDM_COMMON ((0xB0)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ +/* 5.2 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ +#define EEPROM_HB_OFDM_COMMON ((0xC8)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ +/* 2.4GHz band channels: + * 1Legacy, 1HT, 2Legacy, 2HT, 10Legacy, 10HT, 11Legacy, 11HT */ +#define EEPROM_LB_OFDM_20_BAND ((0xE0)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 64 bytes */ +/* 2.4 GHz band HT40 channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) */ +#define EEPROM_LB_OFDM_HT40_BAND ((0x120)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 40 bytes */ +/* 5.2GHz band channels: 36Legacy, 36HT, 64Legacy, 64HT, 100Legacy, 100HT */ +#define EEPROM_HB_OFDM_20_BAND ((0x148)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 48 bytes */ +/* 5.2 GHz band HT40 channels: (36,+1) (60,+1) (100,+1) */ +#define EEPROM_HB_OFDM_HT40_BAND ((0x178)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ +/* 2.4 GHz band, channnel 13: Legacy, HT */ +#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x190)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ +/* 5.2 GHz band, channnel 140: Legacy, HT */ +#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A0)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ +/* 5.2 GHz band, HT40 channnels (132,+1) (44,+1) */ +#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B0)\ + | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ + + +/* 5050 Specific */ +#define EEPROM_5050_TX_POWER_VERSION (4) +#define EEPROM_5050_EEPROM_VERSION (0x21E) + +/* 1000 Specific */ +#define EEPROM_1000_EEPROM_VERSION (0x15C) + +/* 6x00 Specific */ +#define EEPROM_6000_EEPROM_VERSION (0x434) + +/* 6x50 Specific */ +#define EEPROM_6050_EEPROM_VERSION (0x532) + +/* OTP */ +/* lower blocks contain EEPROM image and calibration data */ +#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ +/* high blocks contain PAPD data */ +#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */ +#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */ +#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ +#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ +#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ + +/* 2.4 GHz */ +extern const u8 iwl_eeprom_band_1[14]; + +/* + * factory calibration data for one txpower level, on one channel, + * measured on one of the 2 tx chains (radio transmitter and associated + * antenna). EEPROM contains: + * + * 1) Temperature (degrees Celsius) of device when measurement was made. + * + * 2) Gain table index used to achieve the target measurement power. + * This refers to the "well-known" gain tables (see iwl-4965-hw.h). + * + * 3) Actual measured output power, in half-dBm ("34" = 17 dBm). + * + * 4) RF power amplifier detector level measurement (not used). 
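+ *
+ * Editorial sketch of how driver code picks these calibration structures
+ * up, using EEPROM_4965_CALIB_TXPOWER_OFFSET defined above and the
+ * iwl_eeprom_calib_info layout further down:
+ *
+ *	const struct iwl_eeprom_calib_info *calib =
+ *		(const struct iwl_eeprom_calib_info *)
+ *		iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+ *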
+ */ +struct iwl_eeprom_calib_measure { + u8 temperature; /* Device temperature (Celsius) */ + u8 gain_idx; /* Index into gain table */ + u8 actual_pow; /* Measured RF output power, half-dBm */ + s8 pa_det; /* Power amp detector level (not used) */ +} __attribute__ ((packed)); + + +/* + * measurement set for one channel. EEPROM contains: + * + * 1) Channel number measured + * + * 2) Measurements for each of 3 power levels for each of 2 radio transmitters + * (a.k.a. "tx chains") (6 measurements altogether) + */ +struct iwl_eeprom_calib_ch_info { + u8 ch_num; + struct iwl_eeprom_calib_measure + measurements[EEPROM_TX_POWER_TX_CHAINS] + [EEPROM_TX_POWER_MEASUREMENTS]; +} __attribute__ ((packed)); + +/* + * txpower subband info. + * + * For each frequency subband, EEPROM contains the following: + * + * 1) First and last channels within range of the subband. "0" values + * indicate that this sample set is not being used. + * + * 2) Sample measurement sets for 2 channels close to the range endpoints. + */ +struct iwl_eeprom_calib_subband_info { + u8 ch_from; /* channel number of lowest channel in subband */ + u8 ch_to; /* channel number of highest channel in subband */ + struct iwl_eeprom_calib_ch_info ch1; + struct iwl_eeprom_calib_ch_info ch2; +} __attribute__ ((packed)); + + +/* + * txpower calibration info. EEPROM contains: + * + * 1) Factory-measured saturation power levels (maximum levels at which + * tx power amplifier can output a signal without too much distortion). + * There is one level for 2.4 GHz band and one for 5 GHz band. These + * values apply to all channels within each of the bands. + * + * 2) Factory-measured power supply voltage level. This is assumed to be + * constant (i.e. same value applies to all channels/bands) while the + * factory measurements are being made. + * + * 3) Up to 8 sets of factory-measured txpower calibration values. + * These are for different frequency ranges, since txpower gain + * characteristics of the analog radio circuitry vary with frequency. + * + * Not all sets need to be filled with data; + * struct iwl_eeprom_calib_subband_info contains range of channels + * (0 if unused) for each set of data. + */ +struct iwl_eeprom_calib_info { + u8 saturation_power24; /* half-dBm (e.g. 
"34" = 17 dBm) */ + u8 saturation_power52; /* half-dBm */ + __le16 voltage; /* signed */ + struct iwl_eeprom_calib_subband_info + band_info[EEPROM_TX_POWER_BANDS]; +} __attribute__ ((packed)); + + +#define ADDRESS_MSK 0x0000FFFF +#define INDIRECT_TYPE_MSK 0x000F0000 +#define INDIRECT_HOST 0x00010000 +#define INDIRECT_GENERAL 0x00020000 +#define INDIRECT_REGULATORY 0x00030000 +#define INDIRECT_CALIBRATION 0x00040000 +#define INDIRECT_PROCESS_ADJST 0x00050000 +#define INDIRECT_OTHERS 0x00060000 +#define INDIRECT_ADDRESS 0x00100000 + +/* General */ +#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ +#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ +#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ +#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ +#define EEPROM_VERSION (2*0x44) /* 2 bytes */ +#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */ +#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ +#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ +#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ +#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */ + +/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ +#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ +#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ +#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ +#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ +#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ +#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ + +#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0 +#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1 + +/* Radio Config for 5000 and up */ +#define EEPROM_RF_CONFIG_TYPE_R3x3 0x0 +#define EEPROM_RF_CONFIG_TYPE_R2x2 0x1 +#define EEPROM_RF_CONFIG_TYPE_R1x2 0x2 +#define EEPROM_RF_CONFIG_TYPE_MAX 0x3 + +/* + * Per-channel regulatory data. + * + * Each channel that *might* be supported by iwl has a fixed location + * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory + * txpower (MSB). + * + * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) + * channels (only for 4965, not supported by 3945) appear later in the EEPROM. + * + * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + */ +#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */ +#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */ + +/* + * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, + * 5.0 GHz channels 7, 8, 11, 12, 16 + * (4915-5080MHz) (none of these is ever supported) + */ +#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */ + +/* + * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 + * (5170-5320MHz) + */ +#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */ + +/* + * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 + * (5500-5700MHz) + */ +#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */ + +/* + * 5.7 GHz channels 145, 149, 153, 157, 161, 165 + * (5725-5825MHz) + */ +#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */ + +/* + * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11) + * + * The channel listed is the center of the lower 20 MHz half of the channel. 
+ * The overall center frequency is actually 2 channels (10 MHz) above that, + * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away + * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5, + * and the overall HT40 channel width centers on channel 3. + * + * NOTE: The RXON command uses 20 MHz channel numbers to specify the + * control channel to which to tune. RXON also specifies whether the + * control channel is the upper or lower half of a HT40 channel. + * + * NOTE: 4965 does not support HT40 channels on 2.4 GHz. + */ +#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */ + +/* + * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64), + * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161) + */ +#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */ + +#define EEPROM_REGULATORY_BAND_NO_HT40 (0) + +struct iwl_eeprom_ops { + const u32 regulatory_bands[7]; + int (*verify_signature) (struct iwl_priv *priv); + int (*acquire_semaphore) (struct iwl_priv *priv); + void (*release_semaphore) (struct iwl_priv *priv); + u16 (*calib_version) (struct iwl_priv *priv); + const u8* (*query_addr) (const struct iwl_priv *priv, size_t offset); + void (*update_enhanced_txpower) (struct iwl_priv *priv); +}; + + +void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); +int iwl_eeprom_init(struct iwl_priv *priv); +void iwl_eeprom_free(struct iwl_priv *priv); +int iwl_eeprom_check_version(struct iwl_priv *priv); +const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); +u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset); + +int iwlcore_eeprom_verify_signature(struct iwl_priv *priv); +int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); +void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv); +const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); +void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv); +int iwl_init_channel_map(struct iwl_priv *priv); +void iwl_free_channel_map(struct iwl_priv *priv); +const struct iwl_channel_info *iwl_get_channel_info( + const struct iwl_priv *priv, + enum ieee80211_band band, u16 channel); + +#endif /* __iwl_eeprom_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h new file mode 100644 index 0000000..113c366 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -0,0 +1,518 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fh_h__ +#define __iwl_fh_h__ + +/****************************/ +/* Flow Handler Definitions */ +/****************************/ + +/** + * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) + * Addresses are offsets from device's PCI hardware base address. + */ +#define FH_MEM_LOWER_BOUND (0x1000) +#define FH_MEM_UPPER_BOUND (0x2000) + +/** + * Keep-Warm (KW) buffer base address. + * + * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the + * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency + * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host + * from going into a power-savings mode that would cause higher DRAM latency, + * and possible data over/under-runs, before all Tx/Rx is complete. + * + * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4) + * of the buffer, which must be 4K aligned. Once this is set up, the 4965 + * automatically invokes keep-warm accesses when normal accesses might not + * be sufficient to maintain fast DRAM response. 
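+ *
+ * A minimal programming sketch (illustrative only; "kw_dma" here stands for
+ * the physical address of the driver's 4K-aligned, DMA-coherent buffer):
+ *
+ *	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, kw_dma >> 4);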
+ * + * Bit fields: + * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned + */ +#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) + + +/** + * TFD Circular Buffers Base (CBBC) addresses + * + * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident + * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) + * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 + * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte + * aligned (address bits 0-7 must be 0). + * + * Bit fields in each pointer register: + * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned + */ +#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) + +/* Find TFD CB base pointer for given queue (range 0-15). */ +#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4) + + +/** + * Rx SRAM Control and Status Registers (RSCSR) + * + * These registers provide handshake between driver and 4965 for the Rx queue + * (this queue handles *all* command responses, notifications, Rx data, etc. + * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx + * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can + * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer + * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 + * mapping between RBDs and RBs. + * + * Driver must allocate host DRAM memory for the following, and set the + * physical address of each into 4965 registers: + * + * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 + * entries (although any power of 2, up to 4096, is selectable by driver). + * Each entry (1 dword) points to a receive buffer (RB) of consistent size + * (typically 4K, although 8K or 16K are also selectable by driver). + * Driver sets up RB size and number of RBDs in the CB via Rx config + * register FH_MEM_RCSR_CHNL0_CONFIG_REG. + * + * Bit fields within one RBD: + * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned + * + * Driver sets physical address [35:8] of base of RBD circular buffer + * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. + * + * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers + * (RBs) have been filled, via a "write pointer", actually the index of + * the RB's corresponding RBD within the circular buffer. Driver sets + * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. + * + * Bit fields in lower dword of Rx status buffer (upper dword not used + * by driver; see struct iwl4965_shared, val0): + * 31-12: Not used by driver + * 11- 0: Index of last filled Rx buffer descriptor + * (4965 writes, driver reads this value) + * + * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must + * enter pointers to these RBs into contiguous RBD circular buffer entries, + * and update the 4965's "write" index register, + * FH_RSCSR_CHNL0_RBDCB_WPTR_REG. + * + * This "write" index corresponds to the *next* RBD that the driver will make + * available, i.e. one RBD past the tail of the ready-to-fill RBDs within + * the circular buffer. This value should initially be 0 (before preparing any + * RBs), should be 8 after preparing the first 8 RBs (for example), and must + * wrap back to 0 at the end of the circular buffer (but don't wrap before + * "read" index has advanced past 1! See below). 
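+ *
+ * An update sketch (illustrative; "write" is the driver's software copy of
+ * the index, rounded down per the NOTE that follows):
+ *
+ *	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, write & ~0x7);
+ *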
+ * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8. + * + * As the 4965 fills RBs (referenced from contiguous RBDs within the circular + * buffer), it updates the Rx status buffer in host DRAM, 2) described above, + * to tell the driver the index of the latest filled RBD. The driver must + * read this "read" index from DRAM after receiving an Rx interrupt from 4965. + * + * The driver must also internally keep track of a third index, which is the + * next RBD to process. When receiving an Rx interrupt, driver should process + * all filled but unprocessed RBs up to, but not including, the RB + * corresponding to the "read" index. For example, if "read" index becomes "1", + * driver may process the RB pointed to by RBD 0. Depending on volume of + * traffic, there may be many RBs to process. + * + * If read index == write index, 4965 thinks there is no room to put new data. + * Due to this, the maximum number of filled RBs is 255, instead of 256. To + * be safe, make sure that there is a gap of at least 2 RBDs between "write" + * and "read" indexes; that is, make sure that there are no more than 254 + * buffers waiting to be filled. + */ +#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0) +#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) + +/** + * Physical base address of 8-byte Rx Status buffer. + * Bit fields: + * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned. + */ +#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) + +/** + * Physical base address of Rx Buffer Descriptor Circular Buffer. + * Bit fields: + * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned. + */ +#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) + +/** + * Rx write pointer (index, really!). + * Bit fields: + * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. + * NOTE: For 256-entry circular buffer, use only bits [7:0]. + */ +#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008) +#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG) + + +/** + * Rx Config/Status Registers (RCSR) + * Rx Config Reg for channel 0 (only channel used) + * + * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for + * normal operation (see bit fields). + * + * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA. + * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for + * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing. + * + * Bit fields: + * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29-24: reserved + * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal), + * min "5" for 32 RBDs, max "12" for 4096 RBDs. + * 19-18: reserved + * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K, + * '10' 12K, '11' 16K. 
+ * 15-14: reserved + * 13-12: IRQ destination; '00' none, '01' host driver (normal operation) + * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec) + * typical value 0x10 (about 1/2 msec) + * 3- 0: reserved + */ +#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0) +#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) + +#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) + +#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ +#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ +#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/ + +#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) +#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) +#define RX_RB_TIMEOUT (0x10) + +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) + +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) + +#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) + +#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */ + +/** + * Rx Shared Status Registers (RSSR) + * + * After stopping Rx DMA channel (writing 0 to + * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll + * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle. + * + * Bit fields: + * 24: 1 = Channel 0 is idle + * + * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV + * contain default values that should not be altered by the driver. + */ +#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) +#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) + +#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) +#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) +#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ + (FH_MEM_RSSR_LOWER_BOUND + 0x008) + +#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) + +#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 + +/* TFDB Area - TFDs buffer table */ +#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) +#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) +#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958) +#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) +#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) + +/** + * Transmit DMA Channel Control/Status Registers (TCSR) + * + * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels + * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, + * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. + * + * To use a Tx DMA channel, driver must initialize its + * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with: + * + * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL + * + * All other bits should be 0. 
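+ *
+ * For example (illustrative; the credit-enable constant is defined below as
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE):
+ *
+ *	iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ *			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ *			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);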
+ * + * Bit fields: + * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29- 4: Reserved, set to "0" + * 3: Enable internal DMA requests (1, normal operation), disable (0) + * 2- 0: Reserved, set to "0" + */ +#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) +#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) + +/* Find Control/Status reg for given Tx DMA/FIFO channel */ +#define FH49_TCSR_CHNL_NUM (7) +#define FH50_TCSR_CHNL_NUM (8) + +/* TCSR: tx_config register values */ +#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl)) +#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) +#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) + +#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) + +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) + +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) + +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) + +#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) + +/** + * Tx Shared Status Registers (TSSR) + * + * After stopping Tx DMA channel (writing 0 to + * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll + * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle + * (channel's buffers empty | no pending requests). + * + * Bit fields: + * 31-24: 1 = Channel buffers empty (channel 7:0) + * 23-16: 1 = No pending requests (channel 7:0) + */ +#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0) +#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0) + +#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) + +/** + * Bit fields for TSSR(Tx Shared Status & Control) error status register: + * 31: Indicates an address error when accessed to internal memory + * uCode/driver must write "1" in order to clear this flag + * 30: Indicates that Host did not send the expected number of dwords to FH + * uCode/driver must write "1" in order to clear this flag + * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA + * command was received from the scheduler while the TRB was already full + * with previous command + * uCode/driver must write "1" in order to clear this flag + * 7-0: Each status bit indicates a channel's TxCredit error. When an error + * bit is set, it indicates that the FH has received a full indication + * from the RTC TxFIFO and the current value of the TxCredit counter was + * not equal to zero. 
This mean that the credit mechanism was not + * synchronized to the TxFIFO status + * uCode/driver must write "1" in order to clear this flag + */ +#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018) + +#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24) +#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16) + +#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \ + (FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \ + FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl)) + +/* Tx service channels */ +#define FH_SRVC_CHNL (9) +#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8) +#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \ + (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) + +#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) +/* Instruct FH to increment the retry count of a packet when + * it is brought from the memory to TX-FIFO + */ +#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) + +#define RX_QUEUE_SIZE 256 +#define RX_QUEUE_MASK 255 +#define RX_QUEUE_SIZE_LOG 8 + +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + +/* Size of one Rx buffer in host DRAM */ +#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */ +#define IWL_RX_BUF_SIZE_4K (4 * 1024) +#define IWL_RX_BUF_SIZE_8K (8 * 1024) + +/** + * struct iwl_rb_status - reseve buffer status + * host memory mapped FH registers + * @closed_rb_num [0:11] - Indicates the index of the RB which was closed + * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed + * @finished_rb_num [0:11] - Indicates the index of the current RB + * in which the last frame was written to + * @finished_fr_num [0:11] - Indicates the index of the RX Frame + * which was transfered + */ +struct iwl_rb_status { + __le16 closed_rb_num; + __le16 closed_fr_num; + __le16 finished_rb_num; + __le16 finished_fr_nam; + __le32 __unused; /* 3945 only */ +} __attribute__ ((packed)); + + +#define TFD_QUEUE_SIZE_MAX (256) +#define TFD_QUEUE_SIZE_BC_DUP (64) +#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) +#define IWL_TX_DMA_MASK DMA_BIT_MASK(36) +#define IWL_NUM_OF_TBS 20 + +static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr) +{ + return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF; +} +/** + * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor + * + * This structure contains dma address and length of transmission address + * + * @lo: low [31:0] portion of the dma address of TX buffer + * every even is unaligned on 16 bit boundary + * @hi_n_len 0-3 [35:32] portion of dma + * 4-15 length of the tx buffer + */ +struct iwl_tfd_tb { + __le32 lo; + __le16 hi_n_len; +} __attribute__((packed)); + +/** + * struct iwl_tfd + * + * Transmit Frame Descriptor (TFD) + * + * @ __reserved1[3] reserved + * @ num_tbs 0-4 number of active tbs + * 5 reserved + * 6-7 padding (not used) + * @ tbs[20] transmit frame buffer descriptors + * @ __pad padding + * + * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. + * Both driver and device share these circular buffers, each of which must be + * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes + * + * Driver must indicate the physical address of the base of each + * circular buffer via the FH_MEM_CBBC_QUEUE registers. + * + * Each TFD contains pointer/size information for up to 20 data buffers + * in host DRAM. 
These buffers collectively contain the (one) frame described + * by the TFD. Each buffer must be a single contiguous block of memory within + * itself, but buffers may be scattered in host DRAM. Each buffer has max size + * of (4K - 4). The concatenates all of a TFD's buffers into a single + * Tx frame, up to 8 KBytes in size. + * + * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. + */ +struct iwl_tfd { + u8 __reserved1[3]; + u8 num_tbs; + struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; + __le32 __pad; +} __attribute__ ((packed)); + +/* Keep Warm Size */ +#define IWL_KW_SIZE 0x1000 /* 4k */ + +#endif /* !__iwl_fh_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c new file mode 100644 index 0000000..73681c4 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c @@ -0,0 +1,283 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include +#include +#include +#include + +#include "iwl-dev.h" /* FIXME: remove */ +#include "iwl-debug.h" +#include "iwl-eeprom.h" +#include "iwl-core.h" + + +const char *get_cmd_string(u8 cmd) +{ + switch (cmd) { + IWL_CMD(REPLY_ALIVE); + IWL_CMD(REPLY_ERROR); + IWL_CMD(REPLY_RXON); + IWL_CMD(REPLY_RXON_ASSOC); + IWL_CMD(REPLY_QOS_PARAM); + IWL_CMD(REPLY_RXON_TIMING); + IWL_CMD(REPLY_ADD_STA); + IWL_CMD(REPLY_REMOVE_STA); + IWL_CMD(REPLY_REMOVE_ALL_STA); + IWL_CMD(REPLY_WEPKEY); + IWL_CMD(REPLY_3945_RX); + IWL_CMD(REPLY_TX); + IWL_CMD(REPLY_RATE_SCALE); + IWL_CMD(REPLY_LEDS_CMD); + IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); + IWL_CMD(COEX_PRIORITY_TABLE_CMD); + IWL_CMD(COEX_MEDIUM_NOTIFICATION); + IWL_CMD(COEX_EVENT_CMD); + IWL_CMD(REPLY_QUIET_CMD); + IWL_CMD(REPLY_CHANNEL_SWITCH); + IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); + IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); + IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); + IWL_CMD(POWER_TABLE_CMD); + IWL_CMD(PM_SLEEP_NOTIFICATION); + IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); + IWL_CMD(REPLY_SCAN_CMD); + IWL_CMD(REPLY_SCAN_ABORT_CMD); + IWL_CMD(SCAN_START_NOTIFICATION); + IWL_CMD(SCAN_RESULTS_NOTIFICATION); + IWL_CMD(SCAN_COMPLETE_NOTIFICATION); + IWL_CMD(BEACON_NOTIFICATION); + IWL_CMD(REPLY_TX_BEACON); + IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); + IWL_CMD(QUIET_NOTIFICATION); + IWL_CMD(REPLY_TX_PWR_TABLE_CMD); + IWL_CMD(MEASURE_ABORT_NOTIFICATION); + IWL_CMD(REPLY_BT_CONFIG); + IWL_CMD(REPLY_STATISTICS_CMD); + IWL_CMD(STATISTICS_NOTIFICATION); + IWL_CMD(REPLY_CARD_STATE_CMD); + IWL_CMD(CARD_STATE_NOTIFICATION); + IWL_CMD(MISSED_BEACONS_NOTIFICATION); + IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); + IWL_CMD(SENSITIVITY_CMD); + IWL_CMD(REPLY_PHY_CALIBRATION_CMD); + IWL_CMD(REPLY_RX_PHY_CMD); + IWL_CMD(REPLY_RX_MPDU_CMD); + IWL_CMD(REPLY_RX); + IWL_CMD(REPLY_COMPRESSED_BA); + IWL_CMD(CALIBRATION_CFG_CMD); + IWL_CMD(CALIBRATION_RES_NOTIFICATION); + IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION); + IWL_CMD(REPLY_TX_POWER_DBM_CMD); + IWL_CMD(TEMPERATURE_NOTIFICATION); + IWL_CMD(TX_ANT_CONFIGURATION_CMD); + default: + return "UNKNOWN"; + + } +} +EXPORT_SYMBOL(get_cmd_string); + +#define HOST_COMPLETE_TIMEOUT (HZ / 2) + +static void iwl_generic_cmd_callback(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt) +{ + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from %s (0x%08X)\n", + get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + switch (cmd->hdr.cmd) { + case REPLY_TX_LINK_QUALITY_CMD: + case SENSITIVITY_CMD: + IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n", + get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + break; + default: + IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n", + get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + } +#endif +} + +static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int ret; + + BUG_ON(!(cmd->flags & CMD_ASYNC)); + + /* An asynchronous command can not expect an SKB to be set. 
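+	 * Only the synchronous path (iwl_send_cmd_sync() below) waits for and
+	 * hands back a response via CMD_WANT_SKB / reply_page, so that flag
+	 * is rejected here.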
*/ + BUG_ON(cmd->flags & CMD_WANT_SKB); + + /* Assign a generic callback if one is not provided */ + if (!cmd->callback) + cmd->callback = iwl_generic_cmd_callback; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EBUSY; + + ret = iwl_enqueue_hcmd(priv, cmd); + if (ret < 0) { + IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + return ret; + } + return 0; +} + +int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int cmd_idx; + int ret; + + BUG_ON(cmd->flags & CMD_ASYNC); + + /* A synchronous command can not have a callback set. */ + BUG_ON(cmd->callback); + + IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", + get_cmd_string(cmd->id)); + mutex_lock(&priv->sync_cmd_mutex); + + set_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n", + get_cmd_string(cmd->id)); + + cmd_idx = iwl_enqueue_hcmd(priv, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + goto out; + } + + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + !test_bit(STATUS_HCMD_ACTIVE, &priv->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { + IWL_ERR(priv, + "Error sending %s: time out after %dms.\n", + get_cmd_string(cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n", + get_cmd_string(cmd->id)); + ret = -ETIMEDOUT; + goto cancel; + } + } + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n", + get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_ERR(priv, "Command %s failed: FW Error\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } + if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { + IWL_ERR(priv, "Error: Response NULL in '%s'\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto cancel; + } + + ret = 0; + goto out; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). 
+ */ + priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_idx].flags &= + ~CMD_WANT_SKB; + } +fail: + if (cmd->reply_page) { + iwl_free_pages(priv, cmd->reply_page); + cmd->reply_page = 0; + } +out: + mutex_unlock(&priv->sync_cmd_mutex); + return ret; +} +EXPORT_SYMBOL(iwl_send_cmd_sync); + +int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + if (cmd->flags & CMD_ASYNC) + return iwl_send_cmd_async(priv, cmd); + + return iwl_send_cmd_sync(priv, cmd); +} +EXPORT_SYMBOL(iwl_send_cmd); + +int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + return iwl_send_cmd_sync(priv, &cmd); +} +EXPORT_SYMBOL(iwl_send_cmd_pdu); + +int iwl_send_cmd_pdu_async(struct iwl_priv *priv, + u8 id, u16 len, const void *data, + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt)) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + cmd.flags |= CMD_ASYNC; + cmd.callback = callback; + + return iwl_send_cmd_async(priv, &cmd); +} +EXPORT_SYMBOL(iwl_send_cmd_pdu_async); diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h new file mode 100644 index 0000000..51a67fb --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h @@ -0,0 +1,170 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_helpers_h__ +#define __iwl_helpers_h__ + +#include + +#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) + + +static inline struct ieee80211_conf *ieee80211_get_hw_conf( + struct ieee80211_hw *hw) +{ + return &hw->conf; +} + +static inline int iwl_check_bits(unsigned long field, unsigned long mask) +{ + return ((field & mask) == mask) ? 
1 : 0; +} + +static inline unsigned long elapsed_jiffies(unsigned long start, + unsigned long end) +{ + if (end >= start) + return end - start; + + return end + (MAX_JIFFY_OFFSET - start) + 1; +} + +/** + * iwl_queue_inc_wrap - increment queue index, wrap back to beginning + * @index -- current index + * @n_bd -- total number of entries in queue (must be power of 2) + */ +static inline int iwl_queue_inc_wrap(int index, int n_bd) +{ + return ++index & (n_bd - 1); +} + +/** + * iwl_queue_dec_wrap - decrement queue index, wrap back to end + * @index -- current index + * @n_bd -- total number of entries in queue (must be power of 2) + */ +static inline int iwl_queue_dec_wrap(int index, int n_bd) +{ + return --index & (n_bd - 1); +} + +/* TODO: Move fw_desc functions to iwl-pci.ko */ +static inline void iwl_free_fw_desc(struct pci_dev *pci_dev, + struct fw_desc *desc) +{ + if (desc->v_addr) + dma_free_coherent(&pci_dev->dev, desc->len, + desc->v_addr, desc->p_addr); + desc->v_addr = NULL; + desc->len = 0; +} + +static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev, + struct fw_desc *desc) +{ + desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len, + &desc->p_addr, GFP_KERNEL); + return (desc->v_addr != NULL) ? 0 : -ENOMEM; +} + +/* + * we have 8 bits used like this: + * + * 7 6 5 4 3 2 1 0 + * | | | | | | | | + * | | | | | | +-+-------- AC queue (0-3) + * | | | | | | + * | +-+-+-+-+------------ HW A-MPDU queue + * | + * +---------------------- indicates agg queue + */ +static inline u8 iwl_virtual_agg_queue_num(u8 ac, u8 hwq) +{ + BUG_ON(ac > 3); /* only have 2 bits */ + BUG_ON(hwq > 31); /* only have 5 bits */ + + return 0x80 | (hwq << 2) | ac; +} + +static inline void iwl_wake_queue(struct iwl_priv *priv, u8 queue) +{ + u8 ac = queue; + u8 hwq = queue; + + if (queue & 0x80) { + ac = queue & 3; + hwq = (queue >> 2) & 0x1f; + } + + if (test_and_clear_bit(hwq, priv->queue_stopped)) + if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0) + ieee80211_wake_queue(priv->hw, ac); +} + +static inline void iwl_stop_queue(struct iwl_priv *priv, u8 queue) +{ + u8 ac = queue; + u8 hwq = queue; + + if (queue & 0x80) { + ac = queue & 3; + hwq = (queue >> 2) & 0x1f; + } + + if (!test_and_set_bit(hwq, priv->queue_stopped)) + if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0) + ieee80211_stop_queue(priv->hw, ac); +} + +#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue +#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue + +static inline void iwl_disable_interrupts(struct iwl_priv *priv) +{ + clear_bit(STATUS_INT_ENABLED, &priv->status); + + /* disable interrupts from uCode/NIC to host */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(priv, CSR_INT, 0xffffffff); + iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); + IWL_DEBUG_ISR(priv, "Disabled interrupts\n"); +} + +static inline void iwl_enable_interrupts(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR(priv, "Enabling interrupts\n"); + set_bit(STATUS_INT_ENABLED, &priv->status); + iwl_write32(priv, CSR_INT_MASK, priv->inta_mask); +} + +#endif /* __iwl_helpers_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h new file mode 100644 index 0000000..c719baf --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-io.h @@ -0,0 +1,527 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. 
All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_io_h__ +#define __iwl_io_h__ + +#include + +#include "iwl-debug.h" +#include "iwl-devtrace.h" + +/* + * IO, register, and NIC memory access functions + * + * NOTE on naming convention and macro usage for these + * + * A single _ prefix before a an access function means that no state + * check or debug information is printed when that function is called. + * + * A double __ prefix before an access function means that state is checked + * and the current line number and caller function name are printed in addition + * to any other debug output. + * + * The non-prefixed name is the #define that maps the caller into a + * #define that provides the caller's name and __LINE__ to the double + * prefix version. + * + * If you wish to call the function without any debug or state checking, + * you should use the single _ prefix version (as is used by dependent IO + * routines, for example _iwl_read_direct32 calls the non-check version of + * _iwl_read32.) + * + * These declarations are *extremely* useful in quickly isolating code deltas + * which result in misconfiguration of the hardware I/O. In combination with + * git-bisect and the IO debug level you can quickly determine the specific + * commit which breaks the IO sequence to the hardware. 
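+ *
+ * For example, with CONFIG_IWLWIFI_DEBUG set, iwl_write32() expands to
+ * __iwl_write32(__FILE__, __LINE__, ...), which logs the offset, value,
+ * caller file and line before performing the write; without it,
+ * iwl_write32() maps directly to _iwl_write32().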
+ * + */ + +static inline void _iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val) +{ + trace_iwlwifi_dev_iowrite8(priv, ofs, val); + iowrite8(val, priv->hw_base + ofs); +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_write8(const char *f, u32 l, struct iwl_priv *priv, + u32 ofs, u8 val) +{ + IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l); + _iwl_write8(priv, ofs, val); +} +#define iwl_write8(priv, ofs, val) \ + __iwl_write8(__FILE__, __LINE__, priv, ofs, val) +#else +#define iwl_write8(priv, ofs, val) _iwl_write8(priv, ofs, val) +#endif + + +static inline void _iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val) +{ + trace_iwlwifi_dev_iowrite32(priv, ofs, val); + iowrite32(val, priv->hw_base + ofs); +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv, + u32 ofs, u32 val) +{ + IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l); + _iwl_write32(priv, ofs, val); +} +#define iwl_write32(priv, ofs, val) \ + __iwl_write32(__FILE__, __LINE__, priv, ofs, val) +#else +#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val) +#endif + +static inline u32 _iwl_read32(struct iwl_priv *priv, u32 ofs) +{ + u32 val = ioread32(priv->hw_base + ofs); + trace_iwlwifi_dev_ioread32(priv, ofs, val); + return val; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) +{ + IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l); + return _iwl_read32(priv, ofs); +} +#define iwl_read32(priv, ofs) __iwl_read32(__FILE__, __LINE__, priv, ofs) +#else +#define iwl_read32(p, o) _iwl_read32(p, o) +#endif + +#define IWL_POLL_INTERVAL 10 /* microseconds */ +static inline int _iwl_poll_bit(struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int t = 0; + + do { + if ((_iwl_read32(priv, addr) & mask) == (bits & mask)) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline int __iwl_poll_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int ret = _iwl_poll_bit(priv, addr, bits, mask, timeout); + IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n", + addr, bits, mask, + unlikely(ret == -ETIMEDOUT) ? 
"timeout" : "", f, l); + return ret; +} +#define iwl_poll_bit(priv, addr, bits, mask, timeout) \ + __iwl_poll_bit(__FILE__, __LINE__, priv, addr, bits, mask, timeout) +#else +#define iwl_poll_bit(p, a, b, m, t) _iwl_poll_bit(p, a, b, m, t) +#endif + +static inline void _iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_write32(priv, reg, _iwl_read32(priv, reg) | mask); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_set_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_read32(priv, reg) | mask; + IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); + _iwl_write32(priv, reg, val); +} +static inline void iwl_set_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + __iwl_set_bit(__FILE__, __LINE__, p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#else +static inline void iwl_set_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + _iwl_set_bit(p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#endif + +static inline void _iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_write32(priv, reg, _iwl_read32(priv, reg) & ~mask); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_clear_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_read32(priv, reg) & ~mask; + IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); + _iwl_write32(priv, reg, val); +} +static inline void iwl_clear_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + __iwl_clear_bit(__FILE__, __LINE__, p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#else +static inline void iwl_clear_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + _iwl_clear_bit(p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#endif + +static inline int _iwl_grab_nic_access(struct iwl_priv *priv) +{ + int ret; + u32 val; + + /* this bit wakes up the NIC */ + _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* + * These bits say the device is running, and should keep running for + * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), + * but they do not indicate that embedded SRAM is restored yet; + * 3945 and 4965 have volatile SRAM, and must save/restore contents + * to/from host DRAM when sleeping/waking for power-saving. + * Each direction takes approximately 1/4 millisecond; with this + * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a + * series of register accesses are expected (e.g. reading Event Log), + * to keep device from sleeping. + * + * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that + * SRAM is okay/restored. We don't check that here because this call + * is just for hardware register access; but GP1 MAC_SLEEP check is a + * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). + * + * 5000 series and later (including 1000 series) have non-volatile SRAM, + * and do not save/restore SRAM when power cycling. 
+ */ + ret = _iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); + if (ret < 0) { + val = _iwl_read32(priv, CSR_GP_CNTRL); + IWL_ERR(priv, "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val); + _iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); + return -EIO; + } + + return 0; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static inline int __iwl_grab_nic_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l); + return _iwl_grab_nic_access(priv); +} +#define iwl_grab_nic_access(priv) \ + __iwl_grab_nic_access(__FILE__, __LINE__, priv) +#else +#define iwl_grab_nic_access(priv) \ + _iwl_grab_nic_access(priv) +#endif + +static inline void _iwl_release_nic_access(struct iwl_priv *priv) +{ + _iwl_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_release_nic_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + + IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l); + _iwl_release_nic_access(priv); +} +#define iwl_release_nic_access(priv) \ + __iwl_release_nic_access(__FILE__, __LINE__, priv) +#else +#define iwl_release_nic_access(priv) \ + _iwl_release_nic_access(priv) +#endif + +static inline u32 _iwl_read_direct32(struct iwl_priv *priv, u32 reg) +{ + return _iwl_read32(priv, reg); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline u32 __iwl_read_direct32(const char *f, u32 l, + struct iwl_priv *priv, u32 reg) +{ + u32 value = _iwl_read_direct32(priv, reg); + IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value, + f, l); + return value; +} +static inline u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg) +{ + u32 value; + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + value = __iwl_read_direct32(__FILE__, __LINE__, priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; +} + +#else +static inline u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg) +{ + u32 value; + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + value = _iwl_read_direct32(priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; + +} +#endif + +static inline void _iwl_write_direct32(struct iwl_priv *priv, + u32 reg, u32 value) +{ + _iwl_write32(priv, reg, value); +} +static inline void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_write_direct32(priv, reg, value); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void iwl_write_reg_buf(struct iwl_priv *priv, + u32 reg, u32 len, u32 *values) +{ + u32 count = sizeof(u32); + + if ((priv != NULL) && (values != NULL)) { + for (; 0 < len; len -= count, reg += count, values++) + iwl_write_direct32(priv, reg, *values); + } +} + +static inline int _iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, + u32 mask, int timeout) +{ + int t = 0; + + do { + if ((iwl_read_direct32(priv, addr) & mask) == mask) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static inline int 
__iwl_poll_direct_bit(const char *f, u32 l, + struct iwl_priv *priv, + u32 addr, u32 mask, int timeout) +{ + int ret = _iwl_poll_direct_bit(priv, addr, mask, timeout); + + if (unlikely(ret == -ETIMEDOUT)) + IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - " + "timedout - %s %d\n", addr, mask, f, l); + else + IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X " + "- %s %d\n", addr, mask, ret, f, l); + return ret; +} +#define iwl_poll_direct_bit(priv, addr, mask, timeout) \ + __iwl_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout) +#else +#define iwl_poll_direct_bit _iwl_poll_direct_bit +#endif + +static inline u32 _iwl_read_prph(struct iwl_priv *priv, u32 reg) +{ + _iwl_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); + rmb(); + return _iwl_read_direct32(priv, HBUS_TARG_PRPH_RDAT); +} +static inline u32 iwl_read_prph(struct iwl_priv *priv, u32 reg) +{ + unsigned long reg_flags; + u32 val; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + val = _iwl_read_prph(priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return val; +} + +static inline void _iwl_write_prph(struct iwl_priv *priv, + u32 addr, u32 val) +{ + _iwl_write_direct32(priv, HBUS_TARG_PRPH_WADDR, + ((addr & 0x0000FFFF) | (3 << 24))); + wmb(); + _iwl_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val); +} + +static inline void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_write_prph(priv, addr, val); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +#define _iwl_set_bits_prph(priv, reg, mask) \ + _iwl_write_prph(priv, reg, (_iwl_read_prph(priv, reg) | mask)) + +static inline void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + _iwl_set_bits_prph(priv, reg, mask); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +#define _iwl_set_bits_mask_prph(priv, reg, bits, mask) \ + _iwl_write_prph(priv, reg, ((_iwl_read_prph(priv, reg) & mask) | bits)) + +static inline void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg, + u32 bits, u32 mask) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + _iwl_set_bits_mask_prph(priv, reg, bits, mask); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void iwl_clear_bits_prph(struct iwl_priv + *priv, u32 reg, u32 mask) +{ + unsigned long reg_flags; + u32 val; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + val = _iwl_read_prph(priv, reg); + _iwl_write_prph(priv, reg, (val & ~mask)); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr) +{ + unsigned long reg_flags; + u32 value; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + + _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr); + rmb(); + value = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; +} + +static inline void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val) +{ + unsigned long reg_flags; 
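+	/* Indirect SRAM write: wake the NIC via iwl_grab_nic_access(), latch
+	 * the target address into HBUS_TARG_MEM_WADDR, then write the data
+	 * word to HBUS_TARG_MEM_WDAT (mirror of iwl_read_targ_mem() above). */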
+ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); + wmb(); + _iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, val); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void iwl_write_targ_mem_buf(struct iwl_priv *priv, u32 addr, + u32 len, u32 *values) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); + wmb(); + for (; 0 < len; len -= sizeof(u32), values++) + _iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values); + + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c new file mode 100644 index 0000000..a6f9c91 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-led.c @@ -0,0 +1,225 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" + +/* default: IWL_LED_BLINK(0) using blinking index table */ +static int led_mode; +module_param(led_mode, int, S_IRUGO); +MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), " + "(default 0)\n"); + + +static const struct { + u16 tpt; /* Mb/s */ + u8 on_time; + u8 off_time; +} blink_tbl[] = +{ + {300, 25, 25}, + {200, 40, 40}, + {100, 55, 55}, + {70, 65, 65}, + {50, 75, 75}, + {20, 85, 85}, + {10, 95, 95}, + {5, 110, 110}, + {1, 130, 130}, + {0, 167, 167}, + /* SOLID_ON */ + {-1, IWL_LED_SOLID, 0} +}; + +#define IWL_1MB_RATE (128 * 1024) +#define IWL_LED_THRESHOLD (16) +#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */ +#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1) + +/* + * Adjust led blink rate to compensate on a MAC Clock difference on every HW + * Led blink rate analysis showed an average deviation of 0% on 3945, + * 5% on 4965 HW and 20% on 5000 series and up. 
+ * Need to compensate on the led on/off time per HW according to the deviation + * to achieve the desired led frequency + * The calculation is: (100-averageDeviation)/100 * blinkTime + * For code efficiency the calculation will be: + * compensation = (100 - averageDeviation) * 64 / 100 + * NewBlinkTime = (compensation * BlinkTime) / 64 + */ +static inline u8 iwl_blink_compensation(struct iwl_priv *priv, + u8 time, u16 compensation) +{ + if (!compensation) { + IWL_ERR(priv, "undefined blink compensation: " + "use pre-defined blinking time\n"); + return time; + } + + return (u8)((time * compensation) >> 6); +} + +/* Set led pattern command */ +static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx) +{ + struct iwl_led_cmd led_cmd = { + .id = IWL_LED_LINK, + .interval = IWL_DEF_LED_INTRVL + }; + + BUG_ON(idx > IWL_MAX_BLINK_TBL); + + IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n", + priv->cfg->led_compensation); + led_cmd.on = + iwl_blink_compensation(priv, blink_tbl[idx].on_time, + priv->cfg->led_compensation); + led_cmd.off = + iwl_blink_compensation(priv, blink_tbl[idx].off_time, + priv->cfg->led_compensation); + + return priv->cfg->ops->led->cmd(priv, &led_cmd); +} + +int iwl_led_start(struct iwl_priv *priv) +{ + return priv->cfg->ops->led->on(priv); +} +EXPORT_SYMBOL(iwl_led_start); + +int iwl_led_associate(struct iwl_priv *priv) +{ + IWL_DEBUG_LED(priv, "Associated\n"); + if (led_mode == IWL_LED_BLINK) + priv->allow_blinking = 1; + priv->last_blink_time = jiffies; + + return 0; +} + +int iwl_led_disassociate(struct iwl_priv *priv) +{ + priv->allow_blinking = 0; + + return 0; +} + +/* + * calculate blink rate according to last second Tx/Rx activities + */ +static int iwl_get_blink_rate(struct iwl_priv *priv) +{ + int i; + /* count both tx and rx traffic to be able to + * handle traffic in either direction + */ + u64 current_tpt = priv->tx_stats.data_bytes + + priv->rx_stats.data_bytes; + s64 tpt = current_tpt - priv->led_tpt; + + if (tpt < 0) /* wraparound */ + tpt = -tpt; + + IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n", + (long long)tpt, + (unsigned long long)current_tpt); + priv->led_tpt = current_tpt; + + if (!priv->allow_blinking) + i = IWL_MAX_BLINK_TBL; + else + for (i = 0; i < IWL_MAX_BLINK_TBL; i++) + if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE)) + break; + + IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i); + return i; +} + +/* + * this function called from handler. 
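To make the compensation arithmetic above concrete, here is a small standalone sketch (illustrative only, not part of this patch): with the 20% average deviation quoted for 5000-series hardware, the pre-scaled constant is (100 - 20) * 64 / 100 = 51, and a table on-time of 25 ms becomes (25 * 51) >> 6 = 19 ms.

/*
 * Standalone illustration of the fixed-point scaling used by
 * iwl_blink_compensation(); the 20% deviation figure is the one quoted in
 * the comment above, everything else is ordinary integer arithmetic.
 */
#include <stdio.h>

static unsigned int blink_compensate(unsigned int time, unsigned int compensation)
{
	/* same shift-by-6 (divide-by-64) scaling as the driver helper */
	return (time * compensation) >> 6;
}

int main(void)
{
	unsigned int comp = (100 - 20) * 64 / 100;	/* 51 for a 20% deviation */

	printf("compensation=%u  on 25ms -> %ums  off 167ms -> %ums\n",
	       comp, blink_compensate(25, comp), blink_compensate(167, comp));
	return 0;
}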
Since setting Led command can + * happen very frequent we postpone led command to be called from + * REPLY handler so we know ucode is up + */ +void iwl_leds_background(struct iwl_priv *priv) +{ + u8 blink_idx; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + priv->last_blink_time = 0; + return; + } + if (iwl_is_rfkill(priv)) { + priv->last_blink_time = 0; + return; + } + + if (!priv->allow_blinking) { + priv->last_blink_time = 0; + if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) { + priv->last_blink_rate = IWL_SOLID_BLINK_IDX; + iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX); + } + return; + } + if (!priv->last_blink_time || + !time_after(jiffies, priv->last_blink_time + + msecs_to_jiffies(1000))) + return; + + blink_idx = iwl_get_blink_rate(priv); + + /* call only if blink rate change */ + if (blink_idx != priv->last_blink_rate) + iwl_led_pattern(priv, blink_idx); + + priv->last_blink_time = jiffies; + priv->last_blink_rate = blink_idx; +} +EXPORT_SYMBOL(iwl_leds_background); + +void iwl_leds_init(struct iwl_priv *priv) +{ + priv->last_blink_rate = 0; + priv->last_blink_time = 0; + priv->allow_blinking = 0; +} +EXPORT_SYMBOL(iwl_leds_init); diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h new file mode 100644 index 0000000..49a70ba --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-led.h @@ -0,0 +1,66 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_leds_h__ +#define __iwl_leds_h__ + + +struct iwl_priv; + +#define IWL_LED_SOLID 11 +#define IWL_LED_NAME_LEN 31 +#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) + +#define IWL_LED_ACTIVITY (0<<1) +#define IWL_LED_LINK (1<<1) + +enum led_type { + IWL_LED_TRG_TX, + IWL_LED_TRG_RX, + IWL_LED_TRG_ASSOC, + IWL_LED_TRG_RADIO, + IWL_LED_TRG_MAX, +}; + +/* + * LED mode + * IWL_LED_BLINK: adjust led blink rate based on blink table + * IWL_LED_RF_STATE: turn LED on/off based on RF state + * LED ON = RF ON + * LED OFF = RF OFF + */ +enum iwl_led_mode { + IWL_LED_BLINK, + IWL_LED_RF_STATE, +}; + +void iwl_leds_init(struct iwl_priv *priv); +void iwl_leds_background(struct iwl_priv *priv); +int iwl_led_start(struct iwl_priv *priv); +int iwl_led_associate(struct iwl_priv *priv); +int iwl_led_disassociate(struct iwl_priv *priv); + +#endif /* __iwl_leds_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c new file mode 100644 index 0000000..1a1a9f0 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-power.c @@ -0,0 +1,970 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + + +#include +#include +#include + +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-commands.h" +#include "iwl-debug.h" +#include "iwl-power.h" + +/* + * Setting power level allows the card to go to sleep when not busy. + * + * We calculate a sleep command based on the required latency, which + * we get from mac80211. In order to handle thermal throttling, we can + * also use pre-defined power levels. + */ + +/* + * For now, keep using power level 1 instead of automatically + * adjusting ... + */ +bool no_sleep_autoadjust = true; +module_param(no_sleep_autoadjust, bool, S_IRUGO); +MODULE_PARM_DESC(no_sleep_autoadjust, + "don't automatically adjust sleep level " + "according to maximum network latency"); + +/* + * This defines the old power levels. 
They are still used by default + * (level 1) and for thermal throttle (levels 3 through 5) + */ + +struct iwl_power_vec_entry { + struct iwl_powertable_cmd cmd; + u8 no_dtim; /* number of skip dtim */ +}; + +#define IWL_DTIM_RANGE_0_MAX 2 +#define IWL_DTIM_RANGE_1_MAX 10 + +#define NOSLP cpu_to_le16(0), 0, 0 +#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0 +#define TU_TO_USEC 1024 +#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC) +#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \ + cpu_to_le32(X1), \ + cpu_to_le32(X2), \ + cpu_to_le32(X3), \ + cpu_to_le32(X4)} +/* default power management (not Tx power) table values */ +/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */ +/* DTIM 0 - 2 */ +static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = { + {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0}, + {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1}, + {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2} +}; + + +/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ +/* DTIM 3 - 10 */ +static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = { + {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, + {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1}, + {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2} +}; + +/* for DTIM period > IWL_DTIM_RANGE_1_MAX */ +/* DTIM 11 - */ +static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = { + {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, + {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} +}; + +static void iwl_static_sleep_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd, + enum iwl_power_level lvl, int period) +{ + const struct iwl_power_vec_entry *table; + int max_sleep[IWL_POWER_VEC_SIZE] = { 0 }; + int i; + u8 skip; + u32 slp_itrvl; + + table = range_2; + if (period <= IWL_DTIM_RANGE_1_MAX) + table = range_1; + if (period <= IWL_DTIM_RANGE_0_MAX) + table = range_0; + + BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM); + + *cmd = table[lvl].cmd; + + if (period == 0) { + skip = 0; + period = 1; + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) + max_sleep[i] = 1; + + } else { + skip = table[lvl].no_dtim; + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) + max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]); + max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1; + } + + slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); + /* figure out the listen interval based on dtim period and skip */ + if (slp_itrvl == 0xFF) + cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = + cpu_to_le32(period * (skip + 1)); + + slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); + if (slp_itrvl > period) + cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = + cpu_to_le32((slp_itrvl / period) * period); + + if (skip) + cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; + else + cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; + + slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); + if (slp_itrvl > 
IWL_CONN_MAX_LISTEN_INTERVAL) + cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = + cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL); + + /* enforce max sleep interval */ + for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) { + if (le32_to_cpu(cmd->sleep_interval[i]) > + (max_sleep[i] * period)) + cmd->sleep_interval[i] = + cpu_to_le32(max_sleep[i] * period); + if (i != (IWL_POWER_VEC_SIZE - 1)) { + if (le32_to_cpu(cmd->sleep_interval[i]) > + le32_to_cpu(cmd->sleep_interval[i+1])) + cmd->sleep_interval[i] = + cmd->sleep_interval[i+1]; + } + } + + if (priv->power_data.pci_pm) + cmd->flags |= IWL_POWER_PCI_PM_MSK; + else + cmd->flags &= ~IWL_POWER_PCI_PM_MSK; + + IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n", + skip, period); + IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1); +} + +/* default Thermal Throttling transaction table + * Current state | Throttling Down | Throttling Up + *============================================================================= + * Condition Nxt State Condition Nxt State Condition Nxt State + *----------------------------------------------------------------------------- + * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A + * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0 + * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1 + * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0 + *============================================================================= + */ +static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = { + {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104}, + {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1}, + {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX} +}; +static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = { + {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95}, + {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1}, + {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX} +}; +static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = { + {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100}, + {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}, + {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX} +}; +static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = { + {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD}, + {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}, + {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX} +}; + +/* Advance Thermal Throttling default restriction table */ +static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = { + {IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true }, + {IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true }, + {IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false }, + {IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false } +}; + + +static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd) +{ + memset(cmd, 0, sizeof(*cmd)); + + if (priv->power_data.pci_pm) + cmd->flags |= IWL_POWER_PCI_PM_MSK; + + IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); +} + +static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd, + int dynps_ms, int wakeup_period) +{ + /* + * These are the original power level 3 sleep successions. The + * device may behave better with such succession and was also + * only tested with that. Just like the original sleep commands, + * also adjust the succession here to the wakeup_period below. 
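As a worked example of the arithmetic in iwl_static_sleep_cmd() above (a hedged, standalone sketch rather than driver code): SLP_TOUT() timeouts are expressed in TUs of 1024 microseconds, and when a table's last sleep-vector entry is 0xFF the listen interval is derived from the DTIM period and that entry's skip count.

/*
 * Illustrative only: mirrors the unit conversion and the
 * "period * (skip + 1)" rule applied when the table entry is 0xFF,
 * using the deepest range_0 entry (no_dtim = 2) as the example.
 */
#include <stdio.h>

#define TU_TO_USEC 1024	/* one TU, as defined in iwl-power.c */

int main(void)
{
	int dtim_period = 2;	/* within DTIM range 0 (<= IWL_DTIM_RANGE_0_MAX) */
	int skip = 2;		/* no_dtim of the deepest range_0 entry */

	/* SLP_TOUT(25): rx/tx timeouts are programmed in microseconds */
	printf("SLP_TOUT(25) = %d usec\n", 25 * TU_TO_USEC);

	/* last vector slot was 0xFF, so it becomes period * (skip + 1) */
	printf("derived listen interval = %d\n", dtim_period * (skip + 1));
	return 0;
}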
+ * The ranges are the same as for the sleep commands, 0-2, 3-9 + * and >10, which is selected based on the DTIM interval for + * the sleep index but here we use the wakeup period since that + * is what we need to do for the latency requirements. + */ + static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 }; + static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 }; + static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF }; + const u8 *slp_succ = slp_succ_r0; + int i; + + if (wakeup_period > IWL_DTIM_RANGE_0_MAX) + slp_succ = slp_succ_r1; + if (wakeup_period > IWL_DTIM_RANGE_1_MAX) + slp_succ = slp_succ_r2; + + memset(cmd, 0, sizeof(*cmd)); + + cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK | + IWL_POWER_FAST_PD; /* no use seeing frames for others */ + + if (priv->power_data.pci_pm) + cmd->flags |= IWL_POWER_PCI_PM_MSK; + + cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms); + cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms); + + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) + cmd->sleep_interval[i] = + cpu_to_le32(min_t(int, slp_succ[i], wakeup_period)); + + IWL_DEBUG_POWER(priv, "Automatic sleep command\n"); +} + +static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) +{ + IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); + IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags); + IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); + IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n", + le32_to_cpu(cmd->sleep_interval[0]), + le32_to_cpu(cmd->sleep_interval[1]), + le32_to_cpu(cmd->sleep_interval[2]), + le32_to_cpu(cmd->sleep_interval[3]), + le32_to_cpu(cmd->sleep_interval[4])); + + return iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, + sizeof(struct iwl_powertable_cmd), cmd); +} + +/* priv->mutex must be held */ +int iwl_power_update_mode(struct iwl_priv *priv, bool force) +{ + int ret = 0; + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS; + bool update_chains; + struct iwl_powertable_cmd cmd; + int dtimper; + + /* Don't update the RX chain when chain noise calibration is running */ + update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE || + priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE; + + if (priv->vif) + dtimper = priv->hw->conf.ps_dtim_period; + else + dtimper = 1; + + if (priv->cfg->broken_powersave) + iwl_power_sleep_cam_cmd(priv, &cmd); + else if (priv->cfg->supports_idle && + priv->hw->conf.flags & IEEE80211_CONF_IDLE) + iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20); + else if (tt->state >= IWL_TI_1) + iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper); + else if (!enabled) + iwl_power_sleep_cam_cmd(priv, &cmd); + else if (priv->power_data.debug_sleep_level_override >= 0) + iwl_static_sleep_cmd(priv, &cmd, + priv->power_data.debug_sleep_level_override, + dtimper); + else if (no_sleep_autoadjust) + iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_1, dtimper); + else + iwl_power_fill_sleep_cmd(priv, &cmd, + priv->hw->conf.dynamic_ps_timeout, + priv->hw->conf.max_sleep_period); + + if (iwl_is_ready_rf(priv) && + (memcmp(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)) || force)) { + if (cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) + set_bit(STATUS_POWER_PMI, &priv->status); + + ret = iwl_set_power(priv, &cmd); + if (!ret) { + if (!(cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) + 
clear_bit(STATUS_POWER_PMI, &priv->status); + + if (priv->cfg->ops->lib->update_chain_flags && + update_chains) + priv->cfg->ops->lib->update_chain_flags(priv); + else if (priv->cfg->ops->lib->update_chain_flags) + IWL_DEBUG_POWER(priv, + "Cannot update the power, chain noise " + "calibration running: %d\n", + priv->chain_noise_data.state); + memcpy(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)); + } else + IWL_ERR(priv, "set power fail, ret = %d", ret); + } + + return ret; +} +EXPORT_SYMBOL(iwl_power_update_mode); + +bool iwl_ht_enabled(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + struct iwl_tt_restriction *restriction; + + if (!priv->thermal_throttle.advanced_tt) + return true; + restriction = tt->restriction + tt->state; + return restriction->is_ht; +} +EXPORT_SYMBOL(iwl_ht_enabled); + +bool iwl_within_ct_kill_margin(struct iwl_priv *priv) +{ + s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */ + bool within_margin = false; + + if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) + temp = KELVIN_TO_CELSIUS(priv->temperature); + + if (!priv->thermal_throttle.advanced_tt) + within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >= + CT_KILL_THRESHOLD_LEGACY) ? true : false; + else + within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >= + CT_KILL_THRESHOLD) ? true : false; + return within_margin; +} + +enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + struct iwl_tt_restriction *restriction; + + if (!priv->thermal_throttle.advanced_tt) + return IWL_ANT_OK_MULTI; + restriction = tt->restriction + tt->state; + return restriction->tx_stream; +} +EXPORT_SYMBOL(iwl_tx_ant_restriction); + +enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + struct iwl_tt_restriction *restriction; + + if (!priv->thermal_throttle.advanced_tt) + return IWL_ANT_OK_MULTI; + restriction = tt->restriction + tt->state; + return restriction->rx_stream; +} + +#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */ +#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */ + +/* + * toggle the bit to wake up uCode and check the temperature + * if the temperature is below CT, uCode will stay awake and send card + * state notification with CT_KILL bit clear to inform Thermal Throttling + * Management to change state. 
Otherwise, uCode will go back to sleep + * without doing anything, driver should continue the 5 seconds timer + * to wake up uCode for temperature check until temperature drop below CT + */ +static void iwl_tt_check_exit_ct_kill(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + unsigned long flags; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (tt->state == IWL_TI_CT_KILL) { + if (priv->thermal_throttle.ct_kill_toggle) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + priv->thermal_throttle.ct_kill_toggle = false; + } else { + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + priv->thermal_throttle.ct_kill_toggle = true; + } + iwl_read32(priv, CSR_UCODE_DRV_GP1); + spin_lock_irqsave(&priv->reg_lock, flags); + if (!iwl_grab_nic_access(priv)) + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, flags); + + /* Reschedule the ct_kill timer to occur in + * CT_KILL_EXIT_DURATION seconds to ensure we get a + * thermal update */ + IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n"); + mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies + + CT_KILL_EXIT_DURATION * HZ); + } +} + +static void iwl_perform_ct_kill_task(struct iwl_priv *priv, + bool stop) +{ + if (stop) { + IWL_DEBUG_POWER(priv, "Stop all queues\n"); + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + IWL_DEBUG_POWER(priv, + "Schedule 5 seconds CT_KILL Timer\n"); + mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies + + CT_KILL_EXIT_DURATION * HZ); + } else { + IWL_DEBUG_POWER(priv, "Wake all queues\n"); + if (priv->mac80211_registered) + ieee80211_wake_queues(priv->hw); + } +} + +static void iwl_tt_ready_for_ct_kill(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* temperature timer expired, ready to go into CT_KILL state */ + if (tt->state != IWL_TI_CT_KILL) { + IWL_DEBUG_POWER(priv, "entering CT_KILL state when temperature timer expired\n"); + tt->state = IWL_TI_CT_KILL; + set_bit(STATUS_CT_KILL, &priv->status); + iwl_perform_ct_kill_task(priv, true); + } +} + +static void iwl_prepare_ct_kill_task(struct iwl_priv *priv) +{ + IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n"); + /* make request to retrieve statistics information */ + iwl_send_statistics_request(priv, CMD_SYNC, false); + /* Reschedule the ct_kill wait timer */ + mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm, + jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION)); +} + +#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY) +#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100) +#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90) + +/* + * Legacy thermal throttling + * 1) Avoid NIC destruction due to high temperatures + * Chip will identify dangerously high temperatures that can + * harm the device and will power down + * 2) Avoid the NIC power down due to high temperature + * Throttle early enough to lower the power consumption before + * drastic steps are needed + */ +static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + enum iwl_tt_state old_state; + +#ifdef CONFIG_IWLWIFI_DEBUG + if ((tt->tt_previous_temp) && + (temp > tt->tt_previous_temp) && + ((temp - tt->tt_previous_temp) > + 
IWL_TT_INCREASE_MARGIN)) { + IWL_DEBUG_POWER(priv, + "Temperature increase %d degree Celsius\n", + (temp - tt->tt_previous_temp)); + } +#endif + old_state = tt->state; + /* in Celsius */ + if (temp >= IWL_MINIMAL_POWER_THRESHOLD) + tt->state = IWL_TI_CT_KILL; + else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2) + tt->state = IWL_TI_2; + else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1) + tt->state = IWL_TI_1; + else + tt->state = IWL_TI_0; + +#ifdef CONFIG_IWLWIFI_DEBUG + tt->tt_previous_temp = temp; +#endif + /* stop ct_kill_waiting_tm timer */ + del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); + if (tt->state != old_state) { + switch (tt->state) { + case IWL_TI_0: + /* + * When the system is ready to go back to IWL_TI_0 + * we only have to call iwl_power_update_mode() to + * do so. + */ + break; + case IWL_TI_1: + tt->tt_power_mode = IWL_POWER_INDEX_3; + break; + case IWL_TI_2: + tt->tt_power_mode = IWL_POWER_INDEX_4; + break; + default: + tt->tt_power_mode = IWL_POWER_INDEX_5; + break; + } + mutex_lock(&priv->mutex); + if (old_state == IWL_TI_CT_KILL) + clear_bit(STATUS_CT_KILL, &priv->status); + if (tt->state != IWL_TI_CT_KILL && + iwl_power_update_mode(priv, true)) { + /* TT state not updated + * try again during next temperature read + */ + if (old_state == IWL_TI_CT_KILL) + set_bit(STATUS_CT_KILL, &priv->status); + tt->state = old_state; + IWL_ERR(priv, "Cannot update power mode, " + "TT state not updated\n"); + } else { + if (tt->state == IWL_TI_CT_KILL) { + if (force) { + set_bit(STATUS_CT_KILL, &priv->status); + iwl_perform_ct_kill_task(priv, true); + } else { + iwl_prepare_ct_kill_task(priv); + tt->state = old_state; + } + } else if (old_state == IWL_TI_CT_KILL && + tt->state != IWL_TI_CT_KILL) + iwl_perform_ct_kill_task(priv, false); + IWL_DEBUG_POWER(priv, "Temperature state changed %u\n", + tt->state); + IWL_DEBUG_POWER(priv, "Power Index change to %u\n", + tt->tt_power_mode); + } + mutex_unlock(&priv->mutex); + } +} + +/* + * Advance thermal throttling + * 1) Avoid NIC destruction due to high temperatures + * Chip will identify dangerously high temperatures that can + * harm the device and will power down + * 2) Avoid the NIC power down due to high temperature + * Throttle early enough to lower the power consumption before + * drastic steps are needed + * Actions include relaxing the power down sleep thresholds and + * decreasing the number of TX streams + * 3) Avoid throughput performance impact as much as possible + * + *============================================================================= + * Condition Nxt State Condition Nxt State Condition Nxt State + *----------------------------------------------------------------------------- + * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A + * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0 + * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1 + * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0 + *============================================================================= + */ +static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + int i; + bool changed = false; + enum iwl_tt_state old_state; + struct iwl_tt_trans *transaction; + + old_state = tt->state; + for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) { + /* based on the current TT state, + * find the curresponding transaction table + * each table has (IWL_TI_STATE_MAX - 1) entries + * tt->transaction + ((old_state * (IWL_TI_STATE_MAX - 1)) + * will advance to the correct table. 
+ * then based on the current temperature + * find the next state need to transaction to + * go through all the possible (IWL_TI_STATE_MAX - 1) entries + * in the current table to see if transaction is needed + */ + transaction = tt->transaction + + ((old_state * (IWL_TI_STATE_MAX - 1)) + i); + if (temp >= transaction->tt_low && + temp <= transaction->tt_high) { +#ifdef CONFIG_IWLWIFI_DEBUG + if ((tt->tt_previous_temp) && + (temp > tt->tt_previous_temp) && + ((temp - tt->tt_previous_temp) > + IWL_TT_INCREASE_MARGIN)) { + IWL_DEBUG_POWER(priv, + "Temperature increase %d " + "degree Celsius\n", + (temp - tt->tt_previous_temp)); + } + tt->tt_previous_temp = temp; +#endif + if (old_state != + transaction->next_state) { + changed = true; + tt->state = + transaction->next_state; + } + break; + } + } + /* stop ct_kill_waiting_tm timer */ + del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); + if (changed) { + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + + if (tt->state >= IWL_TI_1) { + /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */ + tt->tt_power_mode = IWL_POWER_INDEX_5; + if (!iwl_ht_enabled(priv)) + /* disable HT */ + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | + RXON_FLG_HT40_PROT_MSK | + RXON_FLG_HT_PROT_MSK); + else { + /* check HT capability and set + * according to the system HT capability + * in case get disabled before */ + iwl_set_rxon_ht(priv, &priv->current_ht_config); + } + + } else { + /* + * restore system power setting -- it will be + * recalculated automatically. + */ + + /* check HT capability and set + * according to the system HT capability + * in case get disabled before */ + iwl_set_rxon_ht(priv, &priv->current_ht_config); + } + mutex_lock(&priv->mutex); + if (old_state == IWL_TI_CT_KILL) + clear_bit(STATUS_CT_KILL, &priv->status); + if (tt->state != IWL_TI_CT_KILL && + iwl_power_update_mode(priv, true)) { + /* TT state not updated + * try again during next temperature read + */ + IWL_ERR(priv, "Cannot update power mode, " + "TT state not updated\n"); + if (old_state == IWL_TI_CT_KILL) + set_bit(STATUS_CT_KILL, &priv->status); + tt->state = old_state; + } else { + IWL_DEBUG_POWER(priv, + "Thermal Throttling to new state: %u\n", + tt->state); + if (old_state != IWL_TI_CT_KILL && + tt->state == IWL_TI_CT_KILL) { + if (force) { + IWL_DEBUG_POWER(priv, + "Enter IWL_TI_CT_KILL\n"); + set_bit(STATUS_CT_KILL, &priv->status); + iwl_perform_ct_kill_task(priv, true); + } else { + iwl_prepare_ct_kill_task(priv); + tt->state = old_state; + } + } else if (old_state == IWL_TI_CT_KILL && + tt->state != IWL_TI_CT_KILL) { + IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n"); + iwl_perform_ct_kill_task(priv, false); + } + } + mutex_unlock(&priv->mutex); + } +} + +/* Card State Notification indicated reach critical temperature + * if PSP not enable, no Thermal Throttling function will be performed + * just set the GP1 bit to acknowledge the event + * otherwise, go into IWL_TI_CT_KILL state + * since Card State Notification will not provide any temperature reading + * for Legacy mode + * so just pass the CT_KILL temperature to iwl_legacy_tt_handler() + * for advance mode + * pass CT_KILL_THRESHOLD+1 to make sure move into IWL_TI_CT_KILL state + */ +static void iwl_bg_ct_enter(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter); + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!iwl_is_ready(priv)) + return; + + if 
(tt->state != IWL_TI_CT_KILL) { + IWL_ERR(priv, "Device reached critical temperature " + "- ucode going to sleep!\n"); + if (!priv->thermal_throttle.advanced_tt) + iwl_legacy_tt_handler(priv, + IWL_MINIMAL_POWER_THRESHOLD, + true); + else + iwl_advance_tt_handler(priv, + CT_KILL_THRESHOLD + 1, true); + } +} + +/* Card State Notification indicated out of critical temperature + * since Card State Notification will not provide any temperature reading + * so pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2 temperature + * to iwl_legacy_tt_handler() to get out of IWL_CT_KILL state + */ +static void iwl_bg_ct_exit(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit); + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!iwl_is_ready(priv)) + return; + + /* stop ct_kill_exit_tm timer */ + del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm); + + if (tt->state == IWL_TI_CT_KILL) { + IWL_ERR(priv, + "Device temperature below critical" + "- ucode awake!\n"); + /* + * exit from CT_KILL state + * reset the current temperature reading + */ + priv->temperature = 0; + if (!priv->thermal_throttle.advanced_tt) + iwl_legacy_tt_handler(priv, + IWL_REDUCED_PERFORMANCE_THRESHOLD_2, + true); + else + iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD, + true); + } +} + +void iwl_tt_enter_ct_kill(struct iwl_priv *priv) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n"); + queue_work(priv->workqueue, &priv->ct_enter); +} +EXPORT_SYMBOL(iwl_tt_enter_ct_kill); + +void iwl_tt_exit_ct_kill(struct iwl_priv *priv) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n"); + queue_work(priv->workqueue, &priv->ct_exit); +} +EXPORT_SYMBOL(iwl_tt_exit_ct_kill); + +static void iwl_bg_tt_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work); + s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */ + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) + temp = KELVIN_TO_CELSIUS(priv->temperature); + + if (!priv->thermal_throttle.advanced_tt) + iwl_legacy_tt_handler(priv, temp, false); + else + iwl_advance_tt_handler(priv, temp, false); +} + +void iwl_tt_handler(struct iwl_priv *priv) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n"); + queue_work(priv->workqueue, &priv->tt_work); +} +EXPORT_SYMBOL(iwl_tt_handler); + +/* Thermal throttling initialization + * For advance thermal throttling: + * Initialize Thermal Index and temperature threshold table + * Initialize thermal throttling restriction table + */ +void iwl_tt_initialize(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1); + struct iwl_tt_trans *transaction; + + IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling \n"); + + memset(tt, 0, sizeof(struct iwl_tt_mgmt)); + + tt->state = IWL_TI_0; + init_timer(&priv->thermal_throttle.ct_kill_exit_tm); + priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv; + priv->thermal_throttle.ct_kill_exit_tm.function = + iwl_tt_check_exit_ct_kill; + init_timer(&priv->thermal_throttle.ct_kill_waiting_tm); + 
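+ /* ct_kill_exit_tm periodically wakes the uCode (every CT_KILL_EXIT_DURATION
+  * seconds) to re-check the temperature while in IWL_TI_CT_KILL;
+  * ct_kill_waiting_tm gives the uCode CT_KILL_WAITING_DURATION ms after the
+  * statistics request before iwl_tt_ready_for_ct_kill() forces entry into
+  * CT_KILL. */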
priv->thermal_throttle.ct_kill_waiting_tm.data = (unsigned long)priv; + priv->thermal_throttle.ct_kill_waiting_tm.function = + iwl_tt_ready_for_ct_kill; + /* setup deferred ct kill work */ + INIT_WORK(&priv->tt_work, iwl_bg_tt_work); + INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); + INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); + + if (priv->cfg->adv_thermal_throttle) { + IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n"); + tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) * + IWL_TI_STATE_MAX, GFP_KERNEL); + tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) * + IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1), + GFP_KERNEL); + if (!tt->restriction || !tt->transaction) { + IWL_ERR(priv, "Fallback to Legacy Throttling\n"); + priv->thermal_throttle.advanced_tt = false; + kfree(tt->restriction); + tt->restriction = NULL; + kfree(tt->transaction); + tt->transaction = NULL; + } else { + transaction = tt->transaction + + (IWL_TI_0 * (IWL_TI_STATE_MAX - 1)); + memcpy(transaction, &tt_range_0[0], size); + transaction = tt->transaction + + (IWL_TI_1 * (IWL_TI_STATE_MAX - 1)); + memcpy(transaction, &tt_range_1[0], size); + transaction = tt->transaction + + (IWL_TI_2 * (IWL_TI_STATE_MAX - 1)); + memcpy(transaction, &tt_range_2[0], size); + transaction = tt->transaction + + (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1)); + memcpy(transaction, &tt_range_3[0], size); + size = sizeof(struct iwl_tt_restriction) * + IWL_TI_STATE_MAX; + memcpy(tt->restriction, + &restriction_range[0], size); + priv->thermal_throttle.advanced_tt = true; + } + } else { + IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n"); + priv->thermal_throttle.advanced_tt = false; + } +} +EXPORT_SYMBOL(iwl_tt_initialize); + +/* cleanup thermal throttling management related memory and timer */ +void iwl_tt_exit(struct iwl_priv *priv) +{ + struct iwl_tt_mgmt *tt = &priv->thermal_throttle; + + /* stop ct_kill_exit_tm timer if activated */ + del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm); + /* stop ct_kill_waiting_tm timer if activated */ + del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); + cancel_work_sync(&priv->tt_work); + cancel_work_sync(&priv->ct_enter); + cancel_work_sync(&priv->ct_exit); + + if (priv->thermal_throttle.advanced_tt) { + /* free advance thermal throttling memory */ + kfree(tt->restriction); + tt->restriction = NULL; + kfree(tt->transaction); + tt->transaction = NULL; + } +} +EXPORT_SYMBOL(iwl_tt_exit); + +/* initialize to default */ +void iwl_power_initialize(struct iwl_priv *priv) +{ + u16 lctl = iwl_pcie_link_ctl(priv); + + priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); + + priv->power_data.debug_sleep_level_override = -1; + + memset(&priv->power_data.sleep_cmd, 0, + sizeof(priv->power_data.sleep_cmd)); +} +EXPORT_SYMBOL(iwl_power_initialize); diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h new file mode 100644 index 0000000..5db91c1 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-power.h @@ -0,0 +1,146 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#ifndef __iwl_power_setting_h__ +#define __iwl_power_setting_h__ + +#include "iwl-commands.h" + +#define IWL_ABSOLUTE_ZERO 0 +#define IWL_ABSOLUTE_MAX 0xFFFFFFFF +#define IWL_TT_INCREASE_MARGIN 5 +#define IWL_TT_CT_KILL_MARGIN 3 + +enum iwl_antenna_ok { + IWL_ANT_OK_NONE, + IWL_ANT_OK_SINGLE, + IWL_ANT_OK_MULTI, +}; + +/* Thermal Throttling State Machine states */ +enum iwl_tt_state { + IWL_TI_0, /* normal temperature, system power state */ + IWL_TI_1, /* high temperature detect, low power state */ + IWL_TI_2, /* higher temperature detected, lower power state */ + IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */ + IWL_TI_STATE_MAX +}; + +/** + * struct iwl_tt_restriction - Thermal Throttling restriction table + * @tx_stream: number of tx stream allowed + * @is_ht: ht enable/disable + * @rx_stream: number of rx stream allowed + * + * This table is used by advance thermal throttling management + * based on the current thermal throttling state, and determines + * the number of tx/rx streams and the status of HT operation. + */ +struct iwl_tt_restriction { + enum iwl_antenna_ok tx_stream; + enum iwl_antenna_ok rx_stream; + bool is_ht; +}; + +/** + * struct iwl_tt_trans - Thermal Throttling transaction table + * @next_state: next thermal throttling mode + * @tt_low: low temperature threshold to change state + * @tt_high: high temperature threshold to change state + * + * This is used by the advanced thermal throttling algorithm + * to determine the next thermal state to go based on the + * current temperature. 
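A small standalone sketch may help picture how this table is consumed (illustrative only, not driver code): the transactions are stored row-major, one row of (IWL_TI_STATE_MAX - 1) entries per current state, and the first entry whose [tt_low, tt_high] range contains the temperature supplies the next state. The thresholds below are read off the transition-table comment in iwl-power.c (CT_KILL at 114 degrees C, exit at 95); they are examples, not definitions from this header.

/* Illustrative row-major lookup over a flattened transition table. */
#include <stdio.h>

enum state { TI_0, TI_1, TI_2, TI_CT_KILL, TI_STATE_MAX };

struct trans { enum state next; unsigned int low, high; };

static const struct trans table[TI_STATE_MAX * (TI_STATE_MAX - 1)] = {
	/* row TI_0 */
	{TI_0, 0, 104},  {TI_1, 105, 113},        {TI_CT_KILL, 114, 0xFFFFFFFF},
	/* row TI_1 */
	{TI_0, 0, 95},   {TI_2, 110, 113},        {TI_CT_KILL, 114, 0xFFFFFFFF},
	/* row TI_2 */
	{TI_1, 0, 100},  {TI_CT_KILL, 114, 0xFFFFFFFF}, {TI_CT_KILL, 114, 0xFFFFFFFF},
	/* row TI_CT_KILL */
	{TI_0, 0, 95},   {TI_CT_KILL, 96, 0xFFFFFFFF},  {TI_CT_KILL, 96, 0xFFFFFFFF},
};

int main(void)
{
	enum state old_state = TI_1;
	unsigned int temp = 111;	/* degrees Celsius */
	int i;

	for (i = 0; i < TI_STATE_MAX - 1; i++) {
		const struct trans *t = &table[old_state * (TI_STATE_MAX - 1) + i];

		if (temp >= t->low && temp <= t->high) {
			printf("temp %u: state %d -> state %d\n",
			       temp, (int)old_state, (int)t->next);
			break;
		}
	}
	return 0;
}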
+ */ +struct iwl_tt_trans { + enum iwl_tt_state next_state; + u32 tt_low; + u32 tt_high; +}; + +/** + * struct iwl_tt_mgnt - Thermal Throttling Management structure + * @advanced_tt: advanced thermal throttle required + * @state: current Thermal Throttling state + * @tt_power_mode: Thermal Throttling power mode index + * being used to set power level when + * when thermal throttling state != IWL_TI_0 + * the tt_power_mode should set to different + * power mode based on the current tt state + * @tt_previous_temperature: last measured temperature + * @iwl_tt_restriction: ptr to restriction tbl, used by advance + * thermal throttling to determine how many tx/rx streams + * should be used in tt state; and can HT be enabled or not + * @iwl_tt_trans: ptr to adv trans table, used by advance thermal throttling + * state transaction + * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature + * @ct_kill_exit_tm: timer to exit thermal kill + */ +struct iwl_tt_mgmt { + enum iwl_tt_state state; + bool advanced_tt; + u8 tt_power_mode; + bool ct_kill_toggle; +#ifdef CONFIG_IWLWIFI_DEBUG + s32 tt_previous_temp; +#endif + struct iwl_tt_restriction *restriction; + struct iwl_tt_trans *transaction; + struct timer_list ct_kill_exit_tm; + struct timer_list ct_kill_waiting_tm; +}; + +enum iwl_power_level { + IWL_POWER_INDEX_1, + IWL_POWER_INDEX_2, + IWL_POWER_INDEX_3, + IWL_POWER_INDEX_4, + IWL_POWER_INDEX_5, + IWL_POWER_NUM +}; + +struct iwl_power_mgr { + struct iwl_powertable_cmd sleep_cmd; + int debug_sleep_level_override; + bool pci_pm; +}; + +int iwl_power_update_mode(struct iwl_priv *priv, bool force); +bool iwl_ht_enabled(struct iwl_priv *priv); +bool iwl_within_ct_kill_margin(struct iwl_priv *priv); +enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv); +enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv); +void iwl_tt_enter_ct_kill(struct iwl_priv *priv); +void iwl_tt_exit_ct_kill(struct iwl_priv *priv); +void iwl_tt_handler(struct iwl_priv *priv); +void iwl_tt_initialize(struct iwl_priv *priv); +void iwl_tt_exit(struct iwl_priv *priv); +void iwl_power_initialize(struct iwl_priv *priv); + +extern bool no_sleep_autoadjust; + +#endif /* __iwl_power_setting_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h new file mode 100644 index 0000000..d2d2a91 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -0,0 +1,577 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_prph_h__ +#define __iwl_prph_h__ + +/* + * Registers in this file are internal, not PCI bus memory mapped. + * Driver accesses these via HBUS_TARG_PRPH_* registers. 
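Since these registers are only reachable indirectly, a brief usage sketch may help (hedged; it assumes the iwl-io.h helpers added earlier in this patch and only compiles inside the driver tree): code never maps a PRPH address directly, it goes through iwl_read_prph()/iwl_write_prph(), which perform the HBUS_TARG_PRPH_{RADDR,WADDR}/{RDAT,WDAT} two-step under priv->reg_lock.

/*
 * Hedged usage sketch, not part of the patch: reaching internal (PRPH)
 * registers through the iwl-io.h helpers; the register and bit names are
 * the APMG definitions just below.
 */
#include "iwl-dev.h"
#include "iwl-io.h"
#include "iwl-prph.h"

static void example_prph_access(struct iwl_priv *priv)
{
	u32 ps;

	/* read-modify-write of an internal register: the helper grabs NIC
	 * access under priv->reg_lock and does the indirect
	 * HBUS_TARG_PRPH_WADDR/WDAT write sequence */
	iwl_set_bits_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT);

	/* plain read via HBUS_TARG_PRPH_RADDR/RDAT */
	ps = iwl_read_prph(priv, APMG_PS_CTRL_REG);
	if (ps & APMG_PS_CTRL_VAL_RESET_REQ)
		iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
				    APMG_PS_CTRL_VAL_RESET_REQ);
}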
+ */ +#define PRPH_BASE (0x00000) +#define PRPH_END (0xFFFFF) + +/* APMG (power management) constants */ +#define APMG_BASE (PRPH_BASE + 0x3000) +#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000) +#define APMG_CLK_EN_REG (APMG_BASE + 0x0004) +#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008) +#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c) +#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010) +#define APMG_RFKILL_REG (APMG_BASE + 0x0014) +#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c) +#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020) +#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058) +#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C) + +#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) +#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) + + +#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000) +#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) +#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */ +#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) +#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ +#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) + +#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) + +/** + * BSM (Bootstrap State Machine) + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down when the embedded control + * processor is sleeping (e.g. for periodic power-saving shutdowns of radio). + * + * When powering back up after sleeps (or during initial uCode load), the BSM + * internally loads the short bootstrap program from the special SRAM into the + * embedded processor's instruction SRAM, and starts the processor so it runs + * the bootstrap program. + * + * This bootstrap program loads (via PCI busmaster DMA) instructions and data + * images for a uCode program from host DRAM locations. The host driver + * indicates DRAM locations and sizes for instruction and data images via the + * four BSM_DRAM_* registers. Once the bootstrap program loads the new program, + * the new program starts automatically. + * + * The uCode used for open-source drivers includes two programs: + * + * 1) Initialization -- performs hardware calibration and sets up some + * internal data, then notifies host via "initialize alive" notification + * (struct iwl_init_alive_resp) that it has completed all of its work. + * After signal from host, it then loads and starts the runtime program. + * The initialization program must be used when initially setting up the + * NIC after loading the driver. + * + * 2) Runtime/Protocol -- performs all normal runtime operations. This + * notifies host via "alive" notification (struct iwl_alive_resp) that it + * is ready to be used. + * + * When initializing the NIC, the host driver does the following procedure: + * + * 1) Load bootstrap program (instructions only, no data image for bootstrap) + * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND + * + * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction + * images in host DRAM. 
+ * + * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked: + * BSM_WR_MEM_SRC_REG = 0 + * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND + * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image + * + * 4) Load bootstrap into instruction SRAM: + * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START + * + * 5) Wait for load completion: + * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0 + * + * 6) Enable future boot loads whenever NIC's power management triggers it: + * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN + * + * 7) Start the NIC by removing all reset bits: + * CSR_RESET = 0 + * + * The bootstrap uCode (already in instruction SRAM) loads initialization + * uCode. Initialization uCode performs data initialization, sends + * "initialize alive" notification to host, and waits for a signal from + * host to load runtime code. + * + * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction + * images in host DRAM. The last register loaded must be the instruction + * byte count register ("1" in MSbit tells initialization uCode to load + * the runtime uCode): + * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD + * + * 5) Wait for "alive" notification, then issue normal runtime commands. + * + * Data caching during power-downs: + * + * Just before the embedded controller powers down (e.g for automatic + * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA) + * a current snapshot of the embedded processor's data SRAM into host DRAM. + * This caches the data while the embedded processor's memory is powered down. + * Location and size are controlled by BSM_DRAM_DATA_* registers. + * + * NOTE: Instruction SRAM does not need to be saved, since that doesn't + * change during operation; the original image (from uCode distribution + * file) can be used for reload. + * + * When powering back up, the BSM loads the bootstrap program. Bootstrap looks + * at the BSM_DRAM_* registers, which now point to the runtime instruction + * image and the cached (modified) runtime data (*not* the initialization + * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the + * uCode from where it left off before the power-down. + * + * NOTE: Initialization uCode does *not* run as part of the save/restore + * procedure. + * + * This save/restore method is mostly for autonomous power management during + * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and + * RFKILL should use complete restarts (with total re-initialization) of uCode, + * allowing total shutdown (including BSM memory). + * + * Note that, during normal operation, the host DRAM that held the initial + * startup data for the runtime code is now being used as a backup data cache + * for modified data! If you need to completely re-initialize the NIC, make + * sure that you use the runtime data image from the uCode distribution file, + * not the modified/saved runtime data. You may want to store a separate + * "clean" runtime data image in DRAM to avoid disk reads of distribution file. 
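Steps 3 through 6 of the procedure above map onto a short sequence of PRPH writes. The following is a hedged sketch of what such a load routine might look like, not a verbatim copy of driver code: the destination address and the poll bound are passed in or chosen for illustration, and the iwl-io.h helpers from earlier in this patch are assumed.

/*
 * Hedged sketch of steps 3-6 of the bootstrap-load procedure documented
 * above.  inst_sram_dst is the instruction-SRAM lower bound mentioned in
 * step 3 (a parameter here, since that constant lives in another header).
 */
#include <linux/delay.h>
#include <linux/errno.h>

#include "iwl-dev.h"
#include "iwl-io.h"
#include "iwl-prph.h"

static int example_load_bsm(struct iwl_priv *priv, u32 inst_sram_dst,
			    u32 inst_dwords)
{
	int i;

	/* step 3: source offset in BSM SRAM, destination in instruction
	 * SRAM, and the number of dwords to copy */
	iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_write_prph(priv, BSM_WR_MEM_DST_REG, inst_sram_dst);
	iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, inst_dwords);

	/* step 4: kick off the copy into instruction SRAM */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* step 5: the START bit clears when the copy is done */
	for (i = 0; i < 100; i++) {
		if (!(iwl_read_prph(priv, BSM_WR_CTRL_REG) &
		      BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* step 6: let the BSM reload the bootstrap on future power-ups */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
	return 0;
}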
+ */ + +/* BSM bit fields */ +#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ +#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/ +#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ + +/* BSM addresses */ +#define BSM_BASE (PRPH_BASE + 0x3400) +#define BSM_END (PRPH_BASE + 0x3800) + +#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ +#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ +#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ +#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ +#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ + +/* + * Pointers and size regs for bootstrap load and data SRAM save/restore. + * NOTE: 3945 pointers use bits 31:0 of DRAM address. + * 4965 pointers use bits 35:4 of DRAM address. + */ +#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090) +#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094) +#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098) +#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C) + +/* + * BSM special memory, stays powered on during power-save sleeps. + * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1) + */ +#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800) +#define BSM_SRAM_SIZE (1024) /* bytes */ + + +/* 3945 Tx scheduler registers */ +#define ALM_SCD_BASE (PRPH_BASE + 0x2E00) +#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000) +#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004) +#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010) +#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014) +#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020) +#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C) +#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030) + +/** + * Tx Scheduler + * + * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs + * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in + * host DRAM. It steers each frame's Tx command (which contains the frame + * data) into one of up to 7 prioritized Tx DMA FIFO channels within the + * device. A queue maps to only one (selectable by driver) Tx DMA channel, + * but one DMA channel may take input from several queues. + * + * Tx DMA channels have dedicated purposes. For 4965, they are used as follows + * (cf. default_queue_to_tx_fifo in iwl-4965.c): + * + * 0 -- EDCA BK (background) frames, lowest priority + * 1 -- EDCA BE (best effort) frames, normal priority + * 2 -- EDCA VI (video) frames, higher priority + * 3 -- EDCA VO (voice) and management frames, highest priority + * 4 -- Commands (e.g. RXON, etc.) + * 5 -- HCCA short frames + * 6 -- HCCA long frames + * 7 -- not used by driver (device-internal only) + * + * For 5000 series and up, they are used slightly differently + * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c): + * + * 0 -- EDCA BK (background) frames, lowest priority + * 1 -- EDCA BE (best effort) frames, normal priority + * 2 -- EDCA VI (video) frames, higher priority + * 3 -- EDCA VO (voice) and management frames, highest priority + * 4 -- (TBD) + * 5 -- HCCA short frames + * 6 -- HCCA long frames + * 7 -- Commands + * + * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6. + * In addition, driver can map the remaining queues to Tx DMA/FIFO + * channels 0-3 to support 11n aggregation via EDCA DMA channels. 
+ * + * The driver sets up each queue to work in one of two modes: + * + * 1) Scheduler-Ack, in which the scheduler automatically supports a + * block-ack (BA) window of up to 64 TFDs. In this mode, each queue + * contains TFDs for a unique combination of Recipient Address (RA) + * and Traffic Identifier (TID), that is, traffic of a given + * Quality-Of-Service (QOS) priority, destined for a single station. + * + * In scheduler-ack mode, the scheduler keeps track of the Tx status of + * each frame within the BA window, including whether it's been transmitted, + * and whether it's been acknowledged by the receiving station. The device + * automatically processes block-acks received from the receiving STA, + * and reschedules un-acked frames to be retransmitted (successful + * Tx completion may end up being out-of-order). + * + * The driver must maintain the queue's Byte Count table in host DRAM + * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode. + * This mode does not support fragmentation. + * + * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. + * The device may automatically retry Tx, but will retry only one frame + * at a time, until receiving ACK from receiving station, or reaching + * retry limit and giving up. + * + * The command queue (#4) must use this mode! + * This mode does not require use of the Byte Count table in host DRAM. + * + * Driver controls scheduler operation via 3 means: + * 1) Scheduler registers + * 2) Shared scheduler data base in internal 4956 SRAM + * 3) Shared data in host DRAM + * + * Initialization: + * + * When loading, driver should allocate memory for: + * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs. + * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory + * (1024 bytes for each queue). + * + * After receiving "Alive" response from uCode, driver must initialize + * the scheduler (especially for queue #4, the command queue, otherwise + * the driver can't issue commands!): + */ + +/** + * Max Tx window size is the max number of contiguous TFDs that the scheduler + * can keep track of at one time when creating block-ack chains of frames. + * Note that "64" matches the number of ack bits in a block-ack packet. + * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize + * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values. + */ +#define SCD_WIN_SIZE 64 +#define SCD_FRAME_LIMIT 64 + +/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */ +#define IWL49_SCD_START_OFFSET 0xa02c00 + +/* + * 4965 tells driver SRAM address for internal scheduler structs via this reg. + * Value is valid only after "Alive" response from uCode. + */ +#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0) + +/* + * Driver may need to update queue-empty bits after changing queue's + * write and read pointers (indexes) during (re-)initialization (i.e. when + * scheduler is not tracking what's happening). + * Bit fields: + * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit + * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty + * NOTE: This register is not used by Linux driver. + */ +#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4) + +/* + * Physical base address of array of byte count (BC) circular buffers (CBs). + * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode. + * This register points to BC CB for queue 0, must be on 1024-byte boundary. + * Others are spaced by 1024 bytes. 
+ * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad. + * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff). + * Bit fields: + * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned. + */ +#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10) + +/* + * Enables any/all Tx DMA/FIFO channels. + * Scheduler generates requests for only the active channels. + * Set this to 0xff to enable all 8 channels (normal usage). + * Bit fields: + * 7- 0: Enable (1), disable (0), one bit for each channel 0-7 + */ +#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c) +/* + * Queue (x) Write Pointers (indexes, really!), one for each Tx queue. + * Initialized and updated by driver as new TFDs are added to queue. + * NOTE: If using Block Ack, index must correspond to frame's + * Start Sequence Number; index = (SSN & 0xff) + * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses? + */ +#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4) + +/* + * Queue (x) Read Pointers (indexes, really!), one for each Tx queue. + * For FIFO mode, index indicates next frame to transmit. + * For Scheduler-ACK mode, index indicates first frame in Tx window. + * Initialized by driver, updated by scheduler. + */ +#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4) + +/* + * Select which queues work in chain mode (1) vs. not (0). + * Use chain mode to build chains of aggregated frames. + * Bit fields: + * 31-16: Reserved + * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time + * NOTE: If driver sets up queue for chain mode, it should be also set up + * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x). + */ +#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0) + +/* + * Select which queues interrupt driver when scheduler increments + * a queue's read pointer (index). + * Bit fields: + * 31-16: Reserved + * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled + * NOTE: This functionality is apparently a no-op; driver relies on interrupts + * from Rx queue to read Tx command responses and update Tx queues. + */ +#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4) + +/* + * Queue search status registers. One for each queue. + * Sets up queue mode and assigns queue to Tx DMA channel. + * Bit fields: + * 19-10: Write mask/enable bits for bits 0-9 + * 9: Driver should init to "0" + * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0). + * Driver should init to "1" for aggregation mode, or "0" otherwise. + * 7-6: Driver should init to "0" + * 5: Window Size Left; indicates whether scheduler can request + * another TFD, based on window size, etc. Driver should init + * this bit to "1" for aggregation mode, or "0" for non-agg. + * 4-1: Tx FIFO to use (range 0-7). + * 0: Queue is active (1), not active (0). + * Other bits should be written as "0" + * + * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled + * via SCD_QUEUECHAIN_SEL. 
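+ *
+ * For illustration only (a sketch, not this patch's code, using the
+ * iwl_write_prph() accessor from iwl-io.h and the bit-position defines
+ * just below), activating queue txq_id on FIFO fifo in Scheduler-ACK
+ * mode could look like:
+ *
+ *	iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ *		       (1 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ *		       (fifo << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+ *		       (1 << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+ *		       (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+ *		       IWL49_SCD_QUEUE_STTS_REG_MSK);
+ *
+ * OR-ing in IWL49_SCD_QUEUE_STTS_REG_MSK sets the write-enable bits so the
+ * new field values actually take effect.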
+ */ +#define IWL49_SCD_QUEUE_STATUS_BITS(x)\ + (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4) + +/* Bit field positions */ +#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0) +#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1) +#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5) +#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) + +/* Write masks */ +#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) +#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00) + +/** + * 4965 internal SRAM structures for scheduler, shared with driver ... + * + * Driver should clear and initialize the following areas after receiving + * "Alive" response from 4965 uCode, i.e. after initial + * uCode load, or after a uCode load done for error recovery: + * + * SCD_CONTEXT_DATA_OFFSET (size 128 bytes) + * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes) + * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes) + * + * Driver accesses SRAM via HBUS_TARG_MEM_* registers. + * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR. + * All OFFSET values must be added to this base address. + */ + +/* + * Queue context. One 8-byte entry for each of 16 queues. + * + * Driver should clear this entire area (size 0x80) to 0 after receiving + * "Alive" notification from uCode. Additionally, driver should init + * each queue's entry as follows: + * + * LS Dword bit fields: + * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64. + * + * MS Dword bit fields: + * 16-22: Frame limit. Driver should init to 10 (0xa). + * + * Driver should init all other bits to 0. + * + * Init must be done after driver receives "Alive" response from 4965 uCode, + * and when setting up queue for aggregation. + */ +#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380 +#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \ + (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) + +#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) +#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) +#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) +#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) + +/* + * Tx Status Bitmap + * + * Driver should clear this entire area (size 0x100) to 0 after receiving + * "Alive" notification from uCode. Area is used only by device itself; + * no other support (besides clearing) is required from driver. + */ +#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400 + +/* + * RAxTID to queue translation mapping. + * + * When queue is in Scheduler-ACK mode, frames placed in a that queue must be + * for only one combination of receiver address (RA) and traffic ID (TID), i.e. + * one QOS priority level destined for one station (for this wireless link, + * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit + * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK + * mode, the device ignores the mapping value. + * + * Bit fields, for each 16-bit map: + * 15-9: Reserved, set to 0 + * 8-4: Index into device's station table for recipient station + * 3-0: Traffic ID (tid), range 0-15 + * + * Driver should clear this entire area (size 32 bytes) to 0 after receiving + * "Alive" notification from uCode. To update a 16-bit map value, driver + * must read a dword-aligned value from device SRAM, replace the 16-bit map + * value of interest, and write the dword value back into device SRAM. 
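+ *
+ * A minimal sketch of that read-modify-write (illustrative only, not the
+ * patch's code; iwl_read_targ_mem()/iwl_write_targ_mem() are the SRAM
+ * accessors from iwl-io.h, scd_base_addr is the value read from
+ * SCD_SRAM_BASE_ADDR, ra_tid packs the station index and TID as described
+ * above, and the offset macro is defined just below):
+ *
+ *	dw_addr = scd_base_addr +
+ *		  IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+ *	dw = iwl_read_targ_mem(priv, dw_addr);
+ *	if (txq_id & 0x1)
+ *		dw = (ra_tid << 16) | (dw & 0x0000ffff);
+ *	else
+ *		dw = ra_tid | (dw & 0xffff0000);
+ *	iwl_write_targ_mem(priv, dw_addr, dw);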
+ */ +#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500 + +/* Find translation table dword to read/write for given queue */ +#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ + ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) + +#define IWL_SCD_TXFIFO_POS_TID (0) +#define IWL_SCD_TXFIFO_POS_RA (4) +#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) + +/* 5000 SCD */ +#define IWL50_SCD_QUEUE_STTS_REG_POS_TXF (0) +#define IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE (3) +#define IWL50_SCD_QUEUE_STTS_REG_POS_WSL (4) +#define IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) +#define IWL50_SCD_QUEUE_STTS_REG_MSK (0x00FF0000) + +#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_POS (8) +#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) +#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24) +#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000) +#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0) +#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F) +#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) +#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) + +#define IWL50_SCD_CONTEXT_DATA_OFFSET (0x600) +#define IWL50_SCD_TX_STTS_BITMAP_OFFSET (0x7B1) +#define IWL50_SCD_TRANSLATE_TBL_OFFSET (0x7E0) + +#define IWL50_SCD_CONTEXT_QUEUE_OFFSET(x)\ + (IWL50_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) + +#define IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ + ((IWL50_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc) + +#define IWL50_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\ + (~(1< + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-calib.h" +#include "iwl-helpers.h" +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), + * each of which point to Receive Buffers to be filled by the NIC. These get + * used not only for Rx frames, but for any command response or notification + * from the NIC. The driver and NIC manage the Rx buffers by means + * of indexes into the circular buffer. + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. 
When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. + * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl_rx_queue_alloc() Allocates rx_free + * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_rx_queue_restock + * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls iwl_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl_rx_queue_space - Return number of free slots available in queue. + */ +int iwl_rx_queue_space(const struct iwl_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} +EXPORT_SYMBOL(iwl_rx_queue_space); + +/** + * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue + */ +void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) +{ + unsigned long flags; + u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg; + u32 reg; + + spin_lock_irqsave(&q->lock, flags); + + if (q->need_update == 0) + goto exit_unlock; + + /* If power-saving is in use, make sure device is awake */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n", + reg); + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + goto exit_unlock; + } + + q->write_actual = (q->write & ~0x7); + iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual); + + /* Else device is assumed to be awake */ + } else { + /* Device expects a multiple of 8 */ + q->write_actual = (q->write & ~0x7); + iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual); + } + + q->need_update = 0; + + exit_unlock: + spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr); +/** + * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)(dma_addr >> 8)); +} + +/** + * iwl_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. 
+ * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +void iwl_rx_queue_restock(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + int write; + + spin_lock_irqsave(&rxq->lock, flags); + write = rxq->write & ~0x7; + while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + /* Get next free Rx buffer, remove from free list */ + element = rxq->rx_free.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + iwl_rx_queue_update_write_ptr(priv, rxq); + } +} +EXPORT_SYMBOL(iwl_rx_queue_restock); + + +/** + * iwl_rx_replenish - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via iwl_rx_queue_restock. + * This is called as a scheduled work item (except for during initialization) + */ +void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + struct page *page; + unsigned long flags; + gfp_t gfp_mask = priority; + + while (1) { + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + return; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (priv->hw_params.rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(priv, "alloc_pages failed, " + "order: %d\n", + priv->hw_params.rx_page_order); + + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? 
"GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + return; + } + + spin_lock_irqsave(&rxq->lock, flags); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, priv->hw_params.rx_page_order); + return; + } + element = rxq->rx_used.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + spin_unlock_irqrestore(&rxq->lock, flags); + + rxb->page = page; + /* Get physical address of the RB */ + rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + spin_lock_irqsave(&rxq->lock, flags); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + priv->alloc_rxb_page++; + + spin_unlock_irqrestore(&rxq->lock, flags); + } +} + +void iwl_rx_replenish(struct iwl_priv *priv) +{ + unsigned long flags; + + iwl_rx_allocate(priv, GFP_KERNEL); + + spin_lock_irqsave(&priv->lock, flags); + iwl_rx_queue_restock(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} +EXPORT_SYMBOL(iwl_rx_replenish); + +void iwl_rx_replenish_now(struct iwl_priv *priv) +{ + iwl_rx_allocate(priv, GFP_ATOMIC); + + iwl_rx_queue_restock(priv); +} +EXPORT_SYMBOL(iwl_rx_replenish_now); + + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. + * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + } + + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->dma_addr); + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->bd = NULL; + rxq->rb_stts = NULL; +} +EXPORT_SYMBOL(iwl_rx_queue_free); + +int iwl_rx_queue_alloc(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct device *dev = &priv->pci_dev->dev; + int i; + + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + + /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ + rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr, + GFP_KERNEL); + if (!rxq->bd) + goto err_bd; + + rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status), + &rxq->rb_stts_dma, GFP_KERNEL); + if (!rxq->rb_stts) + goto err_rb; + + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + rxq->need_update = 0; + return 0; + +err_rb: + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->dma_addr); 
+err_bd: + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_rx_queue_alloc); + +void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + u32 rb_size; + const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ + u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ + + if (!priv->cfg->use_isr_legacy) + rb_timeout = RX_RB_TIMEOUT; + + if (priv->cfg->mod_params->amsdu_size_8K) + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; + else + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + + /* Stop Rx DMA */ + iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + + /* Reset driver's Rx queue write index */ + iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + + /* Tell device where to find RBD circular buffer in DRAM */ + iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, + (u32)(rxq->dma_addr >> 8)); + + /* Tell device where in DRAM to update its Rx status */ + iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, + rxq->rb_stts_dma >> 4); + + /* Enable Rx DMA + * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in + * the credit mechanism in 5000 HW RX FIFO + * Direct rx interrupts to hosts + * Rx buffer size 4 or 8k + * RB timeout 0x10 + * 256 RBDs + */ + iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | + FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | + rb_size| + (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| + (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); + + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + + return 0; +} + +int iwl_rxq_stop(struct iwl_priv *priv) +{ + + /* stop Rx DMA */ + iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, + FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); + + return 0; +} +EXPORT_SYMBOL(iwl_rxq_stop); + +void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) + +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacon_notif *missed_beacon; + + missed_beacon = &pkt->u.missed_beacon; + if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > + priv->missed_beacon_threshold) { + IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n", + le32_to_cpu(missed_beacon->consecutive_missed_beacons), + le32_to_cpu(missed_beacon->total_missed_becons), + le32_to_cpu(missed_beacon->num_recvd_beacons), + 
le32_to_cpu(missed_beacon->num_expected_beacons)); + if (!test_bit(STATUS_SCANNING, &priv->status)) + iwl_init_sensitivity(priv); + } +} +EXPORT_SYMBOL(iwl_rx_missed_beacon_notif); + +void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); + + if (!report->state) { + IWL_DEBUG_11H(priv, + "Spectrum Measure Notification: Start\n"); + return; + } + + memcpy(&priv->measure_report, report, sizeof(*report)); + priv->measurement_status |= MEASUREMENT_READY; +} +EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif); + + + +/* Calculate noise level, based on measurements during network silence just + * before arriving beacon. This measurement can be done only if we know + * exactly when to expect beacons, therefore only when we're associated. */ +static void iwl_rx_calc_noise(struct iwl_priv *priv) +{ + struct statistics_rx_non_phy *rx_info + = &(priv->statistics.rx.general); + int num_active_rx = 0; + int total_silence = 0; + int bcn_silence_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; + int bcn_silence_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; + int bcn_silence_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; + + if (bcn_silence_a) { + total_silence += bcn_silence_a; + num_active_rx++; + } + if (bcn_silence_b) { + total_silence += bcn_silence_b; + num_active_rx++; + } + if (bcn_silence_c) { + total_silence += bcn_silence_c; + num_active_rx++; + } + + /* Average among active antennas */ + if (num_active_rx) + priv->last_rx_noise = (total_silence / num_active_rx) - 107; + else + priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + + IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", + bcn_silence_a, bcn_silence_b, bcn_silence_c, + priv->last_rx_noise); +} + +#ifdef CONFIG_IWLWIFI_DEBUG +/* + * based on the assumption of all statistics counter are in DWORD + * FIXME: This function is for debugging, do not deal with + * the case of counters roll-over. 
+ */ +static void iwl_accumulative_statistics(struct iwl_priv *priv, + __le32 *stats) +{ + int i; + __le32 *prev_stats; + u32 *accum_stats; + u32 *delta, *max_delta; + + prev_stats = (__le32 *)&priv->statistics; + accum_stats = (u32 *)&priv->accum_statistics; + delta = (u32 *)&priv->delta_statistics; + max_delta = (u32 *)&priv->max_delta; + + for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics); + i += sizeof(__le32), stats++, prev_stats++, delta++, + max_delta++, accum_stats++) { + if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { + *delta = (le32_to_cpu(*stats) - + le32_to_cpu(*prev_stats)); + *accum_stats += *delta; + if (*delta > *max_delta) + *max_delta = *delta; + } + } + + /* reset accumulative statistics for "no-counter" type statistics */ + priv->accum_statistics.general.temperature = + priv->statistics.general.temperature; + priv->accum_statistics.general.temperature_m = + priv->statistics.general.temperature_m; + priv->accum_statistics.general.ttl_timestamp = + priv->statistics.general.ttl_timestamp; + priv->accum_statistics.tx.tx_power.ant_a = + priv->statistics.tx.tx_power.ant_a; + priv->accum_statistics.tx.tx_power.ant_b = + priv->statistics.tx.tx_power.ant_b; + priv->accum_statistics.tx.tx_power.ant_c = + priv->statistics.tx.tx_power.ant_c; +} +#endif + +#define REG_RECALIB_PERIOD (60) + +#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" +void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + int change; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + int combined_plcp_delta; + unsigned int plcp_msec; + unsigned long plcp_received_jiffies; + + IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", + (int)sizeof(priv->statistics), + le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); + + change = ((priv->statistics.general.temperature != + pkt->u.stats.general.temperature) || + ((priv->statistics.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK) != + (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); + +#ifdef CONFIG_IWLWIFI_DEBUG + iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); +#endif + /* + * check for plcp_err and trigger radio reset if it exceeds + * the plcp error threshold plcp_delta. + */ + plcp_received_jiffies = jiffies; + plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - + (long) priv->plcp_jiffies); + priv->plcp_jiffies = plcp_received_jiffies; + /* + * check to make sure plcp_msec is not 0 to prevent division + * by zero. 
+ */ + if (plcp_msec) { + combined_plcp_delta = + (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) - + le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) + + (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) - + le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err)); + + if ((combined_plcp_delta > 0) && + ((combined_plcp_delta * 100) / plcp_msec) > + priv->cfg->plcp_delta_threshold) { + /* + * if plcp_err exceed the threshold, the following + * data is printed in csv format: + * Text: plcp_err exceeded %d, + * Received ofdm.plcp_err, + * Current ofdm.plcp_err, + * Received ofdm_ht.plcp_err, + * Current ofdm_ht.plcp_err, + * combined_plcp_delta, + * plcp_msec + */ + IWL_DEBUG_RADIO(priv, PLCP_MSG, + priv->cfg->plcp_delta_threshold, + le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err), + le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), + le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err), + le32_to_cpu( + priv->statistics.rx.ofdm_ht.plcp_err), + combined_plcp_delta, plcp_msec); + + /* + * Reset the RF radio due to the high plcp + * error rate + */ + iwl_force_reset(priv, IWL_RF_RESET); + } + } + + memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); + + set_bit(STATUS_STATISTICS, &priv->status); + + /* Reschedule the statistics timer to occur in + * REG_RECALIB_PERIOD seconds to ensure we get a + * thermal update even if the uCode doesn't give + * us one */ + mod_timer(&priv->statistics_periodic, jiffies + + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); + + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { + iwl_rx_calc_noise(priv); + queue_work(priv->workqueue, &priv->run_time_calib_work); + } + if (priv->cfg->ops->lib->temp_ops.temperature && change) + priv->cfg->ops->lib->temp_ops.temperature(priv); +} +EXPORT_SYMBOL(iwl_rx_statistics); + +void iwl_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { +#ifdef CONFIG_IWLWIFI_DEBUG + memset(&priv->accum_statistics, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->delta_statistics, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->max_delta, 0, + sizeof(struct iwl_notif_statistics)); +#endif + IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); + } + iwl_rx_statistics(priv, rxb); +} +EXPORT_SYMBOL(iwl_reply_statistics); + +/* Calc max signal level (dBm) among 3 possible receivers */ +static inline int iwl_calc_rssi(struct iwl_priv *priv, + struct iwl_rx_phy_res *rx_resp) +{ + return priv->cfg->ops->utils->calc_rssi(priv, rx_resp); +} + +#ifdef CONFIG_IWLWIFI_DEBUG +/** + * iwl_dbg_report_frame - dump frame to syslog during debug sessions + * + * You may hack this function to show different aspects of received frames, + * including selective frame dumps. + * group100 parameter selects whether to show 1 out of 100 good data frames. + * All beacon and probe response frames are printed. 
+ */ +static void iwl_dbg_report_frame(struct iwl_priv *priv, + struct iwl_rx_phy_res *phy_res, u16 length, + struct ieee80211_hdr *header, int group100) +{ + u32 to_us; + u32 print_summary = 0; + u32 print_dump = 0; /* set to 1 to dump all frames' contents */ + u32 hundred = 0; + u32 dataframe = 0; + __le16 fc; + u16 seq_ctl; + u16 channel; + u16 phy_flags; + u32 rate_n_flags; + u32 tsf_low; + int rssi; + + if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX))) + return; + + /* MAC header */ + fc = header->frame_control; + seq_ctl = le16_to_cpu(header->seq_ctrl); + + /* metadata */ + channel = le16_to_cpu(phy_res->channel); + phy_flags = le16_to_cpu(phy_res->phy_flags); + rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); + + /* signal statistics */ + rssi = iwl_calc_rssi(priv, phy_res); + tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff; + + to_us = !compare_ether_addr(header->addr1, priv->mac_addr); + + /* if data frame is to us and all is good, + * (optionally) print summary for only 1 out of every 100 */ + if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) == + cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { + dataframe = 1; + if (!group100) + print_summary = 1; /* print each frame */ + else if (priv->framecnt_to_us < 100) { + priv->framecnt_to_us++; + print_summary = 0; + } else { + priv->framecnt_to_us = 0; + print_summary = 1; + hundred = 1; + } + } else { + /* print summary for all other frames */ + print_summary = 1; + } + + if (print_summary) { + char *title; + int rate_idx; + u32 bitrate; + + if (hundred) + title = "100Frames"; + else if (ieee80211_has_retry(fc)) + title = "Retry"; + else if (ieee80211_is_assoc_resp(fc)) + title = "AscRsp"; + else if (ieee80211_is_reassoc_resp(fc)) + title = "RasRsp"; + else if (ieee80211_is_probe_resp(fc)) { + title = "PrbRsp"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_beacon(fc)) { + title = "Beacon"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_atim(fc)) + title = "ATIM"; + else if (ieee80211_is_auth(fc)) + title = "Auth"; + else if (ieee80211_is_deauth(fc)) + title = "DeAuth"; + else if (ieee80211_is_disassoc(fc)) + title = "DisAssoc"; + else + title = "Frame"; + + rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); + if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) { + bitrate = 0; + WARN_ON_ONCE(1); + } else { + bitrate = iwl_rates[rate_idx].ieee / 2; + } + + /* print frame summary. + * MAC addresses show just the last byte (for brevity), + * but you can hack it to show more, if you'd like to. 
*/ + if (dataframe) + IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " + "len=%u, rssi=%d, chnl=%d, rate=%u, \n", + title, le16_to_cpu(fc), header->addr1[5], + length, rssi, channel, bitrate); + else { + /* src/dst addresses assume managed mode */ + IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, " + "len=%u, rssi=%d, tim=%lu usec, " + "phy=0x%02x, chnl=%d\n", + title, le16_to_cpu(fc), header->addr1[5], + header->addr3[5], length, rssi, + tsf_low - priv->scan_start_tsf, + phy_flags, channel); + } + } + if (print_dump) + iwl_print_hex_dump(priv, IWL_DL_RX, header, length); +} +#endif + +/* + * returns non-zero if packet should be dropped + */ +int iwl_set_decrypted_flag(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u32 decrypt_res, + struct ieee80211_rx_status *stats) +{ + u16 fc = le16_to_cpu(hdr->frame_control); + + if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) + return 0; + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return 0; + + IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res); + switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { + case RX_RES_STATUS_SEC_TYPE_TKIP: + /* The uCode has got a bad phase 1 Key, pushes the packet. + * Decryption will be done in SW. */ + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_KEY_TTAK) + break; + + case RX_RES_STATUS_SEC_TYPE_WEP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_ICV_MIC) { + /* bad ICV, the packet is destroyed since the + * decryption is inplace, drop it */ + IWL_DEBUG_RX(priv, "Packet destroyed\n"); + return -1; + } + case RX_RES_STATUS_SEC_TYPE_CCMP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_DECRYPT_OK) { + IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n"); + stats->flag |= RX_FLAG_DECRYPTED; + } + break; + + default: + break; + } + return 0; +} +EXPORT_SYMBOL(iwl_set_decrypted_flag); + +static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) +{ + u32 decrypt_out = 0; + + if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == + RX_RES_STATUS_STATION_FOUND) + decrypt_out |= (RX_RES_STATUS_STATION_FOUND | + RX_RES_STATUS_NO_STATION_INFO_MISMATCH); + + decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); + + /* packet was not encrypted */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_NONE) + return decrypt_out; + + /* packet was encrypted with unknown alg */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_ERR) + return decrypt_out; + + /* decryption was not done in HW */ + if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != + RX_MPDU_RES_STATUS_DEC_DONE_MSK) + return decrypt_out; + + switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { + + case RX_RES_STATUS_SEC_TYPE_CCMP: + /* alg is CCM: check MIC only */ + if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) + /* Bad MIC */ + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + + break; + + case RX_RES_STATUS_SEC_TYPE_TKIP: + if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { + /* Bad TTAK */ + decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; + break; + } + /* fall through if TTAK OK */ + default: + if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + break; + }; + + IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", + decrypt_in, decrypt_out); + + return decrypt_out; +} + +static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv, + struct ieee80211_hdr 
*hdr, + u16 len, + u32 ampdu_status, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct sk_buff *skb; + int ret = 0; + __le16 fc = hdr->frame_control; + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT(priv, + "Dropping packet while interface is not open.\n"); + return; + } + + /* In case of HW accelerated crypto and bad decryption, drop */ + if (!priv->cfg->mod_params->sw_crypto && + iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) + return; + + skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC); + if (!skb) { + IWL_ERR(priv, "alloc_skb failed\n"); + return; + } + + skb_reserve(skb, IWL_LINK_HDR_MAX); + skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); + + /* mac80211 currently doesn't support paged SKB. Convert it to + * linear SKB for management frame and data frame requires + * software decryption or software defragementation. */ + if (ieee80211_is_mgmt(fc) || + ieee80211_has_protected(fc) || + ieee80211_has_morefrags(fc) || + le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG || + (ieee80211_is_data_qos(fc) && + *ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)) + ret = skb_linearize(skb); + else + ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? + 0 : -ENOMEM; + + if (ret) { + kfree_skb(skb); + goto out; + } + + /* + * XXX: We cannot touch the page and its virtual memory (hdr) after + * here. It might have already been freed by the above skb change. + */ + + iwl_update_stats(priv, false, fc, len); + memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx(priv->hw, skb); + out: + priv->alloc_rxb_page--; + rxb->page = NULL; +} + +/* This is necessary only for a number of statistics, see the caller. */ +static int iwl_is_network_packet(struct iwl_priv *priv, + struct ieee80211_hdr *header) +{ + /* Filter incoming packets to determine if they are targeted toward + * this network, discarding packets coming from ourselves */ + switch (priv->iw_mode) { + case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */ + /* packets to our IBSS update information */ + return !compare_ether_addr(header->addr3, priv->bssid); + case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */ + /* packets to our IBSS update information */ + return !compare_ether_addr(header->addr2, priv->bssid); + default: + return 1; + } +} + +/* Called for REPLY_RX (legacy ABG frames), or + * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ +void iwl_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_phy_res *phy_res; + __le32 rx_pkt_status; + struct iwl4965_rx_mpdu_res_start *amsdu; + u32 len; + u32 ampdu_status; + u32 rate_n_flags; + + /** + * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. + * REPLY_RX: physical layer info is in this buffer + * REPLY_RX_MPDU_CMD: physical layer info was sent in separate + * command and cached in priv->last_phy_res + * + * Here we set up local variables depending on which command is + * received. 
+ */ + if (pkt->hdr.cmd == REPLY_RX) { + phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt); + + len = le16_to_cpu(phy_res->byte_count); + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt + len); + ampdu_status = le32_to_cpu(rx_pkt_status); + } else { + if (!priv->last_phy_res[0]) { + IWL_ERR(priv, "MPDU frame without cached PHY data\n"); + return; + } + phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1]; + amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); + len = le16_to_cpu(amsdu->byte_count); + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); + ampdu_status = iwl_translate_rx_status(priv, + le32_to_cpu(rx_pkt_status)); + } + + if ((unlikely(phy_res->cfg_phy_cnt > 20))) { + IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", + phy_res->cfg_phy_cnt); + return; + } + + if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || + !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", + le32_to_cpu(rx_pkt_status)); + return; + } + + /* This will be used in several places later */ + rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); + + /* rx_status carries information about the packet to mac80211 */ + rx_status.mactime = le64_to_cpu(phy_res->timestamp); + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel)); + rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.rate_idx = + iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); + rx_status.flag = 0; + + /* TSF isn't reliable. In order to allow smooth user experience, + * this W/A doesn't propagate it to the mac80211 */ + /*rx_status.flag |= RX_FLAG_TSFT;*/ + + priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); + + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ + rx_status.signal = iwl_calc_rssi(priv, phy_res); + + /* Meaningful noise values are available only from beacon statistics, + * which are gathered only when associated, and indicate noise + * only for the associated network channel ... + * Ignore these noise values while scanning (other channels) */ + if (iwl_is_associated(priv) && + !test_bit(STATUS_SCANNING, &priv->status)) { + rx_status.noise = priv->last_rx_noise; + } else { + rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + } + + /* Reset beacon noise level if not associated. */ + if (!iwl_is_associated(priv)) + priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + +#ifdef CONFIG_IWLWIFI_DEBUG + /* Set "1" to report good data frames in groups of 100 */ + if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX)) + iwl_dbg_report_frame(priv, phy_res, len, header, 1); +#endif + iwl_dbg_log_rx_data_frame(priv, len, header); + IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n", + rx_status.signal, rx_status.noise, + (unsigned long long)rx_status.mactime); + + /* + * "antenna number" + * + * It seems that the antenna field in the phy flags value + * is actually a bit field. This is undefined by radiotap, + * it wants an actual antenna number but I always get "7" + * for most legacy frames I receive indicating that the + * same frame was received on all three RX chains. + * + * I think this field should be removed in favor of a + * new 802.11n radiotap field "RX chains" that is defined + * as a bitmask. 
+ */ + rx_status.antenna = + (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) + >> RX_RES_PHY_FLAGS_ANTENNA_POS; + + /* set the preamble flag if appropriate */ + if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + rx_status.flag |= RX_FLAG_SHORTPRE; + + /* Set up the HT phy flags */ + if (rate_n_flags & RATE_MCS_HT_MSK) + rx_status.flag |= RX_FLAG_HT; + if (rate_n_flags & RATE_MCS_HT40_MSK) + rx_status.flag |= RX_FLAG_40MHZ; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status.flag |= RX_FLAG_SHORT_GI; + + if (iwl_is_network_packet(priv, header)) { + priv->last_rx_rssi = rx_status.signal; + priv->last_beacon_time = priv->ucode_beacon_time; + priv->last_tsf = le64_to_cpu(phy_res->timestamp); + } + + iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status, + rxb, &rx_status); +} +EXPORT_SYMBOL(iwl_rx_reply_rx); + +/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). + * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ +void iwl_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + priv->last_phy_res[0] = 1; + memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), + sizeof(struct iwl_rx_phy_res)); +} +EXPORT_SYMBOL(iwl_rx_reply_rx_phy); diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c new file mode 100644 index 0000000..dd9ff2e --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -0,0 +1,987 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#include +#include +#include + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" + +/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after + * sending probe req. This should be set long enough to hear probe responses + * from more than one AP. */ +#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ +#define IWL_ACTIVE_DWELL_TIME_52 (20) + +#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3) +#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2) + +/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. + * Must be set longer than active dwell time. + * For the most reliable scan, set > AP beacon interval (typically 100msec). 
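+ *
+ * (Worked example, for illustration: with the dwell values defined in this
+ * block, a passive 2.4 GHz channel is listened on for
+ * IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 = 100 + 20 = 120 ms
+ * when not associated, while an active 2.4 GHz pass that sends two probe
+ * requests dwells for IWL_ACTIVE_DWELL_TIME_24 +
+ * IWL_ACTIVE_DWELL_FACTOR_24GHZ * (2 + 1) = 30 + 9 = 39 ms; see
+ * iwl_get_active_dwell_time() and iwl_get_passive_dwell_time() below.)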
*/ +#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_PASSIVE_DWELL_TIME_52 (10) +#define IWL_PASSIVE_DWELL_BASE (100) +#define IWL_CHANNEL_TUNE_TIME 5 + + + +/** + * iwl_scan_cancel - Cancel any currently executing HW scan + * + * NOTE: priv->mutex is not required before calling this function + */ +int iwl_scan_cancel(struct iwl_priv *priv) +{ + if (!test_bit(STATUS_SCAN_HW, &priv->status)) { + clear_bit(STATUS_SCANNING, &priv->status); + return 0; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n"); + set_bit(STATUS_SCAN_ABORTING, &priv->status); + queue_work(priv->workqueue, &priv->abort_scan); + + } else + IWL_DEBUG_SCAN(priv, "Scan abort already in progress.\n"); + + return test_bit(STATUS_SCANNING, &priv->status); + } + + return 0; +} +EXPORT_SYMBOL(iwl_scan_cancel); +/** + * iwl_scan_cancel_timeout - Cancel any currently executing HW scan + * @ms: amount of time to wait (in milliseconds) for scan to abort + * + * NOTE: priv->mutex must be held before calling this function + */ +int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) +{ + unsigned long now = jiffies; + int ret; + + ret = iwl_scan_cancel(priv); + if (ret && ms) { + mutex_unlock(&priv->mutex); + while (!time_after(jiffies, now + msecs_to_jiffies(ms)) && + test_bit(STATUS_SCANNING, &priv->status)) + msleep(1); + mutex_lock(&priv->mutex); + + return test_bit(STATUS_SCANNING, &priv->status); + } + + return ret; +} +EXPORT_SYMBOL(iwl_scan_cancel_timeout); + +static int iwl_send_scan_abort(struct iwl_priv *priv) +{ + int ret = 0; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_ABORT_CMD, + .flags = CMD_WANT_SKB, + }; + + /* If there isn't a scan actively going on in the hardware + * then we are in between scan bands and not actually + * actively scanning, so don't send the abort command */ + if (!test_bit(STATUS_SCAN_HW, &priv->status)) { + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + return 0; + } + + ret = iwl_send_cmd_sync(priv, &cmd); + if (ret) { + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + return ret; + } + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->u.status != CAN_ABORT_STATUS) { + /* The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before we + * the microcode has notified us that a scan is + * completed. 
*/ + IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", pkt->u.status); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + clear_bit(STATUS_SCAN_HW, &priv->status); + } + + iwl_free_pages(priv, cmd.reply_page); + + return ret; +} + +/* Service response to REPLY_SCAN_CMD (0x80) */ +static void iwl_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanreq_notification *notif = + (struct iwl_scanreq_notification *)pkt->u.raw; + + IWL_DEBUG_RX(priv, "Scan request status = 0x%x\n", notif->status); +#endif +} + +/* Service SCAN_START_NOTIFICATION (0x82) */ +static void iwl_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanstart_notification *notif = + (struct iwl_scanstart_notification *)pkt->u.raw; + priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); + IWL_DEBUG_SCAN(priv, "Scan start: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + notif->status, notif->beacon_timer); +} + +/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ +static void iwl_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanresults_notification *notif = + (struct iwl_scanresults_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN(priv, "Scan ch.res: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d " + "elapsed=%lu usec\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + le32_to_cpu(notif->statistics[0]), + le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); +#endif + + if (!priv->is_internal_short_scan) + priv->next_scan_jiffies = 0; +} + +/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ +static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; + + IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", + scan_notif->scanned_channels, + scan_notif->tsf_low, + scan_notif->tsf_high, scan_notif->status); +#endif + + /* The HW is no longer scanning */ + clear_bit(STATUS_SCAN_HW, &priv->status); + + IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n", + (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? 
+ "2.4" : "5.2", + jiffies_to_msecs(elapsed_jiffies + (priv->scan_pass_start, jiffies))); + + /* Remove this scanned band from the list of pending + * bands to scan, band G precedes A in order of scanning + * as seen in iwl_bg_request_scan */ + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) + priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ); + else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) + priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ); + + /* If a request to abort was given, or the scan did not succeed + * then we reset the scan state machine and terminate, + * re-queuing another scan if one has been requested */ + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_INFO(priv, "Aborted scan completed.\n"); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + } else { + /* If there are more bands on this scan pass reschedule */ + if (priv->scan_bands) + goto reschedule; + } + + if (!priv->is_internal_short_scan) + priv->next_scan_jiffies = 0; + + IWL_DEBUG_INFO(priv, "Setting scan to off\n"); + + clear_bit(STATUS_SCANNING, &priv->status); + + IWL_DEBUG_INFO(priv, "Scan took %dms\n", + jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies))); + + queue_work(priv->workqueue, &priv->scan_completed); + + return; + +reschedule: + priv->scan_pass_start = jiffies; + queue_work(priv->workqueue, &priv->request_scan); +} + +void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) +{ + /* scan handlers */ + priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan; + priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif; + priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = + iwl_rx_scan_results_notif; + priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = + iwl_rx_scan_complete_notif; +} +EXPORT_SYMBOL(iwl_setup_rx_scan_handlers); + +inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + u8 n_probes) +{ + if (band == IEEE80211_BAND_5GHZ) + return IWL_ACTIVE_DWELL_TIME_52 + + IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); + else + return IWL_ACTIVE_DWELL_TIME_24 + + IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); +} +EXPORT_SYMBOL(iwl_get_active_dwell_time); + +u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band) +{ + u16 passive = (band == IEEE80211_BAND_2GHZ) ? 
+ IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; + + if (iwl_is_associated(priv)) { + /* If we're associated, we clamp the maximum passive + * dwell time to be 98% of the beacon interval (minus + * 2 * channel tune time) */ + passive = priv->beacon_int; + if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive) + passive = IWL_PASSIVE_DWELL_BASE; + passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; + } + + return passive; +} +EXPORT_SYMBOL(iwl_get_passive_dwell_time); + +static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, + enum ieee80211_band band, + struct iwl_scan_channel *scan_ch) +{ + const struct ieee80211_supported_band *sband; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int i, added = 0; + u16 channel = 0; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) { + IWL_ERR(priv, "invalid band\n"); + return added; + } + + active_dwell = iwl_get_active_dwell_time(priv, band, 0); + passive_dwell = iwl_get_passive_dwell_time(priv, band); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + /* only scan single channel, good enough to reset the RF */ + /* pick the first valid not in-use channel */ + if (band == IEEE80211_BAND_5GHZ) { + for (i = 14; i < priv->channel_count; i++) { + if (priv->channel_info[i].channel != + le16_to_cpu(priv->staging_rxon.channel)) { + channel = priv->channel_info[i].channel; + ch_info = iwl_get_channel_info(priv, + band, channel); + if (is_channel_valid(ch_info)) + break; + } + } + } else { + for (i = 0; i < 14; i++) { + if (priv->channel_info[i].channel != + le16_to_cpu(priv->staging_rxon.channel)) { + channel = + priv->channel_info[i].channel; + ch_info = iwl_get_channel_info(priv, + band, channel); + if (is_channel_valid(ch_info)) + break; + } + } + } + if (channel) { + scan_ch->channel = cpu_to_le16(channel); + scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + added++; + } else + IWL_ERR(priv, "no valid channel found\n"); + return added; +} + +static int iwl_get_channels_for_scan(struct iwl_priv *priv, + enum ieee80211_band band, + u8 is_active, u8 n_probes, + struct iwl_scan_channel *scan_ch) +{ + struct ieee80211_channel *chan; + const struct ieee80211_supported_band *sband; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + u16 channel; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) + return 0; + + active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); + passive_dwell = iwl_get_passive_dwell_time(priv, band); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { + chan = priv->scan_request->channels[i]; + + if (chan->band != band) + continue; + + channel = ieee80211_frequency_to_channel(chan->center_freq); + scan_ch->channel = cpu_to_le16(channel); + + ch_info = iwl_get_channel_info(priv, band, channel); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n", + channel); + continue; + } + + if (!is_active || is_channel_passive(ch_info) || + (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) + 
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; + else + scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; + + if (n_probes) + scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + + IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n", + channel, le32_to_cpu(scan_ch->type), + (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? + "ACTIVE" : "PASSIVE", + (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? + active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added); + return added; +} + +void iwl_init_scan_params(struct iwl_priv *priv) +{ + u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; + if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) + priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; + if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) + priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; +} +EXPORT_SYMBOL(iwl_init_scan_params); + +static int iwl_scan_initiate(struct iwl_priv *priv) +{ + IWL_DEBUG_INFO(priv, "Starting scan...\n"); + set_bit(STATUS_SCANNING, &priv->status); + priv->is_internal_short_scan = false; + priv->scan_start = jiffies; + priv->scan_pass_start = priv->scan_start; + + queue_work(priv->workqueue, &priv->request_scan); + + return 0; +} + +#define IWL_DELAY_NEXT_SCAN (HZ*2) + +int iwl_mac_hw_scan(struct ieee80211_hw *hw, + struct cfg80211_scan_request *req) +{ + unsigned long flags; + struct iwl_priv *priv = hw->priv; + int ret, i; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->mutex); + spin_lock_irqsave(&priv->lock, flags); + + if (!iwl_is_ready_rf(priv)) { + ret = -EIO; + IWL_DEBUG_MAC80211(priv, "leave - not ready or exit pending\n"); + goto out_unlock; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); + ret = -EAGAIN; + goto out_unlock; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); + ret = -EAGAIN; + goto out_unlock; + } + + /* We don't schedule scan within next_scan_jiffies period. + * Avoid scanning during possible EAPOL exchange, return + * success immediately. + */ + if (priv->next_scan_jiffies && + time_after(priv->next_scan_jiffies, jiffies)) { + IWL_DEBUG_SCAN(priv, "scan rejected: within next scan period\n"); + queue_work(priv->workqueue, &priv->scan_completed); + ret = 0; + goto out_unlock; + } + + priv->scan_bands = 0; + for (i = 0; i < req->n_channels; i++) + priv->scan_bands |= BIT(req->channels[i]->band); + + priv->scan_request = req; + + ret = iwl_scan_initiate(priv); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +out_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->mutex); + + return ret; +} +EXPORT_SYMBOL(iwl_mac_hw_scan); + +/* + * internal short scan, this function should only been called while associated. 
+ * It will reset and tune the radio to prevent possible RF related problem + */ +int iwl_internal_short_hw_scan(struct iwl_priv *priv) +{ + int ret = 0; + + if (!iwl_is_ready_rf(priv)) { + ret = -EIO; + IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); + goto out; + } + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); + ret = -EAGAIN; + goto out; + } + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); + ret = -EAGAIN; + goto out; + } + + priv->scan_bands = 0; + if (priv->band == IEEE80211_BAND_5GHZ) + priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ); + else + priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ); + + IWL_DEBUG_SCAN(priv, "Start internal short scan...\n"); + set_bit(STATUS_SCANNING, &priv->status); + priv->is_internal_short_scan = true; + queue_work(priv->workqueue, &priv->request_scan); + +out: + return ret; +} +EXPORT_SYMBOL(iwl_internal_short_hw_scan); + +#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) + +void iwl_bg_scan_check(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, scan_check.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_SCANNING, &priv->status) || + test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan completion watchdog resetting " + "adapter (%dms)\n", + jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + iwl_send_scan_abort(priv); + } + mutex_unlock(&priv->mutex); +} +EXPORT_SYMBOL(iwl_bg_scan_check); + +/** + * iwl_fill_probe_req - fill in all required fields and IE for probe request + */ + +u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, + const u8 *ies, int ie_len, int left) +{ + int len = 0; + u8 *pos = NULL; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + memcpy(frame->da, iwl_bcast_addr, ETH_ALEN); + memcpy(frame->sa, priv->mac_addr, ETH_ALEN); + memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN); + frame->seq_ctrl = 0; + + len += 24; + + /* ...next IE... 
*/ + pos = &frame->u.probe_req.variable[0]; + + /* fill in our indirect SSID IE */ + left -= 2; + if (left < 0) + return 0; + *pos++ = WLAN_EID_SSID; + if (!priv->is_internal_short_scan && + priv->scan_request->n_ssids) { + struct cfg80211_ssid *ssid = + priv->scan_request->ssids; + + /* Broadcast if ssid_len is 0 */ + *pos++ = ssid->ssid_len; + memcpy(pos, ssid->ssid, ssid->ssid_len); + pos += ssid->ssid_len; + len += 2 + ssid->ssid_len; + } else { + *pos++ = 0; + len += 2; + } + + if (WARN_ON(left < ie_len)) + return len; + + if (ies) + memcpy(pos, ies, ie_len); + len += ie_len; + left -= ie_len; + + return (u16)len; +} +EXPORT_SYMBOL(iwl_fill_probe_req); + +static void iwl_bg_request_scan(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, request_scan); + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; + struct iwl_scan_cmd *scan; + struct ieee80211_conf *conf = NULL; + int ret = 0; + u32 rate_flags = 0; + u16 cmd_len; + u16 rx_chain = 0; + enum ieee80211_band band; + u8 n_probes = 0; + u8 rx_ant = priv->hw_params.valid_rx_ant; + u8 rate; + bool is_active = false; + int chan_mod; + u8 active_chains; + + conf = ieee80211_get_hw_conf(priv->hw); + + mutex_lock(&priv->mutex); + + cancel_delayed_work(&priv->scan_check); + + if (!iwl_is_ready(priv)) { + IWL_WARN(priv, "request scan called when driver not ready.\n"); + goto done; + } + + /* Make sure the scan wasn't canceled before this queued work + * was given the chance to run... */ + if (!test_bit(STATUS_SCANNING, &priv->status)) + goto done; + + /* This should never be called or scheduled if there is currently + * a scan active in the hardware. */ + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. " + "Ignoring second request.\n"); + ret = -EIO; + goto done; + } + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n"); + goto done; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n"); + goto done; + } + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n"); + goto done; + } + + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_DEBUG_HC(priv, "Scan request while uninitialized. 
Queuing.\n"); + goto done; + } + + if (!priv->scan_bands) { + IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n"); + goto done; + } + + if (!priv->scan) { + priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan) { + ret = -ENOMEM; + goto done; + } + } + scan = priv->scan; + memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_is_associated(priv)) { + u16 interval = 0; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + unsigned long flags; + + IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); + spin_lock_irqsave(&priv->lock, flags); + interval = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(200 * 1024); + if (!interval) + interval = suspend_time; + + extra = (suspend_time / interval) << 22; + scan_suspend_time = (extra | + ((suspend_time % interval) * 1024)); + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + if (priv->is_internal_short_scan) { + IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); + } else if (priv->scan_request->n_ssids) { + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + /* + * The first SSID to scan is stuffed into the probe request + * template and the remaining ones are handled through the + * direct_scan array. + */ + if (priv->scan_request->n_ssids > 1) { + int i, p = 0; + for (i = 1; i < priv->scan_request->n_ssids; i++) { + if (!priv->scan_request->ssids[i].ssid_len) + continue; + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + priv->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + priv->scan_request->ssids[i].ssid, + priv->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + } + is_active = true; + } else + IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); + + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) { + band = IEEE80211_BAND_2GHZ; + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK) + >> RXON_FLG_CHANNEL_MODE_POS; + if (chan_mod == CHANNEL_MODE_PURE_40) { + rate = IWL_RATE_6M_PLCP; + } else { + rate = IWL_RATE_1M_PLCP; + rate_flags = RATE_MCS_CCK_MSK; + } + scan->good_CRC_th = 0; + } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { + band = IEEE80211_BAND_5GHZ; + rate = IWL_RATE_6M_PLCP; + /* + * If active scanning is requested but a certain channel + * is marked passive, we can do active scanning if we + * detect transmissions. + */ + scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; + + /* Force use of chains B and C (0x6) for scan Rx for 4965 + * Avoid A (0x1) because of its off-channel reception on A-band. 
+ */ + if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) + rx_ant = ANT_BC; + } else { + IWL_WARN(priv, "Invalid scan band count\n"); + goto done; + } + + priv->scan_tx_ant[band] = + iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]); + rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); + scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); + + /* In power save mode use one chain, otherwise use all chains */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* rx_ant has been set to all valid chains previously */ + active_chains = rx_ant & + ((u8)(priv->chain_noise_data.active_chains)); + if (!active_chains) + active_chains = rx_ant; + + IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n", + priv->chain_noise_data.active_chains); + + rx_ant = first_antenna(active_chains); + } + /* MIMO is not used here, but value is required */ + rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; + rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; + scan->rx_chain = cpu_to_le16(rx_chain); + if (!priv->is_internal_short_scan) { + cmd_len = iwl_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + priv->scan_request->ie, + priv->scan_request->ie_len, + IWL_MAX_SCAN_SIZE - sizeof(*scan)); + } else { + cmd_len = iwl_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + NULL, 0, + IWL_MAX_SCAN_SIZE - sizeof(*scan)); + + } + scan->tx_cmd.len = cpu_to_le16(cmd_len); + if (iwl_is_monitor_mode(priv)) + scan->filter_flags = RXON_FILTER_PROMISC_MSK; + + scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | + RXON_FILTER_BCON_AWARE_MSK); + + if (priv->is_internal_short_scan) { + scan->channel_count = + iwl_get_single_channel_for_scan(priv, band, + (void *)&scan->data[le16_to_cpu( + scan->tx_cmd.len)]); + } else { + scan->channel_count = + iwl_get_channels_for_scan(priv, band, + is_active, n_probes, + (void *)&scan->data[le16_to_cpu( + scan->tx_cmd.len)]); + } + if (scan->channel_count == 0) { + IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); + goto done; + } + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + ret = iwl_send_cmd_sync(priv, &cmd); + if (ret) + goto done; + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IWL_SCAN_CHECK_WATCHDOG); + + mutex_unlock(&priv->mutex); + return; + + done: + /* Cannot perform scan. Make sure we clear scanning + * bits from status so next scan request can be performed. 
+ * If we don't clear scanning status bit here all next scan + * will fail + */ + clear_bit(STATUS_SCAN_HW, &priv->status); + clear_bit(STATUS_SCANNING, &priv->status); + /* inform mac80211 scan aborted */ + queue_work(priv->workqueue, &priv->scan_completed); + mutex_unlock(&priv->mutex); +} + +void iwl_bg_abort_scan(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); + + if (!test_bit(STATUS_READY, &priv->status) || + !test_bit(STATUS_GEO_CONFIGURED, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + set_bit(STATUS_SCAN_ABORTING, &priv->status); + iwl_send_scan_abort(priv); + + mutex_unlock(&priv->mutex); +} +EXPORT_SYMBOL(iwl_bg_abort_scan); + +void iwl_bg_scan_completed(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, scan_completed); + + IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); + + cancel_delayed_work(&priv->scan_check); + + if (!priv->is_internal_short_scan) + ieee80211_scan_completed(priv->hw, false); + else { + priv->is_internal_short_scan = false; + IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); + } + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* Since setting the TXPOWER may have been deferred while + * performing the scan, fire one off */ + mutex_lock(&priv->mutex); + iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); + mutex_unlock(&priv->mutex); +} +EXPORT_SYMBOL(iwl_bg_scan_completed); + +void iwl_setup_scan_deferred_work(struct iwl_priv *priv) +{ + INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); + INIT_WORK(&priv->request_scan, iwl_bg_request_scan); + INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); + INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); +} +EXPORT_SYMBOL(iwl_setup_scan_deferred_work); + diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h new file mode 100644 index 0000000..af6babe --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h @@ -0,0 +1,92 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_spectrum_h__ +#define __iwl_spectrum_h__ +enum { /* ieee80211_basic_report.map */ + IEEE80211_BASIC_MAP_BSS = (1 << 0), + IEEE80211_BASIC_MAP_OFDM = (1 << 1), + IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2), + IEEE80211_BASIC_MAP_RADAR = (1 << 3), + IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4), + /* Bits 5-7 are reserved */ + +}; +struct ieee80211_basic_report { + u8 channel; + __le64 start_time; + __le16 duration; + u8 map; +} __attribute__ ((packed)); + +enum { /* ieee80211_measurement_request.mode */ + /* Bit 0 is reserved */ + IEEE80211_MEASUREMENT_ENABLE = (1 << 1), + IEEE80211_MEASUREMENT_REQUEST = (1 << 2), + IEEE80211_MEASUREMENT_REPORT = (1 << 3), + /* Bits 4-7 are reserved */ +}; + +enum { + IEEE80211_REPORT_BASIC = 0, /* required */ + IEEE80211_REPORT_CCA = 1, /* optional */ + IEEE80211_REPORT_RPI = 2, /* optional */ + /* 3-255 reserved */ +}; + +struct ieee80211_measurement_params { + u8 channel; + __le64 start_time; + __le16 duration; +} __attribute__ ((packed)); + +struct ieee80211_info_element { + u8 id; + u8 len; + u8 data[0]; +} __attribute__ ((packed)); + +struct ieee80211_measurement_request { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + struct ieee80211_measurement_params params[0]; +} __attribute__ ((packed)); + +struct ieee80211_measurement_report { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + union { + struct ieee80211_basic_report basic[0]; + } u; +} __attribute__ ((packed)); + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c new file mode 100644 index 0000000..4a6686f --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -0,0 +1,1293 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" + +#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ +#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ + +u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr) +{ + int i; + int start = 0; + int ret = IWL_INVALID_STATION; + unsigned long flags; + + if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) || + (priv->iw_mode == NL80211_IFTYPE_AP)) + start = IWL_STA_ID; + + if (is_broadcast_ether_addr(addr)) + return priv->hw_params.bcast_sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + for (i = start; i < priv->hw_params.max_stations; i++) + if (priv->stations[i].used && + (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr))) { + ret = i; + goto out; + } + + IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n", + addr, priv->num_stations); + + out: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} +EXPORT_SYMBOL(iwl_find_station); + +int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) +{ + if (priv->iw_mode == NL80211_IFTYPE_STATION) { + return IWL_AP_ID; + } else { + u8 *da = ieee80211_get_DA(hdr); + return iwl_find_station(priv, da); + } +} +EXPORT_SYMBOL(iwl_get_ra_sta_id); + +/* priv->sta_lock must be held */ +static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) +{ + + if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) + IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u addr %pM\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + + if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { + IWL_DEBUG_ASSOC(priv, + "STA id %u addr %pM already present in uCode (according to driver)\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + } else { + priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; + IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + } +} + +static void iwl_process_add_sta_resp(struct iwl_priv *priv, + struct iwl_addsta_cmd *addsta, + struct iwl_rx_packet *pkt, + bool sync) +{ + u8 sta_id = addsta->sta.sta_id; + unsigned long flags; + + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", + pkt->hdr.flags); + return; + } + + IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", + sta_id); + + spin_lock_irqsave(&priv->sta_lock, flags); + + switch (pkt->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); + iwl_sta_ucode_activate(priv, sta_id); + break; + case ADD_STA_NO_ROOM_IN_TABLE: + IWL_ERR(priv, "Adding station %d failed, no room in table.\n", + sta_id); + break; + case ADD_STA_NO_BLOCK_ACK_RESOURCE: + IWL_ERR(priv, "Adding station %d failed, no block ack resource.\n", + sta_id); + break; + case ADD_STA_MODIFY_NON_EXIST_STA: + IWL_ERR(priv, "Attempting to modify non-existing station %d \n", + sta_id); + break; + default: + IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", + pkt->u.add_sta.status); + break; + } + + IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", + priv->stations[sta_id].sta.mode == + STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", + sta_id, priv->stations[sta_id].sta.sta.addr); + + /* + * XXX: The MAC address in the command buffer is often changed from + * the original sent to the device. 
That is, the MAC address + * written to the command buffer often is not the same MAC address + * read from the command buffer when the command returns. This + * issue has not yet been resolved and this debugging is left to + * observe the problem. + */ + IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", + priv->stations[sta_id].sta.mode == + STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", + addsta->sta.addr); + + /* + * Determine if we wanted to modify or add a station, + * if adding a station succeeded we have some more initialization + * to do when using station notification. TODO + */ + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +static void iwl_add_sta_callback(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt) +{ + struct iwl_addsta_cmd *addsta = + (struct iwl_addsta_cmd *)cmd->cmd.payload; + + iwl_process_add_sta_resp(priv, addsta, pkt, false); + +} + +int iwl_send_add_sta(struct iwl_priv *priv, + struct iwl_addsta_cmd *sta, u8 flags) +{ + struct iwl_rx_packet *pkt = NULL; + int ret = 0; + u8 data[sizeof(*sta)]; + struct iwl_host_cmd cmd = { + .id = REPLY_ADD_STA, + .flags = flags, + .data = data, + }; + + if (flags & CMD_ASYNC) + cmd.callback = iwl_add_sta_callback; + else + cmd.flags |= CMD_WANT_SKB; + + cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); + ret = iwl_send_cmd(priv, &cmd); + + if (ret || (flags & CMD_ASYNC)) + return ret; + + if (ret == 0) { + pkt = (struct iwl_rx_packet *)cmd.reply_page; + iwl_process_add_sta_resp(priv, sta, pkt, true); + } + iwl_free_pages(priv, cmd.reply_page); + + return ret; +} +EXPORT_SYMBOL(iwl_send_add_sta); + +static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, + struct ieee80211_sta_ht_cap *sta_ht_inf) +{ + __le32 sta_flags; + u8 mimo_ps_mode; + + if (!sta_ht_inf || !sta_ht_inf->ht_supported) + goto done; + + mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; + IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n", + (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? + "static" : + (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 
+ "dynamic" : "disabled"); + + sta_flags = priv->stations[index].sta.station_flags; + + sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); + + switch (mimo_ps_mode) { + case WLAN_HT_CAP_SM_PS_STATIC: + sta_flags |= STA_FLG_MIMO_DIS_MSK; + break; + case WLAN_HT_CAP_SM_PS_DYNAMIC: + sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; + break; + case WLAN_HT_CAP_SM_PS_DISABLED: + break; + default: + IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode); + break; + } + + sta_flags |= cpu_to_le32( + (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); + + sta_flags |= cpu_to_le32( + (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); + + if (iwl_is_ht40_tx_allowed(priv, sta_ht_inf)) + sta_flags |= STA_FLG_HT40_EN_MSK; + else + sta_flags &= ~STA_FLG_HT40_EN_MSK; + + priv->stations[index].sta.station_flags = sta_flags; + done: + return; +} + +/** + * iwl_add_station - Add station to tables in driver and device + */ +u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags, + struct ieee80211_sta_ht_cap *ht_info) +{ + struct iwl_station_entry *station; + unsigned long flags_spin; + int i; + int sta_id = IWL_INVALID_STATION; + u16 rate; + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + if (is_ap) + sta_id = IWL_AP_ID; + else if (is_broadcast_ether_addr(addr)) + sta_id = priv->hw_params.bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) { + if (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + sta_id = i; + break; + } + + if (!priv->stations[i].used && + sta_id == IWL_INVALID_STATION) + sta_id = i; + } + + /* These two conditions have the same outcome, but keep them separate + since they have different meanings */ + if (unlikely(sta_id == IWL_INVALID_STATION)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return sta_id; + } + + if (priv->stations[sta_id].used && + !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return sta_id; + } + + + station = &priv->stations[sta_id]; + station->used = IWL_STA_DRIVER_ACTIVE; + IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", + sta_id, addr); + priv->num_stations++; + + /* Set up the REPLY_ADD_STA command to send to device */ + memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd)); + memcpy(station->sta.sta.addr, addr, ETH_ALEN); + station->sta.mode = 0; + station->sta.sta.sta_id = sta_id; + station->sta.station_flags = 0; + + /* BCAST station and IBSS stations do not work in HT mode */ + if (sta_id != priv->hw_params.bcast_sta_id && + priv->iw_mode != NL80211_IFTYPE_ADHOC) + iwl_set_ht_add_station(priv, sta_id, ht_info); + + /* 3945 only */ + rate = (priv->band == IEEE80211_BAND_5GHZ) ? + IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP; + /* Turn on both antennas for the station... 
*/ + station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + /* Add station to device's station table */ + iwl_send_add_sta(priv, &station->sta, flags); + return sta_id; + +} +EXPORT_SYMBOL(iwl_add_station); + +static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr) +{ + unsigned long flags; + u8 sta_id = iwl_find_station(priv, addr); + + BUG_ON(sta_id == IWL_INVALID_STATION); + + IWL_DEBUG_ASSOC(priv, "Removed STA from Ucode: %pM\n", addr); + + spin_lock_irqsave(&priv->sta_lock, flags); + + /* Ucode must be active and driver must be non active */ + if (priv->stations[sta_id].used != IWL_STA_UCODE_ACTIVE) + IWL_ERR(priv, "removed non active STA %d\n", sta_id); + + priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; + + memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +static void iwl_remove_sta_callback(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt) +{ + struct iwl_rem_sta_cmd *rm_sta = + (struct iwl_rem_sta_cmd *)cmd->cmd.payload; + const u8 *addr = rm_sta->addr; + + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", + pkt->hdr.flags); + return; + } + + switch (pkt->u.rem_sta.status) { + case REM_STA_SUCCESS_MSK: + iwl_sta_ucode_deactivate(priv, addr); + break; + default: + IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); + break; + } +} + +static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr, + u8 flags) +{ + struct iwl_rx_packet *pkt; + int ret; + + struct iwl_rem_sta_cmd rm_sta_cmd; + + struct iwl_host_cmd cmd = { + .id = REPLY_REMOVE_STA, + .len = sizeof(struct iwl_rem_sta_cmd), + .flags = flags, + .data = &rm_sta_cmd, + }; + + memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); + rm_sta_cmd.num_sta = 1; + memcpy(&rm_sta_cmd.addr, addr , ETH_ALEN); + + if (flags & CMD_ASYNC) + cmd.callback = iwl_remove_sta_callback; + else + cmd.flags |= CMD_WANT_SKB; + ret = iwl_send_cmd(priv, &cmd); + + if (ret || (flags & CMD_ASYNC)) + return ret; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", + pkt->hdr.flags); + ret = -EIO; + } + + if (!ret) { + switch (pkt->u.rem_sta.status) { + case REM_STA_SUCCESS_MSK: + iwl_sta_ucode_deactivate(priv, addr); + IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); + break; + default: + ret = -EIO; + IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); + break; + } + } + iwl_free_pages(priv, cmd.reply_page); + + return ret; +} + +/** + * iwl_remove_station - Remove driver's knowledge of station. 
+ */ +int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap) +{ + int sta_id = IWL_INVALID_STATION; + int i, ret = -EINVAL; + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + + if (is_ap) + sta_id = IWL_AP_ID; + else if (is_broadcast_ether_addr(addr)) + sta_id = priv->hw_params.bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) + if (priv->stations[i].used && + !compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + sta_id = i; + break; + } + + if (unlikely(sta_id == IWL_INVALID_STATION)) + goto out; + + IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", + sta_id, addr); + + if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { + IWL_ERR(priv, "Removing %pM but non DRIVER active\n", + addr); + goto out; + } + + if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { + IWL_ERR(priv, "Removing %pM but non UCODE active\n", + addr); + goto out; + } + + + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + + priv->num_stations--; + + BUG_ON(priv->num_stations < 0); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + ret = iwl_send_remove_station(priv, addr, CMD_ASYNC); + return ret; +out: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} + +/** + * iwl_clear_stations_table - Clear the driver's station table + * + * NOTE: This does not clear or otherwise alter the device's station table. + */ +void iwl_clear_stations_table(struct iwl_priv *priv) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->sta_lock, flags); + + if (iwl_is_alive(priv) && + !test_bit(STATUS_EXIT_PENDING, &priv->status) && + iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL)) + IWL_ERR(priv, "Couldn't clear the station table\n"); + + priv->num_stations = 0; + memset(priv->stations, 0, sizeof(priv->stations)); + + /* clean ucode key table bit map */ + priv->ucode_key_table = 0; + + /* keep track of static keys */ + for (i = 0; i < WEP_KEYS_MAX ; i++) { + if (priv->wep_keys[i].key_size) + set_bit(i, &priv->ucode_key_table); + } + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} +EXPORT_SYMBOL(iwl_clear_stations_table); + +int iwl_get_free_ucode_key_index(struct iwl_priv *priv) +{ + int i; + + for (i = 0; i < STA_KEY_MAX_NUM; i++) + if (!test_and_set_bit(i, &priv->ucode_key_table)) + return i; + + return WEP_INVALID_OFFSET; +} +EXPORT_SYMBOL(iwl_get_free_ucode_key_index); + +int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) +{ + int i, not_empty = 0; + u8 buff[sizeof(struct iwl_wep_cmd) + + sizeof(struct iwl_wep_key) * WEP_KEYS_MAX]; + struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff; + size_t cmd_size = sizeof(struct iwl_wep_cmd); + struct iwl_host_cmd cmd = { + .id = REPLY_WEPKEY, + .data = wep_cmd, + .flags = CMD_ASYNC, + }; + + memset(wep_cmd, 0, cmd_size + + (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX)); + + for (i = 0; i < WEP_KEYS_MAX ; i++) { + wep_cmd->key[i].key_index = i; + if (priv->wep_keys[i].key_size) { + wep_cmd->key[i].key_offset = i; + not_empty = 1; + } else { + wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; + } + + wep_cmd->key[i].key_size = priv->wep_keys[i].key_size; + memcpy(&wep_cmd->key[i].key[3], priv->wep_keys[i].key, + priv->wep_keys[i].key_size); + } + + wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; + wep_cmd->num_keys = WEP_KEYS_MAX; + + cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX; + + cmd.len = cmd_size; + + if (not_empty || send_if_empty) + return iwl_send_cmd(priv, &cmd); + else + return 0; +} 
+EXPORT_SYMBOL(iwl_send_static_wepkey_cmd); + +int iwl_remove_default_wep_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", + keyconf->keyidx); + + if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) + IWL_ERR(priv, "index %d not used in uCode key table.\n", + keyconf->keyidx); + + priv->default_wep_key--; + memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + ret = iwl_send_static_wepkey_cmd(priv, 1); + IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", + keyconf->keyidx, ret); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iwl_remove_default_wep_key); + +int iwl_set_default_wep_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf) +{ + int ret; + unsigned long flags; + + if (keyconf->keylen != WEP_KEY_LEN_128 && + keyconf->keylen != WEP_KEY_LEN_64) { + IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen); + return -EINVAL; + } + + keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->hw_key_idx = HW_KEY_DEFAULT; + priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->default_wep_key++; + + if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table)) + IWL_ERR(priv, "index %d already used in uCode key table.\n", + keyconf->keyidx); + + priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; + memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key, + keyconf->keylen); + + ret = iwl_send_static_wepkey_cmd(priv, 0); + IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", + keyconf->keylen, keyconf->keyidx, ret); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iwl_set_default_wep_key); + +static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + int ret; + + keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; + + key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (keyconf->keylen == WEP_KEY_LEN_128) + key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; + + if (sta_id == priv->hw_params.bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].keyinfo.alg = keyconf->alg; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; + + memcpy(priv->stations[sta_id].keyinfo.key, + keyconf->key, keyconf->keylen); + + memcpy(&priv->stations[sta_id].sta.key.key[3], + keyconf->key, keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. 
*/ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + int ret; + + key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == priv->hw_params.bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.alg = keyconf->alg; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. */ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + int ret = 0; + __le16 key_flags = 0; + + key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == priv->hw_params.bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].keyinfo.alg = keyconf->alg; + priv->stations[sta_id].keyinfo.keylen = 16; + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. 
*/ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + + + /* This copy is actually not needed: we get the key with each TX */ + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +void iwl_update_tkip_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + const u8 *addr, u32 iv32, u16 *phase1key) +{ + u8 sta_id = IWL_INVALID_STATION; + unsigned long flags; + int i; + + sta_id = iwl_find_station(priv, addr); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", + addr); + return; + } + + if (iwl_scan_cancel(priv)) { + /* cancel scan failed, just live w/ bad key and rely + briefly on SW decryption */ + return; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; + + for (i = 0; i < 5; i++) + priv->stations[sta_id].sta.key.tkip_rx_ttak[i] = + cpu_to_le16(phase1key[i]); + + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + +} +EXPORT_SYMBOL(iwl_update_tkip_key); + +int iwl_remove_dynamic_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + int ret = 0; + u16 key_flags; + u8 keyidx; + + priv->key_mapping_key--; + + spin_lock_irqsave(&priv->sta_lock, flags); + key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags); + keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3; + + IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n", + keyconf->keyidx, sta_id); + + if (keyconf->keyidx != keyidx) { + /* We need to remove a key with an index different from the one + * in the uCode. This means that the key we need to remove has + * been replaced by another one with a different index. + * Don't do anything and return ok + */ + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { + IWL_WARN(priv, "Removing wrong key %d 0x%x\n", + keyconf->keyidx, key_flags); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, + &priv->ucode_key_table)) + IWL_ERR(priv, "index %d not used in uCode key table.\n", + priv->stations[sta_id].sta.key.key_offset); + memset(&priv->stations[sta_id].keyinfo, 0, + sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, + sizeof(struct iwl4965_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = + STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; + priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. 
\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} +EXPORT_SYMBOL(iwl_remove_dynamic_key); + +int iwl_set_dynamic_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + int ret; + + priv->key_mapping_key++; + keyconf->hw_key_idx = HW_KEY_DYNAMIC; + + switch (keyconf->alg) { + case ALG_CCMP: + ret = iwl_set_ccmp_dynamic_key_info(priv, keyconf, sta_id); + break; + case ALG_TKIP: + ret = iwl_set_tkip_dynamic_key_info(priv, keyconf, sta_id); + break; + case ALG_WEP: + ret = iwl_set_wep_dynamic_key_info(priv, keyconf, sta_id); + break; + default: + IWL_ERR(priv, + "Unknown alg: %s alg = %d\n", __func__, keyconf->alg); + ret = -EINVAL; + } + + IWL_DEBUG_WEP(priv, "Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n", + keyconf->alg, keyconf->keylen, keyconf->keyidx, + sta_id, ret); + + return ret; +} +EXPORT_SYMBOL(iwl_set_dynamic_key); + +#ifdef CONFIG_IWLWIFI_DEBUG +static void iwl_dump_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq) +{ + int i; + IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id); + IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n", + lq->general_params.single_stream_ant_msk, + lq->general_params.dual_stream_ant_msk); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n", + i, lq->rs_table[i].rate_n_flags); +} +#else +static inline void iwl_dump_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq) +{ +} +#endif + +int iwl_send_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq, u8 flags) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_TX_LINK_QUALITY_CMD, + .len = sizeof(struct iwl_link_quality_cmd), + .flags = flags, + .data = lq, + }; + + if ((lq->sta_id == 0xFF) && + (priv->iw_mode == NL80211_IFTYPE_ADHOC)) + return -EINVAL; + + if (lq->sta_id == 0xFF) + lq->sta_id = IWL_AP_ID; + + iwl_dump_lq_cmd(priv, lq); + + if (iwl_is_associated(priv) && priv->assoc_station_added) + return iwl_send_cmd(priv, &cmd); + + return 0; +} +EXPORT_SYMBOL(iwl_send_lq_cmd); + +/** + * iwl_sta_init_lq - Initialize a station's hardware rate table + * + * The uCode's station table contains a table of fallback rates + * for automatic fallback during transmission. + * + * NOTE: This sets up a default set of values. These will be replaced later + * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of + * rc80211_simple. + * + * NOTE: Run REPLY_ADD_STA command to set up station table entry, before + * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, + * which requires station table entry to exist). 
+ */ +static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap) +{ + int i, r; + struct iwl_link_quality_cmd link_cmd = { + .reserved1 = 0, + }; + u32 rate_flags; + + /* Set up the rate scaling to start at selected rate, fall back + * all the way down to 1M in IEEE order, and then spin on 1M */ + if (is_ap) + r = IWL_RATE_54M_INDEX; + else if (priv->band == IEEE80211_BAND_5GHZ) + r = IWL_RATE_6M_INDEX; + else + r = IWL_RATE_1M_INDEX; + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + rate_flags = 0; + if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) << + RATE_MCS_ANT_POS; + + link_cmd.rs_table[i].rate_n_flags = + iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); + r = iwl_get_prev_ieee_rate(r); + } + + link_cmd.general_params.single_stream_ant_msk = + first_antenna(priv->hw_params.valid_tx_ant); + link_cmd.general_params.dual_stream_ant_msk = 3; + link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + link_cmd.agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + /* Update the rate scaling for control frame Tx to AP */ + link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id; + + iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD, + sizeof(link_cmd), &link_cmd, NULL); +} + +/** + * iwl_rxon_add_station - add station into station table. + * + * there is only one AP station with id= IWL_AP_ID + * NOTE: mutex must be held before calling this function + */ +int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap) +{ + struct ieee80211_sta *sta; + struct ieee80211_sta_ht_cap ht_config; + struct ieee80211_sta_ht_cap *cur_ht_config = NULL; + u8 sta_id; + + /* + * Set HT capabilities. It is ok to set this struct even if not using + * HT config: the priv->current_ht_config.is_ht flag will just be false + */ + rcu_read_lock(); + sta = ieee80211_find_sta(priv->vif, addr); + if (sta) { + memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config)); + cur_ht_config = &ht_config; + } + rcu_read_unlock(); + + /* Add station to device's station table */ + sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config); + + /* Set up default rate scaling table in device's station table */ + iwl_sta_init_lq(priv, addr, is_ap); + + return sta_id; +} +EXPORT_SYMBOL(iwl_rxon_add_station); + +/** + * iwl_sta_init_bcast_lq - Initialize a bcast station's hardware rate table + * + * NOTE: Run REPLY_ADD_STA command to set up station table entry, before + * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, + * which requires station table entry to exist). 
+ */ +static void iwl_sta_init_bcast_lq(struct iwl_priv *priv) +{ + int i, r; + struct iwl_link_quality_cmd link_cmd = { + .reserved1 = 0, + }; + u32 rate_flags; + + /* Set up the rate scaling to start at selected rate, fall back + * all the way down to 1M in IEEE order, and then spin on 1M */ + if (priv->band == IEEE80211_BAND_5GHZ) + r = IWL_RATE_6M_INDEX; + else + r = IWL_RATE_1M_INDEX; + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + rate_flags = 0; + if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) << + RATE_MCS_ANT_POS; + + link_cmd.rs_table[i].rate_n_flags = + iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); + r = iwl_get_prev_ieee_rate(r); + } + + link_cmd.general_params.single_stream_ant_msk = + first_antenna(priv->hw_params.valid_tx_ant); + link_cmd.general_params.dual_stream_ant_msk = 3; + link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + link_cmd.agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + /* Update the rate scaling for control frame Tx to AP */ + link_cmd.sta_id = priv->hw_params.bcast_sta_id; + + iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD, + sizeof(link_cmd), &link_cmd, NULL); +} + + +/** + * iwl_add_bcast_station - add broadcast station into station table. + */ +void iwl_add_bcast_station(struct iwl_priv *priv) +{ + IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n"); + iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL); + + /* Set up default rate scaling table in device's station table */ + iwl_sta_init_bcast_lq(priv); +} +EXPORT_SYMBOL(iwl_add_bcast_station); + +/** + * iwl3945_add_bcast_station - add broadcast station into station table. + */ +void iwl3945_add_bcast_station(struct iwl_priv *priv) +{ + IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n"); + iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL); +} +EXPORT_SYMBOL(iwl3945_add_bcast_station); + +/** + * iwl_get_sta_id - Find station's index within station table + * + * If new IBSS station, create new entry in station table + */ +int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) +{ + int sta_id; + __le16 fc = hdr->frame_control; + + /* If this frame is broadcast or management, use broadcast station id */ + if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) + return priv->hw_params.bcast_sta_id; + + switch (priv->iw_mode) { + + /* If we are a client station in a BSS network, use the special + * AP station entry (that's the only station we communicate with) */ + case NL80211_IFTYPE_STATION: + return IWL_AP_ID; + + /* If we are an AP, then find the station, or use BCAST */ + case NL80211_IFTYPE_AP: + sta_id = iwl_find_station(priv, hdr->addr1); + if (sta_id != IWL_INVALID_STATION) + return sta_id; + return priv->hw_params.bcast_sta_id; + + /* If this frame is going out to an IBSS network, find the station, + * or create a new station table entry */ + case NL80211_IFTYPE_ADHOC: + sta_id = iwl_find_station(priv, hdr->addr1); + if (sta_id != IWL_INVALID_STATION) + return sta_id; + + /* Create new station table entry */ + sta_id = iwl_add_station(priv, hdr->addr1, false, + CMD_ASYNC, NULL); + + if (sta_id != IWL_INVALID_STATION) + return sta_id; + + IWL_DEBUG_DROP(priv, "Station %pM not in station map. 
" + "Defaulting to broadcast...\n", + hdr->addr1); + iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); + return priv->hw_params.bcast_sta_id; + + default: + IWL_WARN(priv, "Unknown mode of operation: %d\n", + priv->iw_mode); + return priv->hw_params.bcast_sta_id; + } +} +EXPORT_SYMBOL(iwl_get_sta_id); + +/** + * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table + */ +void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid) +{ + unsigned long flags; + + /* Remove "disable" flag, to enable Tx for this TID */ + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; + priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} +EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid); + +int iwl_sta_rx_agg_start(struct iwl_priv *priv, + const u8 *addr, int tid, u16 ssn) +{ + unsigned long flags; + int sta_id; + + sta_id = iwl_find_station(priv, addr); + if (sta_id == IWL_INVALID_STATION) + return -ENXIO; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; + priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, + CMD_ASYNC); +} +EXPORT_SYMBOL(iwl_sta_rx_agg_start); + +int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid) +{ + unsigned long flags; + int sta_id; + + sta_id = iwl_find_station(priv, addr); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; + priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, + CMD_ASYNC); +} +EXPORT_SYMBOL(iwl_sta_rx_agg_stop); + +void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.sta.modify_mask = 0; + priv->stations[sta_id].sta.sleep_tx_count = 0; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} +EXPORT_SYMBOL(iwl_sta_modify_ps_wake); + +void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.sta.modify_mask = + STA_MODIFY_SLEEP_TX_COUNT_MSK; + priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); + priv->stations[sta_id].sta.mode = 
STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h new file mode 100644 index 0000000..2dc35fe --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-sta.h @@ -0,0 +1,72 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __iwl_sta_h__ +#define __iwl_sta_h__ + +#define HW_KEY_DYNAMIC 0 +#define HW_KEY_DEFAULT 1 + +/** + * iwl_find_station - Find station id for a given BSSID + * @bssid: MAC address of station ID to find + */ +u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid); + +int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty); +int iwl_remove_default_wep_key(struct iwl_priv *priv, + struct ieee80211_key_conf *key); +int iwl_set_default_wep_key(struct iwl_priv *priv, + struct ieee80211_key_conf *key); +int iwl_set_dynamic_key(struct iwl_priv *priv, + struct ieee80211_key_conf *key, u8 sta_id); +int iwl_remove_dynamic_key(struct iwl_priv *priv, + struct ieee80211_key_conf *key, u8 sta_id); +void iwl_update_tkip_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + const u8 *addr, u32 iv32, u16 *phase1key); + +int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); +void iwl_add_bcast_station(struct iwl_priv *priv); +void iwl3945_add_bcast_station(struct iwl_priv *priv); +int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); +void iwl_clear_stations_table(struct iwl_priv *priv); +int iwl_get_free_ucode_key_index(struct iwl_priv *priv); +int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr); +int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr); +int iwl_send_add_sta(struct iwl_priv *priv, + struct iwl_addsta_cmd *sta, u8 flags); +u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags, + struct ieee80211_sta_ht_cap *ht_info); +void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid); +int iwl_sta_rx_agg_start(struct iwl_priv *priv, + const u8 *addr, int tid, u16 ssn); +int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid); +void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id); +void 
iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt); +#endif /* __iwl_sta_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c new file mode 100644 index 0000000..1ed5206 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -0,0 +1,1626 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/etherdevice.h> +#include <linux/sched.h> +#include <net/mac80211.h> +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" + +static const u16 default_tid_to_tx_fifo[] = { + IWL_TX_FIFO_AC1, + IWL_TX_FIFO_AC0, + IWL_TX_FIFO_AC0, + IWL_TX_FIFO_AC1, + IWL_TX_FIFO_AC2, + IWL_TX_FIFO_AC2, + IWL_TX_FIFO_AC3, + IWL_TX_FIFO_AC3, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_AC3 +}; + +static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv, + struct iwl_dma_ptr *ptr, size_t size) +{ + ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma, + GFP_KERNEL); + if (!ptr->addr) + return -ENOMEM; + ptr->size = size; + return 0; +} + +static inline void iwl_free_dma_ptr(struct iwl_priv *priv, + struct iwl_dma_ptr *ptr) +{ + if (unlikely(!ptr->addr)) + return; + + dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); + memset(ptr, 0, sizeof(*ptr)); +} + +/** + * iwl_txq_update_write_ptr - Send new write index to hardware + */ +void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + u32 reg = 0; + int txq_id = txq->q.id; + + if (txq->need_update == 0) + return; + + /* if we're trying to save power */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part.
*/ + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", + txq_id, reg); + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + return; + } + + iwl_write_direct32(priv, HBUS_TARG_WRPTR, + txq->q.write_ptr | (txq_id << 8)); + + /* else not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). */ + } else + iwl_write32(priv, HBUS_TARG_WRPTR, + txq->q.write_ptr | (txq_id << 8)); + + txq->need_update = 0; +} +EXPORT_SYMBOL(iwl_txq_update_write_ptr); + + +void iwl_free_tfds_in_queue(struct iwl_priv *priv, + int sta_id, int tid, int freed) +{ + if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) + priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; + else { + IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n", + priv->stations[sta_id].tid[tid].tfds_in_queue, + freed); + priv->stations[sta_id].tid[tid].tfds_in_queue = 0; + } +} +EXPORT_SYMBOL(iwl_free_tfds_in_queue); + +/** + * iwl_tx_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. + */ +void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct device *dev = &priv->pci_dev->dev; + int i; + + if (q->n_bd == 0) + return; + + /* first, empty all BD's */ + for (; q->write_ptr != q->read_ptr; + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + + /* De-alloc array of command/tx buffers */ + for (i = 0; i < TFD_TX_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) + dma_free_coherent(dev, priv->hw_params.tfd_size * + txq->q.n_bd, txq->tfds, txq->q.dma_addr); + + /* De-alloc array of per-TFD driver data */ + kfree(txq->txb); + txq->txb = NULL; + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} +EXPORT_SYMBOL(iwl_tx_queue_free); + +/** + * iwl_cmd_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. + */ +void iwl_cmd_queue_free(struct iwl_priv *priv) +{ + struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; + struct iwl_queue *q = &txq->q; + struct device *dev = &priv->pci_dev->dev; + int i; + + if (q->n_bd == 0) + return; + + /* De-alloc array of command/tx buffers */ + for (i = 0; i <= TFD_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) + dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd, + txq->tfds, txq->q.dma_addr); + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} +EXPORT_SYMBOL(iwl_cmd_queue_free); + +/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** + * DMA services + * + * Theory of operation + * + * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer + * of buffer descriptors, each of which points to one or more data buffers for + * the device to read from or fill. 
Driver and device exchange status of each + * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty + * entries in each circular buffer, to protect against confusing empty and full + * queue states. + * + * The device reads or writes the data in the queues via the device's several + * DMA/FIFO channels. Each queue is mapped to a single DMA channel. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + * See more detailed info in iwl-4965-hw.h. + ***************************************************/ + +int iwl_queue_space(const struct iwl_queue *q) +{ + int s = q->read_ptr - q->write_ptr; + + if (q->read_ptr > q->write_ptr) + s -= q->n_bd; + + if (s <= 0) + s += q->n_window; + /* keep some reserve to not confuse empty and full situations */ + s -= 2; + if (s < 0) + s = 0; + return s; +} +EXPORT_SYMBOL(iwl_queue_space); + + +/** + * iwl_queue_init - Initialize queue's high/low-water and read/write indexes + */ +static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, + int count, int slots_num, u32 id) +{ + q->n_bd = count; + q->n_window = slots_num; + q->id = id; + + /* count must be power-of-two size, otherwise iwl_queue_inc_wrap + * and iwl_queue_dec_wrap are broken. */ + BUG_ON(!is_power_of_2(count)); + + /* slots_num must be power-of-two size, otherwise + * get_cmd_index is broken. */ + BUG_ON(!is_power_of_2(slots_num)); + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->write_ptr = q->read_ptr = 0; + + return 0; +} + +/** + * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue + */ +static int iwl_tx_queue_alloc(struct iwl_priv *priv, + struct iwl_tx_queue *txq, u32 id) +{ + struct device *dev = &priv->pci_dev->dev; + size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; + + /* Driver private data, only for Tx (not command) queues, + * not shared with device. */ + if (id != IWL_CMD_QUEUE_NUM) { + txq->txb = kmalloc(sizeof(txq->txb[0]) * + TFD_QUEUE_SIZE_MAX, GFP_KERNEL); + if (!txq->txb) { + IWL_ERR(priv, "kmalloc for auxiliary BD " + "structures failed\n"); + goto error; + } + } else { + txq->txb = NULL; + } + + /* Circular buffer of transmit frame descriptors (TFDs), + * shared with device */ + txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, + GFP_KERNEL); + if (!txq->tfds) { + IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz); + goto error; + } + txq->q.id = id; + + return 0; + + error: + kfree(txq->txb); + txq->txb = NULL; + + return -ENOMEM; +} + +/** + * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue + */ +int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id) +{ + int i, len; + int ret; + int actual_slots = slots_num; + + /* + * Alloc buffer array for commands (Tx or other types of commands). + * For the command queue (#4), allocate command space + one big + * command for scan, since scan command is very huge; the system will + * not have two scans at the same time, so only one is needed. + * For normal Tx queues (all other queues), no super-size command + * space is needed. 
+ */ + if (txq_id == IWL_CMD_QUEUE_NUM) + actual_slots++; + + txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots, + GFP_KERNEL); + txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots, + GFP_KERNEL); + + if (!txq->meta || !txq->cmd) + goto out_free_arrays; + + len = sizeof(struct iwl_device_cmd); + for (i = 0; i < actual_slots; i++) { + /* only happens for cmd queue */ + if (i == slots_num) + len = IWL_MAX_CMD_SIZE; + + txq->cmd[i] = kmalloc(len, GFP_KERNEL); + if (!txq->cmd[i]) + goto err; + } + + /* Alloc driver data array and TFD circular buffer */ + ret = iwl_tx_queue_alloc(priv, txq, txq_id); + if (ret) + goto err; + + txq->need_update = 0; + + /* + * Aggregation TX queues will get their ID when aggregation begins; + * they overwrite the setting done here. The command FIFO doesn't + * need an swq_id so don't set one to catch errors, all others can + * be set up to the identity mapping. + */ + if (txq_id != IWL_CMD_QUEUE_NUM) + txq->swq_id = txq_id; + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + /* Tell device where to find queue */ + priv->cfg->ops->lib->txq_init(priv, txq); + + return 0; +err: + for (i = 0; i < actual_slots; i++) + kfree(txq->cmd[i]); +out_free_arrays: + kfree(txq->meta); + kfree(txq->cmd); + + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_tx_queue_init); + +/** + * iwl_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + if (priv->txq) { + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; + txq_id++) + if (txq_id == IWL_CMD_QUEUE_NUM) + iwl_cmd_queue_free(priv); + else + iwl_tx_queue_free(priv, txq_id); + } + iwl_free_dma_ptr(priv, &priv->kw); + + iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); + + /* free tx queue structure */ + iwl_free_txq_mem(priv); +} +EXPORT_SYMBOL(iwl_hw_txq_ctx_free); + +/** + * iwl_txq_ctx_reset - Reset TX queue context + * Destroys all DMA structures and initialize them again + * + * @param priv + * @return error code + */ +int iwl_txq_ctx_reset(struct iwl_priv *priv) +{ + int ret = 0; + int txq_id, slots_num; + unsigned long flags; + + /* Free all tx/cmd queues and keep-warm buffer */ + iwl_hw_txq_ctx_free(priv); + + ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls, + priv->hw_params.scd_bc_tbls_size); + if (ret) { + IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); + goto error_bc_tbls; + } + /* Alloc keep-warm buffer */ + ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); + if (ret) { + IWL_ERR(priv, "Keep Warm allocation failed\n"); + goto error_kw; + } + + /* allocate tx queue structure */ + ret = iwl_alloc_txq_mem(priv); + if (ret) + goto error; + + spin_lock_irqsave(&priv->lock, flags); + + /* Turn off all Tx DMA fifos */ + priv->cfg->ops->lib->txq_set_sched(priv, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, + txq_id); + if (ret) { + IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return ret; + + error: + iwl_hw_txq_ctx_free(priv); + iwl_free_dma_ptr(priv, &priv->kw); + error_kw: + iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); + error_bc_tbls: + return ret; +} + +/** + * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory + */ +void iwl_txq_ctx_stop(struct iwl_priv *priv) +{ + int ch; + unsigned long flags; + + /* Turn off all Tx DMA fifos */ + spin_lock_irqsave(&priv->lock, flags); + + priv->cfg->ops->lib->txq_set_sched(priv, 0); + + /* Stop each Tx DMA channel, and wait for it to be idle */ + for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { + iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); + iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), + 1000); + } + spin_unlock_irqrestore(&priv->lock, flags); + + /* Deallocate memory for all Tx queues */ + iwl_hw_txq_ctx_free(priv); +} +EXPORT_SYMBOL(iwl_txq_ctx_stop); + +/* + * handle build REPLY_TX command notification. + */ +static void iwl_tx_cmd_build_basic(struct iwl_priv *priv, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + u8 std_id) +{ + __le16 fc = hdr->frame_control; + __le32 tx_flags = tx_cmd->tx_flags; + + tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if (ieee80211_is_mgmt(fc)) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_resp(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + if (ieee80211_is_back_req(fc)) + tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; + + + tx_cmd->sta_id = std_id; + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else { + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); + + if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) + tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); + } else { + tx_cmd->timeout.pm_frame_timeout = 0; + } + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = tx_flags; + tx_cmd->next_frame_len = 0; +} + +#define RTS_HCCA_RETRY_LIMIT 3 +#define RTS_DFAULT_RETRY_LIMIT 60 + +static void iwl_tx_cmd_build_rate(struct iwl_priv *priv, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + __le16 fc, int is_hcca) +{ + u32 rate_flags; + int rate_idx; + u8 rts_retry_limit; + u8 data_retry_limit; + u8 rate_plcp; + + /* Set retry limit on DATA packets and Probe Responses*/ + if (ieee80211_is_probe_resp(fc)) + data_retry_limit = 3; + else + data_retry_limit = IWL_DEFAULT_TX_RETRY; + tx_cmd->data_retry_limit = data_retry_limit; + + /* Set retry limit on RTS packets */ + rts_retry_limit = (is_hcca) ? 
RTS_HCCA_RETRY_LIMIT : + RTS_DFAULT_RETRY_LIMIT; + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + tx_cmd->rts_retry_limit = rts_retry_limit; + + /* DATA packets will use the uCode station table for rate/antenna + * selection */ + if (ieee80211_is_data(fc)) { + tx_cmd->initial_rate_index = 0; + tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; + return; + } + + /** + * If the current TX rate stored in mac80211 has the MCS bit set, it's + * not really a TX rate. Thus, we use the lowest supported rate for + * this band. Also use the lowest supported rate if the stored rate + * index is invalid. + */ + rate_idx = info->control.rates[0].idx; + if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || + (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) + rate_idx = rate_lowest_index(&priv->bands[info->band], + info->control.sta); + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx += IWL_FIRST_OFDM_RATE; + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwl_rates[rate_idx].plcp; + /* Zero out flags for this packet */ + rate_flags = 0; + + /* Set CCK flag as needed */ + if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + + /* Set up RTS and CTS flags for certain packets */ + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_AUTH): + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) { + tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK; + tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK; + } + break; + default: + break; + } + + /* Set up antennas */ + priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); + rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); + + /* Set the rate in the TX cmd */ + tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags); +} + +static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct sk_buff *skb_frag, + int sta_id) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + switch (keyconf->alg) { + case ALG_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; + IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); + break; + + case ALG_TKIP: + tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; + ieee80211_get_tkip_key(keyconf, skb_frag, + IEEE80211_TKIP_P2_KEY, tx_cmd->key); + IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); + break; + + case ALG_WEP: + tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | + (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); + + if (keyconf->keylen == WEP_KEY_LEN_128) + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + + memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); + + IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " + "with key %d\n", keyconf->keyidx); + break; + + default: + IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg); + break; + } +} + +/* + * start REPLY_TX command process + */ +int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *sta = info->control.sta; + struct iwl_station_priv *sta_priv = NULL; + struct iwl_tx_queue *txq; + 
struct iwl_queue *q; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + struct iwl_tx_cmd *tx_cmd; + int swq_id, txq_id; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + dma_addr_t scratch_phys; + u16 len, len_org, firstlen, secondlen; + u16 seq_number = 0; + __le16 fc; + u8 hdr_len; + u8 sta_id; + u8 wait_write_ptr = 0; + u8 tid = 0; + u8 *qc = NULL; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); + goto drop_unlock; + } + + fc = hdr->frame_control; + +#ifdef CONFIG_IWLWIFI_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); + else if (ieee80211_is_assoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); +#endif + + /* drop all non-injected data frame if we are not associated */ + if (ieee80211_is_data(fc) && + !(info->flags & IEEE80211_TX_CTL_INJECTED) && + (!iwl_is_associated(priv) || + ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) || + !priv->assoc_station_added)) { + IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n"); + goto drop_unlock; + } + + hdr_len = ieee80211_hdrlen(fc); + + /* Find (or create) index into station table for destination station */ + if (info->flags & IEEE80211_TX_CTL_INJECTED) + sta_id = priv->hw_params.bcast_sta_id; + else + sta_id = iwl_get_sta_id(priv, hdr); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop_unlock; + } + + IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); + + if (sta) + sta_priv = (void *)sta->drv_priv; + + if (sta_priv && sta_id != priv->hw_params.bcast_sta_id && + sta_priv->asleep) { + WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)); + /* + * This sends an asynchronous command to the device, + * but we can rely on it being processed before the + * next frame is processed -- and the next frame to + * this station is the one that will consume this + * counter. + * For now set the counter to just 1 since we do not + * support uAPSD yet. 
+ */ + iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); + } + + txq_id = skb_get_queue_mapping(skb); + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (unlikely(tid >= MAX_TID_COUNT)) + goto drop_unlock; + seq_number = priv->stations[sta_id].tid[tid].seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + seq_number += 0x10; + /* aggregation is on for this */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { + txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; + } + } + + txq = &priv->txq[txq_id]; + swq_id = txq->swq_id; + q = &txq->q; + + if (unlikely(iwl_queue_space(q) < q->high_mark)) + goto drop_unlock; + + if (ieee80211_is_data_qos(fc)) + priv->stations[sta_id].tid[tid].tfds_in_queue++; + + /* Set up driver data for this TFD */ + memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->write_ptr].skb[0] = skb; + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_cmd = txq->cmd[q->write_ptr]; + out_meta = &txq->meta[q->write_ptr]; + tx_cmd = &out_cmd->cmd.tx; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); + + /* + * Set up the Tx-command (not MAC!) header. + * Store the chosen Tx queue and TFD index within the sequence field; + * after Tx, uCode's Tx response will return this value so driver can + * locate the frame within the tx queue and do post-tx processing. + */ + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->write_ptr))); + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + + /* Total # bytes to be transmitted */ + len = (u16)skb->len; + tx_cmd->len = cpu_to_le16(len); + + if (info->control.hw_key) + iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); + iwl_dbg_log_tx_data_frame(priv, len, hdr); + + /* set is_hcca to 0; it probably will never be implemented */ + iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0); + + iwl_update_stats(priv, true, fc, len); + /* + * Use the first empty entry in this queue's command buffer array + * to contain the Tx command and MAC header concatenated together + * (payload data will be in another buffer). + * Size of this varies, due to varying MAC header length. + * If end is not dword aligned, we'll have 2 extra bytes at the end + * of the MAC header (device reads on dword boundaries). + * We'll tell device about this padding later. + */ + len = sizeof(struct iwl_tx_cmd) + + sizeof(struct iwl_cmd_header) + hdr_len; + + len_org = len; + firstlen = len = (len + 3) & ~3; + + if (len_org != len) + len_org = 1; + else + len_org = 0; + + /* Tell NIC about any 2-byte padding after MAC header */ + if (len_org) + tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + + /* Physical address of this Tx command's header (not MAC header!), + * within command buffer array. */ + txcmd_phys = pci_map_single(priv->pci_dev, + &out_cmd->hdr, len, + PCI_DMA_BIDIRECTIONAL); + pci_unmap_addr_set(out_meta, mapping, txcmd_phys); + pci_unmap_len_set(out_meta, len, len); + /* Add buffer containing Tx command and MAC(!) 
header to TFD's + * first entry */ + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + txcmd_phys, len, 1, 0); + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + if (qc) + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + /* Set up TFD's 2nd entry to point directly to remainder of skb, + * if any (802.11 null frames have no payload). */ + secondlen = len = skb->len - hdr_len; + if (len) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + len, PCI_DMA_TODEVICE); + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, len, + 0, 0); + } + + scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + + len = sizeof(struct iwl_tx_cmd) + + sizeof(struct iwl_cmd_header) + hdr_len; + /* take back ownership of DMA buffer to enable update */ + pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, + len, PCI_DMA_BIDIRECTIONAL); + tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); + + IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", + le16_to_cpu(out_cmd->hdr.sequence)); + IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); + + /* Set up entry for this TFD in Tx byte-count array */ + if (info->flags & IEEE80211_TX_CTL_AMPDU) + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, + le16_to_cpu(tx_cmd->len)); + + pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, + len, PCI_DMA_BIDIRECTIONAL); + + trace_iwlwifi_dev_tx(priv, + &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], + sizeof(struct iwl_tfd), + &out_cmd->hdr, firstlen, + skb->data + hdr_len, secondlen); + + /* Tell device the write index *just past* this latest filled TFD */ + q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + /* + * At this point the frame is "transmitted" successfully + * and we will get a TX status notification eventually, + * regardless of the value of ret. "ret" only indicates + * whether or not we should update the write pointer. + */ + + /* avoid atomic ops if it isn't an associated client */ + if (sta_priv && sta_priv->client) + atomic_inc(&sta_priv->pending_frames); + + if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } else { + iwl_stop_queue(priv, txq->swq_id); + } + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + return -1; +} +EXPORT_SYMBOL(iwl_tx_skb); + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +/** + * iwl_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a point to the ucode command structure + * + * The function returns < 0 values to indicate the operation is + * failed. On success, it turns the index (> 0) of command in the + * command queue. 
+ */ +int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; + struct iwl_queue *q = &txq->q; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + dma_addr_t phys_addr; + unsigned long flags; + int len; + u32 idx; + u16 fix_size; + + cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); + fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); + + /* If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then + * we will need to increase the size of the TFD entries + * Also, check to see if command buffer should not exceed the size + * of device_cmd and max_cmd_size. */ + BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && + !(cmd->flags & CMD_SIZE_HUGE)); + BUG_ON(fix_size > IWL_MAX_CMD_SIZE); + + if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) { + IWL_WARN(priv, "Not sending command - %s KILL\n", + iwl_is_rfkill(priv) ? "RF" : "CT"); + return -EIO; + } + + if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + IWL_ERR(priv, "No space in command queue\n"); + if (iwl_within_ct_kill_margin(priv)) + iwl_tt_enter_ct_kill(priv); + else { + IWL_ERR(priv, "Restarting adapter due to queue full\n"); + queue_work(priv->workqueue, &priv->restart); + } + return -ENOSPC; + } + + spin_lock_irqsave(&priv->hcmd_lock, flags); + + idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); + out_cmd = txq->cmd[idx]; + out_meta = &txq->meta[idx]; + + memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ + out_meta->flags = cmd->flags; + if (cmd->flags & CMD_WANT_SKB) + out_meta->source = cmd; + if (cmd->flags & CMD_ASYNC) + out_meta->callback = cmd->callback; + + out_cmd->hdr.cmd = cmd->id; + memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); + + /* At this point, the out_cmd now has all of the incoming cmd + * information */ + + out_cmd->hdr.flags = 0; + out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | + INDEX_TO_SEQ(q->write_ptr)); + if (cmd->flags & CMD_SIZE_HUGE) + out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; + len = sizeof(struct iwl_device_cmd); + if (idx == TFD_CMD_SLOTS) + len = IWL_MAX_CMD_SIZE; + +#ifdef CONFIG_IWLWIFI_DEBUG + switch (out_cmd->hdr.cmd) { + case REPLY_TX_LINK_QUALITY_CMD: + case SENSITIVITY_CMD: + IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), fix_size, + q->write_ptr, idx, IWL_CMD_QUEUE_NUM); + break; + default: + IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), fix_size, + q->write_ptr, idx, IWL_CMD_QUEUE_NUM); + } +#endif + txq->need_update = 1; + + if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl) + /* Set up entry in queue's byte count circular buffer */ + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0); + + phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, + fix_size, PCI_DMA_BIDIRECTIONAL); + pci_unmap_addr_set(out_meta, mapping, phys_addr); + pci_unmap_len_set(out_meta, len, fix_size); + + trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags); + + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, fix_size, 1, + U32_PAD(cmd->len)); + + /* Increment and update queue's write index */ + q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); + 
iwl_txq_update_write_ptr(priv, txq); + + spin_unlock_irqrestore(&priv->hcmd_lock, flags); + return idx; +} + +static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_sta *sta; + struct iwl_station_priv *sta_priv; + + sta = ieee80211_find_sta(priv->vif, hdr->addr1); + if (sta) { + sta_priv = (void *)sta->drv_priv; + /* avoid atomic ops if this isn't a client */ + if (sta_priv->client && + atomic_dec_return(&sta_priv->pending_frames) == 0) + ieee80211_sta_block_awake(priv->hw, sta, false); + } + + ieee80211_tx_status_irqsafe(priv->hw, skb); +} + +int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct iwl_tx_info *tx_info; + int nfreed = 0; + struct ieee80211_hdr *hdr; + + if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + index, q->n_bd, q->write_ptr, q->read_ptr); + return 0; + } + + for (index = iwl_queue_inc_wrap(index, q->n_bd); + q->read_ptr != index; + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + tx_info = &txq->txb[txq->q.read_ptr]; + iwl_tx_status(priv, tx_info->skb[0]); + + hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data; + if (hdr && ieee80211_is_data_qos(hdr->frame_control)) + nfreed++; + tx_info->skb[0] = NULL; + + if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) + priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); + + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + } + return nfreed; +} +EXPORT_SYMBOL(iwl_tx_queue_reclaim); + + +/** + * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd + * + * When FW advances 'R' index, all entries between old and new 'R' index + * need to be reclaimed. As result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. + */ +static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, + int idx, int cmd_idx) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + int nfreed = 0; + + if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + idx, q->n_bd, q->write_ptr, q->read_ptr); + return; + } + + for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + if (nfreed++ > 0) { + IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx, + q->write_ptr, q->read_ptr); + queue_work(priv->workqueue, &priv->restart); + } + + } +} + +/** + * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + * + * If an Rx buffer has an async callback associated with it the callback + * will be executed. 
The attached skb (if present) will only be freed + * if the callback returns 1 + */ +void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + int cmd_index; + bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); + struct iwl_device_cmd *cmd; + struct iwl_cmd_meta *meta; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug has been introduced + * in the queue management code. */ + if (WARN(txq_id != IWL_CMD_QUEUE_NUM, + "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n", + txq_id, sequence, + priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr, + priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) { + iwl_print_hex_error(priv, pkt, 32); + return; + } + + cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); + cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; + meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index]; + + pci_unmap_single(priv->pci_dev, + pci_unmap_addr(meta, mapping), + pci_unmap_len(meta, len), + PCI_DMA_BIDIRECTIONAL); + + /* Input error checking is done when commands are added to queue. */ + if (meta->flags & CMD_WANT_SKB) { + meta->source->reply_page = (unsigned long)rxb_addr(rxb); + rxb->page = NULL; + } else if (meta->callback) + meta->callback(priv, cmd, pkt); + + iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); + + if (!(meta->flags & CMD_ASYNC)) { + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n", + get_cmd_string(cmd->hdr.cmd)); + wake_up_interruptible(&priv->wait_command_queue); + } +} +EXPORT_SYMBOL(iwl_tx_cmd_complete); + +/* + * Find first available (lowest unused) Tx Queue, mark it "active". + * Called only when finding queue for aggregation. + * Should never return anything < 7, because they should already + * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6). 
+ */ +static int iwl_txq_ctx_activate_free(struct iwl_priv *priv) +{ + int txq_id; + + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) + return txq_id; + return -1; +} + +int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn) +{ + int sta_id; + int tx_fifo; + int txq_id; + int ret; + unsigned long flags; + struct iwl_tid_data *tid_data; + + if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) + tx_fifo = default_tid_to_tx_fifo[tid]; + else + return -EINVAL; + + IWL_WARN(priv, "%s on ra = %pM tid = %d\n", + __func__, ra, tid); + + sta_id = iwl_find_station(priv, ra); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Start AGG on invalid station\n"); + return -ENXIO; + } + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; + + if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { + IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); + return -ENXIO; + } + + txq_id = iwl_txq_ctx_activate_free(priv); + if (txq_id == -1) { + IWL_ERR(priv, "No free aggregation queue available\n"); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; + *ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.txq_id = txq_id; + priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, + sta_id, tid, *ssn); + if (ret) + return ret; + + if (tid_data->tfds_in_queue == 0) { + IWL_DEBUG_HT(priv, "HW queue is empty\n"); + tid_data->agg.state = IWL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid); + } else { + IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n", + tid_data->tfds_in_queue); + tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; + } + return ret; +} +EXPORT_SYMBOL(iwl_tx_agg_start); + +int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid) +{ + int tx_fifo_id, txq_id, sta_id, ssn = -1; + struct iwl_tid_data *tid_data; + int write_ptr, read_ptr; + unsigned long flags; + + if (!ra) { + IWL_ERR(priv, "ra = NULL\n"); + return -EINVAL; + } + + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; + + if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) + tx_fifo_id = default_tid_to_tx_fifo[tid]; + else + return -EINVAL; + + sta_id = iwl_find_station(priv, ra); + + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + if (priv->stations[sta_id].tid[tid].agg.state == + IWL_EMPTYING_HW_QUEUE_ADDBA) { + IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); + ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid); + priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; + return 0; + } + + if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) + IWL_WARN(priv, "Stopping AGG while state not ON or starting\n"); + + tid_data = &priv->stations[sta_id].tid[tid]; + ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; + txq_id = tid_data->agg.txq_id; + write_ptr = priv->txq[txq_id].q.write_ptr; + read_ptr = priv->txq[txq_id].q.read_ptr; + + /* The queue is not empty */ + if (write_ptr != read_ptr) { + IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); + priv->stations[sta_id].tid[tid].agg.state = + IWL_EMPTYING_HW_QUEUE_DELBA; + return 0; + } + + IWL_DEBUG_HT(priv, "HW queue is empty\n"); + priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; + + spin_lock_irqsave(&priv->lock, 
flags); + /* + * the only reason this call can fail is queue number out of range, + * which can happen if uCode is reloaded and all the station + * information are lost. if it is outside the range, there is no need + * to deactivate the uCode queue, just return "success" to allow + * mac80211 to clean up it own data. + */ + priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn, + tx_fifo_id); + spin_unlock_irqrestore(&priv->lock, flags); + + ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid); + + return 0; +} +EXPORT_SYMBOL(iwl_tx_agg_stop); + +int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id) +{ + struct iwl_queue *q = &priv->txq[txq_id].q; + u8 *addr = priv->stations[sta_id].sta.sta.addr; + struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; + + switch (priv->stations[sta_id].tid[tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_DELBA: + /* We are reclaiming the last packet of the */ + /* aggregated HW queue */ + if ((txq_id == tid_data->agg.txq_id) && + (q->read_ptr == q->write_ptr)) { + u16 ssn = SEQ_TO_SN(tid_data->seq_number); + int tx_fifo = default_tid_to_tx_fifo[tid]; + IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n"); + priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, + ssn, tx_fifo); + tid_data->agg.state = IWL_AGG_OFF; + ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid); + } + break; + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* We are reclaiming the last packet of the queue */ + if (tid_data->tfds_in_queue == 0) { + IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n"); + tid_data->agg.state = IWL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid); + } + break; + } + return 0; +} +EXPORT_SYMBOL(iwl_txq_check_empty); + +/** + * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack + * + * Go through block-ack's bitmap of ACK'd frames, update driver's record of + * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. + */ +static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl_compressed_ba_resp *ba_resp) + +{ + int i, sh, ack; + u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + u64 bitmap; + int successes = 0; + struct ieee80211_tx_info *info; + + if (unlikely(!agg->wait_for_ba)) { + IWL_ERR(priv, "Received BA when not expected\n"); + return -EINVAL; + } + + /* Mark that the expected block-ack response arrived */ + agg->wait_for_ba = 0; + IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); + + /* Calculate shift to align block-ack bits with our Tx window bits */ + sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); + if (sh < 0) /* tbw something is wrong with indices */ + sh += 0x100; + + /* don't use 64-bit values for now */ + bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; + + if (agg->frame_count > (64 - sh)) { + IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); + return -1; + } + + /* check for success or failure according to the + * transmitted bitmap and block-ack bitmap */ + bitmap &= agg->bitmap; + + /* For each frame attempted in aggregation, + * update driver's record of tx frame's status. */ + for (i = 0; i < agg->frame_count ; i++) { + ack = bitmap & (1ULL << i); + successes += !!ack; + IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", + ack ? 
"ACK" : "NACK", i, (agg->start_idx + i) & 0xff, + agg->start_idx + i); + } + + info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); + memset(&info->status, 0, sizeof(info->status)); + info->flags |= IEEE80211_TX_STAT_ACK; + info->flags |= IEEE80211_TX_STAT_AMPDU; + info->status.ampdu_ack_map = successes; + info->status.ampdu_ack_len = agg->frame_count; + iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info); + + IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap); + + return 0; +} + +/** + * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA + * + * Handles block-acknowledge notification from device, which reports success + * of frames sent via aggregation. + */ +void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; + struct iwl_tx_queue *txq = NULL; + struct iwl_ht_agg *agg; + int index; + int sta_id; + int tid; + + /* "flow" corresponds to Tx queue */ + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + + /* "ssn" is start of block-ack Tx window, corresponds to index + * (in Tx queue's circular buffer) of first TFD/frame in window */ + u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); + + if (scd_flow >= priv->hw_params.max_txq_num) { + IWL_ERR(priv, + "BUG_ON scd_flow is bigger than number of queues\n"); + return; + } + + txq = &priv->txq[scd_flow]; + sta_id = ba_resp->sta_id; + tid = ba_resp->tid; + agg = &priv->stations[sta_id].tid[tid].agg; + + /* Find index just before block-ack window */ + index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); + + /* TODO: Need to get this copy more safely - now good for debug */ + + IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " + "sta_id = %d\n", + agg->wait_for_ba, + (u8 *) &ba_resp->sta_addr_lo32, + ba_resp->sta_id); + IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " + "%d, scd_ssn = %d\n", + ba_resp->tid, + ba_resp->seq_ctl, + (unsigned long long)le64_to_cpu(ba_resp->bitmap), + ba_resp->scd_flow, + ba_resp->scd_ssn); + IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n", + agg->start_idx, + (unsigned long long)agg->bitmap); + + /* Update driver's record of ACK vs. not for each frame in window */ + iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp); + + /* Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack window (we assume that they've been successfully + * transmitted ... if not, it's too late anyway). 
*/ + if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { + /* calculate mac80211 ampdu sw queue to wake */ + int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); + + if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && + priv->mac80211_registered && + (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) + iwl_wake_queue(priv, txq->swq_id); + + iwl_txq_check_empty(priv, sta_id, tid, scd_flow); + } +} +EXPORT_SYMBOL(iwl_rx_reply_compressed_ba); + +#ifdef CONFIG_IWLWIFI_DEBUG +#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x + +const char *iwl_get_tx_fail_reason(u32 status) +{ + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_ENTRY(SHORT_LIMIT); + TX_STATUS_ENTRY(LONG_LIMIT); + TX_STATUS_ENTRY(FIFO_UNDERRUN); + TX_STATUS_ENTRY(MGMNT_ABORT); + TX_STATUS_ENTRY(NEXT_FRAG); + TX_STATUS_ENTRY(LIFE_EXPIRE); + TX_STATUS_ENTRY(DEST_PS); + TX_STATUS_ENTRY(ABORTED); + TX_STATUS_ENTRY(BT_RETRY); + TX_STATUS_ENTRY(STA_INVALID); + TX_STATUS_ENTRY(FRAG_DROPPED); + TX_STATUS_ENTRY(TID_DISABLE); + TX_STATUS_ENTRY(FRAME_FLUSHED); + TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); + TX_STATUS_ENTRY(TX_LOCKED); + TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; +} +EXPORT_SYMBOL(iwl_get_tx_fail_reason); +#endif /* CONFIG_IWLWIFI_DEBUG */ diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c new file mode 100644 index 0000000..54daa38 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -0,0 +1,4313 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> + +#include <net/ieee80211_radiotap.h> +#include <net/mac80211.h> + +#include <asm/div64.h> + +#define DRV_NAME "iwl3945" + +#include "iwl-fh.h" +#include "iwl-3945-fh.h" +#include "iwl-commands.h" +#include "iwl-sta.h" +#include "iwl-3945.h" +#include "iwl-core.h" +#include "iwl-helpers.h" +#include "iwl-dev.h" +#include "iwl-spectrum.h" + +/* + * module name, copyright, version, etc.
+ */ + +#define DRV_DESCRIPTION \ +"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" + +#ifdef CONFIG_IWLWIFI_DEBUG +#define VD "d" +#else +#define VD +#endif + +/* + * add "s" to indicate spectrum measurement included. + * we add it here to be consistent with previous releases in which + * this was configurable. + */ +#define DRV_VERSION IWLWIFI_VERSION VD "s" +#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation" +#define DRV_AUTHOR "<ilw@linux.intel.com>" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + + /* module parameters */ +struct iwl_mod_params iwl3945_mod_params = { + .sw_crypto = 1, + .restart_fw = 1, + /* the rest are 0 by default */ +}; + +/** + * iwl3945_get_antenna_flags - Get antenna flags for RXON command + * @priv: eeprom and antenna fields are used to determine antenna flags + * + * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed + * iwl3945_mod_params.antenna specifies the antenna diversity mode: + * + * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself + * IWL_ANTENNA_MAIN - Force MAIN antenna + * IWL_ANTENNA_AUX - Force AUX antenna + */ +__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + + switch (iwl3945_mod_params.antenna) { + case IWL_ANTENNA_DIVERSITY: + return 0; + + case IWL_ANTENNA_MAIN: + if (eeprom->antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + + case IWL_ANTENNA_AUX: + if (eeprom->antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + } + + /* bad antenna selector value */ + IWL_ERR(priv, "Bad antenna selector value (0x%x)\n", + iwl3945_mod_params.antenna); + + return 0; /* "diversity" is default if error */ +} + +static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + int ret; + + key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + + if (sta_id == priv->hw_params.bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->hw_key_idx = keyconf->keyidx; + key_flags &= ~STA_KEY_FLG_INVALID; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.alg = keyconf->alg; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode.
*/ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); + + ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + return -EOPNOTSUPP; +} + +static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + return -EOPNOTSUPP; +} + +static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, + sizeof(struct iwl4965_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); + iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 0); + return 0; +} + +static int iwl3945_set_dynamic_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + int ret = 0; + + keyconf->hw_key_idx = HW_KEY_DYNAMIC; + + switch (keyconf->alg) { + case ALG_CCMP: + ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id); + break; + case ALG_TKIP: + ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id); + break; + case ALG_WEP: + ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); + break; + default: + IWL_ERR(priv, "Unknown alg: %s alg = %d\n", __func__, keyconf->alg); + ret = -EINVAL; + } + + IWL_DEBUG_WEP(priv, "Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n", + keyconf->alg, keyconf->keylen, keyconf->keyidx, + sta_id, ret); + + return ret; +} + +static int iwl3945_remove_static_key(struct iwl_priv *priv) +{ + int ret = -EOPNOTSUPP; + + return ret; +} + +static int iwl3945_set_static_key(struct iwl_priv *priv, + struct ieee80211_key_conf *key) +{ + if (key->alg == ALG_WEP) + return -EOPNOTSUPP; + + IWL_ERR(priv, "Static key invalid: alg %d\n", key->alg); + return -EINVAL; +} + +static void iwl3945_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl3945_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv) +{ + struct iwl3945_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERR(priv, "Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl3945_frame, list); +} + +static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + int left) +{ + + if (!iwl_is_associated(priv) || !priv->ibss_beacon || + ((priv->iw_mode != NL80211_IFTYPE_ADHOC) && + (priv->iw_mode != NL80211_IFTYPE_AP))) + return 0; + + if (priv->ibss_beacon->len > left) + return 0; + + memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); + + return priv->ibss_beacon->len; +} + +static int iwl3945_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl3945_frame *frame; + unsigned int frame_size; + int rc; + u8 rate; + + frame = iwl3945_get_free_frame(priv); + + if (!frame) { + IWL_ERR(priv, "Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + rate = iwl_rate_get_lowest_plcp(priv); + + frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); + + rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl3945_free_frame(priv, frame); + + return rc; +} + +static void iwl3945_unset_hw_params(struct iwl_priv *priv) +{ + if (priv->shared_virt) + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct iwl3945_shared), + priv->shared_virt, + priv->shared_phys); +} + +static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + struct iwl_device_cmd *cmd, + struct sk_buff *skb_frag, + int sta_id) +{ + struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; + struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; + + switch (keyinfo->alg) { + case ALG_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); + IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); + break; + + case ALG_TKIP: + break; + + case ALG_WEP: + tx_cmd->sec_ctl = TX_CMD_SEC_WEP | + (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; + + if (keyinfo->keylen == 13) + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + + memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); + + IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " + "with key %d\n", info->control.hw_key->hw_key_idx); + break; + + default: + IWL_ERR(priv, "Unknown encode alg %d\n", keyinfo->alg); + break; + } +} + +/* + * handle build REPLY_TX command notification. 
+ */ +static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, u8 std_id) +{ + struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; + __le32 tx_flags = tx_cmd->tx_flags; + __le16 fc = hdr->frame_control; + + tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if (ieee80211_is_mgmt(fc)) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_resp(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + tx_cmd->sta_id = std_id; + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else { + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); + + if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) + tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); + } else { + tx_cmd->timeout.pm_frame_timeout = 0; + } + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = tx_flags; + tx_cmd->next_frame_len = 0; +} + +/* + * start REPLY_TX command process + */ +static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl3945_tx_cmd *tx_cmd; + struct iwl_tx_queue *txq = NULL; + struct iwl_queue *q = NULL; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + int txq_id = skb_get_queue_mapping(skb); + u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */ + u8 id; + u8 unicast; + u8 sta_id; + u8 tid = 0; + u16 seq_number = 0; + __le16 fc; + u8 wait_write_ptr = 0; + u8 *qc = NULL; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); + goto drop_unlock; + } + + if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) { + IWL_ERR(priv, "ERROR: No TX rate available.\n"); + goto drop_unlock; + } + + unicast = !is_multicast_ether_addr(hdr->addr1); + id = 0; + + fc = hdr->frame_control; + +#ifdef CONFIG_IWLWIFI_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); + else if (ieee80211_is_assoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); +#endif + + /* drop all non-injected data frame if we are not associated */ + if (ieee80211_is_data(fc) && + !(info->flags & IEEE80211_TX_CTL_INJECTED) && + (!iwl_is_associated(priv) || + ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) { + IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n"); + goto drop_unlock; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + hdr_len = ieee80211_hdrlen(fc); + + /* Find (or create) index into station table for destination station */ + if (info->flags & 
IEEE80211_TX_CTL_INJECTED) + sta_id = priv->hw_params.bcast_sta_id; + else + sta_id = iwl_get_sta_id(priv, hdr); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop; + } + + IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id); + + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (unlikely(tid >= MAX_TID_COUNT)) + goto drop; + seq_number = priv->stations[sta_id].tid[tid].seq_number & + IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = cpu_to_le16(seq_number) | + (hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG)); + seq_number += 0x10; + } + + /* Descriptor for chosen Tx queue */ + txq = &priv->txq[txq_id]; + q = &txq->q; + + if ((iwl_queue_space(q) < q->high_mark)) + goto drop; + + spin_lock_irqsave(&priv->lock, flags); + + idx = get_cmd_index(q, q->write_ptr, 0); + + /* Set up driver data for this TFD */ + memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->write_ptr].skb[0] = skb; + + /* Init first empty entry in queue's array of Tx/cmd buffers */ + out_cmd = txq->cmd[idx]; + out_meta = &txq->meta[idx]; + tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(tx_cmd, 0, sizeof(*tx_cmd)); + + /* + * Set up the Tx-command (not MAC!) header. + * Store the chosen Tx queue and TFD index within the sequence field; + * after Tx, uCode's Tx response will return this value so driver can + * locate the frame within the tx queue and do post-tx processing. + */ + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->write_ptr))); + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + + if (info->control.hw_key) + iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id); + + /* set is_hcca to 0; it probably will never be implemented */ + iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0); + + /* Total # bytes to be transmitted */ + len = (u16)skb->len; + tx_cmd->len = cpu_to_le16(len); + + iwl_dbg_log_tx_data_frame(priv, len, hdr); + iwl_update_stats(priv, true, fc, len); + tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; + tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + if (qc) + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", + le16_to_cpu(out_cmd->hdr.sequence)); + IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags)); + iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, + ieee80211_hdrlen(fc)); + + /* + * Use the first empty entry in this queue's command buffer array + * to contain the Tx command and MAC header concatenated together + * (payload data will be in another buffer). + * Size of this varies, due to varying MAC header length. + * If end is not dword aligned, we'll have 2 extra bytes at the end + * of the MAC header (device reads on dword boundaries). + * We'll tell device about this padding later. 
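+ *
+ * The rounding below is plain arithmetic on the computed length:
+ * "(len + 3) & ~3" rounds len up to the next multiple of 4 (for
+ * example 58 becomes 60). len_org only records whether pad bytes were
+ * added; it is not used further here (see the TODO above).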
+ */ + len = sizeof(struct iwl3945_tx_cmd) + + sizeof(struct iwl_cmd_header) + hdr_len; + + len_org = len; + len = (len + 3) & ~3; + + if (len_org != len) + len_org = 1; + else + len_org = 0; + + /* Physical address of this Tx command's header (not MAC header!), + * within command buffer array. */ + txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, + len, PCI_DMA_TODEVICE); + /* we do not map meta data ... so we can safely access address to + * provide to unmap command*/ + pci_unmap_addr_set(out_meta, mapping, txcmd_phys); + pci_unmap_len_set(out_meta, len, len); + + /* Add buffer containing Tx command and MAC(!) header to TFD's + * first entry */ + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + txcmd_phys, len, 1, 0); + + + /* Set up TFD's 2nd entry to point directly to remainder of skb, + * if any (802.11 null frames have no payload). */ + len = skb->len - hdr_len; + if (len) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + len, PCI_DMA_TODEVICE); + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, len, + 0, U32_PAD(len)); + } + + + /* Tell device the write index *just past* this latest filled TFD */ + q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + if ((iwl_queue_space(q) < q->high_mark) + && priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } + + iwl_stop_queue(priv, skb_get_queue_mapping(skb)); + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); +drop: + return -1; +} + +#define BEACON_TIME_MASK_LOW 0x00FFFFFF +#define BEACON_TIME_MASK_HIGH 0xFF000000 +#define TIME_UNIT 1024 + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in 8:24 format + * the high 1 byte is the beacon counts + * the lower 3 bytes is the time in usec within one beacon interval + */ + +static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * 1024; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24); + rem = (usec % interval) & BEACON_TIME_MASK_LOW; + + return (quot << 24) + rem; +} + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ + +static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval) +{ + u32 base_low = base & BEACON_TIME_MASK_LOW; + u32 addon_low = addon & BEACON_TIME_MASK_LOW; + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & BEACON_TIME_MASK_HIGH) + + (addon & BEACON_TIME_MASK_HIGH); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << 24); + } else + res += (1 << 24); + + return cpu_to_le32(res); +} + +static int iwl3945_get_measurement(struct iwl_priv *priv, + struct ieee80211_measurement_params *params, + u8 type) +{ + struct iwl_spectrum_cmd spectrum; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = REPLY_SPECTRUM_MEASUREMENT_CMD, + .data = (void *)&spectrum, + .flags = CMD_WANT_SKB, + }; + u32 add_time = le64_to_cpu(params->start_time); + int rc; + int spectrum_resp_status; + int duration = le16_to_cpu(params->duration); + + if (iwl_is_associated(priv)) + add_time = + 
iwl3945_usecs_to_beacons( + le64_to_cpu(params->start_time) - priv->last_tsf, + le16_to_cpu(priv->rxon_timing.beacon_interval)); + + memset(&spectrum, 0, sizeof(spectrum)); + + spectrum.channel_count = cpu_to_le16(1); + spectrum.flags = + RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; + spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; + cmd.len = sizeof(spectrum); + spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); + + if (iwl_is_associated(priv)) + spectrum.start_time = + iwl3945_add_beacon_time(priv->last_beacon_time, + add_time, + le16_to_cpu(priv->rxon_timing.beacon_interval)); + else + spectrum.start_time = 0; + + spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); + spectrum.channels[0].channel = params->channel; + spectrum.channels[0].type = type; + if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK) + spectrum.flags |= RXON_FLG_BAND_24G_MSK | + RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) + return rc; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); + rc = -EIO; + } + + spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); + switch (spectrum_resp_status) { + case 0: /* Command will be handled */ + if (pkt->u.spectrum.id != 0xff) { + IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", + pkt->u.spectrum.id); + priv->measurement_status &= ~MEASUREMENT_READY; + } + priv->measurement_status |= MEASUREMENT_ACTIVE; + rc = 0; + break; + + case 1: /* Command will not be handled */ + rc = -EAGAIN; + break; + } + + iwl_free_pages(priv, cmd.reply_page); + + return rc; +} + +static void iwl3945_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + iwl3945_disable_events(priv); + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... */ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARN(priv, "uCode did not respond OK.\n"); +} + +static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); +#endif + + IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); + return; +} + +static void iwl3945_bg_beacon_update(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, beacon_update); + struct sk_buff *beacon; + + /* Pull updated AP beacon from mac80211. 
will fail if not in AP mode */ + beacon = ieee80211_beacon_get(priv->hw, priv->vif); + + if (!beacon) { + IWL_ERR(priv, "update beacon failed\n"); + return; + } + + mutex_lock(&priv->mutex); + /* new beacon skb is allocated every time; dispose previous.*/ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = beacon; + mutex_unlock(&priv->mutex); + + iwl3945_send_beacon_cmd(priv); +} + +static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); + u8 rate = beacon->beacon_notify_hdr.rate; + + IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + if ((priv->iw_mode == NL80211_IFTYPE_AP) && + (!test_bit(STATUS_EXIT_PENDING, &priv->status))) + queue_work(priv->workqueue, &priv->beacon_update); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_WARN(priv, "Card state received: HW:%s SW:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On"); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + iwl_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status))) + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +/** + * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. + */ +static void iwl3945_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; + priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; + priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; + + /* + * The same handler is used for both the REPLY to a discrete + * statistics request from the host as well as for the periodic + * statistics notifications (after received beacons) from the uCode. 
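+ * (Both command IDs therefore point at iwl3945_hw_rx_statistics() below.)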
+ */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; + + iwl_setup_rx_scan_handlers(priv); + priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; + + /* Set up hardware specific Rx handlers */ + iwl3945_hw_rx_handler_setup(priv); +} + +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * The host allocates 32 DMA target addresses and passes the host address + * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is + * 0 to 31 + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. + * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl3945_rx_queue_restock + * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl3945_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls iwl3945_rx_queue_restock to refill any empty + * slots. + * ... 
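+ *
+ * iwl3945_rx_replenish_now() is the tasklet-context variant of the
+ * replenish path: it refills rx_free with GFP_ATOMIC allocations and
+ * restocks immediately instead of waiting for the scheduled
+ * iwl3945_rx_replenish() work item.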
+ * + */ + +/** + * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)dma_addr); +} + +/** + * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +static void iwl3945_rx_queue_restock(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + int write; + + spin_lock_irqsave(&rxq->lock, flags); + write = rxq->write & ~0x7; + while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + /* Get next free Rx buffer, remove from free list */ + element = rxq->rx_free.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if ((rxq->write_actual != (rxq->write & ~0x7)) + || (abs(rxq->write - rxq->read) > 7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + iwl_rx_queue_update_write_ptr(priv, rxq); + } +} + +/** + * iwl3945_rx_replenish - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via iwl3945_rx_queue_restock. + * This is called as a scheduled work item (except for during initialization) + */ +static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + struct page *page; + unsigned long flags; + gfp_t gfp_mask = priority; + + while (1) { + spin_lock_irqsave(&rxq->lock, flags); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + return; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (priv->hw_params.rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? 
"GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + break; + } + + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, priv->hw_params.rx_page_order); + return; + } + element = rxq->rx_used.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + spin_unlock_irqrestore(&rxq->lock, flags); + + rxb->page = page; + /* Get physical address of RB/SKB */ + rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + + spin_lock_irqsave(&rxq->lock, flags); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + priv->alloc_rxb_page++; + + spin_unlock_irqrestore(&rxq->lock, flags); + } +} + +void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +void iwl3945_rx_replenish(void *data) +{ + struct iwl_priv *priv = data; + unsigned long flags; + + iwl3945_rx_allocate(priv, GFP_KERNEL); + + spin_lock_irqsave(&priv->lock, flags); + iwl3945_rx_queue_restock(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl3945_rx_replenish_now(struct iwl_priv *priv) +{ + iwl3945_rx_allocate(priv, GFP_ATOMIC); + + iwl3945_rx_queue_restock(priv); +} + + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + } + + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->dma_addr); + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->bd = NULL; + rxq->rb_stts = NULL; +} + + +/* Convert linear signal-to-noise ratio into dB */ +static u8 ratio2dB[100] = { +/* 0 1 2 3 4 5 6 7 8 9 */ + 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ + 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ + 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ + 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ + 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ + 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ + 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ + 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ + 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ + 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ +}; + +/* Calculates a relative dB value from a ratio of linear + * (i.e. not dB) signal levels. + * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ +int iwl3945_calc_db_from_ratio(int sig_ratio) +{ + /* 1000:1 or higher just report as 60 dB */ + if (sig_ratio >= 1000) + return 60; + + /* 100:1 or higher, divide by 10 and use table, + * add 20 dB to make up for divide by 10 */ + if (sig_ratio >= 100) + return 20 + (int)ratio2dB[sig_ratio/10]; + + /* We shouldn't see this */ + if (sig_ratio < 1) + return 0; + + /* Use table for ratios 1:1 - 99:1 */ + return (int)ratio2dB[sig_ratio]; +} + +/** + * iwl3945_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +static void iwl3945_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty = 0; + + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). 
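+ * The value is masked to its low 12 bits (0x0FFF) below and then
+ * compared against the driver's own read index.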
*/ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); + + while (i != r) { + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_unmap_page(priv->pci_dev, rxb->page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + pkt = rxb_addr(rxb); + + trace_iwlwifi_dev_rx(priv, pkt, + le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. See iwl3945_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, + get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG_RX(priv, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some rx_handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking iwl_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + iwl_tx_cmd_complete(priv, rxb); + else + IWL_WARN(priv, "Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, + 0, PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode won't assert. 
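+ * fill_rx is set above when more than half of the queue needs
+ * restocking; in that case the read pointer is published and the queue
+ * replenished every few packets inside the loop (see the count logic
+ * below) instead of only once after the loop.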
*/ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + iwl3945_rx_replenish_now(priv); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + iwl3945_rx_replenish_now(priv); + else + iwl3945_rx_queue_restock(priv); +} + +/* call this function to flush any scheduled tasklet */ +static inline void iwl_synchronize_irq(struct iwl_priv *priv) +{ + /* wait to make sure we flush pending tasklet*/ + synchronize_irq(priv->pci_dev->irq); + tasklet_kill(&priv->irq_tasklet); +} + +static const char *desc_lookup(int i) +{ + switch (i) { + case 1: + return "FAIL"; + case 2: + return "BAD_PARAM"; + case 3: + return "BAD_CHECKSUM"; + case 4: + return "NMI_INTERRUPT"; + case 5: + return "SYSASSERT"; + case 6: + return "FATAL_ERROR"; + } + + return "UNKNOWN"; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +void iwl3945_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 i; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + + if (!iwl3945_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base); + return; + } + + + count = iwl_read_targ_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERR(priv, "Start IWL Error Log Dump:\n"); + IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", + priv->status, count); + } + + IWL_ERR(priv, "Desc Time asrtPC blink2 " + "ilink1 nmiPC Line\n"); + for (i = ERROR_START_OFFSET; + i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; + i += ERROR_ELEM_SIZE) { + desc = iwl_read_targ_mem(priv, base + i); + time = + iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32)); + blink1 = + iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32)); + blink2 = + iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32)); + ilink1 = + iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32)); + ilink2 = + iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32)); + data1 = + iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32)); + + IWL_ERR(priv, + "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", + desc_lookup(desc), desc, time, blink1, blink2, + ilink1, ilink2, data1); + trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0, + 0, blink1, blink2, ilink1, ilink2); + } +} + +#define EVENT_START_OFFSET (6 * sizeof(u32)) + +/** + * iwl3945_print_event_log - Dump error event log to syslog + * + */ +static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode, + int pos, char **buf, size_t bufsz) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (num_events == 0) + return pos; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + + /* Set starting address; reads will auto-increment */ + _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + rmb(); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. 
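+ *
+ * Each log entry is read back as consecutive words through the
+ * auto-incrementing HBUS_TARG_MEM_RDAT port: (event id, data) when
+ * mode == 0, or (event id, timestamp, data) when mode == 1.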
*/ + for (i = 0; i < num_events; i++) { + ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + /* data, ev */ + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "0x%08x:%04u\n", + time, ev); + } else { + IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); + trace_iwlwifi_dev_ucode_event(priv, 0, + time, ev); + } + } else { + data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "%010u:0x%08x:%04u\n", + time, data, ev); + } else { + IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", + time, data, ev); + trace_iwlwifi_dev_ucode_event(priv, time, + data, ev); + } + } + } + + /* Allow device to power down */ + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return pos; +} + +/** + * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog + */ +static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity, + u32 num_wraps, u32 next_entry, + u32 size, u32 mode, + int pos, char **buf, size_t bufsz) +{ + /* + * display the newest DEFAULT_LOG_ENTRIES entries + * i.e the entries just before the next ont that uCode would fill. + */ + if (num_wraps) { + if (next_entry < size) { + pos = iwl3945_print_event_log(priv, + capacity - (size - next_entry), + size - next_entry, mode, + pos, buf, bufsz); + pos = iwl3945_print_event_log(priv, 0, + next_entry, mode, + pos, buf, bufsz); + } else + pos = iwl3945_print_event_log(priv, next_entry - size, + size, mode, + pos, buf, bufsz); + } else { + if (next_entry < size) + pos = iwl3945_print_event_log(priv, 0, + next_entry, mode, + pos, buf, bufsz); + else + pos = iwl3945_print_event_log(priv, next_entry - size, + size, mode, + pos, buf, bufsz); + } + return pos; +} + +/* For sanity check only. Actual size is determined by uCode, typ. 512 */ +#define IWL3945_MAX_EVENT_LOG_SIZE (512) + +#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) + +int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display) +{ + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + int pos = 0; + size_t bufsz = 0; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl3945_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); + return -EINVAL; + } + + /* event log header */ + capacity = iwl_read_targ_mem(priv, base); + mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); + + if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) { + IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", + capacity, IWL3945_MAX_EVENT_LOG_SIZE); + capacity = IWL3945_MAX_EVENT_LOG_SIZE; + } + + if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) { + IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", + next_entry, IWL3945_MAX_EVENT_LOG_SIZE); + next_entry = IWL3945_MAX_EVENT_LOG_SIZE; + } + + size = num_wraps ? 
capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); + return pos; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) + size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; +#else + size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; +#endif + + IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n", + size); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (display) { + if (full_log) + bufsz = capacity * 48; + else + bufsz = size * 48; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + } + if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { + /* if uCode has wrapped back to top of log, + * start at the oldest entry, + * i.e the next one that uCode would fill. + */ + if (num_wraps) + pos = iwl3945_print_event_log(priv, next_entry, + capacity - next_entry, mode, + pos, buf, bufsz); + + /* (then/else) start at top of log */ + pos = iwl3945_print_event_log(priv, 0, next_entry, mode, + pos, buf, bufsz); + } else + pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#else + pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#endif + return pos; +} + +static void iwl3945_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. */ + if (inta_fh & CSR39_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR39_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. " + "Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + priv->isr_stats.sw_err = inta; + iwl_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_rx_queue_update_write_ptr(priv, &priv->rxq); + iwl_txq_update_write_ptr(priv, &priv->txq[0]); + iwl_txq_update_write_ptr(priv, &priv->txq[1]); + iwl_txq_update_write_ptr(priv, &priv->txq[2]); + iwl_txq_update_write_ptr(priv, &priv->txq[3]); + iwl_txq_update_write_ptr(priv, &priv->txq[4]); + iwl_txq_update_write_ptr(priv, &priv->txq[5]); + + priv->isr_stats.wakeup++; + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl3945_rx_handle(priv); + priv->isr_stats.rx++; + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR(priv, "Tx interrupt\n"); + priv->isr_stats.tx++; + + iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); + iwl_write_direct32(priv, FH39_TCSR_CREDIT + (FH39_SRVC_CHNL), 0x0); + handled |= CSR_INT_BIT_FH_TX; + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~priv->inta_mask) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + /* only Re-enable if disabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif +} + +static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, + enum ieee80211_band band, + u8 is_active, u8 n_probes, + struct iwl3945_scan_channel *scan_ch) +{ + struct ieee80211_channel *chan; + const struct ieee80211_supported_band *sband; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) + return 0; + + active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); + passive_dwell = 
iwl_get_passive_dwell_time(priv, band); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { + chan = priv->scan_request->channels[i]; + + if (chan->band != band) + continue; + + scan_ch->channel = chan->hw_value; + + ch_info = iwl_get_channel_info(priv, band, scan_ch->channel); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n", + scan_ch->channel); + continue; + } + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + /* If passive , set up for auto-switch + * and use long active_dwell time. + */ + if (!is_active || is_channel_passive(ch_info) || + (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { + scan_ch->type = 0; /* passive */ + if (IWL_UCODE_API(priv->ucode_ver) == 1) + scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1); + } else { + scan_ch->type = 1; /* active */ + } + + /* Set direct probe bits. These may be used both for active + * scan channels (probes gets sent right away), + * or for passive channels (probes get se sent only after + * hearing clear Rx packet).*/ + if (IWL_UCODE_API(priv->ucode_ver) >= 2) { + if (n_probes) + scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); + } else { + /* uCode v1 does not allow setting direct probe bits on + * passive channel. */ + if ((scan_ch->type & 1) && n_probes) + scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); + } + + /* Set txpower levels to defaults */ + scan_ch->tpc.dsp_atten = 110; + /* scan_pwr_info->tpc.dsp_atten; */ + + /*scan_pwr_info->tpc.tx_gain; */ + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; + else { + scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + } + + IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n", + scan_ch->channel, + (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", + (scan_ch->type & 1) ? + active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added); + return added; +} + +static void iwl3945_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT; i++) { + rates[i].bitrate = iwl3945_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on indexes */ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) { + /* + * If CCK != 1M then set short preamble rate flag. + */ + rates[i].flags |= (iwl3945_rates[i].plcp == 10) ? + 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) +{ + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); + iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); +} + +/** + * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. 
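+ *
+ * Reads instruction SRAM back word by word through the auto-incrementing
+ * HBUS_TARG_MEM_RDAT port, compares it against the host copy, and gives
+ * up after 20 mismatches.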
+ */ +static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + u32 save_len = len; + int rc = 0; + u32 errcnt; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, + IWL39_RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + rc = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + + if (!errcnt) + IWL_DEBUG_INFO(priv, + "ucode image in INSTRUCTION memory is good\n"); + + return rc; +} + + +/** + * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. + */ +static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int rc = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, + i + IWL39_RTC_INST_LOWER_BOUND); + val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { +#if 0 /* Enable this if you want to see details */ + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + i, val, *image); +#endif + rc = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + return rc; +} + + +/** + * iwl3945_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +static int iwl3945_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int rc = 0; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl3945_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + rc = iwl3945_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + rc = iwl3945_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Since nothing seems to match, show first several data entries in + * instruction SRAM, so maybe visual inspection will give a clue. + * Selection of bootstrap image (vs. other images) is arbitrary. 
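+ * (The sparse checks above only sample one word every 100 bytes; this
+ * full word-by-word comparison is reached only after all of them have
+ * failed.)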
*/ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl3945_verify_inst_full(priv, image, len); + + return rc; +} + +static void iwl3945_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +/** + * iwl3945_read_ucode - Read uCode images from disk file. + * + * Copy into buffers for card to fetch via bus-mastering + */ +static int iwl3945_read_ucode(struct iwl_priv *priv) +{ + const struct iwl_ucode_header *ucode; + int ret = -EINVAL, index; + const struct firmware *ucode_raw; + /* firmware file name contains uCode/driver compatibility version */ + const char *name_pre = priv->cfg->fw_name_pre; + const unsigned int api_max = priv->cfg->ucode_api_max; + const unsigned int api_min = priv->cfg->ucode_api_min; + char buf[25]; + u8 *src; + size_t len; + u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; + + /* Ask kernel firmware_class module to get the boot firmware off disk. + * request_firmware() is synchronous, file is in memory on return. */ + for (index = api_max; index >= api_min; index--) { + sprintf(buf, "%s%u%s", name_pre, index, ".ucode"); + ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev); + if (ret < 0) { + IWL_ERR(priv, "%s firmware file req failed: %d\n", + buf, ret); + if (ret == -ENOENT) + continue; + else + goto error; + } else { + if (index < api_max) + IWL_ERR(priv, "Loaded firmware %s, " + "which is deprecated. " + " Please use API v%u instead.\n", + buf, api_max); + IWL_DEBUG_INFO(priv, "Got firmware '%s' file " + "(%zd bytes) from disk\n", + buf, ucode_raw->size); + break; + } + } + + if (ret < 0) + goto error; + + /* Make sure that we got at least our header! */ + if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) { + IWL_ERR(priv, "File size way too small!\n"); + ret = -EINVAL; + goto err_release; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct iwl_ucode_header *)ucode_raw->data; + + priv->ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IWL_UCODE_API(priv->ucode_ver); + inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver); + data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver); + init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver); + init_data_size = + priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver); + boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver); + src = priv->cfg->ops->ucode->get_data(ucode, api_ver); + + /* api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward */ + + if (api_ver < api_min || api_ver > api_max) { + IWL_ERR(priv, "Driver unable to support your firmware API. " + "Driver supports v%u, firmware is v%u.\n", + api_max, api_ver); + priv->ucode_ver = 0; + ret = -EINVAL; + goto err_release; + } + if (api_ver != api_max) + IWL_ERR(priv, "Firmware has old API version. Expected %u, " + "got %u. 
New firmware can be obtained " + "from http://www.intellinuxwireless.org.\n", + api_max, api_ver); + + IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + snprintf(priv->hw->wiphy->fw_version, + sizeof(priv->hw->wiphy->fw_version), + "%u.%u.%u.%u", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", + priv->ucode_ver); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", + inst_size); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n", + data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n", + init_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n", + init_data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n", + boot_size); + + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size != priv->cfg->ops->ucode->get_header_size(api_ver) + + inst_size + data_size + init_size + + init_data_size + boot_size) { + + IWL_DEBUG_INFO(priv, + "uCode file size %zd does not match expected size\n", + ucode_raw->size); + ret = -EINVAL; + goto err_release; + } + + /* Verify that uCode images will fit in card's SRAM */ + if (inst_size > IWL39_MAX_INST_SIZE) { + IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n", + inst_size); + ret = -EINVAL; + goto err_release; + } + + if (data_size > IWL39_MAX_DATA_SIZE) { + IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n", + data_size); + ret = -EINVAL; + goto err_release; + } + if (init_size > IWL39_MAX_INST_SIZE) { + IWL_DEBUG_INFO(priv, + "uCode init instr len %d too large to fit in\n", + init_size); + ret = -EINVAL; + goto err_release; + } + if (init_data_size > IWL39_MAX_DATA_SIZE) { + IWL_DEBUG_INFO(priv, + "uCode init data len %d too large to fit in\n", + init_data_size); + ret = -EINVAL; + goto err_release; + } + if (boot_size > IWL39_MAX_BSM_SIZE) { + IWL_DEBUG_INFO(priv, + "uCode boot instr len %d too large to fit in\n", + boot_size); + ret = -EINVAL; + goto err_release; + } + + /* Allocate ucode buffers for card's bus-master loading ... 
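+ * (allocated with iwl_alloc_fw_desc(); failure of any required buffer
+ * bails out to err_pci_alloc below and returns -ENOMEM)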
*/ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = inst_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); + + priv->ucode_data.len = data_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); + + priv->ucode_data_backup.len = data_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Initialization instructions and data */ + if (init_size && init_data_size) { + priv->ucode_init.len = init_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); + + priv->ucode_init_data.len = init_data_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); + + if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) + goto err_pci_alloc; + } + + /* Bootstrap (instructions only, no data) */ + if (boot_size) { + priv->ucode_boot.len = boot_size; + iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); + + if (!priv->ucode_boot.v_addr) + goto err_pci_alloc; + } + + /* Copy images into buffers for card's bus-master reads ... */ + + /* Runtime instructions (first block of data in file) */ + len = inst_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) uCode instr len %zd\n", len); + memcpy(priv->ucode_code.v_addr, src, len); + src += len; + + IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* Runtime data (2nd block) + * NOTE: Copy into backup buffer will be done in iwl3945_up() */ + len = data_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) uCode data len %zd\n", len); + memcpy(priv->ucode_data.v_addr, src, len); + memcpy(priv->ucode_data_backup.v_addr, src, len); + src += len; + + /* Initialization instructions (3rd block) */ + if (init_size) { + len = init_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init instr len %zd\n", len); + memcpy(priv->ucode_init.v_addr, src, len); + src += len; + } + + /* Initialization data (4th block) */ + if (init_data_size) { + len = init_data_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init data len %zd\n", len); + memcpy(priv->ucode_init_data.v_addr, src, len); + src += len; + } + + /* Bootstrap instructions (5th block) */ + len = boot_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) boot instr len %zd\n", len); + memcpy(priv->ucode_boot.v_addr, src, len); + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + return 0; + + err_pci_alloc: + IWL_ERR(priv, "failed to allocate pci memory\n"); + ret = -ENOMEM; + iwl3945_dealloc_ucode_pci(priv); + + err_release: + release_firmware(ucode_raw); + + error: + return ret; +} + + +/** + * iwl3945_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. + * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. 
+ */ +static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv) +{ + dma_addr_t pinst; + dma_addr_t pdata; + + /* bits 31:0 for 3945 */ + pinst = priv->ucode_code.p_addr; + pdata = priv->ucode_data_backup.p_addr; + + /* Tell bootstrap uCode where to find image to load */ + iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + priv->ucode_data.len); + + /* Inst byte count must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, + priv->ucode_code.len | BSM_DRAM_INST_LOAD); + + IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); + + return 0; +} + +/** + * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received + * + * Called after REPLY_ALIVE notification received from "initialize" uCode. + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. + */ +static void iwl3945_init_alive_start(struct iwl_priv *priv) +{ + /* Check alive response for "valid" sign from uCode */ + if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); + goto restart; + } + + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (iwl3945_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. */ + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + if (iwl3945_set_ucode_ptrs(priv)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +/** + * iwl3945_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl3945_init_alive_start()). + */ +static void iwl3945_alive_start(struct iwl_priv *priv) +{ + int thermal_spin = 0; + u32 rfkill; + + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. 
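iwl3945_set_ucode_ptrs() above relies on write ordering: the image pointers and data byte count go in first, and the instruction byte count is written last with BSM_DRAM_INST_LOAD (bit 31) set, which is what tells the bootstrap uCode the new values are complete. The sketch below only illustrates that ordering; reg_write() and the register indices are hypothetical stand-ins for iwl_write_prph() and the BSM_DRAM_*_REG addresses.

#include <stdint.h>
#include <stdio.h>

#define INST_LOAD_FLAG (1u << 31)       /* mirrors BSM_DRAM_INST_LOAD */

/* Hypothetical register indices and writer. */
enum bsm_reg {
        REG_INST_PTR,
        REG_DATA_PTR,
        REG_DATA_BYTECOUNT,
        REG_INST_BYTECOUNT,
};

void reg_write(enum bsm_reg reg, uint32_t val)
{
        printf("write reg %d = 0x%08x\n", (int)reg, val);
}

void point_bsm_at_runtime_image(uint32_t inst_addr, uint32_t inst_len,
                                uint32_t data_addr, uint32_t data_len)
{
        reg_write(REG_INST_PTR, inst_addr);
        reg_write(REG_DATA_PTR, data_addr);
        reg_write(REG_DATA_BYTECOUNT, data_len);
        /* The byte-count write goes last and carries the load flag;
         * only then does the uCode treat the pointers as valid. */
        reg_write(REG_INST_BYTECOUNT, inst_len | INST_LOAD_FLAG);
}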
*/ + if (iwl3945_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); + goto restart; + } + + iwl_clear_stations_table(priv); + + rfkill = iwl_read_prph(priv, APMG_RFKILL_REG); + IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); + + if (rfkill & 0x1) { + clear_bit(STATUS_RF_KILL_HW, &priv->status); + /* if RFKILL is not on, then wait for thermal + * sensor in adapter to kick in */ + while (iwl3945_hw_get_temperature(priv) == 0) { + thermal_spin++; + udelay(10); + } + + if (thermal_spin) + IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n", + thermal_spin * 10); + } else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + /* After the ALIVE response, we can send commands to 3945 uCode */ + set_bit(STATUS_ALIVE, &priv->status); + + if (iwl_is_rfkill(priv)) + return; + + ieee80211_wake_queues(priv->hw); + + priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + iwl_power_update_mode(priv, true); + + if (iwl_is_associated(priv)) { + struct iwl3945_rxon_cmd *active_rxon = + (struct iwl3945_rxon_cmd *)(&priv->active_rxon); + + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + iwl_connection_init_rx_config(priv, priv->iw_mode); + } + + /* Configure Bluetooth device coexistence support */ + iwl_send_bt_config(priv); + + /* Configure the adapter for unassociated operation */ + iwlcore_commit_rxon(priv); + + iwl3945_reg_txpower_periodic(priv); + + iwl_leds_init(priv); + + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); + set_bit(STATUS_READY, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + + /* reassociate for ADHOC mode */ + if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) { + struct sk_buff *beacon = ieee80211_beacon_get(priv->hw, + priv->vif); + if (beacon) + iwl_mac_beacon_update(priv->hw, beacon); + } + + if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status)) + iwl_set_mode(priv, priv->iw_mode); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl3945_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl3945_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); + struct ieee80211_conf *conf = NULL; + + IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); + + conf = ieee80211_get_hw_conf(priv->hw); + + if (!exit_pending) + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_clear_stations_table(priv); + + /* Unblock any waiting calls */ + wake_up_interruptible_all(&priv->wait_command_queue); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + iwl_synchronize_irq(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called iwl3945_init() then + * clear all bits but the RF Kill bits and return */ + if (!iwl_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | 
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill + * bit and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + + iwl3945_hw_txq_ctx_stop(priv); + iwl3945_hw_rxq_stop(priv); + + /* Power-down device's busmaster DMA clocks */ + iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + + /* Stop the device, and put it in low power state */ + priv->cfg->ops->lib->apm_ops.stop(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + priv->ibss_beacon = NULL; + + /* clear out any free frames */ + iwl3945_clear_free_frames(priv); +} + +static void iwl3945_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl3945_down(priv); + mutex_unlock(&priv->mutex); + + iwl3945_cancel_deferred_work(priv); +} + +#define MAX_HW_RESTARTS 5 + +static int __iwl3945_up(struct iwl_priv *priv) +{ + int rc, i; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { + IWL_ERR(priv, "ucode not available for device bring up\n"); + return -EIO; + } + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else { + set_bit(STATUS_RF_KILL_HW, &priv->status); + IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); + return -ENODEV; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + rc = iwl3945_hw_nic_init(priv); + if (rc) { + IWL_ERR(priv, "Unable to int nic\n"); + return rc; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. */ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + /* We return success when we resume from suspend and rf_kill is on. 
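The two status rebuilds above keep only a short whitelist of bits (RF kill, geo configured, firmware error, exit pending) by OR-ing together test_bit(B) << B terms; AND-ing the status word against a plain whitelist mask gives the same result. A small sketch of that equivalent form, with hypothetical bit positions standing in for the STATUS_* constants:

#include <stdint.h>

/* Hypothetical bit positions; the driver's STATUS_* constants play the
 * same role. */
enum {
        ST_RF_KILL_HW     = 0,
        ST_GEO_CONFIGURED = 1,
        ST_FW_ERROR       = 2,
        ST_EXIT_PENDING   = 3,
};

/* Clear every bit of a status word except the whitelisted ones.  This is
 * what the chain of "test_bit(B) << B" terms amounts to: OR together the
 * currently-set whitelisted bits, then AND the result back in. */
uint32_t keep_whitelisted_bits(uint32_t status)
{
        const uint32_t keep = (1u << ST_RF_KILL_HW) |
                              (1u << ST_GEO_CONFIGURED) |
                              (1u << ST_FW_ERROR) |
                              (1u << ST_EXIT_PENDING);

        return status & keep;
}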
*/ + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) + return 0; + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + iwl_clear_stations_table(priv); + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + priv->cfg->ops->lib->load_ucode(priv); + + if (rc) { + IWL_ERR(priv, + "Unable to set up bootstrap uCode: %d\n", rc); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl3945_nic_start(priv); + + IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl3945_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl3945_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_init_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl3945_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +/* + * 3945 cannot interrupt driver when hardware rf kill switch toggles; + * driver must poll CSR_GP_CNTRL_REG register for change. This register + * *is* readable even when device has been SW_RESET into low power mode + * (e.g. during RF KILL). + */ +static void iwl3945_rfkill_poll(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rfkill_poll.work); + bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status); + bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL) + & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); + + if (new_rfkill != old_rfkill) { + if (new_rfkill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill); + + IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n", + new_rfkill ? "disable radio" : "enable radio"); + } + + /* Keep this running, even if radio now enabled. 
This will be + * cancelled in mac_start() if system decides to start again */ + queue_delayed_work(priv->workqueue, &priv->rfkill_poll, + round_jiffies_relative(2 * HZ)); + +} + +#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) +static void iwl3945_bg_request_scan(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, request_scan); + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl3945_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; + int rc = 0; + struct iwl3945_scan_cmd *scan; + struct ieee80211_conf *conf = NULL; + u8 n_probes = 0; + enum ieee80211_band band; + bool is_active = false; + + conf = ieee80211_get_hw_conf(priv->hw); + + mutex_lock(&priv->mutex); + + cancel_delayed_work(&priv->scan_check); + + if (!iwl_is_ready(priv)) { + IWL_WARN(priv, "request scan called when driver not ready.\n"); + goto done; + } + + /* Make sure the scan wasn't canceled before this queued work + * was given the chance to run... */ + if (!test_bit(STATUS_SCANNING, &priv->status)) + goto done; + + /* This should never be called or scheduled if there is currently + * a scan active in the hardware. */ + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests " + "Ignoring second request.\n"); + rc = -EIO; + goto done; + } + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n"); + goto done; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_HC(priv, + "Scan request while abort pending. Queuing.\n"); + goto done; + } + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n"); + goto done; + } + + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_DEBUG_HC(priv, + "Scan request while uninitialized. Queuing.\n"); + goto done; + } + + if (!priv->scan_bands) { + IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n"); + goto done; + } + + if (!priv->scan) { + priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan) { + rc = -ENOMEM; + goto done; + } + } + scan = priv->scan; + memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_is_associated(priv)) { + u16 interval = 0; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + unsigned long flags; + + IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); + + spin_lock_irqsave(&priv->lock, flags); + interval = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(200 * 1024); + if (!interval) + interval = suspend_time; + /* + * suspend time format: + * 0-19: beacon interval in usec (time before exec.) 
+ * 20-23: 0 + * 24-31: number of beacons (suspend between channels) + */ + + extra = (suspend_time / interval) << 24; + scan_suspend_time = 0xFF0FFFFF & + (extra | ((suspend_time % interval) * 1024)); + + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + for (i = 0; i < priv->scan_request->n_ssids; i++) { + /* always does wildcard anyway */ + if (!priv->scan_request->ssids[i].ssid_len) + continue; + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + priv->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + priv->scan_request->ssids[i].ssid, + priv->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + is_active = true; + } else + IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n"); + + /* We don't build a direct scan probe request; the uCode will do + * that based on the direct_mask added to each channel entry */ + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + /* flags + rate selection */ + + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) { + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + scan->tx_cmd.rate = IWL_RATE_1M_PLCP; + scan->good_CRC_th = 0; + band = IEEE80211_BAND_2GHZ; + } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { + scan->tx_cmd.rate = IWL_RATE_6M_PLCP; + /* + * If active scaning is requested but a certain channel + * is marked passive, we can do active scanning if we + * detect transmissions. + */ + scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; + band = IEEE80211_BAND_5GHZ; + } else { + IWL_WARN(priv, "Invalid scan band count\n"); + goto done; + } + + scan->tx_cmd.len = cpu_to_le16( + iwl_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + priv->scan_request->ie, + priv->scan_request->ie_len, + IWL_MAX_SCAN_SIZE - sizeof(*scan))); + + /* select Rx antennas */ + scan->flags |= iwl3945_get_antenna_flags(priv); + + if (iwl_is_monitor_mode(priv)) + scan->filter_flags = RXON_FILTER_PROMISC_MSK; + + scan->channel_count = + iwl3945_get_channels_for_scan(priv, band, is_active, n_probes, + (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); + + if (scan->channel_count == 0) { + IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); + goto done; + } + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl3945_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) + goto done; + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IWL_SCAN_CHECK_WATCHDOG); + + mutex_unlock(&priv->mutex); + return; + + done: + /* can not perform scan make sure we clear scanning + * bits from status so next scan request can be performed. 
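The suspend_time packing above squeezes two values into one 32-bit word: the number of whole beacon intervals in bits 24-31 and the remainder, scaled by 1024, in the low bits, with bits 20-23 forced to zero by the 0xFF0FFFFF mask. A standalone sketch that follows the same arithmetic (units track the driver's beacon_int; nothing here is device-specific):

#include <stdint.h>

/*
 * Pack a scan suspend time the way the 3945 scan command expects:
 *   bits 24-31  number of whole beacon intervals to stay away
 *   bits 20-23  reserved, cleared by the 0xFF0FFFFF mask
 *   bits  0-19  remainder within one interval, multiplied by 1024
 * The caller must ensure beacon_interval != 0, as the driver does.
 */
uint32_t pack_suspend_time(uint32_t suspend, uint32_t beacon_interval)
{
        uint32_t beacons  = suspend / beacon_interval;          /* bits 24-31 */
        uint32_t leftover = (suspend % beacon_interval) * 1024; /* bits 0-19 */

        return 0xFF0FFFFF & ((beacons << 24) | leftover);
}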
+ * if we dont clear scanning status bit here all next scan + * will fail + */ + clear_bit(STATUS_SCAN_HW, &priv->status); + clear_bit(STATUS_SCANNING, &priv->status); + + /* inform mac80211 scan aborted */ + queue_work(priv->workqueue, &priv->scan_completed); + mutex_unlock(&priv->mutex); +} + +static void iwl3945_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { + mutex_lock(&priv->mutex); + priv->vif = NULL; + priv->is_open = 0; + mutex_unlock(&priv->mutex); + iwl3945_down(priv); + ieee80211_restart_hw(priv->hw); + } else { + iwl3945_down(priv); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + __iwl3945_up(priv); + mutex_unlock(&priv->mutex); + } +} + +static void iwl3945_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +#define IWL_DELAY_NEXT_SCAN (HZ*2) + +void iwl3945_post_associate(struct iwl_priv *priv) +{ + int rc = 0; + struct ieee80211_conf *conf = NULL; + + if (priv->iw_mode == NL80211_IFTYPE_AP) { + IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); + return; + } + + + IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", + priv->assoc_id, priv->active_rxon.bssid_addr); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!priv->vif || !priv->is_open) + return; + + iwl_scan_cancel_timeout(priv, 200); + + conf = ieee80211_get_hw_conf(priv->hw); + + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + + memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); + iwl_setup_rxon_timing(priv); + rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (rc) + IWL_WARN(priv, "REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + + IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", + priv->assoc_id, priv->beacon_int); + + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + } + + iwlcore_commit_rxon(priv); + + switch (priv->iw_mode) { + case NL80211_IFTYPE_STATION: + iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); + break; + + case NL80211_IFTYPE_ADHOC: + + priv->assoc_id = 1; + iwl_add_station(priv, priv->bssid, 0, CMD_SYNC, NULL); + iwl3945_sync_sta(priv, IWL_STA_ID, + (priv->band == IEEE80211_BAND_5GHZ) ? 
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP, + CMD_ASYNC); + iwl3945_rate_scale_init(priv->hw, IWL_STA_ID); + iwl3945_send_beacon_cmd(priv); + + break; + + default: + IWL_ERR(priv, "%s Should not be called in %d mode\n", + __func__, priv->iw_mode); + break; + } + + iwl_activate_qos(priv, 0); + + /* we have just associated, don't start scan too early */ + priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +#define UCODE_READY_TIMEOUT (2 * HZ) + +static int iwl3945_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + + /* fetch ucode file from disk, alloc and copy to bus-master buffers ... + * ucode filename and max sizes are card-specific. */ + + if (!priv->ucode_code.len) { + ret = iwl3945_read_ucode(priv); + if (ret) { + IWL_ERR(priv, "Could not read microcode: %d\n", ret); + mutex_unlock(&priv->mutex); + goto out_release_irq; + } + } + + ret = __iwl3945_up(priv); + + mutex_unlock(&priv->mutex); + + if (ret) + goto out_release_irq; + + IWL_DEBUG_INFO(priv, "Start UP work.\n"); + + /* Wait for START_ALIVE from ucode. Otherwise callbacks from + * mac80211 will not be run successfully. */ + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + test_bit(STATUS_READY, &priv->status), + UCODE_READY_TIMEOUT); + if (!ret) { + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_ERR(priv, + "Wait for START_ALIVE timeout after %dms.\n", + jiffies_to_msecs(UCODE_READY_TIMEOUT)); + ret = -ETIMEDOUT; + goto out_release_irq; + } + } + + /* ucode is running and will send rfkill notifications, + * no need to poll the killswitch state anymore */ + cancel_delayed_work(&priv->rfkill_poll); + + iwl_led_start(priv); + + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; + +out_release_irq: + priv->is_open = 0; + IWL_DEBUG_MAC80211(priv, "leave - failed\n"); + return ret; +} + +static void iwl3945_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!priv->is_open) { + IWL_DEBUG_MAC80211(priv, "leave - skip\n"); + return; + } + + priv->is_open = 0; + + if (iwl_is_ready_rf(priv)) { + /* stop mac, cancel any scan request and clear + * RXON_FILTER_ASSOC_MSK BIT + */ + mutex_lock(&priv->mutex); + iwl_scan_cancel_timeout(priv, 100); + mutex_unlock(&priv->mutex); + } + + iwl3945_down(priv); + + flush_workqueue(priv->workqueue); + + /* start polling the killswitch state again */ + queue_delayed_work(priv->workqueue, &priv->rfkill_poll, + round_jiffies_relative(2 * HZ)); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (iwl3945_tx_skb(priv, skb)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return NETDEV_TX_OK; +} + +void iwl3945_config_ap(struct iwl_priv *priv) +{ + int rc = 0; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* The following should be done only at AP bring up */ + if (!(iwl_is_associated(priv))) { + 
+ /* RXON - unassoc (to set timing command) */ + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + + /* RXON Timing */ + memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); + iwl_setup_rxon_timing(priv); + rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), + &priv->rxon_timing); + if (rc) + IWL_WARN(priv, "REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + /* FIXME: what should be the assoc_id for AP? */ + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & + WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == NL80211_IFTYPE_ADHOC) + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* restore RXON assoc */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwlcore_commit_rxon(priv); + iwl_add_station(priv, iwl_bcast_addr, 0, CMD_SYNC, NULL); + } + iwl3945_send_beacon_cmd(priv); + + /* FIXME - we need to add code here to detect a totally new + * configuration, reset the AP, unassoc, rxon timing, assoc, + * clear sta table, add BCAST sta... */ +} + +static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + const u8 *addr; + int ret = 0; + u8 sta_id = IWL_INVALID_STATION; + u8 static_key; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (iwl3945_mod_params.sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + addr = sta ? sta->addr : iwl_bcast_addr; + static_key = !iwl_is_associated(priv); + + if (!static_key) { + sta_id = iwl_find_station(priv, addr); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", + addr); + return -EINVAL; + } + } + + mutex_lock(&priv->mutex); + iwl_scan_cancel_timeout(priv, 100); + mutex_unlock(&priv->mutex); + + switch (cmd) { + case SET_KEY: + if (static_key) + ret = iwl3945_set_static_key(priv, key); + else + ret = iwl3945_set_dynamic_key(priv, key, sta_id); + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (static_key) + ret = iwl3945_remove_static_key(priv); + else + ret = iwl3945_clear_sta_key_info(priv, sta_id); + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + * + * The debug_level being managed using sysfs below is a per device debug + * level that is used instead of the global debug level if it (the per + * device debug level) is set. 
+ */ +static ssize_t show_debug_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); +} +static ssize_t store_debug_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 0, &val); + if (ret) + IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); + else { + priv->debug_level = val; + if (iwl_alloc_traffic_mem(priv)) + IWL_ERR(priv, + "Not enough memory to generate traffic log\n"); + } + return strnlen(buf, count); +} + +static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, + show_debug_level, store_debug_level); + +#endif /* CONFIG_IWLWIFI_DEBUG */ + +static ssize_t show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); +} + +static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); + +static ssize_t show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d\n", priv->tx_power_user_lmt); +} + +static ssize_t store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 10); + if (p == buf) + IWL_INFO(priv, ": %s is not in decimal form.\n", buf); + else + iwl3945_hw_reg_set_txpower(priv, val); + + return count; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); + +static ssize_t show_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); +} + +static ssize_t store_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.flags) != flags) { + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARN(priv, "Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", + flags); + priv->staging_rxon.flags = cpu_to_le32(flags); + iwlcore_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); + +static ssize_t show_filter_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + return sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->active_rxon.filter_flags)); +} + +static ssize_t store_filter_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 filter_flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { + /* Cancel any currently running scans... 
*/ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARN(priv, "Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " + "0x%04X\n", filter_flags); + priv->staging_rxon.filter_flags = + cpu_to_le32(filter_flags); + iwlcore_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, + store_filter_flags); + +static ssize_t show_measurement(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_spectrum_notification measure_report; + u32 size = sizeof(measure_report), len = 0, ofs = 0; + u8 *data = (u8 *)&measure_report; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (!(priv->measurement_status & MEASUREMENT_READY)) { + spin_unlock_irqrestore(&priv->lock, flags); + return 0; + } + memcpy(&measure_report, &priv->measure_report, size); + priv->measurement_status = 0; + spin_unlock_irqrestore(&priv->lock, flags); + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static ssize_t store_measurement(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct ieee80211_measurement_params params = { + .channel = le16_to_cpu(priv->active_rxon.channel), + .start_time = cpu_to_le64(priv->last_tsf), + .duration = cpu_to_le16(1), + }; + u8 type = IWL_MEASURE_BASIC; + u8 buffer[32]; + u8 channel; + + if (count) { + char *p = buffer; + strncpy(buffer, buf, min(sizeof(buffer), count)); + channel = simple_strtoul(p, NULL, 0); + if (channel) + params.channel = channel; + + p = buffer; + while (*p && *p != ' ') + p++; + if (*p) + type = simple_strtoul(p + 1, NULL, 0); + } + + IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on " + "channel %d (for '%s')\n", type, params.channel, buf); + iwl3945_get_measurement(priv, ¶ms, type); + + return count; +} + +static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, + show_measurement, store_measurement); + +static ssize_t store_retry_rate(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + priv->retry_rate = simple_strtoul(buf, NULL, 0); + if (priv->retry_rate <= 0) + priv->retry_rate = 1; + + return count; +} + +static ssize_t show_retry_rate(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d", priv->retry_rate); +} + +static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, + store_retry_rate); + + +static ssize_t show_channels(struct device *d, + struct device_attribute *attr, char *buf) +{ + /* all this shit doesn't belong into sysfs anyway */ + return 0; +} + +static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); + +static ssize_t show_statistics(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 size = sizeof(struct iwl3945_notif_statistics); + u32 len = 0, ofs = 0; + u8 *data = (u8 *)&priv->statistics_39; + int rc = 0; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + mutex_lock(&priv->mutex); + rc = iwl_send_statistics_request(priv, CMD_SYNC, false); + mutex_unlock(&priv->mutex); + + if (rc) { + len = sprintf(buf, + 
"Error sending statistics request: 0x%08X\n", rc); + return len; + } + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); + +static ssize_t show_antenna(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); +} + +static ssize_t store_antenna(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d); + int ant; + + if (count == 0) + return 0; + + if (sscanf(buf, "%1i", &ant) != 1) { + IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n"); + return count; + } + + if ((ant >= 0) && (ant <= 2)) { + IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant); + iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant; + } else + IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant); + + + return count; +} + +static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); + +static ssize_t show_status(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + if (!iwl_is_alive(priv)) + return -EAGAIN; + return sprintf(buf, "0x%08x\n", (int)priv->status); +} + +static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); + +static ssize_t dump_error_log(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + char *p = (char *)buf; + + if (p[0] == '1') + iwl3945_dump_nic_error_log(priv); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); + +/***************************************************************************** + * + * driver setup and tear down + * + *****************************************************************************/ + +static void iwl3945_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->restart, iwl3945_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); + INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); + INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll); + INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); + INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan); + INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); + INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); + + iwl3945_hw_setup_deferred_work(priv); + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl3945_irq_tasklet, (unsigned long)priv); +} + +static void iwl3945_cancel_deferred_work(struct iwl_priv *priv) +{ + iwl3945_hw_cancel_deferred_work(priv); + + cancel_delayed_work_sync(&priv->init_alive_start); + cancel_delayed_work(&priv->scan_check); + cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->beacon_update); +} + +static struct attribute *iwl3945_sysfs_entries[] = { + &dev_attr_antenna.attr, + &dev_attr_channels.attr, + &dev_attr_dump_errors.attr, + 
&dev_attr_flags.attr, + &dev_attr_filter_flags.attr, + &dev_attr_measurement.attr, + &dev_attr_retry_rate.attr, + &dev_attr_statistics.attr, + &dev_attr_status.attr, + &dev_attr_temperature.attr, + &dev_attr_tx_power.attr, +#ifdef CONFIG_IWLWIFI_DEBUG + &dev_attr_debug_level.attr, +#endif + NULL +}; + +static struct attribute_group iwl3945_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl3945_sysfs_entries, +}; + +static struct ieee80211_ops iwl3945_hw_ops = { + .tx = iwl3945_mac_tx, + .start = iwl3945_mac_start, + .stop = iwl3945_mac_stop, + .add_interface = iwl_mac_add_interface, + .remove_interface = iwl_mac_remove_interface, + .config = iwl_mac_config, + .configure_filter = iwl_configure_filter, + .set_key = iwl3945_mac_set_key, + .conf_tx = iwl_mac_conf_tx, + .reset_tsf = iwl_mac_reset_tsf, + .bss_info_changed = iwl_bss_info_changed, + .hw_scan = iwl_mac_hw_scan +}; + +static int iwl3945_init_drv(struct iwl_priv *priv) +{ + int ret; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + + priv->retry_rate = 1; + priv->ibss_beacon = NULL; + + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + mutex_init(&priv->sync_cmd_mutex); + + /* Clear the driver's (not device's) station table */ + iwl_clear_stations_table(priv); + + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->band = IEEE80211_BAND_2GHZ; + + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + + iwl_reset_qos(priv); + + priv->qos_data.qos_active = 0; + priv->qos_data.qos_cap.val = 0; + + priv->rates_mask = IWL_RATES_MASK; + priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; + + if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { + IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", + eeprom->version); + ret = -EINVAL; + goto err; + } + ret = iwl_init_channel_map(priv); + if (ret) { + IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); + goto err; + } + + /* Set up txpower settings in driver for all channels */ + if (iwl3945_txpower_set_from_eeprom(priv)) { + ret = -EIO; + goto err_free_channel_map; + } + + ret = iwlcore_init_geos(priv); + if (ret) { + IWL_ERR(priv, "initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + iwl3945_init_hw_rates(priv, priv->ieee_rates); + + return 0; + +err_free_channel_map: + iwl_free_channel_map(priv); +err: + return ret; +} + +static int iwl3945_setup_mac(struct iwl_priv *priv) +{ + int ret; + struct ieee80211_hw *hw = priv->hw; + + hw->rate_control_algorithm = "iwl-3945-rs"; + hw->sta_data_size = sizeof(struct iwl3945_sta_priv); + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM | + IEEE80211_HW_SPECTRUM_MGMT; + + if (!priv->cfg->broken_powersave) + hw->flags |= IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS; + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + + hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->bands[IEEE80211_BAND_2GHZ]; + + if 
(priv->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->bands[IEEE80211_BAND_5GHZ]; + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + return ret; + } + priv->mac80211_registered = 1; + + return 0; +} + +static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + struct iwl3945_eeprom *eeprom; + unsigned long flags; + + /*********************** + * 1. Allocating HW data + * ********************/ + + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + hw = iwl_alloc_all(cfg, &iwl3945_hw_ops); + if (hw == NULL) { + printk(KERN_ERR DRV_NAME "Can not allocate network device\n"); + err = -ENOMEM; + goto out; + } + priv = hw->priv; + SET_IEEE80211_DEV(hw, &pdev->dev); + + /* + * Disabling hardware scan means that mac80211 will perform scans + * "the hard way", rather than using device's scan. + */ + if (iwl3945_mod_params.disable_hw_scan) { + IWL_DEBUG_INFO(priv, "Disabling hw_scan\n"); + iwl3945_hw_ops.hw_scan = NULL; + } + + + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); + priv->cfg = cfg; + priv->pci_dev = pdev; + priv->inta_mask = CSR_INI_SET_MASK; + +#ifdef CONFIG_IWLWIFI_DEBUG + atomic_set(&priv->restrict_refcnt, 0); +#endif + if (iwl_alloc_traffic_mem(priv)) + IWL_ERR(priv, "Not enough memory to generate traffic log\n"); + + /*************************** + * 2. Initializing PCI bus + * *************************/ + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + IWL_WARN(priv, "No suitable DMA available.\n"); + goto out_pci_disable_device; + } + + pci_set_drvdata(pdev, priv); + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + /*********************** + * 3. Read REV Register + * ********************/ + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, 0x41, 0x00); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&priv->reg_lock); + spin_lock_init(&priv->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /*********************** + * 4. 
Read EEPROM + * ********************/ + + /* Read the EEPROM */ + err = iwl_eeprom_init(priv); + if (err) { + IWL_ERR(priv, "Unable to init EEPROM\n"); + goto out_iounmap; + } + /* MAC Address location in EEPROM same for 3945/4965 */ + eeprom = (struct iwl3945_eeprom *)priv->eeprom; + memcpy(priv->mac_addr, eeprom->mac_address, ETH_ALEN); + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr); + SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); + + /*********************** + * 5. Setup HW Constants + * ********************/ + /* Device-specific setup */ + if (iwl3945_hw_set_hw_params(priv)) { + IWL_ERR(priv, "failed to set hw settings\n"); + goto out_eeprom_free; + } + + /*********************** + * 6. Setup priv + * ********************/ + + err = iwl3945_init_drv(priv); + if (err) { + IWL_ERR(priv, "initializing driver failed\n"); + goto out_unset_hw_params; + } + + IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n", + priv->cfg->name); + + /*********************** + * 7. Setup Services + * ********************/ + + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + pci_enable_msi(priv->pci_dev); + + err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr, + IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); + goto out_disable_msi; + } + + err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group); + if (err) { + IWL_ERR(priv, "failed to create sysfs device attributes\n"); + goto out_release_irq; + } + + iwl_set_rxon_channel(priv, + &priv->bands[IEEE80211_BAND_2GHZ].channels[5]); + iwl3945_setup_deferred_work(priv); + iwl3945_setup_rx_handlers(priv); + iwl_power_initialize(priv); + + /********************************* + * 8. Setup and Register mac80211 + * *******************************/ + + iwl_enable_interrupts(priv); + + err = iwl3945_setup_mac(priv); + if (err) + goto out_remove_sysfs; + + err = iwl_dbgfs_register(priv, DRV_NAME); + if (err) + IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); + + /* Start monitoring the killswitch */ + queue_delayed_work(priv->workqueue, &priv->rfkill_poll, + 2 * HZ); + + return 0; + + out_remove_sysfs: + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); + out_release_irq: + free_irq(priv->pci_dev->irq, priv); + out_disable_msi: + pci_disable_msi(priv->pci_dev); + iwlcore_free_geos(priv); + iwl_free_channel_map(priv); + out_unset_hw_params: + iwl3945_unset_hw_params(priv); + out_eeprom_free: + iwl_eeprom_free(priv); + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_release_regions(pdev); + out_pci_disable_device: + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + out_ieee80211_free_hw: + iwl_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + unsigned long flags; + + if (!priv) + return; + + IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); + + iwl_dbgfs_unregister(priv); + + set_bit(STATUS_EXIT_PENDING, &priv->status); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; + } else { + iwl3945_down(priv); + } + + /* + * Make sure device is reset to low power before unloading driver. 
+ * This may be redundant with iwl_down(), but there are paths to + * run iwl_down() without calling apm_ops.stop(), and there are + * paths to avoid running iwl_down() at all before leaving driver. + * This (inexpensive) call *makes sure* device is reset. + */ + priv->cfg->ops->lib->apm_ops.stop(priv); + + /* make sure we flush any pending irq or + * tasklet for the driver + */ + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_synchronize_irq(priv); + + sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); + + cancel_delayed_work_sync(&priv->rfkill_poll); + + iwl3945_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl3945_rx_queue_free(priv, &priv->rxq); + iwl3945_hw_txq_ctx_free(priv); + + iwl3945_unset_hw_params(priv); + iwl_clear_stations_table(priv); + + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + + /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... */ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_free_traffic_mem(priv); + + free_irq(pdev->irq, priv); + pci_disable_msi(pdev); + + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + iwl_free_channel_map(priv); + iwlcore_free_geos(priv); + kfree(priv->scan); + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + ieee80211_free_hw(priv->hw); +} + + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +static struct pci_driver iwl3945_driver = { + .name = DRV_NAME, + .id_table = iwl3945_hw_card_ids, + .probe = iwl3945_pci_probe, + .remove = __devexit_p(iwl3945_pci_remove), +#ifdef CONFIG_PM + .suspend = iwl_pci_suspend, + .resume = iwl_pci_resume, +#endif +}; + +static int __init iwl3945_init(void) +{ + + int ret; + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + + ret = iwl3945_rate_control_register(); + if (ret) { + printk(KERN_ERR DRV_NAME + "Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&iwl3945_driver); + if (ret) { + printk(KERN_ERR DRV_NAME "Unable to initialize PCI module\n"); + goto error_register; + } + + return ret; + +error_register: + iwl3945_rate_control_unregister(); + return ret; +} + +static void __exit iwl3945_exit(void) +{ + pci_unregister_driver(&iwl3945_driver); + iwl3945_rate_control_unregister(); +} + +MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); + +module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO); +MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); +module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, + "using software crypto (default 1 [software])\n"); +#ifdef CONFIG_IWLWIFI_DEBUG +module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif +module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, + int, S_IRUGO); +MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); +module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of 
error"); + +module_exit(iwl3945_exit); +module_init(iwl3945_init); diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile new file mode 100644 index 0000000..d34291b --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_IWM) := iwmc3200wifi.o +iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o +iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o + +iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o diff --git a/drivers/net/wireless/iwmc3200wifi/bus.h b/drivers/net/wireless/iwmc3200wifi/bus.h new file mode 100644 index 0000000..836663e --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/bus.h @@ -0,0 +1,57 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#ifndef __IWM_BUS_H__ +#define __IWM_BUS_H__ + +#include "iwm.h" + +struct iwm_if_ops { + int (*enable)(struct iwm_priv *iwm); + int (*disable)(struct iwm_priv *iwm); + int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count); + + int (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir); + void (*debugfs_exit)(struct iwm_priv *iwm); + + const char *umac_name; + const char *calib_lmac_name; + const char *lmac_name; +}; + +static inline int iwm_bus_send_chunk(struct iwm_priv *iwm, u8 *buf, int count) +{ + return iwm->bus_ops->send_chunk(iwm, buf, count); +} + +static inline int iwm_bus_enable(struct iwm_priv *iwm) +{ + return iwm->bus_ops->enable(iwm); +} + +static inline int iwm_bus_disable(struct iwm_priv *iwm) +{ + return iwm->bus_ops->disable(iwm); +} + +#endif diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c new file mode 100644 index 0000000..7c4f44a --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c @@ -0,0 +1,856 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
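bus.h above isolates the iwmc3200wifi core from its SDIO transport behind a small ops table plus static inline wrappers, so callers never reach into the backend directly. Below is a self-contained sketch of the same shape with hypothetical names (bus_ops, device_ctx, fake_*); in the real driver the table lives in iwm->bus_ops and is filled in by the SDIO code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical analogue of struct iwm_if_ops: the core talks to the
 * transport only through these callbacks. */
struct bus_ops {
        int (*enable)(void *bus);
        int (*disable)(void *bus);
        int (*send_chunk)(void *bus, const uint8_t *buf, int count);
};

struct device_ctx {
        const struct bus_ops *ops;
        void *bus;              /* backend-private state */
};

/* Thin wrappers, mirroring iwm_bus_enable()/iwm_bus_send_chunk(). */
static int bus_enable(struct device_ctx *d)
{
        return d->ops->enable(d->bus);
}

static int bus_send_chunk(struct device_ctx *d, const uint8_t *buf, int count)
{
        return d->ops->send_chunk(d->bus, buf, count);
}

/* A trivial backend standing in for the SDIO implementation. */
static int fake_enable(void *bus)  { (void)bus; return 0; }
static int fake_disable(void *bus) { (void)bus; return 0; }
static int fake_send_chunk(void *bus, const uint8_t *buf, int count)
{
        (void)bus; (void)buf;
        printf("sending %d bytes\n", count);
        return 0;
}

static const struct bus_ops fake_ops = {
        .enable     = fake_enable,
        .disable    = fake_disable,
        .send_chunk = fake_send_chunk,
};

int main(void)
{
        struct device_ctx dev = { .ops = &fake_ops, .bus = NULL };
        uint8_t payload[8] = { 0 };

        if (bus_enable(&dev) == 0)
                bus_send_chunk(&dev, payload, (int)sizeof(payload));
        return dev.ops->disable(dev.bus);
}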
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "iwm.h" +#include "commands.h" +#include "cfg80211.h" +#include "debug.h" + +#define RATETAB_ENT(_rate, _rateid, _flags) \ + { \ + .bitrate = (_rate), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _flags) { \ + .band = IEEE80211_BAND_5GHZ, \ + .center_freq = 5000 + (5 * (_channel)), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static struct ieee80211_rate iwm_rates[] = { + RATETAB_ENT(10, 0x1, 0), + RATETAB_ENT(20, 0x2, 0), + RATETAB_ENT(55, 0x4, 0), + RATETAB_ENT(110, 0x8, 0), + RATETAB_ENT(60, 0x10, 0), + RATETAB_ENT(90, 0x20, 0), + RATETAB_ENT(120, 0x40, 0), + RATETAB_ENT(180, 0x80, 0), + RATETAB_ENT(240, 0x100, 0), + RATETAB_ENT(360, 0x200, 0), + RATETAB_ENT(480, 0x400, 0), + RATETAB_ENT(540, 0x800, 0), +}; + +#define iwm_a_rates (iwm_rates + 4) +#define iwm_a_rates_size 8 +#define iwm_g_rates (iwm_rates + 0) +#define iwm_g_rates_size 12 + +static struct ieee80211_channel iwm_2ghz_channels[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +static struct ieee80211_channel iwm_5ghz_a_channels[] = { + CHAN5G(34, 0), CHAN5G(36, 0), + CHAN5G(38, 0), CHAN5G(40, 0), + CHAN5G(42, 0), CHAN5G(44, 0), + CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(149, 0), + CHAN5G(153, 0), CHAN5G(157, 0), + CHAN5G(161, 0), CHAN5G(165, 0), + CHAN5G(184, 0), CHAN5G(188, 0), + CHAN5G(192, 0), CHAN5G(196, 0), + CHAN5G(200, 0), CHAN5G(204, 0), + CHAN5G(208, 0), CHAN5G(212, 0), + CHAN5G(216, 0), +}; + +static struct ieee80211_supported_band iwm_band_2ghz = { + .channels = iwm_2ghz_channels, + .n_channels = ARRAY_SIZE(iwm_2ghz_channels), + .bitrates = iwm_g_rates, + .n_bitrates = iwm_g_rates_size, +}; + +static struct ieee80211_supported_band iwm_band_5ghz = { + .channels = iwm_5ghz_a_channels, + .n_channels = ARRAY_SIZE(iwm_5ghz_a_channels), + .bitrates = iwm_a_rates, + .n_bitrates = iwm_a_rates_size, +}; + +static int iwm_key_init(struct iwm_key *key, u8 key_index, + const u8 *mac_addr, struct key_params *params) +{ + key->hdr.key_idx = key_index; + if (!mac_addr || is_broadcast_ether_addr(mac_addr)) { + key->hdr.multicast = 1; + memset(key->hdr.mac, 0xff, ETH_ALEN); + } else { + key->hdr.multicast = 0; + memcpy(key->hdr.mac, mac_addr, ETH_ALEN); + } + + if (params) { + if (params->key_len > WLAN_MAX_KEY_LEN || + params->seq_len > IW_ENCODE_SEQ_MAX_SIZE) + return -EINVAL; + + key->cipher = params->cipher; + key->key_len = params->key_len; + key->seq_len = params->seq_len; + memcpy(key->key, params->key, key->key_len); + memcpy(key->seq, params->seq, key->seq_len); + } + + return 0; +} + +static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, + u8 key_index, const u8 *mac_addr, + struct 
key_params *params) +{ + struct iwm_priv *iwm = ndev_to_iwm(ndev); + struct iwm_key *key = &iwm->keys[key_index]; + int ret; + + IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr); + + memset(key, 0, sizeof(struct iwm_key)); + ret = iwm_key_init(key, key_index, mac_addr, params); + if (ret < 0) { + IWM_ERR(iwm, "Invalid key_params\n"); + return ret; + } + + return iwm_set_key(iwm, 0, key); +} + +static int iwm_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, + u8 key_index, const u8 *mac_addr, void *cookie, + void (*callback)(void *cookie, + struct key_params*)) +{ + struct iwm_priv *iwm = ndev_to_iwm(ndev); + struct iwm_key *key = &iwm->keys[key_index]; + struct key_params params; + + IWM_DBG_WEXT(iwm, DBG, "Getting key %d\n", key_index); + + memset(¶ms, 0, sizeof(params)); + + params.cipher = key->cipher; + params.key_len = key->key_len; + params.seq_len = key->seq_len; + params.seq = key->seq; + params.key = key->key; + + callback(cookie, ¶ms); + + return key->key_len ? 0 : -ENOENT; +} + + +static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, + u8 key_index, const u8 *mac_addr) +{ + struct iwm_priv *iwm = ndev_to_iwm(ndev); + struct iwm_key *key = &iwm->keys[key_index]; + + if (!iwm->keys[key_index].key_len) { + IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index); + return 0; + } + + if (key_index == iwm->default_key) + iwm->default_key = -1; + + return iwm_set_key(iwm, 1, key); +} + +static int iwm_cfg80211_set_default_key(struct wiphy *wiphy, + struct net_device *ndev, + u8 key_index) +{ + struct iwm_priv *iwm = ndev_to_iwm(ndev); + + IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index); + + if (!iwm->keys[key_index].key_len) { + IWM_ERR(iwm, "Key %d not used\n", key_index); + return -EINVAL; + } + + iwm->default_key = key_index; + + return iwm_set_tx_key(iwm, key_index); +} + +static int iwm_cfg80211_get_station(struct wiphy *wiphy, + struct net_device *ndev, + u8 *mac, struct station_info *sinfo) +{ + struct iwm_priv *iwm = ndev_to_iwm(ndev); + + if (memcmp(mac, iwm->bssid, ETH_ALEN)) + return -ENOENT; + + sinfo->filled |= STATION_INFO_TX_BITRATE; + sinfo->txrate.legacy = iwm->rate * 10; + + if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) { + sinfo->filled |= STATION_INFO_SIGNAL; + sinfo->signal = iwm->wstats.qual.level; + } + + return 0; +} + + +int iwm_cfg80211_inform_bss(struct iwm_priv *iwm) +{ + struct wiphy *wiphy = iwm_to_wiphy(iwm); + struct iwm_bss_info *bss, *next; + struct iwm_umac_notif_bss_info *umac_bss; + struct ieee80211_mgmt *mgmt; + struct ieee80211_channel *channel; + struct ieee80211_supported_band *band; + s32 signal; + int freq; + + list_for_each_entry_safe(bss, next, &iwm->bss_list, node) { + umac_bss = bss->bss; + mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf); + + if (umac_bss->band == UMAC_BAND_2GHZ) + band = wiphy->bands[IEEE80211_BAND_2GHZ]; + else if (umac_bss->band == UMAC_BAND_5GHZ) + band = wiphy->bands[IEEE80211_BAND_5GHZ]; + else { + IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band); + return -EINVAL; + } + + freq = ieee80211_channel_to_frequency(umac_bss->channel); + channel = ieee80211_get_channel(wiphy, freq); + signal = umac_bss->rssi * 100; + + if (!cfg80211_inform_bss_frame(wiphy, channel, mgmt, + le16_to_cpu(umac_bss->frame_len), + signal, GFP_KERNEL)) + return -EINVAL; + } + + return 0; +} + +static int iwm_cfg80211_change_iface(struct wiphy *wiphy, + struct net_device *ndev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params) +{ + struct wireless_dev 
*wdev; + struct iwm_priv *iwm; + u32 old_mode; + + wdev = ndev->ieee80211_ptr; + iwm = ndev_to_iwm(ndev); + old_mode = iwm->conf.mode; + + switch (type) { + case NL80211_IFTYPE_STATION: + iwm->conf.mode = UMAC_MODE_BSS; + break; + case NL80211_IFTYPE_ADHOC: + iwm->conf.mode = UMAC_MODE_IBSS; + break; + default: + return -EOPNOTSUPP; + } + + wdev->iftype = type; + + if ((old_mode == iwm->conf.mode) || !iwm->umac_profile) + return 0; + + iwm->umac_profile->mode = cpu_to_le32(iwm->conf.mode); + + if (iwm->umac_profile_active) + iwm_invalidate_mlme_profile(iwm); + + return 0; +} + +static int iwm_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request) +{ + struct iwm_priv *iwm = ndev_to_iwm(ndev); + int ret; + + if (!test_bit(IWM_STATUS_READY, &iwm->status)) { + IWM_ERR(iwm, "Scan while device is not ready\n"); + return -EIO; + } + + if (test_bit(IWM_STATUS_SCANNING, &iwm->status)) { + IWM_ERR(iwm, "Scanning already\n"); + return -EAGAIN; + } + + if (test_bit(IWM_STATUS_SCAN_ABORTING, &iwm->status)) { + IWM_ERR(iwm, "Scanning being aborted\n"); + return -EAGAIN; + } + + set_bit(IWM_STATUS_SCANNING, &iwm->status); + + ret = iwm_scan_ssids(iwm, request->ssids, request->n_ssids); + if (ret) { + clear_bit(IWM_STATUS_SCANNING, &iwm->status); + return ret; + } + + iwm->scan_request = request; + return 0; +} + +static int iwm_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + + if (changed & WIPHY_PARAM_RTS_THRESHOLD && + (iwm->conf.rts_threshold != wiphy->rts_threshold)) { + int ret; + + iwm->conf.rts_threshold = wiphy->rts_threshold; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_RTS_THRESHOLD, + iwm->conf.rts_threshold); + if (ret < 0) + return ret; + } + + if (changed & WIPHY_PARAM_FRAG_THRESHOLD && + (iwm->conf.frag_threshold != wiphy->frag_threshold)) { + int ret; + + iwm->conf.frag_threshold = wiphy->frag_threshold; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX, + CFG_FRAG_THRESHOLD, + iwm->conf.frag_threshold); + if (ret < 0) + return ret; + } + + return 0; +} + +static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + struct ieee80211_channel *chan = params->channel; + + if (!test_bit(IWM_STATUS_READY, &iwm->status)) + return -EIO; + + /* UMAC doesn't support creating or joining an IBSS network + * with specified bssid. 
*/ + if (params->bssid) + return -EOPNOTSUPP; + + iwm->channel = ieee80211_frequency_to_channel(chan->center_freq); + iwm->umac_profile->ibss.band = chan->band; + iwm->umac_profile->ibss.channel = iwm->channel; + iwm->umac_profile->ssid.ssid_len = params->ssid_len; + memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len); + + return iwm_send_mlme_profile(iwm); +} + +static int iwm_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + + if (iwm->umac_profile_active) + return iwm_invalidate_mlme_profile(iwm); + + return 0; +} + +static int iwm_set_auth_type(struct iwm_priv *iwm, + enum nl80211_auth_type sme_auth_type) +{ + u8 *auth_type = &iwm->umac_profile->sec.auth_type; + + switch (sme_auth_type) { + case NL80211_AUTHTYPE_AUTOMATIC: + case NL80211_AUTHTYPE_OPEN_SYSTEM: + IWM_DBG_WEXT(iwm, DBG, "OPEN auth\n"); + *auth_type = UMAC_AUTH_TYPE_OPEN; + break; + case NL80211_AUTHTYPE_SHARED_KEY: + if (iwm->umac_profile->sec.flags & + (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) { + IWM_DBG_WEXT(iwm, DBG, "WPA auth alg\n"); + *auth_type = UMAC_AUTH_TYPE_RSNA_PSK; + } else { + IWM_DBG_WEXT(iwm, DBG, "WEP shared key auth alg\n"); + *auth_type = UMAC_AUTH_TYPE_LEGACY_PSK; + } + + break; + default: + IWM_ERR(iwm, "Unsupported auth alg: 0x%x\n", sme_auth_type); + return -ENOTSUPP; + } + + return 0; +} + +static int iwm_set_wpa_version(struct iwm_priv *iwm, u32 wpa_version) +{ + IWM_DBG_WEXT(iwm, DBG, "wpa_version: %d\n", wpa_version); + + if (!wpa_version) { + iwm->umac_profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE; + return 0; + } + + if (wpa_version & NL80211_WPA_VERSION_1) + iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK; + + if (wpa_version & NL80211_WPA_VERSION_2) + iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK; + + return 0; +} + +static int iwm_set_cipher(struct iwm_priv *iwm, u32 cipher, bool ucast) +{ + u8 *profile_cipher = ucast ? &iwm->umac_profile->sec.ucast_cipher : + &iwm->umac_profile->sec.mcast_cipher; + + if (!cipher) { + *profile_cipher = UMAC_CIPHER_TYPE_NONE; + return 0; + } + + IWM_DBG_WEXT(iwm, DBG, "%ccast cipher is 0x%x\n", ucast ? 
'u' : 'm', + cipher); + + switch (cipher) { + case IW_AUTH_CIPHER_NONE: + *profile_cipher = UMAC_CIPHER_TYPE_NONE; + break; + case WLAN_CIPHER_SUITE_WEP40: + *profile_cipher = UMAC_CIPHER_TYPE_WEP_40; + break; + case WLAN_CIPHER_SUITE_WEP104: + *profile_cipher = UMAC_CIPHER_TYPE_WEP_104; + break; + case WLAN_CIPHER_SUITE_TKIP: + *profile_cipher = UMAC_CIPHER_TYPE_TKIP; + break; + case WLAN_CIPHER_SUITE_CCMP: + *profile_cipher = UMAC_CIPHER_TYPE_CCMP; + break; + default: + IWM_ERR(iwm, "Unsupported cipher: 0x%x\n", cipher); + return -ENOTSUPP; + } + + return 0; +} + +static int iwm_set_key_mgt(struct iwm_priv *iwm, u32 key_mgt) +{ + u8 *auth_type = &iwm->umac_profile->sec.auth_type; + + IWM_DBG_WEXT(iwm, DBG, "key_mgt: 0x%x\n", key_mgt); + + if (key_mgt == WLAN_AKM_SUITE_8021X) + *auth_type = UMAC_AUTH_TYPE_8021X; + else if (key_mgt == WLAN_AKM_SUITE_PSK) { + if (iwm->umac_profile->sec.flags & + (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) + *auth_type = UMAC_AUTH_TYPE_RSNA_PSK; + else + *auth_type = UMAC_AUTH_TYPE_LEGACY_PSK; + } else { + IWM_ERR(iwm, "Invalid key mgt: 0x%x\n", key_mgt); + return -EINVAL; + } + + return 0; +} + + +static int iwm_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + struct ieee80211_channel *chan = sme->channel; + struct key_params key_param; + int ret; + + if (!test_bit(IWM_STATUS_READY, &iwm->status)) + return -EIO; + + if (!sme->ssid) + return -EINVAL; + + if (iwm->umac_profile_active) { + ret = iwm_invalidate_mlme_profile(iwm); + if (ret) { + IWM_ERR(iwm, "Couldn't invalidate profile\n"); + return ret; + } + } + + if (chan) + iwm->channel = + ieee80211_frequency_to_channel(chan->center_freq); + + iwm->umac_profile->ssid.ssid_len = sme->ssid_len; + memcpy(iwm->umac_profile->ssid.ssid, sme->ssid, sme->ssid_len); + + if (sme->bssid) { + IWM_DBG_WEXT(iwm, DBG, "BSSID: %pM\n", sme->bssid); + memcpy(&iwm->umac_profile->bssid[0], sme->bssid, ETH_ALEN); + iwm->umac_profile->bss_num = 1; + } else { + memset(&iwm->umac_profile->bssid[0], 0, ETH_ALEN); + iwm->umac_profile->bss_num = 0; + } + + ret = iwm_set_wpa_version(iwm, sme->crypto.wpa_versions); + if (ret < 0) + return ret; + + ret = iwm_set_auth_type(iwm, sme->auth_type); + if (ret < 0) + return ret; + + if (sme->crypto.n_ciphers_pairwise) { + ret = iwm_set_cipher(iwm, sme->crypto.ciphers_pairwise[0], + true); + if (ret < 0) + return ret; + } + + ret = iwm_set_cipher(iwm, sme->crypto.cipher_group, false); + if (ret < 0) + return ret; + + if (sme->crypto.n_akm_suites) { + ret = iwm_set_key_mgt(iwm, sme->crypto.akm_suites[0]); + if (ret < 0) + return ret; + } + + /* + * We save the WEP key in case we want to do shared authentication. + * We have to do it so because UMAC will assert whenever it gets a + * key before a profile. + */ + if (sme->key) { + key_param.key = kmemdup(sme->key, sme->key_len, GFP_KERNEL); + if (key_param.key == NULL) + return -ENOMEM; + key_param.key_len = sme->key_len; + key_param.seq_len = 0; + key_param.cipher = sme->crypto.ciphers_pairwise[0]; + + ret = iwm_key_init(&iwm->keys[sme->key_idx], sme->key_idx, + NULL, &key_param); + kfree(key_param.key); + if (ret < 0) { + IWM_ERR(iwm, "Invalid key_params\n"); + return ret; + } + + iwm->default_key = sme->key_idx; + } + + /* WPA and open AUTH type from wpa_s means WPS (a.k.a. 
WSC) */ + if ((iwm->umac_profile->sec.flags & + (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) && + iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) { + iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK; + } + + ret = iwm_send_mlme_profile(iwm); + + if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK || + sme->key == NULL) + return ret; + + /* + * We want to do shared auth. + * We need to actually set the key we previously cached, + * and then tell the UMAC it's the default one. + * That will trigger the auth+assoc UMAC machinery, and again, + * this must be done after setting the profile. + */ + ret = iwm_set_key(iwm, 0, &iwm->keys[sme->key_idx]); + if (ret < 0) + return ret; + + return iwm_set_tx_key(iwm, iwm->default_key); +} + +static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + + IWM_DBG_WEXT(iwm, DBG, "Active: %d\n", iwm->umac_profile_active); + + if (iwm->umac_profile_active) + iwm_invalidate_mlme_profile(iwm); + + return 0; +} + +static int iwm_cfg80211_set_txpower(struct wiphy *wiphy, + enum tx_power_setting type, int dbm) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + int ret; + + switch (type) { + case TX_POWER_AUTOMATIC: + return 0; + case TX_POWER_FIXED: + if (!test_bit(IWM_STATUS_READY, &iwm->status)) + return 0; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_TX_PWR_LIMIT_USR, dbm * 2); + if (ret < 0) + return ret; + + return iwm_tx_power_trigger(iwm); + default: + IWM_ERR(iwm, "Unsupported power type: %d\n", type); + return -EOPNOTSUPP; + } + + return 0; +} + +static int iwm_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + + *dbm = iwm->txpower >> 1; + + return 0; +} + +static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy, + struct net_device *dev, + bool enabled, int timeout) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + u32 power_index; + + if (enabled) + power_index = IWM_POWER_INDEX_DEFAULT; + else + power_index = IWM_POWER_INDEX_MIN; + + if (power_index == iwm->conf.power_index) + return 0; + + iwm->conf.power_index = power_index; + + return iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_POWER_INDEX, iwm->conf.power_index); +} + +int iwm_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_pmksa *pmksa) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + + return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD); +} + +int iwm_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_pmksa *pmksa) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + + return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL); +} + +int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) +{ + struct iwm_priv *iwm = wiphy_to_iwm(wiphy); + struct cfg80211_pmksa pmksa; + + memset(&pmksa, 0, sizeof(struct cfg80211_pmksa)); + + return iwm_send_pmkid_update(iwm, &pmksa, IWM_CMD_PMKID_FLUSH); +} + + +static struct cfg80211_ops iwm_cfg80211_ops = { + .change_virtual_intf = iwm_cfg80211_change_iface, + .add_key = iwm_cfg80211_add_key, + .get_key = iwm_cfg80211_get_key, + .del_key = iwm_cfg80211_del_key, + .set_default_key = iwm_cfg80211_set_default_key, + .get_station = iwm_cfg80211_get_station, + .scan = iwm_cfg80211_scan, + .set_wiphy_params = iwm_cfg80211_set_wiphy_params, + .connect = iwm_cfg80211_connect, + .disconnect = iwm_cfg80211_disconnect, + .join_ibss = 
iwm_cfg80211_join_ibss, + .leave_ibss = iwm_cfg80211_leave_ibss, + .set_tx_power = iwm_cfg80211_set_txpower, + .get_tx_power = iwm_cfg80211_get_txpower, + .set_power_mgmt = iwm_cfg80211_set_power_mgmt, + .set_pmksa = iwm_cfg80211_set_pmksa, + .del_pmksa = iwm_cfg80211_del_pmksa, + .flush_pmksa = iwm_cfg80211_flush_pmksa, +}; + +static const u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, +}; + +struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev) +{ + int ret = 0; + struct wireless_dev *wdev; + + /* + * We're trying to have the following memory + * layout: + * + * +-------------------------+ + * | struct wiphy | + * +-------------------------+ + * | struct iwm_priv | + * +-------------------------+ + * | bus private data | + * | (e.g. iwm_priv_sdio) | + * +-------------------------+ + * + */ + + wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); + if (!wdev) { + dev_err(dev, "Couldn't allocate wireless device\n"); + return ERR_PTR(-ENOMEM); + } + + wdev->wiphy = wiphy_new(&iwm_cfg80211_ops, + sizeof(struct iwm_priv) + sizeof_bus); + if (!wdev->wiphy) { + dev_err(dev, "Couldn't allocate wiphy device\n"); + ret = -ENOMEM; + goto out_err_new; + } + + set_wiphy_dev(wdev->wiphy, dev); + wdev->wiphy->max_scan_ssids = UMAC_WIFI_IF_PROBE_OPTION_MAX; + wdev->wiphy->max_num_pmkids = UMAC_MAX_NUM_PMKIDS; + wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &iwm_band_2ghz; + wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &iwm_band_5ghz; + wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + + wdev->wiphy->cipher_suites = cipher_suites; + wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + + ret = wiphy_register(wdev->wiphy); + if (ret < 0) { + dev_err(dev, "Couldn't register wiphy device\n"); + goto out_err_register; + } + + return wdev; + + out_err_register: + wiphy_free(wdev->wiphy); + + out_err_new: + kfree(wdev); + + return ERR_PTR(ret); +} + +void iwm_wdev_free(struct iwm_priv *iwm) +{ + struct wireless_dev *wdev = iwm_to_wdev(iwm); + + if (!wdev) + return; + + wiphy_unregister(wdev->wiphy); + wiphy_free(wdev->wiphy); + kfree(wdev); +} diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.h b/drivers/net/wireless/iwmc3200wifi/cfg80211.h new file mode 100644 index 0000000..56a3414 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.h @@ -0,0 +1,31 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#ifndef __IWM_CFG80211_H__ +#define __IWM_CFG80211_H__ + +int iwm_cfg80211_inform_bss(struct iwm_priv *iwm); +struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev); +void iwm_wdev_free(struct iwm_priv *iwm); + +#endif diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c new file mode 100644 index 0000000..1e41ad0 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/commands.c @@ -0,0 +1,994 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#include +#include +#include +#include +#include + +#include "iwm.h" +#include "bus.h" +#include "hal.h" +#include "umac.h" +#include "commands.h" +#include "debug.h" + +static int iwm_send_lmac_ptrough_cmd(struct iwm_priv *iwm, + u8 lmac_cmd_id, + const void *lmac_payload, + u16 lmac_payload_size, + u8 resp) +{ + struct iwm_udma_wifi_cmd udma_cmd = UDMA_LMAC_INIT; + struct iwm_umac_cmd umac_cmd; + struct iwm_lmac_cmd lmac_cmd; + + lmac_cmd.id = lmac_cmd_id; + + umac_cmd.id = UMAC_CMD_OPCODE_WIFI_PASS_THROUGH; + umac_cmd.resp = resp; + + return iwm_hal_send_host_cmd(iwm, &udma_cmd, &umac_cmd, &lmac_cmd, + lmac_payload, lmac_payload_size); +} + +int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size, + bool resp) +{ + struct iwm_umac_wifi_if *hdr = (struct iwm_umac_wifi_if *)payload; + struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; + struct iwm_umac_cmd umac_cmd; + int ret; + u8 oid = hdr->oid; + + if (!test_bit(IWM_STATUS_READY, &iwm->status)) { + IWM_ERR(iwm, "Interface is not ready yet"); + return -EAGAIN; + } + + umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER; + umac_cmd.resp = resp; + + ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, + payload, payload_size); + + if (resp) { + ret = wait_event_interruptible_timeout(iwm->wifi_ntfy_queue, + test_and_clear_bit(oid, &iwm->wifi_ntfy[0]), + 3 * HZ); + + return ret ? 0 : -EBUSY; + } + + return ret; +} + +static int modparam_wiwi = COEX_MODE_CM; +module_param_named(wiwi, modparam_wiwi, int, 0644); +MODULE_PARM_DESC(wiwi, "Wifi-WiMAX coexistence: 1=SA, 2=XOR, 3=CM (default)"); + +static struct coex_event iwm_sta_xor_prio_tbl[COEX_EVENTS_NUM] = +{ + {4, 3, 0, COEX_UNASSOC_IDLE_FLAGS}, + {4, 3, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS}, + {4, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS}, + {4, 3, 0, COEX_CALIBRATION_FLAGS}, + {4, 3, 0, COEX_PERIODIC_CALIBRATION_FLAGS}, + {4, 3, 0, COEX_CONNECTION_ESTAB_FLAGS}, + {4, 3, 0, COEX_ASSOCIATED_IDLE_FLAGS}, + {4, 3, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS}, + {4, 3, 0, COEX_ASSOC_AUTO_SCAN_FLAGS}, + {4, 3, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS}, + {6, 3, 0, COEX_XOR_RF_ON_FLAGS}, + {4, 3, 0, COEX_RF_OFF_FLAGS}, + {6, 6, 0, COEX_STAND_ALONE_DEBUG_FLAGS}, + {4, 3, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS}, + {4, 3, 0, COEX_RSRVD1_FLAGS}, + {4, 3, 0, COEX_RSRVD2_FLAGS} +}; + +static struct coex_event iwm_sta_cm_prio_tbl[COEX_EVENTS_NUM] = +{ + {1, 1, 0, COEX_UNASSOC_IDLE_FLAGS}, + {4, 4, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS}, + {3, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS}, + {6, 6, 0, COEX_CALIBRATION_FLAGS}, + {3, 3, 0, COEX_PERIODIC_CALIBRATION_FLAGS}, + {6, 5, 0, COEX_CONNECTION_ESTAB_FLAGS}, + {4, 4, 0, COEX_ASSOCIATED_IDLE_FLAGS}, + {4, 4, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS}, + {4, 4, 0, COEX_ASSOC_AUTO_SCAN_FLAGS}, + {4, 4, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS}, + {1, 1, 0, COEX_RF_ON_FLAGS}, + {1, 1, 0, COEX_RF_OFF_FLAGS}, + {7, 7, 0, COEX_STAND_ALONE_DEBUG_FLAGS}, + {5, 4, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS}, + {1, 1, 0, COEX_RSRVD1_FLAGS}, + {1, 1, 0, COEX_RSRVD2_FLAGS} +}; + +int iwm_send_prio_table(struct iwm_priv *iwm) +{ + struct iwm_coex_prio_table_cmd coex_table_cmd; + u32 coex_enabled, mode_enabled; + + memset(&coex_table_cmd, 0, sizeof(struct iwm_coex_prio_table_cmd)); + + coex_table_cmd.flags = COEX_FLAGS_STA_TABLE_VALID_MSK; + + switch (modparam_wiwi) { + case COEX_MODE_XOR: + case COEX_MODE_CM: + coex_enabled = 1; + break; + default: + coex_enabled = 0; + break; + } + + switch (iwm->conf.mode) { + case UMAC_MODE_BSS: + case UMAC_MODE_IBSS: 
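+ /* Coexistence only applies while the interface is configured for BSS or IBSS operation; for any other mode the priority table is still sent, but with the coexistence enable flags left cleared. */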
+ mode_enabled = 1; + break; + default: + mode_enabled = 0; + break; + } + + if (coex_enabled && mode_enabled) { + coex_table_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK | + COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK | + COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK; + + switch (modparam_wiwi) { + case COEX_MODE_XOR: + memcpy(coex_table_cmd.sta_prio, iwm_sta_xor_prio_tbl, + sizeof(iwm_sta_xor_prio_tbl)); + break; + case COEX_MODE_CM: + memcpy(coex_table_cmd.sta_prio, iwm_sta_cm_prio_tbl, + sizeof(iwm_sta_cm_prio_tbl)); + break; + default: + IWM_ERR(iwm, "Invalid coex_mode 0x%x\n", + modparam_wiwi); + break; + } + } else + IWM_WARN(iwm, "coexistense disabled\n"); + + return iwm_send_lmac_ptrough_cmd(iwm, COEX_PRIORITY_TABLE_CMD, + &coex_table_cmd, + sizeof(struct iwm_coex_prio_table_cmd), 0); +} + +int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested) +{ + struct iwm_lmac_cal_cfg_cmd cal_cfg_cmd; + + memset(&cal_cfg_cmd, 0, sizeof(struct iwm_lmac_cal_cfg_cmd)); + + cal_cfg_cmd.ucode_cfg.init.enable = cpu_to_le32(calib_requested); + cal_cfg_cmd.ucode_cfg.init.start = cpu_to_le32(calib_requested); + cal_cfg_cmd.ucode_cfg.init.send_res = cpu_to_le32(calib_requested); + cal_cfg_cmd.ucode_cfg.flags = + cpu_to_le32(CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK); + + return iwm_send_lmac_ptrough_cmd(iwm, CALIBRATION_CFG_CMD, &cal_cfg_cmd, + sizeof(struct iwm_lmac_cal_cfg_cmd), 1); +} + +int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested) +{ + struct iwm_lmac_cal_cfg_cmd cal_cfg_cmd; + + memset(&cal_cfg_cmd, 0, sizeof(struct iwm_lmac_cal_cfg_cmd)); + + cal_cfg_cmd.ucode_cfg.periodic.enable = cpu_to_le32(calib_requested); + cal_cfg_cmd.ucode_cfg.periodic.start = cpu_to_le32(calib_requested); + + return iwm_send_lmac_ptrough_cmd(iwm, CALIBRATION_CFG_CMD, &cal_cfg_cmd, + sizeof(struct iwm_lmac_cal_cfg_cmd), 0); +} + +int iwm_store_rxiq_calib_result(struct iwm_priv *iwm) +{ + struct iwm_calib_rxiq *rxiq; + u8 *eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); + int grplen = sizeof(struct iwm_calib_rxiq_group); + + rxiq = kzalloc(sizeof(struct iwm_calib_rxiq), GFP_KERNEL); + if (!rxiq) { + IWM_ERR(iwm, "Couldn't alloc memory for RX IQ\n"); + return -ENOMEM; + } + + eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); + if (IS_ERR(eeprom_rxiq)) { + IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n"); + kfree(rxiq); + return PTR_ERR(eeprom_rxiq); + } + + iwm->calib_res[SHILOH_PHY_CALIBRATE_RX_IQ_CMD].buf = (u8 *)rxiq; + iwm->calib_res[SHILOH_PHY_CALIBRATE_RX_IQ_CMD].size = sizeof(*rxiq); + + rxiq->hdr.opcode = SHILOH_PHY_CALIBRATE_RX_IQ_CMD; + rxiq->hdr.first_grp = 0; + rxiq->hdr.grp_num = 1; + rxiq->hdr.all_data_valid = 1; + + memcpy(&rxiq->group[0], eeprom_rxiq, 4 * grplen); + memcpy(&rxiq->group[4], eeprom_rxiq + 6 * grplen, grplen); + + return 0; +} + +int iwm_send_calib_results(struct iwm_priv *iwm) +{ + int i, ret = 0; + + for (i = PHY_CALIBRATE_OPCODES_NUM; i < CALIBRATION_CMD_NUM; i++) { + if (test_bit(i - PHY_CALIBRATE_OPCODES_NUM, + &iwm->calib_done_map)) { + IWM_DBG_CMD(iwm, DBG, + "Send calibration %d result\n", i); + ret |= iwm_send_lmac_ptrough_cmd(iwm, + REPLY_PHY_CALIBRATION_CMD, + iwm->calib_res[i].buf, + iwm->calib_res[i].size, 0); + + kfree(iwm->calib_res[i].buf); + iwm->calib_res[i].buf = NULL; + iwm->calib_res[i].size = 0; + } + } + + return ret; +} + +int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit) +{ + struct iwm_ct_kill_cfg_cmd cmd; + + cmd.entry_threshold = entry; + cmd.exit_threshold = exit; + + return 
iwm_send_lmac_ptrough_cmd(iwm, REPLY_CT_KILL_CONFIG_CMD, &cmd, + sizeof(struct iwm_ct_kill_cfg_cmd), 0); +} + +int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp) +{ + struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; + struct iwm_umac_cmd umac_cmd; + struct iwm_umac_cmd_reset reset; + + reset.flags = reset_flags; + + umac_cmd.id = UMAC_CMD_OPCODE_RESET; + umac_cmd.resp = resp; + + return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &reset, + sizeof(struct iwm_umac_cmd_reset)); +} + +int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value) +{ + struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; + struct iwm_umac_cmd umac_cmd; + struct iwm_umac_cmd_set_param_fix param; + + if ((tbl != UMAC_PARAM_TBL_CFG_FIX) && + (tbl != UMAC_PARAM_TBL_FA_CFG_FIX)) + return -EINVAL; + + umac_cmd.id = UMAC_CMD_OPCODE_SET_PARAM_FIX; + umac_cmd.resp = 0; + + param.tbl = cpu_to_le16(tbl); + param.key = cpu_to_le16(key); + param.value = cpu_to_le32(value); + + return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, ¶m, + sizeof(struct iwm_umac_cmd_set_param_fix)); +} + +int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key, + void *payload, u16 payload_size) +{ + struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; + struct iwm_umac_cmd umac_cmd; + struct iwm_umac_cmd_set_param_var *param_hdr; + u8 *param; + int ret; + + param = kzalloc(payload_size + + sizeof(struct iwm_umac_cmd_set_param_var), GFP_KERNEL); + if (!param) { + IWM_ERR(iwm, "Couldn't allocate param\n"); + return -ENOMEM; + } + + param_hdr = (struct iwm_umac_cmd_set_param_var *)param; + + umac_cmd.id = UMAC_CMD_OPCODE_SET_PARAM_VAR; + umac_cmd.resp = 0; + + param_hdr->tbl = cpu_to_le16(UMAC_PARAM_TBL_CFG_VAR); + param_hdr->key = cpu_to_le16(key); + param_hdr->len = cpu_to_le16(payload_size); + memcpy(param + sizeof(struct iwm_umac_cmd_set_param_var), + payload, payload_size); + + ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, param, + sizeof(struct iwm_umac_cmd_set_param_var) + + payload_size); + kfree(param); + + return ret; +} + +int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags) +{ + int ret; + + /* Use UMAC default values */ + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_POWER_INDEX, iwm->conf.power_index); + if (ret < 0) + return ret; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX, + CFG_FRAG_THRESHOLD, + iwm->conf.frag_threshold); + if (ret < 0) + return ret; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_RTS_THRESHOLD, + iwm->conf.rts_threshold); + if (ret < 0) + return ret; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_CTS_TO_SELF, iwm->conf.cts_to_self); + if (ret < 0) + return ret; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_WIRELESS_MODE, + iwm->conf.wireless_mode); + if (ret < 0) + return ret; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_COEX_MODE, modparam_wiwi); + if (ret < 0) + return ret; + + /* + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_ASSOCIATION_TIMEOUT, + iwm->conf.assoc_timeout); + if (ret < 0) + return ret; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_ROAM_TIMEOUT, + iwm->conf.roam_timeout); + if (ret < 0) + return ret; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_WIRELESS_MODE, + WIRELESS_MODE_11A | WIRELESS_MODE_11G); + if (ret < 0) + return ret; + */ + + ret = iwm_umac_set_config_var(iwm, CFG_NET_ADDR, + iwm_to_ndev(iwm)->dev_addr, ETH_ALEN); 
+ if (ret < 0) + return ret; + + /* UMAC PM static configurations */ + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_PM_LEGACY_RX_TIMEOUT, 0x12C); + if (ret < 0) + return ret; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_PM_LEGACY_TX_TIMEOUT, 0x15E); + if (ret < 0) + return ret; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_PM_CTRL_FLAGS, 0x1); + if (ret < 0) + return ret; + + ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX, + CFG_PM_KEEP_ALIVE_IN_BEACONS, 0x80); + if (ret < 0) + return ret; + + /* reset UMAC */ + ret = iwm_send_umac_reset(iwm, reset_flags, 1); + if (ret < 0) + return ret; + + ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC, + WAIT_NOTIF_TIMEOUT); + if (ret) { + IWM_ERR(iwm, "Wait for UMAC RESET timeout\n"); + return ret; + } + + return ret; +} + +int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id) +{ + struct iwm_udma_wifi_cmd udma_cmd; + struct iwm_umac_cmd umac_cmd; + struct iwm_tx_info *tx_info = skb_to_tx_info(skb); + + udma_cmd.eop = 1; /* always set eop for non-concatenated Tx */ + udma_cmd.credit_group = pool_id; + udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid; + udma_cmd.lmac_offset = 0; + + umac_cmd.id = REPLY_TX; + umac_cmd.color = tx_info->color; + umac_cmd.resp = 0; + + return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, + skb->data, skb->len); +} + +static int iwm_target_read(struct iwm_priv *iwm, __le32 address, + u8 *response, u32 resp_size) +{ + struct iwm_udma_nonwifi_cmd target_cmd; + struct iwm_nonwifi_cmd *cmd; + u16 seq_num; + int ret = 0; + + target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ; + target_cmd.addr = address; + target_cmd.op1_sz = cpu_to_le32(resp_size); + target_cmd.op2 = 0; + target_cmd.handle_by_hw = 0; + target_cmd.resp = 1; + target_cmd.eop = 1; + + ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL); + if (ret < 0) { + IWM_ERR(iwm, "Couldn't send READ command\n"); + return ret; + } + + /* When succeding, the send_target routine returns the seq number */ + seq_num = ret; + + ret = wait_event_interruptible_timeout(iwm->nonwifi_queue, + (cmd = iwm_get_pending_nonwifi_cmd(iwm, seq_num, + UMAC_HDI_OUT_OPCODE_READ)) != NULL, + 2 * HZ); + + if (!ret) { + IWM_ERR(iwm, "Didn't receive a target READ answer\n"); + return ret; + } + + memcpy(response, cmd->buf.hdr + sizeof(struct iwm_udma_in_hdr), + resp_size); + + kfree(cmd); + + return 0; +} + +int iwm_read_mac(struct iwm_priv *iwm, u8 *mac) +{ + int ret; + u8 mac_align[ALIGN(ETH_ALEN, 8)]; + + ret = iwm_target_read(iwm, cpu_to_le32(WICO_MAC_ADDRESS_ADDR), + mac_align, sizeof(mac_align)); + if (ret) + return ret; + + if (is_valid_ether_addr(mac_align)) + memcpy(mac, mac_align, ETH_ALEN); + else { + IWM_ERR(iwm, "Invalid EEPROM MAC\n"); + memcpy(mac, iwm->conf.mac_addr, ETH_ALEN); + get_random_bytes(&mac[3], 3); + } + + return 0; +} + +static int iwm_check_profile(struct iwm_priv *iwm) +{ + if (!iwm->umac_profile_active) + return -EAGAIN; + + if (iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_40 && + iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_104 && + iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_TKIP && + iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_CCMP) { + IWM_ERR(iwm, "Wrong unicast cipher: 0x%x\n", + iwm->umac_profile->sec.ucast_cipher); + return -EAGAIN; + } + + if (iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_WEP_40 && + iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_WEP_104 && + 
iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_TKIP && + iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_CCMP) { + IWM_ERR(iwm, "Wrong multicast cipher: 0x%x\n", + iwm->umac_profile->sec.mcast_cipher); + return -EAGAIN; + } + + if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 || + iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) && + (iwm->umac_profile->sec.ucast_cipher != + iwm->umac_profile->sec.mcast_cipher)) { + IWM_ERR(iwm, "Unicast and multicast ciphers differ for WEP\n"); + } + + return 0; +} + +int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx) +{ + struct iwm_umac_tx_key_id tx_key_id; + int ret; + + ret = iwm_check_profile(iwm); + if (ret < 0) + return ret; + + /* UMAC only allows to set default key for WEP and auth type is + * NOT 802.1X or RSNA. */ + if ((iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_40 && + iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_104) || + iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_8021X || + iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_RSNA_PSK) + return 0; + + tx_key_id.hdr.oid = UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID; + tx_key_id.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_tx_key_id) - + sizeof(struct iwm_umac_wifi_if)); + + tx_key_id.key_idx = key_idx; + + return iwm_send_wifi_if_cmd(iwm, &tx_key_id, sizeof(tx_key_id), 1); +} + +int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key) +{ + int ret = 0; + u8 cmd[64], *sta_addr, *key_data, key_len; + s8 key_idx; + u16 cmd_size = 0; + struct iwm_umac_key_hdr *key_hdr = &key->hdr; + struct iwm_umac_key_wep40 *wep40 = (struct iwm_umac_key_wep40 *)cmd; + struct iwm_umac_key_wep104 *wep104 = (struct iwm_umac_key_wep104 *)cmd; + struct iwm_umac_key_tkip *tkip = (struct iwm_umac_key_tkip *)cmd; + struct iwm_umac_key_ccmp *ccmp = (struct iwm_umac_key_ccmp *)cmd; + + if (!remove) { + ret = iwm_check_profile(iwm); + if (ret < 0) + return ret; + } + + sta_addr = key->hdr.mac; + key_data = key->key; + key_len = key->key_len; + key_idx = key->hdr.key_idx; + + if (!remove) { + u8 auth_type = iwm->umac_profile->sec.auth_type; + + IWM_DBG_WEXT(iwm, DBG, "key_idx:%d\n", key_idx); + IWM_DBG_WEXT(iwm, DBG, "key_len:%d\n", key_len); + IWM_DBG_WEXT(iwm, DBG, "MAC:%pM, idx:%d, multicast:%d\n", + key_hdr->mac, key_hdr->key_idx, key_hdr->multicast); + + IWM_DBG_WEXT(iwm, DBG, "profile: mcast:0x%x, ucast:0x%x\n", + iwm->umac_profile->sec.mcast_cipher, + iwm->umac_profile->sec.ucast_cipher); + IWM_DBG_WEXT(iwm, DBG, "profile: auth_type:0x%x, flags:0x%x\n", + iwm->umac_profile->sec.auth_type, + iwm->umac_profile->sec.flags); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + wep40->hdr.oid = UMAC_WIFI_IF_CMD_ADD_WEP40_KEY; + wep40->hdr.buf_size = + cpu_to_le16(sizeof(struct iwm_umac_key_wep40) - + sizeof(struct iwm_umac_wifi_if)); + + memcpy(&wep40->key_hdr, key_hdr, + sizeof(struct iwm_umac_key_hdr)); + memcpy(wep40->key, key_data, key_len); + wep40->static_key = + !!((auth_type != UMAC_AUTH_TYPE_8021X) && + (auth_type != UMAC_AUTH_TYPE_RSNA_PSK)); + + cmd_size = sizeof(struct iwm_umac_key_wep40); + break; + + case WLAN_CIPHER_SUITE_WEP104: + wep104->hdr.oid = UMAC_WIFI_IF_CMD_ADD_WEP104_KEY; + wep104->hdr.buf_size = + cpu_to_le16(sizeof(struct iwm_umac_key_wep104) - + sizeof(struct iwm_umac_wifi_if)); + + memcpy(&wep104->key_hdr, key_hdr, + sizeof(struct iwm_umac_key_hdr)); + memcpy(wep104->key, key_data, key_len); + wep104->static_key = + !!((auth_type != UMAC_AUTH_TYPE_8021X) && + (auth_type != 
UMAC_AUTH_TYPE_RSNA_PSK)); + + cmd_size = sizeof(struct iwm_umac_key_wep104); + break; + + case WLAN_CIPHER_SUITE_CCMP: + key_hdr->key_idx++; + ccmp->hdr.oid = UMAC_WIFI_IF_CMD_ADD_CCMP_KEY; + ccmp->hdr.buf_size = + cpu_to_le16(sizeof(struct iwm_umac_key_ccmp) - + sizeof(struct iwm_umac_wifi_if)); + + memcpy(&ccmp->key_hdr, key_hdr, + sizeof(struct iwm_umac_key_hdr)); + + memcpy(ccmp->key, key_data, key_len); + + if (key->seq_len) + memcpy(ccmp->iv_count, key->seq, key->seq_len); + + cmd_size = sizeof(struct iwm_umac_key_ccmp); + break; + + case WLAN_CIPHER_SUITE_TKIP: + key_hdr->key_idx++; + tkip->hdr.oid = UMAC_WIFI_IF_CMD_ADD_TKIP_KEY; + tkip->hdr.buf_size = + cpu_to_le16(sizeof(struct iwm_umac_key_tkip) - + sizeof(struct iwm_umac_wifi_if)); + + memcpy(&tkip->key_hdr, key_hdr, + sizeof(struct iwm_umac_key_hdr)); + + memcpy(tkip->tkip_key, key_data, IWM_TKIP_KEY_SIZE); + memcpy(tkip->mic_tx_key, key_data + IWM_TKIP_KEY_SIZE, + IWM_TKIP_MIC_SIZE); + memcpy(tkip->mic_rx_key, + key_data + IWM_TKIP_KEY_SIZE + IWM_TKIP_MIC_SIZE, + IWM_TKIP_MIC_SIZE); + + if (key->seq_len) + memcpy(ccmp->iv_count, key->seq, key->seq_len); + + cmd_size = sizeof(struct iwm_umac_key_tkip); + break; + + default: + return -ENOTSUPP; + } + + if ((key->cipher == WLAN_CIPHER_SUITE_TKIP) || + (key->cipher == WLAN_CIPHER_SUITE_CCMP)) + /* + * UGLY_UGLY_UGLY + * Copied HACK from the MWG driver. + * Without it, the key is set before the second + * EAPOL frame is sent, and the latter is thus + * encrypted. + */ + schedule_timeout_interruptible(usecs_to_jiffies(300)); + + ret = iwm_send_wifi_if_cmd(iwm, cmd, cmd_size, 1); + } else { + struct iwm_umac_key_remove key_remove; + + IWM_DBG_WEXT(iwm, ERR, "Removing key_idx:%d\n", key_idx); + + key_remove.hdr.oid = UMAC_WIFI_IF_CMD_REMOVE_KEY; + key_remove.hdr.buf_size = + cpu_to_le16(sizeof(struct iwm_umac_key_remove) - + sizeof(struct iwm_umac_wifi_if)); + memcpy(&key_remove.key_hdr, key_hdr, + sizeof(struct iwm_umac_key_hdr)); + + ret = iwm_send_wifi_if_cmd(iwm, &key_remove, + sizeof(struct iwm_umac_key_remove), + 1); + if (ret) + return ret; + + iwm->keys[key_idx].key_len = 0; + } + + return ret; +} + + +int iwm_send_mlme_profile(struct iwm_priv *iwm) +{ + int ret; + struct iwm_umac_profile profile; + + memcpy(&profile, iwm->umac_profile, sizeof(profile)); + + profile.hdr.oid = UMAC_WIFI_IF_CMD_SET_PROFILE; + profile.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_profile) - + sizeof(struct iwm_umac_wifi_if)); + + ret = iwm_send_wifi_if_cmd(iwm, &profile, sizeof(profile), 1); + if (ret) { + IWM_ERR(iwm, "Send profile command failed\n"); + return ret; + } + + set_bit(IWM_STATUS_SME_CONNECTING, &iwm->status); + return 0; +} + +int iwm_invalidate_mlme_profile(struct iwm_priv *iwm) +{ + struct iwm_umac_invalidate_profile invalid; + int ret; + + invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE; + invalid.hdr.buf_size = + cpu_to_le16(sizeof(struct iwm_umac_invalidate_profile) - + sizeof(struct iwm_umac_wifi_if)); + + invalid.reason = WLAN_REASON_UNSPECIFIED; + + ret = iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1); + if (ret) + return ret; + + ret = wait_event_interruptible_timeout(iwm->mlme_queue, + (iwm->umac_profile_active == 0), 5 * HZ); + + return ret ? 
0 : -EBUSY; +} + +int iwm_tx_power_trigger(struct iwm_priv *iwm) +{ + struct iwm_umac_pwr_trigger pwr_trigger; + + pwr_trigger.hdr.oid = UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER; + pwr_trigger.hdr.buf_size = + cpu_to_le16(sizeof(struct iwm_umac_pwr_trigger) - + sizeof(struct iwm_umac_wifi_if)); + + + return iwm_send_wifi_if_cmd(iwm, &pwr_trigger, sizeof(pwr_trigger), 1); +} + +int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags) +{ + struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; + struct iwm_umac_cmd umac_cmd; + struct iwm_umac_cmd_stats_req stats_req; + + stats_req.flags = cpu_to_le32(flags); + + umac_cmd.id = UMAC_CMD_OPCODE_STATISTIC_REQUEST; + umac_cmd.resp = 0; + + return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &stats_req, + sizeof(struct iwm_umac_cmd_stats_req)); +} + +int iwm_send_umac_channel_list(struct iwm_priv *iwm) +{ + struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; + struct iwm_umac_cmd umac_cmd; + struct iwm_umac_cmd_get_channel_list *ch_list; + int size = sizeof(struct iwm_umac_cmd_get_channel_list) + + sizeof(struct iwm_umac_channel_info) * 4; + int ret; + + ch_list = kzalloc(size, GFP_KERNEL); + if (!ch_list) { + IWM_ERR(iwm, "Couldn't allocate channel list cmd\n"); + return -ENOMEM; + } + + ch_list->ch[0].band = UMAC_BAND_2GHZ; + ch_list->ch[0].type = UMAC_CHANNEL_WIDTH_20MHZ; + ch_list->ch[0].flags = UMAC_CHANNEL_FLAG_VALID; + + ch_list->ch[1].band = UMAC_BAND_5GHZ; + ch_list->ch[1].type = UMAC_CHANNEL_WIDTH_20MHZ; + ch_list->ch[1].flags = UMAC_CHANNEL_FLAG_VALID; + + ch_list->ch[2].band = UMAC_BAND_2GHZ; + ch_list->ch[2].type = UMAC_CHANNEL_WIDTH_20MHZ; + ch_list->ch[2].flags = UMAC_CHANNEL_FLAG_VALID | UMAC_CHANNEL_FLAG_IBSS; + + ch_list->ch[3].band = UMAC_BAND_5GHZ; + ch_list->ch[3].type = UMAC_CHANNEL_WIDTH_20MHZ; + ch_list->ch[3].flags = UMAC_CHANNEL_FLAG_VALID | UMAC_CHANNEL_FLAG_IBSS; + + ch_list->count = cpu_to_le16(4); + + umac_cmd.id = UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST; + umac_cmd.resp = 1; + + ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, ch_list, size); + + kfree(ch_list); + + return ret; +} + +int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids, + int ssid_num) +{ + struct iwm_umac_cmd_scan_request req; + int i, ret; + + memset(&req, 0, sizeof(struct iwm_umac_cmd_scan_request)); + + req.hdr.oid = UMAC_WIFI_IF_CMD_SCAN_REQUEST; + req.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_cmd_scan_request) + - sizeof(struct iwm_umac_wifi_if)); + req.type = UMAC_WIFI_IF_SCAN_TYPE_USER; + req.timeout = 2; + req.seq_num = iwm->scan_id; + req.ssid_num = min(ssid_num, UMAC_WIFI_IF_PROBE_OPTION_MAX); + + for (i = 0; i < req.ssid_num; i++) { + memcpy(req.ssids[i].ssid, ssids[i].ssid, ssids[i].ssid_len); + req.ssids[i].ssid_len = ssids[i].ssid_len; + } + + ret = iwm_send_wifi_if_cmd(iwm, &req, sizeof(req), 0); + if (ret) { + IWM_ERR(iwm, "Couldn't send scan request\n"); + return ret; + } + + iwm->scan_id = iwm->scan_id++ % IWM_SCAN_ID_MAX; + + return 0; +} + +int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len) +{ + struct cfg80211_ssid one_ssid; + + if (test_and_set_bit(IWM_STATUS_SCANNING, &iwm->status)) + return 0; + + one_ssid.ssid_len = min(ssid_len, IEEE80211_MAX_SSID_LEN); + memcpy(&one_ssid.ssid, ssid, one_ssid.ssid_len); + + return iwm_scan_ssids(iwm, &one_ssid, 1); +} + +int iwm_target_reset(struct iwm_priv *iwm) +{ + struct iwm_udma_nonwifi_cmd target_cmd; + + target_cmd.opcode = UMAC_HDI_OUT_OPCODE_REBOOT; + target_cmd.addr = 0; + target_cmd.op1_sz = 0; + target_cmd.op2 = 0; + target_cmd.handle_by_hw 
= 0; + target_cmd.resp = 0; + target_cmd.eop = 1; + + return iwm_hal_send_target_cmd(iwm, &target_cmd, NULL); +} + +int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm, + struct iwm_umac_notif_stop_resume_tx *ntf) +{ + struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; + struct iwm_umac_cmd umac_cmd; + struct iwm_umac_cmd_stop_resume_tx stp_res_cmd; + struct iwm_sta_info *sta_info; + u8 sta_id = STA_ID_N_COLOR_ID(ntf->sta_id); + int i; + + sta_info = &iwm->sta_table[sta_id]; + if (!sta_info->valid) { + IWM_ERR(iwm, "Invalid STA: %d\n", sta_id); + return -EINVAL; + } + + umac_cmd.id = UMAC_CMD_OPCODE_STOP_RESUME_STA_TX; + umac_cmd.resp = 0; + + stp_res_cmd.flags = ntf->flags; + stp_res_cmd.sta_id = ntf->sta_id; + stp_res_cmd.stop_resume_tid_msk = ntf->stop_resume_tid_msk; + for (i = 0; i < IWM_UMAC_TID_NR; i++) + stp_res_cmd.last_seq_num[i] = + sta_info->tid_info[i].last_seq_num; + + return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &stp_res_cmd, + sizeof(struct iwm_umac_cmd_stop_resume_tx)); + +} + +int iwm_send_pmkid_update(struct iwm_priv *iwm, + struct cfg80211_pmksa *pmksa, u32 command) +{ + struct iwm_umac_pmkid_update update; + int ret; + + memset(&update, 0, sizeof(struct iwm_umac_pmkid_update)); + + update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE; + update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) - + sizeof(struct iwm_umac_wifi_if)); + + update.command = cpu_to_le32(command); + if (pmksa->bssid) + memcpy(&update.bssid, pmksa->bssid, ETH_ALEN); + if (pmksa->pmkid) + memcpy(&update.pmkid, pmksa->pmkid, WLAN_PMKID_LEN); + + ret = iwm_send_wifi_if_cmd(iwm, &update, + sizeof(struct iwm_umac_pmkid_update), 0); + if (ret) { + IWM_ERR(iwm, "PMKID update command failed\n"); + return ret; + } + + return 0; +} diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h new file mode 100644 index 0000000..3dfd9f0 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/commands.h @@ -0,0 +1,508 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#ifndef __IWM_COMMANDS_H__ +#define __IWM_COMMANDS_H__ + +#include + +#define IWM_BARKER_REBOOT_NOTIFICATION 0xF +#define IWM_ACK_BARKER_NOTIFICATION 0x10 + +/* UMAC commands */ +#define UMAC_RST_CTRL_FLG_LARC_CLK_EN 0x0001 +#define UMAC_RST_CTRL_FLG_LARC_RESET 0x0002 +#define UMAC_RST_CTRL_FLG_FUNC_RESET 0x0004 +#define UMAC_RST_CTRL_FLG_DEV_RESET 0x0008 +#define UMAC_RST_CTRL_FLG_WIFI_CORE_EN 0x0010 +#define UMAC_RST_CTRL_FLG_WIFI_LINK_EN 0x0040 +#define UMAC_RST_CTRL_FLG_WIFI_MLME_EN 0x0080 +#define UMAC_RST_CTRL_FLG_NVM_RELOAD 0x0100 + +struct iwm_umac_cmd_reset { + __le32 flags; +} __attribute__ ((packed)); + +#define UMAC_PARAM_TBL_ORD_FIX 0x0 +#define UMAC_PARAM_TBL_ORD_VAR 0x1 +#define UMAC_PARAM_TBL_CFG_FIX 0x2 +#define UMAC_PARAM_TBL_CFG_VAR 0x3 +#define UMAC_PARAM_TBL_BSS_TRK 0x4 +#define UMAC_PARAM_TBL_FA_CFG_FIX 0x5 +#define UMAC_PARAM_TBL_STA 0x6 +#define UMAC_PARAM_TBL_CHN 0x7 +#define UMAC_PARAM_TBL_STATISTICS 0x8 + +/* fast access table */ +enum { + CFG_FRAG_THRESHOLD = 0, + CFG_FRAME_RETRY_LIMIT, + CFG_OS_QUEUE_UTIL_TH, + CFG_RX_FILTER, + /* <-- LAST --> */ + FAST_ACCESS_CFG_TBL_FIX_LAST +}; + +/* fixed size table */ +enum { + CFG_POWER_INDEX = 0, + CFG_PM_LEGACY_RX_TIMEOUT, + CFG_PM_LEGACY_TX_TIMEOUT, + CFG_PM_CTRL_FLAGS, + CFG_PM_KEEP_ALIVE_IN_BEACONS, + CFG_BT_ON_THRESHOLD, + CFG_RTS_THRESHOLD, + CFG_CTS_TO_SELF, + CFG_COEX_MODE, + CFG_WIRELESS_MODE, + CFG_ASSOCIATION_TIMEOUT, + CFG_ROAM_TIMEOUT, + CFG_CAPABILITY_SUPPORTED_RATES, + CFG_SCAN_ALLOWED_UNASSOC_FLAGS, + CFG_SCAN_ALLOWED_MAIN_ASSOC_FLAGS, + CFG_SCAN_ALLOWED_PAN_ASSOC_FLAGS, + CFG_SCAN_INTERNAL_PERIODIC_ENABLED, + CFG_SCAN_IMM_INTERNAL_PERIODIC_SCAN_ON_INIT, + CFG_SCAN_DEFAULT_PERIODIC_FREQ_SEC, + CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN, + CFG_TLC_SUPPORTED_TX_HT_RATES, + CFG_TLC_SUPPORTED_TX_RATES, + CFG_TLC_SPATIAL_STREAM_SUPPORTED, + CFG_TLC_RETRY_PER_RATE, + CFG_TLC_RETRY_PER_HT_RATE, + CFG_TLC_FIXED_MCS, + CFG_TLC_CONTROL_FLAGS, + CFG_TLC_SR_MIN_FAIL, + CFG_TLC_SR_MIN_PASS, + CFG_TLC_HT_STAY_IN_COL_PASS_THRESH, + CFG_TLC_HT_STAY_IN_COL_FAIL_THRESH, + CFG_TLC_LEGACY_STAY_IN_COL_PASS_THRESH, + CFG_TLC_LEGACY_STAY_IN_COL_FAIL_THRESH, + CFG_TLC_HT_FLUSH_STATS_PACKETS, + CFG_TLC_LEGACY_FLUSH_STATS_PACKETS, + CFG_TLC_LEGACY_FLUSH_STATS_MS, + CFG_TLC_HT_FLUSH_STATS_MS, + CFG_TLC_STAY_IN_COL_TIME_OUT, + CFG_TLC_AGG_SHORT_LIM, + CFG_TLC_AGG_LONG_LIM, + CFG_TLC_HT_SR_NO_DECREASE, + CFG_TLC_LEGACY_SR_NO_DECREASE, + CFG_TLC_SR_FORCE_DECREASE, + CFG_TLC_SR_ALLOW_INCREASE, + CFG_TLC_AGG_SET_LONG, + CFG_TLC_AUTO_AGGREGATION, + CFG_TLC_AGG_THRESHOLD, + CFG_TLC_TID_LOAD_THRESHOLD, + CFG_TLC_BLOCK_ACK_TIMEOUT, + CFG_TLC_NO_BA_COUNTED_AS_ONE, + CFG_TLC_NUM_BA_STREAMS_ALLOWED, + CFG_TLC_NUM_BA_STREAMS_PRESENT, + CFG_TLC_RENEW_ADDBA_DELAY, + CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD, + CFG_TLC_IS_STABLE_IN_HT, + CFG_TLC_SR_SIC_1ST_FAIL, + CFG_TLC_SR_SIC_1ST_PASS, + CFG_TLC_SR_SIC_TOTAL_FAIL, + CFG_TLC_SR_SIC_TOTAL_PASS, + 
CFG_RLC_CHAIN_CTRL, + CFG_TRK_TABLE_OP_MODE, + CFG_TRK_TABLE_RSSI_THRESHOLD, + CFG_TX_PWR_TARGET, /* Used By xVT */ + CFG_TX_PWR_LIMIT_USR, + CFG_TX_PWR_LIMIT_BSS, /* 11d limit */ + CFG_TX_PWR_LIMIT_BSS_CONSTRAINT, /* 11h constraint */ + CFG_TX_PWR_MODE, + CFG_MLME_DBG_NOTIF_BLOCK, + CFG_BT_OFF_BECONS_INTERVALS, + CFG_BT_FRAG_DURATION, + CFG_ACTIVE_CHAINS, + CFG_CALIB_CTRL, + CFG_CAPABILITY_SUPPORTED_HT_RATES, + CFG_HT_MAC_PARAM_INFO, + CFG_MIMO_PS_MODE, + CFG_HT_DEFAULT_CAPABILIES_INFO, + CFG_LED_SC_RESOLUTION_FACTOR, + CFG_PTAM_ENERGY_CCK_DET_DEFAULT, + CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_DEFAULT, + CFG_PTAM_CORR40_4_TH_ADD_MIN_DEFAULT, + CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_DEFAULT, + CFG_PTAM_CORR32_4_TH_ADD_MIN_DEFAULT, + CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_DEFAULT, + CFG_PTAM_CORR32_1_TH_ADD_MIN_DEFAULT, + CFG_PTAM_ENERGY_CCK_DET_MIN_VAL, + CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MIN_VAL, + CFG_PTAM_CORR40_4_TH_ADD_MIN_MIN_VAL, + CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MIN_VAL, + CFG_PTAM_CORR32_4_TH_ADD_MIN_MIN_VAL, + CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MIN_VAL, + CFG_PTAM_CORR32_1_TH_ADD_MIN_MIN_VAL, + CFG_PTAM_ENERGY_CCK_DET_MAX_VAL, + CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MAX_VAL, + CFG_PTAM_CORR40_4_TH_ADD_MIN_MAX_VAL, + CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MAX_VAL, + CFG_PTAM_CORR32_4_TH_ADD_MIN_MAX_VAL, + CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MAX_VAL, + CFG_PTAM_CORR32_1_TH_ADD_MIN_MAX_VAL, + CFG_PTAM_ENERGY_CCK_DET_STEP_VAL, + CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_STEP_VAL, + CFG_PTAM_CORR40_4_TH_ADD_MIN_STEP_VAL, + CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_STEP_VAL, + CFG_PTAM_CORR32_4_TH_ADD_MIN_STEP_VAL, + CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_STEP_VAL, + CFG_PTAM_CORR32_1_TH_ADD_MIN_STEP_VAL, + CFG_PTAM_LINK_SENS_FA_OFDM_MAX, + CFG_PTAM_LINK_SENS_FA_OFDM_MIN, + CFG_PTAM_LINK_SENS_FA_CCK_MAX, + CFG_PTAM_LINK_SENS_FA_CCK_MIN, + CFG_PTAM_LINK_SENS_NRG_DIFF, + CFG_PTAM_LINK_SENS_NRG_MARGIN, + CFG_PTAM_LINK_SENS_MAX_NUMBER_OF_TIMES_IN_CCK_NO_FA, + CFG_PTAM_LINK_SENS_AUTO_CORR_MAX_TH_CCK, + CFG_AGG_MGG_TID_LOAD_ADDBA_THRESHOLD, + CFG_AGG_MGG_TID_LOAD_DELBA_THRESHOLD, + CFG_AGG_MGG_ADDBA_BUF_SIZE, + CFG_AGG_MGG_ADDBA_INACTIVE_TIMEOUT, + CFG_AGG_MGG_ADDBA_DEBUG_FLAGS, + CFG_SCAN_PERIODIC_RSSI_HIGH_THRESHOLD, + CFG_SCAN_PERIODIC_COEF_RSSI_HIGH, + CFG_11D_ENABLED, + CFG_11H_FEATURE_FLAGS, + + /* <-- LAST --> */ + CFG_TBL_FIX_LAST +}; + +/* variable size table */ +enum { + CFG_NET_ADDR = 0, + CFG_LED_PATTERN_TABLE, + + /* <-- LAST --> */ + CFG_TBL_VAR_LAST +}; + +struct iwm_umac_cmd_set_param_fix { + __le16 tbl; + __le16 key; + __le32 value; +} __attribute__ ((packed)); + +struct iwm_umac_cmd_set_param_var { + __le16 tbl; + __le16 key; + __le16 len; + __le16 reserved; +} __attribute__ ((packed)); + +struct iwm_umac_cmd_get_param { + __le16 tbl; + __le16 key; +} __attribute__ ((packed)); + +struct iwm_umac_cmd_get_param_resp { + __le16 tbl; + __le16 key; + __le16 len; + __le16 reserved; +} __attribute__ ((packed)); + +struct iwm_umac_cmd_eeprom_proxy_hdr { + __le32 type; + __le32 offset; + __le32 len; +} __attribute__ ((packed)); + +struct iwm_umac_cmd_eeprom_proxy { + struct iwm_umac_cmd_eeprom_proxy_hdr hdr; + u8 buf[0]; +} __attribute__ ((packed)); + +#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1 +#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2 + +#define UMAC_CHANNEL_FLAG_VALID BIT(0) +#define UMAC_CHANNEL_FLAG_IBSS BIT(1) +#define UMAC_CHANNEL_FLAG_ACTIVE BIT(3) +#define UMAC_CHANNEL_FLAG_RADAR BIT(4) +#define UMAC_CHANNEL_FLAG_DFS BIT(7) + +struct iwm_umac_channel_info { + u8 band; + u8 type; + u8 reserved; + u8 flags; + __le32 channels_mask; +} 
__attribute__ ((packed)); + +struct iwm_umac_cmd_get_channel_list { + __le16 count; + __le16 reserved; + struct iwm_umac_channel_info ch[0]; +} __attribute__ ((packed)); + + +/* UMAC WiFi interface commands */ + +/* Coexistence mode */ +#define COEX_MODE_SA 0x1 +#define COEX_MODE_XOR 0x2 +#define COEX_MODE_CM 0x3 +#define COEX_MODE_MAX 0x4 + +/* Wireless mode */ +#define WIRELESS_MODE_11A 0x1 +#define WIRELESS_MODE_11G 0x2 +#define WIRELESS_MODE_11N 0x4 + +#define UMAC_PROFILE_EX_IE_REQUIRED 0x1 +#define UMAC_PROFILE_QOS_ALLOWED 0x2 + +/* Scanning */ +#define UMAC_WIFI_IF_PROBE_OPTION_MAX 10 + +#define UMAC_WIFI_IF_SCAN_TYPE_USER 0x0 +#define UMAC_WIFI_IF_SCAN_TYPE_UMAC_RESERVED 0x1 +#define UMAC_WIFI_IF_SCAN_TYPE_HOST_PERIODIC 0x2 +#define UMAC_WIFI_IF_SCAN_TYPE_MAX 0x3 + +struct iwm_umac_ssid { + u8 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 reserved[3]; +} __attribute__ ((packed)); + +struct iwm_umac_cmd_scan_request { + struct iwm_umac_wifi_if hdr; + __le32 type; /* UMAC_WIFI_IF_SCAN_TYPE_* */ + u8 ssid_num; + u8 seq_num; + u8 timeout; /* In seconds */ + u8 reserved; + struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX]; +} __attribute__ ((packed)); + +#define UMAC_CIPHER_TYPE_NONE 0xFF +#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00 +#define UMAC_CIPHER_TYPE_WEP_40 0x01 +#define UMAC_CIPHER_TYPE_WEP_104 0x02 +#define UMAC_CIPHER_TYPE_TKIP 0x04 +#define UMAC_CIPHER_TYPE_CCMP 0x08 + +/* Supported authentication types - bitmap */ +#define UMAC_AUTH_TYPE_OPEN 0x00 +#define UMAC_AUTH_TYPE_LEGACY_PSK 0x01 +#define UMAC_AUTH_TYPE_8021X 0x02 +#define UMAC_AUTH_TYPE_RSNA_PSK 0x04 + +/* iwm_umac_security.flag is WPA supported -- bits[0:0] */ +#define UMAC_SEC_FLG_WPA_ON_POS 0 +#define UMAC_SEC_FLG_WPA_ON_SEED 1 +#define UMAC_SEC_FLG_WPA_ON_MSK (UMAC_SEC_FLG_WPA_ON_SEED << \ + UMAC_SEC_FLG_WPA_ON_POS) + +/* iwm_umac_security.flag is WPA2 supported -- bits [1:1] */ +#define UMAC_SEC_FLG_RSNA_ON_POS 1 +#define UMAC_SEC_FLG_RSNA_ON_SEED 1 +#define UMAC_SEC_FLG_RSNA_ON_MSK (UMAC_SEC_FLG_RSNA_ON_SEED << \ + UMAC_SEC_FLG_RSNA_ON_POS) + +/* iwm_umac_security.flag is WSC mode on -- bits [2:2] */ +#define UMAC_SEC_FLG_WSC_ON_POS 2 +#define UMAC_SEC_FLG_WSC_ON_SEED 1 +#define UMAC_SEC_FLG_WSC_ON_MSK (UMAC_SEC_FLG_WSC_ON_SEED << \ + UMAC_SEC_FLG_WSC_ON_POS) + + +/* Legacy profile can use only WEP40 and WEP104 for encryption and + * OPEN or PSK for authentication */ +#define UMAC_SEC_FLG_LEGACY_PROFILE 0 + +struct iwm_umac_security { + u8 auth_type; + u8 ucast_cipher; + u8 mcast_cipher; + u8 flags; +} __attribute__ ((packed)); + +struct iwm_umac_ibss { + u8 beacon_interval; /* in millisecond */ + u8 atim; /* in millisecond */ + s8 join_only; + u8 band; + u8 channel; + u8 reserved[3]; +} __attribute__ ((packed)); + +#define UMAC_MODE_BSS 0 +#define UMAC_MODE_IBSS 1 + +#define UMAC_BSSID_MAX 4 + +struct iwm_umac_profile { + struct iwm_umac_wifi_if hdr; + __le32 mode; + struct iwm_umac_ssid ssid; + u8 bssid[UMAC_BSSID_MAX][ETH_ALEN]; + struct iwm_umac_security sec; + struct iwm_umac_ibss ibss; + __le32 channel_2ghz; + __le32 channel_5ghz; + __le16 flags; + u8 wireless_mode; + u8 bss_num; +} __attribute__ ((packed)); + +struct iwm_umac_invalidate_profile { + struct iwm_umac_wifi_if hdr; + u8 reason; + u8 reserved[3]; +} __attribute__ ((packed)); + +/* Encryption key commands */ +struct iwm_umac_key_wep40 { + struct iwm_umac_wifi_if hdr; + struct iwm_umac_key_hdr key_hdr; + u8 key[WLAN_KEY_LEN_WEP40]; + u8 static_key; + u8 reserved[2]; +} __attribute__ ((packed)); + +struct iwm_umac_key_wep104 { + 
struct iwm_umac_wifi_if hdr; + struct iwm_umac_key_hdr key_hdr; + u8 key[WLAN_KEY_LEN_WEP104]; + u8 static_key; + u8 reserved[2]; +} __attribute__ ((packed)); + +#define IWM_TKIP_KEY_SIZE 16 +#define IWM_TKIP_MIC_SIZE 8 +struct iwm_umac_key_tkip { + struct iwm_umac_wifi_if hdr; + struct iwm_umac_key_hdr key_hdr; + u8 iv_count[6]; + u8 reserved[2]; + u8 tkip_key[IWM_TKIP_KEY_SIZE]; + u8 mic_rx_key[IWM_TKIP_MIC_SIZE]; + u8 mic_tx_key[IWM_TKIP_MIC_SIZE]; +} __attribute__ ((packed)); + +struct iwm_umac_key_ccmp { + struct iwm_umac_wifi_if hdr; + struct iwm_umac_key_hdr key_hdr; + u8 iv_count[6]; + u8 reserved[2]; + u8 key[WLAN_KEY_LEN_CCMP]; +} __attribute__ ((packed)); + +struct iwm_umac_key_remove { + struct iwm_umac_wifi_if hdr; + struct iwm_umac_key_hdr key_hdr; +} __attribute__ ((packed)); + +struct iwm_umac_tx_key_id { + struct iwm_umac_wifi_if hdr; + u8 key_idx; + u8 reserved[3]; +} __attribute__ ((packed)); + +struct iwm_umac_pwr_trigger { + struct iwm_umac_wifi_if hdr; + __le32 reseved; +} __attribute__ ((packed)); + +struct iwm_umac_cmd_stats_req { + __le32 flags; +} __attribute__ ((packed)); + +struct iwm_umac_cmd_stop_resume_tx { + u8 flags; + u8 sta_id; + __le16 stop_resume_tid_msk; + __le16 last_seq_num[IWM_UMAC_TID_NR]; + u16 reserved; +} __attribute__ ((packed)); + +#define IWM_CMD_PMKID_ADD 1 +#define IWM_CMD_PMKID_DEL 2 +#define IWM_CMD_PMKID_FLUSH 3 + +struct iwm_umac_pmkid_update { + struct iwm_umac_wifi_if hdr; + __le32 command; + u8 bssid[ETH_ALEN]; + __le16 reserved; + u8 pmkid[WLAN_PMKID_LEN]; +} __attribute__ ((packed)); + +/* LMAC commands */ +int iwm_read_mac(struct iwm_priv *iwm, u8 *mac); +int iwm_send_prio_table(struct iwm_priv *iwm); +int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested); +int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested); +int iwm_send_calib_results(struct iwm_priv *iwm); +int iwm_store_rxiq_calib_result(struct iwm_priv *iwm); +int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit); + +/* UMAC commands */ +int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size, + bool resp); +int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp); +int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value); +int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key, + void *payload, u16 payload_size); +int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags); +int iwm_send_mlme_profile(struct iwm_priv *iwm); +int iwm_invalidate_mlme_profile(struct iwm_priv *iwm); +int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id); +int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx); +int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key); +int iwm_tx_power_trigger(struct iwm_priv *iwm); +int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags); +int iwm_send_umac_channel_list(struct iwm_priv *iwm); +int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids, + int ssid_num); +int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len); +int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm, + struct iwm_umac_notif_stop_resume_tx *ntf); +int iwm_send_pmkid_update(struct iwm_priv *iwm, + struct cfg80211_pmksa *pmksa, u32 command); + +/* UDMA commands */ +int iwm_target_reset(struct iwm_priv *iwm); +#endif diff --git a/drivers/net/wireless/iwmc3200wifi/debug.h b/drivers/net/wireless/iwmc3200wifi/debug.h new file mode 100644 index 0000000..e35c9b6 --- /dev/null +++ 
b/drivers/net/wireless/iwmc3200wifi/debug.h @@ -0,0 +1,126 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#ifndef __IWM_DEBUG_H__ +#define __IWM_DEBUG_H__ + +#define IWM_ERR(p, f, a...) dev_err(iwm_to_dev(p), f, ## a) +#define IWM_WARN(p, f, a...) dev_warn(iwm_to_dev(p), f, ## a) +#define IWM_INFO(p, f, a...) dev_info(iwm_to_dev(p), f, ## a) +#define IWM_CRIT(p, f, a...) dev_crit(iwm_to_dev(p), f, ## a) + +#ifdef CONFIG_IWM_DEBUG + +#define IWM_DEBUG_MODULE(i, level, module, f, a...) \ +do { \ + if (unlikely(i->dbg.dbg_module[IWM_DM_##module] >= (IWM_DL_##level)))\ + dev_printk(KERN_INFO, (iwm_to_dev(i)), \ + "%s " f, __func__ , ## a); \ +} while (0) + +#define IWM_HEXDUMP(i, level, module, pref, buf, len) \ +do { \ + if (unlikely(i->dbg.dbg_module[IWM_DM_##module] >= (IWM_DL_##level)))\ + print_hex_dump(KERN_INFO, pref, DUMP_PREFIX_OFFSET, \ + 16, 1, buf, len, 1); \ +} while (0) + +#else + +#define IWM_DEBUG_MODULE(i, level, module, f, a...) +#define IWM_HEXDUMP(i, level, module, pref, buf, len) + +#endif /* CONFIG_IWM_DEBUG */ + +/* Debug modules */ +enum iwm_debug_module_id { + IWM_DM_BOOT = 0, + IWM_DM_FW, + IWM_DM_SDIO, + IWM_DM_NTF, + IWM_DM_RX, + IWM_DM_TX, + IWM_DM_MLME, + IWM_DM_CMD, + IWM_DM_WEXT, + __IWM_DM_NR, +}; +#define IWM_DM_DEFAULT 0 + +#define IWM_DBG_BOOT(i, l, f, a...) IWM_DEBUG_MODULE(i, l, BOOT, f, ## a) +#define IWM_DBG_FW(i, l, f, a...) IWM_DEBUG_MODULE(i, l, FW, f, ## a) +#define IWM_DBG_SDIO(i, l, f, a...) IWM_DEBUG_MODULE(i, l, SDIO, f, ## a) +#define IWM_DBG_NTF(i, l, f, a...) IWM_DEBUG_MODULE(i, l, NTF, f, ## a) +#define IWM_DBG_RX(i, l, f, a...) IWM_DEBUG_MODULE(i, l, RX, f, ## a) +#define IWM_DBG_TX(i, l, f, a...) IWM_DEBUG_MODULE(i, l, TX, f, ## a) +#define IWM_DBG_MLME(i, l, f, a...) IWM_DEBUG_MODULE(i, l, MLME, f, ## a) +#define IWM_DBG_CMD(i, l, f, a...) IWM_DEBUG_MODULE(i, l, CMD, f, ## a) +#define IWM_DBG_WEXT(i, l, f, a...) 
IWM_DEBUG_MODULE(i, l, WEXT, f, ## a) + +/* Debug levels */ +enum iwm_debug_level { + IWM_DL_NONE = 0, + IWM_DL_ERR, + IWM_DL_WARN, + IWM_DL_INFO, + IWM_DL_DBG, +}; +#define IWM_DL_DEFAULT IWM_DL_ERR + +struct iwm_debugfs { + struct iwm_priv *iwm; + struct dentry *rootdir; + struct dentry *devdir; + struct dentry *dbgdir; + struct dentry *txdir; + struct dentry *rxdir; + struct dentry *busdir; + + u32 dbg_level; + struct dentry *dbg_level_dentry; + + unsigned long dbg_modules; + struct dentry *dbg_modules_dentry; + + u8 dbg_module[__IWM_DM_NR]; + struct dentry *dbg_module_dentries[__IWM_DM_NR]; + + struct dentry *txq_dentry; + struct dentry *tx_credit_dentry; + struct dentry *rx_ticket_dentry; + + struct dentry *fw_err_dentry; +}; + +#ifdef CONFIG_IWM_DEBUG +int iwm_debugfs_init(struct iwm_priv *iwm); +void iwm_debugfs_exit(struct iwm_priv *iwm); +#else +static inline int iwm_debugfs_init(struct iwm_priv *iwm) +{ + return 0; +} +static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {} +#endif + +#endif diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c new file mode 100644 index 0000000..c29c994 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c @@ -0,0 +1,573 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include +#include +#include + +#include "iwm.h" +#include "bus.h" +#include "rx.h" +#include "debug.h" + +static struct { + u8 id; + char *name; +} iwm_debug_module[__IWM_DM_NR] = { + {IWM_DM_BOOT, "boot"}, + {IWM_DM_FW, "fw"}, + {IWM_DM_SDIO, "sdio"}, + {IWM_DM_NTF, "ntf"}, + {IWM_DM_RX, "rx"}, + {IWM_DM_TX, "tx"}, + {IWM_DM_MLME, "mlme"}, + {IWM_DM_CMD, "cmd"}, + {IWM_DM_WEXT, "wext"}, +}; + +#define add_dbg_module(dbg, name, id, initlevel) \ +do { \ + struct dentry *d; \ + dbg.dbg_module[id] = (initlevel); \ + d = debugfs_create_x8(name, 0600, dbg.dbgdir, \ + &(dbg.dbg_module[id])); \ + if (!IS_ERR(d)) \ + dbg.dbg_module_dentries[id] = d; \ +} while (0) + +static int iwm_debugfs_u32_read(void *data, u64 *val) +{ + struct iwm_priv *iwm = data; + + *val = iwm->dbg.dbg_level; + return 0; +} + +static int iwm_debugfs_dbg_level_write(void *data, u64 val) +{ + struct iwm_priv *iwm = data; + int i; + + iwm->dbg.dbg_level = val; + + for (i = 0; i < __IWM_DM_NR; i++) + iwm->dbg.dbg_module[i] = val; + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(fops_iwm_dbg_level, + iwm_debugfs_u32_read, iwm_debugfs_dbg_level_write, + "%llu\n"); + +static int iwm_debugfs_dbg_modules_write(void *data, u64 val) +{ + struct iwm_priv *iwm = data; + int i, bit; + + iwm->dbg.dbg_modules = val; + + for (i = 0; i < __IWM_DM_NR; i++) + iwm->dbg.dbg_module[i] = 0; + + for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR) + iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level; + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(fops_iwm_dbg_modules, + iwm_debugfs_u32_read, iwm_debugfs_dbg_modules_write, + "%llu\n"); + +static int iwm_generic_open(struct inode *inode, struct file *filp) +{ + filp->private_data = inode->i_private; + return 0; +} + + +static ssize_t iwm_debugfs_txq_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct iwm_priv *iwm = filp->private_data; + char *buf; + int i, buf_len = 4096; + size_t len = 0; + ssize_t ret; + + if (*ppos != 0) + return 0; + if (count < sizeof(buf)) + return -ENOSPC; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < IWM_TX_QUEUES; i++) { + struct iwm_tx_queue *txq = &iwm->txq[i]; + struct sk_buff *skb; + int j; + unsigned long flags; + + spin_lock_irqsave(&txq->queue.lock, flags); + + skb = (struct sk_buff *)&txq->queue; + + len += snprintf(buf + len, buf_len - len, "TXQ #%d\n", i); + len += snprintf(buf + len, buf_len - len, "\tStopped: %d\n", + __netif_subqueue_stopped(iwm_to_ndev(iwm), + txq->id)); + len += snprintf(buf + len, buf_len - len, "\tConcat count:%d\n", + txq->concat_count); + len += snprintf(buf + len, buf_len - len, "\tQueue len: %d\n", + skb_queue_len(&txq->queue)); + for (j = 0; j < skb_queue_len(&txq->queue); j++) { + struct iwm_tx_info *tx_info; + + skb = skb->next; + tx_info = skb_to_tx_info(skb); + + len += snprintf(buf + len, buf_len - len, + "\tSKB #%d\n", j); + len += snprintf(buf + len, buf_len - len, + "\t\tsta: %d\n", tx_info->sta); + len += snprintf(buf + len, buf_len - len, + "\t\tcolor: %d\n", tx_info->color); + len += snprintf(buf + len, buf_len - len, + "\t\ttid: %d\n", tx_info->tid); + } + + spin_unlock_irqrestore(&txq->queue.lock, flags); + + spin_lock_irqsave(&txq->stopped_queue.lock, flags); + + len += snprintf(buf + len, buf_len - len, + "\tStopped Queue len: %d\n", + skb_queue_len(&txq->stopped_queue)); + for (j = 0; j < skb_queue_len(&txq->stopped_queue); j++) { + struct iwm_tx_info *tx_info; + + skb = skb->next; + tx_info = skb_to_tx_info(skb); + + len += snprintf(buf + 
len, buf_len - len, + "\tSKB #%d\n", j); + len += snprintf(buf + len, buf_len - len, + "\t\tsta: %d\n", tx_info->sta); + len += snprintf(buf + len, buf_len - len, + "\t\tcolor: %d\n", tx_info->color); + len += snprintf(buf + len, buf_len - len, + "\t\ttid: %d\n", tx_info->tid); + } + + spin_unlock_irqrestore(&txq->stopped_queue.lock, flags); + } + + ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len); + kfree(buf); + + return ret; +} + +static ssize_t iwm_debugfs_tx_credit_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct iwm_priv *iwm = filp->private_data; + struct iwm_tx_credit *credit = &iwm->tx_credit; + char *buf; + int i, buf_len = 4096; + size_t len = 0; + ssize_t ret; + + if (*ppos != 0) + return 0; + if (count < sizeof(buf)) + return -ENOSPC; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len += snprintf(buf + len, buf_len - len, + "NR pools: %d\n", credit->pool_nr); + len += snprintf(buf + len, buf_len - len, + "pools map: 0x%lx\n", credit->full_pools_map); + + len += snprintf(buf + len, buf_len - len, "\n### POOLS ###\n"); + for (i = 0; i < IWM_MACS_OUT_GROUPS; i++) { + len += snprintf(buf + len, buf_len - len, + "pools entry #%d\n", i); + len += snprintf(buf + len, buf_len - len, + "\tid: %d\n", + credit->pools[i].id); + len += snprintf(buf + len, buf_len - len, + "\tsid: %d\n", + credit->pools[i].sid); + len += snprintf(buf + len, buf_len - len, + "\tmin_pages: %d\n", + credit->pools[i].min_pages); + len += snprintf(buf + len, buf_len - len, + "\tmax_pages: %d\n", + credit->pools[i].max_pages); + len += snprintf(buf + len, buf_len - len, + "\talloc_pages: %d\n", + credit->pools[i].alloc_pages); + len += snprintf(buf + len, buf_len - len, + "\tfreed_pages: %d\n", + credit->pools[i].total_freed_pages); + } + + len += snprintf(buf + len, buf_len - len, "\n### SPOOLS ###\n"); + for (i = 0; i < IWM_MACS_OUT_SGROUPS; i++) { + len += snprintf(buf + len, buf_len - len, + "spools entry #%d\n", i); + len += snprintf(buf + len, buf_len - len, + "\tid: %d\n", + credit->spools[i].id); + len += snprintf(buf + len, buf_len - len, + "\tmax_pages: %d\n", + credit->spools[i].max_pages); + len += snprintf(buf + len, buf_len - len, + "\talloc_pages: %d\n", + credit->spools[i].alloc_pages); + + } + + ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len); + kfree(buf); + + return ret; +} + +static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct iwm_priv *iwm = filp->private_data; + struct iwm_rx_ticket_node *ticket, *next; + char *buf; + int buf_len = 4096, i; + size_t len = 0; + ssize_t ret; + + if (*ppos != 0) + return 0; + if (count < sizeof(buf)) + return -ENOSPC; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) { + len += snprintf(buf + len, buf_len - len, "Ticket #%d\n", + ticket->ticket->id); + len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n", + ticket->ticket->action); + len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n", + ticket->ticket->flags); + } + + for (i = 0; i < IWM_RX_ID_HASH; i++) { + struct iwm_rx_packet *packet, *nxt; + struct list_head *pkt_list = &iwm->rx_packets[i]; + if (!list_empty(pkt_list)) { + len += snprintf(buf + len, buf_len - len, + "Packet hash #%d\n", i); + list_for_each_entry_safe(packet, nxt, pkt_list, node) { + len += snprintf(buf + len, buf_len - len, + "\tPacket id: %d\n", + packet->id); + len += 
snprintf(buf + len, buf_len - len, + "\tPacket length: %lu\n", + packet->pkt_size); + } + } + } + + ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len); + kfree(buf); + + return ret; +} + +static ssize_t iwm_debugfs_fw_err_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + + struct iwm_priv *iwm = filp->private_data; + char buf[512]; + int buf_len = 512; + size_t len = 0; + + if (*ppos != 0) + return 0; + if (count < sizeof(buf)) + return -ENOSPC; + + if (!iwm->last_fw_err) + return -ENOMEM; + + if (iwm->last_fw_err->line_num == 0) + goto out; + + len += snprintf(buf + len, buf_len - len, "%cMAC FW ERROR:\n", + (le32_to_cpu(iwm->last_fw_err->category) == UMAC_SYS_ERR_CAT_LMAC) + ? 'L' : 'U'); + len += snprintf(buf + len, buf_len - len, + "\tCategory: %d\n", + le32_to_cpu(iwm->last_fw_err->category)); + + len += snprintf(buf + len, buf_len - len, + "\tStatus: 0x%x\n", + le32_to_cpu(iwm->last_fw_err->status)); + + len += snprintf(buf + len, buf_len - len, + "\tPC: 0x%x\n", + le32_to_cpu(iwm->last_fw_err->pc)); + + len += snprintf(buf + len, buf_len - len, + "\tblink1: %d\n", + le32_to_cpu(iwm->last_fw_err->blink1)); + + len += snprintf(buf + len, buf_len - len, + "\tblink2: %d\n", + le32_to_cpu(iwm->last_fw_err->blink2)); + + len += snprintf(buf + len, buf_len - len, + "\tilink1: %d\n", + le32_to_cpu(iwm->last_fw_err->ilink1)); + + len += snprintf(buf + len, buf_len - len, + "\tilink2: %d\n", + le32_to_cpu(iwm->last_fw_err->ilink2)); + + len += snprintf(buf + len, buf_len - len, + "\tData1: 0x%x\n", + le32_to_cpu(iwm->last_fw_err->data1)); + + len += snprintf(buf + len, buf_len - len, + "\tData2: 0x%x\n", + le32_to_cpu(iwm->last_fw_err->data2)); + + len += snprintf(buf + len, buf_len - len, + "\tLine number: %d\n", + le32_to_cpu(iwm->last_fw_err->line_num)); + + len += snprintf(buf + len, buf_len - len, + "\tUMAC status: 0x%x\n", + le32_to_cpu(iwm->last_fw_err->umac_status)); + + len += snprintf(buf + len, buf_len - len, + "\tLMAC status: 0x%x\n", + le32_to_cpu(iwm->last_fw_err->lmac_status)); + + len += snprintf(buf + len, buf_len - len, + "\tSDIO status: 0x%x\n", + le32_to_cpu(iwm->last_fw_err->sdio_status)); + +out: + + return simple_read_from_buffer(buffer, len, ppos, buf, buf_len); +} + +static const struct file_operations iwm_debugfs_txq_fops = { + .owner = THIS_MODULE, + .open = iwm_generic_open, + .read = iwm_debugfs_txq_read, +}; + +static const struct file_operations iwm_debugfs_tx_credit_fops = { + .owner = THIS_MODULE, + .open = iwm_generic_open, + .read = iwm_debugfs_tx_credit_read, +}; + +static const struct file_operations iwm_debugfs_rx_ticket_fops = { + .owner = THIS_MODULE, + .open = iwm_generic_open, + .read = iwm_debugfs_rx_ticket_read, +}; + +static const struct file_operations iwm_debugfs_fw_err_fops = { + .owner = THIS_MODULE, + .open = iwm_generic_open, + .read = iwm_debugfs_fw_err_read, +}; + +int iwm_debugfs_init(struct iwm_priv *iwm) +{ + int i, result; + char devdir[16]; + + iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); + result = PTR_ERR(iwm->dbg.rootdir); + if (!result || IS_ERR(iwm->dbg.rootdir)) { + if (result == -ENODEV) { + IWM_ERR(iwm, "DebugFS (CONFIG_DEBUG_FS) not " + "enabled in kernel config\n"); + result = 0; /* No debugfs support */ + } + IWM_ERR(iwm, "Couldn't create rootdir: %d\n", result); + goto error; + } + + snprintf(devdir, sizeof(devdir), "%s", wiphy_name(iwm_to_wiphy(iwm))); + + iwm->dbg.devdir = debugfs_create_dir(devdir, iwm->dbg.rootdir); + result = PTR_ERR(iwm->dbg.devdir); + if 
(IS_ERR(iwm->dbg.devdir) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create devdir: %d\n", result); + goto error; + } + + iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir); + result = PTR_ERR(iwm->dbg.dbgdir); + if (IS_ERR(iwm->dbg.dbgdir) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create dbgdir: %d\n", result); + goto error; + } + + iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir); + result = PTR_ERR(iwm->dbg.rxdir); + if (IS_ERR(iwm->dbg.rxdir) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create rx dir: %d\n", result); + goto error; + } + + iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir); + result = PTR_ERR(iwm->dbg.txdir); + if (IS_ERR(iwm->dbg.txdir) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create tx dir: %d\n", result); + goto error; + } + + iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir); + result = PTR_ERR(iwm->dbg.busdir); + if (IS_ERR(iwm->dbg.busdir) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create bus dir: %d\n", result); + goto error; + } + + if (iwm->bus_ops->debugfs_init) { + result = iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir); + if (result < 0) { + IWM_ERR(iwm, "Couldn't create bus entry: %d\n", result); + goto error; + } + } + + + iwm->dbg.dbg_level = IWM_DL_NONE; + iwm->dbg.dbg_level_dentry = + debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm, + &fops_iwm_dbg_level); + result = PTR_ERR(iwm->dbg.dbg_level_dentry); + if (IS_ERR(iwm->dbg.dbg_level_dentry) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create dbg_level: %d\n", result); + goto error; + } + + + iwm->dbg.dbg_modules = IWM_DM_DEFAULT; + iwm->dbg.dbg_modules_dentry = + debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm, + &fops_iwm_dbg_modules); + result = PTR_ERR(iwm->dbg.dbg_modules_dentry); + if (IS_ERR(iwm->dbg.dbg_modules_dentry) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create dbg_modules: %d\n", result); + goto error; + } + + for (i = 0; i < __IWM_DM_NR; i++) + add_dbg_module(iwm->dbg, iwm_debug_module[i].name, + iwm_debug_module[i].id, IWM_DL_DEFAULT); + + iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200, + iwm->dbg.txdir, iwm, + &iwm_debugfs_txq_fops); + result = PTR_ERR(iwm->dbg.txq_dentry); + if (IS_ERR(iwm->dbg.txq_dentry) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create tx queue: %d\n", result); + goto error; + } + + iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200, + iwm->dbg.txdir, iwm, + &iwm_debugfs_tx_credit_fops); + result = PTR_ERR(iwm->dbg.tx_credit_dentry); + if (IS_ERR(iwm->dbg.tx_credit_dentry) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create tx credit: %d\n", result); + goto error; + } + + iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200, + iwm->dbg.rxdir, iwm, + &iwm_debugfs_rx_ticket_fops); + result = PTR_ERR(iwm->dbg.rx_ticket_dentry); + if (IS_ERR(iwm->dbg.rx_ticket_dentry) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create rx ticket: %d\n", result); + goto error; + } + + iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200, + iwm->dbg.dbgdir, iwm, + &iwm_debugfs_fw_err_fops); + result = PTR_ERR(iwm->dbg.fw_err_dentry); + if (IS_ERR(iwm->dbg.fw_err_dentry) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create last FW err: %d\n", result); + goto error; + } + + + return 0; + + error: + return result; +} + +void iwm_debugfs_exit(struct iwm_priv *iwm) +{ + int i; + + for (i = 0; i < __IWM_DM_NR; i++) + debugfs_remove(iwm->dbg.dbg_module_dentries[i]); + + 
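+ /* Editor's note, added for readability: the per-module entries are gone
+  * at this point; what follows removes the remaining debugfs files, lets
+  * the bus layer clean up its own entries, and finally removes the
+  * directories that contained them. */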
debugfs_remove(iwm->dbg.dbg_modules_dentry); + debugfs_remove(iwm->dbg.dbg_level_dentry); + debugfs_remove(iwm->dbg.txq_dentry); + debugfs_remove(iwm->dbg.tx_credit_dentry); + debugfs_remove(iwm->dbg.rx_ticket_dentry); + debugfs_remove(iwm->dbg.fw_err_dentry); + if (iwm->bus_ops->debugfs_exit) + iwm->bus_ops->debugfs_exit(iwm); + + debugfs_remove(iwm->dbg.busdir); + debugfs_remove(iwm->dbg.dbgdir); + debugfs_remove(iwm->dbg.txdir); + debugfs_remove(iwm->dbg.rxdir); + debugfs_remove(iwm->dbg.devdir); + debugfs_remove(iwm->dbg.rootdir); +} diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.c b/drivers/net/wireless/iwmc3200wifi/eeprom.c new file mode 100644 index 0000000..8091421 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/eeprom.c @@ -0,0 +1,233 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#include + +#include "iwm.h" +#include "umac.h" +#include "commands.h" +#include "eeprom.h" + +static struct iwm_eeprom_entry eeprom_map[] = { + [IWM_EEPROM_SIG] = + {"Signature", IWM_EEPROM_SIG_OFF, IWM_EEPROM_SIG_LEN}, + + [IWM_EEPROM_VERSION] = + {"Version", IWM_EEPROM_VERSION_OFF, IWM_EEPROM_VERSION_LEN}, + + [IWM_EEPROM_OEM_HW_VERSION] = + {"OEM HW version", IWM_EEPROM_OEM_HW_VERSION_OFF, + IWM_EEPROM_OEM_HW_VERSION_LEN}, + + [IWM_EEPROM_MAC_VERSION] = + {"MAC version", IWM_EEPROM_MAC_VERSION_OFF, IWM_EEPROM_MAC_VERSION_LEN}, + + [IWM_EEPROM_CARD_ID] = + {"Card ID", IWM_EEPROM_CARD_ID_OFF, IWM_EEPROM_CARD_ID_LEN}, + + [IWM_EEPROM_RADIO_CONF] = + {"Radio config", IWM_EEPROM_RADIO_CONF_OFF, IWM_EEPROM_RADIO_CONF_LEN}, + + [IWM_EEPROM_SKU_CAP] = + {"SKU capabilities", IWM_EEPROM_SKU_CAP_OFF, IWM_EEPROM_SKU_CAP_LEN}, + + [IWM_EEPROM_FAT_CHANNELS_CAP] = + {"HT channels capabilities", IWM_EEPROM_FAT_CHANNELS_CAP_OFF, + IWM_EEPROM_FAT_CHANNELS_CAP_LEN}, + + [IWM_EEPROM_CALIB_RXIQ_OFFSET] = + {"RX IQ offset", IWM_EEPROM_CALIB_RXIQ_OFF, IWM_EEPROM_INDIRECT_LEN}, + + [IWM_EEPROM_CALIB_RXIQ] = + {"Calib RX IQ", 0, IWM_EEPROM_CALIB_RXIQ_LEN}, +}; + + +static int iwm_eeprom_read(struct iwm_priv *iwm, u8 eeprom_id) +{ + int ret; + u32 entry_size, chunk_size, data_offset = 0, addr_offset = 0; + u32 addr; + struct iwm_udma_wifi_cmd udma_cmd; + struct iwm_umac_cmd umac_cmd; + struct iwm_umac_cmd_eeprom_proxy eeprom_cmd; + + if (eeprom_id > (IWM_EEPROM_LAST - 1)) + return -EINVAL; + + entry_size = eeprom_map[eeprom_id].length; + + if (eeprom_id >= IWM_EEPROM_INDIRECT_DATA) { + /* indirect data */ + u32 off_id = eeprom_id - IWM_EEPROM_INDIRECT_DATA + + IWM_EEPROM_INDIRECT_OFFSET; + + eeprom_map[eeprom_id].offset = + *(u16 *)(iwm->eeprom + eeprom_map[off_id].offset) << 1; + } + + addr = eeprom_map[eeprom_id].offset; + + udma_cmd.eop = 1; + udma_cmd.credit_group = 0x4; + udma_cmd.ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD; + udma_cmd.lmac_offset = 0; + + umac_cmd.id = UMAC_CMD_OPCODE_EEPROM_PROXY; + umac_cmd.resp = 1; + + while (entry_size > 0) { + chunk_size = min_t(u32, entry_size, IWM_MAX_EEPROM_DATA_LEN); + + eeprom_cmd.hdr.type = + cpu_to_le32(IWM_UMAC_CMD_EEPROM_TYPE_READ); + eeprom_cmd.hdr.offset = cpu_to_le32(addr + addr_offset); + eeprom_cmd.hdr.len = cpu_to_le32(chunk_size); + + ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, + &umac_cmd, &eeprom_cmd, + sizeof(struct iwm_umac_cmd_eeprom_proxy)); + if (ret < 0) { + IWM_ERR(iwm, "Couldn't read eeprom\n"); + return ret; + } + + ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_EEPROM_PROXY, + IWM_SRC_UMAC, 2*HZ); + if (ret < 0) { + IWM_ERR(iwm, "Did not get any eeprom answer\n"); + return ret; + } + + data_offset += chunk_size; + addr_offset += chunk_size; + entry_size -= chunk_size; + } + + return 0; +} + +u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id) +{ + if (!iwm->eeprom) + return ERR_PTR(-ENODEV); + + return iwm->eeprom + eeprom_map[eeprom_id].offset; +} + +int iwm_eeprom_fat_channels(struct iwm_priv *iwm) +{ + struct wiphy *wiphy = iwm_to_wiphy(iwm); + struct ieee80211_supported_band *band; + u16 *channels, i; + + channels = (u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_FAT_CHANNELS_CAP); + if (IS_ERR(channels)) + return PTR_ERR(channels); + + band = wiphy->bands[IEEE80211_BAND_2GHZ]; + band->ht_cap.ht_supported = true; + + for (i = 0; i < IWM_EEPROM_FAT_CHANNELS_24; i++) + if (!(channels[i] & IWM_EEPROM_FAT_CHANNEL_ENABLED)) + band->ht_cap.ht_supported = false; + + band = 
wiphy->bands[IEEE80211_BAND_5GHZ]; + band->ht_cap.ht_supported = true; + for (i = IWM_EEPROM_FAT_CHANNELS_24; i < IWM_EEPROM_FAT_CHANNELS; i++) + if (!(channels[i] & IWM_EEPROM_FAT_CHANNEL_ENABLED)) + band->ht_cap.ht_supported = false; + + return 0; +} + +u32 iwm_eeprom_wireless_mode(struct iwm_priv *iwm) +{ + u16 sku_cap; + u32 wireless_mode = 0; + + sku_cap = *((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP)); + + if (sku_cap & IWM_EEPROM_SKU_CAP_BAND_24GHZ) + wireless_mode |= WIRELESS_MODE_11G; + + if (sku_cap & IWM_EEPROM_SKU_CAP_BAND_52GHZ) + wireless_mode |= WIRELESS_MODE_11A; + + if (sku_cap & IWM_EEPROM_SKU_CAP_11N_ENABLE) + wireless_mode |= WIRELESS_MODE_11N; + + return wireless_mode; +} + + +int iwm_eeprom_init(struct iwm_priv *iwm) +{ + int i, ret = 0; + char name[32]; + + iwm->eeprom = kzalloc(IWM_EEPROM_LEN, GFP_KERNEL); + if (!iwm->eeprom) + return -ENOMEM; + + for (i = IWM_EEPROM_FIRST; i < IWM_EEPROM_LAST; i++) { + ret = iwm_eeprom_read(iwm, i); + if (ret < 0) { + IWM_ERR(iwm, "Couldn't read eeprom entry #%d: %s\n", + i, eeprom_map[i].name); + break; + } + } + + IWM_DBG_BOOT(iwm, DBG, "EEPROM dump:\n"); + for (i = IWM_EEPROM_FIRST; i < IWM_EEPROM_LAST; i++) { + memset(name, 0, 32); + sprintf(name, "%s: ", eeprom_map[i].name); + + IWM_HEXDUMP(iwm, DBG, BOOT, name, + iwm->eeprom + eeprom_map[i].offset, + eeprom_map[i].length); + } + + return ret; +} + +void iwm_eeprom_exit(struct iwm_priv *iwm) +{ + kfree(iwm->eeprom); +} diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.h b/drivers/net/wireless/iwmc3200wifi/eeprom.h new file mode 100644 index 0000000..4e3a3fd --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/eeprom.h @@ -0,0 +1,127 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#ifndef __IWM_EEPROM_H__ +#define __IWM_EEPROM_H__ + +enum { + IWM_EEPROM_SIG = 0, + IWM_EEPROM_FIRST = IWM_EEPROM_SIG, + IWM_EEPROM_VERSION, + IWM_EEPROM_OEM_HW_VERSION, + IWM_EEPROM_MAC_VERSION, + IWM_EEPROM_CARD_ID, + IWM_EEPROM_RADIO_CONF, + IWM_EEPROM_SKU_CAP, + IWM_EEPROM_FAT_CHANNELS_CAP, + + IWM_EEPROM_INDIRECT_OFFSET, + IWM_EEPROM_CALIB_RXIQ_OFFSET = IWM_EEPROM_INDIRECT_OFFSET, + + IWM_EEPROM_INDIRECT_DATA, + IWM_EEPROM_CALIB_RXIQ = IWM_EEPROM_INDIRECT_DATA, + + IWM_EEPROM_LAST, +}; + +#define IWM_EEPROM_SIG_OFF 0x00 +#define IWM_EEPROM_VERSION_OFF (0x54 << 1) +#define IWM_EEPROM_OEM_HW_VERSION_OFF (0x56 << 1) +#define IWM_EEPROM_MAC_VERSION_OFF (0x30 << 1) +#define IWM_EEPROM_CARD_ID_OFF (0x5d << 1) +#define IWM_EEPROM_RADIO_CONF_OFF (0x58 << 1) +#define IWM_EEPROM_SKU_CAP_OFF (0x55 << 1) +#define IWM_EEPROM_CALIB_CONFIG_OFF (0x7c << 1) +#define IWM_EEPROM_FAT_CHANNELS_CAP_OFF (0xde << 1) + +#define IWM_EEPROM_SIG_LEN 4 +#define IWM_EEPROM_VERSION_LEN 2 +#define IWM_EEPROM_OEM_HW_VERSION_LEN 2 +#define IWM_EEPROM_MAC_VERSION_LEN 1 +#define IWM_EEPROM_CARD_ID_LEN 2 +#define IWM_EEPROM_RADIO_CONF_LEN 2 +#define IWM_EEPROM_SKU_CAP_LEN 2 +#define IWM_EEPROM_FAT_CHANNELS_CAP_LEN 40 +#define IWM_EEPROM_INDIRECT_LEN 2 + +#define IWM_MAX_EEPROM_DATA_LEN 240 +#define IWM_EEPROM_LEN 0x800 + +#define IWM_EEPROM_MIN_ALLOWED_VERSION 0x0610 +#define IWM_EEPROM_MAX_ALLOWED_VERSION 0x0700 +#define IWM_EEPROM_CURRENT_VERSION 0x0612 + +#define IWM_EEPROM_SKU_CAP_BAND_24GHZ (1 << 4) +#define IWM_EEPROM_SKU_CAP_BAND_52GHZ (1 << 5) +#define IWM_EEPROM_SKU_CAP_11N_ENABLE (1 << 6) + +#define IWM_EEPROM_FAT_CHANNELS 20 +/* 2.4 gHz FAT primary channels: 1, 2, 3, 4, 5, 6, 7, 8, 9 */ +#define IWM_EEPROM_FAT_CHANNELS_24 9 +/* 5.2 gHz FAT primary channels: 36,44,52,60,100,108,116,124,132,149,157 */ +#define IWM_EEPROM_FAT_CHANNELS_52 11 + +#define IWM_EEPROM_FAT_CHANNEL_ENABLED (1 << 0) + +enum { + IWM_EEPROM_CALIB_CAL_HDR, + IWM_EEPROM_CALIB_TX_POWER, + IWM_EEPROM_CALIB_XTAL, + IWM_EEPROM_CALIB_TEMPERATURE, + IWM_EEPROM_CALIB_RX_BB_FILTER, + IWM_EEPROM_CALIB_RX_IQ, + IWM_EEPROM_CALIB_MAX, +}; + +#define IWM_EEPROM_CALIB_RXIQ_OFF (IWM_EEPROM_CALIB_CONFIG_OFF + \ + (IWM_EEPROM_CALIB_RX_IQ << 1)) +#define IWM_EEPROM_CALIB_RXIQ_LEN sizeof(struct iwm_lmac_calib_rxiq) + +struct iwm_eeprom_entry { + char *name; + u32 offset; + u32 length; +}; + +int iwm_eeprom_init(struct iwm_priv *iwm); +void iwm_eeprom_exit(struct iwm_priv *iwm); +u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id); +int iwm_eeprom_fat_channels(struct iwm_priv *iwm); +u32 iwm_eeprom_wireless_mode(struct iwm_priv *iwm); + +#endif diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c new file mode 100644 index 0000000..4906709 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/fw.c @@ -0,0 +1,416 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#include +#include + +#include "iwm.h" +#include "bus.h" +#include "hal.h" +#include "umac.h" +#include "debug.h" +#include "fw.h" +#include "commands.h" + +static const char fw_barker[] = "*WESTOPFORNOONE*"; + +/* + * @op_code: Op code we're looking for. + * @index: There can be several instances of the same opcode within + * the firmware. Index specifies which one we're looking for. + */ +static int iwm_fw_op_offset(struct iwm_priv *iwm, const struct firmware *fw, + u16 op_code, u32 index) +{ + int offset = -EINVAL, fw_offset; + u32 op_index = 0; + const u8 *fw_ptr; + struct iwm_fw_hdr_rec *rec; + + fw_offset = 0; + fw_ptr = fw->data; + + /* We first need to look for the firmware barker */ + if (memcmp(fw_ptr, fw_barker, IWM_HDR_BARKER_LEN)) { + IWM_ERR(iwm, "No barker string in this FW\n"); + return -EINVAL; + } + + if (fw->size < IWM_HDR_LEN) { + IWM_ERR(iwm, "FW is too small (%zu)\n", fw->size); + return -EINVAL; + } + + fw_offset += IWM_HDR_BARKER_LEN; + + while (fw_offset < fw->size) { + rec = (struct iwm_fw_hdr_rec *)(fw_ptr + fw_offset); + + IWM_DBG_FW(iwm, DBG, "FW: op_code: 0x%x, len: %d @ 0x%x\n", + rec->op_code, rec->len, fw_offset); + + if (rec->op_code == IWM_HDR_REC_OP_INVALID) { + IWM_DBG_FW(iwm, DBG, "Reached INVALID op code\n"); + break; + } + + if (rec->op_code == op_code) { + if (op_index == index) { + fw_offset += sizeof(struct iwm_fw_hdr_rec); + offset = fw_offset; + goto out; + } + op_index++; + } + + fw_offset += sizeof(struct iwm_fw_hdr_rec) + rec->len; + } + + out: + return offset; +} + +static int iwm_load_firmware_chunk(struct iwm_priv *iwm, + const struct firmware *fw, + struct iwm_fw_img_desc *img_desc) +{ + struct iwm_udma_nonwifi_cmd target_cmd; + u32 chunk_size; + const u8 *chunk_ptr; + int ret = 0; + + IWM_DBG_FW(iwm, INFO, "Loading FW chunk: %d bytes @ 0x%x\n", + img_desc->length, img_desc->address); + + target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE; + target_cmd.handle_by_hw = 1; + target_cmd.op2 = 0; + target_cmd.resp = 0; + target_cmd.eop = 1; + + chunk_size = img_desc->length; + chunk_ptr = fw->data + img_desc->offset; + + while (chunk_size > 0) { + u32 tmp_chunk_size; + + tmp_chunk_size = min_t(u32, chunk_size, + IWM_MAX_NONWIFI_CMD_BUFF_SIZE); + + target_cmd.addr = cpu_to_le32(img_desc->address + + (chunk_ptr - fw->data - img_desc->offset)); + target_cmd.op1_sz = cpu_to_le32(tmp_chunk_size); + + IWM_DBG_FW(iwm, DBG, "\t%d bytes @ 0x%x\n", + tmp_chunk_size, target_cmd.addr); + + ret = 
iwm_hal_send_target_cmd(iwm, &target_cmd, chunk_ptr); + if (ret < 0) { + IWM_ERR(iwm, "Couldn't load FW chunk\n"); + break; + } + + chunk_size -= tmp_chunk_size; + chunk_ptr += tmp_chunk_size; + } + + return ret; +} +/* + * To load a fw image to the target, we basically go through the + * fw, looking for OP_MEM_DESC records. Once we found one, we + * pass it to iwm_load_firmware_chunk(). + * The OP_MEM_DESC records contain the actuall memory chunk to be + * sent, but also the destination address. + */ +static int iwm_load_img(struct iwm_priv *iwm, const char *img_name) +{ + const struct firmware *fw; + struct iwm_fw_img_desc *img_desc; + struct iwm_fw_img_ver *ver; + int ret = 0, fw_offset; + u32 opcode_idx = 0, build_date; + char *build_tag; + + ret = request_firmware(&fw, img_name, iwm_to_dev(iwm)); + if (ret) { + IWM_ERR(iwm, "Request firmware failed"); + return ret; + } + + IWM_DBG_FW(iwm, INFO, "Start to load FW %s\n", img_name); + + while (1) { + fw_offset = iwm_fw_op_offset(iwm, fw, + IWM_HDR_REC_OP_MEM_DESC, + opcode_idx); + if (fw_offset < 0) + break; + + img_desc = (struct iwm_fw_img_desc *)(fw->data + fw_offset); + ret = iwm_load_firmware_chunk(iwm, fw, img_desc); + if (ret < 0) + goto err_release_fw; + opcode_idx++; + }; + + /* Read firmware version */ + fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_SW_VER, 0); + if (fw_offset < 0) + goto err_release_fw; + + ver = (struct iwm_fw_img_ver *)(fw->data + fw_offset); + + /* Read build tag */ + fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_TAG, 0); + if (fw_offset < 0) + goto err_release_fw; + + build_tag = (char *)(fw->data + fw_offset); + + /* Read build date */ + fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_DATE, 0); + if (fw_offset < 0) + goto err_release_fw; + + build_date = *(u32 *)(fw->data + fw_offset); + + IWM_INFO(iwm, "%s:\n", img_name); + IWM_INFO(iwm, "\tVersion: %02X.%02X\n", ver->major, ver->minor); + IWM_INFO(iwm, "\tBuild tag: %s\n", build_tag); + IWM_INFO(iwm, "\tBuild date: %x-%x-%x\n", + IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date), + IWM_BUILD_DAY(build_date)); + + if (!strcmp(img_name, iwm->bus_ops->umac_name)) + sprintf(iwm->umac_version, "%02X.%02X", + ver->major, ver->minor); + + if (!strcmp(img_name, iwm->bus_ops->lmac_name)) + sprintf(iwm->lmac_version, "%02X.%02X", + ver->major, ver->minor); + + err_release_fw: + release_firmware(fw); + + return ret; +} + +static int iwm_load_umac(struct iwm_priv *iwm) +{ + struct iwm_udma_nonwifi_cmd target_cmd; + int ret; + + ret = iwm_load_img(iwm, iwm->bus_ops->umac_name); + if (ret < 0) + return ret; + + /* We've loaded the UMAC, we can tell the target to jump there */ + target_cmd.opcode = UMAC_HDI_OUT_OPCODE_JUMP; + target_cmd.addr = cpu_to_le32(UMAC_MU_FW_INST_DATA_12_ADDR); + target_cmd.op1_sz = 0; + target_cmd.op2 = 0; + target_cmd.handle_by_hw = 0; + target_cmd.resp = 1 ; + target_cmd.eop = 1; + + ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL); + if (ret < 0) + IWM_ERR(iwm, "Couldn't send JMP command\n"); + + return ret; +} + +static int iwm_load_lmac(struct iwm_priv *iwm, const char *img_name) +{ + int ret; + + ret = iwm_load_img(iwm, img_name); + if (ret < 0) + return ret; + + return iwm_send_umac_reset(iwm, + cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_CLK_EN), 0); +} + +static int iwm_init_calib(struct iwm_priv *iwm, unsigned long cfg_bitmap, + unsigned long expected_bitmap, u8 rx_iq_cmd) +{ + /* Read RX IQ calibration result from EEPROM */ + if (test_bit(rx_iq_cmd, &cfg_bitmap)) { + 
iwm_store_rxiq_calib_result(iwm); + set_bit(PHY_CALIBRATE_RX_IQ_CMD, &iwm->calib_done_map); + } + + iwm_send_prio_table(iwm); + iwm_send_init_calib_cfg(iwm, cfg_bitmap); + + while (iwm->calib_done_map != expected_bitmap) { + if (iwm_notif_handle(iwm, CALIBRATION_RES_NOTIFICATION, + IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT)) { + IWM_DBG_FW(iwm, DBG, "Initial calibration timeout\n"); + return -ETIMEDOUT; + } + + IWM_DBG_FW(iwm, DBG, "Got calibration result. calib_done_map: " + "0x%lx, expected calibrations: 0x%lx\n", + iwm->calib_done_map, expected_bitmap); + } + + return 0; +} + +/* + * We currently have to load 3 FWs: + * 1) The UMAC (Upper MAC). + * 2) The calibration LMAC (Lower MAC). + * We then send the calibration init command, so that the device can + * run a first calibration round. + * 3) The operational LMAC, which replaces the calibration one when it's + * done with the first calibration round. + * + * Once those 3 FWs have been loaded, we send the periodic calibration + * command, and then the device is available for regular 802.11 operations. + */ +int iwm_load_fw(struct iwm_priv *iwm) +{ + unsigned long init_calib_map, periodic_calib_map; + unsigned long expected_calib_map; + int ret; + + /* We first start downloading the UMAC */ + ret = iwm_load_umac(iwm); + if (ret < 0) { + IWM_ERR(iwm, "UMAC loading failed\n"); + return ret; + } + + /* Handle UMAC_ALIVE notification */ + ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_ALIVE, IWM_SRC_UMAC, + WAIT_NOTIF_TIMEOUT); + if (ret) { + IWM_ERR(iwm, "Handle UMAC_ALIVE failed: %d\n", ret); + return ret; + } + + /* UMAC is alive, we can download the calibration LMAC */ + ret = iwm_load_lmac(iwm, iwm->bus_ops->calib_lmac_name); + if (ret) { + IWM_ERR(iwm, "Calibration LMAC loading failed\n"); + return ret; + } + + /* Handle UMAC_INIT_COMPLETE notification */ + ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE, + IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT); + if (ret) { + IWM_ERR(iwm, "Handle INIT_COMPLETE failed for calibration " + "LMAC: %d\n", ret); + return ret; + } + + /* Read EEPROM data */ + ret = iwm_eeprom_init(iwm); + if (ret < 0) { + IWM_ERR(iwm, "Couldn't init eeprom array\n"); + return ret; + } + + init_calib_map = iwm->conf.calib_map & IWM_CALIB_MAP_INIT_MSK; + expected_calib_map = iwm->conf.expected_calib_map & + IWM_CALIB_MAP_INIT_MSK; + periodic_calib_map = IWM_CALIB_MAP_PER_LMAC(iwm->conf.calib_map); + + ret = iwm_init_calib(iwm, init_calib_map, expected_calib_map, + CALIB_CFG_RX_IQ_IDX); + if (ret < 0) { + /* Let's try the old way */ + ret = iwm_init_calib(iwm, expected_calib_map, + expected_calib_map, + PHY_CALIBRATE_RX_IQ_CMD); + if (ret < 0) { + IWM_ERR(iwm, "Calibration result timeout\n"); + goto out; + } + } + + /* Handle LMAC CALIBRATION_COMPLETE notification */ + ret = iwm_notif_handle(iwm, CALIBRATION_COMPLETE_NOTIFICATION, + IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT); + if (ret) { + IWM_ERR(iwm, "Wait for CALIBRATION_COMPLETE timeout\n"); + goto out; + } + + IWM_INFO(iwm, "LMAC calibration done: 0x%lx\n", iwm->calib_done_map); + + iwm_send_umac_reset(iwm, cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_RESET), 1); + + ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC, + WAIT_NOTIF_TIMEOUT); + if (ret) { + IWM_ERR(iwm, "Wait for UMAC RESET timeout\n"); + goto out; + } + + /* Download the operational LMAC */ + ret = iwm_load_lmac(iwm, iwm->bus_ops->lmac_name); + if (ret) { + IWM_ERR(iwm, "LMAC loading failed\n"); + goto out; + } + + ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE, + IWM_SRC_UMAC, 
WAIT_NOTIF_TIMEOUT); + if (ret) { + IWM_ERR(iwm, "Handle INIT_COMPLETE failed for LMAC: %d\n", ret); + goto out; + } + + iwm_send_prio_table(iwm); + iwm_send_calib_results(iwm); + iwm_send_periodic_calib_cfg(iwm, periodic_calib_map); + iwm_send_ct_kill_cfg(iwm, iwm->conf.ct_kill_entry, + iwm->conf.ct_kill_exit); + + return 0; + + out: + iwm_eeprom_exit(iwm); + return ret; +} diff --git a/drivers/net/wireless/iwmc3200wifi/fw.h b/drivers/net/wireless/iwmc3200wifi/fw.h new file mode 100644 index 0000000..c70a3b4 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/fw.h @@ -0,0 +1,100 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#ifndef __IWM_FW_H__ +#define __IWM_FW_H__ + +/** + * struct iwm_fw_hdr_rec - An iwm firmware image is a + * concatenation of various records. Each of them is + * defined by an ID (aka op code), a length, and the + * actual data. 
+ * @op_code: The record ID, see IWM_HDR_REC_OP_* + * + * @len: The record payload length + * + * @buf: The record payload + */ +struct iwm_fw_hdr_rec { + u16 op_code; + u16 len; + u8 buf[0]; +}; + +/* Header's definitions */ +#define IWM_HDR_LEN (512) +#define IWM_HDR_BARKER_LEN (16) + +/* Header's opcodes */ +#define IWM_HDR_REC_OP_INVALID (0x00) +#define IWM_HDR_REC_OP_BUILD_DATE (0x01) +#define IWM_HDR_REC_OP_BUILD_TAG (0x02) +#define IWM_HDR_REC_OP_SW_VER (0x03) +#define IWM_HDR_REC_OP_HW_SKU (0x04) +#define IWM_HDR_REC_OP_BUILD_OPT (0x05) +#define IWM_HDR_REC_OP_MEM_DESC (0x06) +#define IWM_HDR_REC_USERDEFS (0x07) + +/* Header's records length (in bytes) */ +#define IWM_HDR_REC_LEN_BUILD_DATE (4) +#define IWM_HDR_REC_LEN_BUILD_TAG (64) +#define IWM_HDR_REC_LEN_SW_VER (4) +#define IWM_HDR_REC_LEN_HW_SKU (4) +#define IWM_HDR_REC_LEN_BUILD_OPT (4) +#define IWM_HDR_REC_LEN_MEM_DESC (12) +#define IWM_HDR_REC_LEN_USERDEF (64) + +#define IWM_BUILD_YEAR(date) ((date >> 16) & 0xffff) +#define IWM_BUILD_MONTH(date) ((date >> 8) & 0xff) +#define IWM_BUILD_DAY(date) (date & 0xff) + +struct iwm_fw_img_desc { + u32 offset; + u32 address; + u32 length; +}; + +struct iwm_fw_img_ver { + u8 minor; + u8 major; + u16 reserved; +}; + +int iwm_load_fw(struct iwm_priv *iwm); + +#endif diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c new file mode 100644 index 0000000..d13c885 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/hal.c @@ -0,0 +1,466 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +/* + * Hardware Abstraction Layer for iwm. + * + * This file mostly defines an abstraction API for + * sending various commands to the target. + * + * We have 2 types of commands: wifi and non-wifi ones. + * + * - wifi commands: + * They are used for sending LMAC and UMAC commands, + * and thus are the most commonly used ones. 
+ * There are 2 different wifi command types, the regular + * one and the LMAC one. The former is used to send + * UMAC commands (see UMAC_CMD_OPCODE_* from umac.h) + * while the latter is used for sending commands to the + * LMAC. If you look at LMAC commands you'll se that they + * are actually regular iwlwifi target commands encapsulated + * into a special UMAC command called UMAC passthrough. + * This is due to the fact the the host talks exclusively + * to the UMAC and so there needs to be a special UMAC + * command for talking to the LMAC. + * This is how a wifi command is layed out: + * ------------------------ + * | iwm_udma_out_wifi_hdr | + * ------------------------ + * | SW meta_data (32 bits) | + * ------------------------ + * | iwm_dev_cmd_hdr | + * ------------------------ + * | payload | + * | .... | + * + * - non-wifi, or general commands: + * Those commands are handled by the device's bootrom, + * and are typically sent when the UMAC and the LMAC + * are not yet available. + * * This is how a non-wifi command is layed out: + * --------------------------- + * | iwm_udma_out_nonwifi_hdr | + * --------------------------- + * | payload | + * | .... | + + * + * All the commands start with a UDMA header, which is + * basically a 32 bits field. The 4 LSB there define + * an opcode that allows the target to differentiate + * between wifi (opcode is 0xf) and non-wifi commands + * (opcode is [0..0xe]). + * + * When a command (wifi or non-wifi) is supposed to receive + * an answer, we queue the command buffer. When we do receive + * a command response from the UMAC, we go through the list + * of pending command, and pass both the command and the answer + * to the rx handler. Each command is sent with a unique + * sequence id, and the answer is sent with the same one. This + * is how we're supposed to match an answer with its command. + * See rx.c:iwm_rx_handle_[non]wifi() and iwm_get_pending_[non]wifi() + * for the implementation details. 
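+ *
+ * As a rough editor's sketch of the dispatching described above (an
+ * illustration only, not code taken from the driver; cmd_word and
+ * seq_num stand for the values pulled out of the received header, and
+ * the real handling lives in rx.c), the receive path only needs the
+ * 4 LSB of the first UDMA word plus the sequence number to find the
+ * matching pending command:
+ *
+ *	u8 opcode = cmd_word & 0xf;	/* 4 LSB of the UDMA header */
+ *
+ *	if (opcode == UMAC_HDI_OUT_OPCODE_WIFI)
+ *		cmd = iwm_get_pending_wifi_cmd(iwm, seq_num);
+ *	else
+ *		cmd = iwm_get_pending_nonwifi_cmd(iwm, seq_num, opcode);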
+ */ +#include +#include + +#include "iwm.h" +#include "bus.h" +#include "hal.h" +#include "umac.h" +#include "debug.h" + +static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm, + struct iwm_nonwifi_cmd *cmd, + struct iwm_udma_nonwifi_cmd *udma_cmd) +{ + INIT_LIST_HEAD(&cmd->pending); + + spin_lock(&iwm->cmd_lock); + + cmd->resp_received = 0; + + cmd->seq_num = iwm->nonwifi_seq_num; + udma_cmd->seq_num = cpu_to_le16(cmd->seq_num); + + iwm->nonwifi_seq_num++; + iwm->nonwifi_seq_num %= UMAC_NONWIFI_SEQ_NUM_MAX; + + if (udma_cmd->resp) + list_add_tail(&cmd->pending, &iwm->nonwifi_pending_cmd); + + spin_unlock(&iwm->cmd_lock); + + cmd->buf.start = cmd->buf.payload; + cmd->buf.len = 0; + + memcpy(&cmd->udma_cmd, udma_cmd, sizeof(*udma_cmd)); + + return cmd->seq_num; +} + +u16 iwm_alloc_wifi_cmd_seq(struct iwm_priv *iwm) +{ + u16 seq_num = iwm->wifi_seq_num; + + iwm->wifi_seq_num++; + iwm->wifi_seq_num %= UMAC_WIFI_SEQ_NUM_MAX; + + return seq_num; +} + +static void iwm_wifi_cmd_init(struct iwm_priv *iwm, + struct iwm_wifi_cmd *cmd, + struct iwm_udma_wifi_cmd *udma_cmd, + struct iwm_umac_cmd *umac_cmd, + struct iwm_lmac_cmd *lmac_cmd, + u16 payload_size) +{ + INIT_LIST_HEAD(&cmd->pending); + + spin_lock(&iwm->cmd_lock); + + cmd->seq_num = iwm_alloc_wifi_cmd_seq(iwm); + umac_cmd->seq_num = cpu_to_le16(cmd->seq_num); + + if (umac_cmd->resp) + list_add_tail(&cmd->pending, &iwm->wifi_pending_cmd); + + spin_unlock(&iwm->cmd_lock); + + cmd->buf.start = cmd->buf.payload; + cmd->buf.len = 0; + + if (lmac_cmd) { + cmd->buf.start -= sizeof(struct iwm_lmac_hdr); + + lmac_cmd->seq_num = cpu_to_le16(cmd->seq_num); + lmac_cmd->count = cpu_to_le16(payload_size); + + memcpy(&cmd->lmac_cmd, lmac_cmd, sizeof(*lmac_cmd)); + + umac_cmd->count = cpu_to_le16(sizeof(struct iwm_lmac_hdr)); + } else + umac_cmd->count = 0; + + umac_cmd->count = cpu_to_le16(payload_size + + le16_to_cpu(umac_cmd->count)); + udma_cmd->count = cpu_to_le16(sizeof(struct iwm_umac_fw_cmd_hdr) + + le16_to_cpu(umac_cmd->count)); + + memcpy(&cmd->udma_cmd, udma_cmd, sizeof(*udma_cmd)); + memcpy(&cmd->umac_cmd, umac_cmd, sizeof(*umac_cmd)); +} + +void iwm_cmd_flush(struct iwm_priv *iwm) +{ + struct iwm_wifi_cmd *wcmd, *wnext; + struct iwm_nonwifi_cmd *nwcmd, *nwnext; + + list_for_each_entry_safe(wcmd, wnext, &iwm->wifi_pending_cmd, pending) { + list_del(&wcmd->pending); + kfree(wcmd); + } + + list_for_each_entry_safe(nwcmd, nwnext, &iwm->nonwifi_pending_cmd, + pending) { + list_del(&nwcmd->pending); + kfree(nwcmd); + } +} + +struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num) +{ + struct iwm_wifi_cmd *cmd, *next; + + list_for_each_entry_safe(cmd, next, &iwm->wifi_pending_cmd, pending) + if (cmd->seq_num == seq_num) { + list_del(&cmd->pending); + return cmd; + } + + return NULL; +} + +struct iwm_nonwifi_cmd * +iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm, u8 seq_num, u8 cmd_opcode) +{ + struct iwm_nonwifi_cmd *cmd, *next; + + list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending) + if ((cmd->seq_num == seq_num) && + (cmd->udma_cmd.opcode == cmd_opcode) && + (cmd->resp_received)) { + list_del(&cmd->pending); + return cmd; + } + + return NULL; +} + +static void iwm_build_udma_nonwifi_hdr(struct iwm_priv *iwm, + struct iwm_udma_out_nonwifi_hdr *hdr, + struct iwm_udma_nonwifi_cmd *cmd) +{ + memset(hdr, 0, sizeof(*hdr)); + + SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE, cmd->opcode); + SET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP, cmd->resp); + SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, 1); + 
SET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW, + cmd->handle_by_hw); + SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_SIGNATURE, UMAC_HDI_OUT_SIGNATURE); + SET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM, + le16_to_cpu(cmd->seq_num)); + + hdr->addr = cmd->addr; + hdr->op1_sz = cmd->op1_sz; + hdr->op2 = cmd->op2; +} + +static int iwm_send_udma_nonwifi_cmd(struct iwm_priv *iwm, + struct iwm_nonwifi_cmd *cmd) +{ + struct iwm_udma_out_nonwifi_hdr *udma_hdr; + struct iwm_nonwifi_cmd_buff *buf; + struct iwm_udma_nonwifi_cmd *udma_cmd = &cmd->udma_cmd; + + buf = &cmd->buf; + + buf->start -= sizeof(struct iwm_umac_nonwifi_out_hdr); + buf->len += sizeof(struct iwm_umac_nonwifi_out_hdr); + + udma_hdr = (struct iwm_udma_out_nonwifi_hdr *)(buf->start); + + iwm_build_udma_nonwifi_hdr(iwm, udma_hdr, udma_cmd); + + IWM_DBG_CMD(iwm, DBG, + "Send UDMA nonwifi cmd: opcode = 0x%x, resp = 0x%x, " + "hw = 0x%x, seqnum = %d, addr = 0x%x, op1_sz = 0x%x, " + "op2 = 0x%x\n", udma_cmd->opcode, udma_cmd->resp, + udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr, + udma_cmd->op1_sz, udma_cmd->op2); + + return iwm_bus_send_chunk(iwm, buf->start, buf->len); +} + +void iwm_udma_wifi_hdr_set_eop(struct iwm_priv *iwm, u8 *buf, u8 eop) +{ + struct iwm_udma_out_wifi_hdr *hdr = (struct iwm_udma_out_wifi_hdr *)buf; + + SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, eop); +} + +void iwm_build_udma_wifi_hdr(struct iwm_priv *iwm, + struct iwm_udma_out_wifi_hdr *hdr, + struct iwm_udma_wifi_cmd *cmd) +{ + memset(hdr, 0, sizeof(*hdr)); + + SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE, UMAC_HDI_OUT_OPCODE_WIFI); + SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, cmd->eop); + SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_SIGNATURE, UMAC_HDI_OUT_SIGNATURE); + + SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_BYTE_COUNT, + le16_to_cpu(cmd->count)); + SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_CREDIT_GRP, cmd->credit_group); + SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_RATID, cmd->ra_tid); + SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_LMAC_OFFSET, cmd->lmac_offset); +} + +void iwm_build_umac_hdr(struct iwm_priv *iwm, + struct iwm_umac_fw_cmd_hdr *hdr, + struct iwm_umac_cmd *cmd) +{ + memset(hdr, 0, sizeof(*hdr)); + + SET_VAL32(hdr->meta_data, UMAC_FW_CMD_BYTE_COUNT, + le16_to_cpu(cmd->count)); + SET_VAL32(hdr->meta_data, UMAC_FW_CMD_TX_STA_COLOR, cmd->color); + SET_VAL8(hdr->cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ, cmd->resp); + + hdr->cmd.cmd = cmd->id; + hdr->cmd.seq_num = cmd->seq_num; +} + +static int iwm_send_udma_wifi_cmd(struct iwm_priv *iwm, + struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_wifi_out_hdr *umac_hdr; + struct iwm_wifi_cmd_buff *buf; + struct iwm_udma_wifi_cmd *udma_cmd = &cmd->udma_cmd; + struct iwm_umac_cmd *umac_cmd = &cmd->umac_cmd; + int ret; + + buf = &cmd->buf; + + buf->start -= sizeof(struct iwm_umac_wifi_out_hdr); + buf->len += sizeof(struct iwm_umac_wifi_out_hdr); + + umac_hdr = (struct iwm_umac_wifi_out_hdr *)(buf->start); + + iwm_build_udma_wifi_hdr(iwm, &umac_hdr->hw_hdr, udma_cmd); + iwm_build_umac_hdr(iwm, &umac_hdr->sw_hdr, umac_cmd); + + IWM_DBG_CMD(iwm, DBG, + "Send UDMA wifi cmd: opcode = 0x%x, UMAC opcode = 0x%x, " + "eop = 0x%x, count = 0x%x, credit_group = 0x%x, " + "ra_tid = 0x%x, lmac_offset = 0x%x, seqnum = %d\n", + UMAC_HDI_OUT_OPCODE_WIFI, umac_cmd->id, + udma_cmd->eop, udma_cmd->count, udma_cmd->credit_group, + udma_cmd->ra_tid, udma_cmd->lmac_offset, cmd->seq_num); + + if (umac_cmd->id == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH) + IWM_DBG_CMD(iwm, DBG, "\tLMAC opcode: 0x%x\n", + cmd->lmac_cmd.id); + + ret = 
iwm_tx_credit_alloc(iwm, udma_cmd->credit_group, buf->len); + + /* We keep sending UMAC reset regardless of the command credits. + * The UMAC is supposed to be reset anyway and the Tx credits are + * reinitialized afterwards. If we are lucky, the reset could + * still be done even though we have run out of credits for the + * command pool at this moment.*/ + if (ret && (umac_cmd->id != UMAC_CMD_OPCODE_RESET)) { + IWM_DBG_TX(iwm, DBG, "Failed to alloc tx credit for cmd %d\n", + umac_cmd->id); + return ret; + } + + return iwm_bus_send_chunk(iwm, buf->start, buf->len); +} + +/* target_cmd a.k.a udma_nonwifi_cmd can be sent when UMAC is not available */ +int iwm_hal_send_target_cmd(struct iwm_priv *iwm, + struct iwm_udma_nonwifi_cmd *udma_cmd, + const void *payload) +{ + struct iwm_nonwifi_cmd *cmd; + int ret, seq_num; + + cmd = kzalloc(sizeof(struct iwm_nonwifi_cmd), GFP_KERNEL); + if (!cmd) { + IWM_ERR(iwm, "Couldn't alloc memory for hal cmd\n"); + return -ENOMEM; + } + + seq_num = iwm_nonwifi_cmd_init(iwm, cmd, udma_cmd); + + if (cmd->udma_cmd.opcode == UMAC_HDI_OUT_OPCODE_WRITE || + cmd->udma_cmd.opcode == UMAC_HDI_OUT_OPCODE_WRITE_PERSISTENT) { + cmd->buf.len = le32_to_cpu(cmd->udma_cmd.op1_sz); + memcpy(&cmd->buf.payload, payload, cmd->buf.len); + } + + ret = iwm_send_udma_nonwifi_cmd(iwm, cmd); + + if (!udma_cmd->resp) + kfree(cmd); + + if (ret < 0) + return ret; + + return seq_num; +} + +static void iwm_build_lmac_hdr(struct iwm_priv *iwm, struct iwm_lmac_hdr *hdr, + struct iwm_lmac_cmd *cmd) +{ + memset(hdr, 0, sizeof(*hdr)); + + hdr->id = cmd->id; + hdr->flags = 0; /* Is this ever used? */ + hdr->seq_num = cmd->seq_num; +} + +/* + * iwm_hal_send_host_cmd(): sends commands to the UMAC or the LMAC. + * Sending command to the LMAC is equivalent to sending a + * regular UMAC command with the LMAC passthrough or the LMAC + * wrapper UMAC command IDs. + */ +int iwm_hal_send_host_cmd(struct iwm_priv *iwm, + struct iwm_udma_wifi_cmd *udma_cmd, + struct iwm_umac_cmd *umac_cmd, + struct iwm_lmac_cmd *lmac_cmd, + const void *payload, u16 payload_size) +{ + struct iwm_wifi_cmd *cmd; + struct iwm_lmac_hdr *hdr; + int lmac_hdr_len = 0; + int ret; + + cmd = kzalloc(sizeof(struct iwm_wifi_cmd), GFP_KERNEL); + if (!cmd) { + IWM_ERR(iwm, "Couldn't alloc memory for wifi hal cmd\n"); + return -ENOMEM; + } + + iwm_wifi_cmd_init(iwm, cmd, udma_cmd, umac_cmd, lmac_cmd, payload_size); + + if (lmac_cmd) { + hdr = (struct iwm_lmac_hdr *)(cmd->buf.start); + + iwm_build_lmac_hdr(iwm, hdr, &cmd->lmac_cmd); + lmac_hdr_len = sizeof(struct iwm_lmac_hdr); + } + + memcpy(cmd->buf.payload, payload, payload_size); + cmd->buf.len = le16_to_cpu(umac_cmd->count); + + ret = iwm_send_udma_wifi_cmd(iwm, cmd); + + /* We free the cmd if we're not expecting any response */ + if (!umac_cmd->resp) + kfree(cmd); + return ret; +} + +/* + * iwm_hal_send_umac_cmd(): This is a special case for + * iwm_hal_send_host_cmd() to send direct UMAC cmd (without + * LMAC involved). + */ +int iwm_hal_send_umac_cmd(struct iwm_priv *iwm, + struct iwm_udma_wifi_cmd *udma_cmd, + struct iwm_umac_cmd *umac_cmd, + const void *payload, u16 payload_size) +{ + return iwm_hal_send_host_cmd(iwm, udma_cmd, umac_cmd, NULL, + payload, payload_size); +} diff --git a/drivers/net/wireless/iwmc3200wifi/hal.h b/drivers/net/wireless/iwmc3200wifi/hal.h new file mode 100644 index 0000000..0adfdc8 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/hal.h @@ -0,0 +1,236 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#ifndef _IWM_HAL_H_ +#define _IWM_HAL_H_ + +#include "umac.h" + +#define GET_VAL8(s, name) ((s >> name##_POS) & name##_SEED) +#define GET_VAL16(s, name) ((le16_to_cpu(s) >> name##_POS) & name##_SEED) +#define GET_VAL32(s, name) ((le32_to_cpu(s) >> name##_POS) & name##_SEED) + +#define SET_VAL8(s, name, val) \ +do { \ + s = (s & ~(name##_SEED << name##_POS)) | \ + ((val & name##_SEED) << name##_POS); \ +} while (0) + +#define SET_VAL16(s, name, val) \ +do { \ + s = cpu_to_le16((le16_to_cpu(s) & ~(name##_SEED << name##_POS)) | \ + ((val & name##_SEED) << name##_POS)); \ +} while (0) + +#define SET_VAL32(s, name, val) \ +do { \ + s = cpu_to_le32((le32_to_cpu(s) & ~(name##_SEED << name##_POS)) | \ + ((val & name##_SEED) << name##_POS)); \ +} while (0) + + +#define UDMA_UMAC_INIT { .eop = 1, \ + .credit_group = 0x4, \ + .ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD, \ + .lmac_offset = 0 } +#define UDMA_LMAC_INIT { .eop = 1, \ + .credit_group = 0x4, \ + .ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD, \ + .lmac_offset = 4 } + + +/* UDMA IN OP CODE -- cmd bits [3:0] */ +#define UDMA_IN_OPCODE_MASK 0xF + +#define UDMA_IN_OPCODE_GENERAL_RESP 0x0 +#define UDMA_IN_OPCODE_READ_RESP 0x1 +#define UDMA_IN_OPCODE_WRITE_RESP 0x2 +#define UDMA_IN_OPCODE_PERS_WRITE_RESP 0x5 +#define UDMA_IN_OPCODE_PERS_READ_RESP 0x6 +#define UDMA_IN_OPCODE_RD_MDFY_WR_RESP 0x7 +#define UDMA_IN_OPCODE_EP_MNGMT_MSG 0x8 +#define UDMA_IN_OPCODE_CRDT_CHNG_MSG 0x9 +#define UDMA_IN_OPCODE_CNTRL_DATABASE_MSG 0xA +#define UDMA_IN_OPCODE_SW_MSG 0xB +#define UDMA_IN_OPCODE_WIFI 0xF +#define UDMA_IN_OPCODE_WIFI_LMAC 0x1F +#define UDMA_IN_OPCODE_WIFI_UMAC 0x2F + +/* HW API: udma_hdi_nonwifi API (OUT and IN) */ + +/* iwm_udma_nonwifi_cmd request response -- bits [9:9] */ +#define UDMA_HDI_OUT_NW_CMD_RESP_POS 9 +#define UDMA_HDI_OUT_NW_CMD_RESP_SEED 0x1 + +/* iwm_udma_nonwifi_cmd handle by HW -- bits [11:11] */ +#define UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW_POS 11 +#define 
UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW_SEED 0x1 + +/* iwm_udma_nonwifi_cmd sequence-number -- bits [12:15] */ +#define UDMA_HDI_OUT_NW_CMD_SEQ_NUM_POS 12 +#define UDMA_HDI_OUT_NW_CMD_SEQ_NUM_SEED 0xF + +/* UDMA IN Non-WIFI HW sequence number -- bits [12:15] */ +#define UDMA_IN_NW_HW_SEQ_NUM_POS 12 +#define UDMA_IN_NW_HW_SEQ_NUM_SEED 0xF + +/* UDMA IN Non-WIFI HW signature -- bits [16:31] */ +#define UDMA_IN_NW_HW_SIG_POS 16 +#define UDMA_IN_NW_HW_SIG_SEED 0xFFFF + +/* fixed signature */ +#define UDMA_IN_NW_HW_SIG 0xCBBC + +/* UDMA IN Non-WIFI HW block length -- bits [32:35] */ +#define UDMA_IN_NW_HW_LENGTH_SEED 0xF +#define UDMA_IN_NW_HW_LENGTH_POS 32 + +/* End of HW API: udma_hdi_nonwifi API (OUT and IN) */ + +#define IWM_SDIO_FW_MAX_CHUNK_SIZE 2032 +#define IWM_MAX_WIFI_HEADERS_SIZE 32 +#define IWM_MAX_NONWIFI_HEADERS_SIZE 16 +#define IWM_MAX_NONWIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \ + IWM_MAX_NONWIFI_HEADERS_SIZE) +#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \ + IWM_MAX_WIFI_HEADERS_SIZE) + +#define IWM_HAL_CONCATENATE_BUF_SIZE 8192 + +struct iwm_wifi_cmd_buff { + u16 len; + u8 *start; + u8 hdr[IWM_MAX_WIFI_HEADERS_SIZE]; + u8 payload[IWM_MAX_WIFI_CMD_BUFF_SIZE]; +}; + +struct iwm_nonwifi_cmd_buff { + u16 len; + u8 *start; + u8 hdr[IWM_MAX_NONWIFI_HEADERS_SIZE]; + u8 payload[IWM_MAX_NONWIFI_CMD_BUFF_SIZE]; +}; + +struct iwm_udma_nonwifi_cmd { + u8 opcode; + u8 eop; + u8 resp; + u8 handle_by_hw; + __le32 addr; + __le32 op1_sz; + __le32 op2; + __le16 seq_num; +}; + +struct iwm_udma_wifi_cmd { + __le16 count; + u8 eop; + u8 credit_group; + u8 ra_tid; + u8 lmac_offset; +}; + +struct iwm_umac_cmd { + u8 id; + __le16 count; + u8 resp; + __le16 seq_num; + u8 color; +}; + +struct iwm_lmac_cmd { + u8 id; + __le16 count; + u8 resp; + __le16 seq_num; +}; + +struct iwm_nonwifi_cmd { + u16 seq_num; + bool resp_received; + struct list_head pending; + struct iwm_udma_nonwifi_cmd udma_cmd; + struct iwm_umac_cmd umac_cmd; + struct iwm_lmac_cmd lmac_cmd; + struct iwm_nonwifi_cmd_buff buf; + u32 flags; +}; + +struct iwm_wifi_cmd { + u16 seq_num; + struct list_head pending; + struct iwm_udma_wifi_cmd udma_cmd; + struct iwm_umac_cmd umac_cmd; + struct iwm_lmac_cmd lmac_cmd; + struct iwm_wifi_cmd_buff buf; + u32 flags; +}; + +void iwm_cmd_flush(struct iwm_priv *iwm); + +struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, + u16 seq_num); +struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm, + u8 seq_num, u8 cmd_opcode); + + +int iwm_hal_send_target_cmd(struct iwm_priv *iwm, + struct iwm_udma_nonwifi_cmd *ucmd, + const void *payload); + +int iwm_hal_send_host_cmd(struct iwm_priv *iwm, + struct iwm_udma_wifi_cmd *udma_cmd, + struct iwm_umac_cmd *umac_cmd, + struct iwm_lmac_cmd *lmac_cmd, + const void *payload, u16 payload_size); + +int iwm_hal_send_umac_cmd(struct iwm_priv *iwm, + struct iwm_udma_wifi_cmd *udma_cmd, + struct iwm_umac_cmd *umac_cmd, + const void *payload, u16 payload_size); + +u16 iwm_alloc_wifi_cmd_seq(struct iwm_priv *iwm); + +void iwm_udma_wifi_hdr_set_eop(struct iwm_priv *iwm, u8 *buf, u8 eop); +void iwm_build_udma_wifi_hdr(struct iwm_priv *iwm, + struct iwm_udma_out_wifi_hdr *hdr, + struct iwm_udma_wifi_cmd *cmd); +void iwm_build_umac_hdr(struct iwm_priv *iwm, + struct iwm_umac_fw_cmd_hdr *hdr, + struct iwm_umac_cmd *cmd); +#endif /* _IWM_HAL_H_ */ diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h new file mode 100644 index 0000000..79ffa3b --- /dev/null +++ 
b/drivers/net/wireless/iwmc3200wifi/iwm.h @@ -0,0 +1,364 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#ifndef __IWM_H__ +#define __IWM_H__ + +#include +#include +#include + +#include "debug.h" +#include "hal.h" +#include "umac.h" +#include "lmac.h" +#include "eeprom.h" + +#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation" +#define IWM_AUTHOR "" + +#define IWM_SRC_LMAC UMAC_HDI_IN_SOURCE_FHRX +#define IWM_SRC_UDMA UMAC_HDI_IN_SOURCE_UDMA +#define IWM_SRC_UMAC UMAC_HDI_IN_SOURCE_FW +#define IWM_SRC_NUM 3 + +#define IWM_POWER_INDEX_MIN 0 +#define IWM_POWER_INDEX_MAX 5 +#define IWM_POWER_INDEX_DEFAULT 3 + +struct iwm_conf { + u32 sdio_ior_timeout; + unsigned long calib_map; + unsigned long expected_calib_map; + u8 ct_kill_entry; + u8 ct_kill_exit; + bool reset_on_fatal_err; + bool auto_connect; + bool wimax_not_present; + bool enable_qos; + u32 mode; + + u32 power_index; + u32 frag_threshold; + u32 rts_threshold; + bool cts_to_self; + + u32 assoc_timeout; + u32 roam_timeout; + u32 wireless_mode; + + u8 ibss_band; + u8 ibss_channel; + + u8 mac_addr[ETH_ALEN]; +}; + +enum { + COEX_MODE_SA = 1, + COEX_MODE_XOR, + COEX_MODE_CM, + COEX_MODE_MAX, +}; + +struct iwm_if_ops; +struct iwm_wifi_cmd; + +struct pool_entry { + int id; /* group id */ + int sid; /* super group id */ + int min_pages; /* min capacity in pages */ + int max_pages; /* max capacity in pages */ + int alloc_pages; /* allocated # of pages. incresed by driver */ + int total_freed_pages; /* total freed # of pages. 
incresed by UMAC */ +}; + +struct spool_entry { + int id; + int max_pages; + int alloc_pages; +}; + +struct iwm_tx_credit { + spinlock_t lock; + int pool_nr; + unsigned long full_pools_map; /* bitmap for # of filled tx pools */ + struct pool_entry pools[IWM_MACS_OUT_GROUPS]; + struct spool_entry spools[IWM_MACS_OUT_SGROUPS]; +}; + +struct iwm_notif { + struct list_head pending; + u32 cmd_id; + void *cmd; + u8 src; + void *buf; + unsigned long buf_size; +}; + +struct iwm_tid_info { + __le16 last_seq_num; + bool stopped; + struct mutex mutex; +}; + +struct iwm_sta_info { + u8 addr[ETH_ALEN]; + bool valid; + bool qos; + u8 color; + struct iwm_tid_info tid_info[IWM_UMAC_TID_NR]; +}; + +struct iwm_tx_info { + u8 sta; + u8 color; + u8 tid; +}; + +struct iwm_rx_info { + unsigned long rx_size; + unsigned long rx_buf_size; +}; + +#define IWM_NUM_KEYS 4 + +struct iwm_umac_key_hdr { + u8 mac[ETH_ALEN]; + u8 key_idx; + u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */ +} __attribute__ ((packed)); + +struct iwm_key { + struct iwm_umac_key_hdr hdr; + u32 cipher; + u8 key[WLAN_MAX_KEY_LEN]; + u8 seq[IW_ENCODE_SEQ_MAX_SIZE]; + int key_len; + int seq_len; +}; + +#define IWM_RX_ID_HASH 0xff +#define IWM_RX_ID_GET_HASH(id) ((id) % IWM_RX_ID_HASH) + +#define IWM_STA_TABLE_NUM 16 +#define IWM_TX_LIST_SIZE 64 +#define IWM_RX_LIST_SIZE 256 + +#define IWM_SCAN_ID_MAX 0xff + +#define IWM_STATUS_READY 0 +#define IWM_STATUS_SCANNING 1 +#define IWM_STATUS_SCAN_ABORTING 2 +#define IWM_STATUS_SME_CONNECTING 3 +#define IWM_STATUS_ASSOCIATED 4 +#define IWM_STATUS_RESETTING 5 + +struct iwm_tx_queue { + int id; + struct sk_buff_head queue; + struct sk_buff_head stopped_queue; + spinlock_t lock; + struct workqueue_struct *wq; + struct work_struct worker; + u8 concat_buf[IWM_HAL_CONCATENATE_BUF_SIZE]; + int concat_count; + u8 *concat_ptr; +}; + +/* Queues 0 ~ 3 for AC data, 5 for iPAN */ +#define IWM_TX_QUEUES 5 +#define IWM_TX_DATA_QUEUES 4 +#define IWM_TX_CMD_QUEUE 4 + +struct iwm_bss_info { + struct list_head node; + struct cfg80211_bss *cfg_bss; + struct iwm_umac_notif_bss_info *bss; +}; + +typedef int (*iwm_handler)(struct iwm_priv *priv, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd); + +#define IWM_WATCHDOG_PERIOD (6 * HZ) + +struct iwm_priv { + struct wireless_dev *wdev; + struct iwm_if_ops *bus_ops; + + struct iwm_conf conf; + + unsigned long status; + + struct list_head pending_notif; + wait_queue_head_t notif_queue; + + wait_queue_head_t nonwifi_queue; + + unsigned long calib_done_map; + struct { + u8 *buf; + u32 size; + } calib_res[CALIBRATION_CMD_NUM]; + + struct iwm_umac_profile *umac_profile; + bool umac_profile_active; + + u8 bssid[ETH_ALEN]; + u8 channel; + u16 rate; + u32 txpower; + + struct iwm_sta_info sta_table[IWM_STA_TABLE_NUM]; + struct list_head bss_list; + + void (*nonwifi_rx_handlers[UMAC_HDI_IN_OPCODE_NONWIFI_MAX]) + (struct iwm_priv *priv, u8 *buf, unsigned long buf_size); + + const iwm_handler *umac_handlers; + const iwm_handler *lmac_handlers; + DECLARE_BITMAP(lmac_handler_map, LMAC_COMMAND_ID_NUM); + DECLARE_BITMAP(umac_handler_map, LMAC_COMMAND_ID_NUM); + DECLARE_BITMAP(udma_handler_map, LMAC_COMMAND_ID_NUM); + + struct list_head wifi_pending_cmd; + struct list_head nonwifi_pending_cmd; + u16 wifi_seq_num; + u8 nonwifi_seq_num; + spinlock_t cmd_lock; + + u32 core_enabled; + + u8 scan_id; + struct cfg80211_scan_request *scan_request; + + struct sk_buff_head rx_list; + struct list_head rx_tickets; + struct list_head rx_packets[IWM_RX_ID_HASH]; + struct 
workqueue_struct *rx_wq; + struct work_struct rx_worker; + + struct iwm_tx_credit tx_credit; + struct iwm_tx_queue txq[IWM_TX_QUEUES]; + + struct iwm_key keys[IWM_NUM_KEYS]; + s8 default_key; + + DECLARE_BITMAP(wifi_ntfy, WIFI_IF_NTFY_MAX); + wait_queue_head_t wifi_ntfy_queue; + + wait_queue_head_t mlme_queue; + + struct iw_statistics wstats; + struct delayed_work stats_request; + struct delayed_work disconnect; + struct delayed_work ct_kill_delay; + + struct iwm_debugfs dbg; + + u8 *eeprom; + struct timer_list watchdog; + struct work_struct reset_worker; + struct work_struct auth_retry_worker; + struct mutex mutex; + + u8 *req_ie; + int req_ie_len; + u8 *resp_ie; + int resp_ie_len; + + struct iwm_fw_error_hdr *last_fw_err; + char umac_version[8]; + char lmac_version[8]; + + char private[0] __attribute__((__aligned__(NETDEV_ALIGN))); +}; + +static inline void *iwm_private(struct iwm_priv *iwm) +{ + BUG_ON(!iwm); + return &iwm->private; +} + +#define hw_to_iwm(h) (h->iwm) +#define iwm_to_dev(i) (wiphy_dev(i->wdev->wiphy)) +#define iwm_to_wiphy(i) (i->wdev->wiphy) +#define wiphy_to_iwm(w) (struct iwm_priv *)(wiphy_priv(w)) +#define iwm_to_wdev(i) (i->wdev) +#define wdev_to_iwm(w) (struct iwm_priv *)(wdev_priv(w)) +#define iwm_to_ndev(i) (i->wdev->netdev) +#define ndev_to_iwm(n) (wdev_to_iwm(n->ieee80211_ptr)) +#define skb_to_rx_info(s) ((struct iwm_rx_info *)(s->cb)) +#define skb_to_tx_info(s) ((struct iwm_tx_info *)s->cb) + +void *iwm_if_alloc(int sizeof_bus, struct device *dev, + struct iwm_if_ops *if_ops); +void iwm_if_free(struct iwm_priv *iwm); +int iwm_if_add(struct iwm_priv *iwm); +void iwm_if_remove(struct iwm_priv *iwm); +int iwm_mode_to_nl80211_iftype(int mode); +int iwm_priv_init(struct iwm_priv *iwm); +void iwm_priv_deinit(struct iwm_priv *iwm); +void iwm_reset(struct iwm_priv *iwm); +void iwm_resetting(struct iwm_priv *iwm); +void iwm_tx_credit_init_pools(struct iwm_priv *iwm, + struct iwm_umac_notif_alive *alive); +int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb); +int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd, + u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size); +int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout); +void iwm_init_default_profile(struct iwm_priv *iwm, + struct iwm_umac_profile *profile); +void iwm_link_on(struct iwm_priv *iwm); +void iwm_link_off(struct iwm_priv *iwm); +int iwm_up(struct iwm_priv *iwm); +int iwm_down(struct iwm_priv *iwm); + +/* TX API */ +int iwm_tid_to_queue(u16 tid); +void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages); +void iwm_tx_worker(struct work_struct *work); +int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + +/* RX API */ +void iwm_rx_setup_handlers(struct iwm_priv *iwm); +int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size); +int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size, + struct iwm_wifi_cmd *cmd); +void iwm_rx_free(struct iwm_priv *iwm); + +#endif diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h new file mode 100644 index 0000000..a855a99 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/lmac.h @@ -0,0 +1,484 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#ifndef __IWM_LMAC_H__ +#define __IWM_LMAC_H__ + +struct iwm_lmac_hdr { + u8 id; + u8 flags; + __le16 seq_num; +} __attribute__ ((packed)); + +/* LMAC commands */ +#define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1 + +struct iwm_lmac_cal_cfg_elt { + __le32 enable; /* 1 means LMAC needs to do something */ + __le32 start; /* 1 to start calibration, 0 to stop */ + __le32 send_res; /* 1 for sending back results */ + __le32 apply_res; /* 1 for applying calibration results to HW */ + __le32 reserved; +} __attribute__ ((packed)); + +struct iwm_lmac_cal_cfg_status { + struct iwm_lmac_cal_cfg_elt init; + struct iwm_lmac_cal_cfg_elt periodic; + __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */ +} __attribute__ ((packed)); + +struct iwm_lmac_cal_cfg_cmd { + struct iwm_lmac_cal_cfg_status ucode_cfg; + struct iwm_lmac_cal_cfg_status driver_cfg; + __le32 reserved; +} __attribute__ ((packed)); + +struct iwm_lmac_cal_cfg_resp { + __le32 status; +} __attribute__ ((packed)); + +#define IWM_CARD_STATE_SW_HW_ENABLED 0x00 +#define IWM_CARD_STATE_HW_DISABLED 0x01 +#define IWM_CARD_STATE_SW_DISABLED 0x02 +#define IWM_CARD_STATE_CTKILL_DISABLED 0x04 +#define IWM_CARD_STATE_IS_RXON 0x10 + +struct iwm_lmac_card_state { + __le32 flags; +} __attribute__ ((packed)); + +/** + * COEX_PRIORITY_TABLE_CMD + * + * Priority entry for each state + * Will keep two tables, for STA and WIPAN + */ +enum { + /* UN-ASSOCIATION PART */ + COEX_UNASSOC_IDLE = 0, + COEX_UNASSOC_MANUAL_SCAN, + COEX_UNASSOC_AUTO_SCAN, + + /* CALIBRATION */ + COEX_CALIBRATION, + COEX_PERIODIC_CALIBRATION, + + /* CONNECTION */ + COEX_CONNECTION_ESTAB, + + /* ASSOCIATION PART */ + COEX_ASSOCIATED_IDLE, + COEX_ASSOC_MANUAL_SCAN, + COEX_ASSOC_AUTO_SCAN, + COEX_ASSOC_ACTIVE_LEVEL, + + /* RF ON/OFF */ + COEX_RF_ON, + COEX_RF_OFF, + COEX_STAND_ALONE_DEBUG, + + /* IPNN */ + COEX_IPAN_ASSOC_LEVEL, + + /* RESERVED */ + COEX_RSRVD1, + COEX_RSRVD2, + + COEX_EVENTS_NUM +}; + +#define 
COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK 0x1 +#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK 0x2 +#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK 0x4 + +struct coex_event { + u8 req_prio; + u8 win_med_prio; + u8 reserved; + u8 flags; +} __attribute__ ((packed)); + +#define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1 +#define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4 +#define COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK 0x8 +#define COEX_FLAGS_COEX_ENABLE_MSK 0x80 + +struct iwm_coex_prio_table_cmd { + u8 flags; + u8 reserved[3]; + struct coex_event sta_prio[COEX_EVENTS_NUM]; +} __attribute__ ((packed)); + +/* Coexistence definitions + * + * Constants to fill in the Priorities' Tables + * RP - Requested Priority + * WP - Win Medium Priority: priority assigned when the contention has been won + * FLAGS - Combination of COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK and + * COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK + */ + +#define COEX_UNASSOC_IDLE_FLAGS 0 +#define COEX_UNASSOC_MANUAL_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK) +#define COEX_UNASSOC_AUTO_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK) +#define COEX_CALIBRATION_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK) +#define COEX_PERIODIC_CALIBRATION_FLAGS 0 +/* COEX_CONNECTION_ESTAB: we need DELAY_MEDIUM_FREE_NTFY to let WiMAX + * disconnect from network. */ +#define COEX_CONNECTION_ESTAB_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \ + COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK) +#define COEX_ASSOCIATED_IDLE_FLAGS 0 +#define COEX_ASSOC_MANUAL_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK) +#define COEX_ASSOC_AUTO_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK) +#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0 +#define COEX_RF_ON_FLAGS 0 +#define COEX_RF_OFF_FLAGS 0 +#define COEX_STAND_ALONE_DEBUG_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK) +#define COEX_IPAN_ASSOC_LEVEL_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \ + COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK) +#define COEX_RSRVD1_FLAGS 0 +#define COEX_RSRVD2_FLAGS 0 +/* XOR_RF_ON is the event wrapping all radio ownership. We need + * DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network. 
*/ +#define COEX_XOR_RF_ON_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \ + COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \ + COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK) + +/* CT kill config command */ +struct iwm_ct_kill_cfg_cmd { + u32 exit_threshold; + u32 reserved; + u32 entry_threshold; +} __attribute__ ((packed)); + + +/* LMAC OP CODES */ +#define REPLY_PAD 0x0 +#define REPLY_ALIVE 0x1 +#define REPLY_ERROR 0x2 +#define REPLY_ECHO 0x3 +#define REPLY_HALT 0x6 + +/* RXON state commands */ +#define REPLY_RX_ON 0x10 +#define REPLY_RX_ON_ASSOC 0x11 +#define REPLY_RX_OFF 0x12 +#define REPLY_QOS_PARAM 0x13 +#define REPLY_RX_ON_TIMING 0x14 +#define REPLY_INTERNAL_QOS_PARAM 0x15 +#define REPLY_RX_INT_TIMEOUT_CNFG 0x16 +#define REPLY_NULL 0x17 + +/* Multi-Station support */ +#define REPLY_ADD_STA 0x18 +#define REPLY_REMOVE_STA 0x19 +#define REPLY_RESET_ALL_STA 0x1a + +/* RX, TX */ +#define REPLY_ALM_RX 0x1b +#define REPLY_TX 0x1c +#define REPLY_TXFIFO_FLUSH 0x1e + +/* MISC commands */ +#define REPLY_MGMT_MCAST_KEY 0x1f +#define REPLY_WEPKEY 0x20 +#define REPLY_INIT_IV 0x21 +#define REPLY_WRITE_MIB 0x22 +#define REPLY_READ_MIB 0x23 +#define REPLY_RADIO_FE 0x24 +#define REPLY_TXFIFO_CFG 0x25 +#define REPLY_WRITE_READ 0x26 +#define REPLY_INSTALL_SEC_KEY 0x27 + + +#define REPLY_RATE_SCALE 0x47 +#define REPLY_LEDS_CMD 0x48 +#define REPLY_TX_LINK_QUALITY_CMD 0x4e +#define REPLY_ANA_MIB_OVERRIDE_CMD 0x4f +#define REPLY_WRITE2REG_CMD 0x50 + +/* winfi-wifi coexistence */ +#define COEX_PRIORITY_TABLE_CMD 0x5a +#define COEX_MEDIUM_NOTIFICATION 0x5b +#define COEX_EVENT_CMD 0x5c + +/* more Protocol and Protocol-test commands */ +#define REPLY_MAX_SLEEP_TIME_CMD 0x61 +#define CALIBRATION_CFG_CMD 0x65 +#define CALIBRATION_RES_NOTIFICATION 0x66 +#define CALIBRATION_COMPLETE_NOTIFICATION 0x67 + +/* Measurements */ +#define REPLY_QUIET_CMD 0x71 +#define REPLY_CHANNEL_SWITCH 0x72 +#define CHANNEL_SWITCH_NOTIFICATION 0x73 + +#define REPLY_SPECTRUM_MEASUREMENT_CMD 0x74 +#define SPECTRUM_MEASURE_NOTIFICATION 0x75 +#define REPLY_MEASUREMENT_ABORT_CMD 0x76 + +/* Power Management */ +#define POWER_TABLE_CMD 0x77 +#define SAVE_RESTORE_ADDRESS_CMD 0x78 +#define REPLY_WATERMARK_CMD 0x79 +#define PM_DEBUG_STATISTIC_NOTIFIC 0x7B +#define PD_FLUSH_N_NOTIFICATION 0x7C + +/* Scan commands and notifications */ +#define REPLY_SCAN_REQUEST_CMD 0x80 +#define REPLY_SCAN_ABORT_CMD 0x81 +#define SCAN_START_NOTIFICATION 0x82 +#define SCAN_RESULTS_NOTIFICATION 0x83 +#define SCAN_COMPLETE_NOTIFICATION 0x84 + +/* Continuous TX commands */ +#define REPLY_CONT_TX_CMD 0x85 +#define END_OF_CONT_TX_NOTIFICATION 0x86 + +/* Timer/Eeprom commands */ +#define TIMER_CMD 0x87 +#define EEPROM_WRITE_CMD 0x88 + +/* PAPD commands */ +#define FEEDBACK_REQUEST_NOTIFICATION 0x8b +#define REPLY_CW_CMD 0x8c + +/* IBSS/AP commands Continue */ +#define BEACON_NOTIFICATION 0x90 +#define REPLY_TX_BEACON 0x91 +#define REPLY_REQUEST_ATIM 0x93 +#define WHO_IS_AWAKE_NOTIFICATION 0x94 +#define TX_PWR_DBM_LIMIT_CMD 0x95 +#define QUIET_NOTIFICATION 0x96 +#define TX_PWR_TABLE_CMD 0x97 +#define TX_ANT_CONFIGURATION_CMD 0x98 +#define MEASURE_ABORT_NOTIFICATION 0x99 +#define REPLY_CALIBRATION_TUNE 0x9a + +/* bt config command */ +#define REPLY_BT_CONFIG 0x9b +#define REPLY_STATISTICS_CMD 0x9c +#define STATISTICS_NOTIFICATION 0x9d + +/* RF-KILL commands and notifications */ +#define REPLY_CARD_STATE_CMD 0xa0 +#define CARD_STATE_NOTIFICATION 0xa1 + +/* Missed beacons notification */ +#define MISSED_BEACONS_NOTIFICATION 0xa2 +#define MISSED_BEACONS_NOTIFICATION_TH_CMD 0xa3 + +#define 
REPLY_CT_KILL_CONFIG_CMD 0xa4 + +/* HD commands and notifications */ +#define REPLY_HD_PARAMS_CMD 0xa6 +#define HD_PARAMS_NOTIFICATION 0xa7 +#define SENSITIVITY_CMD 0xa8 +#define U_APSD_PARAMS_CMD 0xa9 +#define NOISY_PLATFORM_CMD 0xaa +#define ILLEGAL_CMD 0xac +#define REPLY_PHY_CALIBRATION_CMD 0xb0 +#define REPLAY_RX_GAIN_CALIB_CMD 0xb1 + +/* WiPAN commands */ +#define REPLY_WIPAN_PARAMS_CMD 0xb2 +#define REPLY_WIPAN_RX_ON_CMD 0xb3 +#define REPLY_WIPAN_RX_ON_TIMING 0xb4 +#define REPLY_WIPAN_TX_PWR_TABLE_CMD 0xb5 +#define REPLY_WIPAN_RXON_ASSOC_CMD 0xb6 +#define REPLY_WIPAN_QOS_PARAM 0xb7 +#define WIPAN_REPLY_WEPKEY 0xb8 + +/* BeamForming commands */ +#define BEAMFORMER_CFG_CMD 0xba +#define BEAMFORMEE_NOTIFICATION 0xbb + +/* TGn new Commands */ +#define REPLY_RX_PHY_CMD 0xc0 +#define REPLY_RX_MPDU_CMD 0xc1 +#define REPLY_MULTICAST_HASH 0xc2 +#define REPLY_KDR_RX 0xc3 +#define REPLY_RX_DSP_EXT_INFO 0xc4 +#define REPLY_COMPRESSED_BA 0xc5 + +/* PNC commands */ +#define PNC_CONFIG_CMD 0xc8 +#define PNC_UPDATE_TABLE_CMD 0xc9 +#define XVT_GENERAL_CTRL_CMD 0xca +#define REPLY_LEGACY_RADIO_FE 0xdd + +/* WoWLAN commands */ +#define WOWLAN_PATTERNS 0xe0 +#define WOWLAN_WAKEUP_FILTER 0xe1 +#define WOWLAN_TSC_RSC_PARAM 0xe2 +#define WOWLAN_TKIP_PARAM 0xe3 +#define WOWLAN_KEK_KCK_MATERIAL 0xe4 +#define WOWLAN_GET_STATUSES 0xe5 +#define WOWLAN_TX_POWER_PER_DB 0xe6 +#define REPLY_WOWLAN_GET_STATUSES WOWLAN_GET_STATUSES + +#define REPLY_DEBUG_CMD 0xf0 +#define REPLY_DSP_DEBUG_CMD 0xf1 +#define REPLY_DEBUG_MONITOR_CMD 0xf2 +#define REPLY_DEBUG_XVT_CMD 0xf3 +#define REPLY_DEBUG_DC_CALIB 0xf4 +#define REPLY_DYNAMIC_BP 0xf5 + +/* General purpose Commands */ +#define REPLY_GP1_CMD 0xfa +#define REPLY_GP2_CMD 0xfb +#define REPLY_GP3_CMD 0xfc +#define REPLY_GP4_CMD 0xfd +#define REPLY_REPLAY_WRAPPER 0xfe +#define REPLY_FRAME_DURATION_CALC_CMD 0xff + +#define LMAC_COMMAND_ID_MAX 0xff +#define LMAC_COMMAND_ID_NUM (LMAC_COMMAND_ID_MAX + 1) + + +/* Calibration */ + +enum { + PHY_CALIBRATE_DC_CMD = 0, + PHY_CALIBRATE_LO_CMD = 1, + PHY_CALIBRATE_RX_BB_CMD = 2, + PHY_CALIBRATE_TX_IQ_CMD = 3, + PHY_CALIBRATE_RX_IQ_CMD = 4, + PHY_CALIBRATION_NOISE_CMD = 5, + PHY_CALIBRATE_AGC_TABLE_CMD = 6, + PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 7, + PHY_CALIBRATE_OPCODES_NUM, + SHILOH_PHY_CALIBRATE_DC_CMD = 8, + SHILOH_PHY_CALIBRATE_LO_CMD = 9, + SHILOH_PHY_CALIBRATE_RX_BB_CMD = 10, + SHILOH_PHY_CALIBRATE_TX_IQ_CMD = 11, + SHILOH_PHY_CALIBRATE_RX_IQ_CMD = 12, + SHILOH_PHY_CALIBRATION_NOISE_CMD = 13, + SHILOH_PHY_CALIBRATE_AGC_TABLE_CMD = 14, + SHILOH_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15, + SHILOH_PHY_CALIBRATE_BASE_BAND_CMD = 16, + SHILOH_PHY_CALIBRATE_TXIQ_PERIODIC_CMD = 17, + CALIBRATION_CMD_NUM, +}; + +enum { + CALIB_CFG_RX_BB_IDX = 0, + CALIB_CFG_DC_IDX = 1, + CALIB_CFG_LO_IDX = 2, + CALIB_CFG_TX_IQ_IDX = 3, + CALIB_CFG_RX_IQ_IDX = 4, + CALIB_CFG_NOISE_IDX = 5, + CALIB_CFG_CRYSTAL_IDX = 6, + CALIB_CFG_TEMPERATURE_IDX = 7, + CALIB_CFG_PAPD_IDX = 8, + CALIB_CFG_LAST_IDX = CALIB_CFG_PAPD_IDX, + CALIB_CFG_MODULE_NUM, +}; + +#define IWM_CALIB_MAP_INIT_MSK 0xFFFF +#define IWM_CALIB_MAP_PER_LMAC(m) ((m & 0xFF0000) >> 16) +#define IWM_CALIB_MAP_PER_UMAC(m) ((m & 0xFF000000) >> 24) +#define IWM_CALIB_OPCODE_TO_INDEX(op) (op - PHY_CALIBRATE_OPCODES_NUM) + +struct iwm_lmac_calib_hdr { + u8 opcode; + u8 first_grp; + u8 grp_num; + u8 all_data_valid; +} __attribute__ ((packed)); + +#define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7 +#define IWM_CALIB_FREQ_GROUPS_NR 5 +#define IWM_CALIB_DC_MODES_NR 12 + +struct iwm_calib_rxiq_entry { + u16 ptam_postdist_ars; + u16 
ptam_postdist_arc; +} __attribute__ ((packed)); + +struct iwm_calib_rxiq_group { + struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR]; +} __attribute__ ((packed)); + +struct iwm_lmac_calib_rxiq { + struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR]; +} __attribute__ ((packed)); + +struct iwm_calib_rxiq { + struct iwm_lmac_calib_hdr hdr; + struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR]; +} __attribute__ ((packed)); + +#define LMAC_STA_ID_SEED 0x0f +#define LMAC_STA_ID_POS 0 + +#define LMAC_STA_COLOR_SEED 0x7 +#define LMAC_STA_COLOR_POS 4 + +struct iwm_lmac_power_report { + u8 pa_status; + u8 pa_integ_res_A[3]; + u8 pa_integ_res_B[3]; + u8 pa_integ_res_C[3]; +} __attribute__ ((packed)); + +struct iwm_lmac_tx_resp { + u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */ + u8 bt_kill_cnt; + __le16 retry_cnt; + __le32 initial_tx_rate; + __le16 wireless_media_time; + struct iwm_lmac_power_report power_report; + __le32 tfd_info; + __le16 seq_ctl; + __le16 byte_cnt; + u8 tlc_rate_info; + u8 ra_tid; + __le16 frame_ctl; + __le32 status; +} __attribute__ ((packed)); + +#endif diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c new file mode 100644 index 0000000..7f34d6d --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/main.c @@ -0,0 +1,842 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#include +#include +#include +#include +#include + +#include "iwm.h" +#include "debug.h" +#include "bus.h" +#include "umac.h" +#include "commands.h" +#include "hal.h" +#include "fw.h" +#include "rx.h" + +static struct iwm_conf def_iwm_conf = { + + .sdio_ior_timeout = 5000, + .calib_map = BIT(CALIB_CFG_DC_IDX) | + BIT(CALIB_CFG_LO_IDX) | + BIT(CALIB_CFG_TX_IQ_IDX) | + BIT(CALIB_CFG_RX_IQ_IDX) | + BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD), + .expected_calib_map = BIT(PHY_CALIBRATE_DC_CMD) | + BIT(PHY_CALIBRATE_LO_CMD) | + BIT(PHY_CALIBRATE_TX_IQ_CMD) | + BIT(PHY_CALIBRATE_RX_IQ_CMD) | + BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD), + .ct_kill_entry = 110, + .ct_kill_exit = 110, + .reset_on_fatal_err = 1, + .auto_connect = 1, + .enable_qos = 1, + .mode = UMAC_MODE_BSS, + + /* UMAC configuration */ + .power_index = 0, + .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, + .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD, + .cts_to_self = 0, + + .assoc_timeout = 2, + .roam_timeout = 10, + .wireless_mode = WIRELESS_MODE_11A | WIRELESS_MODE_11G | + WIRELESS_MODE_11N, + + /* IBSS */ + .ibss_band = UMAC_BAND_2GHZ, + .ibss_channel = 1, + + .mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03}, +}; + +static int modparam_reset; +module_param_named(reset, modparam_reset, bool, 0644); +MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])"); + +static int modparam_wimax_enable = 1; +module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644); +MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])"); + +int iwm_mode_to_nl80211_iftype(int mode) +{ + switch (mode) { + case UMAC_MODE_BSS: + return NL80211_IFTYPE_STATION; + case UMAC_MODE_IBSS: + return NL80211_IFTYPE_ADHOC; + default: + return NL80211_IFTYPE_UNSPECIFIED; + } + + return 0; +} + +static void iwm_statistics_request(struct work_struct *work) +{ + struct iwm_priv *iwm = + container_of(work, struct iwm_priv, stats_request.work); + + iwm_send_umac_stats_req(iwm, 0); +} + +static void iwm_disconnect_work(struct work_struct *work) +{ + struct iwm_priv *iwm = + container_of(work, struct iwm_priv, disconnect.work); + + if (iwm->umac_profile_active) + iwm_invalidate_mlme_profile(iwm); + + clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status); + iwm->umac_profile_active = 0; + memset(iwm->bssid, 0, ETH_ALEN); + iwm->channel = 0; + + iwm_link_off(iwm); + + wake_up_interruptible(&iwm->mlme_queue); + + cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL); +} + +static void iwm_ct_kill_work(struct work_struct *work) +{ + struct iwm_priv *iwm = + container_of(work, struct iwm_priv, ct_kill_delay.work); + struct wiphy *wiphy = iwm_to_wiphy(iwm); + + IWM_INFO(iwm, "CT kill delay timeout\n"); + + wiphy_rfkill_set_hw_state(wiphy, false); +} + +static int __iwm_up(struct iwm_priv *iwm); +static int __iwm_down(struct iwm_priv *iwm); + +static void iwm_reset_worker(struct work_struct *work) +{ + struct iwm_priv *iwm; + struct iwm_umac_profile *profile = NULL; + int uninitialized_var(ret), retry = 0; + + iwm = container_of(work, struct iwm_priv, reset_worker); + + /* + * XXX: The iwm->mutex is introduced purely for this reset work, + * because the other users for iwm_up and iwm_down are only netdev + * ndo_open and ndo_stop which are already protected by rtnl. + * Please remove iwm->mutex together if iwm_reset_worker() is not + * required in the future. 
+ */ + if (!mutex_trylock(&iwm->mutex)) { + IWM_WARN(iwm, "We are in the middle of interface bringing " + "UP/DOWN. Skip driver resetting.\n"); + return; + } + + if (iwm->umac_profile_active) { + profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL); + if (profile) + memcpy(profile, iwm->umac_profile, sizeof(*profile)); + else + IWM_ERR(iwm, "Couldn't alloc memory for profile\n"); + } + + __iwm_down(iwm); + + while (retry++ < 3) { + ret = __iwm_up(iwm); + if (!ret) + break; + + schedule_timeout_uninterruptible(10 * HZ); + } + + if (ret) { + IWM_WARN(iwm, "iwm_up() failed: %d\n", ret); + + kfree(profile); + goto out; + } + + if (profile) { + IWM_DBG_MLME(iwm, DBG, "Resend UMAC profile\n"); + memcpy(iwm->umac_profile, profile, sizeof(*profile)); + iwm_send_mlme_profile(iwm); + kfree(profile); + } else + clear_bit(IWM_STATUS_RESETTING, &iwm->status); + + out: + mutex_unlock(&iwm->mutex); +} + +static void iwm_auth_retry_worker(struct work_struct *work) +{ + struct iwm_priv *iwm; + int i, ret; + + iwm = container_of(work, struct iwm_priv, auth_retry_worker); + if (iwm->umac_profile_active) { + ret = iwm_invalidate_mlme_profile(iwm); + if (ret < 0) + return; + } + + iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK; + + ret = iwm_send_mlme_profile(iwm); + if (ret < 0) + return; + + for (i = 0; i < IWM_NUM_KEYS; i++) + if (iwm->keys[i].key_len) + iwm_set_key(iwm, 0, &iwm->keys[i]); + + iwm_set_tx_key(iwm, iwm->default_key); +} + + + +static void iwm_watchdog(unsigned long data) +{ + struct iwm_priv *iwm = (struct iwm_priv *)data; + + IWM_WARN(iwm, "Watchdog expired: UMAC stalls!\n"); + + if (modparam_reset) + iwm_resetting(iwm); +} + +int iwm_priv_init(struct iwm_priv *iwm) +{ + int i, j; + char name[32]; + + iwm->status = 0; + INIT_LIST_HEAD(&iwm->pending_notif); + init_waitqueue_head(&iwm->notif_queue); + init_waitqueue_head(&iwm->nonwifi_queue); + init_waitqueue_head(&iwm->wifi_ntfy_queue); + init_waitqueue_head(&iwm->mlme_queue); + memcpy(&iwm->conf, &def_iwm_conf, sizeof(struct iwm_conf)); + spin_lock_init(&iwm->tx_credit.lock); + INIT_LIST_HEAD(&iwm->wifi_pending_cmd); + INIT_LIST_HEAD(&iwm->nonwifi_pending_cmd); + iwm->wifi_seq_num = UMAC_WIFI_SEQ_NUM_BASE; + iwm->nonwifi_seq_num = UMAC_NONWIFI_SEQ_NUM_BASE; + spin_lock_init(&iwm->cmd_lock); + iwm->scan_id = 1; + INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request); + INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work); + INIT_DELAYED_WORK(&iwm->ct_kill_delay, iwm_ct_kill_work); + INIT_WORK(&iwm->reset_worker, iwm_reset_worker); + INIT_WORK(&iwm->auth_retry_worker, iwm_auth_retry_worker); + INIT_LIST_HEAD(&iwm->bss_list); + + skb_queue_head_init(&iwm->rx_list); + INIT_LIST_HEAD(&iwm->rx_tickets); + for (i = 0; i < IWM_RX_ID_HASH; i++) + INIT_LIST_HEAD(&iwm->rx_packets[i]); + + INIT_WORK(&iwm->rx_worker, iwm_rx_worker); + + iwm->rx_wq = create_singlethread_workqueue(KBUILD_MODNAME "_rx"); + if (!iwm->rx_wq) + return -EAGAIN; + + for (i = 0; i < IWM_TX_QUEUES; i++) { + INIT_WORK(&iwm->txq[i].worker, iwm_tx_worker); + snprintf(name, 32, KBUILD_MODNAME "_tx_%d", i); + iwm->txq[i].id = i; + iwm->txq[i].wq = create_singlethread_workqueue(name); + if (!iwm->txq[i].wq) + return -EAGAIN; + + skb_queue_head_init(&iwm->txq[i].queue); + skb_queue_head_init(&iwm->txq[i].stopped_queue); + spin_lock_init(&iwm->txq[i].lock); + } + + for (i = 0; i < IWM_NUM_KEYS; i++) + memset(&iwm->keys[i], 0, sizeof(struct iwm_key)); + + iwm->default_key = -1; + + for (i = 0; i < IWM_STA_TABLE_NUM; i++) + for (j = 0; j < 
IWM_UMAC_TID_NR; j++) { + mutex_init(&iwm->sta_table[i].tid_info[j].mutex); + iwm->sta_table[i].tid_info[j].stopped = false; + } + + init_timer(&iwm->watchdog); + iwm->watchdog.function = iwm_watchdog; + iwm->watchdog.data = (unsigned long)iwm; + mutex_init(&iwm->mutex); + + iwm->last_fw_err = kzalloc(sizeof(struct iwm_fw_error_hdr), + GFP_KERNEL); + if (iwm->last_fw_err == NULL) + return -ENOMEM; + + return 0; +} + +void iwm_priv_deinit(struct iwm_priv *iwm) +{ + int i; + + for (i = 0; i < IWM_TX_QUEUES; i++) + destroy_workqueue(iwm->txq[i].wq); + + destroy_workqueue(iwm->rx_wq); + kfree(iwm->last_fw_err); +} + +/* + * We reset all the structures, and we reset the UMAC. + * After calling this routine, you're expected to reload + * the firmware. + */ +void iwm_reset(struct iwm_priv *iwm) +{ + struct iwm_notif *notif, *next; + + if (test_bit(IWM_STATUS_READY, &iwm->status)) + iwm_target_reset(iwm); + + if (test_bit(IWM_STATUS_RESETTING, &iwm->status)) { + iwm->status = 0; + set_bit(IWM_STATUS_RESETTING, &iwm->status); + } else + iwm->status = 0; + iwm->scan_id = 1; + + list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) { + list_del(¬if->pending); + kfree(notif->buf); + kfree(notif); + } + + iwm_cmd_flush(iwm); + + flush_workqueue(iwm->rx_wq); + + iwm_link_off(iwm); +} + +void iwm_resetting(struct iwm_priv *iwm) +{ + set_bit(IWM_STATUS_RESETTING, &iwm->status); + + schedule_work(&iwm->reset_worker); +} + +/* + * Notification code: + * + * We're faced with the following issue: Any host command can + * have an answer or not, and if there's an answer to expect, + * it can be treated synchronously or asynchronously. + * To work around the synchronous answer case, we implemented + * our notification mechanism. + * When a code path needs to wait for a command response + * synchronously, it calls notif_handle(), which waits for the + * right notification to show up, and then process it. Before + * starting to wait, it registered as a waiter for this specific + * answer (by toggling a bit in on of the handler_map), so that + * the rx code knows that it needs to send a notification to the + * waiting processes. It does so by calling iwm_notif_send(), + * which adds the notification to the pending notifications list, + * and then wakes the waiting processes up. 
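+ *
+ * Editor's illustration (a sketch; see iwm_channels_init() below for the
+ * real instance of this pattern): a synchronous request then looks like
+ *
+ *	ret = iwm_send_umac_channel_list(iwm);
+ *	if (!ret)
+ *		ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST,
+ *				       IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);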
+ */ +int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd, + u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size) +{ + struct iwm_notif *notif; + + notif = kzalloc(sizeof(struct iwm_notif), GFP_KERNEL); + if (!notif) { + IWM_ERR(iwm, "Couldn't alloc memory for notification\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(¬if->pending); + notif->cmd = cmd; + notif->cmd_id = cmd_id; + notif->src = source; + notif->buf = kzalloc(buf_size, GFP_KERNEL); + if (!notif->buf) { + IWM_ERR(iwm, "Couldn't alloc notification buffer\n"); + kfree(notif); + return -ENOMEM; + } + notif->buf_size = buf_size; + memcpy(notif->buf, buf, buf_size); + list_add_tail(¬if->pending, &iwm->pending_notif); + + wake_up_interruptible(&iwm->notif_queue); + + return 0; +} + +static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd, + u8 source) +{ + struct iwm_notif *notif, *next; + + list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) { + if ((notif->cmd_id == cmd) && (notif->src == source)) { + list_del(¬if->pending); + return notif; + } + } + + return NULL; +} + +static struct iwm_notif *iwm_notif_wait(struct iwm_priv *iwm, u32 cmd, + u8 source, long timeout) +{ + int ret; + struct iwm_notif *notif; + unsigned long *map = NULL; + + switch (source) { + case IWM_SRC_LMAC: + map = &iwm->lmac_handler_map[0]; + break; + case IWM_SRC_UMAC: + map = &iwm->umac_handler_map[0]; + break; + case IWM_SRC_UDMA: + map = &iwm->udma_handler_map[0]; + break; + } + + set_bit(cmd, map); + + ret = wait_event_interruptible_timeout(iwm->notif_queue, + ((notif = iwm_notif_find(iwm, cmd, source)) != NULL), + timeout); + clear_bit(cmd, map); + + if (!ret) + return NULL; + + return notif; +} + +int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout) +{ + int ret; + struct iwm_notif *notif; + + notif = iwm_notif_wait(iwm, cmd, source, timeout); + if (!notif) + return -ETIME; + + ret = iwm_rx_handle_resp(iwm, notif->buf, notif->buf_size, notif->cmd); + kfree(notif->buf); + kfree(notif); + + return ret; +} + +static int iwm_config_boot_params(struct iwm_priv *iwm) +{ + struct iwm_udma_nonwifi_cmd target_cmd; + int ret; + + /* check Wimax is off and config debug monitor */ + if (!modparam_wimax_enable) { + u32 data1 = 0x1f; + u32 addr1 = 0x606BE258; + + u32 data2_set = 0x0; + u32 data2_clr = 0x1; + u32 addr2 = 0x606BE100; + + u32 data3 = 0x1; + u32 addr3 = 0x606BEC00; + + target_cmd.resp = 0; + target_cmd.handle_by_hw = 0; + target_cmd.eop = 1; + + target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE; + target_cmd.addr = cpu_to_le32(addr1); + target_cmd.op1_sz = cpu_to_le32(sizeof(u32)); + target_cmd.op2 = 0; + + ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1); + if (ret < 0) { + IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n"); + return ret; + } + + target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE; + target_cmd.addr = cpu_to_le32(addr2); + target_cmd.op1_sz = cpu_to_le32(data2_set); + target_cmd.op2 = cpu_to_le32(data2_clr); + + ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1); + if (ret < 0) { + IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n"); + return ret; + } + + target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE; + target_cmd.addr = cpu_to_le32(addr3); + target_cmd.op1_sz = cpu_to_le32(sizeof(u32)); + target_cmd.op2 = 0; + + ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data3); + if (ret < 0) { + IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n"); + return ret; + } + } + + return 0; +} + +void iwm_init_default_profile(struct iwm_priv *iwm, + struct iwm_umac_profile 
*profile) +{ + memset(profile, 0, sizeof(struct iwm_umac_profile)); + + profile->sec.auth_type = UMAC_AUTH_TYPE_OPEN; + profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE; + profile->sec.ucast_cipher = UMAC_CIPHER_TYPE_NONE; + profile->sec.mcast_cipher = UMAC_CIPHER_TYPE_NONE; + + if (iwm->conf.enable_qos) + profile->flags |= cpu_to_le16(UMAC_PROFILE_QOS_ALLOWED); + + profile->wireless_mode = iwm->conf.wireless_mode; + profile->mode = cpu_to_le32(iwm->conf.mode); + + profile->ibss.atim = 0; + profile->ibss.beacon_interval = 100; + profile->ibss.join_only = 0; + profile->ibss.band = iwm->conf.ibss_band; + profile->ibss.channel = iwm->conf.ibss_channel; +} + +void iwm_link_on(struct iwm_priv *iwm) +{ + netif_carrier_on(iwm_to_ndev(iwm)); + netif_tx_wake_all_queues(iwm_to_ndev(iwm)); + + iwm_send_umac_stats_req(iwm, 0); +} + +void iwm_link_off(struct iwm_priv *iwm) +{ + struct iw_statistics *wstats = &iwm->wstats; + int i; + + netif_tx_stop_all_queues(iwm_to_ndev(iwm)); + netif_carrier_off(iwm_to_ndev(iwm)); + + for (i = 0; i < IWM_TX_QUEUES; i++) { + skb_queue_purge(&iwm->txq[i].queue); + skb_queue_purge(&iwm->txq[i].stopped_queue); + + iwm->txq[i].concat_count = 0; + iwm->txq[i].concat_ptr = iwm->txq[i].concat_buf; + + flush_workqueue(iwm->txq[i].wq); + } + + iwm_rx_free(iwm); + + cancel_delayed_work_sync(&iwm->stats_request); + memset(wstats, 0, sizeof(struct iw_statistics)); + wstats->qual.updated = IW_QUAL_ALL_INVALID; + + kfree(iwm->req_ie); + iwm->req_ie = NULL; + iwm->req_ie_len = 0; + kfree(iwm->resp_ie); + iwm->resp_ie = NULL; + iwm->resp_ie_len = 0; + + del_timer_sync(&iwm->watchdog); +} + +static void iwm_bss_list_clean(struct iwm_priv *iwm) +{ + struct iwm_bss_info *bss, *next; + + list_for_each_entry_safe(bss, next, &iwm->bss_list, node) { + list_del(&bss->node); + kfree(bss->bss); + kfree(bss); + } +} + +static int iwm_channels_init(struct iwm_priv *iwm) +{ + int ret; + + ret = iwm_send_umac_channel_list(iwm); + if (ret) { + IWM_ERR(iwm, "Send channel list failed\n"); + return ret; + } + + ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST, + IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT); + if (ret) { + IWM_ERR(iwm, "Didn't get a channel list notification\n"); + return ret; + } + + return 0; +} + +static int __iwm_up(struct iwm_priv *iwm) +{ + int ret; + struct iwm_notif *notif_reboot, *notif_ack = NULL; + struct wiphy *wiphy = iwm_to_wiphy(iwm); + u32 wireless_mode; + + ret = iwm_bus_enable(iwm); + if (ret) { + IWM_ERR(iwm, "Couldn't enable function\n"); + return ret; + } + + iwm_rx_setup_handlers(iwm); + + /* Wait for initial BARKER_REBOOT from hardware */ + notif_reboot = iwm_notif_wait(iwm, IWM_BARKER_REBOOT_NOTIFICATION, + IWM_SRC_UDMA, 2 * HZ); + if (!notif_reboot) { + IWM_ERR(iwm, "Wait for REBOOT_BARKER timeout\n"); + goto err_disable; + } + + /* We send the barker back */ + ret = iwm_bus_send_chunk(iwm, notif_reboot->buf, 16); + if (ret) { + IWM_ERR(iwm, "REBOOT barker response failed\n"); + kfree(notif_reboot); + goto err_disable; + } + + kfree(notif_reboot->buf); + kfree(notif_reboot); + + /* Wait for ACK_BARKER from hardware */ + notif_ack = iwm_notif_wait(iwm, IWM_ACK_BARKER_NOTIFICATION, + IWM_SRC_UDMA, 2 * HZ); + if (!notif_ack) { + IWM_ERR(iwm, "Wait for ACK_BARKER timeout\n"); + goto err_disable; + } + + kfree(notif_ack->buf); + kfree(notif_ack); + + /* We start to config static boot parameters */ + ret = iwm_config_boot_params(iwm); + if (ret) { + IWM_ERR(iwm, "Config boot parameters failed\n"); + goto err_disable; + } + + ret = iwm_read_mac(iwm, 
iwm_to_ndev(iwm)->dev_addr); + if (ret) { + IWM_ERR(iwm, "MAC reading failed\n"); + goto err_disable; + } + memcpy(iwm_to_ndev(iwm)->perm_addr, iwm_to_ndev(iwm)->dev_addr, + ETH_ALEN); + + /* We can load the FWs */ + ret = iwm_load_fw(iwm); + if (ret) { + IWM_ERR(iwm, "FW loading failed\n"); + goto err_disable; + } + + ret = iwm_eeprom_fat_channels(iwm); + if (ret) { + IWM_ERR(iwm, "Couldnt read HT channels EEPROM entries\n"); + goto err_fw; + } + + /* + * Read our SKU capabilities. + * If it's valid, we AND the configured wireless mode with the + * device EEPROM value as the current profile wireless mode. + */ + wireless_mode = iwm_eeprom_wireless_mode(iwm); + if (wireless_mode) { + iwm->conf.wireless_mode &= wireless_mode; + if (iwm->umac_profile) + iwm->umac_profile->wireless_mode = + iwm->conf.wireless_mode; + } else + IWM_ERR(iwm, "Wrong SKU capabilities: 0x%x\n", + *((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP))); + + snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "L%s_U%s", + iwm->lmac_version, iwm->umac_version); + + /* We configure the UMAC and enable the wifi module */ + ret = iwm_send_umac_config(iwm, + cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) | + cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_LINK_EN) | + cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_MLME_EN)); + if (ret) { + IWM_ERR(iwm, "UMAC config failed\n"); + goto err_fw; + } + + ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS, + IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT); + if (ret) { + IWM_ERR(iwm, "Didn't get a wifi core status notification\n"); + goto err_fw; + } + + if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN | + UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) { + IWM_DBG_BOOT(iwm, DBG, "Not all cores enabled:0x%x\n", + iwm->core_enabled); + ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS, + IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT); + if (ret) { + IWM_ERR(iwm, "Didn't get a core status notification\n"); + goto err_fw; + } + + if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN | + UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) { + IWM_ERR(iwm, "Not all cores enabled: 0x%x\n", + iwm->core_enabled); + goto err_fw; + } else { + IWM_INFO(iwm, "All cores enabled\n"); + } + } + + ret = iwm_channels_init(iwm); + if (ret < 0) { + IWM_ERR(iwm, "Couldn't init channels\n"); + goto err_fw; + } + + /* Set the READY bit to indicate interface is brought up successfully */ + set_bit(IWM_STATUS_READY, &iwm->status); + + return 0; + + err_fw: + iwm_eeprom_exit(iwm); + + err_disable: + ret = iwm_bus_disable(iwm); + if (ret < 0) + IWM_ERR(iwm, "Couldn't disable function\n"); + + return -EIO; +} + +int iwm_up(struct iwm_priv *iwm) +{ + int ret; + + mutex_lock(&iwm->mutex); + ret = __iwm_up(iwm); + mutex_unlock(&iwm->mutex); + + return ret; +} + +static int __iwm_down(struct iwm_priv *iwm) +{ + int ret; + + /* The interface is already down */ + if (!test_bit(IWM_STATUS_READY, &iwm->status)) + return 0; + + if (iwm->scan_request) { + cfg80211_scan_done(iwm->scan_request, true); + iwm->scan_request = NULL; + } + + clear_bit(IWM_STATUS_READY, &iwm->status); + + iwm_eeprom_exit(iwm); + iwm_bss_list_clean(iwm); + iwm_init_default_profile(iwm, iwm->umac_profile); + iwm->umac_profile_active = false; + iwm->default_key = -1; + iwm->core_enabled = 0; + + ret = iwm_bus_disable(iwm); + if (ret < 0) { + IWM_ERR(iwm, "Couldn't disable function\n"); + return ret; + } + + return 0; +} + +int iwm_down(struct iwm_priv *iwm) +{ + int ret; + + mutex_lock(&iwm->mutex); + ret = __iwm_down(iwm); + mutex_unlock(&iwm->mutex); + + return ret; +} 
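Editor's note: the iwm_notif_send() / iwm_notif_find() / iwm_notif_wait() trio earlier in this file is a small rendezvous: the RX path appends a notification keyed by (cmd_id, source) to iwm->pending_notif and wakes notif_queue, while a waiter first sets the command bit in the per-source handler map and then sleeps in wait_event_interruptible_timeout() until a matching entry can be unlinked. The standalone C sketch below models only the list bookkeeping, with no locking and no wait queue; every name in it is illustrative and nothing here is driver API.

/* Illustrative, userspace-only model of the pending-notification list
 * used by iwm_notif_send()/iwm_notif_find(). Not driver code. */
#include <stdio.h>
#include <stdlib.h>

struct notif {
	unsigned int cmd_id;
	unsigned int src;
	char payload[32];
	struct notif *next;
};

static struct notif *pending;		/* head of the pending list */

/* Producer side: queue a copy of the payload, keyed by (cmd_id, src). */
static int notif_send(unsigned int cmd_id, unsigned int src, const char *buf)
{
	struct notif *n = calloc(1, sizeof(*n));

	if (!n)
		return -1;
	n->cmd_id = cmd_id;
	n->src = src;
	snprintf(n->payload, sizeof(n->payload), "%s", buf);
	n->next = pending;
	pending = n;		/* in the driver: list_add_tail() then wake_up() */
	return 0;
}

/* Consumer side: unlink and return the first entry matching (cmd_id, src).
 * The driver evaluates this condition inside its timed wait. */
static struct notif *notif_find(unsigned int cmd_id, unsigned int src)
{
	struct notif **pp, *n;

	for (pp = &pending; (n = *pp) != NULL; pp = &n->next) {
		if (n->cmd_id == cmd_id && n->src == src) {
			*pp = n->next;
			return n;
		}
	}
	return NULL;
}

int main(void)
{
	struct notif *n;

	notif_send(0x10, 1, "channel list");
	n = notif_find(0x10, 1);
	if (n) {
		printf("got cmd 0x%x from src %u: %s\n",
		       n->cmd_id, n->src, n->payload);
		free(n);
	}
	return 0;
}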
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c new file mode 100644 index 0000000..c4c0d23 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c @@ -0,0 +1,188 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +/* + * This is the netdev related hooks for iwm. + * + * Some interesting code paths: + * + * iwm_open() (Called at netdev interface bringup time) + * -> iwm_up() (main.c) + * -> iwm_bus_enable() + * -> if_sdio_enable() (In case of an SDIO bus) + * -> sdio_enable_func() + * -> iwm_notif_wait(BARKER_REBOOT) (wait for reboot barker) + * -> iwm_notif_wait(ACK_BARKER) (wait for ACK barker) + * -> iwm_load_fw() (fw.c) + * -> iwm_load_umac() + * -> iwm_load_lmac() (Calibration LMAC) + * -> iwm_load_lmac() (Operational LMAC) + * -> iwm_send_umac_config() + * + * iwm_stop() (Called at netdev interface bringdown time) + * -> iwm_down() + * -> iwm_bus_disable() + * -> if_sdio_disable() (In case of an SDIO bus) + * -> sdio_disable_func() + */ +#include + +#include "iwm.h" +#include "commands.h" +#include "cfg80211.h" +#include "debug.h" + +static int iwm_open(struct net_device *ndev) +{ + struct iwm_priv *iwm = ndev_to_iwm(ndev); + + return iwm_up(iwm); +} + +static int iwm_stop(struct net_device *ndev) +{ + struct iwm_priv *iwm = ndev_to_iwm(ndev); + + return iwm_down(iwm); +} + +/* + * iwm AC to queue mapping + * + * AC_VO -> queue 3 + * AC_VI -> queue 2 + * AC_BE -> queue 1 + * AC_BK -> queue 0 + */ +static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; + +int iwm_tid_to_queue(u16 tid) +{ + if (tid > IWM_UMAC_TID_NR - 2) + return -EINVAL; + + return iwm_1d_to_queue[tid]; +} + +static u16 iwm_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + skb->priority = cfg80211_classify8021d(skb); + + return iwm_1d_to_queue[skb->priority]; +} + +static const struct net_device_ops iwm_netdev_ops = { + .ndo_open = iwm_open, + .ndo_stop = iwm_stop, + .ndo_start_xmit = iwm_xmit_frame, + .ndo_select_queue = iwm_select_queue, +}; + +void *iwm_if_alloc(int sizeof_bus, struct device *dev, + struct iwm_if_ops *if_ops) +{ + struct net_device *ndev; + struct wireless_dev *wdev; + struct iwm_priv *iwm; + int ret = 0; + + wdev = iwm_wdev_alloc(sizeof_bus, dev); + if (IS_ERR(wdev)) + return wdev; + + iwm = wdev_to_iwm(wdev); + iwm->bus_ops = if_ops; + iwm->wdev = wdev; + + ret = iwm_priv_init(iwm); + if (ret) { + dev_err(dev, "failed to init iwm_priv\n"); + goto out_wdev; + } + + wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode); + + ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES); + if (!ndev) { + dev_err(dev, "no memory for network device instance\n"); + goto out_priv; + } + + ndev->netdev_ops = &iwm_netdev_ops; + ndev->ieee80211_ptr = wdev; + 
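Editor's note: the iwm_1d_to_queue[] table and iwm_select_queue() defined above steer frames by 802.1d priority: voice (6, 7) to queue 3, video (4, 5) to queue 2, best effort (0, 3) to queue 1 and background (1, 2) to queue 0. Below is a standalone sketch of that lookup with a simple bounds check; the table values are copied from the driver, while the plain 0..7 range test is only a stand-in for the IWM_UMAC_TID_NR based check in iwm_tid_to_queue().

/* Standalone model of the iwm 802.1d-priority-to-TX-queue lookup.
 * Table values copied from iwm_1d_to_queue[]; the rest is illustrative. */
#include <stdio.h>

/* index: 802.1d priority (0..7), value: TX queue index (0..3) */
static const unsigned short prio_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };

static int tid_to_queue(unsigned int tid)
{
	/* Only the eight QoS priorities map to a data queue here;
	 * the driver derives its bound from IWM_UMAC_TID_NR instead. */
	if (tid >= 8)
		return -1;
	return prio_to_queue[tid];
}

int main(void)
{
	unsigned int prio;

	for (prio = 0; prio < 8; prio++)
		printf("priority %u -> queue %d\n", prio, tid_to_queue(prio));
	return 0;
}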
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); + wdev->netdev = ndev; + + iwm->umac_profile = kmalloc(sizeof(struct iwm_umac_profile), + GFP_KERNEL); + if (!iwm->umac_profile) { + dev_err(dev, "Couldn't alloc memory for profile\n"); + goto out_profile; + } + + iwm_init_default_profile(iwm, iwm->umac_profile); + + return iwm; + + out_profile: + free_netdev(ndev); + + out_priv: + iwm_priv_deinit(iwm); + + out_wdev: + iwm_wdev_free(iwm); + return ERR_PTR(ret); +} + +void iwm_if_free(struct iwm_priv *iwm) +{ + if (!iwm_to_ndev(iwm)) + return; + + cancel_delayed_work_sync(&iwm->ct_kill_delay); + free_netdev(iwm_to_ndev(iwm)); + iwm_priv_deinit(iwm); + kfree(iwm->umac_profile); + iwm->umac_profile = NULL; + iwm_wdev_free(iwm); +} + +int iwm_if_add(struct iwm_priv *iwm) +{ + struct net_device *ndev = iwm_to_ndev(iwm); + int ret; + + ret = register_netdev(ndev); + if (ret < 0) { + dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret); + return ret; + } + + return 0; +} + +void iwm_if_remove(struct iwm_priv *iwm) +{ + unregister_netdev(iwm_to_ndev(iwm)); +} diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c new file mode 100644 index 0000000..8456b4d --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/rx.c @@ -0,0 +1,1666 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
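Editor's note: iwm_if_alloc() above follows the usual staged-allocation shape: each resource that was successfully acquired gets a matching unwind label, so a later failure releases only what already exists, in reverse order. The sketch below reproduces just that shape in plain C; the three "resources" and all names are invented for illustration.

/* Minimal sketch of staged allocation with goto-based unwinding,
 * mirroring the shape of iwm_if_alloc(). All names are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *wdev;	/* stands in for the wireless device */
	void *priv;	/* stands in for the private state */
	void *ndev;	/* stands in for the net device */
};

static struct ctx *ctx_alloc(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	c->wdev = malloc(16);
	if (!c->wdev)
		goto out_ctx;

	c->priv = malloc(16);
	if (!c->priv)
		goto out_wdev;

	c->ndev = malloc(16);
	if (!c->ndev)
		goto out_priv;

	return c;		/* every resource is live at this point */

	/* Unwind in reverse order of acquisition. */
 out_priv:
	free(c->priv);
 out_wdev:
	free(c->wdev);
 out_ctx:
	free(c);
	return NULL;
}

int main(void)
{
	struct ctx *c = ctx_alloc();

	printf("allocation %s\n", c ? "succeeded" : "failed");
	if (c) {
		free(c->ndev);
		free(c->priv);
		free(c->wdev);
		free(c);
	}
	return 0;
}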
+ * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "iwm.h" +#include "debug.h" +#include "hal.h" +#include "umac.h" +#include "lmac.h" +#include "commands.h" +#include "rx.h" +#include "cfg80211.h" +#include "eeprom.h" + +static int iwm_rx_check_udma_hdr(struct iwm_udma_in_hdr *hdr) +{ + if ((le32_to_cpu(hdr->cmd) == UMAC_PAD_TERMINAL) || + (le32_to_cpu(hdr->size) == UMAC_PAD_TERMINAL)) + return -EINVAL; + + return 0; +} + +static inline int iwm_rx_resp_size(struct iwm_udma_in_hdr *hdr) +{ + return ALIGN(le32_to_cpu(hdr->size) + sizeof(struct iwm_udma_in_hdr), + 16); +} + +/* + * Notification handlers: + * + * For every possible notification we can receive from the + * target, we have a handler. + * When we get a target notification, and there is no one + * waiting for it, it's just processed through the rx code + * path: + * + * iwm_rx_handle() + * -> iwm_rx_handle_umac() + * -> iwm_rx_handle_wifi() + * -> iwm_rx_handle_resp() + * -> iwm_ntf_*() + * + * OR + * + * -> iwm_rx_handle_non_wifi() + * + * If there are processes waiting for this notification, then + * iwm_rx_handle_wifi() just wakes those processes up and they + * grab the pending notification. + */ +static int iwm_ntf_error(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_error *error; + struct iwm_fw_error_hdr *fw_err; + + error = (struct iwm_umac_notif_error *)buf; + fw_err = &error->err; + + memcpy(iwm->last_fw_err, fw_err, sizeof(struct iwm_fw_error_hdr)); + + IWM_ERR(iwm, "%cMAC FW ERROR:\n", + (le32_to_cpu(fw_err->category) == UMAC_SYS_ERR_CAT_LMAC) ? 'L' : 'U'); + IWM_ERR(iwm, "\tCategory: %d\n", le32_to_cpu(fw_err->category)); + IWM_ERR(iwm, "\tStatus: 0x%x\n", le32_to_cpu(fw_err->status)); + IWM_ERR(iwm, "\tPC: 0x%x\n", le32_to_cpu(fw_err->pc)); + IWM_ERR(iwm, "\tblink1: %d\n", le32_to_cpu(fw_err->blink1)); + IWM_ERR(iwm, "\tblink2: %d\n", le32_to_cpu(fw_err->blink2)); + IWM_ERR(iwm, "\tilink1: %d\n", le32_to_cpu(fw_err->ilink1)); + IWM_ERR(iwm, "\tilink2: %d\n", le32_to_cpu(fw_err->ilink2)); + IWM_ERR(iwm, "\tData1: 0x%x\n", le32_to_cpu(fw_err->data1)); + IWM_ERR(iwm, "\tData2: 0x%x\n", le32_to_cpu(fw_err->data2)); + IWM_ERR(iwm, "\tLine number: %d\n", le32_to_cpu(fw_err->line_num)); + IWM_ERR(iwm, "\tUMAC status: 0x%x\n", le32_to_cpu(fw_err->umac_status)); + IWM_ERR(iwm, "\tLMAC status: 0x%x\n", le32_to_cpu(fw_err->lmac_status)); + IWM_ERR(iwm, "\tSDIO status: 0x%x\n", le32_to_cpu(fw_err->sdio_status)); + + iwm_resetting(iwm); + + return 0; +} + +static int iwm_ntf_umac_alive(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_alive *alive_resp = + (struct iwm_umac_notif_alive *)(buf); + u16 status = le16_to_cpu(alive_resp->status); + + if (status == UMAC_NTFY_ALIVE_STATUS_ERR) { + IWM_ERR(iwm, "Receive error UMAC_ALIVE\n"); + return -EIO; + } + + iwm_tx_credit_init_pools(iwm, alive_resp); + + return 0; +} + +static int iwm_ntf_init_complete(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct wiphy *wiphy = iwm_to_wiphy(iwm); + struct iwm_umac_notif_init_complete *init_complete = + (struct iwm_umac_notif_init_complete *)(buf); + u16 status = le16_to_cpu(init_complete->status); + bool blocked = (status == UMAC_NTFY_INIT_COMPLETE_STATUS_ERR); + + if (blocked) + IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is on (radio off)\n"); + else + 
IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is off (radio on)\n"); + + wiphy_rfkill_set_hw_state(wiphy, blocked); + + return 0; +} + +static int iwm_ntf_tx_credit_update(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + int pool_nr, total_freed_pages; + unsigned long pool_map; + int i, id; + struct iwm_umac_notif_page_dealloc *dealloc = + (struct iwm_umac_notif_page_dealloc *)buf; + + pool_nr = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_CNT); + pool_map = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_MSK); + + IWM_DBG_TX(iwm, DBG, "UMAC dealloc notification: pool nr %d, " + "update map 0x%lx\n", pool_nr, pool_map); + + spin_lock(&iwm->tx_credit.lock); + + for (i = 0; i < pool_nr; i++) { + id = GET_VAL32(dealloc->grp_info[i], + UMAC_DEALLOC_NTFY_GROUP_NUM); + if (test_bit(id, &pool_map)) { + total_freed_pages = GET_VAL32(dealloc->grp_info[i], + UMAC_DEALLOC_NTFY_PAGE_CNT); + iwm_tx_credit_inc(iwm, id, total_freed_pages); + } + } + + spin_unlock(&iwm->tx_credit.lock); + + return 0; +} + +static int iwm_ntf_umac_reset(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + IWM_DBG_NTF(iwm, DBG, "UMAC RESET done\n"); + + return 0; +} + +static int iwm_ntf_lmac_version(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + IWM_DBG_NTF(iwm, INFO, "LMAC Version: %x.%x\n", buf[9], buf[8]); + + return 0; +} + +static int iwm_ntf_tx(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_lmac_tx_resp *tx_resp; + struct iwm_umac_wifi_in_hdr *hdr; + + tx_resp = (struct iwm_lmac_tx_resp *) + (buf + sizeof(struct iwm_umac_wifi_in_hdr)); + hdr = (struct iwm_umac_wifi_in_hdr *)buf; + + IWM_DBG_TX(iwm, DBG, "REPLY_TX, buf size: %lu\n", buf_size); + + IWM_DBG_TX(iwm, DBG, "Seqnum: %d\n", + le16_to_cpu(hdr->sw_hdr.cmd.seq_num)); + IWM_DBG_TX(iwm, DBG, "\tFrame cnt: %d\n", tx_resp->frame_cnt); + IWM_DBG_TX(iwm, DBG, "\tRetry cnt: %d\n", + le16_to_cpu(tx_resp->retry_cnt)); + IWM_DBG_TX(iwm, DBG, "\tSeq ctl: %d\n", le16_to_cpu(tx_resp->seq_ctl)); + IWM_DBG_TX(iwm, DBG, "\tByte cnt: %d\n", + le16_to_cpu(tx_resp->byte_cnt)); + IWM_DBG_TX(iwm, DBG, "\tStatus: 0x%x\n", le32_to_cpu(tx_resp->status)); + + return 0; +} + + +static int iwm_ntf_calib_res(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + u8 opcode; + u8 *calib_buf; + struct iwm_lmac_calib_hdr *hdr = (struct iwm_lmac_calib_hdr *) + (buf + sizeof(struct iwm_umac_wifi_in_hdr)); + + opcode = hdr->opcode; + + BUG_ON(opcode >= CALIBRATION_CMD_NUM || + opcode < PHY_CALIBRATE_OPCODES_NUM); + + IWM_DBG_NTF(iwm, DBG, "Store calibration result for opcode: %d\n", + opcode); + + buf_size -= sizeof(struct iwm_umac_wifi_in_hdr); + calib_buf = iwm->calib_res[opcode].buf; + + if (!calib_buf || (iwm->calib_res[opcode].size < buf_size)) { + kfree(calib_buf); + calib_buf = kzalloc(buf_size, GFP_KERNEL); + if (!calib_buf) { + IWM_ERR(iwm, "Memory allocation failed: calib_res\n"); + return -ENOMEM; + } + iwm->calib_res[opcode].buf = calib_buf; + iwm->calib_res[opcode].size = buf_size; + } + + memcpy(calib_buf, hdr, buf_size); + set_bit(opcode - PHY_CALIBRATE_OPCODES_NUM, &iwm->calib_done_map); + + return 0; +} + +static int iwm_ntf_calib_complete(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + IWM_DBG_NTF(iwm, DBG, "Calibration completed\n"); + + return 0; +} + +static int iwm_ntf_calib_cfg(struct iwm_priv *iwm, u8 *buf, + unsigned 
long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_lmac_cal_cfg_resp *cal_resp; + + cal_resp = (struct iwm_lmac_cal_cfg_resp *) + (buf + sizeof(struct iwm_umac_wifi_in_hdr)); + + IWM_DBG_NTF(iwm, DBG, "Calibration CFG command status: %d\n", + le32_to_cpu(cal_resp->status)); + + return 0; +} + +static int iwm_ntf_wifi_status(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_wifi_status *status = + (struct iwm_umac_notif_wifi_status *)buf; + + iwm->core_enabled |= le16_to_cpu(status->status); + + return 0; +} + +static struct iwm_rx_ticket_node * +iwm_rx_ticket_node_alloc(struct iwm_priv *iwm, struct iwm_rx_ticket *ticket) +{ + struct iwm_rx_ticket_node *ticket_node; + + ticket_node = kzalloc(sizeof(struct iwm_rx_ticket_node), GFP_KERNEL); + if (!ticket_node) { + IWM_ERR(iwm, "Couldn't allocate ticket node\n"); + return ERR_PTR(-ENOMEM); + } + + ticket_node->ticket = kzalloc(sizeof(struct iwm_rx_ticket), GFP_KERNEL); + if (!ticket_node->ticket) { + IWM_ERR(iwm, "Couldn't allocate RX ticket\n"); + kfree(ticket_node); + return ERR_PTR(-ENOMEM); + } + + memcpy(ticket_node->ticket, ticket, sizeof(struct iwm_rx_ticket)); + INIT_LIST_HEAD(&ticket_node->node); + + return ticket_node; +} + +static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node) +{ + kfree(ticket_node->ticket); + kfree(ticket_node); +} + +static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id) +{ + u8 id_hash = IWM_RX_ID_GET_HASH(id); + struct list_head *packet_list; + struct iwm_rx_packet *packet, *next; + + packet_list = &iwm->rx_packets[id_hash]; + + list_for_each_entry_safe(packet, next, packet_list, node) + if (packet->id == id) + return packet; + + return NULL; +} + +static struct iwm_rx_packet *iwm_rx_packet_alloc(struct iwm_priv *iwm, u8 *buf, + u32 size, u16 id) +{ + struct iwm_rx_packet *packet; + + packet = kzalloc(sizeof(struct iwm_rx_packet), GFP_KERNEL); + if (!packet) { + IWM_ERR(iwm, "Couldn't allocate packet\n"); + return ERR_PTR(-ENOMEM); + } + + packet->skb = dev_alloc_skb(size); + if (!packet->skb) { + IWM_ERR(iwm, "Couldn't allocate packet SKB\n"); + kfree(packet); + return ERR_PTR(-ENOMEM); + } + + packet->pkt_size = size; + + skb_put(packet->skb, size); + memcpy(packet->skb->data, buf, size); + INIT_LIST_HEAD(&packet->node); + packet->id = id; + + return packet; +} + +void iwm_rx_free(struct iwm_priv *iwm) +{ + struct iwm_rx_ticket_node *ticket, *nt; + struct iwm_rx_packet *packet, *np; + int i; + + list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) { + list_del(&ticket->node); + iwm_rx_ticket_node_free(ticket); + } + + for (i = 0; i < IWM_RX_ID_HASH; i++) { + list_for_each_entry_safe(packet, np, &iwm->rx_packets[i], + node) { + list_del(&packet->node); + kfree_skb(packet->skb); + kfree(packet); + } + } +} + +static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_rx_ticket *ntf_rx_ticket = + (struct iwm_umac_notif_rx_ticket *)buf; + struct iwm_rx_ticket *ticket = + (struct iwm_rx_ticket *)ntf_rx_ticket->tickets; + int i, schedule_rx = 0; + + for (i = 0; i < ntf_rx_ticket->num_tickets; i++) { + struct iwm_rx_ticket_node *ticket_node; + + switch (le16_to_cpu(ticket->action)) { + case IWM_RX_TICKET_RELEASE: + case IWM_RX_TICKET_DROP: + /* We can push the packet to the stack */ + ticket_node = iwm_rx_ticket_node_alloc(iwm, ticket); + if (IS_ERR(ticket_node)) + return PTR_ERR(ticket_node); + + IWM_DBG_RX(iwm, DBG, 
"TICKET %s(%d)\n", + ticket->action == IWM_RX_TICKET_RELEASE ? + "RELEASE" : "DROP", + ticket->id); + list_add_tail(&ticket_node->node, &iwm->rx_tickets); + + /* + * We received an Rx ticket, most likely there's + * a packet pending for it, it's not worth going + * through the packet hash list to double check. + * Let's just fire the rx worker.. + */ + schedule_rx = 1; + + break; + + default: + IWM_ERR(iwm, "Invalid RX ticket action: 0x%x\n", + ticket->action); + } + + ticket++; + } + + if (schedule_rx) + queue_work(iwm->rx_wq, &iwm->rx_worker); + + return 0; +} + +static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_wifi_in_hdr *wifi_hdr; + struct iwm_rx_packet *packet; + u16 id, buf_offset; + u32 packet_size; + + IWM_DBG_RX(iwm, DBG, "\n"); + + wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf; + id = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num); + buf_offset = sizeof(struct iwm_umac_wifi_in_hdr); + packet_size = buf_size - sizeof(struct iwm_umac_wifi_in_hdr); + + IWM_DBG_RX(iwm, DBG, "CMD:0x%x, seqnum: %d, packet size: %d\n", + wifi_hdr->sw_hdr.cmd.cmd, id, packet_size); + IWM_DBG_RX(iwm, DBG, "Packet id: %d\n", id); + IWM_HEXDUMP(iwm, DBG, RX, "PACKET: ", buf + buf_offset, packet_size); + + packet = iwm_rx_packet_alloc(iwm, buf + buf_offset, packet_size, id); + if (IS_ERR(packet)) + return PTR_ERR(packet); + + list_add_tail(&packet->node, &iwm->rx_packets[IWM_RX_ID_GET_HASH(id)]); + + /* We might (unlikely) have received the packet _after_ the ticket */ + queue_work(iwm->rx_wq, &iwm->rx_worker); + + return 0; +} + +/* MLME handlers */ +static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_assoc_start *start; + + start = (struct iwm_umac_notif_assoc_start *)buf; + + IWM_DBG_MLME(iwm, INFO, "Association with %pM Started, reason: %d\n", + start->bssid, le32_to_cpu(start->roam_reason)); + + wake_up_interruptible(&iwm->mlme_queue); + + return 0; +} + +static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm) +{ + if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 || + iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) && + (iwm->umac_profile->sec.ucast_cipher == + iwm->umac_profile->sec.mcast_cipher) && + (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN)) + return 1; + + return 0; +} + +static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_assoc_complete *complete = + (struct iwm_umac_notif_assoc_complete *)buf; + + IWM_DBG_MLME(iwm, INFO, "Association with %pM completed, status: %d\n", + complete->bssid, complete->status); + + switch (le32_to_cpu(complete->status)) { + case UMAC_ASSOC_COMPLETE_SUCCESS: + set_bit(IWM_STATUS_ASSOCIATED, &iwm->status); + memcpy(iwm->bssid, complete->bssid, ETH_ALEN); + iwm->channel = complete->channel; + + /* Internal roaming state, avoid notifying SME. 
*/ + if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status) + && iwm->conf.mode == UMAC_MODE_BSS) { + cancel_delayed_work(&iwm->disconnect); + cfg80211_roamed(iwm_to_ndev(iwm), + complete->bssid, + iwm->req_ie, iwm->req_ie_len, + iwm->resp_ie, iwm->resp_ie_len, + GFP_KERNEL); + break; + } + + iwm_link_on(iwm); + + if (iwm->conf.mode == UMAC_MODE_IBSS) + goto ibss; + + if (!test_bit(IWM_STATUS_RESETTING, &iwm->status)) + cfg80211_connect_result(iwm_to_ndev(iwm), + complete->bssid, + iwm->req_ie, iwm->req_ie_len, + iwm->resp_ie, iwm->resp_ie_len, + WLAN_STATUS_SUCCESS, + GFP_KERNEL); + else + cfg80211_roamed(iwm_to_ndev(iwm), + complete->bssid, + iwm->req_ie, iwm->req_ie_len, + iwm->resp_ie, iwm->resp_ie_len, + GFP_KERNEL); + break; + case UMAC_ASSOC_COMPLETE_FAILURE: + clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status); + memset(iwm->bssid, 0, ETH_ALEN); + iwm->channel = 0; + + /* Internal roaming state, avoid notifying SME. */ + if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status) + && iwm->conf.mode == UMAC_MODE_BSS) { + cancel_delayed_work(&iwm->disconnect); + break; + } + + iwm_link_off(iwm); + + if (iwm->conf.mode == UMAC_MODE_IBSS) + goto ibss; + + if (!test_bit(IWM_STATUS_RESETTING, &iwm->status)) + if (!iwm_is_open_wep_profile(iwm)) { + cfg80211_connect_result(iwm_to_ndev(iwm), + complete->bssid, + NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + } else { + /* Let's try shared WEP auth */ + IWM_ERR(iwm, "Trying WEP shared auth\n"); + schedule_work(&iwm->auth_retry_worker); + } + else + cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, + GFP_KERNEL); + break; + default: + break; + } + + clear_bit(IWM_STATUS_RESETTING, &iwm->status); + return 0; + + ibss: + cfg80211_ibss_joined(iwm_to_ndev(iwm), iwm->bssid, GFP_KERNEL); + clear_bit(IWM_STATUS_RESETTING, &iwm->status); + return 0; +} + +static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_profile_invalidate *invalid; + u32 reason; + + invalid = (struct iwm_umac_notif_profile_invalidate *)buf; + reason = le32_to_cpu(invalid->reason); + + IWM_DBG_MLME(iwm, INFO, "Profile Invalidated. 
Reason: %d\n", reason); + + if (reason != UMAC_PROFILE_INVALID_REQUEST && + test_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)) + cfg80211_connect_result(iwm_to_ndev(iwm), NULL, NULL, 0, NULL, + 0, WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + + clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status); + clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status); + + iwm->umac_profile_active = 0; + memset(iwm->bssid, 0, ETH_ALEN); + iwm->channel = 0; + + iwm_link_off(iwm); + + wake_up_interruptible(&iwm->mlme_queue); + + return 0; +} + +#define IWM_DISCONNECT_INTERVAL (5 * HZ) + +static int iwm_mlme_connection_terminated(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + IWM_DBG_MLME(iwm, DBG, "Connection terminated\n"); + + schedule_delayed_work(&iwm->disconnect, IWM_DISCONNECT_INTERVAL); + + return 0; +} + +static int iwm_mlme_scan_complete(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + int ret; + struct iwm_umac_notif_scan_complete *scan_complete = + (struct iwm_umac_notif_scan_complete *)buf; + u32 result = le32_to_cpu(scan_complete->result); + + IWM_DBG_MLME(iwm, INFO, "type:0x%x result:0x%x seq:%d\n", + le32_to_cpu(scan_complete->type), + le32_to_cpu(scan_complete->result), + scan_complete->seq_num); + + if (!test_and_clear_bit(IWM_STATUS_SCANNING, &iwm->status)) { + IWM_ERR(iwm, "Scan complete while device not scanning\n"); + return -EIO; + } + if (!iwm->scan_request) + return 0; + + ret = iwm_cfg80211_inform_bss(iwm); + + cfg80211_scan_done(iwm->scan_request, + (result & UMAC_SCAN_RESULT_ABORTED) ? 1 : !!ret); + iwm->scan_request = NULL; + + return ret; +} + +static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_sta_info *umac_sta = + (struct iwm_umac_notif_sta_info *)buf; + struct iwm_sta_info *sta; + int i; + + switch (le32_to_cpu(umac_sta->opcode)) { + case UMAC_OPCODE_ADD_MODIFY: + sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)]; + + IWM_DBG_MLME(iwm, INFO, "%s STA: ID = %d, Color = %d, " + "addr = %pM, qos = %d\n", + sta->valid ? 
"Modify" : "Add", + GET_VAL8(umac_sta->sta_id, LMAC_STA_ID), + GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR), + umac_sta->mac_addr, + umac_sta->flags & UMAC_STA_FLAG_QOS); + + sta->valid = 1; + sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS; + sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR); + memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN); + break; + case UMAC_OPCODE_REMOVE: + IWM_DBG_MLME(iwm, INFO, "Remove STA: ID = %d, Color = %d, " + "addr = %pM\n", + GET_VAL8(umac_sta->sta_id, LMAC_STA_ID), + GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR), + umac_sta->mac_addr); + + sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)]; + + if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN)) + sta->valid = 0; + + break; + case UMAC_OPCODE_CLEAR_ALL: + for (i = 0; i < IWM_STA_TABLE_NUM; i++) + iwm->sta_table[i].valid = 0; + + break; + default: + break; + } + + return 0; +} + +static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct wiphy *wiphy = iwm_to_wiphy(iwm); + + IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n"); + + wiphy_rfkill_set_hw_state(wiphy, true); + + return 0; +} + +static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct wiphy *wiphy = iwm_to_wiphy(iwm); + struct ieee80211_mgmt *mgmt; + struct iwm_umac_notif_bss_info *umac_bss = + (struct iwm_umac_notif_bss_info *)buf; + struct ieee80211_channel *channel; + struct ieee80211_supported_band *band; + struct iwm_bss_info *bss, *next; + s32 signal; + int freq; + u16 frame_len = le16_to_cpu(umac_bss->frame_len); + size_t bss_len = sizeof(struct iwm_umac_notif_bss_info) + frame_len; + + mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf); + + IWM_DBG_MLME(iwm, DBG, "New BSS info entry: %pM\n", mgmt->bssid); + IWM_DBG_MLME(iwm, DBG, "\tType: 0x%x\n", le32_to_cpu(umac_bss->type)); + IWM_DBG_MLME(iwm, DBG, "\tTimestamp: %d\n", + le32_to_cpu(umac_bss->timestamp)); + IWM_DBG_MLME(iwm, DBG, "\tTable Index: %d\n", + le16_to_cpu(umac_bss->table_idx)); + IWM_DBG_MLME(iwm, DBG, "\tBand: %d\n", umac_bss->band); + IWM_DBG_MLME(iwm, DBG, "\tChannel: %d\n", umac_bss->channel); + IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi); + IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len); + + list_for_each_entry_safe(bss, next, &iwm->bss_list, node) + if (bss->bss->table_idx == umac_bss->table_idx) + break; + + if (&bss->node != &iwm->bss_list) { + /* Remove the old BSS entry, we will add it back later. 
*/ + list_del(&bss->node); + kfree(bss->bss); + } else { + /* New BSS entry */ + + bss = kzalloc(sizeof(struct iwm_bss_info), GFP_KERNEL); + if (!bss) { + IWM_ERR(iwm, "Couldn't allocate bss_info\n"); + return -ENOMEM; + } + } + + bss->bss = kzalloc(bss_len, GFP_KERNEL); + if (!bss->bss) { + kfree(bss); + IWM_ERR(iwm, "Couldn't allocate bss\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&bss->node); + memcpy(bss->bss, umac_bss, bss_len); + + if (umac_bss->band == UMAC_BAND_2GHZ) + band = wiphy->bands[IEEE80211_BAND_2GHZ]; + else if (umac_bss->band == UMAC_BAND_5GHZ) + band = wiphy->bands[IEEE80211_BAND_5GHZ]; + else { + IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band); + goto err; + } + + freq = ieee80211_channel_to_frequency(umac_bss->channel); + channel = ieee80211_get_channel(wiphy, freq); + signal = umac_bss->rssi * 100; + + bss->cfg_bss = cfg80211_inform_bss_frame(wiphy, channel, + mgmt, frame_len, + signal, GFP_KERNEL); + if (!bss->cfg_bss) + goto err; + + list_add_tail(&bss->node, &iwm->bss_list); + + return 0; + err: + kfree(bss->bss); + kfree(bss); + + return -EINVAL; +} + +static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_bss_removed *bss_rm = + (struct iwm_umac_notif_bss_removed *)buf; + struct iwm_bss_info *bss, *next; + u16 table_idx; + int i; + + for (i = 0; i < le32_to_cpu(bss_rm->count); i++) { + table_idx = (le16_to_cpu(bss_rm->entries[i]) + & IWM_BSS_REMOVE_INDEX_MSK); + list_for_each_entry_safe(bss, next, &iwm->bss_list, node) + if (bss->bss->table_idx == cpu_to_le16(table_idx)) { + struct ieee80211_mgmt *mgmt; + + mgmt = (struct ieee80211_mgmt *) + (bss->bss->frame_buf); + IWM_DBG_MLME(iwm, ERR, + "BSS removed: %pM\n", + mgmt->bssid); + list_del(&bss->node); + kfree(bss->bss); + kfree(bss); + } + } + + return 0; +} + +static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_mgt_frame *mgt_frame = + (struct iwm_umac_notif_mgt_frame *)buf; + struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame; + + IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame, + le16_to_cpu(mgt_frame->len)); + + if (ieee80211_is_assoc_req(mgt->frame_control)) { + iwm->req_ie_len = le16_to_cpu(mgt_frame->len) + - offsetof(struct ieee80211_mgmt, + u.assoc_req.variable); + kfree(iwm->req_ie); + iwm->req_ie = kmemdup(mgt->u.assoc_req.variable, + iwm->req_ie_len, GFP_KERNEL); + } else if (ieee80211_is_reassoc_req(mgt->frame_control)) { + iwm->req_ie_len = le16_to_cpu(mgt_frame->len) + - offsetof(struct ieee80211_mgmt, + u.reassoc_req.variable); + kfree(iwm->req_ie); + iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable, + iwm->req_ie_len, GFP_KERNEL); + } else if (ieee80211_is_assoc_resp(mgt->frame_control)) { + iwm->resp_ie_len = le16_to_cpu(mgt_frame->len) + - offsetof(struct ieee80211_mgmt, + u.assoc_resp.variable); + kfree(iwm->resp_ie); + iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable, + iwm->resp_ie_len, GFP_KERNEL); + } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) { + iwm->resp_ie_len = le16_to_cpu(mgt_frame->len) + - offsetof(struct ieee80211_mgmt, + u.reassoc_resp.variable); + kfree(iwm->resp_ie); + iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable, + iwm->resp_ie_len, GFP_KERNEL); + } else { + IWM_ERR(iwm, "Unsupported management frame: 0x%x", + le16_to_cpu(mgt->frame_control)); + return 0; + } + + return 0; +} + +static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf, + unsigned long 
buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_wifi_if *notif = + (struct iwm_umac_notif_wifi_if *)buf; + + switch (notif->status) { + case WIFI_IF_NTFY_ASSOC_START: + return iwm_mlme_assoc_start(iwm, buf, buf_size, cmd); + case WIFI_IF_NTFY_ASSOC_COMPLETE: + return iwm_mlme_assoc_complete(iwm, buf, buf_size, cmd); + case WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE: + return iwm_mlme_profile_invalidate(iwm, buf, buf_size, cmd); + case WIFI_IF_NTFY_CONNECTION_TERMINATED: + return iwm_mlme_connection_terminated(iwm, buf, buf_size, cmd); + case WIFI_IF_NTFY_SCAN_COMPLETE: + return iwm_mlme_scan_complete(iwm, buf, buf_size, cmd); + case WIFI_IF_NTFY_STA_TABLE_CHANGE: + return iwm_mlme_update_sta_table(iwm, buf, buf_size, cmd); + case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED: + IWM_DBG_MLME(iwm, DBG, "Extended IE required\n"); + break; + case WIFI_IF_NTFY_RADIO_PREEMPTION: + return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd); + case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED: + return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd); + case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED: + return iwm_mlme_remove_bss(iwm, buf, buf_size, cmd); + break; + case WIFI_IF_NTFY_MGMT_FRAME: + return iwm_mlme_mgt_frame(iwm, buf, buf_size, cmd); + case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START: + case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE: + case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START: + case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT: + case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START: + case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE: + case WIFI_DBG_IF_NTFY_CNCT_ATC_START: + case WIFI_DBG_IF_NTFY_COEX_NOTIFICATION: + case WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP: + case WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP: + IWM_DBG_MLME(iwm, DBG, "MLME debug notification: 0x%x\n", + notif->status); + break; + default: + IWM_ERR(iwm, "Unhandled notification: 0x%x\n", notif->status); + break; + } + + return 0; +} + +#define IWM_STATS_UPDATE_INTERVAL (2 * HZ) + +static int iwm_ntf_statistics(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_stats *stats = (struct iwm_umac_notif_stats *)buf; + struct iw_statistics *wstats = &iwm->wstats; + u16 max_rate = 0; + int i; + + IWM_DBG_MLME(iwm, DBG, "Statistics notification received\n"); + + if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) { + for (i = 0; i < UMAC_NTF_RATE_SAMPLE_NR; i++) { + max_rate = max_t(u16, max_rate, + max(le16_to_cpu(stats->tx_rate[i]), + le16_to_cpu(stats->rx_rate[i]))); + } + /* UMAC passes rate info multiplies by 2 */ + iwm->rate = max_rate >> 1; + } + iwm->txpower = le32_to_cpu(stats->tx_power); + + wstats->status = 0; + + wstats->discard.nwid = le32_to_cpu(stats->rx_drop_other_bssid); + wstats->discard.code = le32_to_cpu(stats->rx_drop_decode); + wstats->discard.fragment = le32_to_cpu(stats->rx_drop_reassembly); + wstats->discard.retries = le32_to_cpu(stats->tx_drop_max_retry); + + wstats->miss.beacon = le32_to_cpu(stats->missed_beacons); + + /* according to cfg80211 */ + if (stats->rssi_dbm < -110) + wstats->qual.qual = 0; + else if (stats->rssi_dbm > -40) + wstats->qual.qual = 70; + else + wstats->qual.qual = stats->rssi_dbm + 110; + + wstats->qual.level = stats->rssi_dbm; + wstats->qual.noise = stats->noise_dbm; + wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; + + schedule_delayed_work(&iwm->stats_request, IWM_STATS_UPDATE_INTERVAL); + + mod_timer(&iwm->watchdog, round_jiffies(jiffies + IWM_WATCHDOG_PERIOD)); + + return 0; +} + +static int iwm_ntf_eeprom_proxy(struct iwm_priv *iwm, u8 *buf, + 
unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_cmd_eeprom_proxy *eeprom_proxy = + (struct iwm_umac_cmd_eeprom_proxy *) + (buf + sizeof(struct iwm_umac_wifi_in_hdr)); + struct iwm_umac_cmd_eeprom_proxy_hdr *hdr = &eeprom_proxy->hdr; + u32 hdr_offset = le32_to_cpu(hdr->offset); + u32 hdr_len = le32_to_cpu(hdr->len); + u32 hdr_type = le32_to_cpu(hdr->type); + + IWM_DBG_NTF(iwm, DBG, "type: 0x%x, len: %d, offset: 0x%x\n", + hdr_type, hdr_len, hdr_offset); + + if ((hdr_offset + hdr_len) > IWM_EEPROM_LEN) + return -EINVAL; + + switch (hdr_type) { + case IWM_UMAC_CMD_EEPROM_TYPE_READ: + memcpy(iwm->eeprom + hdr_offset, eeprom_proxy->buf, hdr_len); + break; + case IWM_UMAC_CMD_EEPROM_TYPE_WRITE: + default: + return -ENOTSUPP; + } + + return 0; +} + +static int iwm_ntf_channel_info_list(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_cmd_get_channel_list *ch_list = + (struct iwm_umac_cmd_get_channel_list *) + (buf + sizeof(struct iwm_umac_wifi_in_hdr)); + struct wiphy *wiphy = iwm_to_wiphy(iwm); + struct ieee80211_supported_band *band; + int i; + + band = wiphy->bands[IEEE80211_BAND_2GHZ]; + + for (i = 0; i < band->n_channels; i++) { + unsigned long ch_mask_0 = + le32_to_cpu(ch_list->ch[0].channels_mask); + unsigned long ch_mask_2 = + le32_to_cpu(ch_list->ch[2].channels_mask); + + if (!test_bit(i, &ch_mask_0)) + band->channels[i].flags |= IEEE80211_CHAN_DISABLED; + + if (!test_bit(i, &ch_mask_2)) + band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS; + } + + band = wiphy->bands[IEEE80211_BAND_5GHZ]; + + for (i = 0; i < min(band->n_channels, 32); i++) { + unsigned long ch_mask_1 = + le32_to_cpu(ch_list->ch[1].channels_mask); + unsigned long ch_mask_3 = + le32_to_cpu(ch_list->ch[3].channels_mask); + + if (!test_bit(i, &ch_mask_1)) + band->channels[i].flags |= IEEE80211_CHAN_DISABLED; + + if (!test_bit(i, &ch_mask_3)) + band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS; + } + + return 0; +} + +static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_notif_stop_resume_tx *stp_res_tx = + (struct iwm_umac_notif_stop_resume_tx *)buf; + struct iwm_sta_info *sta_info; + struct iwm_tid_info *tid_info; + u8 sta_id = STA_ID_N_COLOR_ID(stp_res_tx->sta_id); + u16 tid_msk = le16_to_cpu(stp_res_tx->stop_resume_tid_msk); + int bit, ret = 0; + bool stop = false; + + IWM_DBG_NTF(iwm, DBG, "stop/resume notification:\n" + "\tflags: 0x%x\n" + "\tSTA id: %d\n" + "\tTID bitmask: 0x%x\n", + stp_res_tx->flags, stp_res_tx->sta_id, + stp_res_tx->stop_resume_tid_msk); + + if (stp_res_tx->flags & UMAC_STOP_TX_FLAG) + stop = true; + + sta_info = &iwm->sta_table[sta_id]; + if (!sta_info->valid) { + IWM_ERR(iwm, "Stoping an invalid STA: %d %d\n", + sta_id, stp_res_tx->sta_id); + return -EINVAL; + } + + for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) { + tid_info = &sta_info->tid_info[bit]; + + mutex_lock(&tid_info->mutex); + tid_info->stopped = stop; + mutex_unlock(&tid_info->mutex); + + if (!stop) { + struct iwm_tx_queue *txq; + int queue = iwm_tid_to_queue(bit); + + if (queue < 0) + continue; + + txq = &iwm->txq[queue]; + /* + * If we resume, we have to move our SKBs + * back to the tx queue and queue some work. 
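Editor's note: iwm_ntf_stop_resume_tx() above walks every TID set in the notification bitmask, marks the matching tid_info stopped or resumed, and on resume splices the parked SKBs back onto the TX queue (continued right below). The standalone sketch that follows models only the bitmask walk and the per-TID queue lookup; the queue table is the one from netdev.c, while the mask value and everything else are illustrative.

/* Standalone model of walking a stop/resume TID bitmask, as done by
 * iwm_ntf_stop_resume_tx(). Illustrative only. */
#include <stdio.h>

static const unsigned short prio_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };

static void handle_stop_resume(unsigned int tid_msk, int stop)
{
	unsigned int tid;

	for (tid = 0; tid < 8; tid++) {
		if (!(tid_msk & (1u << tid)))
			continue;	/* TID not referenced by this notification */

		printf("TID %u on queue %u: %s\n", tid, prio_to_queue[tid],
		       stop ? "stopped" : "resumed");
		/* In the driver, the resume path would also splice the
		 * parked SKBs back onto the TX queue and kick its worker. */
	}
}

int main(void)
{
	handle_stop_resume(0x0041, 1);	/* stop TIDs 0 and 6 */
	handle_stop_resume(0x0041, 0);	/* resume them */
	return 0;
}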
+ */ + spin_lock_bh(&txq->lock); + skb_queue_splice_init(&txq->queue, &txq->stopped_queue); + spin_unlock_bh(&txq->lock); + + queue_work(txq->wq, &txq->worker); + } + + } + + /* We send an ACK only for the stop case */ + if (stop) + ret = iwm_send_umac_stop_resume_tx(iwm, stp_res_tx); + + return ret; +} + +static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + struct iwm_umac_wifi_if *hdr; + + if (cmd == NULL) { + IWM_ERR(iwm, "Couldn't find expected wifi command\n"); + return -EINVAL; + } + + hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload; + + IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: " + "oid is 0x%x\n", hdr->oid); + + if (hdr->oid <= WIFI_IF_NTFY_MAX) { + set_bit(hdr->oid, &iwm->wifi_ntfy[0]); + wake_up_interruptible(&iwm->wifi_ntfy_queue); + } else + return -EINVAL; + + switch (hdr->oid) { + case UMAC_WIFI_IF_CMD_SET_PROFILE: + iwm->umac_profile_active = 1; + break; + default: + break; + } + + return 0; +} + +#define CT_KILL_DELAY (30 * HZ) +static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size, struct iwm_wifi_cmd *cmd) +{ + struct wiphy *wiphy = iwm_to_wiphy(iwm); + struct iwm_lmac_card_state *state = (struct iwm_lmac_card_state *) + (buf + sizeof(struct iwm_umac_wifi_in_hdr)); + u32 flags = le32_to_cpu(state->flags); + + IWM_INFO(iwm, "HW RF Kill %s, CT Kill %s\n", + flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF", + flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF"); + + if (flags & IWM_CARD_STATE_CTKILL_DISABLED) { + /* + * We got a CTKILL event: We bring the interface down in + * oder to cool the device down, and try to bring it up + * 30 seconds later. If it's still too hot, we'll go through + * this code path again. + */ + cancel_delayed_work_sync(&iwm->ct_kill_delay); + schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY); + } + + wiphy_rfkill_set_hw_state(wiphy, flags & + (IWM_CARD_STATE_HW_DISABLED | + IWM_CARD_STATE_CTKILL_DISABLED)); + + return 0; +} + +static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size) +{ + struct iwm_umac_wifi_in_hdr *wifi_hdr; + struct iwm_wifi_cmd *cmd; + u8 source, cmd_id; + u16 seq_num; + u32 count; + u8 resp; + + wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf; + cmd_id = wifi_hdr->sw_hdr.cmd.cmd; + + source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE); + if (source >= IWM_SRC_NUM) { + IWM_CRIT(iwm, "invalid source %d\n", source); + return -EINVAL; + } + + count = (GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT)); + count += sizeof(struct iwm_umac_wifi_in_hdr) - + sizeof(struct iwm_dev_cmd_hdr); + if (count > buf_size) { + IWM_CRIT(iwm, "count %d, buf size:%ld\n", count, buf_size); + return -EINVAL; + } + + resp = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_STATUS); + + seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num); + + IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n", + cmd_id, source, seq_num); + + /* + * If this is a response to a previously sent command, there must + * be a pending command for this sequence number. + */ + cmd = iwm_get_pending_wifi_cmd(iwm, seq_num); + + /* Notify the caller only for sync commands. 
*/ + switch (source) { + case UMAC_HDI_IN_SOURCE_FHRX: + if (iwm->lmac_handlers[cmd_id] && + test_bit(cmd_id, &iwm->lmac_handler_map[0])) + return iwm_notif_send(iwm, cmd, cmd_id, source, + buf, count); + break; + case UMAC_HDI_IN_SOURCE_FW: + if (iwm->umac_handlers[cmd_id] && + test_bit(cmd_id, &iwm->umac_handler_map[0])) + return iwm_notif_send(iwm, cmd, cmd_id, source, + buf, count); + break; + case UMAC_HDI_IN_SOURCE_UDMA: + break; + } + + return iwm_rx_handle_resp(iwm, buf, count, cmd); +} + +int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size, + struct iwm_wifi_cmd *cmd) +{ + u8 source, cmd_id; + struct iwm_umac_wifi_in_hdr *wifi_hdr; + int ret = 0; + + wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf; + cmd_id = wifi_hdr->sw_hdr.cmd.cmd; + + source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE); + + IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x\n", cmd_id, source); + + switch (source) { + case UMAC_HDI_IN_SOURCE_FHRX: + if (iwm->lmac_handlers[cmd_id]) + ret = iwm->lmac_handlers[cmd_id] + (iwm, buf, buf_size, cmd); + break; + case UMAC_HDI_IN_SOURCE_FW: + if (iwm->umac_handlers[cmd_id]) + ret = iwm->umac_handlers[cmd_id] + (iwm, buf, buf_size, cmd); + break; + case UMAC_HDI_IN_SOURCE_UDMA: + ret = -EINVAL; + break; + } + + kfree(cmd); + + return ret; +} + +static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size) +{ + u8 seq_num; + struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf; + struct iwm_nonwifi_cmd *cmd, *next; + + seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM); + + /* + * We received a non wifi answer. + * Let's check if there's a pending command for it, and if so + * replace the command payload with the buffer, and then wake the + * callers up. + * That means we only support synchronised non wifi command response + * schemes. + */ + list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending) + if (cmd->seq_num == seq_num) { + cmd->resp_received = 1; + cmd->buf.len = buf_size; + memcpy(cmd->buf.hdr, buf, buf_size); + wake_up_interruptible(&iwm->nonwifi_queue); + } + + return 0; +} + +static int iwm_rx_handle_umac(struct iwm_priv *iwm, u8 *buf, + unsigned long buf_size) +{ + int ret = 0; + u8 op_code; + unsigned long buf_offset = 0; + struct iwm_udma_in_hdr *hdr; + + /* + * To allow for a more efficient bus usage, UMAC + * messages are encapsulated into UDMA ones. This + * way we can have several UMAC messages in one bus + * transfer. + * A UDMA frame size is always aligned on 16 bytes, + * and a UDMA frame must not start with a UMAC_PAD_TERMINAL + * word. This is how we parse a bus frame into several + * UDMA ones. 
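Editor's note: the loop that follows walks the bus transfer one UDMA sub-frame at a time: each sub-frame is the header plus its payload rounded up to 16 bytes, and a pad-terminal value in the cmd or size word marks the end of useful data. Below is a userspace model of that walk; the header layout and the terminal constant are simplified stand-ins, not the real wire format.

/* Userspace model of walking 16-byte-aligned sub-frames inside one bus
 * transfer, stopping at a pad-terminal marker. Layout and constants are
 * illustrative stand-ins for the UDMA wire format. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PAD_TERMINAL	0xadadadadu	/* stand-in for UMAC_PAD_TERMINAL */
#define FRAME_ALIGN	16u

struct udma_hdr {
	uint32_t cmd;
	uint32_t size;			/* payload bytes, header excluded */
};

static size_t frame_size(const struct udma_hdr *hdr)
{
	size_t raw = sizeof(*hdr) + hdr->size;

	/* Round up to the next 16-byte boundary, like iwm_rx_resp_size(). */
	return (raw + FRAME_ALIGN - 1) & ~(size_t)(FRAME_ALIGN - 1);
}

static void walk(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct udma_hdr) <= len) {
		struct udma_hdr hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.cmd == PAD_TERMINAL || hdr.size == PAD_TERMINAL)
			break;		/* rest of the transfer is padding */

		printf("sub-frame at %zu: cmd 0x%x, %u payload bytes\n",
		       off, (unsigned)hdr.cmd, (unsigned)hdr.size);
		off += frame_size(&hdr);
	}
}

int main(void)
{
	uint8_t buf[64];
	struct udma_hdr h1 = { 0x1, 6 }, h2 = { 0x2, 3 };
	struct udma_hdr end = { PAD_TERMINAL, PAD_TERMINAL };

	memset(buf, 0, sizeof(buf));
	memcpy(buf, &h1, sizeof(h1));
	memcpy(buf + 16, &h2, sizeof(h2));
	memcpy(buf + 32, &end, sizeof(end));
	walk(buf, sizeof(buf));
	return 0;
}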
+ */ + while (buf_offset < buf_size) { + + hdr = (struct iwm_udma_in_hdr *)(buf + buf_offset); + + if (iwm_rx_check_udma_hdr(hdr) < 0) { + IWM_DBG_RX(iwm, DBG, "End of frame\n"); + break; + } + + op_code = GET_VAL32(hdr->cmd, UMAC_HDI_IN_CMD_OPCODE); + + IWM_DBG_RX(iwm, DBG, "Op code: 0x%x\n", op_code); + + if (op_code == UMAC_HDI_IN_OPCODE_WIFI) { + ret |= iwm_rx_handle_wifi(iwm, buf + buf_offset, + buf_size - buf_offset); + } else if (op_code < UMAC_HDI_IN_OPCODE_NONWIFI_MAX) { + if (GET_VAL32(hdr->cmd, + UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) != + UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) { + IWM_ERR(iwm, "Incorrect hw signature\n"); + return -EINVAL; + } + ret |= iwm_rx_handle_nonwifi(iwm, buf + buf_offset, + buf_size - buf_offset); + } else { + IWM_ERR(iwm, "Invalid RX opcode: 0x%x\n", op_code); + ret |= -EINVAL; + } + + buf_offset += iwm_rx_resp_size(hdr); + } + + return ret; +} + +int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size) +{ + struct iwm_udma_in_hdr *hdr; + + hdr = (struct iwm_udma_in_hdr *)buf; + + switch (le32_to_cpu(hdr->cmd)) { + case UMAC_REBOOT_BARKER: + if (test_bit(IWM_STATUS_READY, &iwm->status)) { + IWM_ERR(iwm, "Unexpected BARKER\n"); + + schedule_work(&iwm->reset_worker); + + return 0; + } + + return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION, + IWM_SRC_UDMA, buf, buf_size); + case UMAC_ACK_BARKER: + return iwm_notif_send(iwm, NULL, IWM_ACK_BARKER_NOTIFICATION, + IWM_SRC_UDMA, NULL, 0); + default: + IWM_DBG_RX(iwm, DBG, "Received cmd: 0x%x\n", hdr->cmd); + return iwm_rx_handle_umac(iwm, buf, buf_size); + } + + return 0; +} + +static const iwm_handler iwm_umac_handlers[] = +{ + [UMAC_NOTIFY_OPCODE_ERROR] = iwm_ntf_error, + [UMAC_NOTIFY_OPCODE_ALIVE] = iwm_ntf_umac_alive, + [UMAC_NOTIFY_OPCODE_INIT_COMPLETE] = iwm_ntf_init_complete, + [UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS] = iwm_ntf_wifi_status, + [UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_mlme, + [UMAC_NOTIFY_OPCODE_PAGE_DEALLOC] = iwm_ntf_tx_credit_update, + [UMAC_NOTIFY_OPCODE_RX_TICKET] = iwm_ntf_rx_ticket, + [UMAC_CMD_OPCODE_RESET] = iwm_ntf_umac_reset, + [UMAC_NOTIFY_OPCODE_STATS] = iwm_ntf_statistics, + [UMAC_CMD_OPCODE_EEPROM_PROXY] = iwm_ntf_eeprom_proxy, + [UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST] = iwm_ntf_channel_info_list, + [UMAC_CMD_OPCODE_STOP_RESUME_STA_TX] = iwm_ntf_stop_resume_tx, + [REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet, + [UMAC_CMD_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_wifi_if_wrapper, +}; + +static const iwm_handler iwm_lmac_handlers[] = +{ + [REPLY_TX] = iwm_ntf_tx, + [REPLY_ALIVE] = iwm_ntf_lmac_version, + [CALIBRATION_RES_NOTIFICATION] = iwm_ntf_calib_res, + [CALIBRATION_COMPLETE_NOTIFICATION] = iwm_ntf_calib_complete, + [CALIBRATION_CFG_CMD] = iwm_ntf_calib_cfg, + [REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet, + [CARD_STATE_NOTIFICATION] = iwm_ntf_card_state, +}; + +void iwm_rx_setup_handlers(struct iwm_priv *iwm) +{ + iwm->umac_handlers = (iwm_handler *) iwm_umac_handlers; + iwm->lmac_handlers = (iwm_handler *) iwm_lmac_handlers; +} + +static void iwm_remove_iv(struct sk_buff *skb, u32 hdr_total_len) +{ + struct ieee80211_hdr *hdr; + unsigned int hdr_len; + + hdr = (struct ieee80211_hdr *)skb->data; + + if (!ieee80211_has_protected(hdr->frame_control)) + return; + + hdr_len = ieee80211_hdrlen(hdr->frame_control); + if (hdr_total_len <= hdr_len) + return; + + memmove(skb->data + (hdr_total_len - hdr_len), skb->data, hdr_len); + skb_pull(skb, (hdr_total_len - hdr_len)); +} + +static void iwm_rx_adjust_packet(struct iwm_priv *iwm, + struct iwm_rx_packet *packet, + struct 
iwm_rx_ticket_node *ticket_node) +{ + u32 payload_offset = 0, payload_len; + struct iwm_rx_ticket *ticket = ticket_node->ticket; + struct iwm_rx_mpdu_hdr *mpdu_hdr; + struct ieee80211_hdr *hdr; + + mpdu_hdr = (struct iwm_rx_mpdu_hdr *)packet->skb->data; + payload_offset += sizeof(struct iwm_rx_mpdu_hdr); + /* Padding is 0 or 2 bytes */ + payload_len = le16_to_cpu(mpdu_hdr->len) + + (le16_to_cpu(ticket->flags) & IWM_RX_TICKET_PAD_SIZE_MSK); + payload_len -= ticket->tail_len; + + IWM_DBG_RX(iwm, DBG, "Packet adjusted, len:%d, offset:%d, " + "ticket offset:%d ticket tail len:%d\n", + payload_len, payload_offset, ticket->payload_offset, + ticket->tail_len); + + IWM_HEXDUMP(iwm, DBG, RX, "RAW: ", packet->skb->data, packet->skb->len); + + skb_pull(packet->skb, payload_offset); + skb_trim(packet->skb, payload_len); + + iwm_remove_iv(packet->skb, ticket->payload_offset); + + hdr = (struct ieee80211_hdr *) packet->skb->data; + if (ieee80211_is_data_qos(hdr->frame_control)) { + /* UMAC handed QOS_DATA frame with 2 padding bytes appended + * to the qos_ctl field in IEEE 802.11 headers. */ + memmove(packet->skb->data + IEEE80211_QOS_CTL_LEN + 2, + packet->skb->data, + ieee80211_hdrlen(hdr->frame_control) - + IEEE80211_QOS_CTL_LEN); + hdr = (struct ieee80211_hdr *) skb_pull(packet->skb, + IEEE80211_QOS_CTL_LEN + 2); + hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); + } + + IWM_HEXDUMP(iwm, DBG, RX, "ADJUSTED: ", + packet->skb->data, packet->skb->len); +} + +static void classify8023(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + /* frame has qos control */ + skb->priority = *qc & IEEE80211_QOS_CTL_TID_MASK; + } else { + skb->priority = 0; + } +} + +static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb) +{ + struct wireless_dev *wdev = iwm_to_wdev(iwm); + struct net_device *ndev = iwm_to_ndev(iwm); + struct sk_buff_head list; + struct sk_buff *frame; + + IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len); + + __skb_queue_head_init(&list); + ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0); + + while ((frame = __skb_dequeue(&list))) { + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += frame->len; + + frame->protocol = eth_type_trans(frame, ndev); + frame->ip_summed = CHECKSUM_NONE; + memset(frame->cb, 0, sizeof(frame->cb)); + + if (netif_rx_ni(frame) == NET_RX_DROP) { + IWM_ERR(iwm, "Packet dropped\n"); + ndev->stats.rx_dropped++; + } + } +} + +static void iwm_rx_process_packet(struct iwm_priv *iwm, + struct iwm_rx_packet *packet, + struct iwm_rx_ticket_node *ticket_node) +{ + int ret; + struct sk_buff *skb = packet->skb; + struct wireless_dev *wdev = iwm_to_wdev(iwm); + struct net_device *ndev = iwm_to_ndev(iwm); + + IWM_DBG_RX(iwm, DBG, "Processing packet ID %d\n", packet->id); + + switch (le16_to_cpu(ticket_node->ticket->action)) { + case IWM_RX_TICKET_RELEASE: + IWM_DBG_RX(iwm, DBG, "RELEASE packet\n"); + + iwm_rx_adjust_packet(iwm, packet, ticket_node); + skb->dev = iwm_to_ndev(iwm); + classify8023(skb); + + if (le16_to_cpu(ticket_node->ticket->flags) & + IWM_RX_TICKET_AMSDU_MSK) { + iwm_rx_process_amsdu(iwm, skb); + break; + } + + ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype); + if (ret < 0) { + IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - " + "%d\n", ret); + kfree_skb(packet->skb); + break; + } + + IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len); + + 
ndev->stats.rx_packets++; + ndev->stats.rx_bytes += skb->len; + + skb->protocol = eth_type_trans(skb, ndev); + skb->ip_summed = CHECKSUM_NONE; + memset(skb->cb, 0, sizeof(skb->cb)); + + if (netif_rx_ni(skb) == NET_RX_DROP) { + IWM_ERR(iwm, "Packet dropped\n"); + ndev->stats.rx_dropped++; + } + break; + case IWM_RX_TICKET_DROP: + IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n", + le16_to_cpu(ticket_node->ticket->flags)); + kfree_skb(packet->skb); + break; + default: + IWM_ERR(iwm, "Unknown ticket action: %d\n", + le16_to_cpu(ticket_node->ticket->action)); + kfree_skb(packet->skb); + } + + kfree(packet); + iwm_rx_ticket_node_free(ticket_node); +} + +/* + * Rx data processing: + * + * We're receiving Rx packet from the LMAC, and Rx ticket from + * the UMAC. + * To forward a target data packet upstream (i.e. to the + * kernel network stack), we must have received an Rx ticket + * that tells us we're allowed to release this packet (ticket + * action is IWM_RX_TICKET_RELEASE). The Rx ticket also indicates, + * among other things, where valid data actually starts in the Rx + * packet. + */ +void iwm_rx_worker(struct work_struct *work) +{ + struct iwm_priv *iwm; + struct iwm_rx_ticket_node *ticket, *next; + + iwm = container_of(work, struct iwm_priv, rx_worker); + + /* + * We go through the tickets list and if there is a pending + * packet for it, we push it upstream. + * We stop whenever a ticket is missing its packet, as we're + * supposed to send the packets in order. + */ + list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) { + struct iwm_rx_packet *packet = + iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id)); + + if (!packet) { + IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d " + "to be handled first\n", + le16_to_cpu(ticket->ticket->id)); + return; + } + + list_del(&ticket->node); + list_del(&packet->node); + iwm_rx_process_packet(iwm, packet, ticket); + } +} + diff --git a/drivers/net/wireless/iwmc3200wifi/rx.h b/drivers/net/wireless/iwmc3200wifi/rx.h new file mode 100644 index 0000000..da0db91 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/rx.h @@ -0,0 +1,60 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#ifndef __IWM_RX_H__ +#define __IWM_RX_H__ + +#include + +#include "umac.h" + +struct iwm_rx_ticket_node { + struct list_head node; + struct iwm_rx_ticket *ticket; +}; + +struct iwm_rx_packet { + struct list_head node; + u16 id; + struct sk_buff *skb; + unsigned long pkt_size; +}; + +void iwm_rx_worker(struct work_struct *work); + +#endif diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c new file mode 100644 index 0000000..a7ec7ea --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/sdio.c @@ -0,0 +1,528 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +/* + * This is the SDIO bus specific hooks for iwm. + * It also is the module's entry point. 
+ * + * Interesting code paths: + * iwm_sdio_probe() (Called by an SDIO bus scan) + * -> iwm_if_alloc() (netdev.c) + * -> iwm_wdev_alloc() (cfg80211.c, allocates and register our wiphy) + * -> wiphy_new() + * -> wiphy_register() + * -> alloc_netdev_mq() + * -> register_netdev() + * + * iwm_sdio_remove() + * -> iwm_if_free() (netdev.c) + * -> unregister_netdev() + * -> iwm_wdev_free() (cfg80211.c) + * -> wiphy_unregister() + * -> wiphy_free() + * + * iwm_sdio_isr() (called in process context from the SDIO core code) + * -> queue_work(.., isr_worker) + * -- [async] --> iwm_sdio_isr_worker() + * -> iwm_rx_handle() + */ + +#include +#include +#include +#include +#include +#include + +#include "iwm.h" +#include "debug.h" +#include "bus.h" +#include "sdio.h" + +static void iwm_sdio_isr_worker(struct work_struct *work) +{ + struct iwm_sdio_priv *hw; + struct iwm_priv *iwm; + struct iwm_rx_info *rx_info; + struct sk_buff *skb; + u8 *rx_buf; + unsigned long rx_size; + + hw = container_of(work, struct iwm_sdio_priv, isr_worker); + iwm = hw_to_iwm(hw); + + while (!skb_queue_empty(&iwm->rx_list)) { + skb = skb_dequeue(&iwm->rx_list); + rx_info = skb_to_rx_info(skb); + rx_size = rx_info->rx_size; + rx_buf = skb->data; + + IWM_HEXDUMP(iwm, DBG, SDIO, "RX: ", rx_buf, rx_size); + if (iwm_rx_handle(iwm, rx_buf, rx_size) < 0) + IWM_WARN(iwm, "RX error\n"); + + kfree_skb(skb); + } +} + +static void iwm_sdio_isr(struct sdio_func *func) +{ + struct iwm_priv *iwm; + struct iwm_sdio_priv *hw; + struct iwm_rx_info *rx_info; + struct sk_buff *skb; + unsigned long buf_size, read_size; + int ret; + u8 val; + + hw = sdio_get_drvdata(func); + iwm = hw_to_iwm(hw); + + buf_size = hw->blk_size; + + /* We're checking the status */ + val = sdio_readb(func, IWM_SDIO_INTR_STATUS_ADDR, &ret); + if (val == 0 || ret < 0) { + IWM_ERR(iwm, "Wrong INTR_STATUS\n"); + return; + } + + /* See if we have free buffers */ + if (skb_queue_len(&iwm->rx_list) > IWM_RX_LIST_SIZE) { + IWM_ERR(iwm, "No buffer for more Rx frames\n"); + return; + } + + /* We first read the transaction size */ + read_size = sdio_readb(func, IWM_SDIO_INTR_GET_SIZE_ADDR + 1, &ret); + read_size = read_size << 8; + + if (ret < 0) { + IWM_ERR(iwm, "Couldn't read the xfer size\n"); + return; + } + + /* We need to clear the INT register */ + sdio_writeb(func, 1, IWM_SDIO_INTR_CLEAR_ADDR, &ret); + if (ret < 0) { + IWM_ERR(iwm, "Couldn't clear the INT register\n"); + return; + } + + while (buf_size < read_size) + buf_size <<= 1; + + skb = dev_alloc_skb(buf_size); + if (!skb) { + IWM_ERR(iwm, "Couldn't alloc RX skb\n"); + return; + } + rx_info = skb_to_rx_info(skb); + rx_info->rx_size = read_size; + rx_info->rx_buf_size = buf_size; + + /* Now we can read the actual buffer */ + ret = sdio_memcpy_fromio(func, skb_put(skb, read_size), + IWM_SDIO_DATA_ADDR, read_size); + + /* The skb is put on a driver's specific Rx SKB list */ + skb_queue_tail(&iwm->rx_list, skb); + + /* We can now schedule the actual worker */ + queue_work(hw->isr_wq, &hw->isr_worker); +} + +static void iwm_sdio_rx_free(struct iwm_sdio_priv *hw) +{ + struct iwm_priv *iwm = hw_to_iwm(hw); + + flush_workqueue(hw->isr_wq); + + skb_queue_purge(&iwm->rx_list); +} + +/* Bus ops */ +static int if_sdio_enable(struct iwm_priv *iwm) +{ + struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); + int ret; + + sdio_claim_host(hw->func); + + ret = sdio_enable_func(hw->func); + if (ret) { + IWM_ERR(iwm, "Couldn't enable the device: is TOP driver " + "loaded and functional?\n"); + goto release_host; + } + + iwm_reset(iwm); + + ret 
= sdio_claim_irq(hw->func, iwm_sdio_isr); + if (ret) { + IWM_ERR(iwm, "Failed to claim irq: %d\n", ret); + goto release_host; + } + + sdio_writeb(hw->func, 1, IWM_SDIO_INTR_ENABLE_ADDR, &ret); + if (ret < 0) { + IWM_ERR(iwm, "Couldn't enable INTR: %d\n", ret); + goto release_irq; + } + + sdio_release_host(hw->func); + + IWM_DBG_SDIO(iwm, INFO, "IWM SDIO enable\n"); + + return 0; + + release_irq: + sdio_release_irq(hw->func); + release_host: + sdio_release_host(hw->func); + + return ret; +} + +static int if_sdio_disable(struct iwm_priv *iwm) +{ + struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); + int ret; + + sdio_claim_host(hw->func); + sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret); + if (ret < 0) + IWM_WARN(iwm, "Couldn't disable INTR: %d\n", ret); + + sdio_release_irq(hw->func); + sdio_disable_func(hw->func); + sdio_release_host(hw->func); + + iwm_sdio_rx_free(hw); + + iwm_reset(iwm); + + IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n"); + + return 0; +} + +static int if_sdio_send_chunk(struct iwm_priv *iwm, u8 *buf, int count) +{ + struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); + int aligned_count = ALIGN(count, hw->blk_size); + int ret; + + if ((unsigned long)buf & 0x3) { + IWM_ERR(iwm, "buf <%p> is not dword aligned\n", buf); + /* TODO: Is this a hardware limitation? use get_unligned */ + return -EINVAL; + } + + sdio_claim_host(hw->func); + ret = sdio_memcpy_toio(hw->func, IWM_SDIO_DATA_ADDR, buf, + aligned_count); + sdio_release_host(hw->func); + + return ret; +} + +/* debugfs hooks */ +static int iwm_debugfs_sdio_open(struct inode *inode, struct file *filp) +{ + filp->private_data = inode->i_private; + return 0; +} + +static ssize_t iwm_debugfs_sdio_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct iwm_priv *iwm = filp->private_data; + struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); + char *buf; + u8 cccr; + int buf_len = 4096, ret; + size_t len = 0; + + if (*ppos != 0) + return 0; + if (count < sizeof(buf)) + return -ENOSPC; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + sdio_claim_host(hw->func); + + cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IOEx, &ret); + if (ret) { + IWM_ERR(iwm, "Could not read SDIO_CCCR_IOEx\n"); + goto err; + } + len += snprintf(buf + len, buf_len - len, "CCCR_IOEx: 0x%x\n", cccr); + + cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IORx, &ret); + if (ret) { + IWM_ERR(iwm, "Could not read SDIO_CCCR_IORx\n"); + goto err; + } + len += snprintf(buf + len, buf_len - len, "CCCR_IORx: 0x%x\n", cccr); + + + cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IENx, &ret); + if (ret) { + IWM_ERR(iwm, "Could not read SDIO_CCCR_IENx\n"); + goto err; + } + len += snprintf(buf + len, buf_len - len, "CCCR_IENx: 0x%x\n", cccr); + + + cccr = sdio_f0_readb(hw->func, SDIO_CCCR_INTx, &ret); + if (ret) { + IWM_ERR(iwm, "Could not read SDIO_CCCR_INTx\n"); + goto err; + } + len += snprintf(buf + len, buf_len - len, "CCCR_INTx: 0x%x\n", cccr); + + + cccr = sdio_f0_readb(hw->func, SDIO_CCCR_ABORT, &ret); + if (ret) { + IWM_ERR(iwm, "Could not read SDIO_CCCR_ABORTx\n"); + goto err; + } + len += snprintf(buf + len, buf_len - len, "CCCR_ABORT: 0x%x\n", cccr); + + cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IF, &ret); + if (ret) { + IWM_ERR(iwm, "Could not read SDIO_CCCR_IF\n"); + goto err; + } + len += snprintf(buf + len, buf_len - len, "CCCR_IF: 0x%x\n", cccr); + + + cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CAPS, &ret); + if (ret) { + IWM_ERR(iwm, "Could not read SDIO_CCCR_CAPS\n"); + goto err; + } + len += snprintf(buf + len, buf_len 
- len, "CCCR_CAPS: 0x%x\n", cccr); + + cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CIS, &ret); + if (ret) { + IWM_ERR(iwm, "Could not read SDIO_CCCR_CIS\n"); + goto err; + } + len += snprintf(buf + len, buf_len - len, "CCCR_CIS: 0x%x\n", cccr); + + ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len); +err: + sdio_release_host(hw->func); + + kfree(buf); + + return ret; +} + +static const struct file_operations iwm_debugfs_sdio_fops = { + .owner = THIS_MODULE, + .open = iwm_debugfs_sdio_open, + .read = iwm_debugfs_sdio_read, +}; + +static int if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir) +{ + int result; + struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); + + hw->cccr_dentry = debugfs_create_file("cccr", 0200, + parent_dir, iwm, + &iwm_debugfs_sdio_fops); + result = PTR_ERR(hw->cccr_dentry); + if (IS_ERR(hw->cccr_dentry) && (result != -ENODEV)) { + IWM_ERR(iwm, "Couldn't create CCCR entry: %d\n", result); + return result; + } + + return 0; +} + +static void if_sdio_debugfs_exit(struct iwm_priv *iwm) +{ + struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); + + debugfs_remove(hw->cccr_dentry); +} + +static struct iwm_if_ops if_sdio_ops = { + .enable = if_sdio_enable, + .disable = if_sdio_disable, + .send_chunk = if_sdio_send_chunk, + .debugfs_init = if_sdio_debugfs_init, + .debugfs_exit = if_sdio_debugfs_exit, + .umac_name = "iwmc3200wifi-umac-sdio.bin", + .calib_lmac_name = "iwmc3200wifi-calib-sdio.bin", + .lmac_name = "iwmc3200wifi-lmac-sdio.bin", +}; +MODULE_FIRMWARE("iwmc3200wifi-umac-sdio.bin"); +MODULE_FIRMWARE("iwmc3200wifi-calib-sdio.bin"); +MODULE_FIRMWARE("iwmc3200wifi-lmac-sdio.bin"); + +static int iwm_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct iwm_priv *iwm; + struct iwm_sdio_priv *hw; + struct device *dev = &func->dev; + int ret; + + /* check if TOP has already initialized the card */ + sdio_claim_host(func); + ret = sdio_enable_func(func); + if (ret) { + dev_err(dev, "wait for TOP to enable the device\n"); + sdio_release_host(func); + return ret; + } + + ret = sdio_set_block_size(func, IWM_SDIO_BLK_SIZE); + + sdio_disable_func(func); + sdio_release_host(func); + + if (ret < 0) { + dev_err(dev, "Failed to set block size: %d\n", ret); + return ret; + } + + iwm = iwm_if_alloc(sizeof(struct iwm_sdio_priv), dev, &if_sdio_ops); + if (IS_ERR(iwm)) { + dev_err(dev, "allocate SDIO interface failed\n"); + return PTR_ERR(iwm); + } + + hw = iwm_private(iwm); + hw->iwm = iwm; + + ret = iwm_debugfs_init(iwm); + if (ret < 0) { + IWM_ERR(iwm, "Debugfs registration failed\n"); + goto if_free; + } + + sdio_set_drvdata(func, hw); + + hw->func = func; + hw->blk_size = IWM_SDIO_BLK_SIZE; + + hw->isr_wq = create_singlethread_workqueue(KBUILD_MODNAME "_sdio"); + if (!hw->isr_wq) { + ret = -ENOMEM; + goto debugfs_exit; + } + + INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker); + + ret = iwm_if_add(iwm); + if (ret) { + dev_err(dev, "add SDIO interface failed\n"); + goto destroy_wq; + } + + dev_info(dev, "IWM SDIO probe\n"); + + return 0; + + destroy_wq: + destroy_workqueue(hw->isr_wq); + debugfs_exit: + iwm_debugfs_exit(iwm); + if_free: + iwm_if_free(iwm); + return ret; +} + +static void iwm_sdio_remove(struct sdio_func *func) +{ + struct iwm_sdio_priv *hw = sdio_get_drvdata(func); + struct iwm_priv *iwm = hw_to_iwm(hw); + struct device *dev = &func->dev; + + iwm_if_remove(iwm); + destroy_workqueue(hw->isr_wq); + iwm_debugfs_exit(iwm); + iwm_if_free(iwm); + + sdio_set_drvdata(func, NULL); + + dev_info(dev, "IWM SDIO remove\n"); + + return; +} 
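+/*
+ * Editor's sketch -- illustrative only, not part of the driver, and the
+ * helper name below is hypothetical. iwm_sdio_isr() above sizes its Rx skb
+ * by reading the high byte of the transfer size (so read_size is always a
+ * multiple of 256) and then doubling the SDIO block size until it covers
+ * read_size; the #if 0 block shows that rounding in isolation.
+ */
+#if 0
+static unsigned long iwm_sdio_rx_buf_size(unsigned long blk_size,
+					   unsigned long read_size)
+{
+	unsigned long buf_size = blk_size;
+
+	/* e.g. blk_size 256, read_size 768: 256 -> 512 -> 1024 */
+	while (buf_size < read_size)
+		buf_size <<= 1;
+
+	return buf_size;
+}
+#endif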
+ +static const struct sdio_device_id iwm_sdio_ids[] = { + /* Global/AGN SKU */ + { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1403) }, + /* BGN SKU */ + { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1408) }, + { /* end: all zeroes */ }, +}; +MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids); + +static struct sdio_driver iwm_sdio_driver = { + .name = "iwm_sdio", + .id_table = iwm_sdio_ids, + .probe = iwm_sdio_probe, + .remove = iwm_sdio_remove, +}; + +static int __init iwm_sdio_init_module(void) +{ + return sdio_register_driver(&iwm_sdio_driver); +} + +static void __exit iwm_sdio_exit_module(void) +{ + sdio_unregister_driver(&iwm_sdio_driver); +} + +module_init(iwm_sdio_init_module); +module_exit(iwm_sdio_exit_module); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR(IWM_COPYRIGHT " " IWM_AUTHOR); diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.h b/drivers/net/wireless/iwmc3200wifi/sdio.h new file mode 100644 index 0000000..aab6b68 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/sdio.h @@ -0,0 +1,64 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#ifndef __IWM_SDIO_H__ +#define __IWM_SDIO_H__ + +#define IWM_SDIO_DATA_ADDR 0x0 +#define IWM_SDIO_INTR_ENABLE_ADDR 0x14 +#define IWM_SDIO_INTR_STATUS_ADDR 0x13 +#define IWM_SDIO_INTR_CLEAR_ADDR 0x13 +#define IWM_SDIO_INTR_GET_SIZE_ADDR 0x2C + +#define IWM_SDIO_BLK_SIZE 256 + +#define iwm_to_if_sdio(i) (struct iwm_sdio_priv *)(iwm->private) + +struct iwm_sdio_priv { + struct sdio_func *func; + struct iwm_priv *iwm; + + struct workqueue_struct *isr_wq; + struct work_struct isr_worker; + + struct dentry *cccr_dentry; + + unsigned int blk_size; +}; + +#endif diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c new file mode 100644 index 0000000..55905f0 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/tx.c @@ -0,0 +1,528 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +/* + * iwm Tx theory of operation: + * + * 1) We receive a 802.3 frame from the stack + * 2) We convert it to a 802.11 frame [iwm_xmit_frame] + * 3) We queue it to its corresponding tx queue [iwm_xmit_frame] + * 4) We schedule the tx worker. There is one worker per tx + * queue. [iwm_xmit_frame] + * 5) The tx worker is scheduled + * 6) We go through every queued skb on the tx queue, and for each + * and every one of them: [iwm_tx_worker] + * a) We check if we have enough Tx credits (see below for a Tx + * credits description) for the frame length. [iwm_tx_worker] + * b) If we do, we aggregate the Tx frame into a UDMA one, by + * concatenating one REPLY_TX command per Tx frame. [iwm_tx_worker] + * c) When we run out of credits, or when we reach the maximum + * concatenation size, we actually send the concatenated UDMA + * frame. [iwm_tx_worker] + * + * When we run out of Tx credits, the skbs are filling the tx queue, + * and eventually we will stop the netdev queue. 
[iwm_tx_worker] + * The tx queue is emptied as we're getting new tx credits, by + * scheduling the tx_worker. [iwm_tx_credit_inc] + * The netdev queue is started again when we have enough tx credits, + * and when our tx queue has some reasonable amout of space available + * (i.e. half of the max size). [iwm_tx_worker] + */ + +#include +#include +#include + +#include "iwm.h" +#include "debug.h" +#include "commands.h" +#include "hal.h" +#include "umac.h" +#include "bus.h" + +#define IWM_UMAC_PAGE_ALLOC_WRAP 0xffff + +#define BYTES_TO_PAGES(n) (1 + ((n) >> ilog2(IWM_UMAC_PAGE_SIZE)) - \ + (((n) & (IWM_UMAC_PAGE_SIZE - 1)) == 0)) + +#define pool_id_to_queue(id) ((id < IWM_TX_CMD_QUEUE) ? id : id - 1) +#define queue_to_pool_id(q) ((q < IWM_TX_CMD_QUEUE) ? q : q + 1) + +/* require to hold tx_credit lock */ +static int iwm_tx_credit_get(struct iwm_tx_credit *tx_credit, int id) +{ + struct pool_entry *pool = &tx_credit->pools[id]; + struct spool_entry *spool = &tx_credit->spools[pool->sid]; + int spool_pages; + + /* number of pages can be taken from spool by this pool */ + spool_pages = spool->max_pages - spool->alloc_pages + + max(pool->min_pages - pool->alloc_pages, 0); + + return min(pool->max_pages - pool->alloc_pages, spool_pages); +} + +static bool iwm_tx_credit_ok(struct iwm_priv *iwm, int id, int nb) +{ + u32 npages = BYTES_TO_PAGES(nb); + + if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id)) + return 1; + + set_bit(id, &iwm->tx_credit.full_pools_map); + + IWM_DBG_TX(iwm, DBG, "LINK: stop txq[%d], available credit: %d\n", + pool_id_to_queue(id), + iwm_tx_credit_get(&iwm->tx_credit, id)); + + return 0; +} + +void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages) +{ + struct pool_entry *pool; + struct spool_entry *spool; + int freed_pages; + int queue; + + BUG_ON(id >= IWM_MACS_OUT_GROUPS); + + pool = &iwm->tx_credit.pools[id]; + spool = &iwm->tx_credit.spools[pool->sid]; + + freed_pages = total_freed_pages - pool->total_freed_pages; + IWM_DBG_TX(iwm, DBG, "Free %d pages for pool[%d]\n", freed_pages, id); + + if (!freed_pages) { + IWM_DBG_TX(iwm, DBG, "No pages are freed by UMAC\n"); + return; + } else if (freed_pages < 0) + freed_pages += IWM_UMAC_PAGE_ALLOC_WRAP + 1; + + if (pool->alloc_pages > pool->min_pages) { + int spool_pages = pool->alloc_pages - pool->min_pages; + spool_pages = min(spool_pages, freed_pages); + spool->alloc_pages -= spool_pages; + } + + pool->alloc_pages -= freed_pages; + pool->total_freed_pages = total_freed_pages; + + IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, " + "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages, + pool->total_freed_pages, pool->sid, spool->alloc_pages); + + if (test_bit(id, &iwm->tx_credit.full_pools_map) && + (pool->alloc_pages < pool->max_pages / 2)) { + clear_bit(id, &iwm->tx_credit.full_pools_map); + + queue = pool_id_to_queue(id); + + IWM_DBG_TX(iwm, DBG, "LINK: start txq[%d], available " + "credit: %d\n", queue, + iwm_tx_credit_get(&iwm->tx_credit, id)); + queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker); + } +} + +static void iwm_tx_credit_dec(struct iwm_priv *iwm, int id, int alloc_pages) +{ + struct pool_entry *pool; + struct spool_entry *spool; + int spool_pages; + + IWM_DBG_TX(iwm, DBG, "Allocate %d pages for pool[%d]\n", + alloc_pages, id); + + BUG_ON(id >= IWM_MACS_OUT_GROUPS); + + pool = &iwm->tx_credit.pools[id]; + spool = &iwm->tx_credit.spools[pool->sid]; + + spool_pages = pool->alloc_pages + alloc_pages - pool->min_pages; + + if (pool->alloc_pages >= pool->min_pages) + 
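+		/* pool is already at or above its preallocated min_pages
+		 * share, so the whole new allocation is charged to the
+		 * super pool as well */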
spool->alloc_pages += alloc_pages; + else if (spool_pages > 0) + spool->alloc_pages += spool_pages; + + pool->alloc_pages += alloc_pages; + + IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, " + "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages, + pool->total_freed_pages, pool->sid, spool->alloc_pages); +} + +int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb) +{ + u32 npages = BYTES_TO_PAGES(nb); + int ret = 0; + + spin_lock(&iwm->tx_credit.lock); + + if (!iwm_tx_credit_ok(iwm, id, nb)) { + IWM_DBG_TX(iwm, DBG, "No credit avaliable for pool[%d]\n", id); + ret = -ENOSPC; + goto out; + } + + iwm_tx_credit_dec(iwm, id, npages); + + out: + spin_unlock(&iwm->tx_credit.lock); + return ret; +} + +/* + * Since we're on an SDIO or USB bus, we are not sharing memory + * for storing to be transmitted frames. The host needs to push + * them upstream. As a consequence there needs to be a way for + * the target to let us know if it can actually take more TX frames + * or not. This is what Tx credits are for. + * + * For each Tx HW queue, we have a Tx pool, and then we have one + * unique super pool (spool), which is actually a global pool of + * all the UMAC pages. + * For each Tx pool we have a min_pages, a max_pages fields, and a + * alloc_pages fields. The alloc_pages tracks the number of pages + * currently allocated from the tx pool. + * Here are the rules to check if given a tx frame we have enough + * tx credits for it: + * 1) We translate the frame length into a number of UMAC pages. + * Let's call them n_pages. + * 2) For the corresponding tx pool, we check if n_pages + + * pool->alloc_pages is higher than pool->min_pages. min_pages + * represent a set of pre-allocated pages on the tx pool. If + * that's the case, then we need to allocate those pages from + * the spool. We can do so until we reach spool->max_pages. + * 3) Each tx pool is not allowed to allocate more than pool->max_pages + * from the spool, so once we're over min_pages, we can allocate + * pages from the spool, but not more than max_pages. + * + * When the tx code path needs to send a tx frame, it checks first + * if it has enough tx credits, following those rules. [iwm_tx_credit_get] + * If it does, it then updates the pool and spool counters and + * then send the frame. [iwm_tx_credit_alloc and iwm_tx_credit_dec] + * On the other side, when the UMAC is done transmitting frames, it + * will send a credit update notification to the host. This is when + * the pool and spool counters gets to be decreased. 
[iwm_tx_credit_inc, + * called from rx.c:iwm_ntf_tx_credit_update] + * + */ +void iwm_tx_credit_init_pools(struct iwm_priv *iwm, + struct iwm_umac_notif_alive *alive) +{ + int i, sid, pool_pages; + + spin_lock(&iwm->tx_credit.lock); + + iwm->tx_credit.pool_nr = le16_to_cpu(alive->page_grp_count); + iwm->tx_credit.full_pools_map = 0; + memset(&iwm->tx_credit.spools[0], 0, sizeof(struct spool_entry)); + + IWM_DBG_TX(iwm, DBG, "Pools number is %d\n", iwm->tx_credit.pool_nr); + + for (i = 0; i < iwm->tx_credit.pool_nr; i++) { + __le32 page_grp_state = alive->page_grp_state[i]; + + iwm->tx_credit.pools[i].id = GET_VAL32(page_grp_state, + UMAC_ALIVE_PAGE_STS_GRP_NUM); + iwm->tx_credit.pools[i].sid = GET_VAL32(page_grp_state, + UMAC_ALIVE_PAGE_STS_SGRP_NUM); + iwm->tx_credit.pools[i].min_pages = GET_VAL32(page_grp_state, + UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE); + iwm->tx_credit.pools[i].max_pages = GET_VAL32(page_grp_state, + UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE); + iwm->tx_credit.pools[i].alloc_pages = 0; + iwm->tx_credit.pools[i].total_freed_pages = 0; + + sid = iwm->tx_credit.pools[i].sid; + pool_pages = iwm->tx_credit.pools[i].min_pages; + + if (iwm->tx_credit.spools[sid].max_pages == 0) { + iwm->tx_credit.spools[sid].id = sid; + iwm->tx_credit.spools[sid].max_pages = + GET_VAL32(page_grp_state, + UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE); + iwm->tx_credit.spools[sid].alloc_pages = 0; + } + + iwm->tx_credit.spools[sid].alloc_pages += pool_pages; + + IWM_DBG_TX(iwm, DBG, "Pool idx: %d, id: %d, sid: %d, capacity " + "min: %d, max: %d, pool alloc: %d, total_free: %d, " + "super poll alloc: %d\n", + i, iwm->tx_credit.pools[i].id, + iwm->tx_credit.pools[i].sid, + iwm->tx_credit.pools[i].min_pages, + iwm->tx_credit.pools[i].max_pages, + iwm->tx_credit.pools[i].alloc_pages, + iwm->tx_credit.pools[i].total_freed_pages, + iwm->tx_credit.spools[sid].alloc_pages); + } + + spin_unlock(&iwm->tx_credit.lock); +} + +#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr) + +static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb, + int pool_id, u8 *buf) +{ + struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf; + struct iwm_udma_wifi_cmd udma_cmd; + struct iwm_umac_cmd umac_cmd; + struct iwm_tx_info *tx_info = skb_to_tx_info(skb); + + udma_cmd.count = cpu_to_le16(skb->len + + sizeof(struct iwm_umac_fw_cmd_hdr)); + /* set EOP to 0 here. iwm_udma_wifi_hdr_set_eop() will be + * called later to set EOP for the last packet. 
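+	 * (iwm_tx_send_concat_packets() marks EOP on the final frame of the
+	 * concatenated buffer right before handing it to the bus.)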
*/ + udma_cmd.eop = 0; + udma_cmd.credit_group = pool_id; + udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid; + udma_cmd.lmac_offset = 0; + + umac_cmd.id = REPLY_TX; + umac_cmd.count = cpu_to_le16(skb->len); + umac_cmd.color = tx_info->color; + umac_cmd.resp = 0; + umac_cmd.seq_num = cpu_to_le16(iwm_alloc_wifi_cmd_seq(iwm)); + + iwm_build_udma_wifi_hdr(iwm, &hdr->hw_hdr, &udma_cmd); + iwm_build_umac_hdr(iwm, &hdr->sw_hdr, &umac_cmd); + + memcpy(buf + sizeof(*hdr), skb->data, skb->len); + + return umac_cmd.seq_num; +} + +static int iwm_tx_send_concat_packets(struct iwm_priv *iwm, + struct iwm_tx_queue *txq) +{ + int ret; + + if (!txq->concat_count) + return 0; + + IWM_DBG_TX(iwm, DBG, "Send concatenated Tx: queue %d, %d bytes\n", + txq->id, txq->concat_count); + + /* mark EOP for the last packet */ + iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1); + + ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count); + + txq->concat_count = 0; + txq->concat_ptr = txq->concat_buf; + + return ret; +} + +void iwm_tx_worker(struct work_struct *work) +{ + struct iwm_priv *iwm; + struct iwm_tx_info *tx_info = NULL; + struct sk_buff *skb; + struct iwm_tx_queue *txq; + struct iwm_sta_info *sta_info; + struct iwm_tid_info *tid_info; + int cmdlen, ret, pool_id; + + txq = container_of(work, struct iwm_tx_queue, worker); + iwm = container_of(txq, struct iwm_priv, txq[txq->id]); + + pool_id = queue_to_pool_id(txq->id); + + while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) && + !skb_queue_empty(&txq->queue)) { + + spin_lock_bh(&txq->lock); + skb = skb_dequeue(&txq->queue); + spin_unlock_bh(&txq->lock); + + tx_info = skb_to_tx_info(skb); + sta_info = &iwm->sta_table[tx_info->sta]; + if (!sta_info->valid) { + IWM_ERR(iwm, "Trying to send a frame to unknown STA\n"); + kfree_skb(skb); + continue; + } + + tid_info = &sta_info->tid_info[tx_info->tid]; + + mutex_lock(&tid_info->mutex); + + /* + * If the RAxTID is stopped, we queue the skb to the stopped + * queue. + * Whenever we'll get a UMAC notification to resume the tx flow + * for this RAxTID, we'll merge back the stopped queue into the + * regular queue. See iwm_ntf_stop_resume_tx() from rx.c. 
+ */ + if (tid_info->stopped) { + IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n", + tx_info->sta, tx_info->tid); + spin_lock_bh(&txq->lock); + skb_queue_tail(&txq->stopped_queue, skb); + spin_unlock_bh(&txq->lock); + + mutex_unlock(&tid_info->mutex); + continue; + } + + cmdlen = IWM_UDMA_HDR_LEN + skb->len; + + IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: " + "%d, color: %d\n", txq->id, skb, tx_info->sta, + tx_info->color); + + if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE) + iwm_tx_send_concat_packets(iwm, txq); + + ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen); + if (ret) { + IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue " + "%d, Tx worker stopped\n", txq->id); + spin_lock_bh(&txq->lock); + skb_queue_head(&txq->queue, skb); + spin_unlock_bh(&txq->lock); + + mutex_unlock(&tid_info->mutex); + break; + } + + txq->concat_ptr = txq->concat_buf + txq->concat_count; + tid_info->last_seq_num = + iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr); + txq->concat_count += ALIGN(cmdlen, 16); + + mutex_unlock(&tid_info->mutex); + + kfree_skb(skb); + } + + iwm_tx_send_concat_packets(iwm, txq); + + if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) && + !test_bit(pool_id, &iwm->tx_credit.full_pools_map) && + (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) { + IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id); + netif_wake_subqueue(iwm_to_ndev(iwm), txq->id); + } +} + +int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct iwm_priv *iwm = ndev_to_iwm(netdev); + struct net_device *ndev = iwm_to_ndev(iwm); + struct wireless_dev *wdev = iwm_to_wdev(iwm); + struct iwm_tx_info *tx_info; + struct iwm_tx_queue *txq; + struct iwm_sta_info *sta_info; + u8 *dst_addr, sta_id; + u16 queue; + int ret; + + + if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) { + IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: " + "not associated\n"); + netif_tx_stop_all_queues(netdev); + goto drop; + } + + queue = skb_get_queue_mapping(skb); + BUG_ON(queue >= IWM_TX_DATA_QUEUES); /* no iPAN yet */ + + txq = &iwm->txq[queue]; + + /* No free space for Tx, tx_worker is too slow */ + if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) || + (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) { + IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue); + netif_stop_subqueue(netdev, queue); + return NETDEV_TX_BUSY; + } + + ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype, + iwm->bssid, 0); + if (ret) { + IWM_ERR(iwm, "build wifi header failed\n"); + goto drop; + } + + dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1; + + for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) { + sta_info = &iwm->sta_table[sta_id]; + if (sta_info->valid && + !memcmp(dst_addr, sta_info->addr, ETH_ALEN)) + break; + } + + if (sta_id == IWM_STA_TABLE_NUM) { + IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n", + dst_addr); + goto drop; + } + + tx_info = skb_to_tx_info(skb); + tx_info->sta = sta_id; + tx_info->color = sta_info->color; + /* UMAC uses TID 8 (vs. 
0) for non QoS packets */ + if (sta_info->qos) + tx_info->tid = skb->priority; + else + tx_info->tid = IWM_UMAC_MGMT_TID; + + spin_lock_bh(&iwm->txq[queue].lock); + skb_queue_tail(&iwm->txq[queue].queue, skb); + spin_unlock_bh(&iwm->txq[queue].lock); + + queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker); + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + return NETDEV_TX_OK; + + drop: + ndev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h new file mode 100644 index 0000000..7f54a14 --- /dev/null +++ b/drivers/net/wireless/iwmc3200wifi/umac.h @@ -0,0 +1,789 @@ +/* + * Intel Wireless Multicomm 3200 WiFi driver + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * + * Intel Corporation + * Samuel Ortiz + * Zhu Yi + * + */ + +#ifndef __IWM_UMAC_H__ +#define __IWM_UMAC_H__ + +struct iwm_udma_in_hdr { + __le32 cmd; + __le32 size; +} __attribute__ ((packed)); + +struct iwm_udma_out_nonwifi_hdr { + __le32 cmd; + __le32 addr; + __le32 op1_sz; + __le32 op2; +} __attribute__ ((packed)); + +struct iwm_udma_out_wifi_hdr { + __le32 cmd; + __le32 meta_data; +} __attribute__ ((packed)); + +/* Sequence numbering */ +#define UMAC_WIFI_SEQ_NUM_BASE 1 +#define UMAC_WIFI_SEQ_NUM_MAX 0x4000 +#define UMAC_NONWIFI_SEQ_NUM_BASE 1 +#define UMAC_NONWIFI_SEQ_NUM_MAX 0x10 + +/* MAC address address */ +#define WICO_MAC_ADDRESS_ADDR 0x604008F8 + +/* RA / TID */ +#define UMAC_HDI_ACT_TBL_IDX_TID_POS 0 +#define UMAC_HDI_ACT_TBL_IDX_TID_SEED 0xF + +#define UMAC_HDI_ACT_TBL_IDX_RA_POS 4 +#define UMAC_HDI_ACT_TBL_IDX_RA_SEED 0xF + +#define UMAC_HDI_ACT_TBL_IDX_RA_UMAC 0xF +#define UMAC_HDI_ACT_TBL_IDX_TID_UMAC 0x9 +#define UMAC_HDI_ACT_TBL_IDX_TID_LMAC 0xA + +#define UMAC_HDI_ACT_TBL_IDX_HOST_CMD \ + ((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\ + (UMAC_HDI_ACT_TBL_IDX_TID_UMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS)) +#define UMAC_HDI_ACT_TBL_IDX_UMAC_CMD \ + ((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\ + (UMAC_HDI_ACT_TBL_IDX_TID_LMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS)) + +/* STA ID and color */ +#define STA_ID_SEED (0x0f) +#define STA_ID_POS (0) +#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS) + +#define STA_COLOR_SEED (0x7) +#define STA_COLOR_POS (4) +#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS) + +#define STA_ID_N_COLOR_COLOR(id_n_color) \ + (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS) +#define STA_ID_N_COLOR_ID(id_n_color) \ + (((id_n_color) & STA_ID_MSK) >> STA_ID_POS) + +/* iwm_umac_notif_alive.page_grp_state Group number -- bits [3:0] */ +#define UMAC_ALIVE_PAGE_STS_GRP_NUM_POS 0 +#define UMAC_ALIVE_PAGE_STS_GRP_NUM_SEED 0xF + +/* iwm_umac_notif_alive.page_grp_state Super group number -- bits [7:4] */ +#define UMAC_ALIVE_PAGE_STS_SGRP_NUM_POS 4 +#define UMAC_ALIVE_PAGE_STS_SGRP_NUM_SEED 0xF + +/* iwm_umac_notif_alive.page_grp_state Group min size -- bits [15:8] */ +#define UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE_POS 8 +#define UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE_SEED 0xFF + +/* iwm_umac_notif_alive.page_grp_state Group max size -- bits [23:16] */ +#define UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE_POS 16 +#define UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE_SEED 0xFF + +/* iwm_umac_notif_alive.page_grp_state Super group max size -- bits [31:24] */ +#define UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE_POS 24 +#define UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE_SEED 0xFF + +/* Barkers */ +#define UMAC_REBOOT_BARKER 0xdeadbeef +#define UMAC_ACK_BARKER 0xfeedbabe +#define UMAC_PAD_TERMINAL 0xadadadad + +/* UMAC JMP address */ +#define UMAC_MU_FW_INST_DATA_12_ADDR 0xBF0000 + +/* iwm_umac_hdi_out_hdr.cmd OP code -- bits [3:0] */ +#define UMAC_HDI_OUT_CMD_OPCODE_POS 0 +#define UMAC_HDI_OUT_CMD_OPCODE_SEED 0xF + +/* iwm_umac_hdi_out_hdr.cmd End-Of-Transfer -- bits [10:10] */ +#define UMAC_HDI_OUT_CMD_EOT_POS 10 +#define UMAC_HDI_OUT_CMD_EOT_SEED 0x1 + +/* iwm_umac_hdi_out_hdr.cmd UTFD only usage -- bits [11:11] */ +#define UMAC_HDI_OUT_CMD_UTFD_ONLY_POS 11 +#define UMAC_HDI_OUT_CMD_UTFD_ONLY_SEED 0x1 + +/* iwm_umac_hdi_out_hdr.cmd Non-WiFi HW sequence number -- bits [12:15] */ +#define UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM_POS 12 +#define UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM_SEED 0xF + +/* iwm_umac_hdi_out_hdr.cmd Signature -- bits [31:16] */ +#define 
UMAC_HDI_OUT_CMD_SIGNATURE_POS 16 +#define UMAC_HDI_OUT_CMD_SIGNATURE_SEED 0xFFFF + +/* iwm_umac_hdi_out_hdr.meta_data Byte count -- bits [11:0] */ +#define UMAC_HDI_OUT_BYTE_COUNT_POS 0 +#define UMAC_HDI_OUT_BYTE_COUNT_SEED 0xFFF + +/* iwm_umac_hdi_out_hdr.meta_data Credit group -- bits [15:12] */ +#define UMAC_HDI_OUT_CREDIT_GRP_POS 12 +#define UMAC_HDI_OUT_CREDIT_GRP_SEED 0xF + +/* iwm_umac_hdi_out_hdr.meta_data RA/TID -- bits [23:16] */ +#define UMAC_HDI_OUT_RATID_POS 16 +#define UMAC_HDI_OUT_RATID_SEED 0xFF + +/* iwm_umac_hdi_out_hdr.meta_data LMAC offset -- bits [31:24] */ +#define UMAC_HDI_OUT_LMAC_OFFSET_POS 24 +#define UMAC_HDI_OUT_LMAC_OFFSET_SEED 0xFF + +/* Signature */ +#define UMAC_HDI_OUT_SIGNATURE 0xCBBC + +/* buffer alignment */ +#define UMAC_HDI_BUF_ALIGN_MSK 0xF + +/* iwm_umac_hdi_in_hdr.cmd OP code -- bits [3:0] */ +#define UMAC_HDI_IN_CMD_OPCODE_POS 0 +#define UMAC_HDI_IN_CMD_OPCODE_SEED 0xF + +/* iwm_umac_hdi_in_hdr.cmd Non-WiFi API response -- bits [6:4] */ +#define UMAC_HDI_IN_CMD_NON_WIFI_RESP_POS 4 +#define UMAC_HDI_IN_CMD_NON_WIFI_RESP_SEED 0x7 + +/* iwm_umac_hdi_in_hdr.cmd WiFi API source -- bits [5:4] */ +#define UMAC_HDI_IN_CMD_SOURCE_POS 4 +#define UMAC_HDI_IN_CMD_SOURCE_SEED 0x3 + +/* iwm_umac_hdi_in_hdr.cmd WiFi API EOT -- bits [6:6] */ +#define UMAC_HDI_IN_CMD_EOT_POS 6 +#define UMAC_HDI_IN_CMD_EOT_SEED 0x1 + +/* iwm_umac_hdi_in_hdr.cmd timestamp present -- bits [7:7] */ +#define UMAC_HDI_IN_CMD_TIME_STAMP_PRESENT_POS 7 +#define UMAC_HDI_IN_CMD_TIME_STAMP_PRESENT_SEED 0x1 + +/* iwm_umac_hdi_in_hdr.cmd WiFi Non-last AMSDU -- bits [8:8] */ +#define UMAC_HDI_IN_CMD_NON_LAST_AMSDU_POS 8 +#define UMAC_HDI_IN_CMD_NON_LAST_AMSDU_SEED 0x1 + +/* iwm_umac_hdi_in_hdr.cmd WiFi HW sequence number -- bits [31:9] */ +#define UMAC_HDI_IN_CMD_HW_SEQ_NUM_POS 9 +#define UMAC_HDI_IN_CMD_HW_SEQ_NUM_SEED 0x7FFFFF + +/* iwm_umac_hdi_in_hdr.cmd Non-WiFi HW sequence number -- bits [12:15] */ +#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM_POS 12 +#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM_SEED 0xF + +/* iwm_umac_hdi_in_hdr.cmd Non-WiFi HW signature -- bits [16:31] */ +#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG_POS 16 +#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG_SEED 0xFFFF + +/* Fixed Non-WiFi signature */ +#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG 0xCBBC + +/* IN NTFY op-codes */ +#define UMAC_NOTIFY_OPCODE_ALIVE 0xA1 +#define UMAC_NOTIFY_OPCODE_INIT_COMPLETE 0xA2 +#define UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS 0xA3 +#define UMAC_NOTIFY_OPCODE_ERROR 0xA4 +#define UMAC_NOTIFY_OPCODE_DEBUG 0xA5 +#define UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER 0xB0 +#define UMAC_NOTIFY_OPCODE_STATS 0xB1 +#define UMAC_NOTIFY_OPCODE_PAGE_DEALLOC 0xB3 +#define UMAC_NOTIFY_OPCODE_RX_TICKET 0xB4 +#define UMAC_NOTIFY_OPCODE_MAX (UMAC_NOTIFY_OPCODE_RX_TICKET -\ + UMAC_NOTIFY_OPCODE_ALIVE + 1) +#define UMAC_NOTIFY_OPCODE_FIRST (UMAC_NOTIFY_OPCODE_ALIVE) + +/* HDI OUT OP CODE */ +#define UMAC_HDI_OUT_OPCODE_PING 0x0 +#define UMAC_HDI_OUT_OPCODE_READ 0x1 +#define UMAC_HDI_OUT_OPCODE_WRITE 0x2 +#define UMAC_HDI_OUT_OPCODE_JUMP 0x3 +#define UMAC_HDI_OUT_OPCODE_REBOOT 0x4 +#define UMAC_HDI_OUT_OPCODE_WRITE_PERSISTENT 0x5 +#define UMAC_HDI_OUT_OPCODE_READ_PERSISTENT 0x6 +#define UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE 0x7 +/* #define UMAC_HDI_OUT_OPCODE_RESERVED 0x8..0xA */ +#define UMAC_HDI_OUT_OPCODE_WRITE_AUX_REG 0xB +#define UMAC_HDI_OUT_OPCODE_WIFI 0xF + +/* HDI IN OP CODE -- Non WiFi*/ +#define UMAC_HDI_IN_OPCODE_PING 0x0 +#define UMAC_HDI_IN_OPCODE_READ 0x1 +#define UMAC_HDI_IN_OPCODE_WRITE 0x2 +#define 
UMAC_HDI_IN_OPCODE_WRITE_PERSISTENT 0x5 +#define UMAC_HDI_IN_OPCODE_READ_PERSISTENT 0x6 +#define UMAC_HDI_IN_OPCODE_READ_MODIFY_WRITE 0x7 +#define UMAC_HDI_IN_OPCODE_EP_MGMT 0x8 +#define UMAC_HDI_IN_OPCODE_CREDIT_CHANGE 0x9 +#define UMAC_HDI_IN_OPCODE_CTRL_DATABASE 0xA +#define UMAC_HDI_IN_OPCODE_WRITE_AUX_REG 0xB +#define UMAC_HDI_IN_OPCODE_NONWIFI_MAX \ + (UMAC_HDI_IN_OPCODE_WRITE_AUX_REG + 1) +#define UMAC_HDI_IN_OPCODE_WIFI 0xF + +/* HDI IN SOURCE */ +#define UMAC_HDI_IN_SOURCE_FHRX 0x0 +#define UMAC_HDI_IN_SOURCE_UDMA 0x1 +#define UMAC_HDI_IN_SOURCE_FW 0x2 +#define UMAC_HDI_IN_SOURCE_RESERVED 0x3 + +/* OUT CMD op-codes */ +#define UMAC_CMD_OPCODE_ECHO 0x01 +#define UMAC_CMD_OPCODE_HALT 0x02 +#define UMAC_CMD_OPCODE_RESET 0x03 +#define UMAC_CMD_OPCODE_BULK_EP_INACT_TIMEOUT 0x09 +#define UMAC_CMD_OPCODE_URB_CANCEL_ACK 0x0A +#define UMAC_CMD_OPCODE_DCACHE_FLUSH 0x0B +#define UMAC_CMD_OPCODE_EEPROM_PROXY 0x0C +#define UMAC_CMD_OPCODE_TX_ECHO 0x0D +#define UMAC_CMD_OPCODE_DBG_MON 0x0E +#define UMAC_CMD_OPCODE_INTERNAL_TX 0x0F +#define UMAC_CMD_OPCODE_SET_PARAM_FIX 0x10 +#define UMAC_CMD_OPCODE_SET_PARAM_VAR 0x11 +#define UMAC_CMD_OPCODE_GET_PARAM 0x12 +#define UMAC_CMD_OPCODE_DBG_EVENT_WRAPPER 0x13 +#define UMAC_CMD_OPCODE_TARGET 0x14 +#define UMAC_CMD_OPCODE_STATISTIC_REQUEST 0x15 +#define UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST 0x16 +#define UMAC_CMD_OPCODE_SET_PARAM_LIST 0x17 +#define UMAC_CMD_OPCODE_GET_PARAM_LIST 0x18 +#define UMAC_CMD_OPCODE_STOP_RESUME_STA_TX 0x19 +#define UMAC_CMD_OPCODE_TEST_BLOCK_ACK 0x1A + +#define UMAC_CMD_OPCODE_BASE_WRAPPER 0xFA +#define UMAC_CMD_OPCODE_LMAC_WRAPPER 0xFB +#define UMAC_CMD_OPCODE_HW_TEST_WRAPPER 0xFC +#define UMAC_CMD_OPCODE_WIFI_IF_WRAPPER 0xFD +#define UMAC_CMD_OPCODE_WIFI_WRAPPER 0xFE +#define UMAC_CMD_OPCODE_WIFI_PASS_THROUGH 0xFF + +/* UMAC WiFi interface op-codes */ +#define UMAC_WIFI_IF_CMD_SET_PROFILE 0x11 +#define UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE 0x12 +#define UMAC_WIFI_IF_CMD_SET_EXCLUDE_LIST 0x13 +#define UMAC_WIFI_IF_CMD_SCAN_REQUEST 0x14 +#define UMAC_WIFI_IF_CMD_SCAN_CONFIG 0x15 +#define UMAC_WIFI_IF_CMD_ADD_WEP40_KEY 0x16 +#define UMAC_WIFI_IF_CMD_ADD_WEP104_KEY 0x17 +#define UMAC_WIFI_IF_CMD_ADD_TKIP_KEY 0x18 +#define UMAC_WIFI_IF_CMD_ADD_CCMP_KEY 0x19 +#define UMAC_WIFI_IF_CMD_REMOVE_KEY 0x1A +#define UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID 0x1B +#define UMAC_WIFI_IF_CMD_SET_HOST_EXTENDED_IE 0x1C +#define UMAC_WIFI_IF_CMD_GET_SUPPORTED_CHANNELS 0x1E +#define UMAC_WIFI_IF_CMD_PMKID_UPDATE 0x1F +#define UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER 0x20 + +/* UMAC WiFi interface ports */ +#define UMAC_WIFI_IF_FLG_PORT_DEF 0x00 +#define UMAC_WIFI_IF_FLG_PORT_PAN 0x01 +#define UMAC_WIFI_IF_FLG_PORT_PAN_INVALID WIFI_IF_FLG_PORT_DEF + +/* UMAC WiFi interface actions */ +#define UMAC_WIFI_IF_FLG_ACT_GET 0x10 +#define UMAC_WIFI_IF_FLG_ACT_SET 0x20 + +/* iwm_umac_fw_cmd_hdr.meta_data byte count -- bits [11:0] */ +#define UMAC_FW_CMD_BYTE_COUNT_POS 0 +#define UMAC_FW_CMD_BYTE_COUNT_SEED 0xFFF + +/* iwm_umac_fw_cmd_hdr.meta_data status -- bits [15:12] */ +#define UMAC_FW_CMD_STATUS_POS 12 +#define UMAC_FW_CMD_STATUS_SEED 0xF + +/* iwm_umac_fw_cmd_hdr.meta_data full TX command by Driver -- bits [16:16] */ +#define UMAC_FW_CMD_TX_DRV_FULL_CMD_POS 16 +#define UMAC_FW_CMD_TX_DRV_FULL_CMD_SEED 0x1 + +/* iwm_umac_fw_cmd_hdr.meta_data TX command by FW -- bits [17:17] */ +#define UMAC_FW_CMD_TX_FW_CMD_POS 17 +#define UMAC_FW_CMD_TX_FW_CMD_SEED 0x1 + +/* iwm_umac_fw_cmd_hdr.meta_data TX plaintext mode -- bits [18:18] */ +#define UMAC_FW_CMD_TX_PLAINTEXT_POS 18 
+#define UMAC_FW_CMD_TX_PLAINTEXT_SEED 0x1 + +/* iwm_umac_fw_cmd_hdr.meta_data STA color -- bits [22:20] */ +#define UMAC_FW_CMD_TX_STA_COLOR_POS 20 +#define UMAC_FW_CMD_TX_STA_COLOR_SEED 0x7 + +/* iwm_umac_fw_cmd_hdr.meta_data TX life time (TU) -- bits [31:24] */ +#define UMAC_FW_CMD_TX_LIFETIME_TU_POS 24 +#define UMAC_FW_CMD_TX_LIFETIME_TU_SEED 0xFF + +/* iwm_dev_cmd_hdr.flags Response required -- bits [5:5] */ +#define UMAC_DEV_CMD_FLAGS_RESP_REQ_POS 5 +#define UMAC_DEV_CMD_FLAGS_RESP_REQ_SEED 0x1 + +/* iwm_dev_cmd_hdr.flags Aborted command -- bits [6:6] */ +#define UMAC_DEV_CMD_FLAGS_ABORT_POS 6 +#define UMAC_DEV_CMD_FLAGS_ABORT_SEED 0x1 + +/* iwm_dev_cmd_hdr.flags Internal command -- bits [7:7] */ +#define DEV_CMD_FLAGS_FLD_INTERNAL_POS 7 +#define DEV_CMD_FLAGS_FLD_INTERNAL_SEED 0x1 + +/* Rx */ +/* Rx actions */ +#define IWM_RX_TICKET_DROP 0x0 +#define IWM_RX_TICKET_RELEASE 0x1 +#define IWM_RX_TICKET_SNIFFER 0x2 +#define IWM_RX_TICKET_ENQUEUE 0x3 + +/* Rx flags */ +#define IWM_RX_TICKET_PAD_SIZE_MSK 0x2 +#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4 +#define IWM_RX_TICKET_AMSDU_MSK 0x8 +#define IWM_RX_TICKET_DROP_REASON_POS 4 +#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << RX_TICKET_FLAGS_DROP_REASON_POS) + +#define IWM_RX_DROP_NO_DROP 0x0 +#define IWM_RX_DROP_BAD_CRC 0x1 +/* L2P no address match */ +#define IWM_RX_DROP_LMAC_ADDR_FILTER 0x2 +/* Multicast address not in list */ +#define IWM_RX_DROP_MCAST_ADDR_FILTER 0x3 +/* Control frames are not sent to the driver */ +#define IWM_RX_DROP_CTL_FRAME 0x4 +/* Our frame is back */ +#define IWM_RX_DROP_OUR_TX 0x5 +/* Association class filtering */ +#define IWM_RX_DROP_CLASS_FILTER 0x6 +/* Duplicated frame */ +#define IWM_RX_DROP_DUPLICATE_FILTER 0x7 +/* Decryption error */ +#define IWM_RX_DROP_SEC_ERR 0x8 +/* Unencrypted frame while encryption is on */ +#define IWM_RX_DROP_SEC_NO_ENCRYPTION 0x9 +/* Replay check failure */ +#define IWM_RX_DROP_SEC_REPLAY_ERR 0xa +/* uCode and FW key color mismatch, check before replay */ +#define IWM_RX_DROP_SEC_KEY_COLOR_MISMATCH 0xb +#define IWM_RX_DROP_SEC_TKIP_COUNTER_MEASURE 0xc +/* No fragmentations Db is found */ +#define IWM_RX_DROP_FRAG_NO_RESOURCE 0xd +/* Fragmention Db has seqCtl mismatch Vs. 
non-1st frag */ +#define IWM_RX_DROP_FRAG_ERR 0xe +#define IWM_RX_DROP_FRAG_LOST 0xf +#define IWM_RX_DROP_FRAG_COMPLETE 0x10 +/* Should be handled by UMAC */ +#define IWM_RX_DROP_MANAGEMENT 0x11 +/* STA not found by UMAC */ +#define IWM_RX_DROP_NO_STATION 0x12 +/* NULL or QoS NULL */ +#define IWM_RX_DROP_NULL_DATA 0x13 +#define IWM_RX_DROP_BA_REORDER_OLD_SEQCTL 0x14 +#define IWM_RX_DROP_BA_REORDER_DUPLICATE 0x15 + +struct iwm_rx_ticket { + __le16 action; + __le16 id; + __le16 flags; + u8 payload_offset; /* includes: MAC header, pad, IV */ + u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */ +} __attribute__ ((packed)); + +struct iwm_rx_mpdu_hdr { + __le16 len; + __le16 reserved; +} __attribute__ ((packed)); + +/* UMAC SW WIFI API */ + +struct iwm_dev_cmd_hdr { + u8 cmd; + u8 flags; + __le16 seq_num; +} __attribute__ ((packed)); + +struct iwm_umac_fw_cmd_hdr { + __le32 meta_data; + struct iwm_dev_cmd_hdr cmd; +} __attribute__ ((packed)); + +struct iwm_umac_wifi_out_hdr { + struct iwm_udma_out_wifi_hdr hw_hdr; + struct iwm_umac_fw_cmd_hdr sw_hdr; +} __attribute__ ((packed)); + +struct iwm_umac_nonwifi_out_hdr { + struct iwm_udma_out_nonwifi_hdr hw_hdr; +} __attribute__ ((packed)); + +struct iwm_umac_wifi_in_hdr { + struct iwm_udma_in_hdr hw_hdr; + struct iwm_umac_fw_cmd_hdr sw_hdr; +} __attribute__ ((packed)); + +struct iwm_umac_nonwifi_in_hdr { + struct iwm_udma_in_hdr hw_hdr; + __le32 time_stamp; +} __attribute__ ((packed)); + +#define IWM_UMAC_PAGE_SIZE 0x200 + +/* Notify structures */ +struct iwm_fw_version { + u8 minor; + u8 major; + __le16 id; +}; + +struct iwm_fw_build { + u8 type; + u8 subtype; + u8 platform; + u8 opt; +}; + +struct iwm_fw_alive_hdr { + struct iwm_fw_version ver; + struct iwm_fw_build build; + __le32 os_build; + __le32 log_hdr_addr; + __le32 log_buf_addr; + __le32 sys_timer_addr; +}; + +#define WAIT_NOTIF_TIMEOUT (2 * HZ) +#define SCAN_COMPLETE_TIMEOUT (3 * HZ) + +#define UMAC_NTFY_ALIVE_STATUS_ERR 0xDEAD +#define UMAC_NTFY_ALIVE_STATUS_OK 0xCAFE + +#define UMAC_NTFY_INIT_COMPLETE_STATUS_ERR 0xDEAD +#define UMAC_NTFY_INIT_COMPLETE_STATUS_OK 0xCAFE + +#define UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN 0x40 +#define UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN 0x80 + +#define IWM_MACS_OUT_GROUPS 6 +#define IWM_MACS_OUT_SGROUPS 1 + + +#define WIFI_IF_NTFY_ASSOC_START 0x80 +#define WIFI_IF_NTFY_ASSOC_COMPLETE 0x81 +#define WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE 0x82 +#define WIFI_IF_NTFY_CONNECTION_TERMINATED 0x83 +#define WIFI_IF_NTFY_SCAN_COMPLETE 0x84 +#define WIFI_IF_NTFY_STA_TABLE_CHANGE 0x85 +#define WIFI_IF_NTFY_EXTENDED_IE_REQUIRED 0x86 +#define WIFI_IF_NTFY_RADIO_PREEMPTION 0x87 +#define WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED 0x88 +#define WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED 0x89 +#define WIFI_IF_NTFY_LINK_QUALITY_STATISTICS 0x8A +#define WIFI_IF_NTFY_MGMT_FRAME 0x8B + +/* DEBUG INDICATIONS */ +#define WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START 0xE0 +#define WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE 0xE1 +#define WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START 0xE2 +#define WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT 0xE3 +#define WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START 0xE4 +#define WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE 0xE5 +#define WIFI_DBG_IF_NTFY_CNCT_ATC_START 0xE6 +#define WIFI_DBG_IF_NTFY_COEX_NOTIFICATION 0xE7 +#define WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP 0xE8 +#define WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP 0xE9 + +#define WIFI_IF_NTFY_MAX 0xff + +/* Notification structures */ +struct iwm_umac_notif_wifi_if { + struct iwm_umac_wifi_in_hdr hdr; + u8 status; + u8 flags; + __le16 buf_size; 
+} __attribute__ ((packed)); + +#define UMAC_ROAM_REASON_FIRST_SELECTION 0x1 +#define UMAC_ROAM_REASON_AP_DEAUTH 0x2 +#define UMAC_ROAM_REASON_AP_CONNECT_LOST 0x3 +#define UMAC_ROAM_REASON_RSSI 0x4 +#define UMAC_ROAM_REASON_AP_ASSISTED_ROAM 0x5 +#define UMAC_ROAM_REASON_IBSS_COALESCING 0x6 + +struct iwm_umac_notif_assoc_start { + struct iwm_umac_notif_wifi_if mlme_hdr; + __le32 roam_reason; + u8 bssid[ETH_ALEN]; + u8 reserved[2]; +} __attribute__ ((packed)); + +#define UMAC_ASSOC_COMPLETE_SUCCESS 0x0 +#define UMAC_ASSOC_COMPLETE_FAILURE 0x1 + +struct iwm_umac_notif_assoc_complete { + struct iwm_umac_notif_wifi_if mlme_hdr; + __le32 status; + u8 bssid[ETH_ALEN]; + u8 band; + u8 channel; +} __attribute__ ((packed)); + +#define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0 +#define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1 +#define UMAC_PROFILE_INVALID_REQUEST 0x2 +#define UMAC_PROFILE_INVALID_RF_PREEMPTED 0x3 + +struct iwm_umac_notif_profile_invalidate { + struct iwm_umac_notif_wifi_if mlme_hdr; + __le32 reason; +} __attribute__ ((packed)); + +#define UMAC_SCAN_RESULT_SUCCESS 0x0 +#define UMAC_SCAN_RESULT_ABORTED 0x1 +#define UMAC_SCAN_RESULT_REJECTED 0x2 +#define UMAC_SCAN_RESULT_FAILED 0x3 + +struct iwm_umac_notif_scan_complete { + struct iwm_umac_notif_wifi_if mlme_hdr; + __le32 type; + __le32 result; + u8 seq_num; +} __attribute__ ((packed)); + +#define UMAC_OPCODE_ADD_MODIFY 0x0 +#define UMAC_OPCODE_REMOVE 0x1 +#define UMAC_OPCODE_CLEAR_ALL 0x2 + +#define UMAC_STA_FLAG_QOS 0x1 + +struct iwm_umac_notif_sta_info { + struct iwm_umac_notif_wifi_if mlme_hdr; + __le32 opcode; + u8 mac_addr[ETH_ALEN]; + u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */ + u8 flags; +} __attribute__ ((packed)); + +#define UMAC_BAND_2GHZ 0 +#define UMAC_BAND_5GHZ 1 + +#define UMAC_CHANNEL_WIDTH_20MHZ 0 +#define UMAC_CHANNEL_WIDTH_40MHZ 1 + +struct iwm_umac_notif_bss_info { + struct iwm_umac_notif_wifi_if mlme_hdr; + __le32 type; + __le32 timestamp; + __le16 table_idx; + __le16 frame_len; + u8 band; + u8 channel; + s8 rssi; + u8 reserved; + u8 frame_buf[1]; +} __attribute__ ((packed)); + +#define IWM_BSS_REMOVE_INDEX_MSK 0x0fff +#define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00 + +#define IWM_BSS_REMOVE_FLG_AGE 0x1000 +#define IWM_BSS_REMOVE_FLG_TIMEOUT 0x2000 +#define IWM_BSS_REMOVE_FLG_TABLE_FULL 0x4000 + +struct iwm_umac_notif_bss_removed { + struct iwm_umac_notif_wifi_if mlme_hdr; + __le32 count; + __le16 entries[0]; +} __attribute__ ((packed)); + +struct iwm_umac_notif_mgt_frame { + struct iwm_umac_notif_wifi_if mlme_hdr; + __le16 len; + u8 frame[1]; +} __attribute__ ((packed)); + +struct iwm_umac_notif_alive { + struct iwm_umac_wifi_in_hdr hdr; + __le16 status; + __le16 reserved1; + struct iwm_fw_alive_hdr alive_data; + __le16 reserved2; + __le16 page_grp_count; + __le32 page_grp_state[IWM_MACS_OUT_GROUPS]; +} __attribute__ ((packed)); + +struct iwm_umac_notif_init_complete { + struct iwm_umac_wifi_in_hdr hdr; + __le16 status; + __le16 reserved; +} __attribute__ ((packed)); + +/* error categories */ +enum { + UMAC_SYS_ERR_CAT_NONE = 0, + UMAC_SYS_ERR_CAT_BOOT, + UMAC_SYS_ERR_CAT_UMAC, + UMAC_SYS_ERR_CAT_UAXM, + UMAC_SYS_ERR_CAT_LMAC, + UMAC_SYS_ERR_CAT_MAX +}; + +struct iwm_fw_error_hdr { + __le32 category; + __le32 status; + __le32 pc; + __le32 blink1; + __le32 blink2; + __le32 ilink1; + __le32 ilink2; + __le32 data1; + __le32 data2; + __le32 line_num; + __le32 umac_status; + __le32 lmac_status; + __le32 sdio_status; + __le32 dbm_sample_ctrl; + __le32 dbm_buf_base; + __le32 dbm_buf_end; + __le32 dbm_buf_write_ptr; 
+ __le32 dbm_buf_cycle_cnt; +} __attribute__ ((packed)); + +struct iwm_umac_notif_error { + struct iwm_umac_wifi_in_hdr hdr; + struct iwm_fw_error_hdr err; +} __attribute__ ((packed)); + +#define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0 +#define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff +#define UMAC_DEALLOC_NTFY_CHANGES_MSK_POS 8 +#define UMAC_DEALLOC_NTFY_CHANGES_MSK_SEED 0xffffff +#define UMAC_DEALLOC_NTFY_PAGE_CNT_POS 0 +#define UMAC_DEALLOC_NTFY_PAGE_CNT_SEED 0xffffff +#define UMAC_DEALLOC_NTFY_GROUP_NUM_POS 24 +#define UMAC_DEALLOC_NTFY_GROUP_NUM_SEED 0xf + +struct iwm_umac_notif_page_dealloc { + struct iwm_umac_wifi_in_hdr hdr; + __le32 changes; + __le32 grp_info[IWM_MACS_OUT_GROUPS]; +} __attribute__ ((packed)); + +struct iwm_umac_notif_wifi_status { + struct iwm_umac_wifi_in_hdr hdr; + __le16 status; + __le16 reserved; +} __attribute__ ((packed)); + +struct iwm_umac_notif_rx_ticket { + struct iwm_umac_wifi_in_hdr hdr; + u8 num_tickets; + u8 reserved[3]; + struct iwm_rx_ticket tickets[1]; +} __attribute__ ((packed)); + +/* Tx/Rx rates window (number of max of last update window per second) */ +#define UMAC_NTF_RATE_SAMPLE_NR 4 + +/* Max numbers of bits required to go through all antennae in bitmasks */ +#define UMAC_PHY_NUM_CHAINS 3 + +#define IWM_UMAC_MGMT_TID 8 +#define IWM_UMAC_TID_NR 9 /* 8 TIDs + MGMT */ + +struct iwm_umac_notif_stats { + struct iwm_umac_wifi_in_hdr hdr; + __le32 flags; + __le32 timestamp; + __le16 tid_load[IWM_UMAC_TID_NR + 1]; /* 1 non-QoS + 1 dword align */ + __le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR]; + __le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR]; + __le32 chain_energy[UMAC_PHY_NUM_CHAINS]; + s32 rssi_dbm; + s32 noise_dbm; + __le32 supp_rates; + __le32 supp_ht_rates; + __le32 missed_beacons; + __le32 rx_beacons; + __le32 rx_dir_pkts; + __le32 rx_nondir_pkts; + __le32 rx_multicast; + __le32 rx_errors; + __le32 rx_drop_other_bssid; + __le32 rx_drop_decode; + __le32 rx_drop_reassembly; + __le32 rx_drop_bad_len; + __le32 rx_drop_overflow; + __le32 rx_drop_crc; + __le32 rx_drop_missed; + __le32 tx_dir_pkts; + __le32 tx_nondir_pkts; + __le32 tx_failure; + __le32 tx_errors; + __le32 tx_drop_max_retry; + __le32 tx_err_abort; + __le32 tx_err_carrier; + __le32 rx_bytes; + __le32 tx_bytes; + __le32 tx_power; + __le32 tx_max_power; + __le32 roam_threshold; + __le32 ap_assoc_nr; + __le32 scan_full; + __le32 scan_abort; + __le32 ap_nr; + __le32 roam_nr; + __le32 roam_missed_beacons; + __le32 roam_rssi; + __le32 roam_unassoc; + __le32 roam_deauth; + __le32 roam_ap_loadblance; +} __attribute__ ((packed)); + +#define UMAC_STOP_TX_FLAG 0x1 +#define UMAC_RESUME_TX_FLAG 0x2 + +#define LAST_SEQ_NUM_INVALID 0xFFFF + +struct iwm_umac_notif_stop_resume_tx { + struct iwm_umac_wifi_in_hdr hdr; + u8 flags; /* UMAC_*_TX_FLAG_* */ + u8 sta_id; + __le16 stop_resume_tid_msk; /* tid bitmask */ +} __attribute__ ((packed)); + +#define UMAC_MAX_NUM_PMKIDS 4 + +/* WiFi interface wrapper header */ +struct iwm_umac_wifi_if { + u8 oid; + u8 flags; + __le16 buf_size; +} __attribute__ ((packed)); + +#define IWM_SEQ_NUM_HOST_MSK 0x0000 +#define IWM_SEQ_NUM_UMAC_MSK 0x4000 +#define IWM_SEQ_NUM_LMAC_MSK 0x8000 +#define IWM_SEQ_NUM_MSK 0xC000 + +#endif diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile new file mode 100644 index 0000000..45e870e --- /dev/null +++ b/drivers/net/wireless/libertas/Makefile @@ -0,0 +1,23 @@ +libertas-y += assoc.o +libertas-y += cfg.o +libertas-y += cmd.o +libertas-y += cmdresp.o +libertas-y += debugfs.o +libertas-y += ethtool.o +libertas-y += 
main.o +libertas-y += rx.o +libertas-y += scan.o +libertas-y += tx.o +libertas-y += wext.o +libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o + +usb8xxx-objs += if_usb.o +libertas_cs-objs += if_cs.o +libertas_sdio-objs += if_sdio.o +libertas_spi-objs += if_spi.o + +obj-$(CONFIG_LIBERTAS) += libertas.o +obj-$(CONFIG_LIBERTAS_USB) += usb8xxx.o +obj-$(CONFIG_LIBERTAS_CS) += libertas_cs.o +obj-$(CONFIG_LIBERTAS_SDIO) += libertas_sdio.o +obj-$(CONFIG_LIBERTAS_SPI) += libertas_spi.o diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c new file mode 100644 index 0000000..e88b12b --- /dev/null +++ b/drivers/net/wireless/libertas/assoc.c @@ -0,0 +1,2246 @@ +/* Copyright (C) 2006, Red Hat, Inc. */ + +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) +#include +#endif + +#include "assoc.h" +#include "decl.h" +#include "host.h" +#include "scan.h" +#include "cmd.h" + +static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) = + { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; +static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) = + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + +/* The firmware needs the following bits masked out of the beacon-derived + * capability field when associating/joining to a BSS: + * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused) + */ +#define CAPINFO_MASK (~(0xda00)) + +/** + * 802.11b/g supported bitrates (in 500Kb/s units) + */ +u8 lbs_bg_rates[MAX_RATES] = + { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c, +0x00, 0x00 }; + + +/** + * @brief This function finds common rates between rates and card rates. + * + * It will fill common rates in rates as output if found. + * + * NOTE: Setting the MSB of the basic rates need to be taken + * care, either before or after calling this function + * + * @param priv A pointer to struct lbs_private structure + * @param rates the buffer which keeps input and output + * @param rates_size the size of rates buffer; new size of buffer on return, + * which will be less than or equal to original rates_size + * + * @return 0 on success, or -1 on error + */ +static int get_common_rates(struct lbs_private *priv, + u8 *rates, + u16 *rates_size) +{ + int i, j; + u8 intersection[MAX_RATES]; + u16 intersection_size; + u16 num_rates = 0; + + intersection_size = min_t(u16, *rates_size, ARRAY_SIZE(intersection)); + + /* Allow each rate from 'rates' that is supported by the hardware */ + for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && lbs_bg_rates[i]; i++) { + for (j = 0; j < intersection_size && rates[j]; j++) { + if (rates[j] == lbs_bg_rates[i]) + intersection[num_rates++] = rates[j]; + } + } + + lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); + lbs_deb_hex(LBS_DEB_JOIN, "card rates ", lbs_bg_rates, + ARRAY_SIZE(lbs_bg_rates)); + lbs_deb_hex(LBS_DEB_JOIN, "common rates", intersection, num_rates); + lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); + + if (!priv->enablehwauto) { + for (i = 0; i < num_rates; i++) { + if (intersection[i] == priv->cur_rate) + goto done; + } + lbs_pr_alert("Previously set fixed data rate %#x isn't " + "compatible with the network.\n", priv->cur_rate); + return -1; + } + +done: + memset(rates, 0, *rates_size); + *rates_size = num_rates; + memcpy(rates, intersection, num_rates); + return 0; +} + + +/** + * @brief Sets the MSB on basic rates as the firmware requires + * + * Scan through an array and set the MSB for basic data rates. 
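The rate bytes in lbs_bg_rates above are expressed in 500 kb/s units, and the helper that follows marks the four 802.11b basic rates by setting bit 7. A minimal standalone sketch of that arithmetic (illustrative only, not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* The four 802.11b basic rates, in the driver's 500 kb/s units */
		static const uint8_t basic[] = { 0x02, 0x04, 0x0b, 0x16 };
		size_t i;

		for (i = 0; i < sizeof(basic); i++)
			printf("%5u kb/s -> basic-rate flag set: 0x%02x\n",
			       basic[i] * 500u, (unsigned int)(basic[i] | 0x80));
		return 0;
	}

For example 0x16 is 22 * 500 kb/s = 11 Mbps, and with the basic-rate flag it becomes 0x96.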
+ * + * @param rates buffer of data rates + * @param len size of buffer + */ +static void lbs_set_basic_rate_flags(u8 *rates, size_t len) +{ + int i; + + for (i = 0; i < len; i++) { + if (rates[i] == 0x02 || rates[i] == 0x04 || + rates[i] == 0x0b || rates[i] == 0x16) + rates[i] |= 0x80; + } +} + + +static u8 iw_auth_to_ieee_auth(u8 auth) +{ + if (auth == IW_AUTH_ALG_OPEN_SYSTEM) + return 0x00; + else if (auth == IW_AUTH_ALG_SHARED_KEY) + return 0x01; + else if (auth == IW_AUTH_ALG_LEAP) + return 0x80; + + lbs_deb_join("%s: invalid auth alg 0x%X\n", __func__, auth); + return 0; +} + +/** + * @brief This function prepares the authenticate command. AUTHENTICATE only + * sets the authentication suite for future associations, as the firmware + * handles authentication internally during the ASSOCIATE command. + * + * @param priv A pointer to struct lbs_private structure + * @param bssid The peer BSSID with which to authenticate + * @param auth The authentication mode to use (from wireless.h) + * + * @return 0 or -1 + */ +static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth) +{ + struct cmd_ds_802_11_authenticate cmd; + int ret = -1; + + lbs_deb_enter(LBS_DEB_JOIN); + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + memcpy(cmd.bssid, bssid, ETH_ALEN); + + cmd.authtype = iw_auth_to_ieee_auth(auth); + + lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n", bssid, cmd.authtype); + + ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd); + + lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); + return ret; +} + + +int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action, + struct assoc_request *assoc) +{ + struct cmd_ds_802_11_set_wep cmd; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + + cmd.action = cpu_to_le16(cmd_action); + + if (cmd_action == CMD_ACT_ADD) { + int i; + + /* default tx key index */ + cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx & + CMD_WEP_KEY_INDEX_MASK); + + /* Copy key types and material to host command structure */ + for (i = 0; i < 4; i++) { + struct enc_key *pkey = &assoc->wep_keys[i]; + + switch (pkey->len) { + case KEY_LEN_WEP_40: + cmd.keytype[i] = CMD_TYPE_WEP_40_BIT; + memmove(cmd.keymaterial[i], pkey->key, pkey->len); + lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i); + break; + case KEY_LEN_WEP_104: + cmd.keytype[i] = CMD_TYPE_WEP_104_BIT; + memmove(cmd.keymaterial[i], pkey->key, pkey->len); + lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i); + break; + case 0: + break; + default: + lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n", + i, pkey->len); + ret = -1; + goto done; + break; + } + } + } else if (cmd_action == CMD_ACT_REMOVE) { + /* ACT_REMOVE clears _all_ WEP keys */ + + /* default tx key index */ + cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx & + CMD_WEP_KEY_INDEX_MASK); + lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx); + } + + ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd); +done: + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action, + uint16_t *enable) +{ + struct cmd_ds_802_11_enable_rsn cmd; + int ret; + + lbs_deb_enter(LBS_DEB_CMD); + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(cmd_action); + + if (cmd_action == CMD_ACT_GET) + cmd.enable = 0; + else { + if (*enable) + cmd.enable = cpu_to_le16(CMD_ENABLE_RSN); + else + cmd.enable = 
cpu_to_le16(CMD_DISABLE_RSN); + lbs_deb_cmd("ENABLE_RSN: %d\n", *enable); + } + + ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd); + if (!ret && cmd_action == CMD_ACT_GET) + *enable = le16_to_cpu(cmd.enable); + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam, + struct enc_key *key) +{ + lbs_deb_enter(LBS_DEB_CMD); + + if (key->flags & KEY_INFO_WPA_ENABLED) + keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED); + if (key->flags & KEY_INFO_WPA_UNICAST) + keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST); + if (key->flags & KEY_INFO_WPA_MCAST) + keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST); + + keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL); + keyparam->keytypeid = cpu_to_le16(key->type); + keyparam->keylen = cpu_to_le16(key->len); + memcpy(keyparam->key, key->key, key->len); + + /* Length field doesn't include the {type,length} header */ + keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4); + lbs_deb_leave(LBS_DEB_CMD); +} + +int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action, + struct assoc_request *assoc) +{ + struct cmd_ds_802_11_key_material cmd; + int ret = 0; + int index = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + cmd.action = cpu_to_le16(cmd_action); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + + if (cmd_action == CMD_ACT_GET) { + cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_header) + 2); + } else { + memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet)); + + if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) { + set_one_wpa_key(&cmd.keyParamSet[index], + &assoc->wpa_unicast_key); + index++; + } + + if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) { + set_one_wpa_key(&cmd.keyParamSet[index], + &assoc->wpa_mcast_key); + index++; + } + + /* The common header and as many keys as we included */ + cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd), + keyParamSet[index])); + } + ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd); + /* Copy the returned key to driver private data */ + if (!ret && cmd_action == CMD_ACT_GET) { + void *buf_ptr = cmd.keyParamSet; + void *resp_end = &(&cmd)[1]; + + while (buf_ptr < resp_end) { + struct MrvlIEtype_keyParamSet *keyparam = buf_ptr; + struct enc_key *key; + uint16_t param_set_len = le16_to_cpu(keyparam->length); + uint16_t key_len = le16_to_cpu(keyparam->keylen); + uint16_t key_flags = le16_to_cpu(keyparam->keyinfo); + uint16_t key_type = le16_to_cpu(keyparam->keytypeid); + void *end; + + end = (void *)keyparam + sizeof(keyparam->type) + + sizeof(keyparam->length) + param_set_len; + + /* Make sure we don't access past the end of the IEs */ + if (end > resp_end) + break; + + if (key_flags & KEY_INFO_WPA_UNICAST) + key = &priv->wpa_unicast_key; + else if (key_flags & KEY_INFO_WPA_MCAST) + key = &priv->wpa_mcast_key; + else + break; + + /* Copy returned key into driver */ + memset(key, 0, sizeof(struct enc_key)); + if (key_len > sizeof(key->key)) + break; + key->type = key_type; + key->flags = key_flags; + key->len = key_len; + memcpy(key->key, keyparam->key, key->len); + + buf_ptr = end + 1; + } + } + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok) +{ +/* Bit Rate +* 15:13 Reserved +* 12 54 Mbps +* 11 48 Mbps +* 10 36 Mbps +* 9 24 Mbps +* 8 18 Mbps +* 7 12 Mbps +* 6 9 Mbps +* 5 6 Mbps +* 4 Reserved +* 3 11 Mbps +* 2 5.5 Mbps +* 1 2 Mbps +* 0 1 Mbps +**/ + + uint16_t ratemask; + int i = 
lbs_data_rate_to_fw_index(rate); + if (lower_rates_ok) + ratemask = (0x1fef >> (12 - i)); + else + ratemask = (1 << i); + return cpu_to_le16(ratemask); +} + +int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv, + uint16_t cmd_action) +{ + struct cmd_ds_802_11_rate_adapt_rateset cmd; + int ret; + + lbs_deb_enter(LBS_DEB_CMD); + + if (!priv->cur_rate && !priv->enablehwauto) + return -EINVAL; + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + + cmd.action = cpu_to_le16(cmd_action); + cmd.enablehwauto = cpu_to_le16(priv->enablehwauto); + cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto); + ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd); + if (!ret && cmd_action == CMD_ACT_GET) + priv->enablehwauto = le16_to_cpu(cmd.enablehwauto); + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +/** + * @brief Set the data rate + * + * @param priv A pointer to struct lbs_private structure + * @param rate The desired data rate, or 0 to clear a locked rate + * + * @return 0 on success, error on failure + */ +int lbs_set_data_rate(struct lbs_private *priv, u8 rate) +{ + struct cmd_ds_802_11_data_rate cmd; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + + if (rate > 0) { + cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE); + cmd.rates[0] = lbs_data_rate_to_fw_index(rate); + if (cmd.rates[0] == 0) { + lbs_deb_cmd("DATA_RATE: invalid requested rate of" + " 0x%02X\n", rate); + ret = 0; + goto out; + } + lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]); + } else { + cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO); + lbs_deb_cmd("DATA_RATE: setting auto\n"); + } + + ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd); + if (ret) + goto out; + + lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof(cmd)); + + /* FIXME: get actual rates FW can do if this command actually returns + * all data rates supported. 
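A worked example of the bitmap math in lbs_rate_to_fw_bitmap() above (illustrative only, not part of the patch): for 11 Mbps the firmware index is 3 per the bit table, so a fixed rate yields 0x0008, while allowing lower rates yields 0x1fef >> (12 - 3) = 0x000f, i.e. 1, 2, 5.5 and 11 Mbps.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int i = 3;                                 /* 11 Mbps, per the bit table */
		uint16_t fixed = (uint16_t)(1u << i);      /* only the selected rate     */
		uint16_t with_lower = 0x1fef >> (12 - i);  /* selected rate and below    */

		printf("fixed: 0x%04x, with lower rates: 0x%04x\n",
		       (unsigned int)fixed, (unsigned int)with_lower);
		return 0;
	}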
+ */ + priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]); + lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate); + +out: + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + + +int lbs_cmd_802_11_rssi(struct lbs_private *priv, + struct cmd_ds_command *cmd) +{ + + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_RSSI); + cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) + + sizeof(struct cmd_header)); + cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR); + + /* reset Beacon SNR/NF/RSSI values */ + priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0; + priv->SNR[TYPE_BEACON][TYPE_AVG] = 0; + priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0; + priv->NF[TYPE_BEACON][TYPE_AVG] = 0; + priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0; + priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0; + + lbs_deb_leave(LBS_DEB_CMD); + return 0; +} + +int lbs_ret_802_11_rssi(struct lbs_private *priv, + struct cmd_ds_command *resp) +{ + struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp; + + lbs_deb_enter(LBS_DEB_CMD); + + /* store the non average value */ + priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR); + priv->NF[TYPE_BEACON][TYPE_NOAVG] = + get_unaligned_le16(&rssirsp->noisefloor); + + priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR); + priv->NF[TYPE_BEACON][TYPE_AVG] = + get_unaligned_le16(&rssirsp->avgnoisefloor); + + priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = + CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG], + priv->NF[TYPE_BEACON][TYPE_NOAVG]); + + priv->RSSI[TYPE_BEACON][TYPE_AVG] = + CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE, + priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE); + + lbs_deb_cmd("RSSI: beacon %d, avg %d\n", + priv->RSSI[TYPE_BEACON][TYPE_NOAVG], + priv->RSSI[TYPE_BEACON][TYPE_AVG]); + + lbs_deb_leave(LBS_DEB_CMD); + return 0; +} + + +int lbs_cmd_bcn_ctrl(struct lbs_private *priv, + struct cmd_ds_command *cmd, + u16 cmd_action) +{ + struct cmd_ds_802_11_beacon_control + *bcn_ctrl = &cmd->params.bcn_ctrl; + + lbs_deb_enter(LBS_DEB_CMD); + cmd->size = + cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control) + + sizeof(struct cmd_header)); + cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL); + + bcn_ctrl->action = cpu_to_le16(cmd_action); + bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable); + bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period); + + lbs_deb_leave(LBS_DEB_CMD); + return 0; +} + +int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv, + struct cmd_ds_command *resp) +{ + struct cmd_ds_802_11_beacon_control *bcn_ctrl = + &resp->params.bcn_ctrl; + + lbs_deb_enter(LBS_DEB_CMD); + + if (bcn_ctrl->action == CMD_ACT_GET) { + priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable); + priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period); + } + + lbs_deb_enter(LBS_DEB_CMD); + return 0; +} + + + +static int lbs_assoc_post(struct lbs_private *priv, + struct cmd_ds_802_11_associate_response *resp) +{ + int ret = 0; + union iwreq_data wrqu; + struct bss_descriptor *bss; + u16 status_code; + + lbs_deb_enter(LBS_DEB_ASSOC); + + if (!priv->in_progress_assoc_req) { + lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n"); + ret = -1; + goto done; + } + bss = &priv->in_progress_assoc_req->bss; + + /* + * Older FW versions map the IEEE 802.11 Status Code in the association + * response to the following values returned in resp->statuscode: + * + * IEEE Status Code Marvell Status Code + * 0 -> 0x0000 ASSOC_RESULT_SUCCESS + * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * 14 -> 0x0004 
ASSOC_RESULT_AUTH_REFUSED + * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * others -> 0x0003 ASSOC_RESULT_REFUSED + * + * Other response codes: + * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused) + * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for + * association response from the AP) + */ + + status_code = le16_to_cpu(resp->statuscode); + if (priv->fwrelease < 0x09000000) { + switch (status_code) { + case 0x00: + break; + case 0x01: + lbs_deb_assoc("ASSOC_RESP: invalid parameters\n"); + break; + case 0x02: + lbs_deb_assoc("ASSOC_RESP: internal timer " + "expired while waiting for the AP\n"); + break; + case 0x03: + lbs_deb_assoc("ASSOC_RESP: association " + "refused by AP\n"); + break; + case 0x04: + lbs_deb_assoc("ASSOC_RESP: authentication " + "refused by AP\n"); + break; + default: + lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x " + " unknown\n", status_code); + break; + } + } else { + /* v9+ returns the AP's association response */ + lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x\n", status_code); + } + + if (status_code) { + lbs_mac_event_disconnected(priv); + ret = -1; + goto done; + } + + lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP", + (void *) (resp + sizeof (resp->hdr)), + le16_to_cpu(resp->hdr.size) - sizeof (resp->hdr)); + + /* Send a Media Connected event, according to the Spec */ + priv->connect_status = LBS_CONNECTED; + + /* Update current SSID and BSSID */ + memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN); + priv->curbssparams.ssid_len = bss->ssid_len; + memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN); + + priv->SNR[TYPE_RXPD][TYPE_AVG] = 0; + priv->NF[TYPE_RXPD][TYPE_AVG] = 0; + + memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR)); + memset(priv->rawNF, 0x00, sizeof(priv->rawNF)); + priv->nextSNRNF = 0; + priv->numSNRNF = 0; + + netif_carrier_on(priv->dev); + if (!priv->tx_pending_len) + netif_wake_queue(priv->dev); + + memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); + +done: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + +/** + * @brief This function prepares an association-class command. 
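The pre-v9 status mapping handled by the switch above can be restated compactly; a hedged helper sketch, not part of the patch (v9 and newer firmware passes through the AP's IEEE status code instead):

	static const char *marvell_assoc_status_name(unsigned int code)
	{
		switch (code) {
		case 0x0000: return "success";
		case 0x0001: return "invalid parameters";
		case 0x0002: return "timeout waiting for the AP";
		case 0x0003: return "association refused by AP";
		case 0x0004: return "authentication refused by AP";
		default:     return "unknown failure";
		}
	}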
+ * + * @param priv A pointer to struct lbs_private structure + * @param assoc_req The association request describing the BSS to associate + * or reassociate with + * @param command The actual command, either CMD_802_11_ASSOCIATE or + * CMD_802_11_REASSOCIATE + * + * @return 0 or -1 + */ +static int lbs_associate(struct lbs_private *priv, + struct assoc_request *assoc_req, + u16 command) +{ + struct cmd_ds_802_11_associate cmd; + int ret = 0; + struct bss_descriptor *bss = &assoc_req->bss; + u8 *pos = &(cmd.iebuf[0]); + u16 tmpcap, tmplen, tmpauth; + struct mrvl_ie_ssid_param_set *ssid; + struct mrvl_ie_ds_param_set *ds; + struct mrvl_ie_cf_param_set *cf; + struct mrvl_ie_rates_param_set *rates; + struct mrvl_ie_rsn_param_set *rsn; + struct mrvl_ie_auth_type *auth; + + lbs_deb_enter(LBS_DEB_ASSOC); + + BUG_ON((command != CMD_802_11_ASSOCIATE) && + (command != CMD_802_11_REASSOCIATE)); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.command = cpu_to_le16(command); + + /* Fill in static fields */ + memcpy(cmd.bssid, bss->bssid, ETH_ALEN); + cmd.listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL); + + /* Capability info */ + tmpcap = (bss->capability & CAPINFO_MASK); + if (bss->mode == IW_MODE_INFRA) + tmpcap |= WLAN_CAPABILITY_ESS; + cmd.capability = cpu_to_le16(tmpcap); + lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap); + + /* SSID */ + ssid = (struct mrvl_ie_ssid_param_set *) pos; + ssid->header.type = cpu_to_le16(TLV_TYPE_SSID); + tmplen = bss->ssid_len; + ssid->header.len = cpu_to_le16(tmplen); + memcpy(ssid->ssid, bss->ssid, tmplen); + pos += sizeof(ssid->header) + tmplen; + + ds = (struct mrvl_ie_ds_param_set *) pos; + ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS); + ds->header.len = cpu_to_le16(1); + ds->channel = bss->phy.ds.channel; + pos += sizeof(ds->header) + 1; + + cf = (struct mrvl_ie_cf_param_set *) pos; + cf->header.type = cpu_to_le16(TLV_TYPE_CF); + tmplen = sizeof(*cf) - sizeof (cf->header); + cf->header.len = cpu_to_le16(tmplen); + /* IE payload should be zeroed, firmware fills it in for us */ + pos += sizeof(*cf); + + rates = (struct mrvl_ie_rates_param_set *) pos; + rates->header.type = cpu_to_le16(TLV_TYPE_RATES); + tmplen = min_t(u16, ARRAY_SIZE(bss->rates), MAX_RATES); + memcpy(&rates->rates, &bss->rates, tmplen); + if (get_common_rates(priv, rates->rates, &tmplen)) { + ret = -1; + goto done; + } + pos += sizeof(rates->header) + tmplen; + rates->header.len = cpu_to_le16(tmplen); + lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen); + + /* Copy the infra. association rates into Current BSS state structure */ + memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates)); + memcpy(&priv->curbssparams.rates, &rates->rates, tmplen); + + /* Set MSB on basic rates as the firmware requires, but _after_ + * copying to current bss rates. 
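Every Marvell IE appended to the associate command above follows the same layout: 16-bit type, 16-bit length, payload, with pos advanced past the header plus payload. A hypothetical helper, not part of the patch, showing that one append step in isolation (endianness conversion and bounds checking omitted for brevity; struct and function names are assumptions):

	#include <string.h>
	#include <stdint.h>

	struct tlv_hdr {
		uint16_t type;
		uint16_t len;
	} __attribute__ ((packed));

	static uint8_t *append_tlv(uint8_t *pos, uint16_t type,
				   const void *payload, uint16_t len)
	{
		struct tlv_hdr hdr = { type, len };

		memcpy(pos, &hdr, sizeof(hdr));
		memcpy(pos + sizeof(hdr), payload, len);
		return pos + sizeof(hdr) + len;
	}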
+ */ + lbs_set_basic_rate_flags(rates->rates, tmplen); + + /* Firmware v9+ indicate authentication suites as a TLV */ + if (priv->fwrelease >= 0x09000000) { + auth = (struct mrvl_ie_auth_type *) pos; + auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE); + auth->header.len = cpu_to_le16(2); + tmpauth = iw_auth_to_ieee_auth(priv->secinfo.auth_mode); + auth->auth = cpu_to_le16(tmpauth); + pos += sizeof(auth->header) + 2; + + lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n", + bss->bssid, priv->secinfo.auth_mode); + } + + /* WPA/WPA2 IEs */ + if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) { + rsn = (struct mrvl_ie_rsn_param_set *) pos; + /* WPA_IE or WPA2_IE */ + rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]); + tmplen = (u16) assoc_req->wpa_ie[1]; + rsn->header.len = cpu_to_le16(tmplen); + memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen); + lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: WPA/RSN IE", (u8 *) rsn, + sizeof(rsn->header) + tmplen); + pos += sizeof(rsn->header) + tmplen; + } + + cmd.hdr.size = cpu_to_le16((sizeof(cmd) - sizeof(cmd.iebuf)) + + (u16)(pos - (u8 *) &cmd.iebuf)); + + /* update curbssparams */ + priv->channel = bss->phy.ds.channel; + + ret = lbs_cmd_with_response(priv, command, &cmd); + if (ret == 0) { + ret = lbs_assoc_post(priv, + (struct cmd_ds_802_11_associate_response *) &cmd); + } + +done: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + +/** + * @brief Associate to a specific BSS discovered in a scan + * + * @param priv A pointer to struct lbs_private structure + * @param assoc_req The association request describing the BSS to associate with + * + * @return 0-success, otherwise fail + */ +static int lbs_try_associate(struct lbs_private *priv, + struct assoc_request *assoc_req) +{ + int ret; + u8 preamble = RADIO_PREAMBLE_LONG; + + lbs_deb_enter(LBS_DEB_ASSOC); + + /* FW v9 and higher indicate authentication suites as a TLV in the + * association command, not as a separate authentication command. 
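The firmware version checks in this file compare priv->fwrelease against 0x09000000; the sketch below spells out the assumption behind that test (major version in the top byte). Illustrative only, not part of the patch:

	#include <stdint.h>

	/* "v9 or newer" == major byte (bits 31..24) is at least 9 */
	static int fw_is_v9_or_newer(uint32_t fwrelease)
	{
		return (fwrelease >> 24) >= 9;
	}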
+ */ + if (priv->fwrelease < 0x09000000) { + ret = lbs_set_authentication(priv, assoc_req->bss.bssid, + priv->secinfo.auth_mode); + if (ret) + goto out; + } + + /* Use short preamble only when both the BSS and firmware support it */ + if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + preamble = RADIO_PREAMBLE_SHORT; + + ret = lbs_set_radio(priv, preamble, 1); + if (ret) + goto out; + + ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE); + +out: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + +static int lbs_adhoc_post(struct lbs_private *priv, + struct cmd_ds_802_11_ad_hoc_result *resp) +{ + int ret = 0; + u16 command = le16_to_cpu(resp->hdr.command); + u16 result = le16_to_cpu(resp->hdr.result); + union iwreq_data wrqu; + struct bss_descriptor *bss; + DECLARE_SSID_BUF(ssid); + + lbs_deb_enter(LBS_DEB_JOIN); + + if (!priv->in_progress_assoc_req) { + lbs_deb_join("ADHOC_RESP: no in-progress association " + "request\n"); + ret = -1; + goto done; + } + bss = &priv->in_progress_assoc_req->bss; + + /* + * Join result code 0 --> SUCCESS + */ + if (result) { + lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result); + if (priv->connect_status == LBS_CONNECTED) + lbs_mac_event_disconnected(priv); + ret = -1; + goto done; + } + + /* Send a Media Connected event, according to the Spec */ + priv->connect_status = LBS_CONNECTED; + + if (command == CMD_RET(CMD_802_11_AD_HOC_START)) { + /* Update the created network descriptor with the new BSSID */ + memcpy(bss->bssid, resp->bssid, ETH_ALEN); + } + + /* Set the BSSID from the joined/started descriptor */ + memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN); + + /* Set the new SSID to current SSID */ + memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN); + priv->curbssparams.ssid_len = bss->ssid_len; + + netif_carrier_on(priv->dev); + if (!priv->tx_pending_len) + netif_wake_queue(priv->dev); + + memset(&wrqu, 0, sizeof(wrqu)); + memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); + + lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n", + print_ssid(ssid, bss->ssid, bss->ssid_len), + priv->curbssparams.bssid, + priv->channel); + +done: + lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); + return ret; +} + +/** + * @brief Join an adhoc network found in a previous scan + * + * @param priv A pointer to struct lbs_private structure + * @param assoc_req The association request describing the BSS to join + * + * @return 0 on success, error on failure + */ +static int lbs_adhoc_join(struct lbs_private *priv, + struct assoc_request *assoc_req) +{ + struct cmd_ds_802_11_ad_hoc_join cmd; + struct bss_descriptor *bss = &assoc_req->bss; + u8 preamble = RADIO_PREAMBLE_LONG; + DECLARE_SSID_BUF(ssid); + u16 ratesize = 0; + int ret = 0; + + lbs_deb_enter(LBS_DEB_ASSOC); + + lbs_deb_join("current SSID '%s', ssid length %u\n", + print_ssid(ssid, priv->curbssparams.ssid, + priv->curbssparams.ssid_len), + priv->curbssparams.ssid_len); + lbs_deb_join("requested ssid '%s', ssid length %u\n", + print_ssid(ssid, bss->ssid, bss->ssid_len), + bss->ssid_len); + + /* check if the requested SSID is already joined */ + if (priv->curbssparams.ssid_len && + !lbs_ssid_cmp(priv->curbssparams.ssid, + priv->curbssparams.ssid_len, + bss->ssid, bss->ssid_len) && + (priv->mode == IW_MODE_ADHOC) && + (priv->connect_status == LBS_CONNECTED)) { + union iwreq_data wrqu; + + 
lbs_deb_join("ADHOC_J_CMD: New ad-hoc SSID is the same as " + "current, not attempting to re-join"); + + /* Send the re-association event though, because the association + * request really was successful, even if just a null-op. + */ + memset(&wrqu, 0, sizeof(wrqu)); + memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, + ETH_ALEN); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); + goto out; + } + + /* Use short preamble only when both the BSS and firmware support it */ + if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) { + lbs_deb_join("AdhocJoin: Short preamble\n"); + preamble = RADIO_PREAMBLE_SHORT; + } + + ret = lbs_set_radio(priv, preamble, 1); + if (ret) + goto out; + + lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel); + lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band); + + priv->adhoccreate = 0; + priv->channel = bss->channel; + + /* Build the join command */ + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + + cmd.bss.type = CMD_BSS_TYPE_IBSS; + cmd.bss.beaconperiod = cpu_to_le16(bss->beaconperiod); + + memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN); + memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len); + + memcpy(&cmd.bss.ds, &bss->phy.ds, sizeof(struct ieee_ie_ds_param_set)); + + memcpy(&cmd.bss.ibss, &bss->ss.ibss, + sizeof(struct ieee_ie_ibss_param_set)); + + cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK); + lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n", + bss->capability, CAPINFO_MASK); + + /* information on BSSID descriptor passed to FW */ + lbs_deb_join("ADHOC_J_CMD: BSSID = %pM, SSID = '%s'\n", + cmd.bss.bssid, cmd.bss.ssid); + + /* Only v8 and below support setting these */ + if (priv->fwrelease < 0x09000000) { + /* failtimeout */ + cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT); + /* probedelay */ + cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); + } + + /* Copy Data rates from the rates recorded in scan response */ + memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates)); + ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), ARRAY_SIZE (bss->rates)); + memcpy(cmd.bss.rates, bss->rates, ratesize); + if (get_common_rates(priv, cmd.bss.rates, &ratesize)) { + lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n"); + ret = -1; + goto out; + } + + /* Copy the ad-hoc creation rates into Current BSS state structure */ + memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates)); + memcpy(&priv->curbssparams.rates, cmd.bss.rates, ratesize); + + /* Set MSB on basic rates as the firmware requires, but _after_ + * copying to current bss rates. 
+ */ + lbs_set_basic_rate_flags(cmd.bss.rates, ratesize); + + cmd.bss.ibss.atimwindow = bss->atimwindow; + + if (assoc_req->secinfo.wep_enabled) { + u16 tmp = le16_to_cpu(cmd.bss.capability); + tmp |= WLAN_CAPABILITY_PRIVACY; + cmd.bss.capability = cpu_to_le16(tmp); + } + + if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { + __le32 local_ps_mode = cpu_to_le32(LBS802_11POWERMODECAM); + + /* wake up first */ + ret = lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE, + CMD_ACT_SET, 0, 0, + &local_ps_mode); + if (ret) { + ret = -1; + goto out; + } + } + + ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd); + if (ret == 0) { + ret = lbs_adhoc_post(priv, + (struct cmd_ds_802_11_ad_hoc_result *)&cmd); + } + +out: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + +/** + * @brief Start an Adhoc Network + * + * @param priv A pointer to struct lbs_private structure + * @param assoc_req The association request describing the BSS to start + * + * @return 0 on success, error on failure + */ +static int lbs_adhoc_start(struct lbs_private *priv, + struct assoc_request *assoc_req) +{ + struct cmd_ds_802_11_ad_hoc_start cmd; + u8 preamble = RADIO_PREAMBLE_SHORT; + size_t ratesize = 0; + u16 tmpcap = 0; + int ret = 0; + DECLARE_SSID_BUF(ssid); + + lbs_deb_enter(LBS_DEB_ASSOC); + + ret = lbs_set_radio(priv, preamble, 1); + if (ret) + goto out; + + /* Build the start command */ + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + + memcpy(cmd.ssid, assoc_req->ssid, assoc_req->ssid_len); + + lbs_deb_join("ADHOC_START: SSID '%s', ssid length %u\n", + print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len), + assoc_req->ssid_len); + + cmd.bsstype = CMD_BSS_TYPE_IBSS; + + if (priv->beacon_period == 0) + priv->beacon_period = MRVDRV_BEACON_INTERVAL; + cmd.beaconperiod = cpu_to_le16(priv->beacon_period); + + WARN_ON(!assoc_req->channel); + + /* set Physical parameter set */ + cmd.ds.header.id = WLAN_EID_DS_PARAMS; + cmd.ds.header.len = 1; + cmd.ds.channel = assoc_req->channel; + + /* set IBSS parameter set */ + cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS; + cmd.ibss.header.len = 2; + cmd.ibss.atimwindow = cpu_to_le16(0); + + /* set capability info */ + tmpcap = WLAN_CAPABILITY_IBSS; + if (assoc_req->secinfo.wep_enabled || + assoc_req->secinfo.WPAenabled || + assoc_req->secinfo.WPA2enabled) { + lbs_deb_join("ADHOC_START: WEP/WPA enabled, privacy on\n"); + tmpcap |= WLAN_CAPABILITY_PRIVACY; + } else + lbs_deb_join("ADHOC_START: WEP disabled, privacy off\n"); + + cmd.capability = cpu_to_le16(tmpcap); + + /* Only v8 and below support setting probe delay */ + if (priv->fwrelease < 0x09000000) + cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); + + ratesize = min(sizeof(cmd.rates), sizeof(lbs_bg_rates)); + memcpy(cmd.rates, lbs_bg_rates, ratesize); + + /* Copy the ad-hoc creating rates into Current BSS state structure */ + memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates)); + memcpy(&priv->curbssparams.rates, &cmd.rates, ratesize); + + /* Set MSB on basic rates as the firmware requires, but _after_ + * copying to current bss rates. 
+ */ + lbs_set_basic_rate_flags(cmd.rates, ratesize); + + lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n", + cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]); + + lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n", + assoc_req->channel, assoc_req->band); + + priv->adhoccreate = 1; + priv->mode = IW_MODE_ADHOC; + + ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd); + if (ret == 0) + ret = lbs_adhoc_post(priv, + (struct cmd_ds_802_11_ad_hoc_result *)&cmd); + +out: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + +/** + * @brief Stop and Ad-Hoc network and exit Ad-Hoc mode + * + * @param priv A pointer to struct lbs_private structure + * @return 0 on success, or an error + */ +int lbs_adhoc_stop(struct lbs_private *priv) +{ + struct cmd_ds_802_11_ad_hoc_stop cmd; + int ret; + + lbs_deb_enter(LBS_DEB_JOIN); + + memset(&cmd, 0, sizeof (cmd)); + cmd.hdr.size = cpu_to_le16 (sizeof (cmd)); + + ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd); + + /* Clean up everything even if there was an error */ + lbs_mac_event_disconnected(priv); + + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + +static inline int match_bss_no_security(struct lbs_802_11_security *secinfo, + struct bss_descriptor *match_bss) +{ + if (!secinfo->wep_enabled && + !secinfo->WPAenabled && !secinfo->WPA2enabled && + match_bss->wpa_ie[0] != WLAN_EID_GENERIC && + match_bss->rsn_ie[0] != WLAN_EID_RSN && + !(match_bss->capability & WLAN_CAPABILITY_PRIVACY)) + return 1; + else + return 0; +} + +static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo, + struct bss_descriptor *match_bss) +{ + if (secinfo->wep_enabled && + !secinfo->WPAenabled && !secinfo->WPA2enabled && + (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) + return 1; + else + return 0; +} + +static inline int match_bss_wpa(struct lbs_802_11_security *secinfo, + struct bss_descriptor *match_bss) +{ + if (!secinfo->wep_enabled && secinfo->WPAenabled && + (match_bss->wpa_ie[0] == WLAN_EID_GENERIC) + /* privacy bit may NOT be set in some APs like LinkSys WRT54G + && (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */ + ) + return 1; + else + return 0; +} + +static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo, + struct bss_descriptor *match_bss) +{ + if (!secinfo->wep_enabled && secinfo->WPA2enabled && + (match_bss->rsn_ie[0] == WLAN_EID_RSN) + /* privacy bit may NOT be set in some APs like LinkSys WRT54G + (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */ + ) + return 1; + else + return 0; +} + +static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo, + struct bss_descriptor *match_bss) +{ + if (!secinfo->wep_enabled && + !secinfo->WPAenabled && !secinfo->WPA2enabled && + (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) && + (match_bss->rsn_ie[0] != WLAN_EID_RSN) && + (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) + return 1; + else + return 0; +} + +/** + * @brief Check if a scanned network compatible with the driver settings + * + * WEP WPA WPA2 ad-hoc encrypt Network + * enabled enabled enabled AES mode privacy WPA WPA2 Compatible + * 0 0 0 0 NONE 0 0 0 yes No security + * 1 0 0 0 NONE 1 0 0 yes Static WEP + * 0 1 0 0 x 1x 1 x yes WPA + * 0 0 1 0 x 1x x 1 yes WPA2 + * 0 0 0 1 NONE 1 0 0 yes Ad-hoc AES + * 0 0 0 0 !=NONE 1 0 0 yes Dynamic WEP + * + * + * @param priv A pointer to struct lbs_private + * @param index Index in scantable to check against current driver settings + * @param mode Network mode: 
Infrastructure or IBSS + * + * @return Index in scantable, or error code if negative + */ +static int is_network_compatible(struct lbs_private *priv, + struct bss_descriptor *bss, uint8_t mode) +{ + int matched = 0; + + lbs_deb_enter(LBS_DEB_SCAN); + + if (bss->mode != mode) + goto done; + + matched = match_bss_no_security(&priv->secinfo, bss); + if (matched) + goto done; + matched = match_bss_static_wep(&priv->secinfo, bss); + if (matched) + goto done; + matched = match_bss_wpa(&priv->secinfo, bss); + if (matched) { + lbs_deb_scan("is_network_compatible() WPA: wpa_ie 0x%x " + "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s " + "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0], + priv->secinfo.wep_enabled ? "e" : "d", + priv->secinfo.WPAenabled ? "e" : "d", + priv->secinfo.WPA2enabled ? "e" : "d", + (bss->capability & WLAN_CAPABILITY_PRIVACY)); + goto done; + } + matched = match_bss_wpa2(&priv->secinfo, bss); + if (matched) { + lbs_deb_scan("is_network_compatible() WPA2: wpa_ie 0x%x " + "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s " + "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0], + priv->secinfo.wep_enabled ? "e" : "d", + priv->secinfo.WPAenabled ? "e" : "d", + priv->secinfo.WPA2enabled ? "e" : "d", + (bss->capability & WLAN_CAPABILITY_PRIVACY)); + goto done; + } + matched = match_bss_dynamic_wep(&priv->secinfo, bss); + if (matched) { + lbs_deb_scan("is_network_compatible() dynamic WEP: " + "wpa_ie 0x%x wpa2_ie 0x%x privacy 0x%x\n", + bss->wpa_ie[0], bss->rsn_ie[0], + (bss->capability & WLAN_CAPABILITY_PRIVACY)); + goto done; + } + + /* bss security settings don't match those configured on card */ + lbs_deb_scan("is_network_compatible() FAILED: wpa_ie 0x%x " + "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s privacy 0x%x\n", + bss->wpa_ie[0], bss->rsn_ie[0], + priv->secinfo.wep_enabled ? "e" : "d", + priv->secinfo.WPAenabled ? "e" : "d", + priv->secinfo.WPA2enabled ? "e" : "d", + (bss->capability & WLAN_CAPABILITY_PRIVACY)); + +done: + lbs_deb_leave_args(LBS_DEB_SCAN, "matched: %d", matched); + return matched; +} + +/** + * @brief This function finds a specific compatible BSSID in the scan list + * + * Used in association code + * + * @param priv A pointer to struct lbs_private + * @param bssid BSSID to find in the scan list + * @param mode Network mode: Infrastructure or IBSS + * + * @return index in BSSID list, or error return code (< 0) + */ +static struct bss_descriptor *lbs_find_bssid_in_list(struct lbs_private *priv, + uint8_t *bssid, uint8_t mode) +{ + struct bss_descriptor *iter_bss; + struct bss_descriptor *found_bss = NULL; + + lbs_deb_enter(LBS_DEB_SCAN); + + if (!bssid) + goto out; + + lbs_deb_hex(LBS_DEB_SCAN, "looking for", bssid, ETH_ALEN); + + /* Look through the scan table for a compatible match. The loop will + * continue past a matched bssid that is not compatible in case there + * is an AP with multiple SSIDs assigned to the same BSSID + */ + mutex_lock(&priv->lock); + list_for_each_entry(iter_bss, &priv->network_list, list) { + if (compare_ether_addr(iter_bss->bssid, bssid)) + continue; /* bssid doesn't match */ + switch (mode) { + case IW_MODE_INFRA: + case IW_MODE_ADHOC: + if (!is_network_compatible(priv, iter_bss, mode)) + break; + found_bss = iter_bss; + break; + default: + found_bss = iter_bss; + break; + } + } + mutex_unlock(&priv->lock); + +out: + lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss); + return found_bss; +} + +/** + * @brief This function finds ssid in ssid list. 
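The scan-list loops above rely on compare_ether_addr() returning 0 when two addresses are equal, which is why "if (compare_ether_addr(...)) continue;" skips non-matching BSSIDs. A userspace analogue, assuming the same memcmp-style contract (illustrative only, not part of the patch):

	#include <string.h>
	#include <stdint.h>

	/* 0 means "addresses match", non-zero means "different" */
	static int compare_ether_addr_like(const uint8_t a[6], const uint8_t b[6])
	{
		return memcmp(a, b, 6) != 0;
	}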
+ * + * Used in association code + * + * @param priv A pointer to struct lbs_private + * @param ssid SSID to find in the list + * @param bssid BSSID to qualify the SSID selection (if provided) + * @param mode Network mode: Infrastructure or IBSS + * + * @return index in BSSID list + */ +static struct bss_descriptor *lbs_find_ssid_in_list(struct lbs_private *priv, + uint8_t *ssid, uint8_t ssid_len, + uint8_t *bssid, uint8_t mode, + int channel) +{ + u32 bestrssi = 0; + struct bss_descriptor *iter_bss = NULL; + struct bss_descriptor *found_bss = NULL; + struct bss_descriptor *tmp_oldest = NULL; + + lbs_deb_enter(LBS_DEB_SCAN); + + mutex_lock(&priv->lock); + + list_for_each_entry(iter_bss, &priv->network_list, list) { + if (!tmp_oldest || + (iter_bss->last_scanned < tmp_oldest->last_scanned)) + tmp_oldest = iter_bss; + + if (lbs_ssid_cmp(iter_bss->ssid, iter_bss->ssid_len, + ssid, ssid_len) != 0) + continue; /* ssid doesn't match */ + if (bssid && compare_ether_addr(iter_bss->bssid, bssid) != 0) + continue; /* bssid doesn't match */ + if ((channel > 0) && (iter_bss->channel != channel)) + continue; /* channel doesn't match */ + + switch (mode) { + case IW_MODE_INFRA: + case IW_MODE_ADHOC: + if (!is_network_compatible(priv, iter_bss, mode)) + break; + + if (bssid) { + /* Found requested BSSID */ + found_bss = iter_bss; + goto out; + } + + if (SCAN_RSSI(iter_bss->rssi) > bestrssi) { + bestrssi = SCAN_RSSI(iter_bss->rssi); + found_bss = iter_bss; + } + break; + case IW_MODE_AUTO: + default: + if (SCAN_RSSI(iter_bss->rssi) > bestrssi) { + bestrssi = SCAN_RSSI(iter_bss->rssi); + found_bss = iter_bss; + } + break; + } + } + +out: + mutex_unlock(&priv->lock); + lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss); + return found_bss; +} + +static int assoc_helper_essid(struct lbs_private *priv, + struct assoc_request * assoc_req) +{ + int ret = 0; + struct bss_descriptor * bss; + int channel = -1; + DECLARE_SSID_BUF(ssid); + + lbs_deb_enter(LBS_DEB_ASSOC); + + /* FIXME: take channel into account when picking SSIDs if a channel + * is set. + */ + + if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) + channel = assoc_req->channel; + + lbs_deb_assoc("SSID '%s' requested\n", + print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len)); + if (assoc_req->mode == IW_MODE_INFRA) { + lbs_send_specific_ssid_scan(priv, assoc_req->ssid, + assoc_req->ssid_len); + + bss = lbs_find_ssid_in_list(priv, assoc_req->ssid, + assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel); + if (bss != NULL) { + memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor)); + ret = lbs_try_associate(priv, assoc_req); + } else { + lbs_deb_assoc("SSID not found; cannot associate\n"); + } + } else if (assoc_req->mode == IW_MODE_ADHOC) { + /* Scan for the network, do not save previous results. 
Stale + * scan data will cause us to join a non-existant adhoc network + */ + lbs_send_specific_ssid_scan(priv, assoc_req->ssid, + assoc_req->ssid_len); + + /* Search for the requested SSID in the scan table */ + bss = lbs_find_ssid_in_list(priv, assoc_req->ssid, + assoc_req->ssid_len, NULL, IW_MODE_ADHOC, channel); + if (bss != NULL) { + lbs_deb_assoc("SSID found, will join\n"); + memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor)); + lbs_adhoc_join(priv, assoc_req); + } else { + /* else send START command */ + lbs_deb_assoc("SSID not found, creating adhoc network\n"); + memcpy(&assoc_req->bss.ssid, &assoc_req->ssid, + IEEE80211_MAX_SSID_LEN); + assoc_req->bss.ssid_len = assoc_req->ssid_len; + lbs_adhoc_start(priv, assoc_req); + } + } + + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + + +static int assoc_helper_bssid(struct lbs_private *priv, + struct assoc_request * assoc_req) +{ + int ret = 0; + struct bss_descriptor * bss; + + lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID %pM", assoc_req->bssid); + + /* Search for index position in list for requested MAC */ + bss = lbs_find_bssid_in_list(priv, assoc_req->bssid, + assoc_req->mode); + if (bss == NULL) { + lbs_deb_assoc("ASSOC: WAP: BSSID %pM not found, " + "cannot associate.\n", assoc_req->bssid); + goto out; + } + + memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor)); + if (assoc_req->mode == IW_MODE_INFRA) { + ret = lbs_try_associate(priv, assoc_req); + lbs_deb_assoc("ASSOC: lbs_try_associate(bssid) returned %d\n", + ret); + } else if (assoc_req->mode == IW_MODE_ADHOC) { + lbs_adhoc_join(priv, assoc_req); + } + +out: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + + +static int assoc_helper_associate(struct lbs_private *priv, + struct assoc_request * assoc_req) +{ + int ret = 0, done = 0; + + lbs_deb_enter(LBS_DEB_ASSOC); + + /* If we're given and 'any' BSSID, try associating based on SSID */ + + if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) { + if (compare_ether_addr(bssid_any, assoc_req->bssid) && + compare_ether_addr(bssid_off, assoc_req->bssid)) { + ret = assoc_helper_bssid(priv, assoc_req); + done = 1; + } + } + + if (!done && test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { + ret = assoc_helper_essid(priv, assoc_req); + } + + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + + +static int assoc_helper_mode(struct lbs_private *priv, + struct assoc_request * assoc_req) +{ + int ret = 0; + + lbs_deb_enter(LBS_DEB_ASSOC); + + if (assoc_req->mode == priv->mode) + goto done; + + if (assoc_req->mode == IW_MODE_INFRA) { + if (priv->psstate != PS_STATE_FULL_POWER) + lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); + priv->psmode = LBS802_11POWERMODECAM; + } + + priv->mode = assoc_req->mode; + ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, + assoc_req->mode == IW_MODE_ADHOC ? 
2 : 1); + +done: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + +static int assoc_helper_channel(struct lbs_private *priv, + struct assoc_request * assoc_req) +{ + int ret = 0; + + lbs_deb_enter(LBS_DEB_ASSOC); + + ret = lbs_update_channel(priv); + if (ret) { + lbs_deb_assoc("ASSOC: channel: error getting channel.\n"); + goto done; + } + + if (assoc_req->channel == priv->channel) + goto done; + + if (priv->mesh_dev) { + /* Change mesh channel first; 21.p21 firmware won't let + you change channel otherwise (even though it'll return + an error to this */ + lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, + assoc_req->channel); + } + + lbs_deb_assoc("ASSOC: channel: %d -> %d\n", + priv->channel, assoc_req->channel); + + ret = lbs_set_channel(priv, assoc_req->channel); + if (ret < 0) + lbs_deb_assoc("ASSOC: channel: error setting channel.\n"); + + /* FIXME: shouldn't need to grab the channel _again_ after setting + * it since the firmware is supposed to return the new channel, but + * whatever... */ + ret = lbs_update_channel(priv); + if (ret) { + lbs_deb_assoc("ASSOC: channel: error getting channel.\n"); + goto done; + } + + if (assoc_req->channel != priv->channel) { + lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n", + assoc_req->channel); + goto restore_mesh; + } + + if (assoc_req->secinfo.wep_enabled && + (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len || + assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)) { + /* Make sure WEP keys are re-sent to firmware */ + set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags); + } + + /* Must restart/rejoin adhoc networks after channel change */ + set_bit(ASSOC_FLAG_SSID, &assoc_req->flags); + + restore_mesh: + if (priv->mesh_dev) + lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, + priv->channel); + + done: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + + +static int assoc_helper_wep_keys(struct lbs_private *priv, + struct assoc_request *assoc_req) +{ + int i; + int ret = 0; + + lbs_deb_enter(LBS_DEB_ASSOC); + + /* Set or remove WEP keys */ + if (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len || + assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len) + ret = lbs_cmd_802_11_set_wep(priv, CMD_ACT_ADD, assoc_req); + else + ret = lbs_cmd_802_11_set_wep(priv, CMD_ACT_REMOVE, assoc_req); + + if (ret) + goto out; + + /* enable/disable the MAC's WEP packet filter */ + if (assoc_req->secinfo.wep_enabled) + priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE; + else + priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE; + + lbs_set_mac_control(priv); + + mutex_lock(&priv->lock); + + /* Copy WEP keys into priv wep key fields */ + for (i = 0; i < 4; i++) { + memcpy(&priv->wep_keys[i], &assoc_req->wep_keys[i], + sizeof(struct enc_key)); + } + priv->wep_tx_keyidx = assoc_req->wep_tx_keyidx; + + mutex_unlock(&priv->lock); + +out: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + +static int assoc_helper_secinfo(struct lbs_private *priv, + struct assoc_request * assoc_req) +{ + int ret = 0; + uint16_t do_wpa; + uint16_t rsn = 0; + + lbs_deb_enter(LBS_DEB_ASSOC); + + memcpy(&priv->secinfo, &assoc_req->secinfo, + sizeof(struct lbs_802_11_security)); + + lbs_set_mac_control(priv); + + /* If RSN is already enabled, don't try to enable it again, since + * ENABLE_RSN resets internal state machines and will clobber the + * 4-way WPA handshake. 
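The RSN handling below queries the current state first and only issues the SET when the desired state differs, so an in-progress 4-way handshake is not disturbed. The same read-before-write idiom in isolation, as a hedged sketch (function names and signatures are assumptions, not part of the patch):

	#include <stdint.h>

	static int sync_rsn_state(uint16_t desired,
				  int (*get_state)(uint16_t *),
				  int (*set_state)(uint16_t))
	{
		uint16_t current = 0;
		int ret = get_state(&current);

		if (ret)
			return ret;
		return (current == desired) ? 0 : set_state(desired);
	}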
+ */ + + /* Get RSN enabled/disabled */ + ret = lbs_cmd_802_11_enable_rsn(priv, CMD_ACT_GET, &rsn); + if (ret) { + lbs_deb_assoc("Failed to get RSN status: %d\n", ret); + goto out; + } + + /* Don't re-enable RSN if it's already enabled */ + do_wpa = assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled; + if (do_wpa == rsn) + goto out; + + /* Set RSN enabled/disabled */ + ret = lbs_cmd_802_11_enable_rsn(priv, CMD_ACT_SET, &do_wpa); + +out: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + + +static int assoc_helper_wpa_keys(struct lbs_private *priv, + struct assoc_request * assoc_req) +{ + int ret = 0; + unsigned int flags = assoc_req->flags; + + lbs_deb_enter(LBS_DEB_ASSOC); + + /* Work around older firmware bug where WPA unicast and multicast + * keys must be set independently. Seen in SDIO parts with firmware + * version 5.0.11p0. + */ + + if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { + clear_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); + ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req); + assoc_req->flags = flags; + } + + if (ret) + goto out; + + memcpy(&priv->wpa_unicast_key, &assoc_req->wpa_unicast_key, + sizeof(struct enc_key)); + + if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) { + clear_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); + + ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req); + assoc_req->flags = flags; + + memcpy(&priv->wpa_mcast_key, &assoc_req->wpa_mcast_key, + sizeof(struct enc_key)); + } + +out: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + + +static int assoc_helper_wpa_ie(struct lbs_private *priv, + struct assoc_request * assoc_req) +{ + int ret = 0; + + lbs_deb_enter(LBS_DEB_ASSOC); + + if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) { + memcpy(&priv->wpa_ie, &assoc_req->wpa_ie, assoc_req->wpa_ie_len); + priv->wpa_ie_len = assoc_req->wpa_ie_len; + } else { + memset(&priv->wpa_ie, 0, MAX_WPA_IE_LEN); + priv->wpa_ie_len = 0; + } + + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + + +static int should_deauth_infrastructure(struct lbs_private *priv, + struct assoc_request * assoc_req) +{ + int ret = 0; + + if (priv->connect_status != LBS_CONNECTED) + return 0; + + lbs_deb_enter(LBS_DEB_ASSOC); + if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { + lbs_deb_assoc("Deauthenticating due to new SSID\n"); + ret = 1; + goto out; + } + + if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { + if (priv->secinfo.auth_mode != assoc_req->secinfo.auth_mode) { + lbs_deb_assoc("Deauthenticating due to new security\n"); + ret = 1; + goto out; + } + } + + if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) { + lbs_deb_assoc("Deauthenticating due to new BSSID\n"); + ret = 1; + goto out; + } + + if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) { + lbs_deb_assoc("Deauthenticating due to channel switch\n"); + ret = 1; + goto out; + } + + /* FIXME: deal with 'auto' mode somehow */ + if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) { + if (assoc_req->mode != IW_MODE_INFRA) { + lbs_deb_assoc("Deauthenticating due to leaving " + "infra mode\n"); + ret = 1; + goto out; + } + } + +out: + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return ret; +} + + +static int should_stop_adhoc(struct lbs_private *priv, + struct assoc_request * assoc_req) +{ + lbs_deb_enter(LBS_DEB_ASSOC); + + if (priv->connect_status != LBS_CONNECTED) + return 0; + + if (lbs_ssid_cmp(priv->curbssparams.ssid, + priv->curbssparams.ssid_len, + 
assoc_req->ssid, assoc_req->ssid_len) != 0) + return 1; + + /* FIXME: deal with 'auto' mode somehow */ + if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) { + if (assoc_req->mode != IW_MODE_ADHOC) + return 1; + } + + if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) { + if (assoc_req->channel != priv->channel) + return 1; + } + + lbs_deb_leave(LBS_DEB_ASSOC); + return 0; +} + + +/** + * @brief This function finds the best SSID in the Scan List + * + * Search the scan table for the best SSID that also matches the current + * adapter network preference (infrastructure or adhoc) + * + * @param priv A pointer to struct lbs_private + * + * @return index in BSSID list + */ +static struct bss_descriptor *lbs_find_best_ssid_in_list( + struct lbs_private *priv, uint8_t mode) +{ + uint8_t bestrssi = 0; + struct bss_descriptor *iter_bss; + struct bss_descriptor *best_bss = NULL; + + lbs_deb_enter(LBS_DEB_SCAN); + + mutex_lock(&priv->lock); + + list_for_each_entry(iter_bss, &priv->network_list, list) { + switch (mode) { + case IW_MODE_INFRA: + case IW_MODE_ADHOC: + if (!is_network_compatible(priv, iter_bss, mode)) + break; + if (SCAN_RSSI(iter_bss->rssi) <= bestrssi) + break; + bestrssi = SCAN_RSSI(iter_bss->rssi); + best_bss = iter_bss; + break; + case IW_MODE_AUTO: + default: + if (SCAN_RSSI(iter_bss->rssi) <= bestrssi) + break; + bestrssi = SCAN_RSSI(iter_bss->rssi); + best_bss = iter_bss; + break; + } + } + + mutex_unlock(&priv->lock); + lbs_deb_leave_args(LBS_DEB_SCAN, "best_bss %p", best_bss); + return best_bss; +} + +/** + * @brief Find the best AP + * + * Used from association worker. + * + * @param priv A pointer to struct lbs_private structure + * @param pSSID A pointer to AP's ssid + * + * @return 0--success, otherwise--fail + */ +static int lbs_find_best_network_ssid(struct lbs_private *priv, + uint8_t *out_ssid, uint8_t *out_ssid_len, uint8_t preferred_mode, + uint8_t *out_mode) +{ + int ret = -1; + struct bss_descriptor *found; + + lbs_deb_enter(LBS_DEB_SCAN); + + priv->scan_ssid_len = 0; + lbs_scan_networks(priv, 1); + if (priv->surpriseremoved) + goto out; + + found = lbs_find_best_ssid_in_list(priv, preferred_mode); + if (found && (found->ssid_len > 0)) { + memcpy(out_ssid, &found->ssid, IEEE80211_MAX_SSID_LEN); + *out_ssid_len = found->ssid_len; + *out_mode = found->mode; + ret = 0; + } + +out: + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); + return ret; +} + + +void lbs_association_worker(struct work_struct *work) +{ + struct lbs_private *priv = container_of(work, struct lbs_private, + assoc_work.work); + struct assoc_request * assoc_req = NULL; + int ret = 0; + int find_any_ssid = 0; + DECLARE_SSID_BUF(ssid); + + lbs_deb_enter(LBS_DEB_ASSOC); + + mutex_lock(&priv->lock); + assoc_req = priv->pending_assoc_req; + priv->pending_assoc_req = NULL; + priv->in_progress_assoc_req = assoc_req; + mutex_unlock(&priv->lock); + + if (!assoc_req) + goto done; + + lbs_deb_assoc( + "Association Request:\n" + " flags: 0x%08lx\n" + " SSID: '%s'\n" + " chann: %d\n" + " band: %d\n" + " mode: %d\n" + " BSSID: %pM\n" + " secinfo: %s%s%s\n" + " auth_mode: %d\n", + assoc_req->flags, + print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len), + assoc_req->channel, assoc_req->band, assoc_req->mode, + assoc_req->bssid, + assoc_req->secinfo.WPAenabled ? " WPA" : "", + assoc_req->secinfo.WPA2enabled ? " WPA2" : "", + assoc_req->secinfo.wep_enabled ? 
" WEP" : "", + assoc_req->secinfo.auth_mode); + + /* If 'any' SSID was specified, find an SSID to associate with */ + if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) && + !assoc_req->ssid_len) + find_any_ssid = 1; + + /* But don't use 'any' SSID if there's a valid locked BSSID to use */ + if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) { + if (compare_ether_addr(assoc_req->bssid, bssid_any) && + compare_ether_addr(assoc_req->bssid, bssid_off)) + find_any_ssid = 0; + } + + if (find_any_ssid) { + u8 new_mode = assoc_req->mode; + + ret = lbs_find_best_network_ssid(priv, assoc_req->ssid, + &assoc_req->ssid_len, assoc_req->mode, &new_mode); + if (ret) { + lbs_deb_assoc("Could not find best network\n"); + ret = -ENETUNREACH; + goto out; + } + + /* Ensure we switch to the mode of the AP */ + if (assoc_req->mode == IW_MODE_AUTO) { + set_bit(ASSOC_FLAG_MODE, &assoc_req->flags); + assoc_req->mode = new_mode; + } + } + + /* + * Check if the attributes being changing require deauthentication + * from the currently associated infrastructure access point. + */ + if (priv->mode == IW_MODE_INFRA) { + if (should_deauth_infrastructure(priv, assoc_req)) { + ret = lbs_cmd_80211_deauthenticate(priv, + priv->curbssparams.bssid, + WLAN_REASON_DEAUTH_LEAVING); + if (ret) { + lbs_deb_assoc("Deauthentication due to new " + "configuration request failed: %d\n", + ret); + } + } + } else if (priv->mode == IW_MODE_ADHOC) { + if (should_stop_adhoc(priv, assoc_req)) { + ret = lbs_adhoc_stop(priv); + if (ret) { + lbs_deb_assoc("Teardown of AdHoc network due to " + "new configuration request failed: %d\n", + ret); + } + + } + } + + /* Send the various configuration bits to the firmware */ + if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) { + ret = assoc_helper_mode(priv, assoc_req); + if (ret) + goto out; + } + + if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) { + ret = assoc_helper_channel(priv, assoc_req); + if (ret) + goto out; + } + + if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { + ret = assoc_helper_secinfo(priv, assoc_req); + if (ret) + goto out; + } + + if (test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) { + ret = assoc_helper_wpa_ie(priv, assoc_req); + if (ret) + goto out; + } + + /* + * v10 FW wants WPA keys to be set/cleared before WEP key operations, + * otherwise it will fail to correctly associate to WEP networks. + * Other firmware versions don't appear to care. + */ + if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) || + test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { + ret = assoc_helper_wpa_keys(priv, assoc_req); + if (ret) + goto out; + } + + if (test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags) || + test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) { + ret = assoc_helper_wep_keys(priv, assoc_req); + if (ret) + goto out; + } + + + /* SSID/BSSID should be the _last_ config option set, because they + * trigger the association attempt. 
+ */ + if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) || + test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { + int success = 1; + + ret = assoc_helper_associate(priv, assoc_req); + if (ret) { + lbs_deb_assoc("ASSOC: association unsuccessful: %d\n", + ret); + success = 0; + } + + if (priv->connect_status != LBS_CONNECTED) { + lbs_deb_assoc("ASSOC: association unsuccessful, " + "not connected\n"); + success = 0; + } + + if (success) { + lbs_deb_assoc("associated to %pM\n", + priv->curbssparams.bssid); + lbs_prepare_and_send_command(priv, + CMD_802_11_RSSI, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); + } else { + ret = -1; + } + } + +out: + if (ret) { + lbs_deb_assoc("ASSOC: reconfiguration attempt unsuccessful: %d\n", + ret); + } + + mutex_lock(&priv->lock); + priv->in_progress_assoc_req = NULL; + mutex_unlock(&priv->lock); + kfree(assoc_req); + +done: + lbs_deb_leave(LBS_DEB_ASSOC); +} + + +/* + * Caller MUST hold any necessary locks + */ +struct assoc_request *lbs_get_association_request(struct lbs_private *priv) +{ + struct assoc_request * assoc_req; + + lbs_deb_enter(LBS_DEB_ASSOC); + if (!priv->pending_assoc_req) { + priv->pending_assoc_req = kzalloc(sizeof(struct assoc_request), + GFP_KERNEL); + if (!priv->pending_assoc_req) { + lbs_pr_info("Not enough memory to allocate association" + " request!\n"); + return NULL; + } + } + + /* Copy current configuration attributes to the association request, + * but don't overwrite any that are already set. + */ + assoc_req = priv->pending_assoc_req; + if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { + memcpy(&assoc_req->ssid, &priv->curbssparams.ssid, + IEEE80211_MAX_SSID_LEN); + assoc_req->ssid_len = priv->curbssparams.ssid_len; + } + + if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) + assoc_req->channel = priv->channel; + + if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags)) + assoc_req->band = priv->curbssparams.band; + + if (!test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) + assoc_req->mode = priv->mode; + + if (!test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) { + memcpy(&assoc_req->bssid, priv->curbssparams.bssid, + ETH_ALEN); + } + + if (!test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags)) { + int i; + for (i = 0; i < 4; i++) { + memcpy(&assoc_req->wep_keys[i], &priv->wep_keys[i], + sizeof(struct enc_key)); + } + } + + if (!test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) + assoc_req->wep_tx_keyidx = priv->wep_tx_keyidx; + + if (!test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) { + memcpy(&assoc_req->wpa_mcast_key, &priv->wpa_mcast_key, + sizeof(struct enc_key)); + } + + if (!test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { + memcpy(&assoc_req->wpa_unicast_key, &priv->wpa_unicast_key, + sizeof(struct enc_key)); + } + + if (!test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { + memcpy(&assoc_req->secinfo, &priv->secinfo, + sizeof(struct lbs_802_11_security)); + } + + if (!test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) { + memcpy(&assoc_req->wpa_ie, &priv->wpa_ie, + MAX_WPA_IE_LEN); + assoc_req->wpa_ie_len = priv->wpa_ie_len; + } + + lbs_deb_leave(LBS_DEB_ASSOC); + return assoc_req; +} + + +/** + * @brief Deauthenticate from a specific BSS + * + * @param priv A pointer to struct lbs_private structure + * @param bssid The specific BSS to deauthenticate from + * @param reason The 802.11 sec. 
7.3.1.7 Reason Code for deauthenticating + * + * @return 0 on success, error on failure + */ +int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, u8 bssid[ETH_ALEN], + u16 reason) +{ + struct cmd_ds_802_11_deauthenticate cmd; + int ret; + + lbs_deb_enter(LBS_DEB_JOIN); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + memcpy(cmd.macaddr, &bssid[0], ETH_ALEN); + cmd.reasoncode = cpu_to_le16(reason); + + ret = lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd); + + /* Clean up everything even if there was an error; can't assume that + * we're still authenticated to the AP after trying to deauth. + */ + lbs_mac_event_disconnected(priv); + + lbs_deb_leave(LBS_DEB_JOIN); + return ret; +} + diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h new file mode 100644 index 0000000..40621b7 --- /dev/null +++ b/drivers/net/wireless/libertas/assoc.h @@ -0,0 +1,155 @@ +/* Copyright (C) 2006, Red Hat, Inc. */ + +#ifndef _LBS_ASSOC_H_ +#define _LBS_ASSOC_H_ + + +#include "defs.h" +#include "host.h" + + +struct lbs_private; + +/* + * In theory, the IE is limited to the IE length, 255, + * but in practice 64 bytes are enough. + */ +#define MAX_WPA_IE_LEN 64 + + + +struct lbs_802_11_security { + u8 WPAenabled; + u8 WPA2enabled; + u8 wep_enabled; + u8 auth_mode; + u32 key_mgmt; +}; + +/** Current Basic Service Set State Structure */ +struct current_bss_params { + /** bssid */ + u8 bssid[ETH_ALEN]; + /** ssid */ + u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; + u8 ssid_len; + + /** band */ + u8 band; + /** channel is directly in priv->channel */ + /** zero-terminated array of supported data rates */ + u8 rates[MAX_RATES + 1]; +}; + +/** + * @brief Structure used to store information for each beacon/probe response + */ +struct bss_descriptor { + u8 bssid[ETH_ALEN]; + + u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; + u8 ssid_len; + + u16 capability; + u32 rssi; + u32 channel; + u16 beaconperiod; + __le16 atimwindow; + + /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */ + u8 mode; + + /* zero-terminated array of supported data rates */ + u8 rates[MAX_RATES + 1]; + + unsigned long last_scanned; + + union ieee_phy_param_set phy; + union ieee_ss_param_set ss; + + u8 wpa_ie[MAX_WPA_IE_LEN]; + size_t wpa_ie_len; + u8 rsn_ie[MAX_WPA_IE_LEN]; + size_t rsn_ie_len; + + u8 mesh; + + struct list_head list; +}; + +/** Association request + * + * Encapsulates all the options that describe a specific assocation request + * or configuration of the wireless card's radio, mode, and security settings. 
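+ *
+ * Each ASSOC_FLAG_* constant below is a bit number in the 'flags' word:
+ * callers set_bit() the attributes they have changed, and
+ * lbs_association_worker() test_bit()s them to decide which settings to
+ * push down to the firmware.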
+ */ +struct assoc_request { +#define ASSOC_FLAG_SSID 1 +#define ASSOC_FLAG_CHANNEL 2 +#define ASSOC_FLAG_BAND 3 +#define ASSOC_FLAG_MODE 4 +#define ASSOC_FLAG_BSSID 5 +#define ASSOC_FLAG_WEP_KEYS 6 +#define ASSOC_FLAG_WEP_TX_KEYIDX 7 +#define ASSOC_FLAG_WPA_MCAST_KEY 8 +#define ASSOC_FLAG_WPA_UCAST_KEY 9 +#define ASSOC_FLAG_SECINFO 10 +#define ASSOC_FLAG_WPA_IE 11 + unsigned long flags; + + u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; + u8 ssid_len; + u8 channel; + u8 band; + u8 mode; + u8 bssid[ETH_ALEN] __attribute__ ((aligned (2))); + + /** WEP keys */ + struct enc_key wep_keys[4]; + u16 wep_tx_keyidx; + + /** WPA keys */ + struct enc_key wpa_mcast_key; + struct enc_key wpa_unicast_key; + + struct lbs_802_11_security secinfo; + + /** WPA Information Elements*/ + u8 wpa_ie[MAX_WPA_IE_LEN]; + u8 wpa_ie_len; + + /* BSS to associate with for infrastructure of Ad-Hoc join */ + struct bss_descriptor bss; +}; + + +extern u8 lbs_bg_rates[MAX_RATES]; + +void lbs_association_worker(struct work_struct *work); +struct assoc_request *lbs_get_association_request(struct lbs_private *priv); + +int lbs_adhoc_stop(struct lbs_private *priv); + +int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, + u8 bssid[ETH_ALEN], u16 reason); + +int lbs_cmd_802_11_rssi(struct lbs_private *priv, + struct cmd_ds_command *cmd); +int lbs_ret_802_11_rssi(struct lbs_private *priv, + struct cmd_ds_command *resp); + +int lbs_cmd_bcn_ctrl(struct lbs_private *priv, + struct cmd_ds_command *cmd, + u16 cmd_action); +int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv, + struct cmd_ds_command *resp); + +int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action, + struct assoc_request *assoc); + +int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action, + uint16_t *enable); + +int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action, + struct assoc_request *assoc); + +#endif /* _LBS_ASSOC_H */ diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c new file mode 100644 index 0000000..4396dcc --- /dev/null +++ b/drivers/net/wireless/libertas/cfg.c @@ -0,0 +1,198 @@ +/* + * Implement cfg80211 ("iw") support. 
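+ *
+ * (Only channel selection is wired up at this stage: lbs_cfg80211_ops
+ * below provides just .set_channel.)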
+ * + * Copyright (C) 2009 M&N Solutions GmbH, 61191 Rosbach, Germany + * Holger Schurig + * + */ + +#include + +#include "cfg.h" +#include "cmd.h" + + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static struct ieee80211_channel lbs_2ghz_channels[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +#define RATETAB_ENT(_rate, _rateid, _flags) { \ + .bitrate = (_rate), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ +} + + +static struct ieee80211_rate lbs_rates[] = { + RATETAB_ENT(10, 0x1, 0), + RATETAB_ENT(20, 0x2, 0), + RATETAB_ENT(55, 0x4, 0), + RATETAB_ENT(110, 0x8, 0), + RATETAB_ENT(60, 0x10, 0), + RATETAB_ENT(90, 0x20, 0), + RATETAB_ENT(120, 0x40, 0), + RATETAB_ENT(180, 0x80, 0), + RATETAB_ENT(240, 0x100, 0), + RATETAB_ENT(360, 0x200, 0), + RATETAB_ENT(480, 0x400, 0), + RATETAB_ENT(540, 0x800, 0), +}; + +static struct ieee80211_supported_band lbs_band_2ghz = { + .channels = lbs_2ghz_channels, + .n_channels = ARRAY_SIZE(lbs_2ghz_channels), + .bitrates = lbs_rates, + .n_bitrates = ARRAY_SIZE(lbs_rates), +}; + + +static const u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, +}; + + + +static int lbs_cfg_set_channel(struct wiphy *wiphy, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + int ret = -ENOTSUPP; + + lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", chan->center_freq, channel_type); + + if (channel_type != NL80211_CHAN_NO_HT) + goto out; + + ret = lbs_set_channel(priv, chan->hw_value); + + out: + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + + + +static struct cfg80211_ops lbs_cfg80211_ops = { + .set_channel = lbs_cfg_set_channel, +}; + + +/* + * At this time lbs_private *priv doesn't even exist, so we just allocate + * memory and don't initialize the wiphy further. This is postponed until we + * can talk to the firmware and happens at registration time in + * lbs_cfg_wiphy_register(). + */ +struct wireless_dev *lbs_cfg_alloc(struct device *dev) +{ + int ret = 0; + struct wireless_dev *wdev; + + lbs_deb_enter(LBS_DEB_CFG80211); + + wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); + if (!wdev) { + dev_err(dev, "cannot allocate wireless device\n"); + return ERR_PTR(-ENOMEM); + } + + wdev->wiphy = wiphy_new(&lbs_cfg80211_ops, sizeof(struct lbs_private)); + if (!wdev->wiphy) { + dev_err(dev, "cannot allocate wiphy\n"); + ret = -ENOMEM; + goto err_wiphy_new; + } + + lbs_deb_leave(LBS_DEB_CFG80211); + return wdev; + + err_wiphy_new: + kfree(wdev); + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ERR_PTR(ret); +} + + +/* + * This function get's called after lbs_setup_firmware() determined the + * firmware capabities. So we can setup the wiphy according to our + * hardware/firmware. 
+ */ +int lbs_cfg_register(struct lbs_private *priv) +{ + struct wireless_dev *wdev = priv->wdev; + int ret; + + lbs_deb_enter(LBS_DEB_CFG80211); + + wdev->wiphy->max_scan_ssids = 1; + wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + + /* TODO: BIT(NL80211_IFTYPE_ADHOC); */ + wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + + /* TODO: honor priv->regioncode */ + wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz; + + /* + * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have + * never seen a firmware without WPA + */ + wdev->wiphy->cipher_suites = cipher_suites; + wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + + ret = wiphy_register(wdev->wiphy); + if (ret < 0) + lbs_pr_err("cannot register wiphy device\n"); + + ret = register_netdev(priv->dev); + if (ret) + lbs_pr_err("cannot register network device\n"); + + lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); + return ret; +} + + +void lbs_cfg_free(struct lbs_private *priv) +{ + struct wireless_dev *wdev = priv->wdev; + + lbs_deb_enter(LBS_DEB_CFG80211); + + if (!wdev) + return; + + if (wdev->wiphy) { + wiphy_unregister(wdev->wiphy); + wiphy_free(wdev->wiphy); + } + kfree(wdev); +} diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h new file mode 100644 index 0000000..e09a193 --- /dev/null +++ b/drivers/net/wireless/libertas/cfg.h @@ -0,0 +1,16 @@ +#ifndef __LBS_CFG80211_H__ +#define __LBS_CFG80211_H__ + +#include "dev.h" + +struct wireless_dev *lbs_cfg_alloc(struct device *dev); +int lbs_cfg_register(struct lbs_private *priv); +void lbs_cfg_free(struct lbs_private *priv); + +int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid, + u8 ssid_len); +int lbs_scan_networks(struct lbs_private *priv, int full_scan); +void lbs_cfg_scan_worker(struct work_struct *work); + + +#endif diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c new file mode 100644 index 0000000..82371ef --- /dev/null +++ b/drivers/net/wireless/libertas/cmd.c @@ -0,0 +1,1608 @@ +/** + * This file contains the handling of command. + * It prepares command and sends it to firmware when it is ready. + */ + +#include +#include + +#include "host.h" +#include "decl.h" +#include "defs.h" +#include "dev.h" +#include "assoc.h" +#include "wext.h" +#include "scan.h" +#include "cmd.h" + + +static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv); + +/** + * @brief Simple callback that copies response back into command + * + * @param priv A pointer to struct lbs_private structure + * @param extra A pointer to the original command structure for which + * 'resp' is a response + * @param resp A pointer to the command response + * + * @return 0 on success, error on failure + */ +int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra, + struct cmd_header *resp) +{ + struct cmd_header *buf = (void *)extra; + uint16_t copy_len; + + copy_len = min(le16_to_cpu(buf->size), le16_to_cpu(resp->size)); + memcpy(buf, resp, copy_len); + return 0; +} +EXPORT_SYMBOL_GPL(lbs_cmd_copyback); + +/** + * @brief Simple callback that ignores the result. Use this if + * you just want to send a command to the hardware, but don't + * care for the result. 
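+ *
+ * lbs_cmd_async() installs this callback, and lbs_complete_command()
+ * recognises it and recycles the command node straight back onto
+ * cmdfreeq. lbs_set_mac_control() is one user, firing CMD_MAC_CONTROL
+ * this way.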
+ * + * @param priv ignored + * @param extra ignored + * @param resp ignored + * + * @return 0 for success + */ +static int lbs_cmd_async_callback(struct lbs_private *priv, unsigned long extra, + struct cmd_header *resp) +{ + return 0; +} + + +/** + * @brief Checks whether a command is allowed in Power Save mode + * + * @param command the command ID + * @return 1 if allowed, 0 if not allowed + */ +static u8 is_command_allowed_in_ps(u16 cmd) +{ + switch (cmd) { + case CMD_802_11_RSSI: + return 1; + default: + break; + } + return 0; +} + +/** + * @brief This function checks if the command is allowed. + * + * @param priv A pointer to lbs_private structure + * @return allowed or not allowed. + */ + +static int lbs_is_cmd_allowed(struct lbs_private *priv) +{ + int ret = 1; + + lbs_deb_enter(LBS_DEB_CMD); + + if (!priv->is_auto_deep_sleep_enabled) { + if (priv->is_deep_sleep) { + lbs_deb_cmd("command not allowed in deep sleep\n"); + ret = 0; + } + } + + lbs_deb_leave(LBS_DEB_CMD); + return ret; +} + +/** + * @brief Updates the hardware details like MAC address and regulatory region + * + * @param priv A pointer to struct lbs_private structure + * + * @return 0 on success, error on failure + */ +int lbs_update_hw_spec(struct lbs_private *priv) +{ + struct cmd_ds_get_hw_spec cmd; + int ret = -1; + u32 i; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN); + ret = lbs_cmd_with_response(priv, CMD_GET_HW_SPEC, &cmd); + if (ret) + goto out; + + priv->fwcapinfo = le32_to_cpu(cmd.fwcapinfo); + + /* The firmware release is in an interesting format: the patch + * level is in the most significant nibble ... so fix that: */ + priv->fwrelease = le32_to_cpu(cmd.fwrelease); + priv->fwrelease = (priv->fwrelease << 8) | + (priv->fwrelease >> 24 & 0xff); + + /* Some firmware capabilities: + * CF card firmware 5.0.16p0: cap 0x00000303 + * USB dongle firmware 5.110.17p2: cap 0x00000303 + */ + lbs_pr_info("%pM, fw %u.%u.%up%u, cap 0x%08x\n", + cmd.permanentaddr, + priv->fwrelease >> 24 & 0xff, + priv->fwrelease >> 16 & 0xff, + priv->fwrelease >> 8 & 0xff, + priv->fwrelease & 0xff, + priv->fwcapinfo); + lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n", + cmd.hwifversion, cmd.version); + + /* Clamp region code to 8-bit since FW spec indicates that it should + * only ever be 8-bit, even though the field size is 16-bit. Some firmware + * returns non-zero high 8 bits here. + * + * Firmware version 4.0.102 used in CF8381 has region code shifted. We + * need to check for this problem and handle it properly. 
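+ *
+ * (Illustrative example: a reported regioncode of 0x3010 is reduced to
+ * 0x10 by the masking below, which also happens to be the USA code used
+ * as the fallback default.)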
+ */ + if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V4) + priv->regioncode = (le16_to_cpu(cmd.regioncode) >> 8) & 0xFF; + else + priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF; + + for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) { + /* use the region code to search for the index */ + if (priv->regioncode == lbs_region_code_to_index[i]) + break; + } + + /* if it's unidentified region code, use the default (USA) */ + if (i >= MRVDRV_MAX_REGION_CODE) { + priv->regioncode = 0x10; + lbs_pr_info("unidentified region code; using the default (USA)\n"); + } + + if (priv->current_addr[0] == 0xff) + memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN); + + memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN); + if (priv->mesh_dev) + memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN); + + if (lbs_set_regiontable(priv, priv->regioncode, 0)) { + ret = -1; + goto out; + } + +out: + lbs_deb_leave(LBS_DEB_CMD); + return ret; +} + +int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria, + struct wol_config *p_wol_config) +{ + struct cmd_ds_host_sleep cmd_config; + int ret; + + cmd_config.hdr.size = cpu_to_le16(sizeof(cmd_config)); + cmd_config.criteria = cpu_to_le32(criteria); + cmd_config.gpio = priv->wol_gpio; + cmd_config.gap = priv->wol_gap; + + if (p_wol_config != NULL) + memcpy((uint8_t *)&cmd_config.wol_conf, (uint8_t *)p_wol_config, + sizeof(struct wol_config)); + else + cmd_config.wol_conf.action = CMD_ACT_ACTION_NONE; + + ret = lbs_cmd_with_response(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config); + if (!ret) { + if (criteria) { + lbs_deb_cmd("Set WOL criteria to %x\n", criteria); + priv->wol_criteria = criteria; + } else + memcpy((uint8_t *) p_wol_config, + (uint8_t *)&cmd_config.wol_conf, + sizeof(struct wol_config)); + } else { + lbs_pr_info("HOST_SLEEP_CFG failed %d\n", ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(lbs_host_sleep_cfg); + +static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd, + u16 cmd_action) +{ + struct cmd_ds_802_11_ps_mode *psm = &cmd->params.psmode; + + lbs_deb_enter(LBS_DEB_CMD); + + cmd->command = cpu_to_le16(CMD_802_11_PS_MODE); + cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) + + sizeof(struct cmd_header)); + psm->action = cpu_to_le16(cmd_action); + psm->multipledtim = 0; + switch (cmd_action) { + case CMD_SUBCMD_ENTER_PS: + lbs_deb_cmd("PS command:" "SubCode- Enter PS\n"); + + psm->locallisteninterval = 0; + psm->nullpktinterval = 0; + psm->multipledtim = + cpu_to_le16(MRVDRV_DEFAULT_MULTIPLE_DTIM); + break; + + case CMD_SUBCMD_EXIT_PS: + lbs_deb_cmd("PS command:" "SubCode- Exit PS\n"); + break; + + case CMD_SUBCMD_SLEEP_CONFIRMED: + lbs_deb_cmd("PS command: SubCode- sleep confirm\n"); + break; + + default: + break; + } + + lbs_deb_leave(LBS_DEB_CMD); + return 0; +} + +int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, + struct sleep_params *sp) +{ + struct cmd_ds_802_11_sleep_params cmd; + int ret; + + lbs_deb_enter(LBS_DEB_CMD); + + if (cmd_action == CMD_ACT_GET) { + memset(&cmd, 0, sizeof(cmd)); + } else { + cmd.error = cpu_to_le16(sp->sp_error); + cmd.offset = cpu_to_le16(sp->sp_offset); + cmd.stabletime = cpu_to_le16(sp->sp_stabletime); + cmd.calcontrol = sp->sp_calcontrol; + cmd.externalsleepclk = sp->sp_extsleepclk; + cmd.reserved = cpu_to_le16(sp->sp_reserved); + } + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(cmd_action); + + ret = lbs_cmd_with_response(priv, CMD_802_11_SLEEP_PARAMS, &cmd); + + if (!ret) { + lbs_deb_cmd("error 0x%x, offset 0x%x, 
stabletime 0x%x, " + "calcontrol 0x%x extsleepclk 0x%x\n", + le16_to_cpu(cmd.error), le16_to_cpu(cmd.offset), + le16_to_cpu(cmd.stabletime), cmd.calcontrol, + cmd.externalsleepclk); + + sp->sp_error = le16_to_cpu(cmd.error); + sp->sp_offset = le16_to_cpu(cmd.offset); + sp->sp_stabletime = le16_to_cpu(cmd.stabletime); + sp->sp_calcontrol = cmd.calcontrol; + sp->sp_extsleepclk = cmd.externalsleepclk; + sp->sp_reserved = le16_to_cpu(cmd.reserved); + } + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return 0; +} + +static int lbs_wait_for_ds_awake(struct lbs_private *priv) +{ + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + if (priv->is_deep_sleep) { + if (!wait_event_interruptible_timeout(priv->ds_awake_q, + !priv->is_deep_sleep, (10 * HZ))) { + lbs_pr_err("ds_awake_q: timer expired\n"); + ret = -1; + } + } + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep) +{ + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + if (deep_sleep) { + if (priv->is_deep_sleep != 1) { + lbs_deb_cmd("deep sleep: sleep\n"); + BUG_ON(!priv->enter_deep_sleep); + ret = priv->enter_deep_sleep(priv); + if (!ret) { + netif_stop_queue(priv->dev); + netif_carrier_off(priv->dev); + } + } else { + lbs_pr_err("deep sleep: already enabled\n"); + } + } else { + if (priv->is_deep_sleep) { + lbs_deb_cmd("deep sleep: wakeup\n"); + BUG_ON(!priv->exit_deep_sleep); + ret = priv->exit_deep_sleep(priv); + if (!ret) { + ret = lbs_wait_for_ds_awake(priv); + if (ret) + lbs_pr_err("deep sleep: wakeup" + "failed\n"); + } + } + } + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +/** + * @brief Set an SNMP MIB value + * + * @param priv A pointer to struct lbs_private structure + * @param oid The OID to set in the firmware + * @param val Value to set the OID to + * + * @return 0 on success, error on failure + */ +int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val) +{ + struct cmd_ds_802_11_snmp_mib cmd; + int ret; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof (cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + cmd.oid = cpu_to_le16((u16) oid); + + switch (oid) { + case SNMP_MIB_OID_BSS_TYPE: + cmd.bufsize = cpu_to_le16(sizeof(u8)); + cmd.value[0] = val; + break; + case SNMP_MIB_OID_11D_ENABLE: + case SNMP_MIB_OID_FRAG_THRESHOLD: + case SNMP_MIB_OID_RTS_THRESHOLD: + case SNMP_MIB_OID_SHORT_RETRY_LIMIT: + case SNMP_MIB_OID_LONG_RETRY_LIMIT: + cmd.bufsize = cpu_to_le16(sizeof(u16)); + *((__le16 *)(&cmd.value)) = cpu_to_le16(val); + break; + default: + lbs_deb_cmd("SNMP_CMD: (set) unhandled OID 0x%x\n", oid); + ret = -EINVAL; + goto out; + } + + lbs_deb_cmd("SNMP_CMD: (set) oid 0x%x, oid size 0x%x, value 0x%x\n", + le16_to_cpu(cmd.oid), le16_to_cpu(cmd.bufsize), val); + + ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd); + +out: + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +/** + * @brief Get an SNMP MIB value + * + * @param priv A pointer to struct lbs_private structure + * @param oid The OID to retrieve from the firmware + * @param out_val Location for the returned value + * + * @return 0 on success, error on failure + */ +int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val) +{ + struct cmd_ds_802_11_snmp_mib cmd; + int ret; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof (cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_GET); + cmd.oid = cpu_to_le16(oid); + + ret = 
lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd); + if (ret) + goto out; + + switch (le16_to_cpu(cmd.bufsize)) { + case sizeof(u8): + *out_val = cmd.value[0]; + break; + case sizeof(u16): + *out_val = le16_to_cpu(*((__le16 *)(&cmd.value))); + break; + default: + lbs_deb_cmd("SNMP_CMD: (get) unhandled OID 0x%x size %d\n", + oid, le16_to_cpu(cmd.bufsize)); + break; + } + +out: + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +/** + * @brief Get the min, max, and current TX power + * + * @param priv A pointer to struct lbs_private structure + * @param curlevel Current power level in dBm + * @param minlevel Minimum supported power level in dBm (optional) + * @param maxlevel Maximum supported power level in dBm (optional) + * + * @return 0 on success, error on failure + */ +int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel, + s16 *maxlevel) +{ + struct cmd_ds_802_11_rf_tx_power cmd; + int ret; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_GET); + + ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd); + if (ret == 0) { + *curlevel = le16_to_cpu(cmd.curlevel); + if (minlevel) + *minlevel = cmd.minlevel; + if (maxlevel) + *maxlevel = cmd.maxlevel; + } + + lbs_deb_leave(LBS_DEB_CMD); + return ret; +} + +/** + * @brief Set the TX power + * + * @param priv A pointer to struct lbs_private structure + * @param dbm The desired power level in dBm + * + * @return 0 on success, error on failure + */ +int lbs_set_tx_power(struct lbs_private *priv, s16 dbm) +{ + struct cmd_ds_802_11_rf_tx_power cmd; + int ret; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + cmd.curlevel = cpu_to_le16(dbm); + + lbs_deb_cmd("SET_RF_TX_POWER: %d dBm\n", dbm); + + ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd); + + lbs_deb_leave(LBS_DEB_CMD); + return ret; +} + +static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd, + u16 cmd_action, void *pdata_buf) +{ + struct cmd_ds_802_11_monitor_mode *monitor = &cmd->params.monitor; + + cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE); + cmd->size = + cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) + + sizeof(struct cmd_header)); + + monitor->action = cpu_to_le16(cmd_action); + if (cmd_action == CMD_ACT_SET) { + monitor->mode = + cpu_to_le16((u16) (*(u32 *) pdata_buf)); + } + + return 0; +} + +/** + * @brief Get the radio channel + * + * @param priv A pointer to struct lbs_private structure + * + * @return The channel on success, error on failure + */ +static int lbs_get_channel(struct lbs_private *priv) +{ + struct cmd_ds_802_11_rf_channel cmd; + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_GET); + + ret = lbs_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd); + if (ret) + goto out; + + ret = le16_to_cpu(cmd.channel); + lbs_deb_cmd("current radio channel is %d\n", ret); + +out: + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +int lbs_update_channel(struct lbs_private *priv) +{ + int ret; + + /* the channel in f/w could be out of sync; get the current channel */ + lbs_deb_enter(LBS_DEB_ASSOC); + + ret = lbs_get_channel(priv); + if (ret > 0) { + priv->channel = ret; + ret = 0; + } + lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); + return 
ret; +} + +/** + * @brief Set the radio channel + * + * @param priv A pointer to struct lbs_private structure + * @param channel The desired channel, or 0 to clear a locked channel + * + * @return 0 on success, error on failure + */ +int lbs_set_channel(struct lbs_private *priv, u8 channel) +{ + struct cmd_ds_802_11_rf_channel cmd; +#ifdef DEBUG + u8 old_channel = priv->channel; +#endif + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET); + cmd.channel = cpu_to_le16(channel); + + ret = lbs_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd); + if (ret) + goto out; + + priv->channel = (uint8_t) le16_to_cpu(cmd.channel); + lbs_deb_cmd("channel switch from %d to %d\n", old_channel, + priv->channel); + +out: + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr, + u8 cmd_action, void *pdata_buf) +{ + struct lbs_offset_value *offval; + + lbs_deb_enter(LBS_DEB_CMD); + + offval = (struct lbs_offset_value *)pdata_buf; + + switch (le16_to_cpu(cmdptr->command)) { + case CMD_MAC_REG_ACCESS: + { + struct cmd_ds_mac_reg_access *macreg; + + cmdptr->size = + cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access) + + sizeof(struct cmd_header)); + macreg = + (struct cmd_ds_mac_reg_access *)&cmdptr->params. + macreg; + + macreg->action = cpu_to_le16(cmd_action); + macreg->offset = cpu_to_le16((u16) offval->offset); + macreg->value = cpu_to_le32(offval->value); + + break; + } + + case CMD_BBP_REG_ACCESS: + { + struct cmd_ds_bbp_reg_access *bbpreg; + + cmdptr->size = + cpu_to_le16(sizeof + (struct cmd_ds_bbp_reg_access) + + sizeof(struct cmd_header)); + bbpreg = + (struct cmd_ds_bbp_reg_access *)&cmdptr->params. + bbpreg; + + bbpreg->action = cpu_to_le16(cmd_action); + bbpreg->offset = cpu_to_le16((u16) offval->offset); + bbpreg->value = (u8) offval->value; + + break; + } + + case CMD_RF_REG_ACCESS: + { + struct cmd_ds_rf_reg_access *rfreg; + + cmdptr->size = + cpu_to_le16(sizeof + (struct cmd_ds_rf_reg_access) + + sizeof(struct cmd_header)); + rfreg = + (struct cmd_ds_rf_reg_access *)&cmdptr->params. + rfreg; + + rfreg->action = cpu_to_le16(cmd_action); + rfreg->offset = cpu_to_le16((u16) offval->offset); + rfreg->value = (u8) offval->value; + + break; + } + + default: + break; + } + + lbs_deb_leave(LBS_DEB_CMD); + return 0; +} + +static void lbs_queue_cmd(struct lbs_private *priv, + struct cmd_ctrl_node *cmdnode) +{ + unsigned long flags; + int addtail = 1; + + lbs_deb_enter(LBS_DEB_HOST); + + if (!cmdnode) { + lbs_deb_host("QUEUE_CMD: cmdnode is NULL\n"); + goto done; + } + if (!cmdnode->cmdbuf->size) { + lbs_deb_host("DNLD_CMD: cmd size is zero\n"); + goto done; + } + cmdnode->result = 0; + + /* Exit_PS command needs to be queued in the header always. 
*/ + if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_PS_MODE) { + struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf[1]; + + if (psm->action == cpu_to_le16(CMD_SUBCMD_EXIT_PS)) { + if (priv->psstate != PS_STATE_FULL_POWER) + addtail = 0; + } + } + + spin_lock_irqsave(&priv->driver_lock, flags); + + if (addtail) + list_add_tail(&cmdnode->list, &priv->cmdpendingq); + else + list_add(&cmdnode->list, &priv->cmdpendingq); + + spin_unlock_irqrestore(&priv->driver_lock, flags); + + lbs_deb_host("QUEUE_CMD: inserted command 0x%04x into cmdpendingq\n", + le16_to_cpu(cmdnode->cmdbuf->command)); + +done: + lbs_deb_leave(LBS_DEB_HOST); +} + +static void lbs_submit_command(struct lbs_private *priv, + struct cmd_ctrl_node *cmdnode) +{ + unsigned long flags; + struct cmd_header *cmd; + uint16_t cmdsize; + uint16_t command; + int timeo = 3 * HZ; + int ret; + + lbs_deb_enter(LBS_DEB_HOST); + + cmd = cmdnode->cmdbuf; + + spin_lock_irqsave(&priv->driver_lock, flags); + priv->cur_cmd = cmdnode; + priv->cur_cmd_retcode = 0; + spin_unlock_irqrestore(&priv->driver_lock, flags); + + cmdsize = le16_to_cpu(cmd->size); + command = le16_to_cpu(cmd->command); + + /* These commands take longer */ + if (command == CMD_802_11_SCAN || command == CMD_802_11_ASSOCIATE) + timeo = 5 * HZ; + + lbs_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n", + command, le16_to_cpu(cmd->seqnum), cmdsize); + lbs_deb_hex(LBS_DEB_CMD, "DNLD_CMD", (void *) cmdnode->cmdbuf, cmdsize); + + ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize); + + if (ret) { + lbs_pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret); + /* Let the timer kick in and retry, and potentially reset + the whole thing if the condition persists */ + timeo = HZ/4; + } + + if (command == CMD_802_11_DEEP_SLEEP) { + if (priv->is_auto_deep_sleep_enabled) { + priv->wakeup_dev_required = 1; + priv->dnld_sent = 0; + } + priv->is_deep_sleep = 1; + lbs_complete_command(priv, cmdnode, 0); + } else { + /* Setup the timer after transmit command */ + mod_timer(&priv->command_timer, jiffies + timeo); + } + + lbs_deb_leave(LBS_DEB_HOST); +} + +/** + * This function inserts command node to cmdfreeq + * after cleans it. Requires priv->driver_lock held. 
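+ *
+ * Callers that do not already hold the lock should use the
+ * lbs_cleanup_and_insert_cmd() wrapper below, which takes
+ * priv->driver_lock around this helper.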
+ */ +static void __lbs_cleanup_and_insert_cmd(struct lbs_private *priv, + struct cmd_ctrl_node *cmdnode) +{ + lbs_deb_enter(LBS_DEB_HOST); + + if (!cmdnode) + goto out; + + cmdnode->callback = NULL; + cmdnode->callback_arg = 0; + + memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE); + + list_add_tail(&cmdnode->list, &priv->cmdfreeq); + out: + lbs_deb_leave(LBS_DEB_HOST); +} + +static void lbs_cleanup_and_insert_cmd(struct lbs_private *priv, + struct cmd_ctrl_node *ptempcmd) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->driver_lock, flags); + __lbs_cleanup_and_insert_cmd(priv, ptempcmd); + spin_unlock_irqrestore(&priv->driver_lock, flags); +} + +void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd, + int result) +{ + if (cmd == priv->cur_cmd) + priv->cur_cmd_retcode = result; + + cmd->result = result; + cmd->cmdwaitqwoken = 1; + wake_up_interruptible(&cmd->cmdwait_q); + + if (!cmd->callback || cmd->callback == lbs_cmd_async_callback) + __lbs_cleanup_and_insert_cmd(priv, cmd); + priv->cur_cmd = NULL; +} + +int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on) +{ + struct cmd_ds_802_11_radio_control cmd; + int ret = -EINVAL; + + lbs_deb_enter(LBS_DEB_CMD); + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + + /* Only v8 and below support setting the preamble */ + if (priv->fwrelease < 0x09000000) { + switch (preamble) { + case RADIO_PREAMBLE_SHORT: + case RADIO_PREAMBLE_AUTO: + case RADIO_PREAMBLE_LONG: + cmd.control = cpu_to_le16(preamble); + break; + default: + goto out; + } + } + + if (radio_on) + cmd.control |= cpu_to_le16(0x1); + else { + cmd.control &= cpu_to_le16(~0x1); + priv->txpower_cur = 0; + } + + lbs_deb_cmd("RADIO_CONTROL: radio %s, preamble %d\n", + radio_on ? "ON" : "OFF", preamble); + + priv->radio_on = radio_on; + + ret = lbs_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd); + +out: + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +void lbs_set_mac_control(struct lbs_private *priv) +{ + struct cmd_ds_mac_control cmd; + + lbs_deb_enter(LBS_DEB_CMD); + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(priv->mac_control); + cmd.reserved = 0; + + lbs_cmd_async(priv, CMD_MAC_CONTROL, &cmd.hdr, sizeof(cmd)); + + lbs_deb_leave(LBS_DEB_CMD); +} + +/** + * @brief This function prepare the command before send to firmware. 
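+ *
+ * Typical use, as seen in the association worker in assoc.c:
+ *   lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
+ *                                CMD_OPTION_WAITFORRSP, 0, NULL);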
+ * + * @param priv A pointer to struct lbs_private structure + * @param cmd_no command number + * @param cmd_action command action: GET or SET + * @param wait_option wait option: wait response or not + * @param cmd_oid cmd oid: treated as sub command + * @param pdata_buf A pointer to informaion buffer + * @return 0 or -1 + */ +int lbs_prepare_and_send_command(struct lbs_private *priv, + u16 cmd_no, + u16 cmd_action, + u16 wait_option, u32 cmd_oid, void *pdata_buf) +{ + int ret = 0; + struct cmd_ctrl_node *cmdnode; + struct cmd_ds_command *cmdptr; + unsigned long flags; + + lbs_deb_enter(LBS_DEB_HOST); + + if (!priv) { + lbs_deb_host("PREP_CMD: priv is NULL\n"); + ret = -1; + goto done; + } + + if (priv->surpriseremoved) { + lbs_deb_host("PREP_CMD: card removed\n"); + ret = -1; + goto done; + } + + if (!lbs_is_cmd_allowed(priv)) { + ret = -EBUSY; + goto done; + } + + cmdnode = lbs_get_cmd_ctrl_node(priv); + + if (cmdnode == NULL) { + lbs_deb_host("PREP_CMD: cmdnode is NULL\n"); + + /* Wake up main thread to execute next command */ + wake_up_interruptible(&priv->waitq); + ret = -1; + goto done; + } + + cmdnode->callback = NULL; + cmdnode->callback_arg = (unsigned long)pdata_buf; + + cmdptr = (struct cmd_ds_command *)cmdnode->cmdbuf; + + lbs_deb_host("PREP_CMD: command 0x%04x\n", cmd_no); + + /* Set sequence number, command and INT option */ + priv->seqnum++; + cmdptr->seqnum = cpu_to_le16(priv->seqnum); + + cmdptr->command = cpu_to_le16(cmd_no); + cmdptr->result = 0; + + switch (cmd_no) { + case CMD_802_11_PS_MODE: + ret = lbs_cmd_802_11_ps_mode(cmdptr, cmd_action); + break; + + case CMD_MAC_REG_ACCESS: + case CMD_BBP_REG_ACCESS: + case CMD_RF_REG_ACCESS: + ret = lbs_cmd_reg_access(cmdptr, cmd_action, pdata_buf); + break; + + case CMD_802_11_MONITOR_MODE: + ret = lbs_cmd_802_11_monitor_mode(cmdptr, + cmd_action, pdata_buf); + break; + + case CMD_802_11_RSSI: + ret = lbs_cmd_802_11_rssi(priv, cmdptr); + break; + + case CMD_802_11_SET_AFC: + case CMD_802_11_GET_AFC: + + cmdptr->command = cpu_to_le16(cmd_no); + cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) + + sizeof(struct cmd_header)); + + memmove(&cmdptr->params.afc, + pdata_buf, sizeof(struct cmd_ds_802_11_afc)); + + ret = 0; + goto done; + + case CMD_802_11_TPC_CFG: + cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG); + cmdptr->size = + cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) + + sizeof(struct cmd_header)); + + memmove(&cmdptr->params.tpccfg, + pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg)); + + ret = 0; + break; + +#ifdef CONFIG_LIBERTAS_MESH + + case CMD_BT_ACCESS: + ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf); + break; + + case CMD_FWT_ACCESS: + ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf); + break; + +#endif + + case CMD_802_11_BEACON_CTRL: + ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action); + break; + case CMD_802_11_DEEP_SLEEP: + cmdptr->command = cpu_to_le16(CMD_802_11_DEEP_SLEEP); + cmdptr->size = cpu_to_le16(sizeof(struct cmd_header)); + break; + default: + lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no); + ret = -1; + break; + } + + /* return error, since the command preparation failed */ + if (ret != 0) { + lbs_deb_host("PREP_CMD: command preparation failed\n"); + lbs_cleanup_and_insert_cmd(priv, cmdnode); + ret = -1; + goto done; + } + + cmdnode->cmdwaitqwoken = 0; + + lbs_queue_cmd(priv, cmdnode); + wake_up_interruptible(&priv->waitq); + + if (wait_option & CMD_OPTION_WAITFORRSP) { + lbs_deb_host("PREP_CMD: wait for response\n"); + might_sleep(); + 
wait_event_interruptible(cmdnode->cmdwait_q, + cmdnode->cmdwaitqwoken); + } + + spin_lock_irqsave(&priv->driver_lock, flags); + if (priv->cur_cmd_retcode) { + lbs_deb_host("PREP_CMD: command failed with return code %d\n", + priv->cur_cmd_retcode); + priv->cur_cmd_retcode = 0; + ret = -1; + } + spin_unlock_irqrestore(&priv->driver_lock, flags); + +done: + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); + return ret; +} + +/** + * @brief This function allocates the command buffer and link + * it to command free queue. + * + * @param priv A pointer to struct lbs_private structure + * @return 0 or -1 + */ +int lbs_allocate_cmd_buffer(struct lbs_private *priv) +{ + int ret = 0; + u32 bufsize; + u32 i; + struct cmd_ctrl_node *cmdarray; + + lbs_deb_enter(LBS_DEB_HOST); + + /* Allocate and initialize the command array */ + bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS; + if (!(cmdarray = kzalloc(bufsize, GFP_KERNEL))) { + lbs_deb_host("ALLOC_CMD_BUF: tempcmd_array is NULL\n"); + ret = -1; + goto done; + } + priv->cmd_array = cmdarray; + + /* Allocate and initialize each command buffer in the command array */ + for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { + cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL); + if (!cmdarray[i].cmdbuf) { + lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n"); + ret = -1; + goto done; + } + } + + for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { + init_waitqueue_head(&cmdarray[i].cmdwait_q); + lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]); + } + ret = 0; + +done: + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); + return ret; +} + +/** + * @brief This function frees the command buffer. + * + * @param priv A pointer to struct lbs_private structure + * @return 0 or -1 + */ +int lbs_free_cmd_buffer(struct lbs_private *priv) +{ + struct cmd_ctrl_node *cmdarray; + unsigned int i; + + lbs_deb_enter(LBS_DEB_HOST); + + /* need to check if cmd array is allocated or not */ + if (priv->cmd_array == NULL) { + lbs_deb_host("FREE_CMD_BUF: cmd_array is NULL\n"); + goto done; + } + + cmdarray = priv->cmd_array; + + /* Release shared memory buffers */ + for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { + if (cmdarray[i].cmdbuf) { + kfree(cmdarray[i].cmdbuf); + cmdarray[i].cmdbuf = NULL; + } + } + + /* Release cmd_ctrl_node */ + if (priv->cmd_array) { + kfree(priv->cmd_array); + priv->cmd_array = NULL; + } + +done: + lbs_deb_leave(LBS_DEB_HOST); + return 0; +} + +/** + * @brief This function gets a free command node if available in + * command free queue. + * + * @param priv A pointer to struct lbs_private structure + * @return cmd_ctrl_node A pointer to cmd_ctrl_node structure or NULL + */ +static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv) +{ + struct cmd_ctrl_node *tempnode; + unsigned long flags; + + lbs_deb_enter(LBS_DEB_HOST); + + if (!priv) + return NULL; + + spin_lock_irqsave(&priv->driver_lock, flags); + + if (!list_empty(&priv->cmdfreeq)) { + tempnode = list_first_entry(&priv->cmdfreeq, + struct cmd_ctrl_node, list); + list_del(&tempnode->list); + } else { + lbs_deb_host("GET_CMD_NODE: cmd_ctrl_node is not available\n"); + tempnode = NULL; + } + + spin_unlock_irqrestore(&priv->driver_lock, flags); + + lbs_deb_leave(LBS_DEB_HOST); + return tempnode; +} + +/** + * @brief This function executes next command in command + * pending queue. It will put firmware back to PS mode + * if applicable. 
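+ *
+ * Called only from lbs_thread(); when cmdpendingq is empty and the
+ * adapter is connected with power saving enabled, it puts the firmware
+ * back to sleep via lbs_ps_sleep().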
+ * + * @param priv A pointer to struct lbs_private structure + * @return 0 or -1 + */ +int lbs_execute_next_command(struct lbs_private *priv) +{ + struct cmd_ctrl_node *cmdnode = NULL; + struct cmd_header *cmd; + unsigned long flags; + int ret = 0; + + /* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the + * only caller to us is lbs_thread() and we get even when a + * data packet is received */ + lbs_deb_enter(LBS_DEB_THREAD); + + spin_lock_irqsave(&priv->driver_lock, flags); + + if (priv->cur_cmd) { + lbs_pr_alert( "EXEC_NEXT_CMD: already processing command!\n"); + spin_unlock_irqrestore(&priv->driver_lock, flags); + ret = -1; + goto done; + } + + if (!list_empty(&priv->cmdpendingq)) { + cmdnode = list_first_entry(&priv->cmdpendingq, + struct cmd_ctrl_node, list); + } + + spin_unlock_irqrestore(&priv->driver_lock, flags); + + if (cmdnode) { + cmd = cmdnode->cmdbuf; + + if (is_command_allowed_in_ps(le16_to_cpu(cmd->command))) { + if ((priv->psstate == PS_STATE_SLEEP) || + (priv->psstate == PS_STATE_PRE_SLEEP)) { + lbs_deb_host( + "EXEC_NEXT_CMD: cannot send cmd 0x%04x in psstate %d\n", + le16_to_cpu(cmd->command), + priv->psstate); + ret = -1; + goto done; + } + lbs_deb_host("EXEC_NEXT_CMD: OK to send command " + "0x%04x in psstate %d\n", + le16_to_cpu(cmd->command), priv->psstate); + } else if (priv->psstate != PS_STATE_FULL_POWER) { + /* + * 1. Non-PS command: + * Queue it. set needtowakeup to TRUE if current state + * is SLEEP, otherwise call lbs_ps_wakeup to send Exit_PS. + * 2. PS command but not Exit_PS: + * Ignore it. + * 3. PS command Exit_PS: + * Set needtowakeup to TRUE if current state is SLEEP, + * otherwise send this command down to firmware + * immediately. + */ + if (cmd->command != cpu_to_le16(CMD_802_11_PS_MODE)) { + /* Prepare to send Exit PS, + * this non PS command will be sent later */ + if ((priv->psstate == PS_STATE_SLEEP) + || (priv->psstate == PS_STATE_PRE_SLEEP) + ) { + /* w/ new scheme, it will not reach here. + since it is blocked in main_thread. */ + priv->needtowakeup = 1; + } else + lbs_ps_wakeup(priv, 0); + + ret = 0; + goto done; + } else { + /* + * PS command. Ignore it if it is not Exit_PS. + * otherwise send it down immediately. 
+ */ + struct cmd_ds_802_11_ps_mode *psm = (void *)&cmd[1]; + + lbs_deb_host( + "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n", + psm->action); + if (psm->action != + cpu_to_le16(CMD_SUBCMD_EXIT_PS)) { + lbs_deb_host( + "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); + list_del(&cmdnode->list); + spin_lock_irqsave(&priv->driver_lock, flags); + lbs_complete_command(priv, cmdnode, 0); + spin_unlock_irqrestore(&priv->driver_lock, flags); + + ret = 0; + goto done; + } + + if ((priv->psstate == PS_STATE_SLEEP) || + (priv->psstate == PS_STATE_PRE_SLEEP)) { + lbs_deb_host( + "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); + list_del(&cmdnode->list); + spin_lock_irqsave(&priv->driver_lock, flags); + lbs_complete_command(priv, cmdnode, 0); + spin_unlock_irqrestore(&priv->driver_lock, flags); + priv->needtowakeup = 1; + + ret = 0; + goto done; + } + + lbs_deb_host( + "EXEC_NEXT_CMD: sending EXIT_PS\n"); + } + } + list_del(&cmdnode->list); + lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", + le16_to_cpu(cmd->command)); + lbs_submit_command(priv, cmdnode); + } else { + /* + * check if in power save mode, if yes, put the device back + * to PS mode + */ + if ((priv->psmode != LBS802_11POWERMODECAM) && + (priv->psstate == PS_STATE_FULL_POWER) && + ((priv->connect_status == LBS_CONNECTED) || + lbs_mesh_connected(priv))) { + if (priv->secinfo.WPAenabled || + priv->secinfo.WPA2enabled) { + /* check for valid WPA group keys */ + if (priv->wpa_mcast_key.len || + priv->wpa_unicast_key.len) { + lbs_deb_host( + "EXEC_NEXT_CMD: WPA enabled and GTK_SET" + " go back to PS_SLEEP"); + lbs_ps_sleep(priv, 0); + } + } else { + lbs_deb_host( + "EXEC_NEXT_CMD: cmdpendingq empty, " + "go back to PS_SLEEP"); + lbs_ps_sleep(priv, 0); + } + } + } + + ret = 0; +done: + lbs_deb_leave(LBS_DEB_THREAD); + return ret; +} + +static void lbs_send_confirmsleep(struct lbs_private *priv) +{ + unsigned long flags; + int ret; + + lbs_deb_enter(LBS_DEB_HOST); + lbs_deb_hex(LBS_DEB_HOST, "sleep confirm", (u8 *) &confirm_sleep, + sizeof(confirm_sleep)); + + ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) &confirm_sleep, + sizeof(confirm_sleep)); + if (ret) { + lbs_pr_alert("confirm_sleep failed\n"); + goto out; + } + + spin_lock_irqsave(&priv->driver_lock, flags); + + /* We don't get a response on the sleep-confirmation */ + priv->dnld_sent = DNLD_RES_RECEIVED; + + /* If nothing to do, go back to sleep (?) */ + if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx]) + priv->psstate = PS_STATE_SLEEP; + + spin_unlock_irqrestore(&priv->driver_lock, flags); + +out: + lbs_deb_leave(LBS_DEB_HOST); +} + +void lbs_ps_sleep(struct lbs_private *priv, int wait_option) +{ + lbs_deb_enter(LBS_DEB_HOST); + + /* + * PS is currently supported only in Infrastructure mode + * Remove this check if it is to be supported in IBSS mode also + */ + + lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE, + CMD_SUBCMD_ENTER_PS, wait_option, 0, NULL); + + lbs_deb_leave(LBS_DEB_HOST); +} + +/** + * @brief This function sends Exit_PS command to firmware. 
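+ *
+ * It queues CMD_802_11_PS_MODE with CMD_SUBCMD_EXIT_PS via
+ * lbs_prepare_and_send_command(); lbs_ps_sleep() above issues the
+ * matching CMD_SUBCMD_ENTER_PS request.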
+ * + * @param priv A pointer to struct lbs_private structure + * @param wait_option wait response or not + * @return n/a + */ +void lbs_ps_wakeup(struct lbs_private *priv, int wait_option) +{ + __le32 Localpsmode; + + lbs_deb_enter(LBS_DEB_HOST); + + Localpsmode = cpu_to_le32(LBS802_11POWERMODECAM); + + lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE, + CMD_SUBCMD_EXIT_PS, + wait_option, 0, &Localpsmode); + + lbs_deb_leave(LBS_DEB_HOST); +} + +/** + * @brief This function checks condition and prepares to + * send sleep confirm command to firmware if ok. + * + * @param priv A pointer to struct lbs_private structure + * @param psmode Power Saving mode + * @return n/a + */ +void lbs_ps_confirm_sleep(struct lbs_private *priv) +{ + unsigned long flags =0; + int allowed = 1; + + lbs_deb_enter(LBS_DEB_HOST); + + spin_lock_irqsave(&priv->driver_lock, flags); + if (priv->dnld_sent) { + allowed = 0; + lbs_deb_host("dnld_sent was set\n"); + } + + /* In-progress command? */ + if (priv->cur_cmd) { + allowed = 0; + lbs_deb_host("cur_cmd was set\n"); + } + + /* Pending events or command responses? */ + if (kfifo_len(&priv->event_fifo) || priv->resp_len[priv->resp_idx]) { + allowed = 0; + lbs_deb_host("pending events or command responses\n"); + } + spin_unlock_irqrestore(&priv->driver_lock, flags); + + if (allowed) { + lbs_deb_host("sending lbs_ps_confirm_sleep\n"); + lbs_send_confirmsleep(priv); + } else { + lbs_deb_host("sleep confirm has been delayed\n"); + } + + lbs_deb_leave(LBS_DEB_HOST); +} + + +/** + * @brief Configures the transmission power control functionality. + * + * @param priv A pointer to struct lbs_private structure + * @param enable Transmission power control enable + * @param p0 Power level when link quality is good (dBm). + * @param p1 Power level when link quality is fair (dBm). + * @param p2 Power level when link quality is poor (dBm). + * @param usesnr Use Signal to Noise Ratio in TPC + * + * @return 0 on success + */ +int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, + int8_t p2, int usesnr) +{ + struct cmd_ds_802_11_tpc_cfg cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + cmd.enable = !!enable; + cmd.usesnr = !!usesnr; + cmd.P0 = p0; + cmd.P1 = p1; + cmd.P2 = p2; + + ret = lbs_cmd_with_response(priv, CMD_802_11_TPC_CFG, &cmd); + + return ret; +} + +/** + * @brief Configures the power adaptation settings. + * + * @param priv A pointer to struct lbs_private structure + * @param enable Power adaptation enable + * @param p0 Power level for 1, 2, 5.5 and 11 Mbps (dBm). + * @param p1 Power level for 6, 9, 12, 18, 22, 24 and 36 Mbps (dBm). + * @param p2 Power level for 48 and 54 Mbps (dBm). 
+ * + * @return 0 on Success + */ + +int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, + int8_t p1, int8_t p2) +{ + struct cmd_ds_802_11_pa_cfg cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + cmd.enable = !!enable; + cmd.P0 = p0; + cmd.P1 = p1; + cmd.P2 = p2; + + ret = lbs_cmd_with_response(priv, CMD_802_11_PA_CFG , &cmd); + + return ret; +} + + +struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, + uint16_t command, struct cmd_header *in_cmd, int in_cmd_size, + int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), + unsigned long callback_arg) +{ + struct cmd_ctrl_node *cmdnode; + + lbs_deb_enter(LBS_DEB_HOST); + + if (priv->surpriseremoved) { + lbs_deb_host("PREP_CMD: card removed\n"); + cmdnode = ERR_PTR(-ENOENT); + goto done; + } + + if (!lbs_is_cmd_allowed(priv)) { + cmdnode = ERR_PTR(-EBUSY); + goto done; + } + + cmdnode = lbs_get_cmd_ctrl_node(priv); + if (cmdnode == NULL) { + lbs_deb_host("PREP_CMD: cmdnode is NULL\n"); + + /* Wake up main thread to execute next command */ + wake_up_interruptible(&priv->waitq); + cmdnode = ERR_PTR(-ENOBUFS); + goto done; + } + + cmdnode->callback = callback; + cmdnode->callback_arg = callback_arg; + + /* Copy the incoming command to the buffer */ + memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size); + + /* Set sequence number, clean result, move to buffer */ + priv->seqnum++; + cmdnode->cmdbuf->command = cpu_to_le16(command); + cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size); + cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum); + cmdnode->cmdbuf->result = 0; + + lbs_deb_host("PREP_CMD: command 0x%04x\n", command); + + cmdnode->cmdwaitqwoken = 0; + lbs_queue_cmd(priv, cmdnode); + wake_up_interruptible(&priv->waitq); + + done: + lbs_deb_leave_args(LBS_DEB_HOST, "ret %p", cmdnode); + return cmdnode; +} + +void lbs_cmd_async(struct lbs_private *priv, uint16_t command, + struct cmd_header *in_cmd, int in_cmd_size) +{ + lbs_deb_enter(LBS_DEB_CMD); + __lbs_cmd_async(priv, command, in_cmd, in_cmd_size, + lbs_cmd_async_callback, 0); + lbs_deb_leave(LBS_DEB_CMD); +} + +int __lbs_cmd(struct lbs_private *priv, uint16_t command, + struct cmd_header *in_cmd, int in_cmd_size, + int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), + unsigned long callback_arg) +{ + struct cmd_ctrl_node *cmdnode; + unsigned long flags; + int ret = 0; + + lbs_deb_enter(LBS_DEB_HOST); + + cmdnode = __lbs_cmd_async(priv, command, in_cmd, in_cmd_size, + callback, callback_arg); + if (IS_ERR(cmdnode)) { + ret = PTR_ERR(cmdnode); + goto done; + } + + might_sleep(); + wait_event_interruptible(cmdnode->cmdwait_q, cmdnode->cmdwaitqwoken); + + spin_lock_irqsave(&priv->driver_lock, flags); + ret = cmdnode->result; + if (ret) + lbs_pr_info("PREP_CMD: command 0x%04x failed: %d\n", + command, ret); + + __lbs_cleanup_and_insert_cmd(priv, cmdnode); + spin_unlock_irqrestore(&priv->driver_lock, flags); + +done: + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); + return ret; +} +EXPORT_SYMBOL_GPL(__lbs_cmd); diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h new file mode 100644 index 0000000..cb4138a --- /dev/null +++ b/drivers/net/wireless/libertas/cmd.h @@ -0,0 +1,130 @@ +/* Copyright (C) 2007, Red Hat, Inc. 
*/ + +#ifndef _LBS_CMD_H_ +#define _LBS_CMD_H_ + +#include "host.h" +#include "dev.h" + + +/* Command & response transfer between host and card */ + +struct cmd_ctrl_node { + struct list_head list; + int result; + /* command response */ + int (*callback)(struct lbs_private *, + unsigned long, + struct cmd_header *); + unsigned long callback_arg; + /* command data */ + struct cmd_header *cmdbuf; + /* wait queue */ + u16 cmdwaitqwoken; + wait_queue_head_t cmdwait_q; +}; + + +/* lbs_cmd() infers the size of the buffer to copy data back into, from + the size of the target of the pointer. Since the command to be sent + may often be smaller, that size is set in cmd->size by the caller.*/ +#define lbs_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \ + uint16_t __sz = le16_to_cpu((cmd)->hdr.size); \ + (cmd)->hdr.size = cpu_to_le16(sizeof(*(cmd))); \ + __lbs_cmd(priv, cmdnr, &(cmd)->hdr, __sz, cb, cb_arg); \ +}) + +#define lbs_cmd_with_response(priv, cmdnr, cmd) \ + lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd)) + +int lbs_prepare_and_send_command(struct lbs_private *priv, + u16 cmd_no, + u16 cmd_action, + u16 wait_option, u32 cmd_oid, void *pdata_buf); + +void lbs_cmd_async(struct lbs_private *priv, uint16_t command, + struct cmd_header *in_cmd, int in_cmd_size); + +int __lbs_cmd(struct lbs_private *priv, uint16_t command, + struct cmd_header *in_cmd, int in_cmd_size, + int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), + unsigned long callback_arg); + +struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, + uint16_t command, struct cmd_header *in_cmd, int in_cmd_size, + int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), + unsigned long callback_arg); + +int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra, + struct cmd_header *resp); + +int lbs_allocate_cmd_buffer(struct lbs_private *priv); +int lbs_free_cmd_buffer(struct lbs_private *priv); + +int lbs_execute_next_command(struct lbs_private *priv); +void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd, + int result); +int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len); + + +/* From cmdresp.c */ + +void lbs_mac_event_disconnected(struct lbs_private *priv); + + + +/* Events */ + +int lbs_process_event(struct lbs_private *priv, u32 event); + + +/* Actual commands */ + +int lbs_update_hw_spec(struct lbs_private *priv); + +int lbs_set_channel(struct lbs_private *priv, u8 channel); + +int lbs_update_channel(struct lbs_private *priv); + +int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria, + struct wol_config *p_wol_config); + +int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, + struct sleep_params *sp); + +void lbs_ps_sleep(struct lbs_private *priv, int wait_option); + +void lbs_ps_wakeup(struct lbs_private *priv, int wait_option); + +void lbs_ps_confirm_sleep(struct lbs_private *priv); + +int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on); + +void lbs_set_mac_control(struct lbs_private *priv); + +int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel, + s16 *maxlevel); + +int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val); + +int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val); + + +/* Commands only used in wext.c, assoc. 
and scan.c */ + +int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, + int8_t p1, int8_t p2); + +int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, + int8_t p2, int usesnr); + +int lbs_set_data_rate(struct lbs_private *priv, u8 rate); + +int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv, + uint16_t cmd_action); + +int lbs_set_tx_power(struct lbs_private *priv, s16 dbm); + +int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep); + +#endif /* _LBS_CMD_H */ diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c new file mode 100644 index 0000000..e747044 --- /dev/null +++ b/drivers/net/wireless/libertas/cmdresp.c @@ -0,0 +1,494 @@ +/** + * This file contains the handling of command + * responses as well as events generated by firmware. + */ +#include +#include +#include +#include +#include +#include + +#include "host.h" +#include "decl.h" +#include "cmd.h" +#include "defs.h" +#include "dev.h" +#include "assoc.h" +#include "wext.h" + +/** + * @brief This function handles disconnect event. it + * reports disconnect to upper layer, clean tx/rx packets, + * reset link state etc. + * + * @param priv A pointer to struct lbs_private structure + * @return n/a + */ +void lbs_mac_event_disconnected(struct lbs_private *priv) +{ + if (priv->connect_status != LBS_CONNECTED) + return; + + lbs_deb_enter(LBS_DEB_ASSOC); + + /* + * Cisco AP sends EAP failure and de-auth in less than 0.5 ms. + * It causes problem in the Supplicant + */ + msleep_interruptible(1000); + lbs_send_disconnect_notification(priv); + + /* report disconnect to upper layer */ + netif_stop_queue(priv->dev); + netif_carrier_off(priv->dev); + + /* Free Tx and Rx packets */ + kfree_skb(priv->currenttxskb); + priv->currenttxskb = NULL; + priv->tx_pending_len = 0; + + /* reset SNR/NF/RSSI values */ + memset(priv->SNR, 0x00, sizeof(priv->SNR)); + memset(priv->NF, 0x00, sizeof(priv->NF)); + memset(priv->RSSI, 0x00, sizeof(priv->RSSI)); + memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR)); + memset(priv->rawNF, 0x00, sizeof(priv->rawNF)); + priv->nextSNRNF = 0; + priv->numSNRNF = 0; + priv->connect_status = LBS_DISCONNECTED; + + /* Clear out associated SSID and BSSID since connection is + * no longer valid. 
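+ * (curbssparams.bssid, curbssparams.ssid and ssid_len are all zeroed just below.)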
+ */ + memset(&priv->curbssparams.bssid, 0, ETH_ALEN); + memset(&priv->curbssparams.ssid, 0, IEEE80211_MAX_SSID_LEN); + priv->curbssparams.ssid_len = 0; + + if (priv->psstate != PS_STATE_FULL_POWER) { + /* make firmware to exit PS mode */ + lbs_deb_cmd("disconnected, so exit PS mode\n"); + lbs_ps_wakeup(priv, 0); + } + lbs_deb_leave(LBS_DEB_ASSOC); +} + +static int lbs_ret_reg_access(struct lbs_private *priv, + u16 type, struct cmd_ds_command *resp) +{ + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + switch (type) { + case CMD_RET(CMD_MAC_REG_ACCESS): + { + struct cmd_ds_mac_reg_access *reg = &resp->params.macreg; + + priv->offsetvalue.offset = (u32)le16_to_cpu(reg->offset); + priv->offsetvalue.value = le32_to_cpu(reg->value); + break; + } + + case CMD_RET(CMD_BBP_REG_ACCESS): + { + struct cmd_ds_bbp_reg_access *reg = &resp->params.bbpreg; + + priv->offsetvalue.offset = (u32)le16_to_cpu(reg->offset); + priv->offsetvalue.value = reg->value; + break; + } + + case CMD_RET(CMD_RF_REG_ACCESS): + { + struct cmd_ds_rf_reg_access *reg = &resp->params.rfreg; + + priv->offsetvalue.offset = (u32)le16_to_cpu(reg->offset); + priv->offsetvalue.value = reg->value; + break; + } + + default: + ret = -1; + } + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} + +static inline int handle_cmd_response(struct lbs_private *priv, + struct cmd_header *cmd_response) +{ + struct cmd_ds_command *resp = (struct cmd_ds_command *) cmd_response; + int ret = 0; + unsigned long flags; + uint16_t respcmd = le16_to_cpu(resp->command); + + lbs_deb_enter(LBS_DEB_HOST); + + switch (respcmd) { + case CMD_RET(CMD_MAC_REG_ACCESS): + case CMD_RET(CMD_BBP_REG_ACCESS): + case CMD_RET(CMD_RF_REG_ACCESS): + ret = lbs_ret_reg_access(priv, respcmd, resp); + break; + + case CMD_RET(CMD_802_11_SET_AFC): + case CMD_RET(CMD_802_11_GET_AFC): + spin_lock_irqsave(&priv->driver_lock, flags); + memmove((void *)priv->cur_cmd->callback_arg, &resp->params.afc, + sizeof(struct cmd_ds_802_11_afc)); + spin_unlock_irqrestore(&priv->driver_lock, flags); + + break; + + case CMD_RET(CMD_802_11_BEACON_STOP): + break; + + case CMD_RET(CMD_802_11_RSSI): + ret = lbs_ret_802_11_rssi(priv, resp); + break; + + case CMD_RET(CMD_802_11_TPC_CFG): + spin_lock_irqsave(&priv->driver_lock, flags); + memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg, + sizeof(struct cmd_ds_802_11_tpc_cfg)); + spin_unlock_irqrestore(&priv->driver_lock, flags); + break; + + case CMD_RET(CMD_BT_ACCESS): + spin_lock_irqsave(&priv->driver_lock, flags); + if (priv->cur_cmd->callback_arg) + memcpy((void *)priv->cur_cmd->callback_arg, + &resp->params.bt.addr1, 2 * ETH_ALEN); + spin_unlock_irqrestore(&priv->driver_lock, flags); + break; + case CMD_RET(CMD_FWT_ACCESS): + spin_lock_irqsave(&priv->driver_lock, flags); + if (priv->cur_cmd->callback_arg) + memcpy((void *)priv->cur_cmd->callback_arg, &resp->params.fwt, + sizeof(resp->params.fwt)); + spin_unlock_irqrestore(&priv->driver_lock, flags); + break; + case CMD_RET(CMD_802_11_BEACON_CTRL): + ret = lbs_ret_802_11_bcn_ctrl(priv, resp); + break; + + default: + lbs_pr_err("CMD_RESP: unknown cmd response 0x%04x\n", + le16_to_cpu(resp->command)); + break; + } + lbs_deb_leave(LBS_DEB_HOST); + return ret; +} + +int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) +{ + uint16_t respcmd, curcmd; + struct cmd_header *resp; + int ret = 0; + unsigned long flags; + uint16_t result; + + lbs_deb_enter(LBS_DEB_HOST); + + mutex_lock(&priv->lock); + spin_lock_irqsave(&priv->driver_lock, flags); + + if 
(!priv->cur_cmd) { + lbs_deb_host("CMD_RESP: cur_cmd is NULL\n"); + ret = -1; + spin_unlock_irqrestore(&priv->driver_lock, flags); + goto done; + } + + resp = (void *)data; + curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command); + respcmd = le16_to_cpu(resp->command); + result = le16_to_cpu(resp->result); + + lbs_deb_cmd("CMD_RESP: response 0x%04x, seq %d, size %d\n", + respcmd, le16_to_cpu(resp->seqnum), len); + lbs_deb_hex(LBS_DEB_CMD, "CMD_RESP", (void *) resp, len); + + if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) { + lbs_pr_info("Received CMD_RESP with invalid sequence %d (expected %d)\n", + le16_to_cpu(resp->seqnum), le16_to_cpu(priv->cur_cmd->cmdbuf->seqnum)); + spin_unlock_irqrestore(&priv->driver_lock, flags); + ret = -1; + goto done; + } + if (respcmd != CMD_RET(curcmd) && + respcmd != CMD_RET_802_11_ASSOCIATE && curcmd != CMD_802_11_ASSOCIATE) { + lbs_pr_info("Invalid CMD_RESP %x to command %x!\n", respcmd, curcmd); + spin_unlock_irqrestore(&priv->driver_lock, flags); + ret = -1; + goto done; + } + + if (resp->result == cpu_to_le16(0x0004)) { + /* 0x0004 means -EAGAIN. Drop the response, let it time out + and be resubmitted */ + lbs_pr_info("Firmware returns DEFER to command %x. Will let it time out...\n", + le16_to_cpu(resp->command)); + spin_unlock_irqrestore(&priv->driver_lock, flags); + ret = -1; + goto done; + } + + /* Now we got response from FW, cancel the command timer */ + del_timer(&priv->command_timer); + priv->cmd_timed_out = 0; + + /* Store the response code to cur_cmd_retcode. */ + priv->cur_cmd_retcode = result; + + if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) { + struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1]; + u16 action = le16_to_cpu(psmode->action); + + lbs_deb_host( + "CMD_RESP: PS_MODE cmd reply result 0x%x, action 0x%x\n", + result, action); + + if (result) { + lbs_deb_host("CMD_RESP: PS command failed with 0x%x\n", + result); + /* + * We should not re-try enter-ps command in + * ad-hoc mode. It takes place in + * lbs_execute_next_command(). + */ + if (priv->mode == IW_MODE_ADHOC && + action == CMD_SUBCMD_ENTER_PS) + priv->psmode = LBS802_11POWERMODECAM; + } else if (action == CMD_SUBCMD_ENTER_PS) { + priv->needtowakeup = 0; + priv->psstate = PS_STATE_AWAKE; + + lbs_deb_host("CMD_RESP: ENTER_PS command response\n"); + if (priv->connect_status != LBS_CONNECTED) { + /* + * When Deauth Event received before Enter_PS command + * response, We need to wake up the firmware. 
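+ * (The code below drops both driver_lock and priv->lock around the lbs_ps_wakeup() call and re-acquires them before continuing.)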
+ */ + lbs_deb_host( + "disconnected, invoking lbs_ps_wakeup\n"); + + spin_unlock_irqrestore(&priv->driver_lock, flags); + mutex_unlock(&priv->lock); + lbs_ps_wakeup(priv, 0); + mutex_lock(&priv->lock); + spin_lock_irqsave(&priv->driver_lock, flags); + } + } else if (action == CMD_SUBCMD_EXIT_PS) { + priv->needtowakeup = 0; + priv->psstate = PS_STATE_FULL_POWER; + lbs_deb_host("CMD_RESP: EXIT_PS command response\n"); + } else { + lbs_deb_host("CMD_RESP: PS action 0x%X\n", action); + } + + lbs_complete_command(priv, priv->cur_cmd, result); + spin_unlock_irqrestore(&priv->driver_lock, flags); + + ret = 0; + goto done; + } + + /* If the command is not successful, cleanup and return failure */ + if ((result != 0 || !(respcmd & 0x8000))) { + lbs_deb_host("CMD_RESP: error 0x%04x in command reply 0x%04x\n", + result, respcmd); + /* + * Handling errors here + */ + switch (respcmd) { + case CMD_RET(CMD_GET_HW_SPEC): + case CMD_RET(CMD_802_11_RESET): + lbs_deb_host("CMD_RESP: reset failed\n"); + break; + + } + lbs_complete_command(priv, priv->cur_cmd, result); + spin_unlock_irqrestore(&priv->driver_lock, flags); + + ret = -1; + goto done; + } + + spin_unlock_irqrestore(&priv->driver_lock, flags); + + if (priv->cur_cmd && priv->cur_cmd->callback) { + ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg, + resp); + } else + ret = handle_cmd_response(priv, resp); + + spin_lock_irqsave(&priv->driver_lock, flags); + + if (priv->cur_cmd) { + /* Clean up and Put current command back to cmdfreeq */ + lbs_complete_command(priv, priv->cur_cmd, result); + } + spin_unlock_irqrestore(&priv->driver_lock, flags); + +done: + mutex_unlock(&priv->lock); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); + return ret; +} + +static int lbs_send_confirmwake(struct lbs_private *priv) +{ + struct cmd_header cmd; + int ret = 0; + + lbs_deb_enter(LBS_DEB_HOST); + + cmd.command = cpu_to_le16(CMD_802_11_WAKEUP_CONFIRM); + cmd.size = cpu_to_le16(sizeof(cmd)); + cmd.seqnum = cpu_to_le16(++priv->seqnum); + cmd.result = 0; + + lbs_deb_hex(LBS_DEB_HOST, "wake confirm", (u8 *) &cmd, + sizeof(cmd)); + + ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) &cmd, sizeof(cmd)); + if (ret) + lbs_pr_alert("SEND_WAKEC_CMD: Host to Card failed for Confirm Wake\n"); + + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); + return ret; +} + +int lbs_process_event(struct lbs_private *priv, u32 event) +{ + int ret = 0; + + lbs_deb_enter(LBS_DEB_CMD); + + switch (event) { + case MACREG_INT_CODE_LINK_SENSED: + lbs_deb_cmd("EVENT: link sensed\n"); + break; + + case MACREG_INT_CODE_DEAUTHENTICATED: + lbs_deb_cmd("EVENT: deauthenticated\n"); + lbs_mac_event_disconnected(priv); + break; + + case MACREG_INT_CODE_DISASSOCIATED: + lbs_deb_cmd("EVENT: disassociated\n"); + lbs_mac_event_disconnected(priv); + break; + + case MACREG_INT_CODE_LINK_LOST_NO_SCAN: + lbs_deb_cmd("EVENT: link lost\n"); + lbs_mac_event_disconnected(priv); + break; + + case MACREG_INT_CODE_PS_SLEEP: + lbs_deb_cmd("EVENT: ps sleep\n"); + + /* handle unexpected PS SLEEP event */ + if (priv->psstate == PS_STATE_FULL_POWER) { + lbs_deb_cmd( + "EVENT: in FULL POWER mode, ignoreing PS_SLEEP\n"); + break; + } + priv->psstate = PS_STATE_PRE_SLEEP; + + lbs_ps_confirm_sleep(priv); + + break; + + case MACREG_INT_CODE_HOST_AWAKE: + lbs_deb_cmd("EVENT: host awake\n"); + if (priv->reset_deep_sleep_wakeup) + priv->reset_deep_sleep_wakeup(priv); + priv->is_deep_sleep = 0; + lbs_send_confirmwake(priv); + break; + + case MACREG_INT_CODE_DEEP_SLEEP_AWAKE: + if (priv->reset_deep_sleep_wakeup) + 
priv->reset_deep_sleep_wakeup(priv); + lbs_deb_cmd("EVENT: ds awake\n"); + priv->is_deep_sleep = 0; + priv->wakeup_dev_required = 0; + wake_up_interruptible(&priv->ds_awake_q); + break; + + case MACREG_INT_CODE_PS_AWAKE: + lbs_deb_cmd("EVENT: ps awake\n"); + /* handle unexpected PS AWAKE event */ + if (priv->psstate == PS_STATE_FULL_POWER) { + lbs_deb_cmd( + "EVENT: In FULL POWER mode - ignore PS AWAKE\n"); + break; + } + + priv->psstate = PS_STATE_AWAKE; + + if (priv->needtowakeup) { + /* + * wait for the command processing to finish + * before resuming sending + * priv->needtowakeup will be set to FALSE + * in lbs_ps_wakeup() + */ + lbs_deb_cmd("waking up ...\n"); + lbs_ps_wakeup(priv, 0); + } + break; + + case MACREG_INT_CODE_MIC_ERR_UNICAST: + lbs_deb_cmd("EVENT: UNICAST MIC ERROR\n"); + lbs_send_mic_failureevent(priv, event); + break; + + case MACREG_INT_CODE_MIC_ERR_MULTICAST: + lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n"); + lbs_send_mic_failureevent(priv, event); + break; + + case MACREG_INT_CODE_MIB_CHANGED: + lbs_deb_cmd("EVENT: MIB CHANGED\n"); + break; + case MACREG_INT_CODE_INIT_DONE: + lbs_deb_cmd("EVENT: INIT DONE\n"); + break; + case MACREG_INT_CODE_ADHOC_BCN_LOST: + lbs_deb_cmd("EVENT: ADHOC beacon lost\n"); + break; + case MACREG_INT_CODE_RSSI_LOW: + lbs_pr_alert("EVENT: rssi low\n"); + break; + case MACREG_INT_CODE_SNR_LOW: + lbs_pr_alert("EVENT: snr low\n"); + break; + case MACREG_INT_CODE_MAX_FAIL: + lbs_pr_alert("EVENT: max fail\n"); + break; + case MACREG_INT_CODE_RSSI_HIGH: + lbs_pr_alert("EVENT: rssi high\n"); + break; + case MACREG_INT_CODE_SNR_HIGH: + lbs_pr_alert("EVENT: snr high\n"); + break; + + case MACREG_INT_CODE_MESH_AUTO_STARTED: + /* Ignore spurious autostart events */ + lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n"); + break; + + default: + lbs_pr_alert("EVENT: unknown event id %d\n", event); + break; + } + + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + return ret; +} diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c new file mode 100644 index 0000000..587b0cb --- /dev/null +++ b/drivers/net/wireless/libertas/debugfs.c @@ -0,0 +1,1006 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dev.h" +#include "decl.h" +#include "host.h" +#include "debugfs.h" +#include "cmd.h" + +static struct dentry *lbs_dir; +static char *szStates[] = { + "Connected", + "Disconnected" +}; + +#ifdef PROC_DEBUG +static void lbs_debug_init(struct lbs_private *priv); +#endif + +static int open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t write_file_dummy(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return -EINVAL; +} + +static const size_t len = PAGE_SIZE; + +static ssize_t lbs_dev_info(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + size_t pos = 0; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + ssize_t res; + if (!buf) + return -ENOMEM; + + pos += snprintf(buf+pos, len-pos, "state = %s\n", + szStates[priv->connect_status]); + pos += snprintf(buf+pos, len-pos, "region_code = %02x\n", + (u32) priv->regioncode); + + res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); + + free_page(addr); + return res; +} + + +static ssize_t lbs_getscantable(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct lbs_private *priv = 
file->private_data; + size_t pos = 0; + int numscansdone = 0, res; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + DECLARE_SSID_BUF(ssid); + struct bss_descriptor * iter_bss; + if (!buf) + return -ENOMEM; + + pos += snprintf(buf+pos, len-pos, + "# | ch | rssi | bssid | cap | Qual | SSID \n"); + + mutex_lock(&priv->lock); + list_for_each_entry (iter_bss, &priv->network_list, list) { + u16 ibss = (iter_bss->capability & WLAN_CAPABILITY_IBSS); + u16 privacy = (iter_bss->capability & WLAN_CAPABILITY_PRIVACY); + u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT); + + pos += snprintf(buf+pos, len-pos, "%02u| %03d | %04d | %pM |", + numscansdone, iter_bss->channel, iter_bss->rssi, + iter_bss->bssid); + pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability); + pos += snprintf(buf+pos, len-pos, "%c%c%c |", + ibss ? 'A' : 'I', privacy ? 'P' : ' ', + spectrum_mgmt ? 'S' : ' '); + pos += snprintf(buf+pos, len-pos, " %04d |", SCAN_RSSI(iter_bss->rssi)); + pos += snprintf(buf+pos, len-pos, " %s\n", + print_ssid(ssid, iter_bss->ssid, + iter_bss->ssid_len)); + + numscansdone++; + } + mutex_unlock(&priv->lock); + + res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); + + free_page(addr); + return res; +} + +static ssize_t lbs_sleepparams_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + ssize_t buf_size, ret; + struct sleep_params sp; + int p1, p2, p3, p4, p5, p6; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + buf_size = min(count, len - 1); + if (copy_from_user(buf, user_buf, buf_size)) { + ret = -EFAULT; + goto out_unlock; + } + ret = sscanf(buf, "%d %d %d %d %d %d", &p1, &p2, &p3, &p4, &p5, &p6); + if (ret != 6) { + ret = -EINVAL; + goto out_unlock; + } + sp.sp_error = p1; + sp.sp_offset = p2; + sp.sp_stabletime = p3; + sp.sp_calcontrol = p4; + sp.sp_extsleepclk = p5; + sp.sp_reserved = p6; + + ret = lbs_cmd_802_11_sleep_params(priv, CMD_ACT_SET, &sp); + if (!ret) + ret = count; + else if (ret > 0) + ret = -EINVAL; + +out_unlock: + free_page(addr); + return ret; +} + +static ssize_t lbs_sleepparams_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + ssize_t ret; + size_t pos = 0; + struct sleep_params sp; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + ret = lbs_cmd_802_11_sleep_params(priv, CMD_ACT_GET, &sp); + if (ret) + goto out_unlock; + + pos += snprintf(buf, len, "%d %d %d %d %d %d\n", sp.sp_error, + sp.sp_offset, sp.sp_stabletime, + sp.sp_calcontrol, sp.sp_extsleepclk, + sp.sp_reserved); + + ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); + +out_unlock: + free_page(addr); + return ret; +} + +/* + * When calling CMD_802_11_SUBSCRIBE_EVENT with CMD_ACT_GET, me might + * get a bunch of vendor-specific TLVs (a.k.a. IEs) back from the + * firmware. Here's an example: + * 04 01 02 00 00 00 05 01 02 00 00 00 06 01 02 00 + * 00 00 07 01 02 00 3c 00 00 00 00 00 00 00 03 03 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * + * The 04 01 is the TLV type (here TLV_TYPE_RSSI_LOW), 02 00 is the length, + * 00 00 are the data bytes of this TLV. 
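+ * (Each entry is therefore sizeof(struct mrvl_ie_header) + len bytes long; the first one above is type 0x0104, len 0x0002 plus two value bytes, six bytes in all.)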
For this TLV, their meaning is + * defined in mrvlietypes_thresholds + * + * This function searches in this TLV data chunk for a given TLV type + * and returns a pointer to the first data byte of the TLV, or to NULL + * if the TLV hasn't been found. + */ +static void *lbs_tlv_find(uint16_t tlv_type, const uint8_t *tlv, uint16_t size) +{ + struct mrvl_ie_header *tlv_h; + uint16_t length; + ssize_t pos = 0; + + while (pos < size) { + tlv_h = (struct mrvl_ie_header *) tlv; + if (!tlv_h->len) + return NULL; + if (tlv_h->type == cpu_to_le16(tlv_type)) + return tlv_h; + length = le16_to_cpu(tlv_h->len) + sizeof(*tlv_h); + pos += length; + tlv += length; + } + return NULL; +} + + +static ssize_t lbs_threshold_read(uint16_t tlv_type, uint16_t event_mask, + struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct cmd_ds_802_11_subscribe_event *subscribed; + struct mrvl_ie_thresholds *got; + struct lbs_private *priv = file->private_data; + ssize_t ret = 0; + size_t pos = 0; + char *buf; + u8 value; + u8 freq; + int events = 0; + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + + subscribed = kzalloc(sizeof(*subscribed), GFP_KERNEL); + if (!subscribed) { + ret = -ENOMEM; + goto out_page; + } + + subscribed->hdr.size = cpu_to_le16(sizeof(*subscribed)); + subscribed->action = cpu_to_le16(CMD_ACT_GET); + + ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, subscribed); + if (ret) + goto out_cmd; + + got = lbs_tlv_find(tlv_type, subscribed->tlv, sizeof(subscribed->tlv)); + if (got) { + value = got->value; + freq = got->freq; + events = le16_to_cpu(subscribed->events); + + pos += snprintf(buf, len, "%d %d %d\n", value, freq, + !!(events & event_mask)); + } + + ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); + + out_cmd: + kfree(subscribed); + + out_page: + free_page((unsigned long)buf); + return ret; +} + + +static ssize_t lbs_threshold_write(uint16_t tlv_type, uint16_t event_mask, + struct file *file, + const char __user *userbuf, size_t count, + loff_t *ppos) +{ + struct cmd_ds_802_11_subscribe_event *events; + struct mrvl_ie_thresholds *tlv; + struct lbs_private *priv = file->private_data; + ssize_t buf_size; + int value, freq, new_mask; + uint16_t curr_mask; + char *buf; + int ret; + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf_size = min(count, len - 1); + if (copy_from_user(buf, userbuf, buf_size)) { + ret = -EFAULT; + goto out_page; + } + ret = sscanf(buf, "%d %d %d", &value, &freq, &new_mask); + if (ret != 3) { + ret = -EINVAL; + goto out_page; + } + events = kzalloc(sizeof(*events), GFP_KERNEL); + if (!events) { + ret = -ENOMEM; + goto out_page; + } + + events->hdr.size = cpu_to_le16(sizeof(*events)); + events->action = cpu_to_le16(CMD_ACT_GET); + + ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events); + if (ret) + goto out_events; + + curr_mask = le16_to_cpu(events->events); + + if (new_mask) + new_mask = curr_mask | event_mask; + else + new_mask = curr_mask & ~event_mask; + + /* Now everything is set and we can send stuff down to the firmware */ + + tlv = (void *)events->tlv; + + events->action = cpu_to_le16(CMD_ACT_SET); + events->events = cpu_to_le16(new_mask); + tlv->header.type = cpu_to_le16(tlv_type); + tlv->header.len = cpu_to_le16(sizeof(*tlv) - sizeof(tlv->header)); + tlv->value = value; + if (tlv_type != TLV_TYPE_BCNMISS) + tlv->freq = freq; + + /* The command header, the action, the event mask, and one TLV */ + events->hdr.size = 
cpu_to_le16(sizeof(events->hdr) + 4 + sizeof(*tlv)); + + ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events); + + if (!ret) + ret = count; + out_events: + kfree(events); + out_page: + free_page((unsigned long)buf); + return ret; +} + + +static ssize_t lbs_lowrssi_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_read(TLV_TYPE_RSSI_LOW, CMD_SUBSCRIBE_RSSI_LOW, + file, userbuf, count, ppos); +} + + +static ssize_t lbs_lowrssi_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_write(TLV_TYPE_RSSI_LOW, CMD_SUBSCRIBE_RSSI_LOW, + file, userbuf, count, ppos); +} + + +static ssize_t lbs_lowsnr_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_read(TLV_TYPE_SNR_LOW, CMD_SUBSCRIBE_SNR_LOW, + file, userbuf, count, ppos); +} + + +static ssize_t lbs_lowsnr_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_write(TLV_TYPE_SNR_LOW, CMD_SUBSCRIBE_SNR_LOW, + file, userbuf, count, ppos); +} + + +static ssize_t lbs_failcount_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_read(TLV_TYPE_FAILCOUNT, CMD_SUBSCRIBE_FAILCOUNT, + file, userbuf, count, ppos); +} + + +static ssize_t lbs_failcount_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_write(TLV_TYPE_FAILCOUNT, CMD_SUBSCRIBE_FAILCOUNT, + file, userbuf, count, ppos); +} + + +static ssize_t lbs_highrssi_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_read(TLV_TYPE_RSSI_HIGH, CMD_SUBSCRIBE_RSSI_HIGH, + file, userbuf, count, ppos); +} + + +static ssize_t lbs_highrssi_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_write(TLV_TYPE_RSSI_HIGH, CMD_SUBSCRIBE_RSSI_HIGH, + file, userbuf, count, ppos); +} + + +static ssize_t lbs_highsnr_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_read(TLV_TYPE_SNR_HIGH, CMD_SUBSCRIBE_SNR_HIGH, + file, userbuf, count, ppos); +} + + +static ssize_t lbs_highsnr_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_write(TLV_TYPE_SNR_HIGH, CMD_SUBSCRIBE_SNR_HIGH, + file, userbuf, count, ppos); +} + +static ssize_t lbs_bcnmiss_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_read(TLV_TYPE_BCNMISS, CMD_SUBSCRIBE_BCNMISS, + file, userbuf, count, ppos); +} + + +static ssize_t lbs_bcnmiss_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + return lbs_threshold_write(TLV_TYPE_BCNMISS, CMD_SUBSCRIBE_BCNMISS, + file, userbuf, count, ppos); +} + + + +static ssize_t lbs_rdmac_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + struct lbs_offset_value offval; + ssize_t pos = 0; + int ret; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + offval.offset = priv->mac_offset; + offval.value = 0; + + ret = lbs_prepare_and_send_command(priv, + CMD_MAC_REG_ACCESS, 0, + CMD_OPTION_WAITFORRSP, 0, &offval); + mdelay(10); + if (!ret) { + pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n", + priv->mac_offset, priv->offsetvalue.value); + + ret = 
simple_read_from_buffer(userbuf, count, ppos, buf, pos); + } + free_page(addr); + return ret; +} + +static ssize_t lbs_rdmac_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + ssize_t res, buf_size; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + buf_size = min(count, len - 1); + if (copy_from_user(buf, userbuf, buf_size)) { + res = -EFAULT; + goto out_unlock; + } + priv->mac_offset = simple_strtoul((char *)buf, NULL, 16); + res = count; +out_unlock: + free_page(addr); + return res; +} + +static ssize_t lbs_wrmac_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + + struct lbs_private *priv = file->private_data; + ssize_t res, buf_size; + u32 offset, value; + struct lbs_offset_value offval; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + buf_size = min(count, len - 1); + if (copy_from_user(buf, userbuf, buf_size)) { + res = -EFAULT; + goto out_unlock; + } + res = sscanf(buf, "%x %x", &offset, &value); + if (res != 2) { + res = -EFAULT; + goto out_unlock; + } + + offval.offset = offset; + offval.value = value; + res = lbs_prepare_and_send_command(priv, + CMD_MAC_REG_ACCESS, 1, + CMD_OPTION_WAITFORRSP, 0, &offval); + mdelay(10); + + if (!res) + res = count; +out_unlock: + free_page(addr); + return res; +} + +static ssize_t lbs_rdbbp_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + struct lbs_offset_value offval; + ssize_t pos = 0; + int ret; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + offval.offset = priv->bbp_offset; + offval.value = 0; + + ret = lbs_prepare_and_send_command(priv, + CMD_BBP_REG_ACCESS, 0, + CMD_OPTION_WAITFORRSP, 0, &offval); + mdelay(10); + if (!ret) { + pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n", + priv->bbp_offset, priv->offsetvalue.value); + + ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); + } + free_page(addr); + + return ret; +} + +static ssize_t lbs_rdbbp_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + ssize_t res, buf_size; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + buf_size = min(count, len - 1); + if (copy_from_user(buf, userbuf, buf_size)) { + res = -EFAULT; + goto out_unlock; + } + priv->bbp_offset = simple_strtoul((char *)buf, NULL, 16); + res = count; +out_unlock: + free_page(addr); + return res; +} + +static ssize_t lbs_wrbbp_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + + struct lbs_private *priv = file->private_data; + ssize_t res, buf_size; + u32 offset, value; + struct lbs_offset_value offval; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + buf_size = min(count, len - 1); + if (copy_from_user(buf, userbuf, buf_size)) { + res = -EFAULT; + goto out_unlock; + } + res = sscanf(buf, "%x %x", &offset, &value); + if (res != 2) { + res = -EFAULT; + goto out_unlock; + } + + offval.offset = offset; + offval.value = value; + res = lbs_prepare_and_send_command(priv, + CMD_BBP_REG_ACCESS, 1, + CMD_OPTION_WAITFORRSP, 0, &offval); + mdelay(10); + + if (!res) + res = count; 
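+ /* As in the other register write handlers, a successful write reports the full byte count back to user space. */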
+out_unlock: + free_page(addr); + return res; +} + +static ssize_t lbs_rdrf_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + struct lbs_offset_value offval; + ssize_t pos = 0; + int ret; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + offval.offset = priv->rf_offset; + offval.value = 0; + + ret = lbs_prepare_and_send_command(priv, + CMD_RF_REG_ACCESS, 0, + CMD_OPTION_WAITFORRSP, 0, &offval); + mdelay(10); + if (!ret) { + pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n", + priv->rf_offset, priv->offsetvalue.value); + + ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); + } + free_page(addr); + + return ret; +} + +static ssize_t lbs_rdrf_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct lbs_private *priv = file->private_data; + ssize_t res, buf_size; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + buf_size = min(count, len - 1); + if (copy_from_user(buf, userbuf, buf_size)) { + res = -EFAULT; + goto out_unlock; + } + priv->rf_offset = simple_strtoul(buf, NULL, 16); + res = count; +out_unlock: + free_page(addr); + return res; +} + +static ssize_t lbs_wrrf_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + + struct lbs_private *priv = file->private_data; + ssize_t res, buf_size; + u32 offset, value; + struct lbs_offset_value offval; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + buf_size = min(count, len - 1); + if (copy_from_user(buf, userbuf, buf_size)) { + res = -EFAULT; + goto out_unlock; + } + res = sscanf(buf, "%x %x", &offset, &value); + if (res != 2) { + res = -EFAULT; + goto out_unlock; + } + + offval.offset = offset; + offval.value = value; + res = lbs_prepare_and_send_command(priv, + CMD_RF_REG_ACCESS, 1, + CMD_OPTION_WAITFORRSP, 0, &offval); + mdelay(10); + + if (!res) + res = count; +out_unlock: + free_page(addr); + return res; +} + +#define FOPS(fread, fwrite) { \ + .owner = THIS_MODULE, \ + .open = open_file_generic, \ + .read = (fread), \ + .write = (fwrite), \ +} + +struct lbs_debugfs_files { + const char *name; + int perm; + struct file_operations fops; +}; + +static const struct lbs_debugfs_files debugfs_files[] = { + { "info", 0444, FOPS(lbs_dev_info, write_file_dummy), }, + { "getscantable", 0444, FOPS(lbs_getscantable, + write_file_dummy), }, + { "sleepparams", 0644, FOPS(lbs_sleepparams_read, + lbs_sleepparams_write), }, +}; + +static const struct lbs_debugfs_files debugfs_events_files[] = { + {"low_rssi", 0644, FOPS(lbs_lowrssi_read, + lbs_lowrssi_write), }, + {"low_snr", 0644, FOPS(lbs_lowsnr_read, + lbs_lowsnr_write), }, + {"failure_count", 0644, FOPS(lbs_failcount_read, + lbs_failcount_write), }, + {"beacon_missed", 0644, FOPS(lbs_bcnmiss_read, + lbs_bcnmiss_write), }, + {"high_rssi", 0644, FOPS(lbs_highrssi_read, + lbs_highrssi_write), }, + {"high_snr", 0644, FOPS(lbs_highsnr_read, + lbs_highsnr_write), }, +}; + +static const struct lbs_debugfs_files debugfs_regs_files[] = { + {"rdmac", 0644, FOPS(lbs_rdmac_read, lbs_rdmac_write), }, + {"wrmac", 0600, FOPS(NULL, lbs_wrmac_write), }, + {"rdbbp", 0644, FOPS(lbs_rdbbp_read, lbs_rdbbp_write), }, + {"wrbbp", 0600, FOPS(NULL, lbs_wrbbp_write), }, + {"rdrf", 0644, FOPS(lbs_rdrf_read, lbs_rdrf_write), }, + {"wrrf", 0600, FOPS(NULL, 
lbs_wrrf_write), }, +}; + +void lbs_debugfs_init(void) +{ + if (!lbs_dir) + lbs_dir = debugfs_create_dir("lbs_wireless", NULL); + + return; +} + +void lbs_debugfs_remove(void) +{ + if (lbs_dir) + debugfs_remove(lbs_dir); + return; +} + +void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) +{ + int i; + const struct lbs_debugfs_files *files; + if (!lbs_dir) + goto exit; + + priv->debugfs_dir = debugfs_create_dir(dev->name, lbs_dir); + if (!priv->debugfs_dir) + goto exit; + + for (i=0; idebugfs_files[i] = debugfs_create_file(files->name, + files->perm, + priv->debugfs_dir, + priv, + &files->fops); + } + + priv->events_dir = debugfs_create_dir("subscribed_events", priv->debugfs_dir); + if (!priv->events_dir) + goto exit; + + for (i=0; idebugfs_events_files[i] = debugfs_create_file(files->name, + files->perm, + priv->events_dir, + priv, + &files->fops); + } + + priv->regs_dir = debugfs_create_dir("registers", priv->debugfs_dir); + if (!priv->regs_dir) + goto exit; + + for (i=0; idebugfs_regs_files[i] = debugfs_create_file(files->name, + files->perm, + priv->regs_dir, + priv, + &files->fops); + } + +#ifdef PROC_DEBUG + lbs_debug_init(priv); +#endif +exit: + return; +} + +void lbs_debugfs_remove_one(struct lbs_private *priv) +{ + int i; + + for(i=0; idebugfs_regs_files[i]); + + debugfs_remove(priv->regs_dir); + + for(i=0; idebugfs_events_files[i]); + + debugfs_remove(priv->events_dir); +#ifdef PROC_DEBUG + debugfs_remove(priv->debugfs_debug); +#endif + for(i=0; idebugfs_files[i]); + debugfs_remove(priv->debugfs_dir); +} + + + +/* debug entry */ + +#ifdef PROC_DEBUG + +#define item_size(n) (FIELD_SIZEOF(struct lbs_private, n)) +#define item_addr(n) (offsetof(struct lbs_private, n)) + + +struct debug_data { + char name[32]; + u32 size; + size_t addr; +}; + +/* To debug any member of struct lbs_private, simply add one line here. 
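+ * A hypothetical extra entry would look like {"connect_status", item_size(connect_status), item_addr(connect_status)}, reusing the item_size/item_addr helpers defined above.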
+ */ +static struct debug_data items[] = { + {"psmode", item_size(psmode), item_addr(psmode)}, + {"psstate", item_size(psstate), item_addr(psstate)}, +}; + +static int num_of_items = ARRAY_SIZE(items); + +/** + * @brief proc read function + * + * @param page pointer to buffer + * @param s read data starting position + * @param off offset + * @param cnt counter + * @param eof end of file flag + * @param data data to output + * @return number of output data + */ +static ssize_t lbs_debugfs_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + int val = 0; + size_t pos = 0; + ssize_t res; + char *p; + int i; + struct debug_data *d; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + if (!buf) + return -ENOMEM; + + p = buf; + + d = (struct debug_data *)file->private_data; + + for (i = 0; i < num_of_items; i++) { + if (d[i].size == 1) + val = *((u8 *) d[i].addr); + else if (d[i].size == 2) + val = *((u16 *) d[i].addr); + else if (d[i].size == 4) + val = *((u32 *) d[i].addr); + else if (d[i].size == 8) + val = *((u64 *) d[i].addr); + + pos += sprintf(p + pos, "%s=%d\n", d[i].name, val); + } + + res = simple_read_from_buffer(userbuf, count, ppos, p, pos); + + free_page(addr); + return res; +} + +/** + * @brief proc write function + * + * @param f file pointer + * @param buf pointer to data buffer + * @param cnt data number to write + * @param data data to write + * @return number of data + */ +static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, + size_t cnt, loff_t *ppos) +{ + int r, i; + char *pdata; + char *p; + char *p0; + char *p1; + char *p2; + struct debug_data *d = (struct debug_data *)f->private_data; + + pdata = kmalloc(cnt, GFP_KERNEL); + if (pdata == NULL) + return 0; + + if (copy_from_user(pdata, buf, cnt)) { + lbs_deb_debugfs("Copy from user failed\n"); + kfree(pdata); + return 0; + } + + p0 = pdata; + for (i = 0; i < num_of_items; i++) { + do { + p = strstr(p0, d[i].name); + if (p == NULL) + break; + p1 = strchr(p, '\n'); + if (p1 == NULL) + break; + p0 = p1++; + p2 = strchr(p, '='); + if (!p2) + break; + p2++; + r = simple_strtoul(p2, NULL, 0); + if (d[i].size == 1) + *((u8 *) d[i].addr) = (u8) r; + else if (d[i].size == 2) + *((u16 *) d[i].addr) = (u16) r; + else if (d[i].size == 4) + *((u32 *) d[i].addr) = (u32) r; + else if (d[i].size == 8) + *((u64 *) d[i].addr) = (u64) r; + break; + } while (1); + } + kfree(pdata); + + return (ssize_t)cnt; +} + +static const struct file_operations lbs_debug_fops = { + .owner = THIS_MODULE, + .open = open_file_generic, + .write = lbs_debugfs_write, + .read = lbs_debugfs_read, +}; + +/** + * @brief create debug proc file + * + * @param priv pointer struct lbs_private + * @param dev pointer net_device + * @return N/A + */ +static void lbs_debug_init(struct lbs_private *priv) +{ + int i; + + if (!priv->debugfs_dir) + return; + + for (i = 0; i < num_of_items; i++) + items[i].addr += (size_t) priv; + + priv->debugfs_debug = debugfs_create_file("debug", 0644, + priv->debugfs_dir, &items[0], + &lbs_debug_fops); +} +#endif diff --git a/drivers/net/wireless/libertas/debugfs.h b/drivers/net/wireless/libertas/debugfs.h new file mode 100644 index 0000000..f2b9c7f --- /dev/null +++ b/drivers/net/wireless/libertas/debugfs.h @@ -0,0 +1,10 @@ +#ifndef _LBS_DEBUGFS_H_ +#define _LBS_DEBUGFS_H_ + +void lbs_debugfs_init(void); +void lbs_debugfs_remove(void); + +void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev); +void lbs_debugfs_remove_one(struct lbs_private 
*priv); + +#endif diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h new file mode 100644 index 0000000..709ffca --- /dev/null +++ b/drivers/net/wireless/libertas/decl.h @@ -0,0 +1,53 @@ +/** + * This file contains declaration referring to + * functions defined in other source files + */ + +#ifndef _LBS_DECL_H_ +#define _LBS_DECL_H_ + +#include + + +struct lbs_private; +struct sk_buff; +struct net_device; + + +/* ethtool.c */ +extern const struct ethtool_ops lbs_ethtool_ops; + + +/* tx.c */ +void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count); +netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev); + +/* rx.c */ +int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *); + + +/* main.c */ +struct lbs_private *lbs_add_card(void *card, struct device *dmdev); +void lbs_remove_card(struct lbs_private *priv); +int lbs_start_card(struct lbs_private *priv); +void lbs_stop_card(struct lbs_private *priv); +void lbs_host_to_card_done(struct lbs_private *priv); + +int lbs_set_mac_address(struct net_device *dev, void *addr); +void lbs_set_multicast_list(struct net_device *dev); + +int lbs_suspend(struct lbs_private *priv); +void lbs_resume(struct lbs_private *priv); + +void lbs_queue_event(struct lbs_private *priv, u32 event); +void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx); + +int lbs_enter_auto_deep_sleep(struct lbs_private *priv); +int lbs_exit_auto_deep_sleep(struct lbs_private *priv); + +u32 lbs_fw_index_to_data_rate(u8 index); +u8 lbs_data_rate_to_fw_index(u32 rate); + + +#endif diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h new file mode 100644 index 0000000..a61878f --- /dev/null +++ b/drivers/net/wireless/libertas/defs.h @@ -0,0 +1,421 @@ +/** + * This header file contains global constant/enum definitions, + * global variable declaration. + */ +#ifndef _LBS_DEFS_H_ +#define _LBS_DEFS_H_ + +#include + +#ifdef CONFIG_LIBERTAS_DEBUG +#define DEBUG +#define PROC_DEBUG +#endif + +#ifndef DRV_NAME +#define DRV_NAME "libertas" +#endif + +/* + * Really nasty hack to avoid stuffing compat.diff with tons of ifdefs, + * we could add this to a compat header file but too lazy to check ml_priv + * is not used anywhere else + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) +#define ml_priv priv +#endif + +#define LBS_DEB_ENTER 0x00000001 +#define LBS_DEB_LEAVE 0x00000002 +#define LBS_DEB_MAIN 0x00000004 +#define LBS_DEB_NET 0x00000008 +#define LBS_DEB_MESH 0x00000010 +#define LBS_DEB_WEXT 0x00000020 +#define LBS_DEB_IOCTL 0x00000040 +#define LBS_DEB_SCAN 0x00000080 +#define LBS_DEB_ASSOC 0x00000100 +#define LBS_DEB_JOIN 0x00000200 +#define LBS_DEB_11D 0x00000400 +#define LBS_DEB_DEBUGFS 0x00000800 +#define LBS_DEB_ETHTOOL 0x00001000 +#define LBS_DEB_HOST 0x00002000 +#define LBS_DEB_CMD 0x00004000 +#define LBS_DEB_RX 0x00008000 +#define LBS_DEB_TX 0x00010000 +#define LBS_DEB_USB 0x00020000 +#define LBS_DEB_CS 0x00040000 +#define LBS_DEB_FW 0x00080000 +#define LBS_DEB_THREAD 0x00100000 +#define LBS_DEB_HEX 0x00200000 +#define LBS_DEB_SDIO 0x00400000 +#define LBS_DEB_SYSFS 0x00800000 +#define LBS_DEB_SPI 0x01000000 +#define LBS_DEB_CFG80211 0x02000000 + +extern unsigned int lbs_debug; + +#ifdef DEBUG +#define LBS_DEB_LL(grp, grpnam, fmt, args...) \ +do { if ((lbs_debug & (grp)) == (grp)) \ + printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \ + in_interrupt() ? " (INT)" : "", ## args); } while (0) +#else +#define LBS_DEB_LL(grp, grpnam, fmt, args...) 
do {} while (0) +#endif + +#define lbs_deb_enter(grp) \ + LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s()\n", __func__); +#define lbs_deb_enter_args(grp, fmt, args...) \ + LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s(" fmt ")\n", __func__, ## args); +#define lbs_deb_leave(grp) \ + LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s()\n", __func__); +#define lbs_deb_leave_args(grp, fmt, args...) \ + LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s(), " fmt "\n", \ + __func__, ##args); +#define lbs_deb_main(fmt, args...) LBS_DEB_LL(LBS_DEB_MAIN, " main", fmt, ##args) +#define lbs_deb_net(fmt, args...) LBS_DEB_LL(LBS_DEB_NET, " net", fmt, ##args) +#define lbs_deb_mesh(fmt, args...) LBS_DEB_LL(LBS_DEB_MESH, " mesh", fmt, ##args) +#define lbs_deb_wext(fmt, args...) LBS_DEB_LL(LBS_DEB_WEXT, " wext", fmt, ##args) +#define lbs_deb_ioctl(fmt, args...) LBS_DEB_LL(LBS_DEB_IOCTL, " ioctl", fmt, ##args) +#define lbs_deb_scan(fmt, args...) LBS_DEB_LL(LBS_DEB_SCAN, " scan", fmt, ##args) +#define lbs_deb_assoc(fmt, args...) LBS_DEB_LL(LBS_DEB_ASSOC, " assoc", fmt, ##args) +#define lbs_deb_join(fmt, args...) LBS_DEB_LL(LBS_DEB_JOIN, " join", fmt, ##args) +#define lbs_deb_11d(fmt, args...) LBS_DEB_LL(LBS_DEB_11D, " 11d", fmt, ##args) +#define lbs_deb_debugfs(fmt, args...) LBS_DEB_LL(LBS_DEB_DEBUGFS, " debugfs", fmt, ##args) +#define lbs_deb_ethtool(fmt, args...) LBS_DEB_LL(LBS_DEB_ETHTOOL, " ethtool", fmt, ##args) +#define lbs_deb_host(fmt, args...) LBS_DEB_LL(LBS_DEB_HOST, " host", fmt, ##args) +#define lbs_deb_cmd(fmt, args...) LBS_DEB_LL(LBS_DEB_CMD, " cmd", fmt, ##args) +#define lbs_deb_rx(fmt, args...) LBS_DEB_LL(LBS_DEB_RX, " rx", fmt, ##args) +#define lbs_deb_tx(fmt, args...) LBS_DEB_LL(LBS_DEB_TX, " tx", fmt, ##args) +#define lbs_deb_fw(fmt, args...) LBS_DEB_LL(LBS_DEB_FW, " fw", fmt, ##args) +#define lbs_deb_usb(fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usb", fmt, ##args) +#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, dev_name(dev), ##args) +#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args) +#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args) +#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args) +#define lbs_deb_sysfs(fmt, args...) LBS_DEB_LL(LBS_DEB_SYSFS, " sysfs", fmt, ##args) +#define lbs_deb_spi(fmt, args...) LBS_DEB_LL(LBS_DEB_SPI, " spi", fmt, ##args) +#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args) + +#define lbs_pr_info(format, args...) \ + printk(KERN_INFO DRV_NAME": " format, ## args) +#define lbs_pr_err(format, args...) \ + printk(KERN_ERR DRV_NAME": " format, ## args) +#define lbs_pr_alert(format, args...) \ + printk(KERN_ALERT DRV_NAME": " format, ## args) + +#ifdef DEBUG +static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len) +{ + int i = 0; + + if (len && + (lbs_debug & LBS_DEB_HEX) && + (lbs_debug & grp)) + { + for (i = 1; i <= len; i++) { + if ((i & 0xf) == 1) { + if (i != 1) + printk("\n"); + printk(DRV_NAME " %s: ", prompt); + } + printk("%02x ", (u8) * buf); + buf++; + } + printk("\n"); + } +} +#else +#define lbs_deb_hex(grp,prompt,buf,len) do {} while (0) +#endif + + + +/** Buffer Constants */ + +/* The size of SQ memory PPA, DPA are 8 DWORDs, that keep the physical +* addresses of TxPD buffers. Station has only 8 TxPD available, Whereas +* driver has more local TxPDs. Each TxPD on the host memory is associated +* with a Tx control node. 
The driver maintains 8 RxPD descriptors for +* station firmware to store Rx packet information. +* +* Current version of MAC has a 32x6 multicast address buffer. +* +* 802.11b can have up to 14 channels, the driver keeps the +* BSSID(MAC address) of each APs or Ad hoc stations it has sensed. +*/ + +#define MRVDRV_MAX_MULTICAST_LIST_SIZE 32 +#define LBS_NUM_CMD_BUFFERS 10 +#define LBS_CMD_BUFFER_SIZE (2 * 1024) +#define MRVDRV_MAX_CHANNEL_SIZE 14 +#define MRVDRV_ASSOCIATION_TIME_OUT 255 +#define MRVDRV_SNAP_HEADER_LEN 8 + +#define LBS_UPLD_SIZE 2312 +#define DEV_NAME_LEN 32 + +/* Wake criteria for HOST_SLEEP_CFG command */ +#define EHS_WAKE_ON_BROADCAST_DATA 0x0001 +#define EHS_WAKE_ON_UNICAST_DATA 0x0002 +#define EHS_WAKE_ON_MAC_EVENT 0x0004 +#define EHS_WAKE_ON_MULTICAST_DATA 0x0008 +#define EHS_REMOVE_WAKEUP 0xFFFFFFFF +/* Wake rules for Host_Sleep_CFG command */ +#define WOL_RULE_NET_TYPE_INFRA_OR_IBSS 0x00 +#define WOL_RULE_NET_TYPE_MESH 0x10 +#define WOL_RULE_ADDR_TYPE_BCAST 0x01 +#define WOL_RULE_ADDR_TYPE_MCAST 0x08 +#define WOL_RULE_ADDR_TYPE_UCAST 0x02 +#define WOL_RULE_OP_AND 0x01 +#define WOL_RULE_OP_OR 0x02 +#define WOL_RULE_OP_INVALID 0xFF +#define WOL_RESULT_VALID_CMD 0 +#define WOL_RESULT_NOSPC_ERR 1 +#define WOL_RESULT_EEXIST_ERR 2 + +/** Misc constants */ +/* This section defines 802.11 specific contants */ + +#define MRVDRV_MAX_BSS_DESCRIPTS 16 +#define MRVDRV_MAX_REGION_CODE 6 + +#define MRVDRV_IGNORE_MULTIPLE_DTIM 0xfffe +#define MRVDRV_MIN_MULTIPLE_DTIM 1 +#define MRVDRV_MAX_MULTIPLE_DTIM 5 +#define MRVDRV_DEFAULT_MULTIPLE_DTIM 1 + +#define MRVDRV_DEFAULT_LISTEN_INTERVAL 10 + +#define MRVDRV_CHANNELS_PER_SCAN 4 +#define MRVDRV_MAX_CHANNELS_PER_SCAN 14 + +#define MRVDRV_MIN_BEACON_INTERVAL 20 +#define MRVDRV_MAX_BEACON_INTERVAL 1000 +#define MRVDRV_BEACON_INTERVAL 100 + +#define MARVELL_MESH_IE_LENGTH 9 + +/* Values used to populate the struct mrvl_mesh_ie. The only time you need this + * is when enabling the mesh using CMD_MESH_CONFIG. + */ +#define MARVELL_MESH_IE_TYPE 4 +#define MARVELL_MESH_IE_SUBTYPE 0 +#define MARVELL_MESH_IE_VERSION 0 +#define MARVELL_MESH_PROTO_ID_HWMP 0 +#define MARVELL_MESH_METRIC_ID 0 +#define MARVELL_MESH_CAPABILITY 0 + +/** INT status Bit Definition*/ +#define MRVDRV_TX_DNLD_RDY 0x0001 +#define MRVDRV_RX_UPLD_RDY 0x0002 +#define MRVDRV_CMD_DNLD_RDY 0x0004 +#define MRVDRV_CMD_UPLD_RDY 0x0008 +#define MRVDRV_CARDEVENT 0x0010 + +/* Automatic TX control default levels */ +#define POW_ADAPT_DEFAULT_P0 13 +#define POW_ADAPT_DEFAULT_P1 15 +#define POW_ADAPT_DEFAULT_P2 18 +#define TPC_DEFAULT_P0 5 +#define TPC_DEFAULT_P1 10 +#define TPC_DEFAULT_P2 13 + +/** TxPD status */ + +/* Station firmware use TxPD status field to report final Tx transmit +* result, Bit masks are used to present combined situations. +*/ + +#define MRVDRV_TxPD_POWER_MGMT_NULL_PACKET 0x01 +#define MRVDRV_TxPD_POWER_MGMT_LAST_PACKET 0x08 + +/** Tx mesh flag */ +/* Currently we are using normal WDS flag as mesh flag. + * TODO: change to proper mesh flag when MAC understands it. 
+ */ +#define TxPD_CONTROL_WDS_FRAME (1<<17) +#define TxPD_MESH_FRAME TxPD_CONTROL_WDS_FRAME + +/** Mesh interface ID */ +#define MESH_IFACE_ID 0x0001 +/** Mesh id should be in bits 14-13-12 */ +#define MESH_IFACE_BIT_OFFSET 0x000c +/** Mesh enable bit in FW capability */ +#define MESH_CAPINFO_ENABLE_MASK (1<<16) + +/** FW definition from Marvell v4 */ +#define MRVL_FW_V4 (0x04) +/** FW definition from Marvell v5 */ +#define MRVL_FW_V5 (0x05) +/** FW definition from Marvell v10 */ +#define MRVL_FW_V10 (0x0a) +/** FW major revision definition */ +#define MRVL_FW_MAJOR_REV(x) ((x)>>24) + +/** RxPD status */ + +#define MRVDRV_RXPD_STATUS_OK 0x0001 + +/** RxPD status - Received packet types */ +/** Rx mesh flag */ +/* Currently we are using normal WDS flag as mesh flag. + * TODO: change to proper mesh flag when MAC understands it. + */ +#define RxPD_CONTROL_WDS_FRAME (0x40) +#define RxPD_MESH_FRAME RxPD_CONTROL_WDS_FRAME + +/** RSSI-related defines */ +/* RSSI constants are used to implement 802.11 RSSI threshold +* indication. if the Rx packet signal got too weak for 5 consecutive +* times, miniport driver (driver) will report this event to wrapper +*/ + +#define MRVDRV_NF_DEFAULT_SCAN_VALUE (-96) + +/** RTS/FRAG related defines */ +#define MRVDRV_RTS_MIN_VALUE 0 +#define MRVDRV_RTS_MAX_VALUE 2347 +#define MRVDRV_FRAG_MIN_VALUE 256 +#define MRVDRV_FRAG_MAX_VALUE 2346 + +/* This is for firmware specific length */ +#define EXTRA_LEN 36 + +#define MRVDRV_ETH_TX_PACKET_BUFFER_SIZE \ + (ETH_FRAME_LEN + sizeof(struct txpd) + EXTRA_LEN) + +#define MRVDRV_ETH_RX_PACKET_BUFFER_SIZE \ + (ETH_FRAME_LEN + sizeof(struct rxpd) \ + + MRVDRV_SNAP_HEADER_LEN + EXTRA_LEN) + +#define CMD_F_HOSTCMD (1 << 0) +#define FW_CAPINFO_WPA (1 << 0) +#define FW_CAPINFO_PS (1 << 1) +#define FW_CAPINFO_FIRMWARE_UPGRADE (1 << 13) +#define FW_CAPINFO_BOOT2_UPGRADE (1<<14) +#define FW_CAPINFO_PERSISTENT_CONFIG (1<<15) + +#define KEY_LEN_WPA_AES 16 +#define KEY_LEN_WPA_TKIP 32 +#define KEY_LEN_WEP_104 13 +#define KEY_LEN_WEP_40 5 + +#define RF_ANTENNA_1 0x1 +#define RF_ANTENNA_2 0x2 +#define RF_ANTENNA_AUTO 0xFFFF + +#define BAND_B (0x01) +#define BAND_G (0x02) +#define ALL_802_11_BANDS (BAND_B | BAND_G) + +/** MACRO DEFINITIONS */ +#define CAL_NF(NF) ((s32)(-(s32)(NF))) +#define CAL_RSSI(SNR, NF) ((s32)((s32)(SNR) + CAL_NF(NF))) +#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI))) + +#define DEFAULT_BCN_AVG_FACTOR 8 +#define DEFAULT_DATA_AVG_FACTOR 8 +#define AVG_SCALE 100 +#define CAL_AVG_SNR_NF(AVG, SNRNF, N) \ + (((AVG) == 0) ? 
((u16)(SNRNF) * AVG_SCALE) : \ + ((((int)(AVG) * (N -1)) + ((u16)(SNRNF) * \ + AVG_SCALE)) / N)) + +#define MAX_RATES 14 + +#define MAX_LEDS 8 + +/** Global Variable Declaration */ +extern const char lbs_driver_version[]; +extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE]; + + +/** ENUM definition*/ +/** SNRNF_TYPE */ +enum SNRNF_TYPE { + TYPE_BEACON = 0, + TYPE_RXPD, + MAX_TYPE_B +}; + +/** SNRNF_DATA*/ +enum SNRNF_DATA { + TYPE_NOAVG = 0, + TYPE_AVG, + MAX_TYPE_AVG +}; + +/** LBS_802_11_POWER_MODE */ +enum LBS_802_11_POWER_MODE { + LBS802_11POWERMODECAM, + LBS802_11POWERMODEMAX_PSP, + LBS802_11POWERMODEFAST_PSP, + /*not a real mode, defined as an upper bound */ + LBS802_11POWEMODEMAX +}; + +/** PS_STATE */ +enum PS_STATE { + PS_STATE_FULL_POWER, + PS_STATE_AWAKE, + PS_STATE_PRE_SLEEP, + PS_STATE_SLEEP +}; + +/** DNLD_STATE */ +enum DNLD_STATE { + DNLD_RES_RECEIVED, + DNLD_DATA_SENT, + DNLD_CMD_SENT, + DNLD_BOOTCMD_SENT, +}; + +/** LBS_MEDIA_STATE */ +enum LBS_MEDIA_STATE { + LBS_CONNECTED, + LBS_DISCONNECTED +}; + +/** LBS_802_11_PRIVACY_FILTER */ +enum LBS_802_11_PRIVACY_FILTER { + LBS802_11PRIVFILTERACCEPTALL, + LBS802_11PRIVFILTER8021XWEP +}; + +/** mv_ms_type */ +enum mv_ms_type { + MVMS_DAT = 0, + MVMS_CMD = 1, + MVMS_TXDONE = 2, + MVMS_EVENT +}; + +/** KEY_TYPE_ID */ +enum KEY_TYPE_ID { + KEY_TYPE_ID_WEP = 0, + KEY_TYPE_ID_TKIP, + KEY_TYPE_ID_AES +}; + +/** KEY_INFO_WPA (applies to both TKIP and AES/CCMP) */ +enum KEY_INFO_WPA { + KEY_INFO_WPA_MCAST = 0x01, + KEY_INFO_WPA_UNICAST = 0x02, + KEY_INFO_WPA_ENABLED = 0x04 +}; + +/* Default values for fwt commands. */ +#define FWT_DEFAULT_METRIC 0 +#define FWT_DEFAULT_DIR 1 +/* Default Rate, 11Mbps */ +#define FWT_DEFAULT_RATE 3 +#define FWT_DEFAULT_SSN 0xffffffff +#define FWT_DEFAULT_DSN 0 +#define FWT_DEFAULT_HOPCOUNT 0 +#define FWT_DEFAULT_TTL 0 +#define FWT_DEFAULT_EXPIRATION 0 +#define FWT_DEFAULT_SLEEPMODE 0 +#define FWT_DEFAULT_SNR 0 + +#endif diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h new file mode 100644 index 0000000..bde21cf --- /dev/null +++ b/drivers/net/wireless/libertas/dev.h @@ -0,0 +1,208 @@ +/** + * This file contains definitions and data structures specific + * to Marvell 802.11 NIC. It contains the Device Information + * structure struct lbs_private.. 
+ */ +#ifndef _LBS_DEV_H_ +#define _LBS_DEV_H_ + +#include "mesh.h" +#include "scan.h" +#include "assoc.h" + +#include + +/** sleep_params */ +struct sleep_params { + uint16_t sp_error; + uint16_t sp_offset; + uint16_t sp_stabletime; + uint8_t sp_calcontrol; + uint8_t sp_extsleepclk; + uint16_t sp_reserved; +}; + + +/** Private structure for the MV device */ +struct lbs_private { + + /* Basic networking */ + struct net_device *dev; + u32 connect_status; + int infra_open; + struct work_struct mcast_work; + u32 nr_of_multicastmacaddr; + u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN]; + + /* CFG80211 */ + struct wireless_dev *wdev; + + /* Mesh */ + struct net_device *mesh_dev; /* Virtual device */ +#ifdef CONFIG_LIBERTAS_MESH + u32 mesh_connect_status; + struct lbs_mesh_stats mstats; + int mesh_open; + uint16_t mesh_tlv; + u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1]; + u8 mesh_ssid_len; +#endif + + /* Monitor mode */ + struct net_device *rtap_net_dev; + u32 monitormode; + + /* Debugfs */ + struct dentry *debugfs_dir; + struct dentry *debugfs_debug; + struct dentry *debugfs_files[6]; + struct dentry *events_dir; + struct dentry *debugfs_events_files[6]; + struct dentry *regs_dir; + struct dentry *debugfs_regs_files[6]; + + /* Hardware debugging */ + u32 mac_offset; + u32 bbp_offset; + u32 rf_offset; + struct lbs_offset_value offsetvalue; + + /* Power management */ + u16 psmode; + u32 psstate; + u8 needtowakeup; + + /* Deep sleep */ + int is_deep_sleep; + int is_auto_deep_sleep_enabled; + int wakeup_dev_required; + int is_activity_detected; + int auto_deep_sleep_timeout; /* in ms */ + wait_queue_head_t ds_awake_q; + struct timer_list auto_deepsleep_timer; + + /* Hardware access */ + void *card; + u8 fw_ready; + u8 surpriseremoved; + int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); + void (*reset_card) (struct lbs_private *priv); + int (*enter_deep_sleep) (struct lbs_private *priv); + int (*exit_deep_sleep) (struct lbs_private *priv); + int (*reset_deep_sleep_wakeup) (struct lbs_private *priv); + + /* Adapter info (from EEPROM) */ + u32 fwrelease; + u32 fwcapinfo; + u16 regioncode; + u8 current_addr[ETH_ALEN]; + + /* Command download */ + u8 dnld_sent; + /* bit0 1/0=data_sent/data_tx_done, + bit1 1/0=cmd_sent/cmd_tx_done, + all other bits reserved 0 */ + u16 seqnum; + struct cmd_ctrl_node *cmd_array; + struct cmd_ctrl_node *cur_cmd; + struct list_head cmdfreeq; /* free command buffers */ + struct list_head cmdpendingq; /* pending command buffers */ + wait_queue_head_t cmd_pending; + struct timer_list command_timer; + int cmd_timed_out; + + /* Command responses sent from the hardware to the driver */ + int cur_cmd_retcode; + u8 resp_idx; + u8 resp_buf[2][LBS_UPLD_SIZE]; + u32 resp_len[2]; + + /* Events sent from hardware to driver */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + struct kfifo event_fifo; +#else + struct kfifo *event_fifo; +#endif + + /** thread to service interrupts */ + struct task_struct *main_thread; + wait_queue_head_t waitq; + struct workqueue_struct *work_thread; + + /** Encryption stuff */ + struct lbs_802_11_security secinfo; + struct enc_key wpa_mcast_key; + struct enc_key wpa_unicast_key; + u8 wpa_ie[MAX_WPA_IE_LEN]; + u8 wpa_ie_len; + u16 wep_tx_keyidx; + struct enc_key wep_keys[4]; + + /* Wake On LAN */ + uint32_t wol_criteria; + uint8_t wol_gpio; + uint8_t wol_gap; + + /* Transmitting */ + int tx_pending_len; /* -1 while building packet */ + u8 tx_pending_buf[LBS_UPLD_SIZE]; + /* protected by hard_start_xmit 
serialization */ + u8 txretrycount; + struct sk_buff *currenttxskb; + + /* Locks */ + struct mutex lock; + spinlock_t driver_lock; + + /* NIC/link operation characteristics */ + u16 mac_control; + u8 radio_on; + u8 channel; + s16 txpower_cur; + s16 txpower_min; + s16 txpower_max; + + /** Scanning */ + struct delayed_work scan_work; + int scan_channel; + /* remember which channel was scanned last, != 0 if currently scanning */ + u8 scan_ssid[IEEE80211_MAX_SSID_LEN + 1]; + u8 scan_ssid_len; + + /* Associating */ + struct delayed_work assoc_work; + struct current_bss_params curbssparams; + u8 mode; + struct list_head network_list; + struct list_head network_free_list; + struct bss_descriptor *networks; + struct assoc_request * pending_assoc_req; + struct assoc_request * in_progress_assoc_req; + uint16_t enablehwauto; + + /* ADHOC */ + u16 beacon_period; + u8 beacon_enable; + u8 adhoccreate; + + /* WEXT */ + char name[DEV_NAME_LEN]; + u8 nodename[16]; + struct iw_statistics wstats; + u8 cur_rate; +#define MAX_REGION_CHANNEL_NUM 2 + struct region_channel region_channel[MAX_REGION_CHANNEL_NUM]; + + /** Requested Signal Strength*/ + u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG]; + u16 NF[MAX_TYPE_B][MAX_TYPE_AVG]; + u8 RSSI[MAX_TYPE_B][MAX_TYPE_AVG]; + u8 rawSNR[DEFAULT_DATA_AVG_FACTOR]; + u8 rawNF[DEFAULT_DATA_AVG_FACTOR]; + u16 nextSNRNF; + u16 numSNRNF; +}; + +extern struct cmd_confirm_sleep confirm_sleep; + +#endif diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c new file mode 100644 index 0000000..3804a58 --- /dev/null +++ b/drivers/net/wireless/libertas/ethtool.c @@ -0,0 +1,125 @@ +#include +#include +#include + +#include "host.h" +#include "decl.h" +#include "defs.h" +#include "dev.h" +#include "wext.h" +#include "cmd.h" +#include "mesh.h" + + +static void lbs_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct lbs_private *priv = dev->ml_priv; + + snprintf(info->fw_version, 32, "%u.%u.%u.p%u", + priv->fwrelease >> 24 & 0xff, + priv->fwrelease >> 16 & 0xff, + priv->fwrelease >> 8 & 0xff, + priv->fwrelease & 0xff); + strcpy(info->driver, "libertas"); + strcpy(info->version, lbs_driver_version); +} + +/* All 8388 parts have 16KiB EEPROM size at the time of writing. + * In case that changes this needs fixing. 
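/*
 * Aside: lbs_ethtool_get_drvinfo() above builds the firmware version string
 * by unpacking one byte per field from the 32-bit fwrelease word.  A minimal
 * stand-alone sketch of that formatting; the value below is made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int fwrelease = 0x09460003;	/* hypothetical example value */
	char buf[32];

	snprintf(buf, sizeof(buf), "%u.%u.%u.p%u",
		 fwrelease >> 24 & 0xff,
		 fwrelease >> 16 & 0xff,
		 fwrelease >> 8 & 0xff,
		 fwrelease & 0xff);
	printf("%s\n", buf);			/* prints "9.70.0.p3" */
	return 0;
}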
+ */ +#define LBS_EEPROM_LEN 16384 + +static int lbs_ethtool_get_eeprom_len(struct net_device *dev) +{ + return LBS_EEPROM_LEN; +} + +static int lbs_ethtool_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 * bytes) +{ + struct lbs_private *priv = dev->ml_priv; + struct cmd_ds_802_11_eeprom_access cmd; + int ret; + + lbs_deb_enter(LBS_DEB_ETHTOOL); + + if (eeprom->offset + eeprom->len > LBS_EEPROM_LEN || + eeprom->len > LBS_EEPROM_READ_LEN) { + ret = -EINVAL; + goto out; + } + + cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_eeprom_access) - + LBS_EEPROM_READ_LEN + eeprom->len); + cmd.action = cpu_to_le16(CMD_ACT_GET); + cmd.offset = cpu_to_le16(eeprom->offset); + cmd.len = cpu_to_le16(eeprom->len); + ret = lbs_cmd_with_response(priv, CMD_802_11_EEPROM_ACCESS, &cmd); + if (!ret) + memcpy(bytes, cmd.value, eeprom->len); + +out: + lbs_deb_leave_args(LBS_DEB_ETHTOOL, "ret %d", ret); + return ret; +} + +static void lbs_ethtool_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct lbs_private *priv = dev->ml_priv; + + if (priv->wol_criteria == 0xffffffff) { + /* Interface driver didn't configure wake */ + wol->supported = wol->wolopts = 0; + return; + } + + wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY; + + if (priv->wol_criteria & EHS_WAKE_ON_UNICAST_DATA) + wol->wolopts |= WAKE_UCAST; + if (priv->wol_criteria & EHS_WAKE_ON_MULTICAST_DATA) + wol->wolopts |= WAKE_MCAST; + if (priv->wol_criteria & EHS_WAKE_ON_BROADCAST_DATA) + wol->wolopts |= WAKE_BCAST; + if (priv->wol_criteria & EHS_WAKE_ON_MAC_EVENT) + wol->wolopts |= WAKE_PHY; +} + +static int lbs_ethtool_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct lbs_private *priv = dev->ml_priv; + uint32_t criteria = 0; + + if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY)) + return -EOPNOTSUPP; + + if (wol->wolopts & WAKE_UCAST) + criteria |= EHS_WAKE_ON_UNICAST_DATA; + if (wol->wolopts & WAKE_MCAST) + criteria |= EHS_WAKE_ON_MULTICAST_DATA; + if (wol->wolopts & WAKE_BCAST) + criteria |= EHS_WAKE_ON_BROADCAST_DATA; + if (wol->wolopts & WAKE_PHY) + criteria |= EHS_WAKE_ON_MAC_EVENT; + if (wol->wolopts == 0) + criteria |= EHS_REMOVE_WAKEUP; + + return lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL); +} + +const struct ethtool_ops lbs_ethtool_ops = { + .get_drvinfo = lbs_ethtool_get_drvinfo, + .get_eeprom = lbs_ethtool_get_eeprom, + .get_eeprom_len = lbs_ethtool_get_eeprom_len, +#ifdef CONFIG_LIBERTAS_MESH + .get_sset_count = lbs_mesh_ethtool_get_sset_count, + .get_ethtool_stats = lbs_mesh_ethtool_get_stats, + .get_strings = lbs_mesh_ethtool_get_strings, +#endif + .get_wol = lbs_ethtool_get_wol, + .set_wol = lbs_ethtool_set_wol, +}; + diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h new file mode 100644 index 0000000..3809c0b --- /dev/null +++ b/drivers/net/wireless/libertas/host.h @@ -0,0 +1,962 @@ +/** + * This file function prototypes, data structure + * and definitions for all the host/station commands + */ + +#ifndef _LBS_HOST_H_ +#define _LBS_HOST_H_ + +#include "types.h" +#include "defs.h" + +#define DEFAULT_AD_HOC_CHANNEL 6 + +#define CMD_OPTION_WAITFORRSP 0x0002 + +/** Host command IDs */ + +/* Return command are almost always the same as the host command, but with + * bit 15 set high. There are a few exceptions, though... 
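/*
 * Aside: the response-code convention described above is simply "set bit 15
 * of the command code", which is what the CMD_RET() macro right below does.
 * A minimal stand-alone sketch, copying the macro and two command codes from
 * this header:
 */
#include <stdio.h>

#define CMD_RET(cmd)		(0x8000 | cmd)
#define CMD_GET_HW_SPEC		0x0003
#define CMD_802_11_SCAN		0x0006

int main(void)
{
	printf("GET_HW_SPEC response: 0x%04x\n", CMD_RET(CMD_GET_HW_SPEC));
	printf("SCAN response:        0x%04x\n", CMD_RET(CMD_802_11_SCAN));
	/*
	 * The one exception listed below: association uses command 0x0050,
	 * but its response comes back as 0x8012, not 0x8050.
	 */
	return 0;
}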
+ */ +#define CMD_RET(cmd) (0x8000 | cmd) + +/* Return command convention exceptions: */ +#define CMD_RET_802_11_ASSOCIATE 0x8012 + +/* Command codes */ +#define CMD_GET_HW_SPEC 0x0003 +#define CMD_EEPROM_UPDATE 0x0004 +#define CMD_802_11_RESET 0x0005 +#define CMD_802_11_SCAN 0x0006 +#define CMD_802_11_GET_LOG 0x000b +#define CMD_MAC_MULTICAST_ADR 0x0010 +#define CMD_802_11_AUTHENTICATE 0x0011 +#define CMD_802_11_EEPROM_ACCESS 0x0059 +#define CMD_802_11_ASSOCIATE 0x0050 +#define CMD_802_11_SET_WEP 0x0013 +#define CMD_802_11_GET_STAT 0x0014 +#define CMD_802_3_GET_STAT 0x0015 +#define CMD_802_11_SNMP_MIB 0x0016 +#define CMD_MAC_REG_MAP 0x0017 +#define CMD_BBP_REG_MAP 0x0018 +#define CMD_MAC_REG_ACCESS 0x0019 +#define CMD_BBP_REG_ACCESS 0x001a +#define CMD_RF_REG_ACCESS 0x001b +#define CMD_802_11_RADIO_CONTROL 0x001c +#define CMD_802_11_RF_CHANNEL 0x001d +#define CMD_802_11_RF_TX_POWER 0x001e +#define CMD_802_11_RSSI 0x001f +#define CMD_802_11_RF_ANTENNA 0x0020 +#define CMD_802_11_PS_MODE 0x0021 +#define CMD_802_11_DATA_RATE 0x0022 +#define CMD_RF_REG_MAP 0x0023 +#define CMD_802_11_DEAUTHENTICATE 0x0024 +#define CMD_802_11_REASSOCIATE 0x0025 +#define CMD_MAC_CONTROL 0x0028 +#define CMD_802_11_AD_HOC_START 0x002b +#define CMD_802_11_AD_HOC_JOIN 0x002c +#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e +#define CMD_802_11_ENABLE_RSN 0x002f +#define CMD_802_11_SET_AFC 0x003c +#define CMD_802_11_GET_AFC 0x003d +#define CMD_802_11_DEEP_SLEEP 0x003e +#define CMD_802_11_AD_HOC_STOP 0x0040 +#define CMD_802_11_HOST_SLEEP_CFG 0x0043 +#define CMD_802_11_WAKEUP_CONFIRM 0x0044 +#define CMD_802_11_HOST_SLEEP_ACTIVATE 0x0045 +#define CMD_802_11_BEACON_STOP 0x0049 +#define CMD_802_11_MAC_ADDRESS 0x004d +#define CMD_802_11_LED_GPIO_CTRL 0x004e +#define CMD_802_11_EEPROM_ACCESS 0x0059 +#define CMD_802_11_BAND_CONFIG 0x0058 +#define CMD_GSPI_BUS_CONFIG 0x005a +#define CMD_802_11D_DOMAIN_INFO 0x005b +#define CMD_802_11_KEY_MATERIAL 0x005e +#define CMD_802_11_SLEEP_PARAMS 0x0066 +#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067 +#define CMD_802_11_SLEEP_PERIOD 0x0068 +#define CMD_802_11_TPC_CFG 0x0072 +#define CMD_802_11_PA_CFG 0x0073 +#define CMD_802_11_FW_WAKE_METHOD 0x0074 +#define CMD_802_11_SUBSCRIBE_EVENT 0x0075 +#define CMD_802_11_RATE_ADAPT_RATESET 0x0076 +#define CMD_802_11_TX_RATE_QUERY 0x007f +#define CMD_GET_TSF 0x0080 +#define CMD_BT_ACCESS 0x0087 +#define CMD_FWT_ACCESS 0x0095 +#define CMD_802_11_MONITOR_MODE 0x0098 +#define CMD_MESH_ACCESS 0x009b +#define CMD_MESH_CONFIG_OLD 0x00a3 +#define CMD_MESH_CONFIG 0x00ac +#define CMD_SET_BOOT2_VER 0x00a5 +#define CMD_FUNC_INIT 0x00a9 +#define CMD_FUNC_SHUTDOWN 0x00aa +#define CMD_802_11_BEACON_CTRL 0x00b0 + +/* For the IEEE Power Save */ +#define CMD_SUBCMD_ENTER_PS 0x0030 +#define CMD_SUBCMD_EXIT_PS 0x0031 +#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034 +#define CMD_SUBCMD_FULL_POWERDOWN 0x0035 +#define CMD_SUBCMD_FULL_POWERUP 0x0036 + +#define CMD_ENABLE_RSN 0x0001 +#define CMD_DISABLE_RSN 0x0000 + +#define CMD_ACT_GET 0x0000 +#define CMD_ACT_SET 0x0001 + +/* Define action or option for CMD_802_11_SET_WEP */ +#define CMD_ACT_ADD 0x0002 +#define CMD_ACT_REMOVE 0x0004 + +#define CMD_TYPE_WEP_40_BIT 0x01 +#define CMD_TYPE_WEP_104_BIT 0x02 + +#define CMD_NUM_OF_WEP_KEYS 4 + +#define CMD_WEP_KEY_INDEX_MASK 0x3fff + +/* Define action or option for CMD_802_11_SCAN */ +#define CMD_BSS_TYPE_BSS 0x0001 +#define CMD_BSS_TYPE_IBSS 0x0002 +#define CMD_BSS_TYPE_ANY 0x0003 + +/* Define action or option for CMD_802_11_SCAN */ +#define CMD_SCAN_TYPE_ACTIVE 0x0000 +#define 
CMD_SCAN_TYPE_PASSIVE 0x0001 + +#define CMD_SCAN_RADIO_TYPE_BG 0 + +#define CMD_SCAN_PROBE_DELAY_TIME 0 + +/* Define action or option for CMD_MAC_CONTROL */ +#define CMD_ACT_MAC_RX_ON 0x0001 +#define CMD_ACT_MAC_TX_ON 0x0002 +#define CMD_ACT_MAC_LOOPBACK_ON 0x0004 +#define CMD_ACT_MAC_WEP_ENABLE 0x0008 +#define CMD_ACT_MAC_INT_ENABLE 0x0010 +#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020 +#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040 +#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080 +#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 +#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400 + +/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */ +#define CMD_SUBSCRIBE_RSSI_LOW 0x0001 +#define CMD_SUBSCRIBE_SNR_LOW 0x0002 +#define CMD_SUBSCRIBE_FAILCOUNT 0x0004 +#define CMD_SUBSCRIBE_BCNMISS 0x0008 +#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010 +#define CMD_SUBSCRIBE_SNR_HIGH 0x0020 + +#define RADIO_PREAMBLE_LONG 0x00 +#define RADIO_PREAMBLE_SHORT 0x02 +#define RADIO_PREAMBLE_AUTO 0x04 + +/* Define action or option for CMD_802_11_RF_CHANNEL */ +#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00 +#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01 + +/* Define action or option for CMD_802_11_DATA_RATE */ +#define CMD_ACT_SET_TX_AUTO 0x0000 +#define CMD_ACT_SET_TX_FIX_RATE 0x0001 +#define CMD_ACT_GET_TX_RATE 0x0002 + +/* Define action or option for CMD_802_11_PS_MODE */ +#define CMD_TYPE_CAM 0x0000 +#define CMD_TYPE_MAX_PSP 0x0001 +#define CMD_TYPE_FAST_PSP 0x0002 + +/* Options for CMD_802_11_FW_WAKE_METHOD */ +#define CMD_WAKE_METHOD_UNCHANGED 0x0000 +#define CMD_WAKE_METHOD_COMMAND_INT 0x0001 +#define CMD_WAKE_METHOD_GPIO 0x0002 + +/* Object IDs for CMD_802_11_SNMP_MIB */ +#define SNMP_MIB_OID_BSS_TYPE 0x0000 +#define SNMP_MIB_OID_OP_RATE_SET 0x0001 +#define SNMP_MIB_OID_BEACON_PERIOD 0x0002 /* Reserved on v9+ */ +#define SNMP_MIB_OID_DTIM_PERIOD 0x0003 /* Reserved on v9+ */ +#define SNMP_MIB_OID_ASSOC_TIMEOUT 0x0004 /* Reserved on v9+ */ +#define SNMP_MIB_OID_RTS_THRESHOLD 0x0005 +#define SNMP_MIB_OID_SHORT_RETRY_LIMIT 0x0006 +#define SNMP_MIB_OID_LONG_RETRY_LIMIT 0x0007 +#define SNMP_MIB_OID_FRAG_THRESHOLD 0x0008 +#define SNMP_MIB_OID_11D_ENABLE 0x0009 +#define SNMP_MIB_OID_11H_ENABLE 0x000A + +/* Define action or option for CMD_BT_ACCESS */ +enum cmd_bt_access_opts { + /* The bt commands start at 5 instead of 1 because the old dft commands + * are mapped to 1-4. These old commands are no longer maintained and + * should not be called. 
+ */ + CMD_ACT_BT_ACCESS_ADD = 5, + CMD_ACT_BT_ACCESS_DEL, + CMD_ACT_BT_ACCESS_LIST, + CMD_ACT_BT_ACCESS_RESET, + CMD_ACT_BT_ACCESS_SET_INVERT, + CMD_ACT_BT_ACCESS_GET_INVERT +}; + +/* Define action or option for CMD_FWT_ACCESS */ +enum cmd_fwt_access_opts { + CMD_ACT_FWT_ACCESS_ADD = 1, + CMD_ACT_FWT_ACCESS_DEL, + CMD_ACT_FWT_ACCESS_LOOKUP, + CMD_ACT_FWT_ACCESS_LIST, + CMD_ACT_FWT_ACCESS_LIST_ROUTE, + CMD_ACT_FWT_ACCESS_LIST_NEIGHBOR, + CMD_ACT_FWT_ACCESS_RESET, + CMD_ACT_FWT_ACCESS_CLEANUP, + CMD_ACT_FWT_ACCESS_TIME, +}; + +/* Define action or option for CMD_802_11_HOST_SLEEP_CFG */ +enum cmd_wol_cfg_opts { + CMD_ACT_ACTION_NONE = 0, + CMD_ACT_SET_WOL_RULE, + CMD_ACT_GET_WOL_RULE, + CMD_ACT_RESET_WOL_RULE, +}; + +/* Define action or option for CMD_MESH_ACCESS */ +enum cmd_mesh_access_opts { + CMD_ACT_MESH_GET_TTL = 1, + CMD_ACT_MESH_SET_TTL, + CMD_ACT_MESH_GET_STATS, + CMD_ACT_MESH_GET_ANYCAST, + CMD_ACT_MESH_SET_ANYCAST, + CMD_ACT_MESH_SET_LINK_COSTS, + CMD_ACT_MESH_GET_LINK_COSTS, + CMD_ACT_MESH_SET_BCAST_RATE, + CMD_ACT_MESH_GET_BCAST_RATE, + CMD_ACT_MESH_SET_RREQ_DELAY, + CMD_ACT_MESH_GET_RREQ_DELAY, + CMD_ACT_MESH_SET_ROUTE_EXP, + CMD_ACT_MESH_GET_ROUTE_EXP, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_ACT_MESH_GET_AUTOSTART_ENABLED, + CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT = 17, +}; + +/* Define actions and types for CMD_MESH_CONFIG */ +enum cmd_mesh_config_actions { + CMD_ACT_MESH_CONFIG_STOP = 0, + CMD_ACT_MESH_CONFIG_START, + CMD_ACT_MESH_CONFIG_SET, + CMD_ACT_MESH_CONFIG_GET, +}; + +enum cmd_mesh_config_types { + CMD_TYPE_MESH_SET_BOOTFLAG = 1, + CMD_TYPE_MESH_SET_BOOTTIME, + CMD_TYPE_MESH_SET_DEF_CHANNEL, + CMD_TYPE_MESH_SET_MESH_IE, + CMD_TYPE_MESH_GET_DEFAULTS, + CMD_TYPE_MESH_GET_MESH_IE, /* GET_DEFAULTS is superset of GET_MESHIE */ +}; + +/** Card Event definition */ +#define MACREG_INT_CODE_TX_PPA_FREE 0 +#define MACREG_INT_CODE_TX_DMA_DONE 1 +#define MACREG_INT_CODE_LINK_LOST_W_SCAN 2 +#define MACREG_INT_CODE_LINK_LOST_NO_SCAN 3 +#define MACREG_INT_CODE_LINK_SENSED 4 +#define MACREG_INT_CODE_CMD_FINISHED 5 +#define MACREG_INT_CODE_MIB_CHANGED 6 +#define MACREG_INT_CODE_INIT_DONE 7 +#define MACREG_INT_CODE_DEAUTHENTICATED 8 +#define MACREG_INT_CODE_DISASSOCIATED 9 +#define MACREG_INT_CODE_PS_AWAKE 10 +#define MACREG_INT_CODE_PS_SLEEP 11 +#define MACREG_INT_CODE_MIC_ERR_MULTICAST 13 +#define MACREG_INT_CODE_MIC_ERR_UNICAST 14 +#define MACREG_INT_CODE_WM_AWAKE 15 +#define MACREG_INT_CODE_DEEP_SLEEP_AWAKE 16 +#define MACREG_INT_CODE_ADHOC_BCN_LOST 17 +#define MACREG_INT_CODE_HOST_AWAKE 18 +#define MACREG_INT_CODE_STOP_TX 19 +#define MACREG_INT_CODE_START_TX 20 +#define MACREG_INT_CODE_CHANNEL_SWITCH 21 +#define MACREG_INT_CODE_MEASUREMENT_RDY 22 +#define MACREG_INT_CODE_WMM_CHANGE 23 +#define MACREG_INT_CODE_BG_SCAN_REPORT 24 +#define MACREG_INT_CODE_RSSI_LOW 25 +#define MACREG_INT_CODE_SNR_LOW 26 +#define MACREG_INT_CODE_MAX_FAIL 27 +#define MACREG_INT_CODE_RSSI_HIGH 28 +#define MACREG_INT_CODE_SNR_HIGH 29 +#define MACREG_INT_CODE_MESH_AUTO_STARTED 35 +#define MACREG_INT_CODE_FIRMWARE_READY 48 + + +/* 802.11-related definitions */ + +/* TxPD descriptor */ +struct txpd { + /* union to cope up with later FW revisions */ + union { + /* Current Tx packet status */ + __le32 tx_status; + struct { + /* BSS type: client, AP, etc. 
*/ + u8 bss_type; + /* BSS number */ + u8 bss_num; + /* Reserved */ + __le16 reserved; + } bss; + } u; + /* Tx control */ + __le32 tx_control; + __le32 tx_packet_location; + /* Tx packet length */ + __le16 tx_packet_length; + /* First 2 byte of destination MAC address */ + u8 tx_dest_addr_high[2]; + /* Last 4 byte of destination MAC address */ + u8 tx_dest_addr_low[4]; + /* Pkt Priority */ + u8 priority; + /* Pkt Trasnit Power control */ + u8 powermgmt; + /* Amount of time the packet has been queued (units = 2ms) */ + u8 pktdelay_2ms; + /* reserved */ + u8 reserved1; +} __attribute__ ((packed)); + +/* RxPD Descriptor */ +struct rxpd { + /* union to cope up with later FW revisions */ + union { + /* Current Rx packet status */ + __le16 status; + struct { + /* BSS type: client, AP, etc. */ + u8 bss_type; + /* BSS number */ + u8 bss_num; + } __attribute__ ((packed)) bss; + } __attribute__ ((packed)) u; + + /* SNR */ + u8 snr; + + /* Tx control */ + u8 rx_control; + + /* Pkt length */ + __le16 pkt_len; + + /* Noise Floor */ + u8 nf; + + /* Rx Packet Rate */ + u8 rx_rate; + + /* Pkt addr */ + __le32 pkt_ptr; + + /* Next Rx RxPD addr */ + __le32 next_rxpd_ptr; + + /* Pkt Priority */ + u8 priority; + u8 reserved[3]; +} __attribute__ ((packed)); + +struct cmd_header { + __le16 command; + __le16 size; + __le16 seqnum; + __le16 result; +} __attribute__ ((packed)); + +/* Generic structure to hold all key types. */ +struct enc_key { + u16 len; + u16 flags; /* KEY_INFO_* from defs.h */ + u16 type; /* KEY_TYPE_* from defs.h */ + u8 key[32]; +}; + +/* lbs_offset_value */ +struct lbs_offset_value { + u32 offset; + u32 value; +} __attribute__ ((packed)); + +/* + * Define data structure for CMD_GET_HW_SPEC + * This structure defines the response for the GET_HW_SPEC command + */ +struct cmd_ds_get_hw_spec { + struct cmd_header hdr; + + /* HW Interface version number */ + __le16 hwifversion; + /* HW version number */ + __le16 version; + /* Max number of TxPD FW can handle */ + __le16 nr_txpd; + /* Max no of Multicast address */ + __le16 nr_mcast_adr; + /* MAC address */ + u8 permanentaddr[6]; + + /* region Code */ + __le16 regioncode; + + /* Number of antenna used */ + __le16 nr_antenna; + + /* FW release number, example 0x01030304 = 2.3.4p1 */ + __le32 fwrelease; + + /* Base Address of TxPD queue */ + __le32 wcb_base; + /* Read Pointer of RxPd queue */ + __le32 rxpd_rdptr; + + /* Write Pointer of RxPd queue */ + __le32 rxpd_wrptr; + + /*FW/HW capability */ + __le32 fwcapinfo; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_subscribe_event { + struct cmd_header hdr; + + __le16 action; + __le16 events; + + /* A TLV to the CMD_802_11_SUBSCRIBE_EVENT command can contain a + * number of TLVs. From the v5.1 manual, those TLVs would add up to + * 40 bytes. However, future firmware might add additional TLVs, so I + * bump this up a bit. 
+ */ + uint8_t tlv[128]; +} __attribute__ ((packed)); + +/* + * This scan handle Country Information IE(802.11d compliant) + * Define data structure for CMD_802_11_SCAN + */ +struct cmd_ds_802_11_scan { + struct cmd_header hdr; + + uint8_t bsstype; + uint8_t bssid[ETH_ALEN]; + uint8_t tlvbuffer[0]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_scan_rsp { + struct cmd_header hdr; + + __le16 bssdescriptsize; + uint8_t nr_sets; + uint8_t bssdesc_and_tlvbuffer[0]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_get_log { + struct cmd_header hdr; + + __le32 mcasttxframe; + __le32 failed; + __le32 retry; + __le32 multiretry; + __le32 framedup; + __le32 rtssuccess; + __le32 rtsfailure; + __le32 ackfailure; + __le32 rxfrag; + __le32 mcastrxframe; + __le32 fcserror; + __le32 txframe; + __le32 wepundecryptable; +} __attribute__ ((packed)); + +struct cmd_ds_mac_control { + struct cmd_header hdr; + __le16 action; + u16 reserved; +} __attribute__ ((packed)); + +struct cmd_ds_mac_multicast_adr { + struct cmd_header hdr; + __le16 action; + __le16 nr_of_adrs; + u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_authenticate { + struct cmd_header hdr; + + u8 bssid[ETH_ALEN]; + u8 authtype; + u8 reserved[10]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_deauthenticate { + struct cmd_header hdr; + + u8 macaddr[ETH_ALEN]; + __le16 reasoncode; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_associate { + struct cmd_header hdr; + + u8 bssid[6]; + __le16 capability; + __le16 listeninterval; + __le16 bcnperiod; + u8 dtimperiod; + u8 iebuf[512]; /* Enough for required and most optional IEs */ +} __attribute__ ((packed)); + +struct cmd_ds_802_11_associate_response { + struct cmd_header hdr; + + __le16 capability; + __le16 statuscode; + __le16 aid; + u8 iebuf[512]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_set_wep { + struct cmd_header hdr; + + /* ACT_ADD, ACT_REMOVE or ACT_ENABLE */ + __le16 action; + + /* key Index selected for Tx */ + __le16 keyindex; + + /* 40, 128bit or TXWEP */ + uint8_t keytype[4]; + uint8_t keymaterial[4][16]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_snmp_mib { + struct cmd_header hdr; + + __le16 action; + __le16 oid; + __le16 bufsize; + u8 value[128]; +} __attribute__ ((packed)); + +struct cmd_ds_mac_reg_access { + __le16 action; + __le16 offset; + __le32 value; +} __attribute__ ((packed)); + +struct cmd_ds_bbp_reg_access { + __le16 action; + __le16 offset; + u8 value; + u8 reserved[3]; +} __attribute__ ((packed)); + +struct cmd_ds_rf_reg_access { + __le16 action; + __le16 offset; + u8 value; + u8 reserved[3]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_radio_control { + struct cmd_header hdr; + + __le16 action; + __le16 control; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_beacon_control { + __le16 action; + __le16 beacon_enable; + __le16 beacon_period; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_sleep_params { + struct cmd_header hdr; + + /* ACT_GET/ACT_SET */ + __le16 action; + + /* Sleep clock error in ppm */ + __le16 error; + + /* Wakeup offset in usec */ + __le16 offset; + + /* Clock stabilization time in usec */ + __le16 stabletime; + + /* control periodic calibration */ + uint8_t calcontrol; + + /* control the use of external sleep clock */ + uint8_t externalsleepclk; + + /* reserved field, should be set to zero */ + __le16 reserved; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_rf_channel { + struct cmd_header hdr; + + __le16 action; + __le16 
channel; + __le16 rftype; /* unused */ + __le16 reserved; /* unused */ + u8 channellist[32]; /* unused */ +} __attribute__ ((packed)); + +struct cmd_ds_802_11_rssi { + /* weighting factor */ + __le16 N; + + __le16 reserved_0; + __le16 reserved_1; + __le16 reserved_2; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_rssi_rsp { + __le16 SNR; + __le16 noisefloor; + __le16 avgSNR; + __le16 avgnoisefloor; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_mac_address { + struct cmd_header hdr; + + __le16 action; + u8 macadd[ETH_ALEN]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_rf_tx_power { + struct cmd_header hdr; + + __le16 action; + __le16 curlevel; + s8 maxlevel; + s8 minlevel; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_monitor_mode { + __le16 action; + __le16 mode; +} __attribute__ ((packed)); + +struct cmd_ds_set_boot2_ver { + struct cmd_header hdr; + + __le16 action; + __le16 version; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_fw_wake_method { + struct cmd_header hdr; + + __le16 action; + __le16 method; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_ps_mode { + __le16 action; + __le16 nullpktinterval; + __le16 multipledtim; + __le16 reserved; + __le16 locallisteninterval; +} __attribute__ ((packed)); + +struct cmd_confirm_sleep { + struct cmd_header hdr; + + __le16 action; + __le16 nullpktinterval; + __le16 multipledtim; + __le16 reserved; + __le16 locallisteninterval; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_data_rate { + struct cmd_header hdr; + + __le16 action; + __le16 reserved; + u8 rates[MAX_RATES]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_rate_adapt_rateset { + struct cmd_header hdr; + __le16 action; + __le16 enablehwauto; + __le16 bitmap; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_ad_hoc_start { + struct cmd_header hdr; + + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 bsstype; + __le16 beaconperiod; + u8 dtimperiod; /* Reserved on v9 and later */ + struct ieee_ie_ibss_param_set ibss; + u8 reserved1[4]; + struct ieee_ie_ds_param_set ds; + u8 reserved2[4]; + __le16 probedelay; /* Reserved on v9 and later */ + __le16 capability; + u8 rates[MAX_RATES]; + u8 tlv_memory_size_pad[100]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_ad_hoc_result { + struct cmd_header hdr; + + u8 pad[3]; + u8 bssid[ETH_ALEN]; +} __attribute__ ((packed)); + +struct adhoc_bssdesc { + u8 bssid[ETH_ALEN]; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 type; + __le16 beaconperiod; + u8 dtimperiod; + __le64 timestamp; + __le64 localtime; + struct ieee_ie_ds_param_set ds; + u8 reserved1[4]; + struct ieee_ie_ibss_param_set ibss; + u8 reserved2[4]; + __le16 capability; + u8 rates[MAX_RATES]; + + /* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. 
It is used below in the + * Adhoc join command and will cause a binary layout mismatch with + * the firmware + */ +} __attribute__ ((packed)); + +struct cmd_ds_802_11_ad_hoc_join { + struct cmd_header hdr; + + struct adhoc_bssdesc bss; + __le16 failtimeout; /* Reserved on v9 and later */ + __le16 probedelay; /* Reserved on v9 and later */ +} __attribute__ ((packed)); + +struct cmd_ds_802_11_ad_hoc_stop { + struct cmd_header hdr; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_enable_rsn { + struct cmd_header hdr; + + __le16 action; + __le16 enable; +} __attribute__ ((packed)); + +struct MrvlIEtype_keyParamSet { + /* type ID */ + __le16 type; + + /* length of Payload */ + __le16 length; + + /* type of key: WEP=0, TKIP=1, AES=2 */ + __le16 keytypeid; + + /* key control Info specific to a keytypeid */ + __le16 keyinfo; + + /* length of key */ + __le16 keylen; + + /* key material of size keylen */ + u8 key[32]; +} __attribute__ ((packed)); + +#define MAX_WOL_RULES 16 + +struct host_wol_rule { + uint8_t rule_no; + uint8_t rule_ops; + __le16 sig_offset; + __le16 sig_length; + __le16 reserve; + __be32 sig_mask; + __be32 signature; +} __attribute__ ((packed)); + +struct wol_config { + uint8_t action; + uint8_t pattern; + uint8_t no_rules_in_cmd; + uint8_t result; + struct host_wol_rule rule[MAX_WOL_RULES]; +} __attribute__ ((packed)); + +struct cmd_ds_host_sleep { + struct cmd_header hdr; + __le32 criteria; + uint8_t gpio; + uint16_t gap; + struct wol_config wol_conf; +} __attribute__ ((packed)); + + + +struct cmd_ds_802_11_key_material { + struct cmd_header hdr; + + __le16 action; + struct MrvlIEtype_keyParamSet keyParamSet[2]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_eeprom_access { + struct cmd_header hdr; + __le16 action; + __le16 offset; + __le16 len; + /* firmware says it returns a maximum of 20 bytes */ +#define LBS_EEPROM_READ_LEN 20 + u8 value[LBS_EEPROM_READ_LEN]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_tpc_cfg { + struct cmd_header hdr; + + __le16 action; + uint8_t enable; + int8_t P0; + int8_t P1; + int8_t P2; + uint8_t usesnr; +} __attribute__ ((packed)); + + +struct cmd_ds_802_11_pa_cfg { + struct cmd_header hdr; + + __le16 action; + uint8_t enable; + int8_t P0; + int8_t P1; + int8_t P2; +} __attribute__ ((packed)); + + +struct cmd_ds_802_11_led_ctrl { + __le16 action; + __le16 numled; + u8 data[256]; +} __attribute__ ((packed)); + +struct cmd_ds_802_11_afc { + __le16 afc_auto; + union { + struct { + __le16 threshold; + __le16 period; + }; + struct { + __le16 timing_offset; /* signed */ + __le16 carrier_offset; /* signed */ + }; + }; +} __attribute__ ((packed)); + +struct cmd_tx_rate_query { + __le16 txrate; +} __attribute__ ((packed)); + +struct cmd_ds_get_tsf { + __le64 tsfvalue; +} __attribute__ ((packed)); + +struct cmd_ds_bt_access { + __le16 action; + __le32 id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; +} __attribute__ ((packed)); + +struct cmd_ds_fwt_access { + __le16 action; + __le32 id; + u8 valid; + u8 da[ETH_ALEN]; + u8 dir; + u8 ra[ETH_ALEN]; + __le32 ssn; + __le32 dsn; + __le32 metric; + u8 rate; + u8 hopcount; + u8 ttl; + __le32 expiration; + u8 sleepmode; + __le32 snr; + __le32 references; + u8 prec[ETH_ALEN]; +} __attribute__ ((packed)); + +struct cmd_ds_mesh_config { + struct cmd_header hdr; + + __le16 action; + __le16 channel; + __le16 type; + __le16 length; + u8 data[128]; /* last position reserved */ +} __attribute__ ((packed)); + +struct cmd_ds_mesh_access { + struct cmd_header hdr; + + __le16 action; + __le32 data[32]; /* last 
position reserved */ +} __attribute__ ((packed)); + +/* Number of stats counters returned by the firmware */ +#define MESH_STATS_NUM 8 + +struct cmd_ds_command { + /* command header */ + __le16 command; + __le16 size; + __le16 seqnum; + __le16 result; + + /* command Body */ + union { + struct cmd_ds_802_11_ps_mode psmode; + struct cmd_ds_802_11_monitor_mode monitor; + struct cmd_ds_802_11_rssi rssi; + struct cmd_ds_802_11_rssi_rsp rssirsp; + struct cmd_ds_mac_reg_access macreg; + struct cmd_ds_bbp_reg_access bbpreg; + struct cmd_ds_rf_reg_access rfreg; + + struct cmd_ds_802_11_tpc_cfg tpccfg; + struct cmd_ds_802_11_afc afc; + struct cmd_ds_802_11_led_ctrl ledgpio; + + struct cmd_ds_bt_access bt; + struct cmd_ds_fwt_access fwt; + struct cmd_ds_802_11_beacon_control bcn_ctrl; + } params; +} __attribute__ ((packed)); + +#endif diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c new file mode 100644 index 0000000..1f6cb58 --- /dev/null +++ b/drivers/net/wireless/libertas/if_cs.c @@ -0,0 +1,1042 @@ +/* + + Driver for the Marvell 8385 based compact flash WLAN cards. + + (C) 2007 by Holger Schurig + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#define DRV_NAME "libertas_cs" + +#include "decl.h" +#include "defs.h" +#include "dev.h" + + +/********************************************************************/ +/* Module stuff */ +/********************************************************************/ + +MODULE_AUTHOR("Holger Schurig "); +MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("libertas_cs_helper.fw"); + + + +/********************************************************************/ +/* Data structures */ +/********************************************************************/ + +struct if_cs_card { + struct pcmcia_device *p_dev; + struct lbs_private *priv; + void __iomem *iobase; + bool align_regs; +}; + + + +/********************************************************************/ +/* Hardware access */ +/********************************************************************/ + +/* This define enables wrapper functions which allow you + to dump all register accesses. 
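/*
 * Aside: the DEBUG_IO block just below makes debug_output a compile-time
 * constant 0 when debugging is off, so every "if (debug_output) printk(...)"
 * in the register wrappers is discarded by the compiler.  A stand-alone
 * analogue of that pattern (printf stands in for printk):
 */
#include <stdio.h>

/* #define DEBUG_IO */

#ifdef DEBUG_IO
static int debug_output = 1;
#else
#define debug_output 0		/* constant: dead branches are compiled out */
#endif

static unsigned int read_reg(unsigned int reg)
{
	unsigned int val = 0x42;	/* stand-in for a real register read */

	if (debug_output)
		printf("read %08x -> %02x\n", reg, val);
	return val;
}

int main(void)
{
	return read_reg(0x10) == 0x42 ? 0 : 1;
}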
You normally won't this, + except for development */ +/* #define DEBUG_IO */ + +#ifdef DEBUG_IO +static int debug_output = 0; +#else +/* This way the compiler optimizes the printk's away */ +#define debug_output 0 +#endif + +static inline unsigned int if_cs_read8(struct if_cs_card *card, uint reg) +{ + unsigned int val = ioread8(card->iobase + reg); + if (debug_output) + printk(KERN_INFO "inb %08x<%02x\n", reg, val); + return val; +} +static inline unsigned int if_cs_read16(struct if_cs_card *card, uint reg) +{ + unsigned int val = ioread16(card->iobase + reg); + if (debug_output) + printk(KERN_INFO "inw %08x<%04x\n", reg, val); + return val; +} +static inline void if_cs_read16_rep( + struct if_cs_card *card, + uint reg, + void *buf, + unsigned long count) +{ + if (debug_output) + printk(KERN_INFO "insw %08x<(0x%lx words)\n", + reg, count); + ioread16_rep(card->iobase + reg, buf, count); +} + +static inline void if_cs_write8(struct if_cs_card *card, uint reg, u8 val) +{ + if (debug_output) + printk(KERN_INFO "outb %08x>%02x\n", reg, val); + iowrite8(val, card->iobase + reg); +} + +static inline void if_cs_write16(struct if_cs_card *card, uint reg, u16 val) +{ + if (debug_output) + printk(KERN_INFO "outw %08x>%04x\n", reg, val); + iowrite16(val, card->iobase + reg); +} + +static inline void if_cs_write16_rep( + struct if_cs_card *card, + uint reg, + const void *buf, + unsigned long count) +{ + if (debug_output) + printk(KERN_INFO "outsw %08x>(0x%lx words)\n", + reg, count); + iowrite16_rep(card->iobase + reg, buf, count); +} + + +/* + * I know that polling/delaying is frowned upon. However, this procedure + * with polling is needed while downloading the firmware. At this stage, + * the hardware does unfortunately not create any interrupts. + * + * Fortunately, this function is never used once the firmware is in + * the card. :-) + * + * As a reference, see the "Firmware Specification v5.1", page 18 + * and 19. I did not follow their suggested timing to the word, + * but this works nice & fast anyway. + */ +static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 reg) +{ + int i; + + for (i = 0; i < 100000; i++) { + u8 val = if_cs_read8(card, addr); + if (val == reg) + return 0; + udelay(5); + } + return -ETIME; +} + + + +/* + * First the bitmasks for the host/card interrupt/status registers: + */ +#define IF_CS_BIT_TX 0x0001 +#define IF_CS_BIT_RX 0x0002 +#define IF_CS_BIT_COMMAND 0x0004 +#define IF_CS_BIT_RESP 0x0008 +#define IF_CS_BIT_EVENT 0x0010 +#define IF_CS_BIT_MASK 0x001f + + + +/* + * It's not really clear to me what the host status register is for. It + * needs to be set almost in union with "host int cause". 
The following + * bits from above are used: + * + * IF_CS_BIT_TX driver downloaded a data packet + * IF_CS_BIT_RX driver got a data packet + * IF_CS_BIT_COMMAND driver downloaded a command + * IF_CS_BIT_RESP not used (has some meaning with powerdown) + * IF_CS_BIT_EVENT driver read a host event + */ +#define IF_CS_HOST_STATUS 0x00000000 + +/* + * With the host int cause register can the host (that is, Linux) cause + * an interrupt in the firmware, to tell the firmware about those events: + * + * IF_CS_BIT_TX a data packet has been downloaded + * IF_CS_BIT_RX a received data packet has retrieved + * IF_CS_BIT_COMMAND a firmware block or a command has been downloaded + * IF_CS_BIT_RESP not used (has some meaning with powerdown) + * IF_CS_BIT_EVENT a host event (link lost etc) has been retrieved + */ +#define IF_CS_HOST_INT_CAUSE 0x00000002 + +/* + * The host int mask register is used to enable/disable interrupt. However, + * I have the suspicion that disabled interrupts are lost. + */ +#define IF_CS_HOST_INT_MASK 0x00000004 + +/* + * Used to send or receive data packets: + */ +#define IF_CS_WRITE 0x00000016 +#define IF_CS_WRITE_LEN 0x00000014 +#define IF_CS_READ 0x00000010 +#define IF_CS_READ_LEN 0x00000024 + +/* + * Used to send commands (and to send firmware block) and to + * receive command responses: + */ +#define IF_CS_CMD 0x0000001A +#define IF_CS_CMD_LEN 0x00000018 +#define IF_CS_RESP 0x00000012 +#define IF_CS_RESP_LEN 0x00000030 + +/* + * The card status registers shows what the card/firmware actually + * accepts: + * + * IF_CS_BIT_TX you may send a data packet + * IF_CS_BIT_RX you may retrieve a data packet + * IF_CS_BIT_COMMAND you may send a command + * IF_CS_BIT_RESP you may retrieve a command response + * IF_CS_BIT_EVENT the card has a event for use (link lost, snr low etc) + * + * When reading this register several times, you will get back the same + * results --- with one exception: the IF_CS_BIT_EVENT clear itself + * automatically. + * + * Not that we don't rely on BIT_RX,_BIT_RESP or BIT_EVENT because + * we handle this via the card int cause register. + */ +#define IF_CS_CARD_STATUS 0x00000020 +#define IF_CS_CARD_STATUS_MASK 0x7f00 + +/* + * The card int cause register is used by the card/firmware to notify us + * about the following events: + * + * IF_CS_BIT_TX a data packet has successfully been sentx + * IF_CS_BIT_RX a data packet has been received and can be retrieved + * IF_CS_BIT_COMMAND not used + * IF_CS_BIT_RESP the firmware has a command response for us + * IF_CS_BIT_EVENT the card has a event for use (link lost, snr low etc) + */ +#define IF_CS_CARD_INT_CAUSE 0x00000022 + +/* + * This is used to for handshaking with the card's bootloader/helper image + * to synchronize downloading of firmware blocks. + */ +#define IF_CS_SQ_READ_LOW 0x00000028 +#define IF_CS_SQ_HELPER_OK 0x10 + +/* + * The scratch register tells us ... + * + * IF_CS_SCRATCH_BOOT_OK the bootloader runs + * IF_CS_SCRATCH_HELPER_OK the helper firmware already runs + */ +#define IF_CS_SCRATCH 0x0000003F +#define IF_CS_SCRATCH_BOOT_OK 0x00 +#define IF_CS_SCRATCH_HELPER_OK 0x5a + +/* + * Used to detect ancient chips: + */ +#define IF_CS_PRODUCT_ID 0x0000001C +#define IF_CS_CF8385_B1_REV 0x12 +#define IF_CS_CF8381_B3_REV 0x04 +#define IF_CS_CF8305_B1_REV 0x03 + +/* + * Used to detect other cards than CF8385 since their revisions of silicon + * doesn't match those from CF8385, eg. CF8381 B3 works with this driver. 
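/*
 * Aside: the registers documented above are driven the same way on every
 * path -- write the *_LEN register, stream the payload as 16-bit words (plus
 * one trailing 8-bit write when the length is odd), then raise the matching
 * IF_CS_BIT_* in both HOST_STATUS and HOST_INT_CAUSE.  A minimal stand-alone
 * sketch of just the word/byte split used below by the command and TX paths:
 */
#include <stdio.h>

static void show_split(unsigned int nb)
{
	unsigned int words = nb / 2;	/* sent with the 16-bit "rep" helper */
	unsigned int tail = nb & 1;	/* odd length: one extra byte write */

	printf("%u bytes -> %u words%s\n", nb, words,
	       tail ? " + 1 trailing byte" : "");
}

int main(void)
{
	show_split(64);			/* 32 words */
	show_split(65);			/* 32 words + 1 trailing byte */
	return 0;
}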
+ */ +#define CF8305_MANFID 0x02db +#define CF8305_CARDID 0x8103 +#define CF8381_MANFID 0x02db +#define CF8381_CARDID 0x6064 +#define CF8385_MANFID 0x02df +#define CF8385_CARDID 0x8103 + +static inline int if_cs_hw_is_cf8305(struct pcmcia_device *p_dev) +{ + return (p_dev->manf_id == CF8305_MANFID && + p_dev->card_id == CF8305_CARDID); +} + +static inline int if_cs_hw_is_cf8381(struct pcmcia_device *p_dev) +{ + return (p_dev->manf_id == CF8381_MANFID && + p_dev->card_id == CF8381_CARDID); +} + +static inline int if_cs_hw_is_cf8385(struct pcmcia_device *p_dev) +{ + return (p_dev->manf_id == CF8385_MANFID && + p_dev->card_id == CF8385_CARDID); +} + +/********************************************************************/ +/* I/O and interrupt handling */ +/********************************************************************/ + +static inline void if_cs_enable_ints(struct if_cs_card *card) +{ + lbs_deb_enter(LBS_DEB_CS); + if_cs_write16(card, IF_CS_HOST_INT_MASK, 0); +} + +static inline void if_cs_disable_ints(struct if_cs_card *card) +{ + lbs_deb_enter(LBS_DEB_CS); + if_cs_write16(card, IF_CS_HOST_INT_MASK, IF_CS_BIT_MASK); +} + +/* + * Called from if_cs_host_to_card to send a command to the hardware + */ +static int if_cs_send_cmd(struct lbs_private *priv, u8 *buf, u16 nb) +{ + struct if_cs_card *card = (struct if_cs_card *)priv->card; + int ret = -1; + int loops = 0; + + lbs_deb_enter(LBS_DEB_CS); + if_cs_disable_ints(card); + + /* Is hardware ready? */ + while (1) { + u16 status = if_cs_read16(card, IF_CS_CARD_STATUS); + if (status & IF_CS_BIT_COMMAND) + break; + if (++loops > 100) { + lbs_pr_err("card not ready for commands\n"); + goto done; + } + mdelay(1); + } + + if_cs_write16(card, IF_CS_CMD_LEN, nb); + + if_cs_write16_rep(card, IF_CS_CMD, buf, nb / 2); + /* Are we supposed to transfer an odd amount of bytes? */ + if (nb & 1) + if_cs_write8(card, IF_CS_CMD, buf[nb-1]); + + /* "Assert the download over interrupt command in the Host + * status register" */ + if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND); + + /* "Assert the download over interrupt command in the Card + * interrupt case register" */ + if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND); + ret = 0; + +done: + if_cs_enable_ints(card); + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + +/* + * Called from if_cs_host_to_card to send a data to the hardware + */ +static void if_cs_send_data(struct lbs_private *priv, u8 *buf, u16 nb) +{ + struct if_cs_card *card = (struct if_cs_card *)priv->card; + u16 status; + + lbs_deb_enter(LBS_DEB_CS); + if_cs_disable_ints(card); + + status = if_cs_read16(card, IF_CS_CARD_STATUS); + BUG_ON((status & IF_CS_BIT_TX) == 0); + + if_cs_write16(card, IF_CS_WRITE_LEN, nb); + + /* write even number of bytes, then odd byte if necessary */ + if_cs_write16_rep(card, IF_CS_WRITE, buf, nb / 2); + if (nb & 1) + if_cs_write8(card, IF_CS_WRITE, buf[nb-1]); + + if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_TX); + if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_TX); + if_cs_enable_ints(card); + + lbs_deb_leave(LBS_DEB_CS); +} + +/* + * Get the command result out of the card. + */ +static int if_cs_receive_cmdres(struct lbs_private *priv, u8 *data, u32 *len) +{ + unsigned long flags; + int ret = -1; + u16 status; + + lbs_deb_enter(LBS_DEB_CS); + + /* is hardware ready? 
*/ + status = if_cs_read16(priv->card, IF_CS_CARD_STATUS); + if ((status & IF_CS_BIT_RESP) == 0) { + lbs_pr_err("no cmd response in card\n"); + *len = 0; + goto out; + } + + *len = if_cs_read16(priv->card, IF_CS_RESP_LEN); + if ((*len == 0) || (*len > LBS_CMD_BUFFER_SIZE)) { + lbs_pr_err("card cmd buffer has invalid # of bytes (%d)\n", *len); + goto out; + } + + /* read even number of bytes, then odd byte if necessary */ + if_cs_read16_rep(priv->card, IF_CS_RESP, data, *len/sizeof(u16)); + if (*len & 1) + data[*len-1] = if_cs_read8(priv->card, IF_CS_RESP); + + /* This is a workaround for a firmware that reports too much + * bytes */ + *len -= 8; + ret = 0; + + /* Clear this flag again */ + spin_lock_irqsave(&priv->driver_lock, flags); + priv->dnld_sent = DNLD_RES_RECEIVED; + spin_unlock_irqrestore(&priv->driver_lock, flags); + +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d, len %d", ret, *len); + return ret; +} + +static struct sk_buff *if_cs_receive_data(struct lbs_private *priv) +{ + struct sk_buff *skb = NULL; + u16 len; + u8 *data; + + lbs_deb_enter(LBS_DEB_CS); + + len = if_cs_read16(priv->card, IF_CS_READ_LEN); + if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { + lbs_pr_err("card data buffer has invalid # of bytes (%d)\n", len); + priv->dev->stats.rx_dropped++; + goto dat_err; + } + + skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + 2); + if (!skb) + goto out; + skb_put(skb, len); + skb_reserve(skb, 2);/* 16 byte align */ + data = skb->data; + + /* read even number of bytes, then odd byte if necessary */ + if_cs_read16_rep(priv->card, IF_CS_READ, data, len/sizeof(u16)); + if (len & 1) + data[len-1] = if_cs_read8(priv->card, IF_CS_READ); + +dat_err: + if_cs_write16(priv->card, IF_CS_HOST_STATUS, IF_CS_BIT_RX); + if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_RX); + +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %p", skb); + return skb; +} + +static irqreturn_t if_cs_interrupt(int irq, void *data) +{ + struct if_cs_card *card = data; + struct lbs_private *priv = card->priv; + u16 cause; + + lbs_deb_enter(LBS_DEB_CS); + + /* Ask card interrupt cause register if there is something for us */ + cause = if_cs_read16(card, IF_CS_CARD_INT_CAUSE); + lbs_deb_cs("cause 0x%04x\n", cause); + + if (cause == 0) { + /* Not for us */ + return IRQ_NONE; + } + + if (cause == 0xffff) { + /* Read in junk, the card has probably been removed */ + card->priv->surpriseremoved = 1; + return IRQ_HANDLED; + } + + if (cause & IF_CS_BIT_RX) { + struct sk_buff *skb; + lbs_deb_cs("rx packet\n"); + skb = if_cs_receive_data(priv); + if (skb) + lbs_process_rxed_packet(priv, skb); + } + + if (cause & IF_CS_BIT_TX) { + lbs_deb_cs("tx done\n"); + lbs_host_to_card_done(priv); + } + + if (cause & IF_CS_BIT_RESP) { + unsigned long flags; + u8 i; + + lbs_deb_cs("cmd resp\n"); + spin_lock_irqsave(&priv->driver_lock, flags); + i = (priv->resp_idx == 0) ? 
1 : 0; + spin_unlock_irqrestore(&priv->driver_lock, flags); + + BUG_ON(priv->resp_len[i]); + if_cs_receive_cmdres(priv, priv->resp_buf[i], + &priv->resp_len[i]); + + spin_lock_irqsave(&priv->driver_lock, flags); + lbs_notify_command_response(priv, i); + spin_unlock_irqrestore(&priv->driver_lock, flags); + } + + if (cause & IF_CS_BIT_EVENT) { + u16 status = if_cs_read16(priv->card, IF_CS_CARD_STATUS); + if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE, + IF_CS_BIT_EVENT); + lbs_queue_event(priv, (status & IF_CS_CARD_STATUS_MASK) >> 8); + } + + /* Clear interrupt cause */ + if_cs_write16(card, IF_CS_CARD_INT_CAUSE, cause & IF_CS_BIT_MASK); + + lbs_deb_leave(LBS_DEB_CS); + return IRQ_HANDLED; +} + + + + +/********************************************************************/ +/* Firmware */ +/********************************************************************/ + +/* + * Tries to program the helper firmware. + * + * Return 0 on success + */ +static int if_cs_prog_helper(struct if_cs_card *card) +{ + int ret = 0; + int sent = 0; + u8 scratch; + const struct firmware *fw; + + lbs_deb_enter(LBS_DEB_CS); + + /* + * This is the only place where an unaligned register access happens on + * the CF8305 card, therefore for the sake of speed of the driver, we do + * the alignment correction here. + */ + if (card->align_regs) + scratch = if_cs_read16(card, IF_CS_SCRATCH) >> 8; + else + scratch = if_cs_read8(card, IF_CS_SCRATCH); + + /* "If the value is 0x5a, the firmware is already + * downloaded successfully" + */ + if (scratch == IF_CS_SCRATCH_HELPER_OK) + goto done; + + /* "If the value is != 00, it is invalid value of register */ + if (scratch != IF_CS_SCRATCH_BOOT_OK) { + ret = -ENODEV; + goto done; + } + + /* TODO: make firmware file configurable */ + ret = request_firmware(&fw, "libertas_cs_helper.fw", + &card->p_dev->dev); + if (ret) { + lbs_pr_err("can't load helper firmware\n"); + ret = -ENODEV; + goto done; + } + lbs_deb_cs("helper size %td\n", fw->size); + + /* "Set the 5 bytes of the helper image to 0" */ + /* Not needed, this contains an ARM branch instruction */ + + for (;;) { + /* "the number of bytes to send is 256" */ + int count = 256; + int remain = fw->size - sent; + + if (remain < count) + count = remain; + + /* "write the number of bytes to be sent to the I/O Command + * write length register" */ + if_cs_write16(card, IF_CS_CMD_LEN, count); + + /* "write this to I/O Command port register as 16 bit writes */ + if (count) + if_cs_write16_rep(card, IF_CS_CMD, + &fw->data[sent], + count >> 1); + + /* "Assert the download over interrupt command in the Host + * status register" */ + if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND); + + /* "Assert the download over interrupt command in the Card + * interrupt case register" */ + if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND); + + /* "The host polls the Card Status register ... 
for 50 ms before + declaring a failure */ + ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS, + IF_CS_BIT_COMMAND); + if (ret < 0) { + lbs_pr_err("can't download helper at 0x%x, ret %d\n", + sent, ret); + goto err_release; + } + + if (count == 0) + break; + + sent += count; + } + +err_release: + release_firmware(fw); +done: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +static int if_cs_prog_real(struct if_cs_card *card) +{ + const struct firmware *fw; + int ret = 0; + int retry = 0; + int len = 0; + int sent; + + lbs_deb_enter(LBS_DEB_CS); + + /* TODO: make firmware file configurable */ + ret = request_firmware(&fw, "libertas_cs.fw", + &card->p_dev->dev); + if (ret) { + lbs_pr_err("can't load firmware\n"); + ret = -ENODEV; + goto done; + } + lbs_deb_cs("fw size %td\n", fw->size); + + ret = if_cs_poll_while_fw_download(card, IF_CS_SQ_READ_LOW, + IF_CS_SQ_HELPER_OK); + if (ret < 0) { + lbs_pr_err("helper firmware doesn't answer\n"); + goto err_release; + } + + for (sent = 0; sent < fw->size; sent += len) { + len = if_cs_read16(card, IF_CS_SQ_READ_LOW); + if (len & 1) { + retry++; + lbs_pr_info("odd, need to retry this firmware block\n"); + } else { + retry = 0; + } + + if (retry > 20) { + lbs_pr_err("could not download firmware\n"); + ret = -ENODEV; + goto err_release; + } + if (retry) { + sent -= len; + } + + + if_cs_write16(card, IF_CS_CMD_LEN, len); + + if_cs_write16_rep(card, IF_CS_CMD, + &fw->data[sent], + (len+1) >> 1); + if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND); + if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND); + + ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS, + IF_CS_BIT_COMMAND); + if (ret < 0) { + lbs_pr_err("can't download firmware at 0x%x\n", sent); + goto err_release; + } + } + + ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a); + if (ret < 0) + lbs_pr_err("firmware download failed\n"); + +err_release: + release_firmware(fw); + +done: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + + +/********************************************************************/ +/* Callback functions for libertas.ko */ +/********************************************************************/ + +/* Send commands or data packets to the card */ +static int if_cs_host_to_card(struct lbs_private *priv, + u8 type, + u8 *buf, + u16 nb) +{ + int ret = -1; + + lbs_deb_enter_args(LBS_DEB_CS, "type %d, bytes %d", type, nb); + + switch (type) { + case MVMS_DAT: + priv->dnld_sent = DNLD_DATA_SENT; + if_cs_send_data(priv, buf, nb); + ret = 0; + break; + case MVMS_CMD: + priv->dnld_sent = DNLD_CMD_SENT; + ret = if_cs_send_cmd(priv, buf, nb); + break; + default: + lbs_pr_err("%s: unsupported type %d\n", __func__, type); + } + + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +/********************************************************************/ +/* Card Services */ +/********************************************************************/ + +/* + * After a card is removed, if_cs_release() will unregister the + * device, and release the PCMCIA configuration. If the device is + * still open, this will be postponed until it is closed. 
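/*
 * Aside: if_cs_host_to_card() above records what was last pushed to the
 * hardware in priv->dnld_sent (DNLD_DATA_SENT or DNLD_CMD_SENT, from the
 * enum in defs.h), and if_cs_receive_cmdres() earlier in this file resets it
 * to DNLD_RES_RECEIVED once the matching response has been read back; the
 * core presumably keys off that state to decide when the next buffer may be
 * downloaded.
 */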
+ */ +static void if_cs_release(struct pcmcia_device *p_dev) +{ + struct if_cs_card *card = p_dev->priv; + + lbs_deb_enter(LBS_DEB_CS); + + free_irq(p_dev->irq.AssignedIRQ, card); + pcmcia_disable_device(p_dev); + if (card->iobase) + ioport_unmap(card->iobase); + + lbs_deb_leave(LBS_DEB_CS); +} + + +/* + * This creates an "instance" of the driver, allocating local data + * structures for one device. The device is registered with Card + * Services. + * + * The dev_link structure is initialized, but we don't actually + * configure the card at this point -- we wait until we receive a card + * insertion event. + */ + +static int if_cs_ioprobe(struct pcmcia_device *p_dev, + cistpl_cftable_entry_t *cfg, + cistpl_cftable_entry_t *dflt, + unsigned int vcc, + void *priv_data) +{ + p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; + p_dev->io.BasePort1 = cfg->io.win[0].base; + p_dev->io.NumPorts1 = cfg->io.win[0].len; + + /* Do we need to allocate an interrupt? */ + if (cfg->irq.IRQInfo1) + p_dev->conf.Attributes |= CONF_ENABLE_IRQ; + + /* IO window settings */ + if (cfg->io.nwin != 1) { + lbs_pr_err("wrong CIS (check number of IO windows)\n"); + return -ENODEV; + } + + /* This reserves IO space but doesn't actually enable it */ + return pcmcia_request_io(p_dev, &p_dev->io); +} + +static int if_cs_probe(struct pcmcia_device *p_dev) +{ + int ret = -ENOMEM; + unsigned int prod_id; + struct lbs_private *priv; + struct if_cs_card *card; + + lbs_deb_enter(LBS_DEB_CS); + + card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL); + if (!card) { + lbs_pr_err("error in kzalloc\n"); + goto out; + } + card->p_dev = p_dev; + p_dev->priv = card; + + p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; + p_dev->irq.Handler = NULL; + + p_dev->conf.Attributes = 0; + p_dev->conf.IntType = INT_MEMORY_AND_IO; + + if (pcmcia_loop_config(p_dev, if_cs_ioprobe, NULL)) { + lbs_pr_err("error in pcmcia_loop_config\n"); + goto out1; + } + + + /* + * Allocate an interrupt line. Note that this does not assign + * a handler to the interrupt, unless the 'Handler' member of + * the irq structure is initialized. + */ + if (p_dev->conf.Attributes & CONF_ENABLE_IRQ) { + ret = pcmcia_request_irq(p_dev, &p_dev->irq); + if (ret) { + lbs_pr_err("error in pcmcia_request_irq\n"); + goto out1; + } + } + + /* Initialize io access */ + card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1); + if (!card->iobase) { + lbs_pr_err("error in ioport_map\n"); + ret = -EIO; + goto out1; + } + + /* + * This actually configures the PCMCIA socket -- setting up + * the I/O windows and the interrupt mapping, and putting the + * card and host interface into "Memory and IO" mode. + */ + ret = pcmcia_request_configuration(p_dev, &p_dev->conf); + if (ret) { + lbs_pr_err("error in pcmcia_request_configuration\n"); + goto out2; + } + + /* Finally, report what we've done */ + lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n", + p_dev->irq.AssignedIRQ, p_dev->io.BasePort1, + p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1); + + /* + * Most of the libertas cards can do unaligned register access, but some + * weird ones can not. That's especially true for the CF8305 card. 
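/*
 * Aside: the workaround for such cards is visible in if_cs_prog_helper()
 * above -- when card->align_regs is set, the 8-bit scratch register
 * (IF_CS_SCRATCH, offset 0x3F) is fetched with a 16-bit read and the value
 * taken from the upper byte, instead of a plain if_cs_read8().
 */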
+ */ + card->align_regs = 0; + + /* Check if we have a current silicon */ + prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID); + if (if_cs_hw_is_cf8305(p_dev)) { + card->align_regs = 1; + if (prod_id < IF_CS_CF8305_B1_REV) { + lbs_pr_err("old chips like 8305 rev B3 " + "aren't supported\n"); + ret = -ENODEV; + goto out2; + } + } + + if (if_cs_hw_is_cf8381(p_dev) && prod_id < IF_CS_CF8381_B3_REV) { + lbs_pr_err("old chips like 8381 rev B3 aren't supported\n"); + ret = -ENODEV; + goto out2; + } + + if (if_cs_hw_is_cf8385(p_dev) && prod_id < IF_CS_CF8385_B1_REV) { + lbs_pr_err("old chips like 8385 rev B1 aren't supported\n"); + ret = -ENODEV; + goto out2; + } + + /* Load the firmware early, before calling into libertas.ko */ + ret = if_cs_prog_helper(card); + if (ret == 0 && !if_cs_hw_is_cf8305(p_dev)) + ret = if_cs_prog_real(card); + if (ret) + goto out2; + + /* Make this card known to the libertas driver */ + priv = lbs_add_card(card, &p_dev->dev); + if (!priv) { + ret = -ENOMEM; + goto out2; + } + + /* Finish setting up fields in lbs_private */ + card->priv = priv; + priv->card = card; + priv->hw_host_to_card = if_cs_host_to_card; + priv->enter_deep_sleep = NULL; + priv->exit_deep_sleep = NULL; + priv->reset_deep_sleep_wakeup = NULL; + priv->fw_ready = 1; + + /* Now actually get the IRQ */ + ret = request_irq(p_dev->irq.AssignedIRQ, if_cs_interrupt, + IRQF_SHARED, DRV_NAME, card); + if (ret) { + lbs_pr_err("error in request_irq\n"); + goto out3; + } + + /* Clear any interrupt cause that happend while sending + * firmware/initializing card */ + if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK); + if_cs_enable_ints(card); + + /* And finally bring the card up */ + if (lbs_start_card(priv) != 0) { + lbs_pr_err("could not activate card\n"); + goto out3; + } + + ret = 0; + goto out; + +out3: + lbs_remove_card(priv); +out2: + ioport_unmap(card->iobase); +out1: + pcmcia_disable_device(p_dev); +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +/* + * This deletes a driver "instance". The device is de-registered with + * Card Services. If it has been released, all local data structures + * are freed. Otherwise, the structures will be freed when the device + * is released. 
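/*
 * Aside: if_cs_probe() above uses the usual "goto unwind" idiom -- resources
 * are acquired in order and the error labels (out3/out2/out1) release them
 * in reverse order, while if_cs_detach()/if_cs_release() below tear the same
 * things down again.  A minimal stand-alone sketch of the idiom; the
 * "resources" are only illustrative:
 */
#include <stdio.h>
#include <stdlib.h>

static int probe_like(int fail_at)
{
	char *cfg, *iomem;
	int ret = -1;

	cfg = (fail_at == 1) ? NULL : malloc(16);   /* stand-in: enable device */
	if (!cfg)
		goto out;
	iomem = (fail_at == 2) ? NULL : malloc(16); /* stand-in: map I/O window */
	if (!iomem)
		goto out_cfg;

	/* ... happy path: register with the core, request the IRQ, ... */
	ret = 0;

	free(iomem);			/* demo only: tear it all down again */
out_cfg:
	free(cfg);
out:
	return ret;
}

int main(void)
{
	printf("ok:   %d\n", probe_like(0));	/* 0 */
	printf("fail: %d\n", probe_like(2));	/* -1, earlier steps unwound */
	return 0;
}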
+ */ +static void if_cs_detach(struct pcmcia_device *p_dev) +{ + struct if_cs_card *card = p_dev->priv; + + lbs_deb_enter(LBS_DEB_CS); + + lbs_stop_card(card->priv); + lbs_remove_card(card->priv); + if_cs_disable_ints(card); + if_cs_release(p_dev); + kfree(card); + + lbs_deb_leave(LBS_DEB_CS); +} + + + +/********************************************************************/ +/* Module initialization */ +/********************************************************************/ + +static struct pcmcia_device_id if_cs_ids[] = { + PCMCIA_DEVICE_MANF_CARD(CF8305_MANFID, CF8305_CARDID), + PCMCIA_DEVICE_MANF_CARD(CF8381_MANFID, CF8381_CARDID), + PCMCIA_DEVICE_MANF_CARD(CF8385_MANFID, CF8385_CARDID), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, if_cs_ids); + + +static struct pcmcia_driver lbs_driver = { + .owner = THIS_MODULE, + .drv = { + .name = DRV_NAME, + }, + .probe = if_cs_probe, + .remove = if_cs_detach, + .id_table = if_cs_ids, +}; + + +static int __init if_cs_init(void) +{ + int ret; + + lbs_deb_enter(LBS_DEB_CS); + ret = pcmcia_register_driver(&lbs_driver); + lbs_deb_leave(LBS_DEB_CS); + return ret; +} + + +static void __exit if_cs_exit(void) +{ + lbs_deb_enter(LBS_DEB_CS); + pcmcia_unregister_driver(&lbs_driver); + lbs_deb_leave(LBS_DEB_CS); +} + + +module_init(if_cs_init); +module_exit(if_cs_exit); diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c new file mode 100644 index 0000000..7a73f62 --- /dev/null +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -0,0 +1,1230 @@ +/* + * linux/drivers/net/wireless/libertas/if_sdio.c + * + * Copyright 2007-2008 Pierre Ossman + * + * Inspired by if_cs.c, Copyright 2007 Holger Schurig + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This hardware has more or less no CMD53 support, so all registers + * must be accessed using sdio_readb()/sdio_writeb(). + * + * Transfers must be in one transaction or the firmware goes bonkers. + * This means that the transfer must either be small enough to do a + * byte based transfer or it must be padded to a multiple of the + * current block size. + * + * As SDIO is still new to the kernel, it is unfortunately common with + * bugs in the host controllers related to that. One such bug is that + * controllers cannot do transfers that aren't a multiple of 4 bytes. + * If you don't have time to fix the host controller driver, you can + * work around the problem by modifying if_sdio_host_to_card() and + * if_sdio_card_to_host() to pad the data. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "host.h" +#include "decl.h" +#include "defs.h" +#include "dev.h" +#include "cmd.h" +#include "if_sdio.h" + +/* The if_sdio_remove() callback function is called when + * user removes this module from kernel space or ejects + * the card from the slot. The driver handles these 2 cases + * differently for SD8688 combo chip. + * If the user is removing the module, the FUNC_SHUTDOWN + * command for SD8688 is sent to the firmware. + * If the card is removed, there is no need to send this command. + * + * The variable 'user_rmmod' is used to distinguish these two + * scenarios. This flag is initialized as FALSE in case the card + * is removed, and will be set to TRUE for module removal when + * module_exit function is called. 
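/*
 * Aside: the header comment above says a transfer must either be small
 * enough for byte mode or be padded up to a multiple of the card's current
 * block size.  The padding itself is plain round-up arithmetic; a minimal
 * stand-alone sketch (a block size of 256 is assumed only for the example):
 */
#include <stdio.h>

static unsigned int pad_to_block(unsigned int nb, unsigned int blksz)
{
	/* smallest multiple of blksz that is >= nb */
	return ((nb + blksz - 1) / blksz) * blksz;
}

int main(void)
{
	unsigned int blksz = 256;

	printf("%u -> %u\n", 100u, pad_to_block(100, blksz));	/* 256 */
	printf("%u -> %u\n", 256u, pad_to_block(256, blksz));	/* 256 */
	printf("%u -> %u\n", 300u, pad_to_block(300, blksz));	/* 512 */
	return 0;
}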
+ */ +static u8 user_rmmod; + +static char *lbs_helper_name = NULL; +module_param_named(helper_name, lbs_helper_name, charp, 0644); + +static char *lbs_fw_name = NULL; +module_param_named(fw_name, lbs_fw_name, charp, 0644); + +static const struct sdio_device_id if_sdio_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, + SDIO_DEVICE_ID_MARVELL_LIBERTAS) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, + SDIO_DEVICE_ID_MARVELL_8688WLAN) }, + { /* end: all zeroes */ }, +}; + +MODULE_DEVICE_TABLE(sdio, if_sdio_ids); + +struct if_sdio_model { + int model; + const char *helper; + const char *firmware; +}; + +static struct if_sdio_model if_sdio_models[] = { + { + /* 8385 */ + .model = IF_SDIO_MODEL_8385, + .helper = "sd8385_helper.bin", + .firmware = "sd8385.bin", + }, + { + /* 8686 */ + .model = IF_SDIO_MODEL_8686, + .helper = "sd8686_helper.bin", + .firmware = "sd8686.bin", + }, + { + /* 8688 */ + .model = IF_SDIO_MODEL_8688, + .helper = "sd8688_helper.bin", + .firmware = "sd8688.bin", + }, +}; +MODULE_FIRMWARE("sd8385_helper.bin"); +MODULE_FIRMWARE("sd8385.bin"); +MODULE_FIRMWARE("sd8686_helper.bin"); +MODULE_FIRMWARE("sd8686.bin"); +MODULE_FIRMWARE("sd8688_helper.bin"); +MODULE_FIRMWARE("sd8688.bin"); + +struct if_sdio_packet { + struct if_sdio_packet *next; + u16 nb; + u8 buffer[0] __attribute__((aligned(4))); +}; + +struct if_sdio_card { + struct sdio_func *func; + struct lbs_private *priv; + + int model; + unsigned long ioport; + unsigned int scratch_reg; + + const char *helper; + const char *firmware; + + u8 buffer[65536]; + + spinlock_t lock; + struct if_sdio_packet *packets; + + struct workqueue_struct *workqueue; + struct work_struct packet_worker; + + u8 rx_unit; +}; + +/********************************************************************/ +/* I/O */ +/********************************************************************/ + +/* + * For SD8385/SD8686, this function reads firmware status after + * the image is downloaded, or reads RX packet length when + * interrupt (with IF_SDIO_H_INT_UPLD bit set) is received. + * For SD8688, this function reads firmware status only. 
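+ *
+ * The 16-bit value is assembled from two byte reads (low byte first),
+ * since this hardware has essentially no CMD53 support and registers
+ * have to be accessed through sdio_readb()/sdio_writeb().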
+ */ +static u16 if_sdio_read_scratch(struct if_sdio_card *card, int *err) +{ + int ret; + u16 scratch; + + scratch = sdio_readb(card->func, card->scratch_reg, &ret); + if (!ret) + scratch |= sdio_readb(card->func, card->scratch_reg + 1, + &ret) << 8; + + if (err) + *err = ret; + + if (ret) + return 0xffff; + + return scratch; +} + +static u8 if_sdio_read_rx_unit(struct if_sdio_card *card) +{ + int ret; + u8 rx_unit; + + rx_unit = sdio_readb(card->func, IF_SDIO_RX_UNIT, &ret); + + if (ret) + rx_unit = 0; + + return rx_unit; +} + +static u16 if_sdio_read_rx_len(struct if_sdio_card *card, int *err) +{ + int ret; + u16 rx_len; + + switch (card->model) { + case IF_SDIO_MODEL_8385: + case IF_SDIO_MODEL_8686: + rx_len = if_sdio_read_scratch(card, &ret); + break; + case IF_SDIO_MODEL_8688: + default: /* for newer chipsets */ + rx_len = sdio_readb(card->func, IF_SDIO_RX_LEN, &ret); + if (!ret) + rx_len <<= card->rx_unit; + else + rx_len = 0xffff; /* invalid length */ + + break; + } + + if (err) + *err = ret; + + return rx_len; +} + +static int if_sdio_handle_cmd(struct if_sdio_card *card, + u8 *buffer, unsigned size) +{ + struct lbs_private *priv = card->priv; + int ret; + unsigned long flags; + u8 i; + + lbs_deb_enter(LBS_DEB_SDIO); + + if (size > LBS_CMD_BUFFER_SIZE) { + lbs_deb_sdio("response packet too large (%d bytes)\n", + (int)size); + ret = -E2BIG; + goto out; + } + + spin_lock_irqsave(&priv->driver_lock, flags); + + i = (priv->resp_idx == 0) ? 1 : 0; + BUG_ON(priv->resp_len[i]); + priv->resp_len[i] = size; + memcpy(priv->resp_buf[i], buffer, size); + lbs_notify_command_response(priv, i); + + spin_unlock_irqrestore(&card->priv->driver_lock, flags); + + ret = 0; + +out: + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + return ret; +} + +static int if_sdio_handle_data(struct if_sdio_card *card, + u8 *buffer, unsigned size) +{ + int ret; + struct sk_buff *skb; + char *data; + + lbs_deb_enter(LBS_DEB_SDIO); + + if (size > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { + lbs_deb_sdio("response packet too large (%d bytes)\n", + (int)size); + ret = -E2BIG; + goto out; + } + + skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + NET_IP_ALIGN); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + skb_reserve(skb, NET_IP_ALIGN); + + data = skb_put(skb, size); + + memcpy(data, buffer, size); + + lbs_process_rxed_packet(card->priv, skb); + + ret = 0; + +out: + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + + return ret; +} + +static int if_sdio_handle_event(struct if_sdio_card *card, + u8 *buffer, unsigned size) +{ + int ret; + u32 event; + + lbs_deb_enter(LBS_DEB_SDIO); + + if (card->model == IF_SDIO_MODEL_8385) { + event = sdio_readb(card->func, IF_SDIO_EVENT, &ret); + if (ret) + goto out; + + /* right shift 3 bits to get the event id */ + event >>= 3; + } else { + if (size < 4) { + lbs_deb_sdio("event packet too small (%d bytes)\n", + (int)size); + ret = -EINVAL; + goto out; + } + event = buffer[3] << 24; + event |= buffer[2] << 16; + event |= buffer[1] << 8; + event |= buffer[0] << 0; + } + + lbs_queue_event(card->priv, event & 0xFF); + ret = 0; + +out: + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + + return ret; +} + +static int if_sdio_card_to_host(struct if_sdio_card *card) +{ + int ret; + u8 status; + u16 size, type, chunk; + unsigned long timeout; + + lbs_deb_enter(LBS_DEB_SDIO); + + size = if_sdio_read_rx_len(card, &ret); + if (ret) + goto out; + + if (size < 4) { + lbs_deb_sdio("invalid packet size (%d bytes) from firmware\n", + (int)size); + ret = -EINVAL; + goto out; + } + + timeout = 
jiffies + HZ; + while (1) { + status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); + if (ret) + goto out; + if (status & IF_SDIO_IO_RDY) + break; + if (time_after(jiffies, timeout)) { + ret = -ETIMEDOUT; + goto out; + } + mdelay(1); + } + + /* + * The transfer must be in one transaction or the firmware + * goes suicidal. There's no way to guarantee that for all + * controllers, but we can at least try. + */ + chunk = sdio_align_size(card->func, size); + + ret = sdio_readsb(card->func, card->buffer, card->ioport, chunk); + if (ret) + goto out; + + chunk = card->buffer[0] | (card->buffer[1] << 8); + type = card->buffer[2] | (card->buffer[3] << 8); + + lbs_deb_sdio("packet of type %d and size %d bytes\n", + (int)type, (int)chunk); + + if (chunk > size) { + lbs_deb_sdio("packet fragment (%d > %d)\n", + (int)chunk, (int)size); + ret = -EINVAL; + goto out; + } + + if (chunk < size) { + lbs_deb_sdio("packet fragment (%d < %d)\n", + (int)chunk, (int)size); + } + + switch (type) { + case MVMS_CMD: + ret = if_sdio_handle_cmd(card, card->buffer + 4, chunk - 4); + if (ret) + goto out; + break; + case MVMS_DAT: + ret = if_sdio_handle_data(card, card->buffer + 4, chunk - 4); + if (ret) + goto out; + break; + case MVMS_EVENT: + ret = if_sdio_handle_event(card, card->buffer + 4, chunk - 4); + if (ret) + goto out; + break; + default: + lbs_deb_sdio("invalid type (%d) from firmware\n", + (int)type); + ret = -EINVAL; + goto out; + } + +out: + if (ret) + lbs_pr_err("problem fetching packet from firmware\n"); + + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + + return ret; +} + +static void if_sdio_host_to_card_worker(struct work_struct *work) +{ + struct if_sdio_card *card; + struct if_sdio_packet *packet; + unsigned long timeout; + u8 status; + int ret; + unsigned long flags; + + lbs_deb_enter(LBS_DEB_SDIO); + + card = container_of(work, struct if_sdio_card, packet_worker); + + while (1) { + spin_lock_irqsave(&card->lock, flags); + packet = card->packets; + if (packet) + card->packets = packet->next; + spin_unlock_irqrestore(&card->lock, flags); + + if (!packet) + break; + + sdio_claim_host(card->func); + + timeout = jiffies + HZ; + while (1) { + status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); + if (ret) + goto release; + if (status & IF_SDIO_IO_RDY) + break; + if (time_after(jiffies, timeout)) { + ret = -ETIMEDOUT; + goto release; + } + mdelay(1); + } + + ret = sdio_writesb(card->func, card->ioport, + packet->buffer, packet->nb); + if (ret) + goto release; +release: + sdio_release_host(card->func); + + kfree(packet); + } + + lbs_deb_leave(LBS_DEB_SDIO); +} + +/********************************************************************/ +/* Firmware */ +/********************************************************************/ + +static int if_sdio_prog_helper(struct if_sdio_card *card) +{ + int ret; + u8 status; + const struct firmware *fw; + unsigned long timeout; + u8 *chunk_buffer; + u32 chunk_size; + const u8 *firmware; + size_t size; + + lbs_deb_enter(LBS_DEB_SDIO); + + ret = request_firmware(&fw, card->helper, &card->func->dev); + if (ret) { + lbs_pr_err("can't load helper firmware\n"); + goto out; + } + + chunk_buffer = kzalloc(64, GFP_KERNEL); + if (!chunk_buffer) { + ret = -ENOMEM; + goto release_fw; + } + + sdio_claim_host(card->func); + + ret = sdio_set_block_size(card->func, 32); + if (ret) + goto release; + + firmware = fw->data; + size = fw->size; + + while (size) { + timeout = jiffies + HZ; + while (1) { + status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); + if (ret) + goto release; + 
if ((status & IF_SDIO_IO_RDY) && + (status & IF_SDIO_DL_RDY)) + break; + if (time_after(jiffies, timeout)) { + ret = -ETIMEDOUT; + goto release; + } + mdelay(1); + } + + chunk_size = min(size, (size_t)60); + + *((__le32*)chunk_buffer) = cpu_to_le32(chunk_size); + memcpy(chunk_buffer + 4, firmware, chunk_size); +/* + lbs_deb_sdio("sending %d bytes chunk\n", chunk_size); +*/ + ret = sdio_writesb(card->func, card->ioport, + chunk_buffer, 64); + if (ret) + goto release; + + firmware += chunk_size; + size -= chunk_size; + } + + /* an empty block marks the end of the transfer */ + memset(chunk_buffer, 0, 4); + ret = sdio_writesb(card->func, card->ioport, chunk_buffer, 64); + if (ret) + goto release; + + lbs_deb_sdio("waiting for helper to boot...\n"); + + /* wait for the helper to boot by looking at the size register */ + timeout = jiffies + HZ; + while (1) { + u16 req_size; + + req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret); + if (ret) + goto release; + + req_size |= sdio_readb(card->func, IF_SDIO_RD_BASE + 1, &ret) << 8; + if (ret) + goto release; + + if (req_size != 0) + break; + + if (time_after(jiffies, timeout)) { + ret = -ETIMEDOUT; + goto release; + } + + msleep(10); + } + + ret = 0; + +release: + sdio_release_host(card->func); + kfree(chunk_buffer); +release_fw: + release_firmware(fw); + +out: + if (ret) + lbs_pr_err("failed to load helper firmware\n"); + + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + + return ret; +} + +static int if_sdio_prog_real(struct if_sdio_card *card) +{ + int ret; + u8 status; + const struct firmware *fw; + unsigned long timeout; + u8 *chunk_buffer; + u32 chunk_size; + const u8 *firmware; + size_t size, req_size; + + lbs_deb_enter(LBS_DEB_SDIO); + + ret = request_firmware(&fw, card->firmware, &card->func->dev); + if (ret) { + lbs_pr_err("can't load firmware\n"); + goto out; + } + + chunk_buffer = kzalloc(512, GFP_KERNEL); + if (!chunk_buffer) { + ret = -ENOMEM; + goto release_fw; + } + + sdio_claim_host(card->func); + + ret = sdio_set_block_size(card->func, 32); + if (ret) + goto release; + + firmware = fw->data; + size = fw->size; + + while (size) { + timeout = jiffies + HZ; + while (1) { + status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); + if (ret) + goto release; + if ((status & IF_SDIO_IO_RDY) && + (status & IF_SDIO_DL_RDY)) + break; + if (time_after(jiffies, timeout)) { + ret = -ETIMEDOUT; + goto release; + } + mdelay(1); + } + + req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret); + if (ret) + goto release; + + req_size |= sdio_readb(card->func, IF_SDIO_RD_BASE + 1, &ret) << 8; + if (ret) + goto release; +/* + lbs_deb_sdio("firmware wants %d bytes\n", (int)req_size); +*/ + if (req_size == 0) { + lbs_deb_sdio("firmware helper gave up early\n"); + ret = -EIO; + goto release; + } + + if (req_size & 0x01) { + lbs_deb_sdio("firmware helper signalled error\n"); + ret = -EIO; + goto release; + } + + if (req_size > size) + req_size = size; + + while (req_size) { + chunk_size = min(req_size, (size_t)512); + + memcpy(chunk_buffer, firmware, chunk_size); +/* + lbs_deb_sdio("sending %d bytes (%d bytes) chunk\n", + chunk_size, (chunk_size + 31) / 32 * 32); +*/ + ret = sdio_writesb(card->func, card->ioport, + chunk_buffer, roundup(chunk_size, 32)); + if (ret) + goto release; + + firmware += chunk_size; + size -= chunk_size; + req_size -= chunk_size; + } + } + + ret = 0; + + lbs_deb_sdio("waiting for firmware to boot...\n"); + + /* wait for the firmware to boot */ + timeout = jiffies + HZ; + while (1) { + u16 scratch; + + scratch = 
if_sdio_read_scratch(card, &ret); + if (ret) + goto release; + + if (scratch == IF_SDIO_FIRMWARE_OK) + break; + + if (time_after(jiffies, timeout)) { + ret = -ETIMEDOUT; + goto release; + } + + msleep(10); + } + + ret = 0; + +release: + sdio_release_host(card->func); + kfree(chunk_buffer); +release_fw: + release_firmware(fw); + +out: + if (ret) + lbs_pr_err("failed to load firmware\n"); + + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + + return ret; +} + +static int if_sdio_prog_firmware(struct if_sdio_card *card) +{ + int ret; + u16 scratch; + + lbs_deb_enter(LBS_DEB_SDIO); + + sdio_claim_host(card->func); + scratch = if_sdio_read_scratch(card, &ret); + sdio_release_host(card->func); + + if (ret) + goto out; + + lbs_deb_sdio("firmware status = %#x\n", scratch); + + if (scratch == IF_SDIO_FIRMWARE_OK) { + lbs_deb_sdio("firmware already loaded\n"); + goto success; + } + + ret = if_sdio_prog_helper(card); + if (ret) + goto out; + + ret = if_sdio_prog_real(card); + if (ret) + goto out; + +success: + sdio_claim_host(card->func); + sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE); + sdio_release_host(card->func); + ret = 0; + +out: + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + + return ret; +} + +/*******************************************************************/ +/* Libertas callbacks */ +/*******************************************************************/ + +static int if_sdio_host_to_card(struct lbs_private *priv, + u8 type, u8 *buf, u16 nb) +{ + int ret; + struct if_sdio_card *card; + struct if_sdio_packet *packet, *cur; + u16 size; + unsigned long flags; + + lbs_deb_enter_args(LBS_DEB_SDIO, "type %d, bytes %d", type, nb); + + card = priv->card; + + if (nb > (65536 - sizeof(struct if_sdio_packet) - 4)) { + ret = -EINVAL; + goto out; + } + + /* + * The transfer must be in one transaction or the firmware + * goes suicidal. There's no way to guarantee that for all + * controllers, but we can at least try. + */ + size = sdio_align_size(card->func, nb + 4); + + packet = kzalloc(sizeof(struct if_sdio_packet) + size, + GFP_ATOMIC); + if (!packet) { + ret = -ENOMEM; + goto out; + } + + packet->next = NULL; + packet->nb = size; + + /* + * SDIO specific header. 
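+ *
+ * Bytes 0-1 carry the total transfer length (payload plus this 4-byte
+ * header) in little-endian order, byte 2 carries the packet type
+ * (MVMS_CMD or MVMS_DAT) and byte 3 is left zero.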
+ */ + packet->buffer[0] = (nb + 4) & 0xff; + packet->buffer[1] = ((nb + 4) >> 8) & 0xff; + packet->buffer[2] = type; + packet->buffer[3] = 0; + + memcpy(packet->buffer + 4, buf, nb); + + spin_lock_irqsave(&card->lock, flags); + + if (!card->packets) + card->packets = packet; + else { + cur = card->packets; + while (cur->next) + cur = cur->next; + cur->next = packet; + } + + switch (type) { + case MVMS_CMD: + priv->dnld_sent = DNLD_CMD_SENT; + break; + case MVMS_DAT: + priv->dnld_sent = DNLD_DATA_SENT; + break; + default: + lbs_deb_sdio("unknown packet type %d\n", (int)type); + } + + spin_unlock_irqrestore(&card->lock, flags); + + queue_work(card->workqueue, &card->packet_worker); + + ret = 0; + +out: + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + + return ret; +} + +static int if_sdio_enter_deep_sleep(struct lbs_private *priv) +{ + int ret = -1; + struct cmd_header cmd; + + memset(&cmd, 0, sizeof(cmd)); + + lbs_deb_sdio("send DEEP_SLEEP command\n"); + ret = __lbs_cmd(priv, CMD_802_11_DEEP_SLEEP, &cmd, sizeof(cmd), + lbs_cmd_copyback, (unsigned long) &cmd); + if (ret) + lbs_pr_err("DEEP_SLEEP cmd failed\n"); + + mdelay(200); + return ret; +} + +static int if_sdio_exit_deep_sleep(struct lbs_private *priv) +{ + struct if_sdio_card *card = priv->card; + int ret = -1; + + lbs_deb_enter(LBS_DEB_SDIO); + sdio_claim_host(card->func); + + sdio_writeb(card->func, HOST_POWER_UP, CONFIGURATION_REG, &ret); + if (ret) + lbs_pr_err("sdio_writeb failed!\n"); + + sdio_release_host(card->func); + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + return ret; +} + +static int if_sdio_reset_deep_sleep_wakeup(struct lbs_private *priv) +{ + struct if_sdio_card *card = priv->card; + int ret = -1; + + lbs_deb_enter(LBS_DEB_SDIO); + sdio_claim_host(card->func); + + sdio_writeb(card->func, 0, CONFIGURATION_REG, &ret); + if (ret) + lbs_pr_err("sdio_writeb failed!\n"); + + sdio_release_host(card->func); + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + return ret; + +} + +/*******************************************************************/ +/* SDIO callbacks */ +/*******************************************************************/ + +static void if_sdio_interrupt(struct sdio_func *func) +{ + int ret; + struct if_sdio_card *card; + u8 cause; + + lbs_deb_enter(LBS_DEB_SDIO); + + card = sdio_get_drvdata(func); + + cause = sdio_readb(card->func, IF_SDIO_H_INT_STATUS, &ret); + if (ret) + goto out; + + lbs_deb_sdio("interrupt: 0x%X\n", (unsigned)cause); + + sdio_writeb(card->func, ~cause, IF_SDIO_H_INT_STATUS, &ret); + if (ret) + goto out; + + /* + * Ignore the define name, this really means the card has + * successfully received the command. 
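+ *
+ * That is, the IF_SDIO_H_INT_DNLD bit checked below means the card
+ * has consumed the previous host-to-card transfer, so it leads to
+ * lbs_host_to_card_done() rather than to any download being started.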
+ */ + card->priv->is_activity_detected = 1; + if (cause & IF_SDIO_H_INT_DNLD) + lbs_host_to_card_done(card->priv); + + + if (cause & IF_SDIO_H_INT_UPLD) { + ret = if_sdio_card_to_host(card); + if (ret) + goto out; + } + + ret = 0; + +out: + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); +} + +static int if_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct if_sdio_card *card; + struct lbs_private *priv; + int ret, i; + unsigned int model; + struct if_sdio_packet *packet; + + lbs_deb_enter(LBS_DEB_SDIO); + + for (i = 0;i < func->card->num_info;i++) { + if (sscanf(func->card->info[i], + "802.11 SDIO ID: %x", &model) == 1) + break; + if (sscanf(func->card->info[i], + "ID: %x", &model) == 1) + break; + if (!strcmp(func->card->info[i], "IBIS Wireless SDIO Card")) { + model = IF_SDIO_MODEL_8385; + break; + } + } + + if (i == func->card->num_info) { + lbs_pr_err("unable to identify card model\n"); + return -ENODEV; + } + + card = kzalloc(sizeof(struct if_sdio_card), GFP_KERNEL); + if (!card) + return -ENOMEM; + + card->func = func; + card->model = model; + + switch (card->model) { + case IF_SDIO_MODEL_8385: + card->scratch_reg = IF_SDIO_SCRATCH_OLD; + break; + case IF_SDIO_MODEL_8686: + card->scratch_reg = IF_SDIO_SCRATCH; + break; + case IF_SDIO_MODEL_8688: + default: /* for newer chipsets */ + card->scratch_reg = IF_SDIO_FW_STATUS; + break; + } + + spin_lock_init(&card->lock); + card->workqueue = create_workqueue("libertas_sdio"); + INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); + + for (i = 0;i < ARRAY_SIZE(if_sdio_models);i++) { + if (card->model == if_sdio_models[i].model) + break; + } + + if (i == ARRAY_SIZE(if_sdio_models)) { + lbs_pr_err("unknown card model 0x%x\n", card->model); + ret = -ENODEV; + goto free; + } + + card->helper = if_sdio_models[i].helper; + card->firmware = if_sdio_models[i].firmware; + + if (lbs_helper_name) { + lbs_deb_sdio("overriding helper firmware: %s\n", + lbs_helper_name); + card->helper = lbs_helper_name; + } + + if (lbs_fw_name) { + lbs_deb_sdio("overriding firmware: %s\n", lbs_fw_name); + card->firmware = lbs_fw_name; + } + + sdio_claim_host(func); + + ret = sdio_enable_func(func); + if (ret) + goto release; + + ret = sdio_claim_irq(func, if_sdio_interrupt); + if (ret) + goto disable; + + card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret); + if (ret) + goto release_int; + + card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 1, &ret) << 8; + if (ret) + goto release_int; + + card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 2, &ret) << 16; + if (ret) + goto release_int; + + sdio_release_host(func); + + sdio_set_drvdata(func, card); + + lbs_deb_sdio("class = 0x%X, vendor = 0x%X, " + "device = 0x%X, model = 0x%X, ioport = 0x%X\n", + func->class, func->vendor, func->device, + model, (unsigned)card->ioport); + + ret = if_sdio_prog_firmware(card); + if (ret) + goto reclaim; + + priv = lbs_add_card(card, &func->dev); + if (!priv) { + ret = -ENOMEM; + goto reclaim; + } + + card->priv = priv; + + priv->card = card; + priv->hw_host_to_card = if_sdio_host_to_card; + priv->enter_deep_sleep = if_sdio_enter_deep_sleep; + priv->exit_deep_sleep = if_sdio_exit_deep_sleep; + priv->reset_deep_sleep_wakeup = if_sdio_reset_deep_sleep_wakeup; + + priv->fw_ready = 1; + + sdio_claim_host(func); + + /* + * Get rx_unit if the chip is SD8688 or newer. + * SD8385 & SD8686 do not have rx_unit. 
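+ *
+ * rx_unit is the shift applied to the IF_SDIO_RX_LEN register value
+ * in if_sdio_read_rx_len() to turn it into an RX length in bytes.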
+ */ + if ((card->model != IF_SDIO_MODEL_8385) + && (card->model != IF_SDIO_MODEL_8686)) + card->rx_unit = if_sdio_read_rx_unit(card); + else + card->rx_unit = 0; + + /* + * Enable interrupts now that everything is set up + */ + sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret); + sdio_release_host(func); + if (ret) + goto reclaim; + + /* + * FUNC_INIT is required for SD8688 WLAN/BT multiple functions + */ + if (card->model == IF_SDIO_MODEL_8688) { + struct cmd_header cmd; + + memset(&cmd, 0, sizeof(cmd)); + + lbs_deb_sdio("send function INIT command\n"); + if (__lbs_cmd(priv, CMD_FUNC_INIT, &cmd, sizeof(cmd), + lbs_cmd_copyback, (unsigned long) &cmd)) + lbs_pr_alert("CMD_FUNC_INIT cmd failed\n"); + } + + ret = lbs_start_card(priv); + if (ret) + goto err_activate_card; + +out: + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + + return ret; + +err_activate_card: + flush_workqueue(card->workqueue); + lbs_remove_card(priv); +reclaim: + sdio_claim_host(func); +release_int: + sdio_release_irq(func); +disable: + sdio_disable_func(func); +release: + sdio_release_host(func); +free: + destroy_workqueue(card->workqueue); + while (card->packets) { + packet = card->packets; + card->packets = card->packets->next; + kfree(packet); + } + + kfree(card); + + goto out; +} + +static void if_sdio_remove(struct sdio_func *func) +{ + struct if_sdio_card *card; + struct if_sdio_packet *packet; + + lbs_deb_enter(LBS_DEB_SDIO); + + card = sdio_get_drvdata(func); + + if (user_rmmod && (card->model == IF_SDIO_MODEL_8688)) { + /* + * FUNC_SHUTDOWN is required for SD8688 WLAN/BT + * multiple functions + */ + struct cmd_header cmd; + + memset(&cmd, 0, sizeof(cmd)); + + lbs_deb_sdio("send function SHUTDOWN command\n"); + if (__lbs_cmd(card->priv, CMD_FUNC_SHUTDOWN, + &cmd, sizeof(cmd), lbs_cmd_copyback, + (unsigned long) &cmd)) + lbs_pr_alert("CMD_FUNC_SHUTDOWN cmd failed\n"); + } + + + lbs_deb_sdio("call remove card\n"); + lbs_stop_card(card->priv); + lbs_remove_card(card->priv); + card->priv->surpriseremoved = 1; + + flush_workqueue(card->workqueue); + destroy_workqueue(card->workqueue); + + sdio_claim_host(func); + sdio_release_irq(func); + sdio_disable_func(func); + sdio_release_host(func); + + while (card->packets) { + packet = card->packets; + card->packets = card->packets->next; + kfree(packet); + } + + kfree(card); + + lbs_deb_leave(LBS_DEB_SDIO); +} + +static struct sdio_driver if_sdio_driver = { + .name = "libertas_sdio", + .id_table = if_sdio_ids, + .probe = if_sdio_probe, + .remove = if_sdio_remove, +}; + +/*******************************************************************/ +/* Module functions */ +/*******************************************************************/ + +static int __init if_sdio_init_module(void) +{ + int ret = 0; + + lbs_deb_enter(LBS_DEB_SDIO); + + printk(KERN_INFO "libertas_sdio: Libertas SDIO driver\n"); + printk(KERN_INFO "libertas_sdio: Copyright Pierre Ossman\n"); + + ret = sdio_register_driver(&if_sdio_driver); + + /* Clear the flag in case user removes the card. */ + user_rmmod = 0; + + lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); + + return ret; +} + +static void __exit if_sdio_exit_module(void) +{ + lbs_deb_enter(LBS_DEB_SDIO); + + /* Set the flag as user is removing this module. 
*/ + user_rmmod = 1; + + sdio_unregister_driver(&if_sdio_driver); + + lbs_deb_leave(LBS_DEB_SDIO); +} + +module_init(if_sdio_init_module); +module_exit(if_sdio_exit_module); + +MODULE_DESCRIPTION("Libertas SDIO WLAN Driver"); +MODULE_AUTHOR("Pierre Ossman"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/libertas/if_sdio.h b/drivers/net/wireless/libertas/if_sdio.h new file mode 100644 index 0000000..12179c1 --- /dev/null +++ b/drivers/net/wireless/libertas/if_sdio.h @@ -0,0 +1,56 @@ +/* + * linux/drivers/net/wireless/libertas/if_sdio.h + * + * Copyright 2007 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef _LBS_IF_SDIO_H +#define _LBS_IF_SDIO_H + +#define IF_SDIO_MODEL_8385 0x04 +#define IF_SDIO_MODEL_8686 0x0b +#define IF_SDIO_MODEL_8688 0x10 + +#define IF_SDIO_IOPORT 0x00 + +#define IF_SDIO_H_INT_MASK 0x04 +#define IF_SDIO_H_INT_OFLOW 0x08 +#define IF_SDIO_H_INT_UFLOW 0x04 +#define IF_SDIO_H_INT_DNLD 0x02 +#define IF_SDIO_H_INT_UPLD 0x01 + +#define IF_SDIO_H_INT_STATUS 0x05 +#define IF_SDIO_H_INT_RSR 0x06 +#define IF_SDIO_H_INT_STATUS2 0x07 + +#define IF_SDIO_RD_BASE 0x10 + +#define IF_SDIO_STATUS 0x20 +#define IF_SDIO_IO_RDY 0x08 +#define IF_SDIO_CIS_RDY 0x04 +#define IF_SDIO_UL_RDY 0x02 +#define IF_SDIO_DL_RDY 0x01 + +#define IF_SDIO_C_INT_MASK 0x24 +#define IF_SDIO_C_INT_STATUS 0x28 +#define IF_SDIO_C_INT_RSR 0x2C + +#define IF_SDIO_SCRATCH 0x34 +#define IF_SDIO_SCRATCH_OLD 0x80fe +#define IF_SDIO_FW_STATUS 0x40 +#define IF_SDIO_FIRMWARE_OK 0xfedc + +#define IF_SDIO_RX_LEN 0x42 +#define IF_SDIO_RX_UNIT 0x43 + +#define IF_SDIO_EVENT 0x80fc + +#define IF_SDIO_BLOCK_SIZE 256 +#define CONFIGURATION_REG 0x03 +#define HOST_POWER_UP (0x1U << 1) +#endif diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c new file mode 100644 index 0000000..3ea03f2 --- /dev/null +++ b/drivers/net/wireless/libertas/if_spi.c @@ -0,0 +1,1109 @@ +/* + * linux/drivers/net/wireless/libertas/if_spi.c + * + * Driver for Marvell SPI WLAN cards. + * + * Copyright 2008 Analog Devices Inc. + * + * Authors: + * Andrey Yurovsky + * Colin McCabe + * + * Inspired by if_sdio.c, Copyright 2007-2008 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "host.h" +#include "decl.h" +#include "defs.h" +#include "dev.h" +#include "if_spi.h" + +struct if_spi_card { + struct spi_device *spi; + struct lbs_private *priv; + struct libertas_spi_platform_data *pdata; + + char helper_fw_name[IF_SPI_FW_NAME_MAX]; + char main_fw_name[IF_SPI_FW_NAME_MAX]; + + /* The card ID and card revision, as reported by the hardware. 
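+ * They are filled in by spu_get_chip_revision(), which decodes the
+ * IF_SPI_DEVICEID_CTRL_REG register.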
*/ + u16 card_id; + u8 card_rev; + + /* The last time that we initiated an SPU operation */ + unsigned long prev_xfer_time; + + int use_dummy_writes; + unsigned long spu_port_delay; + unsigned long spu_reg_delay; + + /* Handles all SPI communication (except for FW load) */ + struct task_struct *spi_thread; + int run_thread; + + /* Used to wake up the spi_thread */ + struct semaphore spi_ready; + struct semaphore spi_thread_terminated; + + u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE]; +}; + +static void free_if_spi_card(struct if_spi_card *card) +{ + spi_set_drvdata(card->spi, NULL); + kfree(card); +} + +static struct chip_ident chip_id_to_device_name[] = { + { .chip_id = 0x04, .name = 8385 }, + { .chip_id = 0x0b, .name = 8686 }, +}; + +/* + * SPI Interface Unit Routines + * + * The SPU sits between the host and the WLAN module. + * All communication with the firmware is through SPU transactions. + * + * First we have to put a SPU register name on the bus. Then we can + * either read from or write to that register. + * + */ + +static void spu_transaction_init(struct if_spi_card *card) +{ + if (!time_after(jiffies, card->prev_xfer_time + 1)) { + /* Unfortunately, the SPU requires a delay between successive + * transactions. If our last transaction was more than a jiffy + * ago, we have obviously already delayed enough. + * If not, we have to busy-wait to be on the safe side. */ + ndelay(400); + } +} + +static void spu_transaction_finish(struct if_spi_card *card) +{ + card->prev_xfer_time = jiffies; +} + +/* Write out a byte buffer to an SPI register, + * using a series of 16-bit transfers. */ +static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len) +{ + int err = 0; + __le16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK); + struct spi_message m; + struct spi_transfer reg_trans; + struct spi_transfer data_trans; + + spi_message_init(&m); + memset(®_trans, 0, sizeof(reg_trans)); + memset(&data_trans, 0, sizeof(data_trans)); + + /* You must give an even number of bytes to the SPU, even if it + * doesn't care about the last one. */ + BUG_ON(len & 0x1); + + spu_transaction_init(card); + + /* write SPU register index */ + reg_trans.tx_buf = ®_out; + reg_trans.len = sizeof(reg_out); + + data_trans.tx_buf = buf; + data_trans.len = len; + + spi_message_add_tail(®_trans, &m); + spi_message_add_tail(&data_trans, &m); + + err = spi_sync(card->spi, &m); + spu_transaction_finish(card); + return err; +} + +static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val) +{ + __le16 buff; + + buff = cpu_to_le16(val); + return spu_write(card, reg, (u8 *)&buff, sizeof(u16)); +} + +static inline int spu_reg_is_port_reg(u16 reg) +{ + switch (reg) { + case IF_SPI_IO_RDWRPORT_REG: + case IF_SPI_CMD_RDWRPORT_REG: + case IF_SPI_DATA_RDWRPORT_REG: + return 1; + default: + return 0; + } +} + +static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len) +{ + unsigned int delay; + int err = 0; + __le16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK); + struct spi_message m; + struct spi_transfer reg_trans; + struct spi_transfer dummy_trans; + struct spi_transfer data_trans; + + /* You must take an even number of bytes from the SPU, even if you + * don't care about the last one. 
*/ + BUG_ON(len & 0x1); + + spu_transaction_init(card); + + spi_message_init(&m); + memset(®_trans, 0, sizeof(reg_trans)); + memset(&dummy_trans, 0, sizeof(dummy_trans)); + memset(&data_trans, 0, sizeof(data_trans)); + + /* write SPU register index */ + reg_trans.tx_buf = ®_out; + reg_trans.len = sizeof(reg_out); + spi_message_add_tail(®_trans, &m); + + delay = spu_reg_is_port_reg(reg) ? card->spu_port_delay : + card->spu_reg_delay; + if (card->use_dummy_writes) { + /* Clock in dummy cycles while the SPU fills the FIFO */ + dummy_trans.len = delay / 8; + spi_message_add_tail(&dummy_trans, &m); + } else { + /* Busy-wait while the SPU fills the FIFO */ + reg_trans.delay_usecs = + DIV_ROUND_UP((100 + (delay * 10)), 1000); + } + + /* read in data */ + data_trans.rx_buf = buf; + data_trans.len = len; + spi_message_add_tail(&data_trans, &m); + + err = spi_sync(card->spi, &m); + spu_transaction_finish(card); + return err; +} + +/* Read 16 bits from an SPI register */ +static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val) +{ + __le16 buf; + int ret; + + ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf)); + if (ret == 0) + *val = le16_to_cpup(&buf); + return ret; +} + +/* Read 32 bits from an SPI register. + * The low 16 bits are read first. */ +static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val) +{ + __le32 buf; + int err; + + err = spu_read(card, reg, (u8 *)&buf, sizeof(buf)); + if (!err) + *val = le32_to_cpup(&buf); + return err; +} + +/* Keep reading 16 bits from an SPI register until you get the correct result. + * + * If mask = 0, the correct result is any non-zero number. + * If mask != 0, the correct result is any number where + * number & target_mask == target + * + * Returns -ETIMEDOUT if a second passes without the correct result. */ +static int spu_wait_for_u16(struct if_spi_card *card, u16 reg, + u16 target_mask, u16 target) +{ + int err; + unsigned long timeout = jiffies + 5*HZ; + while (1) { + u16 val; + err = spu_read_u16(card, reg, &val); + if (err) + return err; + if (target_mask) { + if ((val & target_mask) == target) + return 0; + } else { + if (val) + return 0; + } + udelay(100); + if (time_after(jiffies, timeout)) { + lbs_pr_err("%s: timeout with val=%02x, " + "target_mask=%02x, target=%02x\n", + __func__, val, target_mask, target); + return -ETIMEDOUT; + } + } +} + +/* Read 16 bits from an SPI register until you receive a specific value. + * Returns -ETIMEDOUT if a 4 tries pass without success. */ +static int spu_wait_for_u32(struct if_spi_card *card, u32 reg, u32 target) +{ + int err, try; + for (try = 0; try < 4; ++try) { + u32 val = 0; + err = spu_read_u32(card, reg, &val); + if (err) + return err; + if (val == target) + return 0; + mdelay(100); + } + return -ETIMEDOUT; +} + +static int spu_set_interrupt_mode(struct if_spi_card *card, + int suppress_host_int, + int auto_int) +{ + int err = 0; + + /* We can suppress a host interrupt by clearing the appropriate + * bit in the "host interrupt status mask" register */ + if (suppress_host_int) { + err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG, 0); + if (err) + return err; + } else { + err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG, + IF_SPI_HISM_TX_DOWNLOAD_RDY | + IF_SPI_HISM_RX_UPLOAD_RDY | + IF_SPI_HISM_CMD_DOWNLOAD_RDY | + IF_SPI_HISM_CARDEVENT | + IF_SPI_HISM_CMD_UPLOAD_RDY); + if (err) + return err; + } + + /* If auto-interrupts are on, the completion of certain transactions + * will trigger an interrupt automatically. 
If auto-interrupts + * are off, we need to set the "Card Interrupt Cause" register to + * trigger a card interrupt. */ + if (auto_int) { + err = spu_write_u16(card, IF_SPI_HOST_INT_CTRL_REG, + IF_SPI_HICT_TX_DOWNLOAD_OVER_AUTO | + IF_SPI_HICT_RX_UPLOAD_OVER_AUTO | + IF_SPI_HICT_CMD_DOWNLOAD_OVER_AUTO | + IF_SPI_HICT_CMD_UPLOAD_OVER_AUTO); + if (err) + return err; + } else { + err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG, 0); + if (err) + return err; + } + return err; +} + +static int spu_get_chip_revision(struct if_spi_card *card, + u16 *card_id, u8 *card_rev) +{ + int err = 0; + u32 dev_ctrl; + err = spu_read_u32(card, IF_SPI_DEVICEID_CTRL_REG, &dev_ctrl); + if (err) + return err; + *card_id = IF_SPI_DEVICEID_CTRL_REG_TO_CARD_ID(dev_ctrl); + *card_rev = IF_SPI_DEVICEID_CTRL_REG_TO_CARD_REV(dev_ctrl); + return err; +} + +static int spu_set_bus_mode(struct if_spi_card *card, u16 mode) +{ + int err = 0; + u16 rval; + /* set bus mode */ + err = spu_write_u16(card, IF_SPI_SPU_BUS_MODE_REG, mode); + if (err) + return err; + /* Check that we were able to read back what we just wrote. */ + err = spu_read_u16(card, IF_SPI_SPU_BUS_MODE_REG, &rval); + if (err) + return err; + if ((rval & 0xF) != mode) { + lbs_pr_err("Can't read bus mode register.\n"); + return -EIO; + } + return 0; +} + +static int spu_init(struct if_spi_card *card, int use_dummy_writes) +{ + int err = 0; + u32 delay; + + /* We have to start up in timed delay mode so that we can safely + * read the Delay Read Register. */ + card->use_dummy_writes = 0; + err = spu_set_bus_mode(card, + IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_RISING | + IF_SPI_BUS_MODE_DELAY_METHOD_TIMED | + IF_SPI_BUS_MODE_16_BIT_ADDRESS_16_BIT_DATA); + if (err) + return err; + card->spu_port_delay = 1000; + card->spu_reg_delay = 1000; + err = spu_read_u32(card, IF_SPI_DELAY_READ_REG, &delay); + if (err) + return err; + card->spu_port_delay = delay & 0x0000ffff; + card->spu_reg_delay = (delay & 0xffff0000) >> 16; + + /* If dummy clock delay mode has been requested, switch to it now */ + if (use_dummy_writes) { + card->use_dummy_writes = 1; + err = spu_set_bus_mode(card, + IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_RISING | + IF_SPI_BUS_MODE_DELAY_METHOD_DUMMY_CLOCK | + IF_SPI_BUS_MODE_16_BIT_ADDRESS_16_BIT_DATA); + if (err) + return err; + } + + lbs_deb_spi("Initialized SPU unit. 
" + "spu_port_delay=0x%04lx, spu_reg_delay=0x%04lx\n", + card->spu_port_delay, card->spu_reg_delay); + return err; +} + +/* + * Firmware Loading + */ + +static int if_spi_prog_helper_firmware(struct if_spi_card *card) +{ + int err = 0; + const struct firmware *firmware = NULL; + int bytes_remaining; + const u8 *fw; + u8 temp[HELPER_FW_LOAD_CHUNK_SZ]; + struct spi_device *spi = card->spi; + + lbs_deb_enter(LBS_DEB_SPI); + + err = spu_set_interrupt_mode(card, 1, 0); + if (err) + goto out; + /* Get helper firmware image */ + err = request_firmware(&firmware, card->helper_fw_name, &spi->dev); + if (err) { + lbs_pr_err("request_firmware failed with err = %d\n", err); + goto out; + } + bytes_remaining = firmware->size; + fw = firmware->data; + + /* Load helper firmware image */ + while (bytes_remaining > 0) { + /* Scratch pad 1 should contain the number of bytes we + * want to download to the firmware */ + err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, + HELPER_FW_LOAD_CHUNK_SZ); + if (err) + goto release_firmware; + + err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG, + IF_SPI_HIST_CMD_DOWNLOAD_RDY, + IF_SPI_HIST_CMD_DOWNLOAD_RDY); + if (err) + goto release_firmware; + + /* Feed the data into the command read/write port reg + * in chunks of 64 bytes */ + memset(temp, 0, sizeof(temp)); + memcpy(temp, fw, + min(bytes_remaining, HELPER_FW_LOAD_CHUNK_SZ)); + mdelay(10); + err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, + temp, HELPER_FW_LOAD_CHUNK_SZ); + if (err) + goto release_firmware; + + /* Interrupt the boot code */ + err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0); + if (err) + goto release_firmware; + err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, + IF_SPI_CIC_CMD_DOWNLOAD_OVER); + if (err) + goto release_firmware; + bytes_remaining -= HELPER_FW_LOAD_CHUNK_SZ; + fw += HELPER_FW_LOAD_CHUNK_SZ; + } + + /* Once the helper / single stage firmware download is complete, + * write 0 to scratch pad 1 and interrupt the + * bootloader. This completes the helper download. */ + err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, FIRMWARE_DNLD_OK); + if (err) + goto release_firmware; + err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0); + if (err) + goto release_firmware; + err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, + IF_SPI_CIC_CMD_DOWNLOAD_OVER); + goto release_firmware; + + lbs_deb_spi("waiting for helper to boot...\n"); + +release_firmware: + release_firmware(firmware); +out: + if (err) + lbs_pr_err("failed to load helper firmware (err=%d)\n", err); + lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err); + return err; +} + +/* Returns the length of the next packet the firmware expects us to send + * Sets crc_err if the previous transfer had a CRC error. */ +static int if_spi_prog_main_firmware_check_len(struct if_spi_card *card, + int *crc_err) +{ + u16 len; + int err = 0; + + /* wait until the host interrupt status register indicates + * that we are ready to download */ + err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG, + IF_SPI_HIST_CMD_DOWNLOAD_RDY, + IF_SPI_HIST_CMD_DOWNLOAD_RDY); + if (err) { + lbs_pr_err("timed out waiting for host_int_status\n"); + return err; + } + + /* Ask the device how many bytes of firmware it wants. */ + err = spu_read_u16(card, IF_SPI_SCRATCH_1_REG, &len); + if (err) + return err; + + if (len > IF_SPI_CMD_BUF_SIZE) { + lbs_pr_err("firmware load device requested a larger " + "tranfer than we are prepared to " + "handle. 
(len = %d)\n", len); + return -EIO; + } + if (len & 0x1) { + lbs_deb_spi("%s: crc error\n", __func__); + len &= ~0x1; + *crc_err = 1; + } else + *crc_err = 0; + + return len; +} + +static int if_spi_prog_main_firmware(struct if_spi_card *card) +{ + int len, prev_len; + int bytes, crc_err = 0, err = 0; + const struct firmware *firmware = NULL; + const u8 *fw; + struct spi_device *spi = card->spi; + u16 num_crc_errs; + + lbs_deb_enter(LBS_DEB_SPI); + + err = spu_set_interrupt_mode(card, 1, 0); + if (err) + goto out; + + /* Get firmware image */ + err = request_firmware(&firmware, card->main_fw_name, &spi->dev); + if (err) { + lbs_pr_err("%s: can't get firmware '%s' from kernel. " + "err = %d\n", __func__, card->main_fw_name, err); + goto out; + } + + err = spu_wait_for_u16(card, IF_SPI_SCRATCH_1_REG, 0, 0); + if (err) { + lbs_pr_err("%s: timed out waiting for initial " + "scratch reg = 0\n", __func__); + goto release_firmware; + } + + num_crc_errs = 0; + prev_len = 0; + bytes = firmware->size; + fw = firmware->data; + while ((len = if_spi_prog_main_firmware_check_len(card, &crc_err))) { + if (len < 0) { + err = len; + goto release_firmware; + } + if (bytes < 0) { + /* If there are no more bytes left, we would normally + * expect to have terminated with len = 0 */ + lbs_pr_err("Firmware load wants more bytes " + "than we have to offer.\n"); + break; + } + if (crc_err) { + /* Previous transfer failed. */ + if (++num_crc_errs > MAX_MAIN_FW_LOAD_CRC_ERR) { + lbs_pr_err("Too many CRC errors encountered " + "in firmware load.\n"); + err = -EIO; + goto release_firmware; + } + } else { + /* Previous transfer succeeded. Advance counters. */ + bytes -= prev_len; + fw += prev_len; + } + if (bytes < len) { + memset(card->cmd_buffer, 0, len); + memcpy(card->cmd_buffer, fw, bytes); + } else + memcpy(card->cmd_buffer, fw, len); + + err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0); + if (err) + goto release_firmware; + err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, + card->cmd_buffer, len); + if (err) + goto release_firmware; + err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG , + IF_SPI_CIC_CMD_DOWNLOAD_OVER); + if (err) + goto release_firmware; + prev_len = len; + } + if (bytes > prev_len) { + lbs_pr_err("firmware load wants fewer bytes than " + "we have to offer.\n"); + } + + /* Confirm firmware download */ + err = spu_wait_for_u32(card, IF_SPI_SCRATCH_4_REG, + SUCCESSFUL_FW_DOWNLOAD_MAGIC); + if (err) { + lbs_pr_err("failed to confirm the firmware download\n"); + goto release_firmware; + } + +release_firmware: + release_firmware(firmware); + +out: + if (err) + lbs_pr_err("failed to load firmware (err=%d)\n", err); + lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err); + return err; +} + +/* + * SPI Transfer Thread + * + * The SPI thread handles all SPI transfers, so there is no need for a lock. + */ + +/* Move a command from the card to the host */ +static int if_spi_c2h_cmd(struct if_spi_card *card) +{ + struct lbs_private *priv = card->priv; + unsigned long flags; + int err = 0; + u16 len; + u8 i; + + /* We need a buffer big enough to handle whatever people send to + * hw_host_to_card */ + BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE < LBS_CMD_BUFFER_SIZE); + BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE < LBS_UPLD_SIZE); + + /* It's just annoying if the buffer size isn't a multiple of 4, because + * then we might have len < IF_SPI_CMD_BUF_SIZE but + * ALIGN(len, 4) > IF_SPI_CMD_BUF_SIZE */ + BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE % 4 != 0); + + lbs_deb_enter(LBS_DEB_SPI); + + /* How many bytes are there to read? 
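+ * The firmware posts the length of a pending command response in
+ * scratch register 2; data lengths are reported via scratch register 1.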
*/ + err = spu_read_u16(card, IF_SPI_SCRATCH_2_REG, &len); + if (err) + goto out; + if (!len) { + lbs_pr_err("%s: error: card has no data for host\n", + __func__); + err = -EINVAL; + goto out; + } else if (len > IF_SPI_CMD_BUF_SIZE) { + lbs_pr_err("%s: error: response packet too large: " + "%d bytes, but maximum is %d\n", + __func__, len, IF_SPI_CMD_BUF_SIZE); + err = -EINVAL; + goto out; + } + + /* Read the data from the WLAN module into our command buffer */ + err = spu_read(card, IF_SPI_CMD_RDWRPORT_REG, + card->cmd_buffer, ALIGN(len, 4)); + if (err) + goto out; + + spin_lock_irqsave(&priv->driver_lock, flags); + i = (priv->resp_idx == 0) ? 1 : 0; + BUG_ON(priv->resp_len[i]); + priv->resp_len[i] = len; + memcpy(priv->resp_buf[i], card->cmd_buffer, len); + lbs_notify_command_response(priv, i); + spin_unlock_irqrestore(&priv->driver_lock, flags); + +out: + if (err) + lbs_pr_err("%s: err=%d\n", __func__, err); + lbs_deb_leave(LBS_DEB_SPI); + return err; +} + +/* Move data from the card to the host */ +static int if_spi_c2h_data(struct if_spi_card *card) +{ + struct sk_buff *skb; + char *data; + u16 len; + int err = 0; + + lbs_deb_enter(LBS_DEB_SPI); + + /* How many bytes are there to read? */ + err = spu_read_u16(card, IF_SPI_SCRATCH_1_REG, &len); + if (err) + goto out; + if (!len) { + lbs_pr_err("%s: error: card has no data for host\n", + __func__); + err = -EINVAL; + goto out; + } else if (len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { + lbs_pr_err("%s: error: card has %d bytes of data, but " + "our maximum skb size is %zu\n", + __func__, len, MRVDRV_ETH_RX_PACKET_BUFFER_SIZE); + err = -EINVAL; + goto out; + } + + /* TODO: should we allocate a smaller skb if we have less data? */ + skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE); + if (!skb) { + err = -ENOBUFS; + goto out; + } + skb_reserve(skb, IPFIELD_ALIGN_OFFSET); + data = skb_put(skb, len); + + /* Read the data from the WLAN module into our skb... */ + err = spu_read(card, IF_SPI_DATA_RDWRPORT_REG, data, ALIGN(len, 4)); + if (err) + goto free_skb; + + /* pass the SKB to libertas */ + err = lbs_process_rxed_packet(card->priv, skb); + if (err) + goto free_skb; + + /* success */ + goto out; + +free_skb: + dev_kfree_skb(skb); +out: + if (err) + lbs_pr_err("%s: err=%d\n", __func__, err); + lbs_deb_leave(LBS_DEB_SPI); + return err; +} + +/* Inform the host about a card event */ +static void if_spi_e2h(struct if_spi_card *card) +{ + int err = 0; + u32 cause; + struct lbs_private *priv = card->priv; + + err = spu_read_u32(card, IF_SPI_SCRATCH_3_REG, &cause); + if (err) + goto out; + + /* re-enable the card event interrupt */ + spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, + ~IF_SPI_HICU_CARD_EVENT); + + /* generate a card interrupt */ + spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, IF_SPI_CIC_HOST_EVENT); + + lbs_queue_event(priv, cause & 0xff); +out: + if (err) + lbs_pr_err("%s: error %d\n", __func__, err); +} + +static int lbs_spi_thread(void *data) +{ + int err; + struct if_spi_card *card = data; + u16 hiStatus; + + while (1) { + /* Wait to be woken up by one of two things. First, our ISR + * could tell us that something happened on the WLAN. + * Secondly, libertas could call hw_host_to_card with more + * data, which we might be able to send. + */ + do { + err = down_interruptible(&card->spi_ready); + if (!card->run_thread) { + up(&card->spi_thread_terminated); + do_exit(0); + } + } while (err == EINTR); + + /* Read the host interrupt status register to see what we + * can do. 
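+ * Each bit that is set is handled below: command upload, data upload,
+ * download-ready notifications and card events.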
*/ + err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG, + &hiStatus); + if (err) { + lbs_pr_err("I/O error\n"); + goto err; + } + + if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) + err = if_spi_c2h_cmd(card); + if (err) + goto err; + if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) + err = if_spi_c2h_data(card); + if (err) + goto err; + + /* workaround: in PS mode, the card does not set the Command + * Download Ready bit, but it sets TX Download Ready. */ + if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY || + (card->priv->psstate != PS_STATE_FULL_POWER && + (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) { + lbs_host_to_card_done(card->priv); + } + + if (hiStatus & IF_SPI_HIST_CARD_EVENT) + if_spi_e2h(card); + +err: + if (err) + lbs_pr_err("%s: got error %d\n", __func__, err); + } +} + +/* Block until lbs_spi_thread thread has terminated */ +static void if_spi_terminate_spi_thread(struct if_spi_card *card) +{ + /* It would be nice to use kthread_stop here, but that function + * can't wake threads waiting for a semaphore. */ + card->run_thread = 0; + up(&card->spi_ready); + down(&card->spi_thread_terminated); +} + +/* + * Host to Card + * + * Called from Libertas to transfer some data to the WLAN device + * We can't sleep here. */ +static int if_spi_host_to_card(struct lbs_private *priv, + u8 type, u8 *buf, u16 nb) +{ + int err = 0; + struct if_spi_card *card = priv->card; + + lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb); + + nb = ALIGN(nb, 4); + + switch (type) { + case MVMS_CMD: + err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb); + break; + case MVMS_DAT: + err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb); + break; + default: + lbs_pr_err("can't transfer buffer of type %d", type); + err = -EINVAL; + break; + } + + lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err); + return err; +} + +/* + * Host Interrupts + * + * Service incoming interrupts from the WLAN device. We can't sleep here, so + * don't try to talk on the SPI bus, just wake up the SPI thread. 
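+ * Waking the thread is just an up() on a semaphore, which is safe to
+ * do from hard interrupt context.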
+ */ +static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id) +{ + struct if_spi_card *card = dev_id; + + up(&card->spi_ready); + return IRQ_HANDLED; +} + +/* + * SPI callbacks + */ + +static int if_spi_calculate_fw_names(u16 card_id, + char *helper_fw, char *main_fw) +{ + int i; + for (i = 0; i < ARRAY_SIZE(chip_id_to_device_name); ++i) { + if (card_id == chip_id_to_device_name[i].chip_id) + break; + } + if (i == ARRAY_SIZE(chip_id_to_device_name)) { + lbs_pr_err("Unsupported chip_id: 0x%02x\n", card_id); + return -EAFNOSUPPORT; + } + snprintf(helper_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d_hlp.bin", + chip_id_to_device_name[i].name); + snprintf(main_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d.bin", + chip_id_to_device_name[i].name); + return 0; +} +MODULE_FIRMWARE("libertas/gspi8385_hlp.bin"); +MODULE_FIRMWARE("libertas/gspi8385.bin"); +MODULE_FIRMWARE("libertas/gspi8686_hlp.bin"); +MODULE_FIRMWARE("libertas/gspi8686.bin"); + +static int __devinit if_spi_probe(struct spi_device *spi) +{ + struct if_spi_card *card; + struct lbs_private *priv = NULL; + struct libertas_spi_platform_data *pdata = spi->dev.platform_data; + int err = 0; + u32 scratch; + struct sched_param param = { .sched_priority = 1 }; + + lbs_deb_enter(LBS_DEB_SPI); + + if (!pdata) { + err = -EINVAL; + goto out; + } + + if (pdata->setup) { + err = pdata->setup(spi); + if (err) + goto out; + } + + /* Allocate card structure to represent this specific device */ + card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL); + if (!card) { + err = -ENOMEM; + goto out; + } + spi_set_drvdata(spi, card); + card->pdata = pdata; + card->spi = spi; + card->prev_xfer_time = jiffies; + + sema_init(&card->spi_ready, 0); + sema_init(&card->spi_thread_terminated, 0); + + /* Initialize the SPI Interface Unit */ + err = spu_init(card, pdata->use_dummy_writes); + if (err) + goto free_card; + err = spu_get_chip_revision(card, &card->card_id, &card->card_rev); + if (err) + goto free_card; + + /* Firmware load */ + err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch); + if (err) + goto free_card; + if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC) + lbs_deb_spi("Firmware is already loaded for " + "Marvell WLAN 802.11 adapter\n"); + else { + err = if_spi_calculate_fw_names(card->card_id, + card->helper_fw_name, card->main_fw_name); + if (err) + goto free_card; + + lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter " + "(chip_id = 0x%04x, chip_rev = 0x%02x) " + "attached to SPI bus_num %d, chip_select %d. " + "spi->max_speed_hz=%d\n", + card->card_id, card->card_rev, + spi->master->bus_num, spi->chip_select, + spi->max_speed_hz); + err = if_spi_prog_helper_firmware(card); + if (err) + goto free_card; + err = if_spi_prog_main_firmware(card); + if (err) + goto free_card; + lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n"); + } + + err = spu_set_interrupt_mode(card, 0, 1); + if (err) + goto free_card; + + /* Register our card with libertas. + * This will call alloc_etherdev */ + priv = lbs_add_card(card, &spi->dev); + if (!priv) { + err = -ENOMEM; + goto free_card; + } + card->priv = priv; + priv->card = card; + priv->hw_host_to_card = if_spi_host_to_card; + priv->enter_deep_sleep = NULL; + priv->exit_deep_sleep = NULL; + priv->reset_deep_sleep_wakeup = NULL; + priv->fw_ready = 1; + + /* Initialize interrupt handling stuff. 
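+ *
+ * The SPI worker thread is created before the IRQ line is requested;
+ * the interrupt handler only does an up() on card->spi_ready, which
+ * the thread blocks on.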
*/ + card->run_thread = 1; + card->spi_thread = kthread_run(lbs_spi_thread, card, "lbs_spi_thread"); + if (IS_ERR(card->spi_thread)) { + card->run_thread = 0; + err = PTR_ERR(card->spi_thread); + lbs_pr_err("error creating SPI thread: err=%d\n", err); + goto remove_card; + } + if (sched_setscheduler(card->spi_thread, SCHED_FIFO, ¶m)) + lbs_pr_err("Error setting scheduler, using default.\n"); + + err = request_irq(spi->irq, if_spi_host_interrupt, + IRQF_TRIGGER_FALLING, "libertas_spi", card); + if (err) { + lbs_pr_err("can't get host irq line-- request_irq failed\n"); + goto terminate_thread; + } + + /* poke the IRQ handler so that we don't miss the first interrupt */ + up(&card->spi_ready); + + /* Start the card. + * This will call register_netdev, and we'll start + * getting interrupts... */ + err = lbs_start_card(priv); + if (err) + goto release_irq; + + lbs_deb_spi("Finished initializing WLAN module.\n"); + + /* successful exit */ + goto out; + +release_irq: + free_irq(spi->irq, card); +terminate_thread: + if_spi_terminate_spi_thread(card); +remove_card: + lbs_remove_card(priv); /* will call free_netdev */ +free_card: + free_if_spi_card(card); +out: + lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); + return err; +} + +static int __devexit libertas_spi_remove(struct spi_device *spi) +{ + struct if_spi_card *card = spi_get_drvdata(spi); + struct lbs_private *priv = card->priv; + + lbs_deb_spi("libertas_spi_remove\n"); + lbs_deb_enter(LBS_DEB_SPI); + + lbs_stop_card(priv); + lbs_remove_card(priv); /* will call free_netdev */ + + priv->surpriseremoved = 1; + free_irq(spi->irq, card); + if_spi_terminate_spi_thread(card); + if (card->pdata->teardown) + card->pdata->teardown(spi); + free_if_spi_card(card); + lbs_deb_leave(LBS_DEB_SPI); + return 0; +} + +static struct spi_driver libertas_spi_driver = { + .probe = if_spi_probe, + .remove = __devexit_p(libertas_spi_remove), + .driver = { + .name = "libertas_spi", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, +}; + +/* + * Module functions + */ + +static int __init if_spi_init_module(void) +{ + int ret = 0; + lbs_deb_enter(LBS_DEB_SPI); + printk(KERN_INFO "libertas_spi: Libertas SPI driver\n"); + ret = spi_register_driver(&libertas_spi_driver); + lbs_deb_leave(LBS_DEB_SPI); + return ret; +} + +static void __exit if_spi_exit_module(void) +{ + lbs_deb_enter(LBS_DEB_SPI); + spi_unregister_driver(&libertas_spi_driver); + lbs_deb_leave(LBS_DEB_SPI); +} + +module_init(if_spi_init_module); +module_exit(if_spi_exit_module); + +MODULE_DESCRIPTION("Libertas SPI WLAN Driver"); +MODULE_AUTHOR("Andrey Yurovsky , " + "Colin McCabe "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:libertas_spi"); diff --git a/drivers/net/wireless/libertas/if_spi.h b/drivers/net/wireless/libertas/if_spi.h new file mode 100644 index 0000000..f87eec4 --- /dev/null +++ b/drivers/net/wireless/libertas/if_spi.h @@ -0,0 +1,211 @@ +/* + * linux/drivers/net/wireless/libertas/if_spi.c + * + * Driver for Marvell SPI WLAN cards. + * + * Copyright 2008 Analog Devices Inc. + * + * Authors: + * Andrey Yurovsky + * Colin McCabe + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + +#ifndef _LBS_IF_SPI_H_ +#define _LBS_IF_SPI_H_ + +#define IPFIELD_ALIGN_OFFSET 2 +#define IF_SPI_CMD_BUF_SIZE 2400 + +/***************** Firmware *****************/ + +#define IF_SPI_FW_NAME_MAX 30 + +struct chip_ident { + u16 chip_id; + u16 name; +}; + +#define MAX_MAIN_FW_LOAD_CRC_ERR 10 + +/* Chunk size when loading the helper firmware */ +#define HELPER_FW_LOAD_CHUNK_SZ 64 + +/* Value to write to indicate end of helper firmware dnld */ +#define FIRMWARE_DNLD_OK 0x0000 + +/* Value to check once the main firmware is downloaded */ +#define SUCCESSFUL_FW_DOWNLOAD_MAGIC 0x88888888 + +/***************** SPI Interface Unit *****************/ +/* Masks used in SPI register read/write operations */ +#define IF_SPI_READ_OPERATION_MASK 0x0 +#define IF_SPI_WRITE_OPERATION_MASK 0x8000 + +/* SPI register offsets. 4-byte aligned. */ +#define IF_SPI_DEVICEID_CTRL_REG 0x00 /* DeviceID controller reg */ +#define IF_SPI_IO_READBASE_REG 0x04 /* Read I/O base reg */ +#define IF_SPI_IO_WRITEBASE_REG 0x08 /* Write I/O base reg */ +#define IF_SPI_IO_RDWRPORT_REG 0x0C /* Read/Write I/O port reg */ + +#define IF_SPI_CMD_READBASE_REG 0x10 /* Read command base reg */ +#define IF_SPI_CMD_WRITEBASE_REG 0x14 /* Write command base reg */ +#define IF_SPI_CMD_RDWRPORT_REG 0x18 /* Read/Write command port reg */ + +#define IF_SPI_DATA_READBASE_REG 0x1C /* Read data base reg */ +#define IF_SPI_DATA_WRITEBASE_REG 0x20 /* Write data base reg */ +#define IF_SPI_DATA_RDWRPORT_REG 0x24 /* Read/Write data port reg */ + +#define IF_SPI_SCRATCH_1_REG 0x28 /* Scratch reg 1 */ +#define IF_SPI_SCRATCH_2_REG 0x2C /* Scratch reg 2 */ +#define IF_SPI_SCRATCH_3_REG 0x30 /* Scratch reg 3 */ +#define IF_SPI_SCRATCH_4_REG 0x34 /* Scratch reg 4 */ + +#define IF_SPI_TX_FRAME_SEQ_NUM_REG 0x38 /* Tx frame sequence number reg */ +#define IF_SPI_TX_FRAME_STATUS_REG 0x3C /* Tx frame status reg */ + +#define IF_SPI_HOST_INT_CTRL_REG 0x40 /* Host interrupt controller reg */ + +#define IF_SPI_CARD_INT_CAUSE_REG 0x44 /* Card interrupt cause reg */ +#define IF_SPI_CARD_INT_STATUS_REG 0x48 /* Card interupt status reg */ +#define IF_SPI_CARD_INT_EVENT_MASK_REG 0x4C /* Card interrupt event mask */ +#define IF_SPI_CARD_INT_STATUS_MASK_REG 0x50 /* Card interrupt status mask */ + +#define IF_SPI_CARD_INT_RESET_SELECT_REG 0x54 /* Card interrupt reset select */ + +#define IF_SPI_HOST_INT_CAUSE_REG 0x58 /* Host interrupt cause reg */ +#define IF_SPI_HOST_INT_STATUS_REG 0x5C /* Host interrupt status reg */ +#define IF_SPI_HOST_INT_EVENT_MASK_REG 0x60 /* Host interrupt event mask */ +#define IF_SPI_HOST_INT_STATUS_MASK_REG 0x64 /* Host interrupt status mask */ +#define IF_SPI_HOST_INT_RESET_SELECT_REG 0x68 /* Host interrupt reset select */ + +#define IF_SPI_DELAY_READ_REG 0x6C /* Delay read reg */ +#define IF_SPI_SPU_BUS_MODE_REG 0x70 /* SPU BUS mode reg */ + +/***************** IF_SPI_DEVICEID_CTRL_REG *****************/ +#define IF_SPI_DEVICEID_CTRL_REG_TO_CARD_ID(dc) ((dc & 0xffff0000)>>16) +#define IF_SPI_DEVICEID_CTRL_REG_TO_CARD_REV(dc) (dc & 0x000000ff) + +/***************** IF_SPI_HOST_INT_CTRL_REG *****************/ +/** Host Interrupt Control bit : Wake up */ +#define IF_SPI_HICT_WAKE_UP (1<<0) +/** Host Interrupt Control bit : WLAN ready */ +#define IF_SPI_HICT_WLAN_READY (1<<1) +/*#define IF_SPI_HICT_FIFO_FIRST_HALF_EMPTY (1<<2) */ +/*#define IF_SPI_HICT_FIFO_SECOND_HALF_EMPTY (1<<3) */ +/*#define IF_SPI_HICT_IRQSRC_WLAN (1<<4) */ +/** Host Interrupt Control bit : Tx auto download */ +#define IF_SPI_HICT_TX_DOWNLOAD_OVER_AUTO (1<<5) +/** Host 
Interrupt Control bit : Rx auto upload */ +#define IF_SPI_HICT_RX_UPLOAD_OVER_AUTO (1<<6) +/** Host Interrupt Control bit : Command auto download */ +#define IF_SPI_HICT_CMD_DOWNLOAD_OVER_AUTO (1<<7) +/** Host Interrupt Control bit : Command auto upload */ +#define IF_SPI_HICT_CMD_UPLOAD_OVER_AUTO (1<<8) + +/***************** IF_SPI_CARD_INT_CAUSE_REG *****************/ +/** Card Interrupt Case bit : Tx download over */ +#define IF_SPI_CIC_TX_DOWNLOAD_OVER (1<<0) +/** Card Interrupt Case bit : Rx upload over */ +#define IF_SPI_CIC_RX_UPLOAD_OVER (1<<1) +/** Card Interrupt Case bit : Command download over */ +#define IF_SPI_CIC_CMD_DOWNLOAD_OVER (1<<2) +/** Card Interrupt Case bit : Host event */ +#define IF_SPI_CIC_HOST_EVENT (1<<3) +/** Card Interrupt Case bit : Command upload over */ +#define IF_SPI_CIC_CMD_UPLOAD_OVER (1<<4) +/** Card Interrupt Case bit : Power down */ +#define IF_SPI_CIC_POWER_DOWN (1<<5) + +/***************** IF_SPI_CARD_INT_STATUS_REG *****************/ +#define IF_SPI_CIS_TX_DOWNLOAD_OVER (1<<0) +#define IF_SPI_CIS_RX_UPLOAD_OVER (1<<1) +#define IF_SPI_CIS_CMD_DOWNLOAD_OVER (1<<2) +#define IF_SPI_CIS_HOST_EVENT (1<<3) +#define IF_SPI_CIS_CMD_UPLOAD_OVER (1<<4) +#define IF_SPI_CIS_POWER_DOWN (1<<5) + +/***************** IF_SPI_HOST_INT_CAUSE_REG *****************/ +#define IF_SPI_HICU_TX_DOWNLOAD_RDY (1<<0) +#define IF_SPI_HICU_RX_UPLOAD_RDY (1<<1) +#define IF_SPI_HICU_CMD_DOWNLOAD_RDY (1<<2) +#define IF_SPI_HICU_CARD_EVENT (1<<3) +#define IF_SPI_HICU_CMD_UPLOAD_RDY (1<<4) +#define IF_SPI_HICU_IO_WR_FIFO_OVERFLOW (1<<5) +#define IF_SPI_HICU_IO_RD_FIFO_UNDERFLOW (1<<6) +#define IF_SPI_HICU_DATA_WR_FIFO_OVERFLOW (1<<7) +#define IF_SPI_HICU_DATA_RD_FIFO_UNDERFLOW (1<<8) +#define IF_SPI_HICU_CMD_WR_FIFO_OVERFLOW (1<<9) +#define IF_SPI_HICU_CMD_RD_FIFO_UNDERFLOW (1<<10) + +/***************** IF_SPI_HOST_INT_STATUS_REG *****************/ +/** Host Interrupt Status bit : Tx download ready */ +#define IF_SPI_HIST_TX_DOWNLOAD_RDY (1<<0) +/** Host Interrupt Status bit : Rx upload ready */ +#define IF_SPI_HIST_RX_UPLOAD_RDY (1<<1) +/** Host Interrupt Status bit : Command download ready */ +#define IF_SPI_HIST_CMD_DOWNLOAD_RDY (1<<2) +/** Host Interrupt Status bit : Card event */ +#define IF_SPI_HIST_CARD_EVENT (1<<3) +/** Host Interrupt Status bit : Command upload ready */ +#define IF_SPI_HIST_CMD_UPLOAD_RDY (1<<4) +/** Host Interrupt Status bit : I/O write FIFO overflow */ +#define IF_SPI_HIST_IO_WR_FIFO_OVERFLOW (1<<5) +/** Host Interrupt Status bit : I/O read FIFO underflow */ +#define IF_SPI_HIST_IO_RD_FIFO_UNDRFLOW (1<<6) +/** Host Interrupt Status bit : Data write FIFO overflow */ +#define IF_SPI_HIST_DATA_WR_FIFO_OVERFLOW (1<<7) +/** Host Interrupt Status bit : Data read FIFO underflow */ +#define IF_SPI_HIST_DATA_RD_FIFO_UNDERFLOW (1<<8) +/** Host Interrupt Status bit : Command write FIFO overflow */ +#define IF_SPI_HIST_CMD_WR_FIFO_OVERFLOW (1<<9) +/** Host Interrupt Status bit : Command read FIFO underflow */ +#define IF_SPI_HIST_CMD_RD_FIFO_UNDERFLOW (1<<10) + +/***************** IF_SPI_HOST_INT_STATUS_MASK_REG *****************/ +/** Host Interrupt Status Mask bit : Tx download ready */ +#define IF_SPI_HISM_TX_DOWNLOAD_RDY (1<<0) +/** Host Interrupt Status Mask bit : Rx upload ready */ +#define IF_SPI_HISM_RX_UPLOAD_RDY (1<<1) +/** Host Interrupt Status Mask bit : Command download ready */ +#define IF_SPI_HISM_CMD_DOWNLOAD_RDY (1<<2) +/** Host Interrupt Status Mask bit : Card event */ +#define IF_SPI_HISM_CARDEVENT (1<<3) +/** Host Interrupt Status Mask bit : 
Command upload ready */ +#define IF_SPI_HISM_CMD_UPLOAD_RDY (1<<4) +/** Host Interrupt Status Mask bit : I/O write FIFO overflow */ +#define IF_SPI_HISM_IO_WR_FIFO_OVERFLOW (1<<5) +/** Host Interrupt Status Mask bit : I/O read FIFO underflow */ +#define IF_SPI_HISM_IO_RD_FIFO_UNDERFLOW (1<<6) +/** Host Interrupt Status Mask bit : Data write FIFO overflow */ +#define IF_SPI_HISM_DATA_WR_FIFO_OVERFLOW (1<<7) +/** Host Interrupt Status Mask bit : Data write FIFO underflow */ +#define IF_SPI_HISM_DATA_RD_FIFO_UNDERFLOW (1<<8) +/** Host Interrupt Status Mask bit : Command write FIFO overflow */ +#define IF_SPI_HISM_CMD_WR_FIFO_OVERFLOW (1<<9) +/** Host Interrupt Status Mask bit : Command write FIFO underflow */ +#define IF_SPI_HISM_CMD_RD_FIFO_UNDERFLOW (1<<10) + +/***************** IF_SPI_SPU_BUS_MODE_REG *****************/ +/* SCK edge on which the WLAN module outputs data on MISO */ +#define IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_FALLING 0x8 +#define IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_RISING 0x0 + +/* In a SPU read operation, there is a delay between writing the SPU + * register name and getting back data from the WLAN module. + * This can be specified in terms of nanoseconds or in terms of dummy + * clock cycles which the master must output before receiving a response. */ +#define IF_SPI_BUS_MODE_DELAY_METHOD_DUMMY_CLOCK 0x4 +#define IF_SPI_BUS_MODE_DELAY_METHOD_TIMED 0x0 + +/* Some different modes of SPI operation */ +#define IF_SPI_BUS_MODE_8_BIT_ADDRESS_16_BIT_DATA 0x00 +#define IF_SPI_BUS_MODE_8_BIT_ADDRESS_32_BIT_DATA 0x01 +#define IF_SPI_BUS_MODE_16_BIT_ADDRESS_16_BIT_DATA 0x02 +#define IF_SPI_BUS_MODE_16_BIT_ADDRESS_32_BIT_DATA 0x03 + +#endif diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c new file mode 100644 index 0000000..65e1745 --- /dev/null +++ b/drivers/net/wireless/libertas/if_usb.c @@ -0,0 +1,1117 @@ +/** + * This file contains functions used in USB interface module. + */ +#include +#include +#include +#include +#include + +#ifdef CONFIG_OLPC +#include +#endif + +#define DRV_NAME "usb8xxx" + +#include "host.h" +#include "decl.h" +#include "defs.h" +#include "dev.h" +#include "cmd.h" +#include "if_usb.h" + +#define INSANEDEBUG 0 +#define lbs_deb_usb2(...) 
do { if (INSANEDEBUG) lbs_deb_usbd(__VA_ARGS__); } while (0) + +#define MESSAGE_HEADER_LEN 4 + +static char *lbs_fw_name = "usb8388.bin"; +module_param_named(fw_name, lbs_fw_name, charp, 0644); + +MODULE_FIRMWARE("usb8388.bin"); + +static struct usb_device_id if_usb_table[] = { + /* Enter the device signature inside */ + { USB_DEVICE(0x1286, 0x2001) }, + { USB_DEVICE(0x05a3, 0x8388) }, + {} /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, if_usb_table); + +static void if_usb_receive(struct urb *urb); +static void if_usb_receive_fwload(struct urb *urb); +static int __if_usb_prog_firmware(struct if_usb_card *cardp, + const char *fwname, int cmd); +static int if_usb_prog_firmware(struct if_usb_card *cardp, + const char *fwname, int cmd); +static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type, + uint8_t *payload, uint16_t nb); +static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, + uint16_t nb); +static void if_usb_free(struct if_usb_card *cardp); +static int if_usb_submit_rx_urb(struct if_usb_card *cardp); +static int if_usb_reset_device(struct if_usb_card *cardp); + +/* sysfs hooks */ + +/** + * Set function to write firmware to device's persistent memory + */ +static ssize_t if_usb_firmware_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + struct if_usb_card *cardp = priv->card; + int ret; + + ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW); + if (ret == 0) + return count; + + return ret; +} + +/** + * lbs_flash_fw attribute to be exported per ethX interface through sysfs + * (/sys/class/net/ethX/lbs_flash_fw). Use this like so to write firmware to + * the device's persistent memory: + * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_fw + */ +static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set); + +/** + * Set function to write firmware to device's persistent memory + */ +static ssize_t if_usb_boot2_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + struct if_usb_card *cardp = priv->card; + int ret; + + ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2); + if (ret == 0) + return count; + + return ret; +} + +/** + * lbs_flash_boot2 attribute to be exported per ethX interface through sysfs + * (/sys/class/net/ethX/lbs_flash_boot2). Use this like so to write firmware + * to the device's persistent memory: + * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_boot2 + */ +static DEVICE_ATTR(lbs_flash_boot2, 0200, NULL, if_usb_boot2_set); + +/** + * @brief call back function to handle the status of the URB + * @param urb pointer to urb structure + * @return N/A + */ +static void if_usb_write_bulk_callback(struct urb *urb) +{ + struct if_usb_card *cardp = (struct if_usb_card *) urb->context; + + /* handle the transmission complete validations */ + + if (urb->status == 0) { + struct lbs_private *priv = cardp->priv; + + lbs_deb_usb2(&urb->dev->dev, "URB status is successful\n"); + lbs_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n", + urb->actual_length); + + /* Boot commands such as UPDATE_FW and UPDATE_BOOT2 are not + * passed up to the lbs level. 
+ */ + if (priv && priv->dnld_sent != DNLD_BOOTCMD_SENT) + lbs_host_to_card_done(priv); + } else { + /* print the failure status number for debug */ + lbs_pr_info("URB in failure status: %d\n", urb->status); + } + + return; +} + +/** + * @brief free tx/rx urb, skb and rx buffer + * @param cardp pointer if_usb_card + * @return N/A + */ +static void if_usb_free(struct if_usb_card *cardp) +{ + lbs_deb_enter(LBS_DEB_USB); + + /* Unlink tx & rx urb */ + usb_kill_urb(cardp->tx_urb); + usb_kill_urb(cardp->rx_urb); + + usb_free_urb(cardp->tx_urb); + cardp->tx_urb = NULL; + + usb_free_urb(cardp->rx_urb); + cardp->rx_urb = NULL; + + kfree(cardp->ep_out_buf); + cardp->ep_out_buf = NULL; + + lbs_deb_leave(LBS_DEB_USB); +} + +static void if_usb_setup_firmware(struct lbs_private *priv) +{ + struct if_usb_card *cardp = priv->card; + struct cmd_ds_set_boot2_ver b2_cmd; + struct cmd_ds_802_11_fw_wake_method wake_method; + + b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd)); + b2_cmd.action = 0; + b2_cmd.version = cardp->boot2_version; + + if (lbs_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd)) + lbs_deb_usb("Setting boot2 version failed\n"); + + priv->wol_gpio = 2; /* Wake via GPIO2... */ + priv->wol_gap = 20; /* ... after 20ms */ + lbs_host_sleep_cfg(priv, EHS_WAKE_ON_UNICAST_DATA, + (struct wol_config *) NULL); + + wake_method.hdr.size = cpu_to_le16(sizeof(wake_method)); + wake_method.action = cpu_to_le16(CMD_ACT_GET); + if (lbs_cmd_with_response(priv, CMD_802_11_FW_WAKE_METHOD, &wake_method)) { + lbs_pr_info("Firmware does not seem to support PS mode\n"); + priv->fwcapinfo &= ~FW_CAPINFO_PS; + } else { + if (le16_to_cpu(wake_method.method) == CMD_WAKE_METHOD_COMMAND_INT) { + lbs_deb_usb("Firmware seems to support PS with wake-via-command\n"); + } else { + /* The versions which boot up this way don't seem to + work even if we set it to the command interrupt */ + priv->fwcapinfo &= ~FW_CAPINFO_PS; + lbs_pr_info("Firmware doesn't wake via command interrupt; disabling PS mode\n"); + } + } +} + +static void if_usb_fw_timeo(unsigned long priv) +{ + struct if_usb_card *cardp = (void *)priv; + + if (cardp->fwdnldover) { + lbs_deb_usb("Download complete, no event. 
Assuming success\n"); + } else { + lbs_pr_err("Download timed out\n"); + cardp->surprise_removed = 1; + } + wake_up(&cardp->fw_wq); +} + +#ifdef CONFIG_OLPC +static void if_usb_reset_olpc_card(struct lbs_private *priv) +{ + printk(KERN_CRIT "Resetting OLPC wireless via EC...\n"); + olpc_ec_cmd(0x25, NULL, 0, NULL, 0); +} +#endif + +/** + * @brief sets the configuration values + * @param ifnum interface number + * @param id pointer to usb_device_id + * @return 0 on success, error code on failure + */ +static int if_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev; + struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *endpoint; + struct lbs_private *priv; + struct if_usb_card *cardp; + int i; + + udev = interface_to_usbdev(intf); + + cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL); + if (!cardp) { + lbs_pr_err("Out of memory allocating private data.\n"); + goto error; + } + + setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp); + init_waitqueue_head(&cardp->fw_wq); + + cardp->udev = udev; + iface_desc = intf->cur_altsetting; + + lbs_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X" + " bDeviceSubClass = 0x%X, bDeviceProtocol = 0x%X\n", + le16_to_cpu(udev->descriptor.bcdUSB), + udev->descriptor.bDeviceClass, + udev->descriptor.bDeviceSubClass, + udev->descriptor.bDeviceProtocol); + + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + if (usb_endpoint_is_bulk_in(endpoint)) { + cardp->ep_in_size = le16_to_cpu(endpoint->wMaxPacketSize); + cardp->ep_in = usb_endpoint_num(endpoint); + + lbs_deb_usbd(&udev->dev, "in_endpoint = %d\n", cardp->ep_in); + lbs_deb_usbd(&udev->dev, "Bulk in size is %d\n", cardp->ep_in_size); + + } else if (usb_endpoint_is_bulk_out(endpoint)) { + cardp->ep_out_size = le16_to_cpu(endpoint->wMaxPacketSize); + cardp->ep_out = usb_endpoint_num(endpoint); + + lbs_deb_usbd(&udev->dev, "out_endpoint = %d\n", cardp->ep_out); + lbs_deb_usbd(&udev->dev, "Bulk out size is %d\n", cardp->ep_out_size); + } + } + if (!cardp->ep_out_size || !cardp->ep_in_size) { + lbs_deb_usbd(&udev->dev, "Endpoints not found\n"); + goto dealloc; + } + if (!(cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL))) { + lbs_deb_usbd(&udev->dev, "Rx URB allocation failed\n"); + goto dealloc; + } + if (!(cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL))) { + lbs_deb_usbd(&udev->dev, "Tx URB allocation failed\n"); + goto dealloc; + } + cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE, GFP_KERNEL); + if (!cardp->ep_out_buf) { + lbs_deb_usbd(&udev->dev, "Could not allocate buffer\n"); + goto dealloc; + } + + /* Upload firmware */ + if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) { + lbs_deb_usbd(&udev->dev, "FW upload failed\n"); + goto err_prog_firmware; + } + + if (!(priv = lbs_add_card(cardp, &udev->dev))) + goto err_prog_firmware; + + cardp->priv = priv; + cardp->priv->fw_ready = 1; + + priv->hw_host_to_card = if_usb_host_to_card; + priv->enter_deep_sleep = NULL; + priv->exit_deep_sleep = NULL; + priv->reset_deep_sleep_wakeup = NULL; +#ifdef CONFIG_OLPC + if (machine_is_olpc()) + priv->reset_card = if_usb_reset_olpc_card; +#endif + + cardp->boot2_version = udev->descriptor.bcdDevice; + + if_usb_submit_rx_urb(cardp); + + if (lbs_start_card(priv)) + goto err_start_card; + + if_usb_setup_firmware(priv); + + usb_get_dev(udev); + usb_set_intfdata(intf, cardp); + + if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_fw)) + 
lbs_pr_err("cannot register lbs_flash_fw attribute\n"); + + if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2)) + lbs_pr_err("cannot register lbs_flash_boot2 attribute\n"); + + return 0; + +err_start_card: + lbs_remove_card(priv); +err_prog_firmware: + if_usb_reset_device(cardp); +dealloc: + if_usb_free(cardp); + +error: + return -ENOMEM; +} + +/** + * @brief free resource and cleanup + * @param intf USB interface structure + * @return N/A + */ +static void if_usb_disconnect(struct usb_interface *intf) +{ + struct if_usb_card *cardp = usb_get_intfdata(intf); + struct lbs_private *priv = (struct lbs_private *) cardp->priv; + + lbs_deb_enter(LBS_DEB_MAIN); + + device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2); + device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_fw); + + cardp->surprise_removed = 1; + + if (priv) { + priv->surpriseremoved = 1; + lbs_stop_card(priv); + lbs_remove_card(priv); + } + + /* Unlink and free urb */ + if_usb_free(cardp); + + usb_set_intfdata(intf, NULL); + usb_put_dev(interface_to_usbdev(intf)); + + lbs_deb_leave(LBS_DEB_MAIN); +} + +/** + * @brief This function download FW + * @param priv pointer to struct lbs_private + * @return 0 + */ +static int if_usb_send_fw_pkt(struct if_usb_card *cardp) +{ + struct fwdata *fwdata = cardp->ep_out_buf; + const uint8_t *firmware = cardp->fw->data; + + /* If we got a CRC failure on the last block, back + up and retry it */ + if (!cardp->CRC_OK) { + cardp->totalbytes = cardp->fwlastblksent; + cardp->fwseqnum--; + } + + lbs_deb_usb2(&cardp->udev->dev, "totalbytes = %d\n", + cardp->totalbytes); + + /* struct fwdata (which we sent to the card) has an + extra __le32 field in between the header and the data, + which is not in the struct fwheader in the actual + firmware binary. Insert the seqnum in the middle... 
*/ + memcpy(&fwdata->hdr, &firmware[cardp->totalbytes], + sizeof(struct fwheader)); + + cardp->fwlastblksent = cardp->totalbytes; + cardp->totalbytes += sizeof(struct fwheader); + + memcpy(fwdata->data, &firmware[cardp->totalbytes], + le32_to_cpu(fwdata->hdr.datalength)); + + lbs_deb_usb2(&cardp->udev->dev, "Data length = %d\n", + le32_to_cpu(fwdata->hdr.datalength)); + + fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum); + cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength); + + usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) + + le32_to_cpu(fwdata->hdr.datalength)); + + if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) { + lbs_deb_usb2(&cardp->udev->dev, "There are data to follow\n"); + lbs_deb_usb2(&cardp->udev->dev, "seqnum = %d totalbytes = %d\n", + cardp->fwseqnum, cardp->totalbytes); + } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) { + lbs_deb_usb2(&cardp->udev->dev, "Host has finished FW downloading\n"); + lbs_deb_usb2(&cardp->udev->dev, "Donwloading FW JUMP BLOCK\n"); + + cardp->fwfinalblk = 1; + } + + lbs_deb_usb2(&cardp->udev->dev, "Firmware download done; size %d\n", + cardp->totalbytes); + + return 0; +} + +static int if_usb_reset_device(struct if_usb_card *cardp) +{ + struct cmd_ds_command *cmd = cardp->ep_out_buf + 4; + int ret; + + lbs_deb_enter(LBS_DEB_USB); + + *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); + + cmd->command = cpu_to_le16(CMD_802_11_RESET); + cmd->size = cpu_to_le16(sizeof(struct cmd_header)); + cmd->result = cpu_to_le16(0); + cmd->seqnum = cpu_to_le16(0x5a5a); + usb_tx_block(cardp, cardp->ep_out_buf, 4 + sizeof(struct cmd_header)); + + msleep(100); + ret = usb_reset_device(cardp->udev); + msleep(100); + +#ifdef CONFIG_OLPC + if (ret && machine_is_olpc()) + if_usb_reset_olpc_card(NULL); +#endif + + lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); + + return ret; +} + +/** + * @brief This function transfer the data to the device. 
+ * @param priv pointer to struct lbs_private + * @param payload pointer to payload data + * @param nb data length + * @return 0 or -1 + */ +static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, uint16_t nb) +{ + int ret = -1; + + /* check if device is removed */ + if (cardp->surprise_removed) { + lbs_deb_usbd(&cardp->udev->dev, "Device removed\n"); + goto tx_ret; + } + + usb_fill_bulk_urb(cardp->tx_urb, cardp->udev, + usb_sndbulkpipe(cardp->udev, + cardp->ep_out), + payload, nb, if_usb_write_bulk_callback, cardp); + + cardp->tx_urb->transfer_flags |= URB_ZERO_PACKET; + + if ((ret = usb_submit_urb(cardp->tx_urb, GFP_ATOMIC))) { + lbs_deb_usbd(&cardp->udev->dev, "usb_submit_urb failed: %d\n", ret); + ret = -1; + } else { + lbs_deb_usb2(&cardp->udev->dev, "usb_submit_urb success\n"); + ret = 0; + } + +tx_ret: + return ret; +} + +static int __if_usb_submit_rx_urb(struct if_usb_card *cardp, + void (*callbackfn)(struct urb *urb)) +{ + struct sk_buff *skb; + int ret = -1; + + if (!(skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE))) { + lbs_pr_err("No free skb\n"); + goto rx_ret; + } + + cardp->rx_skb = skb; + + /* Fill the receive configuration URB and initialise the Rx call back */ + usb_fill_bulk_urb(cardp->rx_urb, cardp->udev, + usb_rcvbulkpipe(cardp->udev, cardp->ep_in), + skb->data + IPFIELD_ALIGN_OFFSET, + MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, + cardp); + + cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; + + lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb); + if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) { + lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret); + kfree_skb(skb); + cardp->rx_skb = NULL; + ret = -1; + } else { + lbs_deb_usb2(&cardp->udev->dev, "Submit Rx URB success\n"); + ret = 0; + } + +rx_ret: + return ret; +} + +static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp) +{ + return __if_usb_submit_rx_urb(cardp, &if_usb_receive_fwload); +} + +static int if_usb_submit_rx_urb(struct if_usb_card *cardp) +{ + return __if_usb_submit_rx_urb(cardp, &if_usb_receive); +} + +static void if_usb_receive_fwload(struct urb *urb) +{ + struct if_usb_card *cardp = urb->context; + struct sk_buff *skb = cardp->rx_skb; + struct fwsyncheader *syncfwheader; + struct bootcmdresp bootcmdresp; + + if (urb->status) { + lbs_deb_usbd(&cardp->udev->dev, + "URB status is failed during fw load\n"); + kfree_skb(skb); + return; + } + + if (cardp->fwdnldover) { + __le32 *tmp = (__le32 *)(skb->data + IPFIELD_ALIGN_OFFSET); + + if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) && + tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) { + lbs_pr_info("Firmware ready event received\n"); + wake_up(&cardp->fw_wq); + } else { + lbs_deb_usb("Waiting for confirmation; got %x %x\n", + le32_to_cpu(tmp[0]), le32_to_cpu(tmp[1])); + if_usb_submit_rx_urb_fwload(cardp); + } + kfree_skb(skb); + return; + } + if (cardp->bootcmdresp <= 0) { + memcpy (&bootcmdresp, skb->data + IPFIELD_ALIGN_OFFSET, + sizeof(bootcmdresp)); + + if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) { + kfree_skb(skb); + if_usb_submit_rx_urb_fwload(cardp); + cardp->bootcmdresp = BOOT_CMD_RESP_OK; + lbs_deb_usbd(&cardp->udev->dev, + "Received valid boot command response\n"); + return; + } + if (bootcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) { + if (bootcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) || + bootcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) || + bootcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) { + if (!cardp->bootcmdresp) + 
lbs_pr_info("Firmware already seems alive; resetting\n"); + cardp->bootcmdresp = -1; + } else { + lbs_pr_info("boot cmd response wrong magic number (0x%x)\n", + le32_to_cpu(bootcmdresp.magic)); + } + } else if ((bootcmdresp.cmd != BOOT_CMD_FW_BY_USB) && + (bootcmdresp.cmd != BOOT_CMD_UPDATE_FW) && + (bootcmdresp.cmd != BOOT_CMD_UPDATE_BOOT2)) { + lbs_pr_info("boot cmd response cmd_tag error (%d)\n", + bootcmdresp.cmd); + } else if (bootcmdresp.result != BOOT_CMD_RESP_OK) { + lbs_pr_info("boot cmd response result error (%d)\n", + bootcmdresp.result); + } else { + cardp->bootcmdresp = 1; + lbs_deb_usbd(&cardp->udev->dev, + "Received valid boot command response\n"); + } + kfree_skb(skb); + if_usb_submit_rx_urb_fwload(cardp); + return; + } + + syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC); + if (!syncfwheader) { + lbs_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n"); + kfree_skb(skb); + return; + } + + memcpy(syncfwheader, skb->data + IPFIELD_ALIGN_OFFSET, + sizeof(struct fwsyncheader)); + + if (!syncfwheader->cmd) { + lbs_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n"); + lbs_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n", + le32_to_cpu(syncfwheader->seqnum)); + cardp->CRC_OK = 1; + } else { + lbs_deb_usbd(&cardp->udev->dev, "FW received Blk with CRC error\n"); + cardp->CRC_OK = 0; + } + + kfree_skb(skb); + + /* Give device 5s to either write firmware to its RAM or eeprom */ + mod_timer(&cardp->fw_timeout, jiffies + (HZ*5)); + + if (cardp->fwfinalblk) { + cardp->fwdnldover = 1; + goto exit; + } + + if_usb_send_fw_pkt(cardp); + + exit: + if_usb_submit_rx_urb_fwload(cardp); + + kfree(syncfwheader); + + return; +} + +#define MRVDRV_MIN_PKT_LEN 30 + +static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb, + struct if_usb_card *cardp, + struct lbs_private *priv) +{ + if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN + || recvlength < MRVDRV_MIN_PKT_LEN) { + lbs_deb_usbd(&cardp->udev->dev, "Packet length is Invalid\n"); + kfree_skb(skb); + return; + } + + skb_reserve(skb, IPFIELD_ALIGN_OFFSET); + skb_put(skb, recvlength); + skb_pull(skb, MESSAGE_HEADER_LEN); + + lbs_process_rxed_packet(priv, skb); +} + +static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, + struct sk_buff *skb, + struct if_usb_card *cardp, + struct lbs_private *priv) +{ + u8 i; + + if (recvlength > LBS_CMD_BUFFER_SIZE) { + lbs_deb_usbd(&cardp->udev->dev, + "The receive buffer is too large\n"); + kfree_skb(skb); + return; + } + + BUG_ON(!in_interrupt()); + + spin_lock(&priv->driver_lock); + + i = (priv->resp_idx == 0) ? 1 : 0; + BUG_ON(priv->resp_len[i]); + priv->resp_len[i] = (recvlength - MESSAGE_HEADER_LEN); + memcpy(priv->resp_buf[i], recvbuff + MESSAGE_HEADER_LEN, + priv->resp_len[i]); + kfree_skb(skb); + lbs_notify_command_response(priv, i); + + spin_unlock(&priv->driver_lock); + + lbs_deb_usbd(&cardp->udev->dev, + "Wake up main thread to handle cmd response\n"); +} + +/** + * @brief This function reads of the packet into the upload buff, + * wake up the main thread and initialise the Rx callack. 
+ * + * @param urb pointer to struct urb + * @return N/A + */ +static void if_usb_receive(struct urb *urb) +{ + struct if_usb_card *cardp = urb->context; + struct sk_buff *skb = cardp->rx_skb; + struct lbs_private *priv = cardp->priv; + int recvlength = urb->actual_length; + uint8_t *recvbuff = NULL; + uint32_t recvtype = 0; + __le32 *pkt = (__le32 *)(skb->data + IPFIELD_ALIGN_OFFSET); + uint32_t event; + + lbs_deb_enter(LBS_DEB_USB); + + if (recvlength) { + if (urb->status) { + lbs_deb_usbd(&cardp->udev->dev, "RX URB failed: %d\n", + urb->status); + kfree_skb(skb); + goto setup_for_next; + } + + recvbuff = skb->data + IPFIELD_ALIGN_OFFSET; + recvtype = le32_to_cpu(pkt[0]); + lbs_deb_usbd(&cardp->udev->dev, + "Recv length = 0x%x, Recv type = 0x%X\n", + recvlength, recvtype); + } else if (urb->status) { + kfree_skb(skb); + goto rx_exit; + } + + switch (recvtype) { + case CMD_TYPE_DATA: + process_cmdtypedata(recvlength, skb, cardp, priv); + break; + + case CMD_TYPE_REQUEST: + process_cmdrequest(recvlength, recvbuff, skb, cardp, priv); + break; + + case CMD_TYPE_INDICATION: + /* Event handling */ + event = le32_to_cpu(pkt[1]); + lbs_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n", event); + kfree_skb(skb); + + /* Icky undocumented magic special case */ + if (event & 0xffff0000) { + u32 trycount = (event & 0xffff0000) >> 16; + + lbs_send_tx_feedback(priv, trycount); + } else + lbs_queue_event(priv, event & 0xFF); + break; + + default: + lbs_deb_usbd(&cardp->udev->dev, "Unknown command type 0x%X\n", + recvtype); + kfree_skb(skb); + break; + } + +setup_for_next: + if_usb_submit_rx_urb(cardp); +rx_exit: + lbs_deb_leave(LBS_DEB_USB); +} + +/** + * @brief This function downloads data to FW + * @param priv pointer to struct lbs_private structure + * @param type type of data + * @param buf pointer to data buffer + * @param len number of bytes + * @return 0 or -1 + */ +static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type, + uint8_t *payload, uint16_t nb) +{ + struct if_usb_card *cardp = priv->card; + + lbs_deb_usbd(&cardp->udev->dev,"*** type = %u\n", type); + lbs_deb_usbd(&cardp->udev->dev,"size after = %d\n", nb); + + if (type == MVMS_CMD) { + *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); + priv->dnld_sent = DNLD_CMD_SENT; + } else { + *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_DATA); + priv->dnld_sent = DNLD_DATA_SENT; + } + + memcpy((cardp->ep_out_buf + MESSAGE_HEADER_LEN), payload, nb); + + return usb_tx_block(cardp, cardp->ep_out_buf, nb + MESSAGE_HEADER_LEN); +} + +/** + * @brief This function issues Boot command to the Boot2 code + * @param ivalue 1:Boot from FW by USB-Download + * 2:Boot from FW in EEPROM + * @return 0 + */ +static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue) +{ + struct bootcmd *bootcmd = cardp->ep_out_buf; + + /* Prepare command */ + bootcmd->magic = cpu_to_le32(BOOT_CMD_MAGIC_NUMBER); + bootcmd->cmd = ivalue; + memset(bootcmd->pad, 0, sizeof(bootcmd->pad)); + + /* Issue command */ + usb_tx_block(cardp, cardp->ep_out_buf, sizeof(*bootcmd)); + + return 0; +} + + +/** + * @brief This function checks the validity of Boot2/FW image. 
+ * + * @param data pointer to image + * len image length + * @return 0 or -1 + */ +static int check_fwfile_format(const uint8_t *data, uint32_t totlen) +{ + uint32_t bincmd, exit; + uint32_t blksize, offset, len; + int ret; + + ret = 1; + exit = len = 0; + + do { + struct fwheader *fwh = (void *)data; + + bincmd = le32_to_cpu(fwh->dnldcmd); + blksize = le32_to_cpu(fwh->datalength); + switch (bincmd) { + case FW_HAS_DATA_TO_RECV: + offset = sizeof(struct fwheader) + blksize; + data += offset; + len += offset; + if (len >= totlen) + exit = 1; + break; + case FW_HAS_LAST_BLOCK: + exit = 1; + ret = 0; + break; + default: + exit = 1; + break; + } + } while (!exit); + + if (ret) + lbs_pr_err("firmware file format check FAIL\n"); + else + lbs_deb_fw("firmware file format check PASS\n"); + + return ret; +} + + +/** +* @brief This function programs the firmware subject to cmd +* +* @param cardp the if_usb_card descriptor +* fwname firmware or boot2 image file name +* cmd either BOOT_CMD_FW_BY_USB, BOOT_CMD_UPDATE_FW, +* or BOOT_CMD_UPDATE_BOOT2. +* @return 0 or error code +*/ +static int if_usb_prog_firmware(struct if_usb_card *cardp, + const char *fwname, int cmd) +{ + struct lbs_private *priv = cardp->priv; + unsigned long flags, caps; + int ret; + + caps = priv->fwcapinfo; + if (((cmd == BOOT_CMD_UPDATE_FW) && !(caps & FW_CAPINFO_FIRMWARE_UPGRADE)) || + ((cmd == BOOT_CMD_UPDATE_BOOT2) && !(caps & FW_CAPINFO_BOOT2_UPGRADE))) + return -EOPNOTSUPP; + + /* Ensure main thread is idle. */ + spin_lock_irqsave(&priv->driver_lock, flags); + while (priv->cur_cmd != NULL || priv->dnld_sent != DNLD_RES_RECEIVED) { + spin_unlock_irqrestore(&priv->driver_lock, flags); + if (wait_event_interruptible(priv->waitq, + (priv->cur_cmd == NULL && + priv->dnld_sent == DNLD_RES_RECEIVED))) { + return -ERESTARTSYS; + } + spin_lock_irqsave(&priv->driver_lock, flags); + } + priv->dnld_sent = DNLD_BOOTCMD_SENT; + spin_unlock_irqrestore(&priv->driver_lock, flags); + + ret = __if_usb_prog_firmware(cardp, fwname, cmd); + + spin_lock_irqsave(&priv->driver_lock, flags); + priv->dnld_sent = DNLD_RES_RECEIVED; + spin_unlock_irqrestore(&priv->driver_lock, flags); + + wake_up_interruptible(&priv->waitq); + + return ret; +} + +static int __if_usb_prog_firmware(struct if_usb_card *cardp, + const char *fwname, int cmd) +{ + int i = 0; + static int reset_count = 10; + int ret = 0; + + lbs_deb_enter(LBS_DEB_USB); + + ret = request_firmware(&cardp->fw, fwname, &cardp->udev->dev); + if (ret < 0) { + lbs_pr_err("request_firmware() failed with %#x\n", ret); + lbs_pr_err("firmware %s not found\n", fwname); + goto done; + } + + if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) { + ret = -EINVAL; + goto release_fw; + } + + /* Cancel any pending usb business */ + usb_kill_urb(cardp->rx_urb); + usb_kill_urb(cardp->tx_urb); + + cardp->fwlastblksent = 0; + cardp->fwdnldover = 0; + cardp->totalbytes = 0; + cardp->fwfinalblk = 0; + cardp->bootcmdresp = 0; + +restart: + if (if_usb_submit_rx_urb_fwload(cardp) < 0) { + lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n"); + ret = -EIO; + goto release_fw; + } + + cardp->bootcmdresp = 0; + do { + int j = 0; + i++; + if_usb_issue_boot_command(cardp, cmd); + /* wait for command response */ + do { + j++; + msleep_interruptible(100); + } while (cardp->bootcmdresp == 0 && j < 10); + } while (cardp->bootcmdresp == 0 && i < 5); + + if (cardp->bootcmdresp == BOOT_CMD_RESP_NOT_SUPPORTED) { + /* Return to normal operation */ + ret = -EOPNOTSUPP; + usb_kill_urb(cardp->rx_urb); + 
usb_kill_urb(cardp->tx_urb); + if (if_usb_submit_rx_urb(cardp) < 0) + ret = -EIO; + goto release_fw; + } else if (cardp->bootcmdresp <= 0) { + if (--reset_count >= 0) { + if_usb_reset_device(cardp); + goto restart; + } + ret = -EIO; + goto release_fw; + } + + i = 0; + + cardp->totalbytes = 0; + cardp->fwlastblksent = 0; + cardp->CRC_OK = 1; + cardp->fwdnldover = 0; + cardp->fwseqnum = -1; + cardp->totalbytes = 0; + cardp->fwfinalblk = 0; + + /* Send the first firmware packet... */ + if_usb_send_fw_pkt(cardp); + + /* ... and wait for the process to complete */ + wait_event_interruptible(cardp->fw_wq, cardp->surprise_removed || cardp->fwdnldover); + + del_timer_sync(&cardp->fw_timeout); + usb_kill_urb(cardp->rx_urb); + + if (!cardp->fwdnldover) { + lbs_pr_info("failed to load fw, resetting device!\n"); + if (--reset_count >= 0) { + if_usb_reset_device(cardp); + goto restart; + } + + lbs_pr_info("FW download failure, time = %d ms\n", i * 100); + ret = -EIO; + goto release_fw; + } + + release_fw: + release_firmware(cardp->fw); + cardp->fw = NULL; + + done: + lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); + return ret; +} + + +#ifdef CONFIG_PM +static int if_usb_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct if_usb_card *cardp = usb_get_intfdata(intf); + struct lbs_private *priv = cardp->priv; + int ret; + + lbs_deb_enter(LBS_DEB_USB); + + if (priv->psstate != PS_STATE_FULL_POWER) + return -1; + + ret = lbs_suspend(priv); + if (ret) + goto out; + + /* Unlink tx & rx urb */ + usb_kill_urb(cardp->tx_urb); + usb_kill_urb(cardp->rx_urb); + + out: + lbs_deb_leave(LBS_DEB_USB); + return ret; +} + +static int if_usb_resume(struct usb_interface *intf) +{ + struct if_usb_card *cardp = usb_get_intfdata(intf); + struct lbs_private *priv = cardp->priv; + + lbs_deb_enter(LBS_DEB_USB); + + if_usb_submit_rx_urb(cardp); + + lbs_resume(priv); + + lbs_deb_leave(LBS_DEB_USB); + return 0; +} +#else +#define if_usb_suspend NULL +#define if_usb_resume NULL +#endif + +static struct usb_driver if_usb_driver = { + .name = DRV_NAME, + .probe = if_usb_probe, + .disconnect = if_usb_disconnect, + .id_table = if_usb_table, + .suspend = if_usb_suspend, + .resume = if_usb_resume, + .reset_resume = if_usb_resume, +}; + +static int __init if_usb_init_module(void) +{ + int ret = 0; + + lbs_deb_enter(LBS_DEB_MAIN); + + ret = usb_register(&if_usb_driver); + + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); + return ret; +} + +static void __exit if_usb_exit_module(void) +{ + lbs_deb_enter(LBS_DEB_MAIN); + + usb_deregister(&if_usb_driver); + + lbs_deb_leave(LBS_DEB_MAIN); +} + +module_init(if_usb_init_module); +module_exit(if_usb_exit_module); + +MODULE_DESCRIPTION("8388 USB WLAN Driver"); +MODULE_AUTHOR("Marvell International Ltd. and Red Hat, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/libertas/if_usb.h new file mode 100644 index 0000000..5ba0aee --- /dev/null +++ b/drivers/net/wireless/libertas/if_usb.h @@ -0,0 +1,105 @@ +#ifndef _LBS_IF_USB_H +#define _LBS_IF_USB_H + +#include +#include + +struct lbs_private; + +/** + * This file contains definition for USB interface. 
+ */ +#define CMD_TYPE_REQUEST 0xF00DFACE +#define CMD_TYPE_DATA 0xBEADC0DE +#define CMD_TYPE_INDICATION 0xBEEFFACE + +#define IPFIELD_ALIGN_OFFSET 2 + +#define BOOT_CMD_FW_BY_USB 0x01 +#define BOOT_CMD_FW_IN_EEPROM 0x02 +#define BOOT_CMD_UPDATE_BOOT2 0x03 +#define BOOT_CMD_UPDATE_FW 0x04 +#define BOOT_CMD_MAGIC_NUMBER 0x4C56524D /* LVRM */ + +struct bootcmd +{ + __le32 magic; + uint8_t cmd; + uint8_t pad[11]; +}; + +#define BOOT_CMD_RESP_OK 0x0001 +#define BOOT_CMD_RESP_FAIL 0x0000 +#define BOOT_CMD_RESP_NOT_SUPPORTED 0x0002 + +struct bootcmdresp +{ + __le32 magic; + uint8_t cmd; + uint8_t result; + uint8_t pad[2]; +}; + +/** USB card description structure*/ +struct if_usb_card { + struct usb_device *udev; + struct urb *rx_urb, *tx_urb; + struct lbs_private *priv; + + struct sk_buff *rx_skb; + + uint8_t ep_in; + uint8_t ep_out; + + /* bootcmdresp == 0 means command is pending + * bootcmdresp < 0 means error + * bootcmdresp > 0 is a BOOT_CMD_RESP_* from firmware + */ + int8_t bootcmdresp; + + int ep_in_size; + + void *ep_out_buf; + int ep_out_size; + + const struct firmware *fw; + struct timer_list fw_timeout; + wait_queue_head_t fw_wq; + uint32_t fwseqnum; + uint32_t totalbytes; + uint32_t fwlastblksent; + uint8_t CRC_OK; + uint8_t fwdnldover; + uint8_t fwfinalblk; + uint8_t surprise_removed; + + __le16 boot2_version; +}; + +/** fwheader */ +struct fwheader { + __le32 dnldcmd; + __le32 baseaddr; + __le32 datalength; + __le32 CRC; +}; + +#define FW_MAX_DATA_BLK_SIZE 600 +/** FWData */ +struct fwdata { + struct fwheader hdr; + __le32 seqnum; + uint8_t data[0]; +}; + +/** fwsyncheader */ +struct fwsyncheader { + __le32 cmd; + __le32 seqnum; +}; + +#define FW_HAS_DATA_TO_RECV 0x00000001 +#define FW_HAS_LAST_BLOCK 0x00000004 + + +#endif diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c new file mode 100644 index 0000000..dc47290 --- /dev/null +++ b/drivers/net/wireless/libertas/main.c @@ -0,0 +1,1325 @@ +/** + * This file contains the major functions in WLAN + * driver. It includes init, exit, open, close and main + * thread etc.. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "host.h" +#include "decl.h" +#include "dev.h" +#include "wext.h" +#include "cfg.h" +#include "debugfs.h" +#include "scan.h" +#include "assoc.h" +#include "cmd.h" + +#define DRIVER_RELEASE_VERSION "323.p0" +const char lbs_driver_version[] = "COMM-USB8388-" DRIVER_RELEASE_VERSION +#ifdef DEBUG + "-dbg" +#endif + ""; + + +/* Module parameters */ +unsigned int lbs_debug; +EXPORT_SYMBOL_GPL(lbs_debug); +module_param_named(libertas_debug, lbs_debug, int, 0644); + + +/* This global structure is used to send the confirm_sleep command as + * fast as possible down to the firmware. */ +struct cmd_confirm_sleep confirm_sleep; + + +/** + * the table to keep region code + */ +u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE] = + { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 }; + +/** + * FW rate table. FW refers to rates by their index in this table, not by the + * rate value itself. Values of 0x00 are + * reserved positions. 
+ */ +static u8 fw_data_rates[MAX_RATES] = + { 0x02, 0x04, 0x0B, 0x16, 0x00, 0x0C, 0x12, + 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x00 +}; + +/** + * @brief use index to get the data rate + * + * @param idx The index of data rate + * @return data rate or 0 + */ +u32 lbs_fw_index_to_data_rate(u8 idx) +{ + if (idx >= sizeof(fw_data_rates)) + idx = 0; + return fw_data_rates[idx]; +} + +/** + * @brief use rate to get the index + * + * @param rate data rate + * @return index or 0 + */ +u8 lbs_data_rate_to_fw_index(u32 rate) +{ + u8 i; + + if (!rate) + return 0; + + for (i = 0; i < sizeof(fw_data_rates); i++) { + if (rate == fw_data_rates[i]) + return i; + } + return 0; +} + + +static int lbs_add_rtap(struct lbs_private *priv); +static void lbs_remove_rtap(struct lbs_private *priv); + + +/** + * Get function for sysfs attribute rtap + */ +static ssize_t lbs_rtap_get(struct device *dev, + struct device_attribute *attr, char * buf) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + return snprintf(buf, 5, "0x%X\n", priv->monitormode); +} + +/** + * Set function for sysfs attribute rtap + */ +static ssize_t lbs_rtap_set(struct device *dev, + struct device_attribute *attr, const char * buf, size_t count) +{ + int monitor_mode; + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + + sscanf(buf, "%x", &monitor_mode); + if (monitor_mode) { + if (priv->monitormode == monitor_mode) + return strlen(buf); + if (!priv->monitormode) { + if (priv->infra_open || lbs_mesh_open(priv)) + return -EBUSY; + if (priv->mode == IW_MODE_INFRA) + lbs_cmd_80211_deauthenticate(priv, + priv->curbssparams.bssid, + WLAN_REASON_DEAUTH_LEAVING); + else if (priv->mode == IW_MODE_ADHOC) + lbs_adhoc_stop(priv); + lbs_add_rtap(priv); + } + priv->monitormode = monitor_mode; + } else { + if (!priv->monitormode) + return strlen(buf); + priv->monitormode = 0; + lbs_remove_rtap(priv); + + if (priv->currenttxskb) { + dev_kfree_skb_any(priv->currenttxskb); + priv->currenttxskb = NULL; + } + + /* Wake queues, command thread, etc. 
*/ + lbs_host_to_card_done(priv); + } + + lbs_prepare_and_send_command(priv, + CMD_802_11_MONITOR_MODE, CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, &priv->monitormode); + return strlen(buf); +} + +/** + * lbs_rtap attribute to be exported per ethX interface + * through sysfs (/sys/class/net/ethX/lbs_rtap) + */ +static DEVICE_ATTR(lbs_rtap, 0644, lbs_rtap_get, lbs_rtap_set ); + +/** + * @brief This function opens the ethX interface + * + * @param dev A pointer to net_device structure + * @return 0 or -EBUSY if monitor mode active + */ +static int lbs_dev_open(struct net_device *dev) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + + lbs_deb_enter(LBS_DEB_NET); + + spin_lock_irq(&priv->driver_lock); + + if (priv->monitormode) { + ret = -EBUSY; + goto out; + } + + priv->infra_open = 1; + + if (priv->connect_status == LBS_CONNECTED) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + if (!priv->tx_pending_len) + netif_wake_queue(dev); + out: + + spin_unlock_irq(&priv->driver_lock); + lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); + return ret; +} + +/** + * @brief This function closes the ethX interface + * + * @param dev A pointer to net_device structure + * @return 0 + */ +static int lbs_eth_stop(struct net_device *dev) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_NET); + + spin_lock_irq(&priv->driver_lock); + priv->infra_open = 0; + netif_stop_queue(dev); + spin_unlock_irq(&priv->driver_lock); + + schedule_work(&priv->mcast_work); + + lbs_deb_leave(LBS_DEB_NET); + return 0; +} + +static void lbs_tx_timeout(struct net_device *dev) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_TX); + + lbs_pr_err("tx watch dog timeout\n"); + + dev->trans_start = jiffies; + + if (priv->currenttxskb) + lbs_send_tx_feedback(priv, 0); + + /* XX: Shouldn't we also call into the hw-specific driver + to kick it somehow? */ + lbs_host_to_card_done(priv); + + /* More often than not, this actually happens because the + firmware has crapped itself -- rather than just a very + busy medium. So send a harmless command, and if/when + _that_ times out, we'll kick it in the head. 
*/ + lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, + 0, 0, NULL); + + lbs_deb_leave(LBS_DEB_TX); +} + +void lbs_host_to_card_done(struct lbs_private *priv) +{ + unsigned long flags; + + lbs_deb_enter(LBS_DEB_THREAD); + + spin_lock_irqsave(&priv->driver_lock, flags); + + priv->dnld_sent = DNLD_RES_RECEIVED; + + /* Wake main thread if commands are pending */ + if (!priv->cur_cmd || priv->tx_pending_len > 0) { + if (!priv->wakeup_dev_required) + wake_up_interruptible(&priv->waitq); + } + + spin_unlock_irqrestore(&priv->driver_lock, flags); + lbs_deb_leave(LBS_DEB_THREAD); +} +EXPORT_SYMBOL_GPL(lbs_host_to_card_done); + +int lbs_set_mac_address(struct net_device *dev, void *addr) +{ + int ret = 0; + struct lbs_private *priv = dev->ml_priv; + struct sockaddr *phwaddr = addr; + struct cmd_ds_802_11_mac_address cmd; + + lbs_deb_enter(LBS_DEB_NET); + + /* In case it was called from the mesh device */ + dev = priv->dev; + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + memcpy(cmd.macadd, phwaddr->sa_data, ETH_ALEN); + + ret = lbs_cmd_with_response(priv, CMD_802_11_MAC_ADDRESS, &cmd); + if (ret) { + lbs_deb_net("set MAC address failed\n"); + goto done; + } + + memcpy(priv->current_addr, phwaddr->sa_data, ETH_ALEN); + memcpy(dev->dev_addr, phwaddr->sa_data, ETH_ALEN); + if (priv->mesh_dev) + memcpy(priv->mesh_dev->dev_addr, phwaddr->sa_data, ETH_ALEN); + +done: + lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); + return ret; +} + + +static inline int mac_in_list(unsigned char *list, int list_len, + unsigned char *mac) +{ + while (list_len) { + if (!memcmp(list, mac, ETH_ALEN)) + return 1; + list += ETH_ALEN; + list_len--; + } + return 0; +} + + +static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd, + struct net_device *dev, int nr_addrs) +{ + int i = nr_addrs; + struct dev_mc_list *mc_list; + int cnt; + + if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST)) + return nr_addrs; + + netif_addr_lock_bh(dev); + cnt = netdev_mc_count(dev); + netdev_for_each_mc_addr(mc_list, dev) { + if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) { + lbs_deb_net("mcast address %s:%pM skipped\n", dev->name, + mc_list->dmi_addr); + cnt--; + continue; + } + + if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE) + break; + memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN); + lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name, + mc_list->dmi_addr); + i++; + cnt--; + } + netif_addr_unlock_bh(dev); + if (cnt) + return -EOVERFLOW; + + return i; +} + +static void lbs_set_mcast_worker(struct work_struct *work) +{ + struct lbs_private *priv = container_of(work, struct lbs_private, mcast_work); + struct cmd_ds_mac_multicast_adr mcast_cmd; + int dev_flags; + int nr_addrs; + int old_mac_control = priv->mac_control; + + lbs_deb_enter(LBS_DEB_NET); + + dev_flags = priv->dev->flags; + if (priv->mesh_dev) + dev_flags |= priv->mesh_dev->flags; + + if (dev_flags & IFF_PROMISC) { + priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE; + priv->mac_control &= ~(CMD_ACT_MAC_ALL_MULTICAST_ENABLE | + CMD_ACT_MAC_MULTICAST_ENABLE); + goto out_set_mac_control; + } else if (dev_flags & IFF_ALLMULTI) { + do_allmulti: + priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE; + priv->mac_control &= ~(CMD_ACT_MAC_PROMISCUOUS_ENABLE | + CMD_ACT_MAC_MULTICAST_ENABLE); + goto out_set_mac_control; + } + + /* Once for priv->dev, again for priv->mesh_dev if it exists */ + nr_addrs = lbs_add_mcast_addrs(&mcast_cmd, priv->dev, 0); + if (nr_addrs >= 0 && priv->mesh_dev) + 
nr_addrs = lbs_add_mcast_addrs(&mcast_cmd, priv->mesh_dev, nr_addrs); + if (nr_addrs < 0) + goto do_allmulti; + + if (nr_addrs) { + int size = offsetof(struct cmd_ds_mac_multicast_adr, + maclist[6*nr_addrs]); + + mcast_cmd.action = cpu_to_le16(CMD_ACT_SET); + mcast_cmd.hdr.size = cpu_to_le16(size); + mcast_cmd.nr_of_adrs = cpu_to_le16(nr_addrs); + + lbs_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &mcast_cmd.hdr, size); + + priv->mac_control |= CMD_ACT_MAC_MULTICAST_ENABLE; + } else + priv->mac_control &= ~CMD_ACT_MAC_MULTICAST_ENABLE; + + priv->mac_control &= ~(CMD_ACT_MAC_PROMISCUOUS_ENABLE | + CMD_ACT_MAC_ALL_MULTICAST_ENABLE); + out_set_mac_control: + if (priv->mac_control != old_mac_control) + lbs_set_mac_control(priv); + + lbs_deb_leave(LBS_DEB_NET); +} + +void lbs_set_multicast_list(struct net_device *dev) +{ + struct lbs_private *priv = dev->ml_priv; + + schedule_work(&priv->mcast_work); +} + +/** + * @brief This function handles the major jobs in the LBS driver. + * It handles all events generated by firmware, RX data received + * from firmware and TX data sent from kernel. + * + * @param data A pointer to lbs_thread structure + * @return 0 + */ +static int lbs_thread(void *data) +{ + struct net_device *dev = data; + struct lbs_private *priv = dev->ml_priv; + wait_queue_t wait; + + lbs_deb_enter(LBS_DEB_THREAD); + + init_waitqueue_entry(&wait, current); + + for (;;) { + int shouldsleep; + u8 resp_idx; + + lbs_deb_thread("1: currenttxskb %p, dnld_sent %d\n", + priv->currenttxskb, priv->dnld_sent); + + add_wait_queue(&priv->waitq, &wait); + set_current_state(TASK_INTERRUPTIBLE); + spin_lock_irq(&priv->driver_lock); + + if (kthread_should_stop()) + shouldsleep = 0; /* Bye */ + else if (priv->surpriseremoved) + shouldsleep = 1; /* We need to wait until we're _told_ to die */ + else if (priv->psstate == PS_STATE_SLEEP) + shouldsleep = 1; /* Sleep mode. Nothing we can do till it wakes */ + else if (priv->cmd_timed_out) + shouldsleep = 0; /* Command timed out. Recover */ + else if (!priv->fw_ready) + shouldsleep = 1; /* Firmware not ready. 
We're waiting for it */ + else if (priv->dnld_sent) + shouldsleep = 1; /* Something is en route to the device already */ + else if (priv->tx_pending_len > 0) + shouldsleep = 0; /* We've a packet to send */ + else if (priv->resp_len[priv->resp_idx]) + shouldsleep = 0; /* We have a command response */ + else if (priv->cur_cmd) + shouldsleep = 1; /* Can't send a command; one already running */ + else if (!list_empty(&priv->cmdpendingq) && + !(priv->wakeup_dev_required)) + shouldsleep = 0; /* We have a command to send */ + else if (kfifo_len(&priv->event_fifo)) + shouldsleep = 0; /* We have an event to process */ + else + shouldsleep = 1; /* No command */ + + if (shouldsleep) { + lbs_deb_thread("sleeping, connect_status %d, " + "psmode %d, psstate %d\n", + priv->connect_status, + priv->psmode, priv->psstate); + spin_unlock_irq(&priv->driver_lock); + schedule(); + } else + spin_unlock_irq(&priv->driver_lock); + + lbs_deb_thread("2: currenttxskb %p, dnld_send %d\n", + priv->currenttxskb, priv->dnld_sent); + + set_current_state(TASK_RUNNING); + remove_wait_queue(&priv->waitq, &wait); + + lbs_deb_thread("3: currenttxskb %p, dnld_sent %d\n", + priv->currenttxskb, priv->dnld_sent); + + if (kthread_should_stop()) { + lbs_deb_thread("break from main thread\n"); + break; + } + + if (priv->surpriseremoved) { + lbs_deb_thread("adapter removed; waiting to die...\n"); + continue; + } + + lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n", + priv->currenttxskb, priv->dnld_sent); + + /* Process any pending command response */ + spin_lock_irq(&priv->driver_lock); + resp_idx = priv->resp_idx; + if (priv->resp_len[resp_idx]) { + spin_unlock_irq(&priv->driver_lock); + lbs_process_command_response(priv, + priv->resp_buf[resp_idx], + priv->resp_len[resp_idx]); + spin_lock_irq(&priv->driver_lock); + priv->resp_len[resp_idx] = 0; + } + spin_unlock_irq(&priv->driver_lock); + + /* Process hardware events, e.g. 
card removed, link lost */ + spin_lock_irq(&priv->driver_lock); + while (kfifo_len(&priv->event_fifo)) { + u32 event; + + if (kfifo_out(&priv->event_fifo, + (unsigned char *) &event, sizeof(event)) != + sizeof(event)) + break; + spin_unlock_irq(&priv->driver_lock); + lbs_process_event(priv, event); + spin_lock_irq(&priv->driver_lock); + } + spin_unlock_irq(&priv->driver_lock); + + if (priv->wakeup_dev_required) { + lbs_deb_thread("Waking up device...\n"); + /* Wake up device */ + if (priv->exit_deep_sleep(priv)) + lbs_deb_thread("Wakeup device failed\n"); + continue; + } + + /* command timeout stuff */ + if (priv->cmd_timed_out && priv->cur_cmd) { + struct cmd_ctrl_node *cmdnode = priv->cur_cmd; + + lbs_pr_info("Timeout submitting command 0x%04x\n", + le16_to_cpu(cmdnode->cmdbuf->command)); + lbs_complete_command(priv, cmdnode, -ETIMEDOUT); + if (priv->reset_card) + priv->reset_card(priv); + } + priv->cmd_timed_out = 0; + + if (!priv->fw_ready) + continue; + + /* Check if we need to confirm Sleep Request received previously */ + if (priv->psstate == PS_STATE_PRE_SLEEP && + !priv->dnld_sent && !priv->cur_cmd) { + if (priv->connect_status == LBS_CONNECTED) { + lbs_deb_thread("pre-sleep, currenttxskb %p, " + "dnld_sent %d, cur_cmd %p\n", + priv->currenttxskb, priv->dnld_sent, + priv->cur_cmd); + + lbs_ps_confirm_sleep(priv); + } else { + /* workaround for firmware sending + * deauth/linkloss event immediately + * after sleep request; remove this + * after firmware fixes it + */ + priv->psstate = PS_STATE_AWAKE; + lbs_pr_alert("ignore PS_SleepConfirm in " + "non-connected state\n"); + } + } + + /* The PS state is changed during processing of Sleep Request + * event above + */ + if ((priv->psstate == PS_STATE_SLEEP) || + (priv->psstate == PS_STATE_PRE_SLEEP)) + continue; + + if (priv->is_deep_sleep) + continue; + + /* Execute the next command */ + if (!priv->dnld_sent && !priv->cur_cmd) + lbs_execute_next_command(priv); + + /* Wake-up command waiters which can't sleep in + * lbs_prepare_and_send_command + */ + if (!list_empty(&priv->cmdpendingq)) + wake_up_all(&priv->cmd_pending); + + spin_lock_irq(&priv->driver_lock); + if (!priv->dnld_sent && priv->tx_pending_len > 0) { + int ret = priv->hw_host_to_card(priv, MVMS_DAT, + priv->tx_pending_buf, + priv->tx_pending_len); + if (ret) { + lbs_deb_tx("host_to_card failed %d\n", ret); + priv->dnld_sent = DNLD_RES_RECEIVED; + } + priv->tx_pending_len = 0; + if (!priv->currenttxskb) { + /* We can wake the queues immediately if we aren't + waiting for TX feedback */ + if (priv->connect_status == LBS_CONNECTED) + netif_wake_queue(priv->dev); + if (priv->mesh_dev && + lbs_mesh_connected(priv)) + netif_wake_queue(priv->mesh_dev); + } + } + spin_unlock_irq(&priv->driver_lock); + } + + del_timer(&priv->command_timer); + del_timer(&priv->auto_deepsleep_timer); + wake_up_all(&priv->cmd_pending); + + lbs_deb_leave(LBS_DEB_THREAD); + return 0; +} + +static int lbs_suspend_callback(struct lbs_private *priv, unsigned long dummy, + struct cmd_header *cmd) +{ + lbs_deb_enter(LBS_DEB_FW); + + netif_device_detach(priv->dev); + if (priv->mesh_dev) + netif_device_detach(priv->mesh_dev); + + priv->fw_ready = 0; + lbs_deb_leave(LBS_DEB_FW); + return 0; +} + +int lbs_suspend(struct lbs_private *priv) +{ + struct cmd_header cmd; + int ret; + + lbs_deb_enter(LBS_DEB_FW); + + if (priv->wol_criteria == 0xffffffff) { + lbs_pr_info("Suspend attempt without configuring wake params!\n"); + return -EINVAL; + } + + memset(&cmd, 0, sizeof(cmd)); + + ret = __lbs_cmd(priv, 
CMD_802_11_HOST_SLEEP_ACTIVATE, &cmd, + sizeof(cmd), lbs_suspend_callback, 0); + if (ret) + lbs_pr_info("HOST_SLEEP_ACTIVATE failed: %d\n", ret); + + lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); + return ret; +} +EXPORT_SYMBOL_GPL(lbs_suspend); + +void lbs_resume(struct lbs_private *priv) +{ + lbs_deb_enter(LBS_DEB_FW); + + priv->fw_ready = 1; + + /* Firmware doesn't seem to give us RX packets any more + until we send it some command. Might as well update */ + lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, + 0, 0, NULL); + + netif_device_attach(priv->dev); + if (priv->mesh_dev) + netif_device_attach(priv->mesh_dev); + + lbs_deb_leave(LBS_DEB_FW); +} +EXPORT_SYMBOL_GPL(lbs_resume); + +/** + * @brief This function gets the HW spec from the firmware and sets + * some basic parameters. + * + * @param priv A pointer to struct lbs_private structure + * @return 0 or -1 + */ +static int lbs_setup_firmware(struct lbs_private *priv) +{ + int ret = -1; + s16 curlevel = 0, minlevel = 0, maxlevel = 0; + + lbs_deb_enter(LBS_DEB_FW); + + /* Read MAC address from firmware */ + memset(priv->current_addr, 0xff, ETH_ALEN); + ret = lbs_update_hw_spec(priv); + if (ret) + goto done; + + /* Read power levels if available */ + ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel); + if (ret == 0) { + priv->txpower_cur = curlevel; + priv->txpower_min = minlevel; + priv->txpower_max = maxlevel; + } + + lbs_set_mac_control(priv); +done: + lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); + return ret; +} + +/** + * This function handles the timeout of command sending. + * It will re-send the same command again. + */ +static void lbs_cmd_timeout_handler(unsigned long data) +{ + struct lbs_private *priv = (struct lbs_private *)data; + unsigned long flags; + + lbs_deb_enter(LBS_DEB_CMD); + spin_lock_irqsave(&priv->driver_lock, flags); + + if (!priv->cur_cmd) + goto out; + + lbs_pr_info("command 0x%04x timed out\n", + le16_to_cpu(priv->cur_cmd->cmdbuf->command)); + + priv->cmd_timed_out = 1; + wake_up_interruptible(&priv->waitq); +out: + spin_unlock_irqrestore(&priv->driver_lock, flags); + lbs_deb_leave(LBS_DEB_CMD); +} + +/** + * This function put the device back to deep sleep mode when timer expires + * and no activity (command, event, data etc.) is detected. 
+ */ +static void auto_deepsleep_timer_fn(unsigned long data) +{ + struct lbs_private *priv = (struct lbs_private *)data; + int ret; + + lbs_deb_enter(LBS_DEB_CMD); + + if (priv->is_activity_detected) { + priv->is_activity_detected = 0; + } else { + if (priv->is_auto_deep_sleep_enabled && + (!priv->wakeup_dev_required) && + (priv->connect_status != LBS_CONNECTED)) { + lbs_deb_main("Entering auto deep sleep mode...\n"); + ret = lbs_prepare_and_send_command(priv, + CMD_802_11_DEEP_SLEEP, 0, + 0, 0, NULL); + if (ret) + lbs_pr_err("Enter Deep Sleep command failed\n"); + } + } + mod_timer(&priv->auto_deepsleep_timer , jiffies + + (priv->auto_deep_sleep_timeout * HZ)/1000); + lbs_deb_leave(LBS_DEB_CMD); +} + +int lbs_enter_auto_deep_sleep(struct lbs_private *priv) +{ + lbs_deb_enter(LBS_DEB_SDIO); + + priv->is_auto_deep_sleep_enabled = 1; + if (priv->is_deep_sleep) + priv->wakeup_dev_required = 1; + mod_timer(&priv->auto_deepsleep_timer , + jiffies + (priv->auto_deep_sleep_timeout * HZ)/1000); + + lbs_deb_leave(LBS_DEB_SDIO); + return 0; +} + +int lbs_exit_auto_deep_sleep(struct lbs_private *priv) +{ + lbs_deb_enter(LBS_DEB_SDIO); + + priv->is_auto_deep_sleep_enabled = 0; + priv->auto_deep_sleep_timeout = 0; + del_timer(&priv->auto_deepsleep_timer); + + lbs_deb_leave(LBS_DEB_SDIO); + return 0; +} + +static int lbs_init_adapter(struct lbs_private *priv) +{ + size_t bufsize; + int i, ret = 0; + + lbs_deb_enter(LBS_DEB_MAIN); + + /* Allocate buffer to store the BSSID list */ + bufsize = MAX_NETWORK_COUNT * sizeof(struct bss_descriptor); + priv->networks = kzalloc(bufsize, GFP_KERNEL); + if (!priv->networks) { + lbs_pr_err("Out of memory allocating beacons\n"); + ret = -1; + goto out; + } + + /* Initialize scan result lists */ + INIT_LIST_HEAD(&priv->network_free_list); + INIT_LIST_HEAD(&priv->network_list); + for (i = 0; i < MAX_NETWORK_COUNT; i++) { + list_add_tail(&priv->networks[i].list, + &priv->network_free_list); + } + + memset(priv->current_addr, 0xff, ETH_ALEN); + + priv->connect_status = LBS_DISCONNECTED; + priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; + priv->mode = IW_MODE_INFRA; + priv->channel = DEFAULT_AD_HOC_CHANNEL; + priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; + priv->radio_on = 1; + priv->enablehwauto = 1; + priv->psmode = LBS802_11POWERMODECAM; + priv->psstate = PS_STATE_FULL_POWER; + priv->is_deep_sleep = 0; + priv->is_auto_deep_sleep_enabled = 0; + priv->wakeup_dev_required = 0; + init_waitqueue_head(&priv->ds_awake_q); + + mutex_init(&priv->lock); + + setup_timer(&priv->command_timer, lbs_cmd_timeout_handler, + (unsigned long)priv); + setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn, + (unsigned long)priv); + + INIT_LIST_HEAD(&priv->cmdfreeq); + INIT_LIST_HEAD(&priv->cmdpendingq); + + spin_lock_init(&priv->driver_lock); + init_waitqueue_head(&priv->cmd_pending); + + /* Allocate the command buffers */ + if (lbs_allocate_cmd_buffer(priv)) { + lbs_pr_err("Out of memory allocating command buffers\n"); + ret = -ENOMEM; + goto out; + } + priv->resp_idx = 0; + priv->resp_len[0] = priv->resp_len[1] = 0; + + /* Create the event FIFO */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL); + if (ret) { +#else + priv->event_fifo = kfifo_alloc(sizeof(u32) * 16, GFP_KERNEL, NULL); + if (IS_ERR(priv->event_fifo)) { + ret = -ENOMEM; +#endif + lbs_pr_err("Out of memory allocating event FIFO buffer\n"); + goto out; + } + +out: + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); + + return 
ret; +} + +static void lbs_free_adapter(struct lbs_private *priv) +{ + lbs_deb_enter(LBS_DEB_MAIN); + + lbs_free_cmd_buffer(priv); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + kfifo_free(&priv->event_fifo); +#else + if (priv->event_fifo) + kfifo_free(priv->event_fifo); +#endif + del_timer(&priv->command_timer); + del_timer(&priv->auto_deepsleep_timer); + kfree(priv->networks); + priv->networks = NULL; + + lbs_deb_leave(LBS_DEB_MAIN); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops lbs_netdev_ops = { + .ndo_open = lbs_dev_open, + .ndo_stop = lbs_eth_stop, + .ndo_start_xmit = lbs_hard_start_xmit, + .ndo_set_mac_address = lbs_set_mac_address, + .ndo_tx_timeout = lbs_tx_timeout, + .ndo_set_multicast_list = lbs_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; +#endif + +/** + * @brief This function adds the card. it will probe the + * card, allocate the lbs_priv and initialize the device. + * + * @param card A pointer to card + * @return A pointer to struct lbs_private structure + */ +struct lbs_private *lbs_add_card(void *card, struct device *dmdev) +{ + struct net_device *dev; + struct wireless_dev *wdev; + struct lbs_private *priv = NULL; + + lbs_deb_enter(LBS_DEB_MAIN); + + /* Allocate an Ethernet device and register it */ + wdev = lbs_cfg_alloc(dmdev); + if (IS_ERR(wdev)) { + lbs_pr_err("cfg80211 init failed\n"); + goto done; + } + /* TODO? */ + wdev->iftype = NL80211_IFTYPE_STATION; + priv = wdev_priv(wdev); + priv->wdev = wdev; + + if (lbs_init_adapter(priv)) { + lbs_pr_err("failed to initialize adapter structure.\n"); + goto err_wdev; + } + + //TODO? dev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES); + dev = alloc_netdev(0, "wlan%d", ether_setup); + if (!dev) { + dev_err(dmdev, "no memory for network device instance\n"); + goto err_adapter; + } + + dev->ieee80211_ptr = wdev; + dev->ml_priv = priv; + SET_NETDEV_DEV(dev, dmdev); + wdev->netdev = dev; + priv->dev = dev; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + dev->netdev_ops = &lbs_netdev_ops; +#else + dev->open = lbs_dev_open; + dev->hard_start_xmit = lbs_hard_start_xmit; + dev->stop = lbs_eth_stop; + dev->set_mac_address = lbs_set_mac_address; + dev->tx_timeout = lbs_tx_timeout; + dev->set_multicast_list = lbs_set_multicast_list; +#endif + dev->watchdog_timeo = 5 * HZ; + dev->ethtool_ops = &lbs_ethtool_ops; +#ifdef WIRELESS_EXT + dev->wireless_handlers = &lbs_handler_def; +#endif + dev->flags |= IFF_BROADCAST | IFF_MULTICAST; + + + // TODO: kzalloc + iwm_init_default_profile(iwm, iwm->umac_profile); ?? 
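+ /*
+  * Sketch of the multiqueue allocation hinted at in the TODO above
+  * (hypothetical; this driver actually uses the single-queue
+  * alloc_netdev() call above):
+  *
+  *   dev = alloc_netdev_mq(0, "wlan%d", ether_setup, 4);
+  *
+  * On kernels of this era alloc_netdev_mq() takes the private-area size,
+  * the name template, the setup callback and the number of TX queues;
+  * the queue count of 4 is only an arbitrary illustration.
+  */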
+ + + priv->card = card; + priv->infra_open = 0; + + + priv->rtap_net_dev = NULL; + strcpy(dev->name, "wlan%d"); + + lbs_deb_thread("Starting main thread...\n"); + init_waitqueue_head(&priv->waitq); + priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main"); + if (IS_ERR(priv->main_thread)) { + lbs_deb_thread("Error creating main thread.\n"); + goto err_ndev; + } + + priv->work_thread = create_singlethread_workqueue("lbs_worker"); + INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker); + INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker); + INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker); + + priv->wol_criteria = 0xffffffff; + priv->wol_gpio = 0xff; + + goto done; + + err_ndev: + free_netdev(dev); + + err_adapter: + lbs_free_adapter(priv); + + err_wdev: + lbs_cfg_free(priv); + + priv = NULL; + +done: + lbs_deb_leave_args(LBS_DEB_MAIN, "priv %p", priv); + return priv; +} +EXPORT_SYMBOL_GPL(lbs_add_card); + + +void lbs_remove_card(struct lbs_private *priv) +{ + struct net_device *dev = priv->dev; + + lbs_deb_enter(LBS_DEB_MAIN); + + lbs_remove_mesh(priv); + lbs_remove_rtap(priv); + + dev = priv->dev; + + cancel_delayed_work_sync(&priv->scan_work); + cancel_delayed_work_sync(&priv->assoc_work); + cancel_work_sync(&priv->mcast_work); + + /* worker thread destruction blocks on the in-flight command which + * should have been cleared already in lbs_stop_card(). + */ + lbs_deb_main("destroying worker thread\n"); + destroy_workqueue(priv->work_thread); + lbs_deb_main("done destroying worker thread\n"); + + if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { + priv->psmode = LBS802_11POWERMODECAM; + lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); + } + + lbs_send_disconnect_notification(priv); + + if (priv->is_deep_sleep) { + priv->is_deep_sleep = 0; + wake_up_interruptible(&priv->ds_awake_q); + } + + /* Stop the thread servicing the interrupts */ + priv->surpriseremoved = 1; + kthread_stop(priv->main_thread); + + lbs_free_adapter(priv); + lbs_cfg_free(priv); + + priv->dev = NULL; + free_netdev(dev); + + lbs_deb_leave(LBS_DEB_MAIN); +} +EXPORT_SYMBOL_GPL(lbs_remove_card); + + +static int lbs_rtap_supported(struct lbs_private *priv) +{ + if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) + return 1; + + /* newer firmware use a capability mask */ + return ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) && + (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)); +} + + +int lbs_start_card(struct lbs_private *priv) +{ + struct net_device *dev = priv->dev; + int ret = -1; + + lbs_deb_enter(LBS_DEB_MAIN); + + /* poke the firmware */ + ret = lbs_setup_firmware(priv); + if (ret) + goto done; + + if (lbs_cfg_register(priv)) { + lbs_pr_err("cannot register device\n"); + goto done; + } + + lbs_update_channel(priv); + + lbs_init_mesh(priv); + + /* + * While rtap isn't related to mesh, only mesh-enabled + * firmware implements the rtap functionality via + * CMD_802_11_MONITOR_MODE. 
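+ *
+ * For illustration, monitor mode is driven from userspace through the
+ * lbs_rtap sysfs attribute created below (ethX stands for the station
+ * interface name):
+ *
+ *   echo 0x1 > /sys/class/net/ethX/lbs_rtap    (enable: creates the rtap
+ *                                               monitor interface)
+ *   echo 0x0 > /sys/class/net/ethX/lbs_rtap    (disable: removes it again)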
+ */ + if (lbs_rtap_supported(priv)) { + if (device_create_file(&dev->dev, &dev_attr_lbs_rtap)) + lbs_pr_err("cannot register lbs_rtap attribute\n"); + } + + lbs_debugfs_init_one(priv, dev); + + lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name); + + ret = 0; + +done: + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); + return ret; +} +EXPORT_SYMBOL_GPL(lbs_start_card); + + +void lbs_stop_card(struct lbs_private *priv) +{ + struct net_device *dev; + struct cmd_ctrl_node *cmdnode; + unsigned long flags; + + lbs_deb_enter(LBS_DEB_MAIN); + + if (!priv) + goto out; + dev = priv->dev; + + netif_stop_queue(dev); + netif_carrier_off(dev); + + lbs_debugfs_remove_one(priv); + lbs_deinit_mesh(priv); + + if (lbs_rtap_supported(priv)) + device_remove_file(&dev->dev, &dev_attr_lbs_rtap); + + /* Delete the timeout of the currently processing command */ + del_timer_sync(&priv->command_timer); + del_timer_sync(&priv->auto_deepsleep_timer); + + /* Flush pending command nodes */ + spin_lock_irqsave(&priv->driver_lock, flags); + lbs_deb_main("clearing pending commands\n"); + list_for_each_entry(cmdnode, &priv->cmdpendingq, list) { + cmdnode->result = -ENOENT; + cmdnode->cmdwaitqwoken = 1; + wake_up_interruptible(&cmdnode->cmdwait_q); + } + + /* Flush the command the card is currently processing */ + if (priv->cur_cmd) { + lbs_deb_main("clearing current command\n"); + priv->cur_cmd->result = -ENOENT; + priv->cur_cmd->cmdwaitqwoken = 1; + wake_up_interruptible(&priv->cur_cmd->cmdwait_q); + } + lbs_deb_main("done clearing commands\n"); + spin_unlock_irqrestore(&priv->driver_lock, flags); + + unregister_netdev(dev); + +out: + lbs_deb_leave(LBS_DEB_MAIN); +} +EXPORT_SYMBOL_GPL(lbs_stop_card); + + +void lbs_queue_event(struct lbs_private *priv, u32 event) +{ + unsigned long flags; + + lbs_deb_enter(LBS_DEB_THREAD); + spin_lock_irqsave(&priv->driver_lock, flags); + + if (priv->psstate == PS_STATE_SLEEP) + priv->psstate = PS_STATE_AWAKE; + + kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32)); + + wake_up_interruptible(&priv->waitq); + + spin_unlock_irqrestore(&priv->driver_lock, flags); + lbs_deb_leave(LBS_DEB_THREAD); +} +EXPORT_SYMBOL_GPL(lbs_queue_event); + +void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx) +{ + lbs_deb_enter(LBS_DEB_THREAD); + + if (priv->psstate == PS_STATE_SLEEP) + priv->psstate = PS_STATE_AWAKE; + + /* Swap buffers by flipping the response index */ + BUG_ON(resp_idx > 1); + priv->resp_idx = resp_idx; + + wake_up_interruptible(&priv->waitq); + + lbs_deb_leave(LBS_DEB_THREAD); +} +EXPORT_SYMBOL_GPL(lbs_notify_command_response); + +static int __init lbs_init_module(void) +{ + lbs_deb_enter(LBS_DEB_MAIN); + memset(&confirm_sleep, 0, sizeof(confirm_sleep)); + confirm_sleep.hdr.command = cpu_to_le16(CMD_802_11_PS_MODE); + confirm_sleep.hdr.size = cpu_to_le16(sizeof(confirm_sleep)); + confirm_sleep.action = cpu_to_le16(CMD_SUBCMD_SLEEP_CONFIRMED); + lbs_debugfs_init(); + lbs_deb_leave(LBS_DEB_MAIN); + return 0; +} + +static void __exit lbs_exit_module(void) +{ + lbs_deb_enter(LBS_DEB_MAIN); + lbs_debugfs_remove(); + lbs_deb_leave(LBS_DEB_MAIN); +} + +/* + * rtap interface support functions + */ + +static int lbs_rtap_open(struct net_device *dev) +{ + /* Yes, _stop_ the queue.
Because we don't support injection */ + lbs_deb_enter(LBS_DEB_MAIN); + netif_carrier_off(dev); + netif_stop_queue(dev); + lbs_deb_leave(LBS_DEB_LEAVE); + return 0; +} + +static int lbs_rtap_stop(struct net_device *dev) +{ + lbs_deb_enter(LBS_DEB_MAIN); + lbs_deb_leave(LBS_DEB_MAIN); + return 0; +} + +static netdev_tx_t lbs_rtap_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + netif_stop_queue(dev); + return NETDEV_TX_BUSY; +} + +static void lbs_remove_rtap(struct lbs_private *priv) +{ + lbs_deb_enter(LBS_DEB_MAIN); + if (priv->rtap_net_dev == NULL) + goto out; + unregister_netdev(priv->rtap_net_dev); + free_netdev(priv->rtap_net_dev); + priv->rtap_net_dev = NULL; +out: + lbs_deb_leave(LBS_DEB_MAIN); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops rtap_netdev_ops = { + .ndo_open = lbs_rtap_open, + .ndo_stop = lbs_rtap_stop, + .ndo_start_xmit = lbs_rtap_hard_start_xmit, +}; +#endif + +static int lbs_add_rtap(struct lbs_private *priv) +{ + int ret = 0; + struct net_device *rtap_dev; + + lbs_deb_enter(LBS_DEB_MAIN); + if (priv->rtap_net_dev) { + ret = -EPERM; + goto out; + } + + rtap_dev = alloc_netdev(0, "rtap%d", ether_setup); + if (rtap_dev == NULL) { + ret = -ENOMEM; + goto out; + } + + memcpy(rtap_dev->dev_addr, priv->current_addr, ETH_ALEN); + rtap_dev->type = ARPHRD_IEEE80211_RADIOTAP; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + rtap_dev->netdev_ops = &rtap_netdev_ops; +#else + rtap_dev->open = lbs_rtap_open; + rtap_dev->stop = lbs_rtap_stop; + rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit; +#endif + rtap_dev->ml_priv = priv; + SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent); + + ret = register_netdev(rtap_dev); + if (ret) { + free_netdev(rtap_dev); + goto out; + } + priv->rtap_net_dev = rtap_dev; + +out: + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); + return ret; +} + +module_init(lbs_init_module); +module_exit(lbs_exit_module); + +MODULE_DESCRIPTION("Libertas WLAN Driver Library"); +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c new file mode 100644 index 0000000..eb7c141 --- /dev/null +++ b/drivers/net/wireless/libertas/mesh.c @@ -0,0 +1,1164 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "mesh.h" +#include "decl.h" +#include "cmd.h" + + +/*************************************************************************** + * Mesh sysfs support + */ + +/** + * Attributes exported through sysfs + */ + +/** + * @brief Get function for sysfs attribute anycast_mask + */ +static ssize_t lbs_anycast_get(struct device *dev, + struct device_attribute *attr, char * buf) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + struct cmd_ds_mesh_access mesh_access; + int ret; + + memset(&mesh_access, 0, sizeof(mesh_access)); + + ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_ANYCAST, &mesh_access); + if (ret) + return ret; + + return snprintf(buf, 12, "0x%X\n", le32_to_cpu(mesh_access.data[0])); +} + +/** + * @brief Set function for sysfs attribute anycast_mask + */ +static ssize_t lbs_anycast_set(struct device *dev, + struct device_attribute *attr, const char * buf, size_t count) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + struct cmd_ds_mesh_access mesh_access; + uint32_t datum; + int ret; + + memset(&mesh_access, 0, sizeof(mesh_access)); + sscanf(buf, "%x", &datum); + mesh_access.data[0] = cpu_to_le32(datum); + + ret = lbs_mesh_access(priv,
CMD_ACT_MESH_SET_ANYCAST, &mesh_access); + if (ret) + return ret; + + return strlen(buf); +} + +/** + * @brief Get function for sysfs attribute prb_rsp_limit + */ +static ssize_t lbs_prb_rsp_limit_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + struct cmd_ds_mesh_access mesh_access; + int ret; + u32 retry_limit; + + memset(&mesh_access, 0, sizeof(mesh_access)); + mesh_access.data[0] = cpu_to_le32(CMD_ACT_GET); + + ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT, + &mesh_access); + if (ret) + return ret; + + retry_limit = le32_to_cpu(mesh_access.data[1]); + return snprintf(buf, 10, "%d\n", retry_limit); +} + +/** + * @brief Set function for sysfs attribute prb_rsp_limit + */ +static ssize_t lbs_prb_rsp_limit_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + struct cmd_ds_mesh_access mesh_access; + int ret; + unsigned long retry_limit; + + memset(&mesh_access, 0, sizeof(mesh_access)); + mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET); + + /* strict_strtoul() returns 0 on success, so reject only parse failures */ + if (strict_strtoul(buf, 10, &retry_limit)) + return -ENOTSUPP; + if (retry_limit > 15) + return -ENOTSUPP; + + mesh_access.data[1] = cpu_to_le32(retry_limit); + + ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT, + &mesh_access); + if (ret) + return ret; + + return strlen(buf); +} + +/** + * Get function for sysfs attribute mesh + */ +static ssize_t lbs_mesh_get(struct device *dev, + struct device_attribute *attr, char * buf) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev); +} + +/** + * Set function for sysfs attribute mesh + */ +static ssize_t lbs_mesh_set(struct device *dev, + struct device_attribute *attr, const char * buf, size_t count) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + int enable; + int ret, action = CMD_ACT_MESH_CONFIG_STOP; + + sscanf(buf, "%x", &enable); + enable = !!enable; + if (enable == !!priv->mesh_dev) + return count; + if (enable) + action = CMD_ACT_MESH_CONFIG_START; + ret = lbs_mesh_config(priv, action, priv->channel); + if (ret) + return ret; + + if (enable) + lbs_add_mesh(priv); + else + lbs_remove_mesh(priv); + + return count; +} + +/** + * lbs_mesh attribute to be exported per ethX interface + * through sysfs (/sys/class/net/ethX/lbs_mesh) + */ +static DEVICE_ATTR(lbs_mesh, 0644, lbs_mesh_get, lbs_mesh_set); + +/** + * anycast_mask attribute to be exported per mshX interface + * through sysfs (/sys/class/net/mshX/anycast_mask) + */ +static DEVICE_ATTR(anycast_mask, 0644, lbs_anycast_get, lbs_anycast_set); + +/** + * prb_rsp_limit attribute to be exported per mshX interface + * through sysfs (/sys/class/net/mshX/prb_rsp_limit) + */ +static DEVICE_ATTR(prb_rsp_limit, 0644, lbs_prb_rsp_limit_get, + lbs_prb_rsp_limit_set); + +static struct attribute *lbs_mesh_sysfs_entries[] = { + &dev_attr_anycast_mask.attr, + &dev_attr_prb_rsp_limit.attr, + NULL, +}; + +static struct attribute_group lbs_mesh_attr_group = { + .attrs = lbs_mesh_sysfs_entries, +}; + + + +/*************************************************************************** + * Initializing, starting and stopping mesh + */ + +/* + * Check mesh FW version and appropriately send the mesh start + * command + */ +int lbs_init_mesh(struct lbs_private *priv) +{ + struct net_device *dev = priv->dev; + int ret = 0; + + lbs_deb_enter(LBS_DEB_MESH); + + priv->mesh_connect_status = LBS_DISCONNECTED; + +
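+ /*
+  * Summary of the firmware probing below (the TLV values are taken from
+  * the comments that follow):
+  *
+  *   major rev 5 (MRVL_FW_V5)      -> try TLV_TYPE_OLD_MESH_ID (0x100 + 291),
+  *                                    then fall back to TLV_TYPE_MESH_ID
+  *                                    (0x100 + 37)
+  *   major rev >= 10 (MRVL_FW_V10)
+  *   with the mesh fwcapinfo bit   -> TLV_TYPE_MESH_ID only
+  *   anything else                 -> mesh_tlv stays 0 and mesh is disabled
+  */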
/* Determine mesh_fw_ver from fwrelease and fwcapinfo */ + /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */ + /* 5.110.22 have mesh command with 0xa3 command id */ + /* 10.0.0.p0 FW brings in mesh config command with different id */ + /* Check FW version MSB and initialize mesh_fw_ver */ + if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) { + /* Enable mesh, if supported, and work out which TLV it uses. + 0x100 + 291 is an unofficial value used in 5.110.20.pXX + 0x100 + 37 is the official value used in 5.110.21.pXX + but we check them in that order because 20.pXX doesn't + give an error -- it just silently fails. */ + + /* 5.110.20.pXX firmware will fail the command if the channel + doesn't match the existing channel. But only if the TLV + is correct. If the channel is wrong, _BOTH_ versions will + give an error to 0x100+291, and allow 0x100+37 to succeed. + It's just that 5.110.20.pXX will not have done anything + useful */ + + priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID; + if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, + priv->channel)) { + priv->mesh_tlv = TLV_TYPE_MESH_ID; + if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, + priv->channel)) + priv->mesh_tlv = 0; + } + } else + if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) && + (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) { + /* 10.0.0.pXX new firmwares should succeed with TLV + * 0x100+37; Do not invoke command with old TLV. + */ + priv->mesh_tlv = TLV_TYPE_MESH_ID; + if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, + priv->channel)) + priv->mesh_tlv = 0; + } + + + if (priv->mesh_tlv) { + sprintf(priv->mesh_ssid, "mesh"); + priv->mesh_ssid_len = 4; + + lbs_add_mesh(priv); + + if (device_create_file(&dev->dev, &dev_attr_lbs_mesh)) + lbs_pr_err("cannot register lbs_mesh attribute\n"); + + ret = 1; + } + + lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret); + return ret; +} + + +int lbs_deinit_mesh(struct lbs_private *priv) +{ + struct net_device *dev = priv->dev; + int ret = 0; + + lbs_deb_enter(LBS_DEB_MESH); + + if (priv->mesh_tlv) { + device_remove_file(&dev->dev, &dev_attr_lbs_mesh); + ret = 1; + } + + lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret); + return ret; +} + + +/** + * @brief This function closes the mshX interface + * + * @param dev A pointer to net_device structure + * @return 0 + */ +static int lbs_mesh_stop(struct net_device *dev) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_MESH); + spin_lock_irq(&priv->driver_lock); + + priv->mesh_open = 0; + priv->mesh_connect_status = LBS_DISCONNECTED; + + netif_stop_queue(dev); + netif_carrier_off(dev); + + spin_unlock_irq(&priv->driver_lock); + + schedule_work(&priv->mcast_work); + + lbs_deb_leave(LBS_DEB_MESH); + return 0; +} + +/** + * @brief This function opens the mshX interface + * + * @param dev A pointer to net_device structure + * @return 0 or -EBUSY if monitor mode active + */ +static int lbs_mesh_dev_open(struct net_device *dev) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + + lbs_deb_enter(LBS_DEB_NET); + + spin_lock_irq(&priv->driver_lock); + + if (priv->monitormode) { + ret = -EBUSY; + goto out; + } + + priv->mesh_open = 1; + priv->mesh_connect_status = LBS_CONNECTED; + netif_carrier_on(dev); + + if (!priv->tx_pending_len) + netif_wake_queue(dev); + out: + + spin_unlock_irq(&priv->driver_lock); + lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); + return ret; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops mesh_netdev_ops = { + .ndo_open = 
lbs_mesh_dev_open, + .ndo_stop = lbs_mesh_stop, + .ndo_start_xmit = lbs_hard_start_xmit, + .ndo_set_mac_address = lbs_set_mac_address, + .ndo_set_multicast_list = lbs_set_multicast_list, +}; +#endif + +/** + * @brief This function adds mshX interface + * + * @param priv A pointer to the struct lbs_private structure + * @return 0 if successful, -X otherwise + */ +int lbs_add_mesh(struct lbs_private *priv) +{ + struct net_device *mesh_dev = NULL; + int ret = 0; + + lbs_deb_enter(LBS_DEB_MESH); + + /* Allocate a virtual mesh device */ + mesh_dev = alloc_netdev(0, "msh%d", ether_setup); + if (!mesh_dev) { + lbs_deb_mesh("init mshX device failed\n"); + ret = -ENOMEM; + goto done; + } + mesh_dev->ml_priv = priv; + priv->mesh_dev = mesh_dev; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + mesh_dev->netdev_ops = &mesh_netdev_ops; +#else + mesh_dev->open = lbs_mesh_dev_open; + mesh_dev->hard_start_xmit = lbs_hard_start_xmit; + mesh_dev->stop = lbs_mesh_stop; + mesh_dev->set_mac_address = lbs_set_mac_address; + mesh_dev->set_multicast_list = lbs_set_multicast_list; +#endif + mesh_dev->ethtool_ops = &lbs_ethtool_ops; + memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN); + + SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); + +#ifdef WIRELESS_EXT + mesh_dev->wireless_handlers = &mesh_handler_def; +#endif + mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST; + /* Register virtual mesh interface */ + ret = register_netdev(mesh_dev); + if (ret) { + lbs_pr_err("cannot register mshX virtual interface\n"); + goto err_free; + } + + ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); + if (ret) + goto err_unregister; + + lbs_persist_config_init(mesh_dev); + + /* Everything successful */ + ret = 0; + goto done; + +err_unregister: + unregister_netdev(mesh_dev); + +err_free: + free_netdev(mesh_dev); + +done: + lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret); + return ret; +} + +void lbs_remove_mesh(struct lbs_private *priv) +{ + struct net_device *mesh_dev; + + mesh_dev = priv->mesh_dev; + if (!mesh_dev) + return; + + lbs_deb_enter(LBS_DEB_MESH); + netif_stop_queue(mesh_dev); + netif_carrier_off(mesh_dev); + sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); + lbs_persist_config_remove(mesh_dev); + unregister_netdev(mesh_dev); + priv->mesh_dev = NULL; + free_netdev(mesh_dev); + lbs_deb_leave(LBS_DEB_MESH); +} + + + +/*************************************************************************** + * Sending and receiving + */ +struct net_device *lbs_mesh_set_dev(struct lbs_private *priv, + struct net_device *dev, struct rxpd *rxpd) +{ + if (priv->mesh_dev) { + if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) { + if (rxpd->rx_control & RxPD_MESH_FRAME) + dev = priv->mesh_dev; + } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) { + if (rxpd->u.bss.bss_num == MESH_IFACE_ID) + dev = priv->mesh_dev; + } + } + return dev; +} + + +void lbs_mesh_set_txpd(struct lbs_private *priv, + struct net_device *dev, struct txpd *txpd) +{ + if (dev == priv->mesh_dev) { + if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) + txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME); + else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) + txpd->u.bss.bss_num = MESH_IFACE_ID; + } +} + + +/*************************************************************************** + * Mesh command handling + */ + +int lbs_cmd_bt_access(struct cmd_ds_command *cmd, + u16 cmd_action, void *pdata_buf) +{ + struct cmd_ds_bt_access *bt_access = &cmd->params.bt; + lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); + + cmd->command = 
cpu_to_le16(CMD_BT_ACCESS); + cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) + + sizeof(struct cmd_header)); + cmd->result = 0; + bt_access->action = cpu_to_le16(cmd_action); + + switch (cmd_action) { + case CMD_ACT_BT_ACCESS_ADD: + memcpy(bt_access->addr1, pdata_buf, 2 * ETH_ALEN); + lbs_deb_hex(LBS_DEB_MESH, "BT_ADD: blinded MAC addr", + bt_access->addr1, 6); + break; + case CMD_ACT_BT_ACCESS_DEL: + memcpy(bt_access->addr1, pdata_buf, 1 * ETH_ALEN); + lbs_deb_hex(LBS_DEB_MESH, "BT_DEL: blinded MAC addr", + bt_access->addr1, 6); + break; + case CMD_ACT_BT_ACCESS_LIST: + bt_access->id = cpu_to_le32(*(u32 *) pdata_buf); + break; + case CMD_ACT_BT_ACCESS_RESET: + break; + case CMD_ACT_BT_ACCESS_SET_INVERT: + bt_access->id = cpu_to_le32(*(u32 *) pdata_buf); + break; + case CMD_ACT_BT_ACCESS_GET_INVERT: + break; + default: + break; + } + lbs_deb_leave(LBS_DEB_CMD); + return 0; +} + +int lbs_cmd_fwt_access(struct cmd_ds_command *cmd, + u16 cmd_action, void *pdata_buf) +{ + struct cmd_ds_fwt_access *fwt_access = &cmd->params.fwt; + lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); + + cmd->command = cpu_to_le16(CMD_FWT_ACCESS); + cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) + + sizeof(struct cmd_header)); + cmd->result = 0; + + if (pdata_buf) + memcpy(fwt_access, pdata_buf, sizeof(*fwt_access)); + else + memset(fwt_access, 0, sizeof(*fwt_access)); + + fwt_access->action = cpu_to_le16(cmd_action); + + lbs_deb_leave(LBS_DEB_CMD); + return 0; +} + +int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, + struct cmd_ds_mesh_access *cmd) +{ + int ret; + + lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); + + cmd->hdr.command = cpu_to_le16(CMD_MESH_ACCESS); + cmd->hdr.size = cpu_to_le16(sizeof(*cmd)); + cmd->hdr.result = 0; + + cmd->action = cpu_to_le16(cmd_action); + + ret = lbs_cmd_with_response(priv, CMD_MESH_ACCESS, cmd); + + lbs_deb_leave(LBS_DEB_CMD); + return ret; +} + +static int __lbs_mesh_config_send(struct lbs_private *priv, + struct cmd_ds_mesh_config *cmd, + uint16_t action, uint16_t type) +{ + int ret; + u16 command = CMD_MESH_CONFIG_OLD; + + lbs_deb_enter(LBS_DEB_CMD); + + /* + * Command id is 0xac for v10 FW along with mesh interface + * id in bits 14-13-12. + */ + if (priv->mesh_tlv == TLV_TYPE_MESH_ID) + command = CMD_MESH_CONFIG | + (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET); + + cmd->hdr.command = cpu_to_le16(command); + cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_mesh_config)); + cmd->hdr.result = 0; + + cmd->type = cpu_to_le16(type); + cmd->action = cpu_to_le16(action); + + ret = lbs_cmd_with_response(priv, command, cmd); + + lbs_deb_leave(LBS_DEB_CMD); + return ret; +} + +int lbs_mesh_config_send(struct lbs_private *priv, + struct cmd_ds_mesh_config *cmd, + uint16_t action, uint16_t type) +{ + int ret; + + if (!(priv->fwcapinfo & FW_CAPINFO_PERSISTENT_CONFIG)) + return -EOPNOTSUPP; + + ret = __lbs_mesh_config_send(priv, cmd, action, type); + return ret; +} + +/* This function is the CMD_MESH_CONFIG legacy function. It only handles the + * START and STOP actions. The extended actions supported by CMD_MESH_CONFIG + * are all handled by preparing a struct cmd_ds_mesh_config and passing it to + * lbs_mesh_config_send. 
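+ *
+ * Typical use, as elsewhere in this file, is bringing the mesh interface
+ * up or down on the currently configured channel:
+ *
+ *   lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, priv->channel);
+ *   lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, priv->channel);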
+ */ +int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan) +{ + struct cmd_ds_mesh_config cmd; + struct mrvl_meshie *ie; + DECLARE_SSID_BUF(ssid); + + memset(&cmd, 0, sizeof(cmd)); + cmd.channel = cpu_to_le16(chan); + ie = (struct mrvl_meshie *)cmd.data; + + switch (action) { + case CMD_ACT_MESH_CONFIG_START: + ie->id = WLAN_EID_GENERIC; + ie->val.oui[0] = 0x00; + ie->val.oui[1] = 0x50; + ie->val.oui[2] = 0x43; + ie->val.type = MARVELL_MESH_IE_TYPE; + ie->val.subtype = MARVELL_MESH_IE_SUBTYPE; + ie->val.version = MARVELL_MESH_IE_VERSION; + ie->val.active_protocol_id = MARVELL_MESH_PROTO_ID_HWMP; + ie->val.active_metric_id = MARVELL_MESH_METRIC_ID; + ie->val.mesh_capability = MARVELL_MESH_CAPABILITY; + ie->val.mesh_id_len = priv->mesh_ssid_len; + memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len); + ie->len = sizeof(struct mrvl_meshie_val) - + IEEE80211_MAX_SSID_LEN + priv->mesh_ssid_len; + cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val)); + break; + case CMD_ACT_MESH_CONFIG_STOP: + break; + default: + return -1; + } + lbs_deb_cmd("mesh config action %d type %x channel %d SSID %s\n", + action, priv->mesh_tlv, chan, + print_ssid(ssid, priv->mesh_ssid, priv->mesh_ssid_len)); + + return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv); +} + + + +/*************************************************************************** + * Persistent configuration support + */ + +static int mesh_get_default_parameters(struct device *dev, + struct mrvl_mesh_defaults *defs) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + struct cmd_ds_mesh_config cmd; + int ret; + + memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config)); + ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_GET, + CMD_TYPE_MESH_GET_DEFAULTS); + + if (ret) + return -EOPNOTSUPP; + + memcpy(defs, &cmd.data[0], sizeof(struct mrvl_mesh_defaults)); + + return 0; +} + +/** + * @brief Get function for sysfs attribute bootflag + */ +static ssize_t bootflag_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mrvl_mesh_defaults defs; + int ret; + + ret = mesh_get_default_parameters(dev, &defs); + + if (ret) + return ret; + + return snprintf(buf, 12, "%d\n", le32_to_cpu(defs.bootflag)); +} + +/** + * @brief Set function for sysfs attribute bootflag + */ +static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + struct cmd_ds_mesh_config cmd; + uint32_t datum; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + ret = sscanf(buf, "%d", &datum); + if ((ret != 1) || (datum > 1)) + return -EINVAL; + + *((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum); + cmd.length = cpu_to_le16(sizeof(uint32_t)); + ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, + CMD_TYPE_MESH_SET_BOOTFLAG); + if (ret) + return ret; + + return strlen(buf); +} + +/** + * @brief Get function for sysfs attribute boottime + */ +static ssize_t boottime_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mrvl_mesh_defaults defs; + int ret; + + ret = mesh_get_default_parameters(dev, &defs); + + if (ret) + return ret; + + return snprintf(buf, 12, "%d\n", defs.boottime); +} + +/** + * @brief Set function for sysfs attribute boottime + */ +static ssize_t boottime_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + struct cmd_ds_mesh_config cmd; + uint32_t datum; + 
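/*
 * Worked example for the mesh IE length set in lbs_mesh_config()
 * above: struct mrvl_meshie_val reserves IEEE80211_MAX_SSID_LEN (32)
 * bytes for the mesh id, but the IE only advertises the bytes actually
 * used, hence
 *
 *	ie->len = sizeof(struct mrvl_meshie_val) - 32 + mesh_ssid_len
 *
 * e.g. with an (assumed) 40-byte fixed IE value and a 6-byte mesh id
 * the resulting IE length is 40 - 32 + 6 = 14 bytes.
 */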
int ret; + + memset(&cmd, 0, sizeof(cmd)); + ret = sscanf(buf, "%d", &datum); + if ((ret != 1) || (datum > 255)) + return -EINVAL; + + /* A too small boot time will result in the device booting into + * standalone (no-host) mode before the host can take control of it, + * so the change will be hard to revert. This may be a desired + * feature (e.g to configure a very fast boot time for devices that + * will not be attached to a host), but dangerous. So I'm enforcing a + * lower limit of 20 seconds: remove and recompile the driver if this + * does not work for you. + */ + datum = (datum < 20) ? 20 : datum; + cmd.data[0] = datum; + cmd.length = cpu_to_le16(sizeof(uint8_t)); + ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, + CMD_TYPE_MESH_SET_BOOTTIME); + if (ret) + return ret; + + return strlen(buf); +} + +/** + * @brief Get function for sysfs attribute channel + */ +static ssize_t channel_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mrvl_mesh_defaults defs; + int ret; + + ret = mesh_get_default_parameters(dev, &defs); + + if (ret) + return ret; + + return snprintf(buf, 12, "%d\n", le16_to_cpu(defs.channel)); +} + +/** + * @brief Set function for sysfs attribute channel + */ +static ssize_t channel_set(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + struct cmd_ds_mesh_config cmd; + uint32_t datum; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + ret = sscanf(buf, "%d", &datum); + if (ret != 1 || datum < 1 || datum > 11) + return -EINVAL; + + *((__le16 *)&cmd.data[0]) = cpu_to_le16(datum); + cmd.length = cpu_to_le16(sizeof(uint16_t)); + ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, + CMD_TYPE_MESH_SET_DEF_CHANNEL); + if (ret) + return ret; + + return strlen(buf); +} + +/** + * @brief Get function for sysfs attribute mesh_id + */ +static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mrvl_mesh_defaults defs; + int maxlen; + int ret; + + ret = mesh_get_default_parameters(dev, &defs); + + if (ret) + return ret; + + if (defs.meshie.val.mesh_id_len > IEEE80211_MAX_SSID_LEN) { + lbs_pr_err("inconsistent mesh ID length"); + defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN; + } + + /* SSID not null terminated: reserve room for \0 + \n */ + maxlen = defs.meshie.val.mesh_id_len + 2; + maxlen = (PAGE_SIZE > maxlen) ? 
maxlen : PAGE_SIZE; + + defs.meshie.val.mesh_id[defs.meshie.val.mesh_id_len] = '\0'; + + return snprintf(buf, maxlen, "%s\n", defs.meshie.val.mesh_id); +} + +/** + * @brief Set function for sysfs attribute mesh_id + */ +static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cmd_ds_mesh_config cmd; + struct mrvl_mesh_defaults defs; + struct mrvl_meshie *ie; + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + int len; + int ret; + + if (count < 2 || count > IEEE80211_MAX_SSID_LEN + 1) + return -EINVAL; + + memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config)); + ie = (struct mrvl_meshie *) &cmd.data[0]; + + /* fetch all other Information Element parameters */ + ret = mesh_get_default_parameters(dev, &defs); + + cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie)); + + /* transfer IE elements */ + memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie)); + + len = count - 1; + memcpy(ie->val.mesh_id, buf, len); + /* SSID len */ + ie->val.mesh_id_len = len; + /* IE len */ + ie->len = sizeof(struct mrvl_meshie_val) - IEEE80211_MAX_SSID_LEN + len; + + ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, + CMD_TYPE_MESH_SET_MESH_IE); + if (ret) + return ret; + + return strlen(buf); +} + +/** + * @brief Get function for sysfs attribute protocol_id + */ +static ssize_t protocol_id_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mrvl_mesh_defaults defs; + int ret; + + ret = mesh_get_default_parameters(dev, &defs); + + if (ret) + return ret; + + return snprintf(buf, 5, "%d\n", defs.meshie.val.active_protocol_id); +} + +/** + * @brief Set function for sysfs attribute protocol_id + */ +static ssize_t protocol_id_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct cmd_ds_mesh_config cmd; + struct mrvl_mesh_defaults defs; + struct mrvl_meshie *ie; + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + uint32_t datum; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + ret = sscanf(buf, "%d", &datum); + if ((ret != 1) || (datum > 255)) + return -EINVAL; + + /* fetch all other Information Element parameters */ + ret = mesh_get_default_parameters(dev, &defs); + + cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie)); + + /* transfer IE elements */ + ie = (struct mrvl_meshie *) &cmd.data[0]; + memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie)); + /* update protocol id */ + ie->val.active_protocol_id = datum; + + ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, + CMD_TYPE_MESH_SET_MESH_IE); + if (ret) + return ret; + + return strlen(buf); +} + +/** + * @brief Get function for sysfs attribute metric_id + */ +static ssize_t metric_id_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mrvl_mesh_defaults defs; + int ret; + + ret = mesh_get_default_parameters(dev, &defs); + + if (ret) + return ret; + + return snprintf(buf, 5, "%d\n", defs.meshie.val.active_metric_id); +} + +/** + * @brief Set function for sysfs attribute metric_id + */ +static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cmd_ds_mesh_config cmd; + struct mrvl_mesh_defaults defs; + struct mrvl_meshie *ie; + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + uint32_t datum; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + ret = sscanf(buf, "%d", &datum); + if ((ret != 1) || (datum > 255)) + return -EINVAL; + + /* fetch all other Information Element parameters */ + ret = 
mesh_get_default_parameters(dev, &defs); + + cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie)); + + /* transfer IE elements */ + ie = (struct mrvl_meshie *) &cmd.data[0]; + memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie)); + /* update metric id */ + ie->val.active_metric_id = datum; + + ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, + CMD_TYPE_MESH_SET_MESH_IE); + if (ret) + return ret; + + return strlen(buf); +} + +/** + * @brief Get function for sysfs attribute capability + */ +static ssize_t capability_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mrvl_mesh_defaults defs; + int ret; + + ret = mesh_get_default_parameters(dev, &defs); + + if (ret) + return ret; + + return snprintf(buf, 5, "%d\n", defs.meshie.val.mesh_capability); +} + +/** + * @brief Set function for sysfs attribute capability + */ +static ssize_t capability_set(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cmd_ds_mesh_config cmd; + struct mrvl_mesh_defaults defs; + struct mrvl_meshie *ie; + struct lbs_private *priv = to_net_dev(dev)->ml_priv; + uint32_t datum; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + ret = sscanf(buf, "%d", &datum); + if ((ret != 1) || (datum > 255)) + return -EINVAL; + + /* fetch all other Information Element parameters */ + ret = mesh_get_default_parameters(dev, &defs); + + cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie)); + + /* transfer IE elements */ + ie = (struct mrvl_meshie *) &cmd.data[0]; + memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie)); + /* update value */ + ie->val.mesh_capability = datum; + + ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, + CMD_TYPE_MESH_SET_MESH_IE); + if (ret) + return ret; + + return strlen(buf); +} + + +static DEVICE_ATTR(bootflag, 0644, bootflag_get, bootflag_set); +static DEVICE_ATTR(boottime, 0644, boottime_get, boottime_set); +static DEVICE_ATTR(channel, 0644, channel_get, channel_set); +static DEVICE_ATTR(mesh_id, 0644, mesh_id_get, mesh_id_set); +static DEVICE_ATTR(protocol_id, 0644, protocol_id_get, protocol_id_set); +static DEVICE_ATTR(metric_id, 0644, metric_id_get, metric_id_set); +static DEVICE_ATTR(capability, 0644, capability_get, capability_set); + +static struct attribute *boot_opts_attrs[] = { + &dev_attr_bootflag.attr, + &dev_attr_boottime.attr, + &dev_attr_channel.attr, + NULL +}; + +static struct attribute_group boot_opts_group = { + .name = "boot_options", + .attrs = boot_opts_attrs, +}; + +static struct attribute *mesh_ie_attrs[] = { + &dev_attr_mesh_id.attr, + &dev_attr_protocol_id.attr, + &dev_attr_metric_id.attr, + &dev_attr_capability.attr, + NULL +}; + +static struct attribute_group mesh_ie_group = { + .name = "mesh_ie", + .attrs = mesh_ie_attrs, +}; + +void lbs_persist_config_init(struct net_device *dev) +{ + int ret; + ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group); + ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group); +} + +void lbs_persist_config_remove(struct net_device *dev) +{ + sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group); + sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group); +} + + + +/*************************************************************************** + * Ethtool related + */ + +static const char *mesh_stat_strings[] = { + "drop_duplicate_bcast", + "drop_ttl_zero", + "drop_no_fwd_route", + "drop_no_buffers", + "fwded_unicast_cnt", + "fwded_bcast_cnt", + "drop_blind_table", + "tx_failed_cnt" +}; + +void lbs_mesh_ethtool_get_stats(struct net_device *dev, + 
struct ethtool_stats *stats, uint64_t *data) +{ + struct lbs_private *priv = dev->ml_priv; + struct cmd_ds_mesh_access mesh_access; + int ret; + + lbs_deb_enter(LBS_DEB_ETHTOOL); + + /* Get Mesh Statistics */ + ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access); + + if (ret) { + memset(data, 0, MESH_STATS_NUM*(sizeof(uint64_t))); + return; + } + + priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); + priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); + priv->mstats.fwd_drop_noroute = le32_to_cpu(mesh_access.data[2]); + priv->mstats.fwd_drop_nobuf = le32_to_cpu(mesh_access.data[3]); + priv->mstats.fwd_unicast_cnt = le32_to_cpu(mesh_access.data[4]); + priv->mstats.fwd_bcast_cnt = le32_to_cpu(mesh_access.data[5]); + priv->mstats.drop_blind = le32_to_cpu(mesh_access.data[6]); + priv->mstats.tx_failed_cnt = le32_to_cpu(mesh_access.data[7]); + + data[0] = priv->mstats.fwd_drop_rbt; + data[1] = priv->mstats.fwd_drop_ttl; + data[2] = priv->mstats.fwd_drop_noroute; + data[3] = priv->mstats.fwd_drop_nobuf; + data[4] = priv->mstats.fwd_unicast_cnt; + data[5] = priv->mstats.fwd_bcast_cnt; + data[6] = priv->mstats.drop_blind; + data[7] = priv->mstats.tx_failed_cnt; + + lbs_deb_enter(LBS_DEB_ETHTOOL); +} + +int lbs_mesh_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + struct lbs_private *priv = dev->ml_priv; + + if (sset == ETH_SS_STATS && dev == priv->mesh_dev) + return MESH_STATS_NUM; + + return -EOPNOTSUPP; +} + +void lbs_mesh_ethtool_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *s) +{ + int i; + + lbs_deb_enter(LBS_DEB_ETHTOOL); + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < MESH_STATS_NUM; i++) { + memcpy(s + i * ETH_GSTRING_LEN, + mesh_stat_strings[i], + ETH_GSTRING_LEN); + } + break; + } + lbs_deb_enter(LBS_DEB_ETHTOOL); +} diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h new file mode 100644 index 0000000..e257330 --- /dev/null +++ b/drivers/net/wireless/libertas/mesh.h @@ -0,0 +1,110 @@ +/** + * Contains all definitions needed for the Libertas' MESH implementation. 
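/*
 * Sketch of how the three ethtool helpers above are typically wired
 * up. The real hook-up is done in the driver's shared ethtool_ops
 * (lbs_ethtool_ops, defined elsewhere), so the struct below is only
 * illustrative. What matters is that get_strings() and
 * get_ethtool_stats() use the same index order, which is why
 * data[0..7] above mirrors mesh_stat_strings[0..7] one to one.
 */
static const struct ethtool_ops ex_mesh_ethtool_ops = {
	.get_sset_count		= lbs_mesh_ethtool_get_sset_count,
	.get_strings		= lbs_mesh_ethtool_get_strings,
	.get_ethtool_stats	= lbs_mesh_ethtool_get_stats,
};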
+ */ +#ifndef _LBS_MESH_H_ +#define _LBS_MESH_H_ + + +#include +#include + + +#ifdef CONFIG_LIBERTAS_MESH + +/* Mesh statistics */ +struct lbs_mesh_stats { + u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */ + u32 fwd_unicast_cnt; /* Fwd: Unicast counter */ + u32 fwd_drop_ttl; /* Fwd: TTL zero */ + u32 fwd_drop_rbt; /* Fwd: Recently Broadcasted */ + u32 fwd_drop_noroute; /* Fwd: No route to Destination */ + u32 fwd_drop_nobuf; /* Fwd: Run out of internal buffers */ + u32 drop_blind; /* Rx: Dropped by blinding table */ + u32 tx_failed_cnt; /* Tx: Failed transmissions */ +}; + + +struct net_device; +struct lbs_private; + +int lbs_init_mesh(struct lbs_private *priv); +int lbs_deinit_mesh(struct lbs_private *priv); + +int lbs_add_mesh(struct lbs_private *priv); +void lbs_remove_mesh(struct lbs_private *priv); + + +/* Sending / Receiving */ + +struct rxpd; +struct txpd; + +struct net_device *lbs_mesh_set_dev(struct lbs_private *priv, + struct net_device *dev, struct rxpd *rxpd); +void lbs_mesh_set_txpd(struct lbs_private *priv, + struct net_device *dev, struct txpd *txpd); + + +/* Command handling */ + +struct cmd_ds_command; +struct cmd_ds_mesh_access; +struct cmd_ds_mesh_config; + +int lbs_cmd_bt_access(struct cmd_ds_command *cmd, + u16 cmd_action, void *pdata_buf); +int lbs_cmd_fwt_access(struct cmd_ds_command *cmd, + u16 cmd_action, void *pdata_buf); +int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, + struct cmd_ds_mesh_access *cmd); +int lbs_mesh_config_send(struct lbs_private *priv, + struct cmd_ds_mesh_config *cmd, + uint16_t action, uint16_t type); +int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan); + + + +/* Persistent configuration */ + +void lbs_persist_config_init(struct net_device *net); +void lbs_persist_config_remove(struct net_device *net); + + +/* WEXT handler */ + +extern struct iw_handler_def mesh_handler_def; + + +/* Ethtool statistics */ + +struct ethtool_stats; + +void lbs_mesh_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, uint64_t *data); +int lbs_mesh_ethtool_get_sset_count(struct net_device *dev, int sset); +void lbs_mesh_ethtool_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *s); + + +/* Accessors */ + +#define lbs_mesh_open(priv) (priv->mesh_open) +#define lbs_mesh_connected(priv) (priv->mesh_connect_status == LBS_CONNECTED) + +#else + +#define lbs_init_mesh(priv) +#define lbs_deinit_mesh(priv) +#define lbs_add_mesh(priv) +#define lbs_remove_mesh(priv) +#define lbs_mesh_set_dev(priv, dev, rxpd) (dev) +#define lbs_mesh_set_txpd(priv, dev, txpd) +#define lbs_mesh_config(priv, enable, chan) +#define lbs_mesh_open(priv) (0) +#define lbs_mesh_connected(priv) (0) + +#endif + + + +#endif diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h new file mode 100644 index 0000000..d16b264 --- /dev/null +++ b/drivers/net/wireless/libertas/radiotap.h @@ -0,0 +1,44 @@ +#include + +struct tx_radiotap_hdr { + struct ieee80211_radiotap_header hdr; + u8 rate; + u8 txpower; + u8 rts_retries; + u8 data_retries; +} __attribute__ ((packed)); + +#define TX_RADIOTAP_PRESENT ( \ + (1 << IEEE80211_RADIOTAP_RATE) | \ + (1 << IEEE80211_RADIOTAP_DBM_TX_POWER) | \ + (1 << IEEE80211_RADIOTAP_RTS_RETRIES) | \ + (1 << IEEE80211_RADIOTAP_DATA_RETRIES) | \ + 0) + +#define IEEE80211_FC_VERSION_MASK 0x0003 +#define IEEE80211_FC_TYPE_MASK 0x000c +#define IEEE80211_FC_TYPE_MGT 0x0000 +#define IEEE80211_FC_TYPE_CTL 0x0004 +#define IEEE80211_FC_TYPE_DATA 0x0008 +#define 
IEEE80211_FC_SUBTYPE_MASK 0x00f0 +#define IEEE80211_FC_TOFROMDS_MASK 0x0300 +#define IEEE80211_FC_TODS_MASK 0x0100 +#define IEEE80211_FC_FROMDS_MASK 0x0200 +#define IEEE80211_FC_NODS 0x0000 +#define IEEE80211_FC_TODS 0x0100 +#define IEEE80211_FC_FROMDS 0x0200 +#define IEEE80211_FC_DSTODS 0x0300 + +struct rx_radiotap_hdr { + struct ieee80211_radiotap_header hdr; + u8 flags; + u8 rate; + u8 antsignal; +} __attribute__ ((packed)); + +#define RX_RADIOTAP_PRESENT ( \ + (1 << IEEE80211_RADIOTAP_FLAGS) | \ + (1 << IEEE80211_RADIOTAP_RATE) | \ + (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |\ + 0) + diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c new file mode 100644 index 0000000..2daf8ff --- /dev/null +++ b/drivers/net/wireless/libertas/rx.c @@ -0,0 +1,376 @@ +/** + * This file contains the handling of RX in wlan driver. + */ +#include +#include + +#include "host.h" +#include "radiotap.h" +#include "decl.h" +#include "dev.h" +#include "wext.h" + +struct eth803hdr { + u8 dest_addr[6]; + u8 src_addr[6]; + u16 h803_len; +} __attribute__ ((packed)); + +struct rfc1042hdr { + u8 llc_dsap; + u8 llc_ssap; + u8 llc_ctrl; + u8 snap_oui[3]; + u16 snap_type; +} __attribute__ ((packed)); + +struct rxpackethdr { + struct eth803hdr eth803_hdr; + struct rfc1042hdr rfc1042_hdr; +} __attribute__ ((packed)); + +struct rx80211packethdr { + struct rxpd rx_pd; + void *eth80211_hdr; +} __attribute__ ((packed)); + +static int process_rxed_802_11_packet(struct lbs_private *priv, + struct sk_buff *skb); + +/** + * @brief This function computes the avgSNR . + * + * @param priv A pointer to struct lbs_private structure + * @return avgSNR + */ +static u8 lbs_getavgsnr(struct lbs_private *priv) +{ + u8 i; + u16 temp = 0; + if (priv->numSNRNF == 0) + return 0; + for (i = 0; i < priv->numSNRNF; i++) + temp += priv->rawSNR[i]; + return (u8) (temp / priv->numSNRNF); + +} + +/** + * @brief This function computes the AvgNF + * + * @param priv A pointer to struct lbs_private structure + * @return AvgNF + */ +static u8 lbs_getavgnf(struct lbs_private *priv) +{ + u8 i; + u16 temp = 0; + if (priv->numSNRNF == 0) + return 0; + for (i = 0; i < priv->numSNRNF; i++) + temp += priv->rawNF[i]; + return (u8) (temp / priv->numSNRNF); + +} + +/** + * @brief This function save the raw SNR/NF to our internel buffer + * + * @param priv A pointer to struct lbs_private structure + * @param prxpd A pointer to rxpd structure of received packet + * @return n/a + */ +static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd) +{ + if (priv->numSNRNF < DEFAULT_DATA_AVG_FACTOR) + priv->numSNRNF++; + priv->rawSNR[priv->nextSNRNF] = p_rx_pd->snr; + priv->rawNF[priv->nextSNRNF] = p_rx_pd->nf; + priv->nextSNRNF++; + if (priv->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR) + priv->nextSNRNF = 0; + return; +} + +/** + * @brief This function computes the RSSI in received packet. 
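/*
 * The helpers above keep a simple sliding window: lbs_save_rawSNRNF()
 * writes each new SNR/NF sample into a circular buffer of
 * DEFAULT_DATA_AVG_FACTOR entries, and lbs_getavgsnr()/lbs_getavgnf()
 * return the arithmetic mean of the samples collected so far. A
 * stand-alone sketch of the same scheme (window size assumed to be 8):
 */
#define EX_AVG_FACTOR 8

struct ex_avg {
	u16 samples[EX_AVG_FACTOR];
	u8 count;	/* valid slots, saturates at EX_AVG_FACTOR */
	u8 next;	/* next slot to overwrite */
};

static void ex_avg_add(struct ex_avg *a, u16 sample)
{
	if (a->count < EX_AVG_FACTOR)
		a->count++;
	a->samples[a->next] = sample;
	if (++a->next >= EX_AVG_FACTOR)
		a->next = 0;
}

static u16 ex_avg_get(const struct ex_avg *a)
{
	u32 sum = 0;
	int i;

	if (!a->count)
		return 0;
	for (i = 0; i < a->count; i++)
		sum += a->samples[i];
	return sum / a->count;
}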
+ * + * @param priv A pointer to struct lbs_private structure + * @param prxpd A pointer to rxpd structure of received packet + * @return n/a + */ +static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd) +{ + + lbs_deb_enter(LBS_DEB_RX); + + lbs_deb_rx("rxpd: SNR %d, NF %d\n", p_rx_pd->snr, p_rx_pd->nf); + lbs_deb_rx("before computing SNR: SNR-avg = %d, NF-avg = %d\n", + priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE, + priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE); + + priv->SNR[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->snr; + priv->NF[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->nf; + lbs_save_rawSNRNF(priv, p_rx_pd); + + priv->SNR[TYPE_RXPD][TYPE_AVG] = lbs_getavgsnr(priv) * AVG_SCALE; + priv->NF[TYPE_RXPD][TYPE_AVG] = lbs_getavgnf(priv) * AVG_SCALE; + lbs_deb_rx("after computing SNR: SNR-avg = %d, NF-avg = %d\n", + priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE, + priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE); + + priv->RSSI[TYPE_RXPD][TYPE_NOAVG] = + CAL_RSSI(priv->SNR[TYPE_RXPD][TYPE_NOAVG], + priv->NF[TYPE_RXPD][TYPE_NOAVG]); + + priv->RSSI[TYPE_RXPD][TYPE_AVG] = + CAL_RSSI(priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE, + priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE); + + lbs_deb_leave(LBS_DEB_RX); +} + +/** + * @brief This function processes received packet and forwards it + * to kernel/upper layer + * + * @param priv A pointer to struct lbs_private + * @param skb A pointer to skb which includes the received packet + * @return 0 or -1 + */ +int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) +{ + int ret = 0; + struct net_device *dev = priv->dev; + struct rxpackethdr *p_rx_pkt; + struct rxpd *p_rx_pd; + int hdrchop; + struct ethhdr *p_ethhdr; + const u8 rfc1042_eth_hdr[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; + + lbs_deb_enter(LBS_DEB_RX); + + BUG_ON(!skb); + + skb->ip_summed = CHECKSUM_NONE; + + if (priv->monitormode) + return process_rxed_802_11_packet(priv, skb); + + p_rx_pd = (struct rxpd *) skb->data; + p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd + + le32_to_cpu(p_rx_pd->pkt_ptr)); + + dev = lbs_mesh_set_dev(priv, dev, p_rx_pd); + + lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, + min_t(unsigned int, skb->len, 100)); + + if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { + lbs_deb_rx("rx err: frame received with bad length\n"); + dev->stats.rx_length_errors++; + ret = 0; + dev_kfree_skb(skb); + goto done; + } + + lbs_deb_rx("rx data: skb->len - pkt_ptr = %d-%zd = %zd\n", + skb->len, (size_t)le32_to_cpu(p_rx_pd->pkt_ptr), + skb->len - (size_t)le32_to_cpu(p_rx_pd->pkt_ptr)); + + lbs_deb_hex(LBS_DEB_RX, "RX Data: Dest", p_rx_pkt->eth803_hdr.dest_addr, + sizeof(p_rx_pkt->eth803_hdr.dest_addr)); + lbs_deb_hex(LBS_DEB_RX, "RX Data: Src", p_rx_pkt->eth803_hdr.src_addr, + sizeof(p_rx_pkt->eth803_hdr.src_addr)); + + if (memcmp(&p_rx_pkt->rfc1042_hdr, + rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)) == 0) { + /* + * Replace the 803 header and rfc1042 header (llc/snap) with an + * EthernetII header, keep the src/dst and snap_type (ethertype) + * + * The firmware only passes up SNAP frames converting + * all RX Data from 802.11 to 802.2/LLC/SNAP frames. + * + * To create the Ethernet II, just move the src, dst address right + * before the snap_type. 
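/*
 * Worked example for the pointer arithmetic used just below: the 802.3
 * header is dest(6) + src(6) + length(2) = 14 bytes and the LLC/SNAP
 * header is 3 + 3 + 2 = 8 bytes. The Ethernet II header that replaces
 * them needs 6 + 6 + 2 = 14 bytes ending exactly at snap_type, so it
 * starts 14 + 8 - 14 = 8 bytes into the old 802.3 header; everything
 * in front of that point is dropped together with the rxpd by the
 * skb_pull(skb, hdrchop) further down.
 */
static inline unsigned int ex_ethii_offset(void)
{
	const unsigned int eth803_len   = 6 + 6 + 2;	/* dest + src + length    */
	const unsigned int llc_snap_len = 3 + 3 + 2;	/* LLC + SNAP OUI + type  */
	const unsigned int ethii_len    = 6 + 6 + 2;	/* dest + src + ethertype */

	return eth803_len + llc_snap_len - ethii_len;	/* = 8 */
}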
+ */ + p_ethhdr = (struct ethhdr *) + ((u8 *) & p_rx_pkt->eth803_hdr + + sizeof(p_rx_pkt->eth803_hdr) + sizeof(p_rx_pkt->rfc1042_hdr) + - sizeof(p_rx_pkt->eth803_hdr.dest_addr) + - sizeof(p_rx_pkt->eth803_hdr.src_addr) + - sizeof(p_rx_pkt->rfc1042_hdr.snap_type)); + + memcpy(p_ethhdr->h_source, p_rx_pkt->eth803_hdr.src_addr, + sizeof(p_ethhdr->h_source)); + memcpy(p_ethhdr->h_dest, p_rx_pkt->eth803_hdr.dest_addr, + sizeof(p_ethhdr->h_dest)); + + /* Chop off the rxpd + the excess memory from the 802.2/llc/snap header + * that was removed + */ + hdrchop = (u8 *)p_ethhdr - (u8 *)p_rx_pd; + } else { + lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP", + (u8 *) & p_rx_pkt->rfc1042_hdr, + sizeof(p_rx_pkt->rfc1042_hdr)); + + /* Chop off the rxpd */ + hdrchop = (u8 *)&p_rx_pkt->eth803_hdr - (u8 *)p_rx_pd; + } + + /* Chop off the leading header bytes so the skb points to the start of + * either the reconstructed EthII frame or the 802.2/llc/snap frame + */ + skb_pull(skb, hdrchop); + + /* Take the data rate from the rxpd structure + * only if the rate is auto + */ + if (priv->enablehwauto) + priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate); + + lbs_compute_rssi(priv, p_rx_pd); + + lbs_deb_rx("rx data: size of actual packet %d\n", skb->len); + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + + skb->protocol = eth_type_trans(skb, dev); + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); + + ret = 0; +done: + lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret); + return ret; +} +EXPORT_SYMBOL_GPL(lbs_process_rxed_packet); + +/** + * @brief This function converts Tx/Rx rates from the Marvell WLAN format + * (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s) + * + * @param rate Input rate + * @return Output Rate (0 if invalid) + */ +static u8 convert_mv_rate_to_radiotap(u8 rate) +{ + switch (rate) { + case 0: /* 1 Mbps */ + return 2; + case 1: /* 2 Mbps */ + return 4; + case 2: /* 5.5 Mbps */ + return 11; + case 3: /* 11 Mbps */ + return 22; + /* case 4: reserved */ + case 5: /* 6 Mbps */ + return 12; + case 6: /* 9 Mbps */ + return 18; + case 7: /* 12 Mbps */ + return 24; + case 8: /* 18 Mbps */ + return 36; + case 9: /* 24 Mbps */ + return 48; + case 10: /* 36 Mbps */ + return 72; + case 11: /* 48 Mbps */ + return 96; + case 12: /* 54 Mbps */ + return 108; + } + lbs_pr_alert("Invalid Marvell WLAN rate %i\n", rate); + return 0; +} + +/** + * @brief This function processes a received 802.11 packet and forwards it + * to kernel/upper layer + * + * @param priv A pointer to struct lbs_private + * @param skb A pointer to skb which includes the received packet + * @return 0 or -1 + */ +static int process_rxed_802_11_packet(struct lbs_private *priv, + struct sk_buff *skb) +{ + int ret = 0; + struct net_device *dev = priv->dev; + struct rx80211packethdr *p_rx_pkt; + struct rxpd *prxpd; + struct rx_radiotap_hdr radiotap_hdr; + struct rx_radiotap_hdr *pradiotap_hdr; + + lbs_deb_enter(LBS_DEB_RX); + + p_rx_pkt = (struct rx80211packethdr *) skb->data; + prxpd = &p_rx_pkt->rx_pd; + + // lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); + + if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { + lbs_deb_rx("rx err: frame received with bad length\n"); + dev->stats.rx_length_errors++; + ret = -EINVAL; + kfree_skb(skb); + goto done; + } + + lbs_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n", + skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd)); + + /* create the exported radio header */ + + /* radiotap header */ + 
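/*
 * The switch in convert_mv_rate_to_radiotap() above maps the Marvell
 * firmware rate index to radiotap's 500 kb/s units (1 Mb/s -> 2,
 * 2 -> 4, 5.5 -> 11, 11 -> 22, 6..54 Mb/s -> 12..108); index 4 is
 * reserved. A lookup-table sketch of the same mapping, where 0 means
 * "invalid rate":
 */
static const u8 ex_mv_rate_to_radiotap[] = {
	2, 4, 11, 22, 0, 12, 18, 24, 36, 48, 72, 96, 108
};

static inline u8 ex_rate_to_radiotap(u8 mv_rate)
{
	if (mv_rate >= ARRAY_SIZE(ex_mv_rate_to_radiotap))
		return 0;
	return ex_mv_rate_to_radiotap[mv_rate];
}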
radiotap_hdr.hdr.it_version = 0; + /* XXX must check this value for pad */ + radiotap_hdr.hdr.it_pad = 0; + radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr)); + radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT); + radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate); + /* XXX must check no carryout */ + radiotap_hdr.antsignal = prxpd->snr + prxpd->nf; + + /* chop the rxpd */ + skb_pull(skb, sizeof(struct rxpd)); + + /* add space for the new radio header */ + if ((skb_headroom(skb) < sizeof(struct rx_radiotap_hdr)) && + pskb_expand_head(skb, sizeof(struct rx_radiotap_hdr), 0, GFP_ATOMIC)) { + lbs_pr_alert("%s: couldn't pskb_expand_head\n", __func__); + ret = -ENOMEM; + kfree_skb(skb); + goto done; + } + + pradiotap_hdr = (void *)skb_push(skb, sizeof(struct rx_radiotap_hdr)); + memcpy(pradiotap_hdr, &radiotap_hdr, sizeof(struct rx_radiotap_hdr)); + + /* Take the data rate from the rxpd structure + * only if the rate is auto + */ + if (priv->enablehwauto) + priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate); + + lbs_compute_rssi(priv, prxpd); + + lbs_deb_rx("rx data: size of actual packet %d\n", skb->len); + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + + skb->protocol = eth_type_trans(skb, priv->rtap_net_dev); + netif_rx(skb); + + ret = 0; + +done: + lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret); + return ret; +} diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c new file mode 100644 index 0000000..220361e --- /dev/null +++ b/drivers/net/wireless/libertas/scan.c @@ -0,0 +1,1353 @@ +/** + * Functions implementing wlan scan IOCTL and firmware command APIs + * + * IOCTL handlers as well as command preperation and response routines + * for sending scan commands to the firmware. + */ +#include +#include +#include +#include +#include +#include + +#include "host.h" +#include "dev.h" +#include "scan.h" +#include "assoc.h" +#include "wext.h" +#include "cmd.h" + +//! Approximate amount of data needed to pass a scan result back to iwlist +#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \ + + IEEE80211_MAX_SSID_LEN \ + + IW_EV_UINT_LEN \ + + IW_EV_FREQ_LEN \ + + IW_EV_QUAL_LEN \ + + IEEE80211_MAX_SSID_LEN \ + + IW_EV_PARAM_LEN \ + + 40) /* 40 for WPAIE */ + +//! Memory needed to store a max sized channel List TLV for a firmware scan +#define CHAN_TLV_MAX_SIZE (sizeof(struct mrvl_ie_header) \ + + (MRVDRV_MAX_CHANNELS_PER_SCAN \ + * sizeof(struct chanscanparamset))) + +//! Memory needed to store a max number/size SSID TLV for a firmware scan +#define SSID_TLV_MAX_SIZE (1 * sizeof(struct mrvl_ie_ssid_param_set)) + +//! Maximum memory needed for a cmd_ds_802_11_scan with all TLVs at max +#define MAX_SCAN_CFG_ALLOC (sizeof(struct cmd_ds_802_11_scan) \ + + CHAN_TLV_MAX_SIZE + SSID_TLV_MAX_SIZE) + +//! The maximum number of channels the firmware can scan per command +#define MRVDRV_MAX_CHANNELS_PER_SCAN 14 + +/** + * @brief Number of channels to scan per firmware scan command issuance. + * + * Number restricted to prevent hitting the limit on the amount of scan data + * returned in a single firmware scan command. + */ +#define MRVDRV_CHANNELS_PER_SCAN_CMD 4 + +//! Scan time specified in the channel TLV for each channel for passive scans +#define MRVDRV_PASSIVE_SCAN_CHAN_TIME 100 + +//! 
Scan time specified in the channel TLV for each channel for active scans +#define MRVDRV_ACTIVE_SCAN_CHAN_TIME 100 + +#define DEFAULT_MAX_SCAN_AGE (15 * HZ) + +static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy, + struct cmd_header *resp); + +/*********************************************************************/ +/* */ +/* Misc helper functions */ +/* */ +/*********************************************************************/ + +/** + * @brief Unsets the MSB on basic rates + * + * Scan through an array and unset the MSB for basic data rates. + * + * @param rates buffer of data rates + * @param len size of buffer + */ +static void lbs_unset_basic_rate_flags(u8 *rates, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + rates[i] &= 0x7f; +} + + +static inline void clear_bss_descriptor(struct bss_descriptor *bss) +{ + /* Don't blow away ->list, just BSS data */ + memset(bss, 0, offsetof(struct bss_descriptor, list)); +} + +/** + * @brief Compare two SSIDs + * + * @param ssid1 A pointer to ssid to compare + * @param ssid2 A pointer to ssid to compare + * + * @return 0: ssid is same, otherwise is different + */ +int lbs_ssid_cmp(uint8_t *ssid1, uint8_t ssid1_len, uint8_t *ssid2, + uint8_t ssid2_len) +{ + if (ssid1_len != ssid2_len) + return -1; + + return memcmp(ssid1, ssid2, ssid1_len); +} + +static inline int is_same_network(struct bss_descriptor *src, + struct bss_descriptor *dst) +{ + /* A network is only a duplicate if the channel, BSSID, and ESSID + * all match. We treat all with the same BSSID and channel + * as one network */ + return ((src->ssid_len == dst->ssid_len) && + (src->channel == dst->channel) && + !compare_ether_addr(src->bssid, dst->bssid) && + !memcmp(src->ssid, dst->ssid, src->ssid_len)); +} + + + +/*********************************************************************/ +/* */ +/* Region channel support */ +/* */ +/*********************************************************************/ + +#define LBS_TX_PWR_DEFAULT 20 /*100mW */ +#define LBS_TX_PWR_US_DEFAULT 20 /*100mW */ +#define LBS_TX_PWR_JP_DEFAULT 16 /*50mW */ +#define LBS_TX_PWR_FR_DEFAULT 20 /*100mW */ +#define LBS_TX_PWR_EMEA_DEFAULT 20 /*100mW */ + +/* Format { channel, frequency (MHz), maxtxpower } */ +/* band: 'B/G', region: USA FCC/Canada IC */ +static struct chan_freq_power channel_freq_power_US_BG[] = { + {1, 2412, LBS_TX_PWR_US_DEFAULT}, + {2, 2417, LBS_TX_PWR_US_DEFAULT}, + {3, 2422, LBS_TX_PWR_US_DEFAULT}, + {4, 2427, LBS_TX_PWR_US_DEFAULT}, + {5, 2432, LBS_TX_PWR_US_DEFAULT}, + {6, 2437, LBS_TX_PWR_US_DEFAULT}, + {7, 2442, LBS_TX_PWR_US_DEFAULT}, + {8, 2447, LBS_TX_PWR_US_DEFAULT}, + {9, 2452, LBS_TX_PWR_US_DEFAULT}, + {10, 2457, LBS_TX_PWR_US_DEFAULT}, + {11, 2462, LBS_TX_PWR_US_DEFAULT} +}; + +/* band: 'B/G', region: Europe ETSI */ +static struct chan_freq_power channel_freq_power_EU_BG[] = { + {1, 2412, LBS_TX_PWR_EMEA_DEFAULT}, + {2, 2417, LBS_TX_PWR_EMEA_DEFAULT}, + {3, 2422, LBS_TX_PWR_EMEA_DEFAULT}, + {4, 2427, LBS_TX_PWR_EMEA_DEFAULT}, + {5, 2432, LBS_TX_PWR_EMEA_DEFAULT}, + {6, 2437, LBS_TX_PWR_EMEA_DEFAULT}, + {7, 2442, LBS_TX_PWR_EMEA_DEFAULT}, + {8, 2447, LBS_TX_PWR_EMEA_DEFAULT}, + {9, 2452, LBS_TX_PWR_EMEA_DEFAULT}, + {10, 2457, LBS_TX_PWR_EMEA_DEFAULT}, + {11, 2462, LBS_TX_PWR_EMEA_DEFAULT}, + {12, 2467, LBS_TX_PWR_EMEA_DEFAULT}, + {13, 2472, LBS_TX_PWR_EMEA_DEFAULT} +}; + +/* band: 'B/G', region: Spain */ +static struct chan_freq_power channel_freq_power_SPN_BG[] = { + {10, 2457, LBS_TX_PWR_DEFAULT}, + {11, 2462, LBS_TX_PWR_DEFAULT} +}; + +/* band: 'B/G', 
region: France */ +static struct chan_freq_power channel_freq_power_FR_BG[] = { + {10, 2457, LBS_TX_PWR_FR_DEFAULT}, + {11, 2462, LBS_TX_PWR_FR_DEFAULT}, + {12, 2467, LBS_TX_PWR_FR_DEFAULT}, + {13, 2472, LBS_TX_PWR_FR_DEFAULT} +}; + +/* band: 'B/G', region: Japan */ +static struct chan_freq_power channel_freq_power_JPN_BG[] = { + {1, 2412, LBS_TX_PWR_JP_DEFAULT}, + {2, 2417, LBS_TX_PWR_JP_DEFAULT}, + {3, 2422, LBS_TX_PWR_JP_DEFAULT}, + {4, 2427, LBS_TX_PWR_JP_DEFAULT}, + {5, 2432, LBS_TX_PWR_JP_DEFAULT}, + {6, 2437, LBS_TX_PWR_JP_DEFAULT}, + {7, 2442, LBS_TX_PWR_JP_DEFAULT}, + {8, 2447, LBS_TX_PWR_JP_DEFAULT}, + {9, 2452, LBS_TX_PWR_JP_DEFAULT}, + {10, 2457, LBS_TX_PWR_JP_DEFAULT}, + {11, 2462, LBS_TX_PWR_JP_DEFAULT}, + {12, 2467, LBS_TX_PWR_JP_DEFAULT}, + {13, 2472, LBS_TX_PWR_JP_DEFAULT}, + {14, 2484, LBS_TX_PWR_JP_DEFAULT} +}; + +/** + * the structure for channel, frequency and power + */ +struct region_cfp_table { + u8 region; + struct chan_freq_power *cfp_BG; + int cfp_no_BG; +}; + +/** + * the structure for the mapping between region and CFP + */ +static struct region_cfp_table region_cfp_table[] = { + {0x10, /*US FCC */ + channel_freq_power_US_BG, + ARRAY_SIZE(channel_freq_power_US_BG), + } + , + {0x20, /*CANADA IC */ + channel_freq_power_US_BG, + ARRAY_SIZE(channel_freq_power_US_BG), + } + , + {0x30, /*EU*/ channel_freq_power_EU_BG, + ARRAY_SIZE(channel_freq_power_EU_BG), + } + , + {0x31, /*SPAIN*/ channel_freq_power_SPN_BG, + ARRAY_SIZE(channel_freq_power_SPN_BG), + } + , + {0x32, /*FRANCE*/ channel_freq_power_FR_BG, + ARRAY_SIZE(channel_freq_power_FR_BG), + } + , + {0x40, /*JAPAN*/ channel_freq_power_JPN_BG, + ARRAY_SIZE(channel_freq_power_JPN_BG), + } + , +/*Add new region here */ +}; + +/** + * @brief This function finds the CFP in + * region_cfp_table based on region and band parameter. 
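/*
 * Usage sketch for the lookup helper below: given the firmware region
 * code, the caller gets back the matching channel/frequency table and
 * its length, e.g. 0x30 (EU) resolves to channel_freq_power_EU_BG with
 * 13 entries and 0x40 (Japan) to the 14-entry table. The helper name
 * here is illustrative only.
 */
static void ex_dump_region_channels(u8 region)
{
	struct chan_freq_power *cfp;
	int nr = 0, i;

	cfp = lbs_get_region_cfp_table(region, &nr);
	if (!cfp)		/* unknown region code */
		return;

	for (i = 0; i < nr; i++)
		lbs_deb_main("channel %d -> %d MHz\n",
			     cfp[i].channel, cfp[i].freq);
}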
+ * + * @param region The region code + * @param band The band + * @param cfp_no A pointer to CFP number + * @return A pointer to CFP + */ +static struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no) +{ + int i, end; + + lbs_deb_enter(LBS_DEB_MAIN); + + end = ARRAY_SIZE(region_cfp_table); + + for (i = 0; i < end ; i++) { + lbs_deb_main("region_cfp_table[i].region=%d\n", + region_cfp_table[i].region); + if (region_cfp_table[i].region == region) { + *cfp_no = region_cfp_table[i].cfp_no_BG; + lbs_deb_leave(LBS_DEB_MAIN); + return region_cfp_table[i].cfp_BG; + } + } + + lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL"); + return NULL; +} + +int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band) +{ + int ret = 0; + int i = 0; + + struct chan_freq_power *cfp; + int cfp_no; + + lbs_deb_enter(LBS_DEB_MAIN); + + memset(priv->region_channel, 0, sizeof(priv->region_channel)); + + cfp = lbs_get_region_cfp_table(region, &cfp_no); + if (cfp != NULL) { + priv->region_channel[i].nrcfp = cfp_no; + priv->region_channel[i].CFP = cfp; + } else { + lbs_deb_main("wrong region code %#x in band B/G\n", + region); + ret = -1; + goto out; + } + priv->region_channel[i].valid = 1; + priv->region_channel[i].region = region; + priv->region_channel[i].band = band; + i++; +out: + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); + return ret; +} + + + + +/*********************************************************************/ +/* */ +/* Main scanning support */ +/* */ +/*********************************************************************/ + +/** + * @brief Create a channel list for the driver to scan based on region info + * + * Only used from lbs_scan_setup_scan_config() + * + * Use the driver region/band information to construct a comprehensive list + * of channels to scan. This routine is used for any scan that is not + * provided a specific channel list to scan. 
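/*
 * Note on the entries built below: with the default active scan each
 * channel is marked for the B/G radio and given
 * maxscantime = MRVDRV_ACTIVE_SCAN_CHAN_TIME (100 ms), which is the
 * little-endian "64 00" visible in the CHANLIST TLV dump further down.
 * A minimal sketch of one such entry:
 */
static inline void ex_fill_active_chan(struct chanscanparamset *chan,
				       u8 channel)
{
	chan->radiotype = CMD_SCAN_RADIO_TYPE_BG;
	chan->channumber = channel;
	chan->chanscanmode.passivescan = 0;
	chan->maxscantime = cpu_to_le16(MRVDRV_ACTIVE_SCAN_CHAN_TIME);
}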
+ * + * @param priv A pointer to struct lbs_private structure + * @param scanchanlist Output parameter: resulting channel list to scan + * + * @return void + */ +static int lbs_scan_create_channel_list(struct lbs_private *priv, + struct chanscanparamset *scanchanlist) +{ + struct region_channel *scanregion; + struct chan_freq_power *cfp; + int rgnidx; + int chanidx; + int nextchan; + uint8_t scantype; + + chanidx = 0; + + /* Set the default scan type to the user specified type, will later + * be changed to passive on a per channel basis if restricted by + * regulatory requirements (11d or 11h) + */ + scantype = CMD_SCAN_TYPE_ACTIVE; + + for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) { + if (!priv->region_channel[rgnidx].valid) + continue; + scanregion = &priv->region_channel[rgnidx]; + + for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) { + struct chanscanparamset *chan = &scanchanlist[chanidx]; + + cfp = scanregion->CFP + nextchan; + + if (scanregion->band == BAND_B || scanregion->band == BAND_G) + chan->radiotype = CMD_SCAN_RADIO_TYPE_BG; + + if (scantype == CMD_SCAN_TYPE_PASSIVE) { + chan->maxscantime = cpu_to_le16(MRVDRV_PASSIVE_SCAN_CHAN_TIME); + chan->chanscanmode.passivescan = 1; + } else { + chan->maxscantime = cpu_to_le16(MRVDRV_ACTIVE_SCAN_CHAN_TIME); + chan->chanscanmode.passivescan = 0; + } + + chan->channumber = cfp->channel; + } + } + return chanidx; +} + +/* + * Add SSID TLV of the form: + * + * TLV-ID SSID 00 00 + * length 06 00 + * ssid 4d 4e 54 45 53 54 + */ +static int lbs_scan_add_ssid_tlv(struct lbs_private *priv, u8 *tlv) +{ + struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv; + + ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID); + ssid_tlv->header.len = cpu_to_le16(priv->scan_ssid_len); + memcpy(ssid_tlv->ssid, priv->scan_ssid, priv->scan_ssid_len); + return sizeof(ssid_tlv->header) + priv->scan_ssid_len; +} + +/* + * Add CHANLIST TLV of the form + * + * TLV-ID CHANLIST 01 01 + * length 5b 00 + * channel 1 00 01 00 00 00 64 00 + * radio type 00 + * channel 01 + * scan type 00 + * min scan time 00 00 + * max scan time 64 00 + * channel 2 00 02 00 00 00 64 00 + * channel 3 00 03 00 00 00 64 00 + * channel 4 00 04 00 00 00 64 00 + * channel 5 00 05 00 00 00 64 00 + * channel 6 00 06 00 00 00 64 00 + * channel 7 00 07 00 00 00 64 00 + * channel 8 00 08 00 00 00 64 00 + * channel 9 00 09 00 00 00 64 00 + * channel 10 00 0a 00 00 00 64 00 + * channel 11 00 0b 00 00 00 64 00 + * channel 12 00 0c 00 00 00 64 00 + * channel 13 00 0d 00 00 00 64 00 + * + */ +static int lbs_scan_add_chanlist_tlv(uint8_t *tlv, + struct chanscanparamset *chan_list, + int chan_count) +{ + size_t size = sizeof(struct chanscanparamset) *chan_count; + struct mrvl_ie_chanlist_param_set *chan_tlv = (void *)tlv; + + chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); + memcpy(chan_tlv->chanscanparam, chan_list, size); + chan_tlv->header.len = cpu_to_le16(size); + return sizeof(chan_tlv->header) + size; +} + +/* + * Add RATES TLV of the form + * + * TLV-ID RATES 01 00 + * length 0e 00 + * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c + * + * The rates are in lbs_bg_rates[], but for the 802.11b + * rates the high bit isn't set. 
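/*
 * Sanity check for the CHANLIST TLV dump above: each chanscanparamset
 * entry in the dump is 7 bytes (radio type, channel, scan type, 2-byte
 * min scan time, 2-byte max scan time), so 13 channels give a TLV
 * payload of 13 * 7 = 91 = 0x5b bytes - the "length 5b 00" line in the
 * example. The real code uses sizeof(struct chanscanparamset) rather
 * than the literal 7.
 */
static inline u16 ex_chanlist_payload_len(unsigned int chan_count)
{
	return chan_count * 7;	/* e.g. 13 channels -> 0x5b */
}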
+ */ +static int lbs_scan_add_rates_tlv(uint8_t *tlv) +{ + int i; + struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv; + + rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); + tlv += sizeof(rate_tlv->header); + for (i = 0; i < MAX_RATES; i++) { + *tlv = lbs_bg_rates[i]; + if (*tlv == 0) + break; + /* This code makes sure that the 802.11b rates (1 MBit/s, 2 + MBit/s, 5.5 MBit/s and 11 MBit/s get's the high bit set. + Note that the values are MBit/s * 2, to mark them as + basic rates so that the firmware likes it better */ + if (*tlv == 0x02 || *tlv == 0x04 || + *tlv == 0x0b || *tlv == 0x16) + *tlv |= 0x80; + tlv++; + } + rate_tlv->header.len = cpu_to_le16(i); + return sizeof(rate_tlv->header) + i; +} + +/* + * Generate the CMD_802_11_SCAN command with the proper tlv + * for a bunch of channels. + */ +static int lbs_do_scan(struct lbs_private *priv, uint8_t bsstype, + struct chanscanparamset *chan_list, int chan_count) +{ + int ret = -ENOMEM; + struct cmd_ds_802_11_scan *scan_cmd; + uint8_t *tlv; /* pointer into our current, growing TLV storage area */ + + lbs_deb_enter_args(LBS_DEB_SCAN, "bsstype %d, chanlist[].chan %d, chan_count %d", + bsstype, chan_list ? chan_list[0].channumber : -1, + chan_count); + + /* create the fixed part for scan command */ + scan_cmd = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL); + if (scan_cmd == NULL) + goto out; + + tlv = scan_cmd->tlvbuffer; + /* TODO: do we need to scan for a specific BSSID? + memcpy(scan_cmd->bssid, priv->scan_bssid, ETH_ALEN); */ + scan_cmd->bsstype = bsstype; + + /* add TLVs */ + if (priv->scan_ssid_len) + tlv += lbs_scan_add_ssid_tlv(priv, tlv); + if (chan_list && chan_count) + tlv += lbs_scan_add_chanlist_tlv(tlv, chan_list, chan_count); + tlv += lbs_scan_add_rates_tlv(tlv); + + /* This is the final data we are about to send */ + scan_cmd->hdr.size = cpu_to_le16(tlv - (uint8_t *)scan_cmd); + lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd, + sizeof(*scan_cmd)); + lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer, + tlv - scan_cmd->tlvbuffer); + + ret = __lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr, + le16_to_cpu(scan_cmd->hdr.size), + lbs_ret_80211_scan, 0); + +out: + kfree(scan_cmd); + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); + return ret; +} + +/** + * @brief Internal function used to start a scan based on an input config + * + * Use the input user scan configuration information when provided in + * order to send the appropriate scan commands to firmware to populate or + * update the internal driver scan table + * + * @param priv A pointer to struct lbs_private structure + * @param full_scan Do a full-scan (blocking) + * + * @return 0 or < 0 if error + */ +int lbs_scan_networks(struct lbs_private *priv, int full_scan) +{ + int ret = -ENOMEM; + struct chanscanparamset *chan_list; + struct chanscanparamset *curr_chans; + int chan_count; + uint8_t bsstype = CMD_BSS_TYPE_ANY; + int numchannels = MRVDRV_CHANNELS_PER_SCAN_CMD; + union iwreq_data wrqu; +#ifdef CONFIG_LIBERTAS_DEBUG + struct bss_descriptor *iter; + int i = 0; + DECLARE_SSID_BUF(ssid); +#endif + + lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", full_scan); + + /* Cancel any partial outstanding partial scans if this scan + * is a full scan. + */ + if (full_scan && delayed_work_pending(&priv->scan_work)) + cancel_delayed_work(&priv->scan_work); + + /* User-specified bsstype or channel list + TODO: this can be implemented if some user-space application + need the feature. Formerly, it was accessible from debugfs, + but then nowhere used. 
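/*
 * Worked example of the basic-rate marking in lbs_scan_add_rates_tlv()
 * above: rates are carried in 500 kb/s units and the 802.11b rates get
 * bit 7 set to flag them as basic rates, so 1/2/5.5/11 Mb/s become
 * 0x02|0x80 = 0x82, 0x04|0x80 = 0x84, 0x0b|0x80 = 0x8b and
 * 0x16|0x80 = 0x96 - exactly the "82 84 8b 96" bytes in the RATES TLV
 * dump above.
 */
static inline u8 ex_mark_basic_rate(u8 rate_500kbps)
{
	if (rate_500kbps == 0x02 || rate_500kbps == 0x04 ||
	    rate_500kbps == 0x0b || rate_500kbps == 0x16)
		rate_500kbps |= 0x80;

	return rate_500kbps;
}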
+ if (user_cfg) { + if (user_cfg->bsstype) + bsstype = user_cfg->bsstype; + } */ + + lbs_deb_scan("numchannels %d, bsstype %d\n", numchannels, bsstype); + + /* Create list of channels to scan */ + chan_list = kzalloc(sizeof(struct chanscanparamset) * + LBS_IOCTL_USER_SCAN_CHAN_MAX, GFP_KERNEL); + if (!chan_list) { + lbs_pr_alert("SCAN: chan_list empty\n"); + goto out; + } + + /* We want to scan all channels */ + chan_count = lbs_scan_create_channel_list(priv, chan_list); + + netif_stop_queue(priv->dev); + if (priv->mesh_dev) + netif_stop_queue(priv->mesh_dev); + + /* Prepare to continue an interrupted scan */ + lbs_deb_scan("chan_count %d, scan_channel %d\n", + chan_count, priv->scan_channel); + curr_chans = chan_list; + /* advance channel list by already-scanned-channels */ + if (priv->scan_channel > 0) { + curr_chans += priv->scan_channel; + chan_count -= priv->scan_channel; + } + + /* Send scan command(s) + * numchannels contains the number of channels we should maximally scan + * chan_count is the total number of channels to scan + */ + + while (chan_count) { + int to_scan = min(numchannels, chan_count); + lbs_deb_scan("scanning %d of %d channels\n", + to_scan, chan_count); + ret = lbs_do_scan(priv, bsstype, curr_chans, + to_scan); + if (ret) { + lbs_pr_err("SCAN_CMD failed\n"); + goto out2; + } + curr_chans += to_scan; + chan_count -= to_scan; + + /* somehow schedule the next part of the scan */ + if (chan_count && !full_scan && + !priv->surpriseremoved) { + /* -1 marks just that we're currently scanning */ + if (priv->scan_channel < 0) + priv->scan_channel = to_scan; + else + priv->scan_channel += to_scan; + cancel_delayed_work(&priv->scan_work); + queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(300)); + /* skip over GIWSCAN event */ + goto out; + } + + } + memset(&wrqu, 0, sizeof(union iwreq_data)); + wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL); + +#ifdef CONFIG_LIBERTAS_DEBUG + /* Dump the scan table */ + mutex_lock(&priv->lock); + lbs_deb_scan("scan table:\n"); + list_for_each_entry(iter, &priv->network_list, list) + lbs_deb_scan("%02d: BSSID %pM, RSSI %d, SSID '%s'\n", + i++, iter->bssid, iter->rssi, + print_ssid(ssid, iter->ssid, iter->ssid_len)); + mutex_unlock(&priv->lock); +#endif + +out2: + priv->scan_channel = 0; + +out: + if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len) + netif_wake_queue(priv->dev); + + if (priv->mesh_dev && lbs_mesh_connected(priv) && + !priv->tx_pending_len) + netif_wake_queue(priv->mesh_dev); + + kfree(chan_list); + + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); + return ret; +} + +void lbs_scan_worker(struct work_struct *work) +{ + struct lbs_private *priv = + container_of(work, struct lbs_private, scan_work.work); + + lbs_deb_enter(LBS_DEB_SCAN); + lbs_scan_networks(priv, 0); + lbs_deb_leave(LBS_DEB_SCAN); +} + + +/*********************************************************************/ +/* */ +/* Result interpretation */ +/* */ +/*********************************************************************/ + +/** + * @brief Interpret a BSS scan response returned from the firmware + * + * Parse the various fixed fields and IEs passed back for a a BSS probe + * response or beacon from the scan command. Record information as needed + * in the scan table struct bss_descriptor for that entry. 
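/*
 * Sketch of how lbs_scan_networks() above splits the channel list into
 * firmware commands: at most MRVDRV_CHANNELS_PER_SCAN_CMD (4) channels
 * go into one CMD_802_11_SCAN, so the 14-channel Japan table needs
 * ceil(14 / 4) = 4 commands covering 4+4+4+2 channels. In the
 * non-blocking case only one chunk is sent per pass and the remainder
 * is rescheduled as delayed work (300 ms above), with
 * priv->scan_channel recording how many channels are already done
 * (-1 only marks "scan in progress").
 */
static inline int ex_scan_cmds_needed(int chan_count, int per_cmd)
{
	return (chan_count + per_cmd - 1) / per_cmd;	/* (14 + 3) / 4 = 4 */
}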
+ * + * @param bss Output parameter: Pointer to the BSS Entry + * + * @return 0 or -1 + */ +static int lbs_process_bss(struct bss_descriptor *bss, + uint8_t **pbeaconinfo, int *bytesleft) +{ + struct ieee_ie_fh_param_set *fh; + struct ieee_ie_ds_param_set *ds; + struct ieee_ie_cf_param_set *cf; + struct ieee_ie_ibss_param_set *ibss; + DECLARE_SSID_BUF(ssid); + uint8_t *pos, *end, *p; + uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; + uint16_t beaconsize = 0; + int ret; + + lbs_deb_enter(LBS_DEB_SCAN); + + if (*bytesleft >= sizeof(beaconsize)) { + /* Extract & convert beacon size from the command buffer */ + beaconsize = get_unaligned_le16(*pbeaconinfo); + *bytesleft -= sizeof(beaconsize); + *pbeaconinfo += sizeof(beaconsize); + } + + if (beaconsize == 0 || beaconsize > *bytesleft) { + *pbeaconinfo += *bytesleft; + *bytesleft = 0; + ret = -1; + goto done; + } + + /* Initialize the current working beacon pointer for this BSS iteration */ + pos = *pbeaconinfo; + end = pos + beaconsize; + + /* Advance the return beacon pointer past the current beacon */ + *pbeaconinfo += beaconsize; + *bytesleft -= beaconsize; + + memcpy(bss->bssid, pos, ETH_ALEN); + lbs_deb_scan("process_bss: BSSID %pM\n", bss->bssid); + pos += ETH_ALEN; + + if ((end - pos) < 12) { + lbs_deb_scan("process_bss: Not enough bytes left\n"); + ret = -1; + goto done; + } + + /* + * next 4 fields are RSSI, time stamp, beacon interval, + * and capability information + */ + + /* RSSI is 1 byte long */ + bss->rssi = *pos; + lbs_deb_scan("process_bss: RSSI %d\n", *pos); + pos++; + + /* time stamp is 8 bytes long */ + pos += 8; + + /* beacon interval is 2 bytes long */ + bss->beaconperiod = get_unaligned_le16(pos); + pos += 2; + + /* capability information is 2 bytes long */ + bss->capability = get_unaligned_le16(pos); + lbs_deb_scan("process_bss: capabilities 0x%04x\n", bss->capability); + pos += 2; + + if (bss->capability & WLAN_CAPABILITY_PRIVACY) + lbs_deb_scan("process_bss: WEP enabled\n"); + if (bss->capability & WLAN_CAPABILITY_IBSS) + bss->mode = IW_MODE_ADHOC; + else + bss->mode = IW_MODE_INFRA; + + /* rest of the current buffer are IE's */ + lbs_deb_scan("process_bss: IE len %zd\n", end - pos); + lbs_deb_hex(LBS_DEB_SCAN, "process_bss: IE info", pos, end - pos); + + /* process variable IE */ + while (pos <= end - 2) { + if (pos + pos[1] > end) { + lbs_deb_scan("process_bss: error in processing IE, " + "bytes left < IE length\n"); + break; + } + + switch (pos[0]) { + case WLAN_EID_SSID: + bss->ssid_len = min_t(int, IEEE80211_MAX_SSID_LEN, pos[1]); + memcpy(bss->ssid, pos + 2, bss->ssid_len); + lbs_deb_scan("got SSID IE: '%s', len %u\n", + print_ssid(ssid, bss->ssid, bss->ssid_len), + bss->ssid_len); + break; + + case WLAN_EID_SUPP_RATES: + n_basic_rates = min_t(uint8_t, MAX_RATES, pos[1]); + memcpy(bss->rates, pos + 2, n_basic_rates); + got_basic_rates = 1; + lbs_deb_scan("got RATES IE\n"); + break; + + case WLAN_EID_FH_PARAMS: + fh = (struct ieee_ie_fh_param_set *) pos; + memcpy(&bss->phy.fh, fh, sizeof(*fh)); + lbs_deb_scan("got FH IE\n"); + break; + + case WLAN_EID_DS_PARAMS: + ds = (struct ieee_ie_ds_param_set *) pos; + bss->channel = ds->channel; + memcpy(&bss->phy.ds, ds, sizeof(*ds)); + lbs_deb_scan("got DS IE, channel %d\n", bss->channel); + break; + + case WLAN_EID_CF_PARAMS: + cf = (struct ieee_ie_cf_param_set *) pos; + memcpy(&bss->ss.cf, cf, sizeof(*cf)); + lbs_deb_scan("got CF IE\n"); + break; + + case WLAN_EID_IBSS_PARAMS: + ibss = (struct ieee_ie_ibss_param_set *) pos; + bss->atimwindow = 
ibss->atimwindow; + memcpy(&bss->ss.ibss, ibss, sizeof(*ibss)); + lbs_deb_scan("got IBSS IE\n"); + break; + + case WLAN_EID_EXT_SUPP_RATES: + /* only process extended supported rate if data rate is + * already found. Data rate IE should come before + * extended supported rate IE + */ + lbs_deb_scan("got RATESEX IE\n"); + if (!got_basic_rates) { + lbs_deb_scan("... but ignoring it\n"); + break; + } + + n_ex_rates = pos[1]; + if (n_basic_rates + n_ex_rates > MAX_RATES) + n_ex_rates = MAX_RATES - n_basic_rates; + + p = bss->rates + n_basic_rates; + memcpy(p, pos + 2, n_ex_rates); + break; + + case WLAN_EID_GENERIC: + if (pos[1] >= 4 && + pos[2] == 0x00 && pos[3] == 0x50 && + pos[4] == 0xf2 && pos[5] == 0x01) { + bss->wpa_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN); + memcpy(bss->wpa_ie, pos, bss->wpa_ie_len); + lbs_deb_scan("got WPA IE\n"); + lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie, + bss->wpa_ie_len); + } else if (pos[1] >= MARVELL_MESH_IE_LENGTH && + pos[2] == 0x00 && pos[3] == 0x50 && + pos[4] == 0x43 && pos[5] == 0x04) { + lbs_deb_scan("got mesh IE\n"); + bss->mesh = 1; + } else { + lbs_deb_scan("got generic IE: %02x:%02x:%02x:%02x, len %d\n", + pos[2], pos[3], + pos[4], pos[5], + pos[1]); + } + break; + + case WLAN_EID_RSN: + lbs_deb_scan("got RSN IE\n"); + bss->rsn_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN); + memcpy(bss->rsn_ie, pos, bss->rsn_ie_len); + lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE", + bss->rsn_ie, bss->rsn_ie_len); + break; + + default: + lbs_deb_scan("got IE 0x%04x, len %d\n", + pos[0], pos[1]); + break; + } + + pos += pos[1] + 2; + } + + /* Timestamp */ + bss->last_scanned = jiffies; + lbs_unset_basic_rate_flags(bss->rates, sizeof(bss->rates)); + + ret = 0; + +done: + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); + return ret; +} + +/** + * @brief Send a scan command for all available channels filtered on a spec + * + * Used in association code and from debugfs + * + * @param priv A pointer to struct lbs_private structure + * @param ssid A pointer to the SSID to scan for + * @param ssid_len Length of the SSID + * + * @return 0-success, otherwise fail + */ +int lbs_send_specific_ssid_scan(struct lbs_private *priv, uint8_t *ssid, + uint8_t ssid_len) +{ + DECLARE_SSID_BUF(ssid_buf); + int ret = 0; + + lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s'\n", + print_ssid(ssid_buf, ssid, ssid_len)); + + if (!ssid_len) + goto out; + + memcpy(priv->scan_ssid, ssid, ssid_len); + priv->scan_ssid_len = ssid_len; + + lbs_scan_networks(priv, 1); + if (priv->surpriseremoved) { + ret = -1; + goto out; + } + +out: + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); + return ret; +} + + + + +/*********************************************************************/ +/* */ +/* Support for Wireless Extensions */ +/* */ +/*********************************************************************/ + + +#define MAX_CUSTOM_LEN 64 + +static inline char *lbs_translate_scan(struct lbs_private *priv, + struct iw_request_info *info, + char *start, char *stop, + struct bss_descriptor *bss) +{ + struct chan_freq_power *cfp; + char *current_val; /* For rates */ + struct iw_event iwe; /* Temporary buffer */ + int j; +#define PERFECT_RSSI ((uint8_t)50) +#define WORST_RSSI ((uint8_t)0) +#define RSSI_DIFF ((uint8_t)(PERFECT_RSSI - WORST_RSSI)) + uint8_t rssi; + + lbs_deb_enter(LBS_DEB_SCAN); + + cfp = lbs_find_cfp_by_band_and_channel(priv, 0, bss->channel); + if (!cfp) { + lbs_deb_scan("Invalid channel number %d\n", bss->channel); + start = NULL; + goto out; + } + + /* First entry *MUST* be the BSSID */ + 
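/*
 * Sketch of the vendor-IE classification done in lbs_process_bss()
 * above: a WLAN_EID_GENERIC (vendor specific) element is told apart by
 * its OUI and type byte - 00:50:f2 type 1 is a WPA IE, while Marvell's
 * 00:50:43 type 4 marks a mesh-capable BSS (the mesh check in the real
 * code additionally requires the full MARVELL_MESH_IE_LENGTH).
 */
static inline int ex_ie_is_wpa(const u8 *ie)
{
	/* ie[0] = id, ie[1] = len, ie[2..4] = OUI, ie[5] = type */
	return ie[1] >= 4 && ie[2] == 0x00 && ie[3] == 0x50 &&
	       ie[4] == 0xf2 && ie[5] == 0x01;
}

static inline int ex_ie_is_marvell_mesh(const u8 *ie)
{
	return ie[1] >= 4 && ie[2] == 0x00 && ie[3] == 0x50 &&
	       ie[4] == 0x43 && ie[5] == 0x04;
}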
iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &bss->bssid, ETH_ALEN); + start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN); + + /* SSID */ + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IEEE80211_MAX_SSID_LEN); + start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid); + + /* Mode */ + iwe.cmd = SIOCGIWMODE; + iwe.u.mode = bss->mode; + start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN); + + /* Frequency */ + iwe.cmd = SIOCGIWFREQ; + iwe.u.freq.m = (long)cfp->freq * 100000; + iwe.u.freq.e = 1; + start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN); + + /* Add quality statistics */ + iwe.cmd = IWEVQUAL; + iwe.u.qual.updated = IW_QUAL_ALL_UPDATED; + iwe.u.qual.level = SCAN_RSSI(bss->rssi); + + rssi = iwe.u.qual.level - MRVDRV_NF_DEFAULT_SCAN_VALUE; + iwe.u.qual.qual = + (100 * RSSI_DIFF * RSSI_DIFF - (PERFECT_RSSI - rssi) * + (15 * (RSSI_DIFF) + 62 * (PERFECT_RSSI - rssi))) / + (RSSI_DIFF * RSSI_DIFF); + if (iwe.u.qual.qual > 100) + iwe.u.qual.qual = 100; + + if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) { + iwe.u.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE; + } else { + iwe.u.qual.noise = CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]); + } + + /* Locally created ad-hoc BSSs won't have beacons if this is the + * only station in the adhoc network; so get signal strength + * from receive statistics. + */ + if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate + && !lbs_ssid_cmp(priv->curbssparams.ssid, + priv->curbssparams.ssid_len, + bss->ssid, bss->ssid_len)) { + int snr, nf; + snr = priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE; + nf = priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE; + iwe.u.qual.level = CAL_RSSI(snr, nf); + } + start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN); + + /* Add encryption capability */ + iwe.cmd = SIOCGIWENCODE; + if (bss->capability & WLAN_CAPABILITY_PRIVACY) { + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + } else { + iwe.u.data.flags = IW_ENCODE_DISABLED; + } + iwe.u.data.length = 0; + start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid); + + current_val = start + iwe_stream_lcp_len(info); + + iwe.cmd = SIOCGIWRATE; + iwe.u.bitrate.fixed = 0; + iwe.u.bitrate.disabled = 0; + iwe.u.bitrate.value = 0; + + for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) { + /* Bit rate given in 500 kb/s units */ + iwe.u.bitrate.value = bss->rates[j] * 500000; + current_val = iwe_stream_add_value(info, start, current_val, + stop, &iwe, IW_EV_PARAM_LEN); + } + if ((bss->mode == IW_MODE_ADHOC) && priv->adhoccreate + && !lbs_ssid_cmp(priv->curbssparams.ssid, + priv->curbssparams.ssid_len, + bss->ssid, bss->ssid_len)) { + iwe.u.bitrate.value = 22 * 500000; + current_val = iwe_stream_add_value(info, start, current_val, + stop, &iwe, IW_EV_PARAM_LEN); + } + /* Check if we added any event */ + if ((current_val - start) > iwe_stream_lcp_len(info)) + start = current_val; + + memset(&iwe, 0, sizeof(iwe)); + if (bss->wpa_ie_len) { + char buf[MAX_WPA_IE_LEN]; + memcpy(buf, bss->wpa_ie, bss->wpa_ie_len); + iwe.cmd = IWEVGENIE; + iwe.u.data.length = bss->wpa_ie_len; + start = iwe_stream_add_point(info, start, stop, &iwe, buf); + } + + memset(&iwe, 0, sizeof(iwe)); + if (bss->rsn_ie_len) { + char buf[MAX_WPA_IE_LEN]; + memcpy(buf, bss->rsn_ie, bss->rsn_ie_len); + iwe.cmd = IWEVGENIE; + iwe.u.data.length = bss->rsn_ie_len; + start = iwe_stream_add_point(info, start, stop, &iwe, 
buf); + } + + if (bss->mesh) { + char custom[MAX_CUSTOM_LEN]; + char *p = custom; + + iwe.cmd = IWEVCUSTOM; + p += snprintf(p, MAX_CUSTOM_LEN, "mesh-type: olpc"); + iwe.u.data.length = p - custom; + if (iwe.u.data.length) + start = iwe_stream_add_point(info, start, stop, + &iwe, custom); + } + +out: + lbs_deb_leave_args(LBS_DEB_SCAN, "start %p", start); + return start; +} + + +/** + * @brief Handle Scan Network ioctl + * + * @param dev A pointer to net_device structure + * @param info A pointer to iw_request_info structure + * @param vwrq A pointer to iw_param structure + * @param extra A pointer to extra data buf + * + * @return 0 --success, otherwise fail + */ +int lbs_set_scan(struct net_device *dev, struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + DECLARE_SSID_BUF(ssid); + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (!priv->radio_on) { + ret = -EINVAL; + goto out; + } + + if (!netif_running(dev)) { + ret = -ENETDOWN; + goto out; + } + + /* mac80211 does this: + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + if (sdata->type != IEEE80211_IF_TYPE_xxx) { + ret = -EOPNOTSUPP; + goto out; + } + */ + + if (wrqu->data.length == sizeof(struct iw_scan_req) && + wrqu->data.flags & IW_SCAN_THIS_ESSID) { + struct iw_scan_req *req = (struct iw_scan_req *)extra; + priv->scan_ssid_len = req->essid_len; + memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len); + lbs_deb_wext("set_scan, essid '%s'\n", + print_ssid(ssid, priv->scan_ssid, priv->scan_ssid_len)); + } else { + priv->scan_ssid_len = 0; + } + + if (!delayed_work_pending(&priv->scan_work)) + queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(50)); + /* set marker that currently a scan is taking place */ + priv->scan_channel = -1; + + if (priv->surpriseremoved) + ret = -EIO; + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + + +/** + * @brief Handle Retrieve scan table ioctl + * + * @param dev A pointer to net_device structure + * @param info A pointer to iw_request_info structure + * @param dwrq A pointer to iw_point structure + * @param extra A pointer to extra data buf + * + * @return 0 --success, otherwise fail + */ +int lbs_get_scan(struct net_device *dev, struct iw_request_info *info, + struct iw_point *dwrq, char *extra) +{ +#define SCAN_ITEM_SIZE 128 + struct lbs_private *priv = dev->ml_priv; + int err = 0; + char *ev = extra; + char *stop = ev + dwrq->length; + struct bss_descriptor *iter_bss; + struct bss_descriptor *safe; + + lbs_deb_enter(LBS_DEB_WEXT); + + /* iwlist should wait until the current scan is finished */ + if (priv->scan_channel) + return -EAGAIN; + + /* Update RSSI if current BSS is a locally created ad-hoc BSS */ + if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) { + err = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, + CMD_OPTION_WAITFORRSP, 0, NULL); + if (err) + goto out; + } + + mutex_lock(&priv->lock); + list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) { + char *next_ev; + unsigned long stale_time; + + if (stop - ev < SCAN_ITEM_SIZE) { + err = -E2BIG; + break; + } + + /* For mesh device, list only mesh networks */ + if (dev == priv->mesh_dev && !iter_bss->mesh) + continue; + + /* Prune old an old scan result */ + stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE; + if (time_after(jiffies, stale_time)) { + list_move_tail(&iter_bss->list, &priv->network_free_list); + clear_bss_descriptor(iter_bss); + continue; + } + + /* 
Translate to WE format this entry */ + next_ev = lbs_translate_scan(priv, info, ev, stop, iter_bss); + if (next_ev == NULL) + continue; + ev = next_ev; + } + mutex_unlock(&priv->lock); + + dwrq->length = (ev - extra); + dwrq->flags = 0; +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err); + return err; +} + + + + +/*********************************************************************/ +/* */ +/* Command execution */ +/* */ +/*********************************************************************/ + + +/** + * @brief This function handles the command response of scan + * + * Called from handle_cmd_response() in cmdrespc. + * + * The response buffer for the scan command has the following + * memory layout: + * + * .-----------------------------------------------------------. + * | header (4 * sizeof(u16)): Standard command response hdr | + * .-----------------------------------------------------------. + * | bufsize (u16) : sizeof the BSS Description data | + * .-----------------------------------------------------------. + * | NumOfSet (u8) : Number of BSS Descs returned | + * .-----------------------------------------------------------. + * | BSSDescription data (variable, size given in bufsize) | + * .-----------------------------------------------------------. + * | TLV data (variable, size calculated using header->size, | + * | bufsize and sizeof the fixed fields above) | + * .-----------------------------------------------------------. + * + * @param priv A pointer to struct lbs_private structure + * @param resp A pointer to cmd_ds_command + * + * @return 0 or -1 + */ +static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy, + struct cmd_header *resp) +{ + struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp; + struct bss_descriptor *iter_bss; + struct bss_descriptor *safe; + uint8_t *bssinfo; + uint16_t scanrespsize; + int bytesleft; + int idx; + int tlvbufsize; + int ret; + + lbs_deb_enter(LBS_DEB_SCAN); + + /* Prune old entries from scan table */ + list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) { + unsigned long stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE; + if (time_before(jiffies, stale_time)) + continue; + list_move_tail (&iter_bss->list, &priv->network_free_list); + clear_bss_descriptor(iter_bss); + } + + if (scanresp->nr_sets > MAX_NETWORK_COUNT) { + lbs_deb_scan("SCAN_RESP: too many scan results (%d, max %d)\n", + scanresp->nr_sets, MAX_NETWORK_COUNT); + ret = -1; + goto done; + } + + bytesleft = get_unaligned_le16(&scanresp->bssdescriptsize); + lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft); + + scanrespsize = le16_to_cpu(resp->size); + lbs_deb_scan("SCAN_RESP: scan results %d\n", scanresp->nr_sets); + + bssinfo = scanresp->bssdesc_and_tlvbuffer; + + /* The size of the TLV buffer is equal to the entire command response + * size (scanrespsize) minus the fixed fields (sizeof()'s), the + * BSS Descriptions (bssdescriptsize as bytesLef) and the command + * response header (sizeof(struct cmd_header)) + */ + tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize) + + sizeof(scanresp->nr_sets) + + sizeof(struct cmd_header)); + + /* + * Process each scan response returned (scanresp->nr_sets). 
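+ * Each iteration consumes one variable-length BSS description from
+ * bssinfo, with lbs_process_bss() advancing the buffer pointer and
+ * decrementing bytesleft as it goes.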
Save + * the information in the newbssentry and then insert into the + * driver scan table either as an update to an existing entry + * or as an addition at the end of the table + */ + for (idx = 0; idx < scanresp->nr_sets && bytesleft; idx++) { + struct bss_descriptor new; + struct bss_descriptor *found = NULL; + struct bss_descriptor *oldest = NULL; + + /* Process the data fields and IEs returned for this BSS */ + memset(&new, 0, sizeof (struct bss_descriptor)); + if (lbs_process_bss(&new, &bssinfo, &bytesleft) != 0) { + /* error parsing the scan response, skipped */ + lbs_deb_scan("SCAN_RESP: process_bss returned ERROR\n"); + continue; + } + + /* Try to find this bss in the scan table */ + list_for_each_entry (iter_bss, &priv->network_list, list) { + if (is_same_network(iter_bss, &new)) { + found = iter_bss; + break; + } + + if ((oldest == NULL) || + (iter_bss->last_scanned < oldest->last_scanned)) + oldest = iter_bss; + } + + if (found) { + /* found, clear it */ + clear_bss_descriptor(found); + } else if (!list_empty(&priv->network_free_list)) { + /* Pull one from the free list */ + found = list_entry(priv->network_free_list.next, + struct bss_descriptor, list); + list_move_tail(&found->list, &priv->network_list); + } else if (oldest) { + /* If there are no more slots, expire the oldest */ + found = oldest; + clear_bss_descriptor(found); + list_move_tail(&found->list, &priv->network_list); + } else { + continue; + } + + lbs_deb_scan("SCAN_RESP: BSSID %pM\n", new.bssid); + + /* Copy the locally created newbssentry to the scan table */ + memcpy(found, &new, offsetof(struct bss_descriptor, list)); + } + + ret = 0; + +done: + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); + return ret; +} diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h new file mode 100644 index 0000000..8fb1706 --- /dev/null +++ b/drivers/net/wireless/libertas/scan.h @@ -0,0 +1,63 @@ +/** + * Interface for the wlan network scan routines + * + * Driver interface functions and type declarations for the scan module + * implemented in scan.c. + */ +#ifndef _LBS_SCAN_H +#define _LBS_SCAN_H + +#include + +struct lbs_private; + +#define MAX_NETWORK_COUNT 128 + +/** Chan-freq-TxPower mapping table*/ +struct chan_freq_power { + /** channel Number */ + u16 channel; + /** frequency of this channel */ + u32 freq; + /** Max allowed Tx power level */ + u16 maxtxpower; + /** TRUE:channel unsupported; FLASE:supported*/ + u8 unsupported; +}; + +/** region-band mapping table*/ +struct region_channel { + /** TRUE if this entry is valid */ + u8 valid; + /** region code for US, Japan ... */ + u8 region; + /** band B/G/A, used for BAND_CONFIG cmd */ + u8 band; + /** Actual No. 
of elements in the array below */ + u8 nrcfp; + /** chan-freq-txpower mapping table*/ + struct chan_freq_power *CFP; +}; + +/** + * @brief Maximum number of channels that can be sent in a setuserscan ioctl + */ +#define LBS_IOCTL_USER_SCAN_CHAN_MAX 50 + +int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len); + +int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band); + +int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid, + u8 ssid_len); + +int lbs_get_scan(struct net_device *dev, struct iw_request_info *info, + struct iw_point *dwrq, char *extra); +int lbs_set_scan(struct net_device *dev, struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); + +int lbs_scan_networks(struct lbs_private *priv, int full_scan); + +void lbs_scan_worker(struct work_struct *work); + +#endif diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c new file mode 100644 index 0000000..52d244e --- /dev/null +++ b/drivers/net/wireless/libertas/tx.c @@ -0,0 +1,204 @@ +/** + * This file contains the handling of TX in wlan driver. + */ +#include +#include +#include + +#include "host.h" +#include "radiotap.h" +#include "decl.h" +#include "defs.h" +#include "dev.h" +#include "wext.h" + +/** + * @brief This function converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE + * units (500 Kb/s) into Marvell WLAN format (see Table 8 in Section 3.2.1) + * + * @param rate Input rate + * @return Output Rate (0 if invalid) + */ +static u32 convert_radiotap_rate_to_mv(u8 rate) +{ + switch (rate) { + case 2: /* 1 Mbps */ + return 0 | (1 << 4); + case 4: /* 2 Mbps */ + return 1 | (1 << 4); + case 11: /* 5.5 Mbps */ + return 2 | (1 << 4); + case 22: /* 11 Mbps */ + return 3 | (1 << 4); + case 12: /* 6 Mbps */ + return 4 | (1 << 4); + case 18: /* 9 Mbps */ + return 5 | (1 << 4); + case 24: /* 12 Mbps */ + return 6 | (1 << 4); + case 36: /* 18 Mbps */ + return 7 | (1 << 4); + case 48: /* 24 Mbps */ + return 8 | (1 << 4); + case 72: /* 36 Mbps */ + return 9 | (1 << 4); + case 96: /* 48 Mbps */ + return 10 | (1 << 4); + case 108: /* 54 Mbps */ + return 11 | (1 << 4); + } + return 0; +} + +/** + * @brief This function checks the conditions and sends packet to IF + * layer if everything is ok. + * + * @param priv A pointer to struct lbs_private structure + * @param skb A pointer to skb which includes TX packet + * @return 0 or -1 + */ +netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned long flags; + struct lbs_private *priv = dev->ml_priv; + struct txpd *txpd; + char *p802x_hdr; + uint16_t pkt_len; + netdev_tx_t ret = NETDEV_TX_OK; + + lbs_deb_enter(LBS_DEB_TX); + + /* We need to protect against the queues being restarted before + we get round to stopping them */ + spin_lock_irqsave(&priv->driver_lock, flags); + + if (priv->surpriseremoved) + goto free; + + if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) { + lbs_deb_tx("tx err: skb length %d 0 or > %zd\n", + skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE); + /* We'll never manage to send this one; drop it and return 'OK' */ + + dev->stats.tx_dropped++; + dev->stats.tx_errors++; + goto free; + } + + + netif_stop_queue(priv->dev); + if (priv->mesh_dev) + netif_stop_queue(priv->mesh_dev); + + if (priv->tx_pending_len) { + /* This can happen if packets come in on the mesh and eth + device simultaneously -- there's no mutual exclusion on + hard_start_xmit() calls between devices. 
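+	   In that case NETDEV_TX_BUSY is returned below, so the stack
+	   re-queues the skb and calls us again later.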
*/ + lbs_deb_tx("Packet on %s while busy\n", dev->name); + ret = NETDEV_TX_BUSY; + goto unlock; + } + + priv->tx_pending_len = -1; + spin_unlock_irqrestore(&priv->driver_lock, flags); + + lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100)); + + txpd = (void *)priv->tx_pending_buf; + memset(txpd, 0, sizeof(struct txpd)); + + p802x_hdr = skb->data; + pkt_len = skb->len; + + if (dev == priv->rtap_net_dev) { + struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data; + + /* set txpd fields from the radiotap header */ + txpd->tx_control = cpu_to_le32(convert_radiotap_rate_to_mv(rtap_hdr->rate)); + + /* skip the radiotap header */ + p802x_hdr += sizeof(*rtap_hdr); + pkt_len -= sizeof(*rtap_hdr); + + /* copy destination address from 802.11 header */ + memcpy(txpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN); + } else { + /* copy destination address from 802.3 header */ + memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN); + } + + txpd->tx_packet_length = cpu_to_le16(pkt_len); + txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); + + lbs_mesh_set_txpd(priv, dev, txpd); + + lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) &txpd, sizeof(struct txpd)); + + lbs_deb_hex(LBS_DEB_TX, "Tx Data", (u8 *) p802x_hdr, le16_to_cpu(txpd->tx_packet_length)); + + memcpy(&txpd[1], p802x_hdr, le16_to_cpu(txpd->tx_packet_length)); + + spin_lock_irqsave(&priv->driver_lock, flags); + priv->tx_pending_len = pkt_len + sizeof(struct txpd); + + lbs_deb_tx("%s lined up packet\n", __func__); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + dev->trans_start = jiffies; + + if (priv->monitormode) { + /* Keep the skb to echo it back once Tx feedback is + received from FW */ + skb_orphan(skb); + + /* Keep the skb around for when we get feedback */ + priv->currenttxskb = skb; + } else { + free: + dev_kfree_skb_any(skb); + } + unlock: + spin_unlock_irqrestore(&priv->driver_lock, flags); + wake_up(&priv->waitq); + + lbs_deb_leave_args(LBS_DEB_TX, "ret %d", ret); + return ret; +} + +/** + * @brief This function sends to the host the last transmitted packet, + * filling the radiotap headers with transmission information. + * + * @param priv A pointer to struct lbs_private structure + * @param status A 32 bit value containing transmission status. + * + * @returns void + */ +void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count) +{ + struct tx_radiotap_hdr *radiotap_hdr; + + if (!priv->monitormode || priv->currenttxskb == NULL) + return; + + radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data; + + radiotap_hdr->data_retries = try_count ? 
+ (1 + priv->txretrycount - try_count) : 0; + + priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb, + priv->rtap_net_dev); + netif_rx(priv->currenttxskb); + + priv->currenttxskb = NULL; + + if (priv->connect_status == LBS_CONNECTED) + netif_wake_queue(priv->dev); + + if (priv->mesh_dev && lbs_mesh_connected(priv)) + netif_wake_queue(priv->mesh_dev); +} +EXPORT_SYMBOL_GPL(lbs_send_tx_feedback); diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h new file mode 100644 index 0000000..3e72c86 --- /dev/null +++ b/drivers/net/wireless/libertas/types.h @@ -0,0 +1,266 @@ +/** + * This header file contains definition for global types + */ +#ifndef _LBS_TYPES_H_ +#define _LBS_TYPES_H_ + +#include +#include +#include + +struct ieee_ie_header { + u8 id; + u8 len; +} __attribute__ ((packed)); + +struct ieee_ie_cf_param_set { + struct ieee_ie_header header; + + u8 cfpcnt; + u8 cfpperiod; + __le16 cfpmaxduration; + __le16 cfpdurationremaining; +} __attribute__ ((packed)); + + +struct ieee_ie_ibss_param_set { + struct ieee_ie_header header; + + __le16 atimwindow; +} __attribute__ ((packed)); + +union ieee_ss_param_set { + struct ieee_ie_cf_param_set cf; + struct ieee_ie_ibss_param_set ibss; +} __attribute__ ((packed)); + +struct ieee_ie_fh_param_set { + struct ieee_ie_header header; + + __le16 dwelltime; + u8 hopset; + u8 hoppattern; + u8 hopindex; +} __attribute__ ((packed)); + +struct ieee_ie_ds_param_set { + struct ieee_ie_header header; + + u8 channel; +} __attribute__ ((packed)); + +union ieee_phy_param_set { + struct ieee_ie_fh_param_set fh; + struct ieee_ie_ds_param_set ds; +} __attribute__ ((packed)); + +/** TLV type ID definition */ +#define PROPRIETARY_TLV_BASE_ID 0x0100 + +/* Terminating TLV type */ +#define MRVL_TERMINATE_TLV_ID 0xffff + +#define TLV_TYPE_SSID 0x0000 +#define TLV_TYPE_RATES 0x0001 +#define TLV_TYPE_PHY_FH 0x0002 +#define TLV_TYPE_PHY_DS 0x0003 +#define TLV_TYPE_CF 0x0004 +#define TLV_TYPE_IBSS 0x0006 + +#define TLV_TYPE_DOMAIN 0x0007 + +#define TLV_TYPE_POWER_CAPABILITY 0x0021 + +#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0) +#define TLV_TYPE_CHANLIST (PROPRIETARY_TLV_BASE_ID + 1) +#define TLV_TYPE_NUMPROBES (PROPRIETARY_TLV_BASE_ID + 2) +#define TLV_TYPE_RSSI_LOW (PROPRIETARY_TLV_BASE_ID + 4) +#define TLV_TYPE_SNR_LOW (PROPRIETARY_TLV_BASE_ID + 5) +#define TLV_TYPE_FAILCOUNT (PROPRIETARY_TLV_BASE_ID + 6) +#define TLV_TYPE_BCNMISS (PROPRIETARY_TLV_BASE_ID + 7) +#define TLV_TYPE_LED_GPIO (PROPRIETARY_TLV_BASE_ID + 8) +#define TLV_TYPE_LEDBEHAVIOR (PROPRIETARY_TLV_BASE_ID + 9) +#define TLV_TYPE_PASSTHROUGH (PROPRIETARY_TLV_BASE_ID + 10) +#define TLV_TYPE_REASSOCAP (PROPRIETARY_TLV_BASE_ID + 11) +#define TLV_TYPE_POWER_TBL_2_4GHZ (PROPRIETARY_TLV_BASE_ID + 12) +#define TLV_TYPE_POWER_TBL_5GHZ (PROPRIETARY_TLV_BASE_ID + 13) +#define TLV_TYPE_BCASTPROBE (PROPRIETARY_TLV_BASE_ID + 14) +#define TLV_TYPE_NUMSSID_PROBE (PROPRIETARY_TLV_BASE_ID + 15) +#define TLV_TYPE_WMMQSTATUS (PROPRIETARY_TLV_BASE_ID + 16) +#define TLV_TYPE_CRYPTO_DATA (PROPRIETARY_TLV_BASE_ID + 17) +#define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18) +#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19) +#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22) +#define TLV_TYPE_SNR_HIGH (PROPRIETARY_TLV_BASE_ID + 23) +#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31) +#define TLV_TYPE_MESH_ID (PROPRIETARY_TLV_BASE_ID + 37) +#define TLV_TYPE_OLD_MESH_ID (PROPRIETARY_TLV_BASE_ID + 291) + +/** TLV related data structures*/ 
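+/*
+ * Illustrative sketch only, not part of the original driver code: a
+ * proprietary ID above is just an offset from PROPRIETARY_TLV_BASE_ID,
+ * so e.g. TLV_TYPE_NUMPROBES goes out as 0x0102.  A firmware TLV is
+ * built by filling the little-endian header defined below, roughly:
+ *
+ *	struct mrvl_ie_num_probes np;
+ *
+ *	np.header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
+ *	np.header.len  = cpu_to_le16(sizeof(np) - sizeof(np.header));
+ *	np.numprobes   = cpu_to_le16(2);
+ *
+ * (That header.len counts only the payload, not the header itself, is an
+ * assumption of this sketch; this header does not spell it out.)
+ */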
+struct mrvl_ie_header { + __le16 type; + __le16 len; +} __attribute__ ((packed)); + +struct mrvl_ie_data { + struct mrvl_ie_header header; + u8 Data[1]; +} __attribute__ ((packed)); + +struct mrvl_ie_rates_param_set { + struct mrvl_ie_header header; + u8 rates[1]; +} __attribute__ ((packed)); + +struct mrvl_ie_ssid_param_set { + struct mrvl_ie_header header; + u8 ssid[1]; +} __attribute__ ((packed)); + +struct mrvl_ie_wildcard_ssid_param_set { + struct mrvl_ie_header header; + u8 MaxSsidlength; + u8 ssid[1]; +} __attribute__ ((packed)); + +struct chanscanmode { +#ifdef __BIG_ENDIAN_BITFIELD + u8 reserved_2_7:6; + u8 disablechanfilt:1; + u8 passivescan:1; +#else + u8 passivescan:1; + u8 disablechanfilt:1; + u8 reserved_2_7:6; +#endif +} __attribute__ ((packed)); + +struct chanscanparamset { + u8 radiotype; + u8 channumber; + struct chanscanmode chanscanmode; + __le16 minscantime; + __le16 maxscantime; +} __attribute__ ((packed)); + +struct mrvl_ie_chanlist_param_set { + struct mrvl_ie_header header; + struct chanscanparamset chanscanparam[1]; +} __attribute__ ((packed)); + +struct mrvl_ie_cf_param_set { + struct mrvl_ie_header header; + u8 cfpcnt; + u8 cfpperiod; + __le16 cfpmaxduration; + __le16 cfpdurationremaining; +} __attribute__ ((packed)); + +struct mrvl_ie_ds_param_set { + struct mrvl_ie_header header; + u8 channel; +} __attribute__ ((packed)); + +struct mrvl_ie_rsn_param_set { + struct mrvl_ie_header header; + u8 rsnie[1]; +} __attribute__ ((packed)); + +struct mrvl_ie_tsf_timestamp { + struct mrvl_ie_header header; + __le64 tsftable[1]; +} __attribute__ ((packed)); + +/* v9 and later firmware only */ +struct mrvl_ie_auth_type { + struct mrvl_ie_header header; + __le16 auth; +} __attribute__ ((packed)); + +/** Local Power capability */ +struct mrvl_ie_power_capability { + struct mrvl_ie_header header; + s8 minpower; + s8 maxpower; +} __attribute__ ((packed)); + +/* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */ +struct mrvl_ie_thresholds { + struct mrvl_ie_header header; + u8 value; + u8 freq; +} __attribute__ ((packed)); + +struct mrvl_ie_beacons_missed { + struct mrvl_ie_header header; + u8 beaconmissed; + u8 reserved; +} __attribute__ ((packed)); + +struct mrvl_ie_num_probes { + struct mrvl_ie_header header; + __le16 numprobes; +} __attribute__ ((packed)); + +struct mrvl_ie_bcast_probe { + struct mrvl_ie_header header; + __le16 bcastprobe; +} __attribute__ ((packed)); + +struct mrvl_ie_num_ssid_probe { + struct mrvl_ie_header header; + __le16 numssidprobe; +} __attribute__ ((packed)); + +struct led_pin { + u8 led; + u8 pin; +} __attribute__ ((packed)); + +struct mrvl_ie_ledgpio { + struct mrvl_ie_header header; + struct led_pin ledpin[1]; +} __attribute__ ((packed)); + +struct led_bhv { + uint8_t firmwarestate; + uint8_t led; + uint8_t ledstate; + uint8_t ledarg; +} __attribute__ ((packed)); + + +struct mrvl_ie_ledbhv { + struct mrvl_ie_header header; + struct led_bhv ledbhv[1]; +} __attribute__ ((packed)); + +/* Meant to be packed as the value member of a struct ieee80211_info_element. 
+ * Note that the len member of the ieee80211_info_element varies depending on + * the mesh_id_len */ +struct mrvl_meshie_val { + uint8_t oui[3]; + uint8_t type; + uint8_t subtype; + uint8_t version; + uint8_t active_protocol_id; + uint8_t active_metric_id; + uint8_t mesh_capability; + uint8_t mesh_id_len; + uint8_t mesh_id[IEEE80211_MAX_SSID_LEN]; +} __attribute__ ((packed)); + +struct mrvl_meshie { + u8 id, len; + struct mrvl_meshie_val val; +} __attribute__ ((packed)); + +struct mrvl_mesh_defaults { + __le32 bootflag; + uint8_t boottime; + uint8_t reserved; + __le16 channel; + struct mrvl_meshie meshie; +} __attribute__ ((packed)); + +#endif diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c new file mode 100644 index 0000000..71f88a0 --- /dev/null +++ b/drivers/net/wireless/libertas/wext.c @@ -0,0 +1,2348 @@ +/** + * This file contains ioctl functions + */ +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "host.h" +#include "radiotap.h" +#include "decl.h" +#include "defs.h" +#include "dev.h" +#include "wext.h" +#include "scan.h" +#include "assoc.h" +#include "cmd.h" + + +static inline void lbs_postpone_association_work(struct lbs_private *priv) +{ + if (priv->surpriseremoved) + return; + cancel_delayed_work(&priv->assoc_work); + queue_delayed_work(priv->work_thread, &priv->assoc_work, HZ / 2); +} + +static inline void lbs_do_association_work(struct lbs_private *priv) +{ + if (priv->surpriseremoved) + return; + cancel_delayed_work(&priv->assoc_work); + queue_delayed_work(priv->work_thread, &priv->assoc_work, 0); +} + +static inline void lbs_cancel_association_work(struct lbs_private *priv) +{ + cancel_delayed_work(&priv->assoc_work); + kfree(priv->pending_assoc_req); + priv->pending_assoc_req = NULL; +} + +void lbs_send_disconnect_notification(struct lbs_private *priv) +{ + union iwreq_data wrqu; + + memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); +} + +static void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str) +{ + union iwreq_data iwrq; + u8 buf[50]; + + lbs_deb_enter(LBS_DEB_WEXT); + + memset(&iwrq, 0, sizeof(union iwreq_data)); + memset(buf, 0, sizeof(buf)); + + snprintf(buf, sizeof(buf) - 1, "%s", str); + + iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN; + + /* Send Event to upper layer */ + lbs_deb_wext("event indication string %s\n", (char *)buf); + lbs_deb_wext("event indication length %d\n", iwrq.data.length); + lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str); + + wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf); + + lbs_deb_leave(LBS_DEB_WEXT); +} + +/** + * @brief This function handles MIC failure event. 
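+ * It forwards an "MLME-MICHAELMICFAILURE.indication" string to user space
+ * as an IWEVCUSTOM wireless event, so a supplicant listening for it can
+ * react (e.g. start TKIP countermeasures).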
+ * + * @param priv A pointer to struct lbs_private structure + * @para event the event id + * @return n/a + */ +void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event) +{ + char buf[50]; + + lbs_deb_enter(LBS_DEB_CMD); + memset(buf, 0, sizeof(buf)); + + sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication "); + + if (event == MACREG_INT_CODE_MIC_ERR_UNICAST) + strcat(buf, "unicast "); + else + strcat(buf, "multicast "); + + lbs_send_iwevcustom_event(priv, buf); + lbs_deb_leave(LBS_DEB_CMD); +} + +/** + * @brief Find the channel frequency power info with specific channel + * + * @param priv A pointer to struct lbs_private structure + * @param band it can be BAND_A, BAND_G or BAND_B + * @param channel the channel for looking + * @return A pointer to struct chan_freq_power structure or NULL if not find. + */ +struct chan_freq_power *lbs_find_cfp_by_band_and_channel( + struct lbs_private *priv, + u8 band, + u16 channel) +{ + struct chan_freq_power *cfp = NULL; + struct region_channel *rc; + int i, j; + + for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) { + rc = &priv->region_channel[j]; + + if (!rc->valid || !rc->CFP) + continue; + if (rc->band != band) + continue; + for (i = 0; i < rc->nrcfp; i++) { + if (rc->CFP[i].channel == channel) { + cfp = &rc->CFP[i]; + break; + } + } + } + + if (!cfp && channel) + lbs_deb_wext("lbs_find_cfp_by_band_and_channel: can't find " + "cfp by band %d / channel %d\n", band, channel); + + return cfp; +} + +/** + * @brief Find the channel frequency power info with specific frequency + * + * @param priv A pointer to struct lbs_private structure + * @param band it can be BAND_A, BAND_G or BAND_B + * @param freq the frequency for looking + * @return A pointer to struct chan_freq_power structure or NULL if not find. + */ +static struct chan_freq_power *find_cfp_by_band_and_freq( + struct lbs_private *priv, + u8 band, + u32 freq) +{ + struct chan_freq_power *cfp = NULL; + struct region_channel *rc; + int i, j; + + for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) { + rc = &priv->region_channel[j]; + + if (!rc->valid || !rc->CFP) + continue; + if (rc->band != band) + continue; + for (i = 0; i < rc->nrcfp; i++) { + if (rc->CFP[i].freq == freq) { + cfp = &rc->CFP[i]; + break; + } + } + } + + if (!cfp && freq) + lbs_deb_wext("find_cfp_by_band_and_freql: can't find cfp by " + "band %d / freq %d\n", band, freq); + + return cfp; +} + +/** + * @brief Copy active data rates based on adapter mode and status + * + * @param priv A pointer to struct lbs_private structure + * @param rate The buf to return the active rates + */ +static void copy_active_data_rates(struct lbs_private *priv, u8 *rates) +{ + lbs_deb_enter(LBS_DEB_WEXT); + + if ((priv->connect_status != LBS_CONNECTED) && + !lbs_mesh_connected(priv)) + memcpy(rates, lbs_bg_rates, MAX_RATES); + else + memcpy(rates, priv->curbssparams.rates, MAX_RATES); + + lbs_deb_leave(LBS_DEB_WEXT); +} + +static int lbs_get_name(struct net_device *dev, struct iw_request_info *info, + char *cwrq, char *extra) +{ + + lbs_deb_enter(LBS_DEB_WEXT); + + /* We could add support for 802.11n here as needed. 
Jean II */ + snprintf(cwrq, IFNAMSIZ, "IEEE 802.11b/g"); + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info, + struct iw_freq *fwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + struct chan_freq_power *cfp; + + lbs_deb_enter(LBS_DEB_WEXT); + + cfp = lbs_find_cfp_by_band_and_channel(priv, 0, + priv->channel); + + if (!cfp) { + if (priv->channel) + lbs_deb_wext("invalid channel %d\n", + priv->channel); + return -EINVAL; + } + + fwrq->m = (long)cfp->freq * 100000; + fwrq->e = 1; + + lbs_deb_wext("freq %u\n", fwrq->m); + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info, + struct sockaddr *awrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (priv->connect_status == LBS_CONNECTED) { + memcpy(awrq->sa_data, priv->curbssparams.bssid, ETH_ALEN); + } else { + memset(awrq->sa_data, 0, ETH_ALEN); + } + awrq->sa_family = ARPHRD_ETHER; + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info, + struct iw_point *dwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + /* + * Check the size of the string + */ + + if (dwrq->length > 16) { + return -E2BIG; + } + + mutex_lock(&priv->lock); + memset(priv->nodename, 0, sizeof(priv->nodename)); + memcpy(priv->nodename, extra, dwrq->length); + mutex_unlock(&priv->lock); + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info, + struct iw_point *dwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + dwrq->length = strlen(priv->nodename); + memcpy(extra, priv->nodename, dwrq->length); + extra[dwrq->length] = '\0'; + + dwrq->flags = 1; /* active */ + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +#ifdef CONFIG_LIBERTAS_MESH +static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info, + struct iw_point *dwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + /* Use nickname to indicate that mesh is on */ + + if (lbs_mesh_connected(priv)) { + strncpy(extra, "Mesh", 12); + extra[12] = '\0'; + dwrq->length = strlen(extra); + } + + else { + extra[0] = '\0'; + dwrq->length = 0; + } + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} +#endif + +static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + int ret = 0; + struct lbs_private *priv = dev->ml_priv; + u32 val = vwrq->value; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (vwrq->disabled) + val = MRVDRV_RTS_MAX_VALUE; + + if (val > MRVDRV_RTS_MAX_VALUE) /* min rts value is 0 */ + return -EINVAL; + + ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, (u16) val); + + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + u16 val = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, &val); + if (ret) + goto out; + + vwrq->value = val; + vwrq->disabled = val > MRVDRV_RTS_MAX_VALUE; /* min rts value is 0 */ + vwrq->fixed = 1; + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int 
lbs_set_frag(struct net_device *dev, struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + u32 val = vwrq->value; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (vwrq->disabled) + val = MRVDRV_FRAG_MAX_VALUE; + + if (val < MRVDRV_FRAG_MIN_VALUE || val > MRVDRV_FRAG_MAX_VALUE) + return -EINVAL; + + ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, (u16) val); + + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + u16 val = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, &val); + if (ret) + goto out; + + vwrq->value = val; + vwrq->disabled = ((val < MRVDRV_FRAG_MIN_VALUE) + || (val > MRVDRV_FRAG_MAX_VALUE)); + vwrq->fixed = 1; + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int lbs_get_mode(struct net_device *dev, + struct iw_request_info *info, u32 * uwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + *uwrq = priv->mode; + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +#ifdef CONFIG_LIBERTAS_MESH +static int mesh_wlan_get_mode(struct net_device *dev, + struct iw_request_info *info, u32 * uwrq, + char *extra) +{ + lbs_deb_enter(LBS_DEB_WEXT); + + *uwrq = IW_MODE_REPEAT; + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} +#endif + +static int lbs_get_txpow(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + s16 curlevel = 0; + int ret = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (!priv->radio_on) { + lbs_deb_wext("tx power off\n"); + vwrq->value = 0; + vwrq->disabled = 1; + goto out; + } + + ret = lbs_get_tx_power(priv, &curlevel, NULL, NULL); + if (ret) + goto out; + + lbs_deb_wext("tx power level %d dbm\n", curlevel); + priv->txpower_cur = curlevel; + + vwrq->value = curlevel; + vwrq->fixed = 1; + vwrq->disabled = 0; + vwrq->flags = IW_TXPOW_DBM; + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + u16 slimit = 0, llimit = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + if ((vwrq->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT) + return -EOPNOTSUPP; + + /* The MAC has a 4-bit Total_Tx_Count register + Total_Tx_Count = 1 + Tx_Retry_Count */ +#define TX_RETRY_MIN 0 +#define TX_RETRY_MAX 14 + if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX) + return -EINVAL; + + /* Add 1 to convert retry count to try count */ + if (vwrq->flags & IW_RETRY_SHORT) + slimit = (u16) (vwrq->value + 1); + else if (vwrq->flags & IW_RETRY_LONG) + llimit = (u16) (vwrq->value + 1); + else + slimit = llimit = (u16) (vwrq->value + 1); /* set both */ + + if (llimit) { + ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT, + llimit); + if (ret) + goto out; + } + + if (slimit) { + /* txretrycount follows the short retry limit */ + priv->txretrycount = slimit; + ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT, + slimit); + if (ret) + goto out; + } + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info, + struct 
iw_param *vwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + u16 val = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + vwrq->disabled = 0; + + if (vwrq->flags & IW_RETRY_LONG) { + ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT, &val); + if (ret) + goto out; + + /* Subtract 1 to convert try count to retry count */ + vwrq->value = val - 1; + vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG; + } else { + ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT, &val); + if (ret) + goto out; + + /* txretry count follows the short retry limit */ + priv->txretrycount = val; + /* Subtract 1 to convert try count to retry count */ + vwrq->value = val - 1; + vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT; + } + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static inline void sort_channels(struct iw_freq *freq, int num) +{ + int i, j; + struct iw_freq temp; + + for (i = 0; i < num; i++) + for (j = i + 1; j < num; j++) + if (freq[i].i > freq[j].i) { + temp.i = freq[i].i; + temp.m = freq[i].m; + + freq[i].i = freq[j].i; + freq[i].m = freq[j].m; + + freq[j].i = temp.i; + freq[j].m = temp.m; + } +} + +/* data rate listing + MULTI_BANDS: + abg a b b/g + Infra G(12) A(8) B(4) G(12) + Adhoc A+B(12) A(8) B(4) B(4) + + non-MULTI_BANDS: + b b/g + Infra B(4) G(12) + Adhoc B(4) B(4) + */ +/** + * @brief Get Range Info + * + * @param dev A pointer to net_device structure + * @param info A pointer to iw_request_info structure + * @param vwrq A pointer to iw_param structure + * @param extra A pointer to extra data buf + * @return 0 --success, otherwise fail + */ +static int lbs_get_range(struct net_device *dev, struct iw_request_info *info, + struct iw_point *dwrq, char *extra) +{ + int i, j; + struct lbs_private *priv = dev->ml_priv; + struct iw_range *range = (struct iw_range *)extra; + struct chan_freq_power *cfp; + u8 rates[MAX_RATES + 1]; + + lbs_deb_enter(LBS_DEB_WEXT); + + dwrq->length = sizeof(struct iw_range); + memset(range, 0, sizeof(struct iw_range)); + + range->min_nwid = 0; + range->max_nwid = 0; + + memset(rates, 0, sizeof(rates)); + copy_active_data_rates(priv, rates); + range->num_bitrates = strnlen(rates, IW_MAX_BITRATES); + for (i = 0; i < range->num_bitrates; i++) + range->bitrate[i] = rates[i] * 500000; + range->num_bitrates = i; + lbs_deb_wext("IW_MAX_BITRATES %d, num_bitrates %d\n", IW_MAX_BITRATES, + range->num_bitrates); + + range->num_frequency = 0; + + range->scan_capa = IW_SCAN_CAPA_ESSID; + + for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES) + && (j < ARRAY_SIZE(priv->region_channel)); j++) { + cfp = priv->region_channel[j].CFP; + for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES) + && priv->region_channel[j].valid + && cfp + && (i < priv->region_channel[j].nrcfp); i++) { + range->freq[range->num_frequency].i = + (long)cfp->channel; + range->freq[range->num_frequency].m = + (long)cfp->freq * 100000; + range->freq[range->num_frequency].e = 1; + cfp++; + range->num_frequency++; + } + } + + lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n", + IW_MAX_FREQUENCIES, range->num_frequency); + + range->num_channels = range->num_frequency; + + sort_channels(&range->freq[0], range->num_frequency); + + /* + * Set an indication of the max TCP throughput in bit/s that we can + * expect using this interface + */ + if (i > 2) + range->throughput = 5000 * 1000; + else + range->throughput = 1500 * 1000; + + range->min_rts = MRVDRV_RTS_MIN_VALUE; + range->max_rts = MRVDRV_RTS_MAX_VALUE; + range->min_frag = 
MRVDRV_FRAG_MIN_VALUE; + range->max_frag = MRVDRV_FRAG_MAX_VALUE; + + range->encoding_size[0] = 5; + range->encoding_size[1] = 13; + range->num_encoding_sizes = 2; + range->max_encoding_tokens = 4; + + /* + * Right now we support only "iwconfig ethX power on|off" + */ + range->pm_capa = IW_POWER_ON; + + /* + * Minimum version we recommend + */ + range->we_version_source = 15; + + /* + * Version we are compiled with + */ + range->we_version_compiled = WIRELESS_EXT; + + range->retry_capa = IW_RETRY_LIMIT; + range->retry_flags = IW_RETRY_LIMIT | IW_RETRY_MAX; + + range->min_retry = TX_RETRY_MIN; + range->max_retry = TX_RETRY_MAX; + + /* + * Set the qual, level and noise range values + */ + range->max_qual.qual = 100; + range->max_qual.level = 0; + range->max_qual.noise = 0; + range->max_qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; + + range->avg_qual.qual = 70; + /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ + range->avg_qual.level = 0; + range->avg_qual.noise = 0; + range->avg_qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; + + range->sensitivity = 0; + + /* Setup the supported power level ranges */ + memset(range->txpower, 0, sizeof(range->txpower)); + range->txpower_capa = IW_TXPOW_DBM | IW_TXPOW_RANGE; + range->txpower[0] = priv->txpower_min; + range->txpower[1] = priv->txpower_max; + range->num_txpower = 2; + + range->event_capa[0] = (IW_EVENT_CAPA_K_0 | + IW_EVENT_CAPA_MASK(SIOCGIWAP) | + IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); + range->event_capa[1] = IW_EVENT_CAPA_K_1; + + if (priv->fwcapinfo & FW_CAPINFO_WPA) { + range->enc_capa = IW_ENC_CAPA_WPA + | IW_ENC_CAPA_WPA2 + | IW_ENC_CAPA_CIPHER_TKIP + | IW_ENC_CAPA_CIPHER_CCMP; + } + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +static int lbs_set_power(struct net_device *dev, struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (!(priv->fwcapinfo & FW_CAPINFO_PS)) { + if (vwrq->disabled) + return 0; + else + return -EINVAL; + } + + /* PS is currently supported only in Infrastructure mode + * Remove this check if it is to be supported in IBSS mode also + */ + + if (vwrq->disabled) { + priv->psmode = LBS802_11POWERMODECAM; + if (priv->psstate != PS_STATE_FULL_POWER) { + lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); + } + + return 0; + } + + if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { + lbs_deb_wext( + "setting power timeout is not supported\n"); + return -EINVAL; + } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) { + vwrq->value = vwrq->value / 1000; + if (!priv->enter_deep_sleep) { + lbs_pr_err("deep sleep feature is not implemented " + "for this interface driver\n"); + return -EINVAL; + } + + if (priv->connect_status == LBS_CONNECTED) { + if ((priv->is_auto_deep_sleep_enabled) && + (vwrq->value == -1000)) { + lbs_exit_auto_deep_sleep(priv); + return 0; + } else { + lbs_pr_err("can't use deep sleep cmd in " + "connected state\n"); + return -EINVAL; + } + } + + if ((vwrq->value < 0) && (vwrq->value != -1000)) { + lbs_pr_err("unknown option\n"); + return -EINVAL; + } + + if (vwrq->value > 0) { + if (!priv->is_auto_deep_sleep_enabled) { + priv->is_activity_detected = 0; + priv->auto_deep_sleep_timeout = vwrq->value; + lbs_enter_auto_deep_sleep(priv); + } else { + priv->auto_deep_sleep_timeout = vwrq->value; + lbs_deb_debugfs("auto deep sleep: " + "already enabled\n"); + } + return 0; + } else { + if (priv->is_auto_deep_sleep_enabled) { + lbs_exit_auto_deep_sleep(priv); + /* Try to 
exit deep sleep if auto */ + /*deep sleep disabled */ + ret = lbs_set_deep_sleep(priv, 0); + } + if (vwrq->value == 0) + ret = lbs_set_deep_sleep(priv, 1); + else if (vwrq->value == -1000) + ret = lbs_set_deep_sleep(priv, 0); + return ret; + } + } + + if (priv->psmode != LBS802_11POWERMODECAM) { + return 0; + } + + priv->psmode = LBS802_11POWERMODEMAX_PSP; + + if (priv->connect_status == LBS_CONNECTED) { + lbs_ps_sleep(priv, CMD_OPTION_WAITFORRSP); + } + + lbs_deb_leave(LBS_DEB_WEXT); + + return 0; +} + +static int lbs_get_power(struct net_device *dev, struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + vwrq->value = 0; + vwrq->flags = 0; + vwrq->disabled = priv->psmode == LBS802_11POWERMODECAM + || priv->connect_status == LBS_DISCONNECTED; + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev) +{ + enum { + POOR = 30, + FAIR = 60, + GOOD = 80, + VERY_GOOD = 90, + EXCELLENT = 95, + PERFECT = 100 + }; + struct lbs_private *priv = dev->ml_priv; + u32 rssi_qual; + u32 tx_qual; + u32 quality = 0; + int ret, stats_valid = 0; + u8 rssi; + u32 tx_retries; + struct cmd_ds_802_11_get_log log; + + lbs_deb_enter(LBS_DEB_WEXT); + + priv->wstats.status = priv->mode; + + /* If we're not associated, all quality values are meaningless */ + if ((priv->connect_status != LBS_CONNECTED) && + !lbs_mesh_connected(priv)) + goto out; + + /* Quality by RSSI */ + priv->wstats.qual.level = + CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG], + priv->NF[TYPE_BEACON][TYPE_NOAVG]); + + if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) { + priv->wstats.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE; + } else { + priv->wstats.qual.noise = + CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]); + } + + lbs_deb_wext("signal level %#x\n", priv->wstats.qual.level); + lbs_deb_wext("noise %#x\n", priv->wstats.qual.noise); + + rssi = priv->wstats.qual.level - priv->wstats.qual.noise; + if (rssi < 15) + rssi_qual = rssi * POOR / 10; + else if (rssi < 20) + rssi_qual = (rssi - 15) * (FAIR - POOR) / 5 + POOR; + else if (rssi < 30) + rssi_qual = (rssi - 20) * (GOOD - FAIR) / 5 + FAIR; + else if (rssi < 40) + rssi_qual = (rssi - 30) * (VERY_GOOD - GOOD) / + 10 + GOOD; + else + rssi_qual = (rssi - 40) * (PERFECT - VERY_GOOD) / + 10 + VERY_GOOD; + quality = rssi_qual; + + /* Quality by TX errors */ + priv->wstats.discard.retries = dev->stats.tx_errors; + + memset(&log, 0, sizeof(log)); + log.hdr.size = cpu_to_le16(sizeof(log)); + ret = lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log); + if (ret) + goto out; + + tx_retries = le32_to_cpu(log.retry); + + if (tx_retries > 75) + tx_qual = (90 - tx_retries) * POOR / 15; + else if (tx_retries > 70) + tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR; + else if (tx_retries > 65) + tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR; + else if (tx_retries > 50) + tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) / + 15 + GOOD; + else + tx_qual = (50 - tx_retries) * + (PERFECT - VERY_GOOD) / 50 + VERY_GOOD; + quality = min(quality, tx_qual); + + priv->wstats.discard.code = le32_to_cpu(log.wepundecryptable); + priv->wstats.discard.retries = tx_retries; + priv->wstats.discard.misc = le32_to_cpu(log.ackfailure); + + /* Calculate quality */ + priv->wstats.qual.qual = min_t(u8, quality, 100); + priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; + stats_valid = 1; + + /* update stats asynchronously for future calls */ + ret = 
lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, + 0, 0, NULL); + if (ret) + lbs_pr_err("RSSI command failed\n"); +out: + if (!stats_valid) { + priv->wstats.miss.beacon = 0; + priv->wstats.discard.retries = 0; + priv->wstats.qual.qual = 0; + priv->wstats.qual.level = 0; + priv->wstats.qual.noise = 0; + priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED; + priv->wstats.qual.updated |= IW_QUAL_NOISE_INVALID | + IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; + } + + lbs_deb_leave(LBS_DEB_WEXT); + return &priv->wstats; + + +} + +static int lbs_set_freq(struct net_device *dev, struct iw_request_info *info, + struct iw_freq *fwrq, char *extra) +{ + int ret = -EINVAL; + struct lbs_private *priv = dev->ml_priv; + struct chan_freq_power *cfp; + struct assoc_request * assoc_req; + + lbs_deb_enter(LBS_DEB_WEXT); + + mutex_lock(&priv->lock); + assoc_req = lbs_get_association_request(priv); + if (!assoc_req) { + ret = -ENOMEM; + goto out; + } + + /* If setting by frequency, convert to a channel */ + if (fwrq->e == 1) { + long f = fwrq->m / 100000; + + cfp = find_cfp_by_band_and_freq(priv, 0, f); + if (!cfp) { + lbs_deb_wext("invalid freq %ld\n", f); + goto out; + } + + fwrq->e = 0; + fwrq->m = (int) cfp->channel; + } + + /* Setting by channel number */ + if (fwrq->m > 1000 || fwrq->e > 0) { + goto out; + } + + cfp = lbs_find_cfp_by_band_and_channel(priv, 0, fwrq->m); + if (!cfp) { + goto out; + } + + assoc_req->channel = fwrq->m; + ret = 0; + +out: + if (ret == 0) { + set_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags); + lbs_postpone_association_work(priv); + } else { + lbs_cancel_association_work(priv); + } + mutex_unlock(&priv->lock); + + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +#ifdef CONFIG_LIBERTAS_MESH +static int lbs_mesh_set_freq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *fwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + struct chan_freq_power *cfp; + int ret = -EINVAL; + + lbs_deb_enter(LBS_DEB_WEXT); + + /* If setting by frequency, convert to a channel */ + if (fwrq->e == 1) { + long f = fwrq->m / 100000; + + cfp = find_cfp_by_band_and_freq(priv, 0, f); + if (!cfp) { + lbs_deb_wext("invalid freq %ld\n", f); + goto out; + } + + fwrq->e = 0; + fwrq->m = (int) cfp->channel; + } + + /* Setting by channel number */ + if (fwrq->m > 1000 || fwrq->e > 0) { + goto out; + } + + cfp = lbs_find_cfp_by_band_and_channel(priv, 0, fwrq->m); + if (!cfp) { + goto out; + } + + if (fwrq->m != priv->channel) { + lbs_deb_wext("mesh channel change forces eth disconnect\n"); + if (priv->mode == IW_MODE_INFRA) + lbs_cmd_80211_deauthenticate(priv, + priv->curbssparams.bssid, + WLAN_REASON_DEAUTH_LEAVING); + else if (priv->mode == IW_MODE_ADHOC) + lbs_adhoc_stop(priv); + } + lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m); + lbs_update_channel(priv); + ret = 0; + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} +#endif + +static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + u8 new_rate = 0; + int ret = -EINVAL; + u8 rates[MAX_RATES + 1]; + + lbs_deb_enter(LBS_DEB_WEXT); + + lbs_deb_wext("vwrq->value %d\n", vwrq->value); + lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed); + + if (vwrq->fixed && vwrq->value == -1) + goto out; + + /* Auto rate? 
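+	   A fixed bitrate request comes in with vwrq->fixed set, so anything
+	   else (re)enables firmware rate adaptation below.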
*/ + priv->enablehwauto = !vwrq->fixed; + + if (vwrq->value == -1) + priv->cur_rate = 0; + else { + if (vwrq->value % 100000) + goto out; + + new_rate = vwrq->value / 500000; + priv->cur_rate = new_rate; + /* the rest is only needed for lbs_set_data_rate() */ + memset(rates, 0, sizeof(rates)); + copy_active_data_rates(priv, rates); + if (!memchr(rates, new_rate, sizeof(rates))) { + lbs_pr_alert("fixed data rate 0x%X out of range\n", + new_rate); + goto out; + } + if (priv->fwrelease < 0x09000000) { + ret = lbs_set_power_adapt_cfg(priv, 0, + POW_ADAPT_DEFAULT_P0, + POW_ADAPT_DEFAULT_P1, + POW_ADAPT_DEFAULT_P2); + if (ret) + goto out; + } + ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1, + TPC_DEFAULT_P2, 1); + if (ret) + goto out; + } + + /* Try the newer command first (Firmware Spec 5.1 and above) */ + ret = lbs_cmd_802_11_rate_adapt_rateset(priv, CMD_ACT_SET); + + /* Fallback to older version */ + if (ret) + ret = lbs_set_data_rate(priv, new_rate); + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (priv->connect_status == LBS_CONNECTED) { + vwrq->value = priv->cur_rate * 500000; + + if (priv->enablehwauto) + vwrq->fixed = 0; + else + vwrq->fixed = 1; + + } else { + vwrq->fixed = 0; + vwrq->value = 0; + } + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +static int lbs_set_mode(struct net_device *dev, + struct iw_request_info *info, u32 * uwrq, char *extra) +{ + int ret = 0; + struct lbs_private *priv = dev->ml_priv; + struct assoc_request * assoc_req; + + lbs_deb_enter(LBS_DEB_WEXT); + + if ( (*uwrq != IW_MODE_ADHOC) + && (*uwrq != IW_MODE_INFRA) + && (*uwrq != IW_MODE_AUTO)) { + lbs_deb_wext("Invalid mode: 0x%x\n", *uwrq); + ret = -EINVAL; + goto out; + } + + mutex_lock(&priv->lock); + assoc_req = lbs_get_association_request(priv); + if (!assoc_req) { + ret = -ENOMEM; + lbs_cancel_association_work(priv); + } else { + assoc_req->mode = *uwrq; + set_bit(ASSOC_FLAG_MODE, &assoc_req->flags); + lbs_postpone_association_work(priv); + lbs_deb_wext("Switching to mode: 0x%x\n", *uwrq); + } + mutex_unlock(&priv->lock); + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + + +/** + * @brief Get Encryption key + * + * @param dev A pointer to net_device structure + * @param info A pointer to iw_request_info structure + * @param vwrq A pointer to iw_param structure + * @param extra A pointer to extra data buf + * @return 0 --success, otherwise fail + */ +static int lbs_get_encode(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, u8 * extra) +{ + struct lbs_private *priv = dev->ml_priv; + int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + + lbs_deb_enter(LBS_DEB_WEXT); + + lbs_deb_wext("flags 0x%x, index %d, length %d, wep_tx_keyidx %d\n", + dwrq->flags, index, dwrq->length, priv->wep_tx_keyidx); + + dwrq->flags = 0; + + /* Authentication method */ + switch (priv->secinfo.auth_mode) { + case IW_AUTH_ALG_OPEN_SYSTEM: + dwrq->flags = IW_ENCODE_OPEN; + break; + + case IW_AUTH_ALG_SHARED_KEY: + case IW_AUTH_ALG_LEAP: + dwrq->flags = IW_ENCODE_RESTRICTED; + break; + default: + dwrq->flags = IW_ENCODE_DISABLED | IW_ENCODE_OPEN; + break; + } + + memset(extra, 0, 16); + + mutex_lock(&priv->lock); + + /* Default to returning current transmit key */ + if (index < 0) + index = priv->wep_tx_keyidx; + + if 
((priv->wep_keys[index].len) && priv->secinfo.wep_enabled) { + memcpy(extra, priv->wep_keys[index].key, + priv->wep_keys[index].len); + dwrq->length = priv->wep_keys[index].len; + + dwrq->flags |= (index + 1); + /* Return WEP enabled */ + dwrq->flags &= ~IW_ENCODE_DISABLED; + } else if ((priv->secinfo.WPAenabled) + || (priv->secinfo.WPA2enabled)) { + /* return WPA enabled */ + dwrq->flags &= ~IW_ENCODE_DISABLED; + dwrq->flags |= IW_ENCODE_NOKEY; + } else { + dwrq->flags |= IW_ENCODE_DISABLED; + } + + mutex_unlock(&priv->lock); + + lbs_deb_wext("key: %02x:%02x:%02x:%02x:%02x:%02x, keylen %d\n", + extra[0], extra[1], extra[2], + extra[3], extra[4], extra[5], dwrq->length); + + lbs_deb_wext("return flags 0x%x\n", dwrq->flags); + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +/** + * @brief Set Encryption key (internal) + * + * @param priv A pointer to private card structure + * @param key_material A pointer to key material + * @param key_length length of key material + * @param index key index to set + * @param set_tx_key Force set TX key (1 = yes, 0 = no) + * @return 0 --success, otherwise fail + */ +static int lbs_set_wep_key(struct assoc_request *assoc_req, + const char *key_material, + u16 key_length, + u16 index, + int set_tx_key) +{ + int ret = 0; + struct enc_key *pkey; + + lbs_deb_enter(LBS_DEB_WEXT); + + /* Paranoid validation of key index */ + if (index > 3) { + ret = -EINVAL; + goto out; + } + + /* validate max key length */ + if (key_length > KEY_LEN_WEP_104) { + ret = -EINVAL; + goto out; + } + + pkey = &assoc_req->wep_keys[index]; + + if (key_length > 0) { + memset(pkey, 0, sizeof(struct enc_key)); + pkey->type = KEY_TYPE_ID_WEP; + + /* Standardize the key length */ + pkey->len = (key_length > KEY_LEN_WEP_40) ? + KEY_LEN_WEP_104 : KEY_LEN_WEP_40; + memcpy(pkey->key, key_material, key_length); + } + + if (set_tx_key) { + /* Ensure the chosen key is valid */ + if (!pkey->len) { + lbs_deb_wext("key not set, so cannot enable it\n"); + ret = -EINVAL; + goto out; + } + assoc_req->wep_tx_keyidx = index; + } + + assoc_req->secinfo.wep_enabled = 1; + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int validate_key_index(u16 def_index, u16 raw_index, + u16 *out_index, u16 *is_default) +{ + if (!out_index || !is_default) + return -EINVAL; + + /* Verify index if present, otherwise use default TX key index */ + if (raw_index > 0) { + if (raw_index > 4) + return -EINVAL; + *out_index = raw_index - 1; + } else { + *out_index = def_index; + *is_default = 1; + } + return 0; +} + +static void disable_wep(struct assoc_request *assoc_req) +{ + int i; + + lbs_deb_enter(LBS_DEB_WEXT); + + /* Set Open System auth mode */ + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; + + /* Clear WEP keys and mark WEP as disabled */ + assoc_req->secinfo.wep_enabled = 0; + for (i = 0; i < 4; i++) + assoc_req->wep_keys[i].len = 0; + + set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); + set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags); + + lbs_deb_leave(LBS_DEB_WEXT); +} + +static void disable_wpa(struct assoc_request *assoc_req) +{ + lbs_deb_enter(LBS_DEB_WEXT); + + memset(&assoc_req->wpa_mcast_key, 0, sizeof (struct enc_key)); + assoc_req->wpa_mcast_key.flags = KEY_INFO_WPA_MCAST; + set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); + + memset(&assoc_req->wpa_unicast_key, 0, sizeof (struct enc_key)); + assoc_req->wpa_unicast_key.flags = KEY_INFO_WPA_UNICAST; + set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); + + assoc_req->secinfo.WPAenabled = 0; + 
assoc_req->secinfo.WPA2enabled = 0; + set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); + + lbs_deb_leave(LBS_DEB_WEXT); +} + +/** + * @brief Set Encryption key + * + * @param dev A pointer to net_device structure + * @param info A pointer to iw_request_info structure + * @param vwrq A pointer to iw_param structure + * @param extra A pointer to extra data buf + * @return 0 --success, otherwise fail + */ +static int lbs_set_encode(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, char *extra) +{ + int ret = 0; + struct lbs_private *priv = dev->ml_priv; + struct assoc_request * assoc_req; + u16 is_default = 0, index = 0, set_tx_key = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + mutex_lock(&priv->lock); + assoc_req = lbs_get_association_request(priv); + if (!assoc_req) { + ret = -ENOMEM; + goto out; + } + + if (dwrq->flags & IW_ENCODE_DISABLED) { + disable_wep (assoc_req); + disable_wpa (assoc_req); + goto out; + } + + ret = validate_key_index(assoc_req->wep_tx_keyidx, + (dwrq->flags & IW_ENCODE_INDEX), + &index, &is_default); + if (ret) { + ret = -EINVAL; + goto out; + } + + /* If WEP isn't enabled, or if there is no key data but a valid + * index, set the TX key. + */ + if (!assoc_req->secinfo.wep_enabled || (dwrq->length == 0 && !is_default)) + set_tx_key = 1; + + ret = lbs_set_wep_key(assoc_req, extra, dwrq->length, index, set_tx_key); + if (ret) + goto out; + + if (dwrq->length) + set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags); + if (set_tx_key) + set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags); + + if (dwrq->flags & IW_ENCODE_RESTRICTED) { + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; + } else if (dwrq->flags & IW_ENCODE_OPEN) { + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; + } + +out: + if (ret == 0) { + set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); + lbs_postpone_association_work(priv); + } else { + lbs_cancel_association_work(priv); + } + mutex_unlock(&priv->lock); + + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +/** + * @brief Get Extended Encryption key (WPA/802.1x and WEP) + * + * @param dev A pointer to net_device structure + * @param info A pointer to iw_request_info structure + * @param vwrq A pointer to iw_param structure + * @param extra A pointer to extra data buf + * @return 0 on success, otherwise failure + */ +static int lbs_get_encodeext(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra) +{ + int ret = -EINVAL; + struct lbs_private *priv = dev->ml_priv; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + int index, max_key_len; + + lbs_deb_enter(LBS_DEB_WEXT); + + max_key_len = dwrq->length - sizeof(*ext); + if (max_key_len < 0) + goto out; + + index = dwrq->flags & IW_ENCODE_INDEX; + if (index) { + if (index < 1 || index > 4) + goto out; + index--; + } else { + index = priv->wep_tx_keyidx; + } + + if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) && + ext->alg != IW_ENCODE_ALG_WEP) { + if (index != 0 || priv->mode != IW_MODE_INFRA) + goto out; + } + + dwrq->flags = index + 1; + memset(ext, 0, sizeof(*ext)); + + if ( !priv->secinfo.wep_enabled + && !priv->secinfo.WPAenabled + && !priv->secinfo.WPA2enabled) { + ext->alg = IW_ENCODE_ALG_NONE; + ext->key_len = 0; + dwrq->flags |= IW_ENCODE_DISABLED; + } else { + u8 *key = NULL; + + if ( priv->secinfo.wep_enabled + && !priv->secinfo.WPAenabled + && !priv->secinfo.WPA2enabled) { + /* WEP */ + ext->alg = IW_ENCODE_ALG_WEP; + ext->key_len = priv->wep_keys[index].len; + key = 
&priv->wep_keys[index].key[0]; + } else if ( !priv->secinfo.wep_enabled + && (priv->secinfo.WPAenabled || + priv->secinfo.WPA2enabled)) { + /* WPA */ + struct enc_key * pkey = NULL; + + if ( priv->wpa_mcast_key.len + && (priv->wpa_mcast_key.flags & KEY_INFO_WPA_ENABLED)) + pkey = &priv->wpa_mcast_key; + else if ( priv->wpa_unicast_key.len + && (priv->wpa_unicast_key.flags & KEY_INFO_WPA_ENABLED)) + pkey = &priv->wpa_unicast_key; + + if (pkey) { + if (pkey->type == KEY_TYPE_ID_AES) { + ext->alg = IW_ENCODE_ALG_CCMP; + } else { + ext->alg = IW_ENCODE_ALG_TKIP; + } + ext->key_len = pkey->len; + key = &pkey->key[0]; + } else { + ext->alg = IW_ENCODE_ALG_TKIP; + ext->key_len = 0; + } + } else { + goto out; + } + + if (ext->key_len > max_key_len) { + ret = -E2BIG; + goto out; + } + + if (ext->key_len) + memcpy(ext->key, key, ext->key_len); + else + dwrq->flags |= IW_ENCODE_NOKEY; + dwrq->flags |= IW_ENCODE_ENABLED; + } + ret = 0; + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +/** + * @brief Set Encryption key Extended (WPA/802.1x and WEP) + * + * @param dev A pointer to net_device structure + * @param info A pointer to iw_request_info structure + * @param vwrq A pointer to iw_param structure + * @param extra A pointer to extra data buf + * @return 0 --success, otherwise fail + */ +static int lbs_set_encodeext(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra) +{ + int ret = 0; + struct lbs_private *priv = dev->ml_priv; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + int alg = ext->alg; + struct assoc_request * assoc_req; + + lbs_deb_enter(LBS_DEB_WEXT); + + mutex_lock(&priv->lock); + assoc_req = lbs_get_association_request(priv); + if (!assoc_req) { + ret = -ENOMEM; + goto out; + } + + if ((alg == IW_ENCODE_ALG_NONE) || (dwrq->flags & IW_ENCODE_DISABLED)) { + disable_wep (assoc_req); + disable_wpa (assoc_req); + } else if (alg == IW_ENCODE_ALG_WEP) { + u16 is_default = 0, index, set_tx_key = 0; + + ret = validate_key_index(assoc_req->wep_tx_keyidx, + (dwrq->flags & IW_ENCODE_INDEX), + &index, &is_default); + if (ret) + goto out; + + /* If WEP isn't enabled, or if there is no key data but a valid + * index, or if the set-TX-key flag was passed, set the TX key. 
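+ * (An index with no key data is how userspace selects which already-loaded + * WEP key becomes the default TX key.)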
+ */ + if ( !assoc_req->secinfo.wep_enabled + || (dwrq->length == 0 && !is_default) + || (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) + set_tx_key = 1; + + /* Copy key to driver */ + ret = lbs_set_wep_key(assoc_req, ext->key, ext->key_len, index, + set_tx_key); + if (ret) + goto out; + + if (dwrq->flags & IW_ENCODE_RESTRICTED) { + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; + } else if (dwrq->flags & IW_ENCODE_OPEN) { + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; + } + + /* Mark the various WEP bits as modified */ + set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); + if (dwrq->length) + set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags); + if (set_tx_key) + set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags); + } else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) { + struct enc_key * pkey; + + /* validate key length */ + if (((alg == IW_ENCODE_ALG_TKIP) + && (ext->key_len != KEY_LEN_WPA_TKIP)) + || ((alg == IW_ENCODE_ALG_CCMP) + && (ext->key_len != KEY_LEN_WPA_AES))) { + lbs_deb_wext("invalid size %d for key of alg " + "type %d\n", + ext->key_len, + alg); + ret = -EINVAL; + goto out; + } + + if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { + pkey = &assoc_req->wpa_mcast_key; + set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); + } else { + pkey = &assoc_req->wpa_unicast_key; + set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); + } + + memset(pkey, 0, sizeof (struct enc_key)); + memcpy(pkey->key, ext->key, ext->key_len); + pkey->len = ext->key_len; + if (pkey->len) + pkey->flags |= KEY_INFO_WPA_ENABLED; + + /* Do this after zeroing key structure */ + if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { + pkey->flags |= KEY_INFO_WPA_MCAST; + } else { + pkey->flags |= KEY_INFO_WPA_UNICAST; + } + + if (alg == IW_ENCODE_ALG_TKIP) { + pkey->type = KEY_TYPE_ID_TKIP; + } else if (alg == IW_ENCODE_ALG_CCMP) { + pkey->type = KEY_TYPE_ID_AES; + } + + /* If WPA isn't enabled yet, do that now */ + if ( assoc_req->secinfo.WPAenabled == 0 + && assoc_req->secinfo.WPA2enabled == 0) { + assoc_req->secinfo.WPAenabled = 1; + assoc_req->secinfo.WPA2enabled = 1; + set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); + } + + /* Only disable wep if necessary: can't waste time here. */ + if (priv->mac_control & CMD_ACT_MAC_WEP_ENABLE) + disable_wep(assoc_req); + } + +out: + if (ret == 0) { + /* 802.1x and WPA rekeying must happen as quickly as possible, + * especially during the 4-way handshake; thus if in + * infrastructure mode, and either (a) 802.1x is enabled or + * (b) WPA is being used, set the key right away. 
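+ * Going through the postponed association worker here could install the + * key too late, stalling or breaking the handshake.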
+ */ + if (assoc_req->mode == IW_MODE_INFRA && + ((assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_802_1X) || + (assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_PSK) || + assoc_req->secinfo.WPAenabled || + assoc_req->secinfo.WPA2enabled)) { + lbs_do_association_work(priv); + } else + lbs_postpone_association_work(priv); + } else { + lbs_cancel_association_work(priv); + } + mutex_unlock(&priv->lock); + + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + + +static int lbs_set_genie(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + struct assoc_request * assoc_req; + + lbs_deb_enter(LBS_DEB_WEXT); + + mutex_lock(&priv->lock); + assoc_req = lbs_get_association_request(priv); + if (!assoc_req) { + ret = -ENOMEM; + goto out; + } + + if (dwrq->length > MAX_WPA_IE_LEN || + (dwrq->length && extra == NULL)) { + ret = -EINVAL; + goto out; + } + + if (dwrq->length) { + memcpy(&assoc_req->wpa_ie[0], extra, dwrq->length); + assoc_req->wpa_ie_len = dwrq->length; + } else { + memset(&assoc_req->wpa_ie[0], 0, sizeof(priv->wpa_ie)); + assoc_req->wpa_ie_len = 0; + } + +out: + if (ret == 0) { + set_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags); + lbs_postpone_association_work(priv); + } else { + lbs_cancel_association_work(priv); + } + mutex_unlock(&priv->lock); + + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int lbs_get_genie(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra) +{ + int ret = 0; + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (priv->wpa_ie_len == 0) { + dwrq->length = 0; + goto out; + } + + if (dwrq->length < priv->wpa_ie_len) { + ret = -E2BIG; + goto out; + } + + dwrq->length = priv->wpa_ie_len; + memcpy(extra, &priv->wpa_ie[0], priv->wpa_ie_len); + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + + +static int lbs_set_auth(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *dwrq, + char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + struct assoc_request * assoc_req; + int ret = 0; + int updated = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + mutex_lock(&priv->lock); + assoc_req = lbs_get_association_request(priv); + if (!assoc_req) { + ret = -ENOMEM; + goto out; + } + + switch (dwrq->flags & IW_AUTH_INDEX) { + case IW_AUTH_PRIVACY_INVOKED: + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + case IW_AUTH_TKIP_COUNTERMEASURES: + case IW_AUTH_CIPHER_PAIRWISE: + case IW_AUTH_CIPHER_GROUP: + case IW_AUTH_DROP_UNENCRYPTED: + /* + * libertas does not use these parameters + */ + break; + + case IW_AUTH_KEY_MGMT: + assoc_req->secinfo.key_mgmt = dwrq->value; + updated = 1; + break; + + case IW_AUTH_WPA_VERSION: + if (dwrq->value & IW_AUTH_WPA_VERSION_DISABLED) { + assoc_req->secinfo.WPAenabled = 0; + assoc_req->secinfo.WPA2enabled = 0; + disable_wpa (assoc_req); + } + if (dwrq->value & IW_AUTH_WPA_VERSION_WPA) { + assoc_req->secinfo.WPAenabled = 1; + assoc_req->secinfo.wep_enabled = 0; + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; + } + if (dwrq->value & IW_AUTH_WPA_VERSION_WPA2) { + assoc_req->secinfo.WPA2enabled = 1; + assoc_req->secinfo.wep_enabled = 0; + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; + } + updated = 1; + break; + + case IW_AUTH_80211_AUTH_ALG: + if (dwrq->value & IW_AUTH_ALG_SHARED_KEY) { + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; + } else if (dwrq->value & 
IW_AUTH_ALG_OPEN_SYSTEM) { + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; + } else if (dwrq->value & IW_AUTH_ALG_LEAP) { + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_LEAP; + } else { + ret = -EINVAL; + } + updated = 1; + break; + + case IW_AUTH_WPA_ENABLED: + if (dwrq->value) { + if (!assoc_req->secinfo.WPAenabled && + !assoc_req->secinfo.WPA2enabled) { + assoc_req->secinfo.WPAenabled = 1; + assoc_req->secinfo.WPA2enabled = 1; + assoc_req->secinfo.wep_enabled = 0; + assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; + } + } else { + assoc_req->secinfo.WPAenabled = 0; + assoc_req->secinfo.WPA2enabled = 0; + disable_wpa (assoc_req); + } + updated = 1; + break; + + default: + ret = -EOPNOTSUPP; + break; + } + +out: + if (ret == 0) { + if (updated) + set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); + lbs_postpone_association_work(priv); + } else if (ret != -EOPNOTSUPP) { + lbs_cancel_association_work(priv); + } + mutex_unlock(&priv->lock); + + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int lbs_get_auth(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *dwrq, + char *extra) +{ + int ret = 0; + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + switch (dwrq->flags & IW_AUTH_INDEX) { + case IW_AUTH_KEY_MGMT: + dwrq->value = priv->secinfo.key_mgmt; + break; + + case IW_AUTH_WPA_VERSION: + dwrq->value = 0; + if (priv->secinfo.WPAenabled) + dwrq->value |= IW_AUTH_WPA_VERSION_WPA; + if (priv->secinfo.WPA2enabled) + dwrq->value |= IW_AUTH_WPA_VERSION_WPA2; + if (!dwrq->value) + dwrq->value |= IW_AUTH_WPA_VERSION_DISABLED; + break; + + case IW_AUTH_80211_AUTH_ALG: + dwrq->value = priv->secinfo.auth_mode; + break; + + case IW_AUTH_WPA_ENABLED: + if (priv->secinfo.WPAenabled && priv->secinfo.WPA2enabled) + dwrq->value = 1; + break; + + default: + ret = -EOPNOTSUPP; + } + + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + + +static int lbs_set_txpow(struct net_device *dev, struct iw_request_info *info, + struct iw_param *vwrq, char *extra) +{ + int ret = 0; + struct lbs_private *priv = dev->ml_priv; + s16 dbm = (s16) vwrq->value; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (vwrq->disabled) { + lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 0); + goto out; + } + + if (vwrq->fixed == 0) { + /* User requests automatic tx power control, however there are + * many auto tx settings. For now use firmware defaults until + * we come up with a good way to expose these to the user. */ + if (priv->fwrelease < 0x09000000) { + ret = lbs_set_power_adapt_cfg(priv, 1, + POW_ADAPT_DEFAULT_P0, + POW_ADAPT_DEFAULT_P1, + POW_ADAPT_DEFAULT_P2); + if (ret) + goto out; + } + ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1, + TPC_DEFAULT_P2, 1); + if (ret) + goto out; + dbm = priv->txpower_max; + } else { + /* Userspace check in iwrange if it should use dBm or mW, + * therefore this should never happen... 
Jean II */ + if ((vwrq->flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) { + ret = -EOPNOTSUPP; + goto out; + } + + /* Validate requested power level against firmware allowed + * levels */ + if (priv->txpower_min && (dbm < priv->txpower_min)) { + ret = -EINVAL; + goto out; + } + + if (priv->txpower_max && (dbm > priv->txpower_max)) { + ret = -EINVAL; + goto out; + } + if (priv->fwrelease < 0x09000000) { + ret = lbs_set_power_adapt_cfg(priv, 0, + POW_ADAPT_DEFAULT_P0, + POW_ADAPT_DEFAULT_P1, + POW_ADAPT_DEFAULT_P2); + if (ret) + goto out; + } + ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1, + TPC_DEFAULT_P2, 1); + if (ret) + goto out; + } + + /* If the radio was off, turn it on */ + if (!priv->radio_on) { + ret = lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 1); + if (ret) + goto out; + } + + lbs_deb_wext("txpower set %d dBm\n", dbm); + + ret = lbs_set_tx_power(priv, dbm); + +out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info, + struct iw_point *dwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + /* + * Note : if dwrq->flags != 0, we should get the relevant SSID from + * the SSID list... + */ + + /* + * Get the current SSID + */ + if (priv->connect_status == LBS_CONNECTED) { + memcpy(extra, priv->curbssparams.ssid, + priv->curbssparams.ssid_len); + } else { + memset(extra, 0, 32); + } + /* + * If none, we may want to get the one that was set + */ + + dwrq->length = priv->curbssparams.ssid_len; + + dwrq->flags = 1; /* active */ + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info, + struct iw_point *dwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len = 0; + struct assoc_request * assoc_req; + int in_ssid_len = dwrq->length; + DECLARE_SSID_BUF(ssid_buf); + + lbs_deb_enter(LBS_DEB_WEXT); + + if (!priv->radio_on) { + ret = -EINVAL; + goto out; + } + + /* Check the size of the string */ + if (in_ssid_len > IEEE80211_MAX_SSID_LEN) { + ret = -E2BIG; + goto out; + } + + memset(&ssid, 0, sizeof(ssid)); + + if (!dwrq->flags || !in_ssid_len) { + /* "any" SSID requested; leave SSID blank */ + } else { + /* Specific SSID requested */ + memcpy(&ssid, extra, in_ssid_len); + ssid_len = in_ssid_len; + } + + if (!ssid_len) { + lbs_deb_wext("requested any SSID\n"); + } else { + lbs_deb_wext("requested SSID '%s'\n", + print_ssid(ssid_buf, ssid, ssid_len)); + } + +out: + mutex_lock(&priv->lock); + if (ret == 0) { + /* Get or create the current association request */ + assoc_req = lbs_get_association_request(priv); + if (!assoc_req) { + ret = -ENOMEM; + } else { + /* Copy the SSID to the association request */ + memcpy(&assoc_req->ssid, &ssid, IEEE80211_MAX_SSID_LEN); + assoc_req->ssid_len = ssid_len; + set_bit(ASSOC_FLAG_SSID, &assoc_req->flags); + lbs_postpone_association_work(priv); + } + } + + /* Cancel the association request if there was an error */ + if (ret != 0) { + lbs_cancel_association_work(priv); + } + + mutex_unlock(&priv->lock); + + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} + +#ifdef CONFIG_LIBERTAS_MESH +static int lbs_mesh_get_essid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + + lbs_deb_enter(LBS_DEB_WEXT); + + memcpy(extra, priv->mesh_ssid, priv->mesh_ssid_len); + + 
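/* The mesh SSID length is capped at IEEE80211_MAX_SSID_LEN when it is set + * (see lbs_mesh_set_essid below), so this copy fits the WEXT ESSID buffer. */ +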
dwrq->length = priv->mesh_ssid_len; + + dwrq->flags = 1; /* active */ + + lbs_deb_leave(LBS_DEB_WEXT); + return 0; +} + +static int lbs_mesh_set_essid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + int ret = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (!priv->radio_on) { + ret = -EINVAL; + goto out; + } + + /* Check the size of the string */ + if (dwrq->length > IEEE80211_MAX_SSID_LEN) { + ret = -E2BIG; + goto out; + } + + if (!dwrq->flags || !dwrq->length) { + ret = -EINVAL; + goto out; + } else { + /* Specific SSID requested */ + memcpy(priv->mesh_ssid, extra, dwrq->length); + priv->mesh_ssid_len = dwrq->length; + } + + lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, + priv->channel); + out: + lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); + return ret; +} +#endif + +/** + * @brief Connect to the AP or Ad-hoc Network with specific bssid + * + * @param dev A pointer to net_device structure + * @param info A pointer to iw_request_info structure + * @param awrq A pointer to iw_param structure + * @param extra A pointer to extra data buf + * @return 0 --success, otherwise fail + */ +static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info, + struct sockaddr *awrq, char *extra) +{ + struct lbs_private *priv = dev->ml_priv; + struct assoc_request * assoc_req; + int ret = 0; + + lbs_deb_enter(LBS_DEB_WEXT); + + if (!priv->radio_on) + return -EINVAL; + + if (awrq->sa_family != ARPHRD_ETHER) + return -EINVAL; + + lbs_deb_wext("ASSOC: WAP: sa_data %pM\n", awrq->sa_data); + + mutex_lock(&priv->lock); + + /* Get or create the current association request */ + assoc_req = lbs_get_association_request(priv); + if (!assoc_req) { + lbs_cancel_association_work(priv); + ret = -ENOMEM; + } else { + /* Copy the BSSID to the association request */ + memcpy(&assoc_req->bssid, awrq->sa_data, ETH_ALEN); + set_bit(ASSOC_FLAG_BSSID, &assoc_req->flags); + lbs_postpone_association_work(priv); + } + + mutex_unlock(&priv->lock); + + return ret; +} + +/* + * iwconfig settable callbacks + */ +static const iw_handler lbs_handler[] = { + (iw_handler) NULL, /* SIOCSIWCOMMIT */ + (iw_handler) lbs_get_name, /* SIOCGIWNAME */ + (iw_handler) NULL, /* SIOCSIWNWID */ + (iw_handler) NULL, /* SIOCGIWNWID */ + (iw_handler) lbs_set_freq, /* SIOCSIWFREQ */ + (iw_handler) lbs_get_freq, /* SIOCGIWFREQ */ + (iw_handler) lbs_set_mode, /* SIOCSIWMODE */ + (iw_handler) lbs_get_mode, /* SIOCGIWMODE */ + (iw_handler) NULL, /* SIOCSIWSENS */ + (iw_handler) NULL, /* SIOCGIWSENS */ + (iw_handler) NULL, /* SIOCSIWRANGE */ + (iw_handler) lbs_get_range, /* SIOCGIWRANGE */ + (iw_handler) NULL, /* SIOCSIWPRIV */ + (iw_handler) NULL, /* SIOCGIWPRIV */ + (iw_handler) NULL, /* SIOCSIWSTATS */ + (iw_handler) NULL, /* SIOCGIWSTATS */ + iw_handler_set_spy, /* SIOCSIWSPY */ + iw_handler_get_spy, /* SIOCGIWSPY */ + iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ + iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ + (iw_handler) lbs_set_wap, /* SIOCSIWAP */ + (iw_handler) lbs_get_wap, /* SIOCGIWAP */ + (iw_handler) NULL, /* SIOCSIWMLME */ + (iw_handler) NULL, /* SIOCGIWAPLIST - deprecated */ + (iw_handler) lbs_set_scan, /* SIOCSIWSCAN */ + (iw_handler) lbs_get_scan, /* SIOCGIWSCAN */ + (iw_handler) lbs_set_essid, /* SIOCSIWESSID */ + (iw_handler) lbs_get_essid, /* SIOCGIWESSID */ + (iw_handler) lbs_set_nick, /* SIOCSIWNICKN */ + (iw_handler) lbs_get_nick, /* SIOCGIWNICKN */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) NULL, /* -- hole -- */ + 
(iw_handler) lbs_set_rate, /* SIOCSIWRATE */ + (iw_handler) lbs_get_rate, /* SIOCGIWRATE */ + (iw_handler) lbs_set_rts, /* SIOCSIWRTS */ + (iw_handler) lbs_get_rts, /* SIOCGIWRTS */ + (iw_handler) lbs_set_frag, /* SIOCSIWFRAG */ + (iw_handler) lbs_get_frag, /* SIOCGIWFRAG */ + (iw_handler) lbs_set_txpow, /* SIOCSIWTXPOW */ + (iw_handler) lbs_get_txpow, /* SIOCGIWTXPOW */ + (iw_handler) lbs_set_retry, /* SIOCSIWRETRY */ + (iw_handler) lbs_get_retry, /* SIOCGIWRETRY */ + (iw_handler) lbs_set_encode, /* SIOCSIWENCODE */ + (iw_handler) lbs_get_encode, /* SIOCGIWENCODE */ + (iw_handler) lbs_set_power, /* SIOCSIWPOWER */ + (iw_handler) lbs_get_power, /* SIOCGIWPOWER */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) lbs_set_genie, /* SIOCSIWGENIE */ + (iw_handler) lbs_get_genie, /* SIOCGIWGENIE */ + (iw_handler) lbs_set_auth, /* SIOCSIWAUTH */ + (iw_handler) lbs_get_auth, /* SIOCGIWAUTH */ + (iw_handler) lbs_set_encodeext,/* SIOCSIWENCODEEXT */ + (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */ + (iw_handler) NULL, /* SIOCSIWPMKSA */ +}; +struct iw_handler_def lbs_handler_def = { + .num_standard = ARRAY_SIZE(lbs_handler), + .standard = (iw_handler *) lbs_handler, + .get_wireless_stats = lbs_get_wireless_stats, +}; + +#ifdef CONFIG_LIBERTAS_MESH +static const iw_handler mesh_wlan_handler[] = { + (iw_handler) NULL, /* SIOCSIWCOMMIT */ + (iw_handler) lbs_get_name, /* SIOCGIWNAME */ + (iw_handler) NULL, /* SIOCSIWNWID */ + (iw_handler) NULL, /* SIOCGIWNWID */ + (iw_handler) lbs_mesh_set_freq, /* SIOCSIWFREQ */ + (iw_handler) lbs_get_freq, /* SIOCGIWFREQ */ + (iw_handler) NULL, /* SIOCSIWMODE */ + (iw_handler) mesh_wlan_get_mode, /* SIOCGIWMODE */ + (iw_handler) NULL, /* SIOCSIWSENS */ + (iw_handler) NULL, /* SIOCGIWSENS */ + (iw_handler) NULL, /* SIOCSIWRANGE */ + (iw_handler) lbs_get_range, /* SIOCGIWRANGE */ + (iw_handler) NULL, /* SIOCSIWPRIV */ + (iw_handler) NULL, /* SIOCGIWPRIV */ + (iw_handler) NULL, /* SIOCSIWSTATS */ + (iw_handler) NULL, /* SIOCGIWSTATS */ + iw_handler_set_spy, /* SIOCSIWSPY */ + iw_handler_get_spy, /* SIOCGIWSPY */ + iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ + iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ + (iw_handler) NULL, /* SIOCSIWAP */ + (iw_handler) NULL, /* SIOCGIWAP */ + (iw_handler) NULL, /* SIOCSIWMLME */ + (iw_handler) NULL, /* SIOCGIWAPLIST - deprecated */ + (iw_handler) lbs_set_scan, /* SIOCSIWSCAN */ + (iw_handler) lbs_get_scan, /* SIOCGIWSCAN */ + (iw_handler) lbs_mesh_set_essid,/* SIOCSIWESSID */ + (iw_handler) lbs_mesh_get_essid,/* SIOCGIWESSID */ + (iw_handler) NULL, /* SIOCSIWNICKN */ + (iw_handler) mesh_get_nick, /* SIOCGIWNICKN */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) lbs_set_rate, /* SIOCSIWRATE */ + (iw_handler) lbs_get_rate, /* SIOCGIWRATE */ + (iw_handler) lbs_set_rts, /* SIOCSIWRTS */ + (iw_handler) lbs_get_rts, /* SIOCGIWRTS */ + (iw_handler) lbs_set_frag, /* SIOCSIWFRAG */ + (iw_handler) lbs_get_frag, /* SIOCGIWFRAG */ + (iw_handler) lbs_set_txpow, /* SIOCSIWTXPOW */ + (iw_handler) lbs_get_txpow, /* SIOCGIWTXPOW */ + (iw_handler) lbs_set_retry, /* SIOCSIWRETRY */ + (iw_handler) lbs_get_retry, /* SIOCGIWRETRY */ + (iw_handler) lbs_set_encode, /* SIOCSIWENCODE */ + (iw_handler) lbs_get_encode, /* SIOCGIWENCODE */ + (iw_handler) lbs_set_power, /* SIOCSIWPOWER */ + (iw_handler) lbs_get_power, /* SIOCGIWPOWER */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) lbs_set_genie, /* SIOCSIWGENIE */ + (iw_handler) 
lbs_get_genie, /* SIOCGIWGENIE */ + (iw_handler) lbs_set_auth, /* SIOCSIWAUTH */ + (iw_handler) lbs_get_auth, /* SIOCGIWAUTH */ + (iw_handler) lbs_set_encodeext,/* SIOCSIWENCODEEXT */ + (iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */ + (iw_handler) NULL, /* SIOCSIWPMKSA */ +}; + +struct iw_handler_def mesh_handler_def = { + .num_standard = ARRAY_SIZE(mesh_wlan_handler), + .standard = (iw_handler *) mesh_wlan_handler, + .get_wireless_stats = lbs_get_wireless_stats, +}; +#endif diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h new file mode 100644 index 0000000..f3f19fe --- /dev/null +++ b/drivers/net/wireless/libertas/wext.h @@ -0,0 +1,17 @@ +/** + * This file contains definition for IOCTL call. + */ +#ifndef _LBS_WEXT_H_ +#define _LBS_WEXT_H_ + +void lbs_send_disconnect_notification(struct lbs_private *priv); +void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event); + +struct chan_freq_power *lbs_find_cfp_by_band_and_channel( + struct lbs_private *priv, + u8 band, + u16 channel); + +extern struct iw_handler_def lbs_handler_def; + +#endif diff --git a/drivers/net/wireless/libertas_tf/Makefile b/drivers/net/wireless/libertas_tf/Makefile new file mode 100644 index 0000000..ff5544d --- /dev/null +++ b/drivers/net/wireless/libertas_tf/Makefile @@ -0,0 +1,6 @@ +libertas_tf-objs := main.o cmd.o + +libertas_tf_usb-objs += if_usb.o + +obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf.o +obj-$(CONFIG_LIBERTAS_THINFIRM_USB) += libertas_tf_usb.o diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c new file mode 100644 index 0000000..28790e0 --- /dev/null +++ b/drivers/net/wireless/libertas_tf/cmd.c @@ -0,0 +1,668 @@ +/* + * Copyright (C) 2008, cozybit Inc. + * Copyright (C) 2003-2006, Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ +#include "libertas_tf.h" + +static const struct channel_range channel_ranges[] = { + { LBTF_REGDOMAIN_US, 1, 12 }, + { LBTF_REGDOMAIN_CA, 1, 12 }, + { LBTF_REGDOMAIN_EU, 1, 14 }, + { LBTF_REGDOMAIN_JP, 1, 14 }, + { LBTF_REGDOMAIN_SP, 1, 14 }, + { LBTF_REGDOMAIN_FR, 1, 14 }, +}; + +static u16 lbtf_region_code_to_index[MRVDRV_MAX_REGION_CODE] = +{ + LBTF_REGDOMAIN_US, LBTF_REGDOMAIN_CA, LBTF_REGDOMAIN_EU, + LBTF_REGDOMAIN_SP, LBTF_REGDOMAIN_FR, LBTF_REGDOMAIN_JP, +}; + +static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv); + + +/** + * lbtf_cmd_copyback - Simple callback that copies response back into command + * + * @priv A pointer to struct lbtf_private structure + * @extra A pointer to the original command structure for which + * 'resp' is a response + * @resp A pointer to the command response + * + * Returns: 0 on success, error on failure + */ +int lbtf_cmd_copyback(struct lbtf_private *priv, unsigned long extra, + struct cmd_header *resp) +{ + struct cmd_header *buf = (void *)extra; + uint16_t copy_len; + + copy_len = min(le16_to_cpu(buf->size), le16_to_cpu(resp->size)); + memcpy(buf, resp, copy_len); + return 0; +} +EXPORT_SYMBOL_GPL(lbtf_cmd_copyback); + +#define CHAN_TO_IDX(chan) ((chan) - 1) + +static void lbtf_geo_init(struct lbtf_private *priv) +{ + const struct channel_range *range = channel_ranges; + u8 ch; + int i; + + for (i = 0; i < ARRAY_SIZE(channel_ranges); i++) + if (channel_ranges[i].regdomain == priv->regioncode) { + range = &channel_ranges[i]; + break; + } + + for (ch = priv->range.start; ch < priv->range.end; ch++) + priv->channels[CHAN_TO_IDX(ch)].flags = 0; +} + +/** + * lbtf_update_hw_spec: Updates the hardware details. + * + * @priv A pointer to struct lbtf_private structure + * + * Returns: 0 on success, error on failure + */ +int lbtf_update_hw_spec(struct lbtf_private *priv) +{ + struct cmd_ds_get_hw_spec cmd; + int ret = -1; + u32 i; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN); + ret = lbtf_cmd_with_response(priv, CMD_GET_HW_SPEC, &cmd); + if (ret) + goto out; + + priv->fwcapinfo = le32_to_cpu(cmd.fwcapinfo); + + /* The firmware release is in an interesting format: the patch + * level is in the most significant nibble ... so fix that: */ + priv->fwrelease = le32_to_cpu(cmd.fwrelease); + priv->fwrelease = (priv->fwrelease << 8) | + (priv->fwrelease >> 24 & 0xff); + + printk(KERN_INFO "libertastf: %pM, fw %u.%u.%up%u, cap 0x%08x\n", + cmd.permanentaddr, + priv->fwrelease >> 24 & 0xff, + priv->fwrelease >> 16 & 0xff, + priv->fwrelease >> 8 & 0xff, + priv->fwrelease & 0xff, + priv->fwcapinfo); + + /* Clamp region code to 8-bit since FW spec indicates that it should + * only ever be 8-bit, even though the field size is 16-bit. Some + * firmware returns non-zero high 8 bits here. 
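+ * (e.g. a reported region code of 0x8010 is treated as 0x10, USA)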
+ */ + priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF; + + for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) { + /* use the region code to search for the index */ + if (priv->regioncode == lbtf_region_code_to_index[i]) + break; + } + + /* if it's unidentified region code, use the default (USA) */ + if (i >= MRVDRV_MAX_REGION_CODE) + priv->regioncode = 0x10; + + if (priv->current_addr[0] == 0xff) + memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN); + + SET_IEEE80211_PERM_ADDR(priv->hw, priv->current_addr); + + lbtf_geo_init(priv); +out: + return ret; +} + +/** + * lbtf_set_channel: Set the radio channel + * + * @priv A pointer to struct lbtf_private structure + * @channel The desired channel, or 0 to clear a locked channel + * + * Returns: 0 on success, error on failure + */ +int lbtf_set_channel(struct lbtf_private *priv, u8 channel) +{ + struct cmd_ds_802_11_rf_channel cmd; + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET); + cmd.channel = cpu_to_le16(channel); + + return lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd); +} + +int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon) +{ + struct cmd_ds_802_11_beacon_set cmd; + int size; + + if (beacon->len > MRVL_MAX_BCN_SIZE) + return -1; + size = sizeof(cmd) - sizeof(cmd.beacon) + beacon->len; + cmd.hdr.size = cpu_to_le16(size); + cmd.len = cpu_to_le16(beacon->len); + memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len); + + lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size); + return 0; +} + +int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable, + int beacon_int) { + struct cmd_ds_802_11_beacon_control cmd; + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + cmd.beacon_enable = cpu_to_le16(beacon_enable); + cmd.beacon_period = cpu_to_le16(beacon_int); + + lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd)); + return 0; +} + +static void lbtf_queue_cmd(struct lbtf_private *priv, + struct cmd_ctrl_node *cmdnode) +{ + unsigned long flags; + + if (!cmdnode) + return; + + if (!cmdnode->cmdbuf->size) + return; + + cmdnode->result = 0; + spin_lock_irqsave(&priv->driver_lock, flags); + list_add_tail(&cmdnode->list, &priv->cmdpendingq); + spin_unlock_irqrestore(&priv->driver_lock, flags); +} + +static void lbtf_submit_command(struct lbtf_private *priv, + struct cmd_ctrl_node *cmdnode) +{ + unsigned long flags; + struct cmd_header *cmd; + uint16_t cmdsize; + uint16_t command; + int timeo = 5 * HZ; + int ret; + + cmd = cmdnode->cmdbuf; + + spin_lock_irqsave(&priv->driver_lock, flags); + priv->cur_cmd = cmdnode; + cmdsize = le16_to_cpu(cmd->size); + command = le16_to_cpu(cmd->command); + ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize); + spin_unlock_irqrestore(&priv->driver_lock, flags); + + if (ret) + /* Let the timer kick in and retry, and potentially reset + the whole thing if the condition persists */ + timeo = HZ; + + /* Setup the timer after transmit command */ + mod_timer(&priv->command_timer, jiffies + timeo); +} + +/** + * This function inserts command node to cmdfreeq + * after cleans it. Requires priv->driver_lock held. 
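+ * Callers that do not already hold the lock should use + * lbtf_cleanup_and_insert_cmd() below, which takes it.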
+ */ +static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv, + struct cmd_ctrl_node *cmdnode) +{ + if (!cmdnode) + return; + + cmdnode->callback = NULL; + cmdnode->callback_arg = 0; + + memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE); + + list_add_tail(&cmdnode->list, &priv->cmdfreeq); +} + +static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv, + struct cmd_ctrl_node *ptempcmd) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->driver_lock, flags); + __lbtf_cleanup_and_insert_cmd(priv, ptempcmd); + spin_unlock_irqrestore(&priv->driver_lock, flags); +} + +void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd, + int result) +{ + cmd->result = result; + cmd->cmdwaitqwoken = 1; + wake_up_interruptible(&cmd->cmdwait_q); + + if (!cmd->callback) + __lbtf_cleanup_and_insert_cmd(priv, cmd); + priv->cur_cmd = NULL; +} + +int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv) +{ + struct cmd_ds_mac_multicast_addr cmd; + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + + cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr); + memcpy(cmd.maclist, priv->multicastlist, + priv->nr_of_multicastmacaddr * ETH_ALEN); + + lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd)); + return 0; +} + +void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode) +{ + struct cmd_ds_set_mode cmd; + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.mode = cpu_to_le16(mode); + lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd)); +} + +void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid) +{ + struct cmd_ds_set_bssid cmd; + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.activate = activate ? 1 : 0; + if (activate) + memcpy(cmd.bssid, bssid, ETH_ALEN); + + lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd)); +} + +int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr) +{ + struct cmd_ds_802_11_mac_address cmd; + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + + memcpy(cmd.macadd, mac_addr, ETH_ALEN); + + lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd)); + return 0; +} + +int lbtf_set_radio_control(struct lbtf_private *priv) +{ + int ret = 0; + struct cmd_ds_802_11_radio_control cmd; + + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(CMD_ACT_SET); + + switch (priv->preamble) { + case CMD_TYPE_SHORT_PREAMBLE: + cmd.control = cpu_to_le16(SET_SHORT_PREAMBLE); + break; + + case CMD_TYPE_LONG_PREAMBLE: + cmd.control = cpu_to_le16(SET_LONG_PREAMBLE); + break; + + case CMD_TYPE_AUTO_PREAMBLE: + default: + cmd.control = cpu_to_le16(SET_AUTO_PREAMBLE); + break; + } + + if (priv->radioon) + cmd.control |= cpu_to_le16(TURN_ON_RF); + else + cmd.control &= cpu_to_le16(~TURN_ON_RF); + + ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd); + return ret; +} + +void lbtf_set_mac_control(struct lbtf_private *priv) +{ + struct cmd_ds_mac_control cmd; + cmd.hdr.size = cpu_to_le16(sizeof(cmd)); + cmd.action = cpu_to_le16(priv->mac_control); + cmd.reserved = 0; + + lbtf_cmd_async(priv, CMD_MAC_CONTROL, + &cmd.hdr, sizeof(cmd)); +} + +/** + * lbtf_allocate_cmd_buffer - Allocates cmd buffer, links it to free cmd queue + * + * @priv A pointer to struct lbtf_private structure + * + * Returns: 0 on success. 
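+ * Returns -1 if the command array or any command buffer cannot be allocated.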
+ */ +int lbtf_allocate_cmd_buffer(struct lbtf_private *priv) +{ + u32 bufsize; + u32 i; + struct cmd_ctrl_node *cmdarray; + + /* Allocate and initialize the command array */ + bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS; + cmdarray = kzalloc(bufsize, GFP_KERNEL); + if (!cmdarray) + return -1; + priv->cmd_array = cmdarray; + + /* Allocate and initialize each command buffer in the command array */ + for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { + cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL); + if (!cmdarray[i].cmdbuf) + return -1; + } + + for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { + init_waitqueue_head(&cmdarray[i].cmdwait_q); + lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]); + } + return 0; +} + +/** + * lbtf_free_cmd_buffer - Frees the cmd buffer. + * + * @priv A pointer to struct lbtf_private structure + * + * Returns: 0 + */ +int lbtf_free_cmd_buffer(struct lbtf_private *priv) +{ + struct cmd_ctrl_node *cmdarray; + unsigned int i; + + /* need to check if cmd array is allocated or not */ + if (priv->cmd_array == NULL) + return 0; + + cmdarray = priv->cmd_array; + + /* Release shared memory buffers */ + for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { + kfree(cmdarray[i].cmdbuf); + cmdarray[i].cmdbuf = NULL; + } + + /* Release cmd_ctrl_node */ + kfree(priv->cmd_array); + priv->cmd_array = NULL; + + return 0; +} + +/** + * lbtf_get_cmd_ctrl_node - Gets free cmd node from free cmd queue. + * + * @priv A pointer to struct lbtf_private structure + * + * Returns: pointer to a struct cmd_ctrl_node or NULL if none available. + */ +static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv) +{ + struct cmd_ctrl_node *tempnode; + unsigned long flags; + + if (!priv) + return NULL; + + spin_lock_irqsave(&priv->driver_lock, flags); + + if (!list_empty(&priv->cmdfreeq)) { + tempnode = list_first_entry(&priv->cmdfreeq, + struct cmd_ctrl_node, list); + list_del(&tempnode->list); + } else + tempnode = NULL; + + spin_unlock_irqrestore(&priv->driver_lock, flags); + + return tempnode; +} + +/** + * lbtf_execute_next_command: execute next command in cmd pending queue. + * + * @priv A pointer to struct lbtf_private structure + * + * Returns: 0 on success. 
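+ * Returns -1 if a command is already being processed (priv->cur_cmd set).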
+ */ +int lbtf_execute_next_command(struct lbtf_private *priv) +{ + struct cmd_ctrl_node *cmdnode = NULL; + struct cmd_header *cmd; + unsigned long flags; + + /* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the + * only caller to us is lbtf_thread() and we get even when a + * data packet is received */ + + spin_lock_irqsave(&priv->driver_lock, flags); + + if (priv->cur_cmd) { + spin_unlock_irqrestore(&priv->driver_lock, flags); + return -1; + } + + if (!list_empty(&priv->cmdpendingq)) { + cmdnode = list_first_entry(&priv->cmdpendingq, + struct cmd_ctrl_node, list); + } + + if (cmdnode) { + cmd = cmdnode->cmdbuf; + + list_del(&cmdnode->list); + spin_unlock_irqrestore(&priv->driver_lock, flags); + lbtf_submit_command(priv, cmdnode); + } else + spin_unlock_irqrestore(&priv->driver_lock, flags); + return 0; +} + +static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv, + uint16_t command, struct cmd_header *in_cmd, int in_cmd_size, + int (*callback)(struct lbtf_private *, unsigned long, + struct cmd_header *), + unsigned long callback_arg) +{ + struct cmd_ctrl_node *cmdnode; + + if (priv->surpriseremoved) + return ERR_PTR(-ENOENT); + + cmdnode = lbtf_get_cmd_ctrl_node(priv); + if (cmdnode == NULL) { + /* Wake up main thread to execute next command */ + queue_work(lbtf_wq, &priv->cmd_work); + return ERR_PTR(-ENOBUFS); + } + + cmdnode->callback = callback; + cmdnode->callback_arg = callback_arg; + + /* Copy the incoming command to the buffer */ + memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size); + + /* Set sequence number, clean result, move to buffer */ + priv->seqnum++; + cmdnode->cmdbuf->command = cpu_to_le16(command); + cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size); + cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum); + cmdnode->cmdbuf->result = 0; + cmdnode->cmdwaitqwoken = 0; + lbtf_queue_cmd(priv, cmdnode); + queue_work(lbtf_wq, &priv->cmd_work); + + return cmdnode; +} + +void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command, + struct cmd_header *in_cmd, int in_cmd_size) +{ + __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0); +} + +int __lbtf_cmd(struct lbtf_private *priv, uint16_t command, + struct cmd_header *in_cmd, int in_cmd_size, + int (*callback)(struct lbtf_private *, + unsigned long, struct cmd_header *), + unsigned long callback_arg) +{ + struct cmd_ctrl_node *cmdnode; + unsigned long flags; + int ret = 0; + + cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, + callback, callback_arg); + if (IS_ERR(cmdnode)) + return PTR_ERR(cmdnode); + + might_sleep(); + ret = wait_event_interruptible(cmdnode->cmdwait_q, + cmdnode->cmdwaitqwoken); + if (ret) { + printk(KERN_DEBUG + "libertastf: command 0x%04x interrupted by signal", + command); + return ret; + } + + spin_lock_irqsave(&priv->driver_lock, flags); + ret = cmdnode->result; + if (ret) + printk(KERN_DEBUG "libertastf: command 0x%04x failed: %d\n", + command, ret); + + __lbtf_cleanup_and_insert_cmd(priv, cmdnode); + spin_unlock_irqrestore(&priv->driver_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(__lbtf_cmd); + +/* Call holding driver_lock */ +void lbtf_cmd_response_rx(struct lbtf_private *priv) +{ + priv->cmd_response_rxed = 1; + queue_work(lbtf_wq, &priv->cmd_work); +} +EXPORT_SYMBOL_GPL(lbtf_cmd_response_rx); + +int lbtf_process_rx_command(struct lbtf_private *priv) +{ + uint16_t respcmd, curcmd; + struct cmd_header *resp; + int ret = 0; + unsigned long flags; + uint16_t result; + + mutex_lock(&priv->lock); + spin_lock_irqsave(&priv->driver_lock, flags); + + 
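/* Validate the response against the command in flight before completing it: + * the sequence numbers must match, the response code must be CMD_RET() of + * the current command, and a firmware result of 0x0004 (-EAGAIN) is dropped + * so the command timer can resubmit it. + */ +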
if (!priv->cur_cmd) { + ret = -1; + spin_unlock_irqrestore(&priv->driver_lock, flags); + goto done; + } + + resp = (void *)priv->cmd_resp_buff; + curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command); + respcmd = le16_to_cpu(resp->command); + result = le16_to_cpu(resp->result); + + if (net_ratelimit()) + printk(KERN_DEBUG "libertastf: cmd response 0x%04x, seq %d, size %d\n", + respcmd, le16_to_cpu(resp->seqnum), + le16_to_cpu(resp->size)); + + if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) { + spin_unlock_irqrestore(&priv->driver_lock, flags); + ret = -1; + goto done; + } + if (respcmd != CMD_RET(curcmd)) { + spin_unlock_irqrestore(&priv->driver_lock, flags); + ret = -1; + goto done; + } + + if (resp->result == cpu_to_le16(0x0004)) { + /* 0x0004 means -EAGAIN. Drop the response, let it time out + and be resubmitted */ + spin_unlock_irqrestore(&priv->driver_lock, flags); + ret = -1; + goto done; + } + + /* Now we got response from FW, cancel the command timer */ + del_timer(&priv->command_timer); + priv->cmd_timed_out = 0; + if (priv->nr_retries) + priv->nr_retries = 0; + + /* If the command is not successful, cleanup and return failure */ + if ((result != 0 || !(respcmd & 0x8000))) { + /* + * Handling errors here + */ + switch (respcmd) { + case CMD_RET(CMD_GET_HW_SPEC): + case CMD_RET(CMD_802_11_RESET): + printk(KERN_DEBUG "libertastf: reset failed\n"); + break; + + } + lbtf_complete_command(priv, priv->cur_cmd, result); + spin_unlock_irqrestore(&priv->driver_lock, flags); + + ret = -1; + goto done; + } + + spin_unlock_irqrestore(&priv->driver_lock, flags); + + if (priv->cur_cmd && priv->cur_cmd->callback) { + ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg, + resp); + } + spin_lock_irqsave(&priv->driver_lock, flags); + + if (priv->cur_cmd) { + /* Clean up and Put current command back to cmdfreeq */ + lbtf_complete_command(priv, priv->cur_cmd, result); + } + spin_unlock_irqrestore(&priv->driver_lock, flags); + +done: + mutex_unlock(&priv->lock); + return ret; +} diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c new file mode 100644 index 0000000..3691c30 --- /dev/null +++ b/drivers/net/wireless/libertas_tf/if_usb.c @@ -0,0 +1,767 @@ +/* + * Copyright (C) 2008, cozybit Inc. + * Copyright (C) 2003-2006, Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ +#include +#include +#include +#include +#include + +#define DRV_NAME "lbtf_usb" + +#include "libertas_tf.h" +#include "if_usb.h" + +#define MESSAGE_HEADER_LEN 4 + +static char *lbtf_fw_name = "lbtf_usb.bin"; +module_param_named(fw_name, lbtf_fw_name, charp, 0644); + +MODULE_FIRMWARE("lbtf_usb.bin"); + +static struct usb_device_id if_usb_table[] = { + /* Enter the device signature inside */ + { USB_DEVICE(0x1286, 0x2001) }, + { USB_DEVICE(0x05a3, 0x8388) }, + {} /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, if_usb_table); + +static void if_usb_receive(struct urb *urb); +static void if_usb_receive_fwload(struct urb *urb); +static int if_usb_prog_firmware(struct if_usb_card *cardp); +static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type, + uint8_t *payload, uint16_t nb); +static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, + uint16_t nb, u8 data); +static void if_usb_free(struct if_usb_card *cardp); +static int if_usb_submit_rx_urb(struct if_usb_card *cardp); +static int if_usb_reset_device(struct if_usb_card *cardp); + +/** + * if_usb_wrike_bulk_callback - call back to handle URB status + * + * @param urb pointer to urb structure + */ +static void if_usb_write_bulk_callback(struct urb *urb) +{ + if (urb->status != 0) + printk(KERN_INFO "libertastf: URB in failure status: %d\n", + urb->status); +} + +/** + * if_usb_free - free tx/rx urb, skb and rx buffer + * + * @param cardp pointer if_usb_card + */ +static void if_usb_free(struct if_usb_card *cardp) +{ + /* Unlink tx & rx urb */ + usb_kill_urb(cardp->tx_urb); + usb_kill_urb(cardp->rx_urb); + usb_kill_urb(cardp->cmd_urb); + + usb_free_urb(cardp->tx_urb); + cardp->tx_urb = NULL; + + usb_free_urb(cardp->rx_urb); + cardp->rx_urb = NULL; + + usb_free_urb(cardp->cmd_urb); + cardp->cmd_urb = NULL; + + kfree(cardp->ep_out_buf); + cardp->ep_out_buf = NULL; +} + +static void if_usb_setup_firmware(struct lbtf_private *priv) +{ + struct if_usb_card *cardp = priv->card; + struct cmd_ds_set_boot2_ver b2_cmd; + + if_usb_submit_rx_urb(cardp); + b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd)); + b2_cmd.action = 0; + b2_cmd.version = cardp->boot2_version; + + if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd)) + printk(KERN_INFO "libertastf: setting boot2 version failed\n"); +} + +static void if_usb_fw_timeo(unsigned long priv) +{ + struct if_usb_card *cardp = (void *)priv; + + if (!cardp->fwdnldover) + /* Download timed out */ + cardp->priv->surpriseremoved = 1; + wake_up(&cardp->fw_wq); +} + +/** + * if_usb_probe - sets the configuration values + * + * @ifnum interface number + * @id pointer to usb_device_id + * + * Returns: 0 on success, error code on failure + */ +static int if_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev; + struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *endpoint; + struct lbtf_private *priv; + struct if_usb_card *cardp; + int i; + + udev = interface_to_usbdev(intf); + + cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL); + if (!cardp) + goto error; + + setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp); + init_waitqueue_head(&cardp->fw_wq); + + cardp->udev = udev; + iface_desc = intf->cur_altsetting; + + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + if (usb_endpoint_is_bulk_in(endpoint)) { + cardp->ep_in_size = + le16_to_cpu(endpoint->wMaxPacketSize); + cardp->ep_in = usb_endpoint_num(endpoint); + } else if 
(usb_endpoint_is_bulk_out(endpoint)) { + cardp->ep_out_size = + le16_to_cpu(endpoint->wMaxPacketSize); + cardp->ep_out = usb_endpoint_num(endpoint); + } + } + if (!cardp->ep_out_size || !cardp->ep_in_size) + /* Endpoints not found */ + goto dealloc; + + cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!cardp->rx_urb) + goto dealloc; + + cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!cardp->tx_urb) + goto dealloc; + + cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!cardp->cmd_urb) + goto dealloc; + + cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE, + GFP_KERNEL); + if (!cardp->ep_out_buf) + goto dealloc; + + priv = lbtf_add_card(cardp, &udev->dev); + if (!priv) + goto dealloc; + + cardp->priv = priv; + + priv->hw_host_to_card = if_usb_host_to_card; + priv->hw_prog_firmware = if_usb_prog_firmware; + priv->hw_reset_device = if_usb_reset_device; + cardp->boot2_version = udev->descriptor.bcdDevice; + + usb_get_dev(udev); + usb_set_intfdata(intf, cardp); + + return 0; + +dealloc: + if_usb_free(cardp); +error: + return -ENOMEM; +} + +/** + * if_usb_disconnect - free resource and cleanup + * + * @intf USB interface structure + */ +static void if_usb_disconnect(struct usb_interface *intf) +{ + struct if_usb_card *cardp = usb_get_intfdata(intf); + struct lbtf_private *priv = (struct lbtf_private *) cardp->priv; + + if_usb_reset_device(cardp); + + if (priv) + lbtf_remove_card(priv); + + /* Unlink and free urb */ + if_usb_free(cardp); + + usb_set_intfdata(intf, NULL); + usb_put_dev(interface_to_usbdev(intf)); +} + +/** + * if_usb_send_fw_pkt - This function downloads the FW + * + * @priv pointer to struct lbtf_private + * + * Returns: 0 + */ +static int if_usb_send_fw_pkt(struct if_usb_card *cardp) +{ + struct fwdata *fwdata = cardp->ep_out_buf; + u8 *firmware = (u8 *) cardp->fw->data; + + /* If we got a CRC failure on the last block, back + up and retry it */ + if (!cardp->CRC_OK) { + cardp->totalbytes = cardp->fwlastblksent; + cardp->fwseqnum--; + } + + /* struct fwdata (which we sent to the card) has an + extra __le32 field in between the header and the data, + which is not in the struct fwheader in the actual + firmware binary. Insert the seqnum in the middle... 
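+ Each block sent over USB therefore looks like: + | fwheader (16 bytes) | __le32 seqnum | data[hdr.datalength] |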
*/ + memcpy(&fwdata->hdr, &firmware[cardp->totalbytes], + sizeof(struct fwheader)); + + cardp->fwlastblksent = cardp->totalbytes; + cardp->totalbytes += sizeof(struct fwheader); + + memcpy(fwdata->data, &firmware[cardp->totalbytes], + le32_to_cpu(fwdata->hdr.datalength)); + + fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum); + cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength); + + usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) + + le32_to_cpu(fwdata->hdr.datalength), 0); + + if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) + /* Host has finished FW downloading + * Donwloading FW JUMP BLOCK + */ + cardp->fwfinalblk = 1; + + return 0; +} + +static int if_usb_reset_device(struct if_usb_card *cardp) +{ + struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4; + int ret; + + *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); + + cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET); + cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset)); + cmd->hdr.result = cpu_to_le16(0); + cmd->hdr.seqnum = cpu_to_le16(0x5a5a); + cmd->action = cpu_to_le16(CMD_ACT_HALT); + usb_tx_block(cardp, cardp->ep_out_buf, + 4 + sizeof(struct cmd_ds_802_11_reset), 0); + + msleep(100); + ret = usb_reset_device(cardp->udev); + msleep(100); + + return ret; +} +EXPORT_SYMBOL_GPL(if_usb_reset_device); + +/** + * usb_tx_block - transfer data to the device + * + * @priv pointer to struct lbtf_private + * @payload pointer to payload data + * @nb data length + * @data non-zero for data, zero for commands + * + * Returns: 0 on success, nonzero otherwise. + */ +static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, + uint16_t nb, u8 data) +{ + struct urb *urb; + + /* check if device is removed */ + if (cardp->priv->surpriseremoved) + return -1; + + if (data) + urb = cardp->tx_urb; + else + urb = cardp->cmd_urb; + + usb_fill_bulk_urb(urb, cardp->udev, + usb_sndbulkpipe(cardp->udev, + cardp->ep_out), + payload, nb, if_usb_write_bulk_callback, cardp); + + urb->transfer_flags |= URB_ZERO_PACKET; + + if (usb_submit_urb(urb, GFP_ATOMIC)) + return -1; + return 0; +} + +static int __if_usb_submit_rx_urb(struct if_usb_card *cardp, + void (*callbackfn)(struct urb *urb)) +{ + struct sk_buff *skb; + + skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE); + if (!skb) + return -1; + + cardp->rx_skb = skb; + + /* Fill the receive configuration URB and initialise the Rx call back */ + usb_fill_bulk_urb(cardp->rx_urb, cardp->udev, + usb_rcvbulkpipe(cardp->udev, cardp->ep_in), + skb_tail_pointer(skb), + MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); + + cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; + + if (usb_submit_urb(cardp->rx_urb, GFP_ATOMIC)) { + kfree_skb(skb); + cardp->rx_skb = NULL; + return -1; + } else + return 0; +} + +static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp) +{ + return __if_usb_submit_rx_urb(cardp, &if_usb_receive_fwload); +} + +static int if_usb_submit_rx_urb(struct if_usb_card *cardp) +{ + return __if_usb_submit_rx_urb(cardp, &if_usb_receive); +} + +static void if_usb_receive_fwload(struct urb *urb) +{ + struct if_usb_card *cardp = urb->context; + struct sk_buff *skb = cardp->rx_skb; + struct fwsyncheader *syncfwheader; + struct bootcmdresp bcmdresp; + + if (urb->status) { + kfree_skb(skb); + return; + } + + if (cardp->fwdnldover) { + __le32 *tmp = (__le32 *)(skb->data); + + if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) && + tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) + /* Firmware ready event received */ + 
wake_up(&cardp->fw_wq); + else + if_usb_submit_rx_urb_fwload(cardp); + kfree_skb(skb); + return; + } + if (cardp->bootcmdresp <= 0) { + memcpy(&bcmdresp, skb->data, sizeof(bcmdresp)); + + if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) { + kfree_skb(skb); + if_usb_submit_rx_urb_fwload(cardp); + cardp->bootcmdresp = 1; + /* Received valid boot command response */ + return; + } + if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) { + if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) || + bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) || + bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) + cardp->bootcmdresp = -1; + } else if (bcmdresp.cmd == BOOT_CMD_FW_BY_USB && + bcmdresp.result == BOOT_CMD_RESP_OK) + cardp->bootcmdresp = 1; + + kfree_skb(skb); + if_usb_submit_rx_urb_fwload(cardp); + return; + } + + syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC); + if (!syncfwheader) { + kfree_skb(skb); + return; + } + + memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader)); + + if (!syncfwheader->cmd) + cardp->CRC_OK = 1; + else + cardp->CRC_OK = 0; + kfree_skb(skb); + + /* reschedule timer for 200ms hence */ + mod_timer(&cardp->fw_timeout, jiffies + (HZ/5)); + + if (cardp->fwfinalblk) { + cardp->fwdnldover = 1; + goto exit; + } + + if_usb_send_fw_pkt(cardp); + + exit: + if_usb_submit_rx_urb_fwload(cardp); + + kfree(syncfwheader); + + return; +} + +#define MRVDRV_MIN_PKT_LEN 30 + +static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb, + struct if_usb_card *cardp, + struct lbtf_private *priv) +{ + if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN + || recvlength < MRVDRV_MIN_PKT_LEN) { + kfree_skb(skb); + return; + } + + skb_put(skb, recvlength); + skb_pull(skb, MESSAGE_HEADER_LEN); + lbtf_rx(priv, skb); +} + +static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, + struct sk_buff *skb, + struct if_usb_card *cardp, + struct lbtf_private *priv) +{ + if (recvlength > LBS_CMD_BUFFER_SIZE) { + kfree_skb(skb); + return; + } + + BUG_ON(!in_interrupt()); + + spin_lock(&priv->driver_lock); + memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN, + recvlength - MESSAGE_HEADER_LEN); + kfree_skb(skb); + lbtf_cmd_response_rx(priv); + spin_unlock(&priv->driver_lock); +} + +/** + * if_usb_receive - read data received from the device. 
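+ * Indication events whose cause word has a non-zero high half carry TX + * feedback: bits 16-23 hold the retry count and bits 24-31 the failure flag.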
+ * + * @urb pointer to struct urb + */ +static void if_usb_receive(struct urb *urb) +{ + struct if_usb_card *cardp = urb->context; + struct sk_buff *skb = cardp->rx_skb; + struct lbtf_private *priv = cardp->priv; + int recvlength = urb->actual_length; + uint8_t *recvbuff = NULL; + uint32_t recvtype = 0; + __le32 *pkt = (__le32 *) skb->data; + + if (recvlength) { + if (urb->status) { + kfree_skb(skb); + goto setup_for_next; + } + + recvbuff = skb->data; + recvtype = le32_to_cpu(pkt[0]); + } else if (urb->status) { + kfree_skb(skb); + return; + } + + switch (recvtype) { + case CMD_TYPE_DATA: + process_cmdtypedata(recvlength, skb, cardp, priv); + break; + + case CMD_TYPE_REQUEST: + process_cmdrequest(recvlength, recvbuff, skb, cardp, priv); + break; + + case CMD_TYPE_INDICATION: + { + /* Event cause handling */ + u32 event_cause = le32_to_cpu(pkt[1]); + + /* Icky undocumented magic special case */ + if (event_cause & 0xffff0000) { + u16 tmp; + u8 retrycnt; + u8 failure; + + tmp = event_cause >> 16; + retrycnt = tmp & 0x00ff; + failure = (tmp & 0xff00) >> 8; + lbtf_send_tx_feedback(priv, retrycnt, failure); + } else if (event_cause == LBTF_EVENT_BCN_SENT) + lbtf_bcn_sent(priv); + else + printk(KERN_DEBUG + "Unsupported notification %d received\n", + event_cause); + kfree_skb(skb); + break; + } + default: + printk(KERN_DEBUG "libertastf: unknown command type 0x%X\n", + recvtype); + kfree_skb(skb); + break; + } + +setup_for_next: + if_usb_submit_rx_urb(cardp); +} + +/** + * if_usb_host_to_card - Download data to the device + * + * @priv pointer to struct lbtf_private structure + * @type type of data + * @buf pointer to data buffer + * @len number of bytes + * + * Returns: 0 on success, nonzero otherwise + */ +static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type, + uint8_t *payload, uint16_t nb) +{ + struct if_usb_card *cardp = priv->card; + u8 data = 0; + + if (type == MVMS_CMD) { + *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); + } else { + *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_DATA); + data = 1; + } + + memcpy((cardp->ep_out_buf + MESSAGE_HEADER_LEN), payload, nb); + + return usb_tx_block(cardp, cardp->ep_out_buf, nb + MESSAGE_HEADER_LEN, + data); +} + +/** + * if_usb_issue_boot_command - Issue boot command to Boot2. + * + * @ivalue 1 boots from FW by USB-Download, 2 boots from FW in EEPROM. + * + * Returns: 0 + */ +static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue) +{ + struct bootcmd *bootcmd = cardp->ep_out_buf; + + /* Prepare command */ + bootcmd->magic = cpu_to_le32(BOOT_CMD_MAGIC_NUMBER); + bootcmd->cmd = ivalue; + memset(bootcmd->pad, 0, sizeof(bootcmd->pad)); + + /* Issue command */ + usb_tx_block(cardp, cardp->ep_out_buf, sizeof(*bootcmd), 0); + + return 0; +} + + +/** + * check_fwfile_format - Check the validity of Boot2/FW image. + * + * @data pointer to image + * @totlen image length + * + * Returns: 0 if the image is valid, nonzero otherwise. 
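+ * + * The image is expected to be a sequence of { fwheader | data } blocks, + * terminated by a block whose dnldcmd is FW_HAS_LAST_BLOCK.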
+ */ +static int check_fwfile_format(const u8 *data, u32 totlen) +{ + u32 bincmd, exit; + u32 blksize, offset, len; + int ret; + + ret = 1; + exit = len = 0; + + do { + struct fwheader *fwh = (void *) data; + + bincmd = le32_to_cpu(fwh->dnldcmd); + blksize = le32_to_cpu(fwh->datalength); + switch (bincmd) { + case FW_HAS_DATA_TO_RECV: + offset = sizeof(struct fwheader) + blksize; + data += offset; + len += offset; + if (len >= totlen) + exit = 1; + break; + case FW_HAS_LAST_BLOCK: + exit = 1; + ret = 0; + break; + default: + exit = 1; + break; + } + } while (!exit); + + if (ret) + printk(KERN_INFO + "libertastf: firmware file format check failed\n"); + return ret; +} + + +static int if_usb_prog_firmware(struct if_usb_card *cardp) +{ + int i = 0; + static int reset_count = 10; + int ret = 0; + + ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev); + if (ret < 0) { + printk(KERN_INFO "libertastf: firmware %s not found\n", + lbtf_fw_name); + goto done; + } + + if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) + goto release_fw; + +restart: + if (if_usb_submit_rx_urb_fwload(cardp) < 0) { + ret = -1; + goto release_fw; + } + + cardp->bootcmdresp = 0; + do { + int j = 0; + i++; + /* Issue Boot command = 1, Boot from Download-FW */ + if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB); + /* wait for command response */ + do { + j++; + msleep_interruptible(100); + } while (cardp->bootcmdresp == 0 && j < 10); + } while (cardp->bootcmdresp == 0 && i < 5); + + if (cardp->bootcmdresp <= 0) { + if (--reset_count >= 0) { + if_usb_reset_device(cardp); + goto restart; + } + return -1; + } + + i = 0; + + cardp->totalbytes = 0; + cardp->fwlastblksent = 0; + cardp->CRC_OK = 1; + cardp->fwdnldover = 0; + cardp->fwseqnum = -1; + cardp->totalbytes = 0; + cardp->fwfinalblk = 0; + + /* Send the first firmware packet... */ + if_usb_send_fw_pkt(cardp); + + /* ... and wait for the process to complete */ + wait_event_interruptible(cardp->fw_wq, cardp->priv->surpriseremoved || + cardp->fwdnldover); + + del_timer_sync(&cardp->fw_timeout); + usb_kill_urb(cardp->rx_urb); + + if (!cardp->fwdnldover) { + printk(KERN_INFO "libertastf: failed to load fw," + " resetting device!\n"); + if (--reset_count >= 0) { + if_usb_reset_device(cardp); + goto restart; + } + + printk(KERN_INFO "libertastf: fw download failure\n"); + ret = -1; + goto release_fw; + } + + cardp->priv->fw_ready = 1; + + release_fw: + release_firmware(cardp->fw); + cardp->fw = NULL; + + if_usb_setup_firmware(cardp->priv); + + done: + return ret; +} +EXPORT_SYMBOL_GPL(if_usb_prog_firmware); + + +#define if_usb_suspend NULL +#define if_usb_resume NULL + +static struct usb_driver if_usb_driver = { + .name = DRV_NAME, + .probe = if_usb_probe, + .disconnect = if_usb_disconnect, + .id_table = if_usb_table, + .suspend = if_usb_suspend, + .resume = if_usb_resume, +}; + +static int __init if_usb_init_module(void) +{ + int ret = 0; + + ret = usb_register(&if_usb_driver); + return ret; +} + +static void __exit if_usb_exit_module(void) +{ + usb_deregister(&if_usb_driver); +} + +module_init(if_usb_init_module); +module_exit(if_usb_exit_module); + +MODULE_DESCRIPTION("8388 USB WLAN Thinfirm Driver"); +MODULE_AUTHOR("Cozybit Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/libertas_tf/if_usb.h b/drivers/net/wireless/libertas_tf/if_usb.h new file mode 100644 index 0000000..6fa5b3f --- /dev/null +++ b/drivers/net/wireless/libertas_tf/if_usb.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2008, cozybit Inc. 
+ * Copyright (C) 2003-2006, Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ +#include +#include + +struct lbtf_private; + +/** + * This file contains definition for USB interface. + */ +#define CMD_TYPE_REQUEST 0xF00DFACE +#define CMD_TYPE_DATA 0xBEADC0DE +#define CMD_TYPE_INDICATION 0xBEEFFACE + +#define BOOT_CMD_FW_BY_USB 0x01 +#define BOOT_CMD_FW_IN_EEPROM 0x02 +#define BOOT_CMD_UPDATE_BOOT2 0x03 +#define BOOT_CMD_UPDATE_FW 0x04 +#define BOOT_CMD_MAGIC_NUMBER 0x4C56524D /* LVRM */ + +struct bootcmd { + __le32 magic; + uint8_t cmd; + uint8_t pad[11]; +}; + +#define BOOT_CMD_RESP_OK 0x0001 +#define BOOT_CMD_RESP_FAIL 0x0000 + +struct bootcmdresp { + __le32 magic; + uint8_t cmd; + uint8_t result; + uint8_t pad[2]; +}; + +/** USB card description structure*/ +struct if_usb_card { + struct usb_device *udev; + struct urb *rx_urb, *tx_urb, *cmd_urb; + struct lbtf_private *priv; + + struct sk_buff *rx_skb; + + uint8_t ep_in; + uint8_t ep_out; + + int8_t bootcmdresp; + + int ep_in_size; + + void *ep_out_buf; + int ep_out_size; + + const struct firmware *fw; + struct timer_list fw_timeout; + wait_queue_head_t fw_wq; + uint32_t fwseqnum; + uint32_t totalbytes; + uint32_t fwlastblksent; + uint8_t CRC_OK; + uint8_t fwdnldover; + uint8_t fwfinalblk; + + __le16 boot2_version; +}; + +/** fwheader */ +struct fwheader { + __le32 dnldcmd; + __le32 baseaddr; + __le32 datalength; + __le32 CRC; +}; + +#define FW_MAX_DATA_BLK_SIZE 600 +/** FWData */ +struct fwdata { + struct fwheader hdr; + __le32 seqnum; + uint8_t data[0]; +}; + +/** fwsyncheader */ +struct fwsyncheader { + __le32 cmd; + __le32 seqnum; +}; + +#define FW_HAS_DATA_TO_RECV 0x00000001 +#define FW_HAS_LAST_BLOCK 0x00000004 diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h new file mode 100644 index 0000000..4cc42dd --- /dev/null +++ b/drivers/net/wireless/libertas_tf/libertas_tf.h @@ -0,0 +1,514 @@ +/* + * Copyright (C) 2008, cozybit Inc. + * Copyright (C) 2007, Red Hat, Inc. + * Copyright (C) 2003-2006, Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ +#include +#include +#include +#include + +#ifndef DRV_NAME +#define DRV_NAME "libertas_tf" +#endif + +#define MRVL_DEFAULT_RETRIES 9 +#define MRVL_PER_PACKET_RATE 0x10 +#define MRVL_MAX_BCN_SIZE 440 +#define CMD_OPTION_WAITFORRSP 0x0002 + +/* Return command are almost always the same as the host command, but with + * bit 15 set high. There are a few exceptions, though... 
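+ * For example, the response to CMD_GET_HW_SPEC (0x0003) is tagged + * CMD_RET(CMD_GET_HW_SPEC) == 0x8003.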
+ */ +#define CMD_RET(cmd) (0x8000 | cmd) + +/* Command codes */ +#define CMD_GET_HW_SPEC 0x0003 +#define CMD_802_11_RESET 0x0005 +#define CMD_MAC_MULTICAST_ADR 0x0010 +#define CMD_802_11_RADIO_CONTROL 0x001c +#define CMD_802_11_RF_CHANNEL 0x001d +#define CMD_802_11_RF_TX_POWER 0x001e +#define CMD_MAC_CONTROL 0x0028 +#define CMD_802_11_MAC_ADDRESS 0x004d +#define CMD_SET_BOOT2_VER 0x00a5 +#define CMD_802_11_BEACON_CTRL 0x00b0 +#define CMD_802_11_BEACON_SET 0x00cb +#define CMD_802_11_SET_MODE 0x00cc +#define CMD_802_11_SET_BSSID 0x00cd + +#define CMD_ACT_GET 0x0000 +#define CMD_ACT_SET 0x0001 + +/* Define action or option for CMD_802_11_RESET */ +#define CMD_ACT_HALT 0x0003 + +/* Define action or option for CMD_MAC_CONTROL */ +#define CMD_ACT_MAC_RX_ON 0x0001 +#define CMD_ACT_MAC_TX_ON 0x0002 +#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020 +#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040 +#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080 +#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 + +/* Define action or option for CMD_802_11_RADIO_CONTROL */ +#define CMD_TYPE_AUTO_PREAMBLE 0x0001 +#define CMD_TYPE_SHORT_PREAMBLE 0x0002 +#define CMD_TYPE_LONG_PREAMBLE 0x0003 + +#define TURN_ON_RF 0x01 +#define RADIO_ON 0x01 +#define RADIO_OFF 0x00 + +#define SET_AUTO_PREAMBLE 0x05 +#define SET_SHORT_PREAMBLE 0x03 +#define SET_LONG_PREAMBLE 0x01 + +/* Define action or option for CMD_802_11_RF_CHANNEL */ +#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00 +#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01 + +/* Codes for CMD_802_11_SET_MODE */ +enum lbtf_mode { + LBTF_PASSIVE_MODE, + LBTF_STA_MODE, + LBTF_AP_MODE, +}; + +/** Card Event definition */ +#define MACREG_INT_CODE_FIRMWARE_READY 48 +/** Buffer Constants */ + +/* The size of SQ memory PPA, DPA are 8 DWORDs, that keep the physical +* addresses of TxPD buffers. Station has only 8 TxPD available, Whereas +* driver has more local TxPDs. Each TxPD on the host memory is associated +* with a Tx control node. The driver maintains 8 RxPD descriptors for +* station firmware to store Rx packet information. +* +* Current version of MAC has a 32x6 multicast address buffer. +* +* 802.11b can have up to 14 channels, the driver keeps the +* BSSID(MAC address) of each APs or Ad hoc stations it has sensed. 
+*/ + +#define MRVDRV_MAX_MULTICAST_LIST_SIZE 32 +#define LBS_NUM_CMD_BUFFERS 10 +#define LBS_CMD_BUFFER_SIZE (2 * 1024) +#define MRVDRV_MAX_CHANNEL_SIZE 14 +#define MRVDRV_SNAP_HEADER_LEN 8 + +#define LBS_UPLD_SIZE 2312 +#define DEV_NAME_LEN 32 + +/** Misc constants */ +/* This section defines 802.11 specific contants */ + +#define MRVDRV_MAX_REGION_CODE 6 +/** + * the table to keep region code + */ +#define LBTF_REGDOMAIN_US 0x10 +#define LBTF_REGDOMAIN_CA 0x20 +#define LBTF_REGDOMAIN_EU 0x30 +#define LBTF_REGDOMAIN_SP 0x31 +#define LBTF_REGDOMAIN_FR 0x32 +#define LBTF_REGDOMAIN_JP 0x40 + +#define SBI_EVENT_CAUSE_SHIFT 3 + +/** RxPD status */ + +#define MRVDRV_RXPD_STATUS_OK 0x0001 + + +/* This is for firmware specific length */ +#define EXTRA_LEN 36 + +#define MRVDRV_ETH_TX_PACKET_BUFFER_SIZE \ + (ETH_FRAME_LEN + sizeof(struct txpd) + EXTRA_LEN) + +#define MRVDRV_ETH_RX_PACKET_BUFFER_SIZE \ + (ETH_FRAME_LEN + sizeof(struct rxpd) \ + + MRVDRV_SNAP_HEADER_LEN + EXTRA_LEN) + +#define CMD_F_HOSTCMD (1 << 0) +#define FW_CAPINFO_WPA (1 << 0) + +#define RF_ANTENNA_1 0x1 +#define RF_ANTENNA_2 0x2 +#define RF_ANTENNA_AUTO 0xFFFF + +#define LBTF_EVENT_BCN_SENT 55 + +/** Global Variable Declaration */ +/** mv_ms_type */ +enum mv_ms_type { + MVMS_DAT = 0, + MVMS_CMD = 1, + MVMS_TXDONE = 2, + MVMS_EVENT +}; + +extern struct workqueue_struct *lbtf_wq; + +struct lbtf_private; + +struct lbtf_offset_value { + u32 offset; + u32 value; +}; + +struct channel_range { + u8 regdomain; + u8 start; + u8 end; /* exclusive (channel must be less than end) */ +}; + +struct if_usb_card; + +/** Private structure for the MV device */ +struct lbtf_private { + void *card; + struct ieee80211_hw *hw; + + /* Command response buffer */ + u8 cmd_resp_buff[LBS_UPLD_SIZE]; + /* Download sent: + bit0 1/0=data_sent/data_tx_done, + bit1 1/0=cmd_sent/cmd_tx_done, + all other bits reserved 0 */ + struct ieee80211_vif *vif; + + struct work_struct cmd_work; + struct work_struct tx_work; + /** Hardware access */ + int (*hw_host_to_card) (struct lbtf_private *priv, u8 type, u8 *payload, u16 nb); + int (*hw_prog_firmware) (struct if_usb_card *cardp); + int (*hw_reset_device) (struct if_usb_card *cardp); + + + /** Wlan adapter data structure*/ + /** STATUS variables */ + u32 fwrelease; + u32 fwcapinfo; + /* protected with big lock */ + + struct mutex lock; + + /** command-related variables */ + u16 seqnum; + /* protected by big lock */ + + struct cmd_ctrl_node *cmd_array; + /** Current command */ + struct cmd_ctrl_node *cur_cmd; + /** command Queues */ + /** Free command buffers */ + struct list_head cmdfreeq; + /** Pending command buffers */ + struct list_head cmdpendingq; + + /** spin locks */ + spinlock_t driver_lock; + + /** Timers */ + struct timer_list command_timer; + int nr_retries; + int cmd_timed_out; + + u8 cmd_response_rxed; + + /** capability Info used in Association, start, join */ + u16 capability; + + /** MAC address information */ + u8 current_addr[ETH_ALEN]; + u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN]; + u32 nr_of_multicastmacaddr; + int cur_freq; + + struct sk_buff *skb_to_tx; + struct sk_buff *tx_skb; + + /** NIC Operation characteristics */ + u16 mac_control; + u16 regioncode; + struct channel_range range; + + u8 radioon; + u32 preamble; + + struct ieee80211_channel channels[14]; + struct ieee80211_rate rates[12]; + struct ieee80211_supported_band band; + struct lbtf_offset_value offsetvalue; + + u8 fw_ready; + u8 surpriseremoved; + struct sk_buff_head bc_ps_buf; +}; + +/* 802.11-related definitions 
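+ * + * struct txpd below is prepended to every frame handed to the firmware + * (the driver reserves sizeof(struct txpd) of skb headroom for it), and + * struct rxpd is the per-frame header the firmware prepends to received + * frames; see lbtf_tx_work() and lbtf_rx() in main.c.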
*/ + +/* TxPD descriptor */ +struct txpd { + /* Current Tx packet status */ + __le32 tx_status; + /* Tx control */ + __le32 tx_control; + __le32 tx_packet_location; + /* Tx packet length */ + __le16 tx_packet_length; + /* First 2 byte of destination MAC address */ + u8 tx_dest_addr_high[2]; + /* Last 4 byte of destination MAC address */ + u8 tx_dest_addr_low[4]; + /* Pkt Priority */ + u8 priority; + /* Pkt Trasnit Power control */ + u8 powermgmt; + /* Time the packet has been queued in the driver (units = 2ms) */ + u8 pktdelay_2ms; + /* reserved */ + u8 reserved1; +}; + +/* RxPD Descriptor */ +struct rxpd { + /* Current Rx packet status */ + __le16 status; + + /* SNR */ + u8 snr; + + /* Tx control */ + u8 rx_control; + + /* Pkt length */ + __le16 pkt_len; + + /* Noise Floor */ + u8 nf; + + /* Rx Packet Rate */ + u8 rx_rate; + + /* Pkt addr */ + __le32 pkt_ptr; + + /* Next Rx RxPD addr */ + __le32 next_rxpd_ptr; + + /* Pkt Priority */ + u8 priority; + u8 reserved[3]; +}; + +struct cmd_header { + __le16 command; + __le16 size; + __le16 seqnum; + __le16 result; +} __attribute__ ((packed)); + +struct cmd_ctrl_node { + struct list_head list; + int result; + /* command response */ + int (*callback)(struct lbtf_private *, + unsigned long, struct cmd_header *); + unsigned long callback_arg; + /* command data */ + struct cmd_header *cmdbuf; + /* wait queue */ + u16 cmdwaitqwoken; + wait_queue_head_t cmdwait_q; +}; + +/* + * Define data structure for CMD_GET_HW_SPEC + * This structure defines the response for the GET_HW_SPEC command + */ +struct cmd_ds_get_hw_spec { + struct cmd_header hdr; + + /* HW Interface version number */ + __le16 hwifversion; + /* HW version number */ + __le16 version; + /* Max number of TxPD FW can handle */ + __le16 nr_txpd; + /* Max no of Multicast address */ + __le16 nr_mcast_adr; + /* MAC address */ + u8 permanentaddr[6]; + + /* region Code */ + __le16 regioncode; + + /* Number of antenna used */ + __le16 nr_antenna; + + /* FW release number, example 0x01030304 = 2.3.4p1 */ + __le32 fwrelease; + + /* Base Address of TxPD queue */ + __le32 wcb_base; + /* Read Pointer of RxPd queue */ + __le32 rxpd_rdptr; + + /* Write Pointer of RxPd queue */ + __le32 rxpd_wrptr; + + /*FW/HW capability */ + __le32 fwcapinfo; +} __attribute__ ((packed)); + +struct cmd_ds_mac_control { + struct cmd_header hdr; + __le16 action; + u16 reserved; +}; + +struct cmd_ds_802_11_mac_address { + struct cmd_header hdr; + + __le16 action; + uint8_t macadd[ETH_ALEN]; +}; + +struct cmd_ds_mac_multicast_addr { + struct cmd_header hdr; + + __le16 action; + __le16 nr_of_adrs; + u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; +}; + +struct cmd_ds_set_mode { + struct cmd_header hdr; + + __le16 mode; +}; + +struct cmd_ds_set_bssid { + struct cmd_header hdr; + + u8 bssid[6]; + u8 activate; +}; + +struct cmd_ds_802_11_radio_control { + struct cmd_header hdr; + + __le16 action; + __le16 control; +}; + + +struct cmd_ds_802_11_rf_channel { + struct cmd_header hdr; + + __le16 action; + __le16 channel; + __le16 rftype; /* unused */ + __le16 reserved; /* unused */ + u8 channellist[32]; /* unused */ +}; + +struct cmd_ds_set_boot2_ver { + struct cmd_header hdr; + + __le16 action; + __le16 version; +}; + +struct cmd_ds_802_11_reset { + struct cmd_header hdr; + + __le16 action; +}; + +struct cmd_ds_802_11_beacon_control { + struct cmd_header hdr; + + __le16 action; + __le16 beacon_enable; + __le16 beacon_period; +}; + +struct cmd_ds_802_11_beacon_set { + struct cmd_header hdr; + + __le16 len; + u8 
beacon[MRVL_MAX_BCN_SIZE]; +}; + +struct lbtf_private; +struct cmd_ctrl_node; + +/** Function Prototype Declaration */ +void lbtf_set_mac_control(struct lbtf_private *priv); + +int lbtf_free_cmd_buffer(struct lbtf_private *priv); + +int lbtf_allocate_cmd_buffer(struct lbtf_private *priv); +int lbtf_execute_next_command(struct lbtf_private *priv); +int lbtf_set_radio_control(struct lbtf_private *priv); +int lbtf_update_hw_spec(struct lbtf_private *priv); +int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv); +void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode); +void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid); +int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr); + +int lbtf_set_channel(struct lbtf_private *priv, u8 channel); + +int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon); +int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable, + int beacon_int); + + +int lbtf_process_rx_command(struct lbtf_private *priv); +void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd, + int result); +void lbtf_cmd_response_rx(struct lbtf_private *priv); + +/* main.c */ +struct chan_freq_power *lbtf_get_region_cfp_table(u8 region, + int *cfp_no); +struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev); +int lbtf_remove_card(struct lbtf_private *priv); +int lbtf_start_card(struct lbtf_private *priv); +int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb); +void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail); +void lbtf_bcn_sent(struct lbtf_private *priv); + +/* support functions for cmd.c */ +/* lbtf_cmd() infers the size of the buffer to copy data back into, from + the size of the target of the pointer. Since the command to be sent + may often be smaller, that size is set in cmd->size by the caller.*/ +#define lbtf_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \ + uint16_t __sz = le16_to_cpu((cmd)->hdr.size); \ + (cmd)->hdr.size = cpu_to_le16(sizeof(*(cmd))); \ + __lbtf_cmd(priv, cmdnr, &(cmd)->hdr, __sz, cb, cb_arg); \ +}) + +#define lbtf_cmd_with_response(priv, cmdnr, cmd) \ + lbtf_cmd(priv, cmdnr, cmd, lbtf_cmd_copyback, (unsigned long) (cmd)) + +void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command, + struct cmd_header *in_cmd, int in_cmd_size); + +int __lbtf_cmd(struct lbtf_private *priv, uint16_t command, + struct cmd_header *in_cmd, int in_cmd_size, + int (*callback)(struct lbtf_private *, unsigned long, + struct cmd_header *), + unsigned long callback_arg); + +int lbtf_cmd_copyback(struct lbtf_private *priv, unsigned long extra, + struct cmd_header *resp); diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c new file mode 100644 index 0000000..6ab3003 --- /dev/null +++ b/drivers/net/wireless/libertas_tf/main.c @@ -0,0 +1,670 @@ +/* + * Copyright (C) 2008, cozybit Inc. + * Copyright (C) 2003-2006, Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ +#include "libertas_tf.h" +#include "linux/etherdevice.h" + +#define DRIVER_RELEASE_VERSION "004.p0" +/* thinfirm version: 5.132.X.pX */ +#define LBTF_FW_VER_MIN 0x05840300 +#define LBTF_FW_VER_MAX 0x0584ffff +#define QOS_CONTROL_LEN 2 + +static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION; +struct workqueue_struct *lbtf_wq; + +static const struct ieee80211_channel lbtf_channels[] = { + { .center_freq = 2412, .hw_value = 1 }, + { .center_freq = 2417, .hw_value = 2 }, + { .center_freq = 2422, .hw_value = 3 }, + { .center_freq = 2427, .hw_value = 4 }, + { .center_freq = 2432, .hw_value = 5 }, + { .center_freq = 2437, .hw_value = 6 }, + { .center_freq = 2442, .hw_value = 7 }, + { .center_freq = 2447, .hw_value = 8 }, + { .center_freq = 2452, .hw_value = 9 }, + { .center_freq = 2457, .hw_value = 10 }, + { .center_freq = 2462, .hw_value = 11 }, + { .center_freq = 2467, .hw_value = 12 }, + { .center_freq = 2472, .hw_value = 13 }, + { .center_freq = 2484, .hw_value = 14 }, +}; + +/* This table contains the hardware specific values for the modulation rates. */ +static const struct ieee80211_rate lbtf_rates[] = { + { .bitrate = 10, + .hw_value = 0, }, + { .bitrate = 20, + .hw_value = 1, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = 2, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = 3, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60, + .hw_value = 5, + .flags = 0 }, + { .bitrate = 90, + .hw_value = 6, + .flags = 0 }, + { .bitrate = 120, + .hw_value = 7, + .flags = 0 }, + { .bitrate = 180, + .hw_value = 8, + .flags = 0 }, + { .bitrate = 240, + .hw_value = 9, + .flags = 0 }, + { .bitrate = 360, + .hw_value = 10, + .flags = 0 }, + { .bitrate = 480, + .hw_value = 11, + .flags = 0 }, + { .bitrate = 540, + .hw_value = 12, + .flags = 0 }, +}; + +static void lbtf_cmd_work(struct work_struct *work) +{ + struct lbtf_private *priv = container_of(work, struct lbtf_private, + cmd_work); + spin_lock_irq(&priv->driver_lock); + /* command response? */ + if (priv->cmd_response_rxed) { + priv->cmd_response_rxed = 0; + spin_unlock_irq(&priv->driver_lock); + lbtf_process_rx_command(priv); + spin_lock_irq(&priv->driver_lock); + } + + if (priv->cmd_timed_out && priv->cur_cmd) { + struct cmd_ctrl_node *cmdnode = priv->cur_cmd; + + if (++priv->nr_retries > 10) { + lbtf_complete_command(priv, cmdnode, + -ETIMEDOUT); + priv->nr_retries = 0; + } else { + priv->cur_cmd = NULL; + + /* Stick it back at the _top_ of the pending + * queue for immediate resubmission */ + list_add(&cmdnode->list, &priv->cmdpendingq); + } + } + priv->cmd_timed_out = 0; + spin_unlock_irq(&priv->driver_lock); + + if (!priv->fw_ready) + return; + /* Execute the next command */ + if (!priv->cur_cmd) + lbtf_execute_next_command(priv); +} + +/** + * lbtf_setup_firmware: initialize firmware. + * + * @priv A pointer to struct lbtf_private structure + * + * Returns: 0 on success. + */ +static int lbtf_setup_firmware(struct lbtf_private *priv) +{ + int ret = -1; + + /* + * Read priv address from HW + */ + memset(priv->current_addr, 0xff, ETH_ALEN); + ret = lbtf_update_hw_spec(priv); + if (ret) { + ret = -1; + goto done; + } + + lbtf_set_mac_control(priv); + lbtf_set_radio_control(priv); + + ret = 0; +done: + return ret; +} + +/** + * This function handles the timeout of command sending. + * It will re-send the same command again. 
+ */ +static void command_timer_fn(unsigned long data) +{ + struct lbtf_private *priv = (struct lbtf_private *)data; + unsigned long flags; + + spin_lock_irqsave(&priv->driver_lock, flags); + + if (!priv->cur_cmd) { + printk(KERN_DEBUG "libertastf: command timer expired; " + "no pending command\n"); + goto out; + } + + printk(KERN_DEBUG "libertas: command %x timed out\n", + le16_to_cpu(priv->cur_cmd->cmdbuf->command)); + + priv->cmd_timed_out = 1; + queue_work(lbtf_wq, &priv->cmd_work); +out: + spin_unlock_irqrestore(&priv->driver_lock, flags); +} + +static int lbtf_init_adapter(struct lbtf_private *priv) +{ + memset(priv->current_addr, 0xff, ETH_ALEN); + mutex_init(&priv->lock); + + priv->vif = NULL; + setup_timer(&priv->command_timer, command_timer_fn, + (unsigned long)priv); + + INIT_LIST_HEAD(&priv->cmdfreeq); + INIT_LIST_HEAD(&priv->cmdpendingq); + + spin_lock_init(&priv->driver_lock); + + /* Allocate the command buffers */ + if (lbtf_allocate_cmd_buffer(priv)) + return -1; + + return 0; +} + +static void lbtf_free_adapter(struct lbtf_private *priv) +{ + lbtf_free_cmd_buffer(priv); + del_timer(&priv->command_timer); +} + +static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct lbtf_private *priv = hw->priv; + + priv->skb_to_tx = skb; + queue_work(lbtf_wq, &priv->tx_work); + /* + * queue will be restarted when we receive transmission feedback if + * there are no buffered multicast frames to send + */ + ieee80211_stop_queues(priv->hw); + return NETDEV_TX_OK; +} + +static void lbtf_tx_work(struct work_struct *work) +{ + struct lbtf_private *priv = container_of(work, struct lbtf_private, + tx_work); + unsigned int len; + struct ieee80211_tx_info *info; + struct txpd *txpd; + struct sk_buff *skb = NULL; + int err; + + if ((priv->vif->type == NL80211_IFTYPE_AP) && + (!skb_queue_empty(&priv->bc_ps_buf))) + skb = skb_dequeue(&priv->bc_ps_buf); + else if (priv->skb_to_tx) { + skb = priv->skb_to_tx; + priv->skb_to_tx = NULL; + } else + return; + + len = skb->len; + info = IEEE80211_SKB_CB(skb); + txpd = (struct txpd *) skb_push(skb, sizeof(struct txpd)); + + if (priv->surpriseremoved) { + dev_kfree_skb_any(skb); + return; + } + + memset(txpd, 0, sizeof(struct txpd)); + /* Activate per-packet rate selection */ + txpd->tx_control |= cpu_to_le32(MRVL_PER_PACKET_RATE | + ieee80211_get_tx_rate(priv->hw, info)->hw_value); + + /* copy destination address from 802.11 header */ + memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4, + ETH_ALEN); + txpd->tx_packet_length = cpu_to_le16(len); + txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); + BUG_ON(priv->tx_skb); + spin_lock_irq(&priv->driver_lock); + priv->tx_skb = skb; + err = priv->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len); + spin_unlock_irq(&priv->driver_lock); + if (err) { + dev_kfree_skb_any(skb); + priv->tx_skb = NULL; + } +} + +static int lbtf_op_start(struct ieee80211_hw *hw) +{ + struct lbtf_private *priv = hw->priv; + void *card = priv->card; + int ret = -1; + + if (!priv->fw_ready) + /* Upload firmware */ + if (priv->hw_prog_firmware(card)) + goto err_prog_firmware; + + /* poke the firmware */ + priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE; + priv->radioon = RADIO_ON; + priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; + ret = lbtf_setup_firmware(priv); + if (ret) + goto err_prog_firmware; + + if ((priv->fwrelease < LBTF_FW_VER_MIN) || + (priv->fwrelease > LBTF_FW_VER_MAX)) { + ret = -1; + goto err_prog_firmware; + } + + printk(KERN_INFO "libertastf: Marvell WLAN 
802.11 thinfirm adapter\n"); + return 0; + +err_prog_firmware: + priv->hw_reset_device(card); + return ret; +} + +static void lbtf_op_stop(struct ieee80211_hw *hw) +{ + struct lbtf_private *priv = hw->priv; + unsigned long flags; + struct sk_buff *skb; + + struct cmd_ctrl_node *cmdnode; + /* Flush pending command nodes */ + spin_lock_irqsave(&priv->driver_lock, flags); + list_for_each_entry(cmdnode, &priv->cmdpendingq, list) { + cmdnode->result = -ENOENT; + cmdnode->cmdwaitqwoken = 1; + wake_up_interruptible(&cmdnode->cmdwait_q); + } + + spin_unlock_irqrestore(&priv->driver_lock, flags); + cancel_work_sync(&priv->cmd_work); + cancel_work_sync(&priv->tx_work); + while ((skb = skb_dequeue(&priv->bc_ps_buf))) + dev_kfree_skb_any(skb); + priv->radioon = RADIO_OFF; + lbtf_set_radio_control(priv); + + return; +} + +static int lbtf_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct lbtf_private *priv = hw->priv; + if (priv->vif != NULL) + return -EOPNOTSUPP; + + priv->vif = vif; + switch (vif->type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_AP: + lbtf_set_mode(priv, LBTF_AP_MODE); + break; + case NL80211_IFTYPE_STATION: + lbtf_set_mode(priv, LBTF_STA_MODE); + break; + default: + priv->vif = NULL; + return -EOPNOTSUPP; + } + lbtf_set_mac_address(priv, (u8 *) vif->addr); + return 0; +} + +static void lbtf_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct lbtf_private *priv = hw->priv; + + if (priv->vif->type == NL80211_IFTYPE_AP || + priv->vif->type == NL80211_IFTYPE_MESH_POINT) + lbtf_beacon_ctrl(priv, 0, 0); + lbtf_set_mode(priv, LBTF_PASSIVE_MODE); + lbtf_set_bssid(priv, 0, NULL); + priv->vif = NULL; +} + +static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct lbtf_private *priv = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + + if (conf->channel->center_freq != priv->cur_freq) { + priv->cur_freq = conf->channel->center_freq; + lbtf_set_channel(priv, conf->channel->hw_value); + } + return 0; +} + +static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw, + int mc_count, struct dev_addr_list *mclist) +{ + struct lbtf_private *priv = hw->priv; + int i; + + if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE) + return mc_count; + + priv->nr_of_multicastmacaddr = mc_count; + for (i = 0; i < mc_count; i++) { + if (!mclist) + break; + memcpy(&priv->multicastlist[i], mclist->da_addr, + ETH_ALEN); + mclist = mclist->next; + } + + return mc_count; +} + +#define SUPPORTED_FIF_FLAGS (FIF_PROMISC_IN_BSS | FIF_ALLMULTI) +static void lbtf_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, + u64 multicast) +{ + struct lbtf_private *priv = hw->priv; + int old_mac_control = priv->mac_control; + changed_flags &= SUPPORTED_FIF_FLAGS; + *new_flags &= SUPPORTED_FIF_FLAGS; + + if (!changed_flags) + return; + + if (*new_flags & (FIF_PROMISC_IN_BSS)) + priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE; + else + priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE; + if (*new_flags & (FIF_ALLMULTI) || + multicast > MRVDRV_MAX_MULTICAST_LIST_SIZE) { + priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE; + priv->mac_control &= ~CMD_ACT_MAC_MULTICAST_ENABLE; + } else if (multicast) { + priv->mac_control |= CMD_ACT_MAC_MULTICAST_ENABLE; + priv->mac_control &= ~CMD_ACT_MAC_ALL_MULTICAST_ENABLE; + lbtf_cmd_set_mac_multicast_addr(priv); + } else { + priv->mac_control &= ~(CMD_ACT_MAC_MULTICAST_ENABLE | + CMD_ACT_MAC_ALL_MULTICAST_ENABLE); + if 
(priv->nr_of_multicastmacaddr) { + priv->nr_of_multicastmacaddr = 0; + lbtf_cmd_set_mac_multicast_addr(priv); + } + } + + + if (priv->mac_control != old_mac_control) + lbtf_set_mac_control(priv); +} + +static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct lbtf_private *priv = hw->priv; + struct sk_buff *beacon; + + if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) { + switch (priv->vif->type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + beacon = ieee80211_beacon_get(hw, vif); + if (beacon) { + lbtf_beacon_set(priv, beacon); + kfree_skb(beacon); + lbtf_beacon_ctrl(priv, 1, + bss_conf->beacon_int); + } + break; + default: + break; + } + } + + if (changes & BSS_CHANGED_BSSID) { + bool activate = !is_zero_ether_addr(bss_conf->bssid); + lbtf_set_bssid(priv, activate, bss_conf->bssid); + } + + if (changes & BSS_CHANGED_ERP_PREAMBLE) { + if (bss_conf->use_short_preamble) + priv->preamble = CMD_TYPE_SHORT_PREAMBLE; + else + priv->preamble = CMD_TYPE_LONG_PREAMBLE; + lbtf_set_radio_control(priv); + } +} + +static const struct ieee80211_ops lbtf_ops = { + .tx = lbtf_op_tx, + .start = lbtf_op_start, + .stop = lbtf_op_stop, + .add_interface = lbtf_op_add_interface, + .remove_interface = lbtf_op_remove_interface, + .config = lbtf_op_config, + .prepare_multicast = lbtf_op_prepare_multicast, + .configure_filter = lbtf_op_configure_filter, + .bss_info_changed = lbtf_op_bss_info_changed, +}; + +int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb) +{ + struct ieee80211_rx_status stats; + struct rxpd *prxpd; + int need_padding; + unsigned int flags; + struct ieee80211_hdr *hdr; + + prxpd = (struct rxpd *) skb->data; + + stats.flag = 0; + if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK))) + stats.flag |= RX_FLAG_FAILED_FCS_CRC; + stats.freq = priv->cur_freq; + stats.band = IEEE80211_BAND_2GHZ; + stats.signal = prxpd->snr; + stats.noise = prxpd->nf; + /* Marvell rate index has a hole at value 4 */ + if (prxpd->rx_rate > 4) + --prxpd->rx_rate; + stats.rate_idx = prxpd->rx_rate; + skb_pull(skb, sizeof(struct rxpd)); + + hdr = (struct ieee80211_hdr *)skb->data; + flags = le32_to_cpu(*(__le32 *)(skb->data + 4)); + + need_padding = ieee80211_is_data_qos(hdr->frame_control); + need_padding ^= ieee80211_has_a4(hdr->frame_control); + need_padding ^= ieee80211_is_data_qos(hdr->frame_control) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CONTROL_A_MSDU_PRESENT); + + if (need_padding) { + memmove(skb->data + 2, skb->data, skb->len); + skb_reserve(skb, 2); + } + + memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats)); + ieee80211_rx_irqsafe(priv->hw, skb); + return 0; +} +EXPORT_SYMBOL_GPL(lbtf_rx); + +/** + * lbtf_add_card: Add and initialize the card, no fw upload yet. + * + * @card A pointer to card + * + * Returns: pointer to struct lbtf_priv. 
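+ * (i.e. struct lbtf_private); NULL is returned if allocation or + * mac80211 registration fails.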
+ */ +struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev) +{ + struct ieee80211_hw *hw; + struct lbtf_private *priv = NULL; + + hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops); + if (!hw) + goto done; + + priv = hw->priv; + if (lbtf_init_adapter(priv)) + goto err_init_adapter; + + priv->hw = hw; + priv->card = card; + priv->tx_skb = NULL; + + hw->queues = 1; + hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + hw->extra_tx_headroom = sizeof(struct txpd); + memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels)); + memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates)); + priv->band.n_bitrates = ARRAY_SIZE(lbtf_rates); + priv->band.bitrates = priv->rates; + priv->band.n_channels = ARRAY_SIZE(lbtf_channels); + priv->band.channels = priv->channels; + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + skb_queue_head_init(&priv->bc_ps_buf); + + SET_IEEE80211_DEV(hw, dmdev); + + INIT_WORK(&priv->cmd_work, lbtf_cmd_work); + INIT_WORK(&priv->tx_work, lbtf_tx_work); + if (ieee80211_register_hw(hw)) + goto err_init_adapter; + + goto done; + +err_init_adapter: + lbtf_free_adapter(priv); + ieee80211_free_hw(hw); + priv = NULL; + +done: + return priv; +} +EXPORT_SYMBOL_GPL(lbtf_add_card); + + +int lbtf_remove_card(struct lbtf_private *priv) +{ + struct ieee80211_hw *hw = priv->hw; + + priv->surpriseremoved = 1; + del_timer(&priv->command_timer); + lbtf_free_adapter(priv); + priv->hw = NULL; + ieee80211_unregister_hw(hw); + ieee80211_free_hw(hw); + + return 0; +} +EXPORT_SYMBOL_GPL(lbtf_remove_card); + +void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb); + + ieee80211_tx_info_clear_status(info); + /* + * Commented out, otherwise we never go beyond 1Mbit/s using mac80211 + * default pid rc algorithm. 
+ * + * info->status.retry_count = MRVL_DEFAULT_RETRIES - retrycnt; + */ + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !fail) + info->flags |= IEEE80211_TX_STAT_ACK; + skb_pull(priv->tx_skb, sizeof(struct txpd)); + ieee80211_tx_status_irqsafe(priv->hw, priv->tx_skb); + priv->tx_skb = NULL; + if (!priv->skb_to_tx && skb_queue_empty(&priv->bc_ps_buf)) + ieee80211_wake_queues(priv->hw); + else + queue_work(lbtf_wq, &priv->tx_work); +} +EXPORT_SYMBOL_GPL(lbtf_send_tx_feedback); + +void lbtf_bcn_sent(struct lbtf_private *priv) +{ + struct sk_buff *skb = NULL; + + if (priv->vif->type != NL80211_IFTYPE_AP) + return; + + if (skb_queue_empty(&priv->bc_ps_buf)) { + bool tx_buff_bc = 0; + + while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) { + skb_queue_tail(&priv->bc_ps_buf, skb); + tx_buff_bc = 1; + } + if (tx_buff_bc) { + ieee80211_stop_queues(priv->hw); + queue_work(lbtf_wq, &priv->tx_work); + } + } + + skb = ieee80211_beacon_get(priv->hw, priv->vif); + + if (skb) { + lbtf_beacon_set(priv, skb); + kfree_skb(skb); + } +} +EXPORT_SYMBOL_GPL(lbtf_bcn_sent); + +static int __init lbtf_init_module(void) +{ + lbtf_wq = create_workqueue("libertastf"); + if (lbtf_wq == NULL) { + printk(KERN_ERR "libertastf: couldn't create workqueue\n"); + return -ENOMEM; + } + return 0; +} + +static void __exit lbtf_exit_module(void) +{ + destroy_workqueue(lbtf_wq); +} + +module_init(lbtf_init_module); +module_exit(lbtf_exit_module); + +MODULE_DESCRIPTION("Libertas WLAN Thinfirm Driver Library"); +MODULE_AUTHOR("Cozybit Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c new file mode 100644 index 0000000..59446ec --- /dev/null +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -0,0 +1,1472 @@ +/* + * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 + * Copyright (c) 2008, Jouni Malinen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * TODO: + * - IBSS mode simulation (Beacon transmission with competition for "air time") + * - RX filtering based on filter configuration (data->rx_filter) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Jouni Malinen"); +MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); +MODULE_LICENSE("GPL"); + +static int radios = 2; +module_param(radios, int, 0444); +MODULE_PARM_DESC(radios, "Number of simulated radios"); + +static bool fake_hw_scan; +module_param(fake_hw_scan, bool, 0444); +MODULE_PARM_DESC(fake_hw_scan, "Install fake (no-op) hw-scan handler"); + +/** + * enum hwsim_regtest - the type of regulatory tests we offer + * + * These are the different values you can use for the regtest + * module parameter. This is useful to help test world roaming + * and the driver regulatory_hint() call and combinations of these. + * If you want to do specific alpha2 regulatory domain tests simply + * use the userspace regulatory request as that will be respected as + * well without the need of this module parameter. This is designed + * only for testing the driver regulatory request, world roaming + * and all possible combinations. + * + * @HWSIM_REGTEST_DISABLED: No regulatory tests are performed, + * this is the default value. 
+ * @HWSIM_REGTEST_DRIVER_REG_FOLLOW: Used for testing the driver regulatory + * hint; only one driver regulatory hint will be sent, so the + * secondary radios are expected to follow. + * @HWSIM_REGTEST_DRIVER_REG_ALL: Used for testing the driver regulatory + * request with all radios reporting the same regulatory domain. + * @HWSIM_REGTEST_DIFF_COUNTRY: Used for testing the drivers calling + * different regulatory domain requests. Expected behaviour is for + * an intersection to occur but each device will still use its + * respective requested regulatory domain. Subsequent radios will + * use the resulting intersection. + * @HWSIM_REGTEST_WORLD_ROAM: Used for testing the world roaming. We accomplish + * this by using a custom beacon-capable regulatory domain for the first + * radio. All other devices world roam. + * @HWSIM_REGTEST_CUSTOM_WORLD: Used for testing the custom world regulatory + * domain requests. All radios will adhere to this custom world regulatory + * domain. + * @HWSIM_REGTEST_CUSTOM_WORLD_2: Used for testing two custom world regulatory + * domain requests. The first radio will adhere to the first custom world + * regulatory domain, the second one to the second custom world regulatory + * domain. All other devices will world roam. + * @HWSIM_REGTEST_STRICT_FOLLOW: Used for testing strict regulatory domain + * settings; only the first radio will send a regulatory domain request + * and use strict settings. The rest of the radios are expected to follow. + * @HWSIM_REGTEST_STRICT_ALL: Used for testing strict regulatory domain + * settings. All radios will adhere to this. + * @HWSIM_REGTEST_STRICT_AND_DRIVER_REG: Used for testing strict regulatory + * domain settings, combined with secondary driver regulatory domain + * settings. The first radio will get a strict regulatory domain setting + * using the first driver regulatory request and the second radio will use + * non-strict settings using the second driver regulatory request. All + * other devices should follow the intersection created between the + * first two. + * @HWSIM_REGTEST_ALL: Used for testing every possible mix. You will need + * at least 6 radios for a complete test. We will test in this order: + * 1 - driver custom world regulatory domain + * 2 - second custom world regulatory domain + * 3 - first driver regulatory domain request + * 4 - second driver regulatory domain request + * 5 - strict regulatory domain settings using the third driver regulatory + * domain request + * 6 and on - should follow the intersection of the 3rd, 4th and 5th radio + * regulatory requests.
+ */ +enum hwsim_regtest { + HWSIM_REGTEST_DISABLED = 0, + HWSIM_REGTEST_DRIVER_REG_FOLLOW = 1, + HWSIM_REGTEST_DRIVER_REG_ALL = 2, + HWSIM_REGTEST_DIFF_COUNTRY = 3, + HWSIM_REGTEST_WORLD_ROAM = 4, + HWSIM_REGTEST_CUSTOM_WORLD = 5, + HWSIM_REGTEST_CUSTOM_WORLD_2 = 6, + HWSIM_REGTEST_STRICT_FOLLOW = 7, + HWSIM_REGTEST_STRICT_ALL = 8, + HWSIM_REGTEST_STRICT_AND_DRIVER_REG = 9, + HWSIM_REGTEST_ALL = 10, +}; + +/* Set to one of the HWSIM_REGTEST_* values above */ +static int regtest = HWSIM_REGTEST_DISABLED; +module_param(regtest, int, 0444); +MODULE_PARM_DESC(regtest, "The type of regulatory test we want to run"); + +static const char *hwsim_alpha2s[] = { + "FI", + "AL", + "US", + "DE", + "JP", + "AL", +}; + +static const struct ieee80211_regdomain hwsim_world_regdom_custom_01 = { + .n_reg_rules = 4, + .alpha2 = "99", + .reg_rules = { + REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), + REG_RULE(2484-10, 2484+10, 40, 0, 20, 0), + REG_RULE(5150-10, 5240+10, 40, 0, 30, 0), + REG_RULE(5745-10, 5825+10, 40, 0, 30, 0), + } +}; + +static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = { + .n_reg_rules = 2, + .alpha2 = "99", + .reg_rules = { + REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), + REG_RULE(5725-10, 5850+10, 40, 0, 30, + NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS), + } +}; + +struct hwsim_vif_priv { + u32 magic; + u8 bssid[ETH_ALEN]; + bool assoc; + u16 aid; +}; + +#define HWSIM_VIF_MAGIC 0x69537748 + +static inline void hwsim_check_magic(struct ieee80211_vif *vif) +{ + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + WARN_ON(vp->magic != HWSIM_VIF_MAGIC); +} + +static inline void hwsim_set_magic(struct ieee80211_vif *vif) +{ + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + vp->magic = HWSIM_VIF_MAGIC; +} + +static inline void hwsim_clear_magic(struct ieee80211_vif *vif) +{ + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + vp->magic = 0; +} + +struct hwsim_sta_priv { + u32 magic; +}; + +#define HWSIM_STA_MAGIC 0x6d537748 + +static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta) +{ + struct hwsim_sta_priv *sp = (void *)sta->drv_priv; + WARN_ON(sp->magic != HWSIM_STA_MAGIC); +} + +static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta) +{ + struct hwsim_sta_priv *sp = (void *)sta->drv_priv; + sp->magic = HWSIM_STA_MAGIC; +} + +static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta) +{ + struct hwsim_sta_priv *sp = (void *)sta->drv_priv; + sp->magic = 0; +} + +static struct class *hwsim_class; + +static struct net_device *hwsim_mon; /* global monitor netdev */ + +#define CHAN2G(_freq) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_freq), \ + .max_power = 20, \ +} + +#define CHAN5G(_freq) { \ + .band = IEEE80211_BAND_5GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_freq), \ + .max_power = 20, \ +} + +static const struct ieee80211_channel hwsim_channels_2ghz[] = { + CHAN2G(2412), /* Channel 1 */ + CHAN2G(2417), /* Channel 2 */ + CHAN2G(2422), /* Channel 3 */ + CHAN2G(2427), /* Channel 4 */ + CHAN2G(2432), /* Channel 5 */ + CHAN2G(2437), /* Channel 6 */ + CHAN2G(2442), /* Channel 7 */ + CHAN2G(2447), /* Channel 8 */ + CHAN2G(2452), /* Channel 9 */ + CHAN2G(2457), /* Channel 10 */ + CHAN2G(2462), /* Channel 11 */ + CHAN2G(2467), /* Channel 12 */ + CHAN2G(2472), /* Channel 13 */ + CHAN2G(2484), /* Channel 14 */ +}; + +static const struct ieee80211_channel hwsim_channels_5ghz[] = { + CHAN5G(5180), /* Channel 36 */ + CHAN5G(5200), /* Channel 40 */ + CHAN5G(5220), /* Channel 44 */ + CHAN5G(5240), /* Channel 
48 */ + + CHAN5G(5260), /* Channel 52 */ + CHAN5G(5280), /* Channel 56 */ + CHAN5G(5300), /* Channel 60 */ + CHAN5G(5320), /* Channel 64 */ + + CHAN5G(5500), /* Channel 100 */ + CHAN5G(5520), /* Channel 104 */ + CHAN5G(5540), /* Channel 108 */ + CHAN5G(5560), /* Channel 112 */ + CHAN5G(5580), /* Channel 116 */ + CHAN5G(5600), /* Channel 120 */ + CHAN5G(5620), /* Channel 124 */ + CHAN5G(5640), /* Channel 128 */ + CHAN5G(5660), /* Channel 132 */ + CHAN5G(5680), /* Channel 136 */ + CHAN5G(5700), /* Channel 140 */ + + CHAN5G(5745), /* Channel 149 */ + CHAN5G(5765), /* Channel 153 */ + CHAN5G(5785), /* Channel 157 */ + CHAN5G(5805), /* Channel 161 */ + CHAN5G(5825), /* Channel 165 */ +}; + +static const struct ieee80211_rate hwsim_rates[] = { + { .bitrate = 10 }, + { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60 }, + { .bitrate = 90 }, + { .bitrate = 120 }, + { .bitrate = 180 }, + { .bitrate = 240 }, + { .bitrate = 360 }, + { .bitrate = 480 }, + { .bitrate = 540 } +}; + +static spinlock_t hwsim_radio_lock; +static struct list_head hwsim_radios; + +struct mac80211_hwsim_data { + struct list_head list; + struct ieee80211_hw *hw; + struct device *dev; + struct ieee80211_supported_band bands[2]; + struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)]; + struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; + struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; + + struct mac_address addresses[2]; + + struct ieee80211_channel *channel; + unsigned long beacon_int; /* in jiffies unit */ + unsigned int rx_filter; + bool started, idle; + struct timer_list beacon_timer; + enum ps_mode { + PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL + } ps; + bool ps_poll_pending; + struct dentry *debugfs; + struct dentry *debugfs_ps; + + /* + * Only radios in the same group can communicate together (the + * channel has to match too). Each bit represents a group. A + * radio can be in more then one group. 
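+ * For example, a radio whose mask is 0x5 can exchange frames with any + * radio whose mask has bit 0 (0x1) or bit 2 (0x4) set; the TX path checks + * (data->group & data2->group), and the mask is writable through the + * hwsim_fops_group debugfs attribute.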
+ */ + u64 group; + struct dentry *debugfs_group; +}; + + +struct hwsim_radiotap_hdr { + struct ieee80211_radiotap_header hdr; + u8 rt_flags; + u8 rt_rate; + __le16 rt_channel; + __le16 rt_chbitmask; +} __attribute__ ((packed)); + + +static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + /* TODO: allow packet injection */ + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + + +static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw, + struct sk_buff *tx_skb) +{ + struct mac80211_hwsim_data *data = hw->priv; + struct sk_buff *skb; + struct hwsim_radiotap_hdr *hdr; + u16 flags; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb); + struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info); + + if (!netif_running(hwsim_mon)) + return; + + skb = skb_copy_expand(tx_skb, sizeof(*hdr), 0, GFP_ATOMIC); + if (skb == NULL) + return; + + hdr = (struct hwsim_radiotap_hdr *) skb_push(skb, sizeof(*hdr)); + hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; + hdr->hdr.it_pad = 0; + hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); + hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_RATE) | + (1 << IEEE80211_RADIOTAP_CHANNEL)); + hdr->rt_flags = 0; + hdr->rt_rate = txrate->bitrate / 5; + hdr->rt_channel = cpu_to_le16(data->channel->center_freq); + flags = IEEE80211_CHAN_2GHZ; + if (txrate->flags & IEEE80211_RATE_ERP_G) + flags |= IEEE80211_CHAN_OFDM; + else + flags |= IEEE80211_CHAN_CCK; + hdr->rt_chbitmask = cpu_to_le16(flags); + + skb->dev = hwsim_mon; + skb_set_mac_header(skb, 0); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + memset(skb->cb, 0, sizeof(skb->cb)); + netif_rx(skb); +} + + +static void mac80211_hwsim_monitor_ack(struct ieee80211_hw *hw, const u8 *addr) +{ + struct mac80211_hwsim_data *data = hw->priv; + struct sk_buff *skb; + struct hwsim_radiotap_hdr *hdr; + u16 flags; + struct ieee80211_hdr *hdr11; + + if (!netif_running(hwsim_mon)) + return; + + skb = dev_alloc_skb(100); + if (skb == NULL) + return; + + hdr = (struct hwsim_radiotap_hdr *) skb_put(skb, sizeof(*hdr)); + hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; + hdr->hdr.it_pad = 0; + hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); + hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_CHANNEL)); + hdr->rt_flags = 0; + hdr->rt_rate = 0; + hdr->rt_channel = cpu_to_le16(data->channel->center_freq); + flags = IEEE80211_CHAN_2GHZ; + hdr->rt_chbitmask = cpu_to_le16(flags); + + hdr11 = (struct ieee80211_hdr *) skb_put(skb, 10); + hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | + IEEE80211_STYPE_ACK); + hdr11->duration_id = cpu_to_le16(0); + memcpy(hdr11->addr1, addr, ETH_ALEN); + + skb->dev = hwsim_mon; + skb_set_mac_header(skb, 0); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + memset(skb->cb, 0, sizeof(skb->cb)); + netif_rx(skb); +} + + +static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data, + struct sk_buff *skb) +{ + switch (data->ps) { + case PS_DISABLED: + return true; + case PS_ENABLED: + return false; + case PS_AUTO_POLL: + /* TODO: accept (some) Beacons by default and other frames only + * if pending PS-Poll has been sent */ + return true; + case PS_MANUAL_POLL: + /* Allow unicast frames to own address if there is a pending + * PS-Poll */ + if (data->ps_poll_pending && + memcmp(data->hw->wiphy->perm_addr, skb->data + 4, + ETH_ALEN) == 0) { + 
data->ps_poll_pending = false; + return true; + } + return false; + } + + return true; +} + + +struct mac80211_hwsim_addr_match_data { + bool ret; + const u8 *addr; +}; + +static void mac80211_hwsim_addr_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_addr_match_data *md = data; + if (memcmp(mac, md->addr, ETH_ALEN) == 0) + md->ret = true; +} + + +static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data, + const u8 *addr) +{ + struct mac80211_hwsim_addr_match_data md; + + if (memcmp(addr, data->hw->wiphy->perm_addr, ETH_ALEN) == 0) + return true; + + md.ret = false; + md.addr = addr; + ieee80211_iterate_active_interfaces_atomic(data->hw, + mac80211_hwsim_addr_iter, + &md); + + return md.ret; +} + + +static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct mac80211_hwsim_data *data = hw->priv, *data2; + bool ack = false; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_rx_status rx_status; + + if (data->idle) { + printk(KERN_DEBUG "%s: Trying to TX when idle - reject\n", + wiphy_name(hw->wiphy)); + return false; + } + + memset(&rx_status, 0, sizeof(rx_status)); + /* TODO: set mactime */ + rx_status.freq = data->channel->center_freq; + rx_status.band = data->channel->band; + rx_status.rate_idx = info->control.rates[0].idx; + /* TODO: simulate real signal strength (and optional packet loss) */ + rx_status.signal = -50; + + if (data->ps != PS_DISABLED) + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + + /* release the skb's source info */ + skb_orphan(skb); + skb_dst_drop(skb); + skb->mark = 0; + secpath_reset(skb); + nf_reset(skb); + + /* Copy skb to all enabled radios that are on the current frequency */ + spin_lock(&hwsim_radio_lock); + list_for_each_entry(data2, &hwsim_radios, list) { + struct sk_buff *nskb; + + if (data == data2) + continue; + + if (data2->idle || !data2->started || + !hwsim_ps_rx_ok(data2, skb) || + !data->channel || !data2->channel || + data->channel->center_freq != data2->channel->center_freq || + !(data->group & data2->group)) + continue; + + nskb = skb_copy(skb, GFP_ATOMIC); + if (nskb == NULL) + continue; + + if (mac80211_hwsim_addr_match(data2, hdr->addr1)) + ack = true; + memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); + ieee80211_rx_irqsafe(data2->hw, nskb); + } + spin_unlock(&hwsim_radio_lock); + + return ack; +} + + +static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + bool ack; + struct ieee80211_tx_info *txi; + + mac80211_hwsim_monitor_rx(hw, skb); + + if (skb->len < 10) { + /* Should not happen; just a sanity check for addr1 use */ + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + ack = mac80211_hwsim_tx_frame(hw, skb); + if (ack && skb->len >= 16) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + mac80211_hwsim_monitor_ack(hw, hdr->addr2); + } + + txi = IEEE80211_SKB_CB(skb); + + if (txi->control.vif) + hwsim_check_magic(txi->control.vif); + if (txi->control.sta) + hwsim_check_sta_magic(txi->control.sta); + + ieee80211_tx_info_clear_status(txi); + if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack) + txi->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status_irqsafe(hw, skb); + return NETDEV_TX_OK; +} + + +static int mac80211_hwsim_start(struct ieee80211_hw *hw) +{ + struct mac80211_hwsim_data *data = hw->priv; + printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__); + data->started = 1; + return 
0; +} + + +static void mac80211_hwsim_stop(struct ieee80211_hw *hw) +{ + struct mac80211_hwsim_data *data = hw->priv; + data->started = 0; + del_timer(&data->beacon_timer); + printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__); +} + + +static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", + wiphy_name(hw->wiphy), __func__, vif->type, + vif->addr); + hwsim_set_magic(vif); + return 0; +} + + +static void mac80211_hwsim_remove_interface( + struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n", + wiphy_name(hw->wiphy), __func__, vif->type, + vif->addr); + hwsim_check_magic(vif); + hwsim_clear_magic(vif); +} + + +static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ieee80211_hw *hw = arg; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + + hwsim_check_magic(vif); + + if (vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_MESH_POINT) + return; + + skb = ieee80211_beacon_get(hw, vif); + if (skb == NULL) + return; + info = IEEE80211_SKB_CB(skb); + + mac80211_hwsim_monitor_rx(hw, skb); + mac80211_hwsim_tx_frame(hw, skb); + dev_kfree_skb(skb); +} + + +static void mac80211_hwsim_beacon(unsigned long arg) +{ + struct ieee80211_hw *hw = (struct ieee80211_hw *) arg; + struct mac80211_hwsim_data *data = hw->priv; + + if (!data->started) + return; + + ieee80211_iterate_active_interfaces_atomic( + hw, mac80211_hwsim_beacon_tx, hw); + + data->beacon_timer.expires = jiffies + data->beacon_int; + add_timer(&data->beacon_timer); +} + + +static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mac80211_hwsim_data *data = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + static const char *chantypes[4] = { + [NL80211_CHAN_NO_HT] = "noht", + [NL80211_CHAN_HT20] = "ht20", + [NL80211_CHAN_HT40MINUS] = "ht40-", + [NL80211_CHAN_HT40PLUS] = "ht40+", + }; + static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = { + [IEEE80211_SMPS_AUTOMATIC] = "auto", + [IEEE80211_SMPS_OFF] = "off", + [IEEE80211_SMPS_STATIC] = "static", + [IEEE80211_SMPS_DYNAMIC] = "dynamic", + }; + + printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n", + wiphy_name(hw->wiphy), __func__, + conf->channel->center_freq, + chantypes[conf->channel_type], + !!(conf->flags & IEEE80211_CONF_IDLE), + !!(conf->flags & IEEE80211_CONF_PS), + smps_modes[conf->smps_mode]); + + data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); + + data->channel = conf->channel; + if (!data->started || !data->beacon_int) + del_timer(&data->beacon_timer); + else + mod_timer(&data->beacon_timer, jiffies + data->beacon_int); + + return 0; +} + + +static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags,u64 multicast) +{ + struct mac80211_hwsim_data *data = hw->priv; + + printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__); + + data->rx_filter = 0; + if (*total_flags & FIF_PROMISC_IN_BSS) + data->rx_filter |= FIF_PROMISC_IN_BSS; + if (*total_flags & FIF_ALLMULTI) + data->rx_filter |= FIF_ALLMULTI; + + *total_flags = data->rx_filter; +} + +static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + struct mac80211_hwsim_data *data = hw->priv; + + hwsim_check_magic(vif); + + printk(KERN_DEBUG 
"%s:%s(changed=0x%x)\n", + wiphy_name(hw->wiphy), __func__, changed); + + if (changed & BSS_CHANGED_BSSID) { + printk(KERN_DEBUG "%s:%s: BSSID changed: %pM\n", + wiphy_name(hw->wiphy), __func__, + info->bssid); + memcpy(vp->bssid, info->bssid, ETH_ALEN); + } + + if (changed & BSS_CHANGED_ASSOC) { + printk(KERN_DEBUG " %s: ASSOC: assoc=%d aid=%d\n", + wiphy_name(hw->wiphy), info->assoc, info->aid); + vp->assoc = info->assoc; + vp->aid = info->aid; + } + + if (changed & BSS_CHANGED_BEACON_INT) { + printk(KERN_DEBUG " %s: BCNINT: %d\n", + wiphy_name(hw->wiphy), info->beacon_int); + data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000; + if (WARN_ON(!data->beacon_int)) + data->beacon_int = 1; + if (data->started) + mod_timer(&data->beacon_timer, + jiffies + data->beacon_int); + } + + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + printk(KERN_DEBUG " %s: ERP_CTS_PROT: %d\n", + wiphy_name(hw->wiphy), info->use_cts_prot); + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + printk(KERN_DEBUG " %s: ERP_PREAMBLE: %d\n", + wiphy_name(hw->wiphy), info->use_short_preamble); + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + printk(KERN_DEBUG " %s: ERP_SLOT: %d\n", + wiphy_name(hw->wiphy), info->use_short_slot); + } + + if (changed & BSS_CHANGED_HT) { + printk(KERN_DEBUG " %s: HT: op_mode=0x%x\n", + wiphy_name(hw->wiphy), + info->ht_operation_mode); + } + + if (changed & BSS_CHANGED_BASIC_RATES) { + printk(KERN_DEBUG " %s: BASIC_RATES: 0x%llx\n", + wiphy_name(hw->wiphy), + (unsigned long long) info->basic_rates); + } +} + +static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + hwsim_check_magic(vif); + hwsim_set_sta_magic(sta); + + return 0; +} + +static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + hwsim_check_magic(vif); + hwsim_clear_sta_magic(sta); + + return 0; +} + +static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + hwsim_check_magic(vif); + + switch (cmd) { + case STA_NOTIFY_SLEEP: + case STA_NOTIFY_AWAKE: + /* TODO: make good use of these flags */ + break; + default: + WARN(1, "Invalid sta notify: %d\n", cmd); + break; + } +} + +static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + bool set) +{ + hwsim_check_sta_magic(sta); + return 0; +} + +static int mac80211_hwsim_conf_tx( + struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + printk(KERN_DEBUG "%s:%s (queue=%d txop=%d cw_min=%d cw_max=%d " + "aifs=%d)\n", + wiphy_name(hw->wiphy), __func__, queue, + params->txop, params->cw_min, params->cw_max, params->aifs); + return 0; +} + +#ifdef CONFIG_NL80211_TESTMODE +/* + * This section contains example code for using netlink + * attributes with the testmode command in nl80211. 
+ */ + +/* These enums need to be kept in sync with userspace */ +enum hwsim_testmode_attr { + __HWSIM_TM_ATTR_INVALID = 0, + HWSIM_TM_ATTR_CMD = 1, + HWSIM_TM_ATTR_PS = 2, + + /* keep last */ + __HWSIM_TM_ATTR_AFTER_LAST, + HWSIM_TM_ATTR_MAX = __HWSIM_TM_ATTR_AFTER_LAST - 1 +}; + +enum hwsim_testmode_cmd { + HWSIM_TM_CMD_SET_PS = 0, + HWSIM_TM_CMD_GET_PS = 1, +}; + +static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = { + [HWSIM_TM_ATTR_CMD] = { .type = NLA_U32 }, + [HWSIM_TM_ATTR_PS] = { .type = NLA_U32 }, +}; + +static int hwsim_fops_ps_write(void *dat, u64 val); + +static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, + void *data, int len) +{ + struct mac80211_hwsim_data *hwsim = hw->priv; + struct nlattr *tb[HWSIM_TM_ATTR_MAX + 1]; + struct sk_buff *skb; + int err, ps; + + err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len, + hwsim_testmode_policy); + if (err) + return err; + + if (!tb[HWSIM_TM_ATTR_CMD]) + return -EINVAL; + + switch (nla_get_u32(tb[HWSIM_TM_ATTR_CMD])) { + case HWSIM_TM_CMD_SET_PS: + if (!tb[HWSIM_TM_ATTR_PS]) + return -EINVAL; + ps = nla_get_u32(tb[HWSIM_TM_ATTR_PS]); + return hwsim_fops_ps_write(hwsim, ps); + case HWSIM_TM_CMD_GET_PS: + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, + nla_total_size(sizeof(u32))); + if (!skb) + return -ENOMEM; + NLA_PUT_U32(skb, HWSIM_TM_ATTR_PS, hwsim->ps); + return cfg80211_testmode_reply(skb); + default: + return -EOPNOTSUPP; + } + + nla_put_failure: + kfree_skb(skb); + return -ENOBUFS; +} +#endif + +static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + switch (action) { + case IEEE80211_AMPDU_TX_START: + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP: + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + break; + case IEEE80211_AMPDU_RX_START: + case IEEE80211_AMPDU_RX_STOP: + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop) +{ + /* + * In this special case, there's nothing we need to + * do because hwsim does transmission synchronously. + * In the future, when it does transmissions via + * userspace, we may need to do something. 
+ */ +} + +struct hw_scan_done { + struct delayed_work w; + struct ieee80211_hw *hw; +}; + +static void hw_scan_done(struct work_struct *work) +{ + struct hw_scan_done *hsd = + container_of(work, struct hw_scan_done, w.work); + + ieee80211_scan_completed(hsd->hw, false); + kfree(hsd); +} + +static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw, + struct cfg80211_scan_request *req) +{ + struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL); + int i; + + if (!hsd) + return -ENOMEM; + + hsd->hw = hw; + INIT_DELAYED_WORK(&hsd->w, hw_scan_done); + + printk(KERN_DEBUG "hwsim scan request\n"); + for (i = 0; i < req->n_channels; i++) + printk(KERN_DEBUG "hwsim scan freq %d\n", + req->channels[i]->center_freq); + + ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ); + + return 0; +} + +static struct ieee80211_ops mac80211_hwsim_ops = +{ + .tx = mac80211_hwsim_tx, + .start = mac80211_hwsim_start, + .stop = mac80211_hwsim_stop, + .add_interface = mac80211_hwsim_add_interface, + .remove_interface = mac80211_hwsim_remove_interface, + .config = mac80211_hwsim_config, + .configure_filter = mac80211_hwsim_configure_filter, + .bss_info_changed = mac80211_hwsim_bss_info_changed, + .sta_add = mac80211_hwsim_sta_add, + .sta_remove = mac80211_hwsim_sta_remove, + .sta_notify = mac80211_hwsim_sta_notify, + .set_tim = mac80211_hwsim_set_tim, + .conf_tx = mac80211_hwsim_conf_tx, + CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) + .ampdu_action = mac80211_hwsim_ampdu_action, + .flush = mac80211_hwsim_flush, +}; + + +static void mac80211_hwsim_free(void) +{ + struct list_head tmplist, *i, *tmp; + struct mac80211_hwsim_data *data, *tmpdata; + + INIT_LIST_HEAD(&tmplist); + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_safe(i, tmp, &hwsim_radios) + list_move(i, &tmplist); + spin_unlock_bh(&hwsim_radio_lock); + + list_for_each_entry_safe(data, tmpdata, &tmplist, list) { + debugfs_remove(data->debugfs_group); + debugfs_remove(data->debugfs_ps); + debugfs_remove(data->debugfs); + ieee80211_unregister_hw(data->hw); + device_unregister(data->dev); + ieee80211_free_hw(data->hw); + } + class_destroy(hwsim_class); +} + + +static struct device_driver mac80211_hwsim_driver = { + .name = "mac80211_hwsim" +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops hwsim_netdev_ops = { + .ndo_start_xmit = hwsim_mon_xmit, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; +#endif + +static void hwsim_mon_setup(struct net_device *dev) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + dev->netdev_ops = &hwsim_netdev_ops; +#else + dev->hard_start_xmit = hwsim_mon_xmit; +#endif + dev->destructor = free_netdev; + ether_setup(dev); + dev->tx_queue_len = 0; + dev->type = ARPHRD_IEEE80211_RADIOTAP; + memset(dev->dev_addr, 0, ETH_ALEN); + dev->dev_addr[0] = 0x12; +} + + +static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_data *data = dat; + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + struct sk_buff *skb; + struct ieee80211_pspoll *pspoll; + + if (!vp->assoc) + return; + + printk(KERN_DEBUG "%s:%s: send PS-Poll to %pM for aid %d\n", + wiphy_name(data->hw->wiphy), __func__, vp->bssid, vp->aid); + + skb = dev_alloc_skb(sizeof(*pspoll)); + if (!skb) + return; + pspoll = (void *) skb_put(skb, sizeof(*pspoll)); + pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | + IEEE80211_STYPE_PSPOLL | + IEEE80211_FCTL_PM); + pspoll->aid = cpu_to_le16(0xc000 | 
vp->aid); + memcpy(pspoll->bssid, vp->bssid, ETH_ALEN); + memcpy(pspoll->ta, mac, ETH_ALEN); + if (!mac80211_hwsim_tx_frame(data->hw, skb)) + printk(KERN_DEBUG "%s: PS-Poll frame not ack'ed\n", __func__); + dev_kfree_skb(skb); +} + + +static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, + struct ieee80211_vif *vif, int ps) +{ + struct hwsim_vif_priv *vp = (void *)vif->drv_priv; + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + + if (!vp->assoc) + return; + + printk(KERN_DEBUG "%s:%s: send data::nullfunc to %pM ps=%d\n", + wiphy_name(data->hw->wiphy), __func__, vp->bssid, ps); + + skb = dev_alloc_skb(sizeof(*hdr)); + if (!skb) + return; + hdr = (void *) skb_put(skb, sizeof(*hdr) - ETH_ALEN); + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + (ps ? IEEE80211_FCTL_PM : 0)); + hdr->duration_id = cpu_to_le16(0); + memcpy(hdr->addr1, vp->bssid, ETH_ALEN); + memcpy(hdr->addr2, mac, ETH_ALEN); + memcpy(hdr->addr3, vp->bssid, ETH_ALEN); + if (!mac80211_hwsim_tx_frame(data->hw, skb)) + printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__); + dev_kfree_skb(skb); +} + + +static void hwsim_send_nullfunc_ps(void *dat, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_data *data = dat; + hwsim_send_nullfunc(data, mac, vif, 1); +} + + +static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mac80211_hwsim_data *data = dat; + hwsim_send_nullfunc(data, mac, vif, 0); +} + + +static int hwsim_fops_ps_read(void *dat, u64 *val) +{ + struct mac80211_hwsim_data *data = dat; + *val = data->ps; + return 0; +} + +static int hwsim_fops_ps_write(void *dat, u64 val) +{ + struct mac80211_hwsim_data *data = dat; + enum ps_mode old_ps; + + if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL && + val != PS_MANUAL_POLL) + return -EINVAL; + + old_ps = data->ps; + data->ps = val; + + if (val == PS_MANUAL_POLL) { + ieee80211_iterate_active_interfaces(data->hw, + hwsim_send_ps_poll, data); + data->ps_poll_pending = true; + } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { + ieee80211_iterate_active_interfaces(data->hw, + hwsim_send_nullfunc_ps, + data); + } else if (old_ps != PS_DISABLED && val == PS_DISABLED) { + ieee80211_iterate_active_interfaces(data->hw, + hwsim_send_nullfunc_no_ps, + data); + } + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write, + "%llu\n"); + + +static int hwsim_fops_group_read(void *dat, u64 *val) +{ + struct mac80211_hwsim_data *data = dat; + *val = data->group; + return 0; +} + +static int hwsim_fops_group_write(void *dat, u64 val) +{ + struct mac80211_hwsim_data *data = dat; + data->group = val; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group, + hwsim_fops_group_read, hwsim_fops_group_write, + "%llx\n"); + +static int __init init_mac80211_hwsim(void) +{ + int i, err = 0; + u8 addr[ETH_ALEN]; + struct mac80211_hwsim_data *data; + struct ieee80211_hw *hw; + enum ieee80211_band band; + + if (radios < 1 || radios > 100) + return -EINVAL; + + if (fake_hw_scan) + mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; + + spin_lock_init(&hwsim_radio_lock); + INIT_LIST_HEAD(&hwsim_radios); + + hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); + if (IS_ERR(hwsim_class)) + return PTR_ERR(hwsim_class); + + memset(addr, 0, ETH_ALEN); + addr[0] = 0x02; + + for (i = 0; i < radios; i++) { + printk(KERN_DEBUG "mac80211_hwsim: Initializing radio %d\n", + i); + hw = ieee80211_alloc_hw(sizeof(*data), 
&mac80211_hwsim_ops); + if (!hw) { + printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw " + "failed\n"); + err = -ENOMEM; + goto failed; + } + data = hw->priv; + data->hw = hw; + + data->dev = device_create(hwsim_class, NULL, 0, hw, + "hwsim%d", i); + if (IS_ERR(data->dev)) { + printk(KERN_DEBUG + "mac80211_hwsim: device_create " + "failed (%ld)\n", PTR_ERR(data->dev)); + err = -ENOMEM; + goto failed_drvdata; + } + data->dev->driver = &mac80211_hwsim_driver; + + SET_IEEE80211_DEV(hw, data->dev); + addr[3] = i >> 8; + addr[4] = i; + memcpy(data->addresses[0].addr, addr, ETH_ALEN); + memcpy(data->addresses[1].addr, addr, ETH_ALEN); + data->addresses[1].addr[0] |= 0x40; + hw->wiphy->n_addresses = 2; + hw->wiphy->addresses = data->addresses; + + hw->channel_change_time = 1; + hw->queues = 4; + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT); + + hw->flags = IEEE80211_HW_MFP_CAPABLE | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_STATIC_SMPS | + IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS; + + /* ask mac80211 to reserve space for magic */ + hw->vif_data_size = sizeof(struct hwsim_vif_priv); + hw->sta_data_size = sizeof(struct hwsim_sta_priv); + + memcpy(data->channels_2ghz, hwsim_channels_2ghz, + sizeof(hwsim_channels_2ghz)); + memcpy(data->channels_5ghz, hwsim_channels_5ghz, + sizeof(hwsim_channels_5ghz)); + memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates)); + + for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { + struct ieee80211_supported_band *sband = &data->bands[band]; + switch (band) { + case IEEE80211_BAND_2GHZ: + sband->channels = data->channels_2ghz; + sband->n_channels = + ARRAY_SIZE(hwsim_channels_2ghz); + sband->bitrates = data->rates; + sband->n_bitrates = ARRAY_SIZE(hwsim_rates); + break; + case IEEE80211_BAND_5GHZ: + sband->channels = data->channels_5ghz; + sband->n_channels = + ARRAY_SIZE(hwsim_channels_5ghz); + sband->bitrates = data->rates + 4; + sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4; + break; + default: + break; + } + + sband->ht_cap.ht_supported = true; + sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_DSSSCCK40; + sband->ht_cap.ampdu_factor = 0x3; + sband->ht_cap.ampdu_density = 0x6; + memset(&sband->ht_cap.mcs, 0, + sizeof(sband->ht_cap.mcs)); + sband->ht_cap.mcs.rx_mask[0] = 0xff; + sband->ht_cap.mcs.rx_mask[1] = 0xff; + sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + + hw->wiphy->bands[band] = sband; + } + /* By default all radios are belonging to the first group */ + data->group = 1; + + /* Work to be done prior to ieee80211_register_hw() */ + switch (regtest) { + case HWSIM_REGTEST_DISABLED: + case HWSIM_REGTEST_DRIVER_REG_FOLLOW: + case HWSIM_REGTEST_DRIVER_REG_ALL: + case HWSIM_REGTEST_DIFF_COUNTRY: + /* + * Nothing to be done for driver regulatory domain + * hints prior to ieee80211_register_hw() + */ + break; + case HWSIM_REGTEST_WORLD_ROAM: + if (i == 0) { + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + wiphy_apply_custom_regulatory(hw->wiphy, + &hwsim_world_regdom_custom_01); + } + break; + case HWSIM_REGTEST_CUSTOM_WORLD: + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + wiphy_apply_custom_regulatory(hw->wiphy, + &hwsim_world_regdom_custom_01); + break; + case HWSIM_REGTEST_CUSTOM_WORLD_2: + if (i == 0) { + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + wiphy_apply_custom_regulatory(hw->wiphy, + &hwsim_world_regdom_custom_01); + } else if (i == 1) 
{ + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + wiphy_apply_custom_regulatory(hw->wiphy, + &hwsim_world_regdom_custom_02); + } + break; + case HWSIM_REGTEST_STRICT_ALL: + hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY; + break; + case HWSIM_REGTEST_STRICT_FOLLOW: + case HWSIM_REGTEST_STRICT_AND_DRIVER_REG: + if (i == 0) + hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY; + break; + case HWSIM_REGTEST_ALL: + if (i == 0) { + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + wiphy_apply_custom_regulatory(hw->wiphy, + &hwsim_world_regdom_custom_01); + } else if (i == 1) { + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + wiphy_apply_custom_regulatory(hw->wiphy, + &hwsim_world_regdom_custom_02); + } else if (i == 4) + hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY; + break; + default: + break; + } + + /* give the regulatory workqueue a chance to run */ + if (regtest) + schedule_timeout_interruptible(1); + err = ieee80211_register_hw(hw); + if (err < 0) { + printk(KERN_DEBUG "mac80211_hwsim: " + "ieee80211_register_hw failed (%d)\n", err); + goto failed_hw; + } + + /* Work to be done after to ieee80211_register_hw() */ + switch (regtest) { + case HWSIM_REGTEST_WORLD_ROAM: + case HWSIM_REGTEST_DISABLED: + break; + case HWSIM_REGTEST_DRIVER_REG_FOLLOW: + if (!i) + regulatory_hint(hw->wiphy, hwsim_alpha2s[0]); + break; + case HWSIM_REGTEST_DRIVER_REG_ALL: + case HWSIM_REGTEST_STRICT_ALL: + regulatory_hint(hw->wiphy, hwsim_alpha2s[0]); + break; + case HWSIM_REGTEST_DIFF_COUNTRY: + if (i < ARRAY_SIZE(hwsim_alpha2s)) + regulatory_hint(hw->wiphy, hwsim_alpha2s[i]); + break; + case HWSIM_REGTEST_CUSTOM_WORLD: + case HWSIM_REGTEST_CUSTOM_WORLD_2: + /* + * Nothing to be done for custom world regulatory + * domains after to ieee80211_register_hw + */ + break; + case HWSIM_REGTEST_STRICT_FOLLOW: + if (i == 0) + regulatory_hint(hw->wiphy, hwsim_alpha2s[0]); + break; + case HWSIM_REGTEST_STRICT_AND_DRIVER_REG: + if (i == 0) + regulatory_hint(hw->wiphy, hwsim_alpha2s[0]); + else if (i == 1) + regulatory_hint(hw->wiphy, hwsim_alpha2s[1]); + break; + case HWSIM_REGTEST_ALL: + if (i == 2) + regulatory_hint(hw->wiphy, hwsim_alpha2s[0]); + else if (i == 3) + regulatory_hint(hw->wiphy, hwsim_alpha2s[1]); + else if (i == 4) + regulatory_hint(hw->wiphy, hwsim_alpha2s[2]); + break; + default: + break; + } + + printk(KERN_DEBUG "%s: hwaddr %pM registered\n", + wiphy_name(hw->wiphy), + hw->wiphy->perm_addr); + + data->debugfs = debugfs_create_dir("hwsim", + hw->wiphy->debugfsdir); + data->debugfs_ps = debugfs_create_file("ps", 0666, + data->debugfs, data, + &hwsim_fops_ps); + data->debugfs_group = debugfs_create_file("group", 0666, + data->debugfs, data, + &hwsim_fops_group); + + setup_timer(&data->beacon_timer, mac80211_hwsim_beacon, + (unsigned long) hw); + + list_add_tail(&data->list, &hwsim_radios); + } + + hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup); + if (hwsim_mon == NULL) + goto failed; + + rtnl_lock(); + + err = dev_alloc_name(hwsim_mon, hwsim_mon->name); + if (err < 0) + goto failed_mon; + + + err = register_netdevice(hwsim_mon); + if (err < 0) + goto failed_mon; + + rtnl_unlock(); + + return 0; + +failed_mon: + rtnl_unlock(); + free_netdev(hwsim_mon); + mac80211_hwsim_free(); + return err; + +failed_hw: + device_unregister(data->dev); +failed_drvdata: + ieee80211_free_hw(hw); +failed: + mac80211_hwsim_free(); + return err; +} + + +static void __exit exit_mac80211_hwsim(void) +{ + printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n"); + + mac80211_hwsim_free(); + 
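+	/* all simulated radios are gone now; drop the shared monitor netdev last */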
unregister_netdev(hwsim_mon); +} + + +module_init(init_mac80211_hwsim); +module_exit(exit_mac80211_hwsim); diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c new file mode 100644 index 0000000..ac65e13 --- /dev/null +++ b/drivers/net/wireless/mwl8k.c @@ -0,0 +1,4214 @@ +/* + * drivers/net/wireless/mwl8k.c + * Driver for Marvell TOPDOG 802.11 Wireless cards + * + * Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver" +#define MWL8K_NAME KBUILD_MODNAME +#define MWL8K_VERSION "0.12" + +/* Register definitions */ +#define MWL8K_HIU_GEN_PTR 0x00000c10 +#define MWL8K_MODE_STA 0x0000005a +#define MWL8K_MODE_AP 0x000000a5 +#define MWL8K_HIU_INT_CODE 0x00000c14 +#define MWL8K_FWSTA_READY 0xf0f1f2f4 +#define MWL8K_FWAP_READY 0xf1f2f4a5 +#define MWL8K_INT_CODE_CMD_FINISHED 0x00000005 +#define MWL8K_HIU_SCRATCH 0x00000c40 + +/* Host->device communications */ +#define MWL8K_HIU_H2A_INTERRUPT_EVENTS 0x00000c18 +#define MWL8K_HIU_H2A_INTERRUPT_STATUS 0x00000c1c +#define MWL8K_HIU_H2A_INTERRUPT_MASK 0x00000c20 +#define MWL8K_HIU_H2A_INTERRUPT_CLEAR_SEL 0x00000c24 +#define MWL8K_HIU_H2A_INTERRUPT_STATUS_MASK 0x00000c28 +#define MWL8K_H2A_INT_DUMMY (1 << 20) +#define MWL8K_H2A_INT_RESET (1 << 15) +#define MWL8K_H2A_INT_DOORBELL (1 << 1) +#define MWL8K_H2A_INT_PPA_READY (1 << 0) + +/* Device->host communications */ +#define MWL8K_HIU_A2H_INTERRUPT_EVENTS 0x00000c2c +#define MWL8K_HIU_A2H_INTERRUPT_STATUS 0x00000c30 +#define MWL8K_HIU_A2H_INTERRUPT_MASK 0x00000c34 +#define MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL 0x00000c38 +#define MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK 0x00000c3c +#define MWL8K_A2H_INT_DUMMY (1 << 20) +#define MWL8K_A2H_INT_CHNL_SWITCHED (1 << 11) +#define MWL8K_A2H_INT_QUEUE_EMPTY (1 << 10) +#define MWL8K_A2H_INT_RADAR_DETECT (1 << 7) +#define MWL8K_A2H_INT_RADIO_ON (1 << 6) +#define MWL8K_A2H_INT_RADIO_OFF (1 << 5) +#define MWL8K_A2H_INT_MAC_EVENT (1 << 3) +#define MWL8K_A2H_INT_OPC_DONE (1 << 2) +#define MWL8K_A2H_INT_RX_READY (1 << 1) +#define MWL8K_A2H_INT_TX_DONE (1 << 0) + +#define MWL8K_A2H_EVENTS (MWL8K_A2H_INT_DUMMY | \ + MWL8K_A2H_INT_CHNL_SWITCHED | \ + MWL8K_A2H_INT_QUEUE_EMPTY | \ + MWL8K_A2H_INT_RADAR_DETECT | \ + MWL8K_A2H_INT_RADIO_ON | \ + MWL8K_A2H_INT_RADIO_OFF | \ + MWL8K_A2H_INT_MAC_EVENT | \ + MWL8K_A2H_INT_OPC_DONE | \ + MWL8K_A2H_INT_RX_READY | \ + MWL8K_A2H_INT_TX_DONE) + +#define MWL8K_RX_QUEUES 1 +#define MWL8K_TX_QUEUES 4 + +struct rxd_ops { + int rxd_size; + void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr); + void (*rxd_refill)(void *rxd, dma_addr_t addr, int len); + int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status, + __le16 *qos); +}; + +struct mwl8k_device_info { + char *part_name; + char *helper_image; + char *fw_image; + struct rxd_ops *ap_rxd_ops; +}; + +struct mwl8k_rx_queue { + int rxd_count; + + /* hw receives here */ + int head; + + /* refill descs here */ + int tail; + + void *rxd; + dma_addr_t rxd_dma; + struct { + struct sk_buff *skb; + DECLARE_PCI_UNMAP_ADDR(dma) + } *buf; +}; + +struct mwl8k_tx_queue { + /* hw transmits here */ + int head; + + /* sw appends here */ + int tail; + + unsigned int len; + 
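+	/* descriptor ring and the skb currently attached to each slot */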
struct mwl8k_tx_desc *txd; + dma_addr_t txd_dma; + struct sk_buff **skb; +}; + +struct mwl8k_priv { + struct ieee80211_hw *hw; + struct pci_dev *pdev; + + struct mwl8k_device_info *device_info; + + void __iomem *sram; + void __iomem *regs; + + /* firmware */ + struct firmware *fw_helper; + struct firmware *fw_ucode; + + /* hardware/firmware parameters */ + bool ap_fw; + struct rxd_ops *rxd_ops; + struct ieee80211_supported_band band_24; + struct ieee80211_channel channels_24[14]; + struct ieee80211_rate rates_24[14]; + struct ieee80211_supported_band band_50; + struct ieee80211_channel channels_50[4]; + struct ieee80211_rate rates_50[9]; + u32 ap_macids_supported; + u32 sta_macids_supported; + + /* firmware access */ + struct mutex fw_mutex; + struct task_struct *fw_mutex_owner; + int fw_mutex_depth; + struct completion *hostcmd_wait; + + /* lock held over TX and TX reap */ + spinlock_t tx_lock; + + /* TX quiesce completion, protected by fw_mutex and tx_lock */ + struct completion *tx_wait; + + /* List of interfaces. */ + u32 macids_used; + struct list_head vif_list; + + /* power management status cookie from firmware */ + u32 *cookie; + dma_addr_t cookie_dma; + + u16 num_mcaddrs; + u8 hw_rev; + u32 fw_rev; + + /* + * Running count of TX packets in flight, to avoid + * iterating over the transmit rings each time. + */ + int pending_tx_pkts; + + struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES]; + struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES]; + + bool radio_on; + bool radio_short_preamble; + bool sniffer_enabled; + bool wmm_enabled; + + /* XXX need to convert this to handle multiple interfaces */ + bool capture_beacon; + u8 capture_bssid[ETH_ALEN]; + struct sk_buff *beacon_skb; + + /* + * This FJ worker has to be global as it is scheduled from the + * RX handler. At this point we don't know which interface it + * belongs to until the list of bssids waiting to complete join + * is checked. + */ + struct work_struct finalize_join_worker; + + /* Tasklet to perform TX reclaim. */ + struct tasklet_struct poll_tx_task; + + /* Tasklet to perform RX. */ + struct tasklet_struct poll_rx_task; +}; + +/* Per interface specific private data */ +struct mwl8k_vif { + struct list_head list; + struct ieee80211_vif *vif; + + /* Firmware macid for this vif. */ + int macid; + + /* Non AMPDU sequence number assigned by driver. */ + u16 seqno; +}; +#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) + +struct mwl8k_sta { + /* Index into station database. Returned by UPDATE_STADB. 
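+	   The value is copied into the TX descriptor's peer_id field when
+	   transmitting to this station (see mwl8k_txq_xmit()).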
*/ + u8 peer_id; +}; +#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv)) + +static const struct ieee80211_channel mwl8k_channels_24[] = { + { .center_freq = 2412, .hw_value = 1, }, + { .center_freq = 2417, .hw_value = 2, }, + { .center_freq = 2422, .hw_value = 3, }, + { .center_freq = 2427, .hw_value = 4, }, + { .center_freq = 2432, .hw_value = 5, }, + { .center_freq = 2437, .hw_value = 6, }, + { .center_freq = 2442, .hw_value = 7, }, + { .center_freq = 2447, .hw_value = 8, }, + { .center_freq = 2452, .hw_value = 9, }, + { .center_freq = 2457, .hw_value = 10, }, + { .center_freq = 2462, .hw_value = 11, }, + { .center_freq = 2467, .hw_value = 12, }, + { .center_freq = 2472, .hw_value = 13, }, + { .center_freq = 2484, .hw_value = 14, }, +}; + +static const struct ieee80211_rate mwl8k_rates_24[] = { + { .bitrate = 10, .hw_value = 2, }, + { .bitrate = 20, .hw_value = 4, }, + { .bitrate = 55, .hw_value = 11, }, + { .bitrate = 110, .hw_value = 22, }, + { .bitrate = 220, .hw_value = 44, }, + { .bitrate = 60, .hw_value = 12, }, + { .bitrate = 90, .hw_value = 18, }, + { .bitrate = 120, .hw_value = 24, }, + { .bitrate = 180, .hw_value = 36, }, + { .bitrate = 240, .hw_value = 48, }, + { .bitrate = 360, .hw_value = 72, }, + { .bitrate = 480, .hw_value = 96, }, + { .bitrate = 540, .hw_value = 108, }, + { .bitrate = 720, .hw_value = 144, }, +}; + +static const struct ieee80211_channel mwl8k_channels_50[] = { + { .center_freq = 5180, .hw_value = 36, }, + { .center_freq = 5200, .hw_value = 40, }, + { .center_freq = 5220, .hw_value = 44, }, + { .center_freq = 5240, .hw_value = 48, }, +}; + +static const struct ieee80211_rate mwl8k_rates_50[] = { + { .bitrate = 60, .hw_value = 12, }, + { .bitrate = 90, .hw_value = 18, }, + { .bitrate = 120, .hw_value = 24, }, + { .bitrate = 180, .hw_value = 36, }, + { .bitrate = 240, .hw_value = 48, }, + { .bitrate = 360, .hw_value = 72, }, + { .bitrate = 480, .hw_value = 96, }, + { .bitrate = 540, .hw_value = 108, }, + { .bitrate = 720, .hw_value = 144, }, +}; + +/* Set or get info from Firmware */ +#define MWL8K_CMD_SET 0x0001 +#define MWL8K_CMD_GET 0x0000 + +/* Firmware command codes */ +#define MWL8K_CMD_CODE_DNLD 0x0001 +#define MWL8K_CMD_GET_HW_SPEC 0x0003 +#define MWL8K_CMD_SET_HW_SPEC 0x0004 +#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010 +#define MWL8K_CMD_GET_STAT 0x0014 +#define MWL8K_CMD_RADIO_CONTROL 0x001c +#define MWL8K_CMD_RF_TX_POWER 0x001e +#define MWL8K_CMD_RF_ANTENNA 0x0020 +#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */ +#define MWL8K_CMD_SET_PRE_SCAN 0x0107 +#define MWL8K_CMD_SET_POST_SCAN 0x0108 +#define MWL8K_CMD_SET_RF_CHANNEL 0x010a +#define MWL8K_CMD_SET_AID 0x010d +#define MWL8K_CMD_SET_RATE 0x0110 +#define MWL8K_CMD_SET_FINALIZE_JOIN 0x0111 +#define MWL8K_CMD_RTS_THRESHOLD 0x0113 +#define MWL8K_CMD_SET_SLOT 0x0114 +#define MWL8K_CMD_SET_EDCA_PARAMS 0x0115 +#define MWL8K_CMD_SET_WMM_MODE 0x0123 +#define MWL8K_CMD_MIMO_CONFIG 0x0125 +#define MWL8K_CMD_USE_FIXED_RATE 0x0126 +#define MWL8K_CMD_ENABLE_SNIFFER 0x0150 +#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */ +#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 +#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */ +#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */ +#define MWL8K_CMD_UPDATE_STADB 0x1123 + +static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize) +{ +#define MWL8K_CMDNAME(x) case MWL8K_CMD_##x: do {\ + snprintf(buf, bufsize, "%s", #x);\ + return buf;\ + } while (0) + switch (cmd & ~0x8000) { + MWL8K_CMDNAME(CODE_DNLD); + MWL8K_CMDNAME(GET_HW_SPEC); + 
MWL8K_CMDNAME(SET_HW_SPEC); + MWL8K_CMDNAME(MAC_MULTICAST_ADR); + MWL8K_CMDNAME(GET_STAT); + MWL8K_CMDNAME(RADIO_CONTROL); + MWL8K_CMDNAME(RF_TX_POWER); + MWL8K_CMDNAME(RF_ANTENNA); + MWL8K_CMDNAME(SET_BEACON); + MWL8K_CMDNAME(SET_PRE_SCAN); + MWL8K_CMDNAME(SET_POST_SCAN); + MWL8K_CMDNAME(SET_RF_CHANNEL); + MWL8K_CMDNAME(SET_AID); + MWL8K_CMDNAME(SET_RATE); + MWL8K_CMDNAME(SET_FINALIZE_JOIN); + MWL8K_CMDNAME(RTS_THRESHOLD); + MWL8K_CMDNAME(SET_SLOT); + MWL8K_CMDNAME(SET_EDCA_PARAMS); + MWL8K_CMDNAME(SET_WMM_MODE); + MWL8K_CMDNAME(MIMO_CONFIG); + MWL8K_CMDNAME(USE_FIXED_RATE); + MWL8K_CMDNAME(ENABLE_SNIFFER); + MWL8K_CMDNAME(SET_MAC_ADDR); + MWL8K_CMDNAME(SET_RATEADAPT_MODE); + MWL8K_CMDNAME(BSS_START); + MWL8K_CMDNAME(SET_NEW_STN); + MWL8K_CMDNAME(UPDATE_STADB); + default: + snprintf(buf, bufsize, "0x%x", cmd); + } +#undef MWL8K_CMDNAME + + return buf; +} + +/* Hardware and firmware reset */ +static void mwl8k_hw_reset(struct mwl8k_priv *priv) +{ + iowrite32(MWL8K_H2A_INT_RESET, + priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); + iowrite32(MWL8K_H2A_INT_RESET, + priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); + msleep(20); +} + +/* Release fw image */ +static void mwl8k_release_fw(struct firmware **fw) +{ + if (*fw == NULL) + return; + release_firmware(*fw); + *fw = NULL; +} + +static void mwl8k_release_firmware(struct mwl8k_priv *priv) +{ + mwl8k_release_fw(&priv->fw_ucode); + mwl8k_release_fw(&priv->fw_helper); +} + +/* Request fw image */ +static int mwl8k_request_fw(struct mwl8k_priv *priv, + const char *fname, struct firmware **fw) +{ + /* release current image */ + if (*fw != NULL) + mwl8k_release_fw(fw); + + return request_firmware((const struct firmware **)fw, + fname, &priv->pdev->dev); +} + +static int mwl8k_request_firmware(struct mwl8k_priv *priv) +{ + struct mwl8k_device_info *di = priv->device_info; + int rc; + + if (di->helper_image != NULL) { + rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper); + if (rc) { + printk(KERN_ERR "%s: Error requesting helper " + "firmware file %s\n", pci_name(priv->pdev), + di->helper_image); + return rc; + } + } + + rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode); + if (rc) { + printk(KERN_ERR "%s: Error requesting firmware file %s\n", + pci_name(priv->pdev), di->fw_image); + mwl8k_release_fw(&priv->fw_helper); + return rc; + } + + return 0; +} + +struct mwl8k_cmd_pkt { + __le16 code; + __le16 length; + __u8 seq_num; + __u8 macid; + __le16 result; + char payload[0]; +} __attribute__((packed)); + +/* + * Firmware loading. + */ +static int +mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length) +{ + void __iomem *regs = priv->regs; + dma_addr_t dma_addr; + int loops; + + dma_addr = pci_map_single(priv->pdev, data, length, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(priv->pdev, dma_addr)) + return -ENOMEM; + + iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR); + iowrite32(0, regs + MWL8K_HIU_INT_CODE); + iowrite32(MWL8K_H2A_INT_DOORBELL, + regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); + iowrite32(MWL8K_H2A_INT_DUMMY, + regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); + + loops = 1000; + do { + u32 int_code; + + int_code = ioread32(regs + MWL8K_HIU_INT_CODE); + if (int_code == MWL8K_INT_CODE_CMD_FINISHED) { + iowrite32(0, regs + MWL8K_HIU_INT_CODE); + break; + } + + cond_resched(); + udelay(1); + } while (--loops); + + pci_unmap_single(priv->pdev, dma_addr, length, PCI_DMA_TODEVICE); + + return loops ? 
0 : -ETIMEDOUT; +} + +static int mwl8k_load_fw_image(struct mwl8k_priv *priv, + const u8 *data, size_t length) +{ + struct mwl8k_cmd_pkt *cmd; + int done; + int rc = 0; + + cmd = kmalloc(sizeof(*cmd) + 256, GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD); + cmd->seq_num = 0; + cmd->macid = 0; + cmd->result = 0; + + done = 0; + while (length) { + int block_size = length > 256 ? 256 : length; + + memcpy(cmd->payload, data + done, block_size); + cmd->length = cpu_to_le16(block_size); + + rc = mwl8k_send_fw_load_cmd(priv, cmd, + sizeof(*cmd) + block_size); + if (rc) + break; + + done += block_size; + length -= block_size; + } + + if (!rc) { + cmd->length = 0; + rc = mwl8k_send_fw_load_cmd(priv, cmd, sizeof(*cmd)); + } + + kfree(cmd); + + return rc; +} + +static int mwl8k_feed_fw_image(struct mwl8k_priv *priv, + const u8 *data, size_t length) +{ + unsigned char *buffer; + int may_continue, rc = 0; + u32 done, prev_block_size; + + buffer = kmalloc(1024, GFP_KERNEL); + if (buffer == NULL) + return -ENOMEM; + + done = 0; + prev_block_size = 0; + may_continue = 1000; + while (may_continue > 0) { + u32 block_size; + + block_size = ioread32(priv->regs + MWL8K_HIU_SCRATCH); + if (block_size & 1) { + block_size &= ~1; + may_continue--; + } else { + done += prev_block_size; + length -= prev_block_size; + } + + if (block_size > 1024 || block_size > length) { + rc = -EOVERFLOW; + break; + } + + if (length == 0) { + rc = 0; + break; + } + + if (block_size == 0) { + rc = -EPROTO; + may_continue--; + udelay(1); + continue; + } + + prev_block_size = block_size; + memcpy(buffer, data + done, block_size); + + rc = mwl8k_send_fw_load_cmd(priv, buffer, block_size); + if (rc) + break; + } + + if (!rc && length != 0) + rc = -EREMOTEIO; + + kfree(buffer); + + return rc; +} + +static int mwl8k_load_firmware(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + struct firmware *fw = priv->fw_ucode; + int rc; + int loops; + + if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) { + struct firmware *helper = priv->fw_helper; + + if (helper == NULL) { + printk(KERN_ERR "%s: helper image needed but none " + "given\n", pci_name(priv->pdev)); + return -EINVAL; + } + + rc = mwl8k_load_fw_image(priv, helper->data, helper->size); + if (rc) { + printk(KERN_ERR "%s: unable to load firmware " + "helper image\n", pci_name(priv->pdev)); + return rc; + } + msleep(5); + + rc = mwl8k_feed_fw_image(priv, fw->data, fw->size); + } else { + rc = mwl8k_load_fw_image(priv, fw->data, fw->size); + } + + if (rc) { + printk(KERN_ERR "%s: unable to load firmware image\n", + pci_name(priv->pdev)); + return rc; + } + + iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR); + + loops = 500000; + do { + u32 ready_code; + + ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE); + if (ready_code == MWL8K_FWAP_READY) { + priv->ap_fw = 1; + break; + } else if (ready_code == MWL8K_FWSTA_READY) { + priv->ap_fw = 0; + break; + } + + cond_resched(); + udelay(1); + } while (--loops); + + return loops ? 0 : -ETIMEDOUT; +} + + +/* DMA header used by firmware and hardware. */ +struct mwl8k_dma_data { + __le16 fwlen; + struct ieee80211_hdr wh; + char data[0]; +} __attribute__((packed)); + +/* Routines to add/remove DMA header from skb. 
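+ *
+ * Roughly, mwl8k_add_dma_header() turns
+ *
+ *	[802.11 header][payload]
+ * into
+ *	[2-byte fwlen][fixed-size 4-address 802.11 header][payload]
+ *
+ * and mwl8k_remove_dma_header() reverses the transformation on the RX
+ * and TX-reclaim paths, restoring the original header and, for QoS
+ * frames, the QoS control field.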
*/ +static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos) +{ + struct mwl8k_dma_data *tr; + int hdrlen; + + tr = (struct mwl8k_dma_data *)skb->data; + hdrlen = ieee80211_hdrlen(tr->wh.frame_control); + + if (hdrlen != sizeof(tr->wh)) { + if (ieee80211_is_data_qos(tr->wh.frame_control)) { + memmove(tr->data - hdrlen, &tr->wh, hdrlen - 2); + *((__le16 *)(tr->data - 2)) = qos; + } else { + memmove(tr->data - hdrlen, &tr->wh, hdrlen); + } + } + + if (hdrlen != sizeof(*tr)) + skb_pull(skb, sizeof(*tr) - hdrlen); +} + +static inline void mwl8k_add_dma_header(struct sk_buff *skb) +{ + struct ieee80211_hdr *wh; + int hdrlen; + struct mwl8k_dma_data *tr; + + /* + * Add a firmware DMA header; the firmware requires that we + * present a 2-byte payload length followed by a 4-address + * header (without QoS field), followed (optionally) by any + * WEP/ExtIV header (but only filled in for CCMP). + */ + wh = (struct ieee80211_hdr *)skb->data; + + hdrlen = ieee80211_hdrlen(wh->frame_control); + if (hdrlen != sizeof(*tr)) + skb_push(skb, sizeof(*tr) - hdrlen); + + if (ieee80211_is_data_qos(wh->frame_control)) + hdrlen -= 2; + + tr = (struct mwl8k_dma_data *)skb->data; + if (wh != &tr->wh) + memmove(&tr->wh, wh, hdrlen); + if (hdrlen != sizeof(tr->wh)) + memset(((void *)&tr->wh) + hdrlen, 0, sizeof(tr->wh) - hdrlen); + + /* + * Firmware length is the length of the fully formed "802.11 + * payload". That is, everything except for the 802.11 header. + * This includes all crypto material including the MIC. + */ + tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr)); +} + + +/* + * Packet reception for 88w8366 AP firmware. + */ +struct mwl8k_rxd_8366_ap { + __le16 pkt_len; + __u8 sq2; + __u8 rate; + __le32 pkt_phys_addr; + __le32 next_rxd_phys_addr; + __le16 qos_control; + __le16 htsig2; + __le32 hw_rssi_info; + __le32 hw_noise_floor_info; + __u8 noise_floor; + __u8 pad0[3]; + __u8 rssi; + __u8 rx_status; + __u8 channel; + __u8 rx_ctrl; +} __attribute__((packed)); + +#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80 +#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40 +#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f) + +#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80 + +static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr) +{ + struct mwl8k_rxd_8366_ap *rxd = _rxd; + + rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); + rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST; +} + +static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len) +{ + struct mwl8k_rxd_8366_ap *rxd = _rxd; + + rxd->pkt_len = cpu_to_le16(len); + rxd->pkt_phys_addr = cpu_to_le32(addr); + wmb(); + rxd->rx_ctrl = 0; +} + +static int +mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status, + __le16 *qos) +{ + struct mwl8k_rxd_8366_ap *rxd = _rxd; + + if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST)) + return -1; + rmb(); + + memset(status, 0, sizeof(*status)); + + status->signal = -rxd->rssi; + status->noise = -rxd->noise_floor; + + if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) { + status->flag |= RX_FLAG_HT; + if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ) + status->flag |= RX_FLAG_40MHZ; + status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate); + } else { + int i; + + for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) { + if (mwl8k_rates_24[i].hw_value == rxd->rate) { + status->rate_idx = i; + break; + } + } + } + + if (rxd->channel > 14) { + status->band = IEEE80211_BAND_5GHZ; + if (!(status->flag & RX_FLAG_HT)) + status->rate_idx -= 5; + } else { + 
status->band = IEEE80211_BAND_2GHZ; + } + status->freq = ieee80211_channel_to_frequency(rxd->channel); + + *qos = rxd->qos_control; + + return le16_to_cpu(rxd->pkt_len); +} + +static struct rxd_ops rxd_8366_ap_ops = { + .rxd_size = sizeof(struct mwl8k_rxd_8366_ap), + .rxd_init = mwl8k_rxd_8366_ap_init, + .rxd_refill = mwl8k_rxd_8366_ap_refill, + .rxd_process = mwl8k_rxd_8366_ap_process, +}; + +/* + * Packet reception for STA firmware. + */ +struct mwl8k_rxd_sta { + __le16 pkt_len; + __u8 link_quality; + __u8 noise_level; + __le32 pkt_phys_addr; + __le32 next_rxd_phys_addr; + __le16 qos_control; + __le16 rate_info; + __le32 pad0[4]; + __u8 rssi; + __u8 channel; + __le16 pad1; + __u8 rx_ctrl; + __u8 rx_status; + __u8 pad2[2]; +} __attribute__((packed)); + +#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000 +#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3) +#define MWL8K_STA_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f) +#define MWL8K_STA_RATE_INFO_40MHZ 0x0004 +#define MWL8K_STA_RATE_INFO_SHORTGI 0x0002 +#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001 + +#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02 + +static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr) +{ + struct mwl8k_rxd_sta *rxd = _rxd; + + rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); + rxd->rx_ctrl = MWL8K_STA_RX_CTRL_OWNED_BY_HOST; +} + +static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len) +{ + struct mwl8k_rxd_sta *rxd = _rxd; + + rxd->pkt_len = cpu_to_le16(len); + rxd->pkt_phys_addr = cpu_to_le32(addr); + wmb(); + rxd->rx_ctrl = 0; +} + +static int +mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, + __le16 *qos) +{ + struct mwl8k_rxd_sta *rxd = _rxd; + u16 rate_info; + + if (!(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_OWNED_BY_HOST)) + return -1; + rmb(); + + rate_info = le16_to_cpu(rxd->rate_info); + + memset(status, 0, sizeof(*status)); + + status->signal = -rxd->rssi; + status->noise = -rxd->noise_level; + status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info); + status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info); + + if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE) + status->flag |= RX_FLAG_SHORTPRE; + if (rate_info & MWL8K_STA_RATE_INFO_40MHZ) + status->flag |= RX_FLAG_40MHZ; + if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI) + status->flag |= RX_FLAG_SHORT_GI; + if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT) + status->flag |= RX_FLAG_HT; + + if (rxd->channel > 14) { + status->band = IEEE80211_BAND_5GHZ; + if (!(status->flag & RX_FLAG_HT)) + status->rate_idx -= 5; + } else { + status->band = IEEE80211_BAND_2GHZ; + } + status->freq = ieee80211_channel_to_frequency(rxd->channel); + + *qos = rxd->qos_control; + + return le16_to_cpu(rxd->pkt_len); +} + +static struct rxd_ops rxd_sta_ops = { + .rxd_size = sizeof(struct mwl8k_rxd_sta), + .rxd_init = mwl8k_rxd_sta_init, + .rxd_refill = mwl8k_rxd_sta_refill, + .rxd_process = mwl8k_rxd_sta_process, +}; + + +#define MWL8K_RX_DESCS 256 +#define MWL8K_RX_MAXSZ 3800 + +static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_rx_queue *rxq = priv->rxq + index; + int size; + int i; + + rxq->rxd_count = 0; + rxq->head = 0; + rxq->tail = 0; + + size = MWL8K_RX_DESCS * priv->rxd_ops->rxd_size; + + rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma); + if (rxq->rxd == NULL) { + printk(KERN_ERR "%s: failed to alloc RX descriptors\n", + wiphy_name(hw->wiphy)); + return -ENOMEM; + } + memset(rxq->rxd, 0, size); + + rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), 
GFP_KERNEL); + if (rxq->buf == NULL) { + printk(KERN_ERR "%s: failed to alloc RX skbuff list\n", + wiphy_name(hw->wiphy)); + pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); + return -ENOMEM; + } + memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf)); + + for (i = 0; i < MWL8K_RX_DESCS; i++) { + int desc_size; + void *rxd; + int nexti; + dma_addr_t next_dma_addr; + + desc_size = priv->rxd_ops->rxd_size; + rxd = rxq->rxd + (i * priv->rxd_ops->rxd_size); + + nexti = i + 1; + if (nexti == MWL8K_RX_DESCS) + nexti = 0; + next_dma_addr = rxq->rxd_dma + (nexti * desc_size); + + priv->rxd_ops->rxd_init(rxd, next_dma_addr); + } + + return 0; +} + +static int rxq_refill(struct ieee80211_hw *hw, int index, int limit) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_rx_queue *rxq = priv->rxq + index; + int refilled; + + refilled = 0; + while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) { + struct sk_buff *skb; + dma_addr_t addr; + int rx; + void *rxd; + + skb = dev_alloc_skb(MWL8K_RX_MAXSZ); + if (skb == NULL) + break; + + addr = pci_map_single(priv->pdev, skb->data, + MWL8K_RX_MAXSZ, DMA_FROM_DEVICE); + + rxq->rxd_count++; + rx = rxq->tail++; + if (rxq->tail == MWL8K_RX_DESCS) + rxq->tail = 0; + rxq->buf[rx].skb = skb; + pci_unmap_addr_set(&rxq->buf[rx], dma, addr); + + rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size); + priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ); + + refilled++; + } + + return refilled; +} + +/* Must be called only when the card's reception is completely halted */ +static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_rx_queue *rxq = priv->rxq + index; + int i; + + for (i = 0; i < MWL8K_RX_DESCS; i++) { + if (rxq->buf[i].skb != NULL) { + pci_unmap_single(priv->pdev, + pci_unmap_addr(&rxq->buf[i], dma), + MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); + pci_unmap_addr_set(&rxq->buf[i], dma, 0); + + kfree_skb(rxq->buf[i].skb); + rxq->buf[i].skb = NULL; + } + } + + kfree(rxq->buf); + rxq->buf = NULL; + + pci_free_consistent(priv->pdev, + MWL8K_RX_DESCS * priv->rxd_ops->rxd_size, + rxq->rxd, rxq->rxd_dma); + rxq->rxd = NULL; +} + + +/* + * Scan a list of BSSIDs to process for finalize join. + * Allows for extension to process multiple BSSIDs. + */ +static inline int +mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh) +{ + return priv->capture_beacon && + ieee80211_is_beacon(wh->frame_control) && + !compare_ether_addr(wh->addr3, priv->capture_bssid); +} + +static inline void mwl8k_save_beacon(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct mwl8k_priv *priv = hw->priv; + + priv->capture_beacon = false; + memset(priv->capture_bssid, 0, ETH_ALEN); + + /* + * Use GFP_ATOMIC as rxq_process is called from + * the primary interrupt handler, memory allocation call + * must not sleep. 
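+ *
+ * The saved copy is then handed to priv->finalize_join_worker, which
+ * runs in process context and sends the actual FINALIZE_JOIN command
+ * to the firmware.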
+ */ + priv->beacon_skb = skb_copy(skb, GFP_ATOMIC); + if (priv->beacon_skb != NULL) + ieee80211_queue_work(hw, &priv->finalize_join_worker); +} + +static int rxq_process(struct ieee80211_hw *hw, int index, int limit) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_rx_queue *rxq = priv->rxq + index; + int processed; + + processed = 0; + while (rxq->rxd_count && limit--) { + struct sk_buff *skb; + void *rxd; + int pkt_len; + struct ieee80211_rx_status status; + __le16 qos; + + skb = rxq->buf[rxq->head].skb; + if (skb == NULL) + break; + + rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size); + + pkt_len = priv->rxd_ops->rxd_process(rxd, &status, &qos); + if (pkt_len < 0) + break; + + rxq->buf[rxq->head].skb = NULL; + + pci_unmap_single(priv->pdev, + pci_unmap_addr(&rxq->buf[rxq->head], dma), + MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); + pci_unmap_addr_set(&rxq->buf[rxq->head], dma, 0); + + rxq->head++; + if (rxq->head == MWL8K_RX_DESCS) + rxq->head = 0; + + rxq->rxd_count--; + + skb_put(skb, pkt_len); + mwl8k_remove_dma_header(skb, qos); + + /* + * Check for a pending join operation. Save a + * copy of the beacon and schedule a tasklet to + * send a FINALIZE_JOIN command to the firmware. + */ + if (mwl8k_capture_bssid(priv, (void *)skb->data)) + mwl8k_save_beacon(hw, skb); + + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); + ieee80211_rx_irqsafe(hw, skb); + + processed++; + } + + return processed; +} + + +/* + * Packet transmission. + */ + +#define MWL8K_TXD_STATUS_OK 0x00000001 +#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002 +#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004 +#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008 +#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000 + +#define MWL8K_QOS_QLEN_UNSPEC 0xff00 +#define MWL8K_QOS_ACK_POLICY_MASK 0x0060 +#define MWL8K_QOS_ACK_POLICY_NORMAL 0x0000 +#define MWL8K_QOS_ACK_POLICY_BLOCKACK 0x0060 +#define MWL8K_QOS_EOSP 0x0010 + +struct mwl8k_tx_desc { + __le32 status; + __u8 data_rate; + __u8 tx_priority; + __le16 qos_control; + __le32 pkt_phys_addr; + __le16 pkt_len; + __u8 dest_MAC_addr[ETH_ALEN]; + __le32 next_txd_phys_addr; + __le32 reserved; + __le16 rate_info; + __u8 peer_id; + __u8 tx_frag_cnt; +} __attribute__((packed)); + +#define MWL8K_TX_DESCS 128 + +static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_tx_queue *txq = priv->txq + index; + int size; + int i; + + txq->len = 0; + txq->head = 0; + txq->tail = 0; + + size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc); + + txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma); + if (txq->txd == NULL) { + printk(KERN_ERR "%s: failed to alloc TX descriptors\n", + wiphy_name(hw->wiphy)); + return -ENOMEM; + } + memset(txq->txd, 0, size); + + txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL); + if (txq->skb == NULL) { + printk(KERN_ERR "%s: failed to alloc TX skbuff list\n", + wiphy_name(hw->wiphy)); + pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); + return -ENOMEM; + } + memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb)); + + for (i = 0; i < MWL8K_TX_DESCS; i++) { + struct mwl8k_tx_desc *tx_desc; + int nexti; + + tx_desc = txq->txd + i; + nexti = (i + 1) % MWL8K_TX_DESCS; + + tx_desc->status = 0; + tx_desc->next_txd_phys_addr = + cpu_to_le32(txq->txd_dma + nexti * sizeof(*tx_desc)); + } + + return 0; +} + +static inline void mwl8k_tx_start(struct mwl8k_priv *priv) +{ + iowrite32(MWL8K_H2A_INT_PPA_READY, + priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); + 
iowrite32(MWL8K_H2A_INT_DUMMY, + priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); + ioread32(priv->regs + MWL8K_HIU_INT_CODE); +} + +static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + int i; + + for (i = 0; i < MWL8K_TX_QUEUES; i++) { + struct mwl8k_tx_queue *txq = priv->txq + i; + int fw_owned = 0; + int drv_owned = 0; + int unused = 0; + int desc; + + for (desc = 0; desc < MWL8K_TX_DESCS; desc++) { + struct mwl8k_tx_desc *tx_desc = txq->txd + desc; + u32 status; + + status = le32_to_cpu(tx_desc->status); + if (status & MWL8K_TXD_STATUS_FW_OWNED) + fw_owned++; + else + drv_owned++; + + if (tx_desc->pkt_len == 0) + unused++; + } + + printk(KERN_ERR "%s: txq[%d] len=%d head=%d tail=%d " + "fw_owned=%d drv_owned=%d unused=%d\n", + wiphy_name(hw->wiphy), i, + txq->len, txq->head, txq->tail, + fw_owned, drv_owned, unused); + } +} + +/* + * Must be called with priv->fw_mutex held and tx queues stopped. + */ +#define MWL8K_TX_WAIT_TIMEOUT_MS 5000 + +static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + DECLARE_COMPLETION_ONSTACK(tx_wait); + int retry; + int rc; + + might_sleep(); + + /* + * The TX queues are stopped at this point, so this test + * doesn't need to take ->tx_lock. + */ + if (!priv->pending_tx_pkts) + return 0; + + retry = 0; + rc = 0; + + spin_lock_bh(&priv->tx_lock); + priv->tx_wait = &tx_wait; + while (!rc) { + int oldcount; + unsigned long timeout; + + oldcount = priv->pending_tx_pkts; + + spin_unlock_bh(&priv->tx_lock); + timeout = wait_for_completion_timeout(&tx_wait, + msecs_to_jiffies(MWL8K_TX_WAIT_TIMEOUT_MS)); + spin_lock_bh(&priv->tx_lock); + + if (timeout) { + WARN_ON(priv->pending_tx_pkts); + if (retry) { + printk(KERN_NOTICE "%s: tx rings drained\n", + wiphy_name(hw->wiphy)); + } + break; + } + + if (priv->pending_tx_pkts < oldcount) { + printk(KERN_NOTICE "%s: waiting for tx rings " + "to drain (%d -> %d pkts)\n", + wiphy_name(hw->wiphy), oldcount, + priv->pending_tx_pkts); + retry = 1; + continue; + } + + priv->tx_wait = NULL; + + printk(KERN_ERR "%s: tx rings stuck for %d ms\n", + wiphy_name(hw->wiphy), MWL8K_TX_WAIT_TIMEOUT_MS); + mwl8k_dump_tx_rings(hw); + + rc = -ETIMEDOUT; + } + spin_unlock_bh(&priv->tx_lock); + + return rc; +} + +#define MWL8K_TXD_SUCCESS(status) \ + ((status) & (MWL8K_TXD_STATUS_OK | \ + MWL8K_TXD_STATUS_OK_RETRY | \ + MWL8K_TXD_STATUS_OK_MORE_RETRY)) + +static int +mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_tx_queue *txq = priv->txq + index; + int processed; + + processed = 0; + while (txq->len > 0 && limit--) { + int tx; + struct mwl8k_tx_desc *tx_desc; + unsigned long addr; + int size; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + u32 status; + + tx = txq->head; + tx_desc = txq->txd + tx; + + status = le32_to_cpu(tx_desc->status); + + if (status & MWL8K_TXD_STATUS_FW_OWNED) { + if (!force) + break; + tx_desc->status &= + ~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED); + } + + txq->head = (tx + 1) % MWL8K_TX_DESCS; + BUG_ON(txq->len == 0); + txq->len--; + priv->pending_tx_pkts--; + + addr = le32_to_cpu(tx_desc->pkt_phys_addr); + size = le16_to_cpu(tx_desc->pkt_len); + skb = txq->skb[tx]; + txq->skb[tx] = NULL; + + BUG_ON(skb == NULL); + pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE); + + mwl8k_remove_dma_header(skb, tx_desc->qos_control); + + /* Mark descriptor as unused */ + tx_desc->pkt_phys_addr = 0; + tx_desc->pkt_len = 0; + + info = 
IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(info); + if (MWL8K_TXD_SUCCESS(status)) + info->flags |= IEEE80211_TX_STAT_ACK; + + ieee80211_tx_status_irqsafe(hw, skb); + + processed++; + } + + if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex)) + ieee80211_wake_queue(hw, index); + + return processed; +} + +/* must be called only when the card's transmit is completely halted */ +static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_tx_queue *txq = priv->txq + index; + + mwl8k_txq_reclaim(hw, index, INT_MAX, 1); + + kfree(txq->skb); + txq->skb = NULL; + + pci_free_consistent(priv->pdev, + MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc), + txq->txd, txq->txd_dma); + txq->txd = NULL; +} + +static int +mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) +{ + struct mwl8k_priv *priv = hw->priv; + struct ieee80211_tx_info *tx_info; + struct mwl8k_vif *mwl8k_vif; + struct ieee80211_hdr *wh; + struct mwl8k_tx_queue *txq; + struct mwl8k_tx_desc *tx; + dma_addr_t dma; + u32 txstatus; + u8 txdatarate; + u16 qos; + + wh = (struct ieee80211_hdr *)skb->data; + if (ieee80211_is_data_qos(wh->frame_control)) + qos = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(wh))); + else + qos = 0; + + mwl8k_add_dma_header(skb); + wh = &((struct mwl8k_dma_data *)skb->data)->wh; + + tx_info = IEEE80211_SKB_CB(skb); + mwl8k_vif = MWL8K_VIF(tx_info->control.vif); + + if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { + wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno); + mwl8k_vif->seqno += 0x10; + } + + /* Setup firmware control bit fields for each frame type. */ + txstatus = 0; + txdatarate = 0; + if (ieee80211_is_mgmt(wh->frame_control) || + ieee80211_is_ctl(wh->frame_control)) { + txdatarate = 0; + qos |= MWL8K_QOS_QLEN_UNSPEC | MWL8K_QOS_EOSP; + } else if (ieee80211_is_data(wh->frame_control)) { + txdatarate = 1; + if (is_multicast_ether_addr(wh->addr1)) + txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX; + + qos &= ~MWL8K_QOS_ACK_POLICY_MASK; + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) + qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK; + else + qos |= MWL8K_QOS_ACK_POLICY_NORMAL; + } + + dma = pci_map_single(priv->pdev, skb->data, + skb->len, PCI_DMA_TODEVICE); + + if (pci_dma_mapping_error(priv->pdev, dma)) { + printk(KERN_DEBUG "%s: failed to dma map skb, " + "dropping TX frame.\n", wiphy_name(hw->wiphy)); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + spin_lock_bh(&priv->tx_lock); + + txq = priv->txq + index; + + BUG_ON(txq->skb[txq->tail] != NULL); + txq->skb[txq->tail] = skb; + + tx = txq->txd + txq->tail; + tx->data_rate = txdatarate; + tx->tx_priority = index; + tx->qos_control = cpu_to_le16(qos); + tx->pkt_phys_addr = cpu_to_le32(dma); + tx->pkt_len = cpu_to_le16(skb->len); + tx->rate_info = 0; + if (!priv->ap_fw && tx_info->control.sta != NULL) + tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id; + else + tx->peer_id = 0; + wmb(); + tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus); + + txq->len++; + priv->pending_tx_pkts++; + + txq->tail++; + if (txq->tail == MWL8K_TX_DESCS) + txq->tail = 0; + + if (txq->head == txq->tail) + ieee80211_stop_queue(hw, index); + + mwl8k_tx_start(priv); + + spin_unlock_bh(&priv->tx_lock); + + return NETDEV_TX_OK; +} + + +/* + * Firmware access. 
+ * + * We have the following requirements for issuing firmware commands: + * - Some commands require that the packet transmit path is idle when + * the command is issued. (For simplicity, we'll just quiesce the + * transmit path for every command.) + * - There are certain sequences of commands that need to be issued to + * the hardware sequentially, with no other intervening commands. + * + * This leads to an implementation of a "firmware lock" as a mutex that + * can be taken recursively, and which is taken by both the low-level + * command submission function (mwl8k_post_cmd) as well as any users of + * that function that require issuing of an atomic sequence of commands, + * and quiesces the transmit path whenever it's taken. + */ +static int mwl8k_fw_lock(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + + if (priv->fw_mutex_owner != current) { + int rc; + + mutex_lock(&priv->fw_mutex); + ieee80211_stop_queues(hw); + + rc = mwl8k_tx_wait_empty(hw); + if (rc) { + ieee80211_wake_queues(hw); + mutex_unlock(&priv->fw_mutex); + + return rc; + } + + priv->fw_mutex_owner = current; + } + + priv->fw_mutex_depth++; + + return 0; +} + +static void mwl8k_fw_unlock(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + + if (!--priv->fw_mutex_depth) { + ieee80211_wake_queues(hw); + priv->fw_mutex_owner = NULL; + mutex_unlock(&priv->fw_mutex); + } +} + + +/* + * Command processing. + */ + +/* Timeout firmware commands after 10s */ +#define MWL8K_CMD_TIMEOUT_MS 10000 + +static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) +{ + DECLARE_COMPLETION_ONSTACK(cmd_wait); + struct mwl8k_priv *priv = hw->priv; + void __iomem *regs = priv->regs; + dma_addr_t dma_addr; + unsigned int dma_size; + int rc; + unsigned long timeout = 0; + u8 buf[32]; + + cmd->result = 0xffff; + dma_size = le16_to_cpu(cmd->length); + dma_addr = pci_map_single(priv->pdev, cmd, dma_size, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(priv->pdev, dma_addr)) + return -ENOMEM; + + rc = mwl8k_fw_lock(hw); + if (rc) { + pci_unmap_single(priv->pdev, dma_addr, dma_size, + PCI_DMA_BIDIRECTIONAL); + return rc; + } + + priv->hostcmd_wait = &cmd_wait; + iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR); + iowrite32(MWL8K_H2A_INT_DOORBELL, + regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); + iowrite32(MWL8K_H2A_INT_DUMMY, + regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); + + timeout = wait_for_completion_timeout(&cmd_wait, + msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS)); + + priv->hostcmd_wait = NULL; + + mwl8k_fw_unlock(hw); + + pci_unmap_single(priv->pdev, dma_addr, dma_size, + PCI_DMA_BIDIRECTIONAL); + + if (!timeout) { + printk(KERN_ERR "%s: Command %s timeout after %u ms\n", + wiphy_name(hw->wiphy), + mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), + MWL8K_CMD_TIMEOUT_MS); + rc = -ETIMEDOUT; + } else { + int ms; + + ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(timeout); + + rc = cmd->result ? -EINVAL : 0; + if (rc) + printk(KERN_ERR "%s: Command %s error 0x%x\n", + wiphy_name(hw->wiphy), + mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), + le16_to_cpu(cmd->result)); + else if (ms > 2000) + printk(KERN_NOTICE "%s: Command %s took %d ms\n", + wiphy_name(hw->wiphy), + mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), + ms); + } + + return rc; +} + +static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct mwl8k_cmd_pkt *cmd) +{ + if (vif != NULL) + cmd->macid = MWL8K_VIF(vif)->macid; + return mwl8k_post_cmd(hw, cmd); +} + +/* + * Setup code shared between STA and AP firmware images. 
+ */ +static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + + BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24)); + memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24)); + + BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24)); + memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24)); + + priv->band_24.band = IEEE80211_BAND_2GHZ; + priv->band_24.channels = priv->channels_24; + priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24); + priv->band_24.bitrates = priv->rates_24; + priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24); + + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24; +} + +static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + + BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50)); + memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50)); + + BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50)); + memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50)); + + priv->band_50.band = IEEE80211_BAND_5GHZ; + priv->band_50.channels = priv->channels_50; + priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50); + priv->band_50.bitrates = priv->rates_50; + priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50); + + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50; +} + +/* + * CMD_GET_HW_SPEC (STA version). + */ +struct mwl8k_cmd_get_hw_spec_sta { + struct mwl8k_cmd_pkt header; + __u8 hw_rev; + __u8 host_interface; + __le16 num_mcaddrs; + __u8 perm_addr[ETH_ALEN]; + __le16 region_code; + __le32 fw_rev; + __le32 ps_cookie; + __le32 caps; + __u8 mcs_bitmap[16]; + __le32 rx_queue_ptr; + __le32 num_tx_queues; + __le32 tx_queue_ptrs[MWL8K_TX_QUEUES]; + __le32 caps2; + __le32 num_tx_desc_per_queue; + __le32 total_rxd; +} __attribute__((packed)); + +#define MWL8K_CAP_MAX_AMSDU 0x20000000 +#define MWL8K_CAP_GREENFIELD 0x08000000 +#define MWL8K_CAP_AMPDU 0x04000000 +#define MWL8K_CAP_RX_STBC 0x01000000 +#define MWL8K_CAP_TX_STBC 0x00800000 +#define MWL8K_CAP_SHORTGI_40MHZ 0x00400000 +#define MWL8K_CAP_SHORTGI_20MHZ 0x00200000 +#define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000 +#define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000 +#define MWL8K_CAP_DELAY_BA 0x00003000 +#define MWL8K_CAP_MIMO 0x00000200 +#define MWL8K_CAP_40MHZ 0x00000100 +#define MWL8K_CAP_BAND_MASK 0x00000007 +#define MWL8K_CAP_5GHZ 0x00000004 +#define MWL8K_CAP_2GHZ4 0x00000001 + +static void +mwl8k_set_ht_caps(struct ieee80211_hw *hw, + struct ieee80211_supported_band *band, u32 cap) +{ + int rx_streams; + int tx_streams; + + band->ht_cap.ht_supported = 1; + + if (cap & MWL8K_CAP_MAX_AMSDU) + band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; + if (cap & MWL8K_CAP_GREENFIELD) + band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD; + if (cap & MWL8K_CAP_AMPDU) { + hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; + } + if (cap & MWL8K_CAP_RX_STBC) + band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC; + if (cap & MWL8K_CAP_TX_STBC) + band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; + if (cap & MWL8K_CAP_SHORTGI_40MHZ) + band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + if (cap & MWL8K_CAP_SHORTGI_20MHZ) + band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; + if (cap & MWL8K_CAP_DELAY_BA) + band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA; + if (cap & MWL8K_CAP_40MHZ) + band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + + rx_streams = 
hweight32(cap & MWL8K_CAP_RX_ANTENNA_MASK); + tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK); + + band->ht_cap.mcs.rx_mask[0] = 0xff; + if (rx_streams >= 2) + band->ht_cap.mcs.rx_mask[1] = 0xff; + if (rx_streams >= 3) + band->ht_cap.mcs.rx_mask[2] = 0xff; + band->ht_cap.mcs.rx_mask[4] = 0x01; + band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + + if (rx_streams != tx_streams) { + band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + band->ht_cap.mcs.tx_params |= (tx_streams - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; + } +} + +static void +mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps) +{ + struct mwl8k_priv *priv = hw->priv; + + if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) { + mwl8k_setup_2ghz_band(hw); + if (caps & MWL8K_CAP_MIMO) + mwl8k_set_ht_caps(hw, &priv->band_24, caps); + } + + if (caps & MWL8K_CAP_5GHZ) { + mwl8k_setup_5ghz_band(hw); + if (caps & MWL8K_CAP_MIMO) + mwl8k_set_ht_caps(hw, &priv->band_50, caps); + } +} + +static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_cmd_get_hw_spec_sta *cmd; + int rc; + int i; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + + memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); + cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); + cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); + cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); + for (i = 0; i < MWL8K_TX_QUEUES; i++) + cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma); + cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); + cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); + + rc = mwl8k_post_cmd(hw, &cmd->header); + + if (!rc) { + SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr); + priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); + priv->fw_rev = le32_to_cpu(cmd->fw_rev); + priv->hw_rev = cmd->hw_rev; + mwl8k_set_caps(hw, le32_to_cpu(cmd->caps)); + priv->ap_macids_supported = 0x00000000; + priv->sta_macids_supported = 0x00000001; + } + + kfree(cmd); + return rc; +} + +/* + * CMD_GET_HW_SPEC (AP version). 
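+ *
+ * Unlike the STA variant above, the request carries essentially just
+ * the PS cookie; the reply returns SRAM offsets (wcbbase0..3, rxwrptr,
+ * rxrdptr) and the handler below writes the TX/RX descriptor ring DMA
+ * addresses into those SRAM locations itself.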
+ */ +struct mwl8k_cmd_get_hw_spec_ap { + struct mwl8k_cmd_pkt header; + __u8 hw_rev; + __u8 host_interface; + __le16 num_wcb; + __le16 num_mcaddrs; + __u8 perm_addr[ETH_ALEN]; + __le16 region_code; + __le16 num_antenna; + __le32 fw_rev; + __le32 wcbbase0; + __le32 rxwrptr; + __le32 rxrdptr; + __le32 ps_cookie; + __le32 wcbbase1; + __le32 wcbbase2; + __le32 wcbbase3; +} __attribute__((packed)); + +static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_cmd_get_hw_spec_ap *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + + memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); + cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); + + rc = mwl8k_post_cmd(hw, &cmd->header); + + if (!rc) { + int off; + + SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr); + priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); + priv->fw_rev = le32_to_cpu(cmd->fw_rev); + priv->hw_rev = cmd->hw_rev; + mwl8k_setup_2ghz_band(hw); + priv->ap_macids_supported = 0x000000ff; + priv->sta_macids_supported = 0x00000000; + + off = le32_to_cpu(cmd->wcbbase0) & 0xffff; + iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off); + + off = le32_to_cpu(cmd->rxwrptr) & 0xffff; + iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off); + + off = le32_to_cpu(cmd->rxrdptr) & 0xffff; + iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off); + + off = le32_to_cpu(cmd->wcbbase1) & 0xffff; + iowrite32(cpu_to_le32(priv->txq[1].txd_dma), priv->sram + off); + + off = le32_to_cpu(cmd->wcbbase2) & 0xffff; + iowrite32(cpu_to_le32(priv->txq[2].txd_dma), priv->sram + off); + + off = le32_to_cpu(cmd->wcbbase3) & 0xffff; + iowrite32(cpu_to_le32(priv->txq[3].txd_dma), priv->sram + off); + } + + kfree(cmd); + return rc; +} + +/* + * CMD_SET_HW_SPEC. + */ +struct mwl8k_cmd_set_hw_spec { + struct mwl8k_cmd_pkt header; + __u8 hw_rev; + __u8 host_interface; + __le16 num_mcaddrs; + __u8 perm_addr[ETH_ALEN]; + __le16 region_code; + __le32 fw_rev; + __le32 ps_cookie; + __le32 caps; + __le32 rx_queue_ptr; + __le32 num_tx_queues; + __le32 tx_queue_ptrs[MWL8K_TX_QUEUES]; + __le32 flags; + __le32 num_tx_desc_per_queue; + __le32 total_rxd; +} __attribute__((packed)); + +#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 +#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 +#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010 + +static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_cmd_set_hw_spec *cmd; + int rc; + int i; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_HW_SPEC); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + + cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); + cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); + cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); + for (i = 0; i < MWL8K_TX_QUEUES; i++) + cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma); + cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT | + MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP | + MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON); + cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); + cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_MAC_MULTICAST_ADR. 
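+ *
+ * __mwl8k_cmd_mac_multicast_adr() only builds the command packet
+ * (hence GFP_ATOMIC, as it is called from ->prepare_multicast());
+ * the packet is posted or discarded later in mwl8k_configure_filter().
+ * If all-multicast is requested, or the list exceeds the firmware
+ * limit (priv->num_mcaddrs), the filter degrades to "receive all
+ * multicast" with numaddr left at zero.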
+ */ +struct mwl8k_cmd_mac_multicast_adr { + struct mwl8k_cmd_pkt header; + __le16 action; + __le16 numaddr; + __u8 addr[0][ETH_ALEN]; +}; + +#define MWL8K_ENABLE_RX_DIRECTED 0x0001 +#define MWL8K_ENABLE_RX_MULTICAST 0x0002 +#define MWL8K_ENABLE_RX_ALL_MULTICAST 0x0004 +#define MWL8K_ENABLE_RX_BROADCAST 0x0008 + +static struct mwl8k_cmd_pkt * +__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, + int mc_count, struct dev_addr_list *mclist) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_cmd_mac_multicast_adr *cmd; + int size; + + if (allmulti || mc_count > priv->num_mcaddrs) { + allmulti = 1; + mc_count = 0; + } + + size = sizeof(*cmd) + mc_count * ETH_ALEN; + + cmd = kzalloc(size, GFP_ATOMIC); + if (cmd == NULL) + return NULL; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR); + cmd->header.length = cpu_to_le16(size); + cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_DIRECTED | + MWL8K_ENABLE_RX_BROADCAST); + + if (allmulti) { + cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST); + } else if (mc_count) { + int i; + + cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST); + cmd->numaddr = cpu_to_le16(mc_count); + for (i = 0; i < mc_count && mclist; i++) { + if (mclist->da_addrlen != ETH_ALEN) { + kfree(cmd); + return NULL; + } + memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN); + mclist = mclist->next; + } + } + + return &cmd->header; +} + +/* + * CMD_GET_STAT. + */ +struct mwl8k_cmd_get_stat { + struct mwl8k_cmd_pkt header; + __le32 stats[64]; +} __attribute__((packed)); + +#define MWL8K_STAT_ACK_FAILURE 9 +#define MWL8K_STAT_RTS_FAILURE 12 +#define MWL8K_STAT_FCS_ERROR 24 +#define MWL8K_STAT_RTS_SUCCESS 11 + +static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct mwl8k_cmd_get_stat *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + + rc = mwl8k_post_cmd(hw, &cmd->header); + if (!rc) { + stats->dot11ACKFailureCount = + le32_to_cpu(cmd->stats[MWL8K_STAT_ACK_FAILURE]); + stats->dot11RTSFailureCount = + le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_FAILURE]); + stats->dot11FCSErrorCount = + le32_to_cpu(cmd->stats[MWL8K_STAT_FCS_ERROR]); + stats->dot11RTSSuccessCount = + le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_SUCCESS]); + } + kfree(cmd); + + return rc; +} + +/* + * CMD_RADIO_CONTROL. + */ +struct mwl8k_cmd_radio_control { + struct mwl8k_cmd_pkt header; + __le16 action; + __le16 control; + __le16 radio_on; +} __attribute__((packed)); + +static int +mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_cmd_radio_control *cmd; + int rc; + + if (enable == priv->radio_on && !force) + return 0; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_RADIO_CONTROL); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le16(MWL8K_CMD_SET); + cmd->control = cpu_to_le16(priv->radio_short_preamble ? 3 : 1); + cmd->radio_on = cpu_to_le16(enable ? 
0x0001 : 0x0000); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + if (!rc) + priv->radio_on = enable; + + return rc; +} + +static int mwl8k_cmd_radio_disable(struct ieee80211_hw *hw) +{ + return mwl8k_cmd_radio_control(hw, 0, 0); +} + +static int mwl8k_cmd_radio_enable(struct ieee80211_hw *hw) +{ + return mwl8k_cmd_radio_control(hw, 1, 0); +} + +static int +mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble) +{ + struct mwl8k_priv *priv = hw->priv; + + priv->radio_short_preamble = short_preamble; + + return mwl8k_cmd_radio_control(hw, 1, 1); +} + +/* + * CMD_RF_TX_POWER. + */ +#define MWL8K_TX_POWER_LEVEL_TOTAL 8 + +struct mwl8k_cmd_rf_tx_power { + struct mwl8k_cmd_pkt header; + __le16 action; + __le16 support_level; + __le16 current_level; + __le16 reserved; + __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; +} __attribute__((packed)); + +static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm) +{ + struct mwl8k_cmd_rf_tx_power *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_TX_POWER); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le16(MWL8K_CMD_SET); + cmd->support_level = cpu_to_le16(dBm); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_RF_ANTENNA. + */ +struct mwl8k_cmd_rf_antenna { + struct mwl8k_cmd_pkt header; + __le16 antenna; + __le16 mode; +} __attribute__((packed)); + +#define MWL8K_RF_ANTENNA_RX 1 +#define MWL8K_RF_ANTENNA_TX 2 + +static int +mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask) +{ + struct mwl8k_cmd_rf_antenna *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_ANTENNA); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->antenna = cpu_to_le16(antenna); + cmd->mode = cpu_to_le16(mask); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_BEACON. + */ +struct mwl8k_cmd_set_beacon { + struct mwl8k_cmd_pkt header; + __le16 beacon_len; + __u8 beacon[0]; +}; + +static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u8 *beacon, int len) +{ + struct mwl8k_cmd_set_beacon *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON); + cmd->header.length = cpu_to_le16(sizeof(*cmd) + len); + cmd->beacon_len = cpu_to_le16(len); + memcpy(cmd->beacon, beacon, len); + + rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_PRE_SCAN. + */ +struct mwl8k_cmd_set_pre_scan { + struct mwl8k_cmd_pkt header; +} __attribute__((packed)); + +static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw) +{ + struct mwl8k_cmd_set_pre_scan *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_PRE_SCAN); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_POST_SCAN. 
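+ *
+ * SET_PRE_SCAN above drops the firmware's BSS filter; SET_POST_SCAN
+ * restores it for the given BSSID.  As noted in
+ * mwl8k_configure_filter(), the OUI part of that BSSID must be
+ * non-zero for the firmware to accept it, which is why
+ * 01:00:00:00:00:00 is used as a dummy when no STA interface exists.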
+ */ +struct mwl8k_cmd_set_post_scan { + struct mwl8k_cmd_pkt header; + __le32 isibss; + __u8 bssid[ETH_ALEN]; +} __attribute__((packed)); + +static int +mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac) +{ + struct mwl8k_cmd_set_post_scan *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_POST_SCAN); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->isibss = 0; + memcpy(cmd->bssid, mac, ETH_ALEN); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_RF_CHANNEL. + */ +struct mwl8k_cmd_set_rf_channel { + struct mwl8k_cmd_pkt header; + __le16 action; + __u8 current_channel; + __le32 channel_flags; +} __attribute__((packed)); + +static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, + struct ieee80211_conf *conf) +{ + struct ieee80211_channel *channel = conf->channel; + struct mwl8k_cmd_set_rf_channel *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RF_CHANNEL); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le16(MWL8K_CMD_SET); + cmd->current_channel = channel->hw_value; + + if (channel->band == IEEE80211_BAND_2GHZ) + cmd->channel_flags |= cpu_to_le32(0x00000001); + else if (channel->band == IEEE80211_BAND_5GHZ) + cmd->channel_flags |= cpu_to_le32(0x00000004); + + if (conf->channel_type == NL80211_CHAN_NO_HT || + conf->channel_type == NL80211_CHAN_HT20) + cmd->channel_flags |= cpu_to_le32(0x00000080); + else if (conf->channel_type == NL80211_CHAN_HT40MINUS) + cmd->channel_flags |= cpu_to_le32(0x000001900); + else if (conf->channel_type == NL80211_CHAN_HT40PLUS) + cmd->channel_flags |= cpu_to_le32(0x000000900); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_AID. + */ +#define MWL8K_FRAME_PROT_DISABLED 0x00 +#define MWL8K_FRAME_PROT_11G 0x07 +#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02 +#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06 + +struct mwl8k_cmd_update_set_aid { + struct mwl8k_cmd_pkt header; + __le16 aid; + + /* AP's MAC address (BSSID) */ + __u8 bssid[ETH_ALEN]; + __le16 protection_mode; + __u8 supp_rates[14]; +} __attribute__((packed)); + +static void legacy_rate_mask_to_array(u8 *rates, u32 mask) +{ + int i; + int j; + + /* + * Clear nonstandard rates 4 and 13. 
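+ *
+ * 0x1fef keeps bits 0-3 and 5-12 of the 14-bit legacy rate bitmap,
+ * i.e. it drops exactly the entries at indices 4 and 13 of
+ * mwl8k_rates_24[].  For example, a mask of 0x3fff (all 14 rates)
+ * becomes 0x1fef, so 12 hw_value entries are copied into rates[].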
+ */ + mask &= 0x1fef; + + for (i = 0, j = 0; i < 14; i++) { + if (mask & (1 << i)) + rates[j++] = mwl8k_rates_24[i].hw_value; + } +} + +static int +mwl8k_cmd_set_aid(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u32 legacy_rate_mask) +{ + struct mwl8k_cmd_update_set_aid *cmd; + u16 prot_mode; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->aid = cpu_to_le16(vif->bss_conf.aid); + memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); + + if (vif->bss_conf.use_cts_prot) { + prot_mode = MWL8K_FRAME_PROT_11G; + } else { + switch (vif->bss_conf.ht_operation_mode & + IEEE80211_HT_OP_MODE_PROTECTION) { + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: + prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY; + break; + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: + prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL; + break; + default: + prot_mode = MWL8K_FRAME_PROT_DISABLED; + break; + } + } + cmd->protection_mode = cpu_to_le16(prot_mode); + + legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_RATE. + */ +struct mwl8k_cmd_set_rate { + struct mwl8k_cmd_pkt header; + __u8 legacy_rates[14]; + + /* Bitmap for supported MCS codes. */ + __u8 mcs_set[16]; + __u8 reserved[16]; +} __attribute__((packed)); + +static int +mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 legacy_rate_mask, u8 *mcs_rates) +{ + struct mwl8k_cmd_set_rate *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask); + memcpy(cmd->mcs_set, mcs_rates, 16); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_FINALIZE_JOIN. + */ +#define MWL8K_FJ_BEACON_MAXLEN 128 + +struct mwl8k_cmd_finalize_join { + struct mwl8k_cmd_pkt header; + __le32 sleep_interval; /* Number of beacon periods to sleep */ + __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN]; +} __attribute__((packed)); + +static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame, + int framelen, int dtim) +{ + struct mwl8k_cmd_finalize_join *cmd; + struct ieee80211_mgmt *payload = frame; + int payload_len; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1); + + payload_len = framelen - ieee80211_hdrlen(payload->frame_control); + if (payload_len < 0) + payload_len = 0; + else if (payload_len > MWL8K_FJ_BEACON_MAXLEN) + payload_len = MWL8K_FJ_BEACON_MAXLEN; + + memcpy(cmd->beacon_data, &payload->u.beacon, payload_len); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_RTS_THRESHOLD. 
+ */ +struct mwl8k_cmd_set_rts_threshold { + struct mwl8k_cmd_pkt header; + __le16 action; + __le16 threshold; +} __attribute__((packed)); + +static int +mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh) +{ + struct mwl8k_cmd_set_rts_threshold *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le16(MWL8K_CMD_SET); + cmd->threshold = cpu_to_le16(rts_thresh); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_SLOT. + */ +struct mwl8k_cmd_set_slot { + struct mwl8k_cmd_pkt header; + __le16 action; + __u8 short_slot; +} __attribute__((packed)); + +static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time) +{ + struct mwl8k_cmd_set_slot *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le16(MWL8K_CMD_SET); + cmd->short_slot = short_slot_time; + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_EDCA_PARAMS. + */ +struct mwl8k_cmd_set_edca_params { + struct mwl8k_cmd_pkt header; + + /* See MWL8K_SET_EDCA_XXX below */ + __le16 action; + + /* TX opportunity in units of 32 us */ + __le16 txop; + + union { + struct { + /* Log exponent of max contention period: 0...15 */ + __le32 log_cw_max; + + /* Log exponent of min contention period: 0...15 */ + __le32 log_cw_min; + + /* Adaptive interframe spacing in units of 32us */ + __u8 aifs; + + /* TX queue to configure */ + __u8 txq; + } ap; + struct { + /* Log exponent of max contention period: 0...15 */ + __u8 log_cw_max; + + /* Log exponent of min contention period: 0...15 */ + __u8 log_cw_min; + + /* Adaptive interframe spacing in units of 32us */ + __u8 aifs; + + /* TX queue to configure */ + __u8 txq; + } sta; + }; +} __attribute__((packed)); + +#define MWL8K_SET_EDCA_CW 0x01 +#define MWL8K_SET_EDCA_TXOP 0x02 +#define MWL8K_SET_EDCA_AIFS 0x04 + +#define MWL8K_SET_EDCA_ALL (MWL8K_SET_EDCA_CW | \ + MWL8K_SET_EDCA_TXOP | \ + MWL8K_SET_EDCA_AIFS) + +static int +mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum, + __u16 cw_min, __u16 cw_max, + __u8 aifs, __u16 txop) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_cmd_set_edca_params *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL); + cmd->txop = cpu_to_le16(txop); + if (priv->ap_fw) { + cmd->ap.log_cw_max = cpu_to_le32(ilog2(cw_max + 1)); + cmd->ap.log_cw_min = cpu_to_le32(ilog2(cw_min + 1)); + cmd->ap.aifs = aifs; + cmd->ap.txq = qnum; + } else { + cmd->sta.log_cw_max = (u8)ilog2(cw_max + 1); + cmd->sta.log_cw_min = (u8)ilog2(cw_min + 1); + cmd->sta.aifs = aifs; + cmd->sta.txq = qnum; + } + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_WMM_MODE. 
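+ *
+ * WMM is switched off in mwl8k_start() and then enabled lazily from
+ * mwl8k_conf_tx() the first time mac80211 pushes EDCA parameters;
+ * priv->wmm_enabled tracks the current firmware state so the command
+ * is not resent needlessly.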
+ */ +struct mwl8k_cmd_set_wmm_mode { + struct mwl8k_cmd_pkt header; + __le16 action; +} __attribute__((packed)); + +static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_cmd_set_wmm_mode *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le16(!!enable); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + if (!rc) + priv->wmm_enabled = enable; + + return rc; +} + +/* + * CMD_MIMO_CONFIG. + */ +struct mwl8k_cmd_mimo_config { + struct mwl8k_cmd_pkt header; + __le32 action; + __u8 rx_antenna_map; + __u8 tx_antenna_map; +} __attribute__((packed)); + +static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx) +{ + struct mwl8k_cmd_mimo_config *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET); + cmd->rx_antenna_map = rx; + cmd->tx_antenna_map = tx; + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_USE_FIXED_RATE (STA version). + */ +struct mwl8k_cmd_use_fixed_rate_sta { + struct mwl8k_cmd_pkt header; + __le32 action; + __le32 allow_rate_drop; + __le32 num_rates; + struct { + __le32 is_ht_rate; + __le32 enable_retry; + __le32 rate; + __le32 retry_count; + } rate_entry[8]; + __le32 rate_type; + __le32 reserved1; + __le32 reserved2; +} __attribute__((packed)); + +#define MWL8K_USE_AUTO_RATE 0x0002 +#define MWL8K_UCAST_RATE 0 + +static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw) +{ + struct mwl8k_cmd_use_fixed_rate_sta *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE); + cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_USE_FIXED_RATE (AP version). + */ +struct mwl8k_cmd_use_fixed_rate_ap { + struct mwl8k_cmd_pkt header; + __le32 action; + __le32 allow_rate_drop; + __le32 num_rates; + struct mwl8k_rate_entry_ap { + __le32 is_ht_rate; + __le32 enable_retry; + __le32 rate; + __le32 retry_count; + } rate_entry[4]; + u8 multicast_rate; + u8 multicast_rate_type; + u8 management_rate; +} __attribute__((packed)); + +static int +mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt) +{ + struct mwl8k_cmd_use_fixed_rate_ap *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE); + cmd->multicast_rate = mcast; + cmd->management_rate = mgmt; + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_ENABLE_SNIFFER. 
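+ *
+ * Sniffer mode is only used with STA firmware and is mutually
+ * exclusive with having an interface up: mwl8k_add_interface()
+ * refuses to create an interface while priv->sniffer_enabled is set,
+ * and mwl8k_configure_filter_sniffer() refuses to enable sniffing
+ * while an interface exists.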
+ */ +struct mwl8k_cmd_enable_sniffer { + struct mwl8k_cmd_pkt header; + __le32 action; +} __attribute__((packed)); + +static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable) +{ + struct mwl8k_cmd_enable_sniffer *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le32(!!enable); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_MAC_ADDR. + */ +struct mwl8k_cmd_set_mac_addr { + struct mwl8k_cmd_pkt header; + union { + struct { + __le16 mac_type; + __u8 mac_addr[ETH_ALEN]; + } mbss; + __u8 mac_addr[ETH_ALEN]; + }; +} __attribute__((packed)); + +#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0 +#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1 +#define MWL8K_MAC_TYPE_PRIMARY_AP 2 +#define MWL8K_MAC_TYPE_SECONDARY_AP 3 + +static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u8 *mac) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); + struct mwl8k_cmd_set_mac_addr *cmd; + int mac_type; + int rc; + + mac_type = MWL8K_MAC_TYPE_PRIMARY_AP; + if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) { + if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported)) + mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT; + else + mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT; + } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) { + if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported)) + mac_type = MWL8K_MAC_TYPE_PRIMARY_AP; + else + mac_type = MWL8K_MAC_TYPE_SECONDARY_AP; + } + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + if (priv->ap_fw) { + cmd->mbss.mac_type = cpu_to_le16(mac_type); + memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN); + } else { + memcpy(cmd->mac_addr, mac, ETH_ALEN); + } + + rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_RATEADAPT_MODE. + */ +struct mwl8k_cmd_set_rate_adapt_mode { + struct mwl8k_cmd_pkt header; + __le16 action; + __le16 mode; +} __attribute__((packed)); + +static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode) +{ + struct mwl8k_cmd_set_rate_adapt_mode *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le16(MWL8K_CMD_SET); + cmd->mode = cpu_to_le16(mode); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_BSS_START. + */ +struct mwl8k_cmd_bss_start { + struct mwl8k_cmd_pkt header; + __le32 enable; +} __attribute__((packed)); + +static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int enable) +{ + struct mwl8k_cmd_bss_start *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->enable = cpu_to_le32(enable); + + rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_SET_NEW_STN. 
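+ *
+ * AP firmware station database.  The _add variant programs a peer's
+ * AID plus its legacy and HT rate sets, _add_self registers the
+ * interface's own MAC address when the interface is created, and
+ * _del (action REMOVE) tears the entry down.  STA firmware uses
+ * CMD_UPDATE_STADB further below instead.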
+ */ +struct mwl8k_cmd_set_new_stn { + struct mwl8k_cmd_pkt header; + __le16 aid; + __u8 mac_addr[6]; + __le16 stn_id; + __le16 action; + __le16 rsvd; + __le32 legacy_rates; + __u8 ht_rates[4]; + __le16 cap_info; + __le16 ht_capabilities_info; + __u8 mac_ht_param_info; + __u8 rev; + __u8 control_channel; + __u8 add_channel; + __le16 op_mode; + __le16 stbc; + __u8 add_qos_info; + __u8 is_qos_sta; + __le32 fw_sta_ptr; +} __attribute__((packed)); + +#define MWL8K_STA_ACTION_ADD 0 +#define MWL8K_STA_ACTION_REMOVE 2 + +static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mwl8k_cmd_set_new_stn *cmd; + u32 rates; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->aid = cpu_to_le16(sta->aid); + memcpy(cmd->mac_addr, sta->addr, ETH_ALEN); + cmd->stn_id = cpu_to_le16(sta->aid); + cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD); + if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) + rates = sta->supp_rates[IEEE80211_BAND_2GHZ]; + else + rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5; + cmd->legacy_rates = cpu_to_le32(rates); + if (sta->ht_cap.ht_supported) { + cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0]; + cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1]; + cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2]; + cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3]; + cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap); + cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) | + ((sta->ht_cap.ampdu_density & 7) << 2); + cmd->is_qos_sta = 1; + } + + rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); + kfree(cmd); + + return rc; +} + +static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mwl8k_cmd_set_new_stn *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + memcpy(cmd->mac_addr, vif->addr, ETH_ALEN); + + rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); + kfree(cmd); + + return rc; +} + +static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u8 *addr) +{ + struct mwl8k_cmd_set_new_stn *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + memcpy(cmd->mac_addr, addr, ETH_ALEN); + cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE); + + rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); + kfree(cmd); + + return rc; +} + +/* + * CMD_UPDATE_STADB. + */ +struct ewc_ht_info { + __le16 control1; + __le16 control2; + __le16 control3; +} __attribute__((packed)); + +struct peer_capability_info { + /* Peer type - AP vs. STA. */ + __u8 peer_type; + + /* Basic 802.11 capabilities from assoc resp. */ + __le16 basic_caps; + + /* Set if peer supports 802.11n high throughput (HT). */ + __u8 ht_support; + + /* Valid if HT is supported. */ + __le16 ht_caps; + __u8 extended_ht_caps; + struct ewc_ht_info ewc_info; + + /* Legacy rate table. Intersection of our rates and peer rates. */ + __u8 legacy_rates[12]; + + /* HT rate table. Intersection of our rates and peer rates. */ + __u8 ht_rates[16]; + __u8 pad[16]; + + /* If set, interoperability mode, no proprietary extensions. 
*/ + __u8 interop; + __u8 pad2; + __u8 station_id; + __le16 amsdu_enabled; +} __attribute__((packed)); + +struct mwl8k_cmd_update_stadb { + struct mwl8k_cmd_pkt header; + + /* See STADB_ACTION_TYPE */ + __le32 action; + + /* Peer MAC address */ + __u8 peer_addr[ETH_ALEN]; + + __le32 reserved; + + /* Peer info - valid during add/update. */ + struct peer_capability_info peer_info; +} __attribute__((packed)); + +#define MWL8K_STA_DB_MODIFY_ENTRY 1 +#define MWL8K_STA_DB_DEL_ENTRY 2 + +/* Peer Entry flags - used to define the type of the peer node */ +#define MWL8K_PEER_TYPE_ACCESSPOINT 2 + +static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mwl8k_cmd_update_stadb *cmd; + struct peer_capability_info *p; + u32 rates; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY); + memcpy(cmd->peer_addr, sta->addr, ETH_ALEN); + + p = &cmd->peer_info; + p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT; + p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability); + p->ht_support = sta->ht_cap.ht_supported; + p->ht_caps = sta->ht_cap.cap; + p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) | + ((sta->ht_cap.ampdu_density & 7) << 2); + if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) + rates = sta->supp_rates[IEEE80211_BAND_2GHZ]; + else + rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5; + legacy_rate_mask_to_array(p->legacy_rates, rates); + memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16); + p->interop = 1; + p->amsdu_enabled = 0; + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc ? rc : p->station_id; +} + +static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u8 *addr) +{ + struct mwl8k_cmd_update_stadb *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY); + memcpy(cmd->peer_addr, addr, ETH_ALEN); + + rc = mwl8k_post_cmd(hw, &cmd->header); + kfree(cmd); + + return rc; +} + + +/* + * Interrupt handling. 
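+ *
+ * The interrupt handler only dispatches: the TX-done and RX-ready
+ * status bits are left unacknowledged and their work is deferred to
+ * the poll_tx/poll_rx tasklets.  Each tasklet processes a fixed
+ * budget per run and acknowledges its status bit only when it
+ * finishes within that budget; otherwise it reschedules itself.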
+ */ +static irqreturn_t mwl8k_interrupt(int irq, void *dev_id) +{ + struct ieee80211_hw *hw = dev_id; + struct mwl8k_priv *priv = hw->priv; + u32 status; + + status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); + if (!status) + return IRQ_NONE; + + if (status & MWL8K_A2H_INT_TX_DONE) { + status &= ~MWL8K_A2H_INT_TX_DONE; + tasklet_schedule(&priv->poll_tx_task); + } + + if (status & MWL8K_A2H_INT_RX_READY) { + status &= ~MWL8K_A2H_INT_RX_READY; + tasklet_schedule(&priv->poll_rx_task); + } + + if (status) + iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); + + if (status & MWL8K_A2H_INT_OPC_DONE) { + if (priv->hostcmd_wait != NULL) + complete(priv->hostcmd_wait); + } + + if (status & MWL8K_A2H_INT_QUEUE_EMPTY) { + if (!mutex_is_locked(&priv->fw_mutex) && + priv->radio_on && priv->pending_tx_pkts) + mwl8k_tx_start(priv); + } + + return IRQ_HANDLED; +} + +static void mwl8k_tx_poll(unsigned long data) +{ + struct ieee80211_hw *hw = (struct ieee80211_hw *)data; + struct mwl8k_priv *priv = hw->priv; + int limit; + int i; + + limit = 32; + + spin_lock_bh(&priv->tx_lock); + + for (i = 0; i < MWL8K_TX_QUEUES; i++) + limit -= mwl8k_txq_reclaim(hw, i, limit, 0); + + if (!priv->pending_tx_pkts && priv->tx_wait != NULL) { + complete(priv->tx_wait); + priv->tx_wait = NULL; + } + + spin_unlock_bh(&priv->tx_lock); + + if (limit) { + writel(~MWL8K_A2H_INT_TX_DONE, + priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); + } else { + tasklet_schedule(&priv->poll_tx_task); + } +} + +static void mwl8k_rx_poll(unsigned long data) +{ + struct ieee80211_hw *hw = (struct ieee80211_hw *)data; + struct mwl8k_priv *priv = hw->priv; + int limit; + + limit = 32; + limit -= rxq_process(hw, 0, limit); + limit -= rxq_refill(hw, 0, limit); + + if (limit) { + writel(~MWL8K_A2H_INT_RX_READY, + priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); + } else { + tasklet_schedule(&priv->poll_rx_task); + } +} + + +/* + * Core driver operations. + */ +static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct mwl8k_priv *priv = hw->priv; + int index = skb_get_queue_mapping(skb); + int rc; + + if (!priv->radio_on) { + printk(KERN_DEBUG "%s: dropped TX frame since radio " + "disabled\n", wiphy_name(hw->wiphy)); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + rc = mwl8k_txq_xmit(hw, index, skb); + + return rc; +} + +static int mwl8k_start(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + int rc; + + rc = request_irq(priv->pdev->irq, mwl8k_interrupt, + IRQF_SHARED, MWL8K_NAME, hw); + if (rc) { + printk(KERN_ERR "%s: failed to register IRQ handler\n", + wiphy_name(hw->wiphy)); + return -EIO; + } + + /* Enable TX reclaim and RX tasklets. 
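+ * They are created in the disabled state in mwl8k_probe(), so this
+ * enable is balanced by the tasklet_disable() calls in mwl8k_stop()
+ * and in the error path below.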
*/ + tasklet_enable(&priv->poll_tx_task); + tasklet_enable(&priv->poll_rx_task); + + /* Enable interrupts */ + iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); + + rc = mwl8k_fw_lock(hw); + if (!rc) { + rc = mwl8k_cmd_radio_enable(hw); + + if (!priv->ap_fw) { + if (!rc) + rc = mwl8k_cmd_enable_sniffer(hw, 0); + + if (!rc) + rc = mwl8k_cmd_set_pre_scan(hw); + + if (!rc) + rc = mwl8k_cmd_set_post_scan(hw, + "\x00\x00\x00\x00\x00\x00"); + } + + if (!rc) + rc = mwl8k_cmd_set_rateadapt_mode(hw, 0); + + if (!rc) + rc = mwl8k_cmd_set_wmm_mode(hw, 0); + + mwl8k_fw_unlock(hw); + } + + if (rc) { + iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); + free_irq(priv->pdev->irq, hw); + tasklet_disable(&priv->poll_tx_task); + tasklet_disable(&priv->poll_rx_task); + } + + return rc; +} + +static void mwl8k_stop(struct ieee80211_hw *hw) +{ + struct mwl8k_priv *priv = hw->priv; + int i; + + mwl8k_cmd_radio_disable(hw); + + ieee80211_stop_queues(hw); + + /* Disable interrupts */ + iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); + free_irq(priv->pdev->irq, hw); + + /* Stop finalize join worker */ + cancel_work_sync(&priv->finalize_join_worker); + if (priv->beacon_skb != NULL) + dev_kfree_skb(priv->beacon_skb); + + /* Stop TX reclaim and RX tasklets. */ + tasklet_disable(&priv->poll_tx_task); + tasklet_disable(&priv->poll_rx_task); + + /* Return all skbs to mac80211 */ + for (i = 0; i < MWL8K_TX_QUEUES; i++) + mwl8k_txq_reclaim(hw, i, INT_MAX, 1); +} + +static int mwl8k_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_vif *mwl8k_vif; + u32 macids_supported; + int macid; + + /* + * Reject interface creation if sniffer mode is active, as + * STA operation is mutually exclusive with hardware sniffer + * mode. (Sniffer mode is only used on STA firmware.) + */ + if (priv->sniffer_enabled) { + printk(KERN_INFO "%s: unable to create STA " + "interface due to sniffer mode being enabled\n", + wiphy_name(hw->wiphy)); + return -EINVAL; + } + + + switch (vif->type) { + case NL80211_IFTYPE_AP: + macids_supported = priv->ap_macids_supported; + break; + case NL80211_IFTYPE_STATION: + macids_supported = priv->sta_macids_supported; + break; + default: + return -EINVAL; + } + + macid = ffs(macids_supported & ~priv->macids_used); + if (!macid--) + return -EBUSY; + + /* Setup driver private area. */ + mwl8k_vif = MWL8K_VIF(vif); + memset(mwl8k_vif, 0, sizeof(*mwl8k_vif)); + mwl8k_vif->vif = vif; + mwl8k_vif->macid = macid; + mwl8k_vif->seqno = 0; + + /* Set the mac address. 
*/ + mwl8k_cmd_set_mac_addr(hw, vif, vif->addr); + + if (priv->ap_fw) + mwl8k_cmd_set_new_stn_add_self(hw, vif); + + priv->macids_used |= 1 << mwl8k_vif->macid; + list_add_tail(&mwl8k_vif->list, &priv->vif_list); + + return 0; +} + +static void mwl8k_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); + + if (priv->ap_fw) + mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr); + + mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00"); + + priv->macids_used &= ~(1 << mwl8k_vif->macid); + list_del(&mwl8k_vif->list); +} + +static int mwl8k_config(struct ieee80211_hw *hw, u32 changed) +{ + struct ieee80211_conf *conf = &hw->conf; + struct mwl8k_priv *priv = hw->priv; + int rc; + + if (conf->flags & IEEE80211_CONF_IDLE) { + mwl8k_cmd_radio_disable(hw); + return 0; + } + + rc = mwl8k_fw_lock(hw); + if (rc) + return rc; + + rc = mwl8k_cmd_radio_enable(hw); + if (rc) + goto out; + + rc = mwl8k_cmd_set_rf_channel(hw, conf); + if (rc) + goto out; + + if (conf->power_level > 18) + conf->power_level = 18; + rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level); + if (rc) + goto out; + + if (priv->ap_fw) { + rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7); + if (!rc) + rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7); + } else { + rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7); + } + +out: + mwl8k_fw_unlock(hw); + + return rc; +} + +static void +mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + struct mwl8k_priv *priv = hw->priv; + u32 ap_legacy_rates; + u8 ap_mcs_rates[16]; + int rc; + + if (mwl8k_fw_lock(hw)) + return; + + /* + * No need to capture a beacon if we're no longer associated. + */ + if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc) + priv->capture_beacon = false; + + /* + * Get the AP's legacy and MCS rates. + */ + if (vif->bss_conf.assoc) { + struct ieee80211_sta *ap; + + rcu_read_lock(); + + ap = ieee80211_find_sta(vif, vif->bss_conf.bssid); + if (ap == NULL) { + rcu_read_unlock(); + goto out; + } + + if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) { + ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ]; + } else { + ap_legacy_rates = + ap->supp_rates[IEEE80211_BAND_5GHZ] << 5; + } + memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16); + + rcu_read_unlock(); + } + + if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) { + rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates); + if (rc) + goto out; + + rc = mwl8k_cmd_use_fixed_rate_sta(hw); + if (rc) + goto out; + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + rc = mwl8k_set_radio_preamble(hw, + vif->bss_conf.use_short_preamble); + if (rc) + goto out; + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot); + if (rc) + goto out; + } + + if (vif->bss_conf.assoc && + (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT | + BSS_CHANGED_HT))) { + rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates); + if (rc) + goto out; + } + + if (vif->bss_conf.assoc && + (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) { + /* + * Finalize the join. Tell rx handler to process + * next beacon from our BSSID. 
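+ * The captured beacon is handed to the finalize-join worker, which
+ * pulls the DTIM period out of the TIM IE and sends
+ * CMD_FINALIZE_JOIN to the firmware (see mwl8k_finalize_join_worker()).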
+ */ + memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN); + priv->capture_beacon = true; + } + +out: + mwl8k_fw_unlock(hw); +} + +static void +mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + int rc; + + if (mwl8k_fw_lock(hw)) + return; + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + rc = mwl8k_set_radio_preamble(hw, + vif->bss_conf.use_short_preamble); + if (rc) + goto out; + } + + if (changed & BSS_CHANGED_BASIC_RATES) { + int idx; + int rate; + + /* + * Use lowest supported basic rate for multicasts + * and management frames (such as probe responses -- + * beacons will always go out at 1 Mb/s). + */ + idx = ffs(vif->bss_conf.basic_rates); + if (idx) + idx--; + + if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) + rate = mwl8k_rates_24[idx].hw_value; + else + rate = mwl8k_rates_50[idx].hw_value; + + mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate); + } + + if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) { + struct sk_buff *skb; + + skb = ieee80211_beacon_get(hw, vif); + if (skb != NULL) { + mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len); + kfree_skb(skb); + } + } + + if (changed & BSS_CHANGED_BEACON_ENABLED) + mwl8k_cmd_bss_start(hw, vif, info->enable_beacon); + +out: + mwl8k_fw_unlock(hw); +} + +static void +mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + struct mwl8k_priv *priv = hw->priv; + + if (!priv->ap_fw) + mwl8k_bss_info_changed_sta(hw, vif, info, changed); + else + mwl8k_bss_info_changed_ap(hw, vif, info, changed); +} + +static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, + int mc_count, struct dev_addr_list *mclist) +{ + struct mwl8k_cmd_pkt *cmd; + + /* + * Synthesize and return a command packet that programs the + * hardware multicast address filter. At this point we don't + * know whether FIF_ALLMULTI is being requested, but if it is, + * we'll end up throwing this packet away and creating a new + * one in mwl8k_configure_filter(). + */ + cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist); + + return (unsigned long)cmd; +} + +static int +mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags) +{ + struct mwl8k_priv *priv = hw->priv; + + /* + * Hardware sniffer mode is mutually exclusive with STA + * operation, so refuse to enable sniffer mode if a STA + * interface is active. + */ + if (!list_empty(&priv->vif_list)) { + if (net_ratelimit()) + printk(KERN_INFO "%s: not enabling sniffer " + "mode because STA interface is active\n", + wiphy_name(hw->wiphy)); + return 0; + } + + if (!priv->sniffer_enabled) { + if (mwl8k_cmd_enable_sniffer(hw, 1)) + return 0; + priv->sniffer_enabled = true; + } + + *total_flags &= FIF_PROMISC_IN_BSS | FIF_ALLMULTI | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL | + FIF_OTHER_BSS; + + return 1; +} + +static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv) +{ + if (!list_empty(&priv->vif_list)) + return list_entry(priv->vif_list.next, struct mwl8k_vif, list); + + return NULL; +} + +static void mwl8k_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct mwl8k_priv *priv = hw->priv; + struct mwl8k_cmd_pkt *cmd = (void *)(unsigned long)multicast; + + /* + * AP firmware doesn't allow fine-grained control over + * the receive filter. 
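+ * Everything except FIF_ALLMULTI and FIF_BCN_PRBRESP_PROMISC is
+ * therefore cleared, and the multicast command packet built by
+ * ->prepare_multicast() is simply freed.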
+ */ + if (priv->ap_fw) { + *total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC; + kfree(cmd); + return; + } + + /* + * Enable hardware sniffer mode if FIF_CONTROL or + * FIF_OTHER_BSS is requested. + */ + if (*total_flags & (FIF_CONTROL | FIF_OTHER_BSS) && + mwl8k_configure_filter_sniffer(hw, changed_flags, total_flags)) { + kfree(cmd); + return; + } + + /* Clear unsupported feature flags */ + *total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC; + + if (mwl8k_fw_lock(hw)) { + kfree(cmd); + return; + } + + if (priv->sniffer_enabled) { + mwl8k_cmd_enable_sniffer(hw, 0); + priv->sniffer_enabled = false; + } + + if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) { + /* + * Disable the BSS filter. + */ + mwl8k_cmd_set_pre_scan(hw); + } else { + struct mwl8k_vif *mwl8k_vif; + const u8 *bssid; + + /* + * Enable the BSS filter. + * + * If there is an active STA interface, use that + * interface's BSSID, otherwise use a dummy one + * (where the OUI part needs to be nonzero for + * the BSSID to be accepted by POST_SCAN). + */ + mwl8k_vif = mwl8k_first_vif(priv); + if (mwl8k_vif != NULL) + bssid = mwl8k_vif->vif->bss_conf.bssid; + else + bssid = "\x01\x00\x00\x00\x00\x00"; + + mwl8k_cmd_set_post_scan(hw, bssid); + } + } + + /* + * If FIF_ALLMULTI is being requested, throw away the command + * packet that ->prepare_multicast() built and replace it with + * a command packet that enables reception of all multicast + * packets. + */ + if (*total_flags & FIF_ALLMULTI) { + kfree(cmd); + cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL); + } + + if (cmd != NULL) { + mwl8k_post_cmd(hw, cmd); + kfree(cmd); + } + + mwl8k_fw_unlock(hw); +} + +static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + return mwl8k_cmd_set_rts_threshold(hw, value); +} + +static int mwl8k_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mwl8k_priv *priv = hw->priv; + + if (priv->ap_fw) + return mwl8k_cmd_set_new_stn_del(hw, vif, sta->addr); + else + return mwl8k_cmd_update_stadb_del(hw, vif, sta->addr); +} + +static int mwl8k_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mwl8k_priv *priv = hw->priv; + int ret; + + if (!priv->ap_fw) { + ret = mwl8k_cmd_update_stadb_add(hw, vif, sta); + if (ret >= 0) { + MWL8K_STA(sta)->peer_id = ret; + return 0; + } + + return ret; + } + + return mwl8k_cmd_set_new_stn_add(hw, vif, sta); +} + +static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct mwl8k_priv *priv = hw->priv; + int rc; + + rc = mwl8k_fw_lock(hw); + if (!rc) { + if (!priv->wmm_enabled) + rc = mwl8k_cmd_set_wmm_mode(hw, 1); + + if (!rc) + rc = mwl8k_cmd_set_edca_params(hw, queue, + params->cw_min, + params->cw_max, + params->aifs, + params->txop); + + mwl8k_fw_unlock(hw); + } + + return rc; +} + +static int mwl8k_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + return mwl8k_cmd_get_stat(hw, stats); +} + +static int +mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + switch (action) { + case IEEE80211_AMPDU_RX_START: + case IEEE80211_AMPDU_RX_STOP: + if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION)) + return -ENOTSUPP; + return 0; + default: + return -ENOTSUPP; + } +} + +static const struct ieee80211_ops mwl8k_ops = { + .tx = mwl8k_tx, + 
.start = mwl8k_start, + .stop = mwl8k_stop, + .add_interface = mwl8k_add_interface, + .remove_interface = mwl8k_remove_interface, + .config = mwl8k_config, + .bss_info_changed = mwl8k_bss_info_changed, + .prepare_multicast = mwl8k_prepare_multicast, + .configure_filter = mwl8k_configure_filter, + .set_rts_threshold = mwl8k_set_rts_threshold, + .sta_add = mwl8k_sta_add, + .sta_remove = mwl8k_sta_remove, + .conf_tx = mwl8k_conf_tx, + .get_stats = mwl8k_get_stats, + .ampdu_action = mwl8k_ampdu_action, +}; + +static void mwl8k_finalize_join_worker(struct work_struct *work) +{ + struct mwl8k_priv *priv = + container_of(work, struct mwl8k_priv, finalize_join_worker); + struct sk_buff *skb = priv->beacon_skb; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + int len = skb->len - offsetof(struct ieee80211_mgmt, u.beacon.variable); + const u8 *tim = cfg80211_find_ie(WLAN_EID_TIM, + mgmt->u.beacon.variable, len); + int dtim_period = 1; + + if (tim && tim[1] >= 2) + dtim_period = tim[3]; + + mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len, dtim_period); + + dev_kfree_skb(skb); + priv->beacon_skb = NULL; +} + +enum { + MWL8363 = 0, + MWL8687, + MWL8366, +}; + +static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = { + [MWL8363] = { + .part_name = "88w8363", + .helper_image = "mwl8k/helper_8363.fw", + .fw_image = "mwl8k/fmimage_8363.fw", + }, + [MWL8687] = { + .part_name = "88w8687", + .helper_image = "mwl8k/helper_8687.fw", + .fw_image = "mwl8k/fmimage_8687.fw", + }, + [MWL8366] = { + .part_name = "88w8366", + .helper_image = "mwl8k/helper_8366.fw", + .fw_image = "mwl8k/fmimage_8366.fw", + .ap_rxd_ops = &rxd_8366_ap_ops, + }, +}; + +MODULE_FIRMWARE("mwl8k/helper_8363.fw"); +MODULE_FIRMWARE("mwl8k/fmimage_8363.fw"); +MODULE_FIRMWARE("mwl8k/helper_8687.fw"); +MODULE_FIRMWARE("mwl8k/fmimage_8687.fw"); +MODULE_FIRMWARE("mwl8k/helper_8366.fw"); +MODULE_FIRMWARE("mwl8k/fmimage_8366.fw"); + +static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { + { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, }, + { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, }, + { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, + { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, }, + { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, }, + { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, }, + { }, +}; +MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table); + +static int __devinit mwl8k_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static int printed_version = 0; + struct ieee80211_hw *hw; + struct mwl8k_priv *priv; + int rc; + int i; + + if (!printed_version) { + printk(KERN_INFO "%s version %s\n", MWL8K_DESC, MWL8K_VERSION); + printed_version = 1; + } + + + rc = pci_enable_device(pdev); + if (rc) { + printk(KERN_ERR "%s: Cannot enable new PCI device\n", + MWL8K_NAME); + return rc; + } + + rc = pci_request_regions(pdev, MWL8K_NAME); + if (rc) { + printk(KERN_ERR "%s: Cannot obtain PCI resources\n", + MWL8K_NAME); + goto err_disable_device; + } + + pci_set_master(pdev); + + + hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops); + if (hw == NULL) { + printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME); + rc = -ENOMEM; + goto err_free_reg; + } + + SET_IEEE80211_DEV(hw, &pdev->dev); + pci_set_drvdata(pdev, hw); + + priv = hw->priv; + priv->hw = hw; + priv->pdev = pdev; + priv->device_info = &mwl8k_info_tbl[id->driver_data]; + + + priv->sram = pci_iomap(pdev, 0, 0x10000); + if (priv->sram == NULL) { + printk(KERN_ERR "%s: Cannot map device SRAM\n", + 
wiphy_name(hw->wiphy)); + goto err_iounmap; + } + + /* + * If BAR0 is a 32 bit BAR, the register BAR will be BAR1. + * If BAR0 is a 64 bit BAR, the register BAR will be BAR2. + */ + priv->regs = pci_iomap(pdev, 1, 0x10000); + if (priv->regs == NULL) { + priv->regs = pci_iomap(pdev, 2, 0x10000); + if (priv->regs == NULL) { + printk(KERN_ERR "%s: Cannot map device registers\n", + wiphy_name(hw->wiphy)); + goto err_iounmap; + } + } + + + /* Reset firmware and hardware */ + mwl8k_hw_reset(priv); + + /* Ask userland hotplug daemon for the device firmware */ + rc = mwl8k_request_firmware(priv); + if (rc) { + printk(KERN_ERR "%s: Firmware files not found\n", + wiphy_name(hw->wiphy)); + goto err_stop_firmware; + } + + /* Load firmware into hardware */ + rc = mwl8k_load_firmware(hw); + if (rc) { + printk(KERN_ERR "%s: Cannot start firmware\n", + wiphy_name(hw->wiphy)); + goto err_stop_firmware; + } + + /* Reclaim memory once firmware is successfully loaded */ + mwl8k_release_firmware(priv); + + + if (priv->ap_fw) { + priv->rxd_ops = priv->device_info->ap_rxd_ops; + if (priv->rxd_ops == NULL) { + printk(KERN_ERR "%s: Driver does not have AP " + "firmware image support for this hardware\n", + wiphy_name(hw->wiphy)); + goto err_stop_firmware; + } + } else { + priv->rxd_ops = &rxd_sta_ops; + } + + priv->sniffer_enabled = false; + priv->wmm_enabled = false; + priv->pending_tx_pkts = 0; + + + /* + * Extra headroom is the size of the required DMA header + * minus the size of the smallest 802.11 frame (CTS frame). + */ + hw->extra_tx_headroom = + sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts); + + hw->channel_change_time = 10; + + hw->queues = MWL8K_TX_QUEUES; + + /* Set rssi and noise values to dBm */ + hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; + hw->vif_data_size = sizeof(struct mwl8k_vif); + hw->sta_data_size = sizeof(struct mwl8k_sta); + + priv->macids_used = 0; + INIT_LIST_HEAD(&priv->vif_list); + + /* Set default radio state and preamble */ + priv->radio_on = 0; + priv->radio_short_preamble = 0; + + /* Finalize join worker */ + INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker); + + /* TX reclaim and RX tasklets. */ + tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw); + tasklet_disable(&priv->poll_tx_task); + tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw); + tasklet_disable(&priv->poll_rx_task); + + /* Power management cookie */ + priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma); + if (priv->cookie == NULL) + goto err_stop_firmware; + + rc = mwl8k_rxq_init(hw, 0); + if (rc) + goto err_free_cookie; + rxq_refill(hw, 0, INT_MAX); + + mutex_init(&priv->fw_mutex); + priv->fw_mutex_owner = NULL; + priv->fw_mutex_depth = 0; + priv->hostcmd_wait = NULL; + + spin_lock_init(&priv->tx_lock); + + priv->tx_wait = NULL; + + for (i = 0; i < MWL8K_TX_QUEUES; i++) { + rc = mwl8k_txq_init(hw, i); + if (rc) + goto err_free_queues; + } + + iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); + iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); + iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY, + priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL); + iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); + + rc = request_irq(priv->pdev->irq, mwl8k_interrupt, + IRQF_SHARED, MWL8K_NAME, hw); + if (rc) { + printk(KERN_ERR "%s: failed to register IRQ handler\n", + wiphy_name(hw->wiphy)); + goto err_free_queues; + } + + /* + * Temporarily enable interrupts. 
Initial firmware host + * commands use interrupts and avoid polling. Disable + * interrupts when done. + */ + iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); + + /* Get config data, mac addrs etc */ + if (priv->ap_fw) { + rc = mwl8k_cmd_get_hw_spec_ap(hw); + if (!rc) + rc = mwl8k_cmd_set_hw_spec(hw); + } else { + rc = mwl8k_cmd_get_hw_spec_sta(hw); + } + if (rc) { + printk(KERN_ERR "%s: Cannot initialise firmware\n", + wiphy_name(hw->wiphy)); + goto err_free_irq; + } + + hw->wiphy->interface_modes = 0; + if (priv->ap_macids_supported) + hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP); + if (priv->sta_macids_supported) + hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION); + + + /* Turn radio off */ + rc = mwl8k_cmd_radio_disable(hw); + if (rc) { + printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy)); + goto err_free_irq; + } + + /* Clear MAC address */ + rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00"); + if (rc) { + printk(KERN_ERR "%s: Cannot clear MAC address\n", + wiphy_name(hw->wiphy)); + goto err_free_irq; + } + + /* Disable interrupts */ + iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); + free_irq(priv->pdev->irq, hw); + + rc = ieee80211_register_hw(hw); + if (rc) { + printk(KERN_ERR "%s: Cannot register device\n", + wiphy_name(hw->wiphy)); + goto err_free_queues; + } + + printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n", + wiphy_name(hw->wiphy), priv->device_info->part_name, + priv->hw_rev, hw->wiphy->perm_addr, + priv->ap_fw ? "AP" : "STA", + (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff, + (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff); + + return 0; + +err_free_irq: + iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); + free_irq(priv->pdev->irq, hw); + +err_free_queues: + for (i = 0; i < MWL8K_TX_QUEUES; i++) + mwl8k_txq_deinit(hw, i); + mwl8k_rxq_deinit(hw, 0); + +err_free_cookie: + if (priv->cookie != NULL) + pci_free_consistent(priv->pdev, 4, + priv->cookie, priv->cookie_dma); + +err_stop_firmware: + mwl8k_hw_reset(priv); + mwl8k_release_firmware(priv); + +err_iounmap: + if (priv->regs != NULL) + pci_iounmap(pdev, priv->regs); + + if (priv->sram != NULL) + pci_iounmap(pdev, priv->sram); + + pci_set_drvdata(pdev, NULL); + ieee80211_free_hw(hw); + +err_free_reg: + pci_release_regions(pdev); + +err_disable_device: + pci_disable_device(pdev); + + return rc; +} + +static void __devexit mwl8k_shutdown(struct pci_dev *pdev) +{ + printk(KERN_ERR "===>%s(%u)\n", __func__, __LINE__); +} + +static void __devexit mwl8k_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct mwl8k_priv *priv; + int i; + + if (hw == NULL) + return; + priv = hw->priv; + + ieee80211_stop_queues(hw); + + ieee80211_unregister_hw(hw); + + /* Remove TX reclaim and RX tasklets. 
*/ + tasklet_kill(&priv->poll_tx_task); + tasklet_kill(&priv->poll_rx_task); + + /* Stop hardware */ + mwl8k_hw_reset(priv); + + /* Return all skbs to mac80211 */ + for (i = 0; i < MWL8K_TX_QUEUES; i++) + mwl8k_txq_reclaim(hw, i, INT_MAX, 1); + + for (i = 0; i < MWL8K_TX_QUEUES; i++) + mwl8k_txq_deinit(hw, i); + + mwl8k_rxq_deinit(hw, 0); + + pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma); + + pci_iounmap(pdev, priv->regs); + pci_iounmap(pdev, priv->sram); + pci_set_drvdata(pdev, NULL); + ieee80211_free_hw(hw); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver mwl8k_driver = { + .name = MWL8K_NAME, + .id_table = mwl8k_pci_id_table, + .probe = mwl8k_probe, + .remove = __devexit_p(mwl8k_remove), + .shutdown = __devexit_p(mwl8k_shutdown), +}; + +static int __init mwl8k_init(void) +{ + return pci_register_driver(&mwl8k_driver); +} + +static void __exit mwl8k_exit(void) +{ + pci_unregister_driver(&mwl8k_driver); +} + +module_init(mwl8k_init); +module_exit(mwl8k_exit); + +MODULE_DESCRIPTION(MWL8K_DESC); +MODULE_VERSION(MWL8K_VERSION); +MODULE_AUTHOR("Lennert Buytenhek "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/p54/Makefile b/drivers/net/wireless/p54/Makefile new file mode 100644 index 0000000..b542e68 --- /dev/null +++ b/drivers/net/wireless/p54/Makefile @@ -0,0 +1,7 @@ +p54common-objs := eeprom.o fwio.o txrx.o main.o +p54common-$(CONFIG_P54_LEDS) += led.o + +obj-$(CONFIG_P54_COMMON) += p54common.o +obj-$(CONFIG_P54_USB) += p54usb.o +obj-$(CONFIG_P54_PCI) += p54pci.o +obj-$(CONFIG_P54_SPI) += p54spi.o diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c new file mode 100644 index 0000000..8e3818f --- /dev/null +++ b/drivers/net/wireless/p54/eeprom.c @@ -0,0 +1,760 @@ +/* + * EEPROM parser code for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2007-2009, Christian Lamparter + * Copyright 2008, Johannes Berg + * + * Based on: + * - the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * - stlc45xx driver + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include + +#include "p54.h" +#include "eeprom.h" +#include "lmac.h" + +static struct ieee80211_rate p54_bgrates[] = { + { .bitrate = 10, .hw_value = 0, }, + { .bitrate = 20, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, .hw_value = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60, .hw_value = 4, }, + { .bitrate = 90, .hw_value = 5, }, + { .bitrate = 120, .hw_value = 6, }, + { .bitrate = 180, .hw_value = 7, }, + { .bitrate = 240, .hw_value = 8, }, + { .bitrate = 360, .hw_value = 9, }, + { .bitrate = 480, .hw_value = 10, }, + { .bitrate = 540, .hw_value = 11, }, +}; + +static struct ieee80211_rate p54_arates[] = { + { .bitrate = 60, .hw_value = 4, }, + { .bitrate = 90, .hw_value = 5, }, + { .bitrate = 120, .hw_value = 6, }, + { .bitrate = 180, .hw_value = 7, }, + { .bitrate = 240, .hw_value = 8, }, + { .bitrate = 360, .hw_value = 9, }, + { .bitrate = 480, .hw_value = 10, }, + { .bitrate = 540, .hw_value = 11, }, +}; + +#define CHAN_HAS_CAL BIT(0) +#define CHAN_HAS_LIMIT BIT(1) +#define CHAN_HAS_CURVE BIT(2) +#define CHAN_HAS_ALL (CHAN_HAS_CAL | CHAN_HAS_LIMIT | CHAN_HAS_CURVE) + +struct p54_channel_entry { + u16 freq; + u16 data; + int index; + enum ieee80211_band band; +}; + +struct p54_channel_list { + struct p54_channel_entry *channels; + size_t entries; + size_t max_entries; + size_t band_channel_num[IEEE80211_NUM_BANDS]; +}; + +static int p54_get_band_from_freq(u16 freq) +{ + /* FIXME: sync these values with the 802.11 spec */ + + if ((freq >= 2412) && (freq <= 2484)) + return IEEE80211_BAND_2GHZ; + + if ((freq >= 4920) && (freq <= 5825)) + return IEEE80211_BAND_5GHZ; + + return -1; +} + +static int p54_compare_channels(const void *_a, + const void *_b) +{ + const struct p54_channel_entry *a = _a; + const struct p54_channel_entry *b = _b; + + return a->index - b->index; +} + +static int p54_fill_band_bitrates(struct ieee80211_hw *dev, + struct ieee80211_supported_band *band_entry, + enum ieee80211_band band) +{ + /* TODO: generate rate array dynamically */ + + switch (band) { + case IEEE80211_BAND_2GHZ: + band_entry->bitrates = p54_bgrates; + band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates); + break; + case IEEE80211_BAND_5GHZ: + band_entry->bitrates = p54_arates; + band_entry->n_bitrates = ARRAY_SIZE(p54_arates); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int p54_generate_band(struct ieee80211_hw *dev, + struct p54_channel_list *list, + enum ieee80211_band band) +{ + struct p54_common *priv = dev->priv; + struct ieee80211_supported_band *tmp, *old; + unsigned int i, j; + int ret = -ENOMEM; + + if ((!list->entries) || (!list->band_channel_num[band])) + return -EINVAL; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + goto err_out; + + tmp->channels = kzalloc(sizeof(struct ieee80211_channel) * + list->band_channel_num[band], GFP_KERNEL); + if (!tmp->channels) + goto err_out; + + ret = p54_fill_band_bitrates(dev, tmp, band); + if (ret) + goto err_out; + + for (i = 0, j = 0; (j < list->band_channel_num[band]) && + (i < list->entries); i++) { + + if (list->channels[i].band != band) + continue; + + if (list->channels[i].data != CHAN_HAS_ALL) { + printk(KERN_ERR "%s:%s%s%s is/are missing for " + "channel:%d [%d MHz].\n", + wiphy_name(dev->wiphy), + (list->channels[i].data & CHAN_HAS_CAL ? "" : + " [iqauto calibration data]"), + (list->channels[i].data & CHAN_HAS_LIMIT ? 
"" : + " [output power limits]"), + (list->channels[i].data & CHAN_HAS_CURVE ? "" : + " [curve data]"), + list->channels[i].index, list->channels[i].freq); + continue; + } + + tmp->channels[j].band = list->channels[i].band; + tmp->channels[j].center_freq = list->channels[i].freq; + j++; + } + + if (j == 0) { + printk(KERN_ERR "%s: Disabling totally damaged %s band.\n", + wiphy_name(dev->wiphy), (band == IEEE80211_BAND_2GHZ) ? + "2 GHz" : "5 GHz"); + + ret = -ENODATA; + goto err_out; + } + + tmp->n_channels = j; + old = priv->band_table[band]; + priv->band_table[band] = tmp; + if (old) { + kfree(old->channels); + kfree(old); + } + + return 0; + +err_out: + if (tmp) { + kfree(tmp->channels); + kfree(tmp); + } + + return ret; +} + +static void p54_update_channel_param(struct p54_channel_list *list, + u16 freq, u16 data) +{ + int band, i; + + /* + * usually all lists in the eeprom are mostly sorted. + * so it's very likely that the entry we are looking for + * is right at the end of the list + */ + for (i = list->entries; i >= 0; i--) { + if (freq == list->channels[i].freq) { + list->channels[i].data |= data; + break; + } + } + + if ((i < 0) && (list->entries < list->max_entries)) { + /* entry does not exist yet. Initialize a new one. */ + band = p54_get_band_from_freq(freq); + + /* + * filter out frequencies which don't belong into + * any supported band. + */ + if (band < 0) + return ; + + i = list->entries++; + list->band_channel_num[band]++; + + list->channels[i].freq = freq; + list->channels[i].data = data; + list->channels[i].band = band; + list->channels[i].index = ieee80211_frequency_to_channel(freq); + /* TODO: parse output_limit and fill max_power */ + } +} + +static int p54_generate_channel_lists(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + struct p54_channel_list *list; + unsigned int i, j, max_channel_num; + int ret = 0; + u16 freq; + + if ((priv->iq_autocal_len != priv->curve_data->entries) || + (priv->iq_autocal_len != priv->output_limit->entries)) + printk(KERN_ERR "%s: Unsupported or damaged EEPROM detected. 
" + "You may not be able to use all channels.\n", + wiphy_name(dev->wiphy)); + + max_channel_num = max_t(unsigned int, priv->output_limit->entries, + priv->iq_autocal_len); + max_channel_num = max_t(unsigned int, max_channel_num, + priv->curve_data->entries); + + list = kzalloc(sizeof(*list), GFP_KERNEL); + if (!list) { + ret = -ENOMEM; + goto free; + } + + list->max_entries = max_channel_num; + list->channels = kzalloc(sizeof(struct p54_channel_entry) * + max_channel_num, GFP_KERNEL); + if (!list->channels) + goto free; + + for (i = 0; i < max_channel_num; i++) { + if (i < priv->iq_autocal_len) { + freq = le16_to_cpu(priv->iq_autocal[i].freq); + p54_update_channel_param(list, freq, CHAN_HAS_CAL); + } + + if (i < priv->output_limit->entries) { + freq = le16_to_cpup((__le16 *) (i * + priv->output_limit->entry_size + + priv->output_limit->offset + + priv->output_limit->data)); + + p54_update_channel_param(list, freq, CHAN_HAS_LIMIT); + } + + if (i < priv->curve_data->entries) { + freq = le16_to_cpup((__le16 *) (i * + priv->curve_data->entry_size + + priv->curve_data->offset + + priv->curve_data->data)); + + p54_update_channel_param(list, freq, CHAN_HAS_CURVE); + } + } + + /* sort the list by the channel index */ + sort(list->channels, list->entries, sizeof(struct p54_channel_entry), + p54_compare_channels, NULL); + + for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) { + if (p54_generate_band(dev, list, i) == 0) + j++; + } + if (j == 0) { + /* no useable band available. */ + ret = -EINVAL; + } + +free: + if (list) { + kfree(list->channels); + kfree(list); + } + + return ret; +} + +static int p54_convert_rev0(struct ieee80211_hw *dev, + struct pda_pa_curve_data *curve_data) +{ + struct p54_common *priv = dev->priv; + struct p54_pa_curve_data_sample *dst; + struct pda_pa_curve_data_sample_rev0 *src; + size_t cd_len = sizeof(*curve_data) + + (curve_data->points_per_channel*sizeof(*dst) + 2) * + curve_data->channels; + unsigned int i, j; + void *source, *target; + + priv->curve_data = kmalloc(sizeof(*priv->curve_data) + cd_len, + GFP_KERNEL); + if (!priv->curve_data) + return -ENOMEM; + + priv->curve_data->entries = curve_data->channels; + priv->curve_data->entry_size = sizeof(__le16) + + sizeof(*dst) * curve_data->points_per_channel; + priv->curve_data->offset = offsetof(struct pda_pa_curve_data, data); + priv->curve_data->len = cd_len; + memcpy(priv->curve_data->data, curve_data, sizeof(*curve_data)); + source = curve_data->data; + target = ((struct pda_pa_curve_data *) priv->curve_data->data)->data; + for (i = 0; i < curve_data->channels; i++) { + __le16 *freq = source; + source += sizeof(__le16); + *((__le16 *)target) = *freq; + target += sizeof(__le16); + for (j = 0; j < curve_data->points_per_channel; j++) { + dst = target; + src = source; + + dst->rf_power = src->rf_power; + dst->pa_detector = src->pa_detector; + dst->data_64qam = src->pcv; + /* "invent" the points for the other modulations */ +#define SUB(x, y) (u8)(((x) - (y)) > (x) ? 
0 : (x) - (y)) + dst->data_16qam = SUB(src->pcv, 12); + dst->data_qpsk = SUB(dst->data_16qam, 12); + dst->data_bpsk = SUB(dst->data_qpsk, 12); + dst->data_barker = SUB(dst->data_bpsk, 14); +#undef SUB + target += sizeof(*dst); + source += sizeof(*src); + } + } + + return 0; +} + +static int p54_convert_rev1(struct ieee80211_hw *dev, + struct pda_pa_curve_data *curve_data) +{ + struct p54_common *priv = dev->priv; + struct p54_pa_curve_data_sample *dst; + struct pda_pa_curve_data_sample_rev1 *src; + size_t cd_len = sizeof(*curve_data) + + (curve_data->points_per_channel*sizeof(*dst) + 2) * + curve_data->channels; + unsigned int i, j; + void *source, *target; + + priv->curve_data = kzalloc(cd_len + sizeof(*priv->curve_data), + GFP_KERNEL); + if (!priv->curve_data) + return -ENOMEM; + + priv->curve_data->entries = curve_data->channels; + priv->curve_data->entry_size = sizeof(__le16) + + sizeof(*dst) * curve_data->points_per_channel; + priv->curve_data->offset = offsetof(struct pda_pa_curve_data, data); + priv->curve_data->len = cd_len; + memcpy(priv->curve_data->data, curve_data, sizeof(*curve_data)); + source = curve_data->data; + target = ((struct pda_pa_curve_data *) priv->curve_data->data)->data; + for (i = 0; i < curve_data->channels; i++) { + __le16 *freq = source; + source += sizeof(__le16); + *((__le16 *)target) = *freq; + target += sizeof(__le16); + for (j = 0; j < curve_data->points_per_channel; j++) { + memcpy(target, source, sizeof(*src)); + + target += sizeof(*dst); + source += sizeof(*src); + } + source++; + } + + return 0; +} + +static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2", + "Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" }; + +static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len, + u16 type) +{ + struct p54_common *priv = dev->priv; + int offset = (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) ? 2 : 0; + int entry_size = sizeof(struct pda_rssi_cal_entry) + offset; + int num_entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2; + int i; + + if (len != (entry_size * num_entries)) { + printk(KERN_ERR "%s: unknown rssi calibration data packing " + " type:(%x) len:%d.\n", + wiphy_name(dev->wiphy), type, len); + + print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, + data, len); + + printk(KERN_ERR "%s: please report this issue.\n", + wiphy_name(dev->wiphy)); + return; + } + + for (i = 0; i < num_entries; i++) { + struct pda_rssi_cal_entry *cal = data + + (offset + i * entry_size); + priv->rssical_db[i].mul = (s16) le16_to_cpu(cal->mul); + priv->rssical_db[i].add = (s16) le16_to_cpu(cal->add); + } +} + +static void p54_parse_default_country(struct ieee80211_hw *dev, + void *data, int len) +{ + struct pda_country *country; + + if (len != sizeof(*country)) { + printk(KERN_ERR "%s: found possible invalid default country " + "eeprom entry. (entry size: %d)\n", + wiphy_name(dev->wiphy), len); + + print_hex_dump_bytes("country:", DUMP_PREFIX_NONE, + data, len); + + printk(KERN_ERR "%s: please report this issue.\n", + wiphy_name(dev->wiphy)); + return; + } + + country = (struct pda_country *) data; + if (country->flags == PDR_COUNTRY_CERT_CODE_PSEUDO) + regulatory_hint(dev->wiphy, country->alpha2); + else { + /* TODO: + * write a shared/common function that converts + * "Regulatory domain codes" (802.11-2007 14.8.2.2) + * into ISO/IEC 3166-1 alpha2 for regulatory_hint. 
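+ * Until such a helper exists, entries that carry a real certification code (PDR_COUNTRY_CERT_CODE_REAL) are silently ignored and no hint is sent.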
+ */ + } +} + +static int p54_convert_output_limits(struct ieee80211_hw *dev, + u8 *data, size_t len) +{ + struct p54_common *priv = dev->priv; + + if (len < 2) + return -EINVAL; + + if (data[0] != 0) { + printk(KERN_ERR "%s: unknown output power db revision:%x\n", + wiphy_name(dev->wiphy), data[0]); + return -EINVAL; + } + + if (2 + data[1] * sizeof(struct pda_channel_output_limit) > len) + return -EINVAL; + + priv->output_limit = kmalloc(data[1] * + sizeof(struct pda_channel_output_limit) + + sizeof(*priv->output_limit), GFP_KERNEL); + + if (!priv->output_limit) + return -ENOMEM; + + priv->output_limit->offset = 0; + priv->output_limit->entries = data[1]; + priv->output_limit->entry_size = + sizeof(struct pda_channel_output_limit); + priv->output_limit->len = priv->output_limit->entry_size * + priv->output_limit->entries + + priv->output_limit->offset; + + memcpy(priv->output_limit->data, &data[2], + data[1] * sizeof(struct pda_channel_output_limit)); + + return 0; +} + +static struct p54_cal_database *p54_convert_db(struct pda_custom_wrapper *src, + size_t total_len) +{ + struct p54_cal_database *dst; + size_t payload_len, entries, entry_size, offset; + + payload_len = le16_to_cpu(src->len); + entries = le16_to_cpu(src->entries); + entry_size = le16_to_cpu(src->entry_size); + offset = le16_to_cpu(src->offset); + if (((entries * entry_size + offset) != payload_len) || + (payload_len + sizeof(*src) != total_len)) + return NULL; + + dst = kmalloc(sizeof(*dst) + payload_len, GFP_KERNEL); + if (!dst) + return NULL; + + dst->entries = entries; + dst->entry_size = entry_size; + dst->offset = offset; + dst->len = payload_len; + + memcpy(dst->data, src->data, payload_len); + return dst; +} + +int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) +{ + struct p54_common *priv = dev->priv; + struct eeprom_pda_wrap *wrap; + struct pda_entry *entry; + unsigned int data_len, entry_len; + void *tmp; + int err; + u8 *end = (u8 *)eeprom + len; + u16 synth = 0; + + wrap = (struct eeprom_pda_wrap *) eeprom; + entry = (void *)wrap->data + le16_to_cpu(wrap->len); + + /* verify that at least the entry length/code fits */ + while ((u8 *)entry <= end - sizeof(*entry)) { + entry_len = le16_to_cpu(entry->len); + data_len = ((entry_len - 1) << 1); + + /* abort if entry exceeds whole structure */ + if ((u8 *)entry + sizeof(*entry) + data_len > end) + break; + + switch (le16_to_cpu(entry->code)) { + case PDR_MAC_ADDRESS: + if (data_len != ETH_ALEN) + break; + SET_IEEE80211_PERM_ADDR(dev, entry->data); + break; + case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS: + if (priv->output_limit) + break; + err = p54_convert_output_limits(dev, entry->data, + data_len); + if (err) + goto err; + break; + case PDR_PRISM_PA_CAL_CURVE_DATA: { + struct pda_pa_curve_data *curve_data = + (struct pda_pa_curve_data *)entry->data; + if (data_len < sizeof(*curve_data)) { + err = -EINVAL; + goto err; + } + + switch (curve_data->cal_method_rev) { + case 0: + err = p54_convert_rev0(dev, curve_data); + break; + case 1: + err = p54_convert_rev1(dev, curve_data); + break; + default: + printk(KERN_ERR "%s: unknown curve data " + "revision %d\n", + wiphy_name(dev->wiphy), + curve_data->cal_method_rev); + err = -ENODEV; + break; + } + if (err) + goto err; + } + break; + case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: + priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); + if (!priv->iq_autocal) { + err = -ENOMEM; + goto err; + } + + memcpy(priv->iq_autocal, entry->data, data_len); + priv->iq_autocal_len = data_len / sizeof(struct 
pda_iq_autocal_entry); + break; + case PDR_DEFAULT_COUNTRY: + p54_parse_default_country(dev, entry->data, data_len); + break; + case PDR_INTERFACE_LIST: + tmp = entry->data; + while ((u8 *)tmp < entry->data + data_len) { + struct exp_if *exp_if = tmp; + if (exp_if->if_id == cpu_to_le16(IF_ID_ISL39000)) + synth = le16_to_cpu(exp_if->variant); + tmp += sizeof(*exp_if); + } + break; + case PDR_HARDWARE_PLATFORM_COMPONENT_ID: + if (data_len < 2) + break; + priv->version = *(u8 *)(entry->data + 1); + break; + case PDR_RSSI_LINEAR_APPROXIMATION: + case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND: + case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED: + p54_parse_rssical(dev, entry->data, data_len, + le16_to_cpu(entry->code)); + break; + case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM: { + __le16 *src = (void *) entry->data; + s16 *dst = (void *) &priv->rssical_db; + int i; + + if (data_len != sizeof(priv->rssical_db)) { + err = -EINVAL; + goto err; + } + for (i = 0; i < sizeof(priv->rssical_db) / + sizeof(*src); i++) + *(dst++) = (s16) le16_to_cpu(*(src++)); + } + break; + case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: { + struct pda_custom_wrapper *pda = (void *) entry->data; + if (priv->output_limit || data_len < sizeof(*pda)) + break; + priv->output_limit = p54_convert_db(pda, data_len); + } + break; + case PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM: { + struct pda_custom_wrapper *pda = (void *) entry->data; + if (priv->curve_data || data_len < sizeof(*pda)) + break; + priv->curve_data = p54_convert_db(pda, data_len); + } + break; + case PDR_END: + /* make it overrun */ + entry_len = len; + break; + default: + break; + } + + entry = (void *)entry + (entry_len + 1)*2; + } + + if (!synth || !priv->iq_autocal || !priv->output_limit || + !priv->curve_data) { + printk(KERN_ERR "%s: not all required entries found in eeprom!\n", + wiphy_name(dev->wiphy)); + err = -EINVAL; + goto err; + } + + err = p54_generate_channel_lists(dev); + if (err) + goto err; + + priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK; + if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW) + p54_init_xbow_synth(priv); + if (!(synth & PDR_SYNTH_24_GHZ_DISABLED)) + dev->wiphy->bands[IEEE80211_BAND_2GHZ] = + priv->band_table[IEEE80211_BAND_2GHZ]; + if (!(synth & PDR_SYNTH_5_GHZ_DISABLED)) + dev->wiphy->bands[IEEE80211_BAND_5GHZ] = + priv->band_table[IEEE80211_BAND_5GHZ]; + if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED) + priv->rx_diversity_mask = 3; + if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED) + priv->tx_diversity_mask = 3; + + if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { + u8 perm_addr[ETH_ALEN]; + + printk(KERN_WARNING "%s: Invalid hwaddr! 
Using randomly generated MAC addr\n", + wiphy_name(dev->wiphy)); + random_ether_addr(perm_addr); + SET_IEEE80211_PERM_ADDR(dev, perm_addr); + } + + printk(KERN_INFO "%s: hwaddr %pM, MAC:isl38%02x RF:%s\n", + wiphy_name(dev->wiphy), dev->wiphy->perm_addr, priv->version, + p54_rf_chips[priv->rxhw]); + + return 0; + +err: + kfree(priv->iq_autocal); + kfree(priv->output_limit); + kfree(priv->curve_data); + priv->iq_autocal = NULL; + priv->output_limit = NULL; + priv->curve_data = NULL; + + printk(KERN_ERR "%s: eeprom parse failed!\n", + wiphy_name(dev->wiphy)); + return err; +} +EXPORT_SYMBOL_GPL(p54_parse_eeprom); + +int p54_read_eeprom(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + size_t eeprom_size = 0x2020, offset = 0, blocksize, maxblocksize; + int ret = -ENOMEM; + void *eeprom; + + maxblocksize = EEPROM_READBACK_LEN; + if (priv->fw_var >= 0x509) + maxblocksize -= 0xc; + else + maxblocksize -= 0x4; + + eeprom = kzalloc(eeprom_size, GFP_KERNEL); + if (unlikely(!eeprom)) + goto free; + + while (eeprom_size) { + blocksize = min(eeprom_size, maxblocksize); + ret = p54_download_eeprom(priv, (void *) (eeprom + offset), + offset, blocksize); + if (unlikely(ret)) + goto free; + + offset += blocksize; + eeprom_size -= blocksize; + } + + ret = p54_parse_eeprom(dev, eeprom, offset); +free: + kfree(eeprom); + return ret; +} +EXPORT_SYMBOL_GPL(p54_read_eeprom); diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h new file mode 100644 index 0000000..9051aef --- /dev/null +++ b/drivers/net/wireless/p54/eeprom.h @@ -0,0 +1,226 @@ +/* + * eeprom specific definitions for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2007-2009, Christian Lamparter + * + * Based on: + * - the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * - LMAC API interface header file for STLC4560 (lmac_longbow.h) + * Copyright (C) 2007 Conexant Systems, Inc. + * + * - islmvc driver + * Copyright (C) 2001 Intersil Americas Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef EEPROM_H +#define EEPROM_H + +/* PDA defines are Copyright (C) 2005 Nokia Corporation (taken from islsm_pda.h) */ + +struct pda_entry { + __le16 len; /* includes both code and data */ + __le16 code; + u8 data[0]; +} __packed; + +struct eeprom_pda_wrap { + __le32 magic; + __le16 pad; + __le16 len; + __le32 arm_opcode; + u8 data[0]; +} __packed; + +struct p54_iq_autocal_entry { + __le16 iq_param[4]; +} __packed; + +struct pda_iq_autocal_entry { + __le16 freq; + struct p54_iq_autocal_entry params; +} __packed; + +struct pda_channel_output_limit { + __le16 freq; + u8 val_bpsk; + u8 val_qpsk; + u8 val_16qam; + u8 val_64qam; + u8 rate_set_mask; + u8 rate_set_size; +} __packed; + +struct pda_pa_curve_data_sample_rev0 { + u8 rf_power; + u8 pa_detector; + u8 pcv; +} __packed; + +struct pda_pa_curve_data_sample_rev1 { + u8 rf_power; + u8 pa_detector; + u8 data_barker; + u8 data_bpsk; + u8 data_qpsk; + u8 data_16qam; + u8 data_64qam; +} __packed; + +struct pda_pa_curve_data { + u8 cal_method_rev; + u8 channels; + u8 points_per_channel; + u8 padding; + u8 data[0]; +} __packed; + +struct pda_rssi_cal_entry { + __le16 mul; + __le16 add; +} __packed; + +struct pda_country { + u8 regdomain; + u8 alpha2[2]; + u8 flags; +} __packed; + +struct pda_antenna_gain { + struct { + u8 gain_5GHz; /* 0.25 dBi units */ + u8 gain_2GHz; /* 0.25 dBi units */ + } __packed antenna[0]; +} __packed; + +struct pda_custom_wrapper { + __le16 entries; + __le16 entry_size; + __le16 offset; + __le16 len; + u8 data[0]; +} __packed; + +/* + * this defines the PDR codes used to build PDAs as defined in document + * number 553155. The current implementation mirrors version 1.1 of the + * document and lists only PDRs supported by the ARM platform. + */ + +/* common and choice range (0x0000 - 0x0fff) */ +#define PDR_END 0x0000 +#define PDR_MANUFACTURING_PART_NUMBER 0x0001 +#define PDR_PDA_VERSION 0x0002 +#define PDR_NIC_SERIAL_NUMBER 0x0003 +#define PDR_NIC_RAM_SIZE 0x0005 +#define PDR_RFMODEM_SUP_RANGE 0x0006 +#define PDR_PRISM_MAC_SUP_RANGE 0x0007 +#define PDR_NIC_ID 0x0008 + +#define PDR_MAC_ADDRESS 0x0101 +#define PDR_REGULATORY_DOMAIN_LIST 0x0103 /* obsolete */ +#define PDR_ALLOWED_CHAN_SET 0x0104 +#define PDR_DEFAULT_CHAN 0x0105 +#define PDR_TEMPERATURE_TYPE 0x0107 + +#define PDR_IFR_SETTING 0x0200 +#define PDR_RFR_SETTING 0x0201 +#define PDR_3861_BASELINE_REG_SETTINGS 0x0202 +#define PDR_3861_SHADOW_REG_SETTINGS 0x0203 +#define PDR_3861_IFRF_REG_SETTINGS 0x0204 + +#define PDR_3861_CHAN_CALIB_SET_POINTS 0x0300 +#define PDR_3861_CHAN_CALIB_INTEGRATOR 0x0301 + +#define PDR_3842_PRISM_II_NIC_CONFIG 0x0400 +#define PDR_PRISM_USB_ID 0x0401 +#define PDR_PRISM_PCI_ID 0x0402 +#define PDR_PRISM_PCI_IF_CONFIG 0x0403 +#define PDR_PRISM_PCI_PM_CONFIG 0x0404 + +#define PDR_3861_MF_TEST_CHAN_SET_POINTS 0x0900 +#define PDR_3861_MF_TEST_CHAN_INTEGRATORS 0x0901 + +/* ARM range (0x1000 - 0x1fff) */ +#define PDR_COUNTRY_INFORMATION 0x1000 /* obsolete */ +#define PDR_INTERFACE_LIST 0x1001 +#define PDR_HARDWARE_PLATFORM_COMPONENT_ID 0x1002 +#define PDR_OEM_NAME 0x1003 +#define PDR_PRODUCT_NAME 0x1004 +#define PDR_UTF8_OEM_NAME 0x1005 +#define PDR_UTF8_PRODUCT_NAME 0x1006 +#define PDR_COUNTRY_LIST 0x1007 +#define PDR_DEFAULT_COUNTRY 0x1008 + +#define PDR_ANTENNA_GAIN 0x1100 + +#define PDR_PRISM_INDIGO_PA_CALIBRATION_DATA 0x1901 +#define PDR_RSSI_LINEAR_APPROXIMATION 0x1902 +#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS 0x1903 +#define PDR_PRISM_PA_CAL_CURVE_DATA 0x1904 +#define PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND 0x1905 +#define 
PDR_PRISM_ZIF_TX_IQ_CALIBRATION 0x1906 +#define PDR_REGULATORY_POWER_LIMITS 0x1907 +#define PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED 0x1908 +#define PDR_RADIATED_TRANSMISSION_CORRECTION 0x1909 +#define PDR_PRISM_TX_IQ_CALIBRATION 0x190a + +/* reserved range (0x2000 - 0x7fff) */ + +/* customer range (0x8000 - 0xffff) */ +#define PDR_BASEBAND_REGISTERS 0x8000 +#define PDR_PER_CHANNEL_BASEBAND_REGISTERS 0x8001 + +/* used by our modificated eeprom image */ +#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM 0xDEAD +#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM 0xBEEF +#define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM 0xB05D + +/* Interface Definitions */ +#define PDR_INTERFACE_ROLE_SERVER 0x0000 +#define PDR_INTERFACE_ROLE_CLIENT 0x0001 + +/* PDR definitions for default country & country list */ +#define PDR_COUNTRY_CERT_CODE 0x80 +#define PDR_COUNTRY_CERT_CODE_REAL 0x00 +#define PDR_COUNTRY_CERT_CODE_PSEUDO 0x80 +#define PDR_COUNTRY_CERT_BAND 0x40 +#define PDR_COUNTRY_CERT_BAND_2GHZ 0x00 +#define PDR_COUNTRY_CERT_BAND_5GHZ 0x40 +#define PDR_COUNTRY_CERT_IODOOR 0x30 +#define PDR_COUNTRY_CERT_IODOOR_BOTH 0x00 +#define PDR_COUNTRY_CERT_IODOOR_INDOOR 0x20 +#define PDR_COUNTRY_CERT_IODOOR_OUTDOOR 0x30 +#define PDR_COUNTRY_CERT_INDEX 0x0f + +/* Specific LMAC FW/HW variant definitions */ +#define PDR_SYNTH_FRONTEND_MASK 0x0007 +#define PDR_SYNTH_FRONTEND_DUETTE3 0x0001 +#define PDR_SYNTH_FRONTEND_DUETTE2 0x0002 +#define PDR_SYNTH_FRONTEND_FRISBEE 0x0003 +#define PDR_SYNTH_FRONTEND_XBOW 0x0004 +#define PDR_SYNTH_FRONTEND_LONGBOW 0x0005 +#define PDR_SYNTH_IQ_CAL_MASK 0x0018 +#define PDR_SYNTH_IQ_CAL_PA_DETECTOR 0x0000 +#define PDR_SYNTH_IQ_CAL_DISABLED 0x0008 +#define PDR_SYNTH_IQ_CAL_ZIF 0x0010 +#define PDR_SYNTH_FAA_SWITCH_MASK 0x0020 +#define PDR_SYNTH_FAA_SWITCH_ENABLED 0x0020 +#define PDR_SYNTH_24_GHZ_MASK 0x0040 +#define PDR_SYNTH_24_GHZ_DISABLED 0x0040 +#define PDR_SYNTH_5_GHZ_MASK 0x0080 +#define PDR_SYNTH_5_GHZ_DISABLED 0x0080 +#define PDR_SYNTH_RX_DIV_MASK 0x0100 +#define PDR_SYNTH_RX_DIV_SUPPORTED 0x0100 +#define PDR_SYNTH_TX_DIV_MASK 0x0200 +#define PDR_SYNTH_TX_DIV_SUPPORTED 0x0200 +#define PDR_SYNTH_ASM_MASK 0x0400 +#define PDR_SYNTH_ASM_XSWON 0x0400 + +#endif /* EEPROM_H */ diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c new file mode 100644 index 0000000..e7b9e9c --- /dev/null +++ b/drivers/net/wireless/p54/fwio.c @@ -0,0 +1,716 @@ +/* + * Firmware I/O code for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2007-2009, Christian Lamparter + * Copyright 2008, Johannes Berg + * + * Based on: + * - the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * - stlc45xx driver + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include + +#include + +#include "p54.h" +#include "eeprom.h" +#include "lmac.h" + +int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) +{ + struct p54_common *priv = dev->priv; + struct exp_if *exp_if; + struct bootrec *bootrec; + u32 *data = (u32 *)fw->data; + u32 *end_data = (u32 *)fw->data + (fw->size >> 2); + u8 *fw_version = NULL; + size_t len; + int i; + int maxlen; + + if (priv->rx_start) + return 0; + + while (data < end_data && *data) + data++; + + while (data < end_data && !*data) + data++; + + bootrec = (struct bootrec *) data; + + while (bootrec->data <= end_data && (bootrec->data + + (len = le32_to_cpu(bootrec->len))) <= end_data) { + u32 code = le32_to_cpu(bootrec->code); + switch (code) { + case BR_CODE_COMPONENT_ID: + priv->fw_interface = be32_to_cpup((__be32 *) + bootrec->data); + switch (priv->fw_interface) { + case FW_LM86: + case FW_LM20: + case FW_LM87: { + char *iftype = (char *)bootrec->data; + printk(KERN_INFO "%s: p54 detected a LM%c%c " + "firmware\n", + wiphy_name(priv->hw->wiphy), + iftype[2], iftype[3]); + break; + } + case FW_FMAC: + default: + printk(KERN_ERR "%s: unsupported firmware\n", + wiphy_name(priv->hw->wiphy)); + return -ENODEV; + } + break; + case BR_CODE_COMPONENT_VERSION: + /* 24 bytes should be enough for all firmwares */ + if (strnlen((unsigned char *) bootrec->data, 24) < 24) + fw_version = (unsigned char *) bootrec->data; + break; + case BR_CODE_DESCR: { + struct bootrec_desc *desc = + (struct bootrec_desc *)bootrec->data; + priv->rx_start = le32_to_cpu(desc->rx_start); + /* FIXME add sanity checking */ + priv->rx_end = le32_to_cpu(desc->rx_end) - 0x3500; + priv->headroom = desc->headroom; + priv->tailroom = desc->tailroom; + priv->privacy_caps = desc->privacy_caps; + priv->rx_keycache_size = desc->rx_keycache_size; + if (le32_to_cpu(bootrec->len) == 11) + priv->rx_mtu = le16_to_cpu(desc->rx_mtu); + else + priv->rx_mtu = (size_t) + 0x620 - priv->tx_hdr_len; + maxlen = priv->tx_hdr_len + /* USB devices */ + sizeof(struct p54_rx_data) + + 4 + /* rx alignment */ + IEEE80211_MAX_FRAG_THRESHOLD; + if (priv->rx_mtu > maxlen && PAGE_SIZE == 4096) { + printk(KERN_INFO "p54: rx_mtu reduced from %d " + "to %d\n", priv->rx_mtu, maxlen); + priv->rx_mtu = maxlen; + } + break; + } + case BR_CODE_EXPOSED_IF: + exp_if = (struct exp_if *) bootrec->data; + for (i = 0; i < (len * sizeof(*exp_if) / 4); i++) + if (exp_if[i].if_id == cpu_to_le16(IF_ID_LMAC)) + priv->fw_var = le16_to_cpu(exp_if[i].variant); + break; + case BR_CODE_DEPENDENT_IF: + break; + case BR_CODE_END_OF_BRA: + case LEGACY_BR_CODE_END_OF_BRA: + end_data = NULL; + break; + default: + break; + } + bootrec = (struct bootrec *)&bootrec->data[len]; + } + + if (fw_version) + printk(KERN_INFO "%s: FW rev %s - Softmac protocol %x.%x\n", + wiphy_name(priv->hw->wiphy), fw_version, + priv->fw_var >> 8, priv->fw_var & 0xff); + + if (priv->fw_var < 0x500) + printk(KERN_INFO "%s: you are using an obsolete firmware. " + "visit http://wireless.kernel.org/en/users/Drivers/p54 " + "and grab one for \"kernel >= 2.6.28\"!\n", + wiphy_name(priv->hw->wiphy)); + + if (priv->fw_var >= 0x300) { + /* Firmware supports QoS, use it! 
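The per-queue limits below cap how many frames the driver keeps queued towards the firmware for each EDCA access category (VO/VI/BE/BK).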
*/ + + if (priv->fw_var >= 0x500) { + priv->tx_stats[P54_QUEUE_AC_VO].limit = 16; + priv->tx_stats[P54_QUEUE_AC_VI].limit = 16; + priv->tx_stats[P54_QUEUE_AC_BE].limit = 16; + priv->tx_stats[P54_QUEUE_AC_BK].limit = 16; + } else { + priv->tx_stats[P54_QUEUE_AC_VO].limit = 3; + priv->tx_stats[P54_QUEUE_AC_VI].limit = 4; + priv->tx_stats[P54_QUEUE_AC_BE].limit = 3; + priv->tx_stats[P54_QUEUE_AC_BK].limit = 2; + } + priv->hw->queues = P54_QUEUE_AC_NUM; + } + + printk(KERN_INFO "%s: cryptographic accelerator " + "WEP:%s, TKIP:%s, CCMP:%s\n", wiphy_name(priv->hw->wiphy), + (priv->privacy_caps & BR_DESC_PRIV_CAP_WEP) ? "YES" : + "no", (priv->privacy_caps & (BR_DESC_PRIV_CAP_TKIP | + BR_DESC_PRIV_CAP_MICHAEL)) ? "YES" : "no", + (priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP) ? + "YES" : "no"); + + if (priv->rx_keycache_size) { + /* + * NOTE: + * + * The firmware provides at most 255 (0 - 254) slots + * for keys which are then used to offload decryption. + * As a result the 255 entry (aka 0xff) can be used + * safely by the driver to mark keys that didn't fit + * into the full cache. This trick saves us from + * keeping a extra list for uploaded keys. + */ + + priv->used_rxkeys = kzalloc(BITS_TO_LONGS( + priv->rx_keycache_size), GFP_KERNEL); + + if (!priv->used_rxkeys) + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL_GPL(p54_parse_firmware); + +static struct sk_buff *p54_alloc_skb(struct p54_common *priv, u16 hdr_flags, + u16 payload_len, u16 type, gfp_t memflags) +{ + struct p54_hdr *hdr; + struct sk_buff *skb; + size_t frame_len = sizeof(*hdr) + payload_len; + + if (frame_len > P54_MAX_CTRL_FRAME_LEN) + return NULL; + + if (unlikely(skb_queue_len(&priv->tx_pending) > 64)) + return NULL; + + skb = __dev_alloc_skb(priv->tx_hdr_len + frame_len, memflags); + if (!skb) + return NULL; + skb_reserve(skb, priv->tx_hdr_len); + + hdr = (struct p54_hdr *) skb_put(skb, sizeof(*hdr)); + hdr->flags = cpu_to_le16(hdr_flags); + hdr->len = cpu_to_le16(payload_len); + hdr->type = cpu_to_le16(type); + hdr->tries = hdr->rts_tries = 0; + return skb; +} + +int p54_download_eeprom(struct p54_common *priv, void *buf, + u16 offset, u16 len) +{ + struct p54_eeprom_lm86 *eeprom_hdr; + struct sk_buff *skb; + size_t eeprom_hdr_size; + int ret = 0; + + if (priv->fw_var >= 0x509) + eeprom_hdr_size = sizeof(*eeprom_hdr); + else + eeprom_hdr_size = 0x4; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL, eeprom_hdr_size + + len, P54_CONTROL_TYPE_EEPROM_READBACK, + GFP_KERNEL); + if (unlikely(!skb)) + return -ENOMEM; + + mutex_lock(&priv->eeprom_mutex); + priv->eeprom = buf; + eeprom_hdr = (struct p54_eeprom_lm86 *) skb_put(skb, + eeprom_hdr_size + len); + + if (priv->fw_var < 0x509) { + eeprom_hdr->v1.offset = cpu_to_le16(offset); + eeprom_hdr->v1.len = cpu_to_le16(len); + } else { + eeprom_hdr->v2.offset = cpu_to_le32(offset); + eeprom_hdr->v2.len = cpu_to_le16(len); + eeprom_hdr->v2.magic2 = 0xf; + memcpy(eeprom_hdr->v2.magic, (const char *)"LOCK", 4); + } + + p54_tx(priv, skb); + + if (!wait_for_completion_interruptible_timeout( + &priv->eeprom_comp, HZ)) { + printk(KERN_ERR "%s: device does not respond!\n", + wiphy_name(priv->hw->wiphy)); + ret = -EBUSY; + } + priv->eeprom = NULL; + mutex_unlock(&priv->eeprom_mutex); + return ret; +} + +int p54_update_beacon_tim(struct p54_common *priv, u16 aid, bool set) +{ + struct sk_buff *skb; + struct p54_tim *tim; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*tim), + P54_CONTROL_TYPE_TIM, GFP_ATOMIC); + if (unlikely(!skb)) + return -ENOMEM; + + tim = (struct 
p54_tim *) skb_put(skb, sizeof(*tim)); + tim->count = 1; + tim->entry[0] = cpu_to_le16(set ? (aid | 0x8000) : aid); + p54_tx(priv, skb); + return 0; +} + +int p54_sta_unlock(struct p54_common *priv, u8 *addr) +{ + struct sk_buff *skb; + struct p54_sta_unlock *sta; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*sta), + P54_CONTROL_TYPE_PSM_STA_UNLOCK, GFP_ATOMIC); + if (unlikely(!skb)) + return -ENOMEM; + + sta = (struct p54_sta_unlock *)skb_put(skb, sizeof(*sta)); + memcpy(sta->addr, addr, ETH_ALEN); + p54_tx(priv, skb); + return 0; +} + +int p54_tx_cancel(struct p54_common *priv, __le32 req_id) +{ + struct sk_buff *skb; + struct p54_txcancel *cancel; + u32 _req_id = le32_to_cpu(req_id); + + if (unlikely(_req_id < priv->rx_start || _req_id > priv->rx_end)) + return -EINVAL; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*cancel), + P54_CONTROL_TYPE_TXCANCEL, GFP_ATOMIC); + if (unlikely(!skb)) + return -ENOMEM; + + cancel = (struct p54_txcancel *)skb_put(skb, sizeof(*cancel)); + cancel->req_id = req_id; + p54_tx(priv, skb); + return 0; +} + +int p54_setup_mac(struct p54_common *priv) +{ + struct sk_buff *skb; + struct p54_setup_mac *setup; + u16 mode; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*setup), + P54_CONTROL_TYPE_SETUP, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + setup = (struct p54_setup_mac *) skb_put(skb, sizeof(*setup)); + if (!(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) { + switch (priv->mode) { + case NL80211_IFTYPE_STATION: + mode = P54_FILTER_TYPE_STATION; + break; + case NL80211_IFTYPE_AP: + mode = P54_FILTER_TYPE_AP; + break; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + mode = P54_FILTER_TYPE_IBSS; + break; + case NL80211_IFTYPE_MONITOR: + mode = P54_FILTER_TYPE_PROMISCUOUS; + break; + default: + mode = P54_FILTER_TYPE_HIBERNATE; + break; + } + + /* + * "TRANSPARENT and PROMISCUOUS are mutually exclusive" + * STSW45X0C LMAC API - page 12 + */ + if (((priv->filter_flags & FIF_PROMISC_IN_BSS) || + (priv->filter_flags & FIF_OTHER_BSS)) && + (mode != P54_FILTER_TYPE_PROMISCUOUS)) + mode |= P54_FILTER_TYPE_TRANSPARENT; + } else { + mode = P54_FILTER_TYPE_HIBERNATE; + } + + setup->mac_mode = cpu_to_le16(mode); + memcpy(setup->mac_addr, priv->mac_addr, ETH_ALEN); + memcpy(setup->bssid, priv->bssid, ETH_ALEN); + setup->rx_antenna = 2 & priv->rx_diversity_mask; /* automatic */ + setup->rx_align = 0; + if (priv->fw_var < 0x500) { + setup->v1.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask); + memset(setup->v1.rts_rates, 0, 8); + setup->v1.rx_addr = cpu_to_le32(priv->rx_end); + setup->v1.max_rx = cpu_to_le16(priv->rx_mtu); + setup->v1.rxhw = cpu_to_le16(priv->rxhw); + setup->v1.wakeup_timer = cpu_to_le16(priv->wakeup_timer); + setup->v1.unalloc0 = cpu_to_le16(0); + } else { + setup->v2.rx_addr = cpu_to_le32(priv->rx_end); + setup->v2.max_rx = cpu_to_le16(priv->rx_mtu); + setup->v2.rxhw = cpu_to_le16(priv->rxhw); + setup->v2.timer = cpu_to_le16(priv->wakeup_timer); + setup->v2.truncate = cpu_to_le16(48896); + setup->v2.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask); + setup->v2.sbss_offset = 0; + setup->v2.mcast_window = 0; + setup->v2.rx_rssi_threshold = 0; + setup->v2.rx_ed_threshold = 0; + setup->v2.ref_clock = cpu_to_le32(644245094); + setup->v2.lpf_bandwidth = cpu_to_le16(65535); + setup->v2.osc_start_delay = cpu_to_le16(65535); + } + p54_tx(priv, skb); + return 0; +} + +int p54_scan(struct p54_common *priv, u16 mode, u16 dwell) +{ + struct sk_buff *skb; + struct p54_hdr *hdr; + struct 
p54_scan_head *head; + struct p54_iq_autocal_entry *iq_autocal; + union p54_scan_body_union *body; + struct p54_scan_tail_rate *rate; + struct pda_rssi_cal_entry *rssi; + unsigned int i; + void *entry; + int band = priv->hw->conf.channel->band; + __le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq); + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) + + 2 + sizeof(*iq_autocal) + sizeof(*body) + + sizeof(*rate) + 2 * sizeof(*rssi), + P54_CONTROL_TYPE_SCAN, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + head = (struct p54_scan_head *) skb_put(skb, sizeof(*head)); + memset(head->scan_params, 0, sizeof(head->scan_params)); + head->mode = cpu_to_le16(mode); + head->dwell = cpu_to_le16(dwell); + head->freq = freq; + + if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { + __le16 *pa_power_points = (__le16 *) skb_put(skb, 2); + *pa_power_points = cpu_to_le16(0x0c); + } + + iq_autocal = (void *) skb_put(skb, sizeof(*iq_autocal)); + for (i = 0; i < priv->iq_autocal_len; i++) { + if (priv->iq_autocal[i].freq != freq) + continue; + + memcpy(iq_autocal, &priv->iq_autocal[i].params, + sizeof(struct p54_iq_autocal_entry)); + break; + } + if (i == priv->iq_autocal_len) + goto err; + + if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) + body = (void *) skb_put(skb, sizeof(body->longbow)); + else + body = (void *) skb_put(skb, sizeof(body->normal)); + + for (i = 0; i < priv->output_limit->entries; i++) { + __le16 *entry_freq = (void *) (priv->output_limit->data + + priv->output_limit->entry_size * i); + + if (*entry_freq != freq) + continue; + + if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { + memcpy(&body->longbow.power_limits, + (void *) entry_freq + sizeof(__le16), + priv->output_limit->entry_size); + } else { + struct pda_channel_output_limit *limits = + (void *) entry_freq; + + body->normal.val_barker = 0x38; + body->normal.val_bpsk = body->normal.dup_bpsk = + limits->val_bpsk; + body->normal.val_qpsk = body->normal.dup_qpsk = + limits->val_qpsk; + body->normal.val_16qam = body->normal.dup_16qam = + limits->val_16qam; + body->normal.val_64qam = body->normal.dup_64qam = + limits->val_64qam; + } + break; + } + if (i == priv->output_limit->entries) + goto err; + + entry = (void *)(priv->curve_data->data + priv->curve_data->offset); + for (i = 0; i < priv->curve_data->entries; i++) { + if (*((__le16 *)entry) != freq) { + entry += priv->curve_data->entry_size; + continue; + } + + if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { + memcpy(&body->longbow.curve_data, + (void *) entry + sizeof(__le16), + priv->curve_data->entry_size); + } else { + struct p54_scan_body *chan = &body->normal; + struct pda_pa_curve_data *curve_data = + (void *) priv->curve_data->data; + + entry += sizeof(__le16); + chan->pa_points_per_curve = 8; + memset(chan->curve_data, 0, sizeof(*chan->curve_data)); + memcpy(chan->curve_data, entry, + sizeof(struct p54_pa_curve_data_sample) * + min((u8)8, curve_data->points_per_channel)); + } + break; + } + if (i == priv->curve_data->entries) + goto err; + + if ((priv->fw_var >= 0x500) && (priv->fw_var < 0x509)) { + rate = (void *) skb_put(skb, sizeof(*rate)); + rate->basic_rate_mask = cpu_to_le32(priv->basic_rate_mask); + for (i = 0; i < sizeof(rate->rts_rates); i++) + rate->rts_rates[i] = i; + } + + rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi)); + rssi->mul = cpu_to_le16(priv->rssical_db[band].mul); + rssi->add = cpu_to_le16(priv->rssical_db[band].add); + if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { + /* Longbow frontend needs ever more */ + rssi = 
(void *) skb_put(skb, sizeof(*rssi)); + rssi->mul = cpu_to_le16(priv->rssical_db[band].longbow_unkn); + rssi->add = cpu_to_le16(priv->rssical_db[band].longbow_unk2); + } + + if (priv->fw_var >= 0x509) { + rate = (void *) skb_put(skb, sizeof(*rate)); + rate->basic_rate_mask = cpu_to_le32(priv->basic_rate_mask); + for (i = 0; i < sizeof(rate->rts_rates); i++) + rate->rts_rates[i] = i; + } + + hdr = (struct p54_hdr *) skb->data; + hdr->len = cpu_to_le16(skb->len - sizeof(*hdr)); + + p54_tx(priv, skb); + return 0; + +err: + printk(KERN_ERR "%s: frequency change to channel %d failed.\n", + wiphy_name(priv->hw->wiphy), ieee80211_frequency_to_channel( + priv->hw->conf.channel->center_freq)); + + dev_kfree_skb_any(skb); + return -EINVAL; +} + +int p54_set_leds(struct p54_common *priv) +{ + struct sk_buff *skb; + struct p54_led *led; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*led), + P54_CONTROL_TYPE_LED, GFP_ATOMIC); + if (unlikely(!skb)) + return -ENOMEM; + + led = (struct p54_led *) skb_put(skb, sizeof(*led)); + led->flags = cpu_to_le16(0x0003); + led->mask[0] = led->mask[1] = cpu_to_le16(priv->softled_state); + led->delay[0] = cpu_to_le16(1); + led->delay[1] = cpu_to_le16(0); + p54_tx(priv, skb); + return 0; +} + +int p54_set_edcf(struct p54_common *priv) +{ + struct sk_buff *skb; + struct p54_edcf *edcf; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf), + P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC); + if (unlikely(!skb)) + return -ENOMEM; + + edcf = (struct p54_edcf *)skb_put(skb, sizeof(*edcf)); + if (priv->use_short_slot) { + edcf->slottime = 9; + edcf->sifs = 0x10; + edcf->eofpad = 0x00; + } else { + edcf->slottime = 20; + edcf->sifs = 0x0a; + edcf->eofpad = 0x06; + } + /* (see prism54/isl_oid.h for further details) */ + edcf->frameburst = cpu_to_le16(0); + edcf->round_trip_delay = cpu_to_le16(0); + edcf->flags = 0; + memset(edcf->mapping, 0, sizeof(edcf->mapping)); + memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue)); + p54_tx(priv, skb); + return 0; +} + +int p54_set_ps(struct p54_common *priv) +{ + struct sk_buff *skb; + struct p54_psm *psm; + unsigned int i; + u16 mode; + + if (priv->hw->conf.flags & IEEE80211_CONF_PS && + !priv->powersave_override) + mode = P54_PSM | P54_PSM_BEACON_TIMEOUT | P54_PSM_DTIM | + P54_PSM_CHECKSUM | P54_PSM_MCBC; + else + mode = P54_PSM_CAM; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*psm), + P54_CONTROL_TYPE_PSM, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + psm = (struct p54_psm *)skb_put(skb, sizeof(*psm)); + psm->mode = cpu_to_le16(mode); + psm->aid = cpu_to_le16(priv->aid); + for (i = 0; i < ARRAY_SIZE(psm->intervals); i++) { + psm->intervals[i].interval = + cpu_to_le16(priv->hw->conf.listen_interval); + psm->intervals[i].periods = cpu_to_le16(1); + } + + psm->beacon_rssi_skip_max = 200; + psm->rssi_delta_threshold = 0; + psm->nr = 1; + psm->exclude[0] = WLAN_EID_TIM; + + p54_tx(priv, skb); + return 0; +} + +int p54_init_xbow_synth(struct p54_common *priv) +{ + struct sk_buff *skb; + struct p54_xbow_synth *xbow; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*xbow), + P54_CONTROL_TYPE_XBOW_SYNTH_CFG, GFP_KERNEL); + if (unlikely(!skb)) + return -ENOMEM; + + xbow = (struct p54_xbow_synth *)skb_put(skb, sizeof(*xbow)); + xbow->magic1 = cpu_to_le16(0x1); + xbow->magic2 = cpu_to_le16(0x2); + xbow->freq = cpu_to_le16(5390); + memset(xbow->padding, 0, sizeof(xbow->padding)); + p54_tx(priv, skb); + return 0; +} + +int p54_upload_key(struct p54_common *priv, u8 algo, int 
slot, u8 idx, u8 len, + u8 *addr, u8* key) +{ + struct sk_buff *skb; + struct p54_keycache *rxkey; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*rxkey), + P54_CONTROL_TYPE_RX_KEYCACHE, GFP_KERNEL); + if (unlikely(!skb)) + return -ENOMEM; + + rxkey = (struct p54_keycache *)skb_put(skb, sizeof(*rxkey)); + rxkey->entry = slot; + rxkey->key_id = idx; + rxkey->key_type = algo; + if (addr) + memcpy(rxkey->mac, addr, ETH_ALEN); + else + memset(rxkey->mac, ~0, ETH_ALEN); + + switch (algo) { + case P54_CRYPTO_WEP: + case P54_CRYPTO_AESCCMP: + rxkey->key_len = min_t(u8, 16, len); + memcpy(rxkey->key, key, rxkey->key_len); + break; + + case P54_CRYPTO_TKIPMICHAEL: + rxkey->key_len = 24; + memcpy(rxkey->key, key, 16); + memcpy(&(rxkey->key[16]), &(key + [NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]), 8); + break; + + case P54_CRYPTO_NONE: + rxkey->key_len = 0; + memset(rxkey->key, 0, sizeof(rxkey->key)); + break; + + default: + printk(KERN_ERR "%s: invalid cryptographic algorithm: %d\n", + wiphy_name(priv->hw->wiphy), algo); + dev_kfree_skb(skb); + return -EINVAL; + } + + p54_tx(priv, skb); + return 0; +} + +int p54_fetch_statistics(struct p54_common *priv) +{ + struct ieee80211_tx_info *txinfo; + struct p54_tx_info *p54info; + struct sk_buff *skb; + + skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL, + sizeof(struct p54_statistics), + P54_CONTROL_TYPE_STAT_READBACK, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + /* + * The statistic feedback causes some extra headaches here, if it + * is not to crash/corrupt the firmware data structures. + * + * Unlike all other Control Get OIDs we can not use helpers like + * skb_put to reserve the space for the data we're requesting. + * Instead the extra frame length -which will hold the results later- + * will only be told to the p54_assign_address, so that following + * frames won't be placed into the allegedly empty area. + */ + txinfo = IEEE80211_SKB_CB(skb); + p54info = (void *) txinfo->rate_driver_data; + p54info->extra_len = sizeof(struct p54_statistics); + + p54_tx(priv, skb); + return 0; +} diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c new file mode 100644 index 0000000..9575ac0 --- /dev/null +++ b/drivers/net/wireless/p54/led.c @@ -0,0 +1,162 @@ +/* + * Common code for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2007-2009, Christian Lamparter + * Copyright 2008, Johannes Berg + * + * Based on: + * - the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * - stlc45xx driver + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include +#ifdef CONFIG_P54_LEDS +#include +#endif /* CONFIG_P54_LEDS */ + +#include "p54.h" +#include "lmac.h" + +static void p54_update_leds(struct work_struct *work) +{ + struct p54_common *priv = container_of(work, struct p54_common, + led_work.work); + int err, i, tmp, blink_delay = 400; + bool rerun = false; + + /* Don't toggle the LED, when the device is down. 
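priv->mode only holds a real interface type while the device is up, so a late-firing work item bails out here instead of sending LED commands to stopped hardware.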
*/ + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) + return ; + + for (i = 0; i < ARRAY_SIZE(priv->leds); i++) + if (priv->leds[i].toggled) { + priv->softled_state |= BIT(i); + + tmp = 70 + 200 / (priv->leds[i].toggled); + if (tmp < blink_delay) + blink_delay = tmp; + + if (priv->leds[i].led_dev.brightness == LED_OFF) + rerun = true; + + priv->leds[i].toggled = + !!priv->leds[i].led_dev.brightness; + } else + priv->softled_state &= ~BIT(i); + + err = p54_set_leds(priv); + if (err && net_ratelimit()) + printk(KERN_ERR "%s: failed to update LEDs (%d).\n", + wiphy_name(priv->hw->wiphy), err); + + if (rerun) + ieee80211_queue_delayed_work(priv->hw, &priv->led_work, + msecs_to_jiffies(blink_delay)); +} + +static void p54_led_brightness_set(struct led_classdev *led_dev, + enum led_brightness brightness) +{ + struct p54_led_dev *led = container_of(led_dev, struct p54_led_dev, + led_dev); + struct ieee80211_hw *dev = led->hw_dev; + struct p54_common *priv = dev->priv; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) + return ; + + if ((brightness) && (led->registered)) { + led->toggled++; + ieee80211_queue_delayed_work(priv->hw, &priv->led_work, HZ/10); + } +} + +static int p54_register_led(struct p54_common *priv, + unsigned int led_index, + char *name, char *trigger) +{ + struct p54_led_dev *led = &priv->leds[led_index]; + int err; + + if (led->registered) + return -EEXIST; + + snprintf(led->name, sizeof(led->name), "p54-%s::%s", + wiphy_name(priv->hw->wiphy), name); + led->hw_dev = priv->hw; + led->index = led_index; + led->led_dev.name = led->name; + led->led_dev.default_trigger = trigger; + led->led_dev.brightness_set = p54_led_brightness_set; + + err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev); + if (err) + printk(KERN_ERR "%s: Failed to register %s LED.\n", + wiphy_name(priv->hw->wiphy), name); + else + led->registered = 1; + + return err; +} + +int p54_init_leds(struct p54_common *priv) +{ + int err; + + /* + * TODO: + * Figure out if the EEPROM contains some hints about the number + * of available/programmable LEDs of the device. + */ + + INIT_DELAYED_WORK(&priv->led_work, p54_update_leds); + + err = p54_register_led(priv, 0, "assoc", + ieee80211_get_assoc_led_name(priv->hw)); + if (err) + return err; + + err = p54_register_led(priv, 1, "tx", + ieee80211_get_tx_led_name(priv->hw)); + if (err) + return err; + + err = p54_register_led(priv, 2, "rx", + ieee80211_get_rx_led_name(priv->hw)); + if (err) + return err; + + err = p54_register_led(priv, 3, "radio", + ieee80211_get_radio_led_name(priv->hw)); + if (err) + return err; + + err = p54_set_leds(priv); + return err; +} + +void p54_unregister_leds(struct p54_common *priv) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(priv->leds); i++) { + if (priv->leds[i].registered) { + priv->leds[i].registered = false; + priv->leds[i].toggled = 0; + led_classdev_unregister(&priv->leds[i].led_dev); + } + } + + cancel_delayed_work_sync(&priv->led_work); +} diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h new file mode 100644 index 0000000..04b63ec --- /dev/null +++ b/drivers/net/wireless/p54/lmac.h @@ -0,0 +1,558 @@ +/* + * LMAC Interface specific definitions for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2007 - 2009, Christian Lamparter + * + * Based on: + * - the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. 
+ * + * - LMAC API interface header file for STLC4560 (lmac_longbow.h) + * Copyright (C) 2007 Conexant Systems, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef LMAC_H +#define LMAC_H + +enum p54_control_frame_types { + P54_CONTROL_TYPE_SETUP = 0, + P54_CONTROL_TYPE_SCAN, + P54_CONTROL_TYPE_TRAP, + P54_CONTROL_TYPE_DCFINIT, + P54_CONTROL_TYPE_RX_KEYCACHE, + P54_CONTROL_TYPE_TIM, + P54_CONTROL_TYPE_PSM, + P54_CONTROL_TYPE_TXCANCEL, + P54_CONTROL_TYPE_TXDONE, + P54_CONTROL_TYPE_BURST, + P54_CONTROL_TYPE_STAT_READBACK, + P54_CONTROL_TYPE_BBP, + P54_CONTROL_TYPE_EEPROM_READBACK, + P54_CONTROL_TYPE_LED, + P54_CONTROL_TYPE_GPIO, + P54_CONTROL_TYPE_TIMER, + P54_CONTROL_TYPE_MODULATION, + P54_CONTROL_TYPE_SYNTH_CONFIG, + P54_CONTROL_TYPE_DETECTOR_VALUE, + P54_CONTROL_TYPE_XBOW_SYNTH_CFG, + P54_CONTROL_TYPE_CCE_QUIET, + P54_CONTROL_TYPE_PSM_STA_UNLOCK, + P54_CONTROL_TYPE_PCS, + P54_CONTROL_TYPE_BT_BALANCER = 28, + P54_CONTROL_TYPE_GROUP_ADDRESS_TABLE = 30, + P54_CONTROL_TYPE_ARPTABLE = 31, + P54_CONTROL_TYPE_BT_OPTIONS = 35, +}; + +#define P54_HDR_FLAG_CONTROL BIT(15) +#define P54_HDR_FLAG_CONTROL_OPSET (BIT(15) + BIT(0)) +#define P54_HDR_FLAG_DATA_ALIGN BIT(14) + +#define P54_HDR_FLAG_DATA_OUT_PROMISC BIT(0) +#define P54_HDR_FLAG_DATA_OUT_TIMESTAMP BIT(1) +#define P54_HDR_FLAG_DATA_OUT_SEQNR BIT(2) +#define P54_HDR_FLAG_DATA_OUT_BIT3 BIT(3) +#define P54_HDR_FLAG_DATA_OUT_BURST BIT(4) +#define P54_HDR_FLAG_DATA_OUT_NOCANCEL BIT(5) +#define P54_HDR_FLAG_DATA_OUT_CLEARTIM BIT(6) +#define P54_HDR_FLAG_DATA_OUT_HITCHHIKE BIT(7) +#define P54_HDR_FLAG_DATA_OUT_COMPRESS BIT(8) +#define P54_HDR_FLAG_DATA_OUT_CONCAT BIT(9) +#define P54_HDR_FLAG_DATA_OUT_PCS_ACCEPT BIT(10) +#define P54_HDR_FLAG_DATA_OUT_WAITEOSP BIT(11) + +#define P54_HDR_FLAG_DATA_IN_FCS_GOOD BIT(0) +#define P54_HDR_FLAG_DATA_IN_MATCH_MAC BIT(1) +#define P54_HDR_FLAG_DATA_IN_MCBC BIT(2) +#define P54_HDR_FLAG_DATA_IN_BEACON BIT(3) +#define P54_HDR_FLAG_DATA_IN_MATCH_BSS BIT(4) +#define P54_HDR_FLAG_DATA_IN_BCAST_BSS BIT(5) +#define P54_HDR_FLAG_DATA_IN_DATA BIT(6) +#define P54_HDR_FLAG_DATA_IN_TRUNCATED BIT(7) +#define P54_HDR_FLAG_DATA_IN_BIT8 BIT(8) +#define P54_HDR_FLAG_DATA_IN_TRANSPARENT BIT(9) + +struct p54_hdr { + __le16 flags; + __le16 len; + __le32 req_id; + __le16 type; /* enum p54_control_frame_types */ + u8 rts_tries; + u8 tries; + u8 data[0]; +} __packed; + +#define GET_REQ_ID(skb) \ + (((struct p54_hdr *) ((struct sk_buff *) skb)->data)->req_id) \ + +#define FREE_AFTER_TX(skb) \ + ((((struct p54_hdr *) ((struct sk_buff *) skb)->data)-> \ + flags) == cpu_to_le16(P54_HDR_FLAG_CONTROL_OPSET)) + +#define IS_DATA_FRAME(skb) \ + (!((((struct p54_hdr *) ((struct sk_buff *) skb)->data)-> \ + flags) & cpu_to_le16(P54_HDR_FLAG_CONTROL))) + +#define GET_HW_QUEUE(skb) \ + (((struct p54_tx_data *)((struct p54_hdr *) \ + skb->data)->data)->hw_queue) + +/* + * shared interface ID definitions + * The interface ID is a unique identification of a specific interface. 
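+ * These IDs show up in the firmware's BR_CODE_EXPOSED_IF bootrecords and in the PDR_INTERFACE_LIST EEPROM entry; the parsers above match on IF_ID_LMAC and IF_ID_ISL39000 respectively.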
+ * The following values are reserved: 0x0000, 0x0002, 0x0012, 0x0014, 0x0015 + */ +#define IF_ID_ISL36356A 0x0001 /* ISL36356A <-> Firmware */ +#define IF_ID_MVC 0x0003 /* MAC Virtual Coprocessor */ +#define IF_ID_DEBUG 0x0008 /* PolDebug Interface */ +#define IF_ID_PRODUCT 0x0009 +#define IF_ID_OEM 0x000a +#define IF_ID_PCI3877 0x000b /* 3877 <-> Host PCI */ +#define IF_ID_ISL37704C 0x000c /* ISL37704C <-> Fw */ +#define IF_ID_ISL39000 0x000f /* ISL39000 <-> Fw */ +#define IF_ID_ISL39300A 0x0010 /* ISL39300A <-> Fw */ +#define IF_ID_ISL37700_UAP 0x0016 /* ISL37700 uAP Fw <-> Fw */ +#define IF_ID_ISL39000_UAP 0x0017 /* ISL39000 uAP Fw <-> Fw */ +#define IF_ID_LMAC 0x001a /* Interface exposed by LMAC */ + +struct exp_if { + __le16 role; + __le16 if_id; + __le16 variant; + __le16 btm_compat; + __le16 top_compat; +} __packed; + +struct dep_if { + __le16 role; + __le16 if_id; + __le16 variant; +} __packed; + +/* driver <-> lmac definitions */ +struct p54_eeprom_lm86 { + union { + struct { + __le16 offset; + __le16 len; + u8 data[0]; + } __packed v1; + struct { + __le32 offset; + __le16 len; + u8 magic2; + u8 pad; + u8 magic[4]; + u8 data[0]; + } __packed v2; + } __packed; +} __packed; + +enum p54_rx_decrypt_status { + P54_DECRYPT_NONE = 0, + P54_DECRYPT_OK, + P54_DECRYPT_NOKEY, + P54_DECRYPT_NOMICHAEL, + P54_DECRYPT_NOCKIPMIC, + P54_DECRYPT_FAIL_WEP, + P54_DECRYPT_FAIL_TKIP, + P54_DECRYPT_FAIL_MICHAEL, + P54_DECRYPT_FAIL_CKIPKP, + P54_DECRYPT_FAIL_CKIPMIC, + P54_DECRYPT_FAIL_AESCCMP +}; + +struct p54_rx_data { + __le16 flags; + __le16 len; + __le16 freq; + u8 antenna; + u8 rate; + u8 rssi; + u8 quality; + u8 decrypt_status; + u8 rssi_raw; + __le32 tsf32; + __le32 unalloc0; + u8 align[0]; +} __packed; + +enum p54_trap_type { + P54_TRAP_SCAN = 0, + P54_TRAP_TIMER, + P54_TRAP_BEACON_TX, + P54_TRAP_FAA_RADIO_ON, + P54_TRAP_FAA_RADIO_OFF, + P54_TRAP_RADAR, + P54_TRAP_NO_BEACON, + P54_TRAP_TBTT, + P54_TRAP_SCO_ENTER, + P54_TRAP_SCO_EXIT +}; + +struct p54_trap { + __le16 event; + __le16 frequency; +} __packed; + +enum p54_frame_sent_status { + P54_TX_OK = 0, + P54_TX_FAILED, + P54_TX_PSM, + P54_TX_PSM_CANCELLED = 4 +}; + +struct p54_frame_sent { + u8 status; + u8 tries; + u8 ack_rssi; + u8 quality; + __le16 seq; + u8 antenna; + u8 padding; +} __packed; + +enum p54_tx_data_crypt { + P54_CRYPTO_NONE = 0, + P54_CRYPTO_WEP, + P54_CRYPTO_TKIP, + P54_CRYPTO_TKIPMICHAEL, + P54_CRYPTO_CCX_WEPMIC, + P54_CRYPTO_CCX_KPMIC, + P54_CRYPTO_CCX_KP, + P54_CRYPTO_AESCCMP +}; + +enum p54_tx_data_queue { + P54_QUEUE_BEACON = 0, + P54_QUEUE_FWSCAN = 1, + P54_QUEUE_MGMT = 2, + P54_QUEUE_CAB = 3, + P54_QUEUE_DATA = 4, + + P54_QUEUE_AC_NUM = 4, + P54_QUEUE_AC_VO = 4, + P54_QUEUE_AC_VI = 5, + P54_QUEUE_AC_BE = 6, + P54_QUEUE_AC_BK = 7, + + /* keep last */ + P54_QUEUE_NUM = 8, +}; + +#define IS_QOS_QUEUE(n) (n >= P54_QUEUE_DATA) + +struct p54_tx_data { + u8 rateset[8]; + u8 rts_rate_idx; + u8 crypt_offset; + u8 key_type; + u8 key_len; + u8 key[16]; + u8 hw_queue; + u8 backlog; + __le16 durations[4]; + u8 tx_antenna; + union { + struct { + u8 cts_rate; + __le16 output_power; + } __packed longbow; + struct { + u8 output_power; + u8 cts_rate; + u8 unalloc; + } __packed normal; + } __packed; + u8 unalloc2[2]; + u8 align[0]; +} __packed; + +/* unit is ms */ +#define P54_TX_FRAME_LIFETIME 2000 +#define P54_TX_TIMEOUT 4000 +#define P54_STATISTICS_UPDATE 5000 + +#define P54_FILTER_TYPE_NONE 0 +#define P54_FILTER_TYPE_STATION BIT(0) +#define P54_FILTER_TYPE_IBSS BIT(1) +#define P54_FILTER_TYPE_AP BIT(2) +#define 
P54_FILTER_TYPE_TRANSPARENT BIT(3) +#define P54_FILTER_TYPE_PROMISCUOUS BIT(4) +#define P54_FILTER_TYPE_HIBERNATE BIT(5) +#define P54_FILTER_TYPE_NOACK BIT(6) +#define P54_FILTER_TYPE_RX_DISABLED BIT(7) + +struct p54_setup_mac { + __le16 mac_mode; + u8 mac_addr[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + u8 rx_antenna; + u8 rx_align; + union { + struct { + __le32 basic_rate_mask; + u8 rts_rates[8]; + __le32 rx_addr; + __le16 max_rx; + __le16 rxhw; + __le16 wakeup_timer; + __le16 unalloc0; + } __packed v1; + struct { + __le32 rx_addr; + __le16 max_rx; + __le16 rxhw; + __le16 timer; + __le16 truncate; + __le32 basic_rate_mask; + u8 sbss_offset; + u8 mcast_window; + u8 rx_rssi_threshold; + u8 rx_ed_threshold; + __le32 ref_clock; + __le16 lpf_bandwidth; + __le16 osc_start_delay; + } __packed v2; + } __packed; +} __packed; + +#define P54_SETUP_V1_LEN 40 +#define P54_SETUP_V2_LEN (sizeof(struct p54_setup_mac)) + +#define P54_SCAN_EXIT BIT(0) +#define P54_SCAN_TRAP BIT(1) +#define P54_SCAN_ACTIVE BIT(2) +#define P54_SCAN_FILTER BIT(3) + +struct p54_scan_head { + __le16 mode; + __le16 dwell; + u8 scan_params[20]; + __le16 freq; +} __packed; + +struct p54_pa_curve_data_sample { + u8 rf_power; + u8 pa_detector; + u8 data_barker; + u8 data_bpsk; + u8 data_qpsk; + u8 data_16qam; + u8 data_64qam; + u8 padding; +} __packed; + +struct p54_scan_body { + u8 pa_points_per_curve; + u8 val_barker; + u8 val_bpsk; + u8 val_qpsk; + u8 val_16qam; + u8 val_64qam; + struct p54_pa_curve_data_sample curve_data[8]; + u8 dup_bpsk; + u8 dup_qpsk; + u8 dup_16qam; + u8 dup_64qam; +} __packed; + +/* + * Warning: Longbow's structures are bogus. + */ +struct p54_channel_output_limit_longbow { + __le16 rf_power_points[12]; +} __packed; + +struct p54_pa_curve_data_sample_longbow { + __le16 rf_power; + __le16 pa_detector; + struct { + __le16 data[4]; + } points[3] __packed; +} __packed; + +struct p54_scan_body_longbow { + struct p54_channel_output_limit_longbow power_limits; + struct p54_pa_curve_data_sample_longbow curve_data[8]; + __le16 unkn[6]; /* maybe more power_limits or rate_mask */ +} __packed; + +union p54_scan_body_union { + struct p54_scan_body normal; + struct p54_scan_body_longbow longbow; +} __packed; + +struct p54_scan_tail_rate { + __le32 basic_rate_mask; + u8 rts_rates[8]; +} __packed; + +struct p54_led { + __le16 flags; + __le16 mask[2]; + __le16 delay[2]; +} __packed; + +struct p54_edcf { + u8 flags; + u8 slottime; + u8 sifs; + u8 eofpad; + struct p54_edcf_queue_param queue[8]; + u8 mapping[4]; + __le16 frameburst; + __le16 round_trip_delay; +} __packed; + +struct p54_statistics { + __le32 rx_success; + __le32 rx_bad_fcs; + __le32 rx_abort; + __le32 rx_abort_phy; + __le32 rts_success; + __le32 rts_fail; + __le32 tsf32; + __le32 airtime; + __le32 noise; + __le32 sample_noise[8]; + __le32 sample_cca; + __le32 sample_tx; +} __packed; + +struct p54_xbow_synth { + __le16 magic1; + __le16 magic2; + __le16 freq; + u32 padding[5]; +} __packed; + +struct p54_timer { + __le32 interval; +} __packed; + +struct p54_keycache { + u8 entry; + u8 key_id; + u8 mac[ETH_ALEN]; + u8 padding[2]; + u8 key_type; + u8 key_len; + u8 key[24]; +} __packed; + +struct p54_burst { + u8 flags; + u8 queue; + u8 backlog; + u8 pad; + __le16 durations[32]; +} __packed; + +struct p54_psm_interval { + __le16 interval; + __le16 periods; +} __packed; + +#define P54_PSM_CAM 0 +#define P54_PSM BIT(0) +#define P54_PSM_DTIM BIT(1) +#define P54_PSM_MCBC BIT(2) +#define P54_PSM_CHECKSUM BIT(3) +#define P54_PSM_SKIP_MORE_DATA BIT(4) +#define 
P54_PSM_BEACON_TIMEOUT BIT(5) +#define P54_PSM_HFOSLEEP BIT(6) +#define P54_PSM_AUTOSWITCH_SLEEP BIT(7) +#define P54_PSM_LPIT BIT(8) +#define P54_PSM_BF_UCAST_SKIP BIT(9) +#define P54_PSM_BF_MCAST_SKIP BIT(10) + +struct p54_psm { + __le16 mode; + __le16 aid; + struct p54_psm_interval intervals[4]; + u8 beacon_rssi_skip_max; + u8 rssi_delta_threshold; + u8 nr; + u8 exclude[1]; +} __packed; + +#define MC_FILTER_ADDRESS_NUM 4 + +struct p54_group_address_table { + __le16 filter_enable; + __le16 num_address; + u8 mac_list[MC_FILTER_ADDRESS_NUM][ETH_ALEN]; +} __packed; + +struct p54_txcancel { + __le32 req_id; +} __packed; + +struct p54_sta_unlock { + u8 addr[ETH_ALEN]; + u16 padding; +} __packed; + +#define P54_TIM_CLEAR BIT(15) +struct p54_tim { + u8 count; + u8 padding[3]; + __le16 entry[8]; +} __packed; + +struct p54_cce_quiet { + __le32 period; +} __packed; + +struct p54_bt_balancer { + __le16 prio_thresh; + __le16 acl_thresh; +} __packed; + +struct p54_arp_table { + __le16 filter_enable; + u8 ipv4_addr[4]; +} __packed; + +/* LED control */ +int p54_set_leds(struct p54_common *priv); +int p54_init_leds(struct p54_common *priv); +void p54_unregister_leds(struct p54_common *priv); + +/* xmit functions */ +int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb); +int p54_tx_cancel(struct p54_common *priv, __le32 req_id); +void p54_tx(struct p54_common *priv, struct sk_buff *skb); + +/* synth/phy configuration */ +int p54_init_xbow_synth(struct p54_common *priv); +int p54_scan(struct p54_common *priv, u16 mode, u16 dwell); + +/* MAC */ +int p54_sta_unlock(struct p54_common *priv, u8 *addr); +int p54_update_beacon_tim(struct p54_common *priv, u16 aid, bool set); +int p54_setup_mac(struct p54_common *priv); +int p54_set_ps(struct p54_common *priv); +int p54_fetch_statistics(struct p54_common *priv); + +/* e/v DCF setup */ +int p54_set_edcf(struct p54_common *priv); + +/* cryptographic engine */ +int p54_upload_key(struct p54_common *priv, u8 algo, int slot, + u8 idx, u8 len, u8 *addr, u8* key); + +/* eeprom */ +int p54_download_eeprom(struct p54_common *priv, void *buf, + u16 offset, u16 len); + +/* utility */ +u8 *p54_find_ie(struct sk_buff *skb, u8 ie); + +#endif /* LMAC_H */ diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c new file mode 100644 index 0000000..4f752a2 --- /dev/null +++ b/drivers/net/wireless/p54/main.c @@ -0,0 +1,647 @@ +/* + * mac80211 glue code for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2007-2009, Christian Lamparter + * Copyright 2008, Johannes Berg + * + * Based on: + * - the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * - stlc45xx driver + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include + +#include + +#include "p54.h" +#include "lmac.h" + +static int modparam_nohwcrypt; +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); +MODULE_AUTHOR("Michael Wu "); +MODULE_DESCRIPTION("Softmac Prism54 common code"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("prism54common"); + +static int p54_sta_add_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct p54_common *priv = hw->priv; + + /* + * Notify the firmware that we don't want or we don't + * need to buffer frames for this station anymore. + */ + + p54_sta_unlock(priv, sta->addr); + + return 0; +} + +static void p54_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + struct ieee80211_sta *sta) +{ + struct p54_common *priv = dev->priv; + + switch (notify_cmd) { + case STA_NOTIFY_AWAKE: + /* update the firmware's filter table */ + p54_sta_unlock(priv, sta->addr); + break; + default: + break; + } +} + +static int p54_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta, + bool set) +{ + struct p54_common *priv = dev->priv; + + return p54_update_beacon_tim(priv, sta->aid, set); +} + +u8 *p54_find_ie(struct sk_buff *skb, u8 ie) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + u8 *pos, *end; + + if (skb->len <= sizeof(mgmt)) + return NULL; + + pos = (u8 *)mgmt->u.beacon.variable; + end = skb->data + skb->len; + while (pos < end) { + if (pos + 2 + pos[1] > end) + return NULL; + + if (pos[0] == ie) + return pos; + + pos += 2 + pos[1]; + } + return NULL; +} + +static int p54_beacon_format_ie_tim(struct sk_buff *skb) +{ + /* + * the good excuse for this mess is ... the firmware. + * The dummy TIM MUST be at the end of the beacon frame, + * because it'll be overwritten! + */ + u8 *tim; + u8 dtim_len; + u8 dtim_period; + u8 *next; + + tim = p54_find_ie(skb, WLAN_EID_TIM); + if (!tim) + return 0; + + dtim_len = tim[1]; + dtim_period = tim[3]; + next = tim + 2 + dtim_len; + + if (dtim_len < 3) + return -EINVAL; + + memmove(tim, next, skb_tail_pointer(skb) - next); + tim = skb_tail_pointer(skb) - (dtim_len + 2); + + /* add the dummy at the end */ + tim[0] = WLAN_EID_TIM; + tim[1] = 3; + tim[2] = 0; + tim[3] = dtim_period; + tim[4] = 0; + + if (dtim_len > 3) + skb_trim(skb, skb->len - (dtim_len - 3)); + + return 0; +} + +static int p54_beacon_update(struct p54_common *priv, + struct ieee80211_vif *vif) +{ + struct sk_buff *beacon; + int ret; + + beacon = ieee80211_beacon_get(priv->hw, vif); + if (!beacon) + return -ENOMEM; + ret = p54_beacon_format_ie_tim(beacon); + if (ret) + return ret; + + /* + * During operation, the firmware takes care of beaconing. + * The driver only needs to upload a new beacon template, once + * the template was changed by the stack or userspace. + * + * LMAC API 3.2.2 also specifies that the driver does not need + * to cancel the old beacon template by hand, instead the firmware + * will release the previous one through the feedback mechanism. 
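+ *
+ * In practice that means the freshly built template below is simply
+ * handed to the regular TX path via p54_tx_80211() and the cached TSF
+ * values are reset; no explicit "cancel old beacon" command is sent
+ * from here.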
+ */ + WARN_ON(p54_tx_80211(priv->hw, beacon)); + priv->tsf_high32 = 0; + priv->tsf_low32 = 0; + + return 0; +} + +static int p54_start(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + int err; + + mutex_lock(&priv->conf_mutex); + err = priv->open(dev); + if (err) + goto out; + P54_SET_QUEUE(priv->qos_params[0], 0x0002, 0x0003, 0x0007, 47); + P54_SET_QUEUE(priv->qos_params[1], 0x0002, 0x0007, 0x000f, 94); + P54_SET_QUEUE(priv->qos_params[2], 0x0003, 0x000f, 0x03ff, 0); + P54_SET_QUEUE(priv->qos_params[3], 0x0007, 0x000f, 0x03ff, 0); + err = p54_set_edcf(priv); + if (err) + goto out; + + memset(priv->bssid, ~0, ETH_ALEN); + priv->mode = NL80211_IFTYPE_MONITOR; + err = p54_setup_mac(priv); + if (err) { + priv->mode = NL80211_IFTYPE_UNSPECIFIED; + goto out; + } + + ieee80211_queue_delayed_work(dev, &priv->work, 0); + + priv->softled_state = 0; + err = p54_set_leds(priv); + +out: + mutex_unlock(&priv->conf_mutex); + return err; +} + +static void p54_stop(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + int i; + + mutex_lock(&priv->conf_mutex); + priv->mode = NL80211_IFTYPE_UNSPECIFIED; + priv->softled_state = 0; + p54_set_leds(priv); + + cancel_delayed_work_sync(&priv->work); + + priv->stop(dev); + skb_queue_purge(&priv->tx_pending); + skb_queue_purge(&priv->tx_queue); + for (i = 0; i < P54_QUEUE_NUM; i++) { + priv->tx_stats[i].count = 0; + priv->tx_stats[i].len = 0; + } + + priv->beacon_req_id = cpu_to_le32(0); + priv->tsf_high32 = priv->tsf_low32 = 0; + mutex_unlock(&priv->conf_mutex); +} + +static int p54_add_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct p54_common *priv = dev->priv; + + mutex_lock(&priv->conf_mutex); + if (priv->mode != NL80211_IFTYPE_MONITOR) { + mutex_unlock(&priv->conf_mutex); + return -EOPNOTSUPP; + } + + priv->vif = vif; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + priv->mode = vif->type; + break; + default: + mutex_unlock(&priv->conf_mutex); + return -EOPNOTSUPP; + } + + memcpy(priv->mac_addr, vif->addr, ETH_ALEN); + p54_setup_mac(priv); + mutex_unlock(&priv->conf_mutex); + return 0; +} + +static void p54_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct p54_common *priv = dev->priv; + + mutex_lock(&priv->conf_mutex); + priv->vif = NULL; + + /* + * LMAC API 3.2.2 states that any active beacon template must be + * canceled by the driver before attempting a mode transition. 
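+ *
+ * That is why, if a beacon request is still outstanding, the code below
+ * issues p54_tx_cancel() for it and then waits (up to one second) on
+ * beacon_comp before the MAC is reprogrammed for monitor mode.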
+ */ + if (le32_to_cpu(priv->beacon_req_id) != 0) { + p54_tx_cancel(priv, priv->beacon_req_id); + wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ); + } + priv->mode = NL80211_IFTYPE_MONITOR; + memset(priv->mac_addr, 0, ETH_ALEN); + memset(priv->bssid, 0, ETH_ALEN); + p54_setup_mac(priv); + mutex_unlock(&priv->conf_mutex); +} + +static int p54_config(struct ieee80211_hw *dev, u32 changed) +{ + int ret = 0; + struct p54_common *priv = dev->priv; + struct ieee80211_conf *conf = &dev->conf; + + mutex_lock(&priv->conf_mutex); + if (changed & IEEE80211_CONF_CHANGE_POWER) + priv->output_power = conf->power_level << 2; + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + ret = p54_scan(priv, P54_SCAN_EXIT, 0); + if (ret) + goto out; + } + if (changed & IEEE80211_CONF_CHANGE_PS) { + ret = p54_set_ps(priv); + if (ret) + goto out; + } + if (changed & IEEE80211_CONF_CHANGE_IDLE) { + ret = p54_setup_mac(priv); + if (ret) + goto out; + } + +out: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +static void p54_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct p54_common *priv = dev->priv; + + *total_flags &= FIF_PROMISC_IN_BSS | + FIF_OTHER_BSS; + + priv->filter_flags = *total_flags; + + if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) + p54_setup_mac(priv); +} + +static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct p54_common *priv = dev->priv; + int ret; + + mutex_lock(&priv->conf_mutex); + if (queue < dev->queues) { + P54_SET_QUEUE(priv->qos_params[queue], params->aifs, + params->cw_min, params->cw_max, params->txop); + ret = p54_set_edcf(priv); + } else + ret = -EINVAL; + mutex_unlock(&priv->conf_mutex); + return ret; +} + +static void p54_work(struct work_struct *work) +{ + struct p54_common *priv = container_of(work, struct p54_common, + work.work); + + if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) + return ; + + /* + * TODO: walk through tx_queue and do the following tasks + * 1. initiate bursts. + * 2. cancel stuck frames / reset the device if necessary. 
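+ *
+ * For now, the periodic work item only polls the firmware statistics
+ * through p54_fetch_statistics() below.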
+ */ + + p54_fetch_statistics(priv); +} + +static int p54_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats) +{ + struct p54_common *priv = dev->priv; + + memcpy(stats, &priv->stats, sizeof(*stats)); + return 0; +} + +static void p54_bss_info_changed(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct p54_common *priv = dev->priv; + + mutex_lock(&priv->conf_mutex); + if (changed & BSS_CHANGED_BSSID) { + memcpy(priv->bssid, info->bssid, ETH_ALEN); + p54_setup_mac(priv); + } + + if (changed & BSS_CHANGED_BEACON) { + p54_scan(priv, P54_SCAN_EXIT, 0); + p54_setup_mac(priv); + p54_beacon_update(priv, vif); + p54_set_edcf(priv); + } + + if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_BEACON)) { + priv->use_short_slot = info->use_short_slot; + p54_set_edcf(priv); + } + if (changed & BSS_CHANGED_BASIC_RATES) { + if (dev->conf.channel->band == IEEE80211_BAND_5GHZ) + priv->basic_rate_mask = (info->basic_rates << 4); + else + priv->basic_rate_mask = info->basic_rates; + p54_setup_mac(priv); + if (priv->fw_var >= 0x500) + p54_scan(priv, P54_SCAN_EXIT, 0); + } + if (changed & BSS_CHANGED_ASSOC) { + if (info->assoc) { + priv->aid = info->aid; + priv->wakeup_timer = info->beacon_int * + info->dtim_period * 5; + p54_setup_mac(priv); + } else { + priv->wakeup_timer = 500; + priv->aid = 0; + } + } + + mutex_unlock(&priv->conf_mutex); +} + +static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct p54_common *priv = dev->priv; + int slot, ret = 0; + u8 algo = 0; + u8 *addr = NULL; + + if (modparam_nohwcrypt) + return -EOPNOTSUPP; + + mutex_lock(&priv->conf_mutex); + if (cmd == SET_KEY) { + switch (key->alg) { + case ALG_TKIP: + if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL | + BR_DESC_PRIV_CAP_TKIP))) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + algo = P54_CRYPTO_TKIPMICHAEL; + break; + case ALG_WEP: + if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + algo = P54_CRYPTO_WEP; + break; + case ALG_CCMP: + if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) { + ret = -EOPNOTSUPP; + goto out_unlock; + } + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + algo = P54_CRYPTO_AESCCMP; + break; + default: + ret = -EOPNOTSUPP; + goto out_unlock; + } + slot = bitmap_find_free_region(priv->used_rxkeys, + priv->rx_keycache_size, 0); + + if (slot < 0) { + /* + * The device supports the choosen algorithm, but the + * firmware does not provide enough key slots to store + * all of them. + * But encryption offload for outgoing frames is always + * possible, so we just pretend that the upload was + * successful and do the decryption in software. + */ + + /* mark the key as invalid. */ + key->hw_key_idx = 0xff; + goto out_unlock; + } + } else { + slot = key->hw_key_idx; + + if (slot == 0xff) { + /* This key was not uploaded into the rx key cache. 
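+ * There is nothing to release from the used_rxkeys bitmap for such a
+ * key, so simply drop out of the function.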
*/ + + goto out_unlock; + } + + bitmap_release_region(priv->used_rxkeys, slot, 0); + algo = 0; + } + + if (sta) + addr = sta->addr; + + ret = p54_upload_key(priv, algo, slot, key->keyidx, + key->keylen, addr, key->key); + if (ret) { + bitmap_release_region(priv->used_rxkeys, slot, 0); + ret = -EOPNOTSUPP; + goto out_unlock; + } + + key->hw_key_idx = slot; + +out_unlock: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +static const struct ieee80211_ops p54_ops = { + .tx = p54_tx_80211, + .start = p54_start, + .stop = p54_stop, + .add_interface = p54_add_interface, + .remove_interface = p54_remove_interface, + .set_tim = p54_set_tim, + .sta_notify = p54_sta_notify, + .sta_add = p54_sta_add_remove, + .sta_remove = p54_sta_add_remove, + .set_key = p54_set_key, + .config = p54_config, + .bss_info_changed = p54_bss_info_changed, + .configure_filter = p54_configure_filter, + .conf_tx = p54_conf_tx, + .get_stats = p54_get_stats, +}; + +struct ieee80211_hw *p54_init_common(size_t priv_data_len) +{ + struct ieee80211_hw *dev; + struct p54_common *priv; + + dev = ieee80211_alloc_hw(priv_data_len, &p54_ops); + if (!dev) + return NULL; + + priv = dev->priv; + priv->hw = dev; + priv->mode = NL80211_IFTYPE_UNSPECIFIED; + priv->basic_rate_mask = 0x15f; + spin_lock_init(&priv->tx_stats_lock); + skb_queue_head_init(&priv->tx_queue); + skb_queue_head_init(&priv->tx_pending); + dev->flags = IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_PS_NULLFUNC_STACK | + IEEE80211_HW_BEACON_FILTER | + IEEE80211_HW_NOISE_DBM; + + dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT); + + dev->channel_change_time = 1000; /* TODO: find actual value */ + priv->beacon_req_id = cpu_to_le32(0); + priv->tx_stats[P54_QUEUE_BEACON].limit = 1; + priv->tx_stats[P54_QUEUE_FWSCAN].limit = 1; + priv->tx_stats[P54_QUEUE_MGMT].limit = 3; + priv->tx_stats[P54_QUEUE_CAB].limit = 3; + priv->tx_stats[P54_QUEUE_DATA].limit = 5; + dev->queues = 1; + priv->noise = -94; + /* + * We support at most 8 tries no matter which rate they're at, + * we cannot support max_rates * max_rate_tries as we set it + * here, but setting it correctly to 4/2 or so would limit us + * artificially if the RC algorithm wants just two rates, so + * let's say 4/7, we'll redistribute it at TX time, see the + * comments there. + */ + dev->max_rates = 4; + dev->max_rate_tries = 7; + dev->extra_tx_headroom = sizeof(struct p54_hdr) + 4 + + sizeof(struct p54_tx_data); + + /* + * For now, disable PS by default because it affects + * link stability significantly. 
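+ *
+ * Power save can still be enabled explicitly from userspace; such a
+ * request reaches the firmware through p54_set_ps() in p54_config().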
+ */ + dev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + mutex_init(&priv->conf_mutex); + mutex_init(&priv->eeprom_mutex); + init_completion(&priv->eeprom_comp); + init_completion(&priv->beacon_comp); + INIT_DELAYED_WORK(&priv->work, p54_work); + + return dev; +} +EXPORT_SYMBOL_GPL(p54_init_common); + +int p54_register_common(struct ieee80211_hw *dev, struct device *pdev) +{ + struct p54_common *priv = dev->priv; + int err; + + err = ieee80211_register_hw(dev); + if (err) { + dev_err(pdev, "Cannot register device (%d).\n", err); + return err; + } + +#ifdef CONFIG_P54_LEDS + err = p54_init_leds(priv); + if (err) + return err; +#endif /* CONFIG_P54_LEDS */ + + dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy)); + return 0; +} +EXPORT_SYMBOL_GPL(p54_register_common); + +void p54_free_common(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + unsigned int i; + + for (i = 0; i < IEEE80211_NUM_BANDS; i++) + kfree(priv->band_table[i]); + + kfree(priv->iq_autocal); + kfree(priv->output_limit); + kfree(priv->curve_data); + kfree(priv->used_rxkeys); + priv->iq_autocal = NULL; + priv->output_limit = NULL; + priv->curve_data = NULL; + priv->used_rxkeys = NULL; + ieee80211_free_hw(dev); +} +EXPORT_SYMBOL_GPL(p54_free_common); + +void p54_unregister_common(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + +#ifdef CONFIG_P54_LEDS + p54_unregister_leds(priv); +#endif /* CONFIG_P54_LEDS */ + + ieee80211_unregister_hw(dev); + mutex_destroy(&priv->conf_mutex); + mutex_destroy(&priv->eeprom_mutex); +} +EXPORT_SYMBOL_GPL(p54_unregister_common); diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h new file mode 100644 index 0000000..4915d9d --- /dev/null +++ b/drivers/net/wireless/p54/net2280.h @@ -0,0 +1,452 @@ +#ifndef NET2280_H +#define NET2280_H +/* + * NetChip 2280 high/full speed USB device controller. + * Unlike many such controllers, this one talks PCI. + */ + +/* + * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) + * Copyright (C) 2003 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/*-------------------------------------------------------------------------*/ + +/* NET2280 MEMORY MAPPED REGISTERS + * + * The register layout came from the chip documentation, and the bit + * number definitions were extracted from chip specification. + * + * Use the shift operator ('<<') to build bit masks, with readl/writel + * to access the registers through PCI. 
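+ *
+ * For illustration, assuming a driver holds a mapped pointer "regs" of
+ * type struct net2280_regs __iomem *, setting the USB soft-reset bit
+ * would follow this pattern (the defines below are bit numbers, not
+ * ready-made masks):
+ *
+ *	u32 tmp = readl(&regs->devinit);
+ *	tmp |= 1 << USB_SOFT_RESET;
+ *	writel(tmp, &regs->devinit);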
+ */ + +/* main registers, BAR0 + 0x0000 */ +struct net2280_regs { + /* offset 0x0000 */ + __le32 devinit; +#define LOCAL_CLOCK_FREQUENCY 8 +#define FORCE_PCI_RESET 7 +#define PCI_ID 6 +#define PCI_ENABLE 5 +#define FIFO_SOFT_RESET 4 +#define CFG_SOFT_RESET 3 +#define PCI_SOFT_RESET 2 +#define USB_SOFT_RESET 1 +#define M8051_RESET 0 + __le32 eectl; +#define EEPROM_ADDRESS_WIDTH 23 +#define EEPROM_CHIP_SELECT_ACTIVE 22 +#define EEPROM_PRESENT 21 +#define EEPROM_VALID 20 +#define EEPROM_BUSY 19 +#define EEPROM_CHIP_SELECT_ENABLE 18 +#define EEPROM_BYTE_READ_START 17 +#define EEPROM_BYTE_WRITE_START 16 +#define EEPROM_READ_DATA 8 +#define EEPROM_WRITE_DATA 0 + __le32 eeclkfreq; + u32 _unused0; + /* offset 0x0010 */ + + __le32 pciirqenb0; /* interrupt PCI master ... */ +#define SETUP_PACKET_INTERRUPT_ENABLE 7 +#define ENDPOINT_F_INTERRUPT_ENABLE 6 +#define ENDPOINT_E_INTERRUPT_ENABLE 5 +#define ENDPOINT_D_INTERRUPT_ENABLE 4 +#define ENDPOINT_C_INTERRUPT_ENABLE 3 +#define ENDPOINT_B_INTERRUPT_ENABLE 2 +#define ENDPOINT_A_INTERRUPT_ENABLE 1 +#define ENDPOINT_0_INTERRUPT_ENABLE 0 + __le32 pciirqenb1; +#define PCI_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + __le32 cpu_irqenb0; /* ... 
or onboard 8051 */ +#define SETUP_PACKET_INTERRUPT_ENABLE 7 +#define ENDPOINT_F_INTERRUPT_ENABLE 6 +#define ENDPOINT_E_INTERRUPT_ENABLE 5 +#define ENDPOINT_D_INTERRUPT_ENABLE 4 +#define ENDPOINT_C_INTERRUPT_ENABLE 3 +#define ENDPOINT_B_INTERRUPT_ENABLE 2 +#define ENDPOINT_A_INTERRUPT_ENABLE 1 +#define ENDPOINT_0_INTERRUPT_ENABLE 0 + __le32 cpu_irqenb1; +#define CPU_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_INTA_INTERRUPT_ENABLE 24 +#define PCI_PME_INTERRUPT_ENABLE 23 +#define PCI_SERR_INTERRUPT_ENABLE 22 +#define PCI_PERR_INTERRUPT_ENABLE 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + + /* offset 0x0020 */ + u32 _unused1; + __le32 usbirqenb1; +#define USB_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_INTA_INTERRUPT_ENABLE 24 +#define PCI_PME_INTERRUPT_ENABLE 23 +#define PCI_SERR_INTERRUPT_ENABLE 22 +#define PCI_PERR_INTERRUPT_ENABLE 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + __le32 irqstat0; +#define INTA_ASSERTED 12 +#define SETUP_PACKET_INTERRUPT 7 +#define ENDPOINT_F_INTERRUPT 6 +#define ENDPOINT_E_INTERRUPT 5 +#define ENDPOINT_D_INTERRUPT 4 +#define ENDPOINT_C_INTERRUPT 3 +#define ENDPOINT_B_INTERRUPT 2 +#define ENDPOINT_A_INTERRUPT 1 +#define ENDPOINT_0_INTERRUPT 0 + __le32 irqstat1; +#define POWER_STATE_CHANGE_INTERRUPT 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT 26 +#define PCI_PARITY_ERROR_INTERRUPT 25 +#define PCI_INTA_INTERRUPT 24 +#define PCI_PME_INTERRUPT 23 +#define PCI_SERR_INTERRUPT 22 +#define PCI_PERR_INTERRUPT 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19 +#define PCI_RETRY_ABORT_INTERRUPT 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16 +#define GPIO_INTERRUPT 13 +#define DMA_D_INTERRUPT 12 +#define DMA_C_INTERRUPT 11 +#define DMA_B_INTERRUPT 10 +#define DMA_A_INTERRUPT 9 +#define EEPROM_DONE_INTERRUPT 8 +#define VBUS_INTERRUPT 7 +#define CONTROL_STATUS_INTERRUPT 6 +#define ROOT_PORT_RESET_INTERRUPT 4 +#define SUSPEND_REQUEST_INTERRUPT 3 +#define 
SUSPEND_REQUEST_CHANGE_INTERRUPT 2 +#define RESUME_INTERRUPT 1 +#define SOF_INTERRUPT 0 + /* offset 0x0030 */ + __le32 idxaddr; + __le32 idxdata; + __le32 fifoctl; +#define PCI_BASE2_RANGE 16 +#define IGNORE_FIFO_AVAILABILITY 3 +#define PCI_BASE2_SELECT 2 +#define FIFO_CONFIGURATION_SELECT 0 + u32 _unused2; + /* offset 0x0040 */ + __le32 memaddr; +#define START 28 +#define DIRECTION 27 +#define FIFO_DIAGNOSTIC_SELECT 24 +#define MEMORY_ADDRESS 0 + __le32 memdata0; + __le32 memdata1; + u32 _unused3; + /* offset 0x0050 */ + __le32 gpioctl; +#define GPIO3_LED_SELECT 12 +#define GPIO3_INTERRUPT_ENABLE 11 +#define GPIO2_INTERRUPT_ENABLE 10 +#define GPIO1_INTERRUPT_ENABLE 9 +#define GPIO0_INTERRUPT_ENABLE 8 +#define GPIO3_OUTPUT_ENABLE 7 +#define GPIO2_OUTPUT_ENABLE 6 +#define GPIO1_OUTPUT_ENABLE 5 +#define GPIO0_OUTPUT_ENABLE 4 +#define GPIO3_DATA 3 +#define GPIO2_DATA 2 +#define GPIO1_DATA 1 +#define GPIO0_DATA 0 + __le32 gpiostat; +#define GPIO3_INTERRUPT 3 +#define GPIO2_INTERRUPT 2 +#define GPIO1_INTERRUPT 1 +#define GPIO0_INTERRUPT 0 +} __attribute__ ((packed)); + +/* usb control, BAR0 + 0x0080 */ +struct net2280_usb_regs { + /* offset 0x0080 */ + __le32 stdrsp; +#define STALL_UNSUPPORTED_REQUESTS 31 +#define SET_TEST_MODE 16 +#define GET_OTHER_SPEED_CONFIGURATION 15 +#define GET_DEVICE_QUALIFIER 14 +#define SET_ADDRESS 13 +#define ENDPOINT_SET_CLEAR_HALT 12 +#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11 +#define GET_STRING_DESCRIPTOR_2 10 +#define GET_STRING_DESCRIPTOR_1 9 +#define GET_STRING_DESCRIPTOR_0 8 +#define GET_SET_INTERFACE 6 +#define GET_SET_CONFIGURATION 5 +#define GET_CONFIGURATION_DESCRIPTOR 4 +#define GET_DEVICE_DESCRIPTOR 3 +#define GET_ENDPOINT_STATUS 2 +#define GET_INTERFACE_STATUS 1 +#define GET_DEVICE_STATUS 0 + __le32 prodvendid; +#define PRODUCT_ID 16 +#define VENDOR_ID 0 + __le32 relnum; + __le32 usbctl; +#define SERIAL_NUMBER_INDEX 16 +#define PRODUCT_ID_STRING_ENABLE 13 +#define VENDOR_ID_STRING_ENABLE 12 +#define USB_ROOT_PORT_WAKEUP_ENABLE 11 +#define VBUS_PIN 10 +#define TIMED_DISCONNECT 9 +#define SUSPEND_IMMEDIATELY 7 +#define SELF_POWERED_USB_DEVICE 6 +#define REMOTE_WAKEUP_SUPPORT 5 +#define PME_POLARITY 4 +#define USB_DETECT_ENABLE 3 +#define PME_WAKEUP_ENABLE 2 +#define DEVICE_REMOTE_WAKEUP_ENABLE 1 +#define SELF_POWERED_STATUS 0 + /* offset 0x0090 */ + __le32 usbstat; +#define HIGH_SPEED 7 +#define FULL_SPEED 6 +#define GENERATE_RESUME 5 +#define GENERATE_DEVICE_REMOTE_WAKEUP 4 + __le32 xcvrdiag; +#define FORCE_HIGH_SPEED_MODE 31 +#define FORCE_FULL_SPEED_MODE 30 +#define USB_TEST_MODE 24 +#define LINE_STATE 16 +#define TRANSCEIVER_OPERATION_MODE 2 +#define TRANSCEIVER_SELECT 1 +#define TERMINATION_SELECT 0 + __le32 setup0123; + __le32 setup4567; + /* offset 0x0090 */ + u32 _unused0; + __le32 ouraddr; +#define FORCE_IMMEDIATE 7 +#define OUR_USB_ADDRESS 0 + __le32 ourconfig; +} __attribute__ ((packed)); + +/* pci control, BAR0 + 0x0100 */ +struct net2280_pci_regs { + /* offset 0x0100 */ + __le32 pcimstctl; +#define PCI_ARBITER_PARK_SELECT 13 +#define PCI_MULTI LEVEL_ARBITER 12 +#define PCI_RETRY_ABORT_ENABLE 11 +#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10 +#define DMA_READ_MULTIPLE_ENABLE 9 +#define DMA_READ_LINE_ENABLE 8 +#define PCI_MASTER_COMMAND_SELECT 6 +#define MEM_READ_OR_WRITE 0 +#define IO_READ_OR_WRITE 1 +#define CFG_READ_OR_WRITE 2 +#define PCI_MASTER_START 5 +#define PCI_MASTER_READ_WRITE 4 +#define PCI_MASTER_WRITE 0 +#define PCI_MASTER_READ 1 +#define PCI_MASTER_BYTE_WRITE_ENABLES 0 + __le32 pcimstaddr; + __le32 pcimstdata; + __le32 
pcimststat; +#define PCI_ARBITER_CLEAR 2 +#define PCI_EXTERNAL_ARBITER 1 +#define PCI_HOST_MODE 0 +} __attribute__ ((packed)); + +/* dma control, BAR0 + 0x0180 ... array of four structs like this, + * for channels 0..3. see also struct net2280_dma: descriptor + * that can be loaded into some of these registers. + */ +struct net2280_dma_regs { /* [11.7] */ + /* offset 0x0180, 0x01a0, 0x01c0, 0x01e0, */ + __le32 dmactl; +#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25 +#define DMA_CLEAR_COUNT_ENABLE 21 +#define DESCRIPTOR_POLLING_RATE 19 +#define POLL_CONTINUOUS 0 +#define POLL_1_USEC 1 +#define POLL_100_USEC 2 +#define POLL_1_MSEC 3 +#define DMA_VALID_BIT_POLLING_ENABLE 18 +#define DMA_VALID_BIT_ENABLE 17 +#define DMA_SCATTER_GATHER_ENABLE 16 +#define DMA_OUT_AUTO_START_ENABLE 4 +#define DMA_PREEMPT_ENABLE 3 +#define DMA_FIFO_VALIDATE 2 +#define DMA_ENABLE 1 +#define DMA_ADDRESS_HOLD 0 + __le32 dmastat; +#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25 +#define DMA_TRANSACTION_DONE_INTERRUPT 24 +#define DMA_ABORT 1 +#define DMA_START 0 + u32 _unused0[2]; + /* offset 0x0190, 0x01b0, 0x01d0, 0x01f0, */ + __le32 dmacount; +#define VALID_BIT 31 +#define DMA_DIRECTION 30 +#define DMA_DONE_INTERRUPT_ENABLE 29 +#define END_OF_CHAIN 28 +#define DMA_BYTE_COUNT_MASK ((1<<24)-1) +#define DMA_BYTE_COUNT 0 + __le32 dmaaddr; + __le32 dmadesc; + u32 _unused1; +} __attribute__ ((packed)); + +/* dedicated endpoint registers, BAR0 + 0x0200 */ + +struct net2280_dep_regs { /* [11.8] */ + /* offset 0x0200, 0x0210, 0x220, 0x230, 0x240 */ + __le32 dep_cfg; + /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */ + __le32 dep_rsp; + u32 _unused[2]; +} __attribute__ ((packed)); + +/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs + * like this, for ep0 then the configurable endpoints A..F + * ep0 reserved for control; E and F have only 64 bytes of fifo + */ +struct net2280_ep_regs { /* [11.9] */ + /* offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0 */ + __le32 ep_cfg; +#define ENDPOINT_BYTE_COUNT 16 +#define ENDPOINT_ENABLE 10 +#define ENDPOINT_TYPE 8 +#define ENDPOINT_DIRECTION 7 +#define ENDPOINT_NUMBER 0 + __le32 ep_rsp; +#define SET_NAK_OUT_PACKETS 15 +#define SET_EP_HIDE_STATUS_PHASE 14 +#define SET_EP_FORCE_CRC_ERROR 13 +#define SET_INTERRUPT_MODE 12 +#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11 +#define SET_NAK_OUT_PACKETS_MODE 10 +#define SET_ENDPOINT_TOGGLE 9 +#define SET_ENDPOINT_HALT 8 +#define CLEAR_NAK_OUT_PACKETS 7 +#define CLEAR_EP_HIDE_STATUS_PHASE 6 +#define CLEAR_EP_FORCE_CRC_ERROR 5 +#define CLEAR_INTERRUPT_MODE 4 +#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3 +#define CLEAR_NAK_OUT_PACKETS_MODE 2 +#define CLEAR_ENDPOINT_TOGGLE 1 +#define CLEAR_ENDPOINT_HALT 0 + __le32 ep_irqenb; +#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6 +#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5 +#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3 +#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2 +#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1 +#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0 + __le32 ep_stat; +#define FIFO_VALID_COUNT 24 +#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22 +#define TIMEOUT 21 +#define USB_STALL_SENT 20 +#define USB_IN_NAK_SENT 19 +#define USB_IN_ACK_RCVD 18 +#define USB_OUT_PING_NAK_SENT 17 +#define USB_OUT_ACK_SENT 16 +#define FIFO_OVERFLOW 13 +#define FIFO_UNDERFLOW 12 +#define FIFO_FULL 11 +#define FIFO_EMPTY 10 +#define FIFO_FLUSH 9 +#define SHORT_PACKET_OUT_DONE_INTERRUPT 6 +#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5 +#define NAK_OUT_PACKETS 4 
+#define DATA_PACKET_RECEIVED_INTERRUPT 3 +#define DATA_PACKET_TRANSMITTED_INTERRUPT 2 +#define DATA_OUT_PING_TOKEN_INTERRUPT 1 +#define DATA_IN_TOKEN_INTERRUPT 0 + /* offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0 */ + __le32 ep_avail; + __le32 ep_data; + u32 _unused0[2]; +} __attribute__ ((packed)); + +struct net2280_reg_write { + __le16 port; + __le32 addr; + __le32 val; +} __attribute__ ((packed)); + +struct net2280_reg_read { + __le16 port; + __le32 addr; +} __attribute__ ((packed)); +#endif /* NET2280_H */ diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h new file mode 100644 index 0000000..43a3b2e --- /dev/null +++ b/drivers/net/wireless/p54/p54.h @@ -0,0 +1,257 @@ +/* + * Shared defines for all mac80211 Prism54 code + * + * Copyright (c) 2006, Michael Wu + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef P54_H +#define P54_H + +#ifdef CONFIG_P54_LEDS +#include +#endif /* CONFIG_P54_LEDS */ + +#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000 + +#define BR_CODE_MIN 0x80000000 +#define BR_CODE_COMPONENT_ID 0x80000001 +#define BR_CODE_COMPONENT_VERSION 0x80000002 +#define BR_CODE_DEPENDENT_IF 0x80000003 +#define BR_CODE_EXPOSED_IF 0x80000004 +#define BR_CODE_DESCR 0x80000101 +#define BR_CODE_MAX 0x8FFFFFFF +#define BR_CODE_END_OF_BRA 0xFF0000FF +#define LEGACY_BR_CODE_END_OF_BRA 0xFFFFFFFF + +struct bootrec { + __le32 code; + __le32 len; + u32 data[10]; +} __packed; + +/* Interface role definitions */ +#define BR_INTERFACE_ROLE_SERVER 0x0000 +#define BR_INTERFACE_ROLE_CLIENT 0x8000 + +#define BR_DESC_PRIV_CAP_WEP BIT(0) +#define BR_DESC_PRIV_CAP_TKIP BIT(1) +#define BR_DESC_PRIV_CAP_MICHAEL BIT(2) +#define BR_DESC_PRIV_CAP_CCX_CP BIT(3) +#define BR_DESC_PRIV_CAP_CCX_MIC BIT(4) +#define BR_DESC_PRIV_CAP_AESCCMP BIT(5) + +struct bootrec_desc { + __le16 modes; + __le16 flags; + __le32 rx_start; + __le32 rx_end; + u8 headroom; + u8 tailroom; + u8 tx_queues; + u8 tx_depth; + u8 privacy_caps; + u8 rx_keycache_size; + u8 time_size; + u8 padding; + u8 rates[16]; + u8 padding2[4]; + __le16 rx_mtu; +} __packed; + +#define FW_FMAC 0x464d4143 +#define FW_LM86 0x4c4d3836 +#define FW_LM87 0x4c4d3837 +#define FW_LM20 0x4c4d3230 + +struct bootrec_comp_id { + __le32 fw_variant; +} __packed; + +struct bootrec_comp_ver { + char fw_version[24]; +} __packed; + +struct bootrec_end { + __le16 crc; + u8 padding[2]; + u8 md5[16]; +} __packed; + +/* provide 16 bytes for the transport back-end */ +#define P54_TX_INFO_DATA_SIZE 16 + +/* stored in ieee80211_tx_info's rate_driver_data */ +struct p54_tx_info { + u32 start_addr; + u32 end_addr; + union { + void *data[P54_TX_INFO_DATA_SIZE / sizeof(void *)]; + struct { + u32 extra_len; + }; + }; +}; + +#define P54_MAX_CTRL_FRAME_LEN 0x1000 + +#define P54_SET_QUEUE(queue, ai_fs, cw_min, cw_max, _txop) \ +do { \ + queue.aifs = cpu_to_le16(ai_fs); \ + queue.cwmin = cpu_to_le16(cw_min); \ + queue.cwmax = cpu_to_le16(cw_max); \ + queue.txop = cpu_to_le16(_txop); \ +} while (0) + +struct p54_edcf_queue_param { + __le16 aifs; + __le16 cwmin; + __le16 cwmax; + __le16 txop; +} __packed; + +struct p54_rssi_linear_approximation { + s16 mul; + s16 add; + s16 longbow_unkn; + s16 longbow_unk2; +}; + +struct p54_cal_database { + size_t entries; + size_t entry_size; + size_t 
offset; + size_t len; + u8 data[0]; +}; + +#define EEPROM_READBACK_LEN 0x3fc + +enum fw_state { + FW_STATE_OFF, + FW_STATE_BOOTING, + FW_STATE_READY, + FW_STATE_RESET, + FW_STATE_RESETTING, +}; + +#ifdef CONFIG_P54_LEDS + +#define P54_LED_MAX_NAME_LEN 31 + +struct p54_led_dev { + struct ieee80211_hw *hw_dev; + struct led_classdev led_dev; + char name[P54_LED_MAX_NAME_LEN + 1]; + + unsigned int toggled; + unsigned int index; + unsigned int registered; +}; + +#endif /* CONFIG_P54_LEDS */ + +struct p54_tx_queue_stats { + unsigned int len; + unsigned int limit; + unsigned int count; +}; + +struct p54_common { + struct ieee80211_hw *hw; + struct ieee80211_vif *vif; + void (*tx)(struct ieee80211_hw *dev, struct sk_buff *skb); + int (*open)(struct ieee80211_hw *dev); + void (*stop)(struct ieee80211_hw *dev); + struct sk_buff_head tx_pending; + struct sk_buff_head tx_queue; + struct mutex conf_mutex; + + /* memory management (as seen by the firmware) */ + u32 rx_start; + u32 rx_end; + u16 rx_mtu; + u8 headroom; + u8 tailroom; + + /* firmware/hardware info */ + unsigned int tx_hdr_len; + unsigned int fw_var; + unsigned int fw_interface; + u8 version; + + /* (e)DCF / QOS state */ + bool use_short_slot; + spinlock_t tx_stats_lock; + struct p54_tx_queue_stats tx_stats[8]; + struct p54_edcf_queue_param qos_params[8]; + + /* Radio data */ + u16 rxhw; + u8 rx_diversity_mask; + u8 tx_diversity_mask; + unsigned int output_power; + int noise; + /* calibration, output power limit and rssi<->dBm conversation data */ + struct pda_iq_autocal_entry *iq_autocal; + unsigned int iq_autocal_len; + struct p54_cal_database *curve_data; + struct p54_cal_database *output_limit; + struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS]; + struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS]; + + /* BBP/MAC state */ + u8 mac_addr[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + u16 wakeup_timer; + unsigned int filter_flags; + int mode; + u32 tsf_low32, tsf_high32; + u32 basic_rate_mask; + u16 aid; + bool powersave_override; + __le32 beacon_req_id; + struct completion beacon_comp; + + /* cryptographic engine information */ + u8 privacy_caps; + u8 rx_keycache_size; + unsigned long *used_rxkeys; + + /* LED management */ +#ifdef CONFIG_P54_LEDS + struct p54_led_dev leds[4]; + struct delayed_work led_work; +#endif /* CONFIG_P54_LEDS */ + u16 softled_state; /* bit field of glowing LEDs */ + + /* statistics */ + struct ieee80211_low_level_stats stats; + struct delayed_work work; + + /* eeprom handling */ + void *eeprom; + struct completion eeprom_comp; + struct mutex eeprom_mutex; +}; + +/* interfaces for the drivers */ +int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb); +void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb); +int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw); +int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len); +int p54_read_eeprom(struct ieee80211_hw *dev); + +struct ieee80211_hw *p54_init_common(size_t priv_data_len); +int p54_register_common(struct ieee80211_hw *dev, struct device *pdev); +void p54_free_common(struct ieee80211_hw *dev); + +void p54_unregister_common(struct ieee80211_hw *dev); + +#endif /* P54_H */ diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c new file mode 100644 index 0000000..ed4bdff --- /dev/null +++ b/drivers/net/wireless/p54/p54pci.c @@ -0,0 +1,675 @@ + +/* + * Linux device driver for PCI based Prism54 + * + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2008, Christian 
Lamparter + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "p54.h" +#include "lmac.h" +#include "p54pci.h" + +MODULE_AUTHOR("Michael Wu "); +MODULE_DESCRIPTION("Prism54 PCI wireless driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("prism54pci"); +MODULE_FIRMWARE("isl3886pci"); + +static DEFINE_PCI_DEVICE_TABLE(p54p_table) = { + /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ + { PCI_DEVICE(0x1260, 0x3890) }, + /* 3COM 3CRWE154G72 Wireless LAN adapter */ + { PCI_DEVICE(0x10b7, 0x6001) }, + /* Intersil PRISM Indigo Wireless LAN adapter */ + { PCI_DEVICE(0x1260, 0x3877) }, + /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */ + { PCI_DEVICE(0x1260, 0x3886) }, + { }, +}; + +MODULE_DEVICE_TABLE(pci, p54p_table); + +static int p54p_upload_firmware(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + __le32 reg; + int err; + __le32 *data; + u32 remains, left, device_addr; + + P54P_WRITE(int_enable, cpu_to_le32(0)); + P54P_READ(int_enable); + udelay(10); + + reg = P54P_READ(ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT); + P54P_WRITE(ctrl_stat, reg); + P54P_READ(ctrl_stat); + udelay(10); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + udelay(10); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + + /* wait for the firmware to reset properly */ + mdelay(10); + + err = p54_parse_firmware(dev, priv->firmware); + if (err) + return err; + + if (priv->common.fw_interface != FW_LM86) { + dev_err(&priv->pdev->dev, "wrong firmware, " + "please get a LM86(PCI) firmware a try again.\n"); + return -EINVAL; + } + + data = (__le32 *) priv->firmware->data; + remains = priv->firmware->size; + device_addr = ISL38XX_DEV_FIRMWARE_ADDR; + while (remains) { + u32 i = 0; + left = min((u32)0x1000, remains); + P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr)); + P54P_READ(int_enable); + + device_addr += 0x1000; + while (i < left) { + P54P_WRITE(direct_mem_win[i], *data++); + i += sizeof(u32); + } + + remains -= left; + P54P_READ(int_enable); + } + + reg = P54P_READ(ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT); + P54P_WRITE(ctrl_stat, reg); + P54P_READ(ctrl_stat); + udelay(10); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + udelay(10); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + udelay(10); + + /* wait for the firmware to boot properly */ + mdelay(100); + + return 0; +} + +static void p54p_refill_rx_ring(struct ieee80211_hw *dev, + int ring_index, struct p54p_desc *ring, u32 ring_limit, + struct sk_buff **rx_buf) +{ + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + u32 limit, idx, i; + + idx = le32_to_cpu(ring_control->host_idx[ring_index]); + limit = idx; + limit -= le32_to_cpu(ring_control->device_idx[ring_index]); + limit = ring_limit - limit; + + i = idx % ring_limit; + while (limit-- > 1) { + struct p54p_desc *desc = &ring[i]; + + if (!desc->host_addr) 
{ + struct sk_buff *skb; + dma_addr_t mapping; + skb = dev_alloc_skb(priv->common.rx_mtu + 32); + if (!skb) + break; + + mapping = pci_map_single(priv->pdev, + skb_tail_pointer(skb), + priv->common.rx_mtu + 32, + PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(priv->pdev, mapping)) { + dev_kfree_skb_any(skb); + dev_err(&priv->pdev->dev, + "RX DMA Mapping error\n"); + break; + } + + desc->host_addr = cpu_to_le32(mapping); + desc->device_addr = 0; // FIXME: necessary? + desc->len = cpu_to_le16(priv->common.rx_mtu + 32); + desc->flags = 0; + rx_buf[i] = skb; + } + + i++; + idx++; + i %= ring_limit; + } + + wmb(); + ring_control->host_idx[ring_index] = cpu_to_le32(idx); +} + +static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, + int ring_index, struct p54p_desc *ring, u32 ring_limit, + struct sk_buff **rx_buf) +{ + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + struct p54p_desc *desc; + u32 idx, i; + + i = (*index) % ring_limit; + (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]); + idx %= ring_limit; + while (i != idx) { + u16 len; + struct sk_buff *skb; + desc = &ring[i]; + len = le16_to_cpu(desc->len); + skb = rx_buf[i]; + + if (!skb) { + i++; + i %= ring_limit; + continue; + } + + if (unlikely(len > priv->common.rx_mtu)) { + if (net_ratelimit()) + dev_err(&priv->pdev->dev, "rx'd frame size " + "exceeds length threshold.\n"); + + len = priv->common.rx_mtu; + } + skb_put(skb, len); + + if (p54_rx(dev, skb)) { + pci_unmap_single(priv->pdev, + le32_to_cpu(desc->host_addr), + priv->common.rx_mtu + 32, + PCI_DMA_FROMDEVICE); + rx_buf[i] = NULL; + desc->host_addr = 0; + } else { + skb_trim(skb, 0); + desc->len = cpu_to_le16(priv->common.rx_mtu + 32); + } + + i++; + i %= ring_limit; + } + + p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf); +} + +static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, + int ring_index, struct p54p_desc *ring, u32 ring_limit, + struct sk_buff **tx_buf) +{ + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + struct p54p_desc *desc; + struct sk_buff *skb; + u32 idx, i; + + i = (*index) % ring_limit; + (*index) = idx = le32_to_cpu(ring_control->device_idx[1]); + idx %= ring_limit; + + while (i != idx) { + desc = &ring[i]; + + skb = tx_buf[i]; + tx_buf[i] = NULL; + + pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), + le16_to_cpu(desc->len), PCI_DMA_TODEVICE); + + desc->host_addr = 0; + desc->device_addr = 0; + desc->len = 0; + desc->flags = 0; + + if (skb && FREE_AFTER_TX(skb)) + p54_free_skb(dev, skb); + + i++; + i %= ring_limit; + } +} + +static void p54p_tasklet(unsigned long dev_id) +{ + struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id; + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + + p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt, + ARRAY_SIZE(ring_control->tx_mgmt), + priv->tx_buf_mgmt); + + p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data, + ARRAY_SIZE(ring_control->tx_data), + priv->tx_buf_data); + + p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt, + ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt); + + p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data, + ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data); + + wmb(); + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); +} + +static irqreturn_t p54p_interrupt(int irq, void 
*dev_id) +{ + struct ieee80211_hw *dev = dev_id; + struct p54p_priv *priv = dev->priv; + __le32 reg; + + reg = P54P_READ(int_ident); + if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) { + goto out; + } + P54P_WRITE(int_ack, reg); + + reg &= P54P_READ(int_enable); + + if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) + tasklet_schedule(&priv->tasklet); + else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT)) + complete(&priv->boot_comp); + +out: + return reg ? IRQ_HANDLED : IRQ_NONE; +} + +static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + unsigned long flags; + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + struct p54p_desc *desc; + dma_addr_t mapping; + u32 device_idx, idx, i; + + spin_lock_irqsave(&priv->lock, flags); + device_idx = le32_to_cpu(ring_control->device_idx[1]); + idx = le32_to_cpu(ring_control->host_idx[1]); + i = idx % ARRAY_SIZE(ring_control->tx_data); + + mapping = pci_map_single(priv->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(priv->pdev, mapping)) { + spin_unlock_irqrestore(&priv->lock, flags); + p54_free_skb(dev, skb); + dev_err(&priv->pdev->dev, "TX DMA mapping error\n"); + return ; + } + priv->tx_buf_data[i] = skb; + + desc = &ring_control->tx_data[i]; + desc->host_addr = cpu_to_le32(mapping); + desc->device_addr = ((struct p54_hdr *)skb->data)->req_id; + desc->len = cpu_to_le16(skb->len); + desc->flags = 0; + + wmb(); + ring_control->host_idx[1] = cpu_to_le32(idx + 1); + spin_unlock_irqrestore(&priv->lock, flags); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); + P54P_READ(dev_int); +} + +static void p54p_stop(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + struct p54p_ring_control *ring_control = priv->ring_control; + unsigned int i; + struct p54p_desc *desc; + + P54P_WRITE(int_enable, cpu_to_le32(0)); + P54P_READ(int_enable); + udelay(10); + + free_irq(priv->pdev->irq, dev); + + tasklet_kill(&priv->tasklet); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + + for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) { + desc = &ring_control->rx_data[i]; + if (desc->host_addr) + pci_unmap_single(priv->pdev, + le32_to_cpu(desc->host_addr), + priv->common.rx_mtu + 32, + PCI_DMA_FROMDEVICE); + kfree_skb(priv->rx_buf_data[i]); + priv->rx_buf_data[i] = NULL; + } + + for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) { + desc = &ring_control->rx_mgmt[i]; + if (desc->host_addr) + pci_unmap_single(priv->pdev, + le32_to_cpu(desc->host_addr), + priv->common.rx_mtu + 32, + PCI_DMA_FROMDEVICE); + kfree_skb(priv->rx_buf_mgmt[i]); + priv->rx_buf_mgmt[i] = NULL; + } + + for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) { + desc = &ring_control->tx_data[i]; + if (desc->host_addr) + pci_unmap_single(priv->pdev, + le32_to_cpu(desc->host_addr), + le16_to_cpu(desc->len), + PCI_DMA_TODEVICE); + + p54_free_skb(dev, priv->tx_buf_data[i]); + priv->tx_buf_data[i] = NULL; + } + + for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) { + desc = &ring_control->tx_mgmt[i]; + if (desc->host_addr) + pci_unmap_single(priv->pdev, + le32_to_cpu(desc->host_addr), + le16_to_cpu(desc->len), + PCI_DMA_TODEVICE); + + p54_free_skb(dev, priv->tx_buf_mgmt[i]); + priv->tx_buf_mgmt[i] = NULL; + } + + memset(ring_control, 0, sizeof(*ring_control)); +} + +static int p54p_open(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + int err; + + init_completion(&priv->boot_comp); + err = request_irq(priv->pdev->irq, p54p_interrupt, + IRQF_SHARED, "p54pci", 
dev); + if (err) { + dev_err(&priv->pdev->dev, "failed to register IRQ handler\n"); + return err; + } + + memset(priv->ring_control, 0, sizeof(*priv->ring_control)); + err = p54p_upload_firmware(dev); + if (err) { + free_irq(priv->pdev->irq, dev); + return err; + } + priv->rx_idx_data = priv->tx_idx_data = 0; + priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0; + + p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data, + ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data); + + p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt, + ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt); + + P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma)); + P54P_READ(ring_control_base); + wmb(); + udelay(10); + + P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT)); + P54P_READ(int_enable); + wmb(); + udelay(10); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + P54P_READ(dev_int); + + if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { + printk(KERN_ERR "%s: Cannot boot firmware!\n", + wiphy_name(dev->wiphy)); + p54p_stop(dev); + return -ETIMEDOUT; + } + + P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)); + P54P_READ(int_enable); + wmb(); + udelay(10); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); + P54P_READ(dev_int); + wmb(); + udelay(10); + + return 0; +} + +static int __devinit p54p_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct p54p_priv *priv; + struct ieee80211_hw *dev; + unsigned long mem_addr, mem_len; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable new PCI device\n"); + return err; + } + + mem_addr = pci_resource_start(pdev, 0); + mem_len = pci_resource_len(pdev, 0); + if (mem_len < sizeof(struct p54p_csr)) { + dev_err(&pdev->dev, "Too short PCI resources\n"); + goto err_disable_dev; + } + + err = pci_request_regions(pdev, "p54pci"); + if (err) { + dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); + goto err_disable_dev; + } + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + dev_err(&pdev->dev, "No suitable DMA available\n"); + goto err_free_reg; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + pci_write_config_byte(pdev, 0x40, 0); + pci_write_config_byte(pdev, 0x41, 0); + + dev = p54_init_common(sizeof(*priv)); + if (!dev) { + dev_err(&pdev->dev, "ieee80211 alloc failed\n"); + err = -ENOMEM; + goto err_free_reg; + } + + priv = dev->priv; + priv->pdev = pdev; + + SET_IEEE80211_DEV(dev, &pdev->dev); + pci_set_drvdata(pdev, dev); + + priv->map = ioremap(mem_addr, mem_len); + if (!priv->map) { + dev_err(&pdev->dev, "Cannot map device memory\n"); + err = -ENOMEM; + goto err_free_dev; + } + + priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control), + &priv->ring_control_dma); + if (!priv->ring_control) { + dev_err(&pdev->dev, "Cannot allocate rings\n"); + err = -ENOMEM; + goto err_iounmap; + } + priv->common.open = p54p_open; + priv->common.stop = p54p_stop; + priv->common.tx = p54p_tx; + + spin_lock_init(&priv->lock); + tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev); + + err = request_firmware(&priv->firmware, "isl3886pci", + &priv->pdev->dev); + if (err) { + dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n"); + err = request_firmware(&priv->firmware, "isl3886", + &priv->pdev->dev); + if (err) + goto err_free_common; + } + + err = p54p_open(dev); + if (err) + goto err_free_common; + err = p54_read_eeprom(dev); + 
p54p_stop(dev); + if (err) + goto err_free_common; + + err = p54_register_common(dev, &pdev->dev); + if (err) + goto err_free_common; + + return 0; + + err_free_common: + release_firmware(priv->firmware); + pci_free_consistent(pdev, sizeof(*priv->ring_control), + priv->ring_control, priv->ring_control_dma); + + err_iounmap: + iounmap(priv->map); + + err_free_dev: + pci_set_drvdata(pdev, NULL); + p54_free_common(dev); + + err_free_reg: + pci_release_regions(pdev); + err_disable_dev: + pci_disable_device(pdev); + return err; +} + +static void __devexit p54p_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct p54p_priv *priv; + + if (!dev) + return; + + p54_unregister_common(dev); + priv = dev->priv; + release_firmware(priv->firmware); + pci_free_consistent(pdev, sizeof(*priv->ring_control), + priv->ring_control, priv->ring_control_dma); + iounmap(priv->map); + pci_release_regions(pdev); + pci_disable_device(pdev); + p54_free_common(dev); +} + +#ifdef CONFIG_PM +static int p54p_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct p54p_priv *priv = dev->priv; + + if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) { + ieee80211_stop_queues(dev); + p54p_stop(dev); + } + + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int p54p_resume(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct p54p_priv *priv = dev->priv; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) { + p54p_open(dev); + ieee80211_wake_queues(dev); + } + + return 0; +} +#endif /* CONFIG_PM */ + +static struct pci_driver p54p_driver = { + .name = "p54pci", + .id_table = p54p_table, + .probe = p54p_probe, + .remove = __devexit_p(p54p_remove), +#ifdef CONFIG_PM + .suspend = p54p_suspend, + .resume = p54p_resume, +#endif /* CONFIG_PM */ +}; + +static int __init p54p_init(void) +{ + return pci_register_driver(&p54p_driver); +} + +static void __exit p54p_exit(void) +{ + pci_unregister_driver(&p54p_driver); +} + +module_init(p54p_init); +module_exit(p54p_exit); diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h new file mode 100644 index 0000000..2feead6 --- /dev/null +++ b/drivers/net/wireless/p54/p54pci.h @@ -0,0 +1,110 @@ +#ifndef P54PCI_H +#define P54PCI_H + +/* + * Defines for PCI based mac80211 Prism54 driver + * + * Copyright (c) 2006, Michael Wu + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
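
Editor's note: p54p_probe() above unwinds partial initialization through a chain of labels (err_free_common, err_iounmap, err_free_dev, err_free_reg, err_disable_dev), so each failure point releases exactly the resources acquired before it, in reverse order. The stand-alone sketch below shows the same goto-unwind idiom in isolation; the three heap "resources" and the function name setup_device are made up purely for illustration.

/* Stand-alone illustration of the goto-unwind idiom used by p54p_probe():
 * acquire resources in order, release them in reverse order on failure. */
#include <stdio.h>
#include <stdlib.h>

static int setup_device(void)
{
	char *regs, *ring, *fw;
	int err = -1;

	regs = malloc(64);		/* cf. ioremap() */
	if (!regs)
		goto err_out;

	ring = malloc(608);		/* cf. pci_alloc_consistent() */
	if (!ring)
		goto err_free_regs;

	fw = malloc(8192);		/* cf. request_firmware() */
	if (!fw)
		goto err_free_ring;

	printf("device ready\n");
	free(fw);			/* normal teardown, kept short here */
	free(ring);
	free(regs);
	return 0;

err_free_ring:
	free(ring);
err_free_regs:
	free(regs);
err_out:
	return err;
}

int main(void)
{
	return setup_device() ? 1 : 0;
}
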
+ */ + +/* Device Interrupt register bits */ +#define ISL38XX_DEV_INT_RESET 0x0001 +#define ISL38XX_DEV_INT_UPDATE 0x0002 +#define ISL38XX_DEV_INT_WAKEUP 0x0008 +#define ISL38XX_DEV_INT_SLEEP 0x0010 +#define ISL38XX_DEV_INT_ABORT 0x0020 +/* these two only used in USB */ +#define ISL38XX_DEV_INT_DATA 0x0040 +#define ISL38XX_DEV_INT_MGMT 0x0080 + +#define ISL38XX_DEV_INT_PCIUART_CTS 0x4000 +#define ISL38XX_DEV_INT_PCIUART_DR 0x8000 + +/* Interrupt Identification/Acknowledge/Enable register bits */ +#define ISL38XX_INT_IDENT_UPDATE 0x0002 +#define ISL38XX_INT_IDENT_INIT 0x0004 +#define ISL38XX_INT_IDENT_WAKEUP 0x0008 +#define ISL38XX_INT_IDENT_SLEEP 0x0010 +#define ISL38XX_INT_IDENT_PCIUART_CTS 0x4000 +#define ISL38XX_INT_IDENT_PCIUART_DR 0x8000 + +/* Control/Status register bits */ +#define ISL38XX_CTRL_STAT_SLEEPMODE 0x00000200 +#define ISL38XX_CTRL_STAT_CLKRUN 0x00800000 +#define ISL38XX_CTRL_STAT_RESET 0x10000000 +#define ISL38XX_CTRL_STAT_RAMBOOT 0x20000000 +#define ISL38XX_CTRL_STAT_STARTHALTED 0x40000000 +#define ISL38XX_CTRL_STAT_HOST_OVERRIDE 0x80000000 + +struct p54p_csr { + __le32 dev_int; + u8 unused_1[12]; + __le32 int_ident; + __le32 int_ack; + __le32 int_enable; + u8 unused_2[4]; + union { + __le32 ring_control_base; + __le32 gen_purp_com[2]; + }; + u8 unused_3[8]; + __le32 direct_mem_base; + u8 unused_4[44]; + __le32 dma_addr; + __le32 dma_len; + __le32 dma_ctrl; + u8 unused_5[12]; + __le32 ctrl_stat; + u8 unused_6[1924]; + u8 cardbus_cis[0x800]; + u8 direct_mem_win[0x1000]; +} __attribute__ ((packed)); + +/* usb backend only needs the register defines above */ +#ifndef P54USB_H +struct p54p_desc { + __le32 host_addr; + __le32 device_addr; + __le16 len; + __le16 flags; +} __attribute__ ((packed)); + +struct p54p_ring_control { + __le32 host_idx[4]; + __le32 device_idx[4]; + struct p54p_desc rx_data[8]; + struct p54p_desc tx_data[32]; + struct p54p_desc rx_mgmt[4]; + struct p54p_desc tx_mgmt[4]; +} __attribute__ ((packed)); + +#define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r) +#define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r) + +struct p54p_priv { + struct p54_common common; + struct pci_dev *pdev; + struct p54p_csr __iomem *map; + struct tasklet_struct tasklet; + const struct firmware *firmware; + spinlock_t lock; + struct p54p_ring_control *ring_control; + dma_addr_t ring_control_dma; + u32 rx_idx_data, tx_idx_data; + u32 rx_idx_mgmt, tx_idx_mgmt; + struct sk_buff *rx_buf_data[8]; + struct sk_buff *rx_buf_mgmt[4]; + struct sk_buff *tx_buf_data[32]; + struct sk_buff *tx_buf_mgmt[4]; + struct completion boot_comp; +}; + +#endif /* P54USB_H */ +#endif /* P54PCI_H */ diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c new file mode 100644 index 0000000..afd26bf --- /dev/null +++ b/drivers/net/wireless/p54/p54spi.c @@ -0,0 +1,734 @@ +/* + * Copyright (C) 2008 Christian Lamparter + * Copyright 2008 Johannes Berg + * + * This driver is a port from stlc45xx: + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
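
Editor's note: both p54p_desc and p54p_ring_control describe memory that the firmware walks directly over DMA, which is why every field is a fixed-width little-endian type and the structures are packed. A quick host-side check of the resulting layout (12 bytes per descriptor, 608 bytes for the whole ring block, given the array sizes shown above) can be written as a few static assertions; uint32_t/uint16_t stand in for the kernel's __le32/__le16 in this sketch.

/* Host-side sanity check of the DMA descriptor layout above.  The expected
 * sizes follow from the array sizes in p54p_ring_control. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct desc {
	uint32_t host_addr;
	uint32_t device_addr;
	uint16_t len;
	uint16_t flags;
} __attribute__((packed));

struct ring_control {
	uint32_t host_idx[4];
	uint32_t device_idx[4];
	struct desc rx_data[8];
	struct desc tx_data[32];
	struct desc rx_mgmt[4];
	struct desc tx_mgmt[4];
} __attribute__((packed));

_Static_assert(sizeof(struct desc) == 12, "descriptor must be 12 bytes");
_Static_assert(sizeof(struct ring_control) == 608, "ring block must be 608 bytes");
_Static_assert(offsetof(struct ring_control, tx_data) == 128,
	       "tx_data ring starts right after the rx_data ring");

int main(void)
{
	printf("desc=%zu ring_control=%zu\n",
	       sizeof(struct desc), sizeof(struct ring_control));
	return 0;
}
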
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "p54spi.h" +#include "p54spi_eeprom.h" +#include "p54.h" + +#include "lmac.h" + +MODULE_FIRMWARE("3826.arm"); +MODULE_ALIAS("stlc45xx"); + +/* + * gpios should be handled in board files and provided via platform data, + * but because it's currently impossible for p54spi to have a header file + * in include/linux, let's use module paramaters for now + */ + +static int p54spi_gpio_power = 97; +module_param(p54spi_gpio_power, int, 0444); +MODULE_PARM_DESC(p54spi_gpio_power, "gpio number for power line"); + +static int p54spi_gpio_irq = 87; +module_param(p54spi_gpio_irq, int, 0444); +MODULE_PARM_DESC(p54spi_gpio_irq, "gpio number for irq line"); + +static void p54spi_spi_read(struct p54s_priv *priv, u8 address, + void *buf, size_t len) +{ + struct spi_transfer t[2]; + struct spi_message m; + __le16 addr; + + /* We first push the address */ + addr = cpu_to_le16(address << 8 | SPI_ADRS_READ_BIT_15); + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = &addr; + t[0].len = sizeof(addr); + spi_message_add_tail(&t[0], &m); + + t[1].rx_buf = buf; + t[1].len = len; + spi_message_add_tail(&t[1], &m); + + spi_sync(priv->spi, &m); +} + + +static void p54spi_spi_write(struct p54s_priv *priv, u8 address, + const void *buf, size_t len) +{ + struct spi_transfer t[3]; + struct spi_message m; + __le16 addr; + + /* We first push the address */ + addr = cpu_to_le16(address << 8); + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = &addr; + t[0].len = sizeof(addr); + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = buf; + t[1].len = len & ~1; + spi_message_add_tail(&t[1], &m); + + if (len % 2) { + __le16 last_word; + last_word = cpu_to_le16(((u8 *)buf)[len - 1]); + + t[2].tx_buf = &last_word; + t[2].len = sizeof(last_word); + spi_message_add_tail(&t[2], &m); + } + + spi_sync(priv->spi, &m); +} + +static u32 p54spi_read32(struct p54s_priv *priv, u8 addr) +{ + __le32 val; + + p54spi_spi_read(priv, addr, &val, sizeof(val)); + + return le32_to_cpu(val); +} + +static inline void p54spi_write16(struct p54s_priv *priv, u8 addr, __le16 val) +{ + p54spi_spi_write(priv, addr, &val, sizeof(val)); +} + +static inline void p54spi_write32(struct p54s_priv *priv, u8 addr, __le32 val) +{ + p54spi_spi_write(priv, addr, &val, sizeof(val)); +} + +static int p54spi_wait_bit(struct p54s_priv *priv, u16 reg, u32 bits) +{ + int i; + + for (i = 0; i < 2000; i++) { + u32 buffer = p54spi_read32(priv, reg); + if ((buffer & bits) == bits) + return 1; + } + return 0; +} + +static int p54spi_spi_write_dma(struct p54s_priv *priv, __le32 base, + const void *buf, size_t len) +{ + if (!p54spi_wait_bit(priv, SPI_ADRS_DMA_WRITE_CTRL, HOST_ALLOWED)) { + dev_err(&priv->spi->dev, "spi_write_dma not allowed " + "to DMA write.\n"); + return -EAGAIN; + } + + p54spi_write16(priv, SPI_ADRS_DMA_WRITE_CTRL, + cpu_to_le16(SPI_DMA_WRITE_CTRL_ENABLE)); + + p54spi_write16(priv, SPI_ADRS_DMA_WRITE_LEN, cpu_to_le16(len)); + p54spi_write32(priv, SPI_ADRS_DMA_WRITE_BASE, base); + p54spi_spi_write(priv, SPI_ADRS_DMA_DATA, buf, len); + return 0; +} + +static int p54spi_request_firmware(struct ieee80211_hw *dev) +{ + struct p54s_priv *priv = dev->priv; + int ret; + + /* FIXME: should driver use it's own 
struct device? */ + ret = request_firmware(&priv->firmware, "3826.arm", &priv->spi->dev); + + if (ret < 0) { + dev_err(&priv->spi->dev, "request_firmware() failed: %d", ret); + return ret; + } + + ret = p54_parse_firmware(dev, priv->firmware); + if (ret) { + release_firmware(priv->firmware); + return ret; + } + + return 0; +} + +static int p54spi_request_eeprom(struct ieee80211_hw *dev) +{ + struct p54s_priv *priv = dev->priv; + const struct firmware *eeprom; + int ret; + + /* + * allow users to customize their eeprom. + */ + + ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev); + if (ret < 0) { + dev_info(&priv->spi->dev, "loading default eeprom...\n"); + ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom, + sizeof(p54spi_eeprom)); + } else { + dev_info(&priv->spi->dev, "loading user eeprom...\n"); + ret = p54_parse_eeprom(dev, (void *) eeprom->data, + (int)eeprom->size); + release_firmware(eeprom); + } + return ret; +} + +static int p54spi_upload_firmware(struct ieee80211_hw *dev) +{ + struct p54s_priv *priv = dev->priv; + unsigned long fw_len, _fw_len; + unsigned int offset = 0; + int err = 0; + u8 *fw; + + fw_len = priv->firmware->size; + fw = kmemdup(priv->firmware->data, fw_len, GFP_KERNEL); + if (!fw) + return -ENOMEM; + + /* stop the device */ + p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( + SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET | + SPI_CTRL_STAT_START_HALTED)); + + msleep(TARGET_BOOT_SLEEP); + + p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( + SPI_CTRL_STAT_HOST_OVERRIDE | + SPI_CTRL_STAT_START_HALTED)); + + msleep(TARGET_BOOT_SLEEP); + + while (fw_len > 0) { + _fw_len = min_t(long, fw_len, SPI_MAX_PACKET_SIZE); + + err = p54spi_spi_write_dma(priv, cpu_to_le32( + ISL38XX_DEV_FIRMWARE_ADDR + offset), + (fw + offset), _fw_len); + if (err < 0) + goto out; + + fw_len -= _fw_len; + offset += _fw_len; + } + + BUG_ON(fw_len != 0); + + /* enable host interrupts */ + p54spi_write32(priv, SPI_ADRS_HOST_INT_EN, + cpu_to_le32(SPI_HOST_INTS_DEFAULT)); + + /* boot the device */ + p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( + SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET | + SPI_CTRL_STAT_RAM_BOOT)); + + msleep(TARGET_BOOT_SLEEP); + + p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( + SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_RAM_BOOT)); + msleep(TARGET_BOOT_SLEEP); + +out: + kfree(fw); + return err; +} + +static void p54spi_power_off(struct p54s_priv *priv) +{ + disable_irq(gpio_to_irq(p54spi_gpio_irq)); + gpio_set_value(p54spi_gpio_power, 0); +} + +static void p54spi_power_on(struct p54s_priv *priv) +{ + gpio_set_value(p54spi_gpio_power, 1); + enable_irq(gpio_to_irq(p54spi_gpio_irq)); + + /* + * need to wait a while before device can be accessed, the lenght + * is just a guess + */ + msleep(10); +} + +static inline void p54spi_int_ack(struct p54s_priv *priv, u32 val) +{ + p54spi_write32(priv, SPI_ADRS_HOST_INT_ACK, cpu_to_le32(val)); +} + +static int p54spi_wakeup(struct p54s_priv *priv) +{ + /* wake the chip */ + p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS, + cpu_to_le32(SPI_TARGET_INT_WAKEUP)); + + /* And wait for the READY interrupt */ + if (!p54spi_wait_bit(priv, SPI_ADRS_HOST_INTERRUPTS, + SPI_HOST_INT_READY)) { + dev_err(&priv->spi->dev, "INT_READY timeout\n"); + return -EBUSY; + } + + p54spi_int_ack(priv, SPI_HOST_INT_READY); + return 0; +} + +static inline void p54spi_sleep(struct p54s_priv *priv) +{ + p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS, + cpu_to_le32(SPI_TARGET_INT_SLEEP)); +} + +static 
void p54spi_int_ready(struct p54s_priv *priv) +{ + p54spi_write32(priv, SPI_ADRS_HOST_INT_EN, cpu_to_le32( + SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE)); + + switch (priv->fw_state) { + case FW_STATE_BOOTING: + priv->fw_state = FW_STATE_READY; + complete(&priv->fw_comp); + break; + case FW_STATE_RESETTING: + priv->fw_state = FW_STATE_READY; + /* TODO: reinitialize state */ + break; + default: + break; + } +} + +static int p54spi_rx(struct p54s_priv *priv) +{ + struct sk_buff *skb; + u16 len; + u16 rx_head[2]; +#define READAHEAD_SZ (sizeof(rx_head)-sizeof(u16)) + + if (p54spi_wakeup(priv) < 0) + return -EBUSY; + + /* Read data size and first data word in one SPI transaction + * This is workaround for firmware/DMA bug, + * when first data word gets lost under high load. + */ + p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, rx_head, sizeof(rx_head)); + len = rx_head[0]; + + if (len == 0) { + p54spi_sleep(priv); + dev_err(&priv->spi->dev, "rx request of zero bytes\n"); + return 0; + } + + /* Firmware may insert up to 4 padding bytes after the lmac header, + * but it does not amend the size of SPI data transfer. + * Such packets has correct data size in header, thus referencing + * past the end of allocated skb. Reserve extra 4 bytes for this case */ + skb = dev_alloc_skb(len + 4); + if (!skb) { + p54spi_sleep(priv); + dev_err(&priv->spi->dev, "could not alloc skb"); + return -ENOMEM; + } + + if (len <= READAHEAD_SZ) { + memcpy(skb_put(skb, len), rx_head + 1, len); + } else { + memcpy(skb_put(skb, READAHEAD_SZ), rx_head + 1, READAHEAD_SZ); + p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, + skb_put(skb, len - READAHEAD_SZ), + len - READAHEAD_SZ); + } + p54spi_sleep(priv); + /* Put additional bytes to compensate for the possible + * alignment-caused truncation */ + skb_put(skb, 4); + + if (p54_rx(priv->hw, skb) == 0) + dev_kfree_skb(skb); + + return 0; +} + + +static irqreturn_t p54spi_interrupt(int irq, void *config) +{ + struct spi_device *spi = config; + struct p54s_priv *priv = dev_get_drvdata(&spi->dev); + + ieee80211_queue_work(priv->hw, &priv->work); + + return IRQ_HANDLED; +} + +static int p54spi_tx_frame(struct p54s_priv *priv, struct sk_buff *skb) +{ + struct p54_hdr *hdr = (struct p54_hdr *) skb->data; + int ret = 0; + + if (p54spi_wakeup(priv) < 0) + return -EBUSY; + + ret = p54spi_spi_write_dma(priv, hdr->req_id, skb->data, skb->len); + if (ret < 0) + goto out; + + if (!p54spi_wait_bit(priv, SPI_ADRS_HOST_INTERRUPTS, + SPI_HOST_INT_WR_READY)) { + dev_err(&priv->spi->dev, "WR_READY timeout\n"); + ret = -EAGAIN; + goto out; + } + + p54spi_int_ack(priv, SPI_HOST_INT_WR_READY); + + if (FREE_AFTER_TX(skb)) + p54_free_skb(priv->hw, skb); +out: + p54spi_sleep(priv); + return ret; +} + +static int p54spi_wq_tx(struct p54s_priv *priv) +{ + struct p54s_tx_info *entry; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + struct p54_tx_info *minfo; + struct p54s_tx_info *dinfo; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&priv->tx_lock, flags); + + while (!list_empty(&priv->tx_pending)) { + entry = list_entry(priv->tx_pending.next, + struct p54s_tx_info, tx_list); + + list_del_init(&entry->tx_list); + + spin_unlock_irqrestore(&priv->tx_lock, flags); + + dinfo = container_of((void *) entry, struct p54s_tx_info, + tx_list); + minfo = container_of((void *) dinfo, struct p54_tx_info, + data); + info = container_of((void *) minfo, struct ieee80211_tx_info, + rate_driver_data); + skb = container_of((void *) info, struct sk_buff, cb); + + ret = p54spi_tx_frame(priv, skb); + + if (ret < 0) 
{ + p54_free_skb(priv->hw, skb); + return ret; + } + + spin_lock_irqsave(&priv->tx_lock, flags); + } + spin_unlock_irqrestore(&priv->tx_lock, flags); + return ret; +} + +static void p54spi_op_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54s_priv *priv = dev->priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct p54_tx_info *mi = (struct p54_tx_info *) info->rate_driver_data; + struct p54s_tx_info *di = (struct p54s_tx_info *) mi->data; + unsigned long flags; + + BUILD_BUG_ON(sizeof(*di) > sizeof((mi->data))); + + spin_lock_irqsave(&priv->tx_lock, flags); + list_add_tail(&di->tx_list, &priv->tx_pending); + spin_unlock_irqrestore(&priv->tx_lock, flags); + + ieee80211_queue_work(priv->hw, &priv->work); +} + +static void p54spi_work(struct work_struct *work) +{ + struct p54s_priv *priv = container_of(work, struct p54s_priv, work); + u32 ints; + int ret; + + mutex_lock(&priv->mutex); + + if (priv->fw_state == FW_STATE_OFF) + goto out; + + ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS); + + if (ints & SPI_HOST_INT_READY) { + p54spi_int_ready(priv); + p54spi_int_ack(priv, SPI_HOST_INT_READY); + } + + if (priv->fw_state != FW_STATE_READY) + goto out; + + if (ints & SPI_HOST_INT_UPDATE) { + p54spi_int_ack(priv, SPI_HOST_INT_UPDATE); + ret = p54spi_rx(priv); + if (ret < 0) + goto out; + } + if (ints & SPI_HOST_INT_SW_UPDATE) { + p54spi_int_ack(priv, SPI_HOST_INT_SW_UPDATE); + ret = p54spi_rx(priv); + if (ret < 0) + goto out; + } + + ret = p54spi_wq_tx(priv); +out: + mutex_unlock(&priv->mutex); +} + +static int p54spi_op_start(struct ieee80211_hw *dev) +{ + struct p54s_priv *priv = dev->priv; + unsigned long timeout; + int ret = 0; + + if (mutex_lock_interruptible(&priv->mutex)) { + ret = -EINTR; + goto out; + } + + priv->fw_state = FW_STATE_BOOTING; + + p54spi_power_on(priv); + + ret = p54spi_upload_firmware(dev); + if (ret < 0) { + p54spi_power_off(priv); + goto out_unlock; + } + + mutex_unlock(&priv->mutex); + + timeout = msecs_to_jiffies(2000); + timeout = wait_for_completion_interruptible_timeout(&priv->fw_comp, + timeout); + if (!timeout) { + dev_err(&priv->spi->dev, "firmware boot failed"); + p54spi_power_off(priv); + ret = -1; + goto out; + } + + if (mutex_lock_interruptible(&priv->mutex)) { + ret = -EINTR; + p54spi_power_off(priv); + goto out; + } + + WARN_ON(priv->fw_state != FW_STATE_READY); + +out_unlock: + mutex_unlock(&priv->mutex); + +out: + return ret; +} + +static void p54spi_op_stop(struct ieee80211_hw *dev) +{ + struct p54s_priv *priv = dev->priv; + unsigned long flags; + + if (mutex_lock_interruptible(&priv->mutex)) { + /* FIXME: how to handle this error? 
*/ + return; + } + + WARN_ON(priv->fw_state != FW_STATE_READY); + + cancel_work_sync(&priv->work); + + p54spi_power_off(priv); + spin_lock_irqsave(&priv->tx_lock, flags); + INIT_LIST_HEAD(&priv->tx_pending); + spin_unlock_irqrestore(&priv->tx_lock, flags); + + priv->fw_state = FW_STATE_OFF; + mutex_unlock(&priv->mutex); +} + +static int __devinit p54spi_probe(struct spi_device *spi) +{ + struct p54s_priv *priv = NULL; + struct ieee80211_hw *hw; + int ret = -EINVAL; + + hw = p54_init_common(sizeof(*priv)); + if (!hw) { + dev_err(&spi->dev, "could not alloc ieee80211_hw"); + return -ENOMEM; + } + + priv = hw->priv; + priv->hw = hw; + dev_set_drvdata(&spi->dev, priv); + priv->spi = spi; + + spi->bits_per_word = 16; + spi->max_speed_hz = 24000000; + + ret = spi_setup(spi); + if (ret < 0) { + dev_err(&priv->spi->dev, "spi_setup failed"); + goto err_free_common; + } + + ret = gpio_request(p54spi_gpio_power, "p54spi power"); + if (ret < 0) { + dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret); + goto err_free_common; + } + + ret = gpio_request(p54spi_gpio_irq, "p54spi irq"); + if (ret < 0) { + dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret); + goto err_free_common; + } + + gpio_direction_output(p54spi_gpio_power, 0); + gpio_direction_input(p54spi_gpio_irq); + + ret = request_irq(gpio_to_irq(p54spi_gpio_irq), + p54spi_interrupt, IRQF_DISABLED, "p54spi", + priv->spi); + if (ret < 0) { + dev_err(&priv->spi->dev, "request_irq() failed"); + goto err_free_common; + } + + set_irq_type(gpio_to_irq(p54spi_gpio_irq), + IRQ_TYPE_EDGE_RISING); + + disable_irq(gpio_to_irq(p54spi_gpio_irq)); + + INIT_WORK(&priv->work, p54spi_work); + init_completion(&priv->fw_comp); + INIT_LIST_HEAD(&priv->tx_pending); + mutex_init(&priv->mutex); + SET_IEEE80211_DEV(hw, &spi->dev); + priv->common.open = p54spi_op_start; + priv->common.stop = p54spi_op_stop; + priv->common.tx = p54spi_op_tx; + + ret = p54spi_request_firmware(hw); + if (ret < 0) + goto err_free_common; + + ret = p54spi_request_eeprom(hw); + if (ret) + goto err_free_common; + + ret = p54_register_common(hw, &priv->spi->dev); + if (ret) + goto err_free_common; + + return 0; + +err_free_common: + p54_free_common(priv->hw); + return ret; +} + +static int __devexit p54spi_remove(struct spi_device *spi) +{ + struct p54s_priv *priv = dev_get_drvdata(&spi->dev); + + p54_unregister_common(priv->hw); + + free_irq(gpio_to_irq(p54spi_gpio_irq), spi); + + gpio_free(p54spi_gpio_power); + gpio_free(p54spi_gpio_irq); + release_firmware(priv->firmware); + + mutex_destroy(&priv->mutex); + + p54_free_common(priv->hw); + + return 0; +} + + +static struct spi_driver p54spi_driver = { + .driver = { + /* use cx3110x name because board-n800.c uses that for the + * SPI port */ + .name = "cx3110x", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + + .probe = p54spi_probe, + .remove = __devexit_p(p54spi_remove), +}; + +static int __init p54spi_init(void) +{ + int ret; + + ret = spi_register_driver(&p54spi_driver); + if (ret < 0) { + printk(KERN_ERR "failed to register SPI driver: %d", ret); + goto out; + } + +out: + return ret; +} + +static void __exit p54spi_exit(void) +{ + spi_unregister_driver(&p54spi_driver); +} + +module_init(p54spi_init); +module_exit(p54spi_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Christian Lamparter "); +MODULE_ALIAS("spi:cx3110x"); diff --git a/drivers/net/wireless/p54/p54spi.h b/drivers/net/wireless/p54/p54spi.h new file mode 100644 index 0000000..7fbe8d8 --- /dev/null +++ b/drivers/net/wireless/p54/p54spi.h @@ -0,0 +1,125 
@@ +/* + * Copyright (C) 2008 Christian Lamparter + * + * This driver is a port from stlc45xx: + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef P54SPI_H +#define P54SPI_H + +#include +#include +#include + +#include "p54.h" + +/* Bit 15 is read/write bit; ON = READ, OFF = WRITE */ +#define SPI_ADRS_READ_BIT_15 0x8000 + +#define SPI_ADRS_ARM_INTERRUPTS 0x00 +#define SPI_ADRS_ARM_INT_EN 0x04 + +#define SPI_ADRS_HOST_INTERRUPTS 0x08 +#define SPI_ADRS_HOST_INT_EN 0x0c +#define SPI_ADRS_HOST_INT_ACK 0x10 + +#define SPI_ADRS_GEN_PURP_1 0x14 +#define SPI_ADRS_GEN_PURP_2 0x18 + +#define SPI_ADRS_DEV_CTRL_STAT 0x26 /* high word */ + +#define SPI_ADRS_DMA_DATA 0x28 + +#define SPI_ADRS_DMA_WRITE_CTRL 0x2c +#define SPI_ADRS_DMA_WRITE_LEN 0x2e +#define SPI_ADRS_DMA_WRITE_BASE 0x30 + +#define SPI_ADRS_DMA_READ_CTRL 0x34 +#define SPI_ADRS_DMA_READ_LEN 0x36 +#define SPI_ADRS_DMA_READ_BASE 0x38 + +#define SPI_CTRL_STAT_HOST_OVERRIDE 0x8000 +#define SPI_CTRL_STAT_START_HALTED 0x4000 +#define SPI_CTRL_STAT_RAM_BOOT 0x2000 +#define SPI_CTRL_STAT_HOST_RESET 0x1000 +#define SPI_CTRL_STAT_HOST_CPU_EN 0x0800 + +#define SPI_DMA_WRITE_CTRL_ENABLE 0x0001 +#define SPI_DMA_READ_CTRL_ENABLE 0x0001 +#define HOST_ALLOWED (1 << 7) + +#define SPI_TIMEOUT 100 /* msec */ + +#define SPI_MAX_TX_PACKETS 32 + +#define SPI_MAX_PACKET_SIZE 32767 + +#define SPI_TARGET_INT_WAKEUP 0x00000001 +#define SPI_TARGET_INT_SLEEP 0x00000002 +#define SPI_TARGET_INT_RDDONE 0x00000004 + +#define SPI_TARGET_INT_CTS 0x00004000 +#define SPI_TARGET_INT_DR 0x00008000 + +#define SPI_HOST_INT_READY 0x00000001 +#define SPI_HOST_INT_WR_READY 0x00000002 +#define SPI_HOST_INT_SW_UPDATE 0x00000004 +#define SPI_HOST_INT_UPDATE 0x10000000 + +/* clear to send */ +#define SPI_HOST_INT_CR 0x00004000 + +/* data ready */ +#define SPI_HOST_INT_DR 0x00008000 + +#define SPI_HOST_INTS_DEFAULT \ + (SPI_HOST_INT_READY | SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE) + +#define TARGET_BOOT_SLEEP 50 + +struct p54s_dma_regs { + __le16 cmd; + __le16 len; + __le32 addr; +} __attribute__ ((packed)); + +struct p54s_tx_info { + struct list_head tx_list; +}; + +struct p54s_priv { + /* p54_common has to be the first entry */ + struct p54_common common; + struct ieee80211_hw *hw; + struct spi_device *spi; + + struct work_struct work; + + struct mutex mutex; + struct completion fw_comp; + + spinlock_t tx_lock; + + /* protected by tx_lock */ + struct list_head tx_pending; + + enum fw_state fw_state; + const struct firmware *firmware; +}; + +#endif /* P54SPI_H */ diff --git a/drivers/net/wireless/p54/p54spi_eeprom.h b/drivers/net/wireless/p54/p54spi_eeprom.h new file mode 100644 index 0000000..1ea1050 --- /dev/null +++ b/drivers/net/wireless/p54/p54spi_eeprom.h @@ -0,0 +1,678 @@ +/* + * Copyright (C) 2003 Conexant Americas Inc. All Rights Reserved. 
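
Editor's note: the register map above is addressed over SPI with a single 16-bit command word: the register address sits in the high byte and bit 15 (SPI_ADRS_READ_BIT_15) selects read versus write, which is what p54spi_spi_read()/p54spi_spi_write() build before the payload transfer; writes of odd length are additionally padded out to a whole 16-bit word. The sketch below shows that framing in host byte order only (the driver also converts the word with cpu_to_le16()); the sample payload is invented.

/* Stand-alone sketch of the SPI command word and odd-length padding used by
 * p54spi_spi_read()/p54spi_spi_write().  Host byte order only. */
#include <stdint.h>
#include <stdio.h>

#define SPI_ADRS_READ_BIT_15	0x8000	/* bit 15: ON = read, OFF = write */
#define SPI_ADRS_DMA_DATA	0x28	/* example register from p54spi.h */

static uint16_t spi_cmd_word(uint8_t address, int is_read)
{
	uint16_t w = (uint16_t)address << 8;

	if (is_read)
		w |= SPI_ADRS_READ_BIT_15;
	return w;
}

int main(void)
{
	const uint8_t payload[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	size_t even_len = sizeof(payload) & ~(size_t)1;	/* cf. len & ~1 */
	uint16_t last_word;

	printf("read  cmd: 0x%04x\n", spi_cmd_word(SPI_ADRS_DMA_DATA, 1));
	printf("write cmd: 0x%04x\n", spi_cmd_word(SPI_ADRS_DMA_DATA, 0));

	/* an odd trailing byte is sent as its own zero-padded 16-bit word */
	if (sizeof(payload) % 2) {
		last_word = payload[sizeof(payload) - 1];
		printf("padded tail word: 0x%04x after %zu even bytes\n",
		       last_word, even_len);
	}
	return 0;
}
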
+ * Copyright (C) 2004, 2005, 2006 Nokia Corporation + * Copyright 2008 Johannes Berg + * Copyright 2008 Christian Lamparter + * + * based on: + * - cx3110x's pda.h from Nokia + * - cx3110-transfer.log by Johannes Berg + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef P54SPI_EEPROM_H +#define P54SPI_EEPROM_H + +static unsigned char p54spi_eeprom[] = { + +/* struct eeprom_pda_wrap */ +0x47, 0x4d, 0x55, 0xaa, /* magic */ +0x00, 0x00, /* pad */ +0x00, 0x00, /* eeprom_pda_data_wrap length */ +0x00, 0x00, 0x00, 0x00, /* arm opcode */ + +/* bogus MAC address */ +0x04, 0x00, 0x01, 0x01, /* PDR_MAC_ADDRESS */ + 0x00, 0x02, 0xee, 0xc0, 0xff, 0xee, + +/* struct bootrec_exp_if */ +0x06, 0x00, 0x01, 0x10, /* PDR_INTERFACE_LIST */ + 0x00, 0x00, /* role */ + 0x0f, 0x00, /* if_id */ + 0x85, 0x00, /* variant = Longbow RF, 2GHz */ + 0x01, 0x00, /* btm_compat */ + 0x1f, 0x00, /* top_compat */ + +0x03, 0x00, 0x02, 0x10, /* PDR_HARDWARE_PLATFORM_COMPONENT_ID */ + 0x03, 0x20, 0x00, 0x43, + +/* struct pda_country[6] */ +0x0d, 0x00, 0x07, 0x10, /* PDR_COUNTRY_LIST */ + 0x10, 0x00, 0x00, 0x00, + 0x20, 0x00, 0x00, 0x00, + 0x30, 0x00, 0x00, 0x00, + 0x31, 0x00, 0x00, 0x00, + 0x32, 0x00, 0x00, 0x00, + 0x40, 0x00, 0x00, 0x00, + +/* struct pda_country */ +0x03, 0x00, 0x08, 0x10, /* PDR_DEFAULT_COUNTRY */ + 0x30, 0x00, 0x00, 0x00, /* ETSI */ + +0x03, 0x00, 0x00, 0x11, /* PDR_ANTENNA_GAIN */ + 0x08, 0x08, 0x08, 0x08, + +0x09, 0x00, 0xad, 0xde, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM */ + 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + +/* struct pda_custom_wrapper */ +0x10, 0x06, 0x5d, 0xb0, /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */ + 0x0d, 0x00, 0xee, 0x00, /* 13 entries, 238 bytes per entry */ + 0x00, 0x00, 0x16, 0x0c, /* no offset, 3094 total len */ + /* 2412 MHz */ + 0x6c, 0x09, + 0x10, 0x01, 0x9a, 0x84, + 0xaa, 0x8a, 0xaa, 0x8a, 0xaa, 0x8a, 0xaa, 0x8a, + 0x3c, 0xb6, 0x3c, 0xb6, 0x3c, 0xb6, 0x3c, 0xb6, + 0x3c, 0xb6, 0x3c, 0xb6, 0x3c, 0xb6, 0x3c, 0xb6, + 0xf0, 0x00, 0x94, 0x6c, + 0x99, 0x82, 0x99, 0x82, 0x99, 0x82, 0x99, 0x82, + 0x2b, 0xae, 0x2b, 0xae, 0x2b, 0xae, 0x2b, 0xae, + 0x2b, 0xae, 0x2b, 0xae, 0x2b, 0xae, 0x2b, 0xae, + 0xd0, 0x00, 0xaa, 0x5a, + 0x88, 0x7a, 0x88, 0x7a, 0x88, 0x7a, 0x88, 0x7a, + 0x1a, 0xa6, 0x1a, 0xa6, 0x1a, 0xa6, 0x1a, 0xa6, + 0x1a, 0xa6, 0x1a, 0xa6, 0x1a, 0xa6, 0x1a, 0xa6, + 0xa0, 0x00, 0xf3, 0x47, + 0x6e, 0x6e, 0x6e, 0x6e, 0x6e, 0x6e, 0x6e, 0x6e, + 0x00, 0x9a, 0x00, 0x9a, 0x00, 0x9a, 0x00, 0x9a, + 0x00, 0x9a, 0x00, 0x9a, 0x00, 0x9a, 0x00, 0x9a, + 0x50, 0x00, 0x59, 0x36, + 0x43, 0x5a, 0x43, 0x5a, 0x43, 0x5a, 0x43, 0x5a, + 0xd5, 0x85, 0xd5, 0x85, 0xd5, 0x85, 0xd5, 0x85, + 0xd5, 0x85, 0xd5, 0x85, 0xd5, 0x85, 0xd5, 0x85, + 0x00, 0x00, 0xe4, 0x2d, + 0x18, 0x46, 0x18, 0x46, 0x18, 0x46, 0x18, 0x46, + 0xaa, 0x71, 0xaa, 0x71, 0xaa, 0x71, 0xaa, 0x71, + 0xaa, 0x71, 0xaa, 0x71, 0xaa, 0x71, 0xaa, 0x71, + 0x00, 0x80, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2417 MHz */ + 0x71, 0x09, + 0x10, 0x01, 0xb9, 0x83, + 0x7d, 0x8a, 0x7d, 0x8a, 0x7d, 0x8a, 0x7d, 0x8a, + 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, + 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, + 0xf0, 0x00, 0x2e, 0x6c, + 0x68, 0x82, 0x68, 0x82, 0x68, 0x82, 0x68, 0x82, + 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, + 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, + 0xd0, 0x00, 0x8d, 0x5a, + 0x52, 0x7a, 0x52, 0x7a, 0x52, 0x7a, 0x52, 0x7a, + 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, + 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, + 0xa0, 0x00, 0x0a, 0x48, + 0x32, 0x6e, 0x32, 0x6e, 0x32, 0x6e, 0x32, 0x6e, + 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, + 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, + 0x50, 0x00, 0x7c, 0x36, + 0xfc, 0x59, 0xfc, 0x59, 0xfc, 0x59, 0xfc, 0x59, + 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, + 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, + 0x00, 0x00, 0xf5, 0x2d, + 0xc6, 0x45, 0xc6, 0x45, 0xc6, 0x45, 0xc6, 0x45, + 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, + 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2422 MHz */ + 0x76, 0x09, + 0x10, 0x01, 0xb9, 0x83, + 0x7d, 0x8a, 0x7d, 0x8a, 0x7d, 0x8a, 0x7d, 0x8a, + 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, + 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, 0x0f, 0xb6, + 0xf0, 0x00, 0x2e, 0x6c, + 0x68, 0x82, 0x68, 0x82, 0x68, 0x82, 0x68, 0x82, + 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, + 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, 0xfa, 0xad, + 0xd0, 0x00, 0x8d, 0x5a, + 0x52, 0x7a, 0x52, 0x7a, 0x52, 0x7a, 0x52, 0x7a, + 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, + 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, 0xe4, 0xa5, + 0xa0, 0x00, 0x0a, 0x48, + 0x32, 0x6e, 0x32, 0x6e, 0x32, 0x6e, 0x32, 0x6e, + 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, + 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, 0xc4, 0x99, + 0x50, 0x00, 0x7c, 0x36, + 0xfc, 0x59, 0xfc, 0x59, 0xfc, 0x59, 0xfc, 0x59, + 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, + 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, 0x8e, 0x85, + 0x00, 0x00, 0xf5, 0x2d, + 0xc6, 0x45, 0xc6, 0x45, 0xc6, 0x45, 0xc6, 0x45, + 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, + 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, 0x58, 0x71, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2427 MHz */ + 0x7b, 0x09, + 0x10, 0x01, 0x48, 0x83, + 0x67, 0x8a, 0x67, 0x8a, 0x67, 0x8a, 0x67, 0x8a, + 0xf9, 0xb5, 
0xf9, 0xb5, 0xf9, 0xb5, 0xf9, 0xb5, + 0xf9, 0xb5, 0xf9, 0xb5, 0xf9, 0xb5, 0xf9, 0xb5, + 0xf0, 0x00, 0xfb, 0x6b, + 0x50, 0x82, 0x50, 0x82, 0x50, 0x82, 0x50, 0x82, + 0xe2, 0xad, 0xe2, 0xad, 0xe2, 0xad, 0xe2, 0xad, + 0xe2, 0xad, 0xe2, 0xad, 0xe2, 0xad, 0xe2, 0xad, + 0xd0, 0x00, 0x7e, 0x5a, + 0x38, 0x7a, 0x38, 0x7a, 0x38, 0x7a, 0x38, 0x7a, + 0xca, 0xa5, 0xca, 0xa5, 0xca, 0xa5, 0xca, 0xa5, + 0xca, 0xa5, 0xca, 0xa5, 0xca, 0xa5, 0xca, 0xa5, + 0xa0, 0x00, 0x15, 0x48, + 0x14, 0x6e, 0x14, 0x6e, 0x14, 0x6e, 0x14, 0x6e, + 0xa6, 0x99, 0xa6, 0x99, 0xa6, 0x99, 0xa6, 0x99, + 0xa6, 0x99, 0xa6, 0x99, 0xa6, 0x99, 0xa6, 0x99, + 0x50, 0x00, 0x8e, 0x36, + 0xd9, 0x59, 0xd9, 0x59, 0xd9, 0x59, 0xd9, 0x59, + 0x6b, 0x85, 0x6b, 0x85, 0x6b, 0x85, 0x6b, 0x85, + 0x6b, 0x85, 0x6b, 0x85, 0x6b, 0x85, 0x6b, 0x85, + 0x00, 0x00, 0xfe, 0x2d, + 0x9d, 0x45, 0x9d, 0x45, 0x9d, 0x45, 0x9d, 0x45, + 0x2f, 0x71, 0x2f, 0x71, 0x2f, 0x71, 0x2f, 0x71, + 0x2f, 0x71, 0x2f, 0x71, 0x2f, 0x71, 0x2f, 0x71, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2432 MHz */ + 0x80, 0x09, + 0x10, 0x01, 0xd7, 0x82, + 0x51, 0x8a, 0x51, 0x8a, 0x51, 0x8a, 0x51, 0x8a, + 0xe3, 0xb5, 0xe3, 0xb5, 0xe3, 0xb5, 0xe3, 0xb5, + 0xe3, 0xb5, 0xe3, 0xb5, 0xe3, 0xb5, 0xe3, 0xb5, + 0xf0, 0x00, 0xc8, 0x6b, + 0x37, 0x82, 0x37, 0x82, 0x37, 0x82, 0x37, 0x82, + 0xc9, 0xad, 0xc9, 0xad, 0xc9, 0xad, 0xc9, 0xad, + 0xc9, 0xad, 0xc9, 0xad, 0xc9, 0xad, 0xc9, 0xad, + 0xd0, 0x00, 0x6f, 0x5a, + 0x1d, 0x7a, 0x1d, 0x7a, 0x1d, 0x7a, 0x1d, 0x7a, + 0xaf, 0xa5, 0xaf, 0xa5, 0xaf, 0xa5, 0xaf, 0xa5, + 0xaf, 0xa5, 0xaf, 0xa5, 0xaf, 0xa5, 0xaf, 0xa5, + 0xa0, 0x00, 0x20, 0x48, + 0xf6, 0x6d, 0xf6, 0x6d, 0xf6, 0x6d, 0xf6, 0x6d, + 0x88, 0x99, 0x88, 0x99, 0x88, 0x99, 0x88, 0x99, + 0x88, 0x99, 0x88, 0x99, 0x88, 0x99, 0x88, 0x99, + 0x50, 0x00, 0x9f, 0x36, + 0xb5, 0x59, 0xb5, 0x59, 0xb5, 0x59, 0xb5, 0x59, + 0x47, 0x85, 0x47, 0x85, 0x47, 0x85, 0x47, 0x85, + 0x47, 0x85, 0x47, 0x85, 0x47, 0x85, 0x47, 0x85, + 0x00, 0x00, 0x06, 0x2e, + 0x74, 0x45, 0x74, 0x45, 0x74, 0x45, 0x74, 0x45, + 0x06, 0x71, 0x06, 0x71, 0x06, 0x71, 0x06, 0x71, + 0x06, 0x71, 0x06, 0x71, 0x06, 0x71, 0x06, 0x71, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2437 MHz */ + 0x85, 0x09, + 0x10, 0x01, 0x67, 0x82, + 0x3a, 0x8a, 0x3a, 0x8a, 0x3a, 0x8a, 0x3a, 0x8a, + 0xcc, 0xb5, 0xcc, 0xb5, 0xcc, 0xb5, 0xcc, 0xb5, + 0xcc, 0xb5, 0xcc, 0xb5, 0xcc, 0xb5, 0xcc, 0xb5, + 0xf0, 0x00, 0x95, 0x6b, + 0x1f, 0x82, 0x1f, 0x82, 0x1f, 0x82, 0x1f, 0x82, + 0xb1, 0xad, 0xb1, 0xad, 0xb1, 0xad, 0xb1, 0xad, + 0xb1, 0xad, 0xb1, 0xad, 0xb1, 0xad, 0xb1, 0xad, + 0xd0, 0x00, 0x61, 0x5a, + 0x02, 0x7a, 0x02, 0x7a, 0x02, 0x7a, 0x02, 0x7a, + 0x94, 0xa5, 0x94, 0xa5, 0x94, 0xa5, 0x94, 0xa5, + 0x94, 0xa5, 0x94, 0xa5, 0x94, 0xa5, 0x94, 0xa5, + 0xa0, 0x00, 0x2c, 0x48, + 0xd8, 0x6d, 0xd8, 0x6d, 0xd8, 0x6d, 0xd8, 0x6d, + 
0x6a, 0x99, 0x6a, 0x99, 0x6a, 0x99, 0x6a, 0x99, + 0x6a, 0x99, 0x6a, 0x99, 0x6a, 0x99, 0x6a, 0x99, + 0x50, 0x00, 0xb1, 0x36, + 0x92, 0x59, 0x92, 0x59, 0x92, 0x59, 0x92, 0x59, + 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, + 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, + 0x00, 0x00, 0x0f, 0x2e, + 0x4b, 0x45, 0x4b, 0x45, 0x4b, 0x45, 0x4b, 0x45, + 0xdd, 0x70, 0xdd, 0x70, 0xdd, 0x70, 0xdd, 0x70, + 0xdd, 0x70, 0xdd, 0x70, 0xdd, 0x70, 0xdd, 0x70, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2442 MHz */ + 0x8a, 0x09, + 0x10, 0x01, 0xf6, 0x81, + 0x24, 0x8a, 0x24, 0x8a, 0x24, 0x8a, 0x24, 0x8a, + 0xb6, 0xb5, 0xb6, 0xb5, 0xb6, 0xb5, 0xb6, 0xb5, + 0xb6, 0xb5, 0xb6, 0xb5, 0xb6, 0xb5, 0xb6, 0xb5, + 0xf0, 0x00, 0x62, 0x6b, + 0x06, 0x82, 0x06, 0x82, 0x06, 0x82, 0x06, 0x82, + 0x98, 0xad, 0x98, 0xad, 0x98, 0xad, 0x98, 0xad, + 0x98, 0xad, 0x98, 0xad, 0x98, 0xad, 0x98, 0xad, + 0xd0, 0x00, 0x52, 0x5a, + 0xe7, 0x79, 0xe7, 0x79, 0xe7, 0x79, 0xe7, 0x79, + 0x79, 0xa5, 0x79, 0xa5, 0x79, 0xa5, 0x79, 0xa5, + 0x79, 0xa5, 0x79, 0xa5, 0x79, 0xa5, 0x79, 0xa5, + 0xa0, 0x00, 0x37, 0x48, + 0xba, 0x6d, 0xba, 0x6d, 0xba, 0x6d, 0xba, 0x6d, + 0x4c, 0x99, 0x4c, 0x99, 0x4c, 0x99, 0x4c, 0x99, + 0x4c, 0x99, 0x4c, 0x99, 0x4c, 0x99, 0x4c, 0x99, + 0x50, 0x00, 0xc2, 0x36, + 0x6e, 0x59, 0x6e, 0x59, 0x6e, 0x59, 0x6e, 0x59, + 0x00, 0x85, 0x00, 0x85, 0x00, 0x85, 0x00, 0x85, + 0x00, 0x85, 0x00, 0x85, 0x00, 0x85, 0x00, 0x85, + 0x00, 0x00, 0x17, 0x2e, + 0x22, 0x45, 0x22, 0x45, 0x22, 0x45, 0x22, 0x45, + 0xb4, 0x70, 0xb4, 0x70, 0xb4, 0x70, 0xb4, 0x70, + 0xb4, 0x70, 0xb4, 0x70, 0xb4, 0x70, 0xb4, 0x70, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2447 MHz */ + 0x8f, 0x09, + 0x10, 0x01, 0x75, 0x83, + 0x61, 0x8a, 0x61, 0x8a, 0x61, 0x8a, 0x61, 0x8a, + 0xf3, 0xb5, 0xf3, 0xb5, 0xf3, 0xb5, 0xf3, 0xb5, + 0xf3, 0xb5, 0xf3, 0xb5, 0xf3, 0xb5, 0xf3, 0xb5, + 0xf0, 0x00, 0x4b, 0x6c, + 0x3f, 0x82, 0x3f, 0x82, 0x3f, 0x82, 0x3f, 0x82, + 0xd1, 0xad, 0xd1, 0xad, 0xd1, 0xad, 0xd1, 0xad, + 0xd1, 0xad, 0xd1, 0xad, 0xd1, 0xad, 0xd1, 0xad, + 0xd0, 0x00, 0xda, 0x5a, + 0x1c, 0x7a, 0x1c, 0x7a, 0x1c, 0x7a, 0x1c, 0x7a, + 0xae, 0xa5, 0xae, 0xa5, 0xae, 0xa5, 0xae, 0xa5, + 0xae, 0xa5, 0xae, 0xa5, 0xae, 0xa5, 0xae, 0xa5, + 0xa0, 0x00, 0x6d, 0x48, + 0xe9, 0x6d, 0xe9, 0x6d, 0xe9, 0x6d, 0xe9, 0x6d, + 0x7b, 0x99, 0x7b, 0x99, 0x7b, 0x99, 0x7b, 0x99, + 0x7b, 0x99, 0x7b, 0x99, 0x7b, 0x99, 0x7b, 0x99, + 0x50, 0x00, 0xc6, 0x36, + 0x92, 0x59, 0x92, 0x59, 0x92, 0x59, 0x92, 0x59, + 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, + 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, 0x24, 0x85, + 0x00, 0x00, 0x15, 0x2e, + 0x3c, 0x45, 0x3c, 0x45, 0x3c, 0x45, 0x3c, 0x45, + 0xce, 0x70, 0xce, 0x70, 0xce, 0x70, 0xce, 0x70, + 0xce, 0x70, 0xce, 0x70, 0xce, 0x70, 0xce, 0x70, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2452 MHz */ + 0x94, 0x09, + 0x10, 0x01, 0xf4, 0x84, + 0x9e, 0x8a, 0x9e, 0x8a, 0x9e, 0x8a, 0x9e, 0x8a, + 0x30, 0xb6, 0x30, 0xb6, 0x30, 0xb6, 0x30, 0xb6, + 0x30, 0xb6, 0x30, 0xb6, 0x30, 0xb6, 0x30, 0xb6, + 0xf0, 0x00, 0x34, 0x6d, + 0x77, 0x82, 0x77, 0x82, 0x77, 0x82, 0x77, 0x82, + 0x09, 0xae, 0x09, 0xae, 0x09, 0xae, 0x09, 0xae, + 0x09, 0xae, 0x09, 0xae, 0x09, 0xae, 0x09, 0xae, + 0xd0, 0x00, 0x62, 0x5b, + 0x50, 0x7a, 0x50, 0x7a, 0x50, 0x7a, 0x50, 0x7a, + 0xe2, 0xa5, 0xe2, 0xa5, 0xe2, 0xa5, 0xe2, 0xa5, + 0xe2, 0xa5, 0xe2, 0xa5, 0xe2, 0xa5, 0xe2, 0xa5, + 0xa0, 0x00, 0xa2, 0x48, + 0x17, 0x6e, 0x17, 0x6e, 0x17, 0x6e, 0x17, 0x6e, + 0xa9, 0x99, 0xa9, 0x99, 0xa9, 0x99, 0xa9, 0x99, + 0xa9, 0x99, 0xa9, 0x99, 0xa9, 0x99, 0xa9, 0x99, + 0x50, 0x00, 0xc9, 0x36, + 0xb7, 0x59, 0xb7, 0x59, 0xb7, 0x59, 0xb7, 0x59, + 0x49, 0x85, 0x49, 0x85, 0x49, 0x85, 0x49, 0x85, + 0x49, 0x85, 0x49, 0x85, 0x49, 0x85, 0x49, 0x85, + 0x00, 0x00, 0x12, 0x2e, + 0x57, 0x45, 0x57, 0x45, 0x57, 0x45, 0x57, 0x45, + 0xe9, 0x70, 0xe9, 0x70, 0xe9, 0x70, 0xe9, 0x70, + 0xe9, 0x70, 0xe9, 0x70, 0xe9, 0x70, 0xe9, 0x70, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2452 MHz */ + 0x99, 0x09, + 0x10, 0x01, 0x74, 0x86, + 0xdb, 0x8a, 0xdb, 0x8a, 0xdb, 0x8a, 0xdb, 0x8a, + 0x6d, 0xb6, 0x6d, 0xb6, 0x6d, 0xb6, 0x6d, 0xb6, + 0x6d, 0xb6, 0x6d, 0xb6, 0x6d, 0xb6, 0x6d, 0xb6, + 0xf0, 0x00, 0x1e, 0x6e, + 0xb0, 0x82, 0xb0, 0x82, 0xb0, 0x82, 0xb0, 0x82, + 0x42, 0xae, 0x42, 0xae, 0x42, 0xae, 0x42, 0xae, + 0x42, 0xae, 0x42, 0xae, 0x42, 0xae, 0x42, 0xae, + 0xd0, 0x00, 0xeb, 0x5b, + 0x85, 0x7a, 0x85, 0x7a, 0x85, 0x7a, 0x85, 0x7a, + 0x17, 0xa6, 0x17, 0xa6, 0x17, 0xa6, 0x17, 0xa6, + 0x17, 0xa6, 0x17, 0xa6, 0x17, 0xa6, 0x17, 0xa6, + 0xa0, 0x00, 0xd8, 0x48, + 0x46, 0x6e, 0x46, 0x6e, 0x46, 0x6e, 0x46, 0x6e, + 0xd8, 0x99, 0xd8, 0x99, 0xd8, 0x99, 0xd8, 0x99, + 0xd8, 0x99, 0xd8, 0x99, 0xd8, 0x99, 0xd8, 0x99, + 0x50, 0x00, 0xcd, 0x36, + 0xdb, 0x59, 0xdb, 0x59, 0xdb, 0x59, 0xdb, 0x59, + 0x6d, 0x85, 0x6d, 0x85, 0x6d, 0x85, 0x6d, 0x85, + 0x6d, 0x85, 0x6d, 0x85, 0x6d, 0x85, 0x6d, 0x85, + 0x00, 0x00, 0x10, 0x2e, + 0x71, 0x45, 0x71, 0x45, 0x71, 0x45, 0x71, 0x45, + 0x03, 0x71, 0x03, 0x71, 0x03, 0x71, 0x03, 0x71, + 0x03, 0x71, 0x03, 0x71, 0x03, 0x71, 0x03, 0x71, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2557 MHz */ + 0x9e, 0x09, + 0x10, 0x01, 0xf3, 0x87, + 0x17, 0x8b, 0x17, 0x8b, 0x17, 0x8b, 0x17, 0x8b, + 0xa9, 0xb6, 0xa9, 0xb6, 0xa9, 0xb6, 0xa9, 0xb6, + 
0xa9, 0xb6, 0xa9, 0xb6, 0xa9, 0xb6, 0xa9, 0xb6, + 0xf0, 0x00, 0x07, 0x6f, + 0xe9, 0x82, 0xe9, 0x82, 0xe9, 0x82, 0xe9, 0x82, + 0x7b, 0xae, 0x7b, 0xae, 0x7b, 0xae, 0x7b, 0xae, + 0x7b, 0xae, 0x7b, 0xae, 0x7b, 0xae, 0x7b, 0xae, + 0xd0, 0x00, 0x73, 0x5c, + 0xba, 0x7a, 0xba, 0x7a, 0xba, 0x7a, 0xba, 0x7a, + 0x4c, 0xa6, 0x4c, 0xa6, 0x4c, 0xa6, 0x4c, 0xa6, + 0x4c, 0xa6, 0x4c, 0xa6, 0x4c, 0xa6, 0x4c, 0xa6, + 0xa0, 0x00, 0x0d, 0x49, + 0x74, 0x6e, 0x74, 0x6e, 0x74, 0x6e, 0x74, 0x6e, + 0x06, 0x9a, 0x06, 0x9a, 0x06, 0x9a, 0x06, 0x9a, + 0x06, 0x9a, 0x06, 0x9a, 0x06, 0x9a, 0x06, 0x9a, + 0x50, 0x00, 0xd1, 0x36, + 0xff, 0x59, 0xff, 0x59, 0xff, 0x59, 0xff, 0x59, + 0x91, 0x85, 0x91, 0x85, 0x91, 0x85, 0x91, 0x85, + 0x91, 0x85, 0x91, 0x85, 0x91, 0x85, 0x91, 0x85, + 0x00, 0x00, 0x0e, 0x2e, + 0x8b, 0x45, 0x8b, 0x45, 0x8b, 0x45, 0x8b, 0x45, + 0x1d, 0x71, 0x1d, 0x71, 0x1d, 0x71, 0x1d, 0x71, + 0x1d, 0x71, 0x1d, 0x71, 0x1d, 0x71, 0x1d, 0x71, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2562 MHz */ + 0xa3, 0x09, + 0x10, 0x01, 0x72, 0x89, + 0x54, 0x8b, 0x54, 0x8b, 0x54, 0x8b, 0x54, 0x8b, + 0xe6, 0xb6, 0xe6, 0xb6, 0xe6, 0xb6, 0xe6, 0xb6, + 0xe6, 0xb6, 0xe6, 0xb6, 0xe6, 0xb6, 0xe6, 0xb6, + 0xf0, 0x00, 0xf0, 0x6f, + 0x21, 0x83, 0x21, 0x83, 0x21, 0x83, 0x21, 0x83, + 0xb3, 0xae, 0xb3, 0xae, 0xb3, 0xae, 0xb3, 0xae, + 0xb3, 0xae, 0xb3, 0xae, 0xb3, 0xae, 0xb3, 0xae, + 0xd0, 0x00, 0xfb, 0x5c, + 0xee, 0x7a, 0xee, 0x7a, 0xee, 0x7a, 0xee, 0x7a, + 0x80, 0xa6, 0x80, 0xa6, 0x80, 0xa6, 0x80, 0xa6, + 0x80, 0xa6, 0x80, 0xa6, 0x80, 0xa6, 0x80, 0xa6, + 0xa0, 0x00, 0x43, 0x49, + 0xa3, 0x6e, 0xa3, 0x6e, 0xa3, 0x6e, 0xa3, 0x6e, + 0x35, 0x9a, 0x35, 0x9a, 0x35, 0x9a, 0x35, 0x9a, + 0x35, 0x9a, 0x35, 0x9a, 0x35, 0x9a, 0x35, 0x9a, + 0x50, 0x00, 0xd4, 0x36, + 0x24, 0x5a, 0x24, 0x5a, 0x24, 0x5a, 0x24, 0x5a, + 0xb6, 0x85, 0xb6, 0x85, 0xb6, 0x85, 0xb6, 0x85, + 0xb6, 0x85, 0xb6, 0x85, 0xb6, 0x85, 0xb6, 0x85, + 0x00, 0x00, 0x0b, 0x2e, + 0xa6, 0x45, 0xa6, 0x45, 0xa6, 0x45, 0xa6, 0x45, + 0x38, 0x71, 0x38, 0x71, 0x38, 0x71, 0x38, 0x71, + 0x38, 0x71, 0x38, 0x71, 0x38, 0x71, 0x38, 0x71, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + + /* 2572 MHz */ + 0xa8, 0x09, + 0x10, 0x01, 0xf1, 0x8a, + 0x91, 0x8b, 0x91, 0x8b, 0x91, 0x8b, 0x91, 0x8b, + 0x23, 0xb7, 0x23, 0xb7, 0x23, 0xb7, 0x23, 0xb7, + 0x23, 0xb7, 0x23, 0xb7, 0x23, 0xb7, 0x23, 0xb7, + 0xf0, 0x00, 0xd9, 0x70, + 0x5a, 0x83, 0x5a, 0x83, 0x5a, 0x83, 0x5a, 0x83, + 0xec, 0xae, 0xec, 0xae, 0xec, 0xae, 0xec, 0xae, + 0xec, 0xae, 0xec, 0xae, 0xec, 0xae, 0xec, 0xae, + 0xd0, 0x00, 0x83, 0x5d, + 0x23, 0x7b, 0x23, 0x7b, 0x23, 0x7b, 0x23, 0x7b, + 0xb5, 0xa6, 0xb5, 0xa6, 0xb5, 0xa6, 0xb5, 0xa6, + 0xb5, 0xa6, 0xb5, 0xa6, 0xb5, 0xa6, 0xb5, 0xa6, + 0xa0, 0x00, 0x78, 0x49, + 0xd1, 0x6e, 0xd1, 0x6e, 0xd1, 0x6e, 0xd1, 0x6e, + 0x63, 0x9a, 0x63, 0x9a, 0x63, 0x9a, 0x63, 
0x9a, + 0x63, 0x9a, 0x63, 0x9a, 0x63, 0x9a, 0x63, 0x9a, + 0x50, 0x00, 0xd8, 0x36, + 0x48, 0x5a, 0x48, 0x5a, 0x48, 0x5a, 0x48, 0x5a, + 0xda, 0x85, 0xda, 0x85, 0xda, 0x85, 0xda, 0x85, + 0xda, 0x85, 0xda, 0x85, 0xda, 0x85, 0xda, 0x85, + 0x00, 0x00, 0x09, 0x2e, + 0xc0, 0x45, 0xc0, 0x45, 0xc0, 0x45, 0xc0, 0x45, + 0x52, 0x71, 0x52, 0x71, 0x52, 0x71, 0x52, 0x71, + 0x52, 0x71, 0x52, 0x71, 0x52, 0x71, 0x52, 0x71, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, + +/* + * Not really sure if this is actually the power_limit database, + * it looks a bit "related" to PDR_PRISM_ZIF_TX_IQ_CALIBRATION + */ +/* struct pda_custom_wrapper */ +0xae, 0x00, 0xef, 0xbe, /* PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM */ + 0x0d, 0x00, 0x1a, 0x00, /* 13 entries, 26 bytes per entry */ + 0x00, 0x00, 0x52, 0x01, /* no offset, 338 bytes total */ + + /* 2412 MHz */ + 0x6c, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2417 MHz */ + 0x71, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2422 MHz */ + 0x76, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2427 MHz */ + 0x7b, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2432 MHz */ + 0x80, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2437 MHz */ + 0x85, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2442 MHz */ + 0x8a, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2447 MHz */ + 0x8f, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2452 MHz */ + 0x94, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2457 MHz */ + 0x99, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2462 MHz */ + 0x9e, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2467 MHz */ + 0xa3, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + + /* 2472 MHz */ + 0xa8, 0x09, + 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, + 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, + 0xd0, 0x00, 
0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, + +/* struct pda_iq_autocal_entry[13] */ +0x42, 0x00, 0x06, 0x19, /* PDR_PRISM_ZIF_TX_IQ_CALIBRATION */ + /* 2412 MHz */ + 0x6c, 0x09, 0x26, 0x00, 0xf8, 0xff, 0xf7, 0xff, 0xff, 0x00, + /* 2417 MHz */ + 0x71, 0x09, 0x26, 0x00, 0xf8, 0xff, 0xf7, 0xff, 0xff, 0x00, + /* 2422 MHz */ + 0x76, 0x09, 0x26, 0x00, 0xf8, 0xff, 0xf7, 0xff, 0xff, 0x00, + /* 2427 MHz */ + 0x7b, 0x09, 0x26, 0x00, 0xf8, 0xff, 0xf7, 0xff, 0xff, 0x00, + /* 2432 MHz */ + 0x80, 0x09, 0x25, 0x00, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0x00, + /* 2437 MHz */ + 0x85, 0x09, 0x25, 0x00, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0x00, + /* 2442 MHz */ + 0x8a, 0x09, 0x25, 0x00, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0x00, + /* 2447 MHz */ + 0x8f, 0x09, 0x25, 0x00, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0x00, + /* 2452 MHz */ + 0x94, 0x09, 0x25, 0x00, 0xf7, 0xff, 0xf7, 0xff, 0xff, 0x00, + /* 2457 MHz */ + 0x99, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01, + /* 2462 MHz */ + 0x9e, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01, + /* 2467 MHz */ + 0xa3, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01, + /* 2472 MHz */ + 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01, + +0x02, 0x00, 0x00, 0x00, /* PDR_END */ + 0xa8, 0xf5 /* bogus data */ +}; + +#endif /* P54SPI_EEPROM_H */ + diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c new file mode 100644 index 0000000..336a15b --- /dev/null +++ b/drivers/net/wireless/p54/p54usb.c @@ -0,0 +1,1073 @@ + +/* + * Linux device driver for USB based Prism54 + * + * Copyright (c) 2006, Michael Wu + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "p54.h" +#include "lmac.h" +#include "p54usb.h" + +MODULE_AUTHOR("Michael Wu "); +MODULE_DESCRIPTION("Prism54 USB wireless driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("prism54usb"); +MODULE_FIRMWARE("isl3886usb"); +MODULE_FIRMWARE("isl3887usb"); + +static struct usb_device_id p54u_table[] __devinitdata = { + /* Version 1 devices (pci chip + net2280) */ + {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ + {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ + {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ + {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ + {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */ + {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */ + {USB_DEVICE(0x0846, 0x4200)}, /* Netgear WG121 */ + {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? 
*/ + {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ + {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ + {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ + {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ + {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */ + {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ + {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ + {USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */ + {USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */ + {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ + + /* Version 2 devices (3887) */ + {USB_DEVICE(0x0471, 0x1230)}, /* Philips CPWUA054/00 */ + {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ + {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ + {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ + {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ + {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ + {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ + {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ + {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0baf, 0x0118)}, /* U.S. Robotics U5 802.11g Adapter*/ + {USB_DEVICE(0x0bf8, 0x1009)}, /* FUJITSU E-5400 USB D1700*/ + {USB_DEVICE(0x0cde, 0x0006)}, /* Medion MD40900 */ + {USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */ + {USB_DEVICE(0x0cde, 0x0015)}, /* Zcomax XG-705A */ + {USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */ + {USB_DEVICE(0x124a, 0x4025)}, /* IOGear GWU513 (GW3887IK chip) */ + {USB_DEVICE(0x1260, 0xee22)}, /* SMC 2862W-G version 2 */ + {USB_DEVICE(0x13b1, 0x000a)}, /* Linksys WUSB54G ver 2 */ + {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ + {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ + {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ + {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ + {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ + {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */ + {} +}; + +MODULE_DEVICE_TABLE(usb, p54u_table); + +static const struct { + u32 intf; + enum p54u_hw_type type; + const char *fw; + const char *fw_legacy; + char hw[20]; +} p54u_fwlist[__NUM_P54U_HWTYPES] = { + { + .type = P54U_NET2280, + .intf = FW_LM86, + .fw = "isl3886usb", + .fw_legacy = "isl3890usb", + .hw = "ISL3886 + net2280", + }, + { + .type = P54U_3887, + .intf = FW_LM87, + .fw = "isl3887usb", + .fw_legacy = "isl3887usb_bare", + .hw = "ISL3887", + }, +}; + +static void p54u_rx_cb(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *) urb->context; + struct p54u_rx_info *info = (struct p54u_rx_info *)skb->cb; + struct ieee80211_hw *dev = info->dev; + struct p54u_priv *priv = dev->priv; + + skb_unlink(skb, &priv->rx_queue); + + if (unlikely(urb->status)) { + dev_kfree_skb_irq(skb); + return; + } + + skb_put(skb, urb->actual_length); + + if (priv->hw_type == P54U_NET2280) + skb_pull(skb, priv->common.tx_hdr_len); + if (priv->common.fw_interface == FW_LM87) { + skb_pull(skb, 4); + skb_put(skb, 4); + } + + if (p54_rx(dev, skb)) { + skb = dev_alloc_skb(priv->common.rx_mtu + 32); + if (unlikely(!skb)) { + /* TODO check rx queue length and refill *somewhere* */ + return; + } + + info = (struct p54u_rx_info *) skb->cb; + info->urb = urb; + info->dev = dev; + urb->transfer_buffer = skb_tail_pointer(skb); + 
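
Editor's note: p54u_rx_cb() keeps the receive pipeline full by either handing the filled skb up the stack and allocating a fresh replacement, or, when p54_rx() did not consume it, rewinding the same skb and resubmitting the URB with it. The sketch below models that "consume and replace, or rewind and reuse" decision with plain heap buffers; consumed(), BUF_SZ and struct rx_buf are invented for this sketch and the userspace model frees a consumed buffer itself, which the driver of course does not.

/* Userspace model of the recycle-or-replace decision in p54u_rx_cb(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SZ 2048

struct rx_buf {
	unsigned char data[BUF_SZ];
	size_t len;
};

/* pretend every other frame is accepted by the stack */
static int consumed(int frame)
{
	return frame % 2 == 0;
}

static struct rx_buf *rx_complete(struct rx_buf *buf, int frame)
{
	if (consumed(frame)) {
		/* ownership moved up the stack: allocate a replacement */
		struct rx_buf *fresh = calloc(1, sizeof(*fresh));
		if (!fresh)
			return NULL;	/* ring shrinks by one, cf. the TODO above */
		free(buf);		/* model only; the stack owns it in the driver */
		return fresh;
	}
	/* not consumed: rewind and hand the same buffer back to the ring */
	memset(buf->data, 0, buf->len);
	buf->len = 0;
	return buf;
}

int main(void)
{
	struct rx_buf *buf = calloc(1, sizeof(*buf));
	int frame;

	for (frame = 0; buf && frame < 4; frame++) {
		buf->len = 64;		/* pretend the URB filled 64 bytes */
		buf = rx_complete(buf, frame);
	}
	free(buf);
	return 0;
}
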
urb->context = skb; + } else { + if (priv->hw_type == P54U_NET2280) + skb_push(skb, priv->common.tx_hdr_len); + if (priv->common.fw_interface == FW_LM87) { + skb_push(skb, 4); + skb_put(skb, 4); + } + skb_reset_tail_pointer(skb); + skb_trim(skb, 0); + urb->transfer_buffer = skb_tail_pointer(skb); + } + skb_queue_tail(&priv->rx_queue, skb); + usb_anchor_urb(urb, &priv->submitted); + if (usb_submit_urb(urb, GFP_ATOMIC)) { + skb_unlink(skb, &priv->rx_queue); + usb_unanchor_urb(urb); + dev_kfree_skb_irq(skb); + } +} + +static void p54u_tx_cb(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct ieee80211_hw *dev = (struct ieee80211_hw *) + usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + + p54_free_skb(dev, skb); +} + +static void p54u_tx_dummy_cb(struct urb *urb) { } + +static void p54u_free_urbs(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + usb_kill_anchored_urbs(&priv->submitted); +} + +static int p54u_init_urbs(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + struct urb *entry = NULL; + struct sk_buff *skb; + struct p54u_rx_info *info; + int ret = 0; + + while (skb_queue_len(&priv->rx_queue) < 32) { + skb = __dev_alloc_skb(priv->common.rx_mtu + 32, GFP_KERNEL); + if (!skb) { + ret = -ENOMEM; + goto err; + } + entry = usb_alloc_urb(0, GFP_KERNEL); + if (!entry) { + ret = -ENOMEM; + goto err; + } + + usb_fill_bulk_urb(entry, priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), + skb_tail_pointer(skb), + priv->common.rx_mtu + 32, p54u_rx_cb, skb); + info = (struct p54u_rx_info *) skb->cb; + info->urb = entry; + info->dev = dev; + skb_queue_tail(&priv->rx_queue, skb); + + usb_anchor_urb(entry, &priv->submitted); + ret = usb_submit_urb(entry, GFP_KERNEL); + if (ret) { + skb_unlink(skb, &priv->rx_queue); + usb_unanchor_urb(entry); + goto err; + } + usb_free_urb(entry); + entry = NULL; + } + + return 0; + + err: + usb_free_urb(entry); + kfree_skb(skb); + p54u_free_urbs(dev); + return ret; +} + +static __le32 p54u_lm87_chksum(const __le32 *data, size_t length) +{ + u32 chk = 0; + + length >>= 2; + while (length--) { + chk ^= le32_to_cpu(*data++); + chk = (chk >> 5) ^ (chk << 3); + } + + return cpu_to_le32(chk); +} + +static void p54u_tx_lm87(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54u_priv *priv = dev->priv; + struct urb *data_urb; + struct lm87_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr); + + data_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!data_urb) { + p54_free_skb(dev, skb); + return; + } + + hdr->chksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len); + hdr->device_addr = ((struct p54_hdr *)skb->data)->req_id; + + usb_fill_bulk_urb(data_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), + hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ? 
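+		/* Only frames flagged FREE_AFTER_TX may be freed as soon as
+		 * the URB completes; everything else stays in the tx queue
+		 * until the firmware reports its status, hence the no-op
+		 * completion callback for those. */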
+ p54u_tx_cb : p54u_tx_dummy_cb, skb); + data_urb->transfer_flags |= URB_ZERO_PACKET; + + usb_anchor_urb(data_urb, &priv->submitted); + if (usb_submit_urb(data_urb, GFP_ATOMIC)) { + usb_unanchor_urb(data_urb); + p54_free_skb(dev, skb); + } + usb_free_urb(data_urb); +} + +static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54u_priv *priv = dev->priv; + struct urb *int_urb = NULL, *data_urb = NULL; + struct net2280_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr); + struct net2280_reg_write *reg = NULL; + int err = -ENOMEM; + + reg = kmalloc(sizeof(*reg), GFP_ATOMIC); + if (!reg) + goto out; + + int_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!int_urb) + goto out; + + data_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!data_urb) + goto out; + + reg->port = cpu_to_le16(NET2280_DEV_U32); + reg->addr = cpu_to_le32(P54U_DEV_BASE); + reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); + + memset(hdr, 0, sizeof(*hdr)); + hdr->len = cpu_to_le16(skb->len); + hdr->device_addr = ((struct p54_hdr *) skb->data)->req_id; + + usb_fill_bulk_urb(int_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg), + p54u_tx_dummy_cb, dev); + + /* + * URB_FREE_BUFFER triggers a code path in the USB subsystem that will + * free what is inside the transfer_buffer after the last reference to + * the int_urb is dropped. + */ + int_urb->transfer_flags |= URB_FREE_BUFFER | URB_ZERO_PACKET; + reg = NULL; + + usb_fill_bulk_urb(data_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), + hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ? + p54u_tx_cb : p54u_tx_dummy_cb, skb); + data_urb->transfer_flags |= URB_ZERO_PACKET; + + usb_anchor_urb(int_urb, &priv->submitted); + err = usb_submit_urb(int_urb, GFP_ATOMIC); + if (err) { + usb_unanchor_urb(int_urb); + goto out; + } + + usb_anchor_urb(data_urb, &priv->submitted); + err = usb_submit_urb(data_urb, GFP_ATOMIC); + if (err) { + usb_unanchor_urb(data_urb); + goto out; + } +out: + usb_free_urb(int_urb); + usb_free_urb(data_urb); + + if (err) { + kfree(reg); + p54_free_skb(dev, skb); + } +} + +static int p54u_write(struct p54u_priv *priv, + struct net2280_reg_write *buf, + enum net2280_op_type type, + __le32 addr, __le32 val) +{ + unsigned int ep; + int alen; + + if (type & 0x0800) + ep = usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV); + else + ep = usb_sndbulkpipe(priv->udev, P54U_PIPE_BRG); + + buf->port = cpu_to_le16(type); + buf->addr = addr; + buf->val = val; + + return usb_bulk_msg(priv->udev, ep, buf, sizeof(*buf), &alen, 1000); +} + +static int p54u_read(struct p54u_priv *priv, void *buf, + enum net2280_op_type type, + __le32 addr, __le32 *val) +{ + struct net2280_reg_read *read = buf; + __le32 *reg = buf; + unsigned int ep; + int alen, err; + + if (type & 0x0800) + ep = P54U_PIPE_DEV; + else + ep = P54U_PIPE_BRG; + + read->port = cpu_to_le16(type); + read->addr = addr; + + err = usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, ep), + read, sizeof(*read), &alen, 1000); + if (err) + return err; + + err = usb_bulk_msg(priv->udev, usb_rcvbulkpipe(priv->udev, ep), + reg, sizeof(*reg), &alen, 1000); + if (err) + return err; + + *val = *reg; + return 0; +} + +static int p54u_bulk_msg(struct p54u_priv *priv, unsigned int ep, + void *data, size_t len) +{ + int alen; + return usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, ep), + data, len, &alen, 2000); +} + +static int p54u_device_reset(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + int ret, lock = (priv->intf->condition != USB_INTERFACE_BINDING); 
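+	/* While the interface is still binding (i.e. we are called from
+	 * probe) usbcore already holds the device lock, so it must not be
+	 * taken again via usb_lock_device_for_reset(); only lock once the
+	 * interface is fully bound. */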
+ + if (lock) { + ret = usb_lock_device_for_reset(priv->udev, priv->intf); + if (ret < 0) { + dev_err(&priv->udev->dev, "(p54usb) unable to lock " + "device for reset (%d)!\n", ret); + return ret; + } + } + + ret = usb_reset_device(priv->udev); + if (lock) + usb_unlock_device(priv->udev); + + if (ret) + dev_err(&priv->udev->dev, "(p54usb) unable to reset " + "device (%d)!\n", ret); + + return ret; +} + +static const char p54u_romboot_3887[] = "~~~~"; +static int p54u_firmware_reset_3887(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + u8 *buf; + int ret; + + buf = kmalloc(4, GFP_KERNEL); + if (!buf) + return -ENOMEM; + memcpy(buf, p54u_romboot_3887, 4); + ret = p54u_bulk_msg(priv, P54U_PIPE_DATA, + buf, 4); + kfree(buf); + if (ret) + dev_err(&priv->udev->dev, "(p54usb) unable to jump to " + "boot ROM (%d)!\n", ret); + + return ret; +} + +static const char p54u_firmware_upload_3887[] = "<\r"; +static int p54u_upload_firmware_3887(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + int err, alen; + u8 carry = 0; + u8 *buf, *tmp; + const u8 *data; + unsigned int left, remains, block_size; + struct x2_header *hdr; + unsigned long timeout; + + err = p54u_firmware_reset_3887(dev); + if (err) + return err; + + tmp = buf = kmalloc(P54U_FW_BLOCK, GFP_KERNEL); + if (!buf) { + dev_err(&priv->udev->dev, "(p54usb) cannot allocate firmware" + "upload buffer!\n"); + return -ENOMEM; + } + + left = block_size = min((size_t)P54U_FW_BLOCK, priv->fw->size); + strcpy(buf, p54u_firmware_upload_3887); + left -= strlen(p54u_firmware_upload_3887); + tmp += strlen(p54u_firmware_upload_3887); + + data = priv->fw->data; + remains = priv->fw->size; + + hdr = (struct x2_header *)(buf + strlen(p54u_firmware_upload_3887)); + memcpy(hdr->signature, X2_SIGNATURE, X2_SIGNATURE_SIZE); + hdr->fw_load_addr = cpu_to_le32(ISL38XX_DEV_FIRMWARE_ADDR); + hdr->fw_length = cpu_to_le32(priv->fw->size); + hdr->crc = cpu_to_le32(~crc32_le(~0, (void *)&hdr->fw_load_addr, + sizeof(u32)*2)); + left -= sizeof(*hdr); + tmp += sizeof(*hdr); + + while (remains) { + while (left--) { + if (carry) { + *tmp++ = carry; + carry = 0; + remains--; + continue; + } + switch (*data) { + case '~': + *tmp++ = '}'; + carry = '^'; + break; + case '}': + *tmp++ = '}'; + carry = ']'; + break; + default: + *tmp++ = *data; + remains--; + break; + } + data++; + } + + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size); + if (err) { + dev_err(&priv->udev->dev, "(p54usb) firmware " + "upload failed!\n"); + goto err_upload_failed; + } + + tmp = buf; + left = block_size = min((unsigned int)P54U_FW_BLOCK, remains); + } + + *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, priv->fw->data, + priv->fw->size)); + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32)); + if (err) { + dev_err(&priv->udev->dev, "(p54usb) firmware upload failed!\n"); + goto err_upload_failed; + } + timeout = jiffies + msecs_to_jiffies(1000); + while (!(err = usb_bulk_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), buf, 128, &alen, 1000))) { + if (alen > 2 && !memcmp(buf, "OK", 2)) + break; + + if (alen > 5 && !memcmp(buf, "ERROR", 5)) { + err = -EINVAL; + break; + } + + if (time_after(jiffies, timeout)) { + dev_err(&priv->udev->dev, "(p54usb) firmware boot " + "timed out!\n"); + err = -ETIMEDOUT; + break; + } + } + if (err) { + dev_err(&priv->udev->dev, "(p54usb) firmware upload failed!\n"); + goto err_upload_failed; + } + + buf[0] = 'g'; + buf[1] = '\r'; + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2); + if (err) { + 
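+		/* The 'g' + CR sent above should make the bootloader jump
+		 * into the freshly uploaded image; if even that transfer
+		 * failed, there is no point in waiting for the 'g' echo
+		 * below. */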
dev_err(&priv->udev->dev, "(p54usb) firmware boot failed!\n"); + goto err_upload_failed; + } + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(err = usb_bulk_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), buf, 128, &alen, 1000))) { + if (alen > 0 && buf[0] == 'g') + break; + + if (time_after(jiffies, timeout)) { + err = -ETIMEDOUT; + break; + } + } + if (err) + goto err_upload_failed; + +err_upload_failed: + kfree(buf); + return err; +} + +static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + const struct p54p_csr *devreg = (const struct p54p_csr *) P54U_DEV_BASE; + int err, alen; + void *buf; + __le32 reg; + unsigned int remains, offset; + const u8 *data; + + buf = kmalloc(512, GFP_KERNEL); + if (!buf) { + dev_err(&priv->udev->dev, "(p54usb) firmware buffer " + "alloc failed!\n"); + return -ENOMEM; + } + +#define P54U_WRITE(type, addr, data) \ + do {\ + err = p54u_write(priv, buf, type,\ + cpu_to_le32((u32)(unsigned long)addr), data);\ + if (err) \ + goto fail;\ + } while (0) + +#define P54U_READ(type, addr) \ + do {\ + err = p54u_read(priv, buf, type,\ + cpu_to_le32((u32)(unsigned long)addr), ®);\ + if (err)\ + goto fail;\ + } while (0) + + /* power down net2280 bridge */ + P54U_READ(NET2280_BRG_U32, NET2280_GPIOCTL); + reg |= cpu_to_le32(P54U_BRG_POWER_DOWN); + reg &= cpu_to_le32(~P54U_BRG_POWER_UP); + P54U_WRITE(NET2280_BRG_U32, NET2280_GPIOCTL, reg); + + mdelay(100); + + /* power up bridge */ + reg |= cpu_to_le32(P54U_BRG_POWER_UP); + reg &= cpu_to_le32(~P54U_BRG_POWER_DOWN); + P54U_WRITE(NET2280_BRG_U32, NET2280_GPIOCTL, reg); + + mdelay(100); + + P54U_WRITE(NET2280_BRG_U32, NET2280_DEVINIT, + cpu_to_le32(NET2280_CLK_30Mhz | + NET2280_PCI_ENABLE | + NET2280_PCI_SOFT_RESET)); + + mdelay(20); + + P54U_WRITE(NET2280_BRG_CFG_U16, PCI_COMMAND, + cpu_to_le32(PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER)); + + P54U_WRITE(NET2280_BRG_CFG_U32, PCI_BASE_ADDRESS_0, + cpu_to_le32(NET2280_BASE)); + + P54U_READ(NET2280_BRG_CFG_U16, PCI_STATUS); + reg |= cpu_to_le32(PCI_STATUS_REC_MASTER_ABORT); + P54U_WRITE(NET2280_BRG_CFG_U16, PCI_STATUS, reg); + + // TODO: we really need this? 
+ P54U_READ(NET2280_BRG_U32, NET2280_RELNUM); + + P54U_WRITE(NET2280_BRG_U32, NET2280_EPA_RSP, + cpu_to_le32(NET2280_CLEAR_NAK_OUT_PACKETS_MODE)); + P54U_WRITE(NET2280_BRG_U32, NET2280_EPC_RSP, + cpu_to_le32(NET2280_CLEAR_NAK_OUT_PACKETS_MODE)); + + P54U_WRITE(NET2280_BRG_CFG_U32, PCI_BASE_ADDRESS_2, + cpu_to_le32(NET2280_BASE2)); + + /* finally done setting up the bridge */ + + P54U_WRITE(NET2280_DEV_CFG_U16, 0x10000 | PCI_COMMAND, + cpu_to_le32(PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER)); + + P54U_WRITE(NET2280_DEV_CFG_U16, 0x10000 | 0x40 /* TRDY timeout */, 0); + P54U_WRITE(NET2280_DEV_CFG_U32, 0x10000 | PCI_BASE_ADDRESS_0, + cpu_to_le32(P54U_DEV_BASE)); + + P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, 0); + P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); + + /* do romboot */ + P54U_WRITE(NET2280_DEV_U32, &devreg->int_enable, 0); + + P54U_READ(NET2280_DEV_U32, &devreg->ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(20); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(20); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(100); + + P54U_READ(NET2280_DEV_U32, &devreg->int_ident); + P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); + + /* finally, we can upload firmware now! */ + remains = priv->fw->size; + data = priv->fw->data; + offset = ISL38XX_DEV_FIRMWARE_ADDR; + + while (remains) { + unsigned int block_len = min(remains, (unsigned int)512); + memcpy(buf, data, block_len); + + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len); + if (err) { + dev_err(&priv->udev->dev, "(p54usb) firmware block " + "upload failed\n"); + goto fail; + } + + P54U_WRITE(NET2280_DEV_U32, &devreg->direct_mem_base, + cpu_to_le32(0xc0000f00)); + + P54U_WRITE(NET2280_DEV_U32, + 0x0020 | (unsigned long)&devreg->direct_mem_win, 0); + P54U_WRITE(NET2280_DEV_U32, + 0x0020 | (unsigned long)&devreg->direct_mem_win, + cpu_to_le32(1)); + + P54U_WRITE(NET2280_DEV_U32, + 0x0024 | (unsigned long)&devreg->direct_mem_win, + cpu_to_le32(block_len)); + P54U_WRITE(NET2280_DEV_U32, + 0x0028 | (unsigned long)&devreg->direct_mem_win, + cpu_to_le32(offset)); + + P54U_WRITE(NET2280_DEV_U32, &devreg->dma_addr, + cpu_to_le32(NET2280_EPA_FIFO_PCI_ADDR)); + P54U_WRITE(NET2280_DEV_U32, &devreg->dma_len, + cpu_to_le32(block_len >> 2)); + P54U_WRITE(NET2280_DEV_U32, &devreg->dma_ctrl, + cpu_to_le32(ISL38XX_DMA_MASTER_CONTROL_TRIGGER)); + + mdelay(10); + + P54U_READ(NET2280_DEV_U32, + 0x002C | (unsigned long)&devreg->direct_mem_win); + if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) || + !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) { + dev_err(&priv->udev->dev, "(p54usb) firmware DMA " + "transfer failed\n"); + goto fail; + } + + P54U_WRITE(NET2280_BRG_U32, NET2280_EPA_STAT, + cpu_to_le32(NET2280_FIFO_FLUSH)); + + remains -= block_len; + data += block_len; + offset += block_len; + } + + /* do ramboot */ + P54U_READ(NET2280_DEV_U32, &devreg->ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(20); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + 
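+	/* RESET was asserted above and is cleared again below: pulsing it
+	 * restarts the device, which now comes up with RAMBOOT set and runs
+	 * the firmware that was just copied into its memory. */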
reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(100); + + P54U_READ(NET2280_DEV_U32, &devreg->int_ident); + P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); + + /* start up the firmware */ + P54U_WRITE(NET2280_DEV_U32, &devreg->int_enable, + cpu_to_le32(ISL38XX_INT_IDENT_INIT)); + + P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); + + P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT_ENABLE | + NET2280_USB_INTERRUPT_ENABLE)); + + P54U_WRITE(NET2280_DEV_U32, &devreg->dev_int, + cpu_to_le32(ISL38XX_DEV_INT_RESET)); + + err = usb_interrupt_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_INT), + buf, sizeof(__le32), &alen, 1000); + if (err || alen != sizeof(__le32)) + goto fail; + + P54U_READ(NET2280_DEV_U32, &devreg->int_ident); + P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); + + if (!(reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))) + err = -EINVAL; + + P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, 0); + P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); + +#undef P54U_WRITE +#undef P54U_READ + +fail: + kfree(buf); + return err; +} + +static int p54u_load_firmware(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + int err, i; + + BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES); + + for (i = 0; i < __NUM_P54U_HWTYPES; i++) + if (p54u_fwlist[i].type == priv->hw_type) + break; + + if (i == __NUM_P54U_HWTYPES) + return -EOPNOTSUPP; + + err = request_firmware(&priv->fw, p54u_fwlist[i].fw, &priv->udev->dev); + if (err) { + dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " + "(%d)!\n", p54u_fwlist[i].fw, err); + + err = request_firmware(&priv->fw, p54u_fwlist[i].fw_legacy, + &priv->udev->dev); + if (err) + return err; + } + + err = p54_parse_firmware(dev, priv->fw); + if (err) + goto out; + + if (priv->common.fw_interface != p54u_fwlist[i].intf) { + dev_err(&priv->udev->dev, "wrong firmware, please get " + "a firmware for \"%s\" and try again.\n", + p54u_fwlist[i].hw); + err = -EINVAL; + } + +out: + if (err) + release_firmware(priv->fw); + + return err; +} + +static int p54u_open(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + int err; + + err = p54u_init_urbs(dev); + if (err) { + return err; + } + + priv->common.open = p54u_init_urbs; + + return 0; +} + +static void p54u_stop(struct ieee80211_hw *dev) +{ + /* TODO: figure out how to reliably stop the 3887 and net2280 so + the hardware is still usable next time we want to start it. + until then, we just stop listening to the hardware.. 
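+	   (p54u_free_urbs() below just kills every URB still anchored in
+	   priv->submitted, i.e. the rx URBs and any in-flight tx transfers.)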
*/ + p54u_free_urbs(dev); + return; +} + +static int __devinit p54u_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct ieee80211_hw *dev; + struct p54u_priv *priv; + int err; + unsigned int i, recognized_pipes; + + dev = p54_init_common(sizeof(*priv)); + + if (!dev) { + dev_err(&udev->dev, "(p54usb) ieee80211 alloc failed\n"); + return -ENOMEM; + } + + priv = dev->priv; + priv->hw_type = P54U_INVALID_HW; + + SET_IEEE80211_DEV(dev, &intf->dev); + usb_set_intfdata(intf, dev); + priv->udev = udev; + priv->intf = intf; + skb_queue_head_init(&priv->rx_queue); + init_usb_anchor(&priv->submitted); + + usb_get_dev(udev); + + /* really lazy and simple way of figuring out if we're a 3887 */ + /* TODO: should just stick the identification in the device table */ + i = intf->altsetting->desc.bNumEndpoints; + recognized_pipes = 0; + while (i--) { + switch (intf->altsetting->endpoint[i].desc.bEndpointAddress) { + case P54U_PIPE_DATA: + case P54U_PIPE_MGMT: + case P54U_PIPE_BRG: + case P54U_PIPE_DEV: + case P54U_PIPE_DATA | USB_DIR_IN: + case P54U_PIPE_MGMT | USB_DIR_IN: + case P54U_PIPE_BRG | USB_DIR_IN: + case P54U_PIPE_DEV | USB_DIR_IN: + case P54U_PIPE_INT | USB_DIR_IN: + recognized_pipes++; + } + } + priv->common.open = p54u_open; + priv->common.stop = p54u_stop; + if (recognized_pipes < P54U_PIPE_NUMBER) { +#ifdef CONFIG_PM + /* ISL3887 needs a full reset on resume */ + udev->reset_resume = 1; + err = p54u_device_reset(dev); +#endif + + priv->hw_type = P54U_3887; + dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr); + priv->common.tx_hdr_len = sizeof(struct lm87_tx_hdr); + priv->common.tx = p54u_tx_lm87; + priv->upload_fw = p54u_upload_firmware_3887; + } else { + priv->hw_type = P54U_NET2280; + dev->extra_tx_headroom += sizeof(struct net2280_tx_hdr); + priv->common.tx_hdr_len = sizeof(struct net2280_tx_hdr); + priv->common.tx = p54u_tx_net2280; + priv->upload_fw = p54u_upload_firmware_net2280; + } + err = p54u_load_firmware(dev); + if (err) + goto err_free_dev; + + err = priv->upload_fw(dev); + if (err) + goto err_free_fw; + + p54u_open(dev); + err = p54_read_eeprom(dev); + p54u_stop(dev); + if (err) + goto err_free_fw; + + err = p54_register_common(dev, &udev->dev); + if (err) + goto err_free_fw; + + return 0; + +err_free_fw: + release_firmware(priv->fw); + +err_free_dev: + p54_free_common(dev); + usb_set_intfdata(intf, NULL); + usb_put_dev(udev); + return err; +} + +static void __devexit p54u_disconnect(struct usb_interface *intf) +{ + struct ieee80211_hw *dev = usb_get_intfdata(intf); + struct p54u_priv *priv; + + if (!dev) + return; + + p54_unregister_common(dev); + + priv = dev->priv; + usb_put_dev(interface_to_usbdev(intf)); + release_firmware(priv->fw); + p54_free_common(dev); +} + +static int p54u_pre_reset(struct usb_interface *intf) +{ + struct ieee80211_hw *dev = usb_get_intfdata(intf); + + if (!dev) + return -ENODEV; + + p54u_stop(dev); + return 0; +} + +static int p54u_resume(struct usb_interface *intf) +{ + struct ieee80211_hw *dev = usb_get_intfdata(intf); + struct p54u_priv *priv; + + if (!dev) + return -ENODEV; + + priv = dev->priv; + if (unlikely(!(priv->upload_fw && priv->fw))) + return 0; + + return priv->upload_fw(dev); +} + +static int p54u_post_reset(struct usb_interface *intf) +{ + struct ieee80211_hw *dev = usb_get_intfdata(intf); + struct p54u_priv *priv; + int err; + + err = p54u_resume(intf); + if (err) + return err; + + /* reinitialize old device state */ + priv = dev->priv; + if 
(priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) + ieee80211_restart_hw(dev); + + return 0; +} + +#ifdef CONFIG_PM + +static int p54u_suspend(struct usb_interface *intf, pm_message_t message) +{ + return p54u_pre_reset(intf); +} + +#endif /* CONFIG_PM */ + +static struct usb_driver p54u_driver = { + .name = "p54usb", + .id_table = p54u_table, + .probe = p54u_probe, + .disconnect = p54u_disconnect, + .pre_reset = p54u_pre_reset, + .post_reset = p54u_post_reset, +#ifdef CONFIG_PM + .suspend = p54u_suspend, + .resume = p54u_resume, + .reset_resume = p54u_resume, +#endif /* CONFIG_PM */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + .soft_unbind = 1, +#endif +}; + +static int __init p54u_init(void) +{ + return usb_register(&p54u_driver); +} + +static void __exit p54u_exit(void) +{ + usb_deregister(&p54u_driver); +} + +module_init(p54u_init); +module_exit(p54u_exit); diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h new file mode 100644 index 0000000..e935b79 --- /dev/null +++ b/drivers/net/wireless/p54/p54usb.h @@ -0,0 +1,148 @@ +#ifndef P54USB_H +#define P54USB_H + +/* + * Defines for USB based mac80211 Prism54 driver + * + * Copyright (c) 2006, Michael Wu + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* for isl3886 register definitions used on ver 1 devices */ +#include "p54pci.h" +#include "net2280.h" + +/* pci */ +#define NET2280_BASE 0x10000000 +#define NET2280_BASE2 0x20000000 + +/* gpio */ +#define P54U_BRG_POWER_UP (1 << GPIO0_DATA) +#define P54U_BRG_POWER_DOWN (1 << GPIO1_DATA) + +/* devinit */ +#define NET2280_CLK_4Mhz (15 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_CLK_30Mhz (2 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_CLK_60Mhz (1 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_CLK_STOP (0 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_PCI_ENABLE (1 << PCI_ENABLE) +#define NET2280_PCI_SOFT_RESET (1 << PCI_SOFT_RESET) + +/* endpoints */ +#define NET2280_CLEAR_NAK_OUT_PACKETS_MODE (1 << CLEAR_NAK_OUT_PACKETS_MODE) +#define NET2280_FIFO_FLUSH (1 << FIFO_FLUSH) + +/* irq */ +#define NET2280_USB_INTERRUPT_ENABLE (1 << USB_INTERRUPT_ENABLE) +#define NET2280_PCI_INTA_INTERRUPT (1 << PCI_INTA_INTERRUPT) +#define NET2280_PCI_INTA_INTERRUPT_ENABLE (1 << PCI_INTA_INTERRUPT_ENABLE) + +/* registers */ +#define NET2280_DEVINIT 0x00 +#define NET2280_USBIRQENB1 0x24 +#define NET2280_IRQSTAT1 0x2c +#define NET2280_FIFOCTL 0x38 +#define NET2280_GPIOCTL 0x50 +#define NET2280_RELNUM 0x88 +#define NET2280_EPA_RSP 0x324 +#define NET2280_EPA_STAT 0x32c +#define NET2280_EPB_STAT 0x34c +#define NET2280_EPC_RSP 0x364 +#define NET2280_EPC_STAT 0x36c +#define NET2280_EPD_STAT 0x38c + +#define NET2280_EPA_CFG 0x320 +#define NET2280_EPB_CFG 0x340 +#define NET2280_EPC_CFG 0x360 +#define NET2280_EPD_CFG 0x380 +#define NET2280_EPE_CFG 0x3A0 +#define NET2280_EPF_CFG 0x3C0 +#define P54U_DEV_BASE 0x40000000 + +struct net2280_tx_hdr { + __le32 device_addr; + __le16 len; + __le16 follower; /* ? 
*/ + u8 padding[8]; +} __attribute__((packed)); + +struct lm87_tx_hdr { + __le32 device_addr; + __le32 chksum; +} __attribute__((packed)); + +/* Some flags for the isl hardware registers controlling DMA inside the + * chip */ +#define ISL38XX_DMA_STATUS_DONE 0x00000001 +#define ISL38XX_DMA_STATUS_READY 0x00000002 +#define NET2280_EPA_FIFO_PCI_ADDR 0x20000000 +#define ISL38XX_DMA_MASTER_CONTROL_TRIGGER 0x00000004 + +enum net2280_op_type { + NET2280_BRG_U32 = 0x001F, + NET2280_BRG_CFG_U32 = 0x000F, + NET2280_BRG_CFG_U16 = 0x0003, + NET2280_DEV_U32 = 0x080F, + NET2280_DEV_CFG_U32 = 0x088F, + NET2280_DEV_CFG_U16 = 0x0883 +}; + +#define P54U_FW_BLOCK 2048 + +#define X2_SIGNATURE "x2 " +#define X2_SIGNATURE_SIZE 4 + +struct x2_header { + u8 signature[X2_SIGNATURE_SIZE]; + __le32 fw_load_addr; + __le32 fw_length; + __le32 crc; +} __attribute__((packed)); + +/* pipes 3 and 4 are not used by the driver */ +#define P54U_PIPE_NUMBER 9 + +enum p54u_pipe_addr { + P54U_PIPE_DATA = 0x01, + P54U_PIPE_MGMT = 0x02, + P54U_PIPE_3 = 0x03, + P54U_PIPE_4 = 0x04, + P54U_PIPE_BRG = 0x0d, + P54U_PIPE_DEV = 0x0e, + P54U_PIPE_INT = 0x0f +}; + +struct p54u_rx_info { + struct urb *urb; + struct ieee80211_hw *dev; +}; + +enum p54u_hw_type { + P54U_INVALID_HW, + P54U_NET2280, + P54U_3887, + + /* keep last */ + __NUM_P54U_HWTYPES, +}; + +struct p54u_priv { + struct p54_common common; + struct usb_device *udev; + struct usb_interface *intf; + int (*upload_fw)(struct ieee80211_hw *dev); + + enum p54u_hw_type hw_type; + spinlock_t lock; + struct sk_buff_head rx_queue; + struct usb_anchor submitted; + const struct firmware *fw; +}; + +#endif /* P54USB_H */ diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c new file mode 100644 index 0000000..6605799 --- /dev/null +++ b/drivers/net/wireless/p54/txrx.c @@ -0,0 +1,869 @@ +/* + * Common code for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2007-2009, Christian Lamparter + * Copyright 2008, Johannes Berg + * + * Based on: + * - the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * - stlc45xx driver + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include + +#include + +#include "p54.h" +#include "lmac.h" + +#ifdef P54_MM_DEBUG +static void p54_dump_tx_queue(struct p54_common *priv) +{ + unsigned long flags; + struct ieee80211_tx_info *info; + struct p54_tx_info *range; + struct sk_buff *skb; + struct p54_hdr *hdr; + unsigned int i = 0; + u32 prev_addr; + u32 largest_hole = 0, free; + + spin_lock_irqsave(&priv->tx_queue.lock, flags); + printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) --- \n", + wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue)); + + prev_addr = priv->rx_start; + skb_queue_walk(&priv->tx_queue, skb) { + info = IEEE80211_SKB_CB(skb); + range = (void *) info->rate_driver_data; + hdr = (void *) skb->data; + + free = range->start_addr - prev_addr; + printk(KERN_DEBUG "%s: | [%02d] => [skb:%p skb_len:0x%04x " + "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} " + "mem:{start:%04x end:%04x, free:%d}]\n", + wiphy_name(priv->hw->wiphy), i++, skb, skb->len, + le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len), + le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type), + range->start_addr, range->end_addr, free); + + prev_addr = range->end_addr; + largest_hole = max(largest_hole, free); + } + free = priv->rx_end - prev_addr; + largest_hole = max(largest_hole, free); + printk(KERN_DEBUG "%s: \\ --- [free: %d], largest free block: %d ---\n", + wiphy_name(priv->hw->wiphy), free, largest_hole); + spin_unlock_irqrestore(&priv->tx_queue.lock, flags); +} +#endif /* P54_MM_DEBUG */ + +/* + * So, the firmware is somewhat stupid and doesn't know what places in its + * memory incoming data should go to. By poking around in the firmware, we + * can find some unused memory to upload our packets to. However, data that we + * want the card to TX needs to stay intact until the card has told us that + * it is done with it. This function finds empty places we can upload to and + * marks allocated areas as reserved if necessary. p54_find_and_unlink_skb or + * p54_free_skb frees allocated areas. + */ +static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb) +{ + struct sk_buff *entry, *target_skb = NULL; + struct ieee80211_tx_info *info; + struct p54_tx_info *range; + struct p54_hdr *data = (void *) skb->data; + unsigned long flags; + u32 last_addr = priv->rx_start; + u32 target_addr = priv->rx_start; + u16 len = priv->headroom + skb->len + priv->tailroom + 3; + + info = IEEE80211_SKB_CB(skb); + range = (void *) info->rate_driver_data; + len = (range->extra_len + len) & ~0x3; + + spin_lock_irqsave(&priv->tx_queue.lock, flags); + if (unlikely(skb_queue_len(&priv->tx_queue) == 32)) { + /* + * The tx_queue is now really full. + * + * TODO: check if the device has crashed and reset it. 
+ */ + spin_unlock_irqrestore(&priv->tx_queue.lock, flags); + return -EBUSY; + } + + skb_queue_walk(&priv->tx_queue, entry) { + u32 hole_size; + info = IEEE80211_SKB_CB(entry); + range = (void *) info->rate_driver_data; + hole_size = range->start_addr - last_addr; + + if (!target_skb && hole_size >= len) { + target_skb = entry->prev; + hole_size -= len; + target_addr = last_addr; + break; + } + last_addr = range->end_addr; + } + if (unlikely(!target_skb)) { + if (priv->rx_end - last_addr >= len) { + target_skb = priv->tx_queue.prev; + if (!skb_queue_empty(&priv->tx_queue)) { + info = IEEE80211_SKB_CB(target_skb); + range = (void *)info->rate_driver_data; + target_addr = range->end_addr; + } + } else { + spin_unlock_irqrestore(&priv->tx_queue.lock, flags); + return -ENOSPC; + } + } + + info = IEEE80211_SKB_CB(skb); + range = (void *) info->rate_driver_data; + range->start_addr = target_addr; + range->end_addr = target_addr + len; + data->req_id = cpu_to_le32(target_addr + priv->headroom); + if (IS_DATA_FRAME(skb) && + unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) + priv->beacon_req_id = data->req_id; + + __skb_queue_after(&priv->tx_queue, target_skb, skb); + spin_unlock_irqrestore(&priv->tx_queue.lock, flags); + return 0; +} + +static void p54_tx_pending(struct p54_common *priv) +{ + struct sk_buff *skb; + int ret; + + skb = skb_dequeue(&priv->tx_pending); + if (unlikely(!skb)) + return ; + + ret = p54_assign_address(priv, skb); + if (unlikely(ret)) + skb_queue_head(&priv->tx_pending, skb); + else + priv->tx(priv->hw, skb); +} + +static void p54_wake_queues(struct p54_common *priv) +{ + unsigned long flags; + unsigned int i; + + if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) + return ; + + p54_tx_pending(priv); + + spin_lock_irqsave(&priv->tx_stats_lock, flags); + for (i = 0; i < priv->hw->queues; i++) { + if (priv->tx_stats[i + P54_QUEUE_DATA].len < + priv->tx_stats[i + P54_QUEUE_DATA].limit) + ieee80211_wake_queue(priv->hw, i); + } + spin_unlock_irqrestore(&priv->tx_stats_lock, flags); +} + +static int p54_tx_qos_accounting_alloc(struct p54_common *priv, + struct sk_buff *skb, + const u16 p54_queue) +{ + struct p54_tx_queue_stats *queue; + unsigned long flags; + + if (WARN_ON(p54_queue >= P54_QUEUE_NUM)) + return -EINVAL; + + queue = &priv->tx_stats[p54_queue]; + + spin_lock_irqsave(&priv->tx_stats_lock, flags); + if (unlikely(queue->len >= queue->limit && IS_QOS_QUEUE(p54_queue))) { + spin_unlock_irqrestore(&priv->tx_stats_lock, flags); + return -ENOSPC; + } + + queue->len++; + queue->count++; + + if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) { + u16 ac_queue = p54_queue - P54_QUEUE_DATA; + ieee80211_stop_queue(priv->hw, ac_queue); + } + + spin_unlock_irqrestore(&priv->tx_stats_lock, flags); + return 0; +} + +static void p54_tx_qos_accounting_free(struct p54_common *priv, + struct sk_buff *skb) +{ + if (IS_DATA_FRAME(skb)) { + unsigned long flags; + + spin_lock_irqsave(&priv->tx_stats_lock, flags); + priv->tx_stats[GET_HW_QUEUE(skb)].len--; + spin_unlock_irqrestore(&priv->tx_stats_lock, flags); + + if (unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) { + if (priv->beacon_req_id == GET_REQ_ID(skb)) { + /* this is the active beacon set anymore */ + priv->beacon_req_id = 0; + } + complete(&priv->beacon_comp); + } + } + p54_wake_queues(priv); +} + +void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54_common *priv = dev->priv; + if (unlikely(!skb)) + return ; + + skb_unlink(skb, &priv->tx_queue); + p54_tx_qos_accounting_free(priv, 
skb); + dev_kfree_skb_any(skb); +} +EXPORT_SYMBOL_GPL(p54_free_skb); + +static struct sk_buff *p54_find_and_unlink_skb(struct p54_common *priv, + const __le32 req_id) +{ + struct sk_buff *entry; + unsigned long flags; + + spin_lock_irqsave(&priv->tx_queue.lock, flags); + skb_queue_walk(&priv->tx_queue, entry) { + struct p54_hdr *hdr = (struct p54_hdr *) entry->data; + + if (hdr->req_id == req_id) { + __skb_unlink(entry, &priv->tx_queue); + spin_unlock_irqrestore(&priv->tx_queue.lock, flags); + p54_tx_qos_accounting_free(priv, entry); + return entry; + } + } + spin_unlock_irqrestore(&priv->tx_queue.lock, flags); + return NULL; +} + +void p54_tx(struct p54_common *priv, struct sk_buff *skb) +{ + skb_queue_tail(&priv->tx_pending, skb); + p54_tx_pending(priv); +} + +static int p54_rssi_to_dbm(struct p54_common *priv, int rssi) +{ + int band = priv->hw->conf.channel->band; + + if (priv->rxhw != 5) + return ((rssi * priv->rssical_db[band].mul) / 64 + + priv->rssical_db[band].add) / 4; + else + /* + * TODO: find the correct formula + */ + return ((rssi * priv->rssical_db[band].mul) / 64 + + priv->rssical_db[band].add) / 4; +} + +/* + * Even if the firmware is capable of dealing with incoming traffic, + * while dozing, we have to prepared in case mac80211 uses PS-POLL + * to retrieve outstanding frames from our AP. + * (see comment in net/mac80211/mlme.c @ line 1993) + */ +static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *) skb->data; + struct ieee80211_tim_ie *tim_ie; + u8 *tim; + u8 tim_len; + bool new_psm; + + /* only beacons have a TIM IE */ + if (!ieee80211_is_beacon(hdr->frame_control)) + return; + + if (!priv->aid) + return; + + /* only consider beacons from the associated BSSID */ + if (compare_ether_addr(hdr->addr3, priv->bssid)) + return; + + tim = p54_find_ie(skb, WLAN_EID_TIM); + if (!tim) + return; + + tim_len = tim[1]; + tim_ie = (struct ieee80211_tim_ie *) &tim[2]; + + new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid); + if (new_psm != priv->powersave_override) { + priv->powersave_override = new_psm; + p54_set_ps(priv); + } +} + +static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb) +{ + struct p54_rx_data *hdr = (struct p54_rx_data *) skb->data; + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + u16 freq = le16_to_cpu(hdr->freq); + size_t header_len = sizeof(*hdr); + u32 tsf32; + u8 rate = hdr->rate & 0xf; + + /* + * If the device is in a unspecified state we have to + * ignore all data frames. Else we could end up with a + * nasty crash. + */ + if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) + return 0; + + if (!(hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_IN_FCS_GOOD))) + return 0; + + if (hdr->decrypt_status == P54_DECRYPT_OK) + rx_status->flag |= RX_FLAG_DECRYPTED; + if ((hdr->decrypt_status == P54_DECRYPT_FAIL_MICHAEL) || + (hdr->decrypt_status == P54_DECRYPT_FAIL_TKIP)) + rx_status->flag |= RX_FLAG_MMIC_ERROR; + + rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); + rx_status->noise = priv->noise; + if (hdr->rate & 0x10) + rx_status->flag |= RX_FLAG_SHORTPRE; + if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ) + rx_status->rate_idx = (rate < 4) ? 
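+			/* the 5 GHz rate table has no CCK entries, so the
+			 * firmware's rate indices 4..11 map to OFDM indices
+			 * 0..7; bogus CCK indices are clamped to 0 */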
0 : rate - 4; + else + rx_status->rate_idx = rate; + + rx_status->freq = freq; + rx_status->band = priv->hw->conf.channel->band; + rx_status->antenna = hdr->antenna; + + tsf32 = le32_to_cpu(hdr->tsf32); + if (tsf32 < priv->tsf_low32) + priv->tsf_high32++; + rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32; + priv->tsf_low32 = tsf32; + + rx_status->flag |= RX_FLAG_TSFT; + + if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) + header_len += hdr->align[0]; + + skb_pull(skb, header_len); + skb_trim(skb, le16_to_cpu(hdr->len)); + if (unlikely(priv->hw->conf.flags & IEEE80211_CONF_PS)) + p54_pspoll_workaround(priv, skb); + + ieee80211_rx_irqsafe(priv->hw, skb); + + ieee80211_queue_delayed_work(priv->hw, &priv->work, + msecs_to_jiffies(P54_STATISTICS_UPDATE)); + + return -1; +} + +static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb) +{ + struct p54_hdr *hdr = (struct p54_hdr *) skb->data; + struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data; + struct ieee80211_tx_info *info; + struct p54_hdr *entry_hdr; + struct p54_tx_data *entry_data; + struct sk_buff *entry; + unsigned int pad = 0, frame_len; + int count, idx; + + entry = p54_find_and_unlink_skb(priv, hdr->req_id); + if (unlikely(!entry)) + return ; + + frame_len = entry->len; + info = IEEE80211_SKB_CB(entry); + entry_hdr = (struct p54_hdr *) entry->data; + entry_data = (struct p54_tx_data *) entry_hdr->data; + priv->stats.dot11ACKFailureCount += payload->tries - 1; + + /* + * Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are + * generated by the driver. Therefore tx_status is bogus + * and we don't want to confuse the mac80211 stack. + */ + if (unlikely(entry_data->hw_queue < P54_QUEUE_FWSCAN)) { + dev_kfree_skb_any(entry); + return ; + } + + /* + * Clear manually, ieee80211_tx_info_clear_status would + * clear the counts too and we need them. + */ + memset(&info->status.ampdu_ack_len, 0, + sizeof(struct ieee80211_tx_info) - + offsetof(struct ieee80211_tx_info, status.ampdu_ack_len)); + BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, + status.ampdu_ack_len) != 23); + + if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) + pad = entry_data->align[0]; + + /* walk through the rates array and adjust the counts */ + count = payload->tries; + for (idx = 0; idx < 4; idx++) { + if (count >= info->status.rates[idx].count) { + count -= info->status.rates[idx].count; + } else if (count > 0) { + info->status.rates[idx].count = count; + count = 0; + } else { + info->status.rates[idx].idx = -1; + info->status.rates[idx].count = 0; + } + } + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && + (!payload->status)) + info->flags |= IEEE80211_TX_STAT_ACK; + if (payload->status & P54_TX_PSM_CANCELLED) + info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + info->status.ack_signal = p54_rssi_to_dbm(priv, + (int)payload->ack_rssi); + + /* Undo all changes to the frame. */ + switch (entry_data->key_type) { + case P54_CRYPTO_TKIPMICHAEL: { + u8 *iv = (u8 *)(entry_data->align + pad + + entry_data->crypt_offset); + + /* Restore the original TKIP IV. 
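+		 * (p54_tx_80211() rearranged the first three IV bytes for the
+		 * firmware; undo that swap so mac80211 gets back the frame it
+		 * originally submitted when the TX status is reported.)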
*/ + iv[2] = iv[0]; + iv[0] = iv[1]; + iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */ + + frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */ + break; + } + case P54_CRYPTO_AESCCMP: + frame_len -= 8; /* remove CCMP_MIC */ + break; + case P54_CRYPTO_WEP: + frame_len -= 4; /* remove WEP_ICV */ + break; + } + + skb_trim(entry, frame_len); + skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); + ieee80211_tx_status_irqsafe(priv->hw, entry); +} + +static void p54_rx_eeprom_readback(struct p54_common *priv, + struct sk_buff *skb) +{ + struct p54_hdr *hdr = (struct p54_hdr *) skb->data; + struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data; + struct sk_buff *tmp; + + if (!priv->eeprom) + return ; + + if (priv->fw_var >= 0x509) { + memcpy(priv->eeprom, eeprom->v2.data, + le16_to_cpu(eeprom->v2.len)); + } else { + memcpy(priv->eeprom, eeprom->v1.data, + le16_to_cpu(eeprom->v1.len)); + } + + priv->eeprom = NULL; + tmp = p54_find_and_unlink_skb(priv, hdr->req_id); + dev_kfree_skb_any(tmp); + complete(&priv->eeprom_comp); +} + +static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb) +{ + struct p54_hdr *hdr = (struct p54_hdr *) skb->data; + struct p54_statistics *stats = (struct p54_statistics *) hdr->data; + struct sk_buff *tmp; + u32 tsf32; + + if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) + return ; + + tsf32 = le32_to_cpu(stats->tsf32); + if (tsf32 < priv->tsf_low32) + priv->tsf_high32++; + priv->tsf_low32 = tsf32; + + priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail); + priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success); + priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs); + + priv->noise = p54_rssi_to_dbm(priv, le32_to_cpu(stats->noise)); + + tmp = p54_find_and_unlink_skb(priv, hdr->req_id); + dev_kfree_skb_any(tmp); +} + +static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb) +{ + struct p54_hdr *hdr = (struct p54_hdr *) skb->data; + struct p54_trap *trap = (struct p54_trap *) hdr->data; + u16 event = le16_to_cpu(trap->event); + u16 freq = le16_to_cpu(trap->frequency); + + switch (event) { + case P54_TRAP_BEACON_TX: + break; + case P54_TRAP_RADAR: + printk(KERN_INFO "%s: radar (freq:%d MHz)\n", + wiphy_name(priv->hw->wiphy), freq); + break; + case P54_TRAP_NO_BEACON: + if (priv->vif) + ieee80211_beacon_loss(priv->vif); + break; + case P54_TRAP_SCAN: + break; + case P54_TRAP_TBTT: + break; + case P54_TRAP_TIMER: + break; + case P54_TRAP_FAA_RADIO_OFF: + wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); + break; + case P54_TRAP_FAA_RADIO_ON: + wiphy_rfkill_set_hw_state(priv->hw->wiphy, false); + break; + default: + printk(KERN_INFO "%s: received event:%x freq:%d\n", + wiphy_name(priv->hw->wiphy), event, freq); + break; + } +} + +static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb) +{ + struct p54_hdr *hdr = (struct p54_hdr *) skb->data; + + switch (le16_to_cpu(hdr->type)) { + case P54_CONTROL_TYPE_TXDONE: + p54_rx_frame_sent(priv, skb); + break; + case P54_CONTROL_TYPE_TRAP: + p54_rx_trap(priv, skb); + break; + case P54_CONTROL_TYPE_BBP: + break; + case P54_CONTROL_TYPE_STAT_READBACK: + p54_rx_stats(priv, skb); + break; + case P54_CONTROL_TYPE_EEPROM_READBACK: + p54_rx_eeprom_readback(priv, skb); + break; + default: + printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n", + wiphy_name(priv->hw->wiphy), le16_to_cpu(hdr->type)); + break; + } + return 0; +} + +/* returns zero if skb can be reused */ +int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb) 
+{ + struct p54_common *priv = dev->priv; + u16 type = le16_to_cpu(*((__le16 *)skb->data)); + + if (type & P54_HDR_FLAG_CONTROL) + return p54_rx_control(priv, skb); + else + return p54_rx_data(priv, skb); +} +EXPORT_SYMBOL_GPL(p54_rx); + +static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, + struct ieee80211_tx_info *info, u8 *queue, + u32 *extra_len, u16 *flags, u16 *aid, + bool *burst_possible) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (ieee80211_is_data_qos(hdr->frame_control)) + *burst_possible = true; + else + *burst_possible = false; + + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR; + + if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE) + *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL; + + if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) + *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL; + + *queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA; + + switch (priv->mode) { + case NL80211_IFTYPE_MONITOR: + /* + * We have to set P54_HDR_FLAG_DATA_OUT_PROMISC for + * every frame in promiscuous/monitor mode. + * see STSW45x0C LMAC API - page 12. + */ + *aid = 0; + *flags |= P54_HDR_FLAG_DATA_OUT_PROMISC; + break; + case NL80211_IFTYPE_STATION: + *aid = 1; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + *aid = 0; + *queue = P54_QUEUE_CAB; + return; + } + + if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) { + if (ieee80211_is_probe_resp(hdr->frame_control)) { + *aid = 0; + *flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP | + P54_HDR_FLAG_DATA_OUT_NOCANCEL; + return; + } else if (ieee80211_is_beacon(hdr->frame_control)) { + *aid = 0; + + if (info->flags & IEEE80211_TX_CTL_INJECTED) { + /* + * Injecting beacons on top of a AP is + * not a good idea... nevertheless, + * it should be doable. 
+ */ + + return; + } + + *flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP; + *queue = P54_QUEUE_BEACON; + *extra_len = IEEE80211_MAX_TIM_LEN; + return; + } + } + + if (info->control.sta) + *aid = info->control.sta->aid; + break; + } +} + +static u8 p54_convert_algo(enum ieee80211_key_alg alg) +{ + switch (alg) { + case ALG_WEP: + return P54_CRYPTO_WEP; + case ALG_TKIP: + return P54_CRYPTO_TKIPMICHAEL; + case ALG_CCMP: + return P54_CRYPTO_AESCCMP; + default: + return 0; + } +} + +int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54_common *priv = dev->priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct p54_tx_info *p54info; + struct p54_hdr *hdr; + struct p54_tx_data *txhdr; + unsigned int padding, len, extra_len; + int i, j, ridx; + u16 hdr_flags = 0, aid = 0; + u8 rate, queue = 0, crypt_offset = 0; + u8 cts_rate = 0x20; + u8 rc_flags; + u8 calculated_tries[4]; + u8 nrates = 0, nremaining = 8; + bool burst_allowed = false; + + p54_tx_80211_header(priv, skb, info, &queue, &extra_len, + &hdr_flags, &aid, &burst_allowed); + + if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { + if (!IS_QOS_QUEUE(queue)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } else { + return NETDEV_TX_BUSY; + } + } + + padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; + len = skb->len; + + if (info->control.hw_key) { + crypt_offset = ieee80211_get_hdrlen_from_skb(skb); + if (info->control.hw_key->alg == ALG_TKIP) { + u8 *iv = (u8 *)(skb->data + crypt_offset); + /* + * The firmware excepts that the IV has to have + * this special format + */ + iv[1] = iv[0]; + iv[0] = iv[2]; + iv[2] = 0; + } + } + + txhdr = (struct p54_tx_data *) skb_push(skb, sizeof(*txhdr) + padding); + hdr = (struct p54_hdr *) skb_push(skb, sizeof(*hdr)); + + if (padding) + hdr_flags |= P54_HDR_FLAG_DATA_ALIGN; + hdr->type = cpu_to_le16(aid); + hdr->rts_tries = info->control.rates[0].count; + + /* + * we register the rates in perfect order, and + * RTS/CTS won't happen on 5 GHz + */ + cts_rate = info->control.rts_cts_rate_idx; + + memset(&txhdr->rateset, 0, sizeof(txhdr->rateset)); + + /* see how many rates got used */ + for (i = 0; i < dev->max_rates; i++) { + if (info->control.rates[i].idx < 0) + break; + nrates++; + } + + /* limit tries to 8/nrates per rate */ + for (i = 0; i < nrates; i++) { + /* + * The magic expression here is equivalent to 8/nrates for + * all values that matter, but avoids division and jumps. + * Note that nrates can only take the values 1 through 4. 
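+		 * For example, nrates = 1, 2, 3, 4 makes
+		 * ((15 >> nrates) | 1) + 1 evaluate to 8, 4, 2 and 2 tries,
+		 * matching 8/nrates under C integer division.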
+ */ + calculated_tries[i] = min_t(int, ((15 >> nrates) | 1) + 1, + info->control.rates[i].count); + nremaining -= calculated_tries[i]; + } + + /* if there are tries left, distribute from back to front */ + for (i = nrates - 1; nremaining > 0 && i >= 0; i--) { + int tmp = info->control.rates[i].count - calculated_tries[i]; + + if (tmp <= 0) + continue; + /* RC requested more tries at this rate */ + + tmp = min_t(int, tmp, nremaining); + calculated_tries[i] += tmp; + nremaining -= tmp; + } + + ridx = 0; + for (i = 0; i < nrates && ridx < 8; i++) { + /* we register the rates in perfect order */ + rate = info->control.rates[i].idx; + if (info->band == IEEE80211_BAND_5GHZ) + rate += 4; + + /* store the count we actually calculated for TX status */ + info->control.rates[i].count = calculated_tries[i]; + + rc_flags = info->control.rates[i].flags; + if (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) { + rate |= 0x10; + cts_rate |= 0x10; + } + if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { + burst_allowed = false; + rate |= 0x40; + } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + rate |= 0x20; + burst_allowed = false; + } + for (j = 0; j < calculated_tries[i] && ridx < 8; j++) { + txhdr->rateset[ridx] = rate; + ridx++; + } + } + + if (burst_allowed) + hdr_flags |= P54_HDR_FLAG_DATA_OUT_BURST; + + /* TODO: enable bursting */ + hdr->flags = cpu_to_le16(hdr_flags); + hdr->tries = ridx; + txhdr->rts_rate_idx = 0; + if (info->control.hw_key) { + txhdr->key_type = p54_convert_algo(info->control.hw_key->alg); + txhdr->key_len = min((u8)16, info->control.hw_key->keylen); + memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len); + if (info->control.hw_key->alg == ALG_TKIP) { + /* reserve space for the MIC key */ + len += 8; + memcpy(skb_put(skb, 8), &(info->control.hw_key->key + [NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]), 8); + } + /* reserve some space for ICV */ + len += info->control.hw_key->icv_len; + memset(skb_put(skb, info->control.hw_key->icv_len), 0, + info->control.hw_key->icv_len); + } else { + txhdr->key_type = 0; + txhdr->key_len = 0; + } + txhdr->crypt_offset = crypt_offset; + txhdr->hw_queue = queue; + txhdr->backlog = priv->tx_stats[queue].len - 1; + memset(txhdr->durations, 0, sizeof(txhdr->durations)); + txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ? + 2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask; + if (priv->rxhw == 5) { + txhdr->longbow.cts_rate = cts_rate; + txhdr->longbow.output_power = cpu_to_le16(priv->output_power); + } else { + txhdr->normal.output_power = priv->output_power; + txhdr->normal.cts_rate = cts_rate; + } + if (padding) + txhdr->align[0] = padding; + + hdr->len = cpu_to_le16(len); + /* modifies skb->cb and with it info, so must be last! */ + p54info = (void *) info->rate_driver_data; + p54info->extra_len = extra_len; + + p54_tx(priv, skb); + return NETDEV_TX_OK; +} diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c new file mode 100644 index 0000000..834c581 --- /dev/null +++ b/drivers/net/wireless/rndis_wlan.c @@ -0,0 +1,3118 @@ +/* + * Driver for RNDIS based wireless USB devices. + * + * Copyright (C) 2007 by Bjorge Dijkstra + * Copyright (C) 2008-2009 by Jussi Kivilinna + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Portions of this file are based on NDISwrapper project, + * Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani + * http://ndiswrapper.sourceforge.net/ + */ + +// #define DEBUG // error path messages, extra info +// #define VERBOSE // more; success messages + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* NOTE: All these are settings for Broadcom chipset */ +static char modparam_country[4] = "EU"; +module_param_string(country, modparam_country, 4, 0444); +MODULE_PARM_DESC(country, "Country code (ISO 3166-1 alpha-2), default: EU"); + +static int modparam_frameburst = 1; +module_param_named(frameburst, modparam_frameburst, int, 0444); +MODULE_PARM_DESC(frameburst, "enable frame bursting (default: on)"); + +static int modparam_afterburner = 0; +module_param_named(afterburner, modparam_afterburner, int, 0444); +MODULE_PARM_DESC(afterburner, + "enable afterburner aka '125 High Speed Mode' (default: off)"); + +static int modparam_power_save = 0; +module_param_named(power_save, modparam_power_save, int, 0444); +MODULE_PARM_DESC(power_save, + "set power save mode: 0=off, 1=on, 2=fast (default: off)"); + +static int modparam_power_output = 3; +module_param_named(power_output, modparam_power_output, int, 0444); +MODULE_PARM_DESC(power_output, + "set power output: 0=25%, 1=50%, 2=75%, 3=100% (default: 100%)"); + +static int modparam_roamtrigger = -70; +module_param_named(roamtrigger, modparam_roamtrigger, int, 0444); +MODULE_PARM_DESC(roamtrigger, + "set roaming dBm trigger: -80=optimize for distance, " + "-60=bandwidth (default: -70)"); + +static int modparam_roamdelta = 1; +module_param_named(roamdelta, modparam_roamdelta, int, 0444); +MODULE_PARM_DESC(roamdelta, + "set roaming tendency: 0=aggressive, 1=moderate, " + "2=conservative (default: moderate)"); + +static int modparam_workaround_interval; +module_param_named(workaround_interval, modparam_workaround_interval, + int, 0444); +MODULE_PARM_DESC(workaround_interval, + "set stall workaround interval in msecs (0=disabled) (default: 0)"); + + +/* various RNDIS OID defs */ +#define OID_GEN_LINK_SPEED cpu_to_le32(0x00010107) +#define OID_GEN_RNDIS_CONFIG_PARAMETER cpu_to_le32(0x0001021b) + +#define OID_GEN_XMIT_OK cpu_to_le32(0x00020101) +#define OID_GEN_RCV_OK cpu_to_le32(0x00020102) +#define OID_GEN_XMIT_ERROR cpu_to_le32(0x00020103) +#define OID_GEN_RCV_ERROR cpu_to_le32(0x00020104) +#define OID_GEN_RCV_NO_BUFFER cpu_to_le32(0x00020105) + +#define OID_802_3_CURRENT_ADDRESS cpu_to_le32(0x01010102) +#define OID_802_3_MULTICAST_LIST cpu_to_le32(0x01010103) +#define OID_802_3_MAXIMUM_LIST_SIZE cpu_to_le32(0x01010104) + +#define OID_802_11_BSSID cpu_to_le32(0x0d010101) +#define OID_802_11_SSID cpu_to_le32(0x0d010102) +#define OID_802_11_INFRASTRUCTURE_MODE cpu_to_le32(0x0d010108) +#define OID_802_11_ADD_WEP cpu_to_le32(0x0d010113) +#define OID_802_11_REMOVE_WEP cpu_to_le32(0x0d010114) +#define OID_802_11_DISASSOCIATE 
cpu_to_le32(0x0d010115) +#define OID_802_11_AUTHENTICATION_MODE cpu_to_le32(0x0d010118) +#define OID_802_11_PRIVACY_FILTER cpu_to_le32(0x0d010119) +#define OID_802_11_BSSID_LIST_SCAN cpu_to_le32(0x0d01011a) +#define OID_802_11_ENCRYPTION_STATUS cpu_to_le32(0x0d01011b) +#define OID_802_11_ADD_KEY cpu_to_le32(0x0d01011d) +#define OID_802_11_REMOVE_KEY cpu_to_le32(0x0d01011e) +#define OID_802_11_ASSOCIATION_INFORMATION cpu_to_le32(0x0d01011f) +#define OID_802_11_PMKID cpu_to_le32(0x0d010123) +#define OID_802_11_NETWORK_TYPES_SUPPORTED cpu_to_le32(0x0d010203) +#define OID_802_11_NETWORK_TYPE_IN_USE cpu_to_le32(0x0d010204) +#define OID_802_11_TX_POWER_LEVEL cpu_to_le32(0x0d010205) +#define OID_802_11_RSSI cpu_to_le32(0x0d010206) +#define OID_802_11_RSSI_TRIGGER cpu_to_le32(0x0d010207) +#define OID_802_11_FRAGMENTATION_THRESHOLD cpu_to_le32(0x0d010209) +#define OID_802_11_RTS_THRESHOLD cpu_to_le32(0x0d01020a) +#define OID_802_11_SUPPORTED_RATES cpu_to_le32(0x0d01020e) +#define OID_802_11_CONFIGURATION cpu_to_le32(0x0d010211) +#define OID_802_11_BSSID_LIST cpu_to_le32(0x0d010217) + + +/* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */ +#define WL_NOISE -96 /* typical noise level in dBm */ +#define WL_SIGMAX -32 /* typical maximum signal level in dBm */ + + +/* Assume that Broadcom 4320 (only chipset at time of writing known to be + * based on wireless rndis) has default txpower of 13dBm. + * This value is from Linksys WUSB54GSC User Guide, Appendix F: Specifications. + * 100% : 20 mW ~ 13dBm + * 75% : 15 mW ~ 12dBm + * 50% : 10 mW ~ 10dBm + * 25% : 5 mW ~ 7dBm + */ +#define BCM4320_DEFAULT_TXPOWER_DBM_100 13 +#define BCM4320_DEFAULT_TXPOWER_DBM_75 12 +#define BCM4320_DEFAULT_TXPOWER_DBM_50 10 +#define BCM4320_DEFAULT_TXPOWER_DBM_25 7 + + +/* codes for "status" field of completion messages */ +#define RNDIS_STATUS_ADAPTER_NOT_READY cpu_to_le32(0xc0010011) +#define RNDIS_STATUS_ADAPTER_NOT_OPEN cpu_to_le32(0xc0010012) + + +/* NDIS data structures. 
Taken from wpa_supplicant driver_ndis.c + * slightly modified for datatype endianess, etc + */ +#define NDIS_802_11_LENGTH_SSID 32 +#define NDIS_802_11_LENGTH_RATES 8 +#define NDIS_802_11_LENGTH_RATES_EX 16 + +enum ndis_80211_net_type { + NDIS_80211_TYPE_FREQ_HOP, + NDIS_80211_TYPE_DIRECT_SEQ, + NDIS_80211_TYPE_OFDM_A, + NDIS_80211_TYPE_OFDM_G +}; + +enum ndis_80211_net_infra { + NDIS_80211_INFRA_ADHOC, + NDIS_80211_INFRA_INFRA, + NDIS_80211_INFRA_AUTO_UNKNOWN +}; + +enum ndis_80211_auth_mode { + NDIS_80211_AUTH_OPEN, + NDIS_80211_AUTH_SHARED, + NDIS_80211_AUTH_AUTO_SWITCH, + NDIS_80211_AUTH_WPA, + NDIS_80211_AUTH_WPA_PSK, + NDIS_80211_AUTH_WPA_NONE, + NDIS_80211_AUTH_WPA2, + NDIS_80211_AUTH_WPA2_PSK +}; + +enum ndis_80211_encr_status { + NDIS_80211_ENCR_WEP_ENABLED, + NDIS_80211_ENCR_DISABLED, + NDIS_80211_ENCR_WEP_KEY_ABSENT, + NDIS_80211_ENCR_NOT_SUPPORTED, + NDIS_80211_ENCR_TKIP_ENABLED, + NDIS_80211_ENCR_TKIP_KEY_ABSENT, + NDIS_80211_ENCR_CCMP_ENABLED, + NDIS_80211_ENCR_CCMP_KEY_ABSENT +}; + +enum ndis_80211_priv_filter { + NDIS_80211_PRIV_ACCEPT_ALL, + NDIS_80211_PRIV_8021X_WEP +}; + +enum ndis_80211_status_type { + NDIS_80211_STATUSTYPE_AUTHENTICATION, + NDIS_80211_STATUSTYPE_MEDIASTREAMMODE, + NDIS_80211_STATUSTYPE_PMKID_CANDIDATELIST, + NDIS_80211_STATUSTYPE_RADIOSTATE, +}; + +enum ndis_80211_media_stream_mode { + NDIS_80211_MEDIA_STREAM_OFF, + NDIS_80211_MEDIA_STREAM_ON +}; + +enum ndis_80211_radio_status { + NDIS_80211_RADIO_STATUS_ON, + NDIS_80211_RADIO_STATUS_HARDWARE_OFF, + NDIS_80211_RADIO_STATUS_SOFTWARE_OFF, +}; + +enum ndis_80211_addkey_bits { + NDIS_80211_ADDKEY_8021X_AUTH = cpu_to_le32(1 << 28), + NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ = cpu_to_le32(1 << 29), + NDIS_80211_ADDKEY_PAIRWISE_KEY = cpu_to_le32(1 << 30), + NDIS_80211_ADDKEY_TRANSMIT_KEY = cpu_to_le32(1 << 31) +}; + +enum ndis_80211_addwep_bits { + NDIS_80211_ADDWEP_PERCLIENT_KEY = cpu_to_le32(1 << 30), + NDIS_80211_ADDWEP_TRANSMIT_KEY = cpu_to_le32(1 << 31) +}; + +struct ndis_80211_auth_request { + __le32 length; + u8 bssid[6]; + u8 padding[2]; + __le32 flags; +} __attribute__((packed)); + +struct ndis_80211_pmkid_candidate { + u8 bssid[6]; + u8 padding[2]; + __le32 flags; +} __attribute__((packed)); + +struct ndis_80211_pmkid_cand_list { + __le32 version; + __le32 num_candidates; + struct ndis_80211_pmkid_candidate candidate_list[0]; +} __attribute__((packed)); + +struct ndis_80211_status_indication { + __le32 status_type; + union { + __le32 media_stream_mode; + __le32 radio_status; + struct ndis_80211_auth_request auth_request[0]; + struct ndis_80211_pmkid_cand_list cand_list; + } u; +} __attribute__((packed)); + +struct ndis_80211_ssid { + __le32 length; + u8 essid[NDIS_802_11_LENGTH_SSID]; +} __attribute__((packed)); + +struct ndis_80211_conf_freq_hop { + __le32 length; + __le32 hop_pattern; + __le32 hop_set; + __le32 dwell_time; +} __attribute__((packed)); + +struct ndis_80211_conf { + __le32 length; + __le32 beacon_period; + __le32 atim_window; + __le32 ds_config; + struct ndis_80211_conf_freq_hop fh_config; +} __attribute__((packed)); + +struct ndis_80211_bssid_ex { + __le32 length; + u8 mac[6]; + u8 padding[2]; + struct ndis_80211_ssid ssid; + __le32 privacy; + __le32 rssi; + __le32 net_type; + struct ndis_80211_conf config; + __le32 net_infra; + u8 rates[NDIS_802_11_LENGTH_RATES_EX]; + __le32 ie_length; + u8 ies[0]; +} __attribute__((packed)); + +struct ndis_80211_bssid_list_ex { + __le32 num_items; + struct ndis_80211_bssid_ex bssid[0]; +} __attribute__((packed)); + +struct ndis_80211_fixed_ies { + u8 
timestamp[8]; + __le16 beacon_interval; + __le16 capabilities; +} __attribute__((packed)); + +struct ndis_80211_wep_key { + __le32 size; + __le32 index; + __le32 length; + u8 material[32]; +} __attribute__((packed)); + +struct ndis_80211_key { + __le32 size; + __le32 index; + __le32 length; + u8 bssid[6]; + u8 padding[6]; + u8 rsc[8]; + u8 material[32]; +} __attribute__((packed)); + +struct ndis_80211_remove_key { + __le32 size; + __le32 index; + u8 bssid[6]; + u8 padding[2]; +} __attribute__((packed)); + +struct ndis_config_param { + __le32 name_offs; + __le32 name_length; + __le32 type; + __le32 value_offs; + __le32 value_length; +} __attribute__((packed)); + +struct ndis_80211_assoc_info { + __le32 length; + __le16 req_ies; + struct req_ie { + __le16 capa; + __le16 listen_interval; + u8 cur_ap_address[6]; + } req_ie; + __le32 req_ie_length; + __le32 offset_req_ies; + __le16 resp_ies; + struct resp_ie { + __le16 capa; + __le16 status_code; + __le16 assoc_id; + } resp_ie; + __le32 resp_ie_length; + __le32 offset_resp_ies; +} __attribute__((packed)); + +/* + * private data + */ +#define NET_TYPE_11FB 0 + +#define CAP_MODE_80211A 1 +#define CAP_MODE_80211B 2 +#define CAP_MODE_80211G 4 +#define CAP_MODE_MASK 7 + +#define WORK_LINK_UP (1<<0) +#define WORK_LINK_DOWN (1<<1) +#define WORK_SET_MULTICAST_LIST (1<<2) + +#define RNDIS_WLAN_ALG_NONE 0 +#define RNDIS_WLAN_ALG_WEP (1<<0) +#define RNDIS_WLAN_ALG_TKIP (1<<1) +#define RNDIS_WLAN_ALG_CCMP (1<<2) + +#define RNDIS_WLAN_KEY_MGMT_NONE 0 +#define RNDIS_WLAN_KEY_MGMT_802_1X (1<<0) +#define RNDIS_WLAN_KEY_MGMT_PSK (1<<1) + +#define COMMAND_BUFFER_SIZE (CONTROL_BUFFER_SIZE + sizeof(struct rndis_set)) + +static const struct ieee80211_channel rndis_channels[] = { + { .center_freq = 2412 }, + { .center_freq = 2417 }, + { .center_freq = 2422 }, + { .center_freq = 2427 }, + { .center_freq = 2432 }, + { .center_freq = 2437 }, + { .center_freq = 2442 }, + { .center_freq = 2447 }, + { .center_freq = 2452 }, + { .center_freq = 2457 }, + { .center_freq = 2462 }, + { .center_freq = 2467 }, + { .center_freq = 2472 }, + { .center_freq = 2484 }, +}; + +static const struct ieee80211_rate rndis_rates[] = { + { .bitrate = 10 }, + { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60 }, + { .bitrate = 90 }, + { .bitrate = 120 }, + { .bitrate = 180 }, + { .bitrate = 240 }, + { .bitrate = 360 }, + { .bitrate = 480 }, + { .bitrate = 540 } +}; + +static const u32 rndis_cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, +}; + +struct rndis_wlan_encr_key { + int len; + u32 cipher; + u8 material[32]; + u8 bssid[ETH_ALEN]; + bool pairwise; + bool tx_key; +}; + +/* RNDIS device private data */ +struct rndis_wlan_private { + struct usbnet *usbdev; + + struct wireless_dev wdev; + + struct cfg80211_scan_request *scan_request; + + struct workqueue_struct *workqueue; + struct delayed_work dev_poller_work; + struct delayed_work scan_work; + struct work_struct work; + struct mutex command_lock; + unsigned long work_pending; + int last_qual; + + struct ieee80211_supported_band band; + struct ieee80211_channel channels[ARRAY_SIZE(rndis_channels)]; + struct ieee80211_rate rates[ARRAY_SIZE(rndis_rates)]; + u32 cipher_suites[ARRAY_SIZE(rndis_cipher_suites)]; + + int caps; + int multicast_size; + + /* module parameters */ + char param_country[4]; + int 
param_frameburst; + int param_afterburner; + int param_power_save; + int param_power_output; + int param_roamtrigger; + int param_roamdelta; + u32 param_workaround_interval; + + /* hardware state */ + bool radio_on; + int infra_mode; + bool connected; + u8 bssid[ETH_ALEN]; + struct ndis_80211_ssid essid; + __le32 current_command_oid; + + /* encryption stuff */ + int encr_tx_key_index; + struct rndis_wlan_encr_key encr_keys[4]; + enum nl80211_auth_type wpa_auth_type; + int wpa_version; + int wpa_keymgmt; + int wpa_ie_len; + u8 *wpa_ie; + int wpa_cipher_pair; + int wpa_cipher_group; + + u8 command_buffer[COMMAND_BUFFER_SIZE]; +}; + +/* + * cfg80211 ops + */ +static int rndis_change_virtual_intf(struct wiphy *wiphy, + struct net_device *dev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params); + +static int rndis_scan(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_scan_request *request); + +static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed); + +static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type, + int dbm); +static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm); + +static int rndis_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme); + +static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code); + +static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params); + +static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev); + +static int rndis_set_channel(struct wiphy *wiphy, + struct ieee80211_channel *chan, enum nl80211_channel_type channel_type); + +static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr, + struct key_params *params); + +static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr); + +static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index); + +static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_info *sinfo); + +static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo); + +static struct cfg80211_ops rndis_config_ops = { + .change_virtual_intf = rndis_change_virtual_intf, + .scan = rndis_scan, + .set_wiphy_params = rndis_set_wiphy_params, + .set_tx_power = rndis_set_tx_power, + .get_tx_power = rndis_get_tx_power, + .connect = rndis_connect, + .disconnect = rndis_disconnect, + .join_ibss = rndis_join_ibss, + .leave_ibss = rndis_leave_ibss, + .set_channel = rndis_set_channel, + .add_key = rndis_add_key, + .del_key = rndis_del_key, + .set_default_key = rndis_set_default_key, + .get_station = rndis_get_station, + .dump_station = rndis_dump_station, +}; + +static void *rndis_wiphy_privid = &rndis_wiphy_privid; + + +static struct rndis_wlan_private *get_rndis_wlan_priv(struct usbnet *dev) +{ + return (struct rndis_wlan_private *)dev->driver_priv; +} + +static u32 get_bcm4320_power_dbm(struct rndis_wlan_private *priv) +{ + switch (priv->param_power_output) { + default: + case 3: + return BCM4320_DEFAULT_TXPOWER_DBM_100; + case 2: + return BCM4320_DEFAULT_TXPOWER_DBM_75; + case 1: + return BCM4320_DEFAULT_TXPOWER_DBM_50; + case 0: + return BCM4320_DEFAULT_TXPOWER_DBM_25; + } +} + +static bool is_wpa_key(struct rndis_wlan_private *priv, int idx) +{ + int cipher = priv->encr_keys[idx].cipher; + + return (cipher == 
WLAN_CIPHER_SUITE_CCMP || + cipher == WLAN_CIPHER_SUITE_TKIP); +} + +static int rndis_cipher_to_alg(u32 cipher) +{ + switch (cipher) { + default: + return RNDIS_WLAN_ALG_NONE; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + return RNDIS_WLAN_ALG_WEP; + case WLAN_CIPHER_SUITE_TKIP: + return RNDIS_WLAN_ALG_TKIP; + case WLAN_CIPHER_SUITE_CCMP: + return RNDIS_WLAN_ALG_CCMP; + } +} + +static int rndis_akm_suite_to_key_mgmt(u32 akm_suite) +{ + switch (akm_suite) { + default: + return RNDIS_WLAN_KEY_MGMT_NONE; + case WLAN_AKM_SUITE_8021X: + return RNDIS_WLAN_KEY_MGMT_802_1X; + case WLAN_AKM_SUITE_PSK: + return RNDIS_WLAN_KEY_MGMT_PSK; + } +} + +#ifdef DEBUG +static const char *oid_to_string(__le32 oid) +{ + switch (oid) { +#define OID_STR(oid) case oid: return(#oid) + /* from rndis_host.h */ + OID_STR(OID_802_3_PERMANENT_ADDRESS); + OID_STR(OID_GEN_MAXIMUM_FRAME_SIZE); + OID_STR(OID_GEN_CURRENT_PACKET_FILTER); + OID_STR(OID_GEN_PHYSICAL_MEDIUM); + + /* from rndis_wlan.c */ + OID_STR(OID_GEN_LINK_SPEED); + OID_STR(OID_GEN_RNDIS_CONFIG_PARAMETER); + + OID_STR(OID_GEN_XMIT_OK); + OID_STR(OID_GEN_RCV_OK); + OID_STR(OID_GEN_XMIT_ERROR); + OID_STR(OID_GEN_RCV_ERROR); + OID_STR(OID_GEN_RCV_NO_BUFFER); + + OID_STR(OID_802_3_CURRENT_ADDRESS); + OID_STR(OID_802_3_MULTICAST_LIST); + OID_STR(OID_802_3_MAXIMUM_LIST_SIZE); + + OID_STR(OID_802_11_BSSID); + OID_STR(OID_802_11_SSID); + OID_STR(OID_802_11_INFRASTRUCTURE_MODE); + OID_STR(OID_802_11_ADD_WEP); + OID_STR(OID_802_11_REMOVE_WEP); + OID_STR(OID_802_11_DISASSOCIATE); + OID_STR(OID_802_11_AUTHENTICATION_MODE); + OID_STR(OID_802_11_PRIVACY_FILTER); + OID_STR(OID_802_11_BSSID_LIST_SCAN); + OID_STR(OID_802_11_ENCRYPTION_STATUS); + OID_STR(OID_802_11_ADD_KEY); + OID_STR(OID_802_11_REMOVE_KEY); + OID_STR(OID_802_11_ASSOCIATION_INFORMATION); + OID_STR(OID_802_11_PMKID); + OID_STR(OID_802_11_NETWORK_TYPES_SUPPORTED); + OID_STR(OID_802_11_NETWORK_TYPE_IN_USE); + OID_STR(OID_802_11_TX_POWER_LEVEL); + OID_STR(OID_802_11_RSSI); + OID_STR(OID_802_11_RSSI_TRIGGER); + OID_STR(OID_802_11_FRAGMENTATION_THRESHOLD); + OID_STR(OID_802_11_RTS_THRESHOLD); + OID_STR(OID_802_11_SUPPORTED_RATES); + OID_STR(OID_802_11_CONFIGURATION); + OID_STR(OID_802_11_BSSID_LIST); +#undef OID_STR + } + + return "?"; +} +#else +static const char *oid_to_string(__le32 oid) +{ + return "?"; +} +#endif + +/* translate error code */ +static int rndis_error_status(__le32 rndis_status) +{ + int ret = -EINVAL; + switch (rndis_status) { + case RNDIS_STATUS_SUCCESS: + ret = 0; + break; + case RNDIS_STATUS_FAILURE: + case RNDIS_STATUS_INVALID_DATA: + ret = -EINVAL; + break; + case RNDIS_STATUS_NOT_SUPPORTED: + ret = -EOPNOTSUPP; + break; + case RNDIS_STATUS_ADAPTER_NOT_READY: + case RNDIS_STATUS_ADAPTER_NOT_OPEN: + ret = -EBUSY; + break; + } + return ret; +} + +static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev); + union { + void *buf; + struct rndis_msg_hdr *header; + struct rndis_query *get; + struct rndis_query_c *get_c; + } u; + int ret, buflen; + + buflen = *len + sizeof(*u.get); + if (buflen < CONTROL_BUFFER_SIZE) + buflen = CONTROL_BUFFER_SIZE; + + if (buflen > COMMAND_BUFFER_SIZE) { + u.buf = kmalloc(buflen, GFP_KERNEL); + if (!u.buf) + return -ENOMEM; + } else { + u.buf = priv->command_buffer; + } + + mutex_lock(&priv->command_lock); + + memset(u.get, 0, sizeof *u.get); + u.get->msg_type = RNDIS_MSG_QUERY; + u.get->msg_len = cpu_to_le32(sizeof *u.get); + u.get->oid = oid; + + 
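+ /*
+ * Rough sketch of the exchange built here, assuming the usual RNDIS
+ * framing: the host sends a QUERY carrying only the OID, and the device
+ * replies with a completion whose 'offset' is counted from its
+ * request_id field (8 bytes into the message), which is why the reply
+ * data is copied from 'u.buf + offset + 8' below:
+ *
+ *   host   -> RNDIS_MSG_QUERY   { msg_len, oid }
+ *   device -> RNDIS_MSG_QUERY_C { status, len, offset }
+ *   data   at u.buf + le32_to_cpu(u.get_c->offset) + 8
+ */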
priv->current_command_oid = oid; + ret = rndis_command(dev, u.header, buflen); + priv->current_command_oid = 0; + if (ret < 0) + netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n", + __func__, oid_to_string(oid), ret, + le32_to_cpu(u.get_c->status)); + + if (ret == 0) { + memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len); + + ret = le32_to_cpu(u.get_c->len); + if (ret > *len) + *len = ret; + + ret = rndis_error_status(u.get_c->status); + if (ret < 0) + netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n", + __func__, oid_to_string(oid), + le32_to_cpu(u.get_c->status), ret); + } + + mutex_unlock(&priv->command_lock); + + if (u.buf != priv->command_buffer) + kfree(u.buf); + return ret; +} + +static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev); + union { + void *buf; + struct rndis_msg_hdr *header; + struct rndis_set *set; + struct rndis_set_c *set_c; + } u; + int ret, buflen; + + buflen = len + sizeof(*u.set); + if (buflen < CONTROL_BUFFER_SIZE) + buflen = CONTROL_BUFFER_SIZE; + + if (buflen > COMMAND_BUFFER_SIZE) { + u.buf = kmalloc(buflen, GFP_KERNEL); + if (!u.buf) + return -ENOMEM; + } else { + u.buf = priv->command_buffer; + } + + mutex_lock(&priv->command_lock); + + memset(u.set, 0, sizeof *u.set); + u.set->msg_type = RNDIS_MSG_SET; + u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len); + u.set->oid = oid; + u.set->len = cpu_to_le32(len); + u.set->offset = cpu_to_le32(sizeof(*u.set) - 8); + u.set->handle = cpu_to_le32(0); + memcpy(u.buf + sizeof(*u.set), data, len); + + priv->current_command_oid = oid; + ret = rndis_command(dev, u.header, buflen); + priv->current_command_oid = 0; + if (ret < 0) + netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n", + __func__, oid_to_string(oid), ret, + le32_to_cpu(u.set_c->status)); + + if (ret == 0) { + ret = rndis_error_status(u.set_c->status); + + if (ret < 0) + netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n", + __func__, oid_to_string(oid), + le32_to_cpu(u.set_c->status), ret); + } + + mutex_unlock(&priv->command_lock); + + if (u.buf != priv->command_buffer) + kfree(u.buf); + return ret; +} + +static int rndis_reset(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct rndis_reset *reset; + int ret; + + mutex_lock(&priv->command_lock); + + reset = (void *)priv->command_buffer; + memset(reset, 0, sizeof(*reset)); + reset->msg_type = RNDIS_MSG_RESET; + reset->msg_len = cpu_to_le32(sizeof(*reset)); + priv->current_command_oid = 0; + ret = rndis_command(usbdev, (void *)reset, CONTROL_BUFFER_SIZE); + + mutex_unlock(&priv->command_lock); + + if (ret < 0) + return ret; + return 0; +} + +/* + * Specs say that we can only set config parameters only soon after device + * initialization. 
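+ *
+ * These are vendor-specific knobs (for the BCM4320, presumably the ones
+ * exposed as module parameters above). A minimal sketch of a string
+ * parameter being pushed, with an illustrative parameter name:
+ *
+ *   rndis_set_config_parameter_str(dev, "Country", modparam_country);
+ *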
+ * value_type: 0 = u32, 2 = unicode string + */ +static int rndis_set_config_parameter(struct usbnet *dev, char *param, + int value_type, void *value) +{ + struct ndis_config_param *infobuf; + int value_len, info_len, param_len, ret, i; + __le16 *unibuf; + __le32 *dst_value; + + if (value_type == 0) + value_len = sizeof(__le32); + else if (value_type == 2) + value_len = strlen(value) * sizeof(__le16); + else + return -EINVAL; + + param_len = strlen(param) * sizeof(__le16); + info_len = sizeof(*infobuf) + param_len + value_len; + +#ifdef DEBUG + info_len += 12; +#endif + infobuf = kmalloc(info_len, GFP_KERNEL); + if (!infobuf) + return -ENOMEM; + +#ifdef DEBUG + info_len -= 12; + /* extra 12 bytes are for padding (debug output) */ + memset(infobuf, 0xCC, info_len + 12); +#endif + + if (value_type == 2) + netdev_dbg(dev->net, "setting config parameter: %s, value: %s\n", + param, (u8 *)value); + else + netdev_dbg(dev->net, "setting config parameter: %s, value: %d\n", + param, *(u32 *)value); + + infobuf->name_offs = cpu_to_le32(sizeof(*infobuf)); + infobuf->name_length = cpu_to_le32(param_len); + infobuf->type = cpu_to_le32(value_type); + infobuf->value_offs = cpu_to_le32(sizeof(*infobuf) + param_len); + infobuf->value_length = cpu_to_le32(value_len); + + /* simple string to unicode string conversion */ + unibuf = (void *)infobuf + sizeof(*infobuf); + for (i = 0; i < param_len / sizeof(__le16); i++) + unibuf[i] = cpu_to_le16(param[i]); + + if (value_type == 2) { + unibuf = (void *)infobuf + sizeof(*infobuf) + param_len; + for (i = 0; i < value_len / sizeof(__le16); i++) + unibuf[i] = cpu_to_le16(((u8 *)value)[i]); + } else { + dst_value = (void *)infobuf + sizeof(*infobuf) + param_len; + *dst_value = cpu_to_le32(*(u32 *)value); + } + +#ifdef DEBUG + netdev_dbg(dev->net, "info buffer (len: %d)\n", info_len); + for (i = 0; i < info_len; i += 12) { + u32 *tmp = (u32 *)((u8 *)infobuf + i); + netdev_dbg(dev->net, "%08X:%08X:%08X\n", + cpu_to_be32(tmp[0]), + cpu_to_be32(tmp[1]), + cpu_to_be32(tmp[2])); + } +#endif + + ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER, + infobuf, info_len); + if (ret != 0) + netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n", + ret); + + kfree(infobuf); + return ret; +} + +static int rndis_set_config_parameter_str(struct usbnet *dev, + char *param, char *value) +{ + return rndis_set_config_parameter(dev, param, 2, value); +} + +/* + * data conversion functions + */ +static int level_to_qual(int level) +{ + int qual = 100 * (level - WL_NOISE) / (WL_SIGMAX - WL_NOISE); + return qual >= 0 ? (qual <= 100 ? 
qual : 100) : 0; +} + +/* + * common functions + */ +static int set_infra_mode(struct usbnet *usbdev, int mode); +static void restore_keys(struct usbnet *usbdev); +static int rndis_check_bssid_list(struct usbnet *usbdev); + +static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + int ret; + + ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid)); + if (ret < 0) { + netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret); + return ret; + } + if (ret == 0) { + memcpy(&priv->essid, ssid, sizeof(priv->essid)); + priv->radio_on = true; + netdev_dbg(usbdev->net, "%s(): radio_on = true\n", __func__); + } + + return ret; +} + +static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) +{ + int ret; + + ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN); + if (ret < 0) { + netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n", + bssid, ret); + return ret; + } + + return ret; +} + +static int clear_bssid(struct usbnet *usbdev) +{ + u8 broadcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + + return set_bssid(usbdev, broadcast_mac); +} + +static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) +{ + int ret, len; + + len = ETH_ALEN; + ret = rndis_query_oid(usbdev, OID_802_11_BSSID, bssid, &len); + + if (ret != 0) + memset(bssid, 0, ETH_ALEN); + + return ret; +} + +static int get_association_info(struct usbnet *usbdev, + struct ndis_80211_assoc_info *info, int len) +{ + return rndis_query_oid(usbdev, OID_802_11_ASSOCIATION_INFORMATION, + info, &len); +} + +static bool is_associated(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + u8 bssid[ETH_ALEN]; + int ret; + + if (!priv->radio_on) + return false; + + ret = get_bssid(usbdev, bssid); + + return (ret == 0 && !is_zero_ether_addr(bssid)); +} + +static int disassociate(struct usbnet *usbdev, bool reset_ssid) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ndis_80211_ssid ssid; + int i, ret = 0; + + if (priv->radio_on) { + ret = rndis_set_oid(usbdev, OID_802_11_DISASSOCIATE, NULL, 0); + if (ret == 0) { + priv->radio_on = false; + netdev_dbg(usbdev->net, "%s(): radio_on = false\n", + __func__); + + if (reset_ssid) + msleep(100); + } + } + + /* disassociate causes radio to be turned off; if reset_ssid + * is given, set random ssid to enable radio */ + if (reset_ssid) { + /* Set device to infrastructure mode so we don't get ad-hoc + * 'media connect' indications with the random ssid. 
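+ * The "random ssid" built below forces every byte into the 0x01-0xff
+ * range (essid[i] = 0x1 + essid[i] * 0xfe / 0xff), so it contains no
+ * NUL bytes, and a 32-byte random SSID like this is vanishingly
+ * unlikely to match any real network.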
+ */ + set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA); + + ssid.length = cpu_to_le32(sizeof(ssid.essid)); + get_random_bytes(&ssid.essid[2], sizeof(ssid.essid)-2); + ssid.essid[0] = 0x1; + ssid.essid[1] = 0xff; + for (i = 2; i < sizeof(ssid.essid); i++) + ssid.essid[i] = 0x1 + (ssid.essid[i] * 0xfe / 0xff); + ret = set_essid(usbdev, &ssid); + } + return ret; +} + +static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version, + enum nl80211_auth_type auth_type, int keymgmt) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + __le32 tmp; + int auth_mode, ret; + + netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x authalg=0x%x keymgmt=0x%x\n", + __func__, wpa_version, auth_type, keymgmt); + + if (wpa_version & NL80211_WPA_VERSION_2) { + if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X) + auth_mode = NDIS_80211_AUTH_WPA2; + else + auth_mode = NDIS_80211_AUTH_WPA2_PSK; + } else if (wpa_version & NL80211_WPA_VERSION_1) { + if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X) + auth_mode = NDIS_80211_AUTH_WPA; + else if (keymgmt & RNDIS_WLAN_KEY_MGMT_PSK) + auth_mode = NDIS_80211_AUTH_WPA_PSK; + else + auth_mode = NDIS_80211_AUTH_WPA_NONE; + } else if (auth_type == NL80211_AUTHTYPE_SHARED_KEY) + auth_mode = NDIS_80211_AUTH_SHARED; + else if (auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) + auth_mode = NDIS_80211_AUTH_OPEN; + else if (auth_type == NL80211_AUTHTYPE_AUTOMATIC) + auth_mode = NDIS_80211_AUTH_AUTO_SWITCH; + else + return -ENOTSUPP; + + tmp = cpu_to_le32(auth_mode); + ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp, + sizeof(tmp)); + if (ret != 0) { + netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n", + ret); + return ret; + } + + priv->wpa_version = wpa_version; + priv->wpa_auth_type = auth_type; + priv->wpa_keymgmt = keymgmt; + + return 0; +} + +static int set_priv_filter(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + __le32 tmp; + + netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x\n", + __func__, priv->wpa_version); + + if (priv->wpa_version & NL80211_WPA_VERSION_2 || + priv->wpa_version & NL80211_WPA_VERSION_1) + tmp = cpu_to_le32(NDIS_80211_PRIV_8021X_WEP); + else + tmp = cpu_to_le32(NDIS_80211_PRIV_ACCEPT_ALL); + + return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp, + sizeof(tmp)); +} + +static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + __le32 tmp; + int encr_mode, ret; + + netdev_dbg(usbdev->net, "%s(): cipher_pair=0x%x cipher_group=0x%x\n", + __func__, pairwise, groupwise); + + if (pairwise & RNDIS_WLAN_ALG_CCMP) + encr_mode = NDIS_80211_ENCR_CCMP_ENABLED; + else if (pairwise & RNDIS_WLAN_ALG_TKIP) + encr_mode = NDIS_80211_ENCR_TKIP_ENABLED; + else if (pairwise & RNDIS_WLAN_ALG_WEP) + encr_mode = NDIS_80211_ENCR_WEP_ENABLED; + else if (groupwise & RNDIS_WLAN_ALG_CCMP) + encr_mode = NDIS_80211_ENCR_CCMP_ENABLED; + else if (groupwise & RNDIS_WLAN_ALG_TKIP) + encr_mode = NDIS_80211_ENCR_TKIP_ENABLED; + else + encr_mode = NDIS_80211_ENCR_DISABLED; + + tmp = cpu_to_le32(encr_mode); + ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp, + sizeof(tmp)); + if (ret != 0) { + netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n", + ret); + return ret; + } + + priv->wpa_cipher_pair = pairwise; + priv->wpa_cipher_group = groupwise; + return 0; +} + +static int set_infra_mode(struct usbnet *usbdev, int mode) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + __le32 tmp; + int ret; + 
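+ /*
+ * 'mode' is one of enum ndis_80211_net_infra declared above
+ * (NDIS_80211_INFRA_ADHOC = 0, NDIS_80211_INFRA_INFRA = 1,
+ * NDIS_80211_INFRA_AUTO_UNKNOWN = 2) and is sent to the device as a
+ * little-endian 32-bit value through OID_802_11_INFRASTRUCTURE_MODE.
+ */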
+ netdev_dbg(usbdev->net, "%s(): infra_mode=0x%x\n", + __func__, priv->infra_mode); + + tmp = cpu_to_le32(mode); + ret = rndis_set_oid(usbdev, OID_802_11_INFRASTRUCTURE_MODE, &tmp, + sizeof(tmp)); + if (ret != 0) { + netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n", + ret); + return ret; + } + + /* NDIS drivers clear keys when infrastructure mode is + * changed. But Linux tools assume otherwise. So set the + * keys */ + restore_keys(usbdev); + + priv->infra_mode = mode; + return 0; +} + +static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold) +{ + __le32 tmp; + + netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold); + + if (rts_threshold < 0 || rts_threshold > 2347) + rts_threshold = 2347; + + tmp = cpu_to_le32(rts_threshold); + return rndis_set_oid(usbdev, OID_802_11_RTS_THRESHOLD, &tmp, + sizeof(tmp)); +} + +static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold) +{ + __le32 tmp; + + netdev_dbg(usbdev->net, "%s(): %i\n", __func__, frag_threshold); + + if (frag_threshold < 256 || frag_threshold > 2346) + frag_threshold = 2346; + + tmp = cpu_to_le32(frag_threshold); + return rndis_set_oid(usbdev, OID_802_11_FRAGMENTATION_THRESHOLD, &tmp, + sizeof(tmp)); +} + +static void set_default_iw_params(struct usbnet *usbdev) +{ + set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA); + set_auth_mode(usbdev, 0, NL80211_AUTHTYPE_OPEN_SYSTEM, + RNDIS_WLAN_KEY_MGMT_NONE); + set_priv_filter(usbdev); + set_encr_mode(usbdev, RNDIS_WLAN_ALG_NONE, RNDIS_WLAN_ALG_NONE); +} + +static int deauthenticate(struct usbnet *usbdev) +{ + int ret; + + ret = disassociate(usbdev, true); + set_default_iw_params(usbdev); + return ret; +} + +static int set_channel(struct usbnet *usbdev, int channel) +{ + struct ndis_80211_conf config; + unsigned int dsconfig; + int len, ret; + + netdev_dbg(usbdev->net, "%s(%d)\n", __func__, channel); + + /* this OID is valid only when not associated */ + if (is_associated(usbdev)) + return 0; + + dsconfig = ieee80211_dsss_chan_to_freq(channel) * 1000; + + len = sizeof(config); + ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); + if (ret < 0) { + netdev_dbg(usbdev->net, "%s(): querying configuration failed\n", + __func__); + return ret; + } + + config.ds_config = cpu_to_le32(dsconfig); + ret = rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config, + sizeof(config)); + + netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret); + + return ret; +} + +/* index must be 0 - N, as per NDIS */ +static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, + int index) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ndis_80211_wep_key ndis_key; + u32 cipher; + int ret; + + netdev_dbg(usbdev->net, "%s(idx: %d, len: %d)\n", + __func__, index, key_len); + + if ((key_len != 5 && key_len != 13) || index < 0 || index > 3) + return -EINVAL; + + if (key_len == 5) + cipher = WLAN_CIPHER_SUITE_WEP40; + else + cipher = WLAN_CIPHER_SUITE_WEP104; + + memset(&ndis_key, 0, sizeof(ndis_key)); + + ndis_key.size = cpu_to_le32(sizeof(ndis_key)); + ndis_key.length = cpu_to_le32(key_len); + ndis_key.index = cpu_to_le32(index); + memcpy(&ndis_key.material, key, key_len); + + if (index == priv->encr_tx_key_index) { + ndis_key.index |= NDIS_80211_ADDWEP_TRANSMIT_KEY; + ret = set_encr_mode(usbdev, RNDIS_WLAN_ALG_WEP, + RNDIS_WLAN_ALG_NONE); + if (ret) + netdev_warn(usbdev->net, "encryption couldn't be enabled (%08X)\n", + ret); + } + + ret = rndis_set_oid(usbdev, OID_802_11_ADD_WEP, &ndis_key, + 
sizeof(ndis_key)); + if (ret != 0) { + netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n", + index + 1, ret); + return ret; + } + + priv->encr_keys[index].len = key_len; + priv->encr_keys[index].cipher = cipher; + memcpy(&priv->encr_keys[index].material, key, key_len); + memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN); + + return 0; +} + +static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len, + int index, const u8 *addr, const u8 *rx_seq, + int seq_len, u32 cipher, __le32 flags) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ndis_80211_key ndis_key; + bool is_addr_ok; + int ret; + + if (index < 0 || index >= 4) { + netdev_dbg(usbdev->net, "%s(): index out of range (%i)\n", + __func__, index); + return -EINVAL; + } + if (key_len > sizeof(ndis_key.material) || key_len < 0) { + netdev_dbg(usbdev->net, "%s(): key length out of range (%i)\n", + __func__, key_len); + return -EINVAL; + } + if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) { + if (!rx_seq || seq_len <= 0) { + netdev_dbg(usbdev->net, "%s(): recv seq flag without buffer\n", + __func__); + return -EINVAL; + } + if (rx_seq && seq_len > sizeof(ndis_key.rsc)) { + netdev_dbg(usbdev->net, "%s(): too big recv seq buffer\n", __func__); + return -EINVAL; + } + } + + is_addr_ok = addr && !is_zero_ether_addr(addr) && + !is_broadcast_ether_addr(addr); + if ((flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) && !is_addr_ok) { + netdev_dbg(usbdev->net, "%s(): pairwise but bssid invalid (%pM)\n", + __func__, addr); + return -EINVAL; + } + + netdev_dbg(usbdev->net, "%s(%i): flags:%i%i%i\n", + __func__, index, + !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY), + !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY), + !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ)); + + memset(&ndis_key, 0, sizeof(ndis_key)); + + ndis_key.size = cpu_to_le32(sizeof(ndis_key) - + sizeof(ndis_key.material) + key_len); + ndis_key.length = cpu_to_le32(key_len); + ndis_key.index = cpu_to_le32(index) | flags; + + if (cipher == WLAN_CIPHER_SUITE_TKIP && key_len == 32) { + /* wpa_supplicant gives us the Michael MIC RX/TX keys in + * different order than NDIS spec, so swap the order here. 
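+ *
+ * The resulting copy (see the memcpy calls below) is:
+ *   material[ 0..15] <- key[ 0..15]   temporal key
+ *   material[16..23] <- key[24..31]   one Michael MIC half
+ *   material[24..31] <- key[16..23]   the other Michael MIC half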
*/ + memcpy(ndis_key.material, key, 16); + memcpy(ndis_key.material + 16, key + 24, 8); + memcpy(ndis_key.material + 24, key + 16, 8); + } else + memcpy(ndis_key.material, key, key_len); + + if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) + memcpy(ndis_key.rsc, rx_seq, seq_len); + + if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) { + /* pairwise key */ + memcpy(ndis_key.bssid, addr, ETH_ALEN); + } else { + /* group key */ + if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) + memset(ndis_key.bssid, 0xff, ETH_ALEN); + else + get_bssid(usbdev, ndis_key.bssid); + } + + ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key, + le32_to_cpu(ndis_key.size)); + netdev_dbg(usbdev->net, "%s(): OID_802_11_ADD_KEY -> %08X\n", + __func__, ret); + if (ret != 0) + return ret; + + memset(&priv->encr_keys[index], 0, sizeof(priv->encr_keys[index])); + priv->encr_keys[index].len = key_len; + priv->encr_keys[index].cipher = cipher; + memcpy(&priv->encr_keys[index].material, key, key_len); + if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) + memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN); + else + memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN); + + if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY) + priv->encr_tx_key_index = index; + + return 0; +} + +static int restore_key(struct usbnet *usbdev, int key_idx) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct rndis_wlan_encr_key key; + + if (is_wpa_key(priv, key_idx)) + return 0; + + key = priv->encr_keys[key_idx]; + + netdev_dbg(usbdev->net, "%s(): %i:%i\n", __func__, key_idx, key.len); + + if (key.len == 0) + return 0; + + return add_wep_key(usbdev, key.material, key.len, key_idx); +} + +static void restore_keys(struct usbnet *usbdev) +{ + int i; + + for (i = 0; i < 4; i++) + restore_key(usbdev, i); +} + +static void clear_key(struct rndis_wlan_private *priv, int idx) +{ + memset(&priv->encr_keys[idx], 0, sizeof(priv->encr_keys[idx])); +} + +/* remove_key is for both wep and wpa */ +static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ndis_80211_remove_key remove_key; + __le32 keyindex; + bool is_wpa; + int ret; + + if (priv->encr_keys[index].len == 0) + return 0; + + is_wpa = is_wpa_key(priv, index); + + netdev_dbg(usbdev->net, "%s(): %i:%s:%i\n", + __func__, index, is_wpa ? 
"wpa" : "wep", + priv->encr_keys[index].len); + + clear_key(priv, index); + + if (is_wpa) { + remove_key.size = cpu_to_le32(sizeof(remove_key)); + remove_key.index = cpu_to_le32(index); + if (bssid) { + /* pairwise key */ + if (!is_broadcast_ether_addr(bssid)) + remove_key.index |= + NDIS_80211_ADDKEY_PAIRWISE_KEY; + memcpy(remove_key.bssid, bssid, + sizeof(remove_key.bssid)); + } else + memset(remove_key.bssid, 0xff, + sizeof(remove_key.bssid)); + + ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_KEY, &remove_key, + sizeof(remove_key)); + if (ret != 0) + return ret; + } else { + keyindex = cpu_to_le32(index); + ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_WEP, &keyindex, + sizeof(keyindex)); + if (ret != 0) { + netdev_warn(usbdev->net, + "removing encryption key %d failed (%08X)\n", + index, ret); + return ret; + } + } + + /* if it is transmit key, disable encryption */ + if (index == priv->encr_tx_key_index) + set_encr_mode(usbdev, RNDIS_WLAN_ALG_NONE, RNDIS_WLAN_ALG_NONE); + + return 0; +} + +static void set_multicast_list(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct dev_mc_list *mclist; + __le32 filter, basefilter; + int ret; + char *mc_addrs = NULL; + int mc_count; + + basefilter = filter = RNDIS_PACKET_TYPE_DIRECTED | + RNDIS_PACKET_TYPE_BROADCAST; + + if (usbdev->net->flags & IFF_PROMISC) { + filter |= RNDIS_PACKET_TYPE_PROMISCUOUS | + RNDIS_PACKET_TYPE_ALL_LOCAL; + } else if (usbdev->net->flags & IFF_ALLMULTI) { + filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; + } + + if (filter != basefilter) + goto set_filter; + + /* + * mc_list should be accessed holding the lock, so copy addresses to + * local buffer first. + */ + netif_addr_lock_bh(usbdev->net); + mc_count = netdev_mc_count(usbdev->net); + if (mc_count > priv->multicast_size) { + filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; + } else if (mc_count) { + int i = 0; + + mc_addrs = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC); + if (!mc_addrs) { + netdev_warn(usbdev->net, + "couldn't alloc %d bytes of memory\n", + mc_count * ETH_ALEN); + netif_addr_unlock_bh(usbdev->net); + return; + } + + netdev_for_each_mc_addr(mclist, usbdev->net) + memcpy(mc_addrs + i++ * ETH_ALEN, + mclist->dmi_addr, ETH_ALEN); + } + netif_addr_unlock_bh(usbdev->net); + + if (filter != basefilter) + goto set_filter; + + if (mc_count) { + ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, mc_addrs, + mc_count * ETH_ALEN); + kfree(mc_addrs); + if (ret == 0) + filter |= RNDIS_PACKET_TYPE_MULTICAST; + else + filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; + + netdev_dbg(usbdev->net, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n", + mc_count, priv->multicast_size, ret); + } + +set_filter: + ret = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter, + sizeof(filter)); + if (ret < 0) { + netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n", + le32_to_cpu(filter)); + } + + netdev_dbg(usbdev->net, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n", + le32_to_cpu(filter), ret); +} + +/* + * cfg80211 ops + */ +static int rndis_change_virtual_intf(struct wiphy *wiphy, + struct net_device *dev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + int mode; + + switch (type) { + case NL80211_IFTYPE_ADHOC: + mode = NDIS_80211_INFRA_ADHOC; + break; + case NL80211_IFTYPE_STATION: + mode = NDIS_80211_INFRA_INFRA; + break; + default: + return -EINVAL; + } + + priv->wdev.iftype = type; + + return 
set_infra_mode(usbdev, mode); +} + +static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + int err; + + if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { + err = set_frag_threshold(usbdev, wiphy->frag_threshold); + if (err < 0) + return err; + } + + if (changed & WIPHY_PARAM_RTS_THRESHOLD) { + err = set_rts_threshold(usbdev, wiphy->rts_threshold); + if (err < 0) + return err; + } + + return 0; +} + +static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type, + int dbm) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + netdev_dbg(usbdev->net, "%s(): type:0x%x dbm:%i\n", + __func__, type, dbm); + + /* Device doesn't support changing txpower after initialization, only + * turn off/on radio. Support 'auto' mode and setting same dBm that is + * currently used. + */ + if (type == TX_POWER_AUTOMATIC || dbm == get_bcm4320_power_dbm(priv)) { + if (!priv->radio_on) + disassociate(usbdev, true); /* turn on radio */ + + return 0; + } + + return -ENOTSUPP; +} + +static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + *dbm = get_bcm4320_power_dbm(priv); + + netdev_dbg(usbdev->net, "%s(): dbm:%i\n", __func__, *dbm); + + return 0; +} + +#define SCAN_DELAY_JIFFIES (6 * HZ) +static int rndis_scan(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_scan_request *request) +{ + struct usbnet *usbdev = netdev_priv(dev); + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + int ret; + __le32 tmp; + + netdev_dbg(usbdev->net, "cfg80211.scan\n"); + + /* Get current bssid list from device before new scan, as new scan + * clears internal bssid list. 
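+ * The scan itself is asynchronous: OID_802_11_BSSID_LIST_SCAN below only
+ * starts it, and the results are picked up by scan_work
+ * (rndis_get_scan_results) after SCAN_DELAY_JIFFIES.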
+ */ + rndis_check_bssid_list(usbdev); + + if (!request) + return -EINVAL; + + if (priv->scan_request && priv->scan_request != request) + return -EBUSY; + + priv->scan_request = request; + + tmp = cpu_to_le32(1); + ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, + sizeof(tmp)); + if (ret == 0) { + /* Wait before retrieving scan results from device */ + queue_delayed_work(priv->workqueue, &priv->scan_work, + SCAN_DELAY_JIFFIES); + } + + return ret; +} + +static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev, + struct ndis_80211_bssid_ex *bssid) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ieee80211_channel *channel; + s32 signal; + u64 timestamp; + u16 capability; + u16 beacon_interval; + struct ndis_80211_fixed_ies *fixed; + int ie_len, bssid_len; + u8 *ie; + + netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM]\n", + bssid->ssid.essid, bssid->mac); + + /* parse bssid structure */ + bssid_len = le32_to_cpu(bssid->length); + + if (bssid_len < sizeof(struct ndis_80211_bssid_ex) + + sizeof(struct ndis_80211_fixed_ies)) + return NULL; + + fixed = (struct ndis_80211_fixed_ies *)bssid->ies; + + ie = (void *)(bssid->ies + sizeof(struct ndis_80211_fixed_ies)); + ie_len = min(bssid_len - (int)sizeof(*bssid), + (int)le32_to_cpu(bssid->ie_length)); + ie_len -= sizeof(struct ndis_80211_fixed_ies); + if (ie_len < 0) + return NULL; + + /* extract data for cfg80211_inform_bss */ + channel = ieee80211_get_channel(priv->wdev.wiphy, + KHZ_TO_MHZ(le32_to_cpu(bssid->config.ds_config))); + if (!channel) + return NULL; + + signal = level_to_qual(le32_to_cpu(bssid->rssi)); + timestamp = le64_to_cpu(*(__le64 *)fixed->timestamp); + capability = le16_to_cpu(fixed->capabilities); + beacon_interval = le16_to_cpu(fixed->beacon_interval); + + return cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac, + timestamp, capability, beacon_interval, ie, ie_len, signal, + GFP_KERNEL); +} + +static int rndis_check_bssid_list(struct usbnet *usbdev) +{ + void *buf = NULL; + struct ndis_80211_bssid_list_ex *bssid_list; + struct ndis_80211_bssid_ex *bssid; + int ret = -EINVAL, len, count, bssid_len; + bool resized = false; + + netdev_dbg(usbdev->net, "check_bssid_list\n"); + + len = CONTROL_BUFFER_SIZE; +resize_buf: + buf = kmalloc(len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out; + } + + ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len); + if (ret != 0) + goto out; + + if (!resized && len > CONTROL_BUFFER_SIZE) { + resized = true; + kfree(buf); + goto resize_buf; + } + + bssid_list = buf; + bssid = bssid_list->bssid; + bssid_len = le32_to_cpu(bssid->length); + count = le32_to_cpu(bssid_list->num_items); + netdev_dbg(usbdev->net, "check_bssid_list: %d BSSIDs found (buflen: %d)\n", + count, len); + + while (count && ((void *)bssid + bssid_len) <= (buf + len)) { + rndis_bss_info_update(usbdev, bssid); + + bssid = (void *)bssid + bssid_len; + bssid_len = le32_to_cpu(bssid->length); + count--; + } + +out: + kfree(buf); + return ret; +} + +static void rndis_get_scan_results(struct work_struct *work) +{ + struct rndis_wlan_private *priv = + container_of(work, struct rndis_wlan_private, scan_work.work); + struct usbnet *usbdev = priv->usbdev; + int ret; + + netdev_dbg(usbdev->net, "get_scan_results\n"); + + if (!priv->scan_request) + return; + + ret = rndis_check_bssid_list(usbdev); + + cfg80211_scan_done(priv->scan_request, ret < 0); + + priv->scan_request = NULL; +} + +static int rndis_connect(struct wiphy *wiphy, struct net_device *dev, + 
struct cfg80211_connect_params *sme) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + struct ieee80211_channel *channel = sme->channel; + struct ndis_80211_ssid ssid; + int pairwise = RNDIS_WLAN_ALG_NONE; + int groupwise = RNDIS_WLAN_ALG_NONE; + int keymgmt = RNDIS_WLAN_KEY_MGMT_NONE; + int length, i, ret, chan = -1; + + if (channel) + chan = ieee80211_frequency_to_channel(channel->center_freq); + + groupwise = rndis_cipher_to_alg(sme->crypto.cipher_group); + for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) + pairwise |= + rndis_cipher_to_alg(sme->crypto.ciphers_pairwise[i]); + + if (sme->crypto.n_ciphers_pairwise > 0 && + pairwise == RNDIS_WLAN_ALG_NONE) { + netdev_err(usbdev->net, "Unsupported pairwise cipher\n"); + return -ENOTSUPP; + } + + for (i = 0; i < sme->crypto.n_akm_suites; i++) + keymgmt |= + rndis_akm_suite_to_key_mgmt(sme->crypto.akm_suites[i]); + + if (sme->crypto.n_akm_suites > 0 && + keymgmt == RNDIS_WLAN_KEY_MGMT_NONE) { + netdev_err(usbdev->net, "Invalid keymgmt\n"); + return -ENOTSUPP; + } + + netdev_dbg(usbdev->net, "cfg80211.connect('%.32s':[%pM]:%d:[%d,0x%x:0x%x]:[0x%x:0x%x]:0x%x)\n", + sme->ssid, sme->bssid, chan, + sme->privacy, sme->crypto.wpa_versions, sme->auth_type, + groupwise, pairwise, keymgmt); + + if (is_associated(usbdev)) + disassociate(usbdev, false); + + ret = set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: set_infra_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + ret = set_auth_mode(usbdev, sme->crypto.wpa_versions, sme->auth_type, + keymgmt); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: set_auth_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + set_priv_filter(usbdev); + + ret = set_encr_mode(usbdev, pairwise, groupwise); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: set_encr_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + if (channel) { + ret = set_channel(usbdev, chan); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: set_channel failed, %d\n", + ret); + goto err_turn_radio_on; + } + } + + if (sme->key && ((groupwise | pairwise) & RNDIS_WLAN_ALG_WEP)) { + priv->encr_tx_key_index = sme->key_idx; + ret = add_wep_key(usbdev, sme->key, sme->key_len, sme->key_idx); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: add_wep_key failed, %d (%d, %d)\n", + ret, sme->key_len, sme->key_idx); + goto err_turn_radio_on; + } + } + + if (sme->bssid && !is_zero_ether_addr(sme->bssid) && + !is_broadcast_ether_addr(sme->bssid)) { + ret = set_bssid(usbdev, sme->bssid); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: set_bssid failed, %d\n", + ret); + goto err_turn_radio_on; + } + } else + clear_bssid(usbdev); + + length = sme->ssid_len; + if (length > NDIS_802_11_LENGTH_SSID) + length = NDIS_802_11_LENGTH_SSID; + + memset(&ssid, 0, sizeof(ssid)); + ssid.length = cpu_to_le32(length); + memcpy(ssid.essid, sme->ssid, length); + + /* Pause and purge rx queue, so we don't pass packets before + * 'media connect'-indication. 
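+ * The rx queue is resumed again from rndis_wlan_do_link_up_work() once
+ * the device reports 'media connect'.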
+ */ + usbnet_pause_rx(usbdev); + usbnet_purge_paused_rxq(usbdev); + + ret = set_essid(usbdev, &ssid); + if (ret < 0) + netdev_dbg(usbdev->net, "connect: set_essid failed, %d\n", ret); + return ret; + +err_turn_radio_on: + disassociate(usbdev, true); + + return ret; +} + +static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code); + + priv->connected = false; + memset(priv->bssid, 0, ETH_ALEN); + + return deauthenticate(usbdev); +} + +static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + struct ieee80211_channel *channel = params->channel; + struct ndis_80211_ssid ssid; + enum nl80211_auth_type auth_type; + int ret, alg, length, chan = -1; + + if (channel) + chan = ieee80211_frequency_to_channel(channel->center_freq); + + /* TODO: How to handle ad-hoc encryption? + * connect() has *key, join_ibss() doesn't. RNDIS requires key to be + * pre-shared for encryption (open/shared/wpa), is key set before + * join_ibss? Which auth_type to use (not in params)? What about WPA? + */ + if (params->privacy) { + auth_type = NL80211_AUTHTYPE_SHARED_KEY; + alg = RNDIS_WLAN_ALG_WEP; + } else { + auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; + alg = RNDIS_WLAN_ALG_NONE; + } + + netdev_dbg(usbdev->net, "cfg80211.join_ibss('%.32s':[%pM]:%d:%d)\n", + params->ssid, params->bssid, chan, params->privacy); + + if (is_associated(usbdev)) + disassociate(usbdev, false); + + ret = set_infra_mode(usbdev, NDIS_80211_INFRA_ADHOC); + if (ret < 0) { + netdev_dbg(usbdev->net, "join_ibss: set_infra_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + ret = set_auth_mode(usbdev, 0, auth_type, RNDIS_WLAN_KEY_MGMT_NONE); + if (ret < 0) { + netdev_dbg(usbdev->net, "join_ibss: set_auth_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + set_priv_filter(usbdev); + + ret = set_encr_mode(usbdev, alg, RNDIS_WLAN_ALG_NONE); + if (ret < 0) { + netdev_dbg(usbdev->net, "join_ibss: set_encr_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + if (channel) { + ret = set_channel(usbdev, chan); + if (ret < 0) { + netdev_dbg(usbdev->net, "join_ibss: set_channel failed, %d\n", + ret); + goto err_turn_radio_on; + } + } + + if (params->bssid && !is_zero_ether_addr(params->bssid) && + !is_broadcast_ether_addr(params->bssid)) { + ret = set_bssid(usbdev, params->bssid); + if (ret < 0) { + netdev_dbg(usbdev->net, "join_ibss: set_bssid failed, %d\n", + ret); + goto err_turn_radio_on; + } + } else + clear_bssid(usbdev); + + length = params->ssid_len; + if (length > NDIS_802_11_LENGTH_SSID) + length = NDIS_802_11_LENGTH_SSID; + + memset(&ssid, 0, sizeof(ssid)); + ssid.length = cpu_to_le32(length); + memcpy(ssid.essid, params->ssid, length); + + /* Don't need to pause rx queue for ad-hoc. 
*/ + usbnet_purge_paused_rxq(usbdev); + usbnet_resume_rx(usbdev); + + ret = set_essid(usbdev, &ssid); + if (ret < 0) + netdev_dbg(usbdev->net, "join_ibss: set_essid failed, %d\n", + ret); + return ret; + +err_turn_radio_on: + disassociate(usbdev, true); + + return ret; +} + +static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n"); + + priv->connected = false; + memset(priv->bssid, 0, ETH_ALEN); + + return deauthenticate(usbdev); +} + +static int rndis_set_channel(struct wiphy *wiphy, + struct ieee80211_channel *chan, enum nl80211_channel_type channel_type) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + return set_channel(usbdev, + ieee80211_frequency_to_channel(chan->center_freq)); +} + +static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr, + struct key_params *params) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + __le32 flags; + + netdev_dbg(usbdev->net, "%s(%i, %pM, %08x)\n", + __func__, key_index, mac_addr, params->cipher); + + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + return add_wep_key(usbdev, params->key, params->key_len, + key_index); + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + flags = 0; + + if (params->seq && params->seq_len > 0) + flags |= NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ; + if (mac_addr) + flags |= NDIS_80211_ADDKEY_PAIRWISE_KEY | + NDIS_80211_ADDKEY_TRANSMIT_KEY; + + return add_wpa_key(usbdev, params->key, params->key_len, + key_index, mac_addr, params->seq, + params->seq_len, params->cipher, flags); + default: + netdev_dbg(usbdev->net, "%s(): unsupported cipher %08x\n", + __func__, params->cipher); + return -ENOTSUPP; + } +} + +static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + netdev_dbg(usbdev->net, "%s(%i, %pM)\n", __func__, key_index, mac_addr); + + return remove_key(usbdev, key_index, mac_addr); +} + +static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + struct rndis_wlan_encr_key key; + + netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index); + + priv->encr_tx_key_index = key_index; + + key = priv->encr_keys[key_index]; + + return add_wep_key(usbdev, key.material, key.len, key_index); +} + +static void rndis_fill_station_info(struct usbnet *usbdev, + struct station_info *sinfo) +{ + __le32 linkspeed, rssi; + int ret, len; + + memset(sinfo, 0, sizeof(*sinfo)); + + len = sizeof(linkspeed); + ret = rndis_query_oid(usbdev, OID_GEN_LINK_SPEED, &linkspeed, &len); + if (ret == 0) { + sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000; + sinfo->filled |= STATION_INFO_TX_BITRATE; + } + + len = sizeof(rssi); + ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); + if (ret == 0) { + sinfo->signal = level_to_qual(le32_to_cpu(rssi)); + sinfo->filled |= STATION_INFO_SIGNAL; + } +} + +static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_info *sinfo) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev 
= priv->usbdev; + + if (compare_ether_addr(priv->bssid, mac)) + return -ENOENT; + + rndis_fill_station_info(usbdev, sinfo); + + return 0; +} + +static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + if (idx != 0) + return -ENOENT; + + memcpy(mac, priv->bssid, ETH_ALEN); + + rndis_fill_station_info(usbdev, sinfo); + + return 0; +} + +/* + * workers, indication handlers, device poller + */ +static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ndis_80211_assoc_info *info; + u8 assoc_buf[sizeof(*info) + IW_CUSTOM_MAX + 32]; + u8 bssid[ETH_ALEN]; + int resp_ie_len, req_ie_len; + u8 *req_ie, *resp_ie; + int ret, offset; + bool roamed = false; + + if (priv->infra_mode == NDIS_80211_INFRA_INFRA && priv->connected) { + /* received media connect indication while connected, either + * device reassociated with same AP or roamed to new. */ + roamed = true; + } + + req_ie_len = 0; + resp_ie_len = 0; + req_ie = NULL; + resp_ie = NULL; + + if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { + memset(assoc_buf, 0, sizeof(assoc_buf)); + info = (void *)assoc_buf; + + /* Get association info IEs from device and send them back to + * userspace. */ + ret = get_association_info(usbdev, info, sizeof(assoc_buf)); + if (!ret) { + req_ie_len = le32_to_cpu(info->req_ie_length); + if (req_ie_len > 0) { + offset = le32_to_cpu(info->offset_req_ies); + req_ie = (u8 *)info + offset; + } + + resp_ie_len = le32_to_cpu(info->resp_ie_length); + if (resp_ie_len > 0) { + offset = le32_to_cpu(info->offset_resp_ies); + resp_ie = (u8 *)info + offset; + } + } + } else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC)) + return; + + ret = get_bssid(usbdev, bssid); + if (ret < 0) + memset(bssid, 0, sizeof(bssid)); + + netdev_dbg(usbdev->net, "link up work: [%pM]%s\n", + bssid, roamed ? " roamed" : ""); + + /* Internal bss list in device always contains at least the currently + * connected bss and we can get it to cfg80211 with + * rndis_check_bssid_list(). + * NOTE: This is true for Broadcom chip, but not mentioned in RNDIS + * spec. 
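+ * Presumably this also ensures cfg80211 already has a BSS entry for this
+ * AP by the time the connect/roam result is reported below.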
+ */ + rndis_check_bssid_list(usbdev); + + if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { + if (!roamed) + cfg80211_connect_result(usbdev->net, bssid, req_ie, + req_ie_len, resp_ie, + resp_ie_len, 0, GFP_KERNEL); + else + cfg80211_roamed(usbdev->net, bssid, req_ie, req_ie_len, + resp_ie, resp_ie_len, GFP_KERNEL); + } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) + cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); + + priv->connected = true; + memcpy(priv->bssid, bssid, ETH_ALEN); + + usbnet_resume_rx(usbdev); + netif_carrier_on(usbdev->net); +} + +static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) +{ + union iwreq_data evt; + + netif_carrier_off(usbdev->net); + + evt.data.flags = 0; + evt.data.length = 0; + memset(evt.ap_addr.sa_data, 0, ETH_ALEN); + wireless_send_event(usbdev->net, SIOCGIWAP, &evt, NULL); +} + +static void rndis_wlan_worker(struct work_struct *work) +{ + struct rndis_wlan_private *priv = + container_of(work, struct rndis_wlan_private, work); + struct usbnet *usbdev = priv->usbdev; + + if (test_and_clear_bit(WORK_LINK_UP, &priv->work_pending)) + rndis_wlan_do_link_up_work(usbdev); + + if (test_and_clear_bit(WORK_LINK_DOWN, &priv->work_pending)) + rndis_wlan_do_link_down_work(usbdev); + + if (test_and_clear_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending)) + set_multicast_list(usbdev); +} + +static void rndis_wlan_set_multicast_list(struct net_device *dev) +{ + struct usbnet *usbdev = netdev_priv(dev); + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + + if (test_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending)) + return; + + set_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending); + queue_work(priv->workqueue, &priv->work); +} + +static void rndis_wlan_auth_indication(struct usbnet *usbdev, + struct ndis_80211_status_indication *indication, + int len) +{ + u8 *buf; + const char *type; + int flags, buflen, key_id; + bool pairwise_error, group_error; + struct ndis_80211_auth_request *auth_req; + enum nl80211_key_type key_type; + + /* must have at least one array entry */ + if (len < offsetof(struct ndis_80211_status_indication, u) + + sizeof(struct ndis_80211_auth_request)) { + netdev_info(usbdev->net, "authentication indication: too short message (%i)\n", + len); + return; + } + + buf = (void *)&indication->u.auth_request[0]; + buflen = len - offsetof(struct ndis_80211_status_indication, u); + + while (buflen >= sizeof(*auth_req)) { + auth_req = (void *)buf; + type = "unknown"; + flags = le32_to_cpu(auth_req->flags); + pairwise_error = false; + group_error = false; + + if (flags & 0x1) + type = "reauth request"; + if (flags & 0x2) + type = "key update request"; + if (flags & 0x6) { + pairwise_error = true; + type = "pairwise_error"; + } + if (flags & 0xe) { + group_error = true; + type = "group_error"; + } + + netdev_info(usbdev->net, "authentication indication: %s (0x%08x)\n", + type, le32_to_cpu(auth_req->flags)); + + if (pairwise_error) { + key_type = NL80211_KEYTYPE_PAIRWISE; + key_id = -1; + + cfg80211_michael_mic_failure(usbdev->net, + auth_req->bssid, + key_type, key_id, NULL, + GFP_KERNEL); + } + + if (group_error) { + key_type = NL80211_KEYTYPE_GROUP; + key_id = -1; + + cfg80211_michael_mic_failure(usbdev->net, + auth_req->bssid, + key_type, key_id, NULL, + GFP_KERNEL); + } + + buflen -= le32_to_cpu(auth_req->length); + buf += le32_to_cpu(auth_req->length); + } +} + +static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev, + struct ndis_80211_status_indication *indication, + int len) +{ + struct 
ndis_80211_pmkid_cand_list *cand_list; + int list_len, expected_len, i; + + if (len < offsetof(struct ndis_80211_status_indication, u) + + sizeof(struct ndis_80211_pmkid_cand_list)) { + netdev_info(usbdev->net, "pmkid candidate list indication: too short message (%i)\n", + len); + return; + } + + list_len = le32_to_cpu(indication->u.cand_list.num_candidates) * + sizeof(struct ndis_80211_pmkid_candidate); + expected_len = sizeof(struct ndis_80211_pmkid_cand_list) + list_len + + offsetof(struct ndis_80211_status_indication, u); + + if (len < expected_len) { + netdev_info(usbdev->net, "pmkid candidate list indication: list larger than buffer (%i < %i)\n", + len, expected_len); + return; + } + + cand_list = &indication->u.cand_list; + + netdev_info(usbdev->net, "pmkid candidate list indication: version %i, candidates %i\n", + le32_to_cpu(cand_list->version), + le32_to_cpu(cand_list->num_candidates)); + + if (le32_to_cpu(cand_list->version) != 1) + return; + + for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) { + struct ndis_80211_pmkid_candidate *cand = + &cand_list->candidate_list[i]; + + netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n", + i, le32_to_cpu(cand->flags), cand->bssid); + +#if 0 + struct iw_pmkid_cand pcand; + union iwreq_data wrqu; + + memset(&pcand, 0, sizeof(pcand)); + if (le32_to_cpu(cand->flags) & 0x01) + pcand.flags |= IW_PMKID_CAND_PREAUTH; + pcand.index = i; + memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN); + + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = sizeof(pcand); + wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu, + (u8 *)&pcand); +#endif + } +} + +static void rndis_wlan_media_specific_indication(struct usbnet *usbdev, + struct rndis_indicate *msg, int buflen) +{ + struct ndis_80211_status_indication *indication; + int len, offset; + + offset = offsetof(struct rndis_indicate, status) + + le32_to_cpu(msg->offset); + len = le32_to_cpu(msg->length); + + if (len < 8) { + netdev_info(usbdev->net, "media specific indication, ignore too short message (%i < 8)\n", + len); + return; + } + + if (offset + len > buflen) { + netdev_info(usbdev->net, "media specific indication, too large to fit to buffer (%i > %i)\n", + offset + len, buflen); + return; + } + + indication = (void *)((u8 *)msg + offset); + + switch (le32_to_cpu(indication->status_type)) { + case NDIS_80211_STATUSTYPE_RADIOSTATE: + netdev_info(usbdev->net, "radio state indication: %i\n", + le32_to_cpu(indication->u.radio_status)); + return; + + case NDIS_80211_STATUSTYPE_MEDIASTREAMMODE: + netdev_info(usbdev->net, "media stream mode indication: %i\n", + le32_to_cpu(indication->u.media_stream_mode)); + return; + + case NDIS_80211_STATUSTYPE_AUTHENTICATION: + rndis_wlan_auth_indication(usbdev, indication, len); + return; + + case NDIS_80211_STATUSTYPE_PMKID_CANDIDATELIST: + rndis_wlan_pmkid_cand_list_indication(usbdev, indication, len); + return; + + default: + netdev_info(usbdev->net, "media specific indication: unknown status type 0x%08x\n", + le32_to_cpu(indication->status_type)); + } +} + +static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct rndis_indicate *msg = ind; + + switch (msg->status) { + case RNDIS_STATUS_MEDIA_CONNECT: + if (priv->current_command_oid == OID_802_11_ADD_KEY) { + /* OID_802_11_ADD_KEY causes sometimes extra + * "media connect" indications which confuses driver + * and userspace to think that device is + * roaming/reassociating when it isn't. 
+			 */
+			netdev_dbg(usbdev->net, "ignored OID_802_11_ADD_KEY triggered 'media connect'\n");
+			return;
+		}
+
+		usbnet_pause_rx(usbdev);
+
+		netdev_info(usbdev->net, "media connect\n");
+
+		/* queue work to avoid recursive calls into rndis_command */
+		set_bit(WORK_LINK_UP, &priv->work_pending);
+		queue_work(priv->workqueue, &priv->work);
+		break;
+
+	case RNDIS_STATUS_MEDIA_DISCONNECT:
+		netdev_info(usbdev->net, "media disconnect\n");
+
+		/* queue work to avoid recursive calls into rndis_command */
+		set_bit(WORK_LINK_DOWN, &priv->work_pending);
+		queue_work(priv->workqueue, &priv->work);
+		break;
+
+	case RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION:
+		rndis_wlan_media_specific_indication(usbdev, msg, buflen);
+		break;
+
+	default:
+		netdev_info(usbdev->net, "indication: 0x%08x\n",
+			    le32_to_cpu(msg->status));
+		break;
+	}
+}
+
+static int rndis_wlan_get_caps(struct usbnet *usbdev)
+{
+	struct {
+		__le32 num_items;
+		__le32 items[8];
+	} networks_supported;
+	int len, retval, i, n;
+	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+
+	/* determine supported modes */
+	len = sizeof(networks_supported);
+	retval = rndis_query_oid(usbdev, OID_802_11_NETWORK_TYPES_SUPPORTED,
+				 &networks_supported, &len);
+	if (retval >= 0) {
+		n = le32_to_cpu(networks_supported.num_items);
+		if (n > 8)
+			n = 8;
+		for (i = 0; i < n; i++) {
+			switch (le32_to_cpu(networks_supported.items[i])) {
+			case NDIS_80211_TYPE_FREQ_HOP:
+			case NDIS_80211_TYPE_DIRECT_SEQ:
+				priv->caps |= CAP_MODE_80211B;
+				break;
+			case NDIS_80211_TYPE_OFDM_A:
+				priv->caps |= CAP_MODE_80211A;
+				break;
+			case NDIS_80211_TYPE_OFDM_G:
+				priv->caps |= CAP_MODE_80211G;
+				break;
+			}
+		}
+	}
+
+	return retval;
+}
+
+#define DEVICE_POLLER_JIFFIES (HZ)
+static void rndis_device_poller(struct work_struct *work)
+{
+	struct rndis_wlan_private *priv =
+		container_of(work, struct rndis_wlan_private,
+			     dev_poller_work.work);
+	struct usbnet *usbdev = priv->usbdev;
+	__le32 rssi, tmp;
+	int len, ret, j;
+	int update_jiffies = DEVICE_POLLER_JIFFIES;
+	void *buf;
+
+	/* Only check/do workaround when connected. Calling is_associated()
+	 * also polls the device with rndis_command() and catches media link
+	 * indications.
+	 */
+	if (!is_associated(usbdev))
+		goto end;
+
+	len = sizeof(rssi);
+	ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len);
+	if (ret == 0)
+		priv->last_qual = level_to_qual(le32_to_cpu(rssi));
+
+	netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
+		   ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
+
+	/* Work around transfer stalls on poor quality links.
+	 * TODO: find the right way to fix these stalls (stalls do not happen
+	 * with the ndiswrapper/windows driver). */
+	if (priv->param_workaround_interval > 0 && priv->last_qual <= 25) {
+		/* Decrease stats worker interval to catch stalls faster.
+		 * Faster than 400-500ms causes packet loss, slower doesn't
+		 * catch stalls fast enough.
+		 */
+		j = msecs_to_jiffies(priv->param_workaround_interval);
+		if (j > DEVICE_POLLER_JIFFIES)
+			j = DEVICE_POLLER_JIFFIES;
+		else if (j <= 0)
+			j = 1;
+		update_jiffies = j;
+
+		/* Send scan OID. Use of both OIDs is required to get device
+		 * working.
+		 */
+		tmp = cpu_to_le32(1);
+		rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
+			      sizeof(tmp));
+
+		len = CONTROL_BUFFER_SIZE;
+		buf = kmalloc(len, GFP_KERNEL);
+		if (!buf)
+			goto end;
+
+		rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len);
+		kfree(buf);
+	}
+
+end:
+	if (update_jiffies >= HZ)
+		update_jiffies = round_jiffies_relative(update_jiffies);
+	else {
+		j = round_jiffies_relative(update_jiffies);
+		if (abs(j - update_jiffies) <= 10)
+			update_jiffies = j;
+	}
+
+	queue_delayed_work(priv->workqueue, &priv->dev_poller_work,
+			   update_jiffies);
+}
+
+/*
+ * driver/device initialization
+ */
+static void rndis_copy_module_params(struct usbnet *usbdev)
+{
+	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+
+	priv->param_country[0] = modparam_country[0];
+	priv->param_country[1] = modparam_country[1];
+	priv->param_country[2] = 0;
+	priv->param_frameburst = modparam_frameburst;
+	priv->param_afterburner = modparam_afterburner;
+	priv->param_power_save = modparam_power_save;
+	priv->param_power_output = modparam_power_output;
+	priv->param_roamtrigger = modparam_roamtrigger;
+	priv->param_roamdelta = modparam_roamdelta;
+
+	priv->param_country[0] = toupper(priv->param_country[0]);
+	priv->param_country[1] = toupper(priv->param_country[1]);
+	/* doesn't support EU as country code, use FI instead */
+	if (!strcmp(priv->param_country, "EU"))
+		strcpy(priv->param_country, "FI");
+
+	if (priv->param_power_save < 0)
+		priv->param_power_save = 0;
+	else if (priv->param_power_save > 2)
+		priv->param_power_save = 2;
+
+	if (priv->param_power_output < 0)
+		priv->param_power_output = 0;
+	else if (priv->param_power_output > 3)
+		priv->param_power_output = 3;
+
+	if (priv->param_roamtrigger < -80)
+		priv->param_roamtrigger = -80;
+	else if (priv->param_roamtrigger > -60)
+		priv->param_roamtrigger = -60;
+
+	if (priv->param_roamdelta < 0)
+		priv->param_roamdelta = 0;
+	else if (priv->param_roamdelta > 2)
+		priv->param_roamdelta = 2;
+
+	if (modparam_workaround_interval < 0)
+		priv->param_workaround_interval = 500;
+	else
+		priv->param_workaround_interval = modparam_workaround_interval;
+}
+
+static int bcm4320a_early_init(struct usbnet *usbdev)
+{
+	/* copy module parameters for bcm4320a so that iwconfig reports txpower
+	 * and the workaround parameter is copied to the private structure
+	 * correctly.
+	 */
+	rndis_copy_module_params(usbdev);
+
+	/* bcm4320a doesn't handle configuration parameters well. Trying to
+	 * set any of them results in a partially zeroed mac and a broken
+	 * device.
+	 */
+
+	return 0;
+}
+
+static int bcm4320b_early_init(struct usbnet *usbdev)
+{
+	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+	char buf[8];
+
+	rndis_copy_module_params(usbdev);
+
+	/* Early initialization settings; setting these won't have any effect
+	 * if called after generic_rndis_bind().
+	 */
+
+	rndis_set_config_parameter_str(usbdev, "Country", priv->param_country);
+	rndis_set_config_parameter_str(usbdev, "FrameBursting",
+				       priv->param_frameburst ? "1" : "0");
+	rndis_set_config_parameter_str(usbdev, "Afterburner",
+				       priv->param_afterburner ?
"1" : "0"); + sprintf(buf, "%d", priv->param_power_save); + rndis_set_config_parameter_str(usbdev, "PowerSaveMode", buf); + sprintf(buf, "%d", priv->param_power_output); + rndis_set_config_parameter_str(usbdev, "PwrOut", buf); + sprintf(buf, "%d", priv->param_roamtrigger); + rndis_set_config_parameter_str(usbdev, "RoamTrigger", buf); + sprintf(buf, "%d", priv->param_roamdelta); + rndis_set_config_parameter_str(usbdev, "RoamDelta", buf); + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +/* same as rndis_netdev_ops but with local multicast handler */ +static const struct net_device_ops rndis_wlan_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = usbnet_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = rndis_wlan_set_multicast_list, +}; +#endif + +static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) +{ + struct wiphy *wiphy; + struct rndis_wlan_private *priv; + int retval, len; + __le32 tmp; + + /* allocate wiphy and rndis private data + * NOTE: We only support a single virtual interface, so wiphy + * and wireless_dev are somewhat synonymous for this device. + */ + wiphy = wiphy_new(&rndis_config_ops, sizeof(struct rndis_wlan_private)); + if (!wiphy) + return -ENOMEM; + + priv = wiphy_priv(wiphy); + usbdev->net->ieee80211_ptr = &priv->wdev; + priv->wdev.wiphy = wiphy; + priv->wdev.iftype = NL80211_IFTYPE_STATION; + + /* These have to be initialized before calling generic_rndis_bind(). + * Otherwise we'll be in big trouble in rndis_wlan_early_init(). + */ + usbdev->driver_priv = priv; + priv->usbdev = usbdev; + + mutex_init(&priv->command_lock); + + /* because rndis_command() sleeps we need to use workqueue */ + priv->workqueue = create_singlethread_workqueue("rndis_wlan"); + INIT_WORK(&priv->work, rndis_wlan_worker); + INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller); + INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results); + + /* try bind rndis_host */ + retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS); + if (retval < 0) + goto fail; + + /* generic_rndis_bind set packet filter to multicast_all+ + * promisc mode which doesn't work well for our devices (device + * picks up rssi to closest station instead of to access point). + * + * rndis_host wants to avoid all OID as much as possible + * so do promisc/multicast handling in rndis_wlan. 
+ */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + usbdev->net->netdev_ops = &rndis_wlan_netdev_ops; +#else + usbdev->net->set_multicast_list = rndis_wlan_set_multicast_list; +#endif + + tmp = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST; + retval = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &tmp, + sizeof(tmp)); + + len = sizeof(tmp); + retval = rndis_query_oid(usbdev, OID_802_3_MAXIMUM_LIST_SIZE, &tmp, + &len); + priv->multicast_size = le32_to_cpu(tmp); + if (retval < 0 || priv->multicast_size < 0) + priv->multicast_size = 0; + if (priv->multicast_size > 0) + usbdev->net->flags |= IFF_MULTICAST; + else + usbdev->net->flags &= ~IFF_MULTICAST; + + /* fill-out wiphy structure and register w/ cfg80211 */ + memcpy(wiphy->perm_addr, usbdev->net->dev_addr, ETH_ALEN); + wiphy->privid = rndis_wiphy_privid; + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) + | BIT(NL80211_IFTYPE_ADHOC); + wiphy->max_scan_ssids = 1; + + /* TODO: fill-out band/encr information based on priv->caps */ + rndis_wlan_get_caps(usbdev); + + memcpy(priv->channels, rndis_channels, sizeof(rndis_channels)); + memcpy(priv->rates, rndis_rates, sizeof(rndis_rates)); + priv->band.channels = priv->channels; + priv->band.n_channels = ARRAY_SIZE(rndis_channels); + priv->band.bitrates = priv->rates; + priv->band.n_bitrates = ARRAY_SIZE(rndis_rates); + wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; + + memcpy(priv->cipher_suites, rndis_cipher_suites, + sizeof(rndis_cipher_suites)); + wiphy->cipher_suites = priv->cipher_suites; + wiphy->n_cipher_suites = ARRAY_SIZE(rndis_cipher_suites); + + set_wiphy_dev(wiphy, &usbdev->udev->dev); + + if (wiphy_register(wiphy)) { + retval = -ENODEV; + goto fail; + } + + set_default_iw_params(usbdev); + + /* set default rts/frag */ + rndis_set_wiphy_params(wiphy, + WIPHY_PARAM_FRAG_THRESHOLD | WIPHY_PARAM_RTS_THRESHOLD); + + /* turn radio on */ + priv->radio_on = true; + disassociate(usbdev, true); + netif_carrier_off(usbdev->net); + + return 0; + +fail: + cancel_delayed_work_sync(&priv->dev_poller_work); + cancel_delayed_work_sync(&priv->scan_work); + cancel_work_sync(&priv->work); + flush_workqueue(priv->workqueue); + destroy_workqueue(priv->workqueue); + + wiphy_free(wiphy); + return retval; +} + +static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + + /* turn radio off */ + disassociate(usbdev, false); + + cancel_delayed_work_sync(&priv->dev_poller_work); + cancel_delayed_work_sync(&priv->scan_work); + cancel_work_sync(&priv->work); + flush_workqueue(priv->workqueue); + destroy_workqueue(priv->workqueue); + + if (priv && priv->wpa_ie_len) + kfree(priv->wpa_ie); + + rndis_unbind(usbdev, intf); + + wiphy_unregister(priv->wdev.wiphy); + wiphy_free(priv->wdev.wiphy); +} + +static int rndis_wlan_reset(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + int retval; + + netdev_dbg(usbdev->net, "%s()\n", __func__); + + retval = rndis_reset(usbdev); + if (retval) + netdev_warn(usbdev->net, "rndis_reset failed: %d\n", retval); + + /* rndis_reset cleared multicast list, so restore here. 
+ (set_multicast_list() also turns on current packet filter) */ + set_multicast_list(usbdev); + + queue_delayed_work(priv->workqueue, &priv->dev_poller_work, + round_jiffies_relative(DEVICE_POLLER_JIFFIES)); + + return deauthenticate(usbdev); +} + +static int rndis_wlan_stop(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + int retval; + __le32 filter; + + netdev_dbg(usbdev->net, "%s()\n", __func__); + + retval = disassociate(usbdev, false); + + priv->work_pending = 0; + cancel_delayed_work_sync(&priv->dev_poller_work); + cancel_delayed_work_sync(&priv->scan_work); + cancel_work_sync(&priv->work); + flush_workqueue(priv->workqueue); + + if (priv->scan_request) { + cfg80211_scan_done(priv->scan_request, true); + priv->scan_request = NULL; + } + + /* Set current packet filter zero to block receiving data packets from + device. */ + filter = 0; + rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter, + sizeof(filter)); + + return retval; +} + +static const struct driver_info bcm4320b_info = { + .description = "Wireless RNDIS device, BCM4320b based", + .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT | + FLAG_AVOID_UNLINK_URBS, + .bind = rndis_wlan_bind, + .unbind = rndis_wlan_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, + .reset = rndis_wlan_reset, + .stop = rndis_wlan_stop, + .early_init = bcm4320b_early_init, + .indication = rndis_wlan_indication, +}; + +static const struct driver_info bcm4320a_info = { + .description = "Wireless RNDIS device, BCM4320a based", + .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT | + FLAG_AVOID_UNLINK_URBS, + .bind = rndis_wlan_bind, + .unbind = rndis_wlan_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, + .reset = rndis_wlan_reset, + .stop = rndis_wlan_stop, + .early_init = bcm4320a_early_init, + .indication = rndis_wlan_indication, +}; + +static const struct driver_info rndis_wlan_info = { + .description = "Wireless RNDIS device", + .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT | + FLAG_AVOID_UNLINK_URBS, + .bind = rndis_wlan_bind, + .unbind = rndis_wlan_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, + .reset = rndis_wlan_reset, + .stop = rndis_wlan_stop, + .early_init = bcm4320a_early_init, + .indication = rndis_wlan_indication, +}; + +/*-------------------------------------------------------------------------*/ + +static const struct usb_device_id products [] = { +#define RNDIS_MASTER_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = 2 /* ACM */, \ + .bInterfaceProtocol = 0x0ff + +/* INF driver for these devices have DriverVer >= 4.xx.xx.xx and many custom + * parameters available. Chipset marked as 'BCM4320SKFBG' in NDISwrapper-wiki. + */ +{ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0411, + .idProduct = 0x00bc, /* Buffalo WLI-U2-KG125S */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0baf, + .idProduct = 0x011b, /* U.S. 
Robotics USR5421 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x050d, + .idProduct = 0x011b, /* Belkin F5D7051 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x1799, /* Belkin has two vendor ids */ + .idProduct = 0x011b, /* Belkin F5D7051 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x13b1, + .idProduct = 0x0014, /* Linksys WUSB54GSv2 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x13b1, + .idProduct = 0x0026, /* Linksys WUSB54GSC */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0b05, + .idProduct = 0x1717, /* Asus WL169gE */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0a5c, + .idProduct = 0xd11b, /* Eminent EM4045 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x1690, + .idProduct = 0x0715, /* BT Voyager 1055 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, +/* These devices have DriverVer < 4.xx.xx.xx and do not have any custom + * parameters available, hardware probably contain older firmware version with + * no way of updating. Chipset marked as 'BCM4320????' in NDISwrapper-wiki. + */ +{ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x13b1, + .idProduct = 0x000e, /* Linksys WUSB54GSv1 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320a_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0baf, + .idProduct = 0x0111, /* U.S. Robotics USR5420 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320a_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0411, + .idProduct = 0x004b, /* BUFFALO WLI-USB-G54 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320a_info, +}, +/* Generic Wireless RNDIS devices that we don't have exact + * idVendor/idProduct/chip yet. 
+ */ +{ + /* RNDIS is MSFT's un-official variant of CDC ACM */ + USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), + .driver_info = (unsigned long) &rndis_wlan_info, +}, { + /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */ + USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1), + .driver_info = (unsigned long) &rndis_wlan_info, +}, + { }, // END +}; +MODULE_DEVICE_TABLE(usb, products); + +static struct usb_driver rndis_wlan_driver = { + .name = "rndis_wlan", + .id_table = products, + .probe = usbnet_probe, + .disconnect = usbnet_disconnect, + .suspend = usbnet_suspend, + .resume = usbnet_resume, +}; + +static int __init rndis_wlan_init(void) +{ + return usb_register(&rndis_wlan_driver); +} +module_init(rndis_wlan_init); + +static void __exit rndis_wlan_exit(void) +{ + usb_deregister(&rndis_wlan_driver); +} +module_exit(rndis_wlan_exit); + +MODULE_AUTHOR("Bjorge Dijkstra"); +MODULE_AUTHOR("Jussi Kivilinna"); +MODULE_DESCRIPTION("Driver for RNDIS based USB Wireless adapters"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/net/wireless/rndis_wlan.c.orig b/drivers/net/wireless/rndis_wlan.c.orig new file mode 100644 index 0000000..2887047 --- /dev/null +++ b/drivers/net/wireless/rndis_wlan.c.orig @@ -0,0 +1,3112 @@ +/* + * Driver for RNDIS based wireless USB devices. + * + * Copyright (C) 2007 by Bjorge Dijkstra + * Copyright (C) 2008-2009 by Jussi Kivilinna + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Portions of this file are based on NDISwrapper project, + * Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani + * http://ndiswrapper.sourceforge.net/ + */ + +// #define DEBUG // error path messages, extra info +// #define VERBOSE // more; success messages + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* NOTE: All these are settings for Broadcom chipset */ +static char modparam_country[4] = "EU"; +module_param_string(country, modparam_country, 4, 0444); +MODULE_PARM_DESC(country, "Country code (ISO 3166-1 alpha-2), default: EU"); + +static int modparam_frameburst = 1; +module_param_named(frameburst, modparam_frameburst, int, 0444); +MODULE_PARM_DESC(frameburst, "enable frame bursting (default: on)"); + +static int modparam_afterburner = 0; +module_param_named(afterburner, modparam_afterburner, int, 0444); +MODULE_PARM_DESC(afterburner, + "enable afterburner aka '125 High Speed Mode' (default: off)"); + +static int modparam_power_save = 0; +module_param_named(power_save, modparam_power_save, int, 0444); +MODULE_PARM_DESC(power_save, + "set power save mode: 0=off, 1=on, 2=fast (default: off)"); + +static int modparam_power_output = 3; +module_param_named(power_output, modparam_power_output, int, 0444); +MODULE_PARM_DESC(power_output, + "set power output: 0=25%, 1=50%, 2=75%, 3=100% (default: 100%)"); + +static int modparam_roamtrigger = -70; +module_param_named(roamtrigger, modparam_roamtrigger, int, 0444); +MODULE_PARM_DESC(roamtrigger, + "set roaming dBm trigger: -80=optimize for distance, " + "-60=bandwidth (default: -70)"); + +static int modparam_roamdelta = 1; +module_param_named(roamdelta, modparam_roamdelta, int, 0444); +MODULE_PARM_DESC(roamdelta, + "set roaming tendency: 0=aggressive, 1=moderate, " + "2=conservative (default: moderate)"); + +static int modparam_workaround_interval; +module_param_named(workaround_interval, modparam_workaround_interval, + int, 0444); +MODULE_PARM_DESC(workaround_interval, + "set stall workaround interval in msecs (0=disabled) (default: 0)"); + + +/* various RNDIS OID defs */ +#define OID_GEN_LINK_SPEED cpu_to_le32(0x00010107) +#define OID_GEN_RNDIS_CONFIG_PARAMETER cpu_to_le32(0x0001021b) + +#define OID_GEN_XMIT_OK cpu_to_le32(0x00020101) +#define OID_GEN_RCV_OK cpu_to_le32(0x00020102) +#define OID_GEN_XMIT_ERROR cpu_to_le32(0x00020103) +#define OID_GEN_RCV_ERROR cpu_to_le32(0x00020104) +#define OID_GEN_RCV_NO_BUFFER cpu_to_le32(0x00020105) + +#define OID_802_3_CURRENT_ADDRESS cpu_to_le32(0x01010102) +#define OID_802_3_MULTICAST_LIST cpu_to_le32(0x01010103) +#define OID_802_3_MAXIMUM_LIST_SIZE cpu_to_le32(0x01010104) + +#define OID_802_11_BSSID cpu_to_le32(0x0d010101) +#define OID_802_11_SSID cpu_to_le32(0x0d010102) +#define OID_802_11_INFRASTRUCTURE_MODE cpu_to_le32(0x0d010108) +#define OID_802_11_ADD_WEP cpu_to_le32(0x0d010113) +#define OID_802_11_REMOVE_WEP cpu_to_le32(0x0d010114) +#define OID_802_11_DISASSOCIATE cpu_to_le32(0x0d010115) +#define OID_802_11_AUTHENTICATION_MODE cpu_to_le32(0x0d010118) +#define OID_802_11_PRIVACY_FILTER cpu_to_le32(0x0d010119) +#define OID_802_11_BSSID_LIST_SCAN cpu_to_le32(0x0d01011a) +#define OID_802_11_ENCRYPTION_STATUS cpu_to_le32(0x0d01011b) 
+#define OID_802_11_ADD_KEY cpu_to_le32(0x0d01011d) +#define OID_802_11_REMOVE_KEY cpu_to_le32(0x0d01011e) +#define OID_802_11_ASSOCIATION_INFORMATION cpu_to_le32(0x0d01011f) +#define OID_802_11_PMKID cpu_to_le32(0x0d010123) +#define OID_802_11_NETWORK_TYPES_SUPPORTED cpu_to_le32(0x0d010203) +#define OID_802_11_NETWORK_TYPE_IN_USE cpu_to_le32(0x0d010204) +#define OID_802_11_TX_POWER_LEVEL cpu_to_le32(0x0d010205) +#define OID_802_11_RSSI cpu_to_le32(0x0d010206) +#define OID_802_11_RSSI_TRIGGER cpu_to_le32(0x0d010207) +#define OID_802_11_FRAGMENTATION_THRESHOLD cpu_to_le32(0x0d010209) +#define OID_802_11_RTS_THRESHOLD cpu_to_le32(0x0d01020a) +#define OID_802_11_SUPPORTED_RATES cpu_to_le32(0x0d01020e) +#define OID_802_11_CONFIGURATION cpu_to_le32(0x0d010211) +#define OID_802_11_BSSID_LIST cpu_to_le32(0x0d010217) + + +/* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */ +#define WL_NOISE -96 /* typical noise level in dBm */ +#define WL_SIGMAX -32 /* typical maximum signal level in dBm */ + + +/* Assume that Broadcom 4320 (only chipset at time of writing known to be + * based on wireless rndis) has default txpower of 13dBm. + * This value is from Linksys WUSB54GSC User Guide, Appendix F: Specifications. + * 100% : 20 mW ~ 13dBm + * 75% : 15 mW ~ 12dBm + * 50% : 10 mW ~ 10dBm + * 25% : 5 mW ~ 7dBm + */ +#define BCM4320_DEFAULT_TXPOWER_DBM_100 13 +#define BCM4320_DEFAULT_TXPOWER_DBM_75 12 +#define BCM4320_DEFAULT_TXPOWER_DBM_50 10 +#define BCM4320_DEFAULT_TXPOWER_DBM_25 7 + + +/* codes for "status" field of completion messages */ +#define RNDIS_STATUS_ADAPTER_NOT_READY cpu_to_le32(0xc0010011) +#define RNDIS_STATUS_ADAPTER_NOT_OPEN cpu_to_le32(0xc0010012) + + +/* NDIS data structures. Taken from wpa_supplicant driver_ndis.c + * slightly modified for datatype endianess, etc + */ +#define NDIS_802_11_LENGTH_SSID 32 +#define NDIS_802_11_LENGTH_RATES 8 +#define NDIS_802_11_LENGTH_RATES_EX 16 + +enum ndis_80211_net_type { + NDIS_80211_TYPE_FREQ_HOP, + NDIS_80211_TYPE_DIRECT_SEQ, + NDIS_80211_TYPE_OFDM_A, + NDIS_80211_TYPE_OFDM_G +}; + +enum ndis_80211_net_infra { + NDIS_80211_INFRA_ADHOC, + NDIS_80211_INFRA_INFRA, + NDIS_80211_INFRA_AUTO_UNKNOWN +}; + +enum ndis_80211_auth_mode { + NDIS_80211_AUTH_OPEN, + NDIS_80211_AUTH_SHARED, + NDIS_80211_AUTH_AUTO_SWITCH, + NDIS_80211_AUTH_WPA, + NDIS_80211_AUTH_WPA_PSK, + NDIS_80211_AUTH_WPA_NONE, + NDIS_80211_AUTH_WPA2, + NDIS_80211_AUTH_WPA2_PSK +}; + +enum ndis_80211_encr_status { + NDIS_80211_ENCR_WEP_ENABLED, + NDIS_80211_ENCR_DISABLED, + NDIS_80211_ENCR_WEP_KEY_ABSENT, + NDIS_80211_ENCR_NOT_SUPPORTED, + NDIS_80211_ENCR_TKIP_ENABLED, + NDIS_80211_ENCR_TKIP_KEY_ABSENT, + NDIS_80211_ENCR_CCMP_ENABLED, + NDIS_80211_ENCR_CCMP_KEY_ABSENT +}; + +enum ndis_80211_priv_filter { + NDIS_80211_PRIV_ACCEPT_ALL, + NDIS_80211_PRIV_8021X_WEP +}; + +enum ndis_80211_status_type { + NDIS_80211_STATUSTYPE_AUTHENTICATION, + NDIS_80211_STATUSTYPE_MEDIASTREAMMODE, + NDIS_80211_STATUSTYPE_PMKID_CANDIDATELIST, + NDIS_80211_STATUSTYPE_RADIOSTATE, +}; + +enum ndis_80211_media_stream_mode { + NDIS_80211_MEDIA_STREAM_OFF, + NDIS_80211_MEDIA_STREAM_ON +}; + +enum ndis_80211_radio_status { + NDIS_80211_RADIO_STATUS_ON, + NDIS_80211_RADIO_STATUS_HARDWARE_OFF, + NDIS_80211_RADIO_STATUS_SOFTWARE_OFF, +}; + +enum ndis_80211_addkey_bits { + NDIS_80211_ADDKEY_8021X_AUTH = cpu_to_le32(1 << 28), + NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ = cpu_to_le32(1 << 29), + NDIS_80211_ADDKEY_PAIRWISE_KEY = cpu_to_le32(1 << 30), + NDIS_80211_ADDKEY_TRANSMIT_KEY = cpu_to_le32(1 << 31) 
+}; + +enum ndis_80211_addwep_bits { + NDIS_80211_ADDWEP_PERCLIENT_KEY = cpu_to_le32(1 << 30), + NDIS_80211_ADDWEP_TRANSMIT_KEY = cpu_to_le32(1 << 31) +}; + +struct ndis_80211_auth_request { + __le32 length; + u8 bssid[6]; + u8 padding[2]; + __le32 flags; +} __attribute__((packed)); + +struct ndis_80211_pmkid_candidate { + u8 bssid[6]; + u8 padding[2]; + __le32 flags; +} __attribute__((packed)); + +struct ndis_80211_pmkid_cand_list { + __le32 version; + __le32 num_candidates; + struct ndis_80211_pmkid_candidate candidate_list[0]; +} __attribute__((packed)); + +struct ndis_80211_status_indication { + __le32 status_type; + union { + __le32 media_stream_mode; + __le32 radio_status; + struct ndis_80211_auth_request auth_request[0]; + struct ndis_80211_pmkid_cand_list cand_list; + } u; +} __attribute__((packed)); + +struct ndis_80211_ssid { + __le32 length; + u8 essid[NDIS_802_11_LENGTH_SSID]; +} __attribute__((packed)); + +struct ndis_80211_conf_freq_hop { + __le32 length; + __le32 hop_pattern; + __le32 hop_set; + __le32 dwell_time; +} __attribute__((packed)); + +struct ndis_80211_conf { + __le32 length; + __le32 beacon_period; + __le32 atim_window; + __le32 ds_config; + struct ndis_80211_conf_freq_hop fh_config; +} __attribute__((packed)); + +struct ndis_80211_bssid_ex { + __le32 length; + u8 mac[6]; + u8 padding[2]; + struct ndis_80211_ssid ssid; + __le32 privacy; + __le32 rssi; + __le32 net_type; + struct ndis_80211_conf config; + __le32 net_infra; + u8 rates[NDIS_802_11_LENGTH_RATES_EX]; + __le32 ie_length; + u8 ies[0]; +} __attribute__((packed)); + +struct ndis_80211_bssid_list_ex { + __le32 num_items; + struct ndis_80211_bssid_ex bssid[0]; +} __attribute__((packed)); + +struct ndis_80211_fixed_ies { + u8 timestamp[8]; + __le16 beacon_interval; + __le16 capabilities; +} __attribute__((packed)); + +struct ndis_80211_wep_key { + __le32 size; + __le32 index; + __le32 length; + u8 material[32]; +} __attribute__((packed)); + +struct ndis_80211_key { + __le32 size; + __le32 index; + __le32 length; + u8 bssid[6]; + u8 padding[6]; + u8 rsc[8]; + u8 material[32]; +} __attribute__((packed)); + +struct ndis_80211_remove_key { + __le32 size; + __le32 index; + u8 bssid[6]; + u8 padding[2]; +} __attribute__((packed)); + +struct ndis_config_param { + __le32 name_offs; + __le32 name_length; + __le32 type; + __le32 value_offs; + __le32 value_length; +} __attribute__((packed)); + +struct ndis_80211_assoc_info { + __le32 length; + __le16 req_ies; + struct req_ie { + __le16 capa; + __le16 listen_interval; + u8 cur_ap_address[6]; + } req_ie; + __le32 req_ie_length; + __le32 offset_req_ies; + __le16 resp_ies; + struct resp_ie { + __le16 capa; + __le16 status_code; + __le16 assoc_id; + } resp_ie; + __le32 resp_ie_length; + __le32 offset_resp_ies; +} __attribute__((packed)); + +/* + * private data + */ +#define NET_TYPE_11FB 0 + +#define CAP_MODE_80211A 1 +#define CAP_MODE_80211B 2 +#define CAP_MODE_80211G 4 +#define CAP_MODE_MASK 7 + +#define WORK_LINK_UP (1<<0) +#define WORK_LINK_DOWN (1<<1) +#define WORK_SET_MULTICAST_LIST (1<<2) + +#define RNDIS_WLAN_ALG_NONE 0 +#define RNDIS_WLAN_ALG_WEP (1<<0) +#define RNDIS_WLAN_ALG_TKIP (1<<1) +#define RNDIS_WLAN_ALG_CCMP (1<<2) + +#define RNDIS_WLAN_KEY_MGMT_NONE 0 +#define RNDIS_WLAN_KEY_MGMT_802_1X (1<<0) +#define RNDIS_WLAN_KEY_MGMT_PSK (1<<1) + +#define COMMAND_BUFFER_SIZE (CONTROL_BUFFER_SIZE + sizeof(struct rndis_set)) + +static const struct ieee80211_channel rndis_channels[] = { + { .center_freq = 2412 }, + { .center_freq = 2417 }, + { .center_freq = 2422 }, + { 
.center_freq = 2427 }, + { .center_freq = 2432 }, + { .center_freq = 2437 }, + { .center_freq = 2442 }, + { .center_freq = 2447 }, + { .center_freq = 2452 }, + { .center_freq = 2457 }, + { .center_freq = 2462 }, + { .center_freq = 2467 }, + { .center_freq = 2472 }, + { .center_freq = 2484 }, +}; + +static const struct ieee80211_rate rndis_rates[] = { + { .bitrate = 10 }, + { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60 }, + { .bitrate = 90 }, + { .bitrate = 120 }, + { .bitrate = 180 }, + { .bitrate = 240 }, + { .bitrate = 360 }, + { .bitrate = 480 }, + { .bitrate = 540 } +}; + +static const u32 rndis_cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, +}; + +struct rndis_wlan_encr_key { + int len; + u32 cipher; + u8 material[32]; + u8 bssid[ETH_ALEN]; + bool pairwise; + bool tx_key; +}; + +/* RNDIS device private data */ +struct rndis_wlan_private { + struct usbnet *usbdev; + + struct wireless_dev wdev; + + struct cfg80211_scan_request *scan_request; + + struct workqueue_struct *workqueue; + struct delayed_work dev_poller_work; + struct delayed_work scan_work; + struct work_struct work; + struct mutex command_lock; + unsigned long work_pending; + int last_qual; + + struct ieee80211_supported_band band; + struct ieee80211_channel channels[ARRAY_SIZE(rndis_channels)]; + struct ieee80211_rate rates[ARRAY_SIZE(rndis_rates)]; + u32 cipher_suites[ARRAY_SIZE(rndis_cipher_suites)]; + + int caps; + int multicast_size; + + /* module parameters */ + char param_country[4]; + int param_frameburst; + int param_afterburner; + int param_power_save; + int param_power_output; + int param_roamtrigger; + int param_roamdelta; + u32 param_workaround_interval; + + /* hardware state */ + bool radio_on; + int infra_mode; + bool connected; + u8 bssid[ETH_ALEN]; + struct ndis_80211_ssid essid; + __le32 current_command_oid; + + /* encryption stuff */ + int encr_tx_key_index; + struct rndis_wlan_encr_key encr_keys[4]; + enum nl80211_auth_type wpa_auth_type; + int wpa_version; + int wpa_keymgmt; + int wpa_ie_len; + u8 *wpa_ie; + int wpa_cipher_pair; + int wpa_cipher_group; + + u8 command_buffer[COMMAND_BUFFER_SIZE]; +}; + +/* + * cfg80211 ops + */ +static int rndis_change_virtual_intf(struct wiphy *wiphy, + struct net_device *dev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params); + +static int rndis_scan(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_scan_request *request); + +static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed); + +static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type, + int dbm); +static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm); + +static int rndis_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme); + +static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code); + +static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params); + +static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev); + +static int rndis_set_channel(struct wiphy *wiphy, + struct ieee80211_channel *chan, enum nl80211_channel_type channel_type); + +static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr, + struct key_params *params); + 
+static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr); + +static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index); + +static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_info *sinfo); + +static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo); + +static struct cfg80211_ops rndis_config_ops = { + .change_virtual_intf = rndis_change_virtual_intf, + .scan = rndis_scan, + .set_wiphy_params = rndis_set_wiphy_params, + .set_tx_power = rndis_set_tx_power, + .get_tx_power = rndis_get_tx_power, + .connect = rndis_connect, + .disconnect = rndis_disconnect, + .join_ibss = rndis_join_ibss, + .leave_ibss = rndis_leave_ibss, + .set_channel = rndis_set_channel, + .add_key = rndis_add_key, + .del_key = rndis_del_key, + .set_default_key = rndis_set_default_key, + .get_station = rndis_get_station, + .dump_station = rndis_dump_station, +}; + +static void *rndis_wiphy_privid = &rndis_wiphy_privid; + + +static struct rndis_wlan_private *get_rndis_wlan_priv(struct usbnet *dev) +{ + return (struct rndis_wlan_private *)dev->driver_priv; +} + +static u32 get_bcm4320_power_dbm(struct rndis_wlan_private *priv) +{ + switch (priv->param_power_output) { + default: + case 3: + return BCM4320_DEFAULT_TXPOWER_DBM_100; + case 2: + return BCM4320_DEFAULT_TXPOWER_DBM_75; + case 1: + return BCM4320_DEFAULT_TXPOWER_DBM_50; + case 0: + return BCM4320_DEFAULT_TXPOWER_DBM_25; + } +} + +static bool is_wpa_key(struct rndis_wlan_private *priv, int idx) +{ + int cipher = priv->encr_keys[idx].cipher; + + return (cipher == WLAN_CIPHER_SUITE_CCMP || + cipher == WLAN_CIPHER_SUITE_TKIP); +} + +static int rndis_cipher_to_alg(u32 cipher) +{ + switch (cipher) { + default: + return RNDIS_WLAN_ALG_NONE; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + return RNDIS_WLAN_ALG_WEP; + case WLAN_CIPHER_SUITE_TKIP: + return RNDIS_WLAN_ALG_TKIP; + case WLAN_CIPHER_SUITE_CCMP: + return RNDIS_WLAN_ALG_CCMP; + } +} + +static int rndis_akm_suite_to_key_mgmt(u32 akm_suite) +{ + switch (akm_suite) { + default: + return RNDIS_WLAN_KEY_MGMT_NONE; + case WLAN_AKM_SUITE_8021X: + return RNDIS_WLAN_KEY_MGMT_802_1X; + case WLAN_AKM_SUITE_PSK: + return RNDIS_WLAN_KEY_MGMT_PSK; + } +} + +#ifdef DEBUG +static const char *oid_to_string(__le32 oid) +{ + switch (oid) { +#define OID_STR(oid) case oid: return(#oid) + /* from rndis_host.h */ + OID_STR(OID_802_3_PERMANENT_ADDRESS); + OID_STR(OID_GEN_MAXIMUM_FRAME_SIZE); + OID_STR(OID_GEN_CURRENT_PACKET_FILTER); + OID_STR(OID_GEN_PHYSICAL_MEDIUM); + + /* from rndis_wlan.c */ + OID_STR(OID_GEN_LINK_SPEED); + OID_STR(OID_GEN_RNDIS_CONFIG_PARAMETER); + + OID_STR(OID_GEN_XMIT_OK); + OID_STR(OID_GEN_RCV_OK); + OID_STR(OID_GEN_XMIT_ERROR); + OID_STR(OID_GEN_RCV_ERROR); + OID_STR(OID_GEN_RCV_NO_BUFFER); + + OID_STR(OID_802_3_CURRENT_ADDRESS); + OID_STR(OID_802_3_MULTICAST_LIST); + OID_STR(OID_802_3_MAXIMUM_LIST_SIZE); + + OID_STR(OID_802_11_BSSID); + OID_STR(OID_802_11_SSID); + OID_STR(OID_802_11_INFRASTRUCTURE_MODE); + OID_STR(OID_802_11_ADD_WEP); + OID_STR(OID_802_11_REMOVE_WEP); + OID_STR(OID_802_11_DISASSOCIATE); + OID_STR(OID_802_11_AUTHENTICATION_MODE); + OID_STR(OID_802_11_PRIVACY_FILTER); + OID_STR(OID_802_11_BSSID_LIST_SCAN); + OID_STR(OID_802_11_ENCRYPTION_STATUS); + OID_STR(OID_802_11_ADD_KEY); + OID_STR(OID_802_11_REMOVE_KEY); + OID_STR(OID_802_11_ASSOCIATION_INFORMATION); + 
OID_STR(OID_802_11_PMKID); + OID_STR(OID_802_11_NETWORK_TYPES_SUPPORTED); + OID_STR(OID_802_11_NETWORK_TYPE_IN_USE); + OID_STR(OID_802_11_TX_POWER_LEVEL); + OID_STR(OID_802_11_RSSI); + OID_STR(OID_802_11_RSSI_TRIGGER); + OID_STR(OID_802_11_FRAGMENTATION_THRESHOLD); + OID_STR(OID_802_11_RTS_THRESHOLD); + OID_STR(OID_802_11_SUPPORTED_RATES); + OID_STR(OID_802_11_CONFIGURATION); + OID_STR(OID_802_11_BSSID_LIST); +#undef OID_STR + } + + return "?"; +} +#else +static const char *oid_to_string(__le32 oid) +{ + return "?"; +} +#endif + +/* translate error code */ +static int rndis_error_status(__le32 rndis_status) +{ + int ret = -EINVAL; + switch (rndis_status) { + case RNDIS_STATUS_SUCCESS: + ret = 0; + break; + case RNDIS_STATUS_FAILURE: + case RNDIS_STATUS_INVALID_DATA: + ret = -EINVAL; + break; + case RNDIS_STATUS_NOT_SUPPORTED: + ret = -EOPNOTSUPP; + break; + case RNDIS_STATUS_ADAPTER_NOT_READY: + case RNDIS_STATUS_ADAPTER_NOT_OPEN: + ret = -EBUSY; + break; + } + return ret; +} + +static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev); + union { + void *buf; + struct rndis_msg_hdr *header; + struct rndis_query *get; + struct rndis_query_c *get_c; + } u; + int ret, buflen; + + buflen = *len + sizeof(*u.get); + if (buflen < CONTROL_BUFFER_SIZE) + buflen = CONTROL_BUFFER_SIZE; + + if (buflen > COMMAND_BUFFER_SIZE) { + u.buf = kmalloc(buflen, GFP_KERNEL); + if (!u.buf) + return -ENOMEM; + } else { + u.buf = priv->command_buffer; + } + + mutex_lock(&priv->command_lock); + + memset(u.get, 0, sizeof *u.get); + u.get->msg_type = RNDIS_MSG_QUERY; + u.get->msg_len = cpu_to_le32(sizeof *u.get); + u.get->oid = oid; + + priv->current_command_oid = oid; + ret = rndis_command(dev, u.header, buflen); + priv->current_command_oid = 0; + if (ret < 0) + netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n", + __func__, oid_to_string(oid), ret, + le32_to_cpu(u.get_c->status)); + + if (ret == 0) { + memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len); + + ret = le32_to_cpu(u.get_c->len); + if (ret > *len) + *len = ret; + + ret = rndis_error_status(u.get_c->status); + if (ret < 0) + netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n", + __func__, oid_to_string(oid), + le32_to_cpu(u.get_c->status), ret); + } + + mutex_unlock(&priv->command_lock); + + if (u.buf != priv->command_buffer) + kfree(u.buf); + return ret; +} + +static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev); + union { + void *buf; + struct rndis_msg_hdr *header; + struct rndis_set *set; + struct rndis_set_c *set_c; + } u; + int ret, buflen; + + buflen = len + sizeof(*u.set); + if (buflen < CONTROL_BUFFER_SIZE) + buflen = CONTROL_BUFFER_SIZE; + + if (buflen > COMMAND_BUFFER_SIZE) { + u.buf = kmalloc(buflen, GFP_KERNEL); + if (!u.buf) + return -ENOMEM; + } else { + u.buf = priv->command_buffer; + } + + mutex_lock(&priv->command_lock); + + memset(u.set, 0, sizeof *u.set); + u.set->msg_type = RNDIS_MSG_SET; + u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len); + u.set->oid = oid; + u.set->len = cpu_to_le32(len); + u.set->offset = cpu_to_le32(sizeof(*u.set) - 8); + u.set->handle = cpu_to_le32(0); + memcpy(u.buf + sizeof(*u.set), data, len); + + priv->current_command_oid = oid; + ret = rndis_command(dev, u.header, buflen); + priv->current_command_oid = 0; + if (ret < 0) + netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n", + 
__func__, oid_to_string(oid), ret, + le32_to_cpu(u.set_c->status)); + + if (ret == 0) { + ret = rndis_error_status(u.set_c->status); + + if (ret < 0) + netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n", + __func__, oid_to_string(oid), + le32_to_cpu(u.set_c->status), ret); + } + + mutex_unlock(&priv->command_lock); + + if (u.buf != priv->command_buffer) + kfree(u.buf); + return ret; +} + +static int rndis_reset(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct rndis_reset *reset; + int ret; + + mutex_lock(&priv->command_lock); + + reset = (void *)priv->command_buffer; + memset(reset, 0, sizeof(*reset)); + reset->msg_type = RNDIS_MSG_RESET; + reset->msg_len = cpu_to_le32(sizeof(*reset)); + priv->current_command_oid = 0; + ret = rndis_command(usbdev, (void *)reset, CONTROL_BUFFER_SIZE); + + mutex_unlock(&priv->command_lock); + + if (ret < 0) + return ret; + return 0; +} + +/* + * Specs say that we can only set config parameters only soon after device + * initialization. + * value_type: 0 = u32, 2 = unicode string + */ +static int rndis_set_config_parameter(struct usbnet *dev, char *param, + int value_type, void *value) +{ + struct ndis_config_param *infobuf; + int value_len, info_len, param_len, ret, i; + __le16 *unibuf; + __le32 *dst_value; + + if (value_type == 0) + value_len = sizeof(__le32); + else if (value_type == 2) + value_len = strlen(value) * sizeof(__le16); + else + return -EINVAL; + + param_len = strlen(param) * sizeof(__le16); + info_len = sizeof(*infobuf) + param_len + value_len; + +#ifdef DEBUG + info_len += 12; +#endif + infobuf = kmalloc(info_len, GFP_KERNEL); + if (!infobuf) + return -ENOMEM; + +#ifdef DEBUG + info_len -= 12; + /* extra 12 bytes are for padding (debug output) */ + memset(infobuf, 0xCC, info_len + 12); +#endif + + if (value_type == 2) + netdev_dbg(dev->net, "setting config parameter: %s, value: %s\n", + param, (u8 *)value); + else + netdev_dbg(dev->net, "setting config parameter: %s, value: %d\n", + param, *(u32 *)value); + + infobuf->name_offs = cpu_to_le32(sizeof(*infobuf)); + infobuf->name_length = cpu_to_le32(param_len); + infobuf->type = cpu_to_le32(value_type); + infobuf->value_offs = cpu_to_le32(sizeof(*infobuf) + param_len); + infobuf->value_length = cpu_to_le32(value_len); + + /* simple string to unicode string conversion */ + unibuf = (void *)infobuf + sizeof(*infobuf); + for (i = 0; i < param_len / sizeof(__le16); i++) + unibuf[i] = cpu_to_le16(param[i]); + + if (value_type == 2) { + unibuf = (void *)infobuf + sizeof(*infobuf) + param_len; + for (i = 0; i < value_len / sizeof(__le16); i++) + unibuf[i] = cpu_to_le16(((u8 *)value)[i]); + } else { + dst_value = (void *)infobuf + sizeof(*infobuf) + param_len; + *dst_value = cpu_to_le32(*(u32 *)value); + } + +#ifdef DEBUG + netdev_dbg(dev->net, "info buffer (len: %d)\n", info_len); + for (i = 0; i < info_len; i += 12) { + u32 *tmp = (u32 *)((u8 *)infobuf + i); + netdev_dbg(dev->net, "%08X:%08X:%08X\n", + cpu_to_be32(tmp[0]), + cpu_to_be32(tmp[1]), + cpu_to_be32(tmp[2])); + } +#endif + + ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER, + infobuf, info_len); + if (ret != 0) + netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n", + ret); + + kfree(infobuf); + return ret; +} + +static int rndis_set_config_parameter_str(struct usbnet *dev, + char *param, char *value) +{ + return rndis_set_config_parameter(dev, param, 2, value); +} + +/* + * data conversion functions + */ +static int level_to_qual(int level) +{ + int 
qual = 100 * (level - WL_NOISE) / (WL_SIGMAX - WL_NOISE); + return qual >= 0 ? (qual <= 100 ? qual : 100) : 0; +} + +/* + * common functions + */ +static int set_infra_mode(struct usbnet *usbdev, int mode); +static void restore_keys(struct usbnet *usbdev); +static int rndis_check_bssid_list(struct usbnet *usbdev); + +static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + int ret; + + ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid)); + if (ret < 0) { + netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret); + return ret; + } + if (ret == 0) { + memcpy(&priv->essid, ssid, sizeof(priv->essid)); + priv->radio_on = true; + netdev_dbg(usbdev->net, "%s(): radio_on = true\n", __func__); + } + + return ret; +} + +static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) +{ + int ret; + + ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN); + if (ret < 0) { + netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n", + bssid, ret); + return ret; + } + + return ret; +} + +static int clear_bssid(struct usbnet *usbdev) +{ + u8 broadcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + + return set_bssid(usbdev, broadcast_mac); +} + +static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) +{ + int ret, len; + + len = ETH_ALEN; + ret = rndis_query_oid(usbdev, OID_802_11_BSSID, bssid, &len); + + if (ret != 0) + memset(bssid, 0, ETH_ALEN); + + return ret; +} + +static int get_association_info(struct usbnet *usbdev, + struct ndis_80211_assoc_info *info, int len) +{ + return rndis_query_oid(usbdev, OID_802_11_ASSOCIATION_INFORMATION, + info, &len); +} + +static bool is_associated(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + u8 bssid[ETH_ALEN]; + int ret; + + if (!priv->radio_on) + return false; + + ret = get_bssid(usbdev, bssid); + + return (ret == 0 && !is_zero_ether_addr(bssid)); +} + +static int disassociate(struct usbnet *usbdev, bool reset_ssid) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ndis_80211_ssid ssid; + int i, ret = 0; + + if (priv->radio_on) { + ret = rndis_set_oid(usbdev, OID_802_11_DISASSOCIATE, NULL, 0); + if (ret == 0) { + priv->radio_on = false; + netdev_dbg(usbdev->net, "%s(): radio_on = false\n", + __func__); + + if (reset_ssid) + msleep(100); + } + } + + /* disassociate causes radio to be turned off; if reset_ssid + * is given, set random ssid to enable radio */ + if (reset_ssid) { + /* Set device to infrastructure mode so we don't get ad-hoc + * 'media connect' indications with the random ssid. 
+ */ + set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA); + + ssid.length = cpu_to_le32(sizeof(ssid.essid)); + get_random_bytes(&ssid.essid[2], sizeof(ssid.essid)-2); + ssid.essid[0] = 0x1; + ssid.essid[1] = 0xff; + for (i = 2; i < sizeof(ssid.essid); i++) + ssid.essid[i] = 0x1 + (ssid.essid[i] * 0xfe / 0xff); + ret = set_essid(usbdev, &ssid); + } + return ret; +} + +static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version, + enum nl80211_auth_type auth_type, int keymgmt) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + __le32 tmp; + int auth_mode, ret; + + netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x authalg=0x%x keymgmt=0x%x\n", + __func__, wpa_version, auth_type, keymgmt); + + if (wpa_version & NL80211_WPA_VERSION_2) { + if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X) + auth_mode = NDIS_80211_AUTH_WPA2; + else + auth_mode = NDIS_80211_AUTH_WPA2_PSK; + } else if (wpa_version & NL80211_WPA_VERSION_1) { + if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X) + auth_mode = NDIS_80211_AUTH_WPA; + else if (keymgmt & RNDIS_WLAN_KEY_MGMT_PSK) + auth_mode = NDIS_80211_AUTH_WPA_PSK; + else + auth_mode = NDIS_80211_AUTH_WPA_NONE; + } else if (auth_type == NL80211_AUTHTYPE_SHARED_KEY) + auth_mode = NDIS_80211_AUTH_SHARED; + else if (auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) + auth_mode = NDIS_80211_AUTH_OPEN; + else if (auth_type == NL80211_AUTHTYPE_AUTOMATIC) + auth_mode = NDIS_80211_AUTH_AUTO_SWITCH; + else + return -ENOTSUPP; + + tmp = cpu_to_le32(auth_mode); + ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp, + sizeof(tmp)); + if (ret != 0) { + netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n", + ret); + return ret; + } + + priv->wpa_version = wpa_version; + priv->wpa_auth_type = auth_type; + priv->wpa_keymgmt = keymgmt; + + return 0; +} + +static int set_priv_filter(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + __le32 tmp; + + netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x\n", + __func__, priv->wpa_version); + + if (priv->wpa_version & NL80211_WPA_VERSION_2 || + priv->wpa_version & NL80211_WPA_VERSION_1) + tmp = cpu_to_le32(NDIS_80211_PRIV_8021X_WEP); + else + tmp = cpu_to_le32(NDIS_80211_PRIV_ACCEPT_ALL); + + return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp, + sizeof(tmp)); +} + +static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + __le32 tmp; + int encr_mode, ret; + + netdev_dbg(usbdev->net, "%s(): cipher_pair=0x%x cipher_group=0x%x\n", + __func__, pairwise, groupwise); + + if (pairwise & RNDIS_WLAN_ALG_CCMP) + encr_mode = NDIS_80211_ENCR_CCMP_ENABLED; + else if (pairwise & RNDIS_WLAN_ALG_TKIP) + encr_mode = NDIS_80211_ENCR_TKIP_ENABLED; + else if (pairwise & RNDIS_WLAN_ALG_WEP) + encr_mode = NDIS_80211_ENCR_WEP_ENABLED; + else if (groupwise & RNDIS_WLAN_ALG_CCMP) + encr_mode = NDIS_80211_ENCR_CCMP_ENABLED; + else if (groupwise & RNDIS_WLAN_ALG_TKIP) + encr_mode = NDIS_80211_ENCR_TKIP_ENABLED; + else + encr_mode = NDIS_80211_ENCR_DISABLED; + + tmp = cpu_to_le32(encr_mode); + ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp, + sizeof(tmp)); + if (ret != 0) { + netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n", + ret); + return ret; + } + + priv->wpa_cipher_pair = pairwise; + priv->wpa_cipher_group = groupwise; + return 0; +} + +static int set_infra_mode(struct usbnet *usbdev, int mode) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + __le32 tmp; + int ret; + 
+ netdev_dbg(usbdev->net, "%s(): infra_mode=0x%x\n", + __func__, priv->infra_mode); + + tmp = cpu_to_le32(mode); + ret = rndis_set_oid(usbdev, OID_802_11_INFRASTRUCTURE_MODE, &tmp, + sizeof(tmp)); + if (ret != 0) { + netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n", + ret); + return ret; + } + + /* NDIS drivers clear keys when infrastructure mode is + * changed. But Linux tools assume otherwise. So set the + * keys */ + restore_keys(usbdev); + + priv->infra_mode = mode; + return 0; +} + +static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold) +{ + __le32 tmp; + + netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold); + + if (rts_threshold < 0 || rts_threshold > 2347) + rts_threshold = 2347; + + tmp = cpu_to_le32(rts_threshold); + return rndis_set_oid(usbdev, OID_802_11_RTS_THRESHOLD, &tmp, + sizeof(tmp)); +} + +static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold) +{ + __le32 tmp; + + netdev_dbg(usbdev->net, "%s(): %i\n", __func__, frag_threshold); + + if (frag_threshold < 256 || frag_threshold > 2346) + frag_threshold = 2346; + + tmp = cpu_to_le32(frag_threshold); + return rndis_set_oid(usbdev, OID_802_11_FRAGMENTATION_THRESHOLD, &tmp, + sizeof(tmp)); +} + +static void set_default_iw_params(struct usbnet *usbdev) +{ + set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA); + set_auth_mode(usbdev, 0, NL80211_AUTHTYPE_OPEN_SYSTEM, + RNDIS_WLAN_KEY_MGMT_NONE); + set_priv_filter(usbdev); + set_encr_mode(usbdev, RNDIS_WLAN_ALG_NONE, RNDIS_WLAN_ALG_NONE); +} + +static int deauthenticate(struct usbnet *usbdev) +{ + int ret; + + ret = disassociate(usbdev, true); + set_default_iw_params(usbdev); + return ret; +} + +static int set_channel(struct usbnet *usbdev, int channel) +{ + struct ndis_80211_conf config; + unsigned int dsconfig; + int len, ret; + + netdev_dbg(usbdev->net, "%s(%d)\n", __func__, channel); + + /* this OID is valid only when not associated */ + if (is_associated(usbdev)) + return 0; + + dsconfig = ieee80211_dsss_chan_to_freq(channel) * 1000; + + len = sizeof(config); + ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); + if (ret < 0) { + netdev_dbg(usbdev->net, "%s(): querying configuration failed\n", + __func__); + return ret; + } + + config.ds_config = cpu_to_le32(dsconfig); + ret = rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config, + sizeof(config)); + + netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret); + + return ret; +} + +/* index must be 0 - N, as per NDIS */ +static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, + int index) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ndis_80211_wep_key ndis_key; + u32 cipher; + int ret; + + netdev_dbg(usbdev->net, "%s(idx: %d, len: %d)\n", + __func__, index, key_len); + + if ((key_len != 5 && key_len != 13) || index < 0 || index > 3) + return -EINVAL; + + if (key_len == 5) + cipher = WLAN_CIPHER_SUITE_WEP40; + else + cipher = WLAN_CIPHER_SUITE_WEP104; + + memset(&ndis_key, 0, sizeof(ndis_key)); + + ndis_key.size = cpu_to_le32(sizeof(ndis_key)); + ndis_key.length = cpu_to_le32(key_len); + ndis_key.index = cpu_to_le32(index); + memcpy(&ndis_key.material, key, key_len); + + if (index == priv->encr_tx_key_index) { + ndis_key.index |= NDIS_80211_ADDWEP_TRANSMIT_KEY; + ret = set_encr_mode(usbdev, RNDIS_WLAN_ALG_WEP, + RNDIS_WLAN_ALG_NONE); + if (ret) + netdev_warn(usbdev->net, "encryption couldn't be enabled (%08X)\n", + ret); + } + + ret = rndis_set_oid(usbdev, OID_802_11_ADD_WEP, &ndis_key, + 
sizeof(ndis_key)); + if (ret != 0) { + netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n", + index + 1, ret); + return ret; + } + + priv->encr_keys[index].len = key_len; + priv->encr_keys[index].cipher = cipher; + memcpy(&priv->encr_keys[index].material, key, key_len); + memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN); + + return 0; +} + +static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len, + int index, const u8 *addr, const u8 *rx_seq, + int seq_len, u32 cipher, __le32 flags) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ndis_80211_key ndis_key; + bool is_addr_ok; + int ret; + + if (index < 0 || index >= 4) { + netdev_dbg(usbdev->net, "%s(): index out of range (%i)\n", + __func__, index); + return -EINVAL; + } + if (key_len > sizeof(ndis_key.material) || key_len < 0) { + netdev_dbg(usbdev->net, "%s(): key length out of range (%i)\n", + __func__, key_len); + return -EINVAL; + } + if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) { + if (!rx_seq || seq_len <= 0) { + netdev_dbg(usbdev->net, "%s(): recv seq flag without buffer\n", + __func__); + return -EINVAL; + } + if (rx_seq && seq_len > sizeof(ndis_key.rsc)) { + netdev_dbg(usbdev->net, "%s(): too big recv seq buffer\n", __func__); + return -EINVAL; + } + } + + is_addr_ok = addr && !is_zero_ether_addr(addr) && + !is_broadcast_ether_addr(addr); + if ((flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) && !is_addr_ok) { + netdev_dbg(usbdev->net, "%s(): pairwise but bssid invalid (%pM)\n", + __func__, addr); + return -EINVAL; + } + + netdev_dbg(usbdev->net, "%s(%i): flags:%i%i%i\n", + __func__, index, + !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY), + !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY), + !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ)); + + memset(&ndis_key, 0, sizeof(ndis_key)); + + ndis_key.size = cpu_to_le32(sizeof(ndis_key) - + sizeof(ndis_key.material) + key_len); + ndis_key.length = cpu_to_le32(key_len); + ndis_key.index = cpu_to_le32(index) | flags; + + if (cipher == WLAN_CIPHER_SUITE_TKIP && key_len == 32) { + /* wpa_supplicant gives us the Michael MIC RX/TX keys in + * different order than NDIS spec, so swap the order here. 
*/ + memcpy(ndis_key.material, key, 16); + memcpy(ndis_key.material + 16, key + 24, 8); + memcpy(ndis_key.material + 24, key + 16, 8); + } else + memcpy(ndis_key.material, key, key_len); + + if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) + memcpy(ndis_key.rsc, rx_seq, seq_len); + + if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) { + /* pairwise key */ + memcpy(ndis_key.bssid, addr, ETH_ALEN); + } else { + /* group key */ + if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) + memset(ndis_key.bssid, 0xff, ETH_ALEN); + else + get_bssid(usbdev, ndis_key.bssid); + } + + ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key, + le32_to_cpu(ndis_key.size)); + netdev_dbg(usbdev->net, "%s(): OID_802_11_ADD_KEY -> %08X\n", + __func__, ret); + if (ret != 0) + return ret; + + memset(&priv->encr_keys[index], 0, sizeof(priv->encr_keys[index])); + priv->encr_keys[index].len = key_len; + priv->encr_keys[index].cipher = cipher; + memcpy(&priv->encr_keys[index].material, key, key_len); + if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) + memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN); + else + memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN); + + if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY) + priv->encr_tx_key_index = index; + + return 0; +} + +static int restore_key(struct usbnet *usbdev, int key_idx) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct rndis_wlan_encr_key key; + + if (is_wpa_key(priv, key_idx)) + return 0; + + key = priv->encr_keys[key_idx]; + + netdev_dbg(usbdev->net, "%s(): %i:%i\n", __func__, key_idx, key.len); + + if (key.len == 0) + return 0; + + return add_wep_key(usbdev, key.material, key.len, key_idx); +} + +static void restore_keys(struct usbnet *usbdev) +{ + int i; + + for (i = 0; i < 4; i++) + restore_key(usbdev, i); +} + +static void clear_key(struct rndis_wlan_private *priv, int idx) +{ + memset(&priv->encr_keys[idx], 0, sizeof(priv->encr_keys[idx])); +} + +/* remove_key is for both wep and wpa */ +static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ndis_80211_remove_key remove_key; + __le32 keyindex; + bool is_wpa; + int ret; + + if (priv->encr_keys[index].len == 0) + return 0; + + is_wpa = is_wpa_key(priv, index); + + netdev_dbg(usbdev->net, "%s(): %i:%s:%i\n", + __func__, index, is_wpa ? 
"wpa" : "wep", + priv->encr_keys[index].len); + + clear_key(priv, index); + + if (is_wpa) { + remove_key.size = cpu_to_le32(sizeof(remove_key)); + remove_key.index = cpu_to_le32(index); + if (bssid) { + /* pairwise key */ + if (!is_broadcast_ether_addr(bssid)) + remove_key.index |= + NDIS_80211_ADDKEY_PAIRWISE_KEY; + memcpy(remove_key.bssid, bssid, + sizeof(remove_key.bssid)); + } else + memset(remove_key.bssid, 0xff, + sizeof(remove_key.bssid)); + + ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_KEY, &remove_key, + sizeof(remove_key)); + if (ret != 0) + return ret; + } else { + keyindex = cpu_to_le32(index); + ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_WEP, &keyindex, + sizeof(keyindex)); + if (ret != 0) { + netdev_warn(usbdev->net, + "removing encryption key %d failed (%08X)\n", + index, ret); + return ret; + } + } + + /* if it is transmit key, disable encryption */ + if (index == priv->encr_tx_key_index) + set_encr_mode(usbdev, RNDIS_WLAN_ALG_NONE, RNDIS_WLAN_ALG_NONE); + + return 0; +} + +static void set_multicast_list(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct dev_mc_list *mclist; + __le32 filter, basefilter; + int ret; + char *mc_addrs = NULL; + int mc_count; + + basefilter = filter = RNDIS_PACKET_TYPE_DIRECTED | + RNDIS_PACKET_TYPE_BROADCAST; + + if (usbdev->net->flags & IFF_PROMISC) { + filter |= RNDIS_PACKET_TYPE_PROMISCUOUS | + RNDIS_PACKET_TYPE_ALL_LOCAL; + } else if (usbdev->net->flags & IFF_ALLMULTI) { + filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; + } + + if (filter != basefilter) + goto set_filter; + + /* + * mc_list should be accessed holding the lock, so copy addresses to + * local buffer first. + */ + netif_addr_lock_bh(usbdev->net); + mc_count = netdev_mc_count(usbdev->net); + if (mc_count > priv->multicast_size) { + filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; + } else if (mc_count) { + int i = 0; + + mc_addrs = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC); + if (!mc_addrs) { + netdev_warn(usbdev->net, + "couldn't alloc %d bytes of memory\n", + mc_count * ETH_ALEN); + netif_addr_unlock_bh(usbdev->net); + return; + } + + netdev_for_each_mc_addr(mclist, usbdev->net) + memcpy(mc_addrs + i++ * ETH_ALEN, + mclist->dmi_addr, ETH_ALEN); + } + netif_addr_unlock_bh(usbdev->net); + + if (filter != basefilter) + goto set_filter; + + if (mc_count) { + ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, mc_addrs, + mc_count * ETH_ALEN); + kfree(mc_addrs); + if (ret == 0) + filter |= RNDIS_PACKET_TYPE_MULTICAST; + else + filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST; + + netdev_dbg(usbdev->net, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n", + mc_count, priv->multicast_size, ret); + } + +set_filter: + ret = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter, + sizeof(filter)); + if (ret < 0) { + netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n", + le32_to_cpu(filter)); + } + + netdev_dbg(usbdev->net, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n", + le32_to_cpu(filter), ret); +} + +/* + * cfg80211 ops + */ +static int rndis_change_virtual_intf(struct wiphy *wiphy, + struct net_device *dev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + int mode; + + switch (type) { + case NL80211_IFTYPE_ADHOC: + mode = NDIS_80211_INFRA_ADHOC; + break; + case NL80211_IFTYPE_STATION: + mode = NDIS_80211_INFRA_INFRA; + break; + default: + return -EINVAL; + } + + priv->wdev.iftype = type; + + return 
set_infra_mode(usbdev, mode); +} + +static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + int err; + + if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { + err = set_frag_threshold(usbdev, wiphy->frag_threshold); + if (err < 0) + return err; + } + + if (changed & WIPHY_PARAM_RTS_THRESHOLD) { + err = set_rts_threshold(usbdev, wiphy->rts_threshold); + if (err < 0) + return err; + } + + return 0; +} + +static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type, + int dbm) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + netdev_dbg(usbdev->net, "%s(): type:0x%x dbm:%i\n", + __func__, type, dbm); + + /* Device doesn't support changing txpower after initialization, only + * turn off/on radio. Support 'auto' mode and setting same dBm that is + * currently used. + */ + if (type == TX_POWER_AUTOMATIC || dbm == get_bcm4320_power_dbm(priv)) { + if (!priv->radio_on) + disassociate(usbdev, true); /* turn on radio */ + + return 0; + } + + return -ENOTSUPP; +} + +static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + *dbm = get_bcm4320_power_dbm(priv); + + netdev_dbg(usbdev->net, "%s(): dbm:%i\n", __func__, *dbm); + + return 0; +} + +#define SCAN_DELAY_JIFFIES (6 * HZ) +static int rndis_scan(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_scan_request *request) +{ + struct usbnet *usbdev = netdev_priv(dev); + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + int ret; + __le32 tmp; + + netdev_dbg(usbdev->net, "cfg80211.scan\n"); + + /* Get current bssid list from device before new scan, as new scan + * clears internal bssid list. 
+ */ + rndis_check_bssid_list(usbdev); + + if (!request) + return -EINVAL; + + if (priv->scan_request && priv->scan_request != request) + return -EBUSY; + + priv->scan_request = request; + + tmp = cpu_to_le32(1); + ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, + sizeof(tmp)); + if (ret == 0) { + /* Wait before retrieving scan results from device */ + queue_delayed_work(priv->workqueue, &priv->scan_work, + SCAN_DELAY_JIFFIES); + } + + return ret; +} + +static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev, + struct ndis_80211_bssid_ex *bssid) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ieee80211_channel *channel; + s32 signal; + u64 timestamp; + u16 capability; + u16 beacon_interval; + struct ndis_80211_fixed_ies *fixed; + int ie_len, bssid_len; + u8 *ie; + + netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM]\n", + bssid->ssid.essid, bssid->mac); + + /* parse bssid structure */ + bssid_len = le32_to_cpu(bssid->length); + + if (bssid_len < sizeof(struct ndis_80211_bssid_ex) + + sizeof(struct ndis_80211_fixed_ies)) + return NULL; + + fixed = (struct ndis_80211_fixed_ies *)bssid->ies; + + ie = (void *)(bssid->ies + sizeof(struct ndis_80211_fixed_ies)); + ie_len = min(bssid_len - (int)sizeof(*bssid), + (int)le32_to_cpu(bssid->ie_length)); + ie_len -= sizeof(struct ndis_80211_fixed_ies); + if (ie_len < 0) + return NULL; + + /* extract data for cfg80211_inform_bss */ + channel = ieee80211_get_channel(priv->wdev.wiphy, + KHZ_TO_MHZ(le32_to_cpu(bssid->config.ds_config))); + if (!channel) + return NULL; + + signal = level_to_qual(le32_to_cpu(bssid->rssi)); + timestamp = le64_to_cpu(*(__le64 *)fixed->timestamp); + capability = le16_to_cpu(fixed->capabilities); + beacon_interval = le16_to_cpu(fixed->beacon_interval); + + return cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac, + timestamp, capability, beacon_interval, ie, ie_len, signal, + GFP_KERNEL); +} + +static int rndis_check_bssid_list(struct usbnet *usbdev) +{ + void *buf = NULL; + struct ndis_80211_bssid_list_ex *bssid_list; + struct ndis_80211_bssid_ex *bssid; + int ret = -EINVAL, len, count, bssid_len; + bool resized = false; + + netdev_dbg(usbdev->net, "check_bssid_list\n"); + + len = CONTROL_BUFFER_SIZE; +resize_buf: + buf = kmalloc(len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out; + } + + ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len); + if (ret != 0) + goto out; + + if (!resized && len > CONTROL_BUFFER_SIZE) { + resized = true; + kfree(buf); + goto resize_buf; + } + + bssid_list = buf; + bssid = bssid_list->bssid; + bssid_len = le32_to_cpu(bssid->length); + count = le32_to_cpu(bssid_list->num_items); + netdev_dbg(usbdev->net, "check_bssid_list: %d BSSIDs found (buflen: %d)\n", + count, len); + + while (count && ((void *)bssid + bssid_len) <= (buf + len)) { + rndis_bss_info_update(usbdev, bssid); + + bssid = (void *)bssid + bssid_len; + bssid_len = le32_to_cpu(bssid->length); + count--; + } + +out: + kfree(buf); + return ret; +} + +static void rndis_get_scan_results(struct work_struct *work) +{ + struct rndis_wlan_private *priv = + container_of(work, struct rndis_wlan_private, scan_work.work); + struct usbnet *usbdev = priv->usbdev; + int ret; + + netdev_dbg(usbdev->net, "get_scan_results\n"); + + if (!priv->scan_request) + return; + + ret = rndis_check_bssid_list(usbdev); + + cfg80211_scan_done(priv->scan_request, ret < 0); + + priv->scan_request = NULL; +} + +static int rndis_connect(struct wiphy *wiphy, struct net_device *dev, + 
struct cfg80211_connect_params *sme) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + struct ieee80211_channel *channel = sme->channel; + struct ndis_80211_ssid ssid; + int pairwise = RNDIS_WLAN_ALG_NONE; + int groupwise = RNDIS_WLAN_ALG_NONE; + int keymgmt = RNDIS_WLAN_KEY_MGMT_NONE; + int length, i, ret, chan = -1; + + if (channel) + chan = ieee80211_frequency_to_channel(channel->center_freq); + + groupwise = rndis_cipher_to_alg(sme->crypto.cipher_group); + for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) + pairwise |= + rndis_cipher_to_alg(sme->crypto.ciphers_pairwise[i]); + + if (sme->crypto.n_ciphers_pairwise > 0 && + pairwise == RNDIS_WLAN_ALG_NONE) { + netdev_err(usbdev->net, "Unsupported pairwise cipher\n"); + return -ENOTSUPP; + } + + for (i = 0; i < sme->crypto.n_akm_suites; i++) + keymgmt |= + rndis_akm_suite_to_key_mgmt(sme->crypto.akm_suites[i]); + + if (sme->crypto.n_akm_suites > 0 && + keymgmt == RNDIS_WLAN_KEY_MGMT_NONE) { + netdev_err(usbdev->net, "Invalid keymgmt\n"); + return -ENOTSUPP; + } + + netdev_dbg(usbdev->net, "cfg80211.connect('%.32s':[%pM]:%d:[%d,0x%x:0x%x]:[0x%x:0x%x]:0x%x)\n", + sme->ssid, sme->bssid, chan, + sme->privacy, sme->crypto.wpa_versions, sme->auth_type, + groupwise, pairwise, keymgmt); + + if (is_associated(usbdev)) + disassociate(usbdev, false); + + ret = set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: set_infra_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + ret = set_auth_mode(usbdev, sme->crypto.wpa_versions, sme->auth_type, + keymgmt); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: set_auth_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + set_priv_filter(usbdev); + + ret = set_encr_mode(usbdev, pairwise, groupwise); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: set_encr_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + if (channel) { + ret = set_channel(usbdev, chan); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: set_channel failed, %d\n", + ret); + goto err_turn_radio_on; + } + } + + if (sme->key && ((groupwise | pairwise) & RNDIS_WLAN_ALG_WEP)) { + priv->encr_tx_key_index = sme->key_idx; + ret = add_wep_key(usbdev, sme->key, sme->key_len, sme->key_idx); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: add_wep_key failed, %d (%d, %d)\n", + ret, sme->key_len, sme->key_idx); + goto err_turn_radio_on; + } + } + + if (sme->bssid && !is_zero_ether_addr(sme->bssid) && + !is_broadcast_ether_addr(sme->bssid)) { + ret = set_bssid(usbdev, sme->bssid); + if (ret < 0) { + netdev_dbg(usbdev->net, "connect: set_bssid failed, %d\n", + ret); + goto err_turn_radio_on; + } + } else + clear_bssid(usbdev); + + length = sme->ssid_len; + if (length > NDIS_802_11_LENGTH_SSID) + length = NDIS_802_11_LENGTH_SSID; + + memset(&ssid, 0, sizeof(ssid)); + ssid.length = cpu_to_le32(length); + memcpy(ssid.essid, sme->ssid, length); + + /* Pause and purge rx queue, so we don't pass packets before + * 'media connect'-indication. 
+ */ + usbnet_pause_rx(usbdev); + usbnet_purge_paused_rxq(usbdev); + + ret = set_essid(usbdev, &ssid); + if (ret < 0) + netdev_dbg(usbdev->net, "connect: set_essid failed, %d\n", ret); + return ret; + +err_turn_radio_on: + disassociate(usbdev, true); + + return ret; +} + +static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code); + + priv->connected = false; + memset(priv->bssid, 0, ETH_ALEN); + + return deauthenticate(usbdev); +} + +static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + struct ieee80211_channel *channel = params->channel; + struct ndis_80211_ssid ssid; + enum nl80211_auth_type auth_type; + int ret, alg, length, chan = -1; + + if (channel) + chan = ieee80211_frequency_to_channel(channel->center_freq); + + /* TODO: How to handle ad-hoc encryption? + * connect() has *key, join_ibss() doesn't. RNDIS requires key to be + * pre-shared for encryption (open/shared/wpa), is key set before + * join_ibss? Which auth_type to use (not in params)? What about WPA? + */ + if (params->privacy) { + auth_type = NL80211_AUTHTYPE_SHARED_KEY; + alg = RNDIS_WLAN_ALG_WEP; + } else { + auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; + alg = RNDIS_WLAN_ALG_NONE; + } + + netdev_dbg(usbdev->net, "cfg80211.join_ibss('%.32s':[%pM]:%d:%d)\n", + params->ssid, params->bssid, chan, params->privacy); + + if (is_associated(usbdev)) + disassociate(usbdev, false); + + ret = set_infra_mode(usbdev, NDIS_80211_INFRA_ADHOC); + if (ret < 0) { + netdev_dbg(usbdev->net, "join_ibss: set_infra_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + ret = set_auth_mode(usbdev, 0, auth_type, RNDIS_WLAN_KEY_MGMT_NONE); + if (ret < 0) { + netdev_dbg(usbdev->net, "join_ibss: set_auth_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + set_priv_filter(usbdev); + + ret = set_encr_mode(usbdev, alg, RNDIS_WLAN_ALG_NONE); + if (ret < 0) { + netdev_dbg(usbdev->net, "join_ibss: set_encr_mode failed, %d\n", + ret); + goto err_turn_radio_on; + } + + if (channel) { + ret = set_channel(usbdev, chan); + if (ret < 0) { + netdev_dbg(usbdev->net, "join_ibss: set_channel failed, %d\n", + ret); + goto err_turn_radio_on; + } + } + + if (params->bssid && !is_zero_ether_addr(params->bssid) && + !is_broadcast_ether_addr(params->bssid)) { + ret = set_bssid(usbdev, params->bssid); + if (ret < 0) { + netdev_dbg(usbdev->net, "join_ibss: set_bssid failed, %d\n", + ret); + goto err_turn_radio_on; + } + } else + clear_bssid(usbdev); + + length = params->ssid_len; + if (length > NDIS_802_11_LENGTH_SSID) + length = NDIS_802_11_LENGTH_SSID; + + memset(&ssid, 0, sizeof(ssid)); + ssid.length = cpu_to_le32(length); + memcpy(ssid.essid, params->ssid, length); + + /* Don't need to pause rx queue for ad-hoc. 
*/ + usbnet_purge_paused_rxq(usbdev); + usbnet_resume_rx(usbdev); + + ret = set_essid(usbdev, &ssid); + if (ret < 0) + netdev_dbg(usbdev->net, "join_ibss: set_essid failed, %d\n", + ret); + return ret; + +err_turn_radio_on: + disassociate(usbdev, true); + + return ret; +} + +static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n"); + + priv->connected = false; + memset(priv->bssid, 0, ETH_ALEN); + + return deauthenticate(usbdev); +} + +static int rndis_set_channel(struct wiphy *wiphy, + struct ieee80211_channel *chan, enum nl80211_channel_type channel_type) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + return set_channel(usbdev, + ieee80211_frequency_to_channel(chan->center_freq)); +} + +static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr, + struct key_params *params) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + __le32 flags; + + netdev_dbg(usbdev->net, "%s(%i, %pM, %08x)\n", + __func__, key_index, mac_addr, params->cipher); + + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + return add_wep_key(usbdev, params->key, params->key_len, + key_index); + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + flags = 0; + + if (params->seq && params->seq_len > 0) + flags |= NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ; + if (mac_addr) + flags |= NDIS_80211_ADDKEY_PAIRWISE_KEY | + NDIS_80211_ADDKEY_TRANSMIT_KEY; + + return add_wpa_key(usbdev, params->key, params->key_len, + key_index, mac_addr, params->seq, + params->seq_len, params->cipher, flags); + default: + netdev_dbg(usbdev->net, "%s(): unsupported cipher %08x\n", + __func__, params->cipher); + return -ENOTSUPP; + } +} + +static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + netdev_dbg(usbdev->net, "%s(%i, %pM)\n", __func__, key_index, mac_addr); + + return remove_key(usbdev, key_index, mac_addr); +} + +static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + struct rndis_wlan_encr_key key; + + netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index); + + priv->encr_tx_key_index = key_index; + + key = priv->encr_keys[key_index]; + + return add_wep_key(usbdev, key.material, key.len, key_index); +} + +static void rndis_fill_station_info(struct usbnet *usbdev, + struct station_info *sinfo) +{ + __le32 linkspeed, rssi; + int ret, len; + + memset(sinfo, 0, sizeof(*sinfo)); + + len = sizeof(linkspeed); + ret = rndis_query_oid(usbdev, OID_GEN_LINK_SPEED, &linkspeed, &len); + if (ret == 0) { + sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000; + sinfo->filled |= STATION_INFO_TX_BITRATE; + } + + len = sizeof(rssi); + ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); + if (ret == 0) { + sinfo->signal = level_to_qual(le32_to_cpu(rssi)); + sinfo->filled |= STATION_INFO_SIGNAL; + } +} + +static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_info *sinfo) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev 
= priv->usbdev; + + if (compare_ether_addr(priv->bssid, mac)) + return -ENOENT; + + rndis_fill_station_info(usbdev, sinfo); + + return 0; +} + +static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) +{ + struct rndis_wlan_private *priv = wiphy_priv(wiphy); + struct usbnet *usbdev = priv->usbdev; + + if (idx != 0) + return -ENOENT; + + memcpy(mac, priv->bssid, ETH_ALEN); + + rndis_fill_station_info(usbdev, sinfo); + + return 0; +} + +/* + * workers, indication handlers, device poller + */ +static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct ndis_80211_assoc_info *info; + u8 assoc_buf[sizeof(*info) + IW_CUSTOM_MAX + 32]; + u8 bssid[ETH_ALEN]; + int resp_ie_len, req_ie_len; + u8 *req_ie, *resp_ie; + int ret, offset; + bool roamed = false; + + if (priv->infra_mode == NDIS_80211_INFRA_INFRA && priv->connected) { + /* received media connect indication while connected, either + * device reassociated with same AP or roamed to new. */ + roamed = true; + } + + req_ie_len = 0; + resp_ie_len = 0; + req_ie = NULL; + resp_ie = NULL; + + if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { + memset(assoc_buf, 0, sizeof(assoc_buf)); + info = (void *)assoc_buf; + + /* Get association info IEs from device and send them back to + * userspace. */ + ret = get_association_info(usbdev, info, sizeof(assoc_buf)); + if (!ret) { + req_ie_len = le32_to_cpu(info->req_ie_length); + if (req_ie_len > 0) { + offset = le32_to_cpu(info->offset_req_ies); + req_ie = (u8 *)info + offset; + } + + resp_ie_len = le32_to_cpu(info->resp_ie_length); + if (resp_ie_len > 0) { + offset = le32_to_cpu(info->offset_resp_ies); + resp_ie = (u8 *)info + offset; + } + } + } else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC)) + return; + + ret = get_bssid(usbdev, bssid); + if (ret < 0) + memset(bssid, 0, sizeof(bssid)); + + netdev_dbg(usbdev->net, "link up work: [%pM]%s\n", + bssid, roamed ? " roamed" : ""); + + /* Internal bss list in device always contains at least the currently + * connected bss and we can get it to cfg80211 with + * rndis_check_bssid_list(). + * NOTE: This is true for Broadcom chip, but not mentioned in RNDIS + * spec. 
+ */ + rndis_check_bssid_list(usbdev); + + if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { + if (!roamed) + cfg80211_connect_result(usbdev->net, bssid, req_ie, + req_ie_len, resp_ie, + resp_ie_len, 0, GFP_KERNEL); + else + cfg80211_roamed(usbdev->net, bssid, req_ie, req_ie_len, + resp_ie, resp_ie_len, GFP_KERNEL); + } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) + cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); + + priv->connected = true; + memcpy(priv->bssid, bssid, ETH_ALEN); + + usbnet_resume_rx(usbdev); + netif_carrier_on(usbdev->net); +} + +static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) +{ + union iwreq_data evt; + + netif_carrier_off(usbdev->net); + + evt.data.flags = 0; + evt.data.length = 0; + memset(evt.ap_addr.sa_data, 0, ETH_ALEN); + wireless_send_event(usbdev->net, SIOCGIWAP, &evt, NULL); +} + +static void rndis_wlan_worker(struct work_struct *work) +{ + struct rndis_wlan_private *priv = + container_of(work, struct rndis_wlan_private, work); + struct usbnet *usbdev = priv->usbdev; + + if (test_and_clear_bit(WORK_LINK_UP, &priv->work_pending)) + rndis_wlan_do_link_up_work(usbdev); + + if (test_and_clear_bit(WORK_LINK_DOWN, &priv->work_pending)) + rndis_wlan_do_link_down_work(usbdev); + + if (test_and_clear_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending)) + set_multicast_list(usbdev); +} + +static void rndis_wlan_set_multicast_list(struct net_device *dev) +{ + struct usbnet *usbdev = netdev_priv(dev); + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + + if (test_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending)) + return; + + set_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending); + queue_work(priv->workqueue, &priv->work); +} + +static void rndis_wlan_auth_indication(struct usbnet *usbdev, + struct ndis_80211_status_indication *indication, + int len) +{ + u8 *buf; + const char *type; + int flags, buflen, key_id; + bool pairwise_error, group_error; + struct ndis_80211_auth_request *auth_req; + enum nl80211_key_type key_type; + + /* must have at least one array entry */ + if (len < offsetof(struct ndis_80211_status_indication, u) + + sizeof(struct ndis_80211_auth_request)) { + netdev_info(usbdev->net, "authentication indication: too short message (%i)\n", + len); + return; + } + + buf = (void *)&indication->u.auth_request[0]; + buflen = len - offsetof(struct ndis_80211_status_indication, u); + + while (buflen >= sizeof(*auth_req)) { + auth_req = (void *)buf; + type = "unknown"; + flags = le32_to_cpu(auth_req->flags); + pairwise_error = false; + group_error = false; + + if (flags & 0x1) + type = "reauth request"; + if (flags & 0x2) + type = "key update request"; + if (flags & 0x6) { + pairwise_error = true; + type = "pairwise_error"; + } + if (flags & 0xe) { + group_error = true; + type = "group_error"; + } + + netdev_info(usbdev->net, "authentication indication: %s (0x%08x)\n", + type, le32_to_cpu(auth_req->flags)); + + if (pairwise_error) { + key_type = NL80211_KEYTYPE_PAIRWISE; + key_id = -1; + + cfg80211_michael_mic_failure(usbdev->net, + auth_req->bssid, + key_type, key_id, NULL, + GFP_KERNEL); + } + + if (group_error) { + key_type = NL80211_KEYTYPE_GROUP; + key_id = -1; + + cfg80211_michael_mic_failure(usbdev->net, + auth_req->bssid, + key_type, key_id, NULL, + GFP_KERNEL); + } + + buflen -= le32_to_cpu(auth_req->length); + buf += le32_to_cpu(auth_req->length); + } +} + +static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev, + struct ndis_80211_status_indication *indication, + int len) +{ + struct 
ndis_80211_pmkid_cand_list *cand_list; + int list_len, expected_len, i; + + if (len < offsetof(struct ndis_80211_status_indication, u) + + sizeof(struct ndis_80211_pmkid_cand_list)) { + netdev_info(usbdev->net, "pmkid candidate list indication: too short message (%i)\n", + len); + return; + } + + list_len = le32_to_cpu(indication->u.cand_list.num_candidates) * + sizeof(struct ndis_80211_pmkid_candidate); + expected_len = sizeof(struct ndis_80211_pmkid_cand_list) + list_len + + offsetof(struct ndis_80211_status_indication, u); + + if (len < expected_len) { + netdev_info(usbdev->net, "pmkid candidate list indication: list larger than buffer (%i < %i)\n", + len, expected_len); + return; + } + + cand_list = &indication->u.cand_list; + + netdev_info(usbdev->net, "pmkid candidate list indication: version %i, candidates %i\n", + le32_to_cpu(cand_list->version), + le32_to_cpu(cand_list->num_candidates)); + + if (le32_to_cpu(cand_list->version) != 1) + return; + + for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) { + struct ndis_80211_pmkid_candidate *cand = + &cand_list->candidate_list[i]; + + netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n", + i, le32_to_cpu(cand->flags), cand->bssid); + +#if 0 + struct iw_pmkid_cand pcand; + union iwreq_data wrqu; + + memset(&pcand, 0, sizeof(pcand)); + if (le32_to_cpu(cand->flags) & 0x01) + pcand.flags |= IW_PMKID_CAND_PREAUTH; + pcand.index = i; + memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN); + + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = sizeof(pcand); + wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu, + (u8 *)&pcand); +#endif + } +} + +static void rndis_wlan_media_specific_indication(struct usbnet *usbdev, + struct rndis_indicate *msg, int buflen) +{ + struct ndis_80211_status_indication *indication; + int len, offset; + + offset = offsetof(struct rndis_indicate, status) + + le32_to_cpu(msg->offset); + len = le32_to_cpu(msg->length); + + if (len < 8) { + netdev_info(usbdev->net, "media specific indication, ignore too short message (%i < 8)\n", + len); + return; + } + + if (offset + len > buflen) { + netdev_info(usbdev->net, "media specific indication, too large to fit to buffer (%i > %i)\n", + offset + len, buflen); + return; + } + + indication = (void *)((u8 *)msg + offset); + + switch (le32_to_cpu(indication->status_type)) { + case NDIS_80211_STATUSTYPE_RADIOSTATE: + netdev_info(usbdev->net, "radio state indication: %i\n", + le32_to_cpu(indication->u.radio_status)); + return; + + case NDIS_80211_STATUSTYPE_MEDIASTREAMMODE: + netdev_info(usbdev->net, "media stream mode indication: %i\n", + le32_to_cpu(indication->u.media_stream_mode)); + return; + + case NDIS_80211_STATUSTYPE_AUTHENTICATION: + rndis_wlan_auth_indication(usbdev, indication, len); + return; + + case NDIS_80211_STATUSTYPE_PMKID_CANDIDATELIST: + rndis_wlan_pmkid_cand_list_indication(usbdev, indication, len); + return; + + default: + netdev_info(usbdev->net, "media specific indication: unknown status type 0x%08x\n", + le32_to_cpu(indication->status_type)); + } +} + +static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + struct rndis_indicate *msg = ind; + + switch (msg->status) { + case RNDIS_STATUS_MEDIA_CONNECT: + if (priv->current_command_oid == OID_802_11_ADD_KEY) { + /* OID_802_11_ADD_KEY causes sometimes extra + * "media connect" indications which confuses driver + * and userspace to think that device is + * roaming/reassociating when it isn't. 
+ */ + netdev_dbg(usbdev->net, "ignored OID_802_11_ADD_KEY triggered 'media connect'\n"); + return; + } + + usbnet_pause_rx(usbdev); + + netdev_info(usbdev->net, "media connect\n"); + + /* queue work to avoid recursive calls into rndis_command */ + set_bit(WORK_LINK_UP, &priv->work_pending); + queue_work(priv->workqueue, &priv->work); + break; + + case RNDIS_STATUS_MEDIA_DISCONNECT: + netdev_info(usbdev->net, "media disconnect\n"); + + /* queue work to avoid recursive calls into rndis_command */ + set_bit(WORK_LINK_DOWN, &priv->work_pending); + queue_work(priv->workqueue, &priv->work); + break; + + case RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION: + rndis_wlan_media_specific_indication(usbdev, msg, buflen); + break; + + default: + netdev_info(usbdev->net, "indication: 0x%08x\n", + le32_to_cpu(msg->status)); + break; + } +} + +static int rndis_wlan_get_caps(struct usbnet *usbdev) +{ + struct { + __le32 num_items; + __le32 items[8]; + } networks_supported; + int len, retval, i, n; + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + + /* determine supported modes */ + len = sizeof(networks_supported); + retval = rndis_query_oid(usbdev, OID_802_11_NETWORK_TYPES_SUPPORTED, + &networks_supported, &len); + if (retval >= 0) { + n = le32_to_cpu(networks_supported.num_items); + if (n > 8) + n = 8; + for (i = 0; i < n; i++) { + switch (le32_to_cpu(networks_supported.items[i])) { + case NDIS_80211_TYPE_FREQ_HOP: + case NDIS_80211_TYPE_DIRECT_SEQ: + priv->caps |= CAP_MODE_80211B; + break; + case NDIS_80211_TYPE_OFDM_A: + priv->caps |= CAP_MODE_80211A; + break; + case NDIS_80211_TYPE_OFDM_G: + priv->caps |= CAP_MODE_80211G; + break; + } + } + } + + return retval; +} + +#define DEVICE_POLLER_JIFFIES (HZ) +static void rndis_device_poller(struct work_struct *work) +{ + struct rndis_wlan_private *priv = + container_of(work, struct rndis_wlan_private, + dev_poller_work.work); + struct usbnet *usbdev = priv->usbdev; + __le32 rssi, tmp; + int len, ret, j; + int update_jiffies = DEVICE_POLLER_JIFFIES; + void *buf; + + /* Only check/do workaround when connected. Calling is_associated() + * also polls device with rndis_command() and catches for media link + * indications. + */ + if (!is_associated(usbdev)) + goto end; + + len = sizeof(rssi); + ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); + if (ret == 0) + priv->last_qual = level_to_qual(le32_to_cpu(rssi)); + + netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n", + ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi))); + + /* Workaround transfer stalls on poor quality links. + * TODO: find right way to fix these stalls (as stalls do not happen + * with ndiswrapper/windows driver). */ + if (priv->param_workaround_interval > 0 && priv->last_qual <= 25) { + /* Decrease stats worker interval to catch stalls. + * faster. Faster than 400-500ms causes packet loss, + * Slower doesn't catch stalls fast enough. + */ + j = msecs_to_jiffies(priv->param_workaround_interval); + if (j > DEVICE_POLLER_JIFFIES) + j = DEVICE_POLLER_JIFFIES; + else if (j <= 0) + j = 1; + update_jiffies = j; + + /* Send scan OID. Use of both OIDs is required to get device + * working. 
+ */ + tmp = cpu_to_le32(1); + rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, + sizeof(tmp)); + + len = CONTROL_BUFFER_SIZE; + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + goto end; + + rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len); + kfree(buf); + } + +end: + if (update_jiffies >= HZ) + update_jiffies = round_jiffies_relative(update_jiffies); + else { + j = round_jiffies_relative(update_jiffies); + if (abs(j - update_jiffies) <= 10) + update_jiffies = j; + } + + queue_delayed_work(priv->workqueue, &priv->dev_poller_work, + update_jiffies); +} + +/* + * driver/device initialization + */ +static void rndis_copy_module_params(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + + priv->param_country[0] = modparam_country[0]; + priv->param_country[1] = modparam_country[1]; + priv->param_country[2] = 0; + priv->param_frameburst = modparam_frameburst; + priv->param_afterburner = modparam_afterburner; + priv->param_power_save = modparam_power_save; + priv->param_power_output = modparam_power_output; + priv->param_roamtrigger = modparam_roamtrigger; + priv->param_roamdelta = modparam_roamdelta; + + priv->param_country[0] = toupper(priv->param_country[0]); + priv->param_country[1] = toupper(priv->param_country[1]); + /* doesn't support EU as country code, use FI instead */ + if (!strcmp(priv->param_country, "EU")) + strcpy(priv->param_country, "FI"); + + if (priv->param_power_save < 0) + priv->param_power_save = 0; + else if (priv->param_power_save > 2) + priv->param_power_save = 2; + + if (priv->param_power_output < 0) + priv->param_power_output = 0; + else if (priv->param_power_output > 3) + priv->param_power_output = 3; + + if (priv->param_roamtrigger < -80) + priv->param_roamtrigger = -80; + else if (priv->param_roamtrigger > -60) + priv->param_roamtrigger = -60; + + if (priv->param_roamdelta < 0) + priv->param_roamdelta = 0; + else if (priv->param_roamdelta > 2) + priv->param_roamdelta = 2; + + if (modparam_workaround_interval < 0) + priv->param_workaround_interval = 500; + else + priv->param_workaround_interval = modparam_workaround_interval; +} + +static int bcm4320a_early_init(struct usbnet *usbdev) +{ + /* copy module parameters for bcm4320a so that iwconfig reports txpower + * and workaround parameter is copied to private structure correctly. + */ + rndis_copy_module_params(usbdev); + + /* bcm4320a doesn't handle configuration parameters well. Try + * set any and you get partially zeroed mac and broken device. + */ + + return 0; +} + +static int bcm4320b_early_init(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + char buf[8]; + + rndis_copy_module_params(usbdev); + + /* Early initialization settings, setting these won't have effect + * if called after generic_rndis_bind(). + */ + + rndis_set_config_parameter_str(usbdev, "Country", priv->param_country); + rndis_set_config_parameter_str(usbdev, "FrameBursting", + priv->param_frameburst ? "1" : "0"); + rndis_set_config_parameter_str(usbdev, "Afterburner", + priv->param_afterburner ? 
"1" : "0"); + sprintf(buf, "%d", priv->param_power_save); + rndis_set_config_parameter_str(usbdev, "PowerSaveMode", buf); + sprintf(buf, "%d", priv->param_power_output); + rndis_set_config_parameter_str(usbdev, "PwrOut", buf); + sprintf(buf, "%d", priv->param_roamtrigger); + rndis_set_config_parameter_str(usbdev, "RoamTrigger", buf); + sprintf(buf, "%d", priv->param_roamdelta); + rndis_set_config_parameter_str(usbdev, "RoamDelta", buf); + + return 0; +} + +/* same as rndis_netdev_ops but with local multicast handler */ +static const struct net_device_ops rndis_wlan_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = usbnet_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = rndis_wlan_set_multicast_list, +}; + +static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) +{ + struct wiphy *wiphy; + struct rndis_wlan_private *priv; + int retval, len; + __le32 tmp; + + /* allocate wiphy and rndis private data + * NOTE: We only support a single virtual interface, so wiphy + * and wireless_dev are somewhat synonymous for this device. + */ + wiphy = wiphy_new(&rndis_config_ops, sizeof(struct rndis_wlan_private)); + if (!wiphy) + return -ENOMEM; + + priv = wiphy_priv(wiphy); + usbdev->net->ieee80211_ptr = &priv->wdev; + priv->wdev.wiphy = wiphy; + priv->wdev.iftype = NL80211_IFTYPE_STATION; + + /* These have to be initialized before calling generic_rndis_bind(). + * Otherwise we'll be in big trouble in rndis_wlan_early_init(). + */ + usbdev->driver_priv = priv; + priv->usbdev = usbdev; + + mutex_init(&priv->command_lock); + + /* because rndis_command() sleeps we need to use workqueue */ + priv->workqueue = create_singlethread_workqueue("rndis_wlan"); + INIT_WORK(&priv->work, rndis_wlan_worker); + INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller); + INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results); + + /* try bind rndis_host */ + retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS); + if (retval < 0) + goto fail; + + /* generic_rndis_bind set packet filter to multicast_all+ + * promisc mode which doesn't work well for our devices (device + * picks up rssi to closest station instead of to access point). + * + * rndis_host wants to avoid all OID as much as possible + * so do promisc/multicast handling in rndis_wlan. 
+ */ + usbdev->net->netdev_ops = &rndis_wlan_netdev_ops; + + tmp = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST; + retval = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &tmp, + sizeof(tmp)); + + len = sizeof(tmp); + retval = rndis_query_oid(usbdev, OID_802_3_MAXIMUM_LIST_SIZE, &tmp, + &len); + priv->multicast_size = le32_to_cpu(tmp); + if (retval < 0 || priv->multicast_size < 0) + priv->multicast_size = 0; + if (priv->multicast_size > 0) + usbdev->net->flags |= IFF_MULTICAST; + else + usbdev->net->flags &= ~IFF_MULTICAST; + + /* fill-out wiphy structure and register w/ cfg80211 */ + memcpy(wiphy->perm_addr, usbdev->net->dev_addr, ETH_ALEN); + wiphy->privid = rndis_wiphy_privid; + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) + | BIT(NL80211_IFTYPE_ADHOC); + wiphy->max_scan_ssids = 1; + + /* TODO: fill-out band/encr information based on priv->caps */ + rndis_wlan_get_caps(usbdev); + + memcpy(priv->channels, rndis_channels, sizeof(rndis_channels)); + memcpy(priv->rates, rndis_rates, sizeof(rndis_rates)); + priv->band.channels = priv->channels; + priv->band.n_channels = ARRAY_SIZE(rndis_channels); + priv->band.bitrates = priv->rates; + priv->band.n_bitrates = ARRAY_SIZE(rndis_rates); + wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; + + memcpy(priv->cipher_suites, rndis_cipher_suites, + sizeof(rndis_cipher_suites)); + wiphy->cipher_suites = priv->cipher_suites; + wiphy->n_cipher_suites = ARRAY_SIZE(rndis_cipher_suites); + + set_wiphy_dev(wiphy, &usbdev->udev->dev); + + if (wiphy_register(wiphy)) { + retval = -ENODEV; + goto fail; + } + + set_default_iw_params(usbdev); + + /* set default rts/frag */ + rndis_set_wiphy_params(wiphy, + WIPHY_PARAM_FRAG_THRESHOLD | WIPHY_PARAM_RTS_THRESHOLD); + + /* turn radio on */ + priv->radio_on = true; + disassociate(usbdev, true); + netif_carrier_off(usbdev->net); + + return 0; + +fail: + cancel_delayed_work_sync(&priv->dev_poller_work); + cancel_delayed_work_sync(&priv->scan_work); + cancel_work_sync(&priv->work); + flush_workqueue(priv->workqueue); + destroy_workqueue(priv->workqueue); + + wiphy_free(wiphy); + return retval; +} + +static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + + /* turn radio off */ + disassociate(usbdev, false); + + cancel_delayed_work_sync(&priv->dev_poller_work); + cancel_delayed_work_sync(&priv->scan_work); + cancel_work_sync(&priv->work); + flush_workqueue(priv->workqueue); + destroy_workqueue(priv->workqueue); + + if (priv && priv->wpa_ie_len) + kfree(priv->wpa_ie); + + rndis_unbind(usbdev, intf); + + wiphy_unregister(priv->wdev.wiphy); + wiphy_free(priv->wdev.wiphy); +} + +static int rndis_wlan_reset(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + int retval; + + netdev_dbg(usbdev->net, "%s()\n", __func__); + + retval = rndis_reset(usbdev); + if (retval) + netdev_warn(usbdev->net, "rndis_reset failed: %d\n", retval); + + /* rndis_reset cleared multicast list, so restore here. 
+ (set_multicast_list() also turns on current packet filter) */ + set_multicast_list(usbdev); + + queue_delayed_work(priv->workqueue, &priv->dev_poller_work, + round_jiffies_relative(DEVICE_POLLER_JIFFIES)); + + return deauthenticate(usbdev); +} + +static int rndis_wlan_stop(struct usbnet *usbdev) +{ + struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); + int retval; + __le32 filter; + + netdev_dbg(usbdev->net, "%s()\n", __func__); + + retval = disassociate(usbdev, false); + + priv->work_pending = 0; + cancel_delayed_work_sync(&priv->dev_poller_work); + cancel_delayed_work_sync(&priv->scan_work); + cancel_work_sync(&priv->work); + flush_workqueue(priv->workqueue); + + if (priv->scan_request) { + cfg80211_scan_done(priv->scan_request, true); + priv->scan_request = NULL; + } + + /* Set current packet filter zero to block receiving data packets from + device. */ + filter = 0; + rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter, + sizeof(filter)); + + return retval; +} + +static const struct driver_info bcm4320b_info = { + .description = "Wireless RNDIS device, BCM4320b based", + .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT | + FLAG_AVOID_UNLINK_URBS, + .bind = rndis_wlan_bind, + .unbind = rndis_wlan_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, + .reset = rndis_wlan_reset, + .stop = rndis_wlan_stop, + .early_init = bcm4320b_early_init, + .indication = rndis_wlan_indication, +}; + +static const struct driver_info bcm4320a_info = { + .description = "Wireless RNDIS device, BCM4320a based", + .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT | + FLAG_AVOID_UNLINK_URBS, + .bind = rndis_wlan_bind, + .unbind = rndis_wlan_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, + .reset = rndis_wlan_reset, + .stop = rndis_wlan_stop, + .early_init = bcm4320a_early_init, + .indication = rndis_wlan_indication, +}; + +static const struct driver_info rndis_wlan_info = { + .description = "Wireless RNDIS device", + .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT | + FLAG_AVOID_UNLINK_URBS, + .bind = rndis_wlan_bind, + .unbind = rndis_wlan_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, + .reset = rndis_wlan_reset, + .stop = rndis_wlan_stop, + .early_init = bcm4320a_early_init, + .indication = rndis_wlan_indication, +}; + +/*-------------------------------------------------------------------------*/ + +static const struct usb_device_id products [] = { +#define RNDIS_MASTER_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = 2 /* ACM */, \ + .bInterfaceProtocol = 0x0ff + +/* INF driver for these devices have DriverVer >= 4.xx.xx.xx and many custom + * parameters available. Chipset marked as 'BCM4320SKFBG' in NDISwrapper-wiki. + */ +{ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0411, + .idProduct = 0x00bc, /* Buffalo WLI-U2-KG125S */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0baf, + .idProduct = 0x011b, /* U.S. 
Robotics USR5421 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x050d, + .idProduct = 0x011b, /* Belkin F5D7051 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x1799, /* Belkin has two vendor ids */ + .idProduct = 0x011b, /* Belkin F5D7051 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x13b1, + .idProduct = 0x0014, /* Linksys WUSB54GSv2 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x13b1, + .idProduct = 0x0026, /* Linksys WUSB54GSC */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0b05, + .idProduct = 0x1717, /* Asus WL169gE */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0a5c, + .idProduct = 0xd11b, /* Eminent EM4045 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x1690, + .idProduct = 0x0715, /* BT Voyager 1055 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320b_info, +}, +/* These devices have DriverVer < 4.xx.xx.xx and do not have any custom + * parameters available, hardware probably contain older firmware version with + * no way of updating. Chipset marked as 'BCM4320????' in NDISwrapper-wiki. + */ +{ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x13b1, + .idProduct = 0x000e, /* Linksys WUSB54GSv1 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320a_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0baf, + .idProduct = 0x0111, /* U.S. Robotics USR5420 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320a_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x0411, + .idProduct = 0x004b, /* BUFFALO WLI-USB-G54 */ + RNDIS_MASTER_INTERFACE, + .driver_info = (unsigned long) &bcm4320a_info, +}, +/* Generic Wireless RNDIS devices that we don't have exact + * idVendor/idProduct/chip yet. 
+ */ +{ + /* RNDIS is MSFT's un-official variant of CDC ACM */ + USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), + .driver_info = (unsigned long) &rndis_wlan_info, +}, { + /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */ + USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1), + .driver_info = (unsigned long) &rndis_wlan_info, +}, + { }, // END +}; +MODULE_DEVICE_TABLE(usb, products); + +static struct usb_driver rndis_wlan_driver = { + .name = "rndis_wlan", + .id_table = products, + .probe = usbnet_probe, + .disconnect = usbnet_disconnect, + .suspend = usbnet_suspend, + .resume = usbnet_resume, +}; + +static int __init rndis_wlan_init(void) +{ + return usb_register(&rndis_wlan_driver); +} +module_init(rndis_wlan_init); + +static void __exit rndis_wlan_exit(void) +{ + usb_deregister(&rndis_wlan_driver); +} +module_exit(rndis_wlan_exit); + +MODULE_AUTHOR("Bjorge Dijkstra"); +MODULE_AUTHOR("Jussi Kivilinna"); +MODULE_DESCRIPTION("Driver for RNDIS based USB Wireless adapters"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile new file mode 100644 index 0000000..9713398 --- /dev/null +++ b/drivers/net/wireless/rt2x00/Makefile @@ -0,0 +1,23 @@ +rt2x00lib-y += rt2x00dev.o +rt2x00lib-y += rt2x00mac.o +rt2x00lib-y += rt2x00config.o +rt2x00lib-y += rt2x00queue.o +rt2x00lib-y += rt2x00link.o +rt2x00lib-$(CONFIG_RT2X00_LIB_DEBUGFS) += rt2x00debug.o +rt2x00lib-$(CONFIG_RT2X00_LIB_CRYPTO) += rt2x00crypto.o +rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o +rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o +rt2x00lib-$(CONFIG_RT2X00_LIB_HT) += rt2x00ht.o + +obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o +obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o +obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o +obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o +obj-$(CONFIG_RT2800_LIB) += rt2800lib.o +obj-$(CONFIG_RT2400PCI) += rt2400pci.o +obj-$(CONFIG_RT2500PCI) += rt2500pci.o +obj-$(CONFIG_RT61PCI) += rt61pci.o +obj-$(CONFIG_RT2800PCI) += rt2800pci.o +obj-$(CONFIG_RT2500USB) += rt2500usb.o +obj-$(CONFIG_RT73USB) += rt73usb.o +obj-$(CONFIG_RT2800USB) += rt2800usb.o diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c new file mode 100644 index 0000000..c22b040 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -0,0 +1,1676 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2400pci + Abstract: rt2400pci device specific routines. + Supported chipsets: RT2460. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00pci.h" +#include "rt2400pci.h" + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt2x00pci_register_read and rt2x00pci_register_write. 
+ * BBP and RF registers require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. + * These indirect registers work with busy bits, + * and we will try at most REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attempt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. + */ +#define WAIT_FOR_BBP(__dev, __reg) \ + rt2x00pci_regbusy_read((__dev), BBPCSR, BBPCSR_BUSY, (__reg)) +#define WAIT_FOR_RF(__dev, __reg) \ + rt2x00pci_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg)) + +static void rt2400pci_bbp_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the BBP becomes available, afterwards we + * can safely write the new data into the register. + */ + if (WAIT_FOR_BBP(rt2x00dev, &reg)) { + reg = 0; + rt2x00_set_field32(&reg, BBPCSR_VALUE, value); + rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); + rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); + rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt2400pci_bbp_read(struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the BBP becomes available, afterwards we + * can safely write the read request into the register. + * After the data has been written, we wait until hardware + * returns the correct value. If at any time the register + * doesn't become available in time, reg will be 0xffffffff, + * which means we return 0xff to the caller. + */ + if (WAIT_FOR_BBP(rt2x00dev, &reg)) { + reg = 0; + rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); + rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); + rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); + + WAIT_FOR_BBP(rt2x00dev, &reg); + } + + *value = rt2x00_get_field32(reg, BBPCSR_VALUE); + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt2400pci_rf_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the RF becomes available, afterwards we + * can safely write the new data into the register.
+ */ + if (WAIT_FOR_RF(rt2x00dev, &reg)) { + reg = 0; + rt2x00_set_field32(&reg, RFCSR_VALUE, value); + rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20); + rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0); + rt2x00_set_field32(&reg, RFCSR_BUSY, 1); + + rt2x00pci_register_write(rt2x00dev, RFCSR, reg); + rt2x00_rf_write(rt2x00dev, word, value); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt2400pci_eepromregister_read(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR21, &reg); + + eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN); + eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT); + eeprom->reg_data_clock = + !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_CLOCK); + eeprom->reg_chip_select = + !!rt2x00_get_field32(reg, CSR21_EEPROM_CHIP_SELECT); +} + +static void rt2400pci_eepromregister_write(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg = 0; + + rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_IN, !!eeprom->reg_data_in); + rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_OUT, !!eeprom->reg_data_out); + rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_CLOCK, + !!eeprom->reg_data_clock); + rt2x00_set_field32(&reg, CSR21_EEPROM_CHIP_SELECT, + !!eeprom->reg_chip_select); + + rt2x00pci_register_write(rt2x00dev, CSR21, reg); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +static const struct rt2x00debug rt2400pci_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt2x00pci_register_read, + .write = rt2x00pci_register_write, + .flags = RT2X00DEBUGFS_OFFSET, + .word_base = CSR_REG_BASE, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_base = EEPROM_BASE, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt2400pci_bbp_read, + .write = rt2400pci_bbp_write, + .word_base = BBP_BASE, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt2400pci_rf_write, + .word_base = RF_BASE, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); + return rt2x00_get_field32(reg, GPIOCSR_BIT0); +} + +#ifdef CONFIG_RT2X00_LIB_LEDS +static void rt2400pci_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + unsigned int enabled = brightness != LED_OFF; + u32 reg; + + rt2x00pci_register_read(led->rt2x00dev, LEDCSR, &reg); + + if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC) + rt2x00_set_field32(&reg, LEDCSR_LINK, enabled); + else if (led->type == LED_TYPE_ACTIVITY) + rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, enabled); + + rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg); +} + +static int rt2400pci_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + u32 reg; + + rt2x00pci_register_read(led->rt2x00dev, LEDCSR, &reg); + rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on); + rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off); + rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg); + + return 0; +} + +static void
rt2400pci_init_led(struct rt2x00_dev *rt2x00dev, + struct rt2x00_led *led, + enum led_type type) +{ + led->rt2x00dev = rt2x00dev; + led->type = type; + led->led_dev.brightness_set = rt2400pci_brightness_set; + led->led_dev.blink_set = rt2400pci_blink_set; + led->flags = LED_INITIALIZED; +} +#endif /* CONFIG_RT2X00_LIB_LEDS */ + +/* + * Configuration handlers. + */ +static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev, + const unsigned int filter_flags) +{ + u32 reg; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * since there is no filter for it at this time. + */ + rt2x00pci_register_read(rt2x00dev, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DROP_CRC, + !(filter_flags & FIF_FCSFAIL)); + rt2x00_set_field32(®, RXCSR0_DROP_PHYSICAL, + !(filter_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, + !(filter_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, + !(filter_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, RXCSR0_DROP_TODS, + !(filter_flags & FIF_PROMISC_IN_BSS) && + !rt2x00dev->intf_ap_count); + rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev, + struct rt2x00_intf *intf, + struct rt2x00intf_conf *conf, + const unsigned int flags) +{ + unsigned int bcn_preload; + u32 reg; + + if (flags & CONFIG_UPDATE_TYPE) { + /* + * Enable beacon config + */ + bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20); + rt2x00pci_register_read(rt2x00dev, BCNCSR1, ®); + rt2x00_set_field32(®, BCNCSR1_PRELOAD, bcn_preload); + rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); + + /* + * Enable synchronisation. + */ + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); + rt2x00_set_field32(®, CSR14_TSF_SYNC, conf->sync); + rt2x00_set_field32(®, CSR14_TBCN, 1); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + } + + if (flags & CONFIG_UPDATE_MAC) + rt2x00pci_register_multiwrite(rt2x00dev, CSR3, + conf->mac, sizeof(conf->mac)); + + if (flags & CONFIG_UPDATE_BSSID) + rt2x00pci_register_multiwrite(rt2x00dev, CSR5, + conf->bssid, sizeof(conf->bssid)); +} + +static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_erp *erp) +{ + int preamble_mask; + u32 reg; + + /* + * When short preamble is enabled, we should set bit 0x08 + */ + preamble_mask = erp->short_preamble << 3; + + rt2x00pci_register_read(rt2x00dev, TXCSR1, ®); + rt2x00_set_field32(®, TXCSR1_ACK_TIMEOUT, 0x1ff); + rt2x00_set_field32(®, TXCSR1_ACK_CONSUME_TIME, 0x13a); + rt2x00_set_field32(®, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(®, TXCSR1_AUTORESPONDER, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR2, ®); + rt2x00_set_field32(®, ARCSR2_SIGNAL, 0x00); + rt2x00_set_field32(®, ARCSR2_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 10)); + rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR3, ®); + rt2x00_set_field32(®, ARCSR3_SIGNAL, 0x01 | preamble_mask); + rt2x00_set_field32(®, ARCSR3_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 20)); + rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR4, ®); + rt2x00_set_field32(®, ARCSR4_SIGNAL, 0x02 | preamble_mask); + rt2x00_set_field32(®, ARCSR4_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, 
GET_DURATION(ACK_SIZE, 55)); + rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR5, ®); + rt2x00_set_field32(®, ARCSR5_SIGNAL, 0x03 | preamble_mask); + rt2x00_set_field32(®, ARCSR5_SERVICE, 0x84); + rt2x00_set_field32(®, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 110)); + rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); + + rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates); + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_SLOT_TIME, erp->slot_time); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + rt2x00pci_register_read(rt2x00dev, CSR12, ®); + rt2x00_set_field32(®, CSR12_BEACON_INTERVAL, erp->beacon_int * 16); + rt2x00_set_field32(®, CSR12_CFP_MAX_DURATION, erp->beacon_int * 16); + rt2x00pci_register_write(rt2x00dev, CSR12, reg); + + rt2x00pci_register_read(rt2x00dev, CSR18, ®); + rt2x00_set_field32(®, CSR18_SIFS, erp->sifs); + rt2x00_set_field32(®, CSR18_PIFS, erp->pifs); + rt2x00pci_register_write(rt2x00dev, CSR18, reg); + + rt2x00pci_register_read(rt2x00dev, CSR19, ®); + rt2x00_set_field32(®, CSR19_DIFS, erp->difs); + rt2x00_set_field32(®, CSR19_EIFS, erp->eifs); + rt2x00pci_register_write(rt2x00dev, CSR19, reg); +} + +static void rt2400pci_config_ant(struct rt2x00_dev *rt2x00dev, + struct antenna_setup *ant) +{ + u8 r1; + u8 r4; + + /* + * We should never come here because rt2x00lib is supposed + * to catch this and send us the correct antenna explicitely. + */ + BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || + ant->tx == ANTENNA_SW_DIVERSITY); + + rt2400pci_bbp_read(rt2x00dev, 4, &r4); + rt2400pci_bbp_read(rt2x00dev, 1, &r1); + + /* + * Configure the TX antenna. + */ + switch (ant->tx) { + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 0); + break; + case ANTENNA_B: + default: + rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 2); + break; + } + + /* + * Configure the RX antenna. + */ + switch (ant->rx) { + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 0); + break; + case ANTENNA_B: + default: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + break; + } + + rt2400pci_bbp_write(rt2x00dev, 4, r4); + rt2400pci_bbp_write(rt2x00dev, 1, r1); +} + +static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf) +{ + /* + * Switch on tuning bits. + */ + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); + + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 2, rf->rf2); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + /* + * RF2420 chipset don't need any additional actions. + */ + if (rt2x00_rf(rt2x00dev, RF2420)) + return; + + /* + * For the RT2421 chipsets we need to write an invalid + * reference clock rate to activate auto_tune. + * After that we set the value back to the correct channel. + */ + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 2, 0x000c2a32); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + msleep(1); + + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 2, rf->rf2); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + msleep(1); + + /* + * Switch off tuning bits. 
+ */ + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 0); + + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + /* + * Clear false CRC during channel switch. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, &rf->rf1); +} + +static void rt2400pci_config_txpower(struct rt2x00_dev *rt2x00dev, int txpower) +{ + rt2400pci_bbp_write(rt2x00dev, 3, TXPOWER_TO_DEV(txpower)); +} + +static void rt2400pci_config_retry_limit(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_LONG_RETRY, + libconf->conf->long_frame_max_tx_count); + rt2x00_set_field32(®, CSR11_SHORT_RETRY, + libconf->conf->short_frame_max_tx_count); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); +} + +static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + enum dev_state state = + (libconf->conf->flags & IEEE80211_CONF_PS) ? + STATE_SLEEP : STATE_AWAKE; + u32 reg; + + if (state == STATE_SLEEP) { + rt2x00pci_register_read(rt2x00dev, CSR20, ®); + rt2x00_set_field32(®, CSR20_DELAY_AFTER_TBCN, + (rt2x00dev->beacon_int - 20) * 16); + rt2x00_set_field32(®, CSR20_TBCN_BEFORE_WAKEUP, + libconf->conf->listen_interval - 1); + + /* We must first disable autowake before it can be enabled */ + rt2x00_set_field32(®, CSR20_AUTOWAKE, 0); + rt2x00pci_register_write(rt2x00dev, CSR20, reg); + + rt2x00_set_field32(®, CSR20_AUTOWAKE, 1); + rt2x00pci_register_write(rt2x00dev, CSR20, reg); + } + + rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); +} + +static void rt2400pci_config(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf, + const unsigned int flags) +{ + if (flags & IEEE80211_CONF_CHANGE_CHANNEL) + rt2400pci_config_channel(rt2x00dev, &libconf->rf); + if (flags & IEEE80211_CONF_CHANGE_POWER) + rt2400pci_config_txpower(rt2x00dev, + libconf->conf->power_level); + if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) + rt2400pci_config_retry_limit(rt2x00dev, libconf); + if (flags & IEEE80211_CONF_CHANGE_PS) + rt2400pci_config_ps(rt2x00dev, libconf); +} + +static void rt2400pci_config_cw(struct rt2x00_dev *rt2x00dev, + const int cw_min, const int cw_max) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_CWMIN, cw_min); + rt2x00_set_field32(®, CSR11_CWMAX, cw_max); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); +} + +/* + * Link tuning + */ +static void rt2400pci_link_stats(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual) +{ + u32 reg; + u8 bbp; + + /* + * Update FCS error count from register. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, ®); + qual->rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2400pci_bbp_read(rt2x00dev, 39, &bbp); + qual->false_cca = bbp; +} + +static inline void rt2400pci_set_vgc(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual, u8 vgc_level) +{ + rt2400pci_bbp_write(rt2x00dev, 13, vgc_level); + qual->vgc_level = vgc_level; + qual->vgc_level_reg = vgc_level; +} + +static void rt2400pci_reset_tuner(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual) +{ + rt2400pci_set_vgc(rt2x00dev, qual, 0x08); +} + +static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual, const u32 count) +{ + /* + * The link tuner should not run longer then 60 seconds, + * and should run once every 2 seconds. 
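+ * The count argument increments once per tuner invocation (roughly + * once a second), so the check below skips even counts and stops + * tuning after 60 iterations.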
+ */ + if (count > 60 || !(count & 1)) + return; + + /* + * Base r13 link tuning on the false cca count. + */ + if ((qual->false_cca > 512) && (qual->vgc_level < 0x20)) + rt2400pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level); + else if ((qual->false_cca < 100) && (qual->vgc_level > 0x08)) + rt2400pci_set_vgc(rt2x00dev, qual, --qual->vgc_level); +} + +/* + * Initialization functions. + */ +static bool rt2400pci_get_entry_state(struct queue_entry *entry) +{ + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + u32 word; + + if (entry->queue->qid == QID_RX) { + rt2x00_desc_read(entry_priv->desc, 0, &word); + + return rt2x00_get_field32(word, RXD_W0_OWNER_NIC); + } else { + rt2x00_desc_read(entry_priv->desc, 0, &word); + + return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + rt2x00_get_field32(word, TXD_W0_VALID)); + } +} + +static void rt2400pci_clear_entry(struct queue_entry *entry) +{ + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + u32 word; + + if (entry->queue->qid == QID_RX) { + rt2x00_desc_read(entry_priv->desc, 2, &word); + rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, entry->skb->len); + rt2x00_desc_write(entry_priv->desc, 2, word); + + rt2x00_desc_read(entry_priv->desc, 1, &word); + rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); + rt2x00_desc_write(entry_priv->desc, 1, word); + + rt2x00_desc_read(entry_priv->desc, 0, &word); + rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); + rt2x00_desc_write(entry_priv->desc, 0, word); + } else { + rt2x00_desc_read(entry_priv->desc, 0, &word); + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); + rt2x00_desc_write(entry_priv->desc, 0, word); + } +} + +static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev) +{ + struct queue_entry_priv_pci *entry_priv; + u32 reg; + + /* + * Initialize registers. 
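+ * Ring mapping used below: tx[0] backs the priority ring (kicked for + * QID_AC_BE), tx[1] the regular TX ring (QID_AC_BK), bcn[0] the + * beacon ring and bcn[1] the ATIM ring.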
+ */ + rt2x00pci_register_read(rt2x00dev, TXCSR2, ®); + rt2x00_set_field32(®, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); + rt2x00_set_field32(®, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); + rt2x00_set_field32(®, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit); + rt2x00_set_field32(®, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); + rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); + + entry_priv = rt2x00dev->tx[1].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, TXCSR3, ®); + rt2x00_set_field32(®, TXCSR3_TX_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); + + entry_priv = rt2x00dev->tx[0].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, TXCSR5, ®); + rt2x00_set_field32(®, TXCSR5_PRIO_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); + + entry_priv = rt2x00dev->bcn[1].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, TXCSR4, ®); + rt2x00_set_field32(®, TXCSR4_ATIM_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); + + entry_priv = rt2x00dev->bcn[0].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, TXCSR6, ®); + rt2x00_set_field32(®, TXCSR6_BEACON_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR1, ®); + rt2x00_set_field32(®, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); + rt2x00_set_field32(®, RXCSR1_NUM_RXD, rt2x00dev->rx->limit); + rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); + + entry_priv = rt2x00dev->rx->entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, RXCSR2, ®); + rt2x00_set_field32(®, RXCSR2_RX_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); + + return 0; +} + +static int rt2400pci_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_write(rt2x00dev, PSCSR0, 0x00020002); + rt2x00pci_register_write(rt2x00dev, PSCSR1, 0x00000002); + rt2x00pci_register_write(rt2x00dev, PSCSR2, 0x00023f20); + rt2x00pci_register_write(rt2x00dev, PSCSR3, 0x00000002); + + rt2x00pci_register_read(rt2x00dev, TIMECSR, ®); + rt2x00_set_field32(®, TIMECSR_US_COUNT, 33); + rt2x00_set_field32(®, TIMECSR_US_64_COUNT, 63); + rt2x00_set_field32(®, TIMECSR_BEACON_EXPECT, 0); + rt2x00pci_register_write(rt2x00dev, TIMECSR, reg); + + rt2x00pci_register_read(rt2x00dev, CSR9, ®); + rt2x00_set_field32(®, CSR9_MAX_FRAME_UNIT, + (rt2x00dev->rx->data_size / 128)); + rt2x00pci_register_write(rt2x00dev, CSR9, reg); + + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + rt2x00_set_field32(®, CSR14_TSF_COUNT, 0); + rt2x00_set_field32(®, CSR14_TSF_SYNC, 0); + rt2x00_set_field32(®, CSR14_TBCN, 0); + rt2x00_set_field32(®, CSR14_TCFP, 0); + rt2x00_set_field32(®, CSR14_TATIMW, 0); + rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); + rt2x00_set_field32(®, CSR14_CFP_COUNT_PRELOAD, 0); + rt2x00_set_field32(®, CSR14_TBCM_PRELOAD, 0); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + + rt2x00pci_register_write(rt2x00dev, CNT3, 0x3f080000); + + rt2x00pci_register_read(rt2x00dev, ARCSR0, ®); + rt2x00_set_field32(®, ARCSR0_AR_BBP_DATA0, 133); + rt2x00_set_field32(®, ARCSR0_AR_BBP_ID0, 134); + rt2x00_set_field32(®, ARCSR0_AR_BBP_DATA1, 136); + rt2x00_set_field32(®, ARCSR0_AR_BBP_ID1, 135); + rt2x00pci_register_write(rt2x00dev, ARCSR0, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR3, ®); + rt2x00_set_field32(®, RXCSR3_BBP_ID0, 3); /* Tx power.*/ + rt2x00_set_field32(®, RXCSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, 
RXCSR3_BBP_ID1, 32); /* Signal */ + rt2x00_set_field32(®, RXCSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID2, 36); /* Rssi */ + rt2x00_set_field32(®, RXCSR3_BBP_ID2_VALID, 1); + rt2x00pci_register_write(rt2x00dev, RXCSR3, reg); + + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2x00pci_register_write(rt2x00dev, MACCSR0, 0x00217223); + rt2x00pci_register_write(rt2x00dev, MACCSR1, 0x00235518); + + rt2x00pci_register_read(rt2x00dev, MACCSR2, ®); + rt2x00_set_field32(®, MACCSR2_DELAY, 64); + rt2x00pci_register_write(rt2x00dev, MACCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, RALINKCSR, ®); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_DATA0, 17); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_ID0, 154); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_DATA1, 0); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_ID1, 154); + rt2x00pci_register_write(rt2x00dev, RALINKCSR, reg); + + rt2x00pci_register_read(rt2x00dev, CSR1, ®); + rt2x00_set_field32(®, CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, CSR1_BBP_RESET, 0); + rt2x00_set_field32(®, CSR1_HOST_READY, 0); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, CSR1, ®); + rt2x00_set_field32(®, CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + /* + * We must clear the FCS and FIFO error count. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, ®); + rt2x00pci_register_read(rt2x00dev, CNT4, ®); + + return 0; +} + +static int rt2400pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2400pci_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + return 0; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; +} + +static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + if (unlikely(rt2400pci_wait_bbp_ready(rt2x00dev))) + return -EACCES; + + rt2400pci_bbp_write(rt2x00dev, 1, 0x00); + rt2400pci_bbp_write(rt2x00dev, 3, 0x27); + rt2400pci_bbp_write(rt2x00dev, 4, 0x08); + rt2400pci_bbp_write(rt2x00dev, 10, 0x0f); + rt2400pci_bbp_write(rt2x00dev, 15, 0x72); + rt2400pci_bbp_write(rt2x00dev, 16, 0x74); + rt2400pci_bbp_write(rt2x00dev, 17, 0x20); + rt2400pci_bbp_write(rt2x00dev, 18, 0x72); + rt2400pci_bbp_write(rt2x00dev, 19, 0x0b); + rt2400pci_bbp_write(rt2x00dev, 20, 0x00); + rt2400pci_bbp_write(rt2x00dev, 28, 0x11); + rt2400pci_bbp_write(rt2x00dev, 29, 0x04); + rt2400pci_bbp_write(rt2x00dev, 30, 0x21); + rt2400pci_bbp_write(rt2x00dev, 31, 0x00); + + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + rt2400pci_bbp_write(rt2x00dev, reg_id, value); + } + } + + return 0; +} + +/* + * Device state switch handlers. 
+ */ +static void rt2400pci_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DISABLE_RX, + (state == STATE_RADIO_RX_OFF) || + (state == STATE_RADIO_RX_OFF_LINK)); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int mask = (state == STATE_RADIO_IRQ_OFF); + u32 reg; + + /* + * When interrupts are being enabled, the interrupt registers + * should clear the register to assure a clean state. + */ + if (state == STATE_RADIO_IRQ_ON) { + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + } + + /* + * Only toggle the interrupts bits we are going to use. + * Non-checked interrupt bits are disabled by default. + */ + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + rt2x00_set_field32(®, CSR8_TBCN_EXPIRE, mask); + rt2x00_set_field32(®, CSR8_TXDONE_TXRING, mask); + rt2x00_set_field32(®, CSR8_TXDONE_ATIMRING, mask); + rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, mask); + rt2x00_set_field32(®, CSR8_RXDONE, mask); + rt2x00pci_register_write(rt2x00dev, CSR8, reg); +} + +static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. + */ + if (unlikely(rt2400pci_init_queues(rt2x00dev) || + rt2400pci_init_registers(rt2x00dev) || + rt2400pci_init_bbp(rt2x00dev))) + return -EIO; + + return 0; +} + +static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Disable power + */ + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0); +} + +static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + char bbp_state; + char rf_state; + + put_to_sleep = (state != STATE_AWAKE); + + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); + rt2x00_set_field32(®, PWRCSR1_SET_STATE, 1); + rt2x00_set_field32(®, PWRCSR1_BBP_DESIRE_STATE, state); + rt2x00_set_field32(®, PWRCSR1_RF_DESIRE_STATE, state); + rt2x00_set_field32(®, PWRCSR1_PUT_TO_SLEEP, put_to_sleep); + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. 
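+ * Poll PWRCSR1 up to REGISTER_BUSY_COUNT times with a 10ms sleep in + * between; if the BBP and RF state fields never match the request, + * give up and let the caller see -EBUSY.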
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); + bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); + rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); + if (bbp_state == state && rf_state == state) + return 0; + msleep(10); + } + + return -EBUSY; +} + +static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt2400pci_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt2400pci_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_ON_LINK: + case STATE_RADIO_RX_OFF: + case STATE_RADIO_RX_OFF_LINK: + rt2400pci_toggle_rx(rt2x00dev, state); + break; + case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_OFF: + rt2400pci_toggle_irq(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2400pci_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + if (unlikely(retval)) + ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", + state, retval); + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct txentry_desc *txdesc) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; + __le32 *txd = skbdesc->desc; + u32 word; + + /* + * Start writing the descriptor words. + */ + rt2x00_desc_read(entry_priv->desc, 1, &word); + rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); + rt2x00_desc_write(entry_priv->desc, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, skb->len); + rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, skb->len); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 3, &word); + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal); + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5); + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1); + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service); + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6); + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1); + rt2x00_desc_write(txd, 3, word); + + rt2x00_desc_read(txd, 4, &word); + rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, txdesc->length_low); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1); + rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, txdesc->length_high); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1); + rt2x00_desc_write(txd, 4, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + test_bit(ENTRY_TXD_ACK, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_RTS, + test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); + rt2x00_desc_write(txd, 0, word); 
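+ + /* + * Word 0 is written last: its OWNER_NIC and VALID bits hand the + * descriptor over, presumably so the hardware never picks up a + * half-initialised entry. + */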
+} + +/* + * TX data initialization + */ +static void rt2400pci_write_beacon(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + u32 word; + u32 reg; + + /* + * Disable beaconing while we are reloading the beacon data, + * otherwise we might be sending out invalid data. + */ + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + + /* + * Replace rt2x00lib allocated descriptor with the + * pointer to the _real_ hardware descriptor. + * After that, map the beacon to DMA and update the + * descriptor. + */ + memcpy(entry_priv->desc, skbdesc->desc, skbdesc->desc_len); + skbdesc->desc = entry_priv->desc; + + rt2x00queue_map_txskb(rt2x00dev, entry->skb); + + rt2x00_desc_read(entry_priv->desc, 1, &word); + rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); + rt2x00_desc_write(entry_priv->desc, 1, word); +} + +static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue) +{ + u32 reg; + + if (queue == QID_BEACON) { + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { + rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); + rt2x00_set_field32(®, CSR14_TBCN, 1); + rt2x00_set_field32(®, CSR14_BEACON_GEN, 1); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + } + return; + } + + rt2x00pci_register_read(rt2x00dev, TXCSR0, ®); + rt2x00_set_field32(®, TXCSR0_KICK_PRIO, (queue == QID_AC_BE)); + rt2x00_set_field32(®, TXCSR0_KICK_TX, (queue == QID_AC_BK)); + rt2x00_set_field32(®, TXCSR0_KICK_ATIM, (queue == QID_ATIM)); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); +} + +static void rt2400pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid qid) +{ + u32 reg; + + if (qid == QID_BEACON) { + rt2x00pci_register_write(rt2x00dev, CSR14, 0); + } else { + rt2x00pci_register_read(rt2x00dev, TXCSR0, ®); + rt2x00_set_field32(®, TXCSR0_ABORT, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); + } +} + +/* + * RX control handlers + */ +static void rt2400pci_fill_rxdone(struct queue_entry *entry, + struct rxdone_entry_desc *rxdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + u32 word0; + u32 word2; + u32 word3; + u32 word4; + u64 tsf; + u32 rx_low; + u32 rx_high; + + rt2x00_desc_read(entry_priv->desc, 0, &word0); + rt2x00_desc_read(entry_priv->desc, 2, &word2); + rt2x00_desc_read(entry_priv->desc, 3, &word3); + rt2x00_desc_read(entry_priv->desc, 4, &word4); + + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; + if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) + rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC; + + /* + * We only get the lower 32bits from the timestamp, + * to get the full 64bits we must complement it with + * the timestamp from get_tsf(). + * Note that when a wraparound of the lower 32bits + * has occurred between the frame arrival and the get_tsf() + * call, we must decrease the higher 32bits with 1 to get + * to correct value. + */ + tsf = rt2x00dev->ops->hw->get_tsf(rt2x00dev->hw); + rx_low = rt2x00_get_field32(word4, RXD_W4_RX_END_TIME); + rx_high = upper_32_bits(tsf); + + if ((u32)tsf <= rx_low) + rx_high--; + + /* + * Obtain the status about this packet. 
+ * The signal is the PLCP value, and needs to be stripped + * of the preamble bit (0x08). + */ + rxdesc->timestamp = ((u64)rx_high << 32) | rx_low; + rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08; + rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) - + entry->queue->rt2x00dev->rssi_offset; + rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; + if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) + rxdesc->dev_flags |= RXDONE_MY_BSS; +} + +/* + * Interrupt functions. + */ +static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue_idx) +{ + struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + struct queue_entry_priv_pci *entry_priv; + struct queue_entry *entry; + struct txdone_entry_desc txdesc; + u32 word; + + while (!rt2x00queue_empty(queue)) { + entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); + entry_priv = entry->priv_data; + rt2x00_desc_read(entry_priv->desc, 0, &word); + + if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + !rt2x00_get_field32(word, TXD_W0_VALID)) + break; + + /* + * Obtain the status about this packet. + */ + txdesc.flags = 0; + switch (rt2x00_get_field32(word, TXD_W0_RESULT)) { + case 0: /* Success */ + case 1: /* Success with retry */ + __set_bit(TXDONE_SUCCESS, &txdesc.flags); + break; + case 2: /* Failure, excessive retries */ + __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags); + /* Don't break, this is a failed frame! */ + default: /* Failure */ + __set_bit(TXDONE_FAILURE, &txdesc.flags); + } + txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); + + rt2x00lib_txdone(entry, &txdesc); + } +} + +static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. + * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + + if (!reg) + return IRQ_NONE; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* + * Handle interrupts, walk through all bits + * and run the tasks, the bits are checked in order of + * priority. + */ + + /* + * 1 - Beacon timer expired interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) + rt2x00lib_beacondone(rt2x00dev); + + /* + * 2 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_RXDONE)) + rt2x00pci_rxdone(rt2x00dev); + + /* + * 3 - Atim ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) + rt2400pci_txdone(rt2x00dev, QID_ATIM); + + /* + * 4 - Priority ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) + rt2400pci_txdone(rt2x00dev, QID_AC_BE); + + /* + * 5 - Tx ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) + rt2400pci_txdone(rt2x00dev, QID_AC_BK); + + return IRQ_HANDLED; +} + +/* + * Device probe functions. + */ +static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + struct eeprom_93cx6 eeprom; + u32 reg; + u16 word; + u8 *mac; + + rt2x00pci_register_read(rt2x00dev, CSR21, ®); + + eeprom.data = rt2x00dev; + eeprom.register_read = rt2400pci_eepromregister_read; + eeprom.register_write = rt2400pci_eepromregister_write; + eeprom.width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ? 
+ PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + eeprom.reg_data_in = 0; + eeprom.reg_data_out = 0; + eeprom.reg_data_clock = 0; + eeprom.reg_chip_select = 0; + + eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, + EEPROM_SIZE / sizeof(u16)); + + /* + * Start validation of the data that has been read. + */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %pM\n", mac); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + ERROR(rt2x00dev, "Invalid EEPROM data detected.\n"); + return -EINVAL; + } + + return 0; +} + +static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2x00pci_register_read(rt2x00dev, CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2460, value, + rt2x00_get_field32(reg, CSR0_REVISION)); + + if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->default_ant.tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->default_ant.rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * When the eeprom indicates SW_DIVERSITY use HW_DIVERSITY instead. + * I am not 100% sure about this, but the legacy drivers do not + * indicate antenna swapping in software is required when + * diversity is enabled. + */ + if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY) + rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; + if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY) + rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; + + /* + * Store led mode, for correct led behaviour. + */ +#ifdef CONFIG_RT2X00_LIB_LEDS + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); + + rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); + if (value == LED_MODE_TXRX_ACTIVITY || + value == LED_MODE_DEFAULT || + value == LED_MODE_ASUS) + rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_qual, + LED_TYPE_ACTIVITY); +#endif /* CONFIG_RT2X00_LIB_LEDS */ + + /* + * Detect if this device has an hardware controlled radio. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); + + /* + * Check if the BBP tuning should be enabled. 
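+ * A cleared EEPROM_ANTENNA_RX_AGCVGC_TUNING bit indicates the AGC/VGC + * registers should be left alone, so link tuning is flagged as + * disabled.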
+ */ + if (!rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_AGCVGC_TUNING)) + __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + + return 0; +} + +/* + * RF value list for RF2420 & RF2421 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_b[] = { + { 1, 0x00022058, 0x000c1fda, 0x00000101, 0 }, + { 2, 0x00022058, 0x000c1fee, 0x00000101, 0 }, + { 3, 0x00022058, 0x000c2002, 0x00000101, 0 }, + { 4, 0x00022058, 0x000c2016, 0x00000101, 0 }, + { 5, 0x00022058, 0x000c202a, 0x00000101, 0 }, + { 6, 0x00022058, 0x000c203e, 0x00000101, 0 }, + { 7, 0x00022058, 0x000c2052, 0x00000101, 0 }, + { 8, 0x00022058, 0x000c2066, 0x00000101, 0 }, + { 9, 0x00022058, 0x000c207a, 0x00000101, 0 }, + { 10, 0x00022058, 0x000c208e, 0x00000101, 0 }, + { 11, 0x00022058, 0x000c20a2, 0x00000101, 0 }, + { 12, 0x00022058, 0x000c20b6, 0x00000101, 0 }, + { 13, 0x00022058, 0x000c20ca, 0x00000101, 0 }, + { 14, 0x00022058, 0x000c20fa, 0x00000101, 0 }, +}; + +static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + struct channel_info *info; + char *tx_power; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_PS_NULLFUNC_STACK; + + SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Initialize hw_mode information. + */ + spec->supported_bands = SUPPORT_BAND_2GHZ; + spec->supported_rates = SUPPORT_RATE_CCK; + + spec->num_channels = ARRAY_SIZE(rf_vals_b); + spec->channels = rf_vals_b; + + /* + * Create channel information array + */ + info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + spec->channels_info = info; + + tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); + for (i = 0; i < 14; i++) + info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + + return 0; +} + +static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt2400pci_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2400pci_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + retval = rt2400pci_probe_hw_mode(rt2x00dev); + if (retval) + return retval; + + /* + * This device requires the atim queue and DMA-mapped skbs. + */ + __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. + */ +static int rt2400pci_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * We don't support variating cw_min and cw_max variables + * per queue. So by default we only configure the TX queue, + * and ignore all other configurations. + */ + if (queue != 0) + return -EINVAL; + + if (rt2x00mac_conf_tx(hw, queue, params)) + return -EINVAL; + + /* + * Write configuration to register. 
+ */ + rt2400pci_config_cw(rt2x00dev, + rt2x00dev->tx->cw_min, rt2x00dev->tx->cw_max); + + return 0; +} + +static u64 rt2400pci_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR17, ®); + tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32; + rt2x00pci_register_read(rt2x00dev, CSR16, ®); + tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER); + + return tsf; +} + +static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR15, ®); + return rt2x00_get_field32(reg, CSR15_BEACON_SENT); +} + +static const struct ieee80211_ops rt2400pci_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .configure_filter = rt2x00mac_configure_filter, + .set_tim = rt2x00mac_set_tim, + .get_stats = rt2x00mac_get_stats, + .bss_info_changed = rt2x00mac_bss_info_changed, + .conf_tx = rt2400pci_conf_tx, + .get_tsf = rt2400pci_get_tsf, + .tx_last_beacon = rt2400pci_tx_last_beacon, + .rfkill_poll = rt2x00mac_rfkill_poll, +}; + +static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { + .irq_handler = rt2400pci_interrupt, + .probe_hw = rt2400pci_probe_hw, + .initialize = rt2x00pci_initialize, + .uninitialize = rt2x00pci_uninitialize, + .get_entry_state = rt2400pci_get_entry_state, + .clear_entry = rt2400pci_clear_entry, + .set_device_state = rt2400pci_set_device_state, + .rfkill_poll = rt2400pci_rfkill_poll, + .link_stats = rt2400pci_link_stats, + .reset_tuner = rt2400pci_reset_tuner, + .link_tuner = rt2400pci_link_tuner, + .write_tx_desc = rt2400pci_write_tx_desc, + .write_tx_data = rt2x00pci_write_tx_data, + .write_beacon = rt2400pci_write_beacon, + .kick_tx_queue = rt2400pci_kick_tx_queue, + .kill_tx_queue = rt2400pci_kill_tx_queue, + .fill_rxdone = rt2400pci_fill_rxdone, + .config_filter = rt2400pci_config_filter, + .config_intf = rt2400pci_config_intf, + .config_erp = rt2400pci_config_erp, + .config_ant = rt2400pci_config_ant, + .config = rt2400pci_config, +}; + +static const struct data_queue_desc rt2400pci_queue_rx = { + .entry_num = RX_ENTRIES, + .data_size = DATA_FRAME_SIZE, + .desc_size = RXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_pci), +}; + +static const struct data_queue_desc rt2400pci_queue_tx = { + .entry_num = TX_ENTRIES, + .data_size = DATA_FRAME_SIZE, + .desc_size = TXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_pci), +}; + +static const struct data_queue_desc rt2400pci_queue_bcn = { + .entry_num = BEACON_ENTRIES, + .data_size = MGMT_FRAME_SIZE, + .desc_size = TXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_pci), +}; + +static const struct data_queue_desc rt2400pci_queue_atim = { + .entry_num = ATIM_ENTRIES, + .data_size = DATA_FRAME_SIZE, + .desc_size = TXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_pci), +}; + +static const struct rt2x00_ops rt2400pci_ops = { + .name = KBUILD_MODNAME, + .max_sta_intf = 1, + .max_ap_intf = 1, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .tx_queues = NUM_TX_QUEUES, + .extra_tx_headroom = 0, + .rx = &rt2400pci_queue_rx, + .tx = &rt2400pci_queue_tx, + .bcn = &rt2400pci_queue_bcn, + .atim = &rt2400pci_queue_atim, + .lib = &rt2400pci_rt2x00_ops, + .hw = &rt2400pci_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = 
&rt2400pci_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * RT2400pci module information. + */ +static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = { + { PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2460 PCI & PCMCIA chipset based cards"); +MODULE_DEVICE_TABLE(pci, rt2400pci_device_table); +MODULE_LICENSE("GPL"); + +static struct pci_driver rt2400pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rt2400pci_device_table, + .probe = rt2x00pci_probe, + .remove = __devexit_p(rt2x00pci_remove), + .suspend = rt2x00pci_suspend, + .resume = rt2x00pci_resume, +}; + +static int __init rt2400pci_init(void) +{ + return pci_register_driver(&rt2400pci_driver); +} + +static void __exit rt2400pci_exit(void) +{ + pci_unregister_driver(&rt2400pci_driver); +} + +module_init(rt2400pci_init); +module_exit(rt2400pci_exit); diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h new file mode 100644 index 0000000..c048b18 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2400pci.h @@ -0,0 +1,953 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2400pci + Abstract: Data structures and registers for the rt2400pci module. + Supported chipsets: RT2460. + */ + +#ifndef RT2400PCI_H +#define RT2400PCI_H + +/* + * RF chip defines. + */ +#define RF2420 0x0000 +#define RF2421 0x0001 + +/* + * Signal information. + * Default offset is required for RSSI <-> dBm conversion. + */ +#define DEFAULT_RSSI_OFFSET 100 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x0000 +#define CSR_REG_SIZE 0x014c +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0100 +#define BBP_BASE 0x0000 +#define BBP_SIZE 0x0020 +#define RF_BASE 0x0004 +#define RF_SIZE 0x000c + +/* + * Number of TX queues. + */ +#define NUM_TX_QUEUES 2 + +/* + * Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * CSR0: ASIC revision number. + */ +#define CSR0 0x0000 +#define CSR0_REVISION FIELD32(0x0000ffff) + +/* + * CSR1: System control register. + * SOFT_RESET: Software reset, 1: reset, 0: normal. + * BBP_RESET: Hardware reset, 1: reset, 0, release. + * HOST_READY: Host ready after initialization. + */ +#define CSR1 0x0004 +#define CSR1_SOFT_RESET FIELD32(0x00000001) +#define CSR1_BBP_RESET FIELD32(0x00000002) +#define CSR1_HOST_READY FIELD32(0x00000004) + +/* + * CSR2: System admin status register (invalid). + */ +#define CSR2 0x0008 + +/* + * CSR3: STA MAC address register 0. 
+ */ +#define CSR3 0x000c +#define CSR3_BYTE0 FIELD32(0x000000ff) +#define CSR3_BYTE1 FIELD32(0x0000ff00) +#define CSR3_BYTE2 FIELD32(0x00ff0000) +#define CSR3_BYTE3 FIELD32(0xff000000) + +/* + * CSR4: STA MAC address register 1. + */ +#define CSR4 0x0010 +#define CSR4_BYTE4 FIELD32(0x000000ff) +#define CSR4_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR5: BSSID register 0. + */ +#define CSR5 0x0014 +#define CSR5_BYTE0 FIELD32(0x000000ff) +#define CSR5_BYTE1 FIELD32(0x0000ff00) +#define CSR5_BYTE2 FIELD32(0x00ff0000) +#define CSR5_BYTE3 FIELD32(0xff000000) + +/* + * CSR6: BSSID register 1. + */ +#define CSR6 0x0018 +#define CSR6_BYTE4 FIELD32(0x000000ff) +#define CSR6_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR7: Interrupt source register. + * Write 1 to clear interrupt. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + */ +#define CSR7 0x001c +#define CSR7_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR7_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR7_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR7_TXDONE_TXRING FIELD32(0x00000008) +#define CSR7_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR7_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR7_RXDONE FIELD32(0x00000040) + +/* + * CSR8: Interrupt mask register. + * Write 1 to mask interrupt. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + */ +#define CSR8 0x0020 +#define CSR8_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR8_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR8_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR8_TXDONE_TXRING FIELD32(0x00000008) +#define CSR8_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR8_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR8_RXDONE FIELD32(0x00000040) + +/* + * CSR9: Maximum frame length register. + * MAX_FRAME_UNIT: Maximum frame length in 128b unit, default: 12. + */ +#define CSR9 0x0024 +#define CSR9_MAX_FRAME_UNIT FIELD32(0x00000f80) + +/* + * CSR11: Back-off control register. + * CWMIN: CWmin. Default cwmin is 31 (2^5 - 1). + * CWMAX: CWmax. Default cwmax is 1023 (2^10 - 1). + * SLOT_TIME: Slot time, default is 20us for 802.11b. + * LONG_RETRY: Long retry count. + * SHORT_RETRY: Short retry count. + */ +#define CSR11 0x002c +#define CSR11_CWMIN FIELD32(0x0000000f) +#define CSR11_CWMAX FIELD32(0x000000f0) +#define CSR11_SLOT_TIME FIELD32(0x00001f00) +#define CSR11_LONG_RETRY FIELD32(0x00ff0000) +#define CSR11_SHORT_RETRY FIELD32(0xff000000) + +/* + * CSR12: Synchronization configuration register 0. + * All units in 1/16 TU. + * BEACON_INTERVAL: Beacon interval, default is 100 TU. + * CFPMAX_DURATION: Cfp maximum duration, default is 100 TU. + */ +#define CSR12 0x0030 +#define CSR12_BEACON_INTERVAL FIELD32(0x0000ffff) +#define CSR12_CFP_MAX_DURATION FIELD32(0xffff0000) + +/* + * CSR13: Synchronization configuration register 1. + * All units in 1/16 TU. + * ATIMW_DURATION: Atim window duration. + * CFP_PERIOD: Cfp period, default is 0 TU. 
+ */ +#define CSR13 0x0034 +#define CSR13_ATIMW_DURATION FIELD32(0x0000ffff) +#define CSR13_CFP_PERIOD FIELD32(0x00ff0000) + +/* + * CSR14: Synchronization control register. + * TSF_COUNT: Enable tsf auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * TBCN: Enable tbcn with reload value. + * TCFP: Enable tcfp & cfp / cp switching. + * TATIMW: Enable tatimw & atim window switching. + * BEACON_GEN: Enable beacon generator. + * CFP_COUNT_PRELOAD: Cfp count preload value. + * TBCM_PRELOAD: Tbcn preload value in units of 64us. + */ +#define CSR14 0x0038 +#define CSR14_TSF_COUNT FIELD32(0x00000001) +#define CSR14_TSF_SYNC FIELD32(0x00000006) +#define CSR14_TBCN FIELD32(0x00000008) +#define CSR14_TCFP FIELD32(0x00000010) +#define CSR14_TATIMW FIELD32(0x00000020) +#define CSR14_BEACON_GEN FIELD32(0x00000040) +#define CSR14_CFP_COUNT_PRELOAD FIELD32(0x0000ff00) +#define CSR14_TBCM_PRELOAD FIELD32(0xffff0000) + +/* + * CSR15: Synchronization status register. + * CFP: ASIC is in contention-free period. + * ATIMW: ASIC is in ATIM window. + * BEACON_SENT: Beacon is send. + */ +#define CSR15 0x003c +#define CSR15_CFP FIELD32(0x00000001) +#define CSR15_ATIMW FIELD32(0x00000002) +#define CSR15_BEACON_SENT FIELD32(0x00000004) + +/* + * CSR16: TSF timer register 0. + */ +#define CSR16 0x0040 +#define CSR16_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR17: TSF timer register 1. + */ +#define CSR17 0x0044 +#define CSR17_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR18: IFS timer register 0. + * SIFS: Sifs, default is 10 us. + * PIFS: Pifs, default is 30 us. + */ +#define CSR18 0x0048 +#define CSR18_SIFS FIELD32(0x0000ffff) +#define CSR18_PIFS FIELD32(0xffff0000) + +/* + * CSR19: IFS timer register 1. + * DIFS: Difs, default is 50 us. + * EIFS: Eifs, default is 364 us. + */ +#define CSR19 0x004c +#define CSR19_DIFS FIELD32(0x0000ffff) +#define CSR19_EIFS FIELD32(0xffff0000) + +/* + * CSR20: Wakeup timer register. + * DELAY_AFTER_TBCN: Delay after tbcn expired in units of 1/16 TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * AUTOWAKE: Enable auto wakeup / sleep mechanism. + */ +#define CSR20 0x0050 +#define CSR20_DELAY_AFTER_TBCN FIELD32(0x0000ffff) +#define CSR20_TBCN_BEFORE_WAKEUP FIELD32(0x00ff0000) +#define CSR20_AUTOWAKE FIELD32(0x01000000) + +/* + * CSR21: EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE_93C46: 1: 93c46, 0:93c66. + */ +#define CSR21 0x0054 +#define CSR21_RELOAD FIELD32(0x00000001) +#define CSR21_EEPROM_DATA_CLOCK FIELD32(0x00000002) +#define CSR21_EEPROM_CHIP_SELECT FIELD32(0x00000004) +#define CSR21_EEPROM_DATA_IN FIELD32(0x00000008) +#define CSR21_EEPROM_DATA_OUT FIELD32(0x00000010) +#define CSR21_TYPE_93C46 FIELD32(0x00000020) + +/* + * CSR22: CFP control register. + * CFP_DURATION_REMAIN: Cfp duration remain, in units of TU. + * RELOAD_CFP_DURATION: Write 1 to reload cfp duration remain. + */ +#define CSR22 0x0058 +#define CSR22_CFP_DURATION_REMAIN FIELD32(0x0000ffff) +#define CSR22_RELOAD_CFP_DURATION FIELD32(0x00010000) + +/* + * Transmit related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXCSR0: TX Control Register. + * KICK_TX: Kick tx ring. + * KICK_ATIM: Kick atim ring. + * KICK_PRIO: Kick priority ring. + * ABORT: Abort all transmit related ring operation. 
+ */ +#define TXCSR0 0x0060 +#define TXCSR0_KICK_TX FIELD32(0x00000001) +#define TXCSR0_KICK_ATIM FIELD32(0x00000002) +#define TXCSR0_KICK_PRIO FIELD32(0x00000004) +#define TXCSR0_ABORT FIELD32(0x00000008) + +/* + * TXCSR1: TX Configuration Register. + * ACK_TIMEOUT: Ack timeout, default = sifs + 2*slottime + acktime @ 1mbps. + * ACK_CONSUME_TIME: Ack consume time, default = sifs + acktime @ 1mbps. + * TSF_OFFSET: Insert tsf offset. + * AUTORESPONDER: Enable auto responder which include ack & cts. + */ +#define TXCSR1 0x0064 +#define TXCSR1_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXCSR1_ACK_CONSUME_TIME FIELD32(0x0003fe00) +#define TXCSR1_TSF_OFFSET FIELD32(0x00fc0000) +#define TXCSR1_AUTORESPONDER FIELD32(0x01000000) + +/* + * TXCSR2: Tx descriptor configuration register. + * TXD_SIZE: Tx descriptor size, default is 48. + * NUM_TXD: Number of tx entries in ring. + * NUM_ATIM: Number of atim entries in ring. + * NUM_PRIO: Number of priority entries in ring. + */ +#define TXCSR2 0x0068 +#define TXCSR2_TXD_SIZE FIELD32(0x000000ff) +#define TXCSR2_NUM_TXD FIELD32(0x0000ff00) +#define TXCSR2_NUM_ATIM FIELD32(0x00ff0000) +#define TXCSR2_NUM_PRIO FIELD32(0xff000000) + +/* + * TXCSR3: TX Ring Base address register. + */ +#define TXCSR3 0x006c +#define TXCSR3_TX_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR4: TX Atim Ring Base address register. + */ +#define TXCSR4 0x0070 +#define TXCSR4_ATIM_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR5: TX Prio Ring Base address register. + */ +#define TXCSR5 0x0074 +#define TXCSR5_PRIO_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR6: Beacon Base address register. + */ +#define TXCSR6 0x0078 +#define TXCSR6_BEACON_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR7: Auto responder control register. + * AR_POWERMANAGEMENT: Auto responder power management bit. + */ +#define TXCSR7 0x007c +#define TXCSR7_AR_POWERMANAGEMENT FIELD32(0x00000001) + +/* + * Receive related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * RXCSR0: RX Control Register. + * DISABLE_RX: Disable rx engine. + * DROP_CRC: Drop crc error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TODS: Drop frame tods bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * PASS_CRC: Pass all packets with crc attached. + */ +#define RXCSR0 0x0080 +#define RXCSR0_DISABLE_RX FIELD32(0x00000001) +#define RXCSR0_DROP_CRC FIELD32(0x00000002) +#define RXCSR0_DROP_PHYSICAL FIELD32(0x00000004) +#define RXCSR0_DROP_CONTROL FIELD32(0x00000008) +#define RXCSR0_DROP_NOT_TO_ME FIELD32(0x00000010) +#define RXCSR0_DROP_TODS FIELD32(0x00000020) +#define RXCSR0_DROP_VERSION_ERROR FIELD32(0x00000040) +#define RXCSR0_PASS_CRC FIELD32(0x00000080) + +/* + * RXCSR1: RX descriptor configuration register. + * RXD_SIZE: Rx descriptor size, default is 32b. + * NUM_RXD: Number of rx entries in ring. + */ +#define RXCSR1 0x0084 +#define RXCSR1_RXD_SIZE FIELD32(0x000000ff) +#define RXCSR1_NUM_RXD FIELD32(0x0000ff00) + +/* + * RXCSR2: RX Ring base address register. + */ +#define RXCSR2 0x0088 +#define RXCSR2_RX_RING_REGISTER FIELD32(0xffffffff) + +/* + * RXCSR3: BBP ID register for Rx operation. + * BBP_ID#: BBP register # id. + * BBP_ID#_VALID: BBP register # id is valid or not. 
+ */ +#define RXCSR3 0x0090 +#define RXCSR3_BBP_ID0 FIELD32(0x0000007f) +#define RXCSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define RXCSR3_BBP_ID1 FIELD32(0x00007f00) +#define RXCSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define RXCSR3_BBP_ID2 FIELD32(0x007f0000) +#define RXCSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define RXCSR3_BBP_ID3 FIELD32(0x7f000000) +#define RXCSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * RXCSR4: BBP ID register for Rx operation. + * BBP_ID#: BBP register # id. + * BBP_ID#_VALID: BBP register # id is valid or not. + */ +#define RXCSR4 0x0094 +#define RXCSR4_BBP_ID4 FIELD32(0x0000007f) +#define RXCSR4_BBP_ID4_VALID FIELD32(0x00000080) +#define RXCSR4_BBP_ID5 FIELD32(0x00007f00) +#define RXCSR4_BBP_ID5_VALID FIELD32(0x00008000) + +/* + * ARCSR0: Auto Responder PLCP config register 0. + * ARCSR0_AR_BBP_DATA#: Auto responder BBP register # data. + * ARCSR0_AR_BBP_ID#: Auto responder BBP register # Id. + */ +#define ARCSR0 0x0098 +#define ARCSR0_AR_BBP_DATA0 FIELD32(0x000000ff) +#define ARCSR0_AR_BBP_ID0 FIELD32(0x0000ff00) +#define ARCSR0_AR_BBP_DATA1 FIELD32(0x00ff0000) +#define ARCSR0_AR_BBP_ID1 FIELD32(0xff000000) + +/* + * ARCSR1: Auto Responder PLCP config register 1. + * ARCSR0_AR_BBP_DATA#: Auto responder BBP register # data. + * ARCSR0_AR_BBP_ID#: Auto responder BBP register # Id. + */ +#define ARCSR1 0x009c +#define ARCSR1_AR_BBP_DATA2 FIELD32(0x000000ff) +#define ARCSR1_AR_BBP_ID2 FIELD32(0x0000ff00) +#define ARCSR1_AR_BBP_DATA3 FIELD32(0x00ff0000) +#define ARCSR1_AR_BBP_ID3 FIELD32(0xff000000) + +/* + * Miscellaneous Registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * PCICSR: PCI control register. + * BIG_ENDIAN: 1: big endian, 0: little endian. + * RX_TRESHOLD: Rx threshold in dw to start pci access + * 0: 16dw (default), 1: 8dw, 2: 4dw, 3: 32dw. + * TX_TRESHOLD: Tx threshold in dw to start pci access + * 0: 0dw (default), 1: 1dw, 2: 4dw, 3: forward. + * BURST_LENTH: Pci burst length 0: 4dw (default, 1: 8dw, 2: 16dw, 3:32dw. + * ENABLE_CLK: Enable clk_run, pci clock can't going down to non-operational. + */ +#define PCICSR 0x008c +#define PCICSR_BIG_ENDIAN FIELD32(0x00000001) +#define PCICSR_RX_TRESHOLD FIELD32(0x00000006) +#define PCICSR_TX_TRESHOLD FIELD32(0x00000018) +#define PCICSR_BURST_LENTH FIELD32(0x00000060) +#define PCICSR_ENABLE_CLK FIELD32(0x00000080) + +/* + * CNT0: FCS error count. + * FCS_ERROR: FCS error count, cleared when read. + */ +#define CNT0 0x00a0 +#define CNT0_FCS_ERROR FIELD32(0x0000ffff) + +/* + * Statistic Register. + * CNT1: PLCP error count. + * CNT2: Long error count. + * CNT3: CCA false alarm count. + * CNT4: Rx FIFO overflow count. + * CNT5: Tx FIFO underrun count. + */ +#define TIMECSR2 0x00a8 +#define CNT1 0x00ac +#define CNT2 0x00b0 +#define TIMECSR3 0x00b4 +#define CNT3 0x00b8 +#define CNT4 0x00bc +#define CNT5 0x00c0 + +/* + * Baseband Control Register. + */ + +/* + * PWRCSR0: Power mode configuration register. + */ +#define PWRCSR0 0x00c4 + +/* + * Power state transition time registers. + */ +#define PSCSR0 0x00c8 +#define PSCSR1 0x00cc +#define PSCSR2 0x00d0 +#define PSCSR3 0x00d4 + +/* + * PWRCSR1: Manual power control / status register. + * Allowed state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake. + * SET_STATE: Set state. Write 1 to trigger, self cleared. + * BBP_DESIRE_STATE: BBP desired state. + * RF_DESIRE_STATE: RF desired state. + * BBP_CURR_STATE: BBP current state. + * RF_CURR_STATE: RF current state. + * PUT_TO_SLEEP: Put to sleep. Write 1 to trigger, self cleared. 
+ */ +#define PWRCSR1 0x00d8 +#define PWRCSR1_SET_STATE FIELD32(0x00000001) +#define PWRCSR1_BBP_DESIRE_STATE FIELD32(0x00000006) +#define PWRCSR1_RF_DESIRE_STATE FIELD32(0x00000018) +#define PWRCSR1_BBP_CURR_STATE FIELD32(0x00000060) +#define PWRCSR1_RF_CURR_STATE FIELD32(0x00000180) +#define PWRCSR1_PUT_TO_SLEEP FIELD32(0x00000200) + +/* + * TIMECSR: Timer control register. + * US_COUNT: 1 us timer count in units of clock cycles. + * US_64_COUNT: 64 us timer count in units of 1 us timer. + * BEACON_EXPECT: Beacon expect window. + */ +#define TIMECSR 0x00dc +#define TIMECSR_US_COUNT FIELD32(0x000000ff) +#define TIMECSR_US_64_COUNT FIELD32(0x0000ff00) +#define TIMECSR_BEACON_EXPECT FIELD32(0x00070000) + +/* + * MACCSR0: MAC configuration register 0. + */ +#define MACCSR0 0x00e0 + +/* + * MACCSR1: MAC configuration register 1. + * KICK_RX: Kick one-shot rx in one-shot rx mode. + * ONESHOT_RXMODE: Enable one-shot rx mode for debugging. + * BBPRX_RESET_MODE: Ralink bbp rx reset mode. + * AUTO_TXBBP: Auto tx logic access bbp control register. + * AUTO_RXBBP: Auto rx logic access bbp control register. + * LOOPBACK: Loopback mode. 0: normal, 1: internal, 2: external, 3:rsvd. + * INTERSIL_IF: Intersil if calibration pin. + */ +#define MACCSR1 0x00e4 +#define MACCSR1_KICK_RX FIELD32(0x00000001) +#define MACCSR1_ONESHOT_RXMODE FIELD32(0x00000002) +#define MACCSR1_BBPRX_RESET_MODE FIELD32(0x00000004) +#define MACCSR1_AUTO_TXBBP FIELD32(0x00000008) +#define MACCSR1_AUTO_RXBBP FIELD32(0x00000010) +#define MACCSR1_LOOPBACK FIELD32(0x00000060) +#define MACCSR1_INTERSIL_IF FIELD32(0x00000080) + +/* + * RALINKCSR: Ralink Rx auto-reset BBCR. + * AR_BBP_DATA#: Auto reset BBP register # data. + * AR_BBP_ID#: Auto reset BBP register # id. + */ +#define RALINKCSR 0x00e8 +#define RALINKCSR_AR_BBP_DATA0 FIELD32(0x000000ff) +#define RALINKCSR_AR_BBP_ID0 FIELD32(0x0000ff00) +#define RALINKCSR_AR_BBP_DATA1 FIELD32(0x00ff0000) +#define RALINKCSR_AR_BBP_ID1 FIELD32(0xff000000) + +/* + * BCNCSR: Beacon interval control register. + * CHANGE: Write one to change beacon interval. + * DELTATIME: The delta time value. + * NUM_BEACON: Number of beacon according to mode. + * MODE: Please refer to asic specs. + * PLUS: Plus or minus delta time value. + */ +#define BCNCSR 0x00ec +#define BCNCSR_CHANGE FIELD32(0x00000001) +#define BCNCSR_DELTATIME FIELD32(0x0000001e) +#define BCNCSR_NUM_BEACON FIELD32(0x00001fe0) +#define BCNCSR_MODE FIELD32(0x00006000) +#define BCNCSR_PLUS FIELD32(0x00008000) + +/* + * BBP / RF / IF Control Register. + */ + +/* + * BBPCSR: BBP serial control register. + * VALUE: Register value to program into BBP. + * REGNUM: Selected BBP register. + * BUSY: 1: asic is busy execute BBP programming. + * WRITE_CONTROL: 1: write BBP, 0: read BBP. + */ +#define BBPCSR 0x00f0 +#define BBPCSR_VALUE FIELD32(0x000000ff) +#define BBPCSR_REGNUM FIELD32(0x00007f00) +#define BBPCSR_BUSY FIELD32(0x00008000) +#define BBPCSR_WRITE_CONTROL FIELD32(0x00010000) + +/* + * RFCSR: RF serial control register. + * VALUE: Register value + id to program into rf/if. + * NUMBER_OF_BITS: Number of bits used in value (i:20, rfmd:22). + * IF_SELECT: Chip to program: 0: rf, 1: if. + * PLL_LD: Rf pll_ld status. + * BUSY: 1: asic is busy execute rf programming. + */ +#define RFCSR 0x00f4 +#define RFCSR_VALUE FIELD32(0x00ffffff) +#define RFCSR_NUMBER_OF_BITS FIELD32(0x1f000000) +#define RFCSR_IF_SELECT FIELD32(0x20000000) +#define RFCSR_PLL_LD FIELD32(0x40000000) +#define RFCSR_BUSY FIELD32(0x80000000) + +/* + * LEDCSR: LED control register. 
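Illustrative sketch, not part of the driver sources: a BBP access is performed by composing a single BBPCSR word from the fields defined above and letting the ASIC serialise it; rt2500pci_bbp_write() later in this patch does exactly this through rt2x00_set_field32(). The snippet below builds the same word with explicit shifts so the field layout is visible.

/* Compose a BBPCSR write command by hand (bit positions per the masks above). */
static uint32_t bbpcsr_write_word(uint8_t regnum, uint8_t value)
{
	uint32_t reg = 0;

	reg |= value;			/* BBPCSR_VALUE:         bits  7:0          */
	reg |= (regnum & 0x7f) << 8;	/* BBPCSR_REGNUM:        bits 14:8          */
	reg |= 1u << 15;		/* BBPCSR_BUSY:          kick the access     */
	reg |= 1u << 16;		/* BBPCSR_WRITE_CONTROL: 1 = write, 0 = read */

	return reg;
}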
+ * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + * LINK: 0: linkoff, 1: linkup. + * ACTIVITY: 0: idle, 1: active. + */ +#define LEDCSR 0x00f8 +#define LEDCSR_ON_PERIOD FIELD32(0x000000ff) +#define LEDCSR_OFF_PERIOD FIELD32(0x0000ff00) +#define LEDCSR_LINK FIELD32(0x00010000) +#define LEDCSR_ACTIVITY FIELD32(0x00020000) + +/* + * ASIC pointer information. + * RXPTR: Current RX ring address. + * TXPTR: Current Tx ring address. + * PRIPTR: Current Priority ring address. + * ATIMPTR: Current ATIM ring address. + */ +#define RXPTR 0x0100 +#define TXPTR 0x0104 +#define PRIPTR 0x0108 +#define ATIMPTR 0x010c + +/* + * GPIO and others. + */ + +/* + * GPIOCSR: GPIO control register. + */ +#define GPIOCSR 0x0120 +#define GPIOCSR_BIT0 FIELD32(0x00000001) +#define GPIOCSR_BIT1 FIELD32(0x00000002) +#define GPIOCSR_BIT2 FIELD32(0x00000004) +#define GPIOCSR_BIT3 FIELD32(0x00000008) +#define GPIOCSR_BIT4 FIELD32(0x00000010) +#define GPIOCSR_BIT5 FIELD32(0x00000020) +#define GPIOCSR_BIT6 FIELD32(0x00000040) +#define GPIOCSR_BIT7 FIELD32(0x00000080) + +/* + * BBPPCSR: BBP Pin control register. + */ +#define BBPPCSR 0x0124 + +/* + * BCNCSR1: Tx BEACON offset time control register. + * PRELOAD: Beacon timer offset in units of usec. + */ +#define BCNCSR1 0x0130 +#define BCNCSR1_PRELOAD FIELD32(0x0000ffff) + +/* + * MACCSR2: TX_PE to RX_PE turn-around time control register + * DELAY: RX_PE low width, in units of pci clock cycle. + */ +#define MACCSR2 0x0134 +#define MACCSR2_DELAY FIELD32(0x000000ff) + +/* + * ARCSR2: 1 Mbps ACK/CTS PLCP. + */ +#define ARCSR2 0x013c +#define ARCSR2_SIGNAL FIELD32(0x000000ff) +#define ARCSR2_SERVICE FIELD32(0x0000ff00) +#define ARCSR2_LENGTH_LOW FIELD32(0x00ff0000) +#define ARCSR2_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR3: 2 Mbps ACK/CTS PLCP. + */ +#define ARCSR3 0x0140 +#define ARCSR3_SIGNAL FIELD32(0x000000ff) +#define ARCSR3_SERVICE FIELD32(0x0000ff00) +#define ARCSR3_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR4: 5.5 Mbps ACK/CTS PLCP. + */ +#define ARCSR4 0x0144 +#define ARCSR4_SIGNAL FIELD32(0x000000ff) +#define ARCSR4_SERVICE FIELD32(0x0000ff00) +#define ARCSR4_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR5: 11 Mbps ACK/CTS PLCP. + */ +#define ARCSR5 0x0148 +#define ARCSR5_SIGNAL FIELD32(0x000000ff) +#define ARCSR5_SERVICE FIELD32(0x0000ff00) +#define ARCSR5_LENGTH FIELD32(0xffff0000) + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R1: TX antenna control + */ +#define BBP_R1_TX_ANTENNA FIELD8(0x03) + +/* + * R4: RX antenna control + */ +#define BBP_R4_RX_ANTENNA FIELD8(0x06) + +/* + * RF registers + */ + +/* + * RF 1 + */ +#define RF1_TUNER FIELD32(0x00020000) + +/* + * RF 3 + */ +#define RF3_TUNER FIELD32(0x00000100) +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RF_TYPE: Rf_type of this adapter. 
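Illustrative sketch, not part of the driver sources: the MAC address is spread across the three 16-bit EEPROM words at offsets 0x0002..0x0004, with the BYTE0/BYTE1 style masks above selecting the low and high byte of each word. A hypothetical decoder:

/* words[0..2] hold EEPROM offsets 0x0002..0x0004 (16 bits each). */
static void eeprom_words_to_mac(const uint16_t words[3], uint8_t mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		mac[2 * i]     = words[i] & 0x00ff;	/* ..._BYTE0/2/4 (low byte)  */
		mac[2 * i + 1] = words[i] >> 8;		/* ..._BYTE1/3/5 (high byte) */
	}
}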
+ * LED_MODE: 0: default, 1: TX/RX activity,2: Single (ignore link), 3: rsvd. + * RX_AGCVGC: 0: disable, 1:enable BBP R13 tuning. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + */ +#define EEPROM_ANTENNA 0x0b +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0040) +#define EEPROM_ANTENNA_LED_MODE FIELD16(0x0180) +#define EEPROM_ANTENNA_RX_AGCVGC_TUNING FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x0c +#define EEPROM_BBP_SIZE 7 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER + */ +#define EEPROM_TXPOWER_START 0x13 +#define EEPROM_TXPOWER_SIZE 7 +#define EEPROM_TXPOWER_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_2 FIELD16(0xff00) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 8 * sizeof(__le32) ) +#define RXD_DESC_SIZE ( 8 * sizeof(__le32) ) + +/* + * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. + */ + +/* + * Word0 + */ +#define TXD_W0_OWNER_NIC FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_RESULT FIELD32(0x0000001c) +#define TXD_W0_RETRY_COUNT FIELD32(0x000000e0) +#define TXD_W0_MORE_FRAG FIELD32(0x00000100) +#define TXD_W0_ACK FIELD32(0x00000200) +#define TXD_W0_TIMESTAMP FIELD32(0x00000400) +#define TXD_W0_RTS FIELD32(0x00000800) +#define TXD_W0_IFS FIELD32(0x00006000) +#define TXD_W0_RETRY_MODE FIELD32(0x00008000) +#define TXD_W0_AGC FIELD32(0x00ff0000) +#define TXD_W0_R2 FIELD32(0xff000000) + +/* + * Word1 + */ +#define TXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define TXD_W2_BUFFER_LENGTH FIELD32(0x0000ffff) +#define TXD_W2_DATABYTE_COUNT FIELD32(0xffff0000) + +/* + * Word3 & 4: PLCP information + * The PLCP values should be treated as if they were BBP values. + */ +#define TXD_W3_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W3_PLCP_SIGNAL_REGNUM FIELD32(0x00007f00) +#define TXD_W3_PLCP_SIGNAL_BUSY FIELD32(0x00008000) +#define TXD_W3_PLCP_SERVICE FIELD32(0x00ff0000) +#define TXD_W3_PLCP_SERVICE_REGNUM FIELD32(0x7f000000) +#define TXD_W3_PLCP_SERVICE_BUSY FIELD32(0x80000000) + +#define TXD_W4_PLCP_LENGTH_LOW FIELD32(0x000000ff) +#define TXD_W3_PLCP_LENGTH_LOW_REGNUM FIELD32(0x00007f00) +#define TXD_W3_PLCP_LENGTH_LOW_BUSY FIELD32(0x00008000) +#define TXD_W4_PLCP_LENGTH_HIGH FIELD32(0x00ff0000) +#define TXD_W3_PLCP_LENGTH_HIGH_REGNUM FIELD32(0x7f000000) +#define TXD_W3_PLCP_LENGTH_HIGH_BUSY FIELD32(0x80000000) + +/* + * Word5 + */ +#define TXD_W5_BBCR4 FIELD32(0x0000ffff) +#define TXD_W5_AGC_REG FIELD32(0x007f0000) +#define TXD_W5_AGC_REG_VALID FIELD32(0x00800000) +#define TXD_W5_XXX_REG FIELD32(0x7f000000) +#define TXD_W5_XXX_REG_VALID FIELD32(0x80000000) + +/* + * Word6 + */ +#define TXD_W6_SK_BUFF FIELD32(0xffffffff) + +/* + * Word7 + */ +#define TXD_W7_RESERVED FIELD32(0xffffffff) + +/* + * RX descriptor format for RX Ring. 
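Illustrative sketch, not part of the driver sources: the tx power calibration block is seven 16-bit words starting at offset 0x13, each carrying two per-channel bytes, i.e. fourteen values (presumably one per 2.4 GHz channel; how the driver consumes them is outside this hunk). The decode itself is just the two FIELD16 masks above:

/* words[] holds EEPROM offsets 0x13..0x19; txpower[] receives 14 entries. */
static void eeprom_decode_txpower(const uint16_t words[7], uint8_t txpower[14])
{
	int i;

	for (i = 0; i < 7; i++) {
		txpower[2 * i]     = words[i] & 0x00ff;	/* EEPROM_TXPOWER_1 */
		txpower[2 * i + 1] = words[i] >> 8;	/* EEPROM_TXPOWER_2 */
	}
}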
+ */ + +/* + * Word0 + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000002) +#define RXD_W0_MULTICAST FIELD32(0x00000004) +#define RXD_W0_BROADCAST FIELD32(0x00000008) +#define RXD_W0_MY_BSS FIELD32(0x00000010) +#define RXD_W0_CRC_ERROR FIELD32(0x00000020) +#define RXD_W0_PHYSICAL_ERROR FIELD32(0x00000080) +#define RXD_W0_DATABYTE_COUNT FIELD32(0xffff0000) + +/* + * Word1 + */ +#define RXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define RXD_W2_BUFFER_LENGTH FIELD32(0x0000ffff) +#define RXD_W2_BBR0 FIELD32(0x00ff0000) +#define RXD_W2_SIGNAL FIELD32(0xff000000) + +/* + * Word3 + */ +#define RXD_W3_RSSI FIELD32(0x000000ff) +#define RXD_W3_BBR3 FIELD32(0x0000ff00) +#define RXD_W3_BBR4 FIELD32(0x00ff0000) +#define RXD_W3_BBR5 FIELD32(0xff000000) + +/* + * Word4 + */ +#define RXD_W4_RX_END_TIME FIELD32(0xffffffff) + +/* + * Word5 & 6 & 7: Reserved + */ +#define RXD_W5_RESERVED FIELD32(0xffffffff) +#define RXD_W6_RESERVED FIELD32(0xffffffff) +#define RXD_W7_RESERVED FIELD32(0xffffffff) + +/* + * Macros for converting txpower from EEPROM to mac80211 value + * and from mac80211 value to register value. + * NOTE: Logics in rt2400pci for txpower are reversed + * compared to the other rt2x00 drivers. A higher txpower + * value means that the txpower must be lowered. This is + * important when converting the value coming from the + * mac80211 stack to the rt2400 acceptable value. + */ +#define MIN_TXPOWER 31 +#define MAX_TXPOWER 62 +#define DEFAULT_TXPOWER 39 + +#define __CLAMP_TX(__txpower) \ + clamp_t(char, (__txpower), MIN_TXPOWER, MAX_TXPOWER) + +#define TXPOWER_FROM_DEV(__txpower) \ + ((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER) + +#define TXPOWER_TO_DEV(__txpower) \ + MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER) + +#endif /* RT2400PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c new file mode 100644 index 0000000..52bbcf1 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -0,0 +1,1974 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2500pci + Abstract: rt2500pci device specific routines. + Supported chipsets: RT2560. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00pci.h" +#include "rt2500pci.h" + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt2x00pci_register_read and rt2x00pci_register_write. + * BBP and RF register require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. + * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attampt. 
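Illustrative sketch, not part of the driver sources: the reversed tx power logic called out in the rt2400pci.h comment above is easiest to see by evaluating the macros: TXPOWER_TO_DEV(MIN_TXPOWER) yields MAX_TXPOWER and TXPOWER_TO_DEV(MAX_TXPOWER) yields MIN_TXPOWER, so a larger requested value produces a smaller register value. A plain-C restatement of TXPOWER_TO_DEV():

/* Plain-C restatement of the rt2400pci.h macro (MIN_TXPOWER 31, MAX_TXPOWER 62). */
static int rt2400_txpower_to_dev(int txpower)
{
	if (txpower < 31)		/* __CLAMP_TX() */
		txpower = 31;
	else if (txpower > 62)
		txpower = 62;

	return 62 - (txpower - 31);	/* 31 -> 62, 62 -> 31: reversed scale */
}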
When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. + */ +#define WAIT_FOR_BBP(__dev, __reg) \ + rt2x00pci_regbusy_read((__dev), BBPCSR, BBPCSR_BUSY, (__reg)) +#define WAIT_FOR_RF(__dev, __reg) \ + rt2x00pci_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg)) + +static void rt2500pci_bbp_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the BBP becomes available, afterwards we + * can safely write the new data into the register. + */ + if (WAIT_FOR_BBP(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, BBPCSR_VALUE, value); + rt2x00_set_field32(®, BBPCSR_REGNUM, word); + rt2x00_set_field32(®, BBPCSR_BUSY, 1); + rt2x00_set_field32(®, BBPCSR_WRITE_CONTROL, 1); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt2500pci_bbp_read(struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the BBP becomes available, afterwards we + * can safely write the read request into the register. + * After the data has been written, we wait until hardware + * returns the correct value, if at any time the register + * doesn't become available in time, reg will be 0xffffffff + * which means we return 0xff to the caller. + */ + if (WAIT_FOR_BBP(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, BBPCSR_REGNUM, word); + rt2x00_set_field32(®, BBPCSR_BUSY, 1); + rt2x00_set_field32(®, BBPCSR_WRITE_CONTROL, 0); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); + + WAIT_FOR_BBP(rt2x00dev, ®); + } + + *value = rt2x00_get_field32(reg, BBPCSR_VALUE); + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt2500pci_rf_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the RF becomes available, afterwards we + * can safely write the new data into the register. 
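Illustrative sketch, not part of the driver sources: WAIT_FOR_BBP()/WAIT_FOR_RF() above wrap rt2x00pci_regbusy_read(), which lives elsewhere in rt2x00pci.c. Its behaviour is, roughly, the bounded poll sketched below; the names, signature and return convention here are stand-ins, not the real helper.

/* Rough shape of a bounded busy-bit poll: read until BUSY clears or give up. */
static int poll_until_not_busy(uint32_t (*read_reg)(void *dev), void *dev,
			       uint32_t busy_mask, unsigned int max_tries,
			       uint32_t *last)
{
	unsigned int i;

	for (i = 0; i < max_tries; i++) {
		*last = read_reg(dev);
		if (!(*last & busy_mask))
			return 1;	/* register became available */
		/* the real code waits REGISTER_BUSY_DELAY microseconds here */
	}
	return 0;	/* still busy: the caller treats this as an access failure */
}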
+ */ + if (WAIT_FOR_RF(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, RFCSR_VALUE, value); + rt2x00_set_field32(®, RFCSR_NUMBER_OF_BITS, 20); + rt2x00_set_field32(®, RFCSR_IF_SELECT, 0); + rt2x00_set_field32(®, RFCSR_BUSY, 1); + + rt2x00pci_register_write(rt2x00dev, RFCSR, reg); + rt2x00_rf_write(rt2x00dev, word, value); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt2500pci_eepromregister_read(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR21, ®); + + eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN); + eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT); + eeprom->reg_data_clock = + !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_CLOCK); + eeprom->reg_chip_select = + !!rt2x00_get_field32(reg, CSR21_EEPROM_CHIP_SELECT); +} + +static void rt2500pci_eepromregister_write(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg = 0; + + rt2x00_set_field32(®, CSR21_EEPROM_DATA_IN, !!eeprom->reg_data_in); + rt2x00_set_field32(®, CSR21_EEPROM_DATA_OUT, !!eeprom->reg_data_out); + rt2x00_set_field32(®, CSR21_EEPROM_DATA_CLOCK, + !!eeprom->reg_data_clock); + rt2x00_set_field32(®, CSR21_EEPROM_CHIP_SELECT, + !!eeprom->reg_chip_select); + + rt2x00pci_register_write(rt2x00dev, CSR21, reg); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +static const struct rt2x00debug rt2500pci_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt2x00pci_register_read, + .write = rt2x00pci_register_write, + .flags = RT2X00DEBUGFS_OFFSET, + .word_base = CSR_REG_BASE, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_base = EEPROM_BASE, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt2500pci_bbp_read, + .write = rt2500pci_bbp_write, + .word_base = BBP_BASE, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt2500pci_rf_write, + .word_base = RF_BASE, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, GPIOCSR, ®); + return rt2x00_get_field32(reg, GPIOCSR_BIT0); +} + +#ifdef CONFIG_RT2X00_LIB_LEDS +static void rt2500pci_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + unsigned int enabled = brightness != LED_OFF; + u32 reg; + + rt2x00pci_register_read(led->rt2x00dev, LEDCSR, ®); + + if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC) + rt2x00_set_field32(®, LEDCSR_LINK, enabled); + else if (led->type == LED_TYPE_ACTIVITY) + rt2x00_set_field32(®, LEDCSR_ACTIVITY, enabled); + + rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg); +} + +static int rt2500pci_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + u32 reg; + + rt2x00pci_register_read(led->rt2x00dev, LEDCSR, ®); + rt2x00_set_field32(®, LEDCSR_ON_PERIOD, *delay_on); + rt2x00_set_field32(®, LEDCSR_OFF_PERIOD, *delay_off); + rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg); + + return 0; +} + +static void 
rt2500pci_init_led(struct rt2x00_dev *rt2x00dev, + struct rt2x00_led *led, + enum led_type type) +{ + led->rt2x00dev = rt2x00dev; + led->type = type; + led->led_dev.brightness_set = rt2500pci_brightness_set; + led->led_dev.blink_set = rt2500pci_blink_set; + led->flags = LED_INITIALIZED; +} +#endif /* CONFIG_RT2X00_LIB_LEDS */ + +/* + * Configuration handlers. + */ +static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev, + const unsigned int filter_flags) +{ + u32 reg; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. + */ + rt2x00pci_register_read(rt2x00dev, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DROP_CRC, + !(filter_flags & FIF_FCSFAIL)); + rt2x00_set_field32(®, RXCSR0_DROP_PHYSICAL, + !(filter_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, + !(filter_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, + !(filter_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, RXCSR0_DROP_TODS, + !(filter_flags & FIF_PROMISC_IN_BSS) && + !rt2x00dev->intf_ap_count); + rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); + rt2x00_set_field32(®, RXCSR0_DROP_MCAST, + !(filter_flags & FIF_ALLMULTI)); + rt2x00_set_field32(®, RXCSR0_DROP_BCAST, 0); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev, + struct rt2x00_intf *intf, + struct rt2x00intf_conf *conf, + const unsigned int flags) +{ + struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); + unsigned int bcn_preload; + u32 reg; + + if (flags & CONFIG_UPDATE_TYPE) { + /* + * Enable beacon config + */ + bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20); + rt2x00pci_register_read(rt2x00dev, BCNCSR1, ®); + rt2x00_set_field32(®, BCNCSR1_PRELOAD, bcn_preload); + rt2x00_set_field32(®, BCNCSR1_BEACON_CWMIN, queue->cw_min); + rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); + + /* + * Enable synchronisation. 
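Illustrative sketch, not part of the driver sources: mac80211's FIF_* filter flags describe what should be passed up to the stack, while the RXCSR0 bits above describe what the hardware should drop, which is why rt2500pci_config_filter() negates every flag. Isolated:

/* "pass" flag absent from filter_flags => program the matching DROP_* bit to 1. */
static inline int want_drop(unsigned int filter_flags, unsigned int pass_flag)
{
	return !(filter_flags & pass_flag);
}
/* e.g. RXCSR0_DROP_CRC gets want_drop(filter_flags, FIF_FCSFAIL) */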
+ */ + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); + rt2x00_set_field32(®, CSR14_TSF_SYNC, conf->sync); + rt2x00_set_field32(®, CSR14_TBCN, 1); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + } + + if (flags & CONFIG_UPDATE_MAC) + rt2x00pci_register_multiwrite(rt2x00dev, CSR3, + conf->mac, sizeof(conf->mac)); + + if (flags & CONFIG_UPDATE_BSSID) + rt2x00pci_register_multiwrite(rt2x00dev, CSR5, + conf->bssid, sizeof(conf->bssid)); +} + +static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_erp *erp) +{ + int preamble_mask; + u32 reg; + + /* + * When short preamble is enabled, we should set bit 0x08 + */ + preamble_mask = erp->short_preamble << 3; + + rt2x00pci_register_read(rt2x00dev, TXCSR1, ®); + rt2x00_set_field32(®, TXCSR1_ACK_TIMEOUT, 0x162); + rt2x00_set_field32(®, TXCSR1_ACK_CONSUME_TIME, 0xa2); + rt2x00_set_field32(®, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(®, TXCSR1_AUTORESPONDER, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR2, ®); + rt2x00_set_field32(®, ARCSR2_SIGNAL, 0x00); + rt2x00_set_field32(®, ARCSR2_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 10)); + rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR3, ®); + rt2x00_set_field32(®, ARCSR3_SIGNAL, 0x01 | preamble_mask); + rt2x00_set_field32(®, ARCSR3_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 20)); + rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR4, ®); + rt2x00_set_field32(®, ARCSR4_SIGNAL, 0x02 | preamble_mask); + rt2x00_set_field32(®, ARCSR4_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 55)); + rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR5, ®); + rt2x00_set_field32(®, ARCSR5_SIGNAL, 0x03 | preamble_mask); + rt2x00_set_field32(®, ARCSR5_SERVICE, 0x84); + rt2x00_set_field32(®, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 110)); + rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); + + rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates); + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_SLOT_TIME, erp->slot_time); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + rt2x00pci_register_read(rt2x00dev, CSR12, ®); + rt2x00_set_field32(®, CSR12_BEACON_INTERVAL, erp->beacon_int * 16); + rt2x00_set_field32(®, CSR12_CFP_MAX_DURATION, erp->beacon_int * 16); + rt2x00pci_register_write(rt2x00dev, CSR12, reg); + + rt2x00pci_register_read(rt2x00dev, CSR18, ®); + rt2x00_set_field32(®, CSR18_SIFS, erp->sifs); + rt2x00_set_field32(®, CSR18_PIFS, erp->pifs); + rt2x00pci_register_write(rt2x00dev, CSR18, reg); + + rt2x00pci_register_read(rt2x00dev, CSR19, ®); + rt2x00_set_field32(®, CSR19_DIFS, erp->difs); + rt2x00_set_field32(®, CSR19_EIFS, erp->eifs); + rt2x00pci_register_write(rt2x00dev, CSR19, reg); +} + +static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev, + struct antenna_setup *ant) +{ + u32 reg; + u8 r14; + u8 r2; + + /* + * We should never come here because rt2x00lib is supposed + * to catch this and send us the correct antenna explicitely. + */ + BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || + ant->tx == ANTENNA_SW_DIVERSITY); + + rt2x00pci_register_read(rt2x00dev, BBPCSR1, ®); + rt2500pci_bbp_read(rt2x00dev, 14, &r14); + rt2500pci_bbp_read(rt2x00dev, 2, &r2); + + /* + * Configure the TX antenna. 
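Illustrative sketch, not part of the driver sources: assuming GET_DURATION(size, rate) evaluates to size * 8 * 10 / rate microseconds (rate in 100 kbit/s units) and ACK_SIZE is 14 bytes, both defined in rt2x00.h outside this hunk, the ARCSR2..ARCSR5 lengths programmed in rt2500pci_config_erp() above come out to 112, 56, 20 and 10 us for 1, 2, 5.5 and 11 Mbps; the same four values reappear in ARTCSR0 in rt2500pci_init_registers() further down.

/* Assumed shape of GET_DURATION()/ACK_SIZE from rt2x00.h (not in this hunk). */
#define SKETCH_ACK_SIZE	14	/* bytes */

static unsigned int sketch_ack_duration(unsigned int rate_100kbps)
{
	return (SKETCH_ACK_SIZE * 8 * 10) / rate_100kbps;
}
/* sketch_ack_duration(10) == 112, (20) == 56, (55) == 20, (110) == 10 */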
+ */ + switch (ant->tx) { + case ANTENNA_A: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); + rt2x00_set_field32(®, BBPCSR1_CCK, 0); + rt2x00_set_field32(®, BBPCSR1_OFDM, 0); + break; + case ANTENNA_B: + default: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); + rt2x00_set_field32(®, BBPCSR1_CCK, 2); + rt2x00_set_field32(®, BBPCSR1_OFDM, 2); + break; + } + + /* + * Configure the RX antenna. + */ + switch (ant->rx) { + case ANTENNA_A: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); + break; + case ANTENNA_B: + default: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); + break; + } + + /* + * RT2525E and RT5222 need to flip TX I/Q + */ + if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) { + rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); + rt2x00_set_field32(®, BBPCSR1_CCK_FLIP, 1); + rt2x00_set_field32(®, BBPCSR1_OFDM_FLIP, 1); + + /* + * RT2525E does not need RX I/Q Flip. + */ + if (rt2x00_rf(rt2x00dev, RF2525E)) + rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); + } else { + rt2x00_set_field32(®, BBPCSR1_CCK_FLIP, 0); + rt2x00_set_field32(®, BBPCSR1_OFDM_FLIP, 0); + } + + rt2x00pci_register_write(rt2x00dev, BBPCSR1, reg); + rt2500pci_bbp_write(rt2x00dev, 14, r14); + rt2500pci_bbp_write(rt2x00dev, 2, r2); +} + +static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + u8 r70; + + /* + * Set TXpower. + */ + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + + /* + * Switch on tuning bits. + * For RT2523 devices we do not need to update the R1 register. + */ + if (!rt2x00_rf(rt2x00dev, RF2523)) + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); + + /* + * For RT2525 we should first set the channel to half band higher. + */ + if (rt2x00_rf(rt2x00dev, RF2525)) { + static const u32 vals[] = { + 0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a, + 0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a, + 0x00080d1e, 0x00080d22, 0x00080d26, 0x00080d2a, + 0x00080d2e, 0x00080d3a + }; + + rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2500pci_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); + rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); + if (rf->rf4) + rt2500pci_rf_write(rt2x00dev, 4, rf->rf4); + } + + rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2500pci_rf_write(rt2x00dev, 2, rf->rf2); + rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); + if (rf->rf4) + rt2500pci_rf_write(rt2x00dev, 4, rf->rf4); + + /* + * Channel 14 requires the Japan filter bit to be set. + */ + r70 = 0x46; + rt2x00_set_field8(&r70, BBP_R70_JAPAN_FILTER, rf->channel == 14); + rt2500pci_bbp_write(rt2x00dev, 70, r70); + + msleep(1); + + /* + * Switch off tuning bits. + * For RT2523 devices we do not need to update the R1 register. + */ + if (!rt2x00_rf(rt2x00dev, RF2523)) { + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); + rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); + } + + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 0); + rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); + + /* + * Clear false CRC during channel switch. 
+ */ + rt2x00pci_register_read(rt2x00dev, CNT0, &rf->rf1); +} + +static void rt2500pci_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + u32 rf3; + + rt2x00_rf_read(rt2x00dev, 3, &rf3); + rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2500pci_rf_write(rt2x00dev, 3, rf3); +} + +static void rt2500pci_config_retry_limit(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_LONG_RETRY, + libconf->conf->long_frame_max_tx_count); + rt2x00_set_field32(®, CSR11_SHORT_RETRY, + libconf->conf->short_frame_max_tx_count); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); +} + +static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + enum dev_state state = + (libconf->conf->flags & IEEE80211_CONF_PS) ? + STATE_SLEEP : STATE_AWAKE; + u32 reg; + + if (state == STATE_SLEEP) { + rt2x00pci_register_read(rt2x00dev, CSR20, ®); + rt2x00_set_field32(®, CSR20_DELAY_AFTER_TBCN, + (rt2x00dev->beacon_int - 20) * 16); + rt2x00_set_field32(®, CSR20_TBCN_BEFORE_WAKEUP, + libconf->conf->listen_interval - 1); + + /* We must first disable autowake before it can be enabled */ + rt2x00_set_field32(®, CSR20_AUTOWAKE, 0); + rt2x00pci_register_write(rt2x00dev, CSR20, reg); + + rt2x00_set_field32(®, CSR20_AUTOWAKE, 1); + rt2x00pci_register_write(rt2x00dev, CSR20, reg); + } + + rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); +} + +static void rt2500pci_config(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf, + const unsigned int flags) +{ + if (flags & IEEE80211_CONF_CHANGE_CHANNEL) + rt2500pci_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & IEEE80211_CONF_CHANGE_POWER) && + !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) + rt2500pci_config_txpower(rt2x00dev, + libconf->conf->power_level); + if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) + rt2500pci_config_retry_limit(rt2x00dev, libconf); + if (flags & IEEE80211_CONF_CHANGE_PS) + rt2500pci_config_ps(rt2x00dev, libconf); +} + +/* + * Link tuning + */ +static void rt2500pci_link_stats(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual) +{ + u32 reg; + + /* + * Update FCS error count from register. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, ®); + qual->rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2x00pci_register_read(rt2x00dev, CNT3, ®); + qual->false_cca = rt2x00_get_field32(reg, CNT3_FALSE_CCA); +} + +static inline void rt2500pci_set_vgc(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual, u8 vgc_level) +{ + if (qual->vgc_level_reg != vgc_level) { + rt2500pci_bbp_write(rt2x00dev, 17, vgc_level); + qual->vgc_level_reg = vgc_level; + } +} + +static void rt2500pci_reset_tuner(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual) +{ + rt2500pci_set_vgc(rt2x00dev, qual, 0x48); +} + +static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual, const u32 count) +{ + /* + * To prevent collisions with MAC ASIC on chipsets + * up to version C the link tuning should halt after 20 + * seconds while being associated. + */ + if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D && + rt2x00dev->intf_associated && count > 20) + return; + + /* + * Chipset versions C and lower should directly continue + * to the dynamic CCA tuning. Chipset version D and higher + * should go straight to dynamic CCA tuning when they + * are not associated. 
+ */ + if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D || + !rt2x00dev->intf_associated) + goto dynamic_cca_tune; + + /* + * A too low RSSI will cause too much false CCA which will + * then corrupt the R17 tuning. To remidy this the tuning should + * be stopped (While making sure the R17 value will not exceed limits) + */ + if (qual->rssi < -80 && count > 20) { + if (qual->vgc_level_reg >= 0x41) + rt2500pci_set_vgc(rt2x00dev, qual, qual->vgc_level); + return; + } + + /* + * Special big-R17 for short distance + */ + if (qual->rssi >= -58) { + rt2500pci_set_vgc(rt2x00dev, qual, 0x50); + return; + } + + /* + * Special mid-R17 for middle distance + */ + if (qual->rssi >= -74) { + rt2500pci_set_vgc(rt2x00dev, qual, 0x41); + return; + } + + /* + * Leave short or middle distance condition, restore r17 + * to the dynamic tuning range. + */ + if (qual->vgc_level_reg >= 0x41) { + rt2500pci_set_vgc(rt2x00dev, qual, qual->vgc_level); + return; + } + +dynamic_cca_tune: + + /* + * R17 is inside the dynamic tuning range, + * start tuning the link based on the false cca counter. + */ + if (qual->false_cca > 512 && qual->vgc_level_reg < 0x40) { + rt2500pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level_reg); + qual->vgc_level = qual->vgc_level_reg; + } else if (qual->false_cca < 100 && qual->vgc_level_reg > 0x32) { + rt2500pci_set_vgc(rt2x00dev, qual, --qual->vgc_level_reg); + qual->vgc_level = qual->vgc_level_reg; + } +} + +/* + * Initialization functions. + */ +static bool rt2500pci_get_entry_state(struct queue_entry *entry) +{ + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + u32 word; + + if (entry->queue->qid == QID_RX) { + rt2x00_desc_read(entry_priv->desc, 0, &word); + + return rt2x00_get_field32(word, RXD_W0_OWNER_NIC); + } else { + rt2x00_desc_read(entry_priv->desc, 0, &word); + + return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + rt2x00_get_field32(word, TXD_W0_VALID)); + } +} + +static void rt2500pci_clear_entry(struct queue_entry *entry) +{ + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + u32 word; + + if (entry->queue->qid == QID_RX) { + rt2x00_desc_read(entry_priv->desc, 1, &word); + rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); + rt2x00_desc_write(entry_priv->desc, 1, word); + + rt2x00_desc_read(entry_priv->desc, 0, &word); + rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); + rt2x00_desc_write(entry_priv->desc, 0, word); + } else { + rt2x00_desc_read(entry_priv->desc, 0, &word); + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); + rt2x00_desc_write(entry_priv->desc, 0, word); + } +} + +static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev) +{ + struct queue_entry_priv_pci *entry_priv; + u32 reg; + + /* + * Initialize registers. 
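Illustrative sketch, not part of the driver sources: the dynamic CCA branch of rt2500pci_link_tuner() above walks BBP register 17 (the VGC level) one step per tuning round inside a 0x32..0x40 window, driven only by the false-CCA count. Separated from the register I/O, the update rule is:

/* One round of the dynamic CCA window (pure logic, no register access). */
static uint8_t vgc_step(uint8_t vgc, uint32_t false_cca)
{
	if (false_cca > 512 && vgc < 0x40)
		return vgc + 1;	/* too many false detections: raise the threshold */
	if (false_cca < 100 && vgc > 0x32)
		return vgc - 1;	/* quiet air: restore sensitivity */
	return vgc;
}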
+ */ + rt2x00pci_register_read(rt2x00dev, TXCSR2, ®); + rt2x00_set_field32(®, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); + rt2x00_set_field32(®, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); + rt2x00_set_field32(®, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit); + rt2x00_set_field32(®, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); + rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); + + entry_priv = rt2x00dev->tx[1].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, TXCSR3, ®); + rt2x00_set_field32(®, TXCSR3_TX_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); + + entry_priv = rt2x00dev->tx[0].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, TXCSR5, ®); + rt2x00_set_field32(®, TXCSR5_PRIO_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); + + entry_priv = rt2x00dev->bcn[1].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, TXCSR4, ®); + rt2x00_set_field32(®, TXCSR4_ATIM_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); + + entry_priv = rt2x00dev->bcn[0].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, TXCSR6, ®); + rt2x00_set_field32(®, TXCSR6_BEACON_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR1, ®); + rt2x00_set_field32(®, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); + rt2x00_set_field32(®, RXCSR1_NUM_RXD, rt2x00dev->rx->limit); + rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); + + entry_priv = rt2x00dev->rx->entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, RXCSR2, ®); + rt2x00_set_field32(®, RXCSR2_RX_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); + + return 0; +} + +static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_write(rt2x00dev, PSCSR0, 0x00020002); + rt2x00pci_register_write(rt2x00dev, PSCSR1, 0x00000002); + rt2x00pci_register_write(rt2x00dev, PSCSR2, 0x00020002); + rt2x00pci_register_write(rt2x00dev, PSCSR3, 0x00000002); + + rt2x00pci_register_read(rt2x00dev, TIMECSR, ®); + rt2x00_set_field32(®, TIMECSR_US_COUNT, 33); + rt2x00_set_field32(®, TIMECSR_US_64_COUNT, 63); + rt2x00_set_field32(®, TIMECSR_BEACON_EXPECT, 0); + rt2x00pci_register_write(rt2x00dev, TIMECSR, reg); + + rt2x00pci_register_read(rt2x00dev, CSR9, ®); + rt2x00_set_field32(®, CSR9_MAX_FRAME_UNIT, + rt2x00dev->rx->data_size / 128); + rt2x00pci_register_write(rt2x00dev, CSR9, reg); + + /* + * Always use CWmin and CWmax set in descriptor. 
+ */ + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_CW_SELECT, 0); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + rt2x00_set_field32(®, CSR14_TSF_COUNT, 0); + rt2x00_set_field32(®, CSR14_TSF_SYNC, 0); + rt2x00_set_field32(®, CSR14_TBCN, 0); + rt2x00_set_field32(®, CSR14_TCFP, 0); + rt2x00_set_field32(®, CSR14_TATIMW, 0); + rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); + rt2x00_set_field32(®, CSR14_CFP_COUNT_PRELOAD, 0); + rt2x00_set_field32(®, CSR14_TBCM_PRELOAD, 0); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + + rt2x00pci_register_write(rt2x00dev, CNT3, 0); + + rt2x00pci_register_read(rt2x00dev, TXCSR8, ®); + rt2x00_set_field32(®, TXCSR8_BBP_ID0, 10); + rt2x00_set_field32(®, TXCSR8_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXCSR8_BBP_ID1, 11); + rt2x00_set_field32(®, TXCSR8_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXCSR8_BBP_ID2, 13); + rt2x00_set_field32(®, TXCSR8_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXCSR8_BBP_ID3, 12); + rt2x00_set_field32(®, TXCSR8_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR8, reg); + + rt2x00pci_register_read(rt2x00dev, ARTCSR0, ®); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_1MBS, 112); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_2MBS, 56); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_5_5MBS, 20); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_11MBS, 10); + rt2x00pci_register_write(rt2x00dev, ARTCSR0, reg); + + rt2x00pci_register_read(rt2x00dev, ARTCSR1, ®); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_6MBS, 45); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_9MBS, 37); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_12MBS, 33); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_18MBS, 29); + rt2x00pci_register_write(rt2x00dev, ARTCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, ARTCSR2, ®); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_24MBS, 29); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_36MBS, 25); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_48MBS, 25); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_54MBS, 25); + rt2x00pci_register_write(rt2x00dev, ARTCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR3, ®); + rt2x00_set_field32(®, RXCSR3_BBP_ID0, 47); /* CCK Signal */ + rt2x00_set_field32(®, RXCSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID1, 51); /* Rssi */ + rt2x00_set_field32(®, RXCSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID2, 42); /* OFDM Rate */ + rt2x00_set_field32(®, RXCSR3_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID3, 51); /* RSSI */ + rt2x00_set_field32(®, RXCSR3_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, RXCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, PCICSR, ®); + rt2x00_set_field32(®, PCICSR_BIG_ENDIAN, 0); + rt2x00_set_field32(®, PCICSR_RX_TRESHOLD, 0); + rt2x00_set_field32(®, PCICSR_TX_TRESHOLD, 3); + rt2x00_set_field32(®, PCICSR_BURST_LENTH, 1); + rt2x00_set_field32(®, PCICSR_ENABLE_CLK, 1); + rt2x00_set_field32(®, PCICSR_READ_MULTIPLE, 1); + rt2x00_set_field32(®, PCICSR_WRITE_INVALID, 1); + rt2x00pci_register_write(rt2x00dev, PCICSR, reg); + + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100); + + rt2x00pci_register_write(rt2x00dev, GPIOCSR, 0x0000ff00); + rt2x00pci_register_write(rt2x00dev, TESTCSR, 0x000000f0); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2x00pci_register_write(rt2x00dev, MACCSR0, 0x00213223); + rt2x00pci_register_write(rt2x00dev, MACCSR1, 0x00235518); + + rt2x00pci_register_read(rt2x00dev, MACCSR2, ®); + rt2x00_set_field32(®, MACCSR2_DELAY, 64); + 
rt2x00pci_register_write(rt2x00dev, MACCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, RALINKCSR, ®); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_DATA0, 17); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_ID0, 26); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_VALID0, 1); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_DATA1, 0); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_ID1, 26); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_VALID1, 1); + rt2x00pci_register_write(rt2x00dev, RALINKCSR, reg); + + rt2x00pci_register_write(rt2x00dev, BBPCSR1, 0x82188200); + + rt2x00pci_register_write(rt2x00dev, TXACKCSR0, 0x00000020); + + rt2x00pci_register_read(rt2x00dev, CSR1, ®); + rt2x00_set_field32(®, CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, CSR1_BBP_RESET, 0); + rt2x00_set_field32(®, CSR1_HOST_READY, 0); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, CSR1, ®); + rt2x00_set_field32(®, CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + /* + * We must clear the FCS and FIFO error count. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, ®); + rt2x00pci_register_read(rt2x00dev, CNT4, ®); + + return 0; +} + +static int rt2500pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500pci_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + return 0; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; +} + +static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + if (unlikely(rt2500pci_wait_bbp_ready(rt2x00dev))) + return -EACCES; + + rt2500pci_bbp_write(rt2x00dev, 3, 0x02); + rt2500pci_bbp_write(rt2x00dev, 4, 0x19); + rt2500pci_bbp_write(rt2x00dev, 14, 0x1c); + rt2500pci_bbp_write(rt2x00dev, 15, 0x30); + rt2500pci_bbp_write(rt2x00dev, 16, 0xac); + rt2500pci_bbp_write(rt2x00dev, 18, 0x18); + rt2500pci_bbp_write(rt2x00dev, 19, 0xff); + rt2500pci_bbp_write(rt2x00dev, 20, 0x1e); + rt2500pci_bbp_write(rt2x00dev, 21, 0x08); + rt2500pci_bbp_write(rt2x00dev, 22, 0x08); + rt2500pci_bbp_write(rt2x00dev, 23, 0x08); + rt2500pci_bbp_write(rt2x00dev, 24, 0x70); + rt2500pci_bbp_write(rt2x00dev, 25, 0x40); + rt2500pci_bbp_write(rt2x00dev, 26, 0x08); + rt2500pci_bbp_write(rt2x00dev, 27, 0x23); + rt2500pci_bbp_write(rt2x00dev, 30, 0x10); + rt2500pci_bbp_write(rt2x00dev, 31, 0x2b); + rt2500pci_bbp_write(rt2x00dev, 32, 0xb9); + rt2500pci_bbp_write(rt2x00dev, 34, 0x12); + rt2500pci_bbp_write(rt2x00dev, 35, 0x50); + rt2500pci_bbp_write(rt2x00dev, 39, 0xc4); + rt2500pci_bbp_write(rt2x00dev, 40, 0x02); + rt2500pci_bbp_write(rt2x00dev, 41, 0x60); + rt2500pci_bbp_write(rt2x00dev, 53, 0x10); + rt2500pci_bbp_write(rt2x00dev, 54, 0x18); + rt2500pci_bbp_write(rt2x00dev, 56, 0x08); + rt2500pci_bbp_write(rt2x00dev, 57, 0x10); + rt2500pci_bbp_write(rt2x00dev, 58, 0x08); + rt2500pci_bbp_write(rt2x00dev, 61, 0x6d); + rt2500pci_bbp_write(rt2x00dev, 62, 0x10); + + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + rt2500pci_bbp_write(rt2x00dev, reg_id, value); + } + } + + return 0; +} + +/* + * Device state switch 
handlers. + */ +static void rt2500pci_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DISABLE_RX, + (state == STATE_RADIO_RX_OFF) || + (state == STATE_RADIO_RX_OFF_LINK)); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int mask = (state == STATE_RADIO_IRQ_OFF); + u32 reg; + + /* + * When interrupts are being enabled, the interrupt registers + * should clear the register to assure a clean state. + */ + if (state == STATE_RADIO_IRQ_ON) { + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + } + + /* + * Only toggle the interrupts bits we are going to use. + * Non-checked interrupt bits are disabled by default. + */ + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + rt2x00_set_field32(®, CSR8_TBCN_EXPIRE, mask); + rt2x00_set_field32(®, CSR8_TXDONE_TXRING, mask); + rt2x00_set_field32(®, CSR8_TXDONE_ATIMRING, mask); + rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, mask); + rt2x00_set_field32(®, CSR8_RXDONE, mask); + rt2x00pci_register_write(rt2x00dev, CSR8, reg); +} + +static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. + */ + if (unlikely(rt2500pci_init_queues(rt2x00dev) || + rt2500pci_init_registers(rt2x00dev) || + rt2500pci_init_bbp(rt2x00dev))) + return -EIO; + + return 0; +} + +static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Disable power + */ + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0); +} + +static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + char bbp_state; + char rf_state; + + put_to_sleep = (state != STATE_AWAKE); + + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); + rt2x00_set_field32(®, PWRCSR1_SET_STATE, 1); + rt2x00_set_field32(®, PWRCSR1_BBP_DESIRE_STATE, state); + rt2x00_set_field32(®, PWRCSR1_RF_DESIRE_STATE, state); + rt2x00_set_field32(®, PWRCSR1_PUT_TO_SLEEP, put_to_sleep); + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. 
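Illustrative sketch, not part of the driver sources: two assumptions behind the state handlers above are worth spelling out. CSR8 is assumed to be an interrupt mask register in which a set bit disables the source (which is why mask = (state == STATE_RADIO_IRQ_OFF) feeds every bit), and rt2500pci_set_state() writes enum dev_state values straight into the PWRCSR1 desired-state fields, so their numeric encoding must match the one documented for PWRCSR1 in the rt2400pci.h excerpt earlier; rt2500pci.h itself is not in this hunk.

/* Assumed PWRCSR1 state encoding (per the register comment earlier in this patch). */
enum sketch_pwr_state {
	SKETCH_STATE_DEEP_SLEEP	= 0,
	SKETCH_STATE_SLEEP	= 1,
	SKETCH_STATE_STANDBY	= 2,
	SKETCH_STATE_AWAKE	= 3,
};
/* rt2500pci_set_state() writes this value into both DESIRE_STATE fields and
 * then polls the CURR_STATE fields until the hardware reports the same value. */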
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); + bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); + rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); + if (bbp_state == state && rf_state == state) + return 0; + msleep(10); + } + + return -EBUSY; +} + +static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt2500pci_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt2500pci_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_ON_LINK: + case STATE_RADIO_RX_OFF: + case STATE_RADIO_RX_OFF_LINK: + rt2500pci_toggle_rx(rt2x00dev, state); + break; + case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_OFF: + rt2500pci_toggle_irq(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2500pci_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + if (unlikely(retval)) + ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", + state, retval); + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct txentry_desc *txdesc) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; + __le32 *txd = skbdesc->desc; + u32 word; + + /* + * Start writing the descriptor words. + */ + rt2x00_desc_read(entry_priv->desc, 1, &word); + rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); + rt2x00_desc_write(entry_priv->desc, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs); + rt2x00_set_field32(&word, TXD_W2_CWMIN, txdesc->cw_min); + rt2x00_set_field32(&word, TXD_W2_CWMAX, txdesc->cw_max); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 3, &word); + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal); + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->length_low); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->length_high); + rt2x00_desc_write(txd, 3, word); + + rt2x00_desc_read(txd, 10, &word); + rt2x00_set_field32(&word, TXD_W10_RTS, + test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)); + rt2x00_desc_write(txd, 10, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + test_bit(ENTRY_TXD_ACK, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + (txdesc->rate_mode == RATE_MODE_OFDM)); + rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); + rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); + rt2x00_desc_write(txd, 0, word); +} + +/* + * TX data initialization + */ +static void rt2500pci_write_beacon(struct queue_entry *entry) 
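Illustrative sketch, not part of the driver sources: the OWNER_NIC/VALID pair in TX descriptor word 0 is the driver/hardware handshake used throughout this file. rt2500pci_write_tx_desc() sets both bits, the hardware clears OWNER_NIC when it has processed the frame, and rt2500pci_txdone()/rt2500pci_get_entry_state() treat "OWNER_NIC set, or VALID clear" as not yet completed. The rt2500pci.h descriptor layout is outside this hunk, so the bit masks are parameters in this sketch.

/* Completion test for TX descriptor word 0 (mirrors the checks in this file). */
static int txd_completed(uint32_t word0, uint32_t owner_nic_bit, uint32_t valid_bit)
{
	/* Hardware still owns it, or it was never handed over: not done yet. */
	return !(word0 & owner_nic_bit) && (word0 & valid_bit);
}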
+{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + u32 word; + u32 reg; + + /* + * Disable beaconing while we are reloading the beacon data, + * otherwise we might be sending out invalid data. + */ + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + + /* + * Replace rt2x00lib allocated descriptor with the + * pointer to the _real_ hardware descriptor. + * After that, map the beacon to DMA and update the + * descriptor. + */ + memcpy(entry_priv->desc, skbdesc->desc, skbdesc->desc_len); + skbdesc->desc = entry_priv->desc; + + rt2x00queue_map_txskb(rt2x00dev, entry->skb); + + rt2x00_desc_read(entry_priv->desc, 1, &word); + rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); + rt2x00_desc_write(entry_priv->desc, 1, word); +} + +static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue) +{ + u32 reg; + + if (queue == QID_BEACON) { + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { + rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); + rt2x00_set_field32(®, CSR14_TBCN, 1); + rt2x00_set_field32(®, CSR14_BEACON_GEN, 1); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + } + return; + } + + rt2x00pci_register_read(rt2x00dev, TXCSR0, ®); + rt2x00_set_field32(®, TXCSR0_KICK_PRIO, (queue == QID_AC_BE)); + rt2x00_set_field32(®, TXCSR0_KICK_TX, (queue == QID_AC_BK)); + rt2x00_set_field32(®, TXCSR0_KICK_ATIM, (queue == QID_ATIM)); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); +} + +static void rt2500pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid qid) +{ + u32 reg; + + if (qid == QID_BEACON) { + rt2x00pci_register_write(rt2x00dev, CSR14, 0); + } else { + rt2x00pci_register_read(rt2x00dev, TXCSR0, ®); + rt2x00_set_field32(®, TXCSR0_ABORT, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); + } +} + +/* + * RX control handlers + */ +static void rt2500pci_fill_rxdone(struct queue_entry *entry, + struct rxdone_entry_desc *rxdesc) +{ + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + u32 word0; + u32 word2; + + rt2x00_desc_read(entry_priv->desc, 0, &word0); + rt2x00_desc_read(entry_priv->desc, 2, &word2); + + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; + if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) + rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC; + + /* + * Obtain the status about this packet. + * When frame was received with an OFDM bitrate, + * the signal is the PLCP value. If it was received with + * a CCK bitrate the signal is the rate in 100kbit/s. + */ + rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL); + rxdesc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) - + entry->queue->rt2x00dev->rssi_offset; + rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + if (rt2x00_get_field32(word0, RXD_W0_OFDM)) + rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; + else + rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; + if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) + rxdesc->dev_flags |= RXDONE_MY_BSS; +} + +/* + * Interrupt functions. 
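Illustrative sketch, not part of the driver sources: rt2500pci_fill_rxdone() above turns the raw RSSI byte from the RX descriptor into the reported signal by subtracting rt2x00dev->rssi_offset, which is loaded from EEPROM_CALIBRATE_OFFSET_RSSI (falling back to DEFAULT_RSSI_OFFSET) later in this file. The arithmetic on its own:

/* Raw descriptor RSSI -> driver-reported signal (offset comes from the EEPROM). */
static int rx_rssi_to_signal(uint8_t raw_rssi, int rssi_offset)
{
	return (int)raw_rssi - rssi_offset;
}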
+ */ +static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue_idx) +{ + struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + struct queue_entry_priv_pci *entry_priv; + struct queue_entry *entry; + struct txdone_entry_desc txdesc; + u32 word; + + while (!rt2x00queue_empty(queue)) { + entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); + entry_priv = entry->priv_data; + rt2x00_desc_read(entry_priv->desc, 0, &word); + + if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + !rt2x00_get_field32(word, TXD_W0_VALID)) + break; + + /* + * Obtain the status about this packet. + */ + txdesc.flags = 0; + switch (rt2x00_get_field32(word, TXD_W0_RESULT)) { + case 0: /* Success */ + case 1: /* Success with retry */ + __set_bit(TXDONE_SUCCESS, &txdesc.flags); + break; + case 2: /* Failure, excessive retries */ + __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags); + /* Don't break, this is a failed frame! */ + default: /* Failure */ + __set_bit(TXDONE_FAILURE, &txdesc.flags); + } + txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); + + rt2x00lib_txdone(entry, &txdesc); + } +} + +static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. + * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + + if (!reg) + return IRQ_NONE; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* + * Handle interrupts, walk through all bits + * and run the tasks, the bits are checked in order of + * priority. + */ + + /* + * 1 - Beacon timer expired interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) + rt2x00lib_beacondone(rt2x00dev); + + /* + * 2 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_RXDONE)) + rt2x00pci_rxdone(rt2x00dev); + + /* + * 3 - Atim ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) + rt2500pci_txdone(rt2x00dev, QID_ATIM); + + /* + * 4 - Priority ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) + rt2500pci_txdone(rt2x00dev, QID_AC_BE); + + /* + * 5 - Tx ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) + rt2500pci_txdone(rt2x00dev, QID_AC_BK); + + return IRQ_HANDLED; +} + +/* + * Device probe functions. + */ +static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + struct eeprom_93cx6 eeprom; + u32 reg; + u16 word; + u8 *mac; + + rt2x00pci_register_read(rt2x00dev, CSR21, ®); + + eeprom.data = rt2x00dev; + eeprom.register_read = rt2500pci_eepromregister_read; + eeprom.register_write = rt2500pci_eepromregister_write; + eeprom.width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ? + PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + eeprom.reg_data_in = 0; + eeprom.reg_data_out = 0; + eeprom.reg_data_clock = 0; + eeprom.reg_chip_select = 0; + + eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, + EEPROM_SIZE / sizeof(u16)); + + /* + * Start validation of the data that has been read. 
+ */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %pM\n", mac); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, + ANTENNA_SW_DIVERSITY); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, + ANTENNA_SW_DIVERSITY); + rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE, + LED_MODE_DEFAULT); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); + rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); + rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, + DEFAULT_RSSI_OFFSET); + rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); + EEPROM(rt2x00dev, "Calibrate offset: 0x%04x\n", word); + } + + return 0; +} + +static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2x00pci_register_read(rt2x00dev, CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2560, value, + rt2x00_get_field32(reg, CSR0_REVISION)); + + if (!rt2x00_rf(rt2x00dev, RF2522) && + !rt2x00_rf(rt2x00dev, RF2523) && + !rt2x00_rf(rt2x00dev, RF2524) && + !rt2x00_rf(rt2x00dev, RF2525) && + !rt2x00_rf(rt2x00dev, RF2525E) && + !rt2x00_rf(rt2x00dev, RF5222)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->default_ant.tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->default_ant.rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Store led mode, for correct led behaviour. + */ +#ifdef CONFIG_RT2X00_LIB_LEDS + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); + + rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); + if (value == LED_MODE_TXRX_ACTIVITY || + value == LED_MODE_DEFAULT || + value == LED_MODE_ASUS) + rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_qual, + LED_TYPE_ACTIVITY); +#endif /* CONFIG_RT2X00_LIB_LEDS */ + + /* + * Detect if this device has an hardware controlled radio. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); + + /* + * Check if the BBP tuning should be enabled. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) + __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + + /* + * Read the RSSI <-> dBm offset information. 
+ */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom); + rt2x00dev->rssi_offset = + rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI); + + return 0; +} + +/* + * RF value list for RF2522 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2522[] = { + { 1, 0x00002050, 0x000c1fda, 0x00000101, 0 }, + { 2, 0x00002050, 0x000c1fee, 0x00000101, 0 }, + { 3, 0x00002050, 0x000c2002, 0x00000101, 0 }, + { 4, 0x00002050, 0x000c2016, 0x00000101, 0 }, + { 5, 0x00002050, 0x000c202a, 0x00000101, 0 }, + { 6, 0x00002050, 0x000c203e, 0x00000101, 0 }, + { 7, 0x00002050, 0x000c2052, 0x00000101, 0 }, + { 8, 0x00002050, 0x000c2066, 0x00000101, 0 }, + { 9, 0x00002050, 0x000c207a, 0x00000101, 0 }, + { 10, 0x00002050, 0x000c208e, 0x00000101, 0 }, + { 11, 0x00002050, 0x000c20a2, 0x00000101, 0 }, + { 12, 0x00002050, 0x000c20b6, 0x00000101, 0 }, + { 13, 0x00002050, 0x000c20ca, 0x00000101, 0 }, + { 14, 0x00002050, 0x000c20fa, 0x00000101, 0 }, +}; + +/* + * RF value list for RF2523 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2523[] = { + { 1, 0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b }, + { 2, 0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b }, + { 3, 0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b }, + { 4, 0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b }, + { 5, 0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b }, + { 6, 0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b }, + { 7, 0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b }, + { 8, 0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b }, + { 9, 0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b }, + { 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b }, + { 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b }, + { 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b }, + { 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b }, + { 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 }, +}; + +/* + * RF value list for RF2524 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2524[] = { + { 1, 0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b }, + { 2, 0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b }, + { 3, 0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b }, + { 4, 0x00032020, 0x00000caa, 0x00000101, 0x00000a1b }, + { 5, 0x00032020, 0x00000cae, 0x00000101, 0x00000a1b }, + { 6, 0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b }, + { 7, 0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b }, + { 8, 0x00032020, 0x00000cba, 0x00000101, 0x00000a1b }, + { 9, 0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b }, + { 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b }, + { 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b }, + { 12, 0x00032020, 0x00000d0a, 0x00000101, 0x00000a1b }, + { 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b }, + { 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 }, +}; + +/* + * RF value list for RF2525 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525[] = { + { 1, 0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b }, + { 2, 0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b }, + { 3, 0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b }, + { 4, 0x00022020, 0x00080caa, 0x00060111, 0x00000a1b }, + { 5, 0x00022020, 0x00080cae, 0x00060111, 0x00000a1b }, + { 6, 0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b }, + { 7, 0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b }, + { 8, 0x00022020, 0x00080cba, 0x00060111, 0x00000a1b }, + { 9, 0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b }, + { 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b }, + { 11, 0x00022020, 0x00080d06, 0x00060111, 
0x00000a1b }, + { 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b }, + { 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b }, + { 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 }, +}; + +/* + * RF value list for RF2525e + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525e[] = { + { 1, 0x00022020, 0x00081136, 0x00060111, 0x00000a0b }, + { 2, 0x00022020, 0x0008113a, 0x00060111, 0x00000a0b }, + { 3, 0x00022020, 0x0008113e, 0x00060111, 0x00000a0b }, + { 4, 0x00022020, 0x00081182, 0x00060111, 0x00000a0b }, + { 5, 0x00022020, 0x00081186, 0x00060111, 0x00000a0b }, + { 6, 0x00022020, 0x0008118a, 0x00060111, 0x00000a0b }, + { 7, 0x00022020, 0x0008118e, 0x00060111, 0x00000a0b }, + { 8, 0x00022020, 0x00081192, 0x00060111, 0x00000a0b }, + { 9, 0x00022020, 0x00081196, 0x00060111, 0x00000a0b }, + { 10, 0x00022020, 0x0008119a, 0x00060111, 0x00000a0b }, + { 11, 0x00022020, 0x0008119e, 0x00060111, 0x00000a0b }, + { 12, 0x00022020, 0x000811a2, 0x00060111, 0x00000a0b }, + { 13, 0x00022020, 0x000811a6, 0x00060111, 0x00000a0b }, + { 14, 0x00022020, 0x000811ae, 0x00060111, 0x00000a1b }, +}; + +/* + * RF value list for RF5222 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5222[] = { + { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, + { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, + { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, + { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, + { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, + { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, + { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, + { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, + { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, + { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, + { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, + { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, + { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, + { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, + { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, + { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, + { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, + { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, + { 66, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, + { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, + { 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, + { 104, 0x00022010, 0x00008806, 0x00000101, 0x00000a0f }, + { 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, + { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, + { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, + { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, + { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, + { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, + { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, + { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, + + /* 802.11 UNII */ + { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, + { 149, 0x00022020, 0x000090a6, 0x00000101, 0x00000a07 }, + { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, + { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, + { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, +}; + +static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + 
struct hw_mode_spec *spec = &rt2x00dev->spec; + struct channel_info *info; + char *tx_power; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_PS_NULLFUNC_STACK; + + SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Initialize hw_mode information. + */ + spec->supported_bands = SUPPORT_BAND_2GHZ; + spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; + + if (rt2x00_rf(rt2x00dev, RF2522)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); + spec->channels = rf_vals_bg_2522; + } else if (rt2x00_rf(rt2x00dev, RF2523)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); + spec->channels = rf_vals_bg_2523; + } else if (rt2x00_rf(rt2x00dev, RF2524)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); + spec->channels = rf_vals_bg_2524; + } else if (rt2x00_rf(rt2x00dev, RF2525)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); + spec->channels = rf_vals_bg_2525; + } else if (rt2x00_rf(rt2x00dev, RF2525E)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); + spec->channels = rf_vals_bg_2525e; + } else if (rt2x00_rf(rt2x00dev, RF5222)) { + spec->supported_bands |= SUPPORT_BAND_5GHZ; + spec->num_channels = ARRAY_SIZE(rf_vals_5222); + spec->channels = rf_vals_5222; + } + + /* + * Create channel information array + */ + info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + spec->channels_info = info; + + tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); + for (i = 0; i < 14; i++) + info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + + if (spec->num_channels > 14) { + for (i = 14; i < spec->num_channels; i++) + info[i].tx_power1 = DEFAULT_TXPOWER; + } + + return 0; +} + +static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt2500pci_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2500pci_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + retval = rt2500pci_probe_hw_mode(rt2x00dev); + if (retval) + return retval; + + /* + * This device requires the atim queue and DMA-mapped skbs. + */ + __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. 
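+ * Most handlers map directly onto the generic rt2x00mac
+ * implementations; only the TSF timer access and the last beacon
+ * query below need RT2560 specific register reads.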
+ */
+static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ u64 tsf;
+ u32 reg;
+
+ rt2x00pci_register_read(rt2x00dev, CSR17, &reg);
+ tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32;
+ rt2x00pci_register_read(rt2x00dev, CSR16, &reg);
+ tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER);
+
+ return tsf;
+}
+
+static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ u32 reg;
+
+ rt2x00pci_register_read(rt2x00dev, CSR15, &reg);
+ return rt2x00_get_field32(reg, CSR15_BEACON_SENT);
+}
+
+static const struct ieee80211_ops rt2500pci_mac80211_ops = {
+ .tx = rt2x00mac_tx,
+ .start = rt2x00mac_start,
+ .stop = rt2x00mac_stop,
+ .add_interface = rt2x00mac_add_interface,
+ .remove_interface = rt2x00mac_remove_interface,
+ .config = rt2x00mac_config,
+ .configure_filter = rt2x00mac_configure_filter,
+ .set_tim = rt2x00mac_set_tim,
+ .get_stats = rt2x00mac_get_stats,
+ .bss_info_changed = rt2x00mac_bss_info_changed,
+ .conf_tx = rt2x00mac_conf_tx,
+ .get_tsf = rt2500pci_get_tsf,
+ .tx_last_beacon = rt2500pci_tx_last_beacon,
+ .rfkill_poll = rt2x00mac_rfkill_poll,
+};
+
+static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
+ .irq_handler = rt2500pci_interrupt,
+ .probe_hw = rt2500pci_probe_hw,
+ .initialize = rt2x00pci_initialize,
+ .uninitialize = rt2x00pci_uninitialize,
+ .get_entry_state = rt2500pci_get_entry_state,
+ .clear_entry = rt2500pci_clear_entry,
+ .set_device_state = rt2500pci_set_device_state,
+ .rfkill_poll = rt2500pci_rfkill_poll,
+ .link_stats = rt2500pci_link_stats,
+ .reset_tuner = rt2500pci_reset_tuner,
+ .link_tuner = rt2500pci_link_tuner,
+ .write_tx_desc = rt2500pci_write_tx_desc,
+ .write_tx_data = rt2x00pci_write_tx_data,
+ .write_beacon = rt2500pci_write_beacon,
+ .kick_tx_queue = rt2500pci_kick_tx_queue,
+ .kill_tx_queue = rt2500pci_kill_tx_queue,
+ .fill_rxdone = rt2500pci_fill_rxdone,
+ .config_filter = rt2500pci_config_filter,
+ .config_intf = rt2500pci_config_intf,
+ .config_erp = rt2500pci_config_erp,
+ .config_ant = rt2500pci_config_ant,
+ .config = rt2500pci_config,
+};
+
+static const struct data_queue_desc rt2500pci_queue_rx = {
+ .entry_num = RX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = RXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci),
+};
+
+static const struct data_queue_desc rt2500pci_queue_tx = {
+ .entry_num = TX_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci),
+};
+
+static const struct data_queue_desc rt2500pci_queue_bcn = {
+ .entry_num = BEACON_ENTRIES,
+ .data_size = MGMT_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci),
+};
+
+static const struct data_queue_desc rt2500pci_queue_atim = {
+ .entry_num = ATIM_ENTRIES,
+ .data_size = DATA_FRAME_SIZE,
+ .desc_size = TXD_DESC_SIZE,
+ .priv_size = sizeof(struct queue_entry_priv_pci),
+};
+
+static const struct rt2x00_ops rt2500pci_ops = {
+ .name = KBUILD_MODNAME,
+ .max_sta_intf = 1,
+ .max_ap_intf = 1,
+ .eeprom_size = EEPROM_SIZE,
+ .rf_size = RF_SIZE,
+ .tx_queues = NUM_TX_QUEUES,
+ .extra_tx_headroom = 0,
+ .rx = &rt2500pci_queue_rx,
+ .tx = &rt2500pci_queue_tx,
+ .bcn = &rt2500pci_queue_bcn,
+ .atim = &rt2500pci_queue_atim,
+ .lib = &rt2500pci_rt2x00_ops,
+ .hw = &rt2500pci_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+ .debugfs = &rt2500pci_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
+/*
+ * RT2500pci module information.
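+ * The PCI device table below matches the RT2560 based adapters
+ * (vendor 0x1814, device 0x0201); probe, remove and power management
+ * are delegated to the generic rt2x00pci layer.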
+ */ +static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = { + { PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2500 PCI & PCMCIA Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2560 PCI & PCMCIA chipset based cards"); +MODULE_DEVICE_TABLE(pci, rt2500pci_device_table); +MODULE_LICENSE("GPL"); + +static struct pci_driver rt2500pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rt2500pci_device_table, + .probe = rt2x00pci_probe, + .remove = __devexit_p(rt2x00pci_remove), + .suspend = rt2x00pci_suspend, + .resume = rt2x00pci_resume, +}; + +static int __init rt2500pci_init(void) +{ + return pci_register_driver(&rt2500pci_driver); +} + +static void __exit rt2500pci_exit(void) +{ + pci_unregister_driver(&rt2500pci_driver); +} + +module_init(rt2500pci_init); +module_exit(rt2500pci_exit); diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h new file mode 100644 index 0000000..d708031 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2500pci.h @@ -0,0 +1,1235 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2500pci + Abstract: Data structures and registers for the rt2500pci module. + Supported chipsets: RT2560. + */ + +#ifndef RT2500PCI_H +#define RT2500PCI_H + +/* + * RF chip defines. + */ +#define RF2522 0x0000 +#define RF2523 0x0001 +#define RF2524 0x0002 +#define RF2525 0x0003 +#define RF2525E 0x0004 +#define RF5222 0x0010 + +/* + * RT2560 version + */ +#define RT2560_VERSION_B 2 +#define RT2560_VERSION_C 3 +#define RT2560_VERSION_D 4 + +/* + * Signal information. + * Default offset is required for RSSI <-> dBm conversion. + */ +#define DEFAULT_RSSI_OFFSET 121 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x0000 +#define CSR_REG_SIZE 0x0174 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0200 +#define BBP_BASE 0x0000 +#define BBP_SIZE 0x0040 +#define RF_BASE 0x0004 +#define RF_SIZE 0x0010 + +/* + * Number of TX queues. + */ +#define NUM_TX_QUEUES 2 + +/* + * Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * CSR0: ASIC revision number. + */ +#define CSR0 0x0000 +#define CSR0_REVISION FIELD32(0x0000ffff) + +/* + * CSR1: System control register. + * SOFT_RESET: Software reset, 1: reset, 0: normal. + * BBP_RESET: Hardware reset, 1: reset, 0, release. + * HOST_READY: Host ready after initialization. + */ +#define CSR1 0x0004 +#define CSR1_SOFT_RESET FIELD32(0x00000001) +#define CSR1_BBP_RESET FIELD32(0x00000002) +#define CSR1_HOST_READY FIELD32(0x00000004) + +/* + * CSR2: System admin status register (invalid). + */ +#define CSR2 0x0008 + +/* + * CSR3: STA MAC address register 0. 
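+ * Together with CSR4 this register pair holds the 6 byte station MAC
+ * address, one byte per 8 bit field.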
+ */ +#define CSR3 0x000c +#define CSR3_BYTE0 FIELD32(0x000000ff) +#define CSR3_BYTE1 FIELD32(0x0000ff00) +#define CSR3_BYTE2 FIELD32(0x00ff0000) +#define CSR3_BYTE3 FIELD32(0xff000000) + +/* + * CSR4: STA MAC address register 1. + */ +#define CSR4 0x0010 +#define CSR4_BYTE4 FIELD32(0x000000ff) +#define CSR4_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR5: BSSID register 0. + */ +#define CSR5 0x0014 +#define CSR5_BYTE0 FIELD32(0x000000ff) +#define CSR5_BYTE1 FIELD32(0x0000ff00) +#define CSR5_BYTE2 FIELD32(0x00ff0000) +#define CSR5_BYTE3 FIELD32(0xff000000) + +/* + * CSR6: BSSID register 1. + */ +#define CSR6 0x0018 +#define CSR6_BYTE4 FIELD32(0x000000ff) +#define CSR6_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR7: Interrupt source register. + * Write 1 to clear. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + * DECRYPTION_DONE: Decryption done interrupt. + * ENCRYPTION_DONE: Encryption done interrupt. + * UART1_TX_TRESHOLD: UART1 TX reaches threshold. + * UART1_RX_TRESHOLD: UART1 RX reaches threshold. + * UART1_IDLE_TRESHOLD: UART1 IDLE over threshold. + * UART1_TX_BUFF_ERROR: UART1 TX buffer error. + * UART1_RX_BUFF_ERROR: UART1 RX buffer error. + * UART2_TX_TRESHOLD: UART2 TX reaches threshold. + * UART2_RX_TRESHOLD: UART2 RX reaches threshold. + * UART2_IDLE_TRESHOLD: UART2 IDLE over threshold. + * UART2_TX_BUFF_ERROR: UART2 TX buffer error. + * UART2_RX_BUFF_ERROR: UART2 RX buffer error. + * TIMER_CSR3_EXPIRE: TIMECSR3 timer expired (802.1H quiet period). + + */ +#define CSR7 0x001c +#define CSR7_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR7_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR7_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR7_TXDONE_TXRING FIELD32(0x00000008) +#define CSR7_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR7_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR7_RXDONE FIELD32(0x00000040) +#define CSR7_DECRYPTION_DONE FIELD32(0x00000080) +#define CSR7_ENCRYPTION_DONE FIELD32(0x00000100) +#define CSR7_UART1_TX_TRESHOLD FIELD32(0x00000200) +#define CSR7_UART1_RX_TRESHOLD FIELD32(0x00000400) +#define CSR7_UART1_IDLE_TRESHOLD FIELD32(0x00000800) +#define CSR7_UART1_TX_BUFF_ERROR FIELD32(0x00001000) +#define CSR7_UART1_RX_BUFF_ERROR FIELD32(0x00002000) +#define CSR7_UART2_TX_TRESHOLD FIELD32(0x00004000) +#define CSR7_UART2_RX_TRESHOLD FIELD32(0x00008000) +#define CSR7_UART2_IDLE_TRESHOLD FIELD32(0x00010000) +#define CSR7_UART2_TX_BUFF_ERROR FIELD32(0x00020000) +#define CSR7_UART2_RX_BUFF_ERROR FIELD32(0x00040000) +#define CSR7_TIMER_CSR3_EXPIRE FIELD32(0x00080000) + +/* + * CSR8: Interrupt mask register. + * Write 1 to mask interrupt. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + * DECRYPTION_DONE: Decryption done interrupt. + * ENCRYPTION_DONE: Encryption done interrupt. + * UART1_TX_TRESHOLD: UART1 TX reaches threshold. + * UART1_RX_TRESHOLD: UART1 RX reaches threshold. + * UART1_IDLE_TRESHOLD: UART1 IDLE over threshold. 
+ * UART1_TX_BUFF_ERROR: UART1 TX buffer error. + * UART1_RX_BUFF_ERROR: UART1 RX buffer error. + * UART2_TX_TRESHOLD: UART2 TX reaches threshold. + * UART2_RX_TRESHOLD: UART2 RX reaches threshold. + * UART2_IDLE_TRESHOLD: UART2 IDLE over threshold. + * UART2_TX_BUFF_ERROR: UART2 TX buffer error. + * UART2_RX_BUFF_ERROR: UART2 RX buffer error. + * TIMER_CSR3_EXPIRE: TIMECSR3 timer expired (802.1H quiet period). + */ +#define CSR8 0x0020 +#define CSR8_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR8_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR8_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR8_TXDONE_TXRING FIELD32(0x00000008) +#define CSR8_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR8_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR8_RXDONE FIELD32(0x00000040) +#define CSR8_DECRYPTION_DONE FIELD32(0x00000080) +#define CSR8_ENCRYPTION_DONE FIELD32(0x00000100) +#define CSR8_UART1_TX_TRESHOLD FIELD32(0x00000200) +#define CSR8_UART1_RX_TRESHOLD FIELD32(0x00000400) +#define CSR8_UART1_IDLE_TRESHOLD FIELD32(0x00000800) +#define CSR8_UART1_TX_BUFF_ERROR FIELD32(0x00001000) +#define CSR8_UART1_RX_BUFF_ERROR FIELD32(0x00002000) +#define CSR8_UART2_TX_TRESHOLD FIELD32(0x00004000) +#define CSR8_UART2_RX_TRESHOLD FIELD32(0x00008000) +#define CSR8_UART2_IDLE_TRESHOLD FIELD32(0x00010000) +#define CSR8_UART2_TX_BUFF_ERROR FIELD32(0x00020000) +#define CSR8_UART2_RX_BUFF_ERROR FIELD32(0x00040000) +#define CSR8_TIMER_CSR3_EXPIRE FIELD32(0x00080000) + +/* + * CSR9: Maximum frame length register. + * MAX_FRAME_UNIT: Maximum frame length in 128b unit, default: 12. + */ +#define CSR9 0x0024 +#define CSR9_MAX_FRAME_UNIT FIELD32(0x00000f80) + +/* + * SECCSR0: WEP control register. + * KICK_DECRYPT: Kick decryption engine, self-clear. + * ONE_SHOT: 0: ring mode, 1: One shot only mode. + * DESC_ADDRESS: Descriptor physical address of frame. + */ +#define SECCSR0 0x0028 +#define SECCSR0_KICK_DECRYPT FIELD32(0x00000001) +#define SECCSR0_ONE_SHOT FIELD32(0x00000002) +#define SECCSR0_DESC_ADDRESS FIELD32(0xfffffffc) + +/* + * CSR11: Back-off control register. + * CWMIN: CWmin. Default cwmin is 31 (2^5 - 1). + * CWMAX: CWmax. Default cwmax is 1023 (2^10 - 1). + * SLOT_TIME: Slot time, default is 20us for 802.11b + * CW_SELECT: CWmin/CWmax selection, 1: Register, 0: TXD. + * LONG_RETRY: Long retry count. + * SHORT_RETRY: Short retry count. + */ +#define CSR11 0x002c +#define CSR11_CWMIN FIELD32(0x0000000f) +#define CSR11_CWMAX FIELD32(0x000000f0) +#define CSR11_SLOT_TIME FIELD32(0x00001f00) +#define CSR11_CW_SELECT FIELD32(0x00002000) +#define CSR11_LONG_RETRY FIELD32(0x00ff0000) +#define CSR11_SHORT_RETRY FIELD32(0xff000000) + +/* + * CSR12: Synchronization configuration register 0. + * All units in 1/16 TU. + * BEACON_INTERVAL: Beacon interval, default is 100 TU. + * CFP_MAX_DURATION: Cfp maximum duration, default is 100 TU. + */ +#define CSR12 0x0030 +#define CSR12_BEACON_INTERVAL FIELD32(0x0000ffff) +#define CSR12_CFP_MAX_DURATION FIELD32(0xffff0000) + +/* + * CSR13: Synchronization configuration register 1. + * All units in 1/16 TU. + * ATIMW_DURATION: Atim window duration. + * CFP_PERIOD: Cfp period, default is 0 TU. + */ +#define CSR13 0x0034 +#define CSR13_ATIMW_DURATION FIELD32(0x0000ffff) +#define CSR13_CFP_PERIOD FIELD32(0x00ff0000) + +/* + * CSR14: Synchronization control register. + * TSF_COUNT: Enable tsf auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * TBCN: Enable tbcn with reload value. + * TCFP: Enable tcfp & cfp / cp switching. 
+ * TATIMW: Enable tatimw & atim window switching. + * BEACON_GEN: Enable beacon generator. + * CFP_COUNT_PRELOAD: Cfp count preload value. + * TBCM_PRELOAD: Tbcn preload value in units of 64us. + */ +#define CSR14 0x0038 +#define CSR14_TSF_COUNT FIELD32(0x00000001) +#define CSR14_TSF_SYNC FIELD32(0x00000006) +#define CSR14_TBCN FIELD32(0x00000008) +#define CSR14_TCFP FIELD32(0x00000010) +#define CSR14_TATIMW FIELD32(0x00000020) +#define CSR14_BEACON_GEN FIELD32(0x00000040) +#define CSR14_CFP_COUNT_PRELOAD FIELD32(0x0000ff00) +#define CSR14_TBCM_PRELOAD FIELD32(0xffff0000) + +/* + * CSR15: Synchronization status register. + * CFP: ASIC is in contention-free period. + * ATIMW: ASIC is in ATIM window. + * BEACON_SENT: Beacon is send. + */ +#define CSR15 0x003c +#define CSR15_CFP FIELD32(0x00000001) +#define CSR15_ATIMW FIELD32(0x00000002) +#define CSR15_BEACON_SENT FIELD32(0x00000004) + +/* + * CSR16: TSF timer register 0. + */ +#define CSR16 0x0040 +#define CSR16_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR17: TSF timer register 1. + */ +#define CSR17 0x0044 +#define CSR17_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR18: IFS timer register 0. + * SIFS: Sifs, default is 10 us. + * PIFS: Pifs, default is 30 us. + */ +#define CSR18 0x0048 +#define CSR18_SIFS FIELD32(0x000001ff) +#define CSR18_PIFS FIELD32(0x001f0000) + +/* + * CSR19: IFS timer register 1. + * DIFS: Difs, default is 50 us. + * EIFS: Eifs, default is 364 us. + */ +#define CSR19 0x004c +#define CSR19_DIFS FIELD32(0x0000ffff) +#define CSR19_EIFS FIELD32(0xffff0000) + +/* + * CSR20: Wakeup timer register. + * DELAY_AFTER_TBCN: Delay after tbcn expired in units of 1/16 TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * AUTOWAKE: Enable auto wakeup / sleep mechanism. + */ +#define CSR20 0x0050 +#define CSR20_DELAY_AFTER_TBCN FIELD32(0x0000ffff) +#define CSR20_TBCN_BEFORE_WAKEUP FIELD32(0x00ff0000) +#define CSR20_AUTOWAKE FIELD32(0x01000000) + +/* + * CSR21: EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE_93C46: 1: 93c46, 0:93c66. + */ +#define CSR21 0x0054 +#define CSR21_RELOAD FIELD32(0x00000001) +#define CSR21_EEPROM_DATA_CLOCK FIELD32(0x00000002) +#define CSR21_EEPROM_CHIP_SELECT FIELD32(0x00000004) +#define CSR21_EEPROM_DATA_IN FIELD32(0x00000008) +#define CSR21_EEPROM_DATA_OUT FIELD32(0x00000010) +#define CSR21_TYPE_93C46 FIELD32(0x00000020) + +/* + * CSR22: CFP control register. + * CFP_DURATION_REMAIN: Cfp duration remain, in units of TU. + * RELOAD_CFP_DURATION: Write 1 to reload cfp duration remain. + */ +#define CSR22 0x0058 +#define CSR22_CFP_DURATION_REMAIN FIELD32(0x0000ffff) +#define CSR22_RELOAD_CFP_DURATION FIELD32(0x00010000) + +/* + * Transmit related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXCSR0: TX Control Register. + * KICK_TX: Kick tx ring. + * KICK_ATIM: Kick atim ring. + * KICK_PRIO: Kick priority ring. + * ABORT: Abort all transmit related ring operation. + */ +#define TXCSR0 0x0060 +#define TXCSR0_KICK_TX FIELD32(0x00000001) +#define TXCSR0_KICK_ATIM FIELD32(0x00000002) +#define TXCSR0_KICK_PRIO FIELD32(0x00000004) +#define TXCSR0_ABORT FIELD32(0x00000008) + +/* + * TXCSR1: TX Configuration Register. + * ACK_TIMEOUT: Ack timeout, default = sifs + 2*slottime + acktime @ 1mbps. + * ACK_CONSUME_TIME: Ack consume time, default = sifs + acktime @ 1mbps. + * TSF_OFFSET: Insert tsf offset. + * AUTORESPONDER: Enable auto responder which include ack & cts. 
+ */ +#define TXCSR1 0x0064 +#define TXCSR1_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXCSR1_ACK_CONSUME_TIME FIELD32(0x0003fe00) +#define TXCSR1_TSF_OFFSET FIELD32(0x00fc0000) +#define TXCSR1_AUTORESPONDER FIELD32(0x01000000) + +/* + * TXCSR2: Tx descriptor configuration register. + * TXD_SIZE: Tx descriptor size, default is 48. + * NUM_TXD: Number of tx entries in ring. + * NUM_ATIM: Number of atim entries in ring. + * NUM_PRIO: Number of priority entries in ring. + */ +#define TXCSR2 0x0068 +#define TXCSR2_TXD_SIZE FIELD32(0x000000ff) +#define TXCSR2_NUM_TXD FIELD32(0x0000ff00) +#define TXCSR2_NUM_ATIM FIELD32(0x00ff0000) +#define TXCSR2_NUM_PRIO FIELD32(0xff000000) + +/* + * TXCSR3: TX Ring Base address register. + */ +#define TXCSR3 0x006c +#define TXCSR3_TX_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR4: TX Atim Ring Base address register. + */ +#define TXCSR4 0x0070 +#define TXCSR4_ATIM_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR5: TX Prio Ring Base address register. + */ +#define TXCSR5 0x0074 +#define TXCSR5_PRIO_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR6: Beacon Base address register. + */ +#define TXCSR6 0x0078 +#define TXCSR6_BEACON_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR7: Auto responder control register. + * AR_POWERMANAGEMENT: Auto responder power management bit. + */ +#define TXCSR7 0x007c +#define TXCSR7_AR_POWERMANAGEMENT FIELD32(0x00000001) + +/* + * TXCSR8: CCK Tx BBP register. + */ +#define TXCSR8 0x0098 +#define TXCSR8_BBP_ID0 FIELD32(0x0000007f) +#define TXCSR8_BBP_ID0_VALID FIELD32(0x00000080) +#define TXCSR8_BBP_ID1 FIELD32(0x00007f00) +#define TXCSR8_BBP_ID1_VALID FIELD32(0x00008000) +#define TXCSR8_BBP_ID2 FIELD32(0x007f0000) +#define TXCSR8_BBP_ID2_VALID FIELD32(0x00800000) +#define TXCSR8_BBP_ID3 FIELD32(0x7f000000) +#define TXCSR8_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXCSR9: OFDM TX BBP registers + * OFDM_SIGNAL: BBP rate field address for OFDM. + * OFDM_SERVICE: BBP service field address for OFDM. + * OFDM_LENGTH_LOW: BBP length low byte address for OFDM. + * OFDM_LENGTH_HIGH: BBP length high byte address for OFDM. + */ +#define TXCSR9 0x0094 +#define TXCSR9_OFDM_RATE FIELD32(0x000000ff) +#define TXCSR9_OFDM_SERVICE FIELD32(0x0000ff00) +#define TXCSR9_OFDM_LENGTH_LOW FIELD32(0x00ff0000) +#define TXCSR9_OFDM_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Receive related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * RXCSR0: RX Control Register. + * DISABLE_RX: Disable rx engine. + * DROP_CRC: Drop crc error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TODS: Drop frame tods bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * PASS_CRC: Pass all packets with crc attached. + * PASS_CRC: Pass all packets with crc attached. + * PASS_PLCP: Pass all packets with 4 bytes PLCP attached. + * DROP_MCAST: Drop multicast frames. + * DROP_BCAST: Drop broadcast frames. + * ENABLE_QOS: Accept QOS data frame and parse QOS field. 
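+ * The DROP_* bits are driven from the mac80211 filter flags by the
+ * driver's config_filter handler.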
+ */ +#define RXCSR0 0x0080 +#define RXCSR0_DISABLE_RX FIELD32(0x00000001) +#define RXCSR0_DROP_CRC FIELD32(0x00000002) +#define RXCSR0_DROP_PHYSICAL FIELD32(0x00000004) +#define RXCSR0_DROP_CONTROL FIELD32(0x00000008) +#define RXCSR0_DROP_NOT_TO_ME FIELD32(0x00000010) +#define RXCSR0_DROP_TODS FIELD32(0x00000020) +#define RXCSR0_DROP_VERSION_ERROR FIELD32(0x00000040) +#define RXCSR0_PASS_CRC FIELD32(0x00000080) +#define RXCSR0_PASS_PLCP FIELD32(0x00000100) +#define RXCSR0_DROP_MCAST FIELD32(0x00000200) +#define RXCSR0_DROP_BCAST FIELD32(0x00000400) +#define RXCSR0_ENABLE_QOS FIELD32(0x00000800) + +/* + * RXCSR1: RX descriptor configuration register. + * RXD_SIZE: Rx descriptor size, default is 32b. + * NUM_RXD: Number of rx entries in ring. + */ +#define RXCSR1 0x0084 +#define RXCSR1_RXD_SIZE FIELD32(0x000000ff) +#define RXCSR1_NUM_RXD FIELD32(0x0000ff00) + +/* + * RXCSR2: RX Ring base address register. + */ +#define RXCSR2 0x0088 +#define RXCSR2_RX_RING_REGISTER FIELD32(0xffffffff) + +/* + * RXCSR3: BBP ID register for Rx operation. + * BBP_ID#: BBP register # id. + * BBP_ID#_VALID: BBP register # id is valid or not. + */ +#define RXCSR3 0x0090 +#define RXCSR3_BBP_ID0 FIELD32(0x0000007f) +#define RXCSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define RXCSR3_BBP_ID1 FIELD32(0x00007f00) +#define RXCSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define RXCSR3_BBP_ID2 FIELD32(0x007f0000) +#define RXCSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define RXCSR3_BBP_ID3 FIELD32(0x7f000000) +#define RXCSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * ARCSR1: Auto Responder PLCP config register 1. + * AR_BBP_DATA#: Auto responder BBP register # data. + * AR_BBP_ID#: Auto responder BBP register # Id. + */ +#define ARCSR1 0x009c +#define ARCSR1_AR_BBP_DATA2 FIELD32(0x000000ff) +#define ARCSR1_AR_BBP_ID2 FIELD32(0x0000ff00) +#define ARCSR1_AR_BBP_DATA3 FIELD32(0x00ff0000) +#define ARCSR1_AR_BBP_ID3 FIELD32(0xff000000) + +/* + * Miscellaneous Registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + + */ + +/* + * PCICSR: PCI control register. + * BIG_ENDIAN: 1: big endian, 0: little endian. + * RX_TRESHOLD: Rx threshold in dw to start pci access + * 0: 16dw (default), 1: 8dw, 2: 4dw, 3: 32dw. + * TX_TRESHOLD: Tx threshold in dw to start pci access + * 0: 0dw (default), 1: 1dw, 2: 4dw, 3: forward. + * BURST_LENTH: Pci burst length 0: 4dw (default, 1: 8dw, 2: 16dw, 3:32dw. + * ENABLE_CLK: Enable clk_run, pci clock can't going down to non-operational. + * READ_MULTIPLE: Enable memory read multiple. + * WRITE_INVALID: Enable memory write & invalid. + */ +#define PCICSR 0x008c +#define PCICSR_BIG_ENDIAN FIELD32(0x00000001) +#define PCICSR_RX_TRESHOLD FIELD32(0x00000006) +#define PCICSR_TX_TRESHOLD FIELD32(0x00000018) +#define PCICSR_BURST_LENTH FIELD32(0x00000060) +#define PCICSR_ENABLE_CLK FIELD32(0x00000080) +#define PCICSR_READ_MULTIPLE FIELD32(0x00000100) +#define PCICSR_WRITE_INVALID FIELD32(0x00000200) + +/* + * CNT0: FCS error count. + * FCS_ERROR: FCS error count, cleared when read. + */ +#define CNT0 0x00a0 +#define CNT0_FCS_ERROR FIELD32(0x0000ffff) + +/* + * Statistic Register. + * CNT1: PLCP error count. + * CNT2: Long error count. + */ +#define TIMECSR2 0x00a8 +#define CNT1 0x00ac +#define CNT2 0x00b0 +#define TIMECSR3 0x00b4 + +/* + * CNT3: CCA false alarm count. + */ +#define CNT3 0x00b8 +#define CNT3_FALSE_CCA FIELD32(0x0000ffff) + +/* + * Statistic Register. + * CNT4: Rx FIFO overflow count. + * CNT5: Tx FIFO underrun count. 
+ */ +#define CNT4 0x00bc +#define CNT5 0x00c0 + +/* + * Baseband Control Register. + */ + +/* + * PWRCSR0: Power mode configuration register. + */ +#define PWRCSR0 0x00c4 + +/* + * Power state transition time registers. + */ +#define PSCSR0 0x00c8 +#define PSCSR1 0x00cc +#define PSCSR2 0x00d0 +#define PSCSR3 0x00d4 + +/* + * PWRCSR1: Manual power control / status register. + * Allowed state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake. + * SET_STATE: Set state. Write 1 to trigger, self cleared. + * BBP_DESIRE_STATE: BBP desired state. + * RF_DESIRE_STATE: RF desired state. + * BBP_CURR_STATE: BBP current state. + * RF_CURR_STATE: RF current state. + * PUT_TO_SLEEP: Put to sleep. Write 1 to trigger, self cleared. + */ +#define PWRCSR1 0x00d8 +#define PWRCSR1_SET_STATE FIELD32(0x00000001) +#define PWRCSR1_BBP_DESIRE_STATE FIELD32(0x00000006) +#define PWRCSR1_RF_DESIRE_STATE FIELD32(0x00000018) +#define PWRCSR1_BBP_CURR_STATE FIELD32(0x00000060) +#define PWRCSR1_RF_CURR_STATE FIELD32(0x00000180) +#define PWRCSR1_PUT_TO_SLEEP FIELD32(0x00000200) + +/* + * TIMECSR: Timer control register. + * US_COUNT: 1 us timer count in units of clock cycles. + * US_64_COUNT: 64 us timer count in units of 1 us timer. + * BEACON_EXPECT: Beacon expect window. + */ +#define TIMECSR 0x00dc +#define TIMECSR_US_COUNT FIELD32(0x000000ff) +#define TIMECSR_US_64_COUNT FIELD32(0x0000ff00) +#define TIMECSR_BEACON_EXPECT FIELD32(0x00070000) + +/* + * MACCSR0: MAC configuration register 0. + */ +#define MACCSR0 0x00e0 + +/* + * MACCSR1: MAC configuration register 1. + * KICK_RX: Kick one-shot rx in one-shot rx mode. + * ONESHOT_RXMODE: Enable one-shot rx mode for debugging. + * BBPRX_RESET_MODE: Ralink bbp rx reset mode. + * AUTO_TXBBP: Auto tx logic access bbp control register. + * AUTO_RXBBP: Auto rx logic access bbp control register. + * LOOPBACK: Loopback mode. 0: normal, 1: internal, 2: external, 3:rsvd. + * INTERSIL_IF: Intersil if calibration pin. + */ +#define MACCSR1 0x00e4 +#define MACCSR1_KICK_RX FIELD32(0x00000001) +#define MACCSR1_ONESHOT_RXMODE FIELD32(0x00000002) +#define MACCSR1_BBPRX_RESET_MODE FIELD32(0x00000004) +#define MACCSR1_AUTO_TXBBP FIELD32(0x00000008) +#define MACCSR1_AUTO_RXBBP FIELD32(0x00000010) +#define MACCSR1_LOOPBACK FIELD32(0x00000060) +#define MACCSR1_INTERSIL_IF FIELD32(0x00000080) + +/* + * RALINKCSR: Ralink Rx auto-reset BBCR. + * AR_BBP_DATA#: Auto reset BBP register # data. + * AR_BBP_ID#: Auto reset BBP register # id. + */ +#define RALINKCSR 0x00e8 +#define RALINKCSR_AR_BBP_DATA0 FIELD32(0x000000ff) +#define RALINKCSR_AR_BBP_ID0 FIELD32(0x00007f00) +#define RALINKCSR_AR_BBP_VALID0 FIELD32(0x00008000) +#define RALINKCSR_AR_BBP_DATA1 FIELD32(0x00ff0000) +#define RALINKCSR_AR_BBP_ID1 FIELD32(0x7f000000) +#define RALINKCSR_AR_BBP_VALID1 FIELD32(0x80000000) + +/* + * BCNCSR: Beacon interval control register. + * CHANGE: Write one to change beacon interval. + * DELTATIME: The delta time value. + * NUM_BEACON: Number of beacon according to mode. + * MODE: Please refer to asic specs. + * PLUS: Plus or minus delta time value. + */ +#define BCNCSR 0x00ec +#define BCNCSR_CHANGE FIELD32(0x00000001) +#define BCNCSR_DELTATIME FIELD32(0x0000001e) +#define BCNCSR_NUM_BEACON FIELD32(0x00001fe0) +#define BCNCSR_MODE FIELD32(0x00006000) +#define BCNCSR_PLUS FIELD32(0x00008000) + +/* + * BBP / RF / IF Control Register. + */ + +/* + * BBPCSR: BBP serial control register. + * VALUE: Register value to program into BBP. + * REGNUM: Selected BBP register. 
+ * BUSY: 1: asic is busy execute BBP programming. + * WRITE_CONTROL: 1: write BBP, 0: read BBP. + */ +#define BBPCSR 0x00f0 +#define BBPCSR_VALUE FIELD32(0x000000ff) +#define BBPCSR_REGNUM FIELD32(0x00007f00) +#define BBPCSR_BUSY FIELD32(0x00008000) +#define BBPCSR_WRITE_CONTROL FIELD32(0x00010000) + +/* + * RFCSR: RF serial control register. + * VALUE: Register value + id to program into rf/if. + * NUMBER_OF_BITS: Number of bits used in value (i:20, rfmd:22). + * IF_SELECT: Chip to program: 0: rf, 1: if. + * PLL_LD: Rf pll_ld status. + * BUSY: 1: asic is busy execute rf programming. + */ +#define RFCSR 0x00f4 +#define RFCSR_VALUE FIELD32(0x00ffffff) +#define RFCSR_NUMBER_OF_BITS FIELD32(0x1f000000) +#define RFCSR_IF_SELECT FIELD32(0x20000000) +#define RFCSR_PLL_LD FIELD32(0x40000000) +#define RFCSR_BUSY FIELD32(0x80000000) + +/* + * LEDCSR: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + * LINK: 0: linkoff, 1: linkup. + * ACTIVITY: 0: idle, 1: active. + * LINK_POLARITY: 0: active low, 1: active high. + * ACTIVITY_POLARITY: 0: active low, 1: active high. + * LED_DEFAULT: LED state for "enable" 0: ON, 1: OFF. + */ +#define LEDCSR 0x00f8 +#define LEDCSR_ON_PERIOD FIELD32(0x000000ff) +#define LEDCSR_OFF_PERIOD FIELD32(0x0000ff00) +#define LEDCSR_LINK FIELD32(0x00010000) +#define LEDCSR_ACTIVITY FIELD32(0x00020000) +#define LEDCSR_LINK_POLARITY FIELD32(0x00040000) +#define LEDCSR_ACTIVITY_POLARITY FIELD32(0x00080000) +#define LEDCSR_LED_DEFAULT FIELD32(0x00100000) + +/* + * SECCSR3: AES control register. + */ +#define SECCSR3 0x00fc + +/* + * ASIC pointer information. + * RXPTR: Current RX ring address. + * TXPTR: Current Tx ring address. + * PRIPTR: Current Priority ring address. + * ATIMPTR: Current ATIM ring address. + */ +#define RXPTR 0x0100 +#define TXPTR 0x0104 +#define PRIPTR 0x0108 +#define ATIMPTR 0x010c + +/* + * TXACKCSR0: TX ACK timeout. + */ +#define TXACKCSR0 0x0110 + +/* + * ACK timeout count registers. + * ACKCNT0: TX ACK timeout count. + * ACKCNT1: RX ACK timeout count. + */ +#define ACKCNT0 0x0114 +#define ACKCNT1 0x0118 + +/* + * GPIO and others. + */ + +/* + * GPIOCSR: GPIO control register. + */ +#define GPIOCSR 0x0120 +#define GPIOCSR_BIT0 FIELD32(0x00000001) +#define GPIOCSR_BIT1 FIELD32(0x00000002) +#define GPIOCSR_BIT2 FIELD32(0x00000004) +#define GPIOCSR_BIT3 FIELD32(0x00000008) +#define GPIOCSR_BIT4 FIELD32(0x00000010) +#define GPIOCSR_BIT5 FIELD32(0x00000020) +#define GPIOCSR_BIT6 FIELD32(0x00000040) +#define GPIOCSR_BIT7 FIELD32(0x00000080) +#define GPIOCSR_DIR0 FIELD32(0x00000100) +#define GPIOCSR_DIR1 FIELD32(0x00000200) +#define GPIOCSR_DIR2 FIELD32(0x00000400) +#define GPIOCSR_DIR3 FIELD32(0x00000800) +#define GPIOCSR_DIR4 FIELD32(0x00001000) +#define GPIOCSR_DIR5 FIELD32(0x00002000) +#define GPIOCSR_DIR6 FIELD32(0x00004000) +#define GPIOCSR_DIR7 FIELD32(0x00008000) + +/* + * FIFO pointer registers. + * FIFOCSR0: TX FIFO pointer. + * FIFOCSR1: RX FIFO pointer. + */ +#define FIFOCSR0 0x0128 +#define FIFOCSR1 0x012c + +/* + * BCNCSR1: Tx BEACON offset time control register. + * PRELOAD: Beacon timer offset in units of usec. + * BEACON_CWMIN: 2^CwMin. + */ +#define BCNCSR1 0x0130 +#define BCNCSR1_PRELOAD FIELD32(0x0000ffff) +#define BCNCSR1_BEACON_CWMIN FIELD32(0x000f0000) + +/* + * MACCSR2: TX_PE to RX_PE turn-around time control register + * DELAY: RX_PE low width, in units of pci clock cycle. 
+ */ +#define MACCSR2 0x0134 +#define MACCSR2_DELAY FIELD32(0x000000ff) + +/* + * TESTCSR: TEST mode selection register. + */ +#define TESTCSR 0x0138 + +/* + * ARCSR2: 1 Mbps ACK/CTS PLCP. + */ +#define ARCSR2 0x013c +#define ARCSR2_SIGNAL FIELD32(0x000000ff) +#define ARCSR2_SERVICE FIELD32(0x0000ff00) +#define ARCSR2_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR3: 2 Mbps ACK/CTS PLCP. + */ +#define ARCSR3 0x0140 +#define ARCSR3_SIGNAL FIELD32(0x000000ff) +#define ARCSR3_SERVICE FIELD32(0x0000ff00) +#define ARCSR3_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR4: 5.5 Mbps ACK/CTS PLCP. + */ +#define ARCSR4 0x0144 +#define ARCSR4_SIGNAL FIELD32(0x000000ff) +#define ARCSR4_SERVICE FIELD32(0x0000ff00) +#define ARCSR4_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR5: 11 Mbps ACK/CTS PLCP. + */ +#define ARCSR5 0x0148 +#define ARCSR5_SIGNAL FIELD32(0x000000ff) +#define ARCSR5_SERVICE FIELD32(0x0000ff00) +#define ARCSR5_LENGTH FIELD32(0xffff0000) + +/* + * ARTCSR0: CCK ACK/CTS payload consumed time for 1/2/5.5/11 mbps. + */ +#define ARTCSR0 0x014c +#define ARTCSR0_ACK_CTS_11MBS FIELD32(0x000000ff) +#define ARTCSR0_ACK_CTS_5_5MBS FIELD32(0x0000ff00) +#define ARTCSR0_ACK_CTS_2MBS FIELD32(0x00ff0000) +#define ARTCSR0_ACK_CTS_1MBS FIELD32(0xff000000) + + +/* + * ARTCSR1: OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. + */ +#define ARTCSR1 0x0150 +#define ARTCSR1_ACK_CTS_6MBS FIELD32(0x000000ff) +#define ARTCSR1_ACK_CTS_9MBS FIELD32(0x0000ff00) +#define ARTCSR1_ACK_CTS_12MBS FIELD32(0x00ff0000) +#define ARTCSR1_ACK_CTS_18MBS FIELD32(0xff000000) + +/* + * ARTCSR2: OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. + */ +#define ARTCSR2 0x0154 +#define ARTCSR2_ACK_CTS_24MBS FIELD32(0x000000ff) +#define ARTCSR2_ACK_CTS_36MBS FIELD32(0x0000ff00) +#define ARTCSR2_ACK_CTS_48MBS FIELD32(0x00ff0000) +#define ARTCSR2_ACK_CTS_54MBS FIELD32(0xff000000) + +/* + * SECCSR1: WEP control register. + * KICK_ENCRYPT: Kick encryption engine, self-clear. + * ONE_SHOT: 0: ring mode, 1: One shot only mode. + * DESC_ADDRESS: Descriptor physical address of frame. + */ +#define SECCSR1 0x0158 +#define SECCSR1_KICK_ENCRYPT FIELD32(0x00000001) +#define SECCSR1_ONE_SHOT FIELD32(0x00000002) +#define SECCSR1_DESC_ADDRESS FIELD32(0xfffffffc) + +/* + * BBPCSR1: BBP TX configuration. + */ +#define BBPCSR1 0x015c +#define BBPCSR1_CCK FIELD32(0x00000003) +#define BBPCSR1_CCK_FLIP FIELD32(0x00000004) +#define BBPCSR1_OFDM FIELD32(0x00030000) +#define BBPCSR1_OFDM_FLIP FIELD32(0x00040000) + +/* + * Dual band configuration registers. + * DBANDCSR0: Dual band configuration register 0. + * DBANDCSR1: Dual band configuration register 1. + */ +#define DBANDCSR0 0x0160 +#define DBANDCSR1 0x0164 + +/* + * BBPPCSR: BBP Pin control register. + */ +#define BBPPCSR 0x0168 + +/* + * MAC special debug mode selection registers. + * DBGSEL0: MAC special debug mode selection register 0. + * DBGSEL1: MAC special debug mode selection register 1. + */ +#define DBGSEL0 0x016c +#define DBGSEL1 0x0170 + +/* + * BISTCSR: BBP BIST register. + */ +#define BISTCSR 0x0174 + +/* + * Multicast filter registers. + * MCAST0: Multicast filter register 0. + * MCAST1: Multicast filter register 1. + */ +#define MCAST0 0x0178 +#define MCAST1 0x017c + +/* + * UART registers. + * UARTCSR0: UART1 TX register. + * UARTCSR1: UART1 RX register. + * UARTCSR3: UART1 frame control register. + * UARTCSR4: UART1 buffer control register. + * UART2CSR0: UART2 TX register. + * UART2CSR1: UART2 RX register. + * UART2CSR3: UART2 frame control register. 
+ * UART2CSR4: UART2 buffer control register. + */ +#define UARTCSR0 0x0180 +#define UARTCSR1 0x0184 +#define UARTCSR3 0x0188 +#define UARTCSR4 0x018c +#define UART2CSR0 0x0190 +#define UART2CSR1 0x0194 +#define UART2CSR3 0x0198 +#define UART2CSR4 0x019c + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R2: TX antenna control + */ +#define BBP_R2_TX_ANTENNA FIELD8(0x03) +#define BBP_R2_TX_IQ_FLIP FIELD8(0x04) + +/* + * R14: RX antenna control + */ +#define BBP_R14_RX_ANTENNA FIELD8(0x03) +#define BBP_R14_RX_IQ_FLIP FIELD8(0x04) + +/* + * BBP_R70 + */ +#define BBP_R70_JAPAN_FILTER FIELD8(0x08) + +/* + * RF registers + */ + +/* + * RF 1 + */ +#define RF1_TUNER FIELD32(0x00020000) + +/* + * RF 3 + */ +#define RF3_TUNER FIELD32(0x00000100) +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * LED_MODE: 0: default, 1: TX/RX activity,2: Single (ignore link), 3: rsvd. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x10 +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_LED_MODE FIELD16(0x01c0) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * CARDBUS_ACCEL: 0: enable, 1: disable. + * DYN_BBP_TUNE: 0: enable, 1: disable. + * CCK_TX_POWER: CCK TX power compensation. + */ +#define EEPROM_NIC 0x11 +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0001) +#define EEPROM_NIC_DYN_BBP_TUNE FIELD16(0x0002) +#define EEPROM_NIC_CCK_TX_POWER FIELD16(0x000c) + +/* + * EEPROM geography. + * GEO: Default geography setting for device. + */ +#define EEPROM_GEOGRAPHY 0x12 +#define EEPROM_GEOGRAPHY_GEO FIELD16(0x0f00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x13 +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER + */ +#define EEPROM_TXPOWER_START 0x23 +#define EEPROM_TXPOWER_SIZE 7 +#define EEPROM_TXPOWER_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_2 FIELD16(0xff00) + +/* + * RSSI <-> dBm offset calibration + */ +#define EEPROM_CALIBRATE_OFFSET 0x3e +#define EEPROM_CALIBRATE_OFFSET_RSSI FIELD16(0x00ff) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 11 * sizeof(__le32) ) +#define RXD_DESC_SIZE ( 11 * sizeof(__le32) ) + +/* + * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. 
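+ * Each descriptor consists of 11 32-bit words (see TXD_DESC_SIZE
+ * above); Word0 holds the ownership, result and retry fields that the
+ * txdone handler evaluates.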
+ */ + +/* + * Word0 + */ +#define TXD_W0_OWNER_NIC FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_RESULT FIELD32(0x0000001c) +#define TXD_W0_RETRY_COUNT FIELD32(0x000000e0) +#define TXD_W0_MORE_FRAG FIELD32(0x00000100) +#define TXD_W0_ACK FIELD32(0x00000200) +#define TXD_W0_TIMESTAMP FIELD32(0x00000400) +#define TXD_W0_OFDM FIELD32(0x00000800) +#define TXD_W0_CIPHER_OWNER FIELD32(0x00001000) +#define TXD_W0_IFS FIELD32(0x00006000) +#define TXD_W0_RETRY_MODE FIELD32(0x00008000) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + */ +#define TXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define TXD_W2_IV_OFFSET FIELD32(0x0000003f) +#define TXD_W2_AIFS FIELD32(0x000000c0) +#define TXD_W2_CWMIN FIELD32(0x00000f00) +#define TXD_W2_CWMAX FIELD32(0x0000f000) + +/* + * Word3: PLCP information + */ +#define TXD_W3_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W3_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W3_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W3_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word4 + */ +#define TXD_W4_IV FIELD32(0xffffffff) + +/* + * Word5 + */ +#define TXD_W5_EIV FIELD32(0xffffffff) + +/* + * Word6-9: Key + */ +#define TXD_W6_KEY FIELD32(0xffffffff) +#define TXD_W7_KEY FIELD32(0xffffffff) +#define TXD_W8_KEY FIELD32(0xffffffff) +#define TXD_W9_KEY FIELD32(0xffffffff) + +/* + * Word10 + */ +#define TXD_W10_RTS FIELD32(0x00000001) +#define TXD_W10_TX_RATE FIELD32(0x000000fe) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000002) +#define RXD_W0_MULTICAST FIELD32(0x00000004) +#define RXD_W0_BROADCAST FIELD32(0x00000008) +#define RXD_W0_MY_BSS FIELD32(0x00000010) +#define RXD_W0_CRC_ERROR FIELD32(0x00000020) +#define RXD_W0_OFDM FIELD32(0x00000040) +#define RXD_W0_PHYSICAL_ERROR FIELD32(0x00000080) +#define RXD_W0_CIPHER_OWNER FIELD32(0x00000100) +#define RXD_W0_ICV_ERROR FIELD32(0x00000200) +#define RXD_W0_IV_OFFSET FIELD32(0x0000fc00) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define RXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + */ +#define RXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define RXD_W2_SIGNAL FIELD32(0x000000ff) +#define RXD_W2_RSSI FIELD32(0x0000ff00) +#define RXD_W2_TA FIELD32(0xffff0000) + +/* + * Word3 + */ +#define RXD_W3_TA FIELD32(0xffffffff) + +/* + * Word4 + */ +#define RXD_W4_IV FIELD32(0xffffffff) + +/* + * Word5 + */ +#define RXD_W5_EIV FIELD32(0xffffffff) + +/* + * Word6-9: Key + */ +#define RXD_W6_KEY FIELD32(0xffffffff) +#define RXD_W7_KEY FIELD32(0xffffffff) +#define RXD_W8_KEY FIELD32(0xffffffff) +#define RXD_W9_KEY FIELD32(0xffffffff) + +/* + * Word10 + */ +#define RXD_W10_DROP FIELD32(0x00000001) + +/* + * Macros for converting txpower from EEPROM to mac80211 value + * and from mac80211 value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ + (((u8)(__txpower)) > MAX_TXPOWER) ? 
DEFAULT_TXPOWER : (__txpower)
+
+#define TXPOWER_TO_DEV(__txpower) \
+ clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
+
+#endif /* RT2500PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
new file mode 100644
index 0000000..9b04964
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -0,0 +1,1924 @@
+/*
+ Copyright (C) 2004 - 2009 Ivo van Doorn
+
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: rt2500usb
+ Abstract: rt2500usb device specific routines.
+ Supported chipsets: RT2570.
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "rt2x00.h"
+#include "rt2x00usb.h"
+#include "rt2500usb.h"
+
+/*
+ * Allow hardware encryption to be disabled.
+ */
+static int modparam_nohwcrypt = 0;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+/*
+ * Register access.
+ * All access to the CSR registers will go through the methods
+ * rt2500usb_register_read and rt2500usb_register_write.
+ * BBP and RF registers require indirect register access,
+ * and use the CSR registers BBPCSR and RFCSR to achieve this.
+ * These indirect registers work with busy bits,
+ * and we will try maximal REGISTER_BUSY_COUNT times to access
+ * the register while taking a REGISTER_BUSY_DELAY us delay
+ * between each attempt. When the busy bit is still set at that time,
+ * the access attempt is considered to have failed,
+ * and we will print an error.
+ * If the csr_mutex is already held then the _lock variants must
+ * be used instead.
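+ * Note that the RT2570 CSR registers are 16 bits wide, so the helpers
+ * below transfer __le16 values through the USB vendor requests.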
+ */
+static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u16 *value)
+{
+ __le16 reg;
+ rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
+ USB_VENDOR_REQUEST_IN, offset,
+ &reg, sizeof(reg), REGISTER_TIMEOUT);
+ *value = le16_to_cpu(reg);
+}
+
+static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u16 *value)
+{
+ __le16 reg;
+ rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
+ USB_VENDOR_REQUEST_IN, offset,
+ &reg, sizeof(reg), REGISTER_TIMEOUT);
+ *value = le16_to_cpu(reg);
+}
+
+static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ void *value, const u16 length)
+{
+ rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
+ USB_VENDOR_REQUEST_IN, offset,
+ value, length,
+ REGISTER_TIMEOUT16(length));
+}
+
+static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u16 value)
+{
+ __le16 reg = cpu_to_le16(value);
+ rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
+ USB_VENDOR_REQUEST_OUT, offset,
+ &reg, sizeof(reg), REGISTER_TIMEOUT);
+}
+
+static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u16 value)
+{
+ __le16 reg = cpu_to_le16(value);
+ rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE,
+ USB_VENDOR_REQUEST_OUT, offset,
+ &reg, sizeof(reg), REGISTER_TIMEOUT);
+}
+
+static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ void *value, const u16 length)
+{
+ rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
+ USB_VENDOR_REQUEST_OUT, offset,
+ value, length,
+ REGISTER_TIMEOUT16(length));
+}
+
+static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ struct rt2x00_field16 field,
+ u16 *reg)
+{
+ unsigned int i;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2500usb_register_read_lock(rt2x00dev, offset, reg);
+ if (!rt2x00_get_field16(*reg, field))
+ return 1;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ ERROR(rt2x00dev, "Indirect register access failed: "
+ "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
+ *reg = ~0;
+
+ return 0;
+}
+
+#define WAIT_FOR_BBP(__dev, __reg) \
+ rt2500usb_regbusy_read((__dev), PHY_CSR8, PHY_CSR8_BUSY, (__reg))
+#define WAIT_FOR_RF(__dev, __reg) \
+ rt2500usb_regbusy_read((__dev), PHY_CSR10, PHY_CSR10_RF_BUSY, (__reg))
+
+static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word, const u8 value)
+{
+ u16 reg;
+
+ mutex_lock(&rt2x00dev->csr_mutex);
+
+ /*
+ * Wait until the BBP becomes available, afterwards we
+ * can safely write the new data into the register.
+ */
+ if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field16(&reg, PHY_CSR7_DATA, value);
+ rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
+ rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 0);
+
+ rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);
+ }
+
+ mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word, u8 *value)
+{
+ u16 reg;
+
+ mutex_lock(&rt2x00dev->csr_mutex);
+
+ /*
+ * Wait until the BBP becomes available, afterwards we
+ * can safely write the read request into the register.
+ * After the data has been written, we wait until hardware
+ * returns the correct value, if at any time the register
+ * doesn't become available in time, reg will be 0xffff
+ * which means we return 0xff to the caller.
+ */
+ if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
+ rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 1);
+
+ rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);
+
+ if (WAIT_FOR_BBP(rt2x00dev, &reg))
+ rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7, &reg);
+ }
+
+ *value = rt2x00_get_field16(reg, PHY_CSR7_DATA);
+
+ mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word, const u32 value)
+{
+ u16 reg;
+
+ mutex_lock(&rt2x00dev->csr_mutex);
+
+ /*
+ * Wait until the RF becomes available, afterwards we
+ * can safely write the new data into the register.
+ */
+ if (WAIT_FOR_RF(rt2x00dev, &reg)) {
+ reg = 0;
+ rt2x00_set_field16(&reg, PHY_CSR9_RF_VALUE, value);
+ rt2500usb_register_write_lock(rt2x00dev, PHY_CSR9, reg);
+
+ reg = 0;
+ rt2x00_set_field16(&reg, PHY_CSR10_RF_VALUE, value >> 16);
+ rt2x00_set_field16(&reg, PHY_CSR10_RF_NUMBER_OF_BITS, 20);
+ rt2x00_set_field16(&reg, PHY_CSR10_RF_IF_SELECT, 0);
+ rt2x00_set_field16(&reg, PHY_CSR10_RF_BUSY, 1);
+
+ rt2500usb_register_write_lock(rt2x00dev, PHY_CSR10, reg);
+ rt2x00_rf_write(rt2x00dev, word, value);
+ }
+
+ mutex_unlock(&rt2x00dev->csr_mutex);
+}
+
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+static void _rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u32 *value)
+{
+ rt2500usb_register_read(rt2x00dev, offset, (u16 *)value);
+}
+
+static void _rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u32 value)
+{
+ rt2500usb_register_write(rt2x00dev, offset, value);
+}
+
+static const struct rt2x00debug rt2500usb_rt2x00debug = {
+ .owner = THIS_MODULE,
+ .csr = {
+ .read = _rt2500usb_register_read,
+ .write = _rt2500usb_register_write,
+ .flags = RT2X00DEBUGFS_OFFSET,
+ .word_base = CSR_REG_BASE,
+ .word_size = sizeof(u16),
+ .word_count = CSR_REG_SIZE / sizeof(u16),
+ },
+ .eeprom = {
+ .read = rt2x00_eeprom_read,
+ .write = rt2x00_eeprom_write,
+ .word_base = EEPROM_BASE,
+ .word_size = sizeof(u16),
+ .word_count = EEPROM_SIZE / sizeof(u16),
+ },
+ .bbp = {
+ .read = rt2500usb_bbp_read,
+ .write = rt2500usb_bbp_write,
+ .word_base = BBP_BASE,
+ .word_size = sizeof(u8),
+ .word_count = BBP_SIZE / sizeof(u8),
+ },
+ .rf = {
+ .read = rt2x00_rf_read,
+ .write = rt2500usb_rf_write,
+ .word_base = RF_BASE,
+ .word_size = sizeof(u32),
+ .word_count = RF_SIZE / sizeof(u32),
+ },
+};
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+
+static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
+{
+ u16 reg;
+
+ rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
+ return rt2x00_get_field32(reg, MAC_CSR19_BIT7);
+}
+
+#ifdef CONFIG_RT2X00_LIB_LEDS
+static void rt2500usb_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rt2x00_led *led =
+ container_of(led_cdev, struct rt2x00_led, led_dev);
+ unsigned int enabled = brightness != LED_OFF;
+ u16 reg;
+
+ rt2500usb_register_read(led->rt2x00dev, MAC_CSR20, &reg);
+
+ if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC)
+ rt2x00_set_field16(&reg, MAC_CSR20_LINK, enabled);
+ else if (led->type == LED_TYPE_ACTIVITY)
+ rt2x00_set_field16(&reg, MAC_CSR20_ACTIVITY, enabled);
+
+ rt2500usb_register_write(led->rt2x00dev, MAC_CSR20, reg);
+}
+
+static int rt2500usb_blink_set(struct
led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + u16 reg; + + rt2500usb_register_read(led->rt2x00dev, MAC_CSR21, ®); + rt2x00_set_field16(®, MAC_CSR21_ON_PERIOD, *delay_on); + rt2x00_set_field16(®, MAC_CSR21_OFF_PERIOD, *delay_off); + rt2500usb_register_write(led->rt2x00dev, MAC_CSR21, reg); + + return 0; +} + +static void rt2500usb_init_led(struct rt2x00_dev *rt2x00dev, + struct rt2x00_led *led, + enum led_type type) +{ + led->rt2x00dev = rt2x00dev; + led->type = type; + led->led_dev.brightness_set = rt2500usb_brightness_set; + led->led_dev.blink_set = rt2500usb_blink_set; + led->flags = LED_INITIALIZED; +} +#endif /* CONFIG_RT2X00_LIB_LEDS */ + +/* + * Configuration handlers. + */ + +/* + * rt2500usb does not differentiate between shared and pairwise + * keys, so we should use the same function for both key types. + */ +static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key) +{ + int timeout; + u32 mask; + u16 reg; + + if (crypto->cmd == SET_KEY) { + /* + * Pairwise key will always be entry 0, but this + * could collide with a shared key on the same + * position... + */ + mask = TXRX_CSR0_KEY_ID.bit_mask; + + rt2500usb_register_read(rt2x00dev, TXRX_CSR0, ®); + reg &= mask; + + if (reg && reg == mask) + return -ENOSPC; + + reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); + + key->hw_key_idx += reg ? ffz(reg) : 0; + + /* + * The encryption key doesn't fit within the CSR cache, + * this means we should allocate it separately and use + * rt2x00usb_vendor_request() to send the key to the hardware. + */ + reg = KEY_ENTRY(key->hw_key_idx); + timeout = REGISTER_TIMEOUT32(sizeof(crypto->key)); + rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, reg, + crypto->key, + sizeof(crypto->key), + timeout); + + /* + * The driver does not support the IV/EIV generation + * in hardware. However it demands the data to be provided + * both separately as well as inside the frame. + * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib + * to ensure rt2x00lib will not strip the data from the + * frame after the copy, now we must tell mac80211 + * to generate the IV/EIV data. + */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + } + + /* + * TXRX_CSR0_KEY_ID contains only single-bit fields to indicate + * a particular key is valid. + */ + rt2500usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field16(®, TXRX_CSR0_ALGORITHM, crypto->cipher); + rt2x00_set_field16(®, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); + + mask = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); + if (crypto->cmd == SET_KEY) + mask |= 1 << key->hw_key_idx; + else if (crypto->cmd == DISABLE_KEY) + mask &= ~(1 << key->hw_key_idx); + rt2x00_set_field16(®, TXRX_CSR0_KEY_ID, mask); + rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + return 0; +} + +static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev, + const unsigned int filter_flags) +{ + u16 reg; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. 
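/*
 * Illustrative sketch, not part of the patch: rt2500usb_config_key() above
 * treats the 4-bit TXRX_CSR0_KEY_ID field as an occupancy bitmap of the four
 * hardware key entries, returns -ENOSPC when all bits are set, and otherwise
 * picks the first clear bit (ffz) as the new slot.  The stand-alone version
 * below uses a hypothetical first_free_key_slot() helper to show the idea.
 */
#include <stdio.h>

/* Return the first free entry in a 4-slot bitmap, or -1 if all are in use. */
static int first_free_key_slot(unsigned int occupied)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (!(occupied & (1u << i)))
			return (int)i;
	return -1;		/* the driver returns -ENOSPC in this case */
}

int main(void)
{
	printf("%d\n", first_free_key_slot(0x0));	/* 0: nothing stored yet */
	printf("%d\n", first_free_key_slot(0x5));	/* 1: entries 0 and 2 taken */
	printf("%d\n", first_free_key_slot(0xf));	/* -1: all four entries used */
	return 0;
}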
+ */
+	rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
+	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CRC,
+			   !(filter_flags & FIF_FCSFAIL));
+	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_PHYSICAL,
+			   !(filter_flags & FIF_PLCPFAIL));
+	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
+			   !(filter_flags & FIF_CONTROL));
+	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
+			   !(filter_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
+			   !(filter_flags & FIF_PROMISC_IN_BSS) &&
+			   !rt2x00dev->intf_ap_count);
+	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
+	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
+			   !(filter_flags & FIF_ALLMULTI));
+	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_BROADCAST, 0);
+	rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
+}
+
+static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
+				  struct rt2x00_intf *intf,
+				  struct rt2x00intf_conf *conf,
+				  const unsigned int flags)
+{
+	unsigned int bcn_preload;
+	u16 reg;
+
+	if (flags & CONFIG_UPDATE_TYPE) {
+		/*
+		 * Enable beacon config
+		 */
+		bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20);
+		rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg);
+		rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6);
+		rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW,
+				   2 * (conf->type != NL80211_IFTYPE_STATION));
+		rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg);
+
+		/*
+		 * Enable synchronisation.
+		 */
+		rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
+		rt2x00_set_field16(&reg, TXRX_CSR18_OFFSET, 0);
+		rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
+
+		rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
+		rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
+		rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
+		rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
+		rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
+	}
+
+	if (flags & CONFIG_UPDATE_MAC)
+		rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR2, conf->mac,
+					      (3 * sizeof(__le16)));
+
+	if (flags & CONFIG_UPDATE_BSSID)
+		rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR5, conf->bssid,
+					      (3 * sizeof(__le16)));
+}
+
+static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev,
+				 struct rt2x00lib_erp *erp)
+{
+	u16 reg;
+
+	rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg);
+	rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE,
+			   !!erp->short_preamble);
+	rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg);
+
+	rt2500usb_register_write(rt2x00dev, TXRX_CSR11, erp->basic_rates);
+
+	rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
+	rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL, erp->beacon_int * 4);
+	rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
+
+	rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time);
+	rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs);
+	rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs);
+}
+
+static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
+				 struct antenna_setup *ant)
+{
+	u8 r2;
+	u8 r14;
+	u16 csr5;
+	u16 csr6;
+
+	/*
+	 * We should never come here because rt2x00lib is supposed
+	 * to catch this and send us the correct antenna explicitly.
+	 */
+	BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
+	       ant->tx == ANTENNA_SW_DIVERSITY);
+
+	rt2500usb_bbp_read(rt2x00dev, 2, &r2);
+	rt2500usb_bbp_read(rt2x00dev, 14, &r14);
+	rt2500usb_register_read(rt2x00dev, PHY_CSR5, &csr5);
+	rt2500usb_register_read(rt2x00dev, PHY_CSR6, &csr6);
+
+	/*
+	 * Configure the TX antenna.
+ */ + switch (ant->tx) { + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 1); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 1); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 0); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 0); + break; + case ANTENNA_B: + default: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 2); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 2); + break; + } + + /* + * Configure the RX antenna. + */ + switch (ant->rx) { + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); + break; + case ANTENNA_B: + default: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); + break; + } + + /* + * RT2525E and RT5222 need to flip TX I/Q + */ + if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) { + rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1); + + /* + * RT2525E does not need RX I/Q Flip. + */ + if (rt2x00_rf(rt2x00dev, RF2525E)) + rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); + } else { + rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 0); + } + + rt2500usb_bbp_write(rt2x00dev, 2, r2); + rt2500usb_bbp_write(rt2x00dev, 14, r14); + rt2500usb_register_write(rt2x00dev, PHY_CSR5, csr5); + rt2500usb_register_write(rt2x00dev, PHY_CSR6, csr6); +} + +static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + /* + * Set TXpower. + */ + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + + /* + * For RT2525E we should first set the channel to half band higher. + */ + if (rt2x00_rf(rt2x00dev, RF2525E)) { + static const u32 vals[] = { + 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2, + 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba, + 0x000008ba, 0x000008be, 0x000008b7, 0x00000902, + 0x00000902, 0x00000906 + }; + + rt2500usb_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); + if (rf->rf4) + rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); + } + + rt2500usb_rf_write(rt2x00dev, 1, rf->rf1); + rt2500usb_rf_write(rt2x00dev, 2, rf->rf2); + rt2500usb_rf_write(rt2x00dev, 3, rf->rf3); + if (rf->rf4) + rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); +} + +static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + u32 rf3; + + rt2x00_rf_read(rt2x00dev, 3, &rf3); + rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2500usb_rf_write(rt2x00dev, 3, rf3); +} + +static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + enum dev_state state = + (libconf->conf->flags & IEEE80211_CONF_PS) ? 
+ STATE_SLEEP : STATE_AWAKE; + u16 reg; + + if (state == STATE_SLEEP) { + rt2500usb_register_read(rt2x00dev, MAC_CSR18, ®); + rt2x00_set_field16(®, MAC_CSR18_DELAY_AFTER_BEACON, + rt2x00dev->beacon_int - 20); + rt2x00_set_field16(®, MAC_CSR18_BEACONS_BEFORE_WAKEUP, + libconf->conf->listen_interval - 1); + + /* We must first disable autowake before it can be enabled */ + rt2x00_set_field16(®, MAC_CSR18_AUTO_WAKE, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); + + rt2x00_set_field16(®, MAC_CSR18_AUTO_WAKE, 1); + rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); + } + + rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); +} + +static void rt2500usb_config(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf, + const unsigned int flags) +{ + if (flags & IEEE80211_CONF_CHANGE_CHANNEL) + rt2500usb_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & IEEE80211_CONF_CHANGE_POWER) && + !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) + rt2500usb_config_txpower(rt2x00dev, + libconf->conf->power_level); + if (flags & IEEE80211_CONF_CHANGE_PS) + rt2500usb_config_ps(rt2x00dev, libconf); +} + +/* + * Link tuning + */ +static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual) +{ + u16 reg; + + /* + * Update FCS error count from register. + */ + rt2500usb_register_read(rt2x00dev, STA_CSR0, ®); + qual->rx_failed = rt2x00_get_field16(reg, STA_CSR0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2500usb_register_read(rt2x00dev, STA_CSR3, ®); + qual->false_cca = rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR); +} + +static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual) +{ + u16 eeprom; + u16 value; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R24_LOW); + rt2500usb_bbp_write(rt2x00dev, 24, value); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R25_LOW); + rt2500usb_bbp_write(rt2x00dev, 25, value); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R61_LOW); + rt2500usb_bbp_write(rt2x00dev, 61, value); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER); + rt2500usb_bbp_write(rt2x00dev, 17, value); + + qual->vgc_level = value; +} + +/* + * Initialization functions. 
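/*
 * Illustrative sketch, not part of the patch: the rt2x00_set_field16() and
 * rt2x00_get_field16() calls used throughout this file pack and unpack
 * sub-fields of 16-bit CSR words through a mask descriptor.  The real
 * descriptor type and helpers come from rt2x00reg.h (not in this hunk), so
 * the struct and helper names below (field16, f16_get, f16_set) are
 * hypothetical stand-ins that only demonstrate the mask/shift idea.  The
 * PHY_CSR7 field masks used in main() match the defines in rt2500usb.h.
 */
#include <stdint.h>
#include <stdio.h>

struct field16 {
	uint16_t bit_mask;		/* bits of the word owned by the field */
	unsigned int bit_offset;	/* shift of the field's lowest bit */
};

static uint16_t f16_get(uint16_t word, struct field16 f)
{
	return (uint16_t)((word & f.bit_mask) >> f.bit_offset);
}

static void f16_set(uint16_t *word, struct field16 f, uint16_t value)
{
	*word = (uint16_t)((*word & ~f.bit_mask) |
			   ((value << f.bit_offset) & f.bit_mask));
}

int main(void)
{
	/* PHY_CSR7: DATA in bits 0-7, REG_ID in bits 8-14, READ in bit 15. */
	const struct field16 DATA = { 0x00ff, 0 };
	const struct field16 REG_ID = { 0x7f00, 8 };
	const struct field16 READ_CONTROL = { 0x8000, 15 };
	uint16_t reg = 0;

	f16_set(&reg, DATA, 0x2b);
	f16_set(&reg, REG_ID, 17);
	f16_set(&reg, READ_CONTROL, 0);
	printf("reg=0x%04x reg_id=%u\n", (unsigned)reg,
	       (unsigned)f16_get(reg, REG_ID));
	return 0;
}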
+ */ +static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0x0001, + USB_MODE_TEST, REGISTER_TIMEOUT); + rt2x00usb_vendor_request_sw(rt2x00dev, USB_SINGLE_WRITE, 0x0308, + 0x00f0, REGISTER_TIMEOUT); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field16(®, TXRX_CSR2_DISABLE_RX, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); + + rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x1111); + rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x1e11); + + rt2500usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field16(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field16(®, MAC_CSR1_BBP_RESET, 1); + rt2x00_set_field16(®, MAC_CSR1_HOST_READY, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2500usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field16(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_BBP_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_HOST_READY, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR5, ®); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID0, 13); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID1, 12); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID1_VALID, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR5, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR6, ®); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID0, 10); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID1, 11); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID1_VALID, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR6, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR7, ®); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID0, 7); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID1, 6); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID1_VALID, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR7, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR8, ®); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID0, 5); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID1, 0); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID1_VALID, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR8, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR19, ®); + rt2x00_set_field16(®, TXRX_CSR19_TSF_COUNT, 0); + rt2x00_set_field16(®, TXRX_CSR19_TSF_SYNC, 0); + rt2x00_set_field16(®, TXRX_CSR19_TBCN, 0); + rt2x00_set_field16(®, TXRX_CSR19_BEACON_GEN, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + + rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f); + rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2500usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field16(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_BBP_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_HOST_READY, 1); + rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); + + if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) { + rt2500usb_register_read(rt2x00dev, PHY_CSR2, ®); + rt2x00_set_field16(®, PHY_CSR2_LNA, 0); + } else { + reg = 0; + rt2x00_set_field16(®, PHY_CSR2_LNA, 1); + rt2x00_set_field16(®, PHY_CSR2_LNA_MODE, 3); + } + rt2500usb_register_write(rt2x00dev, PHY_CSR2, reg); + + rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0002); + rt2500usb_register_write(rt2x00dev, MAC_CSR22, 0x0053); + rt2500usb_register_write(rt2x00dev, 
MAC_CSR15, 0x01ee); + rt2500usb_register_write(rt2x00dev, MAC_CSR16, 0x0000); + + rt2500usb_register_read(rt2x00dev, MAC_CSR8, ®); + rt2x00_set_field16(®, MAC_CSR8_MAX_FRAME_UNIT, + rt2x00dev->rx->data_size); + rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field16(®, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field16(®, TXRX_CSR0_KEY_ID, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2500usb_register_read(rt2x00dev, MAC_CSR18, ®); + rt2x00_set_field16(®, MAC_CSR18_DELAY_AFTER_BEACON, 90); + rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); + + rt2500usb_register_read(rt2x00dev, PHY_CSR4, ®); + rt2x00_set_field16(®, PHY_CSR4_LOW_RF_LE, 1); + rt2500usb_register_write(rt2x00dev, PHY_CSR4, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field16(®, TXRX_CSR1_AUTO_SEQUENCE, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg); + + return 0; +} + +static int rt2500usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500usb_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + return 0; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; +} + +static int rt2500usb_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 value; + u8 reg_id; + + if (unlikely(rt2500usb_wait_bbp_ready(rt2x00dev))) + return -EACCES; + + rt2500usb_bbp_write(rt2x00dev, 3, 0x02); + rt2500usb_bbp_write(rt2x00dev, 4, 0x19); + rt2500usb_bbp_write(rt2x00dev, 14, 0x1c); + rt2500usb_bbp_write(rt2x00dev, 15, 0x30); + rt2500usb_bbp_write(rt2x00dev, 16, 0xac); + rt2500usb_bbp_write(rt2x00dev, 18, 0x18); + rt2500usb_bbp_write(rt2x00dev, 19, 0xff); + rt2500usb_bbp_write(rt2x00dev, 20, 0x1e); + rt2500usb_bbp_write(rt2x00dev, 21, 0x08); + rt2500usb_bbp_write(rt2x00dev, 22, 0x08); + rt2500usb_bbp_write(rt2x00dev, 23, 0x08); + rt2500usb_bbp_write(rt2x00dev, 24, 0x80); + rt2500usb_bbp_write(rt2x00dev, 25, 0x50); + rt2500usb_bbp_write(rt2x00dev, 26, 0x08); + rt2500usb_bbp_write(rt2x00dev, 27, 0x23); + rt2500usb_bbp_write(rt2x00dev, 30, 0x10); + rt2500usb_bbp_write(rt2x00dev, 31, 0x2b); + rt2500usb_bbp_write(rt2x00dev, 32, 0xb9); + rt2500usb_bbp_write(rt2x00dev, 34, 0x12); + rt2500usb_bbp_write(rt2x00dev, 35, 0x50); + rt2500usb_bbp_write(rt2x00dev, 39, 0xc4); + rt2500usb_bbp_write(rt2x00dev, 40, 0x02); + rt2500usb_bbp_write(rt2x00dev, 41, 0x60); + rt2500usb_bbp_write(rt2x00dev, 53, 0x10); + rt2500usb_bbp_write(rt2x00dev, 54, 0x18); + rt2500usb_bbp_write(rt2x00dev, 56, 0x08); + rt2500usb_bbp_write(rt2x00dev, 57, 0x10); + rt2500usb_bbp_write(rt2x00dev, 58, 0x08); + rt2500usb_bbp_write(rt2x00dev, 61, 0x60); + rt2500usb_bbp_write(rt2x00dev, 62, 0x10); + rt2500usb_bbp_write(rt2x00dev, 75, 0xff); + + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + rt2500usb_bbp_write(rt2x00dev, reg_id, value); + } + } + + return 0; +} + +/* + * Device state switch handlers. 
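/*
 * Illustrative sketch, not part of the patch: the EEPROM loop at the end of
 * rt2500usb_init_bbp() above applies vendor BBP overrides stored as packed
 * 16-bit words (register id in one half, value in the other) and skips words
 * that read back as erased (0xffff) or empty (0x0000).  Which half holds the
 * id is defined by the EEPROM_BBP_REG_ID/EEPROM_BBP_VALUE masks in
 * rt2500usb.h, not shown in this hunk, so treat the byte split below as an
 * assumption made only for the example.
 */
#include <stdint.h>
#include <stdio.h>

static void apply_bbp_overrides(const uint16_t *words, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (words[i] == 0xffff || words[i] == 0x0000)
			continue;	/* unprogrammed entry, nothing to write */

		uint8_t reg_id = (uint8_t)(words[i] >> 8);	/* assumed id half */
		uint8_t value = (uint8_t)(words[i] & 0x00ff);	/* assumed value half */
		printf("would write BBP %u = 0x%02x\n",
		       (unsigned)reg_id, (unsigned)value);
	}
}

int main(void)
{
	const uint16_t eeprom[] = { 0x1122, 0xffff, 0x0000, 0x4b18 };

	apply_bbp_overrides(eeprom, sizeof(eeprom) / sizeof(eeprom[0]));
	return 0;
}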
+ */ +static void rt2500usb_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u16 reg; + + rt2500usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field16(®, TXRX_CSR2_DISABLE_RX, + (state == STATE_RADIO_RX_OFF) || + (state == STATE_RADIO_RX_OFF_LINK)); + rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); +} + +static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. + */ + if (unlikely(rt2500usb_init_registers(rt2x00dev) || + rt2500usb_init_bbp(rt2x00dev))) + return -EIO; + + return 0; +} + +static void rt2500usb_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x2121); + rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x2121); + + /* + * Disable synchronisation. + */ + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); + + rt2x00usb_disable_radio(rt2x00dev); +} + +static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u16 reg; + u16 reg2; + unsigned int i; + char put_to_sleep; + char bbp_state; + char rf_state; + + put_to_sleep = (state != STATE_AWAKE); + + reg = 0; + rt2x00_set_field16(®, MAC_CSR17_BBP_DESIRE_STATE, state); + rt2x00_set_field16(®, MAC_CSR17_RF_DESIRE_STATE, state); + rt2x00_set_field16(®, MAC_CSR17_PUT_TO_SLEEP, put_to_sleep); + rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); + rt2x00_set_field16(®, MAC_CSR17_SET_STATE, 1); + rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500usb_register_read(rt2x00dev, MAC_CSR17, ®2); + bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE); + rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE); + if (bbp_state == state && rf_state == state) + return 0; + rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); + msleep(30); + } + + return -EBUSY; +} + +static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt2500usb_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt2500usb_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_ON_LINK: + case STATE_RADIO_RX_OFF: + case STATE_RADIO_RX_OFF_LINK: + rt2500usb_toggle_rx(rt2x00dev, state); + break; + case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_OFF: + /* No support, but no error either */ + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2500usb_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + if (unlikely(retval)) + ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", + state, retval); + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct txentry_desc *txdesc) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + __le32 *txd = skbdesc->desc; + u32 word; + + /* + * Start writing the descriptor words. 
+ */ + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); + rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs); + rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); + rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); + rt2x00_desc_write(txd, 2, word); + + if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { + _rt2x00_desc_write(txd, 3, skbdesc->iv[0]); + _rt2x00_desc_write(txd, 4, skbdesc->iv[1]); + } + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + test_bit(ENTRY_TXD_ACK, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + (txdesc->rate_mode == RATE_MODE_OFDM)); + rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, + test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); + rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher); + rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx); + rt2x00_desc_write(txd, 0, word); +} + +/* + * TX data initialization + */ +static void rt2500usb_beacondone(struct urb *urb); + +static void rt2500usb_write_beacon(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); + struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint); + int length; + u16 reg; + + /* + * Add the descriptor in front of the skb. + */ + skb_push(entry->skb, entry->queue->desc_size); + memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len); + skbdesc->desc = entry->skb->data; + + /* + * Disable beaconing while we are reloading the beacon data, + * otherwise we might be sending out invalid data. + */ + rt2500usb_register_read(rt2x00dev, TXRX_CSR19, ®); + rt2x00_set_field16(®, TXRX_CSR19_BEACON_GEN, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + + /* + * USB devices cannot blindly pass the skb->len as the + * length of the data to usb_fill_bulk_urb. Pass the skb + * to the driver to determine what the length should be. + */ + length = rt2x00dev->ops->lib->get_tx_data_len(entry); + + usb_fill_bulk_urb(bcn_priv->urb, usb_dev, pipe, + entry->skb->data, length, rt2500usb_beacondone, + entry); + + /* + * Second we need to create the guardian byte. + * We only need a single byte, so lets recycle + * the 'flags' field we are not using for beacons. + */ + bcn_priv->guardian_data = 0; + usb_fill_bulk_urb(bcn_priv->guardian_urb, usb_dev, pipe, + &bcn_priv->guardian_data, 1, rt2500usb_beacondone, + entry); + + /* + * Send out the guardian byte. 
+ */ + usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC); +} + +static int rt2500usb_get_tx_data_len(struct queue_entry *entry) +{ + int length; + + /* + * The length _must_ be a multiple of 2, + * but it must _not_ be a multiple of the USB packet size. + */ + length = roundup(entry->skb->len, 2); + length += (2 * !(length % entry->queue->usb_maxpacket)); + + return length; +} + +static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue) +{ + u16 reg, reg0; + + if (queue != QID_BEACON) { + rt2x00usb_kick_tx_queue(rt2x00dev, queue); + return; + } + + rt2500usb_register_read(rt2x00dev, TXRX_CSR19, ®); + if (!rt2x00_get_field16(reg, TXRX_CSR19_BEACON_GEN)) { + rt2x00_set_field16(®, TXRX_CSR19_TSF_COUNT, 1); + rt2x00_set_field16(®, TXRX_CSR19_TBCN, 1); + reg0 = reg; + rt2x00_set_field16(®, TXRX_CSR19_BEACON_GEN, 1); + /* + * Beacon generation will fail initially. + * To prevent this we need to change the TXRX_CSR19 + * register several times (reg0 is the same as reg + * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0 + * and 1 in reg). + */ + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + } +} + +/* + * RX control handlers + */ +static void rt2500usb_fill_rxdone(struct queue_entry *entry, + struct rxdone_entry_desc *rxdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct queue_entry_priv_usb *entry_priv = entry->priv_data; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + __le32 *rxd = + (__le32 *)(entry->skb->data + + (entry_priv->urb->actual_length - + entry->queue->desc_size)); + u32 word0; + u32 word1; + + /* + * Copy descriptor to the skbdesc->desc buffer, making it safe from moving of + * frame data in rt2x00usb. + */ + memcpy(skbdesc->desc, rxd, skbdesc->desc_len); + rxd = (__le32 *)skbdesc->desc; + + /* + * It is now safe to read the descriptor on all architectures. + */ + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 1, &word1); + + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; + if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) + rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC; + + if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { + rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER); + if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR)) + rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY; + } + + if (rxdesc->cipher != CIPHER_NONE) { + _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]); + _rt2x00_desc_read(rxd, 3, &rxdesc->iv[1]); + rxdesc->dev_flags |= RXDONE_CRYPTO_IV; + + /* ICV is located at the end of frame */ + + rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) + rxdesc->flags |= RX_FLAG_DECRYPTED; + else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) + rxdesc->flags |= RX_FLAG_MMIC_ERROR; + } + + /* + * Obtain the status about this packet. + * When frame was received with an OFDM bitrate, + * the signal is the PLCP value. If it was received with + * a CCK bitrate the signal is the rate in 100kbit/s. 
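/*
 * Illustrative sketch, not part of the patch: rt2500usb_get_tx_data_len()
 * above rounds the frame length up to a multiple of 2 and then adds two more
 * bytes whenever the result would be an exact multiple of the bulk packet
 * size, matching the rule spelled out in its comment.  Stand-alone version
 * with a hypothetical padded_len() helper:
 */
#include <stdio.h>

static unsigned int padded_len(unsigned int skb_len, unsigned int usb_maxpacket)
{
	unsigned int length = (skb_len + 1) & ~1u;	/* roundup(skb_len, 2) */

	if (!(length % usb_maxpacket))
		length += 2;	/* never an exact multiple of the packet size */
	return length;
}

int main(void)
{
	printf("%u\n", padded_len(61, 64));	/* 62: only rounded up to even */
	printf("%u\n", padded_len(64, 64));	/* 66: 64 would fill a packet  */
	printf("%u\n", padded_len(128, 64));	/* 130                         */
	return 0;
}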
+ */ + rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); + rxdesc->rssi = + rt2x00_get_field32(word1, RXD_W1_RSSI) - rt2x00dev->rssi_offset; + rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + if (rt2x00_get_field32(word0, RXD_W0_OFDM)) + rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; + else + rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; + if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) + rxdesc->dev_flags |= RXDONE_MY_BSS; + + /* + * Adjust the skb memory window to the frame boundaries. + */ + skb_trim(entry->skb, rxdesc->size); +} + +/* + * Interrupt functions. + */ +static void rt2500usb_beacondone(struct urb *urb) +{ + struct queue_entry *entry = (struct queue_entry *)urb->context; + struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) + return; + + /* + * Check if this was the guardian beacon, + * if that was the case we need to send the real beacon now. + * Otherwise we should free the sk_buffer, the device + * should be doing the rest of the work now. + */ + if (bcn_priv->guardian_urb == urb) { + usb_submit_urb(bcn_priv->urb, GFP_ATOMIC); + } else if (bcn_priv->urb == urb) { + dev_kfree_skb(entry->skb); + entry->skb = NULL; + } +} + +/* + * Device probe functions. + */ +static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u16 word; + u8 *mac; + u8 bbp; + + rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); + + /* + * Start validation of the data that has been read. + */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %pM\n", mac); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, + ANTENNA_SW_DIVERSITY); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, + ANTENNA_SW_DIVERSITY); + rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE, + LED_MODE_DEFAULT); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); + rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); + rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, + DEFAULT_RSSI_OFFSET); + rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); + EEPROM(rt2x00dev, "Calibrate offset: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word); + EEPROM(rt2x00dev, "BBPtune: 0x%04x\n", word); + } + + /* + * Switch lower vgc bound to current BBP R17 value, + * lower the value a bit for better quality. 
+ */ + rt2500usb_bbp_read(rt2x00dev, 17, &bbp); + bbp -= 6; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); + EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word); + } else { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word); + EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R24, word); + EEPROM(rt2x00dev, "BBPtune r24: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R25, word); + EEPROM(rt2x00dev, "BBPtune r25: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R61, word); + EEPROM(rt2x00dev, "BBPtune r61: 0x%04x\n", word); + } + + return 0; +} + +static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2500usb_register_read(rt2x00dev, MAC_CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2570, value, reg); + + if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) { + ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); + return -ENODEV; + } + + if (!rt2x00_rf(rt2x00dev, RF2522) && + !rt2x00_rf(rt2x00dev, RF2523) && + !rt2x00_rf(rt2x00dev, RF2524) && + !rt2x00_rf(rt2x00dev, RF2525) && + !rt2x00_rf(rt2x00dev, RF2525E) && + !rt2x00_rf(rt2x00dev, RF5222)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->default_ant.tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->default_ant.rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * When the eeprom indicates SW_DIVERSITY use HW_DIVERSITY instead. + * I am not 100% sure about this, but the legacy drivers do not + * indicate antenna swapping in software is required when + * diversity is enabled. + */ + if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY) + rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; + if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY) + rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; + + /* + * Store led mode, for correct led behaviour. 
+ */ +#ifdef CONFIG_RT2X00_LIB_LEDS + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); + + rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); + if (value == LED_MODE_TXRX_ACTIVITY || + value == LED_MODE_DEFAULT || + value == LED_MODE_ASUS) + rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual, + LED_TYPE_ACTIVITY); +#endif /* CONFIG_RT2X00_LIB_LEDS */ + + /* + * Detect if this device has an hardware controlled radio. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); + + /* + * Check if the BBP tuning should be disabled. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) + __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + + /* + * Read the RSSI <-> dBm offset information. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom); + rt2x00dev->rssi_offset = + rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI); + + return 0; +} + +/* + * RF value list for RF2522 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2522[] = { + { 1, 0x00002050, 0x000c1fda, 0x00000101, 0 }, + { 2, 0x00002050, 0x000c1fee, 0x00000101, 0 }, + { 3, 0x00002050, 0x000c2002, 0x00000101, 0 }, + { 4, 0x00002050, 0x000c2016, 0x00000101, 0 }, + { 5, 0x00002050, 0x000c202a, 0x00000101, 0 }, + { 6, 0x00002050, 0x000c203e, 0x00000101, 0 }, + { 7, 0x00002050, 0x000c2052, 0x00000101, 0 }, + { 8, 0x00002050, 0x000c2066, 0x00000101, 0 }, + { 9, 0x00002050, 0x000c207a, 0x00000101, 0 }, + { 10, 0x00002050, 0x000c208e, 0x00000101, 0 }, + { 11, 0x00002050, 0x000c20a2, 0x00000101, 0 }, + { 12, 0x00002050, 0x000c20b6, 0x00000101, 0 }, + { 13, 0x00002050, 0x000c20ca, 0x00000101, 0 }, + { 14, 0x00002050, 0x000c20fa, 0x00000101, 0 }, +}; + +/* + * RF value list for RF2523 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2523[] = { + { 1, 0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b }, + { 2, 0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b }, + { 3, 0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b }, + { 4, 0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b }, + { 5, 0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b }, + { 6, 0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b }, + { 7, 0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b }, + { 8, 0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b }, + { 9, 0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b }, + { 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b }, + { 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b }, + { 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b }, + { 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b }, + { 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 }, +}; + +/* + * RF value list for RF2524 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2524[] = { + { 1, 0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b }, + { 2, 0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b }, + { 3, 0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b }, + { 4, 0x00032020, 0x00000caa, 0x00000101, 0x00000a1b }, + { 5, 0x00032020, 0x00000cae, 0x00000101, 0x00000a1b }, + { 6, 0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b }, + { 7, 0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b }, + { 8, 0x00032020, 0x00000cba, 0x00000101, 0x00000a1b }, + { 9, 0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b }, + { 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b }, + { 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b }, + { 12, 0x00032020, 0x00000d0a, 0x00000101, 
0x00000a1b }, + { 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b }, + { 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 }, +}; + +/* + * RF value list for RF2525 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525[] = { + { 1, 0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b }, + { 2, 0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b }, + { 3, 0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b }, + { 4, 0x00022020, 0x00080caa, 0x00060111, 0x00000a1b }, + { 5, 0x00022020, 0x00080cae, 0x00060111, 0x00000a1b }, + { 6, 0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b }, + { 7, 0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b }, + { 8, 0x00022020, 0x00080cba, 0x00060111, 0x00000a1b }, + { 9, 0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b }, + { 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b }, + { 11, 0x00022020, 0x00080d06, 0x00060111, 0x00000a1b }, + { 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b }, + { 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b }, + { 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 }, +}; + +/* + * RF value list for RF2525e + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525e[] = { + { 1, 0x00022010, 0x0000089a, 0x00060111, 0x00000e1b }, + { 2, 0x00022010, 0x0000089e, 0x00060111, 0x00000e07 }, + { 3, 0x00022010, 0x0000089e, 0x00060111, 0x00000e1b }, + { 4, 0x00022010, 0x000008a2, 0x00060111, 0x00000e07 }, + { 5, 0x00022010, 0x000008a2, 0x00060111, 0x00000e1b }, + { 6, 0x00022010, 0x000008a6, 0x00060111, 0x00000e07 }, + { 7, 0x00022010, 0x000008a6, 0x00060111, 0x00000e1b }, + { 8, 0x00022010, 0x000008aa, 0x00060111, 0x00000e07 }, + { 9, 0x00022010, 0x000008aa, 0x00060111, 0x00000e1b }, + { 10, 0x00022010, 0x000008ae, 0x00060111, 0x00000e07 }, + { 11, 0x00022010, 0x000008ae, 0x00060111, 0x00000e1b }, + { 12, 0x00022010, 0x000008b2, 0x00060111, 0x00000e07 }, + { 13, 0x00022010, 0x000008b2, 0x00060111, 0x00000e1b }, + { 14, 0x00022010, 0x000008b6, 0x00060111, 0x00000e23 }, +}; + +/* + * RF value list for RF5222 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5222[] = { + { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, + { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, + { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, + { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, + { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, + { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, + { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, + { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, + { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, + { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, + { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, + { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, + { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, + { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, + { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, + { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, + { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, + { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, + { 66, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, + { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, + { 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, + { 104, 0x00022010, 0x00008806, 0x00000101, 0x00000a0f }, + 
{ 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, + { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, + { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, + { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, + { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, + { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, + { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, + { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, + + /* 802.11 UNII */ + { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, + { 149, 0x00022020, 0x000090a6, 0x00000101, 0x00000a07 }, + { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, + { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, + { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, +}; + +static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + struct channel_info *info; + char *tx_power; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = + IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_PS_NULLFUNC_STACK; + + SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Initialize hw_mode information. + */ + spec->supported_bands = SUPPORT_BAND_2GHZ; + spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; + + if (rt2x00_rf(rt2x00dev, RF2522)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); + spec->channels = rf_vals_bg_2522; + } else if (rt2x00_rf(rt2x00dev, RF2523)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); + spec->channels = rf_vals_bg_2523; + } else if (rt2x00_rf(rt2x00dev, RF2524)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); + spec->channels = rf_vals_bg_2524; + } else if (rt2x00_rf(rt2x00dev, RF2525)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); + spec->channels = rf_vals_bg_2525; + } else if (rt2x00_rf(rt2x00dev, RF2525E)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); + spec->channels = rf_vals_bg_2525e; + } else if (rt2x00_rf(rt2x00dev, RF5222)) { + spec->supported_bands |= SUPPORT_BAND_5GHZ; + spec->num_channels = ARRAY_SIZE(rf_vals_5222); + spec->channels = rf_vals_5222; + } + + /* + * Create channel information array + */ + info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + spec->channels_info = info; + + tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); + for (i = 0; i < 14; i++) + info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + + if (spec->num_channels > 14) { + for (i = 14; i < spec->num_channels; i++) + info[i].tx_power1 = DEFAULT_TXPOWER; + } + + return 0; +} + +static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt2500usb_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2500usb_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. 
+ */ + retval = rt2500usb_probe_hw_mode(rt2x00dev); + if (retval) + return retval; + + /* + * This device requires the atim queue + */ + __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags); + if (!modparam_nohwcrypt) { + __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags); + } + __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +static const struct ieee80211_ops rt2500usb_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .configure_filter = rt2x00mac_configure_filter, + .set_tim = rt2x00mac_set_tim, + .set_key = rt2x00mac_set_key, + .get_stats = rt2x00mac_get_stats, + .bss_info_changed = rt2x00mac_bss_info_changed, + .conf_tx = rt2x00mac_conf_tx, + .rfkill_poll = rt2x00mac_rfkill_poll, +}; + +static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { + .probe_hw = rt2500usb_probe_hw, + .initialize = rt2x00usb_initialize, + .uninitialize = rt2x00usb_uninitialize, + .clear_entry = rt2x00usb_clear_entry, + .set_device_state = rt2500usb_set_device_state, + .rfkill_poll = rt2500usb_rfkill_poll, + .link_stats = rt2500usb_link_stats, + .reset_tuner = rt2500usb_reset_tuner, + .write_tx_desc = rt2500usb_write_tx_desc, + .write_tx_data = rt2x00usb_write_tx_data, + .write_beacon = rt2500usb_write_beacon, + .get_tx_data_len = rt2500usb_get_tx_data_len, + .kick_tx_queue = rt2500usb_kick_tx_queue, + .kill_tx_queue = rt2x00usb_kill_tx_queue, + .fill_rxdone = rt2500usb_fill_rxdone, + .config_shared_key = rt2500usb_config_key, + .config_pairwise_key = rt2500usb_config_key, + .config_filter = rt2500usb_config_filter, + .config_intf = rt2500usb_config_intf, + .config_erp = rt2500usb_config_erp, + .config_ant = rt2500usb_config_ant, + .config = rt2500usb_config, +}; + +static const struct data_queue_desc rt2500usb_queue_rx = { + .entry_num = RX_ENTRIES, + .data_size = DATA_FRAME_SIZE, + .desc_size = RXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_usb), +}; + +static const struct data_queue_desc rt2500usb_queue_tx = { + .entry_num = TX_ENTRIES, + .data_size = DATA_FRAME_SIZE, + .desc_size = TXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_usb), +}; + +static const struct data_queue_desc rt2500usb_queue_bcn = { + .entry_num = BEACON_ENTRIES, + .data_size = MGMT_FRAME_SIZE, + .desc_size = TXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_usb_bcn), +}; + +static const struct data_queue_desc rt2500usb_queue_atim = { + .entry_num = ATIM_ENTRIES, + .data_size = DATA_FRAME_SIZE, + .desc_size = TXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_usb), +}; + +static const struct rt2x00_ops rt2500usb_ops = { + .name = KBUILD_MODNAME, + .max_sta_intf = 1, + .max_ap_intf = 1, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .tx_queues = NUM_TX_QUEUES, + .extra_tx_headroom = TXD_DESC_SIZE, + .rx = &rt2500usb_queue_rx, + .tx = &rt2500usb_queue_tx, + .bcn = &rt2500usb_queue_bcn, + .atim = &rt2500usb_queue_atim, + .lib = &rt2500usb_rt2x00_ops, + .hw = &rt2500usb_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt2500usb_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * rt2500usb module information. 
+ */ +static struct usb_device_id rt2500usb_device_table[] = { + /* ASUS */ + { USB_DEVICE(0x0b05, 0x1706), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0b05, 0x1707), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Belkin */ + { USB_DEVICE(0x050d, 0x7050), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x050d, 0x7051), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x050d, 0x705a), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Cisco Systems */ + { USB_DEVICE(0x13b1, 0x000d), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x13b1, 0x0011), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x13b1, 0x001a), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* CNet */ + { USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Conceptronic */ + { USB_DEVICE(0x14b2, 0x3c02), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* D-LINK */ + { USB_DEVICE(0x2001, 0x3c00), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Gigabyte */ + { USB_DEVICE(0x1044, 0x8001), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x1044, 0x8007), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Hercules */ + { USB_DEVICE(0x06f8, 0xe000), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Melco */ + { USB_DEVICE(0x0411, 0x005e), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0411, 0x0066), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0411, 0x0067), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0411, 0x008b), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0411, 0x0097), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* MSI */ + { USB_DEVICE(0x0db0, 0x6861), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0db0, 0x6865), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0db0, 0x6869), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x1706), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x148f, 0x2570), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x148f, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Sagem */ + { USB_DEVICE(0x079b, 0x004b), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Siemens */ + { USB_DEVICE(0x0681, 0x3c06), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* SMC */ + { USB_DEVICE(0x0707, 0xee13), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Spairon */ + { USB_DEVICE(0x114b, 0x0110), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* SURECOM */ + { USB_DEVICE(0x0769, 0x11f3), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Trust */ + { USB_DEVICE(0x0eb0, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* VTech */ + { USB_DEVICE(0x0f88, 0x3012), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Zinwell */ + { USB_DEVICE(0x5a57, 0x0260), USB_DEVICE_DATA(&rt2500usb_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards"); +MODULE_DEVICE_TABLE(usb, rt2500usb_device_table); +MODULE_LICENSE("GPL"); + +static struct usb_driver rt2500usb_driver = { + .name = KBUILD_MODNAME, + .id_table = rt2500usb_device_table, + .probe = rt2x00usb_probe, + .disconnect = rt2x00usb_disconnect, + .suspend = rt2x00usb_suspend, + .resume = rt2x00usb_resume, +}; + +static int __init rt2500usb_init(void) +{ + return usb_register(&rt2500usb_driver); +} + +static void __exit rt2500usb_exit(void) +{ + usb_deregister(&rt2500usb_driver); +} + +module_init(rt2500usb_init); +module_exit(rt2500usb_exit); diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h new file mode 100644 index 0000000..b493306 --- /dev/null +++ 
b/drivers/net/wireless/rt2x00/rt2500usb.h @@ -0,0 +1,847 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2500usb + Abstract: Data structures and registers for the rt2500usb module. + Supported chipsets: RT2570. + */ + +#ifndef RT2500USB_H +#define RT2500USB_H + +/* + * RF chip defines. + */ +#define RF2522 0x0000 +#define RF2523 0x0001 +#define RF2524 0x0002 +#define RF2525 0x0003 +#define RF2525E 0x0005 +#define RF5222 0x0010 + +/* + * RT2570 version + */ +#define RT2570_VERSION_B 2 +#define RT2570_VERSION_C 3 +#define RT2570_VERSION_D 4 + +/* + * Signal information. + * Default offset is required for RSSI <-> dBm conversion. + */ +#define DEFAULT_RSSI_OFFSET 120 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x0400 +#define CSR_REG_SIZE 0x0100 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x006a +#define BBP_BASE 0x0000 +#define BBP_SIZE 0x0060 +#define RF_BASE 0x0004 +#define RF_SIZE 0x0010 + +/* + * Number of TX queues. + */ +#define NUM_TX_QUEUES 2 + +/* + * Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * MAC_CSR0: ASIC revision number. + */ +#define MAC_CSR0 0x0400 + +/* + * MAC_CSR1: System control. + * SOFT_RESET: Software reset, 1: reset, 0: normal. + * BBP_RESET: Hardware reset, 1: reset, 0, release. + * HOST_READY: Host ready after initialization. + */ +#define MAC_CSR1 0x0402 +#define MAC_CSR1_SOFT_RESET FIELD16(0x00000001) +#define MAC_CSR1_BBP_RESET FIELD16(0x00000002) +#define MAC_CSR1_HOST_READY FIELD16(0x00000004) + +/* + * MAC_CSR2: STA MAC register 0. + */ +#define MAC_CSR2 0x0404 +#define MAC_CSR2_BYTE0 FIELD16(0x00ff) +#define MAC_CSR2_BYTE1 FIELD16(0xff00) + +/* + * MAC_CSR3: STA MAC register 1. + */ +#define MAC_CSR3 0x0406 +#define MAC_CSR3_BYTE2 FIELD16(0x00ff) +#define MAC_CSR3_BYTE3 FIELD16(0xff00) + +/* + * MAC_CSR4: STA MAC register 2. + */ +#define MAC_CSR4 0X0408 +#define MAC_CSR4_BYTE4 FIELD16(0x00ff) +#define MAC_CSR4_BYTE5 FIELD16(0xff00) + +/* + * MAC_CSR5: BSSID register 0. + */ +#define MAC_CSR5 0x040a +#define MAC_CSR5_BYTE0 FIELD16(0x00ff) +#define MAC_CSR5_BYTE1 FIELD16(0xff00) + +/* + * MAC_CSR6: BSSID register 1. + */ +#define MAC_CSR6 0x040c +#define MAC_CSR6_BYTE2 FIELD16(0x00ff) +#define MAC_CSR6_BYTE3 FIELD16(0xff00) + +/* + * MAC_CSR7: BSSID register 2. + */ +#define MAC_CSR7 0x040e +#define MAC_CSR7_BYTE4 FIELD16(0x00ff) +#define MAC_CSR7_BYTE5 FIELD16(0xff00) + +/* + * MAC_CSR8: Max frame length. + */ +#define MAC_CSR8 0x0410 +#define MAC_CSR8_MAX_FRAME_UNIT FIELD16(0x0fff) + +/* + * Misc MAC_CSR registers. + * MAC_CSR9: Timer control. + * MAC_CSR10: Slot time. + * MAC_CSR11: SIFS. + * MAC_CSR12: EIFS. + * MAC_CSR13: Power mode0. + * MAC_CSR14: Power mode1. 
+ * MAC_CSR15: Power saving transition0 + * MAC_CSR16: Power saving transition1 + */ +#define MAC_CSR9 0x0412 +#define MAC_CSR10 0x0414 +#define MAC_CSR11 0x0416 +#define MAC_CSR12 0x0418 +#define MAC_CSR13 0x041a +#define MAC_CSR14 0x041c +#define MAC_CSR15 0x041e +#define MAC_CSR16 0x0420 + +/* + * MAC_CSR17: Manual power control / status register. + * Allowed state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake. + * SET_STATE: Set state. Write 1 to trigger, self cleared. + * BBP_DESIRE_STATE: BBP desired state. + * RF_DESIRE_STATE: RF desired state. + * BBP_CURRENT_STATE: BBP current state. + * RF_CURRENT_STATE: RF current state. + * PUT_TO_SLEEP: Put to sleep. Write 1 to trigger, self cleared. + */ +#define MAC_CSR17 0x0422 +#define MAC_CSR17_SET_STATE FIELD16(0x0001) +#define MAC_CSR17_BBP_DESIRE_STATE FIELD16(0x0006) +#define MAC_CSR17_RF_DESIRE_STATE FIELD16(0x0018) +#define MAC_CSR17_BBP_CURR_STATE FIELD16(0x0060) +#define MAC_CSR17_RF_CURR_STATE FIELD16(0x0180) +#define MAC_CSR17_PUT_TO_SLEEP FIELD16(0x0200) + +/* + * MAC_CSR18: Wakeup timer register. + * DELAY_AFTER_BEACON: Delay after Tbcn expired in units of 1/16 TU. + * BEACONS_BEFORE_WAKEUP: Number of beacon before wakeup. + * AUTO_WAKE: Enable auto wakeup / sleep mechanism. + */ +#define MAC_CSR18 0x0424 +#define MAC_CSR18_DELAY_AFTER_BEACON FIELD16(0x00ff) +#define MAC_CSR18_BEACONS_BEFORE_WAKEUP FIELD16(0x7f00) +#define MAC_CSR18_AUTO_WAKE FIELD16(0x8000) + +/* + * MAC_CSR19: GPIO control register. + */ +#define MAC_CSR19 0x0426 +#define MAC_CSR19_BIT0 FIELD32(0x0001) +#define MAC_CSR19_BIT1 FIELD32(0x0002) +#define MAC_CSR19_BIT2 FIELD32(0x0004) +#define MAC_CSR19_BIT3 FIELD32(0x0008) +#define MAC_CSR19_BIT4 FIELD32(0x0010) +#define MAC_CSR19_BIT5 FIELD32(0x0020) +#define MAC_CSR19_BIT6 FIELD32(0x0040) +#define MAC_CSR19_BIT7 FIELD32(0x0080) + +/* + * MAC_CSR20: LED control register. + * ACTIVITY: 0: idle, 1: active. + * LINK: 0: linkoff, 1: linkup. + * ACTIVITY_POLARITY: 0: active low, 1: active high. + */ +#define MAC_CSR20 0x0428 +#define MAC_CSR20_ACTIVITY FIELD16(0x0001) +#define MAC_CSR20_LINK FIELD16(0x0002) +#define MAC_CSR20_ACTIVITY_POLARITY FIELD16(0x0004) + +/* + * MAC_CSR21: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + */ +#define MAC_CSR21 0x042a +#define MAC_CSR21_ON_PERIOD FIELD16(0x00ff) +#define MAC_CSR21_OFF_PERIOD FIELD16(0xff00) + +/* + * MAC_CSR22: Collision window control register. + */ +#define MAC_CSR22 0x042c + +/* + * Transmit related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXRX_CSR0: Security control register. + */ +#define TXRX_CSR0 0x0440 +#define TXRX_CSR0_ALGORITHM FIELD16(0x0007) +#define TXRX_CSR0_IV_OFFSET FIELD16(0x01f8) +#define TXRX_CSR0_KEY_ID FIELD16(0x1e00) + +/* + * TXRX_CSR1: TX configuration. + * ACK_TIMEOUT: ACK Timeout in unit of 1-us. + * TSF_OFFSET: TSF offset in MAC header. + * AUTO_SEQUENCE: Let ASIC control frame sequence number. + */ +#define TXRX_CSR1 0x0442 +#define TXRX_CSR1_ACK_TIMEOUT FIELD16(0x00ff) +#define TXRX_CSR1_TSF_OFFSET FIELD16(0x7f00) +#define TXRX_CSR1_AUTO_SEQUENCE FIELD16(0x8000) + +/* + * TXRX_CSR2: RX control. + * DISABLE_RX: Disable rx engine. + * DROP_CRC: Drop crc error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TODS: Drop frame tods bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * DROP_MCAST: Drop multicast frames. 
+ * DROP_BCAST: Drop broadcast frames. + */ +#define TXRX_CSR2 0x0444 +#define TXRX_CSR2_DISABLE_RX FIELD16(0x0001) +#define TXRX_CSR2_DROP_CRC FIELD16(0x0002) +#define TXRX_CSR2_DROP_PHYSICAL FIELD16(0x0004) +#define TXRX_CSR2_DROP_CONTROL FIELD16(0x0008) +#define TXRX_CSR2_DROP_NOT_TO_ME FIELD16(0x0010) +#define TXRX_CSR2_DROP_TODS FIELD16(0x0020) +#define TXRX_CSR2_DROP_VERSION_ERROR FIELD16(0x0040) +#define TXRX_CSR2_DROP_MULTICAST FIELD16(0x0200) +#define TXRX_CSR2_DROP_BROADCAST FIELD16(0x0400) + +/* + * RX BBP ID registers + * TXRX_CSR3: CCK RX BBP ID. + * TXRX_CSR4: OFDM RX BBP ID. + */ +#define TXRX_CSR3 0x0446 +#define TXRX_CSR4 0x0448 + +/* + * TXRX_CSR5: CCK TX BBP ID0. + */ +#define TXRX_CSR5 0x044a +#define TXRX_CSR5_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR5_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR5_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR5_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR6: CCK TX BBP ID1. + */ +#define TXRX_CSR6 0x044c +#define TXRX_CSR6_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR6_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR6_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR6_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR7: OFDM TX BBP ID0. + */ +#define TXRX_CSR7 0x044e +#define TXRX_CSR7_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR7_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR7_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR7_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR8: OFDM TX BBP ID1. + */ +#define TXRX_CSR8 0x0450 +#define TXRX_CSR8_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR8_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR8_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR8_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR9: TX ACK time-out. + */ +#define TXRX_CSR9 0x0452 + +/* + * TXRX_CSR10: Auto responder control. + */ +#define TXRX_CSR10 0x0454 +#define TXRX_CSR10_AUTORESPOND_PREAMBLE FIELD16(0x0004) + +/* + * TXRX_CSR11: Auto responder basic rate. + */ +#define TXRX_CSR11 0x0456 + +/* + * ACK/CTS time registers. + */ +#define TXRX_CSR12 0x0458 +#define TXRX_CSR13 0x045a +#define TXRX_CSR14 0x045c +#define TXRX_CSR15 0x045e +#define TXRX_CSR16 0x0460 +#define TXRX_CSR17 0x0462 + +/* + * TXRX_CSR18: Synchronization control register. + */ +#define TXRX_CSR18 0x0464 +#define TXRX_CSR18_OFFSET FIELD16(0x000f) +#define TXRX_CSR18_INTERVAL FIELD16(0xfff0) + +/* + * TXRX_CSR19: Synchronization control register. + * TSF_COUNT: Enable TSF auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * TBCN: Enable Tbcn with reload value. + * BEACON_GEN: Enable beacon generator. + */ +#define TXRX_CSR19 0x0466 +#define TXRX_CSR19_TSF_COUNT FIELD16(0x0001) +#define TXRX_CSR19_TSF_SYNC FIELD16(0x0006) +#define TXRX_CSR19_TBCN FIELD16(0x0008) +#define TXRX_CSR19_BEACON_GEN FIELD16(0x0010) + +/* + * TXRX_CSR20: Tx BEACON offset time control register. + * OFFSET: In units of usec. + * BCN_EXPECT_WINDOW: Default: 2^CWmin + */ +#define TXRX_CSR20 0x0468 +#define TXRX_CSR20_OFFSET FIELD16(0x1fff) +#define TXRX_CSR20_BCN_EXPECT_WINDOW FIELD16(0xe000) + +/* + * TXRX_CSR21 + */ +#define TXRX_CSR21 0x046a + +/* + * Encryption related CSRs. 
+ * + */ + +/* + * SEC_CSR0: Shared key 0, word 0 + * SEC_CSR1: Shared key 0, word 1 + * SEC_CSR2: Shared key 0, word 2 + * SEC_CSR3: Shared key 0, word 3 + * SEC_CSR4: Shared key 0, word 4 + * SEC_CSR5: Shared key 0, word 5 + * SEC_CSR6: Shared key 0, word 6 + * SEC_CSR7: Shared key 0, word 7 + */ +#define SEC_CSR0 0x0480 +#define SEC_CSR1 0x0482 +#define SEC_CSR2 0x0484 +#define SEC_CSR3 0x0486 +#define SEC_CSR4 0x0488 +#define SEC_CSR5 0x048a +#define SEC_CSR6 0x048c +#define SEC_CSR7 0x048e + +/* + * SEC_CSR8: Shared key 1, word 0 + * SEC_CSR9: Shared key 1, word 1 + * SEC_CSR10: Shared key 1, word 2 + * SEC_CSR11: Shared key 1, word 3 + * SEC_CSR12: Shared key 1, word 4 + * SEC_CSR13: Shared key 1, word 5 + * SEC_CSR14: Shared key 1, word 6 + * SEC_CSR15: Shared key 1, word 7 + */ +#define SEC_CSR8 0x0490 +#define SEC_CSR9 0x0492 +#define SEC_CSR10 0x0494 +#define SEC_CSR11 0x0496 +#define SEC_CSR12 0x0498 +#define SEC_CSR13 0x049a +#define SEC_CSR14 0x049c +#define SEC_CSR15 0x049e + +/* + * SEC_CSR16: Shared key 2, word 0 + * SEC_CSR17: Shared key 2, word 1 + * SEC_CSR18: Shared key 2, word 2 + * SEC_CSR19: Shared key 2, word 3 + * SEC_CSR20: Shared key 2, word 4 + * SEC_CSR21: Shared key 2, word 5 + * SEC_CSR22: Shared key 2, word 6 + * SEC_CSR23: Shared key 2, word 7 + */ +#define SEC_CSR16 0x04a0 +#define SEC_CSR17 0x04a2 +#define SEC_CSR18 0X04A4 +#define SEC_CSR19 0x04a6 +#define SEC_CSR20 0x04a8 +#define SEC_CSR21 0x04aa +#define SEC_CSR22 0x04ac +#define SEC_CSR23 0x04ae + +/* + * SEC_CSR24: Shared key 3, word 0 + * SEC_CSR25: Shared key 3, word 1 + * SEC_CSR26: Shared key 3, word 2 + * SEC_CSR27: Shared key 3, word 3 + * SEC_CSR28: Shared key 3, word 4 + * SEC_CSR29: Shared key 3, word 5 + * SEC_CSR30: Shared key 3, word 6 + * SEC_CSR31: Shared key 3, word 7 + */ +#define SEC_CSR24 0x04b0 +#define SEC_CSR25 0x04b2 +#define SEC_CSR26 0x04b4 +#define SEC_CSR27 0x04b6 +#define SEC_CSR28 0x04b8 +#define SEC_CSR29 0x04ba +#define SEC_CSR30 0x04bc +#define SEC_CSR31 0x04be + +#define KEY_ENTRY(__idx) \ + ( SEC_CSR0 + ((__idx) * 16) ) + +/* + * PHY control registers. + */ + +/* + * PHY_CSR0: RF switching timing control. + */ +#define PHY_CSR0 0x04c0 + +/* + * PHY_CSR1: TX PA configuration. + */ +#define PHY_CSR1 0x04c2 + +/* + * MAC configuration registers. + */ + +/* + * PHY_CSR2: TX MAC configuration. + * NOTE: Both register fields are complete dummy, + * documentation and legacy drivers are unclear un + * what this register means or what fields exists. + */ +#define PHY_CSR2 0x04c4 +#define PHY_CSR2_LNA FIELD16(0x0002) +#define PHY_CSR2_LNA_MODE FIELD16(0x3000) + +/* + * PHY_CSR3: RX MAC configuration. + */ +#define PHY_CSR3 0x04c6 + +/* + * PHY_CSR4: Interface configuration. + */ +#define PHY_CSR4 0x04c8 +#define PHY_CSR4_LOW_RF_LE FIELD16(0x0001) + +/* + * BBP pre-TX registers. + * PHY_CSR5: BBP pre-TX CCK. + */ +#define PHY_CSR5 0x04ca +#define PHY_CSR5_CCK FIELD16(0x0003) +#define PHY_CSR5_CCK_FLIP FIELD16(0x0004) + +/* + * BBP pre-TX registers. + * PHY_CSR6: BBP pre-TX OFDM. + */ +#define PHY_CSR6 0x04cc +#define PHY_CSR6_OFDM FIELD16(0x0003) +#define PHY_CSR6_OFDM_FLIP FIELD16(0x0004) + +/* + * PHY_CSR7: BBP access register 0. + * BBP_DATA: BBP data. + * BBP_REG_ID: BBP register ID. + * BBP_READ_CONTROL: 0: write, 1: read. + */ +#define PHY_CSR7 0x04ce +#define PHY_CSR7_DATA FIELD16(0x00ff) +#define PHY_CSR7_REG_ID FIELD16(0x7f00) +#define PHY_CSR7_READ_CONTROL FIELD16(0x8000) + +/* + * PHY_CSR8: BBP access register 1. + * BBP_BUSY: ASIC is busy execute BBP programming. 
+ */ +#define PHY_CSR8 0x04d0 +#define PHY_CSR8_BUSY FIELD16(0x0001) + +/* + * PHY_CSR9: RF access register. + * RF_VALUE: Register value + id to program into rf/if. + */ +#define PHY_CSR9 0x04d2 +#define PHY_CSR9_RF_VALUE FIELD16(0xffff) + +/* + * PHY_CSR10: RF access register. + * RF_VALUE: Register value + id to program into rf/if. + * RF_NUMBER_OF_BITS: Number of bits used in value (i:20, rfmd:22). + * RF_IF_SELECT: Chip to program: 0: rf, 1: if. + * RF_PLL_LD: Rf pll_ld status. + * RF_BUSY: 1: asic is busy execute rf programming. + */ +#define PHY_CSR10 0x04d4 +#define PHY_CSR10_RF_VALUE FIELD16(0x00ff) +#define PHY_CSR10_RF_NUMBER_OF_BITS FIELD16(0x1f00) +#define PHY_CSR10_RF_IF_SELECT FIELD16(0x2000) +#define PHY_CSR10_RF_PLL_LD FIELD16(0x4000) +#define PHY_CSR10_RF_BUSY FIELD16(0x8000) + +/* + * STA_CSR0: FCS error count. + * FCS_ERROR: FCS error count, cleared when read. + */ +#define STA_CSR0 0x04e0 +#define STA_CSR0_FCS_ERROR FIELD16(0xffff) + +/* + * STA_CSR1: PLCP error count. + */ +#define STA_CSR1 0x04e2 + +/* + * STA_CSR2: LONG error count. + */ +#define STA_CSR2 0x04e4 + +/* + * STA_CSR3: CCA false alarm. + * FALSE_CCA_ERROR: False CCA error count, cleared when read. + */ +#define STA_CSR3 0x04e6 +#define STA_CSR3_FALSE_CCA_ERROR FIELD16(0xffff) + +/* + * STA_CSR4: RX FIFO overflow. + */ +#define STA_CSR4 0x04e8 + +/* + * STA_CSR5: Beacon sent counter. + */ +#define STA_CSR5 0x04ea + +/* + * Statistics registers + */ +#define STA_CSR6 0x04ec +#define STA_CSR7 0x04ee +#define STA_CSR8 0x04f0 +#define STA_CSR9 0x04f2 +#define STA_CSR10 0x04f4 + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R2: TX antenna control + */ +#define BBP_R2_TX_ANTENNA FIELD8(0x03) +#define BBP_R2_TX_IQ_FLIP FIELD8(0x04) + +/* + * R14: RX antenna control + */ +#define BBP_R14_RX_ANTENNA FIELD8(0x03) +#define BBP_R14_RX_IQ_FLIP FIELD8(0x04) + +/* + * RF registers. + */ + +/* + * RF 1 + */ +#define RF1_TUNER FIELD32(0x00020000) + +/* + * RF 3 + */ +#define RF3_TUNER FIELD32(0x00000100) +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * EEPROM contents. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * LED_MODE: 0: default, 1: TX/RX activity, 2: Single (ignore link), 3: rsvd. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x000b +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_LED_MODE FIELD16(0x01c0) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * CARDBUS_ACCEL: 0: enable, 1: disable. + * DYN_BBP_TUNE: 0: enable, 1: disable. + * CCK_TX_POWER: CCK TX power compensation. 
+ */ +#define EEPROM_NIC 0x000c +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0001) +#define EEPROM_NIC_DYN_BBP_TUNE FIELD16(0x0002) +#define EEPROM_NIC_CCK_TX_POWER FIELD16(0x000c) + +/* + * EEPROM geography. + * GEO: Default geography setting for device. + */ +#define EEPROM_GEOGRAPHY 0x000d +#define EEPROM_GEOGRAPHY_GEO FIELD16(0x0f00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x000e +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER + */ +#define EEPROM_TXPOWER_START 0x001e +#define EEPROM_TXPOWER_SIZE 7 +#define EEPROM_TXPOWER_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_2 FIELD16(0xff00) + +/* + * EEPROM Tuning threshold + */ +#define EEPROM_BBPTUNE 0x0030 +#define EEPROM_BBPTUNE_THRESHOLD FIELD16(0x00ff) + +/* + * EEPROM BBP R24 Tuning. + */ +#define EEPROM_BBPTUNE_R24 0x0031 +#define EEPROM_BBPTUNE_R24_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R24_HIGH FIELD16(0xff00) + +/* + * EEPROM BBP R25 Tuning. + */ +#define EEPROM_BBPTUNE_R25 0x0032 +#define EEPROM_BBPTUNE_R25_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R25_HIGH FIELD16(0xff00) + +/* + * EEPROM BBP R24 Tuning. + */ +#define EEPROM_BBPTUNE_R61 0x0033 +#define EEPROM_BBPTUNE_R61_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R61_HIGH FIELD16(0xff00) + +/* + * EEPROM BBP VGC Tuning. + */ +#define EEPROM_BBPTUNE_VGC 0x0034 +#define EEPROM_BBPTUNE_VGCUPPER FIELD16(0x00ff) +#define EEPROM_BBPTUNE_VGCLOWER FIELD16(0xff00) + +/* + * EEPROM BBP R17 Tuning. + */ +#define EEPROM_BBPTUNE_R17 0x0035 +#define EEPROM_BBPTUNE_R17_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R17_HIGH FIELD16(0xff00) + +/* + * RSSI <-> dBm offset calibration + */ +#define EEPROM_CALIBRATE_OFFSET 0x0036 +#define EEPROM_CALIBRATE_OFFSET_RSSI FIELD16(0x00ff) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 5 * sizeof(__le32) ) +#define RXD_DESC_SIZE ( 4 * sizeof(__le32) ) + +/* + * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. + */ + +/* + * Word0 + */ +#define TXD_W0_PACKET_ID FIELD32(0x0000000f) +#define TXD_W0_RETRY_LIMIT FIELD32(0x000000f0) +#define TXD_W0_MORE_FRAG FIELD32(0x00000100) +#define TXD_W0_ACK FIELD32(0x00000200) +#define TXD_W0_TIMESTAMP FIELD32(0x00000400) +#define TXD_W0_OFDM FIELD32(0x00000800) +#define TXD_W0_NEW_SEQ FIELD32(0x00001000) +#define TXD_W0_IFS FIELD32(0x00006000) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_CIPHER FIELD32(0x20000000) +#define TXD_W0_KEY_ID FIELD32(0xc0000000) + +/* + * Word1 + */ +#define TXD_W1_IV_OFFSET FIELD32(0x0000003f) +#define TXD_W1_AIFS FIELD32(0x000000c0) +#define TXD_W1_CWMIN FIELD32(0x00000f00) +#define TXD_W1_CWMAX FIELD32(0x0000f000) + +/* + * Word2: PLCP information + */ +#define TXD_W2_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W2_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W2_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W2_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word3 + */ +#define TXD_W3_IV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define TXD_W4_EIV FIELD32(0xffffffff) + +/* + * RX descriptor format for RX Ring. 
+ */ + +/* + * Word0 + */ +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000002) +#define RXD_W0_MULTICAST FIELD32(0x00000004) +#define RXD_W0_BROADCAST FIELD32(0x00000008) +#define RXD_W0_MY_BSS FIELD32(0x00000010) +#define RXD_W0_CRC_ERROR FIELD32(0x00000020) +#define RXD_W0_OFDM FIELD32(0x00000040) +#define RXD_W0_PHYSICAL_ERROR FIELD32(0x00000080) +#define RXD_W0_CIPHER FIELD32(0x00000100) +#define RXD_W0_CIPHER_ERROR FIELD32(0x00000200) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) + +/* + * Word1 + */ +#define RXD_W1_RSSI FIELD32(0x000000ff) +#define RXD_W1_SIGNAL FIELD32(0x0000ff00) + +/* + * Word2 + */ +#define RXD_W2_IV FIELD32(0xffffffff) + +/* + * Word3 + */ +#define RXD_W3_EIV FIELD32(0xffffffff) + +/* + * Macros for converting txpower from EEPROM to mac80211 value + * and from mac80211 value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ + (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower) + +#define TXPOWER_TO_DEV(__txpower) \ + clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER) + +#endif /* RT2500USB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h new file mode 100644 index 0000000..74c0433 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -0,0 +1,1852 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + Copyright (C) 2009 Alban Browaeys + Copyright (C) 2009 Felix Fietkau + Copyright (C) 2009 Luis Correia + Copyright (C) 2009 Mattias Nissler + Copyright (C) 2009 Mark Asselstine + Copyright (C) 2009 Xose Vazquez Perez + Copyright (C) 2009 Bart Zolnierkiewicz + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2800 + Abstract: Data structures and registers for the rt2800 modules. + Supported chipsets: RT2800E, RT2800ED & RT2800U. + */ + +#ifndef RT2800_H +#define RT2800_H + +/* + * RF chip defines. + * + * RF2820 2.4G 2T3R + * RF2850 2.4G/5G 2T3R + * RF2720 2.4G 1T2R + * RF2750 2.4G/5G 1T2R + * RF3020 2.4G 1T1R + * RF2020 2.4G B/G + * RF3021 2.4G 1T2R + * RF3022 2.4G 2T2R + * RF3052 2.4G 2T2R + */ +#define RF2820 0x0001 +#define RF2850 0x0002 +#define RF2720 0x0003 +#define RF2750 0x0004 +#define RF3020 0x0005 +#define RF2020 0x0006 +#define RF3021 0x0007 +#define RF3022 0x0008 +#define RF3052 0x0009 + +/* + * Chipset version. + */ +#define RT2860C_VERSION 0x0100 +#define RT2860D_VERSION 0x0101 +#define RT2880E_VERSION 0x0200 +#define RT2883_VERSION 0x0300 +#define RT3070_VERSION 0x0200 + +/* + * Signal information. + * Default offset is required for RSSI <-> dBm conversion. + */ +#define DEFAULT_RSSI_OFFSET 120 /* FIXME */ + +/* + * Register layout information. 
+ */ +#define CSR_REG_BASE 0x1000 +#define CSR_REG_SIZE 0x0800 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0110 +#define BBP_BASE 0x0000 +#define BBP_SIZE 0x0080 +#define RF_BASE 0x0004 +#define RF_SIZE 0x0010 + +/* + * Number of TX queues. + */ +#define NUM_TX_QUEUES 4 + +/* + * USB registers. + */ + +/* + * INT_SOURCE_CSR: Interrupt source register. + * Write one to clear corresponding bit. + * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c + */ +#define INT_SOURCE_CSR 0x0200 +#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001) +#define INT_SOURCE_CSR_TXDELAYINT FIELD32(0x00000002) +#define INT_SOURCE_CSR_RX_DONE FIELD32(0x00000004) +#define INT_SOURCE_CSR_AC0_DMA_DONE FIELD32(0x00000008) +#define INT_SOURCE_CSR_AC1_DMA_DONE FIELD32(0x00000010) +#define INT_SOURCE_CSR_AC2_DMA_DONE FIELD32(0x00000020) +#define INT_SOURCE_CSR_AC3_DMA_DONE FIELD32(0x00000040) +#define INT_SOURCE_CSR_HCCA_DMA_DONE FIELD32(0x00000080) +#define INT_SOURCE_CSR_MGMT_DMA_DONE FIELD32(0x00000100) +#define INT_SOURCE_CSR_MCU_COMMAND FIELD32(0x00000200) +#define INT_SOURCE_CSR_RXTX_COHERENT FIELD32(0x00000400) +#define INT_SOURCE_CSR_TBTT FIELD32(0x00000800) +#define INT_SOURCE_CSR_PRE_TBTT FIELD32(0x00001000) +#define INT_SOURCE_CSR_TX_FIFO_STATUS FIELD32(0x00002000) +#define INT_SOURCE_CSR_AUTO_WAKEUP FIELD32(0x00004000) +#define INT_SOURCE_CSR_GPTIMER FIELD32(0x00008000) +#define INT_SOURCE_CSR_RX_COHERENT FIELD32(0x00010000) +#define INT_SOURCE_CSR_TX_COHERENT FIELD32(0x00020000) + +/* + * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is mask OFF. + */ +#define INT_MASK_CSR 0x0204 +#define INT_MASK_CSR_RXDELAYINT FIELD32(0x00000001) +#define INT_MASK_CSR_TXDELAYINT FIELD32(0x00000002) +#define INT_MASK_CSR_RX_DONE FIELD32(0x00000004) +#define INT_MASK_CSR_AC0_DMA_DONE FIELD32(0x00000008) +#define INT_MASK_CSR_AC1_DMA_DONE FIELD32(0x00000010) +#define INT_MASK_CSR_AC2_DMA_DONE FIELD32(0x00000020) +#define INT_MASK_CSR_AC3_DMA_DONE FIELD32(0x00000040) +#define INT_MASK_CSR_HCCA_DMA_DONE FIELD32(0x00000080) +#define INT_MASK_CSR_MGMT_DMA_DONE FIELD32(0x00000100) +#define INT_MASK_CSR_MCU_COMMAND FIELD32(0x00000200) +#define INT_MASK_CSR_RXTX_COHERENT FIELD32(0x00000400) +#define INT_MASK_CSR_TBTT FIELD32(0x00000800) +#define INT_MASK_CSR_PRE_TBTT FIELD32(0x00001000) +#define INT_MASK_CSR_TX_FIFO_STATUS FIELD32(0x00002000) +#define INT_MASK_CSR_AUTO_WAKEUP FIELD32(0x00004000) +#define INT_MASK_CSR_GPTIMER FIELD32(0x00008000) +#define INT_MASK_CSR_RX_COHERENT FIELD32(0x00010000) +#define INT_MASK_CSR_TX_COHERENT FIELD32(0x00020000) + +/* + * WPDMA_GLO_CFG + */ +#define WPDMA_GLO_CFG 0x0208 +#define WPDMA_GLO_CFG_ENABLE_TX_DMA FIELD32(0x00000001) +#define WPDMA_GLO_CFG_TX_DMA_BUSY FIELD32(0x00000002) +#define WPDMA_GLO_CFG_ENABLE_RX_DMA FIELD32(0x00000004) +#define WPDMA_GLO_CFG_RX_DMA_BUSY FIELD32(0x00000008) +#define WPDMA_GLO_CFG_WP_DMA_BURST_SIZE FIELD32(0x00000030) +#define WPDMA_GLO_CFG_TX_WRITEBACK_DONE FIELD32(0x00000040) +#define WPDMA_GLO_CFG_BIG_ENDIAN FIELD32(0x00000080) +#define WPDMA_GLO_CFG_RX_HDR_SCATTER FIELD32(0x0000ff00) +#define WPDMA_GLO_CFG_HDR_SEG_LEN FIELD32(0xffff0000) + +/* + * WPDMA_RST_IDX + */ +#define WPDMA_RST_IDX 0x020c +#define WPDMA_RST_IDX_DTX_IDX0 FIELD32(0x00000001) +#define WPDMA_RST_IDX_DTX_IDX1 FIELD32(0x00000002) +#define WPDMA_RST_IDX_DTX_IDX2 FIELD32(0x00000004) +#define WPDMA_RST_IDX_DTX_IDX3 FIELD32(0x00000008) +#define WPDMA_RST_IDX_DTX_IDX4 FIELD32(0x00000010) +#define WPDMA_RST_IDX_DTX_IDX5 FIELD32(0x00000020) +#define 
WPDMA_RST_IDX_DRX_IDX0 FIELD32(0x00010000) + +/* + * DELAY_INT_CFG + */ +#define DELAY_INT_CFG 0x0210 +#define DELAY_INT_CFG_RXMAX_PTIME FIELD32(0x000000ff) +#define DELAY_INT_CFG_RXMAX_PINT FIELD32(0x00007f00) +#define DELAY_INT_CFG_RXDLY_INT_EN FIELD32(0x00008000) +#define DELAY_INT_CFG_TXMAX_PTIME FIELD32(0x00ff0000) +#define DELAY_INT_CFG_TXMAX_PINT FIELD32(0x7f000000) +#define DELAY_INT_CFG_TXDLY_INT_EN FIELD32(0x80000000) + +/* + * WMM_AIFSN_CFG: Aifsn for each EDCA AC + * AIFSN0: AC_BE + * AIFSN1: AC_BK + * AIFSN2: AC_VI + * AIFSN3: AC_VO + */ +#define WMM_AIFSN_CFG 0x0214 +#define WMM_AIFSN_CFG_AIFSN0 FIELD32(0x0000000f) +#define WMM_AIFSN_CFG_AIFSN1 FIELD32(0x000000f0) +#define WMM_AIFSN_CFG_AIFSN2 FIELD32(0x00000f00) +#define WMM_AIFSN_CFG_AIFSN3 FIELD32(0x0000f000) + +/* + * WMM_CWMIN_CSR: CWmin for each EDCA AC + * CWMIN0: AC_BE + * CWMIN1: AC_BK + * CWMIN2: AC_VI + * CWMIN3: AC_VO + */ +#define WMM_CWMIN_CFG 0x0218 +#define WMM_CWMIN_CFG_CWMIN0 FIELD32(0x0000000f) +#define WMM_CWMIN_CFG_CWMIN1 FIELD32(0x000000f0) +#define WMM_CWMIN_CFG_CWMIN2 FIELD32(0x00000f00) +#define WMM_CWMIN_CFG_CWMIN3 FIELD32(0x0000f000) + +/* + * WMM_CWMAX_CSR: CWmax for each EDCA AC + * CWMAX0: AC_BE + * CWMAX1: AC_BK + * CWMAX2: AC_VI + * CWMAX3: AC_VO + */ +#define WMM_CWMAX_CFG 0x021c +#define WMM_CWMAX_CFG_CWMAX0 FIELD32(0x0000000f) +#define WMM_CWMAX_CFG_CWMAX1 FIELD32(0x000000f0) +#define WMM_CWMAX_CFG_CWMAX2 FIELD32(0x00000f00) +#define WMM_CWMAX_CFG_CWMAX3 FIELD32(0x0000f000) + +/* + * AC_TXOP0: AC_BK/AC_BE TXOP register + * AC0TXOP: AC_BK in unit of 32us + * AC1TXOP: AC_BE in unit of 32us + */ +#define WMM_TXOP0_CFG 0x0220 +#define WMM_TXOP0_CFG_AC0TXOP FIELD32(0x0000ffff) +#define WMM_TXOP0_CFG_AC1TXOP FIELD32(0xffff0000) + +/* + * AC_TXOP1: AC_VO/AC_VI TXOP register + * AC2TXOP: AC_VI in unit of 32us + * AC3TXOP: AC_VO in unit of 32us + */ +#define WMM_TXOP1_CFG 0x0224 +#define WMM_TXOP1_CFG_AC2TXOP FIELD32(0x0000ffff) +#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000) + +/* + * GPIO_CTRL_CFG: + */ +#define GPIO_CTRL_CFG 0x0228 +#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001) +#define GPIO_CTRL_CFG_BIT1 FIELD32(0x00000002) +#define GPIO_CTRL_CFG_BIT2 FIELD32(0x00000004) +#define GPIO_CTRL_CFG_BIT3 FIELD32(0x00000008) +#define GPIO_CTRL_CFG_BIT4 FIELD32(0x00000010) +#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020) +#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040) +#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080) +#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100) + +/* + * MCU_CMD_CFG + */ +#define MCU_CMD_CFG 0x022c + +/* + * AC_BK register offsets + */ +#define TX_BASE_PTR0 0x0230 +#define TX_MAX_CNT0 0x0234 +#define TX_CTX_IDX0 0x0238 +#define TX_DTX_IDX0 0x023c + +/* + * AC_BE register offsets + */ +#define TX_BASE_PTR1 0x0240 +#define TX_MAX_CNT1 0x0244 +#define TX_CTX_IDX1 0x0248 +#define TX_DTX_IDX1 0x024c + +/* + * AC_VI register offsets + */ +#define TX_BASE_PTR2 0x0250 +#define TX_MAX_CNT2 0x0254 +#define TX_CTX_IDX2 0x0258 +#define TX_DTX_IDX2 0x025c + +/* + * AC_VO register offsets + */ +#define TX_BASE_PTR3 0x0260 +#define TX_MAX_CNT3 0x0264 +#define TX_CTX_IDX3 0x0268 +#define TX_DTX_IDX3 0x026c + +/* + * HCCA register offsets + */ +#define TX_BASE_PTR4 0x0270 +#define TX_MAX_CNT4 0x0274 +#define TX_CTX_IDX4 0x0278 +#define TX_DTX_IDX4 0x027c + +/* + * MGMT register offsets + */ +#define TX_BASE_PTR5 0x0280 +#define TX_MAX_CNT5 0x0284 +#define TX_CTX_IDX5 0x0288 +#define TX_DTX_IDX5 0x028c + +/* + * RX register offsets + */ +#define RX_BASE_PTR 0x0290 +#define RX_MAX_CNT 0x0294 +#define 
RX_CRX_IDX 0x0298 +#define RX_DRX_IDX 0x029c + +/* + * PBF_SYS_CTRL + * HOST_RAM_WRITE: enable Host program ram write selection + */ +#define PBF_SYS_CTRL 0x0400 +#define PBF_SYS_CTRL_READY FIELD32(0x00000080) +#define PBF_SYS_CTRL_HOST_RAM_WRITE FIELD32(0x00010000) + +/* + * HOST-MCU shared memory + */ +#define HOST_CMD_CSR 0x0404 +#define HOST_CMD_CSR_HOST_COMMAND FIELD32(0x000000ff) + +/* + * PBF registers + * Most are for debug. Driver doesn't touch PBF register. + */ +#define PBF_CFG 0x0408 +#define PBF_MAX_PCNT 0x040c +#define PBF_CTRL 0x0410 +#define PBF_INT_STA 0x0414 +#define PBF_INT_ENA 0x0418 + +/* + * BCN_OFFSET0: + */ +#define BCN_OFFSET0 0x042c +#define BCN_OFFSET0_BCN0 FIELD32(0x000000ff) +#define BCN_OFFSET0_BCN1 FIELD32(0x0000ff00) +#define BCN_OFFSET0_BCN2 FIELD32(0x00ff0000) +#define BCN_OFFSET0_BCN3 FIELD32(0xff000000) + +/* + * BCN_OFFSET1: + */ +#define BCN_OFFSET1 0x0430 +#define BCN_OFFSET1_BCN4 FIELD32(0x000000ff) +#define BCN_OFFSET1_BCN5 FIELD32(0x0000ff00) +#define BCN_OFFSET1_BCN6 FIELD32(0x00ff0000) +#define BCN_OFFSET1_BCN7 FIELD32(0xff000000) + +/* + * PBF registers + * Most are for debug. Driver doesn't touch PBF register. + */ +#define TXRXQ_PCNT 0x0438 +#define PBF_DBG 0x043c + +/* + * RF registers + */ +#define RF_CSR_CFG 0x0500 +#define RF_CSR_CFG_DATA FIELD32(0x000000ff) +#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00) +#define RF_CSR_CFG_WRITE FIELD32(0x00010000) +#define RF_CSR_CFG_BUSY FIELD32(0x00020000) + +/* + * EFUSE_CSR: RT30x0 EEPROM + */ +#define EFUSE_CTRL 0x0580 +#define EFUSE_CTRL_ADDRESS_IN FIELD32(0x03fe0000) +#define EFUSE_CTRL_MODE FIELD32(0x000000c0) +#define EFUSE_CTRL_KICK FIELD32(0x40000000) +#define EFUSE_CTRL_PRESENT FIELD32(0x80000000) + +/* + * EFUSE_DATA0 + */ +#define EFUSE_DATA0 0x0590 + +/* + * EFUSE_DATA1 + */ +#define EFUSE_DATA1 0x0594 + +/* + * EFUSE_DATA2 + */ +#define EFUSE_DATA2 0x0598 + +/* + * EFUSE_DATA3 + */ +#define EFUSE_DATA3 0x059c + +/* + * MAC Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * MAC_CSR0: ASIC revision number. + * ASIC_REV: 0 + * ASIC_VER: 2860 or 2870 + */ +#define MAC_CSR0 0x1000 +#define MAC_CSR0_REVISION FIELD32(0x0000ffff) +#define MAC_CSR0_CHIPSET FIELD32(0xffff0000) + +/* + * MAC_SYS_CTRL: + */ +#define MAC_SYS_CTRL 0x1004 +#define MAC_SYS_CTRL_RESET_CSR FIELD32(0x00000001) +#define MAC_SYS_CTRL_RESET_BBP FIELD32(0x00000002) +#define MAC_SYS_CTRL_ENABLE_TX FIELD32(0x00000004) +#define MAC_SYS_CTRL_ENABLE_RX FIELD32(0x00000008) +#define MAC_SYS_CTRL_CONTINUOUS_TX FIELD32(0x00000010) +#define MAC_SYS_CTRL_LOOPBACK FIELD32(0x00000020) +#define MAC_SYS_CTRL_WLAN_HALT FIELD32(0x00000040) +#define MAC_SYS_CTRL_RX_TIMESTAMP FIELD32(0x00000080) + +/* + * MAC_ADDR_DW0: STA MAC register 0 + */ +#define MAC_ADDR_DW0 0x1008 +#define MAC_ADDR_DW0_BYTE0 FIELD32(0x000000ff) +#define MAC_ADDR_DW0_BYTE1 FIELD32(0x0000ff00) +#define MAC_ADDR_DW0_BYTE2 FIELD32(0x00ff0000) +#define MAC_ADDR_DW0_BYTE3 FIELD32(0xff000000) + +/* + * MAC_ADDR_DW1: STA MAC register 1 + * UNICAST_TO_ME_MASK: + * Used to mask off bits from byte 5 of the MAC address + * to determine the UNICAST_TO_ME bit for RX frames. 
+ * The full mask is complemented by BSS_ID_MASK: + * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK + */ +#define MAC_ADDR_DW1 0x100c +#define MAC_ADDR_DW1_BYTE4 FIELD32(0x000000ff) +#define MAC_ADDR_DW1_BYTE5 FIELD32(0x0000ff00) +#define MAC_ADDR_DW1_UNICAST_TO_ME_MASK FIELD32(0x00ff0000) + +/* + * MAC_BSSID_DW0: BSSID register 0 + */ +#define MAC_BSSID_DW0 0x1010 +#define MAC_BSSID_DW0_BYTE0 FIELD32(0x000000ff) +#define MAC_BSSID_DW0_BYTE1 FIELD32(0x0000ff00) +#define MAC_BSSID_DW0_BYTE2 FIELD32(0x00ff0000) +#define MAC_BSSID_DW0_BYTE3 FIELD32(0xff000000) + +/* + * MAC_BSSID_DW1: BSSID register 1 + * BSS_ID_MASK: + * 0: 1-BSSID mode (BSS index = 0) + * 1: 2-BSSID mode (BSS index: Byte5, bit 0) + * 2: 4-BSSID mode (BSS index: byte5, bit 0 - 1) + * 3: 8-BSSID mode (BSS index: byte5, bit 0 - 2) + * This mask is used to mask off bits 0, 1 and 2 of byte 5 of the + * BSSID. This will make sure that those bits will be ignored + * when determining the MY_BSS of RX frames. + */ +#define MAC_BSSID_DW1 0x1014 +#define MAC_BSSID_DW1_BYTE4 FIELD32(0x000000ff) +#define MAC_BSSID_DW1_BYTE5 FIELD32(0x0000ff00) +#define MAC_BSSID_DW1_BSS_ID_MASK FIELD32(0x00030000) +#define MAC_BSSID_DW1_BSS_BCN_NUM FIELD32(0x001c0000) + +/* + * MAX_LEN_CFG: Maximum frame length register. + * MAX_MPDU: rt2860b max 16k bytes + * MAX_PSDU: Maximum PSDU length + * (power factor) 0:2^13, 1:2^14, 2:2^15, 3:2^16 + */ +#define MAX_LEN_CFG 0x1018 +#define MAX_LEN_CFG_MAX_MPDU FIELD32(0x00000fff) +#define MAX_LEN_CFG_MAX_PSDU FIELD32(0x00003000) +#define MAX_LEN_CFG_MIN_PSDU FIELD32(0x0000c000) +#define MAX_LEN_CFG_MIN_MPDU FIELD32(0x000f0000) + +/* + * BBP_CSR_CFG: BBP serial control register + * VALUE: Register value to program into BBP + * REG_NUM: Selected BBP register + * READ_CONTROL: 0 write BBP, 1 read BBP + * BUSY: ASIC is busy executing BBP commands + * BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks + * BBP_RW_MODE: 0 serial, 1 paralell + */ +#define BBP_CSR_CFG 0x101c +#define BBP_CSR_CFG_VALUE FIELD32(0x000000ff) +#define BBP_CSR_CFG_REGNUM FIELD32(0x0000ff00) +#define BBP_CSR_CFG_READ_CONTROL FIELD32(0x00010000) +#define BBP_CSR_CFG_BUSY FIELD32(0x00020000) +#define BBP_CSR_CFG_BBP_PAR_DUR FIELD32(0x00040000) +#define BBP_CSR_CFG_BBP_RW_MODE FIELD32(0x00080000) + +/* + * RF_CSR_CFG0: RF control register + * REGID_AND_VALUE: Register value to program into RF + * BITWIDTH: Selected RF register + * STANDBYMODE: 0 high when standby, 1 low when standby + * SEL: 0 RF_LE0 activate, 1 RF_LE1 activate + * BUSY: ASIC is busy executing RF commands + */ +#define RF_CSR_CFG0 0x1020 +#define RF_CSR_CFG0_REGID_AND_VALUE FIELD32(0x00ffffff) +#define RF_CSR_CFG0_BITWIDTH FIELD32(0x1f000000) +#define RF_CSR_CFG0_REG_VALUE_BW FIELD32(0x1fffffff) +#define RF_CSR_CFG0_STANDBYMODE FIELD32(0x20000000) +#define RF_CSR_CFG0_SEL FIELD32(0x40000000) +#define RF_CSR_CFG0_BUSY FIELD32(0x80000000) + +/* + * RF_CSR_CFG1: RF control register + * REGID_AND_VALUE: Register value to program into RF + * RFGAP: Gap between BB_CONTROL_RF and RF_LE + * 0: 3 system clock cycle (37.5usec) + * 1: 5 system clock cycle (62.5usec) + */ +#define RF_CSR_CFG1 0x1024 +#define RF_CSR_CFG1_REGID_AND_VALUE FIELD32(0x00ffffff) +#define RF_CSR_CFG1_RFGAP FIELD32(0x1f000000) + +/* + * RF_CSR_CFG2: RF control register + * VALUE: Register value to program into RF + */ +#define RF_CSR_CFG2 0x1028 +#define RF_CSR_CFG2_VALUE FIELD32(0x00ffffff) + +/* + * LED_CFG: LED control + * color LED's: + * 0: off + * 1: blinking upon TX2 + * 2: periodic slow blinking + * 3: always on + * LED 
polarity: + * 0: active low + * 1: active high + */ +#define LED_CFG 0x102c +#define LED_CFG_ON_PERIOD FIELD32(0x000000ff) +#define LED_CFG_OFF_PERIOD FIELD32(0x0000ff00) +#define LED_CFG_SLOW_BLINK_PERIOD FIELD32(0x003f0000) +#define LED_CFG_R_LED_MODE FIELD32(0x03000000) +#define LED_CFG_G_LED_MODE FIELD32(0x0c000000) +#define LED_CFG_Y_LED_MODE FIELD32(0x30000000) +#define LED_CFG_LED_POLAR FIELD32(0x40000000) + +/* + * XIFS_TIME_CFG: MAC timing + * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX + * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX + * OFDM_XIFS_TIME: unit 1us. Applied after OFDM RX + * when MAC doesn't reference BBP signal BBRXEND + * EIFS: unit 1us + * BB_RXEND_ENABLE: reference RXEND signal to begin XIFS defer + * + */ +#define XIFS_TIME_CFG 0x1100 +#define XIFS_TIME_CFG_CCKM_SIFS_TIME FIELD32(0x000000ff) +#define XIFS_TIME_CFG_OFDM_SIFS_TIME FIELD32(0x0000ff00) +#define XIFS_TIME_CFG_OFDM_XIFS_TIME FIELD32(0x000f0000) +#define XIFS_TIME_CFG_EIFS FIELD32(0x1ff00000) +#define XIFS_TIME_CFG_BB_RXEND_ENABLE FIELD32(0x20000000) + +/* + * BKOFF_SLOT_CFG: + */ +#define BKOFF_SLOT_CFG 0x1104 +#define BKOFF_SLOT_CFG_SLOT_TIME FIELD32(0x000000ff) +#define BKOFF_SLOT_CFG_CC_DELAY_TIME FIELD32(0x0000ff00) + +/* + * NAV_TIME_CFG: + */ +#define NAV_TIME_CFG 0x1108 +#define NAV_TIME_CFG_SIFS FIELD32(0x000000ff) +#define NAV_TIME_CFG_SLOT_TIME FIELD32(0x0000ff00) +#define NAV_TIME_CFG_EIFS FIELD32(0x01ff0000) +#define NAV_TIME_ZERO_SIFS FIELD32(0x02000000) + +/* + * CH_TIME_CFG: count as channel busy + */ +#define CH_TIME_CFG 0x110c + +/* + * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us + */ +#define PBF_LIFE_TIMER 0x1110 + +/* + * BCN_TIME_CFG: + * BEACON_INTERVAL: in unit of 1/16 TU + * TSF_TICKING: Enable TSF auto counting + * TSF_SYNC: Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode + * BEACON_GEN: Enable beacon generator + */ +#define BCN_TIME_CFG 0x1114 +#define BCN_TIME_CFG_BEACON_INTERVAL FIELD32(0x0000ffff) +#define BCN_TIME_CFG_TSF_TICKING FIELD32(0x00010000) +#define BCN_TIME_CFG_TSF_SYNC FIELD32(0x00060000) +#define BCN_TIME_CFG_TBTT_ENABLE FIELD32(0x00080000) +#define BCN_TIME_CFG_BEACON_GEN FIELD32(0x00100000) +#define BCN_TIME_CFG_TX_TIME_COMPENSATE FIELD32(0xf0000000) + +/* + * TBTT_SYNC_CFG: + */ +#define TBTT_SYNC_CFG 0x1118 + +/* + * TSF_TIMER_DW0: Local lsb TSF timer, read-only + */ +#define TSF_TIMER_DW0 0x111c +#define TSF_TIMER_DW0_LOW_WORD FIELD32(0xffffffff) + +/* + * TSF_TIMER_DW1: Local msb TSF timer, read-only + */ +#define TSF_TIMER_DW1 0x1120 +#define TSF_TIMER_DW1_HIGH_WORD FIELD32(0xffffffff) + +/* + * TBTT_TIMER: TImer remains till next TBTT, read-only + */ +#define TBTT_TIMER 0x1124 + +/* + * INT_TIMER_CFG: + */ +#define INT_TIMER_CFG 0x1128 + +/* + * INT_TIMER_EN: GP-timer and pre-tbtt Int enable + */ +#define INT_TIMER_EN 0x112c + +/* + * CH_IDLE_STA: channel idle time + */ +#define CH_IDLE_STA 0x1130 + +/* + * CH_BUSY_STA: channel busy time + */ +#define CH_BUSY_STA 0x1134 + +/* + * MAC_STATUS_CFG: + * BBP_RF_BUSY: When set to 0, BBP and RF are stable. + * if 1 or higher one of the 2 registers is busy. 
+ */ +#define MAC_STATUS_CFG 0x1200 +#define MAC_STATUS_CFG_BBP_RF_BUSY FIELD32(0x00000003) + +/* + * PWR_PIN_CFG: + */ +#define PWR_PIN_CFG 0x1204 + +/* + * AUTOWAKEUP_CFG: Manual power control / status register + * TBCN_BEFORE_WAKE: ForceWake has high privilege than PutToSleep when both set + * AUTOWAKE: 0:sleep, 1:awake + */ +#define AUTOWAKEUP_CFG 0x1208 +#define AUTOWAKEUP_CFG_AUTO_LEAD_TIME FIELD32(0x000000ff) +#define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE FIELD32(0x00007f00) +#define AUTOWAKEUP_CFG_AUTOWAKE FIELD32(0x00008000) + +/* + * EDCA_AC0_CFG: + */ +#define EDCA_AC0_CFG 0x1300 +#define EDCA_AC0_CFG_TX_OP FIELD32(0x000000ff) +#define EDCA_AC0_CFG_AIFSN FIELD32(0x00000f00) +#define EDCA_AC0_CFG_CWMIN FIELD32(0x0000f000) +#define EDCA_AC0_CFG_CWMAX FIELD32(0x000f0000) + +/* + * EDCA_AC1_CFG: + */ +#define EDCA_AC1_CFG 0x1304 +#define EDCA_AC1_CFG_TX_OP FIELD32(0x000000ff) +#define EDCA_AC1_CFG_AIFSN FIELD32(0x00000f00) +#define EDCA_AC1_CFG_CWMIN FIELD32(0x0000f000) +#define EDCA_AC1_CFG_CWMAX FIELD32(0x000f0000) + +/* + * EDCA_AC2_CFG: + */ +#define EDCA_AC2_CFG 0x1308 +#define EDCA_AC2_CFG_TX_OP FIELD32(0x000000ff) +#define EDCA_AC2_CFG_AIFSN FIELD32(0x00000f00) +#define EDCA_AC2_CFG_CWMIN FIELD32(0x0000f000) +#define EDCA_AC2_CFG_CWMAX FIELD32(0x000f0000) + +/* + * EDCA_AC3_CFG: + */ +#define EDCA_AC3_CFG 0x130c +#define EDCA_AC3_CFG_TX_OP FIELD32(0x000000ff) +#define EDCA_AC3_CFG_AIFSN FIELD32(0x00000f00) +#define EDCA_AC3_CFG_CWMIN FIELD32(0x0000f000) +#define EDCA_AC3_CFG_CWMAX FIELD32(0x000f0000) + +/* + * EDCA_TID_AC_MAP: + */ +#define EDCA_TID_AC_MAP 0x1310 + +/* + * TX_PWR_CFG_0: + */ +#define TX_PWR_CFG_0 0x1314 +#define TX_PWR_CFG_0_1MBS FIELD32(0x0000000f) +#define TX_PWR_CFG_0_2MBS FIELD32(0x000000f0) +#define TX_PWR_CFG_0_55MBS FIELD32(0x00000f00) +#define TX_PWR_CFG_0_11MBS FIELD32(0x0000f000) +#define TX_PWR_CFG_0_6MBS FIELD32(0x000f0000) +#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000) +#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000) +#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000) + +/* + * TX_PWR_CFG_1: + */ +#define TX_PWR_CFG_1 0x1318 +#define TX_PWR_CFG_1_24MBS FIELD32(0x0000000f) +#define TX_PWR_CFG_1_36MBS FIELD32(0x000000f0) +#define TX_PWR_CFG_1_48MBS FIELD32(0x00000f00) +#define TX_PWR_CFG_1_54MBS FIELD32(0x0000f000) +#define TX_PWR_CFG_1_MCS0 FIELD32(0x000f0000) +#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000) +#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000) +#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000) + +/* + * TX_PWR_CFG_2: + */ +#define TX_PWR_CFG_2 0x131c +#define TX_PWR_CFG_2_MCS4 FIELD32(0x0000000f) +#define TX_PWR_CFG_2_MCS5 FIELD32(0x000000f0) +#define TX_PWR_CFG_2_MCS6 FIELD32(0x00000f00) +#define TX_PWR_CFG_2_MCS7 FIELD32(0x0000f000) +#define TX_PWR_CFG_2_MCS8 FIELD32(0x000f0000) +#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000) +#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000) +#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000) + +/* + * TX_PWR_CFG_3: + */ +#define TX_PWR_CFG_3 0x1320 +#define TX_PWR_CFG_3_MCS12 FIELD32(0x0000000f) +#define TX_PWR_CFG_3_MCS13 FIELD32(0x000000f0) +#define TX_PWR_CFG_3_MCS14 FIELD32(0x00000f00) +#define TX_PWR_CFG_3_MCS15 FIELD32(0x0000f000) +#define TX_PWR_CFG_3_UKNOWN1 FIELD32(0x000f0000) +#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000) +#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000) +#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000) + +/* + * TX_PWR_CFG_4: + */ +#define TX_PWR_CFG_4 0x1324 +#define TX_PWR_CFG_4_UKNOWN5 FIELD32(0x0000000f) +#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0) +#define TX_PWR_CFG_4_UKNOWN7 
FIELD32(0x00000f00) +#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000) + +/* + * TX_PIN_CFG: + */ +#define TX_PIN_CFG 0x1328 +#define TX_PIN_CFG_PA_PE_A0_EN FIELD32(0x00000001) +#define TX_PIN_CFG_PA_PE_G0_EN FIELD32(0x00000002) +#define TX_PIN_CFG_PA_PE_A1_EN FIELD32(0x00000004) +#define TX_PIN_CFG_PA_PE_G1_EN FIELD32(0x00000008) +#define TX_PIN_CFG_PA_PE_A0_POL FIELD32(0x00000010) +#define TX_PIN_CFG_PA_PE_G0_POL FIELD32(0x00000020) +#define TX_PIN_CFG_PA_PE_A1_POL FIELD32(0x00000040) +#define TX_PIN_CFG_PA_PE_G1_POL FIELD32(0x00000080) +#define TX_PIN_CFG_LNA_PE_A0_EN FIELD32(0x00000100) +#define TX_PIN_CFG_LNA_PE_G0_EN FIELD32(0x00000200) +#define TX_PIN_CFG_LNA_PE_A1_EN FIELD32(0x00000400) +#define TX_PIN_CFG_LNA_PE_G1_EN FIELD32(0x00000800) +#define TX_PIN_CFG_LNA_PE_A0_POL FIELD32(0x00001000) +#define TX_PIN_CFG_LNA_PE_G0_POL FIELD32(0x00002000) +#define TX_PIN_CFG_LNA_PE_A1_POL FIELD32(0x00004000) +#define TX_PIN_CFG_LNA_PE_G1_POL FIELD32(0x00008000) +#define TX_PIN_CFG_RFTR_EN FIELD32(0x00010000) +#define TX_PIN_CFG_RFTR_POL FIELD32(0x00020000) +#define TX_PIN_CFG_TRSW_EN FIELD32(0x00040000) +#define TX_PIN_CFG_TRSW_POL FIELD32(0x00080000) + +/* + * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz + */ +#define TX_BAND_CFG 0x132c +#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001) +#define TX_BAND_CFG_A FIELD32(0x00000002) +#define TX_BAND_CFG_BG FIELD32(0x00000004) + +/* + * TX_SW_CFG0: + */ +#define TX_SW_CFG0 0x1330 + +/* + * TX_SW_CFG1: + */ +#define TX_SW_CFG1 0x1334 + +/* + * TX_SW_CFG2: + */ +#define TX_SW_CFG2 0x1338 + +/* + * TXOP_THRES_CFG: + */ +#define TXOP_THRES_CFG 0x133c + +/* + * TXOP_CTRL_CFG: + */ +#define TXOP_CTRL_CFG 0x1340 + +/* + * TX_RTS_CFG: + * RTS_THRES: unit:byte + * RTS_FBK_EN: enable rts rate fallback + */ +#define TX_RTS_CFG 0x1344 +#define TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT FIELD32(0x000000ff) +#define TX_RTS_CFG_RTS_THRES FIELD32(0x00ffff00) +#define TX_RTS_CFG_RTS_FBK_EN FIELD32(0x01000000) + +/* + * TX_TIMEOUT_CFG: + * MPDU_LIFETIME: expiration time = 2^(9+MPDU LIFE TIME) us + * RX_ACK_TIMEOUT: unit:slot. Used for TX procedure + * TX_OP_TIMEOUT: TXOP timeout value for TXOP truncation. + * it is recommended that: + * (SLOT_TIME) > (TX_OP_TIMEOUT) > (RX_ACK_TIMEOUT) + */ +#define TX_TIMEOUT_CFG 0x1348 +#define TX_TIMEOUT_CFG_MPDU_LIFETIME FIELD32(0x000000f0) +#define TX_TIMEOUT_CFG_RX_ACK_TIMEOUT FIELD32(0x0000ff00) +#define TX_TIMEOUT_CFG_TX_OP_TIMEOUT FIELD32(0x00ff0000) + +/* + * TX_RTY_CFG: + * SHORT_RTY_LIMIT: short retry limit + * LONG_RTY_LIMIT: long retry limit + * LONG_RTY_THRE: Long retry threshoold + * NON_AGG_RTY_MODE: Non-Aggregate MPDU retry mode + * 0:expired by retry limit, 1: expired by mpdu life timer + * AGG_RTY_MODE: Aggregate MPDU retry mode + * 0:expired by retry limit, 1: expired by mpdu life timer + * TX_AUTO_FB_ENABLE: Tx retry PHY rate auto fallback enable + */ +#define TX_RTY_CFG 0x134c +#define TX_RTY_CFG_SHORT_RTY_LIMIT FIELD32(0x000000ff) +#define TX_RTY_CFG_LONG_RTY_LIMIT FIELD32(0x0000ff00) +#define TX_RTY_CFG_LONG_RTY_THRE FIELD32(0x0fff0000) +#define TX_RTY_CFG_NON_AGG_RTY_MODE FIELD32(0x10000000) +#define TX_RTY_CFG_AGG_RTY_MODE FIELD32(0x20000000) +#define TX_RTY_CFG_TX_AUTO_FB_ENABLE FIELD32(0x40000000) + +/* + * TX_LINK_CFG: + * REMOTE_MFB_LIFETIME: remote MFB life time. 
unit: 32us + * MFB_ENABLE: TX apply remote MFB 1:enable + * REMOTE_UMFS_ENABLE: remote unsolicit MFB enable + * 0: not apply remote remote unsolicit (MFS=7) + * TX_MRQ_EN: MCS request TX enable + * TX_RDG_EN: RDG TX enable + * TX_CF_ACK_EN: Piggyback CF-ACK enable + * REMOTE_MFB: remote MCS feedback + * REMOTE_MFS: remote MCS feedback sequence number + */ +#define TX_LINK_CFG 0x1350 +#define TX_LINK_CFG_REMOTE_MFB_LIFETIME FIELD32(0x000000ff) +#define TX_LINK_CFG_MFB_ENABLE FIELD32(0x00000100) +#define TX_LINK_CFG_REMOTE_UMFS_ENABLE FIELD32(0x00000200) +#define TX_LINK_CFG_TX_MRQ_EN FIELD32(0x00000400) +#define TX_LINK_CFG_TX_RDG_EN FIELD32(0x00000800) +#define TX_LINK_CFG_TX_CF_ACK_EN FIELD32(0x00001000) +#define TX_LINK_CFG_REMOTE_MFB FIELD32(0x00ff0000) +#define TX_LINK_CFG_REMOTE_MFS FIELD32(0xff000000) + +/* + * HT_FBK_CFG0: + */ +#define HT_FBK_CFG0 0x1354 +#define HT_FBK_CFG0_HTMCS0FBK FIELD32(0x0000000f) +#define HT_FBK_CFG0_HTMCS1FBK FIELD32(0x000000f0) +#define HT_FBK_CFG0_HTMCS2FBK FIELD32(0x00000f00) +#define HT_FBK_CFG0_HTMCS3FBK FIELD32(0x0000f000) +#define HT_FBK_CFG0_HTMCS4FBK FIELD32(0x000f0000) +#define HT_FBK_CFG0_HTMCS5FBK FIELD32(0x00f00000) +#define HT_FBK_CFG0_HTMCS6FBK FIELD32(0x0f000000) +#define HT_FBK_CFG0_HTMCS7FBK FIELD32(0xf0000000) + +/* + * HT_FBK_CFG1: + */ +#define HT_FBK_CFG1 0x1358 +#define HT_FBK_CFG1_HTMCS8FBK FIELD32(0x0000000f) +#define HT_FBK_CFG1_HTMCS9FBK FIELD32(0x000000f0) +#define HT_FBK_CFG1_HTMCS10FBK FIELD32(0x00000f00) +#define HT_FBK_CFG1_HTMCS11FBK FIELD32(0x0000f000) +#define HT_FBK_CFG1_HTMCS12FBK FIELD32(0x000f0000) +#define HT_FBK_CFG1_HTMCS13FBK FIELD32(0x00f00000) +#define HT_FBK_CFG1_HTMCS14FBK FIELD32(0x0f000000) +#define HT_FBK_CFG1_HTMCS15FBK FIELD32(0xf0000000) + +/* + * LG_FBK_CFG0: + */ +#define LG_FBK_CFG0 0x135c +#define LG_FBK_CFG0_OFDMMCS0FBK FIELD32(0x0000000f) +#define LG_FBK_CFG0_OFDMMCS1FBK FIELD32(0x000000f0) +#define LG_FBK_CFG0_OFDMMCS2FBK FIELD32(0x00000f00) +#define LG_FBK_CFG0_OFDMMCS3FBK FIELD32(0x0000f000) +#define LG_FBK_CFG0_OFDMMCS4FBK FIELD32(0x000f0000) +#define LG_FBK_CFG0_OFDMMCS5FBK FIELD32(0x00f00000) +#define LG_FBK_CFG0_OFDMMCS6FBK FIELD32(0x0f000000) +#define LG_FBK_CFG0_OFDMMCS7FBK FIELD32(0xf0000000) + +/* + * LG_FBK_CFG1: + */ +#define LG_FBK_CFG1 0x1360 +#define LG_FBK_CFG0_CCKMCS0FBK FIELD32(0x0000000f) +#define LG_FBK_CFG0_CCKMCS1FBK FIELD32(0x000000f0) +#define LG_FBK_CFG0_CCKMCS2FBK FIELD32(0x00000f00) +#define LG_FBK_CFG0_CCKMCS3FBK FIELD32(0x0000f000) + +/* + * CCK_PROT_CFG: CCK Protection + * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd) + * PROTECT_CTRL: Protection control frame type for CCK TX + * 0:none, 1:RTS/CTS, 2:CTS-to-self + * PROTECT_NAV: TXOP protection type for CCK TX + * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect + * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow + * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow + * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow + * TX_OP_ALLOW_MM40: CCK TXOP allowance, 0:disallow + * TX_OP_ALLOW_GF20: CCK TXOP allowance, 0:disallow + * TX_OP_ALLOW_GF40: CCK TXOP allowance, 0:disallow + * RTS_TH_EN: RTS threshold enable on CCK TX + */ +#define CCK_PROT_CFG 0x1364 +#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) +#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) +#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) +#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) +#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) +#define 
CCK_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000) +#define CCK_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000) +#define CCK_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000) +#define CCK_PROT_CFG_RTS_TH_EN FIELD32(0x04000000) + +/* + * OFDM_PROT_CFG: OFDM Protection + */ +#define OFDM_PROT_CFG 0x1368 +#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) +#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) +#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) +#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) +#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) +#define OFDM_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000) +#define OFDM_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000) +#define OFDM_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000) +#define OFDM_PROT_CFG_RTS_TH_EN FIELD32(0x04000000) + +/* + * MM20_PROT_CFG: MM20 Protection + */ +#define MM20_PROT_CFG 0x136c +#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) +#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) +#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) +#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) +#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) +#define MM20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000) +#define MM20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000) +#define MM20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000) +#define MM20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000) + +/* + * MM40_PROT_CFG: MM40 Protection + */ +#define MM40_PROT_CFG 0x1370 +#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) +#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) +#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) +#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) +#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) +#define MM40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000) +#define MM40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000) +#define MM40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000) +#define MM40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000) + +/* + * GF20_PROT_CFG: GF20 Protection + */ +#define GF20_PROT_CFG 0x1374 +#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) +#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) +#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) +#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) +#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) +#define GF20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000) +#define GF20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000) +#define GF20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000) +#define GF20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000) + +/* + * GF40_PROT_CFG: GF40 Protection + */ +#define GF40_PROT_CFG 0x1378 +#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) +#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) +#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) +#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) +#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) +#define GF40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000) +#define GF40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000) +#define GF40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000) +#define GF40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000) + +/* + * EXP_CTS_TIME: + */ +#define EXP_CTS_TIME 0x137c + 
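Editor's note: the protection, EDCA and WMM registers above are all described through FIELD32() bitmask descriptors rather than explicit shift constants. Below is a minimal standalone sketch of how such a descriptor is typically consumed. The set_field32()/get_field32() helpers and the SKETCH_ constant are illustrative stand-ins added here for explanation only; the in-tree rt2x00 code uses its own accessors (rt2x00_set_field32()/rt2x00_get_field32() from rt2x00reg.h), and nothing in this sketch is part of the patch itself.

/*
 * Illustrative sketch only: shows how a FIELD32() bitmask such as
 * CCK_PROT_CFG_PROTECT_CTRL is used to pack a value into a register image.
 * These helper names are stand-ins, not definitions from this patch.
 */
#include <stdint.h>
#include <stdio.h>

/* Mask value copied from the CCK_PROT_CFG definitions above. */
#define SKETCH_CCK_PROT_CFG_PROTECT_CTRL	0x00030000u

/* Write @value into the field selected by @mask inside register image @reg. */
static void set_field32(uint32_t *reg, uint32_t mask, uint32_t value)
{
	unsigned int shift = __builtin_ctz(mask);	/* lowest set bit of mask */

	*reg = (*reg & ~mask) | ((value << shift) & mask);
}

/* Read the field selected by @mask back out of register image @reg. */
static uint32_t get_field32(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t reg = 0;

	/* PROTECT_CTRL = 1 selects RTS/CTS protection per the comment above. */
	set_field32(&reg, SKETCH_CCK_PROT_CFG_PROTECT_CTRL, 1);
	printf("CCK_PROT_CFG image: 0x%08x, PROTECT_CTRL=%u\n",
	       reg, get_field32(reg, SKETCH_CCK_PROT_CFG_PROTECT_CTRL));
	return 0;
}

The same mask-plus-implicit-shift pattern applies to every FIELD8/FIELD16/FIELD32 descriptor in these headers.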
+/* + * EXP_ACK_TIME: + */ +#define EXP_ACK_TIME 0x1380 + +/* + * RX_FILTER_CFG: RX configuration register. + */ +#define RX_FILTER_CFG 0x1400 +#define RX_FILTER_CFG_DROP_CRC_ERROR FIELD32(0x00000001) +#define RX_FILTER_CFG_DROP_PHY_ERROR FIELD32(0x00000002) +#define RX_FILTER_CFG_DROP_NOT_TO_ME FIELD32(0x00000004) +#define RX_FILTER_CFG_DROP_NOT_MY_BSSD FIELD32(0x00000008) +#define RX_FILTER_CFG_DROP_VER_ERROR FIELD32(0x00000010) +#define RX_FILTER_CFG_DROP_MULTICAST FIELD32(0x00000020) +#define RX_FILTER_CFG_DROP_BROADCAST FIELD32(0x00000040) +#define RX_FILTER_CFG_DROP_DUPLICATE FIELD32(0x00000080) +#define RX_FILTER_CFG_DROP_CF_END_ACK FIELD32(0x00000100) +#define RX_FILTER_CFG_DROP_CF_END FIELD32(0x00000200) +#define RX_FILTER_CFG_DROP_ACK FIELD32(0x00000400) +#define RX_FILTER_CFG_DROP_CTS FIELD32(0x00000800) +#define RX_FILTER_CFG_DROP_RTS FIELD32(0x00001000) +#define RX_FILTER_CFG_DROP_PSPOLL FIELD32(0x00002000) +#define RX_FILTER_CFG_DROP_BA FIELD32(0x00004000) +#define RX_FILTER_CFG_DROP_BAR FIELD32(0x00008000) +#define RX_FILTER_CFG_DROP_CNTL FIELD32(0x00010000) + +/* + * AUTO_RSP_CFG: + * AUTORESPONDER: 0: disable, 1: enable + * BAC_ACK_POLICY: 0:long, 1:short preamble + * CTS_40_MMODE: Response CTS 40MHz duplicate mode + * CTS_40_MREF: Response CTS 40MHz duplicate mode + * AR_PREAMBLE: Auto responder preamble 0:long, 1:short preamble + * DUAL_CTS_EN: Power bit value in control frame + * ACK_CTS_PSM_BIT:Power bit value in control frame + */ +#define AUTO_RSP_CFG 0x1404 +#define AUTO_RSP_CFG_AUTORESPONDER FIELD32(0x00000001) +#define AUTO_RSP_CFG_BAC_ACK_POLICY FIELD32(0x00000002) +#define AUTO_RSP_CFG_CTS_40_MMODE FIELD32(0x00000004) +#define AUTO_RSP_CFG_CTS_40_MREF FIELD32(0x00000008) +#define AUTO_RSP_CFG_AR_PREAMBLE FIELD32(0x00000010) +#define AUTO_RSP_CFG_DUAL_CTS_EN FIELD32(0x00000040) +#define AUTO_RSP_CFG_ACK_CTS_PSM_BIT FIELD32(0x00000080) + +/* + * LEGACY_BASIC_RATE: + */ +#define LEGACY_BASIC_RATE 0x1408 + +/* + * HT_BASIC_RATE: + */ +#define HT_BASIC_RATE 0x140c + +/* + * HT_CTRL_CFG: + */ +#define HT_CTRL_CFG 0x1410 + +/* + * SIFS_COST_CFG: + */ +#define SIFS_COST_CFG 0x1414 + +/* + * RX_PARSER_CFG: + * Set NAV for all received frames + */ +#define RX_PARSER_CFG 0x1418 + +/* + * TX_SEC_CNT0: + */ +#define TX_SEC_CNT0 0x1500 + +/* + * RX_SEC_CNT0: + */ +#define RX_SEC_CNT0 0x1504 + +/* + * CCMP_FC_MUTE: + */ +#define CCMP_FC_MUTE 0x1508 + +/* + * TXOP_HLDR_ADDR0: + */ +#define TXOP_HLDR_ADDR0 0x1600 + +/* + * TXOP_HLDR_ADDR1: + */ +#define TXOP_HLDR_ADDR1 0x1604 + +/* + * TXOP_HLDR_ET: + */ +#define TXOP_HLDR_ET 0x1608 + +/* + * QOS_CFPOLL_RA_DW0: + */ +#define QOS_CFPOLL_RA_DW0 0x160c + +/* + * QOS_CFPOLL_RA_DW1: + */ +#define QOS_CFPOLL_RA_DW1 0x1610 + +/* + * QOS_CFPOLL_QC: + */ +#define QOS_CFPOLL_QC 0x1614 + +/* + * RX_STA_CNT0: RX PLCP error count & RX CRC error count + */ +#define RX_STA_CNT0 0x1700 +#define RX_STA_CNT0_CRC_ERR FIELD32(0x0000ffff) +#define RX_STA_CNT0_PHY_ERR FIELD32(0xffff0000) + +/* + * RX_STA_CNT1: RX False CCA count & RX LONG frame count + */ +#define RX_STA_CNT1 0x1704 +#define RX_STA_CNT1_FALSE_CCA FIELD32(0x0000ffff) +#define RX_STA_CNT1_PLCP_ERR FIELD32(0xffff0000) + +/* + * RX_STA_CNT2: + */ +#define RX_STA_CNT2 0x1708 +#define RX_STA_CNT2_RX_DUPLI_COUNT FIELD32(0x0000ffff) +#define RX_STA_CNT2_RX_FIFO_OVERFLOW FIELD32(0xffff0000) + +/* + * TX_STA_CNT0: TX Beacon count + */ +#define TX_STA_CNT0 0x170c +#define TX_STA_CNT0_TX_FAIL_COUNT FIELD32(0x0000ffff) +#define TX_STA_CNT0_TX_BEACON_COUNT FIELD32(0xffff0000) + +/* + * 
TX_STA_CNT1: TX tx count + */ +#define TX_STA_CNT1 0x1710 +#define TX_STA_CNT1_TX_SUCCESS FIELD32(0x0000ffff) +#define TX_STA_CNT1_TX_RETRANSMIT FIELD32(0xffff0000) + +/* + * TX_STA_CNT2: TX tx count + */ +#define TX_STA_CNT2 0x1714 +#define TX_STA_CNT2_TX_ZERO_LEN_COUNT FIELD32(0x0000ffff) +#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000) + +/* + * TX_STA_FIFO: TX Result for specific PID status fifo register + */ +#define TX_STA_FIFO 0x1718 +#define TX_STA_FIFO_VALID FIELD32(0x00000001) +#define TX_STA_FIFO_PID_TYPE FIELD32(0x0000001e) +#define TX_STA_FIFO_TX_SUCCESS FIELD32(0x00000020) +#define TX_STA_FIFO_TX_AGGRE FIELD32(0x00000040) +#define TX_STA_FIFO_TX_ACK_REQUIRED FIELD32(0x00000080) +#define TX_STA_FIFO_WCID FIELD32(0x0000ff00) +#define TX_STA_FIFO_SUCCESS_RATE FIELD32(0xffff0000) +#define TX_STA_FIFO_MCS FIELD32(0x007f0000) +#define TX_STA_FIFO_PHYMODE FIELD32(0xc0000000) + +/* + * TX_AGG_CNT: Debug counter + */ +#define TX_AGG_CNT 0x171c +#define TX_AGG_CNT_NON_AGG_TX_COUNT FIELD32(0x0000ffff) +#define TX_AGG_CNT_AGG_TX_COUNT FIELD32(0xffff0000) + +/* + * TX_AGG_CNT0: + */ +#define TX_AGG_CNT0 0x1720 +#define TX_AGG_CNT0_AGG_SIZE_1_COUNT FIELD32(0x0000ffff) +#define TX_AGG_CNT0_AGG_SIZE_2_COUNT FIELD32(0xffff0000) + +/* + * TX_AGG_CNT1: + */ +#define TX_AGG_CNT1 0x1724 +#define TX_AGG_CNT1_AGG_SIZE_3_COUNT FIELD32(0x0000ffff) +#define TX_AGG_CNT1_AGG_SIZE_4_COUNT FIELD32(0xffff0000) + +/* + * TX_AGG_CNT2: + */ +#define TX_AGG_CNT2 0x1728 +#define TX_AGG_CNT2_AGG_SIZE_5_COUNT FIELD32(0x0000ffff) +#define TX_AGG_CNT2_AGG_SIZE_6_COUNT FIELD32(0xffff0000) + +/* + * TX_AGG_CNT3: + */ +#define TX_AGG_CNT3 0x172c +#define TX_AGG_CNT3_AGG_SIZE_7_COUNT FIELD32(0x0000ffff) +#define TX_AGG_CNT3_AGG_SIZE_8_COUNT FIELD32(0xffff0000) + +/* + * TX_AGG_CNT4: + */ +#define TX_AGG_CNT4 0x1730 +#define TX_AGG_CNT4_AGG_SIZE_9_COUNT FIELD32(0x0000ffff) +#define TX_AGG_CNT4_AGG_SIZE_10_COUNT FIELD32(0xffff0000) + +/* + * TX_AGG_CNT5: + */ +#define TX_AGG_CNT5 0x1734 +#define TX_AGG_CNT5_AGG_SIZE_11_COUNT FIELD32(0x0000ffff) +#define TX_AGG_CNT5_AGG_SIZE_12_COUNT FIELD32(0xffff0000) + +/* + * TX_AGG_CNT6: + */ +#define TX_AGG_CNT6 0x1738 +#define TX_AGG_CNT6_AGG_SIZE_13_COUNT FIELD32(0x0000ffff) +#define TX_AGG_CNT6_AGG_SIZE_14_COUNT FIELD32(0xffff0000) + +/* + * TX_AGG_CNT7: + */ +#define TX_AGG_CNT7 0x173c +#define TX_AGG_CNT7_AGG_SIZE_15_COUNT FIELD32(0x0000ffff) +#define TX_AGG_CNT7_AGG_SIZE_16_COUNT FIELD32(0xffff0000) + +/* + * MPDU_DENSITY_CNT: + * TX_ZERO_DEL: TX zero length delimiter count + * RX_ZERO_DEL: RX zero length delimiter count + */ +#define MPDU_DENSITY_CNT 0x1740 +#define MPDU_DENSITY_CNT_TX_ZERO_DEL FIELD32(0x0000ffff) +#define MPDU_DENSITY_CNT_RX_ZERO_DEL FIELD32(0xffff0000) + +/* + * Security key table memory. 
+ * MAC_WCID_BASE: 8-bytes (use only 6 bytes) * 256 entry + * PAIRWISE_KEY_TABLE_BASE: 32-byte * 256 entry + * MAC_IVEIV_TABLE_BASE: 8-byte * 256-entry + * MAC_WCID_ATTRIBUTE_BASE: 4-byte * 256-entry + * SHARED_KEY_TABLE_BASE: 32-byte * 16-entry + * SHARED_KEY_MODE_BASE: 4-byte * 16-entry + */ +#define MAC_WCID_BASE 0x1800 +#define PAIRWISE_KEY_TABLE_BASE 0x4000 +#define MAC_IVEIV_TABLE_BASE 0x6000 +#define MAC_WCID_ATTRIBUTE_BASE 0x6800 +#define SHARED_KEY_TABLE_BASE 0x6c00 +#define SHARED_KEY_MODE_BASE 0x7000 + +#define MAC_WCID_ENTRY(__idx) \ + ( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) ) +#define PAIRWISE_KEY_ENTRY(__idx) \ + ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) ) +#define MAC_IVEIV_ENTRY(__idx) \ + ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) ) +#define MAC_WCID_ATTR_ENTRY(__idx) \ + ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) ) +#define SHARED_KEY_ENTRY(__idx) \ + ( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) ) +#define SHARED_KEY_MODE_ENTRY(__idx) \ + ( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) ) + +struct mac_wcid_entry { + u8 mac[6]; + u8 reserved[2]; +} __attribute__ ((packed)); + +struct hw_key_entry { + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; +} __attribute__ ((packed)); + +struct mac_iveiv_entry { + u8 iv[8]; +} __attribute__ ((packed)); + +/* + * MAC_WCID_ATTRIBUTE: + */ +#define MAC_WCID_ATTRIBUTE_KEYTAB FIELD32(0x00000001) +#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e) +#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070) +#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380) + +/* + * SHARED_KEY_MODE: + */ +#define SHARED_KEY_MODE_BSS0_KEY0 FIELD32(0x00000007) +#define SHARED_KEY_MODE_BSS0_KEY1 FIELD32(0x00000070) +#define SHARED_KEY_MODE_BSS0_KEY2 FIELD32(0x00000700) +#define SHARED_KEY_MODE_BSS0_KEY3 FIELD32(0x00007000) +#define SHARED_KEY_MODE_BSS1_KEY0 FIELD32(0x00070000) +#define SHARED_KEY_MODE_BSS1_KEY1 FIELD32(0x00700000) +#define SHARED_KEY_MODE_BSS1_KEY2 FIELD32(0x07000000) +#define SHARED_KEY_MODE_BSS1_KEY3 FIELD32(0x70000000) + +/* + * HOST-MCU communication + */ + +/* + * H2M_MAILBOX_CSR: Host-to-MCU Mailbox. + */ +#define H2M_MAILBOX_CSR 0x7010 +#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff) +#define H2M_MAILBOX_CSR_ARG1 FIELD32(0x0000ff00) +#define H2M_MAILBOX_CSR_CMD_TOKEN FIELD32(0x00ff0000) +#define H2M_MAILBOX_CSR_OWNER FIELD32(0xff000000) + +/* + * H2M_MAILBOX_CID: + */ +#define H2M_MAILBOX_CID 0x7014 +#define H2M_MAILBOX_CID_CMD0 FIELD32(0x000000ff) +#define H2M_MAILBOX_CID_CMD1 FIELD32(0x0000ff00) +#define H2M_MAILBOX_CID_CMD2 FIELD32(0x00ff0000) +#define H2M_MAILBOX_CID_CMD3 FIELD32(0xff000000) + +/* + * H2M_MAILBOX_STATUS: + */ +#define H2M_MAILBOX_STATUS 0x701c + +/* + * H2M_INT_SRC: + */ +#define H2M_INT_SRC 0x7024 + +/* + * H2M_BBP_AGENT: + */ +#define H2M_BBP_AGENT 0x7028 + +/* + * MCU_LEDCS: LED control for MCU Mailbox. + */ +#define MCU_LEDCS_LED_MODE FIELD8(0x1f) +#define MCU_LEDCS_POLARITY FIELD8(0x01) + +/* + * HW_CS_CTS_BASE: + * Carrier-sense CTS frame base address. + * It's where mac stores carrier-sense frame for carrier-sense function. + */ +#define HW_CS_CTS_BASE 0x7700 + +/* + * HW_DFS_CTS_BASE: + * DFS CTS frame base address. It's where mac stores CTS frame for DFS. + */ +#define HW_DFS_CTS_BASE 0x7780 + +/* + * TXRX control registers - base address 0x3000 + */ + +/* + * TXRX_CSR1: + * rt2860b UNKNOWN reg use R/O Reg Addr 0x77d0 first.. 
+ */ +#define TXRX_CSR1 0x77d0 + +/* + * HW_DEBUG_SETTING_BASE: + * since NULL frame won't be that long (256 byte) + * We steal 16 tail bytes to save debugging settings + */ +#define HW_DEBUG_SETTING_BASE 0x77f0 +#define HW_DEBUG_SETTING_BASE2 0x7770 + +/* + * HW_BEACON_BASE + * In order to support maximum 8 MBSS and its maximum length + * is 512 bytes for each beacon + * Three section discontinue memory segments will be used. + * 1. The original region for BCN 0~3 + * 2. Extract memory from FCE table for BCN 4~5 + * 3. Extract memory from Pair-wise key table for BCN 6~7 + * It occupied those memory of wcid 238~253 for BCN 6 + * and wcid 222~237 for BCN 7 + * + * IMPORTANT NOTE: Not sure why legacy driver does this, + * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6. + */ +#define HW_BEACON_BASE0 0x7800 +#define HW_BEACON_BASE1 0x7a00 +#define HW_BEACON_BASE2 0x7c00 +#define HW_BEACON_BASE3 0x7e00 +#define HW_BEACON_BASE4 0x7200 +#define HW_BEACON_BASE5 0x7400 +#define HW_BEACON_BASE6 0x5dc0 +#define HW_BEACON_BASE7 0x5bc0 + +#define HW_BEACON_OFFSET(__index) \ + ( ((__index) < 4) ? ( HW_BEACON_BASE0 + (__index * 0x0200) ) : \ + (((__index) < 6) ? ( HW_BEACON_BASE4 + ((__index - 4) * 0x0200) ) : \ + (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))) ) + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * BBP 1: TX Antenna + */ +#define BBP1_TX_POWER FIELD8(0x07) +#define BBP1_TX_ANTENNA FIELD8(0x18) + +/* + * BBP 3: RX Antenna + */ +#define BBP3_RX_ANTENNA FIELD8(0x18) +#define BBP3_HT40_PLUS FIELD8(0x20) + +/* + * BBP 4: Bandwidth + */ +#define BBP4_TX_BF FIELD8(0x01) +#define BBP4_BANDWIDTH FIELD8(0x18) + +/* + * RFCSR registers + * The wordsize of the RFCSR is 8 bits. + */ + +/* + * RFCSR 6: + */ +#define RFCSR6_R FIELD8(0x03) + +/* + * RFCSR 7: + */ +#define RFCSR7_RF_TUNING FIELD8(0x01) + +/* + * RFCSR 12: + */ +#define RFCSR12_TX_POWER FIELD8(0x1f) + +/* + * RFCSR 22: + */ +#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01) + +/* + * RFCSR 23: + */ +#define RFCSR23_FREQ_OFFSET FIELD8(0x7f) + +/* + * RFCSR 30: + */ +#define RFCSR30_RF_CALIBRATION FIELD8(0x80) + +/* + * RF registers + */ + +/* + * RF 2 + */ +#define RF2_ANTENNA_RX2 FIELD32(0x00000040) +#define RF2_ANTENNA_TX1 FIELD32(0x00004000) +#define RF2_ANTENNA_RX1 FIELD32(0x00020000) + +/* + * RF 3 + */ +#define RF3_TXPOWER_G FIELD32(0x00003e00) +#define RF3_TXPOWER_A_7DBM_BOOST FIELD32(0x00000200) +#define RF3_TXPOWER_A FIELD32(0x00003c00) + +/* + * RF 4 + */ +#define RF4_TXPOWER_G FIELD32(0x000007c0) +#define RF4_TXPOWER_A_7DBM_BOOST FIELD32(0x00000040) +#define RF4_TXPOWER_A FIELD32(0x00000780) +#define RF4_FREQ_OFFSET FIELD32(0x001f8000) +#define RF4_HT40 FIELD32(0x00200000) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * EEPROM Version + */ +#define EEPROM_VERSION 0x0001 +#define EEPROM_VERSION_FAE FIELD16(0x00ff) +#define EEPROM_VERSION_VERSION FIELD16(0xff00) + +/* + * HW MAC address. 
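The HW_BEACON_OFFSET() macro defined above is easy to misread because the last two beacon slots grow downwards. Below is a standalone illustration (not part of this patch) that simply re-evaluates the macro with the base addresses from this header.

/* Illustration: offsets HW_BEACON_OFFSET() yields for the 8 beacon slots. */
#include <stdio.h>

#define HW_BEACON_BASE0 0x7800
#define HW_BEACON_BASE4 0x7200
#define HW_BEACON_BASE6 0x5dc0

#define HW_BEACON_OFFSET(__index) \
    ( ((__index) < 4) ? ( HW_BEACON_BASE0 + ((__index) * 0x0200) ) : \
      (((__index) < 6) ? ( HW_BEACON_BASE4 + (((__index) - 4) * 0x0200) ) : \
      (HW_BEACON_BASE6 - (((__index) - 6) * 0x0200))) )

int main(void)
{
    int i;

    /* Prints 0x7800 0x7a00 0x7c00 0x7e00 0x7200 0x7400 0x5dc0 0x5bc0:
     * BCN0-3 ascend from 0x7800, BCN4-5 ascend from 0x7200, and BCN6-7
     * descend from 0x5dc0 (BASE7 sits 0x0200 below BASE6, as the
     * comment above notes). */
    for (i = 0; i < 8; i++)
        printf("BCN%d -> 0x%04x\n", i, HW_BEACON_OFFSET(i));
    return 0;
}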
+ */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM ANTENNA config + * RXPATH: 1: 1R, 2: 2R, 3: 3R + * TXPATH: 1: 1T, 2: 2T + */ +#define EEPROM_ANTENNA 0x001a +#define EEPROM_ANTENNA_RXPATH FIELD16(0x000f) +#define EEPROM_ANTENNA_TXPATH FIELD16(0x00f0) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0f00) + +/* + * EEPROM NIC config + * CARDBUS_ACCEL: 0 - enable, 1 - disable + */ +#define EEPROM_NIC 0x001b +#define EEPROM_NIC_HW_RADIO FIELD16(0x0001) +#define EEPROM_NIC_DYNAMIC_TX_AGC FIELD16(0x0002) +#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0004) +#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0008) +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0010) +#define EEPROM_NIC_BW40M_SB_BG FIELD16(0x0020) +#define EEPROM_NIC_BW40M_SB_A FIELD16(0x0040) +#define EEPROM_NIC_WPS_PBC FIELD16(0x0080) +#define EEPROM_NIC_BW40M_BG FIELD16(0x0100) +#define EEPROM_NIC_BW40M_A FIELD16(0x0200) + +/* + * EEPROM frequency + */ +#define EEPROM_FREQ 0x001d +#define EEPROM_FREQ_OFFSET FIELD16(0x00ff) +#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00) +#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000) + +/* + * EEPROM LED + * POLARITY_RDY_G: Polarity RDY_G setting. + * POLARITY_RDY_A: Polarity RDY_A setting. + * POLARITY_ACT: Polarity ACT setting. + * POLARITY_GPIO_0: Polarity GPIO0 setting. + * POLARITY_GPIO_1: Polarity GPIO1 setting. + * POLARITY_GPIO_2: Polarity GPIO2 setting. + * POLARITY_GPIO_3: Polarity GPIO3 setting. + * POLARITY_GPIO_4: Polarity GPIO4 setting. + * LED_MODE: Led mode. + */ +#define EEPROM_LED1 0x001e +#define EEPROM_LED2 0x001f +#define EEPROM_LED3 0x0020 +#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001) +#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002) +#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004) +#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008) +#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010) +#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020) +#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040) +#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080) +#define EEPROM_LED_LED_MODE FIELD16(0x1f00) + +/* + * EEPROM LNA + */ +#define EEPROM_LNA 0x0022 +#define EEPROM_LNA_BG FIELD16(0x00ff) +#define EEPROM_LNA_A0 FIELD16(0xff00) + +/* + * EEPROM RSSI BG offset + */ +#define EEPROM_RSSI_BG 0x0023 +#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff) +#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00) + +/* + * EEPROM RSSI BG2 offset + */ +#define EEPROM_RSSI_BG2 0x0024 +#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff) +#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00) + +/* + * EEPROM RSSI A offset + */ +#define EEPROM_RSSI_A 0x0025 +#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff) +#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00) + +/* + * EEPROM RSSI A2 offset + */ +#define EEPROM_RSSI_A2 0x0026 +#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff) +#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00) + +/* + * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power. + * This is delta in 40MHZ. 
+ * VALUE: Tx Power dalta value (MAX=4) + * TYPE: 1: Plus the delta value, 0: minus the delta value + * TXPOWER: Enable: + */ +#define EEPROM_TXPOWER_DELTA 0x0028 +#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f) +#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040) +#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080) + +/* + * EEPROM TXPOWER 802.11BG + */ +#define EEPROM_TXPOWER_BG1 0x0029 +#define EEPROM_TXPOWER_BG2 0x0030 +#define EEPROM_TXPOWER_BG_SIZE 7 +#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00) + +/* + * EEPROM TXPOWER 802.11A + */ +#define EEPROM_TXPOWER_A1 0x003c +#define EEPROM_TXPOWER_A2 0x0053 +#define EEPROM_TXPOWER_A_SIZE 6 +#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_A_2 FIELD16(0xff00) + +/* + * EEPROM TXpower byrate: 20MHZ power + */ +#define EEPROM_TXPOWER_BYRATE 0x006f + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x0078 +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * MCU mailbox commands. + */ +#define MCU_SLEEP 0x30 +#define MCU_WAKEUP 0x31 +#define MCU_RADIO_OFF 0x35 +#define MCU_CURRENT 0x36 +#define MCU_LED 0x50 +#define MCU_LED_STRENGTH 0x51 +#define MCU_LED_1 0x52 +#define MCU_LED_2 0x53 +#define MCU_LED_3 0x54 +#define MCU_RADAR 0x60 +#define MCU_BOOT_SIGNAL 0x72 +#define MCU_BBP_SIGNAL 0x80 +#define MCU_POWER_SAVE 0x83 + +/* + * MCU mailbox tokens + */ +#define TOKEN_WAKUP 3 + +/* + * DMA descriptor defines. + */ +#define TXWI_DESC_SIZE ( 4 * sizeof(__le32) ) +#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) ) + +/* + * TX WI structure + */ + +/* + * Word0 + * FRAG: 1 To inform TKIP engine this is a fragment. + * MIMO_PS: The remote peer is in dynamic MIMO-PS mode + * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs + * BW: Channel bandwidth 20MHz or 40 MHz + * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED + */ +#define TXWI_W0_FRAG FIELD32(0x00000001) +#define TXWI_W0_MIMO_PS FIELD32(0x00000002) +#define TXWI_W0_CF_ACK FIELD32(0x00000004) +#define TXWI_W0_TS FIELD32(0x00000008) +#define TXWI_W0_AMPDU FIELD32(0x00000010) +#define TXWI_W0_MPDU_DENSITY FIELD32(0x000000e0) +#define TXWI_W0_TX_OP FIELD32(0x00000300) +#define TXWI_W0_MCS FIELD32(0x007f0000) +#define TXWI_W0_BW FIELD32(0x00800000) +#define TXWI_W0_SHORT_GI FIELD32(0x01000000) +#define TXWI_W0_STBC FIELD32(0x06000000) +#define TXWI_W0_IFS FIELD32(0x08000000) +#define TXWI_W0_PHYMODE FIELD32(0xc0000000) + +/* + * Word1 + */ +#define TXWI_W1_ACK FIELD32(0x00000001) +#define TXWI_W1_NSEQ FIELD32(0x00000002) +#define TXWI_W1_BW_WIN_SIZE FIELD32(0x000000fc) +#define TXWI_W1_WIRELESS_CLI_ID FIELD32(0x0000ff00) +#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000) +#define TXWI_W1_PACKETID FIELD32(0xf0000000) + +/* + * Word2 + */ +#define TXWI_W2_IV FIELD32(0xffffffff) + +/* + * Word3 + */ +#define TXWI_W3_EIV FIELD32(0xffffffff) + +/* + * RX WI structure + */ + +/* + * Word0 + */ +#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff) +#define RXWI_W0_KEY_INDEX FIELD32(0x00000300) +#define RXWI_W0_BSSID FIELD32(0x00001c00) +#define RXWI_W0_UDF FIELD32(0x0000e000) +#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000) +#define RXWI_W0_TID FIELD32(0xf0000000) + +/* + * Word1 + */ +#define RXWI_W1_FRAG FIELD32(0x0000000f) +#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0) +#define RXWI_W1_MCS FIELD32(0x007f0000) +#define RXWI_W1_BW FIELD32(0x00800000) +#define RXWI_W1_SHORT_GI FIELD32(0x01000000) +#define RXWI_W1_STBC FIELD32(0x06000000) +#define RXWI_W1_PHYMODE 
FIELD32(0xc0000000) + +/* + * Word2 + */ +#define RXWI_W2_RSSI0 FIELD32(0x000000ff) +#define RXWI_W2_RSSI1 FIELD32(0x0000ff00) +#define RXWI_W2_RSSI2 FIELD32(0x00ff0000) + +/* + * Word3 + */ +#define RXWI_W3_SNR0 FIELD32(0x000000ff) +#define RXWI_W3_SNR1 FIELD32(0x0000ff00) + +/* + * Macros for converting txpower from EEPROM to mac80211 value + * and from mac80211 value to register value. + */ +#define MIN_G_TXPOWER 0 +#define MIN_A_TXPOWER -7 +#define MAX_G_TXPOWER 31 +#define MAX_A_TXPOWER 15 +#define DEFAULT_TXPOWER 5 + +#define TXPOWER_G_FROM_DEV(__txpower) \ + ((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower) + +#define TXPOWER_G_TO_DEV(__txpower) \ + clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER) + +#define TXPOWER_A_FROM_DEV(__txpower) \ + ((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower) + +#define TXPOWER_A_TO_DEV(__txpower) \ + clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER) + +#endif /* RT2800_H */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c new file mode 100644 index 0000000..18d4d8e --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -0,0 +1,2324 @@ +/* + Copyright (C) 2009 Bartlomiej Zolnierkiewicz + Copyright (C) 2009 Gertjan van Wingerde + + Based on the original rt2800pci.c and rt2800usb.c. + Copyright (C) 2009 Ivo van Doorn + Copyright (C) 2009 Alban Browaeys + Copyright (C) 2009 Felix Fietkau + Copyright (C) 2009 Luis Correia + Copyright (C) 2009 Mattias Nissler + Copyright (C) 2009 Mark Asselstine + Copyright (C) 2009 Xose Vazquez Perez + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2800lib + Abstract: rt2800 generic device routines. + */ + +#include +#include + +#include "rt2x00.h" +#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE) +#include "rt2x00usb.h" +#endif +#if defined(CONFIG_RT2X00_LIB_PCI) || defined(CONFIG_RT2X00_LIB_PCI_MODULE) +#include "rt2x00pci.h" +#endif +#include "rt2800lib.h" +#include "rt2800.h" +#include "rt2800usb.h" + +MODULE_AUTHOR("Bartlomiej Zolnierkiewicz"); +MODULE_DESCRIPTION("rt2800 library"); +MODULE_LICENSE("GPL"); + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt2800_register_read and rt2800_register_write. + * BBP and RF register require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. + * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attampt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. 
+ * The _lock versions must be used if you already hold the csr_mutex + */ +#define WAIT_FOR_BBP(__dev, __reg) \ + rt2800_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg)) +#define WAIT_FOR_RFCSR(__dev, __reg) \ + rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg)) +#define WAIT_FOR_RF(__dev, __reg) \ + rt2800_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg)) +#define WAIT_FOR_MCU(__dev, __reg) \ + rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \ + H2M_MAILBOX_CSR_OWNER, (__reg)) + +static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the BBP becomes available, afterwards we + * can safely write the new data into the register. + */ + if (WAIT_FOR_BBP(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, BBP_CSR_CFG_VALUE, value); + rt2x00_set_field32(®, BBP_CSR_CFG_REGNUM, word); + rt2x00_set_field32(®, BBP_CSR_CFG_BUSY, 1); + rt2x00_set_field32(®, BBP_CSR_CFG_READ_CONTROL, 0); + if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) + rt2x00_set_field32(®, BBP_CSR_CFG_BBP_RW_MODE, 1); + + rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the BBP becomes available, afterwards we + * can safely write the read request into the register. + * After the data has been written, we wait until hardware + * returns the correct value, if at any time the register + * doesn't become available in time, reg will be 0xffffffff + * which means we return 0xff to the caller. + */ + if (WAIT_FOR_BBP(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, BBP_CSR_CFG_REGNUM, word); + rt2x00_set_field32(®, BBP_CSR_CFG_BUSY, 1); + rt2x00_set_field32(®, BBP_CSR_CFG_READ_CONTROL, 1); + if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) + rt2x00_set_field32(®, BBP_CSR_CFG_BBP_RW_MODE, 1); + + rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg); + + WAIT_FOR_BBP(rt2x00dev, ®); + } + + *value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE); + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the RFCSR becomes available, afterwards we + * can safely write the new data into the register. + */ + if (WAIT_FOR_RFCSR(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, RF_CSR_CFG_DATA, value); + rt2x00_set_field32(®, RF_CSR_CFG_REGNUM, word); + rt2x00_set_field32(®, RF_CSR_CFG_WRITE, 1); + rt2x00_set_field32(®, RF_CSR_CFG_BUSY, 1); + + rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the RFCSR becomes available, afterwards we + * can safely write the read request into the register. + * After the data has been written, we wait until hardware + * returns the correct value, if at any time the register + * doesn't become available in time, reg will be 0xffffffff + * which means we return 0xff to the caller. 
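The WAIT_FOR_BBP/WAIT_FOR_RFCSR/WAIT_FOR_RF/WAIT_FOR_MCU macros above all wrap rt2800_regbusy_read(), whose body is not part of this hunk. The following is a minimal sketch of the busy-bit polling pattern described by the comment at the top of this file, under the assumption that the helper behaves as documented; the name poll_until_idle() is made up for the illustration and is not a driver symbol.

/* Sketch only: poll a CSR until its BUSY field clears, giving up after
 * REGISTER_BUSY_COUNT attempts spaced REGISTER_BUSY_DELAY us apart.
 * On failure the register value is forced to ~0, which is why failed
 * BBP/RFCSR reads surface as 0xff to the caller. */
static int poll_until_idle(struct rt2x00_dev *rt2x00dev,
                           const unsigned int offset,
                           const struct rt2x00_field32 field, u32 *reg)
{
    unsigned int i;

    for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
        rt2800_register_read(rt2x00dev, offset, reg);
        if (!rt2x00_get_field32(*reg, field))
            return 1;    /* register became available */
        udelay(REGISTER_BUSY_DELAY);
    }

    *reg = ~0;
    return 0;            /* still busy: treat the access as failed */
}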
+ */ + if (WAIT_FOR_RFCSR(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, RF_CSR_CFG_REGNUM, word); + rt2x00_set_field32(®, RF_CSR_CFG_WRITE, 0); + rt2x00_set_field32(®, RF_CSR_CFG_BUSY, 1); + + rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); + + WAIT_FOR_RFCSR(rt2x00dev, ®); + } + + *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA); + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the RF becomes available, afterwards we + * can safely write the new data into the register. + */ + if (WAIT_FOR_RF(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, RF_CSR_CFG0_REG_VALUE_BW, value); + rt2x00_set_field32(®, RF_CSR_CFG0_STANDBYMODE, 0); + rt2x00_set_field32(®, RF_CSR_CFG0_SEL, 0); + rt2x00_set_field32(®, RF_CSR_CFG0_BUSY, 1); + + rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG0, reg); + rt2x00_rf_write(rt2x00dev, word, value); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, + const u8 command, const u8 token, + const u8 arg0, const u8 arg1) +{ + u32 reg; + + /* + * SOC devices don't support MCU requests. + */ + if (rt2x00_is_soc(rt2x00dev)) + return; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the MCU becomes available, afterwards we + * can safely write the new data into the register. + */ + if (WAIT_FOR_MCU(rt2x00dev, ®)) { + rt2x00_set_field32(®, H2M_MAILBOX_CSR_OWNER, 1); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_CMD_TOKEN, token); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_ARG0, arg0); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_ARG1, arg1); + rt2800_register_write_lock(rt2x00dev, H2M_MAILBOX_CSR, reg); + + reg = 0; + rt2x00_set_field32(®, HOST_CMD_CSR_HOST_COMMAND, command); + rt2800_register_write_lock(rt2x00dev, HOST_CMD_CSR, reg); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} +EXPORT_SYMBOL_GPL(rt2800_mcu_request); + +int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u32 reg; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) && + !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY)) + return 0; + + msleep(1); + } + + ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n"); + return -EACCES; +} +EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready); + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +const struct rt2x00debug rt2800_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt2800_register_read, + .write = rt2800_register_write, + .flags = RT2X00DEBUGFS_OFFSET, + .word_base = CSR_REG_BASE, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_base = EEPROM_BASE, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt2800_bbp_read, + .write = rt2800_bbp_write, + .word_base = BBP_BASE, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt2800_rf_write, + .word_base = RF_BASE, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +EXPORT_SYMBOL_GPL(rt2800_rt2x00debug); +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, ®); + return rt2x00_get_field32(reg, 
GPIO_CTRL_CFG_BIT2); +} +EXPORT_SYMBOL_GPL(rt2800_rfkill_poll); + +#ifdef CONFIG_RT2X00_LIB_LEDS +static void rt2800_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + unsigned int enabled = brightness != LED_OFF; + unsigned int bg_mode = + (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); + unsigned int polarity = + rt2x00_get_field16(led->rt2x00dev->led_mcu_reg, + EEPROM_FREQ_LED_POLARITY); + unsigned int ledmode = + rt2x00_get_field16(led->rt2x00dev->led_mcu_reg, + EEPROM_FREQ_LED_MODE); + + if (led->type == LED_TYPE_RADIO) { + rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode, + enabled ? 0x20 : 0); + } else if (led->type == LED_TYPE_ASSOC) { + rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode, + enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20); + } else if (led->type == LED_TYPE_QUALITY) { + /* + * The brightness is divided into 6 levels (0 - 5), + * The specs tell us the following levels: + * 0, 1 ,3, 7, 15, 31 + * to determine the level in a simple way we can simply + * work with bitshifting: + * (1 << level) - 1 + */ + rt2800_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff, + (1 << brightness / (LED_FULL / 6)) - 1, + polarity); + } +} + +static int rt2800_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, unsigned long *delay_off) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + u32 reg; + + rt2800_register_read(led->rt2x00dev, LED_CFG, ®); + rt2x00_set_field32(®, LED_CFG_ON_PERIOD, *delay_on); + rt2x00_set_field32(®, LED_CFG_OFF_PERIOD, *delay_off); + rt2x00_set_field32(®, LED_CFG_SLOW_BLINK_PERIOD, 3); + rt2x00_set_field32(®, LED_CFG_R_LED_MODE, 3); + rt2x00_set_field32(®, LED_CFG_G_LED_MODE, 3); + rt2x00_set_field32(®, LED_CFG_Y_LED_MODE, 3); + rt2x00_set_field32(®, LED_CFG_LED_POLAR, 1); + rt2800_register_write(led->rt2x00dev, LED_CFG, reg); + + return 0; +} + +static void rt2800_init_led(struct rt2x00_dev *rt2x00dev, + struct rt2x00_led *led, enum led_type type) +{ + led->rt2x00dev = rt2x00dev; + led->type = type; + led->led_dev.brightness_set = rt2800_brightness_set; + led->led_dev.blink_set = rt2800_blink_set; + led->flags = LED_INITIALIZED; +} +#endif /* CONFIG_RT2X00_LIB_LEDS */ + +/* + * Configuration handlers. 
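A small standalone check (not part of this patch) of the LED_TYPE_QUALITY mapping used in rt2800_brightness_set() above: dividing the 0..LED_FULL range into six levels and expanding each level with (1 << level) - 1 reproduces the 0, 1, 3, 7, 15, 31 values the comment mentions. LED_FULL is assumed to be 255, its value in the kernel's LED class API.

/* Illustration: brightness -> MCU_LED_STRENGTH argument, as computed by
 * the driver expression (1 << brightness / (LED_FULL / 6)) - 1. */
#include <stdio.h>

#define LED_FULL 255

int main(void)
{
    unsigned int brightness;

    /* Prints 0, 1, 3, 7, 15, 31 for brightness 0, 50, 100, 150, 200, 250. */
    for (brightness = 0; brightness <= 250; brightness += 50)
        printf("brightness %3u -> %u\n", brightness,
               (1 << brightness / (LED_FULL / 6)) - 1);
    return 0;
}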
+ */ +static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key) +{ + struct mac_wcid_entry wcid_entry; + struct mac_iveiv_entry iveiv_entry; + u32 offset; + u32 reg; + + offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx); + + rt2800_register_read(rt2x00dev, offset, ®); + rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_KEYTAB, + !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); + rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_CIPHER, + (crypto->cmd == SET_KEY) * crypto->cipher); + rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_BSS_IDX, + (crypto->cmd == SET_KEY) * crypto->bssidx); + rt2x00_set_field32(®, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher); + rt2800_register_write(rt2x00dev, offset, reg); + + offset = MAC_IVEIV_ENTRY(key->hw_key_idx); + + memset(&iveiv_entry, 0, sizeof(iveiv_entry)); + if ((crypto->cipher == CIPHER_TKIP) || + (crypto->cipher == CIPHER_TKIP_NO_MIC) || + (crypto->cipher == CIPHER_AES)) + iveiv_entry.iv[3] |= 0x20; + iveiv_entry.iv[3] |= key->keyidx << 6; + rt2800_register_multiwrite(rt2x00dev, offset, + &iveiv_entry, sizeof(iveiv_entry)); + + offset = MAC_WCID_ENTRY(key->hw_key_idx); + + memset(&wcid_entry, 0, sizeof(wcid_entry)); + if (crypto->cmd == SET_KEY) + memcpy(&wcid_entry, crypto->address, ETH_ALEN); + rt2800_register_multiwrite(rt2x00dev, offset, + &wcid_entry, sizeof(wcid_entry)); +} + +int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key) +{ + struct hw_key_entry key_entry; + struct rt2x00_field32 field; + u32 offset; + u32 reg; + + if (crypto->cmd == SET_KEY) { + key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx; + + memcpy(key_entry.key, crypto->key, + sizeof(key_entry.key)); + memcpy(key_entry.tx_mic, crypto->tx_mic, + sizeof(key_entry.tx_mic)); + memcpy(key_entry.rx_mic, crypto->rx_mic, + sizeof(key_entry.rx_mic)); + + offset = SHARED_KEY_ENTRY(key->hw_key_idx); + rt2800_register_multiwrite(rt2x00dev, offset, + &key_entry, sizeof(key_entry)); + } + + /* + * The cipher types are stored over multiple registers + * starting with SHARED_KEY_MODE_BASE each word will have + * 32 bits and contains the cipher types for 2 bssidx each. + * Using the correct defines correctly will cause overhead, + * so just calculate the correct offset. + */ + field.bit_offset = 4 * (key->hw_key_idx % 8); + field.bit_mask = 0x7 << field.bit_offset; + + offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8); + + rt2800_register_read(rt2x00dev, offset, ®); + rt2x00_set_field32(®, field, + (crypto->cmd == SET_KEY) * crypto->cipher); + rt2800_register_write(rt2x00dev, offset, reg); + + /* + * Update WCID information + */ + rt2800_config_wcid_attr(rt2x00dev, crypto, key); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_config_shared_key); + +int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key) +{ + struct hw_key_entry key_entry; + u32 offset; + + if (crypto->cmd == SET_KEY) { + /* + * 1 pairwise key is possible per AID, this means that the AID + * equals our hw_key_idx. Make sure the WCID starts _after_ the + * last possible shared key entry. 
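A worked instance (illustration only, not part of this patch) of the cipher-type packing that rt2800_config_shared_key() above computes by hand: eight 4-bit slots per 32-bit SHARED_KEY_MODE word, i.e. one word covers the four keys of two BSS indices. The result can be cross-checked against the SHARED_KEY_MODE_BSS*_KEY* fields defined earlier in rt2800.h.

/* Illustration: register offset and bit mask for the shared key with
 * bssidx = 1, keyidx = 2 (hw_key_idx = 4 * bssidx + keyidx = 6). */
#include <stdio.h>

#define SHARED_KEY_MODE_BASE 0x7000

int main(void)
{
    unsigned int bssidx = 1, keyidx = 2;
    unsigned int hw_key_idx = 4 * bssidx + keyidx;
    unsigned int reg = SHARED_KEY_MODE_BASE + 4 * (hw_key_idx / 8);
    unsigned int shift = 4 * (hw_key_idx % 8);

    /* Prints "reg 0x7000, mask 0x07000000", matching SHARED_KEY_MODE_BSS1_KEY2. */
    printf("reg 0x%04x, mask 0x%08x\n", reg, 0x7u << shift);
    return 0;
}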
+ */ + if (crypto->aid > (256 - 32)) + return -ENOSPC; + + key->hw_key_idx = 32 + crypto->aid; + + memcpy(key_entry.key, crypto->key, + sizeof(key_entry.key)); + memcpy(key_entry.tx_mic, crypto->tx_mic, + sizeof(key_entry.tx_mic)); + memcpy(key_entry.rx_mic, crypto->rx_mic, + sizeof(key_entry.rx_mic)); + + offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx); + rt2800_register_multiwrite(rt2x00dev, offset, + &key_entry, sizeof(key_entry)); + } + + /* + * Update WCID information + */ + rt2800_config_wcid_attr(rt2x00dev, crypto, key); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key); + +void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, + const unsigned int filter_flags) +{ + u32 reg; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. + */ + rt2800_register_read(rt2x00dev, RX_FILTER_CFG, ®); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CRC_ERROR, + !(filter_flags & FIF_FCSFAIL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_PHY_ERROR, + !(filter_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_TO_ME, + !(filter_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_VER_ERROR, 1); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_MULTICAST, + !(filter_flags & FIF_ALLMULTI)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BROADCAST, 0); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_DUPLICATE, 1); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CF_END_ACK, + !(filter_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CF_END, + !(filter_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_ACK, + !(filter_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CTS, + !(filter_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_RTS, + !(filter_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_PSPOLL, + !(filter_flags & FIF_PSPOLL)); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BA, 1); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_BAR, 0); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_CNTL, + !(filter_flags & FIF_CONTROL)); + rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg); +} +EXPORT_SYMBOL_GPL(rt2800_config_filter); + +void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, + struct rt2x00intf_conf *conf, const unsigned int flags) +{ + unsigned int beacon_base; + u32 reg; + + if (flags & CONFIG_UPDATE_TYPE) { + /* + * Clear current synchronisation setup. + * For the Beacon base registers we only need to clear + * the first byte since that byte contains the VALID and OWNER + * bits which (when set to 0) will invalidate the entire beacon. + */ + beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); + rt2800_register_write(rt2x00dev, beacon_base, 0); + + /* + * Enable synchronisation. 
+ */ + rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); + rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 1); + rt2x00_set_field32(®, BCN_TIME_CFG_TSF_SYNC, conf->sync); + rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, + (conf->sync == TSF_SYNC_BEACON)); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + } + + if (flags & CONFIG_UPDATE_MAC) { + reg = le32_to_cpu(conf->mac[1]); + rt2x00_set_field32(®, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff); + conf->mac[1] = cpu_to_le32(reg); + + rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0, + conf->mac, sizeof(conf->mac)); + } + + if (flags & CONFIG_UPDATE_BSSID) { + reg = le32_to_cpu(conf->bssid[1]); + rt2x00_set_field32(®, MAC_BSSID_DW1_BSS_ID_MASK, 0); + rt2x00_set_field32(®, MAC_BSSID_DW1_BSS_BCN_NUM, 0); + conf->bssid[1] = cpu_to_le32(reg); + + rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0, + conf->bssid, sizeof(conf->bssid)); + } +} +EXPORT_SYMBOL_GPL(rt2800_config_intf); + +void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp) +{ + u32 reg; + + rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, ®); + rt2x00_set_field32(®, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20); + rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg); + + rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, ®); + rt2x00_set_field32(®, AUTO_RSP_CFG_BAC_ACK_POLICY, + !!erp->short_preamble); + rt2x00_set_field32(®, AUTO_RSP_CFG_AR_PREAMBLE, + !!erp->short_preamble); + rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg); + + rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, ®); + rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_CTRL, + erp->cts_protection ? 2 : 0); + rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg); + + rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, + erp->basic_rates); + rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003); + + rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, ®); + rt2x00_set_field32(®, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time); + rt2x00_set_field32(®, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2); + rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg); + + rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, ®); + rt2x00_set_field32(®, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs); + rt2x00_set_field32(®, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs); + rt2x00_set_field32(®, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4); + rt2x00_set_field32(®, XIFS_TIME_CFG_EIFS, erp->eifs); + rt2x00_set_field32(®, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1); + rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg); + + rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_INTERVAL, + erp->beacon_int * 16); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); +} +EXPORT_SYMBOL_GPL(rt2800_config_erp); + +void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) +{ + u8 r1; + u8 r3; + + rt2800_bbp_read(rt2x00dev, 1, &r1); + rt2800_bbp_read(rt2x00dev, 3, &r3); + + /* + * Configure the TX antenna. + */ + switch ((int)ant->tx) { + case 1: + rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); + if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) + rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0); + break; + case 2: + rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2); + break; + case 3: + /* Do nothing */ + break; + } + + /* + * Configure the RX antenna. 
+ */ + switch ((int)ant->rx) { + case 1: + rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0); + break; + case 2: + rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1); + break; + case 3: + rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2); + break; + } + + rt2800_bbp_write(rt2x00dev, 3, r3); + rt2800_bbp_write(rt2x00dev, 1, r1); +} +EXPORT_SYMBOL_GPL(rt2800_config_ant); + +static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u16 eeprom; + short lna_gain; + + if (libconf->rf.channel <= 14) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom); + lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG); + } else if (libconf->rf.channel <= 64) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom); + lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0); + } else if (libconf->rf.channel <= 128) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom); + lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1); + } else { + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom); + lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2); + } + + rt2x00dev->lna_gain = lna_gain; +} + +static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, + struct rf_channel *rf, + struct channel_info *info) +{ + rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); + + if (rt2x00dev->default_ant.tx == 1) + rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1); + + if (rt2x00dev->default_ant.rx == 1) { + rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1); + rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1); + } else if (rt2x00dev->default_ant.rx == 2) + rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1); + + if (rf->channel > 14) { + /* + * When TX power is below 0, we should increase it by 7 to + * make it a positive value (Minumum value is -7). + * However this means that values between 0 and 7 have + * double meaning, and we should set a 7DBm boost flag. 
+ */ + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST, + (info->tx_power1 >= 0)); + + if (info->tx_power1 < 0) + info->tx_power1 += 7; + + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A, + TXPOWER_A_TO_DEV(info->tx_power1)); + + rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST, + (info->tx_power2 >= 0)); + + if (info->tx_power2 < 0) + info->tx_power2 += 7; + + rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A, + TXPOWER_A_TO_DEV(info->tx_power2)); + } else { + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G, + TXPOWER_G_TO_DEV(info->tx_power1)); + rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G, + TXPOWER_G_TO_DEV(info->tx_power2)); + } + + rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf)); + + rt2800_rf_write(rt2x00dev, 1, rf->rf1); + rt2800_rf_write(rt2x00dev, 2, rf->rf2); + rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt2800_rf_write(rt2x00dev, 4, rf->rf4); + + udelay(200); + + rt2800_rf_write(rt2x00dev, 1, rf->rf1); + rt2800_rf_write(rt2x00dev, 2, rf->rf2); + rt2800_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004); + rt2800_rf_write(rt2x00dev, 4, rf->rf4); + + udelay(200); + + rt2800_rf_write(rt2x00dev, 1, rf->rf1); + rt2800_rf_write(rt2x00dev, 2, rf->rf2); + rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt2800_rf_write(rt2x00dev, 4, rf->rf4); +} + +static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, + struct rf_channel *rf, + struct channel_info *info) +{ + u8 rfcsr; + + rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1); + rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3); + + rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2); + rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, + TXPOWER_G_TO_DEV(info->tx_power1)); + rt2800_rfcsr_write(rt2x00dev, 12, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset); + rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); + + rt2800_rfcsr_write(rt2x00dev, 24, + rt2x00dev->calibration[conf_is_ht40(conf)]); + + rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); + rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); +} + +static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, + struct rf_channel *rf, + struct channel_info *info) +{ + u32 reg; + unsigned int tx_pin; + u8 bbp; + + if ((rt2x00_rt(rt2x00dev, RT3070) || + rt2x00_rt(rt2x00dev, RT3090)) && + (rt2x00_rf(rt2x00dev, RF2020) || + rt2x00_rf(rt2x00dev, RF3020) || + rt2x00_rf(rt2x00dev, RF3021) || + rt2x00_rf(rt2x00dev, RF3022))) + rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info); + else + rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info); + + /* + * Change BBP settings + */ + rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); + rt2800_bbp_write(rt2x00dev, 86, 0); + + if (rf->channel <= 14) { + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { + rt2800_bbp_write(rt2x00dev, 82, 0x62); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + } else { + rt2800_bbp_write(rt2x00dev, 82, 0x84); + rt2800_bbp_write(rt2x00dev, 75, 0x50); + } + } else { + rt2800_bbp_write(rt2x00dev, 82, 0xf2); + + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) + rt2800_bbp_write(rt2x00dev, 75, 0x46); + else + rt2800_bbp_write(rt2x00dev, 75, 0x50); + } + + 
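A standalone illustration (not part of this patch) of the 7 dBm boost encoding that rt2800_config_channel_rt2x() above applies to 802.11a TX power: negative EEPROM values (-7..-1) are shifted up by 7 with the boost bit cleared, while non-negative values keep the boost bit set, so raw values 0..7 become unambiguous again. The helper encode_a_txpower() is made up for the example; the driver additionally clamps the result with TXPOWER_A_TO_DEV().

#include <stdio.h>

/* Mirror of the RF3/RF4 TXPOWER_A + 7DBM_BOOST encoding used above. */
static void encode_a_txpower(int txpower, unsigned int *boost, unsigned int *value)
{
    *boost = (txpower >= 0);
    if (txpower < 0)
        txpower += 7;
    *value = txpower;
}

int main(void)
{
    unsigned int boost, value;

    encode_a_txpower(-3, &boost, &value);
    printf("-3 dBm -> boost %u, value %u\n", boost, value);  /* boost 0, value 4 */

    encode_a_txpower(5, &boost, &value);
    printf(" 5 dBm -> boost %u, value %u\n", boost, value);  /* boost 1, value 5 */
    return 0;
}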
rt2800_register_read(rt2x00dev, TX_BAND_CFG, ®); + rt2x00_set_field32(®, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf)); + rt2x00_set_field32(®, TX_BAND_CFG_A, rf->channel > 14); + rt2x00_set_field32(®, TX_BAND_CFG_BG, rf->channel <= 14); + rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg); + + tx_pin = 0; + + /* Turn on unused PA or LNA when not using 1T or 1R */ + if (rt2x00dev->default_ant.tx != 1) { + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1); + } + + /* Turn on unused PA or LNA when not using 1T or 1R */ + if (rt2x00dev->default_ant.rx != 1) { + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1); + } + + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14); + + rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); + + rt2800_bbp_read(rt2x00dev, 4, &bbp); + rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf)); + rt2800_bbp_write(rt2x00dev, 4, bbp); + + rt2800_bbp_read(rt2x00dev, 3, &bbp); + rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf)); + rt2800_bbp_write(rt2x00dev, 3, bbp); + + if (rt2x00_rt(rt2x00dev, RT2860) && + (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) { + if (conf_is_ht40(conf)) { + rt2800_bbp_write(rt2x00dev, 69, 0x1a); + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + rt2800_bbp_write(rt2x00dev, 73, 0x16); + } else { + rt2800_bbp_write(rt2x00dev, 69, 0x16); + rt2800_bbp_write(rt2x00dev, 70, 0x08); + rt2800_bbp_write(rt2x00dev, 73, 0x11); + } + } + + msleep(1); +} + +static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + u32 reg; + u32 value = TXPOWER_G_TO_DEV(txpower); + u8 r1; + + rt2800_bbp_read(rt2x00dev, 1, &r1); + rt2x00_set_field8(®, BBP1_TX_POWER, 0); + rt2800_bbp_write(rt2x00dev, 1, r1); + + rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, ®); + rt2x00_set_field32(®, TX_PWR_CFG_0_1MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_0_2MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_0_55MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_0_11MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_0_6MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_0_9MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_0_12MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_0_18MBS, value); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, reg); + + rt2800_register_read(rt2x00dev, TX_PWR_CFG_1, ®); + rt2x00_set_field32(®, TX_PWR_CFG_1_24MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_1_36MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_1_48MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_1_54MBS, value); + rt2x00_set_field32(®, TX_PWR_CFG_1_MCS0, value); + rt2x00_set_field32(®, TX_PWR_CFG_1_MCS1, value); + rt2x00_set_field32(®, TX_PWR_CFG_1_MCS2, value); + rt2x00_set_field32(®, TX_PWR_CFG_1_MCS3, value); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, reg); + + rt2800_register_read(rt2x00dev, TX_PWR_CFG_2, ®); + rt2x00_set_field32(®, TX_PWR_CFG_2_MCS4, value); + rt2x00_set_field32(®, TX_PWR_CFG_2_MCS5, value); + rt2x00_set_field32(®, TX_PWR_CFG_2_MCS6, value); + rt2x00_set_field32(®, TX_PWR_CFG_2_MCS7, value); + rt2x00_set_field32(®, TX_PWR_CFG_2_MCS8, value); + rt2x00_set_field32(®, TX_PWR_CFG_2_MCS9, value); + 
rt2x00_set_field32(®, TX_PWR_CFG_2_MCS10, value); + rt2x00_set_field32(®, TX_PWR_CFG_2_MCS11, value); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, reg); + + rt2800_register_read(rt2x00dev, TX_PWR_CFG_3, ®); + rt2x00_set_field32(®, TX_PWR_CFG_3_MCS12, value); + rt2x00_set_field32(®, TX_PWR_CFG_3_MCS13, value); + rt2x00_set_field32(®, TX_PWR_CFG_3_MCS14, value); + rt2x00_set_field32(®, TX_PWR_CFG_3_MCS15, value); + rt2x00_set_field32(®, TX_PWR_CFG_3_UKNOWN1, value); + rt2x00_set_field32(®, TX_PWR_CFG_3_UKNOWN2, value); + rt2x00_set_field32(®, TX_PWR_CFG_3_UKNOWN3, value); + rt2x00_set_field32(®, TX_PWR_CFG_3_UKNOWN4, value); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, reg); + + rt2800_register_read(rt2x00dev, TX_PWR_CFG_4, ®); + rt2x00_set_field32(®, TX_PWR_CFG_4_UKNOWN5, value); + rt2x00_set_field32(®, TX_PWR_CFG_4_UKNOWN6, value); + rt2x00_set_field32(®, TX_PWR_CFG_4_UKNOWN7, value); + rt2x00_set_field32(®, TX_PWR_CFG_4_UKNOWN8, value); + rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, reg); +} + +static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt2800_register_read(rt2x00dev, TX_RTY_CFG, ®); + rt2x00_set_field32(®, TX_RTY_CFG_SHORT_RTY_LIMIT, + libconf->conf->short_frame_max_tx_count); + rt2x00_set_field32(®, TX_RTY_CFG_LONG_RTY_LIMIT, + libconf->conf->long_frame_max_tx_count); + rt2x00_set_field32(®, TX_RTY_CFG_LONG_RTY_THRE, 2000); + rt2x00_set_field32(®, TX_RTY_CFG_NON_AGG_RTY_MODE, 0); + rt2x00_set_field32(®, TX_RTY_CFG_AGG_RTY_MODE, 0); + rt2x00_set_field32(®, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1); + rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg); +} + +static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + enum dev_state state = + (libconf->conf->flags & IEEE80211_CONF_PS) ? 
+ STATE_SLEEP : STATE_AWAKE; + u32 reg; + + if (state == STATE_SLEEP) { + rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0); + + rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, ®); + rt2x00_set_field32(®, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5); + rt2x00_set_field32(®, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, + libconf->conf->listen_interval - 1); + rt2x00_set_field32(®, AUTOWAKEUP_CFG_AUTOWAKE, 1); + rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg); + + rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); + } else { + rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); + + rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, ®); + rt2x00_set_field32(®, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0); + rt2x00_set_field32(®, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0); + rt2x00_set_field32(®, AUTOWAKEUP_CFG_AUTOWAKE, 0); + rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg); + } +} + +void rt2800_config(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf, + const unsigned int flags) +{ + /* Always recalculate LNA gain before changing configuration */ + rt2800_config_lna_gain(rt2x00dev, libconf); + + if (flags & IEEE80211_CONF_CHANGE_CHANNEL) + rt2800_config_channel(rt2x00dev, libconf->conf, + &libconf->rf, &libconf->channel); + if (flags & IEEE80211_CONF_CHANGE_POWER) + rt2800_config_txpower(rt2x00dev, libconf->conf->power_level); + if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) + rt2800_config_retry_limit(rt2x00dev, libconf); + if (flags & IEEE80211_CONF_CHANGE_PS) + rt2800_config_ps(rt2x00dev, libconf); +} +EXPORT_SYMBOL_GPL(rt2800_config); + +/* + * Link tuning + */ +void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) +{ + u32 reg; + + /* + * Update FCS error count from register. + */ + rt2800_register_read(rt2x00dev, RX_STA_CNT0, ®); + qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR); +} +EXPORT_SYMBOL_GPL(rt2800_link_stats); + +static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev) +{ + if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { + if (rt2x00_is_usb(rt2x00dev) && + rt2x00_rt(rt2x00dev, RT3070) && + (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) + return 0x1c + (2 * rt2x00dev->lna_gain); + else + return 0x2e + rt2x00dev->lna_gain; + } + + if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) + return 0x32 + (rt2x00dev->lna_gain * 5) / 3; + else + return 0x3a + (rt2x00dev->lna_gain * 5) / 3; +} + +static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual, u8 vgc_level) +{ + if (qual->vgc_level != vgc_level) { + rt2800_bbp_write(rt2x00dev, 66, vgc_level); + qual->vgc_level = vgc_level; + qual->vgc_level_reg = vgc_level; + } +} + +void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) +{ + rt2800_set_vgc(rt2x00dev, qual, rt2800_get_default_vgc(rt2x00dev)); +} +EXPORT_SYMBOL_GPL(rt2800_reset_tuner); + +void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, + const u32 count) +{ + if (rt2x00_rt(rt2x00dev, RT2860) && + (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) + return; + + /* + * When RSSI is better then -80 increase VGC level with 0x10 + */ + rt2800_set_vgc(rt2x00dev, qual, + rt2800_get_default_vgc(rt2x00dev) + + ((qual->rssi > -80) * 0x10)); +} +EXPORT_SYMBOL_GPL(rt2800_link_tuner); + +/* + * Initialization functions. + */ +int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + unsigned int i; + + if (rt2x00_is_usb(rt2x00dev)) { + /* + * Wait until BBP and RF are ready. 
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, MAC_CSR0, ®); + if (reg && reg != ~0) + break; + msleep(1); + } + + if (i == REGISTER_BUSY_COUNT) { + ERROR(rt2x00dev, "Unstable hardware.\n"); + return -EBUSY; + } + + rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, ®); + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, + reg & ~0x00002000); + } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); + + rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); + rt2x00_set_field32(®, MAC_SYS_CTRL_RESET_CSR, 1); + rt2x00_set_field32(®, MAC_SYS_CTRL_RESET_BBP, 1); + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); + + if (rt2x00_is_usb(rt2x00dev)) { + rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); +#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE) + rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, + USB_MODE_RESET, REGISTER_TIMEOUT); +#endif + } + + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); + + rt2800_register_read(rt2x00dev, BCN_OFFSET0, ®); + rt2x00_set_field32(®, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */ + rt2x00_set_field32(®, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */ + rt2x00_set_field32(®, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */ + rt2x00_set_field32(®, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */ + rt2800_register_write(rt2x00dev, BCN_OFFSET0, reg); + + rt2800_register_read(rt2x00dev, BCN_OFFSET1, ®); + rt2x00_set_field32(®, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */ + rt2x00_set_field32(®, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */ + rt2x00_set_field32(®, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */ + rt2x00_set_field32(®, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */ + rt2800_register_write(rt2x00dev, BCN_OFFSET1, reg); + + rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f); + rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003); + + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); + + rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_INTERVAL, 0); + rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 0); + rt2x00_set_field32(®, BCN_TIME_CFG_TSF_SYNC, 0); + rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 0); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 0); + rt2x00_set_field32(®, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + + if (rt2x00_is_usb(rt2x00dev) && + rt2x00_rt(rt2x00dev, RT3070) && + (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) { + rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); + rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); + rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); + } else { + rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); + rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); + } + + rt2800_register_read(rt2x00dev, TX_LINK_CFG, ®); + rt2x00_set_field32(®, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32); + rt2x00_set_field32(®, TX_LINK_CFG_MFB_ENABLE, 0); + rt2x00_set_field32(®, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0); + rt2x00_set_field32(®, TX_LINK_CFG_TX_MRQ_EN, 0); + rt2x00_set_field32(®, TX_LINK_CFG_TX_RDG_EN, 0); + rt2x00_set_field32(®, TX_LINK_CFG_TX_CF_ACK_EN, 1); + rt2x00_set_field32(®, TX_LINK_CFG_REMOTE_MFB, 0); + rt2x00_set_field32(®, TX_LINK_CFG_REMOTE_MFS, 0); + rt2800_register_write(rt2x00dev, TX_LINK_CFG, reg); + + rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, ®); + rt2x00_set_field32(®, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9); + rt2x00_set_field32(®, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 
10); + rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg); + + rt2800_register_read(rt2x00dev, MAX_LEN_CFG, ®); + rt2x00_set_field32(®, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE); + if ((rt2x00_rt(rt2x00dev, RT2872) && + (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION)) || + rt2x00_rt(rt2x00dev, RT2880) || + rt2x00_rt(rt2x00dev, RT2883) || + rt2x00_rt(rt2x00dev, RT2890) || + rt2x00_rt(rt2x00dev, RT3052) || + (rt2x00_rt(rt2x00dev, RT3070) && + (rt2x00_rev(rt2x00dev) < RT3070_VERSION))) + rt2x00_set_field32(®, MAX_LEN_CFG_MAX_PSDU, 2); + else + rt2x00_set_field32(®, MAX_LEN_CFG_MAX_PSDU, 1); + rt2x00_set_field32(®, MAX_LEN_CFG_MIN_PSDU, 0); + rt2x00_set_field32(®, MAX_LEN_CFG_MIN_MPDU, 0); + rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); + + rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f); + + rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, ®); + rt2x00_set_field32(®, AUTO_RSP_CFG_AUTORESPONDER, 1); + rt2x00_set_field32(®, AUTO_RSP_CFG_CTS_40_MMODE, 0); + rt2x00_set_field32(®, AUTO_RSP_CFG_CTS_40_MREF, 0); + rt2x00_set_field32(®, AUTO_RSP_CFG_DUAL_CTS_EN, 0); + rt2x00_set_field32(®, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0); + rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg); + + rt2800_register_read(rt2x00dev, CCK_PROT_CFG, ®); + rt2x00_set_field32(®, CCK_PROT_CFG_PROTECT_RATE, 8); + rt2x00_set_field32(®, CCK_PROT_CFG_PROTECT_CTRL, 0); + rt2x00_set_field32(®, CCK_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1); + rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1); + rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1); + rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1); + rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1); + rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1); + rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg); + + rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, ®); + rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_RATE, 8); + rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_CTRL, 0); + rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1); + rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1); + rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1); + rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1); + rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1); + rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1); + rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg); + + rt2800_register_read(rt2x00dev, MM20_PROT_CFG, ®); + rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_RATE, 0x4004); + rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_CTRL, 0); + rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1); + rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); + rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1); + rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0); + rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1); + rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0); + rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg); + + rt2800_register_read(rt2x00dev, MM40_PROT_CFG, ®); + rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_RATE, 0x4084); + rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_CTRL, 0); + rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1); + rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); + rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1); + rt2x00_set_field32(®, 
MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1); + rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1); + rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1); + rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg); + + rt2800_register_read(rt2x00dev, GF20_PROT_CFG, ®); + rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_RATE, 0x4004); + rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_CTRL, 0); + rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1); + rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); + rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1); + rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0); + rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1); + rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0); + rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg); + + rt2800_register_read(rt2x00dev, GF40_PROT_CFG, ®); + rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_RATE, 0x4084); + rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_CTRL, 0); + rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1); + rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); + rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1); + rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1); + rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1); + rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1); + rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg); + + if (rt2x00_is_usb(rt2x00dev)) { + rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006); + + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 3); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_BIG_ENDIAN, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_HDR_SCATTER, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_HDR_SEG_LEN, 0); + rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + } + + rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f); + rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002); + + rt2800_register_read(rt2x00dev, TX_RTS_CFG, ®); + rt2x00_set_field32(®, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32); + rt2x00_set_field32(®, TX_RTS_CFG_RTS_THRES, + IEEE80211_MAX_RTS_THRESHOLD); + rt2x00_set_field32(®, TX_RTS_CFG_RTS_FBK_EN, 0); + rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg); + + rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca); + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); + + /* + * ASIC will keep garbage value after boot, clear encryption keys. + */ + for (i = 0; i < 4; i++) + rt2800_register_write(rt2x00dev, + SHARED_KEY_MODE_ENTRY(i), 0); + + for (i = 0; i < 256; i++) { + u32 wcid[2] = { 0xffffffff, 0x00ffffff }; + rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i), + wcid, sizeof(wcid)); + + rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1); + rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0); + } + + /* + * Clear all beacons + * For the Beacon base registers we only need to clear + * the first byte since that byte contains the VALID and OWNER + * bits which (when set to 0) will invalidate the entire beacon. 
+ */ + rt2800_register_write(rt2x00dev, HW_BEACON_BASE0, 0); + rt2800_register_write(rt2x00dev, HW_BEACON_BASE1, 0); + rt2800_register_write(rt2x00dev, HW_BEACON_BASE2, 0); + rt2800_register_write(rt2x00dev, HW_BEACON_BASE3, 0); + rt2800_register_write(rt2x00dev, HW_BEACON_BASE4, 0); + rt2800_register_write(rt2x00dev, HW_BEACON_BASE5, 0); + rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0); + rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0); + + if (rt2x00_is_usb(rt2x00dev)) { + rt2800_register_read(rt2x00dev, USB_CYC_CFG, ®); + rt2x00_set_field32(®, USB_CYC_CFG_CLOCK_CYCLE, 30); + rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg); + } + + rt2800_register_read(rt2x00dev, HT_FBK_CFG0, ®); + rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS0FBK, 0); + rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS1FBK, 0); + rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS2FBK, 1); + rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS3FBK, 2); + rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS4FBK, 3); + rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS5FBK, 4); + rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS6FBK, 5); + rt2x00_set_field32(®, HT_FBK_CFG0_HTMCS7FBK, 6); + rt2800_register_write(rt2x00dev, HT_FBK_CFG0, reg); + + rt2800_register_read(rt2x00dev, HT_FBK_CFG1, ®); + rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS8FBK, 8); + rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS9FBK, 8); + rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS10FBK, 9); + rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS11FBK, 10); + rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS12FBK, 11); + rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS13FBK, 12); + rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS14FBK, 13); + rt2x00_set_field32(®, HT_FBK_CFG1_HTMCS15FBK, 14); + rt2800_register_write(rt2x00dev, HT_FBK_CFG1, reg); + + rt2800_register_read(rt2x00dev, LG_FBK_CFG0, ®); + rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS0FBK, 8); + rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS1FBK, 8); + rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS2FBK, 9); + rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS3FBK, 10); + rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS4FBK, 11); + rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS5FBK, 12); + rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS6FBK, 13); + rt2x00_set_field32(®, LG_FBK_CFG0_OFDMMCS7FBK, 14); + rt2800_register_write(rt2x00dev, LG_FBK_CFG0, reg); + + rt2800_register_read(rt2x00dev, LG_FBK_CFG1, ®); + rt2x00_set_field32(®, LG_FBK_CFG0_CCKMCS0FBK, 0); + rt2x00_set_field32(®, LG_FBK_CFG0_CCKMCS1FBK, 0); + rt2x00_set_field32(®, LG_FBK_CFG0_CCKMCS2FBK, 1); + rt2x00_set_field32(®, LG_FBK_CFG0_CCKMCS3FBK, 2); + rt2800_register_write(rt2x00dev, LG_FBK_CFG1, reg); + + /* + * We must clear the error counters. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. 
+ */ + rt2800_register_read(rt2x00dev, RX_STA_CNT0, ®); + rt2800_register_read(rt2x00dev, RX_STA_CNT1, ®); + rt2800_register_read(rt2x00dev, RX_STA_CNT2, ®); + rt2800_register_read(rt2x00dev, TX_STA_CNT0, ®); + rt2800_register_read(rt2x00dev, TX_STA_CNT1, ®); + rt2800_register_read(rt2x00dev, TX_STA_CNT2, ®); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_init_registers); + +static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u32 reg; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, MAC_STATUS_CFG, ®); + if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY)) + return 0; + + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n"); + return -EACCES; +} + +static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u8 value; + + /* + * BBP was enabled after firmware was loaded, + * but we need to reactivate it now. + */ + rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); + rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); + msleep(1); + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + return 0; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; +} + +int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || + rt2800_wait_bbp_ready(rt2x00dev))) + return -EACCES; + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + rt2800_bbp_write(rt2x00dev, 81, 0x37); + rt2800_bbp_write(rt2x00dev, 82, 0x62); + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + rt2800_bbp_write(rt2x00dev, 84, 0x99); + rt2800_bbp_write(rt2x00dev, 86, 0x00); + rt2800_bbp_write(rt2x00dev, 91, 0x04); + rt2800_bbp_write(rt2x00dev, 92, 0x00); + rt2800_bbp_write(rt2x00dev, 103, 0x00); + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + if (rt2x00_rt(rt2x00dev, RT2860) && + (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) { + rt2800_bbp_write(rt2x00dev, 69, 0x16); + rt2800_bbp_write(rt2x00dev, 73, 0x12); + } + + if (rt2x00_rt(rt2x00dev, RT2860) && + (rt2x00_rev(rt2x00dev) > RT2860D_VERSION)) + rt2800_bbp_write(rt2x00dev, 84, 0x19); + + if (rt2x00_is_usb(rt2x00dev) && + rt2x00_rt(rt2x00dev, RT3070) && + (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) { + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + rt2800_bbp_write(rt2x00dev, 84, 0x99); + rt2800_bbp_write(rt2x00dev, 105, 0x05); + } + + if (rt2x00_rt(rt2x00dev, RT3052)) { + rt2800_bbp_write(rt2x00dev, 31, 0x08); + rt2800_bbp_write(rt2x00dev, 78, 0x0e); + rt2800_bbp_write(rt2x00dev, 80, 0x08); + } + + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + rt2800_bbp_write(rt2x00dev, reg_id, value); + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_init_bbp); + +static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev, + bool bw40, u8 rfcsr24, u8 filter_target) +{ + unsigned int i; + u8 bbp; + u8 rfcsr; + u8 passband; + u8 stopband; + u8 overtuned = 0; + + rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24); + + rt2800_bbp_read(rt2x00dev, 4, &bbp); + 
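rt2800_wait_bbp_rf_ready() and rt2800_wait_bbp_ready() above both follow the same bounded-polling pattern: probe a status register a fixed number of times with a short delay in between, and give up with an error once the budget is spent. A stripped-down sketch of that pattern, with invented example_* names and an assumed microsecond-delay primitive:

#include <stdbool.h>

/* Budget comparable to REGISTER_BUSY_COUNT / REGISTER_BUSY_DELAY;
 * the concrete values here are illustrative only. */
#define EXAMPLE_BUSY_COUNT	100
#define EXAMPLE_BUSY_DELAY_US	100

/* Platform-provided busy delay, assumed to exist for this sketch. */
extern void example_udelay(unsigned int usec);

/* Poll a caller-supplied "still busy?" probe until it reports idle or the
 * retry budget runs out. Returns 0 on success, -1 on timeout. */
static int example_wait_ready(bool (*still_busy)(void *ctx), void *ctx)
{
	unsigned int i;

	for (i = 0; i < EXAMPLE_BUSY_COUNT; i++) {
		if (!still_busy(ctx))
			return 0;
		example_udelay(EXAMPLE_BUSY_DELAY_US);
	}

	return -1;
}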
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40); + rt2800_bbp_write(rt2x00dev, 4, bbp); + + rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1); + rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); + + /* + * Set power & frequency of passband test tone + */ + rt2800_bbp_write(rt2x00dev, 24, 0); + + for (i = 0; i < 100; i++) { + rt2800_bbp_write(rt2x00dev, 25, 0x90); + msleep(1); + + rt2800_bbp_read(rt2x00dev, 55, &passband); + if (passband) + break; + } + + /* + * Set power & frequency of stopband test tone + */ + rt2800_bbp_write(rt2x00dev, 24, 0x06); + + for (i = 0; i < 100; i++) { + rt2800_bbp_write(rt2x00dev, 25, 0x90); + msleep(1); + + rt2800_bbp_read(rt2x00dev, 55, &stopband); + + if ((passband - stopband) <= filter_target) { + rfcsr24++; + overtuned += ((passband - stopband) == filter_target); + } else + break; + + rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24); + } + + rfcsr24 -= !!overtuned; + + rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24); + return rfcsr24; +} + +int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) +{ + u8 rfcsr; + u8 bbp; + + if (rt2x00_is_usb(rt2x00dev) && + rt2x00_rt(rt2x00dev, RT3070) && + (rt2x00_rev(rt2x00dev) != RT3070_VERSION)) + return 0; + + if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) { + if (!rt2x00_rf(rt2x00dev, RF3020) && + !rt2x00_rf(rt2x00dev, RF3021) && + !rt2x00_rf(rt2x00dev, RF3022)) + return 0; + } + + /* + * Init RF calibration. + */ + rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); + msleep(1); + rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); + + if (rt2x00_is_usb(rt2x00dev)) { + rt2800_rfcsr_write(rt2x00dev, 4, 0x40); + rt2800_rfcsr_write(rt2x00dev, 5, 0x03); + rt2800_rfcsr_write(rt2x00dev, 6, 0x02); + rt2800_rfcsr_write(rt2x00dev, 7, 0x70); + rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); + rt2800_rfcsr_write(rt2x00dev, 10, 0x71); + rt2800_rfcsr_write(rt2x00dev, 11, 0x21); + rt2800_rfcsr_write(rt2x00dev, 12, 0x7b); + rt2800_rfcsr_write(rt2x00dev, 14, 0x90); + rt2800_rfcsr_write(rt2x00dev, 15, 0x58); + rt2800_rfcsr_write(rt2x00dev, 16, 0xb3); + rt2800_rfcsr_write(rt2x00dev, 17, 0x92); + rt2800_rfcsr_write(rt2x00dev, 18, 0x2c); + rt2800_rfcsr_write(rt2x00dev, 19, 0x02); + rt2800_rfcsr_write(rt2x00dev, 20, 0xba); + rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); + rt2800_rfcsr_write(rt2x00dev, 24, 0x16); + rt2800_rfcsr_write(rt2x00dev, 25, 0x01); + rt2800_rfcsr_write(rt2x00dev, 27, 0x03); + rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); + } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) { + rt2800_rfcsr_write(rt2x00dev, 0, 0x50); + rt2800_rfcsr_write(rt2x00dev, 1, 0x01); + rt2800_rfcsr_write(rt2x00dev, 2, 0xf7); + rt2800_rfcsr_write(rt2x00dev, 3, 0x75); + rt2800_rfcsr_write(rt2x00dev, 4, 0x40); + rt2800_rfcsr_write(rt2x00dev, 5, 0x03); + rt2800_rfcsr_write(rt2x00dev, 6, 0x02); + rt2800_rfcsr_write(rt2x00dev, 7, 0x50); + rt2800_rfcsr_write(rt2x00dev, 8, 0x39); + rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); + rt2800_rfcsr_write(rt2x00dev, 10, 0x60); + rt2800_rfcsr_write(rt2x00dev, 11, 0x21); + rt2800_rfcsr_write(rt2x00dev, 12, 0x75); + rt2800_rfcsr_write(rt2x00dev, 13, 0x75); + rt2800_rfcsr_write(rt2x00dev, 14, 0x90); + rt2800_rfcsr_write(rt2x00dev, 15, 0x58); + rt2800_rfcsr_write(rt2x00dev, 16, 0xb3); + rt2800_rfcsr_write(rt2x00dev, 17, 0x92); + rt2800_rfcsr_write(rt2x00dev, 18, 0x2c); + rt2800_rfcsr_write(rt2x00dev, 19, 0x02); + rt2800_rfcsr_write(rt2x00dev, 20, 
0xba); + rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); + rt2800_rfcsr_write(rt2x00dev, 22, 0x00); + rt2800_rfcsr_write(rt2x00dev, 23, 0x31); + rt2800_rfcsr_write(rt2x00dev, 24, 0x08); + rt2800_rfcsr_write(rt2x00dev, 25, 0x01); + rt2800_rfcsr_write(rt2x00dev, 26, 0x25); + rt2800_rfcsr_write(rt2x00dev, 27, 0x23); + rt2800_rfcsr_write(rt2x00dev, 28, 0x13); + rt2800_rfcsr_write(rt2x00dev, 29, 0x83); + } + + /* + * Set RX Filter calibration for 20MHz and 40MHz + */ + rt2x00dev->calibration[0] = + rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16); + rt2x00dev->calibration[1] = + rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19); + + /* + * Set back to initial state + */ + rt2800_bbp_write(rt2x00dev, 24, 0); + + rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0); + rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); + + /* + * set BBP back to BW20 + */ + rt2800_bbp_read(rt2x00dev, 4, &bbp); + rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0); + rt2800_bbp_write(rt2x00dev, 4, bbp); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_init_rfcsr); + +int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2800_register_read(rt2x00dev, EFUSE_CTRL, ®); + + return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT); +} +EXPORT_SYMBOL_GPL(rt2800_efuse_detect); + +static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + rt2800_register_read_lock(rt2x00dev, EFUSE_CTRL, ®); + rt2x00_set_field32(®, EFUSE_CTRL_ADDRESS_IN, i); + rt2x00_set_field32(®, EFUSE_CTRL_MODE, 0); + rt2x00_set_field32(®, EFUSE_CTRL_KICK, 1); + rt2800_register_write_lock(rt2x00dev, EFUSE_CTRL, reg); + + /* Wait until the EEPROM has been loaded */ + rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, ®); + + /* Apparently the data is read from end to start */ + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, + (u32 *)&rt2x00dev->eeprom[i]); + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, + (u32 *)&rt2x00dev->eeprom[i + 2]); + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, + (u32 *)&rt2x00dev->eeprom[i + 4]); + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, + (u32 *)&rt2x00dev->eeprom[i + 6]); + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + + for (i = 0; i < EEPROM_SIZE / sizeof(u16); i += 8) + rt2800_efuse_read(rt2x00dev, i); +} +EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse); + +int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u16 word; + u8 *mac; + u8 default_lna_gain; + + /* + * Start validation of the data that has been read. 
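Each rt2800_efuse_read() above pulls one 16-byte eFuse block through the four 32-bit EFUSE_DATA registers and, as the comment notes, the data arrives end-to-start: EFUSE_DATA3 fills the lowest EEPROM words of the block and EFUSE_DATA0 the highest. A host-side sketch of that unpacking (invented example_* names, endianness conversion left out):

#include <stdint.h>
#include <string.h>

/* One eFuse block: four 32-bit data registers, in the order DATA3, DATA2,
 * DATA1, DATA0, cover eight consecutive 16-bit EEPROM words. */
static void example_unpack_efuse_block(const uint32_t data[4],
				       uint16_t *eeprom,
				       unsigned int first_word)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		memcpy(&eeprom[first_word + 2 * i], &data[i], sizeof(data[i]));
}

Such a helper would be called with first_word = 0, 8, 16, ... in turn, mirroring the i += 8 loop in rt2800_read_eeprom_efuse().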
+ */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %pM\n", mac); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } else if (rt2x00_rt(rt2x00dev, RT2860) || + rt2x00_rt(rt2x00dev, RT2870) || + rt2x00_rt(rt2x00dev, RT2872) || + rt2x00_rt(rt2x00dev, RT2880) || + (rt2x00_rt(rt2x00dev, RT2883) && + (rt2x00_rev(rt2x00dev) < RT2883_VERSION))) { + /* + * There is a max of 2 RX streams for RT28x0 series + */ + if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2) + rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0); + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0); + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0); + rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); + rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0); + rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0); + rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0); + rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0); + rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); + if ((word & 0x00ff) == 0x00ff) { + rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); + rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE, + LED_MODE_TXRX_ACTIVITY); + rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); + rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555); + rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221); + rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8); + EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); + } + + /* + * During the LNA validation we are going to use + * lna0 as correct value. Note that EEPROM_LNA + * is never validated. 
+ */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word); + default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word); + if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0); + if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word); + if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0); + if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 || + rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff) + rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1, + default_lna_gain); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word); + if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0); + if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word); + if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0); + if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 || + rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff) + rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2, + default_lna_gain); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_validate_eeprom); + +int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2800_register_read(rt2x00dev, MAC_CSR0, ®); + + rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), + value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); + + if (!rt2x00_rt(rt2x00dev, RT2860) && + !rt2x00_rt(rt2x00dev, RT2870) && + !rt2x00_rt(rt2x00dev, RT2872) && + !rt2x00_rt(rt2x00dev, RT2880) && + !rt2x00_rt(rt2x00dev, RT2883) && + !rt2x00_rt(rt2x00dev, RT2890) && + !rt2x00_rt(rt2x00dev, RT3052) && + !rt2x00_rt(rt2x00dev, RT3070) && + !rt2x00_rt(rt2x00dev, RT3071) && + !rt2x00_rt(rt2x00dev, RT3090) && + !rt2x00_rt(rt2x00dev, RT3390) && + !rt2x00_rt(rt2x00dev, RT3572)) { + ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); + return -ENODEV; + } + + if (!rt2x00_rf(rt2x00dev, RF2820) && + !rt2x00_rf(rt2x00dev, RF2850) && + !rt2x00_rf(rt2x00dev, RF2720) && + !rt2x00_rf(rt2x00dev, RF2750) && + !rt2x00_rf(rt2x00dev, RF3020) && + !rt2x00_rf(rt2x00dev, RF2020) && + !rt2x00_rf(rt2x00dev, RF3021) && + !rt2x00_rf(rt2x00dev, RF3022) && + !rt2x00_rf(rt2x00dev, RF3052)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->default_ant.tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH); + rt2x00dev->default_ant.rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH); + + /* + * Read frequency offset and RF programming sequence. 
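The RSSI/LNA validation above boils down to two simple rules: per-chain RSSI offsets outside +/-10 are considered bogus and reset to 0, and LNA gain entries of 0x00 or 0xff are replaced by the LNA_A0 value used as the default. Isolated as a sketch (names invented here):

#include <stdint.h>
#include <stdlib.h>

/* Offsets beyond +/-10 are treated as corrupt calibration data. */
static int8_t example_sanitize_rssi_offset(int8_t offset)
{
	return (abs(offset) > 10) ? 0 : offset;
}

/* All-zero or all-ones LNA gain entries fall back to a known-good value. */
static uint8_t example_sanitize_lna_gain(uint8_t gain, uint8_t default_gain)
{
	return (gain == 0x00 || gain == 0xff) ? default_gain : gain;
}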
+ */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); + rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); + + /* + * Read external LNA informations. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A)) + __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG)) + __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + + /* + * Detect if this device has an hardware controlled radio. + */ + if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); + + /* + * Store led settings, for correct led behaviour. + */ +#ifdef CONFIG_RT2X00_LIB_LEDS + rt2800_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); + rt2800_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC); + rt2800_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg); +#endif /* CONFIG_RT2X00_LIB_LEDS */ + + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_init_eeprom); + +/* + * RF value list for rt28x0 + * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750) + */ +static const struct rf_channel rf_vals[] = { + { 1, 0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b }, + { 2, 0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f }, + { 3, 0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b }, + { 4, 0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f }, + { 5, 0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b }, + { 6, 0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f }, + { 7, 0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b }, + { 8, 0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f }, + { 9, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b }, + { 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f }, + { 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b }, + { 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f }, + { 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b }, + { 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 }, + { 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 }, + { 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 }, + { 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 }, + { 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b }, + { 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b }, + { 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 }, + { 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 }, + { 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b }, + { 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 }, + { 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 }, + { 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 }, + + /* 802.11 HyperLan 2 */ + { 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 }, + { 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 }, + { 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 }, + { 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 }, + { 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 }, + { 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b }, + { 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 }, + { 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 }, + { 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 }, + { 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 }, + { 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b }, + { 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 }, + { 132, 
0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b }, + { 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 }, + { 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b }, + { 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 }, + + /* 802.11 UNII */ + { 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 }, + { 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 }, + { 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f }, + { 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f }, + { 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 }, + { 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 }, + { 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 }, + { 167, 0x18402ec4, 0x184c03d2, 0x18179855, 0x1815531f }, + { 169, 0x18402ec4, 0x184c03d2, 0x18179855, 0x18155327 }, + { 171, 0x18402ec4, 0x184c03d6, 0x18179855, 0x18155307 }, + { 173, 0x18402ec4, 0x184c03d6, 0x18179855, 0x1815530f }, + + /* 802.11 Japan */ + { 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b }, + { 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 }, + { 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b }, + { 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 }, + { 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 }, + { 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b }, + { 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 }, +}; + +/* + * RF value list for rt3070 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_302x[] = { + {1, 241, 2, 2 }, + {2, 241, 2, 7 }, + {3, 242, 2, 2 }, + {4, 242, 2, 7 }, + {5, 243, 2, 2 }, + {6, 243, 2, 7 }, + {7, 244, 2, 2 }, + {8, 244, 2, 7 }, + {9, 245, 2, 2 }, + {10, 245, 2, 7 }, + {11, 246, 2, 2 }, + {12, 246, 2, 7 }, + {13, 247, 2, 2 }, + {14, 248, 2, 4 }, +}; + +int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + struct channel_info *info; + char *tx_power1; + char *tx_power2; + unsigned int i; + u16 eeprom; + + /* + * Disable powersaving as default on PCI devices. + */ + if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) + rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_PS_NULLFUNC_STACK; + + SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Initialize hw_mode information. + */ + spec->supported_bands = SUPPORT_BAND_2GHZ; + spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; + + if (rt2x00_rf(rt2x00dev, RF2820) || + rt2x00_rf(rt2x00dev, RF2720) || + rt2x00_rf(rt2x00dev, RF3052)) { + spec->num_channels = 14; + spec->channels = rf_vals; + } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) { + spec->supported_bands |= SUPPORT_BAND_5GHZ; + spec->num_channels = ARRAY_SIZE(rf_vals); + spec->channels = rf_vals; + } else if (rt2x00_rf(rt2x00dev, RF3020) || + rt2x00_rf(rt2x00dev, RF2020) || + rt2x00_rf(rt2x00dev, RF3021) || + rt2x00_rf(rt2x00dev, RF3022)) { + spec->num_channels = ARRAY_SIZE(rf_vals_302x); + spec->channels = rf_vals_302x; + } + + /* + * Initialize HT information. 
+ */ + if (!rt2x00_rf(rt2x00dev, RF2020)) + spec->ht.ht_supported = true; + else + spec->ht.ht_supported = false; + + spec->ht.cap = + IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_TX_STBC | + IEEE80211_HT_CAP_RX_STBC; + spec->ht.ampdu_factor = 3; + spec->ht.ampdu_density = 4; + spec->ht.mcs.tx_params = + IEEE80211_HT_MCS_TX_DEFINED | + IEEE80211_HT_MCS_TX_RX_DIFF | + ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + + switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) { + case 3: + spec->ht.mcs.rx_mask[2] = 0xff; + case 2: + spec->ht.mcs.rx_mask[1] = 0xff; + case 1: + spec->ht.mcs.rx_mask[0] = 0xff; + spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */ + break; + } + + /* + * Create channel information array + */ + info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + spec->channels_info = info; + + tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); + tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); + + for (i = 0; i < 14; i++) { + info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]); + info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]); + } + + if (spec->num_channels > 14) { + tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); + tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); + + for (i = 14; i < spec->num_channels; i++) { + info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]); + info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]); + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode); + +/* + * IEEE80211 stack callback functions. + */ +static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, + u32 *iv32, u16 *iv16) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct mac_iveiv_entry iveiv_entry; + u32 offset; + + offset = MAC_IVEIV_ENTRY(hw_key_idx); + rt2800_register_multiread(rt2x00dev, offset, + &iveiv_entry, sizeof(iveiv_entry)); + + memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16)); + memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32)); +} + +static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD); + + rt2800_register_read(rt2x00dev, TX_RTS_CFG, ®); + rt2x00_set_field32(®, TX_RTS_CFG_RTS_THRES, value); + rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg); + + rt2800_register_read(rt2x00dev, CCK_PROT_CFG, ®); + rt2x00_set_field32(®, CCK_PROT_CFG_RTS_TH_EN, enabled); + rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg); + + rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, ®); + rt2x00_set_field32(®, OFDM_PROT_CFG_RTS_TH_EN, enabled); + rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg); + + rt2800_register_read(rt2x00dev, MM20_PROT_CFG, ®); + rt2x00_set_field32(®, MM20_PROT_CFG_RTS_TH_EN, enabled); + rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg); + + rt2800_register_read(rt2x00dev, MM40_PROT_CFG, ®); + rt2x00_set_field32(®, MM40_PROT_CFG_RTS_TH_EN, enabled); + rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg); + + rt2800_register_read(rt2x00dev, GF20_PROT_CFG, ®); + rt2x00_set_field32(®, GF20_PROT_CFG_RTS_TH_EN, enabled); + rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg); + + rt2800_register_read(rt2x00dev, GF40_PROT_CFG, ®); + rt2x00_set_field32(®, GF40_PROT_CFG_RTS_TH_EN, enabled); + rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg); + + return 0; +} + +static int 
rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, + const struct ieee80211_tx_queue_params *params) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct data_queue *queue; + struct rt2x00_field32 field; + int retval; + u32 reg; + u32 offset; + + /* + * First pass the configuration through rt2x00lib, that will + * update the queue settings and validate the input. After that + * we are free to update the registers based on the value + * in the queue parameter. + */ + retval = rt2x00mac_conf_tx(hw, queue_idx, params); + if (retval) + return retval; + + /* + * We only need to perform additional register initialization + * for WMM queues/ + */ + if (queue_idx >= 4) + return 0; + + queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + + /* Update WMM TXOP register */ + offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2))); + field.bit_offset = (queue_idx & 1) * 16; + field.bit_mask = 0xffff << field.bit_offset; + + rt2800_register_read(rt2x00dev, offset, ®); + rt2x00_set_field32(®, field, queue->txop); + rt2800_register_write(rt2x00dev, offset, reg); + + /* Update WMM registers */ + field.bit_offset = queue_idx * 4; + field.bit_mask = 0xf << field.bit_offset; + + rt2800_register_read(rt2x00dev, WMM_AIFSN_CFG, ®); + rt2x00_set_field32(®, field, queue->aifs); + rt2800_register_write(rt2x00dev, WMM_AIFSN_CFG, reg); + + rt2800_register_read(rt2x00dev, WMM_CWMIN_CFG, ®); + rt2x00_set_field32(®, field, queue->cw_min); + rt2800_register_write(rt2x00dev, WMM_CWMIN_CFG, reg); + + rt2800_register_read(rt2x00dev, WMM_CWMAX_CFG, ®); + rt2x00_set_field32(®, field, queue->cw_max); + rt2800_register_write(rt2x00dev, WMM_CWMAX_CFG, reg); + + /* Update EDCA registers */ + offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx); + + rt2800_register_read(rt2x00dev, offset, ®); + rt2x00_set_field32(®, EDCA_AC0_CFG_TX_OP, queue->txop); + rt2x00_set_field32(®, EDCA_AC0_CFG_AIFSN, queue->aifs); + rt2x00_set_field32(®, EDCA_AC0_CFG_CWMIN, queue->cw_min); + rt2x00_set_field32(®, EDCA_AC0_CFG_CWMAX, queue->cw_max); + rt2800_register_write(rt2x00dev, offset, reg); + + return 0; +} + +static u64 rt2800_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt2800_register_read(rt2x00dev, TSF_TIMER_DW1, ®); + tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32; + rt2800_register_read(rt2x00dev, TSF_TIMER_DW0, ®); + tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD); + + return tsf; +} + +const struct ieee80211_ops rt2800_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .configure_filter = rt2x00mac_configure_filter, + .set_tim = rt2x00mac_set_tim, + .set_key = rt2x00mac_set_key, + .get_stats = rt2x00mac_get_stats, + .get_tkip_seq = rt2800_get_tkip_seq, + .set_rts_threshold = rt2800_set_rts_threshold, + .bss_info_changed = rt2x00mac_bss_info_changed, + .conf_tx = rt2800_conf_tx, + .get_tsf = rt2800_get_tsf, + .rfkill_poll = rt2x00mac_rfkill_poll, +}; +EXPORT_SYMBOL_GPL(rt2800_mac80211_ops); diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h new file mode 100644 index 0000000..ebabeae --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2800lib.h @@ -0,0 +1,150 @@ +/* + Copyright (C) 2009 Bartlomiej Zolnierkiewicz + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as 
published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef RT2800LIB_H +#define RT2800LIB_H + +struct rt2800_ops { + void (*register_read)(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, u32 *value); + void (*register_read_lock)(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, u32 *value); + void (*register_write)(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, u32 value); + void (*register_write_lock)(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, u32 value); + + void (*register_multiread)(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + void *value, const u32 length); + void (*register_multiwrite)(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const void *value, const u32 length); + + int (*regbusy_read)(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const struct rt2x00_field32 field, u32 *reg); +}; + +static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 *value) +{ + const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + + rt2800ops->register_read(rt2x00dev, offset, value); +} + +static inline void rt2800_register_read_lock(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 *value) +{ + const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + + rt2800ops->register_read_lock(rt2x00dev, offset, value); +} + +static inline void rt2800_register_write(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 value) +{ + const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + + rt2800ops->register_write(rt2x00dev, offset, value); +} + +static inline void rt2800_register_write_lock(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 value) +{ + const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + + rt2800ops->register_write_lock(rt2x00dev, offset, value); +} + +static inline void rt2800_register_multiread(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + void *value, const u32 length) +{ + const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + + rt2800ops->register_multiread(rt2x00dev, offset, value, length); +} + +static inline void rt2800_register_multiwrite(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const void *value, + const u32 length) +{ + const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + + rt2800ops->register_multiwrite(rt2x00dev, offset, value, length); +} + +static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const struct rt2x00_field32 field, + u32 *reg) +{ + const struct rt2800_ops *rt2800ops = rt2x00dev->priv; + + return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg); +} + +void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, + const u8 command, const u8 token, + const u8 arg0, const u8 arg1); + +extern const struct rt2x00debug rt2800_rt2x00debug; + +int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev); +int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + 
struct ieee80211_key_conf *key); +int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key); +void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, + const unsigned int filter_flags); +void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, + struct rt2x00intf_conf *conf, const unsigned int flags); +void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp); +void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant); +void rt2800_config(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf, + const unsigned int flags); +void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual); +void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual); +void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, + const u32 count); + +int rt2800_init_registers(struct rt2x00_dev *rt2x00dev); +int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev); +int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev); +int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev); + +int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); +void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); +int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev); +int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev); +int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev); + +extern const struct ieee80211_ops rt2800_mac80211_ops; + +#endif /* RT2800LIB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c new file mode 100644 index 0000000..91cce2d --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -0,0 +1,1288 @@ +/* + Copyright (C) 2009 Ivo van Doorn + Copyright (C) 2009 Alban Browaeys + Copyright (C) 2009 Felix Fietkau + Copyright (C) 2009 Luis Correia + Copyright (C) 2009 Mattias Nissler + Copyright (C) 2009 Mark Asselstine + Copyright (C) 2009 Xose Vazquez Perez + Copyright (C) 2009 Bart Zolnierkiewicz + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2800pci + Abstract: rt2800pci device specific routines. + Supported chipsets: RT2800E & RT2800ED. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00pci.h" +#include "rt2x00soc.h" +#include "rt2800lib.h" +#include "rt2800.h" +#include "rt2800pci.h" + +/* + * Allow hardware encryption to be disabled. 
+ */ +static int modparam_nohwcrypt = 1; +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); + +static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token) +{ + unsigned int i; + u32 reg; + + for (i = 0; i < 200; i++) { + rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, ®); + + if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) || + (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) || + (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD2) == token) || + (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD3) == token)) + break; + + udelay(REGISTER_BUSY_DELAY); + } + + if (i == 200) + ERROR(rt2x00dev, "MCU request failed, no response from hardware\n"); + + rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); + rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); +} + +#ifdef CONFIG_RT2800PCI_SOC +static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) +{ + u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */ + + memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE); +} +#else +static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) +{ +} +#endif /* CONFIG_RT2800PCI_SOC */ + +#ifdef CONFIG_RT2800PCI_PCI +static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg; + + rt2800_register_read(rt2x00dev, E2PROM_CSR, ®); + + eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN); + eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT); + eeprom->reg_data_clock = + !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK); + eeprom->reg_chip_select = + !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT); +} + +static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg = 0; + + rt2x00_set_field32(®, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in); + rt2x00_set_field32(®, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out); + rt2x00_set_field32(®, E2PROM_CSR_DATA_CLOCK, + !!eeprom->reg_data_clock); + rt2x00_set_field32(®, E2PROM_CSR_CHIP_SELECT, + !!eeprom->reg_chip_select); + + rt2800_register_write(rt2x00dev, E2PROM_CSR, reg); +} + +static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) +{ + struct eeprom_93cx6 eeprom; + u32 reg; + + rt2800_register_read(rt2x00dev, E2PROM_CSR, ®); + + eeprom.data = rt2x00dev; + eeprom.register_read = rt2800pci_eepromregister_read; + eeprom.register_write = rt2800pci_eepromregister_write; + eeprom.width = !rt2x00_get_field32(reg, E2PROM_CSR_TYPE) ? 
+ PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + eeprom.reg_data_in = 0; + eeprom.reg_data_out = 0; + eeprom.reg_data_clock = 0; + eeprom.reg_chip_select = 0; + + eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, + EEPROM_SIZE / sizeof(u16)); +} + +static int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev) +{ + return rt2800_efuse_detect(rt2x00dev); +} + +static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) +{ + rt2800_read_eeprom_efuse(rt2x00dev); +} +#else +static inline void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) +{ +} + +static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev) +{ + return 0; +} + +static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) +{ +} +#endif /* CONFIG_RT2800PCI_PCI */ + +/* + * Firmware functions + */ +static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) +{ + return FIRMWARE_RT2860; +} + +static int rt2800pci_check_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len) +{ + u16 fw_crc; + u16 crc; + + /* + * Only support 8kb firmware files. + */ + if (len != 8192) + return FW_BAD_LENGTH; + + /* + * The last 2 bytes in the firmware array are the crc checksum itself, + * this means that we should never pass those 2 bytes to the crc + * algorithm. + */ + fw_crc = (data[len - 2] << 8 | data[len - 1]); + + /* + * Use the crc ccitt algorithm. + * This will return the same value as the legacy driver which + * used bit ordering reversion on the both the firmware bytes + * before input input as well as on the final output. + * Obviously using crc ccitt directly is much more efficient. + */ + crc = crc_ccitt(~0, data, len - 2); + + /* + * There is a small difference between the crc-itu-t + bitrev and + * the crc-ccitt crc calculation. In the latter method the 2 bytes + * will be swapped, use swab16 to convert the crc to the correct + * value. + */ + crc = swab16(crc); + + return (fw_crc == crc) ? FW_OK : FW_BAD_CRC; +} + +static int rt2800pci_load_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len) +{ + unsigned int i; + u32 reg; + + /* + * Wait for stable hardware. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, MAC_CSR0, ®); + if (reg && reg != ~0) + break; + msleep(1); + } + + if (i == REGISTER_BUSY_COUNT) { + ERROR(rt2x00dev, "Unstable hardware.\n"); + return -EBUSY; + } + + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); + rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000); + + /* + * Disable DMA, will be reenabled later when enabling + * the radio. + */ + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); + rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + + /* + * enable Host program ram write selection + */ + reg = 0; + rt2x00_set_field32(®, PBF_SYS_CTRL_HOST_RAM_WRITE, 1); + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg); + + /* + * Write firmware to device. + */ + rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, + data, len); + + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000); + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001); + + /* + * Wait for device to stabilize. 
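rt2800pci_check_firmware() above validates the 8 KiB image by running crc_ccitt() (seeded with ~0) over everything except the last two bytes, byte-swapping the result, and comparing it against the big-endian CRC stored at the end of the image. A self-contained sketch of that check; the bitwise CRC below is written out only so the example compiles on its own and is intended to match the kernel's crc_ccitt():

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Reflected CRC-CCITT (polynomial 0x8408, LSB first). */
static uint16_t example_crc_ccitt(uint16_t crc, const uint8_t *data, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	}
	return crc;
}

/* The last two bytes hold the CRC (big-endian) over the preceding bytes;
 * the computed CRC must be byte-swapped before the comparison. */
static bool example_firmware_crc_ok(const uint8_t *data, size_t len)
{
	uint16_t fw_crc, crc;

	if (len < 2)
		return false;

	fw_crc = (uint16_t)((data[len - 2] << 8) | data[len - 1]);
	crc = example_crc_ccitt(0xffff, data, len - 2);
	crc = (uint16_t)((crc << 8) | (crc >> 8));	/* swab16 */

	return fw_crc == crc;
}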
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, ®); + if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY)) + break; + msleep(1); + } + + if (i == REGISTER_BUSY_COUNT) { + ERROR(rt2x00dev, "PBF system register not ready.\n"); + return -EBUSY; + } + + /* + * Disable interrupts + */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF); + + /* + * Initialize BBP R/W access agent + */ + rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); + rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); + + return 0; +} + +/* + * Initialization functions. + */ +static bool rt2800pci_get_entry_state(struct queue_entry *entry) +{ + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + u32 word; + + if (entry->queue->qid == QID_RX) { + rt2x00_desc_read(entry_priv->desc, 1, &word); + + return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE)); + } else { + rt2x00_desc_read(entry_priv->desc, 1, &word); + + return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE)); + } +} + +static void rt2800pci_clear_entry(struct queue_entry *entry) +{ + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + u32 word; + + if (entry->queue->qid == QID_RX) { + rt2x00_desc_read(entry_priv->desc, 0, &word); + rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma); + rt2x00_desc_write(entry_priv->desc, 0, word); + + rt2x00_desc_read(entry_priv->desc, 1, &word); + rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0); + rt2x00_desc_write(entry_priv->desc, 1, word); + } else { + rt2x00_desc_read(entry_priv->desc, 1, &word); + rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1); + rt2x00_desc_write(entry_priv->desc, 1, word); + } +} + +static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) +{ + struct queue_entry_priv_pci *entry_priv; + u32 reg; + + rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, ®); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX0, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX1, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX2, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX3, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX4, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX5, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DRX_IDX0, 1); + rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); + + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); + + /* + * Initialize registers. 
+ */ + entry_priv = rt2x00dev->tx[0].entries[0].priv_data; + rt2800_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma); + rt2800_register_write(rt2x00dev, TX_MAX_CNT0, rt2x00dev->tx[0].limit); + rt2800_register_write(rt2x00dev, TX_CTX_IDX0, 0); + rt2800_register_write(rt2x00dev, TX_DTX_IDX0, 0); + + entry_priv = rt2x00dev->tx[1].entries[0].priv_data; + rt2800_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma); + rt2800_register_write(rt2x00dev, TX_MAX_CNT1, rt2x00dev->tx[1].limit); + rt2800_register_write(rt2x00dev, TX_CTX_IDX1, 0); + rt2800_register_write(rt2x00dev, TX_DTX_IDX1, 0); + + entry_priv = rt2x00dev->tx[2].entries[0].priv_data; + rt2800_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma); + rt2800_register_write(rt2x00dev, TX_MAX_CNT2, rt2x00dev->tx[2].limit); + rt2800_register_write(rt2x00dev, TX_CTX_IDX2, 0); + rt2800_register_write(rt2x00dev, TX_DTX_IDX2, 0); + + entry_priv = rt2x00dev->tx[3].entries[0].priv_data; + rt2800_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma); + rt2800_register_write(rt2x00dev, TX_MAX_CNT3, rt2x00dev->tx[3].limit); + rt2800_register_write(rt2x00dev, TX_CTX_IDX3, 0); + rt2800_register_write(rt2x00dev, TX_DTX_IDX3, 0); + + entry_priv = rt2x00dev->rx->entries[0].priv_data; + rt2800_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma); + rt2800_register_write(rt2x00dev, RX_MAX_CNT, rt2x00dev->rx[0].limit); + rt2800_register_write(rt2x00dev, RX_CRX_IDX, rt2x00dev->rx[0].limit - 1); + rt2800_register_write(rt2x00dev, RX_DRX_IDX, 0); + + /* + * Enable global DMA configuration + */ + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); + rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + + rt2800_register_write(rt2x00dev, DELAY_INT_CFG, 0); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, + (state == STATE_RADIO_RX_ON) || + (state == STATE_RADIO_RX_ON_LINK)); + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); +} + +static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int mask = (state == STATE_RADIO_IRQ_ON); + u32 reg; + + /* + * When interrupts are being enabled, the interrupt registers + * should clear the register to assure a clean state. 
+ */ + if (state == STATE_RADIO_IRQ_ON) { + rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, ®); + rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg); + } + + rt2800_register_read(rt2x00dev, INT_MASK_CSR, ®); + rt2x00_set_field32(®, INT_MASK_CSR_RXDELAYINT, mask); + rt2x00_set_field32(®, INT_MASK_CSR_TXDELAYINT, mask); + rt2x00_set_field32(®, INT_MASK_CSR_RX_DONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_AC0_DMA_DONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_AC1_DMA_DONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_AC2_DMA_DONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_AC3_DMA_DONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_HCCA_DMA_DONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_MGMT_DMA_DONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_MCU_COMMAND, mask); + rt2x00_set_field32(®, INT_MASK_CSR_RXTX_COHERENT, mask); + rt2x00_set_field32(®, INT_MASK_CSR_TBTT, mask); + rt2x00_set_field32(®, INT_MASK_CSR_PRE_TBTT, mask); + rt2x00_set_field32(®, INT_MASK_CSR_TX_FIFO_STATUS, mask); + rt2x00_set_field32(®, INT_MASK_CSR_AUTO_WAKEUP, mask); + rt2x00_set_field32(®, INT_MASK_CSR_GPTIMER, mask); + rt2x00_set_field32(®, INT_MASK_CSR_RX_COHERENT, mask); + rt2x00_set_field32(®, INT_MASK_CSR_TX_COHERENT, mask); + rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); +} + +static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 word; + + /* + * Initialize all registers. + */ + if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) || + rt2800pci_init_queues(rt2x00dev) || + rt2800_init_registers(rt2x00dev) || + rt2800_wait_wpdma_ready(rt2x00dev) || + rt2800_init_bbp(rt2x00dev) || + rt2800_init_rfcsr(rt2x00dev))) + return -EIO; + + /* + * Send signal to firmware during boot time. + */ + rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0); + + /* + * Enable RX. 
+ */ + rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 1); + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, 0); + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); + + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1); + rt2x00_set_field32(®, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); + rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + + rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 1); + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, 1); + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); + + /* + * Initialize LED control + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word); + rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff, + word & 0xff, (word >> 8) & 0xff); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word); + rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff, + word & 0xff, (word >> 8) & 0xff); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word); + rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff, + word & 0xff, (word >> 8) & 0xff); + + return 0; +} + +static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); + rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0); + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); + rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); + + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280); + + rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, ®); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX0, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX1, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX2, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX3, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX4, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX5, 1); + rt2x00_set_field32(®, WPDMA_RST_IDX_DRX_IDX0, 1); + rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); + + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); + rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); + + /* Wait for DMA, ignore error */ + rt2800_wait_wpdma_ready(rt2x00dev); +} + +static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + /* + * Always put the device to sleep (even when we intend to wakeup!) + * if the device is booting and wasn't asleep it will return + * failure when attempting to wakeup. + */ + rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2); + + if (state == STATE_AWAKE) { + rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0); + rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP); + } + + return 0; +} + +static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + /* + * Before the radio can be enabled, the device first has + * to be woken up. After that it needs a bit of time + * to be fully awake and then the radio can be enabled. 
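The LED initialisation in rt2800pci_enable_radio() above hands each 16-bit EEPROM_LED word to the MCU as two byte arguments: the low byte as arg0 and the high byte as arg1. Spelled out as a small sketch with invented names:

#include <stdint.h>

/* Split a 16-bit EEPROM word into the two byte-sized MCU arguments. */
static void example_split_led_word(uint16_t word, uint8_t *arg0, uint8_t *arg1)
{
	*arg0 = word & 0xff;
	*arg1 = (word >> 8) & 0xff;
}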
+ */ + rt2800pci_set_state(rt2x00dev, STATE_AWAKE); + msleep(1); + retval = rt2800pci_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + /* + * After the radio has been disabled, the device should + * be put to sleep for powersaving. + */ + rt2800pci_disable_radio(rt2x00dev); + rt2800pci_set_state(rt2x00dev, STATE_SLEEP); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_ON_LINK: + case STATE_RADIO_RX_OFF: + case STATE_RADIO_RX_OFF_LINK: + rt2800pci_toggle_rx(rt2x00dev, state); + break; + case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_OFF: + rt2800pci_toggle_irq(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2800pci_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + if (unlikely(retval)) + ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", + state, retval); + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct txentry_desc *txdesc) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + __le32 *txd = skbdesc->desc; + __le32 *txwi = (__le32 *)(skb->data - rt2x00dev->ops->extra_tx_headroom); + u32 word; + + /* + * Initialize TX Info descriptor + */ + rt2x00_desc_read(txwi, 0, &word); + rt2x00_set_field32(&word, TXWI_W0_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0); + rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0); + rt2x00_set_field32(&word, TXWI_W0_TS, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W0_AMPDU, + test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density); + rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs); + rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs); + rt2x00_set_field32(&word, TXWI_W0_BW, + test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W0_SHORT_GI, + test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc); + rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode); + rt2x00_desc_write(txwi, 0, word); + + rt2x00_desc_read(txwi, 1, &word); + rt2x00_set_field32(&word, TXWI_W1_ACK, + test_bit(ENTRY_TXD_ACK, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W1_NSEQ, + test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size); + rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID, + test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? + txdesc->key_idx : 0xff); + rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, + skb->len - txdesc->l2pad); + rt2x00_set_field32(&word, TXWI_W1_PACKETID, + skbdesc->entry->queue->qid + 1); + rt2x00_desc_write(txwi, 1, word); + + /* + * Always write 0 to IV/EIV fields, hardware will insert the IV + * from the IVEIV register when TXD_W3_WIV is set to 0. + * When TXD_W3_WIV is set to 1 it will use the IV data + * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which + * crypto entry in the registers should be used to encrypt the frame. + */ + _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */); + _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */); + + /* + * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1 + * must contains a TXWI structure + 802.11 header + padding + 802.11 + * data. 
We choose to have SD_PTR0/SD_LEN0 only contains TXWI and + * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11 + * data. It means that LAST_SEC0 is always 0. + */ + + /* + * Initialize TX descriptor + */ + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma); + rt2x00_desc_write(txd, 0, word); + + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_SD_LEN1, skb->len); + rt2x00_set_field32(&word, TXD_W1_LAST_SEC1, + !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W1_BURST, + test_bit(ENTRY_TXD_BURST, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W1_SD_LEN0, + rt2x00dev->ops->extra_tx_headroom); + rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0); + rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_SD_PTR1, + skbdesc->skb_dma + rt2x00dev->ops->extra_tx_headroom); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 3, &word); + rt2x00_set_field32(&word, TXD_W3_WIV, + !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W3_QSEL, 2); + rt2x00_desc_write(txd, 3, word); +} + +/* + * TX data initialization + */ +static void rt2800pci_write_beacon(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + unsigned int beacon_base; + u32 reg; + + /* + * Disable beaconing while we are reloading the beacon data, + * otherwise we might be sending out invalid data. + */ + rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 0); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + + /* + * Write entire beacon with descriptor to register. + */ + beacon_base = HW_BEACON_OFFSET(entry->entry_idx); + rt2800_register_multiwrite(rt2x00dev, + beacon_base, + skbdesc->desc, skbdesc->desc_len); + rt2800_register_multiwrite(rt2x00dev, + beacon_base + skbdesc->desc_len, + entry->skb->data, entry->skb->len); + + /* + * Clean up beacon skb. 
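The comment above describes the two-segment TX layout: SD_PTR0/SD_LEN0 cover only the TXWI (the extra TX headroom), SD_PTR1/SD_LEN1 cover the 802.11 header, padding and payload, so LAST_SEC0 can stay 0. A sketch of how such a split falls out of a single DMA address (hypothetical names and structure, not the driver's descriptor format):

#include <stdint.h>

/* Hypothetical two-segment layout: segment 0 carries only the TXWI,
 * segment 1 carries the 802.11 header, padding and payload. */
struct example_tx_segments {
	uint32_t sd_ptr0, sd_len0;	/* TXWI */
	uint32_t sd_ptr1, sd_len1;	/* 802.11 frame */
};

static struct example_tx_segments
example_split_tx_buffer(uint32_t dma_addr, uint32_t txwi_len, uint32_t frame_len)
{
	struct example_tx_segments seg = {
		.sd_ptr0 = dma_addr,
		.sd_len0 = txwi_len,
		.sd_ptr1 = dma_addr + txwi_len,
		.sd_len1 = frame_len,
	};

	return seg;
}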
+ */ + dev_kfree_skb_any(entry->skb); + entry->skb = NULL; +} + +static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue_idx) +{ + struct data_queue *queue; + unsigned int idx, qidx = 0; + u32 reg; + + if (queue_idx == QID_BEACON) { + rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); + if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) { + rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 1); + rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 1); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 1); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + } + return; + } + + if (queue_idx > QID_HCCA && queue_idx != QID_MGMT) + return; + + queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + idx = queue->index[Q_INDEX]; + + if (queue_idx == QID_MGMT) + qidx = 5; + else + qidx = queue_idx; + + rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), idx); +} + +static void rt2800pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid qid) +{ + u32 reg; + + if (qid == QID_BEACON) { + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0); + return; + } + + rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, ®); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX0, (qid == QID_AC_BE)); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX1, (qid == QID_AC_BK)); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX2, (qid == QID_AC_VI)); + rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX3, (qid == QID_AC_VO)); + rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); +} + +/* + * RX control handlers + */ +static void rt2800pci_fill_rxdone(struct queue_entry *entry, + struct rxdone_entry_desc *rxdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + __le32 *rxd = entry_priv->desc; + __le32 *rxwi = (__le32 *)entry->skb->data; + u32 rxd3; + u32 rxwi0; + u32 rxwi1; + u32 rxwi2; + u32 rxwi3; + + rt2x00_desc_read(rxd, 3, &rxd3); + rt2x00_desc_read(rxwi, 0, &rxwi0); + rt2x00_desc_read(rxwi, 1, &rxwi1); + rt2x00_desc_read(rxwi, 2, &rxwi2); + rt2x00_desc_read(rxwi, 3, &rxwi3); + + if (rt2x00_get_field32(rxd3, RXD_W3_CRC_ERROR)) + rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; + + if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { + /* + * Unfortunately we don't know the cipher type used during + * decryption. This prevents us from correct providing + * correct statistics through debugfs. + */ + rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF); + rxdesc->cipher_status = + rt2x00_get_field32(rxd3, RXD_W3_CIPHER_ERROR); + } + + if (rt2x00_get_field32(rxd3, RXD_W3_DECRYPTED)) { + /* + * Hardware has stripped IV/EIV data from 802.11 frame during + * decryption. Unfortunately the descriptor doesn't contain + * any fields with the EIV/IV data either, so they can't + * be restored by rt2x00lib. + */ + rxdesc->flags |= RX_FLAG_IV_STRIPPED; + + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) + rxdesc->flags |= RX_FLAG_DECRYPTED; + else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) + rxdesc->flags |= RX_FLAG_MMIC_ERROR; + } + + if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS)) + rxdesc->dev_flags |= RXDONE_MY_BSS; + + if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) + rxdesc->dev_flags |= RXDONE_L2PAD; + + if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI)) + rxdesc->flags |= RX_FLAG_SHORT_GI; + + if (rt2x00_get_field32(rxwi1, RXWI_W1_BW)) + rxdesc->flags |= RX_FLAG_40MHZ; + + /* + * Detect RX rate, always use MCS as signal type. 
+ */
+	rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
+	rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
+	rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
+
+	/*
+	 * Mask out the 0x8 bit to remove the short preamble flag.
+	 */
+	if (rxdesc->rate_mode == RATE_MODE_CCK)
+		rxdesc->signal &= ~0x8;
+
+	rxdesc->rssi =
+	    (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
+	     rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
+
+	rxdesc->noise =
+	    (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
+	     rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
+
+	rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+
+	/*
+	 * Set RX IDX in register to inform hardware that we have handled
+	 * this entry and it is available for reuse again.
+	 */
+	rt2800_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);
+
+	/*
+	 * Remove TXWI descriptor from start of buffer.
+	 */
+	skb_pull(entry->skb, RXWI_DESC_SIZE);
+}
+
+/*
+ * Interrupt functions.
+ */
+static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
+{
+	struct data_queue *queue;
+	struct queue_entry *entry;
+	struct queue_entry *entry_done;
+	struct queue_entry_priv_pci *entry_priv;
+	struct txdone_entry_desc txdesc;
+	u32 word;
+	u32 reg;
+	u32 old_reg;
+	unsigned int type;
+	unsigned int index;
+	u16 mcs, real_mcs;
+
+	/*
+	 * During each loop we compare the freshly read TX_STA_FIFO
+	 * register value with the value read in the previous loop.
+	 * If the two values are equal we should stop processing,
+	 * because the chance is quite big that the device has been
+	 * unplugged and we risk going into an endless loop.
+	 */
+	old_reg = 0;
+
+	while (1) {
+		rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
+		if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
+			break;
+
+		if (old_reg == reg)
+			break;
+		old_reg = reg;
+
+		/*
+		 * Skip this entry when it contains an invalid
+		 * queue identification number.
+		 */
+		type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
+		if (type >= QID_RX)
+			continue;
+
+		queue = rt2x00queue_get_queue(rt2x00dev, type);
+		if (unlikely(!queue))
+			continue;
+
+		/*
+		 * Skip this entry when it contains an invalid
+		 * index number.
+		 */
+		index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1;
+		if (unlikely(index >= queue->limit))
+			continue;
+
+		entry = &queue->entries[index];
+		entry_priv = entry->priv_data;
+		rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word);
+
+		entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+		while (entry != entry_done) {
+			/*
+			 * Catch up.
+			 * Just report any entries we missed as failed.
+			 */
+			WARNING(rt2x00dev,
+				"TX status report missed for entry %d\n",
+				entry_done->entry_idx);
+
+			txdesc.flags = 0;
+			__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
+			txdesc.retry = 0;
+
+			rt2x00lib_txdone(entry_done, &txdesc);
+			entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+		}
+
+		/*
+		 * Obtain the status of this packet.
+		 */
+		txdesc.flags = 0;
+		if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS))
+			__set_bit(TXDONE_SUCCESS, &txdesc.flags);
+		else
+			__set_bit(TXDONE_FAILURE, &txdesc.flags);
+
+		/*
+		 * Ralink has a retry mechanism using a global fallback
+		 * table. We set up this fallback table to try the next
+		 * lower rate for all rates. In the TX_STA_FIFO,
+		 * the MCS field contains the MCS used for the successful
+		 * transmission. If the first transmission succeeds,
+		 * we have mcs == tx_mcs. On the second transmission,
+		 * we have mcs = tx_mcs - 1. So the number of
+		 * retries is (tx_mcs - mcs).
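+		 *
+		 * For example (illustrative): if a frame was queued at MCS 7
+		 * and the TX_STA_FIFO entry reports MCS 5 for the attempt
+		 * that finally succeeded, the hardware fell back twice and
+		 * we report txdesc.retry = 7 - 5 = 2.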
+ */ + mcs = rt2x00_get_field32(word, TXWI_W0_MCS); + real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS); + __set_bit(TXDONE_FALLBACK, &txdesc.flags); + txdesc.retry = mcs - min(mcs, real_mcs); + + rt2x00lib_txdone(entry, &txdesc); + } +} + +static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg; + + /* Read status and ACK all interrupts */ + rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, ®); + rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg); + + if (!reg) + return IRQ_NONE; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* + * 1 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE)) + rt2x00pci_rxdone(rt2x00dev); + + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) + rt2800pci_txdone(rt2x00dev); + + return IRQ_HANDLED; +} + +/* + * Device probe functions. + */ +static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + /* + * Read EEPROM into buffer + */ + if (rt2x00_is_soc(rt2x00dev)) + rt2800pci_read_eeprom_soc(rt2x00dev); + else if (rt2800pci_efuse_detect(rt2x00dev)) + rt2800pci_read_eeprom_efuse(rt2x00dev); + else + rt2800pci_read_eeprom_pci(rt2x00dev); + + return rt2800_validate_eeprom(rt2x00dev); +} + +static const struct rt2800_ops rt2800pci_rt2800_ops = { + .register_read = rt2x00pci_register_read, + .register_read_lock = rt2x00pci_register_read, /* same for PCI */ + .register_write = rt2x00pci_register_write, + .register_write_lock = rt2x00pci_register_write, /* same for PCI */ + + .register_multiread = rt2x00pci_register_multiread, + .register_multiwrite = rt2x00pci_register_multiwrite, + + .regbusy_read = rt2x00pci_regbusy_read, +}; + +static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + rt2x00dev->priv = (void *)&rt2800pci_rt2800_ops; + + /* + * Allocate eeprom data. + */ + retval = rt2800pci_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2800_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + retval = rt2800_probe_hw_mode(rt2x00dev); + if (retval) + return retval; + + /* + * This device has multiple filters for control frames + * and has a separate filter for PS Poll frames. + */ + __set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags); + __set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags); + + /* + * This device requires firmware. + */ + if (!rt2x00_is_soc(rt2x00dev)) + __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); + if (!modparam_nohwcrypt) + __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); + + /* + * Set the rssi offset. 
+ */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { + .irq_handler = rt2800pci_interrupt, + .probe_hw = rt2800pci_probe_hw, + .get_firmware_name = rt2800pci_get_firmware_name, + .check_firmware = rt2800pci_check_firmware, + .load_firmware = rt2800pci_load_firmware, + .initialize = rt2x00pci_initialize, + .uninitialize = rt2x00pci_uninitialize, + .get_entry_state = rt2800pci_get_entry_state, + .clear_entry = rt2800pci_clear_entry, + .set_device_state = rt2800pci_set_device_state, + .rfkill_poll = rt2800_rfkill_poll, + .link_stats = rt2800_link_stats, + .reset_tuner = rt2800_reset_tuner, + .link_tuner = rt2800_link_tuner, + .write_tx_desc = rt2800pci_write_tx_desc, + .write_tx_data = rt2x00pci_write_tx_data, + .write_beacon = rt2800pci_write_beacon, + .kick_tx_queue = rt2800pci_kick_tx_queue, + .kill_tx_queue = rt2800pci_kill_tx_queue, + .fill_rxdone = rt2800pci_fill_rxdone, + .config_shared_key = rt2800_config_shared_key, + .config_pairwise_key = rt2800_config_pairwise_key, + .config_filter = rt2800_config_filter, + .config_intf = rt2800_config_intf, + .config_erp = rt2800_config_erp, + .config_ant = rt2800_config_ant, + .config = rt2800_config, +}; + +static const struct data_queue_desc rt2800pci_queue_rx = { + .entry_num = RX_ENTRIES, + .data_size = AGGREGATION_SIZE, + .desc_size = RXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_pci), +}; + +static const struct data_queue_desc rt2800pci_queue_tx = { + .entry_num = TX_ENTRIES, + .data_size = AGGREGATION_SIZE, + .desc_size = TXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_pci), +}; + +static const struct data_queue_desc rt2800pci_queue_bcn = { + .entry_num = 8 * BEACON_ENTRIES, + .data_size = 0, /* No DMA required for beacons */ + .desc_size = TXWI_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_pci), +}; + +static const struct rt2x00_ops rt2800pci_ops = { + .name = KBUILD_MODNAME, + .max_sta_intf = 1, + .max_ap_intf = 8, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .tx_queues = NUM_TX_QUEUES, + .extra_tx_headroom = TXWI_DESC_SIZE, + .rx = &rt2800pci_queue_rx, + .tx = &rt2800pci_queue_tx, + .bcn = &rt2800pci_queue_bcn, + .lib = &rt2800pci_rt2x00_ops, + .hw = &rt2800_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt2800_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * RT2800pci module information. 
+ */ +static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { + { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7738), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) }, +#ifdef CONFIG_RT2800PCI_RT30XX + { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) }, +#endif +#ifdef CONFIG_RT2800PCI_RT35XX + { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) }, +#endif + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards"); +#ifdef CONFIG_RT2800PCI_PCI +MODULE_FIRMWARE(FIRMWARE_RT2860); +MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); +#endif /* CONFIG_RT2800PCI_PCI */ +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_RT2800PCI_SOC +static int rt2800soc_probe(struct platform_device *pdev) +{ + return rt2x00soc_probe(pdev, &rt2800pci_ops); +} + +static struct platform_driver rt2800soc_driver = { + .driver = { + .name = "rt2800_wmac", + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + }, + .probe = rt2800soc_probe, + .remove = __devexit_p(rt2x00soc_remove), + .suspend = rt2x00soc_suspend, + .resume = rt2x00soc_resume, +}; +#endif /* CONFIG_RT2800PCI_SOC */ + +#ifdef CONFIG_RT2800PCI_PCI +static struct pci_driver rt2800pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rt2800pci_device_table, + .probe = rt2x00pci_probe, + .remove = __devexit_p(rt2x00pci_remove), + .suspend = rt2x00pci_suspend, + .resume = rt2x00pci_resume, +}; +#endif /* CONFIG_RT2800PCI_PCI */ + +static int __init rt2800pci_init(void) +{ + int ret = 0; + +#ifdef CONFIG_RT2800PCI_SOC + ret = platform_driver_register(&rt2800soc_driver); + if (ret) + return ret; +#endif +#ifdef CONFIG_RT2800PCI_PCI + ret = pci_register_driver(&rt2800pci_driver); + if (ret) { +#ifdef CONFIG_RT2800PCI_SOC + platform_driver_unregister(&rt2800soc_driver); +#endif + return ret; + } +#endif + + return ret; +} + +static void __exit rt2800pci_exit(void) +{ +#ifdef CONFIG_RT2800PCI_PCI + pci_unregister_driver(&rt2800pci_driver); +#endif +#ifdef CONFIG_RT2800PCI_SOC + platform_driver_unregister(&rt2800soc_driver); +#endif +} + +module_init(rt2800pci_init); +module_exit(rt2800pci_exit); diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h new file mode 100644 index 0000000..afc8e7d --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2800pci.h @@ -0,0 +1,159 @@ 
+/* + Copyright (C) 2009 Ivo van Doorn + Copyright (C) 2009 Alban Browaeys + Copyright (C) 2009 Felix Fietkau + Copyright (C) 2009 Luis Correia + Copyright (C) 2009 Mattias Nissler + Copyright (C) 2009 Mark Asselstine + Copyright (C) 2009 Xose Vazquez Perez + Copyright (C) 2009 Bart Zolnierkiewicz + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2800pci + Abstract: Data structures and registers for the rt2800pci module. + Supported chipsets: RT2800E & RT2800ED. + */ + +#ifndef RT2800PCI_H +#define RT2800PCI_H + +/* + * PCI registers. + */ + +/* + * E2PROM_CSR: EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE: 0: 93c46, 1:93c66. + * LOAD_STATUS: 1:loading, 0:done. + */ +#define E2PROM_CSR 0x0004 +#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000001) +#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000002) +#define E2PROM_CSR_DATA_IN FIELD32(0x00000004) +#define E2PROM_CSR_DATA_OUT FIELD32(0x00000008) +#define E2PROM_CSR_TYPE FIELD32(0x00000030) +#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040) +#define E2PROM_CSR_RELOAD FIELD32(0x00000080) + +/* + * Queue register offset macros + */ +#define TX_QUEUE_REG_OFFSET 0x10 +#define TX_BASE_PTR(__x) TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET) +#define TX_MAX_CNT(__x) TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET) +#define TX_CTX_IDX(__x) TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET) +#define TX_DTX_IDX(__x) TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET) + +/* + * 8051 firmware image. + */ +#define FIRMWARE_RT2860 "rt2860.bin" +#define FIRMWARE_IMAGE_BASE 0x2000 + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 4 * sizeof(__le32) ) +#define RXD_DESC_SIZE ( 4 * sizeof(__le32) ) + +/* + * TX descriptor format for TX, PRIO and Beacon Ring. + */ + +/* + * Word0 + */ +#define TXD_W0_SD_PTR0 FIELD32(0xffffffff) + +/* + * Word1 + */ +#define TXD_W1_SD_LEN1 FIELD32(0x00003fff) +#define TXD_W1_LAST_SEC1 FIELD32(0x00004000) +#define TXD_W1_BURST FIELD32(0x00008000) +#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000) +#define TXD_W1_LAST_SEC0 FIELD32(0x40000000) +#define TXD_W1_DMA_DONE FIELD32(0x80000000) + +/* + * Word2 + */ +#define TXD_W2_SD_PTR1 FIELD32(0xffffffff) + +/* + * Word3 + * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI + * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler. + * 0:MGMT, 1:HCCA 2:EDCA + */ +#define TXD_W3_WIV FIELD32(0x01000000) +#define TXD_W3_QSEL FIELD32(0x06000000) +#define TXD_W3_TCO FIELD32(0x20000000) +#define TXD_W3_UCO FIELD32(0x40000000) +#define TXD_W3_ICO FIELD32(0x80000000) + +/* + * RX descriptor format for RX Ring. 
+ */ + +/* + * Word0 + */ +#define RXD_W0_SDP0 FIELD32(0xffffffff) + +/* + * Word1 + */ +#define RXD_W1_SDL1 FIELD32(0x00003fff) +#define RXD_W1_SDL0 FIELD32(0x3fff0000) +#define RXD_W1_LS0 FIELD32(0x40000000) +#define RXD_W1_DMA_DONE FIELD32(0x80000000) + +/* + * Word2 + */ +#define RXD_W2_SDP1 FIELD32(0xffffffff) + +/* + * Word3 + * AMSDU: RX with 802.3 header, not 802.11 header. + * DECRYPTED: This frame is being decrypted. + */ +#define RXD_W3_BA FIELD32(0x00000001) +#define RXD_W3_DATA FIELD32(0x00000002) +#define RXD_W3_NULLDATA FIELD32(0x00000004) +#define RXD_W3_FRAG FIELD32(0x00000008) +#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010) +#define RXD_W3_MULTICAST FIELD32(0x00000020) +#define RXD_W3_BROADCAST FIELD32(0x00000040) +#define RXD_W3_MY_BSS FIELD32(0x00000080) +#define RXD_W3_CRC_ERROR FIELD32(0x00000100) +#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600) +#define RXD_W3_AMSDU FIELD32(0x00000800) +#define RXD_W3_HTC FIELD32(0x00001000) +#define RXD_W3_RSSI FIELD32(0x00002000) +#define RXD_W3_L2PAD FIELD32(0x00004000) +#define RXD_W3_AMPDU FIELD32(0x00008000) +#define RXD_W3_DECRYPTED FIELD32(0x00010000) +#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000) +#define RXD_W3_PLCP_RSSI FIELD32(0x00040000) + +#endif /* RT2800PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c new file mode 100644 index 0000000..d27d7d5 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -0,0 +1,1119 @@ +/* + Copyright (C) 2009 Ivo van Doorn + Copyright (C) 2009 Mattias Nissler + Copyright (C) 2009 Felix Fietkau + Copyright (C) 2009 Xose Vazquez Perez + Copyright (C) 2009 Axel Kollhofer + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2800usb + Abstract: rt2800usb device specific routines. + Supported chipsets: RT2800U. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00usb.h" +#include "rt2800lib.h" +#include "rt2800.h" +#include "rt2800usb.h" + +/* + * Allow hardware encryption to be disabled. + */ +static int modparam_nohwcrypt = 1; +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); + +/* + * Firmware functions + */ +static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) +{ + return FIRMWARE_RT2870; +} + +static bool rt2800usb_check_crc(const u8 *data, const size_t len) +{ + u16 fw_crc; + u16 crc; + + /* + * The last 2 bytes in the firmware array are the crc checksum itself, + * this means that we should never pass those 2 bytes to the crc + * algorithm. + */ + fw_crc = (data[len - 2] << 8 | data[len - 1]); + + /* + * Use the crc ccitt algorithm. 
+	 * This will return the same value as the legacy driver, which
+	 * used bit ordering reversal on both the firmware bytes before
+	 * input as well as on the final output.
+	 * Obviously using crc ccitt directly is much more efficient.
+	 */
+	crc = crc_ccitt(~0, data, len - 2);
+
+	/*
+	 * There is a small difference between the crc-itu-t + bitrev and
+	 * the crc-ccitt crc calculation. In the latter method the 2 bytes
+	 * will be swapped; use swab16 to convert the crc to the correct
+	 * value.
+	 */
+	crc = swab16(crc);
+
+	return fw_crc == crc;
+}
+
+static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
+				    const u8 *data, const size_t len)
+{
+	size_t offset = 0;
+
+	/*
+	 * Firmware files:
+	 * There are 2 variations of the rt2870 firmware.
+	 * a) size: 4kb
+	 * b) size: 8kb
+	 * Note that (b) contains 2 separate firmware blobs of 4k
+	 * within the file. The first blob is the same firmware as (a),
+	 * but the second blob is for the additional chipsets.
+	 */
+	if (len != 4096 && len != 8192)
+		return FW_BAD_LENGTH;
+
+	/*
+	 * Check if we need the upper 4kb firmware data or not.
+	 */
+	if ((len == 4096) &&
+	    !rt2x00_rt(rt2x00dev, RT2860) &&
+	    !rt2x00_rt(rt2x00dev, RT2872) &&
+	    !rt2x00_rt(rt2x00dev, RT3070))
+		return FW_BAD_VERSION;
+
+	/*
+	 * 8kb firmware files must be checked as if they were
+	 * 2 separate firmware files.
+	 */
+	while (offset < len) {
+		if (!rt2800usb_check_crc(data + offset, 4096))
+			return FW_BAD_CRC;
+
+		offset += 4096;
+	}
+
+	return FW_OK;
+}
+
+static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
+				   const u8 *data, const size_t len)
+{
+	unsigned int i;
+	int status;
+	u32 reg;
+	u32 offset;
+	u32 length;
+
+	/*
+	 * Check which section of the firmware we need.
+	 */
+	if (rt2x00_rt(rt2x00dev, RT2860) ||
+	    rt2x00_rt(rt2x00dev, RT2872) ||
+	    rt2x00_rt(rt2x00dev, RT3070)) {
+		offset = 0;
+		length = 4096;
+	} else {
+		offset = 4096;
+		length = 4096;
+	}
+
+	/*
+	 * Wait for stable hardware.
+	 */
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+		if (reg && reg != ~0)
+			break;
+		msleep(1);
+	}
+
+	if (i == REGISTER_BUSY_COUNT) {
+		ERROR(rt2x00dev, "Unstable hardware.\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * Write firmware to device.
+	 */
+	rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
+					    USB_VENDOR_REQUEST_OUT,
+					    FIRMWARE_IMAGE_BASE,
+					    data + offset, length,
+					    REGISTER_TIMEOUT32(length));
+
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
+
+	/*
+	 * Send firmware request to device to load firmware,
+	 * we need to specify a long timeout time.
+	 */
+	status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE,
+					     0, USB_MODE_FIRMWARE,
+					     REGISTER_TIMEOUT_FIRMWARE);
+	if (status < 0) {
+		ERROR(rt2x00dev, "Failed to write Firmware to device.\n");
+		return status;
+	}
+
+	msleep(10);
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+
+	/*
+	 * Send signal to firmware during boot time.
+	 */
+	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
+
+	if (rt2x00_rt(rt2x00dev, RT3070) ||
+	    rt2x00_rt(rt2x00dev, RT3071) ||
+	    rt2x00_rt(rt2x00dev, RT3572)) {
+		udelay(200);
+		rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
+		udelay(10);
+	}
+
+	/*
+	 * Wait for device to stabilize.
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, ®); + if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY)) + break; + msleep(1); + } + + if (i == REGISTER_BUSY_COUNT) { + ERROR(rt2x00dev, "PBF system register not ready.\n"); + return -EBUSY; + } + + /* + * Initialize firmware. + */ + rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); + rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); + msleep(1); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, + (state == STATE_RADIO_RX_ON) || + (state == STATE_RADIO_RX_ON_LINK)); + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); +} + +static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 word; + + /* + * Initialize all registers. + */ + if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) || + rt2800_init_registers(rt2x00dev) || + rt2800_init_bbp(rt2x00dev) || + rt2800_init_rfcsr(rt2x00dev))) + return -EIO; + + rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 1); + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); + + udelay(50); + + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1); + rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + + + rt2800_register_read(rt2x00dev, USB_DMA_CFG, ®); + rt2x00_set_field32(®, USB_DMA_CFG_PHY_CLEAR, 0); + rt2x00_set_field32(®, USB_DMA_CFG_RX_BULK_AGG_EN, 0); + rt2x00_set_field32(®, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128); + /* + * Total room for RX frames in kilobytes, PBF might still exceed + * this limit so reduce the number to prevent errors. 
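+ *
+ * For example (illustrative, actual RX_ENTRIES and DATA_FRAME_SIZE values
+ * are defined elsewhere): with 128 RX entries of 2048 bytes each the raw
+ * budget is 256 KB, so the limit programmed here would be 256 - 3 = 253.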
+ */ + rt2x00_set_field32(®, USB_DMA_CFG_RX_BULK_AGG_LIMIT, + ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3); + rt2x00_set_field32(®, USB_DMA_CFG_RX_BULK_EN, 1); + rt2x00_set_field32(®, USB_DMA_CFG_TX_BULK_EN, 1); + rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg); + + rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 1); + rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, 1); + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); + + /* + * Initialize LED control + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word); + rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff, + word & 0xff, (word >> 8) & 0xff); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word); + rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff, + word & 0xff, (word >> 8) & 0xff); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word); + rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff, + word & 0xff, (word >> 8) & 0xff); + + return 0; +} + +static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); + rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); + rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); + + rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0); + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); + rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); + + /* Wait for DMA, ignore error */ + rt2800_wait_wpdma_ready(rt2x00dev); + + rt2x00usb_disable_radio(rt2x00dev); +} + +static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + if (state == STATE_AWAKE) + rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0); + else + rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2); + + return 0; +} + +static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + /* + * Before the radio can be enabled, the device first has + * to be woken up. After that it needs a bit of time + * to be fully awake and then the radio can be enabled. + */ + rt2800usb_set_state(rt2x00dev, STATE_AWAKE); + msleep(1); + retval = rt2800usb_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + /* + * After the radio has been disabled, the device should + * be put to sleep for powersaving. 
+ */ + rt2800usb_disable_radio(rt2x00dev); + rt2800usb_set_state(rt2x00dev, STATE_SLEEP); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_ON_LINK: + case STATE_RADIO_RX_OFF: + case STATE_RADIO_RX_OFF_LINK: + rt2800usb_toggle_rx(rt2x00dev, state); + break; + case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_OFF: + /* No support, but no error either */ + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2800usb_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + if (unlikely(retval)) + ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", + state, retval); + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct txentry_desc *txdesc) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + __le32 *txi = skbdesc->desc; + __le32 *txwi = &txi[TXINFO_DESC_SIZE / sizeof(__le32)]; + u32 word; + + /* + * Initialize TX Info descriptor + */ + rt2x00_desc_read(txwi, 0, &word); + rt2x00_set_field32(&word, TXWI_W0_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0); + rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0); + rt2x00_set_field32(&word, TXWI_W0_TS, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W0_AMPDU, + test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density); + rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs); + rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs); + rt2x00_set_field32(&word, TXWI_W0_BW, + test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W0_SHORT_GI, + test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc); + rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode); + rt2x00_desc_write(txwi, 0, word); + + rt2x00_desc_read(txwi, 1, &word); + rt2x00_set_field32(&word, TXWI_W1_ACK, + test_bit(ENTRY_TXD_ACK, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W1_NSEQ, + test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); + rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size); + rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID, + test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? + txdesc->key_idx : 0xff); + rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, + skb->len - txdesc->l2pad); + rt2x00_set_field32(&word, TXWI_W1_PACKETID, + skbdesc->entry->queue->qid + 1); + rt2x00_desc_write(txwi, 1, word); + + /* + * Always write 0 to IV/EIV fields, hardware will insert the IV + * from the IVEIV register when TXINFO_W0_WIV is set to 0. + * When TXINFO_W0_WIV is set to 1 it will use the IV data + * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which + * crypto entry in the registers should be used to encrypt the frame. 
+ */ + _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */); + _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */); + + /* + * Initialize TX descriptor + */ + rt2x00_desc_read(txi, 0, &word); + rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, + skb->len + TXWI_DESC_SIZE); + rt2x00_set_field32(&word, TXINFO_W0_WIV, + !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); + rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); + rt2x00_set_field32(&word, TXINFO_W0_SW_USE_LAST_ROUND, 0); + rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_NEXT_VALID, 0); + rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_BURST, + test_bit(ENTRY_TXD_BURST, &txdesc->flags)); + rt2x00_desc_write(txi, 0, word); +} + +/* + * TX data initialization + */ +static void rt2800usb_write_beacon(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + unsigned int beacon_base; + u32 reg; + + /* + * Add the descriptor in front of the skb. + */ + skb_push(entry->skb, entry->queue->desc_size); + memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len); + skbdesc->desc = entry->skb->data; + + /* + * Disable beaconing while we are reloading the beacon data, + * otherwise we might be sending out invalid data. + */ + rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 0); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + + /* + * Write entire beacon with descriptor to register. + */ + beacon_base = HW_BEACON_OFFSET(entry->entry_idx); + rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, beacon_base, + entry->skb->data, entry->skb->len, + REGISTER_TIMEOUT32(entry->skb->len)); + + /* + * Clean up the beacon skb. + */ + dev_kfree_skb(entry->skb); + entry->skb = NULL; +} + +static int rt2800usb_get_tx_data_len(struct queue_entry *entry) +{ + int length; + + /* + * The length _must_ include 4 bytes padding, + * it should always be multiple of 4, + * but it must _not_ be a multiple of the USB packet size. 
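+ *
+ * For example (illustrative): with a 512 byte bulk-out endpoint and an
+ * skb of 508 bytes, roundup(508 + 4, 4) = 512 lands exactly on a packet
+ * boundary, so another 4 bytes are added and 516 is returned.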
+ */ + length = roundup(entry->skb->len + 4, 4); + length += (4 * !(length % entry->queue->usb_maxpacket)); + + return length; +} + +static void rt2800usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue) +{ + u32 reg; + + if (queue != QID_BEACON) { + rt2x00usb_kick_tx_queue(rt2x00dev, queue); + return; + } + + rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); + if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) { + rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 1); + rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 1); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 1); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + } +} + +/* + * RX control handlers + */ +static void rt2800usb_fill_rxdone(struct queue_entry *entry, + struct rxdone_entry_desc *rxdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + __le32 *rxi = (__le32 *)entry->skb->data; + __le32 *rxwi; + __le32 *rxd; + u32 rxi0; + u32 rxwi0; + u32 rxwi1; + u32 rxwi2; + u32 rxwi3; + u32 rxd0; + int rx_pkt_len; + + /* + * RX frame format is : + * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad | + * |<------------ rx_pkt_len -------------->| + */ + rt2x00_desc_read(rxi, 0, &rxi0); + rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN); + + rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE); + + /* + * FIXME : we need to check for rx_pkt_len validity + */ + rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len); + + /* + * Copy descriptor to the skbdesc->desc buffer, making it safe from + * moving of frame data in rt2x00usb. + */ + memcpy(skbdesc->desc, rxi, skbdesc->desc_len); + + /* + * It is now safe to read the descriptor on all architectures. + */ + rt2x00_desc_read(rxwi, 0, &rxwi0); + rt2x00_desc_read(rxwi, 1, &rxwi1); + rt2x00_desc_read(rxwi, 2, &rxwi2); + rt2x00_desc_read(rxwi, 3, &rxwi3); + rt2x00_desc_read(rxd, 0, &rxd0); + + if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR)) + rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; + + if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { + rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF); + rxdesc->cipher_status = + rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR); + } + + if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) { + /* + * Hardware has stripped IV/EIV data from 802.11 frame during + * decryption. Unfortunately the descriptor doesn't contain + * any fields with the EIV/IV data either, so they can't + * be restored by rt2x00lib. + */ + rxdesc->flags |= RX_FLAG_IV_STRIPPED; + + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) + rxdesc->flags |= RX_FLAG_DECRYPTED; + else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) + rxdesc->flags |= RX_FLAG_MMIC_ERROR; + } + + if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS)) + rxdesc->dev_flags |= RXDONE_MY_BSS; + + if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD)) + rxdesc->dev_flags |= RXDONE_L2PAD; + + if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI)) + rxdesc->flags |= RX_FLAG_SHORT_GI; + + if (rt2x00_get_field32(rxwi1, RXWI_W1_BW)) + rxdesc->flags |= RX_FLAG_40MHZ; + + /* + * Detect RX rate, always use MCS as signal type. + */ + rxdesc->dev_flags |= RXDONE_SIGNAL_MCS; + rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE); + rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS); + + /* + * Mask of 0x8 bit to remove the short preamble flag. 
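+ *
+ * For example (illustrative): a short-preamble 11 Mbps CCK frame is
+ * reported with bit 3 set in the MCS value (0xb); clearing that bit
+ * yields 3, the plain 11 Mbps rate index.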
+ */ + if (rxdesc->rate_mode == RATE_MODE_CCK) + rxdesc->signal &= ~0x8; + + rxdesc->rssi = + (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) + + rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2; + + rxdesc->noise = + (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) + + rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2; + + rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT); + + /* + * Remove RXWI descriptor from start of buffer. + */ + skb_pull(entry->skb, skbdesc->desc_len); +} + +/* + * Device probe functions. + */ +static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + if (rt2800_efuse_detect(rt2x00dev)) + rt2800_read_eeprom_efuse(rt2x00dev); + else + rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, + EEPROM_SIZE); + + return rt2800_validate_eeprom(rt2x00dev); +} + +static const struct rt2800_ops rt2800usb_rt2800_ops = { + .register_read = rt2x00usb_register_read, + .register_read_lock = rt2x00usb_register_read_lock, + .register_write = rt2x00usb_register_write, + .register_write_lock = rt2x00usb_register_write_lock, + + .register_multiread = rt2x00usb_register_multiread, + .register_multiwrite = rt2x00usb_register_multiwrite, + + .regbusy_read = rt2x00usb_regbusy_read, +}; + +static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + rt2x00dev->priv = (void *)&rt2800usb_rt2800_ops; + + /* + * Allocate eeprom data. + */ + retval = rt2800usb_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2800_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + retval = rt2800_probe_hw_mode(rt2x00dev); + if (retval) + return retval; + + /* + * This device has multiple filters for control frames + * and has a separate filter for PS Poll frames. + */ + __set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags); + __set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags); + + /* + * This device requires firmware. + */ + __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); + if (!modparam_nohwcrypt) + __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); + + /* + * Set the rssi offset. 
+ */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { + .probe_hw = rt2800usb_probe_hw, + .get_firmware_name = rt2800usb_get_firmware_name, + .check_firmware = rt2800usb_check_firmware, + .load_firmware = rt2800usb_load_firmware, + .initialize = rt2x00usb_initialize, + .uninitialize = rt2x00usb_uninitialize, + .clear_entry = rt2x00usb_clear_entry, + .set_device_state = rt2800usb_set_device_state, + .rfkill_poll = rt2800_rfkill_poll, + .link_stats = rt2800_link_stats, + .reset_tuner = rt2800_reset_tuner, + .link_tuner = rt2800_link_tuner, + .write_tx_desc = rt2800usb_write_tx_desc, + .write_tx_data = rt2x00usb_write_tx_data, + .write_beacon = rt2800usb_write_beacon, + .get_tx_data_len = rt2800usb_get_tx_data_len, + .kick_tx_queue = rt2800usb_kick_tx_queue, + .kill_tx_queue = rt2x00usb_kill_tx_queue, + .fill_rxdone = rt2800usb_fill_rxdone, + .config_shared_key = rt2800_config_shared_key, + .config_pairwise_key = rt2800_config_pairwise_key, + .config_filter = rt2800_config_filter, + .config_intf = rt2800_config_intf, + .config_erp = rt2800_config_erp, + .config_ant = rt2800_config_ant, + .config = rt2800_config, +}; + +static const struct data_queue_desc rt2800usb_queue_rx = { + .entry_num = RX_ENTRIES, + .data_size = AGGREGATION_SIZE, + .desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_usb), +}; + +static const struct data_queue_desc rt2800usb_queue_tx = { + .entry_num = TX_ENTRIES, + .data_size = AGGREGATION_SIZE, + .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_usb), +}; + +static const struct data_queue_desc rt2800usb_queue_bcn = { + .entry_num = 8 * BEACON_ENTRIES, + .data_size = MGMT_FRAME_SIZE, + .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_usb), +}; + +static const struct rt2x00_ops rt2800usb_ops = { + .name = KBUILD_MODNAME, + .max_sta_intf = 1, + .max_ap_intf = 8, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .tx_queues = NUM_TX_QUEUES, + .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, + .rx = &rt2800usb_queue_rx, + .tx = &rt2800usb_queue_tx, + .bcn = &rt2800usb_queue_bcn, + .lib = &rt2800usb_rt2x00_ops, + .hw = &rt2800_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt2800_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * rt2800usb module information. 
+ */ +static struct usb_device_id rt2800usb_device_table[] = { + /* Abocom */ + { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Amit */ + { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Askey */ + { USB_DEVICE(0x1690, 0x0740), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* ASUS */ + { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* AzureWave */ + { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Belkin */ + { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Buffalo */ + { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Conceptronic */ + { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c28), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Corega */ + { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* D-Link */ + { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Edimax */ + { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* EnGenius */ + { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Gigabyte */ + { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Hawking */ + { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Linksys */ + { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Logitec */ + { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Motorola */ + { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* MSI */ + { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Philips */ + { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Planex */ + { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Samsung */ + { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Siemens */ + { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Sitecom */ + { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x002b), 
USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* SMC */ + { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Sparklan */ + { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Sweex */ + { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* U-Media*/ + { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* ZCOM */ + { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Zinwell */ + { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Zyxel */ + { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) }, +#ifdef CONFIG_RT2800USB_RT30XX + /* Abocom */ + { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* AirTies */ + { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* AzureWave */ + { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Conceptronic */ + { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Corega */ + { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* D-Link */ + { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Edimax */ + { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Encore */ + { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* EnGenius */ + { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Gigabyte */ + { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* I-O DATA */ + { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* MSI */ + { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Pegatron */ + { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Planex */ + { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Quanta */ + { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Sitecom */ + { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x0042), 
USB_DEVICE_DATA(&rt2800usb_ops) }, + /* SMC */ + { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Zinwell */ + { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) }, +#endif +#ifdef CONFIG_RT2800USB_RT35XX + /* Askey */ + { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Cisco */ + { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* EnGenius */ + { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* I-O DATA */ + { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Sitecom */ + { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Zinwell */ + { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) }, +#endif +#ifdef CONFIG_RT2800USB_UNKNOWN + /* + * Unclear what kind of devices these are (they aren't supported by the + * vendor driver). + */ + /* Allwin */ + { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Amigo */ + { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Askey */ + { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* ASUS */ + { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* AzureWave */ + { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Belkin */ + { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Buffalo */ + { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0411, 0x0148), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0411, 0x0150), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0411, 0x015d), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Conceptronic */ + { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Corega */ + { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* D-Link */ + { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Encore */ + { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, + { 
USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* EnGenius */ + { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Gemtek */ + { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Gigabyte */ + { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Hawking */ + { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* I-O DATA */ + { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* LevelOne */ + { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Linksys */ + { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1737, 0x0078), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Motorola */ + { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* MSI */ + { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Ovislink */ + { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Para */ + { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Pegatron */ + { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Planex */ + { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Qcom */ + { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Sitecom */ + { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* SMC */ + { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Sweex */ + { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, + { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, + /* Zyxel */ + { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, +#endif + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); 
+MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2800 USB Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2870 USB chipset based cards"); +MODULE_DEVICE_TABLE(usb, rt2800usb_device_table); +MODULE_FIRMWARE(FIRMWARE_RT2870); +MODULE_LICENSE("GPL"); + +static struct usb_driver rt2800usb_driver = { + .name = KBUILD_MODNAME, + .id_table = rt2800usb_device_table, + .probe = rt2x00usb_probe, + .disconnect = rt2x00usb_disconnect, + .suspend = rt2x00usb_suspend, + .resume = rt2x00usb_resume, +}; + +static int __init rt2800usb_init(void) +{ + return usb_register(&rt2800usb_driver); +} + +static void __exit rt2800usb_exit(void) +{ + usb_deregister(&rt2800usb_driver); +} + +module_init(rt2800usb_init); +module_exit(rt2800usb_exit); diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h new file mode 100644 index 0000000..d1d8ae9 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2800usb.h @@ -0,0 +1,189 @@ +/* + Copyright (C) 2009 Ivo van Doorn + Copyright (C) 2009 Mattias Nissler + Copyright (C) 2009 Felix Fietkau + Copyright (C) 2009 Xose Vazquez Perez + Copyright (C) 2009 Axel Kollhofer + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2800usb + Abstract: Data structures and registers for the rt2800usb module. + Supported chipsets: RT2800U. + */ + +#ifndef RT2800USB_H +#define RT2800USB_H + +/* + * USB registers. + */ + +/* + * USB_DMA_CFG + * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns. + * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes. + * PHY_CLEAR: phy watch dog enable. + * TX_CLEAR: Clear USB DMA TX path. + * TXOP_HALT: Halt TXOP count down when TX buffer is full. + * RX_BULK_AGG_EN: Enable Rx Bulk Aggregation. + * RX_BULK_EN: Enable USB DMA Rx. + * TX_BULK_EN: Enable USB DMA Tx. + * EP_OUT_VALID: OUT endpoint data valid. + * RX_BUSY: USB DMA RX FSM busy. + * TX_BUSY: USB DMA TX FSM busy. + */ +#define USB_DMA_CFG 0x02a0 +#define USB_DMA_CFG_RX_BULK_AGG_TIMEOUT FIELD32(0x000000ff) +#define USB_DMA_CFG_RX_BULK_AGG_LIMIT FIELD32(0x0000ff00) +#define USB_DMA_CFG_PHY_CLEAR FIELD32(0x00010000) +#define USB_DMA_CFG_TX_CLEAR FIELD32(0x00080000) +#define USB_DMA_CFG_TXOP_HALT FIELD32(0x00100000) +#define USB_DMA_CFG_RX_BULK_AGG_EN FIELD32(0x00200000) +#define USB_DMA_CFG_RX_BULK_EN FIELD32(0x00400000) +#define USB_DMA_CFG_TX_BULK_EN FIELD32(0x00800000) +#define USB_DMA_CFG_EP_OUT_VALID FIELD32(0x3f000000) +#define USB_DMA_CFG_RX_BUSY FIELD32(0x40000000) +#define USB_DMA_CFG_TX_BUSY FIELD32(0x80000000) + +/* + * USB_CYC_CFG + */ +#define USB_CYC_CFG 0x02a4 +#define USB_CYC_CFG_CLOCK_CYCLE FIELD32(0x000000ff) + +/* + * 8051 firmware image. + */ +#define FIRMWARE_RT2870 "rt2870.bin" +#define FIRMWARE_IMAGE_BASE 0x3000 + +/* + * DMA descriptor defines. 
+ */ +#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) +#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) +#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) ) +#define RXD_DESC_SIZE ( 1 * sizeof(__le32) ) + +/* + * TX Info structure + */ + +/* + * Word0 + * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI + * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler. + * 0:MGMT, 1:HCCA 2:EDCA + * USB_DMA_NEXT_VALID: Used ONLY in USB bulk Aggregation, NextValid + * DMA_TX_BURST: used ONLY in USB bulk Aggregation. + * Force USB DMA transmit frame from current selected endpoint + */ +#define TXINFO_W0_USB_DMA_TX_PKT_LEN FIELD32(0x0000ffff) +#define TXINFO_W0_WIV FIELD32(0x01000000) +#define TXINFO_W0_QSEL FIELD32(0x06000000) +#define TXINFO_W0_SW_USE_LAST_ROUND FIELD32(0x08000000) +#define TXINFO_W0_USB_DMA_NEXT_VALID FIELD32(0x40000000) +#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000) + +/* + * RX Info structure + */ + +/* + * Word 0 + */ + +#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff) + +/* + * RX WI structure + */ + +/* + * Word0 + */ +#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff) +#define RXWI_W0_KEY_INDEX FIELD32(0x00000300) +#define RXWI_W0_BSSID FIELD32(0x00001c00) +#define RXWI_W0_UDF FIELD32(0x0000e000) +#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000) +#define RXWI_W0_TID FIELD32(0xf0000000) + +/* + * Word1 + */ +#define RXWI_W1_FRAG FIELD32(0x0000000f) +#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0) +#define RXWI_W1_MCS FIELD32(0x007f0000) +#define RXWI_W1_BW FIELD32(0x00800000) +#define RXWI_W1_SHORT_GI FIELD32(0x01000000) +#define RXWI_W1_STBC FIELD32(0x06000000) +#define RXWI_W1_PHYMODE FIELD32(0xc0000000) + +/* + * Word2 + */ +#define RXWI_W2_RSSI0 FIELD32(0x000000ff) +#define RXWI_W2_RSSI1 FIELD32(0x0000ff00) +#define RXWI_W2_RSSI2 FIELD32(0x00ff0000) + +/* + * Word3 + */ +#define RXWI_W3_SNR0 FIELD32(0x000000ff) +#define RXWI_W3_SNR1 FIELD32(0x0000ff00) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + * UNICAST_TO_ME: This RX frame is unicast to me. + * MULTICAST: This is a multicast frame. + * BROADCAST: This is a broadcast frame. + * MY_BSS: this frame belongs to the same BSSID. + * CRC_ERROR: CRC error. + * CIPHER_ERROR: 0: decryption okay, 1:ICV error, 2:MIC error, 3:KEY not valid. + * AMSDU: rx with 802.3 header, not 802.11 header. 
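
Each FIELD32() descriptor above encodes a register field purely as a 32-bit mask; the driver's generic helpers in rt2x00reg.h (rt2x00_get_field32()/rt2x00_set_field32(), not part of this hunk) turn that mask into a mask-and-shift. A rough user-space sketch of the same idea, assuming the shift can be derived from the lowest set bit of the mask:

#include <stdio.h>
#include <stdint.h>

/* Illustrative reimplementation of the mask-and-shift behind FIELD32();
 * __builtin_ctz() (GCC/Clang builtin) gives the position of the lowest
 * set bit of the mask, i.e. the shift amount. */
static uint32_t field_get(uint32_t word, uint32_t mask)
{
	return (word & mask) >> __builtin_ctz(mask);
}

static uint32_t field_set(uint32_t word, uint32_t mask, uint32_t value)
{
	return (word & ~mask) | ((value << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	uint32_t rxwi_w0 = 0;

	/* RXWI_W0_MPDU_TOTAL_BYTE_COUNT occupies mask 0x0fff0000. */
	rxwi_w0 = field_set(rxwi_w0, 0x0fff0000, 1500);
	printf("word 0x%08x holds %u bytes\n",
	       rxwi_w0, field_get(rxwi_w0, 0x0fff0000));	/* prints 1500 */
	return 0;
}
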
+ */ + +#define RXD_W0_BA FIELD32(0x00000001) +#define RXD_W0_DATA FIELD32(0x00000002) +#define RXD_W0_NULLDATA FIELD32(0x00000004) +#define RXD_W0_FRAG FIELD32(0x00000008) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000010) +#define RXD_W0_MULTICAST FIELD32(0x00000020) +#define RXD_W0_BROADCAST FIELD32(0x00000040) +#define RXD_W0_MY_BSS FIELD32(0x00000080) +#define RXD_W0_CRC_ERROR FIELD32(0x00000100) +#define RXD_W0_CIPHER_ERROR FIELD32(0x00000600) +#define RXD_W0_AMSDU FIELD32(0x00000800) +#define RXD_W0_HTC FIELD32(0x00001000) +#define RXD_W0_RSSI FIELD32(0x00002000) +#define RXD_W0_L2PAD FIELD32(0x00004000) +#define RXD_W0_AMPDU FIELD32(0x00008000) +#define RXD_W0_DECRYPTED FIELD32(0x00010000) +#define RXD_W0_PLCP_RSSI FIELD32(0x00020000) +#define RXD_W0_CIPHER_ALG FIELD32(0x00040000) +#define RXD_W0_LAST_AMSDU FIELD32(0x00080000) +#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000) + +#endif /* RT2800USB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h new file mode 100644 index 0000000..d9daa9c --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -0,0 +1,1051 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + Copyright (C) 2004 - 2009 Gertjan van Wingerde + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00 + Abstract: rt2x00 global information. + */ + +#ifndef RT2X00_H +#define RT2X00_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "rt2x00debug.h" +#include "rt2x00leds.h" +#include "rt2x00reg.h" +#include "rt2x00queue.h" + +/* + * Module information. + */ +#define DRV_VERSION "2.3.0" +#define DRV_PROJECT "http://rt2x00.serialmonkey.com" + +/* + * Debug definitions. + * Debug output has to be enabled during compile time. + */ +#define DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, __args...) \ + printk(__kernlvl "%s -> %s: %s - " __msg, \ + wiphy_name((__dev)->hw->wiphy), __func__, __lvl, ##__args) + +#define DEBUG_PRINTK_PROBE(__kernlvl, __lvl, __msg, __args...) \ + printk(__kernlvl "%s -> %s: %s - " __msg, \ + KBUILD_MODNAME, __func__, __lvl, ##__args) + +#ifdef CONFIG_RT2X00_DEBUG +#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ + DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, ##__args); +#else +#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ + do { } while (0) +#endif /* CONFIG_RT2X00_DEBUG */ + +/* + * Various debug levels. + * The debug levels PANIC and ERROR both indicate serious problems, + * for this reason they should never be ignored. + * The special ERROR_PROBE message is for messages that are generated + * when the rt2x00_dev is not yet initialized. + */ +#define PANIC(__dev, __msg, __args...) \ + DEBUG_PRINTK_MSG(__dev, KERN_CRIT, "Panic", __msg, ##__args) +#define ERROR(__dev, __msg, __args...) 
\ + DEBUG_PRINTK_MSG(__dev, KERN_ERR, "Error", __msg, ##__args) +#define ERROR_PROBE(__msg, __args...) \ + DEBUG_PRINTK_PROBE(KERN_ERR, "Error", __msg, ##__args) +#define WARNING(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_WARNING, "Warning", __msg, ##__args) +#define NOTICE(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_NOTICE, "Notice", __msg, ##__args) +#define INFO(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_INFO, "Info", __msg, ##__args) +#define DEBUG(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_DEBUG, "Debug", __msg, ##__args) +#define EEPROM(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_DEBUG, "EEPROM recovery", __msg, ##__args) + +/* + * Duration calculations + * The rate variable passed is: 100kbs. + * To convert from bytes to bits we multiply size with 8, + * then the size is multiplied with 10 to make the + * real rate -> rate argument correction. + */ +#define GET_DURATION(__size, __rate) (((__size) * 8 * 10) / (__rate)) +#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate)) + +/* + * Determine the number of L2 padding bytes required between the header and + * the payload. + */ +#define L2PAD_SIZE(__hdrlen) (-(__hdrlen) & 3) + +/* + * Determine the alignment requirement, + * to make sure the 802.11 payload is padded to a 4-byte boundrary + * we must determine the address of the payload and calculate the + * amount of bytes needed to move the data. + */ +#define ALIGN_SIZE(__skb, __header) \ + ( ((unsigned long)((__skb)->data + (__header))) & 3 ) + +/* + * Constants for extra TX headroom for alignment purposes. + */ +#define RT2X00_ALIGN_SIZE 4 /* Only whole frame needs alignment */ +#define RT2X00_L2PAD_SIZE 8 /* Both header & payload need alignment */ + +/* + * Standard timing and size defines. + * These values should follow the ieee80211 specifications. + */ +#define ACK_SIZE 14 +#define IEEE80211_HEADER 24 +#define PLCP 48 +#define BEACON 100 +#define PREAMBLE 144 +#define SHORT_PREAMBLE 72 +#define SLOT_TIME 20 +#define SHORT_SLOT_TIME 9 +#define SIFS 10 +#define PIFS ( SIFS + SLOT_TIME ) +#define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME ) +#define DIFS ( PIFS + SLOT_TIME ) +#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME ) +#define EIFS ( SIFS + DIFS + \ + GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) ) +#define SHORT_EIFS ( SIFS + SHORT_DIFS + \ + GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) ) + +/* + * Structure for average calculation + * The avg field contains the actual average value, + * but avg_weight is internally used during calculations + * to prevent rounding errors. + */ +struct avg_val { + int avg; + int avg_weight; +}; + +enum rt2x00_chip_intf { + RT2X00_CHIP_INTF_PCI, + RT2X00_CHIP_INTF_USB, + RT2X00_CHIP_INTF_SOC, +}; + +/* + * Chipset identification + * The chipset on the device is composed of a RT and RF chip. + * The chipset combination is important for determining device capabilities. 
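
To make the units concrete: GET_DURATION() takes the rate in 100 kbit/s steps and returns microseconds, and L2PAD_SIZE() returns how many bytes bring a header length up to the next 4-byte boundary. A small user-space check of both, with the expressions copied verbatim from the macro definitions above:

#include <stdio.h>

#define GET_DURATION(size, rate)	(((size) * 8 * 10) / (rate))
#define L2PAD_SIZE(hdrlen)		(-(hdrlen) & 3)

int main(void)
{
	/* 14-byte ACK at 1 Mbit/s (rate argument 10, in 100 kbit/s units). */
	printf("ACK duration: %d us\n", GET_DURATION(14, 10));	/* 112 */

	/* A 26-byte QoS header needs 2 bytes of L2 pad to reach 28. */
	printf("pad for 26-byte header: %d\n", L2PAD_SIZE(26));	/* 2 */
	printf("pad for 24-byte header: %d\n", L2PAD_SIZE(24));	/* 0 */
	return 0;
}
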
+ */ +struct rt2x00_chip { + u16 rt; +#define RT2460 0x2460 +#define RT2560 0x2560 +#define RT2570 0x2570 +#define RT2661 0x2661 +#define RT2573 0x2573 +#define RT2860 0x2860 /* 2.4GHz PCI/CB */ +#define RT2870 0x2870 +#define RT2872 0x2872 +#define RT2880 0x2880 /* WSOC */ +#define RT2883 0x2883 /* WSOC */ +#define RT2890 0x2890 /* 2.4GHz PCIe */ +#define RT3052 0x3052 /* WSOC */ +#define RT3070 0x3070 +#define RT3071 0x3071 +#define RT3090 0x3090 /* 2.4GHz PCIe */ +#define RT3390 0x3390 +#define RT3572 0x3572 + + u16 rf; + u16 rev; + + enum rt2x00_chip_intf intf; +}; + +/* + * RF register values that belong to a particular channel. + */ +struct rf_channel { + int channel; + u32 rf1; + u32 rf2; + u32 rf3; + u32 rf4; +}; + +/* + * Channel information structure + */ +struct channel_info { + unsigned int flags; +#define GEOGRAPHY_ALLOWED 0x00000001 + + short tx_power1; + short tx_power2; +}; + +/* + * Antenna setup values. + */ +struct antenna_setup { + enum antenna rx; + enum antenna tx; +}; + +/* + * Quality statistics about the currently active link. + */ +struct link_qual { + /* + * Statistics required for Link tuning by driver + * The rssi value is provided by rt2x00lib during the + * link_tuner() callback function. + * The false_cca field is filled during the link_stats() + * callback function and could be used during the + * link_tuner() callback function. + */ + int rssi; + int false_cca; + + /* + * VGC levels + * Hardware driver will tune the VGC level during each call + * to the link_tuner() callback function. This vgc_level is + * is determined based on the link quality statistics like + * average RSSI and the false CCA count. + * + * In some cases the drivers need to differentiate between + * the currently "desired" VGC level and the level configured + * in the hardware. The latter is important to reduce the + * number of BBP register reads to reduce register access + * overhead. For this reason we store both values here. + */ + u8 vgc_level; + u8 vgc_level_reg; + + /* + * Statistics required for Signal quality calculation. + * These fields might be changed during the link_stats() + * callback function. + */ + int rx_success; + int rx_failed; + int tx_success; + int tx_failed; +}; + +/* + * Antenna settings about the currently active link. + */ +struct link_ant { + /* + * Antenna flags + */ + unsigned int flags; +#define ANTENNA_RX_DIVERSITY 0x00000001 +#define ANTENNA_TX_DIVERSITY 0x00000002 +#define ANTENNA_MODE_SAMPLE 0x00000004 + + /* + * Currently active TX/RX antenna setup. + * When software diversity is used, this will indicate + * which antenna is actually used at this time. + */ + struct antenna_setup active; + + /* + * RSSI history information for the antenna. + * Used to determine when to switch antenna + * when using software diversity. + */ + int rssi_history; + + /* + * Current RSSI average of the currently active antenna. + * Similar to the avg_rssi in the link_qual structure + * this value is updated by using the walking average. + */ + struct avg_val rssi_ant; +}; + +/* + * To optimize the quality of the link we need to store + * the quality of received frames and periodically + * optimize the link. + */ +struct link { + /* + * Link tuner counter + * The number of times the link has been tuned + * since the radio has been switched on. + */ + u32 count; + + /* + * Quality measurement values. + */ + struct link_qual qual; + + /* + * TX/RX antenna setup. 
+ */ + struct link_ant ant; + + /* + * Currently active average RSSI value + */ + struct avg_val avg_rssi; + + /* + * Work structure for scheduling periodic link tuning. + */ + struct delayed_work work; +}; + +/* + * Interface structure + * Per interface configuration details, this structure + * is allocated as the private data for ieee80211_vif. + */ +struct rt2x00_intf { + /* + * All fields within the rt2x00_intf structure + * must be protected with a spinlock. + */ + spinlock_t lock; + + /* + * MAC of the device. + */ + u8 mac[ETH_ALEN]; + + /* + * BBSID of the AP to associate with. + */ + u8 bssid[ETH_ALEN]; + + /* + * beacon->skb must be protected with the mutex. + */ + struct mutex beacon_skb_mutex; + + /* + * Entry in the beacon queue which belongs to + * this interface. Each interface has its own + * dedicated beacon entry. + */ + struct queue_entry *beacon; + + /* + * Actions that needed rescheduling. + */ + unsigned int delayed_flags; +#define DELAYED_UPDATE_BEACON 0x00000001 + + /* + * Software sequence counter, this is only required + * for hardware which doesn't support hardware + * sequence counting. + */ + spinlock_t seqlock; + u16 seqno; +}; + +static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) +{ + return (struct rt2x00_intf *)vif->drv_priv; +} + +/** + * struct hw_mode_spec: Hardware specifications structure + * + * Details about the supported modes, rates and channels + * of a particular chipset. This is used by rt2x00lib + * to build the ieee80211_hw_mode array for mac80211. + * + * @supported_bands: Bitmask contained the supported bands (2.4GHz, 5.2GHz). + * @supported_rates: Rate types which are supported (CCK, OFDM). + * @num_channels: Number of supported channels. This is used as array size + * for @tx_power_a, @tx_power_bg and @channels. + * @channels: Device/chipset specific channel values (See &struct rf_channel). + * @channels_info: Additional information for channels (See &struct channel_info). + * @ht: Driver HT Capabilities (See &ieee80211_sta_ht_cap). + */ +struct hw_mode_spec { + unsigned int supported_bands; +#define SUPPORT_BAND_2GHZ 0x00000001 +#define SUPPORT_BAND_5GHZ 0x00000002 + + unsigned int supported_rates; +#define SUPPORT_RATE_CCK 0x00000001 +#define SUPPORT_RATE_OFDM 0x00000002 + + unsigned int num_channels; + const struct rf_channel *channels; + const struct channel_info *channels_info; + + struct ieee80211_sta_ht_cap ht; +}; + +/* + * Configuration structure wrapper around the + * mac80211 configuration structure. + * When mac80211 configures the driver, rt2x00lib + * can precalculate values which are equal for all + * rt2x00 drivers. Those values can be stored in here. + */ +struct rt2x00lib_conf { + struct ieee80211_conf *conf; + + struct rf_channel rf; + struct channel_info channel; +}; + +/* + * Configuration structure for erp settings. + */ +struct rt2x00lib_erp { + int short_preamble; + int cts_protection; + + u32 basic_rates; + + int slot_time; + + short sifs; + short pifs; + short difs; + short eifs; + + u16 beacon_int; +}; + +/* + * Configuration structure for hardware encryption. + */ +struct rt2x00lib_crypto { + enum cipher cipher; + + enum set_key_cmd cmd; + const u8 *address; + + u32 bssidx; + u32 aid; + + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; +}; + +/* + * Configuration structure wrapper around the + * rt2x00 interface configuration handler. + */ +struct rt2x00intf_conf { + /* + * Interface type + */ + enum nl80211_iftype type; + + /* + * TSF sync value, this is dependant on the operation type. 
+ */ + enum tsf_sync sync; + + /* + * The MAC and BSSID addressess are simple array of bytes, + * these arrays are little endian, so when sending the addressess + * to the drivers, copy the it into a endian-signed variable. + * + * Note that all devices (except rt2500usb) have 32 bits + * register word sizes. This means that whatever variable we + * pass _must_ be a multiple of 32 bits. Otherwise the device + * might not accept what we are sending to it. + * This will also make it easier for the driver to write + * the data to the device. + */ + __le32 mac[2]; + __le32 bssid[2]; +}; + +/* + * rt2x00lib callback functions. + */ +struct rt2x00lib_ops { + /* + * Interrupt handlers. + */ + irq_handler_t irq_handler; + + /* + * Device init handlers. + */ + int (*probe_hw) (struct rt2x00_dev *rt2x00dev); + char *(*get_firmware_name) (struct rt2x00_dev *rt2x00dev); + int (*check_firmware) (struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len); + int (*load_firmware) (struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len); + + /* + * Device initialization/deinitialization handlers. + */ + int (*initialize) (struct rt2x00_dev *rt2x00dev); + void (*uninitialize) (struct rt2x00_dev *rt2x00dev); + + /* + * queue initialization handlers + */ + bool (*get_entry_state) (struct queue_entry *entry); + void (*clear_entry) (struct queue_entry *entry); + + /* + * Radio control handlers. + */ + int (*set_device_state) (struct rt2x00_dev *rt2x00dev, + enum dev_state state); + int (*rfkill_poll) (struct rt2x00_dev *rt2x00dev); + void (*link_stats) (struct rt2x00_dev *rt2x00dev, + struct link_qual *qual); + void (*reset_tuner) (struct rt2x00_dev *rt2x00dev, + struct link_qual *qual); + void (*link_tuner) (struct rt2x00_dev *rt2x00dev, + struct link_qual *qual, const u32 count); + + /* + * TX control handlers + */ + void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct txentry_desc *txdesc); + int (*write_tx_data) (struct queue_entry *entry); + void (*write_beacon) (struct queue_entry *entry); + int (*get_tx_data_len) (struct queue_entry *entry); + void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue); + void (*kill_tx_queue) (struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue); + + /* + * RX control handlers + */ + void (*fill_rxdone) (struct queue_entry *entry, + struct rxdone_entry_desc *rxdesc); + + /* + * Configuration handlers. + */ + int (*config_shared_key) (struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key); + int (*config_pairwise_key) (struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key); + void (*config_filter) (struct rt2x00_dev *rt2x00dev, + const unsigned int filter_flags); + void (*config_intf) (struct rt2x00_dev *rt2x00dev, + struct rt2x00_intf *intf, + struct rt2x00intf_conf *conf, + const unsigned int flags); +#define CONFIG_UPDATE_TYPE ( 1 << 1 ) +#define CONFIG_UPDATE_MAC ( 1 << 2 ) +#define CONFIG_UPDATE_BSSID ( 1 << 3 ) + + void (*config_erp) (struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_erp *erp); + void (*config_ant) (struct rt2x00_dev *rt2x00dev, + struct antenna_setup *ant); + void (*config) (struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf, + const unsigned int changed_flags); +}; + +/* + * rt2x00 driver callback operation structure. 
+ */ +struct rt2x00_ops { + const char *name; + const unsigned int max_sta_intf; + const unsigned int max_ap_intf; + const unsigned int eeprom_size; + const unsigned int rf_size; + const unsigned int tx_queues; + const unsigned int extra_tx_headroom; + const struct data_queue_desc *rx; + const struct data_queue_desc *tx; + const struct data_queue_desc *bcn; + const struct data_queue_desc *atim; + const struct rt2x00lib_ops *lib; + const struct ieee80211_ops *hw; +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + const struct rt2x00debug *debugfs; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * rt2x00 device flags + */ +enum rt2x00_flags { + /* + * Device state flags + */ + DEVICE_STATE_PRESENT, + DEVICE_STATE_REGISTERED_HW, + DEVICE_STATE_INITIALIZED, + DEVICE_STATE_STARTED, + DEVICE_STATE_ENABLED_RADIO, + + /* + * Driver requirements + */ + DRIVER_REQUIRE_FIRMWARE, + DRIVER_REQUIRE_BEACON_GUARD, + DRIVER_REQUIRE_ATIM_QUEUE, + DRIVER_REQUIRE_DMA, + DRIVER_REQUIRE_COPY_IV, + DRIVER_REQUIRE_L2PAD, + + /* + * Driver features + */ + CONFIG_SUPPORT_HW_BUTTON, + CONFIG_SUPPORT_HW_CRYPTO, + DRIVER_SUPPORT_CONTROL_FILTERS, + DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, + + /* + * Driver configuration + */ + CONFIG_FRAME_TYPE, + CONFIG_RF_SEQUENCE, + CONFIG_EXTERNAL_LNA_A, + CONFIG_EXTERNAL_LNA_BG, + CONFIG_DOUBLE_ANTENNA, + CONFIG_DISABLE_LINK_TUNING, + CONFIG_CHANNEL_HT40, +}; + +/* + * rt2x00 device structure. + */ +struct rt2x00_dev { + /* + * Device structure. + * The structure stored in here depends on the + * system bus (PCI or USB). + * When accessing this variable, the rt2x00dev_{pci,usb} + * macros should be used for correct typecasting. + */ + struct device *dev; + + /* + * Callback functions. + */ + const struct rt2x00_ops *ops; + + /* + * IEEE80211 control structure. + */ + struct ieee80211_hw *hw; + struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + enum ieee80211_band curr_band; + + /* + * If enabled, the debugfs interface structures + * required for deregistration of debugfs. + */ +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + struct rt2x00debug_intf *debugfs_intf; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + + /* + * LED structure for changing the LED status + * by mac8011 or the kernel. + */ +#ifdef CONFIG_RT2X00_LIB_LEDS + struct rt2x00_led led_radio; + struct rt2x00_led led_assoc; + struct rt2x00_led led_qual; + u16 led_mcu_reg; +#endif /* CONFIG_RT2X00_LIB_LEDS */ + + /* + * Device flags. + * In these flags the current status and some + * of the device capabilities are stored. + */ + unsigned long flags; + + /* + * Device information, Bus IRQ and name (PCI, SoC) + */ + int irq; + const char *name; + + /* + * Chipset identification. + */ + struct rt2x00_chip chip; + + /* + * hw capability specifications. + */ + struct hw_mode_spec spec; + + /* + * This is the default TX/RX antenna setup as indicated + * by the device's EEPROM. + */ + struct antenna_setup default_ant; + + /* + * Register pointers + * csr.base: CSR base register address. (PCI) + * csr.cache: CSR cache for usb_control_msg. (USB) + */ + union csr { + void __iomem *base; + void *cache; + } csr; + + /* + * Mutex to protect register accesses. + * For PCI and USB devices it protects against concurrent indirect + * register access (BBP, RF, MCU) since accessing those + * registers require multiple calls to the CSR registers. + * For USB devices it also protects the csr_cache since that + * field is used for normal CSR access and it cannot support + * multiple callers simultaneously. 
+ */ + struct mutex csr_mutex; + + /* + * Current packet filter configuration for the device. + * This contains all currently active FIF_* flags send + * to us by mac80211 during configure_filter(). + */ + unsigned int packet_filter; + + /* + * Interface details: + * - Open ap interface count. + * - Open sta interface count. + * - Association count. + */ + unsigned int intf_ap_count; + unsigned int intf_sta_count; + unsigned int intf_associated; + + /* + * Link quality + */ + struct link link; + + /* + * EEPROM data. + */ + __le16 *eeprom; + + /* + * Active RF register values. + * These are stored here so we don't need + * to read the rf registers and can directly + * use this value instead. + * This field should be accessed by using + * rt2x00_rf_read() and rt2x00_rf_write(). + */ + u32 *rf; + + /* + * LNA gain + */ + short lna_gain; + + /* + * Current TX power value. + */ + u16 tx_power; + + /* + * Current retry values. + */ + u8 short_retry; + u8 long_retry; + + /* + * Rssi <-> Dbm offset + */ + u8 rssi_offset; + + /* + * Frequency offset (for rt61pci & rt73usb). + */ + u8 freq_offset; + + /* + * Calibration information (for rt2800usb & rt2800pci). + * [0] -> BW20 + * [1] -> BW40 + */ + u8 calibration[2]; + + /* + * Beacon interval. + */ + u16 beacon_int; + + /* + * Low level statistics which will have + * to be kept up to date while device is running. + */ + struct ieee80211_low_level_stats low_level_stats; + + /* + * RX configuration information. + */ + struct ieee80211_rx_status rx_status; + + /* + * Scheduled work. + * NOTE: intf_work will use ieee80211_iterate_active_interfaces() + * which means it cannot be placed on the hw->workqueue + * due to RTNL locking requirements. + */ + struct work_struct intf_work; + + /* + * Data queue arrays for RX, TX and Beacon. + * The Beacon array also contains the Atim queue + * if that is supported by the device. + */ + unsigned int data_queues; + struct data_queue *rx; + struct data_queue *tx; + struct data_queue *bcn; + + /* + * Firmware image. + */ + const struct firmware *fw; + + /* + * Driver specific data. + */ + void *priv; +}; + +/* + * Register defines. + * Some registers require multiple attempts before success, + * in those cases REGISTER_BUSY_COUNT attempts should be + * taken with a REGISTER_BUSY_DELAY interval. + */ +#define REGISTER_BUSY_COUNT 5 +#define REGISTER_BUSY_DELAY 100 + +/* + * Generic RF access. + * The RF is being accessed by word index. + */ +static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32)); + *data = rt2x00dev->rf[word - 1]; +} + +static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32)); + rt2x00dev->rf[word - 1] = data; +} + +/* + * Generic EEPROM access. + * The EEPROM is being accessed by word index. 
+ */ +static inline void *rt2x00_eeprom_addr(struct rt2x00_dev *rt2x00dev, + const unsigned int word) +{ + return (void *)&rt2x00dev->eeprom[word]; +} + +static inline void rt2x00_eeprom_read(struct rt2x00_dev *rt2x00dev, + const unsigned int word, u16 *data) +{ + *data = le16_to_cpu(rt2x00dev->eeprom[word]); +} + +static inline void rt2x00_eeprom_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, u16 data) +{ + rt2x00dev->eeprom[word] = cpu_to_le16(data); +} + +/* + * Chipset handlers + */ +static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev, + const u16 rt, const u16 rf, const u16 rev) +{ + rt2x00dev->chip.rt = rt; + rt2x00dev->chip.rf = rf; + rt2x00dev->chip.rev = rev; + + INFO(rt2x00dev, + "Chipset detected - rt: %04x, rf: %04x, rev: %04x.\n", + rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev); +} + +static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt) +{ + return (rt2x00dev->chip.rt == rt); +} + +static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf) +{ + return (rt2x00dev->chip.rf == rf); +} + +static inline u16 rt2x00_rev(struct rt2x00_dev *rt2x00dev) +{ + return rt2x00dev->chip.rev; +} + +static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev, + enum rt2x00_chip_intf intf) +{ + rt2x00dev->chip.intf = intf; +} + +static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev, + enum rt2x00_chip_intf intf) +{ + return (rt2x00dev->chip.intf == intf); +} + +static inline bool rt2x00_is_pci(struct rt2x00_dev *rt2x00dev) +{ + return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); +} + +static inline bool rt2x00_is_usb(struct rt2x00_dev *rt2x00dev) +{ + return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_USB); +} + +static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev) +{ + return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC); +} + +/** + * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes. + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @skb: The skb to map. + */ +void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); + +/** + * rt2x00queue_get_queue - Convert queue index to queue pointer + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @queue: rt2x00 queue index (see &enum data_queue_qid). + */ +struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue); + +/** + * rt2x00queue_get_entry - Get queue entry where the given index points to. + * @queue: Pointer to &struct data_queue from where we obtain the entry. + * @index: Index identifier for obtaining the correct index. + */ +struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, + enum queue_index index); + +/* + * Interrupt context handlers. + */ +void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_txdone(struct queue_entry *entry, + struct txdone_entry_desc *txdesc); +void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev, + struct queue_entry *entry); + +/* + * mac80211 handlers. 
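
The EEPROM cache above is kept in device (little-endian) byte order and converted on every rt2x00_eeprom_read()/rt2x00_eeprom_write() call, so the same word indexes work unchanged on big-endian hosts. A user-space sketch of that per-access conversion, using a hypothetical two-word dump:

#include <stdio.h>
#include <stdint.h>

/* Illustrative equivalent of le16_to_cpu() on a raw byte dump. */
static uint16_t le16_to_host(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	/* Two EEPROM words as they would sit in the little-endian dump
	 * (values are made up for the example). */
	uint8_t eeprom[4] = { 0x70, 0x28, 0x22, 0x30 };

	printf("word 0: 0x%04x\n", le16_to_host(&eeprom[0]));	/* 0x2870 */
	printf("word 1: 0x%04x\n", le16_to_host(&eeprom[2]));	/* 0x3022 */
	return 0;
}
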
+ */ +int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +int rt2x00mac_start(struct ieee80211_hw *hw); +void rt2x00mac_stop(struct ieee80211_hw *hw); +int rt2x00mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void rt2x00mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed); +void rt2x00mac_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast); +int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set); +#ifdef CONFIG_RT2X00_LIB_CRYPTO +int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +#else +#define rt2x00mac_set_key NULL +#endif /* CONFIG_RT2X00_LIB_CRYPTO */ +int rt2x00mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats); +void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes); +int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params); +void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw); + +/* + * Driver allocation handlers. + */ +int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev); +#ifdef CONFIG_PM +int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state); +int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev); +#endif /* CONFIG_PM */ + +#endif /* RT2X00_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c new file mode 100644 index 0000000..098315a --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00config.c @@ -0,0 +1,212 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 generic configuration routines. + */ + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, + struct rt2x00_intf *intf, + enum nl80211_iftype type, + const u8 *mac, const u8 *bssid) +{ + struct rt2x00intf_conf conf; + unsigned int flags = 0; + + conf.type = type; + + switch (type) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_WDS: + conf.sync = TSF_SYNC_BEACON; + break; + case NL80211_IFTYPE_STATION: + conf.sync = TSF_SYNC_INFRA; + break; + default: + conf.sync = TSF_SYNC_NONE; + break; + } + + /* + * Note that when NULL is passed as address we will send + * 00:00:00:00:00 to the device to clear the address. + * This will prevent the device being confused when it wants + * to ACK frames or consideres itself associated. 
+ */ + memset(&conf.mac, 0, sizeof(conf.mac)); + if (mac) + memcpy(&conf.mac, mac, ETH_ALEN); + + memset(&conf.bssid, 0, sizeof(conf.bssid)); + if (bssid) + memcpy(&conf.bssid, bssid, ETH_ALEN); + + flags |= CONFIG_UPDATE_TYPE; + if (mac || (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)) + flags |= CONFIG_UPDATE_MAC; + if (bssid || (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)) + flags |= CONFIG_UPDATE_BSSID; + + rt2x00dev->ops->lib->config_intf(rt2x00dev, intf, &conf, flags); +} + +void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev, + struct rt2x00_intf *intf, + struct ieee80211_bss_conf *bss_conf) +{ + struct rt2x00lib_erp erp; + + memset(&erp, 0, sizeof(erp)); + + erp.short_preamble = bss_conf->use_short_preamble; + erp.cts_protection = bss_conf->use_cts_prot; + + erp.slot_time = bss_conf->use_short_slot ? SHORT_SLOT_TIME : SLOT_TIME; + erp.sifs = SIFS; + erp.pifs = bss_conf->use_short_slot ? SHORT_PIFS : PIFS; + erp.difs = bss_conf->use_short_slot ? SHORT_DIFS : DIFS; + erp.eifs = bss_conf->use_short_slot ? SHORT_EIFS : EIFS; + + erp.basic_rates = bss_conf->basic_rates; + erp.beacon_int = bss_conf->beacon_int; + + /* Update global beacon interval time, this is needed for PS support */ + rt2x00dev->beacon_int = bss_conf->beacon_int; + + rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp); +} + +static inline +enum antenna rt2x00lib_config_antenna_check(enum antenna current_ant, + enum antenna default_ant) +{ + if (current_ant != ANTENNA_SW_DIVERSITY) + return current_ant; + return (default_ant != ANTENNA_SW_DIVERSITY) ? default_ant : ANTENNA_B; +} + +void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, + struct antenna_setup config) +{ + struct link_ant *ant = &rt2x00dev->link.ant; + struct antenna_setup *def = &rt2x00dev->default_ant; + struct antenna_setup *active = &rt2x00dev->link.ant.active; + + /* + * Failsafe: Make sure we are not sending the + * ANTENNA_SW_DIVERSITY state to the driver. + * If that happens, fallback to hardware defaults, + * or our own default. + * If diversity handling is active for a particular antenna, + * we shouldn't overwrite that antenna. + * The calls to rt2x00lib_config_antenna_check() + * might have caused that we restore back to the already + * active setting. If that has happened we can quit. + */ + if (!(ant->flags & ANTENNA_RX_DIVERSITY)) + config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx); + else + config.rx = active->rx; + + if (!(ant->flags & ANTENNA_TX_DIVERSITY)) + config.tx = rt2x00lib_config_antenna_check(config.tx, def->tx); + else + config.tx = active->tx; + + if (config.rx == active->rx && config.tx == active->tx) + return; + + /* + * Antenna setup changes require the RX to be disabled, + * else the changes will be ignored by the device. + */ + if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK); + + /* + * Write new antenna setup to device and reset the link tuner. + * The latter is required since we need to recalibrate the + * noise-sensitivity ratio for the new setup. 
+ */ + rt2x00dev->ops->lib->config_ant(rt2x00dev, &config); + + rt2x00link_reset_tuner(rt2x00dev, true); + + memcpy(active, &config, sizeof(config)); + + if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); +} + +void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, + unsigned int ieee80211_flags) +{ + struct rt2x00lib_conf libconf; + + memset(&libconf, 0, sizeof(libconf)); + + libconf.conf = conf; + + if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) { + if (conf_is_ht40(conf)) + __set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); + else + __clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); + + memcpy(&libconf.rf, + &rt2x00dev->spec.channels[conf->channel->hw_value], + sizeof(libconf.rf)); + + memcpy(&libconf.channel, + &rt2x00dev->spec.channels_info[conf->channel->hw_value], + sizeof(libconf.channel)); + } + + /* + * Start configuration. + */ + rt2x00dev->ops->lib->config(rt2x00dev, &libconf, ieee80211_flags); + + /* + * Some configuration changes affect the link quality + * which means we need to reset the link tuner. + */ + if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) + rt2x00link_reset_tuner(rt2x00dev, false); + + rt2x00dev->curr_band = conf->channel->band; + rt2x00dev->tx_power = conf->power_level; + rt2x00dev->short_retry = conf->short_frame_max_tx_count; + rt2x00dev->long_retry = conf->long_frame_max_tx_count; + + rt2x00dev->rx_status.band = conf->channel->band; + rt2x00dev->rx_status.freq = conf->channel->center_freq; +} diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c new file mode 100644 index 0000000..d291c78 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c @@ -0,0 +1,258 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 crypto specific routines. 
+ */ + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key) +{ + switch (key->alg) { + case ALG_WEP: + if (key->keylen == WLAN_KEY_LEN_WEP40) + return CIPHER_WEP64; + else + return CIPHER_WEP128; + case ALG_TKIP: + return CIPHER_TKIP; + case ALG_CCMP: + return CIPHER_AES; + default: + return CIPHER_NONE; + } +} + +void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry, + struct txentry_desc *txdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); + struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; + + if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) || !hw_key) + return; + + __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags); + + txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key); + + if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags); + + txdesc->key_idx = hw_key->hw_key_idx; + txdesc->iv_offset = txdesc->header_length; + txdesc->iv_len = hw_key->iv_len; + + if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) + __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags); + + if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) + __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags); +} + +unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *key = tx_info->control.hw_key; + unsigned int overhead = 0; + + if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) || !key) + return overhead; + + /* + * Extend frame length to include IV/EIV/ICV/MMIC, + * note that these lengths should only be added when + * mac80211 does not generate it. 
+ */ + overhead += key->icv_len; + + if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) + overhead += key->iv_len; + + if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { + if (key->alg == ALG_TKIP) + overhead += 8; + } + + return overhead; +} + +void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, struct txentry_desc *txdesc) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + + if (unlikely(!txdesc->iv_len)) + return; + + /* Copy IV/EIV data */ + memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len); +} + +void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + + if (unlikely(!txdesc->iv_len)) + return; + + /* Copy IV/EIV data */ + memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len); + + /* Move ieee80211 header */ + memmove(skb->data + txdesc->iv_len, skb->data, txdesc->iv_offset); + + /* Pull buffer to correct size */ + skb_pull(skb, txdesc->iv_len); + + /* IV/EIV data has officially been stripped */ + skbdesc->flags |= SKBDESC_IV_STRIPPED; +} + +void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, unsigned int header_length) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + const unsigned int iv_len = + ((!!(skbdesc->iv[0])) * 4) + ((!!(skbdesc->iv[1])) * 4); + + if (!(skbdesc->flags & SKBDESC_IV_STRIPPED)) + return; + + skb_push(skb, iv_len); + + /* Move ieee80211 header */ + memmove(skb->data, skb->data + iv_len, header_length); + + /* Copy IV/EIV data */ + memcpy(skb->data + header_length, skbdesc->iv, iv_len); + + /* IV/EIV data has returned into the frame */ + skbdesc->flags &= ~SKBDESC_IV_STRIPPED; +} + +void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, + unsigned int header_length, + struct rxdone_entry_desc *rxdesc) +{ + unsigned int payload_len = rxdesc->size - header_length; + unsigned int align = ALIGN_SIZE(skb, header_length); + unsigned int iv_len; + unsigned int icv_len; + unsigned int transfer = 0; + + /* + * WEP64/WEP128: Provides IV & ICV + * TKIP: Provides IV/EIV & ICV + * AES: Provies IV/EIV & ICV + */ + switch (rxdesc->cipher) { + case CIPHER_WEP64: + case CIPHER_WEP128: + iv_len = 4; + icv_len = 4; + break; + case CIPHER_TKIP: + iv_len = 8; + icv_len = 4; + break; + case CIPHER_AES: + iv_len = 8; + icv_len = 8; + break; + default: + /* Unsupport type */ + return; + } + + /* + * Make room for new data. There are 2 possibilities + * either the alignment is already present between + * the 802.11 header and payload. In that case we + * we have to move the header less then the iv_len + * since we can use the already available l2pad bytes + * for the iv data. + * When the alignment must be added manually we must + * move the header more then iv_len since we must + * make room for the payload move as well. 
+ */ + if (rxdesc->dev_flags & RXDONE_L2PAD) { + skb_push(skb, iv_len - align); + skb_put(skb, icv_len); + + /* Move ieee80211 header */ + memmove(skb->data + transfer, + skb->data + transfer + (iv_len - align), + header_length); + transfer += header_length; + } else { + skb_push(skb, iv_len + align); + if (align < icv_len) + skb_put(skb, icv_len - align); + else if (align > icv_len) + skb_trim(skb, rxdesc->size + iv_len + icv_len); + + /* Move ieee80211 header */ + memmove(skb->data + transfer, + skb->data + transfer + iv_len + align, + header_length); + transfer += header_length; + } + + /* Copy IV/EIV data */ + memcpy(skb->data + transfer, rxdesc->iv, iv_len); + transfer += iv_len; + + /* + * Move payload for alignment purposes. Note that + * this is only needed when no l2 padding is present. + */ + if (!(rxdesc->dev_flags & RXDONE_L2PAD)) { + memmove(skb->data + transfer, + skb->data + transfer + align, + payload_len); + } + + /* + * NOTE: Always count the payload as transfered, + * even when alignment was set to zero. This is required + * for determining the correct offset for the ICV data. + */ + transfer += payload_len; + + /* + * Copy ICV data + * AES appends 8 bytes, we can't fill the upper + * 4 bytes, but mac80211 doesn't care about what + * we provide here anyway and strips it immediately. + */ + memcpy(skb->data + transfer, &rxdesc->icv, 4); + transfer += icv_len; + + /* IV/EIV/ICV has been inserted into frame */ + rxdesc->size = transfer; + rxdesc->flags &= ~RX_FLAG_IV_STRIPPED; +} diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c new file mode 100644 index 0000000..28a1c46 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00debug.c @@ -0,0 +1,739 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 debugfs specific routines. + */ + +#include +#include +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" +#include "rt2x00dump.h" + +#define MAX_LINE_LENGTH 64 + +struct rt2x00debug_crypto { + unsigned long success; + unsigned long icv_error; + unsigned long mic_error; + unsigned long key_error; +}; + +struct rt2x00debug_intf { + /* + * Pointer to driver structure where + * this debugfs entry belongs to. + */ + struct rt2x00_dev *rt2x00dev; + + /* + * Reference to the rt2x00debug structure + * which can be used to communicate with + * the registers. 
+ */ + const struct rt2x00debug *debug; + + /* + * Debugfs entries for: + * - driver folder + * - driver file + * - chipset file + * - device flags file + * - register folder + * - csr offset/value files + * - eeprom offset/value files + * - bbp offset/value files + * - rf offset/value files + * - queue folder + * - frame dump file + * - queue stats file + * - crypto stats file + */ + struct dentry *driver_folder; + struct dentry *driver_entry; + struct dentry *chipset_entry; + struct dentry *dev_flags; + struct dentry *register_folder; + struct dentry *csr_off_entry; + struct dentry *csr_val_entry; + struct dentry *eeprom_off_entry; + struct dentry *eeprom_val_entry; + struct dentry *bbp_off_entry; + struct dentry *bbp_val_entry; + struct dentry *rf_off_entry; + struct dentry *rf_val_entry; + struct dentry *queue_folder; + struct dentry *queue_frame_dump_entry; + struct dentry *queue_stats_entry; + struct dentry *crypto_stats_entry; + + /* + * The frame dump file only allows a single reader, + * so we need to store the current state here. + */ + unsigned long frame_dump_flags; +#define FRAME_DUMP_FILE_OPEN 1 + + /* + * We queue each frame before dumping it to the user, + * per read command we will pass a single skb structure + * so we should be prepared to queue multiple sk buffers + * before sending it to userspace. + */ + struct sk_buff_head frame_dump_skbqueue; + wait_queue_head_t frame_dump_waitqueue; + + /* + * HW crypto statistics. + * All statistics are stored separately per cipher type. + */ + struct rt2x00debug_crypto crypto_stats[CIPHER_MAX]; + + /* + * Driver and chipset files will use a data buffer + * that has been created in advance. This will simplify + * the code since we can use the debugfs functions. + */ + struct debugfs_blob_wrapper driver_blob; + struct debugfs_blob_wrapper chipset_blob; + + /* + * Requested offset for each register type. 
+ */ + unsigned int offset_csr; + unsigned int offset_eeprom; + unsigned int offset_bbp; + unsigned int offset_rf; +}; + +void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, + struct rxdone_entry_desc *rxdesc) +{ + struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; + enum cipher cipher = rxdesc->cipher; + enum rx_crypto status = rxdesc->cipher_status; + + if (cipher == CIPHER_TKIP_NO_MIC) + cipher = CIPHER_TKIP; + if (cipher == CIPHER_NONE || cipher >= CIPHER_MAX) + return; + + /* Remove CIPHER_NONE index */ + cipher--; + + intf->crypto_stats[cipher].success += (status == RX_CRYPTO_SUCCESS); + intf->crypto_stats[cipher].icv_error += (status == RX_CRYPTO_FAIL_ICV); + intf->crypto_stats[cipher].mic_error += (status == RX_CRYPTO_FAIL_MIC); + intf->crypto_stats[cipher].key_error += (status == RX_CRYPTO_FAIL_KEY); +} + +void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, + enum rt2x00_dump_type type, struct sk_buff *skb) +{ + struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; + struct skb_frame_desc *desc = get_skb_frame_desc(skb); + struct sk_buff *skbcopy; + struct rt2x00dump_hdr *dump_hdr; + struct timeval timestamp; + + do_gettimeofday(×tamp); + + if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)) + return; + + if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) { + DEBUG(rt2x00dev, "txrx dump queue length exceeded.\n"); + return; + } + + skbcopy = alloc_skb(sizeof(*dump_hdr) + desc->desc_len + skb->len, + GFP_ATOMIC); + if (!skbcopy) { + DEBUG(rt2x00dev, "Failed to copy skb for dump.\n"); + return; + } + + dump_hdr = (struct rt2x00dump_hdr *)skb_put(skbcopy, sizeof(*dump_hdr)); + dump_hdr->version = cpu_to_le32(DUMP_HEADER_VERSION); + dump_hdr->header_length = cpu_to_le32(sizeof(*dump_hdr)); + dump_hdr->desc_length = cpu_to_le32(desc->desc_len); + dump_hdr->data_length = cpu_to_le32(skb->len); + dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt); + dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf); + dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev); + dump_hdr->type = cpu_to_le16(type); + dump_hdr->queue_index = desc->entry->queue->qid; + dump_hdr->entry_index = desc->entry->entry_idx; + dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec); + dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec); + + memcpy(skb_put(skbcopy, desc->desc_len), desc->desc, desc->desc_len); + memcpy(skb_put(skbcopy, skb->len), skb->data, skb->len); + + skb_queue_tail(&intf->frame_dump_skbqueue, skbcopy); + wake_up_interruptible(&intf->frame_dump_waitqueue); + + /* + * Verify that the file has not been closed while we were working. 
+ */ + if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)) + skb_queue_purge(&intf->frame_dump_skbqueue); +} + +static int rt2x00debug_file_open(struct inode *inode, struct file *file) +{ + struct rt2x00debug_intf *intf = inode->i_private; + + file->private_data = inode->i_private; + + if (!try_module_get(intf->debug->owner)) + return -EBUSY; + + return 0; +} + +static int rt2x00debug_file_release(struct inode *inode, struct file *file) +{ + struct rt2x00debug_intf *intf = file->private_data; + + module_put(intf->debug->owner); + + return 0; +} + +static int rt2x00debug_open_queue_dump(struct inode *inode, struct file *file) +{ + struct rt2x00debug_intf *intf = inode->i_private; + int retval; + + retval = rt2x00debug_file_open(inode, file); + if (retval) + return retval; + + if (test_and_set_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)) { + rt2x00debug_file_release(inode, file); + return -EBUSY; + } + + return 0; +} + +static int rt2x00debug_release_queue_dump(struct inode *inode, struct file *file) +{ + struct rt2x00debug_intf *intf = inode->i_private; + + skb_queue_purge(&intf->frame_dump_skbqueue); + + clear_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags); + + return rt2x00debug_file_release(inode, file); +} + +static ssize_t rt2x00debug_read_queue_dump(struct file *file, + char __user *buf, + size_t length, + loff_t *offset) +{ + struct rt2x00debug_intf *intf = file->private_data; + struct sk_buff *skb; + size_t status; + int retval; + + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + retval = + wait_event_interruptible(intf->frame_dump_waitqueue, + (skb = + skb_dequeue(&intf->frame_dump_skbqueue))); + if (retval) + return retval; + + status = min((size_t)skb->len, length); + if (copy_to_user(buf, skb->data, status)) { + status = -EFAULT; + goto exit; + } + + *offset += status; + +exit: + kfree_skb(skb); + + return status; +} + +static unsigned int rt2x00debug_poll_queue_dump(struct file *file, + poll_table *wait) +{ + struct rt2x00debug_intf *intf = file->private_data; + + poll_wait(file, &intf->frame_dump_waitqueue, wait); + + if (!skb_queue_empty(&intf->frame_dump_skbqueue)) + return POLLOUT | POLLWRNORM; + + return 0; +} + +static const struct file_operations rt2x00debug_fop_queue_dump = { + .owner = THIS_MODULE, + .read = rt2x00debug_read_queue_dump, + .poll = rt2x00debug_poll_queue_dump, + .open = rt2x00debug_open_queue_dump, + .release = rt2x00debug_release_queue_dump, +}; + +static ssize_t rt2x00debug_read_queue_stats(struct file *file, + char __user *buf, + size_t length, + loff_t *offset) +{ + struct rt2x00debug_intf *intf = file->private_data; + struct data_queue *queue; + unsigned long irqflags; + unsigned int lines = 1 + intf->rt2x00dev->data_queues; + size_t size; + char *data; + char *temp; + + if (*offset) + return 0; + + data = kzalloc(lines * MAX_LINE_LENGTH, GFP_KERNEL); + if (!data) + return -ENOMEM; + + temp = data + + sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdone\tcrypto\n"); + + queue_for_each(intf->rt2x00dev, queue) { + spin_lock_irqsave(&queue->lock, irqflags); + + temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid, + queue->count, queue->limit, queue->length, + queue->index[Q_INDEX], + queue->index[Q_INDEX_DONE], + queue->index[Q_INDEX_CRYPTO]); + + spin_unlock_irqrestore(&queue->lock, irqflags); + } + + size = strlen(data); + size = min(size, length); + + if (copy_to_user(buf, data, size)) { + kfree(data); + return -EFAULT; + } + + kfree(data); + + *offset += size; + return size; +} + +static const struct 
file_operations rt2x00debug_fop_queue_stats = { + .owner = THIS_MODULE, + .read = rt2x00debug_read_queue_stats, + .open = rt2x00debug_file_open, + .release = rt2x00debug_file_release, +}; + +#ifdef CONFIG_RT2X00_LIB_CRYPTO +static ssize_t rt2x00debug_read_crypto_stats(struct file *file, + char __user *buf, + size_t length, + loff_t *offset) +{ + struct rt2x00debug_intf *intf = file->private_data; + char *name[] = { "WEP64", "WEP128", "TKIP", "AES" }; + char *data; + char *temp; + size_t size; + unsigned int i; + + if (*offset) + return 0; + + data = kzalloc((1 + CIPHER_MAX) * MAX_LINE_LENGTH, GFP_KERNEL); + if (!data) + return -ENOMEM; + + temp = data; + temp += sprintf(data, "cipher\tsuccess\ticv err\tmic err\tkey err\n"); + + for (i = 0; i < CIPHER_MAX; i++) { + temp += sprintf(temp, "%s\t%lu\t%lu\t%lu\t%lu\n", name[i], + intf->crypto_stats[i].success, + intf->crypto_stats[i].icv_error, + intf->crypto_stats[i].mic_error, + intf->crypto_stats[i].key_error); + } + + size = strlen(data); + size = min(size, length); + + if (copy_to_user(buf, data, size)) { + kfree(data); + return -EFAULT; + } + + kfree(data); + + *offset += size; + return size; +} + +static const struct file_operations rt2x00debug_fop_crypto_stats = { + .owner = THIS_MODULE, + .read = rt2x00debug_read_crypto_stats, + .open = rt2x00debug_file_open, + .release = rt2x00debug_file_release, +}; +#endif + +#define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \ +static ssize_t rt2x00debug_read_##__name(struct file *file, \ + char __user *buf, \ + size_t length, \ + loff_t *offset) \ +{ \ + struct rt2x00debug_intf *intf = file->private_data; \ + const struct rt2x00debug *debug = intf->debug; \ + char line[16]; \ + size_t size; \ + unsigned int index = intf->offset_##__name; \ + __type value; \ + \ + if (*offset) \ + return 0; \ + \ + if (index >= debug->__name.word_count) \ + return -EINVAL; \ + \ + index += (debug->__name.word_base / \ + debug->__name.word_size); \ + \ + if (debug->__name.flags & RT2X00DEBUGFS_OFFSET) \ + index *= debug->__name.word_size; \ + \ + debug->__name.read(intf->rt2x00dev, index, &value); \ + \ + size = sprintf(line, __format, value); \ + \ + if (copy_to_user(buf, line, size)) \ + return -EFAULT; \ + \ + *offset += size; \ + return size; \ +} + +#define RT2X00DEBUGFS_OPS_WRITE(__name, __type) \ +static ssize_t rt2x00debug_write_##__name(struct file *file, \ + const char __user *buf,\ + size_t length, \ + loff_t *offset) \ +{ \ + struct rt2x00debug_intf *intf = file->private_data; \ + const struct rt2x00debug *debug = intf->debug; \ + char line[16]; \ + size_t size; \ + unsigned int index = intf->offset_##__name; \ + __type value; \ + \ + if (*offset) \ + return 0; \ + \ + if (index >= debug->__name.word_count) \ + return -EINVAL; \ + \ + if (copy_from_user(line, buf, length)) \ + return -EFAULT; \ + \ + size = strlen(line); \ + value = simple_strtoul(line, NULL, 0); \ + \ + index += (debug->__name.word_base / \ + debug->__name.word_size); \ + \ + if (debug->__name.flags & RT2X00DEBUGFS_OFFSET) \ + index *= debug->__name.word_size; \ + \ + debug->__name.write(intf->rt2x00dev, index, value); \ + \ + *offset += size; \ + return size; \ +} + +#define RT2X00DEBUGFS_OPS(__name, __format, __type) \ +RT2X00DEBUGFS_OPS_READ(__name, __format, __type); \ +RT2X00DEBUGFS_OPS_WRITE(__name, __type); \ + \ +static const struct file_operations rt2x00debug_fop_##__name = {\ + .owner = THIS_MODULE, \ + .read = rt2x00debug_read_##__name, \ + .write = rt2x00debug_write_##__name, \ + .open = rt2x00debug_file_open, \ + 
.release = rt2x00debug_file_release, \ +}; + +RT2X00DEBUGFS_OPS(csr, "0x%.8x\n", u32); +RT2X00DEBUGFS_OPS(eeprom, "0x%.4x\n", u16); +RT2X00DEBUGFS_OPS(bbp, "0x%.2x\n", u8); +RT2X00DEBUGFS_OPS(rf, "0x%.8x\n", u32); + +static ssize_t rt2x00debug_read_dev_flags(struct file *file, + char __user *buf, + size_t length, + loff_t *offset) +{ + struct rt2x00debug_intf *intf = file->private_data; + char line[16]; + size_t size; + + if (*offset) + return 0; + + size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->flags); + + if (copy_to_user(buf, line, size)) + return -EFAULT; + + *offset += size; + return size; +} + +static const struct file_operations rt2x00debug_fop_dev_flags = { + .owner = THIS_MODULE, + .read = rt2x00debug_read_dev_flags, + .open = rt2x00debug_file_open, + .release = rt2x00debug_file_release, +}; + +static struct dentry *rt2x00debug_create_file_driver(const char *name, + struct rt2x00debug_intf + *intf, + struct debugfs_blob_wrapper + *blob) +{ + char *data; + + data = kzalloc(3 * MAX_LINE_LENGTH, GFP_KERNEL); + if (!data) + return NULL; + + blob->data = data; + data += sprintf(data, "driver:\t%s\n", intf->rt2x00dev->ops->name); + data += sprintf(data, "version:\t%s\n", DRV_VERSION); + data += sprintf(data, "compiled:\t%s %s\n", __DATE__, __TIME__); + blob->size = strlen(blob->data); + + return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob); +} + +static struct dentry *rt2x00debug_create_file_chipset(const char *name, + struct rt2x00debug_intf + *intf, + struct + debugfs_blob_wrapper + *blob) +{ + const struct rt2x00debug *debug = intf->debug; + char *data; + + data = kzalloc(8 * MAX_LINE_LENGTH, GFP_KERNEL); + if (!data) + return NULL; + + blob->data = data; + data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt); + data += sprintf(data, "rf chip:\t%04x\n", intf->rt2x00dev->chip.rf); + data += sprintf(data, "revision:\t%04x\n", intf->rt2x00dev->chip.rev); + data += sprintf(data, "\n"); + data += sprintf(data, "register\tbase\twords\twordsize\n"); + data += sprintf(data, "csr\t%d\t%d\t%d\n", + debug->csr.word_base, + debug->csr.word_count, + debug->csr.word_size); + data += sprintf(data, "eeprom\t%d\t%d\t%d\n", + debug->eeprom.word_base, + debug->eeprom.word_count, + debug->eeprom.word_size); + data += sprintf(data, "bbp\t%d\t%d\t%d\n", + debug->bbp.word_base, + debug->bbp.word_count, + debug->bbp.word_size); + data += sprintf(data, "rf\t%d\t%d\t%d\n", + debug->rf.word_base, + debug->rf.word_count, + debug->rf.word_size); + blob->size = strlen(blob->data); + + return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob); +} + +void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) +{ + const struct rt2x00debug *debug = rt2x00dev->ops->debugfs; + struct rt2x00debug_intf *intf; + + intf = kzalloc(sizeof(struct rt2x00debug_intf), GFP_KERNEL); + if (!intf) { + ERROR(rt2x00dev, "Failed to allocate debug handler.\n"); + return; + } + + intf->debug = debug; + intf->rt2x00dev = rt2x00dev; + rt2x00dev->debugfs_intf = intf; + + intf->driver_folder = + debugfs_create_dir(intf->rt2x00dev->ops->name, + rt2x00dev->hw->wiphy->debugfsdir); + if (IS_ERR(intf->driver_folder) || !intf->driver_folder) + goto exit; + + intf->driver_entry = + rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob); + if (IS_ERR(intf->driver_entry) || !intf->driver_entry) + goto exit; + + intf->chipset_entry = + rt2x00debug_create_file_chipset("chipset", + intf, &intf->chipset_blob); + if (IS_ERR(intf->chipset_entry) || !intf->chipset_entry) + goto exit; + + 
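+ /*
+ * debugfs_create_*() reports failure either as an error pointer
+ * (for example when debugfs support is not available) or as NULL,
+ * which is why every created entry is checked with IS_ERR() as
+ * well as against NULL before continuing.
+ */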
intf->dev_flags = debugfs_create_file("dev_flags", S_IRUSR, + intf->driver_folder, intf, + &rt2x00debug_fop_dev_flags); + if (IS_ERR(intf->dev_flags) || !intf->dev_flags) + goto exit; + + intf->register_folder = + debugfs_create_dir("register", intf->driver_folder); + if (IS_ERR(intf->register_folder) || !intf->register_folder) + goto exit; + +#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ +({ \ + (__intf)->__name##_off_entry = \ + debugfs_create_u32(__stringify(__name) "_offset", \ + S_IRUSR | S_IWUSR, \ + (__intf)->register_folder, \ + &(__intf)->offset_##__name); \ + if (IS_ERR((__intf)->__name##_off_entry) \ + || !(__intf)->__name##_off_entry) \ + goto exit; \ + \ + (__intf)->__name##_val_entry = \ + debugfs_create_file(__stringify(__name) "_value", \ + S_IRUSR | S_IWUSR, \ + (__intf)->register_folder, \ + (__intf), &rt2x00debug_fop_##__name);\ + if (IS_ERR((__intf)->__name##_val_entry) \ + || !(__intf)->__name##_val_entry) \ + goto exit; \ +}) + + RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, csr); + RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, eeprom); + RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, bbp); + RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, rf); + +#undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY + + intf->queue_folder = + debugfs_create_dir("queue", intf->driver_folder); + if (IS_ERR(intf->queue_folder) || !intf->queue_folder) + goto exit; + + intf->queue_frame_dump_entry = + debugfs_create_file("dump", S_IRUSR, intf->queue_folder, + intf, &rt2x00debug_fop_queue_dump); + if (IS_ERR(intf->queue_frame_dump_entry) + || !intf->queue_frame_dump_entry) + goto exit; + + skb_queue_head_init(&intf->frame_dump_skbqueue); + init_waitqueue_head(&intf->frame_dump_waitqueue); + + intf->queue_stats_entry = + debugfs_create_file("queue", S_IRUSR, intf->queue_folder, + intf, &rt2x00debug_fop_queue_stats); + +#ifdef CONFIG_RT2X00_LIB_CRYPTO + if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) + intf->crypto_stats_entry = + debugfs_create_file("crypto", S_IRUGO, intf->queue_folder, + intf, &rt2x00debug_fop_crypto_stats); +#endif + + return; + +exit: + rt2x00debug_deregister(rt2x00dev); + ERROR(rt2x00dev, "Failed to register debug handler.\n"); + + return; +} + +void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) +{ + struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; + + if (unlikely(!intf)) + return; + + skb_queue_purge(&intf->frame_dump_skbqueue); + +#ifdef CONFIG_RT2X00_LIB_CRYPTO + debugfs_remove(intf->crypto_stats_entry); +#endif + debugfs_remove(intf->queue_stats_entry); + debugfs_remove(intf->queue_frame_dump_entry); + debugfs_remove(intf->queue_folder); + debugfs_remove(intf->rf_val_entry); + debugfs_remove(intf->rf_off_entry); + debugfs_remove(intf->bbp_val_entry); + debugfs_remove(intf->bbp_off_entry); + debugfs_remove(intf->eeprom_val_entry); + debugfs_remove(intf->eeprom_off_entry); + debugfs_remove(intf->csr_val_entry); + debugfs_remove(intf->csr_off_entry); + debugfs_remove(intf->register_folder); + debugfs_remove(intf->dev_flags); + debugfs_remove(intf->chipset_entry); + debugfs_remove(intf->driver_entry); + debugfs_remove(intf->driver_folder); + kfree(intf->chipset_blob.data); + kfree(intf->driver_blob.data); + kfree(intf); + + rt2x00dev->debugfs_intf = NULL; +} diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h new file mode 100644 index 0000000..fa11409 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00debug.h @@ -0,0 +1,70 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free 
software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00debug + Abstract: Data structures for the rt2x00debug. + */ + +#ifndef RT2X00DEBUG_H +#define RT2X00DEBUG_H + +struct rt2x00_dev; + +/** + * enum rt2x00debugfs_entry_flags: Flags for debugfs registry entry + * + * @RT2X00DEBUGFS_OFFSET: rt2x00lib should pass the register offset + * as argument when using the callback function read()/write() + */ +enum rt2x00debugfs_entry_flags { + RT2X00DEBUGFS_OFFSET = (1 << 0), +}; + +#define RT2X00DEBUGFS_REGISTER_ENTRY(__name, __type) \ +struct reg##__name { \ + void (*read)(struct rt2x00_dev *rt2x00dev, \ + const unsigned int word, __type *data); \ + void (*write)(struct rt2x00_dev *rt2x00dev, \ + const unsigned int word, __type data); \ + \ + unsigned int flags; \ + \ + unsigned int word_base; \ + unsigned int word_size; \ + unsigned int word_count; \ +} __name + +struct rt2x00debug { + /* + * Reference to the modules structure. + */ + struct module *owner; + + /* + * Register access entries. + */ + RT2X00DEBUGFS_REGISTER_ENTRY(csr, u32); + RT2X00DEBUGFS_REGISTER_ENTRY(eeprom, u16); + RT2X00DEBUGFS_REGISTER_ENTRY(bbp, u8); + RT2X00DEBUGFS_REGISTER_ENTRY(rf, u32); +}; + +#endif /* RT2X00DEBUG_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c new file mode 100644 index 0000000..dd5ab8f --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -0,0 +1,1014 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 generic device routines. + */ + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +/* + * Radio control handlers. + */ +int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + int status; + + /* + * Don't enable the radio twice. + * And check if the hardware button has been disabled. + */ + if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return 0; + + /* + * Initialize all data queues. + */ + rt2x00queue_init_queues(rt2x00dev); + + /* + * Enable radio. 
+ */ + status = + rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_ON); + if (status) + return status; + + rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_ON); + + rt2x00leds_led_radio(rt2x00dev, true); + rt2x00led_led_activity(rt2x00dev, true); + + set_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags); + + /* + * Enable RX. + */ + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); + + /* + * Start the TX queues. + */ + ieee80211_wake_queues(rt2x00dev->hw); + + return 0; +} + +void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + if (!test_and_clear_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + /* + * Stop the TX queues in mac80211. + */ + ieee80211_stop_queues(rt2x00dev->hw); + rt2x00queue_stop_queues(rt2x00dev); + + /* + * Disable RX. + */ + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); + + /* + * Disable radio. + */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF); + rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF); + rt2x00led_led_activity(rt2x00dev, false); + rt2x00leds_led_radio(rt2x00dev, false); +} + +void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state) +{ + /* + * When we are disabling the RX, we should also stop the link tuner. + */ + if (state == STATE_RADIO_RX_OFF) + rt2x00link_stop_tuner(rt2x00dev); + + rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); + + /* + * When we are enabling the RX, we should also start the link tuner. + */ + if (state == STATE_RADIO_RX_ON) + rt2x00link_start_tuner(rt2x00dev); +} + +static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rt2x00_dev *rt2x00dev = data; + struct rt2x00_intf *intf = vif_to_intf(vif); + int delayed_flags; + + /* + * Copy all data we need during this action under the protection + * of a spinlock. Otherwise race conditions might occur which results + * into an invalid configuration. + */ + spin_lock(&intf->lock); + + delayed_flags = intf->delayed_flags; + intf->delayed_flags = 0; + + spin_unlock(&intf->lock); + + /* + * It is possible the radio was disabled while the work had been + * scheduled. If that happens we should return here immediately, + * note that in the spinlock protected area above the delayed_flags + * have been cleared correctly. + */ + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + if (delayed_flags & DELAYED_UPDATE_BEACON) + rt2x00queue_update_beacon(rt2x00dev, vif, true); +} + +static void rt2x00lib_intf_scheduled(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, intf_work); + + /* + * Iterate over each interface and perform the + * requested configurations. + */ + ieee80211_iterate_active_interfaces(rt2x00dev->hw, + rt2x00lib_intf_scheduled_iter, + rt2x00dev); +} + +/* + * Interrupt context handlers. 
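+ *
+ * These run from the drivers' interrupt handling: beacondone only
+ * marks DELAYED_UPDATE_BEACON and defers the work to intf_work,
+ * while txdone/rxdone hand frames back to mac80211 through the
+ * *_irqsafe entry points.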
+ */ +static void rt2x00lib_beacondone_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct rt2x00_intf *intf = vif_to_intf(vif); + + if (vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC && + vif->type != NL80211_IFTYPE_MESH_POINT && + vif->type != NL80211_IFTYPE_WDS) + return; + + spin_lock(&intf->lock); + intf->delayed_flags |= DELAYED_UPDATE_BEACON; + spin_unlock(&intf->lock); +} + +void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) +{ + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, + rt2x00lib_beacondone_iter, + rt2x00dev); + + ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work); +} +EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); + +void rt2x00lib_txdone(struct queue_entry *entry, + struct txdone_entry_desc *txdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + enum data_queue_qid qid = skb_get_queue_mapping(entry->skb); + unsigned int header_length = ieee80211_get_hdrlen_from_skb(entry->skb); + u8 rate_idx, rate_flags, retry_rates; + u8 skbdesc_flags = skbdesc->flags; + unsigned int i; + bool success; + + /* + * Unmap the skb. + */ + rt2x00queue_unmap_skb(rt2x00dev, entry->skb); + + /* + * Remove L2 padding which was added during + */ + if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags)) + rt2x00queue_remove_l2pad(entry->skb, header_length); + + /* + * If the IV/EIV data was stripped from the frame before it was + * passed to the hardware, we should now reinsert it again because + * mac80211 will expect the the same data to be present it the + * frame as it was passed to us. + */ + if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) + rt2x00crypto_tx_insert_iv(entry->skb, header_length); + + /* + * Send frame to debugfs immediately, after this call is completed + * we are going to overwrite the skb->cb array. + */ + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb); + + /* + * Determine if the frame has been successfully transmitted. + */ + success = + test_bit(TXDONE_SUCCESS, &txdesc->flags) || + test_bit(TXDONE_UNKNOWN, &txdesc->flags) || + test_bit(TXDONE_FALLBACK, &txdesc->flags); + + /* + * Update TX statistics. + */ + rt2x00dev->link.qual.tx_success += success; + rt2x00dev->link.qual.tx_failed += !success; + + rate_idx = skbdesc->tx_rate_idx; + rate_flags = skbdesc->tx_rate_flags; + retry_rates = test_bit(TXDONE_FALLBACK, &txdesc->flags) ? + (txdesc->retry + 1) : 1; + + /* + * Initialize TX status + */ + memset(&tx_info->status, 0, sizeof(tx_info->status)); + tx_info->status.ack_signal = 0; + + /* + * Frame was send with retries, hardware tried + * different rates to send out the frame, at each + * retry it lowered the rate 1 step. 
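+ *
+ * For example, a frame that started at rate index 5 and was reported
+ * with two fallback retries is handed to mac80211 as
+ * rates[] = { idx 5, idx 4, idx 3 }, each with a count of 1.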
+ */ + for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) { + tx_info->status.rates[i].idx = rate_idx - i; + tx_info->status.rates[i].flags = rate_flags; + tx_info->status.rates[i].count = 1; + } + if (i < (IEEE80211_TX_MAX_RATES - 1)) + tx_info->status.rates[i].idx = -1; /* terminate */ + + if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) { + if (success) + tx_info->flags |= IEEE80211_TX_STAT_ACK; + else + rt2x00dev->low_level_stats.dot11ACKFailureCount++; + } + + if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { + if (success) + rt2x00dev->low_level_stats.dot11RTSSuccessCount++; + else + rt2x00dev->low_level_stats.dot11RTSFailureCount++; + } + + /* + * Only send the status report to mac80211 when it's a frame + * that originated in mac80211. If this was a extra frame coming + * through a mac80211 library call (RTS/CTS) then we should not + * send the status report back. + */ + if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) + ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb); + else + dev_kfree_skb_irq(entry->skb); + + /* + * Make this entry available for reuse. + */ + entry->skb = NULL; + entry->flags = 0; + + rt2x00dev->ops->lib->clear_entry(entry); + + clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); + rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); + + /* + * If the data queue was below the threshold before the txdone + * handler we must make sure the packet queue in the mac80211 stack + * is reenabled when the txdone handler has finished. + */ + if (!rt2x00queue_threshold(entry->queue)) + ieee80211_wake_queue(rt2x00dev->hw, qid); +} +EXPORT_SYMBOL_GPL(rt2x00lib_txdone); + +static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, + struct rxdone_entry_desc *rxdesc) +{ + struct ieee80211_supported_band *sband; + const struct rt2x00_rate *rate; + unsigned int i; + int signal; + int type; + + /* + * For non-HT rates the MCS value needs to contain the + * actually used rate modulation (CCK or OFDM). + */ + if (rxdesc->dev_flags & RXDONE_SIGNAL_MCS) + signal = RATE_MCS(rxdesc->rate_mode, rxdesc->signal); + else + signal = rxdesc->signal; + + type = (rxdesc->dev_flags & RXDONE_SIGNAL_MASK); + + sband = &rt2x00dev->bands[rt2x00dev->curr_band]; + for (i = 0; i < sband->n_bitrates; i++) { + rate = rt2x00_get_rate(sband->bitrates[i].hw_value); + + if (((type == RXDONE_SIGNAL_PLCP) && + (rate->plcp == signal)) || + ((type == RXDONE_SIGNAL_BITRATE) && + (rate->bitrate == signal)) || + ((type == RXDONE_SIGNAL_MCS) && + (rate->mcs == signal))) { + return i; + } + } + + WARNING(rt2x00dev, "Frame received with unrecognized signal, " + "signal=0x%.4x, type=%d.\n", signal, type); + return 0; +} + +void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev, + struct queue_entry *entry) +{ + struct rxdone_entry_desc rxdesc; + struct sk_buff *skb; + struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status; + unsigned int header_length; + int rate_idx; + /* + * Allocate a new sk_buffer. If no new buffer available, drop the + * received frame and reuse the existing buffer. + */ + skb = rt2x00queue_alloc_rxskb(rt2x00dev, entry); + if (!skb) + return; + + /* + * Unmap the skb. + */ + rt2x00queue_unmap_skb(rt2x00dev, entry->skb); + + /* + * Extract the RXD details. + */ + memset(&rxdesc, 0, sizeof(rxdesc)); + rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); + + /* + * The data behind the ieee80211 header must be + * aligned on a 4 byte boundary. 
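+ *
+ * The 802.11 header itself is not always a multiple of 4 bytes long
+ * (a QoS data header is 26 bytes, for instance), so the branches
+ * below either reinsert the stripped IV, remove the L2 padding the
+ * hardware inserted, or move the payload to restore that alignment.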
+ */ + header_length = ieee80211_get_hdrlen_from_skb(entry->skb); + + /* + * Hardware might have stripped the IV/EIV/ICV data, + * in that case it is possible that the data was + * provided separately (through hardware descriptor) + * in which case we should reinsert the data into the frame. + */ + if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) && + (rxdesc.flags & RX_FLAG_IV_STRIPPED)) + rt2x00crypto_rx_insert_iv(entry->skb, header_length, + &rxdesc); + else if (header_length && + (rxdesc.size > header_length) && + (rxdesc.dev_flags & RXDONE_L2PAD)) + rt2x00queue_remove_l2pad(entry->skb, header_length); + else + rt2x00queue_align_payload(entry->skb, header_length); + + /* Trim buffer to correct size */ + skb_trim(entry->skb, rxdesc.size); + + /* + * Check if the frame was received using HT. In that case, + * the rate is the MCS index and should be passed to mac80211 + * directly. Otherwise we need to translate the signal to + * the correct bitrate index. + */ + if (rxdesc.rate_mode == RATE_MODE_CCK || + rxdesc.rate_mode == RATE_MODE_OFDM) { + rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc); + } else { + rxdesc.flags |= RX_FLAG_HT; + rate_idx = rxdesc.signal; + } + + /* + * Update extra components + */ + rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc); + rt2x00debug_update_crypto(rt2x00dev, &rxdesc); + + rx_status->mactime = rxdesc.timestamp; + rx_status->rate_idx = rate_idx; + rx_status->signal = rxdesc.rssi; + rx_status->noise = rxdesc.noise; + rx_status->flag = rxdesc.flags; + rx_status->antenna = rt2x00dev->link.ant.active.rx; + + /* + * Send frame to mac80211 & debugfs. + * mac80211 will clean up the skb structure. + */ + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb); + memcpy(IEEE80211_SKB_RXCB(entry->skb), rx_status, sizeof(*rx_status)); + ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb); + + /* + * Replace the skb with the freshly allocated one. + */ + entry->skb = skb; + entry->flags = 0; + + rt2x00dev->ops->lib->clear_entry(entry); + + rt2x00queue_index_inc(entry->queue, Q_INDEX); +} +EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); + +/* + * Driver initialization handlers. 
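+ *
+ * The rt2x00_supported_rates[] table below holds the four CCK and
+ * eight OFDM rates; bitrates are stored in units of 100 kbit/s
+ * (10 == 1 Mbit/s) together with the matching PLCP and MCS values.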
+ */ +const struct rt2x00_rate rt2x00_supported_rates[12] = { + { + .flags = DEV_RATE_CCK, + .bitrate = 10, + .ratemask = BIT(0), + .plcp = 0x00, + .mcs = RATE_MCS(RATE_MODE_CCK, 0), + }, + { + .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE, + .bitrate = 20, + .ratemask = BIT(1), + .plcp = 0x01, + .mcs = RATE_MCS(RATE_MODE_CCK, 1), + }, + { + .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE, + .bitrate = 55, + .ratemask = BIT(2), + .plcp = 0x02, + .mcs = RATE_MCS(RATE_MODE_CCK, 2), + }, + { + .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE, + .bitrate = 110, + .ratemask = BIT(3), + .plcp = 0x03, + .mcs = RATE_MCS(RATE_MODE_CCK, 3), + }, + { + .flags = DEV_RATE_OFDM, + .bitrate = 60, + .ratemask = BIT(4), + .plcp = 0x0b, + .mcs = RATE_MCS(RATE_MODE_OFDM, 0), + }, + { + .flags = DEV_RATE_OFDM, + .bitrate = 90, + .ratemask = BIT(5), + .plcp = 0x0f, + .mcs = RATE_MCS(RATE_MODE_OFDM, 1), + }, + { + .flags = DEV_RATE_OFDM, + .bitrate = 120, + .ratemask = BIT(6), + .plcp = 0x0a, + .mcs = RATE_MCS(RATE_MODE_OFDM, 2), + }, + { + .flags = DEV_RATE_OFDM, + .bitrate = 180, + .ratemask = BIT(7), + .plcp = 0x0e, + .mcs = RATE_MCS(RATE_MODE_OFDM, 3), + }, + { + .flags = DEV_RATE_OFDM, + .bitrate = 240, + .ratemask = BIT(8), + .plcp = 0x09, + .mcs = RATE_MCS(RATE_MODE_OFDM, 4), + }, + { + .flags = DEV_RATE_OFDM, + .bitrate = 360, + .ratemask = BIT(9), + .plcp = 0x0d, + .mcs = RATE_MCS(RATE_MODE_OFDM, 5), + }, + { + .flags = DEV_RATE_OFDM, + .bitrate = 480, + .ratemask = BIT(10), + .plcp = 0x08, + .mcs = RATE_MCS(RATE_MODE_OFDM, 6), + }, + { + .flags = DEV_RATE_OFDM, + .bitrate = 540, + .ratemask = BIT(11), + .plcp = 0x0c, + .mcs = RATE_MCS(RATE_MODE_OFDM, 7), + }, +}; + +static void rt2x00lib_channel(struct ieee80211_channel *entry, + const int channel, const int tx_power, + const int value) +{ + entry->center_freq = ieee80211_channel_to_frequency(channel); + entry->hw_value = value; + entry->max_power = tx_power; + entry->max_antenna_gain = 0xff; +} + +static void rt2x00lib_rate(struct ieee80211_rate *entry, + const u16 index, const struct rt2x00_rate *rate) +{ + entry->flags = 0; + entry->bitrate = rate->bitrate; + entry->hw_value =index; + entry->hw_value_short = index; + + if (rate->flags & DEV_RATE_SHORT_PREAMBLE) + entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE; +} + +static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, + struct hw_mode_spec *spec) +{ + struct ieee80211_hw *hw = rt2x00dev->hw; + struct ieee80211_channel *channels; + struct ieee80211_rate *rates; + unsigned int num_rates; + unsigned int i; + + num_rates = 0; + if (spec->supported_rates & SUPPORT_RATE_CCK) + num_rates += 4; + if (spec->supported_rates & SUPPORT_RATE_OFDM) + num_rates += 8; + + channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL); + if (!channels) + return -ENOMEM; + + rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL); + if (!rates) + goto exit_free_channels; + + /* + * Initialize Rate list. + */ + for (i = 0; i < num_rates; i++) + rt2x00lib_rate(&rates[i], i, rt2x00_get_rate(i)); + + /* + * Initialize Channel list. + */ + for (i = 0; i < spec->num_channels; i++) { + rt2x00lib_channel(&channels[i], + spec->channels[i].channel, + spec->channels_info[i].tx_power1, i); + } + + /* + * Intitialize 802.11b, 802.11g + * Rates: CCK, OFDM. 
+ * Channels: 2.4 GHz + */ + if (spec->supported_bands & SUPPORT_BAND_2GHZ) { + rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_channels = 14; + rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_bitrates = num_rates; + rt2x00dev->bands[IEEE80211_BAND_2GHZ].channels = channels; + rt2x00dev->bands[IEEE80211_BAND_2GHZ].bitrates = rates; + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &rt2x00dev->bands[IEEE80211_BAND_2GHZ]; + memcpy(&rt2x00dev->bands[IEEE80211_BAND_2GHZ].ht_cap, + &spec->ht, sizeof(spec->ht)); + } + + /* + * Intitialize 802.11a + * Rates: OFDM. + * Channels: OFDM, UNII, HiperLAN2. + */ + if (spec->supported_bands & SUPPORT_BAND_5GHZ) { + rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_channels = + spec->num_channels - 14; + rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_bitrates = + num_rates - 4; + rt2x00dev->bands[IEEE80211_BAND_5GHZ].channels = &channels[14]; + rt2x00dev->bands[IEEE80211_BAND_5GHZ].bitrates = &rates[4]; + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &rt2x00dev->bands[IEEE80211_BAND_5GHZ]; + memcpy(&rt2x00dev->bands[IEEE80211_BAND_5GHZ].ht_cap, + &spec->ht, sizeof(spec->ht)); + } + + return 0; + + exit_free_channels: + kfree(channels); + ERROR(rt2x00dev, "Allocation ieee80211 modes failed.\n"); + return -ENOMEM; +} + +static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev) +{ + if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags)) + ieee80211_unregister_hw(rt2x00dev->hw); + + if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) { + kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels); + kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->bitrates); + rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; + rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; + } + + kfree(rt2x00dev->spec.channels_info); +} + +static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + int status; + + if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags)) + return 0; + + /* + * Initialize HW modes. + */ + status = rt2x00lib_probe_hw_modes(rt2x00dev, spec); + if (status) + return status; + + /* + * Initialize HW fields. + */ + rt2x00dev->hw->queues = rt2x00dev->ops->tx_queues; + + /* + * Initialize extra TX headroom required. + */ + rt2x00dev->hw->extra_tx_headroom = + max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM, + rt2x00dev->ops->extra_tx_headroom); + + /* + * Take TX headroom required for alignment into account. + */ + if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags)) + rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE; + else if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) + rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE; + + /* + * Register HW. + */ + status = ieee80211_register_hw(rt2x00dev->hw); + if (status) + return status; + + set_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags); + + return 0; +} + +/* + * Initialization/uninitialization handlers. + */ +static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + if (!test_and_clear_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags)) + return; + + /* + * Unregister extra components. + */ + rt2x00rfkill_unregister(rt2x00dev); + + /* + * Allow the HW to uninitialize. + */ + rt2x00dev->ops->lib->uninitialize(rt2x00dev); + + /* + * Free allocated queue entries. + */ + rt2x00queue_uninitialize(rt2x00dev); +} + +static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) +{ + int status; + + if (test_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags)) + return 0; + + /* + * Allocate all queue entries. 
+ */ + status = rt2x00queue_initialize(rt2x00dev); + if (status) + return status; + + /* + * Initialize the device. + */ + status = rt2x00dev->ops->lib->initialize(rt2x00dev); + if (status) { + rt2x00queue_uninitialize(rt2x00dev); + return status; + } + + set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags); + + /* + * Register the extra components. + */ + rt2x00rfkill_register(rt2x00dev); + + return 0; +} + +int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) + return 0; + + /* + * If this is the first interface which is added, + * we should load the firmware now. + */ + retval = rt2x00lib_load_firmware(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize the device. + */ + retval = rt2x00lib_initialize(rt2x00dev); + if (retval) + return retval; + + rt2x00dev->intf_ap_count = 0; + rt2x00dev->intf_sta_count = 0; + rt2x00dev->intf_associated = 0; + + /* Enable the radio */ + retval = rt2x00lib_enable_radio(rt2x00dev); + if (retval) { + rt2x00queue_uninitialize(rt2x00dev); + return retval; + } + + set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags); + + return 0; +} + +void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) +{ + if (!test_and_clear_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) + return; + + /* + * Perhaps we can add something smarter here, + * but for now just disabling the radio should do. + */ + rt2x00lib_disable_radio(rt2x00dev); + + rt2x00dev->intf_ap_count = 0; + rt2x00dev->intf_sta_count = 0; + rt2x00dev->intf_associated = 0; +} + +/* + * driver allocation handlers. + */ +int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) +{ + int retval = -ENOMEM; + + mutex_init(&rt2x00dev->csr_mutex); + + set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); + + /* + * Make room for rt2x00_intf inside the per-interface + * structure ieee80211_vif. + */ + rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf); + + /* + * Determine which operating modes are supported, all modes + * which require beaconing, depend on the availability of + * beacon entries. + */ + rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + if (rt2x00dev->ops->bcn->entry_num > 0) + rt2x00dev->hw->wiphy->interface_modes |= + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_WDS); + + /* + * Let the driver probe the device to detect the capabilities. + */ + retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev); + if (retval) { + ERROR(rt2x00dev, "Failed to allocate device.\n"); + goto exit; + } + + /* + * Initialize configuration work. + */ + INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); + + /* + * Allocate queue array. + */ + retval = rt2x00queue_allocate(rt2x00dev); + if (retval) + goto exit; + + /* + * Initialize ieee80211 structure. + */ + retval = rt2x00lib_probe_hw(rt2x00dev); + if (retval) { + ERROR(rt2x00dev, "Failed to initialize hw.\n"); + goto exit; + } + + /* + * Register extra components. + */ + rt2x00link_register(rt2x00dev); + rt2x00leds_register(rt2x00dev); + rt2x00debug_register(rt2x00dev); + + return 0; + +exit: + rt2x00lib_remove_dev(rt2x00dev); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00lib_probe_dev); + +void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) +{ + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); + + /* + * Disable radio. + */ + rt2x00lib_disable_radio(rt2x00dev); + + /* + * Stop all work. + */ + cancel_work_sync(&rt2x00dev->intf_work); + + /* + * Uninitialize device. 
+ */ + rt2x00lib_uninitialize(rt2x00dev); + + /* + * Free extra components + */ + rt2x00debug_deregister(rt2x00dev); + rt2x00leds_unregister(rt2x00dev); + + /* + * Free ieee80211_hw memory. + */ + rt2x00lib_remove_hw(rt2x00dev); + + /* + * Free firmware image. + */ + rt2x00lib_free_firmware(rt2x00dev); + + /* + * Free queue structures. + */ + rt2x00queue_free(rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev); + +/* + * Device state handlers + */ +#ifdef CONFIG_PM +int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state) +{ + NOTICE(rt2x00dev, "Going to sleep.\n"); + + /* + * Prevent mac80211 from accessing driver while suspended. + */ + if (!test_and_clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return 0; + + /* + * Cleanup as much as possible. + */ + rt2x00lib_uninitialize(rt2x00dev); + + /* + * Suspend/disable extra components. + */ + rt2x00leds_suspend(rt2x00dev); + rt2x00debug_deregister(rt2x00dev); + + /* + * Set device mode to sleep for power management, + * on some hardware this call seems to consistently fail. + * From the specifications it is hard to tell why it fails, + * and if this is a "bad thing". + * Overall it is safe to just ignore the failure and + * continue suspending. The only downside is that the + * device will not be in optimal power save mode, but with + * the radio and the other components already disabled the + * device is as good as disabled. + */ + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_SLEEP)) + WARNING(rt2x00dev, "Device failed to enter sleep state, " + "continue suspending.\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00lib_suspend); + +int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev) +{ + NOTICE(rt2x00dev, "Waking up.\n"); + + /* + * Restore/enable extra components. + */ + rt2x00debug_register(rt2x00dev); + rt2x00leds_resume(rt2x00dev); + + /* + * We are ready again to receive requests from mac80211. + */ + set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00lib_resume); +#endif /* CONFIG_PM */ + +/* + * rt2x00lib module information. + */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h new file mode 100644 index 0000000..727019a --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00dump.h @@ -0,0 +1,121 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00dump + Abstract: Data structures for the rt2x00debug & userspace. + */ + +#ifndef RT2X00DUMP_H +#define RT2X00DUMP_H + +/** + * DOC: Introduction + * + * This header is intended to be exported to userspace, + * to make the structures and enumerations available to userspace + * applications. 
This means that all data types should be exportable. + * + * When rt2x00 is compiled with debugfs support enabled, + * it is possible to capture all data coming in and out of the device + * by reading the frame dump file. This file can have only a single reader. + * The following frames will be reported: + * - All incoming frames (rx) + * - All outgoing frames (tx, including beacon and atim) + * - All completed frames (txdone including atim) + * + * The data is send to the file using the following format: + * + * [rt2x00dump header][hardware descriptor][ieee802.11 frame] + * + * rt2x00dump header: The description of the dumped frame, as well as + * additional information usefull for debugging. See &rt2x00dump_hdr. + * hardware descriptor: Descriptor that was used to receive or transmit + * the frame. + * ieee802.11 frame: The actual frame that was received or transmitted. + */ + +/** + * enum rt2x00_dump_type - Frame type + * + * These values are used for the @type member of &rt2x00dump_hdr. + * @DUMP_FRAME_RXDONE: This frame has been received by the hardware. + * @DUMP_FRAME_TX: This frame is queued for transmission to the hardware. + * @DUMP_FRAME_TXDONE: This frame indicates the device has handled + * the tx event which has either succeeded or failed. A frame + * with this type should also have been reported with as a + * %DUMP_FRAME_TX frame. + */ +enum rt2x00_dump_type { + DUMP_FRAME_RXDONE = 1, + DUMP_FRAME_TX = 2, + DUMP_FRAME_TXDONE = 3, +}; + +/** + * struct rt2x00dump_hdr - Dump frame header + * + * Each frame dumped to the debugfs file starts with this header + * attached. This header contains the description of the actual + * frame which was dumped. + * + * New fields inside the structure must be appended to the end of + * the structure. This way userspace tools compiled for earlier + * header versions can still correctly handle the frame dump + * (although they will not handle all data passed to them in the dump). + * + * @version: Header version should always be set to %DUMP_HEADER_VERSION. + * This field must be checked by userspace to determine if it can + * handle this frame. + * @header_length: The length of the &rt2x00dump_hdr structure. This is + * used for compatibility reasons so userspace can easily determine + * the location of the next field in the dump. + * @desc_length: The length of the device descriptor. + * @data_length: The length of the frame data (including the ieee802.11 header. + * @chip_rt: RT chipset + * @chip_rf: RF chipset + * @chip_rev: Chipset revision + * @type: The frame type (&rt2x00_dump_type) + * @queue_index: The index number of the data queue. + * @entry_index: The index number of the entry inside the data queue. 
+ * @timestamp_sec: Timestamp - seconds + * @timestamp_usec: Timestamp - microseconds + */ +struct rt2x00dump_hdr { + __le32 version; +#define DUMP_HEADER_VERSION 2 + + __le32 header_length; + __le32 desc_length; + __le32 data_length; + + __le16 chip_rt; + __le16 chip_rf; + __le32 chip_rev; + + __le16 type; + __u8 queue_index; + __u8 entry_index; + + __le32 timestamp_sec; + __le32 timestamp_usec; +}; + +#endif /* RT2X00DUMP_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c new file mode 100644 index 0000000..34beb00 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c @@ -0,0 +1,128 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + Copyright (C) 2004 - 2009 Gertjan van Wingerde + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 firmware loading routines. + */ + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev) +{ + struct device *device = wiphy_dev(rt2x00dev->hw->wiphy); + const struct firmware *fw; + char *fw_name; + int retval; + + /* + * Read correct firmware from harddisk. + */ + fw_name = rt2x00dev->ops->lib->get_firmware_name(rt2x00dev); + if (!fw_name) { + ERROR(rt2x00dev, + "Invalid firmware filename.\n" + "Please file bug report to %s.\n", DRV_PROJECT); + return -EINVAL; + } + + INFO(rt2x00dev, "Loading firmware file '%s'.\n", fw_name); + + retval = request_firmware(&fw, fw_name, device); + if (retval) { + ERROR(rt2x00dev, "Failed to request Firmware.\n"); + return retval; + } + + if (!fw || !fw->size || !fw->data) { + ERROR(rt2x00dev, "Failed to read Firmware.\n"); + return -ENOENT; + } + + INFO(rt2x00dev, "Firmware detected - version: %d.%d.\n", + fw->data[fw->size - 4], fw->data[fw->size - 3]); + + retval = rt2x00dev->ops->lib->check_firmware(rt2x00dev, fw->data, fw->size); + switch (retval) { + case FW_OK: + break; + case FW_BAD_CRC: + ERROR(rt2x00dev, "Firmware checksum error.\n"); + goto exit; + case FW_BAD_LENGTH: + ERROR(rt2x00dev, + "Invalid firmware file length (len=%zu)\n", fw->size); + goto exit; + case FW_BAD_VERSION: + ERROR(rt2x00dev, + "Current firmware does not support detected chipset.\n"); + goto exit; + }; + + rt2x00dev->fw = fw; + + return 0; + +exit: + release_firmware(fw); + + return -ENOENT; +} + +int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + if (!test_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags)) + return 0; + + if (!rt2x00dev->fw) { + retval = rt2x00lib_request_firmware(rt2x00dev); + if (retval) + return retval; + } + + /* + * Send firmware to the device. 
+ */ + retval = rt2x00dev->ops->lib->load_firmware(rt2x00dev, + rt2x00dev->fw->data, + rt2x00dev->fw->size); + + /* + * When the firmware is uploaded to the hardware the LED + * association status might have been triggered, for correct + * LED handling it should now be reset. + */ + rt2x00leds_led_assoc(rt2x00dev, false); + + return retval; +} + +void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev) +{ + release_firmware(rt2x00dev->fw); + rt2x00dev->fw = NULL; +} diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c new file mode 100644 index 0000000..1056c92 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00ht.c @@ -0,0 +1,69 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 HT specific routines. + */ + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, + struct txentry_desc *txdesc, + const struct rt2x00_rate *hwrate) +{ + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); + struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; + + if (tx_info->control.sta) + txdesc->mpdu_density = + tx_info->control.sta->ht_cap.ampdu_density; + else + txdesc->mpdu_density = 0; + + txdesc->ba_size = 7; /* FIXME: What value is needed? */ + txdesc->stbc = 0; /* FIXME: What value is needed? */ + + txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs); + if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + txdesc->mcs |= 0x08; + + /* + * Convert flags + */ + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) + __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); + + /* + * Determine HT Mix/Greenfield rate mode + */ + if (txrate->flags & IEEE80211_TX_RC_MCS) + txdesc->rate_mode = RATE_MODE_HT_MIX; + if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) + txdesc->rate_mode = RATE_MODE_HT_GREENFIELD; + if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags); + if (txrate->flags & IEEE80211_TX_RC_SHORT_GI) + __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags); +} diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c new file mode 100644 index 0000000..ca585e3 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00leds.c @@ -0,0 +1,246 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 led specific routines. + */ + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) +{ + struct rt2x00_led *led = &rt2x00dev->led_qual; + unsigned int brightness; + + if ((led->type != LED_TYPE_QUALITY) || !(led->flags & LED_REGISTERED)) + return; + + /* + * Led handling requires a positive value for the rssi, + * to do that correctly we need to add the correction. + */ + rssi += rt2x00dev->rssi_offset; + + /* + * Get the rssi level, this is used to convert the rssi + * to a LED value inside the range LED_OFF - LED_FULL. + */ + if (rssi <= 30) + rssi = 0; + else if (rssi <= 39) + rssi = 1; + else if (rssi <= 49) + rssi = 2; + else if (rssi <= 53) + rssi = 3; + else if (rssi <= 63) + rssi = 4; + else + rssi = 5; + + /* + * Note that we must _not_ send LED_OFF since the driver + * is going to calculate the value and might use it in a + * division. + */ + brightness = ((LED_FULL / 6) * rssi) + 1; + if (brightness != led->led_dev.brightness) { + led->led_dev.brightness_set(&led->led_dev, brightness); + led->led_dev.brightness = brightness; + } +} + +static void rt2x00led_led_simple(struct rt2x00_led *led, bool enabled) +{ + unsigned int brightness = enabled ? LED_FULL : LED_OFF; + + if (!(led->flags & LED_REGISTERED)) + return; + + led->led_dev.brightness_set(&led->led_dev, brightness); + led->led_dev.brightness = brightness; +} + +void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled) +{ + if (rt2x00dev->led_qual.type == LED_TYPE_ACTIVITY) + rt2x00led_led_simple(&rt2x00dev->led_qual, enabled); +} + +void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled) +{ + if (rt2x00dev->led_assoc.type == LED_TYPE_ASSOC) + rt2x00led_led_simple(&rt2x00dev->led_assoc, enabled); +} + +void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled) +{ + if (rt2x00dev->led_radio.type == LED_TYPE_RADIO) + rt2x00led_led_simple(&rt2x00dev->led_radio, enabled); +} + +static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev, + struct rt2x00_led *led, + const char *name) +{ + struct device *device = wiphy_dev(rt2x00dev->hw->wiphy); + int retval; + + led->led_dev.name = name; + led->led_dev.brightness = LED_OFF; + + retval = led_classdev_register(device, &led->led_dev); + if (retval) { + ERROR(rt2x00dev, "Failed to register led handler.\n"); + return retval; + } + + led->flags |= LED_REGISTERED; + + return 0; +} + +void rt2x00leds_register(struct rt2x00_dev *rt2x00dev) +{ + char dev_name[16]; + char name[32]; + int retval; + unsigned long on_period; + unsigned long off_period; + + snprintf(dev_name, sizeof(dev_name), "%s-%s", + rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy)); + + if (rt2x00dev->led_radio.flags & LED_INITIALIZED) { + snprintf(name, sizeof(name), "%s::radio", dev_name); + + retval = rt2x00leds_register_led(rt2x00dev, + &rt2x00dev->led_radio, + name); + if (retval) + goto exit_fail; + } + + if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) { + snprintf(name, sizeof(name), "%s::assoc", dev_name); + + retval = rt2x00leds_register_led(rt2x00dev, + &rt2x00dev->led_assoc, + name); + if (retval) + goto exit_fail; + } + + if (rt2x00dev->led_qual.flags & 
LED_INITIALIZED) { + snprintf(name, sizeof(name), "%s::quality", dev_name); + + retval = rt2x00leds_register_led(rt2x00dev, + &rt2x00dev->led_qual, + name); + if (retval) + goto exit_fail; + } + + /* + * Initialize blink time to default value: + * On period: 70ms + * Off period: 30ms + */ + if (rt2x00dev->led_radio.led_dev.blink_set) { + on_period = 70; + off_period = 30; + rt2x00dev->led_radio.led_dev.blink_set( + &rt2x00dev->led_radio.led_dev, &on_period, &off_period); + } + + return; + +exit_fail: + rt2x00leds_unregister(rt2x00dev); +} + +static void rt2x00leds_unregister_led(struct rt2x00_led *led) +{ + led_classdev_unregister(&led->led_dev); + + /* + * This might look weird, but when we are unregistering while + * suspended the led is already off, and since we haven't + * fully resumed yet, access to the device might not be + * possible yet. + */ + if (!(led->led_dev.flags & LED_SUSPENDED)) + led->led_dev.brightness_set(&led->led_dev, LED_OFF); + + led->flags &= ~LED_REGISTERED; +} + +void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev) +{ + if (rt2x00dev->led_qual.flags & LED_REGISTERED) + rt2x00leds_unregister_led(&rt2x00dev->led_qual); + if (rt2x00dev->led_assoc.flags & LED_REGISTERED) + rt2x00leds_unregister_led(&rt2x00dev->led_assoc); + if (rt2x00dev->led_radio.flags & LED_REGISTERED) + rt2x00leds_unregister_led(&rt2x00dev->led_radio); +} + +static inline void rt2x00leds_suspend_led(struct rt2x00_led *led) +{ + led_classdev_suspend(&led->led_dev); + + /* This shouldn't be needed, but just to be safe */ + led->led_dev.brightness_set(&led->led_dev, LED_OFF); + led->led_dev.brightness = LED_OFF; +} + +void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev) +{ + if (rt2x00dev->led_qual.flags & LED_REGISTERED) + rt2x00leds_suspend_led(&rt2x00dev->led_qual); + if (rt2x00dev->led_assoc.flags & LED_REGISTERED) + rt2x00leds_suspend_led(&rt2x00dev->led_assoc); + if (rt2x00dev->led_radio.flags & LED_REGISTERED) + rt2x00leds_suspend_led(&rt2x00dev->led_radio); +} + +static inline void rt2x00leds_resume_led(struct rt2x00_led *led) +{ + led_classdev_resume(&led->led_dev); + + /* Device might have enabled the LEDS during resume */ + led->led_dev.brightness_set(&led->led_dev, LED_OFF); + led->led_dev.brightness = LED_OFF; +} + +void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev) +{ + if (rt2x00dev->led_radio.flags & LED_REGISTERED) + rt2x00leds_resume_led(&rt2x00dev->led_radio); + if (rt2x00dev->led_assoc.flags & LED_REGISTERED) + rt2x00leds_resume_led(&rt2x00dev->led_assoc); + if (rt2x00dev->led_qual.flags & LED_REGISTERED) + rt2x00leds_resume_led(&rt2x00dev->led_qual); +} diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.h b/drivers/net/wireless/rt2x00/rt2x00leds.h new file mode 100644 index 0000000..3b46f0c --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00leds.h @@ -0,0 +1,46 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 led datastructures and routines + */ + +#ifndef RT2X00LEDS_H +#define RT2X00LEDS_H + +enum led_type { + LED_TYPE_RADIO, + LED_TYPE_ASSOC, + LED_TYPE_ACTIVITY, + LED_TYPE_QUALITY, +}; + +struct rt2x00_led { + struct rt2x00_dev *rt2x00dev; + struct led_classdev led_dev; + + enum led_type type; + unsigned int flags; +#define LED_INITIALIZED ( 1 << 0 ) +#define LED_REGISTERED ( 1 << 1 ) +}; + +#endif /* RT2X00LEDS_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h new file mode 100644 index 0000000..be2e37f --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -0,0 +1,460 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + Copyright (C) 2004 - 2009 Gertjan van Wingerde + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: Data structures and definitions for the rt2x00lib module. + */ + +#ifndef RT2X00LIB_H +#define RT2X00LIB_H + +#include "rt2x00dump.h" + +/* + * Interval defines + */ +#define LINK_TUNE_INTERVAL round_jiffies_relative(HZ) + +/* + * rt2x00_rate: Per rate device information + */ +struct rt2x00_rate { + unsigned short flags; +#define DEV_RATE_CCK 0x0001 +#define DEV_RATE_OFDM 0x0002 +#define DEV_RATE_SHORT_PREAMBLE 0x0004 + + unsigned short bitrate; /* In 100kbit/s */ + unsigned short ratemask; + + unsigned short plcp; + unsigned short mcs; +}; + +extern const struct rt2x00_rate rt2x00_supported_rates[12]; + +static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value) +{ + return &rt2x00_supported_rates[hw_value & 0xff]; +} + +#define RATE_MCS(__mode, __mcs) \ + ( (((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff) ) + +static inline int rt2x00_get_rate_mcs(const u16 mcs_value) +{ + return (mcs_value & 0x00ff); +} + +/* + * Radio control handlers. + */ +int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state); + +/* + * Initialization handlers. + */ +int rt2x00lib_start(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev); + +/* + * Configuration handlers. 
+ */ +void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, + struct rt2x00_intf *intf, + enum nl80211_iftype type, + const u8 *mac, const u8 *bssid); +void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev, + struct rt2x00_intf *intf, + struct ieee80211_bss_conf *conf); +void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, + struct antenna_setup ant); +void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, + const unsigned int changed_flags); + +/** + * DOC: Queue handlers + */ + +/** + * rt2x00queue_alloc_rxskb - allocate a skb for RX purposes. + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @queue: The queue for which the skb will be applicable. + */ +struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev, + struct queue_entry *entry); + +/** + * rt2x00queue_unmap_skb - Unmap a skb from DMA. + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @skb: The skb to unmap. + */ +void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); + +/** + * rt2x00queue_free_skb - free a skb + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @skb: The skb to free. + */ +void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); + +/** + * rt2x00queue_align_frame - Align 802.11 frame to 4-byte boundary + * @skb: The skb to align + * + * Align the start of the 802.11 frame to a 4-byte boundary, this could + * mean the payload is not aligned properly though. + */ +void rt2x00queue_align_frame(struct sk_buff *skb); + +/** + * rt2x00queue_align_payload - Align 802.11 payload to 4-byte boundary + * @skb: The skb to align + * @header_length: Length of 802.11 header + * + * Align the 802.11 payload to a 4-byte boundary, this could + * mean the header is not aligned properly though. + */ +void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length); + +/** + * rt2x00queue_insert_l2pad - Align 802.11 header & payload to 4-byte boundary + * @skb: The skb to align + * @header_length: Length of 802.11 header + * + * Apply L2 padding to align both header and payload to 4-byte boundary + */ +void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length); + +/** + * rt2x00queue_insert_l2pad - Remove L2 padding from 802.11 frame + * @skb: The skb to align + * @header_length: Length of 802.11 header + * + * Remove L2 padding used to align both header and payload to 4-byte boundary, + * by removing the L2 padding the header will no longer be 4-byte aligned. + */ +void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length); + +/** + * rt2x00queue_write_tx_frame - Write TX frame to hardware + * @queue: Queue over which the frame should be send + * @skb: The skb to send + * @local: frame is not from mac80211 + */ +int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, + bool local); + +/** + * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @vif: Interface for which the beacon should be updated. + * @enable_beacon: Enable beaconing + */ +int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, + struct ieee80211_vif *vif, + const bool enable_beacon); + +/** + * rt2x00queue_index_inc - Index incrementation function + * @queue: Queue (&struct data_queue) to perform the action on. + * @index: Index type (&enum queue_index) to perform the action on. 
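The helpers documented above keep the 802.11 header and payload on 4-byte boundaries; the L2-pad variant does it by inserting up to three bytes between header and payload rather than moving the whole frame. The pad size follows directly from the header length, as the standalone sketch below illustrates (the actual pad macro lives elsewhere in this patch, this only shows the arithmetic):

    #include <stdio.h>

    /*
     * Bytes needed after a header of this length so the payload starts
     * on a 4-byte boundary (0..3).
     */
    static unsigned int demo_l2pad_size(unsigned int header_length)
    {
            return (4 - (header_length & 3)) & 3;
    }

    int main(void)
    {
            unsigned int lengths[] = { 24, 26, 30, 32 };

            for (unsigned int i = 0; i < 4; i++)
                    printf("header %u -> pad %u\n",
                           lengths[i], demo_l2pad_size(lengths[i]));
            /* 24 -> 0, 26 -> 2, 30 -> 2, 32 -> 0 */
            return 0;
    }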
+ * + * This function will increase the requested index on the queue, + * it will grab the appropriate locks and handle queue overflow events by + * resetting the index to the start of the queue. + */ +void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index); + +/** + * rt2x00queue_stop_queues - Halt all data queues + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * + * This function will loop through all available queues to stop + * any pending outgoing frames. + */ +void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev); + +/** + * rt2x00queue_init_queues - Initialize all data queues + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * + * This function will loop through all available queues to clear all + * index numbers and set the queue entry to the correct initialization + * state. + */ +void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev); + +int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev); +int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev); +void rt2x00queue_free(struct rt2x00_dev *rt2x00dev); + +/** + * rt2x00link_update_stats - Update link statistics from RX frame + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @skb: Received frame + * @rxdesc: Received frame descriptor + * + * Update link statistics based on the information from the + * received frame descriptor. + */ +void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct rxdone_entry_desc *rxdesc); + +/** + * rt2x00link_start_tuner - Start periodic link tuner work + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * + * This start the link tuner periodic work, this work will + * be executed periodically until &rt2x00link_stop_tuner has + * been called. + */ +void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev); + +/** + * rt2x00link_stop_tuner - Stop periodic link tuner work + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * + * After this function completed the link tuner will not + * be running until &rt2x00link_start_tuner is called. + */ +void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev); + +/** + * rt2x00link_reset_tuner - Reset periodic link tuner work + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @antenna: Should the antenna tuning also be reset + * + * The VGC limit configured in the hardware will be reset to 0 + * which forces the driver to rediscover the correct value for + * the current association. This is needed when configuration + * options have changed which could drastically change the + * SNR level or link quality (i.e. changing the antenna setting). + * + * Resetting the link tuner will also cause the periodic work counter + * to be reset. Any driver which has a fixed limit on the number + * of rounds the link tuner is supposed to work will accept the + * tuner actions again if this limit was previously reached. + * + * If @antenna is set to true a the software antenna diversity + * tuning will also be reset. + */ +void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna); + +/** + * rt2x00link_register - Initialize link tuning functionality + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * + * Initialize work structure and all link tuning related + * parameters. This will not start the link tuning process itself. + */ +void rt2x00link_register(struct rt2x00_dev *rt2x00dev); + +/* + * Firmware handlers. 
+ */ +#ifdef CONFIG_RT2X00_LIB_FIRMWARE +int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev); +#else +static inline int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev) +{ + return 0; +} +static inline void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev) +{ +} +#endif /* CONFIG_RT2X00_LIB_FIRMWARE */ + +/* + * Debugfs handlers. + */ +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +void rt2x00debug_register(struct rt2x00_dev *rt2x00dev); +void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev); +void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, + enum rt2x00_dump_type type, struct sk_buff *skb); +void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, + struct rxdone_entry_desc *rxdesc); +#else +static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) +{ +} + +static inline void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) +{ +} + +static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, + enum rt2x00_dump_type type, + struct sk_buff *skb) +{ +} + +static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, + struct rxdone_entry_desc *rxdesc) +{ +} +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +/* + * Crypto handlers. + */ +#ifdef CONFIG_RT2X00_LIB_CRYPTO +enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key); +void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry, + struct txentry_desc *txdesc); +unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb); +void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, + struct txentry_desc *txdesc); +void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, + struct txentry_desc *txdesc); +void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, unsigned int header_length); +void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, + unsigned int header_length, + struct rxdone_entry_desc *rxdesc); +#else +static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key) +{ + return CIPHER_NONE; +} + +static inline void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry, + struct txentry_desc *txdesc) +{ +} + +static inline unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb) +{ + return 0; +} + +static inline void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, + struct txentry_desc *txdesc) +{ +} + +static inline void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, + struct txentry_desc *txdesc) +{ +} + +static inline void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, + unsigned int header_length) +{ +} + +static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, + unsigned int header_length, + struct rxdone_entry_desc *rxdesc) +{ +} +#endif /* CONFIG_RT2X00_LIB_CRYPTO */ + +/* + * HT handlers. + */ +#ifdef CONFIG_RT2X00_LIB_HT +void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, + struct txentry_desc *txdesc, + const struct rt2x00_rate *hwrate); +#else +static inline void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, + struct txentry_desc *txdesc, + const struct rt2x00_rate *hwrate) +{ +} +#endif /* CONFIG_RT2X00_LIB_HT */ + +/* + * RFkill handlers. 
+ */ +static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) +{ + if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) + wiphy_rfkill_start_polling(rt2x00dev->hw->wiphy); +} + +static inline void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) +{ + if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) + wiphy_rfkill_stop_polling(rt2x00dev->hw->wiphy); +} + +/* + * LED handlers + */ +#ifdef CONFIG_RT2X00_LIB_LEDS +void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi); +void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled); +void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled); +void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled); +void rt2x00leds_register(struct rt2x00_dev *rt2x00dev); +void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev); +void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev); +void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev); +#else +static inline void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, + int rssi) +{ +} + +static inline void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, + bool enabled) +{ +} + +static inline void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, + bool enabled) +{ +} + +static inline void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, + bool enabled) +{ +} + +static inline void rt2x00leds_register(struct rt2x00_dev *rt2x00dev) +{ +} + +static inline void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev) +{ +} + +static inline void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev) +{ +} + +static inline void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev) +{ +} +#endif /* CONFIG_RT2X00_LIB_LEDS */ + +#endif /* RT2X00LIB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c new file mode 100644 index 0000000..0efbf5a --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00link.c @@ -0,0 +1,394 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 generic link tuning routines. + */ + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +/* + * When we lack RSSI information return something less then -80 to + * tell the driver to tune the device to maximum sensitivity. + */ +#define DEFAULT_RSSI -128 + +/* + * Helper struct and macro to work with moving/walking averages. + * When adding a value to the average value the following calculation + * is needed: + * + * avg_rssi = ((avg_rssi * 7) + rssi) / 8; + * + * The advantage of this approach is that we only need 1 variable + * to store the average in (No need for a count and a total). 
+ * But more importantly, normal average values will over time + * move less and less towards newly added values this results + * that with link tuning, the device can have a very good RSSI + * for a few minutes but when the device is moved away from the AP + * the average will not decrease fast enough to compensate. + * The walking average compensates this and will move towards + * the new values correctly allowing a effective link tuning, + * the speed of the average moving towards other values depends + * on the value for the number of samples. The higher the number + * of samples, the slower the average will move. + * We use two variables to keep track of the average value to + * compensate for the rounding errors. This can be a significant + * error (>5dBm) if the factor is too low. + */ +#define AVG_SAMPLES 8 +#define AVG_FACTOR 1000 +#define MOVING_AVERAGE(__avg, __val) \ +({ \ + struct avg_val __new; \ + __new.avg_weight = \ + (__avg).avg_weight ? \ + ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \ + ((__val) * (AVG_FACTOR))) / \ + (AVG_SAMPLES) ) : \ + ((__val) * (AVG_FACTOR)); \ + __new.avg = __new.avg_weight / (AVG_FACTOR); \ + __new; \ +}) + +static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev) +{ + struct link_ant *ant = &rt2x00dev->link.ant; + + if (ant->rssi_ant.avg && rt2x00dev->link.qual.rx_success) + return ant->rssi_ant.avg; + return DEFAULT_RSSI; +} + +static int rt2x00link_antenna_get_rssi_history(struct rt2x00_dev *rt2x00dev) +{ + struct link_ant *ant = &rt2x00dev->link.ant; + + if (ant->rssi_history) + return ant->rssi_history; + return DEFAULT_RSSI; +} + +static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev, + int rssi) +{ + struct link_ant *ant = &rt2x00dev->link.ant; + ant->rssi_history = rssi; +} + +static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev) +{ + rt2x00dev->link.ant.rssi_ant.avg = 0; + rt2x00dev->link.ant.rssi_ant.avg_weight = 0; +} + +static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev) +{ + struct link_ant *ant = &rt2x00dev->link.ant; + struct antenna_setup new_ant; + int other_antenna; + + int sample_current = rt2x00link_antenna_get_link_rssi(rt2x00dev); + int sample_other = rt2x00link_antenna_get_rssi_history(rt2x00dev); + + memcpy(&new_ant, &ant->active, sizeof(new_ant)); + + /* + * We are done sampling. Now we should evaluate the results. + */ + ant->flags &= ~ANTENNA_MODE_SAMPLE; + + /* + * During the last period we have sampled the RSSI + * from both antennas. It now is time to determine + * which antenna demonstrated the best performance. + * When we are already on the antenna with the best + * performance, just create a good starting point + * for the history and we are done. + */ + if (sample_current >= sample_other) { + rt2x00link_antenna_update_rssi_history(rt2x00dev, + sample_current); + return; + } + + other_antenna = (ant->active.rx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A; + + if (ant->flags & ANTENNA_RX_DIVERSITY) + new_ant.rx = other_antenna; + + if (ant->flags & ANTENNA_TX_DIVERSITY) + new_ant.tx = other_antenna; + + rt2x00lib_config_antenna(rt2x00dev, new_ant); +} + +static void rt2x00lib_antenna_diversity_eval(struct rt2x00_dev *rt2x00dev) +{ + struct link_ant *ant = &rt2x00dev->link.ant; + struct antenna_setup new_ant; + int rssi_curr; + int rssi_old; + + memcpy(&new_ant, &ant->active, sizeof(new_ant)); + + /* + * Get current RSSI value along with the historical value, + * after that update the history with the current value. 
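MOVING_AVERAGE() above keeps a second, scaled copy of the average (avg_weight = avg * AVG_FACTOR) so repeated integer division does not let rounding errors pile up; each new sample contributes 1/AVG_SAMPLES of its weight. A standalone rework of the same arithmetic with a few sample RSSI values:

    #include <stdio.h>

    #define AVG_SAMPLES 8
    #define AVG_FACTOR  1000

    struct avg_val {
            int avg;                /* value actually used (dBm) */
            int avg_weight;         /* avg scaled by AVG_FACTOR */
    };

    static struct avg_val moving_average(struct avg_val old, int val)
    {
            struct avg_val new;

            new.avg_weight = old.avg_weight ?
                    (old.avg_weight * (AVG_SAMPLES - 1) + val * AVG_FACTOR) /
                    AVG_SAMPLES :
                    val * AVG_FACTOR;       /* first sample seeds the average */
            new.avg = new.avg_weight / AVG_FACTOR;
            return new;
    }

    int main(void)
    {
            struct avg_val rssi = { 0, 0 };

            rssi = moving_average(rssi, -70);       /* avg = -70 */
            rssi = moving_average(rssi, -50);       /* avg = -67, moves slowly */
            rssi = moving_average(rssi, -50);       /* avg = -65 */
            printf("avg=%d weight=%d\n", rssi.avg, rssi.avg_weight);
            return 0;
    }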
+ */ + rssi_curr = rt2x00link_antenna_get_link_rssi(rt2x00dev); + rssi_old = rt2x00link_antenna_get_rssi_history(rt2x00dev); + rt2x00link_antenna_update_rssi_history(rt2x00dev, rssi_curr); + + /* + * Legacy driver indicates that we should swap antenna's + * when the difference in RSSI is greater that 5. This + * also should be done when the RSSI was actually better + * then the previous sample. + * When the difference exceeds the threshold we should + * sample the rssi from the other antenna to make a valid + * comparison between the 2 antennas. + */ + if (abs(rssi_curr - rssi_old) < 5) + return; + + ant->flags |= ANTENNA_MODE_SAMPLE; + + if (ant->flags & ANTENNA_RX_DIVERSITY) + new_ant.rx = (new_ant.rx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A; + + if (ant->flags & ANTENNA_TX_DIVERSITY) + new_ant.tx = (new_ant.tx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A; + + rt2x00lib_config_antenna(rt2x00dev, new_ant); +} + +static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev) +{ + struct link_ant *ant = &rt2x00dev->link.ant; + unsigned int flags = ant->flags; + + /* + * Determine if software diversity is enabled for + * either the TX or RX antenna (or both). + * Always perform this check since within the link + * tuner interval the configuration might have changed. + */ + flags &= ~ANTENNA_RX_DIVERSITY; + flags &= ~ANTENNA_TX_DIVERSITY; + + if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY) + flags |= ANTENNA_RX_DIVERSITY; + if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY) + flags |= ANTENNA_TX_DIVERSITY; + + if (!(ant->flags & ANTENNA_RX_DIVERSITY) && + !(ant->flags & ANTENNA_TX_DIVERSITY)) { + ant->flags = 0; + return true; + } + + /* Update flags */ + ant->flags = flags; + + /* + * If we have only sampled the data over the last period + * we should now harvest the data. Otherwise just evaluate + * the data. The latter should only be performed once + * every 2 seconds. + */ + if (ant->flags & ANTENNA_MODE_SAMPLE) { + rt2x00lib_antenna_diversity_sample(rt2x00dev); + return true; + } else if (rt2x00dev->link.count & 1) { + rt2x00lib_antenna_diversity_eval(rt2x00dev); + return true; + } + + return false; +} + +void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct rxdone_entry_desc *rxdesc) +{ + struct link *link = &rt2x00dev->link; + struct link_qual *qual = &rt2x00dev->link.qual; + struct link_ant *ant = &rt2x00dev->link.ant; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + /* + * Frame was received successfully since non-succesfull + * frames would have been dropped by the hardware. + */ + qual->rx_success++; + + /* + * We are only interested in quality statistics from + * beacons which came from the BSS which we are + * associated with. + */ + if (!ieee80211_is_beacon(hdr->frame_control) || + !(rxdesc->dev_flags & RXDONE_MY_BSS)) + return; + + /* + * Update global RSSI + */ + link->avg_rssi = MOVING_AVERAGE(link->avg_rssi, rxdesc->rssi); + + /* + * Update antenna RSSI + */ + ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi); +} + +void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev) +{ + struct link *link = &rt2x00dev->link; + + /* + * Link tuning should only be performed when + * an active sta or master interface exists. + * Single monitor mode interfaces should never have + * work with link tuners. 
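The evaluation step above only schedules a sample on the other antenna when the RSSI has drifted 5 dB or more from the stored history; smaller changes keep the current antenna. A standalone sketch of just that decision (the 5 dB threshold comes from the code above, everything else is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    enum demo_antenna { ANT_A, ANT_B };

    /*
     * Return the antenna to use for the next sampling period: stay put for
     * small RSSI changes, try the other antenna when the link has shifted
     * by 5 dB or more.
     */
    static enum demo_antenna demo_diversity_eval(enum demo_antenna active,
                                                 int rssi_curr, int rssi_old)
    {
            if (abs(rssi_curr - rssi_old) < 5)
                    return active;

            return (active == ANT_A) ? ANT_B : ANT_A;
    }

    int main(void)
    {
            printf("%d\n", demo_diversity_eval(ANT_A, -70, -68)); /* 0: keep A */
            printf("%d\n", demo_diversity_eval(ANT_A, -80, -68)); /* 1: try B */
            return 0;
    }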
+ */ + if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count) + return; + + rt2x00link_reset_tuner(rt2x00dev, false); + + if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + ieee80211_queue_delayed_work(rt2x00dev->hw, + &link->work, LINK_TUNE_INTERVAL); +} + +void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev) +{ + cancel_delayed_work_sync(&rt2x00dev->link.work); +} + +void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna) +{ + struct link_qual *qual = &rt2x00dev->link.qual; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + /* + * Reset link information. + * Both the currently active vgc level as well as + * the link tuner counter should be reset. Resetting + * the counter is important for devices where the + * device should only perform link tuning during the + * first minute after being enabled. + */ + rt2x00dev->link.count = 0; + memset(qual, 0, sizeof(*qual)); + + /* + * Reset the link tuner. + */ + rt2x00dev->ops->lib->reset_tuner(rt2x00dev, qual); + + if (antenna) + rt2x00link_antenna_reset(rt2x00dev); +} + +static void rt2x00link_reset_qual(struct rt2x00_dev *rt2x00dev) +{ + struct link_qual *qual = &rt2x00dev->link.qual; + + qual->rx_success = 0; + qual->rx_failed = 0; + qual->tx_success = 0; + qual->tx_failed = 0; +} + +static void rt2x00link_tuner(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, link.work.work); + struct link *link = &rt2x00dev->link; + struct link_qual *qual = &rt2x00dev->link.qual; + + /* + * When the radio is shutting down we should + * immediately cease all link tuning. + */ + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + /* + * Update statistics. + */ + rt2x00dev->ops->lib->link_stats(rt2x00dev, qual); + rt2x00dev->low_level_stats.dot11FCSErrorCount += qual->rx_failed; + + /* + * Update quality RSSI for link tuning, + * when we have received some frames and we managed to + * collect the RSSI data we could use this. Otherwise we + * must fallback to the default RSSI value. + */ + if (!link->avg_rssi.avg || !qual->rx_success) + qual->rssi = DEFAULT_RSSI; + else + qual->rssi = link->avg_rssi.avg; + + /* + * Only perform the link tuning when Link tuning + * has been enabled (This could have been disabled from the EEPROM). + */ + if (!test_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags)) + rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count); + + /* + * Send a signal to the led to update the led signal strength. + */ + rt2x00leds_led_quality(rt2x00dev, qual->rssi); + + /* + * Evaluate antenna setup, make this the last step when + * rt2x00lib_antenna_diversity made changes the quality + * statistics will be reset. + */ + if (rt2x00lib_antenna_diversity(rt2x00dev)) + rt2x00link_reset_qual(rt2x00dev); + + /* + * Increase tuner counter, and reschedule the next link tuner run. 
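The link tuner above is a delayed work item: start_tuner queues it, each run re-queues itself after LINK_TUNE_INTERVAL, and stop_tuner cancels it synchronously. A compressed kernel-context sketch of that pattern; it is not standalone, the struct and function names are invented, and it uses plain schedule_delayed_work() where the patch goes through mac80211's ieee80211_queue_delayed_work():

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct demo_dev {
            struct delayed_work work;
            bool enabled;
    };

    static void demo_tuner(struct work_struct *work)
    {
            struct demo_dev *dev = container_of(work, struct demo_dev, work.work);

            if (!dev->enabled)              /* radio going down: stop quietly */
                    return;

            /* ... perform one tuning round here ... */

            schedule_delayed_work(&dev->work, round_jiffies_relative(HZ));
    }

    static void demo_tuner_start(struct demo_dev *dev)
    {
            INIT_DELAYED_WORK(&dev->work, demo_tuner);
            dev->enabled = true;
            schedule_delayed_work(&dev->work, round_jiffies_relative(HZ));
    }

    static void demo_tuner_stop(struct demo_dev *dev)
    {
            dev->enabled = false;
            cancel_delayed_work_sync(&dev->work);   /* waits for a running round */
    }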
+ */ + link->count++; + + if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + ieee80211_queue_delayed_work(rt2x00dev->hw, + &link->work, LINK_TUNE_INTERVAL); +} + +void rt2x00link_register(struct rt2x00_dev *rt2x00dev) +{ + INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner); +} diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c new file mode 100644 index 0000000..abbd857 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -0,0 +1,672 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00mac + Abstract: rt2x00 generic mac80211 routines. + */ + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue, + struct sk_buff *frag_skb) +{ + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb); + struct ieee80211_tx_info *rts_info; + struct sk_buff *skb; + unsigned int data_length; + int retval = 0; + + if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + data_length = sizeof(struct ieee80211_cts); + else + data_length = sizeof(struct ieee80211_rts); + + skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom); + if (unlikely(!skb)) { + WARNING(rt2x00dev, "Failed to create RTS/CTS frame.\n"); + return -ENOMEM; + } + + skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom); + skb_put(skb, data_length); + + /* + * Copy TX information over from original frame to + * RTS/CTS frame. Note that we set the no encryption flag + * since we don't want this frame to be encrypted. + * RTS frames should be acked, while CTS-to-self frames + * should not. The ready for TX flag is cleared to prevent + * it being automatically send when the descriptor is + * written to the hardware. + */ + memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb)); + rts_info = IEEE80211_SKB_CB(skb); + rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS; + rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT; + + if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + rts_info->flags |= IEEE80211_TX_CTL_NO_ACK; + else + rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK; + + /* Disable hardware encryption */ + rts_info->control.hw_key = NULL; + + /* + * RTS/CTS frame should use the length of the frame plus any + * encryption overhead that will be added by the hardware. 
+ */ + data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb); + + if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif, + frag_skb->data, data_length, tx_info, + (struct ieee80211_cts *)(skb->data)); + else + ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif, + frag_skb->data, data_length, tx_info, + (struct ieee80211_rts *)(skb->data)); + + retval = rt2x00queue_write_tx_frame(queue, skb, true); + if (retval) { + dev_kfree_skb_any(skb); + WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); + } + + return retval; +} + +int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + enum data_queue_qid qid = skb_get_queue_mapping(skb); + struct data_queue *queue; + + /* + * Mac80211 might be calling this function while we are trying + * to remove the device or perhaps suspending it. + * Note that we can only stop the TX queues inside the TX path + * due to possible race conditions in mac80211. + */ + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + goto exit_fail; + + /* + * Determine which queue to put packet on. + */ + if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && + test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) + queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM); + else + queue = rt2x00queue_get_queue(rt2x00dev, qid); + if (unlikely(!queue)) { + ERROR(rt2x00dev, + "Attempt to send packet over invalid queue %d.\n" + "Please file bug report to %s.\n", qid, DRV_PROJECT); + goto exit_fail; + } + + /* + * If CTS/RTS is required. create and queue that frame first. + * Make sure we have at least enough entries available to send + * this CTS/RTS frame as well as the data frame. + * Note that when the driver has set the set_rts_threshold() + * callback function it doesn't need software generation of + * either RTS or CTS-to-self frame and handles everything + * inside the hardware. + */ + if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | + IEEE80211_TX_RC_USE_CTS_PROTECT)) && + !rt2x00dev->ops->hw->set_rts_threshold) { + if (rt2x00queue_available(queue) <= 1) + goto exit_fail; + + if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) + goto exit_fail; + } + + if (rt2x00queue_write_tx_frame(queue, skb, false)) + goto exit_fail; + + if (rt2x00queue_threshold(queue)) + ieee80211_stop_queue(rt2x00dev->hw, qid); + + return NETDEV_TX_OK; + + exit_fail: + ieee80211_stop_queue(rt2x00dev->hw, qid); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} +EXPORT_SYMBOL_GPL(rt2x00mac_tx); + +int rt2x00mac_start(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return 0; + + return rt2x00lib_start(rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00mac_start); + +void rt2x00mac_stop(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return; + + rt2x00lib_stop(rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00mac_stop); + +int rt2x00mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct rt2x00_intf *intf = vif_to_intf(vif); + struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); + struct queue_entry *entry = NULL; + unsigned int i; + + /* + * Don't allow interfaces to be added + * the device has disappeared. 
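rt2x00mac_tx() above applies backpressure by stopping the mac80211 queue once the hardware queue reaches its threshold; the queue is woken again later from the TX completion path. A reduced kernel-context sketch of that flow, with hypothetical helpers standing in for the rt2x00 queue accounting (only ieee80211_stop_queue()/ieee80211_wake_queue() are real mac80211 calls):

    /* Kernel-context sketch, not standalone. */
    #include <net/mac80211.h>

    /* Hypothetical helpers standing in for the rt2x00 queue accounting. */
    bool demo_queue_full_soon(int qid);
    bool demo_queue_has_room(int qid);

    int demo_tx(struct ieee80211_hw *hw, struct sk_buff *skb, int qid)
    {
            /* ... hand skb to the hardware queue here ... */

            /* Stop mac80211 before the hardware queue actually overflows. */
            if (demo_queue_full_soon(qid))
                    ieee80211_stop_queue(hw, qid);

            return NETDEV_TX_OK;
    }

    void demo_tx_done(struct ieee80211_hw *hw, int qid)
    {
            /* Called from the TX completion path: release backpressure. */
            if (demo_queue_has_room(qid))
                    ieee80211_wake_queue(hw, qid);
    }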
+ */ + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || + !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) + return -ENODEV; + + switch (vif->type) { + case NL80211_IFTYPE_AP: + /* + * We don't support mixed combinations of + * sta and ap interfaces. + */ + if (rt2x00dev->intf_sta_count) + return -ENOBUFS; + + /* + * Check if we exceeded the maximum amount + * of supported interfaces. + */ + if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf) + return -ENOBUFS; + + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_WDS: + /* + * We don't support mixed combinations of + * sta and ap interfaces. + */ + if (rt2x00dev->intf_ap_count) + return -ENOBUFS; + + /* + * Check if we exceeded the maximum amount + * of supported interfaces. + */ + if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf) + return -ENOBUFS; + + break; + default: + return -EINVAL; + } + + /* + * Loop through all beacon queues to find a free + * entry. Since there are as much beacon entries + * as the maximum interfaces, this search shouldn't + * fail. + */ + for (i = 0; i < queue->limit; i++) { + entry = &queue->entries[i]; + if (!test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags)) + break; + } + + if (unlikely(i == queue->limit)) + return -ENOBUFS; + + /* + * We are now absolutely sure the interface can be created, + * increase interface count and start initialization. + */ + + if (vif->type == NL80211_IFTYPE_AP) + rt2x00dev->intf_ap_count++; + else + rt2x00dev->intf_sta_count++; + + spin_lock_init(&intf->lock); + spin_lock_init(&intf->seqlock); + mutex_init(&intf->beacon_skb_mutex); + intf->beacon = entry; + + if (vif->type == NL80211_IFTYPE_AP) + memcpy(&intf->bssid, vif->addr, ETH_ALEN); + memcpy(&intf->mac, vif->addr, ETH_ALEN); + + /* + * The MAC adddress must be configured after the device + * has been initialized. Otherwise the device can reset + * the MAC registers. + */ + rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL); + + /* + * Some filters depend on the current working mode. We can force + * an update during the next configure_filter() run by mac80211 by + * resetting the current packet_filter state. + */ + rt2x00dev->packet_filter = 0; + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_add_interface); + +void rt2x00mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct rt2x00_intf *intf = vif_to_intf(vif); + + /* + * Don't allow interfaces to be remove while + * either the device has disappeared or when + * no interface is present. + */ + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || + (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) || + (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count)) + return; + + if (vif->type == NL80211_IFTYPE_AP) + rt2x00dev->intf_ap_count--; + else + rt2x00dev->intf_sta_count--; + + /* + * Release beacon entry so it is available for + * new interfaces again. + */ + clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags); + + /* + * Make sure the bssid and mac address registers + * are cleared to prevent false ACKing of frames. 
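add_interface() above claims a beacon entry by walking the beacon queue with test_and_set_bit(), so finding a free slot and marking it used is a single atomic step, and remove_interface() hands the slot back with clear_bit(). A trimmed kernel-context sketch of that idiom with an invented entry layout:

    /* Kernel-context sketch, not standalone. */
    #include <linux/bitops.h>

    #define DEMO_SLOT_ASSIGNED      0       /* bit number inside entry->flags */

    struct demo_entry {
            unsigned long flags;
    };

    /* Claim the first free entry, or return NULL when all are taken. */
    struct demo_entry *demo_claim_slot(struct demo_entry *entries,
                                       unsigned int limit)
    {
            unsigned int i;

            for (i = 0; i < limit; i++)
                    if (!test_and_set_bit(DEMO_SLOT_ASSIGNED, &entries[i].flags))
                            return &entries[i];

            return NULL;
    }

    void demo_release_slot(struct demo_entry *entry)
    {
            clear_bit(DEMO_SLOT_ASSIGNED, &entry->flags);
    }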
+ */ + rt2x00lib_config_intf(rt2x00dev, intf, + NL80211_IFTYPE_UNSPECIFIED, NULL, NULL); +} +EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface); + +int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + + /* + * mac80211 might be calling this function while we are trying + * to remove the device or perhaps suspending it. + */ + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return 0; + + /* + * Some configuration parameters (e.g. channel and antenna values) can + * only be set when the radio is enabled, but do require the RX to + * be off. + */ + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); + + /* + * When we've just turned on the radio, we want to reprogram + * everything to ensure a consistent state + */ + rt2x00lib_config(rt2x00dev, conf, changed); + + /* + * After the radio has been enabled we need to configure + * the antenna to the default settings. rt2x00lib_config_antenna() + * should determine if any action should be taken based on + * checking if diversity has been enabled or no antenna changes + * have been made since the last configuration change. + */ + rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant); + + /* Turn RX back on */ + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_config); + +void rt2x00mac_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Mask off any flags we are going to ignore + * from the total_flags field. + */ + *total_flags &= + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_PSPOLL | + FIF_OTHER_BSS | + FIF_PROMISC_IN_BSS; + + /* + * Apply some rules to the filters: + * - Some filters imply different filters to be set. + * - Some things we can't filter out at all. + * - Multicast filter seems to kill broadcast traffic so never use it. + */ + *total_flags |= FIF_ALLMULTI; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_PROMISC_IN_BSS) + *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; + + /* + * If the device has a single filter for all control frames, + * FIF_CONTROL and FIF_PSPOLL flags imply each other. + * And if the device has more than one filter for control frames + * of different types, but has no a separate filter for PS Poll frames, + * FIF_CONTROL flag implies FIF_PSPOLL. + */ + if (!test_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags)) { + if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL) + *total_flags |= FIF_CONTROL | FIF_PSPOLL; + } + if (!test_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags)) { + if (*total_flags & FIF_CONTROL) + *total_flags |= FIF_PSPOLL; + } + + /* + * Check if there is any work left for us. 
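configure_filter() above normalizes the filter flags before programming the hardware: FIF_ALLMULTI is always forced on, the other-BSS and promiscuous-in-BSS bits imply each other, and on hardware without separate control-frame filters FIF_CONTROL and FIF_PSPOLL imply each other. A standalone sketch of that normalization using locally defined placeholder bits (the real FIF_* constants come from mac80211):

    #include <stdio.h>

    /* Placeholder bit values; the real constants come from mac80211. */
    #define DEMO_FIF_ALLMULTI       0x01
    #define DEMO_FIF_CONTROL        0x02
    #define DEMO_FIF_PSPOLL         0x04
    #define DEMO_FIF_OTHER_BSS      0x08
    #define DEMO_FIF_PROMISC        0x10

    static unsigned int demo_normalize_filter(unsigned int flags,
                                              int has_control_filters)
    {
            flags |= DEMO_FIF_ALLMULTI;     /* multicast filter is never used */

            if (flags & (DEMO_FIF_OTHER_BSS | DEMO_FIF_PROMISC))
                    flags |= DEMO_FIF_OTHER_BSS | DEMO_FIF_PROMISC;

            if (!has_control_filters &&
                (flags & (DEMO_FIF_CONTROL | DEMO_FIF_PSPOLL)))
                    flags |= DEMO_FIF_CONTROL | DEMO_FIF_PSPOLL;

            return flags;
    }

    int main(void)
    {
            printf("0x%02x\n", demo_normalize_filter(DEMO_FIF_CONTROL, 0));
            /* prints 0x07: ALLMULTI, CONTROL and PSPOLL all set */
            return 0;
    }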
+ */ + if (rt2x00dev->packet_filter == *total_flags) + return; + rt2x00dev->packet_filter = *total_flags; + + rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags); +} +EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter); + +int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + rt2x00lib_beacondone(rt2x00dev); + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_set_tim); + +#ifdef CONFIG_RT2X00_LIB_CRYPTO +static void memcpy_tkip(struct rt2x00lib_crypto *crypto, u8 *key, u8 key_len) +{ + if (key_len > NL80211_TKIP_DATA_OFFSET_ENCR_KEY) + memcpy(&crypto->key, + &key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY], + sizeof(crypto->key)); + + if (key_len > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) + memcpy(&crypto->tx_mic, + &key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + sizeof(crypto->tx_mic)); + + if (key_len > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY) + memcpy(&crypto->rx_mic, + &key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + sizeof(crypto->rx_mic)); +} + +int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct rt2x00_intf *intf = vif_to_intf(vif); + int (*set_key) (struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key); + struct rt2x00lib_crypto crypto; + static const u8 bcast_addr[ETH_ALEN] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; + + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return 0; + else if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) + return -EOPNOTSUPP; + else if (key->keylen > 32) + return -ENOSPC; + + memset(&crypto, 0, sizeof(crypto)); + + /* + * When in STA mode, bssidx is always 0 otherwise local_address[5] + * contains the bss number, see BSS_ID_MASK comments for details. + */ + if (rt2x00dev->intf_sta_count) + crypto.bssidx = 0; + else + crypto.bssidx = intf->mac[5] & (rt2x00dev->ops->max_ap_intf - 1); + + crypto.cipher = rt2x00crypto_key_to_cipher(key); + if (crypto.cipher == CIPHER_NONE) + return -EOPNOTSUPP; + + crypto.cmd = cmd; + + if (sta) { + /* some drivers need the AID */ + crypto.aid = sta->aid; + crypto.address = sta->addr; + } else + crypto.address = bcast_addr; + + if (crypto.cipher == CIPHER_TKIP) + memcpy_tkip(&crypto, &key->key[0], key->keylen); + else + memcpy(&crypto.key, &key->key[0], key->keylen); + /* + * Each BSS has a maximum of 4 shared keys. + * Shared key index values: + * 0) BSS0 key0 + * 1) BSS0 key1 + * ... + * 4) BSS1 key0 + * ... + * 8) BSS2 key0 + * ... + * Both pairwise as shared key indeces are determined by + * driver. This is required because the hardware requires + * keys to be assigned in correct order (When key 1 is + * provided but key 0 is not, then the key is not found + * by the hardware during RX). + */ + if (cmd == SET_KEY) + key->hw_key_idx = 0; + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + set_key = rt2x00dev->ops->lib->config_pairwise_key; + else + set_key = rt2x00dev->ops->lib->config_shared_key; + + if (!set_key) + return -EOPNOTSUPP; + + return set_key(rt2x00dev, &crypto, key); +} +EXPORT_SYMBOL_GPL(rt2x00mac_set_key); +#endif /* CONFIG_RT2X00_LIB_CRYPTO */ + +int rt2x00mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * The dot11ACKFailureCount, dot11RTSFailureCount and + * dot11RTSSuccessCount are updated in interrupt time. 
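memcpy_tkip() above splits the 32 bytes of TKIP key material delivered by mac80211 into a 16-byte encryption key plus 8-byte TX and RX MIC keys at the fixed nl80211 offsets. A standalone sketch of that layout; the 0/16/24 offsets mirror the NL80211_TKIP_DATA_OFFSET_* constants and the struct is illustrative:

    #include <stdint.h>
    #include <string.h>

    /* Same layout as the NL80211_TKIP_DATA_OFFSET_* constants. */
    #define TKIP_OFFSET_ENCR_KEY    0
    #define TKIP_OFFSET_TX_MIC      16
    #define TKIP_OFFSET_RX_MIC      24

    struct demo_tkip_key {
            uint8_t key[16];
            uint8_t tx_mic[8];
            uint8_t rx_mic[8];
    };

    void demo_split_tkip(struct demo_tkip_key *out,
                         const uint8_t *key, size_t key_len)
    {
            if (key_len > TKIP_OFFSET_ENCR_KEY)
                    memcpy(out->key, key + TKIP_OFFSET_ENCR_KEY,
                           sizeof(out->key));
            if (key_len > TKIP_OFFSET_TX_MIC)
                    memcpy(out->tx_mic, key + TKIP_OFFSET_TX_MIC,
                           sizeof(out->tx_mic));
            if (key_len > TKIP_OFFSET_RX_MIC)
                    memcpy(out->rx_mic, key + TKIP_OFFSET_RX_MIC,
                           sizeof(out->rx_mic));
    }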
+ * dot11FCSErrorCount is updated in the link tuner. + */ + memcpy(stats, &rt2x00dev->low_level_stats, sizeof(*stats)); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_get_stats); + +void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct rt2x00_intf *intf = vif_to_intf(vif); + int update_bssid = 0; + + /* + * mac80211 might be calling this function while we are trying + * to remove the device or perhaps suspending it. + */ + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return; + + spin_lock(&intf->lock); + + /* + * conf->bssid can be NULL if coming from the internal + * beacon update routine. + */ + if (changes & BSS_CHANGED_BSSID) { + update_bssid = 1; + memcpy(&intf->bssid, bss_conf->bssid, ETH_ALEN); + } + + spin_unlock(&intf->lock); + + /* + * Call rt2x00_config_intf() outside of the spinlock context since + * the call will sleep for USB drivers. By using the ieee80211_if_conf + * values as arguments we make keep access to rt2x00_intf thread safe + * even without the lock. + */ + if (changes & BSS_CHANGED_BSSID) + rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL, + update_bssid ? bss_conf->bssid : NULL); + + /* + * Update the beacon. + */ + if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) + rt2x00queue_update_beacon(rt2x00dev, vif, + bss_conf->enable_beacon); + + /* + * When the association status has changed we must reset the link + * tuner counter. This is because some drivers determine if they + * should perform link tuning based on the number of seconds + * while associated or not associated. + */ + if (changes & BSS_CHANGED_ASSOC) { + rt2x00dev->link.count = 0; + + if (bss_conf->assoc) + rt2x00dev->intf_associated++; + else + rt2x00dev->intf_associated--; + + rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); + } + + /* + * When the erp information has changed, we should perform + * additional configuration steps. For all other changes we are done. + */ + if (changes & ~(BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) + rt2x00lib_config_erp(rt2x00dev, intf, bss_conf); +} +EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed); + +int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, + const struct ieee80211_tx_queue_params *params) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct data_queue *queue; + + queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + if (unlikely(!queue)) + return -EINVAL; + + /* + * The passed variables are stored as real value ((2^n)-1). + * Ralink registers require to know the bit number 'n'. + */ + if (params->cw_min > 0) + queue->cw_min = fls(params->cw_min); + else + queue->cw_min = 5; /* cw_min: 2^5 = 32. */ + + if (params->cw_max > 0) + queue->cw_max = fls(params->cw_max); + else + queue->cw_max = 10; /* cw_min: 2^10 = 1024. 
*/ + + queue->aifs = params->aifs; + queue->txop = params->txop; + + INFO(rt2x00dev, + "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n", + queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_conf_tx); + +void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + bool active = !!rt2x00dev->ops->lib->rfkill_poll(rt2x00dev); + + wiphy_rfkill_set_hw_state(hw->wiphy, !active); +} +EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll); diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c new file mode 100644 index 0000000..047123b --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -0,0 +1,407 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00pci + Abstract: rt2x00 generic pci device routines. + */ + +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00pci.h" + +/* + * Register access. + */ +int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const struct rt2x00_field32 field, + u32 *reg) +{ + unsigned int i; + + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return 0; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, offset, reg); + if (!rt2x00_get_field32(*reg, field)) + return 1; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "Indirect register access failed: " + "offset=0x%.08x, value=0x%.08x\n", offset, *reg); + *reg = ~0; + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); + +/* + * TX data handlers. + */ +int rt2x00pci_write_tx_data(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + struct skb_frame_desc *skbdesc; + + /* + * This should not happen, we already checked the entry + * was ours. When the hardware disagrees there has been + * a queue corruption! + */ + if (unlikely(rt2x00dev->ops->lib->get_entry_state(entry))) { + ERROR(rt2x00dev, + "Corrupt queue %d, accessing entry which is not ours.\n" + "Please file bug report to %s.\n", + entry->queue->qid, DRV_PROJECT); + return -EINVAL; + } + + /* + * Fill in skb descriptor + */ + skbdesc = get_skb_frame_desc(entry->skb); + skbdesc->desc = entry_priv->desc; + skbdesc->desc_len = entry->queue->desc_size; + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data); + +/* + * TX/RX data handlers. 
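rt2x00pci_regbusy_read() above polls an indirect register until its busy field clears, waiting REGISTER_BUSY_DELAY microseconds between reads and giving up after REGISTER_BUSY_COUNT attempts. A standalone sketch of that poll loop with a fake register read standing in for the MMIO access (the counts here are made up):

    #include <stdio.h>

    #define DEMO_BUSY_COUNT 100             /* give up after this many reads */
    #define DEMO_BUSY_BIT   0x80000000u

    /* Stand-in for the MMIO read; flips to "not busy" after a few polls. */
    static unsigned int demo_register_read(void)
    {
            static int calls;

            return (++calls < 4) ? DEMO_BUSY_BIT : 0x1234;
    }

    /* Returns 1 and stores the value once the busy bit clears, 0 on timeout. */
    static int demo_regbusy_read(unsigned int *reg)
    {
            int i;

            for (i = 0; i < DEMO_BUSY_COUNT; i++) {
                    *reg = demo_register_read();
                    if (!(*reg & DEMO_BUSY_BIT))
                            return 1;
                    /* the driver udelay()s here between attempts */
            }
            return 0;
    }

    int main(void)
    {
            unsigned int reg;

            printf("ok=%d reg=0x%x\n", demo_regbusy_read(&reg), reg);
            return 0;
    }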
+ */ +void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue = rt2x00dev->rx; + struct queue_entry *entry; + struct queue_entry_priv_pci *entry_priv; + struct skb_frame_desc *skbdesc; + + while (1) { + entry = rt2x00queue_get_entry(queue, Q_INDEX); + entry_priv = entry->priv_data; + + if (rt2x00dev->ops->lib->get_entry_state(entry)) + break; + + /* + * Fill in desc fields of the skb descriptor + */ + skbdesc = get_skb_frame_desc(entry->skb); + skbdesc->desc = entry_priv->desc; + skbdesc->desc_len = entry->queue->desc_size; + + /* + * Send the frame to rt2x00lib for further processing. + */ + rt2x00lib_rxdone(rt2x00dev, entry); + } +} +EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); + +/* + * Device initialization handlers. + */ +static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue) +{ + struct queue_entry_priv_pci *entry_priv; + void *addr; + dma_addr_t dma; + unsigned int i; + + /* + * Allocate DMA memory for descriptor and buffer. + */ + addr = dma_alloc_coherent(rt2x00dev->dev, + queue->limit * queue->desc_size, + &dma, GFP_KERNEL | GFP_DMA); + if (!addr) + return -ENOMEM; + + memset(addr, 0, queue->limit * queue->desc_size); + + /* + * Initialize all queue entries to contain valid addresses. + */ + for (i = 0; i < queue->limit; i++) { + entry_priv = queue->entries[i].priv_data; + entry_priv->desc = addr + i * queue->desc_size; + entry_priv->desc_dma = dma + i * queue->desc_size; + } + + return 0; +} + +static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue) +{ + struct queue_entry_priv_pci *entry_priv = + queue->entries[0].priv_data; + + if (entry_priv->desc) + dma_free_coherent(rt2x00dev->dev, + queue->limit * queue->desc_size, + entry_priv->desc, entry_priv->desc_dma); + entry_priv->desc = NULL; +} + +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + int status; + + /* + * Allocate DMA + */ + queue_for_each(rt2x00dev, queue) { + status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); + if (status) + goto exit; + } + + /* + * Register interrupt handler. + */ + status = request_irq(rt2x00dev->irq, rt2x00dev->ops->lib->irq_handler, + IRQF_SHARED, rt2x00dev->name, rt2x00dev); + if (status) { + ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", + rt2x00dev->irq, status); + goto exit; + } + + return 0; + +exit: + queue_for_each(rt2x00dev, queue) + rt2x00pci_free_queue_dma(rt2x00dev, queue); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00pci_initialize); + +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + + /* + * Free irq line. + */ + free_irq(to_pci_dev(rt2x00dev->dev)->irq, rt2x00dev); + + /* + * Free DMA + */ + queue_for_each(rt2x00dev, queue) + rt2x00pci_free_queue_dma(rt2x00dev, queue); +} +EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); + +/* + * PCI driver handlers. 
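rt2x00pci_alloc_queue_dma() above allocates one coherent DMA block per queue and hands each entry a slice of it, keeping the CPU pointer and the bus address in lockstep. A reduced kernel-context sketch of that slicing; the struct and names are invented, dma_alloc_coherent() is the real API:

    /* Kernel-context sketch, not standalone. */
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    struct demo_entry {
            void *desc;             /* CPU view of this entry's descriptor */
            dma_addr_t desc_dma;    /* bus address of the same descriptor */
    };

    int demo_alloc_ring(struct device *dev, struct demo_entry *entries,
                        unsigned int limit, unsigned int desc_size)
    {
            dma_addr_t dma;
            void *addr;
            unsigned int i;

            addr = dma_alloc_coherent(dev, limit * desc_size, &dma, GFP_KERNEL);
            if (!addr)
                    return -ENOMEM;

            memset(addr, 0, limit * desc_size);

            /* Both pointers advance by the same offset for every entry. */
            for (i = 0; i < limit; i++) {
                    entries[i].desc = addr + i * desc_size;
                    entries[i].desc_dma = dma + i * desc_size;
            }
            return 0;
    }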
+ */ +static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) +{ + kfree(rt2x00dev->rf); + rt2x00dev->rf = NULL; + + kfree(rt2x00dev->eeprom); + rt2x00dev->eeprom = NULL; + + if (rt2x00dev->csr.base) { + iounmap(rt2x00dev->csr.base); + rt2x00dev->csr.base = NULL; + } +} + +static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev) +{ + struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev); + + rt2x00dev->csr.base = pci_ioremap_bar(pci_dev, 0); + if (!rt2x00dev->csr.base) + goto exit; + + rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); + if (!rt2x00dev->eeprom) + goto exit; + + rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); + if (!rt2x00dev->rf) + goto exit; + + return 0; + +exit: + ERROR_PROBE("Failed to allocate registers.\n"); + + rt2x00pci_free_reg(rt2x00dev); + + return -ENOMEM; +} + +int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) +{ + struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data; + struct ieee80211_hw *hw; + struct rt2x00_dev *rt2x00dev; + int retval; + + retval = pci_request_regions(pci_dev, pci_name(pci_dev)); + if (retval) { + ERROR_PROBE("PCI request regions failed.\n"); + return retval; + } + + retval = pci_enable_device(pci_dev); + if (retval) { + ERROR_PROBE("Enable device failed.\n"); + goto exit_release_regions; + } + + pci_set_master(pci_dev); + + if (pci_set_mwi(pci_dev)) + ERROR_PROBE("MWI not available.\n"); + + if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { + ERROR_PROBE("PCI DMA not supported.\n"); + retval = -EIO; + goto exit_disable_device; + } + + hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); + if (!hw) { + ERROR_PROBE("Failed to allocate hardware.\n"); + retval = -ENOMEM; + goto exit_disable_device; + } + + pci_set_drvdata(pci_dev, hw); + + rt2x00dev = hw->priv; + rt2x00dev->dev = &pci_dev->dev; + rt2x00dev->ops = ops; + rt2x00dev->hw = hw; + rt2x00dev->irq = pci_dev->irq; + rt2x00dev->name = pci_name(pci_dev); + + rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); + + retval = rt2x00pci_alloc_reg(rt2x00dev); + if (retval) + goto exit_free_device; + + retval = rt2x00lib_probe_dev(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00pci_free_reg(rt2x00dev); + +exit_free_device: + ieee80211_free_hw(hw); + +exit_disable_device: + if (retval != -EBUSY) + pci_disable_device(pci_dev); + +exit_release_regions: + pci_release_regions(pci_dev); + + pci_set_drvdata(pci_dev, NULL); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00pci_probe); + +void rt2x00pci_remove(struct pci_dev *pci_dev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Free all allocated data. + */ + rt2x00lib_remove_dev(rt2x00dev); + rt2x00pci_free_reg(rt2x00dev); + ieee80211_free_hw(hw); + + /* + * Free the PCI device data. 
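rt2x00pci_probe() above acquires its resources in a fixed order and, on failure, unwinds them in reverse through a chain of goto labels, so every error path releases exactly what has already been taken. A stripped-down standalone sketch of that shape with placeholder step functions:

    /* Sketch of the error-unwind shape used above; helpers are hypothetical. */
    int demo_step_a(void);
    int demo_step_b(void);
    int demo_step_c(void);
    void demo_undo_a(void);
    void demo_undo_b(void);

    int demo_probe(void)
    {
            int retval;

            retval = demo_step_a();
            if (retval)
                    return retval;          /* nothing to undo yet */

            retval = demo_step_b();
            if (retval)
                    goto exit_undo_a;

            retval = demo_step_c();
            if (retval)
                    goto exit_undo_b;

            return 0;

    exit_undo_b:
            demo_undo_b();
    exit_undo_a:
            demo_undo_a();
            return retval;
    }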
+ */ + pci_set_drvdata(pci_dev, NULL); + pci_disable_device(pci_dev); + pci_release_regions(pci_dev); +} +EXPORT_SYMBOL_GPL(rt2x00pci_remove); + +#ifdef CONFIG_PM +int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct rt2x00_dev *rt2x00dev = hw->priv; + int retval; + + retval = rt2x00lib_suspend(rt2x00dev, state); + if (retval) + return retval; + + pci_save_state(pci_dev); + pci_disable_device(pci_dev); + return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); +} +EXPORT_SYMBOL_GPL(rt2x00pci_suspend); + +int rt2x00pci_resume(struct pci_dev *pci_dev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct rt2x00_dev *rt2x00dev = hw->priv; + + if (pci_set_power_state(pci_dev, PCI_D0) || + pci_enable_device(pci_dev) || + pci_restore_state(pci_dev)) { + ERROR(rt2x00dev, "Failed to resume device.\n"); + return -EIO; + } + + return rt2x00lib_resume(rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00pci_resume); +#endif /* CONFIG_PM */ + +/* + * rt2x00pci module information. + */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 pci library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h new file mode 100644 index 0000000..8149ff6 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00pci.h @@ -0,0 +1,135 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00pci + Abstract: Data structures for the rt2x00pci module. + */ + +#ifndef RT2X00PCI_H +#define RT2X00PCI_H + +#include +#include + +/* + * This variable should be used with the + * pci_driver structure initialization. + */ +#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) + +/* + * Register access. + */ +static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 *value) +{ + *value = readl(rt2x00dev->csr.base + offset); +} + +static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + void *value, const u32 length) +{ + memcpy_fromio(value, rt2x00dev->csr.base + offset, length); +} + +static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 value) +{ + writel(value, rt2x00dev->csr.base + offset); +} + +static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const void *value, + const u32 length) +{ + memcpy_toio(rt2x00dev->csr.base + offset, value, length); +} + +/** + * rt2x00pci_regbusy_read - Read from register with busy check + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. 
+ * @offset: Register offset + * @field: Field to check if register is busy + * @reg: Pointer to where register contents should be stored + * + * This function will read the given register, and checks if the + * register is busy. If it is, it will sleep for a couple of + * microseconds before reading the register again. If the register + * is not read after a certain timeout, this function will return + * FALSE. + */ +int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const struct rt2x00_field32 field, + u32 *reg); + +/** + * rt2x00pci_write_tx_data - Initialize data for TX operation + * @entry: The entry where the frame is located + * + * This function will initialize the DMA and skb descriptor + * to prepare the entry for the actual TX operation. + */ +int rt2x00pci_write_tx_data(struct queue_entry *entry); + +/** + * struct queue_entry_priv_pci: Per entry PCI specific information + * + * @desc: Pointer to device descriptor + * @desc_dma: DMA pointer to &desc. + * @data: Pointer to device's entry memory. + * @data_dma: DMA pointer to &data. + */ +struct queue_entry_priv_pci { + __le32 *desc; + dma_addr_t desc_dma; +}; + +/** + * rt2x00pci_rxdone - Handle RX done events + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + */ +void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); + +/* + * Device initialization handlers. + */ +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); + +/* + * PCI driver handlers. + */ +int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id); +void rt2x00pci_remove(struct pci_dev *pci_dev); +#ifdef CONFIG_PM +int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state); +int rt2x00pci_resume(struct pci_dev *pci_dev); +#else +#define rt2x00pci_suspend NULL +#define rt2x00pci_resume NULL +#endif /* CONFIG_PM */ + +#endif /* RT2X00PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c new file mode 100644 index 0000000..5b6b789 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -0,0 +1,921 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + Copyright (C) 2004 - 2009 Gertjan van Wingerde + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 queue specific routines. + */ + +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev, + struct queue_entry *entry) +{ + struct sk_buff *skb; + struct skb_frame_desc *skbdesc; + unsigned int frame_size; + unsigned int head_size = 0; + unsigned int tail_size = 0; + + /* + * The frame size includes descriptor size, because the + * hardware directly receive the frame into the skbuffer. 
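The power-management block near the end of rt2x00pci.h above either declares real suspend/resume handlers or defines the names to NULL, so the pci_driver table can always reference them without an #ifdef at the registration site. A small standalone sketch of that pattern with invented names:

    #include <stddef.h>

    struct demo_driver_ops {
            int (*suspend)(void *dev);
            int (*resume)(void *dev);
    };

    #ifdef DEMO_PM
    int demo_suspend(void *dev);
    int demo_resume(void *dev);
    #else
    /* Without PM support the slots simply stay empty. */
    #define demo_suspend NULL
    #define demo_resume  NULL
    #endif

    const struct demo_driver_ops demo_ops = {
            .suspend = demo_suspend,
            .resume  = demo_resume,
    };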
+ */ + frame_size = entry->queue->data_size + entry->queue->desc_size; + + /* + * The payload should be aligned to a 4-byte boundary, + * this means we need at least 3 bytes for moving the frame + * into the correct offset. + */ + head_size = 4; + + /* + * For IV/EIV/ICV assembly we must make sure there is + * at least 8 bytes bytes available in headroom for IV/EIV + * and 8 bytes for ICV data as tailroon. + */ + if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { + head_size += 8; + tail_size += 8; + } + + /* + * Allocate skbuffer. + */ + skb = dev_alloc_skb(frame_size + head_size + tail_size); + if (!skb) + return NULL; + + /* + * Make sure we not have a frame with the requested bytes + * available in the head and tail. + */ + skb_reserve(skb, head_size); + skb_put(skb, frame_size); + + /* + * Populate skbdesc. + */ + skbdesc = get_skb_frame_desc(skb); + memset(skbdesc, 0, sizeof(*skbdesc)); + skbdesc->entry = entry; + + if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) { + skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, + skb->data, + skb->len, + DMA_FROM_DEVICE); + skbdesc->flags |= SKBDESC_DMA_MAPPED_RX; + } + + return skb; +} + +void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + + /* + * If device has requested headroom, we should make sure that + * is also mapped to the DMA so it can be used for transfering + * additional descriptor information to the hardware. + */ + skb_push(skb, rt2x00dev->ops->extra_tx_headroom); + + skbdesc->skb_dma = + dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE); + + /* + * Restore data pointer to original location again. + */ + skb_pull(skb, rt2x00dev->ops->extra_tx_headroom); + + skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; +} +EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); + +void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + + if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) { + dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len, + DMA_FROM_DEVICE); + skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX; + } + + if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { + /* + * Add headroom to the skb length, it has been removed + * by the driver, but it was actually mapped to DMA. 
+ */ + dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, + skb->len + rt2x00dev->ops->extra_tx_headroom, + DMA_TO_DEVICE); + skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; + } +} + +void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) +{ + if (!skb) + return; + + rt2x00queue_unmap_skb(rt2x00dev, skb); + dev_kfree_skb_any(skb); +} + +void rt2x00queue_align_frame(struct sk_buff *skb) +{ + unsigned int frame_length = skb->len; + unsigned int align = ALIGN_SIZE(skb, 0); + + if (!align) + return; + + skb_push(skb, align); + memmove(skb->data, skb->data + align, frame_length); + skb_trim(skb, frame_length); +} + +void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length) +{ + unsigned int frame_length = skb->len; + unsigned int align = ALIGN_SIZE(skb, header_length); + + if (!align) + return; + + skb_push(skb, align); + memmove(skb->data, skb->data + align, frame_length); + skb_trim(skb, frame_length); +} + +void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) +{ + unsigned int payload_length = skb->len - header_length; + unsigned int header_align = ALIGN_SIZE(skb, 0); + unsigned int payload_align = ALIGN_SIZE(skb, header_length); + unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0; + + /* + * Adjust the header alignment if the payload needs to be moved more + * than the header. + */ + if (payload_align > header_align) + header_align += 4; + + /* There is nothing to do if no alignment is needed */ + if (!header_align) + return; + + /* Reserve the amount of space needed in front of the frame */ + skb_push(skb, header_align); + + /* + * Move the header. + */ + memmove(skb->data, skb->data + header_align, header_length); + + /* Move the payload, if present and if required */ + if (payload_length && payload_align) + memmove(skb->data + header_length + l2pad, + skb->data + header_length + l2pad + payload_align, + payload_length); + + /* Trim the skb to the correct size */ + skb_trim(skb, header_length + l2pad + payload_length); +} + +void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) +{ + unsigned int l2pad = L2PAD_SIZE(header_length); + + if (!l2pad) + return; + + memmove(skb->data + l2pad, skb->data, header_length); + skb_pull(skb, l2pad); +} + +static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry, + struct txentry_desc *txdesc) +{ + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; + struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); + unsigned long irqflags; + + if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) || + unlikely(!tx_info->control.vif)) + return; + + /* + * Hardware should insert sequence counter. + * FIXME: We insert a software sequence counter first for + * hardware that doesn't support hardware sequence counting. + * + * This is wrong because beacons are not getting sequence + * numbers assigned properly. + * + * A secondary problem exists for drivers that cannot toggle + * sequence counting per-frame, since those will override the + * sequence counter given by mac80211. 
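The 0x10 step and the IEEE80211_SCTL_FRAG mask used just below follow from the 802.11 Sequence Control layout: the fragment number lives in bits 0-3 and the sequence number in bits 4-15. A minimal sketch of that packing, using the standard mac80211 masks from linux/ieee80211.h (the helper name is illustrative, not part of the driver):

#include <linux/ieee80211.h>

/* Pack a 12-bit sequence number and a 4-bit fragment number. */
static inline __le16 example_seq_ctrl(u16 seqno, u16 frag)
{
        return cpu_to_le16(((seqno << 4) & IEEE80211_SCTL_SEQ) |
                           (frag & IEEE80211_SCTL_FRAG));
}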
+ */ + spin_lock_irqsave(&intf->seqlock, irqflags); + + if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) + intf->seqno += 0x10; + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(intf->seqno); + + spin_unlock_irqrestore(&intf->seqlock, irqflags); + + __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); +} + +static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry, + struct txentry_desc *txdesc, + const struct rt2x00_rate *hwrate) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); + struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; + unsigned int data_length; + unsigned int duration; + unsigned int residual; + + /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */ + data_length = entry->skb->len + 4; + data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb); + + /* + * PLCP setup + * Length calculation depends on OFDM/CCK rate. + */ + txdesc->signal = hwrate->plcp; + txdesc->service = 0x04; + + if (hwrate->flags & DEV_RATE_OFDM) { + txdesc->length_high = (data_length >> 6) & 0x3f; + txdesc->length_low = data_length & 0x3f; + } else { + /* + * Convert length to microseconds. + */ + residual = GET_DURATION_RES(data_length, hwrate->bitrate); + duration = GET_DURATION(data_length, hwrate->bitrate); + + if (residual != 0) { + duration++; + + /* + * Check if we need to set the Length Extension + */ + if (hwrate->bitrate == 110 && residual <= 30) + txdesc->service |= 0x80; + } + + txdesc->length_high = (duration >> 8) & 0xff; + txdesc->length_low = duration & 0xff; + + /* + * When preamble is enabled we should set the + * preamble bit for the signal. + */ + if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + txdesc->signal |= 0x08; + } +} + +static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, + struct txentry_desc *txdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; + struct ieee80211_rate *rate = + ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); + const struct rt2x00_rate *hwrate; + + memset(txdesc, 0, sizeof(*txdesc)); + + /* + * Initialize information from queue + */ + txdesc->queue = entry->queue->qid; + txdesc->cw_min = entry->queue->cw_min; + txdesc->cw_max = entry->queue->cw_max; + txdesc->aifs = entry->queue->aifs; + + /* + * Header and alignment information. + */ + txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb); + if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) && + (entry->skb->len > txdesc->header_length)) + txdesc->l2pad = L2PAD_SIZE(txdesc->header_length); + + /* + * Check whether this frame is to be acked. + */ + if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) + __set_bit(ENTRY_TXD_ACK, &txdesc->flags); + + /* + * Check if this is a RTS/CTS frame + */ + if (ieee80211_is_rts(hdr->frame_control) || + ieee80211_is_cts(hdr->frame_control)) { + __set_bit(ENTRY_TXD_BURST, &txdesc->flags); + if (ieee80211_is_rts(hdr->frame_control)) + __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags); + else + __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags); + if (tx_info->control.rts_cts_rate_idx >= 0) + rate = + ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info); + } + + /* + * Determine retry information. 
+ */ + txdesc->retry_limit = tx_info->control.rates[0].count - 1; + if (txdesc->retry_limit >= rt2x00dev->long_retry) + __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags); + + /* + * Check if more fragments are pending + */ + if (ieee80211_has_morefrags(hdr->frame_control) || + (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) { + __set_bit(ENTRY_TXD_BURST, &txdesc->flags); + __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags); + } + + /* + * Beacons and probe responses require the tsf timestamp + * to be inserted into the frame, except for a frame that has been injected + * through a monitor interface. This latter is needed for testing a + * monitor interface. + */ + if ((ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) && + (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED))) + __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); + + /* + * Determine with what IFS priority this frame should be send. + * Set ifs to IFS_SIFS when the this is not the first fragment, + * or this fragment came after RTS/CTS. + */ + if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) && + !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) { + __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags); + txdesc->ifs = IFS_BACKOFF; + } else + txdesc->ifs = IFS_SIFS; + + /* + * Determine rate modulation. + */ + hwrate = rt2x00_get_rate(rate->hw_value); + txdesc->rate_mode = RATE_MODE_CCK; + if (hwrate->flags & DEV_RATE_OFDM) + txdesc->rate_mode = RATE_MODE_OFDM; + + /* + * Apply TX descriptor handling by components + */ + rt2x00crypto_create_tx_descriptor(entry, txdesc); + rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate); + rt2x00queue_create_tx_descriptor_seq(entry, txdesc); + rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); +} + +static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry, + struct txentry_desc *txdesc) +{ + struct data_queue *queue = entry->queue; + struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; + + rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc); + + /* + * All processing on the frame has been completed, this means + * it is now ready to be dumped to userspace through debugfs. + */ + rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb); + + /* + * Check if we need to kick the queue, there are however a few rules + * 1) Don't kick beacon queue + * 2) Don't kick unless this is the last in frame in a burst. + * When the burst flag is set, this frame is always followed + * by another frame which in some way are related to eachother. + * This is true for fragments, RTS or CTS-to-self frames. + * 3) Rule 2 can be broken when the available entries + * in the queue are less then a certain threshold. 
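The arithmetic in the CCK branch of rt2x00queue_create_tx_descriptor_plcp() above is easier to follow once you know the rate table stores bitrates in units of 100 kbps (hence the bitrate == 110 check for 11 Mbps). A standalone sketch of the same duration and length-extension calculation, assuming GET_DURATION()/GET_DURATION_RES() are the usual bits-times-ten-over-rate helpers (the function name is illustrative):

/* PLCP LENGTH field for CCK rates: PSDU duration in microseconds. */
static u16 example_cck_duration(unsigned int bytes, unsigned int rate_100kbps,
                                bool *length_ext)
{
        unsigned int bits = bytes * 8;
        u16 duration = (bits * 10) / rate_100kbps;
        unsigned int residual = (bits * 10) % rate_100kbps;

        *length_ext = false;
        if (residual) {
                duration++;
                /* 11 Mbps needs the length-extension bit for small remainders. */
                *length_ext = (rate_100kbps == 110 && residual <= 30);
        }
        return duration;
}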
+ */ + if (entry->queue->qid == QID_BEACON) + return; + + if (rt2x00queue_threshold(queue) || + !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) + rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid); +} + +int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, + bool local) +{ + struct ieee80211_tx_info *tx_info; + struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); + struct txentry_desc txdesc; + struct skb_frame_desc *skbdesc; + u8 rate_idx, rate_flags; + + if (unlikely(rt2x00queue_full(queue))) + return -ENOBUFS; + + if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) { + ERROR(queue->rt2x00dev, + "Arrived at non-free entry in the non-full queue %d.\n" + "Please file bug report to %s.\n", + queue->qid, DRV_PROJECT); + return -EINVAL; + } + + /* + * Copy all TX descriptor information into txdesc, + * after that we are free to use the skb->cb array + * for our information. + */ + entry->skb = skb; + rt2x00queue_create_tx_descriptor(entry, &txdesc); + + /* + * All information is retrieved from the skb->cb array, + * now we should claim ownership of the driver part of that + * array, preserving the bitrate index and flags. + */ + tx_info = IEEE80211_SKB_CB(skb); + rate_idx = tx_info->control.rates[0].idx; + rate_flags = tx_info->control.rates[0].flags; + skbdesc = get_skb_frame_desc(skb); + memset(skbdesc, 0, sizeof(*skbdesc)); + skbdesc->entry = entry; + skbdesc->tx_rate_idx = rate_idx; + skbdesc->tx_rate_flags = rate_flags; + + if (local) + skbdesc->flags |= SKBDESC_NOT_MAC80211; + + /* + * When hardware encryption is supported, and this frame + * is to be encrypted, we should strip the IV/EIV data from + * the frame so we can provide it to the driver separately. + */ + if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && + !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { + if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags)) + rt2x00crypto_tx_copy_iv(skb, &txdesc); + else + rt2x00crypto_tx_remove_iv(skb, &txdesc); + } + + /* + * When DMA allocation is required we should guarentee to the + * driver that the DMA is aligned to a 4-byte boundary. + * However some drivers require L2 padding to pad the payload + * rather then the header. This could be a requirement for + * PCI and USB devices, while header alignment only is valid + * for PCI devices. + */ + if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags)) + rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length); + else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags)) + rt2x00queue_align_frame(entry->skb); + + /* + * It could be possible that the queue was corrupted and this + * call failed. Since we always return NETDEV_TX_OK to mac80211, + * this frame will simply be dropped. 
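The 4-byte alignment decisions just above (rt2x00queue_insert_l2pad() versus rt2x00queue_align_frame()) both reduce to the same small arithmetic. A minimal sketch of the two quantities involved, assuming ALIGN_SIZE()/L2PAD_SIZE() measure the distance to the next 4-byte boundary as their use above suggests (the helper names are illustrative):

/* Bytes by which skb->data + header misses a 4-byte boundary. */
static inline unsigned int example_align_size(const struct sk_buff *skb,
                                              unsigned int header_length)
{
        return (unsigned long)(skb->data + header_length) & 3;
}

/* Padding needed between header and payload to realign the payload. */
static inline unsigned int example_l2pad_size(unsigned int header_length)
{
        return -header_length & 3;
}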
+ */ + if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) { + clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); + entry->skb = NULL; + return -EIO; + } + + if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags)) + rt2x00queue_map_txskb(queue->rt2x00dev, skb); + + set_bit(ENTRY_DATA_PENDING, &entry->flags); + + rt2x00queue_index_inc(queue, Q_INDEX); + rt2x00queue_write_tx_descriptor(entry, &txdesc); + + return 0; +} + +int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, + struct ieee80211_vif *vif, + const bool enable_beacon) +{ + struct rt2x00_intf *intf = vif_to_intf(vif); + struct skb_frame_desc *skbdesc; + struct txentry_desc txdesc; + __le32 desc[16]; + + if (unlikely(!intf->beacon)) + return -ENOBUFS; + + mutex_lock(&intf->beacon_skb_mutex); + + /* + * Clean up the beacon skb. + */ + rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb); + intf->beacon->skb = NULL; + + if (!enable_beacon) { + rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON); + mutex_unlock(&intf->beacon_skb_mutex); + return 0; + } + + intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif); + if (!intf->beacon->skb) { + mutex_unlock(&intf->beacon_skb_mutex); + return -ENOMEM; + } + + /* + * Copy all TX descriptor information into txdesc, + * after that we are free to use the skb->cb array + * for our information. + */ + rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc); + + /* + * For the descriptor we use a local array from where the + * driver can move it to the correct location required for + * the hardware. + */ + memset(desc, 0, sizeof(desc)); + + /* + * Fill in skb descriptor + */ + skbdesc = get_skb_frame_desc(intf->beacon->skb); + memset(skbdesc, 0, sizeof(*skbdesc)); + skbdesc->desc = desc; + skbdesc->desc_len = intf->beacon->queue->desc_size; + skbdesc->entry = intf->beacon; + + /* + * Write TX descriptor into reserved room in front of the beacon. + */ + rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc); + + /* + * Send beacon to hardware. + * Also enable beacon generation, which might have been disabled + * by the driver during the config_beacon() callback function. 
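rt2x00queue_write_tx_frame() above returns -ENOBUFS when the queue is full and leaves throttling to its caller. A hypothetical caller sketch, loosely modelled on what a mac80211 tx handler would do and not copied from this patch:

/* Hypothetical: submit one frame and throttle mac80211 when space runs low. */
static void example_tx(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb,
                       enum data_queue_qid qid)
{
        struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);

        if (unlikely(!queue))
                goto drop;

        if (rt2x00queue_write_tx_frame(queue, skb, false))
                goto drop;

        if (rt2x00queue_threshold(queue))
                ieee80211_stop_queue(rt2x00dev->hw, qid);

        return;

drop:
        dev_kfree_skb_any(skb);
}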
+ */ + rt2x00dev->ops->lib->write_beacon(intf->beacon); + rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON); + + mutex_unlock(&intf->beacon_skb_mutex); + + return 0; +} + +struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue) +{ + int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); + + if (queue == QID_RX) + return rt2x00dev->rx; + + if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx) + return &rt2x00dev->tx[queue]; + + if (!rt2x00dev->bcn) + return NULL; + + if (queue == QID_BEACON) + return &rt2x00dev->bcn[0]; + else if (queue == QID_ATIM && atim) + return &rt2x00dev->bcn[1]; + + return NULL; +} +EXPORT_SYMBOL_GPL(rt2x00queue_get_queue); + +struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, + enum queue_index index) +{ + struct queue_entry *entry; + unsigned long irqflags; + + if (unlikely(index >= Q_INDEX_MAX)) { + ERROR(queue->rt2x00dev, + "Entry requested from invalid index type (%d)\n", index); + return NULL; + } + + spin_lock_irqsave(&queue->lock, irqflags); + + entry = &queue->entries[queue->index[index]]; + + spin_unlock_irqrestore(&queue->lock, irqflags); + + return entry; +} +EXPORT_SYMBOL_GPL(rt2x00queue_get_entry); + +void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index) +{ + unsigned long irqflags; + + if (unlikely(index >= Q_INDEX_MAX)) { + ERROR(queue->rt2x00dev, + "Index change on invalid index type (%d)\n", index); + return; + } + + spin_lock_irqsave(&queue->lock, irqflags); + + queue->index[index]++; + if (queue->index[index] >= queue->limit) + queue->index[index] = 0; + + if (index == Q_INDEX) { + queue->length++; + } else if (index == Q_INDEX_DONE) { + queue->length--; + queue->count++; + } + + spin_unlock_irqrestore(&queue->lock, irqflags); +} + +static void rt2x00queue_reset(struct data_queue *queue) +{ + unsigned long irqflags; + + spin_lock_irqsave(&queue->lock, irqflags); + + queue->count = 0; + queue->length = 0; + memset(queue->index, 0, sizeof(queue->index)); + + spin_unlock_irqrestore(&queue->lock, irqflags); +} + +void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + + txall_queue_for_each(rt2x00dev, queue) + rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid); +} + +void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + unsigned int i; + + queue_for_each(rt2x00dev, queue) { + rt2x00queue_reset(queue); + + for (i = 0; i < queue->limit; i++) { + queue->entries[i].flags = 0; + + rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); + } + } +} + +static int rt2x00queue_alloc_entries(struct data_queue *queue, + const struct data_queue_desc *qdesc) +{ + struct queue_entry *entries; + unsigned int entry_size; + unsigned int i; + + rt2x00queue_reset(queue); + + queue->limit = qdesc->entry_num; + queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10); + queue->data_size = qdesc->data_size; + queue->desc_size = qdesc->desc_size; + + /* + * Allocate all queue entries. 
+ */ + entry_size = sizeof(*entries) + qdesc->priv_size; + entries = kzalloc(queue->limit * entry_size, GFP_KERNEL); + if (!entries) + return -ENOMEM; + +#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \ + ( ((char *)(__base)) + ((__limit) * (__esize)) + \ + ((__index) * (__psize)) ) + + for (i = 0; i < queue->limit; i++) { + entries[i].flags = 0; + entries[i].queue = queue; + entries[i].skb = NULL; + entries[i].entry_idx = i; + entries[i].priv_data = + QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, + sizeof(*entries), qdesc->priv_size); + } + +#undef QUEUE_ENTRY_PRIV_OFFSET + + queue->entries = entries; + + return 0; +} + +static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue) +{ + unsigned int i; + + if (!queue->entries) + return; + + for (i = 0; i < queue->limit; i++) { + if (queue->entries[i].skb) + rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb); + } +} + +static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue) +{ + unsigned int i; + struct sk_buff *skb; + + for (i = 0; i < queue->limit; i++) { + skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]); + if (!skb) + return -ENOMEM; + queue->entries[i].skb = skb; + } + + return 0; +} + +int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + int status; + + status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx); + if (status) + goto exit; + + tx_queue_for_each(rt2x00dev, queue) { + status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx); + if (status) + goto exit; + } + + status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn); + if (status) + goto exit; + + if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) { + status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1], + rt2x00dev->ops->atim); + if (status) + goto exit; + } + + status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx); + if (status) + goto exit; + + return 0; + +exit: + ERROR(rt2x00dev, "Queue entries allocation failed.\n"); + + rt2x00queue_uninitialize(rt2x00dev); + + return status; +} + +void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + + rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx); + + queue_for_each(rt2x00dev, queue) { + kfree(queue->entries); + queue->entries = NULL; + } +} + +static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue, enum data_queue_qid qid) +{ + spin_lock_init(&queue->lock); + + queue->rt2x00dev = rt2x00dev; + queue->qid = qid; + queue->txop = 0; + queue->aifs = 2; + queue->cw_min = 5; + queue->cw_max = 10; +} + +int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + enum data_queue_qid qid; + unsigned int req_atim = + !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); + + /* + * We need the following queues: + * RX: 1 + * TX: ops->tx_queues + * Beacon: 1 + * Atim: 1 (if required) + */ + rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; + + queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL); + if (!queue) { + ERROR(rt2x00dev, "Queue allocation failed.\n"); + return -ENOMEM; + } + + /* + * Initialize pointers + */ + rt2x00dev->rx = queue; + rt2x00dev->tx = &queue[1]; + rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; + + /* + * Initialize queue parameters. + * RX: qid = QID_RX + * TX: qid = QID_AC_BE + index + * TX: cw_min: 2^5 = 32. + * TX: cw_max: 2^10 = 1024. 
+ * BCN: qid = QID_BEACON + * ATIM: qid = QID_ATIM + */ + rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX); + + qid = QID_AC_BE; + tx_queue_for_each(rt2x00dev, queue) + rt2x00queue_init(rt2x00dev, queue, qid++); + + rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON); + if (req_atim) + rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM); + + return 0; +} + +void rt2x00queue_free(struct rt2x00_dev *rt2x00dev) +{ + kfree(rt2x00dev->rx); + rt2x00dev->rx = NULL; + rt2x00dev->tx = NULL; + rt2x00dev->bcn = NULL; +} diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h new file mode 100644 index 0000000..c1e482b --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00queue.h @@ -0,0 +1,639 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00 + Abstract: rt2x00 queue datastructures and routines + */ + +#ifndef RT2X00QUEUE_H +#define RT2X00QUEUE_H + +#include + +/** + * DOC: Entry frame size + * + * Ralink PCI devices demand the Frame size to be a multiple of 128 bytes, + * for USB devices this restriction does not apply, but the value of + * 2432 makes sense since it is big enough to contain the maximum fragment + * size according to the ieee802.11 specs. + * The aggregation size depends on support from the driver, but should + * be something around 3840 bytes. + */ +#define DATA_FRAME_SIZE 2432 +#define MGMT_FRAME_SIZE 256 +#define AGGREGATION_SIZE 3840 + +/** + * DOC: Number of entries per queue + * + * Under normal load without fragmentation, 12 entries are sufficient + * without the queue being filled up to the maximum. When using fragmentation + * and the queue threshold code, we need to add some additional margins to + * make sure the queue will never (or only under extreme load) fill up + * completely. + * Since we don't use preallocated DMA, having a large number of queue entries + * will have minimal impact on the memory requirements for the queue. 
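The QUEUE_ENTRY_PRIV_OFFSET() macro used in rt2x00queue_alloc_entries() above packs all queue_entry structures first and all per-entry private areas after them, inside one allocation. A minimal sketch of the same offset calculation (the function name is illustrative):

/*
 * Layout of the single kzalloc()'ed block:
 *   [entry 0][entry 1]...[entry N-1][priv 0][priv 1]...[priv N-1]
 */
static void *example_entry_priv(void *base, unsigned int index,
                                unsigned int limit, size_t entry_size,
                                size_t priv_size)
{
        return (char *)base + limit * entry_size + index * priv_size;
}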
+ */ +#define RX_ENTRIES 24 +#define TX_ENTRIES 24 +#define BEACON_ENTRIES 1 +#define ATIM_ENTRIES 8 + +/** + * enum data_queue_qid: Queue identification + * + * @QID_AC_BE: AC BE queue + * @QID_AC_BK: AC BK queue + * @QID_AC_VI: AC VI queue + * @QID_AC_VO: AC VO queue + * @QID_HCCA: HCCA queue + * @QID_MGMT: MGMT queue (prio queue) + * @QID_RX: RX queue + * @QID_OTHER: None of the above (don't use, only present for completeness) + * @QID_BEACON: Beacon queue (value unspecified, don't send it to device) + * @QID_ATIM: Atim queue (value unspeficied, don't send it to device) + */ +enum data_queue_qid { + QID_AC_BE = 0, + QID_AC_BK = 1, + QID_AC_VI = 2, + QID_AC_VO = 3, + QID_HCCA = 4, + QID_MGMT = 13, + QID_RX = 14, + QID_OTHER = 15, + QID_BEACON, + QID_ATIM, +}; + +/** + * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc + * + * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX + * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX + * @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by + * mac80211 but was stripped for processing by the driver. + * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211, + * don't try to pass it back. + */ +enum skb_frame_desc_flags { + SKBDESC_DMA_MAPPED_RX = 1 << 0, + SKBDESC_DMA_MAPPED_TX = 1 << 1, + SKBDESC_IV_STRIPPED = 1 << 2, + SKBDESC_NOT_MAC80211 = 1 << 3, +}; + +/** + * struct skb_frame_desc: Descriptor information for the skb buffer + * + * This structure is placed over the driver_data array, this means that + * this structure should not exceed the size of that array (40 bytes). + * + * @flags: Frame flags, see &enum skb_frame_desc_flags. + * @desc_len: Length of the frame descriptor. + * @tx_rate_idx: the index of the TX rate, used for TX status reporting + * @tx_rate_flags: the TX rate flags, used for TX status reporting + * @desc: Pointer to descriptor part of the frame. + * Note that this pointer could point to something outside + * of the scope of the skb->data pointer. + * @iv: IV/EIV data used during encryption/decryption. + * @skb_dma: (PCI-only) the DMA address associated with the sk buffer. + * @entry: The entry to which this sk buffer belongs. + */ +struct skb_frame_desc { + u8 flags; + + u8 desc_len; + u8 tx_rate_idx; + u8 tx_rate_flags; + + void *desc; + + __le32 iv[2]; + + dma_addr_t skb_dma; + + struct queue_entry *entry; +}; + +/** + * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff. + * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc + */ +static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct skb_frame_desc) > + IEEE80211_TX_INFO_DRIVER_DATA_SIZE); + return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data; +} + +/** + * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc + * + * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value. + * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value. + * @RXDONE_SIGNAL_MCS: Signal field contains the mcs value. + * @RXDONE_MY_BSS: Does this frame originate from device's BSS. + * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data. + * @RXDONE_CRYPTO_ICV: Driver provided ICV data. + * @RXDONE_L2PAD: 802.11 payload has been padded to 4-byte boundary. 
+ */ +enum rxdone_entry_desc_flags { + RXDONE_SIGNAL_PLCP = BIT(0), + RXDONE_SIGNAL_BITRATE = BIT(1), + RXDONE_SIGNAL_MCS = BIT(2), + RXDONE_MY_BSS = BIT(3), + RXDONE_CRYPTO_IV = BIT(4), + RXDONE_CRYPTO_ICV = BIT(5), + RXDONE_L2PAD = BIT(6), +}; + +/** + * RXDONE_SIGNAL_MASK - Define to mask off all &rxdone_entry_desc_flags flags + * except for the RXDONE_SIGNAL_* flags. This is useful to convert the dev_flags + * from &rxdone_entry_desc to a signal value type. + */ +#define RXDONE_SIGNAL_MASK \ + ( RXDONE_SIGNAL_PLCP | RXDONE_SIGNAL_BITRATE | RXDONE_SIGNAL_MCS ) + +/** + * struct rxdone_entry_desc: RX Entry descriptor + * + * Summary of information that has been read from the RX frame descriptor. + * + * @timestamp: RX Timestamp + * @signal: Signal of the received frame. + * @rssi: RSSI of the received frame. + * @noise: Measured noise during frame reception. + * @size: Data size of the received frame. + * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags). + * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags). + * @rate_mode: Rate mode (See @enum rate_modulation). + * @cipher: Cipher type used during decryption. + * @cipher_status: Decryption status. + * @iv: IV/EIV data used during decryption. + * @icv: ICV data used during decryption. + */ +struct rxdone_entry_desc { + u64 timestamp; + int signal; + int rssi; + int noise; + int size; + int flags; + int dev_flags; + u16 rate_mode; + u8 cipher; + u8 cipher_status; + + __le32 iv[2]; + __le32 icv; +}; + +/** + * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc + * + * @TXDONE_UNKNOWN: Hardware could not determine success of transmission. + * @TXDONE_SUCCESS: Frame was successfully send + * @TXDONE_FALLBACK: Frame was successfully send using a fallback rate. + * @TXDONE_FAILURE: Frame was not successfully send + * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the + * frame transmission failed due to excessive retries. + */ +enum txdone_entry_desc_flags { + TXDONE_UNKNOWN, + TXDONE_SUCCESS, + TXDONE_FALLBACK, + TXDONE_FAILURE, + TXDONE_EXCESSIVE_RETRY, +}; + +/** + * struct txdone_entry_desc: TX done entry descriptor + * + * Summary of information that has been read from the TX frame descriptor + * after the device is done with transmission. + * + * @flags: TX done flags (See &enum txdone_entry_desc_flags). + * @retry: Retry count. + */ +struct txdone_entry_desc { + unsigned long flags; + int retry; +}; + +/** + * enum txentry_desc_flags: Status flags for TX entry descriptor + * + * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame. + * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame. + * @ENTRY_TXD_GENERATE_SEQ: This frame requires sequence counter. + * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame. + * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment. + * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted. + * @ENTRY_TXD_BURST: This frame belongs to the same burst event. + * @ENTRY_TXD_ACK: An ACK is required for this frame. + * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used. + * @ENTRY_TXD_ENCRYPT: This frame should be encrypted. + * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared). + * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware. + * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware. + * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU. + * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth. + * @ENTRY_TXD_HT_SHORT_GI: Use short GI. 
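RXDONE_SIGNAL_MASK above exists so the rxdone path can tell how the driver reported the signal field. A minimal sketch of that dispatch (the function body is illustrative; only one of the three signal flags is expected to be set per frame):

/* Hypothetical: interpret rxdesc->signal according to the signal flags. */
static void example_signal_type(const struct rxdone_entry_desc *rxdesc)
{
        switch (rxdesc->dev_flags & RXDONE_SIGNAL_MASK) {
        case RXDONE_SIGNAL_PLCP:
                /* signal holds the raw PLCP value */
                break;
        case RXDONE_SIGNAL_BITRATE:
                /* signal holds a bitrate value from the rate table */
                break;
        case RXDONE_SIGNAL_MCS:
                /* signal holds an MCS index */
                break;
        }
}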
+ */ +enum txentry_desc_flags { + ENTRY_TXD_RTS_FRAME, + ENTRY_TXD_CTS_FRAME, + ENTRY_TXD_GENERATE_SEQ, + ENTRY_TXD_FIRST_FRAGMENT, + ENTRY_TXD_MORE_FRAG, + ENTRY_TXD_REQ_TIMESTAMP, + ENTRY_TXD_BURST, + ENTRY_TXD_ACK, + ENTRY_TXD_RETRY_MODE, + ENTRY_TXD_ENCRYPT, + ENTRY_TXD_ENCRYPT_PAIRWISE, + ENTRY_TXD_ENCRYPT_IV, + ENTRY_TXD_ENCRYPT_MMIC, + ENTRY_TXD_HT_AMPDU, + ENTRY_TXD_HT_BW_40, + ENTRY_TXD_HT_SHORT_GI, +}; + +/** + * struct txentry_desc: TX Entry descriptor + * + * Summary of information for the frame descriptor before sending a TX frame. + * + * @flags: Descriptor flags (See &enum queue_entry_flags). + * @queue: Queue identification (See &enum data_queue_qid). + * @header_length: Length of 802.11 header. + * @l2pad: Amount of padding to align 802.11 payload to 4-byte boundrary. + * @length_high: PLCP length high word. + * @length_low: PLCP length low word. + * @signal: PLCP signal. + * @service: PLCP service. + * @msc: MCS. + * @stbc: STBC. + * @ba_size: BA size. + * @rate_mode: Rate mode (See @enum rate_modulation). + * @mpdu_density: MDPU density. + * @retry_limit: Max number of retries. + * @aifs: AIFS value. + * @ifs: IFS value. + * @cw_min: cwmin value. + * @cw_max: cwmax value. + * @cipher: Cipher type used for encryption. + * @key_idx: Key index used for encryption. + * @iv_offset: Position where IV should be inserted by hardware. + * @iv_len: Length of IV data. + */ +struct txentry_desc { + unsigned long flags; + + enum data_queue_qid queue; + + u16 header_length; + u16 l2pad; + + u16 length_high; + u16 length_low; + u16 signal; + u16 service; + + u16 mcs; + u16 stbc; + u16 ba_size; + u16 rate_mode; + u16 mpdu_density; + + short retry_limit; + short aifs; + short ifs; + short cw_min; + short cw_max; + + enum cipher cipher; + u16 key_idx; + u16 iv_offset; + u16 iv_len; +}; + +/** + * enum queue_entry_flags: Status flags for queue entry + * + * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface. + * As long as this bit is set, this entry may only be touched + * through the interface structure. + * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data + * transfer (either TX or RX depending on the queue). The entry should + * only be touched after the device has signaled it is done with it. + * @ENTRY_OWNER_DEVICE_CRYPTO: This entry is owned by the device for data + * encryption or decryption. The entry should only be touched after + * the device has signaled it is done with it. + * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting + * for the signal to start sending. + */ +enum queue_entry_flags { + ENTRY_BCN_ASSIGNED, + ENTRY_OWNER_DEVICE_DATA, + ENTRY_OWNER_DEVICE_CRYPTO, + ENTRY_DATA_PENDING, +}; + +/** + * struct queue_entry: Entry inside the &struct data_queue + * + * @flags: Entry flags, see &enum queue_entry_flags. + * @queue: The data queue (&struct data_queue) to which this entry belongs. + * @skb: The buffer which is currently being transmitted (for TX queue), + * or used to directly recieve data in (for RX queue). + * @entry_idx: The entry index number. + * @priv_data: Private data belonging to this queue entry. The pointer + * points to data specific to a particular driver and queue type. 
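The ENTRY_OWNER_DEVICE_DATA flag documented above forms a small hand-off protocol between rt2x00lib and the driver: rt2x00queue_write_tx_frame() claims an entry with test_and_set_bit() and the completion path gives it back. A compressed sketch of that life cycle (helper names are illustrative, not driver code):

/* Hypothetical: claim an entry for a new frame, release it on completion. */
static int example_claim(struct queue_entry *entry)
{
        /* Non-zero means the device still owns the entry: queue is full. */
        if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return -ENOBUFS;
        return 0;
}

static void example_release(struct queue_entry *entry)
{
        clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
}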
+ */ +struct queue_entry { + unsigned long flags; + + struct data_queue *queue; + + struct sk_buff *skb; + + unsigned int entry_idx; + + void *priv_data; +}; + +/** + * enum queue_index: Queue index type + * + * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is + * owned by the hardware then the queue is considered to be full. + * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by + * the hardware and for which we need to run the txdone handler. If this + * entry is not owned by the hardware the queue is considered to be empty. + * @Q_INDEX_CRYPTO: Index pointer to the next entry which encryption/decription + * will be completed by the hardware next. + * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size + * of the index array. + */ +enum queue_index { + Q_INDEX, + Q_INDEX_DONE, + Q_INDEX_CRYPTO, + Q_INDEX_MAX, +}; + +/** + * struct data_queue: Data queue + * + * @rt2x00dev: Pointer to main &struct rt2x00dev where this queue belongs to. + * @entries: Base address of the &struct queue_entry which are + * part of this queue. + * @qid: The queue identification, see &enum data_queue_qid. + * @lock: Spinlock to protect index handling. Whenever @index, @index_done or + * @index_crypt needs to be changed this lock should be grabbed to prevent + * index corruption due to concurrency. + * @count: Number of frames handled in the queue. + * @limit: Maximum number of entries in the queue. + * @threshold: Minimum number of free entries before queue is kicked by force. + * @length: Number of frames in queue. + * @index: Index pointers to entry positions in the queue, + * use &enum queue_index to get a specific index field. + * @txop: maximum burst time. + * @aifs: The aifs value for outgoing frames (field ignored in RX queue). + * @cw_min: The cw min value for outgoing frames (field ignored in RX queue). + * @cw_max: The cw max value for outgoing frames (field ignored in RX queue). + * @data_size: Maximum data size for the frames in this queue. + * @desc_size: Hardware descriptor size for the data in this queue. + * @usb_endpoint: Device endpoint used for communication (USB only) + * @usb_maxpacket: Max packet size for given endpoint (USB only) + */ +struct data_queue { + struct rt2x00_dev *rt2x00dev; + struct queue_entry *entries; + + enum data_queue_qid qid; + + spinlock_t lock; + unsigned int count; + unsigned short limit; + unsigned short threshold; + unsigned short length; + unsigned short index[Q_INDEX_MAX]; + + unsigned short txop; + unsigned short aifs; + unsigned short cw_min; + unsigned short cw_max; + + unsigned short data_size; + unsigned short desc_size; + + unsigned short usb_endpoint; + unsigned short usb_maxpacket; +}; + +/** + * struct data_queue_desc: Data queue description + * + * The information in this structure is used by drivers + * to inform rt2x00lib about the creation of the data queue. + * + * @entry_num: Maximum number of entries for a queue. + * @data_size: Maximum data size for the frames in this queue. + * @desc_size: Hardware descriptor size for the data in this queue. + * @priv_size: Size of per-queue_entry private data. + */ +struct data_queue_desc { + unsigned short entry_num; + unsigned short data_size; + unsigned short desc_size; + unsigned short priv_size; +}; + +/** + * queue_end - Return pointer to the last queue (HELPER MACRO). 
+ * @__dev: Pointer to &struct rt2x00_dev + * + * Using the base rx pointer and the maximum number of available queues, + * this macro will return the address of 1 position beyond the end of the + * queues array. + */ +#define queue_end(__dev) \ + &(__dev)->rx[(__dev)->data_queues] + +/** + * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO). + * @__dev: Pointer to &struct rt2x00_dev + * + * Using the base tx pointer and the maximum number of available TX + * queues, this macro will return the address of 1 position beyond + * the end of the TX queue array. + */ +#define tx_queue_end(__dev) \ + &(__dev)->tx[(__dev)->ops->tx_queues] + +/** + * queue_next - Return pointer to next queue in list (HELPER MACRO). + * @__queue: Current queue for which we need the next queue + * + * Using the current queue address we take the address directly + * after the queue to take the next queue. Note that this macro + * should be used carefully since it does not protect against + * moving past the end of the list. (See macros &queue_end and + * &tx_queue_end for determining the end of the queue). + */ +#define queue_next(__queue) \ + &(__queue)[1] + +/** + * queue_loop - Loop through the queues within a specific range (HELPER MACRO). + * @__entry: Pointer where the current queue entry will be stored in. + * @__start: Start queue pointer. + * @__end: End queue pointer. + * + * This macro will loop through all queues between &__start and &__end. + */ +#define queue_loop(__entry, __start, __end) \ + for ((__entry) = (__start); \ + prefetch(queue_next(__entry)), (__entry) != (__end);\ + (__entry) = queue_next(__entry)) + +/** + * queue_for_each - Loop through all queues + * @__dev: Pointer to &struct rt2x00_dev + * @__entry: Pointer where the current queue entry will be stored in. + * + * This macro will loop through all available queues. + */ +#define queue_for_each(__dev, __entry) \ + queue_loop(__entry, (__dev)->rx, queue_end(__dev)) + +/** + * tx_queue_for_each - Loop through the TX queues + * @__dev: Pointer to &struct rt2x00_dev + * @__entry: Pointer where the current queue entry will be stored in. + * + * This macro will loop through all TX related queues excluding + * the Beacon and Atim queues. + */ +#define tx_queue_for_each(__dev, __entry) \ + queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev)) + +/** + * txall_queue_for_each - Loop through all TX related queues + * @__dev: Pointer to &struct rt2x00_dev + * @__entry: Pointer where the current queue entry will be stored in. + * + * This macro will loop through all TX related queues including + * the Beacon and Atim queues. + */ +#define txall_queue_for_each(__dev, __entry) \ + queue_loop(__entry, (__dev)->tx, queue_end(__dev)) + +/** + * rt2x00queue_empty - Check if the queue is empty. + * @queue: Queue to check if empty. + */ +static inline int rt2x00queue_empty(struct data_queue *queue) +{ + return queue->length == 0; +} + +/** + * rt2x00queue_full - Check if the queue is full. + * @queue: Queue to check if full. + */ +static inline int rt2x00queue_full(struct data_queue *queue) +{ + return queue->length == queue->limit; +} + +/** + * rt2x00queue_free - Check the number of available entries in queue. + * @queue: Queue to check. + */ +static inline int rt2x00queue_available(struct data_queue *queue) +{ + return queue->limit - queue->length; +} + +/** + * rt2x00queue_threshold - Check if the queue is below threshold + * @queue: Queue to check. 
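The iteration macros and fill-level helpers above are the intended way for callers to walk the queues. A small hypothetical usage example built only on what this header defines:

/* Hypothetical: count the free entries across all regular TX queues. */
static unsigned int example_free_tx_entries(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        unsigned int free = 0;

        tx_queue_for_each(rt2x00dev, queue)
                free += rt2x00queue_available(queue);

        return free;
}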
+ */ +static inline int rt2x00queue_threshold(struct data_queue *queue) +{ + return rt2x00queue_available(queue) < queue->threshold; +} + +/** + * _rt2x00_desc_read - Read a word from the hardware descriptor. + * @desc: Base descriptor address + * @word: Word index from where the descriptor should be read. + * @value: Address where the descriptor value should be written into. + */ +static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value) +{ + *value = desc[word]; +} + +/** + * rt2x00_desc_read - Read a word from the hardware descriptor, this + * function will take care of the byte ordering. + * @desc: Base descriptor address + * @word: Word index from where the descriptor should be read. + * @value: Address where the descriptor value should be written into. + */ +static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value) +{ + __le32 tmp; + _rt2x00_desc_read(desc, word, &tmp); + *value = le32_to_cpu(tmp); +} + +/** + * rt2x00_desc_write - write a word to the hardware descriptor, this + * function will take care of the byte ordering. + * @desc: Base descriptor address + * @word: Word index from where the descriptor should be written. + * @value: Value that should be written into the descriptor. + */ +static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value) +{ + desc[word] = value; +} + +/** + * rt2x00_desc_write - write a word to the hardware descriptor. + * @desc: Base descriptor address + * @word: Word index from where the descriptor should be written. + * @value: Value that should be written into the descriptor. + */ +static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value) +{ + _rt2x00_desc_write(desc, word, cpu_to_le32(value)); +} + +#endif /* RT2X00QUEUE_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h new file mode 100644 index 0000000..603bfc0 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00reg.h @@ -0,0 +1,272 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00 + Abstract: rt2x00 generic register information. + */ + +#ifndef RT2X00REG_H +#define RT2X00REG_H + +/* + * RX crypto status + */ +enum rx_crypto { + RX_CRYPTO_SUCCESS = 0, + RX_CRYPTO_FAIL_ICV = 1, + RX_CRYPTO_FAIL_MIC = 2, + RX_CRYPTO_FAIL_KEY = 3, +}; + +/* + * Antenna values + */ +enum antenna { + ANTENNA_SW_DIVERSITY = 0, + ANTENNA_A = 1, + ANTENNA_B = 2, + ANTENNA_HW_DIVERSITY = 3, +}; + +/* + * Led mode values. 
+ */ +enum led_mode { + LED_MODE_DEFAULT = 0, + LED_MODE_TXRX_ACTIVITY = 1, + LED_MODE_SIGNAL_STRENGTH = 2, + LED_MODE_ASUS = 3, + LED_MODE_ALPHA = 4, +}; + +/* + * TSF sync values + */ +enum tsf_sync { + TSF_SYNC_NONE = 0, + TSF_SYNC_INFRA = 1, + TSF_SYNC_BEACON = 2, +}; + +/* + * Device states + */ +enum dev_state { + STATE_DEEP_SLEEP = 0, + STATE_SLEEP = 1, + STATE_STANDBY = 2, + STATE_AWAKE = 3, + +/* + * Additional device states, these values are + * not strict since they are not directly passed + * into the device. + */ + STATE_RADIO_ON, + STATE_RADIO_OFF, + STATE_RADIO_RX_ON, + STATE_RADIO_RX_OFF, + STATE_RADIO_RX_ON_LINK, + STATE_RADIO_RX_OFF_LINK, + STATE_RADIO_IRQ_ON, + STATE_RADIO_IRQ_OFF, +}; + +/* + * IFS backoff values + */ +enum ifs { + IFS_BACKOFF = 0, + IFS_SIFS = 1, + IFS_NEW_BACKOFF = 2, + IFS_NONE = 3, +}; + +/* + * Cipher types for hardware encryption + */ +enum cipher { + CIPHER_NONE = 0, + CIPHER_WEP64 = 1, + CIPHER_WEP128 = 2, + CIPHER_TKIP = 3, + CIPHER_AES = 4, +/* + * The following fields were added by rt61pci and rt73usb. + */ + CIPHER_CKIP64 = 5, + CIPHER_CKIP128 = 6, + CIPHER_TKIP_NO_MIC = 7, /* Don't send to device */ + +/* + * Max cipher type. + * Note that CIPHER_NONE isn't counted, and CKIP64 and CKIP128 + * are excluded due to limitations in mac80211. + */ + CIPHER_MAX = 4, +}; + +/* + * Rate modulations + */ +enum rate_modulation { + RATE_MODE_CCK = 0, + RATE_MODE_OFDM = 1, + RATE_MODE_HT_MIX = 2, + RATE_MODE_HT_GREENFIELD = 3, +}; + +/* + * Firmware validation error codes + */ +enum firmware_errors { + FW_OK, + FW_BAD_CRC, + FW_BAD_LENGTH, + FW_BAD_VERSION, +}; + +/* + * Register handlers. + * We store the position of a register field inside a field structure, + * This will simplify the process of setting and reading a certain field + * inside the register while making sure the process remains byte order safe. + */ +struct rt2x00_field8 { + u8 bit_offset; + u8 bit_mask; +}; + +struct rt2x00_field16 { + u16 bit_offset; + u16 bit_mask; +}; + +struct rt2x00_field32 { + u32 bit_offset; + u32 bit_mask; +}; + +/* + * Power of two check, this will check + * if the mask that has been given contains and contiguous set of bits. + * Note that we cannot use the is_power_of_2() function since this + * check must be done at compile-time. + */ +#define is_power_of_two(x) ( !((x) & ((x)-1)) ) +#define low_bit_mask(x) ( ((x)-1) & ~(x) ) +#define is_valid_mask(x) is_power_of_two(1LU + (x) + low_bit_mask(x)) + +/* + * Macros to find first set bit in a variable. + * These macros behave the same as the __ffs() functions but + * the most important difference that this is done during + * compile-time rather then run-time. + */ +#define compile_ffs2(__x) \ + __builtin_choose_expr(((__x) & 0x1), 0, 1) + +#define compile_ffs4(__x) \ + __builtin_choose_expr(((__x) & 0x3), \ + (compile_ffs2((__x))), \ + (compile_ffs2((__x) >> 2) + 2)) + +#define compile_ffs8(__x) \ + __builtin_choose_expr(((__x) & 0xf), \ + (compile_ffs4((__x))), \ + (compile_ffs4((__x) >> 4) + 4)) + +#define compile_ffs16(__x) \ + __builtin_choose_expr(((__x) & 0xff), \ + (compile_ffs8((__x))), \ + (compile_ffs8((__x) >> 8) + 8)) + +#define compile_ffs32(__x) \ + __builtin_choose_expr(((__x) & 0xffff), \ + (compile_ffs16((__x))), \ + (compile_ffs16((__x) >> 16) + 16)) + +/* + * This macro will check the requirements for the FIELD{8,16,32} macros + * The mask should be a constant non-zero contiguous set of bits which + * does not exceed the given typelimit. 
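A typical register definition built on the FIELD32()/rt2x00_set_field32() helpers this header defines next looks like the following sketch; the register name, offset and field layout are invented for illustration and do not belong to any real rt2x00 chipset:

#define EXAMPLE_CSR             0x0040
#define EXAMPLE_CSR_BUSY        FIELD32(0x00000001)
#define EXAMPLE_CSR_VALUE       FIELD32(0x00ffff00)

static void example_poke(struct rt2x00_dev *rt2x00dev)
{
        u32 reg;

        rt2x00pci_register_read(rt2x00dev, EXAMPLE_CSR, &reg);
        if (!rt2x00_get_field32(reg, EXAMPLE_CSR_BUSY)) {
                rt2x00_set_field32(&reg, EXAMPLE_CSR_VALUE, 0x1234);
                rt2x00pci_register_write(rt2x00dev, EXAMPLE_CSR, reg);
        }
}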
+ */ +#define FIELD_CHECK(__mask, __type) \ + BUILD_BUG_ON(!(__mask) || \ + !is_valid_mask(__mask) || \ + (__mask) != (__type)(__mask)) \ + +#define FIELD8(__mask) \ +({ \ + FIELD_CHECK(__mask, u8); \ + (struct rt2x00_field8) { \ + compile_ffs8(__mask), (__mask) \ + }; \ +}) + +#define FIELD16(__mask) \ +({ \ + FIELD_CHECK(__mask, u16); \ + (struct rt2x00_field16) { \ + compile_ffs16(__mask), (__mask) \ + }; \ +}) + +#define FIELD32(__mask) \ +({ \ + FIELD_CHECK(__mask, u32); \ + (struct rt2x00_field32) { \ + compile_ffs32(__mask), (__mask) \ + }; \ +}) + +#define SET_FIELD(__reg, __type, __field, __value)\ +({ \ + typecheck(__type, __field); \ + *(__reg) &= ~((__field).bit_mask); \ + *(__reg) |= ((__value) << \ + ((__field).bit_offset)) & \ + ((__field).bit_mask); \ +}) + +#define GET_FIELD(__reg, __type, __field) \ +({ \ + typecheck(__type, __field); \ + ((__reg) & ((__field).bit_mask)) >> \ + ((__field).bit_offset); \ +}) + +#define rt2x00_set_field32(__reg, __field, __value) \ + SET_FIELD(__reg, struct rt2x00_field32, __field, __value) +#define rt2x00_get_field32(__reg, __field) \ + GET_FIELD(__reg, struct rt2x00_field32, __field) + +#define rt2x00_set_field16(__reg, __field, __value) \ + SET_FIELD(__reg, struct rt2x00_field16, __field, __value) +#define rt2x00_get_field16(__reg, __field) \ + GET_FIELD(__reg, struct rt2x00_field16, __field) + +#define rt2x00_set_field8(__reg, __field, __value) \ + SET_FIELD(__reg, struct rt2x00_field8, __field, __value) +#define rt2x00_get_field8(__reg, __field) \ + GET_FIELD(__reg, struct rt2x00_field8, __field) + +#endif /* RT2X00REG_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c new file mode 100644 index 0000000..111c0ff --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00soc.c @@ -0,0 +1,159 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + Copyright (C) 2004 - 2009 Felix Fietkau + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00soc + Abstract: rt2x00 generic soc device routines. 
+ */ + +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00soc.h" + +static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev) +{ + kfree(rt2x00dev->rf); + rt2x00dev->rf = NULL; + + kfree(rt2x00dev->eeprom); + rt2x00dev->eeprom = NULL; +} + +static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev) +{ + struct platform_device *pdev = to_platform_device(rt2x00dev->dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + rt2x00dev->csr.base = (void __iomem *)KSEG1ADDR(res->start); + if (!rt2x00dev->csr.base) + goto exit; + + rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); + if (!rt2x00dev->eeprom) + goto exit; + + rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); + if (!rt2x00dev->rf) + goto exit; + + return 0; + +exit: + ERROR_PROBE("Failed to allocate registers.\n"); + rt2x00soc_free_reg(rt2x00dev); + + return -ENOMEM; +} + +int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops) +{ + struct ieee80211_hw *hw; + struct rt2x00_dev *rt2x00dev; + int retval; + + hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); + if (!hw) { + ERROR_PROBE("Failed to allocate hardware.\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, hw); + + rt2x00dev = hw->priv; + rt2x00dev->dev = &pdev->dev; + rt2x00dev->ops = ops; + rt2x00dev->hw = hw; + rt2x00dev->irq = platform_get_irq(pdev, 0); + rt2x00dev->name = pdev->dev.driver->name; + + rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC); + + retval = rt2x00soc_alloc_reg(rt2x00dev); + if (retval) + goto exit_free_device; + + retval = rt2x00lib_probe_dev(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00soc_free_reg(rt2x00dev); + +exit_free_device: + ieee80211_free_hw(hw); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00soc_probe); + +int rt2x00soc_remove(struct platform_device *pdev) +{ + struct ieee80211_hw *hw = platform_get_drvdata(pdev); + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Free all allocated data. + */ + rt2x00lib_remove_dev(rt2x00dev); + rt2x00soc_free_reg(rt2x00dev); + ieee80211_free_hw(hw); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00soc_remove); + +#ifdef CONFIG_PM +int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct ieee80211_hw *hw = platform_get_drvdata(pdev); + struct rt2x00_dev *rt2x00dev = hw->priv; + + return rt2x00lib_suspend(rt2x00dev, state); +} +EXPORT_SYMBOL_GPL(rt2x00soc_suspend); + +int rt2x00soc_resume(struct platform_device *pdev) +{ + struct ieee80211_hw *hw = platform_get_drvdata(pdev); + struct rt2x00_dev *rt2x00dev = hw->priv; + + return rt2x00lib_resume(rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00soc_resume); +#endif /* CONFIG_PM */ + +/* + * rt2x00soc module information. + */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 soc library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h new file mode 100644 index 0000000..4739edf --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00soc.h @@ -0,0 +1,44 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00soc + Abstract: Data structures for the rt2x00soc module. + */ + +#ifndef RT2X00SOC_H +#define RT2X00SOC_H + +#define KSEG1ADDR(__ptr) __ptr + +/* + * SoC driver handlers. + */ +int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops); +int rt2x00soc_remove(struct platform_device *pdev); +#ifdef CONFIG_PM +int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state); +int rt2x00soc_resume(struct platform_device *pdev); +#else +#define rt2x00soc_suspend NULL +#define rt2x00soc_resume NULL +#endif /* CONFIG_PM */ + +#endif /* RT2X00SOC_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c new file mode 100644 index 0000000..0a751e7 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -0,0 +1,741 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00usb + Abstract: rt2x00 generic usb device routines. + */ + +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00usb.h" + +/* + * Interfacing with the HW. + */ +int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, const u16 value, + void *buffer, const u16 buffer_length, + const int timeout) +{ + struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); + int status; + unsigned int i; + unsigned int pipe = + (requesttype == USB_VENDOR_REQUEST_IN) ? + usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); + + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return -ENODEV; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + status = usb_control_msg(usb_dev, pipe, request, requesttype, + value, offset, buffer, buffer_length, + timeout); + if (status >= 0) + return 0; + + /* + * Check for errors + * -ENODEV: Device has disappeared, no point continuing. + * All other errors: Try again. 
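rt2x00usb_vendor_request() above already retries REGISTER_BUSY_COUNT times and gives up permanently on -ENODEV, so callers normally go through the buffered wrappers that follow it in this file. A hypothetical read helper; the request code and timeout are made-up values, not taken from this patch:

/* Hypothetical: read a block of device memory over the control endpoint. */
static int example_read_block(struct rt2x00_dev *rt2x00dev,
                              u16 offset, void *buf, u16 len)
{
        return rt2x00usb_vendor_request_buff(rt2x00dev,
                                             0x07, /* assumed request code */
                                             USB_VENDOR_REQUEST_IN,
                                             offset, buf, len,
                                             500); /* timeout in ms */
}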
+ */ + else if (status == -ENODEV) { + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); + break; + } + } + + ERROR(rt2x00dev, + "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n", + request, offset, status); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request); + +int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, void *buffer, + const u16 buffer_length, const int timeout) +{ + int status; + + BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex)); + + /* + * Check for Cache availability. + */ + if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) { + ERROR(rt2x00dev, "CSR cache not available.\n"); + return -ENOMEM; + } + + if (requesttype == USB_VENDOR_REQUEST_OUT) + memcpy(rt2x00dev->csr.cache, buffer, buffer_length); + + status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype, + offset, 0, rt2x00dev->csr.cache, + buffer_length, timeout); + + if (!status && requesttype == USB_VENDOR_REQUEST_IN) + memcpy(buffer, rt2x00dev->csr.cache, buffer_length); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock); + +int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, void *buffer, + const u16 buffer_length, const int timeout) +{ + int status; + + mutex_lock(&rt2x00dev->csr_mutex); + + status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request, + requesttype, offset, buffer, + buffer_length, timeout); + + mutex_unlock(&rt2x00dev->csr_mutex); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff); + +int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, const void *buffer, + const u16 buffer_length, + const int timeout) +{ + int status = 0; + unsigned char *tb; + u16 off, len, bsize; + + mutex_lock(&rt2x00dev->csr_mutex); + + tb = (char *)buffer; + off = offset; + len = buffer_length; + while (len && !status) { + bsize = min_t(u16, CSR_CACHE_SIZE, len); + status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request, + requesttype, off, tb, + bsize, timeout); + + tb += bsize; + len -= bsize; + off += bsize; + } + + mutex_unlock(&rt2x00dev->csr_mutex); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff); + +int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const struct rt2x00_field32 field, + u32 *reg) +{ + unsigned int i; + + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return -ENODEV; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00usb_register_read_lock(rt2x00dev, offset, reg); + if (!rt2x00_get_field32(*reg, field)) + return 1; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "Indirect register access failed: " + "offset=0x%.08x, value=0x%.08x\n", offset, *reg); + *reg = ~0; + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read); + +/* + * TX data handlers. + */ +static void rt2x00usb_interrupt_txdone(struct urb *urb) +{ + struct queue_entry *entry = (struct queue_entry *)urb->context; + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct txdone_entry_desc txdesc; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) || + !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) + return; + + /* + * Obtain the status about this packet. + * Note that when the status is 0 it does not mean the + * frame was send out correctly. 
It only means the frame + * was succesfully pushed to the hardware, we have no + * way to determine the transmission status right now. + * (Only indirectly by looking at the failed TX counters + * in the register). + */ + txdesc.flags = 0; + if (!urb->status) + __set_bit(TXDONE_UNKNOWN, &txdesc.flags); + else + __set_bit(TXDONE_FAILURE, &txdesc.flags); + txdesc.retry = 0; + + rt2x00lib_txdone(entry, &txdesc); +} + +int rt2x00usb_write_tx_data(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); + struct queue_entry_priv_usb *entry_priv = entry->priv_data; + struct skb_frame_desc *skbdesc; + u32 length; + + /* + * Add the descriptor in front of the skb. + */ + skb_push(entry->skb, entry->queue->desc_size); + memset(entry->skb->data, 0, entry->queue->desc_size); + + /* + * Fill in skb descriptor + */ + skbdesc = get_skb_frame_desc(entry->skb); + skbdesc->desc = entry->skb->data; + skbdesc->desc_len = entry->queue->desc_size; + + /* + * USB devices cannot blindly pass the skb->len as the + * length of the data to usb_fill_bulk_urb. Pass the skb + * to the driver to determine what the length should be. + */ + length = rt2x00dev->ops->lib->get_tx_data_len(entry); + + usb_fill_bulk_urb(entry_priv->urb, usb_dev, + usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), + entry->skb->data, length, + rt2x00usb_interrupt_txdone, entry); + + /* + * Make sure the skb->data pointer points to the frame, not the + * descriptor. + */ + skb_pull(entry->skb, entry->queue->desc_size); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data); + +static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry) +{ + struct queue_entry_priv_usb *entry_priv = entry->priv_data; + + if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) + usb_submit_urb(entry_priv->urb, GFP_ATOMIC); +} + +void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid qid) +{ + struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid); + unsigned long irqflags; + unsigned int index; + unsigned int index_done; + unsigned int i; + + /* + * Only protect the range we are going to loop over, + * if during our loop a extra entry is set to pending + * it should not be kicked during this run, since it + * is part of another TX operation. + */ + spin_lock_irqsave(&queue->lock, irqflags); + index = queue->index[Q_INDEX]; + index_done = queue->index[Q_INDEX_DONE]; + spin_unlock_irqrestore(&queue->lock, irqflags); + + /* + * Start from the TX done pointer, this guarentees that we will + * send out all frames in the correct order. + */ + if (index_done < index) { + for (i = index_done; i < index; i++) + rt2x00usb_kick_tx_entry(&queue->entries[i]); + } else { + for (i = index_done; i < queue->limit; i++) + rt2x00usb_kick_tx_entry(&queue->entries[i]); + + for (i = 0; i < index; i++) + rt2x00usb_kick_tx_entry(&queue->entries[i]); + } +} +EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue); + +void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid qid) +{ + struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid); + struct queue_entry_priv_usb *entry_priv; + struct queue_entry_priv_usb_bcn *bcn_priv; + unsigned int i; + bool kill_guard; + + /* + * When killing the beacon queue, we must also kill + * the beacon guard byte. + */ + kill_guard = + (qid == QID_BEACON) && + (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)); + + /* + * Cancel all entries. 
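+ * Each entry's URB is killed below; when the guardian byte is in use
+ * for the beacon queue, the matching guardian URB is killed as well.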
+ */ + for (i = 0; i < queue->limit; i++) { + entry_priv = queue->entries[i].priv_data; + usb_kill_urb(entry_priv->urb); + + /* + * Kill guardian urb (if required by driver). + */ + if (kill_guard) { + bcn_priv = queue->entries[i].priv_data; + usb_kill_urb(bcn_priv->guardian_urb); + } + } +} +EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue); + +/* + * RX data handlers. + */ +static void rt2x00usb_interrupt_rxdone(struct urb *urb) +{ + struct queue_entry *entry = (struct queue_entry *)urb->context; + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + u8 rxd[32]; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) || + !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) + return; + + /* + * Check if the received data is simply too small + * to be actually valid, or if the urb is signaling + * a problem. + */ + if (urb->actual_length < entry->queue->desc_size || urb->status) { + set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); + usb_submit_urb(urb, GFP_ATOMIC); + return; + } + + /* + * Fill in desc fields of the skb descriptor + */ + skbdesc->desc = rxd; + skbdesc->desc_len = entry->queue->desc_size; + + /* + * Send the frame to rt2x00lib for further processing. + */ + rt2x00lib_rxdone(rt2x00dev, entry); +} + +/* + * Radio handlers + */ +void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0, + REGISTER_TIMEOUT); + + /* + * The USB version of kill_tx_queue also works + * on the RX queue. + */ + rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_RX); +} +EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); + +/* + * Device initialization handlers. + */ +void rt2x00usb_clear_entry(struct queue_entry *entry) +{ + struct usb_device *usb_dev = + to_usb_device_intf(entry->queue->rt2x00dev->dev); + struct queue_entry_priv_usb *entry_priv = entry->priv_data; + int pipe; + + if (entry->queue->qid == QID_RX) { + pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint); + usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe, + entry->skb->data, entry->skb->len, + rt2x00usb_interrupt_rxdone, entry); + + set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); + usb_submit_urb(entry_priv->urb, GFP_ATOMIC); + } else { + entry->flags = 0; + } +} +EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); + +static void rt2x00usb_assign_endpoint(struct data_queue *queue, + struct usb_endpoint_descriptor *ep_desc) +{ + struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev); + int pipe; + + queue->usb_endpoint = usb_endpoint_num(ep_desc); + + if (queue->qid == QID_RX) { + pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint); + queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0); + } else { + pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint); + queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1); + } + + if (!queue->usb_maxpacket) + queue->usb_maxpacket = 1; +} + +static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev) +{ + struct usb_interface *intf = to_usb_interface(rt2x00dev->dev); + struct usb_host_interface *intf_desc = intf->cur_altsetting; + struct usb_endpoint_descriptor *ep_desc; + struct data_queue *queue = rt2x00dev->tx; + struct usb_endpoint_descriptor *tx_ep_desc = NULL; + unsigned int i; + + /* + * Walk through all available endpoints to search for "bulk in" + * and "bulk out" endpoints. When we find such endpoints collect + * the information we need from the descriptor and assign it + * to the queue. 
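+ * Bulk-in endpoints are assigned to the RX queue, each bulk-out
+ * endpoint is handed to the next TX queue in line, and the last
+ * bulk-out endpoint found is reused for any TX queue that did not
+ * receive a dedicated one.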
+ */ + for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { + ep_desc = &intf_desc->endpoint[i].desc; + + if (usb_endpoint_is_bulk_in(ep_desc)) { + rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc); + } else if (usb_endpoint_is_bulk_out(ep_desc) && + (queue != queue_end(rt2x00dev))) { + rt2x00usb_assign_endpoint(queue, ep_desc); + queue = queue_next(queue); + + tx_ep_desc = ep_desc; + } + } + + /* + * At least 1 endpoint for RX and 1 endpoint for TX must be available. + */ + if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) { + ERROR(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n"); + return -EPIPE; + } + + /* + * It might be possible not all queues have a dedicated endpoint. + * Loop through all TX queues and copy the endpoint information + * which we have gathered from already assigned endpoints. + */ + txall_queue_for_each(rt2x00dev, queue) { + if (!queue->usb_endpoint) + rt2x00usb_assign_endpoint(queue, tx_ep_desc); + } + + return 0; +} + +static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue) +{ + struct queue_entry_priv_usb *entry_priv; + struct queue_entry_priv_usb_bcn *bcn_priv; + unsigned int i; + + for (i = 0; i < queue->limit; i++) { + entry_priv = queue->entries[i].priv_data; + entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!entry_priv->urb) + return -ENOMEM; + } + + /* + * If this is not the beacon queue or + * no guardian byte was required for the beacon, + * then we are done. + */ + if (rt2x00dev->bcn != queue || + !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)) + return 0; + + for (i = 0; i < queue->limit; i++) { + bcn_priv = queue->entries[i].priv_data; + bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!bcn_priv->guardian_urb) + return -ENOMEM; + } + + return 0; +} + +static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev, + struct data_queue *queue) +{ + struct queue_entry_priv_usb *entry_priv; + struct queue_entry_priv_usb_bcn *bcn_priv; + unsigned int i; + + if (!queue->entries) + return; + + for (i = 0; i < queue->limit; i++) { + entry_priv = queue->entries[i].priv_data; + usb_kill_urb(entry_priv->urb); + usb_free_urb(entry_priv->urb); + } + + /* + * If this is not the beacon queue or + * no guardian byte was required for the beacon, + * then we are done. + */ + if (rt2x00dev->bcn != queue || + !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)) + return; + + for (i = 0; i < queue->limit; i++) { + bcn_priv = queue->entries[i].priv_data; + usb_kill_urb(bcn_priv->guardian_urb); + usb_free_urb(bcn_priv->guardian_urb); + } +} + +int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + int status; + + /* + * Find endpoints for each queue + */ + status = rt2x00usb_find_endpoints(rt2x00dev); + if (status) + goto exit; + + /* + * Allocate DMA + */ + queue_for_each(rt2x00dev, queue) { + status = rt2x00usb_alloc_urb(rt2x00dev, queue); + if (status) + goto exit; + } + + return 0; + +exit: + rt2x00usb_uninitialize(rt2x00dev); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_initialize); + +void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + + queue_for_each(rt2x00dev, queue) + rt2x00usb_free_urb(rt2x00dev, queue); +} +EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize); + +/* + * USB driver handlers. 
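+ * These are the probe, disconnect, suspend and resume handlers that
+ * the individual rt2x00 USB drivers hook into their usb_driver
+ * structure.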
+ */ +static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev) +{ + kfree(rt2x00dev->rf); + rt2x00dev->rf = NULL; + + kfree(rt2x00dev->eeprom); + rt2x00dev->eeprom = NULL; + + kfree(rt2x00dev->csr.cache); + rt2x00dev->csr.cache = NULL; +} + +static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev) +{ + rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL); + if (!rt2x00dev->csr.cache) + goto exit; + + rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); + if (!rt2x00dev->eeprom) + goto exit; + + rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); + if (!rt2x00dev->rf) + goto exit; + + return 0; + +exit: + ERROR_PROBE("Failed to allocate registers.\n"); + + rt2x00usb_free_reg(rt2x00dev); + + return -ENOMEM; +} + +int rt2x00usb_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id) +{ + struct usb_device *usb_dev = interface_to_usbdev(usb_intf); + struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info; + struct ieee80211_hw *hw; + struct rt2x00_dev *rt2x00dev; + int retval; + + usb_dev = usb_get_dev(usb_dev); + + hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); + if (!hw) { + ERROR_PROBE("Failed to allocate hardware.\n"); + retval = -ENOMEM; + goto exit_put_device; + } + + usb_set_intfdata(usb_intf, hw); + + rt2x00dev = hw->priv; + rt2x00dev->dev = &usb_intf->dev; + rt2x00dev->ops = ops; + rt2x00dev->hw = hw; + + rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB); + + retval = rt2x00usb_alloc_reg(rt2x00dev); + if (retval) + goto exit_free_device; + + retval = rt2x00lib_probe_dev(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00usb_free_reg(rt2x00dev); + +exit_free_device: + ieee80211_free_hw(hw); + +exit_put_device: + usb_put_dev(usb_dev); + + usb_set_intfdata(usb_intf, NULL); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00usb_probe); + +void rt2x00usb_disconnect(struct usb_interface *usb_intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Free all allocated data. + */ + rt2x00lib_remove_dev(rt2x00dev); + rt2x00usb_free_reg(rt2x00dev); + ieee80211_free_hw(hw); + + /* + * Free the USB device data. + */ + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); +} +EXPORT_SYMBOL_GPL(rt2x00usb_disconnect); + +#ifdef CONFIG_PM +int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state) +{ + struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); + struct rt2x00_dev *rt2x00dev = hw->priv; + int retval; + + retval = rt2x00lib_suspend(rt2x00dev, state); + if (retval) + return retval; + + /* + * Decrease usbdev refcount. + */ + usb_put_dev(interface_to_usbdev(usb_intf)); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00usb_suspend); + +int rt2x00usb_resume(struct usb_interface *usb_intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); + struct rt2x00_dev *rt2x00dev = hw->priv; + + usb_get_dev(interface_to_usbdev(usb_intf)); + + return rt2x00lib_resume(rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00usb_resume); +#endif /* CONFIG_PM */ + +/* + * rt2x00usb module information. 
+ */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 usb library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h new file mode 100644 index 0000000..3da6841 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00usb.h @@ -0,0 +1,451 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00usb + Abstract: Data structures for the rt2x00usb module. + */ + +#ifndef RT2X00USB_H +#define RT2X00USB_H + +#include + +#define to_usb_device_intf(d) \ +({ \ + struct usb_interface *intf = to_usb_interface(d); \ + interface_to_usbdev(intf); \ +}) + +/* + * This variable should be used with the + * usb_driver structure initialization. + */ +#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops) + +/* + * For USB vendor requests we need to pass a timeout + * time in ms, for this we use the REGISTER_TIMEOUT, + * however when loading firmware a higher value is + * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE. + */ +#define REGISTER_TIMEOUT 500 +#define REGISTER_TIMEOUT_FIRMWARE 1000 + +/** + * REGISTER_TIMEOUT16 - Determine the timeout for 16bit register access + * @__datalen: Data length + */ +#define REGISTER_TIMEOUT16(__datalen) \ + ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u16)) ) + +/** + * REGISTER_TIMEOUT32 - Determine the timeout for 32bit register access + * @__datalen: Data length + */ +#define REGISTER_TIMEOUT32(__datalen) \ + ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u32)) ) + +/* + * Cache size + */ +#define CSR_CACHE_SIZE 64 + +/* + * USB request types. + */ +#define USB_VENDOR_REQUEST ( USB_TYPE_VENDOR | USB_RECIP_DEVICE ) +#define USB_VENDOR_REQUEST_IN ( USB_DIR_IN | USB_VENDOR_REQUEST ) +#define USB_VENDOR_REQUEST_OUT ( USB_DIR_OUT | USB_VENDOR_REQUEST ) + +/** + * enum rt2x00usb_vendor_request: USB vendor commands. + */ +enum rt2x00usb_vendor_request { + USB_DEVICE_MODE = 1, + USB_SINGLE_WRITE = 2, + USB_SINGLE_READ = 3, + USB_MULTI_WRITE = 6, + USB_MULTI_READ = 7, + USB_EEPROM_WRITE = 8, + USB_EEPROM_READ = 9, + USB_LED_CONTROL = 10, /* RT73USB */ + USB_RX_CONTROL = 12, +}; + +/** + * enum rt2x00usb_mode_offset: Device modes offset. 
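+ *
+ * These values are issued through the USB_DEVICE_MODE vendor request
+ * to move the device between its operating modes.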
+ */ +enum rt2x00usb_mode_offset { + USB_MODE_RESET = 1, + USB_MODE_UNPLUG = 2, + USB_MODE_FUNCTION = 3, + USB_MODE_TEST = 4, + USB_MODE_SLEEP = 7, /* RT73USB */ + USB_MODE_FIRMWARE = 8, /* RT73USB */ + USB_MODE_WAKEUP = 9, /* RT73USB */ +}; + +/** + * rt2x00usb_vendor_request - Send register command to device + * @rt2x00dev: Pointer to &struct rt2x00_dev + * @request: USB vendor command (See &enum rt2x00usb_vendor_request) + * @requesttype: Request type &USB_VENDOR_REQUEST_* + * @offset: Register offset to perform action on + * @value: Value to write to device + * @buffer: Buffer where information will be read/written to by device + * @buffer_length: Size of &buffer + * @timeout: Operation timeout + * + * This is the main function to communicate with the device, + * the &buffer argument _must_ either be NULL or point to + * a buffer allocated by kmalloc. Failure to do so can lead + * to unexpected behavior depending on the architecture. + */ +int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, const u16 value, + void *buffer, const u16 buffer_length, + const int timeout); + +/** + * rt2x00usb_vendor_request_buff - Send register command to device (buffered) + * @rt2x00dev: Pointer to &struct rt2x00_dev + * @request: USB vendor command (See &enum rt2x00usb_vendor_request) + * @requesttype: Request type &USB_VENDOR_REQUEST_* + * @offset: Register offset to perform action on + * @buffer: Buffer where information will be read/written to by device + * @buffer_length: Size of &buffer + * @timeout: Operation timeout + * + * This function will use a previously with kmalloc allocated cache + * to communicate with the device. The contents of the buffer pointer + * will be copied to this cache when writing, or read from the cache + * when reading. + * Buffers send to &rt2x00usb_vendor_request _must_ be allocated with + * kmalloc. Hence the reason for using a previously allocated cache + * which has been allocated properly. + */ +int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, void *buffer, + const u16 buffer_length, const int timeout); + +/** + * rt2x00usb_vendor_request_buff - Send register command to device (buffered) + * @rt2x00dev: Pointer to &struct rt2x00_dev + * @request: USB vendor command (See &enum rt2x00usb_vendor_request) + * @requesttype: Request type &USB_VENDOR_REQUEST_* + * @offset: Register offset to perform action on + * @buffer: Buffer where information will be read/written to by device + * @buffer_length: Size of &buffer + * @timeout: Operation timeout + * + * A version of &rt2x00usb_vendor_request_buff which must be called + * if the usb_cache_mutex is already held. + */ +int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, void *buffer, + const u16 buffer_length, const int timeout); + +/** + * rt2x00usb_vendor_request_large_buff - Send register command to device (buffered) + * @rt2x00dev: Pointer to &struct rt2x00_dev + * @request: USB vendor command (See &enum rt2x00usb_vendor_request) + * @requesttype: Request type &USB_VENDOR_REQUEST_* + * @offset: Register start offset to perform action on + * @buffer: Buffer where information will be read/written to by device + * @buffer_length: Size of &buffer + * @timeout: Operation timeout + * + * This function is used to transfer register data in blocks larger + * then CSR_CACHE_SIZE. 
Use for firmware upload, keys and beacons. + */ +int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, const void *buffer, + const u16 buffer_length, + const int timeout); + +/** + * rt2x00usb_vendor_request_sw - Send single register command to device + * @rt2x00dev: Pointer to &struct rt2x00_dev + * @request: USB vendor command (See &enum rt2x00usb_vendor_request) + * @offset: Register offset to perform action on + * @value: Value to write to device + * @timeout: Operation timeout + * + * Simple wrapper around rt2x00usb_vendor_request to write a single + * command to the device. Since we don't use the buffer argument we + * don't have to worry about kmalloc here. + */ +static inline int rt2x00usb_vendor_request_sw(struct rt2x00_dev *rt2x00dev, + const u8 request, + const u16 offset, + const u16 value, + const int timeout) +{ + return rt2x00usb_vendor_request(rt2x00dev, request, + USB_VENDOR_REQUEST_OUT, offset, + value, NULL, 0, timeout); +} + +/** + * rt2x00usb_eeprom_read - Read eeprom from device + * @rt2x00dev: Pointer to &struct rt2x00_dev + * @eeprom: Pointer to eeprom array to store the information in + * @length: Number of bytes to read from the eeprom + * + * Simple wrapper around rt2x00usb_vendor_request to read the eeprom + * from the device. Note that the eeprom argument _must_ be allocated using + * kmalloc for correct handling inside the kernel USB layer. + */ +static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev, + __le16 *eeprom, const u16 length) +{ + return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ, + USB_VENDOR_REQUEST_IN, 0, 0, + eeprom, length, + REGISTER_TIMEOUT16(length)); +} + +/** + * rt2x00usb_register_read - Read 32bit register word + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * @offset: Register offset + * @value: Pointer to where register contents should be stored + * + * This function is a simple wrapper for 32bit register access + * through rt2x00usb_vendor_request_buff(). + */ +static inline void rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 *value) +{ + __le32 reg; + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, + USB_VENDOR_REQUEST_IN, offset, + ®, sizeof(reg), REGISTER_TIMEOUT); + *value = le32_to_cpu(reg); +} + +/** + * rt2x00usb_register_read_lock - Read 32bit register word + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * @offset: Register offset + * @value: Pointer to where register contents should be stored + * + * This function is a simple wrapper for 32bit register access + * through rt2x00usb_vendor_req_buff_lock(). + */ +static inline void rt2x00usb_register_read_lock(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 *value) +{ + __le32 reg; + rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ, + USB_VENDOR_REQUEST_IN, offset, + ®, sizeof(reg), REGISTER_TIMEOUT); + *value = le32_to_cpu(reg); +} + +/** + * rt2x00usb_register_multiread - Read 32bit register words + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * @offset: Register offset + * @value: Pointer to where register contents should be stored + * @length: Length of the data + * + * This function is a simple wrapper for 32bit register access + * through rt2x00usb_vendor_request_buff(). 
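+ * The transfer is issued as a single USB_MULTI_READ vendor request
+ * with a timeout that scales with the requested length.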
+ */ +static inline void rt2x00usb_register_multiread(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + void *value, const u32 length) +{ + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, + USB_VENDOR_REQUEST_IN, offset, + value, length, + REGISTER_TIMEOUT32(length)); +} + +/** + * rt2x00usb_register_write - Write 32bit register word + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * @offset: Register offset + * @value: Data which should be written + * + * This function is a simple wrapper for 32bit register access + * through rt2x00usb_vendor_request_buff(). + */ +static inline void rt2x00usb_register_write(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 value) +{ + __le32 reg = cpu_to_le32(value); + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, offset, + ®, sizeof(reg), REGISTER_TIMEOUT); +} + +/** + * rt2x00usb_register_write_lock - Write 32bit register word + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * @offset: Register offset + * @value: Data which should be written + * + * This function is a simple wrapper for 32bit register access + * through rt2x00usb_vendor_req_buff_lock(). + */ +static inline void rt2x00usb_register_write_lock(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + u32 value) +{ + __le32 reg = cpu_to_le32(value); + rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, offset, + ®, sizeof(reg), REGISTER_TIMEOUT); +} + +/** + * rt2x00usb_register_multiwrite - Write 32bit register words + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * @offset: Register offset + * @value: Data which should be written + * @length: Length of the data + * + * This function is a simple wrapper for 32bit register access + * through rt2x00usb_vendor_request_buff(). + */ +static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const void *value, + const u32 length) +{ + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, offset, + (void *)value, length, + REGISTER_TIMEOUT32(length)); +} + +/** + * rt2x00usb_regbusy_read - Read from register with busy check + * @rt2x00dev: Device pointer, see &struct rt2x00_dev. + * @offset: Register offset + * @field: Field to check if register is busy + * @reg: Pointer to where register contents should be stored + * + * This function will read the given register, and checks if the + * register is busy. If it is, it will sleep for a couple of + * microseconds before reading the register again. If the register + * is not read after a certain timeout, this function will return + * FALSE. + */ +int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, + const unsigned int offset, + const struct rt2x00_field32 field, + u32 *reg); + +/* + * Radio handlers + */ +void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev); + +/** + * rt2x00usb_write_tx_data - Initialize URB for TX operation + * @entry: The entry where the frame is located + * + * This function will initialize the URB and skb descriptor + * to prepare the entry for the actual TX operation. + */ +int rt2x00usb_write_tx_data(struct queue_entry *entry); + +/** + * struct queue_entry_priv_usb: Per entry USB specific information + * + * @urb: Urb structure used for device communication. 
+ */ +struct queue_entry_priv_usb { + struct urb *urb; +}; + +/** + * struct queue_entry_priv_usb_bcn: Per TX entry USB specific information + * + * The first section should match &struct queue_entry_priv_usb exactly. + * rt2500usb can use this structure to send a guardian byte when working + * with beacons. + * + * @urb: Urb structure used for device communication. + * @guardian_data: Set to 0, used for sending the guardian data. + * @guardian_urb: Urb structure used to send the guardian data. + */ +struct queue_entry_priv_usb_bcn { + struct urb *urb; + + unsigned int guardian_data; + struct urb *guardian_urb; +}; + +/** + * rt2x00usb_kick_tx_queue - Kick data queue + * @rt2x00dev: Pointer to &struct rt2x00_dev + * @qid: Data queue to kick + * + * This will walk through all entries of the queue and push all pending + * frames to the hardware as a single burst. + */ +void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid qid); + +/** + * rt2x00usb_kill_tx_queue - Kill data queue + * @rt2x00dev: Pointer to &struct rt2x00_dev + * @qid: Data queue to kill + * + * This will walk through all entries of the queue and kill all + * previously kicked frames before they can be send. + */ +void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid qid); + +/* + * Device initialization handlers. + */ +void rt2x00usb_clear_entry(struct queue_entry *entry); +int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev); + +/* + * USB driver handlers. + */ +int rt2x00usb_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id); +void rt2x00usb_disconnect(struct usb_interface *usb_intf); +#ifdef CONFIG_PM +int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state); +int rt2x00usb_resume(struct usb_interface *usb_intf); +#else +#define rt2x00usb_suspend NULL +#define rt2x00usb_resume NULL +#endif /* CONFIG_PM */ + +#endif /* RT2X00USB_H */ diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c new file mode 100644 index 0000000..1774727 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -0,0 +1,2852 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt61pci + Abstract: rt61pci device specific routines. + Supported chipsets: RT2561, RT2561s, RT2661. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00pci.h" +#include "rt61pci.h" + +/* + * Allow hardware encryption to be disabled. + */ +static int modparam_nohwcrypt = 0; +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); + +/* + * Register access. 
+ * BBP and RF register require indirect register access, + * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this. + * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attempt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. + */ +#define WAIT_FOR_BBP(__dev, __reg) \ + rt2x00pci_regbusy_read((__dev), PHY_CSR3, PHY_CSR3_BUSY, (__reg)) +#define WAIT_FOR_RF(__dev, __reg) \ + rt2x00pci_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg)) +#define WAIT_FOR_MCU(__dev, __reg) \ + rt2x00pci_regbusy_read((__dev), H2M_MAILBOX_CSR, \ + H2M_MAILBOX_CSR_OWNER, (__reg)) + +static void rt61pci_bbp_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the BBP becomes available, afterwards we + * can safely write the new data into the register. + */ + if (WAIT_FOR_BBP(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, PHY_CSR3_VALUE, value); + rt2x00_set_field32(®, PHY_CSR3_REGNUM, word); + rt2x00_set_field32(®, PHY_CSR3_BUSY, 1); + rt2x00_set_field32(®, PHY_CSR3_READ_CONTROL, 0); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt61pci_bbp_read(struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the BBP becomes available, afterwards we + * can safely write the read request into the register. + * After the data has been written, we wait until hardware + * returns the correct value, if at any time the register + * doesn't become available in time, reg will be 0xffffffff + * which means we return 0xff to the caller. + */ + if (WAIT_FOR_BBP(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, PHY_CSR3_REGNUM, word); + rt2x00_set_field32(®, PHY_CSR3_BUSY, 1); + rt2x00_set_field32(®, PHY_CSR3_READ_CONTROL, 1); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg); + + WAIT_FOR_BBP(rt2x00dev, ®); + } + + *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt61pci_rf_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the RF becomes available, afterwards we + * can safely write the new data into the register. + */ + if (WAIT_FOR_RF(rt2x00dev, ®)) { + reg = 0; + rt2x00_set_field32(®, PHY_CSR4_VALUE, value); + rt2x00_set_field32(®, PHY_CSR4_NUMBER_OF_BITS, 21); + rt2x00_set_field32(®, PHY_CSR4_IF_SELECT, 0); + rt2x00_set_field32(®, PHY_CSR4_BUSY, 1); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR4, reg); + rt2x00_rf_write(rt2x00dev, word, value); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev, + const u8 command, const u8 token, + const u8 arg0, const u8 arg1) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the MCU becomes available, afterwards we + * can safely write the new data into the register. 
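+ * The token and arguments are placed in H2M_MAILBOX_CSR, after which
+ * HOST_CMD_CSR is written to raise the interrupt towards the MCU.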
+ */ + if (WAIT_FOR_MCU(rt2x00dev, ®)) { + rt2x00_set_field32(®, H2M_MAILBOX_CSR_OWNER, 1); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_CMD_TOKEN, token); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_ARG0, arg0); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_ARG1, arg1); + rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, HOST_CMD_CSR, ®); + rt2x00_set_field32(®, HOST_CMD_CSR_HOST_COMMAND, command); + rt2x00_set_field32(®, HOST_CMD_CSR_INTERRUPT_MCU, 1); + rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg); + } + + mutex_unlock(&rt2x00dev->csr_mutex); + +} + +static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, ®); + + eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN); + eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT); + eeprom->reg_data_clock = + !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK); + eeprom->reg_chip_select = + !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT); +} + +static void rt61pci_eepromregister_write(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg = 0; + + rt2x00_set_field32(®, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in); + rt2x00_set_field32(®, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out); + rt2x00_set_field32(®, E2PROM_CSR_DATA_CLOCK, + !!eeprom->reg_data_clock); + rt2x00_set_field32(®, E2PROM_CSR_CHIP_SELECT, + !!eeprom->reg_chip_select); + + rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +static const struct rt2x00debug rt61pci_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt2x00pci_register_read, + .write = rt2x00pci_register_write, + .flags = RT2X00DEBUGFS_OFFSET, + .word_base = CSR_REG_BASE, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_base = EEPROM_BASE, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt61pci_bbp_read, + .write = rt61pci_bbp_write, + .word_base = BBP_BASE, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt61pci_rf_write, + .word_base = RF_BASE, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, MAC_CSR13, ®); + return rt2x00_get_field32(reg, MAC_CSR13_BIT5); +} + +#ifdef CONFIG_RT2X00_LIB_LEDS +static void rt61pci_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + unsigned int enabled = brightness != LED_OFF; + unsigned int a_mode = + (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); + unsigned int bg_mode = + (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); + + if (led->type == LED_TYPE_RADIO) { + rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, + MCU_LEDCS_RADIO_STATUS, enabled); + + rt61pci_mcu_request(led->rt2x00dev, MCU_LED, 0xff, + (led->rt2x00dev->led_mcu_reg & 0xff), + ((led->rt2x00dev->led_mcu_reg >> 8))); + } else if (led->type == LED_TYPE_ASSOC) { + rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, + MCU_LEDCS_LINK_BG_STATUS, bg_mode); + rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, 
+ MCU_LEDCS_LINK_A_STATUS, a_mode); + + rt61pci_mcu_request(led->rt2x00dev, MCU_LED, 0xff, + (led->rt2x00dev->led_mcu_reg & 0xff), + ((led->rt2x00dev->led_mcu_reg >> 8))); + } else if (led->type == LED_TYPE_QUALITY) { + /* + * The brightness is divided into 6 levels (0 - 5), + * this means we need to convert the brightness + * argument into the matching level within that range. + */ + rt61pci_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff, + brightness / (LED_FULL / 6), 0); + } +} + +static int rt61pci_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + u32 reg; + + rt2x00pci_register_read(led->rt2x00dev, MAC_CSR14, ®); + rt2x00_set_field32(®, MAC_CSR14_ON_PERIOD, *delay_on); + rt2x00_set_field32(®, MAC_CSR14_OFF_PERIOD, *delay_off); + rt2x00pci_register_write(led->rt2x00dev, MAC_CSR14, reg); + + return 0; +} + +static void rt61pci_init_led(struct rt2x00_dev *rt2x00dev, + struct rt2x00_led *led, + enum led_type type) +{ + led->rt2x00dev = rt2x00dev; + led->type = type; + led->led_dev.brightness_set = rt61pci_brightness_set; + led->led_dev.blink_set = rt61pci_blink_set; + led->flags = LED_INITIALIZED; +} +#endif /* CONFIG_RT2X00_LIB_LEDS */ + +/* + * Configuration handlers. + */ +static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key) +{ + struct hw_key_entry key_entry; + struct rt2x00_field32 field; + u32 mask; + u32 reg; + + if (crypto->cmd == SET_KEY) { + /* + * rt2x00lib can't determine the correct free + * key_idx for shared keys. We have 1 register + * with key valid bits. The goal is simple, read + * the register, if that is full we have no slots + * left. + * Note that each BSS is allowed to have up to 4 + * shared keys, so put a mask over the allowed + * entries. + */ + mask = (0xf << crypto->bssidx); + + rt2x00pci_register_read(rt2x00dev, SEC_CSR0, ®); + reg &= mask; + + if (reg && reg == mask) + return -ENOSPC; + + key->hw_key_idx += reg ? ffz(reg) : 0; + + /* + * Upload key to hardware + */ + memcpy(key_entry.key, crypto->key, + sizeof(key_entry.key)); + memcpy(key_entry.tx_mic, crypto->tx_mic, + sizeof(key_entry.tx_mic)); + memcpy(key_entry.rx_mic, crypto->rx_mic, + sizeof(key_entry.rx_mic)); + + reg = SHARED_KEY_ENTRY(key->hw_key_idx); + rt2x00pci_register_multiwrite(rt2x00dev, reg, + &key_entry, sizeof(key_entry)); + + /* + * The cipher types are stored over 2 registers. + * bssidx 0 and 1 keys are stored in SEC_CSR1 and + * bssidx 1 and 2 keys are stored in SEC_CSR5. + * Using the correct defines correctly will cause overhead, + * so just calculate the correct offset. + */ + if (key->hw_key_idx < 8) { + field.bit_offset = (3 * key->hw_key_idx); + field.bit_mask = 0x7 << field.bit_offset; + + rt2x00pci_register_read(rt2x00dev, SEC_CSR1, ®); + rt2x00_set_field32(®, field, crypto->cipher); + rt2x00pci_register_write(rt2x00dev, SEC_CSR1, reg); + } else { + field.bit_offset = (3 * (key->hw_key_idx - 8)); + field.bit_mask = 0x7 << field.bit_offset; + + rt2x00pci_register_read(rt2x00dev, SEC_CSR5, ®); + rt2x00_set_field32(®, field, crypto->cipher); + rt2x00pci_register_write(rt2x00dev, SEC_CSR5, reg); + } + + /* + * The driver does not support the IV/EIV generation + * in hardware. However it doesn't support the IV/EIV + * inside the ieee80211 frame either, but requires it + * to be provided separately for the descriptor. 
+ * rt2x00lib will cut the IV/EIV data out of all frames + * given to us by mac80211, but we must tell mac80211 + * to generate the IV/EIV data. + */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } + + /* + * SEC_CSR0 contains only single-bit fields to indicate + * a particular key is valid. Because using the FIELD32() + * defines directly will cause a lot of overhead, we use + * a calculation to determine the correct bit directly. + */ + mask = 1 << key->hw_key_idx; + + rt2x00pci_register_read(rt2x00dev, SEC_CSR0, ®); + if (crypto->cmd == SET_KEY) + reg |= mask; + else if (crypto->cmd == DISABLE_KEY) + reg &= ~mask; + rt2x00pci_register_write(rt2x00dev, SEC_CSR0, reg); + + return 0; +} + +static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key) +{ + struct hw_pairwise_ta_entry addr_entry; + struct hw_key_entry key_entry; + u32 mask; + u32 reg; + + if (crypto->cmd == SET_KEY) { + /* + * rt2x00lib can't determine the correct free + * key_idx for pairwise keys. We have 2 registers + * with key valid bits. The goal is simple: read + * the first register. If that is full, move to + * the next register. + * When both registers are full, we drop the key. + * Otherwise, we use the first invalid entry. + */ + rt2x00pci_register_read(rt2x00dev, SEC_CSR2, ®); + if (reg && reg == ~0) { + key->hw_key_idx = 32; + rt2x00pci_register_read(rt2x00dev, SEC_CSR3, ®); + if (reg && reg == ~0) + return -ENOSPC; + } + + key->hw_key_idx += reg ? ffz(reg) : 0; + + /* + * Upload key to hardware + */ + memcpy(key_entry.key, crypto->key, + sizeof(key_entry.key)); + memcpy(key_entry.tx_mic, crypto->tx_mic, + sizeof(key_entry.tx_mic)); + memcpy(key_entry.rx_mic, crypto->rx_mic, + sizeof(key_entry.rx_mic)); + + memset(&addr_entry, 0, sizeof(addr_entry)); + memcpy(&addr_entry, crypto->address, ETH_ALEN); + addr_entry.cipher = crypto->cipher; + + reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx); + rt2x00pci_register_multiwrite(rt2x00dev, reg, + &key_entry, sizeof(key_entry)); + + reg = PAIRWISE_TA_ENTRY(key->hw_key_idx); + rt2x00pci_register_multiwrite(rt2x00dev, reg, + &addr_entry, sizeof(addr_entry)); + + /* + * Enable pairwise lookup table for given BSS idx. + * Without this, received frames will not be decrypted + * by the hardware. + */ + rt2x00pci_register_read(rt2x00dev, SEC_CSR4, ®); + reg |= (1 << crypto->bssidx); + rt2x00pci_register_write(rt2x00dev, SEC_CSR4, reg); + + /* + * The driver does not support the IV/EIV generation + * in hardware. However it doesn't support the IV/EIV + * inside the ieee80211 frame either, but requires it + * to be provided separately for the descriptor. + * rt2x00lib will cut the IV/EIV data out of all frames + * given to us by mac80211, but we must tell mac80211 + * to generate the IV/EIV data. + */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } + + /* + * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate + * a particular key is valid. Because using the FIELD32() + * defines directly will cause a lot of overhead, we use + * a calculation to determine the correct bit directly. 
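+ * Keys 0-31 have their valid bit in SEC_CSR2, keys 32-63 in SEC_CSR3.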
+ */ + if (key->hw_key_idx < 32) { + mask = 1 << key->hw_key_idx; + + rt2x00pci_register_read(rt2x00dev, SEC_CSR2, ®); + if (crypto->cmd == SET_KEY) + reg |= mask; + else if (crypto->cmd == DISABLE_KEY) + reg &= ~mask; + rt2x00pci_register_write(rt2x00dev, SEC_CSR2, reg); + } else { + mask = 1 << (key->hw_key_idx - 32); + + rt2x00pci_register_read(rt2x00dev, SEC_CSR3, ®); + if (crypto->cmd == SET_KEY) + reg |= mask; + else if (crypto->cmd == DISABLE_KEY) + reg &= ~mask; + rt2x00pci_register_write(rt2x00dev, SEC_CSR3, reg); + } + + return 0; +} + +static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev, + const unsigned int filter_flags) +{ + u32 reg; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. + */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_DROP_CRC, + !(filter_flags & FIF_FCSFAIL)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_PHYSICAL, + !(filter_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_CONTROL, + !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); + rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, + !(filter_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_TO_DS, + !(filter_flags & FIF_PROMISC_IN_BSS) && + !rt2x00dev->intf_ap_count); + rt2x00_set_field32(®, TXRX_CSR0_DROP_VERSION_ERROR, 1); + rt2x00_set_field32(®, TXRX_CSR0_DROP_MULTICAST, + !(filter_flags & FIF_ALLMULTI)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_BROADCAST, 0); + rt2x00_set_field32(®, TXRX_CSR0_DROP_ACK_CTS, + !(filter_flags & FIF_CONTROL)); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev, + struct rt2x00_intf *intf, + struct rt2x00intf_conf *conf, + const unsigned int flags) +{ + unsigned int beacon_base; + u32 reg; + + if (flags & CONFIG_UPDATE_TYPE) { + /* + * Clear current synchronisation setup. + * For the Beacon base registers, we only need to clear + * the first byte since that byte contains the VALID and OWNER + * bits which (when set to 0) will invalidate the entire beacon. + */ + beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); + rt2x00pci_register_write(rt2x00dev, beacon_base, 0); + + /* + * Enable synchronisation. 
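+ * The TSF timer is started, the sync mode requested by the interface
+ * is applied and TBTT generation is enabled.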
+ */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); + rt2x00_set_field32(®, TXRX_CSR9_TSF_SYNC, conf->sync); + rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); + } + + if (flags & CONFIG_UPDATE_MAC) { + reg = le32_to_cpu(conf->mac[1]); + rt2x00_set_field32(®, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); + conf->mac[1] = cpu_to_le32(reg); + + rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR2, + conf->mac, sizeof(conf->mac)); + } + + if (flags & CONFIG_UPDATE_BSSID) { + reg = le32_to_cpu(conf->bssid[1]); + rt2x00_set_field32(®, MAC_CSR5_BSS_ID_MASK, 3); + conf->bssid[1] = cpu_to_le32(reg); + + rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR4, + conf->bssid, sizeof(conf->bssid)); + } +} + +static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_erp *erp) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32); + rt2x00_set_field32(®, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_ENABLE, 1); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_PREAMBLE, + !!erp->short_preamble); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); + + rt2x00pci_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_INTERVAL, + erp->beacon_int * 16); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_SLOT_TIME, erp->slot_time); + rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR8, ®); + rt2x00_set_field32(®, MAC_CSR8_SIFS, erp->sifs); + rt2x00_set_field32(®, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); + rt2x00_set_field32(®, MAC_CSR8_EIFS, erp->eifs); + rt2x00pci_register_write(rt2x00dev, MAC_CSR8, reg); +} + +static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev, + struct antenna_setup *ant) +{ + u8 r3; + u8 r4; + u8 r77; + + rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt61pci_bbp_read(rt2x00dev, 4, &r4); + rt61pci_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325)); + + /* + * Configure the RX antenna. 
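+ * BBP register 4 selects between a fixed antenna and hardware
+ * diversity, while BBP register 77 chooses the antenna used in the
+ * fixed configurations.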
+ */ + switch (ant->rx) { + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ)); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); + else + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); + break; + case ANTENNA_B: + default: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); + else + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); + break; + } + + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_bbp_write(rt2x00dev, 3, r3); + rt61pci_bbp_write(rt2x00dev, 4, r4); +} + +static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev, + struct antenna_setup *ant) +{ + u8 r3; + u8 r4; + u8 r77; + + rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt61pci_bbp_read(rt2x00dev, 4, &r4); + rt61pci_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529)); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); + + /* + * Configure the RX antenna. + */ + switch (ant->rx) { + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); + break; + case ANTENNA_B: + default: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); + break; + } + + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_bbp_write(rt2x00dev, 3, r3); + rt61pci_bbp_write(rt2x00dev, 4, r4); +} + +static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev, + const int p1, const int p2) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, MAC_CSR13, ®); + + rt2x00_set_field32(®, MAC_CSR13_BIT4, p1); + rt2x00_set_field32(®, MAC_CSR13_BIT12, 0); + + rt2x00_set_field32(®, MAC_CSR13_BIT3, !p2); + rt2x00_set_field32(®, MAC_CSR13_BIT11, 0); + + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); +} + +static void rt61pci_config_antenna_2529(struct rt2x00_dev *rt2x00dev, + struct antenna_setup *ant) +{ + u8 r3; + u8 r4; + u8 r77; + + rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt61pci_bbp_read(rt2x00dev, 4, &r4); + rt61pci_bbp_read(rt2x00dev, 77, &r77); + + /* + * Configure the RX antenna. + */ + switch (ant->rx) { + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 0); + break; + case ANTENNA_HW_DIVERSITY: + /* + * FIXME: Antenna selection for the rf 2529 is very confusing + * in the legacy driver. Just default to antenna B until the + * legacy code can be properly translated into rt2x00 code. 
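+ * Falling through to the ANTENNA_B handling below keeps the
+ * behaviour predictable until that translation happens.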
+ */ + case ANTENNA_B: + default: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 1); + break; + } + + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_bbp_write(rt2x00dev, 3, r3); + rt61pci_bbp_write(rt2x00dev, 4, r4); +} + +struct antenna_sel { + u8 word; + /* + * value[0] -> non-LNA + * value[1] -> LNA + */ + u8 value[2]; +}; + +static const struct antenna_sel antenna_sel_a[] = { + { 96, { 0x58, 0x78 } }, + { 104, { 0x38, 0x48 } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x60, 0x60 } }, + { 97, { 0x58, 0x58 } }, + { 98, { 0x58, 0x58 } }, +}; + +static const struct antenna_sel antenna_sel_bg[] = { + { 96, { 0x48, 0x68 } }, + { 104, { 0x2c, 0x3c } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x50, 0x50 } }, + { 97, { 0x48, 0x48 } }, + { 98, { 0x48, 0x48 } }, +}; + +static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev, + struct antenna_setup *ant) +{ + const struct antenna_sel *sel; + unsigned int lna; + unsigned int i; + u32 reg; + + /* + * We should never come here because rt2x00lib is supposed + * to catch this and send us the correct antenna explicitely. + */ + BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || + ant->tx == ANTENNA_SW_DIVERSITY); + + if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { + sel = antenna_sel_a; + lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + } else { + sel = antenna_sel_bg; + lna = test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + } + + for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) + rt61pci_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); + + rt2x00pci_register_read(rt2x00dev, PHY_CSR0, ®); + + rt2x00_set_field32(®, PHY_CSR0_PA_PE_BG, + rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); + rt2x00_set_field32(®, PHY_CSR0_PA_PE_A, + rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg); + + if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) + rt61pci_config_antenna_5x(rt2x00dev, ant); + else if (rt2x00_rf(rt2x00dev, RF2527)) + rt61pci_config_antenna_2x(rt2x00dev, ant); + else if (rt2x00_rf(rt2x00dev, RF2529)) { + if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) + rt61pci_config_antenna_2x(rt2x00dev, ant); + else + rt61pci_config_antenna_2529(rt2x00dev, ant); + } +} + +static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u16 eeprom; + short lna_gain = 0; + + if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) { + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) + lna_gain += 14; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom); + lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); + } else { + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) + lna_gain += 14; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom); + lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1); + } + + rt2x00dev->lna_gain = lna_gain; +} + +static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + u8 r3; + u8 r94; + u8 smart; + + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); + + smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527)); + + rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 
smart);
+	rt61pci_bbp_write(rt2x00dev, 3, r3);
+
+	r94 = 6;
+	if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94))
+		r94 += txpower - MAX_TXPOWER;
+	else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94))
+		r94 += txpower;
+	rt61pci_bbp_write(rt2x00dev, 94, r94);
+
+	rt61pci_rf_write(rt2x00dev, 1, rf->rf1);
+	rt61pci_rf_write(rt2x00dev, 2, rf->rf2);
+	rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
+	rt61pci_rf_write(rt2x00dev, 4, rf->rf4);
+
+	udelay(200);
+
+	rt61pci_rf_write(rt2x00dev, 1, rf->rf1);
+	rt61pci_rf_write(rt2x00dev, 2, rf->rf2);
+	rt61pci_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
+	rt61pci_rf_write(rt2x00dev, 4, rf->rf4);
+
+	udelay(200);
+
+	rt61pci_rf_write(rt2x00dev, 1, rf->rf1);
+	rt61pci_rf_write(rt2x00dev, 2, rf->rf2);
+	rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
+	rt61pci_rf_write(rt2x00dev, 4, rf->rf4);
+
+	msleep(1);
+}
+
+static void rt61pci_config_txpower(struct rt2x00_dev *rt2x00dev,
+				   const int txpower)
+{
+	struct rf_channel rf;
+
+	rt2x00_rf_read(rt2x00dev, 1, &rf.rf1);
+	rt2x00_rf_read(rt2x00dev, 2, &rf.rf2);
+	rt2x00_rf_read(rt2x00dev, 3, &rf.rf3);
+	rt2x00_rf_read(rt2x00dev, 4, &rf.rf4);
+
+	rt61pci_config_channel(rt2x00dev, &rf, txpower);
+}
+
+static void rt61pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
+				       struct rt2x00lib_conf *libconf)
+{
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
+	rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
+			   libconf->conf->long_frame_max_tx_count);
+	rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
+			   libconf->conf->short_frame_max_tx_count);
+	rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
+}
+
+static void rt61pci_config_ps(struct rt2x00_dev *rt2x00dev,
+			      struct rt2x00lib_conf *libconf)
+{
+	enum dev_state state =
+	    (libconf->conf->flags & IEEE80211_CONF_PS) ?
+ STATE_SLEEP : STATE_AWAKE; + u32 reg; + + if (state == STATE_SLEEP) { + rt2x00pci_register_read(rt2x00dev, MAC_CSR11, ®); + rt2x00_set_field32(®, MAC_CSR11_DELAY_AFTER_TBCN, + rt2x00dev->beacon_int - 10); + rt2x00_set_field32(®, MAC_CSR11_TBCN_BEFORE_WAKEUP, + libconf->conf->listen_interval - 1); + rt2x00_set_field32(®, MAC_CSR11_WAKEUP_LATENCY, 5); + + /* We must first disable autowake before it can be enabled */ + rt2x00_set_field32(®, MAC_CSR11_AUTOWAKE, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR11, reg); + + rt2x00_set_field32(®, MAC_CSR11_AUTOWAKE, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR11, reg); + + rt2x00pci_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000005); + rt2x00pci_register_write(rt2x00dev, IO_CNTL_CSR, 0x0000001c); + rt2x00pci_register_write(rt2x00dev, PCI_USEC_CSR, 0x00000060); + + rt61pci_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 0); + } else { + rt2x00pci_register_read(rt2x00dev, MAC_CSR11, ®); + rt2x00_set_field32(®, MAC_CSR11_DELAY_AFTER_TBCN, 0); + rt2x00_set_field32(®, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0); + rt2x00_set_field32(®, MAC_CSR11_AUTOWAKE, 0); + rt2x00_set_field32(®, MAC_CSR11_WAKEUP_LATENCY, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR11, reg); + + rt2x00pci_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000007); + rt2x00pci_register_write(rt2x00dev, IO_CNTL_CSR, 0x00000018); + rt2x00pci_register_write(rt2x00dev, PCI_USEC_CSR, 0x00000020); + + rt61pci_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0); + } +} + +static void rt61pci_config(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf, + const unsigned int flags) +{ + /* Always recalculate LNA gain before changing configuration */ + rt61pci_config_lna_gain(rt2x00dev, libconf); + + if (flags & IEEE80211_CONF_CHANGE_CHANNEL) + rt61pci_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & IEEE80211_CONF_CHANGE_POWER) && + !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) + rt61pci_config_txpower(rt2x00dev, libconf->conf->power_level); + if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) + rt61pci_config_retry_limit(rt2x00dev, libconf); + if (flags & IEEE80211_CONF_CHANGE_PS) + rt61pci_config_ps(rt2x00dev, libconf); +} + +/* + * Link tuning + */ +static void rt61pci_link_stats(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual) +{ + u32 reg; + + /* + * Update FCS error count from register. + */ + rt2x00pci_register_read(rt2x00dev, STA_CSR0, ®); + qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2x00pci_register_read(rt2x00dev, STA_CSR1, ®); + qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); +} + +static inline void rt61pci_set_vgc(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual, u8 vgc_level) +{ + if (qual->vgc_level != vgc_level) { + rt61pci_bbp_write(rt2x00dev, 17, vgc_level); + qual->vgc_level = vgc_level; + qual->vgc_level_reg = vgc_level; + } +} + +static void rt61pci_reset_tuner(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual) +{ + rt61pci_set_vgc(rt2x00dev, qual, 0x20); +} + +static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual, const u32 count) +{ + u8 up_bound; + u8 low_bound; + + /* + * Determine r17 bounds. 
+ */ + if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { + low_bound = 0x28; + up_bound = 0x48; + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { + low_bound += 0x10; + up_bound += 0x10; + } + } else { + low_bound = 0x20; + up_bound = 0x40; + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { + low_bound += 0x10; + up_bound += 0x10; + } + } + + /* + * If we are not associated, we should go straight to the + * dynamic CCA tuning. + */ + if (!rt2x00dev->intf_associated) + goto dynamic_cca_tune; + + /* + * Special big-R17 for very short distance + */ + if (qual->rssi >= -35) { + rt61pci_set_vgc(rt2x00dev, qual, 0x60); + return; + } + + /* + * Special big-R17 for short distance + */ + if (qual->rssi >= -58) { + rt61pci_set_vgc(rt2x00dev, qual, up_bound); + return; + } + + /* + * Special big-R17 for middle-short distance + */ + if (qual->rssi >= -66) { + rt61pci_set_vgc(rt2x00dev, qual, low_bound + 0x10); + return; + } + + /* + * Special mid-R17 for middle distance + */ + if (qual->rssi >= -74) { + rt61pci_set_vgc(rt2x00dev, qual, low_bound + 0x08); + return; + } + + /* + * Special case: Change up_bound based on the rssi. + * Lower up_bound when rssi is weaker then -74 dBm. + */ + up_bound -= 2 * (-74 - qual->rssi); + if (low_bound > up_bound) + up_bound = low_bound; + + if (qual->vgc_level > up_bound) { + rt61pci_set_vgc(rt2x00dev, qual, up_bound); + return; + } + +dynamic_cca_tune: + + /* + * r17 does not yet exceed upper limit, continue and base + * the r17 tuning on the false CCA count. + */ + if ((qual->false_cca > 512) && (qual->vgc_level < up_bound)) + rt61pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level); + else if ((qual->false_cca < 100) && (qual->vgc_level > low_bound)) + rt61pci_set_vgc(rt2x00dev, qual, --qual->vgc_level); +} + +/* + * Firmware functions + */ +static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) +{ + u16 chip; + char *fw_name; + + pci_read_config_word(to_pci_dev(rt2x00dev->dev), PCI_DEVICE_ID, &chip); + switch (chip) { + case RT2561_PCI_ID: + fw_name = FIRMWARE_RT2561; + break; + case RT2561s_PCI_ID: + fw_name = FIRMWARE_RT2561s; + break; + case RT2661_PCI_ID: + fw_name = FIRMWARE_RT2661; + break; + default: + fw_name = NULL; + break; + } + + return fw_name; +} + +static int rt61pci_check_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len) +{ + u16 fw_crc; + u16 crc; + + /* + * Only support 8kb firmware files. + */ + if (len != 8192) + return FW_BAD_LENGTH; + + /* + * The last 2 bytes in the firmware array are the crc checksum itself. + * This means that we should never pass those 2 bytes to the crc + * algorithm. + */ + fw_crc = (data[len - 2] << 8 | data[len - 1]); + + /* + * Use the crc itu-t algorithm. + */ + crc = crc_itu_t(0, data, len - 2); + crc = crc_itu_t_byte(crc, 0); + crc = crc_itu_t_byte(crc, 0); + + return (fw_crc == crc) ? FW_OK : FW_BAD_CRC; +} + +static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len) +{ + int i; + u32 reg; + + /* + * Wait for stable hardware. + */ + for (i = 0; i < 100; i++) { + rt2x00pci_register_read(rt2x00dev, MAC_CSR0, ®); + if (reg) + break; + msleep(1); + } + + if (!reg) { + ERROR(rt2x00dev, "Unstable hardware.\n"); + return -EBUSY; + } + + /* + * Prepare MCU and mailbox for firmware loading. 
+ */ + reg = 0; + rt2x00_set_field32(®, MCU_CNTL_CSR_RESET, 1); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); + rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); + rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, 0); + + /* + * Write firmware to device. + */ + reg = 0; + rt2x00_set_field32(®, MCU_CNTL_CSR_RESET, 1); + rt2x00_set_field32(®, MCU_CNTL_CSR_SELECT_BANK, 1); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + + rt2x00pci_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, + data, len); + + rt2x00_set_field32(®, MCU_CNTL_CSR_SELECT_BANK, 0); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + + rt2x00_set_field32(®, MCU_CNTL_CSR_RESET, 0); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + + for (i = 0; i < 100; i++) { + rt2x00pci_register_read(rt2x00dev, MCU_CNTL_CSR, ®); + if (rt2x00_get_field32(reg, MCU_CNTL_CSR_READY)) + break; + msleep(1); + } + + if (i == 100) { + ERROR(rt2x00dev, "MCU Control register not ready.\n"); + return -EBUSY; + } + + /* + * Hardware needs another millisecond before it is ready. + */ + msleep(1); + + /* + * Reset MAC and BBP registers. + */ + reg = 0; + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + return 0; +} + +/* + * Initialization functions. + */ +static bool rt61pci_get_entry_state(struct queue_entry *entry) +{ + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + u32 word; + + if (entry->queue->qid == QID_RX) { + rt2x00_desc_read(entry_priv->desc, 0, &word); + + return rt2x00_get_field32(word, RXD_W0_OWNER_NIC); + } else { + rt2x00_desc_read(entry_priv->desc, 0, &word); + + return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + rt2x00_get_field32(word, TXD_W0_VALID)); + } +} + +static void rt61pci_clear_entry(struct queue_entry *entry) +{ + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + u32 word; + + if (entry->queue->qid == QID_RX) { + rt2x00_desc_read(entry_priv->desc, 5, &word); + rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS, + skbdesc->skb_dma); + rt2x00_desc_write(entry_priv->desc, 5, word); + + rt2x00_desc_read(entry_priv->desc, 0, &word); + rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); + rt2x00_desc_write(entry_priv->desc, 0, word); + } else { + rt2x00_desc_read(entry_priv->desc, 0, &word); + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); + rt2x00_desc_write(entry_priv->desc, 0, word); + } +} + +static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev) +{ + struct queue_entry_priv_pci *entry_priv; + u32 reg; + + /* + * Initialize registers. 
+ */ + rt2x00pci_register_read(rt2x00dev, TX_RING_CSR0, ®); + rt2x00_set_field32(®, TX_RING_CSR0_AC0_RING_SIZE, + rt2x00dev->tx[0].limit); + rt2x00_set_field32(®, TX_RING_CSR0_AC1_RING_SIZE, + rt2x00dev->tx[1].limit); + rt2x00_set_field32(®, TX_RING_CSR0_AC2_RING_SIZE, + rt2x00dev->tx[2].limit); + rt2x00_set_field32(®, TX_RING_CSR0_AC3_RING_SIZE, + rt2x00dev->tx[3].limit); + rt2x00pci_register_write(rt2x00dev, TX_RING_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TX_RING_CSR1, ®); + rt2x00_set_field32(®, TX_RING_CSR1_TXD_SIZE, + rt2x00dev->tx[0].desc_size / 4); + rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg); + + entry_priv = rt2x00dev->tx[0].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, ®); + rt2x00_set_field32(®, AC0_BASE_CSR_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg); + + entry_priv = rt2x00dev->tx[1].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, ®); + rt2x00_set_field32(®, AC1_BASE_CSR_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg); + + entry_priv = rt2x00dev->tx[2].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, ®); + rt2x00_set_field32(®, AC2_BASE_CSR_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg); + + entry_priv = rt2x00dev->tx[3].entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, ®); + rt2x00_set_field32(®, AC3_BASE_CSR_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, ®); + rt2x00_set_field32(®, RX_RING_CSR_RING_SIZE, rt2x00dev->rx->limit); + rt2x00_set_field32(®, RX_RING_CSR_RXD_SIZE, + rt2x00dev->rx->desc_size / 4); + rt2x00_set_field32(®, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4); + rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg); + + entry_priv = rt2x00dev->rx->entries[0].priv_data; + rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, ®); + rt2x00_set_field32(®, RX_BASE_CSR_RING_REGISTER, + entry_priv->desc_dma); + rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, ®); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC0, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC1, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC2, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC3, 2); + rt2x00pci_register_write(rt2x00dev, TX_DMA_DST_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, LOAD_TX_RING_CSR, ®); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC0, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC1, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC2, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC3, 1); + rt2x00pci_register_write(rt2x00dev, LOAD_TX_RING_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, ®); + rt2x00_set_field32(®, RX_CNTL_CSR_LOAD_RXD, 1); + rt2x00pci_register_write(rt2x00dev, RX_CNTL_CSR, reg); + + return 0; +} + +static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_AUTO_TX_SEQ, 1); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, 0); + rt2x00_set_field32(®, TXRX_CSR0_TX_WITHOUT_WAITING, 0); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ + rt2x00_set_field32(®, 
TXRX_CSR1_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2, 42); /* OFDM Rate */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR1, reg); + + /* + * CCK TXD BBP registers + */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0, 13); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1, 12); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2, 11); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3, 10); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR2, reg); + + /* + * OFDM TXD BBP registers + */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR3, ®); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0, 7); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1, 6); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2, 5); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR3, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR7, ®); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_6MBS, 59); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_9MBS, 53); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_12MBS, 49); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_18MBS, 46); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR7, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR8, ®); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_24MBS, 44); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_36MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_48MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_54MBS, 42); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR8, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_INTERVAL, 0); + rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 0); + rt2x00_set_field32(®, TXRX_CSR9_TSF_SYNC, 0); + rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 0); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); + rt2x00_set_field32(®, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); + + rt2x00pci_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); + + rt2x00pci_register_write(rt2x00dev, MAC_CSR6, 0x00000fff); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_CW_SELECT, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); + + rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x0000071c); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, 0x0000e000); + + /* + * Invalidate all Shared Keys (SEC_CSR0), + * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5) + */ + rt2x00pci_register_write(rt2x00dev, SEC_CSR0, 0x00000000); + rt2x00pci_register_write(rt2x00dev, SEC_CSR1, 0x00000000); + rt2x00pci_register_write(rt2x00dev, SEC_CSR5, 0x00000000); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR1, 0x000023b0); + rt2x00pci_register_write(rt2x00dev, PHY_CSR5, 0x060a100c); + rt2x00pci_register_write(rt2x00dev, PHY_CSR6, 0x00080606); + rt2x00pci_register_write(rt2x00dev, PHY_CSR7, 0x00000a08); + + 
rt2x00pci_register_write(rt2x00dev, PCI_CFG_CSR, 0x28ca4404); + + rt2x00pci_register_write(rt2x00dev, TEST_MODE_CSR, 0x00000200); + + rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); + + /* + * Clear all beacons + * For the Beacon base registers we only need to clear + * the first byte since that byte contains the VALID and OWNER + * bits which (when set to 0) will invalidate the entire beacon. + */ + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE0, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE1, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE2, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE3, 0); + + /* + * We must clear the error counters. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt2x00pci_register_read(rt2x00dev, STA_CSR0, ®); + rt2x00pci_register_read(rt2x00dev, STA_CSR1, ®); + rt2x00pci_register_read(rt2x00dev, STA_CSR2, ®); + + /* + * Reset MAC and BBP registers. + */ + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + return 0; +} + +static int rt61pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt61pci_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + return 0; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; +} + +static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + if (unlikely(rt61pci_wait_bbp_ready(rt2x00dev))) + return -EACCES; + + rt61pci_bbp_write(rt2x00dev, 3, 0x00); + rt61pci_bbp_write(rt2x00dev, 15, 0x30); + rt61pci_bbp_write(rt2x00dev, 21, 0xc8); + rt61pci_bbp_write(rt2x00dev, 22, 0x38); + rt61pci_bbp_write(rt2x00dev, 23, 0x06); + rt61pci_bbp_write(rt2x00dev, 24, 0xfe); + rt61pci_bbp_write(rt2x00dev, 25, 0x0a); + rt61pci_bbp_write(rt2x00dev, 26, 0x0d); + rt61pci_bbp_write(rt2x00dev, 34, 0x12); + rt61pci_bbp_write(rt2x00dev, 37, 0x07); + rt61pci_bbp_write(rt2x00dev, 39, 0xf8); + rt61pci_bbp_write(rt2x00dev, 41, 0x60); + rt61pci_bbp_write(rt2x00dev, 53, 0x10); + rt61pci_bbp_write(rt2x00dev, 54, 0x18); + rt61pci_bbp_write(rt2x00dev, 60, 0x10); + rt61pci_bbp_write(rt2x00dev, 61, 0x04); + rt61pci_bbp_write(rt2x00dev, 62, 0x04); + rt61pci_bbp_write(rt2x00dev, 75, 0xfe); + rt61pci_bbp_write(rt2x00dev, 86, 0xfe); + rt61pci_bbp_write(rt2x00dev, 88, 0xfe); + rt61pci_bbp_write(rt2x00dev, 90, 0x0f); + rt61pci_bbp_write(rt2x00dev, 99, 0x00); + rt61pci_bbp_write(rt2x00dev, 102, 0x16); + rt61pci_bbp_write(rt2x00dev, 107, 0x04); + + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + rt61pci_bbp_write(rt2x00dev, reg_id, value); + } + } + + return 0; +} + +/* + * Device state switch handlers. 
+ */ +static void rt61pci_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, + (state == STATE_RADIO_RX_OFF) || + (state == STATE_RADIO_RX_OFF_LINK)); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int mask = (state == STATE_RADIO_IRQ_OFF); + u32 reg; + + /* + * When interrupts are being enabled, the interrupt registers + * should clear the register to assure a clean state. + */ + if (state == STATE_RADIO_IRQ_ON) { + rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, ®); + rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, ®); + rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg); + } + + /* + * Only toggle the interrupts bits we are going to use. + * Non-checked interrupt bits are disabled by default. + */ + rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, ®); + rt2x00_set_field32(®, INT_MASK_CSR_TXDONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_RXDONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_ENABLE_MITIGATION, mask); + rt2x00_set_field32(®, INT_MASK_CSR_MITIGATION_PERIOD, 0xff); + rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, ®); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_0, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_1, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_2, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_3, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_4, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_5, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_6, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_7, mask); + rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); +} + +static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Initialize all registers. + */ + if (unlikely(rt61pci_init_queues(rt2x00dev) || + rt61pci_init_registers(rt2x00dev) || + rt61pci_init_bbp(rt2x00dev))) + return -EIO; + + /* + * Enable RX. + */ + rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, ®); + rt2x00_set_field32(®, RX_CNTL_CSR_ENABLE_RX_DMA, 1); + rt2x00pci_register_write(rt2x00dev, RX_CNTL_CSR, reg); + + return 0; +} + +static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Disable power + */ + rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x00001818); +} + +static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + + put_to_sleep = (state != STATE_AWAKE); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR12, ®); + rt2x00_set_field32(®, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); + rt2x00_set_field32(®, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); + rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. 
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, MAC_CSR12, ®); + state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); + if (state == !put_to_sleep) + return 0; + msleep(10); + } + + return -EBUSY; +} + +static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt61pci_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt61pci_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_ON_LINK: + case STATE_RADIO_RX_OFF: + case STATE_RADIO_RX_OFF_LINK: + rt61pci_toggle_rx(rt2x00dev, state); + break; + case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_OFF: + rt61pci_toggle_irq(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt61pci_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + if (unlikely(retval)) + ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", + state, retval); + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct txentry_desc *txdesc) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + __le32 *txd = skbdesc->desc; + u32 word; + + /* + * Start writing the descriptor words. + */ + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue); + rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); + rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); + rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); + rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); + rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, + test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); + rt2x00_desc_write(txd, 2, word); + + if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { + _rt2x00_desc_write(txd, 3, skbdesc->iv[0]); + _rt2x00_desc_write(txd, 4, skbdesc->iv[1]); + } + + rt2x00_desc_read(txd, 5, &word); + rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid); + rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, + skbdesc->entry->entry_idx); + rt2x00_set_field32(&word, TXD_W5_TX_POWER, + TXPOWER_TO_DEV(rt2x00dev->tx_power)); + rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); + rt2x00_desc_write(txd, 5, word); + + rt2x00_desc_read(txd, 6, &word); + rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, + skbdesc->skb_dma); + rt2x00_desc_write(txd, 6, word); + + if (skbdesc->desc_len > TXINFO_SIZE) { + rt2x00_desc_read(txd, 11, &word); + rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skb->len); + rt2x00_desc_write(txd, 11, word); + } + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + test_bit(ENTRY_TXD_ACK, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + 
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W0_OFDM,
+			   (txdesc->rate_mode == RATE_MODE_OFDM));
+	rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+	rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
+			   test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
+			   test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
+			   test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
+	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
+	rt2x00_set_field32(&word, TXD_W0_BURST,
+			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+	rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
+	rt2x00_desc_write(txd, 0, word);
+}
+
+/*
+ * TX data initialization
+ */
+static void rt61pci_write_beacon(struct queue_entry *entry)
+{
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	unsigned int beacon_base;
+	u32 reg;
+
+	/*
+	 * Disable beaconing while we are reloading the beacon data,
+	 * otherwise we might be sending out invalid data.
+	 */
+	rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+	rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+	rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+	/*
+	 * Write entire beacon with descriptor to register.
+	 */
+	beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+	rt2x00pci_register_multiwrite(rt2x00dev,
+				      beacon_base,
+				      skbdesc->desc, skbdesc->desc_len);
+	rt2x00pci_register_multiwrite(rt2x00dev,
+				      beacon_base + skbdesc->desc_len,
+				      entry->skb->data, entry->skb->len);
+
+	/*
+	 * Clean up beacon skb.
+	 */
+	dev_kfree_skb_any(entry->skb);
+	entry->skb = NULL;
+}
+
+static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
+				  const enum data_queue_qid queue)
+{
+	u32 reg;
+
+	if (queue == QID_BEACON) {
+		/*
+		 * For Wi-Fi fairly generated beacons between participating
+		 * stations. Set TBTT phase adaptive adjustment step to 8us.
+ */ + rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) { + rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); + rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); + } + return; + } + + rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, ®); + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE)); + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK)); + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC2, (queue == QID_AC_VI)); + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC3, (queue == QID_AC_VO)); + rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); +} + +static void rt61pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid qid) +{ + u32 reg; + + if (qid == QID_BEACON) { + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0); + return; + } + + rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, ®); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC0, (qid == QID_AC_BE)); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC1, (qid == QID_AC_BK)); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC2, (qid == QID_AC_VI)); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC3, (qid == QID_AC_VO)); + rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); +} + +/* + * RX control handlers + */ +static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) +{ + u8 offset = rt2x00dev->lna_gain; + u8 lna; + + lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); + switch (lna) { + case 3: + offset += 90; + break; + case 2: + offset += 74; + break; + case 1: + offset += 64; + break; + default: + return 0; + } + + if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { + if (lna == 3 || lna == 2) + offset += 10; + } + + return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; +} + +static void rt61pci_fill_rxdone(struct queue_entry *entry, + struct rxdone_entry_desc *rxdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct queue_entry_priv_pci *entry_priv = entry->priv_data; + u32 word0; + u32 word1; + + rt2x00_desc_read(entry_priv->desc, 0, &word0); + rt2x00_desc_read(entry_priv->desc, 1, &word1); + + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; + + if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { + rxdesc->cipher = + rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG); + rxdesc->cipher_status = + rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR); + } + + if (rxdesc->cipher != CIPHER_NONE) { + _rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv[0]); + _rt2x00_desc_read(entry_priv->desc, 3, &rxdesc->iv[1]); + rxdesc->dev_flags |= RXDONE_CRYPTO_IV; + + _rt2x00_desc_read(entry_priv->desc, 4, &rxdesc->icv); + rxdesc->dev_flags |= RXDONE_CRYPTO_ICV; + + /* + * Hardware has stripped IV/EIV data from 802.11 frame during + * decryption. It has provided the data separately but rt2x00lib + * should decide if it should be reinserted. + */ + rxdesc->flags |= RX_FLAG_IV_STRIPPED; + + /* + * FIXME: Legacy driver indicates that the frame does + * contain the Michael Mic. Unfortunately, in rt2x00 + * the MIC seems to be missing completely... 
+ */ + rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; + + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) + rxdesc->flags |= RX_FLAG_DECRYPTED; + else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) + rxdesc->flags |= RX_FLAG_MMIC_ERROR; + } + + /* + * Obtain the status about this packet. + * When frame was received with an OFDM bitrate, + * the signal is the PLCP value. If it was received with + * a CCK bitrate the signal is the rate in 100kbit/s. + */ + rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); + rxdesc->rssi = rt61pci_agc_to_rssi(rt2x00dev, word1); + rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + if (rt2x00_get_field32(word0, RXD_W0_OFDM)) + rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; + else + rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; + if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) + rxdesc->dev_flags |= RXDONE_MY_BSS; +} + +/* + * Interrupt functions. + */ +static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) +{ + struct data_queue *queue; + struct queue_entry *entry; + struct queue_entry *entry_done; + struct queue_entry_priv_pci *entry_priv; + struct txdone_entry_desc txdesc; + u32 word; + u32 reg; + u32 old_reg; + int type; + int index; + + /* + * During each loop we will compare the freshly read + * STA_CSR4 register value with the value read from + * the previous loop. If the 2 values are equal then + * we should stop processing because the chance is + * quite big that the device has been unplugged and + * we risk going into an endless loop. + */ + old_reg = 0; + + while (1) { + rt2x00pci_register_read(rt2x00dev, STA_CSR4, ®); + if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) + break; + + if (old_reg == reg) + break; + old_reg = reg; + + /* + * Skip this entry when it contains an invalid + * queue identication number. + */ + type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE); + queue = rt2x00queue_get_queue(rt2x00dev, type); + if (unlikely(!queue)) + continue; + + /* + * Skip this entry when it contains an invalid + * index number. + */ + index = rt2x00_get_field32(reg, STA_CSR4_PID_SUBTYPE); + if (unlikely(index >= queue->limit)) + continue; + + entry = &queue->entries[index]; + entry_priv = entry->priv_data; + rt2x00_desc_read(entry_priv->desc, 0, &word); + + if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + !rt2x00_get_field32(word, TXD_W0_VALID)) + return; + + entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE); + while (entry != entry_done) { + /* Catch up. + * Just report any entries we missed as failed. + */ + WARNING(rt2x00dev, + "TX status report missed for entry %d\n", + entry_done->entry_idx); + + txdesc.flags = 0; + __set_bit(TXDONE_UNKNOWN, &txdesc.flags); + txdesc.retry = 0; + + rt2x00lib_txdone(entry_done, &txdesc); + entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE); + } + + /* + * Obtain the status about this packet. + */ + txdesc.flags = 0; + switch (rt2x00_get_field32(reg, STA_CSR4_TX_RESULT)) { + case 0: /* Success, maybe with retry */ + __set_bit(TXDONE_SUCCESS, &txdesc.flags); + break; + case 6: /* Failure, excessive retries */ + __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags); + /* Don't break, this is a failed frame! */ + default: /* Failure */ + __set_bit(TXDONE_FAILURE, &txdesc.flags); + } + txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT); + + rt2x00lib_txdone(entry, &txdesc); + } +} + +static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg_mcu; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. 
+ * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, ®_mcu); + rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu); + + rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, ®); + rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); + + if (!reg && !reg_mcu) + return IRQ_NONE; + + if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* + * Handle interrupts, walk through all bits + * and run the tasks, the bits are checked in order of + * priority. + */ + + /* + * 1 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE)) + rt2x00pci_rxdone(rt2x00dev); + + /* + * 2 - Tx ring done interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE)) + rt61pci_txdone(rt2x00dev); + + /* + * 3 - Handle MCU command done. + */ + if (reg_mcu) + rt2x00pci_register_write(rt2x00dev, + M2H_CMD_DONE_CSR, 0xffffffff); + + return IRQ_HANDLED; +} + +/* + * Device probe functions. + */ +static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + struct eeprom_93cx6 eeprom; + u32 reg; + u16 word; + u8 *mac; + s8 value; + + rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, ®); + + eeprom.data = rt2x00dev; + eeprom.register_read = rt61pci_eepromregister_read; + eeprom.register_write = rt61pci_eepromregister_write; + eeprom.width = rt2x00_get_field32(reg, E2PROM_CSR_TYPE_93C46) ? + PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + eeprom.reg_data_in = 0; + eeprom.reg_data_out = 0; + eeprom.reg_data_clock = 0; + eeprom.reg_chip_select = 0; + + eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, + EEPROM_SIZE / sizeof(u16)); + + /* + * Start validation of the data that has been read. + */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %pM\n", mac); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, + ANTENNA_B); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, + ANTENNA_B); + rt2x00_set_field16(&word, EEPROM_ANTENNA_FRAME_TYPE, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5225); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_ENABLE_DIVERSITY, 0); + rt2x00_set_field16(&word, EEPROM_NIC_TX_DIVERSITY, 0); + rt2x00_set_field16(&word, EEPROM_NIC_RX_FIXED, 0); + rt2x00_set_field16(&word, EEPROM_NIC_TX_FIXED, 0); + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0); + rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, + LED_MODE_DEFAULT); + rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); + EEPROM(rt2x00dev, "Led: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); + rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); + 
rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); + EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); + EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); + } else { + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_2); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); + EEPROM(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word); + } else { + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_2); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); + } + + return 0; +} + +static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2x00pci_register_read(rt2x00dev, MAC_CSR0, ®); + rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), + value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); + + if (!rt2x00_rf(rt2x00dev, RF5225) && + !rt2x00_rf(rt2x00dev, RF5325) && + !rt2x00_rf(rt2x00dev, RF2527) && + !rt2x00_rf(rt2x00dev, RF2529)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Determine number of antennas. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_NUM) == 2) + __set_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags); + + /* + * Identify default antenna configuration. + */ + rt2x00dev->default_ant.tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->default_ant.rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Read the Frame type. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE)) + __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags); + + /* + * Detect if this device has a hardware controlled radio. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); + + /* + * Read frequency offset and RF programming sequence. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_FREQ_SEQ)) + __set_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags); + + rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); + + /* + * Read external LNA informations. 
+ */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A)) + __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG)) + __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + + /* + * When working with a RF2529 chip without double antenna, + * the antenna settings should be gathered from the NIC + * eeprom word. + */ + if (rt2x00_rf(rt2x00dev, RF2529) && + !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) { + rt2x00dev->default_ant.rx = + ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED); + rt2x00dev->default_ant.tx = + ANTENNA_B - rt2x00_get_field16(eeprom, EEPROM_NIC_TX_FIXED); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_DIVERSITY)) + rt2x00dev->default_ant.tx = ANTENNA_SW_DIVERSITY; + if (rt2x00_get_field16(eeprom, EEPROM_NIC_ENABLE_DIVERSITY)) + rt2x00dev->default_ant.rx = ANTENNA_SW_DIVERSITY; + } + + /* + * Store led settings, for correct led behaviour. + * If the eeprom value is invalid, + * switch to default led mode. + */ +#ifdef CONFIG_RT2X00_LIB_LEDS + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE); + + rt61pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); + rt61pci_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC); + if (value == LED_MODE_SIGNAL_STRENGTH) + rt61pci_init_led(rt2x00dev, &rt2x00dev->led_qual, + LED_TYPE_QUALITY); + + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_LED_MODE, value); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_0, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_0)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_1, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_1)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_2, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_2)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_3, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_3)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_4, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_4)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_ACT, + rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_BG, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_G)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_A)); +#endif /* CONFIG_RT2X00_LIB_LEDS */ + + return 0; +} + +/* + * RF value list for RF5225 & RF5325 + * Supports: 2.4 GHz & 5.2 GHz, rf_sequence disabled + */ +static const struct rf_channel rf_vals_noseq[] = { + { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, + { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, + { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, + { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, + { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, + { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, + { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, + { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, + { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, + { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, + { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, + { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, + { 13, 0x00002ccc, 0x0000479e, 
0x00068455, 0x000ffa0b }, + { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa23 }, + { 40, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa03 }, + { 44, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa0b }, + { 48, 0x00002ccc, 0x000049aa, 0x0009be55, 0x000ffa13 }, + { 52, 0x00002ccc, 0x000049ae, 0x0009ae55, 0x000ffa1b }, + { 56, 0x00002ccc, 0x000049b2, 0x0009ae55, 0x000ffa23 }, + { 60, 0x00002ccc, 0x000049ba, 0x0009ae55, 0x000ffa03 }, + { 64, 0x00002ccc, 0x000049be, 0x0009ae55, 0x000ffa0b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002ccc, 0x00004a2a, 0x000bae55, 0x000ffa03 }, + { 104, 0x00002ccc, 0x00004a2e, 0x000bae55, 0x000ffa0b }, + { 108, 0x00002ccc, 0x00004a32, 0x000bae55, 0x000ffa13 }, + { 112, 0x00002ccc, 0x00004a36, 0x000bae55, 0x000ffa1b }, + { 116, 0x00002ccc, 0x00004a3a, 0x000bbe55, 0x000ffa23 }, + { 120, 0x00002ccc, 0x00004a82, 0x000bbe55, 0x000ffa03 }, + { 124, 0x00002ccc, 0x00004a86, 0x000bbe55, 0x000ffa0b }, + { 128, 0x00002ccc, 0x00004a8a, 0x000bbe55, 0x000ffa13 }, + { 132, 0x00002ccc, 0x00004a8e, 0x000bbe55, 0x000ffa1b }, + { 136, 0x00002ccc, 0x00004a92, 0x000bbe55, 0x000ffa23 }, + + /* 802.11 UNII */ + { 140, 0x00002ccc, 0x00004a9a, 0x000bbe55, 0x000ffa03 }, + { 149, 0x00002ccc, 0x00004aa2, 0x000bbe55, 0x000ffa1f }, + { 153, 0x00002ccc, 0x00004aa6, 0x000bbe55, 0x000ffa27 }, + { 157, 0x00002ccc, 0x00004aae, 0x000bbe55, 0x000ffa07 }, + { 161, 0x00002ccc, 0x00004ab2, 0x000bbe55, 0x000ffa0f }, + { 165, 0x00002ccc, 0x00004ab6, 0x000bbe55, 0x000ffa17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa0b }, + { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000ffa13 }, + { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa1b }, + { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa23 }, +}; + +/* + * RF value list for RF5225 & RF5325 + * Supports: 2.4 GHz & 5.2 GHz, rf_sequence enabled + */ +static const struct rf_channel rf_vals_seq[] = { + { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, + { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, + { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, + { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, + { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, + { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, + { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, + { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, + { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, + { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, + { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, + { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, + { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, + { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002cd4, 0x0004481a, 0x00098455, 0x000c0a03 }, + { 40, 0x00002cd0, 0x00044682, 0x00098455, 0x000c0a03 }, + { 44, 0x00002cd0, 0x00044686, 0x00098455, 0x000c0a1b }, + { 48, 0x00002cd0, 0x0004468e, 0x00098655, 0x000c0a0b }, + { 52, 0x00002cd0, 0x00044692, 0x00098855, 0x000c0a23 }, + { 56, 0x00002cd0, 0x0004469a, 0x00098c55, 0x000c0a13 }, + { 60, 0x00002cd0, 0x000446a2, 0x00098e55, 0x000c0a03 }, + { 64, 0x00002cd0, 0x000446a6, 0x00099255, 0x000c0a1b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002cd4, 0x0004489a, 0x000b9855, 0x000c0a03 }, + { 104, 0x00002cd4, 0x000448a2, 0x000b9855, 0x000c0a03 }, + { 108, 0x00002cd4, 0x000448aa, 0x000b9855, 0x000c0a03 }, + { 112, 0x00002cd4, 0x000448b2, 0x000b9a55, 
0x000c0a03 }, + { 116, 0x00002cd4, 0x000448ba, 0x000b9a55, 0x000c0a03 }, + { 120, 0x00002cd0, 0x00044702, 0x000b9a55, 0x000c0a03 }, + { 124, 0x00002cd0, 0x00044706, 0x000b9a55, 0x000c0a1b }, + { 128, 0x00002cd0, 0x0004470e, 0x000b9c55, 0x000c0a0b }, + { 132, 0x00002cd0, 0x00044712, 0x000b9c55, 0x000c0a23 }, + { 136, 0x00002cd0, 0x0004471a, 0x000b9e55, 0x000c0a13 }, + + /* 802.11 UNII */ + { 140, 0x00002cd0, 0x00044722, 0x000b9e55, 0x000c0a03 }, + { 149, 0x00002cd0, 0x0004472e, 0x000ba255, 0x000c0a1b }, + { 153, 0x00002cd0, 0x00044736, 0x000ba255, 0x000c0a0b }, + { 157, 0x00002cd4, 0x0004490a, 0x000ba255, 0x000c0a17 }, + { 161, 0x00002cd4, 0x00044912, 0x000ba255, 0x000c0a17 }, + { 165, 0x00002cd4, 0x0004491a, 0x000ba255, 0x000c0a17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000c0a0b }, + { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000c0a13 }, + { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000c0a1b }, + { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000c0a23 }, +}; + +static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + struct channel_info *info; + char *tx_power; + unsigned int i; + + /* + * Disable powersaving as default. + */ + rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_PS_NULLFUNC_STACK; + + SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Initialize hw_mode information. + */ + spec->supported_bands = SUPPORT_BAND_2GHZ; + spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; + + if (!test_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags)) { + spec->num_channels = 14; + spec->channels = rf_vals_noseq; + } else { + spec->num_channels = 14; + spec->channels = rf_vals_seq; + } + + if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) { + spec->supported_bands |= SUPPORT_BAND_5GHZ; + spec->num_channels = ARRAY_SIZE(rf_vals_seq); + } + + /* + * Create channel information array + */ + info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + spec->channels_info = info; + + tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); + for (i = 0; i < 14; i++) + info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + + if (spec->num_channels > 14) { + tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); + for (i = 14; i < spec->num_channels; i++) + info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + } + + return 0; +} + +static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Disable power saving. + */ + rt2x00pci_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000007); + + /* + * Allocate eeprom data. + */ + retval = rt61pci_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt61pci_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + retval = rt61pci_probe_hw_mode(rt2x00dev); + if (retval) + return retval; + + /* + * This device has multiple filters for control frames, + * but has no a separate filter for PS Poll frames. + */ + __set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags); + + /* + * This device requires firmware and DMA mapped skbs. 
+ */ + __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); + if (!modparam_nohwcrypt) + __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. + */ +static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, + const struct ieee80211_tx_queue_params *params) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct data_queue *queue; + struct rt2x00_field32 field; + int retval; + u32 reg; + u32 offset; + + /* + * First pass the configuration through rt2x00lib, that will + * update the queue settings and validate the input. After that + * we are free to update the registers based on the value + * in the queue parameter. + */ + retval = rt2x00mac_conf_tx(hw, queue_idx, params); + if (retval) + return retval; + + /* + * We only need to perform additional register initialization + * for WMM queues. + */ + if (queue_idx >= 4) + return 0; + + queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + + /* Update WMM TXOP register */ + offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2))); + field.bit_offset = (queue_idx & 1) * 16; + field.bit_mask = 0xffff << field.bit_offset; + + rt2x00pci_register_read(rt2x00dev, offset, ®); + rt2x00_set_field32(®, field, queue->txop); + rt2x00pci_register_write(rt2x00dev, offset, reg); + + /* Update WMM registers */ + field.bit_offset = queue_idx * 4; + field.bit_mask = 0xf << field.bit_offset; + + rt2x00pci_register_read(rt2x00dev, AIFSN_CSR, ®); + rt2x00_set_field32(®, field, queue->aifs); + rt2x00pci_register_write(rt2x00dev, AIFSN_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, CWMIN_CSR, ®); + rt2x00_set_field32(®, field, queue->cw_min); + rt2x00pci_register_write(rt2x00dev, CWMIN_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, CWMAX_CSR, ®); + rt2x00_set_field32(®, field, queue->cw_max); + rt2x00pci_register_write(rt2x00dev, CWMAX_CSR, reg); + + return 0; +} + +static u64 rt61pci_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR13, ®); + tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; + rt2x00pci_register_read(rt2x00dev, TXRX_CSR12, ®); + tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); + + return tsf; +} + +static const struct ieee80211_ops rt61pci_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .configure_filter = rt2x00mac_configure_filter, + .set_tim = rt2x00mac_set_tim, + .set_key = rt2x00mac_set_key, + .get_stats = rt2x00mac_get_stats, + .bss_info_changed = rt2x00mac_bss_info_changed, + .conf_tx = rt61pci_conf_tx, + .get_tsf = rt61pci_get_tsf, + .rfkill_poll = rt2x00mac_rfkill_poll, +}; + +static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { + .irq_handler = rt61pci_interrupt, + .probe_hw = rt61pci_probe_hw, + .get_firmware_name = rt61pci_get_firmware_name, + .check_firmware = rt61pci_check_firmware, + .load_firmware = rt61pci_load_firmware, + .initialize = rt2x00pci_initialize, + .uninitialize = rt2x00pci_uninitialize, + .get_entry_state = rt61pci_get_entry_state, + .clear_entry = rt61pci_clear_entry, + .set_device_state = rt61pci_set_device_state, + .rfkill_poll = rt61pci_rfkill_poll, + .link_stats = rt61pci_link_stats, + .reset_tuner = 
rt61pci_reset_tuner, + .link_tuner = rt61pci_link_tuner, + .write_tx_desc = rt61pci_write_tx_desc, + .write_tx_data = rt2x00pci_write_tx_data, + .write_beacon = rt61pci_write_beacon, + .kick_tx_queue = rt61pci_kick_tx_queue, + .kill_tx_queue = rt61pci_kill_tx_queue, + .fill_rxdone = rt61pci_fill_rxdone, + .config_shared_key = rt61pci_config_shared_key, + .config_pairwise_key = rt61pci_config_pairwise_key, + .config_filter = rt61pci_config_filter, + .config_intf = rt61pci_config_intf, + .config_erp = rt61pci_config_erp, + .config_ant = rt61pci_config_ant, + .config = rt61pci_config, +}; + +static const struct data_queue_desc rt61pci_queue_rx = { + .entry_num = RX_ENTRIES, + .data_size = DATA_FRAME_SIZE, + .desc_size = RXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_pci), +}; + +static const struct data_queue_desc rt61pci_queue_tx = { + .entry_num = TX_ENTRIES, + .data_size = DATA_FRAME_SIZE, + .desc_size = TXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_pci), +}; + +static const struct data_queue_desc rt61pci_queue_bcn = { + .entry_num = 4 * BEACON_ENTRIES, + .data_size = 0, /* No DMA required for beacons */ + .desc_size = TXINFO_SIZE, + .priv_size = sizeof(struct queue_entry_priv_pci), +}; + +static const struct rt2x00_ops rt61pci_ops = { + .name = KBUILD_MODNAME, + .max_sta_intf = 1, + .max_ap_intf = 4, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .tx_queues = NUM_TX_QUEUES, + .extra_tx_headroom = 0, + .rx = &rt61pci_queue_rx, + .tx = &rt61pci_queue_tx, + .bcn = &rt61pci_queue_bcn, + .lib = &rt61pci_rt2x00_ops, + .hw = &rt61pci_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt61pci_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * RT61pci module information. + */ +static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = { + /* RT2561s */ + { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) }, + /* RT2561 v2 */ + { PCI_DEVICE(0x1814, 0x0302), PCI_DEVICE_DATA(&rt61pci_ops) }, + /* RT2661 */ + { PCI_DEVICE(0x1814, 0x0401), PCI_DEVICE_DATA(&rt61pci_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT61 PCI & PCMCIA Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2561, RT2561s & RT2661 " + "PCI & PCMCIA chipset based cards"); +MODULE_DEVICE_TABLE(pci, rt61pci_device_table); +MODULE_FIRMWARE(FIRMWARE_RT2561); +MODULE_FIRMWARE(FIRMWARE_RT2561s); +MODULE_FIRMWARE(FIRMWARE_RT2661); +MODULE_LICENSE("GPL"); + +static struct pci_driver rt61pci_driver = { + .name = KBUILD_MODNAME, + .id_table = rt61pci_device_table, + .probe = rt2x00pci_probe, + .remove = __devexit_p(rt2x00pci_remove), + .suspend = rt2x00pci_suspend, + .resume = rt2x00pci_resume, +}; + +static int __init rt61pci_init(void) +{ + return pci_register_driver(&rt61pci_driver); +} + +static void __exit rt61pci_exit(void) +{ + pci_unregister_driver(&rt61pci_driver); +} + +module_init(rt61pci_init); +module_exit(rt61pci_exit); diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h new file mode 100644 index 0000000..df80f1a --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt61pci.h @@ -0,0 +1,1501 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt61pci + Abstract: Data structures and registers for the rt61pci module. + Supported chipsets: RT2561, RT2561s, RT2661. + */ + +#ifndef RT61PCI_H +#define RT61PCI_H + +/* + * RT chip PCI IDs. + */ +#define RT2561s_PCI_ID 0x0301 +#define RT2561_PCI_ID 0x0302 +#define RT2661_PCI_ID 0x0401 + +/* + * RF chip defines. + */ +#define RF5225 0x0001 +#define RF5325 0x0002 +#define RF2527 0x0003 +#define RF2529 0x0004 + +/* + * Signal information. + * Default offset is required for RSSI <-> dBm conversion. + */ +#define DEFAULT_RSSI_OFFSET 120 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x3000 +#define CSR_REG_SIZE 0x04b0 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0100 +#define BBP_BASE 0x0000 +#define BBP_SIZE 0x0080 +#define RF_BASE 0x0004 +#define RF_SIZE 0x0010 + +/* + * Number of TX queues. + */ +#define NUM_TX_QUEUES 4 + +/* + * PCI registers. + */ + +/* + * HOST_CMD_CSR: For HOST to interrupt embedded processor + */ +#define HOST_CMD_CSR 0x0008 +#define HOST_CMD_CSR_HOST_COMMAND FIELD32(0x0000007f) +#define HOST_CMD_CSR_INTERRUPT_MCU FIELD32(0x00000080) + +/* + * MCU_CNTL_CSR + * SELECT_BANK: Select 8051 program bank. + * RESET: Enable 8051 reset state. + * READY: Ready state for 8051. + */ +#define MCU_CNTL_CSR 0x000c +#define MCU_CNTL_CSR_SELECT_BANK FIELD32(0x00000001) +#define MCU_CNTL_CSR_RESET FIELD32(0x00000002) +#define MCU_CNTL_CSR_READY FIELD32(0x00000004) + +/* + * SOFT_RESET_CSR + * FORCE_CLOCK_ON: Host force MAC clock ON + */ +#define SOFT_RESET_CSR 0x0010 +#define SOFT_RESET_CSR_FORCE_CLOCK_ON FIELD32(0x00000002) + +/* + * MCU_INT_SOURCE_CSR: MCU interrupt source/mask register. + */ +#define MCU_INT_SOURCE_CSR 0x0014 +#define MCU_INT_SOURCE_CSR_0 FIELD32(0x00000001) +#define MCU_INT_SOURCE_CSR_1 FIELD32(0x00000002) +#define MCU_INT_SOURCE_CSR_2 FIELD32(0x00000004) +#define MCU_INT_SOURCE_CSR_3 FIELD32(0x00000008) +#define MCU_INT_SOURCE_CSR_4 FIELD32(0x00000010) +#define MCU_INT_SOURCE_CSR_5 FIELD32(0x00000020) +#define MCU_INT_SOURCE_CSR_6 FIELD32(0x00000040) +#define MCU_INT_SOURCE_CSR_7 FIELD32(0x00000080) +#define MCU_INT_SOURCE_CSR_TWAKEUP FIELD32(0x00000100) +#define MCU_INT_SOURCE_CSR_TBTT_EXPIRE FIELD32(0x00000200) + +/* + * MCU_INT_MASK_CSR: MCU interrupt source/mask register. + */ +#define MCU_INT_MASK_CSR 0x0018 +#define MCU_INT_MASK_CSR_0 FIELD32(0x00000001) +#define MCU_INT_MASK_CSR_1 FIELD32(0x00000002) +#define MCU_INT_MASK_CSR_2 FIELD32(0x00000004) +#define MCU_INT_MASK_CSR_3 FIELD32(0x00000008) +#define MCU_INT_MASK_CSR_4 FIELD32(0x00000010) +#define MCU_INT_MASK_CSR_5 FIELD32(0x00000020) +#define MCU_INT_MASK_CSR_6 FIELD32(0x00000040) +#define MCU_INT_MASK_CSR_7 FIELD32(0x00000080) +#define MCU_INT_MASK_CSR_TWAKEUP FIELD32(0x00000100) +#define MCU_INT_MASK_CSR_TBTT_EXPIRE FIELD32(0x00000200) + +/* + * PCI_USEC_CSR + */ +#define PCI_USEC_CSR 0x001c + +/* + * Security key table memory. 
+ * 16 entries 32-byte for shared key table + * 64 entries 32-byte for pairwise key table + * 64 entries 8-byte for pairwise ta key table + */ +#define SHARED_KEY_TABLE_BASE 0x1000 +#define PAIRWISE_KEY_TABLE_BASE 0x1200 +#define PAIRWISE_TA_TABLE_BASE 0x1a00 + +#define SHARED_KEY_ENTRY(__idx) \ + ( SHARED_KEY_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_key_entry)) ) +#define PAIRWISE_KEY_ENTRY(__idx) \ + ( PAIRWISE_KEY_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_key_entry)) ) +#define PAIRWISE_TA_ENTRY(__idx) \ + ( PAIRWISE_TA_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_pairwise_ta_entry)) ) + +struct hw_key_entry { + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; +} __attribute__ ((packed)); + +struct hw_pairwise_ta_entry { + u8 address[6]; + u8 cipher; + u8 reserved; +} __attribute__ ((packed)); + +/* + * Other on-chip shared memory space. + */ +#define HW_CIS_BASE 0x2000 +#define HW_NULL_BASE 0x2b00 + +/* + * Since NULL frame won't be that long (256 byte), + * We steal 16 tail bytes to save debugging settings. + */ +#define HW_DEBUG_SETTING_BASE 0x2bf0 + +/* + * On-chip BEACON frame space. + */ +#define HW_BEACON_BASE0 0x2c00 +#define HW_BEACON_BASE1 0x2d00 +#define HW_BEACON_BASE2 0x2e00 +#define HW_BEACON_BASE3 0x2f00 + +#define HW_BEACON_OFFSET(__index) \ + ( HW_BEACON_BASE0 + (__index * 0x0100) ) + +/* + * HOST-MCU shared memory. + */ + +/* + * H2M_MAILBOX_CSR: Host-to-MCU Mailbox. + */ +#define H2M_MAILBOX_CSR 0x2100 +#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff) +#define H2M_MAILBOX_CSR_ARG1 FIELD32(0x0000ff00) +#define H2M_MAILBOX_CSR_CMD_TOKEN FIELD32(0x00ff0000) +#define H2M_MAILBOX_CSR_OWNER FIELD32(0xff000000) + +/* + * MCU_LEDCS: LED control for MCU Mailbox. + */ +#define MCU_LEDCS_LED_MODE FIELD16(0x001f) +#define MCU_LEDCS_RADIO_STATUS FIELD16(0x0020) +#define MCU_LEDCS_LINK_BG_STATUS FIELD16(0x0040) +#define MCU_LEDCS_LINK_A_STATUS FIELD16(0x0080) +#define MCU_LEDCS_POLARITY_GPIO_0 FIELD16(0x0100) +#define MCU_LEDCS_POLARITY_GPIO_1 FIELD16(0x0200) +#define MCU_LEDCS_POLARITY_GPIO_2 FIELD16(0x0400) +#define MCU_LEDCS_POLARITY_GPIO_3 FIELD16(0x0800) +#define MCU_LEDCS_POLARITY_GPIO_4 FIELD16(0x1000) +#define MCU_LEDCS_POLARITY_ACT FIELD16(0x2000) +#define MCU_LEDCS_POLARITY_READY_BG FIELD16(0x4000) +#define MCU_LEDCS_POLARITY_READY_A FIELD16(0x8000) + +/* + * M2H_CMD_DONE_CSR. + */ +#define M2H_CMD_DONE_CSR 0x2104 + +/* + * MCU_TXOP_ARRAY_BASE. + */ +#define MCU_TXOP_ARRAY_BASE 0x2110 + +/* + * MAC Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * MAC_CSR0: ASIC revision number. + */ +#define MAC_CSR0 0x3000 +#define MAC_CSR0_REVISION FIELD32(0x0000000f) +#define MAC_CSR0_CHIPSET FIELD32(0x000ffff0) + +/* + * MAC_CSR1: System control register. + * SOFT_RESET: Software reset bit, 1: reset, 0: normal. + * BBP_RESET: Hardware reset BBP. + * HOST_READY: Host is ready after initialization, 1: ready. + */ +#define MAC_CSR1 0x3004 +#define MAC_CSR1_SOFT_RESET FIELD32(0x00000001) +#define MAC_CSR1_BBP_RESET FIELD32(0x00000002) +#define MAC_CSR1_HOST_READY FIELD32(0x00000004) + +/* + * MAC_CSR2: STA MAC register 0. + */ +#define MAC_CSR2 0x3008 +#define MAC_CSR2_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR2_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR2_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR2_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR3: STA MAC register 1. + * UNICAST_TO_ME_MASK: + * Used to mask off bits from byte 5 of the MAC address + * to determine the UNICAST_TO_ME bit for RX frames. 
+ * The full mask is complemented by BSS_ID_MASK: + * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK + */ +#define MAC_CSR3 0x300c +#define MAC_CSR3_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR3_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR3_UNICAST_TO_ME_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR4: BSSID register 0. + */ +#define MAC_CSR4 0x3010 +#define MAC_CSR4_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR4_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR4_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR4_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR5: BSSID register 1. + * BSS_ID_MASK: + * This mask is used to mask off bits 0 and 1 of byte 5 of the + * BSSID. This will make sure that those bits will be ignored + * when determining the MY_BSS of RX frames. + * 0: 1-BSSID mode (BSS index = 0) + * 1: 2-BSSID mode (BSS index: Byte5, bit 0) + * 2: 2-BSSID mode (BSS index: byte5, bit 1) + * 3: 4-BSSID mode (BSS index: byte5, bit 0 - 1) + */ +#define MAC_CSR5 0x3014 +#define MAC_CSR5_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR5_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR5_BSS_ID_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR6: Maximum frame length register. + */ +#define MAC_CSR6 0x3018 +#define MAC_CSR6_MAX_FRAME_UNIT FIELD32(0x00000fff) + +/* + * MAC_CSR7: Reserved + */ +#define MAC_CSR7 0x301c + +/* + * MAC_CSR8: SIFS/EIFS register. + * All units are in US. + */ +#define MAC_CSR8 0x3020 +#define MAC_CSR8_SIFS FIELD32(0x000000ff) +#define MAC_CSR8_SIFS_AFTER_RX_OFDM FIELD32(0x0000ff00) +#define MAC_CSR8_EIFS FIELD32(0xffff0000) + +/* + * MAC_CSR9: Back-Off control register. + * SLOT_TIME: Slot time, default is 20us for 802.11BG. + * CWMIN: Bit for Cwmin. default Cwmin is 31 (2^5 - 1). + * CWMAX: Bit for Cwmax, default Cwmax is 1023 (2^10 - 1). + * CW_SELECT: 1: CWmin/Cwmax select from register, 0:select from TxD. + */ +#define MAC_CSR9 0x3024 +#define MAC_CSR9_SLOT_TIME FIELD32(0x000000ff) +#define MAC_CSR9_CWMIN FIELD32(0x00000f00) +#define MAC_CSR9_CWMAX FIELD32(0x0000f000) +#define MAC_CSR9_CW_SELECT FIELD32(0x00010000) + +/* + * MAC_CSR10: Power state configuration. + */ +#define MAC_CSR10 0x3028 + +/* + * MAC_CSR11: Power saving transition time register. + * DELAY_AFTER_TBCN: Delay after Tbcn expired in units of TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * WAKEUP_LATENCY: In unit of TU. + */ +#define MAC_CSR11 0x302c +#define MAC_CSR11_DELAY_AFTER_TBCN FIELD32(0x000000ff) +#define MAC_CSR11_TBCN_BEFORE_WAKEUP FIELD32(0x00007f00) +#define MAC_CSR11_AUTOWAKE FIELD32(0x00008000) +#define MAC_CSR11_WAKEUP_LATENCY FIELD32(0x000f0000) + +/* + * MAC_CSR12: Manual power control / status register (merge CSR20 & PWRCSR1). + * CURRENT_STATE: 0:sleep, 1:awake. + * FORCE_WAKEUP: This has higher priority than PUT_TO_SLEEP. + * BBP_CURRENT_STATE: 0: BBP sleep, 1: BBP awake. + */ +#define MAC_CSR12 0x3030 +#define MAC_CSR12_CURRENT_STATE FIELD32(0x00000001) +#define MAC_CSR12_PUT_TO_SLEEP FIELD32(0x00000002) +#define MAC_CSR12_FORCE_WAKEUP FIELD32(0x00000004) +#define MAC_CSR12_BBP_CURRENT_STATE FIELD32(0x00000008) + +/* + * MAC_CSR13: GPIO. 
+ */ +#define MAC_CSR13 0x3034 +#define MAC_CSR13_BIT0 FIELD32(0x00000001) +#define MAC_CSR13_BIT1 FIELD32(0x00000002) +#define MAC_CSR13_BIT2 FIELD32(0x00000004) +#define MAC_CSR13_BIT3 FIELD32(0x00000008) +#define MAC_CSR13_BIT4 FIELD32(0x00000010) +#define MAC_CSR13_BIT5 FIELD32(0x00000020) +#define MAC_CSR13_BIT6 FIELD32(0x00000040) +#define MAC_CSR13_BIT7 FIELD32(0x00000080) +#define MAC_CSR13_BIT8 FIELD32(0x00000100) +#define MAC_CSR13_BIT9 FIELD32(0x00000200) +#define MAC_CSR13_BIT10 FIELD32(0x00000400) +#define MAC_CSR13_BIT11 FIELD32(0x00000800) +#define MAC_CSR13_BIT12 FIELD32(0x00001000) + +/* + * MAC_CSR14: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + * HW_LED: HW TX activity, 1: normal OFF, 0: normal ON. + * SW_LED: s/w LED, 1: ON, 0: OFF. + * HW_LED_POLARITY: 0: active low, 1: active high. + */ +#define MAC_CSR14 0x3038 +#define MAC_CSR14_ON_PERIOD FIELD32(0x000000ff) +#define MAC_CSR14_OFF_PERIOD FIELD32(0x0000ff00) +#define MAC_CSR14_HW_LED FIELD32(0x00010000) +#define MAC_CSR14_SW_LED FIELD32(0x00020000) +#define MAC_CSR14_HW_LED_POLARITY FIELD32(0x00040000) +#define MAC_CSR14_SW_LED2 FIELD32(0x00080000) + +/* + * MAC_CSR15: NAV control. + */ +#define MAC_CSR15 0x303c + +/* + * TXRX control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXRX_CSR0: TX/RX configuration register. + * TSF_OFFSET: Default is 24. + * AUTO_TX_SEQ: 1: ASIC auto replace sequence nr in outgoing frame. + * DISABLE_RX: Disable Rx engine. + * DROP_CRC: Drop CRC error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TO_DS: Drop frames with the ToDS bit set. + * DROP_VERSION_ERROR: Drop version error frame. + * DROP_MULTICAST: Drop multicast frames. + * DROP_BROADCAST: Drop broadcast frames. + * DROP_ACK_CTS: Drop received ACK and CTS.
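The FIELD32() masks in this header are consumed by the generic rt2x00 field helpers: the driver reads the whole 32-bit CSR, patches one field through its mask, and writes the word back. A minimal sketch of that read-modify-write pattern, using the TXRX_CSR0 drop bit defined just below (the register and helper names are the ones already used elsewhere in this patch; the function itself is only illustrative):

static void example_enable_crc_drop(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/* Fetch the current TX/RX configuration word. */
	rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
	/* Flip only the one-bit DROP_CRC field, leaving all other bits intact. */
	rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC, 1);
	/* Write the complete word back to the CSR. */
	rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
}

rt73usb_config_filter() further down in this patch applies exactly this pattern to the same drop bits, only through the USB register accessors.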
+ */ +#define TXRX_CSR0 0x3040 +#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXRX_CSR0_TSF_OFFSET FIELD32(0x00007e00) +#define TXRX_CSR0_AUTO_TX_SEQ FIELD32(0x00008000) +#define TXRX_CSR0_DISABLE_RX FIELD32(0x00010000) +#define TXRX_CSR0_DROP_CRC FIELD32(0x00020000) +#define TXRX_CSR0_DROP_PHYSICAL FIELD32(0x00040000) +#define TXRX_CSR0_DROP_CONTROL FIELD32(0x00080000) +#define TXRX_CSR0_DROP_NOT_TO_ME FIELD32(0x00100000) +#define TXRX_CSR0_DROP_TO_DS FIELD32(0x00200000) +#define TXRX_CSR0_DROP_VERSION_ERROR FIELD32(0x00400000) +#define TXRX_CSR0_DROP_MULTICAST FIELD32(0x00800000) +#define TXRX_CSR0_DROP_BROADCAST FIELD32(0x01000000) +#define TXRX_CSR0_DROP_ACK_CTS FIELD32(0x02000000) +#define TXRX_CSR0_TX_WITHOUT_WAITING FIELD32(0x04000000) + +/* + * TXRX_CSR1 + */ +#define TXRX_CSR1 0x3044 +#define TXRX_CSR1_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR1_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR1_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR1_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR1_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR1_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR1_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR1_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR2 + */ +#define TXRX_CSR2 0x3048 +#define TXRX_CSR2_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR2_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR2_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR2_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR2_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR2_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR2_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR2_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR3 + */ +#define TXRX_CSR3 0x304c +#define TXRX_CSR3_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR3_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR3_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR3_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR4: Auto-Responder/Tx-retry register. + * AUTORESPOND_PREAMBLE: 0:long, 1:short preamble. + * OFDM_TX_RATE_DOWN: 1:enable. + * OFDM_TX_RATE_STEP: 0:1-step, 1: 2-step, 2:3-step, 3:4-step. + * OFDM_TX_FALLBACK_CCK: 0: Fallback to OFDM 6M only, 1: Fallback to CCK 1M,2M. + */ +#define TXRX_CSR4 0x3050 +#define TXRX_CSR4_TX_ACK_TIMEOUT FIELD32(0x000000ff) +#define TXRX_CSR4_CNTL_ACK_POLICY FIELD32(0x00000700) +#define TXRX_CSR4_ACK_CTS_PSM FIELD32(0x00010000) +#define TXRX_CSR4_AUTORESPOND_ENABLE FIELD32(0x00020000) +#define TXRX_CSR4_AUTORESPOND_PREAMBLE FIELD32(0x00040000) +#define TXRX_CSR4_OFDM_TX_RATE_DOWN FIELD32(0x00080000) +#define TXRX_CSR4_OFDM_TX_RATE_STEP FIELD32(0x00300000) +#define TXRX_CSR4_OFDM_TX_FALLBACK_CCK FIELD32(0x00400000) +#define TXRX_CSR4_LONG_RETRY_LIMIT FIELD32(0x0f000000) +#define TXRX_CSR4_SHORT_RETRY_LIMIT FIELD32(0xf0000000) + +/* + * TXRX_CSR5 + */ +#define TXRX_CSR5 0x3054 + +/* + * TXRX_CSR6: ACK/CTS payload consumed time + */ +#define TXRX_CSR6 0x3058 + +/* + * TXRX_CSR7: OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. + */ +#define TXRX_CSR7 0x305c +#define TXRX_CSR7_ACK_CTS_6MBS FIELD32(0x000000ff) +#define TXRX_CSR7_ACK_CTS_9MBS FIELD32(0x0000ff00) +#define TXRX_CSR7_ACK_CTS_12MBS FIELD32(0x00ff0000) +#define TXRX_CSR7_ACK_CTS_18MBS FIELD32(0xff000000) + +/* + * TXRX_CSR8: OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. 
+ */ +#define TXRX_CSR8 0x3060 +#define TXRX_CSR8_ACK_CTS_24MBS FIELD32(0x000000ff) +#define TXRX_CSR8_ACK_CTS_36MBS FIELD32(0x0000ff00) +#define TXRX_CSR8_ACK_CTS_48MBS FIELD32(0x00ff0000) +#define TXRX_CSR8_ACK_CTS_54MBS FIELD32(0xff000000) + +/* + * TXRX_CSR9: Synchronization control register. + * BEACON_INTERVAL: In unit of 1/16 TU. + * TSF_TICKING: Enable TSF auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * BEACON_GEN: Enable beacon generator. + */ +#define TXRX_CSR9 0x3064 +#define TXRX_CSR9_BEACON_INTERVAL FIELD32(0x0000ffff) +#define TXRX_CSR9_TSF_TICKING FIELD32(0x00010000) +#define TXRX_CSR9_TSF_SYNC FIELD32(0x00060000) +#define TXRX_CSR9_TBTT_ENABLE FIELD32(0x00080000) +#define TXRX_CSR9_BEACON_GEN FIELD32(0x00100000) +#define TXRX_CSR9_TIMESTAMP_COMPENSATE FIELD32(0xff000000) + +/* + * TXRX_CSR10: BEACON alignment. + */ +#define TXRX_CSR10 0x3068 + +/* + * TXRX_CSR11: AES mask. + */ +#define TXRX_CSR11 0x306c + +/* + * TXRX_CSR12: TSF low 32. + */ +#define TXRX_CSR12 0x3070 +#define TXRX_CSR12_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR13: TSF high 32. + */ +#define TXRX_CSR13 0x3074 +#define TXRX_CSR13_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR14: TBTT timer. + */ +#define TXRX_CSR14 0x3078 + +/* + * TXRX_CSR15: TKIP MIC priority byte "AND" mask. + */ +#define TXRX_CSR15 0x307c + +/* + * PHY control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * PHY_CSR0: RF/PS control. + */ +#define PHY_CSR0 0x3080 +#define PHY_CSR0_PA_PE_BG FIELD32(0x00010000) +#define PHY_CSR0_PA_PE_A FIELD32(0x00020000) + +/* + * PHY_CSR1 + */ +#define PHY_CSR1 0x3084 + +/* + * PHY_CSR2: Pre-TX BBP control. + */ +#define PHY_CSR2 0x3088 + +/* + * PHY_CSR3: BBP serial control register. + * VALUE: Register value to program into BBP. + * REG_NUM: Selected BBP register. + * READ_CONTROL: 0: Write BBP, 1: Read BBP. + * BUSY: 1: ASIC is busy execute BBP programming. + */ +#define PHY_CSR3 0x308c +#define PHY_CSR3_VALUE FIELD32(0x000000ff) +#define PHY_CSR3_REGNUM FIELD32(0x00007f00) +#define PHY_CSR3_READ_CONTROL FIELD32(0x00008000) +#define PHY_CSR3_BUSY FIELD32(0x00010000) + +/* + * PHY_CSR4: RF serial control register + * VALUE: Register value (include register id) serial out to RF/IF chip. + * NUMBER_OF_BITS: Number of bits used in RFRegValue (I:20, RFMD:22). + * IF_SELECT: 1: select IF to program, 0: select RF to program. + * PLL_LD: RF PLL_LD status. + * BUSY: 1: ASIC is busy execute RF programming. + */ +#define PHY_CSR4 0x3090 +#define PHY_CSR4_VALUE FIELD32(0x00ffffff) +#define PHY_CSR4_NUMBER_OF_BITS FIELD32(0x1f000000) +#define PHY_CSR4_IF_SELECT FIELD32(0x20000000) +#define PHY_CSR4_PLL_LD FIELD32(0x40000000) +#define PHY_CSR4_BUSY FIELD32(0x80000000) + +/* + * PHY_CSR5: RX to TX signal switch timing control. + */ +#define PHY_CSR5 0x3094 +#define PHY_CSR5_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR6: TX to RX signal timing control. + */ +#define PHY_CSR6 0x3098 +#define PHY_CSR6_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR7: TX DAC switching timing control. + */ +#define PHY_CSR7 0x309c + +/* + * Security control register. + */ + +/* + * SEC_CSR0: Shared key table control. 
+ */ +#define SEC_CSR0 0x30a0 +#define SEC_CSR0_BSS0_KEY0_VALID FIELD32(0x00000001) +#define SEC_CSR0_BSS0_KEY1_VALID FIELD32(0x00000002) +#define SEC_CSR0_BSS0_KEY2_VALID FIELD32(0x00000004) +#define SEC_CSR0_BSS0_KEY3_VALID FIELD32(0x00000008) +#define SEC_CSR0_BSS1_KEY0_VALID FIELD32(0x00000010) +#define SEC_CSR0_BSS1_KEY1_VALID FIELD32(0x00000020) +#define SEC_CSR0_BSS1_KEY2_VALID FIELD32(0x00000040) +#define SEC_CSR0_BSS1_KEY3_VALID FIELD32(0x00000080) +#define SEC_CSR0_BSS2_KEY0_VALID FIELD32(0x00000100) +#define SEC_CSR0_BSS2_KEY1_VALID FIELD32(0x00000200) +#define SEC_CSR0_BSS2_KEY2_VALID FIELD32(0x00000400) +#define SEC_CSR0_BSS2_KEY3_VALID FIELD32(0x00000800) +#define SEC_CSR0_BSS3_KEY0_VALID FIELD32(0x00001000) +#define SEC_CSR0_BSS3_KEY1_VALID FIELD32(0x00002000) +#define SEC_CSR0_BSS3_KEY2_VALID FIELD32(0x00004000) +#define SEC_CSR0_BSS3_KEY3_VALID FIELD32(0x00008000) + +/* + * SEC_CSR1: Shared key table security mode register. + */ +#define SEC_CSR1 0x30a4 +#define SEC_CSR1_BSS0_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR1_BSS0_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR1_BSS0_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR1_BSS0_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR1_BSS1_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR1_BSS1_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR1_BSS1_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR1_BSS1_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * Pairwise key table valid bitmap registers. + * SEC_CSR2: pairwise key table valid bitmap 0. + * SEC_CSR3: pairwise key table valid bitmap 1. + */ +#define SEC_CSR2 0x30a8 +#define SEC_CSR3 0x30ac + +/* + * SEC_CSR4: Pairwise key table lookup control. + */ +#define SEC_CSR4 0x30b0 +#define SEC_CSR4_ENABLE_BSS0 FIELD32(0x00000001) +#define SEC_CSR4_ENABLE_BSS1 FIELD32(0x00000002) +#define SEC_CSR4_ENABLE_BSS2 FIELD32(0x00000004) +#define SEC_CSR4_ENABLE_BSS3 FIELD32(0x00000008) + +/* + * SEC_CSR5: shared key table security mode register. + */ +#define SEC_CSR5 0x30b4 +#define SEC_CSR5_BSS2_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR5_BSS2_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR5_BSS2_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR5_BSS2_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR5_BSS3_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR5_BSS3_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR5_BSS3_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR5_BSS3_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * STA control registers. + */ + +/* + * STA_CSR0: RX PLCP error count & RX FCS error count. + */ +#define STA_CSR0 0x30c0 +#define STA_CSR0_FCS_ERROR FIELD32(0x0000ffff) +#define STA_CSR0_PLCP_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR1: RX False CCA count & RX LONG frame count. + */ +#define STA_CSR1 0x30c4 +#define STA_CSR1_PHYSICAL_ERROR FIELD32(0x0000ffff) +#define STA_CSR1_FALSE_CCA_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR2: TX Beacon count and RX FIFO overflow count. + */ +#define STA_CSR2 0x30c8 +#define STA_CSR2_RX_FIFO_OVERFLOW_COUNT FIELD32(0x0000ffff) +#define STA_CSR2_RX_OVERFLOW_COUNT FIELD32(0xffff0000) + +/* + * STA_CSR3: TX Beacon count. + */ +#define STA_CSR3 0x30cc +#define STA_CSR3_TX_BEACON_COUNT FIELD32(0x0000ffff) + +/* + * STA_CSR4: TX Result status register. + * VALID: 1:This register contains a valid TX result. 
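STA_CSR4 hands back one TX result per read, with the VALID bit (defined just below) indicating whether the remaining fields carry data. A rough, illustrative sketch of draining such results follows; the helper name is made up and this is not the driver's actual txdone path:

static void example_drain_tx_status(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/* Keep reading until the hardware reports no further valid entries. */
	while (1) {
		rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg);
		if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
			break;
		/* TX_RESULT encodes the outcome, RETRY_COUNT the retries used. */
		printk(KERN_DEBUG "tx result %u, %u retries\n",
		       rt2x00_get_field32(reg, STA_CSR4_TX_RESULT),
		       rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT));
	}
}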
+ */ +#define STA_CSR4 0x30d0 +#define STA_CSR4_VALID FIELD32(0x00000001) +#define STA_CSR4_TX_RESULT FIELD32(0x0000000e) +#define STA_CSR4_RETRY_COUNT FIELD32(0x000000f0) +#define STA_CSR4_PID_SUBTYPE FIELD32(0x00001f00) +#define STA_CSR4_PID_TYPE FIELD32(0x0000e000) +#define STA_CSR4_TXRATE FIELD32(0x000f0000) + +/* + * QOS control registers. + */ + +/* + * QOS_CSR0: TXOP holder MAC address register. + */ +#define QOS_CSR0 0x30e0 +#define QOS_CSR0_BYTE0 FIELD32(0x000000ff) +#define QOS_CSR0_BYTE1 FIELD32(0x0000ff00) +#define QOS_CSR0_BYTE2 FIELD32(0x00ff0000) +#define QOS_CSR0_BYTE3 FIELD32(0xff000000) + +/* + * QOS_CSR1: TXOP holder MAC address register. + */ +#define QOS_CSR1 0x30e4 +#define QOS_CSR1_BYTE4 FIELD32(0x000000ff) +#define QOS_CSR1_BYTE5 FIELD32(0x0000ff00) + +/* + * QOS_CSR2: TXOP holder timeout register. + */ +#define QOS_CSR2 0x30e8 + +/* + * RX QOS-CFPOLL MAC address register. + * QOS_CSR3: RX QOS-CFPOLL MAC address 0. + * QOS_CSR4: RX QOS-CFPOLL MAC address 1. + */ +#define QOS_CSR3 0x30ec +#define QOS_CSR4 0x30f0 + +/* + * QOS_CSR5: "QosControl" field of the RX QOS-CFPOLL. + */ +#define QOS_CSR5 0x30f4 + +/* + * Host DMA registers. + */ + +/* + * AC0_BASE_CSR: AC_BK base address. + */ +#define AC0_BASE_CSR 0x3400 +#define AC0_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * AC1_BASE_CSR: AC_BE base address. + */ +#define AC1_BASE_CSR 0x3404 +#define AC1_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * AC2_BASE_CSR: AC_VI base address. + */ +#define AC2_BASE_CSR 0x3408 +#define AC2_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * AC3_BASE_CSR: AC_VO base address. + */ +#define AC3_BASE_CSR 0x340c +#define AC3_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * MGMT_BASE_CSR: MGMT ring base address. + */ +#define MGMT_BASE_CSR 0x3410 +#define MGMT_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * TX_RING_CSR0: TX Ring size for AC_BK, AC_BE, AC_VI, AC_VO. + */ +#define TX_RING_CSR0 0x3418 +#define TX_RING_CSR0_AC0_RING_SIZE FIELD32(0x000000ff) +#define TX_RING_CSR0_AC1_RING_SIZE FIELD32(0x0000ff00) +#define TX_RING_CSR0_AC2_RING_SIZE FIELD32(0x00ff0000) +#define TX_RING_CSR0_AC3_RING_SIZE FIELD32(0xff000000) + +/* + * TX_RING_CSR1: TX Ring size for MGMT Ring, HCCA Ring + * TXD_SIZE: In unit of 32-bit. + */ +#define TX_RING_CSR1 0x341c +#define TX_RING_CSR1_MGMT_RING_SIZE FIELD32(0x000000ff) +#define TX_RING_CSR1_HCCA_RING_SIZE FIELD32(0x0000ff00) +#define TX_RING_CSR1_TXD_SIZE FIELD32(0x003f0000) + +/* + * AIFSN_CSR: AIFSN for each EDCA AC. + * AIFSN0: For AC_BK. + * AIFSN1: For AC_BE. + * AIFSN2: For AC_VI. + * AIFSN3: For AC_VO. + */ +#define AIFSN_CSR 0x3420 +#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f) +#define AIFSN_CSR_AIFSN1 FIELD32(0x000000f0) +#define AIFSN_CSR_AIFSN2 FIELD32(0x00000f00) +#define AIFSN_CSR_AIFSN3 FIELD32(0x0000f000) + +/* + * CWMIN_CSR: CWmin for each EDCA AC. + * CWMIN0: For AC_BK. + * CWMIN1: For AC_BE. + * CWMIN2: For AC_VI. + * CWMIN3: For AC_VO. + */ +#define CWMIN_CSR 0x3424 +#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f) +#define CWMIN_CSR_CWMIN1 FIELD32(0x000000f0) +#define CWMIN_CSR_CWMIN2 FIELD32(0x00000f00) +#define CWMIN_CSR_CWMIN3 FIELD32(0x0000f000) + +/* + * CWMAX_CSR: CWmax for each EDCA AC. + * CWMAX0: For AC_BK. + * CWMAX1: For AC_BE. + * CWMAX2: For AC_VI. + * CWMAX3: For AC_VO. 
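AIFSN_CSR, CWMIN_CSR and CWMAX_CSR each pack one 4-bit field per access category, which is why rt61pci_conf_tx() earlier in this patch builds the field descriptor at run time instead of picking one of the fixed defines above. The core of that calculation, condensed into an illustrative helper (the CW values written here are exponents, e.g. 5 stands for a contention window of 2^5 - 1 = 31):

static void example_set_cw_min(struct rt2x00_dev *rt2x00dev,
			       unsigned int queue_idx, u8 cw_min_exp)
{
	struct rt2x00_field32 field;
	u32 reg;

	/* queue_idx 0..3 selects which 4-bit slice of CWMIN_CSR is updated. */
	field.bit_offset = queue_idx * 4;
	field.bit_mask = 0xf << field.bit_offset;

	rt2x00pci_register_read(rt2x00dev, CWMIN_CSR, &reg);
	rt2x00_set_field32(&reg, field, cw_min_exp);
	rt2x00pci_register_write(rt2x00dev, CWMIN_CSR, reg);
}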
+ */ +#define CWMAX_CSR 0x3428 +#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f) +#define CWMAX_CSR_CWMAX1 FIELD32(0x000000f0) +#define CWMAX_CSR_CWMAX2 FIELD32(0x00000f00) +#define CWMAX_CSR_CWMAX3 FIELD32(0x0000f000) + +/* + * TX_DMA_DST_CSR: TX DMA destination + * 0: TX ring0, 1: TX ring1, 2: TX ring2 3: invalid + */ +#define TX_DMA_DST_CSR 0x342c +#define TX_DMA_DST_CSR_DEST_AC0 FIELD32(0x00000003) +#define TX_DMA_DST_CSR_DEST_AC1 FIELD32(0x0000000c) +#define TX_DMA_DST_CSR_DEST_AC2 FIELD32(0x00000030) +#define TX_DMA_DST_CSR_DEST_AC3 FIELD32(0x000000c0) +#define TX_DMA_DST_CSR_DEST_MGMT FIELD32(0x00000300) + +/* + * TX_CNTL_CSR: KICK/Abort TX. + * KICK_TX_AC0: For AC_BK. + * KICK_TX_AC1: For AC_BE. + * KICK_TX_AC2: For AC_VI. + * KICK_TX_AC3: For AC_VO. + * ABORT_TX_AC0: For AC_BK. + * ABORT_TX_AC1: For AC_BE. + * ABORT_TX_AC2: For AC_VI. + * ABORT_TX_AC3: For AC_VO. + */ +#define TX_CNTL_CSR 0x3430 +#define TX_CNTL_CSR_KICK_TX_AC0 FIELD32(0x00000001) +#define TX_CNTL_CSR_KICK_TX_AC1 FIELD32(0x00000002) +#define TX_CNTL_CSR_KICK_TX_AC2 FIELD32(0x00000004) +#define TX_CNTL_CSR_KICK_TX_AC3 FIELD32(0x00000008) +#define TX_CNTL_CSR_KICK_TX_MGMT FIELD32(0x00000010) +#define TX_CNTL_CSR_ABORT_TX_AC0 FIELD32(0x00010000) +#define TX_CNTL_CSR_ABORT_TX_AC1 FIELD32(0x00020000) +#define TX_CNTL_CSR_ABORT_TX_AC2 FIELD32(0x00040000) +#define TX_CNTL_CSR_ABORT_TX_AC3 FIELD32(0x00080000) +#define TX_CNTL_CSR_ABORT_TX_MGMT FIELD32(0x00100000) + +/* + * LOAD_TX_RING_CSR: Load RX desriptor + */ +#define LOAD_TX_RING_CSR 0x3434 +#define LOAD_TX_RING_CSR_LOAD_TXD_AC0 FIELD32(0x00000001) +#define LOAD_TX_RING_CSR_LOAD_TXD_AC1 FIELD32(0x00000002) +#define LOAD_TX_RING_CSR_LOAD_TXD_AC2 FIELD32(0x00000004) +#define LOAD_TX_RING_CSR_LOAD_TXD_AC3 FIELD32(0x00000008) +#define LOAD_TX_RING_CSR_LOAD_TXD_MGMT FIELD32(0x00000010) + +/* + * Several read-only registers, for debugging. + */ +#define AC0_TXPTR_CSR 0x3438 +#define AC1_TXPTR_CSR 0x343c +#define AC2_TXPTR_CSR 0x3440 +#define AC3_TXPTR_CSR 0x3444 +#define MGMT_TXPTR_CSR 0x3448 + +/* + * RX_BASE_CSR + */ +#define RX_BASE_CSR 0x3450 +#define RX_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * RX_RING_CSR. + * RXD_SIZE: In unit of 32-bit. + */ +#define RX_RING_CSR 0x3454 +#define RX_RING_CSR_RING_SIZE FIELD32(0x000000ff) +#define RX_RING_CSR_RXD_SIZE FIELD32(0x00003f00) +#define RX_RING_CSR_RXD_WRITEBACK_SIZE FIELD32(0x00070000) + +/* + * RX_CNTL_CSR + */ +#define RX_CNTL_CSR 0x3458 +#define RX_CNTL_CSR_ENABLE_RX_DMA FIELD32(0x00000001) +#define RX_CNTL_CSR_LOAD_RXD FIELD32(0x00000002) + +/* + * RXPTR_CSR: Read-only, for debugging. + */ +#define RXPTR_CSR 0x345c + +/* + * PCI_CFG_CSR + */ +#define PCI_CFG_CSR 0x3460 + +/* + * BUF_FORMAT_CSR + */ +#define BUF_FORMAT_CSR 0x3464 + +/* + * INT_SOURCE_CSR: Interrupt source register. + * Write one to clear corresponding bit. + */ +#define INT_SOURCE_CSR 0x3468 +#define INT_SOURCE_CSR_TXDONE FIELD32(0x00000001) +#define INT_SOURCE_CSR_RXDONE FIELD32(0x00000002) +#define INT_SOURCE_CSR_BEACON_DONE FIELD32(0x00000004) +#define INT_SOURCE_CSR_TX_ABORT_DONE FIELD32(0x00000010) +#define INT_SOURCE_CSR_AC0_DMA_DONE FIELD32(0x00010000) +#define INT_SOURCE_CSR_AC1_DMA_DONE FIELD32(0x00020000) +#define INT_SOURCE_CSR_AC2_DMA_DONE FIELD32(0x00040000) +#define INT_SOURCE_CSR_AC3_DMA_DONE FIELD32(0x00080000) +#define INT_SOURCE_CSR_MGMT_DMA_DONE FIELD32(0x00100000) +#define INT_SOURCE_CSR_HCCA_DMA_DONE FIELD32(0x00200000) + +/* + * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is mask OFF. 
+ * MITIGATION_PERIOD: Interrupt mitigation in unit of 32 PCI clock. + */ +#define INT_MASK_CSR 0x346c +#define INT_MASK_CSR_TXDONE FIELD32(0x00000001) +#define INT_MASK_CSR_RXDONE FIELD32(0x00000002) +#define INT_MASK_CSR_BEACON_DONE FIELD32(0x00000004) +#define INT_MASK_CSR_TX_ABORT_DONE FIELD32(0x00000010) +#define INT_MASK_CSR_ENABLE_MITIGATION FIELD32(0x00000080) +#define INT_MASK_CSR_MITIGATION_PERIOD FIELD32(0x0000ff00) +#define INT_MASK_CSR_AC0_DMA_DONE FIELD32(0x00010000) +#define INT_MASK_CSR_AC1_DMA_DONE FIELD32(0x00020000) +#define INT_MASK_CSR_AC2_DMA_DONE FIELD32(0x00040000) +#define INT_MASK_CSR_AC3_DMA_DONE FIELD32(0x00080000) +#define INT_MASK_CSR_MGMT_DMA_DONE FIELD32(0x00100000) +#define INT_MASK_CSR_HCCA_DMA_DONE FIELD32(0x00200000) + +/* + * E2PROM_CSR: EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE_93C46: 1: 93c46, 0:93c66. + * LOAD_STATUS: 1:loading, 0:done. + */ +#define E2PROM_CSR 0x3470 +#define E2PROM_CSR_RELOAD FIELD32(0x00000001) +#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000002) +#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000004) +#define E2PROM_CSR_DATA_IN FIELD32(0x00000008) +#define E2PROM_CSR_DATA_OUT FIELD32(0x00000010) +#define E2PROM_CSR_TYPE_93C46 FIELD32(0x00000020) +#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040) + +/* + * AC_TXOP_CSR0: AC_BK/AC_BE TXOP register. + * AC0_TX_OP: For AC_BK, in unit of 32us. + * AC1_TX_OP: For AC_BE, in unit of 32us. + */ +#define AC_TXOP_CSR0 0x3474 +#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000) + +/* + * AC_TXOP_CSR1: AC_VO/AC_VI TXOP register. + * AC2_TX_OP: For AC_VI, in unit of 32us. + * AC3_TX_OP: For AC_VO, in unit of 32us. + */ +#define AC_TXOP_CSR1 0x3478 +#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR1_AC3_TX_OP FIELD32(0xffff0000) + +/* + * DMA_STATUS_CSR + */ +#define DMA_STATUS_CSR 0x3480 + +/* + * TEST_MODE_CSR + */ +#define TEST_MODE_CSR 0x3484 + +/* + * UART0_TX_CSR + */ +#define UART0_TX_CSR 0x3488 + +/* + * UART0_RX_CSR + */ +#define UART0_RX_CSR 0x348c + +/* + * UART0_FRAME_CSR + */ +#define UART0_FRAME_CSR 0x3490 + +/* + * UART0_BUFFER_CSR + */ +#define UART0_BUFFER_CSR 0x3494 + +/* + * IO_CNTL_CSR + * RF_PS: Set RF interface value to power save + */ +#define IO_CNTL_CSR 0x3498 +#define IO_CNTL_CSR_RF_PS FIELD32(0x00000004) + +/* + * UART_INT_SOURCE_CSR + */ +#define UART_INT_SOURCE_CSR 0x34a8 + +/* + * UART_INT_MASK_CSR + */ +#define UART_INT_MASK_CSR 0x34ac + +/* + * PBF_QUEUE_CSR + */ +#define PBF_QUEUE_CSR 0x34b0 + +/* + * Firmware DMA registers. + * Firmware DMA registers are dedicated for MCU usage + * and should not be touched by host driver. + * Therefore we skip the definition of these registers. + */ +#define FW_TX_BASE_CSR 0x34c0 +#define FW_TX_START_CSR 0x34c4 +#define FW_TX_LAST_CSR 0x34c8 +#define FW_MODE_CNTL_CSR 0x34cc +#define FW_TXPTR_CSR 0x34d0 + +/* + * 8051 firmware image. + */ +#define FIRMWARE_RT2561 "rt2561.bin" +#define FIRMWARE_RT2561s "rt2561s.bin" +#define FIRMWARE_RT2661 "rt2661.bin" +#define FIRMWARE_IMAGE_BASE 0x4000 + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R2 + */ +#define BBP_R2_BG_MODE FIELD8(0x20) + +/* + * R3 + */ +#define BBP_R3_SMART_MODE FIELD8(0x01) + +/* + * R4: RX antenna control + * FRAME_END: 1 - DPDT, 0 - SPDT (Only valid for 802.11G, RF2527 & RF2529) + */ + +/* + * ANTENNA_CONTROL semantics (guessed): + * 0x1: Software controlled antenna switching (fixed or SW diversity) + * 0x2: Hardware diversity. 
+ */ +#define BBP_R4_RX_ANTENNA_CONTROL FIELD8(0x03) +#define BBP_R4_RX_FRAME_END FIELD8(0x20) + +/* + * R77 + */ +#define BBP_R77_RX_ANTENNA FIELD8(0x03) + +/* + * RF registers + */ + +/* + * RF 3 + */ +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * RF 4 + */ +#define RF4_FREQ_OFFSET FIELD32(0x0003f000) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * FRAME_TYPE: 0: DPDT , 1: SPDT , noted this bit is valid for g only. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x0010 +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_FRAME_TYPE FIELD16(0x0040) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * ENABLE_DIVERSITY: 1:enable, 0:disable. + * EXTERNAL_LNA_BG: External LNA enable for 2.4G. + * CARDBUS_ACCEL: 0:enable, 1:disable. + * EXTERNAL_LNA_A: External LNA enable for 5G. + */ +#define EEPROM_NIC 0x0011 +#define EEPROM_NIC_ENABLE_DIVERSITY FIELD16(0x0001) +#define EEPROM_NIC_TX_DIVERSITY FIELD16(0x0002) +#define EEPROM_NIC_RX_FIXED FIELD16(0x0004) +#define EEPROM_NIC_TX_FIXED FIELD16(0x0008) +#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0010) +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0020) +#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0040) + +/* + * EEPROM geography. + * GEO_A: Default geographical setting for 5GHz band + * GEO: Default geographical setting. + */ +#define EEPROM_GEOGRAPHY 0x0012 +#define EEPROM_GEOGRAPHY_GEO_A FIELD16(0x00ff) +#define EEPROM_GEOGRAPHY_GEO FIELD16(0xff00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x0013 +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER 802.11G + */ +#define EEPROM_TXPOWER_G_START 0x0023 +#define EEPROM_TXPOWER_G_SIZE 7 +#define EEPROM_TXPOWER_G_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_G_2 FIELD16(0xff00) + +/* + * EEPROM Frequency + */ +#define EEPROM_FREQ 0x002f +#define EEPROM_FREQ_OFFSET FIELD16(0x00ff) +#define EEPROM_FREQ_SEQ_MASK FIELD16(0xff00) +#define EEPROM_FREQ_SEQ FIELD16(0x0300) + +/* + * EEPROM LED. + * POLARITY_RDY_G: Polarity RDY_G setting. + * POLARITY_RDY_A: Polarity RDY_A setting. + * POLARITY_ACT: Polarity ACT setting. + * POLARITY_GPIO_0: Polarity GPIO0 setting. + * POLARITY_GPIO_1: Polarity GPIO1 setting. + * POLARITY_GPIO_2: Polarity GPIO2 setting. + * POLARITY_GPIO_3: Polarity GPIO3 setting. + * POLARITY_GPIO_4: Polarity GPIO4 setting. + * LED_MODE: Led mode. 
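The EEPROM is read as an array of 16-bit words, and the FIELD16() masks above describe how several settings are packed into each word. A small sketch of pulling one such setting out, assuming the generic rt2x00_eeprom_read() and rt2x00_get_field16() helpers that the rest of the rt2x00 code relies on:

static u16 example_read_rf_type(struct rt2x00_dev *rt2x00dev)
{
	u16 eeprom;

	/* EEPROM_ANTENNA is word 0x0010; RF_TYPE sits in its upper bits. */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
	return rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
}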
+ */ +#define EEPROM_LED 0x0030 +#define EEPROM_LED_POLARITY_RDY_G FIELD16(0x0001) +#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002) +#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004) +#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008) +#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010) +#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020) +#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040) +#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080) +#define EEPROM_LED_LED_MODE FIELD16(0x1f00) + +/* + * EEPROM TXPOWER 802.11A + */ +#define EEPROM_TXPOWER_A_START 0x0031 +#define EEPROM_TXPOWER_A_SIZE 12 +#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_A_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11BG + */ +#define EEPROM_RSSI_OFFSET_BG 0x004d +#define EEPROM_RSSI_OFFSET_BG_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_BG_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11A + */ +#define EEPROM_RSSI_OFFSET_A 0x004e +#define EEPROM_RSSI_OFFSET_A_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_A_2 FIELD16(0xff00) + +/* + * MCU mailbox commands. + */ +#define MCU_SLEEP 0x30 +#define MCU_WAKEUP 0x31 +#define MCU_LED 0x50 +#define MCU_LED_STRENGTH 0x52 + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 16 * sizeof(__le32) ) +#define TXINFO_SIZE ( 6 * sizeof(__le32) ) +#define RXD_DESC_SIZE ( 16 * sizeof(__le32) ) + +/* + * TX descriptor format for TX, PRIO and Beacon Ring. + */ + +/* + * Word0 + * TKIP_MIC: ASIC appends TKIP MIC if TKIP is used. + * KEY_TABLE: Use per-client pairwise KEY table. + * KEY_INDEX: + * Key index (0~31) to the pairwise KEY table. + * 0~3 to shared KEY table 0 (BSS0). + * 4~7 to shared KEY table 1 (BSS1). + * 8~11 to shared KEY table 2 (BSS2). + * 12~15 to shared KEY table 3 (BSS3). + * BURST: Next frame belongs to same "burst" event. + */ +#define TXD_W0_OWNER_NIC FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_MORE_FRAG FIELD32(0x00000004) +#define TXD_W0_ACK FIELD32(0x00000008) +#define TXD_W0_TIMESTAMP FIELD32(0x00000010) +#define TXD_W0_OFDM FIELD32(0x00000020) +#define TXD_W0_IFS FIELD32(0x00000040) +#define TXD_W0_RETRY_MODE FIELD32(0x00000080) +#define TXD_W0_TKIP_MIC FIELD32(0x00000100) +#define TXD_W0_KEY_TABLE FIELD32(0x00000200) +#define TXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_BURST FIELD32(0x10000000) +#define TXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + * HOST_Q_ID: EDCA/HCCA queue ID. + * HW_SEQUENCE: MAC overwrites the frame sequence number. + * BUFFER_COUNT: Number of buffers in this TXD. + */ +#define TXD_W1_HOST_Q_ID FIELD32(0x0000000f) +#define TXD_W1_AIFSN FIELD32(0x000000f0) +#define TXD_W1_CWMIN FIELD32(0x00000f00) +#define TXD_W1_CWMAX FIELD32(0x0000f000) +#define TXD_W1_IV_OFFSET FIELD32(0x003f0000) +#define TXD_W1_PIGGY_BACK FIELD32(0x01000000) +#define TXD_W1_HW_SEQUENCE FIELD32(0x10000000) +#define TXD_W1_BUFFER_COUNT FIELD32(0xe0000000) + +/* + * Word2: PLCP information + */ +#define TXD_W2_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W2_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W2_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W2_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word3 + */ +#define TXD_W3_IV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define TXD_W4_EIV FIELD32(0xffffffff) + +/* + * Word5 + * FRAME_OFFSET: Frame start offset inside ASIC TXFIFO (after TXINFO field). + * TXD_W5_PID_SUBTYPE: Driver assigned packet ID index for txdone handler. 
+ * TXD_W5_PID_TYPE: Driver assigned packet ID type for txdone handler. + * WAITING_DMA_DONE_INT: TXD been filled with data + * and waiting for TxDoneISR housekeeping. + */ +#define TXD_W5_FRAME_OFFSET FIELD32(0x000000ff) +#define TXD_W5_PID_SUBTYPE FIELD32(0x00001f00) +#define TXD_W5_PID_TYPE FIELD32(0x0000e000) +#define TXD_W5_TX_POWER FIELD32(0x00ff0000) +#define TXD_W5_WAITING_DMA_DONE_INT FIELD32(0x01000000) + +/* + * the above 24-byte is called TXINFO and will be DMAed to MAC block + * through TXFIFO. MAC block use this TXINFO to control the transmission + * behavior of this frame. + * The following fields are not used by MAC block. + * They are used by DMA block and HOST driver only. + * Once a frame has been DMA to ASIC, all the following fields are useless + * to ASIC. + */ + +/* + * Word6-10: Buffer physical address + */ +#define TXD_W6_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W7_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W8_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W9_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W10_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) + +/* + * Word11-13: Buffer length + */ +#define TXD_W11_BUFFER_LENGTH0 FIELD32(0x00000fff) +#define TXD_W11_BUFFER_LENGTH1 FIELD32(0x0fff0000) +#define TXD_W12_BUFFER_LENGTH2 FIELD32(0x00000fff) +#define TXD_W12_BUFFER_LENGTH3 FIELD32(0x0fff0000) +#define TXD_W13_BUFFER_LENGTH4 FIELD32(0x00000fff) + +/* + * Word14 + */ +#define TXD_W14_SK_BUFFER FIELD32(0xffffffff) + +/* + * Word15 + */ +#define TXD_W15_NEXT_SK_BUFFER FIELD32(0xffffffff) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + * CIPHER_ERROR: 1:ICV error, 2:MIC error, 3:invalid key. + * KEY_INDEX: Decryption key actually used. + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_DROP FIELD32(0x00000002) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000004) +#define RXD_W0_MULTICAST FIELD32(0x00000008) +#define RXD_W0_BROADCAST FIELD32(0x00000010) +#define RXD_W0_MY_BSS FIELD32(0x00000020) +#define RXD_W0_CRC_ERROR FIELD32(0x00000040) +#define RXD_W0_OFDM FIELD32(0x00000080) +#define RXD_W0_CIPHER_ERROR FIELD32(0x00000300) +#define RXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define RXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + * SIGNAL: RX raw data rate reported by BBP. + */ +#define RXD_W1_SIGNAL FIELD32(0x000000ff) +#define RXD_W1_RSSI_AGC FIELD32(0x00001f00) +#define RXD_W1_RSSI_LNA FIELD32(0x00006000) +#define RXD_W1_FRAME_OFFSET FIELD32(0x7f000000) + +/* + * Word2 + * IV: Received IV of originally encrypted. + */ +#define RXD_W2_IV FIELD32(0xffffffff) + +/* + * Word3 + * EIV: Received EIV of originally encrypted. + */ +#define RXD_W3_EIV FIELD32(0xffffffff) + +/* + * Word4 + * ICV: Received ICV of originally encrypted. + * NOTE: This is a guess, the official definition is "reserved" + */ +#define RXD_W4_ICV FIELD32(0xffffffff) + +/* + * the above 20-byte is called RXINFO and will be DMAed to MAC RX block + * and passed to the HOST driver. + * The following fields are for DMA block and HOST usage only. + * Can't be touched by ASIC MAC block. 
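During rxdone handling the driver translates the completed descriptor words above into the RX status that rt2x00lib expects. A simplified, stand-alone sketch of decoding the first two RX descriptor words (it assumes the words were already converted to CPU byte order; the real fill_rxdone callback additionally handles cipher status and RSSI conversion):

struct example_rx_status {
	unsigned int size;	/* frame length in bytes */
	bool crc_error;
	u8 signal;		/* raw rate/signal value reported by the BBP */
};

static void example_decode_rxd(u32 word0, u32 word1,
			       struct example_rx_status *rx)
{
	rx->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
	rx->crc_error = rt2x00_get_field32(word0, RXD_W0_CRC_ERROR);
	rx->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
}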
+ */ + +/* + * Word5 + */ +#define RXD_W5_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) + +/* + * Word6-15: Reserved + */ +#define RXD_W6_RESERVED FIELD32(0xffffffff) +#define RXD_W7_RESERVED FIELD32(0xffffffff) +#define RXD_W8_RESERVED FIELD32(0xffffffff) +#define RXD_W9_RESERVED FIELD32(0xffffffff) +#define RXD_W10_RESERVED FIELD32(0xffffffff) +#define RXD_W11_RESERVED FIELD32(0xffffffff) +#define RXD_W12_RESERVED FIELD32(0xffffffff) +#define RXD_W13_RESERVED FIELD32(0xffffffff) +#define RXD_W14_RESERVED FIELD32(0xffffffff) +#define RXD_W15_RESERVED FIELD32(0xffffffff) + +/* + * Macros for converting txpower from EEPROM to mac80211 value + * and from mac80211 value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ + (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower) + +#define TXPOWER_TO_DEV(__txpower) \ + clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER) + +#endif /* RT61PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c new file mode 100644 index 0000000..290d70b --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -0,0 +1,2461 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt73usb + Abstract: rt73usb device specific routines. + Supported chipsets: rt2571W & rt2671. + */ + +#include <linux/crc-itu-t.h> +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> + +#include "rt2x00.h" +#include "rt2x00usb.h" +#include "rt73usb.h" + +/* + * Allow hardware encryption to be disabled. + */ +static int modparam_nohwcrypt = 0; +module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt2x00usb_register_read and rt2x00usb_register_write. + * BBP and RF registers require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. + * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attempt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error.
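In outline, the busy-wait behind the WAIT_FOR_BBP()/WAIT_FOR_RF() macros below behaves roughly as sketched here. This is only an illustration of the loop described in the comment above, not the library code, and it assumes a locked register accessor named rt2x00usb_register_read_lock():

static int example_wait_for_bbp(struct rt2x00_dev *rt2x00dev, u32 *reg)
{
	unsigned int i;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		/* Read PHY_CSR3 and test its BUSY bit. */
		rt2x00usb_register_read_lock(rt2x00dev, PHY_CSR3, reg);
		if (!rt2x00_get_field32(*reg, PHY_CSR3_BUSY))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	return 0;
}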
+ * The _lock versions must be used if you already hold the csr_mutex + */ +#define WAIT_FOR_BBP(__dev, __reg) \ + rt2x00usb_regbusy_read((__dev), PHY_CSR3, PHY_CSR3_BUSY, (__reg)) +#define WAIT_FOR_RF(__dev, __reg) \ + rt2x00usb_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg)) + +static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the BBP becomes available, afterwards we + * can safely write the new data into the register. + */ + if (WAIT_FOR_BBP(rt2x00dev, &reg)) { + reg = 0; + rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value); + rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); + rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); + rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0); + + rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR3, reg); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the BBP becomes available, afterwards we + * can safely write the read request into the register. + * After the data has been written, we wait until hardware + * returns the correct value, if at any time the register + * doesn't become available in time, reg will be 0xffffffff + * which means we return 0xff to the caller. + */ + if (WAIT_FOR_BBP(rt2x00dev, &reg)) { + reg = 0; + rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); + rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); + rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1); + + rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR3, reg); + + WAIT_FOR_BBP(rt2x00dev, &reg); + } + + *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + + mutex_lock(&rt2x00dev->csr_mutex); + + /* + * Wait until the RF becomes available, afterwards we + * can safely write the new data into the register. + */ + if (WAIT_FOR_RF(rt2x00dev, &reg)) { + reg = 0; + rt2x00_set_field32(&reg, PHY_CSR4_VALUE, value); + /* + * RF5225 and RF2527 contain 21 bits per RF register value, + * all others contain 20 bits.
+ */ + rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS, + 20 + (rt2x00_rf(rt2x00dev, RF5225) || + rt2x00_rf(rt2x00dev, RF2527))); + rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0); + rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1); + + rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR4, reg); + rt2x00_rf_write(rt2x00dev, word, value); + } + + mutex_unlock(&rt2x00dev->csr_mutex); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +static const struct rt2x00debug rt73usb_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt2x00usb_register_read, + .write = rt2x00usb_register_write, + .flags = RT2X00DEBUGFS_OFFSET, + .word_base = CSR_REG_BASE, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_base = EEPROM_BASE, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt73usb_bbp_read, + .write = rt73usb_bbp_write, + .word_base = BBP_BASE, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt73usb_rf_write, + .word_base = RF_BASE, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +static int rt73usb_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg); + return rt2x00_get_field32(reg, MAC_CSR13_BIT7); +} + +#ifdef CONFIG_RT2X00_LIB_LEDS +static void rt73usb_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + unsigned int enabled = brightness != LED_OFF; + unsigned int a_mode = + (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ); + unsigned int bg_mode = + (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ); + + if (led->type == LED_TYPE_RADIO) { + rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, + MCU_LEDCS_RADIO_STATUS, enabled); + + rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL, + 0, led->rt2x00dev->led_mcu_reg, + REGISTER_TIMEOUT); + } else if (led->type == LED_TYPE_ASSOC) { + rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, + MCU_LEDCS_LINK_BG_STATUS, bg_mode); + rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, + MCU_LEDCS_LINK_A_STATUS, a_mode); + + rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL, + 0, led->rt2x00dev->led_mcu_reg, + REGISTER_TIMEOUT); + } else if (led->type == LED_TYPE_QUALITY) { + /* + * The brightness is divided into 6 levels (0 - 5), + * this means we need to convert the brightness + * argument into the matching level within that range.
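As a quick worked example of that conversion, assuming the usual LED_FULL value of 255: LED_FULL / 6 is 42, so a requested brightness of 127 is passed to the MCU as level 127 / 42 = 3, while LED_OFF (0) maps to level 0.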
+ */ + rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL, + brightness / (LED_FULL / 6), + led->rt2x00dev->led_mcu_reg, + REGISTER_TIMEOUT); + } +} + +static int rt73usb_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct rt2x00_led *led = + container_of(led_cdev, struct rt2x00_led, led_dev); + u32 reg; + + rt2x00usb_register_read(led->rt2x00dev, MAC_CSR14, &reg); + rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on); + rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off); + rt2x00usb_register_write(led->rt2x00dev, MAC_CSR14, reg); + + return 0; +} + +static void rt73usb_init_led(struct rt2x00_dev *rt2x00dev, + struct rt2x00_led *led, + enum led_type type) +{ + led->rt2x00dev = rt2x00dev; + led->type = type; + led->led_dev.brightness_set = rt73usb_brightness_set; + led->led_dev.blink_set = rt73usb_blink_set; + led->flags = LED_INITIALIZED; +} +#endif /* CONFIG_RT2X00_LIB_LEDS */ + +/* + * Configuration handlers. + */ +static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key) +{ + struct hw_key_entry key_entry; + struct rt2x00_field32 field; + int timeout; + u32 mask; + u32 reg; + + if (crypto->cmd == SET_KEY) { + /* + * rt2x00lib can't determine the correct free + * key_idx for shared keys. We have 1 register + * with key valid bits. The goal is simple, read + * the register, if that is full we have no slots + * left. + * Note that each BSS is allowed to have up to 4 + * shared keys, so put a mask over the allowed + * entries. + */ + mask = (0xf << crypto->bssidx); + + rt2x00usb_register_read(rt2x00dev, SEC_CSR0, &reg); + reg &= mask; + + if (reg && reg == mask) + return -ENOSPC; + + key->hw_key_idx += reg ? ffz(reg) : 0; + + /* + * Upload key to hardware + */ + memcpy(key_entry.key, crypto->key, + sizeof(key_entry.key)); + memcpy(key_entry.tx_mic, crypto->tx_mic, + sizeof(key_entry.tx_mic)); + memcpy(key_entry.rx_mic, crypto->rx_mic, + sizeof(key_entry.rx_mic)); + + reg = SHARED_KEY_ENTRY(key->hw_key_idx); + timeout = REGISTER_TIMEOUT32(sizeof(key_entry)); + rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, reg, + &key_entry, + sizeof(key_entry), + timeout); + + /* + * The cipher types are stored over 2 registers. + * bssidx 0 and 1 keys are stored in SEC_CSR1 and + * bssidx 2 and 3 keys are stored in SEC_CSR5. + * Using the individual field defines would cause overhead, + * so just calculate the correct offset. + */ + if (key->hw_key_idx < 8) { + field.bit_offset = (3 * key->hw_key_idx); + field.bit_mask = 0x7 << field.bit_offset; + + rt2x00usb_register_read(rt2x00dev, SEC_CSR1, &reg); + rt2x00_set_field32(&reg, field, crypto->cipher); + rt2x00usb_register_write(rt2x00dev, SEC_CSR1, reg); + } else { + field.bit_offset = (3 * (key->hw_key_idx - 8)); + field.bit_mask = 0x7 << field.bit_offset; + + rt2x00usb_register_read(rt2x00dev, SEC_CSR5, &reg); + rt2x00_set_field32(&reg, field, crypto->cipher); + rt2x00usb_register_write(rt2x00dev, SEC_CSR5, reg); + } + + /* + * The driver does not support the IV/EIV generation + * in hardware. However it doesn't support the IV/EIV + * inside the ieee80211 frame either, but requires it + * to be provided separately for the descriptor. + * rt2x00lib will cut the IV/EIV data out of all frames + * given to us by mac80211, but we must tell mac80211 + * to generate the IV/EIV data.
+ */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } + + /* + * SEC_CSR0 contains only single-bit fields to indicate + * a particular key is valid. Because using the FIELD32() + * defines directly will cause a lot of overhead we use + * a calculation to determine the correct bit directly. + */ + mask = 1 << key->hw_key_idx; + + rt2x00usb_register_read(rt2x00dev, SEC_CSR0, ®); + if (crypto->cmd == SET_KEY) + reg |= mask; + else if (crypto->cmd == DISABLE_KEY) + reg &= ~mask; + rt2x00usb_register_write(rt2x00dev, SEC_CSR0, reg); + + return 0; +} + +static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_crypto *crypto, + struct ieee80211_key_conf *key) +{ + struct hw_pairwise_ta_entry addr_entry; + struct hw_key_entry key_entry; + int timeout; + u32 mask; + u32 reg; + + if (crypto->cmd == SET_KEY) { + /* + * rt2x00lib can't determine the correct free + * key_idx for pairwise keys. We have 2 registers + * with key valid bits. The goal is simple, read + * the first register, if that is full move to + * the next register. + * When both registers are full, we drop the key, + * otherwise we use the first invalid entry. + */ + rt2x00usb_register_read(rt2x00dev, SEC_CSR2, ®); + if (reg && reg == ~0) { + key->hw_key_idx = 32; + rt2x00usb_register_read(rt2x00dev, SEC_CSR3, ®); + if (reg && reg == ~0) + return -ENOSPC; + } + + key->hw_key_idx += reg ? ffz(reg) : 0; + + /* + * Upload key to hardware + */ + memcpy(key_entry.key, crypto->key, + sizeof(key_entry.key)); + memcpy(key_entry.tx_mic, crypto->tx_mic, + sizeof(key_entry.tx_mic)); + memcpy(key_entry.rx_mic, crypto->rx_mic, + sizeof(key_entry.rx_mic)); + + reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx); + timeout = REGISTER_TIMEOUT32(sizeof(key_entry)); + rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, reg, + &key_entry, + sizeof(key_entry), + timeout); + + /* + * Send the address and cipher type to the hardware register. + * This data fits within the CSR cache size, so we can use + * rt2x00usb_register_multiwrite() directly. + */ + memset(&addr_entry, 0, sizeof(addr_entry)); + memcpy(&addr_entry, crypto->address, ETH_ALEN); + addr_entry.cipher = crypto->cipher; + + reg = PAIRWISE_TA_ENTRY(key->hw_key_idx); + rt2x00usb_register_multiwrite(rt2x00dev, reg, + &addr_entry, sizeof(addr_entry)); + + /* + * Enable pairwise lookup table for given BSS idx, + * without this received frames will not be decrypted + * by the hardware. + */ + rt2x00usb_register_read(rt2x00dev, SEC_CSR4, ®); + reg |= (1 << crypto->bssidx); + rt2x00usb_register_write(rt2x00dev, SEC_CSR4, reg); + + /* + * The driver does not support the IV/EIV generation + * in hardware. However it doesn't support the IV/EIV + * inside the ieee80211 frame either, but requires it + * to be provided separately for the descriptor. + * rt2x00lib will cut the IV/EIV data out of all frames + * given to us by mac80211, but we must tell mac80211 + * to generate the IV/EIV data. + */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } + + /* + * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate + * a particular key is valid. Because using the FIELD32() + * defines directly will cause a lot of overhead we use + * a calculation to determine the correct bit directly. 
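+	 * For example, hw_key_idx 5 maps to bit 5 of SEC_CSR2,
+	 * while hw_key_idx 35 maps to bit 3 of SEC_CSR3.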
+ */ + if (key->hw_key_idx < 32) { + mask = 1 << key->hw_key_idx; + + rt2x00usb_register_read(rt2x00dev, SEC_CSR2, ®); + if (crypto->cmd == SET_KEY) + reg |= mask; + else if (crypto->cmd == DISABLE_KEY) + reg &= ~mask; + rt2x00usb_register_write(rt2x00dev, SEC_CSR2, reg); + } else { + mask = 1 << (key->hw_key_idx - 32); + + rt2x00usb_register_read(rt2x00dev, SEC_CSR3, ®); + if (crypto->cmd == SET_KEY) + reg |= mask; + else if (crypto->cmd == DISABLE_KEY) + reg &= ~mask; + rt2x00usb_register_write(rt2x00dev, SEC_CSR3, reg); + } + + return 0; +} + +static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev, + const unsigned int filter_flags) +{ + u32 reg; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. + */ + rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_DROP_CRC, + !(filter_flags & FIF_FCSFAIL)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_PHYSICAL, + !(filter_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_CONTROL, + !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); + rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, + !(filter_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_TO_DS, + !(filter_flags & FIF_PROMISC_IN_BSS) && + !rt2x00dev->intf_ap_count); + rt2x00_set_field32(®, TXRX_CSR0_DROP_VERSION_ERROR, 1); + rt2x00_set_field32(®, TXRX_CSR0_DROP_MULTICAST, + !(filter_flags & FIF_ALLMULTI)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_BROADCAST, 0); + rt2x00_set_field32(®, TXRX_CSR0_DROP_ACK_CTS, + !(filter_flags & FIF_CONTROL)); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev, + struct rt2x00_intf *intf, + struct rt2x00intf_conf *conf, + const unsigned int flags) +{ + unsigned int beacon_base; + u32 reg; + + if (flags & CONFIG_UPDATE_TYPE) { + /* + * Clear current synchronisation setup. + * For the Beacon base registers we only need to clear + * the first byte since that byte contains the VALID and OWNER + * bits which (when set to 0) will invalidate the entire beacon. + */ + beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); + rt2x00usb_register_write(rt2x00dev, beacon_base, 0); + + /* + * Enable synchronisation. 
+ */ + rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); + rt2x00_set_field32(®, TXRX_CSR9_TSF_SYNC, conf->sync); + rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); + } + + if (flags & CONFIG_UPDATE_MAC) { + reg = le32_to_cpu(conf->mac[1]); + rt2x00_set_field32(®, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); + conf->mac[1] = cpu_to_le32(reg); + + rt2x00usb_register_multiwrite(rt2x00dev, MAC_CSR2, + conf->mac, sizeof(conf->mac)); + } + + if (flags & CONFIG_UPDATE_BSSID) { + reg = le32_to_cpu(conf->bssid[1]); + rt2x00_set_field32(®, MAC_CSR5_BSS_ID_MASK, 3); + conf->bssid[1] = cpu_to_le32(reg); + + rt2x00usb_register_multiwrite(rt2x00dev, MAC_CSR4, + conf->bssid, sizeof(conf->bssid)); + } +} + +static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_erp *erp) +{ + u32 reg; + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32); + rt2x00_set_field32(®, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_ENABLE, 1); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_PREAMBLE, + !!erp->short_preamble); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg); + + rt2x00usb_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates); + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_INTERVAL, + erp->beacon_int * 16); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); + + rt2x00usb_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_SLOT_TIME, erp->slot_time); + rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg); + + rt2x00usb_register_read(rt2x00dev, MAC_CSR8, ®); + rt2x00_set_field32(®, MAC_CSR8_SIFS, erp->sifs); + rt2x00_set_field32(®, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); + rt2x00_set_field32(®, MAC_CSR8_EIFS, erp->eifs); + rt2x00usb_register_write(rt2x00dev, MAC_CSR8, reg); +} + +static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev, + struct antenna_setup *ant) +{ + u8 r3; + u8 r4; + u8 r77; + u8 temp; + + rt73usb_bbp_read(rt2x00dev, 3, &r3); + rt73usb_bbp_read(rt2x00dev, 4, &r4); + rt73usb_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); + + /* + * Configure the RX antenna. 
+ */ + switch (ant->rx) { + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); + temp = !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags) + && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); + else + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); + break; + case ANTENNA_B: + default: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); + else + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); + break; + } + + rt73usb_bbp_write(rt2x00dev, 77, r77); + rt73usb_bbp_write(rt2x00dev, 3, r3); + rt73usb_bbp_write(rt2x00dev, 4, r4); +} + +static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev, + struct antenna_setup *ant) +{ + u8 r3; + u8 r4; + u8 r77; + + rt73usb_bbp_read(rt2x00dev, 3, &r3); + rt73usb_bbp_read(rt2x00dev, 4, &r4); + rt73usb_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); + + /* + * Configure the RX antenna. + */ + switch (ant->rx) { + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); + break; + case ANTENNA_A: + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); + break; + case ANTENNA_B: + default: + rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); + break; + } + + rt73usb_bbp_write(rt2x00dev, 77, r77); + rt73usb_bbp_write(rt2x00dev, 3, r3); + rt73usb_bbp_write(rt2x00dev, 4, r4); +} + +struct antenna_sel { + u8 word; + /* + * value[0] -> non-LNA + * value[1] -> LNA + */ + u8 value[2]; +}; + +static const struct antenna_sel antenna_sel_a[] = { + { 96, { 0x58, 0x78 } }, + { 104, { 0x38, 0x48 } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x60, 0x60 } }, + { 97, { 0x58, 0x58 } }, + { 98, { 0x58, 0x58 } }, +}; + +static const struct antenna_sel antenna_sel_bg[] = { + { 96, { 0x48, 0x68 } }, + { 104, { 0x2c, 0x3c } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x50, 0x50 } }, + { 97, { 0x48, 0x48 } }, + { 98, { 0x48, 0x48 } }, +}; + +static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev, + struct antenna_setup *ant) +{ + const struct antenna_sel *sel; + unsigned int lna; + unsigned int i; + u32 reg; + + /* + * We should never come here because rt2x00lib is supposed + * to catch this and send us the correct antenna explicitely. 
+ */ + BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || + ant->tx == ANTENNA_SW_DIVERSITY); + + if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { + sel = antenna_sel_a; + lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + } else { + sel = antenna_sel_bg; + lna = test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + } + + for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) + rt73usb_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); + + rt2x00usb_register_read(rt2x00dev, PHY_CSR0, ®); + + rt2x00_set_field32(®, PHY_CSR0_PA_PE_BG, + (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)); + rt2x00_set_field32(®, PHY_CSR0_PA_PE_A, + (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)); + + rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg); + + if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225)) + rt73usb_config_antenna_5x(rt2x00dev, ant); + else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527)) + rt73usb_config_antenna_2x(rt2x00dev, ant); +} + +static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u16 eeprom; + short lna_gain = 0; + + if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) { + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) + lna_gain += 14; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom); + lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); + } else { + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom); + lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1); + } + + rt2x00dev->lna_gain = lna_gain; +} + +static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + u8 r3; + u8 r94; + u8 smart; + + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); + + smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527)); + + rt73usb_bbp_read(rt2x00dev, 3, &r3); + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); + rt73usb_bbp_write(rt2x00dev, 3, r3); + + r94 = 6; + if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94)) + r94 += txpower - MAX_TXPOWER; + else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94)) + r94 += txpower; + rt73usb_bbp_write(rt2x00dev, 94, r94); + + rt73usb_rf_write(rt2x00dev, 1, rf->rf1); + rt73usb_rf_write(rt2x00dev, 2, rf->rf2); + rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt73usb_rf_write(rt2x00dev, 4, rf->rf4); + + rt73usb_rf_write(rt2x00dev, 1, rf->rf1); + rt73usb_rf_write(rt2x00dev, 2, rf->rf2); + rt73usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004); + rt73usb_rf_write(rt2x00dev, 4, rf->rf4); + + rt73usb_rf_write(rt2x00dev, 1, rf->rf1); + rt73usb_rf_write(rt2x00dev, 2, rf->rf2); + rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt73usb_rf_write(rt2x00dev, 4, rf->rf4); + + udelay(10); +} + +static void rt73usb_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + struct rf_channel rf; + + rt2x00_rf_read(rt2x00dev, 1, &rf.rf1); + rt2x00_rf_read(rt2x00dev, 2, &rf.rf2); + rt2x00_rf_read(rt2x00dev, 3, &rf.rf3); + rt2x00_rf_read(rt2x00dev, 4, &rf.rf4); + + rt73usb_config_channel(rt2x00dev, &rf, txpower); +} + +static void rt73usb_config_retry_limit(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_LONG_RETRY_LIMIT, + libconf->conf->long_frame_max_tx_count); + rt2x00_set_field32(®, TXRX_CSR4_SHORT_RETRY_LIMIT, + 
libconf->conf->short_frame_max_tx_count); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg); +} + +static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + enum dev_state state = + (libconf->conf->flags & IEEE80211_CONF_PS) ? + STATE_SLEEP : STATE_AWAKE; + u32 reg; + + if (state == STATE_SLEEP) { + rt2x00usb_register_read(rt2x00dev, MAC_CSR11, ®); + rt2x00_set_field32(®, MAC_CSR11_DELAY_AFTER_TBCN, + rt2x00dev->beacon_int - 10); + rt2x00_set_field32(®, MAC_CSR11_TBCN_BEFORE_WAKEUP, + libconf->conf->listen_interval - 1); + rt2x00_set_field32(®, MAC_CSR11_WAKEUP_LATENCY, 5); + + /* We must first disable autowake before it can be enabled */ + rt2x00_set_field32(®, MAC_CSR11_AUTOWAKE, 0); + rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); + + rt2x00_set_field32(®, MAC_CSR11_AUTOWAKE, 1); + rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, + USB_MODE_SLEEP, REGISTER_TIMEOUT); + } else { + rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, + USB_MODE_WAKEUP, REGISTER_TIMEOUT); + + rt2x00usb_register_read(rt2x00dev, MAC_CSR11, ®); + rt2x00_set_field32(®, MAC_CSR11_DELAY_AFTER_TBCN, 0); + rt2x00_set_field32(®, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0); + rt2x00_set_field32(®, MAC_CSR11_AUTOWAKE, 0); + rt2x00_set_field32(®, MAC_CSR11_WAKEUP_LATENCY, 0); + rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); + } +} + +static void rt73usb_config(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf, + const unsigned int flags) +{ + /* Always recalculate LNA gain before changing configuration */ + rt73usb_config_lna_gain(rt2x00dev, libconf); + + if (flags & IEEE80211_CONF_CHANGE_CHANNEL) + rt73usb_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & IEEE80211_CONF_CHANGE_POWER) && + !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) + rt73usb_config_txpower(rt2x00dev, libconf->conf->power_level); + if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) + rt73usb_config_retry_limit(rt2x00dev, libconf); + if (flags & IEEE80211_CONF_CHANGE_PS) + rt73usb_config_ps(rt2x00dev, libconf); +} + +/* + * Link tuning + */ +static void rt73usb_link_stats(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual) +{ + u32 reg; + + /* + * Update FCS error count from register. + */ + rt2x00usb_register_read(rt2x00dev, STA_CSR0, ®); + qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2x00usb_register_read(rt2x00dev, STA_CSR1, ®); + qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); +} + +static inline void rt73usb_set_vgc(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual, u8 vgc_level) +{ + if (qual->vgc_level != vgc_level) { + rt73usb_bbp_write(rt2x00dev, 17, vgc_level); + qual->vgc_level = vgc_level; + qual->vgc_level_reg = vgc_level; + } +} + +static void rt73usb_reset_tuner(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual) +{ + rt73usb_set_vgc(rt2x00dev, qual, 0x20); +} + +static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev, + struct link_qual *qual, const u32 count) +{ + u8 up_bound; + u8 low_bound; + + /* + * Determine r17 bounds. 
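+	 * (r17 refers to BBP register 17, the VGC level that
+	 * rt73usb_set_vgc() writes.)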
+ */ + if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { + low_bound = 0x28; + up_bound = 0x48; + + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { + low_bound += 0x10; + up_bound += 0x10; + } + } else { + if (qual->rssi > -82) { + low_bound = 0x1c; + up_bound = 0x40; + } else if (qual->rssi > -84) { + low_bound = 0x1c; + up_bound = 0x20; + } else { + low_bound = 0x1c; + up_bound = 0x1c; + } + + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { + low_bound += 0x14; + up_bound += 0x10; + } + } + + /* + * If we are not associated, we should go straight to the + * dynamic CCA tuning. + */ + if (!rt2x00dev->intf_associated) + goto dynamic_cca_tune; + + /* + * Special big-R17 for very short distance + */ + if (qual->rssi > -35) { + rt73usb_set_vgc(rt2x00dev, qual, 0x60); + return; + } + + /* + * Special big-R17 for short distance + */ + if (qual->rssi >= -58) { + rt73usb_set_vgc(rt2x00dev, qual, up_bound); + return; + } + + /* + * Special big-R17 for middle-short distance + */ + if (qual->rssi >= -66) { + rt73usb_set_vgc(rt2x00dev, qual, low_bound + 0x10); + return; + } + + /* + * Special mid-R17 for middle distance + */ + if (qual->rssi >= -74) { + rt73usb_set_vgc(rt2x00dev, qual, low_bound + 0x08); + return; + } + + /* + * Special case: Change up_bound based on the rssi. + * Lower up_bound when rssi is weaker then -74 dBm. + */ + up_bound -= 2 * (-74 - qual->rssi); + if (low_bound > up_bound) + up_bound = low_bound; + + if (qual->vgc_level > up_bound) { + rt73usb_set_vgc(rt2x00dev, qual, up_bound); + return; + } + +dynamic_cca_tune: + + /* + * r17 does not yet exceed upper limit, continue and base + * the r17 tuning on the false CCA count. + */ + if ((qual->false_cca > 512) && (qual->vgc_level < up_bound)) + rt73usb_set_vgc(rt2x00dev, qual, + min_t(u8, qual->vgc_level + 4, up_bound)); + else if ((qual->false_cca < 100) && (qual->vgc_level > low_bound)) + rt73usb_set_vgc(rt2x00dev, qual, + max_t(u8, qual->vgc_level - 4, low_bound)); +} + +/* + * Firmware functions + */ +static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) +{ + return FIRMWARE_RT2571; +} + +static int rt73usb_check_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len) +{ + u16 fw_crc; + u16 crc; + + /* + * Only support 2kb firmware files. + */ + if (len != 2048) + return FW_BAD_LENGTH; + + /* + * The last 2 bytes in the firmware array are the crc checksum itself, + * this means that we should never pass those 2 bytes to the crc + * algorithm. + */ + fw_crc = (data[len - 2] << 8 | data[len - 1]); + + /* + * Use the crc itu-t algorithm. + */ + crc = crc_itu_t(0, data, len - 2); + crc = crc_itu_t_byte(crc, 0); + crc = crc_itu_t_byte(crc, 0); + + return (fw_crc == crc) ? FW_OK : FW_BAD_CRC; +} + +static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, + const u8 *data, const size_t len) +{ + unsigned int i; + int status; + u32 reg; + + /* + * Wait for stable hardware. + */ + for (i = 0; i < 100; i++) { + rt2x00usb_register_read(rt2x00dev, MAC_CSR0, ®); + if (reg) + break; + msleep(1); + } + + if (!reg) { + ERROR(rt2x00dev, "Unstable hardware.\n"); + return -EBUSY; + } + + /* + * Write firmware to device. + */ + rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, + FIRMWARE_IMAGE_BASE, + data, len, + REGISTER_TIMEOUT32(len)); + + /* + * Send firmware request to device to load firmware, + * we need to specify a long timeout time. 
+ */ + status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, + 0, USB_MODE_FIRMWARE, + REGISTER_TIMEOUT_FIRMWARE); + if (status < 0) { + ERROR(rt2x00dev, "Failed to write Firmware to device.\n"); + return status; + } + + return 0; +} + +/* + * Initialization functions. + */ +static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_AUTO_TX_SEQ, 1); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, 0); + rt2x00_set_field32(®, TXRX_CSR0_TX_WITHOUT_WAITING, 0); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2, 42); /* OFDM Rate */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3_VALID, 1); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR1, reg); + + /* + * CCK TXD BBP registers + */ + rt2x00usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0, 13); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1, 12); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2, 11); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3, 10); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3_VALID, 1); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR2, reg); + + /* + * OFDM TXD BBP registers + */ + rt2x00usb_register_read(rt2x00dev, TXRX_CSR3, ®); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0, 7); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1, 6); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2, 5); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2_VALID, 1); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR3, reg); + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR7, ®); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_6MBS, 59); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_9MBS, 53); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_12MBS, 49); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_18MBS, 46); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR7, reg); + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR8, ®); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_24MBS, 44); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_36MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_48MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_54MBS, 42); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR8, reg); + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_INTERVAL, 0); + rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 0); + rt2x00_set_field32(®, TXRX_CSR9_TSF_SYNC, 0); + rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 0); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); + rt2x00_set_field32(®, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); + + rt2x00usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); + + rt2x00usb_register_read(rt2x00dev, MAC_CSR6, ®); + rt2x00_set_field32(®, MAC_CSR6_MAX_FRAME_UNIT, 0xfff); + rt2x00usb_register_write(rt2x00dev, MAC_CSR6, reg); + + rt2x00usb_register_write(rt2x00dev, MAC_CSR10, 
0x00000718);
+
+	if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
+		return -EBUSY;
+
+	rt2x00usb_register_write(rt2x00dev, MAC_CSR13, 0x00007f00);
+
+	/*
+	 * Invalidate all Shared Keys (SEC_CSR0),
+	 * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5)
+	 */
+	rt2x00usb_register_write(rt2x00dev, SEC_CSR0, 0x00000000);
+	rt2x00usb_register_write(rt2x00dev, SEC_CSR1, 0x00000000);
+	rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
+
+	reg = 0x000023b0;
+	if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))
+		rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
+	rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
+
+	rt2x00usb_register_write(rt2x00dev, PHY_CSR5, 0x00040a06);
+	rt2x00usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606);
+	rt2x00usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408);
+
+	rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg);
+	rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0);
+	rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg);
+
+	/*
+	 * Clear all beacons
+	 * For the Beacon base registers we only need to clear
+	 * the first byte since that byte contains the VALID and OWNER
+	 * bits which (when set to 0) will invalidate the entire beacon.
+	 */
+	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
+	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
+	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
+	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
+
+	/*
+	 * We must clear the error counters.
+	 * These registers are cleared on read,
+	 * so we may pass a useless variable to store the value.
+	 */
+	rt2x00usb_register_read(rt2x00dev, STA_CSR0, &reg);
+	rt2x00usb_register_read(rt2x00dev, STA_CSR1, &reg);
+	rt2x00usb_register_read(rt2x00dev, STA_CSR2, &reg);
+
+	/*
+	 * Reset MAC and BBP registers.
+ */ + rt2x00usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 1); + rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 0); + rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_HOST_READY, 1); + rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg); + + return 0; +} + +static int rt73usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt73usb_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + return 0; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; +} + +static int rt73usb_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + if (unlikely(rt73usb_wait_bbp_ready(rt2x00dev))) + return -EACCES; + + rt73usb_bbp_write(rt2x00dev, 3, 0x80); + rt73usb_bbp_write(rt2x00dev, 15, 0x30); + rt73usb_bbp_write(rt2x00dev, 21, 0xc8); + rt73usb_bbp_write(rt2x00dev, 22, 0x38); + rt73usb_bbp_write(rt2x00dev, 23, 0x06); + rt73usb_bbp_write(rt2x00dev, 24, 0xfe); + rt73usb_bbp_write(rt2x00dev, 25, 0x0a); + rt73usb_bbp_write(rt2x00dev, 26, 0x0d); + rt73usb_bbp_write(rt2x00dev, 32, 0x0b); + rt73usb_bbp_write(rt2x00dev, 34, 0x12); + rt73usb_bbp_write(rt2x00dev, 37, 0x07); + rt73usb_bbp_write(rt2x00dev, 39, 0xf8); + rt73usb_bbp_write(rt2x00dev, 41, 0x60); + rt73usb_bbp_write(rt2x00dev, 53, 0x10); + rt73usb_bbp_write(rt2x00dev, 54, 0x18); + rt73usb_bbp_write(rt2x00dev, 60, 0x10); + rt73usb_bbp_write(rt2x00dev, 61, 0x04); + rt73usb_bbp_write(rt2x00dev, 62, 0x04); + rt73usb_bbp_write(rt2x00dev, 75, 0xfe); + rt73usb_bbp_write(rt2x00dev, 86, 0xfe); + rt73usb_bbp_write(rt2x00dev, 88, 0xfe); + rt73usb_bbp_write(rt2x00dev, 90, 0x0f); + rt73usb_bbp_write(rt2x00dev, 99, 0x00); + rt73usb_bbp_write(rt2x00dev, 102, 0x16); + rt73usb_bbp_write(rt2x00dev, 107, 0x04); + + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + rt73usb_bbp_write(rt2x00dev, reg_id, value); + } + } + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt73usb_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, + (state == STATE_RADIO_RX_OFF) || + (state == STATE_RADIO_RX_OFF_LINK)); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. + */ + if (unlikely(rt73usb_init_registers(rt2x00dev) || + rt73usb_init_bbp(rt2x00dev))) + return -EIO; + + return 0; +} + +static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + rt2x00usb_register_write(rt2x00dev, MAC_CSR10, 0x00001818); + + /* + * Disable synchronisation. 
+ */ + rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, 0); + + rt2x00usb_disable_radio(rt2x00dev); +} + +static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + + put_to_sleep = (state != STATE_AWAKE); + + rt2x00usb_register_read(rt2x00dev, MAC_CSR12, ®); + rt2x00_set_field32(®, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); + rt2x00_set_field32(®, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); + rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00usb_register_read(rt2x00dev, MAC_CSR12, ®); + state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); + if (state == !put_to_sleep) + return 0; + msleep(10); + } + + return -EBUSY; +} + +static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt73usb_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt73usb_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_ON_LINK: + case STATE_RADIO_RX_OFF: + case STATE_RADIO_RX_OFF_LINK: + rt73usb_toggle_rx(rt2x00dev, state); + break; + case STATE_RADIO_IRQ_ON: + case STATE_RADIO_IRQ_OFF: + /* No support, but no error either */ + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt73usb_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + if (unlikely(retval)) + ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n", + state, retval); + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, + struct txentry_desc *txdesc) +{ + struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); + __le32 *txd = skbdesc->desc; + u32 word; + + /* + * Start writing the descriptor words. 
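+	 * Note that word 0, which carries the VALID bit, is written last.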
+ */ + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue); + rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); + rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); + rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); + rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); + rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, + test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); + rt2x00_desc_write(txd, 2, word); + + if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { + _rt2x00_desc_write(txd, 3, skbdesc->iv[0]); + _rt2x00_desc_write(txd, 4, skbdesc->iv[1]); + } + + rt2x00_desc_read(txd, 5, &word); + rt2x00_set_field32(&word, TXD_W5_TX_POWER, + TXPOWER_TO_DEV(rt2x00dev->tx_power)); + rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); + rt2x00_desc_write(txd, 5, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_BURST, + test_bit(ENTRY_TXD_BURST, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + test_bit(ENTRY_TXD_ACK, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + (txdesc->rate_mode == RATE_MODE_OFDM)); + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, + test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_KEY_TABLE, + test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); + rt2x00_set_field32(&word, TXD_W0_BURST2, + test_bit(ENTRY_TXD_BURST, &txdesc->flags)); + rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher); + rt2x00_desc_write(txd, 0, word); +} + +/* + * TX data initialization + */ +static void rt73usb_write_beacon(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + unsigned int beacon_base; + u32 reg; + + /* + * Add the descriptor in front of the skb. + */ + skb_push(entry->skb, entry->queue->desc_size); + memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len); + skbdesc->desc = entry->skb->data; + + /* + * Disable beaconing while we are reloading the beacon data, + * otherwise we might be sending out invalid data. + */ + rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); + + /* + * Write entire beacon with descriptor to register. + */ + beacon_base = HW_BEACON_OFFSET(entry->entry_idx); + rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, beacon_base, + entry->skb->data, entry->skb->len, + REGISTER_TIMEOUT32(entry->skb->len)); + + /* + * Clean up the beacon skb. 
+ */ + dev_kfree_skb(entry->skb); + entry->skb = NULL; +} + +static int rt73usb_get_tx_data_len(struct queue_entry *entry) +{ + int length; + + /* + * The length _must_ be a multiple of 4, + * but it must _not_ be a multiple of the USB packet size. + */ + length = roundup(entry->skb->len, 4); + length += (4 * !(length % entry->queue->usb_maxpacket)); + + return length; +} + +static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue) +{ + u32 reg; + + if (queue != QID_BEACON) { + rt2x00usb_kick_tx_queue(rt2x00dev, queue); + return; + } + + /* + * For Wi-Fi faily generated beacons between participating stations. + * Set TBTT phase adaptive adjustment step to 8us (default 16us) + */ + rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, ®); + if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) { + rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); + rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 1); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); + } +} + +/* + * RX control handlers + */ +static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) +{ + u8 offset = rt2x00dev->lna_gain; + u8 lna; + + lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); + switch (lna) { + case 3: + offset += 90; + break; + case 2: + offset += 74; + break; + case 1: + offset += 64; + break; + default: + return 0; + } + + if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { + if (lna == 3 || lna == 2) + offset += 10; + } else { + if (lna == 3) + offset += 6; + else if (lna == 2) + offset += 8; + } + } + + return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; +} + +static void rt73usb_fill_rxdone(struct queue_entry *entry, + struct rxdone_entry_desc *rxdesc) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); + __le32 *rxd = (__le32 *)entry->skb->data; + u32 word0; + u32 word1; + + /* + * Copy descriptor to the skbdesc->desc buffer, making it safe from moving of + * frame data in rt2x00usb. + */ + memcpy(skbdesc->desc, rxd, skbdesc->desc_len); + rxd = (__le32 *)skbdesc->desc; + + /* + * It is now safe to read the descriptor on all architectures. + */ + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 1, &word1); + + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; + + if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { + rxdesc->cipher = + rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG); + rxdesc->cipher_status = + rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR); + } + + if (rxdesc->cipher != CIPHER_NONE) { + _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]); + _rt2x00_desc_read(rxd, 3, &rxdesc->iv[1]); + rxdesc->dev_flags |= RXDONE_CRYPTO_IV; + + _rt2x00_desc_read(rxd, 4, &rxdesc->icv); + rxdesc->dev_flags |= RXDONE_CRYPTO_ICV; + + /* + * Hardware has stripped IV/EIV data from 802.11 frame during + * decryption. It has provided the data separately but rt2x00lib + * should decide if it should be reinserted. + */ + rxdesc->flags |= RX_FLAG_IV_STRIPPED; + + /* + * FIXME: Legacy driver indicates that the frame does + * contain the Michael Mic. Unfortunately, in rt2x00 + * the MIC seems to be missing completely... 
+ */ + rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; + + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) + rxdesc->flags |= RX_FLAG_DECRYPTED; + else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) + rxdesc->flags |= RX_FLAG_MMIC_ERROR; + } + + /* + * Obtain the status about this packet. + * When frame was received with an OFDM bitrate, + * the signal is the PLCP value. If it was received with + * a CCK bitrate the signal is the rate in 100kbit/s. + */ + rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); + rxdesc->rssi = rt73usb_agc_to_rssi(rt2x00dev, word1); + rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + if (rt2x00_get_field32(word0, RXD_W0_OFDM)) + rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; + else + rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; + if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) + rxdesc->dev_flags |= RXDONE_MY_BSS; + + /* + * Set skb pointers, and update frame information. + */ + skb_pull(entry->skb, entry->queue->desc_size); + skb_trim(entry->skb, rxdesc->size); +} + +/* + * Device probe functions. + */ +static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u16 word; + u8 *mac; + s8 value; + + rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); + + /* + * Start validation of the data that has been read. + */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %pM\n", mac); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, + ANTENNA_B); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, + ANTENNA_B); + rt2x00_set_field16(&word, EEPROM_ANTENNA_FRAME_TYPE, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5226); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_G, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_A, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_ACT, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_0, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_1, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_2, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_3, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_4, 0); + rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, + LED_MODE_DEFAULT); + rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); + EEPROM(rt2x00dev, "Led: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); + rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); + EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + 
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word);
+		EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word);
+	} else {
+		value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1);
+		if (value < -10 || value > 10)
+			rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0);
+		value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_2);
+		if (value < -10 || value > 10)
+			rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word);
+	}
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &word);
+	if (word == 0xffff) {
+		rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0);
+		rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word);
+		EEPROM(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word);
+	} else {
+		value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1);
+		if (value < -10 || value > 10)
+			rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0);
+		value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_2);
+		if (value < -10 || value > 10)
+			rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0);
+		rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word);
+	}
+
+	return 0;
+}
+
+static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	u16 value;
+	u16 eeprom;
+
+	/*
+	 * Read EEPROM word for configuration.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+
+	/*
+	 * Identify RF chipset.
+	 */
+	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
+	rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
+	rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
+			value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
+
+	if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0)) {
+		ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
+		return -ENODEV;
+	}
+
+	if (!rt2x00_rf(rt2x00dev, RF5226) &&
+	    !rt2x00_rf(rt2x00dev, RF2528) &&
+	    !rt2x00_rf(rt2x00dev, RF5225) &&
+	    !rt2x00_rf(rt2x00dev, RF2527)) {
+		ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Identify default antenna configuration.
+	 */
+	rt2x00dev->default_ant.tx =
+	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT);
+	rt2x00dev->default_ant.rx =
+	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT);
+
+	/*
+	 * Read the Frame type.
+	 */
+	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE))
+		__set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags);
+
+	/*
+	 * Detect if this device has a hardware controlled radio.
+	 */
+	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
+		__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
+
+	/*
+	 * Read frequency offset.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+	rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
+
+	/*
+	 * Read external LNA information.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+
+	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA)) {
+		__set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
+		__set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
+	}
+
+	/*
+	 * Store led settings for correct led behaviour.
+ */ +#ifdef CONFIG_RT2X00_LIB_LEDS + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); + + rt73usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); + rt73usb_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC); + if (value == LED_MODE_SIGNAL_STRENGTH) + rt73usb_init_led(rt2x00dev, &rt2x00dev->led_qual, + LED_TYPE_QUALITY); + + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_LED_MODE, value); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_0, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_0)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_1, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_1)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_2, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_2)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_3, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_3)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_4, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_4)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_ACT, + rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_BG, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_G)); + rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_A)); +#endif /* CONFIG_RT2X00_LIB_LEDS */ + + return 0; +} + +/* + * RF value list for RF2528 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2528[] = { + { 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b }, + { 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f }, + { 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b }, + { 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f }, + { 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b }, + { 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f }, + { 7, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea0b }, + { 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f }, + { 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b }, + { 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f }, + { 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b }, + { 12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f }, + { 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b }, + { 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 }, +}; + +/* + * RF value list for RF5226 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5226[] = { + { 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b }, + { 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f }, + { 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b }, + { 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f }, + { 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b }, + { 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f }, + { 7, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea0b }, + { 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f }, + { 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b }, + { 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f }, + { 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b }, + { 12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f }, + { 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b }, + { 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002c0c, 0x0000099a, 0x00098255, 0x000fea23 }, + { 40, 0x00002c0c, 0x000009a2, 0x00098255, 0x000fea03 }, + { 44, 
0x00002c0c, 0x000009a6, 0x00098255, 0x000fea0b }, + { 48, 0x00002c0c, 0x000009aa, 0x00098255, 0x000fea13 }, + { 52, 0x00002c0c, 0x000009ae, 0x00098255, 0x000fea1b }, + { 56, 0x00002c0c, 0x000009b2, 0x00098255, 0x000fea23 }, + { 60, 0x00002c0c, 0x000009ba, 0x00098255, 0x000fea03 }, + { 64, 0x00002c0c, 0x000009be, 0x00098255, 0x000fea0b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002c0c, 0x00000a2a, 0x000b8255, 0x000fea03 }, + { 104, 0x00002c0c, 0x00000a2e, 0x000b8255, 0x000fea0b }, + { 108, 0x00002c0c, 0x00000a32, 0x000b8255, 0x000fea13 }, + { 112, 0x00002c0c, 0x00000a36, 0x000b8255, 0x000fea1b }, + { 116, 0x00002c0c, 0x00000a3a, 0x000b8255, 0x000fea23 }, + { 120, 0x00002c0c, 0x00000a82, 0x000b8255, 0x000fea03 }, + { 124, 0x00002c0c, 0x00000a86, 0x000b8255, 0x000fea0b }, + { 128, 0x00002c0c, 0x00000a8a, 0x000b8255, 0x000fea13 }, + { 132, 0x00002c0c, 0x00000a8e, 0x000b8255, 0x000fea1b }, + { 136, 0x00002c0c, 0x00000a92, 0x000b8255, 0x000fea23 }, + + /* 802.11 UNII */ + { 140, 0x00002c0c, 0x00000a9a, 0x000b8255, 0x000fea03 }, + { 149, 0x00002c0c, 0x00000aa2, 0x000b8255, 0x000fea1f }, + { 153, 0x00002c0c, 0x00000aa6, 0x000b8255, 0x000fea27 }, + { 157, 0x00002c0c, 0x00000aae, 0x000b8255, 0x000fea07 }, + { 161, 0x00002c0c, 0x00000ab2, 0x000b8255, 0x000fea0f }, + { 165, 0x00002c0c, 0x00000ab6, 0x000b8255, 0x000fea17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002c0c, 0x0008099a, 0x000da255, 0x000d3a0b }, + { 38, 0x00002c0c, 0x0008099e, 0x000da255, 0x000d3a13 }, + { 42, 0x00002c0c, 0x000809a2, 0x000da255, 0x000d3a1b }, + { 46, 0x00002c0c, 0x000809a6, 0x000da255, 0x000d3a23 }, +}; + +/* + * RF value list for RF5225 & RF2527 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5225_2527[] = { + { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, + { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, + { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, + { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, + { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, + { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, + { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, + { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, + { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, + { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, + { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, + { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, + { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, + { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa23 }, + { 40, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa03 }, + { 44, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa0b }, + { 48, 0x00002ccc, 0x000049aa, 0x0009be55, 0x000ffa13 }, + { 52, 0x00002ccc, 0x000049ae, 0x0009ae55, 0x000ffa1b }, + { 56, 0x00002ccc, 0x000049b2, 0x0009ae55, 0x000ffa23 }, + { 60, 0x00002ccc, 0x000049ba, 0x0009ae55, 0x000ffa03 }, + { 64, 0x00002ccc, 0x000049be, 0x0009ae55, 0x000ffa0b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002ccc, 0x00004a2a, 0x000bae55, 0x000ffa03 }, + { 104, 0x00002ccc, 0x00004a2e, 0x000bae55, 0x000ffa0b }, + { 108, 0x00002ccc, 0x00004a32, 0x000bae55, 0x000ffa13 }, + { 112, 0x00002ccc, 0x00004a36, 0x000bae55, 0x000ffa1b }, + { 116, 0x00002ccc, 0x00004a3a, 0x000bbe55, 0x000ffa23 }, + { 120, 0x00002ccc, 0x00004a82, 0x000bbe55, 0x000ffa03 }, + { 124, 0x00002ccc, 0x00004a86, 0x000bbe55, 0x000ffa0b }, + { 128, 0x00002ccc, 0x00004a8a, 0x000bbe55, 0x000ffa13 }, + { 
132, 0x00002ccc, 0x00004a8e, 0x000bbe55, 0x000ffa1b }, + { 136, 0x00002ccc, 0x00004a92, 0x000bbe55, 0x000ffa23 }, + + /* 802.11 UNII */ + { 140, 0x00002ccc, 0x00004a9a, 0x000bbe55, 0x000ffa03 }, + { 149, 0x00002ccc, 0x00004aa2, 0x000bbe55, 0x000ffa1f }, + { 153, 0x00002ccc, 0x00004aa6, 0x000bbe55, 0x000ffa27 }, + { 157, 0x00002ccc, 0x00004aae, 0x000bbe55, 0x000ffa07 }, + { 161, 0x00002ccc, 0x00004ab2, 0x000bbe55, 0x000ffa0f }, + { 165, 0x00002ccc, 0x00004ab6, 0x000bbe55, 0x000ffa17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa0b }, + { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000ffa13 }, + { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa1b }, + { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa23 }, +}; + + +static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + struct channel_info *info; + char *tx_power; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_PS_NULLFUNC_STACK; + + SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Initialize hw_mode information. + */ + spec->supported_bands = SUPPORT_BAND_2GHZ; + spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; + + if (rt2x00_rf(rt2x00dev, RF2528)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); + spec->channels = rf_vals_bg_2528; + } else if (rt2x00_rf(rt2x00dev, RF5226)) { + spec->supported_bands |= SUPPORT_BAND_5GHZ; + spec->num_channels = ARRAY_SIZE(rf_vals_5226); + spec->channels = rf_vals_5226; + } else if (rt2x00_rf(rt2x00dev, RF2527)) { + spec->num_channels = 14; + spec->channels = rf_vals_5225_2527; + } else if (rt2x00_rf(rt2x00dev, RF5225)) { + spec->supported_bands |= SUPPORT_BAND_5GHZ; + spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527); + spec->channels = rf_vals_5225_2527; + } + + /* + * Create channel information array + */ + info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + spec->channels_info = info; + + tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); + for (i = 0; i < 14; i++) + info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + + if (spec->num_channels > 14) { + tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); + for (i = 14; i < spec->num_channels; i++) + info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + } + + return 0; +} + +static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt73usb_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt73usb_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + retval = rt73usb_probe_hw_mode(rt2x00dev); + if (retval) + return retval; + + /* + * This device has multiple filters for control frames, + * but has no a separate filter for PS Poll frames. + */ + __set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags); + + /* + * This device requires firmware. + */ + __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); + if (!modparam_nohwcrypt) + __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. 
+ */ +static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, + const struct ieee80211_tx_queue_params *params) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct data_queue *queue; + struct rt2x00_field32 field; + int retval; + u32 reg; + u32 offset; + + /* + * First pass the configuration through rt2x00lib, that will + * update the queue settings and validate the input. After that + * we are free to update the registers based on the value + * in the queue parameter. + */ + retval = rt2x00mac_conf_tx(hw, queue_idx, params); + if (retval) + return retval; + + /* + * We only need to perform additional register initialization + * for WMM queues/ + */ + if (queue_idx >= 4) + return 0; + + queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + + /* Update WMM TXOP register */ + offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2))); + field.bit_offset = (queue_idx & 1) * 16; + field.bit_mask = 0xffff << field.bit_offset; + + rt2x00usb_register_read(rt2x00dev, offset, ®); + rt2x00_set_field32(®, field, queue->txop); + rt2x00usb_register_write(rt2x00dev, offset, reg); + + /* Update WMM registers */ + field.bit_offset = queue_idx * 4; + field.bit_mask = 0xf << field.bit_offset; + + rt2x00usb_register_read(rt2x00dev, AIFSN_CSR, ®); + rt2x00_set_field32(®, field, queue->aifs); + rt2x00usb_register_write(rt2x00dev, AIFSN_CSR, reg); + + rt2x00usb_register_read(rt2x00dev, CWMIN_CSR, ®); + rt2x00_set_field32(®, field, queue->cw_min); + rt2x00usb_register_write(rt2x00dev, CWMIN_CSR, reg); + + rt2x00usb_register_read(rt2x00dev, CWMAX_CSR, ®); + rt2x00_set_field32(®, field, queue->cw_max); + rt2x00usb_register_write(rt2x00dev, CWMAX_CSR, reg); + + return 0; +} + +static u64 rt73usb_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt2x00usb_register_read(rt2x00dev, TXRX_CSR13, ®); + tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; + rt2x00usb_register_read(rt2x00dev, TXRX_CSR12, ®); + tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); + + return tsf; +} + +static const struct ieee80211_ops rt73usb_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .configure_filter = rt2x00mac_configure_filter, + .set_tim = rt2x00mac_set_tim, + .set_key = rt2x00mac_set_key, + .get_stats = rt2x00mac_get_stats, + .bss_info_changed = rt2x00mac_bss_info_changed, + .conf_tx = rt73usb_conf_tx, + .get_tsf = rt73usb_get_tsf, + .rfkill_poll = rt2x00mac_rfkill_poll, +}; + +static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { + .probe_hw = rt73usb_probe_hw, + .get_firmware_name = rt73usb_get_firmware_name, + .check_firmware = rt73usb_check_firmware, + .load_firmware = rt73usb_load_firmware, + .initialize = rt2x00usb_initialize, + .uninitialize = rt2x00usb_uninitialize, + .clear_entry = rt2x00usb_clear_entry, + .set_device_state = rt73usb_set_device_state, + .rfkill_poll = rt73usb_rfkill_poll, + .link_stats = rt73usb_link_stats, + .reset_tuner = rt73usb_reset_tuner, + .link_tuner = rt73usb_link_tuner, + .write_tx_desc = rt73usb_write_tx_desc, + .write_tx_data = rt2x00usb_write_tx_data, + .write_beacon = rt73usb_write_beacon, + .get_tx_data_len = rt73usb_get_tx_data_len, + .kick_tx_queue = rt73usb_kick_tx_queue, + .kill_tx_queue = rt2x00usb_kill_tx_queue, + .fill_rxdone = rt73usb_fill_rxdone, + .config_shared_key = rt73usb_config_shared_key, + .config_pairwise_key = 
rt73usb_config_pairwise_key, + .config_filter = rt73usb_config_filter, + .config_intf = rt73usb_config_intf, + .config_erp = rt73usb_config_erp, + .config_ant = rt73usb_config_ant, + .config = rt73usb_config, +}; + +static const struct data_queue_desc rt73usb_queue_rx = { + .entry_num = RX_ENTRIES, + .data_size = DATA_FRAME_SIZE, + .desc_size = RXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_usb), +}; + +static const struct data_queue_desc rt73usb_queue_tx = { + .entry_num = TX_ENTRIES, + .data_size = DATA_FRAME_SIZE, + .desc_size = TXD_DESC_SIZE, + .priv_size = sizeof(struct queue_entry_priv_usb), +}; + +static const struct data_queue_desc rt73usb_queue_bcn = { + .entry_num = 4 * BEACON_ENTRIES, + .data_size = MGMT_FRAME_SIZE, + .desc_size = TXINFO_SIZE, + .priv_size = sizeof(struct queue_entry_priv_usb), +}; + +static const struct rt2x00_ops rt73usb_ops = { + .name = KBUILD_MODNAME, + .max_sta_intf = 1, + .max_ap_intf = 4, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .tx_queues = NUM_TX_QUEUES, + .extra_tx_headroom = TXD_DESC_SIZE, + .rx = &rt73usb_queue_rx, + .tx = &rt73usb_queue_tx, + .bcn = &rt73usb_queue_bcn, + .lib = &rt73usb_rt2x00_ops, + .hw = &rt73usb_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt73usb_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * rt73usb module information. + */ +static struct usb_device_id rt73usb_device_table[] = { + /* AboCom */ + { USB_DEVICE(0x07b8, 0xb21b), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x07b8, 0xb21c), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x07b8, 0xb21d), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x07b8, 0xb21e), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x07b8, 0xb21f), USB_DEVICE_DATA(&rt73usb_ops) }, + /* AL */ + { USB_DEVICE(0x14b2, 0x3c10), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Amigo */ + { USB_DEVICE(0x148f, 0x9021), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0eb0, 0x9021), USB_DEVICE_DATA(&rt73usb_ops) }, + /* AMIT */ + { USB_DEVICE(0x18c5, 0x0002), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Askey */ + { USB_DEVICE(0x1690, 0x0722), USB_DEVICE_DATA(&rt73usb_ops) }, + /* ASUS */ + { USB_DEVICE(0x0b05, 0x1723), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0b05, 0x1724), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Belkin */ + { USB_DEVICE(0x050d, 0x7050), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x050d, 0x705a), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x050d, 0x905b), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x050d, 0x905c), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Billionton */ + { USB_DEVICE(0x1631, 0xc019), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Buffalo */ + { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0411, 0x00d9), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) }, + /* CEIVA */ + { USB_DEVICE(0x178d, 0x02be), USB_DEVICE_DATA(&rt73usb_ops) }, + /* CNet */ + { USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x1371, 0x9032), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Conceptronic */ + { USB_DEVICE(0x14b2, 0x3c22), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Corega */ + { USB_DEVICE(0x07aa, 0x002e), USB_DEVICE_DATA(&rt73usb_ops) }, + /* D-Link */ + { USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c04), 
USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c06), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c07), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Edimax */ + { USB_DEVICE(0x7392, 0x7318), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x7392, 0x7618), USB_DEVICE_DATA(&rt73usb_ops) }, + /* EnGenius */ + { USB_DEVICE(0x1740, 0x3701), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Gemtek */ + { USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Gigabyte */ + { USB_DEVICE(0x1044, 0x8008), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x1044, 0x800a), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Huawei-3Com */ + { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Hercules */ + { USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Linksys */ + { USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) }, + /* MSI */ + { USB_DEVICE(0x0db0, 0x4600), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Ovislink */ + { USB_DEVICE(0x1b75, 0x7318), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Ralink */ + { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Qcom */ + { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x18e8, 0x6238), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Samsung */ + { USB_DEVICE(0x04e8, 0x4471), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Senao */ + { USB_DEVICE(0x1740, 0x7100), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Sitecom */ + { USB_DEVICE(0x0df6, 0x0024), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0df6, 0x0027), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0df6, 0x002f), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0df6, 0x90ac), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Surecom */ + { USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Tilgin */ + { USB_DEVICE(0x6933, 0x5001), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Philips */ + { USB_DEVICE(0x0471, 0x200a), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Planex */ + { USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) }, + /* WideTell */ + { USB_DEVICE(0x7167, 0x3840), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Zcom */ + { USB_DEVICE(0x0cde, 0x001c), USB_DEVICE_DATA(&rt73usb_ops) }, + /* ZyXEL */ + { USB_DEVICE(0x0586, 0x3415), USB_DEVICE_DATA(&rt73usb_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2571W & RT2671 USB chipset based cards"); +MODULE_DEVICE_TABLE(usb, rt73usb_device_table); +MODULE_FIRMWARE(FIRMWARE_RT2571); +MODULE_LICENSE("GPL"); + +static struct usb_driver rt73usb_driver = { + .name = KBUILD_MODNAME, + .id_table = rt73usb_device_table, + .probe = rt2x00usb_probe, + 
.disconnect = rt2x00usb_disconnect, + .suspend = rt2x00usb_suspend, + .resume = rt2x00usb_resume, +}; + +static int __init rt73usb_init(void) +{ + return usb_register(&rt73usb_driver); +} + +static void __exit rt73usb_exit(void) +{ + usb_deregister(&rt73usb_driver); +} + +module_init(rt73usb_init); +module_exit(rt73usb_exit); diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h new file mode 100644 index 0000000..7abe7eb --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt73usb.h @@ -0,0 +1,1076 @@ +/* + Copyright (C) 2004 - 2009 Ivo van Doorn + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt73usb + Abstract: Data structures and registers for the rt73usb module. + Supported chipsets: rt2571W & rt2671. + */ + +#ifndef RT73USB_H +#define RT73USB_H + +/* + * RF chip defines. + */ +#define RF5226 0x0001 +#define RF2528 0x0002 +#define RF5225 0x0003 +#define RF2527 0x0004 + +/* + * Signal information. + * Default offset is required for RSSI <-> dBm conversion. + */ +#define DEFAULT_RSSI_OFFSET 120 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x3000 +#define CSR_REG_SIZE 0x04b0 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0100 +#define BBP_BASE 0x0000 +#define BBP_SIZE 0x0080 +#define RF_BASE 0x0004 +#define RF_SIZE 0x0010 + +/* + * Number of TX queues. + */ +#define NUM_TX_QUEUES 4 + +/* + * USB registers. + */ + +/* + * MCU_LEDCS: LED control for MCU Mailbox. + */ +#define MCU_LEDCS_LED_MODE FIELD16(0x001f) +#define MCU_LEDCS_RADIO_STATUS FIELD16(0x0020) +#define MCU_LEDCS_LINK_BG_STATUS FIELD16(0x0040) +#define MCU_LEDCS_LINK_A_STATUS FIELD16(0x0080) +#define MCU_LEDCS_POLARITY_GPIO_0 FIELD16(0x0100) +#define MCU_LEDCS_POLARITY_GPIO_1 FIELD16(0x0200) +#define MCU_LEDCS_POLARITY_GPIO_2 FIELD16(0x0400) +#define MCU_LEDCS_POLARITY_GPIO_3 FIELD16(0x0800) +#define MCU_LEDCS_POLARITY_GPIO_4 FIELD16(0x1000) +#define MCU_LEDCS_POLARITY_ACT FIELD16(0x2000) +#define MCU_LEDCS_POLARITY_READY_BG FIELD16(0x4000) +#define MCU_LEDCS_POLARITY_READY_A FIELD16(0x8000) + +/* + * 8051 firmware image. + */ +#define FIRMWARE_RT2571 "rt73.bin" +#define FIRMWARE_IMAGE_BASE 0x0800 + +/* + * Security key table memory. 
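+ * For example (illustrative arithmetic based on the entry layouts defined + * below): SHARED_KEY_ENTRY(1) resolves to 0x1000 + 1 * 32 = 0x1020 and + * PAIRWISE_TA_ENTRY(2) to 0x1a00 + 2 * 8 = 0x1a10.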
+ * 16 entries 32-byte for shared key table + * 64 entries 32-byte for pairwise key table + * 64 entries 8-byte for pairwise ta key table + */ +#define SHARED_KEY_TABLE_BASE 0x1000 +#define PAIRWISE_KEY_TABLE_BASE 0x1200 +#define PAIRWISE_TA_TABLE_BASE 0x1a00 + +#define SHARED_KEY_ENTRY(__idx) \ + ( SHARED_KEY_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_key_entry)) ) +#define PAIRWISE_KEY_ENTRY(__idx) \ + ( PAIRWISE_KEY_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_key_entry)) ) +#define PAIRWISE_TA_ENTRY(__idx) \ + ( PAIRWISE_TA_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_pairwise_ta_entry)) ) + +struct hw_key_entry { + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; +} __attribute__ ((packed)); + +struct hw_pairwise_ta_entry { + u8 address[6]; + u8 cipher; + u8 reserved; +} __attribute__ ((packed)); + +/* + * Since NULL frame won't be that long (256 byte), + * We steal 16 tail bytes to save debugging settings. + */ +#define HW_DEBUG_SETTING_BASE 0x2bf0 + +/* + * On-chip BEACON frame space. + */ +#define HW_BEACON_BASE0 0x2400 +#define HW_BEACON_BASE1 0x2500 +#define HW_BEACON_BASE2 0x2600 +#define HW_BEACON_BASE3 0x2700 + +#define HW_BEACON_OFFSET(__index) \ + ( HW_BEACON_BASE0 + (__index * 0x0100) ) + +/* + * MAC Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * MAC_CSR0: ASIC revision number. + */ +#define MAC_CSR0 0x3000 +#define MAC_CSR0_REVISION FIELD32(0x0000000f) +#define MAC_CSR0_CHIPSET FIELD32(0x000ffff0) + +/* + * MAC_CSR1: System control register. + * SOFT_RESET: Software reset bit, 1: reset, 0: normal. + * BBP_RESET: Hardware reset BBP. + * HOST_READY: Host is ready after initialization, 1: ready. + */ +#define MAC_CSR1 0x3004 +#define MAC_CSR1_SOFT_RESET FIELD32(0x00000001) +#define MAC_CSR1_BBP_RESET FIELD32(0x00000002) +#define MAC_CSR1_HOST_READY FIELD32(0x00000004) + +/* + * MAC_CSR2: STA MAC register 0. + */ +#define MAC_CSR2 0x3008 +#define MAC_CSR2_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR2_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR2_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR2_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR3: STA MAC register 1. + * UNICAST_TO_ME_MASK: + * Used to mask off bits from byte 5 of the MAC address + * to determine the UNICAST_TO_ME bit for RX frames. + * The full mask is complemented by BSS_ID_MASK: + * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK + */ +#define MAC_CSR3 0x300c +#define MAC_CSR3_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR3_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR3_UNICAST_TO_ME_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR4: BSSID register 0. + */ +#define MAC_CSR4 0x3010 +#define MAC_CSR4_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR4_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR4_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR4_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR5: BSSID register 1. + * BSS_ID_MASK: + * This mask is used to mask off bits 0 and 1 of byte 5 of the + * BSSID. This will make sure that those bits will be ignored + * when determining the MY_BSS of RX frames. + * 0: 1-BSSID mode (BSS index = 0) + * 1: 2-BSSID mode (BSS index: Byte5, bit 0) + * 2: 2-BSSID mode (BSS index: byte5, bit 1) + * 3: 4-BSSID mode (BSS index: byte5, bit 0 - 1) + */ +#define MAC_CSR5 0x3014 +#define MAC_CSR5_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR5_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR5_BSS_ID_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR6: Maximum frame length register. 
+ */ +#define MAC_CSR6 0x3018 +#define MAC_CSR6_MAX_FRAME_UNIT FIELD32(0x00000fff) + +/* + * MAC_CSR7: Reserved + */ +#define MAC_CSR7 0x301c + +/* + * MAC_CSR8: SIFS/EIFS register. + * All units are in US. + */ +#define MAC_CSR8 0x3020 +#define MAC_CSR8_SIFS FIELD32(0x000000ff) +#define MAC_CSR8_SIFS_AFTER_RX_OFDM FIELD32(0x0000ff00) +#define MAC_CSR8_EIFS FIELD32(0xffff0000) + +/* + * MAC_CSR9: Back-Off control register. + * SLOT_TIME: Slot time, default is 20us for 802.11BG. + * CWMIN: Bit for Cwmin. default Cwmin is 31 (2^5 - 1). + * CWMAX: Bit for Cwmax, default Cwmax is 1023 (2^10 - 1). + * CW_SELECT: 1: CWmin/Cwmax select from register, 0:select from TxD. + */ +#define MAC_CSR9 0x3024 +#define MAC_CSR9_SLOT_TIME FIELD32(0x000000ff) +#define MAC_CSR9_CWMIN FIELD32(0x00000f00) +#define MAC_CSR9_CWMAX FIELD32(0x0000f000) +#define MAC_CSR9_CW_SELECT FIELD32(0x00010000) + +/* + * MAC_CSR10: Power state configuration. + */ +#define MAC_CSR10 0x3028 + +/* + * MAC_CSR11: Power saving transition time register. + * DELAY_AFTER_TBCN: Delay after Tbcn expired in units of TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * WAKEUP_LATENCY: In unit of TU. + */ +#define MAC_CSR11 0x302c +#define MAC_CSR11_DELAY_AFTER_TBCN FIELD32(0x000000ff) +#define MAC_CSR11_TBCN_BEFORE_WAKEUP FIELD32(0x00007f00) +#define MAC_CSR11_AUTOWAKE FIELD32(0x00008000) +#define MAC_CSR11_WAKEUP_LATENCY FIELD32(0x000f0000) + +/* + * MAC_CSR12: Manual power control / status register (merge CSR20 & PWRCSR1). + * CURRENT_STATE: 0:sleep, 1:awake. + * FORCE_WAKEUP: This has higher priority than PUT_TO_SLEEP. + * BBP_CURRENT_STATE: 0: BBP sleep, 1: BBP awake. + */ +#define MAC_CSR12 0x3030 +#define MAC_CSR12_CURRENT_STATE FIELD32(0x00000001) +#define MAC_CSR12_PUT_TO_SLEEP FIELD32(0x00000002) +#define MAC_CSR12_FORCE_WAKEUP FIELD32(0x00000004) +#define MAC_CSR12_BBP_CURRENT_STATE FIELD32(0x00000008) + +/* + * MAC_CSR13: GPIO. + */ +#define MAC_CSR13 0x3034 +#define MAC_CSR13_BIT0 FIELD32(0x00000001) +#define MAC_CSR13_BIT1 FIELD32(0x00000002) +#define MAC_CSR13_BIT2 FIELD32(0x00000004) +#define MAC_CSR13_BIT3 FIELD32(0x00000008) +#define MAC_CSR13_BIT4 FIELD32(0x00000010) +#define MAC_CSR13_BIT5 FIELD32(0x00000020) +#define MAC_CSR13_BIT6 FIELD32(0x00000040) +#define MAC_CSR13_BIT7 FIELD32(0x00000080) +#define MAC_CSR13_BIT8 FIELD32(0x00000100) +#define MAC_CSR13_BIT9 FIELD32(0x00000200) +#define MAC_CSR13_BIT10 FIELD32(0x00000400) +#define MAC_CSR13_BIT11 FIELD32(0x00000800) +#define MAC_CSR13_BIT12 FIELD32(0x00001000) + +/* + * MAC_CSR14: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + * HW_LED: HW TX activity, 1: normal OFF, 0: normal ON. + * SW_LED: s/w LED, 1: ON, 0: OFF. + * HW_LED_POLARITY: 0: active low, 1: active high. + */ +#define MAC_CSR14 0x3038 +#define MAC_CSR14_ON_PERIOD FIELD32(0x000000ff) +#define MAC_CSR14_OFF_PERIOD FIELD32(0x0000ff00) +#define MAC_CSR14_HW_LED FIELD32(0x00010000) +#define MAC_CSR14_SW_LED FIELD32(0x00020000) +#define MAC_CSR14_HW_LED_POLARITY FIELD32(0x00040000) +#define MAC_CSR14_SW_LED2 FIELD32(0x00080000) + +/* + * MAC_CSR15: NAV control. + */ +#define MAC_CSR15 0x303c + +/* + * TXRX control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXRX_CSR0: TX/RX configuration register. + * TSF_OFFSET: Default is 24. + * AUTO_TX_SEQ: 1: ASIC auto replace sequence nr in outgoing frame. + * DISABLE_RX: Disable Rx engine. + * DROP_CRC: Drop CRC error. + * DROP_PHYSICAL: Drop physical error. 
+ * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TO_DS: Drop frames with the ToDS bit set. + * DROP_VERSION_ERROR: Drop version error frame. + * DROP_MULTICAST: Drop multicast frames. + * DROP_BROADCAST: Drop broadcast frames. + * DROP_ACK_CTS: Drop received ACK and CTS. + */ +#define TXRX_CSR0 0x3040 +#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXRX_CSR0_TSF_OFFSET FIELD32(0x00007e00) +#define TXRX_CSR0_AUTO_TX_SEQ FIELD32(0x00008000) +#define TXRX_CSR0_DISABLE_RX FIELD32(0x00010000) +#define TXRX_CSR0_DROP_CRC FIELD32(0x00020000) +#define TXRX_CSR0_DROP_PHYSICAL FIELD32(0x00040000) +#define TXRX_CSR0_DROP_CONTROL FIELD32(0x00080000) +#define TXRX_CSR0_DROP_NOT_TO_ME FIELD32(0x00100000) +#define TXRX_CSR0_DROP_TO_DS FIELD32(0x00200000) +#define TXRX_CSR0_DROP_VERSION_ERROR FIELD32(0x00400000) +#define TXRX_CSR0_DROP_MULTICAST FIELD32(0x00800000) +#define TXRX_CSR0_DROP_BROADCAST FIELD32(0x01000000) +#define TXRX_CSR0_DROP_ACK_CTS FIELD32(0x02000000) +#define TXRX_CSR0_TX_WITHOUT_WAITING FIELD32(0x04000000) + +/* + * TXRX_CSR1 + */ +#define TXRX_CSR1 0x3044 +#define TXRX_CSR1_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR1_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR1_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR1_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR1_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR1_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR1_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR1_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR2 + */ +#define TXRX_CSR2 0x3048 +#define TXRX_CSR2_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR2_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR2_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR2_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR2_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR2_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR2_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR2_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR3 + */ +#define TXRX_CSR3 0x304c +#define TXRX_CSR3_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR3_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR3_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR3_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR4: Auto-Responder/Tx-retry register. + * AUTORESPOND_PREAMBLE: 0:long, 1:short preamble. + * OFDM_TX_RATE_DOWN: 1:enable. + * OFDM_TX_RATE_STEP: 0:1-step, 1: 2-step, 2:3-step, 3:4-step. + * OFDM_TX_FALLBACK_CCK: 0: Fallback to OFDM 6M only, 1: Fallback to CCK 1M,2M. + */ +#define TXRX_CSR4 0x3050 +#define TXRX_CSR4_TX_ACK_TIMEOUT FIELD32(0x000000ff) +#define TXRX_CSR4_CNTL_ACK_POLICY FIELD32(0x00000700) +#define TXRX_CSR4_ACK_CTS_PSM FIELD32(0x00010000) +#define TXRX_CSR4_AUTORESPOND_ENABLE FIELD32(0x00020000) +#define TXRX_CSR4_AUTORESPOND_PREAMBLE FIELD32(0x00040000) +#define TXRX_CSR4_OFDM_TX_RATE_DOWN FIELD32(0x00080000) +#define TXRX_CSR4_OFDM_TX_RATE_STEP FIELD32(0x00300000) +#define TXRX_CSR4_OFDM_TX_FALLBACK_CCK FIELD32(0x00400000) +#define TXRX_CSR4_LONG_RETRY_LIMIT FIELD32(0x0f000000) +#define TXRX_CSR4_SHORT_RETRY_LIMIT FIELD32(0xf0000000) + +/* + * TXRX_CSR5 + */ +#define TXRX_CSR5 0x3054 + +/* + * TXRX_CSR6: ACK/CTS payload consumed time. + */ +#define TXRX_CSR6 0x3058 + +/* + * TXRX_CSR7: OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. 
+ */ +#define TXRX_CSR7 0x305c +#define TXRX_CSR7_ACK_CTS_6MBS FIELD32(0x000000ff) +#define TXRX_CSR7_ACK_CTS_9MBS FIELD32(0x0000ff00) +#define TXRX_CSR7_ACK_CTS_12MBS FIELD32(0x00ff0000) +#define TXRX_CSR7_ACK_CTS_18MBS FIELD32(0xff000000) + +/* + * TXRX_CSR8: OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. + */ +#define TXRX_CSR8 0x3060 +#define TXRX_CSR8_ACK_CTS_24MBS FIELD32(0x000000ff) +#define TXRX_CSR8_ACK_CTS_36MBS FIELD32(0x0000ff00) +#define TXRX_CSR8_ACK_CTS_48MBS FIELD32(0x00ff0000) +#define TXRX_CSR8_ACK_CTS_54MBS FIELD32(0xff000000) + +/* + * TXRX_CSR9: Synchronization control register. + * BEACON_INTERVAL: In unit of 1/16 TU. + * TSF_TICKING: Enable TSF auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * BEACON_GEN: Enable beacon generator. + */ +#define TXRX_CSR9 0x3064 +#define TXRX_CSR9_BEACON_INTERVAL FIELD32(0x0000ffff) +#define TXRX_CSR9_TSF_TICKING FIELD32(0x00010000) +#define TXRX_CSR9_TSF_SYNC FIELD32(0x00060000) +#define TXRX_CSR9_TBTT_ENABLE FIELD32(0x00080000) +#define TXRX_CSR9_BEACON_GEN FIELD32(0x00100000) +#define TXRX_CSR9_TIMESTAMP_COMPENSATE FIELD32(0xff000000) + +/* + * TXRX_CSR10: BEACON alignment. + */ +#define TXRX_CSR10 0x3068 + +/* + * TXRX_CSR11: AES mask. + */ +#define TXRX_CSR11 0x306c + +/* + * TXRX_CSR12: TSF low 32. + */ +#define TXRX_CSR12 0x3070 +#define TXRX_CSR12_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR13: TSF high 32. + */ +#define TXRX_CSR13 0x3074 +#define TXRX_CSR13_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR14: TBTT timer. + */ +#define TXRX_CSR14 0x3078 + +/* + * TXRX_CSR15: TKIP MIC priority byte "AND" mask. + */ +#define TXRX_CSR15 0x307c + +/* + * PHY control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * PHY_CSR0: RF/PS control. + */ +#define PHY_CSR0 0x3080 +#define PHY_CSR0_PA_PE_BG FIELD32(0x00010000) +#define PHY_CSR0_PA_PE_A FIELD32(0x00020000) + +/* + * PHY_CSR1 + */ +#define PHY_CSR1 0x3084 +#define PHY_CSR1_RF_RPI FIELD32(0x00010000) + +/* + * PHY_CSR2: Pre-TX BBP control. + */ +#define PHY_CSR2 0x3088 + +/* + * PHY_CSR3: BBP serial control register. + * VALUE: Register value to program into BBP. + * REG_NUM: Selected BBP register. + * READ_CONTROL: 0: Write BBP, 1: Read BBP. + * BUSY: 1: ASIC is busy execute BBP programming. + */ +#define PHY_CSR3 0x308c +#define PHY_CSR3_VALUE FIELD32(0x000000ff) +#define PHY_CSR3_REGNUM FIELD32(0x00007f00) +#define PHY_CSR3_READ_CONTROL FIELD32(0x00008000) +#define PHY_CSR3_BUSY FIELD32(0x00010000) + +/* + * PHY_CSR4: RF serial control register + * VALUE: Register value (include register id) serial out to RF/IF chip. + * NUMBER_OF_BITS: Number of bits used in RFRegValue (I:20, RFMD:22). + * IF_SELECT: 1: select IF to program, 0: select RF to program. + * PLL_LD: RF PLL_LD status. + * BUSY: 1: ASIC is busy execute RF programming. + */ +#define PHY_CSR4 0x3090 +#define PHY_CSR4_VALUE FIELD32(0x00ffffff) +#define PHY_CSR4_NUMBER_OF_BITS FIELD32(0x1f000000) +#define PHY_CSR4_IF_SELECT FIELD32(0x20000000) +#define PHY_CSR4_PLL_LD FIELD32(0x40000000) +#define PHY_CSR4_BUSY FIELD32(0x80000000) + +/* + * PHY_CSR5: RX to TX signal switch timing control. + */ +#define PHY_CSR5 0x3094 +#define PHY_CSR5_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR6: TX to RX signal timing control. + */ +#define PHY_CSR6 0x3098 +#define PHY_CSR6_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR7: TX DAC switching timing control. + */ +#define PHY_CSR7 0x309c + +/* + * Security control register. 
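+ * As an illustration (derived from the field definitions below), the valid + * bit in SEC_CSR0 for BSS x, key y is bit (4 * x + y); e.g. the BSS2/KEY1 + * valid flag is FIELD32(0x00000200), i.e. bit 9.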
+ */ + +/* + * SEC_CSR0: Shared key table control. + */ +#define SEC_CSR0 0x30a0 +#define SEC_CSR0_BSS0_KEY0_VALID FIELD32(0x00000001) +#define SEC_CSR0_BSS0_KEY1_VALID FIELD32(0x00000002) +#define SEC_CSR0_BSS0_KEY2_VALID FIELD32(0x00000004) +#define SEC_CSR0_BSS0_KEY3_VALID FIELD32(0x00000008) +#define SEC_CSR0_BSS1_KEY0_VALID FIELD32(0x00000010) +#define SEC_CSR0_BSS1_KEY1_VALID FIELD32(0x00000020) +#define SEC_CSR0_BSS1_KEY2_VALID FIELD32(0x00000040) +#define SEC_CSR0_BSS1_KEY3_VALID FIELD32(0x00000080) +#define SEC_CSR0_BSS2_KEY0_VALID FIELD32(0x00000100) +#define SEC_CSR0_BSS2_KEY1_VALID FIELD32(0x00000200) +#define SEC_CSR0_BSS2_KEY2_VALID FIELD32(0x00000400) +#define SEC_CSR0_BSS2_KEY3_VALID FIELD32(0x00000800) +#define SEC_CSR0_BSS3_KEY0_VALID FIELD32(0x00001000) +#define SEC_CSR0_BSS3_KEY1_VALID FIELD32(0x00002000) +#define SEC_CSR0_BSS3_KEY2_VALID FIELD32(0x00004000) +#define SEC_CSR0_BSS3_KEY3_VALID FIELD32(0x00008000) + +/* + * SEC_CSR1: Shared key table security mode register. + */ +#define SEC_CSR1 0x30a4 +#define SEC_CSR1_BSS0_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR1_BSS0_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR1_BSS0_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR1_BSS0_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR1_BSS1_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR1_BSS1_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR1_BSS1_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR1_BSS1_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * Pairwise key table valid bitmap registers. + * SEC_CSR2: pairwise key table valid bitmap 0. + * SEC_CSR3: pairwise key table valid bitmap 1. + */ +#define SEC_CSR2 0x30a8 +#define SEC_CSR3 0x30ac + +/* + * SEC_CSR4: Pairwise key table lookup control. + */ +#define SEC_CSR4 0x30b0 +#define SEC_CSR4_ENABLE_BSS0 FIELD32(0x00000001) +#define SEC_CSR4_ENABLE_BSS1 FIELD32(0x00000002) +#define SEC_CSR4_ENABLE_BSS2 FIELD32(0x00000004) +#define SEC_CSR4_ENABLE_BSS3 FIELD32(0x00000008) + +/* + * SEC_CSR5: shared key table security mode register. + */ +#define SEC_CSR5 0x30b4 +#define SEC_CSR5_BSS2_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR5_BSS2_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR5_BSS2_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR5_BSS2_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR5_BSS3_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR5_BSS3_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR5_BSS3_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR5_BSS3_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * STA control registers. + */ + +/* + * STA_CSR0: RX PLCP error count & RX FCS error count. + */ +#define STA_CSR0 0x30c0 +#define STA_CSR0_FCS_ERROR FIELD32(0x0000ffff) +#define STA_CSR0_PLCP_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR1: RX False CCA count & RX LONG frame count. + */ +#define STA_CSR1 0x30c4 +#define STA_CSR1_PHYSICAL_ERROR FIELD32(0x0000ffff) +#define STA_CSR1_FALSE_CCA_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR2: TX Beacon count and RX FIFO overflow count. + */ +#define STA_CSR2 0x30c8 +#define STA_CSR2_RX_FIFO_OVERFLOW_COUNT FIELD32(0x0000ffff) +#define STA_CSR2_RX_OVERFLOW_COUNT FIELD32(0xffff0000) + +/* + * STA_CSR3: TX Beacon count. + */ +#define STA_CSR3 0x30cc +#define STA_CSR3_TX_BEACON_COUNT FIELD32(0x0000ffff) + +/* + * STA_CSR4: TX Retry count. + */ +#define STA_CSR4 0x30d0 +#define STA_CSR4_TX_NO_RETRY_COUNT FIELD32(0x0000ffff) +#define STA_CSR4_TX_ONE_RETRY_COUNT FIELD32(0xffff0000) + +/* + * STA_CSR5: TX Retry count. 
+ */ +#define STA_CSR5 0x30d4 +#define STA_CSR4_TX_MULTI_RETRY_COUNT FIELD32(0x0000ffff) +#define STA_CSR4_TX_RETRY_FAIL_COUNT FIELD32(0xffff0000) + +/* + * QOS control registers. + */ + +/* + * QOS_CSR1: TXOP holder MAC address register. + */ +#define QOS_CSR1 0x30e4 +#define QOS_CSR1_BYTE4 FIELD32(0x000000ff) +#define QOS_CSR1_BYTE5 FIELD32(0x0000ff00) + +/* + * QOS_CSR2: TXOP holder timeout register. + */ +#define QOS_CSR2 0x30e8 + +/* + * RX QOS-CFPOLL MAC address register. + * QOS_CSR3: RX QOS-CFPOLL MAC address 0. + * QOS_CSR4: RX QOS-CFPOLL MAC address 1. + */ +#define QOS_CSR3 0x30ec +#define QOS_CSR4 0x30f0 + +/* + * QOS_CSR5: "QosControl" field of the RX QOS-CFPOLL. + */ +#define QOS_CSR5 0x30f4 + +/* + * WMM Scheduler Register + */ + +/* + * AIFSN_CSR: AIFSN for each EDCA AC. + * AIFSN0: For AC_BK. + * AIFSN1: For AC_BE. + * AIFSN2: For AC_VI. + * AIFSN3: For AC_VO. + */ +#define AIFSN_CSR 0x0400 +#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f) +#define AIFSN_CSR_AIFSN1 FIELD32(0x000000f0) +#define AIFSN_CSR_AIFSN2 FIELD32(0x00000f00) +#define AIFSN_CSR_AIFSN3 FIELD32(0x0000f000) + +/* + * CWMIN_CSR: CWmin for each EDCA AC. + * CWMIN0: For AC_BK. + * CWMIN1: For AC_BE. + * CWMIN2: For AC_VI. + * CWMIN3: For AC_VO. + */ +#define CWMIN_CSR 0x0404 +#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f) +#define CWMIN_CSR_CWMIN1 FIELD32(0x000000f0) +#define CWMIN_CSR_CWMIN2 FIELD32(0x00000f00) +#define CWMIN_CSR_CWMIN3 FIELD32(0x0000f000) + +/* + * CWMAX_CSR: CWmax for each EDCA AC. + * CWMAX0: For AC_BK. + * CWMAX1: For AC_BE. + * CWMAX2: For AC_VI. + * CWMAX3: For AC_VO. + */ +#define CWMAX_CSR 0x0408 +#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f) +#define CWMAX_CSR_CWMAX1 FIELD32(0x000000f0) +#define CWMAX_CSR_CWMAX2 FIELD32(0x00000f00) +#define CWMAX_CSR_CWMAX3 FIELD32(0x0000f000) + +/* + * AC_TXOP_CSR0: AC_BK/AC_BE TXOP register. + * AC0_TX_OP: For AC_BK, in unit of 32us. + * AC1_TX_OP: For AC_BE, in unit of 32us. + */ +#define AC_TXOP_CSR0 0x040c +#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000) + +/* + * AC_TXOP_CSR1: AC_VO/AC_VI TXOP register. + * AC2_TX_OP: For AC_VI, in unit of 32us. + * AC3_TX_OP: For AC_VO, in unit of 32us. + */ +#define AC_TXOP_CSR1 0x0410 +#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR1_AC3_TX_OP FIELD32(0xffff0000) + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R2 + */ +#define BBP_R2_BG_MODE FIELD8(0x20) + +/* + * R3 + */ +#define BBP_R3_SMART_MODE FIELD8(0x01) + +/* + * R4: RX antenna control + * FRAME_END: 1 - DPDT, 0 - SPDT (Only valid for 802.11G, RF2527 & RF2529) + */ + +/* + * ANTENNA_CONTROL semantics (guessed): + * 0x1: Software controlled antenna switching (fixed or SW diversity) + * 0x2: Hardware diversity. + */ +#define BBP_R4_RX_ANTENNA_CONTROL FIELD8(0x03) +#define BBP_R4_RX_FRAME_END FIELD8(0x20) + +/* + * R77 + */ +#define BBP_R77_RX_ANTENNA FIELD8(0x03) + +/* + * RF registers + */ + +/* + * RF 3 + */ +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * RF 4 + */ +#define RF4_FREQ_OFFSET FIELD32(0x0003f000) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. 
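+ * Illustrative example: with the byte fields below, a MAC of 00:11:22:33:44:55 + * is stored with the earlier byte in the low half of each 16-bit word, so the + * EEPROM words at 0x0002, 0x0003 and 0x0004 would read 0x1100, 0x3322 and 0x5544.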
+ */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antennas. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * FRAME_TYPE: 0: DPDT , 1: SPDT , noted this bit is valid for g only. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x0010 +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_FRAME_TYPE FIELD16(0x0040) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * EXTERNAL_LNA: External LNA. + */ +#define EEPROM_NIC 0x0011 +#define EEPROM_NIC_EXTERNAL_LNA FIELD16(0x0010) + +/* + * EEPROM geography. + * GEO_A: Default geographical setting for 5GHz band + * GEO: Default geographical setting. + */ +#define EEPROM_GEOGRAPHY 0x0012 +#define EEPROM_GEOGRAPHY_GEO_A FIELD16(0x00ff) +#define EEPROM_GEOGRAPHY_GEO FIELD16(0xff00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x0013 +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER 802.11G + */ +#define EEPROM_TXPOWER_G_START 0x0023 +#define EEPROM_TXPOWER_G_SIZE 7 +#define EEPROM_TXPOWER_G_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_G_2 FIELD16(0xff00) + +/* + * EEPROM Frequency + */ +#define EEPROM_FREQ 0x002f +#define EEPROM_FREQ_OFFSET FIELD16(0x00ff) +#define EEPROM_FREQ_SEQ_MASK FIELD16(0xff00) +#define EEPROM_FREQ_SEQ FIELD16(0x0300) + +/* + * EEPROM LED. + * POLARITY_RDY_G: Polarity RDY_G setting. + * POLARITY_RDY_A: Polarity RDY_A setting. + * POLARITY_ACT: Polarity ACT setting. + * POLARITY_GPIO_0: Polarity GPIO0 setting. + * POLARITY_GPIO_1: Polarity GPIO1 setting. + * POLARITY_GPIO_2: Polarity GPIO2 setting. + * POLARITY_GPIO_3: Polarity GPIO3 setting. + * POLARITY_GPIO_4: Polarity GPIO4 setting. + * LED_MODE: Led mode. 
+ */ +#define EEPROM_LED 0x0030 +#define EEPROM_LED_POLARITY_RDY_G FIELD16(0x0001) +#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002) +#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004) +#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008) +#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010) +#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020) +#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040) +#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080) +#define EEPROM_LED_LED_MODE FIELD16(0x1f00) + +/* + * EEPROM TXPOWER 802.11A + */ +#define EEPROM_TXPOWER_A_START 0x0031 +#define EEPROM_TXPOWER_A_SIZE 12 +#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_A_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11BG + */ +#define EEPROM_RSSI_OFFSET_BG 0x004d +#define EEPROM_RSSI_OFFSET_BG_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_BG_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11A + */ +#define EEPROM_RSSI_OFFSET_A 0x004e +#define EEPROM_RSSI_OFFSET_A_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_A_2 FIELD16(0xff00) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 6 * sizeof(__le32) ) +#define TXINFO_SIZE ( 6 * sizeof(__le32) ) +#define RXD_DESC_SIZE ( 6 * sizeof(__le32) ) + +/* + * TX descriptor format for TX, PRIO and Beacon Ring. + */ + +/* + * Word0 + * BURST: Next frame belongs to same "burst" event. + * TKIP_MIC: ASIC appends TKIP MIC if TKIP is used. + * KEY_TABLE: Use per-client pairwise KEY table. + * KEY_INDEX: + * Key index (0~31) to the pairwise KEY table. + * 0~3 to shared KEY table 0 (BSS0). + * 4~7 to shared KEY table 1 (BSS1). + * 8~11 to shared KEY table 2 (BSS2). + * 12~15 to shared KEY table 3 (BSS3). + * BURST2: For backward compatibility, set to same value as BURST. + */ +#define TXD_W0_BURST FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_MORE_FRAG FIELD32(0x00000004) +#define TXD_W0_ACK FIELD32(0x00000008) +#define TXD_W0_TIMESTAMP FIELD32(0x00000010) +#define TXD_W0_OFDM FIELD32(0x00000020) +#define TXD_W0_IFS FIELD32(0x00000040) +#define TXD_W0_RETRY_MODE FIELD32(0x00000080) +#define TXD_W0_TKIP_MIC FIELD32(0x00000100) +#define TXD_W0_KEY_TABLE FIELD32(0x00000200) +#define TXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_BURST2 FIELD32(0x10000000) +#define TXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + * HOST_Q_ID: EDCA/HCCA queue ID. + * HW_SEQUENCE: MAC overwrites the frame sequence number. + * BUFFER_COUNT: Number of buffers in this TXD. + */ +#define TXD_W1_HOST_Q_ID FIELD32(0x0000000f) +#define TXD_W1_AIFSN FIELD32(0x000000f0) +#define TXD_W1_CWMIN FIELD32(0x00000f00) +#define TXD_W1_CWMAX FIELD32(0x0000f000) +#define TXD_W1_IV_OFFSET FIELD32(0x003f0000) +#define TXD_W1_HW_SEQUENCE FIELD32(0x10000000) +#define TXD_W1_BUFFER_COUNT FIELD32(0xe0000000) + +/* + * Word2: PLCP information + */ +#define TXD_W2_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W2_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W2_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W2_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word3 + */ +#define TXD_W3_IV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define TXD_W4_EIV FIELD32(0xffffffff) + +/* + * Word5 + * FRAME_OFFSET: Frame start offset inside ASIC TXFIFO (after TXINFO field). + * PACKET_ID: Driver assigned packet ID to categorize TXResult in interrupt. + * WAITING_DMA_DONE_INT: TXD been filled with data + * and waiting for TxDoneISR housekeeping. 
+ */ +#define TXD_W5_FRAME_OFFSET FIELD32(0x000000ff) +#define TXD_W5_PACKET_ID FIELD32(0x0000ff00) +#define TXD_W5_TX_POWER FIELD32(0x00ff0000) +#define TXD_W5_WAITING_DMA_DONE_INT FIELD32(0x01000000) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + * CIPHER_ERROR: 1:ICV error, 2:MIC error, 3:invalid key. + * KEY_INDEX: Decryption key actually used. + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_DROP FIELD32(0x00000002) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000004) +#define RXD_W0_MULTICAST FIELD32(0x00000008) +#define RXD_W0_BROADCAST FIELD32(0x00000010) +#define RXD_W0_MY_BSS FIELD32(0x00000020) +#define RXD_W0_CRC_ERROR FIELD32(0x00000040) +#define RXD_W0_OFDM FIELD32(0x00000080) +#define RXD_W0_CIPHER_ERROR FIELD32(0x00000300) +#define RXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define RXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * WORD1 + * SIGNAL: RX raw data rate reported by BBP. + * RSSI: RSSI reported by BBP. + */ +#define RXD_W1_SIGNAL FIELD32(0x000000ff) +#define RXD_W1_RSSI_AGC FIELD32(0x00001f00) +#define RXD_W1_RSSI_LNA FIELD32(0x00006000) +#define RXD_W1_FRAME_OFFSET FIELD32(0x7f000000) + +/* + * Word2 + * IV: Received IV of originally encrypted. + */ +#define RXD_W2_IV FIELD32(0xffffffff) + +/* + * Word3 + * EIV: Received EIV of originally encrypted. + */ +#define RXD_W3_EIV FIELD32(0xffffffff) + +/* + * Word4 + * ICV: Received ICV of originally encrypted. + * NOTE: This is a guess, the official definition is "reserved" + */ +#define RXD_W4_ICV FIELD32(0xffffffff) + +/* + * the above 20-byte is called RXINFO and will be DMAed to MAC RX block + * and passed to the HOST driver. + * The following fields are for DMA block and HOST usage only. + * Can't be touched by ASIC MAC block. + */ + +/* + * Word5 + */ +#define RXD_W5_RESERVED FIELD32(0xffffffff) + +/* + * Macros for converting txpower from EEPROM to mac80211 value + * and from mac80211 value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ + (((u8)(__txpower)) > MAX_TXPOWER) ? 
DEFAULT_TXPOWER : (__txpower) + +#define TXPOWER_TO_DEV(__txpower) \ + clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER) + +#endif /* RT73USB_H */ diff --git a/drivers/net/wireless/rtl818x/Makefile b/drivers/net/wireless/rtl818x/Makefile new file mode 100644 index 0000000..93cbfbe --- /dev/null +++ b/drivers/net/wireless/rtl818x/Makefile @@ -0,0 +1,7 @@ +rtl8180-objs := rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o +rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o rtl8187_leds.o rtl8187_rfkill.o + +obj-$(CONFIG_RTL8180) += rtl8180.o +obj-$(CONFIG_RTL8187) += rtl8187.o + + diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h new file mode 100644 index 0000000..de3844f --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8180.h @@ -0,0 +1,119 @@ +#ifndef RTL8180_H +#define RTL8180_H + +#include "rtl818x.h" + +#define MAX_RX_SIZE IEEE80211_MAX_RTS_THRESHOLD + +#define RF_PARAM_ANALOGPHY (1 << 0) +#define RF_PARAM_ANTBDEFAULT (1 << 1) +#define RF_PARAM_CARRIERSENSE1 (1 << 2) +#define RF_PARAM_CARRIERSENSE2 (1 << 3) + +#define BB_ANTATTEN_CHAN14 0x0C +#define BB_ANTENNA_B 0x40 + +#define BB_HOST_BANG (1 << 30) +#define BB_HOST_BANG_EN (1 << 2) +#define BB_HOST_BANG_CLK (1 << 1) +#define BB_HOST_BANG_DATA 1 + +#define ANAPARAM_TXDACOFF_SHIFT 27 +#define ANAPARAM_PWR0_SHIFT 28 +#define ANAPARAM_PWR0_MASK (0x07 << ANAPARAM_PWR0_SHIFT) +#define ANAPARAM_PWR1_SHIFT 20 +#define ANAPARAM_PWR1_MASK (0x7F << ANAPARAM_PWR1_SHIFT) + +struct rtl8180_tx_desc { + __le32 flags; + __le16 rts_duration; + __le16 plcp_len; + __le32 tx_buf; + __le32 frame_len; + __le32 next_tx_desc; + u8 cw; + u8 retry_limit; + u8 agc; + u8 flags2; + u32 reserved[2]; +} __attribute__ ((packed)); + +struct rtl8180_rx_desc { + __le32 flags; + __le32 flags2; + union { + __le32 rx_buf; + __le64 tsft; + }; +} __attribute__ ((packed)); + +struct rtl8180_tx_ring { + struct rtl8180_tx_desc *desc; + dma_addr_t dma; + unsigned int idx; + unsigned int entries; + struct sk_buff_head queue; +}; + +struct rtl8180_priv { + /* common between rtl818x drivers */ + struct rtl818x_csr __iomem *map; + const struct rtl818x_rf_ops *rf; + struct ieee80211_vif *vif; + + /* rtl8180 driver specific */ + spinlock_t lock; + struct rtl8180_rx_desc *rx_ring; + dma_addr_t rx_ring_dma; + unsigned int rx_idx; + struct sk_buff *rx_buf[32]; + struct rtl8180_tx_ring tx_ring[4]; + struct ieee80211_channel channels[14]; + struct ieee80211_rate rates[12]; + struct ieee80211_supported_band band; + struct pci_dev *pdev; + u32 rx_conf; + + int r8185; + u32 anaparam; + u16 rfparam; + u8 csthreshold; +}; + +void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data); +void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam); + +static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, u8 __iomem *addr) +{ + return ioread8(addr); +} + +static inline u16 rtl818x_ioread16(struct rtl8180_priv *priv, __le16 __iomem *addr) +{ + return ioread16(addr); +} + +static inline u32 rtl818x_ioread32(struct rtl8180_priv *priv, __le32 __iomem *addr) +{ + return ioread32(addr); +} + +static inline void rtl818x_iowrite8(struct rtl8180_priv *priv, + u8 __iomem *addr, u8 val) +{ + iowrite8(val, addr); +} + +static inline void rtl818x_iowrite16(struct rtl8180_priv *priv, + __le16 __iomem *addr, u16 val) +{ + iowrite16(val, addr); +} + +static inline void rtl818x_iowrite32(struct rtl8180_priv *priv, + __le32 __iomem *addr, u32 val) +{ + iowrite32(val, addr); +} + +#endif /* RTL8180_H */ diff --git 
a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c new file mode 100644 index 0000000..2b928ec --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c @@ -0,0 +1,1101 @@ + +/* + * Linux device driver for RTL8180 / RTL8185 + * + * Copyright 2007 Michael Wu + * Copyright 2007 Andrea Merello + * + * Based on the r8180 driver, which is: + * Copyright 2004-2005 Andrea Merello , et al. + * + * Thanks to Realtek for their support! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include "rtl8180.h" +#include "rtl8180_rtl8225.h" +#include "rtl8180_sa2400.h" +#include "rtl8180_max2820.h" +#include "rtl8180_grf5101.h" + +MODULE_AUTHOR("Michael Wu "); +MODULE_AUTHOR("Andrea Merello "); +MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver"); +MODULE_LICENSE("GPL"); + +static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = { + /* rtl8185 */ + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) }, + { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) }, + { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x701f) }, + + /* rtl8180 */ + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8180) }, + { PCI_DEVICE(0x1799, 0x6001) }, + { PCI_DEVICE(0x1799, 0x6020) }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x3300) }, + { } +}; + +MODULE_DEVICE_TABLE(pci, rtl8180_table); + +static const struct ieee80211_rate rtl818x_rates[] = { + { .bitrate = 10, .hw_value = 0, }, + { .bitrate = 20, .hw_value = 1, }, + { .bitrate = 55, .hw_value = 2, }, + { .bitrate = 110, .hw_value = 3, }, + { .bitrate = 60, .hw_value = 4, }, + { .bitrate = 90, .hw_value = 5, }, + { .bitrate = 120, .hw_value = 6, }, + { .bitrate = 180, .hw_value = 7, }, + { .bitrate = 240, .hw_value = 8, }, + { .bitrate = 360, .hw_value = 9, }, + { .bitrate = 480, .hw_value = 10, }, + { .bitrate = 540, .hw_value = 11, }, +}; + +static const struct ieee80211_channel rtl818x_channels[] = { + { .center_freq = 2412 }, + { .center_freq = 2417 }, + { .center_freq = 2422 }, + { .center_freq = 2427 }, + { .center_freq = 2432 }, + { .center_freq = 2437 }, + { .center_freq = 2442 }, + { .center_freq = 2447 }, + { .center_freq = 2452 }, + { .center_freq = 2457 }, + { .center_freq = 2462 }, + { .center_freq = 2467 }, + { .center_freq = 2472 }, + { .center_freq = 2484 }, +}; + + +void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data) +{ + struct rtl8180_priv *priv = dev->priv; + int i = 10; + u32 buf; + + buf = (data << 8) | addr; + + rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->PHY[0], buf | 0x80); + while (i--) { + rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->PHY[0], buf); + if (rtl818x_ioread8(priv, &priv->map->PHY[2]) == (data & 0xFF)) + return; + } +} + +static void rtl8180_handle_rx(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + unsigned int count = 32; + + while (count--) { + struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx]; + struct sk_buff *skb = priv->rx_buf[priv->rx_idx]; + u32 flags = le32_to_cpu(entry->flags); + + if (flags & RTL818X_RX_DESC_FLAG_OWN) + return; + + if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL | + RTL818X_RX_DESC_FLAG_FOF | + RTL818X_RX_DESC_FLAG_RX_ERR))) + goto done; + else { + u32 flags2 = le32_to_cpu(entry->flags2); + struct ieee80211_rx_status rx_status = {0}; + struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_SIZE); + + if (unlikely(!new_skb)) + goto done; + 
+ pci_unmap_single(priv->pdev, + *((dma_addr_t *)skb->cb), + MAX_RX_SIZE, PCI_DMA_FROMDEVICE); + skb_put(skb, flags & 0xFFF); + + rx_status.antenna = (flags2 >> 15) & 1; + /* TODO: improve signal/rssi reporting */ + rx_status.signal = (flags2 >> 8) & 0x7F; + /* XXX: is this correct? */ + rx_status.rate_idx = (flags >> 20) & 0xF; + rx_status.freq = dev->conf.channel->center_freq; + rx_status.band = dev->conf.channel->band; + rx_status.mactime = le64_to_cpu(entry->tsft); + rx_status.flag |= RX_FLAG_TSFT; + if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) + rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; + + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + ieee80211_rx_irqsafe(dev, skb); + + skb = new_skb; + priv->rx_buf[priv->rx_idx] = skb; + *((dma_addr_t *) skb->cb) = + pci_map_single(priv->pdev, skb_tail_pointer(skb), + MAX_RX_SIZE, PCI_DMA_FROMDEVICE); + } + + done: + entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb)); + entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN | + MAX_RX_SIZE); + if (priv->rx_idx == 31) + entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR); + priv->rx_idx = (priv->rx_idx + 1) % 32; + } +} + +static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio) +{ + struct rtl8180_priv *priv = dev->priv; + struct rtl8180_tx_ring *ring = &priv->tx_ring[prio]; + + while (skb_queue_len(&ring->queue)) { + struct rtl8180_tx_desc *entry = &ring->desc[ring->idx]; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + u32 flags = le32_to_cpu(entry->flags); + + if (flags & RTL818X_TX_DESC_FLAG_OWN) + return; + + ring->idx = (ring->idx + 1) % ring->entries; + skb = __skb_dequeue(&ring->queue); + pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf), + skb->len, PCI_DMA_TODEVICE); + + info = IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(info); + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && + (flags & RTL818X_TX_DESC_FLAG_TX_OK)) + info->flags |= IEEE80211_TX_STAT_ACK; + + info->status.rates[0].count = (flags & 0xFF) + 1; + + ieee80211_tx_status_irqsafe(dev, skb); + if (ring->entries - skb_queue_len(&ring->queue) == 2) + ieee80211_wake_queue(dev, prio); + } +} + +static irqreturn_t rtl8180_interrupt(int irq, void *dev_id) +{ + struct ieee80211_hw *dev = dev_id; + struct rtl8180_priv *priv = dev->priv; + u16 reg; + + spin_lock(&priv->lock); + reg = rtl818x_ioread16(priv, &priv->map->INT_STATUS); + if (unlikely(reg == 0xFFFF)) { + spin_unlock(&priv->lock); + return IRQ_HANDLED; + } + + rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg); + + if (reg & (RTL818X_INT_TXB_OK | RTL818X_INT_TXB_ERR)) + rtl8180_handle_tx(dev, 3); + + if (reg & (RTL818X_INT_TXH_OK | RTL818X_INT_TXH_ERR)) + rtl8180_handle_tx(dev, 2); + + if (reg & (RTL818X_INT_TXN_OK | RTL818X_INT_TXN_ERR)) + rtl8180_handle_tx(dev, 1); + + if (reg & (RTL818X_INT_TXL_OK | RTL818X_INT_TXL_ERR)) + rtl8180_handle_tx(dev, 0); + + if (reg & (RTL818X_INT_RX_OK | RTL818X_INT_RX_ERR)) + rtl8180_handle_rx(dev); + + spin_unlock(&priv->lock); + + return IRQ_HANDLED; +} + +static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct rtl8180_priv *priv = dev->priv; + struct rtl8180_tx_ring *ring; + struct rtl8180_tx_desc *entry; + unsigned long flags; + unsigned int idx, prio; + dma_addr_t mapping; + u32 tx_flags; + u8 rc_flags; + u16 plcp_len = 0; + __le16 rts_duration = 0; + + prio = skb_get_queue_mapping(skb); + ring = &priv->tx_ring[prio]; + + mapping = pci_map_single(priv->pdev, skb->data, + skb->len, 
PCI_DMA_TODEVICE); + + tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS | + RTL818X_TX_DESC_FLAG_LS | + (ieee80211_get_tx_rate(dev, info)->hw_value << 24) | + skb->len; + + if (priv->r8185) + tx_flags |= RTL818X_TX_DESC_FLAG_DMA | + RTL818X_TX_DESC_FLAG_NO_ENC; + + rc_flags = info->control.rates[0].flags; + if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { + tx_flags |= RTL818X_TX_DESC_FLAG_RTS; + tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; + } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + tx_flags |= RTL818X_TX_DESC_FLAG_CTS; + tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; + } + + if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) + rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len, + info); + + if (!priv->r8185) { + unsigned int remainder; + + plcp_len = DIV_ROUND_UP(16 * (skb->len + 4), + (ieee80211_get_tx_rate(dev, info)->bitrate * 2) / 10); + remainder = (16 * (skb->len + 4)) % + ((ieee80211_get_tx_rate(dev, info)->bitrate * 2) / 10); + if (remainder <= 6) + plcp_len |= 1 << 15; + } + + spin_lock_irqsave(&priv->lock, flags); + idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; + entry = &ring->desc[idx]; + + entry->rts_duration = rts_duration; + entry->plcp_len = cpu_to_le16(plcp_len); + entry->tx_buf = cpu_to_le32(mapping); + entry->frame_len = cpu_to_le32(skb->len); + entry->flags2 = info->control.rates[1].idx >= 0 ? + ieee80211_get_alt_retry_rate(dev, info, 0)->bitrate << 4 : 0; + entry->retry_limit = info->control.rates[0].count; + entry->flags = cpu_to_le32(tx_flags); + __skb_queue_tail(&ring->queue, skb); + if (ring->entries - skb_queue_len(&ring->queue) < 2) + ieee80211_stop_queue(dev, skb_get_queue_mapping(skb)); + spin_unlock_irqrestore(&priv->lock, flags); + + rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); + + return 0; +} + +void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam) +{ + u8 reg; + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, + reg | RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, + reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); +} + +static int rtl8180_init_hw(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + u16 reg; + + rtl818x_iowrite8(priv, &priv->map->CMD, 0); + rtl818x_ioread8(priv, &priv->map->CMD); + msleep(10); + + /* reset */ + rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); + rtl818x_ioread8(priv, &priv->map->CMD); + + reg = rtl818x_ioread8(priv, &priv->map->CMD); + reg &= (1 << 1); + reg |= RTL818X_CMD_RESET; + rtl818x_iowrite8(priv, &priv->map->CMD, RTL818X_CMD_RESET); + rtl818x_ioread8(priv, &priv->map->CMD); + msleep(200); + + /* check success of reset */ + if (rtl818x_ioread8(priv, &priv->map->CMD) & RTL818X_CMD_RESET) { + printk(KERN_ERR "%s: reset timeout!\n", wiphy_name(dev->wiphy)); + return -ETIMEDOUT; + } + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_LOAD); + rtl818x_ioread8(priv, &priv->map->CMD); + msleep(200); + + if (rtl818x_ioread8(priv, &priv->map->CONFIG3) & (1 << 3)) { + /* For cardbus */ + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + reg |= 1 << 1; + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg); + reg = rtl818x_ioread16(priv, &priv->map->FEMR); + reg |= (1 << 15) | (1 << 
14) | (1 << 4); + rtl818x_iowrite16(priv, &priv->map->FEMR, reg); + } + + rtl818x_iowrite8(priv, &priv->map->MSR, 0); + + if (!priv->r8185) + rtl8180_set_anaparam(priv, priv->anaparam); + + rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma); + rtl818x_iowrite32(priv, &priv->map->TBDA, priv->tx_ring[3].dma); + rtl818x_iowrite32(priv, &priv->map->THPDA, priv->tx_ring[2].dma); + rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma); + rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma); + + /* TODO: necessary? specs indicate not */ + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG2); + rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg & ~(1 << 3)); + if (priv->r8185) { + reg = rtl818x_ioread8(priv, &priv->map->CONFIG2); + rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg | (1 << 4)); + } + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + /* TODO: set CONFIG5 for calibrating AGC on rtl8180 + philips radio? */ + + /* TODO: turn off hw wep on rtl8180 */ + + rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0); + + if (priv->r8185) { + rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0); + rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0x81); + rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0); + + rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3); + + /* TODO: set ClkRun enable? necessary? */ + reg = rtl818x_ioread8(priv, &priv->map->GP_ENABLE); + rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, reg & ~(1 << 6)); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | (1 << 2)); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + } else { + rtl818x_iowrite16(priv, &priv->map->BRSR, 0x1); + rtl818x_iowrite8(priv, &priv->map->SECURITY, 0); + + rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6); + rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C); + } + + priv->rf->init(dev); + if (priv->r8185) + rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3); + return 0; +} + +static int rtl8180_init_rx_ring(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + struct rtl8180_rx_desc *entry; + int i; + + priv->rx_ring = pci_alloc_consistent(priv->pdev, + sizeof(*priv->rx_ring) * 32, + &priv->rx_ring_dma); + + if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) { + printk(KERN_ERR "%s: Cannot allocate RX ring\n", + wiphy_name(dev->wiphy)); + return -ENOMEM; + } + + memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * 32); + priv->rx_idx = 0; + + for (i = 0; i < 32; i++) { + struct sk_buff *skb = dev_alloc_skb(MAX_RX_SIZE); + dma_addr_t *mapping; + entry = &priv->rx_ring[i]; + if (!skb) + return 0; + + priv->rx_buf[i] = skb; + mapping = (dma_addr_t *)skb->cb; + *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb), + MAX_RX_SIZE, PCI_DMA_FROMDEVICE); + entry->rx_buf = cpu_to_le32(*mapping); + entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN | + MAX_RX_SIZE); + } + entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR); + return 0; +} + +static void rtl8180_free_rx_ring(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + int i; + + for (i = 0; i < 32; i++) { + struct sk_buff *skb = priv->rx_buf[i]; + if (!skb) + continue; + + pci_unmap_single(priv->pdev, + *((dma_addr_t *)skb->cb), + MAX_RX_SIZE, PCI_DMA_FROMDEVICE); + kfree_skb(skb); + } + + 
pci_free_consistent(priv->pdev, sizeof(*priv->rx_ring) * 32, + priv->rx_ring, priv->rx_ring_dma); + priv->rx_ring = NULL; +} + +static int rtl8180_init_tx_ring(struct ieee80211_hw *dev, + unsigned int prio, unsigned int entries) +{ + struct rtl8180_priv *priv = dev->priv; + struct rtl8180_tx_desc *ring; + dma_addr_t dma; + int i; + + ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); + if (!ring || (unsigned long)ring & 0xFF) { + printk(KERN_ERR "%s: Cannot allocate TX ring (prio = %d)\n", + wiphy_name(dev->wiphy), prio); + return -ENOMEM; + } + + memset(ring, 0, sizeof(*ring)*entries); + priv->tx_ring[prio].desc = ring; + priv->tx_ring[prio].dma = dma; + priv->tx_ring[prio].idx = 0; + priv->tx_ring[prio].entries = entries; + skb_queue_head_init(&priv->tx_ring[prio].queue); + + for (i = 0; i < entries; i++) + ring[i].next_tx_desc = + cpu_to_le32((u32)dma + ((i + 1) % entries) * sizeof(*ring)); + + return 0; +} + +static void rtl8180_free_tx_ring(struct ieee80211_hw *dev, unsigned int prio) +{ + struct rtl8180_priv *priv = dev->priv; + struct rtl8180_tx_ring *ring = &priv->tx_ring[prio]; + + while (skb_queue_len(&ring->queue)) { + struct rtl8180_tx_desc *entry = &ring->desc[ring->idx]; + struct sk_buff *skb = __skb_dequeue(&ring->queue); + + pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf), + skb->len, PCI_DMA_TODEVICE); + kfree_skb(skb); + ring->idx = (ring->idx + 1) % ring->entries; + } + + pci_free_consistent(priv->pdev, sizeof(*ring->desc)*ring->entries, + ring->desc, ring->dma); + ring->desc = NULL; +} + +static int rtl8180_start(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + int ret, i; + u32 reg; + + ret = rtl8180_init_rx_ring(dev); + if (ret) + return ret; + + for (i = 0; i < 4; i++) + if ((ret = rtl8180_init_tx_ring(dev, i, 16))) + goto err_free_rings; + + ret = rtl8180_init_hw(dev); + if (ret) + goto err_free_rings; + + rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma); + rtl818x_iowrite32(priv, &priv->map->TBDA, priv->tx_ring[3].dma); + rtl818x_iowrite32(priv, &priv->map->THPDA, priv->tx_ring[2].dma); + rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma); + rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma); + + ret = request_irq(priv->pdev->irq, rtl8180_interrupt, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (ret) { + printk(KERN_ERR "%s: failed to register IRQ handler\n", + wiphy_name(dev->wiphy)); + goto err_free_rings; + } + + rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); + + rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0); + rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0); + + reg = RTL818X_RX_CONF_ONLYERLPKT | + RTL818X_RX_CONF_RX_AUTORESETPHY | + RTL818X_RX_CONF_MGMT | + RTL818X_RX_CONF_DATA | + (7 << 8 /* MAX RX DMA */) | + RTL818X_RX_CONF_BROADCAST | + RTL818X_RX_CONF_NICMAC; + + if (priv->r8185) + reg |= RTL818X_RX_CONF_CSDM1 | RTL818X_RX_CONF_CSDM2; + else { + reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE1) + ? RTL818X_RX_CONF_CSDM1 : 0; + reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE2) + ? 
RTL818X_RX_CONF_CSDM2 : 0; + } + + priv->rx_conf = reg; + rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); + + if (priv->r8185) { + reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); + reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT; + reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT; + rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); + + reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL); + reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT; + reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT; + reg |= RTL818X_TX_AGC_CTL_FEEDBACK_ANT; + rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg); + + /* disable early TX */ + rtl818x_iowrite8(priv, (u8 __iomem *)priv->map + 0xec, 0x3f); + } + + reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); + reg |= (6 << 21 /* MAX TX DMA */) | + RTL818X_TX_CONF_NO_ICV; + + if (priv->r8185) + reg &= ~RTL818X_TX_CONF_PROBE_DTS; + else + reg &= ~RTL818X_TX_CONF_HW_SEQNUM; + + /* different meaning, same value on both rtl8185 and rtl8180 */ + reg &= ~RTL818X_TX_CONF_SAT_HWPLCP; + + rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg); + + reg = rtl818x_ioread8(priv, &priv->map->CMD); + reg |= RTL818X_CMD_RX_ENABLE; + reg |= RTL818X_CMD_TX_ENABLE; + rtl818x_iowrite8(priv, &priv->map->CMD, reg); + + return 0; + + err_free_rings: + rtl8180_free_rx_ring(dev); + for (i = 0; i < 4; i++) + if (priv->tx_ring[i].desc) + rtl8180_free_tx_ring(dev, i); + + return ret; +} + +static void rtl8180_stop(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + u8 reg; + int i; + + rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); + + reg = rtl818x_ioread8(priv, &priv->map->CMD); + reg &= ~RTL818X_CMD_TX_ENABLE; + reg &= ~RTL818X_CMD_RX_ENABLE; + rtl818x_iowrite8(priv, &priv->map->CMD, reg); + + priv->rf->stop(dev); + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG4); + rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + free_irq(priv->pdev->irq, dev); + + rtl8180_free_rx_ring(dev); + for (i = 0; i < 4; i++) + rtl8180_free_tx_ring(dev, i); +} + +static int rtl8180_add_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct rtl8180_priv *priv = dev->priv; + + /* + * We only support one active interface at a time. 
+ */ + if (priv->vif) + return -EBUSY; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + break; + default: + return -EOPNOTSUPP; + } + + priv->vif = vif; + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0], + le32_to_cpu(*(__le32 *)vif->addr)); + rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4], + le16_to_cpu(*(__le16 *)(vif->addr + 4))); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + return 0; +} + +static void rtl8180_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct rtl8180_priv *priv = dev->priv; + priv->vif = NULL; +} + +static int rtl8180_config(struct ieee80211_hw *dev, u32 changed) +{ + struct rtl8180_priv *priv = dev->priv; + struct ieee80211_conf *conf = &dev->conf; + + priv->rf->set_chan(dev, conf); + + return 0; +} + +static void rtl8180_bss_info_changed(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct rtl8180_priv *priv = dev->priv; + int i; + + if (changed & BSS_CHANGED_BSSID) { + for (i = 0; i < ETH_ALEN; i++) + rtl818x_iowrite8(priv, &priv->map->BSSID[i], + info->bssid[i]); + + if (is_valid_ether_addr(info->bssid)) + rtl818x_iowrite8(priv, &priv->map->MSR, + RTL818X_MSR_INFRA); + else + rtl818x_iowrite8(priv, &priv->map->MSR, + RTL818X_MSR_NO_LINK); + } + + if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp) + priv->rf->conf_erp(dev, info); +} + +static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev, int mc_count, + struct dev_addr_list *mc_list) +{ + return mc_count; +} + +static void rtl8180_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct rtl8180_priv *priv = dev->priv; + + if (changed_flags & FIF_FCSFAIL) + priv->rx_conf ^= RTL818X_RX_CONF_FCS; + if (changed_flags & FIF_CONTROL) + priv->rx_conf ^= RTL818X_RX_CONF_CTRL; + if (changed_flags & FIF_OTHER_BSS) + priv->rx_conf ^= RTL818X_RX_CONF_MONITOR; + if (*total_flags & FIF_ALLMULTI || multicast > 0) + priv->rx_conf |= RTL818X_RX_CONF_MULTICAST; + else + priv->rx_conf &= ~RTL818X_RX_CONF_MULTICAST; + + *total_flags = 0; + + if (priv->rx_conf & RTL818X_RX_CONF_FCS) + *total_flags |= FIF_FCSFAIL; + if (priv->rx_conf & RTL818X_RX_CONF_CTRL) + *total_flags |= FIF_CONTROL; + if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) + *total_flags |= FIF_OTHER_BSS; + if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST) + *total_flags |= FIF_ALLMULTI; + + rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf); +} + +static u64 rtl8180_get_tsf(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + + return rtl818x_ioread32(priv, &priv->map->TSFT[0]) | + (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32; +} + +static const struct ieee80211_ops rtl8180_ops = { + .tx = rtl8180_tx, + .start = rtl8180_start, + .stop = rtl8180_stop, + .add_interface = rtl8180_add_interface, + .remove_interface = rtl8180_remove_interface, + .config = rtl8180_config, + .bss_info_changed = rtl8180_bss_info_changed, + .prepare_multicast = rtl8180_prepare_multicast, + .configure_filter = rtl8180_configure_filter, + .get_tsf = rtl8180_get_tsf, +}; + +static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom) +{ + struct ieee80211_hw *dev = eeprom->data; + struct rtl8180_priv *priv = dev->priv; + u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + + eeprom->reg_data_in = reg & 
RTL818X_EEPROM_CMD_WRITE; + eeprom->reg_data_out = reg & RTL818X_EEPROM_CMD_READ; + eeprom->reg_data_clock = reg & RTL818X_EEPROM_CMD_CK; + eeprom->reg_chip_select = reg & RTL818X_EEPROM_CMD_CS; +} + +static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom) +{ + struct ieee80211_hw *dev = eeprom->data; + struct rtl8180_priv *priv = dev->priv; + u8 reg = 2 << 6; + + if (eeprom->reg_data_in) + reg |= RTL818X_EEPROM_CMD_WRITE; + if (eeprom->reg_data_out) + reg |= RTL818X_EEPROM_CMD_READ; + if (eeprom->reg_data_clock) + reg |= RTL818X_EEPROM_CMD_CK; + if (eeprom->reg_chip_select) + reg |= RTL818X_EEPROM_CMD_CS; + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, reg); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(10); +} + +static int __devinit rtl8180_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct ieee80211_hw *dev; + struct rtl8180_priv *priv; + unsigned long mem_addr, mem_len; + unsigned int io_addr, io_len; + int err, i; + struct eeprom_93cx6 eeprom; + const char *chip_name, *rf_name = NULL; + u32 reg; + u16 eeprom_val; + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "%s (rtl8180): Cannot enable new PCI device\n", + pci_name(pdev)); + return err; + } + + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + printk(KERN_ERR "%s (rtl8180): Cannot obtain PCI resources\n", + pci_name(pdev)); + return err; + } + + io_addr = pci_resource_start(pdev, 0); + io_len = pci_resource_len(pdev, 0); + mem_addr = pci_resource_start(pdev, 1); + mem_len = pci_resource_len(pdev, 1); + + if (mem_len < sizeof(struct rtl818x_csr) || + io_len < sizeof(struct rtl818x_csr)) { + printk(KERN_ERR "%s (rtl8180): Too short PCI resources\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_free_reg; + } + + if ((err = pci_set_dma_mask(pdev, 0xFFFFFF00ULL)) || + (err = pci_set_consistent_dma_mask(pdev, 0xFFFFFF00ULL))) { + printk(KERN_ERR "%s (rtl8180): No suitable DMA available\n", + pci_name(pdev)); + goto err_free_reg; + } + + pci_set_master(pdev); + + dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8180_ops); + if (!dev) { + printk(KERN_ERR "%s (rtl8180): ieee80211 alloc failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_free_reg; + } + + priv = dev->priv; + priv->pdev = pdev; + + dev->max_rates = 2; + SET_IEEE80211_DEV(dev, &pdev->dev); + pci_set_drvdata(pdev, dev); + + priv->map = pci_iomap(pdev, 1, mem_len); + if (!priv->map) + priv->map = pci_iomap(pdev, 0, io_len); + + if (!priv->map) { + printk(KERN_ERR "%s (rtl8180): Cannot map device memory\n", + pci_name(pdev)); + goto err_free_dev; + } + + BUILD_BUG_ON(sizeof(priv->channels) != sizeof(rtl818x_channels)); + BUILD_BUG_ON(sizeof(priv->rates) != sizeof(rtl818x_rates)); + + memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels)); + memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates)); + + priv->band.band = IEEE80211_BAND_2GHZ; + priv->band.channels = priv->channels; + priv->band.n_channels = ARRAY_SIZE(rtl818x_channels); + priv->band.bitrates = priv->rates; + priv->band.n_bitrates = 4; + dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + + dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_SIGNAL_UNSPEC; + dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + dev->queues = 1; + dev->max_signal = 65; + + reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); + reg &= RTL818X_TX_CONF_HWVER_MASK; + switch (reg) { + case RTL818X_TX_CONF_R8180_ABCD: + chip_name = "RTL8180"; + break; + case 
RTL818X_TX_CONF_R8180_F: + chip_name = "RTL8180vF"; + break; + case RTL818X_TX_CONF_R8185_ABC: + chip_name = "RTL8185"; + break; + case RTL818X_TX_CONF_R8185_D: + chip_name = "RTL8185vD"; + break; + default: + printk(KERN_ERR "%s (rtl8180): Unknown chip! (0x%x)\n", + pci_name(pdev), reg >> 25); + goto err_iounmap; + } + + priv->r8185 = reg & RTL818X_TX_CONF_R8185_ABC; + if (priv->r8185) { + priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates); + pci_try_set_mwi(pdev); + } + + eeprom.data = dev; + eeprom.register_read = rtl8180_eeprom_register_read; + eeprom.register_write = rtl8180_eeprom_register_write; + if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6)) + eeprom.width = PCI_EEPROM_WIDTH_93C66; + else + eeprom.width = PCI_EEPROM_WIDTH_93C46; + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_PROGRAM); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(10); + + eeprom_93cx6_read(&eeprom, 0x06, &eeprom_val); + eeprom_val &= 0xFF; + switch (eeprom_val) { + case 1: rf_name = "Intersil"; + break; + case 2: rf_name = "RFMD"; + break; + case 3: priv->rf = &sa2400_rf_ops; + break; + case 4: priv->rf = &max2820_rf_ops; + break; + case 5: priv->rf = &grf5101_rf_ops; + break; + case 9: priv->rf = rtl8180_detect_rf(dev); + break; + case 10: + rf_name = "RTL8255"; + break; + default: + printk(KERN_ERR "%s (rtl8180): Unknown RF! (0x%x)\n", + pci_name(pdev), eeprom_val); + goto err_iounmap; + } + + if (!priv->rf) { + printk(KERN_ERR "%s (rtl8180): %s RF frontend not supported!\n", + pci_name(pdev), rf_name); + goto err_iounmap; + } + + eeprom_93cx6_read(&eeprom, 0x17, &eeprom_val); + priv->csthreshold = eeprom_val >> 8; + if (!priv->r8185) { + __le32 anaparam; + eeprom_93cx6_multiread(&eeprom, 0xD, (__le16 *)&anaparam, 2); + priv->anaparam = le32_to_cpu(anaparam); + eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam); + } + + eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)dev->wiphy->perm_addr, 3); + if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { + printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! 
Using" + " randomly generated MAC addr\n", pci_name(pdev)); + random_ether_addr(dev->wiphy->perm_addr); + } + + /* CCK TX power */ + for (i = 0; i < 14; i += 2) { + u16 txpwr; + eeprom_93cx6_read(&eeprom, 0x10 + (i >> 1), &txpwr); + priv->channels[i].hw_value = txpwr & 0xFF; + priv->channels[i + 1].hw_value = txpwr >> 8; + } + + /* OFDM TX power */ + if (priv->r8185) { + for (i = 0; i < 14; i += 2) { + u16 txpwr; + eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr); + priv->channels[i].hw_value |= (txpwr & 0xFF) << 8; + priv->channels[i + 1].hw_value |= txpwr & 0xFF00; + } + } + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + spin_lock_init(&priv->lock); + + err = ieee80211_register_hw(dev); + if (err) { + printk(KERN_ERR "%s (rtl8180): Cannot register device\n", + pci_name(pdev)); + goto err_iounmap; + } + + printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n", + wiphy_name(dev->wiphy), dev->wiphy->perm_addr, + chip_name, priv->rf->name); + + return 0; + + err_iounmap: + iounmap(priv->map); + + err_free_dev: + pci_set_drvdata(pdev, NULL); + ieee80211_free_hw(dev); + + err_free_reg: + pci_release_regions(pdev); + pci_disable_device(pdev); + return err; +} + +static void __devexit rtl8180_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct rtl8180_priv *priv; + + if (!dev) + return; + + ieee80211_unregister_hw(dev); + + priv = dev->priv; + + pci_iounmap(pdev, priv->map); + pci_release_regions(pdev); + pci_disable_device(pdev); + ieee80211_free_hw(dev); +} + +#ifdef CONFIG_PM +static int rtl8180_suspend(struct pci_dev *pdev, pm_message_t state) +{ + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int rtl8180_resume(struct pci_dev *pdev) +{ + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + return 0; +} + +#endif /* CONFIG_PM */ + +static struct pci_driver rtl8180_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl8180_table, + .probe = rtl8180_probe, + .remove = __devexit_p(rtl8180_remove), +#ifdef CONFIG_PM + .suspend = rtl8180_suspend, + .resume = rtl8180_resume, +#endif /* CONFIG_PM */ +}; + +static int __init rtl8180_init(void) +{ + return pci_register_driver(&rtl8180_driver); +} + +static void __exit rtl8180_exit(void) +{ + pci_unregister_driver(&rtl8180_driver); +} + +module_init(rtl8180_init); +module_exit(rtl8180_exit); diff --git a/drivers/net/wireless/rtl818x/rtl8180_grf5101.c b/drivers/net/wireless/rtl818x/rtl8180_grf5101.c new file mode 100644 index 0000000..947ee55 --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8180_grf5101.c @@ -0,0 +1,180 @@ + +/* + * Radio tuning for GCT GRF5101 on RTL8180 + * + * Copyright 2007 Andrea Merello + * + * Code from the BSD driver and the rtl8181 project have been + * very useful to understand certain things + * + * I want to thanks the Authors of such projects and the Ndiswrapper + * project Authors. + * + * A special Big Thanks also is for all people who donated me cards, + * making possible the creation of the original rtl8180 driver + * from which this code is derived! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <net/mac80211.h>
+
+#include "rtl8180.h"
+#include "rtl8180_grf5101.h"
+
+static const int grf5101_encode[] = {
+	0x0, 0x8, 0x4, 0xC,
+	0x2, 0xA, 0x6, 0xE,
+	0x1, 0x9, 0x5, 0xD,
+	0x3, 0xB, 0x7, 0xF
+};
+
+static void write_grf5101(struct ieee80211_hw *dev, u8 addr, u32 data)
+{
+	struct rtl8180_priv *priv = dev->priv;
+	u32 phy_config;
+
+	phy_config = grf5101_encode[(data >> 8) & 0xF];
+	phy_config |= grf5101_encode[(data >> 4) & 0xF] << 4;
+	phy_config |= grf5101_encode[data & 0xF] << 8;
+	phy_config |= grf5101_encode[(addr >> 1) & 0xF] << 12;
+	phy_config |= (addr & 1) << 16;
+	phy_config |= grf5101_encode[(data & 0xf000) >> 12] << 24;
+
+	/* MAC will bang bits to the chip */
+	phy_config |= 0x90000000;
+
+	rtl818x_iowrite32(priv,
+		(__le32 __iomem *) &priv->map->RFPinsOutput, phy_config);
+
+	msleep(3);
+}
+
+static void grf5101_write_phy_antenna(struct ieee80211_hw *dev, short chan)
+{
+	struct rtl8180_priv *priv = dev->priv;
+	u8 ant = GRF5101_ANTENNA;
+
+	if (priv->rfparam & RF_PARAM_ANTBDEFAULT)
+		ant |= BB_ANTENNA_B;
+
+	if (chan == 14)
+		ant |= BB_ANTATTEN_CHAN14;
+
+	rtl8180_write_phy(dev, 0x10, ant);
+}
+
+static void grf5101_rf_set_channel(struct ieee80211_hw *dev,
+				   struct ieee80211_conf *conf)
+{
+	struct rtl8180_priv *priv = dev->priv;
+	int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+	u32 txpw = priv->channels[channel - 1].hw_value & 0xFF;
+	u32 chan = channel - 1;
+
+	/* set TX power */
+	write_grf5101(dev, 0x15, 0x0);
+	write_grf5101(dev, 0x06, txpw);
+	write_grf5101(dev, 0x15, 0x10);
+	write_grf5101(dev, 0x15, 0x0);
+
+	/* set frequency */
+	write_grf5101(dev, 0x07, 0x0);
+	write_grf5101(dev, 0x0B, chan);
+	write_grf5101(dev, 0x07, 0x1000);
+
+	grf5101_write_phy_antenna(dev, channel);
+}
+
+static void grf5101_rf_stop(struct ieee80211_hw *dev)
+{
+	struct rtl8180_priv *priv = dev->priv;
+	u32 anaparam;
+
+	anaparam = priv->anaparam;
+	anaparam &= 0x000fffff;
+	anaparam |= 0x3f900000;
+	rtl8180_set_anaparam(priv, anaparam);
+
+	write_grf5101(dev, 0x07, 0x0);
+	write_grf5101(dev, 0x1f, 0x45);
+	write_grf5101(dev, 0x1f, 0x5);
+	write_grf5101(dev, 0x00, 0x8e4);
+}
+
+static void grf5101_rf_init(struct ieee80211_hw *dev)
+{
+	struct rtl8180_priv *priv = dev->priv;
+
+	rtl8180_set_anaparam(priv, priv->anaparam);
+
+	write_grf5101(dev, 0x1f, 0x0);
+	write_grf5101(dev, 0x1f, 0x0);
+	write_grf5101(dev, 0x1f, 0x40);
+	write_grf5101(dev, 0x1f, 0x60);
+	write_grf5101(dev, 0x1f, 0x61);
+	write_grf5101(dev, 0x1f, 0x61);
+	write_grf5101(dev, 0x00, 0xae4);
+	write_grf5101(dev, 0x1f, 0x1);
+	write_grf5101(dev, 0x1f, 0x41);
+	write_grf5101(dev, 0x1f, 0x61);
+
+	write_grf5101(dev, 0x01, 0x1a23);
+	write_grf5101(dev, 0x02, 0x4971);
+	write_grf5101(dev, 0x03, 0x41de);
+	write_grf5101(dev, 0x04, 0x2d80);
+	write_grf5101(dev, 0x05, 0x68ff);	/* 0x61ff original value */
+	write_grf5101(dev, 0x06, 0x0);
+	write_grf5101(dev, 0x07, 0x0);
+	write_grf5101(dev, 0x08, 0x7533);
+	write_grf5101(dev, 0x09, 0xc401);
+	write_grf5101(dev, 0x0a, 0x0);
+	write_grf5101(dev, 0x0c, 0x1c7);
+	write_grf5101(dev, 0x0d, 0x29d3);
+	write_grf5101(dev, 0x0e, 0x2e8);
+	write_grf5101(dev, 0x10, 0x192);
+	write_grf5101(dev, 0x11, 0x248);
+	write_grf5101(dev, 0x12, 0x0);
+	write_grf5101(dev, 0x13, 0x20c4);
+	write_grf5101(dev, 0x14, 0xf4fc);
+	write_grf5101(dev, 0x15, 0x0);
+	write_grf5101(dev, 0x16, 0x1500);
+
+	write_grf5101(dev, 0x07, 0x1000);
+
+	/* baseband configuration */
+	rtl8180_write_phy(dev, 0, 0xa8);
+	rtl8180_write_phy(dev, 3, 0x0);
+
rtl8180_write_phy(dev, 4, 0xc0); + rtl8180_write_phy(dev, 5, 0x90); + rtl8180_write_phy(dev, 6, 0x1e); + rtl8180_write_phy(dev, 7, 0x64); + + grf5101_write_phy_antenna(dev, 1); + + rtl8180_write_phy(dev, 0x11, 0x88); + + if (rtl818x_ioread8(priv, &priv->map->CONFIG2) & + RTL818X_CONFIG2_ANTENNA_DIV) + rtl8180_write_phy(dev, 0x12, 0xc0); /* enable ant diversity */ + else + rtl8180_write_phy(dev, 0x12, 0x40); /* disable ant diversity */ + + rtl8180_write_phy(dev, 0x13, 0x90 | priv->csthreshold); + + rtl8180_write_phy(dev, 0x19, 0x0); + rtl8180_write_phy(dev, 0x1a, 0xa0); + rtl8180_write_phy(dev, 0x1b, 0x44); +} + +const struct rtl818x_rf_ops grf5101_rf_ops = { + .name = "GCT", + .init = grf5101_rf_init, + .stop = grf5101_rf_stop, + .set_chan = grf5101_rf_set_channel +}; diff --git a/drivers/net/wireless/rtl818x/rtl8180_grf5101.h b/drivers/net/wireless/rtl818x/rtl8180_grf5101.h new file mode 100644 index 0000000..7664711 --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8180_grf5101.h @@ -0,0 +1,28 @@ +#ifndef RTL8180_GRF5101_H +#define RTL8180_GRF5101_H + +/* + * Radio tuning for GCT GRF5101 on RTL8180 + * + * Copyright 2007 Andrea Merello + * + * Code from the BSD driver and the rtl8181 project have been + * very useful to understand certain things + * + * I want to thanks the Authors of such projects and the Ndiswrapper + * project Authors. + * + * A special Big Thanks also is for all people who donated me cards, + * making possible the creation of the original rtl8180 driver + * from which this code is derived! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define GRF5101_ANTENNA 0xA3 + +extern const struct rtl818x_rf_ops grf5101_rf_ops; + +#endif /* RTL8180_GRF5101_H */ diff --git a/drivers/net/wireless/rtl818x/rtl8180_max2820.c b/drivers/net/wireless/rtl818x/rtl8180_max2820.c new file mode 100644 index 0000000..6c825fd --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8180_max2820.c @@ -0,0 +1,152 @@ +/* + * Radio tuning for Maxim max2820 on RTL8180 + * + * Copyright 2007 Andrea Merello + * + * Code from the BSD driver and the rtl8181 project have been + * very useful to understand certain things + * + * I want to thanks the Authors of such projects and the Ndiswrapper + * project Authors. + * + * A special Big Thanks also is for all people who donated me cards, + * making possible the creation of the original rtl8180 driver + * from which this code is derived! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <net/mac80211.h>
+
+#include "rtl8180.h"
+#include "rtl8180_max2820.h"
+
+static const u32 max2820_chan[] = {
+	12,	/* CH 1 */
+	17,
+	22,
+	27,
+	32,
+	37,
+	42,
+	47,
+	52,
+	57,
+	62,
+	67,
+	72,
+	84,	/* CH 14 */
+};
+
+static void write_max2820(struct ieee80211_hw *dev, u8 addr, u32 data)
+{
+	struct rtl8180_priv *priv = dev->priv;
+	u32 phy_config;
+
+	phy_config = 0x90 + (data & 0xf);
+	phy_config <<= 16;
+	phy_config += addr;
+	phy_config <<= 8;
+	phy_config += (data >> 4) & 0xff;
+
+	rtl818x_iowrite32(priv,
+		(__le32 __iomem *) &priv->map->RFPinsOutput, phy_config);
+
+	msleep(1);
+}
+
+static void max2820_write_phy_antenna(struct ieee80211_hw *dev, short chan)
+{
+	struct rtl8180_priv *priv = dev->priv;
+	u8 ant;
+
+	ant = MAXIM_ANTENNA;
+	if (priv->rfparam & RF_PARAM_ANTBDEFAULT)
+		ant |= BB_ANTENNA_B;
+	if (chan == 14)
+		ant |= BB_ANTATTEN_CHAN14;
+
+	rtl8180_write_phy(dev, 0x10, ant);
+}
+
+static void max2820_rf_set_channel(struct ieee80211_hw *dev,
+				   struct ieee80211_conf *conf)
+{
+	struct rtl8180_priv *priv = dev->priv;
+	int channel = conf ?
+		ieee80211_frequency_to_channel(conf->channel->center_freq) : 1;
+	unsigned int chan_idx = channel - 1;
+	u32 txpw = priv->channels[chan_idx].hw_value & 0xFF;
+	u32 chan = max2820_chan[chan_idx];
+
+	/* While philips SA2400 drive the PA bias from
+	 * sa2400, for MAXIM we do this directly from BB */
+	rtl8180_write_phy(dev, 3, txpw);
+
+	max2820_write_phy_antenna(dev, channel);
+	write_max2820(dev, 3, chan);
+}
+
+static void max2820_rf_stop(struct ieee80211_hw *dev)
+{
+	rtl8180_write_phy(dev, 3, 0x8);
+	write_max2820(dev, 1, 0);
+}
+
+
+static void max2820_rf_init(struct ieee80211_hw *dev)
+{
+	struct rtl8180_priv *priv = dev->priv;
+
+	/* MAXIM from netbsd driver */
+	write_max2820(dev, 0, 0x007); /* test mode as indicated in datasheet */
+	write_max2820(dev, 1, 0x01e); /* enable register */
+	write_max2820(dev, 2, 0x001); /* synt register */
+
+	max2820_rf_set_channel(dev, NULL);
+
+	write_max2820(dev, 4, 0x313); /* rx register */
+
+	/* PA is driven directly by the BB, we keep the MAXIM bias
+	 * at the highest value in case that setting it to lower
+	 * values may introduce some further attenuation somewhere..
+ */ + write_max2820(dev, 5, 0x00f); + + /* baseband configuration */ + rtl8180_write_phy(dev, 0, 0x88); /* sys1 */ + rtl8180_write_phy(dev, 3, 0x08); /* txagc */ + rtl8180_write_phy(dev, 4, 0xf8); /* lnadet */ + rtl8180_write_phy(dev, 5, 0x90); /* ifagcinit */ + rtl8180_write_phy(dev, 6, 0x1a); /* ifagclimit */ + rtl8180_write_phy(dev, 7, 0x64); /* ifagcdet */ + + max2820_write_phy_antenna(dev, 1); + + rtl8180_write_phy(dev, 0x11, 0x88); /* trl */ + + if (rtl818x_ioread8(priv, &priv->map->CONFIG2) & + RTL818X_CONFIG2_ANTENNA_DIV) + rtl8180_write_phy(dev, 0x12, 0xc7); + else + rtl8180_write_phy(dev, 0x12, 0x47); + + rtl8180_write_phy(dev, 0x13, 0x9b); + + rtl8180_write_phy(dev, 0x19, 0x0); /* CHESTLIM */ + rtl8180_write_phy(dev, 0x1a, 0x9f); /* CHSQLIM */ + + max2820_rf_set_channel(dev, NULL); +} + +const struct rtl818x_rf_ops max2820_rf_ops = { + .name = "Maxim", + .init = max2820_rf_init, + .stop = max2820_rf_stop, + .set_chan = max2820_rf_set_channel +}; diff --git a/drivers/net/wireless/rtl818x/rtl8180_max2820.h b/drivers/net/wireless/rtl818x/rtl8180_max2820.h new file mode 100644 index 0000000..61cf6d1 --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8180_max2820.h @@ -0,0 +1,28 @@ +#ifndef RTL8180_MAX2820_H +#define RTL8180_MAX2820_H + +/* + * Radio tuning for Maxim max2820 on RTL8180 + * + * Copyright 2007 Andrea Merello + * + * Code from the BSD driver and the rtl8181 project have been + * very useful to understand certain things + * + * I want to thanks the Authors of such projects and the Ndiswrapper + * project Authors. + * + * A special Big Thanks also is for all people who donated me cards, + * making possible the creation of the original rtl8180 driver + * from which this code is derived! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define MAXIM_ANTENNA 0xb3 + +extern const struct rtl818x_rf_ops max2820_rf_ops; + +#endif /* RTL8180_MAX2820_H */ diff --git a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c new file mode 100644 index 0000000..4d2be0d --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c @@ -0,0 +1,788 @@ + +/* + * Radio tuning for RTL8225 on RTL8180 + * + * Copyright 2007 Michael Wu + * Copyright 2007 Andrea Merello + * + * Based on the r8180 driver, which is: + * Copyright 2005 Andrea Merello , et al. + * + * Thanks to Realtek for their support! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include "rtl8180.h" +#include "rtl8180_rtl8225.h" + +static void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data) +{ + struct rtl8180_priv *priv = dev->priv; + u16 reg80, reg84, reg82; + u32 bangdata; + int i; + + bangdata = (data << 4) | (addr & 0xf); + + reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput) & 0xfff3; + reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable); + + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x7); + + reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x7 | 0x400); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(10); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(10); + + for (i = 15; i >= 0; i--) { + u16 reg = reg80 | !!(bangdata & (1 << i)); + + if (i & 1) + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1)); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1)); + + if (!(i & 1)) + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); + } + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(10); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x400); + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); +} + +static u16 rtl8225_read(struct ieee80211_hw *dev, u8 addr) +{ + struct rtl8180_priv *priv = dev->priv; + u16 reg80, reg82, reg84, out; + int i; + + reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput); + reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable); + reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect) | 0x400; + + reg80 &= ~0xF; + + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x000F); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x000F); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(4); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(5); + + for (i = 4; i >= 0; i--) { + u16 reg = reg80 | ((addr >> i) & 1); + + if (!(i & 1)) { + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(1); + } + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg | (1 << 1)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg | (1 << 1)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + + if (i & 1) { + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(1); + } + } + + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x000E); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x040E); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3) | (1 << 1)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + rtl818x_iowrite16(priv, 
&priv->map->RFPinsOutput, + reg80 | (1 << 3)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + + out = 0; + for (i = 11; i >= 0; i--) { + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(1); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3) | (1 << 1)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3) | (1 << 1)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3) | (1 << 1)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + + if (rtl818x_ioread16(priv, &priv->map->RFPinsInput) & (1 << 1)) + out |= 1 << i; + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + } + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3) | (1 << 2)); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + udelay(2); + + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x03A0); + + return out; +} + +static const u16 rtl8225bcd_rxgain[] = { + 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409, + 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541, + 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583, + 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644, + 0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688, + 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745, + 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789, + 0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793, + 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d, + 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9, + 0x07aa, 0x07ab, 0x07ac, 0x07ad, 0x07b0, 0x07b1, 0x07b2, 0x07b3, + 0x07b4, 0x07b5, 0x07b8, 0x07b9, 0x07ba, 0x07bb, 0x07bb +}; + +static const u8 rtl8225_agc[] = { + 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, + 0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96, + 0x95, 0x94, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e, + 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86, + 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e, + 0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, 0x36, + 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e, + 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, + 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e, + 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16, + 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, + 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, + 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 +}; + +static const u8 rtl8225_gain[] = { + 0x23, 0x88, 0x7c, 0xa5, /* -82dbm */ + 0x23, 0x88, 0x7c, 0xb5, /* -82dbm */ + 0x23, 0x88, 0x7c, 0xc5, /* -82dbm */ + 0x33, 0x80, 0x79, 0xc5, /* -78dbm */ + 0x43, 0x78, 0x76, 0xc5, /* -74dbm */ + 0x53, 0x60, 0x73, 0xc5, /* -70dbm */ + 0x63, 0x58, 0x70, 0xc5, /* -66dbm */ +}; + +static const u8 rtl8225_threshold[] = { + 0x8d, 0x8d, 0x8d, 0x8d, 0x9d, 0xad, 0xbd +}; + +static const u8 rtl8225_tx_gain_cck_ofdm[] = { + 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e +}; + +static const u8 rtl8225_tx_power_cck[] = { + 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 
0x04, 0x02, + 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02, + 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02, + 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02, + 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03, + 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03 +}; + +static const u8 rtl8225_tx_power_cck_ch14[] = { + 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00, + 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00, + 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00, + 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, + 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00 +}; + +static const u8 rtl8225_tx_power_ofdm[] = { + 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4 +}; + +static const u32 rtl8225_chan[] = { + 0x085c, 0x08dc, 0x095c, 0x09dc, 0x0a5c, 0x0adc, 0x0b5c, + 0x0bdc, 0x0c5c, 0x0cdc, 0x0d5c, 0x0ddc, 0x0e5c, 0x0f72 +}; + +static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel) +{ + struct rtl8180_priv *priv = dev->priv; + u8 cck_power, ofdm_power; + const u8 *tmp; + u32 reg; + int i; + + cck_power = priv->channels[channel - 1].hw_value & 0xFF; + ofdm_power = priv->channels[channel - 1].hw_value >> 8; + + cck_power = min(cck_power, (u8)35); + ofdm_power = min(ofdm_power, (u8)35); + + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, + rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1); + + if (channel == 14) + tmp = &rtl8225_tx_power_cck_ch14[(cck_power % 6) * 8]; + else + tmp = &rtl8225_tx_power_cck[(cck_power % 6) * 8]; + + for (i = 0; i < 8; i++) + rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++); + + msleep(1); /* FIXME: optional? */ + + /* anaparam2 on */ + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, + rtl8225_tx_gain_cck_ofdm[ofdm_power/6] >> 1); + + tmp = &rtl8225_tx_power_ofdm[ofdm_power % 6]; + + rtl8225_write_phy_ofdm(dev, 5, *tmp); + rtl8225_write_phy_ofdm(dev, 7, *tmp); + + msleep(1); +} + +static void rtl8225_rf_init(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + int i; + + rtl8180_set_anaparam(priv, RTL8225_ANAPARAM_ON); + + /* host_pci_init */ + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x0488); + rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + msleep(200); /* FIXME: ehh?? 
*/ + rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0xFF & ~(1 << 6)); + + rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x000a8008); + + /* TODO: check if we need really to change BRSR to do RF config */ + rtl818x_ioread16(priv, &priv->map->BRSR); + rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF); + rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + rtl8225_write(dev, 0x0, 0x067); + rtl8225_write(dev, 0x1, 0xFE0); + rtl8225_write(dev, 0x2, 0x44D); + rtl8225_write(dev, 0x3, 0x441); + rtl8225_write(dev, 0x4, 0x8BE); + rtl8225_write(dev, 0x5, 0xBF0); /* TODO: minipci */ + rtl8225_write(dev, 0x6, 0xAE6); + rtl8225_write(dev, 0x7, rtl8225_chan[0]); + rtl8225_write(dev, 0x8, 0x01F); + rtl8225_write(dev, 0x9, 0x334); + rtl8225_write(dev, 0xA, 0xFD4); + rtl8225_write(dev, 0xB, 0x391); + rtl8225_write(dev, 0xC, 0x050); + rtl8225_write(dev, 0xD, 0x6DB); + rtl8225_write(dev, 0xE, 0x029); + rtl8225_write(dev, 0xF, 0x914); msleep(1); + + rtl8225_write(dev, 0x2, 0xC4D); msleep(100); + + rtl8225_write(dev, 0x0, 0x127); + + for (i = 0; i < ARRAY_SIZE(rtl8225bcd_rxgain); i++) { + rtl8225_write(dev, 0x1, i + 1); + rtl8225_write(dev, 0x2, rtl8225bcd_rxgain[i]); + } + + rtl8225_write(dev, 0x0, 0x027); + rtl8225_write(dev, 0x0, 0x22F); + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); + + for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) { + rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]); + msleep(1); + rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i); + msleep(1); + } + + msleep(1); + + rtl8225_write_phy_ofdm(dev, 0x00, 0x01); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x01, 0x02); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x02, 0x62); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x03, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x04, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x05, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x06, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x07, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x08, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x09, 0xfe); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x0a, 0x09); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x0b, 0x80); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x0c, 0x01); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x0f, 0x38); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x10, 0x84); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x11, 0x03); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x12, 0x20); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x13, 0x20); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x14, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x15, 0x40); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x16, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x17, 0x40); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x18, 0xef); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x19, 0x19); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1a, 0x20); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1b, 0x76); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1c, 0x04); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1e, 0x95); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1f, 0x75); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x20, 0x1f); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x21, 0x27); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x22, 0x16); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x24, 0x46); msleep(1); + 
rtl8225_write_phy_ofdm(dev, 0x25, 0x20); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x27, 0x88); msleep(1); + + rtl8225_write_phy_cck(dev, 0x00, 0x98); msleep(1); + rtl8225_write_phy_cck(dev, 0x03, 0x20); msleep(1); + rtl8225_write_phy_cck(dev, 0x04, 0x7e); msleep(1); + rtl8225_write_phy_cck(dev, 0x05, 0x12); msleep(1); + rtl8225_write_phy_cck(dev, 0x06, 0xfc); msleep(1); + rtl8225_write_phy_cck(dev, 0x07, 0x78); msleep(1); + rtl8225_write_phy_cck(dev, 0x08, 0x2e); msleep(1); + rtl8225_write_phy_cck(dev, 0x10, 0x93); msleep(1); + rtl8225_write_phy_cck(dev, 0x11, 0x88); msleep(1); + rtl8225_write_phy_cck(dev, 0x12, 0x47); msleep(1); + rtl8225_write_phy_cck(dev, 0x13, 0xd0); + rtl8225_write_phy_cck(dev, 0x19, 0x00); + rtl8225_write_phy_cck(dev, 0x1a, 0xa0); + rtl8225_write_phy_cck(dev, 0x1b, 0x08); + rtl8225_write_phy_cck(dev, 0x40, 0x86); + rtl8225_write_phy_cck(dev, 0x41, 0x8d); msleep(1); + rtl8225_write_phy_cck(dev, 0x42, 0x15); msleep(1); + rtl8225_write_phy_cck(dev, 0x43, 0x18); msleep(1); + rtl8225_write_phy_cck(dev, 0x44, 0x1f); msleep(1); + rtl8225_write_phy_cck(dev, 0x45, 0x1e); msleep(1); + rtl8225_write_phy_cck(dev, 0x46, 0x1a); msleep(1); + rtl8225_write_phy_cck(dev, 0x47, 0x15); msleep(1); + rtl8225_write_phy_cck(dev, 0x48, 0x10); msleep(1); + rtl8225_write_phy_cck(dev, 0x49, 0x0a); msleep(1); + rtl8225_write_phy_cck(dev, 0x4a, 0x05); msleep(1); + rtl8225_write_phy_cck(dev, 0x4b, 0x02); msleep(1); + rtl8225_write_phy_cck(dev, 0x4c, 0x05); msleep(1); + + rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D); msleep(1); + + rtl8225_rf_set_tx_power(dev, 1); + + /* RX antenna default to A */ + rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1); /* B: 0xDB */ + rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1); /* B: 0x10 */ + + rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */ + msleep(1); + rtl818x_iowrite32(priv, (__le32 __iomem *)((void __iomem *)priv->map + 0x94), 0x15c00002); + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); + + rtl8225_write(dev, 0x0c, 0x50); + /* set OFDM initial gain */ + rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[4 * 4]); + rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[4 * 4 + 1]); + rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[4 * 4 + 2]); + rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[4 * 4 + 3]); + /* set CCK threshold */ + rtl8225_write_phy_cck(dev, 0x41, rtl8225_threshold[0]); +} + +static const u8 rtl8225z2_tx_power_cck_ch14[] = { + 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00 +}; + +static const u8 rtl8225z2_tx_power_cck_B[] = { + 0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x04 +}; + +static const u8 rtl8225z2_tx_power_cck_A[] = { + 0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04 +}; + +static const u8 rtl8225z2_tx_power_cck[] = { + 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04 +}; + +static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel) +{ + struct rtl8180_priv *priv = dev->priv; + u8 cck_power, ofdm_power; + const u8 *tmp; + int i; + + cck_power = priv->channels[channel - 1].hw_value & 0xFF; + ofdm_power = priv->channels[channel - 1].hw_value >> 8; + + if (channel == 14) + tmp = rtl8225z2_tx_power_cck_ch14; + else if (cck_power == 12) + tmp = rtl8225z2_tx_power_cck_B; + else if (cck_power == 13) + tmp = rtl8225z2_tx_power_cck_A; + else + tmp = rtl8225z2_tx_power_cck; + + for (i = 0; i < 8; i++) + rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++); + + cck_power = min(cck_power, (u8)35); + if (cck_power == 13 || cck_power == 14) + cck_power = 12; + 
if (cck_power >= 15) + cck_power -= 2; + + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, cck_power); + rtl818x_ioread8(priv, &priv->map->TX_GAIN_CCK); + msleep(1); + + ofdm_power = min(ofdm_power, (u8)35); + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, ofdm_power); + + rtl8225_write_phy_ofdm(dev, 2, 0x62); + rtl8225_write_phy_ofdm(dev, 5, 0x00); + rtl8225_write_phy_ofdm(dev, 6, 0x40); + rtl8225_write_phy_ofdm(dev, 7, 0x00); + rtl8225_write_phy_ofdm(dev, 8, 0x40); + + msleep(1); +} + +static const u16 rtl8225z2_rxgain[] = { + 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0008, 0x0009, + 0x000a, 0x000b, 0x0102, 0x0103, 0x0104, 0x0105, 0x0140, 0x0141, + 0x0142, 0x0143, 0x0144, 0x0145, 0x0180, 0x0181, 0x0182, 0x0183, + 0x0184, 0x0185, 0x0188, 0x0189, 0x018a, 0x018b, 0x0243, 0x0244, + 0x0245, 0x0280, 0x0281, 0x0282, 0x0283, 0x0284, 0x0285, 0x0288, + 0x0289, 0x028a, 0x028b, 0x028c, 0x0342, 0x0343, 0x0344, 0x0345, + 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0388, 0x0389, + 0x038a, 0x038b, 0x038c, 0x038d, 0x0390, 0x0391, 0x0392, 0x0393, + 0x0394, 0x0395, 0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, + 0x03a0, 0x03a1, 0x03a2, 0x03a3, 0x03a4, 0x03a5, 0x03a8, 0x03a9, + 0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3, + 0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bb +}; + +static void rtl8225z2_rf_init(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + int i; + + rtl8180_set_anaparam(priv, RTL8225_ANAPARAM_ON); + + /* host_pci_init */ + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x0488); + rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + msleep(200); /* FIXME: ehh?? 
*/ + rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0xFF & ~(1 << 6)); + + rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00088008); + + /* TODO: check if we need really to change BRSR to do RF config */ + rtl818x_ioread16(priv, &priv->map->BRSR); + rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF); + rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); + + rtl8225_write(dev, 0x0, 0x0B7); msleep(1); + rtl8225_write(dev, 0x1, 0xEE0); msleep(1); + rtl8225_write(dev, 0x2, 0x44D); msleep(1); + rtl8225_write(dev, 0x3, 0x441); msleep(1); + rtl8225_write(dev, 0x4, 0x8C3); msleep(1); + rtl8225_write(dev, 0x5, 0xC72); msleep(1); + rtl8225_write(dev, 0x6, 0x0E6); msleep(1); + rtl8225_write(dev, 0x7, 0x82A); msleep(1); + rtl8225_write(dev, 0x8, 0x03F); msleep(1); + rtl8225_write(dev, 0x9, 0x335); msleep(1); + rtl8225_write(dev, 0xa, 0x9D4); msleep(1); + rtl8225_write(dev, 0xb, 0x7BB); msleep(1); + rtl8225_write(dev, 0xc, 0x850); msleep(1); + rtl8225_write(dev, 0xd, 0xCDF); msleep(1); + rtl8225_write(dev, 0xe, 0x02B); msleep(1); + rtl8225_write(dev, 0xf, 0x114); msleep(100); + + if (!(rtl8225_read(dev, 6) & (1 << 7))) { + rtl8225_write(dev, 0x02, 0x0C4D); + msleep(200); + rtl8225_write(dev, 0x02, 0x044D); + msleep(100); + /* TODO: readd calibration failure message when the calibration + check works */ + } + + rtl8225_write(dev, 0x0, 0x1B7); + rtl8225_write(dev, 0x3, 0x002); + rtl8225_write(dev, 0x5, 0x004); + + for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) { + rtl8225_write(dev, 0x1, i + 1); + rtl8225_write(dev, 0x2, rtl8225z2_rxgain[i]); + } + + rtl8225_write(dev, 0x0, 0x0B7); msleep(100); + rtl8225_write(dev, 0x2, 0xC4D); + + msleep(200); + rtl8225_write(dev, 0x2, 0x44D); + msleep(100); + + rtl8225_write(dev, 0x00, 0x2BF); + rtl8225_write(dev, 0xFF, 0xFFFF); + + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); + + for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) { + rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]); + msleep(1); + rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i); + msleep(1); + } + + msleep(1); + + rtl8225_write_phy_ofdm(dev, 0x00, 0x01); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x01, 0x02); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x02, 0x62); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x03, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x04, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x05, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x06, 0x40); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x07, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x08, 0x40); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x09, 0xfe); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x0a, 0x09); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x18, 0xef); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x0b, 0x80); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x0c, 0x01); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x0d, 0x43); + rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x0f, 0x38); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x10, 0x84); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x11, 0x06); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x12, 0x20); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x13, 0x20); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x14, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x15, 0x40); msleep(1); + 
rtl8225_write_phy_ofdm(dev, 0x16, 0x00); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x17, 0x40); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x18, 0xef); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x19, 0x19); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1a, 0x20); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1b, 0x11); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1c, 0x04); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1d, 0xc5); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1e, 0xb3); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x1f, 0x75); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x20, 0x1f); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x21, 0x27); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x22, 0x16); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x23, 0x80); msleep(1); /* FIXME: not needed? */ + rtl8225_write_phy_ofdm(dev, 0x24, 0x46); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x25, 0x20); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1); + rtl8225_write_phy_ofdm(dev, 0x27, 0x88); msleep(1); + + rtl8225_write_phy_cck(dev, 0x00, 0x98); msleep(1); + rtl8225_write_phy_cck(dev, 0x03, 0x20); msleep(1); + rtl8225_write_phy_cck(dev, 0x04, 0x7e); msleep(1); + rtl8225_write_phy_cck(dev, 0x05, 0x12); msleep(1); + rtl8225_write_phy_cck(dev, 0x06, 0xfc); msleep(1); + rtl8225_write_phy_cck(dev, 0x07, 0x78); msleep(1); + rtl8225_write_phy_cck(dev, 0x08, 0x2e); msleep(1); + rtl8225_write_phy_cck(dev, 0x10, 0x93); msleep(1); + rtl8225_write_phy_cck(dev, 0x11, 0x88); msleep(1); + rtl8225_write_phy_cck(dev, 0x12, 0x47); msleep(1); + rtl8225_write_phy_cck(dev, 0x13, 0xd0); + rtl8225_write_phy_cck(dev, 0x19, 0x00); + rtl8225_write_phy_cck(dev, 0x1a, 0xa0); + rtl8225_write_phy_cck(dev, 0x1b, 0x08); + rtl8225_write_phy_cck(dev, 0x40, 0x86); + rtl8225_write_phy_cck(dev, 0x41, 0x8a); msleep(1); + rtl8225_write_phy_cck(dev, 0x42, 0x15); msleep(1); + rtl8225_write_phy_cck(dev, 0x43, 0x18); msleep(1); + rtl8225_write_phy_cck(dev, 0x44, 0x36); msleep(1); + rtl8225_write_phy_cck(dev, 0x45, 0x35); msleep(1); + rtl8225_write_phy_cck(dev, 0x46, 0x2e); msleep(1); + rtl8225_write_phy_cck(dev, 0x47, 0x25); msleep(1); + rtl8225_write_phy_cck(dev, 0x48, 0x1c); msleep(1); + rtl8225_write_phy_cck(dev, 0x49, 0x12); msleep(1); + rtl8225_write_phy_cck(dev, 0x4a, 0x09); msleep(1); + rtl8225_write_phy_cck(dev, 0x4b, 0x04); msleep(1); + rtl8225_write_phy_cck(dev, 0x4c, 0x05); msleep(1); + + rtl818x_iowrite8(priv, (u8 __iomem *)((void __iomem *)priv->map + 0x5B), 0x0D); msleep(1); + + rtl8225z2_rf_set_tx_power(dev, 1); + + /* RX antenna default to A */ + rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1); /* B: 0xDB */ + rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1); /* B: 0x10 */ + + rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */ + msleep(1); + rtl818x_iowrite32(priv, (__le32 __iomem *)((void __iomem *)priv->map + 0x94), 0x15c00002); + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); +} + +static void rtl8225_rf_stop(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + u8 reg; + + rtl8225_write(dev, 0x4, 0x1f); msleep(1); + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_OFF); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM, RTL8225_ANAPARAM_OFF); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite8(priv, 
&priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); +} + +static void rtl8225_rf_set_channel(struct ieee80211_hw *dev, + struct ieee80211_conf *conf) +{ + struct rtl8180_priv *priv = dev->priv; + int chan = ieee80211_frequency_to_channel(conf->channel->center_freq); + + if (priv->rf->init == rtl8225_rf_init) + rtl8225_rf_set_tx_power(dev, chan); + else + rtl8225z2_rf_set_tx_power(dev, chan); + + rtl8225_write(dev, 0x7, rtl8225_chan[chan - 1]); + msleep(10); +} + +static void rtl8225_rf_conf_erp(struct ieee80211_hw *dev, + struct ieee80211_bss_conf *info) +{ + struct rtl8180_priv *priv = dev->priv; + + if (info->use_short_slot) { + rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9); + rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22); + rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14); + rtl818x_iowrite8(priv, &priv->map->EIFS, 81); + rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0x73); + } else { + rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14); + rtl818x_iowrite8(priv, &priv->map->SIFS, 0x44); + rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24); + rtl818x_iowrite8(priv, &priv->map->EIFS, 81); + rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0xa5); + } +} + +static const struct rtl818x_rf_ops rtl8225_ops = { + .name = "rtl8225", + .init = rtl8225_rf_init, + .stop = rtl8225_rf_stop, + .set_chan = rtl8225_rf_set_channel, + .conf_erp = rtl8225_rf_conf_erp, +}; + +static const struct rtl818x_rf_ops rtl8225z2_ops = { + .name = "rtl8225z2", + .init = rtl8225z2_rf_init, + .stop = rtl8225_rf_stop, + .set_chan = rtl8225_rf_set_channel, + .conf_erp = rtl8225_rf_conf_erp, +}; + +const struct rtl818x_rf_ops * rtl8180_detect_rf(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + u16 reg8, reg9; + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x0488); + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); + rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + msleep(100); + + rtl8225_write(dev, 0, 0x1B7); + + reg8 = rtl8225_read(dev, 8); + reg9 = rtl8225_read(dev, 9); + + rtl8225_write(dev, 0, 0x0B7); + + if (reg8 != 0x588 || reg9 != 0x700) + return &rtl8225_ops; + + return &rtl8225z2_ops; +} diff --git a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.h b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.h new file mode 100644 index 0000000..310013a --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.h @@ -0,0 +1,23 @@ +#ifndef RTL8180_RTL8225_H +#define RTL8180_RTL8225_H + +#define RTL8225_ANAPARAM_ON 0xa0000b59 +#define RTL8225_ANAPARAM2_ON 0x860dec11 +#define RTL8225_ANAPARAM_OFF 0xa00beb59 +#define RTL8225_ANAPARAM2_OFF 0x840dec11 + +const struct rtl818x_rf_ops * rtl8180_detect_rf(struct ieee80211_hw *); + +static inline void rtl8225_write_phy_ofdm(struct ieee80211_hw *dev, + u8 addr, u8 data) +{ + rtl8180_write_phy(dev, addr, data); +} + +static inline void rtl8225_write_phy_cck(struct ieee80211_hw *dev, + u8 addr, u8 data) +{ + rtl8180_write_phy(dev, addr, data | 0x10000); +} + +#endif /* RTL8180_RTL8225_H */ diff --git a/drivers/net/wireless/rtl818x/rtl8180_sa2400.c b/drivers/net/wireless/rtl818x/rtl8180_sa2400.c new file mode 100644 index 0000000..cea4e0c --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8180_sa2400.c @@ -0,0 +1,202 @@ + +/* + * Radio tuning for Philips SA2400 on RTL8180 + * + * Copyright 2007 Andrea Merello + * + * Code from the BSD driver and the rtl8181 project have been + * very useful to understand certain things + * + * I want to thanks the Authors of such projects and the Ndiswrapper + * project 
Authors. + * + * A special Big Thanks also is for all people who donated me cards, + * making possible the creation of the original rtl8180 driver + * from which this code is derived! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +#include "rtl8180.h" +#include "rtl8180_sa2400.h" + +static const u32 sa2400_chan[] = { + 0x00096c, /* ch1 */ + 0x080970, + 0x100974, + 0x180978, + 0x000980, + 0x080984, + 0x100988, + 0x18098c, + 0x000994, + 0x080998, + 0x10099c, + 0x1809a0, + 0x0009a8, + 0x0009b4, /* ch 14 */ +}; + +static void write_sa2400(struct ieee80211_hw *dev, u8 addr, u32 data) +{ + struct rtl8180_priv *priv = dev->priv; + u32 phy_config; + + /* MAC will bang bits to the sa2400. sw 3-wire is NOT used */ + phy_config = 0xb0000000; + + phy_config |= ((u32)(addr & 0xf)) << 24; + phy_config |= data & 0xffffff; + + rtl818x_iowrite32(priv, + (__le32 __iomem *) &priv->map->RFPinsOutput, phy_config); + + msleep(3); +} + +static void sa2400_write_phy_antenna(struct ieee80211_hw *dev, short chan) +{ + struct rtl8180_priv *priv = dev->priv; + u8 ant = SA2400_ANTENNA; + + if (priv->rfparam & RF_PARAM_ANTBDEFAULT) + ant |= BB_ANTENNA_B; + + if (chan == 14) + ant |= BB_ANTATTEN_CHAN14; + + rtl8180_write_phy(dev, 0x10, ant); + +} + +static void sa2400_rf_set_channel(struct ieee80211_hw *dev, + struct ieee80211_conf *conf) +{ + struct rtl8180_priv *priv = dev->priv; + int channel = ieee80211_frequency_to_channel(conf->channel->center_freq); + u32 txpw = priv->channels[channel - 1].hw_value & 0xFF; + u32 chan = sa2400_chan[channel - 1]; + + write_sa2400(dev, 7, txpw); + + sa2400_write_phy_antenna(dev, channel); + + write_sa2400(dev, 0, chan); + write_sa2400(dev, 1, 0xbb50); + write_sa2400(dev, 2, 0x80); + write_sa2400(dev, 3, 0); +} + +static void sa2400_rf_stop(struct ieee80211_hw *dev) +{ + write_sa2400(dev, 4, 0); +} + +static void sa2400_rf_init(struct ieee80211_hw *dev) +{ + struct rtl8180_priv *priv = dev->priv; + u32 anaparam, txconf; + u8 firdac; + int analogphy = priv->rfparam & RF_PARAM_ANALOGPHY; + + anaparam = priv->anaparam; + anaparam &= ~(1 << ANAPARAM_TXDACOFF_SHIFT); + anaparam &= ~ANAPARAM_PWR1_MASK; + anaparam &= ~ANAPARAM_PWR0_MASK; + + if (analogphy) { + anaparam |= SA2400_ANA_ANAPARAM_PWR1_ON << ANAPARAM_PWR1_SHIFT; + firdac = 0; + } else { + anaparam |= (SA2400_DIG_ANAPARAM_PWR1_ON << ANAPARAM_PWR1_SHIFT); + anaparam |= (SA2400_ANAPARAM_PWR0_ON << ANAPARAM_PWR0_SHIFT); + firdac = 1 << SA2400_REG4_FIRDAC_SHIFT; + } + + rtl8180_set_anaparam(priv, anaparam); + + write_sa2400(dev, 0, sa2400_chan[0]); + write_sa2400(dev, 1, 0xbb50); + write_sa2400(dev, 2, 0x80); + write_sa2400(dev, 3, 0); + write_sa2400(dev, 4, 0x19340 | firdac); + write_sa2400(dev, 5, 0x1dfb | (SA2400_MAX_SENS - 54) << 15); + write_sa2400(dev, 4, 0x19348 | firdac); /* calibrate VCO */ + + if (!analogphy) + write_sa2400(dev, 4, 0x1938c); /*???*/ + + write_sa2400(dev, 4, 0x19340 | firdac); + + write_sa2400(dev, 0, sa2400_chan[0]); + write_sa2400(dev, 1, 0xbb50); + write_sa2400(dev, 2, 0x80); + write_sa2400(dev, 3, 0); + write_sa2400(dev, 4, 0x19344 | firdac); /* calibrate filter */ + + /* new from rtl8180 embedded driver (rtl8181 project) */ + write_sa2400(dev, 6, 0x13ff | (1 << 23)); /* MANRX */ + write_sa2400(dev, 8, 0); /* VCO */ + + if (analogphy) { + rtl8180_set_anaparam(priv, anaparam | + (1 << ANAPARAM_TXDACOFF_SHIFT)); + 
+ txconf = rtl818x_ioread32(priv, &priv->map->TX_CONF); + rtl818x_iowrite32(priv, &priv->map->TX_CONF, + txconf | RTL818X_TX_CONF_LOOPBACK_CONT); + + write_sa2400(dev, 4, 0x19341); /* calibrates DC */ + + /* a 5us sleep is required here, + * we rely on the 3ms delay introduced in write_sa2400 */ + write_sa2400(dev, 4, 0x19345); + + /* a 20us sleep is required here, + * we rely on the 3ms delay introduced in write_sa2400 */ + + rtl818x_iowrite32(priv, &priv->map->TX_CONF, txconf); + + rtl8180_set_anaparam(priv, anaparam); + } + /* end new code */ + + write_sa2400(dev, 4, 0x19341 | firdac); /* RTX MODE */ + + /* baseband configuration */ + rtl8180_write_phy(dev, 0, 0x98); + rtl8180_write_phy(dev, 3, 0x38); + rtl8180_write_phy(dev, 4, 0xe0); + rtl8180_write_phy(dev, 5, 0x90); + rtl8180_write_phy(dev, 6, 0x1a); + rtl8180_write_phy(dev, 7, 0x64); + + sa2400_write_phy_antenna(dev, 1); + + rtl8180_write_phy(dev, 0x11, 0x80); + + if (rtl818x_ioread8(priv, &priv->map->CONFIG2) & + RTL818X_CONFIG2_ANTENNA_DIV) + rtl8180_write_phy(dev, 0x12, 0xc7); /* enable ant diversity */ + else + rtl8180_write_phy(dev, 0x12, 0x47); /* disable ant diversity */ + + rtl8180_write_phy(dev, 0x13, 0x90 | priv->csthreshold); + + rtl8180_write_phy(dev, 0x19, 0x0); + rtl8180_write_phy(dev, 0x1a, 0xa0); +} + +const struct rtl818x_rf_ops sa2400_rf_ops = { + .name = "Philips", + .init = sa2400_rf_init, + .stop = sa2400_rf_stop, + .set_chan = sa2400_rf_set_channel +}; diff --git a/drivers/net/wireless/rtl818x/rtl8180_sa2400.h b/drivers/net/wireless/rtl818x/rtl8180_sa2400.h new file mode 100644 index 0000000..a4aaa0d --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8180_sa2400.h @@ -0,0 +1,36 @@ +#ifndef RTL8180_SA2400_H +#define RTL8180_SA2400_H + +/* + * Radio tuning for Philips SA2400 on RTL8180 + * + * Copyright 2007 Andrea Merello + * + * Code from the BSD driver and the rtl8181 project have been + * very useful to understand certain things + * + * I want to thanks the Authors of such projects and the Ndiswrapper + * project Authors. + * + * A special Big Thanks also is for all people who donated me cards, + * making possible the creation of the original rtl8180 driver + * from which this code is derived! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define SA2400_ANTENNA 0x91 +#define SA2400_DIG_ANAPARAM_PWR1_ON 0x8 +#define SA2400_ANA_ANAPARAM_PWR1_ON 0x28 +#define SA2400_ANAPARAM_PWR0_ON 0x3 + +/* RX sensitivity in dbm */ +#define SA2400_MAX_SENS 85 + +#define SA2400_REG4_FIRDAC_SHIFT 7 + +extern const struct rtl818x_rf_ops sa2400_rf_ops; + +#endif /* RTL8180_SA2400_H */ diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h new file mode 100644 index 0000000..6bb3211 --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8187.h @@ -0,0 +1,271 @@ +/* + * Definitions for RTL8187 hardware + * + * Copyright 2007 Michael Wu + * Copyright 2007 Andrea Merello + * + * Based on the r8187 driver, which is: + * Copyright 2005 Andrea Merello , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef RTL8187_H +#define RTL8187_H + +#include "rtl818x.h" +#include "rtl8187_leds.h" + +#define RTL8187_EEPROM_TXPWR_BASE 0x05 +#define RTL8187_EEPROM_MAC_ADDR 0x07 +#define RTL8187_EEPROM_TXPWR_CHAN_1 0x16 /* 3 channels */ +#define RTL8187_EEPROM_TXPWR_CHAN_6 0x1B /* 2 channels */ +#define RTL8187_EEPROM_TXPWR_CHAN_4 0x3D /* 2 channels */ +#define RTL8187_EEPROM_SELECT_GPIO 0x3B + +#define RTL8187_REQT_READ 0xC0 +#define RTL8187_REQT_WRITE 0x40 +#define RTL8187_REQ_GET_REG 0x05 +#define RTL8187_REQ_SET_REG 0x05 + +#define RTL8187_MAX_RX 0x9C4 + +#define RFKILL_MASK_8187_89_97 0x2 +#define RFKILL_MASK_8198 0x4 + +struct rtl8187_rx_info { + struct urb *urb; + struct ieee80211_hw *dev; +}; + +struct rtl8187_rx_hdr { + __le32 flags; + u8 noise; + u8 signal; + u8 agc; + u8 reserved; + __le64 mac_time; +} __attribute__((packed)); + +struct rtl8187b_rx_hdr { + __le32 flags; + __le64 mac_time; + u8 sq; + u8 rssi; + u8 agc; + u8 flags2; + __le16 snr_long2end; + s8 pwdb_g12; + u8 fot; +} __attribute__((packed)); + +/* {rtl8187,rtl8187b}_tx_info is in skb */ + +struct rtl8187_tx_hdr { + __le32 flags; + __le16 rts_duration; + __le16 len; + __le32 retry; +} __attribute__((packed)); + +struct rtl8187b_tx_hdr { + __le32 flags; + __le16 rts_duration; + __le16 len; + __le32 unused_1; + __le16 unused_2; + __le16 tx_duration; + __le32 unused_3; + __le32 retry; + __le32 unused_4[2]; +} __attribute__((packed)); + +enum { + DEVICE_RTL8187, + DEVICE_RTL8187B +}; + +struct rtl8187_priv { + /* common between rtl818x drivers */ + struct rtl818x_csr *map; + const struct rtl818x_rf_ops *rf; + struct ieee80211_vif *vif; + + /* The mutex protects the TX loopback state. + * Any attempt to set channels concurrently locks the device. + */ + struct mutex conf_mutex; + + /* rtl8187 specific */ + struct ieee80211_channel channels[14]; + struct ieee80211_rate rates[12]; + struct ieee80211_supported_band band; + struct usb_device *udev; + u32 rx_conf; + struct usb_anchor anchored; + struct delayed_work work; + struct ieee80211_hw *dev; +#ifdef CONFIG_RTL8187_LEDS + struct rtl8187_led led_radio; + struct rtl8187_led led_tx; + struct rtl8187_led led_rx; + struct delayed_work led_on; + struct delayed_work led_off; +#endif + u16 txpwr_base; + u8 asic_rev; + u8 is_rtl8187b; + enum { + RTL8187BvB, + RTL8187BvD, + RTL8187BvE + } hw_rev; + struct sk_buff_head rx_queue; + u8 signal; + u8 noise; + u8 slot_time; + u8 aifsn[4]; + u8 rfkill_mask; + struct { + __le64 buf; + struct sk_buff_head queue; + } b_tx_status; /* This queue is used by both -b and non-b devices */ + struct mutex io_mutex; + union { + u8 bits8; + __le16 bits16; + __le32 bits32; + } *io_dmabuf; + bool rfkill_off; +}; + +void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data); + +static inline u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv, + u8 *addr, u8 idx) +{ + u8 val; + + mutex_lock(&priv->io_mutex); + usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), + RTL8187_REQ_GET_REG, RTL8187_REQT_READ, + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits8, sizeof(val), HZ / 2); + + val = priv->io_dmabuf->bits8; + mutex_unlock(&priv->io_mutex); + + return val; +} + +static inline u8 rtl818x_ioread8(struct rtl8187_priv *priv, u8 *addr) +{ + return rtl818x_ioread8_idx(priv, addr, 0); +} + +static inline u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv, + __le16 *addr, u8 idx) +{ + __le16 val; + + mutex_lock(&priv->io_mutex); + usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), + RTL8187_REQ_GET_REG, RTL8187_REQT_READ, + 
(unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits16, sizeof(val), HZ / 2); + + val = priv->io_dmabuf->bits16; + mutex_unlock(&priv->io_mutex); + + return le16_to_cpu(val); +} + +static inline u16 rtl818x_ioread16(struct rtl8187_priv *priv, __le16 *addr) +{ + return rtl818x_ioread16_idx(priv, addr, 0); +} + +static inline u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv, + __le32 *addr, u8 idx) +{ + __le32 val; + + mutex_lock(&priv->io_mutex); + usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0), + RTL8187_REQ_GET_REG, RTL8187_REQT_READ, + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits32, sizeof(val), HZ / 2); + + val = priv->io_dmabuf->bits32; + mutex_unlock(&priv->io_mutex); + + return le32_to_cpu(val); +} + +static inline u32 rtl818x_ioread32(struct rtl8187_priv *priv, __le32 *addr) +{ + return rtl818x_ioread32_idx(priv, addr, 0); +} + +static inline void rtl818x_iowrite8_idx(struct rtl8187_priv *priv, + u8 *addr, u8 val, u8 idx) +{ + mutex_lock(&priv->io_mutex); + + priv->io_dmabuf->bits8 = val; + usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), + RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits8, sizeof(val), HZ / 2); + + mutex_unlock(&priv->io_mutex); +} + +static inline void rtl818x_iowrite8(struct rtl8187_priv *priv, u8 *addr, u8 val) +{ + rtl818x_iowrite8_idx(priv, addr, val, 0); +} + +static inline void rtl818x_iowrite16_idx(struct rtl8187_priv *priv, + __le16 *addr, u16 val, u8 idx) +{ + mutex_lock(&priv->io_mutex); + + priv->io_dmabuf->bits16 = cpu_to_le16(val); + usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), + RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits16, sizeof(val), HZ / 2); + + mutex_unlock(&priv->io_mutex); +} + +static inline void rtl818x_iowrite16(struct rtl8187_priv *priv, __le16 *addr, + u16 val) +{ + rtl818x_iowrite16_idx(priv, addr, val, 0); +} + +static inline void rtl818x_iowrite32_idx(struct rtl8187_priv *priv, + __le32 *addr, u32 val, u8 idx) +{ + mutex_lock(&priv->io_mutex); + + priv->io_dmabuf->bits32 = cpu_to_le32(val); + usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), + RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, + (unsigned long)addr, idx & 0x03, + &priv->io_dmabuf->bits32, sizeof(val), HZ / 2); + + mutex_unlock(&priv->io_mutex); +} + +static inline void rtl818x_iowrite32(struct rtl8187_priv *priv, __le32 *addr, + u32 val) +{ + rtl818x_iowrite32_idx(priv, addr, val, 0); +} + +#endif /* RTL8187_H */ diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c new file mode 100644 index 0000000..0fb850e --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c @@ -0,0 +1,1588 @@ +/* + * Linux device driver for RTL8187 + * + * Copyright 2007 Michael Wu + * Copyright 2007 Andrea Merello + * + * Based on the r8187 driver, which is: + * Copyright 2005 Andrea Merello , et al. + * + * The driver was extended to the RTL8187B in 2008 by: + * Herton Ronaldo Krzesinski + * Hin-Tak Leung + * Larry Finger + * + * Magic delays and register offsets below are taken from the original + * r8187 driver sources. Thanks to Realtek for their support! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "rtl8187.h" +#include "rtl8187_rtl8225.h" +#ifdef CONFIG_RTL8187_LEDS +#include "rtl8187_leds.h" +#endif +#include "rtl8187_rfkill.h" + +MODULE_AUTHOR("Michael Wu "); +MODULE_AUTHOR("Andrea Merello "); +MODULE_AUTHOR("Herton Ronaldo Krzesinski "); +MODULE_AUTHOR("Hin-Tak Leung "); +MODULE_AUTHOR("Larry Finger "); +MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver"); +MODULE_LICENSE("GPL"); + +static struct usb_device_id rtl8187_table[] __devinitdata = { + /* Asus */ + {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187}, + /* Belkin */ + {USB_DEVICE(0x050d, 0x705e), .driver_info = DEVICE_RTL8187B}, + /* Realtek */ + {USB_DEVICE(0x0bda, 0x8187), .driver_info = DEVICE_RTL8187}, + {USB_DEVICE(0x0bda, 0x8189), .driver_info = DEVICE_RTL8187B}, + {USB_DEVICE(0x0bda, 0x8197), .driver_info = DEVICE_RTL8187B}, + {USB_DEVICE(0x0bda, 0x8198), .driver_info = DEVICE_RTL8187B}, + /* Surecom */ + {USB_DEVICE(0x0769, 0x11F2), .driver_info = DEVICE_RTL8187}, + /* Logitech */ + {USB_DEVICE(0x0789, 0x010C), .driver_info = DEVICE_RTL8187}, + /* Netgear */ + {USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187}, + {USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187}, + {USB_DEVICE(0x0846, 0x4260), .driver_info = DEVICE_RTL8187B}, + /* HP */ + {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187}, + /* Sitecom */ + {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, + {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, + {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B}, + /* Sphairon Access Systems GmbH */ + {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, + /* Dick Smith Electronics */ + {USB_DEVICE(0x1371, 0x9401), .driver_info = DEVICE_RTL8187}, + /* Abocom */ + {USB_DEVICE(0x13d1, 0xabe6), .driver_info = DEVICE_RTL8187}, + /* Qcom */ + {USB_DEVICE(0x18E8, 0x6232), .driver_info = DEVICE_RTL8187}, + /* AirLive */ + {USB_DEVICE(0x1b75, 0x8187), .driver_info = DEVICE_RTL8187}, + /* Linksys */ + {USB_DEVICE(0x1737, 0x0073), .driver_info = DEVICE_RTL8187B}, + {} +}; + +MODULE_DEVICE_TABLE(usb, rtl8187_table); + +static const struct ieee80211_rate rtl818x_rates[] = { + { .bitrate = 10, .hw_value = 0, }, + { .bitrate = 20, .hw_value = 1, }, + { .bitrate = 55, .hw_value = 2, }, + { .bitrate = 110, .hw_value = 3, }, + { .bitrate = 60, .hw_value = 4, }, + { .bitrate = 90, .hw_value = 5, }, + { .bitrate = 120, .hw_value = 6, }, + { .bitrate = 180, .hw_value = 7, }, + { .bitrate = 240, .hw_value = 8, }, + { .bitrate = 360, .hw_value = 9, }, + { .bitrate = 480, .hw_value = 10, }, + { .bitrate = 540, .hw_value = 11, }, +}; + +static const struct ieee80211_channel rtl818x_channels[] = { + { .center_freq = 2412 }, + { .center_freq = 2417 }, + { .center_freq = 2422 }, + { .center_freq = 2427 }, + { .center_freq = 2432 }, + { .center_freq = 2437 }, + { .center_freq = 2442 }, + { .center_freq = 2447 }, + { .center_freq = 2452 }, + { .center_freq = 2457 }, + { .center_freq = 2462 }, + { .center_freq = 2467 }, + { .center_freq = 2472 }, + { .center_freq = 2484 }, +}; + +static void rtl8187_iowrite_async_cb(struct urb *urb) +{ + kfree(urb->context); +} + +static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr, + void *data, u16 len) +{ + struct usb_ctrlrequest *dr; + struct urb *urb; + struct rtl8187_async_write_data { + u8 data[4]; + struct usb_ctrlrequest dr; + } *buf; + int rc; + + buf = kmalloc(sizeof(*buf), GFP_ATOMIC); + if (!buf) + return; + + urb = 
usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + kfree(buf); + return; + } + + dr = &buf->dr; + + dr->bRequestType = RTL8187_REQT_WRITE; + dr->bRequest = RTL8187_REQ_SET_REG; + dr->wValue = addr; + dr->wIndex = 0; + dr->wLength = cpu_to_le16(len); + + memcpy(buf, data, len); + + usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0), + (unsigned char *)dr, buf, len, + rtl8187_iowrite_async_cb, buf); + usb_anchor_urb(urb, &priv->anchored); + rc = usb_submit_urb(urb, GFP_ATOMIC); + if (rc < 0) { + kfree(buf); + usb_unanchor_urb(urb); + } + usb_free_urb(urb); +} + +static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv, + __le32 *addr, u32 val) +{ + __le32 buf = cpu_to_le32(val); + + rtl8187_iowrite_async(priv, cpu_to_le16((unsigned long)addr), + &buf, sizeof(buf)); +} + +void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data) +{ + struct rtl8187_priv *priv = dev->priv; + + data <<= 8; + data |= addr | 0x80; + + rtl818x_iowrite8(priv, &priv->map->PHY[3], (data >> 24) & 0xFF); + rtl818x_iowrite8(priv, &priv->map->PHY[2], (data >> 16) & 0xFF); + rtl818x_iowrite8(priv, &priv->map->PHY[1], (data >> 8) & 0xFF); + rtl818x_iowrite8(priv, &priv->map->PHY[0], data & 0xFF); +} + +static void rtl8187_tx_cb(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *)urb->context; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hw *hw = info->rate_driver_data[0]; + struct rtl8187_priv *priv = hw->priv; + + skb_pull(skb, priv->is_rtl8187b ? sizeof(struct rtl8187b_tx_hdr) : + sizeof(struct rtl8187_tx_hdr)); + ieee80211_tx_info_clear_status(info); + + if (!(urb->status) && !(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + if (priv->is_rtl8187b) { + skb_queue_tail(&priv->b_tx_status.queue, skb); + + /* queue is "full", discard last items */ + while (skb_queue_len(&priv->b_tx_status.queue) > 5) { + struct sk_buff *old_skb; + + dev_dbg(&priv->udev->dev, + "transmit status queue full\n"); + + old_skb = skb_dequeue(&priv->b_tx_status.queue); + ieee80211_tx_status_irqsafe(hw, old_skb); + } + return; + } else { + info->flags |= IEEE80211_TX_STAT_ACK; + } + } + if (priv->is_rtl8187b) + ieee80211_tx_status_irqsafe(hw, skb); + else { + /* Retry information for the RTI8187 is only available by + * reading a register in the device. We are in interrupt mode + * here, thus queue the skb and finish on a work queue. 
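+ * The queued work, rtl8187_work() below, reads the cumulative retry
+ * count from register 0xFFFA and reports the TX status from there.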
*/ + skb_queue_tail(&priv->b_tx_status.queue, skb); + ieee80211_queue_delayed_work(hw, &priv->work, 0); + } +} + +static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct rtl8187_priv *priv = dev->priv; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + unsigned int ep; + void *buf; + struct urb *urb; + __le16 rts_dur = 0; + u32 flags; + int rc; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + + flags = skb->len; + flags |= RTL818X_TX_DESC_FLAG_NO_ENC; + + flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24; + if (ieee80211_has_morefrags(((struct ieee80211_hdr *)skb->data)->frame_control)) + flags |= RTL818X_TX_DESC_FLAG_MOREFRAG; + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { + flags |= RTL818X_TX_DESC_FLAG_RTS; + flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; + rts_dur = ieee80211_rts_duration(dev, priv->vif, + skb->len, info); + } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + flags |= RTL818X_TX_DESC_FLAG_CTS; + flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; + } + + if (!priv->is_rtl8187b) { + struct rtl8187_tx_hdr *hdr = + (struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr)); + hdr->flags = cpu_to_le32(flags); + hdr->len = 0; + hdr->rts_duration = rts_dur; + hdr->retry = cpu_to_le32((info->control.rates[0].count - 1) << 8); + buf = hdr; + + ep = 2; + } else { + /* fc needs to be calculated before skb_push() */ + unsigned int epmap[4] = { 6, 7, 5, 4 }; + struct ieee80211_hdr *tx_hdr = + (struct ieee80211_hdr *)(skb->data); + u16 fc = le16_to_cpu(tx_hdr->frame_control); + + struct rtl8187b_tx_hdr *hdr = + (struct rtl8187b_tx_hdr *)skb_push(skb, sizeof(*hdr)); + struct ieee80211_rate *txrate = + ieee80211_get_tx_rate(dev, info); + memset(hdr, 0, sizeof(*hdr)); + hdr->flags = cpu_to_le32(flags); + hdr->rts_duration = rts_dur; + hdr->retry = cpu_to_le32((info->control.rates[0].count - 1) << 8); + hdr->tx_duration = + ieee80211_generic_frame_duration(dev, priv->vif, + skb->len, txrate); + buf = hdr; + + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) + ep = 12; + else + ep = epmap[skb_get_queue_mapping(skb)]; + } + + info->rate_driver_data[0] = dev; + info->rate_driver_data[1] = urb; + + usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep), + buf, skb->len, rtl8187_tx_cb, skb); + urb->transfer_flags |= URB_ZERO_PACKET; + usb_anchor_urb(urb, &priv->anchored); + rc = usb_submit_urb(urb, GFP_ATOMIC); + if (rc < 0) { + usb_unanchor_urb(urb); + kfree_skb(skb); + } + usb_free_urb(urb); + + return NETDEV_TX_OK; +} + +static void rtl8187_rx_cb(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *)urb->context; + struct rtl8187_rx_info *info = (struct rtl8187_rx_info *)skb->cb; + struct ieee80211_hw *dev = info->dev; + struct rtl8187_priv *priv = dev->priv; + struct ieee80211_rx_status rx_status = { 0 }; + int rate, signal; + u32 flags; + unsigned long f; + + spin_lock_irqsave(&priv->rx_queue.lock, f); + __skb_unlink(skb, &priv->rx_queue); + spin_unlock_irqrestore(&priv->rx_queue.lock, f); + skb_put(skb, urb->actual_length); + + if (unlikely(urb->status)) { + dev_kfree_skb_irq(skb); + return; + } + + if (!priv->is_rtl8187b) { + struct rtl8187_rx_hdr *hdr = + (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr)); + flags = le32_to_cpu(hdr->flags); + /* As with the RTL8187B below, the AGC is used to calculate + * signal strength. 
In this case, the scaling + * constants are derived from the output of p54usb. + */ + signal = -4 - ((27 * hdr->agc) >> 6); + rx_status.antenna = (hdr->signal >> 7) & 1; + rx_status.mactime = le64_to_cpu(hdr->mac_time); + } else { + struct rtl8187b_rx_hdr *hdr = + (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr)); + /* The Realtek datasheet for the RTL8187B shows that the RX + * header contains the following quantities: signal quality, + * RSSI, AGC, the received power in dB, and the measured SNR. + * In testing, none of these quantities show qualitative + * agreement with AP signal strength, except for the AGC, + * which is inversely proportional to the strength of the + * signal. In the following, the signal strength + * is derived from the AGC. The arbitrary scaling constants + * are chosen to make the results close to the values obtained + * for a BCM4312 using b43 as the driver. The noise is ignored + * for now. + */ + flags = le32_to_cpu(hdr->flags); + signal = 14 - hdr->agc / 2; + rx_status.antenna = (hdr->rssi >> 7) & 1; + rx_status.mactime = le64_to_cpu(hdr->mac_time); + } + + rx_status.signal = signal; + priv->signal = signal; + rate = (flags >> 20) & 0xF; + skb_trim(skb, flags & 0x0FFF); + rx_status.rate_idx = rate; + rx_status.freq = dev->conf.channel->center_freq; + rx_status.band = dev->conf.channel->band; + rx_status.flag |= RX_FLAG_TSFT; + if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) + rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + ieee80211_rx_irqsafe(dev, skb); + + skb = dev_alloc_skb(RTL8187_MAX_RX); + if (unlikely(!skb)) { + /* TODO check rx queue length and refill *somewhere* */ + return; + } + + info = (struct rtl8187_rx_info *)skb->cb; + info->urb = urb; + info->dev = dev; + urb->transfer_buffer = skb_tail_pointer(skb); + urb->context = skb; + skb_queue_tail(&priv->rx_queue, skb); + + usb_anchor_urb(urb, &priv->anchored); + if (usb_submit_urb(urb, GFP_ATOMIC)) { + usb_unanchor_urb(urb); + skb_unlink(skb, &priv->rx_queue); + dev_kfree_skb_irq(skb); + } +} + +static int rtl8187_init_urbs(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + struct urb *entry = NULL; + struct sk_buff *skb; + struct rtl8187_rx_info *info; + int ret = 0; + + while (skb_queue_len(&priv->rx_queue) < 16) { + skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL); + if (!skb) { + ret = -ENOMEM; + goto err; + } + entry = usb_alloc_urb(0, GFP_KERNEL); + if (!entry) { + ret = -ENOMEM; + goto err; + } + usb_fill_bulk_urb(entry, priv->udev, + usb_rcvbulkpipe(priv->udev, + priv->is_rtl8187b ? 
3 : 1), + skb_tail_pointer(skb), + RTL8187_MAX_RX, rtl8187_rx_cb, skb); + info = (struct rtl8187_rx_info *)skb->cb; + info->urb = entry; + info->dev = dev; + skb_queue_tail(&priv->rx_queue, skb); + usb_anchor_urb(entry, &priv->anchored); + ret = usb_submit_urb(entry, GFP_KERNEL); + if (ret) { + skb_unlink(skb, &priv->rx_queue); + usb_unanchor_urb(entry); + goto err; + } + usb_free_urb(entry); + } + return ret; + +err: + usb_free_urb(entry); + kfree_skb(skb); + usb_kill_anchored_urbs(&priv->anchored); + return ret; +} + +static void rtl8187b_status_cb(struct urb *urb) +{ + struct ieee80211_hw *hw = (struct ieee80211_hw *)urb->context; + struct rtl8187_priv *priv = hw->priv; + u64 val; + unsigned int cmd_type; + + if (unlikely(urb->status)) + return; + + /* + * Read from status buffer: + * + * bits [30:31] = cmd type: + * - 0 indicates tx beacon interrupt + * - 1 indicates tx close descriptor + * + * In the case of tx beacon interrupt: + * [0:9] = Last Beacon CW + * [10:29] = reserved + * [30:31] = 00b + * [32:63] = Last Beacon TSF + * + * If it's tx close descriptor: + * [0:7] = Packet Retry Count + * [8:14] = RTS Retry Count + * [15] = TOK + * [16:27] = Sequence No + * [28] = LS + * [29] = FS + * [30:31] = 01b + * [32:47] = unused (reserved?) + * [48:63] = MAC Used Time + */ + val = le64_to_cpu(priv->b_tx_status.buf); + + cmd_type = (val >> 30) & 0x3; + if (cmd_type == 1) { + unsigned int pkt_rc, seq_no; + bool tok; + struct sk_buff *skb; + struct ieee80211_hdr *ieee80211hdr; + unsigned long flags; + + pkt_rc = val & 0xFF; + tok = val & (1 << 15); + seq_no = (val >> 16) & 0xFFF; + + spin_lock_irqsave(&priv->b_tx_status.queue.lock, flags); + skb_queue_reverse_walk(&priv->b_tx_status.queue, skb) { + ieee80211hdr = (struct ieee80211_hdr *)skb->data; + + /* + * While testing, it was discovered that the seq_no + * doesn't actually contains the sequence number. + * Instead of returning just the 12 bits of sequence + * number, hardware is returning entire sequence control + * (fragment number plus sequence number) in a 12 bit + * only field overflowing after some time. 
As a + * workaround, just consider the lower bits, and expect + * it's unlikely we wrongly ack some sent data + */ + if ((le16_to_cpu(ieee80211hdr->seq_ctrl) + & 0xFFF) == seq_no) + break; + } + if (skb != (struct sk_buff *) &priv->b_tx_status.queue) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + __skb_unlink(skb, &priv->b_tx_status.queue); + if (tok) + info->flags |= IEEE80211_TX_STAT_ACK; + info->status.rates[0].count = pkt_rc + 1; + + ieee80211_tx_status_irqsafe(hw, skb); + } + spin_unlock_irqrestore(&priv->b_tx_status.queue.lock, flags); + } + + usb_anchor_urb(urb, &priv->anchored); + if (usb_submit_urb(urb, GFP_ATOMIC)) + usb_unanchor_urb(urb); +} + +static int rtl8187b_init_status_urb(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + struct urb *entry; + int ret = 0; + + entry = usb_alloc_urb(0, GFP_KERNEL); + if (!entry) + return -ENOMEM; + + usb_fill_bulk_urb(entry, priv->udev, usb_rcvbulkpipe(priv->udev, 9), + &priv->b_tx_status.buf, sizeof(priv->b_tx_status.buf), + rtl8187b_status_cb, dev); + + usb_anchor_urb(entry, &priv->anchored); + ret = usb_submit_urb(entry, GFP_KERNEL); + if (ret) + usb_unanchor_urb(entry); + usb_free_urb(entry); + + return ret; +} + +static int rtl8187_cmd_reset(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + u8 reg; + int i; + + reg = rtl818x_ioread8(priv, &priv->map->CMD); + reg &= (1 << 1); + reg |= RTL818X_CMD_RESET; + rtl818x_iowrite8(priv, &priv->map->CMD, reg); + + i = 10; + do { + msleep(2); + if (!(rtl818x_ioread8(priv, &priv->map->CMD) & + RTL818X_CMD_RESET)) + break; + } while (--i); + + if (!i) { + printk(KERN_ERR "%s: Reset timeout!\n", wiphy_name(dev->wiphy)); + return -ETIMEDOUT; + } + + /* reload registers from eeprom */ + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_LOAD); + + i = 10; + do { + msleep(4); + if (!(rtl818x_ioread8(priv, &priv->map->EEPROM_CMD) & + RTL818X_EEPROM_CMD_CONFIG)) + break; + } while (--i); + + if (!i) { + printk(KERN_ERR "%s: eeprom reset timeout!\n", + wiphy_name(dev->wiphy)); + return -ETIMEDOUT; + } + + return 0; +} + +static int rtl8187_init_hw(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + u8 reg; + int res; + + /* reset */ + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, + RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | + RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM, + RTL8187_RTL8225_ANAPARAM_ON); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, + RTL8187_RTL8225_ANAPARAM2_ON); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & + ~RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, + RTL818X_EEPROM_CMD_NORMAL); + + rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); + + msleep(200); + rtl818x_iowrite8(priv, (u8 *)0xFE18, 0x10); + rtl818x_iowrite8(priv, (u8 *)0xFE18, 0x11); + rtl818x_iowrite8(priv, (u8 *)0xFE18, 0x00); + msleep(200); + + res = rtl8187_cmd_reset(dev); + if (res) + return res; + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, + reg | RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM, + RTL8187_RTL8225_ANAPARAM_ON); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, + RTL8187_RTL8225_ANAPARAM2_ON); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, + reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE); + 
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + /* setup card */ + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0); + rtl818x_iowrite8(priv, &priv->map->GPIO0, 0); + + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, (4 << 8)); + rtl818x_iowrite8(priv, &priv->map->GPIO0, 1); + rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0); + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + + rtl818x_iowrite16(priv, (__le16 *)0xFFF4, 0xFFFF); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG1); + reg &= 0x3F; + reg |= 0x80; + rtl818x_iowrite8(priv, &priv->map->CONFIG1, reg); + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0); + rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0); + rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0); + + // TODO: set RESP_RATE and BRSR properly + rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0); + rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3); + + /* host_usb_init */ + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0); + rtl818x_iowrite8(priv, &priv->map->GPIO0, 0); + reg = rtl818x_ioread8(priv, (u8 *)0xFE53); + rtl818x_iowrite8(priv, (u8 *)0xFE53, reg | (1 << 7)); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, (4 << 8)); + rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x20); + rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x80); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x80); + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x80); + msleep(100); + + rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x000a8008); + rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF); + rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, + RTL818X_EEPROM_CMD_CONFIG); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, + RTL818X_EEPROM_CMD_NORMAL); + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FF7); + msleep(100); + + priv->rf->init(dev); + + rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3); + reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~1; + rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg | 1); + rtl818x_iowrite16(priv, (__le16 *)0xFFFE, 0x10); + rtl818x_iowrite8(priv, &priv->map->TALLY_SEL, 0x80); + rtl818x_iowrite8(priv, (u8 *)0xFFFF, 0x60); + rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); + + return 0; +} + +static const u8 rtl8187b_reg_table[][3] = { + {0xF0, 0x32, 0}, {0xF1, 0x32, 0}, {0xF2, 0x00, 0}, {0xF3, 0x00, 0}, + {0xF4, 0x32, 0}, {0xF5, 0x43, 0}, {0xF6, 0x00, 0}, {0xF7, 0x00, 0}, + {0xF8, 0x46, 0}, {0xF9, 0xA4, 0}, {0xFA, 0x00, 0}, {0xFB, 0x00, 0}, + {0xFC, 0x96, 0}, {0xFD, 0xA4, 0}, {0xFE, 0x00, 0}, {0xFF, 0x00, 0}, + + {0x58, 0x4B, 1}, {0x59, 0x00, 1}, {0x5A, 0x4B, 1}, {0x5B, 0x00, 1}, + {0x60, 0x4B, 1}, {0x61, 0x09, 1}, {0x62, 0x4B, 1}, {0x63, 0x09, 1}, + {0xCE, 0x0F, 1}, {0xCF, 0x00, 1}, {0xE0, 0xFF, 1}, {0xE1, 0x0F, 1}, + {0xE2, 0x00, 1}, {0xF0, 0x4E, 1}, {0xF1, 0x01, 1}, {0xF2, 0x02, 1}, + {0xF3, 0x03, 1}, {0xF4, 0x04, 1}, {0xF5, 0x05, 1}, {0xF6, 0x06, 1}, + {0xF7, 0x07, 1}, {0xF8, 0x08, 1}, + + {0x4E, 0x00, 2}, {0x0C, 0x04, 2}, {0x21, 0x61, 2}, {0x22, 0x68, 2}, + {0x23, 0x6F, 2}, {0x24, 0x76, 2}, {0x25, 0x7D, 2}, {0x26, 0x84, 2}, + {0x27, 0x8D, 2}, {0x4D, 0x08, 2}, {0x50, 0x05, 2}, {0x51, 0xF5, 2}, + {0x52, 0x04, 2}, {0x53, 0xA0, 2}, {0x54, 0x1F, 2}, {0x55, 0x23, 2}, + {0x56, 0x45, 2}, {0x57, 0x67, 2}, {0x58, 0x08, 2}, 
{0x59, 0x08, 2}, + {0x5A, 0x08, 2}, {0x5B, 0x08, 2}, {0x60, 0x08, 2}, {0x61, 0x08, 2}, + {0x62, 0x08, 2}, {0x63, 0x08, 2}, {0x64, 0xCF, 2}, {0x72, 0x56, 2}, + {0x73, 0x9A, 2}, + + {0x34, 0xF0, 0}, {0x35, 0x0F, 0}, {0x5B, 0x40, 0}, {0x84, 0x88, 0}, + {0x85, 0x24, 0}, {0x88, 0x54, 0}, {0x8B, 0xB8, 0}, {0x8C, 0x07, 0}, + {0x8D, 0x00, 0}, {0x94, 0x1B, 0}, {0x95, 0x12, 0}, {0x96, 0x00, 0}, + {0x97, 0x06, 0}, {0x9D, 0x1A, 0}, {0x9F, 0x10, 0}, {0xB4, 0x22, 0}, + {0xBE, 0x80, 0}, {0xDB, 0x00, 0}, {0xEE, 0x00, 0}, {0x4C, 0x00, 2}, + + {0x9F, 0x00, 3}, {0x8C, 0x01, 0}, {0x8D, 0x10, 0}, {0x8E, 0x08, 0}, + {0x8F, 0x00, 0} +}; + +static int rtl8187b_init_hw(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + int res, i; + u8 reg; + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, + RTL818X_EEPROM_CMD_CONFIG); + + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + reg |= RTL818X_CONFIG3_ANAPARAM_WRITE | RTL818X_CONFIG3_GNT_SELECT; + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, + RTL8187B_RTL8225_ANAPARAM2_ON); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM, + RTL8187B_RTL8225_ANAPARAM_ON); + rtl818x_iowrite8(priv, &priv->map->ANAPARAM3, + RTL8187B_RTL8225_ANAPARAM3_ON); + + rtl818x_iowrite8(priv, (u8 *)0xFF61, 0x10); + reg = rtl818x_ioread8(priv, (u8 *)0xFF62); + rtl818x_iowrite8(priv, (u8 *)0xFF62, reg & ~(1 << 5)); + rtl818x_iowrite8(priv, (u8 *)0xFF62, reg | (1 << 5)); + + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE; + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg); + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, + RTL818X_EEPROM_CMD_NORMAL); + + res = rtl8187_cmd_reset(dev); + if (res) + return res; + + rtl818x_iowrite16(priv, (__le16 *)0xFF2D, 0x0FFF); + reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); + reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT; + rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); + reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL); + reg |= RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT | + RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT; + rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg); + + rtl818x_iowrite16_idx(priv, (__le16 *)0xFFE0, 0x0FFF, 1); + + rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100); + rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2); + rtl818x_iowrite16_idx(priv, (__le16 *)0xFFD4, 0xFFFF, 1); + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, + RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG1); + rtl818x_iowrite8(priv, &priv->map->CONFIG1, (reg & 0x3F) | 0x80); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, + RTL818X_EEPROM_CMD_NORMAL); + + rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0); + for (i = 0; i < ARRAY_SIZE(rtl8187b_reg_table); i++) { + rtl818x_iowrite8_idx(priv, + (u8 *)(uintptr_t) + (rtl8187b_reg_table[i][0] | 0xFF00), + rtl8187b_reg_table[i][1], + rtl8187b_reg_table[i][2]); + } + + rtl818x_iowrite16(priv, &priv->map->TID_AC_MAP, 0xFA50); + rtl818x_iowrite16(priv, &priv->map->INT_MIG, 0); + + rtl818x_iowrite32_idx(priv, (__le32 *)0xFFF0, 0, 1); + rtl818x_iowrite32_idx(priv, (__le32 *)0xFFF4, 0, 1); + rtl818x_iowrite8_idx(priv, (u8 *)0xFFF8, 0, 1); + + rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00004001); + + rtl818x_iowrite16_idx(priv, (__le16 *)0xFF72, 0x569A, 2); + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, + RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + reg |= RTL818X_CONFIG3_ANAPARAM_WRITE; + rtl818x_iowrite8(priv, &priv->map->CONFIG3, 
reg); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, + RTL818X_EEPROM_CMD_NORMAL); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488); + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); + msleep(100); + + priv->rf->init(dev); + + reg = RTL818X_CMD_TX_ENABLE | RTL818X_CMD_RX_ENABLE; + rtl818x_iowrite8(priv, &priv->map->CMD, reg); + rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); + + rtl818x_iowrite8(priv, (u8 *)0xFE41, 0xF4); + rtl818x_iowrite8(priv, (u8 *)0xFE40, 0x00); + rtl818x_iowrite8(priv, (u8 *)0xFE42, 0x00); + rtl818x_iowrite8(priv, (u8 *)0xFE42, 0x01); + rtl818x_iowrite8(priv, (u8 *)0xFE40, 0x0F); + rtl818x_iowrite8(priv, (u8 *)0xFE42, 0x00); + rtl818x_iowrite8(priv, (u8 *)0xFE42, 0x01); + + reg = rtl818x_ioread8(priv, (u8 *)0xFFDB); + rtl818x_iowrite8(priv, (u8 *)0xFFDB, reg | (1 << 2)); + rtl818x_iowrite16_idx(priv, (__le16 *)0xFF72, 0x59FA, 3); + rtl818x_iowrite16_idx(priv, (__le16 *)0xFF74, 0x59D2, 3); + rtl818x_iowrite16_idx(priv, (__le16 *)0xFF76, 0x59D2, 3); + rtl818x_iowrite16_idx(priv, (__le16 *)0xFF78, 0x19FA, 3); + rtl818x_iowrite16_idx(priv, (__le16 *)0xFF7A, 0x19FA, 3); + rtl818x_iowrite16_idx(priv, (__le16 *)0xFF7C, 0x00D0, 3); + rtl818x_iowrite8(priv, (u8 *)0xFF61, 0); + rtl818x_iowrite8_idx(priv, (u8 *)0xFF80, 0x0F, 1); + rtl818x_iowrite8_idx(priv, (u8 *)0xFF83, 0x03, 1); + rtl818x_iowrite8(priv, (u8 *)0xFFDA, 0x10); + rtl818x_iowrite8_idx(priv, (u8 *)0xFF4D, 0x08, 2); + + rtl818x_iowrite32(priv, &priv->map->HSSI_PARA, 0x0600321B); + + rtl818x_iowrite16_idx(priv, (__le16 *)0xFFEC, 0x0800, 1); + + priv->slot_time = 0x9; + priv->aifsn[0] = 2; /* AIFSN[AC_VO] */ + priv->aifsn[1] = 2; /* AIFSN[AC_VI] */ + priv->aifsn[2] = 7; /* AIFSN[AC_BK] */ + priv->aifsn[3] = 3; /* AIFSN[AC_BE] */ + rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0); + + /* ENEDCA flag must always be set, transmit issues? */ + rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA); + + return 0; +} + +static void rtl8187_work(struct work_struct *work) +{ + /* The RTL8187 returns the retry count through register 0xFFFA. In + * addition, it appears to be a cumulative retry count, not the + * value for the current TX packet. When multiple TX entries are + * queued, the retry count will be valid for the last one in the queue. + * The "error" should not matter for purposes of rate setting. */ + struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, + work.work); + struct ieee80211_tx_info *info; + struct ieee80211_hw *dev = priv->dev; + static u16 retry; + u16 tmp; + + mutex_lock(&priv->conf_mutex); + tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA); + while (skb_queue_len(&priv->b_tx_status.queue) > 0) { + struct sk_buff *old_skb; + + old_skb = skb_dequeue(&priv->b_tx_status.queue); + info = IEEE80211_SKB_CB(old_skb); + info->status.rates[0].count = tmp - retry + 1; + ieee80211_tx_status_irqsafe(dev, old_skb); + } + retry = tmp; + mutex_unlock(&priv->conf_mutex); +} + +static int rtl8187_start(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + u32 reg; + int ret; + + mutex_lock(&priv->conf_mutex); + + ret = (!priv->is_rtl8187b) ? 
rtl8187_init_hw(dev) : + rtl8187b_init_hw(dev); + if (ret) + goto rtl8187_start_exit; + + init_usb_anchor(&priv->anchored); + priv->dev = dev; + + if (priv->is_rtl8187b) { + reg = RTL818X_RX_CONF_MGMT | + RTL818X_RX_CONF_DATA | + RTL818X_RX_CONF_BROADCAST | + RTL818X_RX_CONF_NICMAC | + RTL818X_RX_CONF_BSSID | + (7 << 13 /* RX FIFO threshold NONE */) | + (7 << 10 /* MAX RX DMA */) | + RTL818X_RX_CONF_RX_AUTORESETPHY | + RTL818X_RX_CONF_ONLYERLPKT | + RTL818X_RX_CONF_MULTICAST; + priv->rx_conf = reg; + rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); + + rtl818x_iowrite32(priv, &priv->map->TX_CONF, + RTL818X_TX_CONF_HW_SEQNUM | + RTL818X_TX_CONF_DISREQQSIZE | + (7 << 8 /* short retry limit */) | + (7 << 0 /* long retry limit */) | + (7 << 21 /* MAX TX DMA */)); + rtl8187_init_urbs(dev); + rtl8187b_init_status_urb(dev); + goto rtl8187_start_exit; + } + + rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); + + rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0); + rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0); + + rtl8187_init_urbs(dev); + + reg = RTL818X_RX_CONF_ONLYERLPKT | + RTL818X_RX_CONF_RX_AUTORESETPHY | + RTL818X_RX_CONF_BSSID | + RTL818X_RX_CONF_MGMT | + RTL818X_RX_CONF_DATA | + (7 << 13 /* RX FIFO threshold NONE */) | + (7 << 10 /* MAX RX DMA */) | + RTL818X_RX_CONF_BROADCAST | + RTL818X_RX_CONF_NICMAC; + + priv->rx_conf = reg; + rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); + + reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); + reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT; + reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT; + rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); + + reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL); + reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT; + reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT; + reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT; + rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg); + + reg = RTL818X_TX_CONF_CW_MIN | + (7 << 21 /* MAX TX DMA */) | + RTL818X_TX_CONF_NO_ICV; + rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg); + + reg = rtl818x_ioread8(priv, &priv->map->CMD); + reg |= RTL818X_CMD_TX_ENABLE; + reg |= RTL818X_CMD_RX_ENABLE; + rtl818x_iowrite8(priv, &priv->map->CMD, reg); + INIT_DELAYED_WORK(&priv->work, rtl8187_work); + +rtl8187_start_exit: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +static void rtl8187_stop(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + struct sk_buff *skb; + u32 reg; + + mutex_lock(&priv->conf_mutex); + rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); + + reg = rtl818x_ioread8(priv, &priv->map->CMD); + reg &= ~RTL818X_CMD_TX_ENABLE; + reg &= ~RTL818X_CMD_RX_ENABLE; + rtl818x_iowrite8(priv, &priv->map->CMD, reg); + + priv->rf->stop(dev); + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG4); + rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + while ((skb = skb_dequeue(&priv->b_tx_status.queue))) + dev_kfree_skb_any(skb); + + usb_kill_anchored_urbs(&priv->anchored); + mutex_unlock(&priv->conf_mutex); + + if (!priv->is_rtl8187b) + cancel_delayed_work_sync(&priv->work); +} + +static int rtl8187_add_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct rtl8187_priv *priv = dev->priv; + int i; + int ret = -EOPNOTSUPP; + + mutex_lock(&priv->conf_mutex); + if (priv->vif) + goto exit; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + break; + default: + goto exit; + 
} + + ret = 0; + priv->vif = vif; + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + for (i = 0; i < ETH_ALEN; i++) + rtl818x_iowrite8(priv, &priv->map->MAC[i], + ((u8 *)vif->addr)[i]); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + +exit: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +static void rtl8187_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct rtl8187_priv *priv = dev->priv; + mutex_lock(&priv->conf_mutex); + priv->vif = NULL; + mutex_unlock(&priv->conf_mutex); +} + +static int rtl8187_config(struct ieee80211_hw *dev, u32 changed) +{ + struct rtl8187_priv *priv = dev->priv; + struct ieee80211_conf *conf = &dev->conf; + u32 reg; + + mutex_lock(&priv->conf_mutex); + reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); + /* Enable TX loopback on MAC level to avoid TX during channel + * changes, as this has be seen to causes problems and the + * card will stop work until next reset + */ + rtl818x_iowrite32(priv, &priv->map->TX_CONF, + reg | RTL818X_TX_CONF_LOOPBACK_MAC); + priv->rf->set_chan(dev, conf); + msleep(10); + rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg); + + rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2); + rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100); + rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100); + rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100); + mutex_unlock(&priv->conf_mutex); + return 0; +} + +/* + * With 8187B, AC_*_PARAM clashes with FEMR definition in struct rtl818x_csr for + * example. Thus we have to use raw values for AC_*_PARAM register addresses. + */ +static __le32 *rtl8187b_ac_addr[4] = { + (__le32 *) 0xFFF0, /* AC_VO */ + (__le32 *) 0xFFF4, /* AC_VI */ + (__le32 *) 0xFFFC, /* AC_BK */ + (__le32 *) 0xFFF8, /* AC_BE */ +}; + +#define SIFS_TIME 0xa + +static void rtl8187_conf_erp(struct rtl8187_priv *priv, bool use_short_slot, + bool use_short_preamble) +{ + if (priv->is_rtl8187b) { + u8 difs, eifs; + u16 ack_timeout; + int queue; + + if (use_short_slot) { + priv->slot_time = 0x9; + difs = 0x1c; + eifs = 0x53; + } else { + priv->slot_time = 0x14; + difs = 0x32; + eifs = 0x5b; + } + rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22); + rtl818x_iowrite8(priv, &priv->map->SLOT, priv->slot_time); + rtl818x_iowrite8(priv, &priv->map->DIFS, difs); + + /* + * BRSR+1 on 8187B is in fact EIFS register + * Value in units of 4 us + */ + rtl818x_iowrite8(priv, (u8 *)&priv->map->BRSR + 1, eifs); + + /* + * For 8187B, CARRIER_SENSE_COUNTER is in fact ack timeout + * register. 
In units of 4 us like eifs register + * ack_timeout = ack duration + plcp + difs + preamble + */ + ack_timeout = 112 + 48 + difs; + if (use_short_preamble) + ack_timeout += 72; + else + ack_timeout += 144; + rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, + DIV_ROUND_UP(ack_timeout, 4)); + + for (queue = 0; queue < 4; queue++) + rtl818x_iowrite8(priv, (u8 *) rtl8187b_ac_addr[queue], + priv->aifsn[queue] * priv->slot_time + + SIFS_TIME); + } else { + rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22); + if (use_short_slot) { + rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9); + rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14); + rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x14); + } else { + rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14); + rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24); + rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x24); + } + } +} + +static void rtl8187_bss_info_changed(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct rtl8187_priv *priv = dev->priv; + int i; + u8 reg; + + if (changed & BSS_CHANGED_BSSID) { + mutex_lock(&priv->conf_mutex); + for (i = 0; i < ETH_ALEN; i++) + rtl818x_iowrite8(priv, &priv->map->BSSID[i], + info->bssid[i]); + + if (priv->is_rtl8187b) + reg = RTL818X_MSR_ENEDCA; + else + reg = 0; + + if (is_valid_ether_addr(info->bssid)) { + reg |= RTL818X_MSR_INFRA; + rtl818x_iowrite8(priv, &priv->map->MSR, reg); + } else { + reg |= RTL818X_MSR_NO_LINK; + rtl818x_iowrite8(priv, &priv->map->MSR, reg); + } + + mutex_unlock(&priv->conf_mutex); + } + + if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_ERP_PREAMBLE)) + rtl8187_conf_erp(priv, info->use_short_slot, + info->use_short_preamble); +} + +static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev, + int mc_count, struct dev_addr_list *mc_list) +{ + return mc_count; +} + +static void rtl8187_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct rtl8187_priv *priv = dev->priv; + + if (changed_flags & FIF_FCSFAIL) + priv->rx_conf ^= RTL818X_RX_CONF_FCS; + if (changed_flags & FIF_CONTROL) + priv->rx_conf ^= RTL818X_RX_CONF_CTRL; + if (changed_flags & FIF_OTHER_BSS) + priv->rx_conf ^= RTL818X_RX_CONF_MONITOR; + if (*total_flags & FIF_ALLMULTI || multicast > 0) + priv->rx_conf |= RTL818X_RX_CONF_MULTICAST; + else + priv->rx_conf &= ~RTL818X_RX_CONF_MULTICAST; + + *total_flags = 0; + + if (priv->rx_conf & RTL818X_RX_CONF_FCS) + *total_flags |= FIF_FCSFAIL; + if (priv->rx_conf & RTL818X_RX_CONF_CTRL) + *total_flags |= FIF_CONTROL; + if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) + *total_flags |= FIF_OTHER_BSS; + if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST) + *total_flags |= FIF_ALLMULTI; + + rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf); +} + +static int rtl8187_conf_tx(struct ieee80211_hw *dev, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct rtl8187_priv *priv = dev->priv; + u8 cw_min, cw_max; + + if (queue > 3) + return -EINVAL; + + cw_min = fls(params->cw_min); + cw_max = fls(params->cw_max); + + if (priv->is_rtl8187b) { + priv->aifsn[queue] = params->aifs; + + /* + * This is the structure of AC_*_PARAM registers in 8187B: + * - TXOP limit field, bit offset = 16 + * - ECWmax, bit offset = 12 + * - ECWmin, bit offset = 8 + * - AIFS, bit offset = 0 + */ + rtl818x_iowrite32(priv, rtl8187b_ac_addr[queue], + (params->txop << 16) | (cw_max << 12) | + (cw_min << 8) | (params->aifs * + priv->slot_time + SIFS_TIME)); + } 
else { + if (queue != 0) + return -EINVAL; + + rtl818x_iowrite8(priv, &priv->map->CW_VAL, + cw_min | (cw_max << 4)); + } + return 0; +} + +static u64 rtl8187_get_tsf(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + + return rtl818x_ioread32(priv, &priv->map->TSFT[0]) | + (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32; +} + +static const struct ieee80211_ops rtl8187_ops = { + .tx = rtl8187_tx, + .start = rtl8187_start, + .stop = rtl8187_stop, + .add_interface = rtl8187_add_interface, + .remove_interface = rtl8187_remove_interface, + .config = rtl8187_config, + .bss_info_changed = rtl8187_bss_info_changed, + .prepare_multicast = rtl8187_prepare_multicast, + .configure_filter = rtl8187_configure_filter, + .conf_tx = rtl8187_conf_tx, + .rfkill_poll = rtl8187_rfkill_poll, + .get_tsf = rtl8187_get_tsf, +}; + +static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom) +{ + struct ieee80211_hw *dev = eeprom->data; + struct rtl8187_priv *priv = dev->priv; + u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); + + eeprom->reg_data_in = reg & RTL818X_EEPROM_CMD_WRITE; + eeprom->reg_data_out = reg & RTL818X_EEPROM_CMD_READ; + eeprom->reg_data_clock = reg & RTL818X_EEPROM_CMD_CK; + eeprom->reg_chip_select = reg & RTL818X_EEPROM_CMD_CS; +} + +static void rtl8187_eeprom_register_write(struct eeprom_93cx6 *eeprom) +{ + struct ieee80211_hw *dev = eeprom->data; + struct rtl8187_priv *priv = dev->priv; + u8 reg = RTL818X_EEPROM_CMD_PROGRAM; + + if (eeprom->reg_data_in) + reg |= RTL818X_EEPROM_CMD_WRITE; + if (eeprom->reg_data_out) + reg |= RTL818X_EEPROM_CMD_READ; + if (eeprom->reg_data_clock) + reg |= RTL818X_EEPROM_CMD_CK; + if (eeprom->reg_chip_select) + reg |= RTL818X_EEPROM_CMD_CS; + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, reg); + udelay(10); +} + +static int __devinit rtl8187_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct ieee80211_hw *dev; + struct rtl8187_priv *priv; + struct eeprom_93cx6 eeprom; + struct ieee80211_channel *channel; + const char *chip_name; + u16 txpwr, reg; + u16 product_id = le16_to_cpu(udev->descriptor.idProduct); + int err, i; + + dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops); + if (!dev) { + printk(KERN_ERR "rtl8187: ieee80211 alloc failed\n"); + return -ENOMEM; + } + + priv = dev->priv; + priv->is_rtl8187b = (id->driver_info == DEVICE_RTL8187B); + + /* allocate "DMA aware" buffer for register accesses */ + priv->io_dmabuf = kmalloc(sizeof(*priv->io_dmabuf), GFP_KERNEL); + if (!priv->io_dmabuf) { + err = -ENOMEM; + goto err_free_dev; + } + mutex_init(&priv->io_mutex); + + SET_IEEE80211_DEV(dev, &intf->dev); + usb_set_intfdata(intf, dev); + priv->udev = udev; + + usb_get_dev(udev); + + skb_queue_head_init(&priv->rx_queue); + + BUILD_BUG_ON(sizeof(priv->channels) != sizeof(rtl818x_channels)); + BUILD_BUG_ON(sizeof(priv->rates) != sizeof(rtl818x_rates)); + + memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels)); + memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates)); + priv->map = (struct rtl818x_csr *)0xFF00; + + priv->band.band = IEEE80211_BAND_2GHZ; + priv->band.channels = priv->channels; + priv->band.n_channels = ARRAY_SIZE(rtl818x_channels); + priv->band.bitrates = priv->rates; + priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates); + dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; + + + dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | + IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_RX_INCLUDES_FCS; + + 
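+ /* The 93cx6 EEPROM is bit-banged through the EEPROM_CMD register via
+ * the register_read/register_write callbacks above; the MAC address,
+ * the per-channel TX power bytes and the TX power base are read from
+ * it below. */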
eeprom.data = dev; + eeprom.register_read = rtl8187_eeprom_register_read; + eeprom.register_write = rtl8187_eeprom_register_write; + if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6)) + eeprom.width = PCI_EEPROM_WIDTH_93C66; + else + eeprom.width = PCI_EEPROM_WIDTH_93C46; + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + udelay(10); + + eeprom_93cx6_multiread(&eeprom, RTL8187_EEPROM_MAC_ADDR, + (__le16 __force *)dev->wiphy->perm_addr, 3); + if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { + printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly " + "generated MAC address\n"); + random_ether_addr(dev->wiphy->perm_addr); + } + + channel = priv->channels; + for (i = 0; i < 3; i++) { + eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_1 + i, + &txpwr); + (*channel++).hw_value = txpwr & 0xFF; + (*channel++).hw_value = txpwr >> 8; + } + for (i = 0; i < 2; i++) { + eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_4 + i, + &txpwr); + (*channel++).hw_value = txpwr & 0xFF; + (*channel++).hw_value = txpwr >> 8; + } + + eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_BASE, + &priv->txpwr_base); + + reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~1; + rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg | 1); + /* 0 means asic B-cut, we should use SW 3 wire + * bit-by-bit banging for radio. 1 means we can use + * USB specific request to write radio registers */ + priv->asic_rev = rtl818x_ioread8(priv, (u8 *)0xFFFE) & 0x3; + rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + if (!priv->is_rtl8187b) { + u32 reg32; + reg32 = rtl818x_ioread32(priv, &priv->map->TX_CONF); + reg32 &= RTL818X_TX_CONF_HWVER_MASK; + switch (reg32) { + case RTL818X_TX_CONF_R8187vD_B: + /* Some RTL8187B devices have a USB ID of 0x8187 + * detect them here */ + chip_name = "RTL8187BvB(early)"; + priv->is_rtl8187b = 1; + priv->hw_rev = RTL8187BvB; + break; + case RTL818X_TX_CONF_R8187vD: + chip_name = "RTL8187vD"; + break; + default: + chip_name = "RTL8187vB (default)"; + } + } else { + /* + * Force USB request to write radio registers for 8187B, Realtek + * only uses it in their sources + */ + /*if (priv->asic_rev == 0) { + printk(KERN_WARNING "rtl8187: Forcing use of USB " + "requests to write to radio registers\n"); + priv->asic_rev = 1; + }*/ + switch (rtl818x_ioread8(priv, (u8 *)0xFFE1)) { + case RTL818X_R8187B_B: + chip_name = "RTL8187BvB"; + priv->hw_rev = RTL8187BvB; + break; + case RTL818X_R8187B_D: + chip_name = "RTL8187BvD"; + priv->hw_rev = RTL8187BvD; + break; + case RTL818X_R8187B_E: + chip_name = "RTL8187BvE"; + priv->hw_rev = RTL8187BvE; + break; + default: + chip_name = "RTL8187BvB (default)"; + priv->hw_rev = RTL8187BvB; + } + } + + if (!priv->is_rtl8187b) { + for (i = 0; i < 2; i++) { + eeprom_93cx6_read(&eeprom, + RTL8187_EEPROM_TXPWR_CHAN_6 + i, + &txpwr); + (*channel++).hw_value = txpwr & 0xFF; + (*channel++).hw_value = txpwr >> 8; + } + } else { + eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_6, + &txpwr); + (*channel++).hw_value = txpwr & 0xFF; + + eeprom_93cx6_read(&eeprom, 0x0A, &txpwr); + (*channel++).hw_value = txpwr & 0xFF; + + eeprom_93cx6_read(&eeprom, 0x1C, &txpwr); + (*channel++).hw_value = txpwr & 0xFF; + (*channel++).hw_value = txpwr >> 8; + } + /* Handle the differing rfkill GPIO bit in different models */ + priv->rfkill_mask = RFKILL_MASK_8187_89_97; + if (product_id == 0x8197 || product_id == 0x8198) { + eeprom_93cx6_read(&eeprom, 
RTL8187_EEPROM_SELECT_GPIO, &reg);
+ if (reg & 0xFF00)
+ priv->rfkill_mask = RFKILL_MASK_8198;
+ }
+
+ /*
+ * XXX: Once this driver supports anything that requires
+ * beacons it must implement IEEE80211_TX_CTL_ASSIGN_SEQ.
+ */
+ dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
+ printk(KERN_INFO "rtl8187: inconsistency between id with OEM"
+ " info!\n");
+
+ priv->rf = rtl8187_detect_rf(dev);
+ dev->extra_tx_headroom = (!priv->is_rtl8187b) ?
+ sizeof(struct rtl8187_tx_hdr) :
+ sizeof(struct rtl8187b_tx_hdr);
+ if (!priv->is_rtl8187b)
+ dev->queues = 1;
+ else
+ dev->queues = 4;
+
+ err = ieee80211_register_hw(dev);
+ if (err) {
+ printk(KERN_ERR "rtl8187: Cannot register device\n");
+ goto err_free_dmabuf;
+ }
+ mutex_init(&priv->conf_mutex);
+ skb_queue_head_init(&priv->b_tx_status.queue);
+
+ printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
+ wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
+ chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask);
+
+#ifdef CONFIG_RTL8187_LEDS
+ eeprom_93cx6_read(&eeprom, 0x3F, &reg);
+ reg &= 0xFF;
+ rtl8187_leds_init(dev, reg);
+#endif
+ rtl8187_rfkill_init(dev);
+
+ return 0;
+
+ err_free_dmabuf:
+ kfree(priv->io_dmabuf);
+ err_free_dev:
+ ieee80211_free_hw(dev);
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(udev);
+ return err;
+}
+
+static void __devexit rtl8187_disconnect(struct usb_interface *intf)
+{
+ struct ieee80211_hw *dev = usb_get_intfdata(intf);
+ struct rtl8187_priv *priv;
+
+ if (!dev)
+ return;
+
+#ifdef CONFIG_RTL8187_LEDS
+ rtl8187_leds_exit(dev);
+#endif
+ rtl8187_rfkill_exit(dev);
+ ieee80211_unregister_hw(dev);
+
+ priv = dev->priv;
+ usb_reset_device(priv->udev);
+ usb_put_dev(interface_to_usbdev(intf));
+ kfree(priv->io_dmabuf);
+ ieee80211_free_hw(dev);
+}
+
+static struct usb_driver rtl8187_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtl8187_table,
+ .probe = rtl8187_probe,
+ .disconnect = __devexit_p(rtl8187_disconnect),
+};
+
+static int __init rtl8187_init(void)
+{
+ return usb_register(&rtl8187_driver);
+}
+
+static void __exit rtl8187_exit(void)
+{
+ usb_deregister(&rtl8187_driver);
+}
+
+module_init(rtl8187_init);
+module_exit(rtl8187_exit);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
new file mode 100644
index 0000000..4637337
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -0,0 +1,245 @@
+/*
+ * Linux LED driver for RTL8187
+ *
+ * Copyright 2009 Larry Finger
+ *
+ * Based on the LED handling in the r8187 driver, which is:
+ * Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Thanks to Realtek for their support!
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_RTL8187_LEDS
+
+#include <net/mac80211.h>
+#include <linux/usb.h>
+#include <linux/eeprom_93cx6.h>
+
+#include "rtl8187.h"
+#include "rtl8187_leds.h"
+
+static void led_turn_on(struct work_struct *work)
+{
+ /* As this routine does read/write operations on the hardware, it must
+ * be run from a work queue.
+ */
+ u8 reg;
+ struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
+ led_on.work);
+ struct rtl8187_led *led = &priv->led_tx;
+
+ /* Don't change the LED, when the device is down. */
+ if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
+ return ;
+
+ /* Skip if the LED is not registered.
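+ * (rtl8187_leds_init() may have unregistered the TX LED again if a
+ * later LED failed to register).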
*/ + if (!led->dev) + return; + mutex_lock(&priv->conf_mutex); + switch (led->ledpin) { + case LED_PIN_GPIO0: + rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01); + rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x00); + break; + case LED_PIN_LED0: + reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 4); + rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); + break; + case LED_PIN_LED1: + reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 5); + rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); + break; + case LED_PIN_HW: + default: + break; + } + mutex_unlock(&priv->conf_mutex); +} + +static void led_turn_off(struct work_struct *work) +{ + /* As this routine does read/write operations on the hardware, it must + * be run from a work queue. + */ + u8 reg; + struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, + led_off.work); + struct rtl8187_led *led = &priv->led_tx; + + /* Don't change the LED, when the device is down. */ + if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED) + return ; + + /* Skip if the LED is not registered. */ + if (!led->dev) + return; + mutex_lock(&priv->conf_mutex); + switch (led->ledpin) { + case LED_PIN_GPIO0: + rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01); + rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x01); + break; + case LED_PIN_LED0: + reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 4); + rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); + break; + case LED_PIN_LED1: + reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 5); + rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); + break; + case LED_PIN_HW: + default: + break; + } + mutex_unlock(&priv->conf_mutex); +} + +/* Callback from the LED subsystem. */ +static void rtl8187_led_brightness_set(struct led_classdev *led_dev, + enum led_brightness brightness) +{ + struct rtl8187_led *led = container_of(led_dev, struct rtl8187_led, + led_dev); + struct ieee80211_hw *hw = led->dev; + struct rtl8187_priv *priv; + static bool radio_on; + + if (!hw) + return; + priv = hw->priv; + if (led->is_radio) { + if (brightness == LED_FULL) { + ieee80211_queue_delayed_work(hw, &priv->led_on, 0); + radio_on = true; + } else if (radio_on) { + radio_on = false; + cancel_delayed_work_sync(&priv->led_on); + ieee80211_queue_delayed_work(hw, &priv->led_off, 0); + } + } else if (radio_on) { + if (brightness == LED_OFF) { + ieee80211_queue_delayed_work(hw, &priv->led_off, 0); + /* The LED is off for 1/20 sec - it just blinks. 
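+ * HZ / 20 jiffies corresponds to roughly 50 ms of wall-clock
+ * time regardless of the kernel tick rate, so the TX LED is
+ * queued to come back on after only a short dark interval
+ * instead of staying off.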
*/ + ieee80211_queue_delayed_work(hw, &priv->led_on, + HZ / 20); + } else + ieee80211_queue_delayed_work(hw, &priv->led_on, 0); + } +} + +static int rtl8187_register_led(struct ieee80211_hw *dev, + struct rtl8187_led *led, const char *name, + const char *default_trigger, u8 ledpin, + bool is_radio) +{ + int err; + struct rtl8187_priv *priv = dev->priv; + + if (led->dev) + return -EEXIST; + if (!default_trigger) + return -EINVAL; + led->dev = dev; + led->ledpin = ledpin; + led->is_radio = is_radio; + strncpy(led->name, name, sizeof(led->name)); + + led->led_dev.name = led->name; + led->led_dev.default_trigger = default_trigger; + led->led_dev.brightness_set = rtl8187_led_brightness_set; + + err = led_classdev_register(&priv->udev->dev, &led->led_dev); + if (err) { + printk(KERN_INFO "LEDs: Failed to register %s\n", name); + led->dev = NULL; + return err; + } + return 0; +} + +static void rtl8187_unregister_led(struct rtl8187_led *led) +{ + struct ieee80211_hw *hw = led->dev; + struct rtl8187_priv *priv = hw->priv; + + led_classdev_unregister(&led->led_dev); + flush_delayed_work(&priv->led_off); + led->dev = NULL; +} + +void rtl8187_leds_init(struct ieee80211_hw *dev, u16 custid) +{ + struct rtl8187_priv *priv = dev->priv; + char name[RTL8187_LED_MAX_NAME_LEN + 1]; + u8 ledpin; + int err; + + /* According to the vendor driver, the LED operation depends on the + * customer ID encoded in the EEPROM + */ + printk(KERN_INFO "rtl8187: Customer ID is 0x%02X\n", custid); + switch (custid) { + case EEPROM_CID_RSVD0: + case EEPROM_CID_RSVD1: + case EEPROM_CID_SERCOMM_PS: + case EEPROM_CID_QMI: + case EEPROM_CID_DELL: + case EEPROM_CID_TOSHIBA: + ledpin = LED_PIN_GPIO0; + break; + case EEPROM_CID_ALPHA0: + ledpin = LED_PIN_LED0; + break; + case EEPROM_CID_HW: + ledpin = LED_PIN_HW; + break; + default: + ledpin = LED_PIN_GPIO0; + } + + INIT_DELAYED_WORK(&priv->led_on, led_turn_on); + INIT_DELAYED_WORK(&priv->led_off, led_turn_off); + + snprintf(name, sizeof(name), + "rtl8187-%s::radio", wiphy_name(dev->wiphy)); + err = rtl8187_register_led(dev, &priv->led_radio, name, + ieee80211_get_radio_led_name(dev), ledpin, true); + if (err) + return; + + snprintf(name, sizeof(name), + "rtl8187-%s::tx", wiphy_name(dev->wiphy)); + err = rtl8187_register_led(dev, &priv->led_tx, name, + ieee80211_get_tx_led_name(dev), ledpin, false); + if (err) + goto err_tx; + + snprintf(name, sizeof(name), + "rtl8187-%s::rx", wiphy_name(dev->wiphy)); + err = rtl8187_register_led(dev, &priv->led_rx, name, + ieee80211_get_rx_led_name(dev), ledpin, false); + if (!err) + return; + + /* registration of RX LED failed - unregister */ + rtl8187_unregister_led(&priv->led_tx); +err_tx: + rtl8187_unregister_led(&priv->led_radio); +} + +void rtl8187_leds_exit(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + + rtl8187_unregister_led(&priv->led_radio); + rtl8187_unregister_led(&priv->led_rx); + rtl8187_unregister_led(&priv->led_tx); + cancel_delayed_work_sync(&priv->led_off); + cancel_delayed_work_sync(&priv->led_on); +} +#endif /* def CONFIG_RTL8187_LEDS */ + diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.h b/drivers/net/wireless/rtl818x/rtl8187_leds.h new file mode 100644 index 0000000..d743c96 --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8187_leds.h @@ -0,0 +1,59 @@ +/* + * Definitions for RTL8187 leds + * + * Copyright 2009 Larry Finger + * + * Based on the LED handling in the r8187 driver, which is: + * Copyright (c) Realtek Semiconductor Corp. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef RTL8187_LED_H +#define RTL8187_LED_H + +#ifdef CONFIG_RTL8187_LEDS + +#define RTL8187_LED_MAX_NAME_LEN 21 + +#include +#include + +enum { + LED_PIN_LED0, + LED_PIN_LED1, + LED_PIN_GPIO0, + LED_PIN_HW +}; + +enum { + EEPROM_CID_RSVD0 = 0x00, + EEPROM_CID_RSVD1 = 0xFF, + EEPROM_CID_ALPHA0 = 0x01, + EEPROM_CID_SERCOMM_PS = 0x02, + EEPROM_CID_HW = 0x03, + EEPROM_CID_TOSHIBA = 0x04, + EEPROM_CID_QMI = 0x07, + EEPROM_CID_DELL = 0x08 +}; + +struct rtl8187_led { + struct ieee80211_hw *dev; + /* The LED class device */ + struct led_classdev led_dev; + /* The pin/method used to control the led */ + u8 ledpin; + /* The unique name string for this LED device. */ + char name[RTL8187_LED_MAX_NAME_LEN + 1]; + /* If the LED is radio or tx/rx */ + bool is_radio; +}; + +void rtl8187_leds_init(struct ieee80211_hw *dev, u16 code); +void rtl8187_leds_exit(struct ieee80211_hw *dev); + +#endif /* def CONFIG_RTL8187_LEDS */ + +#endif /* RTL8187_LED_H */ diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c new file mode 100644 index 0000000..03555e1 --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c @@ -0,0 +1,64 @@ +/* + * Linux RFKILL support for RTL8187 + * + * Copyright (c) 2009 Herton Ronaldo Krzesinski + * + * Based on the RFKILL handling in the r8187 driver, which is: + * Copyright (c) Realtek Semiconductor Corp. All rights reserved. + * + * Thanks to Realtek for their support! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include "rtl8187.h" +#include "rtl8187_rfkill.h" + +static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv) +{ + u8 gpio; + + gpio = rtl818x_ioread8(priv, &priv->map->GPIO0); + rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~priv->rfkill_mask); + gpio = rtl818x_ioread8(priv, &priv->map->GPIO1); + + return gpio & priv->rfkill_mask; +} + +void rtl8187_rfkill_init(struct ieee80211_hw *hw) +{ + struct rtl8187_priv *priv = hw->priv; + + priv->rfkill_off = rtl8187_is_radio_enabled(priv); + printk(KERN_INFO "rtl8187: wireless switch is %s\n", + priv->rfkill_off ? "on" : "off"); + wiphy_rfkill_set_hw_state(hw->wiphy, !priv->rfkill_off); + wiphy_rfkill_start_polling(hw->wiphy); +} + +void rtl8187_rfkill_poll(struct ieee80211_hw *hw) +{ + bool enabled; + struct rtl8187_priv *priv = hw->priv; + + mutex_lock(&priv->conf_mutex); + enabled = rtl8187_is_radio_enabled(priv); + if (unlikely(enabled != priv->rfkill_off)) { + priv->rfkill_off = enabled; + printk(KERN_INFO "rtl8187: wireless radio switch turned %s\n", + enabled ? 
"on" : "off"); + wiphy_rfkill_set_hw_state(hw->wiphy, !enabled); + } + mutex_unlock(&priv->conf_mutex); +} + +void rtl8187_rfkill_exit(struct ieee80211_hw *hw) +{ + wiphy_rfkill_stop_polling(hw->wiphy); +} diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.h b/drivers/net/wireless/rtl818x/rtl8187_rfkill.h new file mode 100644 index 0000000..e12575e --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8187_rfkill.h @@ -0,0 +1,8 @@ +#ifndef RTL8187_RFKILL_H +#define RTL8187_RFKILL_H + +void rtl8187_rfkill_init(struct ieee80211_hw *hw); +void rtl8187_rfkill_poll(struct ieee80211_hw *hw); +void rtl8187_rfkill_exit(struct ieee80211_hw *hw); + +#endif /* RTL8187_RFKILL_H */ diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c new file mode 100644 index 0000000..a098193 --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c @@ -0,0 +1,983 @@ +/* + * Radio tuning for RTL8225 on RTL8187 + * + * Copyright 2007 Michael Wu + * Copyright 2007 Andrea Merello + * + * Based on the r8187 driver, which is: + * Copyright 2005 Andrea Merello , et al. + * + * Magic delays, register offsets, and phy value tables below are + * taken from the original r8187 driver sources. Thanks to Realtek + * for their support! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include "rtl8187.h" +#include "rtl8187_rtl8225.h" + +static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data) +{ + struct rtl8187_priv *priv = dev->priv; + u16 reg80, reg84, reg82; + u32 bangdata; + int i; + + bangdata = (data << 4) | (addr & 0xf); + + reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput) & 0xfff3; + reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable); + + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x7); + + reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x7); + udelay(10); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); + udelay(10); + + for (i = 15; i >= 0; i--) { + u16 reg = reg80 | (bangdata & (1 << i)) >> i; + + if (i & 1) + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1)); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1)); + + if (!(i & 1)) + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); + } + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + udelay(10); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84); +} + +static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data) +{ + struct rtl8187_priv *priv = dev->priv; + u16 reg80, reg82, reg84; + + reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput); + reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable); + reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect); + + reg80 &= ~(0x3 << 2); + reg84 &= ~0xF; + + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x0007); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x0007); + udelay(10); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + udelay(2); + + rtl818x_iowrite16(priv, 
&priv->map->RFPinsOutput, reg80); + udelay(10); + + mutex_lock(&priv->io_mutex); + + priv->io_dmabuf->bits16 = data; + usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0), + RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE, + addr, 0x8225, &priv->io_dmabuf->bits16, sizeof(data), + HZ / 2); + + mutex_unlock(&priv->io_mutex); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + udelay(10); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84); +} + +static void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data) +{ + struct rtl8187_priv *priv = dev->priv; + + if (priv->asic_rev) + rtl8225_write_8051(dev, addr, cpu_to_le16(data)); + else + rtl8225_write_bitbang(dev, addr, data); +} + +static u16 rtl8225_read(struct ieee80211_hw *dev, u8 addr) +{ + struct rtl8187_priv *priv = dev->priv; + u16 reg80, reg82, reg84, out; + int i; + + reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput); + reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable); + reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect); + + reg80 &= ~0xF; + + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x000F); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x000F); + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); + udelay(4); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); + udelay(5); + + for (i = 4; i >= 0; i--) { + u16 reg = reg80 | ((addr >> i) & 1); + + if (!(i & 1)) { + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); + udelay(1); + } + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg | (1 << 1)); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg | (1 << 1)); + udelay(2); + + if (i & 1) { + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); + udelay(1); + } + } + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3) | (1 << 1)); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3)); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3)); + udelay(2); + + out = 0; + for (i = 11; i >= 0; i--) { + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3)); + udelay(1); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3) | (1 << 1)); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3) | (1 << 1)); + udelay(2); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3) | (1 << 1)); + udelay(2); + + if (rtl818x_ioread16(priv, &priv->map->RFPinsInput) & (1 << 1)) + out |= 1 << i; + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3)); + udelay(2); + } + + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, + reg80 | (1 << 3) | (1 << 2)); + udelay(2); + + rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82); + rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84); + rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x03A0); + + return out; +} + +static const u16 rtl8225bcd_rxgain[] = { + 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409, + 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541, + 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583, + 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644, + 0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688, + 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745, + 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789, + 
0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793, + 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d, + 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9, + 0x07aa, 0x07ab, 0x07ac, 0x07ad, 0x07b0, 0x07b1, 0x07b2, 0x07b3, + 0x07b4, 0x07b5, 0x07b8, 0x07b9, 0x07ba, 0x07bb, 0x07bb +}; + +static const u8 rtl8225_agc[] = { + 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, + 0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96, + 0x95, 0x94, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e, + 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86, + 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e, + 0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, 0x36, + 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e, + 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, + 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e, + 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16, + 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, + 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, + 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 +}; + +static const u8 rtl8225_gain[] = { + 0x23, 0x88, 0x7c, 0xa5, /* -82dBm */ + 0x23, 0x88, 0x7c, 0xb5, /* -82dBm */ + 0x23, 0x88, 0x7c, 0xc5, /* -82dBm */ + 0x33, 0x80, 0x79, 0xc5, /* -78dBm */ + 0x43, 0x78, 0x76, 0xc5, /* -74dBm */ + 0x53, 0x60, 0x73, 0xc5, /* -70dBm */ + 0x63, 0x58, 0x70, 0xc5, /* -66dBm */ +}; + +static const u8 rtl8225_threshold[] = { + 0x8d, 0x8d, 0x8d, 0x8d, 0x9d, 0xad, 0xbd +}; + +static const u8 rtl8225_tx_gain_cck_ofdm[] = { + 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e +}; + +static const u8 rtl8225_tx_power_cck[] = { + 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02, + 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02, + 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02, + 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02, + 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03, + 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03 +}; + +static const u8 rtl8225_tx_power_cck_ch14[] = { + 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00, + 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00, + 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00, + 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, + 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00 +}; + +static const u8 rtl8225_tx_power_ofdm[] = { + 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4 +}; + +static const u32 rtl8225_chan[] = { + 0x085c, 0x08dc, 0x095c, 0x09dc, 0x0a5c, 0x0adc, 0x0b5c, + 0x0bdc, 0x0c5c, 0x0cdc, 0x0d5c, 0x0ddc, 0x0e5c, 0x0f72 +}; + +static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel) +{ + struct rtl8187_priv *priv = dev->priv; + u8 cck_power, ofdm_power; + const u8 *tmp; + u32 reg; + int i; + + cck_power = priv->channels[channel - 1].hw_value & 0xF; + ofdm_power = priv->channels[channel - 1].hw_value >> 4; + + cck_power = min(cck_power, (u8)11); + if (ofdm_power > (u8)15) + ofdm_power = 25; + else + ofdm_power += 10; + + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, + rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1); + + if (channel == 14) + tmp = &rtl8225_tx_power_cck_ch14[(cck_power % 6) * 8]; + else + tmp = &rtl8225_tx_power_cck[(cck_power % 6) * 8]; + + for (i = 0; i < 8; i++) + rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++); + + msleep(1); // FIXME: optional? 
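+ /*
+ * Worked example of the indexing above: hw_value holds the per-channel
+ * power byte read from the EEPROM in rtl8187_probe(), with the CCK
+ * index in the low nibble and the OFDM index in the high nibble.
+ * cck_power / 6 picks the coarse step written to TX_GAIN_CCK from
+ * rtl8225_tx_gain_cck_ofdm[], while cck_power % 6 picks which 8-byte
+ * row of rtl8225_tx_power_cck[] was just written to CCK PHY registers
+ * 0x44..0x4b.  For hw_value == 0x35 this gives cck_power = 5 (gain
+ * step 0, table row 5) and, since the OFDM nibble 3 is <= 15,
+ * ofdm_power = 3 + 10 = 13.
+ */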
+ + /* anaparam2 on */ + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, + reg | RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, + RTL8187_RTL8225_ANAPARAM2_ON); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, + reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + rtl8225_write_phy_ofdm(dev, 2, 0x42); + rtl8225_write_phy_ofdm(dev, 6, 0x00); + rtl8225_write_phy_ofdm(dev, 8, 0x00); + + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, + rtl8225_tx_gain_cck_ofdm[ofdm_power / 6] >> 1); + + tmp = &rtl8225_tx_power_ofdm[ofdm_power % 6]; + + rtl8225_write_phy_ofdm(dev, 5, *tmp); + rtl8225_write_phy_ofdm(dev, 7, *tmp); + + msleep(1); +} + +static void rtl8225_rf_init(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + int i; + + rtl8225_write(dev, 0x0, 0x067); + rtl8225_write(dev, 0x1, 0xFE0); + rtl8225_write(dev, 0x2, 0x44D); + rtl8225_write(dev, 0x3, 0x441); + rtl8225_write(dev, 0x4, 0x486); + rtl8225_write(dev, 0x5, 0xBC0); + rtl8225_write(dev, 0x6, 0xAE6); + rtl8225_write(dev, 0x7, 0x82A); + rtl8225_write(dev, 0x8, 0x01F); + rtl8225_write(dev, 0x9, 0x334); + rtl8225_write(dev, 0xA, 0xFD4); + rtl8225_write(dev, 0xB, 0x391); + rtl8225_write(dev, 0xC, 0x050); + rtl8225_write(dev, 0xD, 0x6DB); + rtl8225_write(dev, 0xE, 0x029); + rtl8225_write(dev, 0xF, 0x914); msleep(100); + + rtl8225_write(dev, 0x2, 0xC4D); msleep(200); + rtl8225_write(dev, 0x2, 0x44D); msleep(200); + + if (!(rtl8225_read(dev, 6) & (1 << 7))) { + rtl8225_write(dev, 0x02, 0x0c4d); + msleep(200); + rtl8225_write(dev, 0x02, 0x044d); + msleep(100); + if (!(rtl8225_read(dev, 6) & (1 << 7))) + printk(KERN_WARNING "%s: RF Calibration Failed! 
%x\n", + wiphy_name(dev->wiphy), rtl8225_read(dev, 6)); + } + + rtl8225_write(dev, 0x0, 0x127); + + for (i = 0; i < ARRAY_SIZE(rtl8225bcd_rxgain); i++) { + rtl8225_write(dev, 0x1, i + 1); + rtl8225_write(dev, 0x2, rtl8225bcd_rxgain[i]); + } + + rtl8225_write(dev, 0x0, 0x027); + rtl8225_write(dev, 0x0, 0x22F); + + for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) { + rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]); + rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i); + } + + msleep(1); + + rtl8225_write_phy_ofdm(dev, 0x00, 0x01); + rtl8225_write_phy_ofdm(dev, 0x01, 0x02); + rtl8225_write_phy_ofdm(dev, 0x02, 0x42); + rtl8225_write_phy_ofdm(dev, 0x03, 0x00); + rtl8225_write_phy_ofdm(dev, 0x04, 0x00); + rtl8225_write_phy_ofdm(dev, 0x05, 0x00); + rtl8225_write_phy_ofdm(dev, 0x06, 0x40); + rtl8225_write_phy_ofdm(dev, 0x07, 0x00); + rtl8225_write_phy_ofdm(dev, 0x08, 0x40); + rtl8225_write_phy_ofdm(dev, 0x09, 0xfe); + rtl8225_write_phy_ofdm(dev, 0x0a, 0x09); + rtl8225_write_phy_ofdm(dev, 0x0b, 0x80); + rtl8225_write_phy_ofdm(dev, 0x0c, 0x01); + rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3); + rtl8225_write_phy_ofdm(dev, 0x0f, 0x38); + rtl8225_write_phy_ofdm(dev, 0x10, 0x84); + rtl8225_write_phy_ofdm(dev, 0x11, 0x06); + rtl8225_write_phy_ofdm(dev, 0x12, 0x20); + rtl8225_write_phy_ofdm(dev, 0x13, 0x20); + rtl8225_write_phy_ofdm(dev, 0x14, 0x00); + rtl8225_write_phy_ofdm(dev, 0x15, 0x40); + rtl8225_write_phy_ofdm(dev, 0x16, 0x00); + rtl8225_write_phy_ofdm(dev, 0x17, 0x40); + rtl8225_write_phy_ofdm(dev, 0x18, 0xef); + rtl8225_write_phy_ofdm(dev, 0x19, 0x19); + rtl8225_write_phy_ofdm(dev, 0x1a, 0x20); + rtl8225_write_phy_ofdm(dev, 0x1b, 0x76); + rtl8225_write_phy_ofdm(dev, 0x1c, 0x04); + rtl8225_write_phy_ofdm(dev, 0x1e, 0x95); + rtl8225_write_phy_ofdm(dev, 0x1f, 0x75); + rtl8225_write_phy_ofdm(dev, 0x20, 0x1f); + rtl8225_write_phy_ofdm(dev, 0x21, 0x27); + rtl8225_write_phy_ofdm(dev, 0x22, 0x16); + rtl8225_write_phy_ofdm(dev, 0x24, 0x46); + rtl8225_write_phy_ofdm(dev, 0x25, 0x20); + rtl8225_write_phy_ofdm(dev, 0x26, 0x90); + rtl8225_write_phy_ofdm(dev, 0x27, 0x88); + + rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[2 * 4]); + rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[2 * 4 + 2]); + rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[2 * 4 + 3]); + rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[2 * 4 + 1]); + + rtl8225_write_phy_cck(dev, 0x00, 0x98); + rtl8225_write_phy_cck(dev, 0x03, 0x20); + rtl8225_write_phy_cck(dev, 0x04, 0x7e); + rtl8225_write_phy_cck(dev, 0x05, 0x12); + rtl8225_write_phy_cck(dev, 0x06, 0xfc); + rtl8225_write_phy_cck(dev, 0x07, 0x78); + rtl8225_write_phy_cck(dev, 0x08, 0x2e); + rtl8225_write_phy_cck(dev, 0x10, 0x9b); + rtl8225_write_phy_cck(dev, 0x11, 0x88); + rtl8225_write_phy_cck(dev, 0x12, 0x47); + rtl8225_write_phy_cck(dev, 0x13, 0xd0); + rtl8225_write_phy_cck(dev, 0x19, 0x00); + rtl8225_write_phy_cck(dev, 0x1a, 0xa0); + rtl8225_write_phy_cck(dev, 0x1b, 0x08); + rtl8225_write_phy_cck(dev, 0x40, 0x86); + rtl8225_write_phy_cck(dev, 0x41, 0x8d); + rtl8225_write_phy_cck(dev, 0x42, 0x15); + rtl8225_write_phy_cck(dev, 0x43, 0x18); + rtl8225_write_phy_cck(dev, 0x44, 0x1f); + rtl8225_write_phy_cck(dev, 0x45, 0x1e); + rtl8225_write_phy_cck(dev, 0x46, 0x1a); + rtl8225_write_phy_cck(dev, 0x47, 0x15); + rtl8225_write_phy_cck(dev, 0x48, 0x10); + rtl8225_write_phy_cck(dev, 0x49, 0x0a); + rtl8225_write_phy_cck(dev, 0x4a, 0x05); + rtl8225_write_phy_cck(dev, 0x4b, 0x02); + rtl8225_write_phy_cck(dev, 0x4c, 0x05); + + rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D); + + rtl8225_rf_set_tx_power(dev, 1); + + /* RX 
antenna default to A */ + rtl8225_write_phy_cck(dev, 0x10, 0x9b); /* B: 0xDB */ + rtl8225_write_phy_ofdm(dev, 0x26, 0x90); /* B: 0x10 */ + + rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */ + msleep(1); + rtl818x_iowrite32(priv, (__le32 *)0xFF94, 0x3dc00002); + + /* set sensitivity */ + rtl8225_write(dev, 0x0c, 0x50); + rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[2 * 4]); + rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[2 * 4 + 2]); + rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[2 * 4 + 3]); + rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[2 * 4 + 1]); + rtl8225_write_phy_cck(dev, 0x41, rtl8225_threshold[2]); +} + +static const u8 rtl8225z2_agc[] = { + 0x5e, 0x5e, 0x5e, 0x5e, 0x5d, 0x5b, 0x59, 0x57, 0x55, 0x53, 0x51, 0x4f, + 0x4d, 0x4b, 0x49, 0x47, 0x45, 0x43, 0x41, 0x3f, 0x3d, 0x3b, 0x39, 0x37, + 0x35, 0x33, 0x31, 0x2f, 0x2d, 0x2b, 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, + 0x1d, 0x1b, 0x19, 0x17, 0x15, 0x13, 0x11, 0x0f, 0x0d, 0x0b, 0x09, 0x07, + 0x05, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, + 0x19, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, + 0x28, 0x29, 0x2a, 0x2a, 0x2a, 0x2b, 0x2b, 0x2b, 0x2c, 0x2c, 0x2c, 0x2d, + 0x2d, 0x2d, 0x2d, 0x2e, 0x2e, 0x2e, 0x2e, 0x2f, 0x2f, 0x2f, 0x30, 0x30, + 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, + 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31 +}; +static const u8 rtl8225z2_ofdm[] = { + 0x10, 0x0d, 0x01, 0x00, 0x14, 0xfb, 0xfb, 0x60, + 0x00, 0x60, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, + 0x40, 0x00, 0x40, 0x00, 0x00, 0x00, 0xa8, 0x26, + 0x32, 0x33, 0x07, 0xa5, 0x6f, 0x55, 0xc8, 0xb3, + 0x0a, 0xe1, 0x2C, 0x8a, 0x86, 0x83, 0x34, 0x0f, + 0x4f, 0x24, 0x6f, 0xc2, 0x6b, 0x40, 0x80, 0x00, + 0xc0, 0xc1, 0x58, 0xf1, 0x00, 0xe4, 0x90, 0x3e, + 0x6d, 0x3c, 0xfb, 0x07 +}; + +static const u8 rtl8225z2_tx_power_cck_ch14[] = { + 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00, + 0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00, + 0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00, + 0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00 +}; + +static const u8 rtl8225z2_tx_power_cck[] = { + 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04, + 0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03, + 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03, + 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03 +}; + +static const u8 rtl8225z2_tx_power_ofdm[] = { + 0x42, 0x00, 0x40, 0x00, 0x40 +}; + +static const u8 rtl8225z2_tx_gain_cck_ofdm[] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, + 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, + 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23 +}; + +static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel) +{ + struct rtl8187_priv *priv = dev->priv; + u8 cck_power, ofdm_power; + const u8 *tmp; + u32 reg; + int i; + + cck_power = priv->channels[channel - 1].hw_value & 0xF; + ofdm_power = priv->channels[channel - 1].hw_value >> 4; + + cck_power = min(cck_power, (u8)15); + cck_power += priv->txpwr_base & 0xF; + cck_power = min(cck_power, (u8)35); + + if (ofdm_power > (u8)15) + ofdm_power = 25; + else + ofdm_power += 10; + ofdm_power += priv->txpwr_base >> 4; + ofdm_power = min(ofdm_power, (u8)35); + + if (channel == 14) + tmp = rtl8225z2_tx_power_cck_ch14; + else + tmp = rtl8225z2_tx_power_cck; + + for (i = 0; i < 8; i++) + rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++); + + rtl818x_iowrite8(priv, 
&priv->map->TX_GAIN_CCK, + rtl8225z2_tx_gain_cck_ofdm[cck_power]); + msleep(1); + + /* anaparam2 on */ + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, + reg | RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, + RTL8187_RTL8225_ANAPARAM2_ON); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, + reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); + + rtl8225_write_phy_ofdm(dev, 2, 0x42); + rtl8225_write_phy_ofdm(dev, 5, 0x00); + rtl8225_write_phy_ofdm(dev, 6, 0x40); + rtl8225_write_phy_ofdm(dev, 7, 0x00); + rtl8225_write_phy_ofdm(dev, 8, 0x40); + + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, + rtl8225z2_tx_gain_cck_ofdm[ofdm_power]); + msleep(1); +} + +static void rtl8225z2_b_rf_set_tx_power(struct ieee80211_hw *dev, int channel) +{ + struct rtl8187_priv *priv = dev->priv; + u8 cck_power, ofdm_power; + const u8 *tmp; + int i; + + cck_power = priv->channels[channel - 1].hw_value & 0xF; + ofdm_power = priv->channels[channel - 1].hw_value >> 4; + + if (cck_power > 15) + cck_power = (priv->hw_rev == RTL8187BvB) ? 15 : 22; + else + cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7; + cck_power += priv->txpwr_base & 0xF; + cck_power = min(cck_power, (u8)35); + + if (ofdm_power > 15) + ofdm_power = (priv->hw_rev == RTL8187BvB) ? 17 : 25; + else + ofdm_power += (priv->hw_rev == RTL8187BvB) ? 2 : 10; + ofdm_power += (priv->txpwr_base >> 4) & 0xF; + ofdm_power = min(ofdm_power, (u8)35); + + if (channel == 14) + tmp = rtl8225z2_tx_power_cck_ch14; + else + tmp = rtl8225z2_tx_power_cck; + + if (priv->hw_rev == RTL8187BvB) { + if (cck_power <= 6) + ; /* do nothing */ + else if (cck_power <= 11) + tmp += 8; + else + tmp += 16; + } else { + if (cck_power <= 5) + ; /* do nothing */ + else if (cck_power <= 11) + tmp += 8; + else if (cck_power <= 17) + tmp += 16; + else + tmp += 24; + } + + for (i = 0; i < 8; i++) + rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++); + + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, + rtl8225z2_tx_gain_cck_ofdm[cck_power] << 1); + msleep(1); + + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, + rtl8225z2_tx_gain_cck_ofdm[ofdm_power] << 1); + if (priv->hw_rev == RTL8187BvB) { + if (ofdm_power <= 11) { + rtl8225_write_phy_ofdm(dev, 0x87, 0x60); + rtl8225_write_phy_ofdm(dev, 0x89, 0x60); + } else { + rtl8225_write_phy_ofdm(dev, 0x87, 0x5c); + rtl8225_write_phy_ofdm(dev, 0x89, 0x5c); + } + } else { + if (ofdm_power <= 11) { + rtl8225_write_phy_ofdm(dev, 0x87, 0x5c); + rtl8225_write_phy_ofdm(dev, 0x89, 0x5c); + } else if (ofdm_power <= 17) { + rtl8225_write_phy_ofdm(dev, 0x87, 0x54); + rtl8225_write_phy_ofdm(dev, 0x89, 0x54); + } else { + rtl8225_write_phy_ofdm(dev, 0x87, 0x50); + rtl8225_write_phy_ofdm(dev, 0x89, 0x50); + } + } + msleep(1); +} + +static const u16 rtl8225z2_rxgain[] = { + 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409, + 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541, + 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583, + 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644, + 0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688, + 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745, + 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789, + 0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793, + 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 
0x079d, + 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9, + 0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3, + 0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bb +}; + +static const u8 rtl8225z2_gain_bg[] = { + 0x23, 0x15, 0xa5, /* -82-1dBm */ + 0x23, 0x15, 0xb5, /* -82-2dBm */ + 0x23, 0x15, 0xc5, /* -82-3dBm */ + 0x33, 0x15, 0xc5, /* -78dBm */ + 0x43, 0x15, 0xc5, /* -74dBm */ + 0x53, 0x15, 0xc5, /* -70dBm */ + 0x63, 0x15, 0xc5 /* -66dBm */ +}; + +static void rtl8225z2_rf_init(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + int i; + + rtl8225_write(dev, 0x0, 0x2BF); + rtl8225_write(dev, 0x1, 0xEE0); + rtl8225_write(dev, 0x2, 0x44D); + rtl8225_write(dev, 0x3, 0x441); + rtl8225_write(dev, 0x4, 0x8C3); + rtl8225_write(dev, 0x5, 0xC72); + rtl8225_write(dev, 0x6, 0x0E6); + rtl8225_write(dev, 0x7, 0x82A); + rtl8225_write(dev, 0x8, 0x03F); + rtl8225_write(dev, 0x9, 0x335); + rtl8225_write(dev, 0xa, 0x9D4); + rtl8225_write(dev, 0xb, 0x7BB); + rtl8225_write(dev, 0xc, 0x850); + rtl8225_write(dev, 0xd, 0xCDF); + rtl8225_write(dev, 0xe, 0x02B); + rtl8225_write(dev, 0xf, 0x114); + msleep(100); + + rtl8225_write(dev, 0x0, 0x1B7); + + for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) { + rtl8225_write(dev, 0x1, i + 1); + rtl8225_write(dev, 0x2, rtl8225z2_rxgain[i]); + } + + rtl8225_write(dev, 0x3, 0x080); + rtl8225_write(dev, 0x5, 0x004); + rtl8225_write(dev, 0x0, 0x0B7); + rtl8225_write(dev, 0x2, 0xc4D); + + msleep(200); + rtl8225_write(dev, 0x2, 0x44D); + msleep(100); + + if (!(rtl8225_read(dev, 6) & (1 << 7))) { + rtl8225_write(dev, 0x02, 0x0C4D); + msleep(200); + rtl8225_write(dev, 0x02, 0x044D); + msleep(100); + if (!(rtl8225_read(dev, 6) & (1 << 7))) + printk(KERN_WARNING "%s: RF Calibration Failed! 
%x\n", + wiphy_name(dev->wiphy), rtl8225_read(dev, 6)); + } + + msleep(200); + + rtl8225_write(dev, 0x0, 0x2BF); + + for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) { + rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]); + rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i); + } + + msleep(1); + + rtl8225_write_phy_ofdm(dev, 0x00, 0x01); + rtl8225_write_phy_ofdm(dev, 0x01, 0x02); + rtl8225_write_phy_ofdm(dev, 0x02, 0x42); + rtl8225_write_phy_ofdm(dev, 0x03, 0x00); + rtl8225_write_phy_ofdm(dev, 0x04, 0x00); + rtl8225_write_phy_ofdm(dev, 0x05, 0x00); + rtl8225_write_phy_ofdm(dev, 0x06, 0x40); + rtl8225_write_phy_ofdm(dev, 0x07, 0x00); + rtl8225_write_phy_ofdm(dev, 0x08, 0x40); + rtl8225_write_phy_ofdm(dev, 0x09, 0xfe); + rtl8225_write_phy_ofdm(dev, 0x0a, 0x08); + rtl8225_write_phy_ofdm(dev, 0x0b, 0x80); + rtl8225_write_phy_ofdm(dev, 0x0c, 0x01); + rtl8225_write_phy_ofdm(dev, 0x0d, 0x43); + rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3); + rtl8225_write_phy_ofdm(dev, 0x0f, 0x38); + rtl8225_write_phy_ofdm(dev, 0x10, 0x84); + rtl8225_write_phy_ofdm(dev, 0x11, 0x07); + rtl8225_write_phy_ofdm(dev, 0x12, 0x20); + rtl8225_write_phy_ofdm(dev, 0x13, 0x20); + rtl8225_write_phy_ofdm(dev, 0x14, 0x00); + rtl8225_write_phy_ofdm(dev, 0x15, 0x40); + rtl8225_write_phy_ofdm(dev, 0x16, 0x00); + rtl8225_write_phy_ofdm(dev, 0x17, 0x40); + rtl8225_write_phy_ofdm(dev, 0x18, 0xef); + rtl8225_write_phy_ofdm(dev, 0x19, 0x19); + rtl8225_write_phy_ofdm(dev, 0x1a, 0x20); + rtl8225_write_phy_ofdm(dev, 0x1b, 0x15); + rtl8225_write_phy_ofdm(dev, 0x1c, 0x04); + rtl8225_write_phy_ofdm(dev, 0x1d, 0xc5); + rtl8225_write_phy_ofdm(dev, 0x1e, 0x95); + rtl8225_write_phy_ofdm(dev, 0x1f, 0x75); + rtl8225_write_phy_ofdm(dev, 0x20, 0x1f); + rtl8225_write_phy_ofdm(dev, 0x21, 0x17); + rtl8225_write_phy_ofdm(dev, 0x22, 0x16); + rtl8225_write_phy_ofdm(dev, 0x23, 0x80); + rtl8225_write_phy_ofdm(dev, 0x24, 0x46); + rtl8225_write_phy_ofdm(dev, 0x25, 0x00); + rtl8225_write_phy_ofdm(dev, 0x26, 0x90); + rtl8225_write_phy_ofdm(dev, 0x27, 0x88); + + rtl8225_write_phy_ofdm(dev, 0x0b, rtl8225z2_gain_bg[4 * 3]); + rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225z2_gain_bg[4 * 3 + 1]); + rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225z2_gain_bg[4 * 3 + 2]); + rtl8225_write_phy_ofdm(dev, 0x21, 0x37); + + rtl8225_write_phy_cck(dev, 0x00, 0x98); + rtl8225_write_phy_cck(dev, 0x03, 0x20); + rtl8225_write_phy_cck(dev, 0x04, 0x7e); + rtl8225_write_phy_cck(dev, 0x05, 0x12); + rtl8225_write_phy_cck(dev, 0x06, 0xfc); + rtl8225_write_phy_cck(dev, 0x07, 0x78); + rtl8225_write_phy_cck(dev, 0x08, 0x2e); + rtl8225_write_phy_cck(dev, 0x10, 0x9b); + rtl8225_write_phy_cck(dev, 0x11, 0x88); + rtl8225_write_phy_cck(dev, 0x12, 0x47); + rtl8225_write_phy_cck(dev, 0x13, 0xd0); + rtl8225_write_phy_cck(dev, 0x19, 0x00); + rtl8225_write_phy_cck(dev, 0x1a, 0xa0); + rtl8225_write_phy_cck(dev, 0x1b, 0x08); + rtl8225_write_phy_cck(dev, 0x40, 0x86); + rtl8225_write_phy_cck(dev, 0x41, 0x8d); + rtl8225_write_phy_cck(dev, 0x42, 0x15); + rtl8225_write_phy_cck(dev, 0x43, 0x18); + rtl8225_write_phy_cck(dev, 0x44, 0x36); + rtl8225_write_phy_cck(dev, 0x45, 0x35); + rtl8225_write_phy_cck(dev, 0x46, 0x2e); + rtl8225_write_phy_cck(dev, 0x47, 0x25); + rtl8225_write_phy_cck(dev, 0x48, 0x1c); + rtl8225_write_phy_cck(dev, 0x49, 0x12); + rtl8225_write_phy_cck(dev, 0x4a, 0x09); + rtl8225_write_phy_cck(dev, 0x4b, 0x04); + rtl8225_write_phy_cck(dev, 0x4c, 0x05); + + rtl818x_iowrite8(priv, (u8 *)0xFF5B, 0x0D); msleep(1); + + rtl8225z2_rf_set_tx_power(dev, 1); + + /* RX antenna default to A */ + rtl8225_write_phy_cck(dev, 0x10, 
0x9b); /* B: 0xDB */ + rtl8225_write_phy_ofdm(dev, 0x26, 0x90); /* B: 0x10 */ + + rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */ + msleep(1); + rtl818x_iowrite32(priv, (__le32 *)0xFF94, 0x3dc00002); +} + +static void rtl8225z2_b_rf_init(struct ieee80211_hw *dev) +{ + struct rtl8187_priv *priv = dev->priv; + int i; + + rtl8225_write(dev, 0x0, 0x0B7); + rtl8225_write(dev, 0x1, 0xEE0); + rtl8225_write(dev, 0x2, 0x44D); + rtl8225_write(dev, 0x3, 0x441); + rtl8225_write(dev, 0x4, 0x8C3); + rtl8225_write(dev, 0x5, 0xC72); + rtl8225_write(dev, 0x6, 0x0E6); + rtl8225_write(dev, 0x7, 0x82A); + rtl8225_write(dev, 0x8, 0x03F); + rtl8225_write(dev, 0x9, 0x335); + rtl8225_write(dev, 0xa, 0x9D4); + rtl8225_write(dev, 0xb, 0x7BB); + rtl8225_write(dev, 0xc, 0x850); + rtl8225_write(dev, 0xd, 0xCDF); + rtl8225_write(dev, 0xe, 0x02B); + rtl8225_write(dev, 0xf, 0x114); + + rtl8225_write(dev, 0x0, 0x1B7); + + for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) { + rtl8225_write(dev, 0x1, i + 1); + rtl8225_write(dev, 0x2, rtl8225z2_rxgain[i]); + } + + rtl8225_write(dev, 0x3, 0x080); + rtl8225_write(dev, 0x5, 0x004); + rtl8225_write(dev, 0x0, 0x0B7); + + rtl8225_write(dev, 0x2, 0xC4D); + + rtl8225_write(dev, 0x2, 0x44D); + rtl8225_write(dev, 0x0, 0x2BF); + + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x03); + rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x07); + rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); + + rtl8225_write_phy_ofdm(dev, 0x80, 0x12); + for (i = 0; i < ARRAY_SIZE(rtl8225z2_agc); i++) { + rtl8225_write_phy_ofdm(dev, 0xF, rtl8225z2_agc[i]); + rtl8225_write_phy_ofdm(dev, 0xE, 0x80 + i); + rtl8225_write_phy_ofdm(dev, 0xE, 0); + } + rtl8225_write_phy_ofdm(dev, 0x80, 0x10); + + for (i = 0; i < ARRAY_SIZE(rtl8225z2_ofdm); i++) + rtl8225_write_phy_ofdm(dev, i, rtl8225z2_ofdm[i]); + + rtl8225_write_phy_ofdm(dev, 0x97, 0x46); + rtl8225_write_phy_ofdm(dev, 0xa4, 0xb6); + rtl8225_write_phy_ofdm(dev, 0x85, 0xfc); + rtl8225_write_phy_cck(dev, 0xc1, 0x88); +} + +static void rtl8225_rf_stop(struct ieee80211_hw *dev) +{ + u8 reg; + struct rtl8187_priv *priv = dev->priv; + + rtl8225_write(dev, 0x4, 0x1f); + + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE); + if (!priv->is_rtl8187b) { + rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, + RTL8187_RTL8225_ANAPARAM2_OFF); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM, + RTL8187_RTL8225_ANAPARAM_OFF); + } else { + rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, + RTL8187B_RTL8225_ANAPARAM2_OFF); + rtl818x_iowrite32(priv, &priv->map->ANAPARAM, + RTL8187B_RTL8225_ANAPARAM_OFF); + rtl818x_iowrite8(priv, &priv->map->ANAPARAM3, + RTL8187B_RTL8225_ANAPARAM3_OFF); + } + rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); +} + +static void rtl8225_rf_set_channel(struct ieee80211_hw *dev, + struct ieee80211_conf *conf) +{ + struct rtl8187_priv *priv = dev->priv; + int chan = ieee80211_frequency_to_channel(conf->channel->center_freq); + + if (priv->rf->init == rtl8225_rf_init) + rtl8225_rf_set_tx_power(dev, chan); + else if (priv->rf->init == rtl8225z2_rf_init) + rtl8225z2_rf_set_tx_power(dev, chan); + else + rtl8225z2_b_rf_set_tx_power(dev, chan); + + rtl8225_write(dev, 0x7, rtl8225_chan[chan - 1]); + msleep(10); +} + +static const struct rtl818x_rf_ops rtl8225_ops = { + 
.name = "rtl8225", + .init = rtl8225_rf_init, + .stop = rtl8225_rf_stop, + .set_chan = rtl8225_rf_set_channel +}; + +static const struct rtl818x_rf_ops rtl8225z2_ops = { + .name = "rtl8225z2", + .init = rtl8225z2_rf_init, + .stop = rtl8225_rf_stop, + .set_chan = rtl8225_rf_set_channel +}; + +static const struct rtl818x_rf_ops rtl8225z2_b_ops = { + .name = "rtl8225z2", + .init = rtl8225z2_b_rf_init, + .stop = rtl8225_rf_stop, + .set_chan = rtl8225_rf_set_channel +}; + +const struct rtl818x_rf_ops * rtl8187_detect_rf(struct ieee80211_hw *dev) +{ + u16 reg8, reg9; + struct rtl8187_priv *priv = dev->priv; + + if (!priv->is_rtl8187b) { + rtl8225_write(dev, 0, 0x1B7); + + reg8 = rtl8225_read(dev, 8); + reg9 = rtl8225_read(dev, 9); + + rtl8225_write(dev, 0, 0x0B7); + + if (reg8 != 0x588 || reg9 != 0x700) + return &rtl8225_ops; + + return &rtl8225z2_ops; + } else + return &rtl8225z2_b_ops; +} diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.h b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.h new file mode 100644 index 0000000..20c5b6e --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.h @@ -0,0 +1,44 @@ +/* + * Radio tuning definitions for RTL8225 on RTL8187 + * + * Copyright 2007 Michael Wu + * Copyright 2007 Andrea Merello + * + * Based on the r8187 driver, which is: + * Copyright 2005 Andrea Merello , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef RTL8187_RTL8225_H +#define RTL8187_RTL8225_H + +#define RTL8187_RTL8225_ANAPARAM_ON 0xa0000a59 +#define RTL8187_RTL8225_ANAPARAM2_ON 0x860c7312 +#define RTL8187_RTL8225_ANAPARAM_OFF 0xa00beb59 +#define RTL8187_RTL8225_ANAPARAM2_OFF 0x840dec11 + +#define RTL8187B_RTL8225_ANAPARAM_ON 0x45090658 +#define RTL8187B_RTL8225_ANAPARAM2_ON 0x727f3f52 +#define RTL8187B_RTL8225_ANAPARAM3_ON 0x00 +#define RTL8187B_RTL8225_ANAPARAM_OFF 0x55480658 +#define RTL8187B_RTL8225_ANAPARAM2_OFF 0x72003f50 +#define RTL8187B_RTL8225_ANAPARAM3_OFF 0x00 + +const struct rtl818x_rf_ops * rtl8187_detect_rf(struct ieee80211_hw *); + +static inline void rtl8225_write_phy_ofdm(struct ieee80211_hw *dev, + u8 addr, u32 data) +{ + rtl8187_write_phy(dev, addr, data); +} + +static inline void rtl8225_write_phy_cck(struct ieee80211_hw *dev, + u8 addr, u32 data) +{ + rtl8187_write_phy(dev, addr, data | 0x10000); +} + +#endif /* RTL8187_RTL8225_H */ diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h new file mode 100644 index 0000000..8522490 --- /dev/null +++ b/drivers/net/wireless/rtl818x/rtl818x.h @@ -0,0 +1,243 @@ +/* + * Definitions for RTL818x hardware + * + * Copyright 2007 Michael Wu + * Copyright 2007 Andrea Merello + * + * Based on the r8187 driver, which is: + * Copyright 2005 Andrea Merello , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
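+ *
+ * The rtl818x_csr structure below mirrors the chip's control/status
+ * register map, shared across the rtl818x family (the TX_CONF HWVER
+ * values cover RTL8180, RTL8185 and RTL8187 variants).  The offsets are
+ * fixed by the hardware, which is why the structure is packed and is
+ * only accessed through the rtl818x_ioread and rtl818x_iowrite helpers.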
+ */ + +#ifndef RTL818X_H +#define RTL818X_H + +struct rtl818x_csr { + u8 MAC[6]; + u8 reserved_0[2]; + __le32 MAR[2]; + u8 RX_FIFO_COUNT; + u8 reserved_1; + u8 TX_FIFO_COUNT; + u8 BQREQ; + u8 reserved_2[4]; + __le32 TSFT[2]; + __le32 TLPDA; + __le32 TNPDA; + __le32 THPDA; + __le16 BRSR; + u8 BSSID[6]; + u8 RESP_RATE; + u8 EIFS; + u8 reserved_3[1]; + u8 CMD; +#define RTL818X_CMD_TX_ENABLE (1 << 2) +#define RTL818X_CMD_RX_ENABLE (1 << 3) +#define RTL818X_CMD_RESET (1 << 4) + u8 reserved_4[4]; + __le16 INT_MASK; + __le16 INT_STATUS; +#define RTL818X_INT_RX_OK (1 << 0) +#define RTL818X_INT_RX_ERR (1 << 1) +#define RTL818X_INT_TXL_OK (1 << 2) +#define RTL818X_INT_TXL_ERR (1 << 3) +#define RTL818X_INT_RX_DU (1 << 4) +#define RTL818X_INT_RX_FO (1 << 5) +#define RTL818X_INT_TXN_OK (1 << 6) +#define RTL818X_INT_TXN_ERR (1 << 7) +#define RTL818X_INT_TXH_OK (1 << 8) +#define RTL818X_INT_TXH_ERR (1 << 9) +#define RTL818X_INT_TXB_OK (1 << 10) +#define RTL818X_INT_TXB_ERR (1 << 11) +#define RTL818X_INT_ATIM (1 << 12) +#define RTL818X_INT_BEACON (1 << 13) +#define RTL818X_INT_TIME_OUT (1 << 14) +#define RTL818X_INT_TX_FO (1 << 15) + __le32 TX_CONF; +#define RTL818X_TX_CONF_LOOPBACK_MAC (1 << 17) +#define RTL818X_TX_CONF_LOOPBACK_CONT (3 << 17) +#define RTL818X_TX_CONF_NO_ICV (1 << 19) +#define RTL818X_TX_CONF_DISCW (1 << 20) +#define RTL818X_TX_CONF_SAT_HWPLCP (1 << 24) +#define RTL818X_TX_CONF_R8180_ABCD (2 << 25) +#define RTL818X_TX_CONF_R8180_F (3 << 25) +#define RTL818X_TX_CONF_R8185_ABC (4 << 25) +#define RTL818X_TX_CONF_R8185_D (5 << 25) +#define RTL818X_TX_CONF_R8187vD (5 << 25) +#define RTL818X_TX_CONF_R8187vD_B (6 << 25) +#define RTL818X_TX_CONF_HWVER_MASK (7 << 25) +#define RTL818X_TX_CONF_DISREQQSIZE (1 << 28) +#define RTL818X_TX_CONF_PROBE_DTS (1 << 29) +#define RTL818X_TX_CONF_HW_SEQNUM (1 << 30) +#define RTL818X_TX_CONF_CW_MIN (1 << 31) + __le32 RX_CONF; +#define RTL818X_RX_CONF_MONITOR (1 << 0) +#define RTL818X_RX_CONF_NICMAC (1 << 1) +#define RTL818X_RX_CONF_MULTICAST (1 << 2) +#define RTL818X_RX_CONF_BROADCAST (1 << 3) +#define RTL818X_RX_CONF_FCS (1 << 5) +#define RTL818X_RX_CONF_DATA (1 << 18) +#define RTL818X_RX_CONF_CTRL (1 << 19) +#define RTL818X_RX_CONF_MGMT (1 << 20) +#define RTL818X_RX_CONF_ADDR3 (1 << 21) +#define RTL818X_RX_CONF_PM (1 << 22) +#define RTL818X_RX_CONF_BSSID (1 << 23) +#define RTL818X_RX_CONF_RX_AUTORESETPHY (1 << 28) +#define RTL818X_RX_CONF_CSDM1 (1 << 29) +#define RTL818X_RX_CONF_CSDM2 (1 << 30) +#define RTL818X_RX_CONF_ONLYERLPKT (1 << 31) + __le32 INT_TIMEOUT; + __le32 TBDA; + u8 EEPROM_CMD; +#define RTL818X_EEPROM_CMD_READ (1 << 0) +#define RTL818X_EEPROM_CMD_WRITE (1 << 1) +#define RTL818X_EEPROM_CMD_CK (1 << 2) +#define RTL818X_EEPROM_CMD_CS (1 << 3) +#define RTL818X_EEPROM_CMD_NORMAL (0 << 6) +#define RTL818X_EEPROM_CMD_LOAD (1 << 6) +#define RTL818X_EEPROM_CMD_PROGRAM (2 << 6) +#define RTL818X_EEPROM_CMD_CONFIG (3 << 6) + u8 CONFIG0; + u8 CONFIG1; + u8 CONFIG2; +#define RTL818X_CONFIG2_ANTENNA_DIV (1 << 6) + __le32 ANAPARAM; + u8 MSR; +#define RTL818X_MSR_NO_LINK (0 << 2) +#define RTL818X_MSR_ADHOC (1 << 2) +#define RTL818X_MSR_INFRA (2 << 2) +#define RTL818X_MSR_MASTER (3 << 2) +#define RTL818X_MSR_ENEDCA (4 << 2) + u8 CONFIG3; +#define RTL818X_CONFIG3_ANAPARAM_WRITE (1 << 6) +#define RTL818X_CONFIG3_GNT_SELECT (1 << 7) + u8 CONFIG4; +#define RTL818X_CONFIG4_POWEROFF (1 << 6) +#define RTL818X_CONFIG4_VCOOFF (1 << 7) + u8 TESTR; + u8 reserved_9[2]; + u8 PGSELECT; + u8 SECURITY; + __le32 ANAPARAM2; + u8 reserved_10[12]; + __le16 BEACON_INTERVAL; + __le16 
ATIM_WND; + __le16 BEACON_INTERVAL_TIME; + __le16 ATIMTR_INTERVAL; + u8 PHY_DELAY; + u8 CARRIER_SENSE_COUNTER; + u8 reserved_11[2]; + u8 PHY[4]; + __le16 RFPinsOutput; + __le16 RFPinsEnable; + __le16 RFPinsSelect; + __le16 RFPinsInput; + __le32 RF_PARA; + __le32 RF_TIMING; + u8 GP_ENABLE; + u8 GPIO0; + u8 GPIO1; + u8 reserved_12; + __le32 HSSI_PARA; + u8 reserved_13[4]; + u8 TX_AGC_CTL; +#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT (1 << 0) +#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT (1 << 1) +#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2) + u8 TX_GAIN_CCK; + u8 TX_GAIN_OFDM; + u8 TX_ANTENNA; + u8 reserved_14[16]; + u8 WPA_CONF; + u8 reserved_15[3]; + u8 SIFS; + u8 DIFS; + u8 SLOT; + u8 reserved_16[5]; + u8 CW_CONF; +#define RTL818X_CW_CONF_PERPACKET_CW_SHIFT (1 << 0) +#define RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT (1 << 1) + u8 CW_VAL; + u8 RATE_FALLBACK; +#define RTL818X_RATE_FALLBACK_ENABLE (1 << 7) + u8 ACM_CONTROL; + u8 reserved_17[24]; + u8 CONFIG5; + u8 TX_DMA_POLLING; + u8 reserved_18[2]; + __le16 CWR; + u8 RETRY_CTR; + u8 reserved_19[3]; + __le16 INT_MIG; +/* RTL818X_R8187B_*: magic numbers from ioregisters */ +#define RTL818X_R8187B_B 0 +#define RTL818X_R8187B_D 1 +#define RTL818X_R8187B_E 2 + __le32 RDSAR; + __le16 TID_AC_MAP; + u8 reserved_20[4]; + u8 ANAPARAM3; + u8 reserved_21[5]; + __le16 FEMR; + u8 reserved_22[4]; + __le16 TALLY_CNT; + u8 TALLY_SEL; +} __attribute__((packed)); + +struct rtl818x_rf_ops { + char *name; + void (*init)(struct ieee80211_hw *); + void (*stop)(struct ieee80211_hw *); + void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *); + void (*conf_erp)(struct ieee80211_hw *, struct ieee80211_bss_conf *); +}; + +/** + * enum rtl818x_tx_desc_flags - Tx/Rx flags are common between RTL818X chips + * + * @RTL818X_TX_DESC_FLAG_NO_ENC: Disable hardware based encryption. + * @RTL818X_TX_DESC_FLAG_TX_OK: TX frame was ACKed. + * @RTL818X_TX_DESC_FLAG_SPLCP: Use short preamble. + * @RTL818X_TX_DESC_FLAG_MOREFRAG: More fragments follow. + * @RTL818X_TX_DESC_FLAG_CTS: Use CTS-to-self protection. + * @RTL818X_TX_DESC_FLAG_RTS: Use RTS/CTS protection. + * @RTL818X_TX_DESC_FLAG_LS: Last segment of the frame. + * @RTL818X_TX_DESC_FLAG_FS: First segment of the frame. 
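+ *
+ * Several values intentionally share a bit position (NO_ENC and TX_OK
+ * are both bit 15, SPLCP and RX_UNDER both bit 16); the same descriptor
+ * word appears to carry request flags when a frame is submitted and
+ * status flags when it completes, so those bits are interpreted
+ * according to the direction of use.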
+ */ +enum rtl818x_tx_desc_flags { + RTL818X_TX_DESC_FLAG_NO_ENC = (1 << 15), + RTL818X_TX_DESC_FLAG_TX_OK = (1 << 15), + RTL818X_TX_DESC_FLAG_SPLCP = (1 << 16), + RTL818X_TX_DESC_FLAG_RX_UNDER = (1 << 16), + RTL818X_TX_DESC_FLAG_MOREFRAG = (1 << 17), + RTL818X_TX_DESC_FLAG_CTS = (1 << 18), + RTL818X_TX_DESC_FLAG_RTS = (1 << 23), + RTL818X_TX_DESC_FLAG_LS = (1 << 28), + RTL818X_TX_DESC_FLAG_FS = (1 << 29), + RTL818X_TX_DESC_FLAG_DMA = (1 << 30), + RTL818X_TX_DESC_FLAG_OWN = (1 << 31) +}; + +enum rtl818x_rx_desc_flags { + RTL818X_RX_DESC_FLAG_ICV_ERR = (1 << 12), + RTL818X_RX_DESC_FLAG_CRC32_ERR = (1 << 13), + RTL818X_RX_DESC_FLAG_PM = (1 << 14), + RTL818X_RX_DESC_FLAG_RX_ERR = (1 << 15), + RTL818X_RX_DESC_FLAG_BCAST = (1 << 16), + RTL818X_RX_DESC_FLAG_PAM = (1 << 17), + RTL818X_RX_DESC_FLAG_MCAST = (1 << 18), + RTL818X_RX_DESC_FLAG_QOS = (1 << 19), /* RTL8187(B) only */ + RTL818X_RX_DESC_FLAG_TRSW = (1 << 24), /* RTL8187(B) only */ + RTL818X_RX_DESC_FLAG_SPLCP = (1 << 25), + RTL818X_RX_DESC_FLAG_FOF = (1 << 26), + RTL818X_RX_DESC_FLAG_DMA_FAIL = (1 << 27), + RTL818X_RX_DESC_FLAG_LS = (1 << 28), + RTL818X_RX_DESC_FLAG_FS = (1 << 29), + RTL818X_RX_DESC_FLAG_EOR = (1 << 30), + RTL818X_RX_DESC_FLAG_OWN = (1 << 31) +}; + +#endif /* RTL818X_H */ diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile new file mode 100644 index 0000000..f47ec94 --- /dev/null +++ b/drivers/net/wireless/wl12xx/Makefile @@ -0,0 +1,16 @@ +wl1251-objs = wl1251_main.o wl1251_event.o \ + wl1251_tx.o wl1251_rx.o wl1251_ps.o wl1251_cmd.o \ + wl1251_acx.o wl1251_boot.o wl1251_init.o \ + wl1251_debugfs.o wl1251_io.o + +obj-$(CONFIG_WL1251) += wl1251.o +obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o +obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o + +wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \ + wl1271_event.o wl1271_tx.o wl1271_rx.o \ + wl1271_ps.o wl1271_acx.o wl1271_boot.o \ + wl1271_init.o wl1271_debugfs.o wl1271_io.o + +wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o +obj-$(CONFIG_WL1271) += wl1271.o diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h new file mode 100644 index 0000000..37c61c1 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251.h @@ -0,0 +1,427 @@ +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1251_H__ +#define __WL1251_H__ + +#include +#include +#include +#include + +#define DRIVER_NAME "wl1251" +#define DRIVER_PREFIX DRIVER_NAME ": " + +enum { + DEBUG_NONE = 0, + DEBUG_IRQ = BIT(0), + DEBUG_SPI = BIT(1), + DEBUG_BOOT = BIT(2), + DEBUG_MAILBOX = BIT(3), + DEBUG_NETLINK = BIT(4), + DEBUG_EVENT = BIT(5), + DEBUG_TX = BIT(6), + DEBUG_RX = BIT(7), + DEBUG_SCAN = BIT(8), + DEBUG_CRYPT = BIT(9), + DEBUG_PSM = BIT(10), + DEBUG_MAC80211 = BIT(11), + DEBUG_CMD = BIT(12), + DEBUG_ACX = BIT(13), + DEBUG_ALL = ~0, +}; + +#define DEBUG_LEVEL (DEBUG_NONE) + +#define DEBUG_DUMP_LIMIT 1024 + +#define wl1251_error(fmt, arg...) \ + printk(KERN_ERR DRIVER_PREFIX "ERROR " fmt "\n", ##arg) + +#define wl1251_warning(fmt, arg...) \ + printk(KERN_WARNING DRIVER_PREFIX "WARNING " fmt "\n", ##arg) + +#define wl1251_notice(fmt, arg...) \ + printk(KERN_INFO DRIVER_PREFIX fmt "\n", ##arg) + +#define wl1251_info(fmt, arg...) \ + printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg) + +#define wl1251_debug(level, fmt, arg...) \ + do { \ + if (level & DEBUG_LEVEL) \ + printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg); \ + } while (0) + +#define wl1251_dump(level, prefix, buf, len) \ + do { \ + if (level & DEBUG_LEVEL) \ + print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, \ + min_t(size_t, len, DEBUG_DUMP_LIMIT), \ + 0); \ + } while (0) + +#define wl1251_dump_ascii(level, prefix, buf, len) \ + do { \ + if (level & DEBUG_LEVEL) \ + print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, \ + min_t(size_t, len, DEBUG_DUMP_LIMIT), \ + true); \ + } while (0) + +#define WL1251_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \ + CFG_BSSID_FILTER_EN) + +#define WL1251_DEFAULT_RX_FILTER (CFG_RX_PRSP_EN | \ + CFG_RX_MGMT_EN | \ + CFG_RX_DATA_EN | \ + CFG_RX_CTL_EN | \ + CFG_RX_BCN_EN | \ + CFG_RX_AUTH_EN | \ + CFG_RX_ASSOC_EN) + +#define WL1251_BUSY_WORD_LEN 8 + +struct boot_attr { + u32 radio_type; + u8 mac_clock; + u8 arm_clock; + int firmware_debug; + u32 minor; + u32 major; + u32 bugfix; +}; + +enum wl1251_state { + WL1251_STATE_OFF, + WL1251_STATE_ON, + WL1251_STATE_PLT, +}; + +enum wl1251_partition_type { + PART_DOWN, + PART_WORK, + PART_DRPW, + + PART_TABLE_LEN +}; + +struct wl1251_partition { + u32 size; + u32 start; +}; + +struct wl1251_partition_set { + struct wl1251_partition mem; + struct wl1251_partition reg; +}; + +struct wl1251; + +struct wl1251_stats { + struct acx_statistics *fw_stats; + unsigned long fw_stats_update; + + unsigned int retry_count; + unsigned int excessive_retries; +}; + +struct wl1251_debugfs { + struct dentry *rootdir; + struct dentry *fw_statistics; + + struct dentry *tx_internal_desc_overflow; + + struct dentry *rx_out_of_mem; + struct dentry *rx_hdr_overflow; + struct dentry *rx_hw_stuck; + struct dentry *rx_dropped; + struct dentry *rx_fcs_err; + struct dentry *rx_xfr_hint_trig; + struct dentry *rx_path_reset; + struct dentry *rx_reset_counter; + + struct dentry *dma_rx_requested; + struct dentry *dma_rx_errors; + struct dentry *dma_tx_requested; + struct dentry *dma_tx_errors; + + struct dentry *isr_cmd_cmplt; + struct dentry *isr_fiqs; + struct dentry *isr_rx_headers; + struct dentry *isr_rx_mem_overflow; + struct dentry *isr_rx_rdys; + struct dentry *isr_irqs; + struct dentry 
*isr_tx_procs; + struct dentry *isr_decrypt_done; + struct dentry *isr_dma0_done; + struct dentry *isr_dma1_done; + struct dentry *isr_tx_exch_complete; + struct dentry *isr_commands; + struct dentry *isr_rx_procs; + struct dentry *isr_hw_pm_mode_changes; + struct dentry *isr_host_acknowledges; + struct dentry *isr_pci_pm; + struct dentry *isr_wakeups; + struct dentry *isr_low_rssi; + + struct dentry *wep_addr_key_count; + struct dentry *wep_default_key_count; + /* skipping wep.reserved */ + struct dentry *wep_key_not_found; + struct dentry *wep_decrypt_fail; + struct dentry *wep_packets; + struct dentry *wep_interrupt; + + struct dentry *pwr_ps_enter; + struct dentry *pwr_elp_enter; + struct dentry *pwr_missing_bcns; + struct dentry *pwr_wake_on_host; + struct dentry *pwr_wake_on_timer_exp; + struct dentry *pwr_tx_with_ps; + struct dentry *pwr_tx_without_ps; + struct dentry *pwr_rcvd_beacons; + struct dentry *pwr_power_save_off; + struct dentry *pwr_enable_ps; + struct dentry *pwr_disable_ps; + struct dentry *pwr_fix_tsf_ps; + /* skipping cont_miss_bcns_spread for now */ + struct dentry *pwr_rcvd_awake_beacons; + + struct dentry *mic_rx_pkts; + struct dentry *mic_calc_failure; + + struct dentry *aes_encrypt_fail; + struct dentry *aes_decrypt_fail; + struct dentry *aes_encrypt_packets; + struct dentry *aes_decrypt_packets; + struct dentry *aes_encrypt_interrupt; + struct dentry *aes_decrypt_interrupt; + + struct dentry *event_heart_beat; + struct dentry *event_calibration; + struct dentry *event_rx_mismatch; + struct dentry *event_rx_mem_empty; + struct dentry *event_rx_pool; + struct dentry *event_oom_late; + struct dentry *event_phy_transmit_error; + struct dentry *event_tx_stuck; + + struct dentry *ps_pspoll_timeouts; + struct dentry *ps_upsd_timeouts; + struct dentry *ps_upsd_max_sptime; + struct dentry *ps_upsd_max_apturn; + struct dentry *ps_pspoll_max_apturn; + struct dentry *ps_pspoll_utilization; + struct dentry *ps_upsd_utilization; + + struct dentry *rxpipe_rx_prep_beacon_drop; + struct dentry *rxpipe_descr_host_int_trig_rx_data; + struct dentry *rxpipe_beacon_buffer_thres_host_int_trig_rx_data; + struct dentry *rxpipe_missed_beacon_host_int_trig_rx_data; + struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data; + + struct dentry *tx_queue_len; + struct dentry *tx_queue_status; + + struct dentry *retry_count; + struct dentry *excessive_retries; +}; + +struct wl1251_if_operations { + void (*read)(struct wl1251 *wl, int addr, void *buf, size_t len); + void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len); + void (*reset)(struct wl1251 *wl); + void (*enable_irq)(struct wl1251 *wl); + void (*disable_irq)(struct wl1251 *wl); +}; + +struct wl1251 { + struct ieee80211_hw *hw; + bool mac80211_registered; + + void *if_priv; + const struct wl1251_if_operations *if_ops; + + void (*set_power)(bool enable); + int irq; + bool use_eeprom; + + enum wl1251_state state; + struct mutex mutex; + + int physical_mem_addr; + int physical_reg_addr; + int virtual_mem_addr; + int virtual_reg_addr; + + int cmd_box_addr; + int event_box_addr; + struct boot_attr boot_attr; + + u8 *fw; + size_t fw_len; + u8 *nvs; + size_t nvs_len; + + u8 bssid[ETH_ALEN]; + u8 mac_addr[ETH_ALEN]; + u8 bss_type; + u8 listen_int; + int channel; + + void *target_mem_map; + struct acx_data_path_params_resp *data_path; + + /* Number of TX packets transferred to the FW, modulo 16 */ + u32 data_in_count; + + /* Frames scheduled for transmission, not handled yet */ + struct sk_buff_head tx_queue; + bool tx_queue_stopped; + + 
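+	/*
+	 * tx_work drains tx_queue to the firmware; tx_queue_stopped records
+	 * that the mac80211 queues were stopped because tx_queue grew past
+	 * WL1251_TX_QUEUE_MAX_LENGTH (defined near the end of this header),
+	 * so the TX path knows to wake them once the backlog drains.
+	 */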
+	struct work_struct tx_work;
+	struct work_struct filter_work;
+
+	/* Pending TX frames */
+	struct sk_buff *tx_frames[16];
+
+	/*
+	 * Index pointing to the next TX complete entry
+	 * in the cyclic TX complete array we get from
+	 * the FW.
+	 */
+	u32 next_tx_complete;
+
+	/* FW Rx counter */
+	u32 rx_counter;
+
+	/* Rx frames handled */
+	u32 rx_handled;
+
+	/* Current double buffer */
+	u32 rx_current_buffer;
+	u32 rx_last_id;
+
+	/* The target interrupt mask */
+	u32 intr_mask;
+	struct work_struct irq_work;
+
+	/* The mbox event mask */
+	u32 event_mask;
+
+	/* Mailbox pointers */
+	u32 mbox_ptr[2];
+
+	/* Are we currently scanning */
+	bool scanning;
+
+	/* Default key (for WEP) */
+	u32 default_key;
+
+	unsigned int tx_mgmt_frm_rate;
+	unsigned int tx_mgmt_frm_mod;
+
+	unsigned int rx_config;
+	unsigned int rx_filter;
+
+	/* is firmware in elp mode */
+	bool elp;
+
+	struct delayed_work elp_work;
+
+	/* we can be in psm, but not in elp, we have to differentiate */
+	bool psm;
+
+	/* PSM mode requested */
+	bool psm_requested;
+
+	u16 beacon_int;
+	u8 dtim_period;
+
+	/* in dBm */
+	int power_level;
+
+	struct wl1251_stats stats;
+	struct wl1251_debugfs debugfs;
+
+	u32 buffer_32;
+	u32 buffer_cmd;
+	u8 buffer_busyword[WL1251_BUSY_WORD_LEN];
+	struct wl1251_rx_descriptor *rx_descriptor;
+
+	struct ieee80211_vif *vif;
+
+	u32 chip_id;
+	char fw_ver[21];
+};
+
+int wl1251_plt_start(struct wl1251 *wl);
+int wl1251_plt_stop(struct wl1251 *wl);
+
+struct ieee80211_hw *wl1251_alloc_hw(void);
+int wl1251_free_hw(struct wl1251 *wl);
+int wl1251_init_ieee80211(struct wl1251 *wl);
+void wl1251_enable_interrupts(struct wl1251 *wl);
+void wl1251_disable_interrupts(struct wl1251 *wl);
+
+#define DEFAULT_HW_GEN_MODULATION_TYPE CCK_LONG /* Long Preamble */
+#define DEFAULT_HW_GEN_TX_RATE RATE_2MBPS
+#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
+
+#define WL1251_DEFAULT_POWER_LEVEL 20
+
+#define WL1251_TX_QUEUE_MAX_LENGTH 20
+
+#define WL1251_DEFAULT_BEACON_INT 100
+#define WL1251_DEFAULT_DTIM_PERIOD 1
+
+#define WL1251_DEFAULT_CHANNEL 0
+
+#define CHIP_ID_1251_PG10 (0x7010101)
+#define CHIP_ID_1251_PG11 (0x7020101)
+#define CHIP_ID_1251_PG12 (0x7030101)
+#define CHIP_ID_1271_PG10 (0x4030101)
+#define CHIP_ID_1271_PG20 (0x4030111)
+
+#define WL1251_FW_NAME "wl1251-fw.bin"
+#define WL1251_NVS_NAME "wl1251-nvs.bin"
+
+#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
+
+#define WL1251_PART_DOWN_MEM_START 0x0
+#define WL1251_PART_DOWN_MEM_SIZE 0x16800
+#define WL1251_PART_DOWN_REG_START REGISTERS_BASE
+#define WL1251_PART_DOWN_REG_SIZE REGISTERS_DOWN_SIZE
+
+#define WL1251_PART_WORK_MEM_START 0x28000
+#define WL1251_PART_WORK_MEM_SIZE 0x14000
+#define WL1251_PART_WORK_REG_START REGISTERS_BASE
+#define WL1251_PART_WORK_REG_SIZE REGISTERS_WORK_SIZE
+
+#endif
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
new file mode 100644
index 0000000..beff084
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -0,0 +1,1047 @@
+#include "wl1251_acx.h"
+
+#include <linux/module.h>
+#include <linux/crc7.h>
+
+#include "wl1251.h"
+#include "wl1251_reg.h"
+#include "wl1251_cmd.h"
+#include "wl1251_ps.h"
+
+int wl1251_acx_frame_rates(struct wl1251 *wl, u8 ctrl_rate, u8 ctrl_mod,
+			   u8 mgt_rate, u8 mgt_mod)
+{
+	struct acx_fw_gen_frame_rates *rates;
+	int ret;
+
+	wl1251_debug(DEBUG_ACX, "acx frame rates");
+
+	rates = kzalloc(sizeof(*rates), GFP_KERNEL);
+	if (!rates) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	rates->tx_ctrl_frame_rate = ctrl_rate;
+	rates->tx_ctrl_frame_mod =
ctrl_mod; + rates->tx_mgt_frame_rate = mgt_rate; + rates->tx_mgt_frame_mod = mgt_mod; + + ret = wl1251_cmd_configure(wl, ACX_FW_GEN_FRAME_RATES, + rates, sizeof(*rates)); + if (ret < 0) { + wl1251_error("Failed to set FW rates and modulation"); + goto out; + } + +out: + kfree(rates); + return ret; +} + + +int wl1251_acx_station_id(struct wl1251 *wl) +{ + struct acx_dot11_station_id *mac; + int ret, i; + + wl1251_debug(DEBUG_ACX, "acx dot11_station_id"); + + mac = kzalloc(sizeof(*mac), GFP_KERNEL); + if (!mac) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < ETH_ALEN; i++) + mac->mac[i] = wl->mac_addr[ETH_ALEN - 1 - i]; + + ret = wl1251_cmd_configure(wl, DOT11_STATION_ID, mac, sizeof(*mac)); + if (ret < 0) + goto out; + +out: + kfree(mac); + return ret; +} + +int wl1251_acx_default_key(struct wl1251 *wl, u8 key_id) +{ + struct acx_dot11_default_key *default_key; + int ret; + + wl1251_debug(DEBUG_ACX, "acx dot11_default_key (%d)", key_id); + + default_key = kzalloc(sizeof(*default_key), GFP_KERNEL); + if (!default_key) { + ret = -ENOMEM; + goto out; + } + + default_key->id = key_id; + + ret = wl1251_cmd_configure(wl, DOT11_DEFAULT_KEY, + default_key, sizeof(*default_key)); + if (ret < 0) { + wl1251_error("Couldn't set default key"); + goto out; + } + + wl->default_key = key_id; + +out: + kfree(default_key); + return ret; +} + +int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event, + u8 listen_interval) +{ + struct acx_wake_up_condition *wake_up; + int ret; + + wl1251_debug(DEBUG_ACX, "acx wake up conditions"); + + wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL); + if (!wake_up) { + ret = -ENOMEM; + goto out; + } + + wake_up->wake_up_event = wake_up_event; + wake_up->listen_interval = listen_interval; + + ret = wl1251_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS, + wake_up, sizeof(*wake_up)); + if (ret < 0) { + wl1251_warning("could not set wake up conditions: %d", ret); + goto out; + } + +out: + kfree(wake_up); + return ret; +} + +int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth) +{ + struct acx_sleep_auth *auth; + int ret; + + wl1251_debug(DEBUG_ACX, "acx sleep auth"); + + auth = kzalloc(sizeof(*auth), GFP_KERNEL); + if (!auth) { + ret = -ENOMEM; + goto out; + } + + auth->sleep_auth = sleep_auth; + + ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); + if (ret < 0) + return ret; + +out: + kfree(auth); + return ret; +} + +int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len) +{ + struct acx_revision *rev; + int ret; + + wl1251_debug(DEBUG_ACX, "acx fw rev"); + + rev = kzalloc(sizeof(*rev), GFP_KERNEL); + if (!rev) { + ret = -ENOMEM; + goto out; + } + + ret = wl1251_cmd_interrogate(wl, ACX_FW_REV, rev, sizeof(*rev)); + if (ret < 0) { + wl1251_warning("ACX_FW_REV interrogate failed"); + goto out; + } + + /* be careful with the buffer sizes */ + strncpy(buf, rev->fw_version, min(len, sizeof(rev->fw_version))); + + /* + * if the firmware version string is exactly + * sizeof(rev->fw_version) long or fw_len is less than + * sizeof(rev->fw_version) it won't be null terminated + */ + buf[min(len, sizeof(rev->fw_version)) - 1] = '\0'; + +out: + kfree(rev); + return ret; +} + +int wl1251_acx_tx_power(struct wl1251 *wl, int power) +{ + struct acx_current_tx_power *acx; + int ret; + + wl1251_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr"); + + if (power < 0 || power > 25) + return -EINVAL; + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->current_tx_power = power * 10; + + ret = 
wl1251_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); + if (ret < 0) { + wl1251_warning("configure of tx power failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1251_acx_feature_cfg(struct wl1251 *wl) +{ + struct acx_feature_config *feature; + int ret; + + wl1251_debug(DEBUG_ACX, "acx feature cfg"); + + feature = kzalloc(sizeof(*feature), GFP_KERNEL); + if (!feature) { + ret = -ENOMEM; + goto out; + } + + /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */ + feature->data_flow_options = 0; + feature->options = 0; + + ret = wl1251_cmd_configure(wl, ACX_FEATURE_CFG, + feature, sizeof(*feature)); + if (ret < 0) { + wl1251_error("Couldn't set HW encryption"); + goto out; + } + +out: + kfree(feature); + return ret; +} + +int wl1251_acx_mem_map(struct wl1251 *wl, struct acx_header *mem_map, + size_t len) +{ + int ret; + + wl1251_debug(DEBUG_ACX, "acx mem map"); + + ret = wl1251_cmd_interrogate(wl, ACX_MEM_MAP, mem_map, len); + if (ret < 0) + return ret; + + return 0; +} + +int wl1251_acx_data_path_params(struct wl1251 *wl, + struct acx_data_path_params_resp *resp) +{ + struct acx_data_path_params *params; + int ret; + + wl1251_debug(DEBUG_ACX, "acx data path params"); + + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) { + ret = -ENOMEM; + goto out; + } + + params->rx_packet_ring_chunk_size = DP_RX_PACKET_RING_CHUNK_SIZE; + params->tx_packet_ring_chunk_size = DP_TX_PACKET_RING_CHUNK_SIZE; + + params->rx_packet_ring_chunk_num = DP_RX_PACKET_RING_CHUNK_NUM; + params->tx_packet_ring_chunk_num = DP_TX_PACKET_RING_CHUNK_NUM; + + params->tx_complete_threshold = 1; + + params->tx_complete_ring_depth = FW_TX_CMPLT_BLOCK_SIZE; + + params->tx_complete_timeout = DP_TX_COMPLETE_TIME_OUT; + + ret = wl1251_cmd_configure(wl, ACX_DATA_PATH_PARAMS, + params, sizeof(*params)); + if (ret < 0) + goto out; + + /* FIXME: shouldn't this be ACX_DATA_PATH_RESP_PARAMS? 
*/ + ret = wl1251_cmd_interrogate(wl, ACX_DATA_PATH_PARAMS, + resp, sizeof(*resp)); + + if (ret < 0) { + wl1251_warning("failed to read data path parameters: %d", ret); + goto out; + } else if (resp->header.cmd.status != CMD_STATUS_SUCCESS) { + wl1251_warning("data path parameter acx status failed"); + ret = -EIO; + goto out; + } + +out: + kfree(params); + return ret; +} + +int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time) +{ + struct acx_rx_msdu_lifetime *acx; + int ret; + + wl1251_debug(DEBUG_ACX, "acx rx msdu life time"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->lifetime = life_time; + ret = wl1251_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME, + acx, sizeof(*acx)); + if (ret < 0) { + wl1251_warning("failed to set rx msdu life time: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter) +{ + struct acx_rx_config *rx_config; + int ret; + + wl1251_debug(DEBUG_ACX, "acx rx config"); + + rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL); + if (!rx_config) { + ret = -ENOMEM; + goto out; + } + + rx_config->config_options = config; + rx_config->filter_options = filter; + + ret = wl1251_cmd_configure(wl, ACX_RX_CFG, + rx_config, sizeof(*rx_config)); + if (ret < 0) { + wl1251_warning("failed to set rx config: %d", ret); + goto out; + } + +out: + kfree(rx_config); + return ret; +} + +int wl1251_acx_pd_threshold(struct wl1251 *wl) +{ + struct acx_packet_detection *pd; + int ret; + + wl1251_debug(DEBUG_ACX, "acx data pd threshold"); + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) { + ret = -ENOMEM; + goto out; + } + + /* FIXME: threshold value not set */ + + ret = wl1251_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd)); + if (ret < 0) { + wl1251_warning("failed to set pd threshold: %d", ret); + goto out; + } + +out: + kfree(pd); + return 0; +} + +int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time) +{ + struct acx_slot *slot; + int ret; + + wl1251_debug(DEBUG_ACX, "acx slot"); + + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) { + ret = -ENOMEM; + goto out; + } + + slot->wone_index = STATION_WONE_INDEX; + slot->slot_time = slot_time; + + ret = wl1251_cmd_configure(wl, ACX_SLOT, slot, sizeof(*slot)); + if (ret < 0) { + wl1251_warning("failed to set slot time: %d", ret); + goto out; + } + +out: + kfree(slot); + return ret; +} + +int wl1251_acx_group_address_tbl(struct wl1251 *wl) +{ + struct acx_dot11_grp_addr_tbl *acx; + int ret; + + wl1251_debug(DEBUG_ACX, "acx group address tbl"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + /* MAC filtering */ + acx->enabled = 0; + acx->num_groups = 0; + memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN); + + ret = wl1251_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL, + acx, sizeof(*acx)); + if (ret < 0) { + wl1251_warning("failed to set group addr table: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1251_acx_service_period_timeout(struct wl1251 *wl) +{ + struct acx_rx_timeout *rx_timeout; + int ret; + + rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL); + if (!rx_timeout) { + ret = -ENOMEM; + goto out; + } + + wl1251_debug(DEBUG_ACX, "acx service period timeout"); + + rx_timeout->ps_poll_timeout = RX_TIMEOUT_PS_POLL_DEF; + rx_timeout->upsd_timeout = RX_TIMEOUT_UPSD_DEF; + + ret = wl1251_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT, + rx_timeout, sizeof(*rx_timeout)); + if (ret < 0) { + 
wl1251_warning("failed to set service period timeout: %d", + ret); + goto out; + } + +out: + kfree(rx_timeout); + return ret; +} + +int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold) +{ + struct acx_rts_threshold *rts; + int ret; + + wl1251_debug(DEBUG_ACX, "acx rts threshold"); + + rts = kzalloc(sizeof(*rts), GFP_KERNEL); + if (!rts) { + ret = -ENOMEM; + goto out; + } + + rts->threshold = rts_threshold; + + ret = wl1251_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); + if (ret < 0) { + wl1251_warning("failed to set rts threshold: %d", ret); + goto out; + } + +out: + kfree(rts); + return ret; +} + +int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter) +{ + struct acx_beacon_filter_option *beacon_filter; + int ret; + + wl1251_debug(DEBUG_ACX, "acx beacon filter opt"); + + beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL); + if (!beacon_filter) { + ret = -ENOMEM; + goto out; + } + + beacon_filter->enable = enable_filter; + beacon_filter->max_num_beacons = 0; + + ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_OPT, + beacon_filter, sizeof(*beacon_filter)); + if (ret < 0) { + wl1251_warning("failed to set beacon filter opt: %d", ret); + goto out; + } + +out: + kfree(beacon_filter); + return ret; +} + +int wl1251_acx_beacon_filter_table(struct wl1251 *wl) +{ + struct acx_beacon_filter_ie_table *ie_table; + int idx = 0; + int ret; + + wl1251_debug(DEBUG_ACX, "acx beacon filter table"); + + ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL); + if (!ie_table) { + ret = -ENOMEM; + goto out; + } + + /* configure default beacon pass-through rules */ + ie_table->num_ie = 1; + ie_table->table[idx++] = BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN; + ie_table->table[idx++] = BEACON_RULE_PASS_ON_APPEARANCE; + + ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_TABLE, + ie_table, sizeof(*ie_table)); + if (ret < 0) { + wl1251_warning("failed to set beacon filter table: %d", ret); + goto out; + } + +out: + kfree(ie_table); + return ret; +} + +int wl1251_acx_conn_monit_params(struct wl1251 *wl) +{ + struct acx_conn_monit_params *acx; + int ret; + + wl1251_debug(DEBUG_ACX, "acx connection monitor parameters"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->synch_fail_thold = SYNCH_FAIL_DEFAULT_THRESHOLD; + acx->bss_lose_timeout = NO_BEACON_DEFAULT_TIMEOUT; + + ret = wl1251_cmd_configure(wl, ACX_CONN_MONIT_PARAMS, + acx, sizeof(*acx)); + if (ret < 0) { + wl1251_warning("failed to set connection monitor " + "parameters: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1251_acx_sg_enable(struct wl1251 *wl) +{ + struct acx_bt_wlan_coex *pta; + int ret; + + wl1251_debug(DEBUG_ACX, "acx sg enable"); + + pta = kzalloc(sizeof(*pta), GFP_KERNEL); + if (!pta) { + ret = -ENOMEM; + goto out; + } + + pta->enable = SG_ENABLE; + + ret = wl1251_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta)); + if (ret < 0) { + wl1251_warning("failed to set softgemini enable: %d", ret); + goto out; + } + +out: + kfree(pta); + return ret; +} + +int wl1251_acx_sg_cfg(struct wl1251 *wl) +{ + struct acx_bt_wlan_coex_param *param; + int ret; + + wl1251_debug(DEBUG_ACX, "acx sg cfg"); + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto out; + } + + /* BT-WLAN coext parameters */ + param->min_rate = RATE_INDEX_24MBPS; + param->bt_hp_max_time = PTA_BT_HP_MAXTIME_DEF; + param->wlan_hp_max_time = PTA_WLAN_HP_MAX_TIME_DEF; + param->sense_disable_timer = PTA_SENSE_DISABLE_TIMER_DEF; + 
param->rx_time_bt_hp = PTA_PROTECTIVE_RX_TIME_DEF; + param->tx_time_bt_hp = PTA_PROTECTIVE_TX_TIME_DEF; + param->rx_time_bt_hp_fast = PTA_PROTECTIVE_RX_TIME_FAST_DEF; + param->tx_time_bt_hp_fast = PTA_PROTECTIVE_TX_TIME_FAST_DEF; + param->wlan_cycle_fast = PTA_CYCLE_TIME_FAST_DEF; + param->bt_anti_starvation_period = PTA_ANTI_STARVE_PERIOD_DEF; + param->next_bt_lp_packet = PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF; + param->wake_up_beacon = PTA_TIME_BEFORE_BEACON_DEF; + param->hp_dm_max_guard_time = PTA_HPDM_MAX_TIME_DEF; + param->next_wlan_packet = PTA_TIME_OUT_NEXT_WLAN_DEF; + param->antenna_type = PTA_ANTENNA_TYPE_DEF; + param->signal_type = PTA_SIGNALING_TYPE_DEF; + param->afh_leverage_on = PTA_AFH_LEVERAGE_ON_DEF; + param->quiet_cycle_num = PTA_NUMBER_QUIET_CYCLE_DEF; + param->max_cts = PTA_MAX_NUM_CTS_DEF; + param->wlan_packets_num = PTA_NUMBER_OF_WLAN_PACKETS_DEF; + param->bt_packets_num = PTA_NUMBER_OF_BT_PACKETS_DEF; + param->missed_rx_avalanche = PTA_RX_FOR_AVALANCHE_DEF; + param->wlan_elp_hp = PTA_ELP_HP_DEF; + param->bt_anti_starvation_cycles = PTA_ANTI_STARVE_NUM_CYCLE_DEF; + param->ack_mode_dual_ant = PTA_ACK_MODE_DEF; + param->pa_sd_enable = PTA_ALLOW_PA_SD_DEF; + param->pta_auto_mode_enable = PTA_AUTO_MODE_NO_CTS_DEF; + param->bt_hp_respected_num = PTA_BT_HP_RESPECTED_DEF; + + ret = wl1251_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); + if (ret < 0) { + wl1251_warning("failed to set sg config: %d", ret); + goto out; + } + +out: + kfree(param); + return ret; +} + +int wl1251_acx_cca_threshold(struct wl1251 *wl) +{ + struct acx_energy_detection *detection; + int ret; + + wl1251_debug(DEBUG_ACX, "acx cca threshold"); + + detection = kzalloc(sizeof(*detection), GFP_KERNEL); + if (!detection) { + ret = -ENOMEM; + goto out; + } + + detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D; + detection->tx_energy_detection = 0; + + ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD, + detection, sizeof(*detection)); + if (ret < 0) { + wl1251_warning("failed to set cca threshold: %d", ret); + return ret; + } + +out: + kfree(detection); + return ret; +} + +int wl1251_acx_bcn_dtim_options(struct wl1251 *wl) +{ + struct acx_beacon_broadcast *bb; + int ret; + + wl1251_debug(DEBUG_ACX, "acx bcn dtim options"); + + bb = kzalloc(sizeof(*bb), GFP_KERNEL); + if (!bb) { + ret = -ENOMEM; + goto out; + } + + bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE; + bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE; + bb->rx_broadcast_in_ps = RX_BROADCAST_IN_PS_DEF_VALUE; + bb->ps_poll_threshold = CONSECUTIVE_PS_POLL_FAILURE_DEF; + + ret = wl1251_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb)); + if (ret < 0) { + wl1251_warning("failed to set rx config: %d", ret); + goto out; + } + +out: + kfree(bb); + return ret; +} + +int wl1251_acx_aid(struct wl1251 *wl, u16 aid) +{ + struct acx_aid *acx_aid; + int ret; + + wl1251_debug(DEBUG_ACX, "acx aid"); + + acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL); + if (!acx_aid) { + ret = -ENOMEM; + goto out; + } + + acx_aid->aid = aid; + + ret = wl1251_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); + if (ret < 0) { + wl1251_warning("failed to set aid: %d", ret); + goto out; + } + +out: + kfree(acx_aid); + return ret; +} + +int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask) +{ + struct acx_event_mask *mask; + int ret; + + wl1251_debug(DEBUG_ACX, "acx event mbox mask"); + + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + ret = -ENOMEM; + goto out; + } + + /* high event mask is unused */ + mask->high_event_mask = 
0xffffffff;
+
+	mask->event_mask = event_mask;
+
+	ret = wl1251_cmd_configure(wl, ACX_EVENT_MBOX_MASK,
+				   mask, sizeof(*mask));
+	if (ret < 0) {
+		wl1251_warning("failed to set acx_event_mbox_mask: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(mask);
+	return ret;
+}
+
+int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
+{
+	struct acx_preamble *acx;
+	int ret;
+
+	wl1251_debug(DEBUG_ACX, "acx_set_preamble");
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->preamble = preamble;
+
+	ret = wl1251_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1251_warning("Setting of preamble failed: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
+
+int wl1251_acx_cts_protect(struct wl1251 *wl,
+			   enum acx_ctsprotect_type ctsprotect)
+{
+	struct acx_ctsprotect *acx;
+	int ret;
+
+	wl1251_debug(DEBUG_ACX, "acx_set_ctsprotect");
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->ctsprotect = ctsprotect;
+
+	ret = wl1251_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1251_warning("Setting of ctsprotect failed: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
+
+int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
+{
+	struct acx_tsf_info *tsf_info;
+	int ret;
+
+	tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL);
+	if (!tsf_info) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = wl1251_cmd_interrogate(wl, ACX_TSF_INFO,
+				     tsf_info, sizeof(*tsf_info));
+	if (ret < 0) {
+		wl1251_warning("ACX_TSF_INFO interrogate failed");
+		goto out;
+	}
+
+	/* compose the 64-bit TSF from its two 32-bit halves */
+	*mactime = tsf_info->current_tsf_lsb |
+		   ((u64)tsf_info->current_tsf_msb << 32);
+
+out:
+	kfree(tsf_info);
+	return ret;
+}
+
+int wl1251_acx_statistics(struct wl1251 *wl, struct acx_statistics *stats)
+{
+	int ret;
+
+	wl1251_debug(DEBUG_ACX, "acx statistics");
+
+	ret = wl1251_cmd_interrogate(wl, ACX_STATISTICS, stats,
+				     sizeof(*stats));
+	if (ret < 0) {
+		wl1251_warning("acx statistics failed: %d", ret);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int wl1251_acx_rate_policies(struct wl1251 *wl)
+{
+	struct acx_rate_policy *acx;
+	int ret = 0;
+
+	wl1251_debug(DEBUG_ACX, "acx rate policies");
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* configure one default (one-size-fits-all) rate class */
+	acx->rate_class_cnt = 1;
+	acx->rate_class[0].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
+	acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT;
+	acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT;
+	acx->rate_class[0].aflags = 0;
+
+	ret = wl1251_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1251_warning("Setting of rate policies failed: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(acx);
+	return ret;
+}
+
+int wl1251_acx_mem_cfg(struct wl1251 *wl)
+{
+	struct wl1251_acx_config_memory *mem_conf;
+	int ret, i;
+
+	wl1251_debug(DEBUG_ACX, "acx mem cfg");
+
+	mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
+	if (!mem_conf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* memory config */
+	mem_conf->mem_config.num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS);
+	mem_conf->mem_config.rx_mem_block_num = 35;
+	mem_conf->mem_config.tx_min_mem_block_num = 64;
+	mem_conf->mem_config.num_tx_queues = MAX_TX_QUEUES;
+	mem_conf->mem_config.host_if_options = HOSTIF_PKT_RING;
+	mem_conf->mem_config.num_ssid_profiles = 1;
+	mem_conf->mem_config.debug_buffer_size =
cpu_to_le16(TRACE_BUFFER_MAX_SIZE); + + /* RX queue config */ + mem_conf->rx_queue_config.dma_address = 0; + mem_conf->rx_queue_config.num_descs = ACX_RX_DESC_DEF; + mem_conf->rx_queue_config.priority = DEFAULT_RXQ_PRIORITY; + mem_conf->rx_queue_config.type = DEFAULT_RXQ_TYPE; + + /* TX queue config */ + for (i = 0; i < MAX_TX_QUEUES; i++) { + mem_conf->tx_queue_config[i].num_descs = ACX_TX_DESC_DEF; + mem_conf->tx_queue_config[i].attributes = i; + } + + ret = wl1251_cmd_configure(wl, ACX_MEM_CFG, mem_conf, + sizeof(*mem_conf)); + if (ret < 0) { + wl1251_warning("wl1251 mem config failed: %d", ret); + goto out; + } + +out: + kfree(mem_conf); + return ret; +} + +int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim) +{ + struct wl1251_acx_wr_tbtt_and_dtim *acx; + int ret; + + wl1251_debug(DEBUG_ACX, "acx tbtt and dtim"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->tbtt = tbtt; + acx->dtim = dtim; + + ret = wl1251_cmd_configure(wl, ACX_WR_TBTT_AND_DTIM, + acx, sizeof(*acx)); + if (ret < 0) { + wl1251_warning("failed to set tbtt and dtim: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max, + u8 aifs, u16 txop) +{ + struct wl1251_acx_ac_cfg *acx; + int ret = 0; + + wl1251_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d " + "aifs %d txop %d", ac, cw_min, cw_max, aifs, txop); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->ac = ac; + acx->cw_min = cw_min; + acx->cw_max = cw_max; + acx->aifsn = aifs; + acx->txop_limit = txop; + + ret = wl1251_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx)); + if (ret < 0) { + wl1251_warning("acx ac cfg failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue, + enum wl1251_acx_channel_type type, + u8 tsid, enum wl1251_acx_ps_scheme ps_scheme, + enum wl1251_acx_ack_policy ack_policy) +{ + struct wl1251_acx_tid_cfg *acx; + int ret = 0; + + wl1251_debug(DEBUG_ACX, "acx tid cfg %d type %d tsid %d " + "ps_scheme %d ack_policy %d", queue, type, tsid, + ps_scheme, ack_policy); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->queue = queue; + acx->type = type; + acx->tsid = tsid; + acx->ps_scheme = ps_scheme; + acx->ack_policy = ack_policy; + + ret = wl1251_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx)); + if (ret < 0) { + wl1251_warning("acx tid cfg failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h new file mode 100644 index 0000000..26160c4 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_acx.h @@ -0,0 +1,1413 @@ +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1251_ACX_H__ +#define __WL1251_ACX_H__ + +#include "wl1251.h" +#include "wl1251_cmd.h" + +/* Target's information element */ +struct acx_header { + struct wl1251_cmd_header cmd; + + /* acx (or information element) header */ + u16 id; + + /* payload length (not including headers */ + u16 len; +}; + +struct acx_error_counter { + struct acx_header header; + + /* The number of PLCP errors since the last time this */ + /* information element was interrogated. This field is */ + /* automatically cleared when it is interrogated.*/ + u32 PLCP_error; + + /* The number of FCS errors since the last time this */ + /* information element was interrogated. This field is */ + /* automatically cleared when it is interrogated.*/ + u32 FCS_error; + + /* The number of MPDUs without PLCP header errors received*/ + /* since the last time this information element was interrogated. */ + /* This field is automatically cleared when it is interrogated.*/ + u32 valid_frame; + + /* the number of missed sequence numbers in the squentially */ + /* values of frames seq numbers */ + u32 seq_num_miss; +} __attribute__ ((packed)); + +struct acx_revision { + struct acx_header header; + + /* + * The WiLink firmware version, an ASCII string x.x.x.x, + * that uniquely identifies the current firmware. + * The left most digit is incremented each time a + * significant change is made to the firmware, such as + * code redesign or new platform support. + * The second digit is incremented when major enhancements + * are added or major fixes are made. + * The third digit is incremented for each GA release. + * The fourth digit is incremented for each build. + * The first two digits identify a firmware release version, + * in other words, a unique set of features. + * The first three digits identify a GA release. + */ + char fw_version[20]; + + /* + * This 4 byte field specifies the WiLink hardware version. + * bits 0 - 15: Reserved. + * bits 16 - 23: Version ID - The WiLink version ID + * (1 = first spin, 2 = second spin, and so on). + * bits 24 - 31: Chip ID - The WiLink chip ID. + */ + u32 hw_version; +} __attribute__ ((packed)); + +enum wl1251_psm_mode { + /* Active mode */ + WL1251_PSM_CAM = 0, + + /* Power save mode */ + WL1251_PSM_PS = 1, + + /* Extreme low power */ + WL1251_PSM_ELP = 2, +}; + +struct acx_sleep_auth { + struct acx_header header; + + /* The sleep level authorization of the device. */ + /* 0 - Always active*/ + /* 1 - Power down mode: light / fast sleep*/ + /* 2 - ELP mode: Deep / Max sleep*/ + u8 sleep_auth; + u8 padding[3]; +} __attribute__ ((packed)); + +enum { + HOSTIF_PCI_MASTER_HOST_INDIRECT, + HOSTIF_PCI_MASTER_HOST_DIRECT, + HOSTIF_SLAVE, + HOSTIF_PKT_RING, + HOSTIF_DONTCARE = 0xFF +}; + +#define DEFAULT_UCAST_PRIORITY 0 +#define DEFAULT_RX_Q_PRIORITY 0 +#define DEFAULT_NUM_STATIONS 1 +#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 
15 high */ +#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */ +#define TRACE_BUFFER_MAX_SIZE 256 + +#define DP_RX_PACKET_RING_CHUNK_SIZE 1600 +#define DP_TX_PACKET_RING_CHUNK_SIZE 1600 +#define DP_RX_PACKET_RING_CHUNK_NUM 2 +#define DP_TX_PACKET_RING_CHUNK_NUM 2 +#define DP_TX_COMPLETE_TIME_OUT 20 +#define FW_TX_CMPLT_BLOCK_SIZE 16 + +struct acx_data_path_params { + struct acx_header header; + + u16 rx_packet_ring_chunk_size; + u16 tx_packet_ring_chunk_size; + + u8 rx_packet_ring_chunk_num; + u8 tx_packet_ring_chunk_num; + + /* + * Maximum number of packets that can be gathered + * in the TX complete ring before an interrupt + * is generated. + */ + u8 tx_complete_threshold; + + /* Number of pending TX complete entries in cyclic ring.*/ + u8 tx_complete_ring_depth; + + /* + * Max num microseconds since a packet enters the TX + * complete ring until an interrupt is generated. + */ + u32 tx_complete_timeout; +} __attribute__ ((packed)); + + +struct acx_data_path_params_resp { + struct acx_header header; + + u16 rx_packet_ring_chunk_size; + u16 tx_packet_ring_chunk_size; + + u8 rx_packet_ring_chunk_num; + u8 tx_packet_ring_chunk_num; + + u8 pad[2]; + + u32 rx_packet_ring_addr; + u32 tx_packet_ring_addr; + + u32 rx_control_addr; + u32 tx_control_addr; + + u32 tx_complete_addr; +} __attribute__ ((packed)); + +#define TX_MSDU_LIFETIME_MIN 0 +#define TX_MSDU_LIFETIME_MAX 3000 +#define TX_MSDU_LIFETIME_DEF 512 +#define RX_MSDU_LIFETIME_MIN 0 +#define RX_MSDU_LIFETIME_MAX 0xFFFFFFFF +#define RX_MSDU_LIFETIME_DEF 512000 + +struct acx_rx_msdu_lifetime { + struct acx_header header; + + /* + * The maximum amount of time, in TU, before the + * firmware discards the MSDU. + */ + u32 lifetime; +} __attribute__ ((packed)); + +/* + * RX Config Options Table + * Bit Definition + * === ========== + * 31:14 Reserved + * 13 Copy RX Status - when set, write three receive status words + * to top of rx'd MPDUs. + * When cleared, do not write three status words (added rev 1.5) + * 12 Reserved + * 11 RX Complete upon FCS error - when set, give rx complete + * interrupt for FCS errors, after the rx filtering, e.g. unicast + * frames not to us with FCS error will not generate an interrupt. + * 10 SSID Filter Enable - When set, the WiLink discards all beacon, + * probe request, and probe response frames with an SSID that does + * not match the SSID specified by the host in the START/JOIN + * command. + * When clear, the WiLink receives frames with any SSID. + * 9 Broadcast Filter Enable - When set, the WiLink discards all + * broadcast frames. When clear, the WiLink receives all received + * broadcast frames. + * 8:6 Reserved + * 5 BSSID Filter Enable - When set, the WiLink discards any frames + * with a BSSID that does not match the BSSID specified by the + * host. + * When clear, the WiLink receives frames from any BSSID. + * 4 MAC Addr Filter - When set, the WiLink discards any frames + * with a destination address that does not match the MAC address + * of the adaptor. + * When clear, the WiLink receives frames destined to any MAC + * address. + * 3 Promiscuous - When set, the WiLink receives all valid frames + * (i.e., all frames that pass the FCS check). + * When clear, only frames that pass the other filters specified + * are received. + * 2 FCS - When set, the WiLink includes the FCS with the received + * frame. + * When cleared, the FCS is discarded. + * 1 PLCP header - When set, write all data from baseband to frame + * buffer including PHY header. + * 0 Reserved - Always equal to 0. 
+ * + * RX Filter Options Table + * Bit Definition + * === ========== + * 31:12 Reserved - Always equal to 0. + * 11 Association - When set, the WiLink receives all association + * related frames (association request/response, reassocation + * request/response, and disassociation). When clear, these frames + * are discarded. + * 10 Auth/De auth - When set, the WiLink receives all authentication + * and de-authentication frames. When clear, these frames are + * discarded. + * 9 Beacon - When set, the WiLink receives all beacon frames. + * When clear, these frames are discarded. + * 8 Contention Free - When set, the WiLink receives all contention + * free frames. + * When clear, these frames are discarded. + * 7 Control - When set, the WiLink receives all control frames. + * When clear, these frames are discarded. + * 6 Data - When set, the WiLink receives all data frames. + * When clear, these frames are discarded. + * 5 FCS Error - When set, the WiLink receives frames that have FCS + * errors. + * When clear, these frames are discarded. + * 4 Management - When set, the WiLink receives all management + * frames. + * When clear, these frames are discarded. + * 3 Probe Request - When set, the WiLink receives all probe request + * frames. + * When clear, these frames are discarded. + * 2 Probe Response - When set, the WiLink receives all probe + * response frames. + * When clear, these frames are discarded. + * 1 RTS/CTS/ACK - When set, the WiLink receives all RTS, CTS and ACK + * frames. + * When clear, these frames are discarded. + * 0 Rsvd Type/Sub Type - When set, the WiLink receives all frames + * that have reserved frame types and sub types as defined by the + * 802.11 specification. + * When clear, these frames are discarded. + */ +struct acx_rx_config { + struct acx_header header; + + u32 config_options; + u32 filter_options; +} __attribute__ ((packed)); + +enum { + QOS_AC_BE = 0, + QOS_AC_BK, + QOS_AC_VI, + QOS_AC_VO, + QOS_HIGHEST_AC_INDEX = QOS_AC_VO, +}; + +#define MAX_NUM_OF_AC (QOS_HIGHEST_AC_INDEX+1) +#define FIRST_AC_INDEX QOS_AC_BE +#define MAX_NUM_OF_802_1d_TAGS 8 +#define AC_PARAMS_MAX_TSID 15 +#define MAX_APSD_CONF 0xffff + +#define QOS_TX_HIGH_MIN (0) +#define QOS_TX_HIGH_MAX (100) + +#define QOS_TX_HIGH_BK_DEF (25) +#define QOS_TX_HIGH_BE_DEF (35) +#define QOS_TX_HIGH_VI_DEF (35) +#define QOS_TX_HIGH_VO_DEF (35) + +#define QOS_TX_LOW_BK_DEF (15) +#define QOS_TX_LOW_BE_DEF (25) +#define QOS_TX_LOW_VI_DEF (25) +#define QOS_TX_LOW_VO_DEF (25) + +struct acx_tx_queue_qos_config { + struct acx_header header; + + u8 qid; + u8 pad[3]; + + /* Max number of blocks allowd in the queue */ + u16 high_threshold; + + /* Lowest memory blocks guaranteed for this queue */ + u16 low_threshold; +} __attribute__ ((packed)); + +struct acx_packet_detection { + struct acx_header header; + + u32 threshold; +} __attribute__ ((packed)); + + +enum acx_slot_type { + SLOT_TIME_LONG = 0, + SLOT_TIME_SHORT = 1, + DEFAULT_SLOT_TIME = SLOT_TIME_SHORT, + MAX_SLOT_TIMES = 0xFF +}; + +#define STATION_WONE_INDEX 0 + +struct acx_slot { + struct acx_header header; + + u8 wone_index; /* Reserved */ + u8 slot_time; + u8 reserved[6]; +} __attribute__ ((packed)); + + +#define ADDRESS_GROUP_MAX (8) +#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ADDRESS_GROUP_MAX) + +struct acx_dot11_grp_addr_tbl { + struct acx_header header; + + u8 enabled; + u8 num_groups; + u8 pad[2]; + u8 mac_table[ADDRESS_GROUP_MAX_LEN]; +} __attribute__ ((packed)); + + +#define RX_TIMEOUT_PS_POLL_MIN 0 +#define RX_TIMEOUT_PS_POLL_MAX (200000) 
+#define RX_TIMEOUT_PS_POLL_DEF (15) +#define RX_TIMEOUT_UPSD_MIN 0 +#define RX_TIMEOUT_UPSD_MAX (200000) +#define RX_TIMEOUT_UPSD_DEF (15) + +struct acx_rx_timeout { + struct acx_header header; + + /* + * The longest time the STA will wait to receive + * traffic from the AP after a PS-poll has been + * transmitted. + */ + u16 ps_poll_timeout; + + /* + * The longest time the STA will wait to receive + * traffic from the AP after a frame has been sent + * from an UPSD enabled queue. + */ + u16 upsd_timeout; +} __attribute__ ((packed)); + +#define RTS_THRESHOLD_MIN 0 +#define RTS_THRESHOLD_MAX 4096 +#define RTS_THRESHOLD_DEF 2347 + +struct acx_rts_threshold { + struct acx_header header; + + u16 threshold; + u8 pad[2]; +} __attribute__ ((packed)); + +struct acx_beacon_filter_option { + struct acx_header header; + + u8 enable; + + /* + * The number of beacons without the unicast TIM + * bit set that the firmware buffers before + * signaling the host about ready frames. + * When set to 0 and the filter is enabled, beacons + * without the unicast TIM bit set are dropped. + */ + u8 max_num_beacons; + u8 pad[2]; +} __attribute__ ((packed)); + +/* + * ACXBeaconFilterEntry (not 221) + * Byte Offset Size (Bytes) Definition + * =========== ============ ========== + * 0 1 IE identifier + * 1 1 Treatment bit mask + * + * ACXBeaconFilterEntry (221) + * Byte Offset Size (Bytes) Definition + * =========== ============ ========== + * 0 1 IE identifier + * 1 1 Treatment bit mask + * 2 3 OUI + * 5 1 Type + * 6 2 Version + * + * + * Treatment bit mask - The information element handling: + * bit 0 - The information element is compared and transferred + * in case of change. + * bit 1 - The information element is transferred to the host + * with each appearance or disappearance. + * Note that both bits can be set at the same time. + */ +#define BEACON_FILTER_TABLE_MAX_IE_NUM (32) +#define BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM (6) +#define BEACON_FILTER_TABLE_IE_ENTRY_SIZE (2) +#define BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE (6) +#define BEACON_FILTER_TABLE_MAX_SIZE ((BEACON_FILTER_TABLE_MAX_IE_NUM * \ + BEACON_FILTER_TABLE_IE_ENTRY_SIZE) + \ + (BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM * \ + BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE)) + +#define BEACON_RULE_PASS_ON_CHANGE BIT(0) +#define BEACON_RULE_PASS_ON_APPEARANCE BIT(1) + +#define BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN (37) + +struct acx_beacon_filter_ie_table { + struct acx_header header; + + u8 num_ie; + u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; + u8 pad[3]; +} __attribute__ ((packed)); + +#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */ +#define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */ + +struct acx_conn_monit_params { + struct acx_header header; + + u32 synch_fail_thold; /* number of beacons missed */ + u32 bss_lose_timeout; /* number of TU's from synch fail */ +}; + +enum { + SG_ENABLE = 0, + SG_DISABLE, + SG_SENSE_NO_ACTIVITY, + SG_SENSE_ACTIVE +}; + +struct acx_bt_wlan_coex { + struct acx_header header; + + /* + * 0 -> PTA enabled + * 1 -> PTA disabled + * 2 -> sense no active mode, i.e. + * an interrupt is sent upon + * BT activity. + * 3 -> PTA is switched on in response + * to the interrupt sending. 
+ */ + u8 enable; + u8 pad[3]; +} __attribute__ ((packed)); + +#define PTA_ANTENNA_TYPE_DEF (0) +#define PTA_BT_HP_MAXTIME_DEF (2000) +#define PTA_WLAN_HP_MAX_TIME_DEF (5000) +#define PTA_SENSE_DISABLE_TIMER_DEF (1350) +#define PTA_PROTECTIVE_RX_TIME_DEF (1500) +#define PTA_PROTECTIVE_TX_TIME_DEF (1500) +#define PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF (3000) +#define PTA_SIGNALING_TYPE_DEF (1) +#define PTA_AFH_LEVERAGE_ON_DEF (0) +#define PTA_NUMBER_QUIET_CYCLE_DEF (0) +#define PTA_MAX_NUM_CTS_DEF (3) +#define PTA_NUMBER_OF_WLAN_PACKETS_DEF (2) +#define PTA_NUMBER_OF_BT_PACKETS_DEF (2) +#define PTA_PROTECTIVE_RX_TIME_FAST_DEF (1500) +#define PTA_PROTECTIVE_TX_TIME_FAST_DEF (3000) +#define PTA_CYCLE_TIME_FAST_DEF (8700) +#define PTA_RX_FOR_AVALANCHE_DEF (5) +#define PTA_ELP_HP_DEF (0) +#define PTA_ANTI_STARVE_PERIOD_DEF (500) +#define PTA_ANTI_STARVE_NUM_CYCLE_DEF (4) +#define PTA_ALLOW_PA_SD_DEF (1) +#define PTA_TIME_BEFORE_BEACON_DEF (6300) +#define PTA_HPDM_MAX_TIME_DEF (1600) +#define PTA_TIME_OUT_NEXT_WLAN_DEF (2550) +#define PTA_AUTO_MODE_NO_CTS_DEF (0) +#define PTA_BT_HP_RESPECTED_DEF (3) +#define PTA_WLAN_RX_MIN_RATE_DEF (24) +#define PTA_ACK_MODE_DEF (1) + +struct acx_bt_wlan_coex_param { + struct acx_header header; + + /* + * The minimum rate of a received WLAN packet in the STA, + * during protective mode, of which a new BT-HP request + * during this Rx will always be respected and gain the antenna. + */ + u32 min_rate; + + /* Max time the BT HP will be respected. */ + u16 bt_hp_max_time; + + /* Max time the WLAN HP will be respected. */ + u16 wlan_hp_max_time; + + /* + * The time between the last BT activity + * and the moment when the sense mode returns + * to SENSE_INACTIVE. + */ + u16 sense_disable_timer; + + /* Time before the next BT HP instance */ + u16 rx_time_bt_hp; + u16 tx_time_bt_hp; + + /* range: 10-20000 default: 1500 */ + u16 rx_time_bt_hp_fast; + u16 tx_time_bt_hp_fast; + + /* range: 2000-65535 default: 8700 */ + u16 wlan_cycle_fast; + + /* range: 0 - 15000 (Msec) default: 1000 */ + u16 bt_anti_starvation_period; + + /* range 400-10000(Usec) default: 3000 */ + u16 next_bt_lp_packet; + + /* Deafult: worst case for BT DH5 traffic */ + u16 wake_up_beacon; + + /* range: 0-50000(Usec) default: 1050 */ + u16 hp_dm_max_guard_time; + + /* + * This is to prevent both BT & WLAN antenna + * starvation. + * Range: 100-50000(Usec) default:2550 + */ + u16 next_wlan_packet; + + /* 0 -> shared antenna */ + u8 antenna_type; + + /* + * 0 -> TI legacy + * 1 -> Palau + */ + u8 signal_type; + + /* + * BT AFH status + * 0 -> no AFH + * 1 -> from dedicated GPIO + * 2 -> AFH on (from host) + */ + u8 afh_leverage_on; + + /* + * The number of cycles during which no + * TX will be sent after 1 cycle of RX + * transaction in protective mode + */ + u8 quiet_cycle_num; + + /* + * The maximum number of CTSs that will + * be sent for receiving RX packet in + * protective mode + */ + u8 max_cts; + + /* + * The number of WLAN packets + * transferred in common mode before + * switching to BT. + */ + u8 wlan_packets_num; + + /* + * The number of BT packets + * transferred in common mode before + * switching to WLAN. + */ + u8 bt_packets_num; + + /* range: 1-255 default: 5 */ + u8 missed_rx_avalanche; + + /* range: 0-1 default: 1 */ + u8 wlan_elp_hp; + + /* range: 0 - 15 default: 4 */ + u8 bt_anti_starvation_cycles; + + u8 ack_mode_dual_ant; + + /* + * Allow PA_SD assertion/de-assertion + * during enabled BT activity. 
+ */ + u8 pa_sd_enable; + + /* + * Enable/Disable PTA in auto mode: + * Support Both Active & P.S modes + */ + u8 pta_auto_mode_enable; + + /* range: 0 - 20 default: 1 */ + u8 bt_hp_respected_num; +} __attribute__ ((packed)); + +#define CCA_THRSH_ENABLE_ENERGY_D 0x140A +#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF + +struct acx_energy_detection { + struct acx_header header; + + /* The RX Clear Channel Assessment threshold in the PHY */ + u16 rx_cca_threshold; + u8 tx_energy_detection; + u8 pad; +} __attribute__ ((packed)); + +#define BCN_RX_TIMEOUT_DEF_VALUE 10000 +#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000 +#define RX_BROADCAST_IN_PS_DEF_VALUE 1 +#define CONSECUTIVE_PS_POLL_FAILURE_DEF 4 + +struct acx_beacon_broadcast { + struct acx_header header; + + u16 beacon_rx_timeout; + u16 broadcast_timeout; + + /* Enables receiving of broadcast packets in PS mode */ + u8 rx_broadcast_in_ps; + + /* Consecutive PS Poll failures before updating the host */ + u8 ps_poll_threshold; + u8 pad[2]; +} __attribute__ ((packed)); + +struct acx_event_mask { + struct acx_header header; + + u32 event_mask; + u32 high_event_mask; /* Unused */ +} __attribute__ ((packed)); + +#define CFG_RX_FCS BIT(2) +#define CFG_RX_ALL_GOOD BIT(3) +#define CFG_UNI_FILTER_EN BIT(4) +#define CFG_BSSID_FILTER_EN BIT(5) +#define CFG_MC_FILTER_EN BIT(6) +#define CFG_MC_ADDR0_EN BIT(7) +#define CFG_MC_ADDR1_EN BIT(8) +#define CFG_BC_REJECT_EN BIT(9) +#define CFG_SSID_FILTER_EN BIT(10) +#define CFG_RX_INT_FCS_ERROR BIT(11) +#define CFG_RX_INT_ENCRYPTED BIT(12) +#define CFG_RX_WR_RX_STATUS BIT(13) +#define CFG_RX_FILTER_NULTI BIT(14) +#define CFG_RX_RESERVE BIT(15) +#define CFG_RX_TIMESTAMP_TSF BIT(16) + +#define CFG_RX_RSV_EN BIT(0) +#define CFG_RX_RCTS_ACK BIT(1) +#define CFG_RX_PRSP_EN BIT(2) +#define CFG_RX_PREQ_EN BIT(3) +#define CFG_RX_MGMT_EN BIT(4) +#define CFG_RX_FCS_ERROR BIT(5) +#define CFG_RX_DATA_EN BIT(6) +#define CFG_RX_CTL_EN BIT(7) +#define CFG_RX_CF_EN BIT(8) +#define CFG_RX_BCN_EN BIT(9) +#define CFG_RX_AUTH_EN BIT(10) +#define CFG_RX_ASSOC_EN BIT(11) + +#define SCAN_PASSIVE BIT(0) +#define SCAN_5GHZ_BAND BIT(1) +#define SCAN_TRIGGERED BIT(2) +#define SCAN_PRIORITY_HIGH BIT(3) + +struct acx_fw_gen_frame_rates { + struct acx_header header; + + u8 tx_ctrl_frame_rate; /* RATE_* */ + u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */ + u8 tx_mgt_frame_rate; + u8 tx_mgt_frame_mod; +} __attribute__ ((packed)); + +/* STA MAC */ +struct acx_dot11_station_id { + struct acx_header header; + + u8 mac[ETH_ALEN]; + u8 pad[2]; +} __attribute__ ((packed)); + +struct acx_feature_config { + struct acx_header header; + + u32 options; + u32 data_flow_options; +} __attribute__ ((packed)); + +struct acx_current_tx_power { + struct acx_header header; + + u8 current_tx_power; + u8 padding[3]; +} __attribute__ ((packed)); + +struct acx_dot11_default_key { + struct acx_header header; + + u8 id; + u8 pad[3]; +} __attribute__ ((packed)); + +struct acx_tsf_info { + struct acx_header header; + + u32 current_tsf_msb; + u32 current_tsf_lsb; + u32 last_TBTT_msb; + u32 last_TBTT_lsb; + u8 last_dtim_count; + u8 pad[3]; +} __attribute__ ((packed)); + +enum acx_wake_up_event { + WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/ + WAKE_UP_EVENT_DTIM_BITMAP = 0x02, /* Wake on every DTIM*/ + WAKE_UP_EVENT_N_DTIM_BITMAP = 0x04, /* Wake on every Nth DTIM */ + WAKE_UP_EVENT_N_BEACONS_BITMAP = 0x08, /* Wake on every Nth Beacon */ + WAKE_UP_EVENT_BITS_MASK = 0x0F +}; + +struct acx_wake_up_condition { + struct acx_header header; + + u8 wake_up_event; /* Only 
one bit can be set */ + u8 listen_interval; + u8 pad[2]; +} __attribute__ ((packed)); + +struct acx_aid { + struct acx_header header; + + /* + * To be set when associated with an AP. + */ + u16 aid; + u8 pad[2]; +} __attribute__ ((packed)); + +enum acx_preamble_type { + ACX_PREAMBLE_LONG = 0, + ACX_PREAMBLE_SHORT = 1 +}; + +struct acx_preamble { + struct acx_header header; + + /* + * When set, the WiLink transmits the frames with a short preamble and + * when cleared, the WiLink transmits the frames with a long preamble. + */ + u8 preamble; + u8 padding[3]; +} __attribute__ ((packed)); + +enum acx_ctsprotect_type { + CTSPROTECT_DISABLE = 0, + CTSPROTECT_ENABLE = 1 +}; + +struct acx_ctsprotect { + struct acx_header header; + u8 ctsprotect; + u8 padding[3]; +} __attribute__ ((packed)); + +struct acx_tx_statistics { + u32 internal_desc_overflow; +} __attribute__ ((packed)); + +struct acx_rx_statistics { + u32 out_of_mem; + u32 hdr_overflow; + u32 hw_stuck; + u32 dropped; + u32 fcs_err; + u32 xfr_hint_trig; + u32 path_reset; + u32 reset_counter; +} __attribute__ ((packed)); + +struct acx_dma_statistics { + u32 rx_requested; + u32 rx_errors; + u32 tx_requested; + u32 tx_errors; +} __attribute__ ((packed)); + +struct acx_isr_statistics { + /* host command complete */ + u32 cmd_cmplt; + + /* fiqisr() */ + u32 fiqs; + + /* (INT_STS_ND & INT_TRIG_RX_HEADER) */ + u32 rx_headers; + + /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */ + u32 rx_completes; + + /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */ + u32 rx_mem_overflow; + + /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */ + u32 rx_rdys; + + /* irqisr() */ + u32 irqs; + + /* (INT_STS_ND & INT_TRIG_TX_PROC) */ + u32 tx_procs; + + /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */ + u32 decrypt_done; + + /* (INT_STS_ND & INT_TRIG_DMA0) */ + u32 dma0_done; + + /* (INT_STS_ND & INT_TRIG_DMA1) */ + u32 dma1_done; + + /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */ + u32 tx_exch_complete; + + /* (INT_STS_ND & INT_TRIG_COMMAND) */ + u32 commands; + + /* (INT_STS_ND & INT_TRIG_RX_PROC) */ + u32 rx_procs; + + /* (INT_STS_ND & INT_TRIG_PM_802) */ + u32 hw_pm_mode_changes; + + /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */ + u32 host_acknowledges; + + /* (INT_STS_ND & INT_TRIG_PM_PCI) */ + u32 pci_pm; + + /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */ + u32 wakeups; + + /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ + u32 low_rssi; +} __attribute__ ((packed)); + +struct acx_wep_statistics { + /* WEP address keys configured */ + u32 addr_key_count; + + /* default keys configured */ + u32 default_key_count; + + u32 reserved; + + /* number of times that WEP key not found on lookup */ + u32 key_not_found; + + /* number of times that WEP key decryption failed */ + u32 decrypt_fail; + + /* WEP packets decrypted */ + u32 packets; + + /* WEP decrypt interrupts */ + u32 interrupt; +} __attribute__ ((packed)); + +#define ACX_MISSED_BEACONS_SPREAD 10 + +struct acx_pwr_statistics { + /* the amount of enters into power save mode (both PD & ELP) */ + u32 ps_enter; + + /* the amount of enters into ELP mode */ + u32 elp_enter; + + /* the amount of missing beacon interrupts to the host */ + u32 missing_bcns; + + /* the amount of wake on host-access times */ + u32 wake_on_host; + + /* the amount of wake on timer-expire */ + u32 wake_on_timer_exp; + + /* the number of packets that were transmitted with PS bit set */ + u32 tx_with_ps; + + /* the number of packets that were transmitted with PS bit clear */ + u32 tx_without_ps; + + /* the number of received beacons */ + u32 rcvd_beacons; + + /* the number of entering into PowerOn (power 
save off) */ + u32 power_save_off; + + /* the number of entries into power save mode */ + u16 enable_ps; + + /* + * the number of exits from power save, not including failed PS + * transitions + */ + u16 disable_ps; + + /* + * the number of times the TSF counter was adjusted because + * of drift + */ + u32 fix_tsf_ps; + + /* Gives statistics about the spread continuous missed beacons. + * The 16 LSB are dedicated for the PS mode. + * The 16 MSB are dedicated for the PS mode. + * cont_miss_bcns_spread[0] - single missed beacon. + * cont_miss_bcns_spread[1] - two continuous missed beacons. + * cont_miss_bcns_spread[2] - three continuous missed beacons. + * ... + * cont_miss_bcns_spread[9] - ten and more continuous missed beacons. + */ + u32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD]; + + /* the number of beacons in awake mode */ + u32 rcvd_awake_beacons; +} __attribute__ ((packed)); + +struct acx_mic_statistics { + u32 rx_pkts; + u32 calc_failure; +} __attribute__ ((packed)); + +struct acx_aes_statistics { + u32 encrypt_fail; + u32 decrypt_fail; + u32 encrypt_packets; + u32 decrypt_packets; + u32 encrypt_interrupt; + u32 decrypt_interrupt; +} __attribute__ ((packed)); + +struct acx_event_statistics { + u32 heart_beat; + u32 calibration; + u32 rx_mismatch; + u32 rx_mem_empty; + u32 rx_pool; + u32 oom_late; + u32 phy_transmit_error; + u32 tx_stuck; +} __attribute__ ((packed)); + +struct acx_ps_statistics { + u32 pspoll_timeouts; + u32 upsd_timeouts; + u32 upsd_max_sptime; + u32 upsd_max_apturn; + u32 pspoll_max_apturn; + u32 pspoll_utilization; + u32 upsd_utilization; +} __attribute__ ((packed)); + +struct acx_rxpipe_statistics { + u32 rx_prep_beacon_drop; + u32 descr_host_int_trig_rx_data; + u32 beacon_buffer_thres_host_int_trig_rx_data; + u32 missed_beacon_host_int_trig_rx_data; + u32 tx_xfr_host_int_trig_rx_data; +} __attribute__ ((packed)); + +struct acx_statistics { + struct acx_header header; + + struct acx_tx_statistics tx; + struct acx_rx_statistics rx; + struct acx_dma_statistics dma; + struct acx_isr_statistics isr; + struct acx_wep_statistics wep; + struct acx_pwr_statistics pwr; + struct acx_aes_statistics aes; + struct acx_mic_statistics mic; + struct acx_event_statistics event; + struct acx_ps_statistics ps; + struct acx_rxpipe_statistics rxpipe; +} __attribute__ ((packed)); + +#define ACX_MAX_RATE_CLASSES 8 +#define ACX_RATE_MASK_UNSPECIFIED 0 +#define ACX_RATE_RETRY_LIMIT 10 + +struct acx_rate_class { + u32 enabled_rates; + u8 short_retry_limit; + u8 long_retry_limit; + u8 aflags; + u8 reserved; +}; + +struct acx_rate_policy { + struct acx_header header; + + u32 rate_class_cnt; + struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; +} __attribute__ ((packed)); + +struct wl1251_acx_memory { + __le16 num_stations; /* number of STAs to be supported. */ + u16 reserved_1; + + /* + * Nmber of memory buffers for the RX mem pool. + * The actual number may be less if there are + * not enough blocks left for the minimum num + * of TX ones. 
+ */ + u8 rx_mem_block_num; + u8 reserved_2; + u8 num_tx_queues; /* From 1 to 16 */ + u8 host_if_options; /* HOST_IF* */ + u8 tx_min_mem_block_num; + u8 num_ssid_profiles; + __le16 debug_buffer_size; +} __attribute__ ((packed)); + + +#define ACX_RX_DESC_MIN 1 +#define ACX_RX_DESC_MAX 127 +#define ACX_RX_DESC_DEF 32 +struct wl1251_acx_rx_queue_config { + u8 num_descs; + u8 pad; + u8 type; + u8 priority; + __le32 dma_address; +} __attribute__ ((packed)); + +#define ACX_TX_DESC_MIN 1 +#define ACX_TX_DESC_MAX 127 +#define ACX_TX_DESC_DEF 16 +struct wl1251_acx_tx_queue_config { + u8 num_descs; + u8 pad[2]; + u8 attributes; +} __attribute__ ((packed)); + +#define MAX_TX_QUEUE_CONFIGS 5 +#define MAX_TX_QUEUES 4 +struct wl1251_acx_config_memory { + struct acx_header header; + + struct wl1251_acx_memory mem_config; + struct wl1251_acx_rx_queue_config rx_queue_config; + struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS]; +} __attribute__ ((packed)); + +struct wl1251_acx_mem_map { + struct acx_header header; + + void *code_start; + void *code_end; + + void *wep_defkey_start; + void *wep_defkey_end; + + void *sta_table_start; + void *sta_table_end; + + void *packet_template_start; + void *packet_template_end; + + void *queue_memory_start; + void *queue_memory_end; + + void *packet_memory_pool_start; + void *packet_memory_pool_end; + + void *debug_buffer1_start; + void *debug_buffer1_end; + + void *debug_buffer2_start; + void *debug_buffer2_end; + + /* Number of blocks FW allocated for TX packets */ + u32 num_tx_mem_blocks; + + /* Number of blocks FW allocated for RX packets */ + u32 num_rx_mem_blocks; +} __attribute__ ((packed)); + + +struct wl1251_acx_wr_tbtt_and_dtim { + + struct acx_header header; + + /* Time in TUs between two consecutive beacons */ + u16 tbtt; + + /* + * DTIM period + * For BSS: Number of TBTTs in a DTIM period (range: 1-10) + * For IBSS: value shall be set to 1 + */ + u8 dtim; + u8 padding; +} __attribute__ ((packed)); + +struct wl1251_acx_ac_cfg { + struct acx_header header; + + /* + * Access Category - The TX queue's access category + * (refer to AccessCategory_enum) + */ + u8 ac; + + /* + * The contention window minimum size (in slots) for + * the access class. + */ + u8 cw_min; + + /* + * The contention window maximum size (in slots) for + * the access class. + */ + u16 cw_max; + + /* The AIF value (in slots) for the access class. */ + u8 aifsn; + + u8 reserved; + + /* The TX Op Limit (in microseconds) for the access class. 
*/ + u16 txop_limit; +} __attribute__ ((packed)); + + +enum wl1251_acx_channel_type { + CHANNEL_TYPE_DCF = 0, + CHANNEL_TYPE_EDCF = 1, + CHANNEL_TYPE_HCCA = 2, +}; + +enum wl1251_acx_ps_scheme { + /* regular ps: simple sending of packets */ + WL1251_ACX_PS_SCHEME_LEGACY = 0, + + /* sending a packet triggers a unscheduled apsd downstream */ + WL1251_ACX_PS_SCHEME_UPSD_TRIGGER = 1, + + /* a pspoll packet will be sent before every data packet */ + WL1251_ACX_PS_SCHEME_LEGACY_PSPOLL = 2, + + /* scheduled apsd mode */ + WL1251_ACX_PS_SCHEME_SAPSD = 3, +}; + +enum wl1251_acx_ack_policy { + WL1251_ACX_ACK_POLICY_LEGACY = 0, + WL1251_ACX_ACK_POLICY_NO_ACK = 1, + WL1251_ACX_ACK_POLICY_BLOCK = 2, +}; + +struct wl1251_acx_tid_cfg { + struct acx_header header; + + /* tx queue id number (0-7) */ + u8 queue; + + /* channel access type for the queue, enum wl1251_acx_channel_type */ + u8 type; + + /* EDCA: ac index (0-3), HCCA: traffic stream id (8-15) */ + u8 tsid; + + /* ps scheme of the specified queue, enum wl1251_acx_ps_scheme */ + u8 ps_scheme; + + /* the tx queue ack policy, enum wl1251_acx_ack_policy */ + u8 ack_policy; + + u8 padding[3]; + + /* not supported */ + u32 apsdconf[2]; +} __attribute__ ((packed)); + +/************************************************************************* + + Host Interrupt Register (WiLink -> Host) + +**************************************************************************/ + +/* RX packet is ready in Xfer buffer #0 */ +#define WL1251_ACX_INTR_RX0_DATA BIT(0) + +/* TX result(s) are in the TX complete buffer */ +#define WL1251_ACX_INTR_TX_RESULT BIT(1) + +/* OBSOLETE */ +#define WL1251_ACX_INTR_TX_XFR BIT(2) + +/* RX packet is ready in Xfer buffer #1 */ +#define WL1251_ACX_INTR_RX1_DATA BIT(3) + +/* Event was entered to Event MBOX #A */ +#define WL1251_ACX_INTR_EVENT_A BIT(4) + +/* Event was entered to Event MBOX #B */ +#define WL1251_ACX_INTR_EVENT_B BIT(5) + +/* OBSOLETE */ +#define WL1251_ACX_INTR_WAKE_ON_HOST BIT(6) + +/* Trace meassge on MBOX #A */ +#define WL1251_ACX_INTR_TRACE_A BIT(7) + +/* Trace meassge on MBOX #B */ +#define WL1251_ACX_INTR_TRACE_B BIT(8) + +/* Command processing completion */ +#define WL1251_ACX_INTR_CMD_COMPLETE BIT(9) + +/* Init sequence is done */ +#define WL1251_ACX_INTR_INIT_COMPLETE BIT(14) + +#define WL1251_ACX_INTR_ALL 0xFFFFFFFF + +enum { + ACX_WAKE_UP_CONDITIONS = 0x0002, + ACX_MEM_CFG = 0x0003, + ACX_SLOT = 0x0004, + ACX_QUEUE_HEAD = 0x0005, /* for MASTER mode only */ + ACX_AC_CFG = 0x0007, + ACX_MEM_MAP = 0x0008, + ACX_AID = 0x000A, + ACX_RADIO_PARAM = 0x000B, /* Not used */ + ACX_CFG = 0x000C, /* Not used */ + ACX_FW_REV = 0x000D, + ACX_MEDIUM_USAGE = 0x000F, + ACX_RX_CFG = 0x0010, + ACX_TX_QUEUE_CFG = 0x0011, /* FIXME: only used by wl1251 */ + ACX_BSS_IN_PS = 0x0012, /* for AP only */ + ACX_STATISTICS = 0x0013, /* Debug API */ + ACX_FEATURE_CFG = 0x0015, + ACX_MISC_CFG = 0x0017, /* Not used */ + ACX_TID_CFG = 0x001A, + ACX_BEACON_FILTER_OPT = 0x001F, + ACX_LOW_RSSI = 0x0020, + ACX_NOISE_HIST = 0x0021, + ACX_HDK_VERSION = 0x0022, /* ??? 
*/ + ACX_PD_THRESHOLD = 0x0023, + ACX_DATA_PATH_PARAMS = 0x0024, /* WO */ + ACX_DATA_PATH_RESP_PARAMS = 0x0024, /* RO */ + ACX_CCA_THRESHOLD = 0x0025, + ACX_EVENT_MBOX_MASK = 0x0026, +#ifdef FW_RUNNING_AS_AP + ACX_DTIM_PERIOD = 0x0027, /* for AP only */ +#else + ACX_WR_TBTT_AND_DTIM = 0x0027, /* STA only */ +#endif + ACX_ACI_OPTION_CFG = 0x0029, /* OBSOLETE (for 1251)*/ + ACX_GPIO_CFG = 0x002A, /* Not used */ + ACX_GPIO_SET = 0x002B, /* Not used */ + ACX_PM_CFG = 0x002C, /* To Be Documented */ + ACX_CONN_MONIT_PARAMS = 0x002D, + ACX_AVERAGE_RSSI = 0x002E, /* Not used */ + ACX_CONS_TX_FAILURE = 0x002F, + ACX_BCN_DTIM_OPTIONS = 0x0031, + ACX_SG_ENABLE = 0x0032, + ACX_SG_CFG = 0x0033, + ACX_ANTENNA_DIVERSITY_CFG = 0x0035, /* To Be Documented */ + ACX_LOW_SNR = 0x0037, /* To Be Documented */ + ACX_BEACON_FILTER_TABLE = 0x0038, + ACX_ARP_IP_FILTER = 0x0039, + ACX_ROAMING_STATISTICS_TBL = 0x003B, + ACX_RATE_POLICY = 0x003D, + ACX_CTS_PROTECTION = 0x003E, + ACX_SLEEP_AUTH = 0x003F, + ACX_PREAMBLE_TYPE = 0x0040, + ACX_ERROR_CNT = 0x0041, + ACX_FW_GEN_FRAME_RATES = 0x0042, + ACX_IBSS_FILTER = 0x0044, + ACX_SERVICE_PERIOD_TIMEOUT = 0x0045, + ACX_TSF_INFO = 0x0046, + ACX_CONFIG_PS_WMM = 0x0049, + ACX_ENABLE_RX_DATA_FILTER = 0x004A, + ACX_SET_RX_DATA_FILTER = 0x004B, + ACX_GET_DATA_FILTER_STATISTICS = 0x004C, + ACX_POWER_LEVEL_TABLE = 0x004D, + ACX_BET_ENABLE = 0x0050, + DOT11_STATION_ID = 0x1001, + DOT11_RX_MSDU_LIFE_TIME = 0x1004, + DOT11_CUR_TX_PWR = 0x100D, + DOT11_DEFAULT_KEY = 0x1010, + DOT11_RX_DOT11_MODE = 0x1012, + DOT11_RTS_THRESHOLD = 0x1013, + DOT11_GROUP_ADDRESS_TBL = 0x1014, + + MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL, + + MAX_IE = 0xFFFF +}; + + +int wl1251_acx_frame_rates(struct wl1251 *wl, u8 ctrl_rate, u8 ctrl_mod, + u8 mgt_rate, u8 mgt_mod); +int wl1251_acx_station_id(struct wl1251 *wl); +int wl1251_acx_default_key(struct wl1251 *wl, u8 key_id); +int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event, + u8 listen_interval); +int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth); +int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len); +int wl1251_acx_tx_power(struct wl1251 *wl, int power); +int wl1251_acx_feature_cfg(struct wl1251 *wl); +int wl1251_acx_mem_map(struct wl1251 *wl, + struct acx_header *mem_map, size_t len); +int wl1251_acx_data_path_params(struct wl1251 *wl, + struct acx_data_path_params_resp *data_path); +int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time); +int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter); +int wl1251_acx_pd_threshold(struct wl1251 *wl); +int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time); +int wl1251_acx_group_address_tbl(struct wl1251 *wl); +int wl1251_acx_service_period_timeout(struct wl1251 *wl); +int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold); +int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter); +int wl1251_acx_beacon_filter_table(struct wl1251 *wl); +int wl1251_acx_conn_monit_params(struct wl1251 *wl); +int wl1251_acx_sg_enable(struct wl1251 *wl); +int wl1251_acx_sg_cfg(struct wl1251 *wl); +int wl1251_acx_cca_threshold(struct wl1251 *wl); +int wl1251_acx_bcn_dtim_options(struct wl1251 *wl); +int wl1251_acx_aid(struct wl1251 *wl, u16 aid); +int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask); +int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble); +int wl1251_acx_cts_protect(struct wl1251 *wl, + enum acx_ctsprotect_type ctsprotect); +int wl1251_acx_statistics(struct wl1251 *wl, 
struct acx_statistics *stats); +int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime); +int wl1251_acx_rate_policies(struct wl1251 *wl); +int wl1251_acx_mem_cfg(struct wl1251 *wl); +int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim); +int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max, + u8 aifs, u16 txop); +int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue, + enum wl1251_acx_channel_type type, + u8 tsid, enum wl1251_acx_ps_scheme ps_scheme, + enum wl1251_acx_ack_policy ack_policy); + +#endif /* __WL1251_ACX_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c new file mode 100644 index 0000000..28a8086 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_boot.c @@ -0,0 +1,557 @@ +/* + * This file is part of wl1251 + * + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include + +#include "wl1251_reg.h" +#include "wl1251_boot.h" +#include "wl1251_io.h" +#include "wl1251_spi.h" +#include "wl1251_event.h" +#include "wl1251_acx.h" + +void wl1251_boot_target_enable_interrupts(struct wl1251 *wl) +{ + wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask)); + wl1251_reg_write32(wl, HI_CFG, HI_CFG_DEF_VAL); +} + +int wl1251_boot_soft_reset(struct wl1251 *wl) +{ + unsigned long timeout; + u32 boot_data; + + /* perform soft reset */ + wl1251_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); + + /* SOFT_RESET is self clearing */ + timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); + while (1) { + boot_data = wl1251_reg_read32(wl, ACX_REG_SLV_SOFT_RESET); + wl1251_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); + if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) + break; + + if (time_after(jiffies, timeout)) { + /* 1.2 check pWhalBus->uSelfClearTime if the + * timeout was reached */ + wl1251_error("soft reset timeout"); + return -1; + } + + udelay(SOFT_RESET_STALL_TIME); + } + + /* disable Rx/Tx */ + wl1251_reg_write32(wl, ENABLE, 0x0); + + /* disable auto calibration on start*/ + wl1251_reg_write32(wl, SPARE_A2, 0xffff); + + return 0; +} + +int wl1251_boot_init_seq(struct wl1251 *wl) +{ + u32 scr_pad6, init_data, tmp, elp_cmd, ref_freq; + + /* + * col #1: INTEGER_DIVIDER + * col #2: FRACTIONAL_DIVIDER + * col #3: ATTN_BB + * col #4: ALPHA_BB + * col #5: STOP_TIME_BB + * col #6: BB_PLL_LOOP_FILTER + */ + static const u32 LUT[REF_FREQ_NUM][LUT_PARAM_NUM] = { + + { 83, 87381, 0xB, 5, 0xF00, 3}, /* REF_FREQ_19_2*/ + { 61, 141154, 0xB, 5, 0x1450, 2}, /* REF_FREQ_26_0*/ + { 41, 174763, 0xC, 6, 0x2D00, 1}, /* REF_FREQ_38_4*/ + { 40, 0, 0xC, 6, 0x2EE0, 1}, /* REF_FREQ_40_0*/ + { 47, 162280, 0xC, 6, 0x2760, 1} /* REF_FREQ_33_6 */ + }; + + /* read NVS params */ + scr_pad6 = wl1251_reg_read32(wl, SCR_PAD6); + wl1251_debug(DEBUG_BOOT, "scr_pad6 0x%x", scr_pad6); + + /* read ELP_CMD */ + elp_cmd 
= wl1251_reg_read32(wl, ELP_CMD); + wl1251_debug(DEBUG_BOOT, "elp_cmd 0x%x", elp_cmd); + + /* set the BB calibration time to be 300 usec (PLL_CAL_TIME) */ + ref_freq = scr_pad6 & 0x000000FF; + wl1251_debug(DEBUG_BOOT, "ref_freq 0x%x", ref_freq); + + wl1251_reg_write32(wl, PLL_CAL_TIME, 0x9); + + /* + * PG 1.2: set the clock buffer time to be 210 usec (CLK_BUF_TIME) + */ + wl1251_reg_write32(wl, CLK_BUF_TIME, 0x6); + + /* + * set the clock detect feature to work in the restart wu procedure + * (ELP_CFG_MODE[14]) and Select the clock source type + * (ELP_CFG_MODE[13:12]) + */ + tmp = ((scr_pad6 & 0x0000FF00) << 4) | 0x00004000; + wl1251_reg_write32(wl, ELP_CFG_MODE, tmp); + + /* PG 1.2: enable the BB PLL fix. Enable the PLL_LIMP_CLK_EN_CMD */ + elp_cmd |= 0x00000040; + wl1251_reg_write32(wl, ELP_CMD, elp_cmd); + + /* PG 1.2: Set the BB PLL stable time to be 1000usec + * (PLL_STABLE_TIME) */ + wl1251_reg_write32(wl, CFG_PLL_SYNC_CNT, 0x20); + + /* PG 1.2: read clock request time */ + init_data = wl1251_reg_read32(wl, CLK_REQ_TIME); + + /* + * PG 1.2: set the clock request time to be ref_clk_settling_time - + * 1ms = 4ms + */ + if (init_data > 0x21) + tmp = init_data - 0x21; + else + tmp = 0; + wl1251_reg_write32(wl, CLK_REQ_TIME, tmp); + + /* set BB PLL configurations in RF AFE */ + wl1251_reg_write32(wl, 0x003058cc, 0x4B5); + + /* set RF_AFE_REG_5 */ + wl1251_reg_write32(wl, 0x003058d4, 0x50); + + /* set RF_AFE_CTRL_REG_2 */ + wl1251_reg_write32(wl, 0x00305948, 0x11c001); + + /* + * change RF PLL and BB PLL divider for VCO clock and adjust VCO + * bais current(RF_AFE_REG_13) + */ + wl1251_reg_write32(wl, 0x003058f4, 0x1e); + + /* set BB PLL configurations */ + tmp = LUT[ref_freq][LUT_PARAM_INTEGER_DIVIDER] | 0x00017000; + wl1251_reg_write32(wl, 0x00305840, tmp); + + /* set fractional divider according to Appendix C-BB PLL + * Calculations + */ + tmp = LUT[ref_freq][LUT_PARAM_FRACTIONAL_DIVIDER]; + wl1251_reg_write32(wl, 0x00305844, tmp); + + /* set the initial data for the sigma delta */ + wl1251_reg_write32(wl, 0x00305848, 0x3039); + + /* + * set the accumulator attenuation value, calibration loop1 + * (alpha), calibration loop2 (beta), calibration loop3 (gamma) and + * the VCO gain + */ + tmp = (LUT[ref_freq][LUT_PARAM_ATTN_BB] << 16) | + (LUT[ref_freq][LUT_PARAM_ALPHA_BB] << 12) | 0x1; + wl1251_reg_write32(wl, 0x00305854, tmp); + + /* + * set the calibration stop time after holdoff time expires and set + * settling time HOLD_OFF_TIME_BB + */ + tmp = LUT[ref_freq][LUT_PARAM_STOP_TIME_BB] | 0x000A0000; + wl1251_reg_write32(wl, 0x00305858, tmp); + + /* + * set BB PLL Loop filter capacitor3- BB_C3[2:0] and set BB PLL + * constant leakage current to linearize PFD to 0uA - + * BB_ILOOPF[7:3] + */ + tmp = LUT[ref_freq][LUT_PARAM_BB_PLL_LOOP_FILTER] | 0x00000030; + wl1251_reg_write32(wl, 0x003058f8, tmp); + + /* + * set regulator output voltage for n divider to + * 1.35-BB_REFDIV[1:0], set charge pump current- BB_CPGAIN[4:2], + * set BB PLL Loop filter capacitor2- BB_C2[7:5], set gain of BB + * PLL auto-call to normal mode- BB_CALGAIN_3DB[8] + */ + wl1251_reg_write32(wl, 0x003058f0, 0x29); + + /* enable restart wakeup sequence (ELP_CMD[0]) */ + wl1251_reg_write32(wl, ELP_CMD, elp_cmd | 0x1); + + /* restart sequence completed */ + udelay(2000); + + return 0; +} + +static void wl1251_boot_set_ecpu_ctrl(struct wl1251 *wl, u32 flag) +{ + u32 cpu_ctrl; + + /* 10.5.0 run the firmware (I) */ + cpu_ctrl = wl1251_reg_read32(wl, ACX_REG_ECPU_CONTROL); + + /* 10.5.1 run the firmware (II) */ + cpu_ctrl &= 
~flag; + wl1251_reg_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); +} + +int wl1251_boot_run_firmware(struct wl1251 *wl) +{ + int loop, ret; + u32 chip_id, interrupt; + + wl1251_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); + + chip_id = wl1251_reg_read32(wl, CHIP_ID_B); + + wl1251_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); + + if (chip_id != wl->chip_id) { + wl1251_error("chip id doesn't match after firmware boot"); + return -EIO; + } + + /* wait for init to complete */ + loop = 0; + while (loop++ < INIT_LOOP) { + udelay(INIT_LOOP_DELAY); + interrupt = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); + + if (interrupt == 0xffffffff) { + wl1251_error("error reading hardware complete " + "init indication"); + return -EIO; + } + /* check that ACX_INTR_INIT_COMPLETE is enabled */ + else if (interrupt & WL1251_ACX_INTR_INIT_COMPLETE) { + wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK, + WL1251_ACX_INTR_INIT_COMPLETE); + break; + } + } + + if (loop > INIT_LOOP) { + wl1251_error("timeout waiting for the hardware to " + "complete initialization"); + return -EIO; + } + + /* get hardware config command mail box */ + wl->cmd_box_addr = wl1251_reg_read32(wl, REG_COMMAND_MAILBOX_PTR); + + /* get hardware config event mail box */ + wl->event_box_addr = wl1251_reg_read32(wl, REG_EVENT_MAILBOX_PTR); + + /* set the working partition to its "running" mode offset */ + wl1251_set_partition(wl, WL1251_PART_WORK_MEM_START, + WL1251_PART_WORK_MEM_SIZE, + WL1251_PART_WORK_REG_START, + WL1251_PART_WORK_REG_SIZE); + + wl1251_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x", + wl->cmd_box_addr, wl->event_box_addr); + + wl1251_acx_fw_version(wl, wl->fw_ver, sizeof(wl->fw_ver)); + + /* + * in case of full asynchronous mode the firmware event must be + * ready to receive event from the command mailbox + */ + + /* enable gpio interrupts */ + wl1251_enable_interrupts(wl); + + /* Enable target's interrupts */ + wl->intr_mask = WL1251_ACX_INTR_RX0_DATA | + WL1251_ACX_INTR_RX1_DATA | + WL1251_ACX_INTR_TX_RESULT | + WL1251_ACX_INTR_EVENT_A | + WL1251_ACX_INTR_EVENT_B | + WL1251_ACX_INTR_INIT_COMPLETE; + wl1251_boot_target_enable_interrupts(wl); + + wl->event_mask = SCAN_COMPLETE_EVENT_ID | BSS_LOSE_EVENT_ID | + SYNCHRONIZATION_TIMEOUT_EVENT_ID | + ROAMING_TRIGGER_LOW_RSSI_EVENT_ID | + ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID | + REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID | + BT_PTA_PREDICTION_EVENT_ID; + + ret = wl1251_event_unmask(wl); + if (ret < 0) { + wl1251_error("EVENT mask setting failed"); + return ret; + } + + wl1251_event_mbox_config(wl); + + /* firmware startup completed */ + return 0; +} + +static int wl1251_boot_upload_firmware(struct wl1251 *wl) +{ + int addr, chunk_num, partition_limit; + size_t fw_data_len, len; + u8 *p, *buf; + + /* whal_FwCtrl_LoadFwImageSm() */ + + wl1251_debug(DEBUG_BOOT, "chip id before fw upload: 0x%x", + wl1251_reg_read32(wl, CHIP_ID_B)); + + /* 10.0 check firmware length and set partition */ + fw_data_len = (wl->fw[4] << 24) | (wl->fw[5] << 16) | + (wl->fw[6] << 8) | (wl->fw[7]); + + wl1251_debug(DEBUG_BOOT, "fw_data_len %zu chunk_size %d", fw_data_len, + CHUNK_SIZE); + + if ((fw_data_len % 4) != 0) { + wl1251_error("firmware length not multiple of four"); + return -EIO; + } + + buf = kmalloc(CHUNK_SIZE, GFP_KERNEL); + if (!buf) { + wl1251_error("allocation for firmware upload chunk failed"); + return -ENOMEM; + } + + wl1251_set_partition(wl, WL1251_PART_DOWN_MEM_START, + WL1251_PART_DOWN_MEM_SIZE, + WL1251_PART_DOWN_REG_START, + 
WL1251_PART_DOWN_REG_SIZE); + + /* 10.1 set partition limit and chunk num */ + chunk_num = 0; + partition_limit = WL1251_PART_DOWN_MEM_SIZE; + + while (chunk_num < fw_data_len / CHUNK_SIZE) { + /* 10.2 update partition, if needed */ + addr = WL1251_PART_DOWN_MEM_START + + (chunk_num + 2) * CHUNK_SIZE; + if (addr > partition_limit) { + addr = WL1251_PART_DOWN_MEM_START + + chunk_num * CHUNK_SIZE; + partition_limit = chunk_num * CHUNK_SIZE + + WL1251_PART_DOWN_MEM_SIZE; + wl1251_set_partition(wl, + addr, + WL1251_PART_DOWN_MEM_SIZE, + WL1251_PART_DOWN_REG_START, + WL1251_PART_DOWN_REG_SIZE); + } + + /* 10.3 upload the chunk */ + addr = WL1251_PART_DOWN_MEM_START + chunk_num * CHUNK_SIZE; + p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE; + wl1251_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", + p, addr); + + /* need to copy the chunk for dma */ + len = CHUNK_SIZE; + memcpy(buf, p, len); + wl1251_mem_write(wl, addr, buf, len); + + chunk_num++; + } + + /* 10.4 upload the last chunk */ + addr = WL1251_PART_DOWN_MEM_START + chunk_num * CHUNK_SIZE; + p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE; + + /* need to copy the chunk for dma */ + len = fw_data_len % CHUNK_SIZE; + memcpy(buf, p, len); + + wl1251_debug(DEBUG_BOOT, "uploading fw last chunk (%zu B) 0x%p to 0x%x", + len, p, addr); + wl1251_mem_write(wl, addr, buf, len); + + kfree(buf); + + return 0; +} + +static int wl1251_boot_upload_nvs(struct wl1251 *wl) +{ + size_t nvs_len, nvs_bytes_written, burst_len; + int nvs_start, i; + u32 dest_addr, val; + u8 *nvs_ptr, *nvs; + + nvs = wl->nvs; + if (nvs == NULL) + return -ENODEV; + + nvs_ptr = nvs; + + nvs_len = wl->nvs_len; + nvs_start = wl->fw_len; + + /* + * Layout before the actual NVS tables: + * 1 byte : burst length. + * 2 bytes: destination address. + * n bytes: data to burst copy. + * + * This is ended by a 0 length, then the NVS tables. + */ + + while (nvs_ptr[0]) { + burst_len = nvs_ptr[0]; + dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8)); + + /* We move our pointer to the data */ + nvs_ptr += 3; + + for (i = 0; i < burst_len; i++) { + val = (nvs_ptr[0] | (nvs_ptr[1] << 8) + | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); + + wl1251_debug(DEBUG_BOOT, + "nvs burst write 0x%x: 0x%x", + dest_addr, val); + wl1251_mem_write32(wl, dest_addr, val); + + nvs_ptr += 4; + dest_addr += 4; + } + } + + /* + * We've reached the first zero length, the first NVS table + * is 7 bytes further. + */ + nvs_ptr += 7; + nvs_len -= nvs_ptr - nvs; + nvs_len = ALIGN(nvs_len, 4); + + /* Now we must set the partition correctly */ + wl1251_set_partition(wl, nvs_start, + WL1251_PART_DOWN_MEM_SIZE, + WL1251_PART_DOWN_REG_START, + WL1251_PART_DOWN_REG_SIZE); + + /* And finally we upload the NVS tables */ + nvs_bytes_written = 0; + while (nvs_bytes_written < nvs_len) { + val = (nvs_ptr[0] | (nvs_ptr[1] << 8) + | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); + + val = cpu_to_le32(val); + + wl1251_debug(DEBUG_BOOT, + "nvs write table 0x%x: 0x%x", + nvs_start, val); + wl1251_mem_write32(wl, nvs_start, val); + + nvs_ptr += 4; + nvs_bytes_written += 4; + nvs_start += 4; + } + + return 0; +} + +int wl1251_boot(struct wl1251 *wl) +{ + int ret = 0, minor_minor_e2_ver; + u32 tmp, boot_data; + + /* halt embedded ARM CPU while loading firmware */ + wl1251_reg_write32(wl, ACX_REG_ECPU_CONTROL, ECPU_CONTROL_HALT); + + ret = wl1251_boot_soft_reset(wl); + if (ret < 0) + goto out; + + /* 2. 
start processing NVS file */ + if (wl->use_eeprom) { + wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR); + msleep(4000); + wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM); + } else { + ret = wl1251_boot_upload_nvs(wl); + if (ret < 0) + goto out; + + /* write firmware's last address (ie. it's length) to + * ACX_EEPROMLESS_IND_REG */ + wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, wl->fw_len); + } + + /* 6. read the EEPROM parameters */ + tmp = wl1251_reg_read32(wl, SCR_PAD2); + + /* 7. read bootdata */ + wl->boot_attr.radio_type = (tmp & 0x0000FF00) >> 8; + wl->boot_attr.major = (tmp & 0x00FF0000) >> 16; + tmp = wl1251_reg_read32(wl, SCR_PAD3); + + /* 8. check bootdata and call restart sequence */ + wl->boot_attr.minor = (tmp & 0x00FF0000) >> 16; + minor_minor_e2_ver = (tmp & 0xFF000000) >> 24; + + wl1251_debug(DEBUG_BOOT, "radioType 0x%x majorE2Ver 0x%x " + "minorE2Ver 0x%x minor_minor_e2_ver 0x%x", + wl->boot_attr.radio_type, wl->boot_attr.major, + wl->boot_attr.minor, minor_minor_e2_ver); + + ret = wl1251_boot_init_seq(wl); + if (ret < 0) + goto out; + + /* 9. NVS processing done */ + boot_data = wl1251_reg_read32(wl, ACX_REG_ECPU_CONTROL); + + wl1251_debug(DEBUG_BOOT, "halt boot_data 0x%x", boot_data); + + /* 10. check that ECPU_CONTROL_HALT bits are set in + * pWhalBus->uBootData and start uploading firmware + */ + if ((boot_data & ECPU_CONTROL_HALT) == 0) { + wl1251_error("boot failed, ECPU_CONTROL_HALT not set"); + ret = -EIO; + goto out; + } + + ret = wl1251_boot_upload_firmware(wl); + if (ret < 0) + goto out; + + /* 10.5 start firmware */ + ret = wl1251_boot_run_firmware(wl); + if (ret < 0) + goto out; + +out: + return ret; +} diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.h b/drivers/net/wireless/wl12xx/wl1251_boot.h new file mode 100644 index 0000000..9006369 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_boot.h @@ -0,0 +1,41 @@ +/* + * This file is part of wl1251 + * + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __BOOT_H__ +#define __BOOT_H__ + +#include "wl1251.h" + +int wl1251_boot_soft_reset(struct wl1251 *wl); +int wl1251_boot_init_seq(struct wl1251 *wl); +int wl1251_boot_run_firmware(struct wl1251 *wl); +void wl1251_boot_target_enable_interrupts(struct wl1251 *wl); +int wl1251_boot(struct wl1251 *wl); + +/* number of times we try to read the INIT interrupt */ +#define INIT_LOOP 20000 + +/* delay between retries */ +#define INIT_LOOP_DELAY 50 + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c new file mode 100644 index 0000000..0320b47 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c @@ -0,0 +1,495 @@ +#include "wl1251_cmd.h" + +#include +#include + +#include "wl1251.h" +#include "wl1251_reg.h" +#include "wl1251_io.h" +#include "wl1251_ps.h" +#include "wl1251_acx.h" + +/** + * send command to firmware + * + * @wl: wl struct + * @id: command id + * @buf: buffer containing the command, must work with dma + * @len: length of the buffer + */ +int wl1251_cmd_send(struct wl1251 *wl, u16 id, void *buf, size_t len) +{ + struct wl1251_cmd_header *cmd; + unsigned long timeout; + u32 intr; + int ret = 0; + + cmd = buf; + cmd->id = id; + cmd->status = 0; + + WARN_ON(len % 4 != 0); + + wl1251_mem_write(wl, wl->cmd_box_addr, buf, len); + + wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); + + timeout = jiffies + msecs_to_jiffies(WL1251_COMMAND_TIMEOUT); + + intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); + while (!(intr & WL1251_ACX_INTR_CMD_COMPLETE)) { + if (time_after(jiffies, timeout)) { + wl1251_error("command complete timeout"); + ret = -ETIMEDOUT; + goto out; + } + + msleep(1); + + intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); + } + + wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK, + WL1251_ACX_INTR_CMD_COMPLETE); + +out: + return ret; +} + +/** + * send test command to firmware + * + * @wl: wl struct + * @buf: buffer containing the command, with all headers, must work with dma + * @len: length of the buffer + * @answer: is answer needed + */ +int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer) +{ + int ret; + + wl1251_debug(DEBUG_CMD, "cmd test"); + + ret = wl1251_cmd_send(wl, CMD_TEST, buf, buf_len); + + if (ret < 0) { + wl1251_warning("TEST command failed"); + return ret; + } + + if (answer) { + struct wl1251_command *cmd_answer; + + /* + * The test command got in, we can read the answer. + * The answer would be a wl1251_command, where the + * parameter array contains the actual answer. 
+ */ + wl1251_mem_read(wl, wl->cmd_box_addr, buf, buf_len); + + cmd_answer = buf; + + if (cmd_answer->header.status != CMD_STATUS_SUCCESS) + wl1251_error("TEST command answer error: %d", + cmd_answer->header.status); + } + + return 0; +} + +/** + * read acx from firmware + * + * @wl: wl struct + * @id: acx id + * @buf: buffer for the response, including all headers, must work with dma + * @len: lenght of buf + */ +int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len) +{ + struct acx_header *acx = buf; + int ret; + + wl1251_debug(DEBUG_CMD, "cmd interrogate"); + + acx->id = id; + + /* payload length, does not include any headers */ + acx->len = len - sizeof(*acx); + + ret = wl1251_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx)); + if (ret < 0) { + wl1251_error("INTERROGATE command failed"); + goto out; + } + + /* the interrogate command got in, we can read the answer */ + wl1251_mem_read(wl, wl->cmd_box_addr, buf, len); + + acx = buf; + if (acx->cmd.status != CMD_STATUS_SUCCESS) + wl1251_error("INTERROGATE command error: %d", + acx->cmd.status); + +out: + return ret; +} + +/** + * write acx value to firmware + * + * @wl: wl struct + * @id: acx id + * @buf: buffer containing acx, including all headers, must work with dma + * @len: length of buf + */ +int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len) +{ + struct acx_header *acx = buf; + int ret; + + wl1251_debug(DEBUG_CMD, "cmd configure"); + + acx->id = id; + + /* payload length, does not include any headers */ + acx->len = len - sizeof(*acx); + + ret = wl1251_cmd_send(wl, CMD_CONFIGURE, acx, len); + if (ret < 0) { + wl1251_warning("CONFIGURE command NOK"); + return ret; + } + + return 0; +} + +int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity, + void *bitmap, u16 bitmap_len, u8 bitmap_control) +{ + struct wl1251_cmd_vbm_update *vbm; + int ret; + + wl1251_debug(DEBUG_CMD, "cmd vbm"); + + vbm = kzalloc(sizeof(*vbm), GFP_KERNEL); + if (!vbm) { + ret = -ENOMEM; + goto out; + } + + /* Count and period will be filled by the target */ + vbm->tim.bitmap_ctrl = bitmap_control; + if (bitmap_len > PARTIAL_VBM_MAX) { + wl1251_warning("cmd vbm len is %d B, truncating to %d", + bitmap_len, PARTIAL_VBM_MAX); + bitmap_len = PARTIAL_VBM_MAX; + } + memcpy(vbm->tim.pvb_field, bitmap, bitmap_len); + vbm->tim.identity = identity; + vbm->tim.length = bitmap_len + 3; + + vbm->len = cpu_to_le16(bitmap_len + 5); + + ret = wl1251_cmd_send(wl, CMD_VBM, vbm, sizeof(*vbm)); + if (ret < 0) { + wl1251_error("VBM command failed"); + goto out; + } + +out: + kfree(vbm); + return 0; +} + +int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable) +{ + struct cmd_enabledisable_path *cmd; + int ret; + u16 cmd_rx, cmd_tx; + + wl1251_debug(DEBUG_CMD, "cmd data path"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->channel = channel; + + if (enable) { + cmd_rx = CMD_ENABLE_RX; + cmd_tx = CMD_ENABLE_TX; + } else { + cmd_rx = CMD_DISABLE_RX; + cmd_tx = CMD_DISABLE_TX; + } + + ret = wl1251_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd)); + if (ret < 0) { + wl1251_error("rx %s cmd for channel %d failed", + enable ? "start" : "stop", channel); + goto out; + } + + wl1251_debug(DEBUG_BOOT, "rx %s cmd channel %d", + enable ? "start" : "stop", channel); + + ret = wl1251_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd)); + if (ret < 0) { + wl1251_error("tx %s cmd for channel %d failed", + enable ? 
"start" : "stop", channel); + return ret; + } + + wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d", + enable ? "start" : "stop", channel); + +out: + kfree(cmd); + return ret; +} + +int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel, + u16 beacon_interval, u8 dtim_interval) +{ + struct cmd_join *join; + int ret, i; + u8 *bssid; + + join = kzalloc(sizeof(*join), GFP_KERNEL); + if (!join) { + ret = -ENOMEM; + goto out; + } + + wl1251_debug(DEBUG_CMD, "cmd join%s ch %d %d/%d", + bss_type == BSS_TYPE_IBSS ? " ibss" : "", + channel, beacon_interval, dtim_interval); + + /* Reverse order BSSID */ + bssid = (u8 *) &join->bssid_lsb; + for (i = 0; i < ETH_ALEN; i++) + bssid[i] = wl->bssid[ETH_ALEN - i - 1]; + + join->rx_config_options = wl->rx_config; + join->rx_filter_options = wl->rx_filter; + + /* + * FIXME: disable temporarily all filters because after commit + * 9cef8737 "mac80211: fix managed mode BSSID handling" broke + * association. The filter logic needs to be implemented properly + * and once that is done, this hack can be removed. + */ + join->rx_config_options = 0; + join->rx_filter_options = WL1251_DEFAULT_RX_FILTER; + + join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS | + RATE_MASK_5_5MBPS | RATE_MASK_11MBPS; + + join->beacon_interval = beacon_interval; + join->dtim_interval = dtim_interval; + join->bss_type = bss_type; + join->channel = channel; + join->ctrl = JOIN_CMD_CTRL_TX_FLUSH; + + ret = wl1251_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join)); + if (ret < 0) { + wl1251_error("failed to initiate cmd join"); + goto out; + } + +out: + kfree(join); + return ret; +} + +int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode) +{ + struct wl1251_cmd_ps_params *ps_params = NULL; + int ret = 0; + + wl1251_debug(DEBUG_CMD, "cmd set ps mode"); + + ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL); + if (!ps_params) { + ret = -ENOMEM; + goto out; + } + + ps_params->ps_mode = ps_mode; + ps_params->send_null_data = 1; + ps_params->retries = 5; + ps_params->hang_over_period = 128; + ps_params->null_data_rate = 1; /* 1 Mbps */ + + ret = wl1251_cmd_send(wl, CMD_SET_PS_MODE, ps_params, + sizeof(*ps_params)); + if (ret < 0) { + wl1251_error("cmd set_ps_mode failed"); + goto out; + } + +out: + kfree(ps_params); + return ret; +} + +int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer, + size_t len) +{ + struct cmd_read_write_memory *cmd; + int ret = 0; + + wl1251_debug(DEBUG_CMD, "cmd read memory"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + WARN_ON(len > MAX_READ_SIZE); + len = min_t(size_t, len, MAX_READ_SIZE); + + cmd->addr = addr; + cmd->size = len; + + ret = wl1251_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd)); + if (ret < 0) { + wl1251_error("read memory command failed: %d", ret); + goto out; + } + + /* the read command got in, we can now read the answer */ + wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd)); + + if (cmd->header.status != CMD_STATUS_SUCCESS) + wl1251_error("error in read command result: %d", + cmd->header.status); + + memcpy(answer, cmd->value, len); + +out: + kfree(cmd); + return ret; +} + +int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id, + void *buf, size_t buf_len) +{ + struct wl1251_cmd_packet_template *cmd; + size_t cmd_len; + int ret = 0; + + wl1251_debug(DEBUG_CMD, "cmd template %d", cmd_id); + + WARN_ON(buf_len > WL1251_MAX_TEMPLATE_SIZE); + buf_len = min_t(size_t, buf_len, WL1251_MAX_TEMPLATE_SIZE); + cmd_len = ALIGN(sizeof(*cmd) + buf_len, 4); + + cmd = 
kzalloc(cmd_len, GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->size = cpu_to_le16(buf_len); + + if (buf) + memcpy(cmd->data, buf, buf_len); + + ret = wl1251_cmd_send(wl, cmd_id, cmd, cmd_len); + if (ret < 0) { + wl1251_warning("cmd set_template failed: %d", ret); + goto out; + } + +out: + kfree(cmd); + return ret; +} + +int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len, + struct ieee80211_channel *channels[], + unsigned int n_channels, unsigned int n_probes) +{ + struct wl1251_cmd_scan *cmd; + int i, ret = 0; + + wl1251_debug(DEBUG_CMD, "cmd scan"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD); + cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN | + CFG_RX_MGMT_EN | + CFG_RX_BCN_EN); + cmd->params.scan_options = 0; + cmd->params.num_channels = n_channels; + cmd->params.num_probe_requests = n_probes; + cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */ + cmd->params.tid_trigger = 0; + + for (i = 0; i < n_channels; i++) { + cmd->channels[i].min_duration = + cpu_to_le32(WL1251_SCAN_MIN_DURATION); + cmd->channels[i].max_duration = + cpu_to_le32(WL1251_SCAN_MAX_DURATION); + memset(&cmd->channels[i].bssid_lsb, 0xff, 4); + memset(&cmd->channels[i].bssid_msb, 0xff, 2); + cmd->channels[i].early_termination = 0; + cmd->channels[i].tx_power_att = 0; + cmd->channels[i].channel = channels[i]->hw_value; + } + + cmd->params.ssid_len = ssid_len; + if (ssid) + memcpy(cmd->params.ssid, ssid, ssid_len); + + ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd)); + if (ret < 0) { + wl1251_error("cmd scan failed: %d", ret); + goto out; + } + + wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd)); + + if (cmd->header.status != CMD_STATUS_SUCCESS) { + wl1251_error("cmd scan status wasn't success: %d", + cmd->header.status); + ret = -EIO; + goto out; + } + +out: + kfree(cmd); + return ret; +} + +int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout) +{ + struct wl1251_cmd_trigger_scan_to *cmd; + int ret; + + wl1251_debug(DEBUG_CMD, "cmd trigger scan to"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->timeout = timeout; + + ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd)); + if (ret < 0) { + wl1251_error("cmd trigger scan to failed: %d", ret); + goto out; + } + +out: + kfree(cmd); + return ret; +} diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h new file mode 100644 index 0000000..4ad67ca --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h @@ -0,0 +1,417 @@ +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1251_CMD_H__ +#define __WL1251_CMD_H__ + +#include "wl1251.h" + +#include + +struct acx_header; + +int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len); +int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer); +int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len); +int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len); +int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity, + void *bitmap, u16 bitmap_len, u8 bitmap_control); +int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable); +int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel, + u16 beacon_interval, u8 dtim_interval); +int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode); +int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer, + size_t len); +int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id, + void *buf, size_t buf_len); +int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len, + struct ieee80211_channel *channels[], + unsigned int n_channels, unsigned int n_probes); +int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout); + +/* unit ms */ +#define WL1251_COMMAND_TIMEOUT 2000 + +enum wl1251_commands { + CMD_RESET = 0, + CMD_INTERROGATE = 1, /*use this to read information elements*/ + CMD_CONFIGURE = 2, /*use this to write information elements*/ + CMD_ENABLE_RX = 3, + CMD_ENABLE_TX = 4, + CMD_DISABLE_RX = 5, + CMD_DISABLE_TX = 6, + CMD_SCAN = 8, + CMD_STOP_SCAN = 9, + CMD_VBM = 10, + CMD_START_JOIN = 11, + CMD_SET_KEYS = 12, + CMD_READ_MEMORY = 13, + CMD_WRITE_MEMORY = 14, + CMD_BEACON = 19, + CMD_PROBE_RESP = 20, + CMD_NULL_DATA = 21, + CMD_PROBE_REQ = 22, + CMD_TEST = 23, + CMD_RADIO_CALIBRATE = 25, /* OBSOLETE */ + CMD_ENABLE_RX_PATH = 27, /* OBSOLETE */ + CMD_NOISE_HIST = 28, + CMD_RX_RESET = 29, + CMD_PS_POLL = 30, + CMD_QOS_NULL_DATA = 31, + CMD_LNA_CONTROL = 32, + CMD_SET_BCN_MODE = 33, + CMD_MEASUREMENT = 34, + CMD_STOP_MEASUREMENT = 35, + CMD_DISCONNECT = 36, + CMD_SET_PS_MODE = 37, + CMD_CHANNEL_SWITCH = 38, + CMD_STOP_CHANNEL_SWICTH = 39, + CMD_AP_DISCOVERY = 40, + CMD_STOP_AP_DISCOVERY = 41, + CMD_SPS_SCAN = 42, + CMD_STOP_SPS_SCAN = 43, + CMD_HEALTH_CHECK = 45, + CMD_DEBUG = 46, + CMD_TRIGGER_SCAN_TO = 47, + + NUM_COMMANDS, + MAX_COMMAND_ID = 0xFFFF, +}; + +#define MAX_CMD_PARAMS 572 + +struct wl1251_cmd_header { + u16 id; + u16 status; + /* payload */ + u8 data[0]; +} __attribute__ ((packed)); + +struct wl1251_command { + struct wl1251_cmd_header header; + u8 parameters[MAX_CMD_PARAMS]; +}; + +enum { + CMD_MAILBOX_IDLE = 0, + CMD_STATUS_SUCCESS = 1, + CMD_STATUS_UNKNOWN_CMD = 2, + CMD_STATUS_UNKNOWN_IE = 3, + CMD_STATUS_REJECT_MEAS_SG_ACTIVE = 11, + CMD_STATUS_RX_BUSY = 13, + CMD_STATUS_INVALID_PARAM = 14, + CMD_STATUS_TEMPLATE_TOO_LARGE = 15, + CMD_STATUS_OUT_OF_MEMORY = 16, + CMD_STATUS_STA_TABLE_FULL = 17, + CMD_STATUS_RADIO_ERROR = 18, + CMD_STATUS_WRONG_NESTING = 19, + CMD_STATUS_TIMEOUT = 21, /* Driver internal use.*/ + CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/ + MAX_COMMAND_STATUS = 0xff +}; + + +/* + * CMD_READ_MEMORY + * + * The host issues this command to read the WiLink device memory/registers. + * + * Note: The Base Band address has special handling (16 bits registers and + * addresses). 
For more information, see the hardware specification. + */ +/* + * CMD_WRITE_MEMORY + * + * The host issues this command to write the WiLink device memory/registers. + * + * The Base Band address has special handling (16 bits registers and + * addresses). For more information, see the hardware specification. + */ +#define MAX_READ_SIZE 256 + +struct cmd_read_write_memory { + struct wl1251_cmd_header header; + + /* The address of the memory to read from or write to.*/ + u32 addr; + + /* The amount of data in bytes to read from or write to the WiLink + * device.*/ + u32 size; + + /* The actual value read from or written to the Wilink. The source + of this field is the Host in WRITE command or the Wilink in READ + command. */ + u8 value[MAX_READ_SIZE]; +}; + +#define CMDMBOX_HEADER_LEN 4 +#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 + +#define WL1251_SCAN_MIN_DURATION 30000 +#define WL1251_SCAN_MAX_DURATION 60000 + +#define WL1251_SCAN_NUM_PROBES 3 + +struct wl1251_scan_parameters { + u32 rx_config_options; + u32 rx_filter_options; + + /* + * Scan options: + * bit 0: When this bit is set, passive scan. + * bit 1: Band, when this bit is set we scan + * in the 5Ghz band. + * bit 2: voice mode, 0 for normal scan. + * bit 3: scan priority, 1 for high priority. + */ + u16 scan_options; + + /* Number of channels to scan */ + u8 num_channels; + + /* Number opf probe requests to send, per channel */ + u8 num_probe_requests; + + /* Rate and modulation for probe requests */ + u16 tx_rate; + + u8 tid_trigger; + u8 ssid_len; + u8 ssid[32]; + +} __attribute__ ((packed)); + +struct wl1251_scan_ch_parameters { + u32 min_duration; /* in TU */ + u32 max_duration; /* in TU */ + u32 bssid_lsb; + u16 bssid_msb; + + /* + * bits 0-3: Early termination count. + * bits 4-5: Early termination condition. + */ + u8 early_termination; + + u8 tx_power_att; + u8 channel; + u8 pad[3]; +} __attribute__ ((packed)); + +/* SCAN parameters */ +#define SCAN_MAX_NUM_OF_CHANNELS 16 + +struct wl1251_cmd_scan { + struct wl1251_cmd_header header; + + struct wl1251_scan_parameters params; + struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS]; +} __attribute__ ((packed)); + +enum { + BSS_TYPE_IBSS = 0, + BSS_TYPE_STA_BSS = 2, + BSS_TYPE_AP_BSS = 3, + MAX_BSS_TYPE = 0xFF +}; + +#define JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */ +#define JOIN_CMD_CTRL_EARLY_WAKEUP_ENABLE 0x01 /* Early wakeup time */ + + +struct cmd_join { + struct wl1251_cmd_header header; + + u32 bssid_lsb; + u16 bssid_msb; + u16 beacon_interval; /* in TBTTs */ + u32 rx_config_options; + u32 rx_filter_options; + + /* + * The target uses this field to determine the rate at + * which to transmit control frame responses (such as + * ACK or CTS frames). + */ + u16 basic_rate_set; + u8 dtim_interval; + u8 tx_ctrl_frame_rate; /* OBSOLETE */ + u8 tx_ctrl_frame_mod; /* OBSOLETE */ + /* + * bits 0-2: This bitwise field specifies the type + * of BSS to start or join (BSS_TYPE_*). + * bit 4: Band - The radio band in which to join + * or start. 
+ * 0 - 2.4GHz band + * 1 - 5GHz band + * bits 3, 5-7: Reserved + */ + u8 bss_type; + u8 channel; + u8 ssid_len; + u8 ssid[IW_ESSID_MAX_SIZE]; + u8 ctrl; /* JOIN_CMD_CTRL_* */ + u8 tx_mgt_frame_rate; /* OBSOLETE */ + u8 tx_mgt_frame_mod; /* OBSOLETE */ + u8 reserved; +} __attribute__ ((packed)); + +struct cmd_enabledisable_path { + struct wl1251_cmd_header header; + + u8 channel; + u8 padding[3]; +} __attribute__ ((packed)); + +#define WL1251_MAX_TEMPLATE_SIZE 300 + +struct wl1251_cmd_packet_template { + struct wl1251_cmd_header header; + + __le16 size; + u8 data[0]; +} __attribute__ ((packed)); + +#define TIM_ELE_ID 5 +#define PARTIAL_VBM_MAX 251 + +struct wl1251_tim { + u8 identity; + u8 length; + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ +} __attribute__ ((packed)); + +/* Virtual Bit Map update */ +struct wl1251_cmd_vbm_update { + struct wl1251_cmd_header header; + __le16 len; + u8 padding[2]; + struct wl1251_tim tim; +} __attribute__ ((packed)); + +enum wl1251_cmd_ps_mode { + STATION_ACTIVE_MODE, + STATION_POWER_SAVE_MODE +}; + +struct wl1251_cmd_ps_params { + struct wl1251_cmd_header header; + + u8 ps_mode; /* STATION_* */ + u8 send_null_data; /* Do we have to send NULL data packet ? */ + u8 retries; /* Number of retires for the initial NULL data packet */ + + /* + * TUs during which the target stays awake after switching + * to power save mode. + */ + u8 hang_over_period; + u16 null_data_rate; + u8 pad[2]; +} __attribute__ ((packed)); + +struct wl1251_cmd_trigger_scan_to { + struct wl1251_cmd_header header; + + u32 timeout; +}; + +/* HW encryption keys */ +#define NUM_ACCESS_CATEGORIES_COPY 4 +#define MAX_KEY_SIZE 32 + +/* When set, disable HW encryption */ +#define DF_ENCRYPTION_DISABLE 0x01 +/* When set, disable HW decryption */ +#define DF_SNIFF_MODE_ENABLE 0x80 + +enum wl1251_cmd_key_action { + KEY_ADD_OR_REPLACE = 1, + KEY_REMOVE = 2, + KEY_SET_ID = 3, + MAX_KEY_ACTION = 0xffff, +}; + +enum wl1251_cmd_key_type { + KEY_WEP_DEFAULT = 0, + KEY_WEP_ADDR = 1, + KEY_AES_GROUP = 4, + KEY_AES_PAIRWISE = 5, + KEY_WEP_GROUP = 6, + KEY_TKIP_MIC_GROUP = 10, + KEY_TKIP_MIC_PAIRWISE = 11, +}; + +/* + * + * key_type_e key size key format + * ---------- --------- ---------- + * 0x00 5, 13, 29 Key data + * 0x01 5, 13, 29 Key data + * 0x04 16 16 bytes of key data + * 0x05 16 16 bytes of key data + * 0x0a 32 16 bytes of TKIP key data + * 8 bytes of RX MIC key data + * 8 bytes of TX MIC key data + * 0x0b 32 16 bytes of TKIP key data + * 8 bytes of RX MIC key data + * 8 bytes of TX MIC key data + * + */ + +struct wl1251_cmd_set_keys { + struct wl1251_cmd_header header; + + /* Ignored for default WEP key */ + u8 addr[ETH_ALEN]; + + /* key_action_e */ + u16 key_action; + + u16 reserved_1; + + /* key size in bytes */ + u8 key_size; + + /* key_type_e */ + u8 key_type; + u8 ssid_profile; + + /* + * TKIP, AES: frame's key id field. 
+ * For WEP default key: key id; + */ + u8 id; + u8 reserved_2[6]; + u8 key[MAX_KEY_SIZE]; + u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; + u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; +} __attribute__ ((packed)); + + +#endif /* __WL1251_CMD_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c new file mode 100644 index 0000000..0ccba57 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c @@ -0,0 +1,541 @@ +/* + * This file is part of wl1251 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wl1251_debugfs.h" + +#include + +#include "wl1251.h" +#include "wl1251_acx.h" +#include "wl1251_ps.h" + +/* ms */ +#define WL1251_DEBUGFS_STATS_LIFETIME 1000 + +/* debugfs macros idea from mac80211 */ + +#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ +static ssize_t name## _read(struct file *file, char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct wl1251 *wl = file->private_data; \ + char buf[buflen]; \ + int res; \ + \ + res = scnprintf(buf, buflen, fmt "\n", ##value); \ + return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ +} \ + \ +static const struct file_operations name## _ops = { \ + .read = name## _read, \ + .open = wl1251_open_file_generic, \ +}; + +#define DEBUGFS_ADD(name, parent) \ + wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \ + wl, &name## _ops); \ + if (IS_ERR(wl->debugfs.name)) { \ + ret = PTR_ERR(wl->debugfs.name); \ + wl->debugfs.name = NULL; \ + goto out; \ + } + +#define DEBUGFS_DEL(name) \ + do { \ + debugfs_remove(wl->debugfs.name); \ + wl->debugfs.name = NULL; \ + } while (0) + +#define DEBUGFS_FWSTATS_FILE(sub, name, buflen, fmt) \ +static ssize_t sub## _ ##name## _read(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct wl1251 *wl = file->private_data; \ + char buf[buflen]; \ + int res; \ + \ + wl1251_debugfs_update_stats(wl); \ + \ + res = scnprintf(buf, buflen, fmt "\n", \ + wl->stats.fw_stats->sub.name); \ + return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ +} \ + \ +static const struct file_operations sub## _ ##name## _ops = { \ + .read = sub## _ ##name## _read, \ + .open = wl1251_open_file_generic, \ +}; + +#define DEBUGFS_FWSTATS_ADD(sub, name) \ + DEBUGFS_ADD(sub## _ ##name, wl->debugfs.fw_statistics) + +#define DEBUGFS_FWSTATS_DEL(sub, name) \ + DEBUGFS_DEL(sub## _ ##name) + +static void wl1251_debugfs_update_stats(struct wl1251 *wl) +{ + int ret; + + mutex_lock(&wl->mutex); + + ret = wl1251_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + if (wl->state == WL1251_STATE_ON && + time_after(jiffies, wl->stats.fw_stats_update + + msecs_to_jiffies(WL1251_DEBUGFS_STATS_LIFETIME))) { + wl1251_acx_statistics(wl, wl->stats.fw_stats); + wl->stats.fw_stats_update = jiffies; + } + + 
wl1251_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static int wl1251_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, hw_stuck, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, fcs_err, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, path_reset, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, reset_counter, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(dma, rx_requested, 20, "%u"); +DEBUGFS_FWSTATS_FILE(dma, rx_errors, 20, "%u"); +DEBUGFS_FWSTATS_FILE(dma, tx_requested, 20, "%u"); +DEBUGFS_FWSTATS_FILE(dma, tx_errors, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, fiqs, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, rx_headers, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, rx_rdys, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, irqs, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, tx_procs, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, decrypt_done, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, dma0_done, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, dma1_done, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, commands, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, rx_procs, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, pci_pm, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, wakeups, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, low_rssi, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(wep, addr_key_count, 20, "%u"); +DEBUGFS_FWSTATS_FILE(wep, default_key_count, 20, "%u"); +/* skipping wep.reserved */ +DEBUGFS_FWSTATS_FILE(wep, key_not_found, 20, "%u"); +DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, 20, "%u"); +DEBUGFS_FWSTATS_FILE(wep, packets, 20, "%u"); +DEBUGFS_FWSTATS_FILE(wep, interrupt, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(pwr, ps_enter, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, elp_enter, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, power_save_off, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, enable_ps, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, disable_ps, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, 20, "%u"); +/* skipping cont_miss_bcns_spread for now */ +DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(mic, rx_pkts, 20, "%u"); +DEBUGFS_FWSTATS_FILE(mic, calc_failure, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, 20, "%u"); +DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, 20, "%u"); +DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, 20, "%u"); +DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, 20, "%u"); +DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, 20, "%u"); +DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, 
oom_late, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, + 20, "%u"); +DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, 20, "%u"); + +DEBUGFS_READONLY_FILE(retry_count, 20, "%u", wl->stats.retry_count); +DEBUGFS_READONLY_FILE(excessive_retries, 20, "%u", + wl->stats.excessive_retries); + +static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct wl1251 *wl = file->private_data; + u32 queue_len; + char buf[20]; + int res; + + queue_len = skb_queue_len(&wl->tx_queue); + + res = scnprintf(buf, sizeof(buf), "%u\n", queue_len); + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} + +static const struct file_operations tx_queue_len_ops = { + .read = tx_queue_len_read, + .open = wl1251_open_file_generic, +}; + +static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct wl1251 *wl = file->private_data; + char buf[3], status; + int len; + + if (wl->tx_queue_stopped) + status = 's'; + else + status = 'r'; + + len = scnprintf(buf, sizeof(buf), "%c\n", status); + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} + +static const struct file_operations tx_queue_status_ops = { + .read = tx_queue_status_read, + .open = wl1251_open_file_generic, +}; + +static void wl1251_debugfs_delete_files(struct wl1251 *wl) +{ + DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow); + + DEBUGFS_FWSTATS_DEL(rx, out_of_mem); + DEBUGFS_FWSTATS_DEL(rx, hdr_overflow); + DEBUGFS_FWSTATS_DEL(rx, hw_stuck); + DEBUGFS_FWSTATS_DEL(rx, dropped); + DEBUGFS_FWSTATS_DEL(rx, fcs_err); + DEBUGFS_FWSTATS_DEL(rx, xfr_hint_trig); + DEBUGFS_FWSTATS_DEL(rx, path_reset); + DEBUGFS_FWSTATS_DEL(rx, reset_counter); + + DEBUGFS_FWSTATS_DEL(dma, rx_requested); + DEBUGFS_FWSTATS_DEL(dma, rx_errors); + DEBUGFS_FWSTATS_DEL(dma, tx_requested); + DEBUGFS_FWSTATS_DEL(dma, tx_errors); + + DEBUGFS_FWSTATS_DEL(isr, cmd_cmplt); + DEBUGFS_FWSTATS_DEL(isr, fiqs); + DEBUGFS_FWSTATS_DEL(isr, rx_headers); + DEBUGFS_FWSTATS_DEL(isr, rx_mem_overflow); + DEBUGFS_FWSTATS_DEL(isr, rx_rdys); + DEBUGFS_FWSTATS_DEL(isr, irqs); + DEBUGFS_FWSTATS_DEL(isr, tx_procs); + DEBUGFS_FWSTATS_DEL(isr, decrypt_done); + DEBUGFS_FWSTATS_DEL(isr, dma0_done); + DEBUGFS_FWSTATS_DEL(isr, dma1_done); + DEBUGFS_FWSTATS_DEL(isr, tx_exch_complete); + DEBUGFS_FWSTATS_DEL(isr, commands); + DEBUGFS_FWSTATS_DEL(isr, rx_procs); + DEBUGFS_FWSTATS_DEL(isr, hw_pm_mode_changes); + DEBUGFS_FWSTATS_DEL(isr, host_acknowledges); + DEBUGFS_FWSTATS_DEL(isr, pci_pm); + DEBUGFS_FWSTATS_DEL(isr, wakeups); + DEBUGFS_FWSTATS_DEL(isr, low_rssi); + + DEBUGFS_FWSTATS_DEL(wep, addr_key_count); + DEBUGFS_FWSTATS_DEL(wep, default_key_count); + /* skipping wep.reserved */ + DEBUGFS_FWSTATS_DEL(wep, key_not_found); + 
DEBUGFS_FWSTATS_DEL(wep, decrypt_fail); + DEBUGFS_FWSTATS_DEL(wep, packets); + DEBUGFS_FWSTATS_DEL(wep, interrupt); + + DEBUGFS_FWSTATS_DEL(pwr, ps_enter); + DEBUGFS_FWSTATS_DEL(pwr, elp_enter); + DEBUGFS_FWSTATS_DEL(pwr, missing_bcns); + DEBUGFS_FWSTATS_DEL(pwr, wake_on_host); + DEBUGFS_FWSTATS_DEL(pwr, wake_on_timer_exp); + DEBUGFS_FWSTATS_DEL(pwr, tx_with_ps); + DEBUGFS_FWSTATS_DEL(pwr, tx_without_ps); + DEBUGFS_FWSTATS_DEL(pwr, rcvd_beacons); + DEBUGFS_FWSTATS_DEL(pwr, power_save_off); + DEBUGFS_FWSTATS_DEL(pwr, enable_ps); + DEBUGFS_FWSTATS_DEL(pwr, disable_ps); + DEBUGFS_FWSTATS_DEL(pwr, fix_tsf_ps); + /* skipping cont_miss_bcns_spread for now */ + DEBUGFS_FWSTATS_DEL(pwr, rcvd_awake_beacons); + + DEBUGFS_FWSTATS_DEL(mic, rx_pkts); + DEBUGFS_FWSTATS_DEL(mic, calc_failure); + + DEBUGFS_FWSTATS_DEL(aes, encrypt_fail); + DEBUGFS_FWSTATS_DEL(aes, decrypt_fail); + DEBUGFS_FWSTATS_DEL(aes, encrypt_packets); + DEBUGFS_FWSTATS_DEL(aes, decrypt_packets); + DEBUGFS_FWSTATS_DEL(aes, encrypt_interrupt); + DEBUGFS_FWSTATS_DEL(aes, decrypt_interrupt); + + DEBUGFS_FWSTATS_DEL(event, heart_beat); + DEBUGFS_FWSTATS_DEL(event, calibration); + DEBUGFS_FWSTATS_DEL(event, rx_mismatch); + DEBUGFS_FWSTATS_DEL(event, rx_mem_empty); + DEBUGFS_FWSTATS_DEL(event, rx_pool); + DEBUGFS_FWSTATS_DEL(event, oom_late); + DEBUGFS_FWSTATS_DEL(event, phy_transmit_error); + DEBUGFS_FWSTATS_DEL(event, tx_stuck); + + DEBUGFS_FWSTATS_DEL(ps, pspoll_timeouts); + DEBUGFS_FWSTATS_DEL(ps, upsd_timeouts); + DEBUGFS_FWSTATS_DEL(ps, upsd_max_sptime); + DEBUGFS_FWSTATS_DEL(ps, upsd_max_apturn); + DEBUGFS_FWSTATS_DEL(ps, pspoll_max_apturn); + DEBUGFS_FWSTATS_DEL(ps, pspoll_utilization); + DEBUGFS_FWSTATS_DEL(ps, upsd_utilization); + + DEBUGFS_FWSTATS_DEL(rxpipe, rx_prep_beacon_drop); + DEBUGFS_FWSTATS_DEL(rxpipe, descr_host_int_trig_rx_data); + DEBUGFS_FWSTATS_DEL(rxpipe, beacon_buffer_thres_host_int_trig_rx_data); + DEBUGFS_FWSTATS_DEL(rxpipe, missed_beacon_host_int_trig_rx_data); + DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data); + + DEBUGFS_DEL(tx_queue_len); + DEBUGFS_DEL(tx_queue_status); + DEBUGFS_DEL(retry_count); + DEBUGFS_DEL(excessive_retries); +} + +static int wl1251_debugfs_add_files(struct wl1251 *wl) +{ + int ret = 0; + + DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow); + + DEBUGFS_FWSTATS_ADD(rx, out_of_mem); + DEBUGFS_FWSTATS_ADD(rx, hdr_overflow); + DEBUGFS_FWSTATS_ADD(rx, hw_stuck); + DEBUGFS_FWSTATS_ADD(rx, dropped); + DEBUGFS_FWSTATS_ADD(rx, fcs_err); + DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig); + DEBUGFS_FWSTATS_ADD(rx, path_reset); + DEBUGFS_FWSTATS_ADD(rx, reset_counter); + + DEBUGFS_FWSTATS_ADD(dma, rx_requested); + DEBUGFS_FWSTATS_ADD(dma, rx_errors); + DEBUGFS_FWSTATS_ADD(dma, tx_requested); + DEBUGFS_FWSTATS_ADD(dma, tx_errors); + + DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt); + DEBUGFS_FWSTATS_ADD(isr, fiqs); + DEBUGFS_FWSTATS_ADD(isr, rx_headers); + DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow); + DEBUGFS_FWSTATS_ADD(isr, rx_rdys); + DEBUGFS_FWSTATS_ADD(isr, irqs); + DEBUGFS_FWSTATS_ADD(isr, tx_procs); + DEBUGFS_FWSTATS_ADD(isr, decrypt_done); + DEBUGFS_FWSTATS_ADD(isr, dma0_done); + DEBUGFS_FWSTATS_ADD(isr, dma1_done); + DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete); + DEBUGFS_FWSTATS_ADD(isr, commands); + DEBUGFS_FWSTATS_ADD(isr, rx_procs); + DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes); + DEBUGFS_FWSTATS_ADD(isr, host_acknowledges); + DEBUGFS_FWSTATS_ADD(isr, pci_pm); + DEBUGFS_FWSTATS_ADD(isr, wakeups); + DEBUGFS_FWSTATS_ADD(isr, low_rssi); + + DEBUGFS_FWSTATS_ADD(wep, addr_key_count); + 
DEBUGFS_FWSTATS_ADD(wep, default_key_count); + /* skipping wep.reserved */ + DEBUGFS_FWSTATS_ADD(wep, key_not_found); + DEBUGFS_FWSTATS_ADD(wep, decrypt_fail); + DEBUGFS_FWSTATS_ADD(wep, packets); + DEBUGFS_FWSTATS_ADD(wep, interrupt); + + DEBUGFS_FWSTATS_ADD(pwr, ps_enter); + DEBUGFS_FWSTATS_ADD(pwr, elp_enter); + DEBUGFS_FWSTATS_ADD(pwr, missing_bcns); + DEBUGFS_FWSTATS_ADD(pwr, wake_on_host); + DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp); + DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps); + DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps); + DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons); + DEBUGFS_FWSTATS_ADD(pwr, power_save_off); + DEBUGFS_FWSTATS_ADD(pwr, enable_ps); + DEBUGFS_FWSTATS_ADD(pwr, disable_ps); + DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps); + /* skipping cont_miss_bcns_spread for now */ + DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons); + + DEBUGFS_FWSTATS_ADD(mic, rx_pkts); + DEBUGFS_FWSTATS_ADD(mic, calc_failure); + + DEBUGFS_FWSTATS_ADD(aes, encrypt_fail); + DEBUGFS_FWSTATS_ADD(aes, decrypt_fail); + DEBUGFS_FWSTATS_ADD(aes, encrypt_packets); + DEBUGFS_FWSTATS_ADD(aes, decrypt_packets); + DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt); + DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt); + + DEBUGFS_FWSTATS_ADD(event, heart_beat); + DEBUGFS_FWSTATS_ADD(event, calibration); + DEBUGFS_FWSTATS_ADD(event, rx_mismatch); + DEBUGFS_FWSTATS_ADD(event, rx_mem_empty); + DEBUGFS_FWSTATS_ADD(event, rx_pool); + DEBUGFS_FWSTATS_ADD(event, oom_late); + DEBUGFS_FWSTATS_ADD(event, phy_transmit_error); + DEBUGFS_FWSTATS_ADD(event, tx_stuck); + + DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts); + DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts); + DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime); + DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn); + DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn); + DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization); + DEBUGFS_FWSTATS_ADD(ps, upsd_utilization); + + DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop); + DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data); + DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data); + DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data); + DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); + + DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir); + DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir); + DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); + DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); + +out: + if (ret < 0) + wl1251_debugfs_delete_files(wl); + + return ret; +} + +void wl1251_debugfs_reset(struct wl1251 *wl) +{ + memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); + wl->stats.retry_count = 0; + wl->stats.excessive_retries = 0; +} + +int wl1251_debugfs_init(struct wl1251 *wl) +{ + int ret; + + wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); + + if (IS_ERR(wl->debugfs.rootdir)) { + ret = PTR_ERR(wl->debugfs.rootdir); + wl->debugfs.rootdir = NULL; + goto err; + } + + wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics", + wl->debugfs.rootdir); + + if (IS_ERR(wl->debugfs.fw_statistics)) { + ret = PTR_ERR(wl->debugfs.fw_statistics); + wl->debugfs.fw_statistics = NULL; + goto err_root; + } + + wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), + GFP_KERNEL); + + if (!wl->stats.fw_stats) { + ret = -ENOMEM; + goto err_fw; + } + + wl->stats.fw_stats_update = jiffies; + + ret = wl1251_debugfs_add_files(wl); + + if (ret < 0) + goto err_file; + + return 0; + +err_file: + kfree(wl->stats.fw_stats); + wl->stats.fw_stats = NULL; + +err_fw: + debugfs_remove(wl->debugfs.fw_statistics); + wl->debugfs.fw_statistics = NULL; + 
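+	/*
+	 * Error unwinding mirrors the setup order above: the statistics
+	 * buffer and the fw-statistics directory have already been
+	 * released by the time control falls through to err_root, which
+	 * removes the debugfs root directory before the error code is
+	 * returned.
+	 */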
+err_root: + debugfs_remove(wl->debugfs.rootdir); + wl->debugfs.rootdir = NULL; + +err: + return ret; +} + +void wl1251_debugfs_exit(struct wl1251 *wl) +{ + wl1251_debugfs_delete_files(wl); + + kfree(wl->stats.fw_stats); + wl->stats.fw_stats = NULL; + + debugfs_remove(wl->debugfs.fw_statistics); + wl->debugfs.fw_statistics = NULL; + + debugfs_remove(wl->debugfs.rootdir); + wl->debugfs.rootdir = NULL; + +} diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.h b/drivers/net/wireless/wl12xx/wl1251_debugfs.h new file mode 100644 index 0000000..6dc3d08 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.h @@ -0,0 +1,33 @@ +/* + * This file is part of wl1251 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef WL1251_DEBUGFS_H +#define WL1251_DEBUGFS_H + +#include "wl1251.h" + +int wl1251_debugfs_init(struct wl1251 *wl); +void wl1251_debugfs_exit(struct wl1251 *wl); +void wl1251_debugfs_reset(struct wl1251 *wl); + +#endif /* WL1251_DEBUGFS_H */ diff --git a/drivers/net/wireless/wl12xx/wl1251_event.c b/drivers/net/wireless/wl12xx/wl1251_event.c new file mode 100644 index 0000000..020d764 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_event.c @@ -0,0 +1,143 @@ +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wl1251.h" +#include "wl1251_reg.h" +#include "wl1251_io.h" +#include "wl1251_event.h" +#include "wl1251_ps.h" + +static int wl1251_event_scan_complete(struct wl1251 *wl, + struct event_mailbox *mbox) +{ + wl1251_debug(DEBUG_EVENT, "status: 0x%x, channels: %d", + mbox->scheduled_scan_status, + mbox->scheduled_scan_channels); + + if (wl->scanning) { + mutex_unlock(&wl->mutex); + ieee80211_scan_completed(wl->hw, false); + mutex_lock(&wl->mutex); + wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed"); + wl->scanning = false; + } + + return 0; +} + +static void wl1251_event_mbox_dump(struct event_mailbox *mbox) +{ + wl1251_debug(DEBUG_EVENT, "MBOX DUMP:"); + wl1251_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector); + wl1251_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask); +} + +static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox) +{ + int ret; + u32 vector; + + wl1251_event_mbox_dump(mbox); + + vector = mbox->events_vector & ~(mbox->events_mask); + wl1251_debug(DEBUG_EVENT, "vector: 0x%x", vector); + + if (vector & SCAN_COMPLETE_EVENT_ID) { + ret = wl1251_event_scan_complete(wl, mbox); + if (ret < 0) + return ret; + } + + if (vector & BSS_LOSE_EVENT_ID) { + wl1251_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); + + if (wl->psm_requested && wl->psm) { + ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE); + if (ret < 0) + return ret; + } + } + + if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID && wl->psm) { + wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT"); + + /* indicate to the stack, that beacons have been lost */ + ieee80211_beacon_loss(wl->vif); + } + + if (vector & REGAINED_BSS_EVENT_ID) { + if (wl->psm_requested) { + ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE); + if (ret < 0) + return ret; + } + } + + return 0; +} + +int wl1251_event_unmask(struct wl1251 *wl) +{ + int ret; + + ret = wl1251_acx_event_mbox_mask(wl, ~(wl->event_mask)); + if (ret < 0) + return ret; + + return 0; +} + +void wl1251_event_mbox_config(struct wl1251 *wl) +{ + wl->mbox_ptr[0] = wl1251_reg_read32(wl, REG_EVENT_MAILBOX_PTR); + wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); + + wl1251_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x", + wl->mbox_ptr[0], wl->mbox_ptr[1]); +} + +int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num) +{ + struct event_mailbox mbox; + int ret; + + wl1251_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num); + + if (mbox_num > 1) + return -EINVAL; + + /* first we read the mbox descriptor */ + wl1251_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox, + sizeof(struct event_mailbox)); + + /* process the descriptor */ + ret = wl1251_event_process(wl, &mbox); + if (ret < 0) + return ret; + + /* then we let the firmware know it can go on...*/ + wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK); + + return 0; +} diff --git a/drivers/net/wireless/wl12xx/wl1251_event.h b/drivers/net/wireless/wl12xx/wl1251_event.h new file mode 100644 index 0000000..be0ac54 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_event.h @@ -0,0 +1,121 @@ +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the 
terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1251_EVENT_H__ +#define __WL1251_EVENT_H__ + +/* + * Mbox events + * + * The event mechanism is based on a pair of event buffers (buffers A and + * B) at fixed locations in the target's memory. The host processes one + * buffer while the other buffer continues to collect events. If the host + * is not processing events, an interrupt is issued to signal that a buffer + * is ready. Once the host is done with processing events from one buffer, + * it signals the target (with an ACK interrupt) that the event buffer is + * free. + */ + +enum { + RESERVED1_EVENT_ID = BIT(0), + RESERVED2_EVENT_ID = BIT(1), + MEASUREMENT_START_EVENT_ID = BIT(2), + SCAN_COMPLETE_EVENT_ID = BIT(3), + CALIBRATION_COMPLETE_EVENT_ID = BIT(4), + ROAMING_TRIGGER_LOW_RSSI_EVENT_ID = BIT(5), + PS_REPORT_EVENT_ID = BIT(6), + SYNCHRONIZATION_TIMEOUT_EVENT_ID = BIT(7), + HEALTH_REPORT_EVENT_ID = BIT(8), + ACI_DETECTION_EVENT_ID = BIT(9), + DEBUG_REPORT_EVENT_ID = BIT(10), + MAC_STATUS_EVENT_ID = BIT(11), + DISCONNECT_EVENT_COMPLETE_ID = BIT(12), + JOIN_EVENT_COMPLETE_ID = BIT(13), + CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(14), + BSS_LOSE_EVENT_ID = BIT(15), + ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(16), + MEASUREMENT_COMPLETE_EVENT_ID = BIT(17), + AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(18), + SCHEDULED_SCAN_COMPLETE_EVENT_ID = BIT(19), + PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(20), + RESET_BSS_EVENT_ID = BIT(21), + REGAINED_BSS_EVENT_ID = BIT(22), + ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID = BIT(23), + ROAMING_TRIGGER_LOW_SNR_EVENT_ID = BIT(24), + ROAMING_TRIGGER_REGAINED_SNR_EVENT_ID = BIT(25), + + DBG_EVENT_ID = BIT(26), + BT_PTA_SENSE_EVENT_ID = BIT(27), + BT_PTA_PREDICTION_EVENT_ID = BIT(28), + BT_PTA_AVALANCHE_EVENT_ID = BIT(29), + + PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(30), + + EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff, +}; + +struct event_debug_report { + u8 debug_event_id; + u8 num_params; + u16 pad; + u32 report_1; + u32 report_2; + u32 report_3; +} __attribute__ ((packed)); + +struct event_mailbox { + u32 events_vector; + u32 events_mask; + u32 reserved_1; + u32 reserved_2; + + char average_rssi_level; + u8 ps_status; + u8 channel_switch_status; + u8 scheduled_scan_status; + + /* Channels scanned by the scheduled scan */ + u16 scheduled_scan_channels; + + /* If bit 0 is set -> target's fatal error */ + u16 health_report; + u16 bad_fft_counter; + u8 bt_pta_sense_info; + u8 bt_pta_protective_info; + u32 reserved; + u32 debug_report[2]; + + /* Number of FCS errors since last event */ + u32 fcs_err_counter; + + struct event_debug_report report; + u8 average_snr_level; + u8 padding[19]; +} __attribute__ ((packed)); + +int wl1251_event_unmask(struct wl1251 *wl); +void wl1251_event_mbox_config(struct wl1251 *wl); +int wl1251_event_handle(struct wl1251 *wl, u8 mbox); + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c new file mode 100644 index 0000000..5aad56e --- /dev/null +++ 
b/drivers/net/wireless/wl12xx/wl1251_init.c @@ -0,0 +1,424 @@ +/* + * This file is part of wl1251 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include + +#include "wl1251_init.h" +#include "wl12xx_80211.h" +#include "wl1251_acx.h" +#include "wl1251_cmd.h" +#include "wl1251_reg.h" + +int wl1251_hw_init_hwenc_config(struct wl1251 *wl) +{ + int ret; + + ret = wl1251_acx_feature_cfg(wl); + if (ret < 0) { + wl1251_warning("couldn't set feature config"); + return ret; + } + + ret = wl1251_acx_default_key(wl, wl->default_key); + if (ret < 0) { + wl1251_warning("couldn't set default key"); + return ret; + } + + return 0; +} + +int wl1251_hw_init_templates_config(struct wl1251 *wl) +{ + int ret; + u8 partial_vbm[PARTIAL_VBM_MAX]; + + /* send empty templates for fw memory reservation */ + ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, NULL, + sizeof(struct wl12xx_probe_req_template)); + if (ret < 0) + return ret; + + ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, NULL, + sizeof(struct wl12xx_null_data_template)); + if (ret < 0) + return ret; + + ret = wl1251_cmd_template_set(wl, CMD_PS_POLL, NULL, + sizeof(struct wl12xx_ps_poll_template)); + if (ret < 0) + return ret; + + ret = wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, NULL, + sizeof + (struct wl12xx_qos_null_data_template)); + if (ret < 0) + return ret; + + ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, NULL, + sizeof + (struct wl12xx_probe_resp_template)); + if (ret < 0) + return ret; + + ret = wl1251_cmd_template_set(wl, CMD_BEACON, NULL, + sizeof + (struct wl12xx_beacon_template)); + if (ret < 0) + return ret; + + /* tim templates, first reserve space then allocate an empty one */ + memset(partial_vbm, 0, PARTIAL_VBM_MAX); + ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, PARTIAL_VBM_MAX, 0); + if (ret < 0) + return ret; + + ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, 1, 0); + if (ret < 0) + return ret; + + return 0; +} + +int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter) +{ + int ret; + + ret = wl1251_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF); + if (ret < 0) + return ret; + + ret = wl1251_acx_rx_config(wl, config, filter); + if (ret < 0) + return ret; + + return 0; +} + +int wl1251_hw_init_phy_config(struct wl1251 *wl) +{ + int ret; + + ret = wl1251_acx_pd_threshold(wl); + if (ret < 0) + return ret; + + ret = wl1251_acx_slot(wl, DEFAULT_SLOT_TIME); + if (ret < 0) + return ret; + + ret = wl1251_acx_group_address_tbl(wl); + if (ret < 0) + return ret; + + ret = wl1251_acx_service_period_timeout(wl); + if (ret < 0) + return ret; + + ret = wl1251_acx_rts_threshold(wl, RTS_THRESHOLD_DEF); + if (ret < 0) + return ret; + + return 0; +} + +int wl1251_hw_init_beacon_filter(struct wl1251 *wl) +{ + int ret; + + /* disable beacon filtering at this stage */ + ret = wl1251_acx_beacon_filter_opt(wl, false); + 
if (ret < 0) + return ret; + + ret = wl1251_acx_beacon_filter_table(wl); + if (ret < 0) + return ret; + + return 0; +} + +int wl1251_hw_init_pta(struct wl1251 *wl) +{ + int ret; + + ret = wl1251_acx_sg_enable(wl); + if (ret < 0) + return ret; + + ret = wl1251_acx_sg_cfg(wl); + if (ret < 0) + return ret; + + return 0; +} + +int wl1251_hw_init_energy_detection(struct wl1251 *wl) +{ + int ret; + + ret = wl1251_acx_cca_threshold(wl); + if (ret < 0) + return ret; + + return 0; +} + +int wl1251_hw_init_beacon_broadcast(struct wl1251 *wl) +{ + int ret; + + ret = wl1251_acx_bcn_dtim_options(wl); + if (ret < 0) + return ret; + + return 0; +} + +int wl1251_hw_init_power_auth(struct wl1251 *wl) +{ + return wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM); +} + +int wl1251_hw_init_mem_config(struct wl1251 *wl) +{ + int ret; + + ret = wl1251_acx_mem_cfg(wl); + if (ret < 0) + return ret; + + wl->target_mem_map = kzalloc(sizeof(struct wl1251_acx_mem_map), + GFP_KERNEL); + if (!wl->target_mem_map) { + wl1251_error("couldn't allocate target memory map"); + return -ENOMEM; + } + + /* we now ask for the firmware built memory map */ + ret = wl1251_acx_mem_map(wl, wl->target_mem_map, + sizeof(struct wl1251_acx_mem_map)); + if (ret < 0) { + wl1251_error("couldn't retrieve firmware memory map"); + kfree(wl->target_mem_map); + wl->target_mem_map = NULL; + return ret; + } + + return 0; +} + +static int wl1251_hw_init_txq_fill(u8 qid, + struct acx_tx_queue_qos_config *config, + u32 num_blocks) +{ + config->qid = qid; + + switch (qid) { + case QOS_AC_BE: + config->high_threshold = + (QOS_TX_HIGH_BE_DEF * num_blocks) / 100; + config->low_threshold = + (QOS_TX_LOW_BE_DEF * num_blocks) / 100; + break; + case QOS_AC_BK: + config->high_threshold = + (QOS_TX_HIGH_BK_DEF * num_blocks) / 100; + config->low_threshold = + (QOS_TX_LOW_BK_DEF * num_blocks) / 100; + break; + case QOS_AC_VI: + config->high_threshold = + (QOS_TX_HIGH_VI_DEF * num_blocks) / 100; + config->low_threshold = + (QOS_TX_LOW_VI_DEF * num_blocks) / 100; + break; + case QOS_AC_VO: + config->high_threshold = + (QOS_TX_HIGH_VO_DEF * num_blocks) / 100; + config->low_threshold = + (QOS_TX_LOW_VO_DEF * num_blocks) / 100; + break; + default: + wl1251_error("Invalid TX queue id: %d", qid); + return -EINVAL; + } + + return 0; +} + +static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl) +{ + struct acx_tx_queue_qos_config *config; + struct wl1251_acx_mem_map *wl_mem_map = wl->target_mem_map; + int ret, i; + + wl1251_debug(DEBUG_ACX, "acx tx queue config"); + + config = kzalloc(sizeof(*config), GFP_KERNEL); + if (!config) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < MAX_NUM_OF_AC; i++) { + ret = wl1251_hw_init_txq_fill(i, config, + wl_mem_map->num_tx_mem_blocks); + if (ret < 0) + goto out; + + ret = wl1251_cmd_configure(wl, ACX_TX_QUEUE_CFG, + config, sizeof(*config)); + if (ret < 0) + goto out; + } + + wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE); + wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK); + wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI); + wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO); + +out: + kfree(config); + return ret; +} + +static int wl1251_hw_init_data_path_config(struct wl1251 *wl) +{ + int ret; + + /* asking for the data path parameters */ + wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp), + GFP_KERNEL); + if (!wl->data_path) { + wl1251_error("Couldnt allocate data path parameters"); + return -ENOMEM; + } + + ret = 
wl1251_acx_data_path_params(wl, wl->data_path); + if (ret < 0) { + kfree(wl->data_path); + wl->data_path = NULL; + return ret; + } + + return 0; +} + + +int wl1251_hw_init(struct wl1251 *wl) +{ + struct wl1251_acx_mem_map *wl_mem_map; + int ret; + + ret = wl1251_hw_init_hwenc_config(wl); + if (ret < 0) + return ret; + + /* Template settings */ + ret = wl1251_hw_init_templates_config(wl); + if (ret < 0) + return ret; + + /* Default memory configuration */ + ret = wl1251_hw_init_mem_config(wl); + if (ret < 0) + return ret; + + /* Default data path configuration */ + ret = wl1251_hw_init_data_path_config(wl); + if (ret < 0) + goto out_free_memmap; + + /* RX config */ + ret = wl1251_hw_init_rx_config(wl, + RX_CFG_PROMISCUOUS | RX_CFG_TSF, + RX_FILTER_OPTION_DEF); + /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS, + RX_FILTER_OPTION_FILTER_ALL); */ + if (ret < 0) + goto out_free_data_path; + + /* TX queues config */ + ret = wl1251_hw_init_tx_queue_config(wl); + if (ret < 0) + goto out_free_data_path; + + /* PHY layer config */ + ret = wl1251_hw_init_phy_config(wl); + if (ret < 0) + goto out_free_data_path; + + /* Initialize connection monitoring thresholds */ + ret = wl1251_acx_conn_monit_params(wl); + if (ret < 0) + goto out_free_data_path; + + /* Beacon filtering */ + ret = wl1251_hw_init_beacon_filter(wl); + if (ret < 0) + goto out_free_data_path; + + /* Bluetooth WLAN coexistence */ + ret = wl1251_hw_init_pta(wl); + if (ret < 0) + goto out_free_data_path; + + /* Energy detection */ + ret = wl1251_hw_init_energy_detection(wl); + if (ret < 0) + goto out_free_data_path; + + /* Beacons and boradcast settings */ + ret = wl1251_hw_init_beacon_broadcast(wl); + if (ret < 0) + goto out_free_data_path; + + /* Enable data path */ + ret = wl1251_cmd_data_path(wl, wl->channel, 1); + if (ret < 0) + goto out_free_data_path; + + /* Default power state */ + ret = wl1251_hw_init_power_auth(wl); + if (ret < 0) + goto out_free_data_path; + + wl_mem_map = wl->target_mem_map; + wl1251_info("%d tx blocks at 0x%x, %d rx blocks at 0x%x", + wl_mem_map->num_tx_mem_blocks, + wl->data_path->tx_control_addr, + wl_mem_map->num_rx_mem_blocks, + wl->data_path->rx_control_addr); + + return 0; + + out_free_data_path: + kfree(wl->data_path); + + out_free_memmap: + kfree(wl->target_mem_map); + + return ret; +} diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl12xx/wl1251_init.h new file mode 100644 index 0000000..269cefb --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_init.h @@ -0,0 +1,88 @@ +/* + * This file is part of wl1251 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1251_INIT_H__ +#define __WL1251_INIT_H__ + +#include "wl1251.h" + +enum { + /* best effort/legacy */ + AC_BE = 0, + + /* background */ + AC_BK = 1, + + /* video */ + AC_VI = 2, + + /* voice */ + AC_VO = 3, + + /* broadcast dummy access category */ + AC_BCAST = 4, + + NUM_ACCESS_CATEGORIES = 4 +}; + +/* following are defult values for the IE fields*/ +#define CWMIN_BK 15 +#define CWMIN_BE 15 +#define CWMIN_VI 7 +#define CWMIN_VO 3 +#define CWMAX_BK 1023 +#define CWMAX_BE 63 +#define CWMAX_VI 15 +#define CWMAX_VO 7 + +/* slot number setting to start transmission at PIFS interval */ +#define AIFS_PIFS 1 + +/* + * slot number setting to start transmission at DIFS interval - normal DCF + * access + */ +#define AIFS_DIFS 2 + +#define AIFSN_BK 7 +#define AIFSN_BE 3 +#define AIFSN_VI AIFS_PIFS +#define AIFSN_VO AIFS_PIFS +#define TXOP_BK 0 +#define TXOP_BE 0 +#define TXOP_VI 3008 +#define TXOP_VO 1504 + +int wl1251_hw_init_hwenc_config(struct wl1251 *wl); +int wl1251_hw_init_templates_config(struct wl1251 *wl); +int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter); +int wl1251_hw_init_phy_config(struct wl1251 *wl); +int wl1251_hw_init_beacon_filter(struct wl1251 *wl); +int wl1251_hw_init_pta(struct wl1251 *wl); +int wl1251_hw_init_energy_detection(struct wl1251 *wl); +int wl1251_hw_init_beacon_broadcast(struct wl1251 *wl); +int wl1251_hw_init_power_auth(struct wl1251 *wl); +int wl1251_hw_init_mem_config(struct wl1251 *wl); +int wl1251_hw_init(struct wl1251 *wl); + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1251_io.c b/drivers/net/wireless/wl12xx/wl1251_io.c new file mode 100644 index 0000000..f1c232e --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_io.c @@ -0,0 +1,196 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wl1251.h" +#include "wl1251_reg.h" +#include "wl1251_io.h" + +/* FIXME: this is static data nowadays and the table can be removed */ +static enum wl12xx_acx_int_reg wl1251_io_reg_table[ACX_REG_TABLE_LEN] = { + [ACX_REG_INTERRUPT_TRIG] = (REGISTERS_BASE + 0x0474), + [ACX_REG_INTERRUPT_TRIG_H] = (REGISTERS_BASE + 0x0478), + [ACX_REG_INTERRUPT_MASK] = (REGISTERS_BASE + 0x0494), + [ACX_REG_HINT_MASK_SET] = (REGISTERS_BASE + 0x0498), + [ACX_REG_HINT_MASK_CLR] = (REGISTERS_BASE + 0x049C), + [ACX_REG_INTERRUPT_NO_CLEAR] = (REGISTERS_BASE + 0x04B0), + [ACX_REG_INTERRUPT_CLEAR] = (REGISTERS_BASE + 0x04A4), + [ACX_REG_INTERRUPT_ACK] = (REGISTERS_BASE + 0x04A8), + [ACX_REG_SLV_SOFT_RESET] = (REGISTERS_BASE + 0x0000), + [ACX_REG_EE_START] = (REGISTERS_BASE + 0x080C), + [ACX_REG_ECPU_CONTROL] = (REGISTERS_BASE + 0x0804) +}; + +static int wl1251_translate_reg_addr(struct wl1251 *wl, int addr) +{ + /* If the address is lower than REGISTERS_BASE, it means that this is + * a chip-specific register address, so look it up in the registers + * table */ + if (addr < REGISTERS_BASE) { + /* Make sure we don't go over the table */ + if (addr >= ACX_REG_TABLE_LEN) { + wl1251_error("address out of range (%d)", addr); + return -EINVAL; + } + addr = wl1251_io_reg_table[addr]; + } + + return addr - wl->physical_reg_addr + wl->virtual_reg_addr; +} + +static int wl1251_translate_mem_addr(struct wl1251 *wl, int addr) +{ + return addr - wl->physical_mem_addr + wl->virtual_mem_addr; +} + +void wl1251_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len) +{ + int physical; + + physical = wl1251_translate_mem_addr(wl, addr); + + wl->if_ops->read(wl, physical, buf, len); +} + +void wl1251_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len) +{ + int physical; + + physical = wl1251_translate_mem_addr(wl, addr); + + wl->if_ops->write(wl, physical, buf, len); +} + +u32 wl1251_mem_read32(struct wl1251 *wl, int addr) +{ + return wl1251_read32(wl, wl1251_translate_mem_addr(wl, addr)); +} + +void wl1251_mem_write32(struct wl1251 *wl, int addr, u32 val) +{ + wl1251_write32(wl, wl1251_translate_mem_addr(wl, addr), val); +} + +u32 wl1251_reg_read32(struct wl1251 *wl, int addr) +{ + return wl1251_read32(wl, wl1251_translate_reg_addr(wl, addr)); +} + +void wl1251_reg_write32(struct wl1251 *wl, int addr, u32 val) +{ + wl1251_write32(wl, wl1251_translate_reg_addr(wl, addr), val); +} + +/* Set the partitions to access the chip addresses. + * + * There are two VIRTUAL partitions (the memory partition and the + * registers partition), which are mapped to two different areas of the + * PHYSICAL (hardware) memory. This function also makes other checks to + * ensure that the partitions are not overlapping. In the diagram below, the + * memory partition comes before the register partition, but the opposite is + * also supported. + * + * PHYSICAL address + * space + * + * | | + * ...+----+--> mem_start + * VIRTUAL address ... | | + * space ... | | [PART_0] + * ... | | + * 0x00000000 <--+----+... ...+----+--> mem_start + mem_size + * | | ... | | + * |MEM | ... | | + * | | ... | | + * part_size <--+----+... | | {unused area) + * | | ... | | + * |REG | ... | | + * part_size | | ... | | + * + <--+----+... ...+----+--> reg_start + * reg_size ... | | + * ... | | [PART_1] + * ... 
| | + * ...+----+--> reg_start + reg_size + * | | + * + */ +void wl1251_set_partition(struct wl1251 *wl, + u32 mem_start, u32 mem_size, + u32 reg_start, u32 reg_size) +{ + struct wl1251_partition partition[2]; + + wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", + mem_start, mem_size); + wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", + reg_start, reg_size); + + /* Make sure that the two partitions together don't exceed the + * address range */ + if ((mem_size + reg_size) > HW_ACCESS_MEMORY_MAX_RANGE) { + wl1251_debug(DEBUG_SPI, "Total size exceeds maximum virtual" + " address range. Truncating partition[0]."); + mem_size = HW_ACCESS_MEMORY_MAX_RANGE - reg_size; + wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", + mem_start, mem_size); + wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", + reg_start, reg_size); + } + + if ((mem_start < reg_start) && + ((mem_start + mem_size) > reg_start)) { + /* Guarantee that the memory partition doesn't overlap the + * registers partition */ + wl1251_debug(DEBUG_SPI, "End of partition[0] is " + "overlapping partition[1]. Adjusted."); + mem_size = reg_start - mem_start; + wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", + mem_start, mem_size); + wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", + reg_start, reg_size); + } else if ((reg_start < mem_start) && + ((reg_start + reg_size) > mem_start)) { + /* Guarantee that the register partition doesn't overlap the + * memory partition */ + wl1251_debug(DEBUG_SPI, "End of partition[1] is" + " overlapping partition[0]. Adjusted."); + reg_size = mem_start - reg_start; + wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", + mem_start, mem_size); + wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", + reg_start, reg_size); + } + + partition[0].start = mem_start; + partition[0].size = mem_size; + partition[1].start = reg_start; + partition[1].size = reg_size; + + wl->physical_mem_addr = mem_start; + wl->physical_reg_addr = reg_start; + + wl->virtual_mem_addr = 0; + wl->virtual_reg_addr = mem_size; + + wl->if_ops->write(wl, HW_ACCESS_PART0_SIZE_ADDR, partition, + sizeof(partition)); +} diff --git a/drivers/net/wireless/wl12xx/wl1251_io.h b/drivers/net/wireless/wl12xx/wl1251_io.h new file mode 100644 index 0000000..b89d2ac --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_io.h @@ -0,0 +1,64 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2008 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ +#ifndef __WL1251_IO_H__ +#define __WL1251_IO_H__ + +#include "wl1251.h" + +#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 + +#define HW_ACCESS_PART0_SIZE_ADDR 0x1FFC0 +#define HW_ACCESS_PART0_START_ADDR 0x1FFC4 +#define HW_ACCESS_PART1_SIZE_ADDR 0x1FFC8 +#define HW_ACCESS_PART1_START_ADDR 0x1FFCC + +#define HW_ACCESS_REGISTER_SIZE 4 + +#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 + +static inline u32 wl1251_read32(struct wl1251 *wl, int addr) +{ + u32 response; + + wl->if_ops->read(wl, addr, &response, sizeof(u32)); + + return response; +} + +static inline void wl1251_write32(struct wl1251 *wl, int addr, u32 val) +{ + wl->if_ops->write(wl, addr, &val, sizeof(u32)); +} + +/* Memory target IO, address is translated to partition 0 */ +void wl1251_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len); +void wl1251_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len); +u32 wl1251_mem_read32(struct wl1251 *wl, int addr); +void wl1251_mem_write32(struct wl1251 *wl, int addr, u32 val); +/* Registers IO */ +u32 wl1251_reg_read32(struct wl1251 *wl, int addr); +void wl1251_reg_write32(struct wl1251 *wl, int addr, u32 val); + +void wl1251_set_partition(struct wl1251 *wl, + u32 part_start, u32 part_size, + u32 reg_start, u32 reg_size); + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c new file mode 100644 index 0000000..0ef2d9c --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_main.c @@ -0,0 +1,1362 @@ +/* + * This file is part of wl1251 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,28)) +#include +#endif +#include +#include +#include + +#include "wl1251.h" +#include "wl12xx_80211.h" +#include "wl1251_reg.h" +#include "wl1251_io.h" +#include "wl1251_cmd.h" +#include "wl1251_event.h" +#include "wl1251_tx.h" +#include "wl1251_rx.h" +#include "wl1251_ps.h" +#include "wl1251_init.h" +#include "wl1251_debugfs.h" +#include "wl1251_boot.h" + +void wl1251_enable_interrupts(struct wl1251 *wl) +{ + wl->if_ops->enable_irq(wl); +} + +void wl1251_disable_interrupts(struct wl1251 *wl) +{ + wl->if_ops->disable_irq(wl); +} + +static void wl1251_power_off(struct wl1251 *wl) +{ + wl->set_power(false); +} + +static void wl1251_power_on(struct wl1251 *wl) +{ + wl->set_power(true); +} + +static int wl1251_fetch_firmware(struct wl1251 *wl) +{ + const struct firmware *fw; + struct device *dev = wiphy_dev(wl->hw->wiphy); + int ret; + + ret = request_firmware(&fw, WL1251_FW_NAME, dev); + + if (ret < 0) { + wl1251_error("could not get firmware: %d", ret); + return ret; + } + + if (fw->size % 4) { + wl1251_error("firmware size is not multiple of 32 bits: %zu", + fw->size); + ret = -EILSEQ; + goto out; + } + + wl->fw_len = fw->size; + wl->fw = vmalloc(wl->fw_len); + + if (!wl->fw) { + wl1251_error("could not allocate memory for the firmware"); + ret = -ENOMEM; + goto out; + } + + memcpy(wl->fw, fw->data, wl->fw_len); + + ret = 0; + +out: + release_firmware(fw); + + return ret; +} + +static int wl1251_fetch_nvs(struct wl1251 *wl) +{ + const struct firmware *fw; + struct device *dev = wiphy_dev(wl->hw->wiphy); + int ret; + + ret = request_firmware(&fw, WL1251_NVS_NAME, dev); + + if (ret < 0) { + wl1251_error("could not get nvs file: %d", ret); + return ret; + } + + if (fw->size % 4) { + wl1251_error("nvs size is not multiple of 32 bits: %zu", + fw->size); + ret = -EILSEQ; + goto out; + } + + wl->nvs_len = fw->size; + wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL); + + if (!wl->nvs) { + wl1251_error("could not allocate memory for the nvs file"); + ret = -ENOMEM; + goto out; + } + + memcpy(wl->nvs, fw->data, wl->nvs_len); + + ret = 0; + +out: + release_firmware(fw); + + return ret; +} + +static void wl1251_fw_wakeup(struct wl1251 *wl) +{ + u32 elp_reg; + + elp_reg = ELPCTRL_WAKE_UP; + wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); + elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); + + if (!(elp_reg & ELPCTRL_WLAN_READY)) + wl1251_warning("WLAN not ready"); +} + +static int wl1251_chip_wakeup(struct wl1251 *wl) +{ + int ret = 0; + + wl1251_power_on(wl); + msleep(WL1251_POWER_ON_SLEEP); + wl->if_ops->reset(wl); + + /* We don't need a real memory partition here, because we only want + * to use the registers at this point. */ + wl1251_set_partition(wl, + 0x00000000, + 0x00000000, + REGISTERS_BASE, + REGISTERS_DOWN_SIZE); + + /* ELP module wake up */ + wl1251_fw_wakeup(wl); + + /* whal_FwCtrl_BootSm() */ + + /* 0. read chip id from CHIP_ID */ + wl->chip_id = wl1251_reg_read32(wl, CHIP_ID_B); + + /* 1. 
check if chip id is valid */ + + switch (wl->chip_id) { + case CHIP_ID_1251_PG12: + wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)", + wl->chip_id); + break; + case CHIP_ID_1251_PG11: + wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG11)", + wl->chip_id); + break; + case CHIP_ID_1251_PG10: + default: + wl1251_error("unsupported chip id: 0x%x", wl->chip_id); + ret = -ENODEV; + goto out; + } + + if (wl->fw == NULL) { + ret = wl1251_fetch_firmware(wl); + if (ret < 0) + goto out; + } + + /* No NVS from netlink, try to get it from the filesystem */ + if (wl->nvs == NULL) { + ret = wl1251_fetch_nvs(wl); + if (ret < 0) + goto out; + } + +out: + return ret; +} + +#define WL1251_IRQ_LOOP_COUNT 10 +static void wl1251_irq_work(struct work_struct *work) +{ + u32 intr, ctr = WL1251_IRQ_LOOP_COUNT; + struct wl1251 *wl = + container_of(work, struct wl1251, irq_work); + int ret; + + mutex_lock(&wl->mutex); + + wl1251_debug(DEBUG_IRQ, "IRQ work"); + + if (wl->state == WL1251_STATE_OFF) + goto out; + + ret = wl1251_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1251_ACX_INTR_ALL); + + intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); + wl1251_debug(DEBUG_IRQ, "intr: 0x%x", intr); + + do { + if (wl->data_path) { + wl->rx_counter = wl1251_mem_read32( + wl, wl->data_path->rx_control_addr); + + /* We handle a frmware bug here */ + switch ((wl->rx_counter - wl->rx_handled) & 0xf) { + case 0: + wl1251_debug(DEBUG_IRQ, + "RX: FW and host in sync"); + intr &= ~WL1251_ACX_INTR_RX0_DATA; + intr &= ~WL1251_ACX_INTR_RX1_DATA; + break; + case 1: + wl1251_debug(DEBUG_IRQ, "RX: FW +1"); + intr |= WL1251_ACX_INTR_RX0_DATA; + intr &= ~WL1251_ACX_INTR_RX1_DATA; + break; + case 2: + wl1251_debug(DEBUG_IRQ, "RX: FW +2"); + intr |= WL1251_ACX_INTR_RX0_DATA; + intr |= WL1251_ACX_INTR_RX1_DATA; + break; + default: + wl1251_warning( + "RX: FW and host out of sync: %d", + wl->rx_counter - wl->rx_handled); + break; + } + + wl->rx_handled = wl->rx_counter; + + wl1251_debug(DEBUG_IRQ, "RX counter: %d", + wl->rx_counter); + } + + intr &= wl->intr_mask; + + if (intr == 0) { + wl1251_debug(DEBUG_IRQ, "INTR is 0"); + goto out_sleep; + } + + if (intr & WL1251_ACX_INTR_RX0_DATA) { + wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX0_DATA"); + wl1251_rx(wl); + } + + if (intr & WL1251_ACX_INTR_RX1_DATA) { + wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX1_DATA"); + wl1251_rx(wl); + } + + if (intr & WL1251_ACX_INTR_TX_RESULT) { + wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT"); + wl1251_tx_complete(wl); + } + + if (intr & (WL1251_ACX_INTR_EVENT_A | + WL1251_ACX_INTR_EVENT_B)) { + wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT (0x%x)", + intr); + if (intr & WL1251_ACX_INTR_EVENT_A) + wl1251_event_handle(wl, 0); + else + wl1251_event_handle(wl, 1); + } + + if (intr & WL1251_ACX_INTR_INIT_COMPLETE) + wl1251_debug(DEBUG_IRQ, + "WL1251_ACX_INTR_INIT_COMPLETE"); + + if (--ctr == 0) + break; + + intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); + } while (intr); + +out_sleep: + wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask)); + wl1251_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel, + u16 beacon_interval, u8 dtim_period) +{ + int ret; + + ret = wl1251_acx_frame_rates(wl, DEFAULT_HW_GEN_TX_RATE, + DEFAULT_HW_GEN_MODULATION_TYPE, + wl->tx_mgmt_frm_rate, + wl->tx_mgmt_frm_mod); + if (ret < 0) + goto out; + + + ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval, + dtim_period); + if (ret < 0) 
+ goto out; + + /* + * FIXME: we should wait for JOIN_EVENT_COMPLETE_ID but to simplify + * locking we just sleep instead, for now + */ + msleep(10); + +out: + return ret; +} + +static void wl1251_filter_work(struct work_struct *work) +{ + struct wl1251 *wl = + container_of(work, struct wl1251, filter_work); + int ret; + + mutex_lock(&wl->mutex); + + if (wl->state == WL1251_STATE_OFF) + goto out; + + ret = wl1251_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl1251_join(wl, wl->bss_type, wl->channel, wl->beacon_int, + wl->dtim_period); + if (ret < 0) + goto out_sleep; + +out_sleep: + wl1251_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct wl1251 *wl = hw->priv; + + skb_queue_tail(&wl->tx_queue, skb); + + /* + * The chip specific setup must run before the first TX packet - + * before that, the tx_work will not be initialized! + */ + + ieee80211_queue_work(wl->hw, &wl->tx_work); + + /* + * The workqueue is slow to process the tx_queue and we need stop + * the queue here, otherwise the queue will get too long. + */ + if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) { + wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues"); + ieee80211_stop_queues(wl->hw); + + /* + * FIXME: this is racy, the variable is not properly + * protected. Maybe fix this by removing the stupid + * variable altogether and checking the real queue state? + */ + wl->tx_queue_stopped = true; + } + + return NETDEV_TX_OK; +} + +static int wl1251_op_start(struct ieee80211_hw *hw) +{ + struct wl1251 *wl = hw->priv; + int ret = 0; + + wl1251_debug(DEBUG_MAC80211, "mac80211 start"); + + mutex_lock(&wl->mutex); + + if (wl->state != WL1251_STATE_OFF) { + wl1251_error("cannot start because not in off state: %d", + wl->state); + ret = -EBUSY; + goto out; + } + + ret = wl1251_chip_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl1251_boot(wl); + if (ret < 0) + goto out; + + ret = wl1251_hw_init(wl); + if (ret < 0) + goto out; + + ret = wl1251_acx_station_id(wl); + if (ret < 0) + goto out; + + wl->state = WL1251_STATE_ON; + + wl1251_info("firmware booted (%s)", wl->fw_ver); + +out: + if (ret < 0) + wl1251_power_off(wl); + + mutex_unlock(&wl->mutex); + + return ret; +} + +static void wl1251_op_stop(struct ieee80211_hw *hw) +{ + struct wl1251 *wl = hw->priv; + + wl1251_info("down"); + + wl1251_debug(DEBUG_MAC80211, "mac80211 stop"); + + mutex_lock(&wl->mutex); + + WARN_ON(wl->state != WL1251_STATE_ON); + + if (wl->scanning) { + mutex_unlock(&wl->mutex); + ieee80211_scan_completed(wl->hw, true); + mutex_lock(&wl->mutex); + wl->scanning = false; + } + + wl->state = WL1251_STATE_OFF; + + wl1251_disable_interrupts(wl); + + mutex_unlock(&wl->mutex); + + cancel_work_sync(&wl->irq_work); + cancel_work_sync(&wl->tx_work); + cancel_work_sync(&wl->filter_work); + + mutex_lock(&wl->mutex); + + /* let's notify MAC80211 about the remaining pending TX frames */ + wl1251_tx_flush(wl); + wl1251_power_off(wl); + + memset(wl->bssid, 0, ETH_ALEN); + wl->listen_int = 1; + wl->bss_type = MAX_BSS_TYPE; + + wl->data_in_count = 0; + wl->rx_counter = 0; + wl->rx_handled = 0; + wl->rx_current_buffer = 0; + wl->rx_last_id = 0; + wl->next_tx_complete = 0; + wl->elp = false; + wl->psm = 0; + wl->tx_queue_stopped = false; + wl->power_level = WL1251_DEFAULT_POWER_LEVEL; + wl->channel = WL1251_DEFAULT_CHANNEL; + + wl1251_debugfs_reset(wl); + + mutex_unlock(&wl->mutex); +} + +static int wl1251_op_add_interface(struct ieee80211_hw *hw, + struct 
ieee80211_vif *vif) +{ + struct wl1251 *wl = hw->priv; + int ret = 0; + + wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", + vif->type, vif->addr); + + mutex_lock(&wl->mutex); + if (wl->vif) { + ret = -EBUSY; + goto out; + } + + wl->vif = vif; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + wl->bss_type = BSS_TYPE_STA_BSS; + break; + case NL80211_IFTYPE_ADHOC: + wl->bss_type = BSS_TYPE_IBSS; + break; + default: + ret = -EOPNOTSUPP; + goto out; + } + + if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) { + memcpy(wl->mac_addr, vif->addr, ETH_ALEN); + SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); + ret = wl1251_acx_station_id(wl); + if (ret < 0) + goto out; + } + +out: + mutex_unlock(&wl->mutex); + return ret; +} + +static void wl1251_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct wl1251 *wl = hw->priv; + + mutex_lock(&wl->mutex); + wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface"); + wl->vif = NULL; + mutex_unlock(&wl->mutex); +} + +static int wl1251_build_qos_null_data(struct wl1251 *wl) +{ + struct ieee80211_qos_hdr template; + + memset(&template, 0, sizeof(template)); + + memcpy(template.addr1, wl->bssid, ETH_ALEN); + memcpy(template.addr2, wl->mac_addr, ETH_ALEN); + memcpy(template.addr3, wl->bssid, ETH_ALEN); + + template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_NULLFUNC | + IEEE80211_FCTL_TODS); + + /* FIXME: not sure what priority to use here */ + template.qos_ctrl = cpu_to_le16(0); + + return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template, + sizeof(template)); +} + +static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct wl1251 *wl = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + int channel, ret = 0; + + channel = ieee80211_frequency_to_channel(conf->channel->center_freq); + + wl1251_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d", + channel, + conf->flags & IEEE80211_CONF_PS ? "on" : "off", + conf->power_level); + + mutex_lock(&wl->mutex); + + ret = wl1251_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + if (channel != wl->channel) { + wl->channel = channel; + + ret = wl1251_join(wl, wl->bss_type, wl->channel, + wl->beacon_int, wl->dtim_period); + if (ret < 0) + goto out_sleep; + } + + if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) { + wl1251_debug(DEBUG_PSM, "psm enabled"); + + wl->psm_requested = true; + + wl->dtim_period = conf->ps_dtim_period; + + ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int, + wl->dtim_period); + + /* + * mac80211 enables PSM only if we're already associated. 
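+	 * wl->psm_requested is remembered here so that the event handlers in
+	 * wl1251_event.c can drop back to STATION_ACTIVE_MODE on BSS_LOSE and
+	 * re-enter STATION_POWER_SAVE_MODE on REGAINED_BSS while the flag is
+	 * still set.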
+ */ + ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE); + if (ret < 0) + goto out_sleep; + } else if (!(conf->flags & IEEE80211_CONF_PS) && + wl->psm_requested) { + wl1251_debug(DEBUG_PSM, "psm disabled"); + + wl->psm_requested = false; + + if (wl->psm) { + ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE); + if (ret < 0) + goto out_sleep; + } + } + + if (conf->power_level != wl->power_level) { + ret = wl1251_acx_tx_power(wl, conf->power_level); + if (ret < 0) + goto out_sleep; + + wl->power_level = conf->power_level; + } + +out_sleep: + wl1251_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +#define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \ + FIF_ALLMULTI | \ + FIF_FCSFAIL | \ + FIF_BCN_PRBRESP_PROMISC | \ + FIF_CONTROL | \ + FIF_OTHER_BSS) + +static void wl1251_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed, + unsigned int *total,u64 multicast) +{ + struct wl1251 *wl = hw->priv; + + wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter"); + + *total &= WL1251_SUPPORTED_FILTERS; + changed &= WL1251_SUPPORTED_FILTERS; + + if (changed == 0) + /* no filters which we support changed */ + return; + + /* FIXME: wl->rx_config and wl->rx_filter are not protected */ + + wl->rx_config = WL1251_DEFAULT_RX_CONFIG; + wl->rx_filter = WL1251_DEFAULT_RX_FILTER; + + if (*total & FIF_PROMISC_IN_BSS) { + wl->rx_config |= CFG_BSSID_FILTER_EN; + wl->rx_config |= CFG_RX_ALL_GOOD; + } + if (*total & FIF_ALLMULTI) + /* + * CFG_MC_FILTER_EN in rx_config needs to be 0 to receive + * all multicast frames + */ + wl->rx_config &= ~CFG_MC_FILTER_EN; + if (*total & FIF_FCSFAIL) + wl->rx_filter |= CFG_RX_FCS_ERROR; + if (*total & FIF_BCN_PRBRESP_PROMISC) { + wl->rx_config &= ~CFG_BSSID_FILTER_EN; + wl->rx_config &= ~CFG_SSID_FILTER_EN; + } + if (*total & FIF_CONTROL) + wl->rx_filter |= CFG_RX_CTL_EN; + if (*total & FIF_OTHER_BSS) + wl->rx_filter &= ~CFG_BSSID_FILTER_EN; + + /* + * FIXME: workqueues need to be properly cancelled on stop(), for + * now let's just disable changing the filter settings. They will + * be updated any on config(). 
+ */ + /* schedule_work(&wl->filter_work); */ +} + +/* HW encryption */ +static int wl1251_set_key_type(struct wl1251 *wl, + struct wl1251_cmd_set_keys *key, + enum set_key_cmd cmd, + struct ieee80211_key_conf *mac80211_key, + const u8 *addr) +{ + switch (mac80211_key->alg) { + case ALG_WEP: + if (is_broadcast_ether_addr(addr)) + key->key_type = KEY_WEP_DEFAULT; + else + key->key_type = KEY_WEP_ADDR; + + mac80211_key->hw_key_idx = mac80211_key->keyidx; + break; + case ALG_TKIP: + if (is_broadcast_ether_addr(addr)) + key->key_type = KEY_TKIP_MIC_GROUP; + else + key->key_type = KEY_TKIP_MIC_PAIRWISE; + + mac80211_key->hw_key_idx = mac80211_key->keyidx; + break; + case ALG_CCMP: + if (is_broadcast_ether_addr(addr)) + key->key_type = KEY_AES_GROUP; + else + key->key_type = KEY_AES_PAIRWISE; + mac80211_key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + default: + wl1251_error("Unknown key algo 0x%x", mac80211_key->alg); + return -EOPNOTSUPP; + } + + return 0; +} + +static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct wl1251 *wl = hw->priv; + struct wl1251_cmd_set_keys *wl_cmd; + const u8 *addr; + int ret; + + static const u8 bcast_addr[ETH_ALEN] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + + wl1251_debug(DEBUG_MAC80211, "mac80211 set key"); + + wl_cmd = kzalloc(sizeof(*wl_cmd), GFP_KERNEL); + if (!wl_cmd) { + ret = -ENOMEM; + goto out; + } + + addr = sta ? sta->addr : bcast_addr; + + wl1251_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd); + wl1251_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN); + wl1251_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", + key->alg, key->keyidx, key->keylen, key->flags); + wl1251_dump(DEBUG_CRYPT, "KEY: ", key->key, key->keylen); + + if (is_zero_ether_addr(addr)) { + /* We dont support TX only encryption */ + ret = -EOPNOTSUPP; + goto out; + } + + mutex_lock(&wl->mutex); + + ret = wl1251_ps_elp_wakeup(wl); + if (ret < 0) + goto out_unlock; + + switch (cmd) { + case SET_KEY: + wl_cmd->key_action = KEY_ADD_OR_REPLACE; + break; + case DISABLE_KEY: + wl_cmd->key_action = KEY_REMOVE; + break; + default: + wl1251_error("Unsupported key cmd 0x%x", cmd); + break; + } + + ret = wl1251_set_key_type(wl, wl_cmd, cmd, key, addr); + if (ret < 0) { + wl1251_error("Set KEY type failed"); + goto out_sleep; + } + + if (wl_cmd->key_type != KEY_WEP_DEFAULT) + memcpy(wl_cmd->addr, addr, ETH_ALEN); + + if ((wl_cmd->key_type == KEY_TKIP_MIC_GROUP) || + (wl_cmd->key_type == KEY_TKIP_MIC_PAIRWISE)) { + /* + * We get the key in the following form: + * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes) + * but the target is expecting: + * TKIP - RX MIC - TX MIC + */ + memcpy(wl_cmd->key, key->key, 16); + memcpy(wl_cmd->key + 16, key->key + 24, 8); + memcpy(wl_cmd->key + 24, key->key + 16, 8); + + } else { + memcpy(wl_cmd->key, key->key, key->keylen); + } + wl_cmd->key_size = key->keylen; + + wl_cmd->id = key->keyidx; + wl_cmd->ssid_profile = 0; + + wl1251_dump(DEBUG_CRYPT, "TARGET KEY: ", wl_cmd, sizeof(*wl_cmd)); + + ret = wl1251_cmd_send(wl, CMD_SET_KEYS, wl_cmd, sizeof(*wl_cmd)); + if (ret < 0) { + wl1251_warning("could not set keys"); + goto out_sleep; + } + +out_sleep: + wl1251_ps_elp_sleep(wl); + +out_unlock: + mutex_unlock(&wl->mutex); + +out: + kfree(wl_cmd); + + return ret; +} + +static int wl1251_op_hw_scan(struct ieee80211_hw *hw, + struct cfg80211_scan_request *req) +{ + struct wl1251 *wl = hw->priv; + struct sk_buff *skb; + size_t ssid_len = 
0; + u8 *ssid = NULL; + int ret; + + wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan"); + + if (req->n_ssids) { + ssid = req->ssids[0].ssid; + ssid_len = req->ssids[0].ssid_len; + } + + mutex_lock(&wl->mutex); + + if (wl->scanning) { + wl1251_debug(DEBUG_SCAN, "scan already in progress"); + ret = -EINVAL; + goto out; + } + + ret = wl1251_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len, + req->ie, req->ie_len); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data, + skb->len); + dev_kfree_skb(skb); + if (ret < 0) + goto out_sleep; + + ret = wl1251_cmd_trigger_scan_to(wl, 0); + if (ret < 0) + goto out_sleep; + + wl->scanning = true; + + ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels, + req->n_channels, WL1251_SCAN_NUM_PROBES); + if (ret < 0) { + wl->scanning = false; + goto out_sleep; + } + +out_sleep: + wl1251_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static int wl1251_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct wl1251 *wl = hw->priv; + int ret; + + mutex_lock(&wl->mutex); + + ret = wl1251_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl1251_acx_rts_threshold(wl, (u16) value); + if (ret < 0) + wl1251_warning("wl1251_op_set_rts_threshold failed: %d", ret); + + wl1251_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct wl1251 *wl = hw->priv; + struct sk_buff *beacon, *skb; + int ret; + + wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed"); + + mutex_lock(&wl->mutex); + + ret = wl1251_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + if (changed & BSS_CHANGED_BSSID) { + memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); + + skb = ieee80211_nullfunc_get(wl->hw, wl->vif); + if (!skb) + goto out_sleep; + + ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, + skb->data, skb->len); + dev_kfree_skb(skb); + if (ret < 0) + goto out_sleep; + + ret = wl1251_build_qos_null_data(wl); + if (ret < 0) + goto out; + + if (wl->bss_type != BSS_TYPE_IBSS) { + ret = wl1251_join(wl, wl->bss_type, wl->channel, + wl->beacon_int, wl->dtim_period); + if (ret < 0) + goto out_sleep; + } + } + + if (changed & BSS_CHANGED_ASSOC) { + if (bss_conf->assoc) { + wl->beacon_int = bss_conf->beacon_int; + + skb = ieee80211_pspoll_get(wl->hw, wl->vif); + if (!skb) + goto out_sleep; + + ret = wl1251_cmd_template_set(wl, CMD_PS_POLL, + skb->data, + skb->len); + dev_kfree_skb(skb); + if (ret < 0) + goto out_sleep; + + ret = wl1251_acx_aid(wl, bss_conf->aid); + if (ret < 0) + goto out_sleep; + } else { + /* use defaults when not associated */ + wl->beacon_int = WL1251_DEFAULT_BEACON_INT; + wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD; + } + } + if (changed & BSS_CHANGED_ERP_SLOT) { + if (bss_conf->use_short_slot) + ret = wl1251_acx_slot(wl, SLOT_TIME_SHORT); + else + ret = wl1251_acx_slot(wl, SLOT_TIME_LONG); + if (ret < 0) { + wl1251_warning("Set slot time failed %d", ret); + goto out_sleep; + } + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + if (bss_conf->use_short_preamble) + wl1251_acx_set_preamble(wl, ACX_PREAMBLE_SHORT); + else + wl1251_acx_set_preamble(wl, ACX_PREAMBLE_LONG); + } + + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + if (bss_conf->use_cts_prot) + ret = wl1251_acx_cts_protect(wl, CTSPROTECT_ENABLE); + else + ret = wl1251_acx_cts_protect(wl, 
CTSPROTECT_DISABLE); + if (ret < 0) { + wl1251_warning("Set ctsprotect failed %d", ret); + goto out_sleep; + } + } + + if (changed & BSS_CHANGED_BEACON) { + beacon = ieee80211_beacon_get(hw, vif); + ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, + beacon->len); + + if (ret < 0) { + dev_kfree_skb(beacon); + goto out_sleep; + } + + ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data, + beacon->len); + + dev_kfree_skb(beacon); + + if (ret < 0) + goto out_sleep; + + ret = wl1251_join(wl, wl->bss_type, wl->beacon_int, + wl->channel, wl->dtim_period); + + if (ret < 0) + goto out_sleep; + } + +out_sleep: + wl1251_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + + +/* can't be const, mac80211 writes to this */ +static struct ieee80211_rate wl1251_rates[] = { + { .bitrate = 10, + .hw_value = 0x1, + .hw_value_short = 0x1, }, + { .bitrate = 20, + .hw_value = 0x2, + .hw_value_short = 0x2, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = 0x4, + .hw_value_short = 0x4, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = 0x20, + .hw_value_short = 0x20, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60, + .hw_value = 0x8, + .hw_value_short = 0x8, }, + { .bitrate = 90, + .hw_value = 0x10, + .hw_value_short = 0x10, }, + { .bitrate = 120, + .hw_value = 0x40, + .hw_value_short = 0x40, }, + { .bitrate = 180, + .hw_value = 0x80, + .hw_value_short = 0x80, }, + { .bitrate = 240, + .hw_value = 0x200, + .hw_value_short = 0x200, }, + { .bitrate = 360, + .hw_value = 0x400, + .hw_value_short = 0x400, }, + { .bitrate = 480, + .hw_value = 0x800, + .hw_value_short = 0x800, }, + { .bitrate = 540, + .hw_value = 0x1000, + .hw_value_short = 0x1000, }, +}; + +/* can't be const, mac80211 writes to this */ +static struct ieee80211_channel wl1251_channels[] = { + { .hw_value = 1, .center_freq = 2412}, + { .hw_value = 2, .center_freq = 2417}, + { .hw_value = 3, .center_freq = 2422}, + { .hw_value = 4, .center_freq = 2427}, + { .hw_value = 5, .center_freq = 2432}, + { .hw_value = 6, .center_freq = 2437}, + { .hw_value = 7, .center_freq = 2442}, + { .hw_value = 8, .center_freq = 2447}, + { .hw_value = 9, .center_freq = 2452}, + { .hw_value = 10, .center_freq = 2457}, + { .hw_value = 11, .center_freq = 2462}, + { .hw_value = 12, .center_freq = 2467}, + { .hw_value = 13, .center_freq = 2472}, +}; + +static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + enum wl1251_acx_ps_scheme ps_scheme; + struct wl1251 *wl = hw->priv; + int ret; + + mutex_lock(&wl->mutex); + + wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); + + ret = wl1251_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + /* mac80211 uses units of 32 usec */ + ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue), + params->cw_min, params->cw_max, + params->aifs, params->txop * 32); + if (ret < 0) + goto out_sleep; + + if (params->uapsd) + ps_scheme = WL1251_ACX_PS_SCHEME_UPSD_TRIGGER; + else + ps_scheme = WL1251_ACX_PS_SCHEME_LEGACY; + + ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue), + CHANNEL_TYPE_EDCF, + wl1251_tx_get_queue(queue), ps_scheme, + WL1251_ACX_ACK_POLICY_LEGACY); + if (ret < 0) + goto out_sleep; + +out_sleep: + wl1251_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +/* can't be const, mac80211 writes to this */ +static struct ieee80211_supported_band wl1251_band_2ghz = { + .channels = wl1251_channels, + .n_channels = ARRAY_SIZE(wl1251_channels), + 
.bitrates = wl1251_rates, + .n_bitrates = ARRAY_SIZE(wl1251_rates), +}; + +static const struct ieee80211_ops wl1251_ops = { + .start = wl1251_op_start, + .stop = wl1251_op_stop, + .add_interface = wl1251_op_add_interface, + .remove_interface = wl1251_op_remove_interface, + .config = wl1251_op_config, + .configure_filter = wl1251_op_configure_filter, + .tx = wl1251_op_tx, + .set_key = wl1251_op_set_key, + .hw_scan = wl1251_op_hw_scan, + .bss_info_changed = wl1251_op_bss_info_changed, + .set_rts_threshold = wl1251_op_set_rts_threshold, + .conf_tx = wl1251_op_conf_tx, +}; + +static int wl1251_register_hw(struct wl1251 *wl) +{ + int ret; + + if (wl->mac80211_registered) + return 0; + + SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); + + ret = ieee80211_register_hw(wl->hw); + if (ret < 0) { + wl1251_error("unable to register mac80211 hw: %d", ret); + return ret; + } + + wl->mac80211_registered = true; + + wl1251_notice("loaded"); + + return 0; +} + +int wl1251_init_ieee80211(struct wl1251 *wl) +{ + int ret; + + /* The tx descriptor buffer and the TKIP space */ + wl->hw->extra_tx_headroom = sizeof(struct tx_double_buffer_desc) + + WL1251_TKIP_IV_SPACE; + + /* unit us */ + /* FIXME: find a proper value */ + wl->hw->channel_change_time = 10000; + + wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_BEACON_FILTER | + IEEE80211_HW_SUPPORTS_UAPSD; + + wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + wl->hw->wiphy->max_scan_ssids = 1; + wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz; + + wl->hw->queues = 4; + + ret = wl1251_register_hw(wl); + if (ret) + goto out; + + wl1251_debugfs_init(wl); + wl1251_notice("initialized"); + + ret = 0; + +out: + return ret; +} +EXPORT_SYMBOL_GPL(wl1251_init_ieee80211); + +struct ieee80211_hw *wl1251_alloc_hw(void) +{ + struct ieee80211_hw *hw; + struct wl1251 *wl; + int i; + static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf}; + + hw = ieee80211_alloc_hw(sizeof(*wl), &wl1251_ops); + if (!hw) { + wl1251_error("could not alloc ieee80211_hw"); + return ERR_PTR(-ENOMEM); + } + + wl = hw->priv; + memset(wl, 0, sizeof(*wl)); + + wl->hw = hw; + + wl->data_in_count = 0; + + skb_queue_head_init(&wl->tx_queue); + + INIT_WORK(&wl->filter_work, wl1251_filter_work); + INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work); + wl->channel = WL1251_DEFAULT_CHANNEL; + wl->scanning = false; + wl->default_key = 0; + wl->listen_int = 1; + wl->rx_counter = 0; + wl->rx_handled = 0; + wl->rx_current_buffer = 0; + wl->rx_last_id = 0; + wl->rx_config = WL1251_DEFAULT_RX_CONFIG; + wl->rx_filter = WL1251_DEFAULT_RX_FILTER; + wl->elp = false; + wl->psm = 0; + wl->psm_requested = false; + wl->tx_queue_stopped = false; + wl->power_level = WL1251_DEFAULT_POWER_LEVEL; + wl->beacon_int = WL1251_DEFAULT_BEACON_INT; + wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD; + wl->vif = NULL; + + for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) + wl->tx_frames[i] = NULL; + + wl->next_tx_complete = 0; + + INIT_WORK(&wl->irq_work, wl1251_irq_work); + INIT_WORK(&wl->tx_work, wl1251_tx_work); + + /* + * In case our MAC address is not correctly set, + * we use a random but Nokia MAC. 
+ */ + memcpy(wl->mac_addr, nokia_oui, 3); + get_random_bytes(wl->mac_addr + 3, 3); + + wl->state = WL1251_STATE_OFF; + mutex_init(&wl->mutex); + + wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE; + wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; + + wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL); + if (!wl->rx_descriptor) { + wl1251_error("could not allocate memory for rx descriptor"); + ieee80211_free_hw(hw); + return ERR_PTR(-ENOMEM); + } + + return hw; +} +EXPORT_SYMBOL_GPL(wl1251_alloc_hw); + +int wl1251_free_hw(struct wl1251 *wl) +{ + ieee80211_unregister_hw(wl->hw); + + wl1251_debugfs_exit(wl); + + kfree(wl->target_mem_map); + kfree(wl->data_path); + vfree(wl->fw); + wl->fw = NULL; + kfree(wl->nvs); + wl->nvs = NULL; + + kfree(wl->rx_descriptor); + wl->rx_descriptor = NULL; + + ieee80211_free_hw(wl->hw); + + return 0; +} +EXPORT_SYMBOL_GPL(wl1251_free_hw); + +MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Kalle Valo "); +MODULE_ALIAS("spi:wl1251"); +MODULE_FIRMWARE(WL1251_FW_NAME); diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c new file mode 100644 index 0000000..851dfb6 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_ps.c @@ -0,0 +1,196 @@ +/* + * This file is part of wl1251 + * + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wl1251_reg.h" +#include "wl1251_ps.h" +#include "wl1251_cmd.h" +#include "wl1251_io.h" + +/* in ms */ +#define WL1251_WAKEUP_TIMEOUT 100 + +void wl1251_elp_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1251 *wl; + + dwork = container_of(work, struct delayed_work, work); + wl = container_of(dwork, struct wl1251, elp_work); + + wl1251_debug(DEBUG_PSM, "elp work"); + + mutex_lock(&wl->mutex); + + if (wl->elp || !wl->psm) + goto out; + + wl1251_debug(DEBUG_PSM, "chip to elp"); + wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); + wl->elp = true; + +out: + mutex_unlock(&wl->mutex); +} + +#define ELP_ENTRY_DELAY 5 + +/* Routines to toggle sleep mode while in ELP */ +void wl1251_ps_elp_sleep(struct wl1251 *wl) +{ + unsigned long delay; + + if (wl->psm) { + cancel_delayed_work(&wl->elp_work); + delay = msecs_to_jiffies(ELP_ENTRY_DELAY); + ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay); + } +} + +int wl1251_ps_elp_wakeup(struct wl1251 *wl) +{ + unsigned long timeout, start; + u32 elp_reg; + + if (!wl->elp) + return 0; + + wl1251_debug(DEBUG_PSM, "waking up chip from elp"); + + start = jiffies; + timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT); + + wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); + + elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); + + /* + * FIXME: we should wait for irq from chip but, as a temporary + * solution to simplify locking, let's poll instead + */ + while (!(elp_reg & ELPCTRL_WLAN_READY)) { + if (time_after(jiffies, timeout)) { + wl1251_error("elp wakeup timeout"); + return -ETIMEDOUT; + } + msleep(1); + elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); + } + + wl1251_debug(DEBUG_PSM, "wakeup time: %u ms", + jiffies_to_msecs(jiffies - start)); + + wl->elp = false; + + return 0; +} + +static int wl1251_ps_set_elp(struct wl1251 *wl, bool enable) +{ + int ret; + + if (enable) { + wl1251_debug(DEBUG_PSM, "sleep auth psm/elp"); + + ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP); + if (ret < 0) + return ret; + + wl1251_ps_elp_sleep(wl); + } else { + wl1251_debug(DEBUG_PSM, "sleep auth cam"); + + /* + * When the target is in ELP, we can only + * access the ELP control register. Thus, + * we have to wake the target up before + * changing the power authorization. 
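The wakeup path above writes ELPCTRL_WAKE_UP and then, as its FIXME notes, polls ELPCTRL_WLAN_READY instead of waiting for an interrupt from the chip. A user-space sketch of that poll-until-ready-or-timeout pattern follows; the register accessors are stubs (the stub reports "ready" immediately so the program terminates), whereas the real driver goes through wl1251_read32/wl1251_write32 and a jiffies-based deadline.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define ELPCTRL_WAKE_UP    0x1
#define ELPCTRL_WLAN_READY 0x2
#define WAKEUP_TIMEOUT_MS  100

/* Stand-ins for the ELP control register access. */
static uint32_t fake_elp_reg = ELPCTRL_WLAN_READY;
static uint32_t elp_read(void)        { return fake_elp_reg; }
static void     elp_write(uint32_t v) { (void)v; }

static int64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll until the chip reports WLAN_READY or the timeout expires. */
static int elp_wakeup(void)
{
	int64_t deadline = now_ms() + WAKEUP_TIMEOUT_MS;

	elp_write(ELPCTRL_WAKE_UP);
	while (!(elp_read() & ELPCTRL_WLAN_READY)) {
		if (now_ms() > deadline)
			return -1;	/* the driver returns -ETIMEDOUT */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
	return 0;
}

int main(void)
{
	printf("wakeup %s\n", elp_wakeup() == 0 ? "ok" : "timed out");
	return 0;
}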
+ */ + + wl1251_ps_elp_wakeup(wl); + + ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM); + if (ret < 0) + return ret; + } + + return 0; +} + +int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode) +{ + int ret; + + switch (mode) { + case STATION_POWER_SAVE_MODE: + wl1251_debug(DEBUG_PSM, "entering psm"); + + /* enable beacon filtering */ + ret = wl1251_acx_beacon_filter_opt(wl, true); + if (ret < 0) + return ret; + + ret = wl1251_acx_wake_up_conditions(wl, + WAKE_UP_EVENT_DTIM_BITMAP, + wl->listen_int); + if (ret < 0) + return ret; + + ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); + if (ret < 0) + return ret; + + ret = wl1251_ps_set_elp(wl, true); + if (ret < 0) + return ret; + + wl->psm = 1; + break; + case STATION_ACTIVE_MODE: + default: + wl1251_debug(DEBUG_PSM, "leaving psm"); + ret = wl1251_ps_set_elp(wl, false); + if (ret < 0) + return ret; + + /* disable beacon filtering */ + ret = wl1251_acx_beacon_filter_opt(wl, false); + if (ret < 0) + return ret; + + ret = wl1251_acx_wake_up_conditions(wl, + WAKE_UP_EVENT_DTIM_BITMAP, + wl->listen_int); + if (ret < 0) + return ret; + + ret = wl1251_cmd_ps_mode(wl, STATION_ACTIVE_MODE); + if (ret < 0) + return ret; + + wl->psm = 0; + break; + } + + return ret; +} + diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.h b/drivers/net/wireless/wl12xx/wl1251_ps.h new file mode 100644 index 0000000..c688ac5 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_ps.h @@ -0,0 +1,37 @@ +#ifndef __WL1251_PS_H__ +#define __WL1251_PS_H__ + +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wl1251.h" +#include "wl1251_acx.h" + +int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode); +void wl1251_ps_elp_sleep(struct wl1251 *wl); +int wl1251_ps_elp_wakeup(struct wl1251 *wl); +void wl1251_elp_work(struct work_struct *work); + + +#endif /* __WL1251_PS_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl12xx/wl1251_reg.h new file mode 100644 index 0000000..0ca3b43 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_reg.h @@ -0,0 +1,650 @@ +/* + * This file is part of wl12xx + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __REG_H__ +#define __REG_H__ + +#include + +#define REGISTERS_BASE 0x00300000 +#define DRPW_BASE 0x00310000 + +#define REGISTERS_DOWN_SIZE 0x00008800 +#define REGISTERS_WORK_SIZE 0x0000b000 + +#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC + +/* ELP register commands */ +#define ELPCTRL_WAKE_UP 0x1 +#define ELPCTRL_WAKE_UP_WLAN_READY 0x5 +#define ELPCTRL_SLEEP 0x0 +/* ELP WLAN_READY bit */ +#define ELPCTRL_WLAN_READY 0x2 + +/* Device Configuration registers*/ +#define SOR_CFG (REGISTERS_BASE + 0x0800) +#define ECPU_CTRL (REGISTERS_BASE + 0x0804) +#define HI_CFG (REGISTERS_BASE + 0x0808) +#define EE_START (REGISTERS_BASE + 0x080C) + +#define CHIP_ID_B (REGISTERS_BASE + 0x5674) + +#define CHIP_ID_1251_PG10 (0x7010101) +#define CHIP_ID_1251_PG11 (0x7020101) +#define CHIP_ID_1251_PG12 (0x7030101) + +#define ENABLE (REGISTERS_BASE + 0x5450) + +/* Power Management registers */ +#define ELP_CFG_MODE (REGISTERS_BASE + 0x5804) +#define ELP_CMD (REGISTERS_BASE + 0x5808) +#define PLL_CAL_TIME (REGISTERS_BASE + 0x5810) +#define CLK_REQ_TIME (REGISTERS_BASE + 0x5814) +#define CLK_BUF_TIME (REGISTERS_BASE + 0x5818) + +#define CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820) + +/* Scratch Pad registers*/ +#define SCR_PAD0 (REGISTERS_BASE + 0x5608) +#define SCR_PAD1 (REGISTERS_BASE + 0x560C) +#define SCR_PAD2 (REGISTERS_BASE + 0x5610) +#define SCR_PAD3 (REGISTERS_BASE + 0x5614) +#define SCR_PAD4 (REGISTERS_BASE + 0x5618) +#define SCR_PAD4_SET (REGISTERS_BASE + 0x561C) +#define SCR_PAD4_CLR (REGISTERS_BASE + 0x5620) +#define SCR_PAD5 (REGISTERS_BASE + 0x5624) +#define SCR_PAD5_SET (REGISTERS_BASE + 0x5628) +#define SCR_PAD5_CLR (REGISTERS_BASE + 0x562C) +#define SCR_PAD6 (REGISTERS_BASE + 0x5630) +#define SCR_PAD7 (REGISTERS_BASE + 0x5634) +#define SCR_PAD8 (REGISTERS_BASE + 0x5638) +#define SCR_PAD9 (REGISTERS_BASE + 0x563C) + +/* Spare registers*/ +#define SPARE_A1 (REGISTERS_BASE + 0x0994) +#define SPARE_A2 (REGISTERS_BASE + 0x0998) +#define SPARE_A3 (REGISTERS_BASE + 0x099C) +#define SPARE_A4 (REGISTERS_BASE + 0x09A0) +#define SPARE_A5 (REGISTERS_BASE + 0x09A4) +#define SPARE_A6 (REGISTERS_BASE + 0x09A8) +#define SPARE_A7 (REGISTERS_BASE + 0x09AC) +#define SPARE_A8 (REGISTERS_BASE + 0x09B0) +#define SPARE_B1 (REGISTERS_BASE + 0x5420) +#define SPARE_B2 (REGISTERS_BASE + 0x5424) +#define SPARE_B3 (REGISTERS_BASE + 0x5428) +#define SPARE_B4 (REGISTERS_BASE + 0x542C) +#define SPARE_B5 (REGISTERS_BASE + 0x5430) +#define SPARE_B6 (REGISTERS_BASE + 0x5434) +#define SPARE_B7 (REGISTERS_BASE + 0x5438) +#define SPARE_B8 (REGISTERS_BASE + 0x543C) + +enum wl12xx_acx_int_reg { + ACX_REG_INTERRUPT_TRIG, + ACX_REG_INTERRUPT_TRIG_H, + +/*============================================= + Host Interrupt Mask Register - 32bit (RW) + ------------------------------------------ + Setting a bit in this register masks the + corresponding interrupt to the host. 
+ 0 - RX0 - Rx first dubble buffer Data Interrupt + 1 - TXD - Tx Data Interrupt + 2 - TXXFR - Tx Transfer Interrupt + 3 - RX1 - Rx second dubble buffer Data Interrupt + 4 - RXXFR - Rx Transfer Interrupt + 5 - EVENT_A - Event Mailbox interrupt + 6 - EVENT_B - Event Mailbox interrupt + 7 - WNONHST - Wake On Host Interrupt + 8 - TRACE_A - Debug Trace interrupt + 9 - TRACE_B - Debug Trace interrupt + 10 - CDCMP - Command Complete Interrupt + 11 - + 12 - + 13 - + 14 - ICOMP - Initialization Complete Interrupt + 16 - SG SE - Soft Gemini - Sense enable interrupt + 17 - SG SD - Soft Gemini - Sense disable interrupt + 18 - - + 19 - - + 20 - - + 21- - + Default: 0x0001 +*==============================================*/ + ACX_REG_INTERRUPT_MASK, + +/*============================================= + Host Interrupt Mask Set 16bit, (Write only) + ------------------------------------------ + Setting a bit in this register sets + the corresponding bin in ACX_HINT_MASK register + without effecting the mask + state of other bits (0 = no effect). +==============================================*/ + ACX_REG_HINT_MASK_SET, + +/*============================================= + Host Interrupt Mask Clear 16bit,(Write only) + ------------------------------------------ + Setting a bit in this register clears + the corresponding bin in ACX_HINT_MASK register + without effecting the mask + state of other bits (0 = no effect). +=============================================*/ + ACX_REG_HINT_MASK_CLR, + +/*============================================= + Host Interrupt Status Nondestructive Read + 16bit,(Read only) + ------------------------------------------ + The host can read this register to determine + which interrupts are active. + Reading this register doesn't + effect its content. +=============================================*/ + ACX_REG_INTERRUPT_NO_CLEAR, + +/*============================================= + Host Interrupt Status Clear on Read Register + 16bit,(Read only) + ------------------------------------------ + The host can read this register to determine + which interrupts are active. + Reading this register clears it, + thus making all interrupts inactive. +==============================================*/ + ACX_REG_INTERRUPT_CLEAR, + +/*============================================= + Host Interrupt Acknowledge Register + 16bit,(Write only) + ------------------------------------------ + The host can set individual bits in this + register to clear (acknowledge) the corresp. + interrupt status bits in the HINT_STS_CLR and + HINT_STS_ND registers, thus making the + assotiated interrupt inactive. (0-no effect) +==============================================*/ + ACX_REG_INTERRUPT_ACK, + +/*=============================================== + Host Software Reset - 32bit RW + ------------------------------------------ + [31:1] Reserved + 0 SOFT_RESET Soft Reset - When this bit is set, + it holds the Wlan hardware in a soft reset state. + This reset disables all MAC and baseband processor + clocks except the CardBus/PCI interface clock. + It also initializes all MAC state machines except + the host interface. It does not reload the + contents of the EEPROM. When this bit is cleared + (not self-clearing), the Wlan hardware + exits the software reset state. 
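ACX_REG_HINT_MASK_SET and ACX_REG_HINT_MASK_CLR, described above, let the host set or clear individual mask bits without a read-modify-write of ACX_REG_INTERRUPT_MASK itself. A small model of that write-1-to-set / write-1-to-clear idiom; the bit positions are taken from the mask description above, but the INTR_* names are made up for this example.

#include <stdint.h>
#include <stdio.h>

#define INTR_RX0     (1u << 0)	/* Rx first double-buffer data interrupt */
#define INTR_TXD     (1u << 1)	/* Tx data interrupt */
#define INTR_EVENT_A (1u << 5)	/* event mailbox A interrupt */

/* Simulated interrupt mask register. */
static uint32_t hint_mask;

/* Writing 1s through the SET alias sets those mask bits; 0s do nothing. */
static void hint_mask_set(uint32_t bits) { hint_mask |= bits; }

/* Writing 1s through the CLR alias clears those mask bits; 0s do nothing. */
static void hint_mask_clr(uint32_t bits) { hint_mask &= ~bits; }

int main(void)
{
	hint_mask_set(INTR_RX0 | INTR_EVENT_A);
	hint_mask_clr(INTR_EVENT_A);		/* INTR_RX0 stays masked */
	printf("mask = 0x%08x\n", (unsigned)hint_mask);	/* 0x00000001 */
	return 0;
}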
+===============================================*/ + ACX_REG_SLV_SOFT_RESET, + +/*=============================================== + EEPROM Burst Read Start - 32bit RW + ------------------------------------------ + [31:1] Reserved + 0 ACX_EE_START - EEPROM Burst Read Start 0 + Setting this bit starts a burst read from + the external EEPROM. + If this bit is set (after reset) before an EEPROM read/write, + the burst read starts at EEPROM address 0. + Otherwise, it starts at the address + following the address of the previous access. + TheWlan hardware hardware clears this bit automatically. + + Default: 0x00000000 +*================================================*/ + ACX_REG_EE_START, + +/* Embedded ARM CPU Control */ + +/*=============================================== + Halt eCPU - 32bit RW + ------------------------------------------ + 0 HALT_ECPU Halt Embedded CPU - This bit is the + compliment of bit 1 (MDATA2) in the SOR_CFG register. + During a hardware reset, this bit holds + the inverse of MDATA2. + When downloading firmware from the host, + set this bit (pull down MDATA2). + The host clears this bit after downloading the firmware into + zero-wait-state SSRAM. + When loading firmware from Flash, clear this bit (pull up MDATA2) + so that the eCPU can run the bootloader code in Flash + HALT_ECPU eCPU State + -------------------- + 1 halt eCPU + 0 enable eCPU + ===============================================*/ + ACX_REG_ECPU_CONTROL, + + ACX_REG_TABLE_LEN +}; + +#define ACX_SLV_SOFT_RESET_BIT BIT(0) +#define ACX_REG_EEPROM_START_BIT BIT(0) + +/* Command/Information Mailbox Pointers */ + +/*=============================================== + Command Mailbox Pointer - 32bit RW + ------------------------------------------ + This register holds the start address of + the command mailbox located in the Wlan hardware memory. + The host must read this pointer after a reset to + find the location of the command mailbox. + The Wlan hardware initializes the command mailbox + pointer with the default address of the command mailbox. + The command mailbox pointer is not valid until after + the host receives the Init Complete interrupt from + the Wlan hardware. + ===============================================*/ +#define REG_COMMAND_MAILBOX_PTR (SCR_PAD0) + +/*=============================================== + Information Mailbox Pointer - 32bit RW + ------------------------------------------ + This register holds the start address of + the information mailbox located in the Wlan hardware memory. + The host must read this pointer after a reset to find + the location of the information mailbox. + The Wlan hardware initializes the information mailbox pointer + with the default address of the information mailbox. + The information mailbox pointer is not valid + until after the host receives the Init Complete interrupt from + the Wlan hardware. 
+ ===============================================*/ +#define REG_EVENT_MAILBOX_PTR (SCR_PAD1) + + +/* Misc */ + +#define REG_ENABLE_TX_RX (ENABLE) +/* + * Rx configuration (filter) information element + * --------------------------------------------- + */ +#define REG_RX_CONFIG (RX_CFG) +#define REG_RX_FILTER (RX_FILTER_CFG) + + +#define RX_CFG_ENABLE_PHY_HEADER_PLCP 0x0002 + +/* promiscuous - receives all valid frames */ +#define RX_CFG_PROMISCUOUS 0x0008 + +/* receives frames from any BSSID */ +#define RX_CFG_BSSID 0x0020 + +/* receives frames destined to any MAC address */ +#define RX_CFG_MAC 0x0010 + +#define RX_CFG_ENABLE_ONLY_MY_DEST_MAC 0x0010 +#define RX_CFG_ENABLE_ANY_DEST_MAC 0x0000 +#define RX_CFG_ENABLE_ONLY_MY_BSSID 0x0020 +#define RX_CFG_ENABLE_ANY_BSSID 0x0000 + +/* discards all broadcast frames */ +#define RX_CFG_DISABLE_BCAST 0x0200 + +#define RX_CFG_ENABLE_ONLY_MY_SSID 0x0400 +#define RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR 0x0800 +#define RX_CFG_COPY_RX_STATUS 0x2000 +#define RX_CFG_TSF 0x10000 + +#define RX_CONFIG_OPTION_ANY_DST_MY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \ + RX_CFG_ENABLE_ONLY_MY_BSSID) + +#define RX_CONFIG_OPTION_MY_DST_ANY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\ + | RX_CFG_ENABLE_ANY_BSSID) + +#define RX_CONFIG_OPTION_ANY_DST_ANY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \ + RX_CFG_ENABLE_ANY_BSSID) + +#define RX_CONFIG_OPTION_MY_DST_MY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\ + | RX_CFG_ENABLE_ONLY_MY_BSSID) + +#define RX_CONFIG_OPTION_FOR_SCAN (RX_CFG_ENABLE_PHY_HEADER_PLCP \ + | RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR \ + | RX_CFG_COPY_RX_STATUS | RX_CFG_TSF) + +#define RX_CONFIG_OPTION_FOR_MEASUREMENT (RX_CFG_ENABLE_ANY_DEST_MAC) + +#define RX_CONFIG_OPTION_FOR_JOIN (RX_CFG_ENABLE_ONLY_MY_BSSID | \ + RX_CFG_ENABLE_ONLY_MY_DEST_MAC) + +#define RX_CONFIG_OPTION_FOR_IBSS_JOIN (RX_CFG_ENABLE_ONLY_MY_SSID | \ + RX_CFG_ENABLE_ONLY_MY_DEST_MAC) + +#define RX_FILTER_OPTION_DEF (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\ + | CFG_RX_CTL_EN | CFG_RX_BCN_EN\ + | CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) + +#define RX_FILTER_OPTION_FILTER_ALL 0 + +#define RX_FILTER_OPTION_DEF_PRSP_BCN (CFG_RX_PRSP_EN | CFG_RX_MGMT_EN\ + | CFG_RX_RCTS_ACK | CFG_RX_BCN_EN) + +#define RX_FILTER_OPTION_JOIN (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\ + | CFG_RX_BCN_EN | CFG_RX_AUTH_EN\ + | CFG_RX_ASSOC_EN | CFG_RX_RCTS_ACK\ + | CFG_RX_PRSP_EN) + + +/*=============================================== + EEPROM Read/Write Request 32bit RW + ------------------------------------------ + 1 EE_READ - EEPROM Read Request 1 - Setting this bit + loads a single byte of data into the EE_DATA + register from the EEPROM location specified in + the EE_ADDR register. + The Wlan hardware hardware clears this bit automatically. + EE_DATA is valid when this bit is cleared. + + 0 EE_WRITE - EEPROM Write Request - Setting this bit + writes a single byte of data from the EE_DATA register into the + EEPROM location specified in the EE_ADDR register. + The Wlan hardware hardware clears this bit automatically. +*===============================================*/ +#define EE_CTL (REGISTERS_BASE + 0x2000) +#define ACX_EE_CTL_REG EE_CTL +#define EE_WRITE 0x00000001ul +#define EE_READ 0x00000002ul + +/*=============================================== + EEPROM Address - 32bit RW + ------------------------------------------ + This register specifies the address + within the EEPROM from/to which to read/write data. 
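Taken together, the EE_CTL and EE_ADDR descriptions above, with EE_DATA just below, amount to: write the byte address, set EE_READ in EE_CTL, wait for the hardware to clear that bit, then fetch the byte from EE_DATA. The sketch below runs the sequence against a simulated register file whose "hardware" completes instantly; a real driver would use its own register accessors and a poll budget such as EEPROM_ACCESS_TO.

#include <stdint.h>
#include <stdio.h>

#define EE_READ          0x00000002u	/* as defined above */
#define EEPROM_ACCESS_TO 10000		/* poll budget before giving up */

/* Simulated EE_CTL / EE_ADDR / EE_DATA registers. */
static uint32_t ee_ctl, ee_addr, ee_data;

static void reg_write(uint32_t *reg, uint32_t val) { *reg = val; }

static uint32_t reg_read(uint32_t *reg)
{
	if (reg == &ee_ctl && (ee_ctl & EE_READ)) {
		ee_ctl &= ~EE_READ;	/* model the self-clearing bit */
		ee_data = 0xab;		/* pretend the EEPROM returned 0xab */
	}
	return *reg;
}

/* One-byte EEPROM read following the register description above. */
static int eeprom_read_byte(uint16_t addr, uint8_t *out)
{
	int budget = EEPROM_ACCESS_TO;

	reg_write(&ee_addr, addr);
	reg_write(&ee_ctl, EE_READ);

	while (reg_read(&ee_ctl) & EE_READ)
		if (--budget == 0)
			return -1;	/* timed out */

	*out = (uint8_t)reg_read(&ee_data);
	return 0;
}

int main(void)
{
	uint8_t b;

	if (eeprom_read_byte(0x10, &b) == 0)
		printf("eeprom[0x10] = 0x%02x\n", (unsigned)b);
	return 0;
}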
+ ===============================================*/ +#define EE_ADDR (REGISTERS_BASE + 0x2008) +#define ACX_EE_ADDR_REG EE_ADDR + +/*=============================================== + EEPROM Data - 32bit RW + ------------------------------------------ + This register either holds the read 8 bits of + data from the EEPROM or the write data + to be written to the EEPROM. + ===============================================*/ +#define EE_DATA (REGISTERS_BASE + 0x2004) +#define ACX_EE_DATA_REG EE_DATA + +#define EEPROM_ACCESS_TO 10000 /* timeout counter */ +#define START_EEPROM_MGR 0x00000001 + +/*=============================================== + EEPROM Base Address - 32bit RW + ------------------------------------------ + This register holds the upper nine bits + [23:15] of the 24-bit Wlan hardware memory + address for burst reads from EEPROM accesses. + The EEPROM provides the lower 15 bits of this address. + The MSB of the address from the EEPROM is ignored. + ===============================================*/ +#define ACX_EE_CFG EE_CFG + +/*=============================================== + GPIO Output Values -32bit, RW + ------------------------------------------ + [31:16] Reserved + [15: 0] Specify the output values (at the output driver inputs) for + GPIO[15:0], respectively. + ===============================================*/ +#define ACX_GPIO_OUT_REG GPIO_OUT +#define ACX_MAX_GPIO_LINES 15 + +/*=============================================== + Contention window -32bit, RW + ------------------------------------------ + [31:26] Reserved + [25:16] Max (0x3ff) + [15:07] Reserved + [06:00] Current contention window value - default is 0x1F + ===============================================*/ +#define ACX_CONT_WIND_CFG_REG CONT_WIND_CFG +#define ACX_CONT_WIND_MIN_MASK 0x0000007f +#define ACX_CONT_WIND_MAX 0x03ff0000 + +/*=============================================== + HI_CFG Interface Configuration Register Values + ------------------------------------------ + ===============================================*/ +#define HI_CFG_UART_ENABLE 0x00000004 +#define HI_CFG_RST232_ENABLE 0x00000008 +#define HI_CFG_CLOCK_REQ_SELECT 0x00000010 +#define HI_CFG_HOST_INT_ENABLE 0x00000020 +#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040 +#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080 +#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100 +#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200 +#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400 + +/* + * NOTE: USE_ACTIVE_HIGH compilation flag should be defined in makefile + * for platforms using active high interrupt level + */ +#ifdef USE_ACTIVE_HIGH +#define HI_CFG_DEF_VAL \ + (HI_CFG_UART_ENABLE | \ + HI_CFG_RST232_ENABLE | \ + HI_CFG_CLOCK_REQ_SELECT | \ + HI_CFG_HOST_INT_ENABLE) +#else +#define HI_CFG_DEF_VAL \ + (HI_CFG_UART_ENABLE | \ + HI_CFG_RST232_ENABLE | \ + HI_CFG_CLOCK_REQ_SELECT | \ + HI_CFG_HOST_INT_ENABLE) + +#endif + +#define REF_FREQ_19_2 0 +#define REF_FREQ_26_0 1 +#define REF_FREQ_38_4 2 +#define REF_FREQ_40_0 3 +#define REF_FREQ_33_6 4 +#define REF_FREQ_NUM 5 + +#define LUT_PARAM_INTEGER_DIVIDER 0 +#define LUT_PARAM_FRACTIONAL_DIVIDER 1 +#define LUT_PARAM_ATTN_BB 2 +#define LUT_PARAM_ALPHA_BB 3 +#define LUT_PARAM_STOP_TIME_BB 4 +#define LUT_PARAM_BB_PLL_LOOP_FILTER 5 +#define LUT_PARAM_NUM 6 + +#define ACX_EEPROMLESS_IND_REG (SCR_PAD4) +#define USE_EEPROM 0 +#define SOFT_RESET_MAX_TIME 1000000 +#define SOFT_RESET_STALL_TIME 1000 +#define NVS_DATA_BUNDARY_ALIGNMENT 4 + + +/* Firmware image load chunk size */ +#define CHUNK_SIZE 512 + +/* Firmware image header size */ 
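CHUNK_SIZE above caps how much of the firmware image is pushed to the chip per write. The actual download loop is not part of this hunk, so the helper below is only a generic sketch of streaming a buffer out in CHUNK_SIZE pieces, with the device write stubbed out.

#include <stddef.h>
#include <stdio.h>

#define CHUNK_SIZE 512		/* matches the define above */

/* Stand-in for a write into chip memory. */
static void chip_write(size_t dest, const unsigned char *buf, size_t len)
{
	(void)buf;
	printf("write %zu bytes at offset 0x%zx\n", len, dest);
}

/* Stream 'len' bytes to the target, at most CHUNK_SIZE at a time. */
static void upload(const unsigned char *image, size_t len, size_t dest)
{
	size_t off;

	for (off = 0; off < len; off += CHUNK_SIZE) {
		size_t n = len - off;

		if (n > CHUNK_SIZE)
			n = CHUNK_SIZE;
		chip_write(dest + off, image + off, n);
	}
}

int main(void)
{
	static unsigned char image[1300];	/* zero-initialised dummy */

	upload(image, sizeof(image), 0);
	return 0;
}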
+#define FW_HDR_SIZE 8 + +#define ECPU_CONTROL_HALT 0x00000101 + + +/****************************************************************************** + + CHANNELS, BAND & REG DOMAINS definitions + +******************************************************************************/ + + +enum { + RADIO_BAND_2_4GHZ = 0, /* 2.4 Ghz band */ + RADIO_BAND_5GHZ = 1, /* 5 Ghz band */ + RADIO_BAND_JAPAN_4_9_GHZ = 2, + DEFAULT_BAND = RADIO_BAND_2_4GHZ, + INVALID_BAND = 0xFE, + MAX_RADIO_BANDS = 0xFF +}; + +enum { + NO_RATE = 0, + RATE_1MBPS = 0x0A, + RATE_2MBPS = 0x14, + RATE_5_5MBPS = 0x37, + RATE_6MBPS = 0x0B, + RATE_9MBPS = 0x0F, + RATE_11MBPS = 0x6E, + RATE_12MBPS = 0x0A, + RATE_18MBPS = 0x0E, + RATE_22MBPS = 0xDC, + RATE_24MBPS = 0x09, + RATE_36MBPS = 0x0D, + RATE_48MBPS = 0x08, + RATE_54MBPS = 0x0C +}; + +enum { + RATE_INDEX_1MBPS = 0, + RATE_INDEX_2MBPS = 1, + RATE_INDEX_5_5MBPS = 2, + RATE_INDEX_6MBPS = 3, + RATE_INDEX_9MBPS = 4, + RATE_INDEX_11MBPS = 5, + RATE_INDEX_12MBPS = 6, + RATE_INDEX_18MBPS = 7, + RATE_INDEX_22MBPS = 8, + RATE_INDEX_24MBPS = 9, + RATE_INDEX_36MBPS = 10, + RATE_INDEX_48MBPS = 11, + RATE_INDEX_54MBPS = 12, + RATE_INDEX_MAX = RATE_INDEX_54MBPS, + MAX_RATE_INDEX, + INVALID_RATE_INDEX = MAX_RATE_INDEX, + RATE_INDEX_ENUM_MAX_SIZE = 0x7FFFFFFF +}; + +enum { + RATE_MASK_1MBPS = 0x1, + RATE_MASK_2MBPS = 0x2, + RATE_MASK_5_5MBPS = 0x4, + RATE_MASK_11MBPS = 0x20, +}; + +#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */ +#define OFDM_RATE_BIT BIT(6) +#define PBCC_RATE_BIT BIT(7) + +enum { + CCK_LONG = 0, + CCK_SHORT = SHORT_PREAMBLE_BIT, + PBCC_LONG = PBCC_RATE_BIT, + PBCC_SHORT = PBCC_RATE_BIT | SHORT_PREAMBLE_BIT, + OFDM = OFDM_RATE_BIT +}; + +/****************************************************************************** + +Transmit-Descriptor RATE-SET field definitions... + +Define a new "Rate-Set" for TX path that incorporates the +Rate & Modulation info into a single 16-bit field. + +TxdRateSet_t: +b15 - Indicates Preamble type (1=SHORT, 0=LONG). + Notes: + Must be LONG (0) for 1Mbps rate. + Does not apply (set to 0) for RevG-OFDM rates. +b14 - Indicates PBCC encoding (1=PBCC, 0=not). + Notes: + Does not apply (set to 0) for rates 1 and 2 Mbps. + Does not apply (set to 0) for RevG-OFDM rates. +b13 - Unused (set to 0). +b12-b0 - Supported Rate indicator bits as defined below. + +******************************************************************************/ + + +/************************************************************************* + + Interrupt Trigger Register (Host -> WiLink) + +**************************************************************************/ + +/* Hardware to Embedded CPU Interrupts - first 32-bit register set */ + +/* + * Host Command Interrupt. Setting this bit masks + * the interrupt that the host issues to inform + * the FW that it has sent a command + * to the Wlan hardware Command Mailbox. + */ +#define INTR_TRIG_CMD BIT(0) + +/* + * Host Event Acknowlegde Interrupt. The host + * sets this bit to acknowledge that it received + * the unsolicited information from the event + * mailbox. + */ +#define INTR_TRIG_EVENT_ACK BIT(1) + +/* + * The host sets this bit to inform the Wlan + * FW that a TX packet is in the XFER + * Buffer #0. + */ +#define INTR_TRIG_TX_PROC0 BIT(2) + +/* + * The host sets this bit to inform the FW + * that it read a packet from RX XFER + * Buffer #0. 
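The transmit rate-set note above packs rate and modulation into a single 16-bit field: b15 selects a short preamble (never set for 1 Mbps and not used for OFDM rates), b14 selects PBCC, b13 is unused, and b12..b0 indicate the supported rate. A purely illustrative packer, assuming b12..b0 follow the RATE_INDEX_* ordering defined above (the comment only implies this mapping).

#include <stdint.h>
#include <stdio.h>

#define RATE_INDEX_1MBPS   0
#define RATE_INDEX_11MBPS  5
#define RATE_INDEX_54MBPS 12

#define TX_RATESET_SHORT_PREAMBLE (1u << 15)	/* b15 */
#define TX_RATESET_PBCC           (1u << 14)	/* b14 */

/* Pack one rate index plus preamble/PBCC flags into the 16-bit field. */
static uint16_t tx_rateset(unsigned rate_index, int short_preamble, int pbcc)
{
	uint16_t w = (uint16_t)(1u << rate_index);

	/* 1 Mbps must always use a long preamble. */
	if (short_preamble && rate_index != RATE_INDEX_1MBPS)
		w |= TX_RATESET_SHORT_PREAMBLE;
	if (pbcc)
		w |= TX_RATESET_PBCC;
	return w;
}

int main(void)
{
	printf("11 Mbps, short preamble: 0x%04x\n",
	       (unsigned)tx_rateset(RATE_INDEX_11MBPS, 1, 0));	/* 0x8020 */
	return 0;
}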
+ */ +#define INTR_TRIG_RX_PROC0 BIT(3) + +#define INTR_TRIG_DEBUG_ACK BIT(4) + +#define INTR_TRIG_STATE_CHANGED BIT(5) + + +/* Hardware to Embedded CPU Interrupts - second 32-bit register set */ + +/* + * The host sets this bit to inform the FW + * that it read a packet from RX XFER + * Buffer #1. + */ +#define INTR_TRIG_RX_PROC1 BIT(17) + +/* + * The host sets this bit to inform the Wlan + * hardware that a TX packet is in the XFER + * Buffer #1. + */ +#define INTR_TRIG_TX_PROC1 BIT(18) + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c new file mode 100644 index 0000000..b567322 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_rx.c @@ -0,0 +1,193 @@ +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include + +#include "wl1251.h" +#include "wl1251_reg.h" +#include "wl1251_io.h" +#include "wl1251_rx.h" +#include "wl1251_cmd.h" +#include "wl1251_acx.h" + +static void wl1251_rx_header(struct wl1251 *wl, + struct wl1251_rx_descriptor *desc) +{ + u32 rx_packet_ring_addr; + + rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr; + if (wl->rx_current_buffer) + rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size; + + wl1251_mem_read(wl, rx_packet_ring_addr, desc, sizeof(*desc)); +} + +static void wl1251_rx_status(struct wl1251 *wl, + struct wl1251_rx_descriptor *desc, + struct ieee80211_rx_status *status, + u8 beacon) +{ + u64 mactime; + int ret; + + memset(status, 0, sizeof(struct ieee80211_rx_status)); + + status->band = IEEE80211_BAND_2GHZ; + status->mactime = desc->timestamp; + + /* + * The rx status timestamp is a 32 bits value while the TSF is a + * 64 bits one. + * For IBSS merging, TSF is mandatory, so we have to get it + * somehow, so we ask for ACX_TSF_INFO. + * That could be moved to the get_tsf() hook, but unfortunately, + * this one must be atomic, while our SPI routines can sleep. 
+ */ + if ((wl->bss_type == BSS_TYPE_IBSS) && beacon) { + ret = wl1251_acx_tsf_info(wl, &mactime); + if (ret == 0) + status->mactime = mactime; + } + + status->signal = desc->rssi; + + /* + * FIXME: guessing that snr needs to be divided by two, otherwise + * the values don't make any sense + */ + status->noise = desc->rssi - desc->snr / 2; + + status->freq = ieee80211_channel_to_frequency(desc->channel); + + status->flag |= RX_FLAG_TSFT; + + if (desc->flags & RX_DESC_ENCRYPTION_MASK) { + status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; + + if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL))) + status->flag |= RX_FLAG_DECRYPTED; + + if (unlikely(desc->flags & RX_DESC_MIC_FAIL)) + status->flag |= RX_FLAG_MMIC_ERROR; + } + + if (unlikely(!(desc->flags & RX_DESC_VALID_FCS))) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + + + /* FIXME: set status->rate_idx */ +} + +static void wl1251_rx_body(struct wl1251 *wl, + struct wl1251_rx_descriptor *desc) +{ + struct sk_buff *skb; + struct ieee80211_rx_status status; + u8 *rx_buffer, beacon = 0; + u16 length, *fc; + u32 curr_id, last_id_inc, rx_packet_ring_addr; + + length = WL1251_RX_ALIGN(desc->length - PLCP_HEADER_LENGTH); + curr_id = (desc->flags & RX_DESC_SEQNUM_MASK) >> RX_DESC_PACKETID_SHIFT; + last_id_inc = (wl->rx_last_id + 1) % (RX_MAX_PACKET_ID + 1); + + if (last_id_inc != curr_id) { + wl1251_warning("curr ID:%d, last ID inc:%d", + curr_id, last_id_inc); + wl->rx_last_id = curr_id; + } else { + wl->rx_last_id = last_id_inc; + } + + rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr + + sizeof(struct wl1251_rx_descriptor) + 20; + if (wl->rx_current_buffer) + rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size; + + skb = __dev_alloc_skb(length, GFP_KERNEL); + if (!skb) { + wl1251_error("Couldn't allocate RX frame"); + return; + } + + rx_buffer = skb_put(skb, length); + wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length); + + /* The actual lenght doesn't include the target's alignment */ + skb->len = desc->length - PLCP_HEADER_LENGTH; + + fc = (u16 *)skb->data; + + if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) + beacon = 1; + + wl1251_rx_status(wl, desc, &status, beacon); + + wl1251_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, + beacon ? 
"beacon" : ""); + + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); + ieee80211_rx_ni(wl->hw, skb); +} + +static void wl1251_rx_ack(struct wl1251 *wl) +{ + u32 data, addr; + + if (wl->rx_current_buffer) { + addr = ACX_REG_INTERRUPT_TRIG_H; + data = INTR_TRIG_RX_PROC1; + } else { + addr = ACX_REG_INTERRUPT_TRIG; + data = INTR_TRIG_RX_PROC0; + } + + wl1251_reg_write32(wl, addr, data); + + /* Toggle buffer ring */ + wl->rx_current_buffer = !wl->rx_current_buffer; +} + + +void wl1251_rx(struct wl1251 *wl) +{ + struct wl1251_rx_descriptor *rx_desc; + + if (wl->state != WL1251_STATE_ON) + return; + + rx_desc = wl->rx_descriptor; + + /* We first read the frame's header */ + wl1251_rx_header(wl, rx_desc); + + /* Now we can read the body */ + wl1251_rx_body(wl, rx_desc); + + /* Finally, we need to ACK the RX */ + wl1251_rx_ack(wl); + + return; +} diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.h b/drivers/net/wireless/wl12xx/wl1251_rx.h new file mode 100644 index 0000000..563a3fd --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_rx.h @@ -0,0 +1,124 @@ +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1251_RX_H__ +#define __WL1251_RX_H__ + +#include + +#include "wl1251.h" + +/* + * RX PATH + * + * The Rx path uses a double buffer and an rx_contro structure, each located + * at a fixed address in the device memory. The host keeps track of which + * buffer is available and alternates between them on a per packet basis. + * The size of each of the two buffers is large enough to hold the longest + * 802.3 packet. + * The RX path goes like that: + * 1) The target generates an interrupt each time a new packet is received. + * There are 2 RX interrupts, one for each buffer. + * 2) The host reads the received packet from one of the double buffers. + * 3) The host triggers a target interrupt. + * 4) The target prepares the next RX packet. 
+ */ + +#define WL1251_RX_MAX_RSSI -30 +#define WL1251_RX_MIN_RSSI -95 + +#define WL1251_RX_ALIGN_TO 4 +#define WL1251_RX_ALIGN(len) (((len) + WL1251_RX_ALIGN_TO - 1) & \ + ~(WL1251_RX_ALIGN_TO - 1)) + +#define SHORT_PREAMBLE_BIT BIT(0) +#define OFDM_RATE_BIT BIT(6) +#define PBCC_RATE_BIT BIT(7) + +#define PLCP_HEADER_LENGTH 8 +#define RX_DESC_PACKETID_SHIFT 11 +#define RX_MAX_PACKET_ID 3 + +#define RX_DESC_VALID_FCS 0x0001 +#define RX_DESC_MATCH_RXADDR1 0x0002 +#define RX_DESC_MCAST 0x0004 +#define RX_DESC_STAINTIM 0x0008 +#define RX_DESC_VIRTUAL_BM 0x0010 +#define RX_DESC_BCAST 0x0020 +#define RX_DESC_MATCH_SSID 0x0040 +#define RX_DESC_MATCH_BSSID 0x0080 +#define RX_DESC_ENCRYPTION_MASK 0x0300 +#define RX_DESC_MEASURMENT 0x0400 +#define RX_DESC_SEQNUM_MASK 0x1800 +#define RX_DESC_MIC_FAIL 0x2000 +#define RX_DESC_DECRYPT_FAIL 0x4000 + +struct wl1251_rx_descriptor { + u32 timestamp; /* In microseconds */ + u16 length; /* Paylod length, including headers */ + u16 flags; + + /* + * 0 - 802.11 + * 1 - 802.3 + * 2 - IP + * 3 - Raw Codec + */ + u8 type; + + /* + * Received Rate: + * 0x0A - 1MBPS + * 0x14 - 2MBPS + * 0x37 - 5_5MBPS + * 0x0B - 6MBPS + * 0x0F - 9MBPS + * 0x6E - 11MBPS + * 0x0A - 12MBPS + * 0x0E - 18MBPS + * 0xDC - 22MBPS + * 0x09 - 24MBPS + * 0x0D - 36MBPS + * 0x08 - 48MBPS + * 0x0C - 54MBPS + */ + u8 rate; + + u8 mod_pre; /* Modulation and preamble */ + u8 channel; + + /* + * 0 - 2.4 Ghz + * 1 - 5 Ghz + */ + u8 band; + + s8 rssi; /* in dB */ + u8 rcpi; /* in dB */ + u8 snr; /* in dB */ +} __attribute__ ((packed)); + +void wl1251_rx(struct wl1251 *wl); + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c new file mode 100644 index 0000000..9423f22 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c @@ -0,0 +1,205 @@ +/* + * wl12xx SDIO routines + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
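The four-step RX flow described in wl1251_rx.h above boils down to: read a frame from the current half of the double buffer, acknowledge it with that buffer's trigger bit, and flip to the other half. A compact user-space model of that loop; the INTR_TRIG_RX_PROC0/1 values are the ones from the register header, everything else is a stand-in for the driver's wl1251_rx_header/wl1251_rx_body/wl1251_rx_ack.

#include <stdbool.h>
#include <stdio.h>

#define INTR_TRIG_RX_PROC0 (1u << 3)	/* ack trigger for buffer 0 */
#define INTR_TRIG_RX_PROC1 (1u << 17)	/* ack trigger for buffer 1 */

static bool rx_current_buffer;		/* which half the host reads next */

static void read_frame(int buffer)
{
	printf("read frame from rx buffer %d\n", buffer);
}

static void trigger_target(unsigned bit)
{
	printf("ack with trigger bit 0x%x\n", bit);
}

/* One RX interrupt: read the current buffer, ack it, switch buffers. */
static void handle_rx_interrupt(void)
{
	read_frame(rx_current_buffer);
	trigger_target(rx_current_buffer ? INTR_TRIG_RX_PROC1
					 : INTR_TRIG_RX_PROC0);
	rx_current_buffer = !rx_current_buffer;
}

int main(void)
{
	handle_rx_interrupt();	/* uses buffer 0 */
	handle_rx_interrupt();	/* uses buffer 1 */
	return 0;
}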
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * Copyright (C) 2005 Texas Instruments Incorporated + * Copyright (C) 2008 Google Inc + * Copyright (C) 2009 Bob Copeland (me@bobcopeland.com) + */ +#include +#include +#include +#include +#include +#include +#include + +#include "wl1251.h" +#include "wl12xx_80211.h" +#include "wl1251_reg.h" +#include "wl1251_ps.h" +#include "wl1251_io.h" +#include "wl1251_tx.h" +#include "wl1251_debugfs.h" + +#ifndef SDIO_VENDOR_ID_TI +#define SDIO_VENDOR_ID_TI 0x104c +#endif + +#ifndef SDIO_DEVICE_ID_TI_WL1251 +#define SDIO_DEVICE_ID_TI_WL1251 0x9066 +#endif + +static struct sdio_func *wl_to_func(struct wl1251 *wl) +{ + return wl->if_priv; +} + +static void wl1251_sdio_interrupt(struct sdio_func *func) +{ + struct wl1251 *wl = sdio_get_drvdata(func); + + wl1251_debug(DEBUG_IRQ, "IRQ"); + + /* FIXME should be synchronous for sdio */ + ieee80211_queue_work(wl->hw, &wl->irq_work); +} + +static const struct sdio_device_id wl1251_devices[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1251) }, + {} +}; +MODULE_DEVICE_TABLE(sdio, wl1251_devices); + + +void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len) +{ + int ret; + struct sdio_func *func = wl_to_func(wl); + + sdio_claim_host(func); + ret = sdio_memcpy_fromio(func, buf, addr, len); + if (ret) + wl1251_error("sdio read failed (%d)", ret); + sdio_release_host(func); +} + +void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len) +{ + int ret; + struct sdio_func *func = wl_to_func(wl); + + sdio_claim_host(func); + ret = sdio_memcpy_toio(func, addr, buf, len); + if (ret) + wl1251_error("sdio write failed (%d)", ret); + sdio_release_host(func); +} + +void wl1251_sdio_reset(struct wl1251 *wl) +{ +} + +static void wl1251_sdio_enable_irq(struct wl1251 *wl) +{ + struct sdio_func *func = wl_to_func(wl); + + sdio_claim_host(func); + sdio_claim_irq(func, wl1251_sdio_interrupt); + sdio_release_host(func); +} + +static void wl1251_sdio_disable_irq(struct wl1251 *wl) +{ + struct sdio_func *func = wl_to_func(wl); + + sdio_claim_host(func); + sdio_release_irq(func); + sdio_release_host(func); +} + +void wl1251_sdio_set_power(bool enable) +{ +} + +struct wl1251_if_operations wl1251_sdio_ops = { + .read = wl1251_sdio_read, + .write = wl1251_sdio_write, + .reset = wl1251_sdio_reset, + .enable_irq = wl1251_sdio_enable_irq, + .disable_irq = wl1251_sdio_disable_irq, +}; + +int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) +{ + int ret; + struct wl1251 *wl; + struct ieee80211_hw *hw; + + hw = wl1251_alloc_hw(); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + wl = hw->priv; + + sdio_claim_host(func); + ret = sdio_enable_func(func); + if (ret) + goto release; + + sdio_set_block_size(func, 512); + + SET_IEEE80211_DEV(hw, &func->dev); + wl->if_priv = func; + wl->if_ops = &wl1251_sdio_ops; + wl->set_power = wl1251_sdio_set_power; + + sdio_release_host(func); + ret = wl1251_init_ieee80211(wl); + if (ret) + goto disable; + + sdio_set_drvdata(func, wl); + return ret; + +disable: + sdio_claim_host(func); + sdio_disable_func(func); +release: + sdio_release_host(func); + return ret; +} + +static void __devexit wl1251_sdio_remove(struct sdio_func *func) +{ + struct wl1251 *wl = sdio_get_drvdata(func); + + wl1251_free_hw(wl); + + sdio_claim_host(func); + sdio_release_irq(func); + 
sdio_disable_func(func); + sdio_release_host(func); +} + +static struct sdio_driver wl1251_sdio_driver = { + .name = "wl1251_sdio", + .id_table = wl1251_devices, + .probe = wl1251_sdio_probe, + .remove = __devexit_p(wl1251_sdio_remove), +}; + +static int __init wl1251_sdio_init(void) +{ + int err; + + err = sdio_register_driver(&wl1251_sdio_driver); + if (err) + wl1251_error("failed to register sdio driver: %d", err); + return err; +} + +static void __exit wl1251_sdio_exit(void) +{ + sdio_unregister_driver(&wl1251_sdio_driver); + wl1251_notice("unloaded"); +} + +module_init(wl1251_sdio_init); +module_exit(wl1251_sdio_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Kalle Valo "); diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c new file mode 100644 index 0000000..7d966be --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_spi.c @@ -0,0 +1,349 @@ +/* + * This file is part of wl1251 + * + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,28)) +#include +#endif +#include +#include + +#include "wl1251.h" +#include "wl1251_reg.h" +#include "wl1251_spi.h" + +static irqreturn_t wl1251_irq(int irq, void *cookie) +{ + struct wl1251 *wl; + + wl1251_debug(DEBUG_IRQ, "IRQ"); + + wl = cookie; + + ieee80211_queue_work(wl->hw, &wl->irq_work); + + return IRQ_HANDLED; +} + +static struct spi_device *wl_to_spi(struct wl1251 *wl) +{ + return wl->if_priv; +} + +static void wl1251_spi_reset(struct wl1251 *wl) +{ + u8 *cmd; + struct spi_transfer t; + struct spi_message m; + + cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); + if (!cmd) { + wl1251_error("could not allocate cmd for spi reset"); + return; + } + + memset(&t, 0, sizeof(t)); + spi_message_init(&m); + + memset(cmd, 0xff, WSPI_INIT_CMD_LEN); + + t.tx_buf = cmd; + t.len = WSPI_INIT_CMD_LEN; + spi_message_add_tail(&t, &m); + + spi_sync(wl_to_spi(wl), &m); + + wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); +} + +static void wl1251_spi_wake(struct wl1251 *wl) +{ + u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd; + struct spi_transfer t; + struct spi_message m; + + cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); + if (!cmd) { + wl1251_error("could not allocate cmd for spi init"); + return; + } + + memset(crc, 0, sizeof(crc)); + memset(&t, 0, sizeof(t)); + spi_message_init(&m); + + /* + * Set WSPI_INIT_COMMAND + * the data is being send from the MSB to LSB + */ + cmd[2] = 0xff; + cmd[3] = 0xff; + cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX; + cmd[0] = 0; + cmd[7] = 0; + cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3; + cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN; + + if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0) + cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY; + else + cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY; + + cmd[5] |= WSPI_INIT_CMD_IOD 
| WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS + | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS; + + crc[0] = cmd[1]; + crc[1] = cmd[0]; + crc[2] = cmd[7]; + crc[3] = cmd[6]; + crc[4] = cmd[5]; + + cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1; + cmd[4] |= WSPI_INIT_CMD_END; + + t.tx_buf = cmd; + t.len = WSPI_INIT_CMD_LEN; + spi_message_add_tail(&t, &m); + + spi_sync(wl_to_spi(wl), &m); + + wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); +} + +static void wl1251_spi_reset_wake(struct wl1251 *wl) +{ + wl1251_spi_reset(wl); + wl1251_spi_wake(wl); +} + +static void wl1251_spi_read(struct wl1251 *wl, int addr, void *buf, + size_t len) +{ + struct spi_transfer t[3]; + struct spi_message m; + u8 *busy_buf; + u32 *cmd; + + cmd = &wl->buffer_cmd; + busy_buf = wl->buffer_busyword; + + *cmd = 0; + *cmd |= WSPI_CMD_READ; + *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; + *cmd |= addr & WSPI_CMD_BYTE_ADDR; + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = 4; + spi_message_add_tail(&t[0], &m); + + /* Busy and non busy words read */ + t[1].rx_buf = busy_buf; + t[1].len = WL1251_BUSY_WORD_LEN; + spi_message_add_tail(&t[1], &m); + + t[2].rx_buf = buf; + t[2].len = len; + spi_message_add_tail(&t[2], &m); + + spi_sync(wl_to_spi(wl), &m); + + /* FIXME: check busy words */ + + wl1251_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); + wl1251_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); +} + +static void wl1251_spi_write(struct wl1251 *wl, int addr, void *buf, + size_t len) +{ + struct spi_transfer t[2]; + struct spi_message m; + u32 *cmd; + + cmd = &wl->buffer_cmd; + + *cmd = 0; + *cmd |= WSPI_CMD_WRITE; + *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; + *cmd |= addr & WSPI_CMD_BYTE_ADDR; + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = sizeof(*cmd); + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = buf; + t[1].len = len; + spi_message_add_tail(&t[1], &m); + + spi_sync(wl_to_spi(wl), &m); + + wl1251_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); + wl1251_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); +} + +static void wl1251_spi_enable_irq(struct wl1251 *wl) +{ + return enable_irq(wl->irq); +} + +static void wl1251_spi_disable_irq(struct wl1251 *wl) +{ + return disable_irq(wl->irq); +} + +static const struct wl1251_if_operations wl1251_spi_ops = { + .read = wl1251_spi_read, + .write = wl1251_spi_write, + .reset = wl1251_spi_reset_wake, + .enable_irq = wl1251_spi_enable_irq, + .disable_irq = wl1251_spi_disable_irq, +}; + +static int __devinit wl1251_spi_probe(struct spi_device *spi) +{ + struct wl12xx_platform_data *pdata; + struct ieee80211_hw *hw; + struct wl1251 *wl; + int ret; + + pdata = spi->dev.platform_data; + if (!pdata) { + wl1251_error("no platform data"); + return -ENODEV; + } + + hw = wl1251_alloc_hw(); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + wl = hw->priv; + + SET_IEEE80211_DEV(hw, &spi->dev); + dev_set_drvdata(&spi->dev, wl); + wl->if_priv = spi; + wl->if_ops = &wl1251_spi_ops; + + /* This is the only SPI value that we need to set here, the rest + * comes from the board-peripherals file */ + spi->bits_per_word = 32; + + ret = spi_setup(spi); + if (ret < 0) { + wl1251_error("spi_setup failed"); + goto out_free; + } + + wl->set_power = pdata->set_power; + if (!wl->set_power) { + wl1251_error("set power function missing in platform data"); + return -ENODEV; + } + + wl->irq = spi->irq; + if (wl->irq < 0) { + wl1251_error("irq missing in platform 
data"); + return -ENODEV; + } + + wl->use_eeprom = pdata->use_eeprom; + + ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl); + if (ret < 0) { + wl1251_error("request_irq() failed: %d", ret); + goto out_free; + } + + set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); + + disable_irq(wl->irq); + + ret = wl1251_init_ieee80211(wl); + if (ret) + goto out_irq; + + return 0; + + out_irq: + free_irq(wl->irq, wl); + + out_free: + ieee80211_free_hw(hw); + + return ret; +} + +static int __devexit wl1251_spi_remove(struct spi_device *spi) +{ + struct wl1251 *wl = dev_get_drvdata(&spi->dev); + + free_irq(wl->irq, wl); + wl1251_free_hw(wl); + + return 0; +} + +static struct spi_driver wl1251_spi_driver = { + .driver = { + .name = "wl1251", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + + .probe = wl1251_spi_probe, + .remove = __devexit_p(wl1251_spi_remove), +}; + +static int __init wl1251_spi_init(void) +{ + int ret; + + ret = spi_register_driver(&wl1251_spi_driver); + if (ret < 0) { + wl1251_error("failed to register spi driver: %d", ret); + goto out; + } + +out: + return ret; +} + +static void __exit wl1251_spi_exit(void) +{ + spi_unregister_driver(&wl1251_spi_driver); + + wl1251_notice("unloaded"); +} + +module_init(wl1251_spi_init); +module_exit(wl1251_spi_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Kalle Valo "); diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.h b/drivers/net/wireless/wl12xx/wl1251_spi.h new file mode 100644 index 0000000..2e273a9 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_spi.h @@ -0,0 +1,61 @@ +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1251_SPI_H__ +#define __WL1251_SPI_H__ + +#include "wl1251_cmd.h" +#include "wl1251_acx.h" +#include "wl1251_reg.h" + +#define WSPI_CMD_READ 0x40000000 +#define WSPI_CMD_WRITE 0x00000000 +#define WSPI_CMD_FIXED 0x20000000 +#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000 +#define WSPI_CMD_BYTE_LENGTH_OFFSET 17 +#define WSPI_CMD_BYTE_ADDR 0x0001FFFF + +#define WSPI_INIT_CMD_CRC_LEN 5 + +#define WSPI_INIT_CMD_START 0x00 +#define WSPI_INIT_CMD_TX 0x40 +/* the extra bypass bit is sampled by the TNET as '1' */ +#define WSPI_INIT_CMD_BYPASS_BIT 0x80 +#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07 +#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80 +#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00 +#define WSPI_INIT_CMD_IOD 0x40 +#define WSPI_INIT_CMD_IP 0x20 +#define WSPI_INIT_CMD_CS 0x10 +#define WSPI_INIT_CMD_WS 0x08 +#define WSPI_INIT_CMD_WSPI 0x01 +#define WSPI_INIT_CMD_END 0x01 + +#define WSPI_INIT_CMD_LEN 8 + +#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \ + ((WL1251_BUSY_WORD_LEN - 4) / sizeof(u32)) +#define HW_ACCESS_WSPI_INIT_CMD_MASK 0 + +#endif /* __WL1251_SPI_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c new file mode 100644 index 0000000..c822318 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_tx.c @@ -0,0 +1,560 @@ +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include + +#include "wl1251.h" +#include "wl1251_reg.h" +#include "wl1251_tx.h" +#include "wl1251_ps.h" +#include "wl1251_io.h" + +static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count) +{ + int used, data_in_count; + + data_in_count = wl->data_in_count; + + if (data_in_count < data_out_count) + /* data_in_count has wrapped */ + data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1; + + used = data_in_count - data_out_count; + + WARN_ON(used < 0); + WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM); + + if (used >= DP_TX_PACKET_RING_CHUNK_NUM) + return true; + else + return false; +} + +static int wl1251_tx_path_status(struct wl1251 *wl) +{ + u32 status, addr, data_out_count; + bool busy; + + addr = wl->data_path->tx_control_addr; + status = wl1251_mem_read32(wl, addr); + data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK; + busy = wl1251_tx_double_buffer_busy(wl, data_out_count); + + if (busy) + return -EBUSY; + + return 0; +} + +static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb) +{ + int i; + + for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) + if (wl->tx_frames[i] == NULL) { + wl->tx_frames[i] = skb; + return i; + } + + return -EBUSY; +} + +static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr, + struct ieee80211_tx_info *control, u16 fc) +{ + *(u16 *)&tx_hdr->control = 0; + + tx_hdr->control.rate_policy = 0; + + /* 802.11 packets */ + tx_hdr->control.packet_type = 0; + + if (control->flags & IEEE80211_TX_CTL_NO_ACK) + tx_hdr->control.ack_policy = 1; + + tx_hdr->control.tx_complete = 1; + + if ((fc & IEEE80211_FTYPE_DATA) && + ((fc & IEEE80211_STYPE_QOS_DATA) || + (fc & IEEE80211_STYPE_QOS_NULLFUNC))) + tx_hdr->control.qos = 1; +} + +/* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). 
*/ +#define MAX_MSDU_SECURITY_LENGTH 16 +#define MAX_MPDU_SECURITY_LENGTH 16 +#define WLAN_QOS_HDR_LEN 26 +#define MAX_MPDU_HEADER_AND_SECURITY (MAX_MPDU_SECURITY_LENGTH + \ + WLAN_QOS_HDR_LEN) +#define HW_BLOCK_SIZE 252 +static void wl1251_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr) +{ + u16 payload_len, frag_threshold, mem_blocks; + u16 num_mpdus, mem_blocks_per_frag; + + frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD; + tx_hdr->frag_threshold = cpu_to_le16(frag_threshold); + + payload_len = tx_hdr->length + MAX_MSDU_SECURITY_LENGTH; + + if (payload_len > frag_threshold) { + mem_blocks_per_frag = + ((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) / + HW_BLOCK_SIZE) + 1; + num_mpdus = payload_len / frag_threshold; + mem_blocks = num_mpdus * mem_blocks_per_frag; + payload_len -= num_mpdus * frag_threshold; + num_mpdus++; + + } else { + mem_blocks_per_frag = 0; + mem_blocks = 0; + num_mpdus = 1; + } + + mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1; + + if (num_mpdus > 1) + mem_blocks += min(num_mpdus, mem_blocks_per_frag); + + tx_hdr->num_mem_blocks = mem_blocks; +} + +static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb, + struct ieee80211_tx_info *control) +{ + struct tx_double_buffer_desc *tx_hdr; + struct ieee80211_rate *rate; + int id; + u16 fc; + + if (!skb) + return -EINVAL; + + id = wl1251_tx_id(wl, skb); + if (id < 0) + return id; + + fc = *(u16 *)skb->data; + tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb, + sizeof(*tx_hdr)); + + tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr)); + rate = ieee80211_get_tx_rate(wl->hw, control); + tx_hdr->rate = cpu_to_le16(rate->hw_value); + tx_hdr->expiry_time = cpu_to_le32(1 << 16); + tx_hdr->id = id; + + tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb)); + + wl1251_tx_control(tx_hdr, control, fc); + wl1251_tx_frag_block_num(tx_hdr); + + return 0; +} + +/* We copy the packet to the target */ +static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb, + struct ieee80211_tx_info *control) +{ + struct tx_double_buffer_desc *tx_hdr; + int len; + u32 addr; + + if (!skb) + return -EINVAL; + + tx_hdr = (struct tx_double_buffer_desc *) skb->data; + + if (control->control.hw_key && + control->control.hw_key->alg == ALG_TKIP) { + int hdrlen; + u16 fc; + u8 *pos; + + fc = *(u16 *)(skb->data + sizeof(*tx_hdr)); + tx_hdr->length += WL1251_TKIP_IV_SPACE; + + hdrlen = ieee80211_hdrlen(fc); + + pos = skb_push(skb, WL1251_TKIP_IV_SPACE); + memmove(pos, pos + WL1251_TKIP_IV_SPACE, + sizeof(*tx_hdr) + hdrlen); + } + + /* Revisit. This is a workaround for getting non-aligned packets. + This happens at least with EAPOL packets from the user space. + Our DMA requires packets to be aligned on a 4-byte boundary. 
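The driver's fix-up for that DMA constraint follows; in isolation the arithmetic is just this: for a buffer address a, (4 - a) & 3 gives how many bytes the payload must be shifted forward so it lands on a 4-byte boundary. A standalone check of that expression (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Bytes to shift an address forward so it becomes 4-byte aligned. */
static unsigned align4_offset(uintptr_t addr)
{
	return (unsigned)((4 - addr) & 0x03);
}

int main(void)
{
	uintptr_t samples[] = { 0x1000, 0x1001, 0x1002, 0x1003 };
	unsigned i;

	for (i = 0; i < 4; i++)
		printf("0x%lx -> shift by %u, aligned to 0x%lx\n",
		       (unsigned long)samples[i],
		       align4_offset(samples[i]),
		       (unsigned long)(samples[i] + align4_offset(samples[i])));
	/* shifts are 0, 3, 2, 1; every resulting address is a multiple of 4 */
	return 0;
}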
+ */ + if (unlikely((long)skb->data & 0x03)) { + int offset = (4 - (long)skb->data) & 0x03; + wl1251_debug(DEBUG_TX, "skb offset %d", offset); + + /* check whether the current skb can be used */ + if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) { + unsigned char *src = skb->data; + + /* align the buffer on a 4-byte boundary */ + skb_reserve(skb, offset); + memmove(skb->data, src, skb->len); + tx_hdr = (struct tx_double_buffer_desc *) skb->data; + } else { + wl1251_info("No handler, fixme!"); + return -EINVAL; + } + } + + /* Our skb->data at this point includes the HW header */ + len = WL1251_TX_ALIGN(skb->len); + + if (wl->data_in_count & 0x1) + addr = wl->data_path->tx_packet_ring_addr + + wl->data_path->tx_packet_ring_chunk_size; + else + addr = wl->data_path->tx_packet_ring_addr; + + wl1251_mem_write(wl, addr, skb->data, len); + + wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x " + "queue %d", tx_hdr->id, skb, tx_hdr->length, + tx_hdr->rate, tx_hdr->xmit_queue); + + return 0; +} + +static void wl1251_tx_trigger(struct wl1251 *wl) +{ + u32 data, addr; + + if (wl->data_in_count & 0x1) { + addr = ACX_REG_INTERRUPT_TRIG_H; + data = INTR_TRIG_TX_PROC1; + } else { + addr = ACX_REG_INTERRUPT_TRIG; + data = INTR_TRIG_TX_PROC0; + } + + wl1251_reg_write32(wl, addr, data); + + /* Bumping data in */ + wl->data_in_count = (wl->data_in_count + 1) & + TX_STATUS_DATA_OUT_COUNT_MASK; +} + +/* caller must hold wl->mutex */ +static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info; + int ret = 0; + u8 idx; + + info = IEEE80211_SKB_CB(skb); + + if (info->control.hw_key) { + idx = info->control.hw_key->hw_key_idx; + if (unlikely(wl->default_key != idx)) { + ret = wl1251_acx_default_key(wl, idx); + if (ret < 0) + return ret; + } + } + + ret = wl1251_tx_path_status(wl); + if (ret < 0) + return ret; + + ret = wl1251_tx_fill_hdr(wl, skb, info); + if (ret < 0) + return ret; + + ret = wl1251_tx_send_packet(wl, skb, info); + if (ret < 0) + return ret; + + wl1251_tx_trigger(wl); + + return ret; +} + +void wl1251_tx_work(struct work_struct *work) +{ + struct wl1251 *wl = container_of(work, struct wl1251, tx_work); + struct sk_buff *skb; + bool woken_up = false; + int ret; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state == WL1251_STATE_OFF)) + goto out; + + while ((skb = skb_dequeue(&wl->tx_queue))) { + if (!woken_up) { + ret = wl1251_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + woken_up = true; + } + + ret = wl1251_tx_frame(wl, skb); + if (ret == -EBUSY) { + /* firmware buffer is full, stop queues */ + wl1251_debug(DEBUG_TX, "tx_work: fw buffer full, " + "stop queues"); + ieee80211_stop_queues(wl->hw); + wl->tx_queue_stopped = true; + skb_queue_head(&wl->tx_queue, skb); + goto out; + } else if (ret < 0) { + dev_kfree_skb(skb); + goto out; + } + } + +out: + if (woken_up) + wl1251_ps_elp_sleep(wl); + + mutex_unlock(&wl->mutex); +} + +static const char *wl1251_tx_parse_status(u8 status) +{ + /* 8 bit status field, one character per bit plus null */ + static char buf[9]; + int i = 0; + + memset(buf, 0, sizeof(buf)); + + if (status & TX_DMA_ERROR) + buf[i++] = 'm'; + if (status & TX_DISABLED) + buf[i++] = 'd'; + if (status & TX_RETRY_EXCEEDED) + buf[i++] = 'r'; + if (status & TX_TIMEOUT) + buf[i++] = 't'; + if (status & TX_KEY_NOT_FOUND) + buf[i++] = 'k'; + if (status & TX_ENCRYPT_FAIL) + buf[i++] = 'e'; + if (status & TX_UNAVAILABLE_PRIORITY) + buf[i++] = 'p'; + + /* bit 0 is unused apparently */ + + return buf; +} + +static void 
wl1251_tx_packet_cb(struct wl1251 *wl, + struct tx_result *result) +{ + struct ieee80211_tx_info *info; + struct sk_buff *skb; + int hdrlen, ret; + u8 *frame; + + skb = wl->tx_frames[result->id]; + if (skb == NULL) { + wl1251_error("SKB for packet %d is NULL", result->id); + return; + } + + info = IEEE80211_SKB_CB(skb); + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && + (result->status == TX_SUCCESS)) + info->flags |= IEEE80211_TX_STAT_ACK; + + info->status.rates[0].count = result->ack_failures + 1; + wl->stats.retry_count += result->ack_failures; + + /* + * We have to remove our private TX header before pushing + * the skb back to mac80211. + */ + frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc)); + if (info->control.hw_key && + info->control.hw_key->alg == ALG_TKIP) { + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen); + skb_pull(skb, WL1251_TKIP_IV_SPACE); + } + + wl1251_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x" + " status 0x%x (%s)", + result->id, skb, result->ack_failures, result->rate, + result->status, wl1251_tx_parse_status(result->status)); + + + ieee80211_tx_status(wl->hw, skb); + + wl->tx_frames[result->id] = NULL; + + if (wl->tx_queue_stopped) { + wl1251_debug(DEBUG_TX, "cb: queue was stopped"); + + skb = skb_dequeue(&wl->tx_queue); + + /* The skb can be NULL because tx_work might have been + scheduled before the queue was stopped making the + queue empty */ + + if (skb) { + ret = wl1251_tx_frame(wl, skb); + if (ret == -EBUSY) { + /* firmware buffer is still full */ + wl1251_debug(DEBUG_TX, "cb: fw buffer " + "still full"); + skb_queue_head(&wl->tx_queue, skb); + return; + } else if (ret < 0) { + dev_kfree_skb(skb); + return; + } + } + + wl1251_debug(DEBUG_TX, "cb: waking queues"); + ieee80211_wake_queues(wl->hw); + wl->tx_queue_stopped = false; + } +} + +/* Called upon reception of a TX complete interrupt */ +void wl1251_tx_complete(struct wl1251 *wl) +{ + int i, result_index, num_complete = 0; + struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr; + + if (unlikely(wl->state != WL1251_STATE_ON)) + return; + + /* First we read the result */ + wl1251_mem_read(wl, wl->data_path->tx_complete_addr, + result, sizeof(result)); + + result_index = wl->next_tx_complete; + + for (i = 0; i < ARRAY_SIZE(result); i++) { + result_ptr = &result[result_index]; + + if (result_ptr->done_1 == 1 && + result_ptr->done_2 == 1) { + wl1251_tx_packet_cb(wl, result_ptr); + + result_ptr->done_1 = 0; + result_ptr->done_2 = 0; + + result_index = (result_index + 1) & + (FW_TX_CMPLT_BLOCK_SIZE - 1); + num_complete++; + } else { + break; + } + } + + /* Every completed frame needs to be acknowledged */ + if (num_complete) { + /* + * If we've wrapped, we have to clear + * the results in 2 steps. 
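The write-back that follows has to respect the same wrap-around as the read side: when the processed range crosses the end of the 16-entry result ring, it must be cleared in two chunks. A small standalone sketch of how the two (offset, count) chunks fall out of next_tx_complete and num_complete (userspace C; only the arithmetic is shown, the wl1251_mem_write() calls are left out):

#include <stdio.h>

#define RING_SIZE 16   /* FW_TX_CMPLT_BLOCK_SIZE */

/*
 * Given the first completed index and the number of completed entries,
 * print the (offset, count) chunks the host writes back to clear the
 * firmware's cyclic result ring.
 */
static void clear_chunks(unsigned first, unsigned num_complete)
{
        unsigned end = (first + num_complete) & (RING_SIZE - 1);

        if (num_complete == 0)
                return;

        if (end > first) {
                /* no wrap: one contiguous write */
                printf("write %u entries at %u\n", num_complete, first);
        } else if (end < first) {
                /* wrapped: tail of the ring, then the head */
                printf("write %u entries at %u\n", RING_SIZE - first, first);
                printf("write %u entries at 0\n",
                       num_complete - (RING_SIZE - first));
        } else {
                /* exactly one full lap: rewrite the whole ring */
                printf("write %u entries at 0\n", RING_SIZE);
        }
}

int main(void)
{
        clear_chunks(3, 5);    /* one write: 5 entries at 3 */
        clear_chunks(14, 5);   /* two writes: 2 at 14, then 3 at 0 */
        clear_chunks(6, 16);   /* full ring */
        return 0;
}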
+ */ + if (result_index > wl->next_tx_complete) { + /* Only 1 write is needed */ + wl1251_mem_write(wl, + wl->data_path->tx_complete_addr + + (wl->next_tx_complete * + sizeof(struct tx_result)), + &result[wl->next_tx_complete], + num_complete * + sizeof(struct tx_result)); + + + } else if (result_index < wl->next_tx_complete) { + /* 2 writes are needed */ + wl1251_mem_write(wl, + wl->data_path->tx_complete_addr + + (wl->next_tx_complete * + sizeof(struct tx_result)), + &result[wl->next_tx_complete], + (FW_TX_CMPLT_BLOCK_SIZE - + wl->next_tx_complete) * + sizeof(struct tx_result)); + + wl1251_mem_write(wl, + wl->data_path->tx_complete_addr, + result, + (num_complete - + FW_TX_CMPLT_BLOCK_SIZE + + wl->next_tx_complete) * + sizeof(struct tx_result)); + + } else { + /* We have to write the whole array */ + wl1251_mem_write(wl, + wl->data_path->tx_complete_addr, + result, + FW_TX_CMPLT_BLOCK_SIZE * + sizeof(struct tx_result)); + } + + } + + wl->next_tx_complete = result_index; +} + +/* caller must hold wl->mutex */ +void wl1251_tx_flush(struct wl1251 *wl) +{ + int i; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + + /* TX failure */ +/* control->flags = 0; FIXME */ + + while ((skb = skb_dequeue(&wl->tx_queue))) { + info = IEEE80211_SKB_CB(skb); + + wl1251_debug(DEBUG_TX, "flushing skb 0x%p", skb); + + if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) + continue; + + ieee80211_tx_status(wl->hw, skb); + } + + for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) + if (wl->tx_frames[i] != NULL) { + skb = wl->tx_frames[i]; + info = IEEE80211_SKB_CB(skb); + + if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) + continue; + + ieee80211_tx_status(wl->hw, skb); + wl->tx_frames[i] = NULL; + } +} diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h new file mode 100644 index 0000000..55856c6 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1251_tx.h @@ -0,0 +1,233 @@ +/* + * This file is part of wl1251 + * + * Copyright (c) 1998-2007 Texas Instruments Incorporated + * Copyright (C) 2008 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1251_TX_H__ +#define __WL1251_TX_H__ + +#include +#include "wl1251_acx.h" + +/* + * + * TX PATH + * + * The Tx path uses a double buffer and a tx_control structure, each located + * at a fixed address in the device's memory. On startup, the host retrieves + * the pointers to these addresses. A double buffer allows for continuous data + * flow towards the device. The host keeps track of which buffer is available + * and alternates between these two buffers on a per packet basis. + * + * The size of each of the two buffers is large enough to hold the longest + * 802.3 packet - maximum size Ethernet packet + header + descriptor. 
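The per-packet alternation described here reduces to the parity of data_in_count: an even count selects the first half of the double buffer and the TX_PROC0 trigger, an odd count the second half and TX_PROC1, which is what wl1251_tx_send_packet() and wl1251_tx_trigger() implement. A compressed userspace sketch of that bookkeeping (the 1600-byte chunk size matches DP_TX_PACKET_RING_CHUNK_SIZE; the struct and function names are illustrative):

#include <stdio.h>

#define DATA_OUT_COUNT_MASK 0xf

struct slot {
        unsigned ring_offset;   /* 0 or one chunk into the double buffer */
        unsigned trigger;       /* which TX_PROC interrupt to raise: 0 or 1 */
};

/* Pick the buffer half and trigger for the next packet, then bump the counter. */
static struct slot next_slot(unsigned *data_in_count, unsigned chunk_size)
{
        struct slot s = {
                .ring_offset = (*data_in_count & 1) ? chunk_size : 0,
                .trigger     = (*data_in_count & 1) ? 1 : 0,
        };

        *data_in_count = (*data_in_count + 1) & DATA_OUT_COUNT_MASK;
        return s;
}

int main(void)
{
        unsigned data_in_count = 0;

        for (int i = 0; i < 4; i++) {
                struct slot s = next_slot(&data_in_count, 1600);
                printf("packet %d -> offset %u, TX_PROC%u\n",
                       i, s.ring_offset, s.trigger);
        }
        return 0;
}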
+ * TX complete indication will be received a-synchronously in a TX done cyclic + * buffer which is composed of 16 tx_result descriptors structures and is used + * in a cyclic manner. + * + * The TX (HOST) procedure is as follows: + * 1. Read the Tx path status, that will give the data_out_count. + * 2. goto 1, if not possible. + * i.e. if data_in_count - data_out_count >= HwBuffer size (2 for double + * buffer). + * 3. Copy the packet (preceded by double_buffer_desc), if possible. + * i.e. if data_in_count - data_out_count < HwBuffer size (2 for double + * buffer). + * 4. increment data_in_count. + * 5. Inform the firmware by generating a firmware internal interrupt. + * 6. FW will increment data_out_count after it reads the buffer. + * + * The TX Complete procedure: + * 1. To get a TX complete indication the host enables the tx_complete flag in + * the TX descriptor Structure. + * 2. For each packet with a Tx Complete field set, the firmware adds the + * transmit results to the cyclic buffer (txDoneRing) and sets both done_1 + * and done_2 to 1 to indicate driver ownership. + * 3. The firmware sends a Tx Complete interrupt to the host to trigger the + * host to process the new data. Note: interrupt will be send per packet if + * TX complete indication was requested in tx_control or per crossing + * aggregation threshold. + * 4. After receiving the Tx Complete interrupt, the host reads the + * TxDescriptorDone information in a cyclic manner and clears both done_1 + * and done_2 fields. + * + */ + +#define TX_COMPLETE_REQUIRED_BIT 0x80 +#define TX_STATUS_DATA_OUT_COUNT_MASK 0xf + +#define WL1251_TX_ALIGN_TO 4 +#define WL1251_TX_ALIGN(len) (((len) + WL1251_TX_ALIGN_TO - 1) & \ + ~(WL1251_TX_ALIGN_TO - 1)) +#define WL1251_TKIP_IV_SPACE 4 + +struct tx_control { + /* Rate Policy (class) index */ + unsigned rate_policy:3; + + /* When set, no ack policy is expected */ + unsigned ack_policy:1; + + /* + * Packet type: + * 0 -> 802.11 + * 1 -> 802.3 + * 2 -> IP + * 3 -> raw codec + */ + unsigned packet_type:2; + + /* If set, this is a QoS-Null or QoS-Data frame */ + unsigned qos:1; + + /* + * If set, the target triggers the tx complete INT + * upon frame sending completion. + */ + unsigned tx_complete:1; + + /* 2 bytes padding before packet header */ + unsigned xfer_pad:1; + + unsigned reserved:7; +} __attribute__ ((packed)); + + +struct tx_double_buffer_desc { + /* Length of payload, including headers. */ + u16 length; + + /* + * A bit mask that specifies the initial rate to be used + * Possible values are: + * 0x0001 - 1Mbits + * 0x0002 - 2Mbits + * 0x0004 - 5.5Mbits + * 0x0008 - 6Mbits + * 0x0010 - 9Mbits + * 0x0020 - 11Mbits + * 0x0040 - 12Mbits + * 0x0080 - 18Mbits + * 0x0100 - 22Mbits + * 0x0200 - 24Mbits + * 0x0400 - 36Mbits + * 0x0800 - 48Mbits + * 0x1000 - 54Mbits + */ + u16 rate; + + /* Time in us that a packet can spend in the target */ + u32 expiry_time; + + /* index of the TX queue used for this packet */ + u8 xmit_queue; + + /* Used to identify a packet */ + u8 id; + + struct tx_control control; + + /* + * The FW should cut the packet into fragments + * of this size. 
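WL1251_TX_ALIGN above is the usual round-up-to-a-multiple-of-four idiom; a tiny check of what it produces (userspace C):

#include <stdio.h>

#define WL1251_TX_ALIGN_TO 4
#define WL1251_TX_ALIGN(len) (((len) + WL1251_TX_ALIGN_TO - 1) & \
                              ~(WL1251_TX_ALIGN_TO - 1))

int main(void)
{
        /* lengths already on a 4-byte boundary are unchanged, others round up */
        printf("%d %d %d %d\n",
               WL1251_TX_ALIGN(60),   /* 60 */
               WL1251_TX_ALIGN(61),   /* 64 */
               WL1251_TX_ALIGN(63),   /* 64 */
               WL1251_TX_ALIGN(64));  /* 64 */
        return 0;
}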
+ */ + u16 frag_threshold; + + /* Numbers of HW queue blocks to be allocated */ + u8 num_mem_blocks; + + u8 reserved; +} __attribute__ ((packed)); + +enum { + TX_SUCCESS = 0, + TX_DMA_ERROR = BIT(7), + TX_DISABLED = BIT(6), + TX_RETRY_EXCEEDED = BIT(5), + TX_TIMEOUT = BIT(4), + TX_KEY_NOT_FOUND = BIT(3), + TX_ENCRYPT_FAIL = BIT(2), + TX_UNAVAILABLE_PRIORITY = BIT(1), +}; + +struct tx_result { + /* + * Ownership synchronization between the host and + * the firmware. If done_1 and done_2 are cleared, + * owned by the FW (no info ready). + */ + u8 done_1; + + /* same as double_buffer_desc->id */ + u8 id; + + /* + * Total air access duration consumed by this + * packet, including all retries and overheads. + */ + u16 medium_usage; + + /* Total media delay (from 1st EDCA AIFS counter until TX Complete). */ + u32 medium_delay; + + /* Time between host xfer and tx complete */ + u32 fw_hnadling_time; + + /* The LS-byte of the last TKIP sequence number. */ + u8 lsb_seq_num; + + /* Retry count */ + u8 ack_failures; + + /* At which rate we got a ACK */ + u16 rate; + + u16 reserved; + + /* TX_* */ + u8 status; + + /* See done_1 */ + u8 done_2; +} __attribute__ ((packed)); + +static inline int wl1251_tx_get_queue(int queue) +{ + switch (queue) { + case 0: + return QOS_AC_VO; + case 1: + return QOS_AC_VI; + case 2: + return QOS_AC_BE; + case 3: + return QOS_AC_BK; + default: + return QOS_AC_BE; + } +} + +void wl1251_tx_work(struct work_struct *work); +void wl1251_tx_complete(struct wl1251 *wl); +void wl1251_tx_flush(struct wl1251 *wl); + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h new file mode 100644 index 0000000..97ea509 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271.h @@ -0,0 +1,497 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_H__ +#define __WL1271_H__ + +#include +#include +#include +#include +#include +#include + +#include "wl1271_conf.h" + +#define DRIVER_NAME "wl1271" +#define DRIVER_PREFIX DRIVER_NAME ": " + +enum { + DEBUG_NONE = 0, + DEBUG_IRQ = BIT(0), + DEBUG_SPI = BIT(1), + DEBUG_BOOT = BIT(2), + DEBUG_MAILBOX = BIT(3), + DEBUG_TESTMODE = BIT(4), + DEBUG_EVENT = BIT(5), + DEBUG_TX = BIT(6), + DEBUG_RX = BIT(7), + DEBUG_SCAN = BIT(8), + DEBUG_CRYPT = BIT(9), + DEBUG_PSM = BIT(10), + DEBUG_MAC80211 = BIT(11), + DEBUG_CMD = BIT(12), + DEBUG_ACX = BIT(13), + DEBUG_ALL = ~0, +}; + +#define DEBUG_LEVEL (DEBUG_NONE) + +#define DEBUG_DUMP_LIMIT 1024 + +#define wl1271_error(fmt, arg...) \ + printk(KERN_ERR DRIVER_PREFIX "ERROR " fmt "\n", ##arg) + +#define wl1271_warning(fmt, arg...) \ + printk(KERN_WARNING DRIVER_PREFIX "WARNING " fmt "\n", ##arg) + +#define wl1271_notice(fmt, arg...) 
\ + printk(KERN_INFO DRIVER_PREFIX fmt "\n", ##arg) + +#define wl1271_info(fmt, arg...) \ + printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg) + +#define wl1271_debug(level, fmt, arg...) \ + do { \ + if (level & DEBUG_LEVEL) \ + printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg); \ + } while (0) + +#define wl1271_dump(level, prefix, buf, len) \ + do { \ + if (level & DEBUG_LEVEL) \ + print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, \ + min_t(size_t, len, DEBUG_DUMP_LIMIT), \ + 0); \ + } while (0) + +#define wl1271_dump_ascii(level, prefix, buf, len) \ + do { \ + if (level & DEBUG_LEVEL) \ + print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, \ + min_t(size_t, len, DEBUG_DUMP_LIMIT), \ + true); \ + } while (0) + +#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \ + CFG_BSSID_FILTER_EN | \ + CFG_MC_FILTER_EN) + +#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \ + CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \ + CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ + CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) + +#define WL1271_FW_NAME "wl1271-fw.bin" +#define WL1271_NVS_NAME "wl1271-nvs.bin" + +/* NVS data structure */ +#define WL1271_NVS_SECTION_SIZE 468 + +#define WL1271_NVS_GENERAL_PARAMS_SIZE 57 +#define WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED \ + (WL1271_NVS_GENERAL_PARAMS_SIZE + 1) +#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE 17 +#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED \ + (WL1271_NVS_STAT_RADIO_PARAMS_SIZE + 1) +#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE 65 +#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED \ + (WL1271_NVS_DYN_RADIO_PARAMS_SIZE + 1) +#define WL1271_NVS_FEM_COUNT 2 +#define WL1271_NVS_INI_SPARE_SIZE 124 + +struct wl1271_nvs_file { + /* NVS section */ + u8 nvs[WL1271_NVS_SECTION_SIZE]; + + /* INI section */ + u8 general_params[WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED]; + u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED]; + u8 dyn_radio_params[WL1271_NVS_FEM_COUNT] + [WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED]; + u8 ini_spare[WL1271_NVS_INI_SPARE_SIZE]; +} __attribute__ ((packed)); + +/* + * Enable/disable 802.11a support for WL1273 + */ +#undef WL1271_80211A_ENABLED + +/* + * FIXME: for the wl1271, a busy word count of 1 here will result in a more + * optimal SPI interface. There is some SPI bug however, causing RXS time outs + * with this mode occasionally on boot, so lets have three for now. A value of + * three should make sure, that the chipset will always be ready, though this + * will impact throughput and latencies slightly. 
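The wl1271_debug()/wl1271_dump() macros only emit a message when its level bit is set in the constant DEBUG_LEVEL mask; with the default DEBUG_NONE the branch is dead code and disappears at compile time. A userspace analogue of that gating, using the same GNU-style variadic macro form and the driver's DEBUG_TX/DEBUG_RX bit positions (the rest is illustrative):

#include <stdio.h>

#define DEBUG_TX (1 << 6)
#define DEBUG_RX (1 << 7)

/* compiled-in mask: only TX messages get through */
#define DEBUG_LEVEL (DEBUG_TX)

#define debug(level, fmt, arg...)                                \
        do {                                                     \
                if ((level) & DEBUG_LEVEL)                       \
                        printf("wl1271: " fmt "\n", ##arg);      \
        } while (0)

int main(void)
{
        debug(DEBUG_TX, "tx id %u", 3u);    /* printed */
        debug(DEBUG_RX, "rx len %u", 60u);  /* dropped by the constant mask */
        return 0;
}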
+ */ +#define WL1271_BUSY_WORD_CNT 3 +#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32)) + +#define WL1271_ELP_HW_STATE_ASLEEP 0 +#define WL1271_ELP_HW_STATE_IRQ 1 + +#define WL1271_DEFAULT_BEACON_INT 100 +#define WL1271_DEFAULT_DTIM_PERIOD 1 + +#define ACX_TX_DESCRIPTORS 32 + +enum wl1271_state { + WL1271_STATE_OFF, + WL1271_STATE_ON, + WL1271_STATE_PLT, +}; + +enum wl1271_partition_type { + PART_DOWN, + PART_WORK, + PART_DRPW, + + PART_TABLE_LEN +}; + +struct wl1271_partition { + u32 size; + u32 start; +}; + +struct wl1271_partition_set { + struct wl1271_partition mem; + struct wl1271_partition reg; + struct wl1271_partition mem2; + struct wl1271_partition mem3; +}; + +struct wl1271; + +/* FIXME: I'm not sure about this structure name */ +struct wl1271_chip { + u32 id; + char fw_ver[21]; +}; + +struct wl1271_stats { + struct acx_statistics *fw_stats; + unsigned long fw_stats_update; + + unsigned int retry_count; + unsigned int excessive_retries; +}; + +struct wl1271_debugfs { + struct dentry *rootdir; + struct dentry *fw_statistics; + + struct dentry *tx_internal_desc_overflow; + + struct dentry *rx_out_of_mem; + struct dentry *rx_hdr_overflow; + struct dentry *rx_hw_stuck; + struct dentry *rx_dropped; + struct dentry *rx_fcs_err; + struct dentry *rx_xfr_hint_trig; + struct dentry *rx_path_reset; + struct dentry *rx_reset_counter; + + struct dentry *dma_rx_requested; + struct dentry *dma_rx_errors; + struct dentry *dma_tx_requested; + struct dentry *dma_tx_errors; + + struct dentry *isr_cmd_cmplt; + struct dentry *isr_fiqs; + struct dentry *isr_rx_headers; + struct dentry *isr_rx_mem_overflow; + struct dentry *isr_rx_rdys; + struct dentry *isr_irqs; + struct dentry *isr_tx_procs; + struct dentry *isr_decrypt_done; + struct dentry *isr_dma0_done; + struct dentry *isr_dma1_done; + struct dentry *isr_tx_exch_complete; + struct dentry *isr_commands; + struct dentry *isr_rx_procs; + struct dentry *isr_hw_pm_mode_changes; + struct dentry *isr_host_acknowledges; + struct dentry *isr_pci_pm; + struct dentry *isr_wakeups; + struct dentry *isr_low_rssi; + + struct dentry *wep_addr_key_count; + struct dentry *wep_default_key_count; + /* skipping wep.reserved */ + struct dentry *wep_key_not_found; + struct dentry *wep_decrypt_fail; + struct dentry *wep_packets; + struct dentry *wep_interrupt; + + struct dentry *pwr_ps_enter; + struct dentry *pwr_elp_enter; + struct dentry *pwr_missing_bcns; + struct dentry *pwr_wake_on_host; + struct dentry *pwr_wake_on_timer_exp; + struct dentry *pwr_tx_with_ps; + struct dentry *pwr_tx_without_ps; + struct dentry *pwr_rcvd_beacons; + struct dentry *pwr_power_save_off; + struct dentry *pwr_enable_ps; + struct dentry *pwr_disable_ps; + struct dentry *pwr_fix_tsf_ps; + /* skipping cont_miss_bcns_spread for now */ + struct dentry *pwr_rcvd_awake_beacons; + + struct dentry *mic_rx_pkts; + struct dentry *mic_calc_failure; + + struct dentry *aes_encrypt_fail; + struct dentry *aes_decrypt_fail; + struct dentry *aes_encrypt_packets; + struct dentry *aes_decrypt_packets; + struct dentry *aes_encrypt_interrupt; + struct dentry *aes_decrypt_interrupt; + + struct dentry *event_heart_beat; + struct dentry *event_calibration; + struct dentry *event_rx_mismatch; + struct dentry *event_rx_mem_empty; + struct dentry *event_rx_pool; + struct dentry *event_oom_late; + struct dentry *event_phy_transmit_error; + struct dentry *event_tx_stuck; + + struct dentry *ps_pspoll_timeouts; + struct dentry *ps_upsd_timeouts; + struct dentry *ps_upsd_max_sptime; + struct dentry 
*ps_upsd_max_apturn; + struct dentry *ps_pspoll_max_apturn; + struct dentry *ps_pspoll_utilization; + struct dentry *ps_upsd_utilization; + + struct dentry *rxpipe_rx_prep_beacon_drop; + struct dentry *rxpipe_descr_host_int_trig_rx_data; + struct dentry *rxpipe_beacon_buffer_thres_host_int_trig_rx_data; + struct dentry *rxpipe_missed_beacon_host_int_trig_rx_data; + struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data; + + struct dentry *tx_queue_len; + + struct dentry *retry_count; + struct dentry *excessive_retries; + struct dentry *gpio_power; +}; + +#define NUM_TX_QUEUES 4 +#define NUM_RX_PKT_DESC 8 + +/* FW status registers */ +struct wl1271_fw_status { + __le32 intr; + u8 fw_rx_counter; + u8 drv_rx_counter; + u8 reserved; + u8 tx_results_counter; + __le32 rx_pkt_descs[NUM_RX_PKT_DESC]; + __le32 tx_released_blks[NUM_TX_QUEUES]; + __le32 fw_localtime; + __le32 padding[2]; +} __attribute__ ((packed)); + +struct wl1271_rx_mem_pool_addr { + u32 addr; + u32 addr_extra; +}; + +struct wl1271_scan { + u8 state; + u8 ssid[IW_ESSID_MAX_SIZE+1]; + size_t ssid_len; + u8 active; + u8 high_prio; + u8 probe_requests; +}; + +struct wl1271 { + struct ieee80211_hw *hw; + bool mac80211_registered; + + struct spi_device *spi; + + void (*set_power)(bool enable); + int irq; + + spinlock_t wl_lock; + + enum wl1271_state state; + struct mutex mutex; + +#define WL1271_FLAG_STA_RATES_CHANGED (0) +#define WL1271_FLAG_STA_ASSOCIATED (1) +#define WL1271_FLAG_JOINED (2) +#define WL1271_FLAG_GPIO_POWER (3) +#define WL1271_FLAG_TX_QUEUE_STOPPED (4) +#define WL1271_FLAG_SCANNING (5) +#define WL1271_FLAG_IN_ELP (6) +#define WL1271_FLAG_PSM (7) +#define WL1271_FLAG_PSM_REQUESTED (8) + unsigned long flags; + + struct wl1271_partition_set part; + + struct wl1271_chip chip; + + int cmd_box_addr; + int event_box_addr; + + u8 *fw; + size_t fw_len; + struct wl1271_nvs_file *nvs; + + u8 bssid[ETH_ALEN]; + u8 mac_addr[ETH_ALEN]; + u8 bss_type; + u8 ssid[IW_ESSID_MAX_SIZE + 1]; + u8 ssid_len; + int channel; + + struct wl1271_acx_mem_map *target_mem_map; + + /* Accounting for allocated / available TX blocks on HW */ + u32 tx_blocks_freed[NUM_TX_QUEUES]; + u32 tx_blocks_available; + u8 tx_results_count; + + /* Transmitted TX packets counter for chipset interface */ + int tx_packets_count; + + /* Time-offset between host and chipset clocks */ + int time_offset; + + /* Session counter for the chipset */ + int session_counter; + + /* Frames scheduled for transmission, not handled yet */ + struct sk_buff_head tx_queue; + + struct work_struct tx_work; + + /* Pending TX frames */ + struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS]; + + /* Security sequence number counters */ + u8 tx_security_last_seq; + u16 tx_security_seq_16; + u32 tx_security_seq_32; + + /* FW Rx counter */ + u32 rx_counter; + + /* Rx memory pool address */ + struct wl1271_rx_mem_pool_addr rx_mem_pool_addr; + + /* The target interrupt mask */ + struct work_struct irq_work; + + /* The mbox event mask */ + u32 event_mask; + + /* Mailbox pointers */ + u32 mbox_ptr[2]; + + /* Are we currently scanning */ + struct wl1271_scan scan; + + /* Our association ID */ + u16 aid; + + /* currently configured rate set */ + u32 sta_rate_set; + u32 basic_rate_set; + u32 rate_set; + + /* The current band */ + enum ieee80211_band band; + + /* Default key (for WEP) */ + u32 default_key; + + unsigned int rx_config; + unsigned int rx_filter; + + struct completion *elp_compl; + struct delayed_work elp_work; + + /* retry counter for PSM entries */ + u8 psm_entry_retry; + + /* in dBm */ + int 
power_level; + + struct wl1271_stats stats; + struct wl1271_debugfs debugfs; + + u32 buffer_32; + u32 buffer_cmd; + u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; + + struct wl1271_fw_status *fw_status; + struct wl1271_tx_hw_res_if *tx_res_if; + + struct ieee80211_vif *vif; + + /* Current chipset configuration */ + struct conf_drv_settings conf; + + struct list_head list; +}; + +int wl1271_plt_start(struct wl1271 *wl); +int wl1271_plt_stop(struct wl1271 *wl); + +#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */ + +#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */ + +#define WL1271_DEFAULT_POWER_LEVEL 0 + +#define WL1271_TX_QUEUE_MAX_LENGTH 20 + +/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power + on in case is has been shut down shortly before */ +#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */ +#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ + +static inline bool wl1271_11a_enabled(void) +{ + /* FIXME: this could be determined based on the NVS-INI file */ +#ifdef WL1271_80211A_ENABLED + return true; +#else + return false; +#endif +} + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c new file mode 100644 index 0000000..60f10dc --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_acx.c @@ -0,0 +1,1144 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
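The WL1271_FLAG_* values defined next to the flags word above are bit numbers rather than masks, the form expected by the kernel's set_bit()/test_bit() style helpers on an unsigned long. A userspace illustration of that pattern (hand-rolled bit helpers stand in for the kernel ones; the flag values are copied from the defines above):

#include <stdio.h>

/* bit indices, not masks, as in the WL1271_FLAG_* defines */
#define FLAG_STA_ASSOCIATED   1
#define FLAG_JOINED           2
#define FLAG_IN_ELP           6

static void set_flag(unsigned long *flags, int bit)
{
        *flags |= 1UL << bit;
}

static int test_flag(unsigned long flags, int bit)
{
        return !!(flags & (1UL << bit));
}

int main(void)
{
        unsigned long flags = 0;

        set_flag(&flags, FLAG_JOINED);
        set_flag(&flags, FLAG_STA_ASSOCIATED);

        printf("joined=%d associated=%d in_elp=%d\n",
               test_flag(flags, FLAG_JOINED),
               test_flag(flags, FLAG_STA_ASSOCIATED),
               test_flag(flags, FLAG_IN_ELP));   /* 1 1 0 */
        return 0;
}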
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wl1271_acx.h" + +#include +#include +#include +#include + +#include "wl1271.h" +#include "wl12xx_80211.h" +#include "wl1271_reg.h" +#include "wl1271_spi.h" +#include "wl1271_ps.h" + +int wl1271_acx_wake_up_conditions(struct wl1271 *wl) +{ + struct acx_wake_up_condition *wake_up; + int ret; + + wl1271_debug(DEBUG_ACX, "acx wake up conditions"); + + wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL); + if (!wake_up) { + ret = -ENOMEM; + goto out; + } + + wake_up->wake_up_event = wl->conf.conn.wake_up_event; + wake_up->listen_interval = wl->conf.conn.listen_interval; + + ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS, + wake_up, sizeof(*wake_up)); + if (ret < 0) { + wl1271_warning("could not set wake up conditions: %d", ret); + goto out; + } + +out: + kfree(wake_up); + return ret; +} + +int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth) +{ + struct acx_sleep_auth *auth; + int ret; + + wl1271_debug(DEBUG_ACX, "acx sleep auth"); + + auth = kzalloc(sizeof(*auth), GFP_KERNEL); + if (!auth) { + ret = -ENOMEM; + goto out; + } + + auth->sleep_auth = sleep_auth; + + ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); + if (ret < 0) + return ret; + +out: + kfree(auth); + return ret; +} + +int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len) +{ + struct acx_revision *rev; + int ret; + + wl1271_debug(DEBUG_ACX, "acx fw rev"); + + rev = kzalloc(sizeof(*rev), GFP_KERNEL); + if (!rev) { + ret = -ENOMEM; + goto out; + } + + ret = wl1271_cmd_interrogate(wl, ACX_FW_REV, rev, sizeof(*rev)); + if (ret < 0) { + wl1271_warning("ACX_FW_REV interrogate failed"); + goto out; + } + + /* be careful with the buffer sizes */ + strncpy(buf, rev->fw_version, min(len, sizeof(rev->fw_version))); + + /* + * if the firmware version string is exactly + * sizeof(rev->fw_version) long or fw_len is less than + * sizeof(rev->fw_version) it won't be null terminated + */ + buf[min(len, sizeof(rev->fw_version)) - 1] = '\0'; + +out: + kfree(rev); + return ret; +} + +int wl1271_acx_tx_power(struct wl1271 *wl, int power) +{ + struct acx_current_tx_power *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr"); + + if (power < 0 || power > 25) + return -EINVAL; + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + /* + * FIXME: This is a workaround needed while we don't the correct + * calibration, to avoid distortions + */ + /* acx->current_tx_power = power * 10; */ + acx->current_tx_power = 120; + + ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("configure of tx power failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_feature_cfg(struct wl1271 *wl) +{ + struct acx_feature_config *feature; + int ret; + + wl1271_debug(DEBUG_ACX, "acx feature cfg"); + + feature = kzalloc(sizeof(*feature), GFP_KERNEL); + if (!feature) { + ret = -ENOMEM; + goto out; + } + + /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */ + feature->data_flow_options = 0; + feature->options = 0; + + ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG, + feature, sizeof(*feature)); + if (ret < 0) { + wl1271_error("Couldnt set HW encryption"); + goto out; + } + +out: + kfree(feature); + return ret; +} + +int wl1271_acx_mem_map(struct 
wl1271 *wl, struct acx_header *mem_map, + size_t len) +{ + int ret; + + wl1271_debug(DEBUG_ACX, "acx mem map"); + + ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map, len); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl) +{ + struct acx_rx_msdu_lifetime *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx rx msdu life time"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->lifetime = cpu_to_le32(wl->conf.rx.rx_msdu_life_time); + ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("failed to set rx msdu life time: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter) +{ + struct acx_rx_config *rx_config; + int ret; + + wl1271_debug(DEBUG_ACX, "acx rx config"); + + rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL); + if (!rx_config) { + ret = -ENOMEM; + goto out; + } + + rx_config->config_options = cpu_to_le32(config); + rx_config->filter_options = cpu_to_le32(filter); + + ret = wl1271_cmd_configure(wl, ACX_RX_CFG, + rx_config, sizeof(*rx_config)); + if (ret < 0) { + wl1271_warning("failed to set rx config: %d", ret); + goto out; + } + +out: + kfree(rx_config); + return ret; +} + +int wl1271_acx_pd_threshold(struct wl1271 *wl) +{ + struct acx_packet_detection *pd; + int ret; + + wl1271_debug(DEBUG_ACX, "acx data pd threshold"); + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) { + ret = -ENOMEM; + goto out; + } + + pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold); + + ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd)); + if (ret < 0) { + wl1271_warning("failed to set pd threshold: %d", ret); + goto out; + } + +out: + kfree(pd); + return 0; +} + +int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time) +{ + struct acx_slot *slot; + int ret; + + wl1271_debug(DEBUG_ACX, "acx slot"); + + slot = kzalloc(sizeof(*slot), GFP_KERNEL); + if (!slot) { + ret = -ENOMEM; + goto out; + } + + slot->wone_index = STATION_WONE_INDEX; + slot->slot_time = slot_time; + + ret = wl1271_cmd_configure(wl, ACX_SLOT, slot, sizeof(*slot)); + if (ret < 0) { + wl1271_warning("failed to set slot time: %d", ret); + goto out; + } + +out: + kfree(slot); + return ret; +} + +int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, + void *mc_list, u32 mc_list_len) +{ + struct acx_dot11_grp_addr_tbl *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx group address tbl"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + /* MAC filtering */ + acx->enabled = enable; + acx->num_groups = mc_list_len; + memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN); + + ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("failed to set group addr table: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_service_period_timeout(struct wl1271 *wl) +{ + struct acx_rx_timeout *rx_timeout; + int ret; + + rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL); + if (!rx_timeout) { + ret = -ENOMEM; + goto out; + } + + wl1271_debug(DEBUG_ACX, "acx service period timeout"); + + rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout); + rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout); + + ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT, + rx_timeout, sizeof(*rx_timeout)); 
+ if (ret < 0) { + wl1271_warning("failed to set service period timeout: %d", + ret); + goto out; + } + +out: + kfree(rx_timeout); + return ret; +} + +int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold) +{ + struct acx_rts_threshold *rts; + int ret; + + wl1271_debug(DEBUG_ACX, "acx rts threshold"); + + rts = kzalloc(sizeof(*rts), GFP_KERNEL); + if (!rts) { + ret = -ENOMEM; + goto out; + } + + rts->threshold = cpu_to_le16(rts_threshold); + + ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); + if (ret < 0) { + wl1271_warning("failed to set rts threshold: %d", ret); + goto out; + } + +out: + kfree(rts); + return ret; +} + +int wl1271_acx_dco_itrim_params(struct wl1271 *wl) +{ + struct acx_dco_itrim_params *dco; + struct conf_itrim_settings *c = &wl->conf.itrim; + int ret; + + wl1271_debug(DEBUG_ACX, "acx dco itrim parameters"); + + dco = kzalloc(sizeof(*dco), GFP_KERNEL); + if (!dco) { + ret = -ENOMEM; + goto out; + } + + dco->enable = c->enable; + dco->timeout = cpu_to_le32(c->timeout); + + ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS, + dco, sizeof(*dco)); + if (ret < 0) { + wl1271_warning("failed to set dco itrim parameters: %d", ret); + goto out; + } + +out: + kfree(dco); + return ret; +} + +int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter) +{ + struct acx_beacon_filter_option *beacon_filter = NULL; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx beacon filter opt"); + + if (enable_filter && + wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED) + goto out; + + beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL); + if (!beacon_filter) { + ret = -ENOMEM; + goto out; + } + + beacon_filter->enable = enable_filter; + + /* + * When set to zero, and the filter is enabled, beacons + * without the unicast TIM bit set are dropped. 
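Nearly every helper in this file follows the same shape: kzalloc() a small ACX structure, fill it from wl->conf, hand it to wl1271_cmd_configure(), then free it on the shared out: label whether or not the command succeeded. A schematic userspace restatement of that pattern (calloc/free stand in for kzalloc/kfree, the transport is stubbed out, and the command id is an arbitrary placeholder):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-ins for the ACX payload and for wl1271_cmd_configure() */
struct acx_rts_threshold_example {
        unsigned short threshold;
        unsigned char pad[2];
};

static int cmd_configure_stub(int id, void *buf, size_t len)
{
        printf("configure id=%d len=%zu\n", id, len);
        return 0;   /* pretend the firmware accepted it */
}

/* same alloc/fill/send/free shape as the wl1271_acx_*() helpers */
static int acx_rts_threshold_example(unsigned short rts_threshold)
{
        struct acx_rts_threshold_example *rts;
        int ret;

        rts = calloc(1, sizeof(*rts));
        if (!rts) {
                ret = -ENOMEM;
                goto out;
        }

        rts->threshold = rts_threshold;

        ret = cmd_configure_stub(0x42 /* placeholder id */, rts, sizeof(*rts));
        if (ret < 0) {
                fprintf(stderr, "failed to set rts threshold: %d\n", ret);
                goto out;
        }

out:
        free(rts);
        return ret;
}

int main(void)
{
        return acx_rts_threshold_example(2347);
}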
+ */ + beacon_filter->max_num_beacons = 0; + + ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT, + beacon_filter, sizeof(*beacon_filter)); + if (ret < 0) { + wl1271_warning("failed to set beacon filter opt: %d", ret); + goto out; + } + +out: + kfree(beacon_filter); + return ret; +} + +int wl1271_acx_beacon_filter_table(struct wl1271 *wl) +{ + struct acx_beacon_filter_ie_table *ie_table; + int i, idx = 0; + int ret; + bool vendor_spec = false; + + wl1271_debug(DEBUG_ACX, "acx beacon filter table"); + + ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL); + if (!ie_table) { + ret = -ENOMEM; + goto out; + } + + /* configure default beacon pass-through rules */ + ie_table->num_ie = 0; + for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) { + struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]); + ie_table->table[idx++] = r->ie; + ie_table->table[idx++] = r->rule; + + if (r->ie == WLAN_EID_VENDOR_SPECIFIC) { + /* only one vendor specific ie allowed */ + if (vendor_spec) + continue; + + /* for vendor specific rules configure the + additional fields */ + memcpy(&(ie_table->table[idx]), r->oui, + CONF_BCN_IE_OUI_LEN); + idx += CONF_BCN_IE_OUI_LEN; + ie_table->table[idx++] = r->type; + memcpy(&(ie_table->table[idx]), r->version, + CONF_BCN_IE_VER_LEN); + idx += CONF_BCN_IE_VER_LEN; + vendor_spec = true; + } + + ie_table->num_ie++; + } + + ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE, + ie_table, sizeof(*ie_table)); + if (ret < 0) { + wl1271_warning("failed to set beacon filter table: %d", ret); + goto out; + } + +out: + kfree(ie_table); + return ret; +} + +int wl1271_acx_conn_monit_params(struct wl1271 *wl) +{ + struct acx_conn_monit_params *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx connection monitor parameters"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold); + acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout); + + ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("failed to set connection monitor " + "parameters: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + + +int wl1271_acx_sg_enable(struct wl1271 *wl) +{ + struct acx_bt_wlan_coex *pta; + int ret; + + wl1271_debug(DEBUG_ACX, "acx sg enable"); + + pta = kzalloc(sizeof(*pta), GFP_KERNEL); + if (!pta) { + ret = -ENOMEM; + goto out; + } + + pta->enable = SG_ENABLE; + + ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta)); + if (ret < 0) { + wl1271_warning("failed to set softgemini enable: %d", ret); + goto out; + } + +out: + kfree(pta); + return ret; +} + +int wl1271_acx_sg_cfg(struct wl1271 *wl) +{ + struct acx_bt_wlan_coex_param *param; + struct conf_sg_settings *c = &wl->conf.sg; + int ret; + + wl1271_debug(DEBUG_ACX, "acx sg cfg"); + + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) { + ret = -ENOMEM; + goto out; + } + + /* BT-WLAN coext parameters */ + param->per_threshold = cpu_to_le32(c->per_threshold); + param->max_scan_compensation_time = + cpu_to_le32(c->max_scan_compensation_time); + param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval); + param->load_ratio = c->load_ratio; + param->auto_ps_mode = c->auto_ps_mode; + param->probe_req_compensation = c->probe_req_compensation; + param->scan_window_compensation = c->scan_window_compensation; + param->antenna_config = c->antenna_config; + param->beacon_miss_threshold = c->beacon_miss_threshold; + 
param->rate_adaptation_threshold = + cpu_to_le32(c->rate_adaptation_threshold); + param->rate_adaptation_snr = c->rate_adaptation_snr; + + ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); + if (ret < 0) { + wl1271_warning("failed to set sg config: %d", ret); + goto out; + } + +out: + kfree(param); + return ret; +} + +int wl1271_acx_cca_threshold(struct wl1271 *wl) +{ + struct acx_energy_detection *detection; + int ret; + + wl1271_debug(DEBUG_ACX, "acx cca threshold"); + + detection = kzalloc(sizeof(*detection), GFP_KERNEL); + if (!detection) { + ret = -ENOMEM; + goto out; + } + + detection->rx_cca_threshold = cpu_to_le16(wl->conf.rx.rx_cca_threshold); + detection->tx_energy_detection = wl->conf.tx.tx_energy_detection; + + ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, + detection, sizeof(*detection)); + if (ret < 0) { + wl1271_warning("failed to set cca threshold: %d", ret); + return ret; + } + +out: + kfree(detection); + return ret; +} + +int wl1271_acx_bcn_dtim_options(struct wl1271 *wl) +{ + struct acx_beacon_broadcast *bb; + int ret; + + wl1271_debug(DEBUG_ACX, "acx bcn dtim options"); + + bb = kzalloc(sizeof(*bb), GFP_KERNEL); + if (!bb) { + ret = -ENOMEM; + goto out; + } + + bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout); + bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout); + bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps; + bb->ps_poll_threshold = wl->conf.conn.ps_poll_threshold; + + ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb)); + if (ret < 0) { + wl1271_warning("failed to set rx config: %d", ret); + goto out; + } + +out: + kfree(bb); + return ret; +} + +int wl1271_acx_aid(struct wl1271 *wl, u16 aid) +{ + struct acx_aid *acx_aid; + int ret; + + wl1271_debug(DEBUG_ACX, "acx aid"); + + acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL); + if (!acx_aid) { + ret = -ENOMEM; + goto out; + } + + acx_aid->aid = cpu_to_le16(aid); + + ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); + if (ret < 0) { + wl1271_warning("failed to set aid: %d", ret); + goto out; + } + +out: + kfree(acx_aid); + return ret; +} + +int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask) +{ + struct acx_event_mask *mask; + int ret; + + wl1271_debug(DEBUG_ACX, "acx event mbox mask"); + + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + ret = -ENOMEM; + goto out; + } + + /* high event mask is unused */ + mask->high_event_mask = cpu_to_le32(0xffffffff); + mask->event_mask = cpu_to_le32(event_mask); + + ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK, + mask, sizeof(*mask)); + if (ret < 0) { + wl1271_warning("failed to set acx_event_mbox_mask: %d", ret); + goto out; + } + +out: + kfree(mask); + return ret; +} + +int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble) +{ + struct acx_preamble *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx_set_preamble"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->preamble = preamble; + + ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of preamble failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_cts_protect(struct wl1271 *wl, + enum acx_ctsprotect_type ctsprotect) +{ + struct acx_ctsprotect *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx_set_ctsprotect"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + 
acx->ctsprotect = ctsprotect; + + ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of ctsprotect failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats) +{ + int ret; + + wl1271_debug(DEBUG_ACX, "acx statistics"); + + ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats, + sizeof(*stats)); + if (ret < 0) { + wl1271_warning("acx statistics failed: %d", ret); + return -ENOMEM; + } + + return 0; +} + +int wl1271_acx_rate_policies(struct wl1271 *wl) +{ + struct acx_rate_policy *acx; + struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf; + int idx = 0; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx rate policies"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + /* configure one basic rate class */ + idx = ACX_TX_BASIC_RATE; + acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set); + acx->rate_class[idx].short_retry_limit = c->short_retry_limit; + acx->rate_class[idx].long_retry_limit = c->long_retry_limit; + acx->rate_class[idx].aflags = c->aflags; + + /* configure one AP supported rate class */ + idx = ACX_TX_AP_FULL_RATE; + acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set); + acx->rate_class[idx].short_retry_limit = c->short_retry_limit; + acx->rate_class[idx].long_retry_limit = c->long_retry_limit; + acx->rate_class[idx].aflags = c->aflags; + + acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT); + + ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of rate policies failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, + u8 aifsn, u16 txop) +{ + struct acx_ac_cfg *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d " + "aifs %d txop %d", ac, cw_min, cw_max, aifsn, txop); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->ac = ac; + acx->cw_min = cw_min; + acx->cw_max = cpu_to_le16(cw_max); + acx->aifsn = aifsn; + acx->tx_op_limit = cpu_to_le16(txop); + + ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx ac cfg failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, + u8 tsid, u8 ps_scheme, u8 ack_policy, + u32 apsd_conf0, u32 apsd_conf1) +{ + struct acx_tid_config *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx tid config"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->queue_id = queue_id; + acx->channel_type = channel_type; + acx->tsid = tsid; + acx->ps_scheme = ps_scheme; + acx->ack_policy = ack_policy; + acx->apsd_conf[0] = cpu_to_le32(apsd_conf0); + acx->apsd_conf[1] = cpu_to_le32(apsd_conf1); + + ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of tid config failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_frag_threshold(struct wl1271 *wl) +{ + struct acx_frag_threshold *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx frag threshold"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->frag_threshold = cpu_to_le16(wl->conf.tx.frag_threshold); + ret 
= wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of frag threshold failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_tx_config_options(struct wl1271 *wl) +{ + struct acx_tx_config_options *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx tx config options"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->tx_compl_timeout = cpu_to_le16(wl->conf.tx.tx_compl_timeout); + acx->tx_compl_threshold = cpu_to_le16(wl->conf.tx.tx_compl_threshold); + ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of tx options failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_mem_cfg(struct wl1271 *wl) +{ + struct wl1271_acx_config_memory *mem_conf; + int ret; + + wl1271_debug(DEBUG_ACX, "wl1271 mem cfg"); + + mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL); + if (!mem_conf) { + ret = -ENOMEM; + goto out; + } + + /* memory config */ + mem_conf->num_stations = DEFAULT_NUM_STATIONS; + mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS; + mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS; + mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES; + mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); + + ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, + sizeof(*mem_conf)); + if (ret < 0) { + wl1271_warning("wl1271 mem config failed: %d", ret); + goto out; + } + +out: + kfree(mem_conf); + return ret; +} + +int wl1271_acx_init_mem_config(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_acx_mem_cfg(wl); + if (ret < 0) + return ret; + + wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map), + GFP_KERNEL); + if (!wl->target_mem_map) { + wl1271_error("couldn't allocate target memory map"); + return -ENOMEM; + } + + /* we now ask for the firmware built memory map */ + ret = wl1271_acx_mem_map(wl, (void *)wl->target_mem_map, + sizeof(struct wl1271_acx_mem_map)); + if (ret < 0) { + wl1271_error("couldn't retrieve firmware memory map"); + kfree(wl->target_mem_map); + wl->target_mem_map = NULL; + return ret; + } + + /* initialize TX block book keeping */ + wl->tx_blocks_available = + le32_to_cpu(wl->target_mem_map->num_tx_mem_blocks); + wl1271_debug(DEBUG_TX, "available tx blocks: %d", + wl->tx_blocks_available); + + return 0; +} + +int wl1271_acx_init_rx_interrupt(struct wl1271 *wl) +{ + struct wl1271_acx_rx_config_opt *rx_conf; + int ret; + + wl1271_debug(DEBUG_ACX, "wl1271 rx interrupt config"); + + rx_conf = kzalloc(sizeof(*rx_conf), GFP_KERNEL); + if (!rx_conf) { + ret = -ENOMEM; + goto out; + } + + rx_conf->threshold = cpu_to_le16(wl->conf.rx.irq_pkt_threshold); + rx_conf->timeout = cpu_to_le16(wl->conf.rx.irq_timeout); + rx_conf->mblk_threshold = cpu_to_le16(wl->conf.rx.irq_blk_threshold); + rx_conf->queue_type = wl->conf.rx.queue_type; + + ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf, + sizeof(*rx_conf)); + if (ret < 0) { + wl1271_warning("wl1271 rx config opt failed: %d", ret); + goto out; + } + +out: + kfree(rx_conf); + return ret; +} + +int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable) +{ + struct wl1271_acx_bet_enable *acx = NULL; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx bet enable"); + + if (enable && wl->conf.conn.bet_enable == CONF_BET_MODE_DISABLE) + goto out; + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->enable = enable ? 
CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE; + acx->max_consecutive = wl->conf.conn.bet_max_consecutive; + + ret = wl1271_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx bet enable failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, + u8 version) +{ + struct wl1271_acx_arp_filter *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->version = version; + acx->enable = enable; + + if (enable == true) { + if (version == ACX_IPV4_VERSION) + memcpy(acx->address, address, ACX_IPV4_ADDR_SIZE); + else if (version == ACX_IPV6_VERSION) + memcpy(acx->address, address, sizeof(acx->address)); + else + wl1271_error("Invalid IP version"); + } + + ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("failed to set arp ip filter: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_pm_config(struct wl1271 *wl) +{ + struct wl1271_acx_pm_config *acx = NULL; + struct conf_pm_config_settings *c = &wl->conf.pm_config; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx pm config"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time); + acx->host_fast_wakeup_support = c->host_fast_wakeup_support; + + ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx pm config failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h new file mode 100644 index 0000000..aeccc98 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_acx.h @@ -0,0 +1,1089 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_ACX_H__ +#define __WL1271_ACX_H__ + +#include "wl1271.h" +#include "wl1271_cmd.h" + +/************************************************************************* + + Host Interrupt Register (WiLink -> Host) + +**************************************************************************/ +/* HW Initiated interrupt Watchdog timer expiration */ +#define WL1271_ACX_INTR_WATCHDOG BIT(0) +/* Init sequence is done (masked interrupt, detection through polling only ) */ +#define WL1271_ACX_INTR_INIT_COMPLETE BIT(1) +/* Event was entered to Event MBOX #A*/ +#define WL1271_ACX_INTR_EVENT_A BIT(2) +/* Event was entered to Event MBOX #B*/ +#define WL1271_ACX_INTR_EVENT_B BIT(3) +/* Command processing completion*/ +#define WL1271_ACX_INTR_CMD_COMPLETE BIT(4) +/* Signaling the host on HW wakeup */ +#define WL1271_ACX_INTR_HW_AVAILABLE BIT(5) +/* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */ +#define WL1271_ACX_INTR_DATA BIT(6) +/* Trace meassge on MBOX #A */ +#define WL1271_ACX_INTR_TRACE_A BIT(7) +/* Trace meassge on MBOX #B */ +#define WL1271_ACX_INTR_TRACE_B BIT(8) + +#define WL1271_ACX_INTR_ALL 0xFFFFFFFF +#define WL1271_ACX_ALL_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \ + WL1271_ACX_INTR_INIT_COMPLETE | \ + WL1271_ACX_INTR_EVENT_A | \ + WL1271_ACX_INTR_EVENT_B | \ + WL1271_ACX_INTR_CMD_COMPLETE | \ + WL1271_ACX_INTR_HW_AVAILABLE | \ + WL1271_ACX_INTR_DATA) + +#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \ + WL1271_ACX_INTR_EVENT_B | \ + WL1271_ACX_INTR_HW_AVAILABLE | \ + WL1271_ACX_INTR_DATA) + +/* Target's information element */ +struct acx_header { + struct wl1271_cmd_header cmd; + + /* acx (or information element) header */ + __le16 id; + + /* payload length (not including headers */ + __le16 len; +} __attribute__ ((packed)); + +struct acx_error_counter { + struct acx_header header; + + /* The number of PLCP errors since the last time this */ + /* information element was interrogated. This field is */ + /* automatically cleared when it is interrogated.*/ + __le32 PLCP_error; + + /* The number of FCS errors since the last time this */ + /* information element was interrogated. This field is */ + /* automatically cleared when it is interrogated.*/ + __le32 FCS_error; + + /* The number of MPDUs without PLCP header errors received*/ + /* since the last time this information element was interrogated. */ + /* This field is automatically cleared when it is interrogated.*/ + __le32 valid_frame; + + /* the number of missed sequence numbers in the squentially */ + /* values of frames seq numbers */ + __le32 seq_num_miss; +} __attribute__ ((packed)); + +struct acx_revision { + struct acx_header header; + + /* + * The WiLink firmware version, an ASCII string x.x.x.x, + * that uniquely identifies the current firmware. + * The left most digit is incremented each time a + * significant change is made to the firmware, such as + * code redesign or new platform support. + * The second digit is incremented when major enhancements + * are added or major fixes are made. + * The third digit is incremented for each GA release. + * The fourth digit is incremented for each build. + * The first two digits identify a firmware release version, + * in other words, a unique set of features. + * The first three digits identify a GA release. 
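WL1271_INTR_MASK above groups the interrupt sources the host actually cares about; anything outside that mask in the raw vector is ignored. A toy decoder showing how such a vector would be filtered and dispatched (userspace C with abbreviated names; this is only an illustration, not the driver's interrupt handler):

#include <stdio.h>

#define BIT(n) (1u << (n))

/* interrupt source bits, abbreviated from the WL1271_ACX_INTR_* defines */
#define INTR_WATCHDOG      BIT(0)
#define INTR_INIT_COMPLETE BIT(1)
#define INTR_EVENT_A       BIT(2)
#define INTR_EVENT_B       BIT(3)
#define INTR_CMD_COMPLETE  BIT(4)
#define INTR_HW_AVAILABLE  BIT(5)
#define INTR_DATA          BIT(6)

/* the unmasked subset, mirroring WL1271_INTR_MASK */
#define INTR_MASK (INTR_EVENT_A | INTR_EVENT_B | INTR_HW_AVAILABLE | INTR_DATA)

static void decode_intr(unsigned vector)
{
        vector &= INTR_MASK;                 /* drop masked-off sources */

        if (vector & INTR_EVENT_A)
                printf("event in mailbox A\n");
        if (vector & INTR_EVENT_B)
                printf("event in mailbox B\n");
        if (vector & INTR_DATA)
                printf("RX / TX-complete data pending\n");
        if (vector & INTR_HW_AVAILABLE)
                printf("chip woke up\n");
}

int main(void)
{
        /* e.g. firmware raised EVENT_A and DATA; the watchdog bit is masked */
        decode_intr(INTR_EVENT_A | INTR_DATA | INTR_WATCHDOG);
        return 0;
}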
+ */ + char fw_version[20]; + + /* + * This 4 byte field specifies the WiLink hardware version. + * bits 0 - 15: Reserved. + * bits 16 - 23: Version ID - The WiLink version ID + * (1 = first spin, 2 = second spin, and so on). + * bits 24 - 31: Chip ID - The WiLink chip ID. + */ + __le32 hw_version; +} __attribute__ ((packed)); + +enum wl1271_psm_mode { + /* Active mode */ + WL1271_PSM_CAM = 0, + + /* Power save mode */ + WL1271_PSM_PS = 1, + + /* Extreme low power */ + WL1271_PSM_ELP = 2, +}; + +struct acx_sleep_auth { + struct acx_header header; + + /* The sleep level authorization of the device. */ + /* 0 - Always active*/ + /* 1 - Power down mode: light / fast sleep*/ + /* 2 - ELP mode: Deep / Max sleep*/ + u8 sleep_auth; + u8 padding[3]; +} __attribute__ ((packed)); + +enum { + HOSTIF_PCI_MASTER_HOST_INDIRECT, + HOSTIF_PCI_MASTER_HOST_DIRECT, + HOSTIF_SLAVE, + HOSTIF_PKT_RING, + HOSTIF_DONTCARE = 0xFF +}; + +#define DEFAULT_UCAST_PRIORITY 0 +#define DEFAULT_RX_Q_PRIORITY 0 +#define DEFAULT_NUM_STATIONS 1 +#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */ +#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */ +#define TRACE_BUFFER_MAX_SIZE 256 + +#define DP_RX_PACKET_RING_CHUNK_SIZE 1600 +#define DP_TX_PACKET_RING_CHUNK_SIZE 1600 +#define DP_RX_PACKET_RING_CHUNK_NUM 2 +#define DP_TX_PACKET_RING_CHUNK_NUM 2 +#define DP_TX_COMPLETE_TIME_OUT 20 + +#define TX_MSDU_LIFETIME_MIN 0 +#define TX_MSDU_LIFETIME_MAX 3000 +#define TX_MSDU_LIFETIME_DEF 512 +#define RX_MSDU_LIFETIME_MIN 0 +#define RX_MSDU_LIFETIME_MAX 0xFFFFFFFF +#define RX_MSDU_LIFETIME_DEF 512000 + +struct acx_rx_msdu_lifetime { + struct acx_header header; + + /* + * The maximum amount of time, in TU, before the + * firmware discards the MSDU. + */ + __le32 lifetime; +} __attribute__ ((packed)); + +/* + * RX Config Options Table + * Bit Definition + * === ========== + * 31:14 Reserved + * 13 Copy RX Status - when set, write three receive status words + * to top of rx'd MPDUs. + * When cleared, do not write three status words (added rev 1.5) + * 12 Reserved + * 11 RX Complete upon FCS error - when set, give rx complete + * interrupt for FCS errors, after the rx filtering, e.g. unicast + * frames not to us with FCS error will not generate an interrupt. + * 10 SSID Filter Enable - When set, the WiLink discards all beacon, + * probe request, and probe response frames with an SSID that does + * not match the SSID specified by the host in the START/JOIN + * command. + * When clear, the WiLink receives frames with any SSID. + * 9 Broadcast Filter Enable - When set, the WiLink discards all + * broadcast frames. When clear, the WiLink receives all received + * broadcast frames. + * 8:6 Reserved + * 5 BSSID Filter Enable - When set, the WiLink discards any frames + * with a BSSID that does not match the BSSID specified by the + * host. + * When clear, the WiLink receives frames from any BSSID. + * 4 MAC Addr Filter - When set, the WiLink discards any frames + * with a destination address that does not match the MAC address + * of the adaptor. + * When clear, the WiLink receives frames destined to any MAC + * address. + * 3 Promiscuous - When set, the WiLink receives all valid frames + * (i.e., all frames that pass the FCS check). + * When clear, only frames that pass the other filters specified + * are received. + * 2 FCS - When set, the WiLink includes the FCS with the received + * frame. + * When cleared, the FCS is discarded. + * 1 PLCP header - When set, write all data from baseband to frame + * buffer including PHY header. 
+ * 0 Reserved - Always equal to 0. + * + * RX Filter Options Table + * Bit Definition + * === ========== + * 31:12 Reserved - Always equal to 0. + * 11 Association - When set, the WiLink receives all association + * related frames (association request/response, reassocation + * request/response, and disassociation). When clear, these frames + * are discarded. + * 10 Auth/De auth - When set, the WiLink receives all authentication + * and de-authentication frames. When clear, these frames are + * discarded. + * 9 Beacon - When set, the WiLink receives all beacon frames. + * When clear, these frames are discarded. + * 8 Contention Free - When set, the WiLink receives all contention + * free frames. + * When clear, these frames are discarded. + * 7 Control - When set, the WiLink receives all control frames. + * When clear, these frames are discarded. + * 6 Data - When set, the WiLink receives all data frames. + * When clear, these frames are discarded. + * 5 FCS Error - When set, the WiLink receives frames that have FCS + * errors. + * When clear, these frames are discarded. + * 4 Management - When set, the WiLink receives all management + * frames. + * When clear, these frames are discarded. + * 3 Probe Request - When set, the WiLink receives all probe request + * frames. + * When clear, these frames are discarded. + * 2 Probe Response - When set, the WiLink receives all probe + * response frames. + * When clear, these frames are discarded. + * 1 RTS/CTS/ACK - When set, the WiLink receives all RTS, CTS and ACK + * frames. + * When clear, these frames are discarded. + * 0 Rsvd Type/Sub Type - When set, the WiLink receives all frames + * that have reserved frame types and sub types as defined by the + * 802.11 specification. + * When clear, these frames are discarded. + */ +struct acx_rx_config { + struct acx_header header; + + __le32 config_options; + __le32 filter_options; +} __attribute__ ((packed)); + +struct acx_packet_detection { + struct acx_header header; + + __le32 threshold; +} __attribute__ ((packed)); + + +enum acx_slot_type { + SLOT_TIME_LONG = 0, + SLOT_TIME_SHORT = 1, + DEFAULT_SLOT_TIME = SLOT_TIME_SHORT, + MAX_SLOT_TIMES = 0xFF +}; + +#define STATION_WONE_INDEX 0 + +struct acx_slot { + struct acx_header header; + + u8 wone_index; /* Reserved */ + u8 slot_time; + u8 reserved[6]; +} __attribute__ ((packed)); + + +#define ACX_MC_ADDRESS_GROUP_MAX (8) +#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX) + +struct acx_dot11_grp_addr_tbl { + struct acx_header header; + + u8 enabled; + u8 num_groups; + u8 pad[2]; + u8 mac_table[ADDRESS_GROUP_MAX_LEN]; +} __attribute__ ((packed)); + +struct acx_rx_timeout { + struct acx_header header; + + __le16 ps_poll_timeout; + __le16 upsd_timeout; +} __attribute__ ((packed)); + +struct acx_rts_threshold { + struct acx_header header; + + __le16 threshold; + u8 pad[2]; +} __attribute__ ((packed)); + +struct acx_beacon_filter_option { + struct acx_header header; + + u8 enable; + + /* + * The number of beacons without the unicast TIM + * bit set that the firmware buffers before + * signaling the host about ready frames. + * When set to 0 and the filter is enabled, beacons + * without the unicast TIM bit set are dropped. 
+ */ + u8 max_num_beacons; + u8 pad[2]; +} __attribute__ ((packed)); + +/* + * ACXBeaconFilterEntry (not 221) + * Byte Offset Size (Bytes) Definition + * =========== ============ ========== + * 0 1 IE identifier + * 1 1 Treatment bit mask + * + * ACXBeaconFilterEntry (221) + * Byte Offset Size (Bytes) Definition + * =========== ============ ========== + * 0 1 IE identifier + * 1 1 Treatment bit mask + * 2 3 OUI + * 5 1 Type + * 6 2 Version + * + * + * Treatment bit mask - The information element handling: + * bit 0 - The information element is compared and transferred + * in case of change. + * bit 1 - The information element is transferred to the host + * with each appearance or disappearance. + * Note that both bits can be set at the same time. + */ +#define BEACON_FILTER_TABLE_MAX_IE_NUM (32) +#define BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM (6) +#define BEACON_FILTER_TABLE_IE_ENTRY_SIZE (2) +#define BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE (6) +#define BEACON_FILTER_TABLE_MAX_SIZE ((BEACON_FILTER_TABLE_MAX_IE_NUM * \ + BEACON_FILTER_TABLE_IE_ENTRY_SIZE) + \ + (BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM * \ + BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE)) + +struct acx_beacon_filter_ie_table { + struct acx_header header; + + u8 num_ie; + u8 pad[3]; + u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; +} __attribute__ ((packed)); + +struct acx_conn_monit_params { + struct acx_header header; + + __le32 synch_fail_thold; /* number of beacons missed */ + __le32 bss_lose_timeout; /* number of TU's from synch fail */ +} __attribute__ ((packed)); + +enum { + SG_ENABLE = 0, + SG_DISABLE, + SG_SENSE_NO_ACTIVITY, + SG_SENSE_ACTIVE +}; + +struct acx_bt_wlan_coex { + struct acx_header header; + + /* + * 0 -> PTA enabled + * 1 -> PTA disabled + * 2 -> sense no active mode, i.e. + * an interrupt is sent upon + * BT activity. + * 3 -> PTA is switched on in response + * to the interrupt sending. 
+ */ + u8 enable; + u8 pad[3]; +} __attribute__ ((packed)); + +struct acx_dco_itrim_params { + struct acx_header header; + + u8 enable; + u8 padding[3]; + __le32 timeout; +} __attribute__ ((packed)); + +#define PTA_ANTENNA_TYPE_DEF (0) +#define PTA_BT_HP_MAXTIME_DEF (2000) +#define PTA_WLAN_HP_MAX_TIME_DEF (5000) +#define PTA_SENSE_DISABLE_TIMER_DEF (1350) +#define PTA_PROTECTIVE_RX_TIME_DEF (1500) +#define PTA_PROTECTIVE_TX_TIME_DEF (1500) +#define PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF (3000) +#define PTA_SIGNALING_TYPE_DEF (1) +#define PTA_AFH_LEVERAGE_ON_DEF (0) +#define PTA_NUMBER_QUIET_CYCLE_DEF (0) +#define PTA_MAX_NUM_CTS_DEF (3) +#define PTA_NUMBER_OF_WLAN_PACKETS_DEF (2) +#define PTA_NUMBER_OF_BT_PACKETS_DEF (2) +#define PTA_PROTECTIVE_RX_TIME_FAST_DEF (1500) +#define PTA_PROTECTIVE_TX_TIME_FAST_DEF (3000) +#define PTA_CYCLE_TIME_FAST_DEF (8700) +#define PTA_RX_FOR_AVALANCHE_DEF (5) +#define PTA_ELP_HP_DEF (0) +#define PTA_ANTI_STARVE_PERIOD_DEF (500) +#define PTA_ANTI_STARVE_NUM_CYCLE_DEF (4) +#define PTA_ALLOW_PA_SD_DEF (1) +#define PTA_TIME_BEFORE_BEACON_DEF (6300) +#define PTA_HPDM_MAX_TIME_DEF (1600) +#define PTA_TIME_OUT_NEXT_WLAN_DEF (2550) +#define PTA_AUTO_MODE_NO_CTS_DEF (0) +#define PTA_BT_HP_RESPECTED_DEF (3) +#define PTA_WLAN_RX_MIN_RATE_DEF (24) +#define PTA_ACK_MODE_DEF (1) + +struct acx_bt_wlan_coex_param { + struct acx_header header; + + __le32 per_threshold; + __le32 max_scan_compensation_time; + __le16 nfs_sample_interval; + u8 load_ratio; + u8 auto_ps_mode; + u8 probe_req_compensation; + u8 scan_window_compensation; + u8 antenna_config; + u8 beacon_miss_threshold; + __le32 rate_adaptation_threshold; + s8 rate_adaptation_snr; + u8 padding[3]; +} __attribute__ ((packed)); + +struct acx_energy_detection { + struct acx_header header; + + /* The RX Clear Channel Assessment threshold in the PHY */ + __le16 rx_cca_threshold; + u8 tx_energy_detection; + u8 pad; +} __attribute__ ((packed)); + +struct acx_beacon_broadcast { + struct acx_header header; + + __le16 beacon_rx_timeout; + __le16 broadcast_timeout; + + /* Enables receiving of broadcast packets in PS mode */ + u8 rx_broadcast_in_ps; + + /* Consecutive PS Poll failures before updating the host */ + u8 ps_poll_threshold; + u8 pad[2]; +} __attribute__ ((packed)); + +struct acx_event_mask { + struct acx_header header; + + __le32 event_mask; + __le32 high_event_mask; /* Unused */ +} __attribute__ ((packed)); + +#define CFG_RX_FCS BIT(2) +#define CFG_RX_ALL_GOOD BIT(3) +#define CFG_UNI_FILTER_EN BIT(4) +#define CFG_BSSID_FILTER_EN BIT(5) +#define CFG_MC_FILTER_EN BIT(6) +#define CFG_MC_ADDR0_EN BIT(7) +#define CFG_MC_ADDR1_EN BIT(8) +#define CFG_BC_REJECT_EN BIT(9) +#define CFG_SSID_FILTER_EN BIT(10) +#define CFG_RX_INT_FCS_ERROR BIT(11) +#define CFG_RX_INT_ENCRYPTED BIT(12) +#define CFG_RX_WR_RX_STATUS BIT(13) +#define CFG_RX_FILTER_NULTI BIT(14) +#define CFG_RX_RESERVE BIT(15) +#define CFG_RX_TIMESTAMP_TSF BIT(16) + +#define CFG_RX_RSV_EN BIT(0) +#define CFG_RX_RCTS_ACK BIT(1) +#define CFG_RX_PRSP_EN BIT(2) +#define CFG_RX_PREQ_EN BIT(3) +#define CFG_RX_MGMT_EN BIT(4) +#define CFG_RX_FCS_ERROR BIT(5) +#define CFG_RX_DATA_EN BIT(6) +#define CFG_RX_CTL_EN BIT(7) +#define CFG_RX_CF_EN BIT(8) +#define CFG_RX_BCN_EN BIT(9) +#define CFG_RX_AUTH_EN BIT(10) +#define CFG_RX_ASSOC_EN BIT(11) + +#define SCAN_PASSIVE BIT(0) +#define SCAN_5GHZ_BAND BIT(1) +#define SCAN_TRIGGERED BIT(2) +#define SCAN_PRIORITY_HIGH BIT(3) + +/* When set, disable HW encryption */ +#define DF_ENCRYPTION_DISABLE 0x01 +#define DF_SNIFF_MODE_ENABLE 0x80 + 
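/*
 * Illustrative sketch (hypothetical values, not taken from the reference
 * driver): the two 32-bit words carried by struct acx_rx_config above are
 * plain OR combinations of the CFG_* bits.  A setting that keeps data
 * frames from the joined BSSID plus beacons and probe responses could be
 * composed and applied as:
 *
 *	u32 config = CFG_BSSID_FILTER_EN | CFG_SSID_FILTER_EN;
 *	u32 filter = CFG_RX_DATA_EN | CFG_RX_BCN_EN | CFG_RX_PRSP_EN;
 *
 *	wl1271_acx_rx_config(wl, config, filter);
 *
 * The defaults this driver actually uses are WL1271_DEFAULT_RX_CONFIG and
 * WL1271_DEFAULT_RX_FILTER, defined elsewhere in the driver.
 */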
+struct acx_feature_config { + struct acx_header header; + + __le32 options; + __le32 data_flow_options; +} __attribute__ ((packed)); + +struct acx_current_tx_power { + struct acx_header header; + + u8 current_tx_power; + u8 padding[3]; +} __attribute__ ((packed)); + +struct acx_wake_up_condition { + struct acx_header header; + + u8 wake_up_event; /* Only one bit can be set */ + u8 listen_interval; + u8 pad[2]; +} __attribute__ ((packed)); + +struct acx_aid { + struct acx_header header; + + /* + * To be set when associated with an AP. + */ + __le16 aid; + u8 pad[2]; +} __attribute__ ((packed)); + +enum acx_preamble_type { + ACX_PREAMBLE_LONG = 0, + ACX_PREAMBLE_SHORT = 1 +}; + +struct acx_preamble { + struct acx_header header; + + /* + * When set, the WiLink transmits the frames with a short preamble and + * when cleared, the WiLink transmits the frames with a long preamble. + */ + u8 preamble; + u8 padding[3]; +} __attribute__ ((packed)); + +enum acx_ctsprotect_type { + CTSPROTECT_DISABLE = 0, + CTSPROTECT_ENABLE = 1 +}; + +struct acx_ctsprotect { + struct acx_header header; + u8 ctsprotect; + u8 padding[3]; +} __attribute__ ((packed)); + +struct acx_tx_statistics { + __le32 internal_desc_overflow; +} __attribute__ ((packed)); + +struct acx_rx_statistics { + __le32 out_of_mem; + __le32 hdr_overflow; + __le32 hw_stuck; + __le32 dropped; + __le32 fcs_err; + __le32 xfr_hint_trig; + __le32 path_reset; + __le32 reset_counter; +} __attribute__ ((packed)); + +struct acx_dma_statistics { + __le32 rx_requested; + __le32 rx_errors; + __le32 tx_requested; + __le32 tx_errors; +} __attribute__ ((packed)); + +struct acx_isr_statistics { + /* host command complete */ + __le32 cmd_cmplt; + + /* fiqisr() */ + __le32 fiqs; + + /* (INT_STS_ND & INT_TRIG_RX_HEADER) */ + __le32 rx_headers; + + /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */ + __le32 rx_completes; + + /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */ + __le32 rx_mem_overflow; + + /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */ + __le32 rx_rdys; + + /* irqisr() */ + __le32 irqs; + + /* (INT_STS_ND & INT_TRIG_TX_PROC) */ + __le32 tx_procs; + + /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */ + __le32 decrypt_done; + + /* (INT_STS_ND & INT_TRIG_DMA0) */ + __le32 dma0_done; + + /* (INT_STS_ND & INT_TRIG_DMA1) */ + __le32 dma1_done; + + /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */ + __le32 tx_exch_complete; + + /* (INT_STS_ND & INT_TRIG_COMMAND) */ + __le32 commands; + + /* (INT_STS_ND & INT_TRIG_RX_PROC) */ + __le32 rx_procs; + + /* (INT_STS_ND & INT_TRIG_PM_802) */ + __le32 hw_pm_mode_changes; + + /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */ + __le32 host_acknowledges; + + /* (INT_STS_ND & INT_TRIG_PM_PCI) */ + __le32 pci_pm; + + /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */ + __le32 wakeups; + + /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ + __le32 low_rssi; +} __attribute__ ((packed)); + +struct acx_wep_statistics { + /* WEP address keys configured */ + __le32 addr_key_count; + + /* default keys configured */ + __le32 default_key_count; + + __le32 reserved; + + /* number of times that WEP key not found on lookup */ + __le32 key_not_found; + + /* number of times that WEP key decryption failed */ + __le32 decrypt_fail; + + /* WEP packets decrypted */ + __le32 packets; + + /* WEP decrypt interrupts */ + __le32 interrupt; +} __attribute__ ((packed)); + +#define ACX_MISSED_BEACONS_SPREAD 10 + +struct acx_pwr_statistics { + /* the amount of enters into power save mode (both PD & ELP) */ + __le32 ps_enter; + + /* the amount of enters into ELP mode */ + __le32 elp_enter; + + /* the amount of missing 
beacon interrupts to the host */ + __le32 missing_bcns; + + /* the amount of wake on host-access times */ + __le32 wake_on_host; + + /* the amount of wake on timer-expire */ + __le32 wake_on_timer_exp; + + /* the number of packets that were transmitted with PS bit set */ + __le32 tx_with_ps; + + /* the number of packets that were transmitted with PS bit clear */ + __le32 tx_without_ps; + + /* the number of received beacons */ + __le32 rcvd_beacons; + + /* the number of entering into PowerOn (power save off) */ + __le32 power_save_off; + + /* the number of entries into power save mode */ + __le16 enable_ps; + + /* + * the number of exits from power save, not including failed PS + * transitions + */ + __le16 disable_ps; + + /* + * the number of times the TSF counter was adjusted because + * of drift + */ + __le32 fix_tsf_ps; + + /* Gives statistics about the spread continuous missed beacons. + * The 16 LSB are dedicated for the PS mode. + * The 16 MSB are dedicated for the PS mode. + * cont_miss_bcns_spread[0] - single missed beacon. + * cont_miss_bcns_spread[1] - two continuous missed beacons. + * cont_miss_bcns_spread[2] - three continuous missed beacons. + * ... + * cont_miss_bcns_spread[9] - ten and more continuous missed beacons. + */ + __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD]; + + /* the number of beacons in awake mode */ + __le32 rcvd_awake_beacons; +} __attribute__ ((packed)); + +struct acx_mic_statistics { + __le32 rx_pkts; + __le32 calc_failure; +} __attribute__ ((packed)); + +struct acx_aes_statistics { + __le32 encrypt_fail; + __le32 decrypt_fail; + __le32 encrypt_packets; + __le32 decrypt_packets; + __le32 encrypt_interrupt; + __le32 decrypt_interrupt; +} __attribute__ ((packed)); + +struct acx_event_statistics { + __le32 heart_beat; + __le32 calibration; + __le32 rx_mismatch; + __le32 rx_mem_empty; + __le32 rx_pool; + __le32 oom_late; + __le32 phy_transmit_error; + __le32 tx_stuck; +} __attribute__ ((packed)); + +struct acx_ps_statistics { + __le32 pspoll_timeouts; + __le32 upsd_timeouts; + __le32 upsd_max_sptime; + __le32 upsd_max_apturn; + __le32 pspoll_max_apturn; + __le32 pspoll_utilization; + __le32 upsd_utilization; +} __attribute__ ((packed)); + +struct acx_rxpipe_statistics { + __le32 rx_prep_beacon_drop; + __le32 descr_host_int_trig_rx_data; + __le32 beacon_buffer_thres_host_int_trig_rx_data; + __le32 missed_beacon_host_int_trig_rx_data; + __le32 tx_xfr_host_int_trig_rx_data; +} __attribute__ ((packed)); + +struct acx_statistics { + struct acx_header header; + + struct acx_tx_statistics tx; + struct acx_rx_statistics rx; + struct acx_dma_statistics dma; + struct acx_isr_statistics isr; + struct acx_wep_statistics wep; + struct acx_pwr_statistics pwr; + struct acx_aes_statistics aes; + struct acx_mic_statistics mic; + struct acx_event_statistics event; + struct acx_ps_statistics ps; + struct acx_rxpipe_statistics rxpipe; +} __attribute__ ((packed)); + +struct acx_rate_class { + __le32 enabled_rates; + u8 short_retry_limit; + u8 long_retry_limit; + u8 aflags; + u8 reserved; +}; + +#define ACX_TX_BASIC_RATE 0 +#define ACX_TX_AP_FULL_RATE 1 +#define ACX_TX_RATE_POLICY_CNT 2 +struct acx_rate_policy { + struct acx_header header; + + __le32 rate_class_cnt; + struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES]; +} __attribute__ ((packed)); + +struct acx_ac_cfg { + struct acx_header header; + u8 ac; + u8 cw_min; + __le16 cw_max; + u8 aifsn; + u8 reserved; + __le16 tx_op_limit; +} __attribute__ ((packed)); + +struct acx_tid_config { + struct acx_header 
header; + u8 queue_id; + u8 channel_type; + u8 tsid; + u8 ps_scheme; + u8 ack_policy; + u8 padding[3]; + __le32 apsd_conf[2]; +} __attribute__ ((packed)); + +struct acx_frag_threshold { + struct acx_header header; + __le16 frag_threshold; + u8 padding[2]; +} __attribute__ ((packed)); + +struct acx_tx_config_options { + struct acx_header header; + __le16 tx_compl_timeout; /* msec */ + __le16 tx_compl_threshold; /* number of packets */ +} __attribute__ ((packed)); + +#define ACX_RX_MEM_BLOCKS 70 +#define ACX_TX_MIN_MEM_BLOCKS 40 +#define ACX_TX_DESCRIPTORS 32 +#define ACX_NUM_SSID_PROFILES 1 + +struct wl1271_acx_config_memory { + struct acx_header header; + + u8 rx_mem_block_num; + u8 tx_min_mem_block_num; + u8 num_stations; + u8 num_ssid_profiles; + __le32 total_tx_descriptors; +} __attribute__ ((packed)); + +struct wl1271_acx_mem_map { + struct acx_header header; + + __le32 code_start; + __le32 code_end; + + __le32 wep_defkey_start; + __le32 wep_defkey_end; + + __le32 sta_table_start; + __le32 sta_table_end; + + __le32 packet_template_start; + __le32 packet_template_end; + + /* Address of the TX result interface (control block) */ + __le32 tx_result; + __le32 tx_result_queue_start; + + __le32 queue_memory_start; + __le32 queue_memory_end; + + __le32 packet_memory_pool_start; + __le32 packet_memory_pool_end; + + __le32 debug_buffer1_start; + __le32 debug_buffer1_end; + + __le32 debug_buffer2_start; + __le32 debug_buffer2_end; + + /* Number of blocks FW allocated for TX packets */ + __le32 num_tx_mem_blocks; + + /* Number of blocks FW allocated for RX packets */ + __le32 num_rx_mem_blocks; + + /* the following 4 fields are valid in SLAVE mode only */ + u8 *tx_cbuf; + u8 *rx_cbuf; + __le32 rx_ctrl; + __le32 tx_ctrl; +} __attribute__ ((packed)); + +struct wl1271_acx_rx_config_opt { + struct acx_header header; + + __le16 mblk_threshold; + __le16 threshold; + __le16 timeout; + u8 queue_type; + u8 reserved; +} __attribute__ ((packed)); + + +struct wl1271_acx_bet_enable { + struct acx_header header; + + u8 enable; + u8 max_consecutive; + u8 padding[2]; +} __attribute__ ((packed)); + +#define ACX_IPV4_VERSION 4 +#define ACX_IPV6_VERSION 6 +#define ACX_IPV4_ADDR_SIZE 4 +struct wl1271_acx_arp_filter { + struct acx_header header; + u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */ + u8 enable; /* 1 to enable ARP filtering, 0 to disable */ + u8 padding[2]; + u8 address[16]; /* The configured device IP address - all ARP + requests directed to this IP address will pass + through. For IPv4, the first four bytes are + used. */ +} __attribute__((packed)); + +struct wl1271_acx_pm_config { + struct acx_header header; + + __le32 host_clk_settling_time; + u8 host_fast_wakeup_support; + u8 padding[3]; +} __attribute__ ((packed)); + +enum { + ACX_WAKE_UP_CONDITIONS = 0x0002, + ACX_MEM_CFG = 0x0003, + ACX_SLOT = 0x0004, + ACX_AC_CFG = 0x0007, + ACX_MEM_MAP = 0x0008, + ACX_AID = 0x000A, + /* ACX_FW_REV is missing in the ref driver, but seems to work */ + ACX_FW_REV = 0x000D, + ACX_MEDIUM_USAGE = 0x000F, + ACX_RX_CFG = 0x0010, + ACX_TX_QUEUE_CFG = 0x0011, /* FIXME: only used by wl1251 */ + ACX_STATISTICS = 0x0013, /* Debug API */ + ACX_PWR_CONSUMPTION_STATISTICS = 0x0014, + ACX_FEATURE_CFG = 0x0015, + ACX_TID_CFG = 0x001A, + ACX_PS_RX_STREAMING = 0x001B, + ACX_BEACON_FILTER_OPT = 0x001F, + ACX_NOISE_HIST = 0x0021, + ACX_HDK_VERSION = 0x0022, /* ??? 
*/ + ACX_PD_THRESHOLD = 0x0023, + ACX_TX_CONFIG_OPT = 0x0024, + ACX_CCA_THRESHOLD = 0x0025, + ACX_EVENT_MBOX_MASK = 0x0026, + ACX_CONN_MONIT_PARAMS = 0x002D, + ACX_CONS_TX_FAILURE = 0x002F, + ACX_BCN_DTIM_OPTIONS = 0x0031, + ACX_SG_ENABLE = 0x0032, + ACX_SG_CFG = 0x0033, + ACX_BEACON_FILTER_TABLE = 0x0038, + ACX_ARP_IP_FILTER = 0x0039, + ACX_ROAMING_STATISTICS_TBL = 0x003B, + ACX_RATE_POLICY = 0x003D, + ACX_CTS_PROTECTION = 0x003E, + ACX_SLEEP_AUTH = 0x003F, + ACX_PREAMBLE_TYPE = 0x0040, + ACX_ERROR_CNT = 0x0041, + ACX_IBSS_FILTER = 0x0044, + ACX_SERVICE_PERIOD_TIMEOUT = 0x0045, + ACX_TSF_INFO = 0x0046, + ACX_CONFIG_PS_WMM = 0x0049, + ACX_ENABLE_RX_DATA_FILTER = 0x004A, + ACX_SET_RX_DATA_FILTER = 0x004B, + ACX_GET_DATA_FILTER_STATISTICS = 0x004C, + ACX_RX_CONFIG_OPT = 0x004E, + ACX_FRAG_CFG = 0x004F, + ACX_BET_ENABLE = 0x0050, + ACX_RSSI_SNR_TRIGGER = 0x0051, + ACX_RSSI_SNR_WEIGHTS = 0x0051, + ACX_KEEP_ALIVE_MODE = 0x0052, + ACX_SET_KEEP_ALIVE_CONFIG = 0x0054, + ACX_BA_SESSION_RESPONDER_POLICY = 0x0055, + ACX_BA_SESSION_INITIATOR_POLICY = 0x0056, + ACX_PEER_HT_CAP = 0x0057, + ACX_HT_BSS_OPERATION = 0x0058, + ACX_COEX_ACTIVITY = 0x0059, + ACX_SET_SMART_REFLEX_DEBUG = 0x005A, + ACX_SET_DCO_ITRIM_PARAMS = 0x0061, + DOT11_RX_MSDU_LIFE_TIME = 0x1004, + DOT11_CUR_TX_PWR = 0x100D, + DOT11_RX_DOT11_MODE = 0x1012, + DOT11_RTS_THRESHOLD = 0x1013, + DOT11_GROUP_ADDRESS_TBL = 0x1014, + ACX_PM_CONFIG = 0x1016, + + MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL, + + MAX_IE = 0xFFFF +}; + + +int wl1271_acx_wake_up_conditions(struct wl1271 *wl); +int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth); +int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len); +int wl1271_acx_tx_power(struct wl1271 *wl, int power); +int wl1271_acx_feature_cfg(struct wl1271 *wl); +int wl1271_acx_mem_map(struct wl1271 *wl, + struct acx_header *mem_map, size_t len); +int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl); +int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter); +int wl1271_acx_pd_threshold(struct wl1271 *wl); +int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time); +int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, + void *mc_list, u32 mc_list_len); +int wl1271_acx_service_period_timeout(struct wl1271 *wl); +int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold); +int wl1271_acx_dco_itrim_params(struct wl1271 *wl); +int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter); +int wl1271_acx_beacon_filter_table(struct wl1271 *wl); +int wl1271_acx_conn_monit_params(struct wl1271 *wl); +int wl1271_acx_sg_enable(struct wl1271 *wl); +int wl1271_acx_sg_cfg(struct wl1271 *wl); +int wl1271_acx_cca_threshold(struct wl1271 *wl); +int wl1271_acx_bcn_dtim_options(struct wl1271 *wl); +int wl1271_acx_aid(struct wl1271 *wl, u16 aid); +int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask); +int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble); +int wl1271_acx_cts_protect(struct wl1271 *wl, + enum acx_ctsprotect_type ctsprotect); +int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); +int wl1271_acx_rate_policies(struct wl1271 *wl); +int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, + u8 aifsn, u16 txop); +int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, + u8 tsid, u8 ps_scheme, u8 ack_policy, + u32 apsd_conf0, u32 apsd_conf1); +int wl1271_acx_frag_threshold(struct wl1271 *wl); +int wl1271_acx_tx_config_options(struct wl1271 *wl); +int 
wl1271_acx_mem_cfg(struct wl1271 *wl); +int wl1271_acx_init_mem_config(struct wl1271 *wl); +int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); +int wl1271_acx_smart_reflex(struct wl1271 *wl); +int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, + u8 version); +int wl1271_acx_pm_config(struct wl1271 *wl); + +#endif /* __WL1271_ACX_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c new file mode 100644 index 0000000..2be76ee --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_boot.c @@ -0,0 +1,551 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include + +#include "wl1271_acx.h" +#include "wl1271_reg.h" +#include "wl1271_boot.h" +#include "wl1271_spi.h" +#include "wl1271_io.h" +#include "wl1271_event.h" + +static struct wl1271_partition_set part_table[PART_TABLE_LEN] = { + [PART_DOWN] = { + .mem = { + .start = 0x00000000, + .size = 0x000177c0 + }, + .reg = { + .start = REGISTERS_BASE, + .size = 0x00008800 + }, + .mem2 = { + .start = 0x00000000, + .size = 0x00000000 + }, + .mem3 = { + .start = 0x00000000, + .size = 0x00000000 + }, + }, + + [PART_WORK] = { + .mem = { + .start = 0x00040000, + .size = 0x00014fc0 + }, + .reg = { + .start = REGISTERS_BASE, + .size = 0x0000a000 + }, + .mem2 = { + .start = 0x003004f8, + .size = 0x00000004 + }, + .mem3 = { + .start = 0x00040404, + .size = 0x00000000 + }, + }, + + [PART_DRPW] = { + .mem = { + .start = 0x00040000, + .size = 0x00014fc0 + }, + .reg = { + .start = DRPW_BASE, + .size = 0x00006000 + }, + .mem2 = { + .start = 0x00000000, + .size = 0x00000000 + }, + .mem3 = { + .start = 0x00000000, + .size = 0x00000000 + } + } +}; + +static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag) +{ + u32 cpu_ctrl; + + /* 10.5.0 run the firmware (I) */ + cpu_ctrl = wl1271_read32(wl, ACX_REG_ECPU_CONTROL); + + /* 10.5.1 run the firmware (II) */ + cpu_ctrl |= flag; + wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); +} + +static void wl1271_boot_fw_version(struct wl1271 *wl) +{ + struct wl1271_static_data static_data; + + wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data), + false); + + strncpy(wl->chip.fw_ver, static_data.fw_version, + sizeof(wl->chip.fw_ver)); + + /* make sure the string is NULL-terminated */ + wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0'; +} + +static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, + size_t fw_data_len, u32 dest) +{ + struct wl1271_partition_set partition; + int addr, chunk_num, partition_limit; + u8 *p, *chunk; + + /* whal_FwCtrl_LoadFwImageSm() */ + + wl1271_debug(DEBUG_BOOT, "starting firmware upload"); + + wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d", + fw_data_len, CHUNK_SIZE); + + 
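/*
 * Overview of the upload loop below: the image is streamed through a
 * CHUNK_SIZE-byte bounce buffer.  The loop performs fw_data_len /
 * CHUNK_SIZE full-size writes followed by one trailing write of
 * fw_data_len % CHUNK_SIZE bytes, and re-points the PART_DOWN memory
 * partition whenever the next chunk would cross the current window, so
 * that every destination address stays mapped.
 */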
if ((fw_data_len % 4) != 0) { + wl1271_error("firmware length not multiple of four"); + return -EIO; + } + + chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL); + if (!chunk) { + wl1271_error("allocation for firmware upload chunk failed"); + return -ENOMEM; + } + + memcpy(&partition, &part_table[PART_DOWN], sizeof(partition)); + partition.mem.start = dest; + wl1271_set_partition(wl, &partition); + + /* 10.1 set partition limit and chunk num */ + chunk_num = 0; + partition_limit = part_table[PART_DOWN].mem.size; + + while (chunk_num < fw_data_len / CHUNK_SIZE) { + /* 10.2 update partition, if needed */ + addr = dest + (chunk_num + 2) * CHUNK_SIZE; + if (addr > partition_limit) { + addr = dest + chunk_num * CHUNK_SIZE; + partition_limit = chunk_num * CHUNK_SIZE + + part_table[PART_DOWN].mem.size; + partition.mem.start = addr; + wl1271_set_partition(wl, &partition); + } + + /* 10.3 upload the chunk */ + addr = dest + chunk_num * CHUNK_SIZE; + p = buf + chunk_num * CHUNK_SIZE; + memcpy(chunk, p, CHUNK_SIZE); + wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", + p, addr); + wl1271_write(wl, addr, chunk, CHUNK_SIZE, false); + + chunk_num++; + } + + /* 10.4 upload the last chunk */ + addr = dest + chunk_num * CHUNK_SIZE; + p = buf + chunk_num * CHUNK_SIZE; + memcpy(chunk, p, fw_data_len % CHUNK_SIZE); + wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x", + fw_data_len % CHUNK_SIZE, p, addr); + wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false); + + kfree(chunk); + return 0; +} + +static int wl1271_boot_upload_firmware(struct wl1271 *wl) +{ + u32 chunks, addr, len; + int ret = 0; + u8 *fw; + + fw = wl->fw; + chunks = be32_to_cpup((__be32 *) fw); + fw += sizeof(u32); + + wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); + + while (chunks--) { + addr = be32_to_cpup((__be32 *) fw); + fw += sizeof(u32); + len = be32_to_cpup((__be32 *) fw); + fw += sizeof(u32); + + if (len > 300000) { + wl1271_info("firmware chunk too long: %u", len); + return -EINVAL; + } + wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u", + chunks, addr, len); + ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr); + if (ret != 0) + break; + fw += len; + } + + return ret; +} + +static int wl1271_boot_upload_nvs(struct wl1271 *wl) +{ + size_t nvs_len, burst_len; + int i; + u32 dest_addr, val; + u8 *nvs_ptr, *nvs_aligned; + + if (wl->nvs == NULL) + return -ENODEV; + + /* only the first part of the NVS needs to be uploaded */ + nvs_len = sizeof(wl->nvs->nvs); + nvs_ptr = (u8 *)wl->nvs->nvs; + + /* + * Layout before the actual NVS tables: + * 1 byte : burst length. + * 2 bytes: destination address. + * n bytes: data to burst copy. + * + * This is ended by a 0 length, then the NVS tables. + */ + + /* FIXME: Do we need to check here whether the LSB is 1? */ + while (nvs_ptr[0]) { + burst_len = nvs_ptr[0]; + dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8)); + + /* FIXME: Due to our new wl1271_translate_reg_addr function, + we need to add the REGISTER_BASE to the destination */ + dest_addr += REGISTERS_BASE; + + /* We move our pointer to the data */ + nvs_ptr += 3; + + for (i = 0; i < burst_len; i++) { + val = (nvs_ptr[0] | (nvs_ptr[1] << 8) + | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); + + wl1271_debug(DEBUG_BOOT, + "nvs burst write 0x%x: 0x%x", + dest_addr, val); + wl1271_write32(wl, dest_addr, val); + + nvs_ptr += 4; + dest_addr += 4; + } + } + + /* + * We've reached the first zero length, the first NVS table + * is 7 bytes further. 
+ */ + nvs_ptr += 7; + nvs_len -= nvs_ptr - (u8 *)wl->nvs->nvs; + nvs_len = ALIGN(nvs_len, 4); + + /* FIXME: The driver sets the partition here, but this is not needed, + since it sets to the same one as currently in use */ + /* Now we must set the partition correctly */ + wl1271_set_partition(wl, &part_table[PART_WORK]); + + /* Copy the NVS tables to a new block to ensure alignment */ + /* FIXME: We jump 3 more bytes before uploading the NVS. It seems + that our NVS files have three extra zeros here. I'm not sure whether + the problem is in our NVS generation or we should really jumpt these + 3 bytes here */ + nvs_ptr += 3; + + nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); if + (!nvs_aligned) return -ENOMEM; + + /* And finally we upload the NVS tables */ + /* FIXME: In wl1271, we upload everything at once. + No endianness handling needed here?! The ref driver doesn't do + anything about it at this point */ + wl1271_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false); + + kfree(nvs_aligned); + return 0; +} + +static void wl1271_boot_enable_interrupts(struct wl1271 *wl) +{ + enable_irq(wl->irq); + wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, + WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); + wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL); +} + +static int wl1271_boot_soft_reset(struct wl1271 *wl) +{ + unsigned long timeout; + u32 boot_data; + + /* perform soft reset */ + wl1271_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); + + /* SOFT_RESET is self clearing */ + timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); + while (1) { + boot_data = wl1271_read32(wl, ACX_REG_SLV_SOFT_RESET); + wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); + if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) + break; + + if (time_after(jiffies, timeout)) { + /* 1.2 check pWhalBus->uSelfClearTime if the + * timeout was reached */ + wl1271_error("soft reset timeout"); + return -1; + } + + udelay(SOFT_RESET_STALL_TIME); + } + + /* disable Rx/Tx */ + wl1271_write32(wl, ENABLE, 0x0); + + /* disable auto calibration on start*/ + wl1271_write32(wl, SPARE_A2, 0xffff); + + return 0; +} + +static int wl1271_boot_run_firmware(struct wl1271 *wl) +{ + int loop, ret; + u32 chip_id, interrupt; + + wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); + + chip_id = wl1271_read32(wl, CHIP_ID_B); + + wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); + + if (chip_id != wl->chip.id) { + wl1271_error("chip id doesn't match after firmware boot"); + return -EIO; + } + + /* wait for init to complete */ + loop = 0; + while (loop++ < INIT_LOOP) { + udelay(INIT_LOOP_DELAY); + interrupt = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); + + if (interrupt == 0xffffffff) { + wl1271_error("error reading hardware complete " + "init indication"); + return -EIO; + } + /* check that ACX_INTR_INIT_COMPLETE is enabled */ + else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) { + wl1271_write32(wl, ACX_REG_INTERRUPT_ACK, + WL1271_ACX_INTR_INIT_COMPLETE); + break; + } + } + + if (loop > INIT_LOOP) { + wl1271_error("timeout waiting for the hardware to " + "complete initialization"); + return -EIO; + } + + /* get hardware config command mail box */ + wl->cmd_box_addr = wl1271_read32(wl, REG_COMMAND_MAILBOX_PTR); + + /* get hardware config event mail box */ + wl->event_box_addr = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR); + + /* set the working partition to its "running" mode offset */ + wl1271_set_partition(wl, &part_table[PART_WORK]); + + wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 
0x%x", + wl->cmd_box_addr, wl->event_box_addr); + + wl1271_boot_fw_version(wl); + + /* + * in case of full asynchronous mode the firmware event must be + * ready to receive event from the command mailbox + */ + + /* unmask required mbox events */ + wl->event_mask = BSS_LOSE_EVENT_ID | + SCAN_COMPLETE_EVENT_ID | + PS_REPORT_EVENT_ID; + + ret = wl1271_event_unmask(wl); + if (ret < 0) { + wl1271_error("EVENT mask setting failed"); + return ret; + } + + wl1271_event_mbox_config(wl); + + /* firmware startup completed */ + return 0; +} + +static int wl1271_boot_write_irq_polarity(struct wl1271 *wl) +{ + u32 polarity; + + polarity = wl1271_top_reg_read(wl, OCP_REG_POLARITY); + + /* We use HIGH polarity, so unset the LOW bit */ + polarity &= ~POLARITY_LOW; + wl1271_top_reg_write(wl, OCP_REG_POLARITY, polarity); + + return 0; +} + +int wl1271_boot(struct wl1271 *wl) +{ + int ret = 0; + u32 tmp, clk, pause; + + if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4) + /* ref clk: 19.2/38.4/38.4-XTAL */ + clk = 0x3; + else if (REF_CLOCK == 1 || REF_CLOCK == 3) + /* ref clk: 26/52 */ + clk = 0x5; + + if (REF_CLOCK != 0) { + u16 val; + /* Set clock type */ + val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE); + val &= FREF_CLK_TYPE_BITS; + val |= CLK_REQ_PRCM; + wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val); + } else { + u16 val; + /* Set clock polarity */ + val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY); + val &= FREF_CLK_POLARITY_BITS; + val |= CLK_REQ_OUTN_SEL; + wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val); + } + + wl1271_write32(wl, PLL_PARAMETERS, clk); + + pause = wl1271_read32(wl, PLL_PARAMETERS); + + wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); + + pause &= ~(WU_COUNTER_PAUSE_VAL); /* FIXME: This should probably be + * WU_COUNTER_PAUSE_VAL instead of + * 0x3ff (magic number ). How does + * this work?! */ + pause |= WU_COUNTER_PAUSE_VAL; + wl1271_write32(wl, WU_COUNTER_PAUSE, pause); + + /* Continue the ELP wake up sequence */ + wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); + udelay(500); + + wl1271_set_partition(wl, &part_table[PART_DRPW]); + + /* Read-modify-write DRPW_SCRATCH_START register (see next state) + to be used by DRPw FW. The RTRIM value will be added by the FW + before taking DRPw out of reset */ + + wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START); + clk = wl1271_read32(wl, DRPW_SCRATCH_START); + + wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); + + /* 2 */ + clk |= (REF_CLOCK << 1) << 4; + wl1271_write32(wl, DRPW_SCRATCH_START, clk); + + wl1271_set_partition(wl, &part_table[PART_WORK]); + + /* Disable interrupts */ + wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); + + ret = wl1271_boot_soft_reset(wl); + if (ret < 0) + goto out; + + /* 2. start processing NVS file */ + ret = wl1271_boot_upload_nvs(wl); + if (ret < 0) + goto out; + + /* write firmware's last address (ie. it's length) to + * ACX_EEPROMLESS_IND_REG */ + wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG"); + + wl1271_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG); + + tmp = wl1271_read32(wl, CHIP_ID_B); + + wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); + + /* 6. 
read the EEPROM parameters */ + tmp = wl1271_read32(wl, SCR_PAD2); + + ret = wl1271_boot_write_irq_polarity(wl); + if (ret < 0) + goto out; + + /* FIXME: Need to check whether this is really what we want */ + wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, + WL1271_ACX_ALL_EVENTS_VECTOR); + + /* WL1271: The reference driver skips steps 7 to 10 (jumps directly + * to upload_fw) */ + + ret = wl1271_boot_upload_firmware(wl); + if (ret < 0) + goto out; + + /* 10.5 start firmware */ + ret = wl1271_boot_run_firmware(wl); + if (ret < 0) + goto out; + + /* Enable firmware interrupts now */ + wl1271_boot_enable_interrupts(wl); + + /* set the wl1271 default filters */ + wl->rx_config = WL1271_DEFAULT_RX_CONFIG; + wl->rx_filter = WL1271_DEFAULT_RX_FILTER; + + wl1271_event_mbox_config(wl); + +out: + return ret; +} diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h new file mode 100644 index 0000000..412443e --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_boot.h @@ -0,0 +1,66 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __BOOT_H__ +#define __BOOT_H__ + +#include "wl1271.h" + +int wl1271_boot(struct wl1271 *wl); + +#define WL1271_NO_SUBBANDS 8 +#define WL1271_NO_POWER_LEVELS 4 +#define WL1271_FW_VERSION_MAX_LEN 20 + +struct wl1271_static_data { + u8 mac_address[ETH_ALEN]; + u8 padding[2]; + u8 fw_version[WL1271_FW_VERSION_MAX_LEN]; + u32 hw_version; + u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS]; +}; + +/* number of times we try to read the INIT interrupt */ +#define INIT_LOOP 20000 + +/* delay between retries */ +#define INIT_LOOP_DELAY 50 + +#define REF_CLOCK 2 +#define WU_COUNTER_PAUSE_VAL 0x3FF +#define WELP_ARM_COMMAND_VAL 0x4 + +#define OCP_REG_POLARITY 0x0064 +#define OCP_REG_CLK_TYPE 0x0448 +#define OCP_REG_CLK_POLARITY 0x0cb2 + +#define CMD_MBOX_ADDRESS 0x407B4 + +#define POLARITY_LOW BIT(1) + +#define FREF_CLK_TYPE_BITS 0xfffffe7f +#define CLK_REQ_PRCM 0x100 +#define FREF_CLK_POLARITY_BITS 0xfffff8ff +#define CLK_REQ_OUTN_SEL 0x700 + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c new file mode 100644 index 0000000..36a64e0 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c @@ -0,0 +1,983 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include + +#include "wl1271.h" +#include "wl1271_reg.h" +#include "wl1271_spi.h" +#include "wl1271_io.h" +#include "wl1271_acx.h" +#include "wl12xx_80211.h" +#include "wl1271_cmd.h" + +/* + * send command to firmware + * + * @wl: wl struct + * @id: command id + * @buf: buffer containing the command, must work with dma + * @len: length of the buffer + */ +int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, + size_t res_len) +{ + struct wl1271_cmd_header *cmd; + unsigned long timeout; + u32 intr; + int ret = 0; + u16 status; + + cmd = buf; + cmd->id = cpu_to_le16(id); + cmd->status = 0; + + WARN_ON(len % 4 != 0); + + wl1271_write(wl, wl->cmd_box_addr, buf, len, false); + + wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); + + timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); + + intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); + while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { + if (time_after(jiffies, timeout)) { + wl1271_error("command complete timeout"); + ret = -ETIMEDOUT; + goto out; + } + + msleep(1); + + intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); + } + + /* read back the status code of the command */ + if (res_len == 0) + res_len = sizeof(struct wl1271_cmd_header); + wl1271_read(wl, wl->cmd_box_addr, cmd, res_len, false); + + status = le16_to_cpu(cmd->status); + if (status != CMD_STATUS_SUCCESS) { + wl1271_error("command execute failure %d", status); + ret = -EIO; + } + + wl1271_write32(wl, ACX_REG_INTERRUPT_ACK, + WL1271_ACX_INTR_CMD_COMPLETE); + +out: + return ret; +} + +static int wl1271_cmd_cal_channel_tune(struct wl1271 *wl) +{ + struct wl1271_cmd_cal_channel_tune *cmd; + int ret = 0; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->test.id = TEST_CMD_CHANNEL_TUNE; + + cmd->band = WL1271_CHANNEL_TUNE_BAND_2_4; + /* set up any channel, 7 is in the middle of the range */ + cmd->channel = 7; + + ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0); + if (ret < 0) + wl1271_warning("TEST_CMD_CHANNEL_TUNE failed"); + + kfree(cmd); + return ret; +} + +static int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl) +{ + struct wl1271_cmd_cal_update_ref_point *cmd; + int ret = 0; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->test.id = TEST_CMD_UPDATE_PD_REFERENCE_POINT; + + /* FIXME: still waiting for the correct values */ + cmd->ref_power = 0; + cmd->ref_detector = 0; + + cmd->sub_band = WL1271_PD_REFERENCE_POINT_BAND_B_G; + + ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0); + if (ret < 0) + wl1271_warning("TEST_CMD_UPDATE_PD_REFERENCE_POINT failed"); + + kfree(cmd); + return ret; +} + +static int wl1271_cmd_cal_p2g(struct wl1271 *wl) +{ + struct wl1271_cmd_cal_p2g *cmd; + int ret = 0; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->test.id = TEST_CMD_P2G_CAL; + + cmd->sub_band_mask = WL1271_CAL_P2G_BAND_B_G; + + ret = wl1271_cmd_test(wl, cmd, sizeof(*cmd), 0); + if (ret < 0) + wl1271_warning("TEST_CMD_P2G_CAL failed"); + + kfree(cmd); + return ret; +} + +static int wl1271_cmd_cal(struct wl1271 *wl) +{ + /* + * FIXME: we must make sure that we're not sleeping when calibration + * is done + */ + int ret; + + 
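/*
 * The calibration sequence below issues three TEST commands in order:
 * channel tune (TEST_CMD_CHANNEL_TUNE), power detector reference point
 * (TEST_CMD_UPDATE_PD_REFERENCE_POINT) and finally the P2G calibration
 * itself (TEST_CMD_P2G_CAL).  The first failure aborts the sequence and
 * its error code is returned to the caller.
 */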
wl1271_notice("performing tx calibration"); + + ret = wl1271_cmd_cal_channel_tune(wl); + if (ret < 0) + return ret; + + ret = wl1271_cmd_cal_update_ref_point(wl); + if (ret < 0) + return ret; + + ret = wl1271_cmd_cal_p2g(wl); + if (ret < 0) + return ret; + + return ret; +} + +int wl1271_cmd_general_parms(struct wl1271 *wl) +{ + struct wl1271_general_parms_cmd *gen_parms; + int ret; + + if (!wl->nvs) + return -ENODEV; + + gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); + if (!gen_parms) + return -ENOMEM; + + gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM; + + memcpy(gen_parms->params, wl->nvs->general_params, + WL1271_NVS_GENERAL_PARAMS_SIZE); + + ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); + if (ret < 0) + wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed"); + + kfree(gen_parms); + return ret; +} + +int wl1271_cmd_radio_parms(struct wl1271 *wl) +{ + struct wl1271_radio_parms_cmd *radio_parms; + struct conf_radio_parms *rparam = &wl->conf.init.radioparam; + int ret; + + if (!wl->nvs) + return -ENODEV; + + radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL); + if (!radio_parms) + return -ENOMEM; + + radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; + + memcpy(radio_parms->stat_radio_params, wl->nvs->stat_radio_params, + WL1271_NVS_STAT_RADIO_PARAMS_SIZE); + memcpy(radio_parms->dyn_radio_params, + wl->nvs->dyn_radio_params[rparam->fem], + WL1271_NVS_DYN_RADIO_PARAMS_SIZE); + + /* FIXME: current NVS is missing 5GHz parameters */ + + wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", + radio_parms, sizeof(*radio_parms)); + + ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0); + if (ret < 0) + wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed"); + + kfree(radio_parms); + return ret; +} + +int wl1271_cmd_join(struct wl1271 *wl) +{ + static bool do_cal = true; + struct wl1271_cmd_join *join; + int ret, i; + u8 *bssid; + + /* FIXME: remove when we get calibration from the factory */ + if (do_cal) { + ret = wl1271_cmd_cal(wl); + if (ret < 0) + wl1271_warning("couldn't calibrate"); + else + do_cal = false; + } + + join = kzalloc(sizeof(*join), GFP_KERNEL); + if (!join) { + ret = -ENOMEM; + goto out; + } + + wl1271_debug(DEBUG_CMD, "cmd join"); + + /* Reverse order BSSID */ + bssid = (u8 *) &join->bssid_lsb; + for (i = 0; i < ETH_ALEN; i++) + bssid[i] = wl->bssid[ETH_ALEN - i - 1]; + + join->rx_config_options = cpu_to_le32(wl->rx_config); + join->rx_filter_options = cpu_to_le32(wl->rx_filter); + join->bss_type = wl->bss_type; + + /* + * FIXME: disable temporarily all filters because after commit + * 9cef8737 "mac80211: fix managed mode BSSID handling" broke + * association. The filter logic needs to be implemented properly + * and once that is done, this hack can be removed. 
+ */ + join->rx_config_options = cpu_to_le32(0); + join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER); + + if (wl->band == IEEE80211_BAND_2GHZ) + join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS | + CONF_HW_BIT_RATE_2MBPS | + CONF_HW_BIT_RATE_5_5MBPS | + CONF_HW_BIT_RATE_11MBPS); + else { + join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ; + join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS | + CONF_HW_BIT_RATE_12MBPS | + CONF_HW_BIT_RATE_24MBPS); + } + + join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT); + join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD; + + join->channel = wl->channel; + join->ssid_len = wl->ssid_len; + memcpy(join->ssid, wl->ssid, wl->ssid_len); + join->ctrl = WL1271_JOIN_CMD_CTRL_TX_FLUSH; + + /* increment the session counter */ + wl->session_counter++; + if (wl->session_counter >= SESSION_COUNTER_MAX) + wl->session_counter = 0; + + join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET; + + /* reset TX security counters */ + wl->tx_security_last_seq = 0; + wl->tx_security_seq_16 = 0; + wl->tx_security_seq_32 = 0; + + ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd join"); + goto out_free; + } + + /* + * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to + * simplify locking we just sleep instead, for now + */ + msleep(10); + +out_free: + kfree(join); + +out: + return ret; +} + +/** + * send test command to firmware + * + * @wl: wl struct + * @buf: buffer containing the command, with all headers, must work with dma + * @len: length of the buffer + * @answer: is answer needed + */ +int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer) +{ + int ret; + size_t res_len = 0; + + wl1271_debug(DEBUG_CMD, "cmd test"); + + if (answer) + res_len = buf_len; + + ret = wl1271_cmd_send(wl, CMD_TEST, buf, buf_len, res_len); + + if (ret < 0) { + wl1271_warning("TEST command failed"); + return ret; + } + + return ret; +} + +/** + * read acx from firmware + * + * @wl: wl struct + * @id: acx id + * @buf: buffer for the response, including all headers, must work with dma + * @len: lenght of buf + */ +int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len) +{ + struct acx_header *acx = buf; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd interrogate"); + + acx->id = cpu_to_le16(id); + + /* payload length, does not include any headers */ + acx->len = cpu_to_le16(len - sizeof(*acx)); + + ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx), len); + if (ret < 0) + wl1271_error("INTERROGATE command failed"); + + return ret; +} + +/** + * write acx value to firmware + * + * @wl: wl struct + * @id: acx id + * @buf: buffer containing acx, including all headers, must work with dma + * @len: length of buf + */ +int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len) +{ + struct acx_header *acx = buf; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd configure"); + + acx->id = cpu_to_le16(id); + + /* payload length, does not include any headers */ + acx->len = cpu_to_le16(len - sizeof(*acx)); + + ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len, 0); + if (ret < 0) { + wl1271_warning("CONFIGURE command NOK"); + return ret; + } + + return 0; +} + +int wl1271_cmd_data_path(struct wl1271 *wl, bool enable) +{ + struct cmd_enabledisable_path *cmd; + int ret; + u16 cmd_rx, cmd_tx; + + wl1271_debug(DEBUG_CMD, "cmd data path"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + 
goto out; + } + + /* the channel here is only used for calibration, so hardcoded to 1 */ + cmd->channel = 1; + + if (enable) { + cmd_rx = CMD_ENABLE_RX; + cmd_tx = CMD_ENABLE_TX; + } else { + cmd_rx = CMD_DISABLE_RX; + cmd_tx = CMD_DISABLE_TX; + } + + ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("rx %s cmd for channel %d failed", + enable ? "start" : "stop", cmd->channel); + goto out; + } + + wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d", + enable ? "start" : "stop", cmd->channel); + + ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("tx %s cmd for channel %d failed", + enable ? "start" : "stop", cmd->channel); + return ret; + } + + wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d", + enable ? "start" : "stop", cmd->channel); + +out: + kfree(cmd); + return ret; +} + +int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send) +{ + struct wl1271_cmd_ps_params *ps_params = NULL; + int ret = 0; + + /* FIXME: this should be in ps.c */ + ret = wl1271_acx_wake_up_conditions(wl); + if (ret < 0) { + wl1271_error("couldn't set wake up conditions"); + goto out; + } + + wl1271_debug(DEBUG_CMD, "cmd set ps mode"); + + ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL); + if (!ps_params) { + ret = -ENOMEM; + goto out; + } + + ps_params->ps_mode = ps_mode; + ps_params->send_null_data = send; + ps_params->retries = 5; + ps_params->hang_over_period = 128; + ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */ + + ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, + sizeof(*ps_params), 0); + if (ret < 0) { + wl1271_error("cmd set_ps_mode failed"); + goto out; + } + +out: + kfree(ps_params); + return ret; +} + +int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, + size_t len) +{ + struct cmd_read_write_memory *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd read memory"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + WARN_ON(len > MAX_READ_SIZE); + len = min_t(size_t, len, MAX_READ_SIZE); + + cmd->addr = cpu_to_le32(addr); + cmd->size = cpu_to_le32(len); + + ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd), + sizeof(*cmd)); + if (ret < 0) { + wl1271_error("read memory command failed: %d", ret); + goto out; + } + + /* the read command got in */ + memcpy(answer, cmd->value, len); + +out: + kfree(cmd); + return ret; +} + +int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, + u8 active_scan, u8 high_prio, u8 band, + u8 probe_requests) +{ + + struct wl1271_cmd_trigger_scan_to *trigger = NULL; + struct wl1271_cmd_scan *params = NULL; + struct ieee80211_channel *channels; + int i, j, n_ch, ret; + u16 scan_options = 0; + u8 ieee_band; + + if (band == WL1271_SCAN_BAND_2_4_GHZ) + ieee_band = IEEE80211_BAND_2GHZ; + else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled()) + ieee_band = IEEE80211_BAND_2GHZ; + else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled()) + ieee_band = IEEE80211_BAND_5GHZ; + else + return -EINVAL; + + if (wl->hw->wiphy->bands[ieee_band]->channels == NULL) + return -EINVAL; + + channels = wl->hw->wiphy->bands[ieee_band]->channels; + n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels; + + if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) + return -EINVAL; + + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) + return -ENOMEM; + + params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD); + params->params.rx_filter_options = + cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | 
CFG_RX_BCN_EN); + + if (!active_scan) + scan_options |= WL1271_SCAN_OPT_PASSIVE; + if (high_prio) + scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH; + params->params.scan_options = cpu_to_le16(scan_options); + + params->params.num_probe_requests = probe_requests; + /* Let the fw autodetect suitable tx_rate for probes */ + params->params.tx_rate = 0; + params->params.tid_trigger = 0; + params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; + + if (band == WL1271_SCAN_BAND_DUAL) + params->params.band = WL1271_SCAN_BAND_2_4_GHZ; + else + params->params.band = band; + + for (i = 0, j = 0; i < n_ch && i < WL1271_SCAN_MAX_CHANNELS; i++) { + if (!(channels[i].flags & IEEE80211_CHAN_DISABLED)) { + params->channels[j].min_duration = + cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION); + params->channels[j].max_duration = + cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION); + memset(¶ms->channels[j].bssid_lsb, 0xff, 4); + memset(¶ms->channels[j].bssid_msb, 0xff, 2); + params->channels[j].early_termination = 0; + params->channels[j].tx_power_att = + WL1271_SCAN_CURRENT_TX_PWR; + params->channels[j].channel = channels[i].hw_value; + j++; + } + } + + params->params.num_channels = j; + + if (len && ssid) { + params->params.ssid_len = len; + memcpy(params->params.ssid, ssid, len); + } + + ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band); + if (ret < 0) { + wl1271_error("PROBE request template failed"); + goto out; + } + + trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); + if (!trigger) { + ret = -ENOMEM; + goto out; + } + + /* disable the timeout */ + trigger->timeout = 0; + + ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, + sizeof(*trigger), 0); + if (ret < 0) { + wl1271_error("trigger scan to failed for hw scan"); + goto out; + } + + wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); + + set_bit(WL1271_FLAG_SCANNING, &wl->flags); + if (wl1271_11a_enabled()) { + wl->scan.state = band; + if (band == WL1271_SCAN_BAND_DUAL) { + wl->scan.active = active_scan; + wl->scan.high_prio = high_prio; + wl->scan.probe_requests = probe_requests; + if (len && ssid) { + wl->scan.ssid_len = len; + memcpy(wl->scan.ssid, ssid, len); + } else + wl->scan.ssid_len = 0; + } + } + + ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0); + if (ret < 0) { + wl1271_error("SCAN failed"); + clear_bit(WL1271_FLAG_SCANNING, &wl->flags); + goto out; + } + +out: + kfree(params); + return ret; +} + +int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, + void *buf, size_t buf_len) +{ + struct wl1271_cmd_template_set *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd template_set %d", template_id); + + WARN_ON(buf_len > WL1271_CMD_TEMPL_MAX_SIZE); + buf_len = min_t(size_t, buf_len, WL1271_CMD_TEMPL_MAX_SIZE); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->len = cpu_to_le16(buf_len); + cmd->template_type = template_id; + cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates); + cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit; + cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit; + + if (buf) + memcpy(cmd->template_data, buf, buf_len); + + ret = wl1271_cmd_send(wl, CMD_SET_TEMPLATE, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_warning("cmd set_template failed: %d", ret); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +static int wl1271_build_basic_rates(u8 *rates, u8 band) +{ + u8 index = 0; + + if (band == IEEE80211_BAND_2GHZ) { + rates[index++] = + IEEE80211_BASIC_RATE_MASK | 
IEEE80211_CCK_RATE_1MB; + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB; + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB; + } else if (band == IEEE80211_BAND_5GHZ) { + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB; + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB; + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB; + } else { + wl1271_error("build_basic_rates invalid band: %d", band); + } + + return index; +} + +static int wl1271_build_extended_rates(u8 *rates, u8 band) +{ + u8 index = 0; + + if (band == IEEE80211_BAND_2GHZ) { + rates[index++] = IEEE80211_OFDM_RATE_6MB; + rates[index++] = IEEE80211_OFDM_RATE_9MB; + rates[index++] = IEEE80211_OFDM_RATE_12MB; + rates[index++] = IEEE80211_OFDM_RATE_18MB; + rates[index++] = IEEE80211_OFDM_RATE_24MB; + rates[index++] = IEEE80211_OFDM_RATE_36MB; + rates[index++] = IEEE80211_OFDM_RATE_48MB; + rates[index++] = IEEE80211_OFDM_RATE_54MB; + } else if (band == IEEE80211_BAND_5GHZ) { + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB; + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB; + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB; + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB; + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB; + rates[index++] = + IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB; + } else { + wl1271_error("build_basic_rates invalid band: %d", band); + } + + return index; +} + +int wl1271_cmd_build_null_data(struct wl1271 *wl) +{ + struct wl12xx_null_data_template template; + + if (!is_zero_ether_addr(wl->bssid)) { + memcpy(template.header.da, wl->bssid, ETH_ALEN); + memcpy(template.header.bssid, wl->bssid, ETH_ALEN); + } else { + memset(template.header.da, 0xff, ETH_ALEN); + memset(template.header.bssid, 0xff, ETH_ALEN); + } + + memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); + template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_TODS); + + return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template, + sizeof(template)); + +} + +int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid) +{ + struct wl12xx_ps_poll_template template; + + memcpy(template.bssid, wl->bssid, ETH_ALEN); + memcpy(template.ta, wl->mac_addr, ETH_ALEN); + + /* aid in PS-Poll has its two MSBs each set to 1 */ + template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid); + + template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); + + return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template, + sizeof(template)); + +} + +int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len, + u8 band) +{ + struct wl12xx_probe_req_template template; + struct wl12xx_ie_rates *rates; + char *ptr; + u16 size; + int ret; + + ptr = (char *)&template; + size = sizeof(struct ieee80211_header); + + memset(template.header.da, 0xff, ETH_ALEN); + memset(template.header.bssid, 0xff, ETH_ALEN); + memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); + template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + + /* IEs */ + /* SSID */ + template.ssid.header.id = WLAN_EID_SSID; + template.ssid.header.len = ssid_len; + if (ssid_len && ssid) + memcpy(template.ssid.ssid, ssid, ssid_len); + size += sizeof(struct wl12xx_ie_header) + ssid_len; + ptr += size; + + /* Basic 
Rates */ + rates = (struct wl12xx_ie_rates *)ptr; + rates->header.id = WLAN_EID_SUPP_RATES; + rates->header.len = wl1271_build_basic_rates(rates->rates, band); + size += sizeof(struct wl12xx_ie_header) + rates->header.len; + ptr += sizeof(struct wl12xx_ie_header) + rates->header.len; + + /* Extended rates */ + rates = (struct wl12xx_ie_rates *)ptr; + rates->header.id = WLAN_EID_EXT_SUPP_RATES; + rates->header.len = wl1271_build_extended_rates(rates->rates, band); + size += sizeof(struct wl12xx_ie_header) + rates->header.len; + + wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size); + + if (band == IEEE80211_BAND_2GHZ) + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, + &template, size); + else + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5, + &template, size); + return ret; +} + +int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) +{ + struct wl1271_cmd_set_keys *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->id = id; + cmd->key_action = cpu_to_le16(KEY_SET_ID); + cmd->key_type = KEY_WEP; + + ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_warning("cmd set_default_wep_key failed: %d", ret); + goto out; + } + +out: + kfree(cmd); + + return ret; +} + +int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, const u8 *addr, + u32 tx_seq_32, u16 tx_seq_16) +{ + struct wl1271_cmd_set_keys *cmd; + int ret = 0; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + if (key_type != KEY_WEP) + memcpy(cmd->addr, addr, ETH_ALEN); + + cmd->key_action = cpu_to_le16(action); + cmd->key_size = key_size; + cmd->key_type = key_type; + + cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16); + cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32); + + /* we have only one SSID profile */ + cmd->ssid_profile = 0; + + cmd->id = id; + + if (key_type == KEY_TKIP) { + /* + * We get the key in the following form: + * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes) + * but the target is expecting: + * TKIP - RX MIC - TX MIC + */ + memcpy(cmd->key, key, 16); + memcpy(cmd->key + 16, key + 24, 8); + memcpy(cmd->key + 24, key + 16, 8); + + } else { + memcpy(cmd->key, key, key_size); + } + + wl1271_dump(DEBUG_CRYPT, "TARGET KEY: ", cmd, sizeof(*cmd)); + + ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_warning("could not set keys"); + goto out; + } + +out: + kfree(cmd); + + return ret; +} + +int wl1271_cmd_disconnect(struct wl1271 *wl) +{ + struct wl1271_cmd_disconnect *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd disconnect"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->rx_config_options = cpu_to_le32(wl->rx_config); + cmd->rx_filter_options = cpu_to_le32(wl->rx_filter); + /* disconnect reason is not used in immediate disconnections */ + cmd->type = DISCONNECT_IMMEDIATE; + + ret = wl1271_cmd_send(wl, CMD_DISCONNECT, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send disconnect command"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h new file mode 100644 index 0000000..2dc06c7 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h @@ -0,0 +1,521 @@ +/* + * This file is part of 
wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_CMD_H__ +#define __WL1271_CMD_H__ + +#include "wl1271.h" + +struct acx_header; + +int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, + size_t res_len); +int wl1271_cmd_general_parms(struct wl1271 *wl); +int wl1271_cmd_radio_parms(struct wl1271 *wl); +int wl1271_cmd_join(struct wl1271 *wl); +int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); +int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); +int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); +int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); +int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send); +int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, + size_t len); +int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, + u8 active_scan, u8 high_prio, u8 band, + u8 probe_requests); +int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, + void *buf, size_t buf_len); +int wl1271_cmd_build_null_data(struct wl1271 *wl); +int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid); +int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len, + u8 band); +int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); +int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, const u8 *addr, + u32 tx_seq_32, u16 tx_seq_16); +int wl1271_cmd_disconnect(struct wl1271 *wl); + +enum wl1271_commands { + CMD_INTERROGATE = 1, /*use this to read information elements*/ + CMD_CONFIGURE = 2, /*use this to write information elements*/ + CMD_ENABLE_RX = 3, + CMD_ENABLE_TX = 4, + CMD_DISABLE_RX = 5, + CMD_DISABLE_TX = 6, + CMD_SCAN = 8, + CMD_STOP_SCAN = 9, + CMD_START_JOIN = 11, + CMD_SET_KEYS = 12, + CMD_READ_MEMORY = 13, + CMD_WRITE_MEMORY = 14, + CMD_SET_TEMPLATE = 19, + CMD_TEST = 23, + CMD_NOISE_HIST = 28, + CMD_LNA_CONTROL = 32, + CMD_SET_BCN_MODE = 33, + CMD_MEASUREMENT = 34, + CMD_STOP_MEASUREMENT = 35, + CMD_DISCONNECT = 36, + CMD_SET_PS_MODE = 37, + CMD_CHANNEL_SWITCH = 38, + CMD_STOP_CHANNEL_SWICTH = 39, + CMD_AP_DISCOVERY = 40, + CMD_STOP_AP_DISCOVERY = 41, + CMD_SPS_SCAN = 42, + CMD_STOP_SPS_SCAN = 43, + CMD_HEALTH_CHECK = 45, + CMD_DEBUG = 46, + CMD_TRIGGER_SCAN_TO = 47, + CMD_CONNECTION_SCAN_CFG = 48, + CMD_CONNECTION_SCAN_SSID_CFG = 49, + CMD_START_PERIODIC_SCAN = 50, + CMD_STOP_PERIODIC_SCAN = 51, + CMD_SET_STA_STATE = 52, + + NUM_COMMANDS, + MAX_COMMAND_ID = 0xFFFF, +}; + +#define MAX_CMD_PARAMS 572 + +enum cmd_templ { + CMD_TEMPL_NULL_DATA = 0, + CMD_TEMPL_BEACON, + CMD_TEMPL_CFG_PROBE_REQ_2_4, + CMD_TEMPL_CFG_PROBE_REQ_5, + CMD_TEMPL_PROBE_RESPONSE, + CMD_TEMPL_QOS_NULL_DATA, + CMD_TEMPL_PS_POLL, + 
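/* keep-alive (KLV) template */ +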
CMD_TEMPL_KLV, + CMD_TEMPL_DISCONNECT, + CMD_TEMPL_PROBE_REQ_2_4, /* for firmware internal use only */ + CMD_TEMPL_PROBE_REQ_5, /* for firmware internal use only */ + CMD_TEMPL_BAR, /* for firmware internal use only */ + CMD_TEMPL_CTS, /* + * For CTS-to-self (FastCTS) mechanism + * for BT/WLAN coexistence (SoftGemini). */ + CMD_TEMPL_MAX = 0xff +}; + +/* unit ms */ +#define WL1271_COMMAND_TIMEOUT 2000 +#define WL1271_CMD_TEMPL_MAX_SIZE 252 + +struct wl1271_cmd_header { + __le16 id; + __le16 status; + /* payload */ + u8 data[0]; +} __attribute__ ((packed)); + +#define WL1271_CMD_MAX_PARAMS 572 + +struct wl1271_command { + struct wl1271_cmd_header header; + u8 parameters[WL1271_CMD_MAX_PARAMS]; +} __attribute__ ((packed)); + +enum { + CMD_MAILBOX_IDLE = 0, + CMD_STATUS_SUCCESS = 1, + CMD_STATUS_UNKNOWN_CMD = 2, + CMD_STATUS_UNKNOWN_IE = 3, + CMD_STATUS_REJECT_MEAS_SG_ACTIVE = 11, + CMD_STATUS_RX_BUSY = 13, + CMD_STATUS_INVALID_PARAM = 14, + CMD_STATUS_TEMPLATE_TOO_LARGE = 15, + CMD_STATUS_OUT_OF_MEMORY = 16, + CMD_STATUS_STA_TABLE_FULL = 17, + CMD_STATUS_RADIO_ERROR = 18, + CMD_STATUS_WRONG_NESTING = 19, + CMD_STATUS_TIMEOUT = 21, /* Driver internal use.*/ + CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/ + MAX_COMMAND_STATUS = 0xff +}; + + +/* + * CMD_READ_MEMORY + * + * The host issues this command to read the WiLink device memory/registers. + * + * Note: The Base Band address has special handling (16 bits registers and + * addresses). For more information, see the hardware specification. + */ +/* + * CMD_WRITE_MEMORY + * + * The host issues this command to write the WiLink device memory/registers. + * + * The Base Band address has special handling (16 bits registers and + * addresses). For more information, see the hardware specification. + */ +#define MAX_READ_SIZE 256 + +struct cmd_read_write_memory { + struct wl1271_cmd_header header; + + /* The address of the memory to read from or write to.*/ + __le32 addr; + + /* The amount of data in bytes to read from or write to the WiLink + * device.*/ + __le32 size; + + /* The actual value read from or written to the Wilink. The source + of this field is the Host in WRITE command or the Wilink in READ + command. */ + u8 value[MAX_READ_SIZE]; +} __attribute__ ((packed)); + +#define CMDMBOX_HEADER_LEN 4 +#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 + +enum { + BSS_TYPE_IBSS = 0, + BSS_TYPE_STA_BSS = 2, + BSS_TYPE_AP_BSS = 3, + MAX_BSS_TYPE = 0xFF +}; + +#define WL1271_JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */ +#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1 +#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10 + +struct wl1271_cmd_join { + struct wl1271_cmd_header header; + + __le32 bssid_lsb; + __le16 bssid_msb; + __le16 beacon_interval; /* in TBTTs */ + __le32 rx_config_options; + __le32 rx_filter_options; + + /* + * The target uses this field to determine the rate at + * which to transmit control frame responses (such as + * ACK or CTS frames). + */ + __le32 basic_rate_set; + u8 dtim_interval; + /* + * bits 0-2: This bitwise field specifies the type + * of BSS to start or join (BSS_TYPE_*). + * bit 4: Band - The radio band in which to join + * or start. 
+ * 0 - 2.4GHz band + * 1 - 5GHz band + * bits 3, 5-7: Reserved + */ + u8 bss_type; + u8 channel; + u8 ssid_len; + u8 ssid[IW_ESSID_MAX_SIZE]; + u8 ctrl; /* JOIN_CMD_CTRL_* */ + u8 reserved[3]; +} __attribute__ ((packed)); + +struct cmd_enabledisable_path { + struct wl1271_cmd_header header; + + u8 channel; + u8 padding[3]; +} __attribute__ ((packed)); + +struct wl1271_cmd_template_set { + struct wl1271_cmd_header header; + + __le16 len; + u8 template_type; + u8 index; /* relevant only for KLV_TEMPLATE type */ + __le32 enabled_rates; + u8 short_retry_limit; + u8 long_retry_limit; + u8 aflags; + u8 reserved; + u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE]; +} __attribute__ ((packed)); + +#define TIM_ELE_ID 5 +#define PARTIAL_VBM_MAX 251 + +struct wl1271_tim { + u8 identity; + u8 length; + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ +} __attribute__ ((packed)); + +enum wl1271_cmd_ps_mode { + STATION_ACTIVE_MODE, + STATION_POWER_SAVE_MODE +}; + +struct wl1271_cmd_ps_params { + struct wl1271_cmd_header header; + + u8 ps_mode; /* STATION_* */ + u8 send_null_data; /* Do we have to send NULL data packet ? */ + u8 retries; /* Number of retires for the initial NULL data packet */ + + /* + * TUs during which the target stays awake after switching + * to power save mode. + */ + u8 hang_over_period; + __le32 null_data_rate; +} __attribute__ ((packed)); + +/* HW encryption keys */ +#define NUM_ACCESS_CATEGORIES_COPY 4 +#define MAX_KEY_SIZE 32 + +enum wl1271_cmd_key_action { + KEY_ADD_OR_REPLACE = 1, + KEY_REMOVE = 2, + KEY_SET_ID = 3, + MAX_KEY_ACTION = 0xffff, +}; + +enum wl1271_cmd_key_type { + KEY_NONE = 0, + KEY_WEP = 1, + KEY_TKIP = 2, + KEY_AES = 3, + KEY_GEM = 4 +}; + +/* FIXME: Add description for key-types */ + +struct wl1271_cmd_set_keys { + struct wl1271_cmd_header header; + + /* Ignored for default WEP key */ + u8 addr[ETH_ALEN]; + + /* key_action_e */ + __le16 key_action; + + __le16 reserved_1; + + /* key size in bytes */ + u8 key_size; + + /* key_type_e */ + u8 key_type; + u8 ssid_profile; + + /* + * TKIP, AES: frame's key id field. 
+ * For WEP default key: key id; + */ + u8 id; + u8 reserved_2[6]; + u8 key[MAX_KEY_SIZE]; + __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; + __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; +} __attribute__ ((packed)); + + +#define WL1271_SCAN_MAX_CHANNELS 24 +#define WL1271_SCAN_DEFAULT_TAG 1 +#define WL1271_SCAN_CURRENT_TX_PWR 0 +#define WL1271_SCAN_OPT_ACTIVE 0 +#define WL1271_SCAN_OPT_PASSIVE 1 +#define WL1271_SCAN_OPT_PRIORITY_HIGH 4 +#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */ +#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */ +#define WL1271_SCAN_BAND_2_4_GHZ 0 +#define WL1271_SCAN_BAND_5_GHZ 1 +#define WL1271_SCAN_BAND_DUAL 2 + +struct basic_scan_params { + __le32 rx_config_options; + __le32 rx_filter_options; + /* Scan option flags (WL1271_SCAN_OPT_*) */ + __le16 scan_options; + /* Number of scan channels in the list (maximum 30) */ + u8 num_channels; + /* This field indicates the number of probe requests to send + per channel for an active scan */ + u8 num_probe_requests; + /* Rate bit field for sending the probes */ + __le32 tx_rate; + u8 tid_trigger; + u8 ssid_len; + /* in order to align */ + u8 padding1[2]; + u8 ssid[IW_ESSID_MAX_SIZE]; + /* Band to scan */ + u8 band; + u8 use_ssid_list; + u8 scan_tag; + u8 padding2; +} __attribute__ ((packed)); + +struct basic_scan_channel_params { + /* Duration in TU to wait for frames on a channel for active scan */ + __le32 min_duration; + __le32 max_duration; + __le32 bssid_lsb; + __le16 bssid_msb; + u8 early_termination; + u8 tx_power_att; + u8 channel; + /* FW internal use only! */ + u8 dfs_candidate; + u8 activity_detected; + u8 pad; +} __attribute__ ((packed)); + +struct wl1271_cmd_scan { + struct wl1271_cmd_header header; + + struct basic_scan_params params; + struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS]; +} __attribute__ ((packed)); + +struct wl1271_cmd_trigger_scan_to { + struct wl1271_cmd_header header; + + __le32 timeout; +} __attribute__ ((packed)); + +struct wl1271_cmd_test_header { + u8 id; + u8 padding[3]; +} __attribute__ ((packed)); + +enum wl1271_channel_tune_bands { + WL1271_CHANNEL_TUNE_BAND_2_4, + WL1271_CHANNEL_TUNE_BAND_5, + WL1271_CHANNEL_TUNE_BAND_4_9 +}; + +#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0 + +#define TEST_CMD_P2G_CAL 0x02 +#define TEST_CMD_CHANNEL_TUNE 0x0d +#define TEST_CMD_UPDATE_PD_REFERENCE_POINT 0x1d +#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19 +#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E + +struct wl1271_general_parms_cmd { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + u8 params[WL1271_NVS_GENERAL_PARAMS_SIZE]; + s8 reserved[23]; +} __attribute__ ((packed)); + +#define WL1271_STAT_RADIO_PARAMS_5_SIZE 29 +#define WL1271_DYN_RADIO_PARAMS_5_SIZE 104 + +struct wl1271_radio_parms_cmd { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE]; + u8 stat_radio_params_5[WL1271_STAT_RADIO_PARAMS_5_SIZE]; + + u8 dyn_radio_params[WL1271_NVS_DYN_RADIO_PARAMS_SIZE]; + u8 reserved; + u8 dyn_radio_params_5[WL1271_DYN_RADIO_PARAMS_5_SIZE]; +} __attribute__ ((packed)); + +struct wl1271_cmd_cal_channel_tune { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + u8 band; + u8 channel; + + __le16 radio_status; +} __attribute__ ((packed)); + +struct wl1271_cmd_cal_update_ref_point { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + __le32 ref_power; + __le32 ref_detector; + u8 sub_band; + u8 padding[3]; +} 
__attribute__ ((packed)); + +#define MAX_TLV_LENGTH 400 +#define MAX_NVS_VERSION_LENGTH 12 + +#define WL1271_CAL_P2G_BAND_B_G BIT(0) + +struct wl1271_cmd_cal_p2g { + struct wl1271_cmd_header header; + + struct wl1271_cmd_test_header test; + + __le16 len; + u8 buf[MAX_TLV_LENGTH]; + u8 type; + u8 padding; + + __le16 radio_status; + u8 nvs_version[MAX_NVS_VERSION_LENGTH]; + + u8 sub_band_mask; + u8 padding2; +} __attribute__ ((packed)); + + +/* + * There are three types of disconnections: + * + * DISCONNECT_IMMEDIATE: the fw doesn't send any frames + * DISCONNECT_DEAUTH: the fw generates a DEAUTH request with the reason + * we have passed + * DISCONNECT_DISASSOC: the fw generates a DESASSOC request with the reason + * we have passed + */ +enum wl1271_disconnect_type { + DISCONNECT_IMMEDIATE, + DISCONNECT_DEAUTH, + DISCONNECT_DISASSOC +}; + +struct wl1271_cmd_disconnect { + __le32 rx_config_options; + __le32 rx_filter_options; + + __le16 reason; + u8 type; + + u8 padding; +} __attribute__ ((packed)); + +#endif /* __WL1271_CMD_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h new file mode 100644 index 0000000..6f9e75c --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_conf.h @@ -0,0 +1,795 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_CONF_H__ +#define __WL1271_CONF_H__ + +enum { + CONF_HW_BIT_RATE_1MBPS = BIT(0), + CONF_HW_BIT_RATE_2MBPS = BIT(1), + CONF_HW_BIT_RATE_5_5MBPS = BIT(2), + CONF_HW_BIT_RATE_6MBPS = BIT(3), + CONF_HW_BIT_RATE_9MBPS = BIT(4), + CONF_HW_BIT_RATE_11MBPS = BIT(5), + CONF_HW_BIT_RATE_12MBPS = BIT(6), + CONF_HW_BIT_RATE_18MBPS = BIT(7), + CONF_HW_BIT_RATE_22MBPS = BIT(8), + CONF_HW_BIT_RATE_24MBPS = BIT(9), + CONF_HW_BIT_RATE_36MBPS = BIT(10), + CONF_HW_BIT_RATE_48MBPS = BIT(11), + CONF_HW_BIT_RATE_54MBPS = BIT(12), + CONF_HW_BIT_RATE_MCS_0 = BIT(13), + CONF_HW_BIT_RATE_MCS_1 = BIT(14), + CONF_HW_BIT_RATE_MCS_2 = BIT(15), + CONF_HW_BIT_RATE_MCS_3 = BIT(16), + CONF_HW_BIT_RATE_MCS_4 = BIT(17), + CONF_HW_BIT_RATE_MCS_5 = BIT(18), + CONF_HW_BIT_RATE_MCS_6 = BIT(19), + CONF_HW_BIT_RATE_MCS_7 = BIT(20) +}; + +enum { + CONF_HW_RATE_INDEX_1MBPS = 0, + CONF_HW_RATE_INDEX_2MBPS = 1, + CONF_HW_RATE_INDEX_5_5MBPS = 2, + CONF_HW_RATE_INDEX_6MBPS = 3, + CONF_HW_RATE_INDEX_9MBPS = 4, + CONF_HW_RATE_INDEX_11MBPS = 5, + CONF_HW_RATE_INDEX_12MBPS = 6, + CONF_HW_RATE_INDEX_18MBPS = 7, + CONF_HW_RATE_INDEX_22MBPS = 8, + CONF_HW_RATE_INDEX_24MBPS = 9, + CONF_HW_RATE_INDEX_36MBPS = 10, + CONF_HW_RATE_INDEX_48MBPS = 11, + CONF_HW_RATE_INDEX_54MBPS = 12, + CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS, +}; + +struct conf_sg_settings { + /* + * Defines the PER threshold in PPM of the BT voice of which reaching + * this value will trigger raising the priority of the BT voice by + * the BT IP until next NFS sample interval time as defined in + * nfs_sample_interval. + * + * Unit: PER value in PPM (parts per million) + * #Error_packets / #Total_packets + + * Range: u32 + */ + u32 per_threshold; + + /* + * This value is an absolute time in micro-seconds to limit the + * maximum scan duration compensation while in SG + */ + u32 max_scan_compensation_time; + + /* Defines the PER threshold of the BT voice of which reaching this + * value will trigger raising the priority of the BT voice until next + * NFS sample interval time as defined in sample_interval. + * + * Unit: msec + * Range: 1-65000 + */ + u16 nfs_sample_interval; + + /* + * Defines the load ratio for the BT. + * The WLAN ratio is: 100 - load_ratio + * + * Unit: Percent + * Range: 0-100 + */ + u8 load_ratio; + + /* + * true - Co-ex is allowed to enter/exit P.S automatically and + * transparently to the host + * + * false - Co-ex is disallowed to enter/exit P.S and will trigger an + * event to the host to notify for the need to enter/exit P.S + * due to BT change state + * + */ + u8 auto_ps_mode; + + /* + * This parameter defines the compensation percentage of num of probe + * requests in case scan is initiated during BT voice/BT ACL + * guaranteed link. + * + * Unit: Percent + * Range: 0-255 (0 - No compensation) + */ + u8 probe_req_compensation; + + /* + * This parameter defines the compensation percentage of scan window + * size in case scan is initiated during BT voice/BT ACL Guaranteed + * link. + * + * Unit: Percent + * Range: 0-255 (0 - No compensation) + */ + u8 scan_window_compensation; + + /* + * Defines the antenna configuration. 
+ * + * Range: 0 - Single Antenna; 1 - Dual Antenna + */ + u8 antenna_config; + + /* + * The percent out of the Max consecutive beacon miss roaming trigger + * which is the threshold for raising the priority of beacon + * reception. + * + * Range: 1-100 + * N = MaxConsecutiveBeaconMiss + * P = coexMaxConsecutiveBeaconMissPrecent + * Threshold = MIN( N-1, round(N * P / 100)) + */ + u8 beacon_miss_threshold; + + /* + * The RX rate threshold below which rate adaptation is assumed to be + * occurring at the AP which will raise priority for ACTIVE_RX and RX + * SP. + * + * Range: HW_BIT_RATE_* + */ + u32 rate_adaptation_threshold; + + /* + * The SNR above which the RX rate threshold indicating AP rate + * adaptation is valid + * + * Range: -128 - 127 + */ + s8 rate_adaptation_snr; +}; + +enum conf_rx_queue_type { + CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */ + CONF_RX_QUEUE_TYPE_HIGH_PRIORITY, /* Management and voice packets */ +}; + +struct conf_rx_settings { + /* + * The maximum amount of time, in TU, before the + * firmware discards the MSDU. + * + * Range: 0 - 0xFFFFFFFF + */ + u32 rx_msdu_life_time; + + /* + * Packet detection threshold in the PHY. + * + * FIXME: details unknown. + */ + u32 packet_detection_threshold; + + /* + * The longest time the STA will wait to receive traffic from the AP + * after a PS-poll has been transmitted. + * + * Range: 0 - 200000 + */ + u16 ps_poll_timeout; + /* + * The longest time the STA will wait to receive traffic from the AP + * after a frame has been sent from an UPSD enabled queue. + * + * Range: 0 - 200000 + */ + u16 upsd_timeout; + + /* + * The number of octets in an MPDU, below which an RTS/CTS + * handshake is not performed. + * + * Range: 0 - 4096 + */ + u16 rts_threshold; + + /* + * The RX Clear Channel Assessment threshold in the PHY + * (the energy threshold). + * + * Range: ENABLE_ENERGY_D == 0x140A + * DISABLE_ENERGY_D == 0xFFEF + */ + u16 rx_cca_threshold; + + /* + * Occupied Rx mem-blocks number which requires interrupting the host + * (0 = no buffering, 0xffff = disabled). + * + * Range: u16 + */ + u16 irq_blk_threshold; + + /* + * Rx packets number which requires interrupting the host + * (0 = no buffering). + * + * Range: u16 + */ + u16 irq_pkt_threshold; + + /* + * Max time in msec the FW may delay RX-Complete interrupt. + * + * Range: 1 - 100 + */ + u16 irq_timeout; + + /* + * The RX queue type. + * + * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY, + */ + u8 queue_type; +}; + +#define CONF_TX_MAX_RATE_CLASSES 8 + +#define CONF_TX_RATE_MASK_UNSPECIFIED 0 +#define CONF_TX_RATE_MASK_BASIC (CONF_HW_BIT_RATE_1MBPS | \ + CONF_HW_BIT_RATE_2MBPS) +#define CONF_TX_RATE_RETRY_LIMIT 10 + +struct conf_tx_rate_class { + + /* + * The rates enabled for this rate class. + * + * Range: CONF_HW_BIT_RATE_* bit mask + */ + u32 enabled_rates; + + /* + * The dot11 short retry limit used for TX retries. + * + * Range: u8 + */ + u8 short_retry_limit; + + /* + * The dot11 long retry limit used for TX retries. + * + * Range: u8 + */ + u8 long_retry_limit; + + /* + * Flags controlling the attributes of TX transmission. + * + * Range: bit 0: Truncate - when set, FW attempts to send a frame stop + * when the total valid per-rate attempts have + * been exhausted; otherwise transmissions + * will continue at the lowest available rate + * until the appropriate one of the + * short_retry_limit, long_retry_limit, + * dot11_max_transmit_msdu_life_time, or + * max_tx_life_time, is exhausted. 
+ * 1: Preamble Override - indicates if the preamble type + * should be used in TX. + * 2: Preamble Type - the type of the preamble to be used by + * the policy (0 - long preamble, 1 - short preamble. + */ + u8 aflags; +}; + +#define CONF_TX_MAX_AC_COUNT 4 + +/* Slot number setting to start transmission at PIFS interval */ +#define CONF_TX_AIFS_PIFS 1 +/* Slot number setting to start transmission at DIFS interval normal + * DCF access */ +#define CONF_TX_AIFS_DIFS 2 + + +enum conf_tx_ac { + CONF_TX_AC_BE = 0, /* best effort / legacy */ + CONF_TX_AC_BK = 1, /* background */ + CONF_TX_AC_VI = 2, /* video */ + CONF_TX_AC_VO = 3, /* voice */ + CONF_TX_AC_CTS2SELF = 4, /* fictious AC, follows AC_VO */ + CONF_TX_AC_ANY_TID = 0x1f +}; + +struct conf_tx_ac_category { + /* + * The AC class identifier. + * + * Range: enum conf_tx_ac + */ + u8 ac; + + /* + * The contention window minimum size (in slots) for the access + * class. + * + * Range: u8 + */ + u8 cw_min; + + /* + * The contention window maximum size (in slots) for the access + * class. + * + * Range: u8 + */ + u16 cw_max; + + /* + * The AIF value (in slots) for the access class. + * + * Range: u8 + */ + u8 aifsn; + + /* + * The TX Op Limit (in microseconds) for the access class. + * + * Range: u16 + */ + u16 tx_op_limit; +}; + +#define CONF_TX_MAX_TID_COUNT 7 + +enum { + CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/ + CONF_CHANNEL_TYPE_EDCF = 1, /* EDCA*/ + CONF_CHANNEL_TYPE_HCCA = 2, /* HCCA*/ +}; + +enum { + CONF_PS_SCHEME_LEGACY = 0, + CONF_PS_SCHEME_UPSD_TRIGGER = 1, + CONF_PS_SCHEME_LEGACY_PSPOLL = 2, + CONF_PS_SCHEME_SAPSD = 3, +}; + +enum { + CONF_ACK_POLICY_LEGACY = 0, + CONF_ACK_POLICY_NO_ACK = 1, + CONF_ACK_POLICY_BLOCK = 2, +}; + + +struct conf_tx_tid { + u8 queue_id; + u8 channel_type; + u8 tsid; + u8 ps_scheme; + u8 ack_policy; + u32 apsd_conf[2]; +}; + +struct conf_tx_settings { + /* + * The TX ED value for TELEC Enable/Disable. + * + * Range: 0, 1 + */ + u8 tx_energy_detection; + + /* + * Configuration for rate classes for TX (currently only one + * rate class supported.) + */ + struct conf_tx_rate_class rc_conf; + + /* + * Configuration for access categories for TX rate control. + */ + u8 ac_conf_count; + struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT]; + + /* + * Configuration for TID parameters. + */ + u8 tid_conf_count; + struct conf_tx_tid tid_conf[CONF_TX_MAX_TID_COUNT]; + + /* + * The TX fragmentation threshold. + * + * Range: u16 + */ + u16 frag_threshold; + + /* + * Max time in msec the FW may delay frame TX-Complete interrupt. + * + * Range: u16 + */ + u16 tx_compl_timeout; + + /* + * Completed TX packet count which requires to issue the TX-Complete + * interrupt. + * + * Range: u16 + */ + u16 tx_compl_threshold; + +}; + +enum { + CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/ + CONF_WAKE_UP_EVENT_DTIM = 0x02, /* Wake on every DTIM*/ + CONF_WAKE_UP_EVENT_N_DTIM = 0x04, /* Wake every Nth DTIM */ + CONF_WAKE_UP_EVENT_N_BEACONS = 0x08, /* Wake every Nth beacon */ + CONF_WAKE_UP_EVENT_BITS_MASK = 0x0F +}; + +#define CONF_MAX_BCN_FILT_IE_COUNT 32 + +#define CONF_BCN_RULE_PASS_ON_CHANGE BIT(0) +#define CONF_BCN_RULE_PASS_ON_APPEARANCE BIT(1) + +#define CONF_BCN_IE_OUI_LEN 3 +#define CONF_BCN_IE_VER_LEN 2 + +struct conf_bcn_filt_rule { + /* + * IE number to which to associate a rule. + * + * Range: u8 + */ + u8 ie; + + /* + * Rule to associate with the specific ie. 
+ * + * Range: CONF_BCN_RULE_PASS_ON_* + */ + u8 rule; + + /* + * OUI for the vendor specifie IE (221) + */ + u8 oui[CONF_BCN_IE_OUI_LEN]; + + /* + * Type for the vendor specifie IE (221) + */ + u8 type; + + /* + * Version for the vendor specifie IE (221) + */ + u8 version[CONF_BCN_IE_VER_LEN]; +}; + +#define CONF_MAX_RSSI_SNR_TRIGGERS 8 + +enum { + CONF_TRIG_METRIC_RSSI_BEACON = 0, + CONF_TRIG_METRIC_RSSI_DATA, + CONF_TRIG_METRIC_SNR_BEACON, + CONF_TRIG_METRIC_SNR_DATA +}; + +enum { + CONF_TRIG_EVENT_TYPE_LEVEL = 0, + CONF_TRIG_EVENT_TYPE_EDGE +}; + +enum { + CONF_TRIG_EVENT_DIR_LOW = 0, + CONF_TRIG_EVENT_DIR_HIGH, + CONF_TRIG_EVENT_DIR_BIDIR +}; + + +struct conf_sig_trigger { + /* + * The RSSI / SNR threshold value. + * + * FIXME: what is the range? + */ + s16 threshold; + + /* + * Minimum delay between two trigger events for this trigger in ms. + * + * Range: 0 - 60000 + */ + u16 pacing; + + /* + * The measurement data source for this trigger. + * + * Range: CONF_TRIG_METRIC_* + */ + u8 metric; + + /* + * The trigger type of this trigger. + * + * Range: CONF_TRIG_EVENT_TYPE_* + */ + u8 type; + + /* + * The direction of the trigger. + * + * Range: CONF_TRIG_EVENT_DIR_* + */ + u8 direction; + + /* + * Hysteresis range of the trigger around the threshold (in dB) + * + * Range: u8 + */ + u8 hysteresis; + + /* + * Index of the trigger rule. + * + * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1 + */ + u8 index; + + /* + * Enable / disable this rule (to use for clearing rules.) + * + * Range: 1 - Enabled, 2 - Not enabled + */ + u8 enable; +}; + +struct conf_sig_weights { + + /* + * RSSI from beacons average weight. + * + * Range: u8 + */ + u8 rssi_bcn_avg_weight; + + /* + * RSSI from data average weight. + * + * Range: u8 + */ + u8 rssi_pkt_avg_weight; + + /* + * SNR from beacons average weight. + * + * Range: u8 + */ + u8 snr_bcn_avg_weight; + + /* + * SNR from data average weight. + * + * Range: u8 + */ + u8 snr_pkt_avg_weight; +}; + +enum conf_bcn_filt_mode { + CONF_BCN_FILT_MODE_DISABLED = 0, + CONF_BCN_FILT_MODE_ENABLED = 1 +}; + +enum conf_bet_mode { + CONF_BET_MODE_DISABLE = 0, + CONF_BET_MODE_ENABLE = 1, +}; + +struct conf_conn_settings { + /* + * Firmware wakeup conditions configuration. The host may set only + * one bit. + * + * Range: CONF_WAKE_UP_EVENT_* + */ + u8 wake_up_event; + + /* + * Listen interval for beacons or Dtims. + * + * Range: 0 for beacon and Dtim wakeup + * 1-10 for x Dtims + * 1-255 for x beacons + */ + u8 listen_interval; + + /* + * Enable or disable the beacon filtering. + * + * Range: CONF_BCN_FILT_MODE_* + */ + enum conf_bcn_filt_mode bcn_filt_mode; + + /* + * Configure Beacon filter pass-thru rules. + */ + u8 bcn_filt_ie_count; + struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT]; + + /* + * The number of consequtive beacons to lose, before the firmware + * becomes out of synch. + * + * Range: u32 + */ + u32 synch_fail_thold; + + /* + * After out-of-synch, the number of TU's to wait without a further + * received beacon (or probe response) before issuing the BSS_EVENT_LOSE + * event. + * + * Range: u32 + */ + u32 bss_lose_timeout; + + /* + * Beacon receive timeout. + * + * Range: u32 + */ + u32 beacon_rx_timeout; + + /* + * Broadcast receive timeout. 
+ * + * Range: u32 + */ + u32 broadcast_timeout; + + /* + * Enable/disable reception of broadcast packets in power save mode + * + * Range: 1 - enable, 0 - disable + */ + u8 rx_broadcast_in_ps; + + /* + * Consequtive PS Poll failures before sending event to driver + * + * Range: u8 + */ + u8 ps_poll_threshold; + + /* + * Configuration of signal (rssi/snr) triggers. + */ + u8 sig_trigger_count; + struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS]; + + /* + * Configuration of signal average weights. + */ + struct conf_sig_weights sig_weights; + + /* + * Specifies if beacon early termination procedure is enabled or + * disabled. + * + * Range: CONF_BET_MODE_* + */ + u8 bet_enable; + + /* + * Specifies the maximum number of consecutive beacons that may be + * early terminated. After this number is reached at least one full + * beacon must be correctly received in FW before beacon ET + * resumes. + * + * Range 0 - 255 + */ + u8 bet_max_consecutive; + + /* + * Specifies the maximum number of times to try PSM entry if it fails + * (if sending the appropriate null-func message fails.) + * + * Range 0 - 255 + */ + u8 psm_entry_retries; +}; + +enum { + CONF_REF_CLK_19_2_E, + CONF_REF_CLK_26_E, + CONF_REF_CLK_38_4_E, + CONF_REF_CLK_52_E +}; + +enum single_dual_band_enum { + CONF_SINGLE_BAND, + CONF_DUAL_BAND +}; + +#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15 +#define CONF_NUMBER_OF_SUB_BANDS_5 7 +#define CONF_NUMBER_OF_RATE_GROUPS 6 +#define CONF_NUMBER_OF_CHANNELS_2_4 14 +#define CONF_NUMBER_OF_CHANNELS_5 35 + +struct conf_radio_parms { + /* + * FEM parameter set to use + * + * Range: 0 or 1 + */ + u8 fem; +}; + +struct conf_init_settings { + /* + * Configure radio parameters. + */ + struct conf_radio_parms radioparam; + +}; + +struct conf_itrim_settings { + /* enable dco itrim */ + u8 enable; + + /* moderation timeout in microsecs from the last TX */ + u32 timeout; +}; + +struct conf_pm_config_settings { + /* + * Host clock settling time + * + * Range: 0 - 30000 us + */ + u32 host_clk_settling_time; + + /* + * Host fast wakeup support + * + * Range: true, false + */ + bool host_fast_wakeup_support; +}; + +struct conf_drv_settings { + struct conf_sg_settings sg; + struct conf_rx_settings rx; + struct conf_tx_settings tx; + struct conf_conn_settings conn; + struct conf_init_settings init; + struct conf_itrim_settings itrim; + struct conf_pm_config_settings pm_config; +}; + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c new file mode 100644 index 0000000..8d7588c --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c @@ -0,0 +1,580 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wl1271_debugfs.h" + +#include + +#include "wl1271.h" +#include "wl1271_acx.h" +#include "wl1271_ps.h" + +/* ms */ +#define WL1271_DEBUGFS_STATS_LIFETIME 1000 + +/* debugfs macros idea from mac80211 */ + +#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ +static ssize_t name## _read(struct file *file, char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct wl1271 *wl = file->private_data; \ + char buf[buflen]; \ + int res; \ + \ + res = scnprintf(buf, buflen, fmt "\n", ##value); \ + return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ +} \ + \ +static const struct file_operations name## _ops = { \ + .read = name## _read, \ + .open = wl1271_open_file_generic, \ +}; + +#define DEBUGFS_ADD(name, parent) \ + wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \ + wl, &name## _ops); \ + if (IS_ERR(wl->debugfs.name)) { \ + ret = PTR_ERR(wl->debugfs.name); \ + wl->debugfs.name = NULL; \ + goto out; \ + } + +#define DEBUGFS_DEL(name) \ + do { \ + debugfs_remove(wl->debugfs.name); \ + wl->debugfs.name = NULL; \ + } while (0) + +#define DEBUGFS_FWSTATS_FILE(sub, name, buflen, fmt) \ +static ssize_t sub## _ ##name## _read(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct wl1271 *wl = file->private_data; \ + char buf[buflen]; \ + int res; \ + \ + wl1271_debugfs_update_stats(wl); \ + \ + res = scnprintf(buf, buflen, fmt "\n", \ + wl->stats.fw_stats->sub.name); \ + return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ +} \ + \ +static const struct file_operations sub## _ ##name## _ops = { \ + .read = sub## _ ##name## _read, \ + .open = wl1271_open_file_generic, \ +}; + +#define DEBUGFS_FWSTATS_ADD(sub, name) \ + DEBUGFS_ADD(sub## _ ##name, wl->debugfs.fw_statistics) + +#define DEBUGFS_FWSTATS_DEL(sub, name) \ + DEBUGFS_DEL(sub## _ ##name) + +static void wl1271_debugfs_update_stats(struct wl1271 *wl) +{ + int ret; + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + + if (wl->state == WL1271_STATE_ON && + time_after(jiffies, wl->stats.fw_stats_update + + msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) { + wl1271_acx_statistics(wl, wl->stats.fw_stats); + wl->stats.fw_stats_update = jiffies; + } + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static int wl1271_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, hw_stuck, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, fcs_err, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, path_reset, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rx, reset_counter, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(dma, rx_requested, 20, "%u"); +DEBUGFS_FWSTATS_FILE(dma, rx_errors, 20, "%u"); +DEBUGFS_FWSTATS_FILE(dma, tx_requested, 20, "%u"); +DEBUGFS_FWSTATS_FILE(dma, tx_errors, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, fiqs, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, rx_headers, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, 
rx_mem_overflow, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, rx_rdys, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, irqs, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, tx_procs, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, decrypt_done, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, dma0_done, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, dma1_done, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, commands, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, rx_procs, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, pci_pm, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, wakeups, 20, "%u"); +DEBUGFS_FWSTATS_FILE(isr, low_rssi, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(wep, addr_key_count, 20, "%u"); +DEBUGFS_FWSTATS_FILE(wep, default_key_count, 20, "%u"); +/* skipping wep.reserved */ +DEBUGFS_FWSTATS_FILE(wep, key_not_found, 20, "%u"); +DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, 20, "%u"); +DEBUGFS_FWSTATS_FILE(wep, packets, 20, "%u"); +DEBUGFS_FWSTATS_FILE(wep, interrupt, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(pwr, ps_enter, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, elp_enter, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, power_save_off, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, enable_ps, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, disable_ps, 20, "%u"); +DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, 20, "%u"); +/* skipping cont_miss_bcns_spread for now */ +DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(mic, rx_pkts, 20, "%u"); +DEBUGFS_FWSTATS_FILE(mic, calc_failure, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, 20, "%u"); +DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, 20, "%u"); +DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, 20, "%u"); +DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, 20, "%u"); +DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, 20, "%u"); +DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, oom_late, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u"); +DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, 20, "%u"); +DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, 20, "%u"); + +DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, + 20, "%u"); +DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, 20, "%u"); +DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, 20, "%u"); + +DEBUGFS_READONLY_FILE(retry_count, 20, "%u", wl->stats.retry_count); +DEBUGFS_READONLY_FILE(excessive_retries, 20, "%u", + wl->stats.excessive_retries); + 
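The DEBUGFS_FWSTATS_FILE() and DEBUGFS_READONLY_FILE() entries above are all stamped out by the macros defined earlier in this file. As a rough sketch only (relying on the struct wl1271 and wl1271_open_file_generic definitions already present in this file), the retry_count pair declared above expands to approximately the following:

static ssize_t retry_count_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	struct wl1271 *wl = file->private_data;
	char buf[20];
	int res;

	/* format the driver counter and copy it to userspace */
	res = scnprintf(buf, 20, "%u\n", wl->stats.retry_count);
	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}

static const struct file_operations retry_count_ops = {
	.read = retry_count_read,
	.open = wl1271_open_file_generic,
};

Each such file is then registered under the driver's debugfs root by the corresponding DEBUGFS_ADD()/DEBUGFS_FWSTATS_ADD() call below.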
+static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + u32 queue_len; + char buf[20]; + int res; + + queue_len = skb_queue_len(&wl->tx_queue); + + res = scnprintf(buf, sizeof(buf), "%u\n", queue_len); + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} + +static const struct file_operations tx_queue_len_ops = { + .read = tx_queue_len_read, + .open = wl1271_open_file_generic, +}; + +static ssize_t gpio_power_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); + + int res; + char buf[10]; + + res = scnprintf(buf, sizeof(buf), "%d\n", state); + + return simple_read_from_buffer(user_buf, count, ppos, buf, res); +} + +static ssize_t gpio_power_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + char buf[10]; + size_t len; + unsigned long value; + int ret; + + mutex_lock(&wl->mutex); + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) { + ret = -EFAULT; + goto out; + } + buf[len] = '\0'; + + ret = strict_strtoul(buf, 0, &value); + if (ret < 0) { + wl1271_warning("illegal value in gpio_power"); + goto out; + } + + if (value) { + wl->set_power(true); + set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); + } else { + wl->set_power(false); + clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); + } + +out: + mutex_unlock(&wl->mutex); + return count; +} + +static const struct file_operations gpio_power_ops = { + .read = gpio_power_read, + .write = gpio_power_write, + .open = wl1271_open_file_generic +}; + +static void wl1271_debugfs_delete_files(struct wl1271 *wl) +{ + DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow); + + DEBUGFS_FWSTATS_DEL(rx, out_of_mem); + DEBUGFS_FWSTATS_DEL(rx, hdr_overflow); + DEBUGFS_FWSTATS_DEL(rx, hw_stuck); + DEBUGFS_FWSTATS_DEL(rx, dropped); + DEBUGFS_FWSTATS_DEL(rx, fcs_err); + DEBUGFS_FWSTATS_DEL(rx, xfr_hint_trig); + DEBUGFS_FWSTATS_DEL(rx, path_reset); + DEBUGFS_FWSTATS_DEL(rx, reset_counter); + + DEBUGFS_FWSTATS_DEL(dma, rx_requested); + DEBUGFS_FWSTATS_DEL(dma, rx_errors); + DEBUGFS_FWSTATS_DEL(dma, tx_requested); + DEBUGFS_FWSTATS_DEL(dma, tx_errors); + + DEBUGFS_FWSTATS_DEL(isr, cmd_cmplt); + DEBUGFS_FWSTATS_DEL(isr, fiqs); + DEBUGFS_FWSTATS_DEL(isr, rx_headers); + DEBUGFS_FWSTATS_DEL(isr, rx_mem_overflow); + DEBUGFS_FWSTATS_DEL(isr, rx_rdys); + DEBUGFS_FWSTATS_DEL(isr, irqs); + DEBUGFS_FWSTATS_DEL(isr, tx_procs); + DEBUGFS_FWSTATS_DEL(isr, decrypt_done); + DEBUGFS_FWSTATS_DEL(isr, dma0_done); + DEBUGFS_FWSTATS_DEL(isr, dma1_done); + DEBUGFS_FWSTATS_DEL(isr, tx_exch_complete); + DEBUGFS_FWSTATS_DEL(isr, commands); + DEBUGFS_FWSTATS_DEL(isr, rx_procs); + DEBUGFS_FWSTATS_DEL(isr, hw_pm_mode_changes); + DEBUGFS_FWSTATS_DEL(isr, host_acknowledges); + DEBUGFS_FWSTATS_DEL(isr, pci_pm); + DEBUGFS_FWSTATS_DEL(isr, wakeups); + DEBUGFS_FWSTATS_DEL(isr, low_rssi); + + DEBUGFS_FWSTATS_DEL(wep, addr_key_count); + DEBUGFS_FWSTATS_DEL(wep, default_key_count); + /* skipping wep.reserved */ + DEBUGFS_FWSTATS_DEL(wep, key_not_found); + DEBUGFS_FWSTATS_DEL(wep, decrypt_fail); + DEBUGFS_FWSTATS_DEL(wep, packets); + DEBUGFS_FWSTATS_DEL(wep, interrupt); + + DEBUGFS_FWSTATS_DEL(pwr, ps_enter); + DEBUGFS_FWSTATS_DEL(pwr, elp_enter); + DEBUGFS_FWSTATS_DEL(pwr, missing_bcns); + DEBUGFS_FWSTATS_DEL(pwr, wake_on_host); + DEBUGFS_FWSTATS_DEL(pwr, 
wake_on_timer_exp); + DEBUGFS_FWSTATS_DEL(pwr, tx_with_ps); + DEBUGFS_FWSTATS_DEL(pwr, tx_without_ps); + DEBUGFS_FWSTATS_DEL(pwr, rcvd_beacons); + DEBUGFS_FWSTATS_DEL(pwr, power_save_off); + DEBUGFS_FWSTATS_DEL(pwr, enable_ps); + DEBUGFS_FWSTATS_DEL(pwr, disable_ps); + DEBUGFS_FWSTATS_DEL(pwr, fix_tsf_ps); + /* skipping cont_miss_bcns_spread for now */ + DEBUGFS_FWSTATS_DEL(pwr, rcvd_awake_beacons); + + DEBUGFS_FWSTATS_DEL(mic, rx_pkts); + DEBUGFS_FWSTATS_DEL(mic, calc_failure); + + DEBUGFS_FWSTATS_DEL(aes, encrypt_fail); + DEBUGFS_FWSTATS_DEL(aes, decrypt_fail); + DEBUGFS_FWSTATS_DEL(aes, encrypt_packets); + DEBUGFS_FWSTATS_DEL(aes, decrypt_packets); + DEBUGFS_FWSTATS_DEL(aes, encrypt_interrupt); + DEBUGFS_FWSTATS_DEL(aes, decrypt_interrupt); + + DEBUGFS_FWSTATS_DEL(event, heart_beat); + DEBUGFS_FWSTATS_DEL(event, calibration); + DEBUGFS_FWSTATS_DEL(event, rx_mismatch); + DEBUGFS_FWSTATS_DEL(event, rx_mem_empty); + DEBUGFS_FWSTATS_DEL(event, rx_pool); + DEBUGFS_FWSTATS_DEL(event, oom_late); + DEBUGFS_FWSTATS_DEL(event, phy_transmit_error); + DEBUGFS_FWSTATS_DEL(event, tx_stuck); + + DEBUGFS_FWSTATS_DEL(ps, pspoll_timeouts); + DEBUGFS_FWSTATS_DEL(ps, upsd_timeouts); + DEBUGFS_FWSTATS_DEL(ps, upsd_max_sptime); + DEBUGFS_FWSTATS_DEL(ps, upsd_max_apturn); + DEBUGFS_FWSTATS_DEL(ps, pspoll_max_apturn); + DEBUGFS_FWSTATS_DEL(ps, pspoll_utilization); + DEBUGFS_FWSTATS_DEL(ps, upsd_utilization); + + DEBUGFS_FWSTATS_DEL(rxpipe, rx_prep_beacon_drop); + DEBUGFS_FWSTATS_DEL(rxpipe, descr_host_int_trig_rx_data); + DEBUGFS_FWSTATS_DEL(rxpipe, beacon_buffer_thres_host_int_trig_rx_data); + DEBUGFS_FWSTATS_DEL(rxpipe, missed_beacon_host_int_trig_rx_data); + DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data); + + DEBUGFS_DEL(tx_queue_len); + DEBUGFS_DEL(retry_count); + DEBUGFS_DEL(excessive_retries); + + DEBUGFS_DEL(gpio_power); +} + +static int wl1271_debugfs_add_files(struct wl1271 *wl) +{ + int ret = 0; + + DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow); + + DEBUGFS_FWSTATS_ADD(rx, out_of_mem); + DEBUGFS_FWSTATS_ADD(rx, hdr_overflow); + DEBUGFS_FWSTATS_ADD(rx, hw_stuck); + DEBUGFS_FWSTATS_ADD(rx, dropped); + DEBUGFS_FWSTATS_ADD(rx, fcs_err); + DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig); + DEBUGFS_FWSTATS_ADD(rx, path_reset); + DEBUGFS_FWSTATS_ADD(rx, reset_counter); + + DEBUGFS_FWSTATS_ADD(dma, rx_requested); + DEBUGFS_FWSTATS_ADD(dma, rx_errors); + DEBUGFS_FWSTATS_ADD(dma, tx_requested); + DEBUGFS_FWSTATS_ADD(dma, tx_errors); + + DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt); + DEBUGFS_FWSTATS_ADD(isr, fiqs); + DEBUGFS_FWSTATS_ADD(isr, rx_headers); + DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow); + DEBUGFS_FWSTATS_ADD(isr, rx_rdys); + DEBUGFS_FWSTATS_ADD(isr, irqs); + DEBUGFS_FWSTATS_ADD(isr, tx_procs); + DEBUGFS_FWSTATS_ADD(isr, decrypt_done); + DEBUGFS_FWSTATS_ADD(isr, dma0_done); + DEBUGFS_FWSTATS_ADD(isr, dma1_done); + DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete); + DEBUGFS_FWSTATS_ADD(isr, commands); + DEBUGFS_FWSTATS_ADD(isr, rx_procs); + DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes); + DEBUGFS_FWSTATS_ADD(isr, host_acknowledges); + DEBUGFS_FWSTATS_ADD(isr, pci_pm); + DEBUGFS_FWSTATS_ADD(isr, wakeups); + DEBUGFS_FWSTATS_ADD(isr, low_rssi); + + DEBUGFS_FWSTATS_ADD(wep, addr_key_count); + DEBUGFS_FWSTATS_ADD(wep, default_key_count); + /* skipping wep.reserved */ + DEBUGFS_FWSTATS_ADD(wep, key_not_found); + DEBUGFS_FWSTATS_ADD(wep, decrypt_fail); + DEBUGFS_FWSTATS_ADD(wep, packets); + DEBUGFS_FWSTATS_ADD(wep, interrupt); + + DEBUGFS_FWSTATS_ADD(pwr, ps_enter); + DEBUGFS_FWSTATS_ADD(pwr, elp_enter); + 
DEBUGFS_FWSTATS_ADD(pwr, missing_bcns); + DEBUGFS_FWSTATS_ADD(pwr, wake_on_host); + DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp); + DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps); + DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps); + DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons); + DEBUGFS_FWSTATS_ADD(pwr, power_save_off); + DEBUGFS_FWSTATS_ADD(pwr, enable_ps); + DEBUGFS_FWSTATS_ADD(pwr, disable_ps); + DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps); + /* skipping cont_miss_bcns_spread for now */ + DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons); + + DEBUGFS_FWSTATS_ADD(mic, rx_pkts); + DEBUGFS_FWSTATS_ADD(mic, calc_failure); + + DEBUGFS_FWSTATS_ADD(aes, encrypt_fail); + DEBUGFS_FWSTATS_ADD(aes, decrypt_fail); + DEBUGFS_FWSTATS_ADD(aes, encrypt_packets); + DEBUGFS_FWSTATS_ADD(aes, decrypt_packets); + DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt); + DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt); + + DEBUGFS_FWSTATS_ADD(event, heart_beat); + DEBUGFS_FWSTATS_ADD(event, calibration); + DEBUGFS_FWSTATS_ADD(event, rx_mismatch); + DEBUGFS_FWSTATS_ADD(event, rx_mem_empty); + DEBUGFS_FWSTATS_ADD(event, rx_pool); + DEBUGFS_FWSTATS_ADD(event, oom_late); + DEBUGFS_FWSTATS_ADD(event, phy_transmit_error); + DEBUGFS_FWSTATS_ADD(event, tx_stuck); + + DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts); + DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts); + DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime); + DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn); + DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn); + DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization); + DEBUGFS_FWSTATS_ADD(ps, upsd_utilization); + + DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop); + DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data); + DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data); + DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data); + DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); + + DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir); + DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); + DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); + + DEBUGFS_ADD(gpio_power, wl->debugfs.rootdir); + +out: + if (ret < 0) + wl1271_debugfs_delete_files(wl); + + return ret; +} + +void wl1271_debugfs_reset(struct wl1271 *wl) +{ + memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); + wl->stats.retry_count = 0; + wl->stats.excessive_retries = 0; +} + +int wl1271_debugfs_init(struct wl1271 *wl) +{ + int ret; + + wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); + + if (IS_ERR(wl->debugfs.rootdir)) { + ret = PTR_ERR(wl->debugfs.rootdir); + wl->debugfs.rootdir = NULL; + goto err; + } + + wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics", + wl->debugfs.rootdir); + + if (IS_ERR(wl->debugfs.fw_statistics)) { + ret = PTR_ERR(wl->debugfs.fw_statistics); + wl->debugfs.fw_statistics = NULL; + goto err_root; + } + + wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), + GFP_KERNEL); + + if (!wl->stats.fw_stats) { + ret = -ENOMEM; + goto err_fw; + } + + wl->stats.fw_stats_update = jiffies; + + ret = wl1271_debugfs_add_files(wl); + + if (ret < 0) + goto err_file; + + return 0; + +err_file: + kfree(wl->stats.fw_stats); + wl->stats.fw_stats = NULL; + +err_fw: + debugfs_remove(wl->debugfs.fw_statistics); + wl->debugfs.fw_statistics = NULL; + +err_root: + debugfs_remove(wl->debugfs.rootdir); + wl->debugfs.rootdir = NULL; + +err: + return ret; +} + +void wl1271_debugfs_exit(struct wl1271 *wl) +{ + wl1271_debugfs_delete_files(wl); + + kfree(wl->stats.fw_stats); + wl->stats.fw_stats = NULL; + + debugfs_remove(wl->debugfs.fw_statistics); + wl->debugfs.fw_statistics = 
NULL; + + debugfs_remove(wl->debugfs.rootdir); + wl->debugfs.rootdir = NULL; + +} diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.h b/drivers/net/wireless/wl12xx/wl1271_debugfs.h new file mode 100644 index 0000000..00a45b2 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.h @@ -0,0 +1,33 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef WL1271_DEBUGFS_H +#define WL1271_DEBUGFS_H + +#include "wl1271.h" + +int wl1271_debugfs_init(struct wl1271 *wl); +void wl1271_debugfs_exit(struct wl1271 *wl); +void wl1271_debugfs_reset(struct wl1271 *wl); + +#endif /* WL1271_DEBUGFS_H */ diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c new file mode 100644 index 0000000..7468ef1 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_event.c @@ -0,0 +1,248 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wl1271.h" +#include "wl1271_reg.h" +#include "wl1271_spi.h" +#include "wl1271_io.h" +#include "wl1271_event.h" +#include "wl1271_ps.h" +#include "wl12xx_80211.h" + +static int wl1271_event_scan_complete(struct wl1271 *wl, + struct event_mailbox *mbox) +{ + int size = sizeof(struct wl12xx_probe_req_template); + wl1271_debug(DEBUG_EVENT, "status: 0x%x", + mbox->scheduled_scan_status); + + if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) { + if (wl->scan.state == WL1271_SCAN_BAND_DUAL) { + wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, + NULL, size); + /* 2.4 GHz band scanned, scan 5 GHz band, pretend + * to the wl1271_cmd_scan function that we are not + * scanning as it checks that. 
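+ * The scan is only reported back to mac80211 (via
+ * ieee80211_scan_completed) in the else branch below, once the
+ * last band has been handled; the flag is cleared here just to
+ * get past that check for the 5 GHz pass.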
+ */ + clear_bit(WL1271_FLAG_SCANNING, &wl->flags); + wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len, + wl->scan.active, + wl->scan.high_prio, + WL1271_SCAN_BAND_5_GHZ, + wl->scan.probe_requests); + } else { + if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ) + wl1271_cmd_template_set(wl, + CMD_TEMPL_CFG_PROBE_REQ_2_4, + NULL, size); + else + wl1271_cmd_template_set(wl, + CMD_TEMPL_CFG_PROBE_REQ_5, + NULL, size); + + mutex_unlock(&wl->mutex); + ieee80211_scan_completed(wl->hw, false); + mutex_lock(&wl->mutex); + clear_bit(WL1271_FLAG_SCANNING, &wl->flags); + } + } + return 0; +} + +static int wl1271_event_ps_report(struct wl1271 *wl, + struct event_mailbox *mbox, + bool *beacon_loss) +{ + int ret = 0; + + wl1271_debug(DEBUG_EVENT, "ps_status: 0x%x", mbox->ps_status); + + switch (mbox->ps_status) { + case EVENT_ENTER_POWER_SAVE_FAIL: + wl1271_debug(DEBUG_PSM, "PSM entry failed"); + + if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) { + /* remain in active mode */ + wl->psm_entry_retry = 0; + break; + } + + if (wl->psm_entry_retry < wl->conf.conn.psm_entry_retries) { + wl->psm_entry_retry++; + ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, + true); + } else { + wl1271_error("PSM entry failed, giving up.\n"); + /* FIXME: this may need to be reconsidered. for now it + is not possible to indicate to the mac80211 + afterwards that PSM entry failed. To maximize + functionality (receiving data and remaining + associated) make sure that we are in sync with the + AP in regard of PSM mode. */ + ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, + false); + wl->psm_entry_retry = 0; + } + break; + case EVENT_ENTER_POWER_SAVE_SUCCESS: + wl->psm_entry_retry = 0; + + /* enable beacon filtering */ + ret = wl1271_acx_beacon_filter_opt(wl, true); + if (ret < 0) + break; + + /* enable beacon early termination */ + ret = wl1271_acx_bet_enable(wl, true); + if (ret < 0) + break; + + /* go to extremely low power mode */ + wl1271_ps_elp_sleep(wl); + if (ret < 0) + break; + break; + case EVENT_EXIT_POWER_SAVE_FAIL: + wl1271_debug(DEBUG_PSM, "PSM exit failed"); + + if (test_bit(WL1271_FLAG_PSM, &wl->flags)) { + wl->psm_entry_retry = 0; + break; + } + + /* make sure the firmware goes to active mode - the frame to + be sent next will indicate to the AP, that we are active. */ + ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, + false); + break; + case EVENT_EXIT_POWER_SAVE_SUCCESS: + default: + break; + } + + return ret; +} + +static void wl1271_event_mbox_dump(struct event_mailbox *mbox) +{ + wl1271_debug(DEBUG_EVENT, "MBOX DUMP:"); + wl1271_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector); + wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask); +} + +static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) +{ + int ret; + u32 vector; + bool beacon_loss = false; + + wl1271_event_mbox_dump(mbox); + + vector = le32_to_cpu(mbox->events_vector); + vector &= ~(le32_to_cpu(mbox->events_mask)); + wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector); + + if (vector & SCAN_COMPLETE_EVENT_ID) { + ret = wl1271_event_scan_complete(wl, mbox); + if (ret < 0) + return ret; + } + + /* + * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon + * filtering) is enabled. Without PSM, the stack will receive all + * beacons and can detect beacon loss by itself. 
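+ * When the event does fire under PSM, only the beacon_loss flag is
+ * set here; the actual ieee80211_beacon_loss() call is deferred to
+ * the end of this function, where the mutex can safely be dropped.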
+ */ + if (vector & BSS_LOSE_EVENT_ID && + test_bit(WL1271_FLAG_PSM, &wl->flags)) { + wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); + + /* indicate to the stack, that beacons have been lost */ + beacon_loss = true; + } + + if (vector & PS_REPORT_EVENT_ID) { + wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT"); + ret = wl1271_event_ps_report(wl, mbox, &beacon_loss); + if (ret < 0) + return ret; + } + + if (wl->vif && beacon_loss) { + /* Obviously, it's dangerous to release the mutex while + we are holding many of the variables in the wl struct. + That's why it's done last in the function, and care must + be taken that nothing more is done after this function + returns. */ + mutex_unlock(&wl->mutex); + ieee80211_beacon_loss(wl->vif); + mutex_lock(&wl->mutex); + } + + return 0; +} + +int wl1271_event_unmask(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask)); + if (ret < 0) + return ret; + + return 0; +} + +void wl1271_event_mbox_config(struct wl1271 *wl) +{ + wl->mbox_ptr[0] = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR); + wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); + + wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x", + wl->mbox_ptr[0], wl->mbox_ptr[1]); +} + +int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) +{ + struct event_mailbox mbox; + int ret; + + wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num); + + if (mbox_num > 1) + return -EINVAL; + + /* first we read the mbox descriptor */ + wl1271_read(wl, wl->mbox_ptr[mbox_num], &mbox, + sizeof(struct event_mailbox), false); + + /* process the descriptor */ + ret = wl1271_event_process(wl, &mbox); + if (ret < 0) + return ret; + + /* then we let the firmware know it can go on...*/ + wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK); + + return 0; +} diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h new file mode 100644 index 0000000..278f920 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_event.h @@ -0,0 +1,117 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_EVENT_H__ +#define __WL1271_EVENT_H__ + +/* + * Mbox events + * + * The event mechanism is based on a pair of event buffers (buffers A and + * B) at fixed locations in the target's memory. The host processes one + * buffer while the other buffer continues to collect events. If the host + * is not processing events, an interrupt is issued to signal that a buffer + * is ready. Once the host is done with processing events from one buffer, + * it signals the target (with an ACK interrupt) that the event buffer is + * free. 
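+ * The host side of this is wl1271_event_handle() in wl1271_event.c:
+ * it reads the mailbox that raised the interrupt, processes it and
+ * then acks it by writing INTR_TRIG_EVENT_ACK to
+ * ACX_REG_INTERRUPT_TRIG.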
+ */ + +enum { + MEASUREMENT_START_EVENT_ID = BIT(8), + MEASUREMENT_COMPLETE_EVENT_ID = BIT(9), + SCAN_COMPLETE_EVENT_ID = BIT(10), + SCHEDULED_SCAN_COMPLETE_EVENT_ID = BIT(11), + AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12), + PS_REPORT_EVENT_ID = BIT(13), + PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14), + DISCONNECT_EVENT_COMPLETE_ID = BIT(15), + JOIN_EVENT_COMPLETE_ID = BIT(16), + CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17), + BSS_LOSE_EVENT_ID = BIT(18), + REGAINED_BSS_EVENT_ID = BIT(19), + ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20), + SOFT_GEMINI_SENSE_EVENT_ID = BIT(22), + SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23), + SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24), + PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25), + DBG_EVENT_ID = BIT(26), + HEALTH_CHECK_REPLY_EVENT_ID = BIT(27), + PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28), + PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29), + BA_SESSION_TEAR_DOWN_EVENT_ID = BIT(30), + EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff, +}; + +enum { + EVENT_ENTER_POWER_SAVE_FAIL = 0, + EVENT_ENTER_POWER_SAVE_SUCCESS, + EVENT_EXIT_POWER_SAVE_FAIL, + EVENT_EXIT_POWER_SAVE_SUCCESS, +}; + +struct event_debug_report { + u8 debug_event_id; + u8 num_params; + __le16 pad; + __le32 report_1; + __le32 report_2; + __le32 report_3; +} __attribute__ ((packed)); + +#define NUM_OF_RSSI_SNR_TRIGGERS 8 + +struct event_mailbox { + __le32 events_vector; + __le32 events_mask; + __le32 reserved_1; + __le32 reserved_2; + + u8 dbg_event_id; + u8 num_relevant_params; + __le16 reserved_3; + __le32 event_report_p1; + __le32 event_report_p2; + __le32 event_report_p3; + + u8 number_of_scan_results; + u8 scan_tag; + u8 reserved_4[2]; + __le32 compl_scheduled_scan_status; + + __le16 scheduled_scan_attended_channels; + u8 soft_gemini_sense_info; + u8 soft_gemini_protective_info; + s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS]; + u8 channel_switch_status; + u8 scheduled_scan_status; + u8 ps_status; + + u8 reserved_5[29]; +} __attribute__ ((packed)); + +int wl1271_event_unmask(struct wl1271 *wl); +void wl1271_event_mbox_config(struct wl1271 *wl); +int wl1271_event_handle(struct wl1271 *wl, u8 mbox); + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c new file mode 100644 index 0000000..86c30a8 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_init.c @@ -0,0 +1,334 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include + +#include "wl1271_init.h" +#include "wl12xx_80211.h" +#include "wl1271_acx.h" +#include "wl1271_cmd.h" +#include "wl1271_reg.h" + +static int wl1271_init_hwenc_config(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_acx_feature_cfg(wl); + if (ret < 0) { + wl1271_warning("couldn't set feature config"); + return ret; + } + + ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key); + if (ret < 0) { + wl1271_warning("couldn't set default key"); + return ret; + } + + return 0; +} + +int wl1271_init_templates_config(struct wl1271 *wl) +{ + int ret; + + /* send empty templates for fw memory reservation */ + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL, + sizeof(struct wl12xx_probe_req_template)); + if (ret < 0) + return ret; + + if (wl1271_11a_enabled()) { + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5, + NULL, + sizeof(struct wl12xx_probe_req_template)); + if (ret < 0) + return ret; + } + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, + sizeof(struct wl12xx_null_data_template)); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, NULL, + sizeof(struct wl12xx_ps_poll_template)); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL, + sizeof + (struct wl12xx_qos_null_data_template)); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL, + sizeof + (struct wl12xx_probe_resp_template)); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL, + sizeof + (struct wl12xx_beacon_template)); + if (ret < 0) + return ret; + + return 0; +} + +static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter) +{ + int ret; + + ret = wl1271_acx_rx_msdu_life_time(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_rx_config(wl, config, filter); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_init_phy_config(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_acx_pd_threshold(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME); + if (ret < 0) + return ret; + + ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0); + if (ret < 0) + return ret; + + ret = wl1271_acx_service_period_timeout(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_rts_threshold(wl, wl->conf.rx.rts_threshold); + if (ret < 0) + return ret; + + return 0; +} + +static int wl1271_init_beacon_filter(struct wl1271 *wl) +{ + int ret; + + /* disable beacon filtering at this stage */ + ret = wl1271_acx_beacon_filter_opt(wl, false); + if (ret < 0) + return ret; + + ret = wl1271_acx_beacon_filter_table(wl); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_init_pta(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_acx_sg_enable(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_sg_cfg(wl); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_init_energy_detection(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_acx_cca_threshold(wl); + if (ret < 0) + return ret; + + return 0; +} + +static int wl1271_init_beacon_broadcast(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_acx_bcn_dtim_options(wl); + if (ret < 0) + return ret; + + return 0; +} + +int wl1271_hw_init(struct wl1271 *wl) +{ + struct conf_tx_ac_category 
*conf_ac; + struct conf_tx_tid *conf_tid; + int ret, i; + + ret = wl1271_cmd_general_parms(wl); + if (ret < 0) + return ret; + + ret = wl1271_cmd_radio_parms(wl); + if (ret < 0) + return ret; + + /* Template settings */ + ret = wl1271_init_templates_config(wl); + if (ret < 0) + return ret; + + /* Default memory configuration */ + ret = wl1271_acx_init_mem_config(wl); + if (ret < 0) + return ret; + + /* RX config */ + ret = wl1271_init_rx_config(wl, + RX_CFG_PROMISCUOUS | RX_CFG_TSF, + RX_FILTER_OPTION_DEF); + /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS, + RX_FILTER_OPTION_FILTER_ALL); */ + if (ret < 0) + goto out_free_memmap; + + /* PHY layer config */ + ret = wl1271_init_phy_config(wl); + if (ret < 0) + goto out_free_memmap; + + ret = wl1271_acx_dco_itrim_params(wl); + if (ret < 0) + goto out_free_memmap; + + /* Initialize connection monitoring thresholds */ + ret = wl1271_acx_conn_monit_params(wl); + if (ret < 0) + goto out_free_memmap; + + /* Beacon filtering */ + ret = wl1271_init_beacon_filter(wl); + if (ret < 0) + goto out_free_memmap; + + /* Configure TX patch complete interrupt behavior */ + ret = wl1271_acx_tx_config_options(wl); + if (ret < 0) + goto out_free_memmap; + + /* RX complete interrupt pacing */ + ret = wl1271_acx_init_rx_interrupt(wl); + if (ret < 0) + goto out_free_memmap; + + /* Bluetooth WLAN coexistence */ + ret = wl1271_init_pta(wl); + if (ret < 0) + goto out_free_memmap; + + /* Energy detection */ + ret = wl1271_init_energy_detection(wl); + if (ret < 0) + goto out_free_memmap; + + /* Beacons and boradcast settings */ + ret = wl1271_init_beacon_broadcast(wl); + if (ret < 0) + goto out_free_memmap; + + /* Default fragmentation threshold */ + ret = wl1271_acx_frag_threshold(wl); + if (ret < 0) + goto out_free_memmap; + + /* Default TID configuration */ + for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { + conf_tid = &wl->conf.tx.tid_conf[i]; + ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id, + conf_tid->channel_type, + conf_tid->tsid, + conf_tid->ps_scheme, + conf_tid->ack_policy, + conf_tid->apsd_conf[0], + conf_tid->apsd_conf[1]); + if (ret < 0) + goto out_free_memmap; + } + + /* Default AC configuration */ + for (i = 0; i < wl->conf.tx.ac_conf_count; i++) { + conf_ac = &wl->conf.tx.ac_conf[i]; + ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min, + conf_ac->cw_max, conf_ac->aifsn, + conf_ac->tx_op_limit); + if (ret < 0) + goto out_free_memmap; + } + + /* Configure TX rate classes */ + ret = wl1271_acx_rate_policies(wl); + if (ret < 0) + goto out_free_memmap; + + /* Enable data path */ + ret = wl1271_cmd_data_path(wl, 1); + if (ret < 0) + goto out_free_memmap; + + /* Configure for ELP power saving */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); + if (ret < 0) + goto out_free_memmap; + + /* Configure HW encryption */ + ret = wl1271_init_hwenc_config(wl); + if (ret < 0) + goto out_free_memmap; + + /* configure PM */ + ret = wl1271_acx_pm_config(wl); + if (ret < 0) + goto out_free_memmap; + + return 0; + + out_free_memmap: + kfree(wl->target_mem_map); + wl->target_mem_map = NULL; + + return ret; +} diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/wl1271_init.h new file mode 100644 index 0000000..bc26f8c --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_init.h @@ -0,0 +1,36 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * 
version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_INIT_H__ +#define __WL1271_INIT_H__ + +#include "wl1271.h" + +int wl1271_hw_init_power_auth(struct wl1271 *wl); +int wl1271_init_templates_config(struct wl1271 *wl); +int wl1271_init_phy_config(struct wl1271 *wl); +int wl1271_init_pta(struct wl1271 *wl); +int wl1271_init_energy_detection(struct wl1271 *wl); +int wl1271_hw_init(struct wl1271 *wl); + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/wl1271_io.c new file mode 100644 index 0000000..5cd94d5 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_io.c @@ -0,0 +1,213 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include + +#include "wl1271.h" +#include "wl12xx_80211.h" +#include "wl1271_spi.h" +#include "wl1271_io.h" + +static int wl1271_translate_addr(struct wl1271 *wl, int addr) +{ + /* + * To translate, first check to which window of addresses the + * particular address belongs. Then subtract the starting address + * of that window from the address. Then, add offset of the + * translated region. + * + * The translated regions occur next to each other in physical device + * memory, so just add the sizes of the preceeding address regions to + * get the offset to the new region. + * + * Currently, only the two first regions are addressed, and the + * assumption is that all addresses will fall into either of those + * two. + */ + if ((addr >= wl->part.reg.start) && + (addr < wl->part.reg.start + wl->part.reg.size)) + return addr - wl->part.reg.start + wl->part.mem.size; + else + return addr - wl->part.mem.start; +} + +/* Set the SPI partitions to access the chip addresses + * + * To simplify driver code, a fixed (virtual) memory map is defined for + * register and memory addresses. Because in the chipset, in different stages + * of operation, those addresses will move around, an address translation + * mechanism is required. + * + * There are four partitions (three memory and one register partition), + * which are mapped to two different areas of the hardware memory. + * + * Virtual address + * space + * + * | | + * ...+----+--> mem.start + * Physical address ... | | + * space ... | | [PART_0] + * ... | | + * 00000000 <--+----+... ...+----+--> mem.start + mem.size + * | | ... 
| | + * |MEM | ... | | + * | | ... | | + * mem.size <--+----+... | | {unused area) + * | | ... | | + * |REG | ... | | + * mem.size | | ... | | + * + <--+----+... ...+----+--> reg.start + * reg.size | | ... | | + * |MEM2| ... | | [PART_1] + * | | ... | | + * ...+----+--> reg.start + reg.size + * | | + * + */ +int wl1271_set_partition(struct wl1271 *wl, + struct wl1271_partition_set *p) +{ + /* copy partition info */ + memcpy(&wl->part, p, sizeof(*p)); + + wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", + p->mem.start, p->mem.size); + wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", + p->reg.start, p->reg.size); + wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X", + p->mem2.start, p->mem2.size); + wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X", + p->mem3.start, p->mem3.size); + + /* write partition info to the chipset */ + wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start); + wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size); + wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start); + wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size); + wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start); + wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size); + wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); + + return 0; +} + +void wl1271_io_reset(struct wl1271 *wl) +{ + wl1271_spi_reset(wl); +} + +void wl1271_io_init(struct wl1271 *wl) +{ + wl1271_spi_init(wl); +} + +void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed) +{ + wl1271_spi_raw_write(wl, addr, buf, len, fixed); +} + +void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed) +{ + wl1271_spi_raw_read(wl, addr, buf, len, fixed); +} + +void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len, + bool fixed) +{ + int physical; + + physical = wl1271_translate_addr(wl, addr); + + wl1271_spi_raw_read(wl, physical, buf, len, fixed); +} + +void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len, + bool fixed) +{ + int physical; + + physical = wl1271_translate_addr(wl, addr); + + wl1271_spi_raw_write(wl, physical, buf, len, fixed); +} + +u32 wl1271_read32(struct wl1271 *wl, int addr) +{ + return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr)); +} + +void wl1271_write32(struct wl1271 *wl, int addr, u32 val) +{ + wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val); +} + +void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val) +{ + /* write address >> 1 + 0x30000 to OCP_POR_CTR */ + addr = (addr >> 1) + 0x30000; + wl1271_write32(wl, OCP_POR_CTR, addr); + + /* write value to OCP_POR_WDATA */ + wl1271_write32(wl, OCP_DATA_WRITE, val); + + /* write 1 to OCP_CMD */ + wl1271_write32(wl, OCP_CMD, OCP_CMD_WRITE); +} + +u16 wl1271_top_reg_read(struct wl1271 *wl, int addr) +{ + u32 val; + int timeout = OCP_CMD_LOOP; + + /* write address >> 1 + 0x30000 to OCP_POR_CTR */ + addr = (addr >> 1) + 0x30000; + wl1271_write32(wl, OCP_POR_CTR, addr); + + /* write 2 to OCP_CMD */ + wl1271_write32(wl, OCP_CMD, OCP_CMD_READ); + + /* poll for data ready */ + do { + val = wl1271_read32(wl, OCP_DATA_READ); + } while (!(val & OCP_READY_MASK) && --timeout); + + if (!timeout) { + wl1271_warning("Top register access timed out."); + return 0xffff; + } + + /* check data status and return if OK */ + if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK) + return val & 0xffff; + else { + wl1271_warning("Top register access returned error."); + return 0xffff; + } +} + diff --git 
a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h new file mode 100644 index 0000000..fa9a0b3 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_io.h @@ -0,0 +1,68 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_IO_H__ +#define __WL1271_IO_H__ + +struct wl1271; + +void wl1271_io_reset(struct wl1271 *wl); +void wl1271_io_init(struct wl1271 *wl); + +/* Raw target IO, address is not translated */ +void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed); +void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed); + +/* Translated target IO */ +void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len, + bool fixed); +void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len, + bool fixed); +u32 wl1271_read32(struct wl1271 *wl, int addr); +void wl1271_write32(struct wl1271 *wl, int addr, u32 val); + +/* Top Register IO */ +void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val); +u16 wl1271_top_reg_read(struct wl1271 *wl, int addr); + +int wl1271_set_partition(struct wl1271 *wl, + struct wl1271_partition_set *p); + +static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) +{ + wl1271_raw_read(wl, addr, &wl->buffer_32, + sizeof(wl->buffer_32), false); + + return wl->buffer_32; +} + +static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val) +{ + wl->buffer_32 = val; + wl1271_raw_write(wl, addr, &wl->buffer_32, + sizeof(wl->buffer_32), false); +} +#endif diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c new file mode 100644 index 0000000..2a864b2 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_main.c @@ -0,0 +1,2239 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wl1271.h" +#include "wl12xx_80211.h" +#include "wl1271_reg.h" +#include "wl1271_spi.h" +#include "wl1271_io.h" +#include "wl1271_event.h" +#include "wl1271_tx.h" +#include "wl1271_rx.h" +#include "wl1271_ps.h" +#include "wl1271_init.h" +#include "wl1271_debugfs.h" +#include "wl1271_cmd.h" +#include "wl1271_boot.h" +#include "wl1271_testmode.h" + +#define WL1271_BOOT_RETRIES 3 + +static struct conf_drv_settings default_conf = { + .sg = { + .per_threshold = 7500, + .max_scan_compensation_time = 120000, + .nfs_sample_interval = 400, + .load_ratio = 50, + .auto_ps_mode = 0, + .probe_req_compensation = 170, + .scan_window_compensation = 50, + .antenna_config = 0, + .beacon_miss_threshold = 60, + .rate_adaptation_threshold = CONF_HW_BIT_RATE_12MBPS, + .rate_adaptation_snr = 0 + }, + .rx = { + .rx_msdu_life_time = 512000, + .packet_detection_threshold = 0, + .ps_poll_timeout = 15, + .upsd_timeout = 15, + .rts_threshold = 2347, + .rx_cca_threshold = 0, + .irq_blk_threshold = 0xFFFF, + .irq_pkt_threshold = 0, + .irq_timeout = 600, + .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY, + }, + .tx = { + .tx_energy_detection = 0, + .rc_conf = { + .enabled_rates = CONF_HW_BIT_RATE_1MBPS | + CONF_HW_BIT_RATE_2MBPS, + .short_retry_limit = 10, + .long_retry_limit = 10, + .aflags = 0 + }, + .ac_conf_count = 4, + .ac_conf = { + [0] = { + .ac = CONF_TX_AC_BE, + .cw_min = 15, + .cw_max = 63, + .aifsn = 3, + .tx_op_limit = 0, + }, + [1] = { + .ac = CONF_TX_AC_BK, + .cw_min = 15, + .cw_max = 63, + .aifsn = 7, + .tx_op_limit = 0, + }, + [2] = { + .ac = CONF_TX_AC_VI, + .cw_min = 15, + .cw_max = 63, + .aifsn = CONF_TX_AIFS_PIFS, + .tx_op_limit = 3008, + }, + [3] = { + .ac = CONF_TX_AC_VO, + .cw_min = 15, + .cw_max = 63, + .aifsn = CONF_TX_AIFS_PIFS, + .tx_op_limit = 1504, + }, + }, + .tid_conf_count = 7, + .tid_conf = { + [0] = { + .queue_id = 0, + .channel_type = CONF_CHANNEL_TYPE_DCF, + .tsid = CONF_TX_AC_BE, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + .apsd_conf = {0, 0}, + }, + [1] = { + .queue_id = 1, + .channel_type = CONF_CHANNEL_TYPE_DCF, + .tsid = CONF_TX_AC_BE, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + .apsd_conf = {0, 0}, + }, + [2] = { + .queue_id = 2, + .channel_type = CONF_CHANNEL_TYPE_DCF, + .tsid = CONF_TX_AC_BE, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + .apsd_conf = {0, 0}, + }, + [3] = { + .queue_id = 3, + .channel_type = CONF_CHANNEL_TYPE_DCF, + .tsid = CONF_TX_AC_BE, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + .apsd_conf = {0, 0}, + }, + [4] = { + .queue_id = 4, + .channel_type = CONF_CHANNEL_TYPE_DCF, + .tsid = CONF_TX_AC_BE, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + .apsd_conf = {0, 0}, + }, + [5] = { + .queue_id = 5, + .channel_type = CONF_CHANNEL_TYPE_DCF, + .tsid = CONF_TX_AC_BE, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + .apsd_conf = {0, 0}, + }, + [6] = { + .queue_id = 6, + .channel_type = CONF_CHANNEL_TYPE_DCF, + .tsid = CONF_TX_AC_BE, + .ps_scheme = CONF_PS_SCHEME_LEGACY, + .ack_policy = CONF_ACK_POLICY_LEGACY, + 
.apsd_conf = {0, 0}, + } + }, + .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, + .tx_compl_timeout = 700, + .tx_compl_threshold = 4 + }, + .conn = { + .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, + .listen_interval = 0, + .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED, + .bcn_filt_ie_count = 1, + .bcn_filt_ie = { + [0] = { + .ie = WLAN_EID_CHANNEL_SWITCH, + .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE, + } + }, + .synch_fail_thold = 10, + .bss_lose_timeout = 100, + .beacon_rx_timeout = 10000, + .broadcast_timeout = 20000, + .rx_broadcast_in_ps = 1, + .ps_poll_threshold = 20, + .sig_trigger_count = 2, + .sig_trigger = { + [0] = { + .threshold = -75, + .pacing = 500, + .metric = CONF_TRIG_METRIC_RSSI_BEACON, + .type = CONF_TRIG_EVENT_TYPE_EDGE, + .direction = CONF_TRIG_EVENT_DIR_LOW, + .hysteresis = 2, + .index = 0, + .enable = 1 + }, + [1] = { + .threshold = -75, + .pacing = 500, + .metric = CONF_TRIG_METRIC_RSSI_BEACON, + .type = CONF_TRIG_EVENT_TYPE_EDGE, + .direction = CONF_TRIG_EVENT_DIR_HIGH, + .hysteresis = 2, + .index = 1, + .enable = 1 + } + }, + .sig_weights = { + .rssi_bcn_avg_weight = 10, + .rssi_pkt_avg_weight = 10, + .snr_bcn_avg_weight = 10, + .snr_pkt_avg_weight = 10 + }, + .bet_enable = CONF_BET_MODE_ENABLE, + .bet_max_consecutive = 10, + .psm_entry_retries = 3 + }, + .init = { + .radioparam = { + .fem = 1, + } + }, + .itrim = { + .enable = false, + .timeout = 50000, + }, + .pm_config = { + .host_clk_settling_time = 5000, + .host_fast_wakeup_support = false + } +}; + +static LIST_HEAD(wl_list); + +static void wl1271_conf_init(struct wl1271 *wl) +{ + + /* + * This function applies the default configuration to the driver. This + * function is invoked upon driver load (spi probe.) + * + * The configuration is stored in a run-time structure in order to + * facilitate for run-time adjustment of any of the parameters. Making + * changes to the configuration structure will apply the new values on + * the next interface up (wl1271_op_start.) 
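+ * Note that this is a plain copy of default_conf; no board or
+ * platform specific values are merged in here, so any such tuning
+ * has to be applied to wl->conf after this function has run.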
+ */ + + /* apply driver default configuration */ + memcpy(&wl->conf, &default_conf, sizeof(default_conf)); +} + + +static int wl1271_plt_init(struct wl1271 *wl) +{ + struct conf_tx_ac_category *conf_ac; + struct conf_tx_tid *conf_tid; + int ret, i; + + ret = wl1271_cmd_general_parms(wl); + if (ret < 0) + return ret; + + ret = wl1271_cmd_radio_parms(wl); + if (ret < 0) + return ret; + + ret = wl1271_init_templates_config(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_init_mem_config(wl); + if (ret < 0) + return ret; + + /* PHY layer config */ + ret = wl1271_init_phy_config(wl); + if (ret < 0) + goto out_free_memmap; + + ret = wl1271_acx_dco_itrim_params(wl); + if (ret < 0) + goto out_free_memmap; + + /* Initialize connection monitoring thresholds */ + ret = wl1271_acx_conn_monit_params(wl); + if (ret < 0) + goto out_free_memmap; + + /* Bluetooth WLAN coexistence */ + ret = wl1271_init_pta(wl); + if (ret < 0) + goto out_free_memmap; + + /* Energy detection */ + ret = wl1271_init_energy_detection(wl); + if (ret < 0) + goto out_free_memmap; + + /* Default fragmentation threshold */ + ret = wl1271_acx_frag_threshold(wl); + if (ret < 0) + goto out_free_memmap; + + /* Default TID configuration */ + for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { + conf_tid = &wl->conf.tx.tid_conf[i]; + ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id, + conf_tid->channel_type, + conf_tid->tsid, + conf_tid->ps_scheme, + conf_tid->ack_policy, + conf_tid->apsd_conf[0], + conf_tid->apsd_conf[1]); + if (ret < 0) + goto out_free_memmap; + } + + /* Default AC configuration */ + for (i = 0; i < wl->conf.tx.ac_conf_count; i++) { + conf_ac = &wl->conf.tx.ac_conf[i]; + ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min, + conf_ac->cw_max, conf_ac->aifsn, + conf_ac->tx_op_limit); + if (ret < 0) + goto out_free_memmap; + } + + /* Enable data path */ + ret = wl1271_cmd_data_path(wl, 1); + if (ret < 0) + goto out_free_memmap; + + /* Configure for CAM power saving (ie. 
always active) */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); + if (ret < 0) + goto out_free_memmap; + + /* configure PM */ + ret = wl1271_acx_pm_config(wl); + if (ret < 0) + goto out_free_memmap; + + return 0; + + out_free_memmap: + kfree(wl->target_mem_map); + wl->target_mem_map = NULL; + + return ret; +} + +static void wl1271_disable_interrupts(struct wl1271 *wl) +{ + disable_irq(wl->irq); +} + +static void wl1271_power_off(struct wl1271 *wl) +{ + wl->set_power(false); + clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); +} + +static void wl1271_power_on(struct wl1271 *wl) +{ + wl->set_power(true); + set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); +} + +static void wl1271_fw_status(struct wl1271 *wl, + struct wl1271_fw_status *status) +{ + u32 total = 0; + int i; + + wl1271_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false); + + wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " + "drv_rx_counter = %d, tx_results_counter = %d)", + status->intr, + status->fw_rx_counter, + status->drv_rx_counter, + status->tx_results_counter); + + /* update number of available TX blocks */ + for (i = 0; i < NUM_TX_QUEUES; i++) { + u32 cnt = le32_to_cpu(status->tx_released_blks[i]) - + wl->tx_blocks_freed[i]; + + wl->tx_blocks_freed[i] = + le32_to_cpu(status->tx_released_blks[i]); + wl->tx_blocks_available += cnt; + total += cnt; + } + + /* if more blocks are available now, schedule some tx work */ + if (total && !skb_queue_empty(&wl->tx_queue)) + ieee80211_queue_work(wl->hw, &wl->tx_work); + + /* update the host-chipset time offset */ + wl->time_offset = jiffies_to_usecs(jiffies) - + le32_to_cpu(status->fw_localtime); +} + +static void wl1271_irq_work(struct work_struct *work) +{ + int ret; + u32 intr; + struct wl1271 *wl = + container_of(work, struct wl1271, irq_work); + + mutex_lock(&wl->mutex); + + wl1271_debug(DEBUG_IRQ, "IRQ work"); + + if (wl->state == WL1271_STATE_OFF) + goto out; + + ret = wl1271_ps_elp_wakeup(wl, true); + if (ret < 0) + goto out; + + wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); + + wl1271_fw_status(wl, wl->fw_status); + intr = le32_to_cpu(wl->fw_status->intr); + if (!intr) { + wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); + goto out_sleep; + } + + intr &= WL1271_INTR_MASK; + + if (intr & WL1271_ACX_INTR_EVENT_A) { + wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); + wl1271_event_handle(wl, 0); + } + + if (intr & WL1271_ACX_INTR_EVENT_B) { + wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); + wl1271_event_handle(wl, 1); + } + + if (intr & WL1271_ACX_INTR_INIT_COMPLETE) + wl1271_debug(DEBUG_IRQ, + "WL1271_ACX_INTR_INIT_COMPLETE"); + + if (intr & WL1271_ACX_INTR_HW_AVAILABLE) + wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); + + if (intr & WL1271_ACX_INTR_DATA) { + u8 tx_res_cnt = wl->fw_status->tx_results_counter - + wl->tx_results_count; + + wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); + + /* check for tx results */ + if (tx_res_cnt) + wl1271_tx_complete(wl, tx_res_cnt); + + wl1271_rx(wl, wl->fw_status); + } + +out_sleep: + wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, + WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static irqreturn_t wl1271_irq(int irq, void *cookie) +{ + struct wl1271 *wl; + unsigned long flags; + + wl1271_debug(DEBUG_IRQ, "IRQ"); + + wl = cookie; + + /* complete the ELP completion */ + spin_lock_irqsave(&wl->wl_lock, flags); + if (wl->elp_compl) { + complete(wl->elp_compl); + wl->elp_compl = NULL; + } + + ieee80211_queue_work(wl->hw, 
&wl->irq_work); + spin_unlock_irqrestore(&wl->wl_lock, flags); + + return IRQ_HANDLED; +} + +static int wl1271_fetch_firmware(struct wl1271 *wl) +{ + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, WL1271_FW_NAME, &wl->spi->dev); + + if (ret < 0) { + wl1271_error("could not get firmware: %d", ret); + return ret; + } + + if (fw->size % 4) { + wl1271_error("firmware size is not multiple of 32 bits: %zu", + fw->size); + ret = -EILSEQ; + goto out; + } + + wl->fw_len = fw->size; + wl->fw = vmalloc(wl->fw_len); + + if (!wl->fw) { + wl1271_error("could not allocate memory for the firmware"); + ret = -ENOMEM; + goto out; + } + + memcpy(wl->fw, fw->data, wl->fw_len); + + ret = 0; + +out: + release_firmware(fw); + + return ret; +} + +static int wl1271_update_mac_addr(struct wl1271 *wl) +{ + int ret = 0; + u8 *nvs_ptr = (u8 *)wl->nvs->nvs; + + /* get mac address from the NVS */ + wl->mac_addr[0] = nvs_ptr[11]; + wl->mac_addr[1] = nvs_ptr[10]; + wl->mac_addr[2] = nvs_ptr[6]; + wl->mac_addr[3] = nvs_ptr[5]; + wl->mac_addr[4] = nvs_ptr[4]; + wl->mac_addr[5] = nvs_ptr[3]; + + /* FIXME: if it is a zero-address, we should bail out. Now, instead, + we randomize an address */ + if (is_zero_ether_addr(wl->mac_addr)) { + static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf}; + memcpy(wl->mac_addr, nokia_oui, 3); + get_random_bytes(wl->mac_addr + 3, 3); + + /* update this address to the NVS */ + nvs_ptr[11] = wl->mac_addr[0]; + nvs_ptr[10] = wl->mac_addr[1]; + nvs_ptr[6] = wl->mac_addr[2]; + nvs_ptr[5] = wl->mac_addr[3]; + nvs_ptr[4] = wl->mac_addr[4]; + nvs_ptr[3] = wl->mac_addr[5]; + } + + SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); + + return ret; +} + +static int wl1271_fetch_nvs(struct wl1271 *wl) +{ + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, WL1271_NVS_NAME, &wl->spi->dev); + + if (ret < 0) { + wl1271_error("could not get nvs file: %d", ret); + return ret; + } + + if (fw->size != sizeof(struct wl1271_nvs_file)) { + wl1271_error("nvs size is not as expected: %zu != %zu", + fw->size, sizeof(struct wl1271_nvs_file)); + ret = -EILSEQ; + goto out; + } + + wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); + + if (!wl->nvs) { + wl1271_error("could not allocate memory for the nvs file"); + ret = -ENOMEM; + goto out; + } + + memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file)); + + ret = wl1271_update_mac_addr(wl); + +out: + release_firmware(fw); + + return ret; +} + +static void wl1271_fw_wakeup(struct wl1271 *wl) +{ + u32 elp_reg; + + elp_reg = ELPCTRL_WAKE_UP; + wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); +} + +static int wl1271_setup(struct wl1271 *wl) +{ + wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL); + if (!wl->fw_status) + return -ENOMEM; + + wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); + if (!wl->tx_res_if) { + kfree(wl->fw_status); + return -ENOMEM; + } + + INIT_WORK(&wl->irq_work, wl1271_irq_work); + INIT_WORK(&wl->tx_work, wl1271_tx_work); + return 0; +} + +static int wl1271_chip_wakeup(struct wl1271 *wl) +{ + struct wl1271_partition_set partition; + int ret = 0; + + msleep(WL1271_PRE_POWER_ON_SLEEP); + wl1271_power_on(wl); + msleep(WL1271_POWER_ON_SLEEP); + wl1271_io_reset(wl); + wl1271_io_init(wl); + + /* We don't need a real memory partition here, because we only want + * to use the registers at this point. 
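+ * Only the register window (REGISTERS_BASE, REGISTERS_DOWN_SIZE) is
+ * mapped below; that is enough for the translated accesses that
+ * follow, such as the CHIP_ID_B read used to identify the chip.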
*/ + memset(&partition, 0, sizeof(partition)); + partition.reg.start = REGISTERS_BASE; + partition.reg.size = REGISTERS_DOWN_SIZE; + wl1271_set_partition(wl, &partition); + + /* ELP module wake up */ + wl1271_fw_wakeup(wl); + + /* whal_FwCtrl_BootSm() */ + + /* 0. read chip id from CHIP_ID */ + wl->chip.id = wl1271_read32(wl, CHIP_ID_B); + + /* 1. check if chip id is valid */ + + switch (wl->chip.id) { + case CHIP_ID_1271_PG10: + wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete", + wl->chip.id); + + ret = wl1271_setup(wl); + if (ret < 0) + goto out; + break; + case CHIP_ID_1271_PG20: + wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", + wl->chip.id); + + ret = wl1271_setup(wl); + if (ret < 0) + goto out; + break; + default: + wl1271_warning("unsupported chip id: 0x%x", wl->chip.id); + ret = -ENODEV; + goto out; + } + + if (wl->fw == NULL) { + ret = wl1271_fetch_firmware(wl); + if (ret < 0) + goto out; + } + + /* No NVS from netlink, try to get it from the filesystem */ + if (wl->nvs == NULL) { + ret = wl1271_fetch_nvs(wl); + if (ret < 0) + goto out; + } + +out: + return ret; +} + +int wl1271_plt_start(struct wl1271 *wl) +{ + int retries = WL1271_BOOT_RETRIES; + int ret; + + mutex_lock(&wl->mutex); + + wl1271_notice("power up"); + + if (wl->state != WL1271_STATE_OFF) { + wl1271_error("cannot go into PLT state because not " + "in off state: %d", wl->state); + ret = -EBUSY; + goto out; + } + + while (retries) { + retries--; + ret = wl1271_chip_wakeup(wl); + if (ret < 0) + goto power_off; + + ret = wl1271_boot(wl); + if (ret < 0) + goto power_off; + + ret = wl1271_plt_init(wl); + if (ret < 0) + goto irq_disable; + + wl->state = WL1271_STATE_PLT; + wl1271_notice("firmware booted in PLT mode (%s)", + wl->chip.fw_ver); + goto out; + +irq_disable: + wl1271_disable_interrupts(wl); + mutex_unlock(&wl->mutex); + /* Unlocking the mutex in the middle of handling is + inherently unsafe. In this case we deem it safe to do, + because we need to let any possibly pending IRQ out of + the system (and while we are WL1271_STATE_OFF the IRQ + work function will not do anything.) Also, any other + possible concurrent operations will fail due to the + current state, hence the wl1271 struct should be safe. 
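+ The same unlock / cancel_work_sync / lock sequence is used in
+ wl1271_op_start() for exactly the same reason.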
*/ + cancel_work_sync(&wl->irq_work); + mutex_lock(&wl->mutex); +power_off: + wl1271_power_off(wl); + } + + wl1271_error("firmware boot in PLT mode failed despite %d retries", + WL1271_BOOT_RETRIES); +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +int wl1271_plt_stop(struct wl1271 *wl) +{ + int ret = 0; + + mutex_lock(&wl->mutex); + + wl1271_notice("power down"); + + if (wl->state != WL1271_STATE_PLT) { + wl1271_error("cannot power down because not in PLT " + "state: %d", wl->state); + ret = -EBUSY; + goto out; + } + + wl1271_disable_interrupts(wl); + wl1271_power_off(wl); + + wl->state = WL1271_STATE_OFF; + wl->rx_counter = 0; + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + + +static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct wl1271 *wl = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *sta = txinfo->control.sta; + unsigned long flags; + + /* peek into the rates configured in the STA entry */ + spin_lock_irqsave(&wl->wl_lock, flags); + if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) { + wl->sta_rate_set = sta->supp_rates[conf->channel->band]; + set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags); + } + spin_unlock_irqrestore(&wl->wl_lock, flags); + + /* queue the packet */ + skb_queue_tail(&wl->tx_queue, skb); + + /* + * The chip specific setup must run before the first TX packet - + * before that, the tx_work will not be initialized! + */ + + ieee80211_queue_work(wl->hw, &wl->tx_work); + + /* + * The workqueue is slow to process the tx_queue and we need stop + * the queue here, otherwise the queue will get too long. + */ + if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_MAX_LENGTH) { + ieee80211_stop_queues(wl->hw); + + /* + * FIXME: this is racy, the variable is not properly + * protected. Maybe fix this by removing the stupid + * variable altogether and checking the real queue state? + */ + set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags); + } + + return NETDEV_TX_OK; +} + +static int wl1271_dev_notify(struct notifier_block *me, unsigned long what, + void *arg) +{ + struct net_device *dev; + struct wireless_dev *wdev; + struct wiphy *wiphy; + struct ieee80211_hw *hw; + struct wl1271 *wl; + struct wl1271 *wl_temp; + struct in_device *idev; + struct in_ifaddr *ifa = arg; + int ret = 0; + + /* FIXME: this ugly function should probably be implemented in the + * mac80211, and here should only be a simple callback handling actual + * setting of the filters. Now we need to dig up references to + * various structures to gain access to what we need. + * Also, because of this, there is no "initial" setting of the filter + * in "op_start", because we don't want to dig up struct net_device + * there - the filter will be set upon first change of the interface + * IP address. */ + + dev = ifa->ifa_dev->dev; + + wdev = dev->ieee80211_ptr; + if (wdev == NULL) + return NOTIFY_DONE; + + wiphy = wdev->wiphy; + if (wiphy == NULL) + return NOTIFY_DONE; + + hw = wiphy_priv(wiphy); + if (hw == NULL) + return NOTIFY_DONE; + + /* Check that the interface is one supported by this driver. */ + wl_temp = hw->priv; + list_for_each_entry(wl, &wl_list, list) { + if (wl == wl_temp) + break; + } + if (wl == NULL) + return NOTIFY_DONE; + + /* Get the interface IP address for the device. 
"ifa" will become + NULL if: + - there is no IPV4 protocol address configured + - there are multiple (virtual) IPV4 addresses configured + When "ifa" is NULL, filtering will be disabled. + */ + ifa = NULL; + idev = dev->ip_ptr; + if (idev) + ifa = idev->ifa_list; + + if (ifa && ifa->ifa_next) + ifa = NULL; + + mutex_lock(&wl->mutex); + + if (wl->state == WL1271_STATE_OFF) + goto out; + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + if (ifa) + ret = wl1271_acx_arp_ip_filter(wl, true, + (u8 *)&ifa->ifa_address, + ACX_IPV4_VERSION); + else + ret = wl1271_acx_arp_ip_filter(wl, false, NULL, + ACX_IPV4_VERSION); + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return NOTIFY_OK; +} + +static struct notifier_block wl1271_dev_notifier = { + .notifier_call = wl1271_dev_notify, +}; + + +static int wl1271_op_start(struct ieee80211_hw *hw) +{ + struct wl1271 *wl = hw->priv; + int retries = WL1271_BOOT_RETRIES; + int ret = 0; + + wl1271_debug(DEBUG_MAC80211, "mac80211 start"); + + mutex_lock(&wl->mutex); + + if (wl->state != WL1271_STATE_OFF) { + wl1271_error("cannot start because not in off state: %d", + wl->state); + ret = -EBUSY; + goto out; + } + + while (retries) { + retries--; + ret = wl1271_chip_wakeup(wl); + if (ret < 0) + goto power_off; + + ret = wl1271_boot(wl); + if (ret < 0) + goto power_off; + + ret = wl1271_hw_init(wl); + if (ret < 0) + goto irq_disable; + + wl->state = WL1271_STATE_ON; + wl1271_info("firmware booted (%s)", wl->chip.fw_ver); + goto out; + +irq_disable: + wl1271_disable_interrupts(wl); + mutex_unlock(&wl->mutex); + /* Unlocking the mutex in the middle of handling is + inherently unsafe. In this case we deem it safe to do, + because we need to let any possibly pending IRQ out of + the system (and while we are WL1271_STATE_OFF the IRQ + work function will not do anything.) Also, any other + possible concurrent operations will fail due to the + current state, hence the wl1271 struct should be safe. 
*/ + cancel_work_sync(&wl->irq_work); + mutex_lock(&wl->mutex); +power_off: + wl1271_power_off(wl); + } + + wl1271_error("firmware boot failed despite %d retries", + WL1271_BOOT_RETRIES); +out: + mutex_unlock(&wl->mutex); + + if (!ret) { + list_add(&wl->list, &wl_list); + register_inetaddr_notifier(&wl1271_dev_notifier); + } + + return ret; +} + +static void wl1271_op_stop(struct ieee80211_hw *hw) +{ + struct wl1271 *wl = hw->priv; + int i; + + wl1271_info("down"); + + wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); + + unregister_inetaddr_notifier(&wl1271_dev_notifier); + list_del(&wl->list); + + mutex_lock(&wl->mutex); + + WARN_ON(wl->state != WL1271_STATE_ON); + + if (test_and_clear_bit(WL1271_FLAG_SCANNING, &wl->flags)) { + mutex_unlock(&wl->mutex); + ieee80211_scan_completed(wl->hw, true); + mutex_lock(&wl->mutex); + } + + wl->state = WL1271_STATE_OFF; + + wl1271_disable_interrupts(wl); + + mutex_unlock(&wl->mutex); + + cancel_work_sync(&wl->irq_work); + cancel_work_sync(&wl->tx_work); + + mutex_lock(&wl->mutex); + + /* let's notify MAC80211 about the remaining pending TX frames */ + wl1271_tx_flush(wl); + wl1271_power_off(wl); + + memset(wl->bssid, 0, ETH_ALEN); + memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1); + wl->ssid_len = 0; + wl->bss_type = MAX_BSS_TYPE; + wl->band = IEEE80211_BAND_2GHZ; + + wl->rx_counter = 0; + wl->psm_entry_retry = 0; + wl->power_level = WL1271_DEFAULT_POWER_LEVEL; + wl->tx_blocks_available = 0; + wl->tx_results_count = 0; + wl->tx_packets_count = 0; + wl->tx_security_last_seq = 0; + wl->tx_security_seq_16 = 0; + wl->tx_security_seq_32 = 0; + wl->time_offset = 0; + wl->session_counter = 0; + wl->rate_set = CONF_TX_RATE_MASK_BASIC; + wl->sta_rate_set = 0; + wl->flags = 0; + + for (i = 0; i < NUM_TX_QUEUES; i++) + wl->tx_blocks_freed[i] = 0; + + wl1271_debugfs_reset(wl); + mutex_unlock(&wl->mutex); +} + +static int wl1271_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct wl1271 *wl = hw->priv; + int ret = 0; + + wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", + vif->type, vif->addr); + + mutex_lock(&wl->mutex); + if (wl->vif) { + ret = -EBUSY; + goto out; + } + + wl->vif = vif; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + wl->bss_type = BSS_TYPE_STA_BSS; + break; + case NL80211_IFTYPE_ADHOC: + wl->bss_type = BSS_TYPE_IBSS; + break; + default: + ret = -EOPNOTSUPP; + goto out; + } + + /* FIXME: what if conf->mac_addr changes? 
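+ The permanent address handed to mac80211 comes from the NVS (see
+ wl1271_update_mac_addr), so a changed MAC address would currently
+ be ignored here.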
*/ + +out: + mutex_unlock(&wl->mutex); + return ret; +} + +static void wl1271_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct wl1271 *wl = hw->priv; + + mutex_lock(&wl->mutex); + wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); + wl->vif = NULL; + mutex_unlock(&wl->mutex); +} + +#if 0 +static int wl1271_op_config_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_if_conf *conf) +{ + struct wl1271 *wl = hw->priv; + struct sk_buff *beacon; + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %pM", + conf->bssid); + wl1271_dump_ascii(DEBUG_MAC80211, "ssid: ", conf->ssid, + conf->ssid_len); + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + + if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) { + wl1271_debug(DEBUG_MAC80211, "bssid changed"); + + memcpy(wl->bssid, conf->bssid, ETH_ALEN); + + ret = wl1271_cmd_join(wl); + if (ret < 0) + goto out_sleep; + + ret = wl1271_cmd_build_null_data(wl); + if (ret < 0) + goto out_sleep; + } + + wl->ssid_len = conf->ssid_len; + if (wl->ssid_len) + memcpy(wl->ssid, conf->ssid, wl->ssid_len); + + if (conf->changed & IEEE80211_IFCC_BEACON) { + beacon = ieee80211_beacon_get(hw, vif); + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, + beacon->data, beacon->len); + + if (ret < 0) { + dev_kfree_skb(beacon); + goto out_sleep; + } + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, + beacon->data, beacon->len); + + dev_kfree_skb(beacon); + + if (ret < 0) + goto out_sleep; + } + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} +#endif + +static int wl1271_join_channel(struct wl1271 *wl, int channel) +{ + int ret = 0; + /* we need to use a dummy BSSID for now */ + static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde, + 0xad, 0xbe, 0xef }; + + /* the dummy join is not required for ad-hoc */ + if (wl->bss_type == BSS_TYPE_IBSS) + goto out; + + /* disable mac filter, so we hear everything */ + wl->rx_config &= ~CFG_BSSID_FILTER_EN; + + wl->channel = channel; + memcpy(wl->bssid, dummy_bssid, ETH_ALEN); + + ret = wl1271_cmd_join(wl); + if (ret < 0) + goto out; + + set_bit(WL1271_FLAG_JOINED, &wl->flags); + +out: + return ret; +} + +static int wl1271_unjoin_channel(struct wl1271 *wl) +{ + int ret; + + /* to stop listening to a channel, we disconnect */ + ret = wl1271_cmd_disconnect(wl); + if (ret < 0) + goto out; + + clear_bit(WL1271_FLAG_JOINED, &wl->flags); + wl->channel = 0; + memset(wl->bssid, 0, ETH_ALEN); + wl->rx_config = WL1271_DEFAULT_RX_CONFIG; + +out: + return ret; +} + +static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct wl1271 *wl = hw->priv; + struct ieee80211_conf *conf = &hw->conf; + int channel, ret = 0; + + channel = ieee80211_frequency_to_channel(conf->channel->center_freq); + + wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s", + channel, + conf->flags & IEEE80211_CONF_PS ? "on" : "off", + conf->power_level, + conf->flags & IEEE80211_CONF_IDLE ? 
"idle" : "in use"); + + mutex_lock(&wl->mutex); + + wl->band = conf->channel->band; + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + + if (changed & IEEE80211_CONF_CHANGE_IDLE) { + if (conf->flags & IEEE80211_CONF_IDLE && + test_bit(WL1271_FLAG_JOINED, &wl->flags)) + wl1271_unjoin_channel(wl); + else if (!(conf->flags & IEEE80211_CONF_IDLE)) + wl1271_join_channel(wl, channel); + + if (conf->flags & IEEE80211_CONF_IDLE) { + wl->rate_set = CONF_TX_RATE_MASK_BASIC; + wl->sta_rate_set = 0; + wl1271_acx_rate_policies(wl); + } + } + + /* if the channel changes while joined, join again */ + if (channel != wl->channel && + test_bit(WL1271_FLAG_JOINED, &wl->flags)) { + wl->channel = channel; + /* FIXME: maybe use CMD_CHANNEL_SWITCH for this? */ + ret = wl1271_cmd_join(wl); + if (ret < 0) + wl1271_warning("cmd join to update channel failed %d", + ret); + } else + wl->channel = channel; + + if (conf->flags & IEEE80211_CONF_PS && + !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { + set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); + + /* + * We enter PSM only if we're already associated. + * If we're not, we'll enter it when joining an SSID, + * through the bss_info_changed() hook. + */ + if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { + wl1271_info("psm enabled"); + ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, + true); + } + } else if (!(conf->flags & IEEE80211_CONF_PS) && + test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { + wl1271_info("psm disabled"); + + clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); + + if (test_bit(WL1271_FLAG_PSM, &wl->flags)) + ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, + true); + } + + if (conf->power_level != wl->power_level) { + ret = wl1271_acx_tx_power(wl, conf->power_level); + if (ret < 0) + goto out_sleep; + + wl->power_level = conf->power_level; + } + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +struct wl1271_filter_params { + bool enabled; + int mc_list_length; + u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; +}; + +static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, + struct dev_addr_list *mc_list) +{ + struct wl1271_filter_params *fp; + int i; + + fp = kzalloc(sizeof(*fp), GFP_ATOMIC); + if (!fp) { + wl1271_error("Out of memory setting filters."); + return 0; + } + + /* update multicast filtering parameters */ + fp->enabled = true; + if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) { + mc_count = 0; + fp->enabled = false; + } + + fp->mc_list_length = 0; + for (i = 0; i < mc_count; i++) { + if (mc_list->da_addrlen == ETH_ALEN) { + memcpy(fp->mc_list[fp->mc_list_length], + mc_list->da_addr, ETH_ALEN); + fp->mc_list_length++; + } else + wl1271_warning("Unknown mc address length."); + mc_list = mc_list->next; + } + + return (u64)(unsigned long)fp; +} + +#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \ + FIF_ALLMULTI | \ + FIF_FCSFAIL | \ + FIF_BCN_PRBRESP_PROMISC | \ + FIF_CONTROL | \ + FIF_OTHER_BSS) + +static void wl1271_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed, + unsigned int *total, u64 multicast) +{ + struct wl1271_filter_params *fp = (void *)(unsigned long)multicast; + struct wl1271 *wl = hw->priv; + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter"); + + mutex_lock(&wl->mutex); + + if (wl->state == WL1271_STATE_OFF) + goto out; + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + + *total &= WL1271_SUPPORTED_FILTERS; + changed &= WL1271_SUPPORTED_FILTERS; + + if 
(*total & FIF_ALLMULTI) + ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); + else if (fp) + ret = wl1271_acx_group_address_tbl(wl, fp->enabled, + fp->mc_list, + fp->mc_list_length); + if (ret < 0) + goto out_sleep; + + kfree(fp); + + /* FIXME: We still need to set our filters properly */ + + /* determine, whether supported filter values have changed */ + if (changed == 0) + goto out_sleep; + + /* apply configured filters */ + ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter); + if (ret < 0) + goto out_sleep; + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key_conf) +{ + struct wl1271 *wl = hw->priv; + const u8 *addr; + int ret; + u32 tx_seq_32 = 0; + u16 tx_seq_16 = 0; + u8 key_type; + + static const u8 bcast_addr[ETH_ALEN] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + + wl1271_debug(DEBUG_MAC80211, "mac80211 set key"); + + addr = sta ? sta->addr : bcast_addr; + + wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd); + wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN); + wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", + key_conf->alg, key_conf->keyidx, + key_conf->keylen, key_conf->flags); + wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); + + if (is_zero_ether_addr(addr)) { + /* We dont support TX only encryption */ + ret = -EOPNOTSUPP; + goto out; + } + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out_unlock; + + switch (key_conf->alg) { + case ALG_WEP: + key_type = KEY_WEP; + + key_conf->hw_key_idx = key_conf->keyidx; + break; + case ALG_TKIP: + key_type = KEY_TKIP; + + key_conf->hw_key_idx = key_conf->keyidx; + tx_seq_32 = wl->tx_security_seq_32; + tx_seq_16 = wl->tx_security_seq_16; + break; + case ALG_CCMP: + key_type = KEY_AES; + + key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + tx_seq_32 = wl->tx_security_seq_32; + tx_seq_16 = wl->tx_security_seq_16; + break; + default: + wl1271_error("Unknown key algo 0x%x", key_conf->alg); + + ret = -EOPNOTSUPP; + goto out_sleep; + } + + switch (cmd) { + case SET_KEY: + ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE, + key_conf->keyidx, key_type, + key_conf->keylen, key_conf->key, + addr, tx_seq_32, tx_seq_16); + if (ret < 0) { + wl1271_error("Could not add or replace key"); + goto out_sleep; + } + + /* the default WEP key needs to be configured at least once */ + if (key_type == KEY_WEP) { + ret = wl1271_cmd_set_default_wep_key(wl, + wl->default_key); + if (ret < 0) + goto out_sleep; + } + break; + + case DISABLE_KEY: + /* The wl1271 does not allow to remove unicast keys - they + will be cleared automatically on next CMD_JOIN. Ignore the + request silently, as we dont want the mac80211 to emit + an error message. 
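A little further up in this hunk, wl1271_op_prepare_multicast() builds a wl1271_filter_params list with GFP_ATOMIC and hands it back to mac80211 packed into the u64 multicast cookie; wl1271_op_configure_filter() later casts the cookie back to a pointer, programs the group-address table and frees it. The sketch below models only that pointer-in-u64 round trip. The struct, helper names and addresses are invented for illustration, and, like the driver, it assumes a pointer fits in an unsigned long.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MC_MAX   8        /* stand-in for ACX_MC_ADDRESS_GROUP_MAX */
#define ETH_ALEN 6

struct filter_params {    /* simplified wl1271_filter_params */
        int enabled;
        int mc_list_length;
        uint8_t mc_list[MC_MAX][ETH_ALEN];
};

/* "prepare" step: build the list and return it as an opaque u64 cookie */
static uint64_t prepare_multicast(uint8_t (*addrs)[ETH_ALEN], int count)
{
        struct filter_params *fp = calloc(1, sizeof(*fp));
        int i;

        if (!fp)
                return 0;

        fp->enabled = (count <= MC_MAX);
        if (fp->enabled)
                for (i = 0; i < count; i++)
                        memcpy(fp->mc_list[fp->mc_list_length++],
                               addrs[i], ETH_ALEN);

        return (uint64_t)(unsigned long)fp;
}

/* "configure" step: recover the pointer, use it, free it */
static void configure_filter(uint64_t multicast)
{
        struct filter_params *fp = (void *)(unsigned long)multicast;

        if (fp) {
                printf("filter enabled=%d, %d addresses\n",
                       fp->enabled, fp->mc_list_length);
                free(fp);
        }
}

int main(void)
{
        uint8_t addrs[2][ETH_ALEN] = {
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
                { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 },
        };

        configure_filter(prepare_multicast(addrs, 2));
        return 0;
}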
*/ + if (!is_broadcast_ether_addr(addr)) + break; + + ret = wl1271_cmd_set_key(wl, KEY_REMOVE, + key_conf->keyidx, key_type, + key_conf->keylen, key_conf->key, + addr, 0, 0); + if (ret < 0) { + wl1271_error("Could not remove key"); + goto out_sleep; + } + break; + + default: + wl1271_error("Unsupported key cmd 0x%x", cmd); + ret = -EOPNOTSUPP; + goto out_sleep; + + break; + } + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out_unlock: + mutex_unlock(&wl->mutex); + +out: + return ret; +} + +static int wl1271_op_hw_scan(struct ieee80211_hw *hw, + struct cfg80211_scan_request *req) +{ + struct wl1271 *wl = hw->priv; + int ret; + u8 *ssid = NULL; + size_t len = 0; + + wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan"); + + if (req->n_ssids) { + ssid = req->ssids[0].ssid; + len = req->ssids[0].ssid_len; + } + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + + if (wl1271_11a_enabled()) + ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0, + WL1271_SCAN_BAND_DUAL, 3); + else + ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0, + WL1271_SCAN_BAND_2_4_GHZ, 3); + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct wl1271 *wl = hw->priv; + int ret; + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + + ret = wl1271_acx_rts_threshold(wl, (u16) value); + if (ret < 0) + wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret); + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *beacon) +{ + u8 *ptr = beacon->data + + offsetof(struct ieee80211_mgmt, u.beacon.variable); + + /* find the location of the ssid in the beacon */ + while (ptr < beacon->data + beacon->len) { + if (ptr[0] == WLAN_EID_SSID) { + wl->ssid_len = ptr[1]; + memcpy(wl->ssid, ptr+2, wl->ssid_len); + return; + } + ptr += ptr[1]; + } + wl1271_error("ad-hoc beacon template has no SSID!\n"); +} + +static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + enum wl1271_cmd_ps_mode mode; + struct wl1271 *wl = hw->priv; + bool do_join = false; + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed"); + + mutex_lock(&wl->mutex); + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + + if (wl->bss_type == BSS_TYPE_IBSS) { + /* FIXME: This implements rudimentary ad-hoc support - + proper templates are on the wish list and notification + on when they change. This patch will update the templates + on every call to this function. 
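wl1271_ssid_set() just above walks the beacon's tagged parameters looking for WLAN_EID_SSID. Each 802.11 information element is a one-byte ID, a one-byte length and then that many bytes of payload, so a conventional walk advances by len + 2; the loop above advances by ptr[1] alone, which only lands on the next element header when the SSID happens to be the first IE (the common case for beacons), so it is worth comparing against later versions of this file. A standalone sketch of the conventional walk, with a made-up IE buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EID_SSID 0   /* value of WLAN_EID_SSID in the kernel headers */

/*
 * Walk a buffer of 802.11 information elements (id, len, payload) and
 * copy out the SSID.  Advancing by len + 2 skips the 2-byte element
 * header as well as the payload.
 */
static int find_ssid(const uint8_t *ies, size_t ies_len,
                     uint8_t *ssid, uint8_t *ssid_len)
{
        const uint8_t *ptr = ies;
        const uint8_t *end = ies + ies_len;

        while (ptr + 2 <= end && ptr + 2 + ptr[1] <= end) {
                if (ptr[0] == EID_SSID) {
                        *ssid_len = ptr[1];
                        memcpy(ssid, ptr + 2, ptr[1]);
                        return 0;
                }
                ptr += ptr[1] + 2;
        }
        return -1;
}

int main(void)
{
        /* hypothetical IE blob: SSID "wl12xx" followed by a rates element */
        const uint8_t ies[] = {
                0x00, 0x06, 'w', 'l', '1', '2', 'x', 'x',
                0x01, 0x01, 0x82,
        };
        uint8_t ssid[32 + 1] = { 0 };
        uint8_t len = 0;

        if (find_ssid(ies, sizeof(ies), ssid, &len) == 0)
                printf("ssid (%u bytes): %s\n", len, ssid);
        return 0;
}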
*/ + struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); + + if (beacon) { + struct ieee80211_hdr *hdr; + + wl1271_ssid_set(wl, beacon); + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, + beacon->data, + beacon->len); + + if (ret < 0) { + dev_kfree_skb(beacon); + goto out_sleep; + } + + hdr = (struct ieee80211_hdr *) beacon->data; + hdr->frame_control = cpu_to_le16( + IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + + ret = wl1271_cmd_template_set(wl, + CMD_TEMPL_PROBE_RESPONSE, + beacon->data, + beacon->len); + dev_kfree_skb(beacon); + if (ret < 0) + goto out_sleep; + + /* Need to update the SSID (for filtering etc) */ + do_join = true; + } + } + + if ((changed & BSS_CHANGED_BSSID) && + /* + * Now we know the correct bssid, so we send a new join command + * and enable the BSSID filter + */ + memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) { + wl->rx_config |= CFG_BSSID_FILTER_EN; + memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); + ret = wl1271_cmd_build_null_data(wl); + if (ret < 0) { + wl1271_warning("cmd buld null data failed %d", + ret); + goto out_sleep; + } + + /* Need to update the BSSID (for filtering etc) */ + do_join = true; + } + + if (changed & BSS_CHANGED_ASSOC) { + if (bss_conf->assoc) { + wl->aid = bss_conf->aid; + set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); + + /* + * with wl1271, we don't need to update the + * beacon_int and dtim_period, because the firmware + * updates it by itself when the first beacon is + * received after a join. + */ + ret = wl1271_cmd_build_ps_poll(wl, wl->aid); + if (ret < 0) + goto out_sleep; + + ret = wl1271_acx_aid(wl, wl->aid); + if (ret < 0) + goto out_sleep; + + /* If we want to go in PSM but we're not there yet */ + if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) && + !test_bit(WL1271_FLAG_PSM, &wl->flags)) { + mode = STATION_POWER_SAVE_MODE; + ret = wl1271_ps_set_mode(wl, mode, true); + if (ret < 0) + goto out_sleep; + } + } else { + /* use defaults when not associated */ + clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); + wl->aid = 0; + } + + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + if (bss_conf->use_short_slot) + ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); + else + ret = wl1271_acx_slot(wl, SLOT_TIME_LONG); + if (ret < 0) { + wl1271_warning("Set slot time failed %d", ret); + goto out_sleep; + } + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + if (bss_conf->use_short_preamble) + wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT); + else + wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG); + } + + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + if (bss_conf->use_cts_prot) + ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE); + else + ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE); + if (ret < 0) { + wl1271_warning("Set ctsprotect failed %d", ret); + goto out_sleep; + } + } + + if (do_join) { + ret = wl1271_cmd_join(wl); + if (ret < 0) { + wl1271_warning("cmd join failed %d", ret); + goto out_sleep; + } + set_bit(WL1271_FLAG_JOINED, &wl->flags); + } + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); +} + +static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct wl1271 *wl = hw->priv; + int ret; + + mutex_lock(&wl->mutex); + + wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); + + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + + ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue), + params->cw_min, params->cw_max, + params->aifs, params->txop); + if (ret < 0) + goto out_sleep; + + ret = 
wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), + CONF_CHANNEL_TYPE_EDCF, + wl1271_tx_get_queue(queue), + CONF_PS_SCHEME_LEGACY_PSPOLL, + CONF_ACK_POLICY_LEGACY, 0, 0); + if (ret < 0) + goto out_sleep; + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + + +/* can't be const, mac80211 writes to this */ +static struct ieee80211_rate wl1271_rates[] = { + { .bitrate = 10, + .hw_value = CONF_HW_BIT_RATE_1MBPS, + .hw_value_short = CONF_HW_BIT_RATE_1MBPS, }, + { .bitrate = 20, + .hw_value = CONF_HW_BIT_RATE_2MBPS, + .hw_value_short = CONF_HW_BIT_RATE_2MBPS, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = CONF_HW_BIT_RATE_5_5MBPS, + .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = CONF_HW_BIT_RATE_11MBPS, + .hw_value_short = CONF_HW_BIT_RATE_11MBPS, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60, + .hw_value = CONF_HW_BIT_RATE_6MBPS, + .hw_value_short = CONF_HW_BIT_RATE_6MBPS, }, + { .bitrate = 90, + .hw_value = CONF_HW_BIT_RATE_9MBPS, + .hw_value_short = CONF_HW_BIT_RATE_9MBPS, }, + { .bitrate = 120, + .hw_value = CONF_HW_BIT_RATE_12MBPS, + .hw_value_short = CONF_HW_BIT_RATE_12MBPS, }, + { .bitrate = 180, + .hw_value = CONF_HW_BIT_RATE_18MBPS, + .hw_value_short = CONF_HW_BIT_RATE_18MBPS, }, + { .bitrate = 240, + .hw_value = CONF_HW_BIT_RATE_24MBPS, + .hw_value_short = CONF_HW_BIT_RATE_24MBPS, }, + { .bitrate = 360, + .hw_value = CONF_HW_BIT_RATE_36MBPS, + .hw_value_short = CONF_HW_BIT_RATE_36MBPS, }, + { .bitrate = 480, + .hw_value = CONF_HW_BIT_RATE_48MBPS, + .hw_value_short = CONF_HW_BIT_RATE_48MBPS, }, + { .bitrate = 540, + .hw_value = CONF_HW_BIT_RATE_54MBPS, + .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, +}; + +/* can't be const, mac80211 writes to this */ +static struct ieee80211_channel wl1271_channels[] = { + { .hw_value = 1, .center_freq = 2412, .max_power = 25 }, + { .hw_value = 2, .center_freq = 2417, .max_power = 25 }, + { .hw_value = 3, .center_freq = 2422, .max_power = 25 }, + { .hw_value = 4, .center_freq = 2427, .max_power = 25 }, + { .hw_value = 5, .center_freq = 2432, .max_power = 25 }, + { .hw_value = 6, .center_freq = 2437, .max_power = 25 }, + { .hw_value = 7, .center_freq = 2442, .max_power = 25 }, + { .hw_value = 8, .center_freq = 2447, .max_power = 25 }, + { .hw_value = 9, .center_freq = 2452, .max_power = 25 }, + { .hw_value = 10, .center_freq = 2457, .max_power = 25 }, + { .hw_value = 11, .center_freq = 2462, .max_power = 25 }, + { .hw_value = 12, .center_freq = 2467, .max_power = 25 }, + { .hw_value = 13, .center_freq = 2472, .max_power = 25 }, +}; + +/* can't be const, mac80211 writes to this */ +static struct ieee80211_supported_band wl1271_band_2ghz = { + .channels = wl1271_channels, + .n_channels = ARRAY_SIZE(wl1271_channels), + .bitrates = wl1271_rates, + .n_bitrates = ARRAY_SIZE(wl1271_rates), +}; + +/* 5 GHz data rates for WL1273 */ +static struct ieee80211_rate wl1271_rates_5ghz[] = { + { .bitrate = 60, + .hw_value = CONF_HW_BIT_RATE_6MBPS, + .hw_value_short = CONF_HW_BIT_RATE_6MBPS, }, + { .bitrate = 90, + .hw_value = CONF_HW_BIT_RATE_9MBPS, + .hw_value_short = CONF_HW_BIT_RATE_9MBPS, }, + { .bitrate = 120, + .hw_value = CONF_HW_BIT_RATE_12MBPS, + .hw_value_short = CONF_HW_BIT_RATE_12MBPS, }, + { .bitrate = 180, + .hw_value = CONF_HW_BIT_RATE_18MBPS, + .hw_value_short = CONF_HW_BIT_RATE_18MBPS, }, + { .bitrate = 240, + .hw_value = CONF_HW_BIT_RATE_24MBPS, + .hw_value_short = 
CONF_HW_BIT_RATE_24MBPS, }, + { .bitrate = 360, + .hw_value = CONF_HW_BIT_RATE_36MBPS, + .hw_value_short = CONF_HW_BIT_RATE_36MBPS, }, + { .bitrate = 480, + .hw_value = CONF_HW_BIT_RATE_48MBPS, + .hw_value_short = CONF_HW_BIT_RATE_48MBPS, }, + { .bitrate = 540, + .hw_value = CONF_HW_BIT_RATE_54MBPS, + .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, +}; + +/* 5 GHz band channels for WL1273 */ +static struct ieee80211_channel wl1271_channels_5ghz[] = { + { .hw_value = 183, .center_freq = 4915}, + { .hw_value = 184, .center_freq = 4920}, + { .hw_value = 185, .center_freq = 4925}, + { .hw_value = 187, .center_freq = 4935}, + { .hw_value = 188, .center_freq = 4940}, + { .hw_value = 189, .center_freq = 4945}, + { .hw_value = 192, .center_freq = 4960}, + { .hw_value = 196, .center_freq = 4980}, + { .hw_value = 7, .center_freq = 5035}, + { .hw_value = 8, .center_freq = 5040}, + { .hw_value = 9, .center_freq = 5045}, + { .hw_value = 11, .center_freq = 5055}, + { .hw_value = 12, .center_freq = 5060}, + { .hw_value = 16, .center_freq = 5080}, + { .hw_value = 34, .center_freq = 5170}, + { .hw_value = 36, .center_freq = 5180}, + { .hw_value = 38, .center_freq = 5190}, + { .hw_value = 40, .center_freq = 5200}, + { .hw_value = 42, .center_freq = 5210}, + { .hw_value = 44, .center_freq = 5220}, + { .hw_value = 46, .center_freq = 5230}, + { .hw_value = 48, .center_freq = 5240}, + { .hw_value = 52, .center_freq = 5260}, + { .hw_value = 56, .center_freq = 5280}, + { .hw_value = 60, .center_freq = 5300}, + { .hw_value = 64, .center_freq = 5320}, + { .hw_value = 100, .center_freq = 5500}, + { .hw_value = 104, .center_freq = 5520}, + { .hw_value = 108, .center_freq = 5540}, + { .hw_value = 112, .center_freq = 5560}, + { .hw_value = 116, .center_freq = 5580}, + { .hw_value = 120, .center_freq = 5600}, + { .hw_value = 124, .center_freq = 5620}, + { .hw_value = 128, .center_freq = 5640}, + { .hw_value = 132, .center_freq = 5660}, + { .hw_value = 136, .center_freq = 5680}, + { .hw_value = 140, .center_freq = 5700}, + { .hw_value = 149, .center_freq = 5745}, + { .hw_value = 153, .center_freq = 5765}, + { .hw_value = 157, .center_freq = 5785}, + { .hw_value = 161, .center_freq = 5805}, + { .hw_value = 165, .center_freq = 5825}, +}; + + +static struct ieee80211_supported_band wl1271_band_5ghz = { + .channels = wl1271_channels_5ghz, + .n_channels = ARRAY_SIZE(wl1271_channels_5ghz), + .bitrates = wl1271_rates_5ghz, + .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), +}; + +static const struct ieee80211_ops wl1271_ops = { + .start = wl1271_op_start, + .stop = wl1271_op_stop, + .add_interface = wl1271_op_add_interface, + .remove_interface = wl1271_op_remove_interface, + .config = wl1271_op_config, +/* .config_interface = wl1271_op_config_interface, */ + .prepare_multicast = wl1271_op_prepare_multicast, + .configure_filter = wl1271_op_configure_filter, + .tx = wl1271_op_tx, + .set_key = wl1271_op_set_key, + .hw_scan = wl1271_op_hw_scan, + .bss_info_changed = wl1271_op_bss_info_changed, + .set_rts_threshold = wl1271_op_set_rts_threshold, + .conf_tx = wl1271_op_conf_tx, + CFG80211_TESTMODE_CMD(wl1271_tm_cmd) +}; + +static int wl1271_register_hw(struct wl1271 *wl) +{ + int ret; + + if (wl->mac80211_registered) + return 0; + + SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); + + ret = ieee80211_register_hw(wl->hw); + if (ret < 0) { + wl1271_error("unable to register mac80211 hw: %d", ret); + return ret; + } + + wl->mac80211_registered = true; + + wl1271_notice("loaded"); + + return 0; +} + +static int wl1271_init_ieee80211(struct 
wl1271 *wl) +{ + /* The tx descriptor buffer and the TKIP space. */ + wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE + + sizeof(struct wl1271_tx_hw_descr); + + /* unit us */ + /* FIXME: find a proper value */ + wl->hw->channel_change_time = 10000; + + wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM | + IEEE80211_HW_BEACON_FILTER | + IEEE80211_HW_SUPPORTS_PS; + + wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + wl->hw->wiphy->max_scan_ssids = 1; + wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; + + if (wl1271_11a_enabled()) + wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz; + + SET_IEEE80211_DEV(wl->hw, &wl->spi->dev); + + return 0; +} + +static void wl1271_device_release(struct device *dev) +{ + +} + +static struct platform_device wl1271_device = { + .name = "wl1271", + .id = -1, + + /* device model insists to have a release function */ + .dev = { + .release = wl1271_device_release, + }, +}; + +#define WL1271_DEFAULT_CHANNEL 0 + +static struct ieee80211_hw *wl1271_alloc_hw(void) +{ + struct ieee80211_hw *hw; + struct wl1271 *wl; + int i; + + hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); + if (!hw) { + wl1271_error("could not alloc ieee80211_hw"); + return ERR_PTR(-ENOMEM); + } + + wl = hw->priv; + memset(wl, 0, sizeof(*wl)); + + INIT_LIST_HEAD(&wl->list); + + wl->hw = hw; + + skb_queue_head_init(&wl->tx_queue); + + INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); + wl->channel = WL1271_DEFAULT_CHANNEL; + wl->default_key = 0; + wl->rx_counter = 0; + wl->rx_config = WL1271_DEFAULT_RX_CONFIG; + wl->rx_filter = WL1271_DEFAULT_RX_FILTER; + wl->psm_entry_retry = 0; + wl->power_level = WL1271_DEFAULT_POWER_LEVEL; + wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC; + wl->rate_set = CONF_TX_RATE_MASK_BASIC; + wl->sta_rate_set = 0; + wl->band = IEEE80211_BAND_2GHZ; + wl->vif = NULL; + wl->flags = 0; + + for (i = 0; i < ACX_TX_DESCRIPTORS; i++) + wl->tx_frames[i] = NULL; + + spin_lock_init(&wl->wl_lock); + + wl->state = WL1271_STATE_OFF; + mutex_init(&wl->mutex); + + /* Apply default driver configuration. 
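The wl1271_channels[] table earlier in this hunk lists 2.4 GHz channels 1-13 with centre frequencies 2412-2472 MHz, which is the standard 5 MHz spacing anchored at 2407 MHz. The snippet below simply recomputes the table entries from that rule as a cross-check; it is an illustration, not driver code, and wl1271_op_config relies on mac80211's ieee80211_frequency_to_channel() for the reverse mapping.

#include <stdio.h>

/* centre frequency rule for 2.4 GHz channels 1..13: 2407 + 5 * channel, in MHz */
static int chan_to_freq(int chan)
{
        return 2407 + 5 * chan;
}

int main(void)
{
        int chan;

        for (chan = 1; chan <= 13; chan++)
                printf("hw_value %2d -> center_freq %d MHz\n",
                       chan, chan_to_freq(chan));
        return 0;
}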
*/ + wl1271_conf_init(wl); + + return hw; +} + +int wl1271_free_hw(struct wl1271 *wl) +{ + ieee80211_unregister_hw(wl->hw); + + wl1271_debugfs_exit(wl); + + kfree(wl->target_mem_map); + vfree(wl->fw); + wl->fw = NULL; + kfree(wl->nvs); + wl->nvs = NULL; + + kfree(wl->fw_status); + kfree(wl->tx_res_if); + + ieee80211_free_hw(wl->hw); + + return 0; +} + +static int __devinit wl1271_probe(struct spi_device *spi) +{ + struct wl12xx_platform_data *pdata; + struct ieee80211_hw *hw; + struct wl1271 *wl; + int ret; + + pdata = spi->dev.platform_data; + if (!pdata) { + wl1271_error("no platform data"); + return -ENODEV; + } + + hw = wl1271_alloc_hw(); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + wl = hw->priv; + + dev_set_drvdata(&spi->dev, wl); + wl->spi = spi; + + /* This is the only SPI value that we need to set here, the rest + * comes from the board-peripherals file */ + spi->bits_per_word = 32; + + ret = spi_setup(spi); + if (ret < 0) { + wl1271_error("spi_setup failed"); + goto out_free; + } + + wl->set_power = pdata->set_power; + if (!wl->set_power) { + wl1271_error("set power function missing in platform data"); + ret = -ENODEV; + goto out_free; + } + + wl->irq = spi->irq; + if (wl->irq < 0) { + wl1271_error("irq missing in platform data"); + ret = -ENODEV; + goto out_free; + } + + ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl); + if (ret < 0) { + wl1271_error("request_irq() failed: %d", ret); + goto out_free; + } + + set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); + + disable_irq(wl->irq); + + ret = platform_device_register(&wl1271_device); + if (ret) { + wl1271_error("couldn't register platform device"); + goto out_irq; + } + dev_set_drvdata(&wl1271_device.dev, wl); + + ret = wl1271_init_ieee80211(wl); + if (ret) + goto out_platform; + + ret = wl1271_register_hw(wl); + if (ret) + goto out_platform; + + wl1271_debugfs_init(wl); + + wl1271_notice("initialized"); + + return 0; + + out_platform: + platform_device_unregister(&wl1271_device); + + out_irq: + free_irq(wl->irq, wl); + + out_free: + ieee80211_free_hw(hw); + + return ret; +} + +static int __devexit wl1271_remove(struct spi_device *spi) +{ + struct wl1271 *wl = dev_get_drvdata(&spi->dev); + + platform_device_unregister(&wl1271_device); + free_irq(wl->irq, wl); + + wl1271_free_hw(wl); + + return 0; +} + + +static struct spi_driver wl1271_spi_driver = { + .driver = { + .name = "wl1271", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + + .probe = wl1271_probe, + .remove = __devexit_p(wl1271_remove), +}; + +static int __init wl1271_init(void) +{ + int ret; + + ret = spi_register_driver(&wl1271_spi_driver); + if (ret < 0) { + wl1271_error("failed to register spi driver: %d", ret); + goto out; + } + +out: + return ret; +} + +static void __exit wl1271_exit(void) +{ + spi_unregister_driver(&wl1271_spi_driver); + + wl1271_notice("unloaded"); +} + +module_init(wl1271_init); +module_exit(wl1271_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Luciano Coelho "); +MODULE_AUTHOR("Juuso Oikarinen "); +MODULE_FIRMWARE(WL1271_FW_NAME); diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c new file mode 100644 index 0000000..e2b1ebf --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_ps.c @@ -0,0 +1,165 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free 
Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wl1271_reg.h" +#include "wl1271_ps.h" +#include "wl1271_spi.h" +#include "wl1271_io.h" + +#define WL1271_WAKEUP_TIMEOUT 500 + +void wl1271_elp_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct wl1271 *wl; + + dwork = container_of(work, struct delayed_work, work); + wl = container_of(dwork, struct wl1271, elp_work); + + wl1271_debug(DEBUG_PSM, "elp work"); + + mutex_lock(&wl->mutex); + + if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) || + !test_bit(WL1271_FLAG_PSM, &wl->flags)) + goto out; + + wl1271_debug(DEBUG_PSM, "chip to elp"); + wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); + set_bit(WL1271_FLAG_IN_ELP, &wl->flags); + +out: + mutex_unlock(&wl->mutex); +} + +#define ELP_ENTRY_DELAY 5 + +/* Routines to toggle sleep mode while in ELP */ +void wl1271_ps_elp_sleep(struct wl1271 *wl) +{ + if (test_bit(WL1271_FLAG_PSM, &wl->flags)) { + cancel_delayed_work(&wl->elp_work); + ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, + msecs_to_jiffies(ELP_ENTRY_DELAY)); + } +} + +int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake) +{ + DECLARE_COMPLETION_ONSTACK(compl); + unsigned long flags; + int ret; + u32 start_time = jiffies; + bool pending = false; + + if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) + return 0; + + wl1271_debug(DEBUG_PSM, "waking up chip from elp"); + + /* + * The spinlock is required here to synchronize both the work and + * the completion variable in one entity. 
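wl1271_ps_elp_sleep() and wl1271_ps_elp_wakeup() above gate the ELP (extremely low power) state on two flags: the chip is only put to ELP while power-save mode is active, and WL1271_FLAG_IN_ELP brackets the ELPCTRL_SLEEP / ELPCTRL_WAKE_UP register writes, with the wakeup path waiting up to WL1271_WAKEUP_TIMEOUT (500 ms) for the firmware to respond. The sketch below is a simplified, single-threaded model of that flag handshake; the register write is just a stored variable and the completion/timeout machinery is left out.

#include <stdint.h>
#include <stdio.h>

#define ELPCTRL_WAKE_UP 0x1
#define ELPCTRL_SLEEP   0x0

#define FLAG_PSM    (1u << 0)   /* stand-ins for WL1271_FLAG_PSM / _IN_ELP */
#define FLAG_IN_ELP (1u << 1)

struct model {
        unsigned int flags;
        uint32_t elp_ctrl;      /* models HW_ACCESS_ELP_CTRL_REG_ADDR */
};

static void elp_sleep(struct model *m)
{
        /* only drop to ELP while power-save mode is active */
        if (!(m->flags & FLAG_PSM) || (m->flags & FLAG_IN_ELP))
                return;
        m->elp_ctrl = ELPCTRL_SLEEP;
        m->flags |= FLAG_IN_ELP;
}

static void elp_wakeup(struct model *m)
{
        if (!(m->flags & FLAG_IN_ELP))
                return;                 /* already awake: nothing to do */
        m->elp_ctrl = ELPCTRL_WAKE_UP;  /* the real driver then waits for an IRQ */
        m->flags &= ~FLAG_IN_ELP;
}

int main(void)
{
        struct model m = { .flags = FLAG_PSM, .elp_ctrl = ELPCTRL_WAKE_UP };

        elp_sleep(&m);
        printf("after sleep:  in_elp=%d ctrl=0x%x\n",
               !!(m.flags & FLAG_IN_ELP), (unsigned)m.elp_ctrl);
        elp_wakeup(&m);
        printf("after wakeup: in_elp=%d ctrl=0x%x\n",
               !!(m.flags & FLAG_IN_ELP), (unsigned)m.elp_ctrl);
        return 0;
}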
+ */ + spin_lock_irqsave(&wl->wl_lock, flags); + if (work_pending(&wl->irq_work) || chip_awake) + pending = true; + else + wl->elp_compl = &compl; + spin_unlock_irqrestore(&wl->wl_lock, flags); + + wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); + + if (!pending) { + ret = wait_for_completion_timeout( + &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); + if (ret == 0) { + wl1271_error("ELP wakeup timeout!"); + ret = -ETIMEDOUT; + goto err; + } else if (ret < 0) { + wl1271_error("ELP wakeup completion error."); + goto err; + } + } + + clear_bit(WL1271_FLAG_IN_ELP, &wl->flags); + + wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", + jiffies_to_msecs(jiffies - start_time)); + goto out; + +err: + spin_lock_irqsave(&wl->wl_lock, flags); + wl->elp_compl = NULL; + spin_unlock_irqrestore(&wl->wl_lock, flags); + return ret; + +out: + return 0; +} + +int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, + bool send) +{ + int ret; + + switch (mode) { + case STATION_POWER_SAVE_MODE: + wl1271_debug(DEBUG_PSM, "entering psm"); + + ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE, send); + if (ret < 0) + return ret; + + set_bit(WL1271_FLAG_PSM, &wl->flags); + break; + case STATION_ACTIVE_MODE: + default: + wl1271_debug(DEBUG_PSM, "leaving psm"); + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + return ret; + + /* disable beacon early termination */ + ret = wl1271_acx_bet_enable(wl, false); + if (ret < 0) + return ret; + + /* disable beacon filtering */ + ret = wl1271_acx_beacon_filter_opt(wl, false); + if (ret < 0) + return ret; + + ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE, send); + if (ret < 0) + return ret; + + clear_bit(WL1271_FLAG_PSM, &wl->flags); + break; + } + + return ret; +} + + diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/wl1271_ps.h new file mode 100644 index 0000000..940276f --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_ps.h @@ -0,0 +1,36 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_PS_H__ +#define __WL1271_PS_H__ + +#include "wl1271.h" +#include "wl1271_acx.h" + +int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, + bool send); +void wl1271_ps_elp_sleep(struct wl1271 *wl); +int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); +void wl1271_elp_work(struct work_struct *work); + +#endif /* __WL1271_PS_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h new file mode 100644 index 0000000..9909607 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_reg.h @@ -0,0 +1,614 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. 
+ * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __REG_H__ +#define __REG_H__ + +#include + +#define REGISTERS_BASE 0x00300000 +#define DRPW_BASE 0x00310000 + +#define REGISTERS_DOWN_SIZE 0x00008800 +#define REGISTERS_WORK_SIZE 0x0000b000 + +#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC +#define FW_STATUS_ADDR (0x14FC0 + 0xA000) + +/* ELP register commands */ +#define ELPCTRL_WAKE_UP 0x1 +#define ELPCTRL_WAKE_UP_WLAN_READY 0x5 +#define ELPCTRL_SLEEP 0x0 +/* ELP WLAN_READY bit */ +#define ELPCTRL_WLAN_READY 0x2 + +/*=============================================== + Host Software Reset - 32bit RW + ------------------------------------------ + [31:1] Reserved + 0 SOFT_RESET Soft Reset - When this bit is set, + it holds the Wlan hardware in a soft reset state. + This reset disables all MAC and baseband processor + clocks except the CardBus/PCI interface clock. + It also initializes all MAC state machines except + the host interface. It does not reload the + contents of the EEPROM. When this bit is cleared + (not self-clearing), the Wlan hardware + exits the software reset state. +===============================================*/ +#define ACX_REG_SLV_SOFT_RESET (REGISTERS_BASE + 0x0000) + +#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008) +#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c) +#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018) + +#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474) +#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478) + +/*============================================= + Host Interrupt Mask Register - 32bit (RW) + ------------------------------------------ + Setting a bit in this register masks the + corresponding interrupt to the host. + 0 - RX0 - Rx first dubble buffer Data Interrupt + 1 - TXD - Tx Data Interrupt + 2 - TXXFR - Tx Transfer Interrupt + 3 - RX1 - Rx second dubble buffer Data Interrupt + 4 - RXXFR - Rx Transfer Interrupt + 5 - EVENT_A - Event Mailbox interrupt + 6 - EVENT_B - Event Mailbox interrupt + 7 - WNONHST - Wake On Host Interrupt + 8 - TRACE_A - Debug Trace interrupt + 9 - TRACE_B - Debug Trace interrupt + 10 - CDCMP - Command Complete Interrupt + 11 - + 12 - + 13 - + 14 - ICOMP - Initialization Complete Interrupt + 16 - SG SE - Soft Gemini - Sense enable interrupt + 17 - SG SD - Soft Gemini - Sense disable interrupt + 18 - - + 19 - - + 20 - - + 21- - + Default: 0x0001 +*==============================================*/ +#define ACX_REG_INTERRUPT_MASK (REGISTERS_BASE + 0x04DC) + +/*============================================= + Host Interrupt Mask Set 16bit, (Write only) + ------------------------------------------ + Setting a bit in this register sets + the corresponding bin in ACX_HINT_MASK register + without effecting the mask + state of other bits (0 = no effect). 
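Every register in the block above is expressed as REGISTERS_BASE (0x00300000) plus a fixed offset, with the firmware status block at a separate absolute address. The snippet below only recomputes a few of those addresses from constants copied out of the header, as a quick way to see the resulting absolute values.

#include <stdio.h>

#define REGISTERS_BASE         0x00300000
#define ACX_REG_SLV_SOFT_RESET (REGISTERS_BASE + 0x0000)
#define WL1271_SLV_MEM_DATA    (REGISTERS_BASE + 0x0018)
#define ACX_REG_INTERRUPT_MASK (REGISTERS_BASE + 0x04DC)
#define FW_STATUS_ADDR         (0x14FC0 + 0xA000)

int main(void)
{
        printf("SLV_SOFT_RESET  0x%08x\n", (unsigned)ACX_REG_SLV_SOFT_RESET);
        printf("SLV_MEM_DATA    0x%08x\n", (unsigned)WL1271_SLV_MEM_DATA);
        printf("INTERRUPT_MASK  0x%08x\n", (unsigned)ACX_REG_INTERRUPT_MASK);
        printf("FW_STATUS_ADDR  0x%08x\n", (unsigned)FW_STATUS_ADDR);
        return 0;
}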
+==============================================*/ +#define ACX_REG_HINT_MASK_SET (REGISTERS_BASE + 0x04E0) + +/*============================================= + Host Interrupt Mask Clear 16bit,(Write only) + ------------------------------------------ + Setting a bit in this register clears + the corresponding bin in ACX_HINT_MASK register + without effecting the mask + state of other bits (0 = no effect). +=============================================*/ +#define ACX_REG_HINT_MASK_CLR (REGISTERS_BASE + 0x04E4) + +/*============================================= + Host Interrupt Status Nondestructive Read + 16bit,(Read only) + ------------------------------------------ + The host can read this register to determine + which interrupts are active. + Reading this register doesn't + effect its content. +=============================================*/ +#define ACX_REG_INTERRUPT_NO_CLEAR (REGISTERS_BASE + 0x04E8) + +/*============================================= + Host Interrupt Status Clear on Read Register + 16bit,(Read only) + ------------------------------------------ + The host can read this register to determine + which interrupts are active. + Reading this register clears it, + thus making all interrupts inactive. +==============================================*/ +#define ACX_REG_INTERRUPT_CLEAR (REGISTERS_BASE + 0x04F8) + +/*============================================= + Host Interrupt Acknowledge Register + 16bit,(Write only) + ------------------------------------------ + The host can set individual bits in this + register to clear (acknowledge) the corresp. + interrupt status bits in the HINT_STS_CLR and + HINT_STS_ND registers, thus making the + assotiated interrupt inactive. (0-no effect) +==============================================*/ +#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0) + +#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538) + +/* Device Configuration registers*/ +#define SOR_CFG (REGISTERS_BASE + 0x0800) + +/* Embedded ARM CPU Control */ + +/*=============================================== + Halt eCPU - 32bit RW + ------------------------------------------ + 0 HALT_ECPU Halt Embedded CPU - This bit is the + compliment of bit 1 (MDATA2) in the SOR_CFG register. + During a hardware reset, this bit holds + the inverse of MDATA2. + When downloading firmware from the host, + set this bit (pull down MDATA2). + The host clears this bit after downloading the firmware into + zero-wait-state SSRAM. + When loading firmware from Flash, clear this bit (pull up MDATA2) + so that the eCPU can run the bootloader code in Flash + HALT_ECPU eCPU State + -------------------- + 1 halt eCPU + 0 enable eCPU + ===============================================*/ +#define ACX_REG_ECPU_CONTROL (REGISTERS_BASE + 0x0804) + +#define HI_CFG (REGISTERS_BASE + 0x0808) + +/*=============================================== + EEPROM Burst Read Start - 32bit RW + ------------------------------------------ + [31:1] Reserved + 0 ACX_EE_START - EEPROM Burst Read Start 0 + Setting this bit starts a burst read from + the external EEPROM. + If this bit is set (after reset) before an EEPROM read/write, + the burst read starts at EEPROM address 0. + Otherwise, it starts at the address + following the address of the previous access. + TheWlan hardware hardware clears this bit automatically. 
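ACX_REG_HINT_MASK_SET and ACX_REG_HINT_MASK_CLR above follow the usual set/clear register idiom: writing a 1 to a bit position sets (or clears) that bit of the interrupt mask while 0 bits leave it untouched, so the host can update the mask without a read-modify-write cycle. A small model of that behaviour, with the register reduced to a plain variable:

#include <stdint.h>
#include <stdio.h>

static uint16_t hint_mask;               /* models ACX_HINT_MASK */

static void hint_mask_set(uint16_t val)  /* models a write to ..._MASK_SET */
{
        hint_mask |= val;                /* 1 bits set, 0 bits have no effect */
}

static void hint_mask_clr(uint16_t val)  /* models a write to ..._MASK_CLR */
{
        hint_mask &= ~val;               /* 1 bits clear, 0 bits have no effect */
}

int main(void)
{
        hint_mask_set(0x0403);           /* mask RX0, TXD and CDCMP (bits 0, 1, 10) */
        printf("mask after set: 0x%04x\n", hint_mask);

        hint_mask_clr(0x0002);           /* unmask TXD only; other bits unaffected */
        printf("mask after clr: 0x%04x\n", hint_mask);
        return 0;
}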
+ + Default: 0x00000000 +*================================================*/ +#define ACX_REG_EE_START (REGISTERS_BASE + 0x080C) + +#define OCP_POR_CTR (REGISTERS_BASE + 0x09B4) +#define OCP_DATA_WRITE (REGISTERS_BASE + 0x09B8) +#define OCP_DATA_READ (REGISTERS_BASE + 0x09BC) +#define OCP_CMD (REGISTERS_BASE + 0x09C0) + +#define WL1271_HOST_WR_ACCESS (REGISTERS_BASE + 0x09F8) + +#define CHIP_ID_B (REGISTERS_BASE + 0x5674) + +#define CHIP_ID_1271_PG10 (0x4030101) +#define CHIP_ID_1271_PG20 (0x4030111) + +#define ENABLE (REGISTERS_BASE + 0x5450) + +/* Power Management registers */ +#define ELP_CFG_MODE (REGISTERS_BASE + 0x5804) +#define ELP_CMD (REGISTERS_BASE + 0x5808) +#define PLL_CAL_TIME (REGISTERS_BASE + 0x5810) +#define CLK_REQ_TIME (REGISTERS_BASE + 0x5814) +#define CLK_BUF_TIME (REGISTERS_BASE + 0x5818) + +#define CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820) + +/* Scratch Pad registers*/ +#define SCR_PAD0 (REGISTERS_BASE + 0x5608) +#define SCR_PAD1 (REGISTERS_BASE + 0x560C) +#define SCR_PAD2 (REGISTERS_BASE + 0x5610) +#define SCR_PAD3 (REGISTERS_BASE + 0x5614) +#define SCR_PAD4 (REGISTERS_BASE + 0x5618) +#define SCR_PAD4_SET (REGISTERS_BASE + 0x561C) +#define SCR_PAD4_CLR (REGISTERS_BASE + 0x5620) +#define SCR_PAD5 (REGISTERS_BASE + 0x5624) +#define SCR_PAD5_SET (REGISTERS_BASE + 0x5628) +#define SCR_PAD5_CLR (REGISTERS_BASE + 0x562C) +#define SCR_PAD6 (REGISTERS_BASE + 0x5630) +#define SCR_PAD7 (REGISTERS_BASE + 0x5634) +#define SCR_PAD8 (REGISTERS_BASE + 0x5638) +#define SCR_PAD9 (REGISTERS_BASE + 0x563C) + +/* Spare registers*/ +#define SPARE_A1 (REGISTERS_BASE + 0x0994) +#define SPARE_A2 (REGISTERS_BASE + 0x0998) +#define SPARE_A3 (REGISTERS_BASE + 0x099C) +#define SPARE_A4 (REGISTERS_BASE + 0x09A0) +#define SPARE_A5 (REGISTERS_BASE + 0x09A4) +#define SPARE_A6 (REGISTERS_BASE + 0x09A8) +#define SPARE_A7 (REGISTERS_BASE + 0x09AC) +#define SPARE_A8 (REGISTERS_BASE + 0x09B0) +#define SPARE_B1 (REGISTERS_BASE + 0x5420) +#define SPARE_B2 (REGISTERS_BASE + 0x5424) +#define SPARE_B3 (REGISTERS_BASE + 0x5428) +#define SPARE_B4 (REGISTERS_BASE + 0x542C) +#define SPARE_B5 (REGISTERS_BASE + 0x5430) +#define SPARE_B6 (REGISTERS_BASE + 0x5434) +#define SPARE_B7 (REGISTERS_BASE + 0x5438) +#define SPARE_B8 (REGISTERS_BASE + 0x543C) + +#define PLL_PARAMETERS (REGISTERS_BASE + 0x6040) +#define WU_COUNTER_PAUSE (REGISTERS_BASE + 0x6008) +#define WELP_ARM_COMMAND (REGISTERS_BASE + 0x6100) +#define DRPW_SCRATCH_START (DRPW_BASE + 0x002C) + + +#define ACX_SLV_SOFT_RESET_BIT BIT(1) +#define ACX_REG_EEPROM_START_BIT BIT(1) + +/* Command/Information Mailbox Pointers */ + +/*=============================================== + Command Mailbox Pointer - 32bit RW + ------------------------------------------ + This register holds the start address of + the command mailbox located in the Wlan hardware memory. + The host must read this pointer after a reset to + find the location of the command mailbox. + The Wlan hardware initializes the command mailbox + pointer with the default address of the command mailbox. + The command mailbox pointer is not valid until after + the host receives the Init Complete interrupt from + the Wlan hardware. + ===============================================*/ +#define REG_COMMAND_MAILBOX_PTR (SCR_PAD0) + +/*=============================================== + Information Mailbox Pointer - 32bit RW + ------------------------------------------ + This register holds the start address of + the information mailbox located in the Wlan hardware memory. 
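Per the comment above, REG_COMMAND_MAILBOX_PTR is simply scratch pad SCR_PAD0: after the Init Complete interrupt the host reads it to learn where the command mailbox lives in chip memory, and REG_EVENT_MAILBOX_PTR (defined just below as SCR_PAD1) works the same way for the event mailbox. A toy model of that discovery step follows; the register file and the mailbox addresses in it are made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* toy "register file": index 0 = SCR_PAD0, index 1 = SCR_PAD1 */
static uint32_t scr_pad[2];
static int init_complete;

static int read_mailbox_ptrs(uint32_t *cmd_mbox, uint32_t *event_mbox)
{
        /* the pointers are only valid after the Init Complete interrupt */
        if (!init_complete)
                return -1;
        *cmd_mbox = scr_pad[0];
        *event_mbox = scr_pad[1];
        return 0;
}

int main(void)
{
        uint32_t cmd, evt;

        /* pretend firmware booted and published (made-up) mailbox addresses */
        scr_pad[0] = 0x00037000;
        scr_pad[1] = 0x00037400;
        init_complete = 1;

        if (read_mailbox_ptrs(&cmd, &evt) == 0)
                printf("cmd mailbox @0x%08x, event mailbox @0x%08x\n",
                       (unsigned)cmd, (unsigned)evt);
        return 0;
}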
+ The host must read this pointer after a reset to find + the location of the information mailbox. + The Wlan hardware initializes the information mailbox pointer + with the default address of the information mailbox. + The information mailbox pointer is not valid + until after the host receives the Init Complete interrupt from + the Wlan hardware. + ===============================================*/ +#define REG_EVENT_MAILBOX_PTR (SCR_PAD1) + + +/* Misc */ + +#define REG_ENABLE_TX_RX (ENABLE) +/* + * Rx configuration (filter) information element + * --------------------------------------------- + */ +#define REG_RX_CONFIG (RX_CFG) +#define REG_RX_FILTER (RX_FILTER_CFG) + + +#define RX_CFG_ENABLE_PHY_HEADER_PLCP 0x0002 + +/* promiscuous - receives all valid frames */ +#define RX_CFG_PROMISCUOUS 0x0008 + +/* receives frames from any BSSID */ +#define RX_CFG_BSSID 0x0020 + +/* receives frames destined to any MAC address */ +#define RX_CFG_MAC 0x0010 + +#define RX_CFG_ENABLE_ONLY_MY_DEST_MAC 0x0010 +#define RX_CFG_ENABLE_ANY_DEST_MAC 0x0000 +#define RX_CFG_ENABLE_ONLY_MY_BSSID 0x0020 +#define RX_CFG_ENABLE_ANY_BSSID 0x0000 + +/* discards all broadcast frames */ +#define RX_CFG_DISABLE_BCAST 0x0200 + +#define RX_CFG_ENABLE_ONLY_MY_SSID 0x0400 +#define RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR 0x0800 +#define RX_CFG_COPY_RX_STATUS 0x2000 +#define RX_CFG_TSF 0x10000 + +#define RX_CONFIG_OPTION_ANY_DST_MY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \ + RX_CFG_ENABLE_ONLY_MY_BSSID) + +#define RX_CONFIG_OPTION_MY_DST_ANY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\ + | RX_CFG_ENABLE_ANY_BSSID) + +#define RX_CONFIG_OPTION_ANY_DST_ANY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \ + RX_CFG_ENABLE_ANY_BSSID) + +#define RX_CONFIG_OPTION_MY_DST_MY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\ + | RX_CFG_ENABLE_ONLY_MY_BSSID) + +#define RX_CONFIG_OPTION_FOR_SCAN (RX_CFG_ENABLE_PHY_HEADER_PLCP \ + | RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR \ + | RX_CFG_COPY_RX_STATUS | RX_CFG_TSF) + +#define RX_CONFIG_OPTION_FOR_MEASUREMENT (RX_CFG_ENABLE_ANY_DEST_MAC) + +#define RX_CONFIG_OPTION_FOR_JOIN (RX_CFG_ENABLE_ONLY_MY_BSSID | \ + RX_CFG_ENABLE_ONLY_MY_DEST_MAC) + +#define RX_CONFIG_OPTION_FOR_IBSS_JOIN (RX_CFG_ENABLE_ONLY_MY_SSID | \ + RX_CFG_ENABLE_ONLY_MY_DEST_MAC) + +#define RX_FILTER_OPTION_DEF (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\ + | CFG_RX_CTL_EN | CFG_RX_BCN_EN\ + | CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) + +#define RX_FILTER_OPTION_FILTER_ALL 0 + +#define RX_FILTER_OPTION_DEF_PRSP_BCN (CFG_RX_PRSP_EN | CFG_RX_MGMT_EN\ + | CFG_RX_RCTS_ACK | CFG_RX_BCN_EN) + +#define RX_FILTER_OPTION_JOIN (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\ + | CFG_RX_BCN_EN | CFG_RX_AUTH_EN\ + | CFG_RX_ASSOC_EN | CFG_RX_RCTS_ACK\ + | CFG_RX_PRSP_EN) + + +/*=============================================== + EEPROM Read/Write Request 32bit RW + ------------------------------------------ + 1 EE_READ - EEPROM Read Request 1 - Setting this bit + loads a single byte of data into the EE_DATA + register from the EEPROM location specified in + the EE_ADDR register. + The Wlan hardware hardware clears this bit automatically. + EE_DATA is valid when this bit is cleared. + + 0 EE_WRITE - EEPROM Write Request - Setting this bit + writes a single byte of data from the EE_DATA register into the + EEPROM location specified in the EE_ADDR register. + The Wlan hardware hardware clears this bit automatically. 
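The RX_CONFIG_OPTION_* values above are plain OR-combinations of the individual RX_CFG_* filter bits defined a few lines earlier; RX_CONFIG_OPTION_MY_DST_MY_BSS, for instance, is "only my destination MAC" (0x0010) together with "only my BSSID" (0x0020). The snippet below recomputes two of them from bit values copied out of the header, just to make the composition visible.

#include <stdio.h>

#define RX_CFG_ENABLE_PHY_HEADER_PLCP    0x0002
#define RX_CFG_ENABLE_ONLY_MY_DEST_MAC   0x0010
#define RX_CFG_ENABLE_ONLY_MY_BSSID      0x0020
#define RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR 0x0800
#define RX_CFG_COPY_RX_STATUS            0x2000
#define RX_CFG_TSF                       0x10000

#define RX_CONFIG_OPTION_MY_DST_MY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC | \
                                        RX_CFG_ENABLE_ONLY_MY_BSSID)

#define RX_CONFIG_OPTION_FOR_SCAN (RX_CFG_ENABLE_PHY_HEADER_PLCP | \
                                   RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR | \
                                   RX_CFG_COPY_RX_STATUS | RX_CFG_TSF)

int main(void)
{
        printf("MY_DST_MY_BSS = 0x%05x\n", (unsigned)RX_CONFIG_OPTION_MY_DST_MY_BSS);
        printf("FOR_SCAN      = 0x%05x\n", (unsigned)RX_CONFIG_OPTION_FOR_SCAN);
        return 0;
}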
+*===============================================*/ +#define ACX_EE_CTL_REG EE_CTL +#define EE_WRITE 0x00000001ul +#define EE_READ 0x00000002ul + +/*=============================================== + EEPROM Address - 32bit RW + ------------------------------------------ + This register specifies the address + within the EEPROM from/to which to read/write data. + ===============================================*/ +#define ACX_EE_ADDR_REG EE_ADDR + +/*=============================================== + EEPROM Data - 32bit RW + ------------------------------------------ + This register either holds the read 8 bits of + data from the EEPROM or the write data + to be written to the EEPROM. + ===============================================*/ +#define ACX_EE_DATA_REG EE_DATA + +/*=============================================== + EEPROM Base Address - 32bit RW + ------------------------------------------ + This register holds the upper nine bits + [23:15] of the 24-bit Wlan hardware memory + address for burst reads from EEPROM accesses. + The EEPROM provides the lower 15 bits of this address. + The MSB of the address from the EEPROM is ignored. + ===============================================*/ +#define ACX_EE_CFG EE_CFG + +/*=============================================== + GPIO Output Values -32bit, RW + ------------------------------------------ + [31:16] Reserved + [15: 0] Specify the output values (at the output driver inputs) for + GPIO[15:0], respectively. + ===============================================*/ +#define ACX_GPIO_OUT_REG GPIO_OUT +#define ACX_MAX_GPIO_LINES 15 + +/*=============================================== + Contention window -32bit, RW + ------------------------------------------ + [31:26] Reserved + [25:16] Max (0x3ff) + [15:07] Reserved + [06:00] Current contention window value - default is 0x1F + ===============================================*/ +#define ACX_CONT_WIND_CFG_REG CONT_WIND_CFG +#define ACX_CONT_WIND_MIN_MASK 0x0000007f +#define ACX_CONT_WIND_MAX 0x03ff0000 + +/*=============================================== + HI_CFG Interface Configuration Register Values + ------------------------------------------ + ===============================================*/ +#define HI_CFG_UART_ENABLE 0x00000004 +#define HI_CFG_RST232_ENABLE 0x00000008 +#define HI_CFG_CLOCK_REQ_SELECT 0x00000010 +#define HI_CFG_HOST_INT_ENABLE 0x00000020 +#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040 +#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080 +#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100 +#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200 +#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400 + +/* + * NOTE: USE_ACTIVE_HIGH compilation flag should be defined in makefile + * for platforms using active high interrupt level + */ +#ifdef USE_ACTIVE_HIGH +#define HI_CFG_DEF_VAL \ + (HI_CFG_UART_ENABLE | \ + HI_CFG_RST232_ENABLE | \ + HI_CFG_CLOCK_REQ_SELECT | \ + HI_CFG_HOST_INT_ENABLE) +#else +#define HI_CFG_DEF_VAL \ + (HI_CFG_UART_ENABLE | \ + HI_CFG_RST232_ENABLE | \ + HI_CFG_CLOCK_REQ_SELECT | \ + HI_CFG_HOST_INT_ENABLE) + +#endif + +#define REF_FREQ_19_2 0 +#define REF_FREQ_26_0 1 +#define REF_FREQ_38_4 2 +#define REF_FREQ_40_0 3 +#define REF_FREQ_33_6 4 +#define REF_FREQ_NUM 5 + +#define LUT_PARAM_INTEGER_DIVIDER 0 +#define LUT_PARAM_FRACTIONAL_DIVIDER 1 +#define LUT_PARAM_ATTN_BB 2 +#define LUT_PARAM_ALPHA_BB 3 +#define LUT_PARAM_STOP_TIME_BB 4 +#define LUT_PARAM_BB_PLL_LOOP_FILTER 5 +#define LUT_PARAM_NUM 6 + +#define ACX_EEPROMLESS_IND_REG (SCR_PAD4) +#define USE_EEPROM 0 +#define SOFT_RESET_MAX_TIME 
1000000 +#define SOFT_RESET_STALL_TIME 1000 +#define NVS_DATA_BUNDARY_ALIGNMENT 4 + + +/* Firmware image load chunk size */ +#define CHUNK_SIZE 512 + +/* Firmware image header size */ +#define FW_HDR_SIZE 8 + +#define ECPU_CONTROL_HALT 0x00000101 + + +/****************************************************************************** + + CHANNELS, BAND & REG DOMAINS definitions + +******************************************************************************/ + + +enum { + RADIO_BAND_2_4GHZ = 0, /* 2.4 Ghz band */ + RADIO_BAND_5GHZ = 1, /* 5 Ghz band */ + RADIO_BAND_JAPAN_4_9_GHZ = 2, + DEFAULT_BAND = RADIO_BAND_2_4GHZ, + INVALID_BAND = 0xFE, + MAX_RADIO_BANDS = 0xFF +}; + +#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */ +#define OFDM_RATE_BIT BIT(6) +#define PBCC_RATE_BIT BIT(7) + +enum { + CCK_LONG = 0, + CCK_SHORT = SHORT_PREAMBLE_BIT, + PBCC_LONG = PBCC_RATE_BIT, + PBCC_SHORT = PBCC_RATE_BIT | SHORT_PREAMBLE_BIT, + OFDM = OFDM_RATE_BIT +}; + +/****************************************************************************** + +Transmit-Descriptor RATE-SET field definitions... + +Define a new "Rate-Set" for TX path that incorporates the +Rate & Modulation info into a single 16-bit field. + +TxdRateSet_t: +b15 - Indicates Preamble type (1=SHORT, 0=LONG). + Notes: + Must be LONG (0) for 1Mbps rate. + Does not apply (set to 0) for RevG-OFDM rates. +b14 - Indicates PBCC encoding (1=PBCC, 0=not). + Notes: + Does not apply (set to 0) for rates 1 and 2 Mbps. + Does not apply (set to 0) for RevG-OFDM rates. +b13 - Unused (set to 0). +b12-b0 - Supported Rate indicator bits as defined below. + +******************************************************************************/ + + +/************************************************************************* + + Interrupt Trigger Register (Host -> WiLink) + +**************************************************************************/ + +/* Hardware to Embedded CPU Interrupts - first 32-bit register set */ + +/* + * Host Command Interrupt. Setting this bit masks + * the interrupt that the host issues to inform + * the FW that it has sent a command + * to the Wlan hardware Command Mailbox. + */ +#define INTR_TRIG_CMD BIT(0) + +/* + * Host Event Acknowlegde Interrupt. The host + * sets this bit to acknowledge that it received + * the unsolicited information from the event + * mailbox. + */ +#define INTR_TRIG_EVENT_ACK BIT(1) + +/* + * The host sets this bit to inform the Wlan + * FW that a TX packet is in the XFER + * Buffer #0. + */ +#define INTR_TRIG_TX_PROC0 BIT(2) + +/* + * The host sets this bit to inform the FW + * that it read a packet from RX XFER + * Buffer #0. + */ +#define INTR_TRIG_RX_PROC0 BIT(3) + +#define INTR_TRIG_DEBUG_ACK BIT(4) + +#define INTR_TRIG_STATE_CHANGED BIT(5) + + +/* Hardware to Embedded CPU Interrupts - second 32-bit register set */ + +/* + * The host sets this bit to inform the FW + * that it read a packet from RX XFER + * Buffer #1. + */ +#define INTR_TRIG_RX_PROC1 BIT(17) + +/* + * The host sets this bit to inform the Wlan + * hardware that a TX packet is in the XFER + * Buffer #1. 
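CHUNK_SIZE and FW_HDR_SIZE above suggest the firmware image is handled as an 8-byte header plus a payload pushed to the chip in 512-byte chunks; the boot code that performs the transfer is not part of this hunk, so whether the header is stripped before chunking is not shown here. The sketch below only illustrates the chunking arithmetic on a made-up image length.

#include <stdio.h>

#define FW_HDR_SIZE 8     /* firmware image header size (from the header above) */
#define CHUNK_SIZE  512   /* firmware image load chunk size */

int main(void)
{
        unsigned int image_len = 123456;            /* hypothetical image size */
        unsigned int payload = image_len - FW_HDR_SIZE;
        unsigned int full = payload / CHUNK_SIZE;
        unsigned int last = payload % CHUNK_SIZE;

        printf("%u byte image: %u byte header, %u full %u-byte chunks",
               image_len, FW_HDR_SIZE, full, CHUNK_SIZE);
        if (last)
                printf(" and a final %u-byte chunk", last);
        printf("\n");
        return 0;
}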
+ */ +#define INTR_TRIG_TX_PROC1 BIT(18) + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c new file mode 100644 index 0000000..6730f5b --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_rx.c @@ -0,0 +1,223 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include "wl1271.h" +#include "wl1271_acx.h" +#include "wl1271_reg.h" +#include "wl1271_rx.h" +#include "wl1271_spi.h" +#include "wl1271_io.h" + +static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, + u32 drv_rx_counter) +{ + return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & + RX_MEM_BLOCK_MASK; +} + +static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status, + u32 drv_rx_counter) +{ + return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & + RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV; +} + +/* The values of this table must match the wl1271_rates[] array */ +static u8 wl1271_rx_rate_to_idx[] = { + /* MCS rates are used only with 11n */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */ + + 11, /* WL1271_RATE_54 */ + 10, /* WL1271_RATE_48 */ + 9, /* WL1271_RATE_36 */ + 8, /* WL1271_RATE_24 */ + + /* TI-specific rate */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */ + + 7, /* WL1271_RATE_18 */ + 6, /* WL1271_RATE_12 */ + 3, /* WL1271_RATE_11 */ + 5, /* WL1271_RATE_9 */ + 4, /* WL1271_RATE_6 */ + 2, /* WL1271_RATE_5_5 */ + 1, /* WL1271_RATE_2 */ + 0 /* WL1271_RATE_1 */ +}; + +/* The values of this table must match the wl1271_rates[] array */ +static u8 wl1271_5_ghz_rx_rate_to_idx[] = { + /* MCS rates are used only with 11n */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */ + + 7, /* WL1271_RATE_54 */ + 6, /* WL1271_RATE_48 */ + 5, /* WL1271_RATE_36 */ + 4, /* WL1271_RATE_24 */ + + /* TI-specific rate */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */ + + 3, /* WL1271_RATE_18 */ + 2, /* WL1271_RATE_12 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11 */ + 1, /* WL1271_RATE_9 */ + 0, /* WL1271_RATE_6 */ + WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5 */ + 
WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2 */ + WL1271_RX_RATE_UNSUPPORTED /* WL1271_RATE_1 */ +}; + +static void wl1271_rx_status(struct wl1271 *wl, + struct wl1271_rx_descriptor *desc, + struct ieee80211_rx_status *status, + u8 beacon) +{ + memset(status, 0, sizeof(struct ieee80211_rx_status)); + + if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == + WL1271_RX_DESC_BAND_BG) { + status->band = IEEE80211_BAND_2GHZ; + status->rate_idx = wl1271_rx_rate_to_idx[desc->rate]; + } else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == + WL1271_RX_DESC_BAND_A) { + status->band = IEEE80211_BAND_5GHZ; + status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate]; + } else + wl1271_warning("unsupported band 0x%x", + desc->flags & WL1271_RX_DESC_BAND_MASK); + + if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED)) + wl1271_warning("unsupported rate"); + + /* + * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the + * timestamp from the beacon (acx_tsf_info). In BSS mode (infra) we + * only need the mactime for monitor mode. For now the mactime is + * not valid, so RX_FLAG_TSFT should not be set + */ + status->signal = desc->rssi; + + /* + * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we + * need to divide by two for now, but TI has been discussing about + * changing it. This needs to be rechecked. + */ + status->noise = desc->rssi - (desc->snr >> 1); + + status->freq = ieee80211_channel_to_frequency(desc->channel); + + if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { + status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; + + if (likely(!(desc->status & WL1271_RX_DESC_DECRYPT_FAIL))) + status->flag |= RX_FLAG_DECRYPTED; + if (unlikely(desc->status & WL1271_RX_DESC_MIC_FAIL)) + status->flag |= RX_FLAG_MMIC_ERROR; + } +} + +static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length) +{ + struct ieee80211_rx_status rx_status; + struct wl1271_rx_descriptor *desc; + struct sk_buff *skb; + u16 *fc; + u8 *buf; + u8 beacon = 0; + + skb = __dev_alloc_skb(length, GFP_KERNEL); + if (!skb) { + wl1271_error("Couldn't allocate RX frame"); + return; + } + + buf = skb_put(skb, length); + wl1271_read(wl, WL1271_SLV_MEM_DATA, buf, length, true); + + /* the data read starts with the descriptor */ + desc = (struct wl1271_rx_descriptor *) buf; + + /* now we pull the descriptor out of the buffer */ + skb_pull(skb, sizeof(*desc)); + + fc = (u16 *)skb->data; + if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) + beacon = 1; + + wl1271_rx_status(wl, desc, &rx_status, beacon); + + wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, + beacon ? 
"beacon" : ""); + + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + ieee80211_rx_ni(wl->hw, skb); +} + +void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status) +{ + struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; + u32 buf_size; + u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; + u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; + u32 mem_block; + + while (drv_rx_counter != fw_rx_counter) { + mem_block = wl1271_rx_get_mem_block(status, drv_rx_counter); + buf_size = wl1271_rx_get_buf_size(status, drv_rx_counter); + + if (buf_size == 0) { + wl1271_warning("received empty data"); + break; + } + + wl->rx_mem_pool_addr.addr = (mem_block << 8) + + le32_to_cpu(wl_mem_map->packet_memory_pool_start); + wl->rx_mem_pool_addr.addr_extra = + wl->rx_mem_pool_addr.addr + 4; + + /* Choose the block we want to read */ + wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr, + sizeof(wl->rx_mem_pool_addr), false); + + wl1271_rx_handle_data(wl, buf_size); + + wl->rx_counter++; + drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; + wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); + } +} diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h new file mode 100644 index 0000000..1ae6d17 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_rx.h @@ -0,0 +1,121 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_RX_H__ +#define __WL1271_RX_H__ + +#include + +#define WL1271_RX_MAX_RSSI -30 +#define WL1271_RX_MIN_RSSI -95 + +#define WL1271_RX_ALIGN_TO 4 +#define WL1271_RX_ALIGN(len) (((len) + WL1271_RX_ALIGN_TO - 1) & \ + ~(WL1271_RX_ALIGN_TO - 1)) + +#define SHORT_PREAMBLE_BIT BIT(0) +#define OFDM_RATE_BIT BIT(6) +#define PBCC_RATE_BIT BIT(7) + +#define PLCP_HEADER_LENGTH 8 +#define RX_DESC_PACKETID_SHIFT 11 +#define RX_MAX_PACKET_ID 3 + +#define NUM_RX_PKT_DESC_MOD_MASK 7 +#define WL1271_RX_RATE_UNSUPPORTED 0xFF + +#define RX_DESC_VALID_FCS 0x0001 +#define RX_DESC_MATCH_RXADDR1 0x0002 +#define RX_DESC_MCAST 0x0004 +#define RX_DESC_STAINTIM 0x0008 +#define RX_DESC_VIRTUAL_BM 0x0010 +#define RX_DESC_BCAST 0x0020 +#define RX_DESC_MATCH_SSID 0x0040 +#define RX_DESC_MATCH_BSSID 0x0080 +#define RX_DESC_ENCRYPTION_MASK 0x0300 +#define RX_DESC_MEASURMENT 0x0400 +#define RX_DESC_SEQNUM_MASK 0x1800 +#define RX_DESC_MIC_FAIL 0x2000 +#define RX_DESC_DECRYPT_FAIL 0x4000 + +/* + * RX Descriptor flags: + * + * Bits 0-1 - band + * Bit 2 - STBC + * Bit 3 - A-MPDU + * Bit 4 - HT + * Bits 5-7 - encryption + */ +#define WL1271_RX_DESC_BAND_MASK 0x03 +#define WL1271_RX_DESC_ENCRYPT_MASK 0xE0 + +#define WL1271_RX_DESC_BAND_BG 0x00 +#define WL1271_RX_DESC_BAND_J 0x01 +#define WL1271_RX_DESC_BAND_A 0x02 + +#define WL1271_RX_DESC_STBC BIT(2) +#define WL1271_RX_DESC_A_MPDU BIT(3) +#define WL1271_RX_DESC_HT BIT(4) + +#define WL1271_RX_DESC_ENCRYPT_WEP 0x20 +#define WL1271_RX_DESC_ENCRYPT_TKIP 0x40 +#define WL1271_RX_DESC_ENCRYPT_AES 0x60 +#define WL1271_RX_DESC_ENCRYPT_GEM 0x80 + +/* + * RX Descriptor status + * + * Bits 0-2 - status + * Bits 3-7 - reserved + */ +#define WL1271_RX_DESC_STATUS_MASK 0x07 + +#define WL1271_RX_DESC_SUCCESS 0x00 +#define WL1271_RX_DESC_DECRYPT_FAIL 0x01 +#define WL1271_RX_DESC_MIC_FAIL 0x02 +#define WL1271_RX_DESC_DRIVER_RX_Q_FAIL 0x03 + +#define RX_MEM_BLOCK_MASK 0xFF +#define RX_BUF_SIZE_MASK 0xFFF00 +#define RX_BUF_SIZE_SHIFT_DIV 6 + +struct wl1271_rx_descriptor { + __le16 length; + u8 status; + u8 flags; + u8 rate; + u8 channel; + s8 rssi; + u8 snr; + __le32 timestamp; + u8 packet_class; + u8 process_id; + u8 pad_len; + u8 reserved; +} __attribute__ ((packed)); + +void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); + +#endif diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c new file mode 100644 index 0000000..67a8293 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_spi.c @@ -0,0 +1,257 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
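+ *
+ * Example (illustration only, not from the original sources): how the RX
+ * path in wl1271_rx.c above unpacks one packed rx_pkt_descs word with the
+ * masks defined in wl1271_rx.h. The low byte selects the firmware memory
+ * block and bits 8..19 carry the buffer size; shifting by
+ * RX_BUF_SIZE_SHIFT_DIV (6) rather than 8 leaves that 12-bit field
+ * multiplied by four, which presumably converts it from 4-byte words to
+ * bytes - that reading is an assumption.
+ *
+ *	u32 word      = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
+ *	u8  mem_block = word & RX_MEM_BLOCK_MASK;
+ *	u32 buf_size  = (word & RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
+ *
+ * The alignment helper defined next to those masks is the usual
+ * round-up-to-a-multiple-of-4 trick, e.g. WL1271_RX_ALIGN(61) == 64 and
+ * WL1271_RX_ALIGN(64) == 64; wl1271_tx.h repeats the same construction
+ * as WL1271_TX_ALIGN().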
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include + +#include "wl1271.h" +#include "wl12xx_80211.h" +#include "wl1271_spi.h" + + +void wl1271_spi_reset(struct wl1271 *wl) +{ + u8 *cmd; + struct spi_transfer t; + struct spi_message m; + + cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); + if (!cmd) { + wl1271_error("could not allocate cmd for spi reset"); + return; + } + + memset(&t, 0, sizeof(t)); + spi_message_init(&m); + + memset(cmd, 0xff, WSPI_INIT_CMD_LEN); + + t.tx_buf = cmd; + t.len = WSPI_INIT_CMD_LEN; + spi_message_add_tail(&t, &m); + + spi_sync(wl->spi, &m); + + wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); +} + +void wl1271_spi_init(struct wl1271 *wl) +{ + u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd; + struct spi_transfer t; + struct spi_message m; + + cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); + if (!cmd) { + wl1271_error("could not allocate cmd for spi init"); + return; + } + + memset(crc, 0, sizeof(crc)); + memset(&t, 0, sizeof(t)); + spi_message_init(&m); + + /* + * Set WSPI_INIT_COMMAND + * the data is being send from the MSB to LSB + */ + cmd[2] = 0xff; + cmd[3] = 0xff; + cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX; + cmd[0] = 0; + cmd[7] = 0; + cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3; + cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN; + + if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0) + cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY; + else + cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY; + + cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS + | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS; + + crc[0] = cmd[1]; + crc[1] = cmd[0]; + crc[2] = cmd[7]; + crc[3] = cmd[6]; + crc[4] = cmd[5]; + + cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1; + cmd[4] |= WSPI_INIT_CMD_END; + + t.tx_buf = cmd; + t.len = WSPI_INIT_CMD_LEN; + spi_message_add_tail(&t, &m); + + spi_sync(wl->spi, &m); + + wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); +} + +#define WL1271_BUSY_WORD_TIMEOUT 1000 + +/* FIXME: Check busy words, removed due to SPI bug */ +#if 0 +static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len) +{ + struct spi_transfer t[1]; + struct spi_message m; + u32 *busy_buf; + int num_busy_bytes = 0; + + wl1271_info("spi read BUSY!"); + + /* + * Look for the non-busy word in the read buffer, and if found, + * read in the remaining data into the buffer. + */ + busy_buf = (u32 *)buf; + for (; (u32)busy_buf < (u32)buf + len; busy_buf++) { + num_busy_bytes += sizeof(u32); + if (*busy_buf & 0x1) { + spi_message_init(&m); + memset(t, 0, sizeof(t)); + memmove(buf, busy_buf, len - num_busy_bytes); + t[0].rx_buf = buf + (len - num_busy_bytes); + t[0].len = num_busy_bytes; + spi_message_add_tail(&t[0], &m); + spi_sync(wl->spi, &m); + return; + } + } + + /* + * Read further busy words from SPI until a non-busy word is + * encountered, then read the data itself into the buffer. 
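+ *
+ * (Summary, added for illustration: wl1271_spi_raw_read() below always
+ * clocks in a fixed block of busy words between the command and the
+ * payload, and the intended check - currently commented out there due to
+ * the SPI bug mentioned in the FIXME above - is on bit 0 of the last of
+ * them:
+ *
+ *	if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1))
+ *		wl1271_spi_read_busy(wl, buf, len);
+ *
+ * Only if that bit is clear does this routine poll further words until
+ * one with bit 0 set turns up, and then re-read the payload.)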
+ */ + wl1271_info("spi read BUSY-polling needed!"); + + num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT; + busy_buf = wl->buffer_busyword; + while (num_busy_bytes) { + num_busy_bytes--; + spi_message_init(&m); + memset(t, 0, sizeof(t)); + t[0].rx_buf = busy_buf; + t[0].len = sizeof(u32); + spi_message_add_tail(&t[0], &m); + spi_sync(wl->spi, &m); + + if (*busy_buf & 0x1) { + spi_message_init(&m); + memset(t, 0, sizeof(t)); + t[0].rx_buf = buf; + t[0].len = len; + spi_message_add_tail(&t[0], &m); + spi_sync(wl->spi, &m); + return; + } + } + + /* The SPI bus is unresponsive, the read failed. */ + memset(buf, 0, len); + wl1271_error("SPI read busy-word timeout!\n"); +} +#endif + +void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed) +{ + struct spi_transfer t[3]; + struct spi_message m; + u32 *busy_buf; + u32 *cmd; + + cmd = &wl->buffer_cmd; + busy_buf = wl->buffer_busyword; + + *cmd = 0; + *cmd |= WSPI_CMD_READ; + *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; + *cmd |= addr & WSPI_CMD_BYTE_ADDR; + + if (fixed) + *cmd |= WSPI_CMD_FIXED; + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = 4; + spi_message_add_tail(&t[0], &m); + + /* Busy and non busy words read */ + t[1].rx_buf = busy_buf; + t[1].len = WL1271_BUSY_WORD_LEN; + spi_message_add_tail(&t[1], &m); + + t[2].rx_buf = buf; + t[2].len = len; + spi_message_add_tail(&t[2], &m); + + spi_sync(wl->spi, &m); + + /* FIXME: Check busy words, removed due to SPI bug */ + /* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1)) + wl1271_spi_read_busy(wl, buf, len); */ + + wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); + wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); +} + +void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed) +{ + struct spi_transfer t[2]; + struct spi_message m; + u32 *cmd; + + cmd = &wl->buffer_cmd; + + *cmd = 0; + *cmd |= WSPI_CMD_WRITE; + *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; + *cmd |= addr & WSPI_CMD_BYTE_ADDR; + + if (fixed) + *cmd |= WSPI_CMD_FIXED; + + spi_message_init(&m); + memset(t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = sizeof(*cmd); + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = buf; + t[1].len = len; + spi_message_add_tail(&t[1], &m); + + spi_sync(wl->spi, &m); + + wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); + wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); +} diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h new file mode 100644 index 0000000..a803596 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_spi.h @@ -0,0 +1,96 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2008-2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
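+ *
+ * For illustration (not from the original sources): the read transaction
+ * that wl1271_spi_raw_read() above queues as one spi_message looks like
+ * this on the wire,
+ *
+ *	[4-byte command word][WL1271_BUSY_WORD_LEN busy bytes][len data bytes]
+ *
+ * while the write path sends just the command word followed by the
+ * payload. The WSPI_CMD_FIXED bit appears to select fixed,
+ * non-incrementing addressing for FIFO-style registers such as
+ * WL1271_SLV_MEM_DATA; that interpretation is an assumption.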
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_SPI_H__ +#define __WL1271_SPI_H__ + +#include "wl1271_reg.h" + +#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 + +#define HW_PARTITION_REGISTERS_ADDR 0x1ffc0 +#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR) +#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4) +#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8) +#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12) +#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16) +#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20) +#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24) + +#define HW_ACCESS_REGISTER_SIZE 4 + +#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 + +#define WSPI_CMD_READ 0x40000000 +#define WSPI_CMD_WRITE 0x00000000 +#define WSPI_CMD_FIXED 0x20000000 +#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000 +#define WSPI_CMD_BYTE_LENGTH_OFFSET 17 +#define WSPI_CMD_BYTE_ADDR 0x0001FFFF + +#define WSPI_INIT_CMD_CRC_LEN 5 + +#define WSPI_INIT_CMD_START 0x00 +#define WSPI_INIT_CMD_TX 0x40 +/* the extra bypass bit is sampled by the TNET as '1' */ +#define WSPI_INIT_CMD_BYPASS_BIT 0x80 +#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07 +#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80 +#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00 +#define WSPI_INIT_CMD_IOD 0x40 +#define WSPI_INIT_CMD_IP 0x20 +#define WSPI_INIT_CMD_CS 0x10 +#define WSPI_INIT_CMD_WS 0x08 +#define WSPI_INIT_CMD_WSPI 0x01 +#define WSPI_INIT_CMD_END 0x01 + +#define WSPI_INIT_CMD_LEN 8 + +#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \ + ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32)) +#define HW_ACCESS_WSPI_INIT_CMD_MASK 0 + +#define OCP_CMD_LOOP 32 + +#define OCP_CMD_WRITE 0x1 +#define OCP_CMD_READ 0x2 + +#define OCP_READY_MASK BIT(18) +#define OCP_STATUS_MASK (BIT(16) | BIT(17)) + +#define OCP_STATUS_NO_RESP 0x00000 +#define OCP_STATUS_OK 0x10000 +#define OCP_STATUS_REQ_FAILED 0x20000 +#define OCP_STATUS_RESP_ERROR 0x30000 + +/* Raw target IO, address is not translated */ +void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed); +void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, + size_t len, bool fixed); + +/* INIT and RESET words */ +void wl1271_spi_reset(struct wl1271 *wl); +void wl1271_spi_init(struct wl1271 *wl); +#endif /* __WL1271_SPI_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c new file mode 100644 index 0000000..3919102 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c @@ -0,0 +1,283 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
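+ *
+ * Worked example (illustration only) of the command-word layout that the
+ * WSPI_CMD_* masks in wl1271_spi.h above describe: bit 30 selects read,
+ * bit 29 fixed addressing, bits 17..28 hold a 12-bit byte count and bits
+ * 0..16 a 17-bit address. A fixed-address read of 512 bytes from an
+ * (arbitrary, made-up) address 0x68 would therefore be encoded as
+ *
+ *	cmd = WSPI_CMD_READ | WSPI_CMD_FIXED
+ *	    | (512 << WSPI_CMD_BYTE_LENGTH_OFFSET)
+ *	    | (0x68 & WSPI_CMD_BYTE_ADDR);
+ *
+ * which evaluates to 0x64000068.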
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ +#include "wl1271_testmode.h" + +#include + +#include "wl1271.h" +#include "wl1271_spi.h" +#include "wl1271_acx.h" + +#define WL1271_TM_MAX_DATA_LENGTH 1024 + +enum wl1271_tm_commands { + WL1271_TM_CMD_UNSPEC, + WL1271_TM_CMD_TEST, + WL1271_TM_CMD_INTERROGATE, + WL1271_TM_CMD_CONFIGURE, + WL1271_TM_CMD_NVS_PUSH, + WL1271_TM_CMD_SET_PLT_MODE, + + __WL1271_TM_CMD_AFTER_LAST +}; +#define WL1271_TM_CMD_MAX (__WL1271_TM_CMD_AFTER_LAST - 1) + +enum wl1271_tm_attrs { + WL1271_TM_ATTR_UNSPEC, + WL1271_TM_ATTR_CMD_ID, + WL1271_TM_ATTR_ANSWER, + WL1271_TM_ATTR_DATA, + WL1271_TM_ATTR_IE_ID, + WL1271_TM_ATTR_PLT_MODE, + + __WL1271_TM_ATTR_AFTER_LAST +}; +#define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1) + +static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = { + [WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 }, + [WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 }, + [WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY, + .len = WL1271_TM_MAX_DATA_LENGTH }, + [WL1271_TM_ATTR_IE_ID] = { .type = NLA_U32 }, + [WL1271_TM_ATTR_PLT_MODE] = { .type = NLA_U32 }, +}; + + +static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) +{ + int buf_len, ret, len; + struct sk_buff *skb; + void *buf; + u8 answer = 0; + + wl1271_debug(DEBUG_TESTMODE, "testmode cmd test"); + + if (!tb[WL1271_TM_ATTR_DATA]) + return -EINVAL; + + buf = nla_data(tb[WL1271_TM_ATTR_DATA]); + buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]); + + if (tb[WL1271_TM_ATTR_ANSWER]) + answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]); + + if (buf_len > sizeof(struct wl1271_command)) + return -EMSGSIZE; + + mutex_lock(&wl->mutex); + ret = wl1271_cmd_test(wl, buf, buf_len, answer); + mutex_unlock(&wl->mutex); + + if (ret < 0) { + wl1271_warning("testmode cmd test failed: %d", ret); + return ret; + } + + if (answer) { + len = nla_total_size(buf_len); + skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len); + if (!skb) + return -ENOMEM; + + NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf); + ret = cfg80211_testmode_reply(skb); + if (ret < 0) + return ret; + } + + return 0; + +nla_put_failure: + kfree_skb(skb); + return -EMSGSIZE; +} + +static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) +{ + int ret; + struct wl1271_command *cmd; + struct sk_buff *skb; + u8 ie_id; + + wl1271_debug(DEBUG_TESTMODE, "testmode cmd interrogate"); + + if (!tb[WL1271_TM_ATTR_IE_ID]) + return -EINVAL; + + ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + mutex_lock(&wl->mutex); + ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd)); + mutex_unlock(&wl->mutex); + + if (ret < 0) { + wl1271_warning("testmode cmd interrogate failed: %d", ret); + return ret; + } + + skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); + + return 0; + +nla_put_failure: + kfree_skb(skb); + return -EMSGSIZE; +} + +static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[]) +{ + int buf_len, ret; + void *buf; + u8 ie_id; + + wl1271_debug(DEBUG_TESTMODE, "testmode cmd configure"); + + if (!tb[WL1271_TM_ATTR_DATA]) + return -EINVAL; + if (!tb[WL1271_TM_ATTR_IE_ID]) + return -EINVAL; + + ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); + buf = 
nla_data(tb[WL1271_TM_ATTR_DATA]); + buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]); + + if (buf_len > sizeof(struct wl1271_command)) + return -EMSGSIZE; + + mutex_lock(&wl->mutex); + ret = wl1271_cmd_configure(wl, ie_id, buf, buf_len); + mutex_unlock(&wl->mutex); + + if (ret < 0) { + wl1271_warning("testmode cmd configure failed: %d", ret); + return ret; + } + + return 0; +} + +static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[]) +{ + int ret = 0; + size_t len; + void *buf; + + wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push"); + + if (!tb[WL1271_TM_ATTR_DATA]) + return -EINVAL; + + buf = nla_data(tb[WL1271_TM_ATTR_DATA]); + len = nla_len(tb[WL1271_TM_ATTR_DATA]); + + if (len != sizeof(struct wl1271_nvs_file)) { + wl1271_error("nvs size is not as expected: %zu != %zu", + len, sizeof(struct wl1271_nvs_file)); + return -EMSGSIZE; + } + + mutex_lock(&wl->mutex); + + kfree(wl->nvs); + + wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); + if (!wl->nvs) { + wl1271_error("could not allocate memory for the nvs file"); + ret = -ENOMEM; + goto out; + } + + memcpy(wl->nvs, buf, len); + + wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs"); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + +static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) +{ + u32 val; + int ret; + + wl1271_debug(DEBUG_TESTMODE, "testmode cmd set plt mode"); + + if (!tb[WL1271_TM_ATTR_PLT_MODE]) + return -EINVAL; + + val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]); + + switch (val) { + case 0: + ret = wl1271_plt_stop(wl); + break; + case 1: + ret = wl1271_plt_start(wl); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len) +{ + struct wl1271 *wl = hw->priv; + struct nlattr *tb[WL1271_TM_ATTR_MAX + 1]; + int err; + + err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy); + if (err) + return err; + + if (!tb[WL1271_TM_ATTR_CMD_ID]) + return -EINVAL; + + switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) { + case WL1271_TM_CMD_TEST: + return wl1271_tm_cmd_test(wl, tb); + case WL1271_TM_CMD_INTERROGATE: + return wl1271_tm_cmd_interrogate(wl, tb); + case WL1271_TM_CMD_CONFIGURE: + return wl1271_tm_cmd_configure(wl, tb); + case WL1271_TM_CMD_NVS_PUSH: + return wl1271_tm_cmd_nvs_push(wl, tb); + case WL1271_TM_CMD_SET_PLT_MODE: + return wl1271_tm_cmd_set_plt_mode(wl, tb); + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.h b/drivers/net/wireless/wl12xx/wl1271_testmode.h new file mode 100644 index 0000000..c196d28 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_testmode.h @@ -0,0 +1,31 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
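+ *
+ * For illustration (not part of the original interface notes): the
+ * attribute blob a userspace tool would carry in the nl80211 testmode
+ * payload (NL80211_ATTR_TESTDATA) to toggle PLT mode consists of the two
+ * attributes parsed by wl1271_tm_cmd() above, both u32:
+ *
+ *	WL1271_TM_ATTR_CMD_ID   = WL1271_TM_CMD_SET_PLT_MODE
+ *	WL1271_TM_ATTR_PLT_MODE = 1	(wl1271_plt_start)
+ *	                       or 0	(wl1271_plt_stop)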
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_TESTMODE_H__ +#define __WL1271_TESTMODE_H__ + +#include + +int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len); + +#endif /* __WL1271_TESTMODE_H__ */ diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c new file mode 100644 index 0000000..811e739 --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_tx.c @@ -0,0 +1,439 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include + +#include "wl1271.h" +#include "wl1271_spi.h" +#include "wl1271_io.h" +#include "wl1271_reg.h" +#include "wl1271_ps.h" +#include "wl1271_tx.h" + +static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb) +{ + int i; + for (i = 0; i < ACX_TX_DESCRIPTORS; i++) + if (wl->tx_frames[i] == NULL) { + wl->tx_frames[i] = skb; + return i; + } + + return -EBUSY; +} + +static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra) +{ + struct wl1271_tx_hw_descr *desc; + u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; + u32 total_blocks, excluded; + int id, ret = -EBUSY; + + /* allocate free identifier for the packet */ + id = wl1271_tx_id(wl, skb); + if (id < 0) + return id; + + /* approximate the number of blocks required for this packet + in the firmware */ + /* FIXME: try to figure out what is done here and make it cleaner */ + total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV; + excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34; + total_blocks += (excluded > 252) ? 
2 : 1; + total_blocks += TX_HW_BLOCK_SPARE; + + if (total_blocks <= wl->tx_blocks_available) { + desc = (struct wl1271_tx_hw_descr *)skb_push( + skb, total_len - skb->len); + + desc->extra_mem_blocks = TX_HW_BLOCK_SPARE; + desc->total_mem_blocks = total_blocks; + desc->id = id; + + wl->tx_blocks_available -= total_blocks; + + ret = 0; + + wl1271_debug(DEBUG_TX, + "tx_allocate: size: %d, blocks: %d, id: %d", + total_len, total_blocks, id); + } else + wl->tx_frames[id] = NULL; + + return ret; +} + +static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, + u32 extra, struct ieee80211_tx_info *control) +{ + struct wl1271_tx_hw_descr *desc; + int pad, ac; + u16 tx_attr; + + desc = (struct wl1271_tx_hw_descr *) skb->data; + + /* relocate space for security header */ + if (extra) { + void *framestart = skb->data + sizeof(*desc); + u16 fc = *(u16 *)(framestart + extra); + int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc)); + memmove(framestart, framestart + extra, hdrlen); + } + + /* configure packet life time */ + desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) - + wl->time_offset); + desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); + + /* configure the tx attributes */ + tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; + + /* queue */ + ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); + desc->tid = wl1271_tx_ac_to_tid(ac); + + desc->aid = TX_HW_DEFAULT_AID; + desc->reserved = 0; + + /* align the length (and store in terms of words) */ + pad = WL1271_TX_ALIGN(skb->len); + desc->length = cpu_to_le16(pad >> 2); + + /* calculate number of padding bytes */ + pad = pad - skb->len; + tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; + + /* if the packets are destined for AP (have a STA entry) send them + with AP rate policies, otherwise use default basic rates */ + if (control->control.sta) + tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY; + + desc->tx_attr = cpu_to_le16(tx_attr); + + wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); + return 0; +} + +static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb, + struct ieee80211_tx_info *control) +{ + + struct wl1271_tx_hw_descr *desc; + int len; + + /* FIXME: This is a workaround for getting non-aligned packets. + This happens at least with EAPOL packets from the user space. + Our DMA requires packets to be aligned on a 4-byte boundary. 
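+ *
+ * (Clarification, added for illustration: this is a separate constraint
+ * from the length padding already done in wl1271_tx_fill_hdr() above.
+ * There the total length is rounded up and the leftover recorded in the
+ * descriptor - for example skb->len == 90 gives WL1271_TX_ALIGN(90) ==
+ * 92, desc->length == 92 / 4 == 23 words, and the 2 pad bytes go into
+ * tx_attr at TX_HW_ATTR_OFST_LAST_WORD_PAD. Here it is the start address
+ * of skb->data itself that must land on a 4-byte boundary before the
+ * fixed-address block write below.)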
+ */ + if (unlikely((long)skb->data & 0x03)) { + int offset = (4 - (long)skb->data) & 0x03; + wl1271_debug(DEBUG_TX, "skb offset %d", offset); + + /* check whether the current skb can be used */ + if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) { + unsigned char *src = skb->data; + + /* align the buffer on a 4-byte boundary */ + skb_reserve(skb, offset); + memmove(skb->data, src, skb->len); + } else { + wl1271_info("No handler, fixme!"); + return -EINVAL; + } + } + + len = WL1271_TX_ALIGN(skb->len); + + /* perform a fixed address block write with the packet */ + wl1271_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true); + + /* write packet new counter into the write access register */ + wl->tx_packets_count++; + wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); + + desc = (struct wl1271_tx_hw_descr *) skb->data; + wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)", + desc->id, skb, len, desc->length); + + return 0; +} + +/* caller must hold wl->mutex */ +static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info; + u32 extra = 0; + int ret = 0; + u8 idx; + + if (!skb) + return -EINVAL; + + info = IEEE80211_SKB_CB(skb); + + if (info->control.hw_key && + info->control.hw_key->alg == ALG_TKIP) + extra = WL1271_TKIP_IV_SPACE; + + if (info->control.hw_key) { + idx = info->control.hw_key->hw_key_idx; + + /* FIXME: do we have to do this if we're not using WEP? */ + if (unlikely(wl->default_key != idx)) { + ret = wl1271_cmd_set_default_wep_key(wl, idx); + if (ret < 0) + return ret; + wl->default_key = idx; + } + } + + ret = wl1271_tx_allocate(wl, skb, extra); + if (ret < 0) + return ret; + + ret = wl1271_tx_fill_hdr(wl, skb, extra, info); + if (ret < 0) + return ret; + + ret = wl1271_tx_send_packet(wl, skb, info); + if (ret < 0) + return ret; + + return ret; +} + +static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set) +{ + struct ieee80211_supported_band *band; + u32 enabled_rates = 0; + int bit; + + band = wl->hw->wiphy->bands[wl->band]; + for (bit = 0; bit < band->n_bitrates; bit++) { + if (rate_set & 0x1) + enabled_rates |= band->bitrates[bit].hw_value; + rate_set >>= 1; + } + + return enabled_rates; +} + +void wl1271_tx_work(struct work_struct *work) +{ + struct wl1271 *wl = container_of(work, struct wl1271, tx_work); + struct sk_buff *skb; + bool woken_up = false; + u32 sta_rates = 0; + int ret; + + /* check if the rates supported by the AP have changed */ + if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED, + &wl->flags))) { + unsigned long flags; + spin_lock_irqsave(&wl->wl_lock, flags); + sta_rates = wl->sta_rate_set; + spin_unlock_irqrestore(&wl->wl_lock, flags); + } + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state == WL1271_STATE_OFF)) + goto out; + + /* if rates have changed, re-configure the rate policy */ + if (unlikely(sta_rates)) { + wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates); + wl1271_acx_rate_policies(wl); + } + + while ((skb = skb_dequeue(&wl->tx_queue))) { + if (!woken_up) { + ret = wl1271_ps_elp_wakeup(wl, false); + if (ret < 0) + goto out; + woken_up = true; + } + + ret = wl1271_tx_frame(wl, skb); + if (ret == -EBUSY) { + /* firmware buffer is full, stop queues */ + wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, " + "stop queues"); + ieee80211_stop_queues(wl->hw); + set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags); + skb_queue_head(&wl->tx_queue, skb); + goto out; + } else if (ret < 0) { + dev_kfree_skb(skb); + goto out; + } else if 
(test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, + &wl->flags)) { + /* firmware buffer has space, restart queues */ + wl1271_debug(DEBUG_TX, + "complete_packet: waking queues"); + ieee80211_wake_queues(wl->hw); + } + } + +out: + if (woken_up) + wl1271_ps_elp_sleep(wl); + + mutex_unlock(&wl->mutex); +} + +static void wl1271_tx_complete_packet(struct wl1271 *wl, + struct wl1271_tx_hw_res_descr *result) +{ + struct ieee80211_tx_info *info; + struct sk_buff *skb; + u16 seq; + int id = result->id; + + /* check for id legality */ + if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) { + wl1271_warning("TX result illegal id: %d", id); + return; + } + + skb = wl->tx_frames[id]; + info = IEEE80211_SKB_CB(skb); + + /* update packet status */ + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + if (result->status == TX_SUCCESS) + info->flags |= IEEE80211_TX_STAT_ACK; + if (result->status & TX_RETRY_EXCEEDED) { + /* FIXME */ + /* info->status.excessive_retries = 1; */ + wl->stats.excessive_retries++; + } + } + + /* FIXME */ + /* info->status.retry_count = result->ack_failures; */ + wl->stats.retry_count += result->ack_failures; + + /* update security sequence number */ + seq = wl->tx_security_seq_16 + + (result->lsb_security_sequence_number - + wl->tx_security_last_seq); + wl->tx_security_last_seq = result->lsb_security_sequence_number; + + if (seq < wl->tx_security_seq_16) + wl->tx_security_seq_32++; + wl->tx_security_seq_16 = seq; + + /* remove private header from packet */ + skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); + + /* remove TKIP header space if present */ + if (info->control.hw_key && + info->control.hw_key->alg == ALG_TKIP) { + int hdrlen = ieee80211_get_hdrlen_from_skb(skb); + memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen); + skb_pull(skb, WL1271_TKIP_IV_SPACE); + } + + wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x" + " status 0x%x", + result->id, skb, result->ack_failures, + result->rate_class_index, result->status); + + /* return the packet to the stack */ + ieee80211_tx_status(wl->hw, skb); + wl->tx_frames[result->id] = NULL; +} + +/* Called upon reception of a TX complete interrupt */ +void wl1271_tx_complete(struct wl1271 *wl, u32 count) +{ + struct wl1271_acx_mem_map *memmap = + (struct wl1271_acx_mem_map *)wl->target_mem_map; + u32 i; + + wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count); + + /* read the tx results from the chipset */ + wl1271_read(wl, le32_to_cpu(memmap->tx_result), + wl->tx_res_if, sizeof(*wl->tx_res_if), false); + + /* verify that the result buffer is not getting overrun */ + if (count > TX_HW_RESULT_QUEUE_LEN) { + wl1271_warning("TX result overflow from chipset: %d", count); + count = TX_HW_RESULT_QUEUE_LEN; + } + + /* process the results */ + for (i = 0; i < count; i++) { + struct wl1271_tx_hw_res_descr *result; + u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK; + + /* process the packet */ + result = &(wl->tx_res_if->tx_results_queue[offset]); + wl1271_tx_complete_packet(wl, result); + + wl->tx_results_count++; + } + + /* write host counter to chipset (to ack) */ + wl1271_write32(wl, le32_to_cpu(memmap->tx_result) + + offsetof(struct wl1271_tx_hw_res_if, + tx_result_host_counter), + le32_to_cpu(wl->tx_res_if->tx_result_fw_counter)); +} + +/* caller must hold wl->mutex */ +void wl1271_tx_flush(struct wl1271 *wl) +{ + int i; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + + /* TX failure */ +/* control->flags = 0; FIXME */ + + while ((skb = 
skb_dequeue(&wl->tx_queue))) { + info = IEEE80211_SKB_CB(skb); + + wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb); + + if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) + continue; + + ieee80211_tx_status(wl->hw, skb); + } + + for (i = 0; i < ACX_TX_DESCRIPTORS; i++) + if (wl->tx_frames[i] != NULL) { + skb = wl->tx_frames[i]; + info = IEEE80211_SKB_CB(skb); + + if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) + continue; + + ieee80211_tx_status(wl->hw, skb); + wl->tx_frames[i] = NULL; + } +} diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h new file mode 100644 index 0000000..17e405a --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl1271_tx.h @@ -0,0 +1,166 @@ +/* + * This file is part of wl1271 + * + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved. + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Luciano Coelho + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __WL1271_TX_H__ +#define __WL1271_TX_H__ + +#define TX_HW_BLOCK_SPARE 2 +#define TX_HW_BLOCK_SHIFT_DIV 8 + +#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 +/* The chipset reference driver states, that the "aid" value 1 + * is for infra-BSS, but is still always used */ +#define TX_HW_DEFAULT_AID 1 + +#define TX_HW_ATTR_SAVE_RETRIES BIT(0) +#define TX_HW_ATTR_HEADER_PAD BIT(1) +#define TX_HW_ATTR_SESSION_COUNTER (BIT(2) | BIT(3) | BIT(4)) +#define TX_HW_ATTR_RATE_POLICY (BIT(5) | BIT(6) | BIT(7) | \ + BIT(8) | BIT(9)) +#define TX_HW_ATTR_LAST_WORD_PAD (BIT(10) | BIT(11)) +#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12) + +#define TX_HW_ATTR_OFST_SAVE_RETRIES 0 +#define TX_HW_ATTR_OFST_HEADER_PAD 1 +#define TX_HW_ATTR_OFST_SESSION_COUNTER 2 +#define TX_HW_ATTR_OFST_RATE_POLICY 5 +#define TX_HW_ATTR_OFST_LAST_WORD_PAD 10 +#define TX_HW_ATTR_OFST_TX_CMPLT_REQ 12 + +#define TX_HW_RESULT_QUEUE_LEN 16 +#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf + +#define WL1271_TX_ALIGN_TO 4 +#define WL1271_TX_ALIGN(len) (((len) + WL1271_TX_ALIGN_TO - 1) & \ + ~(WL1271_TX_ALIGN_TO - 1)) +#define WL1271_TKIP_IV_SPACE 4 + +struct wl1271_tx_hw_descr { + /* Length of packet in words, including descriptor+header+data */ + __le16 length; + /* Number of extra memory blocks to allocate for this packet in + addition to the number of blocks derived from the packet length */ + u8 extra_mem_blocks; + /* Total number of memory blocks allocated by the host for this packet. + Must be equal or greater than the actual blocks number allocated by + HW!! */ + u8 total_mem_blocks; + /* Device time (in us) when the packet arrived to the driver */ + __le32 start_time; + /* Max delay in TUs until transmission. The last device time the + packet can be transmitted is: startTime+(1024*LifeTime) */ + __le16 life_time; + /* Bitwise fields - see TX_ATTR... definitions above. */ + __le16 tx_attr; + /* Packet identifier used also in the Tx-Result. 
*/ + u8 id; + /* The packet TID value (as User-Priority) */ + u8 tid; + /* Identifier of the remote STA in IBSS, 1 in infra-BSS */ + u8 aid; + u8 reserved; +} __attribute__ ((packed)); + +enum wl1271_tx_hw_res_status { + TX_SUCCESS = 0, + TX_HW_ERROR = 1, + TX_DISABLED = 2, + TX_RETRY_EXCEEDED = 3, + TX_TIMEOUT = 4, + TX_KEY_NOT_FOUND = 5, + TX_PEER_NOT_FOUND = 6, + TX_SESSION_MISMATCH = 7 +}; + +struct wl1271_tx_hw_res_descr { + /* Packet Identifier - same value used in the Tx descriptor.*/ + u8 id; + /* The status of the transmission, indicating success or one of + several possible reasons for failure. */ + u8 status; + /* Total air access duration including all retrys and overheads.*/ + __le16 medium_usage; + /* The time passed from host xfer to Tx-complete.*/ + __le32 fw_handling_time; + /* Total media delay + (from 1st EDCA AIFS counter until TX Complete). */ + __le32 medium_delay; + /* LS-byte of last TKIP seq-num (saved per AC for recovery). */ + u8 lsb_security_sequence_number; + /* Retry count - number of transmissions without successful ACK.*/ + u8 ack_failures; + /* The rate that succeeded getting ACK + (Valid only if status=SUCCESS). */ + u8 rate_class_index; + /* for 4-byte alignment. */ + u8 spare; +} __attribute__ ((packed)); + +struct wl1271_tx_hw_res_if { + __le32 tx_result_fw_counter; + __le32 tx_result_host_counter; + struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; +} __attribute__ ((packed)); + +static inline int wl1271_tx_get_queue(int queue) +{ + /* FIXME: use best effort until WMM is enabled */ + return CONF_TX_AC_BE; + + switch (queue) { + case 0: + return CONF_TX_AC_VO; + case 1: + return CONF_TX_AC_VI; + case 2: + return CONF_TX_AC_BE; + case 3: + return CONF_TX_AC_BK; + default: + return CONF_TX_AC_BE; + } +} + +/* wl1271 tx descriptor needs the tid and we need to convert it from ac */ +static inline int wl1271_tx_ac_to_tid(int ac) +{ + switch (ac) { + case 0: + return 0; + case 1: + return 2; + case 2: + return 4; + case 3: + return 6; + default: + return 0; + } +} + +void wl1271_tx_work(struct work_struct *work); +void wl1271_tx_complete(struct wl1271 *wl, u32 count); +void wl1271_tx_flush(struct wl1271 *wl); + +#endif diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h new file mode 100644 index 0000000..055d7bc --- /dev/null +++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h @@ -0,0 +1,156 @@ +#ifndef __WL12XX_80211_H__ +#define __WL12XX_80211_H__ + +#include /* ETH_ALEN */ + +/* RATES */ +#define IEEE80211_CCK_RATE_1MB 0x02 +#define IEEE80211_CCK_RATE_2MB 0x04 +#define IEEE80211_CCK_RATE_5MB 0x0B +#define IEEE80211_CCK_RATE_11MB 0x16 +#define IEEE80211_OFDM_RATE_6MB 0x0C +#define IEEE80211_OFDM_RATE_9MB 0x12 +#define IEEE80211_OFDM_RATE_12MB 0x18 +#define IEEE80211_OFDM_RATE_18MB 0x24 +#define IEEE80211_OFDM_RATE_24MB 0x30 +#define IEEE80211_OFDM_RATE_36MB 0x48 +#define IEEE80211_OFDM_RATE_48MB 0x60 +#define IEEE80211_OFDM_RATE_54MB 0x6C +#define IEEE80211_BASIC_RATE_MASK 0x80 + +#define IEEE80211_CCK_RATE_1MB_MASK (1<<0) +#define IEEE80211_CCK_RATE_2MB_MASK (1<<1) +#define IEEE80211_CCK_RATE_5MB_MASK (1<<2) +#define IEEE80211_CCK_RATE_11MB_MASK (1<<3) +#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4) +#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5) +#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6) +#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7) +#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8) +#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9) +#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10) +#define 
IEEE80211_OFDM_RATE_54MB_MASK (1<<11) + +#define IEEE80211_CCK_RATES_MASK 0x0000000F +#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \ + IEEE80211_CCK_RATE_2MB_MASK) +#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \ + IEEE80211_CCK_RATE_5MB_MASK | \ + IEEE80211_CCK_RATE_11MB_MASK) + +#define IEEE80211_OFDM_RATES_MASK 0x00000FF0 +#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \ + IEEE80211_OFDM_RATE_12MB_MASK | \ + IEEE80211_OFDM_RATE_24MB_MASK) +#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \ + IEEE80211_OFDM_RATE_9MB_MASK | \ + IEEE80211_OFDM_RATE_18MB_MASK | \ + IEEE80211_OFDM_RATE_36MB_MASK | \ + IEEE80211_OFDM_RATE_48MB_MASK | \ + IEEE80211_OFDM_RATE_54MB_MASK) +#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \ + IEEE80211_CCK_DEFAULT_RATES_MASK) + + +/* This really should be 8, but not for our firmware */ +#define MAX_SUPPORTED_RATES 32 +#define COUNTRY_STRING_LEN 3 +#define MAX_COUNTRY_TRIPLETS 32 + +/* Headers */ +struct ieee80211_header { + __le16 frame_ctl; + __le16 duration_id; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + __le16 seq_ctl; + u8 payload[0]; +} __attribute__ ((packed)); + +struct wl12xx_ie_header { + u8 id; + u8 len; +} __attribute__ ((packed)); + +/* IEs */ + +struct wl12xx_ie_ssid { + struct wl12xx_ie_header header; + char ssid[IW_ESSID_MAX_SIZE]; +} __attribute__ ((packed)); + +struct wl12xx_ie_rates { + struct wl12xx_ie_header header; + u8 rates[MAX_SUPPORTED_RATES]; +} __attribute__ ((packed)); + +struct wl12xx_ie_ds_params { + struct wl12xx_ie_header header; + u8 channel; +} __attribute__ ((packed)); + +struct country_triplet { + u8 channel; + u8 num_channels; + u8 max_tx_power; +} __attribute__ ((packed)); + +struct wl12xx_ie_country { + struct wl12xx_ie_header header; + u8 country_string[COUNTRY_STRING_LEN]; + struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; +} __attribute__ ((packed)); + + +/* Templates */ + +struct wl12xx_beacon_template { + struct ieee80211_header header; + __le32 time_stamp[2]; + __le16 beacon_interval; + __le16 capability; + struct wl12xx_ie_ssid ssid; + struct wl12xx_ie_rates rates; + struct wl12xx_ie_rates ext_rates; + struct wl12xx_ie_ds_params ds_params; + struct wl12xx_ie_country country; +} __attribute__ ((packed)); + +struct wl12xx_null_data_template { + struct ieee80211_header header; +} __attribute__ ((packed)); + +struct wl12xx_ps_poll_template { + __le16 fc; + __le16 aid; + u8 bssid[ETH_ALEN]; + u8 ta[ETH_ALEN]; +} __attribute__ ((packed)); + +struct wl12xx_qos_null_data_template { + struct ieee80211_header header; + __le16 qos_ctl; +} __attribute__ ((packed)); + +struct wl12xx_probe_req_template { + struct ieee80211_header header; + struct wl12xx_ie_ssid ssid; + struct wl12xx_ie_rates rates; + struct wl12xx_ie_rates ext_rates; +} __attribute__ ((packed)); + + +struct wl12xx_probe_resp_template { + struct ieee80211_header header; + __le32 time_stamp[2]; + __le16 beacon_interval; + __le16 capability; + struct wl12xx_ie_ssid ssid; + struct wl12xx_ie_rates rates; + struct wl12xx_ie_rates ext_rates; + struct wl12xx_ie_ds_params ds_params; + struct wl12xx_ie_country country; +} __attribute__ ((packed)); + +#endif diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile new file mode 100644 index 0000000..1907eaf --- /dev/null +++ b/drivers/net/wireless/zd1211rw/Makefile @@ -0,0 +1,11 @@ +obj-$(CONFIG_ZD1211RW) += zd1211rw.o + +zd1211rw-objs := 
zd_chip.o zd_mac.o \ + zd_rf_al2230.o zd_rf_rf2959.o \ + zd_rf_al7230b.o zd_rf_uw2453.o \ + zd_rf.o zd_usb.o + +ifeq ($(CONFIG_ZD1211RW_DEBUG),y) +EXTRA_CFLAGS += -DDEBUG +endif + diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c new file mode 100644 index 0000000..7ca95c4 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_chip.c @@ -0,0 +1,1499 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* This file implements all the hardware specific functions for the ZD1211 + * and ZD1211B chips. Support for the ZD1211B was possible after Timothy + * Legge sent me a ZD1211B device. Thank you Tim. -- Uli + */ + +#include +#include + +#include "zd_def.h" +#include "zd_chip.h" +#include "zd_mac.h" +#include "zd_rf.h" + +void zd_chip_init(struct zd_chip *chip, + struct ieee80211_hw *hw, + struct usb_interface *intf) +{ + memset(chip, 0, sizeof(*chip)); + mutex_init(&chip->mutex); + zd_usb_init(&chip->usb, hw, intf); + zd_rf_init(&chip->rf); +} + +void zd_chip_clear(struct zd_chip *chip) +{ + ZD_ASSERT(!mutex_is_locked(&chip->mutex)); + zd_usb_clear(&chip->usb); + zd_rf_clear(&chip->rf); + mutex_destroy(&chip->mutex); + ZD_MEMCLEAR(chip, sizeof(*chip)); +} + +static int scnprint_mac_oui(struct zd_chip *chip, char *buffer, size_t size) +{ + u8 *addr = zd_mac_get_perm_addr(zd_chip_to_mac(chip)); + return scnprintf(buffer, size, "%02x-%02x-%02x", + addr[0], addr[1], addr[2]); +} + +/* Prints an identifier line, which will support debugging. */ +static int scnprint_id(struct zd_chip *chip, char *buffer, size_t size) +{ + int i = 0; + + i = scnprintf(buffer, size, "zd1211%s chip ", + zd_chip_is_zd1211b(chip) ? "b" : ""); + i += zd_usb_scnprint_id(&chip->usb, buffer+i, size-i); + i += scnprintf(buffer+i, size-i, " "); + i += scnprint_mac_oui(chip, buffer+i, size-i); + i += scnprintf(buffer+i, size-i, " "); + i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i); + i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c%c%c", chip->pa_type, + chip->patch_cck_gain ? 'g' : '-', + chip->patch_cr157 ? '7' : '-', + chip->patch_6m_band_edge ? '6' : '-', + chip->new_phy_layout ? 'N' : '-', + chip->al2230s_bit ? 'S' : '-'); + return i; +} + +static void print_id(struct zd_chip *chip) +{ + char buffer[80]; + + scnprint_id(chip, buffer, sizeof(buffer)); + buffer[sizeof(buffer)-1] = 0; + dev_info(zd_chip_dev(chip), "%s\n", buffer); +} + +static zd_addr_t inc_addr(zd_addr_t addr) +{ + u16 a = (u16)addr; + /* Control registers use byte addressing, but everything else uses word + * addressing. */ + if ((a & 0xf000) == CR_START) + a += 2; + else + a += 1; + return (zd_addr_t)a; +} + +/* Read a variable number of 32-bit values. 
Parameter count is not allowed to + * exceed USB_MAX_IOREAD32_COUNT. + */ +int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr, + unsigned int count) +{ + int r; + int i; + zd_addr_t *a16; + u16 *v16; + unsigned int count16; + + if (count > USB_MAX_IOREAD32_COUNT) + return -EINVAL; + + /* Allocate a single memory block for values and addresses. */ + count16 = 2*count; + a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)), + GFP_KERNEL); + if (!a16) { + dev_dbg_f(zd_chip_dev(chip), + "error ENOMEM in allocation of a16\n"); + r = -ENOMEM; + goto out; + } + v16 = (u16 *)(a16 + count16); + + for (i = 0; i < count; i++) { + int j = 2*i; + /* We read the high word always first. */ + a16[j] = inc_addr(addr[i]); + a16[j+1] = addr[i]; + } + + r = zd_ioread16v_locked(chip, v16, a16, count16); + if (r) { + dev_dbg_f(zd_chip_dev(chip), + "error: zd_ioread16v_locked. Error number %d\n", r); + goto out; + } + + for (i = 0; i < count; i++) { + int j = 2*i; + values[i] = (v16[j] << 16) | v16[j+1]; + } + +out: + kfree((void *)a16); + return r; +} + +int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, + unsigned int count) +{ + int i, j, r; + struct zd_ioreq16 *ioreqs16; + unsigned int count16; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + + if (count == 0) + return 0; + if (count > USB_MAX_IOWRITE32_COUNT) + return -EINVAL; + + /* Allocate a single memory block for values and addresses. */ + count16 = 2*count; + ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_KERNEL); + if (!ioreqs16) { + r = -ENOMEM; + dev_dbg_f(zd_chip_dev(chip), + "error %d in ioreqs16 allocation\n", r); + goto out; + } + + for (i = 0; i < count; i++) { + j = 2*i; + /* We write the high word always first. */ + ioreqs16[j].value = ioreqs[i].value >> 16; + ioreqs16[j].addr = inc_addr(ioreqs[i].addr); + ioreqs16[j+1].value = ioreqs[i].value; + ioreqs16[j+1].addr = ioreqs[i].addr; + } + + r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16); +#ifdef DEBUG + if (r) { + dev_dbg_f(zd_chip_dev(chip), + "error %d in zd_usb_write16v\n", r); + } +#endif /* DEBUG */ +out: + kfree(ioreqs16); + return r; +} + +int zd_iowrite16a_locked(struct zd_chip *chip, + const struct zd_ioreq16 *ioreqs, unsigned int count) +{ + int r; + unsigned int i, j, t, max; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + for (i = 0; i < count; i += j + t) { + t = 0; + max = count-i; + if (max > USB_MAX_IOWRITE16_COUNT) + max = USB_MAX_IOWRITE16_COUNT; + for (j = 0; j < max; j++) { + if (!ioreqs[i+j].addr) { + t = 1; + break; + } + } + + r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j); + if (r) { + dev_dbg_f(zd_chip_dev(chip), + "error zd_usb_iowrite16v. Error number %d\n", + r); + return r; + } + } + + return 0; +} + +/* Writes a variable number of 32 bit registers. The functions will split + * that in several USB requests. A split can be forced by inserting an IO + * request with an zero address field. + */ +int zd_iowrite32a_locked(struct zd_chip *chip, + const struct zd_ioreq32 *ioreqs, unsigned int count) +{ + int r; + unsigned int i, j, t, max; + + for (i = 0; i < count; i += j + t) { + t = 0; + max = count-i; + if (max > USB_MAX_IOWRITE32_COUNT) + max = USB_MAX_IOWRITE32_COUNT; + for (j = 0; j < max; j++) { + if (!ioreqs[i+j].addr) { + t = 1; + break; + } + } + + r = _zd_iowrite32v_locked(chip, &ioreqs[i], j); + if (r) { + dev_dbg_f(zd_chip_dev(chip), + "error _zd_iowrite32v_locked." 
+ " Error number %d\n", r); + return r; + } + } + + return 0; +} + +int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value) +{ + int r; + + mutex_lock(&chip->mutex); + r = zd_ioread16_locked(chip, value, addr); + mutex_unlock(&chip->mutex); + return r; +} + +int zd_ioread32(struct zd_chip *chip, zd_addr_t addr, u32 *value) +{ + int r; + + mutex_lock(&chip->mutex); + r = zd_ioread32_locked(chip, value, addr); + mutex_unlock(&chip->mutex); + return r; +} + +int zd_iowrite16(struct zd_chip *chip, zd_addr_t addr, u16 value) +{ + int r; + + mutex_lock(&chip->mutex); + r = zd_iowrite16_locked(chip, value, addr); + mutex_unlock(&chip->mutex); + return r; +} + +int zd_iowrite32(struct zd_chip *chip, zd_addr_t addr, u32 value) +{ + int r; + + mutex_lock(&chip->mutex); + r = zd_iowrite32_locked(chip, value, addr); + mutex_unlock(&chip->mutex); + return r; +} + +int zd_ioread32v(struct zd_chip *chip, const zd_addr_t *addresses, + u32 *values, unsigned int count) +{ + int r; + + mutex_lock(&chip->mutex); + r = zd_ioread32v_locked(chip, values, addresses, count); + mutex_unlock(&chip->mutex); + return r; +} + +int zd_iowrite32a(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, + unsigned int count) +{ + int r; + + mutex_lock(&chip->mutex); + r = zd_iowrite32a_locked(chip, ioreqs, count); + mutex_unlock(&chip->mutex); + return r; +} + +static int read_pod(struct zd_chip *chip, u8 *rf_type) +{ + int r; + u32 value; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + r = zd_ioread32_locked(chip, &value, E2P_POD); + if (r) + goto error; + dev_dbg_f(zd_chip_dev(chip), "E2P_POD %#010x\n", value); + + /* FIXME: AL2230 handling (Bit 7 in POD) */ + *rf_type = value & 0x0f; + chip->pa_type = (value >> 16) & 0x0f; + chip->patch_cck_gain = (value >> 8) & 0x1; + chip->patch_cr157 = (value >> 13) & 0x1; + chip->patch_6m_band_edge = (value >> 21) & 0x1; + chip->new_phy_layout = (value >> 31) & 0x1; + chip->al2230s_bit = (value >> 7) & 0x1; + chip->link_led = ((value >> 4) & 1) ? LED1 : LED2; + chip->supports_tx_led = 1; + if (value & (1 << 24)) { /* LED scenario */ + if (value & (1 << 29)) + chip->supports_tx_led = 0; + } + + dev_dbg_f(zd_chip_dev(chip), + "RF %s %#01x PA type %#01x patch CCK %d patch CR157 %d " + "patch 6M %d new PHY %d link LED%d tx led %d\n", + zd_rf_name(*rf_type), *rf_type, + chip->pa_type, chip->patch_cck_gain, + chip->patch_cr157, chip->patch_6m_band_edge, + chip->new_phy_layout, + chip->link_led == LED1 ? 
1 : 2, + chip->supports_tx_led); + return 0; +error: + *rf_type = 0; + chip->pa_type = 0; + chip->patch_cck_gain = 0; + chip->patch_cr157 = 0; + chip->patch_6m_band_edge = 0; + chip->new_phy_layout = 0; + return r; +} + +/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and + * CR_MAC_ADDR_P2 must be overwritten + */ +int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) +{ + int r; + struct zd_ioreq32 reqs[2] = { + [0] = { .addr = CR_MAC_ADDR_P1 }, + [1] = { .addr = CR_MAC_ADDR_P2 }, + }; + + if (mac_addr) { + reqs[0].value = (mac_addr[3] << 24) + | (mac_addr[2] << 16) + | (mac_addr[1] << 8) + | mac_addr[0]; + reqs[1].value = (mac_addr[5] << 8) + | mac_addr[4]; + dev_dbg_f(zd_chip_dev(chip), "mac addr %pM\n", mac_addr); + } else { + dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n"); + } + + mutex_lock(&chip->mutex); + r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); + mutex_unlock(&chip->mutex); + return r; +} + +int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain) +{ + int r; + u32 value; + + mutex_lock(&chip->mutex); + r = zd_ioread32_locked(chip, &value, E2P_SUBID); + mutex_unlock(&chip->mutex); + if (r) + return r; + + *regdomain = value >> 16; + dev_dbg_f(zd_chip_dev(chip), "regdomain: %#04x\n", *regdomain); + + return 0; +} + +static int read_values(struct zd_chip *chip, u8 *values, size_t count, + zd_addr_t e2p_addr, u32 guard) +{ + int r; + int i; + u32 v; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + for (i = 0;;) { + r = zd_ioread32_locked(chip, &v, + (zd_addr_t)((u16)e2p_addr+i/2)); + if (r) + return r; + v -= guard; + if (i+4 < count) { + values[i++] = v; + values[i++] = v >> 8; + values[i++] = v >> 16; + values[i++] = v >> 24; + continue; + } + for (;i < count; i++) + values[i] = v >> (8*(i%3)); + return 0; + } +} + +static int read_pwr_cal_values(struct zd_chip *chip) +{ + return read_values(chip, chip->pwr_cal_values, + E2P_CHANNEL_COUNT, E2P_PWR_CAL_VALUE1, + 0); +} + +static int read_pwr_int_values(struct zd_chip *chip) +{ + return read_values(chip, chip->pwr_int_values, + E2P_CHANNEL_COUNT, E2P_PWR_INT_VALUE1, + E2P_PWR_INT_GUARD); +} + +static int read_ofdm_cal_values(struct zd_chip *chip) +{ + int r; + int i; + static const zd_addr_t addresses[] = { + E2P_36M_CAL_VALUE1, + E2P_48M_CAL_VALUE1, + E2P_54M_CAL_VALUE1, + }; + + for (i = 0; i < 3; i++) { + r = read_values(chip, chip->ofdm_cal_values[i], + E2P_CHANNEL_COUNT, addresses[i], 0); + if (r) + return r; + } + return 0; +} + +static int read_cal_int_tables(struct zd_chip *chip) +{ + int r; + + r = read_pwr_cal_values(chip); + if (r) + return r; + r = read_pwr_int_values(chip); + if (r) + return r; + r = read_ofdm_cal_values(chip); + if (r) + return r; + return 0; +} + +/* phy means physical registers */ +int zd_chip_lock_phy_regs(struct zd_chip *chip) +{ + int r; + u32 tmp; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + r = zd_ioread32_locked(chip, &tmp, CR_REG1); + if (r) { + dev_err(zd_chip_dev(chip), "error ioread32(CR_REG1): %d\n", r); + return r; + } + + tmp &= ~UNLOCK_PHY_REGS; + + r = zd_iowrite32_locked(chip, tmp, CR_REG1); + if (r) + dev_err(zd_chip_dev(chip), "error iowrite32(CR_REG1): %d\n", r); + return r; +} + +int zd_chip_unlock_phy_regs(struct zd_chip *chip) +{ + int r; + u32 tmp; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + r = zd_ioread32_locked(chip, &tmp, CR_REG1); + if (r) { + dev_err(zd_chip_dev(chip), + "error ioread32(CR_REG1): %d\n", r); + return r; + } + + tmp |= UNLOCK_PHY_REGS; + + r = zd_iowrite32_locked(chip, tmp, CR_REG1); + if (r) + 
dev_err(zd_chip_dev(chip), "error iowrite32(CR_REG1): %d\n", r); + return r; +} + +/* CR157 can be optionally patched by the EEPROM for original ZD1211 */ +static int patch_cr157(struct zd_chip *chip) +{ + int r; + u16 value; + + if (!chip->patch_cr157) + return 0; + + r = zd_ioread16_locked(chip, &value, E2P_PHY_REG); + if (r) + return r; + + dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value >> 8); + return zd_iowrite32_locked(chip, value >> 8, CR157); +} + +/* + * 6M band edge can be optionally overwritten for certain RF's + * Vendor driver says: for FCC regulation, enabled per HWFeature 6M band edge + * bit (for AL2230, AL2230S) + */ +static int patch_6m_band_edge(struct zd_chip *chip, u8 channel) +{ + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + if (!chip->patch_6m_band_edge) + return 0; + + return zd_rf_patch_6m_band_edge(&chip->rf, channel); +} + +/* Generic implementation of 6M band edge patching, used by most RFs via + * zd_rf_generic_patch_6m() */ +int zd_chip_generic_patch_6m_band(struct zd_chip *chip, int channel) +{ + struct zd_ioreq16 ioreqs[] = { + { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, + { CR47, 0x1e }, + }; + + /* FIXME: Channel 11 is not the edge for all regulatory domains. */ + if (channel == 1 || channel == 11) + ioreqs[0].value = 0x12; + + dev_dbg_f(zd_chip_dev(chip), "patching for channel %d\n", channel); + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int zd1211_hw_reset_phy(struct zd_chip *chip) +{ + static const struct zd_ioreq16 ioreqs[] = { + { CR0, 0x0a }, { CR1, 0x06 }, { CR2, 0x26 }, + { CR3, 0x38 }, { CR4, 0x80 }, { CR9, 0xa0 }, + { CR10, 0x81 }, { CR11, 0x00 }, { CR12, 0x7f }, + { CR13, 0x8c }, { CR14, 0x80 }, { CR15, 0x3d }, + { CR16, 0x20 }, { CR17, 0x1e }, { CR18, 0x0a }, + { CR19, 0x48 }, { CR20, 0x0c }, { CR21, 0x0c }, + { CR22, 0x23 }, { CR23, 0x90 }, { CR24, 0x14 }, + { CR25, 0x40 }, { CR26, 0x10 }, { CR27, 0x19 }, + { CR28, 0x7f }, { CR29, 0x80 }, { CR30, 0x4b }, + { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 }, + { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 }, + { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c }, + { CR40, 0x84 }, { CR41, 0x2a }, { CR42, 0x80 }, + { CR43, 0x10 }, { CR44, 0x12 }, { CR46, 0xff }, + { CR47, 0x1E }, { CR48, 0x26 }, { CR49, 0x5b }, + { CR64, 0xd0 }, { CR65, 0x04 }, { CR66, 0x58 }, + { CR67, 0xc9 }, { CR68, 0x88 }, { CR69, 0x41 }, + { CR70, 0x23 }, { CR71, 0x10 }, { CR72, 0xff }, + { CR73, 0x32 }, { CR74, 0x30 }, { CR75, 0x65 }, + { CR76, 0x41 }, { CR77, 0x1b }, { CR78, 0x30 }, + { CR79, 0x68 }, { CR80, 0x64 }, { CR81, 0x64 }, + { CR82, 0x00 }, { CR83, 0x00 }, { CR84, 0x00 }, + { CR85, 0x02 }, { CR86, 0x00 }, { CR87, 0x00 }, + { CR88, 0xff }, { CR89, 0xfc }, { CR90, 0x00 }, + { CR91, 0x00 }, { CR92, 0x00 }, { CR93, 0x08 }, + { CR94, 0x00 }, { CR95, 0x00 }, { CR96, 0xff }, + { CR97, 0xe7 }, { CR98, 0x00 }, { CR99, 0x00 }, + { CR100, 0x00 }, { CR101, 0xae }, { CR102, 0x02 }, + { CR103, 0x00 }, { CR104, 0x03 }, { CR105, 0x65 }, + { CR106, 0x04 }, { CR107, 0x00 }, { CR108, 0x0a }, + { CR109, 0xaa }, { CR110, 0xaa }, { CR111, 0x25 }, + { CR112, 0x25 }, { CR113, 0x00 }, { CR119, 0x1e }, + { CR125, 0x90 }, { CR126, 0x00 }, { CR127, 0x00 }, + { }, + { CR5, 0x00 }, { CR6, 0x00 }, { CR7, 0x00 }, + { CR8, 0x00 }, { CR9, 0x20 }, { CR12, 0xf0 }, + { CR20, 0x0e }, { CR21, 0x0e }, { CR27, 0x10 }, + { CR44, 0x33 }, { CR47, 0x1E }, { CR83, 0x24 }, + { CR84, 0x04 }, { CR85, 0x00 }, { CR86, 0x0C }, + { CR87, 0x12 }, { CR88, 0x0C }, { CR89, 0x00 }, + { CR90, 0x10 }, { CR91, 0x08 }, { CR93, 0x00 }, 
+ { CR94, 0x01 }, { CR95, 0x00 }, { CR96, 0x50 }, + { CR97, 0x37 }, { CR98, 0x35 }, { CR101, 0x13 }, + { CR102, 0x27 }, { CR103, 0x27 }, { CR104, 0x18 }, + { CR105, 0x12 }, { CR109, 0x27 }, { CR110, 0x27 }, + { CR111, 0x27 }, { CR112, 0x27 }, { CR113, 0x27 }, + { CR114, 0x27 }, { CR115, 0x26 }, { CR116, 0x24 }, + { CR117, 0xfc }, { CR118, 0xfa }, { CR120, 0x4f }, + { CR125, 0xaa }, { CR127, 0x03 }, { CR128, 0x14 }, + { CR129, 0x12 }, { CR130, 0x10 }, { CR131, 0x0C }, + { CR136, 0xdf }, { CR137, 0x40 }, { CR138, 0xa0 }, + { CR139, 0xb0 }, { CR140, 0x99 }, { CR141, 0x82 }, + { CR142, 0x54 }, { CR143, 0x1c }, { CR144, 0x6c }, + { CR147, 0x07 }, { CR148, 0x4c }, { CR149, 0x50 }, + { CR150, 0x0e }, { CR151, 0x18 }, { CR160, 0xfe }, + { CR161, 0xee }, { CR162, 0xaa }, { CR163, 0xfa }, + { CR164, 0xfa }, { CR165, 0xea }, { CR166, 0xbe }, + { CR167, 0xbe }, { CR168, 0x6a }, { CR169, 0xba }, + { CR170, 0xba }, { CR171, 0xba }, + /* Note: CR204 must lead the CR203 */ + { CR204, 0x7d }, + { }, + { CR203, 0x30 }, + }; + + int r, t; + + dev_dbg_f(zd_chip_dev(chip), "\n"); + + r = zd_chip_lock_phy_regs(chip); + if (r) + goto out; + + r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); + if (r) + goto unlock; + + r = patch_cr157(chip); +unlock: + t = zd_chip_unlock_phy_regs(chip); + if (t && !r) + r = t; +out: + return r; +} + +static int zd1211b_hw_reset_phy(struct zd_chip *chip) +{ + static const struct zd_ioreq16 ioreqs[] = { + { CR0, 0x14 }, { CR1, 0x06 }, { CR2, 0x26 }, + { CR3, 0x38 }, { CR4, 0x80 }, { CR9, 0xe0 }, + { CR10, 0x81 }, + /* power control { { CR11, 1 << 6 }, */ + { CR11, 0x00 }, + { CR12, 0xf0 }, { CR13, 0x8c }, { CR14, 0x80 }, + { CR15, 0x3d }, { CR16, 0x20 }, { CR17, 0x1e }, + { CR18, 0x0a }, { CR19, 0x48 }, + { CR20, 0x10 }, /* Org:0x0E, ComTrend:RalLink AP */ + { CR21, 0x0e }, { CR22, 0x23 }, { CR23, 0x90 }, + { CR24, 0x14 }, { CR25, 0x40 }, { CR26, 0x10 }, + { CR27, 0x10 }, { CR28, 0x7f }, { CR29, 0x80 }, + { CR30, 0x4b }, /* ASIC/FWT, no jointly decoder */ + { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 }, + { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 }, + { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c }, + { CR40, 0x84 }, { CR41, 0x2a }, { CR42, 0x80 }, + { CR43, 0x10 }, { CR44, 0x33 }, { CR46, 0xff }, + { CR47, 0x1E }, { CR48, 0x26 }, { CR49, 0x5b }, + { CR64, 0xd0 }, { CR65, 0x04 }, { CR66, 0x58 }, + { CR67, 0xc9 }, { CR68, 0x88 }, { CR69, 0x41 }, + { CR70, 0x23 }, { CR71, 0x10 }, { CR72, 0xff }, + { CR73, 0x32 }, { CR74, 0x30 }, { CR75, 0x65 }, + { CR76, 0x41 }, { CR77, 0x1b }, { CR78, 0x30 }, + { CR79, 0xf0 }, { CR80, 0x64 }, { CR81, 0x64 }, + { CR82, 0x00 }, { CR83, 0x24 }, { CR84, 0x04 }, + { CR85, 0x00 }, { CR86, 0x0c }, { CR87, 0x12 }, + { CR88, 0x0c }, { CR89, 0x00 }, { CR90, 0x58 }, + { CR91, 0x04 }, { CR92, 0x00 }, { CR93, 0x00 }, + { CR94, 0x01 }, + { CR95, 0x20 }, /* ZD1211B */ + { CR96, 0x50 }, { CR97, 0x37 }, { CR98, 0x35 }, + { CR99, 0x00 }, { CR100, 0x01 }, { CR101, 0x13 }, + { CR102, 0x27 }, { CR103, 0x27 }, { CR104, 0x18 }, + { CR105, 0x12 }, { CR106, 0x04 }, { CR107, 0x00 }, + { CR108, 0x0a }, { CR109, 0x27 }, { CR110, 0x27 }, + { CR111, 0x27 }, { CR112, 0x27 }, { CR113, 0x27 }, + { CR114, 0x27 }, { CR115, 0x26 }, { CR116, 0x24 }, + { CR117, 0xfc }, { CR118, 0xfa }, { CR119, 0x1e }, + { CR125, 0x90 }, { CR126, 0x00 }, { CR127, 0x00 }, + { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, + { CR131, 0x0c }, { CR136, 0xdf }, { CR137, 0xa0 }, + { CR138, 0xa8 }, { CR139, 0xb4 }, { CR140, 0x98 }, + { CR141, 0x82 }, { CR142, 0x53 }, { CR143, 0x1c }, + { CR144, 
0x6c }, { CR147, 0x07 }, { CR148, 0x40 }, + { CR149, 0x40 }, /* Org:0x50 ComTrend:RalLink AP */ + { CR150, 0x14 }, /* Org:0x0E ComTrend:RalLink AP */ + { CR151, 0x18 }, { CR159, 0x70 }, { CR160, 0xfe }, + { CR161, 0xee }, { CR162, 0xaa }, { CR163, 0xfa }, + { CR164, 0xfa }, { CR165, 0xea }, { CR166, 0xbe }, + { CR167, 0xbe }, { CR168, 0x6a }, { CR169, 0xba }, + { CR170, 0xba }, { CR171, 0xba }, + /* Note: CR204 must lead the CR203 */ + { CR204, 0x7d }, + {}, + { CR203, 0x30 }, + }; + + int r, t; + + dev_dbg_f(zd_chip_dev(chip), "\n"); + + r = zd_chip_lock_phy_regs(chip); + if (r) + goto out; + + r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); + t = zd_chip_unlock_phy_regs(chip); + if (t && !r) + r = t; +out: + return r; +} + +static int hw_reset_phy(struct zd_chip *chip) +{ + return zd_chip_is_zd1211b(chip) ? zd1211b_hw_reset_phy(chip) : + zd1211_hw_reset_phy(chip); +} + +static int zd1211_hw_init_hmac(struct zd_chip *chip) +{ + static const struct zd_ioreq32 ioreqs[] = { + { CR_ZD1211_RETRY_MAX, ZD1211_RETRY_COUNT }, + { CR_RX_THRESHOLD, 0x000c0640 }, + }; + + dev_dbg_f(zd_chip_dev(chip), "\n"); + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int zd1211b_hw_init_hmac(struct zd_chip *chip) +{ + static const struct zd_ioreq32 ioreqs[] = { + { CR_ZD1211B_RETRY_MAX, ZD1211B_RETRY_COUNT }, + { CR_ZD1211B_CWIN_MAX_MIN_AC0, 0x007f003f }, + { CR_ZD1211B_CWIN_MAX_MIN_AC1, 0x007f003f }, + { CR_ZD1211B_CWIN_MAX_MIN_AC2, 0x003f001f }, + { CR_ZD1211B_CWIN_MAX_MIN_AC3, 0x001f000f }, + { CR_ZD1211B_AIFS_CTL1, 0x00280028 }, + { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, + { CR_ZD1211B_TXOP, 0x01800824 }, + { CR_RX_THRESHOLD, 0x000c0eff, }, + }; + + dev_dbg_f(zd_chip_dev(chip), "\n"); + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int hw_init_hmac(struct zd_chip *chip) +{ + int r; + static const struct zd_ioreq32 ioreqs[] = { + { CR_ACK_TIMEOUT_EXT, 0x20 }, + { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, + { CR_SNIFFER_ON, 0 }, + { CR_RX_FILTER, STA_RX_FILTER }, + { CR_GROUP_HASH_P1, 0x00 }, + { CR_GROUP_HASH_P2, 0x80000000 }, + { CR_REG1, 0xa4 }, + { CR_ADDA_PWR_DWN, 0x7f }, + { CR_BCN_PLCP_CFG, 0x00f00401 }, + { CR_PHY_DELAY, 0x00 }, + { CR_ACK_TIMEOUT_EXT, 0x80 }, + { CR_ADDA_PWR_DWN, 0x00 }, + { CR_ACK_TIME_80211, 0x100 }, + { CR_RX_PE_DELAY, 0x70 }, + { CR_PS_CTRL, 0x10000000 }, + { CR_RTS_CTS_RATE, 0x02030203 }, + { CR_AFTER_PNP, 0x1 }, + { CR_WEP_PROTECT, 0x114 }, + { CR_IFS_VALUE, IFS_VALUE_DEFAULT }, + { CR_CAM_MODE, MODE_AP_WDS}, + }; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); + if (r) + return r; + + return zd_chip_is_zd1211b(chip) ? 
+ zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip); +} + +struct aw_pt_bi { + u32 atim_wnd_period; + u32 pre_tbtt; + u32 beacon_interval; +}; + +static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) +{ + int r; + static const zd_addr_t aw_pt_bi_addr[] = + { CR_ATIM_WND_PERIOD, CR_PRE_TBTT, CR_BCN_INTERVAL }; + u32 values[3]; + + r = zd_ioread32v_locked(chip, values, (const zd_addr_t *)aw_pt_bi_addr, + ARRAY_SIZE(aw_pt_bi_addr)); + if (r) { + memset(s, 0, sizeof(*s)); + return r; + } + + s->atim_wnd_period = values[0]; + s->pre_tbtt = values[1]; + s->beacon_interval = values[2]; + return 0; +} + +static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) +{ + struct zd_ioreq32 reqs[3]; + + if (s->beacon_interval <= 5) + s->beacon_interval = 5; + if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval) + s->pre_tbtt = s->beacon_interval - 1; + if (s->atim_wnd_period >= s->pre_tbtt) + s->atim_wnd_period = s->pre_tbtt - 1; + + reqs[0].addr = CR_ATIM_WND_PERIOD; + reqs[0].value = s->atim_wnd_period; + reqs[1].addr = CR_PRE_TBTT; + reqs[1].value = s->pre_tbtt; + reqs[2].addr = CR_BCN_INTERVAL; + reqs[2].value = s->beacon_interval; + + return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); +} + + +static int set_beacon_interval(struct zd_chip *chip, u32 interval) +{ + int r; + struct aw_pt_bi s; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + r = get_aw_pt_bi(chip, &s); + if (r) + return r; + s.beacon_interval = interval; + return set_aw_pt_bi(chip, &s); +} + +int zd_set_beacon_interval(struct zd_chip *chip, u32 interval) +{ + int r; + + mutex_lock(&chip->mutex); + r = set_beacon_interval(chip, interval); + mutex_unlock(&chip->mutex); + return r; +} + +static int hw_init(struct zd_chip *chip) +{ + int r; + + dev_dbg_f(zd_chip_dev(chip), "\n"); + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + r = hw_reset_phy(chip); + if (r) + return r; + + r = hw_init_hmac(chip); + if (r) + return r; + + return set_beacon_interval(chip, 100); +} + +static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset) +{ + return (zd_addr_t)((u16)chip->fw_regs_base + offset); +} + +#ifdef DEBUG +static int dump_cr(struct zd_chip *chip, const zd_addr_t addr, + const char *addr_string) +{ + int r; + u32 value; + + r = zd_ioread32_locked(chip, &value, addr); + if (r) { + dev_dbg_f(zd_chip_dev(chip), + "error reading %s. 
Error number %d\n", addr_string, r); + return r; + } + + dev_dbg_f(zd_chip_dev(chip), "%s %#010x\n", + addr_string, (unsigned int)value); + return 0; +} + +static int test_init(struct zd_chip *chip) +{ + int r; + + r = dump_cr(chip, CR_AFTER_PNP, "CR_AFTER_PNP"); + if (r) + return r; + r = dump_cr(chip, CR_GPI_EN, "CR_GPI_EN"); + if (r) + return r; + return dump_cr(chip, CR_INTERRUPT, "CR_INTERRUPT"); +} + +static void dump_fw_registers(struct zd_chip *chip) +{ + const zd_addr_t addr[4] = { + fw_reg_addr(chip, FW_REG_FIRMWARE_VER), + fw_reg_addr(chip, FW_REG_USB_SPEED), + fw_reg_addr(chip, FW_REG_FIX_TX_RATE), + fw_reg_addr(chip, FW_REG_LED_LINK_STATUS), + }; + + int r; + u16 values[4]; + + r = zd_ioread16v_locked(chip, values, (const zd_addr_t*)addr, + ARRAY_SIZE(addr)); + if (r) { + dev_dbg_f(zd_chip_dev(chip), "error %d zd_ioread16v_locked\n", + r); + return; + } + + dev_dbg_f(zd_chip_dev(chip), "FW_FIRMWARE_VER %#06hx\n", values[0]); + dev_dbg_f(zd_chip_dev(chip), "FW_USB_SPEED %#06hx\n", values[1]); + dev_dbg_f(zd_chip_dev(chip), "FW_FIX_TX_RATE %#06hx\n", values[2]); + dev_dbg_f(zd_chip_dev(chip), "FW_LINK_STATUS %#06hx\n", values[3]); +} +#endif /* DEBUG */ + +static int print_fw_version(struct zd_chip *chip) +{ + int r; + u16 version; + + r = zd_ioread16_locked(chip, &version, + fw_reg_addr(chip, FW_REG_FIRMWARE_VER)); + if (r) + return r; + + dev_info(zd_chip_dev(chip),"firmware version %04hx\n", version); + return 0; +} + +static int set_mandatory_rates(struct zd_chip *chip, int gmode) +{ + u32 rates; + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + /* This sets the mandatory rates, which only depend from the standard + * that the device is supporting. Until further notice we should try + * to support 802.11g also for full speed USB. + */ + if (!gmode) + rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M; + else + rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M| + CR_RATE_6M|CR_RATE_12M|CR_RATE_24M; + + return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL); +} + +int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip, + int preamble) +{ + u32 value = 0; + + dev_dbg_f(zd_chip_dev(chip), "preamble=%x\n", preamble); + value |= preamble << RTSCTS_SH_RTS_PMB_TYPE; + value |= preamble << RTSCTS_SH_CTS_PMB_TYPE; + + /* We always send 11M RTS/self-CTS messages, like the vendor driver. 
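+ * (ZD_CCK_RATE_11M is the highest CCK rate, so these protection frames stay decodable by 802.11b-only stations.)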
*/ + value |= ZD_PURE_RATE(ZD_CCK_RATE_11M) << RTSCTS_SH_RTS_RATE; + value |= ZD_RX_CCK << RTSCTS_SH_RTS_MOD_TYPE; + value |= ZD_PURE_RATE(ZD_CCK_RATE_11M) << RTSCTS_SH_CTS_RATE; + value |= ZD_RX_CCK << RTSCTS_SH_CTS_MOD_TYPE; + + return zd_iowrite32_locked(chip, value, CR_RTS_CTS_RATE); +} + +int zd_chip_enable_hwint(struct zd_chip *chip) +{ + int r; + + mutex_lock(&chip->mutex); + r = zd_iowrite32_locked(chip, HWINT_ENABLED, CR_INTERRUPT); + mutex_unlock(&chip->mutex); + return r; +} + +static int disable_hwint(struct zd_chip *chip) +{ + return zd_iowrite32_locked(chip, HWINT_DISABLED, CR_INTERRUPT); +} + +int zd_chip_disable_hwint(struct zd_chip *chip) +{ + int r; + + mutex_lock(&chip->mutex); + r = disable_hwint(chip); + mutex_unlock(&chip->mutex); + return r; +} + +static int read_fw_regs_offset(struct zd_chip *chip) +{ + int r; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + r = zd_ioread16_locked(chip, (u16*)&chip->fw_regs_base, + FWRAW_REGS_ADDR); + if (r) + return r; + dev_dbg_f(zd_chip_dev(chip), "fw_regs_base: %#06hx\n", + (u16)chip->fw_regs_base); + + return 0; +} + +/* Read mac address using pre-firmware interface */ +int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr) +{ + dev_dbg_f(zd_chip_dev(chip), "\n"); + return zd_usb_read_fw(&chip->usb, E2P_MAC_ADDR_P1, addr, + ETH_ALEN); +} + +int zd_chip_init_hw(struct zd_chip *chip) +{ + int r; + u8 rf_type; + + dev_dbg_f(zd_chip_dev(chip), "\n"); + + mutex_lock(&chip->mutex); + +#ifdef DEBUG + r = test_init(chip); + if (r) + goto out; +#endif + r = zd_iowrite32_locked(chip, 1, CR_AFTER_PNP); + if (r) + goto out; + + r = read_fw_regs_offset(chip); + if (r) + goto out; + + /* GPI is always disabled, also in the other driver. + */ + r = zd_iowrite32_locked(chip, 0, CR_GPI_EN); + if (r) + goto out; + r = zd_iowrite32_locked(chip, CWIN_SIZE, CR_CWMIN_CWMAX); + if (r) + goto out; + /* Currently we support IEEE 802.11g for full and high speed USB. + * It might be discussed, whether we should suppport pure b mode for + * full speed USB. + */ + r = set_mandatory_rates(chip, 1); + if (r) + goto out; + /* Disabling interrupts is certainly a smart thing here. 
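+ * Hardware interrupts are presumably re-enabled later via zd_chip_enable_hwint() once the interface is brought up.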
+ */ + r = disable_hwint(chip); + if (r) + goto out; + r = read_pod(chip, &rf_type); + if (r) + goto out; + r = hw_init(chip); + if (r) + goto out; + r = zd_rf_init_hw(&chip->rf, rf_type); + if (r) + goto out; + + r = print_fw_version(chip); + if (r) + goto out; + +#ifdef DEBUG + dump_fw_registers(chip); + r = test_init(chip); + if (r) + goto out; +#endif /* DEBUG */ + + r = read_cal_int_tables(chip); + if (r) + goto out; + + print_id(chip); +out: + mutex_unlock(&chip->mutex); + return r; +} + +static int update_pwr_int(struct zd_chip *chip, u8 channel) +{ + u8 value = chip->pwr_int_values[channel - 1]; + return zd_iowrite16_locked(chip, value, CR31); +} + +static int update_pwr_cal(struct zd_chip *chip, u8 channel) +{ + u8 value = chip->pwr_cal_values[channel-1]; + return zd_iowrite16_locked(chip, value, CR68); +} + +static int update_ofdm_cal(struct zd_chip *chip, u8 channel) +{ + struct zd_ioreq16 ioreqs[3]; + + ioreqs[0].addr = CR67; + ioreqs[0].value = chip->ofdm_cal_values[OFDM_36M_INDEX][channel-1]; + ioreqs[1].addr = CR66; + ioreqs[1].value = chip->ofdm_cal_values[OFDM_48M_INDEX][channel-1]; + ioreqs[2].addr = CR65; + ioreqs[2].value = chip->ofdm_cal_values[OFDM_54M_INDEX][channel-1]; + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int update_channel_integration_and_calibration(struct zd_chip *chip, + u8 channel) +{ + int r; + + if (!zd_rf_should_update_pwr_int(&chip->rf)) + return 0; + + r = update_pwr_int(chip, channel); + if (r) + return r; + if (zd_chip_is_zd1211b(chip)) { + static const struct zd_ioreq16 ioreqs[] = { + { CR69, 0x28 }, + {}, + { CR69, 0x2a }, + }; + + r = update_ofdm_cal(chip, channel); + if (r) + return r; + r = update_pwr_cal(chip, channel); + if (r) + return r; + r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); + if (r) + return r; + } + + return 0; +} + +/* The CCK baseband gain can be optionally patched by the EEPROM */ +static int patch_cck_gain(struct zd_chip *chip) +{ + int r; + u32 value; + + if (!chip->patch_cck_gain || !zd_rf_should_patch_cck_gain(&chip->rf)) + return 0; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + r = zd_ioread32_locked(chip, &value, E2P_PHY_REG); + if (r) + return r; + dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value & 0xff); + return zd_iowrite16_locked(chip, value & 0xff, CR47); +} + +int zd_chip_set_channel(struct zd_chip *chip, u8 channel) +{ + int r, t; + + mutex_lock(&chip->mutex); + r = zd_chip_lock_phy_regs(chip); + if (r) + goto out; + r = zd_rf_set_channel(&chip->rf, channel); + if (r) + goto unlock; + r = update_channel_integration_and_calibration(chip, channel); + if (r) + goto unlock; + r = patch_cck_gain(chip); + if (r) + goto unlock; + r = patch_6m_band_edge(chip, channel); + if (r) + goto unlock; + r = zd_iowrite32_locked(chip, 0, CR_CONFIG_PHILIPS); +unlock: + t = zd_chip_unlock_phy_regs(chip); + if (t && !r) + r = t; +out: + mutex_unlock(&chip->mutex); + return r; +} + +u8 zd_chip_get_channel(struct zd_chip *chip) +{ + u8 channel; + + mutex_lock(&chip->mutex); + channel = chip->rf.channel; + mutex_unlock(&chip->mutex); + return channel; +} + +int zd_chip_control_leds(struct zd_chip *chip, enum led_status status) +{ + const zd_addr_t a[] = { + fw_reg_addr(chip, FW_REG_LED_LINK_STATUS), + CR_LED, + }; + + int r; + u16 v[ARRAY_SIZE(a)]; + struct zd_ioreq16 ioreqs[ARRAY_SIZE(a)] = { + [0] = { fw_reg_addr(chip, FW_REG_LED_LINK_STATUS) }, + [1] = { CR_LED }, + }; + u16 other_led; + + mutex_lock(&chip->mutex); + r = zd_ioread16v_locked(chip, v, (const zd_addr_t *)a, 
ARRAY_SIZE(a)); + if (r) + goto out; + + other_led = chip->link_led == LED1 ? LED2 : LED1; + + switch (status) { + case ZD_LED_OFF: + ioreqs[0].value = FW_LINK_OFF; + ioreqs[1].value = v[1] & ~(LED1|LED2); + break; + case ZD_LED_SCANNING: + ioreqs[0].value = FW_LINK_OFF; + ioreqs[1].value = v[1] & ~other_led; + if (get_seconds() % 3 == 0) { + ioreqs[1].value &= ~chip->link_led; + } else { + ioreqs[1].value |= chip->link_led; + } + break; + case ZD_LED_ASSOCIATED: + ioreqs[0].value = FW_LINK_TX; + ioreqs[1].value = v[1] & ~other_led; + ioreqs[1].value |= chip->link_led; + break; + default: + r = -EINVAL; + goto out; + } + + if (v[0] != ioreqs[0].value || v[1] != ioreqs[1].value) { + r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); + if (r) + goto out; + } + r = 0; +out: + mutex_unlock(&chip->mutex); + return r; +} + +int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates) +{ + int r; + + if (cr_rates & ~(CR_RATES_80211B|CR_RATES_80211G)) + return -EINVAL; + + mutex_lock(&chip->mutex); + r = zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL); + mutex_unlock(&chip->mutex); + return r; +} + +static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame) +{ + return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame); +} + +/** + * zd_rx_rate - report zd-rate + * @rx_frame - received frame + * @rx_status - rx_status as given by the device + * + * This function converts the rate as encoded in the received packet to the + * zd-rate, we are using on other places in the driver. + */ +u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status) +{ + u8 zd_rate; + if (status->frame_status & ZD_RX_OFDM) { + zd_rate = zd_rate_from_ofdm_plcp_header(rx_frame); + } else { + switch (zd_cck_plcp_header_signal(rx_frame)) { + case ZD_CCK_PLCP_SIGNAL_1M: + zd_rate = ZD_CCK_RATE_1M; + break; + case ZD_CCK_PLCP_SIGNAL_2M: + zd_rate = ZD_CCK_RATE_2M; + break; + case ZD_CCK_PLCP_SIGNAL_5M5: + zd_rate = ZD_CCK_RATE_5_5M; + break; + case ZD_CCK_PLCP_SIGNAL_11M: + zd_rate = ZD_CCK_RATE_11M; + break; + default: + zd_rate = 0; + } + } + + return zd_rate; +} + +int zd_chip_switch_radio_on(struct zd_chip *chip) +{ + int r; + + mutex_lock(&chip->mutex); + r = zd_switch_radio_on(&chip->rf); + mutex_unlock(&chip->mutex); + return r; +} + +int zd_chip_switch_radio_off(struct zd_chip *chip) +{ + int r; + + mutex_lock(&chip->mutex); + r = zd_switch_radio_off(&chip->rf); + mutex_unlock(&chip->mutex); + return r; +} + +int zd_chip_enable_int(struct zd_chip *chip) +{ + int r; + + mutex_lock(&chip->mutex); + r = zd_usb_enable_int(&chip->usb); + mutex_unlock(&chip->mutex); + return r; +} + +void zd_chip_disable_int(struct zd_chip *chip) +{ + mutex_lock(&chip->mutex); + zd_usb_disable_int(&chip->usb); + mutex_unlock(&chip->mutex); +} + +int zd_chip_enable_rxtx(struct zd_chip *chip) +{ + int r; + + mutex_lock(&chip->mutex); + zd_usb_enable_tx(&chip->usb); + r = zd_usb_enable_rx(&chip->usb); + mutex_unlock(&chip->mutex); + return r; +} + +void zd_chip_disable_rxtx(struct zd_chip *chip) +{ + mutex_lock(&chip->mutex); + zd_usb_disable_rx(&chip->usb); + zd_usb_disable_tx(&chip->usb); + mutex_unlock(&chip->mutex); +} + +int zd_rfwritev_locked(struct zd_chip *chip, + const u32* values, unsigned int count, u8 bits) +{ + int r; + unsigned int i; + + for (i = 0; i < count; i++) { + r = zd_rfwrite_locked(chip, values[i], bits); + if (r) + return r; + } + + return 0; +} + +/* + * We can optionally program the RF directly through CR regs, if supported by + * the hardware. This is much faster than the older method. 
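+ * The RF word is written as three bytes below: CR244 takes bits 23-16, CR243 bits 15-8 and CR242 bits 7-0.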
+ */ +int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value) +{ + struct zd_ioreq16 ioreqs[] = { + { CR244, (value >> 16) & 0xff }, + { CR243, (value >> 8) & 0xff }, + { CR242, value & 0xff }, + }; + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +int zd_rfwritev_cr_locked(struct zd_chip *chip, + const u32 *values, unsigned int count) +{ + int r; + unsigned int i; + + for (i = 0; i < count; i++) { + r = zd_rfwrite_cr_locked(chip, values[i]); + if (r) + return r; + } + + return 0; +} + +int zd_chip_set_multicast_hash(struct zd_chip *chip, + struct zd_mc_hash *hash) +{ + struct zd_ioreq32 ioreqs[] = { + { CR_GROUP_HASH_P1, hash->low }, + { CR_GROUP_HASH_P2, hash->high }, + }; + + return zd_iowrite32a(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +u64 zd_chip_get_tsf(struct zd_chip *chip) +{ + int r; + static const zd_addr_t aw_pt_bi_addr[] = + { CR_TSF_LOW_PART, CR_TSF_HIGH_PART }; + u32 values[2]; + u64 tsf; + + mutex_lock(&chip->mutex); + r = zd_ioread32v_locked(chip, values, (const zd_addr_t *)aw_pt_bi_addr, + ARRAY_SIZE(aw_pt_bi_addr)); + mutex_unlock(&chip->mutex); + if (r) + return 0; + + tsf = values[1]; + tsf = (tsf << 32) | values[0]; + + return tsf; +} diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h new file mode 100644 index 0000000..f8bbf7d --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_chip.h @@ -0,0 +1,968 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ZD_CHIP_H +#define _ZD_CHIP_H + +#include "zd_rf.h" +#include "zd_usb.h" + +/* Header for the Media Access Controller (MAC) and the Baseband Processor + * (BBP). It appears that the ZD1211 wraps the old ZD1205 with USB glue and + * adds a processor for handling the USB protocol. 
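+ * Host register access therefore goes over USB transfers (see the zd_usb_ioread16v()/zd_usb_iowrite16v() helpers used by the inline wrappers below).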
+ */ + +/* Address space */ +enum { + /* CONTROL REGISTERS */ + CR_START = 0x9000, + + + /* FIRMWARE */ + FW_START = 0xee00, + + + /* EEPROM */ + E2P_START = 0xf800, + E2P_LEN = 0x800, + + /* EEPROM layout */ + E2P_LOAD_CODE_LEN = 0xe, /* base 0xf800 */ + E2P_LOAD_VECT_LEN = 0x9, /* base 0xf80e */ + /* E2P_DATA indexes into this */ + E2P_DATA_LEN = 0x7e, /* base 0xf817 */ + E2P_BOOT_CODE_LEN = 0x760, /* base 0xf895 */ + E2P_INTR_VECT_LEN = 0xb, /* base 0xfff5 */ + + /* Some precomputed offsets into the EEPROM */ + E2P_DATA_OFFSET = E2P_LOAD_CODE_LEN + E2P_LOAD_VECT_LEN, + E2P_BOOT_CODE_OFFSET = E2P_DATA_OFFSET + E2P_DATA_LEN, +}; + +#define CTL_REG(offset) ((zd_addr_t)(CR_START + (offset))) +#define E2P_DATA(offset) ((zd_addr_t)(E2P_START + E2P_DATA_OFFSET + (offset))) +#define FWRAW_DATA(offset) ((zd_addr_t)(FW_START + (offset))) + +/* 8-bit hardware registers */ +#define CR0 CTL_REG(0x0000) +#define CR1 CTL_REG(0x0004) +#define CR2 CTL_REG(0x0008) +#define CR3 CTL_REG(0x000C) + +#define CR5 CTL_REG(0x0010) +/* bit 5: if set short preamble used + * bit 6: filter band - Japan channel 14 on, else off + */ +#define CR6 CTL_REG(0x0014) +#define CR7 CTL_REG(0x0018) +#define CR8 CTL_REG(0x001C) + +#define CR4 CTL_REG(0x0020) + +#define CR9 CTL_REG(0x0024) +/* bit 2: antenna switch (together with CR10) */ +#define CR10 CTL_REG(0x0028) +/* bit 1: antenna switch (together with CR9) + * RF2959 controls with CR11 radion on and off + */ +#define CR11 CTL_REG(0x002C) +/* bit 6: TX power control for OFDM + * RF2959 controls with CR10 radio on and off + */ +#define CR12 CTL_REG(0x0030) +#define CR13 CTL_REG(0x0034) +#define CR14 CTL_REG(0x0038) +#define CR15 CTL_REG(0x003C) +#define CR16 CTL_REG(0x0040) +#define CR17 CTL_REG(0x0044) +#define CR18 CTL_REG(0x0048) +#define CR19 CTL_REG(0x004C) +#define CR20 CTL_REG(0x0050) +#define CR21 CTL_REG(0x0054) +#define CR22 CTL_REG(0x0058) +#define CR23 CTL_REG(0x005C) +#define CR24 CTL_REG(0x0060) /* CCA threshold */ +#define CR25 CTL_REG(0x0064) +#define CR26 CTL_REG(0x0068) +#define CR27 CTL_REG(0x006C) +#define CR28 CTL_REG(0x0070) +#define CR29 CTL_REG(0x0074) +#define CR30 CTL_REG(0x0078) +#define CR31 CTL_REG(0x007C) /* TX power control for RF in CCK mode */ +#define CR32 CTL_REG(0x0080) +#define CR33 CTL_REG(0x0084) +#define CR34 CTL_REG(0x0088) +#define CR35 CTL_REG(0x008C) +#define CR36 CTL_REG(0x0090) +#define CR37 CTL_REG(0x0094) +#define CR38 CTL_REG(0x0098) +#define CR39 CTL_REG(0x009C) +#define CR40 CTL_REG(0x00A0) +#define CR41 CTL_REG(0x00A4) +#define CR42 CTL_REG(0x00A8) +#define CR43 CTL_REG(0x00AC) +#define CR44 CTL_REG(0x00B0) +#define CR45 CTL_REG(0x00B4) +#define CR46 CTL_REG(0x00B8) +#define CR47 CTL_REG(0x00BC) /* CCK baseband gain + * (patch value might be in EEPROM) + */ +#define CR48 CTL_REG(0x00C0) +#define CR49 CTL_REG(0x00C4) +#define CR50 CTL_REG(0x00C8) +#define CR51 CTL_REG(0x00CC) /* TX power control for RF in 6-36M modes */ +#define CR52 CTL_REG(0x00D0) /* TX power control for RF in 48M mode */ +#define CR53 CTL_REG(0x00D4) /* TX power control for RF in 54M mode */ +#define CR54 CTL_REG(0x00D8) +#define CR55 CTL_REG(0x00DC) +#define CR56 CTL_REG(0x00E0) +#define CR57 CTL_REG(0x00E4) +#define CR58 CTL_REG(0x00E8) +#define CR59 CTL_REG(0x00EC) +#define CR60 CTL_REG(0x00F0) +#define CR61 CTL_REG(0x00F4) +#define CR62 CTL_REG(0x00F8) +#define CR63 CTL_REG(0x00FC) +#define CR64 CTL_REG(0x0100) +#define CR65 CTL_REG(0x0104) /* OFDM 54M calibration */ +#define CR66 CTL_REG(0x0108) /* OFDM 48M calibration */ +#define CR67 
CTL_REG(0x010C) /* OFDM 36M calibration */ +#define CR68 CTL_REG(0x0110) /* CCK calibration */ +#define CR69 CTL_REG(0x0114) +#define CR70 CTL_REG(0x0118) +#define CR71 CTL_REG(0x011C) +#define CR72 CTL_REG(0x0120) +#define CR73 CTL_REG(0x0124) +#define CR74 CTL_REG(0x0128) +#define CR75 CTL_REG(0x012C) +#define CR76 CTL_REG(0x0130) +#define CR77 CTL_REG(0x0134) +#define CR78 CTL_REG(0x0138) +#define CR79 CTL_REG(0x013C) +#define CR80 CTL_REG(0x0140) +#define CR81 CTL_REG(0x0144) +#define CR82 CTL_REG(0x0148) +#define CR83 CTL_REG(0x014C) +#define CR84 CTL_REG(0x0150) +#define CR85 CTL_REG(0x0154) +#define CR86 CTL_REG(0x0158) +#define CR87 CTL_REG(0x015C) +#define CR88 CTL_REG(0x0160) +#define CR89 CTL_REG(0x0164) +#define CR90 CTL_REG(0x0168) +#define CR91 CTL_REG(0x016C) +#define CR92 CTL_REG(0x0170) +#define CR93 CTL_REG(0x0174) +#define CR94 CTL_REG(0x0178) +#define CR95 CTL_REG(0x017C) +#define CR96 CTL_REG(0x0180) +#define CR97 CTL_REG(0x0184) +#define CR98 CTL_REG(0x0188) +#define CR99 CTL_REG(0x018C) +#define CR100 CTL_REG(0x0190) +#define CR101 CTL_REG(0x0194) +#define CR102 CTL_REG(0x0198) +#define CR103 CTL_REG(0x019C) +#define CR104 CTL_REG(0x01A0) +#define CR105 CTL_REG(0x01A4) +#define CR106 CTL_REG(0x01A8) +#define CR107 CTL_REG(0x01AC) +#define CR108 CTL_REG(0x01B0) +#define CR109 CTL_REG(0x01B4) +#define CR110 CTL_REG(0x01B8) +#define CR111 CTL_REG(0x01BC) +#define CR112 CTL_REG(0x01C0) +#define CR113 CTL_REG(0x01C4) +#define CR114 CTL_REG(0x01C8) +#define CR115 CTL_REG(0x01CC) +#define CR116 CTL_REG(0x01D0) +#define CR117 CTL_REG(0x01D4) +#define CR118 CTL_REG(0x01D8) +#define CR119 CTL_REG(0x01DC) +#define CR120 CTL_REG(0x01E0) +#define CR121 CTL_REG(0x01E4) +#define CR122 CTL_REG(0x01E8) +#define CR123 CTL_REG(0x01EC) +#define CR124 CTL_REG(0x01F0) +#define CR125 CTL_REG(0x01F4) +#define CR126 CTL_REG(0x01F8) +#define CR127 CTL_REG(0x01FC) +#define CR128 CTL_REG(0x0200) +#define CR129 CTL_REG(0x0204) +#define CR130 CTL_REG(0x0208) +#define CR131 CTL_REG(0x020C) +#define CR132 CTL_REG(0x0210) +#define CR133 CTL_REG(0x0214) +#define CR134 CTL_REG(0x0218) +#define CR135 CTL_REG(0x021C) +#define CR136 CTL_REG(0x0220) +#define CR137 CTL_REG(0x0224) +#define CR138 CTL_REG(0x0228) +#define CR139 CTL_REG(0x022C) +#define CR140 CTL_REG(0x0230) +#define CR141 CTL_REG(0x0234) +#define CR142 CTL_REG(0x0238) +#define CR143 CTL_REG(0x023C) +#define CR144 CTL_REG(0x0240) +#define CR145 CTL_REG(0x0244) +#define CR146 CTL_REG(0x0248) +#define CR147 CTL_REG(0x024C) +#define CR148 CTL_REG(0x0250) +#define CR149 CTL_REG(0x0254) +#define CR150 CTL_REG(0x0258) +#define CR151 CTL_REG(0x025C) +#define CR152 CTL_REG(0x0260) +#define CR153 CTL_REG(0x0264) +#define CR154 CTL_REG(0x0268) +#define CR155 CTL_REG(0x026C) +#define CR156 CTL_REG(0x0270) +#define CR157 CTL_REG(0x0274) +#define CR158 CTL_REG(0x0278) +#define CR159 CTL_REG(0x027C) +#define CR160 CTL_REG(0x0280) +#define CR161 CTL_REG(0x0284) +#define CR162 CTL_REG(0x0288) +#define CR163 CTL_REG(0x028C) +#define CR164 CTL_REG(0x0290) +#define CR165 CTL_REG(0x0294) +#define CR166 CTL_REG(0x0298) +#define CR167 CTL_REG(0x029C) +#define CR168 CTL_REG(0x02A0) +#define CR169 CTL_REG(0x02A4) +#define CR170 CTL_REG(0x02A8) +#define CR171 CTL_REG(0x02AC) +#define CR172 CTL_REG(0x02B0) +#define CR173 CTL_REG(0x02B4) +#define CR174 CTL_REG(0x02B8) +#define CR175 CTL_REG(0x02BC) +#define CR176 CTL_REG(0x02C0) +#define CR177 CTL_REG(0x02C4) +#define CR178 CTL_REG(0x02C8) +#define CR179 CTL_REG(0x02CC) +#define CR180 CTL_REG(0x02D0) +#define CR181 
CTL_REG(0x02D4) +#define CR182 CTL_REG(0x02D8) +#define CR183 CTL_REG(0x02DC) +#define CR184 CTL_REG(0x02E0) +#define CR185 CTL_REG(0x02E4) +#define CR186 CTL_REG(0x02E8) +#define CR187 CTL_REG(0x02EC) +#define CR188 CTL_REG(0x02F0) +#define CR189 CTL_REG(0x02F4) +#define CR190 CTL_REG(0x02F8) +#define CR191 CTL_REG(0x02FC) +#define CR192 CTL_REG(0x0300) +#define CR193 CTL_REG(0x0304) +#define CR194 CTL_REG(0x0308) +#define CR195 CTL_REG(0x030C) +#define CR196 CTL_REG(0x0310) +#define CR197 CTL_REG(0x0314) +#define CR198 CTL_REG(0x0318) +#define CR199 CTL_REG(0x031C) +#define CR200 CTL_REG(0x0320) +#define CR201 CTL_REG(0x0324) +#define CR202 CTL_REG(0x0328) +#define CR203 CTL_REG(0x032C) /* I2C bus template value & flash control */ +#define CR204 CTL_REG(0x0330) +#define CR205 CTL_REG(0x0334) +#define CR206 CTL_REG(0x0338) +#define CR207 CTL_REG(0x033C) +#define CR208 CTL_REG(0x0340) +#define CR209 CTL_REG(0x0344) +#define CR210 CTL_REG(0x0348) +#define CR211 CTL_REG(0x034C) +#define CR212 CTL_REG(0x0350) +#define CR213 CTL_REG(0x0354) +#define CR214 CTL_REG(0x0358) +#define CR215 CTL_REG(0x035C) +#define CR216 CTL_REG(0x0360) +#define CR217 CTL_REG(0x0364) +#define CR218 CTL_REG(0x0368) +#define CR219 CTL_REG(0x036C) +#define CR220 CTL_REG(0x0370) +#define CR221 CTL_REG(0x0374) +#define CR222 CTL_REG(0x0378) +#define CR223 CTL_REG(0x037C) +#define CR224 CTL_REG(0x0380) +#define CR225 CTL_REG(0x0384) +#define CR226 CTL_REG(0x0388) +#define CR227 CTL_REG(0x038C) +#define CR228 CTL_REG(0x0390) +#define CR229 CTL_REG(0x0394) +#define CR230 CTL_REG(0x0398) +#define CR231 CTL_REG(0x039C) +#define CR232 CTL_REG(0x03A0) +#define CR233 CTL_REG(0x03A4) +#define CR234 CTL_REG(0x03A8) +#define CR235 CTL_REG(0x03AC) +#define CR236 CTL_REG(0x03B0) + +#define CR240 CTL_REG(0x03C0) +/* bit 7: host-controlled RF register writes + * CR241-CR245: for hardware controlled writing of RF bits, not needed for + * USB + */ +#define CR241 CTL_REG(0x03C4) +#define CR242 CTL_REG(0x03C8) +#define CR243 CTL_REG(0x03CC) +#define CR244 CTL_REG(0x03D0) +#define CR245 CTL_REG(0x03D4) + +#define CR251 CTL_REG(0x03EC) /* only used for activation and deactivation of + * Airoha RFs AL2230 and AL7230B + */ +#define CR252 CTL_REG(0x03F0) +#define CR253 CTL_REG(0x03F4) +#define CR254 CTL_REG(0x03F8) +#define CR255 CTL_REG(0x03FC) + +#define CR_MAX_PHY_REG 255 + +/* Taken from the ZYDAS driver, not all of them are relevant for the ZD1211 + * driver. + */ + +#define CR_RF_IF_CLK CTL_REG(0x0400) +#define CR_RF_IF_DATA CTL_REG(0x0404) +#define CR_PE1_PE2 CTL_REG(0x0408) +#define CR_PE2_DLY CTL_REG(0x040C) +#define CR_LE1 CTL_REG(0x0410) +#define CR_LE2 CTL_REG(0x0414) +/* Seems to enable/disable GPI (General Purpose IO?) 
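+ * zd_chip_init_hw() always writes 0 here, so GPI stays disabled.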
*/ +#define CR_GPI_EN CTL_REG(0x0418) +#define CR_RADIO_PD CTL_REG(0x042C) +#define CR_RF2948_PD CTL_REG(0x042C) +#define CR_ENABLE_PS_MANUAL_AGC CTL_REG(0x043C) +#define CR_CONFIG_PHILIPS CTL_REG(0x0440) +#define CR_SA2400_SER_AP CTL_REG(0x0444) +#define CR_I2C_WRITE CTL_REG(0x0444) +#define CR_SA2400_SER_RP CTL_REG(0x0448) +#define CR_RADIO_PE CTL_REG(0x0458) +#define CR_RST_BUS_MASTER CTL_REG(0x045C) +#define CR_RFCFG CTL_REG(0x0464) +#define CR_HSTSCHG CTL_REG(0x046C) +#define CR_PHY_ON CTL_REG(0x0474) +#define CR_RX_DELAY CTL_REG(0x0478) +#define CR_RX_PE_DELAY CTL_REG(0x047C) +#define CR_GPIO_1 CTL_REG(0x0490) +#define CR_GPIO_2 CTL_REG(0x0494) +#define CR_EncryBufMux CTL_REG(0x04A8) +#define CR_PS_CTRL CTL_REG(0x0500) +#define CR_ADDA_PWR_DWN CTL_REG(0x0504) +#define CR_ADDA_MBIAS_WARMTIME CTL_REG(0x0508) +#define CR_MAC_PS_STATE CTL_REG(0x050C) + +#define CR_INTERRUPT CTL_REG(0x0510) +#define INT_TX_COMPLETE (1 << 0) +#define INT_RX_COMPLETE (1 << 1) +#define INT_RETRY_FAIL (1 << 2) +#define INT_WAKEUP (1 << 3) +#define INT_DTIM_NOTIFY (1 << 5) +#define INT_CFG_NEXT_BCN (1 << 6) +#define INT_BUS_ABORT (1 << 7) +#define INT_TX_FIFO_READY (1 << 8) +#define INT_UART (1 << 9) +#define INT_TX_COMPLETE_EN (1 << 16) +#define INT_RX_COMPLETE_EN (1 << 17) +#define INT_RETRY_FAIL_EN (1 << 18) +#define INT_WAKEUP_EN (1 << 19) +#define INT_DTIM_NOTIFY_EN (1 << 21) +#define INT_CFG_NEXT_BCN_EN (1 << 22) +#define INT_BUS_ABORT_EN (1 << 23) +#define INT_TX_FIFO_READY_EN (1 << 24) +#define INT_UART_EN (1 << 25) + +#define CR_TSF_LOW_PART CTL_REG(0x0514) +#define CR_TSF_HIGH_PART CTL_REG(0x0518) + +/* Following three values are in time units (1024us) + * Following condition must be met: + * atim < tbtt < bcn + */ +#define CR_ATIM_WND_PERIOD CTL_REG(0x051C) +#define CR_BCN_INTERVAL CTL_REG(0x0520) +#define CR_PRE_TBTT CTL_REG(0x0524) +/* in units of TU(1024us) */ + +/* for UART support */ +#define CR_UART_RBR_THR_DLL CTL_REG(0x0540) +#define CR_UART_DLM_IER CTL_REG(0x0544) +#define CR_UART_IIR_FCR CTL_REG(0x0548) +#define CR_UART_LCR CTL_REG(0x054c) +#define CR_UART_MCR CTL_REG(0x0550) +#define CR_UART_LSR CTL_REG(0x0554) +#define CR_UART_MSR CTL_REG(0x0558) +#define CR_UART_ECR CTL_REG(0x055c) +#define CR_UART_STATUS CTL_REG(0x0560) + +#define CR_PCI_TX_ADDR_P1 CTL_REG(0x0600) +#define CR_PCI_TX_AddR_P2 CTL_REG(0x0604) +#define CR_PCI_RX_AddR_P1 CTL_REG(0x0608) +#define CR_PCI_RX_AddR_P2 CTL_REG(0x060C) + +/* must be overwritten if custom MAC address will be used */ +#define CR_MAC_ADDR_P1 CTL_REG(0x0610) +#define CR_MAC_ADDR_P2 CTL_REG(0x0614) +#define CR_BSSID_P1 CTL_REG(0x0618) +#define CR_BSSID_P2 CTL_REG(0x061C) +#define CR_BCN_PLCP_CFG CTL_REG(0x0620) + +/* Group hash table for filtering incoming packets. + * + * The group hash table is 64 bit large and split over two parts. The first + * part is the lower part. The upper 6 bits of the last byte of the target + * address are used as index. Packets are received if the hash table bit is + * set. This is used for multicast handling, but for broadcasts (address + * ff:ff:ff:ff:ff:ff) the highest bit in the second table must also be set. + */ +#define CR_GROUP_HASH_P1 CTL_REG(0x0624) +#define CR_GROUP_HASH_P2 CTL_REG(0x0628) + +#define CR_RX_TIMEOUT CTL_REG(0x062C) + +/* Basic rates supported by the BSS. When producing ACK or CTS messages, the + * device will use a rate in this table that is less than or equal to the rate + * of the incoming frame which prompted the response. 
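+ * One bit per rate; see the CR_RATE_* masks defined below.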
*/ +#define CR_BASIC_RATE_TBL CTL_REG(0x0630) +#define CR_RATE_1M (1 << 0) /* 802.11b */ +#define CR_RATE_2M (1 << 1) /* 802.11b */ +#define CR_RATE_5_5M (1 << 2) /* 802.11b */ +#define CR_RATE_11M (1 << 3) /* 802.11b */ +#define CR_RATE_6M (1 << 8) /* 802.11g */ +#define CR_RATE_9M (1 << 9) /* 802.11g */ +#define CR_RATE_12M (1 << 10) /* 802.11g */ +#define CR_RATE_18M (1 << 11) /* 802.11g */ +#define CR_RATE_24M (1 << 12) /* 802.11g */ +#define CR_RATE_36M (1 << 13) /* 802.11g */ +#define CR_RATE_48M (1 << 14) /* 802.11g */ +#define CR_RATE_54M (1 << 15) /* 802.11g */ +#define CR_RATES_80211G 0xff00 +#define CR_RATES_80211B 0x000f + +/* Mandatory rates required in the BSS. When producing ACK or CTS messages, if + * the device could not find an appropriate rate in CR_BASIC_RATE_TBL, it will + * look for a rate in this table that is less than or equal to the rate of + * the incoming frame. */ +#define CR_MANDATORY_RATE_TBL CTL_REG(0x0634) +#define CR_RTS_CTS_RATE CTL_REG(0x0638) + +/* These are all bit indexes in CR_RTS_CTS_RATE, so remember to shift. */ +#define RTSCTS_SH_RTS_RATE 0 +#define RTSCTS_SH_EXP_CTS_RATE 4 +#define RTSCTS_SH_RTS_MOD_TYPE 8 +#define RTSCTS_SH_RTS_PMB_TYPE 9 +#define RTSCTS_SH_CTS_RATE 16 +#define RTSCTS_SH_CTS_MOD_TYPE 24 +#define RTSCTS_SH_CTS_PMB_TYPE 25 + +#define CR_WEP_PROTECT CTL_REG(0x063C) +#define CR_RX_THRESHOLD CTL_REG(0x0640) + +/* register for controlling the LEDS */ +#define CR_LED CTL_REG(0x0644) +/* masks for controlling LEDs */ +#define LED1 (1 << 8) +#define LED2 (1 << 9) +#define LED_SW (1 << 10) + +/* Seems to indicate that the configuration is over. + */ +#define CR_AFTER_PNP CTL_REG(0x0648) +#define CR_ACK_TIME_80211 CTL_REG(0x0658) + +#define CR_RX_OFFSET CTL_REG(0x065c) + +#define CR_BCN_LENGTH CTL_REG(0x0664) +#define CR_PHY_DELAY CTL_REG(0x066C) +#define CR_BCN_FIFO CTL_REG(0x0670) +#define CR_SNIFFER_ON CTL_REG(0x0674) + +#define CR_ENCRYPTION_TYPE CTL_REG(0x0678) +#define NO_WEP 0 +#define WEP64 1 +#define WEP128 5 +#define WEP256 6 +#define ENC_SNIFFER 8 + +#define CR_ZD1211_RETRY_MAX CTL_REG(0x067C) + +#define CR_REG1 CTL_REG(0x0680) +/* Setting the bit UNLOCK_PHY_REGS disallows the write access to physical + * registers, so one could argue it is a LOCK bit. But calling it + * LOCK_PHY_REGS makes it confusing. + */ +#define UNLOCK_PHY_REGS (1 << 7) + +#define CR_DEVICE_STATE CTL_REG(0x0684) +#define CR_UNDERRUN_CNT CTL_REG(0x0688) + +#define CR_RX_FILTER CTL_REG(0x068c) +#define RX_FILTER_ASSOC_REQUEST (1 << 0) +#define RX_FILTER_ASSOC_RESPONSE (1 << 1) +#define RX_FILTER_REASSOC_REQUEST (1 << 2) +#define RX_FILTER_REASSOC_RESPONSE (1 << 3) +#define RX_FILTER_PROBE_REQUEST (1 << 4) +#define RX_FILTER_PROBE_RESPONSE (1 << 5) +/* bits 6 and 7 reserved */ +#define RX_FILTER_BEACON (1 << 8) +#define RX_FILTER_ATIM (1 << 9) +#define RX_FILTER_DISASSOC (1 << 10) +#define RX_FILTER_AUTH (1 << 11) +#define RX_FILTER_DEAUTH (1 << 12) +#define RX_FILTER_PSPOLL (1 << 26) +#define RX_FILTER_RTS (1 << 27) +#define RX_FILTER_CTS (1 << 28) +#define RX_FILTER_ACK (1 << 29) +#define RX_FILTER_CFEND (1 << 30) +#define RX_FILTER_CFACK (1 << 31) + +/* Enable bits for all frames you are interested in. 
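+ * STA_RX_FILTER below also sets a few reserved bits, matching what the vendor driver programs.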
*/ +#define STA_RX_FILTER (RX_FILTER_ASSOC_REQUEST | RX_FILTER_ASSOC_RESPONSE | \ + RX_FILTER_REASSOC_REQUEST | RX_FILTER_REASSOC_RESPONSE | \ + RX_FILTER_PROBE_REQUEST | RX_FILTER_PROBE_RESPONSE | \ + (0x3 << 6) /* vendor driver sets these reserved bits */ | \ + RX_FILTER_BEACON | RX_FILTER_ATIM | RX_FILTER_DISASSOC | \ + RX_FILTER_AUTH | RX_FILTER_DEAUTH | \ + (0x7 << 13) /* vendor driver sets these reserved bits */ | \ + RX_FILTER_PSPOLL | RX_FILTER_ACK) /* 0x2400ffff */ + +#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \ + RX_FILTER_CFEND | RX_FILTER_CFACK) + +#define BCN_MODE_IBSS 0x2000000 + +/* Monitor mode sets filter to 0xfffff */ + +#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) +#define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) + +#define CR_IFS_VALUE CTL_REG(0x0698) +#define IFS_VALUE_DIFS_SH 0 +#define IFS_VALUE_EIFS_SH 12 +#define IFS_VALUE_SIFS_SH 24 +#define IFS_VALUE_DEFAULT (( 50 << IFS_VALUE_DIFS_SH) | \ + (1148 << IFS_VALUE_EIFS_SH) | \ + ( 10 << IFS_VALUE_SIFS_SH)) + +#define CR_RX_TIME_OUT CTL_REG(0x069C) +#define CR_TOTAL_RX_FRM CTL_REG(0x06A0) +#define CR_CRC32_CNT CTL_REG(0x06A4) +#define CR_CRC16_CNT CTL_REG(0x06A8) +#define CR_DECRYPTION_ERR_UNI CTL_REG(0x06AC) +#define CR_RX_FIFO_OVERRUN CTL_REG(0x06B0) + +#define CR_DECRYPTION_ERR_MUL CTL_REG(0x06BC) + +#define CR_NAV_CNT CTL_REG(0x06C4) +#define CR_NAV_CCA CTL_REG(0x06C8) +#define CR_RETRY_CNT CTL_REG(0x06CC) + +#define CR_READ_TCB_ADDR CTL_REG(0x06E8) +#define CR_READ_RFD_ADDR CTL_REG(0x06EC) +#define CR_CWMIN_CWMAX CTL_REG(0x06F0) +#define CR_TOTAL_TX_FRM CTL_REG(0x06F4) + +/* CAM: Continuous Access Mode (power management) */ +#define CR_CAM_MODE CTL_REG(0x0700) +#define MODE_IBSS 0x0 +#define MODE_AP 0x1 +#define MODE_STA 0x2 +#define MODE_AP_WDS 0x3 + +#define CR_CAM_ROLL_TB_LOW CTL_REG(0x0704) +#define CR_CAM_ROLL_TB_HIGH CTL_REG(0x0708) +#define CR_CAM_ADDRESS CTL_REG(0x070C) +#define CR_CAM_DATA CTL_REG(0x0710) + +#define CR_ROMDIR CTL_REG(0x0714) + +#define CR_DECRY_ERR_FLG_LOW CTL_REG(0x0714) +#define CR_DECRY_ERR_FLG_HIGH CTL_REG(0x0718) + +#define CR_WEPKEY0 CTL_REG(0x0720) +#define CR_WEPKEY1 CTL_REG(0x0724) +#define CR_WEPKEY2 CTL_REG(0x0728) +#define CR_WEPKEY3 CTL_REG(0x072C) +#define CR_WEPKEY4 CTL_REG(0x0730) +#define CR_WEPKEY5 CTL_REG(0x0734) +#define CR_WEPKEY6 CTL_REG(0x0738) +#define CR_WEPKEY7 CTL_REG(0x073C) +#define CR_WEPKEY8 CTL_REG(0x0740) +#define CR_WEPKEY9 CTL_REG(0x0744) +#define CR_WEPKEY10 CTL_REG(0x0748) +#define CR_WEPKEY11 CTL_REG(0x074C) +#define CR_WEPKEY12 CTL_REG(0x0750) +#define CR_WEPKEY13 CTL_REG(0x0754) +#define CR_WEPKEY14 CTL_REG(0x0758) +#define CR_WEPKEY15 CTL_REG(0x075c) +#define CR_TKIP_MODE CTL_REG(0x0760) + +#define CR_EEPROM_PROTECT0 CTL_REG(0x0758) +#define CR_EEPROM_PROTECT1 CTL_REG(0x075C) + +#define CR_DBG_FIFO_RD CTL_REG(0x0800) +#define CR_DBG_SELECT CTL_REG(0x0804) +#define CR_FIFO_Length CTL_REG(0x0808) + + +#define CR_RSSI_MGC CTL_REG(0x0810) + +#define CR_PON CTL_REG(0x0818) +#define CR_RX_ON CTL_REG(0x081C) +#define CR_TX_ON CTL_REG(0x0820) +#define CR_CHIP_EN CTL_REG(0x0824) +#define CR_LO_SW CTL_REG(0x0828) +#define CR_TXRX_SW CTL_REG(0x082C) +#define CR_S_MD CTL_REG(0x0830) + +#define CR_USB_DEBUG_PORT CTL_REG(0x0888) +#define CR_ZD1211B_CWIN_MAX_MIN_AC0 CTL_REG(0x0b00) +#define CR_ZD1211B_CWIN_MAX_MIN_AC1 CTL_REG(0x0b04) +#define CR_ZD1211B_CWIN_MAX_MIN_AC2 CTL_REG(0x0b08) +#define CR_ZD1211B_CWIN_MAX_MIN_AC3 CTL_REG(0x0b0c) +#define CR_ZD1211B_AIFS_CTL1 CTL_REG(0x0b10) +#define CR_ZD1211B_AIFS_CTL2 CTL_REG(0x0b14) +#define CR_ZD1211B_TXOP 
CTL_REG(0x0b20)
+#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28)
+
+/* Value for CR_ZD1211_RETRY_MAX & CR_ZD1211B_RETRY_MAX. Vendor driver uses 2,
+ * we use 0. The first rate is tried (count+2) times, then all following rates
+ * are tried twice, until 1 Mbit/s is tried. */
+#define ZD1211_RETRY_COUNT 0
+#define ZD1211B_RETRY_COUNT \
+ (ZD1211_RETRY_COUNT << 0)| \
+ (ZD1211_RETRY_COUNT << 8)| \
+ (ZD1211_RETRY_COUNT << 16)| \
+ (ZD1211_RETRY_COUNT << 24)
+
+/* Used to detect PLL lock */
+#define UW2453_INTR_REG ((zd_addr_t)0x85c1)
+
+#define CWIN_SIZE 0x007f043f
+
+
+#define HWINT_ENABLED \
+ (INT_TX_COMPLETE_EN| \
+ INT_RX_COMPLETE_EN| \
+ INT_RETRY_FAIL_EN| \
+ INT_WAKEUP_EN| \
+ INT_CFG_NEXT_BCN_EN)
+
+#define HWINT_DISABLED 0
+
+#define E2P_PWR_INT_GUARD 8
+#define E2P_CHANNEL_COUNT 14
+
+/* If you compare these addresses with the original ZyDAS driver, please note
+ * that we use word mapping for the EEPROM.
+ */
+
+/*
+ * The upper 16 bits contain the regulatory domain.
+ */
+#define E2P_SUBID E2P_DATA(0x00)
+#define E2P_POD E2P_DATA(0x02)
+#define E2P_MAC_ADDR_P1 E2P_DATA(0x04)
+#define E2P_MAC_ADDR_P2 E2P_DATA(0x06)
+#define E2P_PWR_CAL_VALUE1 E2P_DATA(0x08)
+#define E2P_PWR_CAL_VALUE2 E2P_DATA(0x0a)
+#define E2P_PWR_CAL_VALUE3 E2P_DATA(0x0c)
+#define E2P_PWR_CAL_VALUE4 E2P_DATA(0x0e)
+#define E2P_PWR_INT_VALUE1 E2P_DATA(0x10)
+#define E2P_PWR_INT_VALUE2 E2P_DATA(0x12)
+#define E2P_PWR_INT_VALUE3 E2P_DATA(0x14)
+#define E2P_PWR_INT_VALUE4 E2P_DATA(0x16)
+
+/* Contains a bit for each allowed channel. For Europe (ETSI 0x30) it also
+ * allows only 11 channels. */
+#define E2P_ALLOWED_CHANNEL E2P_DATA(0x18)
+
+#define E2P_DEVICE_VER E2P_DATA(0x20)
+#define E2P_PHY_REG E2P_DATA(0x25)
+#define E2P_36M_CAL_VALUE1 E2P_DATA(0x28)
+#define E2P_36M_CAL_VALUE2 E2P_DATA(0x2a)
+#define E2P_36M_CAL_VALUE3 E2P_DATA(0x2c)
+#define E2P_36M_CAL_VALUE4 E2P_DATA(0x2e)
+#define E2P_11A_INT_VALUE1 E2P_DATA(0x30)
+#define E2P_11A_INT_VALUE2 E2P_DATA(0x32)
+#define E2P_11A_INT_VALUE3 E2P_DATA(0x34)
+#define E2P_11A_INT_VALUE4 E2P_DATA(0x36)
+#define E2P_48M_CAL_VALUE1 E2P_DATA(0x38)
+#define E2P_48M_CAL_VALUE2 E2P_DATA(0x3a)
+#define E2P_48M_CAL_VALUE3 E2P_DATA(0x3c)
+#define E2P_48M_CAL_VALUE4 E2P_DATA(0x3e)
+#define E2P_48M_INT_VALUE1 E2P_DATA(0x40)
+#define E2P_48M_INT_VALUE2 E2P_DATA(0x42)
+#define E2P_48M_INT_VALUE3 E2P_DATA(0x44)
+#define E2P_48M_INT_VALUE4 E2P_DATA(0x46)
+#define E2P_54M_CAL_VALUE1 E2P_DATA(0x48) /* ??? */
+#define E2P_54M_CAL_VALUE2 E2P_DATA(0x4a)
+#define E2P_54M_CAL_VALUE3 E2P_DATA(0x4c)
+#define E2P_54M_CAL_VALUE4 E2P_DATA(0x4e)
+#define E2P_54M_INT_VALUE1 E2P_DATA(0x50)
+#define E2P_54M_INT_VALUE2 E2P_DATA(0x52)
+#define E2P_54M_INT_VALUE3 E2P_DATA(0x54)
+#define E2P_54M_INT_VALUE4 E2P_DATA(0x56)
+
+/* This word contains the base address of the FW_REG_ registers below */
+#define FWRAW_REGS_ADDR FWRAW_DATA(0x1d)
+
+/* All are 16-bit values, offsets from the address in FWRAW_REGS_ADDR */
+enum {
+ FW_REG_FIRMWARE_VER = 0,
+ /* non-zero if USB high speed connection */
+ FW_REG_USB_SPEED = 1,
+ FW_REG_FIX_TX_RATE = 2,
+ /* Seems to be able to control LEDs over the firmware */
+ FW_REG_LED_LINK_STATUS = 3,
+ FW_REG_SOFT_RESET = 4,
+ FW_REG_FLASH_CHK = 5,
+};
+
+/* Values for FW_LINK_STATUS */
+#define FW_LINK_OFF 0x0
+#define FW_LINK_TX 0x1
+/* 0x2 - link led on?
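+ * Only FW_LINK_OFF and FW_LINK_TX are used by zd_chip_control_leds(); the meaning of 0x2 is unconfirmed.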
*/ + +enum { + /* indices for ofdm_cal_values */ + OFDM_36M_INDEX = 0, + OFDM_48M_INDEX = 1, + OFDM_54M_INDEX = 2, +}; + +struct zd_chip { + struct zd_usb usb; + struct zd_rf rf; + struct mutex mutex; + /* Base address of FW_REG_ registers */ + zd_addr_t fw_regs_base; + /* EepSetPoint in the vendor driver */ + u8 pwr_cal_values[E2P_CHANNEL_COUNT]; + /* integration values in the vendor driver */ + u8 pwr_int_values[E2P_CHANNEL_COUNT]; + /* SetPointOFDM in the vendor driver */ + u8 ofdm_cal_values[3][E2P_CHANNEL_COUNT]; + u16 link_led; + unsigned int pa_type:4, + patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1, + new_phy_layout:1, al2230s_bit:1, + supports_tx_led:1; +}; + +static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb) +{ + return container_of(usb, struct zd_chip, usb); +} + +static inline struct zd_chip *zd_rf_to_chip(struct zd_rf *rf) +{ + return container_of(rf, struct zd_chip, rf); +} + +#define zd_chip_dev(chip) (&(chip)->usb.intf->dev) + +void zd_chip_init(struct zd_chip *chip, + struct ieee80211_hw *hw, + struct usb_interface *intf); +void zd_chip_clear(struct zd_chip *chip); +int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr); +int zd_chip_init_hw(struct zd_chip *chip); +int zd_chip_reset(struct zd_chip *chip); + +static inline int zd_chip_is_zd1211b(struct zd_chip *chip) +{ + return chip->usb.is_zd1211b; +} + +static inline int zd_ioread16v_locked(struct zd_chip *chip, u16 *values, + const zd_addr_t *addresses, + unsigned int count) +{ + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + return zd_usb_ioread16v(&chip->usb, values, addresses, count); +} + +static inline int zd_ioread16_locked(struct zd_chip *chip, u16 *value, + const zd_addr_t addr) +{ + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + return zd_usb_ioread16(&chip->usb, value, addr); +} + +int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, + const zd_addr_t *addresses, unsigned int count); + +static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value, + const zd_addr_t addr) +{ + return zd_ioread32v_locked(chip, value, (const zd_addr_t *)&addr, 1); +} + +static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value, + zd_addr_t addr) +{ + struct zd_ioreq16 ioreq; + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + ioreq.addr = addr; + ioreq.value = value; + + return zd_usb_iowrite16v(&chip->usb, &ioreq, 1); +} + +int zd_iowrite16a_locked(struct zd_chip *chip, + const struct zd_ioreq16 *ioreqs, unsigned int count); + +int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, + unsigned int count); + +static inline int zd_iowrite32_locked(struct zd_chip *chip, u32 value, + zd_addr_t addr) +{ + struct zd_ioreq32 ioreq; + + ioreq.addr = addr; + ioreq.value = value; + + return _zd_iowrite32v_locked(chip, &ioreq, 1); +} + +int zd_iowrite32a_locked(struct zd_chip *chip, + const struct zd_ioreq32 *ioreqs, unsigned int count); + +static inline int zd_rfwrite_locked(struct zd_chip *chip, u32 value, u8 bits) +{ + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + return zd_usb_rfwrite(&chip->usb, value, bits); +} + +int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value); + +int zd_rfwritev_locked(struct zd_chip *chip, + const u32* values, unsigned int count, u8 bits); +int zd_rfwritev_cr_locked(struct zd_chip *chip, + const u32* values, unsigned int count); + +/* Locking functions for reading and writing registers. + * The different parameters are intentional. 
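+ * These wrappers take chip->mutex themselves; the *_locked variants above must be called with the mutex already held.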
+ */ +int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value); +int zd_iowrite16(struct zd_chip *chip, zd_addr_t addr, u16 value); +int zd_ioread32(struct zd_chip *chip, zd_addr_t addr, u32 *value); +int zd_iowrite32(struct zd_chip *chip, zd_addr_t addr, u32 value); +int zd_ioread32v(struct zd_chip *chip, const zd_addr_t *addresses, + u32 *values, unsigned int count); +int zd_iowrite32a(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, + unsigned int count); + +int zd_chip_set_channel(struct zd_chip *chip, u8 channel); +static inline u8 _zd_chip_get_channel(struct zd_chip *chip) +{ + return chip->rf.channel; +} +u8 zd_chip_get_channel(struct zd_chip *chip); +int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain); +int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr); +int zd_chip_switch_radio_on(struct zd_chip *chip); +int zd_chip_switch_radio_off(struct zd_chip *chip); +int zd_chip_enable_int(struct zd_chip *chip); +void zd_chip_disable_int(struct zd_chip *chip); +int zd_chip_enable_rxtx(struct zd_chip *chip); +void zd_chip_disable_rxtx(struct zd_chip *chip); +int zd_chip_enable_hwint(struct zd_chip *chip); +int zd_chip_disable_hwint(struct zd_chip *chip); +int zd_chip_generic_patch_6m_band(struct zd_chip *chip, int channel); +int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip, int preamble); + +static inline int zd_get_encryption_type(struct zd_chip *chip, u32 *type) +{ + return zd_ioread32(chip, CR_ENCRYPTION_TYPE, type); +} + +static inline int zd_set_encryption_type(struct zd_chip *chip, u32 type) +{ + return zd_iowrite32(chip, CR_ENCRYPTION_TYPE, type); +} + +static inline int zd_chip_get_basic_rates(struct zd_chip *chip, u16 *cr_rates) +{ + return zd_ioread16(chip, CR_BASIC_RATE_TBL, cr_rates); +} + +int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates); + +int zd_chip_lock_phy_regs(struct zd_chip *chip); +int zd_chip_unlock_phy_regs(struct zd_chip *chip); + +enum led_status { + ZD_LED_OFF = 0, + ZD_LED_SCANNING = 1, + ZD_LED_ASSOCIATED = 2, +}; + +int zd_chip_control_leds(struct zd_chip *chip, enum led_status status); + +int zd_set_beacon_interval(struct zd_chip *chip, u32 interval); + +static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval) +{ + return zd_ioread32(chip, CR_BCN_INTERVAL, interval); +} + +struct rx_status; + +u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status); + +struct zd_mc_hash { + u32 low; + u32 high; +}; + +static inline void zd_mc_clear(struct zd_mc_hash *hash) +{ + hash->low = 0; + /* The interfaces must always received broadcasts. + * The hash of the broadcast address ff:ff:ff:ff:ff:ff is 63. 
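+ * zd_mc_add_addr() hashes on addr[5] >> 2, so 0xff gives index 63; bit 63 sits in the high word, hence the 0x80000000 below.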
+ */ + hash->high = 0x80000000; +} + +static inline void zd_mc_add_all(struct zd_mc_hash *hash) +{ + hash->low = hash->high = 0xffffffff; +} + +static inline void zd_mc_add_addr(struct zd_mc_hash *hash, u8 *addr) +{ + unsigned int i = addr[5] >> 2; + if (i < 32) { + hash->low |= 1 << i; + } else { + hash->high |= 1 << (i-32); + } +} + +int zd_chip_set_multicast_hash(struct zd_chip *chip, + struct zd_mc_hash *hash); + +u64 zd_chip_get_tsf(struct zd_chip *chip); + +#endif /* _ZD_CHIP_H */ diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h new file mode 100644 index 0000000..6ac597f --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_def.h @@ -0,0 +1,64 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ZD_DEF_H +#define _ZD_DEF_H + +#include +#include +#include + +typedef u16 __nocast zd_addr_t; + +#define dev_printk_f(level, dev, fmt, args...) \ + dev_printk(level, dev, "%s() " fmt, __func__, ##args) + +#ifdef DEBUG +# define dev_dbg_f(dev, fmt, args...) \ + dev_printk_f(KERN_DEBUG, dev, fmt, ## args) +# define dev_dbg_f_limit(dev, fmt, args...) do { \ + if (net_ratelimit()) \ + dev_printk_f(KERN_DEBUG, dev, fmt, ## args); \ +} while (0) +#else +# define dev_dbg_f(dev, fmt, args...) do { (void)(dev); } while (0) +# define dev_dbg_f_limit(dev, fmt, args...) do { (void)(dev); } while (0) +#endif /* DEBUG */ + +#ifdef DEBUG +# define ZD_ASSERT(x) \ +do { \ + if (!(x)) { \ + pr_debug("%s:%d ASSERT %s VIOLATED!\n", \ + __FILE__, __LINE__, __stringify(x)); \ + dump_stack(); \ + } \ +} while (0) +#else +# define ZD_ASSERT(x) do { } while (0) +#endif + +#ifdef DEBUG +# define ZD_MEMCLEAR(pointer, size) memset((pointer), 0xff, (size)) +#else +# define ZD_MEMCLEAR(pointer, size) do { } while (0) +#endif + +#endif /* _ZD_DEF_H */ diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c new file mode 100644 index 0000000..00e09e2 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -0,0 +1,1214 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * Copyright (C) 2006-2007 Michael Wu + * Copyright (C) 2007-2008 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include + +#include "zd_def.h" +#include "zd_chip.h" +#include "zd_mac.h" +#include "zd_rf.h" + +struct zd_reg_alpha2_map { + u32 reg; + char alpha2[2]; +}; + +static struct zd_reg_alpha2_map reg_alpha2_map[] = { + { ZD_REGDOMAIN_FCC, "US" }, + { ZD_REGDOMAIN_IC, "CA" }, + { ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */ + { ZD_REGDOMAIN_JAPAN, "JP" }, + { ZD_REGDOMAIN_JAPAN_ADD, "JP" }, + { ZD_REGDOMAIN_SPAIN, "ES" }, + { ZD_REGDOMAIN_FRANCE, "FR" }, +}; + +/* This table contains the hardware specific values for the modulation rates. */ +static const struct ieee80211_rate zd_rates[] = { + { .bitrate = 10, + .hw_value = ZD_CCK_RATE_1M, }, + { .bitrate = 20, + .hw_value = ZD_CCK_RATE_2M, + .hw_value_short = ZD_CCK_RATE_2M | ZD_CCK_PREA_SHORT, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = ZD_CCK_RATE_5_5M, + .hw_value_short = ZD_CCK_RATE_5_5M | ZD_CCK_PREA_SHORT, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = ZD_CCK_RATE_11M, + .hw_value_short = ZD_CCK_RATE_11M | ZD_CCK_PREA_SHORT, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60, + .hw_value = ZD_OFDM_RATE_6M, + .flags = 0 }, + { .bitrate = 90, + .hw_value = ZD_OFDM_RATE_9M, + .flags = 0 }, + { .bitrate = 120, + .hw_value = ZD_OFDM_RATE_12M, + .flags = 0 }, + { .bitrate = 180, + .hw_value = ZD_OFDM_RATE_18M, + .flags = 0 }, + { .bitrate = 240, + .hw_value = ZD_OFDM_RATE_24M, + .flags = 0 }, + { .bitrate = 360, + .hw_value = ZD_OFDM_RATE_36M, + .flags = 0 }, + { .bitrate = 480, + .hw_value = ZD_OFDM_RATE_48M, + .flags = 0 }, + { .bitrate = 540, + .hw_value = ZD_OFDM_RATE_54M, + .flags = 0 }, +}; + +/* + * Zydas retry rates table. Each line is listed in the same order as + * in zd_rates[] and contains all the rate used when a packet is sent + * starting with a given rates. 
Let's consider an example : + * + * "11 Mbits : 4, 3, 2, 1, 0" means : + * - packet is sent using 4 different rates + * - 1st rate is index 3 (ie 11 Mbits) + * - 2nd rate is index 2 (ie 5.5 Mbits) + * - 3rd rate is index 1 (ie 2 Mbits) + * - 4th rate is index 0 (ie 1 Mbits) + */ + +static const struct tx_retry_rate zd_retry_rates[] = { + { /* 1 Mbits */ 1, { 0 }}, + { /* 2 Mbits */ 2, { 1, 0 }}, + { /* 5.5 Mbits */ 3, { 2, 1, 0 }}, + { /* 11 Mbits */ 4, { 3, 2, 1, 0 }}, + { /* 6 Mbits */ 5, { 4, 3, 2, 1, 0 }}, + { /* 9 Mbits */ 6, { 5, 4, 3, 2, 1, 0}}, + { /* 12 Mbits */ 5, { 6, 3, 2, 1, 0 }}, + { /* 18 Mbits */ 6, { 7, 6, 3, 2, 1, 0 }}, + { /* 24 Mbits */ 6, { 8, 6, 3, 2, 1, 0 }}, + { /* 36 Mbits */ 7, { 9, 8, 6, 3, 2, 1, 0 }}, + { /* 48 Mbits */ 8, {10, 9, 8, 6, 3, 2, 1, 0 }}, + { /* 54 Mbits */ 9, {11, 10, 9, 8, 6, 3, 2, 1, 0 }} +}; + +static const struct ieee80211_channel zd_channels[] = { + { .center_freq = 2412, .hw_value = 1 }, + { .center_freq = 2417, .hw_value = 2 }, + { .center_freq = 2422, .hw_value = 3 }, + { .center_freq = 2427, .hw_value = 4 }, + { .center_freq = 2432, .hw_value = 5 }, + { .center_freq = 2437, .hw_value = 6 }, + { .center_freq = 2442, .hw_value = 7 }, + { .center_freq = 2447, .hw_value = 8 }, + { .center_freq = 2452, .hw_value = 9 }, + { .center_freq = 2457, .hw_value = 10 }, + { .center_freq = 2462, .hw_value = 11 }, + { .center_freq = 2467, .hw_value = 12 }, + { .center_freq = 2472, .hw_value = 13 }, + { .center_freq = 2484, .hw_value = 14 }, +}; + +static void housekeeping_init(struct zd_mac *mac); +static void housekeeping_enable(struct zd_mac *mac); +static void housekeeping_disable(struct zd_mac *mac); + +static int zd_reg2alpha2(u8 regdomain, char *alpha2) +{ + unsigned int i; + struct zd_reg_alpha2_map *reg_map; + for (i = 0; i < ARRAY_SIZE(reg_alpha2_map); i++) { + reg_map = ®_alpha2_map[i]; + if (regdomain == reg_map->reg) { + alpha2[0] = reg_map->alpha2[0]; + alpha2[1] = reg_map->alpha2[1]; + return 0; + } + } + return 1; +} + +int zd_mac_preinit_hw(struct ieee80211_hw *hw) +{ + int r; + u8 addr[ETH_ALEN]; + struct zd_mac *mac = zd_hw_mac(hw); + + r = zd_chip_read_mac_addr_fw(&mac->chip, addr); + if (r) + return r; + + SET_IEEE80211_PERM_ADDR(hw, addr); + + return 0; +} + +int zd_mac_init_hw(struct ieee80211_hw *hw) +{ + int r; + struct zd_mac *mac = zd_hw_mac(hw); + struct zd_chip *chip = &mac->chip; + char alpha2[2]; + u8 default_regdomain; + + r = zd_chip_enable_int(chip); + if (r) + goto out; + r = zd_chip_init_hw(chip); + if (r) + goto disable_int; + + ZD_ASSERT(!irqs_disabled()); + + r = zd_read_regdomain(chip, &default_regdomain); + if (r) + goto disable_int; + spin_lock_irq(&mac->lock); + mac->regdomain = mac->default_regdomain = default_regdomain; + spin_unlock_irq(&mac->lock); + + /* We must inform the device that we are doing encryption/decryption in + * software at the moment. 
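+	 * ENC_SNIFFER presumably keeps the on-chip crypto engine out of the
+	 * receive path, so frames reach mac80211 unmodified and its software
+	 * crypto is used instead.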
*/ + r = zd_set_encryption_type(chip, ENC_SNIFFER); + if (r) + goto disable_int; + + r = zd_reg2alpha2(mac->regdomain, alpha2); + if (r) + goto disable_int; + + r = regulatory_hint(hw->wiphy, alpha2); +disable_int: + zd_chip_disable_int(chip); +out: + return r; +} + +void zd_mac_clear(struct zd_mac *mac) +{ + flush_workqueue(zd_workqueue); + zd_chip_clear(&mac->chip); + ZD_ASSERT(!spin_is_locked(&mac->lock)); + ZD_MEMCLEAR(mac, sizeof(struct zd_mac)); +} + +static int set_rx_filter(struct zd_mac *mac) +{ + unsigned long flags; + u32 filter = STA_RX_FILTER; + + spin_lock_irqsave(&mac->lock, flags); + if (mac->pass_ctrl) + filter |= RX_FILTER_CTRL; + spin_unlock_irqrestore(&mac->lock, flags); + + return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter); +} + +static int set_mc_hash(struct zd_mac *mac) +{ + struct zd_mc_hash hash; + zd_mc_clear(&hash); + return zd_chip_set_multicast_hash(&mac->chip, &hash); +} + +static int zd_op_start(struct ieee80211_hw *hw) +{ + struct zd_mac *mac = zd_hw_mac(hw); + struct zd_chip *chip = &mac->chip; + struct zd_usb *usb = &chip->usb; + int r; + + if (!usb->initialized) { + r = zd_usb_init_hw(usb); + if (r) + goto out; + } + + r = zd_chip_enable_int(chip); + if (r < 0) + goto out; + + r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G); + if (r < 0) + goto disable_int; + r = set_rx_filter(mac); + if (r) + goto disable_int; + r = set_mc_hash(mac); + if (r) + goto disable_int; + r = zd_chip_switch_radio_on(chip); + if (r < 0) + goto disable_int; + r = zd_chip_enable_rxtx(chip); + if (r < 0) + goto disable_radio; + r = zd_chip_enable_hwint(chip); + if (r < 0) + goto disable_rxtx; + + housekeeping_enable(mac); + return 0; +disable_rxtx: + zd_chip_disable_rxtx(chip); +disable_radio: + zd_chip_switch_radio_off(chip); +disable_int: + zd_chip_disable_int(chip); +out: + return r; +} + +static void zd_op_stop(struct ieee80211_hw *hw) +{ + struct zd_mac *mac = zd_hw_mac(hw); + struct zd_chip *chip = &mac->chip; + struct sk_buff *skb; + struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue; + + /* The order here deliberately is a little different from the open() + * method, since we need to make sure there is no opportunity for RX + * frames to be processed by mac80211 after we have stopped it. + */ + + zd_chip_disable_rxtx(chip); + housekeeping_disable(mac); + flush_workqueue(zd_workqueue); + + zd_chip_disable_hwint(chip); + zd_chip_switch_radio_off(chip); + zd_chip_disable_int(chip); + + + while ((skb = skb_dequeue(ack_wait_queue))) + dev_kfree_skb_any(skb); +} + +/** + * zd_mac_tx_status - reports tx status of a packet if required + * @hw - a &struct ieee80211_hw pointer + * @skb - a sk-buffer + * @flags: extra flags to set in the TX status info + * @ackssi: ACK signal strength + * @success - True for successful transmission of the frame + * + * This information calls ieee80211_tx_status_irqsafe() if required by the + * control information. It copies the control information into the status + * information. + * + * If no status information has been requested, the skb is freed. 
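+ * The reported rate table is rebuilt from zd_retry_rates[]: the first rate
+ * index stored in the control information selects the retry chain, and the
+ * retry count reported by the device tells how far down that chain the
+ * hardware got.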
+ */
+static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
+			     int ackssi, struct tx_status *tx_status)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	int i;
+	int success = 1, retry = 1;
+	int first_idx;
+	const struct tx_retry_rate *retries;
+
+	ieee80211_tx_info_clear_status(info);
+
+	if (tx_status) {
+		success = !tx_status->failure;
+		retry = tx_status->retry + success;
+	}
+
+	if (success) {
+		/* success */
+		info->flags |= IEEE80211_TX_STAT_ACK;
+	} else {
+		/* failure */
+		info->flags &= ~IEEE80211_TX_STAT_ACK;
+	}
+
+	first_idx = info->status.rates[0].idx;
+	ZD_ASSERT(0 <= first_idx && first_idx < ARRAY_SIZE(zd_retry_rates));
+	retries = &zd_retry_rates[first_idx];
+	ZD_ASSERT(1 <= retry && retry <= retries->count);
+
+	info->status.rates[0].idx = retries->rate[0];
+	info->status.rates[0].count = 1; /* (retry > 1 ? 2 : 1) */
+
+	for (i = 1; i < retry; i++) {
+		info->status.rates[i].idx = retries->rate[i];
+		info->status.rates[i].count = 1; /* ((i == retry - 1) && success ? 1 : 2) */
+	}
+	for (; i < IEEE80211_TX_MAX_RATES && i < retries->count; i++) {
+		info->status.rates[i].idx = retries->rate[retry - 1];
+		info->status.rates[i].count = 1; /* (success ? 1 : 2) */
+	}
+	if (i < IEEE80211_TX_MAX_RATES)
+		info->status.rates[i].idx = -1; /* terminate */
+
+	info->status.ack_signal = ackssi;
+	ieee80211_tx_status_irqsafe(hw, skb);
+}
+
+/**
+ * zd_mac_tx_failed - callback for failed frames
+ * @urb: the URB carrying the tx status report from the device
+ *
+ * This function is called if a frame couldn't be successfully
+ * transferred. The first matching frame from the tx queue will be selected
+ * and reported as error to the upper layers.
+ */
+void zd_mac_tx_failed(struct urb *urb)
+{
+	struct ieee80211_hw *hw = zd_usb_to_hw(urb->context);
+	struct zd_mac *mac = zd_hw_mac(hw);
+	struct sk_buff_head *q = &mac->ack_wait_queue;
+	struct sk_buff *skb;
+	struct tx_status *tx_status = (struct tx_status *)urb->transfer_buffer;
+	unsigned long flags;
+	int success = !tx_status->failure;
+	int retry = tx_status->retry + success;
+	int found = 0;
+	int i, position = 0;
+
+	q = &mac->ack_wait_queue;
+	spin_lock_irqsave(&q->lock, flags);
+
+	skb_queue_walk(q, skb) {
+		struct ieee80211_hdr *tx_hdr;
+		struct ieee80211_tx_info *info;
+		int first_idx, final_idx;
+		const struct tx_retry_rate *retries;
+		u8 final_rate;
+
+		position++;
+
+		/* if the hardware reports a failure and we had a 802.11 ACK
+		 * pending, then we skip the first skb when searching for a
+		 * matching frame */
+		if (tx_status->failure && mac->ack_pending &&
+		    skb_queue_is_first(q, skb)) {
+			continue;
+		}
+
+		tx_hdr = (struct ieee80211_hdr *)skb->data;
+
+		/* we skip all frames not matching the reported destination */
+		if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
+			continue;
+		}
+
+		/* we skip all frames not matching the reported final rate */
+
+		info = IEEE80211_SKB_CB(skb);
+		first_idx = info->status.rates[0].idx;
+		ZD_ASSERT(0 <= first_idx && first_idx < ARRAY_SIZE(zd_retry_rates));
+		retries = &zd_retry_rates[first_idx];
+		if (retry <= 0 || retry > retries->count)
+			continue;
+
+		final_idx = retries->rate[retry - 1];
+		final_rate = zd_rates[final_idx].hw_value;
+
+		if (final_rate != tx_status->rate) {
+			continue;
+		}
+
+		found = 1;
+		break;
+	}
+
+	if (found) {
+		for (i = 1; i <= position; i++) {
+			skb = __skb_dequeue(q);
+			zd_mac_tx_status(hw, skb,
+					 mac->ack_pending ? mac->ack_signal : 0,
+					 i == position ? tx_status : NULL);
+			mac->ack_pending = 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+/**
+ * zd_mac_tx_to_dev - callback for USB layer
+ * @skb: a &sk_buff pointer
+ * @error: error value, 0 if transmission successful
+ *
+ * Informs the MAC layer that the frame has been successfully transferred to
+ * the device.
If an ACK is required and the transfer to the device has been + * successful, the packets are put on the @ack_wait_queue with + * the control set removed. + */ +void zd_mac_tx_to_dev(struct sk_buff *skb, int error) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hw *hw = info->rate_driver_data[0]; + struct zd_mac *mac = zd_hw_mac(hw); + + ieee80211_tx_info_clear_status(info); + + skb_pull(skb, sizeof(struct zd_ctrlset)); + if (unlikely(error || + (info->flags & IEEE80211_TX_CTL_NO_ACK))) { + /* + * FIXME : do we need to fill in anything ? + */ + ieee80211_tx_status_irqsafe(hw, skb); + } else { + struct sk_buff_head *q = &mac->ack_wait_queue; + + skb_queue_tail(q, skb); + while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) { + zd_mac_tx_status(hw, skb_dequeue(q), + mac->ack_pending ? mac->ack_signal : 0, + NULL); + mac->ack_pending = 0; + } + } +} + +static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length) +{ + /* ZD_PURE_RATE() must be used to remove the modulation type flag of + * the zd-rate values. + */ + static const u8 rate_divisor[] = { + [ZD_PURE_RATE(ZD_CCK_RATE_1M)] = 1, + [ZD_PURE_RATE(ZD_CCK_RATE_2M)] = 2, + /* Bits must be doubled. */ + [ZD_PURE_RATE(ZD_CCK_RATE_5_5M)] = 11, + [ZD_PURE_RATE(ZD_CCK_RATE_11M)] = 11, + [ZD_PURE_RATE(ZD_OFDM_RATE_6M)] = 6, + [ZD_PURE_RATE(ZD_OFDM_RATE_9M)] = 9, + [ZD_PURE_RATE(ZD_OFDM_RATE_12M)] = 12, + [ZD_PURE_RATE(ZD_OFDM_RATE_18M)] = 18, + [ZD_PURE_RATE(ZD_OFDM_RATE_24M)] = 24, + [ZD_PURE_RATE(ZD_OFDM_RATE_36M)] = 36, + [ZD_PURE_RATE(ZD_OFDM_RATE_48M)] = 48, + [ZD_PURE_RATE(ZD_OFDM_RATE_54M)] = 54, + }; + + u32 bits = (u32)tx_length * 8; + u32 divisor; + + divisor = rate_divisor[ZD_PURE_RATE(zd_rate)]; + if (divisor == 0) + return -EINVAL; + + switch (zd_rate) { + case ZD_CCK_RATE_5_5M: + bits = (2*bits) + 10; /* round up to the next integer */ + break; + case ZD_CCK_RATE_11M: + if (service) { + u32 t = bits % 11; + *service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION; + if (0 < t && t <= 3) { + *service |= ZD_PLCP_SERVICE_LENGTH_EXTENSION; + } + } + bits += 10; /* round up to the next integer */ + break; + } + + return bits/divisor; +} + +static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, + struct ieee80211_hdr *header, + struct ieee80211_tx_info *info) +{ + /* + * CONTROL TODO: + * - if backoff needed, enable bit 0 + * - if burst (backoff not needed) disable bit 0 + */ + + cs->control = 0; + + /* First fragment */ + if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) + cs->control |= ZD_CS_NEED_RANDOM_BACKOFF; + + /* No ACK expected (multicast, etc.) */ + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + cs->control |= ZD_CS_NO_ACK; + + /* PS-POLL */ + if (ieee80211_is_pspoll(header->frame_control)) + cs->control |= ZD_CS_PS_POLL_FRAME; + + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) + cs->control |= ZD_CS_RTS; + + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + cs->control |= ZD_CS_SELF_CTS; + + /* FIXME: Management frame? 
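+	 *	  ZD_CS_MANAGEMENT_FRAME from the frame-type field defined in
+	 *	  zd_mac.h is never set here; only the PS-POLL type is handled
+	 *	  above.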
*/ +} + +static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon) +{ + struct zd_mac *mac = zd_hw_mac(hw); + int r; + u32 tmp, j = 0; + /* 4 more bytes for tail CRC */ + u32 full_len = beacon->len + 4; + + r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 0); + if (r < 0) + return r; + r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp); + if (r < 0) + return r; + + while (tmp & 0x2) { + r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp); + if (r < 0) + return r; + if ((++j % 100) == 0) { + printk(KERN_ERR "CR_BCN_FIFO_SEMAPHORE not ready\n"); + if (j >= 500) { + printk(KERN_ERR "Giving up beacon config.\n"); + return -ETIMEDOUT; + } + } + msleep(1); + } + + r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, full_len - 1); + if (r < 0) + return r; + if (zd_chip_is_zd1211b(&mac->chip)) { + r = zd_iowrite32(&mac->chip, CR_BCN_LENGTH, full_len - 1); + if (r < 0) + return r; + } + + for (j = 0 ; j < beacon->len; j++) { + r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, + *((u8 *)(beacon->data + j))); + if (r < 0) + return r; + } + + for (j = 0; j < 4; j++) { + r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 0x0); + if (r < 0) + return r; + } + + r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 1); + if (r < 0) + return r; + + /* 802.11b/g 2.4G CCK 1Mb + * 802.11a, not yet implemented, uses different values (see GPL vendor + * driver) + */ + return zd_iowrite32(&mac->chip, CR_BCN_PLCP_CFG, 0x00000400 | + (full_len << 19)); +} + +static int fill_ctrlset(struct zd_mac *mac, + struct sk_buff *skb) +{ + int r; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + unsigned int frag_len = skb->len + FCS_LEN; + unsigned int packet_length; + struct ieee80211_rate *txrate; + struct zd_ctrlset *cs = (struct zd_ctrlset *) + skb_push(skb, sizeof(struct zd_ctrlset)); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + ZD_ASSERT(frag_len <= 0xffff); + + txrate = ieee80211_get_tx_rate(mac->hw, info); + + cs->modulation = txrate->hw_value; + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + cs->modulation = txrate->hw_value_short; + + cs->tx_length = cpu_to_le16(frag_len); + + cs_set_control(mac, cs, hdr, info); + + packet_length = frag_len + sizeof(struct zd_ctrlset) + 10; + ZD_ASSERT(packet_length <= 0xffff); + /* ZD1211B: Computing the length difference this way, gives us + * flexibility to compute the packet length. + */ + cs->packet_length = cpu_to_le16(zd_chip_is_zd1211b(&mac->chip) ? + packet_length - frag_len : packet_length); + + /* + * CURRENT LENGTH: + * - transmit frame length in microseconds + * - seems to be derived from frame length + * - see Cal_Us_Service() in zdinlinef.h + * - if macp->bTxBurstEnable is enabled, then multiply by 4 + * - bTxBurstEnable is never set in the vendor driver + * + * SERVICE: + * - "for PLCP configuration" + * - always 0 except in some situations at 802.11b 11M + * - see line 53 of zdinlinef.h + */ + cs->service = 0; + r = zd_calc_tx_length_us(&cs->service, ZD_RATE(cs->modulation), + le16_to_cpu(cs->tx_length)); + if (r < 0) + return r; + cs->current_length = cpu_to_le16(r); + cs->next_frame_length = 0; + + return 0; +} + +/** + * zd_op_tx - transmits a network frame to the device + * + * @dev: mac80211 hardware device + * @skb: socket buffer + * @control: the control structure + * + * This function transmit an IEEE 802.11 network frame to the device. The + * control block of the skbuff will be initialized. If necessary the incoming + * mac80211 queues will be stopped. 
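+ *
+ * Always returns 0: if the control set cannot be filled or the USB transfer
+ * cannot be queued, the frame is dropped here rather than reported back as
+ * an error.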
+ */
+static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct zd_mac *mac = zd_hw_mac(hw);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	int r;
+
+	r = fill_ctrlset(mac, skb);
+	if (r)
+		goto fail;
+
+	info->rate_driver_data[0] = hw;
+
+	r = zd_usb_tx(&mac->chip.usb, skb);
+	if (r)
+		goto fail;
+	return 0;
+
+fail:
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+/**
+ * filter_ack - filters incoming packets for acknowledgements
+ * @hw: the mac80211 hardware device
+ * @rx_hdr: received header
+ * @stats: the status for the received packet
+ *
+ * This function looks for ACK packets and tries to match them with the
+ * frames in the tx queue. If a match is found, the frame will be dequeued
+ * and the upper layers are informed about the successful transmission. If
+ * mac80211 queues have been stopped and the number of frames still to be
+ * transmitted is low, the queues will be opened again.
+ *
+ * Returns 1 if the frame was an ACK, 0 if it was ignored.
+ */
+static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
+		      struct ieee80211_rx_status *stats)
+{
+	struct zd_mac *mac = zd_hw_mac(hw);
+	struct sk_buff *skb;
+	struct sk_buff_head *q;
+	unsigned long flags;
+	int found = 0;
+	int i, position = 0;
+
+	if (!ieee80211_is_ack(rx_hdr->frame_control))
+		return 0;
+
+	q = &mac->ack_wait_queue;
+	spin_lock_irqsave(&q->lock, flags);
+	skb_queue_walk(q, skb) {
+		struct ieee80211_hdr *tx_hdr;
+
+		position++;
+
+		if (mac->ack_pending && skb_queue_is_first(q, skb))
+			continue;
+
+		tx_hdr = (struct ieee80211_hdr *)skb->data;
+		if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN))) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found) {
+		for (i = 1; i < position; i++) {
+			skb = __skb_dequeue(q);
+			zd_mac_tx_status(hw, skb,
+					 mac->ack_pending ? mac->ack_signal : 0,
+					 NULL);
+			mac->ack_pending = 0;
+		}
+
+		mac->ack_pending = 1;
+		mac->ack_signal = stats->signal;
+	}
+
+	spin_unlock_irqrestore(&q->lock, flags);
+	return 1;
+}
+
+int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
+{
+	struct zd_mac *mac = zd_hw_mac(hw);
+	struct ieee80211_rx_status stats;
+	const struct rx_status *status;
+	struct sk_buff *skb;
+	int bad_frame = 0;
+	__le16 fc;
+	int need_padding;
+	int i;
+	u8 rate;
+
+	if (length < ZD_PLCP_HEADER_SIZE + 10 /* IEEE80211_1ADDR_LEN */ +
+	             FCS_LEN + sizeof(struct rx_status))
+		return -EINVAL;
+
+	memset(&stats, 0, sizeof(stats));
+
+	/* Note about pass_failed_fcs and pass_ctrl access below:
+	 * mac locking intentionally omitted here, as this is the only unlocked
+	 * reader and the only writer is configure_filter. Plus, if there were
+	 * any races accessing these variables, it wouldn't really matter.
+	 * If mac80211 ever provides a way for us to access filter flags
+	 * from outside configure_filter, we could improve on this. Also, this
+	 * situation may change once we implement some kind of DMA-into-skb
+	 * RX path. */
+
+	/* Caller has to ensure that length >= sizeof(struct rx_status).
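+	 * The device appends the rx_status block after the received payload,
+	 * which is why it is read from the tail end of the buffer below.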
*/ + status = (struct rx_status *) + (buffer + (length - sizeof(struct rx_status))); + if (status->frame_status & ZD_RX_ERROR) { + if (mac->pass_failed_fcs && + (status->frame_status & ZD_RX_CRC32_ERROR)) { + stats.flag |= RX_FLAG_FAILED_FCS_CRC; + bad_frame = 1; + } else { + return -EINVAL; + } + } + + stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq; + stats.band = IEEE80211_BAND_2GHZ; + stats.signal = status->signal_strength; + + rate = zd_rx_rate(buffer, status); + + /* todo: return index in the big switches in zd_rx_rate instead */ + for (i = 0; i < mac->band.n_bitrates; i++) + if (rate == mac->band.bitrates[i].hw_value) + stats.rate_idx = i; + + length -= ZD_PLCP_HEADER_SIZE + sizeof(struct rx_status); + buffer += ZD_PLCP_HEADER_SIZE; + + /* Except for bad frames, filter each frame to see if it is an ACK, in + * which case our internal TX tracking is updated. Normally we then + * bail here as there's no need to pass ACKs on up to the stack, but + * there is also the case where the stack has requested us to pass + * control frames on up (pass_ctrl) which we must consider. */ + if (!bad_frame && + filter_ack(hw, (struct ieee80211_hdr *)buffer, &stats) + && !mac->pass_ctrl) + return 0; + + fc = get_unaligned((__le16*)buffer); + need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); + + skb = dev_alloc_skb(length + (need_padding ? 2 : 0)); + if (skb == NULL) + return -ENOMEM; + if (need_padding) { + /* Make sure the the payload data is 4 byte aligned. */ + skb_reserve(skb, 2); + } + + /* FIXME : could we avoid this big memcpy ? */ + memcpy(skb_put(skb, length), buffer, length); + + memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats)); + ieee80211_rx_irqsafe(hw, skb); + return 0; +} + +static int zd_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct zd_mac *mac = zd_hw_mac(hw); + + /* using NL80211_IFTYPE_UNSPECIFIED to indicate no mode selected */ + if (mac->type != NL80211_IFTYPE_UNSPECIFIED) + return -EOPNOTSUPP; + + switch (vif->type) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + mac->type = vif->type; + break; + default: + return -EOPNOTSUPP; + } + + return zd_write_mac_addr(&mac->chip, vif->addr); +} + +static void zd_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct zd_mac *mac = zd_hw_mac(hw); + mac->type = NL80211_IFTYPE_UNSPECIFIED; + zd_set_beacon_interval(&mac->chip, 0); + zd_write_mac_addr(&mac->chip, NULL); +} + +static int zd_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct zd_mac *mac = zd_hw_mac(hw); + struct ieee80211_conf *conf = &hw->conf; + + return zd_chip_set_channel(&mac->chip, conf->channel->hw_value); +} + +static void zd_process_intr(struct work_struct *work) +{ + u16 int_status; + struct zd_mac *mac = container_of(work, struct zd_mac, process_intr); + + int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4)); + if (int_status & INT_CFG_NEXT_BCN) + dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n"); + else + dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n"); + + zd_chip_enable_hwint(&mac->chip); +} + + +static void set_multicast_hash_handler(struct work_struct *work) +{ + struct zd_mac *mac = + container_of(work, struct zd_mac, set_multicast_hash_work); + struct zd_mc_hash hash; + + spin_lock_irq(&mac->lock); + hash = mac->multicast_hash; + spin_unlock_irq(&mac->lock); + + zd_chip_set_multicast_hash(&mac->chip, &hash); +} + +static void 
set_rx_filter_handler(struct work_struct *work) +{ + struct zd_mac *mac = + container_of(work, struct zd_mac, set_rx_filter_work); + int r; + + dev_dbg_f(zd_mac_dev(mac), "\n"); + r = set_rx_filter(mac); + if (r) + dev_err(zd_mac_dev(mac), "set_rx_filter_handler error %d\n", r); +} + +static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw, + int mc_count, struct dev_addr_list *mclist) +{ + struct zd_mac *mac = zd_hw_mac(hw); + struct zd_mc_hash hash; + int i; + + zd_mc_clear(&hash); + + for (i = 0; i < mc_count; i++) { + if (!mclist) + break; + dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", mclist->dmi_addr); + zd_mc_add_addr(&hash, mclist->dmi_addr); + mclist = mclist->next; + } + + return hash.low | ((u64)hash.high << 32); +} + +#define SUPPORTED_FIF_FLAGS \ + (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \ + FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC) +static void zd_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, + u64 multicast) +{ + struct zd_mc_hash hash = { + .low = multicast, + .high = multicast >> 32, + }; + struct zd_mac *mac = zd_hw_mac(hw); + unsigned long flags; + + /* Only deal with supported flags */ + changed_flags &= SUPPORTED_FIF_FLAGS; + *new_flags &= SUPPORTED_FIF_FLAGS; + + /* + * If multicast parameter (as returned by zd_op_prepare_multicast) + * has changed, no bit in changed_flags is set. To handle this + * situation, we do not return if changed_flags is 0. If we do so, + * we will have some issue with IPv6 which uses multicast for link + * layer address resolution. + */ + if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)) + zd_mc_add_all(&hash); + + spin_lock_irqsave(&mac->lock, flags); + mac->pass_failed_fcs = !!(*new_flags & FIF_FCSFAIL); + mac->pass_ctrl = !!(*new_flags & FIF_CONTROL); + mac->multicast_hash = hash; + spin_unlock_irqrestore(&mac->lock, flags); + + /* XXX: these can be called here now, can sleep now! */ + queue_work(zd_workqueue, &mac->set_multicast_hash_work); + + if (changed_flags & FIF_CONTROL) + queue_work(zd_workqueue, &mac->set_rx_filter_work); + + /* no handling required for FIF_OTHER_BSS as we don't currently + * do BSSID filtering */ + /* FIXME: in future it would be nice to enable the probe response + * filter (so that the driver doesn't see them) until + * FIF_BCN_PRBRESP_PROMISC is set. however due to atomicity here, we'd + * have to schedule work to enable prbresp reception, which might + * happen too late. For now we'll just listen and forward them all the + * time. 
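+	 * Unsolicited probe responses should merely cost some extra
+	 * processing, as the stack ignores the ones it did not ask for.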
*/ +} + +static void set_rts_cts_work(struct work_struct *work) +{ + struct zd_mac *mac = + container_of(work, struct zd_mac, set_rts_cts_work); + unsigned long flags; + unsigned int short_preamble; + + mutex_lock(&mac->chip.mutex); + + spin_lock_irqsave(&mac->lock, flags); + mac->updating_rts_rate = 0; + short_preamble = mac->short_preamble; + spin_unlock_irqrestore(&mac->lock, flags); + + zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble); + mutex_unlock(&mac->chip.mutex); +} + +static void zd_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct zd_mac *mac = zd_hw_mac(hw); + unsigned long flags; + int associated; + + dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes); + + if (mac->type == NL80211_IFTYPE_MESH_POINT || + mac->type == NL80211_IFTYPE_ADHOC) { + associated = true; + if (changes & BSS_CHANGED_BEACON) { + struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); + + if (beacon) { + zd_mac_config_beacon(hw, beacon); + kfree_skb(beacon); + } + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + u32 interval; + + if (bss_conf->enable_beacon) + interval = BCN_MODE_IBSS | + bss_conf->beacon_int; + else + interval = 0; + + zd_set_beacon_interval(&mac->chip, interval); + } + } else + associated = is_valid_ether_addr(bss_conf->bssid); + + spin_lock_irq(&mac->lock); + mac->associated = associated; + spin_unlock_irq(&mac->lock); + + /* TODO: do hardware bssid filtering */ + + if (changes & BSS_CHANGED_ERP_PREAMBLE) { + spin_lock_irqsave(&mac->lock, flags); + mac->short_preamble = bss_conf->use_short_preamble; + if (!mac->updating_rts_rate) { + mac->updating_rts_rate = 1; + /* FIXME: should disable TX here, until work has + * completed and RTS_CTS reg is updated */ + queue_work(zd_workqueue, &mac->set_rts_cts_work); + } + spin_unlock_irqrestore(&mac->lock, flags); + } +} + +static u64 zd_op_get_tsf(struct ieee80211_hw *hw) +{ + struct zd_mac *mac = zd_hw_mac(hw); + return zd_chip_get_tsf(&mac->chip); +} + +static const struct ieee80211_ops zd_ops = { + .tx = zd_op_tx, + .start = zd_op_start, + .stop = zd_op_stop, + .add_interface = zd_op_add_interface, + .remove_interface = zd_op_remove_interface, + .config = zd_op_config, + .prepare_multicast = zd_op_prepare_multicast, + .configure_filter = zd_op_configure_filter, + .bss_info_changed = zd_op_bss_info_changed, + .get_tsf = zd_op_get_tsf, +}; + +struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf) +{ + struct zd_mac *mac; + struct ieee80211_hw *hw; + + hw = ieee80211_alloc_hw(sizeof(struct zd_mac), &zd_ops); + if (!hw) { + dev_dbg_f(&intf->dev, "out of memory\n"); + return NULL; + } + + mac = zd_hw_mac(hw); + + memset(mac, 0, sizeof(*mac)); + spin_lock_init(&mac->lock); + mac->hw = hw; + + mac->type = NL80211_IFTYPE_UNSPECIFIED; + + memcpy(mac->channels, zd_channels, sizeof(zd_channels)); + memcpy(mac->rates, zd_rates, sizeof(zd_rates)); + mac->band.n_bitrates = ARRAY_SIZE(zd_rates); + mac->band.bitrates = mac->rates; + mac->band.n_channels = ARRAY_SIZE(zd_channels); + mac->band.channels = mac->channels; + + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band; + + hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_SIGNAL_UNSPEC; + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + + hw->max_signal = 100; + hw->queues = 1; + hw->extra_tx_headroom = sizeof(struct zd_ctrlset); + + /* + * Tell mac80211 that we support multi rate retries + */ + 
hw->max_rates = IEEE80211_TX_MAX_RATES; + hw->max_rate_tries = 18; /* 9 rates * 2 retries/rate */ + + skb_queue_head_init(&mac->ack_wait_queue); + mac->ack_pending = 0; + + zd_chip_init(&mac->chip, hw, intf); + housekeeping_init(mac); + INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler); + INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work); + INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler); + INIT_WORK(&mac->process_intr, zd_process_intr); + + SET_IEEE80211_DEV(hw, &intf->dev); + return hw; +} + +#define LINK_LED_WORK_DELAY HZ + +static void link_led_handler(struct work_struct *work) +{ + struct zd_mac *mac = + container_of(work, struct zd_mac, housekeeping.link_led_work.work); + struct zd_chip *chip = &mac->chip; + int is_associated; + int r; + + spin_lock_irq(&mac->lock); + is_associated = mac->associated; + spin_unlock_irq(&mac->lock); + + r = zd_chip_control_leds(chip, + is_associated ? ZD_LED_ASSOCIATED : ZD_LED_SCANNING); + if (r) + dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r); + + queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work, + LINK_LED_WORK_DELAY); +} + +static void housekeeping_init(struct zd_mac *mac) +{ + INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler); +} + +static void housekeeping_enable(struct zd_mac *mac) +{ + dev_dbg_f(zd_mac_dev(mac), "\n"); + queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work, + 0); +} + +static void housekeeping_disable(struct zd_mac *mac) +{ + dev_dbg_f(zd_mac_dev(mac), "\n"); + cancel_rearming_delayed_workqueue(zd_workqueue, + &mac->housekeeping.link_led_work); + zd_chip_control_leds(&mac->chip, ZD_LED_OFF); +} diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h new file mode 100644 index 0000000..630c298 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_mac.h @@ -0,0 +1,312 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ZD_MAC_H +#define _ZD_MAC_H + +#include +#include + +#include "zd_chip.h" + +struct zd_ctrlset { + u8 modulation; + __le16 tx_length; + u8 control; + /* stores only the difference to tx_length on ZD1211B */ + __le16 packet_length; + __le16 current_length; + u8 service; + __le16 next_frame_length; +} __attribute__((packed)); + +#define ZD_CS_RESERVED_SIZE 25 + +/* The field modulation of struct zd_ctrlset controls the bit rate, the use + * of short or long preambles in 802.11b (CCK mode) or the use of 802.11a or + * 802.11g in OFDM mode. + * + * The term zd-rate is used for the combination of the modulation type flag + * and the "pure" rate value. 
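+ *
+ * For example, ZD_OFDM_RATE_54M below is ZD_OFDM | ZD_OFDM_PLCP_RATE_54M =
+ * 0x10 | 0x0c = 0x1c: bit 4 carries the modulation type and the low nibble
+ * the pure (PLCP) rate code.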
+ */ +#define ZD_PURE_RATE_MASK 0x0f +#define ZD_MODULATION_TYPE_MASK 0x10 +#define ZD_RATE_MASK (ZD_PURE_RATE_MASK|ZD_MODULATION_TYPE_MASK) +#define ZD_PURE_RATE(modulation) ((modulation) & ZD_PURE_RATE_MASK) +#define ZD_MODULATION_TYPE(modulation) ((modulation) & ZD_MODULATION_TYPE_MASK) +#define ZD_RATE(modulation) ((modulation) & ZD_RATE_MASK) + +/* The two possible modulation types. Notify that 802.11b doesn't use the CCK + * codeing for the 1 and 2 MBit/s rate. We stay with the term here to remain + * consistent with uses the term at other places. + */ +#define ZD_CCK 0x00 +#define ZD_OFDM 0x10 + +/* The ZD1211 firmware uses proprietary encodings of the 802.11b (CCK) rates. + * For OFDM the PLCP rate encodings are used. We combine these "pure" rates + * with the modulation type flag and call the resulting values zd-rates. + */ +#define ZD_CCK_RATE_1M (ZD_CCK|0x00) +#define ZD_CCK_RATE_2M (ZD_CCK|0x01) +#define ZD_CCK_RATE_5_5M (ZD_CCK|0x02) +#define ZD_CCK_RATE_11M (ZD_CCK|0x03) +#define ZD_OFDM_RATE_6M (ZD_OFDM|ZD_OFDM_PLCP_RATE_6M) +#define ZD_OFDM_RATE_9M (ZD_OFDM|ZD_OFDM_PLCP_RATE_9M) +#define ZD_OFDM_RATE_12M (ZD_OFDM|ZD_OFDM_PLCP_RATE_12M) +#define ZD_OFDM_RATE_18M (ZD_OFDM|ZD_OFDM_PLCP_RATE_18M) +#define ZD_OFDM_RATE_24M (ZD_OFDM|ZD_OFDM_PLCP_RATE_24M) +#define ZD_OFDM_RATE_36M (ZD_OFDM|ZD_OFDM_PLCP_RATE_36M) +#define ZD_OFDM_RATE_48M (ZD_OFDM|ZD_OFDM_PLCP_RATE_48M) +#define ZD_OFDM_RATE_54M (ZD_OFDM|ZD_OFDM_PLCP_RATE_54M) + +/* The bit 5 of the zd_ctrlset modulation field controls the preamble in CCK + * mode or the 802.11a/802.11g selection in OFDM mode. + */ +#define ZD_CCK_PREA_LONG 0x00 +#define ZD_CCK_PREA_SHORT 0x20 +#define ZD_OFDM_MODE_11G 0x00 +#define ZD_OFDM_MODE_11A 0x20 + +/* zd_ctrlset control field */ +#define ZD_CS_NEED_RANDOM_BACKOFF 0x01 +#define ZD_CS_NO_ACK 0x02 + +#define ZD_CS_FRAME_TYPE_MASK 0x0c +#define ZD_CS_DATA_FRAME 0x00 +#define ZD_CS_PS_POLL_FRAME 0x04 +#define ZD_CS_MANAGEMENT_FRAME 0x08 +#define ZD_CS_NO_SEQUENCE_CTL_FRAME 0x0c + +#define ZD_CS_WAKE_DESTINATION 0x10 +#define ZD_CS_RTS 0x20 +#define ZD_CS_ENCRYPT 0x40 +#define ZD_CS_SELF_CTS 0x80 + +/* Incoming frames are prepended by a PLCP header */ +#define ZD_PLCP_HEADER_SIZE 5 + +struct rx_length_info { + __le16 length[3]; + __le16 tag; +} __attribute__((packed)); + +#define RX_LENGTH_INFO_TAG 0x697e + +struct rx_status { + u8 signal_quality_cck; + /* rssi */ + u8 signal_strength; + u8 signal_quality_ofdm; + u8 decryption_type; + u8 frame_status; +} __attribute__((packed)); + +/* rx_status field decryption_type */ +#define ZD_RX_NO_WEP 0 +#define ZD_RX_WEP64 1 +#define ZD_RX_TKIP 2 +#define ZD_RX_AES 4 +#define ZD_RX_WEP128 5 +#define ZD_RX_WEP256 6 + +/* rx_status field frame_status */ +#define ZD_RX_FRAME_MODULATION_MASK 0x01 +#define ZD_RX_CCK 0x00 +#define ZD_RX_OFDM 0x01 + +#define ZD_RX_TIMEOUT_ERROR 0x02 +#define ZD_RX_FIFO_OVERRUN_ERROR 0x04 +#define ZD_RX_DECRYPTION_ERROR 0x08 +#define ZD_RX_CRC32_ERROR 0x10 +#define ZD_RX_NO_ADDR1_MATCH_ERROR 0x20 +#define ZD_RX_CRC16_ERROR 0x40 +#define ZD_RX_ERROR 0x80 + +struct tx_retry_rate { + int count; /* number of valid element in rate[] array */ + int rate[10]; /* retry rates, described by an index in zd_rates[] */ +}; + +struct tx_status { + u8 type; /* must always be 0x01 : USB_INT_TYPE */ + u8 id; /* must always be 0xa0 : USB_INT_ID_RETRY_FAILED */ + u8 rate; + u8 pad; + u8 mac[ETH_ALEN]; + u8 retry; + u8 failure; +} __attribute__((packed)); + +enum mac_flags { + MAC_FIXED_CHANNEL = 0x01, +}; + +struct housekeeping { + struct 
delayed_work link_led_work; +}; + +#define ZD_MAC_STATS_BUFFER_SIZE 16 + +#define ZD_MAC_MAX_ACK_WAITERS 50 + +struct zd_mac { + struct zd_chip chip; + spinlock_t lock; + spinlock_t intr_lock; + struct ieee80211_hw *hw; + struct housekeeping housekeeping; + struct work_struct set_multicast_hash_work; + struct work_struct set_rts_cts_work; + struct work_struct set_rx_filter_work; + struct work_struct process_intr; + struct zd_mc_hash multicast_hash; + u8 intr_buffer[USB_MAX_EP_INT_BUFFER]; + u8 regdomain; + u8 default_regdomain; + int type; + int associated; + struct sk_buff_head ack_wait_queue; + struct ieee80211_channel channels[14]; + struct ieee80211_rate rates[12]; + struct ieee80211_supported_band band; + + /* Short preamble (used for RTS/CTS) */ + unsigned int short_preamble:1; + + /* flags to indicate update in progress */ + unsigned int updating_rts_rate:1; + + /* whether to pass frames with CRC errors to stack */ + unsigned int pass_failed_fcs:1; + + /* whether to pass control frames to stack */ + unsigned int pass_ctrl:1; + + /* whether we have received a 802.11 ACK that is pending */ + unsigned int ack_pending:1; + + /* signal strength of the last 802.11 ACK received */ + int ack_signal; +}; + +#define ZD_REGDOMAIN_FCC 0x10 +#define ZD_REGDOMAIN_IC 0x20 +#define ZD_REGDOMAIN_ETSI 0x30 +#define ZD_REGDOMAIN_SPAIN 0x31 +#define ZD_REGDOMAIN_FRANCE 0x32 +#define ZD_REGDOMAIN_JAPAN_ADD 0x40 +#define ZD_REGDOMAIN_JAPAN 0x41 + +enum { + MIN_CHANNEL24 = 1, + MAX_CHANNEL24 = 14, +}; + +#define ZD_PLCP_SERVICE_LENGTH_EXTENSION 0x80 + +struct ofdm_plcp_header { + u8 prefix[3]; + __le16 service; +} __attribute__((packed)); + +static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header) +{ + return header->prefix[0] & 0xf; +} + +/* The following defines give the encoding of the 4-bit rate field in the + * OFDM (802.11a/802.11g) PLCP header. Notify that these values are used to + * define the zd-rate values for OFDM. + * + * See the struct zd_ctrlset definition in zd_mac.h. + */ +#define ZD_OFDM_PLCP_RATE_6M 0xb +#define ZD_OFDM_PLCP_RATE_9M 0xf +#define ZD_OFDM_PLCP_RATE_12M 0xa +#define ZD_OFDM_PLCP_RATE_18M 0xe +#define ZD_OFDM_PLCP_RATE_24M 0x9 +#define ZD_OFDM_PLCP_RATE_36M 0xd +#define ZD_OFDM_PLCP_RATE_48M 0x8 +#define ZD_OFDM_PLCP_RATE_54M 0xc + +struct cck_plcp_header { + u8 signal; + u8 service; + __le16 length; + __le16 crc16; +} __attribute__((packed)); + +static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header) +{ + return header->signal; +} + +/* These defines give the encodings of the signal field in the 802.11b PLCP + * header. The signal field gives the bit rate of the following packet. Even + * if technically wrong we use CCK here also for the 1 MBit/s and 2 MBit/s + * rate to stay consistent with Zydas and our use of the term. + * + * Notify that these values are *not* used in the zd-rates. 
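+ *
+ * The signal byte encodes the rate in units of 100 kbit/s: 1 Mbit/s is 10
+ * (0x0a), 2 Mbit/s is 20 (0x14), 5.5 Mbit/s is 55 (0x37) and 11 Mbit/s is
+ * 110 (0x6e).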
+ */ +#define ZD_CCK_PLCP_SIGNAL_1M 0x0a +#define ZD_CCK_PLCP_SIGNAL_2M 0x14 +#define ZD_CCK_PLCP_SIGNAL_5M5 0x37 +#define ZD_CCK_PLCP_SIGNAL_11M 0x6e + +static inline struct zd_mac *zd_hw_mac(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +static inline struct zd_mac *zd_chip_to_mac(struct zd_chip *chip) +{ + return container_of(chip, struct zd_mac, chip); +} + +static inline struct zd_mac *zd_usb_to_mac(struct zd_usb *usb) +{ + return zd_chip_to_mac(zd_usb_to_chip(usb)); +} + +static inline u8 *zd_mac_get_perm_addr(struct zd_mac *mac) +{ + return mac->hw->wiphy->perm_addr; +} + +#define zd_mac_dev(mac) (zd_chip_dev(&(mac)->chip)) + +struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf); +void zd_mac_clear(struct zd_mac *mac); + +int zd_mac_preinit_hw(struct ieee80211_hw *hw); +int zd_mac_init_hw(struct ieee80211_hw *hw); + +int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length); +void zd_mac_tx_failed(struct urb *urb); +void zd_mac_tx_to_dev(struct sk_buff *skb, int error); + +#ifdef DEBUG +void zd_dump_rx_status(const struct rx_status *status); +#else +#define zd_dump_rx_status(status) +#endif /* DEBUG */ + +#endif /* _ZD_MAC_H */ diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c new file mode 100644 index 0000000..c875ee0 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_rf.c @@ -0,0 +1,182 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include + +#include "zd_def.h" +#include "zd_rf.h" +#include "zd_mac.h" +#include "zd_chip.h" + +static const char * const rfs[] = { + [0] = "unknown RF0", + [1] = "unknown RF1", + [UW2451_RF] = "UW2451_RF", + [UCHIP_RF] = "UCHIP_RF", + [AL2230_RF] = "AL2230_RF", + [AL7230B_RF] = "AL7230B_RF", + [THETA_RF] = "THETA_RF", + [AL2210_RF] = "AL2210_RF", + [MAXIM_NEW_RF] = "MAXIM_NEW_RF", + [UW2453_RF] = "UW2453_RF", + [AL2230S_RF] = "AL2230S_RF", + [RALINK_RF] = "RALINK_RF", + [INTERSIL_RF] = "INTERSIL_RF", + [RF2959_RF] = "RF2959_RF", + [MAXIM_NEW2_RF] = "MAXIM_NEW2_RF", + [PHILIPS_RF] = "PHILIPS_RF", +}; + +const char *zd_rf_name(u8 type) +{ + if (type & 0xf0) + type = 0; + return rfs[type]; +} + +void zd_rf_init(struct zd_rf *rf) +{ + memset(rf, 0, sizeof(*rf)); + + /* default to update channel integration, as almost all RF's do want + * this */ + rf->update_channel_int = 1; +} + +void zd_rf_clear(struct zd_rf *rf) +{ + if (rf->clear) + rf->clear(rf); + ZD_MEMCLEAR(rf, sizeof(*rf)); +} + +int zd_rf_init_hw(struct zd_rf *rf, u8 type) +{ + int r = 0; + int t; + struct zd_chip *chip = zd_rf_to_chip(rf); + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + switch (type) { + case RF2959_RF: + r = zd_rf_init_rf2959(rf); + break; + case AL2230_RF: + case AL2230S_RF: + r = zd_rf_init_al2230(rf); + break; + case AL7230B_RF: + r = zd_rf_init_al7230b(rf); + break; + case MAXIM_NEW_RF: + case UW2453_RF: + r = zd_rf_init_uw2453(rf); + break; + default: + dev_err(zd_chip_dev(chip), + "RF %s %#x is not supported\n", zd_rf_name(type), type); + rf->type = 0; + return -ENODEV; + } + + if (r) + return r; + + rf->type = type; + + r = zd_chip_lock_phy_regs(chip); + if (r) + return r; + t = rf->init_hw(rf); + r = zd_chip_unlock_phy_regs(chip); + if (t) + r = t; + return r; +} + +int zd_rf_scnprint_id(struct zd_rf *rf, char *buffer, size_t size) +{ + return scnprintf(buffer, size, "%s", zd_rf_name(rf->type)); +} + +int zd_rf_set_channel(struct zd_rf *rf, u8 channel) +{ + int r; + + ZD_ASSERT(mutex_is_locked(&zd_rf_to_chip(rf)->mutex)); + if (channel < MIN_CHANNEL24) + return -EINVAL; + if (channel > MAX_CHANNEL24) + return -EINVAL; + dev_dbg_f(zd_chip_dev(zd_rf_to_chip(rf)), "channel: %d\n", channel); + + r = rf->set_channel(rf, channel); + if (r >= 0) + rf->channel = channel; + return r; +} + +int zd_switch_radio_on(struct zd_rf *rf) +{ + int r, t; + struct zd_chip *chip = zd_rf_to_chip(rf); + + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + r = zd_chip_lock_phy_regs(chip); + if (r) + return r; + t = rf->switch_radio_on(rf); + r = zd_chip_unlock_phy_regs(chip); + if (t) + r = t; + return r; +} + +int zd_switch_radio_off(struct zd_rf *rf) +{ + int r, t; + struct zd_chip *chip = zd_rf_to_chip(rf); + + /* TODO: move phy regs handling to zd_chip */ + ZD_ASSERT(mutex_is_locked(&chip->mutex)); + r = zd_chip_lock_phy_regs(chip); + if (r) + return r; + t = rf->switch_radio_off(rf); + r = zd_chip_unlock_phy_regs(chip); + if (t) + r = t; + return r; +} + +int zd_rf_patch_6m_band_edge(struct zd_rf *rf, u8 channel) +{ + if (!rf->patch_6m_band_edge) + return 0; + + return rf->patch_6m_band_edge(rf, channel); +} + +int zd_rf_generic_patch_6m(struct zd_rf *rf, u8 channel) +{ + return zd_chip_generic_patch_6m_band(zd_rf_to_chip(rf), channel); +} + diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h 
b/drivers/net/wireless/zd1211rw/zd_rf.h new file mode 100644 index 0000000..79dc103 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_rf.h @@ -0,0 +1,111 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ZD_RF_H +#define _ZD_RF_H + +#define UW2451_RF 0x2 +#define UCHIP_RF 0x3 +#define AL2230_RF 0x4 +#define AL7230B_RF 0x5 /* a,b,g */ +#define THETA_RF 0x6 +#define AL2210_RF 0x7 +#define MAXIM_NEW_RF 0x8 +#define UW2453_RF 0x9 +#define AL2230S_RF 0xa +#define RALINK_RF 0xb +#define INTERSIL_RF 0xc +#define RF2959_RF 0xd +#define MAXIM_NEW2_RF 0xe +#define PHILIPS_RF 0xf + +#define RF_CHANNEL(ch) [(ch)-1] + +/* Provides functions of the RF transceiver. */ + +enum { + RF_REG_BITS = 6, + RF_VALUE_BITS = 18, + RF_RV_BITS = RF_REG_BITS + RF_VALUE_BITS, +}; + +struct zd_rf { + u8 type; + + u8 channel; + + /* whether channel integration and calibration should be updated + * defaults to 1 (yes) */ + u8 update_channel_int:1; + + /* whether CR47 should be patched from the EEPROM, if the appropriate + * flag is set in the POD. The vendor driver suggests that this should + * be done for all RF's, but a bug in their code prevents but their + * HW_OverWritePhyRegFromE2P() routine from ever taking effect. 
*/ + u8 patch_cck_gain:1; + + /* private RF driver data */ + void *priv; + + /* RF-specific functions */ + int (*init_hw)(struct zd_rf *rf); + int (*set_channel)(struct zd_rf *rf, u8 channel); + int (*switch_radio_on)(struct zd_rf *rf); + int (*switch_radio_off)(struct zd_rf *rf); + int (*patch_6m_band_edge)(struct zd_rf *rf, u8 channel); + void (*clear)(struct zd_rf *rf); +}; + +const char *zd_rf_name(u8 type); +void zd_rf_init(struct zd_rf *rf); +void zd_rf_clear(struct zd_rf *rf); +int zd_rf_init_hw(struct zd_rf *rf, u8 type); + +int zd_rf_scnprint_id(struct zd_rf *rf, char *buffer, size_t size); + +int zd_rf_set_channel(struct zd_rf *rf, u8 channel); + +int zd_switch_radio_on(struct zd_rf *rf); +int zd_switch_radio_off(struct zd_rf *rf); + +int zd_rf_patch_6m_band_edge(struct zd_rf *rf, u8 channel); +int zd_rf_generic_patch_6m(struct zd_rf *rf, u8 channel); + +static inline int zd_rf_should_update_pwr_int(struct zd_rf *rf) +{ + return rf->update_channel_int; +} + +static inline int zd_rf_should_patch_cck_gain(struct zd_rf *rf) +{ + return rf->patch_cck_gain; +} + +int zd_rf_patch_6m_band_edge(struct zd_rf *rf, u8 channel); +int zd_rf_generic_patch_6m(struct zd_rf *rf, u8 channel); + +/* Functions for individual RF chips */ + +int zd_rf_init_rf2959(struct zd_rf *rf); +int zd_rf_init_al2230(struct zd_rf *rf); +int zd_rf_init_al7230b(struct zd_rf *rf); +int zd_rf_init_uw2453(struct zd_rf *rf); + +#endif /* _ZD_RF_H */ diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c new file mode 100644 index 0000000..74a8f7a --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c @@ -0,0 +1,442 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include + +#include "zd_rf.h" +#include "zd_usb.h" +#include "zd_chip.h" + +#define IS_AL2230S(chip) ((chip)->al2230s_bit || (chip)->rf.type == AL2230S_RF) + +static const u32 zd1211_al2230_table[][3] = { + RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, }, + RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, }, + RF_CHANNEL( 3) = { 0x03e790, 0x033331, 0x00000d, }, + RF_CHANNEL( 4) = { 0x03e790, 0x0b3331, 0x00000d, }, + RF_CHANNEL( 5) = { 0x03f7a0, 0x033331, 0x00000d, }, + RF_CHANNEL( 6) = { 0x03f7a0, 0x0b3331, 0x00000d, }, + RF_CHANNEL( 7) = { 0x03e7a0, 0x033331, 0x00000d, }, + RF_CHANNEL( 8) = { 0x03e7a0, 0x0b3331, 0x00000d, }, + RF_CHANNEL( 9) = { 0x03f7b0, 0x033331, 0x00000d, }, + RF_CHANNEL(10) = { 0x03f7b0, 0x0b3331, 0x00000d, }, + RF_CHANNEL(11) = { 0x03e7b0, 0x033331, 0x00000d, }, + RF_CHANNEL(12) = { 0x03e7b0, 0x0b3331, 0x00000d, }, + RF_CHANNEL(13) = { 0x03f7c0, 0x033331, 0x00000d, }, + RF_CHANNEL(14) = { 0x03e7c0, 0x066661, 0x00000d, }, +}; + +static const u32 zd1211b_al2230_table[][3] = { + RF_CHANNEL( 1) = { 0x09efc0, 0x8cccc0, 0xb00000, }, + RF_CHANNEL( 2) = { 0x09efc0, 0x8cccd0, 0xb00000, }, + RF_CHANNEL( 3) = { 0x09e7c0, 0x8cccc0, 0xb00000, }, + RF_CHANNEL( 4) = { 0x09e7c0, 0x8cccd0, 0xb00000, }, + RF_CHANNEL( 5) = { 0x05efc0, 0x8cccc0, 0xb00000, }, + RF_CHANNEL( 6) = { 0x05efc0, 0x8cccd0, 0xb00000, }, + RF_CHANNEL( 7) = { 0x05e7c0, 0x8cccc0, 0xb00000, }, + RF_CHANNEL( 8) = { 0x05e7c0, 0x8cccd0, 0xb00000, }, + RF_CHANNEL( 9) = { 0x0defc0, 0x8cccc0, 0xb00000, }, + RF_CHANNEL(10) = { 0x0defc0, 0x8cccd0, 0xb00000, }, + RF_CHANNEL(11) = { 0x0de7c0, 0x8cccc0, 0xb00000, }, + RF_CHANNEL(12) = { 0x0de7c0, 0x8cccd0, 0xb00000, }, + RF_CHANNEL(13) = { 0x03efc0, 0x8cccc0, 0xb00000, }, + RF_CHANNEL(14) = { 0x03e7c0, 0x866660, 0xb00000, }, +}; + +static const struct zd_ioreq16 zd1211b_ioreqs_shared_1[] = { + { CR240, 0x57 }, { CR9, 0xe0 }, +}; + +static const struct zd_ioreq16 ioreqs_init_al2230s[] = { + { CR47, 0x1e }, /* MARK_002 */ + { CR106, 0x22 }, + { CR107, 0x2a }, /* MARK_002 */ + { CR109, 0x13 }, /* MARK_002 */ + { CR118, 0xf8 }, /* MARK_002 */ + { CR119, 0x12 }, { CR122, 0xe0 }, + { CR128, 0x10 }, /* MARK_001 from 0xe->0x10 */ + { CR129, 0x0e }, /* MARK_001 from 0xd->0x0e */ + { CR130, 0x10 }, /* MARK_001 from 0xb->0x0d */ +}; + +static int zd1211b_al2230_finalize_rf(struct zd_chip *chip) +{ + int r; + static const struct zd_ioreq16 ioreqs[] = { + { CR80, 0x30 }, { CR81, 0x30 }, { CR79, 0x58 }, + { CR12, 0xf0 }, { CR77, 0x1b }, { CR78, 0x58 }, + { CR203, 0x06 }, + { }, + + { CR240, 0x80 }, + }; + + r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); + if (r) + return r; + + /* related to antenna selection? 
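+	 *	  The CR9 override below is only applied on chips with the new
+	 *	  PHY layout.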
*/ + if (chip->new_phy_layout) { + r = zd_iowrite16_locked(chip, 0xe1, CR9); + if (r) + return r; + } + + return zd_iowrite16_locked(chip, 0x06, CR203); +} + +static int zd1211_al2230_init_hw(struct zd_rf *rf) +{ + int r; + struct zd_chip *chip = zd_rf_to_chip(rf); + + static const struct zd_ioreq16 ioreqs_init[] = { + { CR15, 0x20 }, { CR23, 0x40 }, { CR24, 0x20 }, + { CR26, 0x11 }, { CR28, 0x3e }, { CR29, 0x00 }, + { CR44, 0x33 }, { CR106, 0x2a }, { CR107, 0x1a }, + { CR109, 0x09 }, { CR110, 0x27 }, { CR111, 0x2b }, + { CR112, 0x2b }, { CR119, 0x0a }, { CR10, 0x89 }, + /* for newest (3rd cut) AL2300 */ + { CR17, 0x28 }, + { CR26, 0x93 }, { CR34, 0x30 }, + /* for newest (3rd cut) AL2300 */ + { CR35, 0x3e }, + { CR41, 0x24 }, { CR44, 0x32 }, + /* for newest (3rd cut) AL2300 */ + { CR46, 0x96 }, + { CR47, 0x1e }, { CR79, 0x58 }, { CR80, 0x30 }, + { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 }, + { CR92, 0x0a }, { CR99, 0x28 }, { CR100, 0x00 }, + { CR101, 0x13 }, { CR102, 0x27 }, { CR106, 0x24 }, + { CR107, 0x2a }, { CR109, 0x09 }, { CR110, 0x13 }, + { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 }, + { CR114, 0x27 }, + /* for newest (3rd cut) AL2300 */ + { CR115, 0x24 }, + { CR116, 0x24 }, { CR117, 0xf4 }, { CR118, 0xfc }, + { CR119, 0x10 }, { CR120, 0x4f }, { CR121, 0x77 }, + { CR122, 0xe0 }, { CR137, 0x88 }, { CR252, 0xff }, + { CR253, 0xff }, + }; + + static const struct zd_ioreq16 ioreqs_pll[] = { + /* shdnb(PLL_ON)=0 */ + { CR251, 0x2f }, + /* shdnb(PLL_ON)=1 */ + { CR251, 0x3f }, + { CR138, 0x28 }, { CR203, 0x06 }, + }; + + static const u32 rv1[] = { + /* Channel 1 */ + 0x03f790, + 0x033331, + 0x00000d, + + 0x0b3331, + 0x03b812, + 0x00fff3, + }; + + static const u32 rv2[] = { + 0x000da4, + 0x0f4dc5, /* fix freq shift, 0x04edc5 */ + 0x0805b6, + 0x011687, + 0x000688, + 0x0403b9, /* external control TX power (CR31) */ + 0x00dbba, + 0x00099b, + 0x0bdffc, + 0x00000d, + 0x00500f, + }; + + static const u32 rv3[] = { + 0x00d00f, + 0x004c0f, + 0x00540f, + 0x00700f, + 0x00500f, + }; + + r = zd_iowrite16a_locked(chip, ioreqs_init, ARRAY_SIZE(ioreqs_init)); + if (r) + return r; + + if (IS_AL2230S(chip)) { + r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s, + ARRAY_SIZE(ioreqs_init_al2230s)); + if (r) + return r; + } + + r = zd_rfwritev_locked(chip, rv1, ARRAY_SIZE(rv1), RF_RV_BITS); + if (r) + return r; + + /* improve band edge for AL2230S */ + if (IS_AL2230S(chip)) + r = zd_rfwrite_locked(chip, 0x000824, RF_RV_BITS); + else + r = zd_rfwrite_locked(chip, 0x0005a4, RF_RV_BITS); + if (r) + return r; + + r = zd_rfwritev_locked(chip, rv2, ARRAY_SIZE(rv2), RF_RV_BITS); + if (r) + return r; + + r = zd_iowrite16a_locked(chip, ioreqs_pll, ARRAY_SIZE(ioreqs_pll)); + if (r) + return r; + + r = zd_rfwritev_locked(chip, rv3, ARRAY_SIZE(rv3), RF_RV_BITS); + if (r) + return r; + + return 0; +} + +static int zd1211b_al2230_init_hw(struct zd_rf *rf) +{ + int r; + struct zd_chip *chip = zd_rf_to_chip(rf); + + static const struct zd_ioreq16 ioreqs1[] = { + { CR10, 0x89 }, { CR15, 0x20 }, + { CR17, 0x2B }, /* for newest(3rd cut) AL2230 */ + { CR23, 0x40 }, { CR24, 0x20 }, { CR26, 0x93 }, + { CR28, 0x3e }, { CR29, 0x00 }, + { CR33, 0x28 }, /* 5621 */ + { CR34, 0x30 }, + { CR35, 0x3e }, /* for newest(3rd cut) AL2230 */ + { CR41, 0x24 }, { CR44, 0x32 }, + { CR46, 0x99 }, /* for newest(3rd cut) AL2230 */ + { CR47, 0x1e }, + + /* ZD1211B 05.06.10 */ + { CR48, 0x06 }, { CR49, 0xf9 }, { CR51, 0x01 }, + { CR52, 0x80 }, { CR53, 0x7e }, { CR65, 0x00 }, + { CR66, 0x00 }, { CR67, 0x00 }, { CR68, 0x00 }, + { CR69, 0x28 }, + 
+ { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 }, + { CR87, 0x0a }, { CR89, 0x04 }, + { CR91, 0x00 }, /* 5621 */ + { CR92, 0x0a }, + { CR98, 0x8d }, /* 4804, for 1212 new algorithm */ + { CR99, 0x00 }, /* 5621 */ + { CR101, 0x13 }, { CR102, 0x27 }, + { CR106, 0x24 }, /* for newest(3rd cut) AL2230 */ + { CR107, 0x2a }, + { CR109, 0x13 }, /* 4804, for 1212 new algorithm */ + { CR110, 0x1f }, /* 4804, for 1212 new algorithm */ + { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 }, + { CR114, 0x27 }, + { CR115, 0x26 }, /* 24->26 at 4902 for newest(3rd cut) AL2230 */ + { CR116, 0x24 }, + { CR117, 0xfa }, /* for 1211b */ + { CR118, 0xfa }, /* for 1211b */ + { CR119, 0x10 }, + { CR120, 0x4f }, + { CR121, 0x6c }, /* for 1211b */ + { CR122, 0xfc }, /* E0->FC at 4902 */ + { CR123, 0x57 }, /* 5623 */ + { CR125, 0xad }, /* 4804, for 1212 new algorithm */ + { CR126, 0x6c }, /* 5614 */ + { CR127, 0x03 }, /* 4804, for 1212 new algorithm */ + { CR137, 0x50 }, /* 5614 */ + { CR138, 0xa8 }, + { CR144, 0xac }, /* 5621 */ + { CR150, 0x0d }, { CR252, 0x34 }, { CR253, 0x34 }, + }; + + static const u32 rv1[] = { + 0x8cccd0, + 0x481dc0, + 0xcfff00, + 0x25a000, + }; + + static const u32 rv2[] = { + /* To improve AL2230 yield, improve phase noise, 4713 */ + 0x25a000, + 0xa3b2f0, + + 0x6da010, /* Reg6 update for MP versio */ + 0xe36280, /* Modified by jxiao for Bor-Chin on 2004/08/02 */ + 0x116000, + 0x9dc020, /* External control TX power (CR31) */ + 0x5ddb00, /* RegA update for MP version */ + 0xd99000, /* RegB update for MP version */ + 0x3ffbd0, /* RegC update for MP version */ + 0xb00000, /* RegD update for MP version */ + + /* improve phase noise and remove phase calibration,4713 */ + 0xf01a00, + }; + + static const struct zd_ioreq16 ioreqs2[] = { + { CR251, 0x2f }, /* shdnb(PLL_ON)=0 */ + { CR251, 0x7f }, /* shdnb(PLL_ON)=1 */ + }; + + static const u32 rv3[] = { + /* To improve AL2230 yield, 4713 */ + 0xf01b00, + 0xf01e00, + 0xf01a00, + }; + + static const struct zd_ioreq16 ioreqs3[] = { + /* related to 6M band edge patching, happens unconditionally */ + { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, + }; + + r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1, + ARRAY_SIZE(zd1211b_ioreqs_shared_1)); + if (r) + return r; + r = zd_iowrite16a_locked(chip, ioreqs1, ARRAY_SIZE(ioreqs1)); + if (r) + return r; + + if (IS_AL2230S(chip)) { + r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s, + ARRAY_SIZE(ioreqs_init_al2230s)); + if (r) + return r; + } + + r = zd_rfwritev_cr_locked(chip, zd1211b_al2230_table[0], 3); + if (r) + return r; + r = zd_rfwritev_cr_locked(chip, rv1, ARRAY_SIZE(rv1)); + if (r) + return r; + + if (IS_AL2230S(chip)) + r = zd_rfwrite_locked(chip, 0x241000, RF_RV_BITS); + else + r = zd_rfwrite_locked(chip, 0x25a000, RF_RV_BITS); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, rv2, ARRAY_SIZE(rv2)); + if (r) + return r; + r = zd_iowrite16a_locked(chip, ioreqs2, ARRAY_SIZE(ioreqs2)); + if (r) + return r; + r = zd_rfwritev_cr_locked(chip, rv3, ARRAY_SIZE(rv3)); + if (r) + return r; + r = zd_iowrite16a_locked(chip, ioreqs3, ARRAY_SIZE(ioreqs3)); + if (r) + return r; + return zd1211b_al2230_finalize_rf(chip); +} + +static int zd1211_al2230_set_channel(struct zd_rf *rf, u8 channel) +{ + int r; + const u32 *rv = zd1211_al2230_table[channel-1]; + struct zd_chip *chip = zd_rf_to_chip(rf); + static const struct zd_ioreq16 ioreqs[] = { + { CR138, 0x28 }, + { CR203, 0x06 }, + }; + + r = zd_rfwritev_locked(chip, rv, 3, RF_RV_BITS); + if (r) + return r; + return zd_iowrite16a_locked(chip, ioreqs, 
ARRAY_SIZE(ioreqs)); +} + +static int zd1211b_al2230_set_channel(struct zd_rf *rf, u8 channel) +{ + int r; + const u32 *rv = zd1211b_al2230_table[channel-1]; + struct zd_chip *chip = zd_rf_to_chip(rf); + + r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1, + ARRAY_SIZE(zd1211b_ioreqs_shared_1)); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, rv, 3); + if (r) + return r; + + return zd1211b_al2230_finalize_rf(chip); +} + +static int zd1211_al2230_switch_radio_on(struct zd_rf *rf) +{ + struct zd_chip *chip = zd_rf_to_chip(rf); + static const struct zd_ioreq16 ioreqs[] = { + { CR11, 0x00 }, + { CR251, 0x3f }, + }; + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int zd1211b_al2230_switch_radio_on(struct zd_rf *rf) +{ + struct zd_chip *chip = zd_rf_to_chip(rf); + static const struct zd_ioreq16 ioreqs[] = { + { CR11, 0x00 }, + { CR251, 0x7f }, + }; + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int al2230_switch_radio_off(struct zd_rf *rf) +{ + struct zd_chip *chip = zd_rf_to_chip(rf); + static const struct zd_ioreq16 ioreqs[] = { + { CR11, 0x04 }, + { CR251, 0x2f }, + }; + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +int zd_rf_init_al2230(struct zd_rf *rf) +{ + struct zd_chip *chip = zd_rf_to_chip(rf); + + rf->switch_radio_off = al2230_switch_radio_off; + if (zd_chip_is_zd1211b(chip)) { + rf->init_hw = zd1211b_al2230_init_hw; + rf->set_channel = zd1211b_al2230_set_channel; + rf->switch_radio_on = zd1211b_al2230_switch_radio_on; + } else { + rf->init_hw = zd1211_al2230_init_hw; + rf->set_channel = zd1211_al2230_set_channel; + rf->switch_radio_on = zd1211_al2230_switch_radio_on; + } + rf->patch_6m_band_edge = zd_rf_generic_patch_6m; + rf->patch_cck_gain = 1; + return 0; +} diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c new file mode 100644 index 0000000..65095d6 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c @@ -0,0 +1,495 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include + +#include "zd_rf.h" +#include "zd_usb.h" +#include "zd_chip.h" + +static const u32 chan_rv[][2] = { + RF_CHANNEL( 1) = { 0x09ec00, 0x8cccc8 }, + RF_CHANNEL( 2) = { 0x09ec00, 0x8cccd8 }, + RF_CHANNEL( 3) = { 0x09ec00, 0x8cccc0 }, + RF_CHANNEL( 4) = { 0x09ec00, 0x8cccd0 }, + RF_CHANNEL( 5) = { 0x05ec00, 0x8cccc8 }, + RF_CHANNEL( 6) = { 0x05ec00, 0x8cccd8 }, + RF_CHANNEL( 7) = { 0x05ec00, 0x8cccc0 }, + RF_CHANNEL( 8) = { 0x05ec00, 0x8cccd0 }, + RF_CHANNEL( 9) = { 0x0dec00, 0x8cccc8 }, + RF_CHANNEL(10) = { 0x0dec00, 0x8cccd8 }, + RF_CHANNEL(11) = { 0x0dec00, 0x8cccc0 }, + RF_CHANNEL(12) = { 0x0dec00, 0x8cccd0 }, + RF_CHANNEL(13) = { 0x03ec00, 0x8cccc8 }, + RF_CHANNEL(14) = { 0x03ec00, 0x866660 }, +}; + +static const u32 std_rv[] = { + 0x4ff821, + 0xc5fbfc, + 0x21ebfe, + 0xafd401, /* freq shift 0xaad401 */ + 0x6cf56a, + 0xe04073, + 0x193d76, + 0x9dd844, + 0x500007, + 0xd8c010, +}; + +static const u32 rv_init1[] = { + 0x3c9000, + 0xbfffff, + 0x700000, + 0xf15d58, +}; + +static const u32 rv_init2[] = { + 0xf15d59, + 0xf15d5c, + 0xf15d58, +}; + +static const struct zd_ioreq16 ioreqs_sw[] = { + { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, + { CR38, 0x38 }, { CR136, 0xdf }, +}; + +static int zd1211b_al7230b_finalize(struct zd_chip *chip) +{ + int r; + static const struct zd_ioreq16 ioreqs[] = { + { CR80, 0x30 }, { CR81, 0x30 }, { CR79, 0x58 }, + { CR12, 0xf0 }, { CR77, 0x1b }, { CR78, 0x58 }, + { CR203, 0x04 }, + { }, + { CR240, 0x80 }, + }; + + r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); + if (r) + return r; + + if (chip->new_phy_layout) { + /* antenna selection? 
*/ + r = zd_iowrite16_locked(chip, 0xe5, CR9); + if (r) + return r; + } + + return zd_iowrite16_locked(chip, 0x04, CR203); +} + +static int zd1211_al7230b_init_hw(struct zd_rf *rf) +{ + int r; + struct zd_chip *chip = zd_rf_to_chip(rf); + + /* All of these writes are identical to AL2230 unless otherwise + * specified */ + static const struct zd_ioreq16 ioreqs_1[] = { + /* This one is 7230-specific, and happens before the rest */ + { CR240, 0x57 }, + { }, + + { CR15, 0x20 }, { CR23, 0x40 }, { CR24, 0x20 }, + { CR26, 0x11 }, { CR28, 0x3e }, { CR29, 0x00 }, + { CR44, 0x33 }, + /* This value is different for 7230 (was: 0x2a) */ + { CR106, 0x22 }, + { CR107, 0x1a }, { CR109, 0x09 }, { CR110, 0x27 }, + { CR111, 0x2b }, { CR112, 0x2b }, { CR119, 0x0a }, + /* This happened further down in AL2230, + * and the value changed (was: 0xe0) */ + { CR122, 0xfc }, + { CR10, 0x89 }, + /* for newest (3rd cut) AL2300 */ + { CR17, 0x28 }, + { CR26, 0x93 }, { CR34, 0x30 }, + /* for newest (3rd cut) AL2300 */ + { CR35, 0x3e }, + { CR41, 0x24 }, { CR44, 0x32 }, + /* for newest (3rd cut) AL2300 */ + { CR46, 0x96 }, + { CR47, 0x1e }, { CR79, 0x58 }, { CR80, 0x30 }, + { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 }, + { CR92, 0x0a }, { CR99, 0x28 }, + /* This value is different for 7230 (was: 0x00) */ + { CR100, 0x02 }, + { CR101, 0x13 }, { CR102, 0x27 }, + /* This value is different for 7230 (was: 0x24) */ + { CR106, 0x22 }, + /* This value is different for 7230 (was: 0x2a) */ + { CR107, 0x3f }, + { CR109, 0x09 }, + /* This value is different for 7230 (was: 0x13) */ + { CR110, 0x1f }, + { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 }, + { CR114, 0x27 }, + /* for newest (3rd cut) AL2300 */ + { CR115, 0x24 }, + /* This value is different for 7230 (was: 0x24) */ + { CR116, 0x3f }, + /* This value is different for 7230 (was: 0xf4) */ + { CR117, 0xfa }, + { CR118, 0xfc }, { CR119, 0x10 }, { CR120, 0x4f }, + { CR121, 0x77 }, { CR137, 0x88 }, + /* This one is 7230-specific */ + { CR138, 0xa8 }, + /* This value is different for 7230 (was: 0xff) */ + { CR252, 0x34 }, + /* This value is different for 7230 (was: 0xff) */ + { CR253, 0x34 }, + + /* PLL_OFF */ + { CR251, 0x2f }, + }; + + static const struct zd_ioreq16 ioreqs_2[] = { + { CR251, 0x3f }, /* PLL_ON */ + { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, + { CR38, 0x38 }, { CR136, 0xdf }, + }; + + r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1)); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, chan_rv[0], ARRAY_SIZE(chan_rv[0])); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, std_rv, ARRAY_SIZE(std_rv)); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, rv_init1, ARRAY_SIZE(rv_init1)); + if (r) + return r; + + r = zd_iowrite16a_locked(chip, ioreqs_2, ARRAY_SIZE(ioreqs_2)); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, rv_init2, ARRAY_SIZE(rv_init2)); + if (r) + return r; + + r = zd_iowrite16_locked(chip, 0x06, CR203); + if (r) + return r; + r = zd_iowrite16_locked(chip, 0x80, CR240); + if (r) + return r; + + return 0; +} + +static int zd1211b_al7230b_init_hw(struct zd_rf *rf) +{ + int r; + struct zd_chip *chip = zd_rf_to_chip(rf); + + static const struct zd_ioreq16 ioreqs_1[] = { + { CR240, 0x57 }, { CR9, 0x9 }, + { }, + { CR10, 0x8b }, { CR15, 0x20 }, + { CR17, 0x2B }, /* for newest (3rd cut) AL2230 */ + { CR20, 0x10 }, /* 4N25->Stone Request */ + { CR23, 0x40 }, { CR24, 0x20 }, { CR26, 0x93 }, + { CR28, 0x3e }, { CR29, 0x00 }, + { CR33, 0x28 }, /* 5613 */ + { CR34, 0x30 }, + { CR35, 0x3e }, /* for newest (3rd cut) 
AL2230 */ + { CR41, 0x24 }, { CR44, 0x32 }, + { CR46, 0x99 }, /* for newest (3rd cut) AL2230 */ + { CR47, 0x1e }, + + /* ZD1215 5610 */ + { CR48, 0x00 }, { CR49, 0x00 }, { CR51, 0x01 }, + { CR52, 0x80 }, { CR53, 0x7e }, { CR65, 0x00 }, + { CR66, 0x00 }, { CR67, 0x00 }, { CR68, 0x00 }, + { CR69, 0x28 }, + + { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 }, + { CR87, 0x0A }, { CR89, 0x04 }, + { CR90, 0x58 }, /* 5112 */ + { CR91, 0x00 }, /* 5613 */ + { CR92, 0x0a }, + { CR98, 0x8d }, /* 4804, for 1212 new algorithm */ + { CR99, 0x00 }, { CR100, 0x02 }, { CR101, 0x13 }, + { CR102, 0x27 }, + { CR106, 0x20 }, /* change to 0x24 for AL7230B */ + { CR109, 0x13 }, /* 4804, for 1212 new algorithm */ + { CR112, 0x1f }, + }; + + static const struct zd_ioreq16 ioreqs_new_phy[] = { + { CR107, 0x28 }, + { CR110, 0x1f }, /* 5127, 0x13->0x1f */ + { CR111, 0x1f }, /* 0x13 to 0x1f for AL7230B */ + { CR116, 0x2a }, { CR118, 0xfa }, { CR119, 0x12 }, + { CR121, 0x6c }, /* 5613 */ + }; + + static const struct zd_ioreq16 ioreqs_old_phy[] = { + { CR107, 0x24 }, + { CR110, 0x13 }, /* 5127, 0x13->0x1f */ + { CR111, 0x13 }, /* 0x13 to 0x1f for AL7230B */ + { CR116, 0x24 }, { CR118, 0xfc }, { CR119, 0x11 }, + { CR121, 0x6a }, /* 5613 */ + }; + + static const struct zd_ioreq16 ioreqs_2[] = { + { CR113, 0x27 }, { CR114, 0x27 }, { CR115, 0x24 }, + { CR117, 0xfa }, { CR120, 0x4f }, + { CR122, 0xfc }, /* E0->FCh at 4901 */ + { CR123, 0x57 }, /* 5613 */ + { CR125, 0xad }, /* 4804, for 1212 new algorithm */ + { CR126, 0x6c }, /* 5613 */ + { CR127, 0x03 }, /* 4804, for 1212 new algorithm */ + { CR130, 0x10 }, + { CR131, 0x00 }, /* 5112 */ + { CR137, 0x50 }, /* 5613 */ + { CR138, 0xa8 }, /* 5112 */ + { CR144, 0xac }, /* 5613 */ + { CR148, 0x40 }, /* 5112 */ + { CR149, 0x40 }, /* 4O07, 50->40 */ + { CR150, 0x1a }, /* 5112, 0C->1A */ + { CR252, 0x34 }, { CR253, 0x34 }, + { CR251, 0x2f }, /* PLL_OFF */ + }; + + static const struct zd_ioreq16 ioreqs_3[] = { + { CR251, 0x7f }, /* PLL_ON */ + { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, + { CR38, 0x38 }, { CR136, 0xdf }, + }; + + r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1)); + if (r) + return r; + + if (chip->new_phy_layout) + r = zd_iowrite16a_locked(chip, ioreqs_new_phy, + ARRAY_SIZE(ioreqs_new_phy)); + else + r = zd_iowrite16a_locked(chip, ioreqs_old_phy, + ARRAY_SIZE(ioreqs_old_phy)); + if (r) + return r; + + r = zd_iowrite16a_locked(chip, ioreqs_2, ARRAY_SIZE(ioreqs_2)); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, chan_rv[0], ARRAY_SIZE(chan_rv[0])); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, std_rv, ARRAY_SIZE(std_rv)); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, rv_init1, ARRAY_SIZE(rv_init1)); + if (r) + return r; + + r = zd_iowrite16a_locked(chip, ioreqs_3, ARRAY_SIZE(ioreqs_3)); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, rv_init2, ARRAY_SIZE(rv_init2)); + if (r) + return r; + + return zd1211b_al7230b_finalize(chip); +} + +static int zd1211_al7230b_set_channel(struct zd_rf *rf, u8 channel) +{ + int r; + const u32 *rv = chan_rv[channel-1]; + struct zd_chip *chip = zd_rf_to_chip(rf); + + static const struct zd_ioreq16 ioreqs[] = { + /* PLL_ON */ + { CR251, 0x3f }, + { CR203, 0x06 }, { CR240, 0x08 }, + }; + + r = zd_iowrite16_locked(chip, 0x57, CR240); + if (r) + return r; + + /* PLL_OFF */ + r = zd_iowrite16_locked(chip, 0x2f, CR251); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, std_rv, ARRAY_SIZE(std_rv)); + if (r) + return r; + + r = zd_rfwrite_cr_locked(chip, 0x3c9000); + if (r) + return 
r; + r = zd_rfwrite_cr_locked(chip, 0xf15d58); + if (r) + return r; + + r = zd_iowrite16a_locked(chip, ioreqs_sw, ARRAY_SIZE(ioreqs_sw)); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, rv, 2); + if (r) + return r; + + r = zd_rfwrite_cr_locked(chip, 0x3c9000); + if (r) + return r; + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int zd1211b_al7230b_set_channel(struct zd_rf *rf, u8 channel) +{ + int r; + const u32 *rv = chan_rv[channel-1]; + struct zd_chip *chip = zd_rf_to_chip(rf); + + r = zd_iowrite16_locked(chip, 0x57, CR240); + if (r) + return r; + r = zd_iowrite16_locked(chip, 0xe4, CR9); + if (r) + return r; + + /* PLL_OFF */ + r = zd_iowrite16_locked(chip, 0x2f, CR251); + if (r) + return r; + r = zd_rfwritev_cr_locked(chip, std_rv, ARRAY_SIZE(std_rv)); + if (r) + return r; + + r = zd_rfwrite_cr_locked(chip, 0x3c9000); + if (r) + return r; + r = zd_rfwrite_cr_locked(chip, 0xf15d58); + if (r) + return r; + + r = zd_iowrite16a_locked(chip, ioreqs_sw, ARRAY_SIZE(ioreqs_sw)); + if (r) + return r; + + r = zd_rfwritev_cr_locked(chip, rv, 2); + if (r) + return r; + + r = zd_rfwrite_cr_locked(chip, 0x3c9000); + if (r) + return r; + + r = zd_iowrite16_locked(chip, 0x7f, CR251); + if (r) + return r; + + return zd1211b_al7230b_finalize(chip); +} + +static int zd1211_al7230b_switch_radio_on(struct zd_rf *rf) +{ + struct zd_chip *chip = zd_rf_to_chip(rf); + static const struct zd_ioreq16 ioreqs[] = { + { CR11, 0x00 }, + { CR251, 0x3f }, + }; + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int zd1211b_al7230b_switch_radio_on(struct zd_rf *rf) +{ + struct zd_chip *chip = zd_rf_to_chip(rf); + static const struct zd_ioreq16 ioreqs[] = { + { CR11, 0x00 }, + { CR251, 0x7f }, + }; + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int al7230b_switch_radio_off(struct zd_rf *rf) +{ + struct zd_chip *chip = zd_rf_to_chip(rf); + static const struct zd_ioreq16 ioreqs[] = { + { CR11, 0x04 }, + { CR251, 0x2f }, + }; + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +/* ZD1211B+AL7230B 6m band edge patching differs slightly from other + * configurations */ +static int zd1211b_al7230b_patch_6m(struct zd_rf *rf, u8 channel) +{ + struct zd_chip *chip = zd_rf_to_chip(rf); + struct zd_ioreq16 ioreqs[] = { + { CR128, 0x14 }, { CR129, 0x12 }, + }; + + /* FIXME: Channel 11 is not the edge for all regulatory domains. 
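+ * Channels other than 1 and 11 simply get the default 0x14/0x12 values
+ * from the initializer above written back.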
*/ + if (channel == 1) { + ioreqs[0].value = 0x0e; + ioreqs[1].value = 0x10; + } else if (channel == 11) { + ioreqs[0].value = 0x10; + ioreqs[1].value = 0x10; + } + + dev_dbg_f(zd_chip_dev(chip), "patching for channel %d\n", channel); + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +int zd_rf_init_al7230b(struct zd_rf *rf) +{ + struct zd_chip *chip = zd_rf_to_chip(rf); + + if (zd_chip_is_zd1211b(chip)) { + rf->init_hw = zd1211b_al7230b_init_hw; + rf->switch_radio_on = zd1211b_al7230b_switch_radio_on; + rf->set_channel = zd1211b_al7230b_set_channel; + rf->patch_6m_band_edge = zd1211b_al7230b_patch_6m; + } else { + rf->init_hw = zd1211_al7230b_init_hw; + rf->switch_radio_on = zd1211_al7230b_switch_radio_on; + rf->set_channel = zd1211_al7230b_set_channel; + rf->patch_6m_band_edge = zd_rf_generic_patch_6m; + rf->patch_cck_gain = 1; + } + + rf->switch_radio_off = al7230b_switch_radio_off; + + return 0; +} diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c new file mode 100644 index 0000000..0597d86 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c @@ -0,0 +1,282 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include + +#include "zd_rf.h" +#include "zd_usb.h" +#include "zd_chip.h" + +static const u32 rf2959_table[][2] = { + RF_CHANNEL( 1) = { 0x181979, 0x1e6666 }, + RF_CHANNEL( 2) = { 0x181989, 0x1e6666 }, + RF_CHANNEL( 3) = { 0x181999, 0x1e6666 }, + RF_CHANNEL( 4) = { 0x1819a9, 0x1e6666 }, + RF_CHANNEL( 5) = { 0x1819b9, 0x1e6666 }, + RF_CHANNEL( 6) = { 0x1819c9, 0x1e6666 }, + RF_CHANNEL( 7) = { 0x1819d9, 0x1e6666 }, + RF_CHANNEL( 8) = { 0x1819e9, 0x1e6666 }, + RF_CHANNEL( 9) = { 0x1819f9, 0x1e6666 }, + RF_CHANNEL(10) = { 0x181a09, 0x1e6666 }, + RF_CHANNEL(11) = { 0x181a19, 0x1e6666 }, + RF_CHANNEL(12) = { 0x181a29, 0x1e6666 }, + RF_CHANNEL(13) = { 0x181a39, 0x1e6666 }, + RF_CHANNEL(14) = { 0x181a60, 0x1c0000 }, +}; + +#if 0 +static int bits(u32 rw, int from, int to) +{ + rw &= ~(0xffffffffU << (to+1)); + rw >>= from; + return rw; +} + +static int bit(u32 rw, int bit) +{ + return bits(rw, bit, bit); +} + +static void dump_regwrite(u32 rw) +{ + int reg = bits(rw, 18, 22); + int rw_flag = bits(rw, 23, 23); + PDEBUG("rf2959 %#010x reg %d rw %d", rw, reg, rw_flag); + + switch (reg) { + case 0: + PDEBUG("reg0 CFG1 ref_sel %d hybernate %d rf_vco_reg_en %d" + " if_vco_reg_en %d if_vga_en %d", + bits(rw, 14, 15), bit(rw, 3), bit(rw, 2), bit(rw, 1), + bit(rw, 0)); + break; + case 1: + PDEBUG("reg1 IFPLL1 pll_en1 %d kv_en1 %d vtc_en1 %d lpf1 %d" + " cpl1 %d pdp1 %d autocal_en1 %d ld_en1 %d ifloopr %d" + " ifloopc %d dac1 %d", + bit(rw, 17), bit(rw, 16), bit(rw, 15), bit(rw, 14), + bit(rw, 13), bit(rw, 12), bit(rw, 11), bit(rw, 10), + bits(rw, 7, 9), bits(rw, 4, 6), bits(rw, 0, 3)); + break; + case 2: + PDEBUG("reg2 IFPLL2 n1 %d num1 %d", + bits(rw, 6, 17), bits(rw, 0, 5)); + break; + case 3: + PDEBUG("reg3 IFPLL3 num %d", bits(rw, 0, 17)); + break; + case 4: + PDEBUG("reg4 IFPLL4 dn1 %#04x ct_def1 %d kv_def1 %d", + bits(rw, 8, 16), bits(rw, 4, 7), bits(rw, 0, 3)); + break; + case 5: + PDEBUG("reg5 RFPLL1 pll_en %d kv_en %d vtc_en %d lpf %d cpl %d" + " pdp %d autocal_en %d ld_en %d rfloopr %d rfloopc %d" + " dac %d", + bit(rw, 17), bit(rw, 16), bit(rw, 15), bit(rw, 14), + bit(rw, 13), bit(rw, 12), bit(rw, 11), bit(rw, 10), + bits(rw, 7, 9), bits(rw, 4, 6), bits(rw, 0,3)); + break; + case 6: + PDEBUG("reg6 RFPLL2 n %d num %d", + bits(rw, 6, 17), bits(rw, 0, 5)); + break; + case 7: + PDEBUG("reg7 RFPLL3 num2 %d", bits(rw, 0, 17)); + break; + case 8: + PDEBUG("reg8 RFPLL4 dn %#06x ct_def %d kv_def %d", + bits(rw, 8, 16), bits(rw, 4, 7), bits(rw, 0, 3)); + break; + case 9: + PDEBUG("reg9 CAL1 tvco %d tlock %d m_ct_value %d ld_window %d", + bits(rw, 13, 17), bits(rw, 8, 12), bits(rw, 3, 7), + bits(rw, 0, 2)); + break; + case 10: + PDEBUG("reg10 TXRX1 rxdcfbbyps %d pcontrol %d txvgc %d" + " rxlpfbw %d txlpfbw %d txdiffmode %d txenmode %d" + " intbiasen %d tybypass %d", + bit(rw, 17), bits(rw, 15, 16), bits(rw, 10, 14), + bits(rw, 7, 9), bits(rw, 4, 6), bit(rw, 3), bit(rw, 2), + bit(rw, 1), bit(rw, 0)); + break; + case 11: + PDEBUG("reg11 PCNT1 mid_bias %d p_desired %d pc_offset %d" + " tx_delay %d", + bits(rw, 15, 17), bits(rw, 9, 14), bits(rw, 3, 8), + bits(rw, 0, 2)); + break; + case 12: + PDEBUG("reg12 PCNT2 max_power %d mid_power %d min_power %d", + bits(rw, 12, 17), bits(rw, 6, 11), bits(rw, 0, 5)); + break; + case 13: + PDEBUG("reg13 VCOT1 rfpll vco comp %d ifpll vco comp %d" + " lobias 
%d if_biasbuf %d if_biasvco %d rf_biasbuf %d" + " rf_biasvco %d", + bit(rw, 17), bit(rw, 16), bit(rw, 15), + bits(rw, 8, 9), bits(rw, 5, 7), bits(rw, 3, 4), + bits(rw, 0, 2)); + break; + case 14: + PDEBUG("reg14 IQCAL rx_acal %d rx_pcal %d" + " tx_acal %d tx_pcal %d", + bits(rw, 13, 17), bits(rw, 9, 12), bits(rw, 4, 8), + bits(rw, 0, 3)); + break; + } +} +#endif /* 0 */ + +static int rf2959_init_hw(struct zd_rf *rf) +{ + int r; + struct zd_chip *chip = zd_rf_to_chip(rf); + + static const struct zd_ioreq16 ioreqs[] = { + { CR2, 0x1E }, { CR9, 0x20 }, { CR10, 0x89 }, + { CR11, 0x00 }, { CR15, 0xD0 }, { CR17, 0x68 }, + { CR19, 0x4a }, { CR20, 0x0c }, { CR21, 0x0E }, + { CR23, 0x48 }, + /* normal size for cca threshold */ + { CR24, 0x14 }, + /* { CR24, 0x20 }, */ + { CR26, 0x90 }, { CR27, 0x30 }, { CR29, 0x20 }, + { CR31, 0xb2 }, { CR32, 0x43 }, { CR33, 0x28 }, + { CR38, 0x30 }, { CR34, 0x0f }, { CR35, 0xF0 }, + { CR41, 0x2a }, { CR46, 0x7F }, { CR47, 0x1E }, + { CR51, 0xc5 }, { CR52, 0xc5 }, { CR53, 0xc5 }, + { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 }, + { CR82, 0x00 }, { CR83, 0x24 }, { CR84, 0x04 }, + { CR85, 0x00 }, { CR86, 0x10 }, { CR87, 0x2A }, + { CR88, 0x10 }, { CR89, 0x24 }, { CR90, 0x18 }, + /* { CR91, 0x18 }, */ + /* should solve continous CTS frame problems */ + { CR91, 0x00 }, + { CR92, 0x0a }, { CR93, 0x00 }, { CR94, 0x01 }, + { CR95, 0x00 }, { CR96, 0x40 }, { CR97, 0x37 }, + { CR98, 0x05 }, { CR99, 0x28 }, { CR100, 0x00 }, + { CR101, 0x13 }, { CR102, 0x27 }, { CR103, 0x27 }, + { CR104, 0x18 }, { CR105, 0x12 }, + /* normal size */ + { CR106, 0x1a }, + /* { CR106, 0x22 }, */ + { CR107, 0x24 }, { CR108, 0x0a }, { CR109, 0x13 }, + { CR110, 0x2F }, { CR111, 0x27 }, { CR112, 0x27 }, + { CR113, 0x27 }, { CR114, 0x27 }, { CR115, 0x40 }, + { CR116, 0x40 }, { CR117, 0xF0 }, { CR118, 0xF0 }, + { CR119, 0x16 }, + /* no TX continuation */ + { CR122, 0x00 }, + /* { CR122, 0xff }, */ + { CR127, 0x03 }, { CR131, 0x08 }, { CR138, 0x28 }, + { CR148, 0x44 }, { CR150, 0x10 }, { CR169, 0xBB }, + { CR170, 0xBB }, + }; + + static const u32 rv[] = { + 0x000007, /* REG0(CFG1) */ + 0x07dd43, /* REG1(IFPLL1) */ + 0x080959, /* REG2(IFPLL2) */ + 0x0e6666, + 0x116a57, /* REG4 */ + 0x17dd43, /* REG5 */ + 0x1819f9, /* REG6 */ + 0x1e6666, + 0x214554, + 0x25e7fa, + 0x27fffa, + /* The Zydas driver somehow forgets to set this value. It's + * only set for Japan. We are using internal power control + * for now. + */ + 0x294128, /* internal power */ + /* 0x28252c, */ /* External control TX power */ + /* CR31_CCK, CR51_6-36M, CR52_48M, CR53_54M */ + 0x2c0000, + 0x300000, + 0x340000, /* REG13(0xD) */ + 0x381e0f, /* REG14(0xE) */ + /* Bogus, RF2959's data sheet doesn't know register 27, which is + * actually referenced here. The commented 0x11 is 17. 
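+ * Decoding the word the way dump_regwrite() above does, the register
+ * field is bits 18..22, and (0x6c180f >> 18) & 0x1f == 27 == 0x1b, so
+ * the final word below does program register 27.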
+ */ + 0x6c180f, /* REG27(0x11) */ + }; + + r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); + if (r) + return r; + + return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS); +} + +static int rf2959_set_channel(struct zd_rf *rf, u8 channel) +{ + int i, r; + const u32 *rv = rf2959_table[channel-1]; + struct zd_chip *chip = zd_rf_to_chip(rf); + + for (i = 0; i < 2; i++) { + r = zd_rfwrite_locked(chip, rv[i], RF_RV_BITS); + if (r) + return r; + } + return 0; +} + +static int rf2959_switch_radio_on(struct zd_rf *rf) +{ + static const struct zd_ioreq16 ioreqs[] = { + { CR10, 0x89 }, + { CR11, 0x00 }, + }; + struct zd_chip *chip = zd_rf_to_chip(rf); + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int rf2959_switch_radio_off(struct zd_rf *rf) +{ + static const struct zd_ioreq16 ioreqs[] = { + { CR10, 0x15 }, + { CR11, 0x81 }, + }; + struct zd_chip *chip = zd_rf_to_chip(rf); + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +int zd_rf_init_rf2959(struct zd_rf *rf) +{ + struct zd_chip *chip = zd_rf_to_chip(rf); + + if (zd_chip_is_zd1211b(chip)) { + dev_err(zd_chip_dev(chip), + "RF2959 is currently not supported for ZD1211B" + " devices\n"); + return -ENODEV; + } + rf->init_hw = rf2959_init_hw; + rf->set_channel = rf2959_set_channel; + rf->switch_radio_on = rf2959_switch_radio_on; + rf->switch_radio_off = rf2959_switch_radio_off; + return 0; +} diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c new file mode 100644 index 0000000..439799b --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c @@ -0,0 +1,537 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include + +#include "zd_rf.h" +#include "zd_usb.h" +#include "zd_chip.h" + +/* This RF programming code is based upon the code found in v2.16.0.0 of the + * ZyDAS vendor driver. Unlike other RF's, Ubec publish full technical specs + * for this RF on their website, so we're able to understand more than + * usual as to what is going on. Thumbs up for Ubec for doing that. */ + +/* The 3-wire serial interface provides access to 8 write-only registers. + * The data format is a 4 bit register address followed by a 20 bit value. */ +#define UW2453_REGWRITE(reg, val) ((((reg) & 0xf) << 20) | ((val) & 0xfffff)) + +/* For channel tuning, we have to configure registers 1 (synthesizer), 2 (synth + * fractional divide ratio) and 3 (VCO config). + * + * We configure the RF to produce an interrupt when the PLL is locked onto + * the configured frequency. During initialization, we run through a variety + * of different VCO configurations on channel 1 until we detect a PLL lock. 
+ * When this happens, we remember which VCO configuration produced the lock + * and use it later. Actually, we use the configuration *after* the one that + * produced the lock, which seems odd, but it works. + * + * If we do not see a PLL lock on any standard VCO config, we fall back on an + * autocal configuration, which has a fixed (as opposed to per-channel) VCO + * config and different synth values from the standard set (divide ratio + * is still shared with the standard set). */ + +/* The per-channel synth values for all standard VCO configurations. These get + * written to register 1. */ +static const u8 uw2453_std_synth[] = { + RF_CHANNEL( 1) = 0x47, + RF_CHANNEL( 2) = 0x47, + RF_CHANNEL( 3) = 0x67, + RF_CHANNEL( 4) = 0x67, + RF_CHANNEL( 5) = 0x67, + RF_CHANNEL( 6) = 0x67, + RF_CHANNEL( 7) = 0x57, + RF_CHANNEL( 8) = 0x57, + RF_CHANNEL( 9) = 0x57, + RF_CHANNEL(10) = 0x57, + RF_CHANNEL(11) = 0x77, + RF_CHANNEL(12) = 0x77, + RF_CHANNEL(13) = 0x77, + RF_CHANNEL(14) = 0x4f, +}; + +/* This table stores the synthesizer fractional divide ratio for *all* VCO + * configurations (both standard and autocal). These get written to register 2. + */ +static const u16 uw2453_synth_divide[] = { + RF_CHANNEL( 1) = 0x999, + RF_CHANNEL( 2) = 0x99b, + RF_CHANNEL( 3) = 0x998, + RF_CHANNEL( 4) = 0x99a, + RF_CHANNEL( 5) = 0x999, + RF_CHANNEL( 6) = 0x99b, + RF_CHANNEL( 7) = 0x998, + RF_CHANNEL( 8) = 0x99a, + RF_CHANNEL( 9) = 0x999, + RF_CHANNEL(10) = 0x99b, + RF_CHANNEL(11) = 0x998, + RF_CHANNEL(12) = 0x99a, + RF_CHANNEL(13) = 0x999, + RF_CHANNEL(14) = 0xccc, +}; + +/* Here is the data for all the standard VCO configurations. We shrink our + * table a little by observing that both channels in a consecutive pair share + * the same value. We also observe that the high 4 bits ([0:3] in the specs) + * are all 'Reserved' and are always set to 0x4 - we chop them off in the data + * below. 
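+ * uw2453_write_vco_cfg() below restores that nibble by OR-ing 0x40000
+ * back in, so e.g. the stored 0x664d is written as 0x4664d, which
+ * UW2453_REGWRITE(3, ...) packs into the word 0x34664d sent over the
+ * 3-wire bus.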
*/ +#define CHAN_TO_PAIRIDX(a) ((a - 1) / 2) +#define RF_CHANPAIR(a,b) [CHAN_TO_PAIRIDX(a)] +static const u16 uw2453_std_vco_cfg[][7] = { + { /* table 1 */ + RF_CHANPAIR( 1, 2) = 0x664d, + RF_CHANPAIR( 3, 4) = 0x604d, + RF_CHANPAIR( 5, 6) = 0x6675, + RF_CHANPAIR( 7, 8) = 0x6475, + RF_CHANPAIR( 9, 10) = 0x6655, + RF_CHANPAIR(11, 12) = 0x6455, + RF_CHANPAIR(13, 14) = 0x6665, + }, + { /* table 2 */ + RF_CHANPAIR( 1, 2) = 0x666d, + RF_CHANPAIR( 3, 4) = 0x606d, + RF_CHANPAIR( 5, 6) = 0x664d, + RF_CHANPAIR( 7, 8) = 0x644d, + RF_CHANPAIR( 9, 10) = 0x6675, + RF_CHANPAIR(11, 12) = 0x6475, + RF_CHANPAIR(13, 14) = 0x6655, + }, + { /* table 3 */ + RF_CHANPAIR( 1, 2) = 0x665d, + RF_CHANPAIR( 3, 4) = 0x605d, + RF_CHANPAIR( 5, 6) = 0x666d, + RF_CHANPAIR( 7, 8) = 0x646d, + RF_CHANPAIR( 9, 10) = 0x664d, + RF_CHANPAIR(11, 12) = 0x644d, + RF_CHANPAIR(13, 14) = 0x6675, + }, + { /* table 4 */ + RF_CHANPAIR( 1, 2) = 0x667d, + RF_CHANPAIR( 3, 4) = 0x607d, + RF_CHANPAIR( 5, 6) = 0x665d, + RF_CHANPAIR( 7, 8) = 0x645d, + RF_CHANPAIR( 9, 10) = 0x666d, + RF_CHANPAIR(11, 12) = 0x646d, + RF_CHANPAIR(13, 14) = 0x664d, + }, + { /* table 5 */ + RF_CHANPAIR( 1, 2) = 0x6643, + RF_CHANPAIR( 3, 4) = 0x6043, + RF_CHANPAIR( 5, 6) = 0x667d, + RF_CHANPAIR( 7, 8) = 0x647d, + RF_CHANPAIR( 9, 10) = 0x665d, + RF_CHANPAIR(11, 12) = 0x645d, + RF_CHANPAIR(13, 14) = 0x666d, + }, + { /* table 6 */ + RF_CHANPAIR( 1, 2) = 0x6663, + RF_CHANPAIR( 3, 4) = 0x6063, + RF_CHANPAIR( 5, 6) = 0x6643, + RF_CHANPAIR( 7, 8) = 0x6443, + RF_CHANPAIR( 9, 10) = 0x667d, + RF_CHANPAIR(11, 12) = 0x647d, + RF_CHANPAIR(13, 14) = 0x665d, + }, + { /* table 7 */ + RF_CHANPAIR( 1, 2) = 0x6653, + RF_CHANPAIR( 3, 4) = 0x6053, + RF_CHANPAIR( 5, 6) = 0x6663, + RF_CHANPAIR( 7, 8) = 0x6463, + RF_CHANPAIR( 9, 10) = 0x6643, + RF_CHANPAIR(11, 12) = 0x6443, + RF_CHANPAIR(13, 14) = 0x667d, + }, + { /* table 8 */ + RF_CHANPAIR( 1, 2) = 0x6673, + RF_CHANPAIR( 3, 4) = 0x6073, + RF_CHANPAIR( 5, 6) = 0x6653, + RF_CHANPAIR( 7, 8) = 0x6453, + RF_CHANPAIR( 9, 10) = 0x6663, + RF_CHANPAIR(11, 12) = 0x6463, + RF_CHANPAIR(13, 14) = 0x6643, + }, + { /* table 9 */ + RF_CHANPAIR( 1, 2) = 0x664b, + RF_CHANPAIR( 3, 4) = 0x604b, + RF_CHANPAIR( 5, 6) = 0x6673, + RF_CHANPAIR( 7, 8) = 0x6473, + RF_CHANPAIR( 9, 10) = 0x6653, + RF_CHANPAIR(11, 12) = 0x6453, + RF_CHANPAIR(13, 14) = 0x6663, + }, + { /* table 10 */ + RF_CHANPAIR( 1, 2) = 0x666b, + RF_CHANPAIR( 3, 4) = 0x606b, + RF_CHANPAIR( 5, 6) = 0x664b, + RF_CHANPAIR( 7, 8) = 0x644b, + RF_CHANPAIR( 9, 10) = 0x6673, + RF_CHANPAIR(11, 12) = 0x6473, + RF_CHANPAIR(13, 14) = 0x6653, + }, + { /* table 11 */ + RF_CHANPAIR( 1, 2) = 0x665b, + RF_CHANPAIR( 3, 4) = 0x605b, + RF_CHANPAIR( 5, 6) = 0x666b, + RF_CHANPAIR( 7, 8) = 0x646b, + RF_CHANPAIR( 9, 10) = 0x664b, + RF_CHANPAIR(11, 12) = 0x644b, + RF_CHANPAIR(13, 14) = 0x6673, + }, + +}; + +/* The per-channel synth values for autocal. These get written to register 1. */ +static const u16 uw2453_autocal_synth[] = { + RF_CHANNEL( 1) = 0x6847, + RF_CHANNEL( 2) = 0x6847, + RF_CHANNEL( 3) = 0x6867, + RF_CHANNEL( 4) = 0x6867, + RF_CHANNEL( 5) = 0x6867, + RF_CHANNEL( 6) = 0x6867, + RF_CHANNEL( 7) = 0x6857, + RF_CHANNEL( 8) = 0x6857, + RF_CHANNEL( 9) = 0x6857, + RF_CHANNEL(10) = 0x6857, + RF_CHANNEL(11) = 0x6877, + RF_CHANNEL(12) = 0x6877, + RF_CHANNEL(13) = 0x6877, + RF_CHANNEL(14) = 0x684f, +}; + +/* The VCO configuration for autocal (all channels) */ +static const u16 UW2453_AUTOCAL_VCO_CFG = 0x6662; + +/* TX gain settings. The array index corresponds to the TX power integration + * values found in the EEPROM. 
The values get written to register 7. */ +static u32 uw2453_txgain[] = { + [0x00] = 0x0e313, + [0x01] = 0x0fb13, + [0x02] = 0x0e093, + [0x03] = 0x0f893, + [0x04] = 0x0ea93, + [0x05] = 0x1f093, + [0x06] = 0x1f493, + [0x07] = 0x1f693, + [0x08] = 0x1f393, + [0x09] = 0x1f35b, + [0x0a] = 0x1e6db, + [0x0b] = 0x1ff3f, + [0x0c] = 0x1ffff, + [0x0d] = 0x361d7, + [0x0e] = 0x37fbf, + [0x0f] = 0x3ff8b, + [0x10] = 0x3ff33, + [0x11] = 0x3fb3f, + [0x12] = 0x3ffff, +}; + +/* RF-specific structure */ +struct uw2453_priv { + /* index into synth/VCO config tables where PLL lock was found + * -1 means autocal */ + int config; +}; + +#define UW2453_PRIV(rf) ((struct uw2453_priv *) (rf)->priv) + +static int uw2453_synth_set_channel(struct zd_chip *chip, int channel, + bool autocal) +{ + int r; + int idx = channel - 1; + u32 val; + + if (autocal) + val = UW2453_REGWRITE(1, uw2453_autocal_synth[idx]); + else + val = UW2453_REGWRITE(1, uw2453_std_synth[idx]); + + r = zd_rfwrite_locked(chip, val, RF_RV_BITS); + if (r) + return r; + + return zd_rfwrite_locked(chip, + UW2453_REGWRITE(2, uw2453_synth_divide[idx]), RF_RV_BITS); +} + +static int uw2453_write_vco_cfg(struct zd_chip *chip, u16 value) +{ + /* vendor driver always sets these upper bits even though the specs say + * they are reserved */ + u32 val = 0x40000 | value; + return zd_rfwrite_locked(chip, UW2453_REGWRITE(3, val), RF_RV_BITS); +} + +static int uw2453_init_mode(struct zd_chip *chip) +{ + static const u32 rv[] = { + UW2453_REGWRITE(0, 0x25f98), /* enter IDLE mode */ + UW2453_REGWRITE(0, 0x25f9a), /* enter CAL_VCO mode */ + UW2453_REGWRITE(0, 0x25f94), /* enter RX/TX mode */ + UW2453_REGWRITE(0, 0x27fd4), /* power down RSSI circuit */ + }; + + return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS); +} + +static int uw2453_set_tx_gain_level(struct zd_chip *chip, int channel) +{ + u8 int_value = chip->pwr_int_values[channel - 1]; + + if (int_value >= ARRAY_SIZE(uw2453_txgain)) { + dev_dbg_f(zd_chip_dev(chip), "can't configure TX gain for " + "int value %x on channel %d\n", int_value, channel); + return 0; + } + + return zd_rfwrite_locked(chip, + UW2453_REGWRITE(7, uw2453_txgain[int_value]), RF_RV_BITS); +} + +static int uw2453_init_hw(struct zd_rf *rf) +{ + int i, r; + int found_config = -1; + u16 intr_status; + struct zd_chip *chip = zd_rf_to_chip(rf); + + static const struct zd_ioreq16 ioreqs[] = { + { CR10, 0x89 }, { CR15, 0x20 }, + { CR17, 0x28 }, /* 6112 no change */ + { CR23, 0x38 }, { CR24, 0x20 }, { CR26, 0x93 }, + { CR27, 0x15 }, { CR28, 0x3e }, { CR29, 0x00 }, + { CR33, 0x28 }, { CR34, 0x30 }, + { CR35, 0x43 }, /* 6112 3e->43 */ + { CR41, 0x24 }, { CR44, 0x32 }, + { CR46, 0x92 }, /* 6112 96->92 */ + { CR47, 0x1e }, + { CR48, 0x04 }, /* 5602 Roger */ + { CR49, 0xfa }, { CR79, 0x58 }, { CR80, 0x30 }, + { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 }, + { CR91, 0x00 }, { CR92, 0x0a }, { CR98, 0x8d }, + { CR99, 0x28 }, { CR100, 0x02 }, + { CR101, 0x09 }, /* 6112 13->1f 6220 1f->13 6407 13->9 */ + { CR102, 0x27 }, + { CR106, 0x1c }, /* 5d07 5112 1f->1c 6220 1c->1f 6221 1f->1c */ + { CR107, 0x1c }, /* 6220 1c->1a 5221 1a->1c */ + { CR109, 0x13 }, + { CR110, 0x1f }, /* 6112 13->1f 6221 1f->13 6407 13->0x09 */ + { CR111, 0x13 }, { CR112, 0x1f }, { CR113, 0x27 }, + { CR114, 0x23 }, /* 6221 27->23 */ + { CR115, 0x24 }, /* 6112 24->1c 6220 1c->24 */ + { CR116, 0x24 }, /* 6220 1c->24 */ + { CR117, 0xfa }, /* 6112 fa->f8 6220 f8->f4 6220 f4->fa */ + { CR118, 0xf0 }, /* 5d07 6112 f0->f2 6220 f2->f0 */ + { CR119, 0x1a }, /* 6112 1a->10 6220 10->14 6220 
14->1a */ + { CR120, 0x4f }, + { CR121, 0x1f }, /* 6220 4f->1f */ + { CR122, 0xf0 }, { CR123, 0x57 }, { CR125, 0xad }, + { CR126, 0x6c }, { CR127, 0x03 }, + { CR128, 0x14 }, /* 6302 12->11 */ + { CR129, 0x12 }, /* 6301 10->0f */ + { CR130, 0x10 }, { CR137, 0x50 }, { CR138, 0xa8 }, + { CR144, 0xac }, { CR146, 0x20 }, { CR252, 0xff }, + { CR253, 0xff }, + }; + + static const u32 rv[] = { + UW2453_REGWRITE(4, 0x2b), /* configure reciever gain */ + UW2453_REGWRITE(5, 0x19e4f), /* configure transmitter gain */ + UW2453_REGWRITE(6, 0xf81ad), /* enable RX/TX filter tuning */ + UW2453_REGWRITE(7, 0x3fffe), /* disable TX gain in test mode */ + + /* enter CAL_FIL mode, TX gain set by registers, RX gain set by pins, + * RSSI circuit powered down, reduced RSSI range */ + UW2453_REGWRITE(0, 0x25f9c), /* 5d01 cal_fil */ + + /* synthesizer configuration for channel 1 */ + UW2453_REGWRITE(1, 0x47), + UW2453_REGWRITE(2, 0x999), + + /* disable manual VCO band selection */ + UW2453_REGWRITE(3, 0x7602), + + /* enable manual VCO band selection, configure current level */ + UW2453_REGWRITE(3, 0x46063), + }; + + r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); + if (r) + return r; + + r = zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS); + if (r) + return r; + + r = uw2453_init_mode(chip); + if (r) + return r; + + /* Try all standard VCO configuration settings on channel 1 */ + for (i = 0; i < ARRAY_SIZE(uw2453_std_vco_cfg) - 1; i++) { + /* Configure synthesizer for channel 1 */ + r = uw2453_synth_set_channel(chip, 1, false); + if (r) + return r; + + /* Write VCO config */ + r = uw2453_write_vco_cfg(chip, uw2453_std_vco_cfg[i][0]); + if (r) + return r; + + /* ack interrupt event */ + r = zd_iowrite16_locked(chip, 0x0f, UW2453_INTR_REG); + if (r) + return r; + + /* check interrupt status */ + r = zd_ioread16_locked(chip, &intr_status, UW2453_INTR_REG); + if (r) + return r; + + if (!(intr_status & 0xf)) { + dev_dbg_f(zd_chip_dev(chip), + "PLL locked on configuration %d\n", i); + found_config = i; + break; + } + } + + if (found_config == -1) { + /* autocal */ + dev_dbg_f(zd_chip_dev(chip), + "PLL did not lock, using autocal\n"); + + r = uw2453_synth_set_channel(chip, 1, true); + if (r) + return r; + + r = uw2453_write_vco_cfg(chip, UW2453_AUTOCAL_VCO_CFG); + if (r) + return r; + } + + /* To match the vendor driver behaviour, we use the configuration after + * the one that produced a lock. 
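+ * Note that the scan above stops one entry short of the end of
+ * uw2453_std_vco_cfg, so found_config + 1 is always a valid table index.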
*/ + UW2453_PRIV(rf)->config = found_config + 1; + + return zd_iowrite16_locked(chip, 0x06, CR203); +} + +static int uw2453_set_channel(struct zd_rf *rf, u8 channel) +{ + int r; + u16 vco_cfg; + int config = UW2453_PRIV(rf)->config; + bool autocal = (config == -1); + struct zd_chip *chip = zd_rf_to_chip(rf); + + static const struct zd_ioreq16 ioreqs[] = { + { CR80, 0x30 }, { CR81, 0x30 }, { CR79, 0x58 }, + { CR12, 0xf0 }, { CR77, 0x1b }, { CR78, 0x58 }, + }; + + r = uw2453_synth_set_channel(chip, channel, autocal); + if (r) + return r; + + if (autocal) + vco_cfg = UW2453_AUTOCAL_VCO_CFG; + else + vco_cfg = uw2453_std_vco_cfg[config][CHAN_TO_PAIRIDX(channel)]; + + r = uw2453_write_vco_cfg(chip, vco_cfg); + if (r) + return r; + + r = uw2453_init_mode(chip); + if (r) + return r; + + r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); + if (r) + return r; + + r = uw2453_set_tx_gain_level(chip, channel); + if (r) + return r; + + return zd_iowrite16_locked(chip, 0x06, CR203); +} + +static int uw2453_switch_radio_on(struct zd_rf *rf) +{ + int r; + struct zd_chip *chip = zd_rf_to_chip(rf); + struct zd_ioreq16 ioreqs[] = { + { CR11, 0x00 }, { CR251, 0x3f }, + }; + + /* enter RXTX mode */ + r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f94), RF_RV_BITS); + if (r) + return r; + + if (zd_chip_is_zd1211b(chip)) + ioreqs[1].value = 0x7f; + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static int uw2453_switch_radio_off(struct zd_rf *rf) +{ + int r; + struct zd_chip *chip = zd_rf_to_chip(rf); + static const struct zd_ioreq16 ioreqs[] = { + { CR11, 0x04 }, { CR251, 0x2f }, + }; + + /* enter IDLE mode */ + /* FIXME: shouldn't we go to SLEEP? sent email to zydas */ + r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f90), RF_RV_BITS); + if (r) + return r; + + return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); +} + +static void uw2453_clear(struct zd_rf *rf) +{ + kfree(rf->priv); +} + +int zd_rf_init_uw2453(struct zd_rf *rf) +{ + rf->init_hw = uw2453_init_hw; + rf->set_channel = uw2453_set_channel; + rf->switch_radio_on = uw2453_switch_radio_on; + rf->switch_radio_off = uw2453_switch_radio_off; + rf->patch_6m_band_edge = zd_rf_generic_patch_6m; + rf->clear = uw2453_clear; + /* we have our own TX integration code */ + rf->update_channel_int = 0; + + rf->priv = kmalloc(sizeof(struct uw2453_priv), GFP_KERNEL); + if (rf->priv == NULL) + return -ENOMEM; + + return 0; +} + diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c new file mode 100644 index 0000000..442fc11 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -0,0 +1,1578 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * Copyright (C) 2006-2007 Michael Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "zd_def.h" +#include "zd_mac.h" +#include "zd_usb.h" + +static struct usb_device_id usb_ids[] = { + /* ZD1211 */ + { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0586, 0x3401), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0586, 0x3402), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0586, 0x3409), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0b05, 0x170c), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0b3b, 0x1630), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0b3b, 0x5630), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x0df6, 0x9075), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, + /* ZD1211B */ + { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x054c, 0x0257), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0586, 0x3410), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x083a, 0xe501), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0b05, 0x171b), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0baf, 0x0121), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B 
}, + { USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x129b, 0x1667), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B }, + /* "Driverless" devices that need ejecting */ + { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, + { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, + {} +}; + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("USB driver for devices with the ZD1211 chip."); +MODULE_AUTHOR("Ulrich Kunitz"); +MODULE_AUTHOR("Daniel Drake"); +MODULE_VERSION("1.0"); +MODULE_DEVICE_TABLE(usb, usb_ids); + +#define FW_ZD1211_PREFIX "zd1211/zd1211_" +#define FW_ZD1211B_PREFIX "zd1211/zd1211b_" + +/* USB device initialization */ +static void int_urb_complete(struct urb *urb); + +static int request_fw_file( + const struct firmware **fw, const char *name, struct device *device) +{ + int r; + + dev_dbg_f(device, "fw name %s\n", name); + + r = request_firmware(fw, name, device); + if (r) + dev_err(device, + "Could not load firmware file %s. Error number %d\n", + name, r); + return r; +} + +static inline u16 get_bcdDevice(const struct usb_device *udev) +{ + return le16_to_cpu(udev->descriptor.bcdDevice); +} + +enum upload_code_flags { + REBOOT = 1, +}; + +/* Ensures that MAX_TRANSFER_SIZE is even. */ +#define MAX_TRANSFER_SIZE (USB_MAX_TRANSFER_SIZE & ~1) + +static int upload_code(struct usb_device *udev, + const u8 *data, size_t size, u16 code_offset, int flags) +{ + u8 *p; + int r; + + /* USB request blocks need "kmalloced" buffers. + */ + p = kmalloc(MAX_TRANSFER_SIZE, GFP_KERNEL); + if (!p) { + dev_err(&udev->dev, "out of memory\n"); + r = -ENOMEM; + goto error; + } + + size &= ~1; + while (size > 0) { + size_t transfer_size = size <= MAX_TRANSFER_SIZE ? + size : MAX_TRANSFER_SIZE; + + dev_dbg_f(&udev->dev, "transfer size %zu\n", transfer_size); + + memcpy(p, data, transfer_size); + r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + USB_REQ_FIRMWARE_DOWNLOAD, + USB_DIR_OUT | USB_TYPE_VENDOR, + code_offset, 0, p, transfer_size, 1000 /* ms */); + if (r < 0) { + dev_err(&udev->dev, + "USB control request for firmware upload" + " failed. Error number %d\n", r); + goto error; + } + transfer_size = r & ~1; + + size -= transfer_size; + data += transfer_size; + code_offset += transfer_size/sizeof(u16); + } + + if (flags & REBOOT) { + u8 ret; + + /* Use "DMA-aware" buffer. */ + r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + USB_REQ_FIRMWARE_CONFIRM, + USB_DIR_IN | USB_TYPE_VENDOR, + 0, 0, p, sizeof(ret), 5000 /* ms */); + if (r != sizeof(ret)) { + dev_err(&udev->dev, + "control request firmeware confirmation failed." + " Return value %d\n", r); + if (r >= 0) + r = -ENODEV; + goto error; + } + ret = p[0]; + if (ret & 0x80) { + dev_err(&udev->dev, + "Internal error while downloading." + " Firmware confirm return value %#04x\n", + (unsigned int)ret); + r = -ENODEV; + goto error; + } + dev_dbg_f(&udev->dev, "firmware confirm return value %#04x\n", + (unsigned int)ret); + } + + r = 0; +error: + kfree(p); + return r; +} + +static u16 get_word(const void *data, u16 offset) +{ + const __le16 *p = data; + return le16_to_cpu(p[offset]); +} + +static char *get_fw_name(struct zd_usb *usb, char *buffer, size_t size, + const char* postfix) +{ + scnprintf(buffer, size, "%s%s", + usb->is_zd1211b ? 
+ FW_ZD1211B_PREFIX : FW_ZD1211_PREFIX, + postfix); + return buffer; +} + +static int handle_version_mismatch(struct zd_usb *usb, + const struct firmware *ub_fw) +{ + struct usb_device *udev = zd_usb_to_usbdev(usb); + const struct firmware *ur_fw = NULL; + int offset; + int r = 0; + char fw_name[128]; + + r = request_fw_file(&ur_fw, + get_fw_name(usb, fw_name, sizeof(fw_name), "ur"), + &udev->dev); + if (r) + goto error; + + r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START, REBOOT); + if (r) + goto error; + + offset = (E2P_BOOT_CODE_OFFSET * sizeof(u16)); + r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset, + E2P_START + E2P_BOOT_CODE_OFFSET, REBOOT); + + /* At this point, the vendor driver downloads the whole firmware + * image, hacks around with version IDs, and uploads it again, + * completely overwriting the boot code. We do not do this here as + * it is not required on any tested devices, and it is suspected to + * cause problems. */ +error: + release_firmware(ur_fw); + return r; +} + +static int upload_firmware(struct zd_usb *usb) +{ + int r; + u16 fw_bcdDevice; + u16 bcdDevice; + struct usb_device *udev = zd_usb_to_usbdev(usb); + const struct firmware *ub_fw = NULL; + const struct firmware *uph_fw = NULL; + char fw_name[128]; + + bcdDevice = get_bcdDevice(udev); + + r = request_fw_file(&ub_fw, + get_fw_name(usb, fw_name, sizeof(fw_name), "ub"), + &udev->dev); + if (r) + goto error; + + fw_bcdDevice = get_word(ub_fw->data, E2P_DATA_OFFSET); + + if (fw_bcdDevice != bcdDevice) { + dev_info(&udev->dev, + "firmware version %#06x and device bootcode version " + "%#06x differ\n", fw_bcdDevice, bcdDevice); + if (bcdDevice <= 0x4313) + dev_warn(&udev->dev, "device has old bootcode, please " + "report success or failure\n"); + + r = handle_version_mismatch(usb, ub_fw); + if (r) + goto error; + } else { + dev_dbg_f(&udev->dev, + "firmware device id %#06x is equal to the " + "actual device id\n", fw_bcdDevice); + } + + + r = request_fw_file(&uph_fw, + get_fw_name(usb, fw_name, sizeof(fw_name), "uphr"), + &udev->dev); + if (r) + goto error; + + r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START, REBOOT); + if (r) { + dev_err(&udev->dev, + "Could not upload firmware code uph. Error number %d\n", + r); + } + + /* FALL-THROUGH */ +error: + release_firmware(ub_fw); + release_firmware(uph_fw); + return r; +} + +MODULE_FIRMWARE(FW_ZD1211B_PREFIX "ur"); +MODULE_FIRMWARE(FW_ZD1211_PREFIX "ur"); +MODULE_FIRMWARE(FW_ZD1211B_PREFIX "ub"); +MODULE_FIRMWARE(FW_ZD1211_PREFIX "ub"); +MODULE_FIRMWARE(FW_ZD1211B_PREFIX "uphr"); +MODULE_FIRMWARE(FW_ZD1211_PREFIX "uphr"); + +/* Read data from device address space using "firmware interface" which does + * not require firmware to be loaded. */ +int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len) +{ + int r; + struct usb_device *udev = zd_usb_to_usbdev(usb); + u8 *buf; + + /* Use "DMA-aware" buffer. 
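+ * usb_control_msg() may hand its buffer directly to the host controller
+ * for DMA, so read into a kmalloc()ed bounce buffer here rather than
+ * into the caller's data pointer, which may live on the stack.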
*/ + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + USB_REQ_FIRMWARE_READ_DATA, USB_DIR_IN | 0x40, addr, 0, + buf, len, 5000); + if (r < 0) { + dev_err(&udev->dev, + "read over firmware interface failed: %d\n", r); + goto exit; + } else if (r != len) { + dev_err(&udev->dev, + "incomplete read over firmware interface: %d/%d\n", + r, len); + r = -EIO; + goto exit; + } + r = 0; + memcpy(data, buf, len); +exit: + kfree(buf); + return r; +} + +#define urb_dev(urb) (&(urb)->dev->dev) + +static inline void handle_regs_int(struct urb *urb) +{ + struct zd_usb *usb = urb->context; + struct zd_usb_interrupt *intr = &usb->intr; + int len; + u16 int_num; + + ZD_ASSERT(in_interrupt()); + spin_lock(&intr->lock); + + int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2)); + if (int_num == CR_INTERRUPT) { + struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context)); + memcpy(&mac->intr_buffer, urb->transfer_buffer, + USB_MAX_EP_INT_BUFFER); + schedule_work(&mac->process_intr); + } else if (intr->read_regs_enabled) { + intr->read_regs.length = len = urb->actual_length; + + if (len > sizeof(intr->read_regs.buffer)) + len = sizeof(intr->read_regs.buffer); + memcpy(intr->read_regs.buffer, urb->transfer_buffer, len); + intr->read_regs_enabled = 0; + complete(&intr->read_regs.completion); + goto out; + } + +out: + spin_unlock(&intr->lock); +} + +static void int_urb_complete(struct urb *urb) +{ + int r; + struct usb_int_header *hdr; + + switch (urb->status) { + case 0: + break; + case -ESHUTDOWN: + case -EINVAL: + case -ENODEV: + case -ENOENT: + case -ECONNRESET: + case -EPIPE: + goto kfree; + default: + goto resubmit; + } + + if (urb->actual_length < sizeof(hdr)) { + dev_dbg_f(urb_dev(urb), "error: urb %p to small\n", urb); + goto resubmit; + } + + hdr = urb->transfer_buffer; + if (hdr->type != USB_INT_TYPE) { + dev_dbg_f(urb_dev(urb), "error: urb %p wrong type\n", urb); + goto resubmit; + } + + switch (hdr->id) { + case USB_INT_ID_REGS: + handle_regs_int(urb); + break; + case USB_INT_ID_RETRY_FAILED: + zd_mac_tx_failed(urb); + break; + default: + dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb, + (unsigned int)hdr->id); + goto resubmit; + } + +resubmit: + r = usb_submit_urb(urb, GFP_ATOMIC); + if (r) { + dev_dbg_f(urb_dev(urb), "resubmit urb %p\n", urb); + goto kfree; + } + return; +kfree: + kfree(urb->transfer_buffer); +} + +static inline int int_urb_interval(struct usb_device *udev) +{ + switch (udev->speed) { + case USB_SPEED_HIGH: + return 4; + case USB_SPEED_LOW: + return 10; + case USB_SPEED_FULL: + default: + return 1; + } +} + +static inline int usb_int_enabled(struct zd_usb *usb) +{ + unsigned long flags; + struct zd_usb_interrupt *intr = &usb->intr; + struct urb *urb; + + spin_lock_irqsave(&intr->lock, flags); + urb = intr->urb; + spin_unlock_irqrestore(&intr->lock, flags); + return urb != NULL; +} + +int zd_usb_enable_int(struct zd_usb *usb) +{ + int r; + struct usb_device *udev; + struct zd_usb_interrupt *intr = &usb->intr; + void *transfer_buffer = NULL; + struct urb *urb; + + dev_dbg_f(zd_usb_dev(usb), "\n"); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + r = -ENOMEM; + goto out; + } + + ZD_ASSERT(!irqs_disabled()); + spin_lock_irq(&intr->lock); + if (intr->urb) { + spin_unlock_irq(&intr->lock); + r = 0; + goto error_free_urb; + } + intr->urb = urb; + spin_unlock_irq(&intr->lock); + + /* TODO: make it a DMA buffer */ + r = -ENOMEM; + transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_KERNEL); + if 
(!transfer_buffer) { + dev_dbg_f(zd_usb_dev(usb), + "couldn't allocate transfer_buffer\n"); + goto error_set_urb_null; + } + + udev = zd_usb_to_usbdev(usb); + usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN), + transfer_buffer, USB_MAX_EP_INT_BUFFER, + int_urb_complete, usb, + intr->interval); + + dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb); + r = usb_submit_urb(urb, GFP_KERNEL); + if (r) { + dev_dbg_f(zd_usb_dev(usb), + "Couldn't submit urb. Error number %d\n", r); + goto error; + } + + return 0; +error: + kfree(transfer_buffer); +error_set_urb_null: + spin_lock_irq(&intr->lock); + intr->urb = NULL; + spin_unlock_irq(&intr->lock); +error_free_urb: + usb_free_urb(urb); +out: + return r; +} + +void zd_usb_disable_int(struct zd_usb *usb) +{ + unsigned long flags; + struct zd_usb_interrupt *intr = &usb->intr; + struct urb *urb; + + spin_lock_irqsave(&intr->lock, flags); + urb = intr->urb; + if (!urb) { + spin_unlock_irqrestore(&intr->lock, flags); + return; + } + intr->urb = NULL; + spin_unlock_irqrestore(&intr->lock, flags); + + usb_kill_urb(urb); + dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb); + usb_free_urb(urb); +} + +static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, + unsigned int length) +{ + int i; + const struct rx_length_info *length_info; + + if (length < sizeof(struct rx_length_info)) { + /* It's not a complete packet anyhow. */ + printk("%s: invalid, small RX packet : %d\n", + __func__, length); + return; + } + length_info = (struct rx_length_info *) + (buffer + length - sizeof(struct rx_length_info)); + + /* It might be that three frames are merged into a single URB + * transaction. We have to check for the length info tag. + * + * While testing we discovered that length_info might be unaligned, + * because if USB transactions are merged, the last packet will not + * be padded. Unaligned access might also happen if the length_info + * structure is not present. + */ + if (get_unaligned_le16(&length_info->tag) == RX_LENGTH_INFO_TAG) + { + unsigned int l, k, n; + for (i = 0, l = 0;; i++) { + k = get_unaligned_le16(&length_info->length[i]); + if (k == 0) + return; + n = l+k; + if (n > length) + return; + zd_mac_rx(zd_usb_to_hw(usb), buffer+l, k); + if (i >= 2) + return; + l = (n+3) & ~3; + } + } else { + zd_mac_rx(zd_usb_to_hw(usb), buffer, length); + } +} + +static void rx_urb_complete(struct urb *urb) +{ + struct zd_usb *usb; + struct zd_usb_rx *rx; + const u8 *buffer; + unsigned int length; + + switch (urb->status) { + case 0: + break; + case -ESHUTDOWN: + case -EINVAL: + case -ENODEV: + case -ENOENT: + case -ECONNRESET: + case -EPIPE: + return; + default: + dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); + goto resubmit; + } + + buffer = urb->transfer_buffer; + length = urb->actual_length; + usb = urb->context; + rx = &usb->rx; + + if (length%rx->usb_packet_size > rx->usb_packet_size-4) { + /* If there is an old first fragment, we don't care. 
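+ * The length check above indicates the frame is split across USB
+ * transactions: keep this part as the first fragment and merge it with
+ * the data from the next URB further down.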
*/ + dev_dbg_f(urb_dev(urb), "*** first fragment ***\n"); + ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment)); + spin_lock(&rx->lock); + memcpy(rx->fragment, buffer, length); + rx->fragment_length = length; + spin_unlock(&rx->lock); + goto resubmit; + } + + spin_lock(&rx->lock); + if (rx->fragment_length > 0) { + /* We are on a second fragment, we believe */ + ZD_ASSERT(length + rx->fragment_length <= + ARRAY_SIZE(rx->fragment)); + dev_dbg_f(urb_dev(urb), "*** second fragment ***\n"); + memcpy(rx->fragment+rx->fragment_length, buffer, length); + handle_rx_packet(usb, rx->fragment, + rx->fragment_length + length); + rx->fragment_length = 0; + spin_unlock(&rx->lock); + } else { + spin_unlock(&rx->lock); + handle_rx_packet(usb, buffer, length); + } + +resubmit: + usb_submit_urb(urb, GFP_ATOMIC); +} + +static struct urb *alloc_rx_urb(struct zd_usb *usb) +{ + struct usb_device *udev = zd_usb_to_usbdev(usb); + struct urb *urb; + void *buffer; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return NULL; + buffer = usb_buffer_alloc(udev, USB_MAX_RX_SIZE, GFP_KERNEL, + &urb->transfer_dma); + if (!buffer) { + usb_free_urb(urb); + return NULL; + } + + usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN), + buffer, USB_MAX_RX_SIZE, + rx_urb_complete, usb); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + return urb; +} + +static void free_rx_urb(struct urb *urb) +{ + if (!urb) + return; + usb_buffer_free(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); +} + +int zd_usb_enable_rx(struct zd_usb *usb) +{ + int i, r; + struct zd_usb_rx *rx = &usb->rx; + struct urb **urbs; + + dev_dbg_f(zd_usb_dev(usb), "\n"); + + r = -ENOMEM; + urbs = kcalloc(RX_URBS_COUNT, sizeof(struct urb *), GFP_KERNEL); + if (!urbs) + goto error; + for (i = 0; i < RX_URBS_COUNT; i++) { + urbs[i] = alloc_rx_urb(usb); + if (!urbs[i]) + goto error; + } + + ZD_ASSERT(!irqs_disabled()); + spin_lock_irq(&rx->lock); + if (rx->urbs) { + spin_unlock_irq(&rx->lock); + r = 0; + goto error; + } + rx->urbs = urbs; + rx->urbs_count = RX_URBS_COUNT; + spin_unlock_irq(&rx->lock); + + for (i = 0; i < RX_URBS_COUNT; i++) { + r = usb_submit_urb(urbs[i], GFP_KERNEL); + if (r) + goto error_submit; + } + + return 0; +error_submit: + for (i = 0; i < RX_URBS_COUNT; i++) { + usb_kill_urb(urbs[i]); + } + spin_lock_irq(&rx->lock); + rx->urbs = NULL; + rx->urbs_count = 0; + spin_unlock_irq(&rx->lock); +error: + if (urbs) { + for (i = 0; i < RX_URBS_COUNT; i++) + free_rx_urb(urbs[i]); + } + return r; +} + +void zd_usb_disable_rx(struct zd_usb *usb) +{ + int i; + unsigned long flags; + struct urb **urbs; + unsigned int count; + struct zd_usb_rx *rx = &usb->rx; + + spin_lock_irqsave(&rx->lock, flags); + urbs = rx->urbs; + count = rx->urbs_count; + spin_unlock_irqrestore(&rx->lock, flags); + if (!urbs) + return; + + for (i = 0; i < count; i++) { + usb_kill_urb(urbs[i]); + free_rx_urb(urbs[i]); + } + kfree(urbs); + + spin_lock_irqsave(&rx->lock, flags); + rx->urbs = NULL; + rx->urbs_count = 0; + spin_unlock_irqrestore(&rx->lock, flags); +} + +/** + * zd_usb_disable_tx - disable transmission + * @usb: the zd1211rw-private USB structure + * + * Frees all URBs in the free list and marks the transmission as disabled. 
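+ * URBs still in flight are not waited for; because tx->enabled is cleared here, free_tx_urb() releases them on completion instead of returning them to the free list.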
+ */ +void zd_usb_disable_tx(struct zd_usb *usb) +{ + struct zd_usb_tx *tx = &usb->tx; + unsigned long flags; + struct list_head *pos, *n; + + spin_lock_irqsave(&tx->lock, flags); + list_for_each_safe(pos, n, &tx->free_urb_list) { + list_del(pos); + usb_free_urb(list_entry(pos, struct urb, urb_list)); + } + tx->enabled = 0; + tx->submitted_urbs = 0; + /* The stopped state is ignored, relying on ieee80211_wake_queues() + * in a potentially following zd_usb_enable_tx(). + */ + spin_unlock_irqrestore(&tx->lock, flags); +} + +/** + * zd_usb_enable_tx - enables transmission + * @usb: a &struct zd_usb pointer + * + * This function enables transmission and prepares the &zd_usb_tx data + * structure. + */ +void zd_usb_enable_tx(struct zd_usb *usb) +{ + unsigned long flags; + struct zd_usb_tx *tx = &usb->tx; + + spin_lock_irqsave(&tx->lock, flags); + tx->enabled = 1; + tx->submitted_urbs = 0; + ieee80211_wake_queues(zd_usb_to_hw(usb)); + tx->stopped = 0; + spin_unlock_irqrestore(&tx->lock, flags); +} + +/** + * alloc_tx_urb - provides a tx URB + * @usb: a &struct zd_usb pointer + * + * Allocates a new URB. If possible, takes the urb from the free list in + * usb->tx. + */ +static struct urb *alloc_tx_urb(struct zd_usb *usb) +{ + struct zd_usb_tx *tx = &usb->tx; + unsigned long flags; + struct list_head *entry; + struct urb *urb; + + spin_lock_irqsave(&tx->lock, flags); + if (list_empty(&tx->free_urb_list)) { + urb = usb_alloc_urb(0, GFP_ATOMIC); + goto out; + } + entry = tx->free_urb_list.next; + list_del(entry); + urb = list_entry(entry, struct urb, urb_list); +out: + spin_unlock_irqrestore(&tx->lock, flags); + return urb; +} + +/** + * free_tx_urb - frees a used tx URB + * @usb: a &struct zd_usb pointer + * @urb: URB to be freed + * + * Frees the transmission URB, which means to put it on the free URB + * list. + */ +static void free_tx_urb(struct zd_usb *usb, struct urb *urb) +{ + struct zd_usb_tx *tx = &usb->tx; + unsigned long flags; + + spin_lock_irqsave(&tx->lock, flags); + if (!tx->enabled) { + usb_free_urb(urb); + goto out; + } + list_add(&urb->urb_list, &tx->free_urb_list); +out: + spin_unlock_irqrestore(&tx->lock, flags); +} + +static void tx_dec_submitted_urbs(struct zd_usb *usb) +{ + struct zd_usb_tx *tx = &usb->tx; + unsigned long flags; + + spin_lock_irqsave(&tx->lock, flags); + --tx->submitted_urbs; + if (tx->stopped && tx->submitted_urbs <= ZD_USB_TX_LOW) { + ieee80211_wake_queues(zd_usb_to_hw(usb)); + tx->stopped = 0; + } + spin_unlock_irqrestore(&tx->lock, flags); +} + +static void tx_inc_submitted_urbs(struct zd_usb *usb) +{ + struct zd_usb_tx *tx = &usb->tx; + unsigned long flags; + + spin_lock_irqsave(&tx->lock, flags); + ++tx->submitted_urbs; + if (!tx->stopped && tx->submitted_urbs > ZD_USB_TX_HIGH) { + ieee80211_stop_queues(zd_usb_to_hw(usb)); + tx->stopped = 1; + } + spin_unlock_irqrestore(&tx->lock, flags); +} + +/** + * tx_urb_complete - completes the execution of an URB + * @urb: a URB + * + * This function is called if the URB has been transferred to a device or an + * error has happened. 
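+ * On success and on fatal errors the skb is handed back to the stack via zd_mac_tx_to_dev() and the URB is returned to the free list; other errors trigger a resubmission attempt.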
+ */ +static void tx_urb_complete(struct urb *urb) +{ + int r; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + struct zd_usb *usb; + + switch (urb->status) { + case 0: + break; + case -ESHUTDOWN: + case -EINVAL: + case -ENODEV: + case -ENOENT: + case -ECONNRESET: + case -EPIPE: + dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); + break; + default: + dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); + goto resubmit; + } +free_urb: + skb = (struct sk_buff *)urb->context; + /* + * grab 'usb' pointer before handing off the skb (since + * it might be freed by zd_mac_tx_to_dev or mac80211) + */ + info = IEEE80211_SKB_CB(skb); + usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb; + zd_mac_tx_to_dev(skb, urb->status); + free_tx_urb(usb, urb); + tx_dec_submitted_urbs(usb); + return; +resubmit: + r = usb_submit_urb(urb, GFP_ATOMIC); + if (r) { + dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r); + goto free_urb; + } +} + +/** + * zd_usb_tx: initiates transfer of a frame to the device + * + * @usb: the zd1211rw-private USB structure + * @skb: a &struct sk_buff pointer + * + * This function transmits a frame to the device. It doesn't wait for + * completion. The frame must contain the control set and have all the + * control set information available. + * + * The function returns 0 if the transfer has been successfully initiated. + */ +int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb) +{ + int r; + struct usb_device *udev = zd_usb_to_usbdev(usb); + struct urb *urb; + + urb = alloc_tx_urb(usb); + if (!urb) { + r = -ENOMEM; + goto out; + } + + usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT), + skb->data, skb->len, tx_urb_complete, skb); + + r = usb_submit_urb(urb, GFP_ATOMIC); + if (r) + goto error; + tx_inc_submitted_urbs(usb); + return 0; +error: + free_tx_urb(usb, urb); +out: + return r; +} + +static inline void init_usb_interrupt(struct zd_usb *usb) +{ + struct zd_usb_interrupt *intr = &usb->intr; + + spin_lock_init(&intr->lock); + intr->interval = int_urb_interval(zd_usb_to_usbdev(usb)); + init_completion(&intr->read_regs.completion); + intr->read_regs.cr_int_addr = cpu_to_le16((u16)CR_INTERRUPT); +} + +static inline void init_usb_rx(struct zd_usb *usb) +{ + struct zd_usb_rx *rx = &usb->rx; + spin_lock_init(&rx->lock); + if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) { + rx->usb_packet_size = 512; + } else { + rx->usb_packet_size = 64; + } + ZD_ASSERT(rx->fragment_length == 0); +} + +static inline void init_usb_tx(struct zd_usb *usb) +{ + struct zd_usb_tx *tx = &usb->tx; + spin_lock_init(&tx->lock); + tx->enabled = 0; + tx->stopped = 0; + INIT_LIST_HEAD(&tx->free_urb_list); + tx->submitted_urbs = 0; +} + +void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw, + struct usb_interface *intf) +{ + memset(usb, 0, sizeof(*usb)); + usb->intf = usb_get_intf(intf); + usb_set_intfdata(usb->intf, hw); + init_usb_interrupt(usb); + init_usb_tx(usb); + init_usb_rx(usb); +} + +void zd_usb_clear(struct zd_usb *usb) +{ + usb_set_intfdata(usb->intf, NULL); + usb_put_intf(usb->intf); + ZD_MEMCLEAR(usb, sizeof(*usb)); + /* FIXME: usb_interrupt, usb_tx, usb_rx? 
*/ +} + +static const char *speed(enum usb_device_speed speed) +{ + switch (speed) { + case USB_SPEED_LOW: + return "low"; + case USB_SPEED_FULL: + return "full"; + case USB_SPEED_HIGH: + return "high"; + default: + return "unknown speed"; + } +} + +static int scnprint_id(struct usb_device *udev, char *buffer, size_t size) +{ + return scnprintf(buffer, size, "%04hx:%04hx v%04hx %s", + le16_to_cpu(udev->descriptor.idVendor), + le16_to_cpu(udev->descriptor.idProduct), + get_bcdDevice(udev), + speed(udev->speed)); +} + +int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size) +{ + struct usb_device *udev = interface_to_usbdev(usb->intf); + return scnprint_id(udev, buffer, size); +} + +#ifdef DEBUG +static void print_id(struct usb_device *udev) +{ + char buffer[40]; + + scnprint_id(udev, buffer, sizeof(buffer)); + buffer[sizeof(buffer)-1] = 0; + dev_dbg_f(&udev->dev, "%s\n", buffer); +} +#else +#define print_id(udev) do { } while (0) +#endif + +static int eject_installer(struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_host_interface *iface_desc = &intf->altsetting[0]; + struct usb_endpoint_descriptor *endpoint; + unsigned char *cmd; + u8 bulk_out_ep; + int r; + + /* Find bulk out endpoint */ + for (r = 1; r >= 0; r--) { + endpoint = &iface_desc->endpoint[r].desc; + if (usb_endpoint_dir_out(endpoint) && + usb_endpoint_xfer_bulk(endpoint)) { + bulk_out_ep = endpoint->bEndpointAddress; + break; + } + } + if (r == -1) { + dev_err(&udev->dev, + "zd1211rw: Could not find bulk out endpoint\n"); + return -ENODEV; + } + + cmd = kzalloc(31, GFP_KERNEL); + if (cmd == NULL) + return -ENODEV; + + /* USB bulk command block */ + cmd[0] = 0x55; /* bulk command signature */ + cmd[1] = 0x53; /* bulk command signature */ + cmd[2] = 0x42; /* bulk command signature */ + cmd[3] = 0x43; /* bulk command signature */ + cmd[14] = 6; /* command length */ + + cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */ + cmd[19] = 0x2; /* eject disc */ + + dev_info(&udev->dev, "Ejecting virtual installer media...\n"); + r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep), + cmd, 31, NULL, 2000); + kfree(cmd); + if (r) + return r; + + /* At this point, the device disconnects and reconnects with the real + * ID numbers. */ + + usb_set_intfdata(intf, NULL); + return 0; +} + +int zd_usb_init_hw(struct zd_usb *usb) +{ + int r; + struct zd_mac *mac = zd_usb_to_mac(usb); + + dev_dbg_f(zd_usb_dev(usb), "\n"); + + r = upload_firmware(usb); + if (r) { + dev_err(zd_usb_dev(usb), + "couldn't load firmware. Error number %d\n", r); + return r; + } + + r = usb_reset_configuration(zd_usb_to_usbdev(usb)); + if (r) { + dev_dbg_f(zd_usb_dev(usb), + "couldn't reset configuration. Error number %d\n", r); + return r; + } + + r = zd_mac_init_hw(mac->hw); + if (r) { + dev_dbg_f(zd_usb_dev(usb), + "couldn't initialize mac. Error number %d\n", r); + return r; + } + + usb->initialized = 1; + return 0; +} + +static int probe(struct usb_interface *intf, const struct usb_device_id *id) +{ + int r; + struct usb_device *udev = interface_to_usbdev(intf); + struct zd_usb *usb; + struct ieee80211_hw *hw = NULL; + + print_id(udev); + + if (id->driver_info & DEVICE_INSTALLER) + return eject_installer(intf); + + switch (udev->speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + break; + default: + dev_dbg_f(&intf->dev, "Unknown USB speed\n"); + r = -ENODEV; + goto error; + } + + r = usb_reset_device(udev); + if (r) { + dev_err(&intf->dev, + "couldn't reset usb device. 
Error number %d\n", r); + goto error; + } + + hw = zd_mac_alloc_hw(intf); + if (hw == NULL) { + r = -ENOMEM; + goto error; + } + + usb = &zd_hw_mac(hw)->chip.usb; + usb->is_zd1211b = (id->driver_info == DEVICE_ZD1211B) != 0; + + r = zd_mac_preinit_hw(hw); + if (r) { + dev_dbg_f(&intf->dev, + "couldn't initialize mac. Error number %d\n", r); + goto error; + } + + r = ieee80211_register_hw(hw); + if (r) { + dev_dbg_f(&intf->dev, + "couldn't register device. Error number %d\n", r); + goto error; + } + + dev_dbg_f(&intf->dev, "successful\n"); + dev_info(&intf->dev, "%s\n", wiphy_name(hw->wiphy)); + return 0; +error: + usb_reset_device(interface_to_usbdev(intf)); + if (hw) { + zd_mac_clear(zd_hw_mac(hw)); + ieee80211_free_hw(hw); + } + return r; +} + +static void disconnect(struct usb_interface *intf) +{ + struct ieee80211_hw *hw = zd_intf_to_hw(intf); + struct zd_mac *mac; + struct zd_usb *usb; + + /* Either something really bad happened, or we're just dealing with + * a DEVICE_INSTALLER. */ + if (hw == NULL) + return; + + mac = zd_hw_mac(hw); + usb = &mac->chip.usb; + + dev_dbg_f(zd_usb_dev(usb), "\n"); + + ieee80211_unregister_hw(hw); + + /* Just in case something has gone wrong! */ + zd_usb_disable_rx(usb); + zd_usb_disable_int(usb); + + /* If the disconnect has been caused by a removal of the + * driver module, the reset allows reloading of the driver. If the + * reset will not be executed here, the upload of the firmware in the + * probe function caused by the reloading of the driver will fail. + */ + usb_reset_device(interface_to_usbdev(intf)); + + zd_mac_clear(mac); + ieee80211_free_hw(hw); + dev_dbg(&intf->dev, "disconnected\n"); +} + +static struct usb_driver driver = { + .name = KBUILD_MODNAME, + .id_table = usb_ids, + .probe = probe, + .disconnect = disconnect, +}; + +struct workqueue_struct *zd_workqueue; + +static int __init usb_init(void) +{ + int r; + + pr_debug("%s usb_init()\n", driver.name); + + zd_workqueue = create_singlethread_workqueue(driver.name); + if (zd_workqueue == NULL) { + printk(KERN_ERR "%s couldn't create workqueue\n", driver.name); + return -ENOMEM; + } + + r = usb_register(&driver); + if (r) { + destroy_workqueue(zd_workqueue); + printk(KERN_ERR "%s usb_register() failed. Error number %d\n", + driver.name, r); + return r; + } + + pr_debug("%s initialized\n", driver.name); + return 0; +} + +static void __exit usb_exit(void) +{ + pr_debug("%s usb_exit()\n", driver.name); + usb_deregister(&driver); + destroy_workqueue(zd_workqueue); +} + +module_init(usb_init); +module_exit(usb_exit); + +static int usb_int_regs_length(unsigned int count) +{ + return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data); +} + +static void prepare_read_regs_int(struct zd_usb *usb) +{ + struct zd_usb_interrupt *intr = &usb->intr; + + spin_lock_irq(&intr->lock); + intr->read_regs_enabled = 1; + INIT_COMPLETION(intr->read_regs.completion); + spin_unlock_irq(&intr->lock); +} + +static void disable_read_regs_int(struct zd_usb *usb) +{ + struct zd_usb_interrupt *intr = &usb->intr; + + spin_lock_irq(&intr->lock); + intr->read_regs_enabled = 0; + spin_unlock_irq(&intr->lock); +} + +static int get_results(struct zd_usb *usb, u16 *values, + struct usb_req_read_regs *req, unsigned int count) +{ + int r; + int i; + struct zd_usb_interrupt *intr = &usb->intr; + struct read_regs_int *rr = &intr->read_regs; + struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer; + + spin_lock_irq(&intr->lock); + + r = -EIO; + /* The created block size seems to be larger than expected. 
+ * However results appear to be correct. + */ + if (rr->length < usb_int_regs_length(count)) { + dev_dbg_f(zd_usb_dev(usb), + "error: actual length %d less than expected %d\n", + rr->length, usb_int_regs_length(count)); + goto error_unlock; + } + if (rr->length > sizeof(rr->buffer)) { + dev_dbg_f(zd_usb_dev(usb), + "error: actual length %d exceeds buffer size %zu\n", + rr->length, sizeof(rr->buffer)); + goto error_unlock; + } + + for (i = 0; i < count; i++) { + struct reg_data *rd = &regs->regs[i]; + if (rd->addr != req->addr[i]) { + dev_dbg_f(zd_usb_dev(usb), + "rd[%d] addr %#06hx expected %#06hx\n", i, + le16_to_cpu(rd->addr), + le16_to_cpu(req->addr[i])); + goto error_unlock; + } + values[i] = le16_to_cpu(rd->value); + } + + r = 0; +error_unlock: + spin_unlock_irq(&intr->lock); + return r; +} + +int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, + const zd_addr_t *addresses, unsigned int count) +{ + int r; + int i, req_len, actual_req_len; + struct usb_device *udev; + struct usb_req_read_regs *req = NULL; + unsigned long timeout; + + if (count < 1) { + dev_dbg_f(zd_usb_dev(usb), "error: count is zero\n"); + return -EINVAL; + } + if (count > USB_MAX_IOREAD16_COUNT) { + dev_dbg_f(zd_usb_dev(usb), + "error: count %u exceeds possible max %u\n", + count, USB_MAX_IOREAD16_COUNT); + return -EINVAL; + } + if (in_atomic()) { + dev_dbg_f(zd_usb_dev(usb), + "error: io in atomic context not supported\n"); + return -EWOULDBLOCK; + } + if (!usb_int_enabled(usb)) { + dev_dbg_f(zd_usb_dev(usb), + "error: usb interrupt not enabled\n"); + return -EWOULDBLOCK; + } + + req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16); + req = kmalloc(req_len, GFP_KERNEL); + if (!req) + return -ENOMEM; + req->id = cpu_to_le16(USB_REQ_READ_REGS); + for (i = 0; i < count; i++) + req->addr[i] = cpu_to_le16((u16)addresses[i]); + + udev = zd_usb_to_usbdev(usb); + prepare_read_regs_int(usb); + r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), + req, req_len, &actual_req_len, 1000 /* ms */); + if (r) { + dev_dbg_f(zd_usb_dev(usb), + "error in usb_bulk_msg(). 
Error number %d\n", r); + goto error; + } + if (req_len != actual_req_len) { + dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n" + " req_len %d != actual_req_len %d\n", + req_len, actual_req_len); + r = -EIO; + goto error; + } + + timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion, + msecs_to_jiffies(1000)); + if (!timeout) { + disable_read_regs_int(usb); + dev_dbg_f(zd_usb_dev(usb), "read timed out\n"); + r = -ETIMEDOUT; + goto error; + } + + r = get_results(usb, values, req, count); +error: + kfree(req); + return r; +} + +int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, + unsigned int count) +{ + int r; + struct usb_device *udev; + struct usb_req_write_regs *req = NULL; + int i, req_len, actual_req_len; + + if (count == 0) + return 0; + if (count > USB_MAX_IOWRITE16_COUNT) { + dev_dbg_f(zd_usb_dev(usb), + "error: count %u exceeds possible max %u\n", + count, USB_MAX_IOWRITE16_COUNT); + return -EINVAL; + } + if (in_atomic()) { + dev_dbg_f(zd_usb_dev(usb), + "error: io in atomic context not supported\n"); + return -EWOULDBLOCK; + } + + req_len = sizeof(struct usb_req_write_regs) + + count * sizeof(struct reg_data); + req = kmalloc(req_len, GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->id = cpu_to_le16(USB_REQ_WRITE_REGS); + for (i = 0; i < count; i++) { + struct reg_data *rw = &req->reg_writes[i]; + rw->addr = cpu_to_le16((u16)ioreqs[i].addr); + rw->value = cpu_to_le16(ioreqs[i].value); + } + + udev = zd_usb_to_usbdev(usb); + r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), + req, req_len, &actual_req_len, 1000 /* ms */); + if (r) { + dev_dbg_f(zd_usb_dev(usb), + "error in usb_bulk_msg(). Error number %d\n", r); + goto error; + } + if (req_len != actual_req_len) { + dev_dbg_f(zd_usb_dev(usb), + "error in usb_bulk_msg()" + " req_len %d != actual_req_len %d\n", + req_len, actual_req_len); + r = -EIO; + goto error; + } + + /* FALL-THROUGH with r == 0 */ +error: + kfree(req); + return r; +} + +int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) +{ + int r; + struct usb_device *udev; + struct usb_req_rfwrite *req = NULL; + int i, req_len, actual_req_len; + u16 bit_value_template; + + if (in_atomic()) { + dev_dbg_f(zd_usb_dev(usb), + "error: io in atomic context not supported\n"); + return -EWOULDBLOCK; + } + if (bits < USB_MIN_RFWRITE_BIT_COUNT) { + dev_dbg_f(zd_usb_dev(usb), + "error: bits %d are smaller than" + " USB_MIN_RFWRITE_BIT_COUNT %d\n", + bits, USB_MIN_RFWRITE_BIT_COUNT); + return -EINVAL; + } + if (bits > USB_MAX_RFWRITE_BIT_COUNT) { + dev_dbg_f(zd_usb_dev(usb), + "error: bits %d exceed USB_MAX_RFWRITE_BIT_COUNT %d\n", + bits, USB_MAX_RFWRITE_BIT_COUNT); + return -EINVAL; + } +#ifdef DEBUG + if (value & (~0UL << bits)) { + dev_dbg_f(zd_usb_dev(usb), + "error: value %#09x has bits >= %d set\n", + value, bits); + return -EINVAL; + } +#endif /* DEBUG */ + + dev_dbg_f(zd_usb_dev(usb), "value %#09x bits %d\n", value, bits); + + r = zd_usb_ioread16(usb, &bit_value_template, CR203); + if (r) { + dev_dbg_f(zd_usb_dev(usb), + "error %d: Couldn't read CR203\n", r); + goto out; + } + bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA); + + req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16); + req = kmalloc(req_len, GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->id = cpu_to_le16(USB_REQ_WRITE_RF); + /* 1: 3683a, but not used in ZYDAS driver */ + req->value = cpu_to_le16(2); + req->bits = cpu_to_le16(bits); + + for (i = 0; i < bits; i++) { + u16 bv = bit_value_template; + if (value & (1 << (bits-1-i))) + bv 
|= RF_DATA; + req->bit_values[i] = cpu_to_le16(bv); + } + + udev = zd_usb_to_usbdev(usb); + r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), + req, req_len, &actual_req_len, 1000 /* ms */); + if (r) { + dev_dbg_f(zd_usb_dev(usb), + "error in usb_bulk_msg(). Error number %d\n", r); + goto out; + } + if (req_len != actual_req_len) { + dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()" + " req_len %d != actual_req_len %d\n", + req_len, actual_req_len); + r = -EIO; + goto out; + } + + /* FALL-THROUGH with r == 0 */ +out: + kfree(req); + return r; +} diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h new file mode 100644 index 0000000..049f8b9 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_usb.h @@ -0,0 +1,266 @@ +/* ZD1211 USB-WLAN driver for Linux + * + * Copyright (C) 2005-2007 Ulrich Kunitz + * Copyright (C) 2006-2007 Daniel Drake + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ZD_USB_H +#define _ZD_USB_H + +#include +#include +#include +#include +#include + +#include "zd_def.h" + +#define ZD_USB_TX_HIGH 5 +#define ZD_USB_TX_LOW 2 + +enum devicetype { + DEVICE_ZD1211 = 0, + DEVICE_ZD1211B = 1, + DEVICE_INSTALLER = 2, +}; + +enum endpoints { + EP_CTRL = 0, + EP_DATA_OUT = 1, + EP_DATA_IN = 2, + EP_INT_IN = 3, + EP_REGS_OUT = 4, +}; + +enum { + USB_MAX_TRANSFER_SIZE = 4096, /* bytes */ + /* FIXME: The original driver uses this value. We have to check, + * whether the MAX_TRANSFER_SIZE is sufficient and this needs only be + * used if one combined frame is split over two USB transactions. + */ + USB_MAX_RX_SIZE = 4800, /* bytes */ + USB_MAX_IOWRITE16_COUNT = 15, + USB_MAX_IOWRITE32_COUNT = USB_MAX_IOWRITE16_COUNT/2, + USB_MAX_IOREAD16_COUNT = 15, + USB_MAX_IOREAD32_COUNT = USB_MAX_IOREAD16_COUNT/2, + USB_MIN_RFWRITE_BIT_COUNT = 16, + USB_MAX_RFWRITE_BIT_COUNT = 28, + USB_MAX_EP_INT_BUFFER = 64, + USB_ZD1211B_BCD_DEVICE = 0x4810, +}; + +enum control_requests { + USB_REQ_WRITE_REGS = 0x21, + USB_REQ_READ_REGS = 0x22, + USB_REQ_WRITE_RF = 0x23, + USB_REQ_PROG_FLASH = 0x24, + USB_REQ_EEPROM_START = 0x0128, /* ? request is a byte */ + USB_REQ_EEPROM_MID = 0x28, + USB_REQ_EEPROM_END = 0x0228, /* ? 
request is a byte */ + USB_REQ_FIRMWARE_DOWNLOAD = 0x30, + USB_REQ_FIRMWARE_CONFIRM = 0x31, + USB_REQ_FIRMWARE_READ_DATA = 0x32, +}; + +struct usb_req_read_regs { + __le16 id; + __le16 addr[0]; +} __attribute__((packed)); + +struct reg_data { + __le16 addr; + __le16 value; +} __attribute__((packed)); + +struct usb_req_write_regs { + __le16 id; + struct reg_data reg_writes[0]; +} __attribute__((packed)); + +enum { + RF_IF_LE = 0x02, + RF_CLK = 0x04, + RF_DATA = 0x08, +}; + +struct usb_req_rfwrite { + __le16 id; + __le16 value; + /* 1: 3683a */ + /* 2: other (default) */ + __le16 bits; + /* RF2595: 24 */ + __le16 bit_values[0]; + /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */ +} __attribute__((packed)); + +/* USB interrupt */ + +enum usb_int_id { + USB_INT_TYPE = 0x01, + USB_INT_ID_REGS = 0x90, + USB_INT_ID_RETRY_FAILED = 0xa0, +}; + +enum usb_int_flags { + USB_INT_READ_REGS_EN = 0x01, +}; + +struct usb_int_header { + u8 type; /* must always be 1 */ + u8 id; +} __attribute__((packed)); + +struct usb_int_regs { + struct usb_int_header hdr; + struct reg_data regs[0]; +} __attribute__((packed)); + +struct usb_int_retry_fail { + struct usb_int_header hdr; + u8 new_rate; + u8 _dummy; + u8 addr[ETH_ALEN]; + u8 ibss_wakeup_dest; +} __attribute__((packed)); + +struct read_regs_int { + struct completion completion; + /* Stores the USB int structure and contains the USB address of the + * first requested register before request. + */ + u8 buffer[USB_MAX_EP_INT_BUFFER]; + int length; + __le16 cr_int_addr; +}; + +struct zd_ioreq16 { + zd_addr_t addr; + u16 value; +}; + +struct zd_ioreq32 { + zd_addr_t addr; + u32 value; +}; + +struct zd_usb_interrupt { + struct read_regs_int read_regs; + spinlock_t lock; + struct urb *urb; + int interval; + u8 read_regs_enabled:1; +}; + +static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr) +{ + return (struct usb_int_regs *)intr->read_regs.buffer; +} + +#define RX_URBS_COUNT 5 + +struct zd_usb_rx { + spinlock_t lock; + u8 fragment[2*USB_MAX_RX_SIZE]; + unsigned int fragment_length; + unsigned int usb_packet_size; + struct urb **urbs; + int urbs_count; +}; + +/** + * struct zd_usb_tx - structure used for transmitting frames + * @lock: lock for transmission + * @free_urb_list: list of free URBs, contains all the URBs, which can be used + * @submitted_urbs: atomic integer that counts the URBs having sent to the + * device, which haven't been completed + * @enabled: enabled flag, indicates whether tx is enabled + * @stopped: indicates whether higher level tx queues are stopped + */ +struct zd_usb_tx { + spinlock_t lock; + struct list_head free_urb_list; + int submitted_urbs; + int enabled; + int stopped; +}; + +/* Contains the usb parts. The structure doesn't require a lock because intf + * will not be changed after initialization. 
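+ * The intr, rx and tx substructures each carry their own spinlock.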
+ */ +struct zd_usb { + struct zd_usb_interrupt intr; + struct zd_usb_rx rx; + struct zd_usb_tx tx; + struct usb_interface *intf; + u8 is_zd1211b:1, initialized:1; +}; + +#define zd_usb_dev(usb) (&usb->intf->dev) + +static inline struct usb_device *zd_usb_to_usbdev(struct zd_usb *usb) +{ + return interface_to_usbdev(usb->intf); +} + +static inline struct ieee80211_hw *zd_intf_to_hw(struct usb_interface *intf) +{ + return usb_get_intfdata(intf); +} + +static inline struct ieee80211_hw *zd_usb_to_hw(struct zd_usb *usb) +{ + return zd_intf_to_hw(usb->intf); +} + +void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw, + struct usb_interface *intf); +int zd_usb_init_hw(struct zd_usb *usb); +void zd_usb_clear(struct zd_usb *usb); + +int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size); + +int zd_usb_enable_int(struct zd_usb *usb); +void zd_usb_disable_int(struct zd_usb *usb); + +int zd_usb_enable_rx(struct zd_usb *usb); +void zd_usb_disable_rx(struct zd_usb *usb); + +void zd_usb_enable_tx(struct zd_usb *usb); +void zd_usb_disable_tx(struct zd_usb *usb); + +int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb); + +int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, + const zd_addr_t *addresses, unsigned int count); + +static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value, + const zd_addr_t addr) +{ + return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1); +} + +int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, + unsigned int count); + +int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits); + +int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len); + +extern struct workqueue_struct *zd_workqueue; + +#endif /* _ZD_USB_H */ diff --git a/drivers/ssb/Makefile b/drivers/ssb/Makefile new file mode 100644 index 0000000..656e58b --- /dev/null +++ b/drivers/ssb/Makefile @@ -0,0 +1,23 @@ +# core +ssb-y += main.o scan.o +ssb-$(CONFIG_SSB_EMBEDDED) += embedded.o +ssb-$(CONFIG_SSB_SPROM) += sprom.o + +# host support +ssb-$(CONFIG_SSB_PCIHOST) += pci.o pcihost_wrapper.o +ssb-$(CONFIG_SSB_PCMCIAHOST) += pcmcia.o +ssb-$(CONFIG_SSB_SDIOHOST) += sdio.o + +# built-in drivers +ssb-y += driver_chipcommon.o +ssb-y += driver_chipcommon_pmu.o +ssb-$(CONFIG_SSB_DRIVER_MIPS) += driver_mipscore.o +ssb-$(CONFIG_SSB_DRIVER_EXTIF) += driver_extif.o +ssb-$(CONFIG_SSB_DRIVER_PCICORE) += driver_pcicore.o +ssb-$(CONFIG_SSB_DRIVER_GIGE) += driver_gige.o + +# b43 pci-ssb-bridge driver +# Not strictly a part of SSB, but kept here for convenience +ssb-$(CONFIG_SSB_B43_PCI_BRIDGE) += b43_pci_bridge.o + +obj-$(CONFIG_SSB) += ssb.o diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c new file mode 100644 index 0000000..ef9c6a0 --- /dev/null +++ b/drivers/ssb/b43_pci_bridge.c @@ -0,0 +1,53 @@ +/* + * Broadcom 43xx PCI-SSB bridge module + * + * This technically is a separate PCI driver module, but + * because of its small size we include it in the SSB core + * instead of creating a standalone module. + * + * Copyright 2007 Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. 
+ */ + +#include +#include + +#include "ssb_private.h" + + +static const struct pci_device_id b43_pci_bridge_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4306) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4307) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4311) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4324) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4325) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) }, + { 0, }, +}; +MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl); + +static struct pci_driver b43_pci_bridge_driver = { + .name = "b43-pci-bridge", + .id_table = b43_pci_bridge_tbl, +}; + + +int __init b43_pci_ssb_bridge_init(void) +{ + return ssb_pcihost_register(&b43_pci_bridge_driver); +} + +void __exit b43_pci_ssb_bridge_exit(void) +{ + ssb_pcihost_unregister(&b43_pci_bridge_driver); +} diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c new file mode 100644 index 0000000..9681536 --- /dev/null +++ b/drivers/ssb/driver_chipcommon.c @@ -0,0 +1,486 @@ +/* + * Sonics Silicon Backplane + * Broadcom ChipCommon core driver + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, 2007, Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include +#include +#include + +#include "ssb_private.h" + + +/* Clock sources */ +enum ssb_clksrc { + /* PCI clock */ + SSB_CHIPCO_CLKSRC_PCI, + /* Crystal slow clock oscillator */ + SSB_CHIPCO_CLKSRC_XTALOS, + /* Low power oscillator */ + SSB_CHIPCO_CLKSRC_LOPWROS, +}; + + +static inline u32 chipco_write32_masked(struct ssb_chipcommon *cc, u16 offset, + u32 mask, u32 value) +{ + value &= mask; + value |= chipco_read32(cc, offset) & ~mask; + chipco_write32(cc, offset, value); + + return value; +} + +void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, + enum ssb_clkmode mode) +{ + struct ssb_device *ccdev = cc->dev; + struct ssb_bus *bus; + u32 tmp; + + if (!ccdev) + return; + bus = ccdev->bus; + /* chipcommon cores prior to rev6 don't support dynamic clock control */ + if (ccdev->id.revision < 6) + return; + /* chipcommon cores rev10 are a whole new ball game */ + if (ccdev->id.revision >= 10) + return; + if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL)) + return; + + switch (mode) { + case SSB_CLKMODE_SLOW: + tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL); + tmp |= SSB_CHIPCO_SLOWCLKCTL_FSLOW; + chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp); + break; + case SSB_CLKMODE_FAST: + ssb_pci_xtal(bus, SSB_GPIO_XTAL, 1); /* Force crystal on */ + tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL); + tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW; + tmp |= SSB_CHIPCO_SLOWCLKCTL_IPLL; + chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp); + break; + case SSB_CLKMODE_DYNAMIC: + tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL); + tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW; + tmp &= ~SSB_CHIPCO_SLOWCLKCTL_IPLL; + tmp &= ~SSB_CHIPCO_SLOWCLKCTL_ENXTAL; + if ((tmp & SSB_CHIPCO_SLOWCLKCTL_SRC) != SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL) + tmp |= SSB_CHIPCO_SLOWCLKCTL_ENXTAL; + chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp); + + /* for dynamic control, we have to release our xtal_pu "force on" */ + 
if (tmp & SSB_CHIPCO_SLOWCLKCTL_ENXTAL) + ssb_pci_xtal(bus, SSB_GPIO_XTAL, 0); + break; + default: + SSB_WARN_ON(1); + } +} + +/* Get the Slow Clock Source */ +static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc) +{ + struct ssb_bus *bus = cc->dev->bus; + u32 uninitialized_var(tmp); + + if (cc->dev->id.revision < 6) { + if (bus->bustype == SSB_BUSTYPE_SSB || + bus->bustype == SSB_BUSTYPE_PCMCIA) + return SSB_CHIPCO_CLKSRC_XTALOS; + if (bus->bustype == SSB_BUSTYPE_PCI) { + pci_read_config_dword(bus->host_pci, SSB_GPIO_OUT, &tmp); + if (tmp & 0x10) + return SSB_CHIPCO_CLKSRC_PCI; + return SSB_CHIPCO_CLKSRC_XTALOS; + } + } + if (cc->dev->id.revision < 10) { + tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL); + tmp &= 0x7; + if (tmp == 0) + return SSB_CHIPCO_CLKSRC_LOPWROS; + if (tmp == 1) + return SSB_CHIPCO_CLKSRC_XTALOS; + if (tmp == 2) + return SSB_CHIPCO_CLKSRC_PCI; + } + + return SSB_CHIPCO_CLKSRC_XTALOS; +} + +/* Get maximum or minimum (depending on get_max flag) slowclock frequency. */ +static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max) +{ + int uninitialized_var(limit); + enum ssb_clksrc clocksrc; + int divisor = 1; + u32 tmp; + + clocksrc = chipco_pctl_get_slowclksrc(cc); + if (cc->dev->id.revision < 6) { + switch (clocksrc) { + case SSB_CHIPCO_CLKSRC_PCI: + divisor = 64; + break; + case SSB_CHIPCO_CLKSRC_XTALOS: + divisor = 32; + break; + default: + SSB_WARN_ON(1); + } + } else if (cc->dev->id.revision < 10) { + switch (clocksrc) { + case SSB_CHIPCO_CLKSRC_LOPWROS: + break; + case SSB_CHIPCO_CLKSRC_XTALOS: + case SSB_CHIPCO_CLKSRC_PCI: + tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL); + divisor = (tmp >> 16) + 1; + divisor *= 4; + break; + } + } else { + tmp = chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL); + divisor = (tmp >> 16) + 1; + divisor *= 4; + } + + switch (clocksrc) { + case SSB_CHIPCO_CLKSRC_LOPWROS: + if (get_max) + limit = 43000; + else + limit = 25000; + break; + case SSB_CHIPCO_CLKSRC_XTALOS: + if (get_max) + limit = 20200000; + else + limit = 19800000; + break; + case SSB_CHIPCO_CLKSRC_PCI: + if (get_max) + limit = 34000000; + else + limit = 25000000; + break; + } + limit /= divisor; + + return limit; +} + +static void chipco_powercontrol_init(struct ssb_chipcommon *cc) +{ + struct ssb_bus *bus = cc->dev->bus; + + if (bus->chip_id == 0x4321) { + if (bus->chip_rev == 0) + chipco_write32(cc, SSB_CHIPCO_CHIPCTL, 0x3A4); + else if (bus->chip_rev == 1) + chipco_write32(cc, SSB_CHIPCO_CHIPCTL, 0xA4); + } + + if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL)) + return; + + if (cc->dev->id.revision >= 10) { + /* Set Idle Power clock rate to 1Mhz */ + chipco_write32(cc, SSB_CHIPCO_SYSCLKCTL, + (chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL) & + 0x0000FFFF) | 0x00040000); + } else { + int maxfreq; + + maxfreq = chipco_pctl_clockfreqlimit(cc, 1); + chipco_write32(cc, SSB_CHIPCO_PLLONDELAY, + (maxfreq * 150 + 999999) / 1000000); + chipco_write32(cc, SSB_CHIPCO_FREFSELDELAY, + (maxfreq * 15 + 999999) / 1000000); + } +} + +static void calc_fast_powerup_delay(struct ssb_chipcommon *cc) +{ + struct ssb_bus *bus = cc->dev->bus; + int minfreq; + unsigned int tmp; + u32 pll_on_delay; + + if (bus->bustype != SSB_BUSTYPE_PCI) + return; + if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL)) + return; + + minfreq = chipco_pctl_clockfreqlimit(cc, 0); + pll_on_delay = chipco_read32(cc, SSB_CHIPCO_PLLONDELAY); + tmp = (((pll_on_delay + 2) * 1000000) + (minfreq - 1)) / minfreq; + SSB_WARN_ON(tmp & ~0xFFFF); + + cc->fast_pwrup_delay = tmp; +} + +void 
ssb_chipcommon_init(struct ssb_chipcommon *cc) +{ + if (!cc->dev) + return; /* We don't have a ChipCommon */ + ssb_pmu_init(cc); + chipco_powercontrol_init(cc); + ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); + calc_fast_powerup_delay(cc); +} + +void ssb_chipco_suspend(struct ssb_chipcommon *cc) +{ + if (!cc->dev) + return; + ssb_chipco_set_clockmode(cc, SSB_CLKMODE_SLOW); +} + +void ssb_chipco_resume(struct ssb_chipcommon *cc) +{ + if (!cc->dev) + return; + chipco_powercontrol_init(cc); + ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); +} + +/* Get the processor clock */ +void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m) +{ + *n = chipco_read32(cc, SSB_CHIPCO_CLOCK_N); + *plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT); + switch (*plltype) { + case SSB_PLLTYPE_2: + case SSB_PLLTYPE_4: + case SSB_PLLTYPE_6: + case SSB_PLLTYPE_7: + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_MIPS); + break; + case SSB_PLLTYPE_3: + /* 5350 uses m2 to control mips */ + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2); + break; + default: + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB); + break; + } +} + +/* Get the bus clock */ +void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m) +{ + *n = chipco_read32(cc, SSB_CHIPCO_CLOCK_N); + *plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT); + switch (*plltype) { + case SSB_PLLTYPE_6: /* 100/200 or 120/240 only */ + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_MIPS); + break; + case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */ + if (cc->dev->bus->chip_id != 0x5365) { + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2); + break; + } + /* Fallthough */ + default: + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB); + } +} + +void ssb_chipco_timing_init(struct ssb_chipcommon *cc, + unsigned long ns) +{ + struct ssb_device *dev = cc->dev; + struct ssb_bus *bus = dev->bus; + u32 tmp; + + /* set register for external IO to control LED. 
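+ * The wait-state counts programmed below are derived from the backplane clock period given in ns.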
*/ + chipco_write32(cc, SSB_CHIPCO_PROG_CFG, 0x11); + tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; /* Waitcount-3 = 10ns */ + tmp |= DIV_ROUND_UP(40, ns) << SSB_PROG_WCNT_1_SHIFT; /* Waitcount-1 = 40ns */ + tmp |= DIV_ROUND_UP(240, ns); /* Waitcount-0 = 240ns */ + chipco_write32(cc, SSB_CHIPCO_PROG_WAITCNT, tmp); /* 0x01020a0c for a 100Mhz clock */ + + /* Set timing for the flash */ + tmp = DIV_ROUND_UP(10, ns) << SSB_FLASH_WCNT_3_SHIFT; /* Waitcount-3 = 10nS */ + tmp |= DIV_ROUND_UP(10, ns) << SSB_FLASH_WCNT_1_SHIFT; /* Waitcount-1 = 10nS */ + tmp |= DIV_ROUND_UP(120, ns); /* Waitcount-0 = 120nS */ + if ((bus->chip_id == 0x5365) || + (dev->id.revision < 9)) + chipco_write32(cc, SSB_CHIPCO_FLASH_WAITCNT, tmp); + if ((bus->chip_id == 0x5365) || + (dev->id.revision < 9) || + ((bus->chip_id == 0x5350) && (bus->chip_rev == 0))) + chipco_write32(cc, SSB_CHIPCO_PCMCIA_MEMWAIT, tmp); + + if (bus->chip_id == 0x5350) { + /* Enable EXTIF */ + tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; /* Waitcount-3 = 10ns */ + tmp |= DIV_ROUND_UP(20, ns) << SSB_PROG_WCNT_2_SHIFT; /* Waitcount-2 = 20ns */ + tmp |= DIV_ROUND_UP(100, ns) << SSB_PROG_WCNT_1_SHIFT; /* Waitcount-1 = 100ns */ + tmp |= DIV_ROUND_UP(120, ns); /* Waitcount-0 = 120ns */ + chipco_write32(cc, SSB_CHIPCO_PROG_WAITCNT, tmp); /* 0x01020a0c for a 100Mhz clock */ + } +} + +/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */ +void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, u32 ticks) +{ + /* instant NMI */ + chipco_write32(cc, SSB_CHIPCO_WATCHDOG, ticks); +} + +void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value) +{ + chipco_write32_masked(cc, SSB_CHIPCO_IRQMASK, mask, value); +} + +u32 ssb_chipco_irq_status(struct ssb_chipcommon *cc, u32 mask) +{ + return chipco_read32(cc, SSB_CHIPCO_IRQSTAT) & mask; +} + +u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask) +{ + return chipco_read32(cc, SSB_CHIPCO_GPIOIN) & mask; +} + +u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value) +{ + return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value); +} + +u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value) +{ + return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value); +} + +u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value) +{ + return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value); +} + +u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value) +{ + return chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value); +} + +u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value) +{ + return chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value); +} + +#ifdef CONFIG_SSB_SERIAL +int ssb_chipco_serial_init(struct ssb_chipcommon *cc, + struct ssb_serial_port *ports) +{ + struct ssb_bus *bus = cc->dev->bus; + int nr_ports = 0; + u32 plltype; + unsigned int irq; + u32 baud_base, div; + u32 i, n; + unsigned int ccrev = cc->dev->id.revision; + + plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT); + irq = ssb_mips_irq(cc->dev); + + if (plltype == SSB_PLLTYPE_1) { + /* PLL clock */ + baud_base = ssb_calc_clock_rate(plltype, + chipco_read32(cc, SSB_CHIPCO_CLOCK_N), + chipco_read32(cc, SSB_CHIPCO_CLOCK_M2)); + div = 1; + } else { + if (ccrev == 20) { + /* BCM5354 uses constant 25MHz clock */ + baud_base = 25000000; + div = 48; + /* Set the override bit so we don't divide it */ + chipco_write32(cc, SSB_CHIPCO_CORECTL, + chipco_read32(cc, SSB_CHIPCO_CORECTL) + | 
SSB_CHIPCO_CORECTL_UARTCLK0); + } else if ((ccrev >= 11) && (ccrev != 15)) { + /* Fixed ALP clock */ + baud_base = 20000000; + if (cc->capabilities & SSB_CHIPCO_CAP_PMU) { + /* FIXME: baud_base is different for devices with a PMU */ + SSB_WARN_ON(1); + } + div = 1; + if (ccrev >= 21) { + /* Turn off UART clock before switching clocksource. */ + chipco_write32(cc, SSB_CHIPCO_CORECTL, + chipco_read32(cc, SSB_CHIPCO_CORECTL) + & ~SSB_CHIPCO_CORECTL_UARTCLKEN); + } + /* Set the override bit so we don't divide it */ + chipco_write32(cc, SSB_CHIPCO_CORECTL, + chipco_read32(cc, SSB_CHIPCO_CORECTL) + | SSB_CHIPCO_CORECTL_UARTCLK0); + if (ccrev >= 21) { + /* Re-enable the UART clock. */ + chipco_write32(cc, SSB_CHIPCO_CORECTL, + chipco_read32(cc, SSB_CHIPCO_CORECTL) + | SSB_CHIPCO_CORECTL_UARTCLKEN); + } + } else if (ccrev >= 3) { + /* Internal backplane clock */ + baud_base = ssb_clockspeed(bus); + div = chipco_read32(cc, SSB_CHIPCO_CLKDIV) + & SSB_CHIPCO_CLKDIV_UART; + } else { + /* Fixed internal backplane clock */ + baud_base = 88000000; + div = 48; + } + + /* Clock source depends on strapping if UartClkOverride is unset */ + if ((ccrev > 0) && + !(chipco_read32(cc, SSB_CHIPCO_CORECTL) & SSB_CHIPCO_CORECTL_UARTCLK0)) { + if ((cc->capabilities & SSB_CHIPCO_CAP_UARTCLK) == + SSB_CHIPCO_CAP_UARTCLK_INT) { + /* Internal divided backplane clock */ + baud_base /= div; + } else { + /* Assume external clock of 1.8432 MHz */ + baud_base = 1843200; + } + } + } + + /* Determine the registers of the UARTs */ + n = (cc->capabilities & SSB_CHIPCO_CAP_NRUART); + for (i = 0; i < n; i++) { + void __iomem *cc_mmio; + void __iomem *uart_regs; + + cc_mmio = cc->dev->bus->mmio + (cc->dev->core_index * SSB_CORE_SIZE); + uart_regs = cc_mmio + SSB_CHIPCO_UART0_DATA; + /* Offset changed at after rev 0 */ + if (ccrev == 0) + uart_regs += (i * 8); + else + uart_regs += (i * 256); + + nr_ports++; + ports[i].regs = uart_regs; + ports[i].irq = irq; + ports[i].baud_base = baud_base; + ports[i].reg_shift = 0; + } + + return nr_ports; +} +#endif /* CONFIG_SSB_SERIAL */ diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c new file mode 100644 index 0000000..3d55124 --- /dev/null +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -0,0 +1,609 @@ +/* + * Sonics Silicon Backplane + * Broadcom ChipCommon Power Management Unit driver + * + * Copyright 2009, Michael Buesch + * Copyright 2007, Broadcom Corporation + * + * Licensed under the GNU/GPL. See COPYING for details. 
+ */ + +#include +#include +#include +#include + +#include "ssb_private.h" + +static u32 ssb_chipco_pll_read(struct ssb_chipcommon *cc, u32 offset) +{ + chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset); + return chipco_read32(cc, SSB_CHIPCO_PLLCTL_DATA); +} + +static void ssb_chipco_pll_write(struct ssb_chipcommon *cc, + u32 offset, u32 value) +{ + chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset); + chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, value); +} + +static void ssb_chipco_regctl_maskset(struct ssb_chipcommon *cc, + u32 offset, u32 mask, u32 set) +{ + u32 value; + + chipco_read32(cc, SSB_CHIPCO_REGCTL_ADDR); + chipco_write32(cc, SSB_CHIPCO_REGCTL_ADDR, offset); + chipco_read32(cc, SSB_CHIPCO_REGCTL_ADDR); + value = chipco_read32(cc, SSB_CHIPCO_REGCTL_DATA); + value &= mask; + value |= set; + chipco_write32(cc, SSB_CHIPCO_REGCTL_DATA, value); + chipco_read32(cc, SSB_CHIPCO_REGCTL_DATA); +} + +struct pmu0_plltab_entry { + u16 freq; /* Crystal frequency in kHz.*/ + u8 xf; /* Crystal frequency value for PMU control */ + u8 wb_int; + u32 wb_frac; +}; + +static const struct pmu0_plltab_entry pmu0_plltab[] = { + { .freq = 12000, .xf = 1, .wb_int = 73, .wb_frac = 349525, }, + { .freq = 13000, .xf = 2, .wb_int = 67, .wb_frac = 725937, }, + { .freq = 14400, .xf = 3, .wb_int = 61, .wb_frac = 116508, }, + { .freq = 15360, .xf = 4, .wb_int = 57, .wb_frac = 305834, }, + { .freq = 16200, .xf = 5, .wb_int = 54, .wb_frac = 336579, }, + { .freq = 16800, .xf = 6, .wb_int = 52, .wb_frac = 399457, }, + { .freq = 19200, .xf = 7, .wb_int = 45, .wb_frac = 873813, }, + { .freq = 19800, .xf = 8, .wb_int = 44, .wb_frac = 466033, }, + { .freq = 20000, .xf = 9, .wb_int = 44, .wb_frac = 0, }, + { .freq = 25000, .xf = 10, .wb_int = 70, .wb_frac = 419430, }, + { .freq = 26000, .xf = 11, .wb_int = 67, .wb_frac = 725937, }, + { .freq = 30000, .xf = 12, .wb_int = 58, .wb_frac = 699050, }, + { .freq = 38400, .xf = 13, .wb_int = 45, .wb_frac = 873813, }, + { .freq = 40000, .xf = 14, .wb_int = 45, .wb_frac = 0, }, +}; +#define SSB_PMU0_DEFAULT_XTALFREQ 20000 + +static const struct pmu0_plltab_entry * pmu0_plltab_find_entry(u32 crystalfreq) +{ + const struct pmu0_plltab_entry *e; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pmu0_plltab); i++) { + e = &pmu0_plltab[i]; + if (e->freq == crystalfreq) + return e; + } + + return NULL; +} + +/* Tune the PLL to the crystal speed. crystalfreq is in kHz. */ +static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc, + u32 crystalfreq) +{ + struct ssb_bus *bus = cc->dev->bus; + const struct pmu0_plltab_entry *e = NULL; + u32 pmuctl, tmp, pllctl; + unsigned int i; + + if ((bus->chip_id == 0x5354) && !crystalfreq) { + /* The 5354 crystal freq is 25MHz */ + crystalfreq = 25000; + } + if (crystalfreq) + e = pmu0_plltab_find_entry(crystalfreq); + if (!e) + e = pmu0_plltab_find_entry(SSB_PMU0_DEFAULT_XTALFREQ); + BUG_ON(!e); + crystalfreq = e->freq; + cc->pmu.crystalfreq = e->freq; + + /* Check if the PLL already is programmed to this frequency. */ + pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL); + if (((pmuctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) == e->xf) { + /* We're already there... */ + return; + } + + ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n", + (crystalfreq / 1000), (crystalfreq % 1000)); + + /* First turn the PLL off. 
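+ * The BB PLL power-up resource is cleared from both the minimum and maximum resource masks, then CLKCTLST is polled until the HT clock is reported down.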
*/ + switch (bus->chip_id) { + case 0x4328: + chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK, + ~(1 << SSB_PMURES_4328_BB_PLL_PU)); + chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, + ~(1 << SSB_PMURES_4328_BB_PLL_PU)); + break; + case 0x5354: + chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK, + ~(1 << SSB_PMURES_5354_BB_PLL_PU)); + chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, + ~(1 << SSB_PMURES_5354_BB_PLL_PU)); + break; + default: + SSB_WARN_ON(1); + } + for (i = 1500; i; i--) { + tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); + if (!(tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)) + break; + udelay(10); + } + tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); + if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) + ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n"); + + /* Set PDIV in PLL control 0. */ + pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0); + if (crystalfreq >= SSB_PMU0_PLLCTL0_PDIV_FREQ) + pllctl |= SSB_PMU0_PLLCTL0_PDIV_MSK; + else + pllctl &= ~SSB_PMU0_PLLCTL0_PDIV_MSK; + ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL0, pllctl); + + /* Set WILD in PLL control 1. */ + pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL1); + pllctl &= ~SSB_PMU0_PLLCTL1_STOPMOD; + pllctl &= ~(SSB_PMU0_PLLCTL1_WILD_IMSK | SSB_PMU0_PLLCTL1_WILD_FMSK); + pllctl |= ((u32)e->wb_int << SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT) & SSB_PMU0_PLLCTL1_WILD_IMSK; + pllctl |= ((u32)e->wb_frac << SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT) & SSB_PMU0_PLLCTL1_WILD_FMSK; + if (e->wb_frac == 0) + pllctl |= SSB_PMU0_PLLCTL1_STOPMOD; + ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL1, pllctl); + + /* Set WILD in PLL control 2. */ + pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL2); + pllctl &= ~SSB_PMU0_PLLCTL2_WILD_IMSKHI; + pllctl |= (((u32)e->wb_int >> 4) << SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT) & SSB_PMU0_PLLCTL2_WILD_IMSKHI; + ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL2, pllctl); + + /* Set the crystalfrequency and the divisor. 
*/ + pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL); + pmuctl &= ~SSB_CHIPCO_PMU_CTL_ILP_DIV; + pmuctl |= (((crystalfreq + 127) / 128 - 1) << SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT) + & SSB_CHIPCO_PMU_CTL_ILP_DIV; + pmuctl &= ~SSB_CHIPCO_PMU_CTL_XTALFREQ; + pmuctl |= ((u32)e->xf << SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) & SSB_CHIPCO_PMU_CTL_XTALFREQ; + chipco_write32(cc, SSB_CHIPCO_PMU_CTL, pmuctl); +} + +struct pmu1_plltab_entry { + u16 freq; /* Crystal frequency in kHz.*/ + u8 xf; /* Crystal frequency value for PMU control */ + u8 ndiv_int; + u32 ndiv_frac; + u8 p1div; + u8 p2div; +}; + +static const struct pmu1_plltab_entry pmu1_plltab[] = { + { .freq = 12000, .xf = 1, .p1div = 3, .p2div = 22, .ndiv_int = 0x9, .ndiv_frac = 0xFFFFEF, }, + { .freq = 13000, .xf = 2, .p1div = 1, .p2div = 6, .ndiv_int = 0xb, .ndiv_frac = 0x483483, }, + { .freq = 14400, .xf = 3, .p1div = 1, .p2div = 10, .ndiv_int = 0xa, .ndiv_frac = 0x1C71C7, }, + { .freq = 15360, .xf = 4, .p1div = 1, .p2div = 5, .ndiv_int = 0xb, .ndiv_frac = 0x755555, }, + { .freq = 16200, .xf = 5, .p1div = 1, .p2div = 10, .ndiv_int = 0x5, .ndiv_frac = 0x6E9E06, }, + { .freq = 16800, .xf = 6, .p1div = 1, .p2div = 10, .ndiv_int = 0x5, .ndiv_frac = 0x3CF3CF, }, + { .freq = 19200, .xf = 7, .p1div = 1, .p2div = 9, .ndiv_int = 0x5, .ndiv_frac = 0x17B425, }, + { .freq = 19800, .xf = 8, .p1div = 1, .p2div = 11, .ndiv_int = 0x4, .ndiv_frac = 0xA57EB, }, + { .freq = 20000, .xf = 9, .p1div = 1, .p2div = 11, .ndiv_int = 0x4, .ndiv_frac = 0, }, + { .freq = 24000, .xf = 10, .p1div = 3, .p2div = 11, .ndiv_int = 0xa, .ndiv_frac = 0, }, + { .freq = 25000, .xf = 11, .p1div = 5, .p2div = 16, .ndiv_int = 0xb, .ndiv_frac = 0, }, + { .freq = 26000, .xf = 12, .p1div = 1, .p2div = 2, .ndiv_int = 0x10, .ndiv_frac = 0xEC4EC4, }, + { .freq = 30000, .xf = 13, .p1div = 3, .p2div = 8, .ndiv_int = 0xb, .ndiv_frac = 0, }, + { .freq = 38400, .xf = 14, .p1div = 1, .p2div = 5, .ndiv_int = 0x4, .ndiv_frac = 0x955555, }, + { .freq = 40000, .xf = 15, .p1div = 1, .p2div = 2, .ndiv_int = 0xb, .ndiv_frac = 0, }, +}; + +#define SSB_PMU1_DEFAULT_XTALFREQ 15360 + +static const struct pmu1_plltab_entry * pmu1_plltab_find_entry(u32 crystalfreq) +{ + const struct pmu1_plltab_entry *e; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pmu1_plltab); i++) { + e = &pmu1_plltab[i]; + if (e->freq == crystalfreq) + return e; + } + + return NULL; +} + +/* Tune the PLL to the crystal speed. crystalfreq is in kHz. */ +static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc, + u32 crystalfreq) +{ + struct ssb_bus *bus = cc->dev->bus; + const struct pmu1_plltab_entry *e = NULL; + u32 buffer_strength = 0; + u32 tmp, pllctl, pmuctl; + unsigned int i; + + if (bus->chip_id == 0x4312) { + /* We do not touch the BCM4312 PLL and assume + * the default crystal settings work out-of-the-box. */ + cc->pmu.crystalfreq = 20000; + return; + } + + if (crystalfreq) + e = pmu1_plltab_find_entry(crystalfreq); + if (!e) + e = pmu1_plltab_find_entry(SSB_PMU1_DEFAULT_XTALFREQ); + BUG_ON(!e); + crystalfreq = e->freq; + cc->pmu.crystalfreq = e->freq; + + /* Check if the PLL already is programmed to this frequency. */ + pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL); + if (((pmuctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) == e->xf) { + /* We're already there... */ + return; + } + + ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n", + (crystalfreq / 1000), (crystalfreq % 1000)); + + /* First turn the PLL off. 
*/ + switch (bus->chip_id) { + case 0x4325: + chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK, + ~((1 << SSB_PMURES_4325_BBPLL_PWRSW_PU) | + (1 << SSB_PMURES_4325_HT_AVAIL))); + chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, + ~((1 << SSB_PMURES_4325_BBPLL_PWRSW_PU) | + (1 << SSB_PMURES_4325_HT_AVAIL))); + /* Adjust the BBPLL to 2 on all channels later. */ + buffer_strength = 0x222222; + break; + default: + SSB_WARN_ON(1); + } + for (i = 1500; i; i--) { + tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); + if (!(tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)) + break; + udelay(10); + } + tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); + if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) + ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n"); + + /* Set p1div and p2div. */ + pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0); + pllctl &= ~(SSB_PMU1_PLLCTL0_P1DIV | SSB_PMU1_PLLCTL0_P2DIV); + pllctl |= ((u32)e->p1div << SSB_PMU1_PLLCTL0_P1DIV_SHIFT) & SSB_PMU1_PLLCTL0_P1DIV; + pllctl |= ((u32)e->p2div << SSB_PMU1_PLLCTL0_P2DIV_SHIFT) & SSB_PMU1_PLLCTL0_P2DIV; + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, pllctl); + + /* Set ndiv int and ndiv mode */ + pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL2); + pllctl &= ~(SSB_PMU1_PLLCTL2_NDIVINT | SSB_PMU1_PLLCTL2_NDIVMODE); + pllctl |= ((u32)e->ndiv_int << SSB_PMU1_PLLCTL2_NDIVINT_SHIFT) & SSB_PMU1_PLLCTL2_NDIVINT; + pllctl |= (1 << SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT) & SSB_PMU1_PLLCTL2_NDIVMODE; + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, pllctl); + + /* Set ndiv frac */ + pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL3); + pllctl &= ~SSB_PMU1_PLLCTL3_NDIVFRAC; + pllctl |= ((u32)e->ndiv_frac << SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT) & SSB_PMU1_PLLCTL3_NDIVFRAC; + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL3, pllctl); + + /* Change the drive strength, if required. */ + if (buffer_strength) { + pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL5); + pllctl &= ~SSB_PMU1_PLLCTL5_CLKDRV; + pllctl |= (buffer_strength << SSB_PMU1_PLLCTL5_CLKDRV_SHIFT) & SSB_PMU1_PLLCTL5_CLKDRV; + ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, pllctl); + } + + /* Tune the crystalfreq and the divisor. */ + pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL); + pmuctl &= ~(SSB_CHIPCO_PMU_CTL_ILP_DIV | SSB_CHIPCO_PMU_CTL_XTALFREQ); + pmuctl |= ((((u32)e->freq + 127) / 128 - 1) << SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT) + & SSB_CHIPCO_PMU_CTL_ILP_DIV; + pmuctl |= ((u32)e->xf << SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) & SSB_CHIPCO_PMU_CTL_XTALFREQ; + chipco_write32(cc, SSB_CHIPCO_PMU_CTL, pmuctl); +} + +static void ssb_pmu_pll_init(struct ssb_chipcommon *cc) +{ + struct ssb_bus *bus = cc->dev->bus; + u32 crystalfreq = 0; /* in kHz. 0 = keep default freq. */ + + if (bus->bustype == SSB_BUSTYPE_SSB) { + /* TODO: The user may override the crystal frequency. 
*/ + } + + switch (bus->chip_id) { + case 0x4312: + case 0x4325: + ssb_pmu1_pllinit_r0(cc, crystalfreq); + break; + case 0x4328: + case 0x5354: + ssb_pmu0_pllinit_r0(cc, crystalfreq); + break; + case 0x4322: + if (cc->pmu.rev == 2) { + chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, 0x0000000A); + chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, 0x380005C0); + } + break; + default: + ssb_printk(KERN_ERR PFX + "ERROR: PLL init unknown for device %04X\n", + bus->chip_id); + } +} + +struct pmu_res_updown_tab_entry { + u8 resource; /* The resource number */ + u16 updown; /* The updown value */ +}; + +enum pmu_res_depend_tab_task { + PMU_RES_DEP_SET = 1, + PMU_RES_DEP_ADD, + PMU_RES_DEP_REMOVE, +}; + +struct pmu_res_depend_tab_entry { + u8 resource; /* The resource number */ + u8 task; /* SET | ADD | REMOVE */ + u32 depend; /* The depend mask */ +}; + +static const struct pmu_res_updown_tab_entry pmu_res_updown_tab_4328a0[] = { + { .resource = SSB_PMURES_4328_EXT_SWITCHER_PWM, .updown = 0x0101, }, + { .resource = SSB_PMURES_4328_BB_SWITCHER_PWM, .updown = 0x1F01, }, + { .resource = SSB_PMURES_4328_BB_SWITCHER_BURST, .updown = 0x010F, }, + { .resource = SSB_PMURES_4328_BB_EXT_SWITCHER_BURST, .updown = 0x0101, }, + { .resource = SSB_PMURES_4328_ILP_REQUEST, .updown = 0x0202, }, + { .resource = SSB_PMURES_4328_RADIO_SWITCHER_PWM, .updown = 0x0F01, }, + { .resource = SSB_PMURES_4328_RADIO_SWITCHER_BURST, .updown = 0x0F01, }, + { .resource = SSB_PMURES_4328_ROM_SWITCH, .updown = 0x0101, }, + { .resource = SSB_PMURES_4328_PA_REF_LDO, .updown = 0x0F01, }, + { .resource = SSB_PMURES_4328_RADIO_LDO, .updown = 0x0F01, }, + { .resource = SSB_PMURES_4328_AFE_LDO, .updown = 0x0F01, }, + { .resource = SSB_PMURES_4328_PLL_LDO, .updown = 0x0F01, }, + { .resource = SSB_PMURES_4328_BG_FILTBYP, .updown = 0x0101, }, + { .resource = SSB_PMURES_4328_TX_FILTBYP, .updown = 0x0101, }, + { .resource = SSB_PMURES_4328_RX_FILTBYP, .updown = 0x0101, }, + { .resource = SSB_PMURES_4328_XTAL_PU, .updown = 0x0101, }, + { .resource = SSB_PMURES_4328_XTAL_EN, .updown = 0xA001, }, + { .resource = SSB_PMURES_4328_BB_PLL_FILTBYP, .updown = 0x0101, }, + { .resource = SSB_PMURES_4328_RF_PLL_FILTBYP, .updown = 0x0101, }, + { .resource = SSB_PMURES_4328_BB_PLL_PU, .updown = 0x0701, }, +}; + +static const struct pmu_res_depend_tab_entry pmu_res_depend_tab_4328a0[] = { + { + /* Adjust ILP Request to avoid forcing EXT/BB into burst mode. */ + .resource = SSB_PMURES_4328_ILP_REQUEST, + .task = PMU_RES_DEP_SET, + .depend = ((1 << SSB_PMURES_4328_EXT_SWITCHER_PWM) | + (1 << SSB_PMURES_4328_BB_SWITCHER_PWM)), + }, +}; + +static const struct pmu_res_updown_tab_entry pmu_res_updown_tab_4325a0[] = { + { .resource = SSB_PMURES_4325_XTAL_PU, .updown = 0x1501, }, +}; + +static const struct pmu_res_depend_tab_entry pmu_res_depend_tab_4325a0[] = { + { + /* Adjust HT-Available dependencies. 
*/ + .resource = SSB_PMURES_4325_HT_AVAIL, + .task = PMU_RES_DEP_ADD, + .depend = ((1 << SSB_PMURES_4325_RX_PWRSW_PU) | + (1 << SSB_PMURES_4325_TX_PWRSW_PU) | + (1 << SSB_PMURES_4325_LOGEN_PWRSW_PU) | + (1 << SSB_PMURES_4325_AFE_PWRSW_PU)), + }, +}; + +static void ssb_pmu_resources_init(struct ssb_chipcommon *cc) +{ + struct ssb_bus *bus = cc->dev->bus; + u32 min_msk = 0, max_msk = 0; + unsigned int i; + const struct pmu_res_updown_tab_entry *updown_tab = NULL; + unsigned int updown_tab_size; + const struct pmu_res_depend_tab_entry *depend_tab = NULL; + unsigned int depend_tab_size; + + switch (bus->chip_id) { + case 0x4312: + case 0x4322: + /* We keep the default settings: + * min_msk = 0xCBB + * max_msk = 0x7FFFF + */ + break; + case 0x4325: + /* Power OTP down later. */ + min_msk = (1 << SSB_PMURES_4325_CBUCK_BURST) | + (1 << SSB_PMURES_4325_LNLDO2_PU); + if (chipco_read32(cc, SSB_CHIPCO_CHIPSTAT) & + SSB_CHIPCO_CHST_4325_PMUTOP_2B) + min_msk |= (1 << SSB_PMURES_4325_CLDO_CBUCK_BURST); + /* The PLL may turn on, if it decides so. */ + max_msk = 0xFFFFF; + updown_tab = pmu_res_updown_tab_4325a0; + updown_tab_size = ARRAY_SIZE(pmu_res_updown_tab_4325a0); + depend_tab = pmu_res_depend_tab_4325a0; + depend_tab_size = ARRAY_SIZE(pmu_res_depend_tab_4325a0); + break; + case 0x4328: + min_msk = (1 << SSB_PMURES_4328_EXT_SWITCHER_PWM) | + (1 << SSB_PMURES_4328_BB_SWITCHER_PWM) | + (1 << SSB_PMURES_4328_XTAL_EN); + /* The PLL may turn on, if it decides so. */ + max_msk = 0xFFFFF; + updown_tab = pmu_res_updown_tab_4328a0; + updown_tab_size = ARRAY_SIZE(pmu_res_updown_tab_4328a0); + depend_tab = pmu_res_depend_tab_4328a0; + depend_tab_size = ARRAY_SIZE(pmu_res_depend_tab_4328a0); + break; + case 0x5354: + /* The PLL may turn on, if it decides so. */ + max_msk = 0xFFFFF; + break; + default: + ssb_printk(KERN_ERR PFX + "ERROR: PMU resource config unknown for device %04X\n", + bus->chip_id); + } + + if (updown_tab) { + for (i = 0; i < updown_tab_size; i++) { + chipco_write32(cc, SSB_CHIPCO_PMU_RES_TABSEL, + updown_tab[i].resource); + chipco_write32(cc, SSB_CHIPCO_PMU_RES_UPDNTM, + updown_tab[i].updown); + } + } + if (depend_tab) { + for (i = 0; i < depend_tab_size; i++) { + chipco_write32(cc, SSB_CHIPCO_PMU_RES_TABSEL, + depend_tab[i].resource); + switch (depend_tab[i].task) { + case PMU_RES_DEP_SET: + chipco_write32(cc, SSB_CHIPCO_PMU_RES_DEPMSK, + depend_tab[i].depend); + break; + case PMU_RES_DEP_ADD: + chipco_set32(cc, SSB_CHIPCO_PMU_RES_DEPMSK, + depend_tab[i].depend); + break; + case PMU_RES_DEP_REMOVE: + chipco_mask32(cc, SSB_CHIPCO_PMU_RES_DEPMSK, + ~(depend_tab[i].depend)); + break; + default: + SSB_WARN_ON(1); + } + } + } + + /* Set the resource masks. 
*/ + if (min_msk) + chipco_write32(cc, SSB_CHIPCO_PMU_MINRES_MSK, min_msk); + if (max_msk) + chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk); +} + +void ssb_pmu_init(struct ssb_chipcommon *cc) +{ + struct ssb_bus *bus = cc->dev->bus; + u32 pmucap; + + if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU)) + return; + + pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP); + cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION); + + ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n", + cc->pmu.rev, pmucap); + + if (cc->pmu.rev >= 1) { + if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) { + chipco_mask32(cc, SSB_CHIPCO_PMU_CTL, + ~SSB_CHIPCO_PMU_CTL_NOILPONW); + } else { + chipco_set32(cc, SSB_CHIPCO_PMU_CTL, + SSB_CHIPCO_PMU_CTL_NOILPONW); + } + } + ssb_pmu_pll_init(cc); + ssb_pmu_resources_init(cc); +} + +void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, + enum ssb_pmu_ldo_volt_id id, u32 voltage) +{ + struct ssb_bus *bus = cc->dev->bus; + u32 addr, shift, mask; + + switch (bus->chip_id) { + case 0x4328: + case 0x5354: + switch (id) { + case LDO_VOLT1: + addr = 2; + shift = 25; + mask = 0xF; + break; + case LDO_VOLT2: + addr = 3; + shift = 1; + mask = 0xF; + break; + case LDO_VOLT3: + addr = 3; + shift = 9; + mask = 0xF; + break; + case LDO_PAREF: + addr = 3; + shift = 17; + mask = 0x3F; + break; + default: + SSB_WARN_ON(1); + return; + } + break; + case 0x4312: + if (SSB_WARN_ON(id != LDO_PAREF)) + return; + addr = 0; + shift = 21; + mask = 0x3F; + break; + default: + return; + } + + ssb_chipco_regctl_maskset(cc, addr, ~(mask << shift), + (voltage & mask) << shift); +} + +void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on) +{ + struct ssb_bus *bus = cc->dev->bus; + int ldo; + + switch (bus->chip_id) { + case 0x4312: + ldo = SSB_PMURES_4312_PA_REF_LDO; + break; + case 0x4328: + ldo = SSB_PMURES_4328_PA_REF_LDO; + break; + case 0x5354: + ldo = SSB_PMURES_5354_PA_REF_LDO; + break; + default: + return; + } + + if (on) + chipco_set32(cc, SSB_CHIPCO_PMU_MINRES_MSK, 1 << ldo); + else + chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK, ~(1 << ldo)); + chipco_read32(cc, SSB_CHIPCO_PMU_MINRES_MSK); //SPEC FIXME found via mmiotrace - dummy read? +} + +EXPORT_SYMBOL(ssb_pmu_set_ldo_voltage); +EXPORT_SYMBOL(ssb_pmu_set_ldo_paref); diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c new file mode 100644 index 0000000..c3e1d3e --- /dev/null +++ b/drivers/ssb/driver_extif.c @@ -0,0 +1,146 @@ +/* + * Sonics Silicon Backplane + * Broadcom EXTIF core driver + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, 2007, Michael Buesch + * Copyright 2006, 2007, Felix Fietkau + * Copyright 2007, Aurelien Jarno + * + * Licensed under the GNU/GPL. See COPYING for details. 
+ */ + +#include +#include +#include + +#include "ssb_private.h" + + +static inline u32 extif_read32(struct ssb_extif *extif, u16 offset) +{ + return ssb_read32(extif->dev, offset); +} + +static inline void extif_write32(struct ssb_extif *extif, u16 offset, u32 value) +{ + ssb_write32(extif->dev, offset, value); +} + +static inline u32 extif_write32_masked(struct ssb_extif *extif, u16 offset, + u32 mask, u32 value) +{ + value &= mask; + value |= extif_read32(extif, offset) & ~mask; + extif_write32(extif, offset, value); + + return value; +} + +#ifdef CONFIG_SSB_SERIAL +static bool serial_exists(u8 *regs) +{ + u8 save_mcr, msr = 0; + + if (regs) { + save_mcr = regs[UART_MCR]; + regs[UART_MCR] = (UART_MCR_LOOP | UART_MCR_OUT2 | UART_MCR_RTS); + msr = regs[UART_MSR] & (UART_MSR_DCD | UART_MSR_RI + | UART_MSR_CTS | UART_MSR_DSR); + regs[UART_MCR] = save_mcr; + } + return (msr == (UART_MSR_DCD | UART_MSR_CTS)); +} + +int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports) +{ + u32 i, nr_ports = 0; + + /* Disable GPIO interrupt initially */ + extif_write32(extif, SSB_EXTIF_GPIO_INTPOL, 0); + extif_write32(extif, SSB_EXTIF_GPIO_INTMASK, 0); + + for (i = 0; i < 2; i++) { + void __iomem *uart_regs; + + uart_regs = ioremap_nocache(SSB_EUART, 16); + if (uart_regs) { + uart_regs += (i * 8); + + if (serial_exists(uart_regs) && ports) { + extif_write32(extif, SSB_EXTIF_GPIO_INTMASK, 2); + + nr_ports++; + ports[i].regs = uart_regs; + ports[i].irq = 2; + ports[i].baud_base = 13500000; + ports[i].reg_shift = 0; + } + iounmap(uart_regs); + } + } + return nr_ports; +} +#endif /* CONFIG_SSB_SERIAL */ + +void ssb_extif_timing_init(struct ssb_extif *extif, unsigned long ns) +{ + u32 tmp; + + /* Initialize extif so we can get to the LEDs and external UART */ + extif_write32(extif, SSB_EXTIF_PROG_CFG, SSB_EXTCFG_EN); + + /* Set timing for the flash */ + tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; + tmp |= DIV_ROUND_UP(40, ns) << SSB_PROG_WCNT_1_SHIFT; + tmp |= DIV_ROUND_UP(120, ns); + extif_write32(extif, SSB_EXTIF_PROG_WAITCNT, tmp); + + /* Set programmable interface timing for external uart */ + tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; + tmp |= DIV_ROUND_UP(20, ns) << SSB_PROG_WCNT_2_SHIFT; + tmp |= DIV_ROUND_UP(100, ns) << SSB_PROG_WCNT_1_SHIFT; + tmp |= DIV_ROUND_UP(120, ns); + extif_write32(extif, SSB_EXTIF_PROG_WAITCNT, tmp); +} + +void ssb_extif_get_clockcontrol(struct ssb_extif *extif, + u32 *pll_type, u32 *n, u32 *m) +{ + *pll_type = SSB_PLLTYPE_1; + *n = extif_read32(extif, SSB_EXTIF_CLOCK_N); + *m = extif_read32(extif, SSB_EXTIF_CLOCK_SB); +} + +void ssb_extif_watchdog_timer_set(struct ssb_extif *extif, + u32 ticks) +{ + extif_write32(extif, SSB_EXTIF_WATCHDOG, ticks); +} + +u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask) +{ + return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask; +} + +u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value) +{ + return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0), + mask, value); +} + +u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value) +{ + return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0), + mask, value); +} + +u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value) +{ + return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value); +} + +u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value) +{ + return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value); +} diff --git a/drivers/ssb/driver_gige.c 
b/drivers/ssb/driver_gige.c new file mode 100644 index 0000000..172f904 --- /dev/null +++ b/drivers/ssb/driver_gige.c @@ -0,0 +1,294 @@ +/* + * Sonics Silicon Backplane + * Broadcom Gigabit Ethernet core driver + * + * Copyright 2008, Broadcom Corporation + * Copyright 2008, Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include +#include +#include +#include + + +/* +MODULE_DESCRIPTION("SSB Broadcom Gigabit Ethernet driver"); +MODULE_AUTHOR("Michael Buesch"); +MODULE_LICENSE("GPL"); +*/ + +static const struct ssb_device_id ssb_gige_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET_GBIT, SSB_ANY_REV), + SSB_DEVTABLE_END +}; +/* MODULE_DEVICE_TABLE(ssb, ssb_gige_tbl); */ + + +static inline u8 gige_read8(struct ssb_gige *dev, u16 offset) +{ + return ssb_read8(dev->dev, offset); +} + +static inline u16 gige_read16(struct ssb_gige *dev, u16 offset) +{ + return ssb_read16(dev->dev, offset); +} + +static inline u32 gige_read32(struct ssb_gige *dev, u16 offset) +{ + return ssb_read32(dev->dev, offset); +} + +static inline void gige_write8(struct ssb_gige *dev, + u16 offset, u8 value) +{ + ssb_write8(dev->dev, offset, value); +} + +static inline void gige_write16(struct ssb_gige *dev, + u16 offset, u16 value) +{ + ssb_write16(dev->dev, offset, value); +} + +static inline void gige_write32(struct ssb_gige *dev, + u16 offset, u32 value) +{ + ssb_write32(dev->dev, offset, value); +} + +static inline +u8 gige_pcicfg_read8(struct ssb_gige *dev, unsigned int offset) +{ + BUG_ON(offset >= 256); + return gige_read8(dev, SSB_GIGE_PCICFG + offset); +} + +static inline +u16 gige_pcicfg_read16(struct ssb_gige *dev, unsigned int offset) +{ + BUG_ON(offset >= 256); + return gige_read16(dev, SSB_GIGE_PCICFG + offset); +} + +static inline +u32 gige_pcicfg_read32(struct ssb_gige *dev, unsigned int offset) +{ + BUG_ON(offset >= 256); + return gige_read32(dev, SSB_GIGE_PCICFG + offset); +} + +static inline +void gige_pcicfg_write8(struct ssb_gige *dev, + unsigned int offset, u8 value) +{ + BUG_ON(offset >= 256); + gige_write8(dev, SSB_GIGE_PCICFG + offset, value); +} + +static inline +void gige_pcicfg_write16(struct ssb_gige *dev, + unsigned int offset, u16 value) +{ + BUG_ON(offset >= 256); + gige_write16(dev, SSB_GIGE_PCICFG + offset, value); +} + +static inline +void gige_pcicfg_write32(struct ssb_gige *dev, + unsigned int offset, u32 value) +{ + BUG_ON(offset >= 256); + gige_write32(dev, SSB_GIGE_PCICFG + offset, value); +} + +static int ssb_gige_pci_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + struct ssb_gige *dev = container_of(bus->ops, struct ssb_gige, pci_ops); + unsigned long flags; + + if ((PCI_SLOT(devfn) > 0) || (PCI_FUNC(devfn) > 0)) + return PCIBIOS_DEVICE_NOT_FOUND; + if (reg >= 256) + return PCIBIOS_DEVICE_NOT_FOUND; + + spin_lock_irqsave(&dev->lock, flags); + switch (size) { + case 1: + *val = gige_pcicfg_read8(dev, reg); + break; + case 2: + *val = gige_pcicfg_read16(dev, reg); + break; + case 4: + *val = gige_pcicfg_read32(dev, reg); + break; + default: + WARN_ON(1); + } + spin_unlock_irqrestore(&dev->lock, flags); + + return PCIBIOS_SUCCESSFUL; +} + +static int ssb_gige_pci_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + struct ssb_gige *dev = container_of(bus->ops, struct ssb_gige, pci_ops); + unsigned long flags; + + if ((PCI_SLOT(devfn) > 0) || (PCI_FUNC(devfn) > 0)) + return PCIBIOS_DEVICE_NOT_FOUND; + if (reg >= 256) + return PCIBIOS_DEVICE_NOT_FOUND; + + 
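+	/*
+	 * As in the read path above, only device 0, function 0 and the first
+	 * 256 bytes of configuration space are reachable through this
+	 * emulated bus.  Illustrative use from the PCI side (generic kernel
+	 * API, not part of this file):
+	 *
+	 *	u16 vendor;
+	 *	pci_bus_read_config_word(bus, PCI_DEVFN(0, 0),
+	 *				 PCI_VENDOR_ID, &vendor);
+	 */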
spin_lock_irqsave(&dev->lock, flags); + switch (size) { + case 1: + gige_pcicfg_write8(dev, reg, val); + break; + case 2: + gige_pcicfg_write16(dev, reg, val); + break; + case 4: + gige_pcicfg_write32(dev, reg, val); + break; + default: + WARN_ON(1); + } + spin_unlock_irqrestore(&dev->lock, flags); + + return PCIBIOS_SUCCESSFUL; +} + +static int ssb_gige_probe(struct ssb_device *sdev, const struct ssb_device_id *id) +{ + struct ssb_gige *dev; + u32 base, tmslow, tmshigh; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + dev->dev = sdev; + + spin_lock_init(&dev->lock); + dev->pci_controller.pci_ops = &dev->pci_ops; + dev->pci_controller.io_resource = &dev->io_resource; + dev->pci_controller.mem_resource = &dev->mem_resource; + dev->pci_controller.io_map_base = 0x800; + dev->pci_ops.read = ssb_gige_pci_read_config; + dev->pci_ops.write = ssb_gige_pci_write_config; + + dev->io_resource.name = SSB_GIGE_IO_RES_NAME; + dev->io_resource.start = 0x800; + dev->io_resource.end = 0x8FF; + dev->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED; + + if (!ssb_device_is_enabled(sdev)) + ssb_device_enable(sdev, 0); + + /* Setup BAR0. This is a 64k MMIO region. */ + base = ssb_admatch_base(ssb_read32(sdev, SSB_ADMATCH1)); + gige_pcicfg_write32(dev, PCI_BASE_ADDRESS_0, base); + gige_pcicfg_write32(dev, PCI_BASE_ADDRESS_1, 0); + + dev->mem_resource.name = SSB_GIGE_MEM_RES_NAME; + dev->mem_resource.start = base; + dev->mem_resource.end = base + 0x10000 - 1; + dev->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED; + + /* Enable the memory region. */ + gige_pcicfg_write16(dev, PCI_COMMAND, + gige_pcicfg_read16(dev, PCI_COMMAND) + | PCI_COMMAND_MEMORY); + + /* Write flushing is controlled by the Flush Status Control register. + * We want to flush every register write with a timeout and we want + * to disable the IRQ mask while flushing to avoid concurrency. + * Note that automatic write flushing does _not_ work from + * an IRQ handler. The driver must flush manually by reading a register. + */ + gige_write32(dev, SSB_GIGE_SHIM_FLUSHSTAT, 0x00000068); + + /* Check if we have an RGMII or GMII PHY-bus. + * On RGMII do not bypass the DLLs */ + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmshigh = ssb_read32(sdev, SSB_TMSHIGH); + if (tmshigh & SSB_GIGE_TMSHIGH_RGMII) { + tmslow &= ~SSB_GIGE_TMSLOW_TXBYPASS; + tmslow &= ~SSB_GIGE_TMSLOW_RXBYPASS; + dev->has_rgmii = 1; + } else { + tmslow |= SSB_GIGE_TMSLOW_TXBYPASS; + tmslow |= SSB_GIGE_TMSLOW_RXBYPASS; + dev->has_rgmii = 0; + } + tmslow |= SSB_GIGE_TMSLOW_DLLEN; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + + ssb_set_drvdata(sdev, dev); + register_pci_controller(&dev->pci_controller); + + return 0; +} + +bool pdev_is_ssb_gige_core(struct pci_dev *pdev) +{ + if (!pdev->resource[0].name) + return 0; + return (strcmp(pdev->resource[0].name, SSB_GIGE_MEM_RES_NAME) == 0); +} +EXPORT_SYMBOL(pdev_is_ssb_gige_core); + +int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, + struct pci_dev *pdev) +{ + struct ssb_gige *dev = ssb_get_drvdata(sdev); + struct resource *res; + + if (pdev->bus->ops != &dev->pci_ops) { + /* The PCI device is not on this SSB GigE bridge device. */ + return -ENODEV; + } + + /* Fixup the PCI resources. */ + res = &(pdev->resource[0]); + res->flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED; + res->name = dev->mem_resource.name; + res->start = dev->mem_resource.start; + res->end = dev->mem_resource.end; + + /* Fixup interrupt lines. 
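+ * The backplane IRQ returned by ssb_mips_irq() is translated to a Linux
+ * IRQ number by adding 2, the same mapping the MIPS core driver applies
+ * to every SSB core (presumably because the first two MIPS CPU interrupt
+ * lines are the software interrupts).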
*/ + pdev->irq = ssb_mips_irq(sdev) + 2; + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, pdev->irq); + + return 0; +} + +int ssb_gige_map_irq(struct ssb_device *sdev, + const struct pci_dev *pdev) +{ + struct ssb_gige *dev = ssb_get_drvdata(sdev); + + if (pdev->bus->ops != &dev->pci_ops) { + /* The PCI device is not on this SSB GigE bridge device. */ + return -ENODEV; + } + + return ssb_mips_irq(sdev) + 2; +} + +static struct ssb_driver ssb_gige_driver = { + .name = "BCM-GigE", + .id_table = ssb_gige_tbl, + .probe = ssb_gige_probe, +}; + +int ssb_gige_init(void) +{ + return ssb_driver_register(&ssb_gige_driver); +} diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c new file mode 100644 index 0000000..97efce1 --- /dev/null +++ b/drivers/ssb/driver_mipscore.c @@ -0,0 +1,294 @@ +/* + * Sonics Silicon Backplane + * Broadcom MIPS core driver + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, 2007, Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include + +#include +#include +#include +#include + +#include "ssb_private.h" + + +static inline u32 mips_read32(struct ssb_mipscore *mcore, + u16 offset) +{ + return ssb_read32(mcore->dev, offset); +} + +static inline void mips_write32(struct ssb_mipscore *mcore, + u16 offset, + u32 value) +{ + ssb_write32(mcore->dev, offset, value); +} + +static const u32 ipsflag_irq_mask[] = { + 0, + SSB_IPSFLAG_IRQ1, + SSB_IPSFLAG_IRQ2, + SSB_IPSFLAG_IRQ3, + SSB_IPSFLAG_IRQ4, +}; + +static const u32 ipsflag_irq_shift[] = { + 0, + SSB_IPSFLAG_IRQ1_SHIFT, + SSB_IPSFLAG_IRQ2_SHIFT, + SSB_IPSFLAG_IRQ3_SHIFT, + SSB_IPSFLAG_IRQ4_SHIFT, +}; + +static inline u32 ssb_irqflag(struct ssb_device *dev) +{ + u32 tpsflag = ssb_read32(dev, SSB_TPSFLAG); + if (tpsflag) + return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG; + else + /* not irq supported */ + return 0x3f; +} + +static struct ssb_device *find_device(struct ssb_device *rdev, int irqflag) +{ + struct ssb_bus *bus = rdev->bus; + int i; + for (i = 0; i < bus->nr_devices; i++) { + struct ssb_device *dev; + dev = &(bus->devices[i]); + if (ssb_irqflag(dev) == irqflag) + return dev; + } + return NULL; +} + +/* Get the MIPS IRQ assignment for a specified device. + * If unassigned, 0 is returned. + * If disabled, 5 is returned. + * If not supported, 6 is returned. 
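+ * The value is a backplane IRQ number, not a Linux IRQ number.  Callers
+ * such as ssb_mipscore_init() below treat anything above 4 as "no usable
+ * hardware line" and otherwise add 2, roughly:
+ *
+ *	mips_irq = ssb_mips_irq(dev);
+ *	dev->irq = (mips_irq > 4) ? 0 : mips_irq + 2;
+ *
+ * (an illustrative condensation of the loop in ssb_mipscore_init(), not a
+ * literal quote.)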
+ */ +unsigned int ssb_mips_irq(struct ssb_device *dev) +{ + struct ssb_bus *bus = dev->bus; + struct ssb_device *mdev = bus->mipscore.dev; + u32 irqflag; + u32 ipsflag; + u32 tmp; + unsigned int irq; + + irqflag = ssb_irqflag(dev); + if (irqflag == 0x3f) + return 6; + ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG); + for (irq = 1; irq <= 4; irq++) { + tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]); + if (tmp == irqflag) + break; + } + if (irq == 5) { + if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC)) + irq = 0; + } + + return irq; +} + +static void clear_irq(struct ssb_bus *bus, unsigned int irq) +{ + struct ssb_device *dev = bus->mipscore.dev; + + /* Clear the IRQ in the MIPScore backplane registers */ + if (irq == 0) { + ssb_write32(dev, SSB_INTVEC, 0); + } else { + ssb_write32(dev, SSB_IPSFLAG, + ssb_read32(dev, SSB_IPSFLAG) | + ipsflag_irq_mask[irq]); + } +} + +static void set_irq(struct ssb_device *dev, unsigned int irq) +{ + unsigned int oldirq = ssb_mips_irq(dev); + struct ssb_bus *bus = dev->bus; + struct ssb_device *mdev = bus->mipscore.dev; + u32 irqflag = ssb_irqflag(dev); + + BUG_ON(oldirq == 6); + + dev->irq = irq + 2; + + /* clear the old irq */ + if (oldirq == 0) + ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))); + else if (oldirq != 5) + clear_irq(bus, oldirq); + + /* assign the new one */ + if (irq == 0) { + ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC))); + } else { + u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG); + if ((ipsflag & ipsflag_irq_mask[irq]) != ipsflag_irq_mask[irq]) { + u32 oldipsflag = (ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]; + struct ssb_device *olddev = find_device(dev, oldipsflag); + if (olddev) + set_irq(olddev, 0); + } + irqflag <<= ipsflag_irq_shift[irq]; + irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]); + ssb_write32(mdev, SSB_IPSFLAG, irqflag); + } + ssb_dprintk(KERN_INFO PFX + "set_irq: core 0x%04x, irq %d => %d\n", + dev->id.coreid, oldirq+2, irq+2); +} + +static void print_irq(struct ssb_device *dev, unsigned int irq) +{ + int i; + static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"}; + ssb_dprintk(KERN_INFO PFX + "core 0x%04x, irq :", dev->id.coreid); + for (i = 0; i <= 6; i++) { + ssb_dprintk(" %s%s", irq_name[i], i==irq?"*":" "); + } + ssb_dprintk("\n"); +} + +static void dump_irq(struct ssb_bus *bus) +{ + int i; + for (i = 0; i < bus->nr_devices; i++) { + struct ssb_device *dev; + dev = &(bus->devices[i]); + print_irq(dev, ssb_mips_irq(dev)); + } +} + +static void ssb_mips_serial_init(struct ssb_mipscore *mcore) +{ + struct ssb_bus *bus = mcore->dev->bus; + + if (bus->extif.dev) + mcore->nr_serial_ports = ssb_extif_serial_init(&bus->extif, mcore->serial_ports); + else if (bus->chipco.dev) + mcore->nr_serial_ports = ssb_chipco_serial_init(&bus->chipco, mcore->serial_ports); + else + mcore->nr_serial_ports = 0; +} + +static void ssb_mips_flash_detect(struct ssb_mipscore *mcore) +{ + struct ssb_bus *bus = mcore->dev->bus; + + mcore->flash_buswidth = 2; + if (bus->chipco.dev) { + mcore->flash_window = 0x1c000000; + mcore->flash_window_size = 0x02000000; + if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG) + & SSB_CHIPCO_CFG_DS16) == 0) + mcore->flash_buswidth = 1; + } else { + mcore->flash_window = 0x1fc00000; + mcore->flash_window_size = 0x00400000; + } +} + +u32 ssb_cpu_clock(struct ssb_mipscore *mcore) +{ + struct ssb_bus *bus = mcore->dev->bus; + u32 pll_type, n, m, rate = 0; + + if (bus->extif.dev) { + 
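+		/*
+		 * Both branches only fetch the PLL type and the N/M clock
+		 * control words; ssb_calc_clock_rate() below turns them into
+		 * a rate, with two special cases: PLL type 5 (and the 0x5365
+		 * chip) is fixed at 200 MHz, and the PLL type 6 result is
+		 * doubled.
+		 */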
ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m); + } else if (bus->chipco.dev) { + ssb_chipco_get_clockcpu(&bus->chipco, &pll_type, &n, &m); + } else + return 0; + + if ((pll_type == SSB_PLLTYPE_5) || (bus->chip_id == 0x5365)) { + rate = 200000000; + } else { + rate = ssb_calc_clock_rate(pll_type, n, m); + } + + if (pll_type == SSB_PLLTYPE_6) { + rate *= 2; + } + + return rate; +} + +void ssb_mipscore_init(struct ssb_mipscore *mcore) +{ + struct ssb_bus *bus; + struct ssb_device *dev; + unsigned long hz, ns; + unsigned int irq, i; + + if (!mcore->dev) + return; /* We don't have a MIPS core */ + + ssb_dprintk(KERN_INFO PFX "Initializing MIPS core...\n"); + + bus = mcore->dev->bus; + hz = ssb_clockspeed(bus); + if (!hz) + hz = 100000000; + ns = 1000000000 / hz; + + if (bus->extif.dev) + ssb_extif_timing_init(&bus->extif, ns); + else if (bus->chipco.dev) + ssb_chipco_timing_init(&bus->chipco, ns); + + /* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */ + for (irq = 2, i = 0; i < bus->nr_devices; i++) { + int mips_irq; + dev = &(bus->devices[i]); + mips_irq = ssb_mips_irq(dev); + if (mips_irq > 4) + dev->irq = 0; + else + dev->irq = mips_irq + 2; + if (dev->irq > 5) + continue; + switch (dev->id.coreid) { + case SSB_DEV_USB11_HOST: + /* shouldn't need a separate irq line for non-4710, most of them have a proper + * external usb controller on the pci */ + if ((bus->chip_id == 0x4710) && (irq <= 4)) { + set_irq(dev, irq++); + } + break; + case SSB_DEV_PCI: + case SSB_DEV_ETHERNET: + case SSB_DEV_ETHERNET_GBIT: + case SSB_DEV_80211: + case SSB_DEV_USB20_HOST: + /* These devices get their own IRQ line if available, the rest goes on IRQ0 */ + if (irq <= 4) { + set_irq(dev, irq++); + break; + } + /* fallthrough */ + case SSB_DEV_EXTIF: + set_irq(dev, 0); + break; + } + } + ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n"); + dump_irq(bus); + + ssb_mips_serial_init(mcore); + ssb_mips_flash_detect(mcore); +} diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c new file mode 100644 index 0000000..f1dcd79 --- /dev/null +++ b/drivers/ssb/driver_pcicore.c @@ -0,0 +1,629 @@ +/* + * Sonics Silicon Backplane + * Broadcom PCI-core driver + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, 2007, Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include +#include +#include +#include + +#include "ssb_private.h" + + +static inline +u32 pcicore_read32(struct ssb_pcicore *pc, u16 offset) +{ + return ssb_read32(pc->dev, offset); +} + +static inline +void pcicore_write32(struct ssb_pcicore *pc, u16 offset, u32 value) +{ + ssb_write32(pc->dev, offset, value); +} + +static inline +u16 pcicore_read16(struct ssb_pcicore *pc, u16 offset) +{ + return ssb_read16(pc->dev, offset); +} + +static inline +void pcicore_write16(struct ssb_pcicore *pc, u16 offset, u16 value) +{ + ssb_write16(pc->dev, offset, value); +} + +/************************************************** + * Code for hostmode operation. + **************************************************/ + +#ifdef CONFIG_SSB_PCICORE_HOSTMODE + +#include +/* Probe a 32bit value on the bus and catch bus exceptions. + * Returns nonzero on a bus exception. + * This is MIPS specific */ +#define mips_busprobe32(val, addr) get_dbe((val), ((u32 *)(addr))) + +/* Assume one-hot slot wiring */ +#define SSB_PCI_SLOT_MAX 16 + +/* Global lock is OK, as we won't have more than one extpci anyway. 
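+ * It serialises both the SSB_PCICORE_SBTOPCI1 window set-up in
+ * get_cfgspace_addr() and the config cycle issued through that window.
+ *
+ * Illustrative note, not from the original driver: slots on bus 0 are
+ * selected one-hot, device n corresponds to the bit (1 << (n + 16)), so
+ * for device 2 the window/address bit is 0x00040000.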
*/ +static DEFINE_SPINLOCK(cfgspace_lock); +/* Core to access the external PCI config space. Can only have one. */ +static struct ssb_pcicore *extpci_core; + + +static u32 get_cfgspace_addr(struct ssb_pcicore *pc, + unsigned int bus, unsigned int dev, + unsigned int func, unsigned int off) +{ + u32 addr = 0; + u32 tmp; + + /* We do only have one cardbus device behind the bridge. */ + if (pc->cardbusmode && (dev >= 1)) + goto out; + + if (bus == 0) { + /* Type 0 transaction */ + if (unlikely(dev >= SSB_PCI_SLOT_MAX)) + goto out; + /* Slide the window */ + tmp = SSB_PCICORE_SBTOPCI_CFG0; + tmp |= ((1 << (dev + 16)) & SSB_PCICORE_SBTOPCI1_MASK); + pcicore_write32(pc, SSB_PCICORE_SBTOPCI1, tmp); + /* Calculate the address */ + addr = SSB_PCI_CFG; + addr |= ((1 << (dev + 16)) & ~SSB_PCICORE_SBTOPCI1_MASK); + addr |= (func << 8); + addr |= (off & ~3); + } else { + /* Type 1 transaction */ + pcicore_write32(pc, SSB_PCICORE_SBTOPCI1, + SSB_PCICORE_SBTOPCI_CFG1); + /* Calculate the address */ + addr = SSB_PCI_CFG; + addr |= (bus << 16); + addr |= (dev << 11); + addr |= (func << 8); + addr |= (off & ~3); + } +out: + return addr; +} + +static int ssb_extpci_read_config(struct ssb_pcicore *pc, + unsigned int bus, unsigned int dev, + unsigned int func, unsigned int off, + void *buf, int len) +{ + int err = -EINVAL; + u32 addr, val; + void __iomem *mmio; + + SSB_WARN_ON(!pc->hostmode); + if (unlikely(len != 1 && len != 2 && len != 4)) + goto out; + addr = get_cfgspace_addr(pc, bus, dev, func, off); + if (unlikely(!addr)) + goto out; + err = -ENOMEM; + mmio = ioremap_nocache(addr, len); + if (!mmio) + goto out; + + if (mips_busprobe32(val, mmio)) { + val = 0xffffffff; + goto unmap; + } + + val = readl(mmio); + val >>= (8 * (off & 3)); + + switch (len) { + case 1: + *((u8 *)buf) = (u8)val; + break; + case 2: + *((u16 *)buf) = (u16)val; + break; + case 4: + *((u32 *)buf) = (u32)val; + break; + } + err = 0; +unmap: + iounmap(mmio); +out: + return err; +} + +static int ssb_extpci_write_config(struct ssb_pcicore *pc, + unsigned int bus, unsigned int dev, + unsigned int func, unsigned int off, + const void *buf, int len) +{ + int err = -EINVAL; + u32 addr, val = 0; + void __iomem *mmio; + + SSB_WARN_ON(!pc->hostmode); + if (unlikely(len != 1 && len != 2 && len != 4)) + goto out; + addr = get_cfgspace_addr(pc, bus, dev, func, off); + if (unlikely(!addr)) + goto out; + err = -ENOMEM; + mmio = ioremap_nocache(addr, len); + if (!mmio) + goto out; + + if (mips_busprobe32(val, mmio)) { + val = 0xffffffff; + goto unmap; + } + + switch (len) { + case 1: + val = readl(mmio); + val &= ~(0xFF << (8 * (off & 3))); + val |= *((const u8 *)buf) << (8 * (off & 3)); + break; + case 2: + val = readl(mmio); + val &= ~(0xFFFF << (8 * (off & 3))); + val |= *((const u16 *)buf) << (8 * (off & 3)); + break; + case 4: + val = *((const u32 *)buf); + break; + } + writel(val, mmio); + + err = 0; +unmap: + iounmap(mmio); +out: + return err; +} + +static int ssb_pcicore_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&cfgspace_lock, flags); + err = ssb_extpci_read_config(extpci_core, bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), reg, val, size); + spin_unlock_irqrestore(&cfgspace_lock, flags); + + return err ? 
PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; +} + +static int ssb_pcicore_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&cfgspace_lock, flags); + err = ssb_extpci_write_config(extpci_core, bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), reg, &val, size); + spin_unlock_irqrestore(&cfgspace_lock, flags); + + return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops ssb_pcicore_pciops = { + .read = ssb_pcicore_read_config, + .write = ssb_pcicore_write_config, +}; + +static struct resource ssb_pcicore_mem_resource = { + .name = "SSB PCIcore external memory", + .start = SSB_PCI_DMA, + .end = SSB_PCI_DMA + SSB_PCI_DMA_SZ - 1, + .flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED, +}; + +static struct resource ssb_pcicore_io_resource = { + .name = "SSB PCIcore external I/O", + .start = 0x100, + .end = 0x7FF, + .flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED, +}; + +static struct pci_controller ssb_pcicore_controller = { + .pci_ops = &ssb_pcicore_pciops, + .io_resource = &ssb_pcicore_io_resource, + .mem_resource = &ssb_pcicore_mem_resource, + .mem_offset = 0x24000000, +}; + +static u32 ssb_pcicore_pcibus_iobase = 0x100; +static u32 ssb_pcicore_pcibus_membase = SSB_PCI_DMA; + +/* This function is called when doing a pci_enable_device(). + * We must first check if the device is a device on the PCI-core bridge. */ +int ssb_pcicore_plat_dev_init(struct pci_dev *d) +{ + struct resource *res; + int pos, size; + u32 *base; + + if (d->bus->ops != &ssb_pcicore_pciops) { + /* This is not a device on the PCI-core bridge. */ + return -ENODEV; + } + + ssb_printk(KERN_INFO "PCI: Fixing up device %s\n", + pci_name(d)); + + /* Fix up resource bases */ + for (pos = 0; pos < 6; pos++) { + res = &d->resource[pos]; + if (res->flags & IORESOURCE_IO) + base = &ssb_pcicore_pcibus_iobase; + else + base = &ssb_pcicore_pcibus_membase; + res->flags |= IORESOURCE_PCI_FIXED; + if (res->end) { + size = res->end - res->start + 1; + if (*base & (size - 1)) + *base = (*base + size) & ~(size - 1); + res->start = *base; + res->end = res->start + size - 1; + *base += size; + pci_write_config_dword(d, PCI_BASE_ADDRESS_0 + (pos << 2), res->start); + } + /* Fix up PCI bridge BAR0 only */ + if (d->bus->number == 0 && PCI_SLOT(d->devfn) == 0) + break; + } + /* Fix up interrupt lines */ + d->irq = ssb_mips_irq(extpci_core->dev) + 2; + pci_write_config_byte(d, PCI_INTERRUPT_LINE, d->irq); + + return 0; +} + +/* Early PCI fixup for a device on the PCI-core bridge. */ +static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev) +{ + u8 lat; + + if (dev->bus->ops != &ssb_pcicore_pciops) { + /* This is not a device on the PCI-core bridge. 
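+ * It sits behind some other host bridge, so the generic PCI code handles
+ * it and this fixup is skipped.
+ *
+ * Illustrative note on ssb_pcicore_plat_dev_init() above, not from the
+ * original driver: a misaligned BAR base is bumped up to the next multiple
+ * of the region size before being assigned, e.g. a pending I/O base of
+ * 0x180 with a 0x100-byte region gives
+ *
+ *	(0x180 + 0x100) & ~(0x100 - 1) = 0x200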
*/ + return; + } + if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0) + return; + + ssb_printk(KERN_INFO "PCI: Fixing up bridge %s\n", pci_name(dev)); + + /* Enable PCI bridge bus mastering and memory space */ + pci_set_master(dev); + if (pcibios_enable_device(dev, ~0) < 0) { + ssb_printk(KERN_ERR "PCI: SSB bridge enable failed\n"); + return; + } + + /* Enable PCI bridge BAR1 prefetch and burst */ + pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3); + + /* Make sure our latency is high enough to handle the devices behind us */ + lat = 168; + ssb_printk(KERN_INFO "PCI: Fixing latency timer of device %s to %u\n", + pci_name(dev), lat); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); +} +DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_pcicore_fixup_pcibridge); + +/* PCI device IRQ mapping. */ +int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + if (dev->bus->ops != &ssb_pcicore_pciops) { + /* This is not a device on the PCI-core bridge. */ + return -ENODEV; + } + return ssb_mips_irq(extpci_core->dev) + 2; +} + +static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc) +{ + u32 val; + + if (WARN_ON(extpci_core)) + return; + extpci_core = pc; + + ssb_dprintk(KERN_INFO PFX "PCIcore in host mode found\n"); + /* Reset devices on the external PCI bus */ + val = SSB_PCICORE_CTL_RST_OE; + val |= SSB_PCICORE_CTL_CLK_OE; + pcicore_write32(pc, SSB_PCICORE_CTL, val); + val |= SSB_PCICORE_CTL_CLK; /* Clock on */ + pcicore_write32(pc, SSB_PCICORE_CTL, val); + udelay(150); /* Assertion time demanded by the PCI standard */ + val |= SSB_PCICORE_CTL_RST; /* Deassert RST# */ + pcicore_write32(pc, SSB_PCICORE_CTL, val); + val = SSB_PCICORE_ARBCTL_INTERN; + pcicore_write32(pc, SSB_PCICORE_ARBCTL, val); + udelay(1); /* Assertion time demanded by the PCI standard */ + + if (pc->dev->bus->has_cardbus_slot) { + ssb_dprintk(KERN_INFO PFX "CardBus slot detected\n"); + pc->cardbusmode = 1; + /* GPIO 1 resets the bridge */ + ssb_gpio_out(pc->dev->bus, 1, 1); + ssb_gpio_outen(pc->dev->bus, 1, 1); + pcicore_write16(pc, SSB_PCICORE_SPROM(0), + pcicore_read16(pc, SSB_PCICORE_SPROM(0)) + | 0x0400); + } + + /* 64MB I/O window */ + pcicore_write32(pc, SSB_PCICORE_SBTOPCI0, + SSB_PCICORE_SBTOPCI_IO); + /* 64MB config space */ + pcicore_write32(pc, SSB_PCICORE_SBTOPCI1, + SSB_PCICORE_SBTOPCI_CFG0); + /* 1GB memory window */ + pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, + SSB_PCICORE_SBTOPCI_MEM | SSB_PCI_DMA); + + /* Enable PCI bridge BAR0 prefetch and burst */ + val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; + ssb_extpci_write_config(pc, 0, 0, 0, PCI_COMMAND, &val, 2); + /* Clear error conditions */ + val = 0; + ssb_extpci_write_config(pc, 0, 0, 0, PCI_STATUS, &val, 2); + + /* Enable PCI interrupts */ + pcicore_write32(pc, SSB_PCICORE_IMASK, + SSB_PCICORE_IMASK_INTA); + + /* Ok, ready to run, register it to the system. + * The following needs change, if we want to port hostmode + * to non-MIPS platform. */ + ssb_pcicore_controller.io_map_base = (unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000); + set_io_port_base(ssb_pcicore_controller.io_map_base); + /* Give some time to the PCI controller to configure itself with the new + * values. Not waiting at this point causes crashes of the machine. 
*/ + mdelay(10); + register_pci_controller(&ssb_pcicore_controller); +} + +static int pcicore_is_in_hostmode(struct ssb_pcicore *pc) +{ + struct ssb_bus *bus = pc->dev->bus; + u16 chipid_top; + u32 tmp; + + chipid_top = (bus->chip_id & 0xFF00); + if (chipid_top != 0x4700 && + chipid_top != 0x5300) + return 0; + + if (bus->sprom.boardflags_lo & SSB_PCICORE_BFL_NOPCI) + return 0; + + /* The 200-pin BCM4712 package does not bond out PCI. Even when + * PCI is bonded out, some boards may leave the pins floating. */ + if (bus->chip_id == 0x4712) { + if (bus->chip_package == SSB_CHIPPACK_BCM4712S) + return 0; + if (bus->chip_package == SSB_CHIPPACK_BCM4712M) + return 0; + } + if (bus->chip_id == 0x5350) + return 0; + + return !mips_busprobe32(tmp, (bus->mmio + (pc->dev->core_index * SSB_CORE_SIZE))); +} +#endif /* CONFIG_SSB_PCICORE_HOSTMODE */ + + +/************************************************** + * Generic and Clientmode operation code. + **************************************************/ + +static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc) +{ + /* Disable PCI interrupts. */ + ssb_write32(pc->dev, SSB_INTVEC, 0); +} + +void ssb_pcicore_init(struct ssb_pcicore *pc) +{ + struct ssb_device *dev = pc->dev; + struct ssb_bus *bus; + + if (!dev) + return; + bus = dev->bus; + if (!ssb_device_is_enabled(dev)) + ssb_device_enable(dev, 0); + +#ifdef CONFIG_SSB_PCICORE_HOSTMODE + pc->hostmode = pcicore_is_in_hostmode(pc); + if (pc->hostmode) + ssb_pcicore_init_hostmode(pc); +#endif /* CONFIG_SSB_PCICORE_HOSTMODE */ + if (!pc->hostmode) + ssb_pcicore_init_clientmode(pc); +} + +static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address) +{ + pcicore_write32(pc, 0x130, address); + return pcicore_read32(pc, 0x134); +} + +static void ssb_pcie_write(struct ssb_pcicore *pc, u32 address, u32 data) +{ + pcicore_write32(pc, 0x130, address); + pcicore_write32(pc, 0x134, data); +} + +static void ssb_pcie_mdio_write(struct ssb_pcicore *pc, u8 device, + u8 address, u16 data) +{ + const u16 mdio_control = 0x128; + const u16 mdio_data = 0x12C; + u32 v; + int i; + + v = 0x80; /* Enable Preamble Sequence */ + v |= 0x2; /* MDIO Clock Divisor */ + pcicore_write32(pc, mdio_control, v); + + v = (1 << 30); /* Start of Transaction */ + v |= (1 << 28); /* Write Transaction */ + v |= (1 << 17); /* Turnaround */ + v |= (u32)device << 22; + v |= (u32)address << 18; + v |= data; + pcicore_write32(pc, mdio_data, v); + /* Wait for the device to complete the transaction */ + udelay(10); + for (i = 0; i < 10; i++) { + v = pcicore_read32(pc, mdio_control); + if (v & 0x100 /* Trans complete */) + break; + msleep(1); + } + pcicore_write32(pc, mdio_control, 0); +} + +static void ssb_broadcast_value(struct ssb_device *dev, + u32 address, u32 data) +{ + /* This is used for both, PCI and ChipCommon core, so be careful. */ + BUILD_BUG_ON(SSB_PCICORE_BCAST_ADDR != SSB_CHIPCO_BCAST_ADDR); + BUILD_BUG_ON(SSB_PCICORE_BCAST_DATA != SSB_CHIPCO_BCAST_DATA); + + ssb_write32(dev, SSB_PCICORE_BCAST_ADDR, address); + ssb_read32(dev, SSB_PCICORE_BCAST_ADDR); /* flush */ + ssb_write32(dev, SSB_PCICORE_BCAST_DATA, data); + ssb_read32(dev, SSB_PCICORE_BCAST_DATA); /* flush */ +} + +static void ssb_commit_settings(struct ssb_bus *bus) +{ + struct ssb_device *dev; + + dev = bus->chipco.dev ? bus->chipco.dev : bus->pcicore.dev; + if (WARN_ON(!dev)) + return; + /* This forces an update of the cached registers. 
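+ * The zero write below goes out through the backplane broadcast
+ * address/data pair (ssb_broadcast_value() above), so that register
+ * changes made by the caller, e.g. the SSB_IMCFGLO timeouts set in
+ * ssb_pcicore_dev_irqvecs_enable() below, actually take effect.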
*/ + ssb_broadcast_value(dev, 0xFD8, 0); +} + +int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, + struct ssb_device *dev) +{ + struct ssb_device *pdev = pc->dev; + struct ssb_bus *bus; + int err = 0; + u32 tmp; + + if (dev->bus->bustype != SSB_BUSTYPE_PCI) { + /* This SSB device is not on a PCI host-bus. So the IRQs are + * not routed through the PCI core. + * So we must not enable routing through the PCI core. */ + goto out; + } + + if (!pdev) + goto out; + bus = pdev->bus; + + might_sleep_if(pdev->id.coreid != SSB_DEV_PCI); + + /* Enable interrupts for this device. */ + if ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE)) { + u32 coremask; + + /* Calculate the "coremask" for the device. */ + coremask = (1 << dev->core_index); + + SSB_WARN_ON(bus->bustype != SSB_BUSTYPE_PCI); + err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp); + if (err) + goto out; + tmp |= coremask << 8; + err = pci_write_config_dword(bus->host_pci, SSB_PCI_IRQMASK, tmp); + if (err) + goto out; + } else { + u32 intvec; + + intvec = ssb_read32(pdev, SSB_INTVEC); + tmp = ssb_read32(dev, SSB_TPSFLAG); + tmp &= SSB_TPSFLAG_BPFLAG; + intvec |= (1 << tmp); + ssb_write32(pdev, SSB_INTVEC, intvec); + } + + /* Setup PCIcore operation. */ + if (pc->setup_done) + goto out; + if (pdev->id.coreid == SSB_DEV_PCI) { + tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2); + tmp |= SSB_PCICORE_SBTOPCI_PREF; + tmp |= SSB_PCICORE_SBTOPCI_BURST; + pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp); + + if (pdev->id.revision < 5) { + tmp = ssb_read32(pdev, SSB_IMCFGLO); + tmp &= ~SSB_IMCFGLO_SERTO; + tmp |= 2; + tmp &= ~SSB_IMCFGLO_REQTO; + tmp |= 3 << SSB_IMCFGLO_REQTO_SHIFT; + ssb_write32(pdev, SSB_IMCFGLO, tmp); + ssb_commit_settings(bus); + } else if (pdev->id.revision >= 11) { + tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2); + tmp |= SSB_PCICORE_SBTOPCI_MRM; + pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp); + } + } else { + WARN_ON(pdev->id.coreid != SSB_DEV_PCIE); + //TODO: Better make defines for all these magic PCIE values. + if ((pdev->id.revision == 0) || (pdev->id.revision == 1)) { + /* TLP Workaround register. */ + tmp = ssb_pcie_read(pc, 0x4); + tmp |= 0x8; + ssb_pcie_write(pc, 0x4, tmp); + } + if (pdev->id.revision == 0) { + const u8 serdes_rx_device = 0x1F; + + ssb_pcie_mdio_write(pc, serdes_rx_device, + 2 /* Timer */, 0x8128); + ssb_pcie_mdio_write(pc, serdes_rx_device, + 6 /* CDR */, 0x0100); + ssb_pcie_mdio_write(pc, serdes_rx_device, + 7 /* CDR BW */, 0x1466); + } else if (pdev->id.revision == 1) { + /* DLLP Link Control register. */ + tmp = ssb_pcie_read(pc, 0x100); + tmp |= 0x40; + ssb_pcie_write(pc, 0x100, tmp); + } + } + pc->setup_done = 1; +out: + return err; +} +EXPORT_SYMBOL(ssb_pcicore_dev_irqvecs_enable); diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c new file mode 100644 index 0000000..a0e0d24 --- /dev/null +++ b/drivers/ssb/embedded.c @@ -0,0 +1,223 @@ +/* + * Sonics Silicon Backplane + * Embedded systems support code + * + * Copyright 2005-2008, Broadcom Corporation + * Copyright 2006-2008, Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. 
+ */ + +#include +#include +#include +#include +#include + +#include "ssb_private.h" + + +int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks) +{ + if (ssb_chipco_available(&bus->chipco)) { + ssb_chipco_watchdog_timer_set(&bus->chipco, ticks); + return 0; + } + if (ssb_extif_available(&bus->extif)) { + ssb_extif_watchdog_timer_set(&bus->extif, ticks); + return 0; + } + return -ENODEV; +} +EXPORT_SYMBOL(ssb_watchdog_timer_set); + +u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask) +{ + unsigned long flags; + u32 res = 0; + + spin_lock_irqsave(&bus->gpio_lock, flags); + if (ssb_chipco_available(&bus->chipco)) + res = ssb_chipco_gpio_in(&bus->chipco, mask); + else if (ssb_extif_available(&bus->extif)) + res = ssb_extif_gpio_in(&bus->extif, mask); + else + SSB_WARN_ON(1); + spin_unlock_irqrestore(&bus->gpio_lock, flags); + + return res; +} +EXPORT_SYMBOL(ssb_gpio_in); + +u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value) +{ + unsigned long flags; + u32 res = 0; + + spin_lock_irqsave(&bus->gpio_lock, flags); + if (ssb_chipco_available(&bus->chipco)) + res = ssb_chipco_gpio_out(&bus->chipco, mask, value); + else if (ssb_extif_available(&bus->extif)) + res = ssb_extif_gpio_out(&bus->extif, mask, value); + else + SSB_WARN_ON(1); + spin_unlock_irqrestore(&bus->gpio_lock, flags); + + return res; +} +EXPORT_SYMBOL(ssb_gpio_out); + +u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value) +{ + unsigned long flags; + u32 res = 0; + + spin_lock_irqsave(&bus->gpio_lock, flags); + if (ssb_chipco_available(&bus->chipco)) + res = ssb_chipco_gpio_outen(&bus->chipco, mask, value); + else if (ssb_extif_available(&bus->extif)) + res = ssb_extif_gpio_outen(&bus->extif, mask, value); + else + SSB_WARN_ON(1); + spin_unlock_irqrestore(&bus->gpio_lock, flags); + + return res; +} +EXPORT_SYMBOL(ssb_gpio_outen); + +u32 ssb_gpio_control(struct ssb_bus *bus, u32 mask, u32 value) +{ + unsigned long flags; + u32 res = 0; + + spin_lock_irqsave(&bus->gpio_lock, flags); + if (ssb_chipco_available(&bus->chipco)) + res = ssb_chipco_gpio_control(&bus->chipco, mask, value); + spin_unlock_irqrestore(&bus->gpio_lock, flags); + + return res; +} +EXPORT_SYMBOL(ssb_gpio_control); + +u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value) +{ + unsigned long flags; + u32 res = 0; + + spin_lock_irqsave(&bus->gpio_lock, flags); + if (ssb_chipco_available(&bus->chipco)) + res = ssb_chipco_gpio_intmask(&bus->chipco, mask, value); + else if (ssb_extif_available(&bus->extif)) + res = ssb_extif_gpio_intmask(&bus->extif, mask, value); + else + SSB_WARN_ON(1); + spin_unlock_irqrestore(&bus->gpio_lock, flags); + + return res; +} +EXPORT_SYMBOL(ssb_gpio_intmask); + +u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value) +{ + unsigned long flags; + u32 res = 0; + + spin_lock_irqsave(&bus->gpio_lock, flags); + if (ssb_chipco_available(&bus->chipco)) + res = ssb_chipco_gpio_polarity(&bus->chipco, mask, value); + else if (ssb_extif_available(&bus->extif)) + res = ssb_extif_gpio_polarity(&bus->extif, mask, value); + else + SSB_WARN_ON(1); + spin_unlock_irqrestore(&bus->gpio_lock, flags); + + return res; +} +EXPORT_SYMBOL(ssb_gpio_polarity); + +#ifdef CONFIG_SSB_DRIVER_GIGE +static int gige_pci_init_callback(struct ssb_bus *bus, unsigned long data) +{ + struct pci_dev *pdev = (struct pci_dev *)data; + struct ssb_device *dev; + unsigned int i; + int res; + + for (i = 0; i < bus->nr_devices; i++) { + dev = &(bus->devices[i]); + if (dev->id.coreid != SSB_DEV_ETHERNET_GBIT) + continue; + if (!dev->dev || + !dev->dev->driver || 
+ !device_is_registered(dev->dev)) + continue; + res = ssb_gige_pcibios_plat_dev_init(dev, pdev); + if (res >= 0) + return res; + } + + return -ENODEV; +} +#endif /* CONFIG_SSB_DRIVER_GIGE */ + +int ssb_pcibios_plat_dev_init(struct pci_dev *dev) +{ + int err; + + err = ssb_pcicore_plat_dev_init(dev); + if (!err) + return 0; +#ifdef CONFIG_SSB_DRIVER_GIGE + err = ssb_for_each_bus_call((unsigned long)dev, gige_pci_init_callback); + if (err >= 0) + return err; +#endif + /* This is not a PCI device on any SSB device. */ + + return -ENODEV; +} + +#ifdef CONFIG_SSB_DRIVER_GIGE +static int gige_map_irq_callback(struct ssb_bus *bus, unsigned long data) +{ + const struct pci_dev *pdev = (const struct pci_dev *)data; + struct ssb_device *dev; + unsigned int i; + int res; + + for (i = 0; i < bus->nr_devices; i++) { + dev = &(bus->devices[i]); + if (dev->id.coreid != SSB_DEV_ETHERNET_GBIT) + continue; + if (!dev->dev || + !dev->dev->driver || + !device_is_registered(dev->dev)) + continue; + res = ssb_gige_map_irq(dev, pdev); + if (res >= 0) + return res; + } + + return -ENODEV; +} +#endif /* CONFIG_SSB_DRIVER_GIGE */ + +int ssb_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + int res; + + /* Check if this PCI device is a device on a SSB bus or device + * and return the IRQ number for it. */ + + res = ssb_pcicore_pcibios_map_irq(dev, slot, pin); + if (res >= 0) + return res; +#ifdef CONFIG_SSB_DRIVER_GIGE + res = ssb_for_each_bus_call((unsigned long)dev, gige_map_irq_callback); + if (res >= 0) + return res; +#endif + /* This is not a PCI device on any SSB device. */ + + return -ENODEV; +} diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c new file mode 100644 index 0000000..03dfd27 --- /dev/null +++ b/drivers/ssb/main.c @@ -0,0 +1,1453 @@ +/* + * Sonics Silicon Backplane + * Subsystem core + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, 2007, Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include "ssb_private.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +MODULE_DESCRIPTION("Sonics Silicon Backplane driver"); +MODULE_LICENSE("GPL"); + + +/* Temporary list of yet-to-be-attached buses */ +static LIST_HEAD(attach_queue); +/* List if running buses */ +static LIST_HEAD(buses); +/* Software ID counter */ +static unsigned int next_busnumber; +/* buses_mutes locks the two buslists and the next_busnumber. + * Don't lock this directly, but use ssb_buses_[un]lock() below. */ +static DEFINE_MUTEX(buses_mutex); + +/* There are differences in the codeflow, if the bus is + * initialized from early boot, as various needed services + * are not available early. This is a mechanism to delay + * these initializations to after early boot has finished. + * It's also used to avoid mutex locking, as that's not + * available and needed early. 
*/ +static bool ssb_is_early_boot = 1; + +static void ssb_buses_lock(void); +static void ssb_buses_unlock(void); + + +#ifdef CONFIG_SSB_PCIHOST +struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev) +{ + struct ssb_bus *bus; + + ssb_buses_lock(); + list_for_each_entry(bus, &buses, list) { + if (bus->bustype == SSB_BUSTYPE_PCI && + bus->host_pci == pdev) + goto found; + } + bus = NULL; +found: + ssb_buses_unlock(); + + return bus; +} +#endif /* CONFIG_SSB_PCIHOST */ + +#ifdef CONFIG_SSB_PCMCIAHOST +struct ssb_bus *ssb_pcmcia_dev_to_bus(struct pcmcia_device *pdev) +{ + struct ssb_bus *bus; + + ssb_buses_lock(); + list_for_each_entry(bus, &buses, list) { + if (bus->bustype == SSB_BUSTYPE_PCMCIA && + bus->host_pcmcia == pdev) + goto found; + } + bus = NULL; +found: + ssb_buses_unlock(); + + return bus; +} +#endif /* CONFIG_SSB_PCMCIAHOST */ + +#ifdef CONFIG_SSB_SDIOHOST +struct ssb_bus *ssb_sdio_func_to_bus(struct sdio_func *func) +{ + struct ssb_bus *bus; + + ssb_buses_lock(); + list_for_each_entry(bus, &buses, list) { + if (bus->bustype == SSB_BUSTYPE_SDIO && + bus->host_sdio == func) + goto found; + } + bus = NULL; +found: + ssb_buses_unlock(); + + return bus; +} +#endif /* CONFIG_SSB_SDIOHOST */ + +int ssb_for_each_bus_call(unsigned long data, + int (*func)(struct ssb_bus *bus, unsigned long data)) +{ + struct ssb_bus *bus; + int res; + + ssb_buses_lock(); + list_for_each_entry(bus, &buses, list) { + res = func(bus, data); + if (res >= 0) { + ssb_buses_unlock(); + return res; + } + } + ssb_buses_unlock(); + + return -ENODEV; +} + +static struct ssb_device *ssb_device_get(struct ssb_device *dev) +{ + if (dev) + get_device(dev->dev); + return dev; +} + +static void ssb_device_put(struct ssb_device *dev) +{ + if (dev) + put_device(dev->dev); +} + +static inline struct ssb_driver *ssb_driver_get(struct ssb_driver *drv) +{ + if (drv) + get_driver(&drv->drv); + return drv; +} + +static inline void ssb_driver_put(struct ssb_driver *drv) +{ + if (drv) + put_driver(&drv->drv); +} + +static int ssb_device_resume(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv; + int err = 0; + + if (dev->driver) { + ssb_drv = drv_to_ssb_drv(dev->driver); + if (ssb_drv && ssb_drv->resume) + err = ssb_drv->resume(ssb_dev); + if (err) + goto out; + } +out: + return err; +} + +static int ssb_device_suspend(struct device *dev, pm_message_t state) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv; + int err = 0; + + if (dev->driver) { + ssb_drv = drv_to_ssb_drv(dev->driver); + if (ssb_drv && ssb_drv->suspend) + err = ssb_drv->suspend(ssb_dev, state); + if (err) + goto out; + } +out: + return err; +} + +int ssb_bus_resume(struct ssb_bus *bus) +{ + int err; + + /* Reset HW state information in memory, so that HW is + * completely reinitialized. */ + bus->mapped_device = NULL; +#ifdef CONFIG_SSB_DRIVER_PCICORE + bus->pcicore.setup_done = 0; +#endif + + err = ssb_bus_powerup(bus, 0); + if (err) + return err; + err = ssb_pcmcia_hardware_setup(bus); + if (err) { + ssb_bus_may_powerdown(bus); + return err; + } + ssb_chipco_resume(&bus->chipco); + ssb_bus_may_powerdown(bus); + + return 0; +} +EXPORT_SYMBOL(ssb_bus_resume); + +int ssb_bus_suspend(struct ssb_bus *bus) +{ + ssb_chipco_suspend(&bus->chipco); + ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 0); + + return 0; +} +EXPORT_SYMBOL(ssb_bus_suspend); + +#ifdef CONFIG_SSB_SPROM +/** ssb_devices_freeze - Freeze all devices on the bus. 
+ * + * After freezing no device driver will be handling a device + * on this bus anymore. ssb_devices_thaw() must be called after + * a successful freeze to reactivate the devices. + * + * @bus: The bus. + * @ctx: Context structure. Pass this to ssb_devices_thaw(). + */ +int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx) +{ + struct ssb_device *sdev; + struct ssb_driver *sdrv; + unsigned int i; + + memset(ctx, 0, sizeof(*ctx)); + ctx->bus = bus; + SSB_WARN_ON(bus->nr_devices > ARRAY_SIZE(ctx->device_frozen)); + + for (i = 0; i < bus->nr_devices; i++) { + sdev = ssb_device_get(&bus->devices[i]); + + if (!sdev->dev || !sdev->dev->driver || + !device_is_registered(sdev->dev)) { + ssb_device_put(sdev); + continue; + } + sdrv = ssb_driver_get(drv_to_ssb_drv(sdev->dev->driver)); + if (!sdrv || SSB_WARN_ON(!sdrv->remove)) { + ssb_device_put(sdev); + continue; + } + sdrv->remove(sdev); + ctx->device_frozen[i] = 1; + } + + return 0; +} + +/** ssb_devices_thaw - Unfreeze all devices on the bus. + * + * This will re-attach the device drivers and re-init the devices. + * + * @ctx: The context structure from ssb_devices_freeze() + */ +int ssb_devices_thaw(struct ssb_freeze_context *ctx) +{ + struct ssb_bus *bus = ctx->bus; + struct ssb_device *sdev; + struct ssb_driver *sdrv; + unsigned int i; + int err, result = 0; + + for (i = 0; i < bus->nr_devices; i++) { + if (!ctx->device_frozen[i]) + continue; + sdev = &bus->devices[i]; + + if (SSB_WARN_ON(!sdev->dev || !sdev->dev->driver)) + continue; + sdrv = drv_to_ssb_drv(sdev->dev->driver); + if (SSB_WARN_ON(!sdrv || !sdrv->probe)) + continue; + + err = sdrv->probe(sdev, &sdev->id); + if (err) { + ssb_printk(KERN_ERR PFX "Failed to thaw device %s\n", + dev_name(sdev->dev)); + result = err; + } + ssb_driver_put(sdrv); + ssb_device_put(sdev); + } + + return result; +} +#endif /* CONFIG_SSB_SPROM */ + +static void ssb_device_shutdown(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv; + + if (!dev->driver) + return; + ssb_drv = drv_to_ssb_drv(dev->driver); + if (ssb_drv && ssb_drv->shutdown) + ssb_drv->shutdown(ssb_dev); +} + +static int ssb_device_remove(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv = drv_to_ssb_drv(dev->driver); + + if (ssb_drv && ssb_drv->remove) + ssb_drv->remove(ssb_dev); + ssb_device_put(ssb_dev); + + return 0; +} + +static int ssb_device_probe(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv = drv_to_ssb_drv(dev->driver); + int err = 0; + + ssb_device_get(ssb_dev); + if (ssb_drv && ssb_drv->probe) + err = ssb_drv->probe(ssb_dev, &ssb_dev->id); + if (err) + ssb_device_put(ssb_dev); + + return err; +} + +static int ssb_match_devid(const struct ssb_device_id *tabid, + const struct ssb_device_id *devid) +{ + if ((tabid->vendor != devid->vendor) && + tabid->vendor != SSB_ANY_VENDOR) + return 0; + if ((tabid->coreid != devid->coreid) && + tabid->coreid != SSB_ANY_ID) + return 0; + if ((tabid->revision != devid->revision) && + tabid->revision != SSB_ANY_REV) + return 0; + return 1; +} + +static int ssb_bus_match(struct device *dev, struct device_driver *drv) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv = drv_to_ssb_drv(drv); + const struct ssb_device_id *id; + + for (id = ssb_drv->id_table; + id->vendor || id->coreid || id->revision; + id++) { + if (ssb_match_devid(id, &ssb_dev->id)) + return 1; /* found */ + } + + 
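+	/*
+	 * No entry of the driver's table matched this core.
+	 *
+	 * Illustrative sketch, not from this file: a driver advertises the
+	 * cores it supports with a table such as
+	 *
+	 *	static const struct ssb_device_id foo_tbl[] = {
+	 *		SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211,
+	 *			   SSB_ANY_REV),
+	 *		SSB_DEVTABLE_END
+	 *	};
+	 *
+	 * and each entry is matched against the core's vendor/coreid/revision
+	 * by ssb_match_devid() above ("foo_tbl" is a made-up name).
+	 */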
return 0; +} + +static int ssb_device_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + + if (!dev) + return -ENODEV; + + return add_uevent_var(env, + "MODALIAS=ssb:v%04Xid%04Xrev%02X", + ssb_dev->id.vendor, ssb_dev->id.coreid, + ssb_dev->id.revision); +} + +static struct bus_type ssb_bustype = { + .name = "ssb", + .match = ssb_bus_match, + .probe = ssb_device_probe, + .remove = ssb_device_remove, + .shutdown = ssb_device_shutdown, + .suspend = ssb_device_suspend, + .resume = ssb_device_resume, + .uevent = ssb_device_uevent, +}; + +static void ssb_buses_lock(void) +{ + /* See the comment at the ssb_is_early_boot definition */ + if (!ssb_is_early_boot) + mutex_lock(&buses_mutex); +} + +static void ssb_buses_unlock(void) +{ + /* See the comment at the ssb_is_early_boot definition */ + if (!ssb_is_early_boot) + mutex_unlock(&buses_mutex); +} + +static void ssb_devices_unregister(struct ssb_bus *bus) +{ + struct ssb_device *sdev; + int i; + + for (i = bus->nr_devices - 1; i >= 0; i--) { + sdev = &(bus->devices[i]); + if (sdev->dev) + device_unregister(sdev->dev); + } +} + +void ssb_bus_unregister(struct ssb_bus *bus) +{ + ssb_buses_lock(); + ssb_devices_unregister(bus); + list_del(&bus->list); + ssb_buses_unlock(); + + ssb_pcmcia_exit(bus); + ssb_pci_exit(bus); + ssb_iounmap(bus); +} +EXPORT_SYMBOL(ssb_bus_unregister); + +static void ssb_release_dev(struct device *dev) +{ + struct __ssb_dev_wrapper *devwrap; + + devwrap = container_of(dev, struct __ssb_dev_wrapper, dev); + kfree(devwrap); +} + +static int ssb_devices_register(struct ssb_bus *bus) +{ + struct ssb_device *sdev; + struct device *dev; + struct __ssb_dev_wrapper *devwrap; + int i, err = 0; + int dev_idx = 0; + + for (i = 0; i < bus->nr_devices; i++) { + sdev = &(bus->devices[i]); + + /* We don't register SSB-system devices to the kernel, + * as the drivers for them are built into SSB. */ + switch (sdev->id.coreid) { + case SSB_DEV_CHIPCOMMON: + case SSB_DEV_PCI: + case SSB_DEV_PCIE: + case SSB_DEV_PCMCIA: + case SSB_DEV_MIPS: + case SSB_DEV_MIPS_3302: + case SSB_DEV_EXTIF: + continue; + } + + devwrap = kzalloc(sizeof(*devwrap), GFP_KERNEL); + if (!devwrap) { + ssb_printk(KERN_ERR PFX + "Could not allocate device\n"); + err = -ENOMEM; + goto error; + } + dev = &devwrap->dev; + devwrap->sdev = sdev; + + dev->release = ssb_release_dev; + dev->bus = &ssb_bustype; + dev_set_name(dev, "ssb%u:%d", bus->busnumber, dev_idx); + + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + sdev->irq = bus->host_pci->irq; + dev->parent = &bus->host_pci->dev; +#endif + break; + case SSB_BUSTYPE_PCMCIA: +#ifdef CONFIG_SSB_PCMCIAHOST + sdev->irq = bus->host_pcmcia->irq.AssignedIRQ; + dev->parent = &bus->host_pcmcia->dev; +#endif + break; + case SSB_BUSTYPE_SDIO: +#ifdef CONFIG_SSB_SDIOHOST + dev->parent = &bus->host_sdio->dev; +#endif + break; + case SSB_BUSTYPE_SSB: + dev->dma_mask = &dev->coherent_dma_mask; + break; + } + + sdev->dev = dev; + err = device_register(dev); + if (err) { + ssb_printk(KERN_ERR PFX + "Could not register %s\n", + dev_name(dev)); + /* Set dev to NULL to not unregister + * dev on error unwinding. */ + sdev->dev = NULL; + kfree(devwrap); + goto error; + } + dev_idx++; + } + + return 0; +error: + /* Unwind the already registered devices. 
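+	 * The device that failed had sdev->dev reset to NULL above, so
+	 * ssb_devices_unregister() only drops the devices that were
+	 * registered successfully.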
*/ + ssb_devices_unregister(bus); + return err; +} + +/* Needs ssb_buses_lock() */ +static int ssb_attach_queued_buses(void) +{ + struct ssb_bus *bus, *n; + int err = 0; + int drop_them_all = 0; + + list_for_each_entry_safe(bus, n, &attach_queue, list) { + if (drop_them_all) { + list_del(&bus->list); + continue; + } + /* Can't init the PCIcore in ssb_bus_register(), as that + * is too early in boot for embedded systems + * (no udelay() available). So do it here in attach stage. + */ + err = ssb_bus_powerup(bus, 0); + if (err) + goto error; + ssb_pcicore_init(&bus->pcicore); + ssb_bus_may_powerdown(bus); + + err = ssb_devices_register(bus); +error: + if (err) { + drop_them_all = 1; + list_del(&bus->list); + continue; + } + list_move_tail(&bus->list, &buses); + } + + return err; +} + +static u8 ssb_ssb_read8(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + + offset += dev->core_index * SSB_CORE_SIZE; + return readb(bus->mmio + offset); +} + +static u16 ssb_ssb_read16(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + + offset += dev->core_index * SSB_CORE_SIZE; + return readw(bus->mmio + offset); +} + +static u32 ssb_ssb_read32(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + + offset += dev->core_index * SSB_CORE_SIZE; + return readl(bus->mmio + offset); +} + +#ifdef CONFIG_SSB_BLOCKIO +static void ssb_ssb_block_read(struct ssb_device *dev, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + struct ssb_bus *bus = dev->bus; + void __iomem *addr; + + offset += dev->core_index * SSB_CORE_SIZE; + addr = bus->mmio + offset; + + switch (reg_width) { + case sizeof(u8): { + u8 *buf = buffer; + + while (count) { + *buf = __raw_readb(addr); + buf++; + count--; + } + break; + } + case sizeof(u16): { + __le16 *buf = buffer; + + SSB_WARN_ON(count & 1); + while (count) { + *buf = (__force __le16)__raw_readw(addr); + buf++; + count -= 2; + } + break; + } + case sizeof(u32): { + __le32 *buf = buffer; + + SSB_WARN_ON(count & 3); + while (count) { + *buf = (__force __le32)__raw_readl(addr); + buf++; + count -= 4; + } + break; + } + default: + SSB_WARN_ON(1); + } +} +#endif /* CONFIG_SSB_BLOCKIO */ + +static void ssb_ssb_write8(struct ssb_device *dev, u16 offset, u8 value) +{ + struct ssb_bus *bus = dev->bus; + + offset += dev->core_index * SSB_CORE_SIZE; + writeb(value, bus->mmio + offset); +} + +static void ssb_ssb_write16(struct ssb_device *dev, u16 offset, u16 value) +{ + struct ssb_bus *bus = dev->bus; + + offset += dev->core_index * SSB_CORE_SIZE; + writew(value, bus->mmio + offset); +} + +static void ssb_ssb_write32(struct ssb_device *dev, u16 offset, u32 value) +{ + struct ssb_bus *bus = dev->bus; + + offset += dev->core_index * SSB_CORE_SIZE; + writel(value, bus->mmio + offset); +} + +#ifdef CONFIG_SSB_BLOCKIO +static void ssb_ssb_block_write(struct ssb_device *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + struct ssb_bus *bus = dev->bus; + void __iomem *addr; + + offset += dev->core_index * SSB_CORE_SIZE; + addr = bus->mmio + offset; + + switch (reg_width) { + case sizeof(u8): { + const u8 *buf = buffer; + + while (count) { + __raw_writeb(*buf, addr); + buf++; + count--; + } + break; + } + case sizeof(u16): { + const __le16 *buf = buffer; + + SSB_WARN_ON(count & 1); + while (count) { + __raw_writew((__force u16)(*buf), addr); + buf++; + count -= 2; + } + break; + } + case sizeof(u32): { + const __le32 *buf = buffer; + + SSB_WARN_ON(count & 3); + while (count) { + __raw_writel((__force 
u32)(*buf), addr); + buf++; + count -= 4; + } + break; + } + default: + SSB_WARN_ON(1); + } +} +#endif /* CONFIG_SSB_BLOCKIO */ + +/* Ops for the plain SSB bus without a host-device (no PCI or PCMCIA). */ +static const struct ssb_bus_ops ssb_ssb_ops = { + .read8 = ssb_ssb_read8, + .read16 = ssb_ssb_read16, + .read32 = ssb_ssb_read32, + .write8 = ssb_ssb_write8, + .write16 = ssb_ssb_write16, + .write32 = ssb_ssb_write32, +#ifdef CONFIG_SSB_BLOCKIO + .block_read = ssb_ssb_block_read, + .block_write = ssb_ssb_block_write, +#endif +}; + +static int ssb_fetch_invariants(struct ssb_bus *bus, + ssb_invariants_func_t get_invariants) +{ + struct ssb_init_invariants iv; + int err; + + memset(&iv, 0, sizeof(iv)); + err = get_invariants(bus, &iv); + if (err) + goto out; + memcpy(&bus->boardinfo, &iv.boardinfo, sizeof(iv.boardinfo)); + memcpy(&bus->sprom, &iv.sprom, sizeof(iv.sprom)); + bus->has_cardbus_slot = iv.has_cardbus_slot; +out: + return err; +} + +static int ssb_bus_register(struct ssb_bus *bus, + ssb_invariants_func_t get_invariants, + unsigned long baseaddr) +{ + int err; + + spin_lock_init(&bus->bar_lock); + INIT_LIST_HEAD(&bus->list); +#ifdef CONFIG_SSB_EMBEDDED + spin_lock_init(&bus->gpio_lock); +#endif + + /* Powerup the bus */ + err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1); + if (err) + goto out; + + /* Init SDIO-host device (if any), before the scan */ + err = ssb_sdio_init(bus); + if (err) + goto err_disable_xtal; + + ssb_buses_lock(); + bus->busnumber = next_busnumber; + /* Scan for devices (cores) */ + err = ssb_bus_scan(bus, baseaddr); + if (err) + goto err_sdio_exit; + + /* Init PCI-host device (if any) */ + err = ssb_pci_init(bus); + if (err) + goto err_unmap; + /* Init PCMCIA-host device (if any) */ + err = ssb_pcmcia_init(bus); + if (err) + goto err_pci_exit; + + /* Initialize basic system devices (if available) */ + err = ssb_bus_powerup(bus, 0); + if (err) + goto err_pcmcia_exit; + ssb_chipcommon_init(&bus->chipco); + ssb_mipscore_init(&bus->mipscore); + err = ssb_fetch_invariants(bus, get_invariants); + if (err) { + ssb_bus_may_powerdown(bus); + goto err_pcmcia_exit; + } + ssb_bus_may_powerdown(bus); + + /* Queue it for attach. + * See the comment at the ssb_is_early_boot definition. 
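+	 * In early boot the bus simply stays on attach_queue; it is
+	 * attached later by ssb_attach_queued_buses() from ssb_modinit().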
 */
+	list_add_tail(&bus->list, &attach_queue);
+	if (!ssb_is_early_boot) {
+		/* This is not early boot, so we must attach the bus now */
+		err = ssb_attach_queued_buses();
+		if (err)
+			goto err_dequeue;
+	}
+	next_busnumber++;
+	ssb_buses_unlock();
+
+out:
+	return err;
+
+err_dequeue:
+	list_del(&bus->list);
+err_pcmcia_exit:
+	ssb_pcmcia_exit(bus);
+err_pci_exit:
+	ssb_pci_exit(bus);
+err_unmap:
+	ssb_iounmap(bus);
+err_sdio_exit:
+	ssb_sdio_exit(bus);
+err_disable_xtal:
+	ssb_buses_unlock();
+	ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 0);
+	return err;
+}
+
+#ifdef CONFIG_SSB_PCIHOST
+int ssb_bus_pcibus_register(struct ssb_bus *bus,
+			    struct pci_dev *host_pci)
+{
+	int err;
+
+	bus->bustype = SSB_BUSTYPE_PCI;
+	bus->host_pci = host_pci;
+	bus->ops = &ssb_pci_ops;
+
+	err = ssb_bus_register(bus, ssb_pci_get_invariants, 0);
+	if (!err) {
+		ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
+			   "PCI device %s\n", dev_name(&host_pci->dev));
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(ssb_bus_pcibus_register);
+#endif /* CONFIG_SSB_PCIHOST */
+
+#ifdef CONFIG_SSB_PCMCIAHOST
+int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
+			       struct pcmcia_device *pcmcia_dev,
+			       unsigned long baseaddr)
+{
+	int err;
+
+	bus->bustype = SSB_BUSTYPE_PCMCIA;
+	bus->host_pcmcia = pcmcia_dev;
+	bus->ops = &ssb_pcmcia_ops;
+
+	err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr);
+	if (!err) {
+		ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
+			   "PCMCIA device %s\n", pcmcia_dev->devname);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(ssb_bus_pcmciabus_register);
+#endif /* CONFIG_SSB_PCMCIAHOST */
+
+#ifdef CONFIG_SSB_SDIOHOST
+int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
+			     unsigned int quirks)
+{
+	int err;
+
+	bus->bustype = SSB_BUSTYPE_SDIO;
+	bus->host_sdio = func;
+	bus->ops = &ssb_sdio_ops;
+	bus->quirks = quirks;
+
+	err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0);
+	if (!err) {
+		ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
+			   "SDIO device %s\n", sdio_func_id(func));
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(ssb_bus_sdiobus_register);
+#endif /* CONFIG_SSB_SDIOHOST */
+
+int ssb_bus_ssbbus_register(struct ssb_bus *bus,
+			    unsigned long baseaddr,
+			    ssb_invariants_func_t get_invariants)
+{
+	int err;
+
+	bus->bustype = SSB_BUSTYPE_SSB;
+	bus->ops = &ssb_ssb_ops;
+
+	err = ssb_bus_register(bus, get_invariants, baseaddr);
+	if (!err) {
+		ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found at "
+			   "address 0x%08lX\n", baseaddr);
+	}
+
+	return err;
+}
+
+int __ssb_driver_register(struct ssb_driver *drv, struct module *owner)
+{
+	drv->drv.name = drv->name;
+	drv->drv.bus = &ssb_bustype;
+	drv->drv.owner = owner;
+
+	return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL(__ssb_driver_register);
+
+void ssb_driver_unregister(struct ssb_driver *drv)
+{
+	driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL(ssb_driver_unregister);
+
+void ssb_set_devtypedata(struct ssb_device *dev, void *data)
+{
+	struct ssb_bus *bus = dev->bus;
+	struct ssb_device *ent;
+	int i;
+
+	for (i = 0; i < bus->nr_devices; i++) {
+		ent = &(bus->devices[i]);
+		if (ent->id.vendor != dev->id.vendor)
+			continue;
+		if (ent->id.coreid != dev->id.coreid)
+			continue;
+
+		ent->devtypedata = data;
+	}
+}
+EXPORT_SYMBOL(ssb_set_devtypedata);
+
+static u32 clkfactor_f6_resolve(u32 v)
+{
+	/* map the magic values */
+	switch (v) {
+	case SSB_CHIPCO_CLK_F6_2:
+		return 2;
+	case SSB_CHIPCO_CLK_F6_3:
+		return 3;
+	case SSB_CHIPCO_CLK_F6_4:
+		return 4;
+	case SSB_CHIPCO_CLK_F6_5:
+
return 5; + case SSB_CHIPCO_CLK_F6_6: + return 6; + case SSB_CHIPCO_CLK_F6_7: + return 7; + } + return 0; +} + +/* Calculate the speed the backplane would run at a given set of clockcontrol values */ +u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m) +{ + u32 n1, n2, clock, m1, m2, m3, mc; + + n1 = (n & SSB_CHIPCO_CLK_N1); + n2 = ((n & SSB_CHIPCO_CLK_N2) >> SSB_CHIPCO_CLK_N2_SHIFT); + + switch (plltype) { + case SSB_PLLTYPE_6: /* 100/200 or 120/240 only */ + if (m & SSB_CHIPCO_CLK_T6_MMASK) + return SSB_CHIPCO_CLK_T6_M0; + return SSB_CHIPCO_CLK_T6_M1; + case SSB_PLLTYPE_1: /* 48Mhz base, 3 dividers */ + case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */ + case SSB_PLLTYPE_4: /* 48Mhz, 4 dividers */ + case SSB_PLLTYPE_7: /* 25Mhz, 4 dividers */ + n1 = clkfactor_f6_resolve(n1); + n2 += SSB_CHIPCO_CLK_F5_BIAS; + break; + case SSB_PLLTYPE_2: /* 48Mhz, 4 dividers */ + n1 += SSB_CHIPCO_CLK_T2_BIAS; + n2 += SSB_CHIPCO_CLK_T2_BIAS; + SSB_WARN_ON(!((n1 >= 2) && (n1 <= 7))); + SSB_WARN_ON(!((n2 >= 5) && (n2 <= 23))); + break; + case SSB_PLLTYPE_5: /* 25Mhz, 4 dividers */ + return 100000000; + default: + SSB_WARN_ON(1); + } + + switch (plltype) { + case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */ + case SSB_PLLTYPE_7: /* 25Mhz, 4 dividers */ + clock = SSB_CHIPCO_CLK_BASE2 * n1 * n2; + break; + default: + clock = SSB_CHIPCO_CLK_BASE1 * n1 * n2; + } + if (!clock) + return 0; + + m1 = (m & SSB_CHIPCO_CLK_M1); + m2 = ((m & SSB_CHIPCO_CLK_M2) >> SSB_CHIPCO_CLK_M2_SHIFT); + m3 = ((m & SSB_CHIPCO_CLK_M3) >> SSB_CHIPCO_CLK_M3_SHIFT); + mc = ((m & SSB_CHIPCO_CLK_MC) >> SSB_CHIPCO_CLK_MC_SHIFT); + + switch (plltype) { + case SSB_PLLTYPE_1: /* 48Mhz base, 3 dividers */ + case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */ + case SSB_PLLTYPE_4: /* 48Mhz, 4 dividers */ + case SSB_PLLTYPE_7: /* 25Mhz, 4 dividers */ + m1 = clkfactor_f6_resolve(m1); + if ((plltype == SSB_PLLTYPE_1) || + (plltype == SSB_PLLTYPE_3)) + m2 += SSB_CHIPCO_CLK_F5_BIAS; + else + m2 = clkfactor_f6_resolve(m2); + m3 = clkfactor_f6_resolve(m3); + + switch (mc) { + case SSB_CHIPCO_CLK_MC_BYPASS: + return clock; + case SSB_CHIPCO_CLK_MC_M1: + return (clock / m1); + case SSB_CHIPCO_CLK_MC_M1M2: + return (clock / (m1 * m2)); + case SSB_CHIPCO_CLK_MC_M1M2M3: + return (clock / (m1 * m2 * m3)); + case SSB_CHIPCO_CLK_MC_M1M3: + return (clock / (m1 * m3)); + } + return 0; + case SSB_PLLTYPE_2: + m1 += SSB_CHIPCO_CLK_T2_BIAS; + m2 += SSB_CHIPCO_CLK_T2M2_BIAS; + m3 += SSB_CHIPCO_CLK_T2_BIAS; + SSB_WARN_ON(!((m1 >= 2) && (m1 <= 7))); + SSB_WARN_ON(!((m2 >= 3) && (m2 <= 10))); + SSB_WARN_ON(!((m3 >= 2) && (m3 <= 7))); + + if (!(mc & SSB_CHIPCO_CLK_T2MC_M1BYP)) + clock /= m1; + if (!(mc & SSB_CHIPCO_CLK_T2MC_M2BYP)) + clock /= m2; + if (!(mc & SSB_CHIPCO_CLK_T2MC_M3BYP)) + clock /= m3; + return clock; + default: + SSB_WARN_ON(1); + } + return 0; +} + +/* Get the current speed the backplane is running at */ +u32 ssb_clockspeed(struct ssb_bus *bus) +{ + u32 rate; + u32 plltype; + u32 clkctl_n, clkctl_m; + + if (ssb_extif_available(&bus->extif)) + ssb_extif_get_clockcontrol(&bus->extif, &plltype, + &clkctl_n, &clkctl_m); + else if (bus->chipco.dev) + ssb_chipco_get_clockcontrol(&bus->chipco, &plltype, + &clkctl_n, &clkctl_m); + else + return 0; + + if (bus->chip_id == 0x5365) { + rate = 100000000; + } else { + rate = ssb_calc_clock_rate(plltype, clkctl_n, clkctl_m); + if (plltype == SSB_PLLTYPE_3) /* 25Mhz, 2 dividers */ + rate /= 2; + } + + return rate; +} +EXPORT_SYMBOL(ssb_clockspeed); + +static u32 ssb_tmslow_reject_bitmask(struct ssb_device *dev) +{ + u32 rev = 
ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_SSBREV; + + /* The REJECT bit changed position in TMSLOW between + * Backplane revisions. */ + switch (rev) { + case SSB_IDLOW_SSBREV_22: + return SSB_TMSLOW_REJECT_22; + case SSB_IDLOW_SSBREV_23: + return SSB_TMSLOW_REJECT_23; + case SSB_IDLOW_SSBREV_24: /* TODO - find the proper REJECT bits */ + case SSB_IDLOW_SSBREV_25: /* same here */ + case SSB_IDLOW_SSBREV_26: /* same here */ + case SSB_IDLOW_SSBREV_27: /* same here */ + return SSB_TMSLOW_REJECT_23; /* this is a guess */ + default: + printk(KERN_INFO "ssb: Backplane Revision 0x%.8X\n", rev); + WARN_ON(1); + } + return (SSB_TMSLOW_REJECT_22 | SSB_TMSLOW_REJECT_23); +} + +int ssb_device_is_enabled(struct ssb_device *dev) +{ + u32 val; + u32 reject; + + reject = ssb_tmslow_reject_bitmask(dev); + val = ssb_read32(dev, SSB_TMSLOW); + val &= SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET | reject; + + return (val == SSB_TMSLOW_CLOCK); +} +EXPORT_SYMBOL(ssb_device_is_enabled); + +static void ssb_flush_tmslow(struct ssb_device *dev) +{ + /* Make _really_ sure the device has finished the TMSLOW + * register write transaction, as we risk running into + * a machine check exception otherwise. + * Do this by reading the register back to commit the + * PCI write and delay an additional usec for the device + * to react to the change. */ + ssb_read32(dev, SSB_TMSLOW); + udelay(1); +} + +void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags) +{ + u32 val; + + ssb_device_disable(dev, core_specific_flags); + ssb_write32(dev, SSB_TMSLOW, + SSB_TMSLOW_RESET | SSB_TMSLOW_CLOCK | + SSB_TMSLOW_FGC | core_specific_flags); + ssb_flush_tmslow(dev); + + /* Clear SERR if set. This is a hw bug workaround. */ + if (ssb_read32(dev, SSB_TMSHIGH) & SSB_TMSHIGH_SERR) + ssb_write32(dev, SSB_TMSHIGH, 0); + + val = ssb_read32(dev, SSB_IMSTATE); + if (val & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) { + val &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO); + ssb_write32(dev, SSB_IMSTATE, val); + } + + ssb_write32(dev, SSB_TMSLOW, + SSB_TMSLOW_CLOCK | SSB_TMSLOW_FGC | + core_specific_flags); + ssb_flush_tmslow(dev); + + ssb_write32(dev, SSB_TMSLOW, SSB_TMSLOW_CLOCK | + core_specific_flags); + ssb_flush_tmslow(dev); +} +EXPORT_SYMBOL(ssb_device_enable); + +/* Wait for a bit in a register to get set or unset. + * timeout is in units of ten-microseconds */ +static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask, + int timeout, int set) +{ + int i; + u32 val; + + for (i = 0; i < timeout; i++) { + val = ssb_read32(dev, reg); + if (set) { + if (val & bitmask) + return 0; + } else { + if (!(val & bitmask)) + return 0; + } + udelay(10); + } + printk(KERN_ERR PFX "Timeout waiting for bitmask %08X on " + "register %04X to %s.\n", + bitmask, reg, (set ? 
"set" : "clear")); + + return -ETIMEDOUT; +} + +void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags) +{ + u32 reject; + + if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_RESET) + return; + + reject = ssb_tmslow_reject_bitmask(dev); + ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK); + ssb_wait_bit(dev, SSB_TMSLOW, reject, 1000, 1); + ssb_wait_bit(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0); + ssb_write32(dev, SSB_TMSLOW, + SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | + reject | SSB_TMSLOW_RESET | + core_specific_flags); + ssb_flush_tmslow(dev); + + ssb_write32(dev, SSB_TMSLOW, + reject | SSB_TMSLOW_RESET | + core_specific_flags); + ssb_flush_tmslow(dev); +} +EXPORT_SYMBOL(ssb_device_disable); + +u32 ssb_dma_translation(struct ssb_device *dev) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_SSB: + return 0; + case SSB_BUSTYPE_PCI: + return SSB_PCI_DMA; + default: + __ssb_dma_not_implemented(dev); + } + return 0; +} +EXPORT_SYMBOL(ssb_dma_translation); + +int ssb_dma_set_mask(struct ssb_device *dev, u64 mask) +{ +#ifdef CONFIG_SSB_PCIHOST + int err; +#endif + + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + err = pci_set_dma_mask(dev->bus->host_pci, mask); + if (err) + return err; + err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask); + return err; +#endif + case SSB_BUSTYPE_SSB: + return dma_set_mask(dev->dev, mask); + default: + __ssb_dma_not_implemented(dev); + } + return -ENOSYS; +} +EXPORT_SYMBOL(ssb_dma_set_mask); + +void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp_flags) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + if (gfp_flags & GFP_DMA) { + /* Workaround: The PCI API does not support passing + * a GFP flag. */ + return dma_alloc_coherent(&dev->bus->host_pci->dev, + size, dma_handle, gfp_flags); + } + return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle); +#endif + case SSB_BUSTYPE_SSB: + return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags); + default: + __ssb_dma_not_implemented(dev); + } + return NULL; +} +EXPORT_SYMBOL(ssb_dma_alloc_consistent); + +void ssb_dma_free_consistent(struct ssb_device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + gfp_t gfp_flags) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + if (gfp_flags & GFP_DMA) { + /* Workaround: The PCI API does not support passing + * a GFP flag. */ + dma_free_coherent(&dev->bus->host_pci->dev, + size, vaddr, dma_handle); + return; + } + pci_free_consistent(dev->bus->host_pci, size, + vaddr, dma_handle); + return; +#endif + case SSB_BUSTYPE_SSB: + dma_free_coherent(dev->dev, size, vaddr, dma_handle); + return; + default: + __ssb_dma_not_implemented(dev); + } +} +EXPORT_SYMBOL(ssb_dma_free_consistent); + +int ssb_bus_may_powerdown(struct ssb_bus *bus) +{ + struct ssb_chipcommon *cc; + int err = 0; + + /* On buses where more than one core may be working + * at a time, we must not powerdown stuff if there are + * still cores that may want to run. 
*/ + if (bus->bustype == SSB_BUSTYPE_SSB) + goto out; + + cc = &bus->chipco; + + if (!cc->dev) + goto out; + if (cc->dev->id.revision < 5) + goto out; + + ssb_chipco_set_clockmode(cc, SSB_CLKMODE_SLOW); + err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 0); + if (err) + goto error; +out: +#ifdef CONFIG_SSB_DEBUG + bus->powered_up = 0; +#endif + return err; +error: + ssb_printk(KERN_ERR PFX "Bus powerdown failed\n"); + goto out; +} +EXPORT_SYMBOL(ssb_bus_may_powerdown); + +int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl) +{ + struct ssb_chipcommon *cc; + int err; + enum ssb_clkmode mode; + + err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1); + if (err) + goto error; + cc = &bus->chipco; + mode = dynamic_pctl ? SSB_CLKMODE_DYNAMIC : SSB_CLKMODE_FAST; + ssb_chipco_set_clockmode(cc, mode); + +#ifdef CONFIG_SSB_DEBUG + bus->powered_up = 1; +#endif + return 0; +error: + ssb_printk(KERN_ERR PFX "Bus powerup failed\n"); + return err; +} +EXPORT_SYMBOL(ssb_bus_powerup); + +u32 ssb_admatch_base(u32 adm) +{ + u32 base = 0; + + switch (adm & SSB_ADM_TYPE) { + case SSB_ADM_TYPE0: + base = (adm & SSB_ADM_BASE0); + break; + case SSB_ADM_TYPE1: + SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + base = (adm & SSB_ADM_BASE1); + break; + case SSB_ADM_TYPE2: + SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + base = (adm & SSB_ADM_BASE2); + break; + default: + SSB_WARN_ON(1); + } + + return base; +} +EXPORT_SYMBOL(ssb_admatch_base); + +u32 ssb_admatch_size(u32 adm) +{ + u32 size = 0; + + switch (adm & SSB_ADM_TYPE) { + case SSB_ADM_TYPE0: + size = ((adm & SSB_ADM_SZ0) >> SSB_ADM_SZ0_SHIFT); + break; + case SSB_ADM_TYPE1: + SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + size = ((adm & SSB_ADM_SZ1) >> SSB_ADM_SZ1_SHIFT); + break; + case SSB_ADM_TYPE2: + SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + size = ((adm & SSB_ADM_SZ2) >> SSB_ADM_SZ2_SHIFT); + break; + default: + SSB_WARN_ON(1); + } + size = (1 << (size + 1)); + + return size; +} +EXPORT_SYMBOL(ssb_admatch_size); + +static int __init ssb_modinit(void) +{ + int err; + + /* See the comment at the ssb_is_early_boot definition */ + ssb_is_early_boot = 0; + err = bus_register(&ssb_bustype); + if (err) + return err; + + /* Maybe we already registered some buses at early boot. + * Check for this and attach them + */ + ssb_buses_lock(); + err = ssb_attach_queued_buses(); + ssb_buses_unlock(); + if (err) { + bus_unregister(&ssb_bustype); + goto out; + } + + err = b43_pci_ssb_bridge_init(); + if (err) { + ssb_printk(KERN_ERR "Broadcom 43xx PCI-SSB-bridge " + "initialization failed\n"); + /* don't fail SSB init because of this */ + err = 0; + } + err = ssb_gige_init(); + if (err) { + ssb_printk(KERN_ERR "SSB Broadcom Gigabit Ethernet " + "driver initialization failed\n"); + /* don't fail SSB init because of this */ + err = 0; + } +out: + return err; +} +/* ssb must be initialized after PCI but before the ssb drivers. + * That means we must use some initcall between subsys_initcall + * and device_initcall. */ +fs_initcall(ssb_modinit); + +static void __exit ssb_modexit(void) +{ + ssb_gige_exit(); + b43_pci_ssb_bridge_exit(); + bus_unregister(&ssb_bustype); +} +module_exit(ssb_modexit) diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c new file mode 100644 index 0000000..9e50896 --- /dev/null +++ b/drivers/ssb/pci.c @@ -0,0 +1,926 @@ +/* + * Sonics Silicon Backplane PCI-Hostbus related functions. 
+ * + * Copyright (C) 2005-2006 Michael Buesch + * Copyright (C) 2005 Martin Langer + * Copyright (C) 2005 Stefano Brivio + * Copyright (C) 2005 Danny van Dyk + * Copyright (C) 2005 Andreas Jaggi + * + * Derived from the Broadcom 4400 device driver. + * Copyright (C) 2002 David S. Miller (davem@redhat.com) + * Fixed by Pekka Pietikainen (pp@ee.oulu.fi) + * Copyright (C) 2006 Broadcom Corporation. + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include +#include +#include +#include + +#include "ssb_private.h" + + +/* Define the following to 1 to enable a printk on each coreswitch. */ +#define SSB_VERBOSE_PCICORESWITCH_DEBUG 0 + + +/* Lowlevel coreswitching */ +int ssb_pci_switch_coreidx(struct ssb_bus *bus, u8 coreidx) +{ + int err; + int attempts = 0; + u32 cur_core; + + while (1) { + err = pci_write_config_dword(bus->host_pci, SSB_BAR0_WIN, + (coreidx * SSB_CORE_SIZE) + + SSB_ENUM_BASE); + if (err) + goto error; + err = pci_read_config_dword(bus->host_pci, SSB_BAR0_WIN, + &cur_core); + if (err) + goto error; + cur_core = (cur_core - SSB_ENUM_BASE) + / SSB_CORE_SIZE; + if (cur_core == coreidx) + break; + + if (attempts++ > SSB_BAR0_MAX_RETRIES) + goto error; + udelay(10); + } + return 0; +error: + ssb_printk(KERN_ERR PFX "Failed to switch to core %u\n", coreidx); + return -ENODEV; +} + +int ssb_pci_switch_core(struct ssb_bus *bus, + struct ssb_device *dev) +{ + int err; + unsigned long flags; + +#if SSB_VERBOSE_PCICORESWITCH_DEBUG + ssb_printk(KERN_INFO PFX + "Switching to %s core, index %d\n", + ssb_core_name(dev->id.coreid), + dev->core_index); +#endif + + spin_lock_irqsave(&bus->bar_lock, flags); + err = ssb_pci_switch_coreidx(bus, dev->core_index); + if (!err) + bus->mapped_device = dev; + spin_unlock_irqrestore(&bus->bar_lock, flags); + + return err; +} + +/* Enable/disable the on board crystal oscillator and/or PLL. */ +int ssb_pci_xtal(struct ssb_bus *bus, u32 what, int turn_on) +{ + int err; + u32 in, out, outenable; + u16 pci_status; + + if (bus->bustype != SSB_BUSTYPE_PCI) + return 0; + + err = pci_read_config_dword(bus->host_pci, SSB_GPIO_IN, &in); + if (err) + goto err_pci; + err = pci_read_config_dword(bus->host_pci, SSB_GPIO_OUT, &out); + if (err) + goto err_pci; + err = pci_read_config_dword(bus->host_pci, SSB_GPIO_OUT_ENABLE, &outenable); + if (err) + goto err_pci; + + outenable |= what; + + if (turn_on) { + /* Avoid glitching the clock if GPRS is already using it. + * We can't actually read the state of the PLLPD so we infer it + * by the value of XTAL_PU which *is* readable via gpioin. 
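+		 * So the crystal/PLL is only brought up below when XTAL_PU
+		 * reads back as off; otherwise both are left untouched.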
+ */ + if (!(in & SSB_GPIO_XTAL)) { + if (what & SSB_GPIO_XTAL) { + /* Turn the crystal on */ + out |= SSB_GPIO_XTAL; + if (what & SSB_GPIO_PLL) + out |= SSB_GPIO_PLL; + err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT, out); + if (err) + goto err_pci; + err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT_ENABLE, + outenable); + if (err) + goto err_pci; + msleep(1); + } + if (what & SSB_GPIO_PLL) { + /* Turn the PLL on */ + out &= ~SSB_GPIO_PLL; + err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT, out); + if (err) + goto err_pci; + msleep(5); + } + } + + err = pci_read_config_word(bus->host_pci, PCI_STATUS, &pci_status); + if (err) + goto err_pci; + pci_status &= ~PCI_STATUS_SIG_TARGET_ABORT; + err = pci_write_config_word(bus->host_pci, PCI_STATUS, pci_status); + if (err) + goto err_pci; + } else { + if (what & SSB_GPIO_XTAL) { + /* Turn the crystal off */ + out &= ~SSB_GPIO_XTAL; + } + if (what & SSB_GPIO_PLL) { + /* Turn the PLL off */ + out |= SSB_GPIO_PLL; + } + err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT, out); + if (err) + goto err_pci; + err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT_ENABLE, outenable); + if (err) + goto err_pci; + } + +out: + return err; + +err_pci: + printk(KERN_ERR PFX "Error: ssb_pci_xtal() could not access PCI config space!\n"); + err = -EBUSY; + goto out; +} + +/* Get the word-offset for a SSB_SPROM_XXX define. */ +#define SPOFF(offset) (((offset) - SSB_SPROM_BASE) / sizeof(u16)) +/* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */ +#define SPEX16(_outvar, _offset, _mask, _shift) \ + out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift)) +#define SPEX32(_outvar, _offset, _mask, _shift) \ + out->_outvar = ((((u32)in[SPOFF((_offset)+2)] << 16 | \ + in[SPOFF(_offset)]) & (_mask)) >> (_shift)) +#define SPEX(_outvar, _offset, _mask, _shift) \ + SPEX16(_outvar, _offset, _mask, _shift) + + +static inline u8 ssb_crc8(u8 crc, u8 data) +{ + /* Polynomial: x^8 + x^7 + x^6 + x^4 + x^2 + 1 */ + static const u8 t[] = { + 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, + 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, + 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, + 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, + 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, + 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, + 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, + 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, + 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, + 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, + 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, + 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, + 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, + 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, + 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, + 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, + 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, + 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, + 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, + 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, + 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, + 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, + 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, + 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, + 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, + 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, + 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, + 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, + 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, + 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, + 0xBE, 
0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, + 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F, + }; + return t[crc ^ data]; +} + +static u8 ssb_sprom_crc(const u16 *sprom, u16 size) +{ + int word; + u8 crc = 0xFF; + + for (word = 0; word < size - 1; word++) { + crc = ssb_crc8(crc, sprom[word] & 0x00FF); + crc = ssb_crc8(crc, (sprom[word] & 0xFF00) >> 8); + } + crc = ssb_crc8(crc, sprom[size - 1] & 0x00FF); + crc ^= 0xFF; + + return crc; +} + +static int sprom_check_crc(const u16 *sprom, size_t size) +{ + u8 crc; + u8 expected_crc; + u16 tmp; + + crc = ssb_sprom_crc(sprom, size); + tmp = sprom[size - 1] & SSB_SPROM_REVISION_CRC; + expected_crc = tmp >> SSB_SPROM_REVISION_CRC_SHIFT; + if (crc != expected_crc) + return -EPROTO; + + return 0; +} + +static int sprom_do_read(struct ssb_bus *bus, u16 *sprom) +{ + int i; + + for (i = 0; i < bus->sprom_size; i++) + sprom[i] = ioread16(bus->mmio + SSB_SPROM_BASE + (i * 2)); + + return 0; +} + +static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom) +{ + struct pci_dev *pdev = bus->host_pci; + int i, err; + u32 spromctl; + u16 size = bus->sprom_size; + + ssb_printk(KERN_NOTICE PFX "Writing SPROM. Do NOT turn off the power! Please stand by...\n"); + err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl); + if (err) + goto err_ctlreg; + spromctl |= SSB_SPROMCTL_WE; + err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl); + if (err) + goto err_ctlreg; + ssb_printk(KERN_NOTICE PFX "[ 0%%"); + msleep(500); + for (i = 0; i < size; i++) { + if (i == size / 4) + ssb_printk("25%%"); + else if (i == size / 2) + ssb_printk("50%%"); + else if (i == (size * 3) / 4) + ssb_printk("75%%"); + else if (i % 2) + ssb_printk("."); + writew(sprom[i], bus->mmio + SSB_SPROM_BASE + (i * 2)); + mmiowb(); + msleep(20); + } + err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl); + if (err) + goto err_ctlreg; + spromctl &= ~SSB_SPROMCTL_WE; + err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl); + if (err) + goto err_ctlreg; + msleep(500); + ssb_printk("100%% ]\n"); + ssb_printk(KERN_NOTICE PFX "SPROM written.\n"); + + return 0; +err_ctlreg: + ssb_printk(KERN_ERR PFX "Could not access SPROM control register.\n"); + return err; +} + +static s8 r123_extract_antgain(u8 sprom_revision, const u16 *in, + u16 mask, u16 shift) +{ + u16 v; + u8 gain; + + v = in[SPOFF(SSB_SPROM1_AGAIN)]; + gain = (v & mask) >> shift; + if (gain == 0xFF) + gain = 2; /* If unset use 2dBm */ + if (sprom_revision == 1) { + /* Convert to Q5.2 */ + gain <<= 2; + } else { + /* Q5.2 Fractional part is stored in 0xC0 */ + gain = ((gain & 0xC0) >> 6) | ((gain & 0x3F) << 2); + } + + return (s8)gain; +} + +static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in) +{ + int i; + u16 v; + s8 gain; + u16 loc[3]; + + if (out->revision == 3) /* rev 3 moved MAC */ + loc[0] = SSB_SPROM3_IL0MAC; + else { + loc[0] = SSB_SPROM1_IL0MAC; + loc[1] = SSB_SPROM1_ET0MAC; + loc[2] = SSB_SPROM1_ET1MAC; + } + for (i = 0; i < 3; i++) { + v = in[SPOFF(loc[0]) + i]; + *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v); + } + if (out->revision < 3) { /* only rev 1-2 have et0, et1 */ + for (i = 0; i < 3; i++) { + v = in[SPOFF(loc[1]) + i]; + *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v); + } + for (i = 0; i < 3; i++) { + v = in[SPOFF(loc[2]) + i]; + *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v); + } + } + SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0); + SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A, + SSB_SPROM1_ETHPHY_ET1A_SHIFT); + SPEX(et0mdcport, SSB_SPROM1_ETHPHY, 
SSB_SPROM1_ETHPHY_ET0M, 14); + SPEX(et1mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1M, 15); + SPEX(board_rev, SSB_SPROM1_BINF, SSB_SPROM1_BINF_BREV, 0); + SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE, + SSB_SPROM1_BINF_CCODE_SHIFT); + SPEX(ant_available_a, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTA, + SSB_SPROM1_BINF_ANTA_SHIFT); + SPEX(ant_available_bg, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTBG, + SSB_SPROM1_BINF_ANTBG_SHIFT); + SPEX(pa0b0, SSB_SPROM1_PA0B0, 0xFFFF, 0); + SPEX(pa0b1, SSB_SPROM1_PA0B1, 0xFFFF, 0); + SPEX(pa0b2, SSB_SPROM1_PA0B2, 0xFFFF, 0); + SPEX(pa1b0, SSB_SPROM1_PA1B0, 0xFFFF, 0); + SPEX(pa1b1, SSB_SPROM1_PA1B1, 0xFFFF, 0); + SPEX(pa1b2, SSB_SPROM1_PA1B2, 0xFFFF, 0); + SPEX(gpio0, SSB_SPROM1_GPIOA, SSB_SPROM1_GPIOA_P0, 0); + SPEX(gpio1, SSB_SPROM1_GPIOA, SSB_SPROM1_GPIOA_P1, + SSB_SPROM1_GPIOA_P1_SHIFT); + SPEX(gpio2, SSB_SPROM1_GPIOB, SSB_SPROM1_GPIOB_P2, 0); + SPEX(gpio3, SSB_SPROM1_GPIOB, SSB_SPROM1_GPIOB_P3, + SSB_SPROM1_GPIOB_P3_SHIFT); + SPEX(maxpwr_a, SSB_SPROM1_MAXPWR, SSB_SPROM1_MAXPWR_A, + SSB_SPROM1_MAXPWR_A_SHIFT); + SPEX(maxpwr_bg, SSB_SPROM1_MAXPWR, SSB_SPROM1_MAXPWR_BG, 0); + SPEX(itssi_a, SSB_SPROM1_ITSSI, SSB_SPROM1_ITSSI_A, + SSB_SPROM1_ITSSI_A_SHIFT); + SPEX(itssi_bg, SSB_SPROM1_ITSSI, SSB_SPROM1_ITSSI_BG, 0); + SPEX(boardflags_lo, SSB_SPROM1_BFLLO, 0xFFFF, 0); + if (out->revision >= 2) + SPEX(boardflags_hi, SSB_SPROM2_BFLHI, 0xFFFF, 0); + + /* Extract the antenna gain values. */ + gain = r123_extract_antgain(out->revision, in, + SSB_SPROM1_AGAIN_BG, + SSB_SPROM1_AGAIN_BG_SHIFT); + out->antenna_gain.ghz24.a0 = gain; + out->antenna_gain.ghz24.a1 = gain; + out->antenna_gain.ghz24.a2 = gain; + out->antenna_gain.ghz24.a3 = gain; + gain = r123_extract_antgain(out->revision, in, + SSB_SPROM1_AGAIN_A, + SSB_SPROM1_AGAIN_A_SHIFT); + out->antenna_gain.ghz5.a0 = gain; + out->antenna_gain.ghz5.a1 = gain; + out->antenna_gain.ghz5.a2 = gain; + out->antenna_gain.ghz5.a3 = gain; +} + +static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in) +{ + int i; + u16 v; + u16 il0mac_offset; + + if (out->revision == 4) + il0mac_offset = SSB_SPROM4_IL0MAC; + else + il0mac_offset = SSB_SPROM5_IL0MAC; + /* extract the MAC address */ + for (i = 0; i < 3; i++) { + v = in[SPOFF(il0mac_offset) + i]; + *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v); + } + SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0); + SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A, + SSB_SPROM4_ETHPHY_ET1A_SHIFT); + if (out->revision == 4) { + SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0); + SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0); + SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0); + } else { + SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0); + SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0); + SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0); + } + SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A, + SSB_SPROM4_ANTAVAIL_A_SHIFT); + SPEX(ant_available_bg, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_BG, + SSB_SPROM4_ANTAVAIL_BG_SHIFT); + SPEX(maxpwr_bg, SSB_SPROM4_MAXP_BG, SSB_SPROM4_MAXP_BG_MASK, 0); + SPEX(itssi_bg, SSB_SPROM4_MAXP_BG, SSB_SPROM4_ITSSI_BG, + SSB_SPROM4_ITSSI_BG_SHIFT); + SPEX(maxpwr_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_MAXP_A_MASK, 0); + SPEX(itssi_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_ITSSI_A, + SSB_SPROM4_ITSSI_A_SHIFT); + if (out->revision == 4) { + SPEX(gpio0, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P0, 0); + SPEX(gpio1, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P1, + SSB_SPROM4_GPIOA_P1_SHIFT); + SPEX(gpio2, SSB_SPROM4_GPIOB, 
SSB_SPROM4_GPIOB_P2, 0); + SPEX(gpio3, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P3, + SSB_SPROM4_GPIOB_P3_SHIFT); + } else { + SPEX(gpio0, SSB_SPROM5_GPIOA, SSB_SPROM5_GPIOA_P0, 0); + SPEX(gpio1, SSB_SPROM5_GPIOA, SSB_SPROM5_GPIOA_P1, + SSB_SPROM5_GPIOA_P1_SHIFT); + SPEX(gpio2, SSB_SPROM5_GPIOB, SSB_SPROM5_GPIOB_P2, 0); + SPEX(gpio3, SSB_SPROM5_GPIOB, SSB_SPROM5_GPIOB_P3, + SSB_SPROM5_GPIOB_P3_SHIFT); + } + + /* Extract the antenna gain values. */ + SPEX(antenna_gain.ghz24.a0, SSB_SPROM4_AGAIN01, + SSB_SPROM4_AGAIN0, SSB_SPROM4_AGAIN0_SHIFT); + SPEX(antenna_gain.ghz24.a1, SSB_SPROM4_AGAIN01, + SSB_SPROM4_AGAIN1, SSB_SPROM4_AGAIN1_SHIFT); + SPEX(antenna_gain.ghz24.a2, SSB_SPROM4_AGAIN23, + SSB_SPROM4_AGAIN2, SSB_SPROM4_AGAIN2_SHIFT); + SPEX(antenna_gain.ghz24.a3, SSB_SPROM4_AGAIN23, + SSB_SPROM4_AGAIN3, SSB_SPROM4_AGAIN3_SHIFT); + memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24, + sizeof(out->antenna_gain.ghz5)); + + /* TODO - get remaining rev 4 stuff needed */ +} + +static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in) +{ + int i; + u16 v; + + /* extract the MAC address */ + for (i = 0; i < 3; i++) { + v = in[SPOFF(SSB_SPROM8_IL0MAC) + i]; + *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v); + } + SPEX(country_code, SSB_SPROM8_CCODE, 0xFFFF, 0); + SPEX(boardflags_lo, SSB_SPROM8_BFLLO, 0xFFFF, 0); + SPEX(boardflags_hi, SSB_SPROM8_BFLHI, 0xFFFF, 0); + SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, 0xFFFF, 0); + SPEX(boardflags2_hi, SSB_SPROM8_BFL2HI, 0xFFFF, 0); + SPEX(ant_available_a, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_A, + SSB_SPROM8_ANTAVAIL_A_SHIFT); + SPEX(ant_available_bg, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_BG, + SSB_SPROM8_ANTAVAIL_BG_SHIFT); + SPEX(maxpwr_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_MAXP_BG_MASK, 0); + SPEX(itssi_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_ITSSI_BG, + SSB_SPROM8_ITSSI_BG_SHIFT); + SPEX(maxpwr_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_MAXP_A_MASK, 0); + SPEX(itssi_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_ITSSI_A, + SSB_SPROM8_ITSSI_A_SHIFT); + SPEX(maxpwr_ah, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AH_MASK, 0); + SPEX(maxpwr_al, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AL_MASK, + SSB_SPROM8_MAXP_AL_SHIFT); + SPEX(gpio0, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P0, 0); + SPEX(gpio1, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P1, + SSB_SPROM8_GPIOA_P1_SHIFT); + SPEX(gpio2, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P2, 0); + SPEX(gpio3, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P3, + SSB_SPROM8_GPIOB_P3_SHIFT); + SPEX(tri2g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI2G, 0); + SPEX(tri5g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI5G, + SSB_SPROM8_TRI5G_SHIFT); + SPEX(tri5gl, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GL, 0); + SPEX(tri5gh, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GH, + SSB_SPROM8_TRI5GH_SHIFT); + SPEX(rxpo2g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO2G, 0); + SPEX(rxpo5g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO5G, + SSB_SPROM8_RXPO5G_SHIFT); + SPEX(rssismf2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMF2G, 0); + SPEX(rssismc2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMC2G, + SSB_SPROM8_RSSISMC2G_SHIFT); + SPEX(rssisav2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISAV2G, + SSB_SPROM8_RSSISAV2G_SHIFT); + SPEX(bxa2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_BXA2G, + SSB_SPROM8_BXA2G_SHIFT); + SPEX(rssismf5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMF5G, 0); + SPEX(rssismc5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMC5G, + SSB_SPROM8_RSSISMC5G_SHIFT); + SPEX(rssisav5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISAV5G, + SSB_SPROM8_RSSISAV5G_SHIFT); + SPEX(bxa5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_BXA5G, + SSB_SPROM8_BXA5G_SHIFT); + SPEX(pa0b0, SSB_SPROM8_PA0B0, 
0xFFFF, 0); + SPEX(pa0b1, SSB_SPROM8_PA0B1, 0xFFFF, 0); + SPEX(pa0b2, SSB_SPROM8_PA0B2, 0xFFFF, 0); + SPEX(pa1b0, SSB_SPROM8_PA1B0, 0xFFFF, 0); + SPEX(pa1b1, SSB_SPROM8_PA1B1, 0xFFFF, 0); + SPEX(pa1b2, SSB_SPROM8_PA1B2, 0xFFFF, 0); + SPEX(pa1lob0, SSB_SPROM8_PA1LOB0, 0xFFFF, 0); + SPEX(pa1lob1, SSB_SPROM8_PA1LOB1, 0xFFFF, 0); + SPEX(pa1lob2, SSB_SPROM8_PA1LOB2, 0xFFFF, 0); + SPEX(pa1hib0, SSB_SPROM8_PA1HIB0, 0xFFFF, 0); + SPEX(pa1hib1, SSB_SPROM8_PA1HIB1, 0xFFFF, 0); + SPEX(pa1hib2, SSB_SPROM8_PA1HIB2, 0xFFFF, 0); + SPEX(cck2gpo, SSB_SPROM8_CCK2GPO, 0xFFFF, 0); + SPEX32(ofdm2gpo, SSB_SPROM8_OFDM2GPO, 0xFFFFFFFF, 0); + SPEX32(ofdm5glpo, SSB_SPROM8_OFDM5GLPO, 0xFFFFFFFF, 0); + SPEX32(ofdm5gpo, SSB_SPROM8_OFDM5GPO, 0xFFFFFFFF, 0); + SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, 0xFFFFFFFF, 0); + + /* Extract the antenna gain values. */ + SPEX(antenna_gain.ghz24.a0, SSB_SPROM8_AGAIN01, + SSB_SPROM8_AGAIN0, SSB_SPROM8_AGAIN0_SHIFT); + SPEX(antenna_gain.ghz24.a1, SSB_SPROM8_AGAIN01, + SSB_SPROM8_AGAIN1, SSB_SPROM8_AGAIN1_SHIFT); + SPEX(antenna_gain.ghz24.a2, SSB_SPROM8_AGAIN23, + SSB_SPROM8_AGAIN2, SSB_SPROM8_AGAIN2_SHIFT); + SPEX(antenna_gain.ghz24.a3, SSB_SPROM8_AGAIN23, + SSB_SPROM8_AGAIN3, SSB_SPROM8_AGAIN3_SHIFT); + memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24, + sizeof(out->antenna_gain.ghz5)); + + /* TODO - get remaining rev 8 stuff needed */ +} + +static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, + const u16 *in, u16 size) +{ + memset(out, 0, sizeof(*out)); + + out->revision = in[size - 1] & 0x00FF; + ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision); + memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */ + memset(out->et1mac, 0xFF, 6); + if ((bus->chip_id & 0xFF00) == 0x4400) { + /* Workaround: The BCM44XX chip has a stupid revision + * number stored in the SPROM. + * Always extract r1. */ + out->revision = 1; + sprom_extract_r123(out, in); + } else if (bus->chip_id == 0x4321) { + /* the BCM4328 has a chipid == 0x4321 and a rev 4 SPROM */ + out->revision = 4; + sprom_extract_r45(out, in); + } else { + switch (out->revision) { + case 1: + case 2: + case 3: + sprom_extract_r123(out, in); + break; + case 4: + case 5: + sprom_extract_r45(out, in); + break; + case 8: + sprom_extract_r8(out, in); + break; + default: + ssb_printk(KERN_WARNING PFX "Unsupported SPROM" + " revision %d detected. Will extract" + " v1\n", out->revision); + out->revision = 1; + sprom_extract_r123(out, in); + } + } + + if (out->boardflags_lo == 0xFFFF) + out->boardflags_lo = 0; /* per specs */ + if (out->boardflags_hi == 0xFFFF) + out->boardflags_hi = 0; /* per specs */ + + return 0; +} + +static int ssb_pci_sprom_get(struct ssb_bus *bus, + struct ssb_sprom *sprom) +{ + const struct ssb_sprom *fallback; + int err = -ENOMEM; + u16 *buf; + + buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); + if (!buf) + goto out; + bus->sprom_size = SSB_SPROMSIZE_WORDS_R123; + sprom_do_read(bus, buf); + err = sprom_check_crc(buf, bus->sprom_size); + if (err) { + /* try for a 440 byte SPROM - revision 4 and higher */ + kfree(buf); + buf = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16), + GFP_KERNEL); + if (!buf) + goto out; + bus->sprom_size = SSB_SPROMSIZE_WORDS_R4; + sprom_do_read(bus, buf); + err = sprom_check_crc(buf, bus->sprom_size); + if (err) { + /* All CRC attempts failed. + * Maybe there is no SPROM on the device? + * If we have a fallback, use that. 
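+			 * (The fallback image, if the platform registered
+			 * one, is returned by ssb_get_fallback_sprom().)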
*/ + fallback = ssb_get_fallback_sprom(); + if (fallback) { + memcpy(sprom, fallback, sizeof(*sprom)); + err = 0; + goto out_free; + } + ssb_printk(KERN_WARNING PFX "WARNING: Invalid" + " SPROM CRC (corrupt SPROM)\n"); + } + } + err = sprom_extract(bus, sprom, buf, bus->sprom_size); + +out_free: + kfree(buf); +out: + return err; +} + +static void ssb_pci_get_boardinfo(struct ssb_bus *bus, + struct ssb_boardinfo *bi) +{ + pci_read_config_word(bus->host_pci, PCI_SUBSYSTEM_VENDOR_ID, + &bi->vendor); + pci_read_config_word(bus->host_pci, PCI_SUBSYSTEM_ID, + &bi->type); + pci_read_config_word(bus->host_pci, PCI_REVISION_ID, + &bi->rev); +} + +int ssb_pci_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv) +{ + int err; + + err = ssb_pci_sprom_get(bus, &iv->sprom); + if (err) + goto out; + ssb_pci_get_boardinfo(bus, &iv->boardinfo); + +out: + return err; +} + +#ifdef CONFIG_SSB_DEBUG +static int ssb_pci_assert_buspower(struct ssb_bus *bus) +{ + if (likely(bus->powered_up)) + return 0; + + printk(KERN_ERR PFX "FATAL ERROR: Bus powered down " + "while accessing PCI MMIO space\n"); + if (bus->power_warn_count <= 10) { + bus->power_warn_count++; + dump_stack(); + } + + return -ENODEV; +} +#else /* DEBUG */ +static inline int ssb_pci_assert_buspower(struct ssb_bus *bus) +{ + return 0; +} +#endif /* DEBUG */ + +static u8 ssb_pci_read8(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(ssb_pci_assert_buspower(bus))) + return 0xFF; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return 0xFF; + } + return ioread8(bus->mmio + offset); +} + +static u16 ssb_pci_read16(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(ssb_pci_assert_buspower(bus))) + return 0xFFFF; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return 0xFFFF; + } + return ioread16(bus->mmio + offset); +} + +static u32 ssb_pci_read32(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(ssb_pci_assert_buspower(bus))) + return 0xFFFFFFFF; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return 0xFFFFFFFF; + } + return ioread32(bus->mmio + offset); +} + +#ifdef CONFIG_SSB_BLOCKIO +static void ssb_pci_block_read(struct ssb_device *dev, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + struct ssb_bus *bus = dev->bus; + void __iomem *addr = bus->mmio + offset; + + if (unlikely(ssb_pci_assert_buspower(bus))) + goto error; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + goto error; + } + switch (reg_width) { + case sizeof(u8): + ioread8_rep(addr, buffer, count); + break; + case sizeof(u16): + SSB_WARN_ON(count & 1); + ioread16_rep(addr, buffer, count >> 1); + break; + case sizeof(u32): + SSB_WARN_ON(count & 3); + ioread32_rep(addr, buffer, count >> 2); + break; + default: + SSB_WARN_ON(1); + } + + return; +error: + memset(buffer, 0xFF, count); +} +#endif /* CONFIG_SSB_BLOCKIO */ + +static void ssb_pci_write8(struct ssb_device *dev, u16 offset, u8 value) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(ssb_pci_assert_buspower(bus))) + return; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return; + } + iowrite8(value, bus->mmio + offset); +} + +static void ssb_pci_write16(struct ssb_device *dev, u16 offset, u16 value) +{ + struct ssb_bus *bus = dev->bus; + + if 
(unlikely(ssb_pci_assert_buspower(bus))) + return; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return; + } + iowrite16(value, bus->mmio + offset); +} + +static void ssb_pci_write32(struct ssb_device *dev, u16 offset, u32 value) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(ssb_pci_assert_buspower(bus))) + return; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return; + } + iowrite32(value, bus->mmio + offset); +} + +#ifdef CONFIG_SSB_BLOCKIO +static void ssb_pci_block_write(struct ssb_device *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + struct ssb_bus *bus = dev->bus; + void __iomem *addr = bus->mmio + offset; + + if (unlikely(ssb_pci_assert_buspower(bus))) + return; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return; + } + switch (reg_width) { + case sizeof(u8): + iowrite8_rep(addr, buffer, count); + break; + case sizeof(u16): + SSB_WARN_ON(count & 1); + iowrite16_rep(addr, buffer, count >> 1); + break; + case sizeof(u32): + SSB_WARN_ON(count & 3); + iowrite32_rep(addr, buffer, count >> 2); + break; + default: + SSB_WARN_ON(1); + } +} +#endif /* CONFIG_SSB_BLOCKIO */ + +/* Not "static", as it's used in main.c */ +const struct ssb_bus_ops ssb_pci_ops = { + .read8 = ssb_pci_read8, + .read16 = ssb_pci_read16, + .read32 = ssb_pci_read32, + .write8 = ssb_pci_write8, + .write16 = ssb_pci_write16, + .write32 = ssb_pci_write32, +#ifdef CONFIG_SSB_BLOCKIO + .block_read = ssb_pci_block_read, + .block_write = ssb_pci_block_write, +#endif +}; + +static ssize_t ssb_pci_attr_sprom_show(struct device *pcidev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = container_of(pcidev, struct pci_dev, dev); + struct ssb_bus *bus; + + bus = ssb_pci_dev_to_bus(pdev); + if (!bus) + return -ENODEV; + + return ssb_attr_sprom_show(bus, buf, sprom_do_read); +} + +static ssize_t ssb_pci_attr_sprom_store(struct device *pcidev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = container_of(pcidev, struct pci_dev, dev); + struct ssb_bus *bus; + + bus = ssb_pci_dev_to_bus(pdev); + if (!bus) + return -ENODEV; + + return ssb_attr_sprom_store(bus, buf, count, + sprom_check_crc, sprom_do_write); +} + +static DEVICE_ATTR(ssb_sprom, 0600, + ssb_pci_attr_sprom_show, + ssb_pci_attr_sprom_store); + +void ssb_pci_exit(struct ssb_bus *bus) +{ + struct pci_dev *pdev; + + if (bus->bustype != SSB_BUSTYPE_PCI) + return; + + pdev = bus->host_pci; + device_remove_file(&pdev->dev, &dev_attr_ssb_sprom); +} + +int ssb_pci_init(struct ssb_bus *bus) +{ + struct pci_dev *pdev; + int err; + + if (bus->bustype != SSB_BUSTYPE_PCI) + return 0; + + pdev = bus->host_pci; + mutex_init(&bus->sprom_mutex); + err = device_create_file(&pdev->dev, &dev_attr_ssb_sprom); + if (err) + goto out; + +out: + return err; +} diff --git a/drivers/ssb/pcihost_wrapper.c b/drivers/ssb/pcihost_wrapper.c new file mode 100644 index 0000000..26737a0 --- /dev/null +++ b/drivers/ssb/pcihost_wrapper.c @@ -0,0 +1,114 @@ +/* + * Sonics Silicon Backplane + * PCI Hostdevice wrapper + * + * Copyright (c) 2005 Martin Langer + * Copyright (c) 2005 Stefano Brivio + * Copyright (c) 2005 Danny van Dyk + * Copyright (c) 2005 Andreas Jaggi + * Copyright (c) 2005-2007 Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. 
+ */ + +#include +#include + + +#ifdef CONFIG_PM +static int ssb_pcihost_suspend(struct pci_dev *dev, pm_message_t state) +{ + struct ssb_bus *ssb = pci_get_drvdata(dev); + int err; + + err = ssb_bus_suspend(ssb); + if (err) + return err; + pci_save_state(dev); + pci_disable_device(dev); + pci_set_power_state(dev, pci_choose_state(dev, state)); + + return 0; +} + +static int ssb_pcihost_resume(struct pci_dev *dev) +{ + struct ssb_bus *ssb = pci_get_drvdata(dev); + int err; + + pci_set_power_state(dev, 0); + err = pci_enable_device(dev); + if (err) + return err; + pci_restore_state(dev); + err = ssb_bus_resume(ssb); + if (err) + return err; + + return 0; +} +#else /* CONFIG_PM */ +# define ssb_pcihost_suspend NULL +# define ssb_pcihost_resume NULL +#endif /* CONFIG_PM */ + +static int ssb_pcihost_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + struct ssb_bus *ssb; + int err = -ENOMEM; + const char *name; + + ssb = kzalloc(sizeof(*ssb), GFP_KERNEL); + if (!ssb) + goto out; + err = pci_enable_device(dev); + if (err) + goto err_kfree_ssb; + name = dev_name(&dev->dev); + if (dev->driver && dev->driver->name) + name = dev->driver->name; + err = pci_request_regions(dev, name); + if (err) + goto err_pci_disable; + pci_set_master(dev); + + err = ssb_bus_pcibus_register(ssb, dev); + if (err) + goto err_pci_release_regions; + + pci_set_drvdata(dev, ssb); + +out: + return err; + +err_pci_release_regions: + pci_release_regions(dev); +err_pci_disable: + pci_disable_device(dev); +err_kfree_ssb: + kfree(ssb); + return err; +} + +static void ssb_pcihost_remove(struct pci_dev *dev) +{ + struct ssb_bus *ssb = pci_get_drvdata(dev); + + ssb_bus_unregister(ssb); + pci_release_regions(dev); + pci_disable_device(dev); + kfree(ssb); + pci_set_drvdata(dev, NULL); +} + +int ssb_pcihost_register(struct pci_driver *driver) +{ + driver->probe = ssb_pcihost_probe; + driver->remove = ssb_pcihost_remove; + driver->suspend = ssb_pcihost_suspend; + driver->resume = ssb_pcihost_resume; + + return pci_register_driver(driver); +} +EXPORT_SYMBOL(ssb_pcihost_register); diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c new file mode 100644 index 0000000..e72f404 --- /dev/null +++ b/drivers/ssb/pcmcia.c @@ -0,0 +1,864 @@ +/* + * Sonics Silicon Backplane + * PCMCIA-Hostbus related functions + * + * Copyright 2006 Johannes Berg + * Copyright 2007-2008 Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ssb_private.h" + + +/* Define the following to 1 to enable a printk on each coreswitch. 
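+ * (The PCI host code in pci.c has the matching
+ * SSB_VERBOSE_PCICORESWITCH_DEBUG knob.)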
*/ +#define SSB_VERBOSE_PCMCIACORESWITCH_DEBUG 0 + + +/* PCMCIA configuration registers */ +#define SSB_PCMCIA_ADDRESS0 0x2E +#define SSB_PCMCIA_ADDRESS1 0x30 +#define SSB_PCMCIA_ADDRESS2 0x32 +#define SSB_PCMCIA_MEMSEG 0x34 +#define SSB_PCMCIA_SPROMCTL 0x36 +#define SSB_PCMCIA_SPROMCTL_IDLE 0 +#define SSB_PCMCIA_SPROMCTL_WRITE 1 +#define SSB_PCMCIA_SPROMCTL_READ 2 +#define SSB_PCMCIA_SPROMCTL_WRITEEN 4 +#define SSB_PCMCIA_SPROMCTL_WRITEDIS 7 +#define SSB_PCMCIA_SPROMCTL_DONE 8 +#define SSB_PCMCIA_SPROM_DATALO 0x38 +#define SSB_PCMCIA_SPROM_DATAHI 0x3A +#define SSB_PCMCIA_SPROM_ADDRLO 0x3C +#define SSB_PCMCIA_SPROM_ADDRHI 0x3E + +/* Hardware invariants CIS tuples */ +#define SSB_PCMCIA_CIS 0x80 +#define SSB_PCMCIA_CIS_ID 0x01 +#define SSB_PCMCIA_CIS_BOARDREV 0x02 +#define SSB_PCMCIA_CIS_PA 0x03 +#define SSB_PCMCIA_CIS_PA_PA0B0_LO 0 +#define SSB_PCMCIA_CIS_PA_PA0B0_HI 1 +#define SSB_PCMCIA_CIS_PA_PA0B1_LO 2 +#define SSB_PCMCIA_CIS_PA_PA0B1_HI 3 +#define SSB_PCMCIA_CIS_PA_PA0B2_LO 4 +#define SSB_PCMCIA_CIS_PA_PA0B2_HI 5 +#define SSB_PCMCIA_CIS_PA_ITSSI 6 +#define SSB_PCMCIA_CIS_PA_MAXPOW 7 +#define SSB_PCMCIA_CIS_OEMNAME 0x04 +#define SSB_PCMCIA_CIS_CCODE 0x05 +#define SSB_PCMCIA_CIS_ANTENNA 0x06 +#define SSB_PCMCIA_CIS_ANTGAIN 0x07 +#define SSB_PCMCIA_CIS_BFLAGS 0x08 +#define SSB_PCMCIA_CIS_LEDS 0x09 + +/* PCMCIA SPROM size. */ +#define SSB_PCMCIA_SPROM_SIZE 256 +#define SSB_PCMCIA_SPROM_SIZE_BYTES (SSB_PCMCIA_SPROM_SIZE * sizeof(u16)) + + +/* Write to a PCMCIA configuration register. */ +static int ssb_pcmcia_cfg_write(struct ssb_bus *bus, u8 offset, u8 value) +{ + conf_reg_t reg; + int res; + + memset(®, 0, sizeof(reg)); + reg.Offset = offset; + reg.Action = CS_WRITE; + reg.Value = value; + res = pcmcia_access_configuration_register(bus->host_pcmcia, ®); + if (unlikely(res != 0)) + return -EBUSY; + + return 0; +} + +/* Read from a PCMCIA configuration register. 
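+ * Like the write helper above, this goes through
+ * pcmcia_access_configuration_register() and maps failures to -EBUSY.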
*/ +static int ssb_pcmcia_cfg_read(struct ssb_bus *bus, u8 offset, u8 *value) +{ + conf_reg_t reg; + int res; + + memset(®, 0, sizeof(reg)); + reg.Offset = offset; + reg.Action = CS_READ; + res = pcmcia_access_configuration_register(bus->host_pcmcia, ®); + if (unlikely(res != 0)) + return -EBUSY; + *value = reg.Value; + + return 0; +} + +int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus, + u8 coreidx) +{ + int err; + int attempts = 0; + u32 cur_core; + u32 addr; + u32 read_addr; + u8 val; + + addr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE; + while (1) { + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_ADDRESS0, + (addr & 0x0000F000) >> 12); + if (err) + goto error; + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_ADDRESS1, + (addr & 0x00FF0000) >> 16); + if (err) + goto error; + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_ADDRESS2, + (addr & 0xFF000000) >> 24); + if (err) + goto error; + + read_addr = 0; + + err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_ADDRESS0, &val); + if (err) + goto error; + read_addr |= ((u32)(val & 0x0F)) << 12; + err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_ADDRESS1, &val); + if (err) + goto error; + read_addr |= ((u32)val) << 16; + err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_ADDRESS2, &val); + if (err) + goto error; + read_addr |= ((u32)val) << 24; + + cur_core = (read_addr - SSB_ENUM_BASE) / SSB_CORE_SIZE; + if (cur_core == coreidx) + break; + + err = -ETIMEDOUT; + if (attempts++ > SSB_BAR0_MAX_RETRIES) + goto error; + udelay(10); + } + + return 0; +error: + ssb_printk(KERN_ERR PFX "Failed to switch to core %u\n", coreidx); + return err; +} + +int ssb_pcmcia_switch_core(struct ssb_bus *bus, + struct ssb_device *dev) +{ + int err; + +#if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG + ssb_printk(KERN_INFO PFX + "Switching to %s core, index %d\n", + ssb_core_name(dev->id.coreid), + dev->core_index); +#endif + + err = ssb_pcmcia_switch_coreidx(bus, dev->core_index); + if (!err) + bus->mapped_device = dev; + + return err; +} + +int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg) +{ + int attempts = 0; + int err; + u8 val; + + SSB_WARN_ON((seg != 0) && (seg != 1)); + while (1) { + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_MEMSEG, seg); + if (err) + goto error; + err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_MEMSEG, &val); + if (err) + goto error; + if (val == seg) + break; + + err = -ETIMEDOUT; + if (unlikely(attempts++ > SSB_BAR0_MAX_RETRIES)) + goto error; + udelay(10); + } + bus->mapped_pcmcia_seg = seg; + + return 0; +error: + ssb_printk(KERN_ERR PFX "Failed to switch pcmcia segment\n"); + return err; +} + +static int select_core_and_segment(struct ssb_device *dev, + u16 *offset) +{ + struct ssb_bus *bus = dev->bus; + int err; + u8 need_segment; + + if (*offset >= 0x800) { + *offset -= 0x800; + need_segment = 1; + } else + need_segment = 0; + + if (unlikely(dev != bus->mapped_device)) { + err = ssb_pcmcia_switch_core(bus, dev); + if (unlikely(err)) + return err; + } + if (unlikely(need_segment != bus->mapped_pcmcia_seg)) { + err = ssb_pcmcia_switch_segment(bus, need_segment); + if (unlikely(err)) + return err; + } + + return 0; +} + +static u8 ssb_pcmcia_read8(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + unsigned long flags; + int err; + u8 value = 0xFF; + + spin_lock_irqsave(&bus->bar_lock, flags); + err = select_core_and_segment(dev, &offset); + if (likely(!err)) + value = readb(bus->mmio + offset); + spin_unlock_irqrestore(&bus->bar_lock, flags); + + return value; +} + +static u16 ssb_pcmcia_read16(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus 
*bus = dev->bus; + unsigned long flags; + int err; + u16 value = 0xFFFF; + + spin_lock_irqsave(&bus->bar_lock, flags); + err = select_core_and_segment(dev, &offset); + if (likely(!err)) + value = readw(bus->mmio + offset); + spin_unlock_irqrestore(&bus->bar_lock, flags); + + return value; +} + +static u32 ssb_pcmcia_read32(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + unsigned long flags; + int err; + u32 lo = 0xFFFFFFFF, hi = 0xFFFFFFFF; + + spin_lock_irqsave(&bus->bar_lock, flags); + err = select_core_and_segment(dev, &offset); + if (likely(!err)) { + lo = readw(bus->mmio + offset); + hi = readw(bus->mmio + offset + 2); + } + spin_unlock_irqrestore(&bus->bar_lock, flags); + + return (lo | (hi << 16)); +} + +#ifdef CONFIG_SSB_BLOCKIO +static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + struct ssb_bus *bus = dev->bus; + unsigned long flags; + void __iomem *addr = bus->mmio + offset; + int err; + + spin_lock_irqsave(&bus->bar_lock, flags); + err = select_core_and_segment(dev, &offset); + if (unlikely(err)) { + memset(buffer, 0xFF, count); + goto unlock; + } + switch (reg_width) { + case sizeof(u8): { + u8 *buf = buffer; + + while (count) { + *buf = __raw_readb(addr); + buf++; + count--; + } + break; + } + case sizeof(u16): { + __le16 *buf = buffer; + + SSB_WARN_ON(count & 1); + while (count) { + *buf = (__force __le16)__raw_readw(addr); + buf++; + count -= 2; + } + break; + } + case sizeof(u32): { + __le16 *buf = buffer; + + SSB_WARN_ON(count & 3); + while (count) { + *buf = (__force __le16)__raw_readw(addr); + buf++; + *buf = (__force __le16)__raw_readw(addr + 2); + buf++; + count -= 4; + } + break; + } + default: + SSB_WARN_ON(1); + } +unlock: + spin_unlock_irqrestore(&bus->bar_lock, flags); +} +#endif /* CONFIG_SSB_BLOCKIO */ + +static void ssb_pcmcia_write8(struct ssb_device *dev, u16 offset, u8 value) +{ + struct ssb_bus *bus = dev->bus; + unsigned long flags; + int err; + + spin_lock_irqsave(&bus->bar_lock, flags); + err = select_core_and_segment(dev, &offset); + if (likely(!err)) + writeb(value, bus->mmio + offset); + mmiowb(); + spin_unlock_irqrestore(&bus->bar_lock, flags); +} + +static void ssb_pcmcia_write16(struct ssb_device *dev, u16 offset, u16 value) +{ + struct ssb_bus *bus = dev->bus; + unsigned long flags; + int err; + + spin_lock_irqsave(&bus->bar_lock, flags); + err = select_core_and_segment(dev, &offset); + if (likely(!err)) + writew(value, bus->mmio + offset); + mmiowb(); + spin_unlock_irqrestore(&bus->bar_lock, flags); +} + +static void ssb_pcmcia_write32(struct ssb_device *dev, u16 offset, u32 value) +{ + struct ssb_bus *bus = dev->bus; + unsigned long flags; + int err; + + spin_lock_irqsave(&bus->bar_lock, flags); + err = select_core_and_segment(dev, &offset); + if (likely(!err)) { + writew((value & 0x0000FFFF), bus->mmio + offset); + writew(((value & 0xFFFF0000) >> 16), bus->mmio + offset + 2); + } + mmiowb(); + spin_unlock_irqrestore(&bus->bar_lock, flags); +} + +#ifdef CONFIG_SSB_BLOCKIO +static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + struct ssb_bus *bus = dev->bus; + unsigned long flags; + void __iomem *addr = bus->mmio + offset; + int err; + + spin_lock_irqsave(&bus->bar_lock, flags); + err = select_core_and_segment(dev, &offset); + if (unlikely(err)) + goto unlock; + switch (reg_width) { + case sizeof(u8): { + const u8 *buf = buffer; + + while (count) { + __raw_writeb(*buf, addr); + 
buf++; + count--; + } + break; + } + case sizeof(u16): { + const __le16 *buf = buffer; + + SSB_WARN_ON(count & 1); + while (count) { + __raw_writew((__force u16)(*buf), addr); + buf++; + count -= 2; + } + break; + } + case sizeof(u32): { + const __le16 *buf = buffer; + + SSB_WARN_ON(count & 3); + while (count) { + __raw_writew((__force u16)(*buf), addr); + buf++; + __raw_writew((__force u16)(*buf), addr + 2); + buf++; + count -= 4; + } + break; + } + default: + SSB_WARN_ON(1); + } +unlock: + mmiowb(); + spin_unlock_irqrestore(&bus->bar_lock, flags); +} +#endif /* CONFIG_SSB_BLOCKIO */ + +/* Not "static", as it's used in main.c */ +const struct ssb_bus_ops ssb_pcmcia_ops = { + .read8 = ssb_pcmcia_read8, + .read16 = ssb_pcmcia_read16, + .read32 = ssb_pcmcia_read32, + .write8 = ssb_pcmcia_write8, + .write16 = ssb_pcmcia_write16, + .write32 = ssb_pcmcia_write32, +#ifdef CONFIG_SSB_BLOCKIO + .block_read = ssb_pcmcia_block_read, + .block_write = ssb_pcmcia_block_write, +#endif +}; + +static int ssb_pcmcia_sprom_command(struct ssb_bus *bus, u8 command) +{ + unsigned int i; + int err; + u8 value; + + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROMCTL, command); + if (err) + return err; + for (i = 0; i < 1000; i++) { + err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_SPROMCTL, &value); + if (err) + return err; + if (value & SSB_PCMCIA_SPROMCTL_DONE) + return 0; + udelay(10); + } + + return -ETIMEDOUT; +} + +/* offset is the 16bit word offset */ +static int ssb_pcmcia_sprom_read(struct ssb_bus *bus, u16 offset, u16 *value) +{ + int err; + u8 lo, hi; + + offset *= 2; /* Make byte offset */ + + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_ADDRLO, + (offset & 0x00FF)); + if (err) + return err; + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_ADDRHI, + (offset & 0xFF00) >> 8); + if (err) + return err; + err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_READ); + if (err) + return err; + err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_SPROM_DATALO, &lo); + if (err) + return err; + err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_SPROM_DATAHI, &hi); + if (err) + return err; + *value = (lo | (((u16)hi) << 8)); + + return 0; +} + +/* offset is the 16bit word offset */ +static int ssb_pcmcia_sprom_write(struct ssb_bus *bus, u16 offset, u16 value) +{ + int err; + + offset *= 2; /* Make byte offset */ + + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_ADDRLO, + (offset & 0x00FF)); + if (err) + return err; + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_ADDRHI, + (offset & 0xFF00) >> 8); + if (err) + return err; + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_DATALO, + (value & 0x00FF)); + if (err) + return err; + err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_DATAHI, + (value & 0xFF00) >> 8); + if (err) + return err; + err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITE); + if (err) + return err; + msleep(20); + + return 0; +} + +/* Read the SPROM image. bufsize is in 16bit words. */ +static int ssb_pcmcia_sprom_read_all(struct ssb_bus *bus, u16 *sprom) +{ + int err, i; + + for (i = 0; i < SSB_PCMCIA_SPROM_SIZE; i++) { + err = ssb_pcmcia_sprom_read(bus, i, &sprom[i]); + if (err) + return err; + } + + return 0; +} + +/* Write the SPROM image. size is in 16bit words. */ +static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom) +{ + int i, err; + bool failed = 0; + size_t size = SSB_PCMCIA_SPROM_SIZE; + + ssb_printk(KERN_NOTICE PFX + "Writing SPROM. Do NOT turn off the power! 
" + "Please stand by...\n"); + err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEEN); + if (err) { + ssb_printk(KERN_NOTICE PFX + "Could not enable SPROM write access.\n"); + return -EBUSY; + } + ssb_printk(KERN_NOTICE PFX "[ 0%%"); + msleep(500); + for (i = 0; i < size; i++) { + if (i == size / 4) + ssb_printk("25%%"); + else if (i == size / 2) + ssb_printk("50%%"); + else if (i == (size * 3) / 4) + ssb_printk("75%%"); + else if (i % 2) + ssb_printk("."); + err = ssb_pcmcia_sprom_write(bus, i, sprom[i]); + if (err) { + ssb_printk(KERN_NOTICE PFX + "Failed to write to SPROM.\n"); + failed = 1; + break; + } + } + err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS); + if (err) { + ssb_printk(KERN_NOTICE PFX + "Could not disable SPROM write access.\n"); + failed = 1; + } + msleep(500); + if (!failed) { + ssb_printk("100%% ]\n"); + ssb_printk(KERN_NOTICE PFX "SPROM written.\n"); + } + + return failed ? -EBUSY : 0; +} + +static int ssb_pcmcia_sprom_check_crc(const u16 *sprom, size_t size) +{ + //TODO + return 0; +} + +#define GOTO_ERROR_ON(condition, description) do { \ + if (unlikely(condition)) { \ + error_description = description; \ + goto error; \ + } \ + } while (0) + +static int ssb_pcmcia_get_mac(struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv) +{ + struct ssb_sprom *sprom = priv; + + if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID) + return -EINVAL; + if (tuple->TupleDataLen != ETH_ALEN + 2) + return -EINVAL; + if (tuple->TupleData[1] != ETH_ALEN) + return -EINVAL; + memcpy(sprom->il0mac, &tuple->TupleData[2], ETH_ALEN); + return 0; +}; + +static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv) +{ + struct ssb_init_invariants *iv = priv; + struct ssb_sprom *sprom = &iv->sprom; + struct ssb_boardinfo *bi = &iv->boardinfo; + const char *error_description; + + GOTO_ERROR_ON(tuple->TupleDataLen < 1, "VEN tpl < 1"); + switch (tuple->TupleData[0]) { + case SSB_PCMCIA_CIS_ID: + GOTO_ERROR_ON((tuple->TupleDataLen != 5) && + (tuple->TupleDataLen != 7), + "id tpl size"); + bi->vendor = tuple->TupleData[1] | + ((u16)tuple->TupleData[2] << 8); + break; + case SSB_PCMCIA_CIS_BOARDREV: + GOTO_ERROR_ON(tuple->TupleDataLen != 2, + "boardrev tpl size"); + sprom->board_rev = tuple->TupleData[1]; + break; + case SSB_PCMCIA_CIS_PA: + GOTO_ERROR_ON((tuple->TupleDataLen != 9) && + (tuple->TupleDataLen != 10), + "pa tpl size"); + sprom->pa0b0 = tuple->TupleData[1] | + ((u16)tuple->TupleData[2] << 8); + sprom->pa0b1 = tuple->TupleData[3] | + ((u16)tuple->TupleData[4] << 8); + sprom->pa0b2 = tuple->TupleData[5] | + ((u16)tuple->TupleData[6] << 8); + sprom->itssi_a = tuple->TupleData[7]; + sprom->itssi_bg = tuple->TupleData[7]; + sprom->maxpwr_a = tuple->TupleData[8]; + sprom->maxpwr_bg = tuple->TupleData[8]; + break; + case SSB_PCMCIA_CIS_OEMNAME: + /* We ignore this. 
*/ + break; + case SSB_PCMCIA_CIS_CCODE: + GOTO_ERROR_ON(tuple->TupleDataLen != 2, + "ccode tpl size"); + sprom->country_code = tuple->TupleData[1]; + break; + case SSB_PCMCIA_CIS_ANTENNA: + GOTO_ERROR_ON(tuple->TupleDataLen != 2, + "ant tpl size"); + sprom->ant_available_a = tuple->TupleData[1]; + sprom->ant_available_bg = tuple->TupleData[1]; + break; + case SSB_PCMCIA_CIS_ANTGAIN: + GOTO_ERROR_ON(tuple->TupleDataLen != 2, + "antg tpl size"); + sprom->antenna_gain.ghz24.a0 = tuple->TupleData[1]; + sprom->antenna_gain.ghz24.a1 = tuple->TupleData[1]; + sprom->antenna_gain.ghz24.a2 = tuple->TupleData[1]; + sprom->antenna_gain.ghz24.a3 = tuple->TupleData[1]; + sprom->antenna_gain.ghz5.a0 = tuple->TupleData[1]; + sprom->antenna_gain.ghz5.a1 = tuple->TupleData[1]; + sprom->antenna_gain.ghz5.a2 = tuple->TupleData[1]; + sprom->antenna_gain.ghz5.a3 = tuple->TupleData[1]; + break; + case SSB_PCMCIA_CIS_BFLAGS: + GOTO_ERROR_ON((tuple->TupleDataLen != 3) && + (tuple->TupleDataLen != 5), + "bfl tpl size"); + sprom->boardflags_lo = tuple->TupleData[1] | + ((u16)tuple->TupleData[2] << 8); + break; + case SSB_PCMCIA_CIS_LEDS: + GOTO_ERROR_ON(tuple->TupleDataLen != 5, + "leds tpl size"); + sprom->gpio0 = tuple->TupleData[1]; + sprom->gpio1 = tuple->TupleData[2]; + sprom->gpio2 = tuple->TupleData[3]; + sprom->gpio3 = tuple->TupleData[4]; + break; + } + return -ENOSPC; /* continue with next entry */ + +error: + ssb_printk(KERN_ERR PFX + "PCMCIA: Failed to fetch device invariants: %s\n", + error_description); + return -ENODEV; +} + + +int ssb_pcmcia_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv) +{ + struct ssb_sprom *sprom = &iv->sprom; + int res; + + memset(sprom, 0xFF, sizeof(*sprom)); + sprom->revision = 1; + sprom->boardflags_lo = 0; + sprom->boardflags_hi = 0; + + /* First fetch the MAC address. */ + res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE, + ssb_pcmcia_get_mac, sprom); + if (res != 0) { + ssb_printk(KERN_ERR PFX + "PCMCIA: Failed to fetch MAC address\n"); + return -ENODEV; + } + + /* Fetch the vendor specific tuples. 
*/ + res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS, + ssb_pcmcia_do_get_invariants, sprom); + if ((res == 0) || (res == -ENOSPC)) + return 0; + + ssb_printk(KERN_ERR PFX + "PCMCIA: Failed to fetch device invariants\n"); + return -ENODEV; +} + +static ssize_t ssb_pcmcia_attr_sprom_show(struct device *pcmciadev, + struct device_attribute *attr, + char *buf) +{ + struct pcmcia_device *pdev = + container_of(pcmciadev, struct pcmcia_device, dev); + struct ssb_bus *bus; + + bus = ssb_pcmcia_dev_to_bus(pdev); + if (!bus) + return -ENODEV; + + return ssb_attr_sprom_show(bus, buf, + ssb_pcmcia_sprom_read_all); +} + +static ssize_t ssb_pcmcia_attr_sprom_store(struct device *pcmciadev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pcmcia_device *pdev = + container_of(pcmciadev, struct pcmcia_device, dev); + struct ssb_bus *bus; + + bus = ssb_pcmcia_dev_to_bus(pdev); + if (!bus) + return -ENODEV; + + return ssb_attr_sprom_store(bus, buf, count, + ssb_pcmcia_sprom_check_crc, + ssb_pcmcia_sprom_write_all); +} + +static DEVICE_ATTR(ssb_sprom, 0600, + ssb_pcmcia_attr_sprom_show, + ssb_pcmcia_attr_sprom_store); + +static int ssb_pcmcia_cor_setup(struct ssb_bus *bus, u8 cor) +{ + u8 val; + int err; + + err = ssb_pcmcia_cfg_read(bus, cor, &val); + if (err) + return err; + val &= ~COR_SOFT_RESET; + val |= COR_FUNC_ENA | COR_IREQ_ENA | COR_LEVEL_REQ; + err = ssb_pcmcia_cfg_write(bus, cor, val); + if (err) + return err; + msleep(40); + + return 0; +} + +/* Initialize the PCMCIA hardware. This is called on Init and Resume. */ +int ssb_pcmcia_hardware_setup(struct ssb_bus *bus) +{ + int err; + + if (bus->bustype != SSB_BUSTYPE_PCMCIA) + return 0; + + /* Switch segment to a known state and sync + * bus->mapped_pcmcia_seg with hardware state. */ + ssb_pcmcia_switch_segment(bus, 0); + /* Init the COR register. */ + err = ssb_pcmcia_cor_setup(bus, CISREG_COR); + if (err) + return err; + /* Some cards also need this register to get poked. */ + err = ssb_pcmcia_cor_setup(bus, CISREG_COR + 0x80); + if (err) + return err; + + return 0; +} + +void ssb_pcmcia_exit(struct ssb_bus *bus) +{ + if (bus->bustype != SSB_BUSTYPE_PCMCIA) + return; + + device_remove_file(&bus->host_pcmcia->dev, &dev_attr_ssb_sprom); +} + +int ssb_pcmcia_init(struct ssb_bus *bus) +{ + int err; + + if (bus->bustype != SSB_BUSTYPE_PCMCIA) + return 0; + + err = ssb_pcmcia_hardware_setup(bus); + if (err) + goto error; + + bus->sprom_size = SSB_PCMCIA_SPROM_SIZE; + mutex_init(&bus->sprom_mutex); + err = device_create_file(&bus->host_pcmcia->dev, &dev_attr_ssb_sprom); + if (err) + goto error; + + return 0; +error: + ssb_printk(KERN_ERR PFX "Failed to initialize PCMCIA host device\n"); + return err; +} diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c new file mode 100644 index 0000000..0d6c028 --- /dev/null +++ b/drivers/ssb/scan.c @@ -0,0 +1,439 @@ +/* + * Sonics Silicon Backplane + * Bus scanning + * + * Copyright (C) 2005-2007 Michael Buesch + * Copyright (C) 2005 Martin Langer + * Copyright (C) 2005 Stefano Brivio + * Copyright (C) 2005 Danny van Dyk + * Copyright (C) 2005 Andreas Jaggi + * Copyright (C) 2006 Broadcom Corporation. + * + * Licensed under the GNU/GPL. See COPYING for details. 
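Earlier, pcmcia.c created the ssb_sprom device attribute with DEVICE_ATTR(ssb_sprom, 0600, ...), so the SPROM image is read and written as a hex dump through sysfs. A small user-space reader sketch follows; the sysfs path is hypothetical and depends on how the PCMCIA device is enumerated on a given system.

/* User-space sketch only; the path below is an example, not a fixed API. */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/bus/pcmcia/devices/0.0/ssb_sprom"; /* hypothetical */
        char buf[1024];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        while (fgets(buf, sizeof(buf), f))
                fputs(buf, stdout);     /* 16-bit words printed as %04X */
        fclose(f);
        return 0;
}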
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ssb_private.h" + + +const char *ssb_core_name(u16 coreid) +{ + switch (coreid) { + case SSB_DEV_CHIPCOMMON: + return "ChipCommon"; + case SSB_DEV_ILINE20: + return "ILine 20"; + case SSB_DEV_SDRAM: + return "SDRAM"; + case SSB_DEV_PCI: + return "PCI"; + case SSB_DEV_MIPS: + return "MIPS"; + case SSB_DEV_ETHERNET: + return "Fast Ethernet"; + case SSB_DEV_V90: + return "V90"; + case SSB_DEV_USB11_HOSTDEV: + return "USB 1.1 Hostdev"; + case SSB_DEV_ADSL: + return "ADSL"; + case SSB_DEV_ILINE100: + return "ILine 100"; + case SSB_DEV_IPSEC: + return "IPSEC"; + case SSB_DEV_PCMCIA: + return "PCMCIA"; + case SSB_DEV_INTERNAL_MEM: + return "Internal Memory"; + case SSB_DEV_MEMC_SDRAM: + return "MEMC SDRAM"; + case SSB_DEV_EXTIF: + return "EXTIF"; + case SSB_DEV_80211: + return "IEEE 802.11"; + case SSB_DEV_MIPS_3302: + return "MIPS 3302"; + case SSB_DEV_USB11_HOST: + return "USB 1.1 Host"; + case SSB_DEV_USB11_DEV: + return "USB 1.1 Device"; + case SSB_DEV_USB20_HOST: + return "USB 2.0 Host"; + case SSB_DEV_USB20_DEV: + return "USB 2.0 Device"; + case SSB_DEV_SDIO_HOST: + return "SDIO Host"; + case SSB_DEV_ROBOSWITCH: + return "Roboswitch"; + case SSB_DEV_PARA_ATA: + return "PATA"; + case SSB_DEV_SATA_XORDMA: + return "SATA XOR-DMA"; + case SSB_DEV_ETHERNET_GBIT: + return "GBit Ethernet"; + case SSB_DEV_PCIE: + return "PCI-E"; + case SSB_DEV_MIMO_PHY: + return "MIMO PHY"; + case SSB_DEV_SRAM_CTRLR: + return "SRAM Controller"; + case SSB_DEV_MINI_MACPHY: + return "Mini MACPHY"; + case SSB_DEV_ARM_1176: + return "ARM 1176"; + case SSB_DEV_ARM_7TDMI: + return "ARM 7TDMI"; + } + return "UNKNOWN"; +} + +static u16 pcidev_to_chipid(struct pci_dev *pci_dev) +{ + u16 chipid_fallback = 0; + + switch (pci_dev->device) { + case 0x4301: + chipid_fallback = 0x4301; + break; + case 0x4305 ... 0x4307: + chipid_fallback = 0x4307; + break; + case 0x4403: + chipid_fallback = 0x4402; + break; + case 0x4610 ... 0x4615: + chipid_fallback = 0x4610; + break; + case 0x4710 ... 0x4715: + chipid_fallback = 0x4710; + break; + case 0x4320 ... 
0x4325: + chipid_fallback = 0x4309; + break; + case PCI_DEVICE_ID_BCM4401: + case PCI_DEVICE_ID_BCM4401B0: + case PCI_DEVICE_ID_BCM4401B1: + chipid_fallback = 0x4401; + break; + default: + ssb_printk(KERN_ERR PFX + "PCI-ID not in fallback list\n"); + } + + return chipid_fallback; +} + +static u8 chipid_to_nrcores(u16 chipid) +{ + switch (chipid) { + case 0x5365: + return 7; + case 0x4306: + return 6; + case 0x4310: + return 8; + case 0x4307: + case 0x4301: + return 5; + case 0x4401: + case 0x4402: + return 3; + case 0x4710: + case 0x4610: + case 0x4704: + return 9; + default: + ssb_printk(KERN_ERR PFX + "CHIPID not in nrcores fallback list\n"); + } + + return 1; +} + +static u32 scan_read32(struct ssb_bus *bus, u8 current_coreidx, + u16 offset) +{ + u32 lo, hi; + + switch (bus->bustype) { + case SSB_BUSTYPE_SSB: + offset += current_coreidx * SSB_CORE_SIZE; + break; + case SSB_BUSTYPE_PCI: + break; + case SSB_BUSTYPE_PCMCIA: + if (offset >= 0x800) { + ssb_pcmcia_switch_segment(bus, 1); + offset -= 0x800; + } else + ssb_pcmcia_switch_segment(bus, 0); + lo = readw(bus->mmio + offset); + hi = readw(bus->mmio + offset + 2); + return lo | (hi << 16); + case SSB_BUSTYPE_SDIO: + offset += current_coreidx * SSB_CORE_SIZE; + return ssb_sdio_scan_read32(bus, offset); + } + return readl(bus->mmio + offset); +} + +static int scan_switchcore(struct ssb_bus *bus, u8 coreidx) +{ + switch (bus->bustype) { + case SSB_BUSTYPE_SSB: + break; + case SSB_BUSTYPE_PCI: + return ssb_pci_switch_coreidx(bus, coreidx); + case SSB_BUSTYPE_PCMCIA: + return ssb_pcmcia_switch_coreidx(bus, coreidx); + case SSB_BUSTYPE_SDIO: + return ssb_sdio_scan_switch_coreidx(bus, coreidx); + } + return 0; +} + +void ssb_iounmap(struct ssb_bus *bus) +{ + switch (bus->bustype) { + case SSB_BUSTYPE_SSB: + case SSB_BUSTYPE_PCMCIA: + iounmap(bus->mmio); + break; + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + pci_iounmap(bus->host_pci, bus->mmio); +#else + SSB_BUG_ON(1); /* Can't reach this code. */ +#endif + break; + case SSB_BUSTYPE_SDIO: + break; + } + bus->mmio = NULL; + bus->mapped_device = NULL; +} + +static void __iomem *ssb_ioremap(struct ssb_bus *bus, + unsigned long baseaddr) +{ + void __iomem *mmio = NULL; + + switch (bus->bustype) { + case SSB_BUSTYPE_SSB: + /* Only map the first core for now. */ + /* fallthrough... */ + case SSB_BUSTYPE_PCMCIA: + mmio = ioremap(baseaddr, SSB_CORE_SIZE); + break; + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + mmio = pci_iomap(bus->host_pci, 0, ~0UL); +#else + SSB_BUG_ON(1); /* Can't reach this code. */ +#endif + break; + case SSB_BUSTYPE_SDIO: + /* Nothing to ioremap in the SDIO case, just fake it */ + mmio = (void __iomem *)baseaddr; + break; + } + + return mmio; +} + +static int we_support_multiple_80211_cores(struct ssb_bus *bus) +{ + /* More than one 802.11 core is only supported by special chips. + * There are chips with two 802.11 cores, but with dangling + * pins on the second core. Be careful and reject them here. 
+ */ + +#ifdef CONFIG_SSB_PCIHOST + if (bus->bustype == SSB_BUSTYPE_PCI) { + if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM && + bus->host_pci->device == 0x4324) + return 1; + } +#endif /* CONFIG_SSB_PCIHOST */ + return 0; +} + +int ssb_bus_scan(struct ssb_bus *bus, + unsigned long baseaddr) +{ + int err = -ENOMEM; + void __iomem *mmio; + u32 idhi, cc, rev, tmp; + int dev_i, i; + struct ssb_device *dev; + int nr_80211_cores = 0; + + mmio = ssb_ioremap(bus, baseaddr); + if (!mmio) + goto out; + bus->mmio = mmio; + + err = scan_switchcore(bus, 0); /* Switch to first core */ + if (err) + goto err_unmap; + + idhi = scan_read32(bus, 0, SSB_IDHIGH); + cc = (idhi & SSB_IDHIGH_CC) >> SSB_IDHIGH_CC_SHIFT; + rev = (idhi & SSB_IDHIGH_RCLO); + rev |= (idhi & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT; + + bus->nr_devices = 0; + if (cc == SSB_DEV_CHIPCOMMON) { + tmp = scan_read32(bus, 0, SSB_CHIPCO_CHIPID); + + bus->chip_id = (tmp & SSB_CHIPCO_IDMASK); + bus->chip_rev = (tmp & SSB_CHIPCO_REVMASK) >> + SSB_CHIPCO_REVSHIFT; + bus->chip_package = (tmp & SSB_CHIPCO_PACKMASK) >> + SSB_CHIPCO_PACKSHIFT; + if (rev >= 4) { + bus->nr_devices = (tmp & SSB_CHIPCO_NRCORESMASK) >> + SSB_CHIPCO_NRCORESSHIFT; + } + tmp = scan_read32(bus, 0, SSB_CHIPCO_CAP); + bus->chipco.capabilities = tmp; + } else { + if (bus->bustype == SSB_BUSTYPE_PCI) { + bus->chip_id = pcidev_to_chipid(bus->host_pci); + pci_read_config_word(bus->host_pci, PCI_REVISION_ID, + &bus->chip_rev); + bus->chip_package = 0; + } else { + bus->chip_id = 0x4710; + bus->chip_rev = 0; + bus->chip_package = 0; + } + } + if (!bus->nr_devices) + bus->nr_devices = chipid_to_nrcores(bus->chip_id); + if (bus->nr_devices > ARRAY_SIZE(bus->devices)) { + ssb_printk(KERN_ERR PFX + "More than %d ssb cores found (%d)\n", + SSB_MAX_NR_CORES, bus->nr_devices); + goto err_unmap; + } + if (bus->bustype == SSB_BUSTYPE_SSB) { + /* Now that we know the number of cores, + * remap the whole IO space for all cores. 
+ */ + err = -ENOMEM; + iounmap(mmio); + mmio = ioremap(baseaddr, SSB_CORE_SIZE * bus->nr_devices); + if (!mmio) + goto out; + bus->mmio = mmio; + } + + /* Fetch basic information about each core/device */ + for (i = 0, dev_i = 0; i < bus->nr_devices; i++) { + err = scan_switchcore(bus, i); + if (err) + goto err_unmap; + dev = &(bus->devices[dev_i]); + + idhi = scan_read32(bus, i, SSB_IDHIGH); + dev->id.coreid = (idhi & SSB_IDHIGH_CC) >> SSB_IDHIGH_CC_SHIFT; + dev->id.revision = (idhi & SSB_IDHIGH_RCLO); + dev->id.revision |= (idhi & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT; + dev->id.vendor = (idhi & SSB_IDHIGH_VC) >> SSB_IDHIGH_VC_SHIFT; + dev->core_index = i; + dev->bus = bus; + dev->ops = bus->ops; + + printk(KERN_DEBUG PFX + "Core %d found: %s " + "(cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n", + i, ssb_core_name(dev->id.coreid), + dev->id.coreid, dev->id.revision, dev->id.vendor); + + switch (dev->id.coreid) { + case SSB_DEV_80211: + nr_80211_cores++; + if (nr_80211_cores > 1) { + if (!we_support_multiple_80211_cores(bus)) { + ssb_dprintk(KERN_INFO PFX "Ignoring additional " + "802.11 core\n"); + continue; + } + } + break; + case SSB_DEV_EXTIF: +#ifdef CONFIG_SSB_DRIVER_EXTIF + if (bus->extif.dev) { + ssb_printk(KERN_WARNING PFX + "WARNING: Multiple EXTIFs found\n"); + break; + } + bus->extif.dev = dev; +#endif /* CONFIG_SSB_DRIVER_EXTIF */ + break; + case SSB_DEV_CHIPCOMMON: + if (bus->chipco.dev) { + ssb_printk(KERN_WARNING PFX + "WARNING: Multiple ChipCommon found\n"); + break; + } + bus->chipco.dev = dev; + break; + case SSB_DEV_MIPS: + case SSB_DEV_MIPS_3302: +#ifdef CONFIG_SSB_DRIVER_MIPS + if (bus->mipscore.dev) { + ssb_printk(KERN_WARNING PFX + "WARNING: Multiple MIPS cores found\n"); + break; + } + bus->mipscore.dev = dev; +#endif /* CONFIG_SSB_DRIVER_MIPS */ + break; + case SSB_DEV_PCI: + case SSB_DEV_PCIE: +#ifdef CONFIG_SSB_DRIVER_PCICORE + if (bus->bustype == SSB_BUSTYPE_PCI) { + /* Ignore PCI cores on PCI-E cards. + * Ignore PCI-E cores on PCI cards. */ + if (dev->id.coreid == SSB_DEV_PCI) { + if (bus->host_pci->is_pcie) + continue; + } else { + if (!bus->host_pci->is_pcie) + continue; + } + } + if (bus->pcicore.dev) { + ssb_printk(KERN_WARNING PFX + "WARNING: Multiple PCI(E) cores found\n"); + break; + } + bus->pcicore.dev = dev; +#endif /* CONFIG_SSB_DRIVER_PCICORE */ + break; + default: + break; + } + + dev_i++; + } + bus->nr_devices = dev_i; + + err = 0; +out: + return err; +err_unmap: + ssb_iounmap(bus); + goto out; +} diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c new file mode 100644 index 0000000..65a6080 --- /dev/null +++ b/drivers/ssb/sdio.c @@ -0,0 +1,610 @@ +/* + * Sonics Silicon Backplane + * SDIO-Hostbus related functions + * + * Copyright 2009 Albert Herranz + * + * Based on drivers/ssb/pcmcia.c + * Copyright 2006 Johannes Berg + * Copyright 2007-2008 Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + * + */ + +#include +#include +#include +#include +#include + +#include "ssb_private.h" + +/* Define the following to 1 to enable a printk on each coreswitch. 
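The scan loop above fills bus->devices[] with one entry per discovered core. As a rough illustration (not part of the patch), code inside drivers/ssb/ could walk that array after ssb_bus_scan() like this:

/*
 * Illustrative only. Uses the internal ssb_core_name() helper, so this
 * sketch only makes sense inside drivers/ssb/ where ssb_private.h is
 * available.
 */
#include <linux/kernel.h>
#include <linux/ssb/ssb.h>
#include "ssb_private.h"

static void example_dump_cores(struct ssb_bus *bus)
{
        int i;

        for (i = 0; i < bus->nr_devices; i++) {
                struct ssb_device *dev = &bus->devices[i];

                printk(KERN_DEBUG "example: core %d is %s (rev 0x%02X)\n",
                       i, ssb_core_name(dev->id.coreid), dev->id.revision);
        }
}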
*/ +#define SSB_VERBOSE_SDIOCORESWITCH_DEBUG 0 + + +/* Hardware invariants CIS tuples */ +#define SSB_SDIO_CIS 0x80 +#define SSB_SDIO_CIS_SROMREV 0x00 +#define SSB_SDIO_CIS_ID 0x01 +#define SSB_SDIO_CIS_BOARDREV 0x02 +#define SSB_SDIO_CIS_PA 0x03 +#define SSB_SDIO_CIS_PA_PA0B0_LO 0 +#define SSB_SDIO_CIS_PA_PA0B0_HI 1 +#define SSB_SDIO_CIS_PA_PA0B1_LO 2 +#define SSB_SDIO_CIS_PA_PA0B1_HI 3 +#define SSB_SDIO_CIS_PA_PA0B2_LO 4 +#define SSB_SDIO_CIS_PA_PA0B2_HI 5 +#define SSB_SDIO_CIS_PA_ITSSI 6 +#define SSB_SDIO_CIS_PA_MAXPOW 7 +#define SSB_SDIO_CIS_OEMNAME 0x04 +#define SSB_SDIO_CIS_CCODE 0x05 +#define SSB_SDIO_CIS_ANTENNA 0x06 +#define SSB_SDIO_CIS_ANTGAIN 0x07 +#define SSB_SDIO_CIS_BFLAGS 0x08 +#define SSB_SDIO_CIS_LEDS 0x09 + +#define CISTPL_FUNCE_LAN_NODE_ID 0x04 /* same as in PCMCIA */ + + +/* + * Function 1 miscellaneous registers. + * + * Definitions match src/include/sbsdio.h from the + * Android Open Source Project + * http://android.git.kernel.org/?p=platform/system/wlan/broadcom.git + * + */ +#define SBSDIO_FUNC1_SBADDRLOW 0x1000a /* SB Address window Low (b15) */ +#define SBSDIO_FUNC1_SBADDRMID 0x1000b /* SB Address window Mid (b23-b16) */ +#define SBSDIO_FUNC1_SBADDRHIGH 0x1000c /* SB Address window High (b24-b31) */ + +/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */ +#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid address bits in SBADDRLOW */ +#define SBSDIO_SBADDRMID_MASK 0xff /* Valid address bits in SBADDRMID */ +#define SBSDIO_SBADDRHIGH_MASK 0xff /* Valid address bits in SBADDRHIGH */ + +#define SBSDIO_SB_OFT_ADDR_MASK 0x7FFF /* sb offset addr is <= 15 bits, 32k */ + +/* REVISIT: this flag doesn't seem to matter */ +#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x8000 /* forces 32-bit SB access */ + + +/* + * Address map within the SDIO function address space (128K). + * + * Start End Description + * ------- ------- ------------------------------------------ + * 0x00000 0x0ffff selected backplane address window (64K) + * 0x10000 0x1ffff backplane control registers (max 64K) + * + * The current address window is configured by writing to registers + * SBADDRLOW, SBADDRMID and SBADDRHIGH. + * + * In order to access the contents of a 32-bit Silicon Backplane address + * the backplane address window must be first loaded with the highest + * 16 bits of the target address. Then, an access must be done to the + * SDIO function address space using the lower 15 bits of the address. + * Bit 15 of the address must be set when doing 32 bit accesses. + * + * 10987654321098765432109876543210 + * WWWWWWWWWWWWWWWWW SB Address Window + * OOOOOOOOOOOOOOOO Offset within SB Address Window + * a 32-bit access flag + */ + + +/* + * SSB I/O via SDIO. + * + * NOTE: SDIO address @addr is 17 bits long (SDIO address space is 128K). 
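The address-map comment above describes how a 32-bit backplane address is split into a window (programmed through SBSDIO_FUNC1_SBADDR{LOW,MID,HIGH}) and an in-window offset, with bit 15 set for 32-bit accesses. The same arithmetic in isolation, as a stand-alone sketch with a made-up target address:

/*
 * Illustrative arithmetic only, not driver code. The mask values mirror
 * the SBSDIO_* defines above; the target address is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define OFT_ADDR_MASK           0x7FFF  /* SBSDIO_SB_OFT_ADDR_MASK */
#define ACCESS_2_4B_FLAG        0x8000  /* SBSDIO_SB_ACCESS_2_4B_FLAG */

int main(void)
{
        uint32_t addr = 0x18000000 + 0x120;     /* hypothetical 32-bit register */
        uint32_t window = addr & ~(uint32_t)OFT_ADDR_MASK;
        uint32_t offset = (addr & OFT_ADDR_MASK) | ACCESS_2_4B_FLAG;

        /* Prints SBADDRLOW=0x00 SBADDRMID=0x00 SBADDRHIGH=0x18 offset=0x8120. */
        printf("SBADDRLOW=0x%02x SBADDRMID=0x%02x SBADDRHIGH=0x%02x offset=0x%04x\n",
               (unsigned int)((window >> 8) & 0x80),
               (unsigned int)((window >> 16) & 0xff),
               (unsigned int)((window >> 24) & 0xff),
               (unsigned int)offset);
        return 0;
}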
+ */ + +static inline struct device *ssb_sdio_dev(struct ssb_bus *bus) +{ + return &bus->host_sdio->dev; +} + +/* host claimed */ +static int ssb_sdio_writeb(struct ssb_bus *bus, unsigned int addr, u8 val) +{ + int error = 0; + + sdio_writeb(bus->host_sdio, val, addr, &error); + if (unlikely(error)) { + dev_dbg(ssb_sdio_dev(bus), "%08X <- %02x, error %d\n", + addr, val, error); + } + + return error; +} + +#if 0 +static u8 ssb_sdio_readb(struct ssb_bus *bus, unsigned int addr) +{ + u8 val; + int error = 0; + + val = sdio_readb(bus->host_sdio, addr, &error); + if (unlikely(error)) { + dev_dbg(ssb_sdio_dev(bus), "%08X -> %02x, error %d\n", + addr, val, error); + } + + return val; +} +#endif + +/* host claimed */ +static int ssb_sdio_set_sbaddr_window(struct ssb_bus *bus, u32 address) +{ + int error; + + error = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK); + if (error) + goto out; + error = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK); + if (error) + goto out; + error = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK); + if (error) + goto out; + bus->sdio_sbaddr = address; +out: + if (error) { + dev_dbg(ssb_sdio_dev(bus), "failed to set address window" + " to 0x%08x, error %d\n", address, error); + } + + return error; +} + +/* for enumeration use only */ +u32 ssb_sdio_scan_read32(struct ssb_bus *bus, u16 offset) +{ + u32 val; + int error; + + sdio_claim_host(bus->host_sdio); + val = sdio_readl(bus->host_sdio, offset, &error); + sdio_release_host(bus->host_sdio); + if (unlikely(error)) { + dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %08x, error %d\n", + bus->sdio_sbaddr >> 16, offset, val, error); + } + + return val; +} + +/* for enumeration use only */ +int ssb_sdio_scan_switch_coreidx(struct ssb_bus *bus, u8 coreidx) +{ + u32 sbaddr; + int error; + + sbaddr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE; + sdio_claim_host(bus->host_sdio); + error = ssb_sdio_set_sbaddr_window(bus, sbaddr); + sdio_release_host(bus->host_sdio); + if (error) { + dev_err(ssb_sdio_dev(bus), "failed to switch to core %u," + " error %d\n", coreidx, error); + goto out; + } +out: + return error; +} + +/* host must be already claimed */ +int ssb_sdio_switch_core(struct ssb_bus *bus, struct ssb_device *dev) +{ + u8 coreidx = dev->core_index; + u32 sbaddr; + int error = 0; + + sbaddr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE; + if (unlikely(bus->sdio_sbaddr != sbaddr)) { +#if SSB_VERBOSE_SDIOCORESWITCH_DEBUG + dev_info(ssb_sdio_dev(bus), + "switching to %s core, index %d\n", + ssb_core_name(dev->id.coreid), coreidx); +#endif + error = ssb_sdio_set_sbaddr_window(bus, sbaddr); + if (error) { + dev_dbg(ssb_sdio_dev(bus), "failed to switch to" + " core %u, error %d\n", coreidx, error); + goto out; + } + bus->mapped_device = dev; + } + +out: + return error; +} + +static u8 ssb_sdio_read8(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + u8 val = 0xff; + int error = 0; + + sdio_claim_host(bus->host_sdio); + if (unlikely(ssb_sdio_switch_core(bus, dev))) + goto out; + offset |= bus->sdio_sbaddr & 0xffff; + offset &= SBSDIO_SB_OFT_ADDR_MASK; + val = sdio_readb(bus->host_sdio, offset, &error); + if (error) { + dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %02x, error %d\n", + bus->sdio_sbaddr >> 16, offset, val, error); + } +out: + sdio_release_host(bus->host_sdio); + + return val; +} + +static u16 ssb_sdio_read16(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + u16 
val = 0xffff; + int error = 0; + + sdio_claim_host(bus->host_sdio); + if (unlikely(ssb_sdio_switch_core(bus, dev))) + goto out; + offset |= bus->sdio_sbaddr & 0xffff; + offset &= SBSDIO_SB_OFT_ADDR_MASK; + val = sdio_readw(bus->host_sdio, offset, &error); + if (error) { + dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %04x, error %d\n", + bus->sdio_sbaddr >> 16, offset, val, error); + } +out: + sdio_release_host(bus->host_sdio); + + return val; +} + +static u32 ssb_sdio_read32(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + u32 val = 0xffffffff; + int error = 0; + + sdio_claim_host(bus->host_sdio); + if (unlikely(ssb_sdio_switch_core(bus, dev))) + goto out; + offset |= bus->sdio_sbaddr & 0xffff; + offset &= SBSDIO_SB_OFT_ADDR_MASK; + offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */ + val = sdio_readl(bus->host_sdio, offset, &error); + if (error) { + dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %08x, error %d\n", + bus->sdio_sbaddr >> 16, offset, val, error); + } +out: + sdio_release_host(bus->host_sdio); + + return val; +} + +#ifdef CONFIG_SSB_BLOCKIO +static void ssb_sdio_block_read(struct ssb_device *dev, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + size_t saved_count = count; + struct ssb_bus *bus = dev->bus; + int error = 0; + + sdio_claim_host(bus->host_sdio); + if (unlikely(ssb_sdio_switch_core(bus, dev))) { + error = -EIO; + memset(buffer, 0xff, count); + goto err_out; + } + offset |= bus->sdio_sbaddr & 0xffff; + offset &= SBSDIO_SB_OFT_ADDR_MASK; + + switch (reg_width) { + case sizeof(u8): { + error = sdio_readsb(bus->host_sdio, buffer, offset, count); + break; + } + case sizeof(u16): { + SSB_WARN_ON(count & 1); + error = sdio_readsb(bus->host_sdio, buffer, offset, count); + break; + } + case sizeof(u32): { + SSB_WARN_ON(count & 3); + offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */ + error = sdio_readsb(bus->host_sdio, buffer, offset, count); + break; + } + default: + SSB_WARN_ON(1); + } + if (!error) + goto out; + +err_out: + dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%zu), error %d\n", + bus->sdio_sbaddr >> 16, offset, reg_width, saved_count, error); +out: + sdio_release_host(bus->host_sdio); +} +#endif /* CONFIG_SSB_BLOCKIO */ + +static void ssb_sdio_write8(struct ssb_device *dev, u16 offset, u8 val) +{ + struct ssb_bus *bus = dev->bus; + int error = 0; + + sdio_claim_host(bus->host_sdio); + if (unlikely(ssb_sdio_switch_core(bus, dev))) + goto out; + offset |= bus->sdio_sbaddr & 0xffff; + offset &= SBSDIO_SB_OFT_ADDR_MASK; + sdio_writeb(bus->host_sdio, val, offset, &error); + if (error) { + dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %02x, error %d\n", + bus->sdio_sbaddr >> 16, offset, val, error); + } +out: + sdio_release_host(bus->host_sdio); +} + +static void ssb_sdio_write16(struct ssb_device *dev, u16 offset, u16 val) +{ + struct ssb_bus *bus = dev->bus; + int error = 0; + + sdio_claim_host(bus->host_sdio); + if (unlikely(ssb_sdio_switch_core(bus, dev))) + goto out; + offset |= bus->sdio_sbaddr & 0xffff; + offset &= SBSDIO_SB_OFT_ADDR_MASK; + sdio_writew(bus->host_sdio, val, offset, &error); + if (error) { + dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %04x, error %d\n", + bus->sdio_sbaddr >> 16, offset, val, error); + } +out: + sdio_release_host(bus->host_sdio); +} + +static void ssb_sdio_write32(struct ssb_device *dev, u16 offset, u32 val) +{ + struct ssb_bus *bus = dev->bus; + int error = 0; + + sdio_claim_host(bus->host_sdio); + if (unlikely(ssb_sdio_switch_core(bus, dev))) + goto out; + offset |= 
bus->sdio_sbaddr & 0xffff; + offset &= SBSDIO_SB_OFT_ADDR_MASK; + offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */ + sdio_writel(bus->host_sdio, val, offset, &error); + if (error) { + dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %08x, error %d\n", + bus->sdio_sbaddr >> 16, offset, val, error); + } + if (bus->quirks & SSB_QUIRK_SDIO_READ_AFTER_WRITE32) + sdio_readl(bus->host_sdio, 0, &error); +out: + sdio_release_host(bus->host_sdio); +} + +#ifdef CONFIG_SSB_BLOCKIO +static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + size_t saved_count = count; + struct ssb_bus *bus = dev->bus; + int error = 0; + + sdio_claim_host(bus->host_sdio); + if (unlikely(ssb_sdio_switch_core(bus, dev))) { + error = -EIO; + memset((void *)buffer, 0xff, count); + goto err_out; + } + offset |= bus->sdio_sbaddr & 0xffff; + offset &= SBSDIO_SB_OFT_ADDR_MASK; + + switch (reg_width) { + case sizeof(u8): + error = sdio_writesb(bus->host_sdio, offset, + (void *)buffer, count); + break; + case sizeof(u16): + SSB_WARN_ON(count & 1); + error = sdio_writesb(bus->host_sdio, offset, + (void *)buffer, count); + break; + case sizeof(u32): + SSB_WARN_ON(count & 3); + offset |= SBSDIO_SB_ACCESS_2_4B_FLAG; /* 32 bit data access */ + error = sdio_writesb(bus->host_sdio, offset, + (void *)buffer, count); + break; + default: + SSB_WARN_ON(1); + } + if (!error) + goto out; + +err_out: + dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%zu), error %d\n", + bus->sdio_sbaddr >> 16, offset, reg_width, saved_count, error); +out: + sdio_release_host(bus->host_sdio); +} + +#endif /* CONFIG_SSB_BLOCKIO */ + +/* Not "static", as it's used in main.c */ +const struct ssb_bus_ops ssb_sdio_ops = { + .read8 = ssb_sdio_read8, + .read16 = ssb_sdio_read16, + .read32 = ssb_sdio_read32, + .write8 = ssb_sdio_write8, + .write16 = ssb_sdio_write16, + .write32 = ssb_sdio_write32, +#ifdef CONFIG_SSB_BLOCKIO + .block_read = ssb_sdio_block_read, + .block_write = ssb_sdio_block_write, +#endif +}; + +#define GOTO_ERROR_ON(condition, description) do { \ + if (unlikely(condition)) { \ + error_description = description; \ + goto error; \ + } \ + } while (0) + +int ssb_sdio_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv) +{ + struct ssb_sprom *sprom = &iv->sprom; + struct ssb_boardinfo *bi = &iv->boardinfo; + const char *error_description = "none"; + struct sdio_func_tuple *tuple; + void *mac; + + memset(sprom, 0xFF, sizeof(*sprom)); + sprom->boardflags_lo = 0; + sprom->boardflags_hi = 0; + + tuple = bus->host_sdio->tuples; + while (tuple) { + switch (tuple->code) { + case 0x22: /* extended function */ + switch (tuple->data[0]) { + case CISTPL_FUNCE_LAN_NODE_ID: + GOTO_ERROR_ON((tuple->size != 7) && + (tuple->data[1] != 6), + "mac tpl size"); + /* fetch the MAC address. 
*/ + mac = tuple->data + 2; + memcpy(sprom->il0mac, mac, ETH_ALEN); + memcpy(sprom->et1mac, mac, ETH_ALEN); + break; + default: + break; + } + break; + case 0x80: /* vendor specific tuple */ + switch (tuple->data[0]) { + case SSB_SDIO_CIS_SROMREV: + GOTO_ERROR_ON(tuple->size != 2, + "sromrev tpl size"); + sprom->revision = tuple->data[1]; + break; + case SSB_SDIO_CIS_ID: + GOTO_ERROR_ON((tuple->size != 5) && + (tuple->size != 7), + "id tpl size"); + bi->vendor = tuple->data[1] | + (tuple->data[2]<<8); + break; + case SSB_SDIO_CIS_BOARDREV: + GOTO_ERROR_ON(tuple->size != 2, + "boardrev tpl size"); + sprom->board_rev = tuple->data[1]; + break; + case SSB_SDIO_CIS_PA: + GOTO_ERROR_ON((tuple->size != 9) && + (tuple->size != 10), + "pa tpl size"); + sprom->pa0b0 = tuple->data[1] | + ((u16)tuple->data[2] << 8); + sprom->pa0b1 = tuple->data[3] | + ((u16)tuple->data[4] << 8); + sprom->pa0b2 = tuple->data[5] | + ((u16)tuple->data[6] << 8); + sprom->itssi_a = tuple->data[7]; + sprom->itssi_bg = tuple->data[7]; + sprom->maxpwr_a = tuple->data[8]; + sprom->maxpwr_bg = tuple->data[8]; + break; + case SSB_SDIO_CIS_OEMNAME: + /* Not present */ + break; + case SSB_SDIO_CIS_CCODE: + GOTO_ERROR_ON(tuple->size != 2, + "ccode tpl size"); + sprom->country_code = tuple->data[1]; + break; + case SSB_SDIO_CIS_ANTENNA: + GOTO_ERROR_ON(tuple->size != 2, + "ant tpl size"); + sprom->ant_available_a = tuple->data[1]; + sprom->ant_available_bg = tuple->data[1]; + break; + case SSB_SDIO_CIS_ANTGAIN: + GOTO_ERROR_ON(tuple->size != 2, + "antg tpl size"); + sprom->antenna_gain.ghz24.a0 = tuple->data[1]; + sprom->antenna_gain.ghz24.a1 = tuple->data[1]; + sprom->antenna_gain.ghz24.a2 = tuple->data[1]; + sprom->antenna_gain.ghz24.a3 = tuple->data[1]; + sprom->antenna_gain.ghz5.a0 = tuple->data[1]; + sprom->antenna_gain.ghz5.a1 = tuple->data[1]; + sprom->antenna_gain.ghz5.a2 = tuple->data[1]; + sprom->antenna_gain.ghz5.a3 = tuple->data[1]; + break; + case SSB_SDIO_CIS_BFLAGS: + GOTO_ERROR_ON((tuple->size != 3) && + (tuple->size != 5), + "bfl tpl size"); + sprom->boardflags_lo = tuple->data[1] | + ((u16)tuple->data[2] << 8); + break; + case SSB_SDIO_CIS_LEDS: + GOTO_ERROR_ON(tuple->size != 5, + "leds tpl size"); + sprom->gpio0 = tuple->data[1]; + sprom->gpio1 = tuple->data[2]; + sprom->gpio2 = tuple->data[3]; + sprom->gpio3 = tuple->data[4]; + break; + default: + break; + } + break; + default: + break; + } + tuple = tuple->next; + } + + return 0; +error: + dev_err(ssb_sdio_dev(bus), "failed to fetch device invariants: %s\n", + error_description); + return -ENODEV; +} + +void ssb_sdio_exit(struct ssb_bus *bus) +{ + if (bus->bustype != SSB_BUSTYPE_SDIO) + return; + /* Nothing to do here. */ +} + +int ssb_sdio_init(struct ssb_bus *bus) +{ + if (bus->bustype != SSB_BUSTYPE_SDIO) + return 0; + + bus->sdio_sbaddr = ~0; + + return 0; +} diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c new file mode 100644 index 0000000..d0e6762 --- /dev/null +++ b/drivers/ssb/sprom.c @@ -0,0 +1,177 @@ +/* + * Sonics Silicon Backplane + * Common SPROM support routines + * + * Copyright (C) 2005-2008 Michael Buesch + * Copyright (C) 2005 Martin Langer + * Copyright (C) 2005 Stefano Brivio + * Copyright (C) 2005 Danny van Dyk + * Copyright (C) 2005 Andreas Jaggi + * + * Licensed under the GNU/GPL. See COPYING for details. 
+ */ + +#include "ssb_private.h" + +#include + + +static const struct ssb_sprom *fallback_sprom; + + +static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, + size_t sprom_size_words) +{ + int i, pos = 0; + + for (i = 0; i < sprom_size_words; i++) + pos += snprintf(buf + pos, buf_len - pos - 1, + "%04X", swab16(sprom[i]) & 0xFFFF); + pos += snprintf(buf + pos, buf_len - pos - 1, "\n"); + + return pos + 1; +} + +static int hex2sprom(u16 *sprom, const char *dump, size_t len, + size_t sprom_size_words) +{ + char c, tmp[5] = { 0 }; + int err, cnt = 0; + unsigned long parsed; + + /* Strip whitespace at the end. */ + while (len) { + c = dump[len - 1]; + if (!isspace(c) && c != '\0') + break; + len--; + } + /* Length must match exactly. */ + if (len != sprom_size_words * 4) + return -EINVAL; + + while (cnt < sprom_size_words) { + memcpy(tmp, dump, 4); + dump += 4; + err = strict_strtoul(tmp, 16, &parsed); + if (err) + return err; + sprom[cnt++] = swab16((u16)parsed); + } + + return 0; +} + +/* Common sprom device-attribute show-handler */ +ssize_t ssb_attr_sprom_show(struct ssb_bus *bus, char *buf, + int (*sprom_read)(struct ssb_bus *bus, u16 *sprom)) +{ + u16 *sprom; + int err = -ENOMEM; + ssize_t count = 0; + size_t sprom_size_words = bus->sprom_size; + + sprom = kcalloc(sprom_size_words, sizeof(u16), GFP_KERNEL); + if (!sprom) + goto out; + + /* Use interruptible locking, as the SPROM write might + * be holding the lock for several seconds. So allow userspace + * to cancel operation. */ + err = -ERESTARTSYS; + if (mutex_lock_interruptible(&bus->sprom_mutex)) + goto out_kfree; + err = sprom_read(bus, sprom); + mutex_unlock(&bus->sprom_mutex); + + if (!err) + count = sprom2hex(sprom, buf, PAGE_SIZE, sprom_size_words); + +out_kfree: + kfree(sprom); +out: + return err ? err : count; +} + +/* Common sprom device-attribute store-handler */ +ssize_t ssb_attr_sprom_store(struct ssb_bus *bus, + const char *buf, size_t count, + int (*sprom_check_crc)(const u16 *sprom, size_t size), + int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)) +{ + u16 *sprom; + int res = 0, err = -ENOMEM; + size_t sprom_size_words = bus->sprom_size; + struct ssb_freeze_context freeze; + + sprom = kcalloc(bus->sprom_size, sizeof(u16), GFP_KERNEL); + if (!sprom) + goto out; + err = hex2sprom(sprom, buf, count, sprom_size_words); + if (err) { + err = -EINVAL; + goto out_kfree; + } + err = sprom_check_crc(sprom, sprom_size_words); + if (err) { + err = -EINVAL; + goto out_kfree; + } + + /* Use interruptible locking, as the SPROM write might + * be holding the lock for several seconds. So allow userspace + * to cancel operation. */ + err = -ERESTARTSYS; + if (mutex_lock_interruptible(&bus->sprom_mutex)) + goto out_kfree; + err = ssb_devices_freeze(bus, &freeze); + if (err) { + ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze all devices\n"); + goto out_unlock; + } + res = sprom_write(bus, sprom); + err = ssb_devices_thaw(&freeze); + if (err) + ssb_printk(KERN_ERR PFX "SPROM write: Could not thaw all devices\n"); +out_unlock: + mutex_unlock(&bus->sprom_mutex); +out_kfree: + kfree(sprom); +out: + if (res) + return res; + return err ? err : count; +} + +/** + * ssb_arch_set_fallback_sprom - Set a fallback SPROM for use if no SPROM is found. + * + * @sprom: The SPROM data structure to register. + * + * With this function the architecture implementation may register a fallback + * SPROM data structure. 
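The kernel-doc above (it continues below with further caveats) describes ssb_arch_set_fallback_sprom(). As a rough illustration of a caller, architecture setup code might register a static fallback like this; every field value here is invented, and the declaration is assumed to live in <linux/ssb/ssb.h>.

/* Illustrative sketch of an architecture-side caller; values are made up. */
#include <linux/init.h>
#include <linux/ssb/ssb.h>

static struct ssb_sprom example_fallback_sprom = {
        .revision       = 3,
        .il0mac         = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
        .board_rev      = 0x17,
        .country_code   = 0x0,
};

static int __init example_register_fallback_sprom(void)
{
        /* Returns -EEXIST if a fallback was already registered. */
        return ssb_arch_set_fallback_sprom(&example_fallback_sprom);
}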
The fallback is only used for PCI based SSB devices, + * where no valid SPROM can be found in the shadow registers. + * + * This function is useful for weird architectures that have a half-assed SSB device + * hardwired to their PCI bus. + * + * Note that it does only work with PCI attached SSB devices. PCMCIA devices currently + * don't use this fallback. + * Architectures must provide the SPROM for native SSB devices anyway, + * so the fallback also isn't used for native devices. + * + * This function is available for architecture code, only. So it is not exported. + */ +int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom) +{ + if (fallback_sprom) + return -EEXIST; + fallback_sprom = sprom; + + return 0; +} + +const struct ssb_sprom *ssb_get_fallback_sprom(void) +{ + return fallback_sprom; +} diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h new file mode 100644 index 0000000..0331139 --- /dev/null +++ b/drivers/ssb/ssb_private.h @@ -0,0 +1,209 @@ +#ifndef LINUX_SSB_PRIVATE_H_ +#define LINUX_SSB_PRIVATE_H_ + +#include +#include + + +#define PFX "ssb: " + +#ifdef CONFIG_SSB_SILENT +# define ssb_printk(fmt, x...) do { /* nothing */ } while (0) +#else +# define ssb_printk printk +#endif /* CONFIG_SSB_SILENT */ + +/* dprintk: Debugging printk; vanishes for non-debug compilation */ +#ifdef CONFIG_SSB_DEBUG +# define ssb_dprintk(fmt, x...) ssb_printk(fmt , ##x) +#else +# define ssb_dprintk(fmt, x...) do { /* nothing */ } while (0) +#endif + +#ifdef CONFIG_SSB_DEBUG +# define SSB_WARN_ON(x) WARN_ON(x) +# define SSB_BUG_ON(x) BUG_ON(x) +#else +static inline int __ssb_do_nothing(int x) { return x; } +# define SSB_WARN_ON(x) __ssb_do_nothing(unlikely(!!(x))) +# define SSB_BUG_ON(x) __ssb_do_nothing(unlikely(!!(x))) +#endif + + +/* pci.c */ +#ifdef CONFIG_SSB_PCIHOST +extern int ssb_pci_switch_core(struct ssb_bus *bus, + struct ssb_device *dev); +extern int ssb_pci_switch_coreidx(struct ssb_bus *bus, + u8 coreidx); +extern int ssb_pci_xtal(struct ssb_bus *bus, u32 what, + int turn_on); +extern int ssb_pci_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv); +extern void ssb_pci_exit(struct ssb_bus *bus); +extern int ssb_pci_init(struct ssb_bus *bus); +extern const struct ssb_bus_ops ssb_pci_ops; + +#else /* CONFIG_SSB_PCIHOST */ + +static inline int ssb_pci_switch_core(struct ssb_bus *bus, + struct ssb_device *dev) +{ + return 0; +} +static inline int ssb_pci_switch_coreidx(struct ssb_bus *bus, + u8 coreidx) +{ + return 0; +} +static inline int ssb_pci_xtal(struct ssb_bus *bus, u32 what, + int turn_on) +{ + return 0; +} +static inline void ssb_pci_exit(struct ssb_bus *bus) +{ +} +static inline int ssb_pci_init(struct ssb_bus *bus) +{ + return 0; +} +#endif /* CONFIG_SSB_PCIHOST */ + + +/* pcmcia.c */ +#ifdef CONFIG_SSB_PCMCIAHOST +extern int ssb_pcmcia_switch_core(struct ssb_bus *bus, + struct ssb_device *dev); +extern int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus, + u8 coreidx); +extern int ssb_pcmcia_switch_segment(struct ssb_bus *bus, + u8 seg); +extern int ssb_pcmcia_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv); +extern int ssb_pcmcia_hardware_setup(struct ssb_bus *bus); +extern void ssb_pcmcia_exit(struct ssb_bus *bus); +extern int ssb_pcmcia_init(struct ssb_bus *bus); +extern const struct ssb_bus_ops ssb_pcmcia_ops; +#else /* CONFIG_SSB_PCMCIAHOST */ +static inline int ssb_pcmcia_switch_core(struct ssb_bus *bus, + struct ssb_device *dev) +{ + return 0; +} +static inline int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus, 
+ u8 coreidx) +{ + return 0; +} +static inline int ssb_pcmcia_switch_segment(struct ssb_bus *bus, + u8 seg) +{ + return 0; +} +static inline int ssb_pcmcia_hardware_setup(struct ssb_bus *bus) +{ + return 0; +} +static inline void ssb_pcmcia_exit(struct ssb_bus *bus) +{ +} +static inline int ssb_pcmcia_init(struct ssb_bus *bus) +{ + return 0; +} +#endif /* CONFIG_SSB_PCMCIAHOST */ + +/* sdio.c */ +#ifdef CONFIG_SSB_SDIOHOST +extern int ssb_sdio_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv); + +extern u32 ssb_sdio_scan_read32(struct ssb_bus *bus, u16 offset); +extern int ssb_sdio_switch_core(struct ssb_bus *bus, struct ssb_device *dev); +extern int ssb_sdio_scan_switch_coreidx(struct ssb_bus *bus, u8 coreidx); +extern int ssb_sdio_hardware_setup(struct ssb_bus *bus); +extern void ssb_sdio_exit(struct ssb_bus *bus); +extern int ssb_sdio_init(struct ssb_bus *bus); + +extern const struct ssb_bus_ops ssb_sdio_ops; +#else /* CONFIG_SSB_SDIOHOST */ +static inline u32 ssb_sdio_scan_read32(struct ssb_bus *bus, u16 offset) +{ + return 0; +} +static inline int ssb_sdio_switch_core(struct ssb_bus *bus, + struct ssb_device *dev) +{ + return 0; +} +static inline int ssb_sdio_scan_switch_coreidx(struct ssb_bus *bus, u8 coreidx) +{ + return 0; +} +static inline int ssb_sdio_hardware_setup(struct ssb_bus *bus) +{ + return 0; +} +static inline void ssb_sdio_exit(struct ssb_bus *bus) +{ +} +static inline int ssb_sdio_init(struct ssb_bus *bus) +{ + return 0; +} +#endif /* CONFIG_SSB_SDIOHOST */ + + +/* scan.c */ +extern const char *ssb_core_name(u16 coreid); +extern int ssb_bus_scan(struct ssb_bus *bus, + unsigned long baseaddr); +extern void ssb_iounmap(struct ssb_bus *ssb); + + +/* sprom.c */ +extern +ssize_t ssb_attr_sprom_show(struct ssb_bus *bus, char *buf, + int (*sprom_read)(struct ssb_bus *bus, u16 *sprom)); +extern +ssize_t ssb_attr_sprom_store(struct ssb_bus *bus, + const char *buf, size_t count, + int (*sprom_check_crc)(const u16 *sprom, size_t size), + int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)); +extern const struct ssb_sprom *ssb_get_fallback_sprom(void); + + +/* core.c */ +extern u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m); +extern struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev); +int ssb_for_each_bus_call(unsigned long data, + int (*func)(struct ssb_bus *bus, unsigned long data)); +extern struct ssb_bus *ssb_pcmcia_dev_to_bus(struct pcmcia_device *pdev); + +struct ssb_freeze_context { + /* Pointer to the bus */ + struct ssb_bus *bus; + /* Boolean list to indicate whether a device is frozen on this bus. */ + bool device_frozen[SSB_MAX_NR_CORES]; +}; +extern int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx); +extern int ssb_devices_thaw(struct ssb_freeze_context *ctx); + + + +/* b43_pci_bridge.c */ +#ifdef CONFIG_SSB_B43_PCI_BRIDGE +extern int __init b43_pci_ssb_bridge_init(void); +extern void __exit b43_pci_ssb_bridge_exit(void); +#else /* CONFIG_SSB_B43_PCI_BRIDGE */ +static inline int b43_pci_ssb_bridge_init(void) +{ + return 0; +} +static inline void b43_pci_ssb_bridge_exit(void) +{ +} +#endif /* CONFIG_SSB_B43_PCI_BRIDGE */ + +#endif /* LINUX_SSB_PRIVATE_H_ */ diff --git a/enable-older-kernels/README b/enable-older-kernels/README new file mode 100644 index 0000000..2069ce4 --- /dev/null +++ b/enable-older-kernels/README @@ -0,0 +1,12 @@ +compat-wireless as a whole aims to always be compiled and used with the +oldest stable kernel supported by kernel.org. This will be the oldest +stable kernel on the 2.6. 
series listed on the kernel.org front page. +Sometimes we'll go even beyond that. + +Backporting compat-wireless involves backporting some subsystems but +at times we may want to support compiling only certain drivers on older +kernels since it's easier to backport some subsystems. Such is the case +with PCI drivers. This directory exists to allow developers to enable +compilation of compat-wireless on older kernels using ./scripts/driver-select. +Upon selection of a driver, a patch from this directory will be applied to +allow further compilation of one driver onto even older kernels. diff --git a/enable-older-kernels/enable-2.6.21.patch b/enable-older-kernels/enable-2.6.21.patch new file mode 100644 index 0000000..bb6e33d --- /dev/null +++ b/enable-older-kernels/enable-2.6.21.patch @@ -0,0 +1,26 @@ +--- a/config.mk 2010-01-11 10:27:53.000000000 -0800 ++++ b/config.mk 2010-01-11 10:32:49.000000000 -0800 +@@ -26,8 +26,8 @@ + COMPAT_VERSIONS := $(shell I=$(COMPAT_LATEST_VERSION); while [ "$$I" -gt $(KERNEL_SUBLEVEL) ]; do echo $$I; I=$$(($$I - 1)); done) + $(foreach ver,$(COMPAT_VERSIONS),$(eval CONFIG_COMPAT_KERNEL_$(ver)=y)) + +-ifdef CONFIG_COMPAT_KERNEL_25 +-$(error "ERROR: compat-wireless by default supports kernels >= 2.6.25, try enabling only one driver though") ++ifdef CONFIG_COMPAT_WIRELESS_21 ++$(error "ERROR: compat-wireless for selected driver requires a kernel >= 2.6.21") + endif + + ifeq ($(CONFIG_CFG80211),y) +diff --git a/scripts/gen-compat-autoconf.sh b/scripts/gen-compat-autoconf.sh +index 6c7cae7..bb2ea6d 100755 +--- a/scripts/gen-compat-autoconf.sh ++++ b/scripts/gen-compat-autoconf.sh +@@ -11,7 +11,7 @@ + + # This indicates which is the oldest kernel we support + # Update this if you are adding support for older kernels. +-OLDEST_KERNEL_SUPPORTED="2.6.25" ++OLDEST_KERNEL_SUPPORTED="2.6.21" + COMPAT_RELEASE="compat-release" + KERNEL_RELEASE="git-describe" + MULT_DEP_FILE=".compat_pivot_dep" diff --git a/enable-older-kernels/enable-2.6.22.patch b/enable-older-kernels/enable-2.6.22.patch new file mode 100644 index 0000000..3acfcec --- /dev/null +++ b/enable-older-kernels/enable-2.6.22.patch @@ -0,0 +1,26 @@ +--- a/config.mk 2010-01-11 10:27:53.000000000 -0800 ++++ b/config.mk 2010-01-11 10:32:49.000000000 -0800 +@@ -26,8 +26,8 @@ + COMPAT_VERSIONS := $(shell I=$(COMPAT_LATEST_VERSION); while [ "$$I" -gt $(KERNEL_SUBLEVEL) ]; do echo $$I; I=$$(($$I - 1)); done) + $(foreach ver,$(COMPAT_VERSIONS),$(eval CONFIG_COMPAT_KERNEL_$(ver)=y)) + +-ifdef CONFIG_COMPAT_KERNEL_25 +-$(error "ERROR: compat-wireless by default supports kernels >= 2.6.25, try enabling only one driver though") ++ifdef CONFIG_COMPAT_WIRELESS_22 ++$(error "ERROR: compat-wireless for selected driver requires a kernel >= 2.6.22") + endif + + ifeq ($(CONFIG_CFG80211),y) +diff --git a/scripts/gen-compat-autoconf.sh b/scripts/gen-compat-autoconf.sh +index 6c7cae7..bb2ea6d 100755 +--- a/scripts/gen-compat-autoconf.sh ++++ b/scripts/gen-compat-autoconf.sh +@@ -11,7 +11,7 @@ + + # This indicates which is the oldest kernel we support + # Update this if you are adding support for older kernels.
+-OLDEST_KERNEL_SUPPORTED="2.6.25" ++OLDEST_KERNEL_SUPPORTED="2.6.22" + COMPAT_RELEASE="compat-release" + KERNEL_RELEASE="git-describe" + MULT_DEP_FILE=".compat_pivot_dep" diff --git a/enable-older-kernels/enable-2.6.23.patch b/enable-older-kernels/enable-2.6.23.patch new file mode 100644 index 0000000..e415923 --- /dev/null +++ b/enable-older-kernels/enable-2.6.23.patch @@ -0,0 +1,26 @@ +--- a/config.mk 2010-01-11 10:27:53.000000000 -0800 ++++ b/config.mk 2010-01-11 10:32:49.000000000 -0800 +@@ -26,8 +26,8 @@ + COMPAT_VERSIONS := $(shell I=$(COMPAT_LATEST_VERSION); while [ "$$I" -gt $(KERNEL_SUBLEVEL) ]; do echo $$I; I=$$(($$I - 1)); done) + $(foreach ver,$(COMPAT_VERSIONS),$(eval CONFIG_COMPAT_KERNEL_$(ver)=y)) + +-ifdef CONFIG_COMPAT_KERNEL_25 +-$(error "ERROR: compat-wireless by default supports kernels >= 2.6.25, try enabling only one driver though") ++ifdef CONFIG_COMPAT_WIRELESS_23 ++$(error "ERROR: compat-wireless for selected driver requires a kernel >= 2.6.23") + endif + + ifeq ($(CONFIG_CFG80211),y) +diff --git a/scripts/gen-compat-autoconf.sh b/scripts/gen-compat-autoconf.sh +index 6c7cae7..bb2ea6d 100755 +--- a/scripts/gen-compat-autoconf.sh ++++ b/scripts/gen-compat-autoconf.sh +@@ -11,7 +11,7 @@ + + # This indicates which is the oldest kernel we support + # Update this if you are adding support for older kernels. +-OLDEST_KERNEL_SUPPORTED="2.6.25" ++OLDEST_KERNEL_SUPPORTED="2.6.23" + COMPAT_RELEASE="compat-release" + KERNEL_RELEASE="git-describe" + MULT_DEP_FILE=".compat_pivot_dep" diff --git a/enable-older-kernels/enable-2.6.24.patch b/enable-older-kernels/enable-2.6.24.patch new file mode 100644 index 0000000..f27af86 --- /dev/null +++ b/enable-older-kernels/enable-2.6.24.patch @@ -0,0 +1,26 @@ +--- a/config.mk 2010-01-11 10:27:53.000000000 -0800 ++++ b/config.mk 2010-01-11 10:32:49.000000000 -0800 +@@ -26,8 +26,8 @@ + COMPAT_VERSIONS := $(shell I=$(COMPAT_LATEST_VERSION); while [ "$$I" -gt $(KERNEL_SUBLEVEL) ]; do echo $$I; I=$$(($$I - 1)); done) + $(foreach ver,$(COMPAT_VERSIONS),$(eval CONFIG_COMPAT_KERNEL_$(ver)=y)) + +-ifdef CONFIG_COMPAT_KERNEL_25 +-$(error "ERROR: compat-wireless by default supports kernels >= 2.6.25, try enabling only one driver though") ++ifdef CONFIG_COMPAT_WIRELESS_24 ++$(error "ERROR: compat-wireless for selected driver requires a kernel >= 2.6.24") + endif + + ifeq ($(CONFIG_CFG80211),y) +diff --git a/scripts/gen-compat-autoconf.sh b/scripts/gen-compat-autoconf.sh +index 6c7cae7..bb2ea6d 100755 +--- a/scripts/gen-compat-autoconf.sh ++++ b/scripts/gen-compat-autoconf.sh +@@ -11,7 +11,7 @@ + + # This indicates which is the oldest kernel we support + # Update this if you are adding support for older kernels. +-OLDEST_KERNEL_SUPPORTED="2.6.25" ++OLDEST_KERNEL_SUPPORTED="2.6.24" + COMPAT_RELEASE="compat-release" + KERNEL_RELEASE="git-describe" + MULT_DEP_FILE=".compat_pivot_dep" diff --git a/git-describe b/git-describe new file mode 100644 index 0000000..6e1ab32 --- /dev/null +++ b/git-describe @@ -0,0 +1,2 @@ +linux-next.git +next-20100310 diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h new file mode 100644 index 0000000..b847fc7 --- /dev/null +++ b/include/linux/ath9k_platform.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. 
+ * Copyright (c) 2009 Gabor Juhos + * Copyright (c) 2009 Imre Kaloz + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LINUX_ATH9K_PLATFORM_H +#define _LINUX_ATH9K_PLATFORM_H + +#define ATH9K_PLAT_EEP_MAX_WORDS 2048 + +struct ath9k_platform_data { + u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS]; +}; + +#endif /* _LINUX_ATH9K_PLATFORM_H */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h new file mode 100644 index 0000000..8469d35 --- /dev/null +++ b/include/linux/bitops.h @@ -0,0 +1,196 @@ +#ifndef _LINUX_BITOPS_H +#define _LINUX_BITOPS_H +#include + +#ifdef __KERNEL__ +#define BIT(nr) (1UL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITS_PER_BYTE 8 +#ifndef BITS_TO_LONGS /* Older kernels define this already */ +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) +#endif +#endif + +/* + * Include this here because some architectures need generic_ffs/fls in + * scope + */ +#include + +#define for_each_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + + +static __inline__ int get_bitmask_order(unsigned int count) +{ + int order; + + order = fls(count); + return order; /* We could be slightly more clever with -1 here... */ +} + +static __inline__ int get_count_order(unsigned int count) +{ + int order; + + order = fls(count) - 1; + if (count & (count - 1)) + order++; + return order; +} + +static inline unsigned long hweight_long(unsigned long w) +{ + return sizeof(w) == 4 ? 
hweight32(w) : hweight64(w); +} + +/** + * rol32 - rotate a 32-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u32 rol32(__u32 word, unsigned int shift) +{ + return (word << shift) | (word >> (32 - shift)); +} + +/** + * ror32 - rotate a 32-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u32 ror32(__u32 word, unsigned int shift) +{ + return (word >> shift) | (word << (32 - shift)); +} + +/** + * rol16 - rotate a 16-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u16 rol16(__u16 word, unsigned int shift) +{ + return (word << shift) | (word >> (16 - shift)); +} + +/** + * ror16 - rotate a 16-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u16 ror16(__u16 word, unsigned int shift) +{ + return (word >> shift) | (word << (16 - shift)); +} + +/** + * rol8 - rotate an 8-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u8 rol8(__u8 word, unsigned int shift) +{ + return (word << shift) | (word >> (8 - shift)); +} + +/** + * ror8 - rotate an 8-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u8 ror8(__u8 word, unsigned int shift) +{ + return (word >> shift) | (word << (8 - shift)); +} + +static inline unsigned fls_long(unsigned long l) +{ + if (sizeof(l) == 4) + return fls(l); + return fls64(l); +} + +/** + * __ffs64 - find first set bit in a 64 bit word + * @word: The 64 bit word + * + * On 64 bit arches this is a synomyn for __ffs + * The result is not defined if no bits are set, so check that @word + * is non-zero before calling this. + */ +static inline unsigned long __ffs64(u64 word) +{ +#if BITS_PER_LONG == 32 + if (((u32)word) == 0UL) + return __ffs((u32)(word >> 32)) + 32; +#elif BITS_PER_LONG != 64 +#error BITS_PER_LONG not 32 or 64 +#endif + return __ffs((unsigned long)word); +} + +#ifdef __KERNEL__ +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit. + */ +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first cleared bit. + */ +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + +#ifdef CONFIG_GENERIC_FIND_LAST_BIT +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit, or size. 
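+ *
+ * Illustrative sketch (editorial addition, not part of the original patch;
+ * note that despite the wording above the helper returns the *last* set
+ * bit):
+ *
+ *	DECLARE_BITMAP(map, 64);
+ *	unsigned long last;
+ *
+ *	bitmap_zero(map, 64);
+ *	set_bit(3, map);
+ *	set_bit(39, map);
+ *	last = find_last_bit(map, 64);
+ *
+ * Here last ends up as 39; with no bits set the helper returns @size (64).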
+ */ +extern unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); +#endif /* CONFIG_GENERIC_FIND_LAST_BIT */ + +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +extern unsigned long find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); + +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ + +extern unsigned long find_next_zero_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset); + +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ +#endif /* __KERNEL__ */ +#endif diff --git a/include/linux/compat-2.6.14.h b/include/linux/compat-2.6.14.h new file mode 100644 index 0000000..1f19f7f --- /dev/null +++ b/include/linux/compat-2.6.14.h @@ -0,0 +1,13 @@ +#ifndef LINUX_26_14_COMPAT_H +#define LINUX_26_14_COMPAT_H + +#include + +/* Compat work for 2.6.14 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)) + +typedef unsigned int gfp_t; + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)) */ + +#endif /* LINUX_26_14_COMPAT_H */ diff --git a/include/linux/compat-2.6.18.h b/include/linux/compat-2.6.18.h new file mode 100644 index 0000000..5e0182b --- /dev/null +++ b/include/linux/compat-2.6.18.h @@ -0,0 +1,13 @@ +#ifndef LINUX_26_18_COMPAT_H +#define LINUX_26_18_COMPAT_H + +#include + +/* Compat work for 2.6.18 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) + +#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) */ + +#endif /* LINUX_26_18_COMPAT_H */ diff --git a/include/linux/compat-2.6.19.h b/include/linux/compat-2.6.19.h new file mode 100644 index 0000000..1e648c0 --- /dev/null +++ b/include/linux/compat-2.6.19.h @@ -0,0 +1,24 @@ +#ifndef LINUX_26_19_COMPAT_H +#define LINUX_26_19_COMPAT_H + +#include + +/* Compat work for 2.6.19 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) + +#include + +static inline int +compat_kmem_cache_destroy(struct kmem_cache *cachep) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + return kmem_cache_destroy(cachep); +#else + kmem_cache_destroy(cachep); + return 0; +#endif +} + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */ + +#endif /* LINUX_26_19_COMPAT_H */ diff --git a/include/linux/compat-2.6.21.h b/include/linux/compat-2.6.21.h new file mode 100644 index 0000000..89ed6d9 --- /dev/null +++ b/include/linux/compat-2.6.21.h @@ -0,0 +1,18 @@ +#ifndef LINUX_26_21_COMPAT_H +#define LINUX_26_21_COMPAT_H + +#include + +/* Compat work for 2.6.21 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) + +#include + +#define register_sysctl_table(table) \ + ({ \ + register_sysctl_table((table), 0); \ + }) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) */ + +#endif /* LINUX_26_21_COMPAT_H */ diff --git a/include/linux/compat-2.6.22.h b/include/linux/compat-2.6.22.h new file mode 100644 index 0000000..7ca1b18 --- /dev/null +++ b/include/linux/compat-2.6.22.h @@ -0,0 +1,104 @@ +#ifndef LINUX_26_22_COMPAT_H +#define LINUX_26_22_COMPAT_H + +#include + +/* Compat work for 2.6.21 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)) + +#include +#include + +/* reuse ax25_ptr */ +#define ieee80211_ptr ax25_ptr + +#ifdef CONFIG_AX25 +#error Compat reuses the 
AX.25 pointer so that may not be enabled! +#endif + +static inline unsigned char *skb_mac_header(const struct sk_buff *skb) +{ + return skb->mac.raw; +} + +static inline void skb_set_mac_header(struct sk_buff *skb, int offset) +{ + skb->mac.raw = skb->data + offset; +} + +static inline void skb_reset_mac_header(struct sk_buff *skb) +{ + skb->mac.raw = skb->data; +} + +static inline void skb_reset_network_header(struct sk_buff *skb) +{ + skb->nh.raw = skb->data; +} + +static inline void skb_set_network_header(struct sk_buff *skb, int offset) +{ + skb->nh.raw = skb->data + offset; +} + +static inline void skb_set_transport_header(struct sk_buff *skb, int offset) +{ + skb->h.raw = skb->data + offset; +} + +static inline unsigned char *skb_transport_header(struct sk_buff *skb) +{ + return skb->h.raw; +} + +static inline unsigned char *skb_network_header(const struct sk_buff *skb) +{ + return skb->nh.raw; +} + +static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) +{ + return skb->tail; +} + +static inline struct iphdr *ip_hdr(const struct sk_buff *skb) +{ + return (struct iphdr *)skb_network_header(skb); +} + +static inline void skb_copy_from_linear_data(const struct sk_buff *skb, + void *to, + const unsigned int len) +{ + memcpy(to, skb->data, len); +} + +static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, + const int offset, void *to, + const unsigned int len) +{ + memcpy(to, skb->data + offset, len); +} + +#define __maybe_unused __attribute__((unused)) + +#define uninitialized_var(x) x = x + +/* This will lead to very weird behaviour... */ +#define NLA_BINARY NLA_STRING + +static inline int pci_set_mwi(struct pci_dev *dev) +{ + return -ENOSYS; +} + +static inline void pci_clear_mwi(struct pci_dev *dev) +{ +} + +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)) */ + +#endif /* LINUX_26_22_COMPAT_H */ diff --git a/include/linux/compat-2.6.23.h b/include/linux/compat-2.6.23.h new file mode 100644 index 0000000..fbfb470 --- /dev/null +++ b/include/linux/compat-2.6.23.h @@ -0,0 +1,136 @@ +#ifndef LINUX_26_23_COMPAT_H +#define LINUX_26_23_COMPAT_H + +#include + +/* Compat work for < 2.6.23 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) + +#include +#include +#include +#include +#include + +/* + * Tell gcc if a function is cold. The compiler will assume any path + * directly leading to the call is unlikely. + */ + +#if !(__GNUC__ == 4 && __GNUC_MINOR__ < 3) +/* Mark functions as cold. gcc will assume any path leading to a call + * to them will be unlikely. This means a lot of manual unlikely()s + * are unnecessary now for any paths leading to the usual suspects + * like BUG(), printk(), panic() etc. [but let's keep them for now for + * older compilers] + * + * Early snapshots of gcc 4.3 don't support this and we can't detect this + * in the preprocessor, but we can live with this because they're unreleased. + * Maketime probing would be overkill here. 
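+ *
+ * Editorial example, not from the original patch: with this header in
+ * scope a rarely-taken error path can simply be annotated, e.g.
+ *
+ *	static void __cold foo_report_fatal(struct foo_priv *priv)
+ *	{
+ *		dev_err(priv->dev, "fatal hardware error, resetting\n");
+ *	}
+ *
+ * (foo_report_fatal(), struct foo_priv and its dev pointer are made up
+ * purely for illustration.)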
+ * + * gcc also has a __attribute__((__hot__)) to move hot functions into + * a special section, but I don't see any sense in this right now in + * the kernel context */ +#define __cold __attribute__((__cold__)) +#endif /* gcc 4.3 check */ + +#ifndef __cold +#define __cold +#endif + +/* Added as of 2.6.23 in include/linux/netdevice.h */ +#define alloc_netdev_mq(sizeof_priv, name, setup, queue) \ + alloc_netdev(sizeof_priv, name, setup) +#define NETIF_F_MULTI_QUEUE 16384 + +/* Added as of 2.6.23 on include/linux/netdevice.h */ +static inline int netif_is_multiqueue(const struct net_device *dev) +{ + return (!!(NETIF_F_MULTI_QUEUE & dev->features)); +} + +/* 2.6.23 fixed a bug in tcf_destroy_chain and the parameter changed */ +static inline void tcf_destroy_chain_compat(struct tcf_proto **fl) +{ + struct tcf_proto *tp; + + while ((tp = *fl) != NULL) { + *fl = tp->next; + tp->ops->destroy(tp); + module_put(tp->ops->owner); + kfree(tp); + } +} + +/* dev_mc_list was replaced with dev_addr_list as of 2.6.23, + * only new member added is da_synced. */ +#define dev_addr_list dev_mc_list +#define da_addr dmi_addr +#define da_addrlen dmi_addrlen +#define da_users dmi_users +#define da_gusers dmi_gusers + +/* dev_set_promiscuity() was moved to __dev_set_promiscuity() on 2.6.23 and + * dev_set_promiscuity() became a wrapper. */ +#define __dev_set_promiscuity dev_set_promiscuity + +/* Our own 2.6.22 port on compat.c */ +extern void dev_mc_unsync(struct net_device *to, struct net_device *from); +extern int dev_mc_sync(struct net_device *to, struct net_device *from); + +/* Our own 2.6.22 port on compat.c */ +extern void __dev_set_rx_mode(struct net_device *dev); + +/* Simple to add this */ +extern int cancel_delayed_work_sync(struct delayed_work *work); + +#define cancel_delayed_work_sync cancel_rearming_delayed_work + +#define debugfs_rename(a, b, c, d) 1 + +/* nl80211 requires multicast group support which is new and added on + * 2.6.23. We can't add support for it for older kernels to support it + * genl_family structure was changed. Lets just let through the + * genl_register_mc_group call. This means no multicast group suppport */ + +#define genl_register_mc_group(a, b) 0 + +/** + * struct genl_multicast_group - generic netlink multicast group + * @name: name of the multicast group, names are per-family + * @id: multicast group ID, assigned by the core, to use with + * genlmsg_multicast(). 
+ * @list: list entry for linking + * @family: pointer to family, need not be set before registering + */ +struct genl_multicast_group +{ + struct genl_family *family; /* private */ + struct list_head list; /* private */ + char name[GENL_NAMSIZ]; + u32 id; +}; + + +/* Added as of 2.6.23 */ +int pci_try_set_mwi(struct pci_dev *dev); + +/* Added as of 2.6.23 */ +#ifdef CONFIG_PM_SLEEP +/* + * Tell the freezer that the current task should be frozen by it + */ +static inline void set_freezable(void) +{ + current->flags &= ~PF_NOFREEZE; +} + +#else +static inline void set_freezable(void) {} +#endif /* CONFIG_PM_SLEEP */ + +#else +#define tcf_destroy_chain_compat tcf_destroy_chain +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) */ + +#endif /* LINUX_26_23_COMPAT_H */ diff --git a/include/linux/compat-2.6.24.h b/include/linux/compat-2.6.24.h new file mode 100644 index 0000000..f9c777c --- /dev/null +++ b/include/linux/compat-2.6.24.h @@ -0,0 +1,257 @@ +#ifndef LINUX_26_24_COMPAT_H +#define LINUX_26_24_COMPAT_H + +#include + +/* Compat work for 2.6.21, 2.6.22 and 2.6.23 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) + +#include +#include +#include +#include +#include +#include +#include + +#define KEY_BLUETOOTH 237 +#define KEY_WLAN 238 +#define KEY_UWB 239 + +#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) + +struct proc_dir_entry; +struct net_device; +struct net { + atomic_t count; /* To decided when the network + * namespace should be freed. + */ + atomic_t use_count; /* To track references we + * destroy on demand + */ + struct list_head list; /* list of network namespaces */ + struct work_struct work; /* work struct for freeing */ + + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + struct proc_dir_entry *proc_net_root; + + struct net_device *loopback_dev; /* The loopback */ + + struct list_head dev_base_head; + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; +}; + +#ifdef CONFIG_NET +/* Init's network namespace */ +extern struct net init_net; +#define INIT_NET_NS(net_ns) .net_ns = &init_net, +#else +#define INIT_NET_NS(net_ns) +#endif + +/* Added on 2.6.24 in include/linux/types.h by Al viro on commit 142956af */ +typedef unsigned long uintptr_t; + +/* From include/linux/net.h */ +enum sock_shutdown_cmd { + SHUT_RD = 0, + SHUT_WR = 1, + SHUT_RDWR = 2, +}; + +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,23)) /* Local check */ +/* Added as of 2.6.24 in include/linux/skbuff.h. + * + * Although 2.6.23 does support for CONFIG_NETDEVICES_MULTIQUEUE + * this helper was not added until 2.6.24. This implementation + * is exactly as it is on newer kernels. + * + * For older kernels we use the an internal mac80211 hack. + * For details see changes to include/net/mac80211.h through + * compat.diff and compat/mq_compat.h */ +static inline u16 skb_get_queue_mapping(struct sk_buff *skb) +{ +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + return skb->queue_mapping; +#else + return 0; +#endif +} +#endif /* Local 2.6.23 check */ + +/* On older kernels we handle this a bit differently, so we yield to that + * code for its implementation in mq_compat.h as we want to make + * use of the internal mac80211 __ieee80211_queue_stopped() which itself + * uses internal mac80211 data structure hacks. 
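+ *
+ * Illustrative sketch (editorial addition, not part of the original patch)
+ * of a pre-2.6.24 transmit path using the helpers above and below;
+ * skb_get_queue_mapping() simply yields 0 when
+ * CONFIG_NETDEVICES_MULTIQUEUE is not set:
+ *
+ *	u16 q = skb_get_queue_mapping(skb);
+ *
+ *	if (__netif_subqueue_stopped(dev, q))
+ *		return NETDEV_TX_BUSY;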
*/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) /* Local check */ +/** + * netif_subqueue_stopped - test status of subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Check individual transmit queue of a device with multiple transmit queues. + */ +static inline int __netif_subqueue_stopped(const struct net_device *dev, + u16 queue_index) +{ +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + return test_bit(__LINK_STATE_XOFF, + &dev->egress_subqueue[queue_index].state); +#else + return 0; +#endif +} + +/* Note: although the backport implementation for netif_subqueue_stopped + * on older kernels is identical to upstream __netif_subqueue_stopped() + * (except for a const qualifier) we implement netif_subqueue_stopped() + * as part of mac80211 as it relies on internal mac80211 structures we + * use for MQ support. We this implement it in mq_compat.h */ + +#endif /* Local 2.6.23 check */ + +/* + * Force link bug if constructor is used, can't be done compatibly + * because constructor arguments were swapped since then! + */ +extern void __incompatible_kmem_cache_create(void); + +/* 2.6.21 and 2.6.22 kmem_cache_create() takes 6 arguments */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) +#define kmem_cache_create(name, objsize, align, flags, ctor) \ + ({ \ + if (ctor) __incompatible_kmem_cache_create(); \ + kmem_cache_create((name), (objsize), (align), \ + (flags), NULL, NULL); \ + }) +#endif + +/* 2.6.23 kmem_cache_create() takes 5 arguments */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,23)) +#define kmem_cache_create(name, objsize, align, flags, ctor) \ + ({ \ + if (ctor) __incompatible_kmem_cache_create(); \ + kmem_cache_create((name), (objsize), (align), \ + (flags), NULL); \ + }) +#endif + +/* From include/linux/mod_devicetable.h */ + +/* SSB core, see drivers/ssb/ */ +#ifndef SSB_DEVICE +struct ssb_device_id { + __u16 vendor; + __u16 coreid; + __u8 revision; +}; +#define SSB_DEVICE(_vendor, _coreid, _revision) \ + { .vendor = _vendor, .coreid = _coreid, .revision = _revision, } +#define SSB_DEVTABLE_END \ + { 0, }, + +#define SSB_ANY_VENDOR 0xFFFF +#define SSB_ANY_ID 0xFFFF +#define SSB_ANY_REV 0xFF +#endif + + +/* Namespace stuff, introduced on 2.6.24 */ +#define dev_get_by_index(a, b) dev_get_by_index(b) +#define __dev_get_by_index(a, b) __dev_get_by_index(b) + +/* + * Display a 6 byte device address (MAC) in a readable format. + */ +#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" +extern char *print_mac(char *buf, const u8 *addr); +#define DECLARE_MAC_BUF(var) char var[18] __maybe_unused + +extern int eth_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, void *daddr, + void *saddr, unsigned len); +extern int eth_rebuild_header(struct sk_buff *skb); +extern void eth_header_cache_update(struct hh_cache *hh, struct net_device *dev, + unsigned char * haddr); +extern int eth_header_cache(struct neighbour *neigh, + struct hh_cache *hh); + +/* This structure is simply not present on 2.6.22 and 2.6.23 */ +struct header_ops { + int (*create) (struct sk_buff *skb, struct net_device *dev, + unsigned short type, void *daddr, + void *saddr, unsigned len); + int (*parse)(const struct sk_buff *skb, unsigned char *haddr); + int (*rebuild)(struct sk_buff *skb); + #define HAVE_HEADER_CACHE + int (*cache)(struct neighbour *neigh, struct hh_cache *hh); + void (*cache_update)(struct hh_cache *hh, + struct net_device *dev, + unsigned char *haddr); +}; + +/* net/ieee80211/ieee80211_crypt_tkip uses sg_init_table. This was added on + * 2.6.24. 
CONFIG_DEBUG_SG was added in 2.6.24 as well, so lets just ignore + * the debug stuff. Note that adding this required changes to the struct + * scatterlist on include/asm/scatterlist*, so the right way to port this + * is to simply ignore the new structure changes and zero the scatterlist + * array. We lave the kdoc intact for reference. + */ + +/** + * sg_mark_end - Mark the end of the scatterlist + * @sg: SG entryScatterlist + * + * Description: + * Marks the passed in sg entry as the termination point for the sg + * table. A call to sg_next() on this entry will return NULL. + * + **/ +static inline void sg_mark_end(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + /* + * Set termination bit, clear potential chain bit + */ + sg->page_link |= 0x02; + sg->page_link &= ~0x01; +#endif +} + +/** + * sg_init_table - Initialize SG table + * @sgl: The SG table + * @nents: Number of entries in table + * + * Notes: + * If this is part of a chained sg table, sg_mark_end() should be + * used only on the last table part. + * + **/ +static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) +{ + memset(sgl, 0, sizeof(*sgl) * nents); +} + +/** + * usb_endpoint_num - get the endpoint's number + * @epd: endpoint to be checked + * + * Returns @epd's number: 0 to 15. + */ +static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd) +{ + return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; +} + +/* Helper to make struct pci_dev is_pcie compatibility code smaller */ +int compat_is_pcie(struct pci_dev *pdev); + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) */ + +#endif /* LINUX_26_24_COMPAT_H */ diff --git a/include/linux/compat-2.6.25.h b/include/linux/compat-2.6.25.h new file mode 100644 index 0000000..8d329b3 --- /dev/null +++ b/include/linux/compat-2.6.25.h @@ -0,0 +1,177 @@ +#ifndef LINUX_26_25_COMPAT_H +#define LINUX_26_25_COMPAT_H + +#include + +/* Compat work for 2.6.24 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* The macro below uses a const upstream, this differs */ + +/** + * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table + * @_table: device table name + * + * This macro is used to create a struct pci_device_id array (a device table) + * in a generic manner. + */ +#define DEFINE_PCI_DEVICE_TABLE(_table) \ + const struct pci_device_id _table[] __devinitdata + +/* + * Backport work for QoS dependencies (kernel/pm_qos_params.c) + * pm-qos stuff written by mark gross mgross@linux.intel.com. + * + * ipw2100 now makes use of: + * + * pm_qos_add_requirement(), + * pm_qos_update_requirement() and + * pm_qos_remove_requirement() from it + * + * mac80211 uses the network latency to determine if to enable or not + * dynamic PS. mac80211 also and registers a notifier for when + * the latency changes. Since older kernels do no thave pm-qos stuff + * we just implement it completley here and register it upon cfg80211 + * init. I haven't tested ipw2100 on 2.6.24 though. + * + * This pm-qos implementation is copied verbatim from the kernel + * written by mark gross mgross@linux.intel.com. You don't have + * to do anythinig to use pm-qos except use the same exported + * routines as used in newer kernels. The compat_pm_qos_power_init() + * defned below is used by the compat module to initialize pm-qos. 
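+ *
+ * Illustrative sketch (editorial addition, not part of the original patch)
+ * of the requirement API this backport provides; "foo_wlan" is just a
+ * hypothetical requirement name:
+ *
+ *	pm_qos_add_requirement(PM_QOS_NETWORK_LATENCY, "foo_wlan", 55);
+ *	pm_qos_update_requirement(PM_QOS_NETWORK_LATENCY, "foo_wlan",
+ *				  PM_QOS_DEFAULT_VALUE);
+ *	pm_qos_remove_requirement(PM_QOS_NETWORK_LATENCY, "foo_wlan");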
+ */ +int compat_pm_qos_power_init(void); +int compat_pm_qos_power_deinit(void); + +/* + * 2.6.25 adds PM_EVENT_HIBERNATE as well here but + * we don't have this on <= 2.6.23) + */ +#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND) + +/* Although we don't care about wimax this is needed for rfkill input stuff */ +#define KEY_WIMAX 246 + +/* Although pm_qos stuff is not implemented on <= 2.6.24 lets keep the define */ +#define PM_QOS_DEFAULT_VALUE -1 + +#define __WARN(foo) dump_stack() + +#define dev_emerg(dev, format, arg...) \ + dev_printk(KERN_EMERG , dev , format , ## arg) +#define dev_alert(dev, format, arg...) \ + dev_printk(KERN_ALERT , dev , format , ## arg) +#define dev_crit(dev, format, arg...) \ + dev_printk(KERN_CRIT , dev , format , ## arg) + +extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); +extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); + +#define seq_file_net &init_net; + +enum nf_inet_hooks { + NF_INET_PRE_ROUTING = 0, + NF_INET_LOCAL_IN = 1, + NF_INET_FORWARD = 2, + NF_INET_LOCAL_OUT = 3, + NF_INET_POST_ROUTING = 4, + NF_INET_NUMHOOKS = 5 +}; + +/* The patch: + * commit 8b5f6883683c91ad7e1af32b7ceeb604d68e2865 + * Author: Marcin Slusarz + * Date: Fri Feb 8 04:20:12 2008 -0800 + * + * byteorder: move le32_add_cpu & friends from OCFS2 to core + * + * moves le*_add_cpu and be*_add_cpu functions from OCFS2 to core + * header (1st) and converted some existing code to it. We port + * it here as later kernels will most likely use it. + */ +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpu(*var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpu(*var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpu(*var) + val); +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + u16 v = be16_to_cpu(*var); + *var = cpu_to_be16(v + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + u32 v = be32_to_cpu(*var); + *var = cpu_to_be32(v + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + u64 v = be64_to_cpu(*var); + *var = cpu_to_be64(v + val); +} + +/* 2.6.25 changes hwrng_unregister()'s behaviour by supporting + * suspend of its parent device (the misc device, which is itself the + * hardware random number generator). It does this by passing a parameter to + * unregister_miscdev() which is not supported in older kernels. The suspend + * parameter allows us to enable access to the device's hardware + * number generator during suspend. As far as wireless is concerned this means + * if a driver goes to suspend it you won't have the HNR available in + * older kernels. */ +static inline void __hwrng_unregister(struct hwrng *rng, bool suspended) +{ + hwrng_unregister(rng); +} + +static inline void led_classdev_unregister_suspended(struct led_classdev *lcd) +{ + led_classdev_unregister(lcd); +} + +/** + * The following things are out of ./include/linux/kernel.h + * The new iwlwifi driver is using them. 
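+ *
+ * Editorial sketch (not part of the original patch), where "buf" stands
+ * for some caller-supplied string:
+ *
+ *	unsigned long val;
+ *
+ *	if (strict_strtoul(buf, 10, &val))
+ *		return -EINVAL;
+ *
+ * strict_strtoul() returns 0 on success and, unlike simple_strtoul(),
+ * rejects strings with trailing garbage.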
+ */ +extern int strict_strtoul(const char *, unsigned int, unsigned long *); +extern int strict_strtol(const char *, unsigned int, long *); + +#else +/* + * Kernels >= 2.6.25 have pm-qos and its initialized as part of + * the bootup process + */ +static inline int compat_pm_qos_power_init(void) +{ + return 0; +} + +static inline int compat_pm_qos_power_deinit(void) +{ + return 0; +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) */ + +#endif /* LINUX_26_25_COMPAT_H */ diff --git a/include/linux/compat-2.6.26.h b/include/linux/compat-2.6.26.h new file mode 100644 index 0000000..2bee30e --- /dev/null +++ b/include/linux/compat-2.6.26.h @@ -0,0 +1,381 @@ +#ifndef LINUX_26_26_COMPAT_H +#define LINUX_26_26_COMPAT_H + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) + +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +#include +#endif +#include +#include + +/* These jiffie helpers added as of 2.6.26 */ + +/* + * These four macros compare jiffies and 'a' for convenience. + */ + +/* time_is_before_jiffies(a) return true if a is before jiffies */ +#define time_is_before_jiffies(a) time_after(jiffies, a) + +/* time_is_after_jiffies(a) return true if a is after jiffies */ +#define time_is_after_jiffies(a) time_before(jiffies, a) + +/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/ +#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a) + +/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/ +#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a) + +/* This comes from include/linux/input.h */ +#define SW_RFKILL_ALL 0x03 /* rfkill master switch, type "any" + set = radio enabled */ + +/* From kernel.h */ +#define USHORT_MAX ((u16)(~0U)) +#define SHORT_MAX ((s16)(USHORT_MAX>>1)) +#define SHORT_MIN (-SHORT_MAX - 1) + +extern int dev_set_name(struct device *dev, const char *name, ...) + __attribute__((format(printf, 2, 3))); + +/** + * clamp_t - return a value clamped to a given range using a given type + * @type: the type of variable to use + * @val: current value + * @min: minimum allowable value + * @max: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of type + * 'type' to make all the comparisons. + */ +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min: __val; \ + __val > __max ? __max: __val; }) + + +/* from include/linux/device.h */ +/* device_create_drvdata() is new */ +extern struct device *device_create_drvdata(struct class *cls, + struct device *parent, + dev_t devt, + void *drvdata, + const char *fmt, ...) +__attribute__((format(printf, 5, 6))); + +/* This is from include/linux/list.h */ + +/** + * list_is_singular - tests whether a list has just one entry. + * @head: the list to test. 
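+ *
+ * Editorial sketch (not part of the original patch), assuming a
+ * hypothetical entry type with an embedded list_head "node" and a
+ * hypothetical foo_handle_single() helper:
+ *
+ *	LIST_HEAD(queue);
+ *
+ *	list_add_tail(&entry->node, &queue);
+ *	if (list_is_singular(&queue))
+ *		foo_handle_single(&queue);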
+ */ +static inline int list_is_singular(const struct list_head *head) +{ + return !list_empty(head) && (head->next == head->prev); +} + +/* This is from include/linux/device.h, which was added as of 2.6.26 */ +static inline const char *dev_name(struct device *dev) +{ + /* will be changed into kobject_name(&dev->kobj) in the near future */ + return dev->bus_id; +} + +/* This is from include/linux/kernel.h, which was added as of 2.6.26 */ + +/** + * clamp_val - return a value clamped to a given range using val's type + * @val: current value + * @min: minimum allowable value + * @max: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of whatever + * type the input argument 'val' is. This is useful when val is an unsigned + * type and min and max are literals that will otherwise be assigned a signed + * integer type. + */ + +#define clamp_val(val, min, max) ({ \ + typeof(val) __val = (val); \ + typeof(val) __min = (min); \ + typeof(val) __max = (max); \ + __val = __val < __min ? __min: __val; \ + __val > __max ? __max: __val; }) + +/* This comes from include/net/net_namespace.h */ + +#ifdef CONFIG_NET_NS +static inline +int net_eq(const struct net *net1, const struct net *net2) +{ + return net1 == net2; +} +#else +static inline +int net_eq(const struct net *net1, const struct net *net2) +{ + return 1; +} +#endif + +static inline +void dev_net_set(struct net_device *dev, struct net *net) +{ +#ifdef CONFIG_NET_NS + release_net(dev->nd_net); + dev->nd_net = hold_net(net); +#endif +} + +static inline +struct net *sock_net(const struct sock *sk) +{ +#ifdef CONFIG_NET_NS + return sk->sk_net; +#else + return &init_net; +#endif +} + +/* This comes from include/linux/netdevice.h */ + +/* + * Net namespace inlines + */ +static inline +struct net *dev_net(const struct net_device *dev) +{ +#ifdef CONFIG_NET_NS + /* + * compat-wirelss backport note: + * For older kernels we may just need to always return init_net, + * not sure when we added dev->nd_net. + */ + return dev->nd_net; +#else + return &init_net; +#endif +} + + +/* + * 2.6.26 added its own unaligned API which the + * new drivers can use. Lets port it here by including it in older + * kernels and also deal with the architecture handling here. + */ + +#ifdef CONFIG_ALPHA + +#include +#include +#include + +#endif /* alpha */ +#ifdef CONFIG_ARM + +/* arm */ +#include +#include +#include + +#endif /* arm */ +#ifdef CONFIG_AVR32 + +/* + * AVR32 can handle some unaligned accesses, depending on the + * implementation. The AVR32 AP implementation can handle unaligned + * words, but halfwords must be halfword-aligned, and doublewords must + * be word-aligned. + * + * However, swapped word loads must be word-aligned so we can't + * optimize word loads in general. + */ + +#include +#include +#include + +#endif +#ifdef CONFIG_BLACKFIN + +#include +#include +#include + +#endif /* blackfin */ +#ifdef CONFIG_CRIS + +/* + * CRIS can do unaligned accesses itself. 
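+ *
+ * Editorial sketch (not part of the original patch): whichever
+ * per-architecture header is pulled in, callers are expected to use the
+ * same 2.6.26-style accessors, e.g. on a little-endian byte stream "hdr"
+ * (assuming the le/be helper variants are provided by the included
+ * headers):
+ *
+ *	u16 fc  = get_unaligned_le16(hdr);
+ *	u32 len = get_unaligned_le32(hdr + 4);
+ *
+ *	put_unaligned_le16(fc, hdr);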
+ */ +#include +#include + +#endif /* cris */ +#ifdef CONFIG_FRV + +#include +#include +#include + +#endif /* frv */ +#ifdef CONFIG_H8300 + +#include +#include +#include + +#endif /* h8300 */ +#ifdef CONFIG_IA64 + +#include +#include +#include + +#endif /* ia64 */ +#ifdef CONFIG_M32R + +#if defined(__LITTLE_ENDIAN__) +# include +# include +# include +#else +# include +# include +# include +#endif + +#endif /* m32r */ +#ifdef CONFIG_M68K /* this handles both m68k and m68knommu */ + +#ifdef CONFIG_COLDFIRE +#include +#include +#include +#else + +/* + * The m68k can do unaligned accesses itself. + */ +#include +#include +#endif + +#endif /* m68k and m68knommu */ +#ifdef CONFIG_MIPS + +#if defined(__MIPSEB__) +# include +# include +# include +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#elif defined(__MIPSEL__) +# include +# include +# include +#endif + +#endif /* mips */ +#ifdef CONFIG_MN10300 + +#include +#include + +#endif /* mn10300 */ +#ifdef CONFIG_PARISC + +#include +#include +#include + +#endif /* parisc */ +#ifdef CONFIG_PPC +/* + * The PowerPC can do unaligned accesses itself in big endian mode. + */ +#include +#include + +#endif /* ppc */ +#ifdef CONFIG_S390 + +/* + * The S390 can do unaligned accesses itself. + */ +#include +#include + +#endif /* s390 */ +#ifdef CONFIG_SUPERH + +/* SH can't handle unaligned accesses. */ +#ifdef __LITTLE_ENDIAN__ +# include +# include +# include +#else +# include +# include +# include +#endif + +#endif /* sh - SUPERH */ +#ifdef CONFIG_SPARC + +/* sparc and sparc64 */ +#include +#include +#include + +#endif /* sparc */ +#ifdef CONFIG_UML + +#include "asm/arch/unaligned.h" + +#endif /* um - uml */ +#ifdef CONFIG_V850 + +#include +#include +#include + +#endif /* v850 */ +#ifdef CONFIG_X86 +/* + * The x86 can do unaligned accesses itself. + */ +#include +#include + +#endif /* x86 */ +#ifdef CONFIG_XTENSA + +#ifdef __XTENSA_EL__ +# include +# include +# include +#elif defined(__XTENSA_EB__) +# include +# include +# include +#else +# error processor byte order undefined! +#endif + +#endif /* xtensa */ + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) */ + +#endif /* LINUX_26_26_COMPAT_H */ diff --git a/include/linux/compat-2.6.27.h b/include/linux/compat-2.6.27.h new file mode 100644 index 0000000..a690e2c --- /dev/null +++ b/include/linux/compat-2.6.27.h @@ -0,0 +1,225 @@ +#ifndef LINUX_26_27_COMPAT_H +#define LINUX_26_27_COMPAT_H + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) + +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +#include +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) */ +#include +#include +#include +#include +#include + +#define PCI_PM_CAP_PME_SHIFT 11 + +/* I can't find a more suitable replacement... */ +#define flush_work(work) cancel_work_sync(work) + +struct builtin_fw { + char *name; + void *data; + unsigned long size; +}; + +/* + * On older kernels we do not have net_device Multi Queue support, but + * since we no longer use MQ on mac80211 we can simply use the 0 queue. + * Note that if other fullmac drivers make use of this they then need + * to be backported somehow or deal with just 1 queueue from MQ. 
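+ *
+ * Illustrative sketch (editorial addition, not part of the original patch),
+ * e.g. in a hypothetical foo_tx_timeout() handler; on pre-2.6.27 kernels
+ * the helpers below simply collapse to the single-queue primitives:
+ *
+ *	netif_tx_stop_all_queues(dev);
+ *	foo_reset_hardware(priv);
+ *	netif_tx_wake_all_queues(dev);
+ *
+ * (foo_reset_hardware() and priv are made up for illustration.)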
+ */ +static inline void netif_tx_wake_all_queues(struct net_device *dev) +{ + netif_wake_queue(dev); +} +static inline void netif_tx_start_all_queues(struct net_device *dev) +{ + netif_start_queue(dev); +} +static inline void netif_tx_stop_all_queues(struct net_device *dev) +{ + netif_stop_queue(dev); +} + +bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); + +/* + * The net_device has a spin_lock on newer kernels, on older kernels we're out of luck + */ +#define netif_addr_lock_bh(dev) +#define netif_addr_unlock_bh(dev) + +/* + * To port this properly we'd have to port warn_slowpath_null(), + * which I'm lazy to do so just do a regular print for now. If you + * want to port this read kernel/panic.c + */ +#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0) + +/* This is ported directly as-is on newer kernels */ +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif + +/* On 2.6.27 a second argument was added, on older kernels we ignore it */ +#define dma_mapping_error(pdev, dma_addr) dma_mapping_error(dma_addr) +#define pci_dma_mapping_error(pdev, dma_addr) dma_mapping_error(pdev, dma_addr) + +/* This is from include/linux/ieee80211.h */ +#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 + +/* New link list changes added as of 2.6.27, needed for ath9k */ + +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. + * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + + +/* __list_splice as re-implemented on 2.6.27, we backport it */ +static inline void __compat_list_splice_new_27(const struct list_head *list, + struct list_head *prev, + struct list_head *next) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __compat_list_splice_new_27(list, head->prev, head); +} + +/** + * list_splice_tail_init - join two lists and reinitialise the emptied list + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * Each of the lists is a queue. 
+ * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __compat_list_splice_new_27(list, head->prev, head); + INIT_LIST_HEAD(list); + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); +extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) */ + +#define iwe_stream_add_value(info, event, value, ends, iwe, event_len) iwe_stream_add_value(event, value, ends, iwe, event_len) +#define iwe_stream_add_point(info, stream, ends, iwe, extra) iwe_stream_add_point(stream, ends, iwe, extra) +#define iwe_stream_add_event(info, stream, ends, iwe, event_len) iwe_stream_add_event(stream, ends, iwe, event_len) + +/* Flags available in struct iw_request_info */ +#define IW_REQUEST_FLAG_COMPAT 0x0001 /* Compat ioctl call */ + +static inline int iwe_stream_lcp_len(struct iw_request_info *info) +{ +#ifdef CONFIG_COMPAT + if (info->flags & IW_REQUEST_FLAG_COMPAT) + return IW_EV_COMPAT_LCP_LEN; +#endif + return IW_EV_LCP_LEN; +} + +#ifdef CONFIG_ARM + +/* + * The caller asks to handle a range between offset and offset + size, + * but we process a larger range from 0 to offset + size due to lack of + * offset support. + */ + +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t handle, unsigned long offset, size_t size, + enum dma_data_direction dir) +{ + dma_sync_single_for_cpu(dev, handle, offset + size, dir); +} + +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t handle, unsigned long offset, size_t size, + enum dma_data_direction dir) +{ + dma_sync_single_for_device(dev, handle, offset + size, dir); +} + +#endif /* arm */ + +#if defined(CONFIG_DEBUG_FS) +void debugfs_remove_recursive(struct dentry *dentry); +#else +static inline void debugfs_remove_recursive(struct dentry *dentry) +{ } +#endif + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) */ + +#endif /* LINUX_26_27_COMPAT_H */ diff --git a/include/linux/compat-2.6.28.h b/include/linux/compat-2.6.28.h new file mode 100644 index 0000000..2cab320 --- /dev/null +++ b/include/linux/compat-2.6.28.h @@ -0,0 +1,218 @@ +#ifndef LINUX_26_28_COMPAT_H +#define LINUX_26_28_COMPAT_H + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)) + +#include +#include +#include + +#ifndef ETH_P_PAE +#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ +#endif + +#include + +#ifndef WARN_ONCE +#define WARN_ONCE(condition, format...) 
({ \ + static int __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once)) \ + if (WARN(!__warned, format)) \ + __warned = 1; \ + unlikely(__ret_warn_once); \ +}) +#endif /* From include/asm-generic/bug.h */ + +#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) + +#include +#include +#include +#ifdef pcmcia_parse_tuple +#undef pcmcia_parse_tuple +#define pcmcia_parse_tuple(tuple, parse) pccard_parse_tuple(tuple, parse) +#endif + +/* From : include/pcmcia/ds.h */ +/* loop CIS entries for valid configuration */ +int pcmcia_loop_config(struct pcmcia_device *p_dev, + int (*conf_check) (struct pcmcia_device *p_dev, + cistpl_cftable_entry_t *cfg, + cistpl_cftable_entry_t *dflt, + unsigned int vcc, + void *priv_data), + void *priv_data); + +#endif /* CONFIG_PCMCIA */ + +/* USB anchors were added as of 2.6.23 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) + +#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE) +#if 0 +extern void usb_poison_urb(struct urb *urb); +#endif +extern void usb_unpoison_urb(struct urb *urb); + +#if 0 +extern void usb_poison_anchored_urbs(struct usb_anchor *anchor); +#endif + +extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor); +extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor); +extern int usb_anchor_empty(struct usb_anchor *anchor); +#endif /* CONFIG_USB */ +#endif + + +void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); + +/** + * skb_queue_is_last - check if skb is the last entry in the queue + * @list: queue head + * @skb: buffer + * + * Returns true if @skb is the last buffer on the list. + */ +static inline bool skb_queue_is_last(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return (skb->next == (struct sk_buff *) list); +} + +/** + * skb_queue_next - return the next packet in the queue + * @list: queue head + * @skb: current buffer + * + * Return the next packet in @list after @skb. It is only valid to + * call this if skb_queue_is_last() evaluates to false. + */ +static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + /* This BUG_ON may seem severe, but if we just return then we + * are going to dereference garbage. + */ + BUG_ON(skb_queue_is_last(list, skb)); + return skb->next; +} + +/** + * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head + * @list: queue to initialize + * + * This initializes only the list and queue length aspects of + * an sk_buff_head object. This allows to initialize the list + * aspects of an sk_buff_head without reinitializing things like + * the spinlock. It can also be used for on-stack sk_buff_head + * objects where the spinlock is known to not be used. 
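+ *
+ * Editorial sketch (not part of the original patch):
+ *
+ *	struct sk_buff_head frames;
+ *
+ *	__skb_queue_head_init(&frames);
+ *	__skb_queue_tail(&frames, skb);
+ *
+ * i.e. a purely on-stack queue touched from a single context, so its
+ * spinlock never needs to be initialised or taken.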
+ */ +static inline void __skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} + +static inline void __skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff *prev, + struct sk_buff *next) +{ + struct sk_buff *first = list->next; + struct sk_buff *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + +/** + * skb_queue_splice - join two skb lists, this is designed for stacks + * @list: the new list to add + * @head: the place to add it in the first list + */ +static inline void skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + } +} + +/** + * skb_queue_splice_tail - join two skb lists and reinitialise the emptied list + * @list: the new list to add + * @head: the place to add it in the first list + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void skb_queue_splice_tail_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } +} /* From include/linux/skbuff.h */ + +/** + * skb_queue_splice_tail - join two skb lists, each list being a queue + * @list: the new list to add + * @head: the place to add it in the first list + */ +static inline void skb_queue_splice_tail(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + } +} + +#ifndef DECLARE_TRACE + +#define TP_PROTO(args...) args +#define TP_ARGS(args...) args + +#define DECLARE_TRACE(name, proto, args) \ + static inline void _do_trace_##name(struct tracepoint *tp, proto) \ + { } \ + static inline void trace_##name(proto) \ + { } \ + static inline int register_trace_##name(void (*probe)(proto)) \ + { \ + return -ENOSYS; \ + } \ + static inline int unregister_trace_##name(void (*probe)(proto)) \ + { \ + return -ENOSYS; \ + } + +#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) +#define EXPORT_TRACEPOINT_SYMBOL(name) + + +#endif + +/* openSuse includes round_jiffies_up in it's kernel 2.6.27. + * This is needed to prevent conflicts with the openSuse definition. + */ +#define round_jiffies_up backport_round_jiffies_up + +unsigned long round_jiffies_up(unsigned long j); + +extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size); + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)) */ + +#endif /* LINUX_26_28_COMPAT_H */ diff --git a/include/linux/compat-2.6.29.h b/include/linux/compat-2.6.29.h new file mode 100644 index 0000000..c3572a2 --- /dev/null +++ b/include/linux/compat-2.6.29.h @@ -0,0 +1,66 @@ +#ifndef LINUX_26_29_COMPAT_H +#define LINUX_26_29_COMPAT_H + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) + +#include +#include +#include + +/** + * skb_queue_is_first - check if skb is the first entry in the queue + * @list: queue head + * @skb: buffer + * + * Returns true if @skb is the first buffer on the list. 
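+ *
+ * Illustrative sketch (editorial addition, not part of the original patch):
+ * walking a queue "q" backwards, assuming it is known to be non-empty and
+ * that foo_process() is a hypothetical per-frame handler:
+ *
+ *	struct sk_buff *skb = skb_peek_tail(&q);
+ *
+ *	for (;;) {
+ *		foo_process(skb);
+ *		if (skb_queue_is_first(&q, skb))
+ *			break;
+ *		skb = skb_queue_prev(&q, skb);
+ *	}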
+ */ +static inline bool skb_queue_is_first(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return (skb->prev == (struct sk_buff *) list); +} + +/** + * skb_queue_prev - return the prev packet in the queue + * @list: queue head + * @skb: current buffer + * + * Return the prev packet in @list before @skb. It is only valid to + * call this if skb_queue_is_first() evaluates to false. + */ +static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + /* This BUG_ON may seem severe, but if we just return then we + * are going to dereference garbage. + */ + BUG_ON(skb_queue_is_first(list, skb)); + return skb->prev; +} + + +static inline struct net_device_stats *dev_get_stats(struct net_device *dev) +{ + return dev->get_stats(dev); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) +#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE) +extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor); +#endif /* CONFIG_USB */ +#endif + +#define DIV_ROUND_CLOSEST(x, divisor)( \ +{ \ + typeof(divisor) __divisor = divisor; \ + (((x) + ((__divisor) / 2)) / (__divisor)); \ +} \ +) + +extern int eth_mac_addr(struct net_device *dev, void *p); + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) */ + +#endif /* LINUX_26_29_COMPAT_H */ diff --git a/include/linux/compat-2.6.30.h b/include/linux/compat-2.6.30.h new file mode 100644 index 0000000..f997d86 --- /dev/null +++ b/include/linux/compat-2.6.30.h @@ -0,0 +1,28 @@ +#ifndef LINUX_26_30_COMPAT_H +#define LINUX_26_30_COMPAT_H + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) + +#ifndef TP_PROTO +#define TP_PROTO(args...) TPPROTO(args) +#endif +#ifndef TP_ARGS +#define TP_ARGS(args...) TPARGS(args) +#endif + +#define IRQ_WAKE_THREAD (2) + +/* From : include/linux/pm.h */ +/* How to reorder dpm_list after device_move() */ +enum dpm_order { + DPM_ORDER_NONE, + DPM_ORDER_DEV_AFTER_PARENT, + DPM_ORDER_PARENT_BEFORE_DEV, + DPM_ORDER_DEV_LAST, +}; + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) */ + +#endif /* LINUX_26_30_COMPAT_H */ diff --git a/include/linux/compat-2.6.31.h b/include/linux/compat-2.6.31.h new file mode 100644 index 0000000..54ddefd --- /dev/null +++ b/include/linux/compat-2.6.31.h @@ -0,0 +1,197 @@ +#ifndef LINUX_26_31_COMPAT_H +#define LINUX_26_31_COMPAT_H + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) + +#include +#include +#include +#include +#include +#include + +/* + * These macros allow us to backport rfkill without any + * changes on cfg80211 through compat.diff. Note that this + * file will be included by rfkill_backport.h so we must + * not conflict with things there. 
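+ *
+ * Editorial sketch (not part of the original patch): drivers keep using
+ * the 2.6.31 rfkill API unchanged and the defines below quietly route the
+ * calls to the backported implementation (foo_rfkill_ops and priv are
+ * hypothetical):
+ *
+ *	struct rfkill *rfk = rfkill_alloc("foo-wlan", dev, RFKILL_TYPE_WLAN,
+ *					  &foo_rfkill_ops, priv);
+ *
+ *	if (rfk && rfkill_register(rfk))
+ *		rfkill_destroy(rfk);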
+ */ +#define rfkill_get_led_trigger_name backport_rfkill_get_led_trigger_name +#define rfkill_set_led_trigger_name backport_rfkill_set_led_trigger_name +#define rfkill_set_hw_state backport_rfkill_set_hw_state +#define rfkill_set_sw_state backport_rfkill_set_sw_state +#define rfkill_init_sw_state backport_rfkill_init_sw_state +#define rfkill_set_states backport_rfkill_set_states +#define rfkill_pause_polling backport_rfkill_pause_polling +#define rfkill_resume_polling backport_rfkill_resume_polling +#define rfkill_blocked backport_rfkill_blocked +#define rfkill_alloc backport_rfkill_alloc +#define rfkill_register backport_rfkill_register +#define rfkill_unregister backport_rfkill_unregister +#define rfkill_destroy backport_rfkill_destroy + +#ifndef ERFKILL +#if !defined(CONFIG_ALPHA) && !defined(CONFIG_MIPS) && !defined(CONFIG_PARISC) && !defined(CONFIG_SPARC) +#define ERFKILL 132 /* Operation not possible due to RF-kill */ +#endif +#ifdef CONFIG_ALPHA +#define ERFKILL 138 /* Operation not possible due to RF-kill */ +#endif +#ifdef CONFIG_MIPS +#define ERFKILL 167 /* Operation not possible due to RF-kill */ +#endif +#ifdef CONFIG_PARISC +#define ERFKILL 256 /* Operation not possible due to RF-kill */ +#endif +#ifdef CONFIG_SPARC +#define ERFKILL 134 /* Operation not possible due to RF-kill */ +#endif +#endif + +#ifndef NETDEV_PRE_UP +#define NETDEV_PRE_UP 0x000D +#endif + +#ifndef SDIO_DEVICE_ID_MARVELL_8688WLAN +#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104 +#endif + +struct compat_threaded_irq { + unsigned int irq; + irq_handler_t handler; + irq_handler_t thread_fn; + void *dev_id; + char wq_name[64]; + struct workqueue_struct *wq; + struct work_struct work; +}; + +/* + * kmemleak was introduced on 2.6.31, since older kernels do not have + * we simply ignore its tuning. + */ +static inline void kmemleak_ignore(const void *ptr) +{ + return; +} + +static inline void kmemleak_not_leak(const void *ptr) +{ + return; +} + +static inline void kmemleak_no_scan(const void *ptr) +{ + return; +} + +/* + * Added via adf30907d63893e4208dfe3f5c88ae12bc2f25d5 + * + * There is no _sk_dst on older kernels, so just set the + * old dst to NULL and release it directly. 
+ */ +static inline void skb_dst_drop(struct sk_buff *skb) +{ + dst_release(skb->dst); + skb->dst = NULL; +} + +static inline struct dst_entry *skb_dst(const struct sk_buff *skb) +{ + return (struct dst_entry *)skb->dst; +} + +static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) +{ + skb->dst = dst; +} + +static inline struct rtable *skb_rtable(const struct sk_buff *skb) +{ + return (struct rtable *)skb_dst(skb); +} + +extern int genl_register_family_with_ops(struct genl_family *family, + struct genl_ops *ops, size_t n_ops); + + +/* Backport threaded IRQ support */ + +static inline +void compat_irq_work(struct work_struct *work) +{ + struct compat_threaded_irq *comp = container_of(work, struct compat_threaded_irq, work); + comp->thread_fn(comp->irq, comp->dev_id); +} + +static inline +irqreturn_t compat_irq_dispatcher(int irq, void *dev_id) +{ + struct compat_threaded_irq *comp = dev_id; + irqreturn_t res; + + res = comp->handler(irq, comp->dev_id); + if (res == IRQ_WAKE_THREAD) { + queue_work(comp->wq, &comp->work); + res = IRQ_HANDLED; + } + + return res; +} + +static inline +int compat_request_threaded_irq(struct compat_threaded_irq *comp, + unsigned int irq, + irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long flags, + const char *name, + void *dev_id) +{ + comp->irq = irq; + comp->handler = handler; + comp->thread_fn = thread_fn; + comp->dev_id = dev_id; + INIT_WORK(&comp->work, compat_irq_work); + + if (!comp->wq) { + snprintf(comp->wq_name, sizeof(comp->wq_name), + "compirq/%u-%s", irq, name); + comp->wq = create_singlethread_workqueue(comp->wq_name); + if (!comp->wq) { + printk(KERN_ERR "Failed to create compat-threaded-IRQ workqueue %s\n", + comp->wq_name); + return -ENOMEM; + } + } + return request_irq(irq, compat_irq_dispatcher, flags, name, comp); +} + +static inline +void compat_free_threaded_irq(struct compat_threaded_irq *comp) +{ + free_irq(comp->irq, comp); +} + +static inline +void compat_destroy_threaded_irq(struct compat_threaded_irq *comp) +{ + if (comp->wq) + destroy_workqueue(comp->wq); + comp->wq = NULL; +} + +static inline +void compat_synchronize_threaded_irq(struct compat_threaded_irq *comp) +{ + synchronize_irq(comp->irq); + cancel_work_sync(&comp->work); +} + + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) */ + +#endif /* LINUX_26_31_COMPAT_H */ diff --git a/include/linux/compat-2.6.32.h b/include/linux/compat-2.6.32.h new file mode 100644 index 0000000..2f5013f --- /dev/null +++ b/include/linux/compat-2.6.32.h @@ -0,0 +1,97 @@ +#ifndef LINUX_26_32_COMPAT_H +#define LINUX_26_32_COMPAT_H + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)) + +#include +#include +#include +#include +#include + +#define SDIO_VENDOR_ID_INTEL 0x0089 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403 +#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 +#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405 +#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 + +static inline void flush_delayed_work(struct delayed_work *dwork) +{ + if (del_timer_sync(&dwork->timer)) { + /* + * This is what would happen on 2.6.32 but since we don't have + * access to the singlethread_cpu we can't really backport this, + * so avoid really *flush*ing the work... Oh well. Any better ideas? 
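The threaded-IRQ backport above registers compat_irq_dispatcher() as the hard handler and pushes the second half onto a private single-threaded workqueue whenever the driver's primary handler returns IRQ_WAKE_THREAD. The following user-space sketch mirrors only that dispatch decision; the handler names are made up for illustration and a direct call stands in for queue_work():

#include <stdio.h>

typedef enum { IRQ_NONE, IRQ_HANDLED, IRQ_WAKE_THREAD } irqreturn_t;
typedef irqreturn_t (*irq_handler_t)(int irq, void *dev_id);

/* Hypothetical driver handlers: the hard handler only acks the chip,
 * the thread handler does the slow (normally sleepable) work. */
static irqreturn_t demo_hard_handler(int irq, void *dev_id)
{
        printf("hard handler: irq %d acked\n", irq);
        return IRQ_WAKE_THREAD;          /* ask for the threaded part */
}

static irqreturn_t demo_thread_handler(int irq, void *dev_id)
{
        printf("thread handler: irq %d processed\n", irq);
        return IRQ_HANDLED;
}

/* Mirrors compat_irq_dispatcher(): run the hard handler, and if it asks
 * for the thread, defer the second half (here a direct call instead of
 * queue_work() on the backport's private workqueue). */
static irqreturn_t demo_dispatch(int irq, irq_handler_t handler,
                                 irq_handler_t thread_fn, void *dev_id)
{
        irqreturn_t res = handler(irq, dev_id);

        if (res == IRQ_WAKE_THREAD) {
                thread_fn(irq, dev_id);  /* queue_work() in the real code */
                res = IRQ_HANDLED;
        }
        return res;
}

int main(void)
{
        demo_dispatch(17, demo_hard_handler, demo_thread_handler, NULL);
        return 0;
}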
+ + struct cpu_workqueue_struct *cwq; + cwq = wq_per_cpu(keventd_wq, get_cpu()); + __queue_work(cwq, &dwork->work); + put_cpu(); + + */ + } + flush_work(&dwork->work); +} + +/* + * struct genl_multicast_group was made netns aware through + * patch "genetlink: make netns aware" by johannes, we just + * force this to always use the default init_net + */ +#define genl_info_net(x) &init_net +/* Just use init_net for older kernels */ +#define get_net_ns_by_pid(x) &init_net + +/* net namespace is lost */ +#define genlmsg_multicast_netns(a, b, c, d, e) genlmsg_multicast(b, c, d, e) +#define genlmsg_multicast_allns(a, b, c, d) genlmsg_multicast(a, b, c, d) +#define genlmsg_unicast(net, skb, pid) genlmsg_unicast(skb, pid) + +#define dev_change_net_namespace(a, b, c) (-EOPNOTSUPP) + +#define SET_NETDEV_DEVTYPE(netdev, type) + +#ifdef __KERNEL__ +/* Driver transmit return codes */ +enum netdev_tx { + BACKPORT_NETDEV_TX_OK = NETDEV_TX_OK, /* driver took care of packet */ + BACKPORT_NETDEV_TX_BUSY = NETDEV_TX_BUSY, /* driver tx path was busy*/ + BACKPORT_NETDEV_TX_LOCKED = NETDEV_TX_LOCKED, /* driver tx lock was already taken */ +}; +typedef enum netdev_tx netdev_tx_t; +#endif /* __KERNEL__ */ + +/* + * dev_pm_ops is only available on kernels >= 2.6.29, for + * older kernels we rely on reverting the work to old + * power management style stuff. + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +/* + * Use this if you want to use the same suspend and resume callbacks for suspend + * to RAM and hibernation. + */ +#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ +struct dev_pm_ops name = { \ + .suspend = suspend_fn, \ + .resume = resume_fn, \ + .freeze = suspend_fn, \ + .thaw = resume_fn, \ + .poweroff = suspend_fn, \ + .restore = resume_fn, \ +} +#else +#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) +#endif /* >= 2.6.29 */ + +#define wireless_send_event(a, b, c, d) wireless_send_event(a, b, c, (char * ) d) + +/* The export symbol in changed in compat/patches/15-symbol-export-conflicts.patch */ +#define ieee80211_rx(hw, skb) mac80211_ieee80211_rx(hw, skb) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)) */ + +#endif /* LINUX_26_32_COMPAT_H */ diff --git a/include/linux/compat-2.6.33.h b/include/linux/compat-2.6.33.h new file mode 100644 index 0000000..70e5f98 --- /dev/null +++ b/include/linux/compat-2.6.33.h @@ -0,0 +1,108 @@ +#ifndef LINUX_26_33_COMPAT_H +#define LINUX_26_33_COMPAT_H + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)) + +#include +#if defined(CONFIG_PCCARD) || defined(CONFIG_PCCARD_MODULE) +#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) +#include +#include +#include +#endif +#endif +#include +#include + +#define release_firmware compat_release_firmware +#define request_firmware compat_request_firmware +#define request_firmware_nowait compat_request_firmware_nowait + +#if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE) +int compat_request_firmware(const struct firmware **fw, const char *name, + struct device *device); +int compat_request_firmware_nowait( + struct module *module, int uevent, + const char *name, struct device *device, gfp_t gfp, void *context, + void (*cont)(const struct firmware *fw, void *context)); + +void compat_release_firmware(const struct firmware *fw); +#else +static inline int compat_request_firmware(const struct firmware **fw, + const char *name, + struct device *device) +{ + return -EINVAL; +} +static inline int request_firmware_nowait( + struct module *module, int uevent, + const char 
*name, struct device *device, gfp_t gfp, void *context, + void (*cont)(const struct firmware *fw, void *context)) +{ + return -EINVAL; +} + +static inline void compat_release_firmware(const struct firmware *fw) +{ +} +#endif + +#define KEY_RFKILL 247 /* Key that controls all radios */ + +#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */ +/* source: include/linux/if.h */ + +#define kparam_block_sysfs_write(a) +#define kparam_unblock_sysfs_write(a) + +/* this will never happen on older kernels */ +#define NETDEV_POST_INIT 0xffff + +static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN); + + if (NET_IP_ALIGN && skb) + skb_reserve(skb, NET_IP_ALIGN); + return skb; +} + +#if defined(CONFIG_PCCARD) || defined(CONFIG_PCCARD_MODULE) + +#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) + +#define pcmcia_request_window(a, b, c) pcmcia_request_window(&a, b, c) + +#define pcmcia_map_mem_page(a, b, c) pcmcia_map_mem_page(b, c) + +/* loop over CIS entries */ +int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code, + int (*loop_tuple) (struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv_data), + void *priv_data); + +#endif /* CONFIG_PCMCIA */ + +/* loop over CIS entries */ +int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function, + cisdata_t code, cisparse_t *parse, void *priv_data, + int (*loop_tuple) (tuple_t *tuple, + cisparse_t *parse, + void *priv_data)); + +#endif /* CONFIG_PCCARD */ + +/* Backport for kfifo + * kfifo_alloc and kfifo_free must be backported manually + */ +#define kfifo_in(a, b, c) __kfifo_put(*a, b, c) +#define kfifo_out(a, b, c) __kfifo_get(*a, b, c) +#define kfifo_len(a) __kfifo_len(*a) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)) */ + +#endif /* LINUX_26_33_COMPAT_H */ diff --git a/include/linux/compat-2.6.34.h b/include/linux/compat-2.6.34.h new file mode 100644 index 0000000..d189bee --- /dev/null +++ b/include/linux/compat-2.6.34.h @@ -0,0 +1,160 @@ +#ifndef LINUX_26_34_COMPAT_H +#define LINUX_26_34_COMPAT_H + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) + +#include + +#define netdev_mc_count(dev) ((dev)->mc_count) +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) + +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +/* source: include/linux/netdevice.h */ + + +/* Logging, debugging and troubleshooting/diagnostic helpers. */ + +/* netdev_printk helpers, similar to dev_printk */ + +static inline const char *netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} + +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) + +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#define netdev_info(dev, format, args...) 
\ + netdev_printk(KERN_INFO, dev, format, ##args) + +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif + +#if defined(VERBOSE_DEBUG) +#define netdev_vdbg netdev_dbg +#else + +#define netdev_vdbg(dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + +/* + * netdev_WARN() acts like dev_printk(), but with the key difference + * of using a WARN/WARN_ON to get the message out, including the + * file/line information and a backtrace. + */ +#define netdev_WARN(dev, format, args...) \ + WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args); + +/* netif printk helpers, similar to netdev_printk */ + +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args) +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args) +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args) +#define netif_err(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_ERR, dev, fmt, ##args) +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args) +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args) +#define netif_info(priv, type, dev, fmt, args...) \ + netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args) + +#if defined(DEBUG) +#define netif_dbg(priv, type, dev, format, args...) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netif_dbg(priv, type, netdev, format, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + dynamic_dev_dbg((netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args); \ +} while (0) +#else +#define netif_dbg(priv, type, dev, format, args...) \ +({ \ + if (0) \ + netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif + +#if defined(VERBOSE_DEBUG) +#define netif_vdbg netdev_dbg +#else +#define netif_vdbg(priv, type, dev, format, args...) 
\ +({ \ + if (0) \ + netif_printk(KERN_DEBUG, dev, format, ##args); \ + 0; \ +}) +#endif +/* source: include/linux/netdevice.h */ + + +static inline void device_lock(struct device *dev) +{ + down(&dev->sem); +} + +static inline int device_trylock(struct device *dev) +{ + return down_trylock(&dev->sem); +} + +static inline void device_unlock(struct device *dev) +{ + up(&dev->sem); +} + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) */ + +#endif /* LINUX_26_34_COMPAT_H */ diff --git a/include/linux/compat-2.6.h b/include/linux/compat-2.6.h new file mode 100644 index 0000000..ceca997 --- /dev/null +++ b/include/linux/compat-2.6.h @@ -0,0 +1,31 @@ +#ifndef LINUX_26_COMPAT_H +#define LINUX_26_COMPAT_H + +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) +#include +#else +#include +#endif +#include + +/* + * Each compat file represents compatibility code for new kernel + * code introduced for *that* kernel revision. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#endif /* LINUX_26_COMPAT_H */ diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h new file mode 100644 index 0000000..a55c873 --- /dev/null +++ b/include/linux/eeprom_93cx6.h @@ -0,0 +1,73 @@ +/* + Copyright (C) 2004 - 2006 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: eeprom_93cx6 + Abstract: EEPROM reader datastructures for 93cx6 chipsets. + Supported chipsets: 93c46, 93c56 and 93c66. + */ + +/* + * EEPROM operation defines. + */ +#define PCI_EEPROM_WIDTH_93C46 6 +#define PCI_EEPROM_WIDTH_93C56 8 +#define PCI_EEPROM_WIDTH_93C66 8 +#define PCI_EEPROM_WIDTH_OPCODE 3 +#define PCI_EEPROM_WRITE_OPCODE 0x05 +#define PCI_EEPROM_READ_OPCODE 0x06 +#define PCI_EEPROM_EWDS_OPCODE 0x10 +#define PCI_EEPROM_EWEN_OPCODE 0x13 + +/** + * struct eeprom_93cx6 - control structure for setting the commands + * for reading the eeprom data. + * @data: private pointer for the driver. + * @register_read(struct eeprom_93cx6 *eeprom): handler to + * read the eeprom register, this function should set all reg_* fields. + * @register_write(struct eeprom_93cx6 *eeprom): handler to + * write to the eeprom register by using all reg_* fields. + * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines + * @reg_data_in: register field to indicate data input + * @reg_data_out: register field to indicate data output + * @reg_data_clock: register field to set the data clock + * @reg_chip_select: register field to set the chip select + * + * This structure is used for the communication between the driver + * and the eeprom_93cx6 handlers for reading the eeprom. 
+ */ +struct eeprom_93cx6 { + void *data; + + void (*register_read)(struct eeprom_93cx6 *eeprom); + void (*register_write)(struct eeprom_93cx6 *eeprom); + + int width; + + char reg_data_in; + char reg_data_out; + char reg_data_clock; + char reg_chip_select; +}; + +extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, + const u8 word, u16 *data); +extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, + const u8 word, __le16 *data, const u16 words); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h new file mode 100644 index 0000000..1998495 --- /dev/null +++ b/include/linux/ieee80211.h @@ -0,0 +1,1597 @@ +/* + * IEEE 802.11 defines + * + * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen + * + * Copyright (c) 2002-2003, Jouni Malinen + * Copyright (c) 2005, Devicescape Software, Inc. + * Copyright (c) 2006, Michael Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef LINUX_IEEE80211_H +#define LINUX_IEEE80211_H + +#include +#include + +/* + * DS bit usage + * + * TA = transmitter address + * RA = receiver address + * DA = destination address + * SA = source address + * + * ToDS FromDS A1(RA) A2(TA) A3 A4 Use + * ----------------------------------------------------------------- + * 0 0 DA SA BSSID - IBSS/DLS + * 0 1 DA BSSID SA - AP -> STA + * 1 0 BSSID SA DA - AP <- STA + * 1 1 RA TA DA SA unspecified (WDS) + */ + +#define FCS_LEN 4 + +#define IEEE80211_FCTL_VERS 0x0003 +#define IEEE80211_FCTL_FTYPE 0x000c +#define IEEE80211_FCTL_STYPE 0x00f0 +#define IEEE80211_FCTL_TODS 0x0100 +#define IEEE80211_FCTL_FROMDS 0x0200 +#define IEEE80211_FCTL_MOREFRAGS 0x0400 +#define IEEE80211_FCTL_RETRY 0x0800 +#define IEEE80211_FCTL_PM 0x1000 +#define IEEE80211_FCTL_MOREDATA 0x2000 +#define IEEE80211_FCTL_PROTECTED 0x4000 +#define IEEE80211_FCTL_ORDER 0x8000 + +#define IEEE80211_SCTL_FRAG 0x000F +#define IEEE80211_SCTL_SEQ 0xFFF0 + +#define IEEE80211_FTYPE_MGMT 0x0000 +#define IEEE80211_FTYPE_CTL 0x0004 +#define IEEE80211_FTYPE_DATA 0x0008 + +/* management */ +#define IEEE80211_STYPE_ASSOC_REQ 0x0000 +#define IEEE80211_STYPE_ASSOC_RESP 0x0010 +#define IEEE80211_STYPE_REASSOC_REQ 0x0020 +#define IEEE80211_STYPE_REASSOC_RESP 0x0030 +#define IEEE80211_STYPE_PROBE_REQ 0x0040 +#define IEEE80211_STYPE_PROBE_RESP 0x0050 +#define IEEE80211_STYPE_BEACON 0x0080 +#define IEEE80211_STYPE_ATIM 0x0090 +#define IEEE80211_STYPE_DISASSOC 0x00A0 +#define IEEE80211_STYPE_AUTH 0x00B0 +#define IEEE80211_STYPE_DEAUTH 0x00C0 +#define IEEE80211_STYPE_ACTION 0x00D0 + +/* control */ +#define IEEE80211_STYPE_BACK_REQ 0x0080 +#define IEEE80211_STYPE_BACK 0x0090 +#define IEEE80211_STYPE_PSPOLL 0x00A0 +#define IEEE80211_STYPE_RTS 0x00B0 +#define IEEE80211_STYPE_CTS 0x00C0 +#define IEEE80211_STYPE_ACK 0x00D0 +#define IEEE80211_STYPE_CFEND 0x00E0 +#define IEEE80211_STYPE_CFENDACK 0x00F0 + +/* data */ +#define IEEE80211_STYPE_DATA 0x0000 +#define IEEE80211_STYPE_DATA_CFACK 0x0010 +#define IEEE80211_STYPE_DATA_CFPOLL 0x0020 +#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030 +#define IEEE80211_STYPE_NULLFUNC 0x0040 +#define IEEE80211_STYPE_CFACK 0x0050 +#define IEEE80211_STYPE_CFPOLL 0x0060 +#define IEEE80211_STYPE_CFACKPOLL 0x0070 +#define IEEE80211_STYPE_QOS_DATA 0x0080 +#define IEEE80211_STYPE_QOS_DATA_CFACK 0x0090 +#define IEEE80211_STYPE_QOS_DATA_CFPOLL 0x00A0 +#define IEEE80211_STYPE_QOS_DATA_CFACKPOLL 0x00B0 +#define 
IEEE80211_STYPE_QOS_NULLFUNC 0x00C0 +#define IEEE80211_STYPE_QOS_CFACK 0x00D0 +#define IEEE80211_STYPE_QOS_CFPOLL 0x00E0 +#define IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0 + + +/* miscellaneous IEEE 802.11 constants */ +#define IEEE80211_MAX_FRAG_THRESHOLD 2352 +#define IEEE80211_MAX_RTS_THRESHOLD 2353 +#define IEEE80211_MAX_AID 2007 +#define IEEE80211_MAX_TIM_LEN 251 +/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section + 6.2.1.1.2. + + 802.11e clarifies the figure in section 7.1.2. The frame body is + up to 2304 octets long (maximum MSDU size) plus any crypt overhead. */ +#define IEEE80211_MAX_DATA_LEN 2304 +/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */ +#define IEEE80211_MAX_FRAME_LEN 2352 + +#define IEEE80211_MAX_SSID_LEN 32 + +#define IEEE80211_MAX_MESH_ID_LEN 32 + +#define IEEE80211_QOS_CTL_LEN 2 +#define IEEE80211_QOS_CTL_TID_MASK 0x000F +#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 + +/* U-APSD queue for WMM IEs sent by AP */ +#define IEEE80211_WMM_IE_AP_QOSINFO_UAPSD (1<<7) + +/* U-APSD queues for WMM IEs sent by STA */ +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VO (1<<0) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VI (1<<1) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BK (1<<2) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BE (1<<3) +#define IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK 0x0f + +/* U-APSD max SP length for WMM IEs sent by STA */ +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 0x00 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_2 0x01 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_4 0x02 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_6 0x03 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK 0x03 +#define IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT 5 + +#define IEEE80211_HT_CTL_LEN 4 + +struct ieee80211_hdr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + __le16 seq_ctrl; + u8 addr4[6]; +} __attribute__ ((packed)); + +struct ieee80211_hdr_3addr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + __le16 seq_ctrl; +} __attribute__ ((packed)); + +struct ieee80211_qos_hdr { + __le16 frame_control; + __le16 duration_id; + u8 addr1[6]; + u8 addr2[6]; + u8 addr3[6]; + __le16 seq_ctrl; + __le16 qos_ctrl; +} __attribute__ ((packed)); + +/** + * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_tods(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0; +} + +/** + * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_fromds(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0; +} + +/** + * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_a4(__le16 fc) +{ + __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); + return (fc & tmp) == tmp; +} + +/** + * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_morefrags(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0; +} + +/** + * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_retry(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) 
!= 0; +} + +/** + * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_pm(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0; +} + +/** + * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_moredata(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0; +} + +/** + * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_protected(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0; +} + +/** + * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_has_order(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0; +} + +/** + * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_mgmt(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT); +} + +/** + * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_ctl(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL); +} + +/** + * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_data(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA); +} + +/** + * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_data_qos(__le16 fc) +{ + /* + * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need + * to check the one bit + */ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_STYPE_QOS_DATA)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA); +} + +/** + * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_data_present(__le16 fc) +{ + /* + * mask with 0x40 and test that that bit is clear to only return true + * for the data-containing substypes. 
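As an aside, the frame-control tests in this header are plain mask-and-compare operations. The sketch below repeats a few of them in user-space C on host-order values (the real helpers operate on __le16 fields, hence the cpu_to_le16() wrappers); the numeric constants are copied from the FCTL/FTYPE/STYPE defines earlier in the file and the sample frame-control words are arbitrary:

#include <stdio.h>
#include <stdint.h>

#define FCTL_FTYPE     0x000c
#define FCTL_STYPE     0x00f0
#define FTYPE_MGMT     0x0000
#define FTYPE_DATA     0x0008
#define STYPE_BEACON   0x0080

static int is_beacon(uint16_t fc)
{
        return (fc & (FCTL_FTYPE | FCTL_STYPE)) == (FTYPE_MGMT | STYPE_BEACON);
}

static int is_data_present(uint16_t fc)
{
        /* 0x40 is the "null function" subtype bit: clear means a payload. */
        return (fc & (FCTL_FTYPE | 0x40)) == FTYPE_DATA;
}

int main(void)
{
        uint16_t beacon   = 0x0080;   /* mgmt type, beacon subtype   */
        uint16_t qos_data = 0x0088;   /* data type, QoS data subtype */
        uint16_t qos_null = 0x00c8;   /* data type, QoS null subtype */

        printf("0x%04x beacon? %d  data-present? %d\n",
               beacon, is_beacon(beacon), is_data_present(beacon));
        printf("0x%04x beacon? %d  data-present? %d\n",
               qos_data, is_beacon(qos_data), is_data_present(qos_data));
        printf("0x%04x beacon? %d  data-present? %d\n",
               qos_null, is_beacon(qos_null), is_data_present(qos_null));
        return 0;
}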
+ */ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | 0x40)) == + cpu_to_le16(IEEE80211_FTYPE_DATA); +} + +/** + * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_assoc_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); +} + +/** + * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_assoc_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP); +} + +/** + * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_reassoc_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); +} + +/** + * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_reassoc_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP); +} + +/** + * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_probe_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); +} + +/** + * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_probe_resp(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); +} + +/** + * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_beacon(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); +} + +/** + * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_atim(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM); +} + +/** + * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_disassoc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC); +} + +/** + * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_auth(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | 
IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); +} + +/** + * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_deauth(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); +} + +/** + * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_action(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); +} + +/** + * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_back_req(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); +} + +/** + * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_back(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK); +} + +/** + * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_pspoll(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); +} + +/** + * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_rts(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); +} + +/** + * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_cts(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); +} + +/** + * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_ack(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); +} + +/** + * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_cfend(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND); +} + +/** + * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_cfendack(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK); +} + +/** + * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) 
nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_nullfunc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC); +} + +/** + * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee80211_is_qos_nullfunc(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); +} + +struct ieee80211s_hdr { + u8 flags; + u8 ttl; + __le32 seqnum; + u8 eaddr1[6]; + u8 eaddr2[6]; + u8 eaddr3[6]; +} __attribute__ ((packed)); + +/* Mesh flags */ +#define MESH_FLAGS_AE_A4 0x1 +#define MESH_FLAGS_AE_A5_A6 0x2 +#define MESH_FLAGS_AE 0x3 +#define MESH_FLAGS_PS_DEEP 0x4 + +/** + * struct ieee80211_quiet_ie + * + * This structure refers to "Quiet information element" + */ +struct ieee80211_quiet_ie { + u8 count; + u8 period; + __le16 duration; + __le16 offset; +} __attribute__ ((packed)); + +/** + * struct ieee80211_msrment_ie + * + * This structure refers to "Measurement Request/Report information element" + */ +struct ieee80211_msrment_ie { + u8 token; + u8 mode; + u8 type; + u8 request[0]; +} __attribute__ ((packed)); + +/** + * struct ieee80211_channel_sw_ie + * + * This structure refers to "Channel Switch Announcement information element" + */ +struct ieee80211_channel_sw_ie { + u8 mode; + u8 new_ch_num; + u8 count; +} __attribute__ ((packed)); + +/** + * struct ieee80211_tim + * + * This structure refers to "Traffic Indication Map information element" + */ +struct ieee80211_tim_ie { + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + /* variable size: 1 - 251 bytes */ + u8 virtual_map[1]; +} __attribute__ ((packed)); + +/** + * struct ieee80211_meshconf_ie + * + * This structure refers to "Mesh Configuration information element" + */ +struct ieee80211_meshconf_ie { + u8 meshconf_psel; + u8 meshconf_pmetric; + u8 meshconf_congest; + u8 meshconf_synch; + u8 meshconf_auth; + u8 meshconf_form; + u8 meshconf_cap; +} __attribute__ ((packed)); + +/** + * struct ieee80211_rann_ie + * + * This structure refers to "Root Announcement information element" + */ +struct ieee80211_rann_ie { + u8 rann_flags; + u8 rann_hopcount; + u8 rann_ttl; + u8 rann_addr[6]; + u32 rann_seq; + u32 rann_metric; +} __attribute__ ((packed)); + +#define WLAN_SA_QUERY_TR_ID_LEN 2 + +struct ieee80211_mgmt { + __le16 frame_control; + __le16 duration; + u8 da[6]; + u8 sa[6]; + u8 bssid[6]; + __le16 seq_ctrl; + union { + struct { + __le16 auth_alg; + __le16 auth_transaction; + __le16 status_code; + /* possibly followed by Challenge text */ + u8 variable[0]; + } __attribute__ ((packed)) auth; + struct { + __le16 reason_code; + } __attribute__ ((packed)) deauth; + struct { + __le16 capab_info; + __le16 listen_interval; + /* followed by SSID and Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) assoc_req; + struct { + __le16 capab_info; + __le16 status_code; + __le16 aid; + /* followed by Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) assoc_resp, reassoc_resp; + struct { + __le16 capab_info; + __le16 listen_interval; + u8 current_ap[6]; + /* followed by SSID and Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) reassoc_req; + struct { + __le16 reason_code; + } __attribute__ ((packed)) disassoc; + struct { + __le64 timestamp; + __le16 beacon_int; + 
__le16 capab_info; + /* followed by some of SSID, Supported rates, + * FH Params, DS Params, CF Params, IBSS Params, TIM */ + u8 variable[0]; + } __attribute__ ((packed)) beacon; + struct { + /* only variable items: SSID, Supported rates */ + u8 variable[0]; + } __attribute__ ((packed)) probe_req; + struct { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + /* followed by some of SSID, Supported rates, + * FH Params, DS Params, CF Params, IBSS Params */ + u8 variable[0]; + } __attribute__ ((packed)) probe_resp; + struct { + u8 category; + union { + struct { + u8 action_code; + u8 dialog_token; + u8 status_code; + u8 variable[0]; + } __attribute__ ((packed)) wme_action; + struct{ + u8 action_code; + u8 element_id; + u8 length; + struct ieee80211_channel_sw_ie sw_elem; + } __attribute__((packed)) chan_switch; + struct{ + u8 action_code; + u8 dialog_token; + u8 element_id; + u8 length; + struct ieee80211_msrment_ie msr_elem; + } __attribute__((packed)) measurement; + struct{ + u8 action_code; + u8 dialog_token; + __le16 capab; + __le16 timeout; + __le16 start_seq_num; + } __attribute__((packed)) addba_req; + struct{ + u8 action_code; + u8 dialog_token; + __le16 status; + __le16 capab; + __le16 timeout; + } __attribute__((packed)) addba_resp; + struct{ + u8 action_code; + __le16 params; + __le16 reason_code; + } __attribute__((packed)) delba; + struct{ + u8 action_code; + /* capab_info for open and confirm, + * reason for close + */ + __le16 aux; + /* Followed in plink_confirm by status + * code, AID and supported rates, + * and directly by supported rates in + * plink_open and plink_close + */ + u8 variable[0]; + } __attribute__((packed)) plink_action; + struct{ + u8 action_code; + u8 variable[0]; + } __attribute__((packed)) mesh_action; + struct { + u8 action; + u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN]; + } __attribute__ ((packed)) sa_query; + struct { + u8 action; + u8 smps_control; + } __attribute__ ((packed)) ht_smps; + } u; + } __attribute__ ((packed)) action; + } u; +} __attribute__ ((packed)); + +/* mgmt header + 1 byte category code */ +#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) + + +/* Management MIC information element (IEEE 802.11w) */ +struct ieee80211_mmie { + u8 element_id; + u8 length; + __le16 key_id; + u8 sequence_number[6]; + u8 mic[8]; +} __attribute__ ((packed)); + +/* Control frames */ +struct ieee80211_rts { + __le16 frame_control; + __le16 duration; + u8 ra[6]; + u8 ta[6]; +} __attribute__ ((packed)); + +struct ieee80211_cts { + __le16 frame_control; + __le16 duration; + u8 ra[6]; +} __attribute__ ((packed)); + +struct ieee80211_pspoll { + __le16 frame_control; + __le16 aid; + u8 bssid[6]; + u8 ta[6]; +} __attribute__ ((packed)); + +/** + * struct ieee80211_bar - HT Block Ack Request + * + * This structure refers to "HT BlockAckReq" as + * described in 802.11n draft section 7.2.1.7.1 + */ +struct ieee80211_bar { + __le16 frame_control; + __le16 duration; + __u8 ra[6]; + __u8 ta[6]; + __le16 control; + __le16 start_seq_num; +} __attribute__((packed)); + +/* 802.11 BAR control masks */ +#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000 +#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004 + + +#define IEEE80211_HT_MCS_MASK_LEN 10 + +/** + * struct ieee80211_mcs_info - MCS information + * @rx_mask: RX mask + * @rx_highest: highest supported RX rate. If set represents + * the highest supported RX data rate in units of 1 Mbps. 
+ * If this field is 0 this value should not be used to + * consider the highest RX data rate supported. + * @tx_params: TX parameters + */ +struct ieee80211_mcs_info { + u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN]; + __le16 rx_highest; + u8 tx_params; + u8 reserved[3]; +} __attribute__((packed)); + +/* 802.11n HT capability MSC set */ +#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff +#define IEEE80211_HT_MCS_TX_DEFINED 0x01 +#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02 +/* value 0 == 1 stream etc */ +#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C +#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2 +#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4 +#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10 + +/* + * 802.11n D5.0 20.3.5 / 20.6 says: + * - indices 0 to 7 and 32 are single spatial stream + * - 8 to 31 are multiple spatial streams using equal modulation + * [8..15 for two streams, 16..23 for three and 24..31 for four] + * - remainder are multiple spatial streams using unequal modulation + */ +#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33 +#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \ + (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8) + +/** + * struct ieee80211_ht_cap - HT capabilities + * + * This structure is the "HT capabilities element" as + * described in 802.11n D5.0 7.3.2.57 + */ +struct ieee80211_ht_cap { + __le16 cap_info; + u8 ampdu_params_info; + + /* 16 bytes MCS information */ + struct ieee80211_mcs_info mcs; + + __le16 extended_ht_cap_info; + __le32 tx_BF_cap_info; + u8 antenna_selection_info; +} __attribute__ ((packed)); + +/* 802.11n HT capabilities masks (for cap_info) */ +#define IEEE80211_HT_CAP_LDPC_CODING 0x0001 +#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002 +#define IEEE80211_HT_CAP_SM_PS 0x000C +#define IEEE80211_HT_CAP_SM_PS_SHIFT 2 +#define IEEE80211_HT_CAP_GRN_FLD 0x0010 +#define IEEE80211_HT_CAP_SGI_20 0x0020 +#define IEEE80211_HT_CAP_SGI_40 0x0040 +#define IEEE80211_HT_CAP_TX_STBC 0x0080 +#define IEEE80211_HT_CAP_RX_STBC 0x0300 +#define IEEE80211_HT_CAP_DELAY_BA 0x0400 +#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 +#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 +#define IEEE80211_HT_CAP_RESERVED 0x2000 +#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000 +#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000 + +/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */ +#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03 +#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C +#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 + +/* + * Maximum length of AMPDU that the STA can receive. 
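The HT capability element packs two A-MPDU fields into ampdu_params_info, and the comment that follows gives the corresponding length formula. A user-space sketch decoding an example value (0x17 and 0x006e are made up, but typical of a 40 MHz, short-GI station) with the masks defined above:

#include <stdio.h>
#include <stdint.h>

#define HT_CAP_SUP_WIDTH_20_40      0x0002
#define HT_CAP_SGI_20               0x0020
#define HT_CAP_SGI_40               0x0040

#define HT_AMPDU_PARM_FACTOR        0x03
#define HT_AMPDU_PARM_DENSITY       0x1C
#define HT_AMPDU_PARM_DENSITY_SHIFT 2
#define HT_MAX_AMPDU_FACTOR         13

int main(void)
{
        uint16_t cap_info = 0x006e;      /* example capability field  */
        uint8_t  ampdu    = 0x17;        /* example A-MPDU parameters */

        unsigned int factor  = ampdu & HT_AMPDU_PARM_FACTOR;
        unsigned int density = (ampdu & HT_AMPDU_PARM_DENSITY) >>
                               HT_AMPDU_PARM_DENSITY_SHIFT;

        printf("40 MHz: %d  SGI-20: %d  SGI-40: %d\n",
               !!(cap_info & HT_CAP_SUP_WIDTH_20_40),
               !!(cap_info & HT_CAP_SGI_20),
               !!(cap_info & HT_CAP_SGI_40));

        /* Length = 2 ^ (13 + factor) - 1 octets, so factor 3 => 65535. */
        printf("max A-MPDU length: %lu octets (factor %u)\n",
               (1UL << (HT_MAX_AMPDU_FACTOR + factor)) - 1, factor);
        printf("min MPDU spacing code: %u\n", density);
        return 0;
}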
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) + */ +enum ieee80211_max_ampdu_length_exp { + IEEE80211_HT_MAX_AMPDU_8K = 0, + IEEE80211_HT_MAX_AMPDU_16K = 1, + IEEE80211_HT_MAX_AMPDU_32K = 2, + IEEE80211_HT_MAX_AMPDU_64K = 3 +}; + +#define IEEE80211_HT_MAX_AMPDU_FACTOR 13 + +/* Minimum MPDU start spacing */ +enum ieee80211_min_mpdu_spacing { + IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */ + IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */ + IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */ + IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */ + IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */ + IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */ + IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */ + IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */ +}; + +/** + * struct ieee80211_ht_info - HT information + * + * This structure is the "HT information element" as + * described in 802.11n D5.0 7.3.2.58 + */ +struct ieee80211_ht_info { + u8 control_chan; + u8 ht_param; + __le16 operation_mode; + __le16 stbc_param; + u8 basic_set[16]; +} __attribute__ ((packed)); + +/* for ht_param */ +#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03 +#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00 +#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01 +#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03 +#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04 +#define IEEE80211_HT_PARAM_RIFS_MODE 0x08 +#define IEEE80211_HT_PARAM_SPSMP_SUPPORT 0x10 +#define IEEE80211_HT_PARAM_SERV_INTERVAL_GRAN 0xE0 + +/* for operation_mode */ +#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1 +#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3 +#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004 +#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010 + +/* for stbc_param */ +#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040 +#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080 +#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100 +#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200 +#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400 +#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800 + + +/* block-ack parameters */ +#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 +#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 +#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 +#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 + +/* + * A-PMDU buffer sizes + * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) + */ +#define IEEE80211_MIN_AMPDU_BUF 0x8 +#define IEEE80211_MAX_AMPDU_BUF 0x40 + + +/* Spatial Multiplexing Power Save Modes (for capability) */ +#define WLAN_HT_CAP_SM_PS_STATIC 0 +#define WLAN_HT_CAP_SM_PS_DYNAMIC 1 +#define WLAN_HT_CAP_SM_PS_INVALID 2 +#define WLAN_HT_CAP_SM_PS_DISABLED 3 + +/* for SM power control field lower two bits */ +#define WLAN_HT_SMPS_CONTROL_DISABLED 0 +#define WLAN_HT_SMPS_CONTROL_STATIC 1 +#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3 + +/* Authentication algorithms */ +#define WLAN_AUTH_OPEN 0 +#define WLAN_AUTH_SHARED_KEY 1 +#define WLAN_AUTH_FT 2 +#define WLAN_AUTH_LEAP 128 + +#define WLAN_AUTH_CHALLENGE_LEN 128 + +#define WLAN_CAPABILITY_ESS (1<<0) +#define WLAN_CAPABILITY_IBSS (1<<1) +#define WLAN_CAPABILITY_CF_POLLABLE (1<<2) +#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3) +#define WLAN_CAPABILITY_PRIVACY (1<<4) +#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5) +#define WLAN_CAPABILITY_PBCC (1<<6) +#define 
WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7) + +/* 802.11h */ +#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8) +#define WLAN_CAPABILITY_QOS (1<<9) +#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) +#define WLAN_CAPABILITY_DSSS_OFDM (1<<13) +/* measurement */ +#define IEEE80211_SPCT_MSR_RPRT_MODE_LATE (1<<0) +#define IEEE80211_SPCT_MSR_RPRT_MODE_INCAPABLE (1<<1) +#define IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED (1<<2) + +#define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0 +#define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1 +#define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2 + + +/* 802.11g ERP information element */ +#define WLAN_ERP_NON_ERP_PRESENT (1<<0) +#define WLAN_ERP_USE_PROTECTION (1<<1) +#define WLAN_ERP_BARKER_PREAMBLE (1<<2) + +/* WLAN_ERP_BARKER_PREAMBLE values */ +enum { + WLAN_ERP_PREAMBLE_SHORT = 0, + WLAN_ERP_PREAMBLE_LONG = 1, +}; + +/* Status codes */ +enum ieee80211_statuscode { + WLAN_STATUS_SUCCESS = 0, + WLAN_STATUS_UNSPECIFIED_FAILURE = 1, + WLAN_STATUS_CAPS_UNSUPPORTED = 10, + WLAN_STATUS_REASSOC_NO_ASSOC = 11, + WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12, + WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13, + WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14, + WLAN_STATUS_CHALLENGE_FAIL = 15, + WLAN_STATUS_AUTH_TIMEOUT = 16, + WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17, + WLAN_STATUS_ASSOC_DENIED_RATES = 18, + /* 802.11b */ + WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19, + WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20, + WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21, + /* 802.11h */ + WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22, + WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23, + WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24, + /* 802.11g */ + WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25, + WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26, + /* 802.11w */ + WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY = 30, + WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31, + /* 802.11i */ + WLAN_STATUS_INVALID_IE = 40, + WLAN_STATUS_INVALID_GROUP_CIPHER = 41, + WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42, + WLAN_STATUS_INVALID_AKMP = 43, + WLAN_STATUS_UNSUPP_RSN_VERSION = 44, + WLAN_STATUS_INVALID_RSN_IE_CAP = 45, + WLAN_STATUS_CIPHER_SUITE_REJECTED = 46, + /* 802.11e */ + WLAN_STATUS_UNSPECIFIED_QOS = 32, + WLAN_STATUS_ASSOC_DENIED_NOBANDWIDTH = 33, + WLAN_STATUS_ASSOC_DENIED_LOWACK = 34, + WLAN_STATUS_ASSOC_DENIED_UNSUPP_QOS = 35, + WLAN_STATUS_REQUEST_DECLINED = 37, + WLAN_STATUS_INVALID_QOS_PARAM = 38, + WLAN_STATUS_CHANGE_TSPEC = 39, + WLAN_STATUS_WAIT_TS_DELAY = 47, + WLAN_STATUS_NO_DIRECT_LINK = 48, + WLAN_STATUS_STA_NOT_PRESENT = 49, + WLAN_STATUS_STA_NOT_QSTA = 50, +}; + + +/* Reason codes */ +enum ieee80211_reasoncode { + WLAN_REASON_UNSPECIFIED = 1, + WLAN_REASON_PREV_AUTH_NOT_VALID = 2, + WLAN_REASON_DEAUTH_LEAVING = 3, + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4, + WLAN_REASON_DISASSOC_AP_BUSY = 5, + WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6, + WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7, + WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8, + WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9, + /* 802.11h */ + WLAN_REASON_DISASSOC_BAD_POWER = 10, + WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11, + /* 802.11i */ + WLAN_REASON_INVALID_IE = 13, + WLAN_REASON_MIC_FAILURE = 14, + WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15, + WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16, + WLAN_REASON_IE_DIFFERENT = 17, + WLAN_REASON_INVALID_GROUP_CIPHER = 18, + WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19, + WLAN_REASON_INVALID_AKMP = 20, + WLAN_REASON_UNSUPP_RSN_VERSION = 21, + WLAN_REASON_INVALID_RSN_IE_CAP = 22, + WLAN_REASON_IEEE8021X_FAILED = 23, + WLAN_REASON_CIPHER_SUITE_REJECTED = 24, + /* 802.11e */ 
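Before the reason codes continue below, a quick user-space illustration of how the WLAN_CAPABILITY_* bits above are typically tested against a beacon's capab_info field; the bit values are copied from the defines above and the example word is made up:

#include <stdio.h>
#include <stdint.h>

#define CAP_ESS              (1<<0)
#define CAP_IBSS             (1<<1)
#define CAP_PRIVACY          (1<<4)
#define CAP_SHORT_PREAMBLE   (1<<5)
#define CAP_SHORT_SLOT_TIME  (1<<10)

int main(void)
{
        /* Example capab_info as advertised by a typical protected BSS. */
        uint16_t capab = CAP_ESS | CAP_PRIVACY | CAP_SHORT_PREAMBLE |
                         CAP_SHORT_SLOT_TIME;               /* 0x0431 */

        printf("capab_info = 0x%04x\n", capab);
        printf("  ESS:                %d\n", !!(capab & CAP_ESS));
        printf("  IBSS:               %d\n", !!(capab & CAP_IBSS));
        printf("  privacy (WEP/RSN):  %d\n", !!(capab & CAP_PRIVACY));
        printf("  short preamble:     %d\n", !!(capab & CAP_SHORT_PREAMBLE));
        printf("  short slot time:    %d\n", !!(capab & CAP_SHORT_SLOT_TIME));
        return 0;
}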
+ WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32, + WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33, + WLAN_REASON_DISASSOC_LOW_ACK = 34, + WLAN_REASON_DISASSOC_QAP_EXCEED_TXOP = 35, + WLAN_REASON_QSTA_LEAVE_QBSS = 36, + WLAN_REASON_QSTA_NOT_USE = 37, + WLAN_REASON_QSTA_REQUIRE_SETUP = 38, + WLAN_REASON_QSTA_TIMEOUT = 39, + WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45, +}; + + +/* Information Element IDs */ +enum ieee80211_eid { + WLAN_EID_SSID = 0, + WLAN_EID_SUPP_RATES = 1, + WLAN_EID_FH_PARAMS = 2, + WLAN_EID_DS_PARAMS = 3, + WLAN_EID_CF_PARAMS = 4, + WLAN_EID_TIM = 5, + WLAN_EID_IBSS_PARAMS = 6, + WLAN_EID_CHALLENGE = 16, + + WLAN_EID_COUNTRY = 7, + WLAN_EID_HP_PARAMS = 8, + WLAN_EID_HP_TABLE = 9, + WLAN_EID_REQUEST = 10, + + WLAN_EID_QBSS_LOAD = 11, + WLAN_EID_EDCA_PARAM_SET = 12, + WLAN_EID_TSPEC = 13, + WLAN_EID_TCLAS = 14, + WLAN_EID_SCHEDULE = 15, + WLAN_EID_TS_DELAY = 43, + WLAN_EID_TCLAS_PROCESSING = 44, + WLAN_EID_QOS_CAPA = 46, + /* 802.11s + * + * All mesh EID numbers are pending IEEE 802.11 ANA approval. + * The numbers have been incremented from those suggested in + * 802.11s/D2.0 so that MESH_CONFIG does not conflict with + * EXT_SUPP_RATES. + */ + WLAN_EID_MESH_CONFIG = 51, + WLAN_EID_MESH_ID = 52, + WLAN_EID_PEER_LINK = 55, + WLAN_EID_PREQ = 68, + WLAN_EID_PREP = 69, + WLAN_EID_PERR = 70, + WLAN_EID_RANN = 49, /* compatible with FreeBSD */ + + WLAN_EID_PWR_CONSTRAINT = 32, + WLAN_EID_PWR_CAPABILITY = 33, + WLAN_EID_TPC_REQUEST = 34, + WLAN_EID_TPC_REPORT = 35, + WLAN_EID_SUPPORTED_CHANNELS = 36, + WLAN_EID_CHANNEL_SWITCH = 37, + WLAN_EID_MEASURE_REQUEST = 38, + WLAN_EID_MEASURE_REPORT = 39, + WLAN_EID_QUIET = 40, + WLAN_EID_IBSS_DFS = 41, + + WLAN_EID_ERP_INFO = 42, + WLAN_EID_EXT_SUPP_RATES = 50, + + WLAN_EID_HT_CAPABILITY = 45, + WLAN_EID_HT_INFORMATION = 61, + + WLAN_EID_RSN = 48, + WLAN_EID_MMIE = 76, + WLAN_EID_WPA = 221, + WLAN_EID_GENERIC = 221, + WLAN_EID_VENDOR_SPECIFIC = 221, + WLAN_EID_QOS_PARAMETER = 222, + + WLAN_EID_AP_CHAN_REPORT = 51, + WLAN_EID_NEIGHBOR_REPORT = 52, + WLAN_EID_RCPI = 53, + WLAN_EID_BSS_AVG_ACCESS_DELAY = 63, + WLAN_EID_ANTENNA_INFO = 64, + WLAN_EID_RSNI = 65, + WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66, + WLAN_EID_BSS_AVAILABLE_CAPACITY = 67, + WLAN_EID_BSS_AC_ACCESS_DELAY = 68, + WLAN_EID_RRM_ENABLED_CAPABILITIES = 70, + WLAN_EID_MULTIPLE_BSSID = 71, + + WLAN_EID_MOBILITY_DOMAIN = 54, + WLAN_EID_FAST_BSS_TRANSITION = 55, + WLAN_EID_TIMEOUT_INTERVAL = 56, + WLAN_EID_RIC_DATA = 57, + WLAN_EID_RIC_DESCRIPTOR = 75, + + WLAN_EID_DSE_REGISTERED_LOCATION = 58, + WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59, + WLAN_EID_EXT_CHANSWITCH_ANN = 60, +}; + +/* Action category code */ +enum ieee80211_category { + WLAN_CATEGORY_SPECTRUM_MGMT = 0, + WLAN_CATEGORY_QOS = 1, + WLAN_CATEGORY_DLS = 2, + WLAN_CATEGORY_BACK = 3, + WLAN_CATEGORY_PUBLIC = 4, + WLAN_CATEGORY_HT = 7, + WLAN_CATEGORY_SA_QUERY = 8, + WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, + WLAN_CATEGORY_WMM = 17, + WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, + WLAN_CATEGORY_VENDOR_SPECIFIC = 127, +}; + +/* SPECTRUM_MGMT action code */ +enum ieee80211_spectrum_mgmt_actioncode { + WLAN_ACTION_SPCT_MSR_REQ = 0, + WLAN_ACTION_SPCT_MSR_RPRT = 1, + WLAN_ACTION_SPCT_TPC_REQ = 2, + WLAN_ACTION_SPCT_TPC_RPRT = 3, + WLAN_ACTION_SPCT_CHL_SWITCH = 4, +}; + +/* HT action codes */ +enum ieee80211_ht_actioncode { + WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0, + WLAN_HT_ACTION_SMPS = 1, + WLAN_HT_ACTION_PSMP = 2, + WLAN_HT_ACTION_PCO_PHASE = 3, + WLAN_HT_ACTION_CSI = 4, + WLAN_HT_ACTION_NONCOMPRESSED_BF = 5, + 
WLAN_HT_ACTION_COMPRESSED_BF = 6, + WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7, +}; + +/* Security key length */ +enum ieee80211_key_len { + WLAN_KEY_LEN_WEP40 = 5, + WLAN_KEY_LEN_WEP104 = 13, + WLAN_KEY_LEN_CCMP = 16, + WLAN_KEY_LEN_TKIP = 32, + WLAN_KEY_LEN_AES_CMAC = 16, +}; + +/* + * IEEE 802.11-2007 7.3.2.9 Country information element + * + * Minimum length is 8 octets, ie len must be evenly + * divisible by 2 + */ + +/* Although the spec says 8 I'm seeing 6 in practice */ +#define IEEE80211_COUNTRY_IE_MIN_LEN 6 + +/* + * For regulatory extension stuff see IEEE 802.11-2007 + * Annex I (page 1141) and Annex J (page 1147). Also + * review 7.3.2.9. + * + * When dot11RegulatoryClassesRequired is true and the + * first_channel/reg_extension_id is >= 201 then the IE + * compromises of the 'ext' struct represented below: + * + * - Regulatory extension ID - when generating IE this just needs + * to be monotonically increasing for each triplet passed in + * the IE + * - Regulatory class - index into set of rules + * - Coverage class - index into air propagation time (Table 7-27), + * in microseconds, you can compute the air propagation time from + * the index by multiplying by 3, so index 10 yields a propagation + * of 10 us. Valid values are 0-31, values 32-255 are not defined + * yet. A value of 0 inicates air propagation of <= 1 us. + * + * See also Table I.2 for Emission limit sets and table + * I.3 for Behavior limit sets. Table J.1 indicates how to map + * a reg_class to an emission limit set and behavior limit set. + */ +#define IEEE80211_COUNTRY_EXTENSION_ID 201 + +/* + * Channels numbers in the IE must be monotonically increasing + * if dot11RegulatoryClassesRequired is not true. + * + * If dot11RegulatoryClassesRequired is true consecutive + * subband triplets following a regulatory triplet shall + * have monotonically increasing first_channel number fields. + * + * Channel numbers shall not overlap. + * + * Note that max_power is signed. 
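A user-space sketch of how a parser distinguishes the two country-IE triplet forms described above (sub-band versus regulatory extension, keyed off the 201 threshold) and how a coverage class maps to an air-propagation time via the multiply-by-3 rule from the comment; the sample bytes are invented:

#include <stdio.h>
#include <stdint.h>

#define COUNTRY_EXTENSION_ID 201  /* first octet >= 201 => extension triplet */

int main(void)
{
        /* Two example triplets as they might appear after the country
         * string; the values are made up for illustration. */
        uint8_t chan_triplet[3] = { 1, 13, 20 };   /* first chan, #chans, dBm  */
        uint8_t ext_triplet[3]  = { 201, 4, 10 };  /* ext id, reg class, cover */

        if (chan_triplet[0] < COUNTRY_EXTENSION_ID)
                printf("sub-band: channels %u..%u, max power %d dBm\n",
                       chan_triplet[0],
                       chan_triplet[0] + chan_triplet[1] - 1,
                       (int8_t)chan_triplet[2]);

        if (ext_triplet[0] >= COUNTRY_EXTENSION_ID)
                printf("extension: reg class %u, coverage class %u "
                       "(~%u us air propagation)\n",
                       ext_triplet[1], ext_triplet[2], ext_triplet[2] * 3);
        return 0;
}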
+ */ +struct ieee80211_country_ie_triplet { + union { + struct { + u8 first_channel; + u8 num_channels; + s8 max_power; + } __attribute__ ((packed)) chans; + struct { + u8 reg_extension_id; + u8 reg_class; + u8 coverage_class; + } __attribute__ ((packed)) ext; + }; +} __attribute__ ((packed)); + +enum ieee80211_timeout_interval_type { + WLAN_TIMEOUT_REASSOC_DEADLINE = 1 /* 802.11r */, + WLAN_TIMEOUT_KEY_LIFETIME = 2 /* 802.11r */, + WLAN_TIMEOUT_ASSOC_COMEBACK = 3 /* 802.11w */, +}; + +/* BACK action code */ +enum ieee80211_back_actioncode { + WLAN_ACTION_ADDBA_REQ = 0, + WLAN_ACTION_ADDBA_RESP = 1, + WLAN_ACTION_DELBA = 2, +}; + +/* BACK (block-ack) parties */ +enum ieee80211_back_parties { + WLAN_BACK_RECIPIENT = 0, + WLAN_BACK_INITIATOR = 1, + WLAN_BACK_TIMER = 2, +}; + +/* SA Query action */ +enum ieee80211_sa_query_action { + WLAN_ACTION_SA_QUERY_REQUEST = 0, + WLAN_ACTION_SA_QUERY_RESPONSE = 1, +}; + + +/* A-MSDU 802.11n */ +#define IEEE80211_QOS_CONTROL_A_MSDU_PRESENT 0x0080 + +/* cipher suite selectors */ +#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00 +#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01 +#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02 +/* reserved: 0x000FAC03 */ +#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04 +#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 +#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 + +/* AKM suite selectors */ +#define WLAN_AKM_SUITE_8021X 0x000FAC01 +#define WLAN_AKM_SUITE_PSK 0x000FAC02 + +#define WLAN_MAX_KEY_LEN 32 + +#define WLAN_PMKID_LEN 16 + +/** + * ieee80211_get_qos_ctl - get pointer to qos control bytes + * @hdr: the frame + * + * The qos ctrl bytes come after the frame_control, duration, seq_num + * and 3 or 4 addresses of length ETH_ALEN. + * 3 addr: 2 + 2 + 2 + 3*6 = 24 + * 4 addr: 2 + 2 + 2 + 4*6 = 30 + */ +static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) +{ + if (ieee80211_has_a4(hdr->frame_control)) + return (u8 *)hdr + 30; + else + return (u8 *)hdr + 24; +} + +/** + * ieee80211_get_SA - get pointer to SA + * @hdr: the frame + * + * Given an 802.11 frame, this function returns the offset + * to the source address (SA). It does not verify that the + * header is long enough to contain the address, and the + * header must be long enough to contain the frame control + * field. + */ +static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr) +{ + if (ieee80211_has_a4(hdr->frame_control)) + return hdr->addr4; + if (ieee80211_has_fromds(hdr->frame_control)) + return hdr->addr3; + return hdr->addr2; +} + +/** + * ieee80211_get_DA - get pointer to DA + * @hdr: the frame + * + * Given an 802.11 frame, this function returns the offset + * to the destination address (DA). It does not verify that + * the header is long enough to contain the address, and the + * header must be long enough to contain the frame control + * field. + */ +static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) +{ + if (ieee80211_has_tods(hdr->frame_control)) + return hdr->addr3; + else + return hdr->addr1; +} + +/** + * ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame + * @hdr: the frame (buffer must include at least the first octet of payload) + */ +static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) +{ + if (ieee80211_is_disassoc(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control)) + return true; + + if (ieee80211_is_action(hdr->frame_control)) { + u8 *category; + + /* + * Action frames, excluding Public Action frames, are Robust + * Management Frames. 
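The addressing helpers a little earlier (ieee80211_get_qos_ctl, ieee80211_get_SA, ieee80211_get_DA) are direct consequences of the ToDS/FromDS table at the top of this header. The sketch below re-derives the same decisions for the four DS-bit combinations in user-space C, on host-order values, with the FCTL constants copied from above:

#include <stdio.h>
#include <stdint.h>

#define FCTL_TODS   0x0100
#define FCTL_FROMDS 0x0200

int main(void)
{
        static const uint16_t cases[4] = {
                0, FCTL_FROMDS, FCTL_TODS, FCTL_TODS | FCTL_FROMDS
        };
        int i;

        for (i = 0; i < 4; i++) {
                uint16_t fc = cases[i];
                int tods   = !!(fc & FCTL_TODS);
                int fromds = !!(fc & FCTL_FROMDS);
                int has_a4 = tods && fromds;

                /* Mirrors ieee80211_get_SA()/get_DA()/get_qos_ctl():
                 * SA: addr4 if 4-addr, addr3 if FromDS, else addr2;
                 * DA: addr3 if ToDS, else addr1;
                 * QoS control sits after 24 or 30 header bytes. */
                printf("ToDS=%d FromDS=%d: SA=addr%d DA=addr%d qos_ctl at %d\n",
                       tods, fromds,
                       has_a4 ? 4 : (fromds ? 3 : 2),
                       tods ? 3 : 1,
                       has_a4 ? 30 : 24);
        }
        return 0;
}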
However, if we are looking at a Protected + * frame, skip the check since the data may be encrypted and + * the frame has already been found to be a Robust Management + * Frame (by the other end). + */ + if (ieee80211_has_protected(hdr->frame_control)) + return true; + category = ((u8 *) hdr) + 24; + return *category != WLAN_CATEGORY_PUBLIC && + *category != WLAN_CATEGORY_HT && + *category != WLAN_CATEGORY_VENDOR_SPECIFIC; + } + + return false; +} + +/** + * ieee80211_fhss_chan_to_freq - get channel frequency + * @channel: the FHSS channel + * + * Convert IEEE802.11 FHSS channel to frequency (MHz) + * Ref IEEE 802.11-2007 section 14.6 + */ +static inline int ieee80211_fhss_chan_to_freq(int channel) +{ + if ((channel > 1) && (channel < 96)) + return channel + 2400; + else + return -1; +} + +/** + * ieee80211_freq_to_fhss_chan - get channel + * @freq: the channels frequency + * + * Convert frequency (MHz) to IEEE802.11 FHSS channel + * Ref IEEE 802.11-2007 section 14.6 + */ +static inline int ieee80211_freq_to_fhss_chan(int freq) +{ + if ((freq > 2401) && (freq < 2496)) + return freq - 2400; + else + return -1; +} + +/** + * ieee80211_dsss_chan_to_freq - get channel center frequency + * @channel: the DSSS channel + * + * Convert IEEE802.11 DSSS channel to the center frequency (MHz). + * Ref IEEE 802.11-2007 section 15.6 + */ +static inline int ieee80211_dsss_chan_to_freq(int channel) +{ + if ((channel > 0) && (channel < 14)) + return 2407 + (channel * 5); + else if (channel == 14) + return 2484; + else + return -1; +} + +/** + * ieee80211_freq_to_dsss_chan - get channel + * @freq: the frequency + * + * Convert frequency (MHz) to IEEE802.11 DSSS channel + * Ref IEEE 802.11-2007 section 15.6 + * + * This routine selects the channel with the closest center frequency. + */ +static inline int ieee80211_freq_to_dsss_chan(int freq) +{ + if ((freq >= 2410) && (freq < 2475)) + return (freq - 2405) / 5; + else if ((freq >= 2482) && (freq < 2487)) + return 14; + else + return -1; +} + +/* Convert IEEE802.11 HR DSSS channel to frequency (MHz) and back + * Ref IEEE 802.11-2007 section 18.4.6.2 + * + * The channels and frequencies are the same as those defined for DSSS + */ +#define ieee80211_hr_chan_to_freq(chan) ieee80211_dsss_chan_to_freq(chan) +#define ieee80211_freq_to_hr_chan(freq) ieee80211_freq_to_dsss_chan(freq) + +/* Convert IEEE802.11 ERP channel to frequency (MHz) and back + * Ref IEEE 802.11-2007 section 19.4.2 + */ +#define ieee80211_erp_chan_to_freq(chan) ieee80211_hr_chan_to_freq(chan) +#define ieee80211_freq_to_erp_chan(freq) ieee80211_freq_to_hr_chan(freq) + +/** + * ieee80211_ofdm_chan_to_freq - get channel center frequency + * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz + * @channel: the OFDM channel + * + * Convert IEEE802.11 OFDM channel to center frequency (MHz) + * Ref IEEE 802.11-2007 section 17.3.8.3.2 + */ +static inline int ieee80211_ofdm_chan_to_freq(int s_freq, int channel) +{ + if ((channel > 0) && (channel <= 200) && + (s_freq >= 4000)) + return s_freq + (channel * 5); + else + return -1; +} + +/** + * ieee80211_freq_to_ofdm_channel - get channel + * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz + * @freq: the frequency + * + * Convert frequency (MHz) to IEEE802.11 OFDM channel + * Ref IEEE 802.11-2007 section 17.3.8.3.2 + * + * This routine selects the channel with the closest center frequency. 
+ */ +static inline int ieee80211_freq_to_ofdm_chan(int s_freq, int freq) +{ + if ((freq > (s_freq + 2)) && (freq <= (s_freq + 1202)) && + (s_freq >= 4000)) + return (freq + 2 - s_freq) / 5; + else + return -1; +} + +/** + * ieee80211_tu_to_usec - convert time units (TU) to microseconds + * @tu: the TUs + */ +static inline unsigned long ieee80211_tu_to_usec(unsigned long tu) +{ + return 1024 * tu; +} + +/** + * ieee80211_check_tim - check if AID bit is set in TIM + * @tim: the TIM IE + * @tim_len: length of the TIM IE + * @aid: the AID to look for + */ +static inline bool ieee80211_check_tim(struct ieee80211_tim_ie *tim, + u8 tim_len, u16 aid) +{ + u8 mask; + u8 index, indexn1, indexn2; + + if (unlikely(!tim || tim_len < sizeof(*tim))) + return false; + + aid &= 0x3fff; + index = aid / 8; + mask = 1 << (aid & 7); + + indexn1 = tim->bitmap_ctrl & 0xfe; + indexn2 = tim_len + indexn1 - 4; + + if (index < indexn1 || index > indexn2) + return false; + + index -= indexn1; + + return !!(tim->virtual_map[index] & mask); +} + +#endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h new file mode 100644 index 0000000..28ba20f --- /dev/null +++ b/include/linux/nl80211.h @@ -0,0 +1,1586 @@ +#ifndef __LINUX_NL80211_H +#define __LINUX_NL80211_H +/* + * 802.11 netlink interface public header + * + * Copyright 2006-2010 Johannes Berg + * Copyright 2008 Michael Wu + * Copyright 2008 Luis Carlos Cobo + * Copyright 2008 Michael Buesch + * Copyright 2008, 2009 Luis R. Rodriguez + * Copyright 2008 Jouni Malinen + * Copyright 2008 Colin McCabe + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +#include + +/** + * DOC: Station handling + * + * Stations are added per interface, but a special case exists with VLAN + * interfaces. When a station is bound to an AP interface, it may be moved + * into a VLAN identified by a VLAN interface index (%NL80211_ATTR_STA_VLAN). + * The station is still assumed to belong to the AP interface it was added + * to. + * + * TODO: need more info? + */ + +/** + * enum nl80211_commands - supported nl80211 commands + * + * @NL80211_CMD_UNSPEC: unspecified command to catch errors + * + * @NL80211_CMD_GET_WIPHY: request information about a wiphy or dump request + * to get a list of all present wiphys. + * @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or + * %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME, + * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, + * %NL80211_ATTR_WIPHY_CHANNEL_TYPE, %NL80211_ATTR_WIPHY_RETRY_SHORT, + * %NL80211_ATTR_WIPHY_RETRY_LONG, %NL80211_ATTR_WIPHY_FRAG_THRESHOLD, + * and/or %NL80211_ATTR_WIPHY_RTS_THRESHOLD. + * @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request + * or rename notification. 
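(Editorial aside, not part of the patch: a minimal sketch of how driver code that includes this header might use the TIM and conversion helpers above. The function name example_stay_awake and its parameters are illustrative; tim, tim_len and aid are assumed to come from a parsed beacon and the current association.)

static bool example_stay_awake(struct ieee80211_tim_ie *tim, u8 tim_len,
			       u16 aid, u16 beacon_int_tu)
{
	/* DSSS channel 6 maps to 2437 MHz: 2407 + 6 * 5 */
	int freq = ieee80211_dsss_chan_to_freq(6);
	/* beacon interval in microseconds, e.g. 100 TU -> 102400 us */
	unsigned long beacon_us = ieee80211_tu_to_usec(beacon_int_tu);

	(void)freq;
	(void)beacon_us;

	/* stay awake if the TIM bitmap has the bit for our AID set */
	return ieee80211_check_tim(tim, tim_len, aid);
}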
Has attributes %NL80211_ATTR_WIPHY and + * %NL80211_ATTR_WIPHY_NAME. + * @NL80211_CMD_DEL_WIPHY: Wiphy deleted. Has attributes + * %NL80211_ATTR_WIPHY and %NL80211_ATTR_WIPHY_NAME. + * + * @NL80211_CMD_GET_INTERFACE: Request an interface's configuration; + * either a dump request on a %NL80211_ATTR_WIPHY or a specific get + * on an %NL80211_ATTR_IFINDEX is supported. + * @NL80211_CMD_SET_INTERFACE: Set type of a virtual interface, requires + * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_IFTYPE. + * @NL80211_CMD_NEW_INTERFACE: Newly created virtual interface or response + * to %NL80211_CMD_GET_INTERFACE. Has %NL80211_ATTR_IFINDEX, + * %NL80211_ATTR_WIPHY and %NL80211_ATTR_IFTYPE attributes. Can also + * be sent from userspace to request creation of a new virtual interface, + * then requires attributes %NL80211_ATTR_WIPHY, %NL80211_ATTR_IFTYPE and + * %NL80211_ATTR_IFNAME. + * @NL80211_CMD_DEL_INTERFACE: Virtual interface was deleted, has attributes + * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_WIPHY. Can also be sent from + * userspace to request deletion of a virtual interface, then requires + * attribute %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified + * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC. + * @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT, + * %NL80211_ATTR_KEY_DEFAULT_MGMT, or %NL80211_ATTR_KEY_THRESHOLD. + * @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA, + * %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC, %NL80211_ATTR_KEY_CIPHER, + * and %NL80211_ATTR_KEY_SEQ attributes. + * @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_IDX + * or %NL80211_ATTR_MAC. + * + * @NL80211_CMD_GET_BEACON: retrieve beacon information (returned in a + * %NL80211_CMD_NEW_BEACON message) + * @NL80211_CMD_SET_BEACON: set the beacon on an access point interface + * using the %NL80211_ATTR_BEACON_INTERVAL, %NL80211_ATTR_DTIM_PERIOD, + * %NL80211_ATTR_BEACON_HEAD and %NL80211_ATTR_BEACON_TAIL attributes. + * @NL80211_CMD_NEW_BEACON: add a new beacon to an access point interface, + * parameters are like for %NL80211_CMD_SET_BEACON. + * @NL80211_CMD_DEL_BEACON: remove the beacon, stop sending it + * + * @NL80211_CMD_GET_STATION: Get station attributes for station identified by + * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_SET_STATION: Set station attributes for station identified by + * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_NEW_STATION: Add a station with given attributes to + * the interface identified by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC + * or, if no MAC address given, all stations, on the interface identified + * by %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to + * destination %NL80211_ATTR_MAC on the interface identified by + * %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_SET_MPATH: Set mesh path attributes for mesh path to + * destination %NL80211_ATTR_MAC on the interface identified by + * %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_NEW_MPATH: Add a mesh path with given attributes to + * the interface identified by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_DEL_MPATH: Remove a mesh path identified by %NL80211_ATTR_MAC + * or, if no MAC address given, all mesh paths, on the interface identified + * by %NL80211_ATTR_IFINDEX.
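(Editorial aside, not part of the patch: the GET/dump pattern described above, sketched from userspace with libnl 3.x-style generic netlink calls. Only the NL80211_* names come from this header; the libnl calls, the callback name iface_cb and the output format are assumptions, and error handling is omitted for brevity.)

#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int iface_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *tb[NL80211_ATTR_MAX + 1];

	nla_parse(tb, NL80211_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
		  genlmsg_attrlen(gnlh, 0), NULL);
	if (tb[NL80211_ATTR_IFNAME] && tb[NL80211_ATTR_IFTYPE])
		printf("%s: iftype %u\n",
		       nla_get_string(tb[NL80211_ATTR_IFNAME]),
		       nla_get_u32(tb[NL80211_ATTR_IFTYPE]));
	return NL_SKIP;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	/* NLM_F_DUMP: iterate over all interfaces of all wiphys */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_DUMP, NL80211_CMD_GET_INTERFACE, 0);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, iface_cb, NULL);
	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}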
+ * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by + * %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_GET_REG: ask the wireless core to send us its currently set + * regulatory domain. + * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command + * after being queried by the kernel. CRDA replies by sending a regulatory + * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our + * current alpha2 if it found a match. It also provides + * NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each + * regulatory rule is a nested set of attributes given by + * %NL80211_ATTR_REG_RULE_FREQ_[START|END] and + * %NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by + * %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and + * %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP. + * @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain + * to the the specified ISO/IEC 3166-1 alpha2 country code. The core will + * store this as a valid request and then query userspace for it. + * + * @NL80211_CMD_GET_MESH_PARAMS: Get mesh networking properties for the + * interface identified by %NL80211_ATTR_IFINDEX + * + * @NL80211_CMD_SET_MESH_PARAMS: Set mesh networking properties for the + * interface identified by %NL80211_ATTR_IFINDEX + * + * @NL80211_CMD_SET_MGMT_EXTRA_IE: Set extra IEs for management frames. The + * interface is identified with %NL80211_ATTR_IFINDEX and the management + * frame subtype with %NL80211_ATTR_MGMT_SUBTYPE. The extra IE data to be + * added to the end of the specified management frame is specified with + * %NL80211_ATTR_IE. If the command succeeds, the requested data will be + * added to all specified management frames generated by + * kernel/firmware/driver. + * Note: This command has been removed and it is only reserved at this + * point to avoid re-using existing command number. The functionality this + * command was planned for has been provided with cleaner design with the + * option to specify additional IEs in NL80211_CMD_TRIGGER_SCAN, + * NL80211_CMD_AUTHENTICATE, NL80211_CMD_ASSOCIATE, + * NL80211_CMD_DEAUTHENTICATE, and NL80211_CMD_DISASSOCIATE. + * + * @NL80211_CMD_GET_SCAN: get scan results + * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters + * @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to + * NL80211_CMD_GET_SCAN and on the "scan" multicast group) + * @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons, + * partial scan results may be available + * + * @NL80211_CMD_GET_SURVEY: get survey resuls, e.g. channel occupation + * or noise level + * @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to + * NL80211_CMD_GET_SURVEY and on the "scan" multicast group) + * + * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain + * has been changed and provides details of the request information + * that caused the change such as who initiated the regulatory request + * (%NL80211_ATTR_REG_INITIATOR), the wiphy_idx + * (%NL80211_ATTR_REG_ALPHA2) on which the request was made from if + * the initiator was %NL80211_REGDOM_SET_BY_COUNTRY_IE or + * %NL80211_REGDOM_SET_BY_DRIVER, the type of regulatory domain + * set (%NL80211_ATTR_REG_TYPE), if the type of regulatory domain is + * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on + * to (%NL80211_ATTR_REG_ALPHA2). 
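(Editorial aside, not part of the patch: a sketch of the %NL80211_CMD_REQ_SET_REG request described above, assuming a connected libnl socket and resolved nl80211 family id as in the earlier interface-dump sketch; req_set_reg is an illustrative name.)

#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int req_set_reg(struct nl_sock *sk, int family, const char *alpha2)
{
	struct nl_msg *msg = nlmsg_alloc();
	int err;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_REQ_SET_REG, 0);
	/* e.g. "US"; the core stores the hint and then queries userspace (CRDA) */
	nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, alpha2);
	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return err < 0 ? err : 0;
}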
+ * @NL80211_CMD_REG_BEACON_HINT: indicates to userspace that an AP beacon + * has been found while world roaming thus enabling active scan or + * any mode of operation that initiates TX (beacons) on a channel + * where we would not have been able to do either before. As an example + * if you are world roaming (regulatory domain set to world or if your + * driver is using a custom world roaming regulatory domain) and while + * doing a passive scan on the 5 GHz band you find an AP there (if not + * on a DFS channel) you will now be able to actively scan for that AP + * or use AP mode on your card on that same channel. Note that this will + * never be used for channels 1-11 on the 2 GHz band as they are always + * enabled world wide. This beacon hint is only sent if your device had + * either disabled active scanning or beaconing on a channel. We send to + * userspace the wiphy on which we removed a restriction from + * (%NL80211_ATTR_WIPHY) and the channel on which this occurred + * before (%NL80211_ATTR_FREQ_BEFORE) and after (%NL80211_ATTR_FREQ_AFTER) + * the beacon hint was processed. + * + * @NL80211_CMD_AUTHENTICATE: authentication request and notification. + * This command is used both as a command (request to authenticate) and + * as an event on the "mlme" multicast group indicating completion of the + * authentication process. + * When used as a command, %NL80211_ATTR_IFINDEX is used to identify the + * interface. %NL80211_ATTR_MAC is used to specify PeerSTAAddress (and + * BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify + * the SSID (mainly for association, but is included in authentication + * request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ is used + * to specify the frequence of the channel in MHz. %NL80211_ATTR_AUTH_TYPE + * is used to specify the authentication type. %NL80211_ATTR_IE is used to + * define IEs (VendorSpecificInfo, but also including RSN IE and FT IEs) + * to be added to the frame. + * When used as an event, this reports reception of an Authentication + * frame in station and IBSS modes when the local MLME processed the + * frame, i.e., it was for the local STA and was received in correct + * state. This is similar to MLME-AUTHENTICATE.confirm primitive in the + * MLME SAP interface (kernel providing MLME, userspace SME). The + * included %NL80211_ATTR_FRAME attribute contains the management frame + * (including both the header and frame body, but not FCS). This event is + * also used to indicate if the authentication attempt timed out. In that + * case the %NL80211_ATTR_FRAME attribute is replaced with a + * %NL80211_ATTR_TIMED_OUT flag (and %NL80211_ATTR_MAC to indicate which + * pending authentication timed out). + * @NL80211_CMD_ASSOCIATE: association request and notification; like + * NL80211_CMD_AUTHENTICATE but for Association and Reassociation + * (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request, + * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). + * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like + * NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to + * MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication + * primitives). + * @NL80211_CMD_DISASSOCIATE: disassociation request and notification; like + * NL80211_CMD_AUTHENTICATE but for Disassociation frames (similar to + * MLME-DISASSOCIATE.request and MLME-DISASSOCIATE.indication primitives). 
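(Editorial aside, not part of the patch: a sketch of building the %NL80211_CMD_AUTHENTICATE request described above with the attributes it names, assuming a connected libnl socket and family id as in the earlier sketches. NL80211_AUTHTYPE_OPEN_SYSTEM comes from the nl80211_auth_type enum defined further down in this header; in practice a full SME such as wpa_supplicant drives this exchange.)

#include <string.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int auth_req(struct nl_sock *sk, int family, int ifindex,
		    const unsigned char bssid[6], const char *ssid,
		    int freq_mhz)
{
	struct nl_msg *msg = nlmsg_alloc();
	int err;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_AUTHENTICATE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_MAC, 6, bssid);	/* PeerSTAAddress/BSSID */
	nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);
	nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq_mhz);	/* channel in MHz */
	nla_put_u32(msg, NL80211_ATTR_AUTH_TYPE, NL80211_AUTHTYPE_OPEN_SYSTEM);
	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return err < 0 ? err : 0;
}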
+ * + * @NL80211_CMD_MICHAEL_MIC_FAILURE: notification of a locally detected Michael + * MIC (part of TKIP) failure; sent on the "mlme" multicast group; the + * event includes %NL80211_ATTR_MAC to describe the source MAC address of + * the frame with invalid MIC, %NL80211_ATTR_KEY_TYPE to show the key + * type, %NL80211_ATTR_KEY_IDX to indicate the key identifier, and + * %NL80211_ATTR_KEY_SEQ to indicate the TSC value of the frame; this + * event matches with MLME-MICHAELMICFAILURE.indication() primitive + * + * @NL80211_CMD_JOIN_IBSS: Join a new IBSS -- given at least an SSID and a + * FREQ attribute (for the initial frequency if no peer can be found) + * and optionally a MAC (as BSSID) and FREQ_FIXED attribute if those + * should be fixed rather than automatically determined. Can only be + * executed on a network interface that is UP, and fixed BSSID/FREQ + * may be rejected. Another optional parameter is the beacon interval, + * given in the %NL80211_ATTR_BEACON_INTERVAL attribute, which if not + * given defaults to 100 TU (102.4ms). + * @NL80211_CMD_LEAVE_IBSS: Leave the IBSS -- no special arguments, the IBSS is + * determined by the network interface. + * + * @NL80211_CMD_TESTMODE: testmode command, takes a wiphy (or ifindex) attribute + * to identify the device, and the TESTDATA blob attribute to pass through + * to the driver. + * + * @NL80211_CMD_CONNECT: connection request and notification; this command + * requests to connect to a specified network but without separating + * auth and assoc steps. For this, you need to specify the SSID in a + * %NL80211_ATTR_SSID attribute, and can optionally specify the association + * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC, + * %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_CONTROL_PORT. + * It is also sent as an event, with the BSSID and response IEs when the + * connection is established or failed to be established. This can be + * determined by the STATUS_CODE attribute. + * @NL80211_CMD_ROAM: request that the card roam (currently not implemented), + * sent as an event when the card/driver roamed by itself. + * @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify + * userspace that a connection was dropped by the AP or due to other + * reasons, for this the %NL80211_ATTR_DISCONNECTED_BY_AP and + * %NL80211_ATTR_REASON_CODE attributes are used. + * + * @NL80211_CMD_SET_WIPHY_NETNS: Set a wiphy's netns. Note that all devices + * associated with this wiphy must be down and will follow. + * + * @NL80211_CMD_REMAIN_ON_CHANNEL: Request to remain awake on the specified + * channel for the specified amount of time. This can be used to do + * off-channel operations like transmit a Public Action frame and wait for + * a response while being associated to an AP on another channel. + * %NL80211_ATTR_WIPHY or %NL80211_ATTR_IFINDEX is used to specify which + * radio is used. %NL80211_ATTR_WIPHY_FREQ is used to specify the + * frequency for the operation and %NL80211_ATTR_WIPHY_CHANNEL_TYPE may be + * optionally used to specify additional channel parameters. + * %NL80211_ATTR_DURATION is used to specify the duration in milliseconds + * to remain on the channel. This command is also used as an event to + * notify when the requested duration starts (it may take a while for the + * driver to schedule this time due to other concurrent needs for the + * radio). 
+ * When called, this operation returns a cookie (%NL80211_ATTR_COOKIE) + * that will be included with any events pertaining to this request; + * the cookie is also used to cancel the request. + * @NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL: This command can be used to cancel a + * pending remain-on-channel duration if the desired operation has been + * completed prior to expiration of the originally requested duration. + * %NL80211_ATTR_WIPHY or %NL80211_ATTR_IFINDEX is used to specify the + * radio. The %NL80211_ATTR_COOKIE attribute must be given as well to + * uniquely identify the request. + * This command is also used as an event to notify when a requested + * remain-on-channel duration has expired. + * + * @NL80211_CMD_SET_TX_BITRATE_MASK: Set the mask of rates to be used in TX + * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface + * and @NL80211_ATTR_TX_RATES the set of allowed rates. + * + * @NL80211_CMD_REGISTER_ACTION: Register for receiving certain action frames + * (via @NL80211_CMD_ACTION) for processing in userspace. This command + * requires an interface index and a match attribute containing the first + * few bytes of the frame that should match, e.g. a single byte for only + * a category match or four bytes for vendor frames including the OUI. + * The registration cannot be dropped, but is removed automatically + * when the netlink socket is closed. Multiple registrations can be made. + * @NL80211_CMD_ACTION: Action frame TX request and RX notification. This + * command is used both as a request to transmit an Action frame and as an + * event indicating reception of an Action frame that was not processed in + * kernel code, but is for us (i.e., which may need to be processed in a + * user space application). %NL80211_ATTR_FRAME is used to specify the + * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and + * optionally %NL80211_ATTR_WIPHY_CHANNEL_TYPE) is used to indicate on + * which channel the frame is to be transmitted or was received. This + * channel has to be the current channel (remain-on-channel or the + * operational channel). When called, this operation returns a cookie + * (%NL80211_ATTR_COOKIE) that will be included with the TX status event + * pertaining to the TX request. + * @NL80211_CMD_ACTION_TX_STATUS: Report TX status of an Action frame + * transmitted with %NL80211_CMD_ACTION. %NL80211_ATTR_COOKIE identifies + * the TX command and %NL80211_ATTR_FRAME includes the contents of the + * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged + * the frame. + * + * @NL80211_CMD_MAX: highest used command number + * @__NL80211_CMD_AFTER_LAST: internal use + */ +enum nl80211_commands { +/* don't change the order or add anything inbetween, this is ABI! 
*/ + NL80211_CMD_UNSPEC, + + NL80211_CMD_GET_WIPHY, /* can dump */ + NL80211_CMD_SET_WIPHY, + NL80211_CMD_NEW_WIPHY, + NL80211_CMD_DEL_WIPHY, + + NL80211_CMD_GET_INTERFACE, /* can dump */ + NL80211_CMD_SET_INTERFACE, + NL80211_CMD_NEW_INTERFACE, + NL80211_CMD_DEL_INTERFACE, + + NL80211_CMD_GET_KEY, + NL80211_CMD_SET_KEY, + NL80211_CMD_NEW_KEY, + NL80211_CMD_DEL_KEY, + + NL80211_CMD_GET_BEACON, + NL80211_CMD_SET_BEACON, + NL80211_CMD_NEW_BEACON, + NL80211_CMD_DEL_BEACON, + + NL80211_CMD_GET_STATION, + NL80211_CMD_SET_STATION, + NL80211_CMD_NEW_STATION, + NL80211_CMD_DEL_STATION, + + NL80211_CMD_GET_MPATH, + NL80211_CMD_SET_MPATH, + NL80211_CMD_NEW_MPATH, + NL80211_CMD_DEL_MPATH, + + NL80211_CMD_SET_BSS, + + NL80211_CMD_SET_REG, + NL80211_CMD_REQ_SET_REG, + + NL80211_CMD_GET_MESH_PARAMS, + NL80211_CMD_SET_MESH_PARAMS, + + NL80211_CMD_SET_MGMT_EXTRA_IE /* reserved; not used */, + + NL80211_CMD_GET_REG, + + NL80211_CMD_GET_SCAN, + NL80211_CMD_TRIGGER_SCAN, + NL80211_CMD_NEW_SCAN_RESULTS, + NL80211_CMD_SCAN_ABORTED, + + NL80211_CMD_REG_CHANGE, + + NL80211_CMD_AUTHENTICATE, + NL80211_CMD_ASSOCIATE, + NL80211_CMD_DEAUTHENTICATE, + NL80211_CMD_DISASSOCIATE, + + NL80211_CMD_MICHAEL_MIC_FAILURE, + + NL80211_CMD_REG_BEACON_HINT, + + NL80211_CMD_JOIN_IBSS, + NL80211_CMD_LEAVE_IBSS, + + NL80211_CMD_TESTMODE, + + NL80211_CMD_CONNECT, + NL80211_CMD_ROAM, + NL80211_CMD_DISCONNECT, + + NL80211_CMD_SET_WIPHY_NETNS, + + NL80211_CMD_GET_SURVEY, + NL80211_CMD_NEW_SURVEY_RESULTS, + + NL80211_CMD_SET_PMKSA, + NL80211_CMD_DEL_PMKSA, + NL80211_CMD_FLUSH_PMKSA, + + NL80211_CMD_REMAIN_ON_CHANNEL, + NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, + + NL80211_CMD_SET_TX_BITRATE_MASK, + + NL80211_CMD_REGISTER_ACTION, + NL80211_CMD_ACTION, + NL80211_CMD_ACTION_TX_STATUS, + + NL80211_CMD_SET_POWER_SAVE, + NL80211_CMD_GET_POWER_SAVE, + + /* add new commands above here */ + + /* used to define NL80211_CMD_MAX below */ + __NL80211_CMD_AFTER_LAST, + NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 +}; + +/* + * Allow user space programs to use #ifdef on new commands by defining them + * here + */ +#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS +#define NL80211_CMD_SET_MGMT_EXTRA_IE NL80211_CMD_SET_MGMT_EXTRA_IE +#define NL80211_CMD_REG_CHANGE NL80211_CMD_REG_CHANGE +#define NL80211_CMD_AUTHENTICATE NL80211_CMD_AUTHENTICATE +#define NL80211_CMD_ASSOCIATE NL80211_CMD_ASSOCIATE +#define NL80211_CMD_DEAUTHENTICATE NL80211_CMD_DEAUTHENTICATE +#define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE +#define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT + +/** + * enum nl80211_attrs - nl80211 netlink attributes + * + * @NL80211_ATTR_UNSPEC: unspecified attribute to catch errors + * + * @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf. 
+ * /sys/class/ieee80211//index + * @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming) + * @NL80211_ATTR_WIPHY_TXQ_PARAMS: a nested array of TX queue parameters + * @NL80211_ATTR_WIPHY_FREQ: frequency of the selected channel in MHz + * @NL80211_ATTR_WIPHY_CHANNEL_TYPE: included with NL80211_ATTR_WIPHY_FREQ + * if HT20 or HT40 are allowed (i.e., 802.11n disabled if not included): + * NL80211_CHAN_NO_HT = HT not allowed (i.e., same as not including + * this attribute) + * NL80211_CHAN_HT20 = HT20 only + * NL80211_CHAN_HT40MINUS = secondary channel is below the primary channel + * NL80211_CHAN_HT40PLUS = secondary channel is above the primary channel + * @NL80211_ATTR_WIPHY_RETRY_SHORT: TX retry limit for frames whose length is + * less than or equal to the RTS threshold; allowed range: 1..255; + * dot11ShortRetryLimit; u8 + * @NL80211_ATTR_WIPHY_RETRY_LONG: TX retry limit for frames whose length is + * greater than the RTS threshold; allowed range: 1..255; + * dot11ShortLongLimit; u8 + * @NL80211_ATTR_WIPHY_FRAG_THRESHOLD: fragmentation threshold, i.e., maximum + * length in octets for frames; allowed range: 256..8000, disable + * fragmentation with (u32)-1; dot11FragmentationThreshold; u32 + * @NL80211_ATTR_WIPHY_RTS_THRESHOLD: RTS threshold (TX frames with length + * larger than or equal to this use RTS/CTS handshake); allowed range: + * 0..65536, disable with (u32)-1; dot11RTSThreshold; u32 + * @NL80211_ATTR_WIPHY_COVERAGE_CLASS: Coverage Class as defined by IEEE 802.11 + * section 7.3.2.9; dot11CoverageClass; u8 + * + * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on + * @NL80211_ATTR_IFNAME: network interface name + * @NL80211_ATTR_IFTYPE: type of virtual interface, see &enum nl80211_iftype + * + * @NL80211_ATTR_MAC: MAC address (various uses) + * + * @NL80211_ATTR_KEY_DATA: (temporal) key data; for TKIP this consists of + * 16 bytes encryption key followed by 8 bytes each for TX and RX MIC + * keys + * @NL80211_ATTR_KEY_IDX: key ID (u8, 0-3) + * @NL80211_ATTR_KEY_CIPHER: key cipher suite (u32, as defined by IEEE 802.11 + * section 7.3.2.25.1, e.g. 0x000FAC04) + * @NL80211_ATTR_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and + * CCMP keys, each six bytes in little endian + * + * @NL80211_ATTR_BEACON_INTERVAL: beacon interval in TU + * @NL80211_ATTR_DTIM_PERIOD: DTIM period for beaconing + * @NL80211_ATTR_BEACON_HEAD: portion of the beacon before the TIM IE + * @NL80211_ATTR_BEACON_TAIL: portion of the beacon after the TIM IE + * + * @NL80211_ATTR_STA_AID: Association ID for the station (u16) + * @NL80211_ATTR_STA_FLAGS: flags, nested element with NLA_FLAG attributes of + * &enum nl80211_sta_flags (deprecated, use %NL80211_ATTR_STA_FLAGS2) + * @NL80211_ATTR_STA_LISTEN_INTERVAL: listen interval as defined by + * IEEE 802.11 7.3.1.6 (u16). + * @NL80211_ATTR_STA_SUPPORTED_RATES: supported rates, array of supported + * rates as defined by IEEE 802.11 7.3.2.2 but without the length + * restriction (at most %NL80211_MAX_SUPP_RATES). + * @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station + * to, or the AP interface the station was originally added to to. + * @NL80211_ATTR_STA_INFO: information about a station, part of station info + * given for %NL80211_CMD_GET_STATION, nested attribute containing + * info as possible, see &enum nl80211_sta_info. + * + * @NL80211_ATTR_WIPHY_BANDS: Information about an operating bands, + * consisting of a nested array. + * + * @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes). 
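(Editorial aside, not part of the patch: a sketch of %NL80211_CMD_SET_WIPHY carrying the u32 RTS threshold attribute documented above, where (u32)-1 disables RTS/CTS; libnl socket and family handling as in the earlier sketches, set_rts_threshold is an illustrative name.)

#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int set_rts_threshold(struct nl_sock *sk, int family,
			     int ifindex, unsigned int rts)
{
	struct nl_msg *msg = nlmsg_alloc();
	int err;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_SET_WIPHY, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	/* dot11RTSThreshold; pass (unsigned int)-1 here to disable RTS/CTS */
	nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, rts);
	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return err < 0 ? err : 0;
}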
+ * @NL80211_ATTR_PLINK_ACTION: action to perform on the mesh peer link. + * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path. + * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path + * info given for %NL80211_CMD_GET_MPATH, nested attribute described at + * &enum nl80211_mpath_info. + * + * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of + * &enum nl80211_mntr_flags. + * + * @NL80211_ATTR_REG_ALPHA2: an ISO-3166-alpha2 country code for which the + * current regulatory domain should be set to or is already set to. + * For example, 'CR', for Costa Rica. This attribute is used by the kernel + * to query the CRDA to retrieve one regulatory domain. This attribute can + * also be used by userspace to query the kernel for the currently set + * regulatory domain. We chose an alpha2 as that is also used by the + * IEEE-802.11d country information element to identify a country. + * Users can also simply ask the wireless core to set regulatory domain + * to a specific alpha2. + * @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory + * rules. + * + * @NL80211_ATTR_BSS_CTS_PROT: whether CTS protection is enabled (u8, 0 or 1) + * @NL80211_ATTR_BSS_SHORT_PREAMBLE: whether short preamble is enabled + * (u8, 0 or 1) + * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled + * (u8, 0 or 1) + * @NL80211_ATTR_BSS_BASIC_RATES: basic rates, array of basic + * rates in format defined by IEEE 802.11 7.3.2.2 but without the length + * restriction (at most %NL80211_MAX_SUPP_RATES). + * + * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from + * association request when used with NL80211_CMD_NEW_STATION) + * + * @NL80211_ATTR_SUPPORTED_IFTYPES: nested attribute containing all + * supported interface types, each a flag attribute with the number + * of the interface mode. + * + * @NL80211_ATTR_MGMT_SUBTYPE: Management frame subtype for + * %NL80211_CMD_SET_MGMT_EXTRA_IE. + * + * @NL80211_ATTR_IE: Information element(s) data (used, e.g., with + * %NL80211_CMD_SET_MGMT_EXTRA_IE). + * + * @NL80211_ATTR_MAX_NUM_SCAN_SSIDS: number of SSIDs you can scan with + * a single scan request, a wiphy attribute. + * @NL80211_ATTR_MAX_SCAN_IE_LEN: maximum length of information elements + * that can be added to a scan request + * + * @NL80211_ATTR_SCAN_FREQUENCIES: nested attribute with frequencies (in MHz) + * @NL80211_ATTR_SCAN_SSIDS: nested attribute with SSIDs, leave out for passive + * scanning and include a zero-length SSID (wildcard) for wildcard scan + * @NL80211_ATTR_BSS: scan result BSS + * + * @NL80211_ATTR_REG_INITIATOR: indicates who requested the regulatory domain + * currently in effect. This could be any of the %NL80211_REGDOM_SET_BY_* + * @NL80211_ATTR_REG_TYPE: indicates the type of the regulatory domain currently + * set. This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*) + * + * @NL80211_ATTR_SUPPORTED_COMMANDS: wiphy attribute that specifies + * an array of command numbers (i.e. a mapping index to command number) + * that the driver for the given wiphy supports. 
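(Editorial aside, not part of the patch: a sketch of %NL80211_CMD_TRIGGER_SCAN using the nested %NL80211_ATTR_SCAN_SSIDS attribute described above; one zero-length SSID requests a wildcard active scan, and omitting the nest entirely requests a passive scan. libnl socket and family handling as in the earlier sketches.)

#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int trigger_wildcard_scan(struct nl_sock *sk, int family, int ifindex)
{
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *ssids;
	int err;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TRIGGER_SCAN, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

	/* nested SSID list: a single zero-length SSID means "wildcard" */
	ssids = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
	nla_put(msg, 1, 0, "");
	nla_nest_end(msg, ssids);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return err < 0 ? err : 0;
}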
+ * + * @NL80211_ATTR_FRAME: frame data (binary attribute), including frame header + * and body, but not FCS; used, e.g., with NL80211_CMD_AUTHENTICATE and + * NL80211_CMD_ASSOCIATE events + * @NL80211_ATTR_SSID: SSID (binary attribute, 0..32 octets) + * @NL80211_ATTR_AUTH_TYPE: AuthenticationType, see &enum nl80211_auth_type, + * represented as a u32 + * @NL80211_ATTR_REASON_CODE: ReasonCode for %NL80211_CMD_DEAUTHENTICATE and + * %NL80211_CMD_DISASSOCIATE, u16 + * + * @NL80211_ATTR_KEY_TYPE: Key Type, see &enum nl80211_key_type, represented as + * a u32 + * + * @NL80211_ATTR_FREQ_BEFORE: A channel which has suffered a regulatory change + * due to considerations from a beacon hint. This attribute reflects + * the state of the channel _before_ the beacon hint processing. This + * attributes consists of a nested attribute containing + * NL80211_FREQUENCY_ATTR_* + * @NL80211_ATTR_FREQ_AFTER: A channel which has suffered a regulatory change + * due to considerations from a beacon hint. This attribute reflects + * the state of the channel _after_ the beacon hint processing. This + * attributes consists of a nested attribute containing + * NL80211_FREQUENCY_ATTR_* + * + * @NL80211_ATTR_CIPHER_SUITES: a set of u32 values indicating the supported + * cipher suites + * + * @NL80211_ATTR_FREQ_FIXED: a flag indicating the IBSS should not try to look + * for other networks on different channels + * + * @NL80211_ATTR_TIMED_OUT: a flag indicating than an operation timed out; this + * is used, e.g., with %NL80211_CMD_AUTHENTICATE event + * + * @NL80211_ATTR_USE_MFP: Whether management frame protection (IEEE 802.11w) is + * used for the association (&enum nl80211_mfp, represented as a u32); + * this attribute can be used + * with %NL80211_CMD_ASSOCIATE request + * + * @NL80211_ATTR_STA_FLAGS2: Attribute containing a + * &struct nl80211_sta_flag_update. + * + * @NL80211_ATTR_CONTROL_PORT: A flag indicating whether user space controls + * IEEE 802.1X port, i.e., sets/clears %NL80211_STA_FLAG_AUTHORIZED, in + * station mode. If the flag is included in %NL80211_CMD_ASSOCIATE + * request, the driver will assume that the port is unauthorized until + * authorized by user space. Otherwise, port is marked authorized by + * default in station mode. + * + * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. + * We recommend using nested, driver-specific attributes within this. + * + * @NL80211_ATTR_DISCONNECTED_BY_AP: A flag indicating that the DISCONNECT + * event was due to the AP disconnecting the station, and not due to + * a local disconnect request. + * @NL80211_ATTR_STATUS_CODE: StatusCode for the %NL80211_CMD_CONNECT + * event (u16) + * @NL80211_ATTR_PRIVACY: Flag attribute, used with connect(), indicating + * that protected APs should be used. + * + * @NL80211_ATTR_CIPHERS_PAIRWISE: Used with CONNECT and ASSOCIATE to + * indicate which unicast key ciphers will be used with the connection + * (an array of u32). + * @NL80211_ATTR_CIPHER_GROUP: Used with CONNECT and ASSOCIATE to indicate + * which group key cipher will be used with the connection (a u32). + * @NL80211_ATTR_WPA_VERSIONS: Used with CONNECT and ASSOCIATE to indicate + * which WPA version(s) the AP we want to associate with is using + * (a u32 with flags from &enum nl80211_wpa_versions). + * @NL80211_ATTR_AKM_SUITES: Used with CONNECT and ASSOCIATE to indicate + * which key management algorithm(s) to use (an array of u32). 
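(Editorial aside, not part of the patch: a sketch of an %NL80211_CMD_CONNECT request carrying the cipher/AKM attributes documented above for a WPA2-PSK BSS. The attributes the text calls CIPHERS_PAIRWISE and CIPHER_GROUP are named NL80211_ATTR_CIPHER_SUITES_PAIRWISE and NL80211_ATTR_CIPHER_SUITE_GROUP in the enum below; the suite selector values 0x000FAC04 and 0x000FAC02 are WLAN_CIPHER_SUITE_CCMP and WLAN_AKM_SUITE_PSK from the ieee80211.h part of this patch, and NL80211_WPA_VERSION_2 comes from the nl80211_wpa_versions enum later in this header. The 4-way handshake would still be handled by a supplicant, so this only illustrates which attribute carries what.)

#include <stdint.h>
#include <string.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int connect_wpa2_psk(struct nl_sock *sk, int family, int ifindex,
			    const char *ssid)
{
	struct nl_msg *msg = nlmsg_alloc();
	uint32_t ccmp = 0x000FAC04;	/* WLAN_CIPHER_SUITE_CCMP */
	uint32_t akm = 0x000FAC02;	/* WLAN_AKM_SUITE_PSK */
	int err;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_CONNECT, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);
	nla_put_flag(msg, NL80211_ATTR_PRIVACY);
	/* unicast/group ciphers, WPA version and key management suite */
	nla_put(msg, NL80211_ATTR_CIPHER_SUITES_PAIRWISE, sizeof(ccmp), &ccmp);
	nla_put_u32(msg, NL80211_ATTR_CIPHER_SUITE_GROUP, ccmp);
	nla_put_u32(msg, NL80211_ATTR_WPA_VERSIONS, NL80211_WPA_VERSION_2);
	nla_put(msg, NL80211_ATTR_AKM_SUITES, sizeof(akm), &akm);
	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return err < 0 ? err : 0;
}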
+ * + * @NL80211_ATTR_REQ_IE: (Re)association request information elements as + * sent out by the card, for ROAM and successful CONNECT events. + * @NL80211_ATTR_RESP_IE: (Re)association response information elements as + * sent by peer, for ROAM and successful CONNECT events. + * + * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used by in ASSOCIATE + * commands to specify using a reassociate frame + * + * @NL80211_ATTR_KEY: key information in a nested attribute with + * %NL80211_KEY_* sub-attributes + * @NL80211_ATTR_KEYS: array of keys for static WEP keys for connect() + * and join_ibss(), key information is in a nested attribute each + * with %NL80211_KEY_* sub-attributes + * + * @NL80211_ATTR_PID: Process ID of a network namespace. + * + * @NL80211_ATTR_GENERATION: Used to indicate consistent snapshots for + * dumps. This number increases whenever the object list being + * dumped changes, and as such userspace can verify that it has + * obtained a complete and consistent snapshot by verifying that + * all dump messages contain the same generation number. If it + * changed then the list changed and the dump should be repeated + * completely from scratch. + * + * @NL80211_ATTR_4ADDR: Use 4-address frames on a virtual interface + * + * @NL80211_ATTR_SURVEY_INFO: survey information about a channel, part of + * the survey response for %NL80211_CMD_GET_SURVEY, nested attribute + * containing info as possible, see &enum survey_info. + * + * @NL80211_ATTR_PMKID: PMK material for PMKSA caching. + * @NL80211_ATTR_MAX_NUM_PMKIDS: maximum number of PMKIDs a firmware can + * cache, a wiphy attribute. + * + * @NL80211_ATTR_DURATION: Duration of an operation in milliseconds, u32. + * + * @NL80211_ATTR_COOKIE: Generic 64-bit cookie to identify objects. + * + * @NL80211_ATTR_TX_RATES: Nested set of attributes + * (enum nl80211_tx_rate_attributes) describing TX rates per band. The + * enum nl80211_band value is used as the index (nla_type() of the nested + * data. If a band is not included, it will be configured to allow all + * rates based on negotiated supported rates information. This attribute + * is used with %NL80211_CMD_SET_TX_BITRATE_MASK. + * + * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain + * at least one byte, currently used with @NL80211_CMD_REGISTER_ACTION. + * + * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was + * acknowledged by the recipient. + * + * @NL80211_ATTR_MAX: highest attribute number currently defined + * @__NL80211_ATTR_AFTER_LAST: internal use + */ +enum nl80211_attrs { +/* don't change the order or add anything inbetween, this is ABI! 
*/ + NL80211_ATTR_UNSPEC, + + NL80211_ATTR_WIPHY, + NL80211_ATTR_WIPHY_NAME, + + NL80211_ATTR_IFINDEX, + NL80211_ATTR_IFNAME, + NL80211_ATTR_IFTYPE, + + NL80211_ATTR_MAC, + + NL80211_ATTR_KEY_DATA, + NL80211_ATTR_KEY_IDX, + NL80211_ATTR_KEY_CIPHER, + NL80211_ATTR_KEY_SEQ, + NL80211_ATTR_KEY_DEFAULT, + + NL80211_ATTR_BEACON_INTERVAL, + NL80211_ATTR_DTIM_PERIOD, + NL80211_ATTR_BEACON_HEAD, + NL80211_ATTR_BEACON_TAIL, + + NL80211_ATTR_STA_AID, + NL80211_ATTR_STA_FLAGS, + NL80211_ATTR_STA_LISTEN_INTERVAL, + NL80211_ATTR_STA_SUPPORTED_RATES, + NL80211_ATTR_STA_VLAN, + NL80211_ATTR_STA_INFO, + + NL80211_ATTR_WIPHY_BANDS, + + NL80211_ATTR_MNTR_FLAGS, + + NL80211_ATTR_MESH_ID, + NL80211_ATTR_STA_PLINK_ACTION, + NL80211_ATTR_MPATH_NEXT_HOP, + NL80211_ATTR_MPATH_INFO, + + NL80211_ATTR_BSS_CTS_PROT, + NL80211_ATTR_BSS_SHORT_PREAMBLE, + NL80211_ATTR_BSS_SHORT_SLOT_TIME, + + NL80211_ATTR_HT_CAPABILITY, + + NL80211_ATTR_SUPPORTED_IFTYPES, + + NL80211_ATTR_REG_ALPHA2, + NL80211_ATTR_REG_RULES, + + NL80211_ATTR_MESH_PARAMS, + + NL80211_ATTR_BSS_BASIC_RATES, + + NL80211_ATTR_WIPHY_TXQ_PARAMS, + NL80211_ATTR_WIPHY_FREQ, + NL80211_ATTR_WIPHY_CHANNEL_TYPE, + + NL80211_ATTR_KEY_DEFAULT_MGMT, + + NL80211_ATTR_MGMT_SUBTYPE, + NL80211_ATTR_IE, + + NL80211_ATTR_MAX_NUM_SCAN_SSIDS, + + NL80211_ATTR_SCAN_FREQUENCIES, + NL80211_ATTR_SCAN_SSIDS, + NL80211_ATTR_GENERATION, /* replaces old SCAN_GENERATION */ + NL80211_ATTR_BSS, + + NL80211_ATTR_REG_INITIATOR, + NL80211_ATTR_REG_TYPE, + + NL80211_ATTR_SUPPORTED_COMMANDS, + + NL80211_ATTR_FRAME, + NL80211_ATTR_SSID, + NL80211_ATTR_AUTH_TYPE, + NL80211_ATTR_REASON_CODE, + + NL80211_ATTR_KEY_TYPE, + + NL80211_ATTR_MAX_SCAN_IE_LEN, + NL80211_ATTR_CIPHER_SUITES, + + NL80211_ATTR_FREQ_BEFORE, + NL80211_ATTR_FREQ_AFTER, + + NL80211_ATTR_FREQ_FIXED, + + + NL80211_ATTR_WIPHY_RETRY_SHORT, + NL80211_ATTR_WIPHY_RETRY_LONG, + NL80211_ATTR_WIPHY_FRAG_THRESHOLD, + NL80211_ATTR_WIPHY_RTS_THRESHOLD, + + NL80211_ATTR_TIMED_OUT, + + NL80211_ATTR_USE_MFP, + + NL80211_ATTR_STA_FLAGS2, + + NL80211_ATTR_CONTROL_PORT, + + NL80211_ATTR_TESTDATA, + + NL80211_ATTR_PRIVACY, + + NL80211_ATTR_DISCONNECTED_BY_AP, + NL80211_ATTR_STATUS_CODE, + + NL80211_ATTR_CIPHER_SUITES_PAIRWISE, + NL80211_ATTR_CIPHER_SUITE_GROUP, + NL80211_ATTR_WPA_VERSIONS, + NL80211_ATTR_AKM_SUITES, + + NL80211_ATTR_REQ_IE, + NL80211_ATTR_RESP_IE, + + NL80211_ATTR_PREV_BSSID, + + NL80211_ATTR_KEY, + NL80211_ATTR_KEYS, + + NL80211_ATTR_PID, + + NL80211_ATTR_4ADDR, + + NL80211_ATTR_SURVEY_INFO, + + NL80211_ATTR_PMKID, + NL80211_ATTR_MAX_NUM_PMKIDS, + + NL80211_ATTR_DURATION, + + NL80211_ATTR_COOKIE, + + NL80211_ATTR_WIPHY_COVERAGE_CLASS, + + NL80211_ATTR_TX_RATES, + + NL80211_ATTR_FRAME_MATCH, + + NL80211_ATTR_ACK, + + NL80211_ATTR_PS_STATE, + + /* add attributes here, update the policy in nl80211.c */ + + __NL80211_ATTR_AFTER_LAST, + NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 +}; + +/* source-level API compatibility */ +#define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION + +/* + * Allow user space programs to use #ifdef on new attributes by defining them + * here + */ +#define NL80211_CMD_CONNECT NL80211_CMD_CONNECT +#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY +#define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES +#define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS +#define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ +#define NL80211_ATTR_WIPHY_CHANNEL_TYPE NL80211_ATTR_WIPHY_CHANNEL_TYPE +#define NL80211_ATTR_MGMT_SUBTYPE NL80211_ATTR_MGMT_SUBTYPE +#define 
NL80211_ATTR_IE NL80211_ATTR_IE +#define NL80211_ATTR_REG_INITIATOR NL80211_ATTR_REG_INITIATOR +#define NL80211_ATTR_REG_TYPE NL80211_ATTR_REG_TYPE +#define NL80211_ATTR_FRAME NL80211_ATTR_FRAME +#define NL80211_ATTR_SSID NL80211_ATTR_SSID +#define NL80211_ATTR_AUTH_TYPE NL80211_ATTR_AUTH_TYPE +#define NL80211_ATTR_REASON_CODE NL80211_ATTR_REASON_CODE +#define NL80211_ATTR_CIPHER_SUITES_PAIRWISE NL80211_ATTR_CIPHER_SUITES_PAIRWISE +#define NL80211_ATTR_CIPHER_SUITE_GROUP NL80211_ATTR_CIPHER_SUITE_GROUP +#define NL80211_ATTR_WPA_VERSIONS NL80211_ATTR_WPA_VERSIONS +#define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES +#define NL80211_ATTR_KEY NL80211_ATTR_KEY +#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS + +#define NL80211_MAX_SUPP_RATES 32 +#define NL80211_MAX_SUPP_REG_RULES 32 +#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0 +#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16 +#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 +#define NL80211_HT_CAPABILITY_LEN 26 + +#define NL80211_MAX_NR_CIPHER_SUITES 5 +#define NL80211_MAX_NR_AKM_SUITES 2 + +/** + * enum nl80211_iftype - (virtual) interface types + * + * @NL80211_IFTYPE_UNSPECIFIED: unspecified type, driver decides + * @NL80211_IFTYPE_ADHOC: independent BSS member + * @NL80211_IFTYPE_STATION: managed BSS member + * @NL80211_IFTYPE_AP: access point + * @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points + * @NL80211_IFTYPE_WDS: wireless distribution interface + * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames + * @NL80211_IFTYPE_MESH_POINT: mesh point + * @NL80211_IFTYPE_MAX: highest interface type number currently defined + * @__NL80211_IFTYPE_AFTER_LAST: internal use + * + * These values are used with the %NL80211_ATTR_IFTYPE + * to set the type of an interface. + * + */ +enum nl80211_iftype { + NL80211_IFTYPE_UNSPECIFIED, + NL80211_IFTYPE_ADHOC, + NL80211_IFTYPE_STATION, + NL80211_IFTYPE_AP, + NL80211_IFTYPE_AP_VLAN, + NL80211_IFTYPE_WDS, + NL80211_IFTYPE_MONITOR, + NL80211_IFTYPE_MESH_POINT, + + /* keep last */ + __NL80211_IFTYPE_AFTER_LAST, + NL80211_IFTYPE_MAX = __NL80211_IFTYPE_AFTER_LAST - 1 +}; + +/** + * enum nl80211_sta_flags - station flags + * + * Station flags. When a station is added to an AP interface, it is + * assumed to be already associated (and hence authenticated.) + * + * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X) + * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames + * with short barker preamble + * @NL80211_STA_FLAG_WME: station is WME/QoS capable + * @NL80211_STA_FLAG_MFP: station uses management frame protection + */ +enum nl80211_sta_flags { + __NL80211_STA_FLAG_INVALID, + NL80211_STA_FLAG_AUTHORIZED, + NL80211_STA_FLAG_SHORT_PREAMBLE, + NL80211_STA_FLAG_WME, + NL80211_STA_FLAG_MFP, + + /* keep last */ + __NL80211_STA_FLAG_AFTER_LAST, + NL80211_STA_FLAG_MAX = __NL80211_STA_FLAG_AFTER_LAST - 1 +}; + +/** + * struct nl80211_sta_flag_update - station flags mask/set + * @mask: mask of station flags to set + * @set: which values to set them to + * + * Both mask and set contain bits as per &enum nl80211_sta_flags. + */ +struct nl80211_sta_flag_update { + __u32 mask; + __u32 set; +} __attribute__((packed)); + +/** + * enum nl80211_rate_info - bitrate information + * + * These attribute types are used with %NL80211_STA_INFO_TXRATE + * when getting information about the bitrate of a station. 
+ * + * @__NL80211_RATE_INFO_INVALID: attribute number 0 is reserved + * @NL80211_RATE_INFO_BITRATE: total bitrate (u16, 100kbit/s) + * @NL80211_RATE_INFO_MCS: mcs index for 802.11n (u8) + * @NL80211_RATE_INFO_40_MHZ_WIDTH: 40 Mhz dualchannel bitrate + * @NL80211_RATE_INFO_SHORT_GI: 400ns guard interval + * @NL80211_RATE_INFO_MAX: highest rate_info number currently defined + * @__NL80211_RATE_INFO_AFTER_LAST: internal use + */ +enum nl80211_rate_info { + __NL80211_RATE_INFO_INVALID, + NL80211_RATE_INFO_BITRATE, + NL80211_RATE_INFO_MCS, + NL80211_RATE_INFO_40_MHZ_WIDTH, + NL80211_RATE_INFO_SHORT_GI, + + /* keep last */ + __NL80211_RATE_INFO_AFTER_LAST, + NL80211_RATE_INFO_MAX = __NL80211_RATE_INFO_AFTER_LAST - 1 +}; + +/** + * enum nl80211_sta_info - station information + * + * These attribute types are used with %NL80211_ATTR_STA_INFO + * when getting information about a station. + * + * @__NL80211_STA_INFO_INVALID: attribute number 0 is reserved + * @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs) + * @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station) + * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station) + * @__NL80211_STA_INFO_AFTER_LAST: internal + * @NL80211_STA_INFO_MAX: highest possible station info attribute + * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm) + * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute + * containing info as possible, see &enum nl80211_sta_info_txrate. + * @NL80211_STA_INFO_RX_PACKETS: total received packet (u32, from this station) + * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this + * station) + */ +enum nl80211_sta_info { + __NL80211_STA_INFO_INVALID, + NL80211_STA_INFO_INACTIVE_TIME, + NL80211_STA_INFO_RX_BYTES, + NL80211_STA_INFO_TX_BYTES, + NL80211_STA_INFO_LLID, + NL80211_STA_INFO_PLID, + NL80211_STA_INFO_PLINK_STATE, + NL80211_STA_INFO_SIGNAL, + NL80211_STA_INFO_TX_BITRATE, + NL80211_STA_INFO_RX_PACKETS, + NL80211_STA_INFO_TX_PACKETS, + + /* keep last */ + __NL80211_STA_INFO_AFTER_LAST, + NL80211_STA_INFO_MAX = __NL80211_STA_INFO_AFTER_LAST - 1 +}; + +/** + * enum nl80211_mpath_flags - nl80211 mesh path flags + * + * @NL80211_MPATH_FLAG_ACTIVE: the mesh path is active + * @NL80211_MPATH_FLAG_RESOLVING: the mesh path discovery process is running + * @NL80211_MPATH_FLAG_SN_VALID: the mesh path contains a valid SN + * @NL80211_MPATH_FLAG_FIXED: the mesh path has been manually set + * @NL80211_MPATH_FLAG_RESOLVED: the mesh path discovery process succeeded + */ +enum nl80211_mpath_flags { + NL80211_MPATH_FLAG_ACTIVE = 1<<0, + NL80211_MPATH_FLAG_RESOLVING = 1<<1, + NL80211_MPATH_FLAG_SN_VALID = 1<<2, + NL80211_MPATH_FLAG_FIXED = 1<<3, + NL80211_MPATH_FLAG_RESOLVED = 1<<4, +}; + +/** + * enum nl80211_mpath_info - mesh path information + * + * These attribute types are used with %NL80211_ATTR_MPATH_INFO when getting + * information about a mesh path. 
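(Editorial aside, not part of the patch: a sketch of parsing the nested %NL80211_ATTR_STA_INFO and %NL80211_STA_INFO_TX_BITRATE attributes described above from an %NL80211_CMD_GET_STATION reply, assuming it is installed as the libnl valid-message callback in the same way as iface_cb in the earlier interface-dump sketch.)

#include <stdio.h>
#include <stdint.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int station_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *tb[NL80211_ATTR_MAX + 1];
	struct nlattr *sinfo[NL80211_STA_INFO_MAX + 1];
	struct nlattr *rate[NL80211_RATE_INFO_MAX + 1];

	nla_parse(tb, NL80211_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
		  genlmsg_attrlen(gnlh, 0), NULL);
	if (!tb[NL80211_ATTR_STA_INFO])
		return NL_SKIP;
	nla_parse_nested(sinfo, NL80211_STA_INFO_MAX,
			 tb[NL80211_ATTR_STA_INFO], NULL);

	/* signal is a u8 carrying a signed dBm value */
	if (sinfo[NL80211_STA_INFO_SIGNAL])
		printf("signal: %d dBm\n",
		       (int8_t)nla_get_u8(sinfo[NL80211_STA_INFO_SIGNAL]));

	/* TX bitrate is itself nested; BITRATE is a u16 in 100 kbit/s units */
	if (sinfo[NL80211_STA_INFO_TX_BITRATE]) {
		nla_parse_nested(rate, NL80211_RATE_INFO_MAX,
				 sinfo[NL80211_STA_INFO_TX_BITRATE], NULL);
		if (rate[NL80211_RATE_INFO_BITRATE])
			printf("tx bitrate: %u.%u Mbit/s\n",
			       nla_get_u16(rate[NL80211_RATE_INFO_BITRATE]) / 10,
			       nla_get_u16(rate[NL80211_RATE_INFO_BITRATE]) % 10);
	}
	return NL_SKIP;
}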
+ * + * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved + * @NL80211_ATTR_MPATH_FRAME_QLEN: number of queued frames for this destination + * @NL80211_ATTR_MPATH_SN: destination sequence number + * @NL80211_ATTR_MPATH_METRIC: metric (cost) of this mesh path + * @NL80211_ATTR_MPATH_EXPTIME: expiration time for the path, in msec from now + * @NL80211_ATTR_MPATH_FLAGS: mesh path flags, enumerated in + * &enum nl80211_mpath_flags; + * @NL80211_ATTR_MPATH_DISCOVERY_TIMEOUT: total path discovery timeout, in msec + * @NL80211_ATTR_MPATH_DISCOVERY_RETRIES: mesh path discovery retries + */ +enum nl80211_mpath_info { + __NL80211_MPATH_INFO_INVALID, + NL80211_MPATH_INFO_FRAME_QLEN, + NL80211_MPATH_INFO_SN, + NL80211_MPATH_INFO_METRIC, + NL80211_MPATH_INFO_EXPTIME, + NL80211_MPATH_INFO_FLAGS, + NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, + NL80211_MPATH_INFO_DISCOVERY_RETRIES, + + /* keep last */ + __NL80211_MPATH_INFO_AFTER_LAST, + NL80211_MPATH_INFO_MAX = __NL80211_MPATH_INFO_AFTER_LAST - 1 +}; + +/** + * enum nl80211_band_attr - band attributes + * @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved + * @NL80211_BAND_ATTR_FREQS: supported frequencies in this band, + * an array of nested frequency attributes + * @NL80211_BAND_ATTR_RATES: supported bitrates in this band, + * an array of nested bitrate attributes + * @NL80211_BAND_ATTR_HT_MCS_SET: 16-byte attribute containing the MCS set as + * defined in 802.11n + * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE + * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n + * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n + */ +enum nl80211_band_attr { + __NL80211_BAND_ATTR_INVALID, + NL80211_BAND_ATTR_FREQS, + NL80211_BAND_ATTR_RATES, + + NL80211_BAND_ATTR_HT_MCS_SET, + NL80211_BAND_ATTR_HT_CAPA, + NL80211_BAND_ATTR_HT_AMPDU_FACTOR, + NL80211_BAND_ATTR_HT_AMPDU_DENSITY, + + /* keep last */ + __NL80211_BAND_ATTR_AFTER_LAST, + NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1 +}; + +#define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA + +/** + * enum nl80211_frequency_attr - frequency attributes + * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz + * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current + * regulatory domain. + * @NL80211_FREQUENCY_ATTR_PASSIVE_SCAN: Only passive scanning is + * permitted on this channel in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_NO_IBSS: IBSS networks are not permitted + * on this channel in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory + * on this channel in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm + * (100 * dBm). + */ +enum nl80211_frequency_attr { + __NL80211_FREQUENCY_ATTR_INVALID, + NL80211_FREQUENCY_ATTR_FREQ, + NL80211_FREQUENCY_ATTR_DISABLED, + NL80211_FREQUENCY_ATTR_PASSIVE_SCAN, + NL80211_FREQUENCY_ATTR_NO_IBSS, + NL80211_FREQUENCY_ATTR_RADAR, + NL80211_FREQUENCY_ATTR_MAX_TX_POWER, + + /* keep last */ + __NL80211_FREQUENCY_ATTR_AFTER_LAST, + NL80211_FREQUENCY_ATTR_MAX = __NL80211_FREQUENCY_ATTR_AFTER_LAST - 1 +}; + +#define NL80211_FREQUENCY_ATTR_MAX_TX_POWER NL80211_FREQUENCY_ATTR_MAX_TX_POWER + +/** + * enum nl80211_bitrate_attr - bitrate attributes + * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps + * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported + * in 2.4 GHz band. 
+ */ +enum nl80211_bitrate_attr { + __NL80211_BITRATE_ATTR_INVALID, + NL80211_BITRATE_ATTR_RATE, + NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE, + + /* keep last */ + __NL80211_BITRATE_ATTR_AFTER_LAST, + NL80211_BITRATE_ATTR_MAX = __NL80211_BITRATE_ATTR_AFTER_LAST - 1 +}; + +/** + * enum nl80211_reg_initiator - Indicates the initiator of a reg domain request + * @NL80211_REGDOM_SET_BY_CORE: Core queried CRDA for a dynamic world + * regulatory domain. + * @NL80211_REGDOM_SET_BY_USER: User asked the wireless core to set the + * regulatory domain. + * @NL80211_REGDOM_SET_BY_DRIVER: a wireless driver has hinted to the + * wireless core it thinks it knows the regulatory domain we should be in. + * @NL80211_REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an + * 802.11 country information element with regulatory information it + * thinks we should consider. + */ +enum nl80211_reg_initiator { + NL80211_REGDOM_SET_BY_CORE, + NL80211_REGDOM_SET_BY_USER, + NL80211_REGDOM_SET_BY_DRIVER, + NL80211_REGDOM_SET_BY_COUNTRY_IE, +}; + +/** + * enum nl80211_reg_type - specifies the type of regulatory domain + * @NL80211_REGDOM_TYPE_COUNTRY: the regulatory domain set is one that pertains + * to a specific country. When this is set you can count on the + * ISO / IEC 3166 alpha2 country code being valid. + * @NL80211_REGDOM_TYPE_WORLD: the regulatory domain set is the world regulatory + * domain. + * @NL80211_REGDOM_TYPE_CUSTOM_WORLD: the regulatory domain set is a custom + * driver specific world regulatory domain. These do not apply system-wide + * and are only applicable to the individual devices which have requested + * them to be applied. + * @NL80211_REGDOM_TYPE_INTERSECTION: the regulatory domain set is the product + * of an intersection between two regulatory domains -- the previously + * set regulatory domain on the system and the last accepted regulatory + * domain request to be processed. + */ +enum nl80211_reg_type { + NL80211_REGDOM_TYPE_COUNTRY, + NL80211_REGDOM_TYPE_WORLD, + NL80211_REGDOM_TYPE_CUSTOM_WORLD, + NL80211_REGDOM_TYPE_INTERSECTION, +}; + +/** + * enum nl80211_reg_rule_attr - regulatory rule attributes + * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional + * considerations for a given frequency range. These are the + * &enum nl80211_reg_rule_flags. + * @NL80211_ATTR_FREQ_RANGE_START: starting frequency for the regulatory + * rule in KHz. This is not a center frequency but an actual regulatory + * band edge. + * @NL80211_ATTR_FREQ_RANGE_END: ending frequency for the regulatory rule + * in KHz. This is not a center frequency but an actual regulatory + * band edge. + * @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this + * frequency range, in KHz. + * @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain + * for a given frequency range. The value is in mBi (100 * dBi). + * If you don't have one then don't send this. + * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for + * a given frequency range. The value is in mBm (100 * dBm).
+ */ +enum nl80211_reg_rule_attr { + __NL80211_REG_RULE_ATTR_INVALID, + NL80211_ATTR_REG_RULE_FLAGS, + + NL80211_ATTR_FREQ_RANGE_START, + NL80211_ATTR_FREQ_RANGE_END, + NL80211_ATTR_FREQ_RANGE_MAX_BW, + + NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, + NL80211_ATTR_POWER_RULE_MAX_EIRP, + + /* keep last */ + __NL80211_REG_RULE_ATTR_AFTER_LAST, + NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1 +}; + +/** + * enum nl80211_reg_rule_flags - regulatory rule flags + * + * @NL80211_RRF_NO_OFDM: OFDM modulation not allowed + * @NL80211_RRF_NO_CCK: CCK modulation not allowed + * @NL80211_RRF_NO_INDOOR: indoor operation not allowed + * @NL80211_RRF_NO_OUTDOOR: outdoor operation not allowed + * @NL80211_RRF_DFS: DFS support is required to be used + * @NL80211_RRF_PTP_ONLY: this is only for Point To Point links + * @NL80211_RRF_PTMP_ONLY: this is only for Point To Multi Point links + * @NL80211_RRF_PASSIVE_SCAN: passive scan is required + * @NL80211_RRF_NO_IBSS: no IBSS is allowed + */ +enum nl80211_reg_rule_flags { + NL80211_RRF_NO_OFDM = 1<<0, + NL80211_RRF_NO_CCK = 1<<1, + NL80211_RRF_NO_INDOOR = 1<<2, + NL80211_RRF_NO_OUTDOOR = 1<<3, + NL80211_RRF_DFS = 1<<4, + NL80211_RRF_PTP_ONLY = 1<<5, + NL80211_RRF_PTMP_ONLY = 1<<6, + NL80211_RRF_PASSIVE_SCAN = 1<<7, + NL80211_RRF_NO_IBSS = 1<<8, +}; + +/** + * enum nl80211_survey_info - survey information + * + * These attribute types are used with %NL80211_ATTR_SURVEY_INFO + * when getting information about a survey. + * + * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved + * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel + * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm) + */ +enum nl80211_survey_info { + __NL80211_SURVEY_INFO_INVALID, + NL80211_SURVEY_INFO_FREQUENCY, + NL80211_SURVEY_INFO_NOISE, + + /* keep last */ + __NL80211_SURVEY_INFO_AFTER_LAST, + NL80211_SURVEY_INFO_MAX = __NL80211_SURVEY_INFO_AFTER_LAST - 1 +}; + +/** + * enum nl80211_mntr_flags - monitor configuration flags + * + * Monitor configuration flags. + * + * @__NL80211_MNTR_FLAG_INVALID: reserved + * + * @NL80211_MNTR_FLAG_FCSFAIL: pass frames with bad FCS + * @NL80211_MNTR_FLAG_PLCPFAIL: pass frames with bad PLCP + * @NL80211_MNTR_FLAG_CONTROL: pass control frames + * @NL80211_MNTR_FLAG_OTHER_BSS: disable BSSID filtering + * @NL80211_MNTR_FLAG_COOK_FRAMES: report frames after processing. + * overrides all other flags. 
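(Editorial aside, not part of the patch: a sketch of walking the regulatory rules that %NL80211_CMD_GET_REG returns under %NL80211_ATTR_REG_RULES, using the per-rule attributes defined above; the frequency and bandwidth attributes are u32 values in KHz and the EIRP limit is in mBm. print_reg_rules is an illustrative name, and the caller is assumed to have already pulled the NL80211_ATTR_REG_RULES attribute out of the reply, e.g. in a callback like iface_cb earlier.)

#include <stdio.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static void print_reg_rules(struct nlattr *reg_rules)
{
	struct nlattr *rule, *tb[NL80211_REG_RULE_ATTR_MAX + 1];
	int rem;

	/* NL80211_ATTR_REG_RULES is an array of nested regulatory rules */
	nla_for_each_nested(rule, reg_rules, rem) {
		nla_parse_nested(tb, NL80211_REG_RULE_ATTR_MAX, rule, NULL);
		if (!tb[NL80211_ATTR_FREQ_RANGE_START] ||
		    !tb[NL80211_ATTR_FREQ_RANGE_END] ||
		    !tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
			continue;
		printf("%u - %u KHz @ %u KHz, max EIRP %u mBm\n",
		       nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]),
		       nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]),
		       tb[NL80211_ATTR_FREQ_RANGE_MAX_BW] ?
				nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]) : 0,
		       nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]));
	}
}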
+ *
+ * @__NL80211_MNTR_FLAG_AFTER_LAST: internal use
+ * @NL80211_MNTR_FLAG_MAX: highest possible monitor flag
+ */
+enum nl80211_mntr_flags {
+ __NL80211_MNTR_FLAG_INVALID,
+ NL80211_MNTR_FLAG_FCSFAIL,
+ NL80211_MNTR_FLAG_PLCPFAIL,
+ NL80211_MNTR_FLAG_CONTROL,
+ NL80211_MNTR_FLAG_OTHER_BSS,
+ NL80211_MNTR_FLAG_COOK_FRAMES,
+
+ /* keep last */
+ __NL80211_MNTR_FLAG_AFTER_LAST,
+ NL80211_MNTR_FLAG_MAX = __NL80211_MNTR_FLAG_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_meshconf_params - mesh configuration parameters
+ *
+ * Mesh configuration parameters
+ *
+ * @__NL80211_MESHCONF_INVALID: internal use
+ *
+ * @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in
+ * millisecond units, used by the Peer Link Open message
+ *
+ * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in
+ * millisecond units, used by the peer link management to close a peer link
+ *
+ * @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in
+ * millisecond units
+ *
+ * @NL80211_MESHCONF_MAX_PEER_LINKS: maximum number of peer links allowed
+ * on this mesh interface
+ *
+ * @NL80211_MESHCONF_MAX_RETRIES: specifies the maximum number of peer link
+ * open retries that can be sent to establish a new peer link instance in a
+ * mesh
+ *
+ * @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh
+ * point.
+ *
+ * @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically
+ * open peer links when we detect compatible mesh peers.
+ *
+ * @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames
+ * containing a PREQ that an MP can send to a particular destination (path
+ * target)
+ *
+ * @NL80211_MESHCONF_PATH_REFRESH_TIME: how frequently to refresh mesh paths
+ * (in milliseconds)
+ *
+ * @NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT: minimum length of time to wait
+ * until giving up on a path discovery (in milliseconds)
+ *
+ * @NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT: The time (in TUs) for which mesh
+ * points receiving a PREQ shall consider the forwarding information from the
+ * root to be valid.
+ * (TU = time unit)
+ *
+ * @NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL: The minimum interval of time (in
+ * TUs) during which an MP can send only one action frame containing a PREQ
+ * reference element
+ *
+ * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs)
+ * that it takes for an HWMP information element to propagate across the mesh
+ *
+ * @NL80211_MESHCONF_HWMP_ROOTMODE: whether root mode is enabled or not
+ *
+ * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
+ *
+ * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_meshconf_params {
+ __NL80211_MESHCONF_INVALID,
+ NL80211_MESHCONF_RETRY_TIMEOUT,
+ NL80211_MESHCONF_CONFIRM_TIMEOUT,
+ NL80211_MESHCONF_HOLDING_TIMEOUT,
+ NL80211_MESHCONF_MAX_PEER_LINKS,
+ NL80211_MESHCONF_MAX_RETRIES,
+ NL80211_MESHCONF_TTL,
+ NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+ NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+ NL80211_MESHCONF_PATH_REFRESH_TIME,
+ NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+ NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
+ NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
+ NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
+ NL80211_MESHCONF_HWMP_ROOTMODE,
+
+ /* keep last */
+ __NL80211_MESHCONF_ATTR_AFTER_LAST,
+ NL80211_MESHCONF_ATTR_MAX = __NL80211_MESHCONF_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_txq_attr - TX queue parameter attributes
+ * @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved
+ * @NL80211_TXQ_ATTR_QUEUE: TX queue identifier (NL80211_TXQ_Q_*)
+ * @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning
+ * disabled
+ * @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form
+ * 2^n-1 in the range 1..32767]
+ * @NL80211_TXQ_ATTR_CWMAX: Maximum contention window [a value of the form
+ * 2^n-1 in the range 1..32767]
+ * @NL80211_TXQ_ATTR_AIFS: Arbitration interframe space [0..255]
+ * @__NL80211_TXQ_ATTR_AFTER_LAST: Internal
+ * @NL80211_TXQ_ATTR_MAX: Maximum TXQ attribute number
+ */
+enum nl80211_txq_attr {
+ __NL80211_TXQ_ATTR_INVALID,
+ NL80211_TXQ_ATTR_QUEUE,
+ NL80211_TXQ_ATTR_TXOP,
+ NL80211_TXQ_ATTR_CWMIN,
+ NL80211_TXQ_ATTR_CWMAX,
+ NL80211_TXQ_ATTR_AIFS,
+
+ /* keep last */
+ __NL80211_TXQ_ATTR_AFTER_LAST,
+ NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1
+};
+
+enum nl80211_txq_q {
+ NL80211_TXQ_Q_VO,
+ NL80211_TXQ_Q_VI,
+ NL80211_TXQ_Q_BE,
+ NL80211_TXQ_Q_BK
+};
+
+enum nl80211_channel_type {
+ NL80211_CHAN_NO_HT,
+ NL80211_CHAN_HT20,
+ NL80211_CHAN_HT40MINUS,
+ NL80211_CHAN_HT40PLUS
+};
+
+/**
+ * enum nl80211_bss - netlink attributes for a BSS
+ *
+ * @__NL80211_BSS_INVALID: invalid
+ * @NL80211_BSS_FREQUENCY: frequency in MHz (u32)
+ * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64)
+ * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16)
+ * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16)
+ * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the
+ * raw information elements from the probe response/beacon (bin);
+ * if the %NL80211_BSS_BEACON_IES attribute is present, the IEs here are
+ * from a Probe Response frame; otherwise they are from a Beacon frame.
+ * However, if the driver does not indicate the source of the IEs, these
+ * IEs may be from either frame subtype.
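The contention-window wording in the TX queue attributes above ("a value of the form 2^n-1 in the range 1..32767") means the only legal CWmin/CWmax values are 1, 3, 7, ..., 32767. A standalone sketch that enumerates them (illustration only, not part of the header):

#include <stdio.h>

int main(void)
{
	/* legal contention-window values: 1, 3, 7, ..., 32767 (2^n - 1) */
	for (unsigned int cw = 1; cw <= 32767; cw = (cw << 1) | 1)
		printf("%u\n", cw);
	return 0;
}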
+ * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon + * in mBm (100 * dBm) (s32) + * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon + * in unspecified units, scaled to 0..100 (u8) + * @NL80211_BSS_STATUS: status, if this BSS is "used" + * @NL80211_BSS_SEEN_MS_AGO: age of this BSS entry in ms + * @NL80211_BSS_BEACON_IES: binary attribute containing the raw information + * elements from a Beacon frame (bin); not present if no Beacon frame has + * yet been received + * @__NL80211_BSS_AFTER_LAST: internal + * @NL80211_BSS_MAX: highest BSS attribute + */ +enum nl80211_bss { + __NL80211_BSS_INVALID, + NL80211_BSS_BSSID, + NL80211_BSS_FREQUENCY, + NL80211_BSS_TSF, + NL80211_BSS_BEACON_INTERVAL, + NL80211_BSS_CAPABILITY, + NL80211_BSS_INFORMATION_ELEMENTS, + NL80211_BSS_SIGNAL_MBM, + NL80211_BSS_SIGNAL_UNSPEC, + NL80211_BSS_STATUS, + NL80211_BSS_SEEN_MS_AGO, + NL80211_BSS_BEACON_IES, + + /* keep last */ + __NL80211_BSS_AFTER_LAST, + NL80211_BSS_MAX = __NL80211_BSS_AFTER_LAST - 1 +}; + +/** + * enum nl80211_bss_status - BSS "status" + */ +enum nl80211_bss_status { + NL80211_BSS_STATUS_AUTHENTICATED, + NL80211_BSS_STATUS_ASSOCIATED, + NL80211_BSS_STATUS_IBSS_JOINED, +}; + +/** + * enum nl80211_auth_type - AuthenticationType + * + * @NL80211_AUTHTYPE_OPEN_SYSTEM: Open System authentication + * @NL80211_AUTHTYPE_SHARED_KEY: Shared Key authentication (WEP only) + * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r) + * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP) + * @__NL80211_AUTHTYPE_NUM: internal + * @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm + * @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by + * trying multiple times); this is invalid in netlink -- leave out + * the attribute for this on CONNECT commands. + */ +enum nl80211_auth_type { + NL80211_AUTHTYPE_OPEN_SYSTEM, + NL80211_AUTHTYPE_SHARED_KEY, + NL80211_AUTHTYPE_FT, + NL80211_AUTHTYPE_NETWORK_EAP, + + /* keep last */ + __NL80211_AUTHTYPE_NUM, + NL80211_AUTHTYPE_MAX = __NL80211_AUTHTYPE_NUM - 1, + NL80211_AUTHTYPE_AUTOMATIC +}; + +/** + * enum nl80211_key_type - Key Type + * @NL80211_KEYTYPE_GROUP: Group (broadcast/multicast) key + * @NL80211_KEYTYPE_PAIRWISE: Pairwise (unicast/individual) key + * @NL80211_KEYTYPE_PEERKEY: PeerKey (DLS) + */ +enum nl80211_key_type { + NL80211_KEYTYPE_GROUP, + NL80211_KEYTYPE_PAIRWISE, + NL80211_KEYTYPE_PEERKEY, +}; + +/** + * enum nl80211_mfp - Management frame protection state + * @NL80211_MFP_NO: Management frame protection not used + * @NL80211_MFP_REQUIRED: Management frame protection required + */ +enum nl80211_mfp { + NL80211_MFP_NO, + NL80211_MFP_REQUIRED, +}; + +enum nl80211_wpa_versions { + NL80211_WPA_VERSION_1 = 1 << 0, + NL80211_WPA_VERSION_2 = 1 << 1, +}; + +/** + * enum nl80211_key_attributes - key attributes + * @__NL80211_KEY_INVALID: invalid + * @NL80211_KEY_DATA: (temporal) key data; for TKIP this consists of + * 16 bytes encryption key followed by 8 bytes each for TX and RX MIC + * keys + * @NL80211_KEY_IDX: key ID (u8, 0-3) + * @NL80211_KEY_CIPHER: key cipher suite (u32, as defined by IEEE 802.11 + * section 7.3.2.25.1, e.g. 
0x000FAC04) + * @NL80211_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and + * CCMP keys, each six bytes in little endian + * @NL80211_KEY_DEFAULT: flag indicating default key + * @NL80211_KEY_DEFAULT_MGMT: flag indicating default management key + * @__NL80211_KEY_AFTER_LAST: internal + * @NL80211_KEY_MAX: highest key attribute + */ +enum nl80211_key_attributes { + __NL80211_KEY_INVALID, + NL80211_KEY_DATA, + NL80211_KEY_IDX, + NL80211_KEY_CIPHER, + NL80211_KEY_SEQ, + NL80211_KEY_DEFAULT, + NL80211_KEY_DEFAULT_MGMT, + + /* keep last */ + __NL80211_KEY_AFTER_LAST, + NL80211_KEY_MAX = __NL80211_KEY_AFTER_LAST - 1 +}; + +/** + * enum nl80211_tx_rate_attributes - TX rate set attributes + * @__NL80211_TXRATE_INVALID: invalid + * @NL80211_TXRATE_LEGACY: Legacy (non-MCS) rates allowed for TX rate selection + * in an array of rates as defined in IEEE 802.11 7.3.2.2 (u8 values with + * 1 = 500 kbps) but without the IE length restriction (at most + * %NL80211_MAX_SUPP_RATES in a single array). + * @__NL80211_TXRATE_AFTER_LAST: internal + * @NL80211_TXRATE_MAX: highest TX rate attribute + */ +enum nl80211_tx_rate_attributes { + __NL80211_TXRATE_INVALID, + NL80211_TXRATE_LEGACY, + + /* keep last */ + __NL80211_TXRATE_AFTER_LAST, + NL80211_TXRATE_MAX = __NL80211_TXRATE_AFTER_LAST - 1 +}; + +/** + * enum nl80211_band - Frequency band + * @NL80211_BAND_2GHZ - 2.4 GHz ISM band + * @NL80211_BAND_5GHZ - around 5 GHz band (4.9 - 5.7 GHz) + */ +enum nl80211_band { + NL80211_BAND_2GHZ, + NL80211_BAND_5GHZ, +}; + +enum nl80211_ps_state { + NL80211_PS_DISABLED, + NL80211_PS_ENABLED, +}; + +#endif /* __LINUX_NL80211_H */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h new file mode 100644 index 0000000..382476a --- /dev/null +++ b/include/linux/pci_ids.h @@ -0,0 +1,2739 @@ +/* + * PCI Class, Vendor and Device IDs + * + * Please keep sorted. + * + * Do not add new entries to this file unless the definitions + * are shared between multiple drivers. 
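Most of the class constants that follow are 16-bit base-class/subclass pairs (for example PCI_CLASS_NETWORK_ETHERNET 0x0200), while a few also encode the programming interface in a third byte (for example PCI_CLASS_STORAGE_SATA_AHCI 0x010601). A standalone sketch of how such a 24-bit value splits up (illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int class_code = 0x010601;	/* PCI_CLASS_STORAGE_SATA_AHCI below */

	printf("base class 0x%02x, subclass 0x%02x, prog-if 0x%02x\n",
	       (class_code >> 16) & 0xff,	/* 0x01: mass storage */
	       (class_code >> 8) & 0xff,	/* 0x06: SATA */
	       class_code & 0xff);		/* 0x01: AHCI */
	return 0;
}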
+ */ + +/* Device classes and subclasses */ + +#define PCI_CLASS_NOT_DEFINED 0x0000 +#define PCI_CLASS_NOT_DEFINED_VGA 0x0001 + +#define PCI_BASE_CLASS_STORAGE 0x01 +#define PCI_CLASS_STORAGE_SCSI 0x0100 +#define PCI_CLASS_STORAGE_IDE 0x0101 +#define PCI_CLASS_STORAGE_FLOPPY 0x0102 +#define PCI_CLASS_STORAGE_IPI 0x0103 +#define PCI_CLASS_STORAGE_RAID 0x0104 +#define PCI_CLASS_STORAGE_SATA 0x0106 +#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 +#define PCI_CLASS_STORAGE_SAS 0x0107 +#define PCI_CLASS_STORAGE_OTHER 0x0180 + +#define PCI_BASE_CLASS_NETWORK 0x02 +#define PCI_CLASS_NETWORK_ETHERNET 0x0200 +#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 +#define PCI_CLASS_NETWORK_FDDI 0x0202 +#define PCI_CLASS_NETWORK_ATM 0x0203 +#define PCI_CLASS_NETWORK_OTHER 0x0280 + +#define PCI_BASE_CLASS_DISPLAY 0x03 +#define PCI_CLASS_DISPLAY_VGA 0x0300 +#define PCI_CLASS_DISPLAY_XGA 0x0301 +#define PCI_CLASS_DISPLAY_3D 0x0302 +#define PCI_CLASS_DISPLAY_OTHER 0x0380 + +#define PCI_BASE_CLASS_MULTIMEDIA 0x04 +#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 +#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 +#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 +#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 + +#define PCI_BASE_CLASS_MEMORY 0x05 +#define PCI_CLASS_MEMORY_RAM 0x0500 +#define PCI_CLASS_MEMORY_FLASH 0x0501 +#define PCI_CLASS_MEMORY_OTHER 0x0580 + +#define PCI_BASE_CLASS_BRIDGE 0x06 +#define PCI_CLASS_BRIDGE_HOST 0x0600 +#define PCI_CLASS_BRIDGE_ISA 0x0601 +#define PCI_CLASS_BRIDGE_EISA 0x0602 +#define PCI_CLASS_BRIDGE_MC 0x0603 +#define PCI_CLASS_BRIDGE_PCI 0x0604 +#define PCI_CLASS_BRIDGE_PCMCIA 0x0605 +#define PCI_CLASS_BRIDGE_NUBUS 0x0606 +#define PCI_CLASS_BRIDGE_CARDBUS 0x0607 +#define PCI_CLASS_BRIDGE_RACEWAY 0x0608 +#define PCI_CLASS_BRIDGE_OTHER 0x0680 + +#define PCI_BASE_CLASS_COMMUNICATION 0x07 +#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700 +#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701 +#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702 +#define PCI_CLASS_COMMUNICATION_MODEM 0x0703 +#define PCI_CLASS_COMMUNICATION_OTHER 0x0780 + +#define PCI_BASE_CLASS_SYSTEM 0x08 +#define PCI_CLASS_SYSTEM_PIC 0x0800 +#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010 +#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020 +#define PCI_CLASS_SYSTEM_DMA 0x0801 +#define PCI_CLASS_SYSTEM_TIMER 0x0802 +#define PCI_CLASS_SYSTEM_RTC 0x0803 +#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 +#define PCI_CLASS_SYSTEM_SDHCI 0x0805 +#define PCI_CLASS_SYSTEM_OTHER 0x0880 + +#define PCI_BASE_CLASS_INPUT 0x09 +#define PCI_CLASS_INPUT_KEYBOARD 0x0900 +#define PCI_CLASS_INPUT_PEN 0x0901 +#define PCI_CLASS_INPUT_MOUSE 0x0902 +#define PCI_CLASS_INPUT_SCANNER 0x0903 +#define PCI_CLASS_INPUT_GAMEPORT 0x0904 +#define PCI_CLASS_INPUT_OTHER 0x0980 + +#define PCI_BASE_CLASS_DOCKING 0x0a +#define PCI_CLASS_DOCKING_GENERIC 0x0a00 +#define PCI_CLASS_DOCKING_OTHER 0x0a80 + +#define PCI_BASE_CLASS_PROCESSOR 0x0b +#define PCI_CLASS_PROCESSOR_386 0x0b00 +#define PCI_CLASS_PROCESSOR_486 0x0b01 +#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02 +#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10 +#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20 +#define PCI_CLASS_PROCESSOR_MIPS 0x0b30 +#define PCI_CLASS_PROCESSOR_CO 0x0b40 + +#define PCI_BASE_CLASS_SERIAL 0x0c +#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00 +#define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010 +#define PCI_CLASS_SERIAL_ACCESS 0x0c01 +#define PCI_CLASS_SERIAL_SSA 0x0c02 +#define PCI_CLASS_SERIAL_USB 0x0c03 +#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300 +#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 +#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 +#define 
PCI_CLASS_SERIAL_USB_XHCI 0x0c0330 +#define PCI_CLASS_SERIAL_FIBER 0x0c04 +#define PCI_CLASS_SERIAL_SMBUS 0x0c05 + +#define PCI_BASE_CLASS_WIRELESS 0x0d +#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10 +#define PCI_CLASS_WIRELESS_WHCI 0x0d1010 + +#define PCI_BASE_CLASS_INTELLIGENT 0x0e +#define PCI_CLASS_INTELLIGENT_I2O 0x0e00 + +#define PCI_BASE_CLASS_SATELLITE 0x0f +#define PCI_CLASS_SATELLITE_TV 0x0f00 +#define PCI_CLASS_SATELLITE_AUDIO 0x0f01 +#define PCI_CLASS_SATELLITE_VOICE 0x0f03 +#define PCI_CLASS_SATELLITE_DATA 0x0f04 + +#define PCI_BASE_CLASS_CRYPT 0x10 +#define PCI_CLASS_CRYPT_NETWORK 0x1000 +#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001 +#define PCI_CLASS_CRYPT_OTHER 0x1080 + +#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11 +#define PCI_CLASS_SP_DPIO 0x1100 +#define PCI_CLASS_SP_OTHER 0x1180 + +#define PCI_CLASS_OTHERS 0xff + +/* Vendors and devices. Sort key: vendor first, device next. */ + +#define PCI_VENDOR_ID_TTTECH 0x0357 +#define PCI_DEVICE_ID_TTTECH_MC322 0x000a + +#define PCI_VENDOR_ID_DYNALINK 0x0675 +#define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702 + +#define PCI_VENDOR_ID_BERKOM 0x0871 +#define PCI_DEVICE_ID_BERKOM_A1T 0xffa1 +#define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2 +#define PCI_DEVICE_ID_BERKOM_A4T 0xffa4 +#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8 + +#define PCI_VENDOR_ID_COMPAQ 0x0e11 +#define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508 +#define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc +#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10 +#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32 +#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34 +#define PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE 0xae33 +#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35 +#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40 +#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43 +#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011 +#define PCI_DEVICE_ID_COMPAQ_CISS 0xb060 +#define PCI_DEVICE_ID_COMPAQ_CISSB 0xb178 +#define PCI_DEVICE_ID_COMPAQ_CISSC 0x46 +#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130 +#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150 + +#define PCI_VENDOR_ID_NCR 0x1000 +#define PCI_VENDOR_ID_LSI_LOGIC 0x1000 +#define PCI_DEVICE_ID_NCR_53C810 0x0001 +#define PCI_DEVICE_ID_NCR_53C820 0x0002 +#define PCI_DEVICE_ID_NCR_53C825 0x0003 +#define PCI_DEVICE_ID_NCR_53C815 0x0004 +#define PCI_DEVICE_ID_LSI_53C810AP 0x0005 +#define PCI_DEVICE_ID_NCR_53C860 0x0006 +#define PCI_DEVICE_ID_LSI_53C1510 0x000a +#define PCI_DEVICE_ID_NCR_53C896 0x000b +#define PCI_DEVICE_ID_NCR_53C895 0x000c +#define PCI_DEVICE_ID_NCR_53C885 0x000d +#define PCI_DEVICE_ID_NCR_53C875 0x000f +#define PCI_DEVICE_ID_NCR_53C1510 0x0010 +#define PCI_DEVICE_ID_LSI_53C895A 0x0012 +#define PCI_DEVICE_ID_LSI_53C875A 0x0013 +#define PCI_DEVICE_ID_LSI_53C1010_33 0x0020 +#define PCI_DEVICE_ID_LSI_53C1010_66 0x0021 +#define PCI_DEVICE_ID_LSI_53C1030 0x0030 +#define PCI_DEVICE_ID_LSI_1030_53C1035 0x0032 +#define PCI_DEVICE_ID_LSI_53C1035 0x0040 +#define PCI_DEVICE_ID_NCR_53C875J 0x008f +#define PCI_DEVICE_ID_LSI_FC909 0x0621 +#define PCI_DEVICE_ID_LSI_FC929 0x0622 +#define PCI_DEVICE_ID_LSI_FC929_LAN 0x0623 +#define PCI_DEVICE_ID_LSI_FC919 0x0624 +#define PCI_DEVICE_ID_LSI_FC919_LAN 0x0625 +#define PCI_DEVICE_ID_LSI_FC929X 0x0626 +#define PCI_DEVICE_ID_LSI_FC939X 0x0642 +#define PCI_DEVICE_ID_LSI_FC949X 0x0640 +#define PCI_DEVICE_ID_LSI_FC949ES 0x0646 +#define PCI_DEVICE_ID_LSI_FC919X 0x0628 +#define PCI_DEVICE_ID_NCR_YELLOWFIN 0x0701 +#define PCI_DEVICE_ID_LSI_61C102 0x0901 +#define PCI_DEVICE_ID_LSI_63C815 0x1000 +#define PCI_DEVICE_ID_LSI_SAS1064 0x0050 +#define 
PCI_DEVICE_ID_LSI_SAS1064R 0x0411 +#define PCI_DEVICE_ID_LSI_SAS1066 0x005E +#define PCI_DEVICE_ID_LSI_SAS1068 0x0054 +#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C +#define PCI_DEVICE_ID_LSI_SAS1064E 0x0056 +#define PCI_DEVICE_ID_LSI_SAS1066E 0x005A +#define PCI_DEVICE_ID_LSI_SAS1068E 0x0058 +#define PCI_DEVICE_ID_LSI_SAS1078 0x0060 + +#define PCI_VENDOR_ID_ATI 0x1002 +/* Mach64 */ +#define PCI_DEVICE_ID_ATI_68800 0x4158 +#define PCI_DEVICE_ID_ATI_215CT222 0x4354 +#define PCI_DEVICE_ID_ATI_210888CX 0x4358 +#define PCI_DEVICE_ID_ATI_215ET222 0x4554 +/* Mach64 / Rage */ +#define PCI_DEVICE_ID_ATI_215GB 0x4742 +#define PCI_DEVICE_ID_ATI_215GD 0x4744 +#define PCI_DEVICE_ID_ATI_215GI 0x4749 +#define PCI_DEVICE_ID_ATI_215GP 0x4750 +#define PCI_DEVICE_ID_ATI_215GQ 0x4751 +#define PCI_DEVICE_ID_ATI_215XL 0x4752 +#define PCI_DEVICE_ID_ATI_215GT 0x4754 +#define PCI_DEVICE_ID_ATI_215GTB 0x4755 +#define PCI_DEVICE_ID_ATI_215_IV 0x4756 +#define PCI_DEVICE_ID_ATI_215_IW 0x4757 +#define PCI_DEVICE_ID_ATI_215_IZ 0x475A +#define PCI_DEVICE_ID_ATI_210888GX 0x4758 +#define PCI_DEVICE_ID_ATI_215_LB 0x4c42 +#define PCI_DEVICE_ID_ATI_215_LD 0x4c44 +#define PCI_DEVICE_ID_ATI_215_LG 0x4c47 +#define PCI_DEVICE_ID_ATI_215_LI 0x4c49 +#define PCI_DEVICE_ID_ATI_215_LM 0x4c4D +#define PCI_DEVICE_ID_ATI_215_LN 0x4c4E +#define PCI_DEVICE_ID_ATI_215_LR 0x4c52 +#define PCI_DEVICE_ID_ATI_215_LS 0x4c53 +#define PCI_DEVICE_ID_ATI_264_LT 0x4c54 +/* Mach64 VT */ +#define PCI_DEVICE_ID_ATI_264VT 0x5654 +#define PCI_DEVICE_ID_ATI_264VU 0x5655 +#define PCI_DEVICE_ID_ATI_264VV 0x5656 +/* Rage128 GL */ +#define PCI_DEVICE_ID_ATI_RAGE128_RE 0x5245 +#define PCI_DEVICE_ID_ATI_RAGE128_RF 0x5246 +#define PCI_DEVICE_ID_ATI_RAGE128_RG 0x5247 +/* Rage128 VR */ +#define PCI_DEVICE_ID_ATI_RAGE128_RK 0x524b +#define PCI_DEVICE_ID_ATI_RAGE128_RL 0x524c +#define PCI_DEVICE_ID_ATI_RAGE128_SE 0x5345 +#define PCI_DEVICE_ID_ATI_RAGE128_SF 0x5346 +#define PCI_DEVICE_ID_ATI_RAGE128_SG 0x5347 +#define PCI_DEVICE_ID_ATI_RAGE128_SH 0x5348 +#define PCI_DEVICE_ID_ATI_RAGE128_SK 0x534b +#define PCI_DEVICE_ID_ATI_RAGE128_SL 0x534c +#define PCI_DEVICE_ID_ATI_RAGE128_SM 0x534d +#define PCI_DEVICE_ID_ATI_RAGE128_SN 0x534e +/* Rage128 Ultra */ +#define PCI_DEVICE_ID_ATI_RAGE128_TF 0x5446 +#define PCI_DEVICE_ID_ATI_RAGE128_TL 0x544c +#define PCI_DEVICE_ID_ATI_RAGE128_TR 0x5452 +#define PCI_DEVICE_ID_ATI_RAGE128_TS 0x5453 +#define PCI_DEVICE_ID_ATI_RAGE128_TT 0x5454 +#define PCI_DEVICE_ID_ATI_RAGE128_TU 0x5455 +/* Rage128 M3 */ +#define PCI_DEVICE_ID_ATI_RAGE128_LE 0x4c45 +#define PCI_DEVICE_ID_ATI_RAGE128_LF 0x4c46 +/* Rage128 M4 */ +#define PCI_DEVICE_ID_ATI_RAGE128_MF 0x4d46 +#define PCI_DEVICE_ID_ATI_RAGE128_ML 0x4d4c +/* Rage128 Pro GL */ +#define PCI_DEVICE_ID_ATI_RAGE128_PA 0x5041 +#define PCI_DEVICE_ID_ATI_RAGE128_PB 0x5042 +#define PCI_DEVICE_ID_ATI_RAGE128_PC 0x5043 +#define PCI_DEVICE_ID_ATI_RAGE128_PD 0x5044 +#define PCI_DEVICE_ID_ATI_RAGE128_PE 0x5045 +#define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046 +/* Rage128 Pro VR */ +#define PCI_DEVICE_ID_ATI_RAGE128_PG 0x5047 +#define PCI_DEVICE_ID_ATI_RAGE128_PH 0x5048 +#define PCI_DEVICE_ID_ATI_RAGE128_PI 0x5049 +#define PCI_DEVICE_ID_ATI_RAGE128_PJ 0x504A +#define PCI_DEVICE_ID_ATI_RAGE128_PK 0x504B +#define PCI_DEVICE_ID_ATI_RAGE128_PL 0x504C +#define PCI_DEVICE_ID_ATI_RAGE128_PM 0x504D +#define PCI_DEVICE_ID_ATI_RAGE128_PN 0x504E +#define PCI_DEVICE_ID_ATI_RAGE128_PO 0x504F +#define PCI_DEVICE_ID_ATI_RAGE128_PP 0x5050 +#define PCI_DEVICE_ID_ATI_RAGE128_PQ 0x5051 +#define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052 
+#define PCI_DEVICE_ID_ATI_RAGE128_PS 0x5053 +#define PCI_DEVICE_ID_ATI_RAGE128_PT 0x5054 +#define PCI_DEVICE_ID_ATI_RAGE128_PU 0x5055 +#define PCI_DEVICE_ID_ATI_RAGE128_PV 0x5056 +#define PCI_DEVICE_ID_ATI_RAGE128_PW 0x5057 +#define PCI_DEVICE_ID_ATI_RAGE128_PX 0x5058 +/* Rage128 M4 */ +/* Radeon R100 */ +#define PCI_DEVICE_ID_ATI_RADEON_QD 0x5144 +#define PCI_DEVICE_ID_ATI_RADEON_QE 0x5145 +#define PCI_DEVICE_ID_ATI_RADEON_QF 0x5146 +#define PCI_DEVICE_ID_ATI_RADEON_QG 0x5147 +/* Radeon RV100 (VE) */ +#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 +#define PCI_DEVICE_ID_ATI_RADEON_QZ 0x515a +/* Radeon R200 (8500) */ +#define PCI_DEVICE_ID_ATI_RADEON_QL 0x514c +#define PCI_DEVICE_ID_ATI_RADEON_QN 0x514e +#define PCI_DEVICE_ID_ATI_RADEON_QO 0x514f +#define PCI_DEVICE_ID_ATI_RADEON_Ql 0x516c +#define PCI_DEVICE_ID_ATI_RADEON_BB 0x4242 +/* Radeon R200 (9100) */ +#define PCI_DEVICE_ID_ATI_RADEON_QM 0x514d +/* Radeon RV200 (7500) */ +#define PCI_DEVICE_ID_ATI_RADEON_QW 0x5157 +#define PCI_DEVICE_ID_ATI_RADEON_QX 0x5158 +/* Radeon NV-100 */ +/* Radeon RV250 (9000) */ +#define PCI_DEVICE_ID_ATI_RADEON_Id 0x4964 +#define PCI_DEVICE_ID_ATI_RADEON_Ie 0x4965 +#define PCI_DEVICE_ID_ATI_RADEON_If 0x4966 +#define PCI_DEVICE_ID_ATI_RADEON_Ig 0x4967 +/* Radeon RV280 (9200) */ +#define PCI_DEVICE_ID_ATI_RADEON_Ya 0x5961 +#define PCI_DEVICE_ID_ATI_RADEON_Yd 0x5964 +/* Radeon R300 (9500) */ +/* Radeon R300 (9700) */ +#define PCI_DEVICE_ID_ATI_RADEON_ND 0x4e44 +#define PCI_DEVICE_ID_ATI_RADEON_NE 0x4e45 +#define PCI_DEVICE_ID_ATI_RADEON_NF 0x4e46 +#define PCI_DEVICE_ID_ATI_RADEON_NG 0x4e47 +/* Radeon R350 (9800) */ +/* Radeon RV350 (9600) */ +/* Radeon M6 */ +#define PCI_DEVICE_ID_ATI_RADEON_LY 0x4c59 +#define PCI_DEVICE_ID_ATI_RADEON_LZ 0x4c5a +/* Radeon M7 */ +#define PCI_DEVICE_ID_ATI_RADEON_LW 0x4c57 +#define PCI_DEVICE_ID_ATI_RADEON_LX 0x4c58 +/* Radeon M9 */ +#define PCI_DEVICE_ID_ATI_RADEON_Ld 0x4c64 +#define PCI_DEVICE_ID_ATI_RADEON_Le 0x4c65 +#define PCI_DEVICE_ID_ATI_RADEON_Lf 0x4c66 +#define PCI_DEVICE_ID_ATI_RADEON_Lg 0x4c67 +/* Radeon */ +/* RadeonIGP */ +#define PCI_DEVICE_ID_ATI_RS100 0xcab0 +#define PCI_DEVICE_ID_ATI_RS200 0xcab2 +#define PCI_DEVICE_ID_ATI_RS200_B 0xcbb2 +#define PCI_DEVICE_ID_ATI_RS250 0xcab3 +#define PCI_DEVICE_ID_ATI_RS300_100 0x5830 +#define PCI_DEVICE_ID_ATI_RS300_133 0x5831 +#define PCI_DEVICE_ID_ATI_RS300_166 0x5832 +#define PCI_DEVICE_ID_ATI_RS300_200 0x5833 +#define PCI_DEVICE_ID_ATI_RS350_100 0x7830 +#define PCI_DEVICE_ID_ATI_RS350_133 0x7831 +#define PCI_DEVICE_ID_ATI_RS350_166 0x7832 +#define PCI_DEVICE_ID_ATI_RS350_200 0x7833 +#define PCI_DEVICE_ID_ATI_RS400_100 0x5a30 +#define PCI_DEVICE_ID_ATI_RS400_133 0x5a31 +#define PCI_DEVICE_ID_ATI_RS400_166 0x5a32 +#define PCI_DEVICE_ID_ATI_RS400_200 0x5a33 +#define PCI_DEVICE_ID_ATI_RS480 0x5950 +/* ATI IXP Chipset */ +#define PCI_DEVICE_ID_ATI_IXP200_IDE 0x4349 +#define PCI_DEVICE_ID_ATI_IXP200_SMBUS 0x4353 +#define PCI_DEVICE_ID_ATI_IXP300_SMBUS 0x4363 +#define PCI_DEVICE_ID_ATI_IXP300_IDE 0x4369 +#define PCI_DEVICE_ID_ATI_IXP300_SATA 0x436e +#define PCI_DEVICE_ID_ATI_IXP400_SMBUS 0x4372 +#define PCI_DEVICE_ID_ATI_IXP400_IDE 0x4376 +#define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379 +#define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a +#define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380 +#define PCI_DEVICE_ID_ATI_SBX00_SMBUS 0x4385 +#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c +#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390 +#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c + +#define PCI_VENDOR_ID_VLSI 0x1004 +#define PCI_DEVICE_ID_VLSI_82C592 
0x0005 +#define PCI_DEVICE_ID_VLSI_82C593 0x0006 +#define PCI_DEVICE_ID_VLSI_82C594 0x0007 +#define PCI_DEVICE_ID_VLSI_82C597 0x0009 +#define PCI_DEVICE_ID_VLSI_82C541 0x000c +#define PCI_DEVICE_ID_VLSI_82C543 0x000d +#define PCI_DEVICE_ID_VLSI_82C532 0x0101 +#define PCI_DEVICE_ID_VLSI_82C534 0x0102 +#define PCI_DEVICE_ID_VLSI_82C535 0x0104 +#define PCI_DEVICE_ID_VLSI_82C147 0x0105 +#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 + +#define PCI_VENDOR_ID_ADL 0x1005 +#define PCI_DEVICE_ID_ADL_2301 0x2301 + +#define PCI_VENDOR_ID_NS 0x100b +#define PCI_DEVICE_ID_NS_87415 0x0002 +#define PCI_DEVICE_ID_NS_87560_LIO 0x000e +#define PCI_DEVICE_ID_NS_87560_USB 0x0012 +#define PCI_DEVICE_ID_NS_83815 0x0020 +#define PCI_DEVICE_ID_NS_83820 0x0022 +#define PCI_DEVICE_ID_NS_CS5535_ISA 0x002b +#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d +#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e +#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f +#define PCI_DEVICE_ID_NS_GX_VIDEO 0x0030 +#define PCI_DEVICE_ID_NS_SATURN 0x0035 +#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500 +#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501 +#define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502 +#define PCI_DEVICE_ID_NS_SCx200_AUDIO 0x0503 +#define PCI_DEVICE_ID_NS_SCx200_VIDEO 0x0504 +#define PCI_DEVICE_ID_NS_SCx200_XBUS 0x0505 +#define PCI_DEVICE_ID_NS_SC1100_BRIDGE 0x0510 +#define PCI_DEVICE_ID_NS_SC1100_SMI 0x0511 +#define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515 +#define PCI_DEVICE_ID_NS_87410 0xd001 + +#define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE 0x0028 + +#define PCI_VENDOR_ID_TSENG 0x100c +#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 +#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205 +#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206 +#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207 +#define PCI_DEVICE_ID_TSENG_ET6000 0x3208 + +#define PCI_VENDOR_ID_WEITEK 0x100e +#define PCI_DEVICE_ID_WEITEK_P9000 0x9001 +#define PCI_DEVICE_ID_WEITEK_P9100 0x9100 + +#define PCI_VENDOR_ID_DEC 0x1011 +#define PCI_DEVICE_ID_DEC_BRD 0x0001 +#define PCI_DEVICE_ID_DEC_TULIP 0x0002 +#define PCI_DEVICE_ID_DEC_TGA 0x0004 +#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009 +#define PCI_DEVICE_ID_DEC_TGA2 0x000D +#define PCI_DEVICE_ID_DEC_FDDI 0x000F +#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014 +#define PCI_DEVICE_ID_DEC_21142 0x0019 +#define PCI_DEVICE_ID_DEC_21052 0x0021 +#define PCI_DEVICE_ID_DEC_21150 0x0022 +#define PCI_DEVICE_ID_DEC_21152 0x0024 +#define PCI_DEVICE_ID_DEC_21153 0x0025 +#define PCI_DEVICE_ID_DEC_21154 0x0026 +#define PCI_DEVICE_ID_DEC_21285 0x1065 +#define PCI_DEVICE_ID_COMPAQ_42XX 0x0046 + +#define PCI_VENDOR_ID_CIRRUS 0x1013 +#define PCI_DEVICE_ID_CIRRUS_7548 0x0038 +#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0 +#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4 +#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8 +#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac +#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8 +#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc +#define PCI_DEVICE_ID_CIRRUS_5462 0x00d0 +#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4 +#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6 +#define PCI_DEVICE_ID_CIRRUS_6729 0x1100 +#define PCI_DEVICE_ID_CIRRUS_6832 0x1110 +#define PCI_DEVICE_ID_CIRRUS_7543 0x1202 +#define PCI_DEVICE_ID_CIRRUS_4610 0x6001 +#define PCI_DEVICE_ID_CIRRUS_4612 0x6003 +#define PCI_DEVICE_ID_CIRRUS_4615 0x6004 + +#define PCI_VENDOR_ID_IBM 0x1014 +#define PCI_DEVICE_ID_IBM_TR 0x0018 +#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e +#define PCI_DEVICE_ID_IBM_CPC710_PCI64 0x00fc +#define PCI_DEVICE_ID_IBM_SNIPE 0x0180 +#define PCI_DEVICE_ID_IBM_CITRINE 0x028C +#define PCI_DEVICE_ID_IBM_GEMSTONE 0xB166 +#define 
PCI_DEVICE_ID_IBM_OBSIDIAN 0x02BD +#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1 0x0031 +#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219 +#define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A +#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251 +#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 +#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 + +#define PCI_SUBVENDOR_ID_IBM 0x1014 +#define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT 0x03d4 + +#define PCI_VENDOR_ID_UNISYS 0x1018 +#define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C + +#define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */ +#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005 + +#define PCI_VENDOR_ID_WD 0x101c +#define PCI_DEVICE_ID_WD_90C 0xc24a + +#define PCI_VENDOR_ID_AMI 0x101e +#define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960 +#define PCI_DEVICE_ID_AMI_MEGARAID 0x9010 +#define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060 + +#define PCI_VENDOR_ID_AMD 0x1022 +#define PCI_DEVICE_ID_AMD_K8_NB 0x1100 +#define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP 0x1101 +#define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL 0x1102 +#define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103 +#define PCI_DEVICE_ID_AMD_10H_NB_HT 0x1200 +#define PCI_DEVICE_ID_AMD_10H_NB_MAP 0x1201 +#define PCI_DEVICE_ID_AMD_10H_NB_DRAM 0x1202 +#define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203 +#define PCI_DEVICE_ID_AMD_10H_NB_LINK 0x1204 +#define PCI_DEVICE_ID_AMD_11H_NB_HT 0x1300 +#define PCI_DEVICE_ID_AMD_11H_NB_MAP 0x1301 +#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 +#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 +#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 +#define PCI_DEVICE_ID_AMD_LANCE 0x2000 +#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 +#define PCI_DEVICE_ID_AMD_SCSI 0x2020 +#define PCI_DEVICE_ID_AMD_SERENADE 0x36c0 +#define PCI_DEVICE_ID_AMD_FE_GATE_7006 0x7006 +#define PCI_DEVICE_ID_AMD_FE_GATE_7007 0x7007 +#define PCI_DEVICE_ID_AMD_FE_GATE_700C 0x700C +#define PCI_DEVICE_ID_AMD_FE_GATE_700E 0x700E +#define PCI_DEVICE_ID_AMD_COBRA_7401 0x7401 +#define PCI_DEVICE_ID_AMD_VIPER_7409 0x7409 +#define PCI_DEVICE_ID_AMD_VIPER_740B 0x740B +#define PCI_DEVICE_ID_AMD_VIPER_7410 0x7410 +#define PCI_DEVICE_ID_AMD_VIPER_7411 0x7411 +#define PCI_DEVICE_ID_AMD_VIPER_7413 0x7413 +#define PCI_DEVICE_ID_AMD_VIPER_7440 0x7440 +#define PCI_DEVICE_ID_AMD_OPUS_7441 0x7441 +#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 +#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 +#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 +#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 +#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 +#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 +#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a +#define PCI_DEVICE_ID_AMD_8111_SMBUS 0x746b +#define PCI_DEVICE_ID_AMD_8111_AUDIO 0x746d +#define PCI_DEVICE_ID_AMD_8151_0 0x7454 +#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 +#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 +#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 +#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b +#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F +#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 +#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 +#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093 +#define PCI_DEVICE_ID_AMD_CS5536_OHC 0x2094 +#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095 +#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096 +#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097 +#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A +#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081 +#define PCI_DEVICE_ID_AMD_LX_AES 0x2082 +#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c +#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800 + +#define 
PCI_VENDOR_ID_TRIDENT 0x1023 +#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000 +#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001 +#define PCI_DEVICE_ID_TRIDENT_9320 0x9320 +#define PCI_DEVICE_ID_TRIDENT_9388 0x9388 +#define PCI_DEVICE_ID_TRIDENT_9397 0x9397 +#define PCI_DEVICE_ID_TRIDENT_939A 0x939A +#define PCI_DEVICE_ID_TRIDENT_9520 0x9520 +#define PCI_DEVICE_ID_TRIDENT_9525 0x9525 +#define PCI_DEVICE_ID_TRIDENT_9420 0x9420 +#define PCI_DEVICE_ID_TRIDENT_9440 0x9440 +#define PCI_DEVICE_ID_TRIDENT_9660 0x9660 +#define PCI_DEVICE_ID_TRIDENT_9750 0x9750 +#define PCI_DEVICE_ID_TRIDENT_9850 0x9850 +#define PCI_DEVICE_ID_TRIDENT_9880 0x9880 +#define PCI_DEVICE_ID_TRIDENT_8400 0x8400 +#define PCI_DEVICE_ID_TRIDENT_8420 0x8420 +#define PCI_DEVICE_ID_TRIDENT_8500 0x8500 + +#define PCI_VENDOR_ID_AI 0x1025 +#define PCI_DEVICE_ID_AI_M1435 0x1435 + +#define PCI_VENDOR_ID_DELL 0x1028 +#define PCI_DEVICE_ID_DELL_RACIII 0x0008 +#define PCI_DEVICE_ID_DELL_RAC4 0x0012 +#define PCI_DEVICE_ID_DELL_PERC5 0x0015 + +#define PCI_VENDOR_ID_MATROX 0x102B +#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 +#define PCI_DEVICE_ID_MATROX_MIL 0x0519 +#define PCI_DEVICE_ID_MATROX_MYS 0x051A +#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b +#define PCI_DEVICE_ID_MATROX_MYS_AGP 0x051e +#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f +#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10 +#define PCI_DEVICE_ID_MATROX_G100_MM 0x1000 +#define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001 +#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520 +#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521 +#define PCI_DEVICE_ID_MATROX_G400 0x0525 +#define PCI_DEVICE_ID_MATROX_G200EV_PCI 0x0530 +#define PCI_DEVICE_ID_MATROX_G550 0x2527 +#define PCI_DEVICE_ID_MATROX_VIA 0x4536 + +#define PCI_VENDOR_ID_CT 0x102c +#define PCI_DEVICE_ID_CT_69000 0x00c0 +#define PCI_DEVICE_ID_CT_65545 0x00d8 +#define PCI_DEVICE_ID_CT_65548 0x00dc +#define PCI_DEVICE_ID_CT_65550 0x00e0 +#define PCI_DEVICE_ID_CT_65554 0x00e4 +#define PCI_DEVICE_ID_CT_65555 0x00e5 + +#define PCI_VENDOR_ID_MIRO 0x1031 +#define PCI_DEVICE_ID_MIRO_36050 0x5601 +#define PCI_DEVICE_ID_MIRO_DC10PLUS 0x7efe +#define PCI_DEVICE_ID_MIRO_DC30PLUS 0xd801 + +#define PCI_VENDOR_ID_NEC 0x1033 +#define PCI_DEVICE_ID_NEC_CBUS_1 0x0001 /* PCI-Cbus Bridge */ +#define PCI_DEVICE_ID_NEC_LOCAL 0x0002 /* Local Bridge */ +#define PCI_DEVICE_ID_NEC_ATM 0x0003 /* ATM LAN Controller */ +#define PCI_DEVICE_ID_NEC_R4000 0x0004 /* R4000 Bridge */ +#define PCI_DEVICE_ID_NEC_486 0x0005 /* 486 Like Peripheral Bus Bridge */ +#define PCI_DEVICE_ID_NEC_ACCEL_1 0x0006 /* Graphic Accelerator */ +#define PCI_DEVICE_ID_NEC_UXBUS 0x0007 /* UX-Bus Bridge */ +#define PCI_DEVICE_ID_NEC_ACCEL_2 0x0008 /* Graphic Accelerator */ +#define PCI_DEVICE_ID_NEC_GRAPH 0x0009 /* PCI-CoreGraph Bridge */ +#define PCI_DEVICE_ID_NEC_VL 0x0016 /* PCI-VL Bridge */ +#define PCI_DEVICE_ID_NEC_STARALPHA2 0x002c /* STAR ALPHA2 */ +#define PCI_DEVICE_ID_NEC_CBUS_2 0x002d /* PCI-Cbus Bridge */ +#define PCI_DEVICE_ID_NEC_USB 0x0035 /* PCI-USB Host */ +#define PCI_DEVICE_ID_NEC_CBUS_3 0x003b +#define PCI_DEVICE_ID_NEC_NAPCCARD 0x003e +#define PCI_DEVICE_ID_NEC_PCX2 0x0046 /* PowerVR */ +#define PCI_DEVICE_ID_NEC_VRC5476 0x009b +#define PCI_DEVICE_ID_NEC_VRC4173 0x00a5 +#define PCI_DEVICE_ID_NEC_VRC5477_AC97 0x00a6 +#define PCI_DEVICE_ID_NEC_PC9821CS01 0x800c /* PC-9821-CS01 */ +#define PCI_DEVICE_ID_NEC_PC9821NRB06 0x800d /* PC-9821NR-B06 */ + +#define PCI_VENDOR_ID_FD 0x1036 +#define PCI_DEVICE_ID_FD_36C70 0x0000 + +#define PCI_VENDOR_ID_SI 0x1039 +#define PCI_DEVICE_ID_SI_5591_AGP 0x0001 +#define 
PCI_DEVICE_ID_SI_6202 0x0002 +#define PCI_DEVICE_ID_SI_503 0x0008 +#define PCI_DEVICE_ID_SI_ACPI 0x0009 +#define PCI_DEVICE_ID_SI_SMBUS 0x0016 +#define PCI_DEVICE_ID_SI_LPC 0x0018 +#define PCI_DEVICE_ID_SI_5597_VGA 0x0200 +#define PCI_DEVICE_ID_SI_6205 0x0205 +#define PCI_DEVICE_ID_SI_501 0x0406 +#define PCI_DEVICE_ID_SI_496 0x0496 +#define PCI_DEVICE_ID_SI_300 0x0300 +#define PCI_DEVICE_ID_SI_315H 0x0310 +#define PCI_DEVICE_ID_SI_315 0x0315 +#define PCI_DEVICE_ID_SI_315PRO 0x0325 +#define PCI_DEVICE_ID_SI_530 0x0530 +#define PCI_DEVICE_ID_SI_540 0x0540 +#define PCI_DEVICE_ID_SI_550 0x0550 +#define PCI_DEVICE_ID_SI_540_VGA 0x5300 +#define PCI_DEVICE_ID_SI_550_VGA 0x5315 +#define PCI_DEVICE_ID_SI_620 0x0620 +#define PCI_DEVICE_ID_SI_630 0x0630 +#define PCI_DEVICE_ID_SI_633 0x0633 +#define PCI_DEVICE_ID_SI_635 0x0635 +#define PCI_DEVICE_ID_SI_640 0x0640 +#define PCI_DEVICE_ID_SI_645 0x0645 +#define PCI_DEVICE_ID_SI_646 0x0646 +#define PCI_DEVICE_ID_SI_648 0x0648 +#define PCI_DEVICE_ID_SI_650 0x0650 +#define PCI_DEVICE_ID_SI_651 0x0651 +#define PCI_DEVICE_ID_SI_655 0x0655 +#define PCI_DEVICE_ID_SI_661 0x0661 +#define PCI_DEVICE_ID_SI_730 0x0730 +#define PCI_DEVICE_ID_SI_733 0x0733 +#define PCI_DEVICE_ID_SI_630_VGA 0x6300 +#define PCI_DEVICE_ID_SI_735 0x0735 +#define PCI_DEVICE_ID_SI_740 0x0740 +#define PCI_DEVICE_ID_SI_741 0x0741 +#define PCI_DEVICE_ID_SI_745 0x0745 +#define PCI_DEVICE_ID_SI_746 0x0746 +#define PCI_DEVICE_ID_SI_755 0x0755 +#define PCI_DEVICE_ID_SI_760 0x0760 +#define PCI_DEVICE_ID_SI_900 0x0900 +#define PCI_DEVICE_ID_SI_961 0x0961 +#define PCI_DEVICE_ID_SI_962 0x0962 +#define PCI_DEVICE_ID_SI_963 0x0963 +#define PCI_DEVICE_ID_SI_965 0x0965 +#define PCI_DEVICE_ID_SI_966 0x0966 +#define PCI_DEVICE_ID_SI_968 0x0968 +#define PCI_DEVICE_ID_SI_1180 0x1180 +#define PCI_DEVICE_ID_SI_5511 0x5511 +#define PCI_DEVICE_ID_SI_5513 0x5513 +#define PCI_DEVICE_ID_SI_5517 0x5517 +#define PCI_DEVICE_ID_SI_5518 0x5518 +#define PCI_DEVICE_ID_SI_5571 0x5571 +#define PCI_DEVICE_ID_SI_5581 0x5581 +#define PCI_DEVICE_ID_SI_5582 0x5582 +#define PCI_DEVICE_ID_SI_5591 0x5591 +#define PCI_DEVICE_ID_SI_5596 0x5596 +#define PCI_DEVICE_ID_SI_5597 0x5597 +#define PCI_DEVICE_ID_SI_5598 0x5598 +#define PCI_DEVICE_ID_SI_5600 0x5600 +#define PCI_DEVICE_ID_SI_7012 0x7012 +#define PCI_DEVICE_ID_SI_7013 0x7013 +#define PCI_DEVICE_ID_SI_7016 0x7016 +#define PCI_DEVICE_ID_SI_7018 0x7018 + +#define PCI_VENDOR_ID_HP 0x103c +#define PCI_DEVICE_ID_HP_VISUALIZE_EG 0x1005 +#define PCI_DEVICE_ID_HP_VISUALIZE_FX6 0x1006 +#define PCI_DEVICE_ID_HP_VISUALIZE_FX4 0x1008 +#define PCI_DEVICE_ID_HP_VISUALIZE_FX2 0x100a +#define PCI_DEVICE_ID_HP_TACHYON 0x1028 +#define PCI_DEVICE_ID_HP_TACHLITE 0x1029 +#define PCI_DEVICE_ID_HP_J2585A 0x1030 +#define PCI_DEVICE_ID_HP_J2585B 0x1031 +#define PCI_DEVICE_ID_HP_J2973A 0x1040 +#define PCI_DEVICE_ID_HP_J2970A 0x1042 +#define PCI_DEVICE_ID_HP_DIVA 0x1048 +#define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049 +#define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A +#define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B +#define PCI_DEVICE_ID_HP_REO_IOC 0x10f1 +#define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b +#define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223 +#define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226 +#define PCI_DEVICE_ID_HP_DIVA_POWERBAR 0x1227 +#define PCI_DEVICE_ID_HP_ZX1_IOC 0x122a +#define PCI_DEVICE_ID_HP_PCIX_LBA 0x122e +#define PCI_DEVICE_ID_HP_SX1000_IOC 0x127c +#define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282 +#define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290 +#define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301 +#define 
PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a +#define PCI_DEVICE_ID_HP_CISSA 0x3220 +#define PCI_DEVICE_ID_HP_CISSC 0x3230 +#define PCI_DEVICE_ID_HP_CISSD 0x3238 +#define PCI_DEVICE_ID_HP_CISSE 0x323a +#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 + +#define PCI_VENDOR_ID_PCTECH 0x1042 +#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000 +#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001 +#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020 + +#define PCI_VENDOR_ID_ASUSTEK 0x1043 +#define PCI_DEVICE_ID_ASUSTEK_0675 0x0675 + +#define PCI_VENDOR_ID_DPT 0x1044 +#define PCI_DEVICE_ID_DPT 0xa400 + +#define PCI_VENDOR_ID_OPTI 0x1045 +#define PCI_DEVICE_ID_OPTI_82C558 0xc558 +#define PCI_DEVICE_ID_OPTI_82C621 0xc621 +#define PCI_DEVICE_ID_OPTI_82C700 0xc700 +#define PCI_DEVICE_ID_OPTI_82C825 0xd568 + +#define PCI_VENDOR_ID_ELSA 0x1048 +#define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000 +#define PCI_DEVICE_ID_ELSA_QS3000 0x3000 + +#define PCI_VENDOR_ID_BUSLOGIC 0x104B +#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140 +#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040 +#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130 + +#define PCI_VENDOR_ID_TI 0x104c +#define PCI_DEVICE_ID_TI_TVP4020 0x3d07 +#define PCI_DEVICE_ID_TI_4450 0x8011 +#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 +#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033 +#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 +#define PCI_DEVICE_ID_TI_X515 0x8036 +#define PCI_DEVICE_ID_TI_XX12 0x8039 +#define PCI_DEVICE_ID_TI_XX12_FM 0x803b +#define PCI_DEVICE_ID_TI_XIO2000A 0x8231 +#define PCI_DEVICE_ID_TI_1130 0xac12 +#define PCI_DEVICE_ID_TI_1031 0xac13 +#define PCI_DEVICE_ID_TI_1131 0xac15 +#define PCI_DEVICE_ID_TI_1250 0xac16 +#define PCI_DEVICE_ID_TI_1220 0xac17 +#define PCI_DEVICE_ID_TI_1221 0xac19 +#define PCI_DEVICE_ID_TI_1210 0xac1a +#define PCI_DEVICE_ID_TI_1450 0xac1b +#define PCI_DEVICE_ID_TI_1225 0xac1c +#define PCI_DEVICE_ID_TI_1251A 0xac1d +#define PCI_DEVICE_ID_TI_1211 0xac1e +#define PCI_DEVICE_ID_TI_1251B 0xac1f +#define PCI_DEVICE_ID_TI_4410 0xac41 +#define PCI_DEVICE_ID_TI_4451 0xac42 +#define PCI_DEVICE_ID_TI_4510 0xac44 +#define PCI_DEVICE_ID_TI_4520 0xac46 +#define PCI_DEVICE_ID_TI_7510 0xac47 +#define PCI_DEVICE_ID_TI_7610 0xac48 +#define PCI_DEVICE_ID_TI_7410 0xac49 +#define PCI_DEVICE_ID_TI_1410 0xac50 +#define PCI_DEVICE_ID_TI_1420 0xac51 +#define PCI_DEVICE_ID_TI_1451A 0xac52 +#define PCI_DEVICE_ID_TI_1620 0xac54 +#define PCI_DEVICE_ID_TI_1520 0xac55 +#define PCI_DEVICE_ID_TI_1510 0xac56 +#define PCI_DEVICE_ID_TI_X620 0xac8d +#define PCI_DEVICE_ID_TI_X420 0xac8e +#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f + +#define PCI_VENDOR_ID_SONY 0x104d + +/* Winbond have two vendor IDs! 
See 0x10ad as well */ +#define PCI_VENDOR_ID_WINBOND2 0x1050 +#define PCI_DEVICE_ID_WINBOND2_89C940F 0x5a5a +#define PCI_DEVICE_ID_WINBOND2_6692 0x6692 + +#define PCI_VENDOR_ID_ANIGMA 0x1051 +#define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100 + +#define PCI_VENDOR_ID_EFAR 0x1055 +#define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130 +#define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463 + +#define PCI_VENDOR_ID_MOTOROLA 0x1057 +#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001 +#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002 +#define PCI_DEVICE_ID_MOTOROLA_MPC107 0x0004 +#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801 +#define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802 +#define PCI_DEVICE_ID_MOTOROLA_HAWK 0x4803 +#define PCI_DEVICE_ID_MOTOROLA_HARRIER 0x480b +#define PCI_DEVICE_ID_MOTOROLA_MPC5200 0x5803 +#define PCI_DEVICE_ID_MOTOROLA_MPC5200B 0x5809 + +#define PCI_VENDOR_ID_PROMISE 0x105a +#define PCI_DEVICE_ID_PROMISE_20265 0x0d30 +#define PCI_DEVICE_ID_PROMISE_20267 0x4d30 +#define PCI_DEVICE_ID_PROMISE_20246 0x4d33 +#define PCI_DEVICE_ID_PROMISE_20262 0x4d38 +#define PCI_DEVICE_ID_PROMISE_20263 0x0D38 +#define PCI_DEVICE_ID_PROMISE_20268 0x4d68 +#define PCI_DEVICE_ID_PROMISE_20269 0x4d69 +#define PCI_DEVICE_ID_PROMISE_20270 0x6268 +#define PCI_DEVICE_ID_PROMISE_20271 0x6269 +#define PCI_DEVICE_ID_PROMISE_20275 0x1275 +#define PCI_DEVICE_ID_PROMISE_20276 0x5275 +#define PCI_DEVICE_ID_PROMISE_20277 0x7275 + +#define PCI_VENDOR_ID_FOXCONN 0x105b + +#define PCI_VENDOR_ID_UMC 0x1060 +#define PCI_DEVICE_ID_UMC_UM8673F 0x0101 +#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a +#define PCI_DEVICE_ID_UMC_UM8886A 0x886a + +#define PCI_VENDOR_ID_PICOPOWER 0x1066 +#define PCI_DEVICE_ID_PICOPOWER_PT86C523 0x0002 +#define PCI_DEVICE_ID_PICOPOWER_PT86C523BBP 0x8002 + +#define PCI_VENDOR_ID_MYLEX 0x1069 +#define PCI_DEVICE_ID_MYLEX_DAC960_P 0x0001 +#define PCI_DEVICE_ID_MYLEX_DAC960_PD 0x0002 +#define PCI_DEVICE_ID_MYLEX_DAC960_PG 0x0010 +#define PCI_DEVICE_ID_MYLEX_DAC960_LA 0x0020 +#define PCI_DEVICE_ID_MYLEX_DAC960_LP 0x0050 +#define PCI_DEVICE_ID_MYLEX_DAC960_BA 0xBA56 +#define PCI_DEVICE_ID_MYLEX_DAC960_GEM 0xB166 + +#define PCI_VENDOR_ID_APPLE 0x106b +#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001 +#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e +#define PCI_DEVICE_ID_APPLE_UNI_N_FW 0x0018 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020 +#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021 +#define PCI_DEVICE_ID_APPLE_UNI_N_GMACP 0x0024 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d +#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e +#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032 +#define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033 +#define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034 +#define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b +#define PCI_DEVICE_ID_APPLE_K2_ATA100 0x0043 +#define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b +#define PCI_DEVICE_ID_APPLE_K2_GMAC 0x004c +#define PCI_DEVICE_ID_APPLE_SH_ATA 0x0050 +#define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051 +#define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058 +#define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059 +#define PCI_DEVICE_ID_APPLE_U4_PCIE 0x005b +#define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066 +#define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069 +#define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a +#define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b +#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645 + +#define PCI_VENDOR_ID_YAMAHA 0x1073 +#define PCI_DEVICE_ID_YAMAHA_724 0x0004 +#define PCI_DEVICE_ID_YAMAHA_724F 0x000d +#define PCI_DEVICE_ID_YAMAHA_740 0x000a +#define PCI_DEVICE_ID_YAMAHA_740C 0x000c +#define 
PCI_DEVICE_ID_YAMAHA_744 0x0010 +#define PCI_DEVICE_ID_YAMAHA_754 0x0012 + +#define PCI_VENDOR_ID_QLOGIC 0x1077 +#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016 +#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020 +#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080 +#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216 +#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240 +#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280 +#define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100 +#define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200 +#define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300 +#define PCI_DEVICE_ID_QLOGIC_ISP2312 0x2312 +#define PCI_DEVICE_ID_QLOGIC_ISP2322 0x2322 +#define PCI_DEVICE_ID_QLOGIC_ISP6312 0x6312 +#define PCI_DEVICE_ID_QLOGIC_ISP6322 0x6322 +#define PCI_DEVICE_ID_QLOGIC_ISP2422 0x2422 +#define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432 +#define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512 +#define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522 +#define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422 +#define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432 + +#define PCI_VENDOR_ID_CYRIX 0x1078 +#define PCI_DEVICE_ID_CYRIX_5510 0x0000 +#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001 +#define PCI_DEVICE_ID_CYRIX_5520 0x0002 +#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100 +#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102 +#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103 +#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104 + +#define PCI_VENDOR_ID_CONTAQ 0x1080 +#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693 + +#define PCI_VENDOR_ID_OLICOM 0x108d +#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012 +#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013 +#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 + +#define PCI_VENDOR_ID_SUN 0x108e +#define PCI_DEVICE_ID_SUN_EBUS 0x1000 +#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001 +#define PCI_DEVICE_ID_SUN_RIO_EBUS 0x1100 +#define PCI_DEVICE_ID_SUN_RIO_GEM 0x1101 +#define PCI_DEVICE_ID_SUN_RIO_1394 0x1102 +#define PCI_DEVICE_ID_SUN_RIO_USB 0x1103 +#define PCI_DEVICE_ID_SUN_GEM 0x2bad +#define PCI_DEVICE_ID_SUN_SIMBA 0x5000 +#define PCI_DEVICE_ID_SUN_PBM 0x8000 +#define PCI_DEVICE_ID_SUN_SCHIZO 0x8001 +#define PCI_DEVICE_ID_SUN_SABRE 0xa000 +#define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001 +#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 +#define PCI_DEVICE_ID_SUN_CASSINI 0xabba + +#define PCI_VENDOR_ID_NI 0x1093 +#define PCI_DEVICE_ID_NI_PCI2322 0xd130 +#define PCI_DEVICE_ID_NI_PCI2324 0xd140 +#define PCI_DEVICE_ID_NI_PCI2328 0xd150 +#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190 +#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0 +#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0 +#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0 +#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0 +#define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1 +#define PCI_DEVICE_ID_NI_PCI2322I 0xd250 +#define PCI_DEVICE_ID_NI_PCI2324I 0xd270 +#define PCI_DEVICE_ID_NI_PCI23216 0xd2b0 +#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080 +#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db +#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd +#define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df +#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2 +#define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4 +#define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6 +#define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7 +#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8 +#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea +#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec +#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee + +#define PCI_VENDOR_ID_CMD 0x1095 +#define PCI_DEVICE_ID_CMD_643 0x0643 +#define PCI_DEVICE_ID_CMD_646 0x0646 +#define PCI_DEVICE_ID_CMD_648 0x0648 +#define PCI_DEVICE_ID_CMD_649 0x0649 + +#define PCI_DEVICE_ID_SII_680 0x0680 +#define 
PCI_DEVICE_ID_SII_3112 0x3112 +#define PCI_DEVICE_ID_SII_1210SA 0x0240 + +#define PCI_VENDOR_ID_BROOKTREE 0x109e +#define PCI_DEVICE_ID_BROOKTREE_878 0x0878 +#define PCI_DEVICE_ID_BROOKTREE_879 0x0879 + +#define PCI_VENDOR_ID_SGI 0x10a9 +#define PCI_DEVICE_ID_SGI_IOC3 0x0003 +#define PCI_DEVICE_ID_SGI_LITHIUM 0x1002 +#define PCI_DEVICE_ID_SGI_IOC4 0x100a + +#define PCI_VENDOR_ID_WINBOND 0x10ad +#define PCI_DEVICE_ID_WINBOND_82C105 0x0105 +#define PCI_DEVICE_ID_WINBOND_83C553 0x0565 + +#define PCI_VENDOR_ID_PLX 0x10b5 +#define PCI_DEVICE_ID_PLX_R685 0x1030 +#define PCI_DEVICE_ID_PLX_ROMULUS 0x106a +#define PCI_DEVICE_ID_PLX_SPCOM800 0x1076 +#define PCI_DEVICE_ID_PLX_1077 0x1077 +#define PCI_DEVICE_ID_PLX_SPCOM200 0x1103 +#define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151 +#define PCI_DEVICE_ID_PLX_R753 0x1152 +#define PCI_DEVICE_ID_PLX_OLITEC 0x1187 +#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196 +#define PCI_DEVICE_ID_PLX_9030 0x9030 +#define PCI_DEVICE_ID_PLX_9050 0x9050 +#define PCI_DEVICE_ID_PLX_9056 0x9056 +#define PCI_DEVICE_ID_PLX_9080 0x9080 +#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 + +#define PCI_VENDOR_ID_MADGE 0x10b6 +#define PCI_DEVICE_ID_MADGE_MK2 0x0002 + +#define PCI_VENDOR_ID_3COM 0x10b7 +#define PCI_DEVICE_ID_3COM_3C985 0x0001 +#define PCI_DEVICE_ID_3COM_3C940 0x1700 +#define PCI_DEVICE_ID_3COM_3C339 0x3390 +#define PCI_DEVICE_ID_3COM_3C359 0x3590 +#define PCI_DEVICE_ID_3COM_3C940B 0x80eb +#define PCI_DEVICE_ID_3COM_3CR990 0x9900 +#define PCI_DEVICE_ID_3COM_3CR990_TX_95 0x9902 +#define PCI_DEVICE_ID_3COM_3CR990_TX_97 0x9903 +#define PCI_DEVICE_ID_3COM_3CR990B 0x9904 +#define PCI_DEVICE_ID_3COM_3CR990_FX 0x9905 +#define PCI_DEVICE_ID_3COM_3CR990SVR95 0x9908 +#define PCI_DEVICE_ID_3COM_3CR990SVR97 0x9909 +#define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a + +#define PCI_VENDOR_ID_AL 0x10b9 +#define PCI_DEVICE_ID_AL_M1533 0x1533 +#define PCI_DEVICE_ID_AL_M1535 0x1535 +#define PCI_DEVICE_ID_AL_M1541 0x1541 +#define PCI_DEVICE_ID_AL_M1563 0x1563 +#define PCI_DEVICE_ID_AL_M1621 0x1621 +#define PCI_DEVICE_ID_AL_M1631 0x1631 +#define PCI_DEVICE_ID_AL_M1632 0x1632 +#define PCI_DEVICE_ID_AL_M1641 0x1641 +#define PCI_DEVICE_ID_AL_M1644 0x1644 +#define PCI_DEVICE_ID_AL_M1647 0x1647 +#define PCI_DEVICE_ID_AL_M1651 0x1651 +#define PCI_DEVICE_ID_AL_M1671 0x1671 +#define PCI_DEVICE_ID_AL_M1681 0x1681 +#define PCI_DEVICE_ID_AL_M1683 0x1683 +#define PCI_DEVICE_ID_AL_M1689 0x1689 +#define PCI_DEVICE_ID_AL_M5219 0x5219 +#define PCI_DEVICE_ID_AL_M5228 0x5228 +#define PCI_DEVICE_ID_AL_M5229 0x5229 +#define PCI_DEVICE_ID_AL_M5451 0x5451 +#define PCI_DEVICE_ID_AL_M7101 0x7101 + +#define PCI_VENDOR_ID_NEOMAGIC 0x10c8 +#define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005 +#define PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO 0x8006 +#define PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO 0x8016 + +#define PCI_VENDOR_ID_TCONRAD 0x10da +#define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508 + +#define PCI_VENDOR_ID_NVIDIA 0x10de +#define PCI_DEVICE_ID_NVIDIA_TNT 0x0020 +#define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028 +#define PCI_DEVICE_ID_NVIDIA_UTNT2 0x0029 +#define PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN 0x002a +#define PCI_DEVICE_ID_NVIDIA_VTNT2 0x002C +#define PCI_DEVICE_ID_NVIDIA_UVTNT2 0x002D +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 0x0041 +#define 
PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE 0x0042 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x0045 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000 0x004E +#define PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS 0x0052 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055 +#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059 +#define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d +#define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064 +#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065 +#define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069 +#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a +#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084 +#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085 +#define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089 +#define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a +#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800 0x0098 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX 0x0099 +#define PCI_DEVICE_ID_NVIDIA_ITNT2 0x00A0 +#define PCI_DEVICE_ID_GEFORCE_6800A 0x00c1 +#define PCI_DEVICE_ID_GEFORCE_6800A_LE 0x00c2 +#define PCI_DEVICE_ID_GEFORCE_GO_6800 0x00c8 +#define PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA 0x00c9 +#define PCI_DEVICE_ID_QUADRO_FX_GO1400 0x00cc +#define PCI_DEVICE_ID_QUADRO_FX_1400 0x00ce +#define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5 +#define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9 +#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4 +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5 +#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea +#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1 0x00f1 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3 +#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9 +#define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 +#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX 0x0110 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2 0x0111 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO 0x0112 +#define PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR 0x0113 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT 0x0140 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600 0x0141 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL 0x0145 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540 0x014E +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200 0x014F +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS 0x0150 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2 0x0151 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA 0x0152 +#define PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO 0x0153 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE 0x0161 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200 0x0164 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250 0x0166 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1 0x0167 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1 0x0168 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460 0x0170 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440 0x0171 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420 0x0172 
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE 0x0173 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO 0x0174 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO 0x0175 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32 0x0176 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO 0x0177 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL 0x0178 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64 0x0179 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_200 0x017A +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL 0x017B +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL 0x017C +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16 0x017D +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X 0x0181 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X 0x0182 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X 0x0183 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_4000 0x0185 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO 0x0186 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO 0x0187 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL 0x0188 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC 0x0189 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS 0x018A +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL 0x018B +#define PCI_DEVICE_ID_NVIDIA_IGEFORCE2 0x01a0 +#define PCI_DEVICE_ID_NVIDIA_NFORCE 0x01a4 +#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc +#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1 +#define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2 0x0202 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_DDC 0x0203 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B 0x0211 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE 0x0212 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT 0x0215 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600 0x0250 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400 0x0251 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200 0x0253 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL 0x0258 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL 0x0259 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL 0x025B +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS 0x0264 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE 0x0265 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS 0x0368 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO 0x0286 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL 0x0288 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL 0x0289 +#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL 0x028C +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA 0x0301 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800 0x0302 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000 0x0308 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000 0x0309 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA 0x0311 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600 0x0312 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE 0x0314 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600 0x031A +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650 0x031B +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700 0x031C +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200 0x0320 
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA 0x0321 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1 0x0322 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE 0x0323 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200 0x0324 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250 0x0325 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500 0x0326 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100 0x0327 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32 0x0328 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200 0x0329 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI 0x032A +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500 0x032B +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300 0x032C +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100 0x032D +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA 0x0330 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900 0x0331 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT 0x0332 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA 0x0333 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT 0x0334 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000 0x0338 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700 0x033F +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA 0x0341 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700 0x0342 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE 0x0343 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE 0x0344 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1 0x0347 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348 +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C +#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E +#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS 0x0446 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS 0x0542 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 + +#define PCI_VENDOR_ID_IMS 0x10e0 +#define PCI_DEVICE_ID_IMS_TT128 0x9128 +#define PCI_DEVICE_ID_IMS_TT3D 0x9135 + +#define PCI_VENDOR_ID_INTERG 0x10ea +#define PCI_DEVICE_ID_INTERG_1682 0x1682 +#define PCI_DEVICE_ID_INTERG_2000 0x2000 +#define PCI_DEVICE_ID_INTERG_2010 0x2010 +#define PCI_DEVICE_ID_INTERG_5000 0x5000 +#define PCI_DEVICE_ID_INTERG_5050 0x5050 + +#define PCI_VENDOR_ID_REALTEK 0x10ec +#define PCI_DEVICE_ID_REALTEK_8139 0x8139 + +#define PCI_VENDOR_ID_XILINX 0x10ee +#define PCI_DEVICE_ID_RME_DIGI96 0x3fc0 +#define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1 +#define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2 +#define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3 +#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5 +#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6 + +#define PCI_VENDOR_ID_INIT 0x1101 + +#define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */ +#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002 +#define PCI_DEVICE_ID_CREATIVE_20K1 0x0005 +#define PCI_DEVICE_ID_CREATIVE_20K2 0x000b +#define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042 +#define 
PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043 +#define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000 + +#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */ +#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938 + +#define PCI_VENDOR_ID_TTI 0x1103 +#define PCI_DEVICE_ID_TTI_HPT343 0x0003 +#define PCI_DEVICE_ID_TTI_HPT366 0x0004 +#define PCI_DEVICE_ID_TTI_HPT372 0x0005 +#define PCI_DEVICE_ID_TTI_HPT302 0x0006 +#define PCI_DEVICE_ID_TTI_HPT371 0x0007 +#define PCI_DEVICE_ID_TTI_HPT374 0x0008 +#define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? */ + +#define PCI_VENDOR_ID_VIA 0x1106 +#define PCI_DEVICE_ID_VIA_8763_0 0x0198 +#define PCI_DEVICE_ID_VIA_8380_0 0x0204 +#define PCI_DEVICE_ID_VIA_3238_0 0x0238 +#define PCI_DEVICE_ID_VIA_PT880 0x0258 +#define PCI_DEVICE_ID_VIA_PT880ULTRA 0x0308 +#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259 +#define PCI_DEVICE_ID_VIA_3269_0 0x0269 +#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282 +#define PCI_DEVICE_ID_VIA_3296_0 0x0296 +#define PCI_DEVICE_ID_VIA_8363_0 0x0305 +#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314 +#define PCI_DEVICE_ID_VIA_P4M890 0x0327 +#define PCI_DEVICE_ID_VIA_VT3324 0x0324 +#define PCI_DEVICE_ID_VIA_VT3336 0x0336 +#define PCI_DEVICE_ID_VIA_VT3351 0x0351 +#define PCI_DEVICE_ID_VIA_VT3364 0x0364 +#define PCI_DEVICE_ID_VIA_8371_0 0x0391 +#define PCI_DEVICE_ID_VIA_6415 0x0415 +#define PCI_DEVICE_ID_VIA_8501_0 0x0501 +#define PCI_DEVICE_ID_VIA_82C561 0x0561 +#define PCI_DEVICE_ID_VIA_82C586_1 0x0571 +#define PCI_DEVICE_ID_VIA_82C576 0x0576 +#define PCI_DEVICE_ID_VIA_82C586_0 0x0586 +#define PCI_DEVICE_ID_VIA_82C596 0x0596 +#define PCI_DEVICE_ID_VIA_82C597_0 0x0597 +#define PCI_DEVICE_ID_VIA_82C598_0 0x0598 +#define PCI_DEVICE_ID_VIA_8601_0 0x0601 +#define PCI_DEVICE_ID_VIA_8605_0 0x0605 +#define PCI_DEVICE_ID_VIA_82C686 0x0686 +#define PCI_DEVICE_ID_VIA_82C691_0 0x0691 +#define PCI_DEVICE_ID_VIA_82C576_1 0x1571 +#define PCI_DEVICE_ID_VIA_82C586_2 0x3038 +#define PCI_DEVICE_ID_VIA_82C586_3 0x3040 +#define PCI_DEVICE_ID_VIA_82C596_3 0x3050 +#define PCI_DEVICE_ID_VIA_82C596B_3 0x3051 +#define PCI_DEVICE_ID_VIA_82C686_4 0x3057 +#define PCI_DEVICE_ID_VIA_82C686_5 0x3058 +#define PCI_DEVICE_ID_VIA_8233_5 0x3059 +#define PCI_DEVICE_ID_VIA_8233_0 0x3074 +#define PCI_DEVICE_ID_VIA_8633_0 0x3091 +#define PCI_DEVICE_ID_VIA_8367_0 0x3099 +#define PCI_DEVICE_ID_VIA_8653_0 0x3101 +#define PCI_DEVICE_ID_VIA_8622 0x3102 +#define PCI_DEVICE_ID_VIA_8235_USB_2 0x3104 +#define PCI_DEVICE_ID_VIA_8233C_0 0x3109 +#define PCI_DEVICE_ID_VIA_8361 0x3112 +#define PCI_DEVICE_ID_VIA_XM266 0x3116 +#define PCI_DEVICE_ID_VIA_612X 0x3119 +#define PCI_DEVICE_ID_VIA_862X_0 0x3123 +#define PCI_DEVICE_ID_VIA_8753_0 0x3128 +#define PCI_DEVICE_ID_VIA_8233A 0x3147 +#define PCI_DEVICE_ID_VIA_8703_51_0 0x3148 +#define PCI_DEVICE_ID_VIA_8237_SATA 0x3149 +#define PCI_DEVICE_ID_VIA_XN266 0x3156 +#define PCI_DEVICE_ID_VIA_6410 0x3164 +#define PCI_DEVICE_ID_VIA_8754C_0 0x3168 +#define PCI_DEVICE_ID_VIA_8235 0x3177 +#define PCI_DEVICE_ID_VIA_8385_0 0x3188 +#define PCI_DEVICE_ID_VIA_8377_0 0x3189 +#define PCI_DEVICE_ID_VIA_8378_0 0x3205 +#define PCI_DEVICE_ID_VIA_8783_0 0x3208 +#define PCI_DEVICE_ID_VIA_8237 0x3227 +#define PCI_DEVICE_ID_VIA_8251 0x3287 +#define PCI_DEVICE_ID_VIA_8261 0x3402 +#define PCI_DEVICE_ID_VIA_8237A 0x3337 +#define PCI_DEVICE_ID_VIA_8237S 0x3372 +#define PCI_DEVICE_ID_VIA_SATA_EIDE 0x5324 +#define PCI_DEVICE_ID_VIA_8231 0x8231 +#define PCI_DEVICE_ID_VIA_8231_4 0x8235 +#define PCI_DEVICE_ID_VIA_8365_1 0x8305 +#define PCI_DEVICE_ID_VIA_CX700 0x8324 +#define 
PCI_DEVICE_ID_VIA_CX700_IDE 0x0581 +#define PCI_DEVICE_ID_VIA_VX800 0x8353 +#define PCI_DEVICE_ID_VIA_VX855 0x8409 +#define PCI_DEVICE_ID_VIA_8371_1 0x8391 +#define PCI_DEVICE_ID_VIA_82C598_1 0x8598 +#define PCI_DEVICE_ID_VIA_838X_1 0xB188 +#define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198 +#define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409 +#define PCI_DEVICE_ID_VIA_ANON 0xFFFF + +#define PCI_VENDOR_ID_SIEMENS 0x110A +#define PCI_DEVICE_ID_SIEMENS_DSCC4 0x2102 + +#define PCI_VENDOR_ID_VORTEX 0x1119 +#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000 +#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001 +#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002 +#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003 +#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004 +#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005 +#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006 +#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007 +#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008 +#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009 +#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a +#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b +#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c +#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d +#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100 +#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101 +#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102 +#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103 +#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104 +#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105 + +#define PCI_VENDOR_ID_EF 0x111a +#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000 +#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002 +#define PCI_DEVICE_ID_EF_ATM_LANAI2 0x0003 +#define PCI_DEVICE_ID_EF_ATM_LANAIHB 0x0005 + +#define PCI_VENDOR_ID_IDT 0x111d +#define PCI_DEVICE_ID_IDT_IDT77201 0x0001 + +#define PCI_VENDOR_ID_FORE 0x1127 +#define PCI_DEVICE_ID_FORE_PCA200E 0x0300 + +#define PCI_VENDOR_ID_PHILIPS 0x1131 +#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146 +#define PCI_DEVICE_ID_PHILIPS_SAA9730 0x9730 + +#define PCI_VENDOR_ID_EICON 0x1133 +#define PCI_DEVICE_ID_EICON_DIVA20 0xe002 +#define PCI_DEVICE_ID_EICON_DIVA20_U 0xe004 +#define PCI_DEVICE_ID_EICON_DIVA201 0xe005 +#define PCI_DEVICE_ID_EICON_DIVA202 0xe00b +#define PCI_DEVICE_ID_EICON_MAESTRA 0xe010 +#define PCI_DEVICE_ID_EICON_MAESTRAQ 0xe012 +#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013 +#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014 + +#define PCI_VENDOR_ID_CISCO 0x1137 + +#define PCI_VENDOR_ID_ZIATECH 0x1138 +#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 + + +#define PCI_VENDOR_ID_SYSKONNECT 0x1148 +#define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 +#define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300 +#define PCI_DEVICE_ID_SYSKONNECT_YU 0x4320 +#define PCI_DEVICE_ID_SYSKONNECT_9DXX 0x4400 +#define PCI_DEVICE_ID_SYSKONNECT_9MXX 0x4500 + +#define PCI_VENDOR_ID_DIGI 0x114f +#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_E 0x0070 +#define PCI_DEVICE_ID_DIGI_DF_M_E 0x0071 +#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A 0x0072 +#define PCI_DEVICE_ID_DIGI_DF_M_A 0x0073 +#define PCI_DEVICE_ID_DIGI_NEO_8 0x00B1 +#define PCI_DEVICE_ID_NEO_2DB9 0x00C8 +#define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9 +#define PCI_DEVICE_ID_NEO_2RJ45 0x00CA +#define PCI_DEVICE_ID_NEO_2RJ45PRI 0x00CB +#define PCIE_DEVICE_ID_NEO_4_IBM 0x00F4 + +#define PCI_VENDOR_ID_XIRCOM 0x115d +#define PCI_DEVICE_ID_XIRCOM_RBM56G 0x0101 +#define PCI_DEVICE_ID_XIRCOM_X3201_MDM 0x0103 + +#define PCI_VENDOR_ID_SERVERWORKS 0x1166 +#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008 +#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009 +#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017 +#define PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB 0x0036 +#define 
PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103 +#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132 +#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200 +#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203 +#define PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205 +#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211 +#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213 +#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217 +#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227 +#define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408 + +#define PCI_VENDOR_ID_SBE 0x1176 +#define PCI_DEVICE_ID_SBE_WANXL100 0x0301 +#define PCI_DEVICE_ID_SBE_WANXL200 0x0302 +#define PCI_DEVICE_ID_SBE_WANXL400 0x0104 + +#define PCI_VENDOR_ID_TOSHIBA 0x1179 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0101 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2 0x0102 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_3 0x0103 +#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_5 0x0105 +#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a +#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f +#define PCI_DEVICE_ID_TOSHIBA_TOPIC100 0x0617 + +#define PCI_VENDOR_ID_TOSHIBA_2 0x102f +#define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030 +#define PCI_DEVICE_ID_TOSHIBA_TC35815_NWU 0x0031 +#define PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939 0x0032 +#define PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE 0x0105 +#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108 +#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3 + +#define PCI_VENDOR_ID_ATTO 0x117c + +#define PCI_VENDOR_ID_RICOH 0x1180 +#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465 +#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466 +#define PCI_DEVICE_ID_RICOH_RL5C475 0x0475 +#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 +#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 +#define PCI_DEVICE_ID_RICOH_R5C822 0x0822 +#define PCI_DEVICE_ID_RICOH_R5C832 0x0832 +#define PCI_DEVICE_ID_RICOH_R5C843 0x0843 + +#define PCI_VENDOR_ID_DLINK 0x1186 +#define PCI_DEVICE_ID_DLINK_DGE510T 0x4c00 + +#define PCI_VENDOR_ID_ARTOP 0x1191 +#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005 +#define PCI_DEVICE_ID_ARTOP_ATP860 0x0006 +#define PCI_DEVICE_ID_ARTOP_ATP860R 0x0007 +#define PCI_DEVICE_ID_ARTOP_ATP865 0x0008 +#define PCI_DEVICE_ID_ARTOP_ATP865R 0x0009 +#define PCI_DEVICE_ID_ARTOP_ATP867A 0x000A +#define PCI_DEVICE_ID_ARTOP_ATP867B 0x000B +#define PCI_DEVICE_ID_ARTOP_AEC7610 0x8002 +#define PCI_DEVICE_ID_ARTOP_AEC7612UW 0x8010 +#define PCI_DEVICE_ID_ARTOP_AEC7612U 0x8020 +#define PCI_DEVICE_ID_ARTOP_AEC7612S 0x8030 +#define PCI_DEVICE_ID_ARTOP_AEC7612D 0x8040 +#define PCI_DEVICE_ID_ARTOP_AEC7612SUW 0x8050 +#define PCI_DEVICE_ID_ARTOP_8060 0x8060 + +#define PCI_VENDOR_ID_ZEITNET 0x1193 +#define PCI_DEVICE_ID_ZEITNET_1221 0x0001 +#define PCI_DEVICE_ID_ZEITNET_1225 0x0002 + +#define PCI_VENDOR_ID_FUJITSU_ME 0x119e +#define PCI_DEVICE_ID_FUJITSU_FS155 0x0001 +#define PCI_DEVICE_ID_FUJITSU_FS50 0x0003 + +#define PCI_SUBVENDOR_ID_KEYSPAN 0x11a9 +#define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334 + +#define PCI_VENDOR_ID_MARVELL 0x11ab +#define PCI_DEVICE_ID_MARVELL_GT64111 0x4146 +#define PCI_DEVICE_ID_MARVELL_GT64260 0x6430 +#define PCI_DEVICE_ID_MARVELL_MV64360 0x6460 +#define PCI_DEVICE_ID_MARVELL_MV64460 0x6480 +#define PCI_DEVICE_ID_MARVELL_88ALP01_NAND 0x4100 +#define PCI_DEVICE_ID_MARVELL_88ALP01_SD 0x4101 +#define PCI_DEVICE_ID_MARVELL_88ALP01_CCIC 0x4102 + +#define PCI_VENDOR_ID_V3 0x11b0 +#define PCI_DEVICE_ID_V3_V960 0x0001 +#define PCI_DEVICE_ID_V3_V351 0x0002 + +#define PCI_VENDOR_ID_ATT 0x11c1 
+#define PCI_DEVICE_ID_ATT_VENUS_MODEM 0x480 + +#define PCI_VENDOR_ID_SPECIALIX 0x11cb +#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000 +#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000 +#define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004 + +#define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4 +#define PCI_DEVICE_ID_AD1889JS 0x1889 + +#define PCI_DEVICE_ID_SEGA_BBA 0x1234 + +#define PCI_VENDOR_ID_ZORAN 0x11de +#define PCI_DEVICE_ID_ZORAN_36057 0x6057 +#define PCI_DEVICE_ID_ZORAN_36120 0x6120 + +#define PCI_VENDOR_ID_COMPEX 0x11f6 +#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112 + +#define PCI_VENDOR_ID_PMC_Sierra 0x11f8 + +#define PCI_VENDOR_ID_RP 0x11fe +#define PCI_DEVICE_ID_RP32INTF 0x0001 +#define PCI_DEVICE_ID_RP8INTF 0x0002 +#define PCI_DEVICE_ID_RP16INTF 0x0003 +#define PCI_DEVICE_ID_RP4QUAD 0x0004 +#define PCI_DEVICE_ID_RP8OCTA 0x0005 +#define PCI_DEVICE_ID_RP8J 0x0006 +#define PCI_DEVICE_ID_RP4J 0x0007 +#define PCI_DEVICE_ID_RP8SNI 0x0008 +#define PCI_DEVICE_ID_RP16SNI 0x0009 +#define PCI_DEVICE_ID_RPP4 0x000A +#define PCI_DEVICE_ID_RPP8 0x000B +#define PCI_DEVICE_ID_RP4M 0x000D +#define PCI_DEVICE_ID_RP2_232 0x000E +#define PCI_DEVICE_ID_RP2_422 0x000F +#define PCI_DEVICE_ID_URP32INTF 0x0801 +#define PCI_DEVICE_ID_URP8INTF 0x0802 +#define PCI_DEVICE_ID_URP16INTF 0x0803 +#define PCI_DEVICE_ID_URP8OCTA 0x0805 +#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C +#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D +#define PCI_DEVICE_ID_CRP16INTF 0x0903 + +#define PCI_VENDOR_ID_CYCLADES 0x120e +#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 +#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101 +#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102 +#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103 +#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104 +#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105 +#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200 +#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201 +#define PCI_DEVICE_ID_PC300_RX_2 0x0300 +#define PCI_DEVICE_ID_PC300_RX_1 0x0301 +#define PCI_DEVICE_ID_PC300_TE_2 0x0310 +#define PCI_DEVICE_ID_PC300_TE_1 0x0311 +#define PCI_DEVICE_ID_PC300_TE_M_2 0x0320 +#define PCI_DEVICE_ID_PC300_TE_M_1 0x0321 + +#define PCI_VENDOR_ID_ESSENTIAL 0x120f +#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001 + +#define PCI_VENDOR_ID_O2 0x1217 +#define PCI_DEVICE_ID_O2_6729 0x6729 +#define PCI_DEVICE_ID_O2_6730 0x673a +#define PCI_DEVICE_ID_O2_6832 0x6832 +#define PCI_DEVICE_ID_O2_6836 0x6836 +#define PCI_DEVICE_ID_O2_6812 0x6872 +#define PCI_DEVICE_ID_O2_6933 0x6933 + +#define PCI_VENDOR_ID_3DFX 0x121a +#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 +#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002 +#define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003 +#define PCI_DEVICE_ID_3DFX_VOODOO3 0x0005 +#define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009 + +#define PCI_VENDOR_ID_AVM 0x1244 +#define PCI_DEVICE_ID_AVM_B1 0x0700 +#define PCI_DEVICE_ID_AVM_C4 0x0800 +#define PCI_DEVICE_ID_AVM_A1 0x0a00 +#define PCI_DEVICE_ID_AVM_A1_V2 0x0e00 +#define PCI_DEVICE_ID_AVM_C2 0x1100 +#define PCI_DEVICE_ID_AVM_T1 0x1200 + +#define PCI_VENDOR_ID_STALLION 0x124d + +/* Allied Telesyn */ +#define PCI_VENDOR_ID_AT 0x1259 +#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701 +#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703 + +#define PCI_VENDOR_ID_ESS 0x125d +#define PCI_DEVICE_ID_ESS_ESS1968 0x1968 +#define PCI_DEVICE_ID_ESS_ESS1978 0x1978 +#define PCI_DEVICE_ID_ESS_ALLEGRO_1 0x1988 +#define PCI_DEVICE_ID_ESS_ALLEGRO 0x1989 +#define PCI_DEVICE_ID_ESS_CANYON3D_2LE 0x1990 +#define PCI_DEVICE_ID_ESS_CANYON3D_2 0x1992 +#define PCI_DEVICE_ID_ESS_MAESTRO3 0x1998 +#define PCI_DEVICE_ID_ESS_MAESTRO3_1 0x1999 +#define PCI_DEVICE_ID_ESS_MAESTRO3_HW 
0x199a +#define PCI_DEVICE_ID_ESS_MAESTRO3_2 0x199b + +#define PCI_VENDOR_ID_SATSAGEM 0x1267 +#define PCI_DEVICE_ID_SATSAGEM_NICCY 0x1016 + +#define PCI_VENDOR_ID_ENSONIQ 0x1274 +#define PCI_DEVICE_ID_ENSONIQ_CT5880 0x5880 +#define PCI_DEVICE_ID_ENSONIQ_ES1370 0x5000 +#define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371 + +#define PCI_VENDOR_ID_TRANSMETA 0x1279 +#define PCI_DEVICE_ID_EFFICEON 0x0060 + +#define PCI_VENDOR_ID_ROCKWELL 0x127A + +#define PCI_VENDOR_ID_ITE 0x1283 +#define PCI_DEVICE_ID_ITE_8172 0x8172 +#define PCI_DEVICE_ID_ITE_8211 0x8211 +#define PCI_DEVICE_ID_ITE_8212 0x8212 +#define PCI_DEVICE_ID_ITE_8213 0x8213 +#define PCI_DEVICE_ID_ITE_8152 0x8152 +#define PCI_DEVICE_ID_ITE_8872 0x8872 +#define PCI_DEVICE_ID_ITE_IT8330G_0 0xe886 + +/* formerly Platform Tech */ +#define PCI_DEVICE_ID_ESS_ESS0100 0x0100 + +#define PCI_VENDOR_ID_ALTEON 0x12ae + +#define PCI_SUBVENDOR_ID_CONNECT_TECH 0x12c4 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232 0x0001 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232 0x0002 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232 0x0003 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485 0x0004 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4 0x0005 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485 0x0006 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2 0x0007 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485 0x0008 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6 0x0009 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1 0x000A +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1 0x000B +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ 0x000C +#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_PTM 0x000D +#define PCI_SUBDEVICE_ID_CONNECT_TECH_NT960PCI 0x0100 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2 0x0201 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4 0x0202 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232 0x0300 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232 0x0301 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232 0x0302 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1 0x0310 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2 0x0311 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4 0x0312 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2 0x0320 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4 0x0321 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8 0x0322 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485 0x0330 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485 0x0331 +#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485 0x0332 + +#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 +#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 + +#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16 0x0011 +#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC 0x0041 +#define PCI_SUBVENDOR_ID_CHASE_PCIRAS 0x124D +#define PCI_SUBDEVICE_ID_CHASE_PCIRAS4 0xF001 +#define PCI_SUBDEVICE_ID_CHASE_PCIRAS8 0xF010 + +#define PCI_VENDOR_ID_AUREAL 0x12eb +#define PCI_DEVICE_ID_AUREAL_VORTEX_1 0x0001 +#define PCI_DEVICE_ID_AUREAL_VORTEX_2 0x0002 +#define PCI_DEVICE_ID_AUREAL_ADVANTAGE 0x0003 + +#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8 +#define PCI_DEVICE_ID_LML_33R10 0x8a02 + +#define PCI_VENDOR_ID_ESDGMBH 0x12fe +#define PCI_DEVICE_ID_ESDGMBH_CPCIASIO4 0x0111 + +#define PCI_VENDOR_ID_SIIG 0x131f +#define PCI_SUBVENDOR_ID_SIIG 0x131f +#define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000 +#define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001 +#define PCI_DEVICE_ID_SIIG_1S_10x_850 
0x1002 +#define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010 +#define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011 +#define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012 +#define PCI_DEVICE_ID_SIIG_1P_10x 0x1020 +#define PCI_DEVICE_ID_SIIG_2P_10x 0x1021 +#define PCI_DEVICE_ID_SIIG_2S_10x_550 0x1030 +#define PCI_DEVICE_ID_SIIG_2S_10x_650 0x1031 +#define PCI_DEVICE_ID_SIIG_2S_10x_850 0x1032 +#define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034 +#define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035 +#define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036 +#define PCI_DEVICE_ID_SIIG_4S_10x_550 0x1050 +#define PCI_DEVICE_ID_SIIG_4S_10x_650 0x1051 +#define PCI_DEVICE_ID_SIIG_4S_10x_850 0x1052 +#define PCI_DEVICE_ID_SIIG_1S_20x_550 0x2000 +#define PCI_DEVICE_ID_SIIG_1S_20x_650 0x2001 +#define PCI_DEVICE_ID_SIIG_1S_20x_850 0x2002 +#define PCI_DEVICE_ID_SIIG_1P_20x 0x2020 +#define PCI_DEVICE_ID_SIIG_2P_20x 0x2021 +#define PCI_DEVICE_ID_SIIG_2S_20x_550 0x2030 +#define PCI_DEVICE_ID_SIIG_2S_20x_650 0x2031 +#define PCI_DEVICE_ID_SIIG_2S_20x_850 0x2032 +#define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040 +#define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041 +#define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042 +#define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010 +#define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011 +#define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012 +#define PCI_DEVICE_ID_SIIG_4S_20x_550 0x2050 +#define PCI_DEVICE_ID_SIIG_4S_20x_650 0x2051 +#define PCI_DEVICE_ID_SIIG_4S_20x_850 0x2052 +#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060 +#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061 +#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062 +#define PCI_DEVICE_ID_SIIG_8S_20x_550 0x2080 +#define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081 +#define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082 +#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL 0x2530 + +#define PCI_VENDOR_ID_RADISYS 0x1331 + +#define PCI_VENDOR_ID_MICRO_MEMORY 0x1332 +#define PCI_DEVICE_ID_MICRO_MEMORY_5415CN 0x5415 +#define PCI_DEVICE_ID_MICRO_MEMORY_5425CN 0x5425 +#define PCI_DEVICE_ID_MICRO_MEMORY_6155 0x6155 + +#define PCI_VENDOR_ID_DOMEX 0x134a +#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001 + +#define PCI_VENDOR_ID_INTASHIELD 0x135a +#define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80 +#define PCI_DEVICE_ID_INTASHIELD_IS400 0x0dc0 + +#define PCI_VENDOR_ID_QUATECH 0x135C +#define PCI_DEVICE_ID_QUATECH_QSC100 0x0010 +#define PCI_DEVICE_ID_QUATECH_DSC100 0x0020 +#define PCI_DEVICE_ID_QUATECH_ESC100D 0x0050 +#define PCI_DEVICE_ID_QUATECH_ESC100M 0x0060 +#define PCI_DEVICE_ID_QUATECH_SPPXP_100 0x0278 + +#define PCI_VENDOR_ID_SEALEVEL 0x135e +#define PCI_DEVICE_ID_SEALEVEL_U530 0x7101 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM2 0x7201 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM422 0x7402 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202 +#define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401 +#define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801 +#define PCI_DEVICE_ID_SEALEVEL_7803 0x7803 +#define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804 + +#define PCI_VENDOR_ID_HYPERCOPE 0x1365 +#define PCI_DEVICE_ID_HYPERCOPE_PLX 0x9050 +#define PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO 0x0104 +#define PCI_SUBDEVICE_ID_HYPERCOPE_ERGO 0x0106 +#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107 +#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108 + +#define PCI_VENDOR_ID_DIGIGRAM 0x1369 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002 + +#define PCI_VENDOR_ID_KAWASAKI 0x136b +#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 + +#define PCI_VENDOR_ID_CNET 0x1371 +#define 
PCI_DEVICE_ID_CNET_GIGACARD 0x434e + +#define PCI_VENDOR_ID_LMC 0x1376 +#define PCI_DEVICE_ID_LMC_HSSI 0x0003 +#define PCI_DEVICE_ID_LMC_DS3 0x0004 +#define PCI_DEVICE_ID_LMC_SSI 0x0005 +#define PCI_DEVICE_ID_LMC_T1 0x0006 + +#define PCI_VENDOR_ID_NETGEAR 0x1385 +#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a + +#define PCI_VENDOR_ID_APPLICOM 0x1389 +#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001 +#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002 +#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003 + +#define PCI_VENDOR_ID_MOXA 0x1393 +#define PCI_DEVICE_ID_MOXA_RC7000 0x0001 +#define PCI_DEVICE_ID_MOXA_CP102 0x1020 +#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021 +#define PCI_DEVICE_ID_MOXA_CP102U 0x1022 +#define PCI_DEVICE_ID_MOXA_C104 0x1040 +#define PCI_DEVICE_ID_MOXA_CP104U 0x1041 +#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042 +#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043 +#define PCI_DEVICE_ID_MOXA_CT114 0x1140 +#define PCI_DEVICE_ID_MOXA_CP114 0x1141 +#define PCI_DEVICE_ID_MOXA_CP118U 0x1180 +#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181 +#define PCI_DEVICE_ID_MOXA_CP132 0x1320 +#define PCI_DEVICE_ID_MOXA_CP132U 0x1321 +#define PCI_DEVICE_ID_MOXA_CP134U 0x1340 +#define PCI_DEVICE_ID_MOXA_C168 0x1680 +#define PCI_DEVICE_ID_MOXA_CP168U 0x1681 +#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682 +#define PCI_DEVICE_ID_MOXA_CP204J 0x2040 +#define PCI_DEVICE_ID_MOXA_C218 0x2180 +#define PCI_DEVICE_ID_MOXA_C320 0x3200 + +#define PCI_VENDOR_ID_CCD 0x1397 +#define PCI_DEVICE_ID_CCD_HFC4S 0x08B4 +#define PCI_SUBDEVICE_ID_CCD_PMX2S 0x1234 +#define PCI_DEVICE_ID_CCD_HFC8S 0x16B8 +#define PCI_DEVICE_ID_CCD_2BD0 0x2bd0 +#define PCI_DEVICE_ID_CCD_HFCE1 0x30B1 +#define PCI_SUBDEVICE_ID_CCD_SPD4S 0x3136 +#define PCI_SUBDEVICE_ID_CCD_SPDE1 0x3137 +#define PCI_DEVICE_ID_CCD_B000 0xb000 +#define PCI_DEVICE_ID_CCD_B006 0xb006 +#define PCI_DEVICE_ID_CCD_B007 0xb007 +#define PCI_DEVICE_ID_CCD_B008 0xb008 +#define PCI_DEVICE_ID_CCD_B009 0xb009 +#define PCI_DEVICE_ID_CCD_B00A 0xb00a +#define PCI_DEVICE_ID_CCD_B00B 0xb00b +#define PCI_DEVICE_ID_CCD_B00C 0xb00c +#define PCI_DEVICE_ID_CCD_B100 0xb100 +#define PCI_SUBDEVICE_ID_CCD_IOB4ST 0xB520 +#define PCI_SUBDEVICE_ID_CCD_IOB8STR 0xB521 +#define PCI_SUBDEVICE_ID_CCD_IOB8ST 0xB522 +#define PCI_SUBDEVICE_ID_CCD_IOB1E1 0xB523 +#define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540 +#define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550 +#define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552 +#define PCI_SUBDEVICE_ID_CCD_JHSE1 0xB553 +#define PCI_SUBDEVICE_ID_CCD_JH8S 0xB55B +#define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560 +#define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562 +#define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563 +#define PCI_SUBDEVICE_ID_CCD_BNE1D 0xB564 +#define PCI_SUBDEVICE_ID_CCD_BNE1DP 0xB565 +#define PCI_SUBDEVICE_ID_CCD_BN2S 0xB566 +#define PCI_SUBDEVICE_ID_CCD_BN1SM 0xB567 +#define PCI_SUBDEVICE_ID_CCD_BN4SM 0xB568 +#define PCI_SUBDEVICE_ID_CCD_BN2SM 0xB569 +#define PCI_SUBDEVICE_ID_CCD_BNE1M 0xB56A +#define PCI_SUBDEVICE_ID_CCD_BN8SP 0xB56B +#define PCI_SUBDEVICE_ID_CCD_HFC4S 0xB620 +#define PCI_SUBDEVICE_ID_CCD_HFC8S 0xB622 +#define PCI_DEVICE_ID_CCD_B700 0xb700 +#define PCI_DEVICE_ID_CCD_B701 0xb701 +#define PCI_SUBDEVICE_ID_CCD_HFCE1 0xC523 +#define PCI_SUBDEVICE_ID_CCD_OV2S 0xE884 +#define PCI_SUBDEVICE_ID_CCD_OV4S 0xE888 +#define PCI_SUBDEVICE_ID_CCD_OV8S 0xE998 + +#define PCI_VENDOR_ID_EXAR 0x13a8 +#define PCI_DEVICE_ID_EXAR_XR17C152 0x0152 +#define PCI_DEVICE_ID_EXAR_XR17C154 0x0154 +#define PCI_DEVICE_ID_EXAR_XR17C158 0x0158 + +#define PCI_VENDOR_ID_MICROGATE 0x13c0 +#define PCI_DEVICE_ID_MICROGATE_USC 0x0010 +#define 
PCI_DEVICE_ID_MICROGATE_SCA 0x0030 + +#define PCI_VENDOR_ID_3WARE 0x13C1 +#define PCI_DEVICE_ID_3WARE_1000 0x1000 +#define PCI_DEVICE_ID_3WARE_7000 0x1001 +#define PCI_DEVICE_ID_3WARE_9000 0x1002 + +#define PCI_VENDOR_ID_IOMEGA 0x13ca +#define PCI_DEVICE_ID_IOMEGA_BUZ 0x4231 + +#define PCI_VENDOR_ID_ABOCOM 0x13D1 +#define PCI_DEVICE_ID_ABOCOM_2BD1 0x2BD1 + +#define PCI_VENDOR_ID_SUNDANCE 0x13f0 + +#define PCI_VENDOR_ID_CMEDIA 0x13f6 +#define PCI_DEVICE_ID_CMEDIA_CM8338A 0x0100 +#define PCI_DEVICE_ID_CMEDIA_CM8338B 0x0101 +#define PCI_DEVICE_ID_CMEDIA_CM8738 0x0111 +#define PCI_DEVICE_ID_CMEDIA_CM8738B 0x0112 + +#define PCI_VENDOR_ID_LAVA 0x1407 +#define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */ +#define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATTRO_A 0x0120 /* 2x 16550A, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUATTRO_B 0x0121 /* 2x 16550A, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */ +#define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */ +#define PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */ +#define PCI_DEVICE_ID_LAVA_QUAD_A 0x0201 /* 2x 16650, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_QUAD_B 0x0202 /* 2x 16650, half of 4 port */ +#define PCI_DEVICE_ID_LAVA_SSERIAL 0x0500 /* 1x 16550 */ +#define PCI_DEVICE_ID_LAVA_PORT_650 0x0600 /* 1x 16650 */ +#define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000 +#define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8002 /* The Lava Dual Parallel is */ +#define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8003 /* two PCI devices on a card */ +#define PCI_DEVICE_ID_LAVA_BOCA_IOPPAR 0x8800 + +#define PCI_VENDOR_ID_TIMEDIA 0x1409 +#define PCI_DEVICE_ID_TIMEDIA_1889 0x7168 + +#define PCI_VENDOR_ID_ICE 0x1412 +#define PCI_DEVICE_ID_ICE_1712 0x1712 +#define PCI_DEVICE_ID_VT1724 0x1724 + +#define PCI_VENDOR_ID_OXSEMI 0x1415 +#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403 +#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000 +#define PCI_DEVICE_ID_OXSEMI_PCIe840_G 0xC004 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_0 0xC100 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G 0xC104 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1 0xC110 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G 0xC114 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 +#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C +#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 +#define PCI_DEVICE_ID_OXSEMI_C950 0x950B +#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 +#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 +#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 +#define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 +#define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001 + +#define PCI_VENDOR_ID_CHELSIO 0x1425 + +#define PCI_VENDOR_ID_SAMSUNG 0x144d + +#define PCI_VENDOR_ID_GIGABYTE 0x1458 + +#define PCI_VENDOR_ID_AMBIT 0x1468 + +#define PCI_VENDOR_ID_MYRICOM 0x14c1 + +#define PCI_VENDOR_ID_TITAN 0x14D2 +#define PCI_DEVICE_ID_TITAN_010L 0x8001 +#define PCI_DEVICE_ID_TITAN_100L 0x8010 +#define PCI_DEVICE_ID_TITAN_110L 0x8011 +#define PCI_DEVICE_ID_TITAN_200L 0x8020 +#define PCI_DEVICE_ID_TITAN_210L 0x8021 +#define PCI_DEVICE_ID_TITAN_400L 0x8040 +#define PCI_DEVICE_ID_TITAN_800L 0x8080 +#define PCI_DEVICE_ID_TITAN_100 0xA001 +#define PCI_DEVICE_ID_TITAN_200 0xA005 +#define PCI_DEVICE_ID_TITAN_400 0xA003 +#define PCI_DEVICE_ID_TITAN_800B 0xA004 + +#define PCI_VENDOR_ID_PANACOM 0x14d4 +#define PCI_DEVICE_ID_PANACOM_QUADMODEM 0x0400 +#define PCI_DEVICE_ID_PANACOM_DUALMODEM 0x0402 + +#define 
PCI_VENDOR_ID_SIPACKETS 0x14d9 +#define PCI_DEVICE_ID_SP1011 0x0010 + +#define PCI_VENDOR_ID_AFAVLAB 0x14db +#define PCI_DEVICE_ID_AFAVLAB_P028 0x2180 +#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182 +#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150 + +#define PCI_VENDOR_ID_BROADCOM 0x14e4 +#define PCI_DEVICE_ID_TIGON3_5752 0x1600 +#define PCI_DEVICE_ID_TIGON3_5752M 0x1601 +#define PCI_DEVICE_ID_NX2_5709 0x1639 +#define PCI_DEVICE_ID_NX2_5709S 0x163a +#define PCI_DEVICE_ID_TIGON3_5700 0x1644 +#define PCI_DEVICE_ID_TIGON3_5701 0x1645 +#define PCI_DEVICE_ID_TIGON3_5702 0x1646 +#define PCI_DEVICE_ID_TIGON3_5703 0x1647 +#define PCI_DEVICE_ID_TIGON3_5704 0x1648 +#define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649 +#define PCI_DEVICE_ID_NX2_5706 0x164a +#define PCI_DEVICE_ID_NX2_5708 0x164c +#define PCI_DEVICE_ID_TIGON3_5702FE 0x164d +#define PCI_DEVICE_ID_NX2_57710 0x164e +#define PCI_DEVICE_ID_NX2_57711 0x164f +#define PCI_DEVICE_ID_NX2_57711E 0x1650 +#define PCI_DEVICE_ID_TIGON3_5705 0x1653 +#define PCI_DEVICE_ID_TIGON3_5705_2 0x1654 +#define PCI_DEVICE_ID_TIGON3_5720 0x1658 +#define PCI_DEVICE_ID_TIGON3_5721 0x1659 +#define PCI_DEVICE_ID_TIGON3_5722 0x165a +#define PCI_DEVICE_ID_TIGON3_5723 0x165b +#define PCI_DEVICE_ID_TIGON3_5705M 0x165d +#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e +#define PCI_DEVICE_ID_TIGON3_5714 0x1668 +#define PCI_DEVICE_ID_TIGON3_5714S 0x1669 +#define PCI_DEVICE_ID_TIGON3_5780 0x166a +#define PCI_DEVICE_ID_TIGON3_5780S 0x166b +#define PCI_DEVICE_ID_TIGON3_5705F 0x166e +#define PCI_DEVICE_ID_TIGON3_5754M 0x1672 +#define PCI_DEVICE_ID_TIGON3_5755M 0x1673 +#define PCI_DEVICE_ID_TIGON3_5756 0x1674 +#define PCI_DEVICE_ID_TIGON3_5750 0x1676 +#define PCI_DEVICE_ID_TIGON3_5751 0x1677 +#define PCI_DEVICE_ID_TIGON3_5715 0x1678 +#define PCI_DEVICE_ID_TIGON3_5715S 0x1679 +#define PCI_DEVICE_ID_TIGON3_5754 0x167a +#define PCI_DEVICE_ID_TIGON3_5755 0x167b +#define PCI_DEVICE_ID_TIGON3_5750M 0x167c +#define PCI_DEVICE_ID_TIGON3_5751M 0x167d +#define PCI_DEVICE_ID_TIGON3_5751F 0x167e +#define PCI_DEVICE_ID_TIGON3_5787F 0x167f +#define PCI_DEVICE_ID_TIGON3_5761E 0x1680 +#define PCI_DEVICE_ID_TIGON3_5761 0x1681 +#define PCI_DEVICE_ID_TIGON3_5764 0x1684 +#define PCI_DEVICE_ID_TIGON3_5787M 0x1693 +#define PCI_DEVICE_ID_TIGON3_5782 0x1696 +#define PCI_DEVICE_ID_TIGON3_5784 0x1698 +#define PCI_DEVICE_ID_TIGON3_5786 0x169a +#define PCI_DEVICE_ID_TIGON3_5787 0x169b +#define PCI_DEVICE_ID_TIGON3_5788 0x169c +#define PCI_DEVICE_ID_TIGON3_5789 0x169d +#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 +#define PCI_DEVICE_ID_TIGON3_5703X 0x16a7 +#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 +#define PCI_DEVICE_ID_NX2_5706S 0x16aa +#define PCI_DEVICE_ID_NX2_5708S 0x16ac +#define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6 +#define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7 +#define PCI_DEVICE_ID_TIGON3_5781 0x16dd +#define PCI_DEVICE_ID_TIGON3_5753 0x16f7 +#define PCI_DEVICE_ID_TIGON3_5753M 0x16fd +#define PCI_DEVICE_ID_TIGON3_5753F 0x16fe +#define PCI_DEVICE_ID_TIGON3_5901 0x170d +#define PCI_DEVICE_ID_BCM4401B1 0x170c +#define PCI_DEVICE_ID_TIGON3_5901_2 0x170e +#define PCI_DEVICE_ID_TIGON3_5906 0x1712 +#define PCI_DEVICE_ID_TIGON3_5906M 0x1713 +#define PCI_DEVICE_ID_BCM4401 0x4401 +#define PCI_DEVICE_ID_BCM4401B0 0x4402 + +#define PCI_VENDOR_ID_TOPIC 0x151f +#define PCI_DEVICE_ID_TOPIC_TP560 0x0000 + +#define PCI_VENDOR_ID_MAINPINE 0x1522 +#define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100 +#define PCI_VENDOR_ID_ENE 0x1524 +#define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510 +#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550 +#define 
PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551 +#define PCI_DEVICE_ID_ENE_CB714_SD 0x0750 +#define PCI_DEVICE_ID_ENE_CB714_SD_2 0x0751 +#define PCI_DEVICE_ID_ENE_1211 0x1211 +#define PCI_DEVICE_ID_ENE_1225 0x1225 +#define PCI_DEVICE_ID_ENE_1410 0x1410 +#define PCI_DEVICE_ID_ENE_710 0x1411 +#define PCI_DEVICE_ID_ENE_712 0x1412 +#define PCI_DEVICE_ID_ENE_1420 0x1420 +#define PCI_DEVICE_ID_ENE_720 0x1421 +#define PCI_DEVICE_ID_ENE_722 0x1422 + +#define PCI_SUBVENDOR_ID_PERLE 0x155f +#define PCI_SUBDEVICE_ID_PCI_RAS4 0xf001 +#define PCI_SUBDEVICE_ID_PCI_RAS8 0xf010 + +#define PCI_VENDOR_ID_SYBA 0x1592 +#define PCI_DEVICE_ID_SYBA_2P_EPP 0x0782 +#define PCI_DEVICE_ID_SYBA_1P_ECP 0x0783 + +#define PCI_VENDOR_ID_MORETON 0x15aa +#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 + +#define PCI_VENDOR_ID_ZOLTRIX 0x15b0 +#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 + +#define PCI_VENDOR_ID_MELLANOX 0x15b3 +#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 +#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46 +#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 +#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 +#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c +#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 + +#define PCI_VENDOR_ID_DFI 0x15bd + +#define PCI_VENDOR_ID_QUICKNET 0x15e2 +#define PCI_DEVICE_ID_QUICKNET_XJ 0x0500 + +/* + * ADDI-DATA GmbH communication cards + */ +#define PCI_VENDOR_ID_ADDIDATA_OLD 0x10E8 +#define PCI_VENDOR_ID_ADDIDATA 0x15B8 +#define PCI_DEVICE_ID_ADDIDATA_APCI7500 0x7000 +#define PCI_DEVICE_ID_ADDIDATA_APCI7420 0x7001 +#define PCI_DEVICE_ID_ADDIDATA_APCI7300 0x7002 +#define PCI_DEVICE_ID_ADDIDATA_APCI7800 0x818E +#define PCI_DEVICE_ID_ADDIDATA_APCI7500_2 0x7009 +#define PCI_DEVICE_ID_ADDIDATA_APCI7420_2 0x700A +#define PCI_DEVICE_ID_ADDIDATA_APCI7300_2 0x700B +#define PCI_DEVICE_ID_ADDIDATA_APCI7500_3 0x700C +#define PCI_DEVICE_ID_ADDIDATA_APCI7420_3 0x700D +#define PCI_DEVICE_ID_ADDIDATA_APCI7300_3 0x700E +#define PCI_DEVICE_ID_ADDIDATA_APCI7800_3 0x700F +#define PCI_DEVICE_ID_ADDIDATA_APCIe7300 0x7010 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7420 0x7011 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7500 0x7012 +#define PCI_DEVICE_ID_ADDIDATA_APCIe7800 0x7013 + +#define PCI_VENDOR_ID_PDC 0x15e9 + +#define PCI_VENDOR_ID_FARSITE 0x1619 +#define PCI_DEVICE_ID_FARSITE_T2P 0x0400 +#define PCI_DEVICE_ID_FARSITE_T4P 0x0440 +#define PCI_DEVICE_ID_FARSITE_T1U 0x0610 +#define PCI_DEVICE_ID_FARSITE_T2U 0x0620 +#define PCI_DEVICE_ID_FARSITE_T4U 0x0640 +#define PCI_DEVICE_ID_FARSITE_TE1 0x1610 +#define PCI_DEVICE_ID_FARSITE_TE1C 0x1612 + +#define PCI_VENDOR_ID_ARIMA 0x161f + +#define PCI_VENDOR_ID_BROCADE 0x1657 + +#define PCI_VENDOR_ID_SIBYTE 0x166d +#define PCI_DEVICE_ID_BCM1250_PCI 0x0001 +#define PCI_DEVICE_ID_BCM1250_HT 0x0002 + +#define PCI_VENDOR_ID_ATHEROS 0x168c + +#define PCI_VENDOR_ID_NETCELL 0x169c +#define PCI_DEVICE_ID_REVOLUTION 0x0044 + +#define PCI_VENDOR_ID_CENATEK 0x16CA +#define PCI_DEVICE_ID_CENATEK_IDE 0x0001 + +#define PCI_VENDOR_ID_VITESSE 0x1725 +#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 + +#define PCI_VENDOR_ID_LINKSYS 0x1737 +#define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064 + +#define PCI_VENDOR_ID_ALTIMA 0x173b +#define PCI_DEVICE_ID_ALTIMA_AC1000 0x03e8 +#define PCI_DEVICE_ID_ALTIMA_AC1001 0x03e9 +#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea +#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb + +#define PCI_VENDOR_ID_BELKIN 0x1799 +#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f + +#define PCI_VENDOR_ID_RDC 0x17f3 +#define PCI_DEVICE_ID_RDC_R6020 0x6020 +#define PCI_DEVICE_ID_RDC_R6030 0x6030 +#define 
PCI_DEVICE_ID_RDC_R6040 0x6040 +#define PCI_DEVICE_ID_RDC_R6060 0x6060 +#define PCI_DEVICE_ID_RDC_R6061 0x6061 +#define PCI_DEVICE_ID_RDC_D1010 0x1010 + +#define PCI_VENDOR_ID_LENOVO 0x17aa + +#define PCI_VENDOR_ID_ARECA 0x17d3 +#define PCI_DEVICE_ID_ARECA_1110 0x1110 +#define PCI_DEVICE_ID_ARECA_1120 0x1120 +#define PCI_DEVICE_ID_ARECA_1130 0x1130 +#define PCI_DEVICE_ID_ARECA_1160 0x1160 +#define PCI_DEVICE_ID_ARECA_1170 0x1170 +#define PCI_DEVICE_ID_ARECA_1200 0x1200 +#define PCI_DEVICE_ID_ARECA_1201 0x1201 +#define PCI_DEVICE_ID_ARECA_1202 0x1202 +#define PCI_DEVICE_ID_ARECA_1210 0x1210 +#define PCI_DEVICE_ID_ARECA_1220 0x1220 +#define PCI_DEVICE_ID_ARECA_1230 0x1230 +#define PCI_DEVICE_ID_ARECA_1260 0x1260 +#define PCI_DEVICE_ID_ARECA_1270 0x1270 +#define PCI_DEVICE_ID_ARECA_1280 0x1280 +#define PCI_DEVICE_ID_ARECA_1380 0x1380 +#define PCI_DEVICE_ID_ARECA_1381 0x1381 +#define PCI_DEVICE_ID_ARECA_1680 0x1680 +#define PCI_DEVICE_ID_ARECA_1681 0x1681 + +#define PCI_VENDOR_ID_S2IO 0x17d5 +#define PCI_DEVICE_ID_S2IO_WIN 0x5731 +#define PCI_DEVICE_ID_S2IO_UNI 0x5831 +#define PCI_DEVICE_ID_HERC_WIN 0x5732 +#define PCI_DEVICE_ID_HERC_UNI 0x5832 + +#define PCI_VENDOR_ID_SITECOM 0x182d +#define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 + +#define PCI_VENDOR_ID_TOPSPIN 0x1867 + +#define PCI_VENDOR_ID_SILAN 0x1904 + +#define PCI_VENDOR_ID_TDI 0x192E +#define PCI_DEVICE_ID_TDI_EHCI 0x0101 + +#define PCI_VENDOR_ID_FREESCALE 0x1957 +#define PCI_DEVICE_ID_MPC8315E 0x00b4 +#define PCI_DEVICE_ID_MPC8315 0x00b5 +#define PCI_DEVICE_ID_MPC8314E 0x00b6 +#define PCI_DEVICE_ID_MPC8314 0x00b7 +#define PCI_DEVICE_ID_MPC8378E 0x00c4 +#define PCI_DEVICE_ID_MPC8378 0x00c5 +#define PCI_DEVICE_ID_MPC8377E 0x00c6 +#define PCI_DEVICE_ID_MPC8377 0x00c7 +#define PCI_DEVICE_ID_MPC8548E 0x0012 +#define PCI_DEVICE_ID_MPC8548 0x0013 +#define PCI_DEVICE_ID_MPC8543E 0x0014 +#define PCI_DEVICE_ID_MPC8543 0x0015 +#define PCI_DEVICE_ID_MPC8547E 0x0018 +#define PCI_DEVICE_ID_MPC8545E 0x0019 +#define PCI_DEVICE_ID_MPC8545 0x001a +#define PCI_DEVICE_ID_MPC8569E 0x0061 +#define PCI_DEVICE_ID_MPC8569 0x0060 +#define PCI_DEVICE_ID_MPC8568E 0x0020 +#define PCI_DEVICE_ID_MPC8568 0x0021 +#define PCI_DEVICE_ID_MPC8567E 0x0022 +#define PCI_DEVICE_ID_MPC8567 0x0023 +#define PCI_DEVICE_ID_MPC8533E 0x0030 +#define PCI_DEVICE_ID_MPC8533 0x0031 +#define PCI_DEVICE_ID_MPC8544E 0x0032 +#define PCI_DEVICE_ID_MPC8544 0x0033 +#define PCI_DEVICE_ID_MPC8572E 0x0040 +#define PCI_DEVICE_ID_MPC8572 0x0041 +#define PCI_DEVICE_ID_MPC8536E 0x0050 +#define PCI_DEVICE_ID_MPC8536 0x0051 +#define PCI_DEVICE_ID_P2020E 0x0070 +#define PCI_DEVICE_ID_P2020 0x0071 +#define PCI_DEVICE_ID_P2010E 0x0078 +#define PCI_DEVICE_ID_P2010 0x0079 +#define PCI_DEVICE_ID_P1020E 0x0100 +#define PCI_DEVICE_ID_P1020 0x0101 +#define PCI_DEVICE_ID_P1011E 0x0108 +#define PCI_DEVICE_ID_P1011 0x0109 +#define PCI_DEVICE_ID_P1022E 0x0110 +#define PCI_DEVICE_ID_P1022 0x0111 +#define PCI_DEVICE_ID_P1013E 0x0118 +#define PCI_DEVICE_ID_P1013 0x0119 +#define PCI_DEVICE_ID_P4080E 0x0400 +#define PCI_DEVICE_ID_P4080 0x0401 +#define PCI_DEVICE_ID_P4040E 0x0408 +#define PCI_DEVICE_ID_P4040 0x0409 +#define PCI_DEVICE_ID_MPC8641 0x7010 +#define PCI_DEVICE_ID_MPC8641D 0x7011 +#define PCI_DEVICE_ID_MPC8610 0x7018 + +#define PCI_VENDOR_ID_PASEMI 0x1959 + +#define PCI_VENDOR_ID_ATTANSIC 0x1969 +#define PCI_DEVICE_ID_ATTANSIC_L1 0x1048 +#define PCI_DEVICE_ID_ATTANSIC_L2 0x2048 + +#define PCI_VENDOR_ID_JMICRON 0x197B +#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 +#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 
+#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 +#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 +#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 +#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 +#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 +#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 +#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 + +#define PCI_VENDOR_ID_KORENIX 0x1982 +#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 +#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff +#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 +#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff + +#define PCI_VENDOR_ID_QMI 0x1a32 + +#define PCI_VENDOR_ID_AZWAVE 0x1a3b + +#define PCI_VENDOR_ID_TEKRAM 0x1de1 +#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 + +#define PCI_VENDOR_ID_TEHUTI 0x1fc9 +#define PCI_DEVICE_ID_TEHUTI_3009 0x3009 +#define PCI_DEVICE_ID_TEHUTI_3010 0x3010 +#define PCI_DEVICE_ID_TEHUTI_3014 0x3014 + +#define PCI_VENDOR_ID_HINT 0x3388 +#define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013 + +#define PCI_VENDOR_ID_3DLABS 0x3d3d +#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 +#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 + +#define PCI_VENDOR_ID_NETXEN 0x4040 +#define PCI_DEVICE_ID_NX2031_10GXSR 0x0001 +#define PCI_DEVICE_ID_NX2031_10GCX4 0x0002 +#define PCI_DEVICE_ID_NX2031_4GCU 0x0003 +#define PCI_DEVICE_ID_NX2031_IMEZ 0x0004 +#define PCI_DEVICE_ID_NX2031_HMEZ 0x0005 +#define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024 +#define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025 +#define PCI_DEVICE_ID_NX3031 0x0100 + +#define PCI_VENDOR_ID_AKS 0x416c +#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 + +#define PCI_VENDOR_ID_S3 0x5333 +#define PCI_DEVICE_ID_S3_TRIO 0x8811 +#define PCI_DEVICE_ID_S3_868 0x8880 +#define PCI_DEVICE_ID_S3_968 0x88f0 +#define PCI_DEVICE_ID_S3_SAVAGE4 0x8a25 +#define PCI_DEVICE_ID_S3_PROSAVAGE8 0x8d04 +#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00 + +#define PCI_VENDOR_ID_DUNORD 0x5544 +#define PCI_DEVICE_ID_DUNORD_I3000 0x0001 + +#define PCI_VENDOR_ID_DCI 0x6666 +#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001 +#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002 +#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004 + +#define PCI_VENDOR_ID_INTEL 0x8086 +#define PCI_DEVICE_ID_INTEL_EESSC 0x0008 +#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 +#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 +#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 +#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A +#define PCI_DEVICE_ID_INTEL_PXHV 0x032C +#define PCI_DEVICE_ID_INTEL_80332_0 0x0330 +#define PCI_DEVICE_ID_INTEL_80332_1 0x0332 +#define PCI_DEVICE_ID_INTEL_80333_0 0x0370 +#define PCI_DEVICE_ID_INTEL_80333_1 0x0372 +#define PCI_DEVICE_ID_INTEL_82375 0x0482 +#define PCI_DEVICE_ID_INTEL_82424 0x0483 +#define PCI_DEVICE_ID_INTEL_82378 0x0484 +#define PCI_DEVICE_ID_INTEL_I960 0x0960 +#define PCI_DEVICE_ID_INTEL_I960RM 0x0962 +#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062 +#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085 +#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F +#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 +#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 +#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 +#define PCI_DEVICE_ID_INTEL_7505_0 0x2550 +#define PCI_DEVICE_ID_INTEL_7205_0 0x255d +#define PCI_DEVICE_ID_INTEL_82437 0x122d +#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e +#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230 +#define PCI_DEVICE_ID_INTEL_82371MX 0x1234 +#define PCI_DEVICE_ID_INTEL_82441 0x1237 +#define PCI_DEVICE_ID_INTEL_82380FB 0x124b +#define PCI_DEVICE_ID_INTEL_82439 0x1250 +#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 +#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 +#define PCI_DEVICE_ID_INTEL_82845_HB 
0x1a30 +#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 +#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22 +#define PCI_DEVICE_ID_INTEL_CPT_LPC1 0x1c42 +#define PCI_DEVICE_ID_INTEL_CPT_LPC2 0x1c43 +#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 +#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 +#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 +#define PCI_DEVICE_ID_INTEL_82801AA_5 0x2415 +#define PCI_DEVICE_ID_INTEL_82801AA_6 0x2416 +#define PCI_DEVICE_ID_INTEL_82801AA_8 0x2418 +#define PCI_DEVICE_ID_INTEL_82801AB_0 0x2420 +#define PCI_DEVICE_ID_INTEL_82801AB_1 0x2421 +#define PCI_DEVICE_ID_INTEL_82801AB_3 0x2423 +#define PCI_DEVICE_ID_INTEL_82801AB_5 0x2425 +#define PCI_DEVICE_ID_INTEL_82801AB_6 0x2426 +#define PCI_DEVICE_ID_INTEL_82801AB_8 0x2428 +#define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440 +#define PCI_DEVICE_ID_INTEL_82801BA_2 0x2443 +#define PCI_DEVICE_ID_INTEL_82801BA_4 0x2445 +#define PCI_DEVICE_ID_INTEL_82801BA_6 0x2448 +#define PCI_DEVICE_ID_INTEL_82801BA_8 0x244a +#define PCI_DEVICE_ID_INTEL_82801BA_9 0x244b +#define PCI_DEVICE_ID_INTEL_82801BA_10 0x244c +#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e +#define PCI_DEVICE_ID_INTEL_82801E_0 0x2450 +#define PCI_DEVICE_ID_INTEL_82801E_11 0x245b +#define PCI_DEVICE_ID_INTEL_82801CA_0 0x2480 +#define PCI_DEVICE_ID_INTEL_82801CA_3 0x2483 +#define PCI_DEVICE_ID_INTEL_82801CA_5 0x2485 +#define PCI_DEVICE_ID_INTEL_82801CA_6 0x2486 +#define PCI_DEVICE_ID_INTEL_82801CA_10 0x248a +#define PCI_DEVICE_ID_INTEL_82801CA_11 0x248b +#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c +#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 +#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 +#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2 +#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 +#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 +#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 +#define PCI_DEVICE_ID_INTEL_82801DB_9 0x24c9 +#define PCI_DEVICE_ID_INTEL_82801DB_10 0x24ca +#define PCI_DEVICE_ID_INTEL_82801DB_11 0x24cb +#define PCI_DEVICE_ID_INTEL_82801DB_12 0x24cc +#define PCI_DEVICE_ID_INTEL_82801EB_0 0x24d0 +#define PCI_DEVICE_ID_INTEL_82801EB_1 0x24d1 +#define PCI_DEVICE_ID_INTEL_82801EB_3 0x24d3 +#define PCI_DEVICE_ID_INTEL_82801EB_5 0x24d5 +#define PCI_DEVICE_ID_INTEL_82801EB_6 0x24d6 +#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db +#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc +#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd +#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1 +#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2 +#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4 +#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6 +#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab +#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac +#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500 +#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 +#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 +#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531 +#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c +#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 +#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562 +#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570 +#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572 +#define PCI_DEVICE_ID_INTEL_82875_HB 0x2578 +#define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580 +#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 +#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590 +#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 +#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0 +#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5 +#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6 +#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770 +#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 +#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778 +#define 
PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0 +#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 +#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640 +#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641 +#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642 +#define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a +#define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d +#define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e +#define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f +#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670 +#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698 +#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b +#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e +#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8 +#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9 +#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0 +#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc +#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd +#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da +#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd +#define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de +#define PCI_DEVICE_ID_INTEL_ICH7_21 0x27df +#define PCI_DEVICE_ID_INTEL_ICH8_0 0x2810 +#define PCI_DEVICE_ID_INTEL_ICH8_1 0x2811 +#define PCI_DEVICE_ID_INTEL_ICH8_2 0x2812 +#define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814 +#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 +#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e +#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850 +#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910 +#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917 +#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912 +#define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913 +#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914 +#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919 +#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930 +#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 +#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 +#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18 +#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19 +#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a +#define PCI_DEVICE_ID_INTEL_I7_MC_TEST 0x2c1c +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL 0x2c20 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR 0x2c21 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK 0x2c22 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC 0x2c23 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL 0x2c28 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR 0x2c29 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK 0x2c2a +#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC 0x2c2b +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL 0x2c30 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32 +#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33 +#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41 +#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40 +#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a +#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b +#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c +#define PCI_DEVICE_ID_INTEL_X58_HUB_MGMT 0x342e +#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 +#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 +#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 +#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c +#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e +#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 +#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582 +#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590 +#define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592 +#define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595 +#define PCI_DEVICE_ID_INTEL_MCH_PA1 0x3596 +#define PCI_DEVICE_ID_INTEL_MCH_PB 0x3597 +#define PCI_DEVICE_ID_INTEL_MCH_PB1 0x3598 +#define 
PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 +#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a +#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e +#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b +#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c +#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 +#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 +#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 +#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 +#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a +#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 +#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 +#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00 +#define PCI_DEVICE_ID_INTEL_PCH_LPC_MAX 0x3b1f +#define PCI_DEVICE_ID_INTEL_PCH_SMBUS 0x3b30 +#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f +#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 +#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 +#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6 +#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 +#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 +#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 +#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff +#define PCI_DEVICE_ID_INTEL_TOLAPAI_0 0x5031 +#define PCI_DEVICE_ID_INTEL_TOLAPAI_1 0x5032 +#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 +#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 +#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020 +#define PCI_DEVICE_ID_INTEL_82437VX 0x7030 +#define PCI_DEVICE_ID_INTEL_82439TX 0x7100 +#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110 +#define PCI_DEVICE_ID_INTEL_82371AB 0x7111 +#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112 +#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113 +#define PCI_DEVICE_ID_INTEL_82810_MC1 0x7120 +#define PCI_DEVICE_ID_INTEL_82810_IG1 0x7121 +#define PCI_DEVICE_ID_INTEL_82810_MC3 0x7122 +#define PCI_DEVICE_ID_INTEL_82810_IG3 0x7123 +#define PCI_DEVICE_ID_INTEL_82810E_MC 0x7124 +#define PCI_DEVICE_ID_INTEL_82810E_IG 0x7125 +#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180 +#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181 +#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190 +#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191 +#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 +#define PCI_DEVICE_ID_INTEL_440MX 0x7195 +#define PCI_DEVICE_ID_INTEL_440MX_6 0x7196 +#define PCI_DEVICE_ID_INTEL_82443MX_0 0x7198 +#define PCI_DEVICE_ID_INTEL_82443MX_1 0x7199 +#define PCI_DEVICE_ID_INTEL_82443MX_3 0x719b +#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0 +#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2 +#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601 +#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119 +#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a +#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4 +#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5 +#define PCI_DEVICE_ID_INTEL_82451NX 0x84ca +#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb +#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea +#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500 +#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004 +#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 + +#define PCI_VENDOR_ID_SCALEMP 0x8686 +#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010 + +#define PCI_VENDOR_ID_COMPUTONE 0x8e0e +#define PCI_DEVICE_ID_COMPUTONE_IP2EX 0x0291 +#define PCI_DEVICE_ID_COMPUTONE_PG 0x0302 +#define PCI_SUBVENDOR_ID_COMPUTONE 0x8e0e +#define PCI_SUBDEVICE_ID_COMPUTONE_PG4 0x0001 
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG8 0x0002 +#define PCI_SUBDEVICE_ID_COMPUTONE_PG6 0x0003 + +#define PCI_VENDOR_ID_KTI 0x8e2e + +#define PCI_VENDOR_ID_ADAPTEC 0x9004 +#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078 +#define PCI_DEVICE_ID_ADAPTEC_7821 0x2178 +#define PCI_DEVICE_ID_ADAPTEC_38602 0x3860 +#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078 +#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578 +#define PCI_DEVICE_ID_ADAPTEC_3860 0x6038 +#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075 +#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078 +#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178 +#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078 +#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178 +#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278 +#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378 +#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478 +#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895 +#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078 +#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178 +#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278 +#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378 +#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478 +#define PCI_DEVICE_ID_ADAPTEC_7885 0x8578 +#define PCI_DEVICE_ID_ADAPTEC_7886 0x8678 +#define PCI_DEVICE_ID_ADAPTEC_7887 0x8778 +#define PCI_DEVICE_ID_ADAPTEC_7888 0x8878 + +#define PCI_VENDOR_ID_ADAPTEC2 0x9005 +#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010 +#define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011 +#define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013 +#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f +#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050 +#define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051 +#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f +#define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080 +#define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081 +#define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083 +#define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f +#define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0 +#define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1 +#define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3 +#define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf +#define PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN 0x0500 +#define PCI_DEVICE_ID_ADAPTEC2_SCAMP 0x0503 + +#define PCI_VENDOR_ID_HOLTEK 0x9412 +#define PCI_DEVICE_ID_HOLTEK_6565 0x6565 + +#define PCI_VENDOR_ID_NETMOS 0x9710 +#define PCI_DEVICE_ID_NETMOS_9705 0x9705 +#define PCI_DEVICE_ID_NETMOS_9715 0x9715 +#define PCI_DEVICE_ID_NETMOS_9735 0x9735 +#define PCI_DEVICE_ID_NETMOS_9745 0x9745 +#define PCI_DEVICE_ID_NETMOS_9755 0x9755 +#define PCI_DEVICE_ID_NETMOS_9805 0x9805 +#define PCI_DEVICE_ID_NETMOS_9815 0x9815 +#define PCI_DEVICE_ID_NETMOS_9835 0x9835 +#define PCI_DEVICE_ID_NETMOS_9845 0x9845 +#define PCI_DEVICE_ID_NETMOS_9855 0x9855 +#define PCI_DEVICE_ID_NETMOS_9865 0x9865 +#define PCI_DEVICE_ID_NETMOS_9901 0x9901 + +#define PCI_VENDOR_ID_3COM_2 0xa727 + +#define PCI_VENDOR_ID_DIGIUM 0xd161 +#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410 + +#define PCI_SUBVENDOR_ID_EXSYS 0xd84d +#define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014 +#define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055 + +#define PCI_VENDOR_ID_TIGERJET 0xe159 +#define PCI_DEVICE_ID_TIGERJET_300 0x0001 +#define PCI_DEVICE_ID_TIGERJET_100 0x0002 + +#define PCI_VENDOR_ID_XILINX_RME 0xea60 +#define PCI_DEVICE_ID_RME_DIGI32 0x9896 +#define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897 +#define PCI_DEVICE_ID_RME_DIGI32_8 0x9898 diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h new file mode 100644 index 0000000..091c13c --- /dev/null +++ b/include/linux/pm_qos_params.h @@ -0,0 +1,26 @@ +/* interface for the pm_qos_power infrastructure of the linux kernel. 
+ * + * Mark Gross + */ +#include +#include +#include + +#define PM_QOS_RESERVED 0 +#define PM_QOS_CPU_DMA_LATENCY 1 +#define PM_QOS_NETWORK_LATENCY 2 +#define PM_QOS_NETWORK_THROUGHPUT 3 +#define PM_QOS_SYSTEM_BUS_FREQ 4 + +#define PM_QOS_NUM_CLASSES 5 +#define PM_QOS_DEFAULT_VALUE -1 + +int pm_qos_add_requirement(int qos, char *name, s32 value); +int pm_qos_update_requirement(int qos, char *name, s32 new_value); +void pm_qos_remove_requirement(int qos, char *name); + +int pm_qos_requirement(int qos); + +int pm_qos_add_notifier(int qos, struct notifier_block *notifier); +int pm_qos_remove_notifier(int qos, struct notifier_block *notifier); + diff --git a/include/linux/rfkill_backport.h b/include/linux/rfkill_backport.h new file mode 100644 index 0000000..4fdb5ed --- /dev/null +++ b/include/linux/rfkill_backport.h @@ -0,0 +1,391 @@ +#ifndef __RFKILL_H +#define __RFKILL_H + +/* + * Copyright (C) 2006 - 2007 Ivo van Doorn + * Copyright (C) 2007 Dmitry Torokhov + * Copyright 2009 Johannes Berg + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include + +/* define userspace visible states */ +#define RFKILL_STATE_SOFT_BLOCKED 0 +#define RFKILL_STATE_UNBLOCKED 1 +#define RFKILL_STATE_HARD_BLOCKED 2 + +/** + * enum rfkill_type - type of rfkill switch. + * + * @RFKILL_TYPE_ALL: toggles all switches (requests only - not a switch type) + * @RFKILL_TYPE_WLAN: switch is on a 802.11 wireless network device. + * @RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device. + * @RFKILL_TYPE_UWB: switch is on a ultra wideband device. + * @RFKILL_TYPE_WIMAX: switch is on a WiMAX device. + * @RFKILL_TYPE_WWAN: switch is on a wireless WAN device. + * @RFKILL_TYPE_GPS: switch is on a GPS device. + * @RFKILL_TYPE_FM: switch is on a FM radio device. + * @NUM_RFKILL_TYPES: number of defined rfkill types + */ +enum rfkill_type { + RFKILL_TYPE_ALL = 0, + RFKILL_TYPE_WLAN, + RFKILL_TYPE_BLUETOOTH, + RFKILL_TYPE_UWB, + RFKILL_TYPE_WIMAX, + RFKILL_TYPE_WWAN, + RFKILL_TYPE_GPS, + RFKILL_TYPE_FM, + NUM_RFKILL_TYPES, +}; + +/** + * enum rfkill_operation - operation types + * @RFKILL_OP_ADD: a device was added + * @RFKILL_OP_DEL: a device was removed + * @RFKILL_OP_CHANGE: a device's state changed -- userspace changes one device + * @RFKILL_OP_CHANGE_ALL: userspace changes all devices (of a type, or all) + */ +enum rfkill_operation { + RFKILL_OP_ADD = 0, + RFKILL_OP_DEL, + RFKILL_OP_CHANGE, + RFKILL_OP_CHANGE_ALL, +}; + +/** + * struct rfkill_event - events for userspace on /dev/rfkill + * @idx: index of dev rfkill + * @type: type of the rfkill struct + * @op: operation code + * @hard: hard state (0/1) + * @soft: soft state (0/1) + * + * Structure used for userspace communication on /dev/rfkill, + * used for events from the kernel and control to the kernel. 
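The pm_qos_params.h interface re-exported above is the old, name-keyed PM QoS API (a requirement is identified by a QoS class plus a caller-chosen string). A minimal sketch of how a driver might take and later release a network-latency requirement; the "mydrv" name and the 50-microsecond value are invented for illustration, and the units are those of the chosen class:

#include <linux/pm_qos_params.h>

/* Ask the PM core to keep network latency low while we are active. */
static int mydrv_request_low_latency(void)
{
	return pm_qos_add_requirement(PM_QOS_NETWORK_LATENCY, "mydrv", 50);
}

/* Relax the request without dropping it (back to "don't care"). */
static void mydrv_relax_latency(void)
{
	pm_qos_update_requirement(PM_QOS_NETWORK_LATENCY, "mydrv",
				  PM_QOS_DEFAULT_VALUE);
}

/* Drop the requirement entirely, e.g. on interface down. */
static void mydrv_release_latency(void)
{
	pm_qos_remove_requirement(PM_QOS_NETWORK_LATENCY, "mydrv");
}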
+ */ +struct rfkill_event { + __u32 idx; + __u8 type; + __u8 op; + __u8 soft, hard; +} __packed; + +/* + * We are planning to be backward and forward compatible with changes + * to the event struct, by adding new, optional, members at the end. + * When reading an event (whether the kernel from userspace or vice + * versa) we need to accept anything that's at least as large as the + * version 1 event size, but might be able to accept other sizes in + * the future. + * + * One exception is the kernel -- we already have two event sizes in + * that we've made the 'hard' member optional since our only option + * is to ignore it anyway. + */ +#define RFKILL_EVENT_SIZE_V1 8 + +/* ioctl for turning off rfkill-input (if present) */ +#define RFKILL_IOC_MAGIC 'R' +#define RFKILL_IOC_NOINPUT 1 +#define RFKILL_IOCTL_NOINPUT _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT) + +/* and that's all userspace gets */ +#ifdef __KERNEL__ +/* don't allow anyone to use these in the kernel */ +enum rfkill_user_states { + RFKILL_USER_STATE_SOFT_BLOCKED = RFKILL_STATE_SOFT_BLOCKED, + RFKILL_USER_STATE_UNBLOCKED = RFKILL_STATE_UNBLOCKED, + RFKILL_USER_STATE_HARD_BLOCKED = RFKILL_STATE_HARD_BLOCKED, +}; +#undef RFKILL_STATE_SOFT_BLOCKED +#undef RFKILL_STATE_UNBLOCKED +#undef RFKILL_STATE_HARD_BLOCKED + +#include +#include +#include +#include +#include +#include + +/* this is opaque */ +struct rfkill; + +/** + * struct rfkill_ops - rfkill driver methods + * + * @poll: poll the rfkill block state(s) -- only assign this method + * when you need polling. When called, simply call one of the + * rfkill_set{,_hw,_sw}_state family of functions. If the hw + * is getting unblocked you need to take into account the return + * value of those functions to make sure the software block is + * properly used. + * @query: query the rfkill block state(s) and call exactly one of the + * rfkill_set{,_hw,_sw}_state family of functions. Assign this + * method if input events can cause hardware state changes to make + * the rfkill core query your driver before setting a requested + * block. + * @set_block: turn the transmitter on (blocked == false) or off + * (blocked == true) -- ignore and return 0 when hard blocked. + * This callback must be assigned. + */ +struct rfkill_ops { + void (*poll)(struct rfkill *rfkill, void *data); + void (*query)(struct rfkill *rfkill, void *data); + int (*set_block)(void *data, bool blocked); +}; + +#if defined(CONFIG_RFKILL_BACKPORT) || defined(CONFIG_RFKILL_BACKPORT_MODULE) +/** + * rfkill_alloc - allocate rfkill structure + * @name: name of the struct -- the string is not copied internally + * @parent: device that has rf switch on it + * @type: type of the switch (RFKILL_TYPE_*) + * @ops: rfkill methods + * @ops_data: data passed to each method + * + * This function should be called by the transmitter driver to allocate an + * rfkill structure. Returns %NULL on failure. + */ +struct rfkill * __must_check rfkill_alloc(const char *name, + struct device *parent, + const enum rfkill_type type, + const struct rfkill_ops *ops, + void *ops_data); + +/** + * rfkill_register - Register a rfkill structure. + * @rfkill: rfkill structure to be registered + * + * This function should be called by the transmitter driver to register + * the rfkill structure. Before calling this function the driver needs + * to be ready to service method calls from rfkill. + * + * If rfkill_init_sw_state() is not called before registration, + * set_block() will be called to initialize the software blocked state + * to a default value. 
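On the userspace side of the /dev/rfkill ABI described above, a reader is expected to accept any event that is at least RFKILL_EVENT_SIZE_V1 (8) bytes long. A standalone sketch under that assumption; the local struct simply mirrors the v1 layout shown in the header:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Userspace mirror of the v1 struct rfkill_event layout. */
struct rfkill_event_v1 {
	uint32_t idx;
	uint8_t  type;
	uint8_t  op;
	uint8_t  soft, hard;
} __attribute__((packed));

int main(void)
{
	struct rfkill_event_v1 ev;
	int fd = open("/dev/rfkill", O_RDONLY);

	if (fd < 0)
		return 1;
	/* One event per read(); accept anything >= the v1 size (8 bytes). */
	while (read(fd, &ev, sizeof(ev)) >= 8)
		printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
	close(fd);
	return 0;
}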
+ * + * If the hardware blocked state is not set before registration, + * it is assumed to be unblocked. + */ +int __must_check rfkill_register(struct rfkill *rfkill); + +/** + * rfkill_pause_polling(struct rfkill *rfkill) + * + * Pause polling -- say transmitter is off for other reasons. + * NOTE: not necessary for suspend/resume -- in that case the + * core stops polling anyway + */ +void rfkill_pause_polling(struct rfkill *rfkill); + +/** + * rfkill_resume_polling(struct rfkill *rfkill) + * + * Pause polling -- say transmitter is off for other reasons. + * NOTE: not necessary for suspend/resume -- in that case the + * core stops polling anyway + */ +void rfkill_resume_polling(struct rfkill *rfkill); + + +/** + * rfkill_unregister - Unregister a rfkill structure. + * @rfkill: rfkill structure to be unregistered + * + * This function should be called by the network driver during device + * teardown to destroy rfkill structure. Until it returns, the driver + * needs to be able to service method calls. + */ +void rfkill_unregister(struct rfkill *rfkill); + +/** + * rfkill_destroy - free rfkill structure + * @rfkill: rfkill structure to be destroyed + * + * Destroys the rfkill structure. + */ +void rfkill_destroy(struct rfkill *rfkill); + +/** + * rfkill_set_hw_state - Set the internal rfkill hardware block state + * @rfkill: pointer to the rfkill class to modify. + * @state: the current hardware block state to set + * + * rfkill drivers that get events when the hard-blocked state changes + * use this function to notify the rfkill core (and through that also + * userspace) of the current state. They should also use this after + * resume if the state could have changed. + * + * You need not (but may) call this function if poll_state is assigned. + * + * This function can be called in any context, even from within rfkill + * callbacks. + * + * The function returns the combined block state (true if transmitter + * should be blocked) so that drivers need not keep track of the soft + * block state -- which they might not be able to. + */ +bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_set_sw_state - Set the internal rfkill software block state + * @rfkill: pointer to the rfkill class to modify. + * @state: the current software block state to set + * + * rfkill drivers that get events when the soft-blocked state changes + * (yes, some platforms directly act on input but allow changing again) + * use this function to notify the rfkill core (and through that also + * userspace) of the current state. + * + * Drivers should also call this function after resume if the state has + * been changed by the user. This only makes sense for "persistent" + * devices (see rfkill_init_sw_state()). + * + * This function can be called in any context, even from within rfkill + * callbacks. + * + * The function returns the combined block state (true if transmitter + * should be blocked). + */ +bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_init_sw_state - Initialize persistent software block state + * @rfkill: pointer to the rfkill class to modify. + * @state: the current software block state to set + * + * rfkill drivers that preserve their software block state over power off + * use this function to notify the rfkill core (and through that also + * userspace) of their initial state. It should only be used before + * registration. + * + * In addition, it marks the device as "persistent", an attribute which + * can be read by userspace. 
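Putting the driver-side API above together, a transmitter driver allocates an rfkill instance with a set_block() callback, registers it, and reports hardware kill-switch changes via rfkill_set_hw_state(). A minimal sketch; the mydrv_* names and the priv layout are hypothetical, and the include path follows this backport tree (mainline uses <linux/rfkill.h>):

#include <linux/errno.h>
#include <linux/rfkill_backport.h>

struct mydrv_priv {
	struct device *dev;
	struct rfkill *rfkill;
};

static int mydrv_set_block(void *data, bool blocked)
{
	/* 'data' is the ops_data pointer passed to rfkill_alloc() below.
	 * Program the transmitter off when blocked, on otherwise. */
	return 0;
}

static const struct rfkill_ops mydrv_rfkill_ops = {
	.set_block = mydrv_set_block,
};

static int mydrv_rfkill_init(struct mydrv_priv *priv)
{
	int err;

	priv->rfkill = rfkill_alloc("mydrv-wlan", priv->dev,
				    RFKILL_TYPE_WLAN,
				    &mydrv_rfkill_ops, priv);
	if (!priv->rfkill)
		return -ENOMEM;

	err = rfkill_register(priv->rfkill);
	if (err) {
		rfkill_destroy(priv->rfkill);
		return err;
	}
	/* Report the current hardware switch state (here: not killed). */
	rfkill_set_hw_state(priv->rfkill, false);
	return 0;
}

static void mydrv_rfkill_exit(struct mydrv_priv *priv)
{
	rfkill_unregister(priv->rfkill);
	rfkill_destroy(priv->rfkill);
}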
Persistent devices are expected to preserve + * their own state when suspended. + */ +void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_set_states - Set the internal rfkill block states + * @rfkill: pointer to the rfkill class to modify. + * @sw: the current software block state to set + * @hw: the current hardware block state to set + * + * This function can be called in any context, even from within rfkill + * callbacks. + */ +void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); + +/** + * rfkill_blocked - query rfkill block + * + * @rfkill: rfkill struct to query + */ +bool rfkill_blocked(struct rfkill *rfkill); +#else /* !RFKILL */ +static inline struct rfkill * __must_check +rfkill_alloc(const char *name, + struct device *parent, + const enum rfkill_type type, + const struct rfkill_ops *ops, + void *ops_data) +{ + return ERR_PTR(-ENODEV); +} + +static inline int __must_check rfkill_register(struct rfkill *rfkill) +{ + if (rfkill == ERR_PTR(-ENODEV)) + return 0; + return -EINVAL; +} + +static inline void rfkill_pause_polling(struct rfkill *rfkill) +{ +} + +static inline void rfkill_resume_polling(struct rfkill *rfkill) +{ +} + +static inline void rfkill_unregister(struct rfkill *rfkill) +{ +} + +static inline void rfkill_destroy(struct rfkill *rfkill) +{ +} + +static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) +{ + return blocked; +} + +static inline bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) +{ + return blocked; +} + +static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) +{ +} + +static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) +{ +} + +static inline bool rfkill_blocked(struct rfkill *rfkill) +{ + return false; +} +#endif /* RFKILL || RFKILL_MODULE */ + + +#ifdef CONFIG_RFKILL_BACKPORT_LEDS +/** + * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED. + * This function might return a NULL pointer if registering of the + * LED trigger failed. Use this as "default_trigger" for the LED. + */ +const char *rfkill_get_led_trigger_name(struct rfkill *rfkill); + +/** + * rfkill_set_led_trigger_name -- set the LED trigger name + * @rfkill: rfkill struct + * @name: LED trigger name + * + * This function sets the LED trigger name of the radio LED + * trigger that rfkill creates. It is optional, but if called + * must be called before rfkill_register() to be effective. + */ +void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name); +#else +static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill) +{ + return NULL; +} + +static inline void +rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name) +{ +} +#endif + +#endif /* __KERNEL__ */ + +#endif /* RFKILL_H */ diff --git a/include/linux/spi/libertas_spi.h b/include/linux/spi/libertas_spi.h new file mode 100644 index 0000000..1b5d538 --- /dev/null +++ b/include/linux/spi/libertas_spi.h @@ -0,0 +1,29 @@ +/* + * board-specific data for the libertas_spi driver. + * + * Copyright 2008 Analog Devices Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ +#ifndef _LIBERTAS_SPI_H_ +#define _LIBERTAS_SPI_H_ + +struct spi_device; + +struct libertas_spi_platform_data { + /* There are two ways to read data from the WLAN module's SPI + * interface. Setting 0 or 1 here controls which one is used. + * + * Usually you want to set use_dummy_writes = 1. + * However, if that doesn't work or if you are using a slow SPI clock + * speed, you may want to use 0 here. */ + u16 use_dummy_writes; + + /* Board specific setup/teardown */ + int (*setup)(struct spi_device *spi); + int (*teardown)(struct spi_device *spi); +}; +#endif diff --git a/include/linux/spi/wl12xx.h b/include/linux/spi/wl12xx.h new file mode 100644 index 0000000..aed64ed --- /dev/null +++ b/include/linux/spi/wl12xx.h @@ -0,0 +1,32 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Kalle Valo + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef _LINUX_SPI_WL12XX_H +#define _LINUX_SPI_WL12XX_H + +struct wl12xx_platform_data { + void (*set_power)(bool enable); + bool use_eeprom; +}; + +#endif diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h new file mode 100644 index 0000000..24f9885 --- /dev/null +++ b/include/linux/ssb/ssb.h @@ -0,0 +1,683 @@ +#ifndef LINUX_SSB_H_ +#define LINUX_SSB_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include + + +struct pcmcia_device; +struct ssb_bus; +struct ssb_driver; + +struct ssb_sprom { + u8 revision; + u8 il0mac[6]; /* MAC address for 802.11b/g */ + u8 et0mac[6]; /* MAC address for Ethernet */ + u8 et1mac[6]; /* MAC address for 802.11a */ + u8 et0phyaddr; /* MII address for enet0 */ + u8 et1phyaddr; /* MII address for enet1 */ + u8 et0mdcport; /* MDIO for enet0 */ + u8 et1mdcport; /* MDIO for enet1 */ + u8 board_rev; /* Board revision number from SPROM. 
*/ + u8 country_code; /* Country Code */ + u8 ant_available_a; /* 2GHz antenna available bits (up to 4) */ + u8 ant_available_bg; /* 5GHz antenna available bits (up to 4) */ + u16 pa0b0; + u16 pa0b1; + u16 pa0b2; + u16 pa1b0; + u16 pa1b1; + u16 pa1b2; + u16 pa1lob0; + u16 pa1lob1; + u16 pa1lob2; + u16 pa1hib0; + u16 pa1hib1; + u16 pa1hib2; + u8 gpio0; /* GPIO pin 0 */ + u8 gpio1; /* GPIO pin 1 */ + u8 gpio2; /* GPIO pin 2 */ + u8 gpio3; /* GPIO pin 3 */ + u16 maxpwr_bg; /* 2.4GHz Amplifier Max Power (in dBm Q5.2) */ + u16 maxpwr_al; /* 5.2GHz Amplifier Max Power (in dBm Q5.2) */ + u16 maxpwr_a; /* 5.3GHz Amplifier Max Power (in dBm Q5.2) */ + u16 maxpwr_ah; /* 5.8GHz Amplifier Max Power (in dBm Q5.2) */ + u8 itssi_a; /* Idle TSSI Target for A-PHY */ + u8 itssi_bg; /* Idle TSSI Target for B/G-PHY */ + u8 tri2g; /* 2.4GHz TX isolation */ + u8 tri5gl; /* 5.2GHz TX isolation */ + u8 tri5g; /* 5.3GHz TX isolation */ + u8 tri5gh; /* 5.8GHz TX isolation */ + u8 rxpo2g; /* 2GHz RX power offset */ + u8 rxpo5g; /* 5GHz RX power offset */ + u8 rssisav2g; /* 2GHz RSSI params */ + u8 rssismc2g; + u8 rssismf2g; + u8 bxa2g; /* 2GHz BX arch */ + u8 rssisav5g; /* 5GHz RSSI params */ + u8 rssismc5g; + u8 rssismf5g; + u8 bxa5g; /* 5GHz BX arch */ + u16 cck2gpo; /* CCK power offset */ + u32 ofdm2gpo; /* 2.4GHz OFDM power offset */ + u32 ofdm5glpo; /* 5.2GHz OFDM power offset */ + u32 ofdm5gpo; /* 5.3GHz OFDM power offset */ + u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */ + u16 boardflags_lo; /* Board flags (bits 0-15) */ + u16 boardflags_hi; /* Board flags (bits 16-31) */ + u16 boardflags2_lo; /* Board flags (bits 32-47) */ + u16 boardflags2_hi; /* Board flags (bits 48-63) */ + /* TODO store board flags in a single u64 */ + + /* Antenna gain values for up to 4 antennas + * on each band. Values in dBm/4 (Q5.2). Negative gain means the + * loss in the connectors is bigger than the gain. */ + struct { + struct { + s8 a0, a1, a2, a3; + } ghz24; /* 2.4GHz band */ + struct { + s8 a0, a1, a2, a3; + } ghz5; /* 5GHz band */ + } antenna_gain; + + /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */ +}; + +/* Information about the PCB the circuitry is soldered on. */ +struct ssb_boardinfo { + u16 vendor; + u16 type; + u16 rev; +}; + + +struct ssb_device; +/* Lowlevel read/write operations on the device MMIO. + * Internal, don't use that outside of ssb. */ +struct ssb_bus_ops { + u8 (*read8)(struct ssb_device *dev, u16 offset); + u16 (*read16)(struct ssb_device *dev, u16 offset); + u32 (*read32)(struct ssb_device *dev, u16 offset); + void (*write8)(struct ssb_device *dev, u16 offset, u8 value); + void (*write16)(struct ssb_device *dev, u16 offset, u16 value); + void (*write32)(struct ssb_device *dev, u16 offset, u32 value); +#ifdef CONFIG_SSB_BLOCKIO + void (*block_read)(struct ssb_device *dev, void *buffer, + size_t count, u16 offset, u8 reg_width); + void (*block_write)(struct ssb_device *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width); +#endif +}; + + +/* Core-ID values. 
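The struct ssb_sprom above holds the parsed SPROM/EEPROM contents; drivers reach it through the struct ssb_bus::sprom member defined later in this header. A small sketch of the common case of pulling the 802.11 MAC address out of it, with a hypothetical mydrv_ helper name:

#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/ssb/ssb.h>

/* Copy the wireless MAC from the SPROM, falling back to a random one. */
static void mydrv_read_mac(struct ssb_device *dev, u8 mac[6])
{
	struct ssb_sprom *sprom = &dev->bus->sprom;

	if (is_valid_ether_addr(sprom->il0mac))
		memcpy(mac, sprom->il0mac, 6);
	else
		random_ether_addr(mac);
}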
*/ +#define SSB_DEV_CHIPCOMMON 0x800 +#define SSB_DEV_ILINE20 0x801 +#define SSB_DEV_SDRAM 0x803 +#define SSB_DEV_PCI 0x804 +#define SSB_DEV_MIPS 0x805 +#define SSB_DEV_ETHERNET 0x806 +#define SSB_DEV_V90 0x807 +#define SSB_DEV_USB11_HOSTDEV 0x808 +#define SSB_DEV_ADSL 0x809 +#define SSB_DEV_ILINE100 0x80A +#define SSB_DEV_IPSEC 0x80B +#define SSB_DEV_PCMCIA 0x80D +#define SSB_DEV_INTERNAL_MEM 0x80E +#define SSB_DEV_MEMC_SDRAM 0x80F +#define SSB_DEV_EXTIF 0x811 +#define SSB_DEV_80211 0x812 +#define SSB_DEV_MIPS_3302 0x816 +#define SSB_DEV_USB11_HOST 0x817 +#define SSB_DEV_USB11_DEV 0x818 +#define SSB_DEV_USB20_HOST 0x819 +#define SSB_DEV_USB20_DEV 0x81A +#define SSB_DEV_SDIO_HOST 0x81B +#define SSB_DEV_ROBOSWITCH 0x81C +#define SSB_DEV_PARA_ATA 0x81D +#define SSB_DEV_SATA_XORDMA 0x81E +#define SSB_DEV_ETHERNET_GBIT 0x81F +#define SSB_DEV_PCIE 0x820 +#define SSB_DEV_MIMO_PHY 0x821 +#define SSB_DEV_SRAM_CTRLR 0x822 +#define SSB_DEV_MINI_MACPHY 0x823 +#define SSB_DEV_ARM_1176 0x824 +#define SSB_DEV_ARM_7TDMI 0x825 + +/* Vendor-ID values */ +#define SSB_VENDOR_BROADCOM 0x4243 + +/* Some kernel subsystems poke with dev->drvdata, so we must use the + * following ugly workaround to get from struct device to struct ssb_device */ +struct __ssb_dev_wrapper { + struct device dev; + struct ssb_device *sdev; +}; + +struct ssb_device { + /* Having a copy of the ops pointer in each dev struct + * is an optimization. */ + const struct ssb_bus_ops *ops; + + struct device *dev; + + struct ssb_bus *bus; + struct ssb_device_id id; + + u8 core_index; + unsigned int irq; + + /* Internal-only stuff follows. */ + void *drvdata; /* Per-device data */ + void *devtypedata; /* Per-devicetype (eg 802.11) data */ +}; + +/* Go from struct device to struct ssb_device. */ +static inline +struct ssb_device * dev_to_ssb_dev(struct device *dev) +{ + struct __ssb_dev_wrapper *wrap; + wrap = container_of(dev, struct __ssb_dev_wrapper, dev); + return wrap->sdev; +} + +/* Device specific user data */ +static inline +void ssb_set_drvdata(struct ssb_device *dev, void *data) +{ + dev->drvdata = data; +} +static inline +void * ssb_get_drvdata(struct ssb_device *dev) +{ + return dev->drvdata; +} + +/* Devicetype specific user data. 
This is per device-type (not per device) */ +void ssb_set_devtypedata(struct ssb_device *dev, void *data); +static inline +void * ssb_get_devtypedata(struct ssb_device *dev) +{ + return dev->devtypedata; +} + + +struct ssb_driver { + const char *name; + const struct ssb_device_id *id_table; + + int (*probe)(struct ssb_device *dev, const struct ssb_device_id *id); + void (*remove)(struct ssb_device *dev); + int (*suspend)(struct ssb_device *dev, pm_message_t state); + int (*resume)(struct ssb_device *dev); + void (*shutdown)(struct ssb_device *dev); + + struct device_driver drv; +}; +#define drv_to_ssb_drv(_drv) container_of(_drv, struct ssb_driver, drv) + +extern int __ssb_driver_register(struct ssb_driver *drv, struct module *owner); +static inline int ssb_driver_register(struct ssb_driver *drv) +{ + return __ssb_driver_register(drv, THIS_MODULE); +} +extern void ssb_driver_unregister(struct ssb_driver *drv); + + + + +enum ssb_bustype { + SSB_BUSTYPE_SSB, /* This SSB bus is the system bus */ + SSB_BUSTYPE_PCI, /* SSB is connected to PCI bus */ + SSB_BUSTYPE_PCMCIA, /* SSB is connected to PCMCIA bus */ + SSB_BUSTYPE_SDIO, /* SSB is connected to SDIO bus */ +}; + +/* board_vendor */ +#define SSB_BOARDVENDOR_BCM 0x14E4 /* Broadcom */ +#define SSB_BOARDVENDOR_DELL 0x1028 /* Dell */ +#define SSB_BOARDVENDOR_HP 0x0E11 /* HP */ +/* board_type */ +#define SSB_BOARD_BCM94306MP 0x0418 +#define SSB_BOARD_BCM4309G 0x0421 +#define SSB_BOARD_BCM4306CB 0x0417 +#define SSB_BOARD_BCM4309MP 0x040C +#define SSB_BOARD_MP4318 0x044A +#define SSB_BOARD_BU4306 0x0416 +#define SSB_BOARD_BU4309 0x040A +/* chip_package */ +#define SSB_CHIPPACK_BCM4712S 1 /* Small 200pin 4712 */ +#define SSB_CHIPPACK_BCM4712M 2 /* Medium 225pin 4712 */ +#define SSB_CHIPPACK_BCM4712L 0 /* Large 340pin 4712 */ + +#include +#include +#include +#include + +struct ssb_bus { + /* The MMIO area. */ + void __iomem *mmio; + + const struct ssb_bus_ops *ops; + + /* The core currently mapped into the MMIO window. + * Not valid on all host-buses. So don't use outside of SSB. */ + struct ssb_device *mapped_device; + union { + /* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */ + u8 mapped_pcmcia_seg; + /* Current SSB base address window for SDIO. */ + u32 sdio_sbaddr; + }; + /* Lock for core and segment switching. + * On PCMCIA-host busses this is used to protect the whole MMIO access. */ + spinlock_t bar_lock; + + /* The host-bus this backplane is running on. */ + enum ssb_bustype bustype; + /* Pointers to the host-bus. Check bustype before using any of these pointers. */ + union { + /* Pointer to the PCI bus (only valid if bustype == SSB_BUSTYPE_PCI). */ + struct pci_dev *host_pci; + /* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */ + struct pcmcia_device *host_pcmcia; + /* Pointer to the SDIO device (only if bustype == SSB_BUSTYPE_SDIO). */ + struct sdio_func *host_sdio; + }; + + /* See enum ssb_quirks */ + unsigned int quirks; + +#ifdef CONFIG_SSB_SPROM + /* Mutex to protect the SPROM writing. */ + struct mutex sprom_mutex; +#endif + + /* ID information about the Chip. */ + u16 chip_id; + u16 chip_rev; + u16 sprom_size; /* number of words in sprom */ + u8 chip_package; + + /* List of devices (cores) on the backplane. */ + struct ssb_device devices[SSB_MAX_NR_CORES]; + u8 nr_devices; + + /* Software ID number for this bus. */ + unsigned int busnumber; + + /* The ChipCommon device (if available). */ + struct ssb_chipcommon chipco; + /* The PCI-core device (if available). 
*/ + struct ssb_pcicore pcicore; + /* The MIPS-core device (if available). */ + struct ssb_mipscore mipscore; + /* The EXTif-core device (if available). */ + struct ssb_extif extif; + + /* The following structure elements are not available in early + * SSB initialization. Though, they are available for regular + * registered drivers at any stage. So be careful when + * using them in the ssb core code. */ + + /* ID information about the PCB. */ + struct ssb_boardinfo boardinfo; + /* Contents of the SPROM. */ + struct ssb_sprom sprom; + /* If the board has a cardbus slot, this is set to true. */ + bool has_cardbus_slot; + +#ifdef CONFIG_SSB_EMBEDDED + /* Lock for GPIO register access. */ + spinlock_t gpio_lock; +#endif /* EMBEDDED */ + + /* Internal-only stuff follows. Do not touch. */ + struct list_head list; +#ifdef CONFIG_SSB_DEBUG + /* Is the bus already powered up? */ + bool powered_up; + int power_warn_count; +#endif /* DEBUG */ +}; + +enum ssb_quirks { + /* SDIO connected card requires performing a read after writing a 32-bit value */ + SSB_QUIRK_SDIO_READ_AFTER_WRITE32 = (1 << 0), +}; + +/* The initialization-invariants. */ +struct ssb_init_invariants { + /* Versioning information about the PCB. */ + struct ssb_boardinfo boardinfo; + /* The SPROM information. That's either stored in an + * EEPROM or NVRAM on the board. */ + struct ssb_sprom sprom; + /* If the board has a cardbus slot, this is set to true. */ + bool has_cardbus_slot; +}; +/* Type of function to fetch the invariants. */ +typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus, + struct ssb_init_invariants *iv); + +/* Register a SSB system bus. get_invariants() is called after the + * basic system devices are initialized. + * The invariants are usually fetched from some NVRAM. + * Put the invariants into the struct pointed to by iv. */ +extern int ssb_bus_ssbbus_register(struct ssb_bus *bus, + unsigned long baseaddr, + ssb_invariants_func_t get_invariants); +#ifdef CONFIG_SSB_PCIHOST +extern int ssb_bus_pcibus_register(struct ssb_bus *bus, + struct pci_dev *host_pci); +#endif /* CONFIG_SSB_PCIHOST */ +#ifdef CONFIG_SSB_PCMCIAHOST +extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus, + struct pcmcia_device *pcmcia_dev, + unsigned long baseaddr); +#endif /* CONFIG_SSB_PCMCIAHOST */ +#ifdef CONFIG_SSB_SDIOHOST +extern int ssb_bus_sdiobus_register(struct ssb_bus *bus, + struct sdio_func *sdio_func, + unsigned int quirks); +#endif /* CONFIG_SSB_SDIOHOST */ + + +extern void ssb_bus_unregister(struct ssb_bus *bus); + +/* Set a fallback SPROM. + * See kdoc at the function definition for complete documentation. */ +extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom); + +/* Suspend a SSB bus. + * Call this from the parent bus suspend routine. */ +extern int ssb_bus_suspend(struct ssb_bus *bus); +/* Resume a SSB bus. + * Call this from the parent bus resume routine. */ +extern int ssb_bus_resume(struct ssb_bus *bus); + +extern u32 ssb_clockspeed(struct ssb_bus *bus); + +/* Is the device enabled in hardware? */ +int ssb_device_is_enabled(struct ssb_device *dev); +/* Enable a device and pass device-specific SSB_TMSLOW flags. + * If no device-specific flags are available, use 0. */ +void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags); +/* Disable a device in hardware and pass SSB_TMSLOW flags (if any). */ +void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags); + + +/* Device MMIO register read/write functions. 
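The driver model declared above (struct ssb_driver, ssb_driver_register(), the per-core enable/disable helpers) is used the same way by b43. A skeleton sketch follows; the mydrv_* names are invented, and SSB_DEVICE()/SSB_DEVTABLE_END/SSB_ANY_REV come from <linux/mod_devicetable.h>:

#include <linux/module.h>
#include <linux/ssb/ssb.h>

static const struct ssb_device_id mydrv_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, SSB_ANY_REV),
	SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, mydrv_ssb_tbl);

static int mydrv_probe(struct ssb_device *dev, const struct ssb_device_id *id)
{
	/* Enable this core before touching its MMIO; real drivers also call
	 * ssb_bus_powerup(), declared further down in this header. */
	ssb_device_enable(dev, 0);
	ssb_set_drvdata(dev, NULL /* per-device state would go here */);
	return 0;
}

static void mydrv_remove(struct ssb_device *dev)
{
	ssb_device_disable(dev, 0);
}

static struct ssb_driver mydrv_ssb_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= mydrv_ssb_tbl,
	.probe		= mydrv_probe,
	.remove		= mydrv_remove,
};

static int __init mydrv_init(void)
{
	return ssb_driver_register(&mydrv_ssb_driver);
}
module_init(mydrv_init);

static void __exit mydrv_exit(void)
{
	ssb_driver_unregister(&mydrv_ssb_driver);
}
module_exit(mydrv_exit);

MODULE_LICENSE("GPL");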
*/ +static inline u8 ssb_read8(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read8(dev, offset); +} +static inline u16 ssb_read16(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read16(dev, offset); +} +static inline u32 ssb_read32(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read32(dev, offset); +} +static inline void ssb_write8(struct ssb_device *dev, u16 offset, u8 value) +{ + dev->ops->write8(dev, offset, value); +} +static inline void ssb_write16(struct ssb_device *dev, u16 offset, u16 value) +{ + dev->ops->write16(dev, offset, value); +} +static inline void ssb_write32(struct ssb_device *dev, u16 offset, u32 value) +{ + dev->ops->write32(dev, offset, value); +} +#ifdef CONFIG_SSB_BLOCKIO +static inline void ssb_block_read(struct ssb_device *dev, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + dev->ops->block_read(dev, buffer, count, offset, reg_width); +} + +static inline void ssb_block_write(struct ssb_device *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + dev->ops->block_write(dev, buffer, count, offset, reg_width); +} +#endif /* CONFIG_SSB_BLOCKIO */ + + +/* The SSB DMA API. Use this API for any DMA operation on the device. + * This API basically is a wrapper that calls the correct DMA API for + * the host device type the SSB device is attached to. */ + +/* Translation (routing) bits that need to be ORed to DMA + * addresses before they are given to a device. */ +extern u32 ssb_dma_translation(struct ssb_device *dev); +#define SSB_DMA_TRANSLATION_MASK 0xC0000000 +#define SSB_DMA_TRANSLATION_SHIFT 30 + +extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask); + +extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp_flags); +extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + gfp_t gfp_flags); + +static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) +{ +#ifdef CONFIG_SSB_DEBUG + printk(KERN_ERR "SSB: BUG! 
Calling DMA API for " + "unsupported bustype %d\n", dev->bus->bustype); +#endif /* DEBUG */ +} + +static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + return pci_dma_mapping_error(dev->bus->host_pci, addr); +#endif + break; + case SSB_BUSTYPE_SSB: + return dma_mapping_error(dev->dev, addr); + default: + break; + } + __ssb_dma_not_implemented(dev); + return -ENOSYS; +} + +static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p, + size_t size, enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + return pci_map_single(dev->bus->host_pci, p, size, dir); +#endif + break; + case SSB_BUSTYPE_SSB: + return dma_map_single(dev->dev, p, size, dir); + default: + break; + } + __ssb_dma_not_implemented(dev); + return 0; +} + +static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir); + return; +#endif + break; + case SSB_BUSTYPE_SSB: + dma_unmap_single(dev->dev, dma_addr, size, dir); + return; + default: + break; + } + __ssb_dma_not_implemented(dev); +} + +static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev, + dma_addr_t dma_addr, + size_t size, + enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, + size, dir); + return; +#endif + break; + case SSB_BUSTYPE_SSB: + dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir); + return; + default: + break; + } + __ssb_dma_not_implemented(dev); +} + +static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev, + dma_addr_t dma_addr, + size_t size, + enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, + size, dir); + return; +#endif + break; + case SSB_BUSTYPE_SSB: + dma_sync_single_for_device(dev->dev, dma_addr, size, dir); + return; + default: + break; + } + __ssb_dma_not_implemented(dev); +} + +static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev, + dma_addr_t dma_addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + /* Just sync everything. That's all the PCI API can do. */ + pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, + offset + size, dir); + return; +#endif + break; + case SSB_BUSTYPE_SSB: + dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset, + size, dir); + return; + default: + break; + } + __ssb_dma_not_implemented(dev); +} + +static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev, + dma_addr_t dma_addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + /* Just sync everything. That's all the PCI API can do. 
*/ + pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, + offset + size, dir); + return; +#endif + break; + case SSB_BUSTYPE_SSB: + dma_sync_single_range_for_device(dev->dev, dma_addr, offset, + size, dir); + return; + default: + break; + } + __ssb_dma_not_implemented(dev); +} + + +#ifdef CONFIG_SSB_PCIHOST +/* PCI-host wrapper driver */ +extern int ssb_pcihost_register(struct pci_driver *driver); +static inline void ssb_pcihost_unregister(struct pci_driver *driver) +{ + pci_unregister_driver(driver); +} + +static inline +void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state) +{ + if (sdev->bus->bustype == SSB_BUSTYPE_PCI) + pci_set_power_state(sdev->bus->host_pci, state); +} +#else +static inline void ssb_pcihost_unregister(struct pci_driver *driver) +{ +} + +static inline +void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state) +{ +} +#endif /* CONFIG_SSB_PCIHOST */ + + +/* If a driver is shutdown or suspended, call this to signal + * that the bus may be completely powered down. SSB will decide, + * if it's really time to power down the bus, based on if there + * are other devices that want to run. */ +extern int ssb_bus_may_powerdown(struct ssb_bus *bus); +/* Before initializing and enabling a device, call this to power-up the bus. + * If you want to allow use of dynamic-power-control, pass the flag. + * Otherwise static always-on powercontrol will be used. */ +extern int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl); + + +/* Various helper functions */ +extern u32 ssb_admatch_base(u32 adm); +extern u32 ssb_admatch_size(u32 adm); + +/* PCI device mapping and fixup routines. + * Called from the architecture pcibios init code. + * These are only available on SSB_EMBEDDED configurations. */ +#ifdef CONFIG_SSB_EMBEDDED +int ssb_pcibios_plat_dev_init(struct pci_dev *dev); +int ssb_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); +#endif /* CONFIG_SSB_EMBEDDED */ + +#endif /* LINUX_SSB_H_ */ diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h new file mode 100644 index 0000000..4e27acf --- /dev/null +++ b/include/linux/ssb/ssb_driver_chipcommon.h @@ -0,0 +1,643 @@ +#ifndef LINUX_SSB_CHIPCO_H_ +#define LINUX_SSB_CHIPCO_H_ + +/* SonicsSiliconBackplane CHIPCOMMON core hardware definitions + * + * The chipcommon core provides chip identification, SB control, + * jtag, 0/1/2 uarts, clock frequency control, a watchdog interrupt timer, + * gpio interface, extbus, and support for serial and parallel flashes. + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, Michael Buesch + * + * Licensed under the GPL version 2. See COPYING for details. + */ + +/** ChipCommon core registers. 
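The ssb_dma_* wrappers above let a driver stay host-bus agnostic: the same call maps through the PCI API or the generic DMA API depending on the bus type. A short sketch under that assumption (mydrv_ names are hypothetical); drivers like b43/b44 additionally OR ssb_dma_translation() into the address when they program it into a device descriptor:

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

static int mydrv_map_rx_buffer(struct ssb_device *dev, void *buf,
			       size_t len, dma_addr_t *dma)
{
	*dma = ssb_dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (ssb_dma_mapping_error(dev, *dma))
		return -EIO;
	/* Keep the raw handle for unmapping; apply ssb_dma_translation(dev)
	 * only to the copy written into the hardware descriptor. */
	return 0;
}

static void mydrv_unmap_rx_buffer(struct ssb_device *dev, dma_addr_t dma,
				  size_t len)
{
	ssb_dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}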
**/ + +#define SSB_CHIPCO_CHIPID 0x0000 +#define SSB_CHIPCO_IDMASK 0x0000FFFF +#define SSB_CHIPCO_REVMASK 0x000F0000 +#define SSB_CHIPCO_REVSHIFT 16 +#define SSB_CHIPCO_PACKMASK 0x00F00000 +#define SSB_CHIPCO_PACKSHIFT 20 +#define SSB_CHIPCO_NRCORESMASK 0x0F000000 +#define SSB_CHIPCO_NRCORESSHIFT 24 +#define SSB_CHIPCO_CAP 0x0004 /* Capabilities */ +#define SSB_CHIPCO_CAP_NRUART 0x00000003 /* # of UARTs */ +#define SSB_CHIPCO_CAP_MIPSEB 0x00000004 /* MIPS in BigEndian Mode */ +#define SSB_CHIPCO_CAP_UARTCLK 0x00000018 /* UART clock select */ +#define SSB_CHIPCO_CAP_UARTCLK_INT 0x00000008 /* UARTs are driven by internal divided clock */ +#define SSB_CHIPCO_CAP_UARTGPIO 0x00000020 /* UARTs on GPIO 15-12 */ +#define SSB_CHIPCO_CAP_EXTBUS 0x000000C0 /* External buses present */ +#define SSB_CHIPCO_CAP_FLASHT 0x00000700 /* Flash Type */ +#define SSB_CHIPCO_FLASHT_NONE 0x00000000 /* No flash */ +#define SSB_CHIPCO_FLASHT_STSER 0x00000100 /* ST serial flash */ +#define SSB_CHIPCO_FLASHT_ATSER 0x00000200 /* Atmel serial flash */ +#define SSB_CHIPCO_FLASHT_PARA 0x00000700 /* Parallel flash */ +#define SSB_CHIPCO_CAP_PLLT 0x00038000 /* PLL Type */ +#define SSB_PLLTYPE_NONE 0x00000000 +#define SSB_PLLTYPE_1 0x00010000 /* 48Mhz base, 3 dividers */ +#define SSB_PLLTYPE_2 0x00020000 /* 48Mhz, 4 dividers */ +#define SSB_PLLTYPE_3 0x00030000 /* 25Mhz, 2 dividers */ +#define SSB_PLLTYPE_4 0x00008000 /* 48Mhz, 4 dividers */ +#define SSB_PLLTYPE_5 0x00018000 /* 25Mhz, 4 dividers */ +#define SSB_PLLTYPE_6 0x00028000 /* 100/200 or 120/240 only */ +#define SSB_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */ +#define SSB_CHIPCO_CAP_PCTL 0x00040000 /* Power Control */ +#define SSB_CHIPCO_CAP_OTPS 0x00380000 /* OTP size */ +#define SSB_CHIPCO_CAP_OTPS_SHIFT 19 +#define SSB_CHIPCO_CAP_OTPS_BASE 5 +#define SSB_CHIPCO_CAP_JTAGM 0x00400000 /* JTAG master present */ +#define SSB_CHIPCO_CAP_BROM 0x00800000 /* Internal boot ROM active */ +#define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */ +#define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */ +#define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */ +#define SSB_CHIPCO_CORECTL 0x0008 +#define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */ +#define SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */ +#define SSB_CHIPCO_CORECTL_UARTCLKEN 0x00000008 /* UART clock enable (rev >= 21) */ +#define SSB_CHIPCO_BIST 0x000C +#define SSB_CHIPCO_OTPS 0x0010 /* OTP status */ +#define SSB_CHIPCO_OTPS_PROGFAIL 0x80000000 +#define SSB_CHIPCO_OTPS_PROTECT 0x00000007 +#define SSB_CHIPCO_OTPS_HW_PROTECT 0x00000001 +#define SSB_CHIPCO_OTPS_SW_PROTECT 0x00000002 +#define SSB_CHIPCO_OTPS_CID_PROTECT 0x00000004 +#define SSB_CHIPCO_OTPC 0x0014 /* OTP control */ +#define SSB_CHIPCO_OTPC_RECWAIT 0xFF000000 +#define SSB_CHIPCO_OTPC_PROGWAIT 0x00FFFF00 +#define SSB_CHIPCO_OTPC_PRW_SHIFT 8 +#define SSB_CHIPCO_OTPC_MAXFAIL 0x00000038 +#define SSB_CHIPCO_OTPC_VSEL 0x00000006 +#define SSB_CHIPCO_OTPC_SELVL 0x00000001 +#define SSB_CHIPCO_OTPP 0x0018 /* OTP prog */ +#define SSB_CHIPCO_OTPP_COL 0x000000FF +#define SSB_CHIPCO_OTPP_ROW 0x0000FF00 +#define SSB_CHIPCO_OTPP_ROW_SHIFT 8 +#define SSB_CHIPCO_OTPP_READERR 0x10000000 +#define SSB_CHIPCO_OTPP_VALUE 0x20000000 +#define SSB_CHIPCO_OTPP_READ 0x40000000 +#define SSB_CHIPCO_OTPP_START 0x80000000 +#define SSB_CHIPCO_OTPP_BUSY 0x80000000 +#define SSB_CHIPCO_IRQSTAT 0x0020 +#define SSB_CHIPCO_IRQMASK 0x0024 +#define SSB_CHIPCO_IRQ_GPIO 0x00000001 /* gpio intr */ +#define 
SSB_CHIPCO_IRQ_EXT 0x00000002 /* ro: ext intr pin (corerev >= 3) */ +#define SSB_CHIPCO_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */ +#define SSB_CHIPCO_CHIPCTL 0x0028 /* Rev >= 11 only */ +#define SSB_CHIPCO_CHIPSTAT 0x002C /* Rev >= 11 only */ +#define SSB_CHIPCO_JCMD 0x0030 /* Rev >= 10 only */ +#define SSB_CHIPCO_JCMD_START 0x80000000 +#define SSB_CHIPCO_JCMD_BUSY 0x80000000 +#define SSB_CHIPCO_JCMD_PAUSE 0x40000000 +#define SSB_CHIPCO_JCMD0_ACC_MASK 0x0000F000 +#define SSB_CHIPCO_JCMD0_ACC_IRDR 0x00000000 +#define SSB_CHIPCO_JCMD0_ACC_DR 0x00001000 +#define SSB_CHIPCO_JCMD0_ACC_IR 0x00002000 +#define SSB_CHIPCO_JCMD0_ACC_RESET 0x00003000 +#define SSB_CHIPCO_JCMD0_ACC_IRPDR 0x00004000 +#define SSB_CHIPCO_JCMD0_ACC_PDR 0x00005000 +#define SSB_CHIPCO_JCMD0_IRW_MASK 0x00000F00 +#define SSB_CHIPCO_JCMD_ACC_MASK 0x000F0000 /* Changes for corerev 11 */ +#define SSB_CHIPCO_JCMD_ACC_IRDR 0x00000000 +#define SSB_CHIPCO_JCMD_ACC_DR 0x00010000 +#define SSB_CHIPCO_JCMD_ACC_IR 0x00020000 +#define SSB_CHIPCO_JCMD_ACC_RESET 0x00030000 +#define SSB_CHIPCO_JCMD_ACC_IRPDR 0x00040000 +#define SSB_CHIPCO_JCMD_ACC_PDR 0x00050000 +#define SSB_CHIPCO_JCMD_IRW_MASK 0x00001F00 +#define SSB_CHIPCO_JCMD_IRW_SHIFT 8 +#define SSB_CHIPCO_JCMD_DRW_MASK 0x0000003F +#define SSB_CHIPCO_JIR 0x0034 /* Rev >= 10 only */ +#define SSB_CHIPCO_JDR 0x0038 /* Rev >= 10 only */ +#define SSB_CHIPCO_JCTL 0x003C /* Rev >= 10 only */ +#define SSB_CHIPCO_JCTL_FORCE_CLK 4 /* Force clock */ +#define SSB_CHIPCO_JCTL_EXT_EN 2 /* Enable external targets */ +#define SSB_CHIPCO_JCTL_EN 1 /* Enable Jtag master */ +#define SSB_CHIPCO_FLASHCTL 0x0040 +#define SSB_CHIPCO_FLASHCTL_START 0x80000000 +#define SSB_CHIPCO_FLASHCTL_BUSY SSB_CHIPCO_FLASHCTL_START +#define SSB_CHIPCO_FLASHADDR 0x0044 +#define SSB_CHIPCO_FLASHDATA 0x0048 +#define SSB_CHIPCO_BCAST_ADDR 0x0050 +#define SSB_CHIPCO_BCAST_DATA 0x0054 +#define SSB_CHIPCO_GPIOIN 0x0060 +#define SSB_CHIPCO_GPIOOUT 0x0064 +#define SSB_CHIPCO_GPIOOUTEN 0x0068 +#define SSB_CHIPCO_GPIOCTL 0x006C +#define SSB_CHIPCO_GPIOPOL 0x0070 +#define SSB_CHIPCO_GPIOIRQ 0x0074 +#define SSB_CHIPCO_WATCHDOG 0x0080 +#define SSB_CHIPCO_GPIOTIMER 0x0088 /* LED powersave (corerev >= 16) */ +#define SSB_CHIPCO_GPIOTIMER_ONTIME_SHIFT 16 +#define SSB_CHIPCO_GPIOTOUTM 0x008C /* LED powersave (corerev >= 16) */ +#define SSB_CHIPCO_CLOCK_N 0x0090 +#define SSB_CHIPCO_CLOCK_SB 0x0094 +#define SSB_CHIPCO_CLOCK_PCI 0x0098 +#define SSB_CHIPCO_CLOCK_M2 0x009C +#define SSB_CHIPCO_CLOCK_MIPS 0x00A0 +#define SSB_CHIPCO_CLKDIV 0x00A4 /* Rev >= 3 only */ +#define SSB_CHIPCO_CLKDIV_SFLASH 0x0F000000 +#define SSB_CHIPCO_CLKDIV_SFLASH_SHIFT 24 +#define SSB_CHIPCO_CLKDIV_OTP 0x000F0000 +#define SSB_CHIPCO_CLKDIV_OTP_SHIFT 16 +#define SSB_CHIPCO_CLKDIV_JTAG 0x00000F00 +#define SSB_CHIPCO_CLKDIV_JTAG_SHIFT 8 +#define SSB_CHIPCO_CLKDIV_UART 0x000000FF +#define SSB_CHIPCO_PLLONDELAY 0x00B0 /* Rev >= 4 only */ +#define SSB_CHIPCO_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ +#define SSB_CHIPCO_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC 0x00000007 /* slow clock source mask */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC_LPO 0x00000000 /* source of slow clock is LPO */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL 0x00000001 /* source of slow clock is crystal */ +#define SSB_CHIPCO_SLOECLKCTL_SRC_PCI 0x00000002 /* source of slow clock is PCI */ +#define SSB_CHIPCO_SLOWCLKCTL_LPOFREQ 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define SSB_CHIPCO_SLOWCLKCTL_LPOPD 0x00000400 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is 
enabled */ +#define SSB_CHIPCO_SLOWCLKCTL_FSLOW 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */ +#define SSB_CHIPCO_SLOWCLKCTL_IPLL 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors PLL clock disable requests from core */ +#define SSB_CHIPCO_SLOWCLKCTL_ENXTAL 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't disable crystal when appropriate */ +#define SSB_CHIPCO_SLOWCLKCTL_XTALPU 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */ +#define SSB_CHIPCO_SLOWCLKCTL_CLKDIV 0xFFFF0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */ +#define SSB_CHIPCO_SLOWCLKCTL_CLKDIV_SHIFT 16 +#define SSB_CHIPCO_SYSCLKCTL 0x00C0 /* Rev >= 3 only */ +#define SSB_CHIPCO_SYSCLKCTL_IDLPEN 0x00000001 /* ILPen: Enable Idle Low Power */ +#define SSB_CHIPCO_SYSCLKCTL_ALPEN 0x00000002 /* ALPen: Enable Active Low Power */ +#define SSB_CHIPCO_SYSCLKCTL_PLLEN 0x00000004 /* ForcePLLOn */ +#define SSB_CHIPCO_SYSCLKCTL_FORCEALP 0x00000008 /* Force ALP (or HT if ALPen is not set */ +#define SSB_CHIPCO_SYSCLKCTL_FORCEHT 0x00000010 /* Force HT */ +#define SSB_CHIPCO_SYSCLKCTL_CLKDIV 0xFFFF0000 /* ClkDiv (ILP = 1/(4+divisor)) */ +#define SSB_CHIPCO_SYSCLKCTL_CLKDIV_SHIFT 16 +#define SSB_CHIPCO_CLKSTSTR 0x00C4 /* Rev >= 3 only */ +#define SSB_CHIPCO_PCMCIA_CFG 0x0100 +#define SSB_CHIPCO_PCMCIA_MEMWAIT 0x0104 +#define SSB_CHIPCO_PCMCIA_ATTRWAIT 0x0108 +#define SSB_CHIPCO_PCMCIA_IOWAIT 0x010C +#define SSB_CHIPCO_IDE_CFG 0x0110 +#define SSB_CHIPCO_IDE_MEMWAIT 0x0114 +#define SSB_CHIPCO_IDE_ATTRWAIT 0x0118 +#define SSB_CHIPCO_IDE_IOWAIT 0x011C +#define SSB_CHIPCO_PROG_CFG 0x0120 +#define SSB_CHIPCO_PROG_WAITCNT 0x0124 +#define SSB_CHIPCO_FLASH_CFG 0x0128 +#define SSB_CHIPCO_FLASH_WAITCNT 0x012C +#define SSB_CHIPCO_CLKCTLST 0x01E0 /* Clock control and status (rev >= 20) */ +#define SSB_CHIPCO_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */ +#define SSB_CHIPCO_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */ +#define SSB_CHIPCO_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */ +#define SSB_CHIPCO_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */ +#define SSB_CHIPCO_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */ +#define SSB_CHIPCO_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ +#define SSB_CHIPCO_CLKCTLST_HAVEHT 0x00010000 /* HT available */ +#define SSB_CHIPCO_CLKCTLST_HAVEALP 0x00020000 /* APL available */ +#define SSB_CHIPCO_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */ +#define SSB_CHIPCO_UART0_DATA 0x0300 +#define SSB_CHIPCO_UART0_IMR 0x0304 +#define SSB_CHIPCO_UART0_FCR 0x0308 +#define SSB_CHIPCO_UART0_LCR 0x030C +#define SSB_CHIPCO_UART0_MCR 0x0310 +#define SSB_CHIPCO_UART0_LSR 0x0314 +#define SSB_CHIPCO_UART0_MSR 0x0318 +#define SSB_CHIPCO_UART0_SCRATCH 0x031C +#define SSB_CHIPCO_UART1_DATA 0x0400 +#define SSB_CHIPCO_UART1_IMR 0x0404 +#define SSB_CHIPCO_UART1_FCR 0x0408 +#define SSB_CHIPCO_UART1_LCR 0x040C +#define SSB_CHIPCO_UART1_MCR 0x0410 +#define SSB_CHIPCO_UART1_LSR 0x0414 +#define SSB_CHIPCO_UART1_MSR 0x0418 +#define SSB_CHIPCO_UART1_SCRATCH 0x041C +/* PMU registers (rev >= 20) */ +#define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */ +#define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ +#define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16 +#define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ +#define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ +#define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ +#define SSB_CHIPCO_PMU_CTL_XTALFREQ 0x0000007C /* 
Crystal freq */ +#define SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT 2 +#define SSB_CHIPCO_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */ +#define SSB_CHIPCO_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */ +#define SSB_CHIPCO_PMU_CAP 0x0604 /* PMU capabilities */ +#define SSB_CHIPCO_PMU_CAP_REVISION 0x000000FF /* Revision mask */ +#define SSB_CHIPCO_PMU_STAT 0x0608 /* PMU status */ +#define SSB_CHIPCO_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */ +#define SSB_CHIPCO_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? */ +#define SSB_CHIPCO_PMU_STAT_HAVEALP 0x00000008 /* ALP available */ +#define SSB_CHIPCO_PMU_STAT_HAVEHT 0x00000004 /* HT available */ +#define SSB_CHIPCO_PMU_STAT_RESINIT 0x00000003 /* Res init */ +#define SSB_CHIPCO_PMU_RES_STAT 0x060C /* PMU res status */ +#define SSB_CHIPCO_PMU_RES_PEND 0x0610 /* PMU res pending */ +#define SSB_CHIPCO_PMU_TIMER 0x0614 /* PMU timer */ +#define SSB_CHIPCO_PMU_MINRES_MSK 0x0618 /* PMU min res mask */ +#define SSB_CHIPCO_PMU_MAXRES_MSK 0x061C /* PMU max res mask */ +#define SSB_CHIPCO_PMU_RES_TABSEL 0x0620 /* PMU res table sel */ +#define SSB_CHIPCO_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */ +#define SSB_CHIPCO_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */ +#define SSB_CHIPCO_PMU_RES_TIMER 0x062C /* PMU res timer */ +#define SSB_CHIPCO_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */ +#define SSB_CHIPCO_PMU_WATCHDOG 0x0634 /* PMU watchdog */ +#define SSB_CHIPCO_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ +#define SSB_CHIPCO_PMU_RES_REQT 0x0644 /* PMU res req timer */ +#define SSB_CHIPCO_PMU_RES_REQM 0x0648 /* PMU res req mask */ +#define SSB_CHIPCO_CHIPCTL_ADDR 0x0650 +#define SSB_CHIPCO_CHIPCTL_DATA 0x0654 +#define SSB_CHIPCO_REGCTL_ADDR 0x0658 +#define SSB_CHIPCO_REGCTL_DATA 0x065C +#define SSB_CHIPCO_PLLCTL_ADDR 0x0660 +#define SSB_CHIPCO_PLLCTL_DATA 0x0664 + + + +/** PMU PLL registers */ + +/* PMU rev 0 PLL registers */ +#define SSB_PMU0_PLLCTL0 0 +#define SSB_PMU0_PLLCTL0_PDIV_MSK 0x00000001 +#define SSB_PMU0_PLLCTL0_PDIV_FREQ 25000 /* kHz */ +#define SSB_PMU0_PLLCTL1 1 +#define SSB_PMU0_PLLCTL1_WILD_IMSK 0xF0000000 /* Wild int mask (low nibble) */ +#define SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT 28 +#define SSB_PMU0_PLLCTL1_WILD_FMSK 0x0FFFFF00 /* Wild frac mask */ +#define SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT 8 +#define SSB_PMU0_PLLCTL1_STOPMOD 0x00000040 /* Stop mod */ +#define SSB_PMU0_PLLCTL2 2 +#define SSB_PMU0_PLLCTL2_WILD_IMSKHI 0x0000000F /* Wild int mask (high nibble) */ +#define SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT 0 + +/* PMU rev 1 PLL registers */ +#define SSB_PMU1_PLLCTL0 0 +#define SSB_PMU1_PLLCTL0_P1DIV 0x00F00000 /* P1 div */ +#define SSB_PMU1_PLLCTL0_P1DIV_SHIFT 20 +#define SSB_PMU1_PLLCTL0_P2DIV 0x0F000000 /* P2 div */ +#define SSB_PMU1_PLLCTL0_P2DIV_SHIFT 24 +#define SSB_PMU1_PLLCTL1 1 +#define SSB_PMU1_PLLCTL1_M1DIV 0x000000FF /* M1 div */ +#define SSB_PMU1_PLLCTL1_M1DIV_SHIFT 0 +#define SSB_PMU1_PLLCTL1_M2DIV 0x0000FF00 /* M2 div */ +#define SSB_PMU1_PLLCTL1_M2DIV_SHIFT 8 +#define SSB_PMU1_PLLCTL1_M3DIV 0x00FF0000 /* M3 div */ +#define SSB_PMU1_PLLCTL1_M3DIV_SHIFT 16 +#define SSB_PMU1_PLLCTL1_M4DIV 0xFF000000 /* M4 div */ +#define SSB_PMU1_PLLCTL1_M4DIV_SHIFT 24 +#define SSB_PMU1_PLLCTL2 2 +#define SSB_PMU1_PLLCTL2_M5DIV 0x000000FF /* M5 div */ +#define SSB_PMU1_PLLCTL2_M5DIV_SHIFT 0 +#define SSB_PMU1_PLLCTL2_M6DIV 0x0000FF00 /* M6 div */ +#define SSB_PMU1_PLLCTL2_M6DIV_SHIFT 8 +#define SSB_PMU1_PLLCTL2_NDIVMODE 0x000E0000 /* NDIV mode */ +#define SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT 17 +#define SSB_PMU1_PLLCTL2_NDIVINT 0x1FF00000 /* 
NDIV int */ +#define SSB_PMU1_PLLCTL2_NDIVINT_SHIFT 20 +#define SSB_PMU1_PLLCTL3 3 +#define SSB_PMU1_PLLCTL3_NDIVFRAC 0x00FFFFFF /* NDIV frac */ +#define SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT 0 +#define SSB_PMU1_PLLCTL4 4 +#define SSB_PMU1_PLLCTL5 5 +#define SSB_PMU1_PLLCTL5_CLKDRV 0xFFFFFF00 /* clk drv */ +#define SSB_PMU1_PLLCTL5_CLKDRV_SHIFT 8 + +/* BCM4312 PLL resource numbers. */ +#define SSB_PMURES_4312_SWITCHER_BURST 0 +#define SSB_PMURES_4312_SWITCHER_PWM 1 +#define SSB_PMURES_4312_PA_REF_LDO 2 +#define SSB_PMURES_4312_CORE_LDO_BURST 3 +#define SSB_PMURES_4312_CORE_LDO_PWM 4 +#define SSB_PMURES_4312_RADIO_LDO 5 +#define SSB_PMURES_4312_ILP_REQUEST 6 +#define SSB_PMURES_4312_BG_FILTBYP 7 +#define SSB_PMURES_4312_TX_FILTBYP 8 +#define SSB_PMURES_4312_RX_FILTBYP 9 +#define SSB_PMURES_4312_XTAL_PU 10 +#define SSB_PMURES_4312_ALP_AVAIL 11 +#define SSB_PMURES_4312_BB_PLL_FILTBYP 12 +#define SSB_PMURES_4312_RF_PLL_FILTBYP 13 +#define SSB_PMURES_4312_HT_AVAIL 14 + +/* BCM4325 PLL resource numbers. */ +#define SSB_PMURES_4325_BUCK_BOOST_BURST 0 +#define SSB_PMURES_4325_CBUCK_BURST 1 +#define SSB_PMURES_4325_CBUCK_PWM 2 +#define SSB_PMURES_4325_CLDO_CBUCK_BURST 3 +#define SSB_PMURES_4325_CLDO_CBUCK_PWM 4 +#define SSB_PMURES_4325_BUCK_BOOST_PWM 5 +#define SSB_PMURES_4325_ILP_REQUEST 6 +#define SSB_PMURES_4325_ABUCK_BURST 7 +#define SSB_PMURES_4325_ABUCK_PWM 8 +#define SSB_PMURES_4325_LNLDO1_PU 9 +#define SSB_PMURES_4325_LNLDO2_PU 10 +#define SSB_PMURES_4325_LNLDO3_PU 11 +#define SSB_PMURES_4325_LNLDO4_PU 12 +#define SSB_PMURES_4325_XTAL_PU 13 +#define SSB_PMURES_4325_ALP_AVAIL 14 +#define SSB_PMURES_4325_RX_PWRSW_PU 15 +#define SSB_PMURES_4325_TX_PWRSW_PU 16 +#define SSB_PMURES_4325_RFPLL_PWRSW_PU 17 +#define SSB_PMURES_4325_LOGEN_PWRSW_PU 18 +#define SSB_PMURES_4325_AFE_PWRSW_PU 19 +#define SSB_PMURES_4325_BBPLL_PWRSW_PU 20 +#define SSB_PMURES_4325_HT_AVAIL 21 + +/* BCM4328 PLL resource numbers. */ +#define SSB_PMURES_4328_EXT_SWITCHER_PWM 0 +#define SSB_PMURES_4328_BB_SWITCHER_PWM 1 +#define SSB_PMURES_4328_BB_SWITCHER_BURST 2 +#define SSB_PMURES_4328_BB_EXT_SWITCHER_BURST 3 +#define SSB_PMURES_4328_ILP_REQUEST 4 +#define SSB_PMURES_4328_RADIO_SWITCHER_PWM 5 +#define SSB_PMURES_4328_RADIO_SWITCHER_BURST 6 +#define SSB_PMURES_4328_ROM_SWITCH 7 +#define SSB_PMURES_4328_PA_REF_LDO 8 +#define SSB_PMURES_4328_RADIO_LDO 9 +#define SSB_PMURES_4328_AFE_LDO 10 +#define SSB_PMURES_4328_PLL_LDO 11 +#define SSB_PMURES_4328_BG_FILTBYP 12 +#define SSB_PMURES_4328_TX_FILTBYP 13 +#define SSB_PMURES_4328_RX_FILTBYP 14 +#define SSB_PMURES_4328_XTAL_PU 15 +#define SSB_PMURES_4328_XTAL_EN 16 +#define SSB_PMURES_4328_BB_PLL_FILTBYP 17 +#define SSB_PMURES_4328_RF_PLL_FILTBYP 18 +#define SSB_PMURES_4328_BB_PLL_PU 19 + +/* BCM5354 PLL resource numbers. 
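The per-chip PMU resource numbers above are consumed as bit indexes into the PMU minimum/maximum resource masks. A speculative sketch of that pattern, using the chipco_read32()/chipco_write32() accessors defined at the end of this header (the actual mask values programmed by the ssb PMU code are chip-specific):

#include <linux/ssb/ssb_driver_chipcommon.h>

/* Keep the crystal powered by forcing its resource bit into the minimum
 * resource mask (BCM4328 numbering shown; other chips use their own table). */
static void mydrv_pmu_keep_xtal_on(struct ssb_chipcommon *cc)
{
	u32 min_msk = chipco_read32(cc, SSB_CHIPCO_PMU_MINRES_MSK);

	min_msk |= 1 << SSB_PMURES_4328_XTAL_PU;
	chipco_write32(cc, SSB_CHIPCO_PMU_MINRES_MSK, min_msk);
}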
*/ +#define SSB_PMURES_5354_EXT_SWITCHER_PWM 0 +#define SSB_PMURES_5354_BB_SWITCHER_PWM 1 +#define SSB_PMURES_5354_BB_SWITCHER_BURST 2 +#define SSB_PMURES_5354_BB_EXT_SWITCHER_BURST 3 +#define SSB_PMURES_5354_ILP_REQUEST 4 +#define SSB_PMURES_5354_RADIO_SWITCHER_PWM 5 +#define SSB_PMURES_5354_RADIO_SWITCHER_BURST 6 +#define SSB_PMURES_5354_ROM_SWITCH 7 +#define SSB_PMURES_5354_PA_REF_LDO 8 +#define SSB_PMURES_5354_RADIO_LDO 9 +#define SSB_PMURES_5354_AFE_LDO 10 +#define SSB_PMURES_5354_PLL_LDO 11 +#define SSB_PMURES_5354_BG_FILTBYP 12 +#define SSB_PMURES_5354_TX_FILTBYP 13 +#define SSB_PMURES_5354_RX_FILTBYP 14 +#define SSB_PMURES_5354_XTAL_PU 15 +#define SSB_PMURES_5354_XTAL_EN 16 +#define SSB_PMURES_5354_BB_PLL_FILTBYP 17 +#define SSB_PMURES_5354_RF_PLL_FILTBYP 18 +#define SSB_PMURES_5354_BB_PLL_PU 19 + + + +/** Chip specific Chip-Status register contents. */ +#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003 +#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */ +#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ +#define SSB_CHIPCO_CHST_4325_OTP_SEL 2 /* OTP is powered up, no SPROM */ +#define SSB_CHIPCO_CHST_4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */ +#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE 0x00000004 +#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE_SHIFT 2 +#define SSB_CHIPCO_CHST_4325_RCAL_VALID 0x00000008 +#define SSB_CHIPCO_CHST_4325_RCAL_VALID_SHIFT 3 +#define SSB_CHIPCO_CHST_4325_RCAL_VALUE 0x000001F0 +#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4 +#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */ + + + +/** Clockcontrol masks and values **/ + +/* SSB_CHIPCO_CLOCK_N */ +#define SSB_CHIPCO_CLK_N1 0x0000003F /* n1 control */ +#define SSB_CHIPCO_CLK_N2 0x00003F00 /* n2 control */ +#define SSB_CHIPCO_CLK_N2_SHIFT 8 +#define SSB_CHIPCO_CLK_PLLC 0x000F0000 /* pll control */ +#define SSB_CHIPCO_CLK_PLLC_SHIFT 16 + +/* SSB_CHIPCO_CLOCK_SB/PCI/UART */ +#define SSB_CHIPCO_CLK_M1 0x0000003F /* m1 control */ +#define SSB_CHIPCO_CLK_M2 0x00003F00 /* m2 control */ +#define SSB_CHIPCO_CLK_M2_SHIFT 8 +#define SSB_CHIPCO_CLK_M3 0x003F0000 /* m3 control */ +#define SSB_CHIPCO_CLK_M3_SHIFT 16 +#define SSB_CHIPCO_CLK_MC 0x1F000000 /* mux control */ +#define SSB_CHIPCO_CLK_MC_SHIFT 24 + +/* N3M Clock control magic field values */ +#define SSB_CHIPCO_CLK_F6_2 0x02 /* A factor of 2 in */ +#define SSB_CHIPCO_CLK_F6_3 0x03 /* 6-bit fields like */ +#define SSB_CHIPCO_CLK_F6_4 0x05 /* N1, M1 or M3 */ +#define SSB_CHIPCO_CLK_F6_5 0x09 +#define SSB_CHIPCO_CLK_F6_6 0x11 +#define SSB_CHIPCO_CLK_F6_7 0x21 + +#define SSB_CHIPCO_CLK_F5_BIAS 5 /* 5-bit fields get this added */ + +#define SSB_CHIPCO_CLK_MC_BYPASS 0x08 +#define SSB_CHIPCO_CLK_MC_M1 0x04 +#define SSB_CHIPCO_CLK_MC_M1M2 0x02 +#define SSB_CHIPCO_CLK_MC_M1M2M3 0x01 +#define SSB_CHIPCO_CLK_MC_M1M3 0x11 + +/* Type 2 Clock control magic field values */ +#define SSB_CHIPCO_CLK_T2_BIAS 2 /* n1, n2, m1 & m3 bias */ +#define SSB_CHIPCO_CLK_T2M2_BIAS 3 /* m2 bias */ + +#define SSB_CHIPCO_CLK_T2MC_M1BYP 1 +#define SSB_CHIPCO_CLK_T2MC_M2BYP 2 +#define SSB_CHIPCO_CLK_T2MC_M3BYP 4 + +/* Type 6 Clock control magic field values */ +#define SSB_CHIPCO_CLK_T6_MMASK 1 /* bits of interest in m */ +#define SSB_CHIPCO_CLK_T6_M0 120000000 /* sb clock for m = 0 */ +#define SSB_CHIPCO_CLK_T6_M1 100000000 /* sb clock for m = 1 */ +#define SSB_CHIPCO_CLK_SB2MIPS_T6(sb) (2 * (sb)) + +/* Common clock base */ +#define SSB_CHIPCO_CLK_BASE1 24000000 /* Half the 
clock freq */ +#define SSB_CHIPCO_CLK_BASE2 12500000 /* Alternate crystal on some PLL's */ + +/* Clock control values for 200Mhz in 5350 */ +#define SSB_CHIPCO_CLK_5350_N 0x0311 +#define SSB_CHIPCO_CLK_5350_M 0x04020009 + + +/** Bits in the config registers **/ + +#define SSB_CHIPCO_CFG_EN 0x0001 /* Enable */ +#define SSB_CHIPCO_CFG_EXTM 0x000E /* Extif Mode */ +#define SSB_CHIPCO_CFG_EXTM_ASYNC 0x0002 /* Async/Parallel flash */ +#define SSB_CHIPCO_CFG_EXTM_SYNC 0x0004 /* Synchronous */ +#define SSB_CHIPCO_CFG_EXTM_PCMCIA 0x0008 /* PCMCIA */ +#define SSB_CHIPCO_CFG_EXTM_IDE 0x000A /* IDE */ +#define SSB_CHIPCO_CFG_DS16 0x0010 /* Data size, 0=8bit, 1=16bit */ +#define SSB_CHIPCO_CFG_CLKDIV 0x0060 /* Sync: Clock divisor */ +#define SSB_CHIPCO_CFG_CLKEN 0x0080 /* Sync: Clock enable */ +#define SSB_CHIPCO_CFG_BSTRO 0x0100 /* Sync: Size/Bytestrobe */ + + +/** Flash-specific control/status values */ + +/* flashcontrol opcodes for ST flashes */ +#define SSB_CHIPCO_FLASHCTL_ST_WREN 0x0006 /* Write Enable */ +#define SSB_CHIPCO_FLASHCTL_ST_WRDIS 0x0004 /* Write Disable */ +#define SSB_CHIPCO_FLASHCTL_ST_RDSR 0x0105 /* Read Status Register */ +#define SSB_CHIPCO_FLASHCTL_ST_WRSR 0x0101 /* Write Status Register */ +#define SSB_CHIPCO_FLASHCTL_ST_READ 0x0303 /* Read Data Bytes */ +#define SSB_CHIPCO_FLASHCTL_ST_PP 0x0302 /* Page Program */ +#define SSB_CHIPCO_FLASHCTL_ST_SE 0x02D8 /* Sector Erase */ +#define SSB_CHIPCO_FLASHCTL_ST_BE 0x00C7 /* Bulk Erase */ +#define SSB_CHIPCO_FLASHCTL_ST_DP 0x00B9 /* Deep Power-down */ +#define SSB_CHIPCO_FLASHCTL_ST_RSIG 0x03AB /* Read Electronic Signature */ + +/* Status register bits for ST flashes */ +#define SSB_CHIPCO_FLASHSTA_ST_WIP 0x01 /* Write In Progress */ +#define SSB_CHIPCO_FLASHSTA_ST_WEL 0x02 /* Write Enable Latch */ +#define SSB_CHIPCO_FLASHSTA_ST_BP 0x1C /* Block Protect */ +#define SSB_CHIPCO_FLASHSTA_ST_BP_SHIFT 2 +#define SSB_CHIPCO_FLASHSTA_ST_SRWD 0x80 /* Status Register Write Disable */ + +/* flashcontrol opcodes for Atmel flashes */ +#define SSB_CHIPCO_FLASHCTL_AT_READ 0x07E8 +#define SSB_CHIPCO_FLASHCTL_AT_PAGE_READ 0x07D2 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_READ /* FIXME */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_READ /* FIXME */ +#define SSB_CHIPCO_FLASHCTL_AT_STATUS 0x01D7 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRITE 0x0384 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRITE 0x0387 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_ERASE_PRGM 0x0283 /* Erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_ERASE_PRGM 0x0286 /* Erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_PROGRAM 0x0288 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_PROGRAM 0x0289 +#define SSB_CHIPCO_FLASHCTL_AT_PAGE_ERASE 0x0281 +#define SSB_CHIPCO_FLASHCTL_AT_BLOCK_ERASE 0x0250 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRER_PRGM 0x0382 /* Write erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRER_PRGM 0x0385 /* Write erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_LOAD 0x0253 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_LOAD 0x0255 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_COMPARE 0x0260 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_COMPARE 0x0261 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_REPROGRAM 0x0258 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_REPROGRAM 0x0259 + +/* Status register bits for Atmel flashes */ +#define SSB_CHIPCO_FLASHSTA_AT_READY 0x80 +#define SSB_CHIPCO_FLASHSTA_AT_MISMATCH 0x40 +#define SSB_CHIPCO_FLASHSTA_AT_ID 0x38 +#define SSB_CHIPCO_FLASHSTA_AT_ID_SHIFT 3 + + +/** OTP **/ + +/* OTP regions */ +#define SSB_CHIPCO_OTP_HW_REGION SSB_CHIPCO_OTPS_HW_PROTECT +#define SSB_CHIPCO_OTP_SW_REGION 
SSB_CHIPCO_OTPS_SW_PROTECT +#define SSB_CHIPCO_OTP_CID_REGION SSB_CHIPCO_OTPS_CID_PROTECT + +/* OTP regions (Byte offsets from otp size) */ +#define SSB_CHIPCO_OTP_SWLIM_OFF (-8) +#define SSB_CHIPCO_OTP_CIDBASE_OFF 0 +#define SSB_CHIPCO_OTP_CIDLIM_OFF 8 + +/* Predefined OTP words (Word offset from otp size) */ +#define SSB_CHIPCO_OTP_BOUNDARY_OFF (-4) +#define SSB_CHIPCO_OTP_HWSIGN_OFF (-3) +#define SSB_CHIPCO_OTP_SWSIGN_OFF (-2) +#define SSB_CHIPCO_OTP_CIDSIGN_OFF (-1) + +#define SSB_CHIPCO_OTP_CID_OFF 0 +#define SSB_CHIPCO_OTP_PKG_OFF 1 +#define SSB_CHIPCO_OTP_FID_OFF 2 +#define SSB_CHIPCO_OTP_RSV_OFF 3 +#define SSB_CHIPCO_OTP_LIM_OFF 4 + +#define SSB_CHIPCO_OTP_SIGNATURE 0x578A +#define SSB_CHIPCO_OTP_MAGIC 0x4E56 + + +struct ssb_device; +struct ssb_serial_port; + +/* Data for the PMU, if available. + * Check availability with ((struct ssb_chipcommon)->capabilities & SSB_CHIPCO_CAP_PMU) + */ +struct ssb_chipcommon_pmu { + u8 rev; /* PMU revision */ + u32 crystalfreq; /* The active crystal frequency (in kHz) */ +}; + +struct ssb_chipcommon { + struct ssb_device *dev; + u32 capabilities; + /* Fast Powerup Delay constant */ + u16 fast_pwrup_delay; + struct ssb_chipcommon_pmu pmu; +}; + +static inline bool ssb_chipco_available(struct ssb_chipcommon *cc) +{ + return (cc->dev != NULL); +} + +/* Register access */ +#define chipco_read32(cc, offset) ssb_read32((cc)->dev, offset) +#define chipco_write32(cc, offset, val) ssb_write32((cc)->dev, offset, val) + +#define chipco_mask32(cc, offset, mask) \ + chipco_write32(cc, offset, chipco_read32(cc, offset) & (mask)) +#define chipco_set32(cc, offset, set) \ + chipco_write32(cc, offset, chipco_read32(cc, offset) | (set)) +#define chipco_maskset32(cc, offset, mask, set) \ + chipco_write32(cc, offset, (chipco_read32(cc, offset) & (mask)) | (set)) + +extern void ssb_chipcommon_init(struct ssb_chipcommon *cc); + +extern void ssb_chipco_suspend(struct ssb_chipcommon *cc); +extern void ssb_chipco_resume(struct ssb_chipcommon *cc); + +extern void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m); +extern void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m); +extern void ssb_chipco_timing_init(struct ssb_chipcommon *cc, + unsigned long ns_per_cycle); + +enum ssb_clkmode { + SSB_CLKMODE_SLOW, + SSB_CLKMODE_FAST, + SSB_CLKMODE_DYNAMIC, +}; + +extern void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, + enum ssb_clkmode mode); + +extern void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, + u32 ticks); + +void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value); + +u32 ssb_chipco_irq_status(struct ssb_chipcommon *cc, u32 mask); + +/* Chipcommon GPIO pin access. 
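+ * Each accessor takes a mask selecting the pins of interest and, where
+ * applicable, the new value for the masked pins (a read-modify-write).
+ * A hypothetical usage sketch ('pin' and 'on' are illustrative only):
+ *
+ *	if (ssb_chipco_available(cc)) {
+ *		ssb_chipco_gpio_outen(cc, 1 << pin, 1 << pin);
+ *		ssb_chipco_gpio_out(cc, 1 << pin, on ? (1 << pin) : 0);
+ *	}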
*/ +u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask); +u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value); +u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value); + +#ifdef CONFIG_SSB_SERIAL +extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc, + struct ssb_serial_port *ports); +#endif /* CONFIG_SSB_SERIAL */ + +/* PMU support */ +extern void ssb_pmu_init(struct ssb_chipcommon *cc); + +enum ssb_pmu_ldo_volt_id { + LDO_PAREF = 0, + LDO_VOLT1, + LDO_VOLT2, + LDO_VOLT3, +}; + +void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, + enum ssb_pmu_ldo_volt_id id, u32 voltage); +void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on); + +#endif /* LINUX_SSB_CHIPCO_H_ */ diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h new file mode 100644 index 0000000..91161f0 --- /dev/null +++ b/include/linux/ssb/ssb_driver_extif.h @@ -0,0 +1,214 @@ +/* + * Hardware-specific External Interface I/O core definitions + * for the BCM47xx family of SiliconBackplane-based chips. + * + * The External Interface core supports a total of three external chip selects + * supporting external interfaces. One of the external chip selects is + * used for Flash, one is used for PCMCIA, and the other may be + * programmed to support either a synchronous interface or an + * asynchronous interface. The asynchronous interface can be used to + * support external devices such as UARTs and the BCM2019 Bluetooth + * baseband processor. + * The external interface core also contains 2 on-chip 16550 UARTs, clock + * frequency control, a watchdog interrupt timer, and a GPIO interface. + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, Michael Buesch + * + * Licensed under the GPL version 2. See COPYING for details. + */ +#ifndef LINUX_SSB_EXTIFCORE_H_ +#define LINUX_SSB_EXTIFCORE_H_ + +/* external interface address space */ +#define SSB_EXTIF_PCMCIA_MEMBASE(x) (x) +#define SSB_EXTIF_PCMCIA_IOBASE(x) ((x) + 0x100000) +#define SSB_EXTIF_PCMCIA_CFGBASE(x) ((x) + 0x200000) +#define SSB_EXTIF_CFGIF_BASE(x) ((x) + 0x800000) +#define SSB_EXTIF_FLASH_BASE(x) ((x) + 0xc00000) + +#define SSB_EXTIF_NR_GPIOOUT 5 +/* GPIO NOTE: + * The multiple instances of output and output enable registers + * are present to allow driver software for multiple cores to control + * gpio outputs without needing to share a single register pair. + * Use the following helper macro to get a register offset value. 
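+ * For example, SSB_EXTIF_GPIO_OUT(1) evaluates to SSB_EXTIF_GPIO_OUT_BASE + 8,
+ * i.e. the output register of the second instance, while a constant index
+ * that is out of range fails the BUILD_BUG_ON at compile time.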
+ */ +#define SSB_EXTIF_GPIO_OUT(index) ({ \ + BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \ + SSB_EXTIF_GPIO_OUT_BASE + ((index) * 8); \ + }) +#define SSB_EXTIF_GPIO_OUTEN(index) ({ \ + BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \ + SSB_EXTIF_GPIO_OUTEN_BASE + ((index) * 8); \ + }) + +/** EXTIF core registers **/ + +#define SSB_EXTIF_CTL 0x0000 +#define SSB_EXTIF_CTL_UARTEN (1 << 0) /* UART enable */ +#define SSB_EXTIF_EXTSTAT 0x0004 +#define SSB_EXTIF_EXTSTAT_EMODE (1 << 0) /* Endian mode (ro) */ +#define SSB_EXTIF_EXTSTAT_EIRQPIN (1 << 1) /* External interrupt pin (ro) */ +#define SSB_EXTIF_EXTSTAT_GPIOIRQPIN (1 << 2) /* GPIO interrupt pin (ro) */ +#define SSB_EXTIF_PCMCIA_CFG 0x0010 +#define SSB_EXTIF_PCMCIA_MEMWAIT 0x0014 +#define SSB_EXTIF_PCMCIA_ATTRWAIT 0x0018 +#define SSB_EXTIF_PCMCIA_IOWAIT 0x001C +#define SSB_EXTIF_PROG_CFG 0x0020 +#define SSB_EXTIF_PROG_WAITCNT 0x0024 +#define SSB_EXTIF_FLASH_CFG 0x0028 +#define SSB_EXTIF_FLASH_WAITCNT 0x002C +#define SSB_EXTIF_WATCHDOG 0x0040 +#define SSB_EXTIF_CLOCK_N 0x0044 +#define SSB_EXTIF_CLOCK_SB 0x0048 +#define SSB_EXTIF_CLOCK_PCI 0x004C +#define SSB_EXTIF_CLOCK_MII 0x0050 +#define SSB_EXTIF_GPIO_IN 0x0060 +#define SSB_EXTIF_GPIO_OUT_BASE 0x0064 +#define SSB_EXTIF_GPIO_OUTEN_BASE 0x0068 +#define SSB_EXTIF_EJTAG_OUTEN 0x0090 +#define SSB_EXTIF_GPIO_INTPOL 0x0094 +#define SSB_EXTIF_GPIO_INTMASK 0x0098 +#define SSB_EXTIF_UART_DATA 0x0300 +#define SSB_EXTIF_UART_TIMER 0x0310 +#define SSB_EXTIF_UART_FCR 0x0320 +#define SSB_EXTIF_UART_LCR 0x0330 +#define SSB_EXTIF_UART_MCR 0x0340 +#define SSB_EXTIF_UART_LSR 0x0350 +#define SSB_EXTIF_UART_MSR 0x0360 +#define SSB_EXTIF_UART_SCRATCH 0x0370 + + + + +/* pcmcia/prog/flash_config */ +#define SSB_EXTCFG_EN (1 << 0) /* enable */ +#define SSB_EXTCFG_MODE 0xE /* mode */ +#define SSB_EXTCFG_MODE_SHIFT 1 +#define SSB_EXTCFG_MODE_FLASH 0x0 /* flash/asynchronous mode */ +#define SSB_EXTCFG_MODE_SYNC 0x2 /* synchronous mode */ +#define SSB_EXTCFG_MODE_PCMCIA 0x4 /* pcmcia mode */ +#define SSB_EXTCFG_DS16 (1 << 4) /* destsize: 0=8bit, 1=16bit */ +#define SSB_EXTCFG_BSWAP (1 << 5) /* byteswap */ +#define SSB_EXTCFG_CLKDIV 0xC0 /* clock divider */ +#define SSB_EXTCFG_CLKDIV_SHIFT 6 +#define SSB_EXTCFG_CLKDIV_2 0x0 /* backplane/2 */ +#define SSB_EXTCFG_CLKDIV_3 0x40 /* backplane/3 */ +#define SSB_EXTCFG_CLKDIV_4 0x80 /* backplane/4 */ +#define SSB_EXTCFG_CLKEN (1 << 8) /* clock enable */ +#define SSB_EXTCFG_STROBE (1 << 9) /* size/bytestrobe (synch only) */ + +/* pcmcia_memwait */ +#define SSB_PCMCIA_MEMW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_MEMW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_MEMW_1_SHIFT 8 +#define SSB_PCMCIA_MEMW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_MEMW_2_SHIFT 16 +#define SSB_PCMCIA_MEMW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_MEMW_3_SHIFT 24 + +/* pcmcia_attrwait */ +#define SSB_PCMCIA_ATTW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_ATTW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_ATTW_1_SHIFT 8 +#define SSB_PCMCIA_ATTW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_ATTW_2_SHIFT 16 +#define SSB_PCMCIA_ATTW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_ATTW_3_SHIFT 24 + +/* pcmcia_iowait */ +#define SSB_PCMCIA_IOW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_IOW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_IOW_1_SHIFT 8 +#define SSB_PCMCIA_IOW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_IOW_2_SHIFT 16 +#define SSB_PCMCIA_IOW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_IOW_3_SHIFT 24 + +/* prog_waitcount */ 
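+/* A full waitcount value is composed field by field, e.g. (sketch, with
+ * w0..w3 standing in for the four 5-bit wait counts):
+ *	(w0 & SSB_PROG_WCNT_0) |
+ *	((w1 << SSB_PROG_WCNT_1_SHIFT) & SSB_PROG_WCNT_1) |
+ *	((w2 << SSB_PROG_WCNT_2_SHIFT) & SSB_PROG_WCNT_2) |
+ *	((w3 << SSB_PROG_WCNT_3_SHIFT) & SSB_PROG_WCNT_3)
+ */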
+#define SSB_PROG_WCNT_0 0x0000001F /* waitcount0 */ +#define SSB_PROG_WCNT_1 0x00001F00 /* waitcount1 */ +#define SSB_PROG_WCNT_1_SHIFT 8 +#define SSB_PROG_WCNT_2 0x001F0000 /* waitcount2 */ +#define SSB_PROG_WCNT_2_SHIFT 16 +#define SSB_PROG_WCNT_3 0x1F000000 /* waitcount3 */ +#define SSB_PROG_WCNT_3_SHIFT 24 + +#define SSB_PROG_W0 0x0000000C +#define SSB_PROG_W1 0x00000A00 +#define SSB_PROG_W2 0x00020000 +#define SSB_PROG_W3 0x01000000 + +/* flash_waitcount */ +#define SSB_FLASH_WCNT_0 0x0000001F /* waitcount0 */ +#define SSB_FLASH_WCNT_1 0x00001F00 /* waitcount1 */ +#define SSB_FLASH_WCNT_1_SHIFT 8 +#define SSB_FLASH_WCNT_2 0x001F0000 /* waitcount2 */ +#define SSB_FLASH_WCNT_2_SHIFT 16 +#define SSB_FLASH_WCNT_3 0x1F000000 /* waitcount3 */ +#define SSB_FLASH_WCNT_3_SHIFT 24 + +/* watchdog */ +#define SSB_EXTIF_WATCHDOG_CLK 48000000 /* Hz */ + + + +#ifdef CONFIG_SSB_DRIVER_EXTIF + +struct ssb_extif { + struct ssb_device *dev; +}; + +static inline bool ssb_extif_available(struct ssb_extif *extif) +{ + return (extif->dev != NULL); +} + +extern void ssb_extif_get_clockcontrol(struct ssb_extif *extif, + u32 *plltype, u32 *n, u32 *m); + +extern void ssb_extif_timing_init(struct ssb_extif *extif, + unsigned long ns); + +extern void ssb_extif_watchdog_timer_set(struct ssb_extif *extif, + u32 ticks); + +/* Extif GPIO pin access */ +u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask); +u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value); +u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value); +u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value); +u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value); + +#ifdef CONFIG_SSB_SERIAL +extern int ssb_extif_serial_init(struct ssb_extif *extif, + struct ssb_serial_port *ports); +#endif /* CONFIG_SSB_SERIAL */ + + +#else /* CONFIG_SSB_DRIVER_EXTIF */ +/* extif disabled */ + +struct ssb_extif { +}; + +static inline bool ssb_extif_available(struct ssb_extif *extif) +{ + return 0; +} + +static inline +void ssb_extif_get_clockcontrol(struct ssb_extif *extif, + u32 *plltype, u32 *n, u32 *m) +{ +} + +static inline +void ssb_extif_watchdog_timer_set(struct ssb_extif *extif, + u32 ticks) +{ +} + +#endif /* CONFIG_SSB_DRIVER_EXTIF */ +#endif /* LINUX_SSB_EXTIFCORE_H_ */ diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h new file mode 100644 index 0000000..942e387 --- /dev/null +++ b/include/linux/ssb/ssb_driver_gige.h @@ -0,0 +1,174 @@ +#ifndef LINUX_SSB_DRIVER_GIGE_H_ +#define LINUX_SSB_DRIVER_GIGE_H_ + +#include +#include +#include + + +#ifdef CONFIG_SSB_DRIVER_GIGE + + +#define SSB_GIGE_PCIIO 0x0000 /* PCI I/O Registers (1024 bytes) */ +#define SSB_GIGE_RESERVED 0x0400 /* Reserved (1024 bytes) */ +#define SSB_GIGE_PCICFG 0x0800 /* PCI config space (256 bytes) */ +#define SSB_GIGE_SHIM_FLUSHSTAT 0x0C00 /* PCI to OCP: Flush status control (32bit) */ +#define SSB_GIGE_SHIM_FLUSHRDA 0x0C04 /* PCI to OCP: Flush read address (32bit) */ +#define SSB_GIGE_SHIM_FLUSHTO 0x0C08 /* PCI to OCP: Flush timeout counter (32bit) */ +#define SSB_GIGE_SHIM_BARRIER 0x0C0C /* PCI to OCP: Barrier register (32bit) */ +#define SSB_GIGE_SHIM_MAOCPSI 0x0C10 /* PCI to OCP: MaocpSI Control (32bit) */ +#define SSB_GIGE_SHIM_SIOCPMA 0x0C14 /* PCI to OCP: SiocpMa Control (32bit) */ + +/* TM Status High flags */ +#define SSB_GIGE_TMSHIGH_RGMII 0x00010000 /* Have an RGMII PHY-bus */ +/* TM Status Low flags */ +#define SSB_GIGE_TMSLOW_TXBYPASS 0x00080000 /* TX bypass (no delay) */ 
+#define SSB_GIGE_TMSLOW_RXBYPASS 0x00100000 /* RX bypass (no delay) */ +#define SSB_GIGE_TMSLOW_DLLEN 0x01000000 /* Enable DLL controls */ + +/* Boardflags (low) */ +#define SSB_GIGE_BFL_ROBOSWITCH 0x0010 + + +#define SSB_GIGE_MEM_RES_NAME "SSB Broadcom 47xx GigE memory" +#define SSB_GIGE_IO_RES_NAME "SSB Broadcom 47xx GigE I/O" + +struct ssb_gige { + struct ssb_device *dev; + + spinlock_t lock; + + /* True, if the device has an RGMII bus. + * False, if the device has a GMII bus. */ + bool has_rgmii; + + /* The PCI controller device. */ + struct pci_controller pci_controller; + struct pci_ops pci_ops; + struct resource mem_resource; + struct resource io_resource; +}; + +/* Check whether a PCI device is a SSB Gigabit Ethernet core. */ +extern bool pdev_is_ssb_gige_core(struct pci_dev *pdev); + +/* Convert a pci_dev pointer to a ssb_gige pointer. */ +static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) +{ + if (!pdev_is_ssb_gige_core(pdev)) + return NULL; + return container_of(pdev->bus->ops, struct ssb_gige, pci_ops); +} + +/* Returns whether the PHY is connected by an RGMII bus. */ +static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + return (dev ? dev->has_rgmii : 0); +} + +/* Returns whether we have a Roboswitch. */ +static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (dev) + return !!(dev->dev->bus->sprom.boardflags_lo & + SSB_GIGE_BFL_ROBOSWITCH); + return 0; +} + +/* Returns whether we can only do one DMA at once. */ +static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (dev) + return ((dev->dev->bus->chip_id == 0x4785) && + (dev->dev->bus->chip_rev < 2)); + return 0; +} + +/* Returns whether we must flush posted writes. */ +static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) +{ + struct ssb_gige *dev = pdev_to_ssb_gige(pdev); + if (dev) + return (dev->dev->bus->chip_id == 0x4785); + return 0; +} + +extern char * nvram_get(const char *name); +/* Get the device MAC address */ +static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) +{ +#ifdef CONFIG_BCM47XX + char *res = nvram_get("et0macaddr"); + if (res) + memcpy(macaddr, res, 6); +#endif +} + +extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, + struct pci_dev *pdev); +extern int ssb_gige_map_irq(struct ssb_device *sdev, + const struct pci_dev *pdev); + +/* The GigE driver is not a standalone module, because we don't have support + * for unregistering the driver. So we could not unload the module anyway. */ +extern int ssb_gige_init(void); +static inline void ssb_gige_exit(void) +{ + /* Currently we can not unregister the GigE driver, + * because we can not unregister the PCI bridge. 
*/ + BUG(); +} + + +#else /* CONFIG_SSB_DRIVER_GIGE */ +/* Gigabit Ethernet driver disabled */ + + +static inline int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, + struct pci_dev *pdev) +{ + return -ENOSYS; +} +static inline int ssb_gige_map_irq(struct ssb_device *sdev, + const struct pci_dev *pdev) +{ + return -ENOSYS; +} +static inline int ssb_gige_init(void) +{ + return 0; +} +static inline void ssb_gige_exit(void) +{ +} + +static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev) +{ + return 0; +} +static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) +{ + return NULL; +} +static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) +{ + return 0; +} +static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) +{ + return 0; +} +static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) +{ + return 0; +} +static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) +{ + return 0; +} + +#endif /* CONFIG_SSB_DRIVER_GIGE */ +#endif /* LINUX_SSB_DRIVER_GIGE_H_ */ diff --git a/include/linux/ssb/ssb_driver_mips.h b/include/linux/ssb/ssb_driver_mips.h new file mode 100644 index 0000000..5f44e97 --- /dev/null +++ b/include/linux/ssb/ssb_driver_mips.h @@ -0,0 +1,46 @@ +#ifndef LINUX_SSB_MIPSCORE_H_ +#define LINUX_SSB_MIPSCORE_H_ + +#ifdef CONFIG_SSB_DRIVER_MIPS + +struct ssb_device; + +struct ssb_serial_port { + void *regs; + unsigned long clockspeed; + unsigned int irq; + unsigned int baud_base; + unsigned int reg_shift; +}; + + +struct ssb_mipscore { + struct ssb_device *dev; + + int nr_serial_ports; + struct ssb_serial_port serial_ports[4]; + + u8 flash_buswidth; + u32 flash_window; + u32 flash_window_size; +}; + +extern void ssb_mipscore_init(struct ssb_mipscore *mcore); +extern u32 ssb_cpu_clock(struct ssb_mipscore *mcore); + +extern unsigned int ssb_mips_irq(struct ssb_device *dev); + + +#else /* CONFIG_SSB_DRIVER_MIPS */ + +struct ssb_mipscore { +}; + +static inline +void ssb_mipscore_init(struct ssb_mipscore *mcore) +{ +} + +#endif /* CONFIG_SSB_DRIVER_MIPS */ + +#endif /* LINUX_SSB_MIPSCORE_H_ */ diff --git a/include/linux/ssb/ssb_driver_pci.h b/include/linux/ssb/ssb_driver_pci.h new file mode 100644 index 0000000..41e330e --- /dev/null +++ b/include/linux/ssb/ssb_driver_pci.h @@ -0,0 +1,130 @@ +#ifndef LINUX_SSB_PCICORE_H_ +#define LINUX_SSB_PCICORE_H_ + +#include + +struct pci_dev; + + +#ifdef CONFIG_SSB_DRIVER_PCICORE + +/* PCI core registers. 
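+ * The offsets below are relative to the PCI core's own register window and
+ * are presumably accessed through ssb_read32()/ssb_write32() on pc->dev,
+ * as with the other core drivers.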
*/ +#define SSB_PCICORE_CTL 0x0000 /* PCI Control */ +#define SSB_PCICORE_CTL_RST_OE 0x00000001 /* PCI_RESET Output Enable */ +#define SSB_PCICORE_CTL_RST 0x00000002 /* PCI_RESET driven out to pin */ +#define SSB_PCICORE_CTL_CLK_OE 0x00000004 /* Clock gate Output Enable */ +#define SSB_PCICORE_CTL_CLK 0x00000008 /* Gate for clock driven out to pin */ +#define SSB_PCICORE_ARBCTL 0x0010 /* PCI Arbiter Control */ +#define SSB_PCICORE_ARBCTL_INTERN 0x00000001 /* Use internal arbiter */ +#define SSB_PCICORE_ARBCTL_EXTERN 0x00000002 /* Use external arbiter */ +#define SSB_PCICORE_ARBCTL_PARKID 0x00000006 /* Mask, selects which agent is parked on an idle bus */ +#define SSB_PCICORE_ARBCTL_PARKID_LAST 0x00000000 /* Last requestor */ +#define SSB_PCICORE_ARBCTL_PARKID_4710 0x00000002 /* 4710 */ +#define SSB_PCICORE_ARBCTL_PARKID_EXT0 0x00000004 /* External requestor 0 */ +#define SSB_PCICORE_ARBCTL_PARKID_EXT1 0x00000006 /* External requestor 1 */ +#define SSB_PCICORE_ISTAT 0x0020 /* Interrupt status */ +#define SSB_PCICORE_ISTAT_INTA 0x00000001 /* PCI INTA# */ +#define SSB_PCICORE_ISTAT_INTB 0x00000002 /* PCI INTB# */ +#define SSB_PCICORE_ISTAT_SERR 0x00000004 /* PCI SERR# (write to clear) */ +#define SSB_PCICORE_ISTAT_PERR 0x00000008 /* PCI PERR# (write to clear) */ +#define SSB_PCICORE_ISTAT_PME 0x00000010 /* PCI PME# */ +#define SSB_PCICORE_IMASK 0x0024 /* Interrupt mask */ +#define SSB_PCICORE_IMASK_INTA 0x00000001 /* PCI INTA# */ +#define SSB_PCICORE_IMASK_INTB 0x00000002 /* PCI INTB# */ +#define SSB_PCICORE_IMASK_SERR 0x00000004 /* PCI SERR# */ +#define SSB_PCICORE_IMASK_PERR 0x00000008 /* PCI PERR# */ +#define SSB_PCICORE_IMASK_PME 0x00000010 /* PCI PME# */ +#define SSB_PCICORE_MBOX 0x0028 /* Backplane to PCI Mailbox */ +#define SSB_PCICORE_MBOX_F0_0 0x00000100 /* PCI function 0, INT 0 */ +#define SSB_PCICORE_MBOX_F0_1 0x00000200 /* PCI function 0, INT 1 */ +#define SSB_PCICORE_MBOX_F1_0 0x00000400 /* PCI function 1, INT 0 */ +#define SSB_PCICORE_MBOX_F1_1 0x00000800 /* PCI function 1, INT 1 */ +#define SSB_PCICORE_MBOX_F2_0 0x00001000 /* PCI function 2, INT 0 */ +#define SSB_PCICORE_MBOX_F2_1 0x00002000 /* PCI function 2, INT 1 */ +#define SSB_PCICORE_MBOX_F3_0 0x00004000 /* PCI function 3, INT 0 */ +#define SSB_PCICORE_MBOX_F3_1 0x00008000 /* PCI function 3, INT 1 */ +#define SSB_PCICORE_BCAST_ADDR 0x0050 /* Backplane Broadcast Address */ +#define SSB_PCICORE_BCAST_ADDR_MASK 0x000000FF +#define SSB_PCICORE_BCAST_DATA 0x0054 /* Backplane Broadcast Data */ +#define SSB_PCICORE_GPIO_IN 0x0060 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_OUT 0x0064 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_ENABLE 0x0068 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_CTL 0x006C /* rev >= 2 only */ +#define SSB_PCICORE_SBTOPCI0 0x0100 /* Backplane to PCI translation 0 (sbtopci0) */ +#define SSB_PCICORE_SBTOPCI0_MASK 0xFC000000 +#define SSB_PCICORE_SBTOPCI1 0x0104 /* Backplane to PCI translation 1 (sbtopci1) */ +#define SSB_PCICORE_SBTOPCI1_MASK 0xFC000000 +#define SSB_PCICORE_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */ +#define SSB_PCICORE_SBTOPCI2_MASK 0xC0000000 +#define SSB_PCICORE_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */ +#define SSB_PCICORE_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */ +#define SSB_PCICORE_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */ +#define SSB_PCICORE_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */ +#define SSB_PCICORE_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */ + +/* SBtoPCIx */ +#define 
SSB_PCICORE_SBTOPCI_MEM 0x00000000 +#define SSB_PCICORE_SBTOPCI_IO 0x00000001 +#define SSB_PCICORE_SBTOPCI_CFG0 0x00000002 +#define SSB_PCICORE_SBTOPCI_CFG1 0x00000003 +#define SSB_PCICORE_SBTOPCI_PREF 0x00000004 /* Prefetch enable */ +#define SSB_PCICORE_SBTOPCI_BURST 0x00000008 /* Burst enable */ +#define SSB_PCICORE_SBTOPCI_MRM 0x00000020 /* Memory Read Multiple */ +#define SSB_PCICORE_SBTOPCI_RC 0x00000030 /* Read Command mask (rev >= 11) */ +#define SSB_PCICORE_SBTOPCI_RC_READ 0x00000000 /* Memory read */ +#define SSB_PCICORE_SBTOPCI_RC_READL 0x00000010 /* Memory read line */ +#define SSB_PCICORE_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */ + + +/* PCIcore specific boardflags */ +#define SSB_PCICORE_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ + + +struct ssb_pcicore { + struct ssb_device *dev; + u8 setup_done:1; + u8 hostmode:1; + u8 cardbusmode:1; +}; + +extern void ssb_pcicore_init(struct ssb_pcicore *pc); + +/* Enable IRQ routing for a specific device */ +extern int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, + struct ssb_device *dev); + +int ssb_pcicore_plat_dev_init(struct pci_dev *d); +int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); + + +#else /* CONFIG_SSB_DRIVER_PCICORE */ + + +struct ssb_pcicore { +}; + +static inline +void ssb_pcicore_init(struct ssb_pcicore *pc) +{ +} + +static inline +int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, + struct ssb_device *dev) +{ + return 0; +} + +static inline +int ssb_pcicore_plat_dev_init(struct pci_dev *d) +{ + return -ENODEV; +} +static inline +int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return -ENODEV; +} + +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +#endif /* LINUX_SSB_PCICORE_H_ */ diff --git a/include/linux/ssb/ssb_embedded.h b/include/linux/ssb/ssb_embedded.h new file mode 100644 index 0000000..8d8dedf --- /dev/null +++ b/include/linux/ssb/ssb_embedded.h @@ -0,0 +1,18 @@ +#ifndef LINUX_SSB_EMBEDDED_H_ +#define LINUX_SSB_EMBEDDED_H_ + +#include +#include + + +extern int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks); + +/* Generic GPIO API */ +u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask); +u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_control(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value); +u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value); + +#endif /* LINUX_SSB_EMBEDDED_H_ */ diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h new file mode 100644 index 0000000..9ae9082 --- /dev/null +++ b/include/linux/ssb/ssb_regs.h @@ -0,0 +1,456 @@ +#ifndef LINUX_SSB_REGS_H_ +#define LINUX_SSB_REGS_H_ + + +/* SiliconBackplane Address Map. + * All regions may not exist on all chips. 
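+ * As a worked example, the enumeration space SSB_ENUM_BASE..SSB_ENUM_LIMIT
+ * spans 0x10000 bytes, so SSB_MAX_NR_CORES works out to 16 cores of
+ * SSB_CORE_SIZE (4K) each.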
+ */ +#define SSB_SDRAM_BASE 0x00000000U /* Physical SDRAM */ +#define SSB_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SSB_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SSB_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */ +#define SSB_ENUM_BASE 0x18000000U /* Enumeration space base */ +#define SSB_ENUM_LIMIT 0x18010000U /* Enumeration space limit */ + +#define SSB_FLASH2 0x1c000000U /* Flash Region 2 (region 1 shadowed here) */ +#define SSB_FLASH2_SZ 0x02000000U /* Size of Flash Region 2 */ + +#define SSB_EXTIF_BASE 0x1f000000U /* External Interface region base address */ +#define SSB_FLASH1 0x1fc00000U /* Flash Region 1 */ +#define SSB_FLASH1_SZ 0x00400000U /* Size of Flash Region 1 */ + +#define SSB_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SSB_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */ +#define SSB_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), low 32 bits */ +#define SSB_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */ +#define SSB_EUART (SSB_EXTIF_BASE + 0x00800000) +#define SSB_LED (SSB_EXTIF_BASE + 0x00900000) + + +/* Enumeration space constants */ +#define SSB_CORE_SIZE 0x1000 /* Size of a core MMIO area */ +#define SSB_MAX_NR_CORES ((SSB_ENUM_LIMIT - SSB_ENUM_BASE) / SSB_CORE_SIZE) + + +/* mips address */ +#define SSB_EJTAG 0xff200000 /* MIPS EJTAG space (2M) */ + + +/* SSB PCI config space registers. */ +#define SSB_PMCSR 0x44 +#define SSB_PE 0x100 +#define SSB_BAR0_WIN 0x80 /* Backplane address space 0 */ +#define SSB_BAR1_WIN 0x84 /* Backplane address space 1 */ +#define SSB_SPROMCTL 0x88 /* SPROM control */ +#define SSB_SPROMCTL_WE 0x10 /* SPROM write enable */ +#define SSB_BAR1_CONTROL 0x8c /* Address space 1 burst control */ +#define SSB_PCI_IRQS 0x90 /* PCI interrupts */ +#define SSB_PCI_IRQMASK 0x94 /* PCI IRQ control and mask (pcirev >= 6 only) */ +#define SSB_BACKPLANE_IRQS 0x98 /* Backplane Interrupts */ +#define SSB_GPIO_IN 0xB0 /* GPIO Input (pcirev >= 3 only) */ +#define SSB_GPIO_OUT 0xB4 /* GPIO Output (pcirev >= 3 only) */ +#define SSB_GPIO_OUT_ENABLE 0xB8 /* GPIO Output Enable/Disable (pcirev >= 3 only) */ +#define SSB_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define SSB_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define SSB_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ +#define SSB_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ + + +#define SSB_BAR0_MAX_RETRIES 50 + +/* Silicon backplane configuration register definitions */ +#define SSB_IPSFLAG 0x0F08 +#define SSB_IPSFLAG_IRQ1 0x0000003F /* which sbflags get routed to mips interrupt 1 */ +#define SSB_IPSFLAG_IRQ1_SHIFT 0 +#define SSB_IPSFLAG_IRQ2 0x00003F00 /* which sbflags get routed to mips interrupt 2 */ +#define SSB_IPSFLAG_IRQ2_SHIFT 8 +#define SSB_IPSFLAG_IRQ3 0x003F0000 /* which sbflags get routed to mips interrupt 3 */ +#define SSB_IPSFLAG_IRQ3_SHIFT 16 +#define SSB_IPSFLAG_IRQ4 0x3F000000 /* which sbflags get routed to mips interrupt 4 */ +#define SSB_IPSFLAG_IRQ4_SHIFT 24 +#define SSB_TPSFLAG 0x0F18 +#define SSB_TPSFLAG_BPFLAG 0x0000003F /* Backplane flag # */ +#define SSB_TPSFLAG_ALWAYSIRQ 0x00000040 /* IRQ is always sent on the Backplane */ +#define SSB_TMERRLOGA 0x0F48 +#define SSB_TMERRLOG 0x0F50 +#define SSB_ADMATCH3 0x0F60 +#define SSB_ADMATCH2 0x0F68 +#define SSB_ADMATCH1 0x0F70 +#define SSB_IMSTATE 
0x0F90 /* SB Initiator Agent State */ +#define SSB_IMSTATE_PC 0x0000000f /* Pipe Count */ +#define SSB_IMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */ +#define SSB_IMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */ +#define SSB_IMSTATE_AP_TS 0x00000010 /* Use timeslices only */ +#define SSB_IMSTATE_AP_TK 0x00000020 /* Use token only */ +#define SSB_IMSTATE_AP_RSV 0x00000030 /* Reserved */ +#define SSB_IMSTATE_IBE 0x00020000 /* In Band Error */ +#define SSB_IMSTATE_TO 0x00040000 /* Timeout */ +#define SSB_INTVEC 0x0F94 /* SB Interrupt Mask */ +#define SSB_INTVEC_PCI 0x00000001 /* Enable interrupts for PCI */ +#define SSB_INTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */ +#define SSB_INTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */ +#define SSB_INTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */ +#define SSB_INTVEC_USB 0x00000010 /* Enable interrupts for usb */ +#define SSB_INTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */ +#define SSB_INTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */ +#define SSB_TMSLOW 0x0F98 /* SB Target State Low */ +#define SSB_TMSLOW_RESET 0x00000001 /* Reset */ +#define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */ +#define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ +#define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ +#define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ +#define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ +#define SSB_TMSLOW_BE 0x80000000 /* BIST Enable */ +#define SSB_TMSHIGH 0x0F9C /* SB Target State High */ +#define SSB_TMSHIGH_SERR 0x00000001 /* S-error */ +#define SSB_TMSHIGH_INT 0x00000002 /* Interrupt */ +#define SSB_TMSHIGH_BUSY 0x00000004 /* Busy */ +#define SSB_TMSHIGH_TO 0x00000020 /* Timeout. 
Backplane rev >= 2.3 only */ +#define SSB_TMSHIGH_COREFL 0x1FFF0000 /* Core specific flags */ +#define SSB_TMSHIGH_COREFL_SHIFT 16 +#define SSB_TMSHIGH_DMA64 0x10000000 /* 64bit DMA supported */ +#define SSB_TMSHIGH_GCR 0x20000000 /* Gated Clock Request */ +#define SSB_TMSHIGH_BISTF 0x40000000 /* BIST Failed */ +#define SSB_TMSHIGH_BISTD 0x80000000 /* BIST Done */ +#define SSB_BWA0 0x0FA0 +#define SSB_IMCFGLO 0x0FA8 +#define SSB_IMCFGLO_SERTO 0x00000007 /* Service timeout */ +#define SSB_IMCFGLO_REQTO 0x00000070 /* Request timeout */ +#define SSB_IMCFGLO_REQTO_SHIFT 4 +#define SSB_IMCFGLO_CONNID 0x00FF0000 /* Connection ID */ +#define SSB_IMCFGLO_CONNID_SHIFT 16 +#define SSB_IMCFGHI 0x0FAC +#define SSB_ADMATCH0 0x0FB0 +#define SSB_TMCFGLO 0x0FB8 +#define SSB_TMCFGHI 0x0FBC +#define SSB_BCONFIG 0x0FC0 +#define SSB_BSTATE 0x0FC8 +#define SSB_ACTCFG 0x0FD8 +#define SSB_FLAGST 0x0FE8 +#define SSB_IDLOW 0x0FF8 +#define SSB_IDLOW_CFGSP 0x00000003 /* Config Space */ +#define SSB_IDLOW_ADDRNGE 0x00000038 /* Address Ranges supported */ +#define SSB_IDLOW_ADDRNGE_SHIFT 3 +#define SSB_IDLOW_SYNC 0x00000040 +#define SSB_IDLOW_INITIATOR 0x00000080 +#define SSB_IDLOW_MIBL 0x00000F00 /* Minimum Backplane latency */ +#define SSB_IDLOW_MIBL_SHIFT 8 +#define SSB_IDLOW_MABL 0x0000F000 /* Maximum Backplane latency */ +#define SSB_IDLOW_MABL_SHIFT 12 +#define SSB_IDLOW_TIF 0x00010000 /* This Initiator is first */ +#define SSB_IDLOW_CCW 0x000C0000 /* Cycle counter width */ +#define SSB_IDLOW_CCW_SHIFT 18 +#define SSB_IDLOW_TPT 0x00F00000 /* Target ports */ +#define SSB_IDLOW_TPT_SHIFT 20 +#define SSB_IDLOW_INITP 0x0F000000 /* Initiator ports */ +#define SSB_IDLOW_INITP_SHIFT 24 +#define SSB_IDLOW_SSBREV 0xF0000000 /* Sonics Backplane Revision code */ +#define SSB_IDLOW_SSBREV_22 0x00000000 /* <= 2.2 */ +#define SSB_IDLOW_SSBREV_23 0x10000000 /* 2.3 */ +#define SSB_IDLOW_SSBREV_24 0x40000000 /* ?? Found in BCM4328 */ +#define SSB_IDLOW_SSBREV_25 0x50000000 /* ?? Not Found yet */ +#define SSB_IDLOW_SSBREV_26 0x60000000 /* ?? Found in some BCM4311/2 */ +#define SSB_IDLOW_SSBREV_27 0x70000000 /* ?? Found in some BCM4311/2 */ +#define SSB_IDHIGH 0x0FFC /* SB Identification High */ +#define SSB_IDHIGH_RCLO 0x0000000F /* Revision Code (low part) */ +#define SSB_IDHIGH_CC 0x00008FF0 /* Core Code */ +#define SSB_IDHIGH_CC_SHIFT 4 +#define SSB_IDHIGH_RCHI 0x00007000 /* Revision Code (high part) */ +#define SSB_IDHIGH_RCHI_SHIFT 8 /* yes, shift 8 is right */ +#define SSB_IDHIGH_VC 0xFFFF0000 /* Vendor Code */ +#define SSB_IDHIGH_VC_SHIFT 16 + +/* SPROM shadow area. If not otherwise noted, fields are + * two bytes wide. Note that the SPROM can _only_ be read + * in two-byte quantities. 
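+ * A hypothetical access sketch (assuming 'mmio' maps the window that holds
+ * the shadow area):
+ *
+ *	u16 binf = ioread16(mmio + SSB_SPROM1_BINF);
+ *	u8 board_rev = binf & SSB_SPROM1_BINF_BREV;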
+ */ +#define SSB_SPROMSIZE_WORDS 64 +#define SSB_SPROMSIZE_BYTES (SSB_SPROMSIZE_WORDS * sizeof(u16)) +#define SSB_SPROMSIZE_WORDS_R123 64 +#define SSB_SPROMSIZE_WORDS_R4 220 +#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) +#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) +#define SSB_SPROM_BASE 0x1000 +#define SSB_SPROM_REVISION 0x107E +#define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */ +#define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */ +#define SSB_SPROM_REVISION_CRC_SHIFT 8 + +/* SPROM Revision 1 */ +#define SSB_SPROM1_SPID 0x1004 /* Subsystem Product ID for PCI */ +#define SSB_SPROM1_SVID 0x1006 /* Subsystem Vendor ID for PCI */ +#define SSB_SPROM1_PID 0x1008 /* Product ID for PCI */ +#define SSB_SPROM1_IL0MAC 0x1048 /* 6 bytes MAC address for 802.11b/g */ +#define SSB_SPROM1_ET0MAC 0x104E /* 6 bytes MAC address for Ethernet */ +#define SSB_SPROM1_ET1MAC 0x1054 /* 6 bytes MAC address for 802.11a */ +#define SSB_SPROM1_ETHPHY 0x105A /* Ethernet PHY settings */ +#define SSB_SPROM1_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ +#define SSB_SPROM1_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ +#define SSB_SPROM1_ETHPHY_ET1A_SHIFT 5 +#define SSB_SPROM1_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ +#define SSB_SPROM1_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ +#define SSB_SPROM1_BINF 0x105C /* Board info */ +#define SSB_SPROM1_BINF_BREV 0x00FF /* Board Revision */ +#define SSB_SPROM1_BINF_CCODE 0x0F00 /* Country Code */ +#define SSB_SPROM1_BINF_CCODE_SHIFT 8 +#define SSB_SPROM1_BINF_ANTBG 0x3000 /* Available B-PHY and G-PHY antennas */ +#define SSB_SPROM1_BINF_ANTBG_SHIFT 12 +#define SSB_SPROM1_BINF_ANTA 0xC000 /* Available A-PHY antennas */ +#define SSB_SPROM1_BINF_ANTA_SHIFT 14 +#define SSB_SPROM1_PA0B0 0x105E +#define SSB_SPROM1_PA0B1 0x1060 +#define SSB_SPROM1_PA0B2 0x1062 +#define SSB_SPROM1_GPIOA 0x1064 /* General Purpose IO pins 0 and 1 */ +#define SSB_SPROM1_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM1_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM1_GPIOA_P1_SHIFT 8 +#define SSB_SPROM1_GPIOB 0x1066 /* General Purpuse IO pins 2 and 3 */ +#define SSB_SPROM1_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM1_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM1_GPIOB_P3_SHIFT 8 +#define SSB_SPROM1_MAXPWR 0x1068 /* Power Amplifier Max Power */ +#define SSB_SPROM1_MAXPWR_BG 0x00FF /* B-PHY and G-PHY (in dBm Q5.2) */ +#define SSB_SPROM1_MAXPWR_A 0xFF00 /* A-PHY (in dBm Q5.2) */ +#define SSB_SPROM1_MAXPWR_A_SHIFT 8 +#define SSB_SPROM1_PA1B0 0x106A +#define SSB_SPROM1_PA1B1 0x106C +#define SSB_SPROM1_PA1B2 0x106E +#define SSB_SPROM1_ITSSI 0x1070 /* Idle TSSI Target */ +#define SSB_SPROM1_ITSSI_BG 0x00FF /* B-PHY and G-PHY*/ +#define SSB_SPROM1_ITSSI_A 0xFF00 /* A-PHY */ +#define SSB_SPROM1_ITSSI_A_SHIFT 8 +#define SSB_SPROM1_BFLLO 0x1072 /* Boardflags (low 16 bits) */ +#define SSB_SPROM1_AGAIN 0x1074 /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM1_AGAIN_BG 0x00FF /* B-PHY and G-PHY */ +#define SSB_SPROM1_AGAIN_BG_SHIFT 0 +#define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */ +#define SSB_SPROM1_AGAIN_A_SHIFT 8 + +/* SPROM Revision 2 (inherits from rev 1) */ +#define SSB_SPROM2_BFLHI 0x1038 /* Boardflags (high 16 bits) */ +#define SSB_SPROM2_MAXP_A 0x103A /* A-PHY Max Power */ +#define SSB_SPROM2_MAXP_A_HI 0x00FF /* Max Power High */ +#define SSB_SPROM2_MAXP_A_LO 0xFF00 /* Max Power Low */ +#define SSB_SPROM2_MAXP_A_LO_SHIFT 8 +#define SSB_SPROM2_PA1LOB0 0x103C /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1LOB1 
0x103E /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1LOB2 0x1040 /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1HIB0 0x1042 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_PA1HIB1 0x1044 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_PA1HIB2 0x1046 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_OPO 0x1078 /* OFDM Power Offset from CCK Level */ +#define SSB_SPROM2_OPO_VALUE 0x00FF +#define SSB_SPROM2_OPO_UNUSED 0xFF00 +#define SSB_SPROM2_CCODE 0x107C /* Two char Country Code */ + +/* SPROM Revision 3 (inherits most data from rev 2) */ +#define SSB_SPROM3_IL0MAC 0x104A /* 6 bytes MAC address for 802.11b/g */ +#define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_GPIOLDC 0x1042 /* GPIO LED Powersave Duty Cycle (4 bytes, BigEndian) */ +#define SSB_SPROM3_GPIOLDC_OFF 0x0000FF00 /* Off Count */ +#define SSB_SPROM3_GPIOLDC_OFF_SHIFT 8 +#define SSB_SPROM3_GPIOLDC_ON 0x00FF0000 /* On Count */ +#define SSB_SPROM3_GPIOLDC_ON_SHIFT 16 +#define SSB_SPROM3_CCKPO 0x1078 /* CCK Power Offset */ +#define SSB_SPROM3_CCKPO_1M 0x000F /* 1M Rate PO */ +#define SSB_SPROM3_CCKPO_2M 0x00F0 /* 2M Rate PO */ +#define SSB_SPROM3_CCKPO_2M_SHIFT 4 +#define SSB_SPROM3_CCKPO_55M 0x0F00 /* 5.5M Rate PO */ +#define SSB_SPROM3_CCKPO_55M_SHIFT 8 +#define SSB_SPROM3_CCKPO_11M 0xF000 /* 11M Rate PO */ +#define SSB_SPROM3_CCKPO_11M_SHIFT 12 +#define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */ + +/* SPROM Revision 4 */ +#define SSB_SPROM4_IL0MAC 0x104C /* 6 byte MAC address for a/b/g/n */ +#define SSB_SPROM4_ETHPHY 0x105A /* Ethernet PHY settings ?? 
*/ +#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ +#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ +#define SSB_SPROM4_ETHPHY_ET1A_SHIFT 5 +#define SSB_SPROM4_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ +#define SSB_SPROM4_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ +#define SSB_SPROM4_CCODE 0x1052 /* Country Code (2 bytes) */ +#define SSB_SPROM4_ANTAVAIL 0x105D /* Antenna available bitfields */ +#define SSB_SPROM4_ANTAVAIL_A 0x00FF /* A-PHY bitfield */ +#define SSB_SPROM4_ANTAVAIL_A_SHIFT 0 +#define SSB_SPROM4_ANTAVAIL_BG 0xFF00 /* B-PHY and G-PHY bitfield */ +#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 8 +#define SSB_SPROM4_BFLLO 0x1044 /* Boardflags (low 16 bits) */ +#define SSB_SPROM4_AGAIN01 0x105E /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM4_AGAIN0 0x00FF /* Antenna 0 */ +#define SSB_SPROM4_AGAIN0_SHIFT 0 +#define SSB_SPROM4_AGAIN1 0xFF00 /* Antenna 1 */ +#define SSB_SPROM4_AGAIN1_SHIFT 8 +#define SSB_SPROM4_AGAIN23 0x1060 +#define SSB_SPROM4_AGAIN2 0x00FF /* Antenna 2 */ +#define SSB_SPROM4_AGAIN2_SHIFT 0 +#define SSB_SPROM4_AGAIN3 0xFF00 /* Antenna 3 */ +#define SSB_SPROM4_AGAIN3_SHIFT 8 +#define SSB_SPROM4_BFLHI 0x1046 /* Board Flags Hi */ +#define SSB_SPROM4_MAXP_BG 0x1080 /* Max Power BG in path 1 */ +#define SSB_SPROM4_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */ +#define SSB_SPROM4_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ +#define SSB_SPROM4_ITSSI_BG_SHIFT 8 +#define SSB_SPROM4_MAXP_A 0x108A /* Max Power A in path 1 */ +#define SSB_SPROM4_MAXP_A_MASK 0x00FF /* Mask for Max Power A */ +#define SSB_SPROM4_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ +#define SSB_SPROM4_ITSSI_A_SHIFT 8 +#define SSB_SPROM4_GPIOA 0x1056 /* Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM4_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM4_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM4_GPIOA_P1_SHIFT 8 +#define SSB_SPROM4_GPIOB 0x1058 /* Gen. Purpose IO # 2 and 3 */ +#define SSB_SPROM4_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM4_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM4_GPIOB_P3_SHIFT 8 +#define SSB_SPROM4_PA0B0 0x1082 /* The paXbY locations are */ +#define SSB_SPROM4_PA0B1 0x1084 /* only guesses */ +#define SSB_SPROM4_PA0B2 0x1086 +#define SSB_SPROM4_PA1B0 0x108E +#define SSB_SPROM4_PA1B1 0x1090 +#define SSB_SPROM4_PA1B2 0x1092 + +/* SPROM Revision 5 (inherits most data from rev 4) */ +#define SSB_SPROM5_BFLLO 0x104A /* Boardflags (low 16 bits) */ +#define SSB_SPROM5_BFLHI 0x104C /* Board Flags Hi */ +#define SSB_SPROM5_IL0MAC 0x1052 /* 6 byte MAC address for a/b/g/n */ +#define SSB_SPROM5_CCODE 0x1044 /* Country Code (2 bytes) */ +#define SSB_SPROM5_GPIOA 0x1076 /* Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM5_GPIOA_P1_SHIFT 8 +#define SSB_SPROM5_GPIOB 0x1078 /* Gen. 
Purpose IO # 2 and 3 */ +#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM5_GPIOB_P3_SHIFT 8 + +/* SPROM Revision 8 */ +#define SSB_SPROM8_BOARDREV 0x1082 /* Board revision */ +#define SSB_SPROM8_BFLLO 0x1084 /* Board flags (bits 0-15) */ +#define SSB_SPROM8_BFLHI 0x1086 /* Board flags (bits 16-31) */ +#define SSB_SPROM8_BFL2LO 0x1088 /* Board flags (bits 32-47) */ +#define SSB_SPROM8_BFL2HI 0x108A /* Board flags (bits 48-63) */ +#define SSB_SPROM8_IL0MAC 0x108C /* 6 byte MAC address */ +#define SSB_SPROM8_CCODE 0x1092 /* 2 byte country code */ +#define SSB_SPROM8_ANTAVAIL 0x109C /* Antenna available bitfields*/ +#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */ +#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8 +#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */ +#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0 +#define SSB_SPROM8_AGAIN01 0x109E /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */ +#define SSB_SPROM8_AGAIN0_SHIFT 0 +#define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */ +#define SSB_SPROM8_AGAIN1_SHIFT 8 +#define SSB_SPROM8_AGAIN23 0x10A0 +#define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */ +#define SSB_SPROM8_AGAIN2_SHIFT 0 +#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */ +#define SSB_SPROM8_AGAIN3_SHIFT 8 +#define SSB_SPROM8_GPIOA 0x1096 /*Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM8_GPIOA_P1_SHIFT 8 +#define SSB_SPROM8_GPIOB 0x1098 /* Gen. Purpose IO # 2 and 3 */ +#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM8_GPIOB_P3_SHIFT 8 +#define SSB_SPROM8_RSSIPARM2G 0x10A4 /* RSSI params for 2GHz */ +#define SSB_SPROM8_RSSISMF2G 0x000F +#define SSB_SPROM8_RSSISMC2G 0x00F0 +#define SSB_SPROM8_RSSISMC2G_SHIFT 4 +#define SSB_SPROM8_RSSISAV2G 0x0700 +#define SSB_SPROM8_RSSISAV2G_SHIFT 8 +#define SSB_SPROM8_BXA2G 0x1800 +#define SSB_SPROM8_BXA2G_SHIFT 11 +#define SSB_SPROM8_RSSIPARM5G 0x10A6 /* RSSI params for 5GHz */ +#define SSB_SPROM8_RSSISMF5G 0x000F +#define SSB_SPROM8_RSSISMC5G 0x00F0 +#define SSB_SPROM8_RSSISMC5G_SHIFT 4 +#define SSB_SPROM8_RSSISAV5G 0x0700 +#define SSB_SPROM8_RSSISAV5G_SHIFT 8 +#define SSB_SPROM8_BXA5G 0x1800 +#define SSB_SPROM8_BXA5G_SHIFT 11 +#define SSB_SPROM8_TRI25G 0x10A8 /* TX isolation 2.4&5.3GHz */ +#define SSB_SPROM8_TRI2G 0x00FF /* TX isolation 2.4GHz */ +#define SSB_SPROM8_TRI5G 0xFF00 /* TX isolation 5.3GHz */ +#define SSB_SPROM8_TRI5G_SHIFT 8 +#define SSB_SPROM8_TRI5GHL 0x10AA /* TX isolation 5.2/5.8GHz */ +#define SSB_SPROM8_TRI5GL 0x00FF /* TX isolation 5.2GHz */ +#define SSB_SPROM8_TRI5GH 0xFF00 /* TX isolation 5.8GHz */ +#define SSB_SPROM8_TRI5GH_SHIFT 8 +#define SSB_SPROM8_RXPO 0x10AC /* RX power offsets */ +#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ +#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ +#define SSB_SPROM8_RXPO5G_SHIFT 8 +#define SSB_SPROM8_MAXP_BG 0x10C0 /* Max Power 2GHz in path 1 */ +#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ +#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ +#define SSB_SPROM8_ITSSI_BG_SHIFT 8 +#define SSB_SPROM8_PA0B0 0x10C2 /* 2GHz power amp settings */ +#define SSB_SPROM8_PA0B1 0x10C4 +#define SSB_SPROM8_PA0B2 0x10C6 +#define SSB_SPROM8_MAXP_A 0x10C8 /* Max Power 5.3GHz */ +#define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power 5.3GHz */ +#define SSB_SPROM8_ITSSI_A 0xFF00 /* 
Mask for path 1 itssi_a */ +#define SSB_SPROM8_ITSSI_A_SHIFT 8 +#define SSB_SPROM8_MAXP_AHL 0x10CA /* Max Power 5.2/5.8GHz */ +#define SSB_SPROM8_MAXP_AH_MASK 0x00FF /* Mask for Max Power 5.8GHz */ +#define SSB_SPROM8_MAXP_AL_MASK 0xFF00 /* Mask for Max Power 5.2GHz */ +#define SSB_SPROM8_MAXP_AL_SHIFT 8 +#define SSB_SPROM8_PA1B0 0x10CC /* 5.3GHz power amp settings */ +#define SSB_SPROM8_PA1B1 0x10CE +#define SSB_SPROM8_PA1B2 0x10D0 +#define SSB_SPROM8_PA1LOB0 0x10D2 /* 5.2GHz power amp settings */ +#define SSB_SPROM8_PA1LOB1 0x10D4 +#define SSB_SPROM8_PA1LOB2 0x10D6 +#define SSB_SPROM8_PA1HIB0 0x10D8 /* 5.8GHz power amp settings */ +#define SSB_SPROM8_PA1HIB1 0x10DA +#define SSB_SPROM8_PA1HIB2 0x10DC +#define SSB_SPROM8_CCK2GPO 0x1140 /* CCK power offset */ +#define SSB_SPROM8_OFDM2GPO 0x1142 /* 2.4GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GPO 0x1146 /* 5.3GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GLPO 0x114A /* 5.2GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GHPO 0x114E /* 5.8GHz OFDM power offset */ + +/* Values for SSB_SPROM1_BINF_CCODE */ +enum { + SSB_SPROM1CCODE_WORLD = 0, + SSB_SPROM1CCODE_THAILAND, + SSB_SPROM1CCODE_ISRAEL, + SSB_SPROM1CCODE_JORDAN, + SSB_SPROM1CCODE_CHINA, + SSB_SPROM1CCODE_JAPAN, + SSB_SPROM1CCODE_USA_CANADA_ANZ, + SSB_SPROM1CCODE_EUROPE, + SSB_SPROM1CCODE_USA_LOW, + SSB_SPROM1CCODE_JAPAN_HIGH, + SSB_SPROM1CCODE_ALL, + SSB_SPROM1CCODE_NONE, +}; + +/* Address-Match values and masks (SSB_ADMATCHxxx) */ +#define SSB_ADM_TYPE 0x00000003 /* Address type */ +#define SSB_ADM_TYPE0 0 +#define SSB_ADM_TYPE1 1 +#define SSB_ADM_TYPE2 2 +#define SSB_ADM_AD64 0x00000004 +#define SSB_ADM_SZ0 0x000000F8 /* Type0 size */ +#define SSB_ADM_SZ0_SHIFT 3 +#define SSB_ADM_SZ1 0x000001F8 /* Type1 size */ +#define SSB_ADM_SZ1_SHIFT 3 +#define SSB_ADM_SZ2 0x000001F8 /* Type2 size */ +#define SSB_ADM_SZ2_SHIFT 3 +#define SSB_ADM_EN 0x00000400 /* Enable */ +#define SSB_ADM_NEG 0x00000800 /* Negative decode */ +#define SSB_ADM_BASE0 0xFFFFFF00 /* Type0 base address */ +#define SSB_ADM_BASE0_SHIFT 8 +#define SSB_ADM_BASE1 0xFFFFF000 /* Type1 base address for the core */ +#define SSB_ADM_BASE1_SHIFT 12 +#define SSB_ADM_BASE2 0xFFFF0000 /* Type2 base address for the core */ +#define SSB_ADM_BASE2_SHIFT 16 + + +#endif /* LINUX_SSB_REGS_H_ */ diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h new file mode 100644 index 0000000..99c1b4d --- /dev/null +++ b/include/linux/unaligned/access_ok.h @@ -0,0 +1,67 @@ +#ifndef _LINUX_UNALIGNED_ACCESS_OK_H +#define _LINUX_UNALIGNED_ACCESS_OK_H + +#include +#include + +static inline u16 get_unaligned_le16(const void *p) +{ + return le16_to_cpup((__le16 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return le32_to_cpup((__le32 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return le64_to_cpup((__le64 *)p); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return be16_to_cpup((__be16 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return be32_to_cpup((__be32 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return be64_to_cpup((__be64 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + *((__le16 *)p) = cpu_to_le16(val); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + *((__le32 *)p) = cpu_to_le32(val); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + *((__le64 *)p) = cpu_to_le64(val); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + 
*((__be16 *)p) = cpu_to_be16(val); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + *((__be32 *)p) = cpu_to_be32(val); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + *((__be64 *)p) = cpu_to_be64(val); +} + +#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */ diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h new file mode 100644 index 0000000..9356b24 --- /dev/null +++ b/include/linux/unaligned/be_byteshift.h @@ -0,0 +1,70 @@ +#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H +#define _LINUX_UNALIGNED_BE_BYTESHIFT_H + +#include + +static inline u16 __get_unaligned_be16(const u8 *p) +{ + return p[0] << 8 | p[1]; +} + +static inline u32 __get_unaligned_be32(const u8 *p) +{ + return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; +} + +static inline u64 __get_unaligned_be64(const u8 *p) +{ + return (u64)__get_unaligned_be32(p) << 32 | + __get_unaligned_be32(p + 4); +} + +static inline void __put_unaligned_be16(u16 val, u8 *p) +{ + *p++ = val >> 8; + *p++ = val; +} + +static inline void __put_unaligned_be32(u32 val, u8 *p) +{ + __put_unaligned_be16(val >> 16, p); + __put_unaligned_be16(val, p + 2); +} + +static inline void __put_unaligned_be64(u64 val, u8 *p) +{ + __put_unaligned_be32(val >> 32, p); + __put_unaligned_be32(val, p + 4); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return __get_unaligned_be16((const u8 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return __get_unaligned_be32((const u8 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return __get_unaligned_be64((const u8 *)p); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_be16(val, p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_be32(val, p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_be64(val, p); +} + +#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */ diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h new file mode 100644 index 0000000..c2a76c5 --- /dev/null +++ b/include/linux/unaligned/be_memmove.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H +#define _LINUX_UNALIGNED_BE_MEMMOVE_H + +#include + +static inline u16 get_unaligned_be16(const void *p) +{ + return __get_unaligned_memmove16((const u8 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return __get_unaligned_memmove32((const u8 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return __get_unaligned_memmove64((const u8 *)p); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_memmove16(val, p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_memmove32(val, p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_memmove64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h new file mode 100644 index 0000000..1324158 --- /dev/null +++ b/include/linux/unaligned/be_struct.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_BE_STRUCT_H +#define _LINUX_UNALIGNED_BE_STRUCT_H + +#include + +static inline u16 get_unaligned_be16(const void *p) +{ + return __get_unaligned_cpu16((const u8 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return __get_unaligned_cpu32((const u8 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return 
__get_unaligned_cpu64((const u8 *)p); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_cpu16(val, p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_cpu32(val, p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_cpu64(val, p); +} + +#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */ diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h new file mode 100644 index 0000000..02d97ff --- /dev/null +++ b/include/linux/unaligned/generic.h @@ -0,0 +1,68 @@ +#ifndef _LINUX_UNALIGNED_GENERIC_H +#define _LINUX_UNALIGNED_GENERIC_H + +/* + * Cause a link-time error if we try an unaligned access other than + * 1,2,4 or 8 bytes long + */ +extern void __bad_unaligned_access_size(void); + +#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __put_unaligned_le(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_le16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_le32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_le64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#define __put_unaligned_be(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_be16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_be32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_be64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#endif /* _LINUX_UNALIGNED_GENERIC_H */ diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h new file mode 100644 index 0000000..be376fb --- /dev/null +++ b/include/linux/unaligned/le_byteshift.h @@ -0,0 +1,70 @@ +#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H +#define _LINUX_UNALIGNED_LE_BYTESHIFT_H + +#include + +static inline u16 __get_unaligned_le16(const u8 *p) +{ + return p[0] | p[1] << 8; +} + +static inline u32 __get_unaligned_le32(const u8 *p) +{ + return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; +} + +static inline u64 __get_unaligned_le64(const u8 *p) +{ + return (u64)__get_unaligned_le32(p + 4) << 32 | + __get_unaligned_le32(p); +} + +static inline void __put_unaligned_le16(u16 val, u8 *p) +{ + *p++ = val; + *p++ = val >> 8; +} + +static inline void __put_unaligned_le32(u32 val, u8 *p) +{ + __put_unaligned_le16(val >> 16, p + 2); + __put_unaligned_le16(val, p); +} + +static inline void __put_unaligned_le64(u64 val, u8 *p) +{ + __put_unaligned_le32(val >> 32, p + 4); + 
__put_unaligned_le32(val, p); +} + +static inline u16 get_unaligned_le16(const void *p) +{ + return __get_unaligned_le16((const u8 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return __get_unaligned_le32((const u8 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return __get_unaligned_le64((const u8 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_le16(val, p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_le32(val, p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_le64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */ diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h new file mode 100644 index 0000000..269849b --- /dev/null +++ b/include/linux/unaligned/le_memmove.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H +#define _LINUX_UNALIGNED_LE_MEMMOVE_H + +#include + +static inline u16 get_unaligned_le16(const void *p) +{ + return __get_unaligned_memmove16((const u8 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return __get_unaligned_memmove32((const u8 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return __get_unaligned_memmove64((const u8 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_memmove16(val, p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_memmove32(val, p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_memmove64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h new file mode 100644 index 0000000..088c457 --- /dev/null +++ b/include/linux/unaligned/le_struct.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_LE_STRUCT_H +#define _LINUX_UNALIGNED_LE_STRUCT_H + +#include + +static inline u16 get_unaligned_le16(const void *p) +{ + return __get_unaligned_cpu16((const u8 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return __get_unaligned_cpu32((const u8 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return __get_unaligned_cpu64((const u8 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_cpu16(val, p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_cpu32(val, p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_cpu64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */ diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h new file mode 100644 index 0000000..eeb5a77 --- /dev/null +++ b/include/linux/unaligned/memmove.h @@ -0,0 +1,45 @@ +#ifndef _LINUX_UNALIGNED_MEMMOVE_H +#define _LINUX_UNALIGNED_MEMMOVE_H + +#include +#include + +/* Use memmove here, so gcc does not insert a __builtin_memcpy. 
*/ + +static inline u16 __get_unaligned_memmove16(const void *p) +{ + u16 tmp; + memmove(&tmp, p, 2); + return tmp; +} + +static inline u32 __get_unaligned_memmove32(const void *p) +{ + u32 tmp; + memmove(&tmp, p, 4); + return tmp; +} + +static inline u64 __get_unaligned_memmove64(const void *p) +{ + u64 tmp; + memmove(&tmp, p, 8); + return tmp; +} + +static inline void __put_unaligned_memmove16(u16 val, void *p) +{ + memmove(p, &val, 2); +} + +static inline void __put_unaligned_memmove32(u32 val, void *p) +{ + memmove(p, &val, 4); +} + +static inline void __put_unaligned_memmove64(u64 val, void *p) +{ + memmove(p, &val, 8); +} + +#endif /* _LINUX_UNALIGNED_MEMMOVE_H */ diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h new file mode 100644 index 0000000..2498bb9 --- /dev/null +++ b/include/linux/unaligned/packed_struct.h @@ -0,0 +1,46 @@ +#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H +#define _LINUX_UNALIGNED_PACKED_STRUCT_H + +#include + +struct __una_u16 { u16 x __attribute__((packed)); }; +struct __una_u32 { u32 x __attribute__((packed)); }; +struct __una_u64 { u64 x __attribute__((packed)); }; + +static inline u16 __get_unaligned_cpu16(const void *p) +{ + const struct __una_u16 *ptr = (const struct __una_u16 *)p; + return ptr->x; +} + +static inline u32 __get_unaligned_cpu32(const void *p) +{ + const struct __una_u32 *ptr = (const struct __una_u32 *)p; + return ptr->x; +} + +static inline u64 __get_unaligned_cpu64(const void *p) +{ + const struct __una_u64 *ptr = (const struct __una_u64 *)p; + return ptr->x; +} + +static inline void __put_unaligned_cpu16(u16 val, void *p) +{ + struct __una_u16 *ptr = (struct __una_u16 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu32(u32 val, void *p) +{ + struct __una_u32 *ptr = (struct __una_u32 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu64(u64 val, void *p) +{ + struct __una_u64 *ptr = (struct __una_u64 *)p; + ptr->x = val; +} + +#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */ diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h new file mode 100644 index 0000000..1ef1ebc --- /dev/null +++ b/include/linux/usb/rndis_host.h @@ -0,0 +1,270 @@ +/* + * Host Side support for RNDIS Networking Links + * Copyright (C) 2005 by David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_USB_RNDIS_HOST_H +#define __LINUX_USB_RNDIS_HOST_H + +/* + * CONTROL uses CDC "encapsulated commands" with funky notifications. + * - control-out: SEND_ENCAPSULATED + * - interrupt-in: RESPONSE_AVAILABLE + * - control-in: GET_ENCAPSULATED + * + * We'll try to ignore the RESPONSE_AVAILABLE notifications. + * + * REVISIT some RNDIS implementations seem to have curious issues still + * to be resolved. 
+ */ +struct rndis_msg_hdr { + __le32 msg_type; /* RNDIS_MSG_* */ + __le32 msg_len; + // followed by data that varies between messages + __le32 request_id; + __le32 status; + // ... and more +} __attribute__ ((packed)); + +/* MS-Windows uses this strange size, but RNDIS spec says 1024 minimum */ +#define CONTROL_BUFFER_SIZE 1025 + +/* RNDIS defines an (absurdly huge) 10 second control timeout, + * but ActiveSync seems to use a more usual 5 second timeout + * (which matches the USB 2.0 spec). + */ +#define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000) + +#define RNDIS_MSG_COMPLETION cpu_to_le32(0x80000000) + +/* codes for "msg_type" field of rndis messages; + * only the data channel uses packet messages (maybe batched); + * everything else goes on the control channel. + */ +#define RNDIS_MSG_PACKET cpu_to_le32(0x00000001) /* 1-N packets */ +#define RNDIS_MSG_INIT cpu_to_le32(0x00000002) +#define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_HALT cpu_to_le32(0x00000003) +#define RNDIS_MSG_QUERY cpu_to_le32(0x00000004) +#define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_SET cpu_to_le32(0x00000005) +#define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_RESET cpu_to_le32(0x00000006) +#define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION) +#define RNDIS_MSG_INDICATE cpu_to_le32(0x00000007) +#define RNDIS_MSG_KEEPALIVE cpu_to_le32(0x00000008) +#define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION) + +/* codes for "status" field of completion messages */ +#define RNDIS_STATUS_SUCCESS cpu_to_le32(0x00000000) +#define RNDIS_STATUS_FAILURE cpu_to_le32(0xc0000001) +#define RNDIS_STATUS_INVALID_DATA cpu_to_le32(0xc0010015) +#define RNDIS_STATUS_NOT_SUPPORTED cpu_to_le32(0xc00000bb) +#define RNDIS_STATUS_MEDIA_CONNECT cpu_to_le32(0x4001000b) +#define RNDIS_STATUS_MEDIA_DISCONNECT cpu_to_le32(0x4001000c) +#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION cpu_to_le32(0x40010012) + +/* codes for OID_GEN_PHYSICAL_MEDIUM */ +#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED cpu_to_le32(0x00000000) +#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN cpu_to_le32(0x00000001) +#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM cpu_to_le32(0x00000002) +#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE cpu_to_le32(0x00000003) +#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE cpu_to_le32(0x00000004) +#define RNDIS_PHYSICAL_MEDIUM_DSL cpu_to_le32(0x00000005) +#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL cpu_to_le32(0x00000006) +#define RNDIS_PHYSICAL_MEDIUM_1394 cpu_to_le32(0x00000007) +#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN cpu_to_le32(0x00000008) +#define RNDIS_PHYSICAL_MEDIUM_MAX cpu_to_le32(0x00000009) + +struct rndis_data_hdr { + __le32 msg_type; /* RNDIS_MSG_PACKET */ + __le32 msg_len; // rndis_data_hdr + data_len + pad + __le32 data_offset; // 36 -- right after header + __le32 data_len; // ... 
real packet size + + __le32 oob_data_offset; // zero + __le32 oob_data_len; // zero + __le32 num_oob; // zero + __le32 packet_data_offset; // zero + + __le32 packet_data_len; // zero + __le32 vc_handle; // zero + __le32 reserved; // zero +} __attribute__ ((packed)); + +struct rndis_init { /* OUT */ + // header and: + __le32 msg_type; /* RNDIS_MSG_INIT */ + __le32 msg_len; // 24 + __le32 request_id; + __le32 major_version; // of rndis (1.0) + __le32 minor_version; + __le32 max_transfer_size; +} __attribute__ ((packed)); + +struct rndis_init_c { /* IN */ + // header and: + __le32 msg_type; /* RNDIS_MSG_INIT_C */ + __le32 msg_len; + __le32 request_id; + __le32 status; + __le32 major_version; // of rndis (1.0) + __le32 minor_version; + __le32 device_flags; + __le32 medium; // zero == 802.3 + __le32 max_packets_per_message; + __le32 max_transfer_size; + __le32 packet_alignment; // max 7; (1< + * Copyright (C) 2003-2005 David Hollis + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_USB_USBNET_H +#define __LINUX_USB_USBNET_H + +/* interface from usbnet core to each USB networking link we handle */ +struct usbnet { + /* housekeeping */ + struct usb_device *udev; + struct usb_interface *intf; + struct driver_info *driver_info; + const char *driver_name; + void *driver_priv; + wait_queue_head_t *wait; + struct mutex phy_mutex; + unsigned char suspend_count; + + /* i/o info: pipes etc */ + unsigned in, out; + struct usb_host_endpoint *status; + unsigned maxpacket; + struct timer_list delay; + + /* protocol/interface state */ + struct net_device *net; + int msg_enable; + unsigned long data [5]; + u32 xid; + u32 hard_mtu; /* count any extra framing */ + size_t rx_urb_size; /* size for rx urbs */ + struct mii_if_info mii; + + /* various kinds of pending driver work */ + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct sk_buff_head done; + struct sk_buff_head rxq_pause; + struct urb *interrupt; + struct usb_anchor deferred; + struct tasklet_struct bh; + + struct work_struct kevent; + unsigned long flags; +# define EVENT_TX_HALT 0 +# define EVENT_RX_HALT 1 +# define EVENT_RX_MEMORY 2 +# define EVENT_STS_SPLIT 3 +# define EVENT_LINK_RESET 4 +# define EVENT_RX_PAUSED 5 +# define EVENT_DEV_WAKING 6 +# define EVENT_DEV_ASLEEP 7 +}; + +static inline struct usb_driver *driver_of(struct usb_interface *intf) +{ + return to_usb_driver(intf->dev.driver); +} + +/* interface from the device/framing level "minidriver" to core */ +struct driver_info { + char *description; + + int flags; +/* framing is CDC Ethernet, not writing ZLPs (hw issues), or optionally: */ +#define FLAG_FRAMING_NC 0x0001 /* guard against device dropouts */ +#define FLAG_FRAMING_GL 0x0002 /* genelink batches packets */ +#define FLAG_FRAMING_Z 0x0004 /* zaurus adds a trailer */ +#define FLAG_FRAMING_RN 0x0008 /* RNDIS batches, plus huge header */ + 
+#define FLAG_NO_SETINT 0x0010 /* device can't set_interface() */ +#define FLAG_ETHER 0x0020 /* maybe use "eth%d" names */ + +#define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */ +#define FLAG_WLAN 0x0080 /* use "wlan%d" names */ +#define FLAG_AVOID_UNLINK_URBS 0x0100 /* don't unlink urbs at usbnet_stop() */ +#define FLAG_SEND_ZLP 0x0200 /* hw requires ZLPs are sent */ +#define FLAG_WWAN 0x0400 /* use "wwan%d" names */ + +#define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */ + + /* init device ... can sleep, or cause probe() failure */ + int (*bind)(struct usbnet *, struct usb_interface *); + + /* cleanup device ... can sleep, but can't fail */ + void (*unbind)(struct usbnet *, struct usb_interface *); + + /* reset device ... can sleep */ + int (*reset)(struct usbnet *); + + /* stop device ... can sleep */ + int (*stop)(struct usbnet *); + + /* see if peer is connected ... can sleep */ + int (*check_connect)(struct usbnet *); + + /* (dis)activate runtime power management */ + int (*manage_power)(struct usbnet *, int); + + /* for status polling */ + void (*status)(struct usbnet *, struct urb *); + + /* link reset handling, called from defer_kevent */ + int (*link_reset)(struct usbnet *); + + /* fixup rx packet (strip framing) */ + int (*rx_fixup)(struct usbnet *dev, struct sk_buff *skb); + + /* fixup tx packet (add framing) */ + struct sk_buff *(*tx_fixup)(struct usbnet *dev, + struct sk_buff *skb, gfp_t flags); + + /* early initialization code, can sleep. This is for minidrivers + * having 'subminidrivers' that need to do extra initialization + * right after minidriver have initialized hardware. */ + int (*early_init)(struct usbnet *dev); + + /* called by minidriver when receiving indication */ + void (*indication)(struct usbnet *dev, void *ind, int indlen); + + /* for new devices, use the descriptor-reading code instead */ + int in; /* rx endpoint */ + int out; /* tx endpoint */ + + unsigned long data; /* Misc driver specific data */ +}; + +/* Minidrivers are just drivers using the "usbnet" core as a powerful + * network-specific subroutine library ... that happens to do pretty + * much everything except custom framing and chip-specific stuff. + */ +extern int usbnet_probe(struct usb_interface *, const struct usb_device_id *); +extern int usbnet_suspend (struct usb_interface *, pm_message_t ); +extern int usbnet_resume (struct usb_interface *); +extern void usbnet_disconnect(struct usb_interface *); + + +/* Drivers that reuse some of the standard USB CDC infrastructure + * (notably, using multiple interfaces according to the CDC + * union descriptor) get some helper code. 
+ */ +struct cdc_state { + struct usb_cdc_header_desc *header; + struct usb_cdc_union_desc *u; + struct usb_cdc_ether_desc *ether; + struct usb_interface *control; + struct usb_interface *data; +}; + +extern int usbnet_generic_cdc_bind (struct usbnet *, struct usb_interface *); +extern void usbnet_cdc_unbind (struct usbnet *, struct usb_interface *); + +/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */ +#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \ + |USB_CDC_PACKET_TYPE_ALL_MULTICAST \ + |USB_CDC_PACKET_TYPE_PROMISCUOUS \ + |USB_CDC_PACKET_TYPE_DIRECTED) + + +/* we record the state for each of our queued skbs */ +enum skb_state { + illegal = 0, + tx_start, tx_done, + rx_start, rx_done, rx_cleanup +}; + +struct skb_data { /* skb->cb is one of these */ + struct urb *urb; + struct usbnet *dev; + enum skb_state state; + size_t length; +}; + +extern int usbnet_open (struct net_device *net); +extern int usbnet_stop (struct net_device *net); +extern netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, + struct net_device *net); +extern void usbnet_tx_timeout (struct net_device *net); +extern int usbnet_change_mtu (struct net_device *net, int new_mtu); + +extern int usbnet_get_endpoints(struct usbnet *, struct usb_interface *); +extern int usbnet_get_ethernet_addr(struct usbnet *, int); +extern void usbnet_defer_kevent (struct usbnet *, int); +extern void usbnet_skb_return (struct usbnet *, struct sk_buff *); +extern void usbnet_unlink_rx_urbs(struct usbnet *); + +extern void usbnet_pause_rx(struct usbnet *); +extern void usbnet_resume_rx(struct usbnet *); +extern void usbnet_purge_paused_rxq(struct usbnet *); + +extern int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd); +extern int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd); +extern u32 usbnet_get_link (struct net_device *net); +extern u32 usbnet_get_msglevel (struct net_device *); +extern void usbnet_set_msglevel (struct net_device *, u32); +extern void usbnet_get_drvinfo (struct net_device *, struct ethtool_drvinfo *); +extern int usbnet_nway_reset(struct net_device *net); + +#endif /* __LINUX_USB_USBNET_H */ diff --git a/include/linux/wireless.h b/include/linux/wireless.h new file mode 100644 index 0000000..5b4c6c7 --- /dev/null +++ b/include/linux/wireless.h @@ -0,0 +1,1160 @@ +/* + * This file define a set of standard wireless extensions + * + * Version : 22 16.3.07 + * + * Authors : Jean Tourrilhes - HPL - + * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. + */ + +#ifndef _LINUX_WIRELESS_H +#define _LINUX_WIRELESS_H + +/************************** DOCUMENTATION **************************/ +/* + * Initial APIs (1996 -> onward) : + * ----------------------------- + * Basically, the wireless extensions are for now a set of standard ioctl + * call + /proc/net/wireless + * + * The entry /proc/net/wireless give statistics and information on the + * driver. + * This is better than having each driver having its entry because + * its centralised and we may remove the driver module safely. + * + * Ioctl are used to configure the driver and issue commands. This is + * better than command line options of insmod because we may want to + * change dynamically (while the driver is running) some parameters. + * + * The ioctl mechanimsm are copied from standard devices ioctl. + * We have the list of command plus a structure descibing the + * data exchanged... 
+ * Note that to add these ioctl, I was obliged to modify : + * # net/core/dev.c (two place + add include) + * # net/ipv4/af_inet.c (one place + add include) + * + * /proc/net/wireless is a copy of /proc/net/dev. + * We have a structure for data passed from the driver to /proc/net/wireless + * Too add this, I've modified : + * # net/core/dev.c (two other places) + * # include/linux/netdevice.h (one place) + * # include/linux/proc_fs.h (one place) + * + * New driver API (2002 -> onward) : + * ------------------------------- + * This file is only concerned with the user space API and common definitions. + * The new driver API is defined and documented in : + * # include/net/iw_handler.h + * + * Note as well that /proc/net/wireless implementation has now moved in : + * # net/core/wireless.c + * + * Wireless Events (2002 -> onward) : + * -------------------------------- + * Events are defined at the end of this file, and implemented in : + * # net/core/wireless.c + * + * Other comments : + * -------------- + * Do not add here things that are redundant with other mechanisms + * (drivers init, ifconfig, /proc/net/dev, ...) and with are not + * wireless specific. + * + * These wireless extensions are not magic : each driver has to provide + * support for them... + * + * IMPORTANT NOTE : As everything in the kernel, this is very much a + * work in progress. Contact me if you have ideas of improvements... + */ + +/***************************** INCLUDES *****************************/ + +#include /* for __u* and __s* typedefs */ +#include /* for "struct sockaddr" et al */ +#include /* for IFNAMSIZ and co... */ + +/***************************** VERSION *****************************/ +/* + * This constant is used to know the availability of the wireless + * extensions and to know which version of wireless extensions it is + * (there is some stuff that will be added in the future...) + * I just plan to increment with each new version. + */ +#define WIRELESS_EXT 22 + +/* + * Changes : + * + * V2 to V3 + * -------- + * Alan Cox start some incompatibles changes. I've integrated a bit more. + * - Encryption renamed to Encode to avoid US regulation problems + * - Frequency changed from float to struct to avoid problems on old 386 + * + * V3 to V4 + * -------- + * - Add sensitivity + * + * V4 to V5 + * -------- + * - Missing encoding definitions in range + * - Access points stuff + * + * V5 to V6 + * -------- + * - 802.11 support (ESSID ioctls) + * + * V6 to V7 + * -------- + * - define IW_ESSID_MAX_SIZE and IW_MAX_AP + * + * V7 to V8 + * -------- + * - Changed my e-mail address + * - More 802.11 support (nickname, rate, rts, frag) + * - List index in frequencies + * + * V8 to V9 + * -------- + * - Support for 'mode of operation' (ad-hoc, managed...) 
+ * - Support for unicast and multicast power saving + * - Change encoding to support larger tokens (>64 bits) + * - Updated iw_params (disable, flags) and use it for NWID + * - Extracted iw_point from iwreq for clarity + * + * V9 to V10 + * --------- + * - Add PM capability to range structure + * - Add PM modifier : MAX/MIN/RELATIVE + * - Add encoding option : IW_ENCODE_NOKEY + * - Add TxPower ioctls (work like TxRate) + * + * V10 to V11 + * ---------- + * - Add WE version in range (help backward/forward compatibility) + * - Add retry ioctls (work like PM) + * + * V11 to V12 + * ---------- + * - Add SIOCSIWSTATS to get /proc/net/wireless programatically + * - Add DEV PRIVATE IOCTL to avoid collisions in SIOCDEVPRIVATE space + * - Add new statistics (frag, retry, beacon) + * - Add average quality (for user space calibration) + * + * V12 to V13 + * ---------- + * - Document creation of new driver API. + * - Extract union iwreq_data from struct iwreq (for new driver API). + * - Rename SIOCSIWNAME as SIOCSIWCOMMIT + * + * V13 to V14 + * ---------- + * - Wireless Events support : define struct iw_event + * - Define additional specific event numbers + * - Add "addr" and "param" fields in union iwreq_data + * - AP scanning stuff (SIOCSIWSCAN and friends) + * + * V14 to V15 + * ---------- + * - Add IW_PRIV_TYPE_ADDR for struct sockaddr private arg + * - Make struct iw_freq signed (both m & e), add explicit padding + * - Add IWEVCUSTOM for driver specific event/scanning token + * - Add IW_MAX_GET_SPY for driver returning a lot of addresses + * - Add IW_TXPOW_RANGE for range of Tx Powers + * - Add IWEVREGISTERED & IWEVEXPIRED events for Access Points + * - Add IW_MODE_MONITOR for passive monitor + * + * V15 to V16 + * ---------- + * - Increase the number of bitrates in iw_range to 32 (for 802.11g) + * - Increase the number of frequencies in iw_range to 32 (for 802.11b+a) + * - Reshuffle struct iw_range for increases, add filler + * - Increase IW_MAX_AP to 64 for driver returning a lot of addresses + * - Remove IW_MAX_GET_SPY because conflict with enhanced spy support + * - Add SIOCSIWTHRSPY/SIOCGIWTHRSPY and "struct iw_thrspy" + * - Add IW_ENCODE_TEMP and iw_range->encoding_login_index + * + * V16 to V17 + * ---------- + * - Add flags to frequency -> auto/fixed + * - Document (struct iw_quality *)->updated, add new flags (INVALID) + * - Wireless Event capability in struct iw_range + * - Add support for relative TxPower (yick !) 
+ * + * V17 to V18 (From Jouni Malinen ) + * ---------- + * - Add support for WPA/WPA2 + * - Add extended encoding configuration (SIOCSIWENCODEEXT and + * SIOCGIWENCODEEXT) + * - Add SIOCSIWGENIE/SIOCGIWGENIE + * - Add SIOCSIWMLME + * - Add SIOCSIWPMKSA + * - Add struct iw_range bit field for supported encoding capabilities + * - Add optional scan request parameters for SIOCSIWSCAN + * - Add SIOCSIWAUTH/SIOCGIWAUTH for setting authentication and WPA + * related parameters (extensible up to 4096 parameter values) + * - Add wireless events: IWEVGENIE, IWEVMICHAELMICFAILURE, + * IWEVASSOCREQIE, IWEVASSOCRESPIE, IWEVPMKIDCAND + * + * V18 to V19 + * ---------- + * - Remove (struct iw_point *)->pointer from events and streams + * - Remove header includes to help user space + * - Increase IW_ENCODING_TOKEN_MAX from 32 to 64 + * - Add IW_QUAL_ALL_UPDATED and IW_QUAL_ALL_INVALID macros + * - Add explicit flag to tell stats are in dBm : IW_QUAL_DBM + * - Add IW_IOCTL_IDX() and IW_EVENT_IDX() macros + * + * V19 to V20 + * ---------- + * - RtNetlink requests support (SET/GET) + * + * V20 to V21 + * ---------- + * - Remove (struct net_device *)->get_wireless_stats() + * - Change length in ESSID and NICK to strlen() instead of strlen()+1 + * - Add IW_RETRY_SHORT/IW_RETRY_LONG retry modifiers + * - Power/Retry relative values no longer * 100000 + * - Add explicit flag to tell stats are in 802.11k RCPI : IW_QUAL_RCPI + * + * V21 to V22 + * ---------- + * - Prevent leaking of kernel space in stream on 64 bits. + */ + +/**************************** CONSTANTS ****************************/ + +/* -------------------------- IOCTL LIST -------------------------- */ + +/* Wireless Identification */ +#define SIOCSIWCOMMIT 0x8B00 /* Commit pending changes to driver */ +#define SIOCGIWNAME 0x8B01 /* get name == wireless protocol */ +/* SIOCGIWNAME is used to verify the presence of Wireless Extensions. + * Common values : "IEEE 802.11-DS", "IEEE 802.11-FH", "IEEE 802.11b"... + * Don't put the name of your driver there, it's useless. */ + +/* Basic operations */ +#define SIOCSIWNWID 0x8B02 /* set network id (pre-802.11) */ +#define SIOCGIWNWID 0x8B03 /* get network id (the cell) */ +#define SIOCSIWFREQ 0x8B04 /* set channel/frequency (Hz) */ +#define SIOCGIWFREQ 0x8B05 /* get channel/frequency (Hz) */ +#define SIOCSIWMODE 0x8B06 /* set operation mode */ +#define SIOCGIWMODE 0x8B07 /* get operation mode */ +#define SIOCSIWSENS 0x8B08 /* set sensitivity (dBm) */ +#define SIOCGIWSENS 0x8B09 /* get sensitivity (dBm) */ + +/* Informative stuff */ +#define SIOCSIWRANGE 0x8B0A /* Unused */ +#define SIOCGIWRANGE 0x8B0B /* Get range of parameters */ +#define SIOCSIWPRIV 0x8B0C /* Unused */ +#define SIOCGIWPRIV 0x8B0D /* get private ioctl interface info */ +#define SIOCSIWSTATS 0x8B0E /* Unused */ +#define SIOCGIWSTATS 0x8B0F /* Get /proc/net/wireless stats */ +/* SIOCGIWSTATS is strictly used between user space and the kernel, and + * is never passed to the driver (i.e. the driver will never see it). 
*/ + +/* Spy support (statistics per MAC address - used for Mobile IP support) */ +#define SIOCSIWSPY 0x8B10 /* set spy addresses */ +#define SIOCGIWSPY 0x8B11 /* get spy info (quality of link) */ +#define SIOCSIWTHRSPY 0x8B12 /* set spy threshold (spy event) */ +#define SIOCGIWTHRSPY 0x8B13 /* get spy threshold */ + +/* Access Point manipulation */ +#define SIOCSIWAP 0x8B14 /* set access point MAC addresses */ +#define SIOCGIWAP 0x8B15 /* get access point MAC addresses */ +#define SIOCGIWAPLIST 0x8B17 /* Deprecated in favor of scanning */ +#define SIOCSIWSCAN 0x8B18 /* trigger scanning (list cells) */ +#define SIOCGIWSCAN 0x8B19 /* get scanning results */ + +/* 802.11 specific support */ +#define SIOCSIWESSID 0x8B1A /* set ESSID (network name) */ +#define SIOCGIWESSID 0x8B1B /* get ESSID */ +#define SIOCSIWNICKN 0x8B1C /* set node name/nickname */ +#define SIOCGIWNICKN 0x8B1D /* get node name/nickname */ +/* As the ESSID and NICKN are strings up to 32 bytes long, it doesn't fit + * within the 'iwreq' structure, so we need to use the 'data' member to + * point to a string in user space, like it is done for RANGE... */ + +/* Other parameters useful in 802.11 and some other devices */ +#define SIOCSIWRATE 0x8B20 /* set default bit rate (bps) */ +#define SIOCGIWRATE 0x8B21 /* get default bit rate (bps) */ +#define SIOCSIWRTS 0x8B22 /* set RTS/CTS threshold (bytes) */ +#define SIOCGIWRTS 0x8B23 /* get RTS/CTS threshold (bytes) */ +#define SIOCSIWFRAG 0x8B24 /* set fragmentation thr (bytes) */ +#define SIOCGIWFRAG 0x8B25 /* get fragmentation thr (bytes) */ +#define SIOCSIWTXPOW 0x8B26 /* set transmit power (dBm) */ +#define SIOCGIWTXPOW 0x8B27 /* get transmit power (dBm) */ +#define SIOCSIWRETRY 0x8B28 /* set retry limits and lifetime */ +#define SIOCGIWRETRY 0x8B29 /* get retry limits and lifetime */ + +/* Encoding stuff (scrambling, hardware security, WEP...) */ +#define SIOCSIWENCODE 0x8B2A /* set encoding token & mode */ +#define SIOCGIWENCODE 0x8B2B /* get encoding token & mode */ +/* Power saving stuff (power management, unicast and multicast) */ +#define SIOCSIWPOWER 0x8B2C /* set Power Management settings */ +#define SIOCGIWPOWER 0x8B2D /* get Power Management settings */ + +/* WPA : Generic IEEE 802.11 informatiom element (e.g., for WPA/RSN/WMM). + * This ioctl uses struct iw_point and data buffer that includes IE id and len + * fields. More than one IE may be included in the request. Setting the generic + * IE to empty buffer (len=0) removes the generic IE from the driver. Drivers + * are allowed to generate their own WPA/RSN IEs, but in these cases, drivers + * are required to report the used IE as a wireless event, e.g., when + * associating with an AP. */ +#define SIOCSIWGENIE 0x8B30 /* set generic IE */ +#define SIOCGIWGENIE 0x8B31 /* get generic IE */ + +/* WPA : IEEE 802.11 MLME requests */ +#define SIOCSIWMLME 0x8B16 /* request MLME operation; uses + * struct iw_mlme */ +/* WPA : Authentication mode parameters */ +#define SIOCSIWAUTH 0x8B32 /* set authentication mode params */ +#define SIOCGIWAUTH 0x8B33 /* get authentication mode params */ + +/* WPA : Extended version of encoding configuration */ +#define SIOCSIWENCODEEXT 0x8B34 /* set encoding token & mode */ +#define SIOCGIWENCODEEXT 0x8B35 /* get encoding token & mode */ + +/* WPA2 : PMKSA cache management */ +#define SIOCSIWPMKSA 0x8B36 /* PMKSA cache operation */ + +/* -------------------- DEV PRIVATE IOCTL LIST -------------------- */ + +/* These 32 ioctl are wireless device private, for 16 commands. 
+ * Each driver is free to use them for whatever purpose it chooses, + * however the driver *must* export the description of those ioctls + * with SIOCGIWPRIV and *must* use arguments as defined below. + * If you don't follow those rules, DaveM is going to hate you (reason : + * it make mixed 32/64bit operation impossible). + */ +#define SIOCIWFIRSTPRIV 0x8BE0 +#define SIOCIWLASTPRIV 0x8BFF +/* Previously, we were using SIOCDEVPRIVATE, but we now have our + * separate range because of collisions with other tools such as + * 'mii-tool'. + * We now have 32 commands, so a bit more space ;-). + * Also, all 'even' commands are only usable by root and don't return the + * content of ifr/iwr to user (but you are not obliged to use the set/get + * convention, just use every other two command). More details in iwpriv.c. + * And I repeat : you are not forced to use them with iwpriv, but you + * must be compliant with it. + */ + +/* ------------------------- IOCTL STUFF ------------------------- */ + +/* The first and the last (range) */ +#define SIOCIWFIRST 0x8B00 +#define SIOCIWLAST SIOCIWLASTPRIV /* 0x8BFF */ +#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST) + +/* Odd : get (world access), even : set (root access) */ +#define IW_IS_SET(cmd) (!((cmd) & 0x1)) +#define IW_IS_GET(cmd) ((cmd) & 0x1) + +/* ----------------------- WIRELESS EVENTS ----------------------- */ +/* Those are *NOT* ioctls, do not issue request on them !!! */ +/* Most events use the same identifier as ioctl requests */ + +#define IWEVTXDROP 0x8C00 /* Packet dropped to excessive retry */ +#define IWEVQUAL 0x8C01 /* Quality part of statistics (scan) */ +#define IWEVCUSTOM 0x8C02 /* Driver specific ascii string */ +#define IWEVREGISTERED 0x8C03 /* Discovered a new node (AP mode) */ +#define IWEVEXPIRED 0x8C04 /* Expired a node (AP mode) */ +#define IWEVGENIE 0x8C05 /* Generic IE (WPA, RSN, WMM, ..) + * (scan results); This includes id and + * length fields. One IWEVGENIE may + * contain more than one IE. Scan + * results may contain one or more + * IWEVGENIE events. */ +#define IWEVMICHAELMICFAILURE 0x8C06 /* Michael MIC failure + * (struct iw_michaelmicfailure) + */ +#define IWEVASSOCREQIE 0x8C07 /* IEs used in (Re)Association Request. + * The data includes id and length + * fields and may contain more than one + * IE. This event is required in + * Managed mode if the driver + * generates its own WPA/RSN IE. This + * should be sent just before + * IWEVREGISTERED event for the + * association. */ +#define IWEVASSOCRESPIE 0x8C08 /* IEs used in (Re)Association + * Response. The data includes id and + * length fields and may contain more + * than one IE. This may be sent + * between IWEVASSOCREQIE and + * IWEVREGISTERED events for the + * association. */ +#define IWEVPMKIDCAND 0x8C09 /* PMKID candidate for RSN + * pre-authentication + * (struct iw_pmkid_cand) */ + +#define IWEVFIRST 0x8C00 +#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST) + +/* ------------------------- PRIVATE INFO ------------------------- */ +/* + * The following is used with SIOCGIWPRIV. It allow a driver to define + * the interface (name, type of data) for its private ioctl. 
+ * Privates ioctl are SIOCIWFIRSTPRIV -> SIOCIWLASTPRIV + */ + +#define IW_PRIV_TYPE_MASK 0x7000 /* Type of arguments */ +#define IW_PRIV_TYPE_NONE 0x0000 +#define IW_PRIV_TYPE_BYTE 0x1000 /* Char as number */ +#define IW_PRIV_TYPE_CHAR 0x2000 /* Char as character */ +#define IW_PRIV_TYPE_INT 0x4000 /* 32 bits int */ +#define IW_PRIV_TYPE_FLOAT 0x5000 /* struct iw_freq */ +#define IW_PRIV_TYPE_ADDR 0x6000 /* struct sockaddr */ + +#define IW_PRIV_SIZE_FIXED 0x0800 /* Variable or fixed number of args */ + +#define IW_PRIV_SIZE_MASK 0x07FF /* Max number of those args */ + +/* + * Note : if the number of args is fixed and the size < 16 octets, + * instead of passing a pointer we will put args in the iwreq struct... + */ + +/* ----------------------- OTHER CONSTANTS ----------------------- */ + +/* Maximum frequencies in the range struct */ +#define IW_MAX_FREQUENCIES 32 +/* Note : if you have something like 80 frequencies, + * don't increase this constant and don't fill the frequency list. + * The user will be able to set by channel anyway... */ + +/* Maximum bit rates in the range struct */ +#define IW_MAX_BITRATES 32 + +/* Maximum tx powers in the range struct */ +#define IW_MAX_TXPOWER 8 +/* Note : if you more than 8 TXPowers, just set the max and min or + * a few of them in the struct iw_range. */ + +/* Maximum of address that you may set with SPY */ +#define IW_MAX_SPY 8 + +/* Maximum of address that you may get in the + list of access points in range */ +#define IW_MAX_AP 64 + +/* Maximum size of the ESSID and NICKN strings */ +#define IW_ESSID_MAX_SIZE 32 + +/* Modes of operation */ +#define IW_MODE_AUTO 0 /* Let the driver decides */ +#define IW_MODE_ADHOC 1 /* Single cell network */ +#define IW_MODE_INFRA 2 /* Multi cell network, roaming, ... 
*/ +#define IW_MODE_MASTER 3 /* Synchronisation master or Access Point */ +#define IW_MODE_REPEAT 4 /* Wireless Repeater (forwarder) */ +#define IW_MODE_SECOND 5 /* Secondary master/repeater (backup) */ +#define IW_MODE_MONITOR 6 /* Passive monitor (listen only) */ +#define IW_MODE_MESH 7 /* Mesh (IEEE 802.11s) network */ + +/* Statistics flags (bitmask in updated) */ +#define IW_QUAL_QUAL_UPDATED 0x01 /* Value was updated since last read */ +#define IW_QUAL_LEVEL_UPDATED 0x02 +#define IW_QUAL_NOISE_UPDATED 0x04 +#define IW_QUAL_ALL_UPDATED 0x07 +#define IW_QUAL_DBM 0x08 /* Level + Noise are dBm */ +#define IW_QUAL_QUAL_INVALID 0x10 /* Driver doesn't provide value */ +#define IW_QUAL_LEVEL_INVALID 0x20 +#define IW_QUAL_NOISE_INVALID 0x40 +#define IW_QUAL_RCPI 0x80 /* Level + Noise are 802.11k RCPI */ +#define IW_QUAL_ALL_INVALID 0x70 + +/* Frequency flags */ +#define IW_FREQ_AUTO 0x00 /* Let the driver decides */ +#define IW_FREQ_FIXED 0x01 /* Force a specific value */ + +/* Maximum number of size of encoding token available + * they are listed in the range structure */ +#define IW_MAX_ENCODING_SIZES 8 + +/* Maximum size of the encoding token in bytes */ +#define IW_ENCODING_TOKEN_MAX 64 /* 512 bits (for now) */ + +/* Flags for encoding (along with the token) */ +#define IW_ENCODE_INDEX 0x00FF /* Token index (if needed) */ +#define IW_ENCODE_FLAGS 0xFF00 /* Flags defined below */ +#define IW_ENCODE_MODE 0xF000 /* Modes defined below */ +#define IW_ENCODE_DISABLED 0x8000 /* Encoding disabled */ +#define IW_ENCODE_ENABLED 0x0000 /* Encoding enabled */ +#define IW_ENCODE_RESTRICTED 0x4000 /* Refuse non-encoded packets */ +#define IW_ENCODE_OPEN 0x2000 /* Accept non-encoded packets */ +#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */ +#define IW_ENCODE_TEMP 0x0400 /* Temporary key */ + +/* Power management flags available (along with the value, if any) */ +#define IW_POWER_ON 0x0000 /* No details... */ +#define IW_POWER_TYPE 0xF000 /* Type of parameter */ +#define IW_POWER_PERIOD 0x1000 /* Value is a period/duration of */ +#define IW_POWER_TIMEOUT 0x2000 /* Value is a timeout (to go asleep) */ +#define IW_POWER_MODE 0x0F00 /* Power Management mode */ +#define IW_POWER_UNICAST_R 0x0100 /* Receive only unicast messages */ +#define IW_POWER_MULTICAST_R 0x0200 /* Receive only multicast messages */ +#define IW_POWER_ALL_R 0x0300 /* Receive all messages though PM */ +#define IW_POWER_FORCE_S 0x0400 /* Force PM procedure for sending unicast */ +#define IW_POWER_REPEATER 0x0800 /* Repeat broadcast messages in PM period */ +#define IW_POWER_MODIFIER 0x000F /* Modify a parameter */ +#define IW_POWER_MIN 0x0001 /* Value is a minimum */ +#define IW_POWER_MAX 0x0002 /* Value is a maximum */ +#define IW_POWER_RELATIVE 0x0004 /* Value is not in seconds/ms/us */ + +/* Transmit Power flags available */ +#define IW_TXPOW_TYPE 0x00FF /* Type of value */ +#define IW_TXPOW_DBM 0x0000 /* Value is in dBm */ +#define IW_TXPOW_MWATT 0x0001 /* Value is in mW */ +#define IW_TXPOW_RELATIVE 0x0002 /* Value is in arbitrary units */ +#define IW_TXPOW_RANGE 0x1000 /* Range of value between min/max */ + +/* Retry limits and lifetime flags available */ +#define IW_RETRY_ON 0x0000 /* No details... 
*/ +#define IW_RETRY_TYPE 0xF000 /* Type of parameter */ +#define IW_RETRY_LIMIT 0x1000 /* Maximum number of retries*/ +#define IW_RETRY_LIFETIME 0x2000 /* Maximum duration of retries in us */ +#define IW_RETRY_MODIFIER 0x00FF /* Modify a parameter */ +#define IW_RETRY_MIN 0x0001 /* Value is a minimum */ +#define IW_RETRY_MAX 0x0002 /* Value is a maximum */ +#define IW_RETRY_RELATIVE 0x0004 /* Value is not in seconds/ms/us */ +#define IW_RETRY_SHORT 0x0010 /* Value is for short packets */ +#define IW_RETRY_LONG 0x0020 /* Value is for long packets */ + +/* Scanning request flags */ +#define IW_SCAN_DEFAULT 0x0000 /* Default scan of the driver */ +#define IW_SCAN_ALL_ESSID 0x0001 /* Scan all ESSIDs */ +#define IW_SCAN_THIS_ESSID 0x0002 /* Scan only this ESSID */ +#define IW_SCAN_ALL_FREQ 0x0004 /* Scan all Frequencies */ +#define IW_SCAN_THIS_FREQ 0x0008 /* Scan only this Frequency */ +#define IW_SCAN_ALL_MODE 0x0010 /* Scan all Modes */ +#define IW_SCAN_THIS_MODE 0x0020 /* Scan only this Mode */ +#define IW_SCAN_ALL_RATE 0x0040 /* Scan all Bit-Rates */ +#define IW_SCAN_THIS_RATE 0x0080 /* Scan only this Bit-Rate */ +/* struct iw_scan_req scan_type */ +#define IW_SCAN_TYPE_ACTIVE 0 +#define IW_SCAN_TYPE_PASSIVE 1 +/* Maximum size of returned data */ +#define IW_SCAN_MAX_DATA 4096 /* In bytes */ + +/* Scan capability flags - in (struct iw_range *)->scan_capa */ +#define IW_SCAN_CAPA_NONE 0x00 +#define IW_SCAN_CAPA_ESSID 0x01 +#define IW_SCAN_CAPA_BSSID 0x02 +#define IW_SCAN_CAPA_CHANNEL 0x04 +#define IW_SCAN_CAPA_MODE 0x08 +#define IW_SCAN_CAPA_RATE 0x10 +#define IW_SCAN_CAPA_TYPE 0x20 +#define IW_SCAN_CAPA_TIME 0x40 + +/* Max number of char in custom event - use multiple of them if needed */ +#define IW_CUSTOM_MAX 256 /* In bytes */ + +/* Generic information element */ +#define IW_GENERIC_IE_MAX 1024 + +/* MLME requests (SIOCSIWMLME / struct iw_mlme) */ +#define IW_MLME_DEAUTH 0 +#define IW_MLME_DISASSOC 1 +#define IW_MLME_AUTH 2 +#define IW_MLME_ASSOC 3 + +/* SIOCSIWAUTH/SIOCGIWAUTH struct iw_param flags */ +#define IW_AUTH_INDEX 0x0FFF +#define IW_AUTH_FLAGS 0xF000 +/* SIOCSIWAUTH/SIOCGIWAUTH parameters (0 .. 
4095) + * (IW_AUTH_INDEX mask in struct iw_param flags; this is the index of the + * parameter that is being set/get to; value will be read/written to + * struct iw_param value field) */ +#define IW_AUTH_WPA_VERSION 0 +#define IW_AUTH_CIPHER_PAIRWISE 1 +#define IW_AUTH_CIPHER_GROUP 2 +#define IW_AUTH_KEY_MGMT 3 +#define IW_AUTH_TKIP_COUNTERMEASURES 4 +#define IW_AUTH_DROP_UNENCRYPTED 5 +#define IW_AUTH_80211_AUTH_ALG 6 +#define IW_AUTH_WPA_ENABLED 7 +#define IW_AUTH_RX_UNENCRYPTED_EAPOL 8 +#define IW_AUTH_ROAMING_CONTROL 9 +#define IW_AUTH_PRIVACY_INVOKED 10 +#define IW_AUTH_CIPHER_GROUP_MGMT 11 +#define IW_AUTH_MFP 12 + +/* IW_AUTH_WPA_VERSION values (bit field) */ +#define IW_AUTH_WPA_VERSION_DISABLED 0x00000001 +#define IW_AUTH_WPA_VERSION_WPA 0x00000002 +#define IW_AUTH_WPA_VERSION_WPA2 0x00000004 + +/* IW_AUTH_PAIRWISE_CIPHER, IW_AUTH_GROUP_CIPHER, and IW_AUTH_CIPHER_GROUP_MGMT + * values (bit field) */ +#define IW_AUTH_CIPHER_NONE 0x00000001 +#define IW_AUTH_CIPHER_WEP40 0x00000002 +#define IW_AUTH_CIPHER_TKIP 0x00000004 +#define IW_AUTH_CIPHER_CCMP 0x00000008 +#define IW_AUTH_CIPHER_WEP104 0x00000010 +#define IW_AUTH_CIPHER_AES_CMAC 0x00000020 + +/* IW_AUTH_KEY_MGMT values (bit field) */ +#define IW_AUTH_KEY_MGMT_802_1X 1 +#define IW_AUTH_KEY_MGMT_PSK 2 + +/* IW_AUTH_80211_AUTH_ALG values (bit field) */ +#define IW_AUTH_ALG_OPEN_SYSTEM 0x00000001 +#define IW_AUTH_ALG_SHARED_KEY 0x00000002 +#define IW_AUTH_ALG_LEAP 0x00000004 + +/* IW_AUTH_ROAMING_CONTROL values */ +#define IW_AUTH_ROAMING_ENABLE 0 /* driver/firmware based roaming */ +#define IW_AUTH_ROAMING_DISABLE 1 /* user space program used for roaming + * control */ + +/* IW_AUTH_MFP (management frame protection) values */ +#define IW_AUTH_MFP_DISABLED 0 /* MFP disabled */ +#define IW_AUTH_MFP_OPTIONAL 1 /* MFP optional */ +#define IW_AUTH_MFP_REQUIRED 2 /* MFP required */ + +/* SIOCSIWENCODEEXT definitions */ +#define IW_ENCODE_SEQ_MAX_SIZE 8 +/* struct iw_encode_ext ->alg */ +#define IW_ENCODE_ALG_NONE 0 +#define IW_ENCODE_ALG_WEP 1 +#define IW_ENCODE_ALG_TKIP 2 +#define IW_ENCODE_ALG_CCMP 3 +#define IW_ENCODE_ALG_PMK 4 +#define IW_ENCODE_ALG_AES_CMAC 5 +/* struct iw_encode_ext ->ext_flags */ +#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001 +#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002 +#define IW_ENCODE_EXT_GROUP_KEY 0x00000004 +#define IW_ENCODE_EXT_SET_TX_KEY 0x00000008 + +/* IWEVMICHAELMICFAILURE : struct iw_michaelmicfailure ->flags */ +#define IW_MICFAILURE_KEY_ID 0x00000003 /* Key ID 0..3 */ +#define IW_MICFAILURE_GROUP 0x00000004 +#define IW_MICFAILURE_PAIRWISE 0x00000008 +#define IW_MICFAILURE_STAKEY 0x00000010 +#define IW_MICFAILURE_COUNT 0x00000060 /* 1 or 2 (0 = count not supported) + */ + +/* Bit field values for enc_capa in struct iw_range */ +#define IW_ENC_CAPA_WPA 0x00000001 +#define IW_ENC_CAPA_WPA2 0x00000002 +#define IW_ENC_CAPA_CIPHER_TKIP 0x00000004 +#define IW_ENC_CAPA_CIPHER_CCMP 0x00000008 +#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010 + +/* Event capability macros - in (struct iw_range *)->event_capa + * Because we have more than 32 possible events, we use an array of + * 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */ +#define IW_EVENT_CAPA_BASE(cmd) ((cmd >= SIOCIWFIRSTPRIV) ? 
\ + (cmd - SIOCIWFIRSTPRIV + 0x60) : \ + (cmd - SIOCSIWCOMMIT)) +#define IW_EVENT_CAPA_INDEX(cmd) (IW_EVENT_CAPA_BASE(cmd) >> 5) +#define IW_EVENT_CAPA_MASK(cmd) (1 << (IW_EVENT_CAPA_BASE(cmd) & 0x1F)) +/* Event capability constants - event autogenerated by the kernel + * This list is valid for most 802.11 devices, customise as needed... */ +#define IW_EVENT_CAPA_K_0 (IW_EVENT_CAPA_MASK(0x8B04) | \ + IW_EVENT_CAPA_MASK(0x8B06) | \ + IW_EVENT_CAPA_MASK(0x8B1A)) +#define IW_EVENT_CAPA_K_1 (IW_EVENT_CAPA_MASK(0x8B2A)) +/* "Easy" macro to set events in iw_range (less efficient) */ +#define IW_EVENT_CAPA_SET(event_capa, cmd) (event_capa[IW_EVENT_CAPA_INDEX(cmd)] |= IW_EVENT_CAPA_MASK(cmd)) +#define IW_EVENT_CAPA_SET_KERNEL(event_capa) {event_capa[0] |= IW_EVENT_CAPA_K_0; event_capa[1] |= IW_EVENT_CAPA_K_1; } + + +/****************************** TYPES ******************************/ + +/* --------------------------- SUBTYPES --------------------------- */ +/* + * Generic format for most parameters that fit in an int + */ +struct iw_param +{ + __s32 value; /* The value of the parameter itself */ + __u8 fixed; /* Hardware should not use auto select */ + __u8 disabled; /* Disable the feature */ + __u16 flags; /* Various specifc flags (if any) */ +}; + +/* + * For all data larger than 16 octets, we need to use a + * pointer to memory allocated in user space. + */ +struct iw_point +{ + void __user *pointer; /* Pointer to the data (in user space) */ + __u16 length; /* number of fields or size in bytes */ + __u16 flags; /* Optional params */ +}; + +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT + +#include + +struct compat_iw_point { + compat_caddr_t pointer; + __u16 length; + __u16 flags; +}; +#endif +#endif + +/* + * A frequency + * For numbers lower than 10^9, we encode the number in 'm' and + * set 'e' to 0 + * For number greater than 10^9, we divide it by the lowest power + * of 10 to get 'm' lower than 10^9, with 'm'= f / (10^'e')... + * The power of 10 is in 'e', the result of the division is in 'm'. + */ +struct iw_freq +{ + __s32 m; /* Mantissa */ + __s16 e; /* Exponent */ + __u8 i; /* List index (when in range struct) */ + __u8 flags; /* Flags (fixed/auto) */ +}; + +/* + * Quality of the link + */ +struct iw_quality +{ + __u8 qual; /* link quality (%retries, SNR, + %missed beacons or better...) */ + __u8 level; /* signal level (dBm) */ + __u8 noise; /* noise level (dBm) */ + __u8 updated; /* Flags to know if updated */ +}; + +/* + * Packet discarded in the wireless adapter due to + * "wireless" specific problems... + * Note : the list of counter and statistics in net_device_stats + * is already pretty exhaustive, and you should use that first. + * This is only additional stats... + */ +struct iw_discarded +{ + __u32 nwid; /* Rx : Wrong nwid/essid */ + __u32 code; /* Rx : Unable to code/decode (WEP) */ + __u32 fragment; /* Rx : Can't perform MAC reassembly */ + __u32 retries; /* Tx : Max MAC retries num reached */ + __u32 misc; /* Others cases */ +}; + +/* + * Packet/Time period missed in the wireless adapter due to + * "wireless" specific problems... 
+ */ +struct iw_missed +{ + __u32 beacon; /* Missed beacons/superframe */ +}; + +/* + * Quality range (for spy threshold) + */ +struct iw_thrspy +{ + struct sockaddr addr; /* Source address (hw/mac) */ + struct iw_quality qual; /* Quality of the link */ + struct iw_quality low; /* Low threshold */ + struct iw_quality high; /* High threshold */ +}; + +/* + * Optional data for scan request + * + * Note: these optional parameters are controlling parameters for the + * scanning behavior, these do not apply to getting scan results + * (SIOCGIWSCAN). Drivers are expected to keep a local BSS table and + * provide a merged results with all BSSes even if the previous scan + * request limited scanning to a subset, e.g., by specifying an SSID. + * Especially, scan results are required to include an entry for the + * current BSS if the driver is in Managed mode and associated with an AP. + */ +struct iw_scan_req +{ + __u8 scan_type; /* IW_SCAN_TYPE_{ACTIVE,PASSIVE} */ + __u8 essid_len; + __u8 num_channels; /* num entries in channel_list; + * 0 = scan all allowed channels */ + __u8 flags; /* reserved as padding; use zero, this may + * be used in the future for adding flags + * to request different scan behavior */ + struct sockaddr bssid; /* ff:ff:ff:ff:ff:ff for broadcast BSSID or + * individual address of a specific BSS */ + + /* + * Use this ESSID if IW_SCAN_THIS_ESSID flag is used instead of using + * the current ESSID. This allows scan requests for specific ESSID + * without having to change the current ESSID and potentially breaking + * the current association. + */ + __u8 essid[IW_ESSID_MAX_SIZE]; + + /* + * Optional parameters for changing the default scanning behavior. + * These are based on the MLME-SCAN.request from IEEE Std 802.11. + * TU is 1.024 ms. If these are set to 0, driver is expected to use + * reasonable default values. min_channel_time defines the time that + * will be used to wait for the first reply on each channel. If no + * replies are received, next channel will be scanned after this. If + * replies are received, total time waited on the channel is defined by + * max_channel_time. + */ + __u32 min_channel_time; /* in TU */ + __u32 max_channel_time; /* in TU */ + + struct iw_freq channel_list[IW_MAX_FREQUENCIES]; +}; + +/* ------------------------- WPA SUPPORT ------------------------- */ + +/* + * Extended data structure for get/set encoding (this is used with + * SIOCSIWENCODEEXT/SIOCGIWENCODEEXT. struct iw_point and IW_ENCODE_* + * flags are used in the same way as with SIOCSIWENCODE/SIOCGIWENCODE and + * only the data contents changes (key data -> this structure, including + * key data). + * + * If the new key is the first group key, it will be set as the default + * TX key. Otherwise, default TX key index is only changed if + * IW_ENCODE_EXT_SET_TX_KEY flag is set. + * + * Key will be changed with SIOCSIWENCODEEXT in all cases except for + * special "change TX key index" operation which is indicated by setting + * key_len = 0 and ext_flags |= IW_ENCODE_EXT_SET_TX_KEY. + * + * tx_seq/rx_seq are only used when respective + * IW_ENCODE_EXT_{TX,RX}_SEQ_VALID flag is set in ext_flags. Normal + * TKIP/CCMP operation is to set RX seq with SIOCSIWENCODEEXT and start + * TX seq from zero whenever key is changed. SIOCGIWENCODEEXT is normally + * used only by an Authenticator (AP or an IBSS station) to get the + * current TX sequence number. Using TX_SEQ_VALID for SIOCSIWENCODEEXT and + * RX_SEQ_VALID for SIOCGIWENCODEEXT are optional, but can be useful for + * debugging/testing. 
+ */ +struct iw_encode_ext +{ + __u32 ext_flags; /* IW_ENCODE_EXT_* */ + __u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ + __u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ + struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast + * (group) keys or unicast address for + * individual keys */ + __u16 alg; /* IW_ENCODE_ALG_* */ + __u16 key_len; + __u8 key[0]; +}; + +/* SIOCSIWMLME data */ +struct iw_mlme +{ + __u16 cmd; /* IW_MLME_* */ + __u16 reason_code; + struct sockaddr addr; +}; + +/* SIOCSIWPMKSA data */ +#define IW_PMKSA_ADD 1 +#define IW_PMKSA_REMOVE 2 +#define IW_PMKSA_FLUSH 3 + +#define IW_PMKID_LEN 16 + +struct iw_pmksa +{ + __u32 cmd; /* IW_PMKSA_* */ + struct sockaddr bssid; + __u8 pmkid[IW_PMKID_LEN]; +}; + +/* IWEVMICHAELMICFAILURE data */ +struct iw_michaelmicfailure +{ + __u32 flags; + struct sockaddr src_addr; + __u8 tsc[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */ +}; + +/* IWEVPMKIDCAND data */ +#define IW_PMKID_CAND_PREAUTH 0x00000001 /* RNS pre-authentication enabled */ +struct iw_pmkid_cand +{ + __u32 flags; /* IW_PMKID_CAND_* */ + __u32 index; /* the smaller the index, the higher the + * priority */ + struct sockaddr bssid; +}; + +/* ------------------------ WIRELESS STATS ------------------------ */ +/* + * Wireless statistics (used for /proc/net/wireless) + */ +struct iw_statistics +{ + __u16 status; /* Status + * - device dependent for now */ + + struct iw_quality qual; /* Quality of the link + * (instant/mean/max) */ + struct iw_discarded discard; /* Packet discarded counts */ + struct iw_missed miss; /* Packet missed counts */ +}; + +/* ------------------------ IOCTL REQUEST ------------------------ */ +/* + * This structure defines the payload of an ioctl, and is used + * below. + * + * Note that this structure should fit on the memory footprint + * of iwreq (which is the same as ifreq), which mean a max size of + * 16 octets = 128 bits. Warning, pointers might be 64 bits wide... + * You should check this when increasing the structures defined + * above in this file... + */ +union iwreq_data +{ + /* Config - generic */ + char name[IFNAMSIZ]; + /* Name : used to verify the presence of wireless extensions. + * Name of the protocol/provider... */ + + struct iw_point essid; /* Extended network name */ + struct iw_param nwid; /* network id (or domain - the cell) */ + struct iw_freq freq; /* frequency or channel : + * 0-1000 = channel + * > 1000 = frequency in Hz */ + + struct iw_param sens; /* signal level threshold */ + struct iw_param bitrate; /* default bit rate */ + struct iw_param txpower; /* default transmit power */ + struct iw_param rts; /* RTS threshold threshold */ + struct iw_param frag; /* Fragmentation threshold */ + __u32 mode; /* Operation mode */ + struct iw_param retry; /* Retry limits & lifetime */ + + struct iw_point encoding; /* Encoding stuff : tokens */ + struct iw_param power; /* PM duration/timeout */ + struct iw_quality qual; /* Quality part of statistics */ + + struct sockaddr ap_addr; /* Access point address */ + struct sockaddr addr; /* Destination address (hw/mac) */ + + struct iw_param param; /* Other small parameters */ + struct iw_point data; /* Other large parameters */ +}; + +/* + * The structure to exchange data for ioctl. + * This structure is the same as 'struct ifreq', but (re)defined for + * convenience... + * Do I need to remind you about structure size (32 octets) ? + */ +struct iwreq +{ + union + { + char ifrn_name[IFNAMSIZ]; /* if name, e.g. 
"eth0" */ + } ifr_ifrn; + + /* Data part (defined just above) */ + union iwreq_data u; +}; + +/* -------------------------- IOCTL DATA -------------------------- */ +/* + * For those ioctl which want to exchange mode data that what could + * fit in the above structure... + */ + +/* + * Range of parameters + */ + +struct iw_range +{ + /* Informative stuff (to choose between different interface) */ + __u32 throughput; /* To give an idea... */ + /* In theory this value should be the maximum benchmarked + * TCP/IP throughput, because with most of these devices the + * bit rate is meaningless (overhead an co) to estimate how + * fast the connection will go and pick the fastest one. + * I suggest people to play with Netperf or any benchmark... + */ + + /* NWID (or domain id) */ + __u32 min_nwid; /* Minimal NWID we are able to set */ + __u32 max_nwid; /* Maximal NWID we are able to set */ + + /* Old Frequency (backward compat - moved lower ) */ + __u16 old_num_channels; + __u8 old_num_frequency; + + /* Scan capabilities */ + __u8 scan_capa; /* IW_SCAN_CAPA_* bit field */ + + /* Wireless event capability bitmasks */ + __u32 event_capa[6]; + + /* signal level threshold range */ + __s32 sensitivity; + + /* Quality of link & SNR stuff */ + /* Quality range (link, level, noise) + * If the quality is absolute, it will be in the range [0 ; max_qual], + * if the quality is dBm, it will be in the range [max_qual ; 0]. + * Don't forget that we use 8 bit arithmetics... */ + struct iw_quality max_qual; /* Quality of the link */ + /* This should contain the average/typical values of the quality + * indicator. This should be the threshold between a "good" and + * a "bad" link (example : monitor going from green to orange). + * Currently, user space apps like quality monitors don't have any + * way to calibrate the measurement. With this, they can split + * the range between 0 and max_qual in different quality level + * (using a geometric subdivision centered on the average). + * I expect that people doing the user space apps will feedback + * us on which value we need to put in each driver... 
*/ + struct iw_quality avg_qual; /* Quality of the link */ + + /* Rates */ + __u8 num_bitrates; /* Number of entries in the list */ + __s32 bitrate[IW_MAX_BITRATES]; /* list, in bps */ + + /* RTS threshold */ + __s32 min_rts; /* Minimal RTS threshold */ + __s32 max_rts; /* Maximal RTS threshold */ + + /* Frag threshold */ + __s32 min_frag; /* Minimal frag threshold */ + __s32 max_frag; /* Maximal frag threshold */ + + /* Power Management duration & timeout */ + __s32 min_pmp; /* Minimal PM period */ + __s32 max_pmp; /* Maximal PM period */ + __s32 min_pmt; /* Minimal PM timeout */ + __s32 max_pmt; /* Maximal PM timeout */ + __u16 pmp_flags; /* How to decode max/min PM period */ + __u16 pmt_flags; /* How to decode max/min PM timeout */ + __u16 pm_capa; /* What PM options are supported */ + + /* Encoder stuff */ + __u16 encoding_size[IW_MAX_ENCODING_SIZES]; /* Different token sizes */ + __u8 num_encoding_sizes; /* Number of entry in the list */ + __u8 max_encoding_tokens; /* Max number of tokens */ + /* For drivers that need a "login/passwd" form */ + __u8 encoding_login_index; /* token index for login token */ + + /* Transmit power */ + __u16 txpower_capa; /* What options are supported */ + __u8 num_txpower; /* Number of entries in the list */ + __s32 txpower[IW_MAX_TXPOWER]; /* list, in bps */ + + /* Wireless Extension version info */ + __u8 we_version_compiled; /* Must be WIRELESS_EXT */ + __u8 we_version_source; /* Last update of source */ + + /* Retry limits and lifetime */ + __u16 retry_capa; /* What retry options are supported */ + __u16 retry_flags; /* How to decode max/min retry limit */ + __u16 r_time_flags; /* How to decode max/min retry life */ + __s32 min_retry; /* Minimal number of retries */ + __s32 max_retry; /* Maximal number of retries */ + __s32 min_r_time; /* Minimal retry lifetime */ + __s32 max_r_time; /* Maximal retry lifetime */ + + /* Frequency */ + __u16 num_channels; /* Number of channels [0; num - 1] */ + __u8 num_frequency; /* Number of entry in the list */ + struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */ + /* Note : this frequency list doesn't need to fit channel numbers, + * because each entry contain its channel index */ + + __u32 enc_capa; /* IW_ENC_CAPA_* bit field */ +}; + +/* + * Private ioctl interface information + */ + +struct iw_priv_args +{ + __u32 cmd; /* Number of the ioctl to issue */ + __u16 set_args; /* Type and number of args */ + __u16 get_args; /* Type and number of args */ + char name[IFNAMSIZ]; /* Name of the extension */ +}; + +/* ----------------------- WIRELESS EVENTS ----------------------- */ +/* + * Wireless events are carried through the rtnetlink socket to user + * space. They are encapsulated in the IFLA_WIRELESS field of + * a RTM_NEWLINK message. + */ + +/* + * A Wireless Event. Contains basically the same data as the ioctl... 
+ */ +struct iw_event +{ + __u16 len; /* Real length of this stuff */ + __u16 cmd; /* Wireless IOCTL */ + union iwreq_data u; /* IOCTL fixed payload */ +}; + +/* Size of the Event prefix (including padding and alignement junk) */ +#define IW_EV_LCP_LEN (sizeof(struct iw_event) - sizeof(union iwreq_data)) +/* Size of the various events */ +#define IW_EV_CHAR_LEN (IW_EV_LCP_LEN + IFNAMSIZ) +#define IW_EV_UINT_LEN (IW_EV_LCP_LEN + sizeof(__u32)) +#define IW_EV_FREQ_LEN (IW_EV_LCP_LEN + sizeof(struct iw_freq)) +#define IW_EV_PARAM_LEN (IW_EV_LCP_LEN + sizeof(struct iw_param)) +#define IW_EV_ADDR_LEN (IW_EV_LCP_LEN + sizeof(struct sockaddr)) +#define IW_EV_QUAL_LEN (IW_EV_LCP_LEN + sizeof(struct iw_quality)) + +/* iw_point events are special. First, the payload (extra data) come at + * the end of the event, so they are bigger than IW_EV_POINT_LEN. Second, + * we omit the pointer, so start at an offset. */ +#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \ + (char *) NULL) +#define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \ + IW_EV_POINT_OFF) + +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT +struct __compat_iw_event { + __u16 len; /* Real length of this stuff */ + __u16 cmd; /* Wireless IOCTL */ + compat_caddr_t pointer; +}; +#define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer) +#define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length) + +/* Size of the various events for compat */ +#define IW_EV_COMPAT_CHAR_LEN (IW_EV_COMPAT_LCP_LEN + IFNAMSIZ) +#define IW_EV_COMPAT_UINT_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(__u32)) +#define IW_EV_COMPAT_FREQ_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_freq)) +#define IW_EV_COMPAT_PARAM_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_param)) +#define IW_EV_COMPAT_ADDR_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct sockaddr)) +#define IW_EV_COMPAT_QUAL_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_quality)) +#define IW_EV_COMPAT_POINT_LEN \ + (IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \ + IW_EV_COMPAT_POINT_OFF) +#endif +#endif + +/* Size of the Event prefix when packed in stream */ +#define IW_EV_LCP_PK_LEN (4) +/* Size of the various events when packed in stream */ +#define IW_EV_CHAR_PK_LEN (IW_EV_LCP_PK_LEN + IFNAMSIZ) +#define IW_EV_UINT_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(__u32)) +#define IW_EV_FREQ_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_freq)) +#define IW_EV_PARAM_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_param)) +#define IW_EV_ADDR_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct sockaddr)) +#define IW_EV_QUAL_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_quality)) +#define IW_EV_POINT_PK_LEN (IW_EV_LCP_LEN + 4) + +#endif /* _LINUX_WIRELESS_H */ diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h new file mode 100644 index 0000000..04a6908 --- /dev/null +++ b/include/net/bluetooth/bluetooth.h @@ -0,0 +1,181 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#ifndef __BLUETOOTH_H +#define __BLUETOOTH_H + +#include +#include +#include +#include +#include + +#ifndef AF_BLUETOOTH +#define AF_BLUETOOTH 31 +#define PF_BLUETOOTH AF_BLUETOOTH +#endif + +/* Reserv for core and drivers use */ +#define BT_SKB_RESERVE 8 + +#define BTPROTO_L2CAP 0 +#define BTPROTO_HCI 1 +#define BTPROTO_SCO 2 +#define BTPROTO_RFCOMM 3 +#define BTPROTO_BNEP 4 +#define BTPROTO_CMTP 5 +#define BTPROTO_HIDP 6 +#define BTPROTO_AVDTP 7 + +#define SOL_HCI 0 +#define SOL_L2CAP 6 +#define SOL_SCO 17 +#define SOL_RFCOMM 18 + +#define BT_SECURITY 4 +struct bt_security { + __u8 level; +}; +#define BT_SECURITY_SDP 0 +#define BT_SECURITY_LOW 1 +#define BT_SECURITY_MEDIUM 2 +#define BT_SECURITY_HIGH 3 + +#define BT_DEFER_SETUP 7 + +#define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg) +#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg) +#define BT_DBG(fmt, arg...) pr_debug("%s: " fmt "\n" , __func__ , ## arg) + +/* Connection and socket states */ +enum { + BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */ + BT_OPEN, + BT_BOUND, + BT_LISTEN, + BT_CONNECT, + BT_CONNECT2, + BT_CONFIG, + BT_DISCONN, + BT_CLOSED +}; + +/* BD Address */ +typedef struct { + __u8 b[6]; +} __attribute__((packed)) bdaddr_t; + +#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}}) +#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}}) + +/* Copy, swap, convert BD Address */ +static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2) +{ + return memcmp(ba1, ba2, sizeof(bdaddr_t)); +} +static inline void bacpy(bdaddr_t *dst, bdaddr_t *src) +{ + memcpy(dst, src, sizeof(bdaddr_t)); +} + +void baswap(bdaddr_t *dst, bdaddr_t *src); +char *batostr(bdaddr_t *ba); +bdaddr_t *strtoba(char *str); + +/* Common socket structures and functions */ + +#define bt_sk(__sk) ((struct bt_sock *) __sk) + +struct bt_sock { + struct sock sk; + bdaddr_t src; + bdaddr_t dst; + struct list_head accept_q; + struct sock *parent; + u32 defer_setup; +}; + +struct bt_sock_list { + struct hlist_head head; + rwlock_t lock; +}; + +int bt_sock_register(int proto, const struct net_proto_family *ops); +int bt_sock_unregister(int proto); +void bt_sock_link(struct bt_sock_list *l, struct sock *s); +void bt_sock_unlink(struct bt_sock_list *l, struct sock *s); +int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags); +uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait); +int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); +int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); + +void bt_accept_enqueue(struct sock *parent, struct sock *sk); +void bt_accept_unlink(struct sock *sk); +struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock); + +/* Skb helpers */ +struct bt_skb_cb { + __u8 pkt_type; + __u8 incoming; + __u8 tx_seq; + __u8 retries; + __u8 sar; +}; +#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb)) + 
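/*
 * Illustrative sketch, not part of the patch: how the bt_cb() control-block
 * accessor and the bdaddr_t helpers above are typically used by a caller.
 * The function name and the address-filtering policy are invented for the
 * example; only bdaddr_t, bacmp() and bt_cb() come from this header, while
 * HCI_ACLDATA_PKT is defined in the hci.h added later in this patch.  The
 * usual kernel headers (<linux/skbuff.h>, <linux/errno.h>) are assumed.
 */
static int example_tag_and_filter(struct sk_buff *skb, bdaddr_t *src,
				  bdaddr_t *allowed)
{
	/* bacmp() returns non-zero when the two addresses differ. */
	if (bacmp(src, allowed))
		return -EPERM;

	/* Per-skb metadata lives in skb->cb and is reached through bt_cb(). */
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;	/* 0x02 */
	bt_cb(skb)->incoming = 1;

	return 0;
}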
+static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) +{ + struct sk_buff *skb; + + if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) { + skb_reserve(skb, BT_SKB_RESERVE); + bt_cb(skb)->incoming = 0; + } + return skb; +} + +static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long len, + int nb, int *err) +{ + struct sk_buff *skb; + + if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { + skb_reserve(skb, BT_SKB_RESERVE); + bt_cb(skb)->incoming = 0; + } + + return skb; +} + +int bt_err(__u16 code); + +extern int hci_sock_init(void); +extern void hci_sock_cleanup(void); + +extern int bt_sysfs_init(void); +extern void bt_sysfs_cleanup(void); + +extern struct class *bt_class; + +#endif /* __BLUETOOTH_H */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h new file mode 100644 index 0000000..fc0c502 --- /dev/null +++ b/include/net/bluetooth/hci.h @@ -0,0 +1,1036 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +#ifndef __HCI_H +#define __HCI_H + +#define HCI_MAX_ACL_SIZE 1024 +#define HCI_MAX_SCO_SIZE 255 +#define HCI_MAX_EVENT_SIZE 260 +#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4) + +/* HCI dev events */ +#define HCI_DEV_REG 1 +#define HCI_DEV_UNREG 2 +#define HCI_DEV_UP 3 +#define HCI_DEV_DOWN 4 +#define HCI_DEV_SUSPEND 5 +#define HCI_DEV_RESUME 6 + +/* HCI notify events */ +#define HCI_NOTIFY_CONN_ADD 1 +#define HCI_NOTIFY_CONN_DEL 2 +#define HCI_NOTIFY_VOICE_SETTING 3 + +/* HCI bus types */ +#define HCI_VIRTUAL 0 +#define HCI_USB 1 +#define HCI_PCCARD 2 +#define HCI_UART 3 +#define HCI_RS232 4 +#define HCI_PCI 5 +#define HCI_SDIO 6 + +/* HCI controller types */ +#define HCI_BREDR 0x00 +#define HCI_80211 0x01 + +/* HCI device quirks */ +enum { + HCI_QUIRK_NO_RESET, + HCI_QUIRK_RAW_DEVICE, + HCI_QUIRK_FIXUP_BUFFER_SIZE +}; + +/* HCI device flags */ +enum { + HCI_UP, + HCI_INIT, + HCI_RUNNING, + + HCI_PSCAN, + HCI_ISCAN, + HCI_AUTH, + HCI_ENCRYPT, + HCI_INQUIRY, + + HCI_RAW, +}; + +/* HCI ioctl defines */ +#define HCIDEVUP _IOW('H', 201, int) +#define HCIDEVDOWN _IOW('H', 202, int) +#define HCIDEVRESET _IOW('H', 203, int) +#define HCIDEVRESTAT _IOW('H', 204, int) + +#define HCIGETDEVLIST _IOR('H', 210, int) +#define HCIGETDEVINFO _IOR('H', 211, int) +#define HCIGETCONNLIST _IOR('H', 212, int) +#define HCIGETCONNINFO _IOR('H', 213, int) +#define HCIGETAUTHINFO _IOR('H', 215, int) + +#define HCISETRAW _IOW('H', 220, int) +#define HCISETSCAN _IOW('H', 221, int) +#define HCISETAUTH _IOW('H', 222, int) +#define HCISETENCRYPT _IOW('H', 223, int) +#define HCISETPTYPE _IOW('H', 224, int) +#define HCISETLINKPOL _IOW('H', 225, int) +#define HCISETLINKMODE _IOW('H', 226, int) +#define HCISETACLMTU _IOW('H', 227, int) +#define HCISETSCOMTU _IOW('H', 228, int) + +#define HCIINQUIRY _IOR('H', 240, int) + +/* HCI timeouts */ +#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */ +#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */ +#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */ +#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */ +#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */ + +/* HCI data types */ +#define HCI_COMMAND_PKT 0x01 +#define HCI_ACLDATA_PKT 0x02 +#define HCI_SCODATA_PKT 0x03 +#define HCI_EVENT_PKT 0x04 +#define HCI_VENDOR_PKT 0xff + +/* HCI packet types */ +#define HCI_DM1 0x0008 +#define HCI_DM3 0x0400 +#define HCI_DM5 0x4000 +#define HCI_DH1 0x0010 +#define HCI_DH3 0x0800 +#define HCI_DH5 0x8000 + +#define HCI_HV1 0x0020 +#define HCI_HV2 0x0040 +#define HCI_HV3 0x0080 + +#define SCO_PTYPE_MASK (HCI_HV1 | HCI_HV2 | HCI_HV3) +#define ACL_PTYPE_MASK (~SCO_PTYPE_MASK) + +/* eSCO packet types */ +#define ESCO_HV1 0x0001 +#define ESCO_HV2 0x0002 +#define ESCO_HV3 0x0004 +#define ESCO_EV3 0x0008 +#define ESCO_EV4 0x0010 +#define ESCO_EV5 0x0020 +#define ESCO_2EV3 0x0040 +#define ESCO_3EV3 0x0080 +#define ESCO_2EV5 0x0100 +#define ESCO_3EV5 0x0200 + +#define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3) +#define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5) + +/* ACL flags */ +#define ACL_CONT 0x01 +#define ACL_START 0x02 +#define ACL_ACTIVE_BCAST 0x04 +#define ACL_PICO_BCAST 0x08 + +/* Baseband links */ +#define SCO_LINK 0x00 +#define ACL_LINK 0x01 +#define ESCO_LINK 0x02 + +/* LMP features */ +#define LMP_3SLOT 0x01 +#define LMP_5SLOT 0x02 +#define LMP_ENCRYPT 0x04 +#define LMP_SOFFSET 0x08 +#define LMP_TACCURACY 0x10 +#define LMP_RSWITCH 0x20 +#define LMP_HOLD 0x40 +#define LMP_SNIFF 0x80 + +#define LMP_PARK 0x01 +#define LMP_RSSI 0x02 +#define LMP_QUALITY 0x04 
+#define LMP_SCO 0x08 +#define LMP_HV2 0x10 +#define LMP_HV3 0x20 +#define LMP_ULAW 0x40 +#define LMP_ALAW 0x80 + +#define LMP_CVSD 0x01 +#define LMP_PSCHEME 0x02 +#define LMP_PCONTROL 0x04 + +#define LMP_ESCO 0x80 + +#define LMP_EV4 0x01 +#define LMP_EV5 0x02 + +#define LMP_SNIFF_SUBR 0x02 +#define LMP_EDR_ESCO_2M 0x20 +#define LMP_EDR_ESCO_3M 0x40 +#define LMP_EDR_3S_ESCO 0x80 + +#define LMP_SIMPLE_PAIR 0x08 + +/* Connection modes */ +#define HCI_CM_ACTIVE 0x0000 +#define HCI_CM_HOLD 0x0001 +#define HCI_CM_SNIFF 0x0002 +#define HCI_CM_PARK 0x0003 + +/* Link policies */ +#define HCI_LP_RSWITCH 0x0001 +#define HCI_LP_HOLD 0x0002 +#define HCI_LP_SNIFF 0x0004 +#define HCI_LP_PARK 0x0008 + +/* Link modes */ +#define HCI_LM_ACCEPT 0x8000 +#define HCI_LM_MASTER 0x0001 +#define HCI_LM_AUTH 0x0002 +#define HCI_LM_ENCRYPT 0x0004 +#define HCI_LM_TRUSTED 0x0008 +#define HCI_LM_RELIABLE 0x0010 +#define HCI_LM_SECURE 0x0020 + +/* Authentication types */ +#define HCI_AT_NO_BONDING 0x00 +#define HCI_AT_NO_BONDING_MITM 0x01 +#define HCI_AT_DEDICATED_BONDING 0x02 +#define HCI_AT_DEDICATED_BONDING_MITM 0x03 +#define HCI_AT_GENERAL_BONDING 0x04 +#define HCI_AT_GENERAL_BONDING_MITM 0x05 + +/* ----- HCI Commands ---- */ +#define HCI_OP_INQUIRY 0x0401 +struct hci_cp_inquiry { + __u8 lap[3]; + __u8 length; + __u8 num_rsp; +} __attribute__ ((packed)); + +#define HCI_OP_INQUIRY_CANCEL 0x0402 + +#define HCI_OP_EXIT_PERIODIC_INQ 0x0404 + +#define HCI_OP_CREATE_CONN 0x0405 +struct hci_cp_create_conn { + bdaddr_t bdaddr; + __le16 pkt_type; + __u8 pscan_rep_mode; + __u8 pscan_mode; + __le16 clock_offset; + __u8 role_switch; +} __attribute__ ((packed)); + +#define HCI_OP_DISCONNECT 0x0406 +struct hci_cp_disconnect { + __le16 handle; + __u8 reason; +} __attribute__ ((packed)); + +#define HCI_OP_ADD_SCO 0x0407 +struct hci_cp_add_sco { + __le16 handle; + __le16 pkt_type; +} __attribute__ ((packed)); + +#define HCI_OP_CREATE_CONN_CANCEL 0x0408 +struct hci_cp_create_conn_cancel { + bdaddr_t bdaddr; +} __attribute__ ((packed)); + +#define HCI_OP_ACCEPT_CONN_REQ 0x0409 +struct hci_cp_accept_conn_req { + bdaddr_t bdaddr; + __u8 role; +} __attribute__ ((packed)); + +#define HCI_OP_REJECT_CONN_REQ 0x040a +struct hci_cp_reject_conn_req { + bdaddr_t bdaddr; + __u8 reason; +} __attribute__ ((packed)); + +#define HCI_OP_LINK_KEY_REPLY 0x040b +struct hci_cp_link_key_reply { + bdaddr_t bdaddr; + __u8 link_key[16]; +} __attribute__ ((packed)); + +#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c +struct hci_cp_link_key_neg_reply { + bdaddr_t bdaddr; +} __attribute__ ((packed)); + +#define HCI_OP_PIN_CODE_REPLY 0x040d +struct hci_cp_pin_code_reply { + bdaddr_t bdaddr; + __u8 pin_len; + __u8 pin_code[16]; +} __attribute__ ((packed)); + +#define HCI_OP_PIN_CODE_NEG_REPLY 0x040e +struct hci_cp_pin_code_neg_reply { + bdaddr_t bdaddr; +} __attribute__ ((packed)); + +#define HCI_OP_CHANGE_CONN_PTYPE 0x040f +struct hci_cp_change_conn_ptype { + __le16 handle; + __le16 pkt_type; +} __attribute__ ((packed)); + +#define HCI_OP_AUTH_REQUESTED 0x0411 +struct hci_cp_auth_requested { + __le16 handle; +} __attribute__ ((packed)); + +#define HCI_OP_SET_CONN_ENCRYPT 0x0413 +struct hci_cp_set_conn_encrypt { + __le16 handle; + __u8 encrypt; +} __attribute__ ((packed)); + +#define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415 +struct hci_cp_change_conn_link_key { + __le16 handle; +} __attribute__ ((packed)); + +#define HCI_OP_REMOTE_NAME_REQ 0x0419 +struct hci_cp_remote_name_req { + bdaddr_t bdaddr; + __u8 pscan_rep_mode; + __u8 pscan_mode; + __le16 clock_offset; +} 
__attribute__ ((packed)); + +#define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a +struct hci_cp_remote_name_req_cancel { + bdaddr_t bdaddr; +} __attribute__ ((packed)); + +#define HCI_OP_READ_REMOTE_FEATURES 0x041b +struct hci_cp_read_remote_features { + __le16 handle; +} __attribute__ ((packed)); + +#define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c +struct hci_cp_read_remote_ext_features { + __le16 handle; + __u8 page; +} __attribute__ ((packed)); + +#define HCI_OP_READ_REMOTE_VERSION 0x041d +struct hci_cp_read_remote_version { + __le16 handle; +} __attribute__ ((packed)); + +#define HCI_OP_SETUP_SYNC_CONN 0x0428 +struct hci_cp_setup_sync_conn { + __le16 handle; + __le32 tx_bandwidth; + __le32 rx_bandwidth; + __le16 max_latency; + __le16 voice_setting; + __u8 retrans_effort; + __le16 pkt_type; +} __attribute__ ((packed)); + +#define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429 +struct hci_cp_accept_sync_conn_req { + bdaddr_t bdaddr; + __le32 tx_bandwidth; + __le32 rx_bandwidth; + __le16 max_latency; + __le16 content_format; + __u8 retrans_effort; + __le16 pkt_type; +} __attribute__ ((packed)); + +#define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a +struct hci_cp_reject_sync_conn_req { + bdaddr_t bdaddr; + __u8 reason; +} __attribute__ ((packed)); + +#define HCI_OP_SNIFF_MODE 0x0803 +struct hci_cp_sniff_mode { + __le16 handle; + __le16 max_interval; + __le16 min_interval; + __le16 attempt; + __le16 timeout; +} __attribute__ ((packed)); + +#define HCI_OP_EXIT_SNIFF_MODE 0x0804 +struct hci_cp_exit_sniff_mode { + __le16 handle; +} __attribute__ ((packed)); + +#define HCI_OP_ROLE_DISCOVERY 0x0809 +struct hci_cp_role_discovery { + __le16 handle; +} __attribute__ ((packed)); +struct hci_rp_role_discovery { + __u8 status; + __le16 handle; + __u8 role; +} __attribute__ ((packed)); + +#define HCI_OP_SWITCH_ROLE 0x080b +struct hci_cp_switch_role { + bdaddr_t bdaddr; + __u8 role; +} __attribute__ ((packed)); + +#define HCI_OP_READ_LINK_POLICY 0x080c +struct hci_cp_read_link_policy { + __le16 handle; +} __attribute__ ((packed)); +struct hci_rp_read_link_policy { + __u8 status; + __le16 handle; + __le16 policy; +} __attribute__ ((packed)); + +#define HCI_OP_WRITE_LINK_POLICY 0x080d +struct hci_cp_write_link_policy { + __le16 handle; + __le16 policy; +} __attribute__ ((packed)); +struct hci_rp_write_link_policy { + __u8 status; + __le16 handle; +} __attribute__ ((packed)); + +#define HCI_OP_READ_DEF_LINK_POLICY 0x080e +struct hci_rp_read_def_link_policy { + __u8 status; + __le16 policy; +} __attribute__ ((packed)); + +#define HCI_OP_WRITE_DEF_LINK_POLICY 0x080f +struct hci_cp_write_def_link_policy { + __le16 policy; +} __attribute__ ((packed)); + +#define HCI_OP_SNIFF_SUBRATE 0x0811 +struct hci_cp_sniff_subrate { + __le16 handle; + __le16 max_latency; + __le16 min_remote_timeout; + __le16 min_local_timeout; +} __attribute__ ((packed)); + +#define HCI_OP_SET_EVENT_MASK 0x0c01 +struct hci_cp_set_event_mask { + __u8 mask[8]; +} __attribute__ ((packed)); + +#define HCI_OP_RESET 0x0c03 + +#define HCI_OP_SET_EVENT_FLT 0x0c05 +struct hci_cp_set_event_flt { + __u8 flt_type; + __u8 cond_type; + __u8 condition[0]; +} __attribute__ ((packed)); + +/* Filter types */ +#define HCI_FLT_CLEAR_ALL 0x00 +#define HCI_FLT_INQ_RESULT 0x01 +#define HCI_FLT_CONN_SETUP 0x02 + +/* CONN_SETUP Condition types */ +#define HCI_CONN_SETUP_ALLOW_ALL 0x00 +#define HCI_CONN_SETUP_ALLOW_CLASS 0x01 +#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02 + +/* CONN_SETUP Conditions */ +#define HCI_CONN_SETUP_AUTO_OFF 0x01 +#define HCI_CONN_SETUP_AUTO_ON 0x02 + +#define 
HCI_OP_WRITE_LOCAL_NAME 0x0c13 +struct hci_cp_write_local_name { + __u8 name[248]; +} __attribute__ ((packed)); + +#define HCI_OP_READ_LOCAL_NAME 0x0c14 +struct hci_rp_read_local_name { + __u8 status; + __u8 name[248]; +} __attribute__ ((packed)); + +#define HCI_OP_WRITE_CA_TIMEOUT 0x0c16 + +#define HCI_OP_WRITE_PG_TIMEOUT 0x0c18 + +#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a + #define SCAN_DISABLED 0x00 + #define SCAN_INQUIRY 0x01 + #define SCAN_PAGE 0x02 + +#define HCI_OP_READ_AUTH_ENABLE 0x0c1f + +#define HCI_OP_WRITE_AUTH_ENABLE 0x0c20 + #define AUTH_DISABLED 0x00 + #define AUTH_ENABLED 0x01 + +#define HCI_OP_READ_ENCRYPT_MODE 0x0c21 + +#define HCI_OP_WRITE_ENCRYPT_MODE 0x0c22 + #define ENCRYPT_DISABLED 0x00 + #define ENCRYPT_P2P 0x01 + #define ENCRYPT_BOTH 0x02 + +#define HCI_OP_READ_CLASS_OF_DEV 0x0c23 +struct hci_rp_read_class_of_dev { + __u8 status; + __u8 dev_class[3]; +} __attribute__ ((packed)); + +#define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24 +struct hci_cp_write_class_of_dev { + __u8 dev_class[3]; +} __attribute__ ((packed)); + +#define HCI_OP_READ_VOICE_SETTING 0x0c25 +struct hci_rp_read_voice_setting { + __u8 status; + __le16 voice_setting; +} __attribute__ ((packed)); + +#define HCI_OP_WRITE_VOICE_SETTING 0x0c26 +struct hci_cp_write_voice_setting { + __le16 voice_setting; +} __attribute__ ((packed)); + +#define HCI_OP_HOST_BUFFER_SIZE 0x0c33 +struct hci_cp_host_buffer_size { + __le16 acl_mtu; + __u8 sco_mtu; + __le16 acl_max_pkt; + __le16 sco_max_pkt; +} __attribute__ ((packed)); + +#define HCI_OP_READ_SSP_MODE 0x0c55 +struct hci_rp_read_ssp_mode { + __u8 status; + __u8 mode; +} __attribute__ ((packed)); + +#define HCI_OP_WRITE_SSP_MODE 0x0c56 +struct hci_cp_write_ssp_mode { + __u8 mode; +} __attribute__ ((packed)); + +#define HCI_OP_READ_LOCAL_VERSION 0x1001 +struct hci_rp_read_local_version { + __u8 status; + __u8 hci_ver; + __le16 hci_rev; + __u8 lmp_ver; + __le16 manufacturer; + __le16 lmp_subver; +} __attribute__ ((packed)); + +#define HCI_OP_READ_LOCAL_COMMANDS 0x1002 +struct hci_rp_read_local_commands { + __u8 status; + __u8 commands[64]; +} __attribute__ ((packed)); + +#define HCI_OP_READ_LOCAL_FEATURES 0x1003 +struct hci_rp_read_local_features { + __u8 status; + __u8 features[8]; +} __attribute__ ((packed)); + +#define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004 +struct hci_rp_read_local_ext_features { + __u8 status; + __u8 page; + __u8 max_page; + __u8 features[8]; +} __attribute__ ((packed)); + +#define HCI_OP_READ_BUFFER_SIZE 0x1005 +struct hci_rp_read_buffer_size { + __u8 status; + __le16 acl_mtu; + __u8 sco_mtu; + __le16 acl_max_pkt; + __le16 sco_max_pkt; +} __attribute__ ((packed)); + +#define HCI_OP_READ_BD_ADDR 0x1009 +struct hci_rp_read_bd_addr { + __u8 status; + bdaddr_t bdaddr; +} __attribute__ ((packed)); + +/* ---- HCI Events ---- */ +#define HCI_EV_INQUIRY_COMPLETE 0x01 + +#define HCI_EV_INQUIRY_RESULT 0x02 +struct inquiry_info { + bdaddr_t bdaddr; + __u8 pscan_rep_mode; + __u8 pscan_period_mode; + __u8 pscan_mode; + __u8 dev_class[3]; + __le16 clock_offset; +} __attribute__ ((packed)); + +#define HCI_EV_CONN_COMPLETE 0x03 +struct hci_ev_conn_complete { + __u8 status; + __le16 handle; + bdaddr_t bdaddr; + __u8 link_type; + __u8 encr_mode; +} __attribute__ ((packed)); + +#define HCI_EV_CONN_REQUEST 0x04 +struct hci_ev_conn_request { + bdaddr_t bdaddr; + __u8 dev_class[3]; + __u8 link_type; +} __attribute__ ((packed)); + +#define HCI_EV_DISCONN_COMPLETE 0x05 +struct hci_ev_disconn_complete { + __u8 status; + __le16 handle; + __u8 reason; +} __attribute__ ((packed)); + 
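/*
 * Illustrative sketch, not part of the patch: decoding one of the event
 * structures above from a raw HCI event packet.  The wire layout assumed
 * here (1-byte event code, 1-byte parameter length, little-endian
 * multi-byte fields) matches struct hci_event_hdr declared further down in
 * this header; the function name and the log text are invented for the
 * example, and <linux/kernel.h> plus the byteorder helpers are assumed.
 */
static void example_handle_conn_complete(const __u8 *pkt, unsigned int len)
{
	const struct hci_ev_conn_complete *ev;

	if (len < 2 + sizeof(*ev) || pkt[0] != HCI_EV_CONN_COMPLETE)
		return;

	/* Event parameters start right after the two header bytes. */
	ev = (const struct hci_ev_conn_complete *)(pkt + 2);

	printk(KERN_INFO "conn complete: status 0x%2.2x handle 0x%4.4x\n",
	       ev->status, le16_to_cpu(ev->handle));
}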
+#define HCI_EV_AUTH_COMPLETE 0x06 +struct hci_ev_auth_complete { + __u8 status; + __le16 handle; +} __attribute__ ((packed)); + +#define HCI_EV_REMOTE_NAME 0x07 +struct hci_ev_remote_name { + __u8 status; + bdaddr_t bdaddr; + __u8 name[248]; +} __attribute__ ((packed)); + +#define HCI_EV_ENCRYPT_CHANGE 0x08 +struct hci_ev_encrypt_change { + __u8 status; + __le16 handle; + __u8 encrypt; +} __attribute__ ((packed)); + +#define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09 +struct hci_ev_change_link_key_complete { + __u8 status; + __le16 handle; +} __attribute__ ((packed)); + +#define HCI_EV_REMOTE_FEATURES 0x0b +struct hci_ev_remote_features { + __u8 status; + __le16 handle; + __u8 features[8]; +} __attribute__ ((packed)); + +#define HCI_EV_REMOTE_VERSION 0x0c +struct hci_ev_remote_version { + __u8 status; + __le16 handle; + __u8 lmp_ver; + __le16 manufacturer; + __le16 lmp_subver; +} __attribute__ ((packed)); + +#define HCI_EV_QOS_SETUP_COMPLETE 0x0d +struct hci_qos { + __u8 service_type; + __u32 token_rate; + __u32 peak_bandwidth; + __u32 latency; + __u32 delay_variation; +} __attribute__ ((packed)); +struct hci_ev_qos_setup_complete { + __u8 status; + __le16 handle; + struct hci_qos qos; +} __attribute__ ((packed)); + +#define HCI_EV_CMD_COMPLETE 0x0e +struct hci_ev_cmd_complete { + __u8 ncmd; + __le16 opcode; +} __attribute__ ((packed)); + +#define HCI_EV_CMD_STATUS 0x0f +struct hci_ev_cmd_status { + __u8 status; + __u8 ncmd; + __le16 opcode; +} __attribute__ ((packed)); + +#define HCI_EV_ROLE_CHANGE 0x12 +struct hci_ev_role_change { + __u8 status; + bdaddr_t bdaddr; + __u8 role; +} __attribute__ ((packed)); + +#define HCI_EV_NUM_COMP_PKTS 0x13 +struct hci_ev_num_comp_pkts { + __u8 num_hndl; + /* variable length part */ +} __attribute__ ((packed)); + +#define HCI_EV_MODE_CHANGE 0x14 +struct hci_ev_mode_change { + __u8 status; + __le16 handle; + __u8 mode; + __le16 interval; +} __attribute__ ((packed)); + +#define HCI_EV_PIN_CODE_REQ 0x16 +struct hci_ev_pin_code_req { + bdaddr_t bdaddr; +} __attribute__ ((packed)); + +#define HCI_EV_LINK_KEY_REQ 0x17 +struct hci_ev_link_key_req { + bdaddr_t bdaddr; +} __attribute__ ((packed)); + +#define HCI_EV_LINK_KEY_NOTIFY 0x18 +struct hci_ev_link_key_notify { + bdaddr_t bdaddr; + __u8 link_key[16]; + __u8 key_type; +} __attribute__ ((packed)); + +#define HCI_EV_CLOCK_OFFSET 0x1c +struct hci_ev_clock_offset { + __u8 status; + __le16 handle; + __le16 clock_offset; +} __attribute__ ((packed)); + +#define HCI_EV_PKT_TYPE_CHANGE 0x1d +struct hci_ev_pkt_type_change { + __u8 status; + __le16 handle; + __le16 pkt_type; +} __attribute__ ((packed)); + +#define HCI_EV_PSCAN_REP_MODE 0x20 +struct hci_ev_pscan_rep_mode { + bdaddr_t bdaddr; + __u8 pscan_rep_mode; +} __attribute__ ((packed)); + +#define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22 +struct inquiry_info_with_rssi { + bdaddr_t bdaddr; + __u8 pscan_rep_mode; + __u8 pscan_period_mode; + __u8 dev_class[3]; + __le16 clock_offset; + __s8 rssi; +} __attribute__ ((packed)); +struct inquiry_info_with_rssi_and_pscan_mode { + bdaddr_t bdaddr; + __u8 pscan_rep_mode; + __u8 pscan_period_mode; + __u8 pscan_mode; + __u8 dev_class[3]; + __le16 clock_offset; + __s8 rssi; +} __attribute__ ((packed)); + +#define HCI_EV_REMOTE_EXT_FEATURES 0x23 +struct hci_ev_remote_ext_features { + __u8 status; + __le16 handle; + __u8 page; + __u8 max_page; + __u8 features[8]; +} __attribute__ ((packed)); + +#define HCI_EV_SYNC_CONN_COMPLETE 0x2c +struct hci_ev_sync_conn_complete { + __u8 status; + __le16 handle; + bdaddr_t bdaddr; + __u8 link_type; + 
__u8 tx_interval; + __u8 retrans_window; + __le16 rx_pkt_len; + __le16 tx_pkt_len; + __u8 air_mode; +} __attribute__ ((packed)); + +#define HCI_EV_SYNC_CONN_CHANGED 0x2d +struct hci_ev_sync_conn_changed { + __u8 status; + __le16 handle; + __u8 tx_interval; + __u8 retrans_window; + __le16 rx_pkt_len; + __le16 tx_pkt_len; +} __attribute__ ((packed)); + +#define HCI_EV_SNIFF_SUBRATE 0x2e +struct hci_ev_sniff_subrate { + __u8 status; + __le16 handle; + __le16 max_tx_latency; + __le16 max_rx_latency; + __le16 max_remote_timeout; + __le16 max_local_timeout; +} __attribute__ ((packed)); + +#define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f +struct extended_inquiry_info { + bdaddr_t bdaddr; + __u8 pscan_rep_mode; + __u8 pscan_period_mode; + __u8 dev_class[3]; + __le16 clock_offset; + __s8 rssi; + __u8 data[240]; +} __attribute__ ((packed)); + +#define HCI_EV_IO_CAPA_REQUEST 0x31 +struct hci_ev_io_capa_request { + bdaddr_t bdaddr; +} __attribute__ ((packed)); + +#define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36 +struct hci_ev_simple_pair_complete { + __u8 status; + bdaddr_t bdaddr; +} __attribute__ ((packed)); + +#define HCI_EV_REMOTE_HOST_FEATURES 0x3d +struct hci_ev_remote_host_features { + bdaddr_t bdaddr; + __u8 features[8]; +} __attribute__ ((packed)); + +/* Internal events generated by Bluetooth stack */ +#define HCI_EV_STACK_INTERNAL 0xfd +struct hci_ev_stack_internal { + __u16 type; + __u8 data[0]; +} __attribute__ ((packed)); + +#define HCI_EV_SI_DEVICE 0x01 +struct hci_ev_si_device { + __u16 event; + __u16 dev_id; +} __attribute__ ((packed)); + +#define HCI_EV_SI_SECURITY 0x02 +struct hci_ev_si_security { + __u16 event; + __u16 proto; + __u16 subproto; + __u8 incoming; +} __attribute__ ((packed)); + +/* ---- HCI Packet structures ---- */ +#define HCI_COMMAND_HDR_SIZE 3 +#define HCI_EVENT_HDR_SIZE 2 +#define HCI_ACL_HDR_SIZE 4 +#define HCI_SCO_HDR_SIZE 3 + +struct hci_command_hdr { + __le16 opcode; /* OCF & OGF */ + __u8 plen; +} __attribute__ ((packed)); + +struct hci_event_hdr { + __u8 evt; + __u8 plen; +} __attribute__ ((packed)); + +struct hci_acl_hdr { + __le16 handle; /* Handle & Flags(PB, BC) */ + __le16 dlen; +} __attribute__ ((packed)); + +struct hci_sco_hdr { + __le16 handle; + __u8 dlen; +} __attribute__ ((packed)); + +#ifdef __KERNEL__ +#include +static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) +{ + return (struct hci_event_hdr *) skb->data; +} + +static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb) +{ + return (struct hci_acl_hdr *) skb->data; +} + +static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) +{ + return (struct hci_sco_hdr *) skb->data; +} +#endif + +/* Command opcode pack/unpack */ +#define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10)) +#define hci_opcode_ogf(op) (op >> 10) +#define hci_opcode_ocf(op) (op & 0x03ff) + +/* ACL handle and flags pack/unpack */ +#define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12)) +#define hci_handle(h) (h & 0x0fff) +#define hci_flags(h) (h >> 12) + +/* ---- HCI Sockets ---- */ + +/* Socket options */ +#define HCI_DATA_DIR 1 +#define HCI_FILTER 2 +#define HCI_TIME_STAMP 3 + +/* CMSG flags */ +#define HCI_CMSG_DIR 0x0001 +#define HCI_CMSG_TSTAMP 0x0002 + +struct sockaddr_hci { + sa_family_t hci_family; + unsigned short hci_dev; +}; +#define HCI_DEV_NONE 0xffff + +struct hci_filter { + unsigned long type_mask; + unsigned long event_mask[2]; + __le16 opcode; +}; + +struct hci_ufilter { + __u32 type_mask; + __u32 event_mask[2]; + __le16 opcode; +}; + +#define 
HCI_FLT_TYPE_BITS 31 +#define HCI_FLT_EVENT_BITS 63 +#define HCI_FLT_OGF_BITS 63 +#define HCI_FLT_OCF_BITS 127 + +/* ---- HCI Ioctl requests structures ---- */ +struct hci_dev_stats { + __u32 err_rx; + __u32 err_tx; + __u32 cmd_tx; + __u32 evt_rx; + __u32 acl_tx; + __u32 acl_rx; + __u32 sco_tx; + __u32 sco_rx; + __u32 byte_rx; + __u32 byte_tx; +}; + +struct hci_dev_info { + __u16 dev_id; + char name[8]; + + bdaddr_t bdaddr; + + __u32 flags; + __u8 type; + + __u8 features[8]; + + __u32 pkt_type; + __u32 link_policy; + __u32 link_mode; + + __u16 acl_mtu; + __u16 acl_pkts; + __u16 sco_mtu; + __u16 sco_pkts; + + struct hci_dev_stats stat; +}; + +struct hci_conn_info { + __u16 handle; + bdaddr_t bdaddr; + __u8 type; + __u8 out; + __u16 state; + __u32 link_mode; +}; + +struct hci_dev_req { + __u16 dev_id; + __u32 dev_opt; +}; + +struct hci_dev_list_req { + __u16 dev_num; + struct hci_dev_req dev_req[0]; /* hci_dev_req structures */ +}; + +struct hci_conn_list_req { + __u16 dev_id; + __u16 conn_num; + struct hci_conn_info conn_info[0]; +}; + +struct hci_conn_info_req { + bdaddr_t bdaddr; + __u8 type; + struct hci_conn_info conn_info[0]; +}; + +struct hci_auth_info_req { + bdaddr_t bdaddr; + __u8 type; +}; + +struct hci_inquiry_req { + __u16 dev_id; + __u16 flags; + __u8 lap[3]; + __u8 length; + __u8 num_rsp; +}; +#define IREQ_CACHE_FLUSH 0x0001 + +#endif /* __HCI_H */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h new file mode 100644 index 0000000..ce3c99e --- /dev/null +++ b/include/net/bluetooth/hci_core.h @@ -0,0 +1,678 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +#ifndef __HCI_CORE_H +#define __HCI_CORE_H + +#include + +/* HCI upper protocols */ +#define HCI_PROTO_L2CAP 0 +#define HCI_PROTO_SCO 1 + +/* HCI Core structures */ +struct inquiry_data { + bdaddr_t bdaddr; + __u8 pscan_rep_mode; + __u8 pscan_period_mode; + __u8 pscan_mode; + __u8 dev_class[3]; + __le16 clock_offset; + __s8 rssi; + __u8 ssp_mode; +}; + +struct inquiry_entry { + struct inquiry_entry *next; + __u32 timestamp; + struct inquiry_data data; +}; + +struct inquiry_cache { + spinlock_t lock; + __u32 timestamp; + struct inquiry_entry *list; +}; + +struct hci_conn_hash { + struct list_head list; + spinlock_t lock; + unsigned int acl_num; + unsigned int sco_num; +}; + +struct hci_dev { + struct list_head list; + spinlock_t lock; + atomic_t refcnt; + + char name[8]; + unsigned long flags; + __u16 id; + __u8 bus; + __u8 dev_type; + bdaddr_t bdaddr; + __u8 dev_name[248]; + __u8 dev_class[3]; + __u8 features[8]; + __u8 commands[64]; + __u8 ssp_mode; + __u8 hci_ver; + __u16 hci_rev; + __u16 manufacturer; + __u16 voice_setting; + + __u16 pkt_type; + __u16 esco_type; + __u16 link_policy; + __u16 link_mode; + + __u32 idle_timeout; + __u16 sniff_min_interval; + __u16 sniff_max_interval; + + unsigned long quirks; + + atomic_t cmd_cnt; + unsigned int acl_cnt; + unsigned int sco_cnt; + + unsigned int acl_mtu; + unsigned int sco_mtu; + unsigned int acl_pkts; + unsigned int sco_pkts; + + unsigned long cmd_last_tx; + unsigned long acl_last_tx; + unsigned long sco_last_tx; + + struct tasklet_struct cmd_task; + struct tasklet_struct rx_task; + struct tasklet_struct tx_task; + + struct sk_buff_head rx_q; + struct sk_buff_head raw_q; + struct sk_buff_head cmd_q; + + struct sk_buff *sent_cmd; + struct sk_buff *reassembly[3]; + + struct mutex req_lock; + wait_queue_head_t req_wait_q; + __u32 req_status; + __u32 req_result; + + struct inquiry_cache inq_cache; + struct hci_conn_hash conn_hash; + + struct hci_dev_stats stat; + + struct sk_buff_head driver_init; + + void *driver_data; + void *core_data; + + atomic_t promisc; + + struct dentry *debugfs; + + struct device *parent; + struct device dev; + + struct rfkill *rfkill; + + struct module *owner; + + int (*open)(struct hci_dev *hdev); + int (*close)(struct hci_dev *hdev); + int (*flush)(struct hci_dev *hdev); + int (*send)(struct sk_buff *skb); + void (*destruct)(struct hci_dev *hdev); + void (*notify)(struct hci_dev *hdev, unsigned int evt); + int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg); +}; + +struct hci_conn { + struct list_head list; + + atomic_t refcnt; + spinlock_t lock; + + bdaddr_t dst; + __u16 handle; + __u16 state; + __u8 mode; + __u8 type; + __u8 out; + __u8 attempt; + __u8 dev_class[3]; + __u8 features[8]; + __u8 ssp_mode; + __u16 interval; + __u16 pkt_type; + __u16 link_policy; + __u32 link_mode; + __u8 auth_type; + __u8 sec_level; + __u8 power_save; + __u16 disc_timeout; + unsigned long pend; + + unsigned int sent; + + struct sk_buff_head data_q; + + struct timer_list disc_timer; + struct timer_list idle_timer; + + struct work_struct work_add; + struct work_struct work_del; + + struct device dev; + atomic_t devref; + + struct hci_dev *hdev; + void *l2cap_data; + void *sco_data; + void *priv; + + struct hci_conn *link; +}; + +extern struct hci_proto *hci_proto[]; +extern struct list_head hci_dev_list; +extern struct list_head hci_cb_list; +extern rwlock_t hci_dev_list_lock; +extern rwlock_t hci_cb_list_lock; + +/* ----- Inquiry cache ----- */ +#define INQUIRY_CACHE_AGE_MAX (HZ*30) // 30 seconds +#define 
INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds + +#define inquiry_cache_lock(c) spin_lock(&c->lock) +#define inquiry_cache_unlock(c) spin_unlock(&c->lock) +#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock) +#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock) + +static inline void inquiry_cache_init(struct hci_dev *hdev) +{ + struct inquiry_cache *c = &hdev->inq_cache; + spin_lock_init(&c->lock); + c->list = NULL; +} + +static inline int inquiry_cache_empty(struct hci_dev *hdev) +{ + struct inquiry_cache *c = &hdev->inq_cache; + return (c->list == NULL); +} + +static inline long inquiry_cache_age(struct hci_dev *hdev) +{ + struct inquiry_cache *c = &hdev->inq_cache; + return jiffies - c->timestamp; +} + +static inline long inquiry_entry_age(struct inquiry_entry *e) +{ + return jiffies - e->timestamp; +} + +struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); +void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data); + +/* ----- HCI Connections ----- */ +enum { + HCI_CONN_AUTH_PEND, + HCI_CONN_ENCRYPT_PEND, + HCI_CONN_RSWITCH_PEND, + HCI_CONN_MODE_CHANGE_PEND, +}; + +static inline void hci_conn_hash_init(struct hci_dev *hdev) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + INIT_LIST_HEAD(&h->list); + spin_lock_init(&h->lock); + h->acl_num = 0; + h->sco_num = 0; +} + +static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + list_add(&c->list, &h->list); + if (c->type == ACL_LINK) + h->acl_num++; + else + h->sco_num++; +} + +static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + list_del(&c->list); + if (c->type == ACL_LINK) + h->acl_num--; + else + h->sco_num--; +} + +static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, + __u16 handle) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct list_head *p; + struct hci_conn *c; + + list_for_each(p, &h->list) { + c = list_entry(p, struct hci_conn, list); + if (c->handle == handle) + return c; + } + return NULL; +} + +static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, + __u8 type, bdaddr_t *ba) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct list_head *p; + struct hci_conn *c; + + list_for_each(p, &h->list) { + c = list_entry(p, struct hci_conn, list); + if (c->type == type && !bacmp(&c->dst, ba)) + return c; + } + return NULL; +} + +static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, + __u8 type, __u16 state) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct list_head *p; + struct hci_conn *c; + + list_for_each(p, &h->list) { + c = list_entry(p, struct hci_conn, list); + if (c->type == type && c->state == state) + return c; + } + return NULL; +} + +void hci_acl_connect(struct hci_conn *conn); +void hci_acl_disconn(struct hci_conn *conn, __u8 reason); +void hci_add_sco(struct hci_conn *conn, __u16 handle); +void hci_setup_sync(struct hci_conn *conn, __u16 handle); + +struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst); +int hci_conn_del(struct hci_conn *conn); +void hci_conn_hash_flush(struct hci_dev *hdev); +void hci_conn_check_pending(struct hci_dev *hdev); + +struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type); +int hci_conn_check_link_mode(struct hci_conn *conn); +int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type); +int 
hci_conn_change_link_key(struct hci_conn *conn); +int hci_conn_switch_role(struct hci_conn *conn, __u8 role); + +void hci_conn_enter_active_mode(struct hci_conn *conn); +void hci_conn_enter_sniff_mode(struct hci_conn *conn); + +void hci_conn_hold_device(struct hci_conn *conn); +void hci_conn_put_device(struct hci_conn *conn); + +static inline void hci_conn_hold(struct hci_conn *conn) +{ + atomic_inc(&conn->refcnt); + del_timer(&conn->disc_timer); +} + +static inline void hci_conn_put(struct hci_conn *conn) +{ + if (atomic_dec_and_test(&conn->refcnt)) { + unsigned long timeo; + if (conn->type == ACL_LINK) { + del_timer(&conn->idle_timer); + if (conn->state == BT_CONNECTED) { + timeo = msecs_to_jiffies(conn->disc_timeout); + if (!conn->out) + timeo *= 2; + } else + timeo = msecs_to_jiffies(10); + } else + timeo = msecs_to_jiffies(10); + mod_timer(&conn->disc_timer, jiffies + timeo); + } +} + +/* ----- HCI Devices ----- */ +static inline void __hci_dev_put(struct hci_dev *d) +{ + if (atomic_dec_and_test(&d->refcnt)) + d->destruct(d); +} + +static inline void hci_dev_put(struct hci_dev *d) +{ + __hci_dev_put(d); + module_put(d->owner); +} + +static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d) +{ + atomic_inc(&d->refcnt); + return d; +} + +static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) +{ + if (try_module_get(d->owner)) + return __hci_dev_hold(d); + return NULL; +} + +#define hci_dev_lock(d) spin_lock(&d->lock) +#define hci_dev_unlock(d) spin_unlock(&d->lock) +#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock) +#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock) + +struct hci_dev *hci_dev_get(int index); +struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); + +struct hci_dev *hci_alloc_dev(void); +void hci_free_dev(struct hci_dev *hdev); +int hci_register_dev(struct hci_dev *hdev); +int hci_unregister_dev(struct hci_dev *hdev); +int hci_suspend_dev(struct hci_dev *hdev); +int hci_resume_dev(struct hci_dev *hdev); +int hci_dev_open(__u16 dev); +int hci_dev_close(__u16 dev); +int hci_dev_reset(__u16 dev); +int hci_dev_reset_stat(__u16 dev); +int hci_dev_cmd(unsigned int cmd, void __user *arg); +int hci_get_dev_list(void __user *arg); +int hci_get_dev_info(void __user *arg); +int hci_get_conn_list(void __user *arg); +int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); +int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); +int hci_inquiry(void __user *arg); + +void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); + +int hci_recv_frame(struct sk_buff *skb); +int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count); + +int hci_register_sysfs(struct hci_dev *hdev); +void hci_unregister_sysfs(struct hci_dev *hdev); +void hci_conn_init_sysfs(struct hci_conn *conn); +void hci_conn_add_sysfs(struct hci_conn *conn); +void hci_conn_del_sysfs(struct hci_conn *conn); + +#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev)) + +/* ----- LMP capabilities ----- */ +#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH) +#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT) +#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF) +#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR) +#define lmp_esco_capable(dev) ((dev)->features[3] & LMP_ESCO) +#define lmp_ssp_capable(dev) ((dev)->features[6] & LMP_SIMPLE_PAIR) + +/* ----- HCI protocols ----- */ +struct hci_proto { + char *name; + unsigned int id; + unsigned long flags; + + void *priv; + + int (*connect_ind) 
(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type); + int (*connect_cfm) (struct hci_conn *conn, __u8 status); + int (*disconn_ind) (struct hci_conn *conn); + int (*disconn_cfm) (struct hci_conn *conn, __u8 reason); + int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags); + int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb); + int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); +}; + +static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) +{ + register struct hci_proto *hp; + int mask = 0; + + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->connect_ind) + mask |= hp->connect_ind(hdev, bdaddr, type); + + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->connect_ind) + mask |= hp->connect_ind(hdev, bdaddr, type); + + return mask; +} + +static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status) +{ + register struct hci_proto *hp; + + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->connect_cfm) + hp->connect_cfm(conn, status); + + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->connect_cfm) + hp->connect_cfm(conn, status); +} + +static inline int hci_proto_disconn_ind(struct hci_conn *conn) +{ + register struct hci_proto *hp; + int reason = 0x13; + + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->disconn_ind) + reason = hp->disconn_ind(conn); + + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->disconn_ind) + reason = hp->disconn_ind(conn); + + return reason; +} + +static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) +{ + register struct hci_proto *hp; + + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->disconn_cfm) + hp->disconn_cfm(conn, reason); + + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->disconn_cfm) + hp->disconn_cfm(conn, reason); +} + +static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) +{ + register struct hci_proto *hp; + __u8 encrypt; + + if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) + return; + + encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00; + + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->security_cfm) + hp->security_cfm(conn, status, encrypt); + + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->security_cfm) + hp->security_cfm(conn, status, encrypt); +} + +static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) +{ + register struct hci_proto *hp; + + hp = hci_proto[HCI_PROTO_L2CAP]; + if (hp && hp->security_cfm) + hp->security_cfm(conn, status, encrypt); + + hp = hci_proto[HCI_PROTO_SCO]; + if (hp && hp->security_cfm) + hp->security_cfm(conn, status, encrypt); +} + +int hci_register_proto(struct hci_proto *hproto); +int hci_unregister_proto(struct hci_proto *hproto); + +/* ----- HCI callbacks ----- */ +struct hci_cb { + struct list_head list; + + char *name; + + void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); + void (*key_change_cfm) (struct hci_conn *conn, __u8 status); + void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); +}; + +static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) +{ + struct list_head *p; + __u8 encrypt; + + hci_proto_auth_cfm(conn, status); + + if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) + return; + + encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 
0x01 : 0x00; + + read_lock_bh(&hci_cb_list_lock); + list_for_each(p, &hci_cb_list) { + struct hci_cb *cb = list_entry(p, struct hci_cb, list); + if (cb->security_cfm) + cb->security_cfm(conn, status, encrypt); + } + read_unlock_bh(&hci_cb_list_lock); +} + +static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) +{ + struct list_head *p; + + if (conn->sec_level == BT_SECURITY_SDP) + conn->sec_level = BT_SECURITY_LOW; + + hci_proto_encrypt_cfm(conn, status, encrypt); + + read_lock_bh(&hci_cb_list_lock); + list_for_each(p, &hci_cb_list) { + struct hci_cb *cb = list_entry(p, struct hci_cb, list); + if (cb->security_cfm) + cb->security_cfm(conn, status, encrypt); + } + read_unlock_bh(&hci_cb_list_lock); +} + +static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) +{ + struct list_head *p; + + read_lock_bh(&hci_cb_list_lock); + list_for_each(p, &hci_cb_list) { + struct hci_cb *cb = list_entry(p, struct hci_cb, list); + if (cb->key_change_cfm) + cb->key_change_cfm(conn, status); + } + read_unlock_bh(&hci_cb_list_lock); +} + +static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role) +{ + struct list_head *p; + + read_lock_bh(&hci_cb_list_lock); + list_for_each(p, &hci_cb_list) { + struct hci_cb *cb = list_entry(p, struct hci_cb, list); + if (cb->role_switch_cfm) + cb->role_switch_cfm(conn, status, role); + } + read_unlock_bh(&hci_cb_list_lock); +} + +int hci_register_cb(struct hci_cb *hcb); +int hci_unregister_cb(struct hci_cb *hcb); + +int hci_register_notifier(struct notifier_block *nb); +int hci_unregister_notifier(struct notifier_block *nb); + +int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param); +int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags); +int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); + +void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); + +void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data); + +/* ----- HCI Sockets ----- */ +void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); + +/* HCI info for socket */ +#define hci_pi(sk) ((struct hci_pinfo *) sk) + +struct hci_pinfo { + struct bt_sock bt; + struct hci_dev *hdev; + struct hci_filter filter; + __u32 cmsg_mask; +}; + +/* HCI security filter */ +#define HCI_SFLT_MAX_OGF 5 + +struct hci_sec_filter { + __u32 type_mask; + __u32 event_mask[2]; + __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4]; +}; + +/* ----- HCI requests ----- */ +#define HCI_REQ_DONE 0 +#define HCI_REQ_PEND 1 +#define HCI_REQ_CANCELED 2 + +#define hci_req_lock(d) mutex_lock(&d->req_lock) +#define hci_req_unlock(d) mutex_unlock(&d->req_lock) + +void hci_req_complete(struct hci_dev *hdev, int result); + +#endif /* __HCI_CORE_H */ diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h new file mode 100644 index 0000000..17a689f --- /dev/null +++ b/include/net/bluetooth/l2cap.h @@ -0,0 +1,405 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#ifndef __L2CAP_H +#define __L2CAP_H + +/* L2CAP defaults */ +#define L2CAP_DEFAULT_MTU 672 +#define L2CAP_DEFAULT_MIN_MTU 48 +#define L2CAP_DEFAULT_FLUSH_TO 0xffff +#define L2CAP_DEFAULT_TX_WINDOW 63 +#define L2CAP_DEFAULT_NUM_TO_ACK (L2CAP_DEFAULT_TX_WINDOW/5) +#define L2CAP_DEFAULT_MAX_TX 3 +#define L2CAP_DEFAULT_RETRANS_TO 1000 /* 1 second */ +#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ +#define L2CAP_DEFAULT_MAX_PDU_SIZE 672 + +#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */ +#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */ + +/* L2CAP socket address */ +struct sockaddr_l2 { + sa_family_t l2_family; + __le16 l2_psm; + bdaddr_t l2_bdaddr; + __le16 l2_cid; +}; + +/* L2CAP socket options */ +#define L2CAP_OPTIONS 0x01 +struct l2cap_options { + __u16 omtu; + __u16 imtu; + __u16 flush_to; + __u8 mode; + __u8 fcs; +}; + +#define L2CAP_CONNINFO 0x02 +struct l2cap_conninfo { + __u16 hci_handle; + __u8 dev_class[3]; +}; + +#define L2CAP_LM 0x03 +#define L2CAP_LM_MASTER 0x0001 +#define L2CAP_LM_AUTH 0x0002 +#define L2CAP_LM_ENCRYPT 0x0004 +#define L2CAP_LM_TRUSTED 0x0008 +#define L2CAP_LM_RELIABLE 0x0010 +#define L2CAP_LM_SECURE 0x0020 + +/* L2CAP command codes */ +#define L2CAP_COMMAND_REJ 0x01 +#define L2CAP_CONN_REQ 0x02 +#define L2CAP_CONN_RSP 0x03 +#define L2CAP_CONF_REQ 0x04 +#define L2CAP_CONF_RSP 0x05 +#define L2CAP_DISCONN_REQ 0x06 +#define L2CAP_DISCONN_RSP 0x07 +#define L2CAP_ECHO_REQ 0x08 +#define L2CAP_ECHO_RSP 0x09 +#define L2CAP_INFO_REQ 0x0a +#define L2CAP_INFO_RSP 0x0b + +/* L2CAP feature mask */ +#define L2CAP_FEAT_FLOWCTL 0x00000001 +#define L2CAP_FEAT_RETRANS 0x00000002 +#define L2CAP_FEAT_ERTM 0x00000008 +#define L2CAP_FEAT_STREAMING 0x00000010 +#define L2CAP_FEAT_FCS 0x00000020 +#define L2CAP_FEAT_FIXED_CHAN 0x00000080 + +/* L2CAP checksum option */ +#define L2CAP_FCS_NONE 0x00 +#define L2CAP_FCS_CRC16 0x01 + +/* L2CAP Control Field bit masks */ +#define L2CAP_CTRL_SAR 0xC000 +#define L2CAP_CTRL_REQSEQ 0x3F00 +#define L2CAP_CTRL_TXSEQ 0x007E +#define L2CAP_CTRL_RETRANS 0x0080 +#define L2CAP_CTRL_FINAL 0x0080 +#define L2CAP_CTRL_POLL 0x0010 +#define L2CAP_CTRL_SUPERVISE 0x000C +#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */ + +#define L2CAP_CTRL_TXSEQ_SHIFT 1 +#define L2CAP_CTRL_REQSEQ_SHIFT 8 +#define L2CAP_CTRL_SAR_SHIFT 14 + +/* L2CAP Supervisory Function */ +#define L2CAP_SUPER_RCV_READY 0x0000 +#define L2CAP_SUPER_REJECT 0x0004 +#define L2CAP_SUPER_RCV_NOT_READY 0x0008 +#define L2CAP_SUPER_SELECT_REJECT 0x000C + +/* L2CAP Segmentation and Reassembly */ +#define L2CAP_SDU_UNSEGMENTED 0x0000 +#define L2CAP_SDU_START 0x4000 +#define L2CAP_SDU_END 0x8000 +#define L2CAP_SDU_CONTINUE 0xC000 + +/* L2CAP structures */ +struct l2cap_hdr { + __le16 len; + __le16 cid; +} __attribute__ ((packed)); +#define L2CAP_HDR_SIZE 4 + +struct l2cap_cmd_hdr { + __u8 code; + __u8 ident; + __le16 len; +} __attribute__ ((packed)); +#define L2CAP_CMD_HDR_SIZE 4 + +struct l2cap_cmd_rej { + __le16 reason; +} __attribute__ ((packed)); + +struct 
l2cap_conn_req { + __le16 psm; + __le16 scid; +} __attribute__ ((packed)); + +struct l2cap_conn_rsp { + __le16 dcid; + __le16 scid; + __le16 result; + __le16 status; +} __attribute__ ((packed)); + +/* channel indentifier */ +#define L2CAP_CID_SIGNALING 0x0001 +#define L2CAP_CID_CONN_LESS 0x0002 +#define L2CAP_CID_DYN_START 0x0040 +#define L2CAP_CID_DYN_END 0xffff + +/* connect result */ +#define L2CAP_CR_SUCCESS 0x0000 +#define L2CAP_CR_PEND 0x0001 +#define L2CAP_CR_BAD_PSM 0x0002 +#define L2CAP_CR_SEC_BLOCK 0x0003 +#define L2CAP_CR_NO_MEM 0x0004 + +/* connect status */ +#define L2CAP_CS_NO_INFO 0x0000 +#define L2CAP_CS_AUTHEN_PEND 0x0001 +#define L2CAP_CS_AUTHOR_PEND 0x0002 + +struct l2cap_conf_req { + __le16 dcid; + __le16 flags; + __u8 data[0]; +} __attribute__ ((packed)); + +struct l2cap_conf_rsp { + __le16 scid; + __le16 flags; + __le16 result; + __u8 data[0]; +} __attribute__ ((packed)); + +#define L2CAP_CONF_SUCCESS 0x0000 +#define L2CAP_CONF_UNACCEPT 0x0001 +#define L2CAP_CONF_REJECT 0x0002 +#define L2CAP_CONF_UNKNOWN 0x0003 + +struct l2cap_conf_opt { + __u8 type; + __u8 len; + __u8 val[0]; +} __attribute__ ((packed)); +#define L2CAP_CONF_OPT_SIZE 2 + +#define L2CAP_CONF_HINT 0x80 +#define L2CAP_CONF_MASK 0x7f + +#define L2CAP_CONF_MTU 0x01 +#define L2CAP_CONF_FLUSH_TO 0x02 +#define L2CAP_CONF_QOS 0x03 +#define L2CAP_CONF_RFC 0x04 +#define L2CAP_CONF_FCS 0x05 + +#define L2CAP_CONF_MAX_SIZE 22 + +struct l2cap_conf_rfc { + __u8 mode; + __u8 txwin_size; + __u8 max_transmit; + __le16 retrans_timeout; + __le16 monitor_timeout; + __le16 max_pdu_size; +} __attribute__ ((packed)); + +#define L2CAP_MODE_BASIC 0x00 +#define L2CAP_MODE_RETRANS 0x01 +#define L2CAP_MODE_FLOWCTL 0x02 +#define L2CAP_MODE_ERTM 0x03 +#define L2CAP_MODE_STREAMING 0x04 + +struct l2cap_disconn_req { + __le16 dcid; + __le16 scid; +} __attribute__ ((packed)); + +struct l2cap_disconn_rsp { + __le16 dcid; + __le16 scid; +} __attribute__ ((packed)); + +struct l2cap_info_req { + __le16 type; +} __attribute__ ((packed)); + +struct l2cap_info_rsp { + __le16 type; + __le16 result; + __u8 data[0]; +} __attribute__ ((packed)); + +/* info type */ +#define L2CAP_IT_CL_MTU 0x0001 +#define L2CAP_IT_FEAT_MASK 0x0002 +#define L2CAP_IT_FIXED_CHAN 0x0003 + +/* info result */ +#define L2CAP_IR_SUCCESS 0x0000 +#define L2CAP_IR_NOTSUPP 0x0001 + +/* ----- L2CAP connections ----- */ +struct l2cap_chan_list { + struct sock *head; + rwlock_t lock; + long num; +}; + +struct l2cap_conn { + struct hci_conn *hcon; + + bdaddr_t *dst; + bdaddr_t *src; + + unsigned int mtu; + + __u32 feat_mask; + + __u8 info_state; + __u8 info_ident; + + struct timer_list info_timer; + + spinlock_t lock; + + struct sk_buff *rx_skb; + __u32 rx_len; + __u8 rx_ident; + __u8 tx_ident; + + __u8 disc_reason; + + struct l2cap_chan_list chan_list; +}; + +#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01 +#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04 +#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08 + +/* ----- L2CAP channel and socket info ----- */ +#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) +#define TX_QUEUE(sk) (&l2cap_pi(sk)->tx_queue) +#define SREJ_QUEUE(sk) (&l2cap_pi(sk)->srej_queue) +#define SREJ_LIST(sk) (&l2cap_pi(sk)->srej_l.list) + +struct srej_list { + __u8 tx_seq; + struct list_head list; +}; + +struct l2cap_pinfo { + struct bt_sock bt; + __le16 psm; + __u16 dcid; + __u16 scid; + + __u16 imtu; + __u16 omtu; + __u16 flush_to; + __u8 mode; + __u8 num_conf_req; + __u8 num_conf_rsp; + + __u8 fcs; + __u8 sec_level; + __u8 role_switch; + __u8 force_reliable; + + __u8 conf_req[64]; 
+ __u8 conf_len; + __u8 conf_state; + __u8 conn_state; + + __u8 next_tx_seq; + __u8 expected_ack_seq; + __u8 expected_tx_seq; + __u8 buffer_seq; + __u8 buffer_seq_srej; + __u8 srej_save_reqseq; + __u8 unacked_frames; + __u8 retry_count; + __u8 num_to_ack; + __u16 sdu_len; + __u16 partial_sdu_len; + struct sk_buff *sdu; + + __u8 ident; + + __u8 remote_tx_win; + __u8 remote_max_tx; + __u16 retrans_timeout; + __u16 monitor_timeout; + __u16 max_pdu_size; + + __le16 sport; + + struct timer_list retrans_timer; + struct timer_list monitor_timer; + struct sk_buff_head tx_queue; + struct sk_buff_head srej_queue; + struct srej_list srej_l; + struct l2cap_conn *conn; + struct sock *next_c; + struct sock *prev_c; +}; + +#define L2CAP_CONF_REQ_SENT 0x01 +#define L2CAP_CONF_INPUT_DONE 0x02 +#define L2CAP_CONF_OUTPUT_DONE 0x04 +#define L2CAP_CONF_MTU_DONE 0x08 +#define L2CAP_CONF_MODE_DONE 0x10 +#define L2CAP_CONF_CONNECT_PEND 0x20 +#define L2CAP_CONF_NO_FCS_RECV 0x40 +#define L2CAP_CONF_STATE2_DEVICE 0x80 + +#define L2CAP_CONF_MAX_CONF_REQ 2 +#define L2CAP_CONF_MAX_CONF_RSP 2 + +#define L2CAP_CONN_SAR_SDU 0x01 +#define L2CAP_CONN_SREJ_SENT 0x02 +#define L2CAP_CONN_WAIT_F 0x04 +#define L2CAP_CONN_SREJ_ACT 0x08 +#define L2CAP_CONN_SEND_PBIT 0x10 +#define L2CAP_CONN_REMOTE_BUSY 0x20 +#define L2CAP_CONN_LOCAL_BUSY 0x40 +#define L2CAP_CONN_REJ_ACT 0x80 + +#define __mod_retrans_timer() mod_timer(&l2cap_pi(sk)->retrans_timer, \ + jiffies + msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO)); +#define __mod_monitor_timer() mod_timer(&l2cap_pi(sk)->monitor_timer, \ + jiffies + msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO)); + +static inline int l2cap_tx_window_full(struct sock *sk) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + int sub; + + sub = (pi->next_tx_seq - pi->expected_ack_seq) % 64; + + if (sub < 0) + sub += 64; + + return (sub == pi->remote_tx_win); +} + +#define __get_txseq(ctrl) ((ctrl) & L2CAP_CTRL_TXSEQ) >> 1 +#define __get_reqseq(ctrl) ((ctrl) & L2CAP_CTRL_REQSEQ) >> 8 +#define __is_iframe(ctrl) !((ctrl) & L2CAP_CTRL_FRAME_TYPE) +#define __is_sframe(ctrl) (ctrl) & L2CAP_CTRL_FRAME_TYPE +#define __is_sar_start(ctrl) ((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START + +void l2cap_load(void); + +#endif /* __L2CAP_H */ diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h new file mode 100644 index 0000000..921d7b3 --- /dev/null +++ b/include/net/bluetooth/rfcomm.h @@ -0,0 +1,373 @@ +/* + RFCOMM implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002 Maxim Krasnyansky + Copyright (C) 2002 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +#ifndef __RFCOMM_H +#define __RFCOMM_H + +#define RFCOMM_PSM 3 + +#define RFCOMM_CONN_TIMEOUT (HZ * 30) +#define RFCOMM_DISC_TIMEOUT (HZ * 20) +#define RFCOMM_AUTH_TIMEOUT (HZ * 25) +#define RFCOMM_IDLE_TIMEOUT (HZ * 2) + +#define RFCOMM_DEFAULT_MTU 127 +#define RFCOMM_DEFAULT_CREDITS 7 + +#define RFCOMM_MAX_L2CAP_MTU 1013 +#define RFCOMM_MAX_CREDITS 40 + +#define RFCOMM_SKB_HEAD_RESERVE 8 +#define RFCOMM_SKB_TAIL_RESERVE 2 +#define RFCOMM_SKB_RESERVE (RFCOMM_SKB_HEAD_RESERVE + RFCOMM_SKB_TAIL_RESERVE) + +#define RFCOMM_SABM 0x2f +#define RFCOMM_DISC 0x43 +#define RFCOMM_UA 0x63 +#define RFCOMM_DM 0x0f +#define RFCOMM_UIH 0xef + +#define RFCOMM_TEST 0x08 +#define RFCOMM_FCON 0x28 +#define RFCOMM_FCOFF 0x18 +#define RFCOMM_MSC 0x38 +#define RFCOMM_RPN 0x24 +#define RFCOMM_RLS 0x14 +#define RFCOMM_PN 0x20 +#define RFCOMM_NSC 0x04 + +#define RFCOMM_V24_FC 0x02 +#define RFCOMM_V24_RTC 0x04 +#define RFCOMM_V24_RTR 0x08 +#define RFCOMM_V24_IC 0x40 +#define RFCOMM_V24_DV 0x80 + +#define RFCOMM_RPN_BR_2400 0x0 +#define RFCOMM_RPN_BR_4800 0x1 +#define RFCOMM_RPN_BR_7200 0x2 +#define RFCOMM_RPN_BR_9600 0x3 +#define RFCOMM_RPN_BR_19200 0x4 +#define RFCOMM_RPN_BR_38400 0x5 +#define RFCOMM_RPN_BR_57600 0x6 +#define RFCOMM_RPN_BR_115200 0x7 +#define RFCOMM_RPN_BR_230400 0x8 + +#define RFCOMM_RPN_DATA_5 0x0 +#define RFCOMM_RPN_DATA_6 0x1 +#define RFCOMM_RPN_DATA_7 0x2 +#define RFCOMM_RPN_DATA_8 0x3 + +#define RFCOMM_RPN_STOP_1 0 +#define RFCOMM_RPN_STOP_15 1 + +#define RFCOMM_RPN_PARITY_NONE 0x0 +#define RFCOMM_RPN_PARITY_ODD 0x1 +#define RFCOMM_RPN_PARITY_EVEN 0x3 +#define RFCOMM_RPN_PARITY_MARK 0x5 +#define RFCOMM_RPN_PARITY_SPACE 0x7 + +#define RFCOMM_RPN_FLOW_NONE 0x00 + +#define RFCOMM_RPN_XON_CHAR 0x11 +#define RFCOMM_RPN_XOFF_CHAR 0x13 + +#define RFCOMM_RPN_PM_BITRATE 0x0001 +#define RFCOMM_RPN_PM_DATA 0x0002 +#define RFCOMM_RPN_PM_STOP 0x0004 +#define RFCOMM_RPN_PM_PARITY 0x0008 +#define RFCOMM_RPN_PM_PARITY_TYPE 0x0010 +#define RFCOMM_RPN_PM_XON 0x0020 +#define RFCOMM_RPN_PM_XOFF 0x0040 +#define RFCOMM_RPN_PM_FLOW 0x3F00 + +#define RFCOMM_RPN_PM_ALL 0x3F7F + +struct rfcomm_hdr { + u8 addr; + u8 ctrl; + u8 len; // Actual size can be 2 bytes +} __attribute__ ((packed)); + +struct rfcomm_cmd { + u8 addr; + u8 ctrl; + u8 len; + u8 fcs; +} __attribute__ ((packed)); + +struct rfcomm_mcc { + u8 type; + u8 len; +} __attribute__ ((packed)); + +struct rfcomm_pn { + u8 dlci; + u8 flow_ctrl; + u8 priority; + u8 ack_timer; + __le16 mtu; + u8 max_retrans; + u8 credits; +} __attribute__ ((packed)); + +struct rfcomm_rpn { + u8 dlci; + u8 bit_rate; + u8 line_settings; + u8 flow_ctrl; + u8 xon_char; + u8 xoff_char; + __le16 param_mask; +} __attribute__ ((packed)); + +struct rfcomm_rls { + u8 dlci; + u8 status; +} __attribute__ ((packed)); + +struct rfcomm_msc { + u8 dlci; + u8 v24_sig; +} __attribute__ ((packed)); + +/* ---- Core structures, flags etc ---- */ + +struct rfcomm_session { + struct list_head list; + struct socket *sock; + struct timer_list timer; + unsigned long state; + unsigned long flags; + atomic_t refcnt; + int initiator; + + /* Default DLC parameters */ + int cfc; + uint mtu; + + struct list_head dlcs; +}; + +struct rfcomm_dlc { + struct list_head list; + struct rfcomm_session *session; + struct sk_buff_head tx_queue; + struct timer_list timer; + + spinlock_t lock; + unsigned long state; + unsigned long flags; + atomic_t refcnt; + u8 dlci; + u8 addr; + u8 priority; + u8 v24_sig; + u8 remote_v24_sig; + u8 mscex; + u8 out; + u8 sec_level; + u8 role_switch; + u32 defer_setup; + + uint mtu; 
+ uint cfc; + uint rx_credits; + uint tx_credits; + + void *owner; + + void (*data_ready)(struct rfcomm_dlc *d, struct sk_buff *skb); + void (*state_change)(struct rfcomm_dlc *d, int err); + void (*modem_status)(struct rfcomm_dlc *d, u8 v24_sig); +}; + +/* DLC and session flags */ +#define RFCOMM_RX_THROTTLED 0 +#define RFCOMM_TX_THROTTLED 1 +#define RFCOMM_TIMED_OUT 2 +#define RFCOMM_MSC_PENDING 3 +#define RFCOMM_SEC_PENDING 4 +#define RFCOMM_AUTH_PENDING 5 +#define RFCOMM_AUTH_ACCEPT 6 +#define RFCOMM_AUTH_REJECT 7 +#define RFCOMM_DEFER_SETUP 8 + +/* Scheduling flags and events */ +#define RFCOMM_SCHED_STATE 0 +#define RFCOMM_SCHED_RX 1 +#define RFCOMM_SCHED_TX 2 +#define RFCOMM_SCHED_TIMEO 3 +#define RFCOMM_SCHED_AUTH 4 +#define RFCOMM_SCHED_WAKEUP 31 + +/* MSC exchange flags */ +#define RFCOMM_MSCEX_TX 1 +#define RFCOMM_MSCEX_RX 2 +#define RFCOMM_MSCEX_OK (RFCOMM_MSCEX_TX + RFCOMM_MSCEX_RX) + +/* CFC states */ +#define RFCOMM_CFC_UNKNOWN -1 +#define RFCOMM_CFC_DISABLED 0 +#define RFCOMM_CFC_ENABLED RFCOMM_MAX_CREDITS + +/* ---- RFCOMM SEND RPN ---- */ +int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, + u8 bit_rate, u8 data_bits, u8 stop_bits, + u8 parity, u8 flow_ctrl_settings, + u8 xon_char, u8 xoff_char, u16 param_mask); + +/* ---- RFCOMM DLCs (channels) ---- */ +struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio); +void rfcomm_dlc_free(struct rfcomm_dlc *d); +int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel); +int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason); +int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb); +int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig); +int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig); +void rfcomm_dlc_accept(struct rfcomm_dlc *d); + +#define rfcomm_dlc_lock(d) spin_lock(&d->lock) +#define rfcomm_dlc_unlock(d) spin_unlock(&d->lock) + +static inline void rfcomm_dlc_hold(struct rfcomm_dlc *d) +{ + atomic_inc(&d->refcnt); +} + +static inline void rfcomm_dlc_put(struct rfcomm_dlc *d) +{ + if (atomic_dec_and_test(&d->refcnt)) + rfcomm_dlc_free(d); +} + +extern void __rfcomm_dlc_throttle(struct rfcomm_dlc *d); +extern void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d); + +static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d) +{ + if (!test_and_set_bit(RFCOMM_RX_THROTTLED, &d->flags)) + __rfcomm_dlc_throttle(d); +} + +static inline void rfcomm_dlc_unthrottle(struct rfcomm_dlc *d) +{ + if (test_and_clear_bit(RFCOMM_RX_THROTTLED, &d->flags)) + __rfcomm_dlc_unthrottle(d); +} + +/* ---- RFCOMM sessions ---- */ +void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst); + +static inline void rfcomm_session_hold(struct rfcomm_session *s) +{ + atomic_inc(&s->refcnt); +} + +/* ---- RFCOMM sockets ---- */ +struct sockaddr_rc { + sa_family_t rc_family; + bdaddr_t rc_bdaddr; + u8 rc_channel; +}; + +#define RFCOMM_CONNINFO 0x02 +struct rfcomm_conninfo { + __u16 hci_handle; + __u8 dev_class[3]; +}; + +#define RFCOMM_LM 0x03 +#define RFCOMM_LM_MASTER 0x0001 +#define RFCOMM_LM_AUTH 0x0002 +#define RFCOMM_LM_ENCRYPT 0x0004 +#define RFCOMM_LM_TRUSTED 0x0008 +#define RFCOMM_LM_RELIABLE 0x0010 +#define RFCOMM_LM_SECURE 0x0020 + +#define rfcomm_pi(sk) ((struct rfcomm_pinfo *) sk) + +struct rfcomm_pinfo { + struct bt_sock bt; + struct rfcomm_dlc *dlc; + u8 channel; + u8 sec_level; + u8 role_switch; +}; + +int rfcomm_init_sockets(void); +void rfcomm_cleanup_sockets(void); + +int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d); 
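
Editorial note, not part of the patch: the DLC API declared above is easiest to follow end to end in a short sketch. The example below is a minimal, hypothetical kernel-side user of these declarations; the example_* names are placeholders, and the headroom and refcount conventions mirror how the in-tree RFCOMM socket layer appears to use this API, so treat them as assumptions rather than guarantees.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/rfcomm.h>

/* Payload delivered by the RFCOMM core; the callback owns the skb. */
static void example_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
{
	kfree_skb(skb);
}

/* Called on DLC state transitions (connect, disconnect, errors). */
static void example_state_change(struct rfcomm_dlc *d, int err)
{
}

static int example_open_and_send(bdaddr_t *src, bdaddr_t *dst, u8 channel)
{
	struct rfcomm_dlc *d;
	struct sk_buff *skb;
	int err;

	d = rfcomm_dlc_alloc(GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->data_ready   = example_data_ready;
	d->state_change = example_state_change;

	err = rfcomm_dlc_open(d, src, dst, channel);
	if (err < 0) {
		rfcomm_dlc_put(d);	/* drop the allocation reference */
		return err;
	}

	/* Leave room for the RFCOMM framing around the payload. */
	skb = alloc_skb(4 + RFCOMM_SKB_RESERVE, GFP_KERNEL);
	if (skb) {
		skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
		memcpy(skb_put(skb, 4), "ping", 4);
		err = rfcomm_dlc_send(d, skb);
		if (err < 0)
			kfree_skb(skb);
	}

	/* Flow control: rfcomm_dlc_throttle(d) / rfcomm_dlc_unthrottle(d)
	 * pause and resume delivery to data_ready(). */

	return rfcomm_dlc_close(d, 0);
}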
+ +/* ---- RFCOMM TTY ---- */ +#define RFCOMM_MAX_DEV 256 + +#define RFCOMMCREATEDEV _IOW('R', 200, int) +#define RFCOMMRELEASEDEV _IOW('R', 201, int) +#define RFCOMMGETDEVLIST _IOR('R', 210, int) +#define RFCOMMGETDEVINFO _IOR('R', 211, int) +#define RFCOMMSTEALDLC _IOW('R', 220, int) + +#define RFCOMM_REUSE_DLC 0 +#define RFCOMM_RELEASE_ONHUP 1 +#define RFCOMM_HANGUP_NOW 2 +#define RFCOMM_TTY_ATTACHED 3 +#define RFCOMM_TTY_RELEASED 4 + +struct rfcomm_dev_req { + s16 dev_id; + u32 flags; + bdaddr_t src; + bdaddr_t dst; + u8 channel; +}; + +struct rfcomm_dev_info { + s16 id; + u32 flags; + u16 state; + bdaddr_t src; + bdaddr_t dst; + u8 channel; +}; + +struct rfcomm_dev_list_req { + u16 dev_num; + struct rfcomm_dev_info dev_info[0]; +}; + +int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); + +#ifdef CONFIG_BT_RFCOMM_TTY +int rfcomm_init_ttys(void); +void rfcomm_cleanup_ttys(void); +#else +static inline int rfcomm_init_ttys(void) +{ + return 0; +} +static inline void rfcomm_cleanup_ttys(void) +{ +} +#endif +#endif /* __RFCOMM_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h new file mode 100644 index 0000000..0be73ca --- /dev/null +++ b/include/net/cfg80211.h @@ -0,0 +1,2341 @@ +#ifndef __NET_CFG80211_H +#define __NET_CFG80211_H +/* + * 802.11 device and configuration interface + * + * Copyright 2006-2010 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* remove once we remove the wext stuff */ +#include +#include +#include + + +/* + * wireless hardware capability structures + */ + +/** + * enum ieee80211_band - supported frequency bands + * + * The bands are assigned this way because the supported + * bitrates differ in these bands. + * + * @IEEE80211_BAND_2GHZ: 2.4GHz ISM band + * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7) + */ +enum ieee80211_band { + IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ, + IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ, + + /* keep last */ + IEEE80211_NUM_BANDS +}; + +/** + * enum ieee80211_channel_flags - channel flags + * + * Channel flags set by the regulatory control code. + * + * @IEEE80211_CHAN_DISABLED: This channel is disabled. + * @IEEE80211_CHAN_PASSIVE_SCAN: Only passive scanning is permitted + * on this channel. + * @IEEE80211_CHAN_NO_IBSS: IBSS is not allowed on this channel. + * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel. + * @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel + * is not permitted. + * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel + * is not permitted. + */ +enum ieee80211_channel_flags { + IEEE80211_CHAN_DISABLED = 1<<0, + IEEE80211_CHAN_PASSIVE_SCAN = 1<<1, + IEEE80211_CHAN_NO_IBSS = 1<<2, + IEEE80211_CHAN_RADAR = 1<<3, + IEEE80211_CHAN_NO_HT40PLUS = 1<<4, + IEEE80211_CHAN_NO_HT40MINUS = 1<<5, +}; + +#define IEEE80211_CHAN_NO_HT40 \ + (IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS) + +/** + * struct ieee80211_channel - channel definition + * + * This structure describes a single channel for use + * with cfg80211. + * + * @center_freq: center frequency in MHz + * @hw_value: hardware-specific value for the channel + * @flags: channel flags from &enum ieee80211_channel_flags. 
+ * @orig_flags: channel flags at registration time, used by regulatory + * code to support devices with additional restrictions + * @band: band this channel belongs to. + * @max_antenna_gain: maximum antenna gain in dBi + * @max_power: maximum transmission power (in dBm) + * @beacon_found: helper to regulatory code to indicate when a beacon + * has been found on this channel. Use regulatory_hint_found_beacon() + * to enable this, this is is useful only on 5 GHz band. + * @orig_mag: internal use + * @orig_mpwr: internal use + */ +struct ieee80211_channel { + enum ieee80211_band band; + u16 center_freq; + u16 hw_value; + u32 flags; + int max_antenna_gain; + int max_power; + bool beacon_found; + u32 orig_flags; + int orig_mag, orig_mpwr; +}; + +/** + * enum ieee80211_rate_flags - rate flags + * + * Hardware/specification flags for rates. These are structured + * in a way that allows using the same bitrate structure for + * different bands/PHY modes. + * + * @IEEE80211_RATE_SHORT_PREAMBLE: Hardware can send with short + * preamble on this bitrate; only relevant in 2.4GHz band and + * with CCK rates. + * @IEEE80211_RATE_MANDATORY_A: This bitrate is a mandatory rate + * when used with 802.11a (on the 5 GHz band); filled by the + * core code when registering the wiphy. + * @IEEE80211_RATE_MANDATORY_B: This bitrate is a mandatory rate + * when used with 802.11b (on the 2.4 GHz band); filled by the + * core code when registering the wiphy. + * @IEEE80211_RATE_MANDATORY_G: This bitrate is a mandatory rate + * when used with 802.11g (on the 2.4 GHz band); filled by the + * core code when registering the wiphy. + * @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode. + */ +enum ieee80211_rate_flags { + IEEE80211_RATE_SHORT_PREAMBLE = 1<<0, + IEEE80211_RATE_MANDATORY_A = 1<<1, + IEEE80211_RATE_MANDATORY_B = 1<<2, + IEEE80211_RATE_MANDATORY_G = 1<<3, + IEEE80211_RATE_ERP_G = 1<<4, +}; + +/** + * struct ieee80211_rate - bitrate definition + * + * This structure describes a bitrate that an 802.11 PHY can + * operate with. The two values @hw_value and @hw_value_short + * are only for driver use when pointers to this structure are + * passed around. + * + * @flags: rate-specific flags + * @bitrate: bitrate in units of 100 Kbps + * @hw_value: driver/hardware value for this rate + * @hw_value_short: driver/hardware value for this rate when + * short preamble is used + */ +struct ieee80211_rate { + u32 flags; + u16 bitrate; + u16 hw_value, hw_value_short; +}; + +/** + * struct ieee80211_sta_ht_cap - STA's HT capabilities + * + * This structure describes most essential parameters needed + * to describe 802.11n HT capabilities for an STA. + * + * @ht_supported: is HT supported by the STA + * @cap: HT capabilities map as described in 802.11n spec + * @ampdu_factor: Maximum A-MPDU length factor + * @ampdu_density: Minimum A-MPDU spacing + * @mcs: Supported MCS rates + */ +struct ieee80211_sta_ht_cap { + u16 cap; /* use IEEE80211_HT_CAP_ */ + bool ht_supported; + u8 ampdu_factor; + u8 ampdu_density; + struct ieee80211_mcs_info mcs; +}; + +/** + * struct ieee80211_supported_band - frequency band definition + * + * This structure describes a frequency band a wiphy + * is able to operate in. + * + * @channels: Array of channels the hardware can operate in + * in this band. + * @band: the band this structure represents + * @n_channels: Number of channels in @channels + * @bitrates: Array of bitrates the hardware can operate with + * in this band. Must be sorted to give a valid "supported + * rates" IE, i.e. 
CCK rates first, then OFDM. + * @n_bitrates: Number of bitrates in @bitrates + */ +struct ieee80211_supported_band { + struct ieee80211_channel *channels; + struct ieee80211_rate *bitrates; + enum ieee80211_band band; + int n_channels; + int n_bitrates; + struct ieee80211_sta_ht_cap ht_cap; +}; + +/* + * Wireless hardware/device configuration structures and methods + */ + +/** + * struct vif_params - describes virtual interface parameters + * @mesh_id: mesh ID to use + * @mesh_id_len: length of the mesh ID + * @use_4addr: use 4-address frames + */ +struct vif_params { + u8 *mesh_id; + int mesh_id_len; + int use_4addr; +}; + +/** + * struct key_params - key information + * + * Information about a key + * + * @key: key material + * @key_len: length of key material + * @cipher: cipher suite selector + * @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used + * with the get_key() callback, must be in little endian, + * length given by @seq_len. + */ +struct key_params { + u8 *key; + u8 *seq; + int key_len; + int seq_len; + u32 cipher; +}; + +/** + * enum survey_info_flags - survey information flags + * + * Used by the driver to indicate which info in &struct survey_info + * it has filled in during the get_survey(). + */ +enum survey_info_flags { + SURVEY_INFO_NOISE_DBM = 1<<0, +}; + +/** + * struct survey_info - channel survey response + * + * Used by dump_survey() to report back per-channel survey information. + * + * @channel: the channel this survey record reports, mandatory + * @filled: bitflag of flags from &enum survey_info_flags + * @noise: channel noise in dBm. This and all following fields are + * optional + * + * This structure can later be expanded with things like + * channel duty cycle etc. + */ +struct survey_info { + struct ieee80211_channel *channel; + u32 filled; + s8 noise; +}; + +/** + * struct beacon_parameters - beacon parameters + * + * Used to configure the beacon for an interface. + * + * @head: head portion of beacon (before TIM IE) + * or %NULL if not changed + * @tail: tail portion of beacon (after TIM IE) + * or %NULL if not changed + * @interval: beacon interval or zero if not changed + * @dtim_period: DTIM period or zero if not changed + * @head_len: length of @head + * @tail_len: length of @tail + */ +struct beacon_parameters { + u8 *head, *tail; + int interval, dtim_period; + int head_len, tail_len; +}; + +/** + * enum plink_action - actions to perform in mesh peers + * + * @PLINK_ACTION_INVALID: action 0 is reserved + * @PLINK_ACTION_OPEN: start mesh peer link establishment + * @PLINK_ACTION_BLOCL: block traffic from this mesh peer + */ +enum plink_actions { + PLINK_ACTION_INVALID, + PLINK_ACTION_OPEN, + PLINK_ACTION_BLOCK, +}; + +/** + * struct station_parameters - station parameters + * + * Used to change and create a new station. 
+ * + * @vlan: vlan interface station should belong to + * @supported_rates: supported rates in IEEE 802.11 format + * (or NULL for no change) + * @supported_rates_len: number of supported rates + * @sta_flags_mask: station flags that changed + * (bitmask of BIT(NL80211_STA_FLAG_...)) + * @sta_flags_set: station flags values + * (bitmask of BIT(NL80211_STA_FLAG_...)) + * @listen_interval: listen interval or -1 for no change + * @aid: AID or zero for no change + */ +struct station_parameters { + u8 *supported_rates; + struct net_device *vlan; + u32 sta_flags_mask, sta_flags_set; + int listen_interval; + u16 aid; + u8 supported_rates_len; + u8 plink_action; + struct ieee80211_ht_cap *ht_capa; +}; + +/** + * enum station_info_flags - station information flags + * + * Used by the driver to indicate which info in &struct station_info + * it has filled in during get_station() or dump_station(). + * + * @STATION_INFO_INACTIVE_TIME: @inactive_time filled + * @STATION_INFO_RX_BYTES: @rx_bytes filled + * @STATION_INFO_TX_BYTES: @tx_bytes filled + * @STATION_INFO_LLID: @llid filled + * @STATION_INFO_PLID: @plid filled + * @STATION_INFO_PLINK_STATE: @plink_state filled + * @STATION_INFO_SIGNAL: @signal filled + * @STATION_INFO_TX_BITRATE: @tx_bitrate fields are filled + * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs) + * @STATION_INFO_RX_PACKETS: @rx_packets filled + * @STATION_INFO_TX_PACKETS: @tx_packets filled + */ +enum station_info_flags { + STATION_INFO_INACTIVE_TIME = 1<<0, + STATION_INFO_RX_BYTES = 1<<1, + STATION_INFO_TX_BYTES = 1<<2, + STATION_INFO_LLID = 1<<3, + STATION_INFO_PLID = 1<<4, + STATION_INFO_PLINK_STATE = 1<<5, + STATION_INFO_SIGNAL = 1<<6, + STATION_INFO_TX_BITRATE = 1<<7, + STATION_INFO_RX_PACKETS = 1<<8, + STATION_INFO_TX_PACKETS = 1<<9, +}; + +/** + * enum station_info_rate_flags - bitrate info flags + * + * Used by the driver to indicate the specific rate transmission + * type for 802.11n transmissions. + * + * @RATE_INFO_FLAGS_MCS: @tx_bitrate_mcs filled + * @RATE_INFO_FLAGS_40_MHZ_WIDTH: 40 Mhz width transmission + * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval + */ +enum rate_info_flags { + RATE_INFO_FLAGS_MCS = 1<<0, + RATE_INFO_FLAGS_40_MHZ_WIDTH = 1<<1, + RATE_INFO_FLAGS_SHORT_GI = 1<<2, +}; + +/** + * struct rate_info - bitrate information + * + * Information about a receiving or transmitting bitrate + * + * @flags: bitflag of flags from &enum rate_info_flags + * @mcs: mcs index if struct describes a 802.11n bitrate + * @legacy: bitrate in 100kbit/s for 802.11abg + */ +struct rate_info { + u8 flags; + u8 mcs; + u16 legacy; +}; + +/** + * struct station_info - station information + * + * Station information filled by driver for get_station() and dump_station. + * + * @filled: bitflag of flags from &enum station_info_flags + * @inactive_time: time since last station activity (tx/rx) in milliseconds + * @rx_bytes: bytes received from this station + * @tx_bytes: bytes transmitted to this station + * @llid: mesh local link id + * @plid: mesh peer link id + * @plink_state: mesh peer link state + * @signal: signal strength of last received packet in dBm + * @txrate: current unicast bitrate to this station + * @rx_packets: packets received from this station + * @tx_packets: packets transmitted to this station + * @generation: generation number for nl80211 dumps. + * This number should increase every time the list of stations + * changes, i.e. when a station is added or removed, so that + * userspace can tell whether it got a consistent snapshot. 
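 *
 * Editorial example, not part of the original header: a driver's
 * get_station() callback typically fills only the fields it actually
 * knows and advertises them through @filled. The mydrv_* names are
 * placeholders; @signal is in dBm and @txrate.legacy in 100 kbit/s units,
 * as documented above.
 *
 *	static int mydrv_get_station(struct wiphy *wiphy, struct net_device *dev,
 *				     u8 *mac, struct station_info *sinfo)
 *	{
 *		struct mydrv_sta *sta = mydrv_find_sta(dev, mac);
 *
 *		if (!sta)
 *			return -ENOENT;
 *
 *		sinfo->filled = STATION_INFO_SIGNAL | STATION_INFO_TX_PACKETS |
 *				STATION_INFO_TX_BITRATE;
 *		sinfo->signal = sta->last_rssi;
 *		sinfo->tx_packets = sta->tx_frames;
 *		sinfo->txrate.flags = 0;
 *		sinfo->txrate.legacy = 540;
 *		return 0;
 *	}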
+ */ +struct station_info { + u32 filled; + u32 inactive_time; + u32 rx_bytes; + u32 tx_bytes; + u16 llid; + u16 plid; + u8 plink_state; + s8 signal; + struct rate_info txrate; + u32 rx_packets; + u32 tx_packets; + + int generation; +}; + +/** + * enum monitor_flags - monitor flags + * + * Monitor interface configuration flags. Note that these must be the bits + * according to the nl80211 flags. + * + * @MONITOR_FLAG_FCSFAIL: pass frames with bad FCS + * @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP + * @MONITOR_FLAG_CONTROL: pass control frames + * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering + * @MONITOR_FLAG_COOK_FRAMES: report frames after processing + */ +enum monitor_flags { + MONITOR_FLAG_FCSFAIL = 1<bss_priv_size bytes + */ +struct cfg80211_bss { + struct ieee80211_channel *channel; + + u8 bssid[ETH_ALEN]; + u64 tsf; + u16 beacon_interval; + u16 capability; + u8 *information_elements; + size_t len_information_elements; + u8 *beacon_ies; + size_t len_beacon_ies; + u8 *proberesp_ies; + size_t len_proberesp_ies; + + s32 signal; + + void (*free_priv)(struct cfg80211_bss *bss); + u8 priv[0] __attribute__((__aligned__(sizeof(void *)))); +}; + +/** + * ieee80211_bss_get_ie - find IE with given ID + * @bss: the bss to search + * @ie: the IE ID + * Returns %NULL if not found. + */ +const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie); + + +/** + * struct cfg80211_crypto_settings - Crypto settings + * @wpa_versions: indicates which, if any, WPA versions are enabled + * (from enum nl80211_wpa_versions) + * @cipher_group: group key cipher suite (or 0 if unset) + * @n_ciphers_pairwise: number of AP supported unicast ciphers + * @ciphers_pairwise: unicast key cipher suites + * @n_akm_suites: number of AKM suites + * @akm_suites: AKM suites + * @control_port: Whether user space controls IEEE 802.1X port, i.e., + * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is + * required to assume that the port is unauthorized until authorized by + * user space. Otherwise, port is marked authorized by default. + */ +struct cfg80211_crypto_settings { + u32 wpa_versions; + u32 cipher_group; + int n_ciphers_pairwise; + u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES]; + int n_akm_suites; + u32 akm_suites[NL80211_MAX_NR_AKM_SUITES]; + bool control_port; +}; + +/** + * struct cfg80211_auth_request - Authentication request data + * + * This structure provides information needed to complete IEEE 802.11 + * authentication. + * + * @bss: The BSS to authenticate with. + * @auth_type: Authentication type (algorithm) + * @ie: Extra IEs to add to Authentication frame or %NULL + * @ie_len: Length of ie buffer in octets + * @key_len: length of WEP key for shared key authentication + * @key_idx: index of WEP key for shared key authentication + * @key: WEP key for shared key authentication + */ +struct cfg80211_auth_request { + struct cfg80211_bss *bss; + const u8 *ie; + size_t ie_len; + enum nl80211_auth_type auth_type; + const u8 *key; + u8 key_len, key_idx; +}; + +/** + * struct cfg80211_assoc_request - (Re)Association request data + * + * This structure provides information needed to complete IEEE 802.11 + * (re)association. + * @bss: The BSS to associate with. 
+ * @ie: Extra IEs to add to (Re)Association Request frame or %NULL + * @ie_len: Length of ie buffer in octets + * @use_mfp: Use management frame protection (IEEE 802.11w) in this association + * @crypto: crypto settings + * @prev_bssid: previous BSSID, if not %NULL use reassociate frame + */ +struct cfg80211_assoc_request { + struct cfg80211_bss *bss; + const u8 *ie, *prev_bssid; + size_t ie_len; + struct cfg80211_crypto_settings crypto; + bool use_mfp; +}; + +/** + * struct cfg80211_deauth_request - Deauthentication request data + * + * This structure provides information needed to complete IEEE 802.11 + * deauthentication. + * + * @bss: the BSS to deauthenticate from + * @ie: Extra IEs to add to Deauthentication frame or %NULL + * @ie_len: Length of ie buffer in octets + * @reason_code: The reason code for the deauthentication + */ +struct cfg80211_deauth_request { + struct cfg80211_bss *bss; + const u8 *ie; + size_t ie_len; + u16 reason_code; +}; + +/** + * struct cfg80211_disassoc_request - Disassociation request data + * + * This structure provides information needed to complete IEEE 802.11 + * disassocation. + * + * @bss: the BSS to disassociate from + * @ie: Extra IEs to add to Disassociation frame or %NULL + * @ie_len: Length of ie buffer in octets + * @reason_code: The reason code for the disassociation + */ +struct cfg80211_disassoc_request { + struct cfg80211_bss *bss; + const u8 *ie; + size_t ie_len; + u16 reason_code; +}; + +/** + * struct cfg80211_ibss_params - IBSS parameters + * + * This structure defines the IBSS parameters for the join_ibss() + * method. + * + * @ssid: The SSID, will always be non-null. + * @ssid_len: The length of the SSID, will always be non-zero. + * @bssid: Fixed BSSID requested, maybe be %NULL, if set do not + * search for IBSSs with a different BSSID. + * @channel: The channel to use if no IBSS can be found to join. + * @channel_fixed: The channel should be fixed -- do not search for + * IBSSs to join on other channels. + * @ie: information element(s) to include in the beacon + * @ie_len: length of that + * @beacon_interval: beacon interval to use + * @privacy: this is a protected network, keys will be configured + * after joining + */ +struct cfg80211_ibss_params { + u8 *ssid; + u8 *bssid; + struct ieee80211_channel *channel; + u8 *ie; + u8 ssid_len, ie_len; + u16 beacon_interval; + bool channel_fixed; + bool privacy; +}; + +/** + * struct cfg80211_connect_params - Connection parameters + * + * This structure provides information needed to complete IEEE 802.11 + * authentication and association. 
+ * + * @channel: The channel to use or %NULL if not specified (auto-select based + * on scan results) + * @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan + * results) + * @ssid: SSID + * @ssid_len: Length of ssid in octets + * @auth_type: Authentication type (algorithm) + * @assoc_ie: IEs for association request + * @assoc_ie_len: Length of assoc_ie in octets + * @privacy: indicates whether privacy-enabled APs should be used + * @crypto: crypto settings + * @key_len: length of WEP key for shared key authentication + * @key_idx: index of WEP key for shared key authentication + * @key: WEP key for shared key authentication + */ +struct cfg80211_connect_params { + struct ieee80211_channel *channel; + u8 *bssid; + u8 *ssid; + size_t ssid_len; + enum nl80211_auth_type auth_type; + u8 *ie; + size_t ie_len; + bool privacy; + struct cfg80211_crypto_settings crypto; + const u8 *key; + u8 key_len, key_idx; +}; + +/** + * enum wiphy_params_flags - set_wiphy_params bitfield values + * WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed + * WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed + * WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed + * WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed + */ +enum wiphy_params_flags { + WIPHY_PARAM_RETRY_SHORT = 1 << 0, + WIPHY_PARAM_RETRY_LONG = 1 << 1, + WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2, + WIPHY_PARAM_RTS_THRESHOLD = 1 << 3, + WIPHY_PARAM_COVERAGE_CLASS = 1 << 4, +}; + +/** + * enum tx_power_setting - TX power adjustment + * + * @TX_POWER_AUTOMATIC: the dbm parameter is ignored + * @TX_POWER_LIMITED: limit TX power by the dbm parameter + * @TX_POWER_FIXED: fix TX power to the dbm parameter + */ +enum tx_power_setting { + TX_POWER_AUTOMATIC, + TX_POWER_LIMITED, + TX_POWER_FIXED, +}; + +/* + * cfg80211_bitrate_mask - masks for bitrate control + */ +struct cfg80211_bitrate_mask { + struct { + u32 legacy; + /* TODO: add support for masking MCS rates; e.g.: */ + /* u8 mcs[IEEE80211_HT_MCS_MASK_LEN]; */ + } control[IEEE80211_NUM_BANDS]; +}; +/** + * struct cfg80211_pmksa - PMK Security Association + * + * This structure is passed to the set/del_pmksa() method for PMKSA + * caching. + * + * @bssid: The AP's BSSID. + * @pmkid: The PMK material itself. + */ +struct cfg80211_pmksa { + u8 *bssid; + u8 *pmkid; +}; + +/** + * struct cfg80211_ops - backend description for wireless configuration + * + * This struct is registered by fullmac card drivers and/or wireless stacks + * in order to handle configuration requests on their interfaces. + * + * All callbacks except where otherwise noted should return 0 + * on success or a negative error code. + * + * All operations are currently invoked under rtnl for consistency with the + * wireless extensions but this is subject to reevaluation as soon as this + * code is used more widely and we have a first user without wext. + * + * @suspend: wiphy device needs to be suspended + * @resume: wiphy device needs to be resumed + * + * @add_virtual_intf: create a new virtual interface with the given name, + * must set the struct wireless_dev's iftype. Beware: You must create + * the new netdev in the wiphy's network namespace! + * + * @del_virtual_intf: remove the virtual interface determined by ifindex. + * + * @change_virtual_intf: change type/configuration of virtual interface, + * keep the struct wireless_dev's iftype updated. + * + * @add_key: add a key with the given parameters. @mac_addr will be %NULL + * when adding a group key. 
+ * + * @get_key: get information about the key with the given parameters. + * @mac_addr will be %NULL when requesting information for a group + * key. All pointers given to the @callback function need not be valid + * after it returns. This function should return an error if it is + * not possible to retrieve the key, -ENOENT if it doesn't exist. + * + * @del_key: remove a key given the @mac_addr (%NULL for a group key) + * and @key_index, return -ENOENT if the key doesn't exist. + * + * @set_default_key: set the default key on an interface + * + * @set_default_mgmt_key: set the default management frame key on an interface + * + * @add_beacon: Add a beacon with given parameters, @head, @interval + * and @dtim_period will be valid, @tail is optional. + * @set_beacon: Change the beacon parameters for an access point mode + * interface. This should reject the call when no beacon has been + * configured. + * @del_beacon: Remove beacon configuration and stop sending the beacon. + * + * @add_station: Add a new station. + * + * @del_station: Remove a station; @mac may be NULL to remove all stations. + * + * @change_station: Modify a given station. + * + * @get_mesh_params: Put the current mesh parameters into *params + * + * @set_mesh_params: Set mesh parameters. + * The mask is a bitfield which tells us which parameters to + * set, and which to leave alone. + * + * @set_mesh_cfg: set mesh parameters (by now, just mesh id) + * + * @change_bss: Modify parameters for a given BSS. + * + * @set_txq_params: Set TX queue parameters + * + * @set_channel: Set channel + * + * @scan: Request to do a scan. If returning zero, the scan request is given + * the driver, and will be valid until passed to cfg80211_scan_done(). + * For scan results, call cfg80211_inform_bss(); you can call this outside + * the scan/scan_done bracket too. + * + * @auth: Request to authenticate with the specified peer + * @assoc: Request to (re)associate with the specified peer + * @deauth: Request to deauthenticate from the specified peer + * @disassoc: Request to disassociate from the specified peer + * + * @connect: Connect to the ESS with the specified parameters. When connected, + * call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS. + * If the connection fails for some reason, call cfg80211_connect_result() + * with the status from the AP. + * @disconnect: Disconnect from the BSS/ESS. + * + * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call + * cfg80211_ibss_joined(), also call that function when changing BSSID due + * to a merge. + * @leave_ibss: Leave the IBSS. + * + * @set_wiphy_params: Notify that wiphy parameters have changed; + * @changed bitfield (see &enum wiphy_params_flags) describes which values + * have changed. The actual parameter values are available in + * struct wiphy. If returning an error, no value should be changed. + * + * @set_tx_power: set the transmit power according to the parameters + * @get_tx_power: store the current TX power into the dbm variable; + * return 0 if successful + * + * @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting + * functions to adjust rfkill hw state + * + * @dump_survey: get site survey information. + * + * @remain_on_channel: Request the driver to remain awake on the specified + * channel for the specified duration to complete an off-channel + * operation (e.g., public action frame exchange). 
When the driver is + * ready on the requested channel, it must indicate this with an event + * notification by calling cfg80211_ready_on_channel(). + * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation. + * This allows the operation to be terminated prior to timeout based on + * the duration value. + * @action: Transmit an action frame + * + * @testmode_cmd: run a test mode command + * + * @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac + * devices running firmwares capable of generating the (re) association + * RSN IE. It allows for faster roaming between WPA2 BSSIDs. + * @del_pmksa: Delete a cached PMKID. + * @flush_pmksa: Flush all cached PMKIDs. + * + */ +struct cfg80211_ops { + int (*suspend)(struct wiphy *wiphy); + int (*resume)(struct wiphy *wiphy); + + int (*add_virtual_intf)(struct wiphy *wiphy, char *name, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params); + int (*del_virtual_intf)(struct wiphy *wiphy, struct net_device *dev); + int (*change_virtual_intf)(struct wiphy *wiphy, + struct net_device *dev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params); + + int (*add_key)(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr, + struct key_params *params); + int (*get_key)(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr, void *cookie, + void (*callback)(void *cookie, struct key_params*)); + int (*del_key)(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index, const u8 *mac_addr); + int (*set_default_key)(struct wiphy *wiphy, + struct net_device *netdev, + u8 key_index); + int (*set_default_mgmt_key)(struct wiphy *wiphy, + struct net_device *netdev, + u8 key_index); + + int (*add_beacon)(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info); + int (*set_beacon)(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info); + int (*del_beacon)(struct wiphy *wiphy, struct net_device *dev); + + + int (*add_station)(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_parameters *params); + int (*del_station)(struct wiphy *wiphy, struct net_device *dev, + u8 *mac); + int (*change_station)(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_parameters *params); + int (*get_station)(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_info *sinfo); + int (*dump_station)(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo); + + int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *next_hop); + int (*del_mpath)(struct wiphy *wiphy, struct net_device *dev, + u8 *dst); + int (*change_mpath)(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *next_hop); + int (*get_mpath)(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *next_hop, + struct mpath_info *pinfo); + int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *next_hop, + struct mpath_info *pinfo); + int (*get_mesh_params)(struct wiphy *wiphy, + struct net_device *dev, + struct mesh_config *conf); + int (*set_mesh_params)(struct wiphy *wiphy, + struct net_device *dev, + const struct mesh_config *nconf, u32 mask); + int (*change_bss)(struct wiphy *wiphy, struct net_device *dev, + struct bss_parameters *params); + + int (*set_txq_params)(struct wiphy *wiphy, + struct ieee80211_txq_params *params); + + int (*set_channel)(struct wiphy *wiphy, + struct ieee80211_channel *chan, + 
enum nl80211_channel_type channel_type); + + int (*scan)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_scan_request *request); + + int (*auth)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_auth_request *req); + int (*assoc)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_assoc_request *req); + int (*deauth)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_deauth_request *req, + void *cookie); + int (*disassoc)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_disassoc_request *req, + void *cookie); + + int (*connect)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme); + int (*disconnect)(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code); + + int (*join_ibss)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params); + int (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev); + + int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed); + + int (*set_tx_power)(struct wiphy *wiphy, + enum tx_power_setting type, int dbm); + int (*get_tx_power)(struct wiphy *wiphy, int *dbm); + + int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev, + u8 *addr); + + void (*rfkill_poll)(struct wiphy *wiphy); + +#ifdef CONFIG_NL80211_TESTMODE + int (*testmode_cmd)(struct wiphy *wiphy, void *data, int len); +#endif + + int (*set_bitrate_mask)(struct wiphy *wiphy, + struct net_device *dev, + const u8 *peer, + const struct cfg80211_bitrate_mask *mask); + + int (*dump_survey)(struct wiphy *wiphy, struct net_device *netdev, + int idx, struct survey_info *info); + + int (*set_pmksa)(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_pmksa *pmksa); + int (*del_pmksa)(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_pmksa *pmksa); + int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev); + + int (*remain_on_channel)(struct wiphy *wiphy, + struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int duration, + u64 *cookie); + int (*cancel_remain_on_channel)(struct wiphy *wiphy, + struct net_device *dev, + u64 cookie); + + int (*action)(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + const u8 *buf, size_t len, u64 *cookie); + + int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev, + bool enabled, int timeout); +}; + +/* + * wireless hardware and networking interfaces structures + * and registration/helper functions + */ + +/** + * enum wiphy_flags - wiphy capability flags + * + * @WIPHY_FLAG_CUSTOM_REGULATORY: tells us the driver for this device + * has its own custom regulatory domain and cannot identify the + * ISO / IEC 3166 alpha2 it belongs to. When this is enabled + * we will disregard the first regulatory hint (when the + * initiator is %REGDOM_SET_BY_CORE). + * @WIPHY_FLAG_STRICT_REGULATORY: tells us the driver for this device will + * ignore regulatory domain settings until it gets its own regulatory + * domain via its regulatory_hint(). After its gets its own regulatory + * domain it will only allow further regulatory domain settings to + * further enhance compliance. For example if channel 13 and 14 are + * disabled by this regulatory domain no user regulatory domain can + * enable these channels at a later time. This can be used for devices + * which do not have calibration information gauranteed for frequencies + * or settings outside of its regulatory domain. 
+ * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure + * that passive scan flags and beaconing flags may not be lifted by + * cfg80211 due to regulatory beacon hints. For more information on beacon + * hints read the documenation for regulatory_hint_found_beacon() + * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this + * wiphy at all + * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled + * by default -- this flag will be set depending on the kernel's default + * on wiphy_new(), but can be changed by the driver if it has a good + * reason to override the default + * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station + * on a VLAN interface) + * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station + */ +enum wiphy_flags { + WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), + WIPHY_FLAG_STRICT_REGULATORY = BIT(1), + WIPHY_FLAG_DISABLE_BEACON_HINTS = BIT(2), + WIPHY_FLAG_NETNS_OK = BIT(3), + WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4), + WIPHY_FLAG_4ADDR_AP = BIT(5), + WIPHY_FLAG_4ADDR_STATION = BIT(6), +}; + +struct mac_address { + u8 addr[ETH_ALEN]; +}; + +/** + * struct wiphy - wireless hardware description + * @idx: the wiphy index assigned to this item + * @class_dev: the class device representing /sys/class/ieee80211/ + * @reg_notifier: the driver's regulatory notification callback + * @regd: the driver's regulatory domain, if one was requested via + * the regulatory_hint() API. This can be used by the driver + * on the reg_notifier() if it chooses to ignore future + * regulatory domain changes caused by other drivers. + * @signal_type: signal type reported in &struct cfg80211_bss. + * @cipher_suites: supported cipher suites + * @n_cipher_suites: number of supported cipher suites + * @retry_short: Retry limit for short frames (dot11ShortRetryLimit) + * @retry_long: Retry limit for long frames (dot11LongRetryLimit) + * @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold); + * -1 = fragmentation disabled, only odd values >= 256 used + * @rts_threshold: RTS threshold (dot11RTSThreshold); -1 = RTS/CTS disabled + * @net: the network namespace this wiphy currently lives in + * @perm_addr: permanent MAC address of this device + * @addr_mask: If the device supports multiple MAC addresses by masking, + * set this to a mask with variable bits set to 1, e.g. if the last + * four bits are variable then set it to 00:...:00:0f. The actual + * variable bits shall be determined by the interfaces added, with + * interfaces not matching the mask being rejected to be brought up. + * @n_addresses: number of addresses in @addresses. + * @addresses: If the device has more than one address, set this pointer + * to a list of addresses (6 bytes each). The first one will be used + * by default for perm_addr. In this case, the mask should be set to + * all-zeroes. In this case it is assumed that the device can handle + * the same number of arbitrary MAC addresses. + */ +struct wiphy { + /* assign these fields before you register the wiphy */ + + /* permanent MAC address(es) */ + u8 perm_addr[ETH_ALEN]; + u8 addr_mask[ETH_ALEN]; + + u16 n_addresses; + struct mac_address *addresses; + + /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) 
*/ + u16 interface_modes; + + u32 flags; + + enum cfg80211_signal_type signal_type; + + int bss_priv_size; + u8 max_scan_ssids; + u16 max_scan_ie_len; + + int n_cipher_suites; + const u32 *cipher_suites; + + u8 retry_short; + u8 retry_long; + u32 frag_threshold; + u32 rts_threshold; + u8 coverage_class; + + char fw_version[ETHTOOL_BUSINFO_LEN]; + u32 hw_version; + + u8 max_num_pmkids; + + /* If multiple wiphys are registered and you're handed e.g. + * a regular netdev with assigned ieee80211_ptr, you won't + * know whether it points to a wiphy your driver has registered + * or not. Assign this to something global to your driver to + * help determine whether you own this wiphy or not. */ + const void *privid; + + struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS]; + + /* Lets us get back the wiphy on the callback */ + int (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request); + + /* fields below are read-only, assigned by cfg80211 */ + + const struct ieee80211_regdomain *regd; + + /* the item in /sys/class/ieee80211/ points to this, + * you need use set_wiphy_dev() (see below) */ + struct device dev; + + /* dir in debugfs: ieee80211/ */ + struct dentry *debugfsdir; + +#ifdef CONFIG_NET_NS + /* the network namespace this phy lives in currently */ + struct net *_net; +#endif + +#ifdef CONFIG_CFG80211_WEXT + const struct iw_handler_def *wext; +#endif + + char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); +}; + +#ifdef CONFIG_NET_NS +static inline struct net *wiphy_net(struct wiphy *wiphy) +{ + return wiphy->_net; +} + +static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net) +{ + wiphy->_net = net; +} +#else +static inline struct net *wiphy_net(struct wiphy *wiphy) +{ + return &init_net; +} + +static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net) +{ +} +#endif + +/** + * wiphy_priv - return priv from wiphy + * + * @wiphy: the wiphy whose priv pointer to return + */ +static inline void *wiphy_priv(struct wiphy *wiphy) +{ + BUG_ON(!wiphy); + return &wiphy->priv; +} + +/** + * priv_to_wiphy - return the wiphy containing the priv + * + * @priv: a pointer previously returned by wiphy_priv + */ +static inline struct wiphy *priv_to_wiphy(void *priv) +{ + BUG_ON(!priv); + return container_of(priv, struct wiphy, priv); +} + +/** + * set_wiphy_dev - set device pointer for wiphy + * + * @wiphy: The wiphy whose device to bind + * @dev: The device to parent it to + */ +static inline void set_wiphy_dev(struct wiphy *wiphy, struct device *dev) +{ + wiphy->dev.parent = dev; +} + +/** + * wiphy_dev - get wiphy dev pointer + * + * @wiphy: The wiphy whose device struct to look up + */ +static inline struct device *wiphy_dev(struct wiphy *wiphy) +{ + return wiphy->dev.parent; +} + +/** + * wiphy_name - get wiphy name + * + * @wiphy: The wiphy whose name to return + */ +static inline const char *wiphy_name(struct wiphy *wiphy) +{ + return dev_name(&wiphy->dev); +} + +/** + * wiphy_new - create a new wiphy for use with cfg80211 + * + * @ops: The configuration operations for this device + * @sizeof_priv: The size of the private area to allocate + * + * Create a new wiphy and associate the given operations with it. + * @sizeof_priv bytes are allocated for private use. + * + * The returned pointer must be assigned to each netdev's + * ieee80211_ptr for proper operation. + */ +struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv); + +/** + * wiphy_register - register a wiphy with cfg80211 + * + * @wiphy: The wiphy to register. 
+ * + * Returns a non-negative wiphy index or a negative error code. + */ +extern int wiphy_register(struct wiphy *wiphy); + +/** + * wiphy_unregister - deregister a wiphy from cfg80211 + * + * @wiphy: The wiphy to unregister. + * + * After this call, no more requests can be made with this priv + * pointer, but the call may sleep to wait for an outstanding + * request that is being handled. + */ +extern void wiphy_unregister(struct wiphy *wiphy); + +/** + * wiphy_free - free wiphy + * + * @wiphy: The wiphy to free + */ +extern void wiphy_free(struct wiphy *wiphy); + +/* internal structs */ +struct cfg80211_conn; +struct cfg80211_internal_bss; +struct cfg80211_cached_keys; + +#define MAX_AUTH_BSSES 4 + +/** + * struct wireless_dev - wireless per-netdev state + * + * This structure must be allocated by the driver/stack + * that uses the ieee80211_ptr field in struct net_device + * (this is intentional so it can be allocated along with + * the netdev.) + * + * @wiphy: pointer to hardware description + * @iftype: interface type + * @list: (private) Used to collect the interfaces + * @netdev: (private) Used to reference back to the netdev + * @current_bss: (private) Used by the internal configuration code + * @bssid: (private) Used by the internal configuration code + * @ssid: (private) Used by the internal configuration code + * @ssid_len: (private) Used by the internal configuration code + * @wext: (private) Used by the internal wireless extensions compat code + * @wext_bssid: (private) Used by the internal wireless extensions compat code + * @use_4addr: indicates 4addr mode is used on this interface, must be + * set by driver (if supported) on add_interface BEFORE registering the + * netdev and may otherwise be used by driver read-only, will be update + * by cfg80211 on change_interface + * @action_registrations: list of registrations for action frames + * @action_registrations_lock: lock for the list + */ +struct wireless_dev { + struct wiphy *wiphy; + enum nl80211_iftype iftype; + + /* the remainder of this struct should be private to cfg80211 */ + struct list_head list; + struct net_device *netdev; + + struct list_head action_registrations; + spinlock_t action_registrations_lock; + + struct mutex mtx; + + struct work_struct cleanup_work; + + bool use_4addr; + + /* currently used for IBSS and SME - might be rearranged later */ + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; + enum { + CFG80211_SME_IDLE, + CFG80211_SME_CONNECTING, + CFG80211_SME_CONNECTED, + } sme_state; + struct cfg80211_conn *conn; + struct cfg80211_cached_keys *connect_keys; + + struct list_head event_list; + spinlock_t event_lock; + + struct cfg80211_internal_bss *authtry_bsses[MAX_AUTH_BSSES]; + struct cfg80211_internal_bss *auth_bsses[MAX_AUTH_BSSES]; + struct cfg80211_internal_bss *current_bss; /* associated / joined */ + + bool ps; + int ps_timeout; + +#ifdef CONFIG_CFG80211_WEXT + /* wext data */ + struct { + struct cfg80211_ibss_params ibss; + struct cfg80211_connect_params connect; + struct cfg80211_cached_keys *keys; + u8 *ie; + size_t ie_len; + u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + s8 default_key, default_mgmt_key; + bool prev_bssid_valid; + } wext; +#endif +}; + +/** + * wdev_priv - return wiphy priv from wireless_dev + * + * @wdev: The wireless device whose wiphy's priv pointer to return + */ +static inline void *wdev_priv(struct wireless_dev *wdev) +{ + BUG_ON(!wdev); + return wiphy_priv(wdev->wiphy); +} + +/* + * Utility functions + */ + +/** + * 
ieee80211_channel_to_frequency - convert channel number to frequency + */ +extern int ieee80211_channel_to_frequency(int chan); + +/** + * ieee80211_frequency_to_channel - convert frequency to channel number + */ +extern int ieee80211_frequency_to_channel(int freq); + +/* + * Name indirection necessary because the ieee80211 code also has + * a function named "ieee80211_get_channel", so if you include + * cfg80211's header file you get cfg80211's version, if you try + * to include both header files you'll (rightfully!) get a symbol + * clash. + */ +extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, + int freq); +/** + * ieee80211_get_channel - get channel struct from wiphy for specified frequency + */ +static inline struct ieee80211_channel * +ieee80211_get_channel(struct wiphy *wiphy, int freq) +{ + return __ieee80211_get_channel(wiphy, freq); +} + +/** + * ieee80211_get_response_rate - get basic rate for a given rate + * + * @sband: the band to look for rates in + * @basic_rates: bitmap of basic rates + * @bitrate: the bitrate for which to find the basic rate + * + * This function returns the basic rate corresponding to a given + * bitrate, that is the next lower bitrate contained in the basic + * rate map, which is, for this function, given as a bitmap of + * indices of rates in the band's bitrate table. + */ +struct ieee80211_rate * +ieee80211_get_response_rate(struct ieee80211_supported_band *sband, + u32 basic_rates, int bitrate); + +/* + * Radiotap parsing functions -- for controlled injection support + * + * Implemented in net/wireless/radiotap.c + * Documentation in Documentation/networking/radiotap-headers.txt + */ + +struct radiotap_align_size { + uint8_t align:4, size:4; +}; + +struct ieee80211_radiotap_namespace { + const struct radiotap_align_size *align_size; + int n_bits; + uint32_t oui; + uint8_t subns; +}; + +struct ieee80211_radiotap_vendor_namespaces { + const struct ieee80211_radiotap_namespace *ns; + int n_ns; +}; + +/** + * struct ieee80211_radiotap_iterator - tracks walk thru present radiotap args + * @this_arg_index: index of current arg, valid after each successful call + * to ieee80211_radiotap_iterator_next() + * @this_arg: pointer to current radiotap arg; it is valid after each + * call to ieee80211_radiotap_iterator_next() but also after + * ieee80211_radiotap_iterator_init() where it will point to + * the beginning of the actual data portion + * @this_arg_size: length of the current arg, for convenience + * @current_namespace: pointer to the current namespace definition + * (or internally %NULL if the current namespace is unknown) + * @is_radiotap_ns: indicates whether the current namespace is the default + * radiotap namespace or not + * + * @overrides: override standard radiotap fields + * @n_overrides: number of overrides + * + * @_rtheader: pointer to the radiotap header we are walking through + * @_max_length: length of radiotap header in cpu byte ordering + * @_arg_index: next argument index + * @_arg: next argument pointer + * @_next_bitmap: internal pointer to next present u32 + * @_bitmap_shifter: internal shifter for curr u32 bitmap, b0 set == arg present + * @_vns: vendor namespace definitions + * @_next_ns_data: beginning of the next namespace's data + * @_reset_on_ext: internal; reset the arg index to 0 when going to the + * next bitmap word + * + * Describes the radiotap parser state. Fields prefixed with an underscore + * must not be used by users of the parser, only by the parser internally. 
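 *
 * Editorial usage sketch, not part of the original header. It assumes the
 * usual convention that both iterator functions return 0 on success and a
 * negative value on malformed input or when no more arguments are present;
 * IEEE80211_RADIOTAP_RATE comes from ieee80211_radiotap.h, and rthdr, len
 * and rate are caller-provided:
 *
 *	struct ieee80211_radiotap_iterator it;
 *	int ret = ieee80211_radiotap_iterator_init(&it, rthdr, len, NULL);
 *
 *	while (!ret) {
 *		ret = ieee80211_radiotap_iterator_next(&it);
 *		if (ret)
 *			break;
 *		switch (it.this_arg_index) {
 *		case IEEE80211_RADIOTAP_RATE:
 *			rate = *it.this_arg;
 *			break;
 *		default:
 *			break;
 *		}
 *	}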
+ */ + +struct ieee80211_radiotap_iterator { + struct ieee80211_radiotap_header *_rtheader; + const struct ieee80211_radiotap_vendor_namespaces *_vns; + const struct ieee80211_radiotap_namespace *current_namespace; + + unsigned char *_arg, *_next_ns_data; + uint32_t *_next_bitmap; + + unsigned char *this_arg; + int this_arg_index; + int this_arg_size; + + int is_radiotap_ns; + + int _max_length; + int _arg_index; + uint32_t _bitmap_shifter; + int _reset_on_ext; +}; + +extern int ieee80211_radiotap_iterator_init( + struct ieee80211_radiotap_iterator *iterator, + struct ieee80211_radiotap_header *radiotap_header, + int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns); + +extern int ieee80211_radiotap_iterator_next( + struct ieee80211_radiotap_iterator *iterator); + + +extern const unsigned char rfc1042_header[6]; +extern const unsigned char bridge_tunnel_header[6]; + +/** + * ieee80211_get_hdrlen_from_skb - get header length from data + * + * Given an skb with a raw 802.11 header at the data pointer this function + * returns the 802.11 header length in bytes (not including encryption + * headers). If the data in the sk_buff is too short to contain a valid 802.11 + * header the function returns 0. + * + * @skb: the frame + */ +unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); + +/** + * ieee80211_hdrlen - get header length in bytes from frame control + * @fc: frame control field in little-endian format + */ +unsigned int ieee80211_hdrlen(__le16 fc); + +/** + * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 + * @skb: the 802.11 data frame + * @addr: the device MAC address + * @iftype: the virtual interface type + */ +int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype); + +/** + * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11 + * @skb: the 802.3 frame + * @addr: the device MAC address + * @iftype: the virtual interface type + * @bssid: the network bssid (used only for iftype STATION and ADHOC) + * @qos: build 802.11 QoS data frame + */ +int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype, u8 *bssid, bool qos); + +/** + * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame + * + * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of + * 802.3 frames. The @list will be empty if the decode fails. The + * @skb is consumed after the function returns. + * + * @skb: The input IEEE 802.11n A-MSDU frame. + * @list: The output list of 802.3 frames. It must be allocated and + * initialized by by the caller. + * @addr: The device MAC address. + * @iftype: The device interface type. + * @extra_headroom: The hardware extra headroom for SKBs in the @list. + */ +void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, + const u8 *addr, enum nl80211_iftype iftype, + const unsigned int extra_headroom); + +/** + * cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame + * @skb: the data frame + */ +unsigned int cfg80211_classify8021d(struct sk_buff *skb); + +/** + * cfg80211_find_ie - find information element in data + * + * @eid: element ID + * @ies: data consisting of IEs + * @len: length of data + * + * This function will return %NULL if the element ID could + * not be found or if the element is invalid (claims to be + * longer than the given data), or a pointer to the first byte + * of the requested element, that is the byte containing the + * element ID. 
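/*
 * Editor's sketch, not part of the original patch: walking the radiotap
 * arguments of an injected frame with the iterator declared above.
 * IEEE80211_RADIOTAP_RATE comes from ieee80211_radiotap.h later in this
 * patch; ieee80211_radiotap_iterator_next() returns 0 while arguments
 * remain and a negative value when the walk ends or the header is bad.
 */
#include <linux/skbuff.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>

static u8 example_injected_rate(struct sk_buff *skb)
{
	struct ieee80211_radiotap_header *rthdr =
		(struct ieee80211_radiotap_header *)skb->data;
	struct ieee80211_radiotap_iterator it;
	u8 rate = 0;	/* in 500 kb/s units, 0 if the field is absent */
	int ret;

	ret = ieee80211_radiotap_iterator_init(&it, rthdr, skb->len, NULL);
	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&it);
		if (ret)
			continue;
		if (it.this_arg_index == IEEE80211_RADIOTAP_RATE)
			rate = *it.this_arg;
	}

	return rate;
}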
There are no checks on the element length + * other than having to fit into the given data. + */ +const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len); + +/* + * Regulatory helper functions for wiphys + */ + +/** + * regulatory_hint - driver hint to the wireless core a regulatory domain + * @wiphy: the wireless device giving the hint (used only for reporting + * conflicts) + * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain + * should be in. If @rd is set this should be NULL. Note that if you + * set this to NULL you should still set rd->alpha2 to some accepted + * alpha2. + * + * Wireless drivers can use this function to hint to the wireless core + * what it believes should be the current regulatory domain by + * giving it an ISO/IEC 3166 alpha2 country code it knows its regulatory + * domain should be in or by providing a completely build regulatory domain. + * If the driver provides an ISO/IEC 3166 alpha2 userspace will be queried + * for a regulatory domain structure for the respective country. + * + * The wiphy must have been registered to cfg80211 prior to this call. + * For cfg80211 drivers this means you must first use wiphy_register(), + * for mac80211 drivers you must first use ieee80211_register_hw(). + * + * Drivers should check the return value, its possible you can get + * an -ENOMEM. + */ +extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2); + +/** + * wiphy_apply_custom_regulatory - apply a custom driver regulatory domain + * @wiphy: the wireless device we want to process the regulatory domain on + * @regd: the custom regulatory domain to use for this wiphy + * + * Drivers can sometimes have custom regulatory domains which do not apply + * to a specific country. Drivers can use this to apply such custom regulatory + * domains. This routine must be called prior to wiphy registration. The + * custom regulatory domain will be trusted completely and as such previous + * default channel settings will be disregarded. If no rule is found for a + * channel on the regulatory domain the channel will be disabled. + */ +extern void wiphy_apply_custom_regulatory( + struct wiphy *wiphy, + const struct ieee80211_regdomain *regd); + +/** + * freq_reg_info - get regulatory information for the given frequency + * @wiphy: the wiphy for which we want to process this rule for + * @center_freq: Frequency in KHz for which we want regulatory information for + * @desired_bw_khz: the desired max bandwidth you want to use per + * channel. Note that this is still 20 MHz if you want to use HT40 + * as HT40 makes use of two channels for its 40 MHz width bandwidth. + * If set to 0 we'll assume you want the standard 20 MHz. + * @reg_rule: the regulatory rule which we have for this frequency + * + * Use this function to get the regulatory rule for a specific frequency on + * a given wireless device. If the device has a specific regulatory domain + * it wants to follow we respect that unless a country IE has been received + * and processed already. + * + * Returns 0 if it was able to find a valid regulatory rule which does + * apply to the given center_freq otherwise it returns non-zero. It will + * also return -ERANGE if we determine the given center_freq does not even have + * a regulatory rule for a frequency range in the center_freq's band. See + * freq_in_rule_band() for our current definition of a band -- this is purely + * subjective and right now its 802.11 specific. 
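/*
 * Editor's sketch, not part of the original patch: extracting the SSID
 * from an IE buffer with cfg80211_find_ie().  WLAN_EID_SSID is the element
 * ID constant from linux/ieee80211.h; as documented above, the returned
 * pointer starts at the element ID byte, so ie[1] is the element length
 * and ie + 2 the payload.
 */
#include <linux/errno.h>
#include <linux/ieee80211.h>
#include <linux/string.h>
#include <net/cfg80211.h>

static int example_copy_ssid(const u8 *ies, int ies_len,
			     u8 *ssid, size_t ssid_size)
{
	const u8 *ie = cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len);

	if (!ie || ie[1] > ssid_size)
		return -ENOENT;

	memcpy(ssid, ie + 2, ie[1]);
	return ie[1];		/* SSID length in bytes */
}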
+ */ +extern int freq_reg_info(struct wiphy *wiphy, + u32 center_freq, + u32 desired_bw_khz, + const struct ieee80211_reg_rule **reg_rule); + +/* + * Temporary wext handlers & helper functions + * + * In the future cfg80211 will simply assign the entire wext handler + * structure to netdevs it manages, but we're not there yet. + */ +int cfg80211_wext_giwname(struct net_device *dev, + struct iw_request_info *info, + char *name, char *extra); +int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, + u32 *mode, char *extra); +int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info, + u32 *mode, char *extra); +int cfg80211_wext_siwscan(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); +int cfg80211_wext_giwscan(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra); +int cfg80211_wext_siwmlme(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra); +int cfg80211_wext_giwrange(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra); +int cfg80211_wext_siwgenie(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra); +int cfg80211_wext_siwauth(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *data, char *extra); +int cfg80211_wext_giwauth(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *data, char *extra); + +int cfg80211_wext_siwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *freq, char *extra); +int cfg80211_wext_giwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *freq, char *extra); +int cfg80211_wext_siwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid); +int cfg80211_wext_giwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid); +int cfg80211_wext_siwrate(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *rate, char *extra); +int cfg80211_wext_giwrate(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *rate, char *extra); + +int cfg80211_wext_siwrts(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *rts, char *extra); +int cfg80211_wext_giwrts(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *rts, char *extra); +int cfg80211_wext_siwfrag(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *frag, char *extra); +int cfg80211_wext_giwfrag(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *frag, char *extra); +int cfg80211_wext_siwretry(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *retry, char *extra); +int cfg80211_wext_giwretry(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *retry, char *extra); +int cfg80211_wext_siwencodeext(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *erq, char *extra); +int cfg80211_wext_siwencode(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *erq, char *keybuf); +int cfg80211_wext_giwencode(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *erq, char *keybuf); +int cfg80211_wext_siwtxpower(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *data, char *keybuf); +int cfg80211_wext_giwtxpower(struct net_device *dev, + struct 
iw_request_info *info, + union iwreq_data *data, char *keybuf); +struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev); + +int cfg80211_wext_siwpower(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *wrq, char *extra); +int cfg80211_wext_giwpower(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *wrq, char *extra); + +int cfg80211_wext_siwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra); +int cfg80211_wext_giwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra); + +/* + * callbacks for asynchronous cfg80211 methods, notification + * functions and BSS handling helpers + */ + +/** + * cfg80211_scan_done - notify that scan finished + * + * @request: the corresponding scan request + * @aborted: set to true if the scan was aborted for any reason, + * userspace will be notified of that + */ +void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted); + +/** + * cfg80211_inform_bss - inform cfg80211 of a new BSS + * + * @wiphy: the wiphy reporting the BSS + * @bss: the found BSS + * @signal: the signal strength, type depends on the wiphy's signal_type + * @gfp: context flags + * + * This informs cfg80211 that BSS information was found and + * the BSS should be updated/added. + */ +struct cfg80211_bss* +cfg80211_inform_bss_frame(struct wiphy *wiphy, + struct ieee80211_channel *channel, + struct ieee80211_mgmt *mgmt, size_t len, + s32 signal, gfp_t gfp); + +struct cfg80211_bss* +cfg80211_inform_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, + u64 timestamp, u16 capability, u16 beacon_interval, + const u8 *ie, size_t ielen, + s32 signal, gfp_t gfp); + +struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, + const u8 *ssid, size_t ssid_len, + u16 capa_mask, u16 capa_val); +static inline struct cfg80211_bss * +cfg80211_get_ibss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *ssid, size_t ssid_len) +{ + return cfg80211_get_bss(wiphy, channel, NULL, ssid, ssid_len, + WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS); +} + +struct cfg80211_bss *cfg80211_get_mesh(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *meshid, size_t meshidlen, + const u8 *meshcfg); +void cfg80211_put_bss(struct cfg80211_bss *bss); + +/** + * cfg80211_unlink_bss - unlink BSS from internal data structures + * @wiphy: the wiphy + * @bss: the bss to remove + * + * This function removes the given BSS from the internal data structures + * thereby making it no longer show up in scan results etc. Use this + * function when you detect a BSS is gone. Normally BSSes will also time + * out, so it is not necessary to use this function at all. + */ +void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); + +/** + * cfg80211_send_rx_auth - notification of processed authentication + * @dev: network device + * @buf: authentication frame (header + body) + * @len: length of the frame data + * + * This function is called whenever an authentication has been processed in + * station mode. The driver is required to call either this function or + * cfg80211_send_auth_timeout() to indicate the result of cfg80211_ops::auth() + * call. This function may sleep. 
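/*
 * Editor's sketch, not part of the original patch: how a fullmac driver
 * might push one firmware scan result into cfg80211 and then complete the
 * scan request.  struct fw_bss_result and its fields are invented for
 * illustration; the cfg80211 calls follow the declarations above, the
 * signal value must use the units announced by the wiphy's signal type,
 * and the reference returned by cfg80211_inform_bss() is dropped again
 * with cfg80211_put_bss().
 */
#include <linux/if_ether.h>
#include <net/cfg80211.h>

struct fw_bss_result {			/* hypothetical firmware report */
	u8 bssid[ETH_ALEN];
	u64 tsf;
	u16 capability;
	u16 beacon_interval;
	s32 signal;
	int freq;			/* MHz */
	u8 ies[256];
	size_t ies_len;
};

static void example_report_scan(struct wiphy *wiphy,
				struct cfg80211_scan_request *req,
				const struct fw_bss_result *res)
{
	struct ieee80211_channel *chan = ieee80211_get_channel(wiphy, res->freq);
	struct cfg80211_bss *bss;

	if (chan) {
		bss = cfg80211_inform_bss(wiphy, chan, res->bssid, res->tsf,
					  res->capability, res->beacon_interval,
					  res->ies, res->ies_len,
					  res->signal, GFP_KERNEL);
		if (bss)
			cfg80211_put_bss(bss);
	}

	cfg80211_scan_done(req, false);		/* finished, not aborted */
}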
+ */ +void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len); + +/** + * cfg80211_send_auth_timeout - notification of timed out authentication + * @dev: network device + * @addr: The MAC address of the device with which the authentication timed out + * + * This function may sleep. + */ +void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr); + +/** + * __cfg80211_auth_canceled - notify cfg80211 that authentication was canceled + * @dev: network device + * @addr: The MAC address of the device with which the authentication timed out + * + * When a pending authentication had no action yet, the driver may decide + * to not send a deauth frame, but in that case must calls this function + * to tell cfg80211 about this decision. It is only valid to call this + * function within the deauth() callback. + */ +void __cfg80211_auth_canceled(struct net_device *dev, const u8 *addr); + +/** + * cfg80211_send_rx_assoc - notification of processed association + * @dev: network device + * @buf: (re)association response frame (header + body) + * @len: length of the frame data + * + * This function is called whenever a (re)association response has been + * processed in station mode. The driver is required to call either this + * function or cfg80211_send_assoc_timeout() to indicate the result of + * cfg80211_ops::assoc() call. This function may sleep. + */ +void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len); + +/** + * cfg80211_send_assoc_timeout - notification of timed out association + * @dev: network device + * @addr: The MAC address of the device with which the association timed out + * + * This function may sleep. + */ +void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr); + +/** + * cfg80211_send_deauth - notification of processed deauthentication + * @dev: network device + * @buf: deauthentication frame (header + body) + * @len: length of the frame data + * + * This function is called whenever deauthentication has been processed in + * station mode. This includes both received deauthentication frames and + * locally generated ones. This function may sleep. + */ +void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len); + +/** + * __cfg80211_send_deauth - notification of processed deauthentication + * @dev: network device + * @buf: deauthentication frame (header + body) + * @len: length of the frame data + * + * Like cfg80211_send_deauth(), but doesn't take the wdev lock. + */ +void __cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len); + +/** + * cfg80211_send_disassoc - notification of processed disassociation + * @dev: network device + * @buf: disassociation response frame (header + body) + * @len: length of the frame data + * + * This function is called whenever disassociation has been processed in + * station mode. This includes both received disassociation frames and locally + * generated ones. This function may sleep. + */ +void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len); + +/** + * __cfg80211_send_disassoc - notification of processed disassociation + * @dev: network device + * @buf: disassociation response frame (header + body) + * @len: length of the frame data + * + * Like cfg80211_send_disassoc(), but doesn't take the wdev lock. 
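/*
 * Editor's sketch, not part of the original patch: a driver finishing an
 * authentication attempt.  As the kernel-doc above requires, every
 * cfg80211_ops::auth() call is answered with either the received
 * authentication frame or a timeout notification; both calls may sleep,
 * so this is meant to run from process context (e.g. a workqueue).
 */
#include <linux/skbuff.h>
#include <net/cfg80211.h>

static void example_auth_done(struct net_device *dev, const u8 *peer,
			      struct sk_buff *auth_frame)
{
	if (auth_frame)
		cfg80211_send_rx_auth(dev, auth_frame->data, auth_frame->len);
	else
		cfg80211_send_auth_timeout(dev, peer);
}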
+ */ +void __cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, + size_t len); + +/** + * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP) + * @dev: network device + * @addr: The source MAC address of the frame + * @key_type: The key type that the received frame used + * @key_id: Key identifier (0..3) + * @tsc: The TSC value of the frame that generated the MIC failure (6 octets) + * @gfp: allocation flags + * + * This function is called whenever the local MAC detects a MIC failure in a + * received frame. This matches with MLME-MICHAELMICFAILURE.indication() + * primitive. + */ +void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, + enum nl80211_key_type key_type, int key_id, + const u8 *tsc, gfp_t gfp); + +/** + * cfg80211_ibss_joined - notify cfg80211 that device joined an IBSS + * + * @dev: network device + * @bssid: the BSSID of the IBSS joined + * @gfp: allocation flags + * + * This function notifies cfg80211 that the device joined an IBSS or + * switched to a different BSSID. Before this function can be called, + * either a beacon has to have been received from the IBSS, or one of + * the cfg80211_inform_bss{,_frame} functions must have been called + * with the locally generated beacon -- this guarantees that there is + * always a scan result for this IBSS. cfg80211 will handle the rest. + */ +void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp); + +/** + * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state + * @wiphy: the wiphy + * @blocked: block status + */ +void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked); + +/** + * wiphy_rfkill_start_polling - start polling rfkill + * @wiphy: the wiphy + */ +void wiphy_rfkill_start_polling(struct wiphy *wiphy); + +/** + * wiphy_rfkill_stop_polling - stop polling rfkill + * @wiphy: the wiphy + */ +void wiphy_rfkill_stop_polling(struct wiphy *wiphy); + +#ifdef CONFIG_NL80211_TESTMODE +/** + * cfg80211_testmode_alloc_reply_skb - allocate testmode reply + * @wiphy: the wiphy + * @approxlen: an upper bound of the length of the data that will + * be put into the skb + * + * This function allocates and pre-fills an skb for a reply to + * the testmode command. Since it is intended for a reply, calling + * it outside of the @testmode_cmd operation is invalid. + * + * The returned skb (or %NULL if any errors happen) is pre-filled + * with the wiphy index and set up in a way that any data that is + * put into the skb (with skb_put(), nla_put() or similar) will end + * up being within the %NL80211_ATTR_TESTDATA attribute, so all that + * needs to be done with the skb is adding data for the corresponding + * userspace tool which can then read that data out of the testdata + * attribute. You must not modify the skb in any other way. + * + * When done, call cfg80211_testmode_reply() with the skb and return + * its error code as the result of the @testmode_cmd operation. + */ +struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, + int approxlen); + +/** + * cfg80211_testmode_reply - send the reply skb + * @skb: The skb, must have been allocated with + * cfg80211_testmode_alloc_reply_skb() + * + * Returns an error code or 0 on success, since calling this + * function will usually be the last thing before returning + * from the @testmode_cmd you should return the error code. + * Note that this function consumes the skb regardless of the + * return value. 
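/*
 * Editor's sketch, not part of the original patch: reporting a TKIP
 * Michael MIC failure from a driver RX path, matching the declaration
 * above.  NL80211_KEYTYPE_PAIRWISE is the nl80211 key-type constant
 * assumed here; extracting the source address, key index and TSC from the
 * frame is left to the caller since that part is hardware specific.
 */
#include <net/cfg80211.h>

static void example_report_mic_failure(struct net_device *dev,
				       const u8 *src_addr, int key_id,
				       const u8 tsc[6])
{
	/* GFP_ATOMIC because RX completion usually runs in softirq context */
	cfg80211_michael_mic_failure(dev, src_addr, NL80211_KEYTYPE_PAIRWISE,
				     key_id, tsc, GFP_ATOMIC);
}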
+ */ +int cfg80211_testmode_reply(struct sk_buff *skb); + +/** + * cfg80211_testmode_alloc_event_skb - allocate testmode event + * @wiphy: the wiphy + * @approxlen: an upper bound of the length of the data that will + * be put into the skb + * @gfp: allocation flags + * + * This function allocates and pre-fills an skb for an event on the + * testmode multicast group. + * + * The returned skb (or %NULL if any errors happen) is set up in the + * same way as with cfg80211_testmode_alloc_reply_skb() but prepared + * for an event. As there, you should simply add data to it that will + * then end up in the %NL80211_ATTR_TESTDATA attribute. Again, you must + * not modify the skb in any other way. + * + * When done filling the skb, call cfg80211_testmode_event() with the + * skb to send the event. + */ +struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, + int approxlen, gfp_t gfp); + +/** + * cfg80211_testmode_event - send the event + * @skb: The skb, must have been allocated with + * cfg80211_testmode_alloc_event_skb() + * @gfp: allocation flags + * + * This function sends the given @skb, which must have been allocated + * by cfg80211_testmode_alloc_event_skb(), as an event. It always + * consumes it. + */ +void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp); + +#define CFG80211_TESTMODE_CMD(cmd) .testmode_cmd = (cmd), +#else +#define CFG80211_TESTMODE_CMD(cmd) +#endif + +/** + * cfg80211_connect_result - notify cfg80211 of connection result + * + * @dev: network device + * @bssid: the BSSID of the AP + * @req_ie: association request IEs (maybe be %NULL) + * @req_ie_len: association request IEs length + * @resp_ie: association response IEs (may be %NULL) + * @resp_ie_len: assoc response IEs length + * @status: status code, 0 for successful connection, use + * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you + * the real status code for failures. + * @gfp: allocation flags + * + * It should be called by the underlying driver whenever connect() has + * succeeded. + */ +void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, + u16 status, gfp_t gfp); + +/** + * cfg80211_roamed - notify cfg80211 of roaming + * + * @dev: network device + * @bssid: the BSSID of the new AP + * @req_ie: association request IEs (maybe be %NULL) + * @req_ie_len: association request IEs length + * @resp_ie: association response IEs (may be %NULL) + * @resp_ie_len: assoc response IEs length + * @gfp: allocation flags + * + * It should be called by the underlying driver whenever it roamed + * from one AP to another while connected. + */ +void cfg80211_roamed(struct net_device *dev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); + +/** + * cfg80211_disconnected - notify cfg80211 that connection was dropped + * + * @dev: network device + * @ie: information elements of the deauth/disassoc frame (may be %NULL) + * @ie_len: length of IEs + * @reason: reason code for the disconnection, set it to 0 if unknown + * @gfp: allocation flags + * + * After it calls this function, the driver should enter an idle state + * and not try to connect to any AP any more. 
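/*
 * Editor's sketch, not part of the original patch: a minimal testmode
 * command handler replying with a single u32, following the flow the
 * kernel-doc above describes.  EXAMPLE_TM_ATTR_FW_VERSION and the reported
 * value are invented; the (wiphy, data, len) callback shape is an
 * assumption about cfg80211_ops::testmode_cmd in this tree, and the whole
 * thing only exists under CONFIG_NL80211_TESTMODE.
 */
#include <linux/errno.h>
#include <net/cfg80211.h>
#include <net/netlink.h>

enum { EXAMPLE_TM_ATTR_FW_VERSION = 1 };	/* hypothetical attribute */

static int example_testmode_cmd(struct wiphy *wiphy, void *data, int len)
{
	struct sk_buff *reply;

	reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(u32));
	if (!reply)
		return -ENOMEM;

	if (nla_put_u32(reply, EXAMPLE_TM_ATTR_FW_VERSION, 0x0100)) {
		kfree_skb(reply);
		return -ENOBUFS;
	}

	/* consumes the skb and returns the netlink status code */
	return cfg80211_testmode_reply(reply);
}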
+ */ +void cfg80211_disconnected(struct net_device *dev, u16 reason, + u8 *ie, size_t ie_len, gfp_t gfp); + +/** + * cfg80211_ready_on_channel - notification of remain_on_channel start + * @dev: network device + * @cookie: the request cookie + * @chan: The current channel (from remain_on_channel request) + * @channel_type: Channel type + * @duration: Duration in milliseconds that the driver intents to remain on the + * channel + * @gfp: allocation flags + */ +void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int duration, gfp_t gfp); + +/** + * cfg80211_remain_on_channel_expired - remain_on_channel duration expired + * @dev: network device + * @cookie: the request cookie + * @chan: The current channel (from remain_on_channel request) + * @channel_type: Channel type + * @gfp: allocation flags + */ +void cfg80211_remain_on_channel_expired(struct net_device *dev, + u64 cookie, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + gfp_t gfp); + + +/** + * cfg80211_new_sta - notify userspace about station + * + * @dev: the netdev + * @mac_addr: the station's address + * @sinfo: the station information + * @gfp: allocation flags + */ +void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, + struct station_info *sinfo, gfp_t gfp); + +/** + * cfg80211_rx_action - notification of received, unprocessed Action frame + * @dev: network device + * @freq: Frequency on which the frame was received in MHz + * @buf: Action frame (header + body) + * @len: length of the frame data + * @gfp: context flags + * Returns %true if a user space application is responsible for rejecting the + * unrecognized Action frame; %false if no such application is registered + * (i.e., the driver is responsible for rejecting the unrecognized Action + * frame) + * + * This function is called whenever an Action frame is received for a station + * mode interface, but is not processed in kernel. + */ +bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, + size_t len, gfp_t gfp); + +/** + * cfg80211_action_tx_status - notification of TX status for Action frame + * @dev: network device + * @cookie: Cookie returned by cfg80211_ops::action() + * @buf: Action frame (header + body) + * @len: length of the frame data + * @ack: Whether frame was acknowledged + * @gfp: context flags + * + * This function is called whenever an Action frame was requested to be + * transmitted with cfg80211_ops::action() to report the TX status of the + * transmission attempt. + */ +void cfg80211_action_tx_status(struct net_device *dev, u64 cookie, + const u8 *buf, size_t len, bool ack, gfp_t gfp); + +#endif /* __NET_CFG80211_H */ diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h new file mode 100644 index 0000000..af49f8a --- /dev/null +++ b/include/net/ieee80211_radiotap.h @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2003, 2004 David Young. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of David Young may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID + * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + */ + +/* + * Modifications to fit into the linux IEEE 802.11 stack, + * Mike Kershaw (dragorn@kismetwireless.net) + */ + +#ifndef IEEE80211RADIOTAP_H +#define IEEE80211RADIOTAP_H + +#include +#include +#include + +/* Base version of the radiotap packet header data */ +#define PKTHDR_RADIOTAP_VERSION 0 + +/* A generic radio capture format is desirable. There is one for + * Linux, but it is neither rigidly defined (there were not even + * units given for some fields) nor easily extensible. + * + * I suggest the following extensible radio capture format. It is + * based on a bitmap indicating which fields are present. + * + * I am trying to describe precisely what the application programmer + * should expect in the following, and for that reason I tell the + * units and origin of each measurement (where it applies), or else I + * use sufficiently weaselly language ("is a monotonically nondecreasing + * function of...") that I cannot set false expectations for lawyerly + * readers. + */ + +/* + * The radio capture header precedes the 802.11 header. + * All data in the header is little endian on all platforms. + */ +struct ieee80211_radiotap_header { + u8 it_version; /* Version 0. Only increases + * for drastic changes, + * introduction of compatible + * new fields does not count. + */ + u8 it_pad; + __le16 it_len; /* length of the whole + * header in bytes, including + * it_version, it_pad, + * it_len, and data fields. + */ + __le32 it_present; /* A bitmap telling which + * fields are present. Set bit 31 + * (0x80000000) to extend the + * bitmap by another 32 bits. + * Additional extensions are made + * by setting bit 31. + */ +} __packed; + +/* Name Data type Units + * ---- --------- ----- + * + * IEEE80211_RADIOTAP_TSFT __le64 microseconds + * + * Value in microseconds of the MAC's 64-bit 802.11 Time + * Synchronization Function timer when the first bit of the + * MPDU arrived at the MAC. For received frames, only. + * + * IEEE80211_RADIOTAP_CHANNEL 2 x __le16 MHz, bitmap + * + * Tx/Rx frequency in MHz, followed by flags (see below). + * + * IEEE80211_RADIOTAP_FHSS __le16 see below + * + * For frequency-hopping radios, the hop set (first byte) + * and pattern (second byte). + * + * IEEE80211_RADIOTAP_RATE u8 500kb/s + * + * Tx/Rx data rate + * + * IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8 decibels from + * one milliwatt (dBm) + * + * RF signal power at the antenna, decibel difference from + * one milliwatt. + * + * IEEE80211_RADIOTAP_DBM_ANTNOISE s8 decibels from + * one milliwatt (dBm) + * + * RF noise power at the antenna, decibel difference from one + * milliwatt. 
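/*
 * Editor's sketch, not part of the original patch: prepending the smallest
 * possible radiotap header (version 0, no optional fields) to a frame,
 * e.g. before handing it to a monitor interface.  All multi-byte fields
 * are little endian as the structure comment above states; the caller is
 * assumed to have reserved at least sizeof(*rthdr) bytes of headroom.
 */
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/ieee80211_radiotap.h>

static void example_push_empty_radiotap(struct sk_buff *skb)
{
	struct ieee80211_radiotap_header *rthdr;

	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, sizeof(*rthdr));
	memset(rthdr, 0, sizeof(*rthdr));
	rthdr->it_version = PKTHDR_RADIOTAP_VERSION;
	rthdr->it_len = cpu_to_le16(sizeof(*rthdr));	/* 8 bytes, header only */
	/* it_present stays 0: no argument bitmap extensions, no fields */
}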
+ * + * IEEE80211_RADIOTAP_DB_ANTSIGNAL u8 decibel (dB) + * + * RF signal power at the antenna, decibel difference from an + * arbitrary, fixed reference. + * + * IEEE80211_RADIOTAP_DB_ANTNOISE u8 decibel (dB) + * + * RF noise power at the antenna, decibel difference from an + * arbitrary, fixed reference point. + * + * IEEE80211_RADIOTAP_LOCK_QUALITY __le16 unitless + * + * Quality of Barker code lock. Unitless. Monotonically + * nondecreasing with "better" lock strength. Called "Signal + * Quality" in datasheets. (Is there a standard way to measure + * this?) + * + * IEEE80211_RADIOTAP_TX_ATTENUATION __le16 unitless + * + * Transmit power expressed as unitless distance from max + * power set at factory calibration. 0 is max power. + * Monotonically nondecreasing with lower power levels. + * + * IEEE80211_RADIOTAP_DB_TX_ATTENUATION __le16 decibels (dB) + * + * Transmit power expressed as decibel distance from max power + * set at factory calibration. 0 is max power. Monotonically + * nondecreasing with lower power levels. + * + * IEEE80211_RADIOTAP_DBM_TX_POWER s8 decibels from + * one milliwatt (dBm) + * + * Transmit power expressed as dBm (decibels from a 1 milliwatt + * reference). This is the absolute power level measured at + * the antenna port. + * + * IEEE80211_RADIOTAP_FLAGS u8 bitmap + * + * Properties of transmitted and received frames. See flags + * defined below. + * + * IEEE80211_RADIOTAP_ANTENNA u8 antenna index + * + * Unitless indication of the Rx/Tx antenna for this packet. + * The first antenna is antenna 0. + * + * IEEE80211_RADIOTAP_RX_FLAGS __le16 bitmap + * + * Properties of received frames. See flags defined below. + * + * IEEE80211_RADIOTAP_TX_FLAGS __le16 bitmap + * + * Properties of transmitted frames. See flags defined below. + * + * IEEE80211_RADIOTAP_RTS_RETRIES u8 data + * + * Number of rts retries a transmitted frame used. + * + * IEEE80211_RADIOTAP_DATA_RETRIES u8 data + * + * Number of unicast retries a transmitted frame used. + * + */ +enum ieee80211_radiotap_type { + IEEE80211_RADIOTAP_TSFT = 0, + IEEE80211_RADIOTAP_FLAGS = 1, + IEEE80211_RADIOTAP_RATE = 2, + IEEE80211_RADIOTAP_CHANNEL = 3, + IEEE80211_RADIOTAP_FHSS = 4, + IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5, + IEEE80211_RADIOTAP_DBM_ANTNOISE = 6, + IEEE80211_RADIOTAP_LOCK_QUALITY = 7, + IEEE80211_RADIOTAP_TX_ATTENUATION = 8, + IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9, + IEEE80211_RADIOTAP_DBM_TX_POWER = 10, + IEEE80211_RADIOTAP_ANTENNA = 11, + IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12, + IEEE80211_RADIOTAP_DB_ANTNOISE = 13, + IEEE80211_RADIOTAP_RX_FLAGS = 14, + IEEE80211_RADIOTAP_TX_FLAGS = 15, + IEEE80211_RADIOTAP_RTS_RETRIES = 16, + IEEE80211_RADIOTAP_DATA_RETRIES = 17, + + /* valid in every it_present bitmap, even vendor namespaces */ + IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, + IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30, + IEEE80211_RADIOTAP_EXT = 31 +}; + +/* Channel flags. */ +#define IEEE80211_CHAN_TURBO 0x0010 /* Turbo channel */ +#define IEEE80211_CHAN_CCK 0x0020 /* CCK channel */ +#define IEEE80211_CHAN_OFDM 0x0040 /* OFDM channel */ +#define IEEE80211_CHAN_2GHZ 0x0080 /* 2 GHz spectrum channel. 
*/ +#define IEEE80211_CHAN_5GHZ 0x0100 /* 5 GHz spectrum channel */ +#define IEEE80211_CHAN_PASSIVE 0x0200 /* Only passive scan allowed */ +#define IEEE80211_CHAN_DYN 0x0400 /* Dynamic CCK-OFDM channel */ +#define IEEE80211_CHAN_GFSK 0x0800 /* GFSK channel (FHSS PHY) */ + +/* For IEEE80211_RADIOTAP_FLAGS */ +#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received + * during CFP + */ +#define IEEE80211_RADIOTAP_F_SHORTPRE 0x02 /* sent/received + * with short + * preamble + */ +#define IEEE80211_RADIOTAP_F_WEP 0x04 /* sent/received + * with WEP encryption + */ +#define IEEE80211_RADIOTAP_F_FRAG 0x08 /* sent/received + * with fragmentation + */ +#define IEEE80211_RADIOTAP_F_FCS 0x10 /* frame includes FCS */ +#define IEEE80211_RADIOTAP_F_DATAPAD 0x20 /* frame has padding between + * 802.11 header and payload + * (to 32-bit boundary) + */ +#define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* bad FCS */ + +/* For IEEE80211_RADIOTAP_RX_FLAGS */ +#define IEEE80211_RADIOTAP_F_RX_BADPLCP 0x0002 /* frame has bad PLCP */ + +/* For IEEE80211_RADIOTAP_TX_FLAGS */ +#define IEEE80211_RADIOTAP_F_TX_FAIL 0x0001 /* failed due to excessive + * retries */ +#define IEEE80211_RADIOTAP_F_TX_CTS 0x0002 /* used cts 'protection' */ +#define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */ + +/* Ugly macro to convert literal channel numbers into their mhz equivalents + * There are certianly some conditions that will break this (like feeding it '30') + * but they shouldn't arise since nothing talks on channel 30. */ +#define ieee80211chan2mhz(x) \ + (((x) <= 14) ? \ + (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \ + ((x) + 1000) * 5) + +/* helpers */ +static inline int ieee80211_get_radiotap_len(unsigned char *data) +{ + struct ieee80211_radiotap_header *hdr = + (struct ieee80211_radiotap_header *)data; + + return get_unaligned_le16(&hdr->it_len); +} + +#endif /* IEEE80211_RADIOTAP_H */ diff --git a/include/net/lib80211.h b/include/net/lib80211.h new file mode 100644 index 0000000..fb4e278 --- /dev/null +++ b/include/net/lib80211.h @@ -0,0 +1,129 @@ +/* + * lib80211.h -- common bits for IEEE802.11 wireless drivers + * + * Copyright (c) 2008, John W. Linville + * + * Some bits copied from old ieee80211 component, w/ original copyright + * notices below: + * + * Original code based on Host AP (software wireless LAN access point) driver + * for Intersil Prism2/2.5/3. + * + * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen + * + * Copyright (c) 2002-2003, Jouni Malinen + * + * Adaption to a generic IEEE 802.11 stack by James Ketrenos + * + * + * Copyright (c) 2004, Intel Corporation + * + */ + +#ifndef LIB80211_H +#define LIB80211_H + +#include +#include +#include +#include +#include +#include +#include +#include +/* print_ssid() is intended to be used in debug (and possibly error) + * messages. It should never be used for passing ssid to user space. 
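/*
 * Editor's sketch, not part of the original patch: using the two helpers
 * above -- ieee80211_get_radiotap_len() to skip over a radiotap header to
 * the 802.11 frame behind it, and ieee80211chan2mhz() for a quick
 * channel-number-to-frequency conversion (channel 6 -> 2437 MHz).
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/ieee80211_radiotap.h>

static unsigned char *example_dot11_start(struct sk_buff *skb, int chan)
{
	int rtap_len = ieee80211_get_radiotap_len(skb->data);

	if (rtap_len < (int)sizeof(struct ieee80211_radiotap_header) ||
	    rtap_len > skb->len)
		return NULL;		/* truncated or bogus radiotap header */

	pr_debug("frame received on %d MHz\n", ieee80211chan2mhz(chan));
	return skb->data + rtap_len;
}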
*/ +const char *print_ssid(char *buf, const char *ssid, u8 ssid_len); +#define DECLARE_SSID_BUF(var) char var[IEEE80211_MAX_SSID_LEN * 4 + 1] __maybe_unused + +#define NUM_WEP_KEYS 4 + +enum { + IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0), +}; + +struct lib80211_crypto_ops { + const char *name; + struct list_head list; + + /* init new crypto context (e.g., allocate private data space, + * select IV, etc.); returns NULL on failure or pointer to allocated + * private data on success */ + void *(*init) (int keyidx); + + /* deinitialize crypto context and free allocated private data */ + void (*deinit) (void *priv); + + int (*build_iv) (struct sk_buff * skb, int hdr_len, + u8 *key, int keylen, void *priv); + + /* encrypt/decrypt return < 0 on error or >= 0 on success. The return + * value from decrypt_mpdu is passed as the keyidx value for + * decrypt_msdu. skb must have enough head and tail room for the + * encryption; if not, error will be returned; these functions are + * called for all MPDUs (i.e., fragments). + */ + int (*encrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv); + int (*decrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv); + + /* These functions are called for full MSDUs, i.e. full frames. + * These can be NULL if full MSDU operations are not needed. */ + int (*encrypt_msdu) (struct sk_buff * skb, int hdr_len, void *priv); + int (*decrypt_msdu) (struct sk_buff * skb, int keyidx, int hdr_len, + void *priv); + + int (*set_key) (void *key, int len, u8 * seq, void *priv); + int (*get_key) (void *key, int len, u8 * seq, void *priv); + + /* procfs handler for printing out key information and possible + * statistics */ + char *(*print_stats) (char *p, void *priv); + + /* Crypto specific flag get/set for configuration settings */ + unsigned long (*get_flags) (void *priv); + unsigned long (*set_flags) (unsigned long flags, void *priv); + + /* maximum number of bytes added by encryption; encrypt buf is + * allocated with extra_prefix_len bytes, copy of in_buf, and + * extra_postfix_len; encrypt need not use all this space, but + * the result must start at the beginning of the buffer and correct + * length must be returned */ + int extra_mpdu_prefix_len, extra_mpdu_postfix_len; + int extra_msdu_prefix_len, extra_msdu_postfix_len; + + struct module *owner; +}; + +struct lib80211_crypt_data { + struct list_head list; /* delayed deletion list */ + struct lib80211_crypto_ops *ops; + void *priv; + atomic_t refcnt; +}; + +struct lib80211_crypt_info { + char *name; + /* Most clients will already have a lock, + so just point to that. 
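/*
 * Editor's sketch, not part of the original patch: the skeleton of a
 * lib80211 cipher built around the ops structure above.  Only init/deinit
 * and set_key are filled in; a real cipher also provides encrypt_mpdu/
 * decrypt_mpdu and the extra prefix/postfix lengths.  "example-null" and
 * struct example_key are invented names.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/lib80211.h>

struct example_key {			/* per-key private state */
	int keyidx;
	u8 key[16];
	int keylen;
};

static void *example_crypto_init(int keyidx)
{
	struct example_key *priv = kzalloc(sizeof(*priv), GFP_ATOMIC);

	if (priv)
		priv->keyidx = keyidx;
	return priv;			/* NULL on failure, as required above */
}

static void example_crypto_deinit(void *priv)
{
	kfree(priv);
}

static int example_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct example_key *ek = priv;

	if (len > (int)sizeof(ek->key))
		return -EINVAL;
	memcpy(ek->key, key, len);
	ek->keylen = len;
	return 0;
}

static struct lib80211_crypto_ops example_crypto_ops = {
	.name		= "example-null",
	.init		= example_crypto_init,
	.deinit		= example_crypto_deinit,
	.set_key	= example_set_key,
	.owner		= THIS_MODULE,
};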
*/ + spinlock_t *lock; + + struct lib80211_crypt_data *crypt[NUM_WEP_KEYS]; + int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */ + struct list_head crypt_deinit_list; + struct timer_list crypt_deinit_timer; + int crypt_quiesced; +}; + +int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name, + spinlock_t *lock); +void lib80211_crypt_info_free(struct lib80211_crypt_info *info); +int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops); +int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops); +struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name); +void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *, int); +void lib80211_crypt_deinit_handler(unsigned long); +void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info, + struct lib80211_crypt_data **crypt); +void lib80211_crypt_quiescing(struct lib80211_crypt_info *info); + +#endif /* LIB80211_H */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h new file mode 100644 index 0000000..45d7d44 --- /dev/null +++ b/include/net/mac80211.h @@ -0,0 +1,2536 @@ +/* + * mac80211 <-> driver interface + * + * Copyright 2002-2005, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007-2010 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MAC80211_H +#define MAC80211_H + +#include +#include +#include +#include +#include +#include +#include + +/** + * DOC: Introduction + * + * mac80211 is the Linux stack for 802.11 hardware that implements + * only partial functionality in hard- or firmware. This document + * defines the interface between mac80211 and low-level hardware + * drivers. + */ + +/** + * DOC: Calling mac80211 from interrupts + * + * Only ieee80211_tx_status_irqsafe() and ieee80211_rx_irqsafe() can be + * called in hardware interrupt context. The low-level driver must not call any + * other functions in hardware interrupt context. If there is a need for such + * call, the low-level driver should first ACK the interrupt and perform the + * IEEE 802.11 code call after this, e.g. from a scheduled workqueue or even + * tasklet function. + * + * NOTE: If the driver opts to use the _irqsafe() functions, it may not also + * use the non-IRQ-safe functions! + */ + +/** + * DOC: Warning + * + * If you're reading this document and not the header file itself, it will + * be incomplete because not all documentation has been converted yet. + */ + +/** + * DOC: Frame format + * + * As a general rule, when frames are passed between mac80211 and the driver, + * they start with the IEEE 802.11 header and include the same octets that are + * sent over the air except for the FCS which should be calculated by the + * hardware. + * + * There are, however, various exceptions to this rule for advanced features: + * + * The first exception is for hardware encryption and decryption offload + * where the IV/ICV may or may not be generated in hardware. + * + * Secondly, when the hardware handles fragmentation, the frame handed to + * the driver from mac80211 is the MSDU, not the MPDU. + * + * Finally, for received frames, the driver is able to indicate that it has + * filled a radiotap header and put that in front of the frame; if it does + * not do so then mac80211 may add this under certain circumstances. 
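/*
 * Editor's sketch, not part of the original patch and belonging to the
 * lib80211 block above: module init/exit hooks that register and
 * unregister the example_crypto_ops skeleton from the previous sketch,
 * the same pattern the in-tree lib80211 cipher modules follow.
 */
#include <linux/module.h>
#include <net/lib80211.h>

static int __init example_crypto_module_init(void)
{
	return lib80211_register_crypto_ops(&example_crypto_ops);
}

static void __exit example_crypto_module_exit(void)
{
	lib80211_unregister_crypto_ops(&example_crypto_ops);
}

module_init(example_crypto_module_init);
module_exit(example_crypto_module_exit);
MODULE_LICENSE("GPL");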
+ */ + +/** + * DOC: mac80211 workqueue + * + * mac80211 provides its own workqueue for drivers and internal mac80211 use. + * The workqueue is a single threaded workqueue and can only be accessed by + * helpers for sanity checking. Drivers must ensure all work added onto the + * mac80211 workqueue should be cancelled on the driver stop() callback. + * + * mac80211 will flushed the workqueue upon interface removal and during + * suspend. + * + * All work performed on the mac80211 workqueue must not acquire the RTNL lock. + * + */ + +/** + * enum ieee80211_max_queues - maximum number of queues + * + * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues. + */ +enum ieee80211_max_queues { + IEEE80211_MAX_QUEUES = 4, +}; + +/** + * struct ieee80211_tx_queue_params - transmit queue configuration + * + * The information provided in this structure is required for QoS + * transmit queue configuration. Cf. IEEE 802.11 7.3.2.29. + * + * @aifs: arbitration interframe space [0..255] + * @cw_min: minimum contention window [a value of the form + * 2^n-1 in the range 1..32767] + * @cw_max: maximum contention window [like @cw_min] + * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled + * @uapsd: is U-APSD mode enabled for the queue + */ +struct ieee80211_tx_queue_params { + u16 txop; + u16 cw_min; + u16 cw_max; + u8 aifs; + bool uapsd; +}; + +struct ieee80211_low_level_stats { + unsigned int dot11ACKFailureCount; + unsigned int dot11RTSFailureCount; + unsigned int dot11FCSErrorCount; + unsigned int dot11RTSSuccessCount; +}; + +/** + * enum ieee80211_bss_change - BSS change notification flags + * + * These flags are used with the bss_info_changed() callback + * to indicate which BSS parameter changed. + * + * @BSS_CHANGED_ASSOC: association status changed (associated/disassociated), + * also implies a change in the AID. + * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed + * @BSS_CHANGED_ERP_PREAMBLE: preamble changed + * @BSS_CHANGED_ERP_SLOT: slot timing changed + * @BSS_CHANGED_HT: 802.11n parameters changed + * @BSS_CHANGED_BASIC_RATES: Basic rateset changed + * @BSS_CHANGED_BEACON_INT: Beacon interval changed + * @BSS_CHANGED_BSSID: BSSID changed, for whatever + * reason (IBSS and managed mode) + * @BSS_CHANGED_BEACON: Beacon data changed, retrieve + * new beacon (beaconing modes) + * @BSS_CHANGED_BEACON_ENABLED: Beaconing should be + * enabled/disabled (beaconing modes) + */ +enum ieee80211_bss_change { + BSS_CHANGED_ASSOC = 1<<0, + BSS_CHANGED_ERP_CTS_PROT = 1<<1, + BSS_CHANGED_ERP_PREAMBLE = 1<<2, + BSS_CHANGED_ERP_SLOT = 1<<3, + BSS_CHANGED_HT = 1<<4, + BSS_CHANGED_BASIC_RATES = 1<<5, + BSS_CHANGED_BEACON_INT = 1<<6, + BSS_CHANGED_BSSID = 1<<7, + BSS_CHANGED_BEACON = 1<<8, + BSS_CHANGED_BEACON_ENABLED = 1<<9, +}; + +/** + * struct ieee80211_bss_conf - holds the BSS's changing parameters + * + * This structure keeps information about a BSS (and an association + * to that BSS) that can change during the lifetime of the BSS. 
+ * + * @assoc: association status + * @aid: association ID number, valid only when @assoc is true + * @use_cts_prot: use CTS protection + * @use_short_preamble: use 802.11b short preamble; + * if the hardware cannot handle this it must set the + * IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE hardware flag + * @use_short_slot: use short slot time (only relevant for ERP); + * if the hardware cannot handle this it must set the + * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag + * @dtim_period: num of beacons before the next DTIM, for beaconing, + * not valid in station mode (cf. hw conf ps_dtim_period) + * @timestamp: beacon timestamp + * @beacon_int: beacon interval + * @assoc_capability: capabilities taken from assoc resp + * @basic_rates: bitmap of basic rates, each bit stands for an + * index into the rate table configured by the driver in + * the current band. + * @bssid: The BSSID for this BSS + * @enable_beacon: whether beaconing should be enabled or not + * @ht_operation_mode: HT operation mode (like in &struct ieee80211_ht_info). + * This field is only valid when the channel type is one of the HT types. + */ +struct ieee80211_bss_conf { + const u8 *bssid; + /* association related data */ + bool assoc; + u16 aid; + /* erp related data */ + bool use_cts_prot; + bool use_short_preamble; + bool use_short_slot; + bool enable_beacon; + u8 dtim_period; + u16 beacon_int; + u16 assoc_capability; + u64 timestamp; + u32 basic_rates; + u16 ht_operation_mode; +}; + +/** + * enum mac80211_tx_control_flags - flags to describe transmission information/status + * + * These flags are used with the @flags member of &ieee80211_tx_info. + * + * @IEEE80211_TX_CTL_REQ_TX_STATUS: require TX status callback for this frame. + * @IEEE80211_TX_CTL_ASSIGN_SEQ: The driver has to assign a sequence + * number to this frame, taking care of not overwriting the fragment + * number and increasing the sequence number only when the + * IEEE80211_TX_CTL_FIRST_FRAGMENT flag is set. mac80211 will properly + * assign sequence numbers to QoS-data frames but cannot do so correctly + * for non-QoS-data and management frames because beacons need them from + * that counter as well and mac80211 cannot guarantee proper sequencing. + * If this flag is set, the driver should instruct the hardware to + * assign a sequence number to the frame or assign one itself. Cf. IEEE + * 802.11-2007 7.1.3.4.1 paragraph 3. This flag will always be set for + * beacons and always be clear for frames without a sequence number field. + * @IEEE80211_TX_CTL_NO_ACK: tell the low level not to wait for an ack + * @IEEE80211_TX_CTL_CLEAR_PS_FILT: clear powersave filter for destination + * station + * @IEEE80211_TX_CTL_FIRST_FRAGMENT: this is a first fragment of the frame + * @IEEE80211_TX_CTL_SEND_AFTER_DTIM: send this frame after DTIM beacon + * @IEEE80211_TX_CTL_AMPDU: this frame should be sent as part of an A-MPDU + * @IEEE80211_TX_CTL_INJECTED: Frame was injected, internal to mac80211. + * @IEEE80211_TX_STAT_TX_FILTERED: The frame was not transmitted + * because the destination STA was in powersave mode. Note that to + * avoid race conditions, the filter must be set by the hardware or + * firmware upon receiving a frame that indicates that the station + * went to sleep (must be done on device to filter frames already on + * the queue) and may only be unset after mac80211 gives the OK for + * that by setting the IEEE80211_TX_CTL_CLEAR_PS_FILT (see above), + * since only then is it guaranteed that no more frames are in the + * hardware queue. 
+ * @IEEE80211_TX_STAT_ACK: Frame was acknowledged + * @IEEE80211_TX_STAT_AMPDU: The frame was aggregated, so status + * is for the whole aggregation. + * @IEEE80211_TX_STAT_AMPDU_NO_BACK: no block ack was returned, + * so consider using block ack request (BAR). + * @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be + * set by rate control algorithms to indicate probe rate, will + * be cleared for fragmented frames (except on the last fragment) + * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211, + * used to indicate that a pending frame requires TX processing before + * it can be sent out. + * @IEEE80211_TX_INTFL_RETRIED: completely internal to mac80211, + * used to indicate that a frame was already retried due to PS + * @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211, + * used to indicate frame should not be encrypted + * @IEEE80211_TX_CTL_PSPOLL_RESPONSE: (internal?) + * This frame is a response to a PS-poll frame and should be sent + * although the station is in powersave mode. + * @IEEE80211_TX_CTL_MORE_FRAMES: More frames will be passed to the + * transmit function after the current frame, this can be used + * by drivers to kick the DMA queue only if unset or when the + * queue gets full. + * @IEEE80211_TX_INTFL_RETRANSMISSION: This frame is being retransmitted + * after TX status because the destination was asleep, it must not + * be modified again (no seqno assignment, crypto, etc.) + * @IEEE80211_TX_INTFL_HAS_RADIOTAP: This frame was injected and still + * has a radiotap header at skb->data. + * @IEEE80211_TX_INTFL_NL80211_FRAME_TX: Frame was requested through nl80211 + * MLME command (internal to mac80211 to figure out whether to send TX + * status to user space) + */ +enum mac80211_tx_control_flags { + IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), + IEEE80211_TX_CTL_ASSIGN_SEQ = BIT(1), + IEEE80211_TX_CTL_NO_ACK = BIT(2), + IEEE80211_TX_CTL_CLEAR_PS_FILT = BIT(3), + IEEE80211_TX_CTL_FIRST_FRAGMENT = BIT(4), + IEEE80211_TX_CTL_SEND_AFTER_DTIM = BIT(5), + IEEE80211_TX_CTL_AMPDU = BIT(6), + IEEE80211_TX_CTL_INJECTED = BIT(7), + IEEE80211_TX_STAT_TX_FILTERED = BIT(8), + IEEE80211_TX_STAT_ACK = BIT(9), + IEEE80211_TX_STAT_AMPDU = BIT(10), + IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11), + IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12), + IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14), + IEEE80211_TX_INTFL_RETRIED = BIT(15), + IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16), + IEEE80211_TX_CTL_PSPOLL_RESPONSE = BIT(17), + IEEE80211_TX_CTL_MORE_FRAMES = BIT(18), + IEEE80211_TX_INTFL_RETRANSMISSION = BIT(19), + IEEE80211_TX_INTFL_HAS_RADIOTAP = BIT(20), + IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21), +}; + +/** + * enum mac80211_rate_control_flags - per-rate flags set by the + * Rate Control algorithm. + * + * These flags are set by the Rate control algorithm for each rate during tx, + * in the @flags member of struct ieee80211_tx_rate. + * + * @IEEE80211_TX_RC_USE_RTS_CTS: Use RTS/CTS exchange for this rate. + * @IEEE80211_TX_RC_USE_CTS_PROTECT: CTS-to-self protection is required. + * This is set if the current BSS requires ERP protection. + * @IEEE80211_TX_RC_USE_SHORT_PREAMBLE: Use short preamble. + * @IEEE80211_TX_RC_MCS: HT rate. + * @IEEE80211_TX_RC_GREEN_FIELD: Indicates whether this rate should be used in + * Greenfield mode. + * @IEEE80211_TX_RC_40_MHZ_WIDTH: Indicates if the Channel Width should be 40 MHz. 
+ * @IEEE80211_TX_RC_DUP_DATA: The frame should be transmitted on both of the + * adjacent 20 MHz channels, if the current channel type is + * NL80211_CHAN_HT40MINUS or NL80211_CHAN_HT40PLUS. + * @IEEE80211_TX_RC_SHORT_GI: Short Guard interval should be used for this rate. + */ +enum mac80211_rate_control_flags { + IEEE80211_TX_RC_USE_RTS_CTS = BIT(0), + IEEE80211_TX_RC_USE_CTS_PROTECT = BIT(1), + IEEE80211_TX_RC_USE_SHORT_PREAMBLE = BIT(2), + + /* rate index is an MCS rate number instead of an index */ + IEEE80211_TX_RC_MCS = BIT(3), + IEEE80211_TX_RC_GREEN_FIELD = BIT(4), + IEEE80211_TX_RC_40_MHZ_WIDTH = BIT(5), + IEEE80211_TX_RC_DUP_DATA = BIT(6), + IEEE80211_TX_RC_SHORT_GI = BIT(7), +}; + + +/* there are 40 bytes if you don't need the rateset to be kept */ +#define IEEE80211_TX_INFO_DRIVER_DATA_SIZE 40 + +/* if you do need the rateset, then you have less space */ +#define IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE 24 + +/* maximum number of rate stages */ +#define IEEE80211_TX_MAX_RATES 5 + +/** + * struct ieee80211_tx_rate - rate selection/status + * + * @idx: rate index to attempt to send with + * @flags: rate control flags (&enum mac80211_rate_control_flags) + * @count: number of tries in this rate before going to the next rate + * + * A value of -1 for @idx indicates an invalid rate and, if used + * in an array of retry rates, that no more rates should be tried. + * + * When used for transmit status reporting, the driver should + * always report the rate along with the flags it used. + * + * &struct ieee80211_tx_info contains an array of these structs + * in the control information, and it will be filled by the rate + * control algorithm according to what should be sent. For example, + * if this array contains, in the format { , } the + * information + * { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 } + * then this means that the frame should be transmitted + * up to twice at rate 3, up to twice at rate 2, and up to four + * times at rate 1 if it doesn't get acknowledged. Say it gets + * acknowledged by the peer after the fifth attempt, the status + * information should then contain + * { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ... + * since it was transmitted twice at rate 3, twice at rate 2 + * and once at rate 1 after which we received an acknowledgement. + */ +struct ieee80211_tx_rate { + s8 idx; + u8 count; + u8 flags; +} __attribute__((packed)); + +/** + * struct ieee80211_tx_info - skb transmit information + * + * This structure is placed in skb->cb for three uses: + * (1) mac80211 TX control - mac80211 tells the driver what to do + * (2) driver internal use (if applicable) + * (3) TX status information - driver tells mac80211 what happened + * + * The TX control's sta pointer is only valid during the ->tx call, + * it may be NULL. + * + * @flags: transmit info flags, defined above + * @band: the band to transmit on (use for checking for races) + * @antenna_sel_tx: antenna to use, 0 for automatic diversity + * @pad: padding, ignore + * @control: union for control data + * @status: union for status data + * @driver_data: array of driver_data pointers + * @ampdu_ack_len: number of acked aggregated frames. + * relevant only if IEEE80211_TX_STATUS_AMPDU was set. + * @ampdu_ack_map: block ack bit map for the aggregation. + * relevant only if IEEE80211_TX_STATUS_AMPDU was set. + * @ampdu_len: number of aggregated frames. + * relevant only if IEEE80211_TX_STATUS_AMPDU was set. 
+ * @ack_signal: signal strength of the ACK frame + */ +struct ieee80211_tx_info { + /* common information */ + u32 flags; + u8 band; + + u8 antenna_sel_tx; + + /* 2 byte hole */ + u8 pad[2]; + + union { + struct { + union { + /* rate control */ + struct { + struct ieee80211_tx_rate rates[ + IEEE80211_TX_MAX_RATES]; + s8 rts_cts_rate_idx; + }; + /* only needed before rate control */ + unsigned long jiffies; + }; + /* NB: vif can be NULL for injected frames */ + struct ieee80211_vif *vif; + struct ieee80211_key_conf *hw_key; + struct ieee80211_sta *sta; + } control; + struct { + struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES]; + u8 ampdu_ack_len; + u64 ampdu_ack_map; + int ack_signal; + u8 ampdu_len; + /* 7 bytes free */ + } status; + struct { + struct ieee80211_tx_rate driver_rates[ + IEEE80211_TX_MAX_RATES]; + void *rate_driver_data[ + IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE / sizeof(void *)]; + }; + void *driver_data[ + IEEE80211_TX_INFO_DRIVER_DATA_SIZE / sizeof(void *)]; + }; +}; + +static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb) +{ + return (struct ieee80211_tx_info *)skb->cb; +} + +static inline struct ieee80211_rx_status *IEEE80211_SKB_RXCB(struct sk_buff *skb) +{ + return (struct ieee80211_rx_status *)skb->cb; +} + +/** + * ieee80211_tx_info_clear_status - clear TX status + * + * @info: The &struct ieee80211_tx_info to be cleared. + * + * When the driver passes an skb back to mac80211, it must report + * a number of things in TX status. This function clears everything + * in the TX status but the rate control information (it does clear + * the count since you need to fill that in anyway). + * + * NOTE: You can only use this function if you do NOT use + * info->driver_data! Use info->rate_driver_data + * instead if you need only the less space that allows. + */ +static inline void +ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) +{ + int i; + + BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) != + offsetof(struct ieee80211_tx_info, control.rates)); + BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) != + offsetof(struct ieee80211_tx_info, driver_rates)); + BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.rates) != 8); + /* clear the rate counts */ + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) + info->status.rates[i].count = 0; + + BUILD_BUG_ON( + offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23); + memset(&info->status.ampdu_ack_len, 0, + sizeof(struct ieee80211_tx_info) - + offsetof(struct ieee80211_tx_info, status.ampdu_ack_len)); +} + + +/** + * enum mac80211_rx_flags - receive flags + * + * These flags are used with the @flag member of &struct ieee80211_rx_status. + * @RX_FLAG_MMIC_ERROR: Michael MIC error was reported on this frame. + * Use together with %RX_FLAG_MMIC_STRIPPED. + * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware. + * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame, + * verification has been done by the hardware. + * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame. + * If this flag is set, the stack cannot do any replay detection + * hence the driver or hardware will have to do that. + * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on + * the frame. + * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on + * the frame. + * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field) + * is valid. 
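/*
 * Editor's sketch, not part of the original patch: a driver TX completion
 * path reporting status through the ieee80211_tx_info block above.
 * ieee80211_tx_info_clear_status() wipes everything except the rate
 * entries, so only the try count and the ACK flag need to be filled in;
 * ieee80211_tx_status() is assumed to be the hand-back call declared
 * further down in this header.
 */
#include <linux/skbuff.h>
#include <net/mac80211.h>

static void example_tx_complete(struct ieee80211_hw *hw, struct sk_buff *skb,
				bool acked, u8 tries)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	ieee80211_tx_info_clear_status(info);

	/* how often the first (and, in this sketch, only) rate was tried */
	info->status.rates[0].count = tries;
	if (acked)
		info->flags |= IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status(hw, skb);
}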
This is useful in monitor mode and necessary for beacon frames + * to enable IBSS merging. + * @RX_FLAG_SHORTPRE: Short preamble was used for this frame + * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index + * @RX_FLAG_40MHZ: HT40 (40 MHz) was used + * @RX_FLAG_SHORT_GI: Short guard interval was used + * @RX_FLAG_INTERNAL_CMTR: set internally after frame was reported + * on cooked monitor to avoid double-reporting it for multiple + * virtual interfaces + */ +enum mac80211_rx_flags { + RX_FLAG_MMIC_ERROR = 1<<0, + RX_FLAG_DECRYPTED = 1<<1, + RX_FLAG_MMIC_STRIPPED = 1<<3, + RX_FLAG_IV_STRIPPED = 1<<4, + RX_FLAG_FAILED_FCS_CRC = 1<<5, + RX_FLAG_FAILED_PLCP_CRC = 1<<6, + RX_FLAG_TSFT = 1<<7, + RX_FLAG_SHORTPRE = 1<<8, + RX_FLAG_HT = 1<<9, + RX_FLAG_40MHZ = 1<<10, + RX_FLAG_SHORT_GI = 1<<11, + RX_FLAG_INTERNAL_CMTR = 1<<12, +}; + +/** + * struct ieee80211_rx_status - receive status + * + * The low-level driver should provide this information (the subset + * supported by hardware) to the 802.11 code with each received + * frame, in the skb's control buffer (cb). + * + * @mactime: value in microseconds of the 64-bit Time Synchronization Function + * (TSF) timer when the first data symbol (MPDU) arrived at the hardware. + * @band: the active band when this frame was received + * @freq: frequency the radio was tuned to when receiving this frame, in MHz + * @signal: signal strength when receiving this frame, either in dBm, in dB or + * unspecified depending on the hardware capabilities flags + * @IEEE80211_HW_SIGNAL_* + * @noise: noise when receiving this frame, in dBm. + * @antenna: antenna used + * @rate_idx: index of data rate into band's supported rates or MCS index if + * HT rates are use (RX_FLAG_HT) + * @flag: %RX_FLAG_* + */ +struct ieee80211_rx_status { + u64 mactime; + enum ieee80211_band band; + int freq; + int signal; + int noise; + int antenna; + int rate_idx; + int flag; +}; + +/** + * enum ieee80211_conf_flags - configuration flags + * + * Flags to define PHY configuration options + * + * @IEEE80211_CONF_MONITOR: there's a monitor interface present -- use this + * to determine for example whether to calculate timestamps for packets + * or not, do not use instead of filter flags! + * @IEEE80211_CONF_PS: Enable 802.11 power save mode (managed mode only). + * This is the power save mode defined by IEEE 802.11-2007 section 11.2, + * meaning that the hardware still wakes up for beacons, is able to + * transmit frames and receive the possible acknowledgment frames. + * Not to be confused with hardware specific wakeup/sleep states, + * driver is responsible for that. See the section "Powersave support" + * for more. + * @IEEE80211_CONF_IDLE: The device is running, but idle; if the flag is set + * the driver should be prepared to handle configuration requests but + * may turn the device off as much as possible. Typically, this flag will + * be set when an interface is set UP but not associated or scanning, but + * it can also be unset in that case when monitor interfaces are active. 
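+ *
+ * As an illustrative sketch (not part of mac80211; the mydrv_* helpers are
+ * hypothetical), a driver's config() callback might react to these flags
+ * roughly as follows, using the %IEEE80211_CONF_CHANGE_* bits documented
+ * below:
+ *
+ *	static int mydrv_config(struct ieee80211_hw *hw, u32 changed)
+ *	{
+ *		struct ieee80211_conf *conf = &hw->conf;
+ *
+ *		if (changed & IEEE80211_CONF_CHANGE_PS)
+ *			mydrv_set_powersave(hw->priv,
+ *					    conf->flags & IEEE80211_CONF_PS);
+ *		if (changed & IEEE80211_CONF_CHANGE_IDLE)
+ *			mydrv_set_idle(hw->priv,
+ *				       conf->flags & IEEE80211_CONF_IDLE);
+ *		return 0;
+ *	}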
+ */ +enum ieee80211_conf_flags { + IEEE80211_CONF_MONITOR = (1<<0), + IEEE80211_CONF_PS = (1<<1), + IEEE80211_CONF_IDLE = (1<<2), +}; + + +/** + * enum ieee80211_conf_changed - denotes which configuration changed + * + * @IEEE80211_CONF_CHANGE_LISTEN_INTERVAL: the listen interval changed + * @IEEE80211_CONF_CHANGE_MONITOR: the monitor flag changed + * @IEEE80211_CONF_CHANGE_PS: the PS flag or dynamic PS timeout changed + * @IEEE80211_CONF_CHANGE_POWER: the TX power changed + * @IEEE80211_CONF_CHANGE_CHANNEL: the channel/channel_type changed + * @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed + * @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed + * @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed + */ +enum ieee80211_conf_changed { + IEEE80211_CONF_CHANGE_SMPS = BIT(1), + IEEE80211_CONF_CHANGE_LISTEN_INTERVAL = BIT(2), + IEEE80211_CONF_CHANGE_MONITOR = BIT(3), + IEEE80211_CONF_CHANGE_PS = BIT(4), + IEEE80211_CONF_CHANGE_POWER = BIT(5), + IEEE80211_CONF_CHANGE_CHANNEL = BIT(6), + IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7), + IEEE80211_CONF_CHANGE_IDLE = BIT(8), +}; + +/** + * enum ieee80211_smps_mode - spatial multiplexing power save mode + * + * @IEEE80211_SMPS_AUTOMATIC: automatic + * @IEEE80211_SMPS_OFF: off + * @IEEE80211_SMPS_STATIC: static + * @IEEE80211_SMPS_DYNAMIC: dynamic + * @IEEE80211_SMPS_NUM_MODES: internal, don't use + */ +enum ieee80211_smps_mode { + IEEE80211_SMPS_AUTOMATIC, + IEEE80211_SMPS_OFF, + IEEE80211_SMPS_STATIC, + IEEE80211_SMPS_DYNAMIC, + + /* keep last */ + IEEE80211_SMPS_NUM_MODES, +}; + +/** + * struct ieee80211_conf - configuration of the device + * + * This struct indicates how the driver shall configure the hardware. + * + * @flags: configuration flags defined above + * + * @listen_interval: listen interval in units of beacon interval + * @max_sleep_period: the maximum number of beacon intervals to sleep for + * before checking the beacon for a TIM bit (managed mode only); this + * value will be only achievable between DTIM frames, the hardware + * needs to check for the multicast traffic bit in DTIM beacons. + * This variable is valid only when the CONF_PS flag is set. + * @ps_dtim_period: The DTIM period of the AP we're connected to, for use + * in power saving. Power saving will not be enabled until a beacon + * has been received and the DTIM period is known. + * @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the + * powersave documentation below. This variable is valid only when + * the CONF_PS flag is set. 
+ * + * @power_level: requested transmit power (in dBm) + * + * @channel: the channel to tune to + * @channel_type: the channel (HT) type + * + * @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame + * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11, + * but actually means the number of transmissions not the number of retries + * @short_frame_max_tx_count: Maximum number of transmissions for a "short" + * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the + * number of transmissions not the number of retries + * + * @smps_mode: spatial multiplexing powersave mode; note that + * %IEEE80211_SMPS_STATIC is used when the device is not + * configured for an HT channel + */ +struct ieee80211_conf { + u32 flags; + int power_level, dynamic_ps_timeout; + int max_sleep_period; + + u16 listen_interval; + u8 ps_dtim_period; + + u8 long_frame_max_tx_count, short_frame_max_tx_count; + + struct ieee80211_channel *channel; + enum nl80211_channel_type channel_type; + enum ieee80211_smps_mode smps_mode; +}; + +/** + * struct ieee80211_vif - per-interface data + * + * Data in this structure is continually present for driver + * use during the life of a virtual interface. + * + * @type: type of this virtual interface + * @bss_conf: BSS configuration for this interface, either our own + * or the BSS we're associated to + * @addr: address of this interface + * @drv_priv: data area for driver use, will always be aligned to + * sizeof(void *). + */ +struct ieee80211_vif { + enum nl80211_iftype type; + struct ieee80211_bss_conf bss_conf; + u8 addr[ETH_ALEN]; + /* must be last */ + u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); +}; + +static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) +{ +#ifdef CONFIG_MAC80211_MESH + return vif->type == NL80211_IFTYPE_MESH_POINT; +#endif + return false; +} + +/** + * enum ieee80211_key_alg - key algorithm + * @ALG_WEP: WEP40 or WEP104 + * @ALG_TKIP: TKIP + * @ALG_CCMP: CCMP (AES) + * @ALG_AES_CMAC: AES-128-CMAC + */ +enum ieee80211_key_alg { + ALG_WEP, + ALG_TKIP, + ALG_CCMP, + ALG_AES_CMAC, +}; + +/** + * enum ieee80211_key_flags - key flags + * + * These flags are used for communication about keys between the driver + * and mac80211, with the @flags parameter of &struct ieee80211_key_conf. + * + * @IEEE80211_KEY_FLAG_WMM_STA: Set by mac80211, this flag indicates + * that the STA this key will be used with could be using QoS. + * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the + * driver to indicate that it requires IV generation for this + * particular key. + * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by + * the driver for a TKIP key if it requires Michael MIC + * generation in software. + * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates + * that the key is pairwise rather then a shared key. + * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a + * CCMP key if it requires CCMP encryption of management frames (MFP) to + * be done in software. + */ +enum ieee80211_key_flags { + IEEE80211_KEY_FLAG_WMM_STA = 1<<0, + IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1, + IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, + IEEE80211_KEY_FLAG_PAIRWISE = 1<<3, + IEEE80211_KEY_FLAG_SW_MGMT = 1<<4, +}; + +/** + * struct ieee80211_key_conf - key information + * + * This key information is given by mac80211 to the driver by + * the set_key() callback in &struct ieee80211_ops. 
+ * + * @hw_key_idx: To be set by the driver, this is the key index the driver + * wants to be given when a frame is transmitted and needs to be + * encrypted in hardware. + * @alg: The key algorithm. + * @flags: key flags, see &enum ieee80211_key_flags. + * @keyidx: the key index (0-3) + * @keylen: key material length + * @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte) + * data block: + * - Temporal Encryption Key (128 bits) + * - Temporal Authenticator Tx MIC Key (64 bits) + * - Temporal Authenticator Rx MIC Key (64 bits) + * @icv_len: The ICV length for this key type + * @iv_len: The IV length for this key type + */ +struct ieee80211_key_conf { + enum ieee80211_key_alg alg; + u8 icv_len; + u8 iv_len; + u8 hw_key_idx; + u8 flags; + s8 keyidx; + u8 keylen; + u8 key[0]; +}; + +/** + * enum set_key_cmd - key command + * + * Used with the set_key() callback in &struct ieee80211_ops, this + * indicates whether a key is being removed or added. + * + * @SET_KEY: a key is set + * @DISABLE_KEY: a key must be disabled + */ +enum set_key_cmd { + SET_KEY, DISABLE_KEY, +}; + +/** + * struct ieee80211_sta - station table entry + * + * A station table entry represents a station we are possibly + * communicating with. Since stations are RCU-managed in + * mac80211, any ieee80211_sta pointer you get access to must + * either be protected by rcu_read_lock() explicitly or implicitly, + * or you must take good care to not use such a pointer after a + * call to your sta_remove callback that removed it. + * + * @addr: MAC address + * @aid: AID we assigned to the station if we're an AP + * @supp_rates: Bitmap of supported rates (per band) + * @ht_cap: HT capabilities of this STA; restricted to our own TX capabilities + * @drv_priv: data area for driver use, will always be aligned to + * sizeof(void *), size is determined in hw information. + */ +struct ieee80211_sta { + u32 supp_rates[IEEE80211_NUM_BANDS]; + u8 addr[ETH_ALEN]; + u16 aid; + struct ieee80211_sta_ht_cap ht_cap; + + /* must be last */ + u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); +}; + +/** + * enum sta_notify_cmd - sta notify command + * + * Used with the sta_notify() callback in &struct ieee80211_ops, this + * indicates addition and removal of a station to station table, + * or if a associated station made a power state transition. + * + * @STA_NOTIFY_ADD: (DEPRECATED) a station was added to the station table + * @STA_NOTIFY_REMOVE: (DEPRECATED) a station being removed from the station table + * @STA_NOTIFY_SLEEP: a station is now sleeping + * @STA_NOTIFY_AWAKE: a sleeping station woke up + */ +enum sta_notify_cmd { + STA_NOTIFY_ADD, STA_NOTIFY_REMOVE, + STA_NOTIFY_SLEEP, STA_NOTIFY_AWAKE, +}; + +/** + * enum ieee80211_tkip_key_type - get tkip key + * + * Used by drivers which need to get a tkip key for skb. Some drivers need a + * phase 1 key, others need a phase 2 key. A single function allows the driver + * to get the key, this enum indicates what type of key is required. + * + * @IEEE80211_TKIP_P1_KEY: the driver needs a phase 1 key + * @IEEE80211_TKIP_P2_KEY: the driver needs a phase 2 key + */ +enum ieee80211_tkip_key_type { + IEEE80211_TKIP_P1_KEY, + IEEE80211_TKIP_P2_KEY, +}; + +/** + * enum ieee80211_hw_flags - hardware flags + * + * These flags are used to indicate hardware capabilities to + * the stack. Generally, flags here should have their meaning + * done in a way that the simplest hardware doesn't need setting + * any particular flags. 
There are some exceptions to this rule, + * however, so you are advised to review these flags carefully. + * + * @IEEE80211_HW_HAS_RATE_CONTROL: + * The hardware or firmware includes rate control, and cannot be + * controlled by the stack. As such, no rate control algorithm + * should be instantiated, and the TX rate reported to userspace + * will be taken from the TX status instead of the rate control + * algorithm. + * Note that this requires that the driver implement a number of + * callbacks so it has the correct information, it needs to have + * the @set_rts_threshold callback and must look at the BSS config + * @use_cts_prot for G/N protection, @use_short_slot for slot + * timing in 2.4 GHz and @use_short_preamble for preambles for + * CCK frames. + * + * @IEEE80211_HW_RX_INCLUDES_FCS: + * Indicates that received frames passed to the stack include + * the FCS at the end. + * + * @IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING: + * Some wireless LAN chipsets buffer broadcast/multicast frames + * for power saving stations in the hardware/firmware and others + * rely on the host system for such buffering. This option is used + * to configure the IEEE 802.11 upper layer to buffer broadcast and + * multicast frames when there are power saving stations so that + * the driver can fetch them with ieee80211_get_buffered_bc(). + * + * @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE: + * Hardware is not capable of short slot operation on the 2.4 GHz band. + * + * @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE: + * Hardware is not capable of receiving frames with short preamble on + * the 2.4 GHz band. + * + * @IEEE80211_HW_SIGNAL_UNSPEC: + * Hardware can provide signal values but we don't know its units. We + * expect values between 0 and @max_signal. + * If possible please provide dB or dBm instead. + * + * @IEEE80211_HW_SIGNAL_DBM: + * Hardware gives signal values in dBm, decibel difference from + * one milliwatt. This is the preferred method since it is standardized + * between different devices. @max_signal does not need to be set. + * + * @IEEE80211_HW_NOISE_DBM: + * Hardware can provide noise (radio interference) values in units dBm, + * decibel difference from one milliwatt. + * + * @IEEE80211_HW_SPECTRUM_MGMT: + * Hardware supports spectrum management defined in 802.11h + * Measurement, Channel Switch, Quieting, TPC + * + * @IEEE80211_HW_AMPDU_AGGREGATION: + * Hardware supports 11n A-MPDU aggregation. + * + * @IEEE80211_HW_SUPPORTS_PS: + * Hardware has power save support (i.e. can go to sleep). + * + * @IEEE80211_HW_PS_NULLFUNC_STACK: + * Hardware requires nullfunc frame handling in stack, implies + * stack support for dynamic PS. + * + * @IEEE80211_HW_SUPPORTS_DYNAMIC_PS: + * Hardware has support for dynamic PS. + * + * @IEEE80211_HW_MFP_CAPABLE: + * Hardware supports management frame protection (MFP, IEEE 802.11w). + * + * @IEEE80211_HW_BEACON_FILTER: + * Hardware supports dropping of irrelevant beacon frames to + * avoid waking up cpu. + * + * @IEEE80211_HW_SUPPORTS_STATIC_SMPS: + * Hardware supports static spatial multiplexing powersave, + * ie. can turn off all but one chain even on HT connections + * that should be using more chains. + * + * @IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS: + * Hardware supports dynamic spatial multiplexing powersave, + * ie. can turn off all but one chain and then wake the rest + * up as required after, for example, rts/cts handshake. + * + * @IEEE80211_HW_SUPPORTS_UAPSD: + * Hardware supports Unscheduled Automatic Power Save Delivery + * (U-APSD) in managed mode. 
The mode is configured with + * conf_tx() operation. + * + * @IEEE80211_HW_REPORTS_TX_ACK_STATUS: + * Hardware can provide ack status reports of Tx frames to + * the stack. + * + */ +enum ieee80211_hw_flags { + IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, + IEEE80211_HW_RX_INCLUDES_FCS = 1<<1, + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2, + IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3, + IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4, + IEEE80211_HW_SIGNAL_UNSPEC = 1<<5, + IEEE80211_HW_SIGNAL_DBM = 1<<6, + IEEE80211_HW_NOISE_DBM = 1<<7, + IEEE80211_HW_SPECTRUM_MGMT = 1<<8, + IEEE80211_HW_AMPDU_AGGREGATION = 1<<9, + IEEE80211_HW_SUPPORTS_PS = 1<<10, + IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11, + IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, + IEEE80211_HW_MFP_CAPABLE = 1<<13, + IEEE80211_HW_BEACON_FILTER = 1<<14, + IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15, + IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16, + IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, + IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, +}; + +/** + * struct ieee80211_hw - hardware information and state + * + * This structure contains the configuration and hardware + * information for an 802.11 PHY. + * + * @wiphy: This points to the &struct wiphy allocated for this + * 802.11 PHY. You must fill in the @perm_addr and @dev + * members of this structure using SET_IEEE80211_DEV() + * and SET_IEEE80211_PERM_ADDR(). Additionally, all supported + * bands (with channels, bitrates) are registered here. + * + * @conf: &struct ieee80211_conf, device configuration, don't use. + * + * @priv: pointer to private area that was allocated for driver use + * along with this structure. + * + * @flags: hardware flags, see &enum ieee80211_hw_flags. + * + * @extra_tx_headroom: headroom to reserve in each transmit skb + * for use by the driver (e.g. for transmit headers.) + * + * @channel_change_time: time (in microseconds) it takes to change channels. + * + * @max_signal: Maximum value for signal (rssi) in RX information, used + * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB + * + * @max_listen_interval: max listen interval in units of beacon interval + * that HW supports + * + * @queues: number of available hardware transmit queues for + * data packets. WMM/QoS requires at least four, these + * queues need to have configurable access parameters. + * + * @rate_control_algorithm: rate control algorithm for this hardware. + * If unset (NULL), the default algorithm will be used. Must be + * set before calling ieee80211_register_hw(). + * + * @vif_data_size: size (in bytes) of the drv_priv data area + * within &struct ieee80211_vif. + * @sta_data_size: size (in bytes) of the drv_priv data area + * within &struct ieee80211_sta. + * + * @max_rates: maximum number of alternate rate retry stages + * @max_rate_tries: maximum number of tries for each stage + */ +struct ieee80211_hw { + struct ieee80211_conf conf; + struct wiphy *wiphy; + const char *rate_control_algorithm; + void *priv; + u32 flags; + unsigned int extra_tx_headroom; + int channel_change_time; + int vif_data_size; + int sta_data_size; + u16 queues; + u16 max_listen_interval; + s8 max_signal; + u8 max_rates; + u8 max_rate_tries; +}; + +/** + * wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy + * + * @wiphy: the &struct wiphy which we want to query + * + * mac80211 drivers can use this to get to their respective + * &struct ieee80211_hw. Drivers wishing to get to their own private + * structure can then access it via hw->priv. 
Note that mac802111 drivers should + * not use wiphy_priv() to try to get their private driver structure as this + * is already used internally by mac80211. + */ +struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy); + +/** + * SET_IEEE80211_DEV - set device for 802.11 hardware + * + * @hw: the &struct ieee80211_hw to set the device for + * @dev: the &struct device of this 802.11 device + */ +static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev) +{ + set_wiphy_dev(hw->wiphy, dev); +} + +/** + * SET_IEEE80211_PERM_ADDR - set the permanent MAC address for 802.11 hardware + * + * @hw: the &struct ieee80211_hw to set the MAC address for + * @addr: the address to set + */ +static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) +{ + memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN); +} + +static inline struct ieee80211_rate * +ieee80211_get_tx_rate(const struct ieee80211_hw *hw, + const struct ieee80211_tx_info *c) +{ + if (WARN_ON(c->control.rates[0].idx < 0)) + return NULL; + return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx]; +} + +static inline struct ieee80211_rate * +ieee80211_get_rts_cts_rate(const struct ieee80211_hw *hw, + const struct ieee80211_tx_info *c) +{ + if (c->control.rts_cts_rate_idx < 0) + return NULL; + return &hw->wiphy->bands[c->band]->bitrates[c->control.rts_cts_rate_idx]; +} + +static inline struct ieee80211_rate * +ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, + const struct ieee80211_tx_info *c, int idx) +{ + if (c->control.rates[idx + 1].idx < 0) + return NULL; + return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[idx + 1].idx]; +} + +/** + * DOC: Hardware crypto acceleration + * + * mac80211 is capable of taking advantage of many hardware + * acceleration designs for encryption and decryption operations. + * + * The set_key() callback in the &struct ieee80211_ops for a given + * device is called to enable hardware acceleration of encryption and + * decryption. The callback takes a @sta parameter that will be NULL + * for default keys or keys used for transmission only, or point to + * the station information for the peer for individual keys. + * Multiple transmission keys with the same key index may be used when + * VLANs are configured for an access point. + * + * When transmitting, the TX control data will use the @hw_key_idx + * selected by the driver by modifying the &struct ieee80211_key_conf + * pointed to by the @key parameter to the set_key() function. + * + * The set_key() call for the %SET_KEY command should return 0 if + * the key is now in use, -%EOPNOTSUPP or -%ENOSPC if it couldn't be + * added; if you return 0 then hw_key_idx must be assigned to the + * hardware key index, you are free to use the full u8 range. + * + * When the cmd is %DISABLE_KEY then it must succeed. + * + * Note that it is permissible to not decrypt a frame even if a key + * for it has been uploaded to hardware, the stack will not make any + * decision based on whether a key has been uploaded or not but rather + * based on the receive flags. + * + * The &struct ieee80211_key_conf structure pointed to by the @key + * parameter is guaranteed to be valid until another call to set_key() + * removes it, but it can only be used as a cookie to differentiate + * keys. + * + * In TKIP some HW need to be provided a phase 1 key, for RX decryption + * acceleration (i.e. iwlwifi). Those drivers should provide update_tkip_key + * handler. 
+ * The update_tkip_key() call updates the driver with the new phase 1 key.
+ * This happens every time the iv16 wraps around (every 65536 packets). The
+ * set_key() call will happen only once for each key (unless the AP did
+ * rekeying), it will not include a valid phase 1 key. The valid phase 1 key
+ * is provided by update_tkip_key only. The trigger that makes mac80211 call
+ * this handler is software decryption with wrap around of iv16.
+ */
+
+/**
+ * DOC: Powersave support
+ *
+ * mac80211 has support for various powersave implementations.
+ *
+ * First, it can support hardware that handles all powersaving by itself;
+ * such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS hardware
+ * flag. In that case, it will be told about the desired powersave mode
+ * with the %IEEE80211_CONF_PS flag depending on the association status.
+ * The hardware must take care of sending nullfunc frames when necessary,
+ * i.e. when entering and leaving powersave mode. The hardware is required
+ * to look at the AID in beacons and signal to the AP that it woke up when
+ * it finds traffic directed to it.
+ *
+ * The %IEEE80211_CONF_PS flag enabled means that the powersave mode defined
+ * in IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused
+ * with hardware wakeup and sleep states. The driver is responsible for
+ * waking up the hardware before issuing commands to the hardware and
+ * putting it back to sleep at appropriate times.
+ *
+ * When PS is enabled, the hardware needs to wake up for beacons and receive
+ * the buffered multicast/broadcast frames after the beacon. It must also be
+ * possible to send frames and receive the acknowledgment frame.
+ *
+ * Other hardware designs cannot send nullfunc frames by themselves and also
+ * need software support for parsing the TIM bitmap. This is also supported
+ * by mac80211 by combining the %IEEE80211_HW_SUPPORTS_PS and
+ * %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still
+ * required to pass up beacons. The hardware is still required to handle
+ * waking up for multicast traffic; if it cannot, the driver must handle
+ * that as best it can; mac80211 is too slow to do that.
+ *
+ * Dynamic powersave is an extension to normal powersave in which the
+ * hardware stays awake for a user-specified period of time after sending a
+ * frame so that reply frames need not be buffered and therefore delayed to
+ * the next wakeup. It is a compromise between getting good enough latency
+ * when there is data traffic and still saving significant power in idle
+ * periods.
+ *
+ * Dynamic powersave is supported simply by mac80211 enabling and disabling
+ * PS based on traffic. The driver only needs to set the
+ * %IEEE80211_HW_SUPPORTS_PS flag and mac80211 will handle everything
+ * automatically. Additionally, hardware having support for the dynamic PS
+ * feature may set the %IEEE80211_HW_SUPPORTS_DYNAMIC_PS flag to indicate
+ * that it can support dynamic PS mode itself. The driver needs to look at
+ * the @dynamic_ps_timeout hardware configuration value and use that value
+ * whenever %IEEE80211_CONF_PS is set. In this case mac80211 will disable
+ * the dynamic PS feature in the stack and will just keep %IEEE80211_CONF_PS
+ * enabled whenever the user has enabled powersave.
+ *
+ * The driver indicates U-APSD client support by setting the
+ * %IEEE80211_HW_SUPPORTS_UAPSD flag. The mode is configured through the
+ * uapsd parameter in the conf_tx() operation. The hardware needs to send
+ * the QoS Nullfunc frames and stay awake until the service period has
+ * ended. To utilize U-APSD, dynamic powersave is disabled for the VoIP AC
+ * and all frames from that AC are transmitted with powersave enabled.
+ *
+ * Note: U-APSD client mode is not yet supported with
+ * %IEEE80211_HW_PS_NULLFUNC_STACK.
+ */
+
+/**
+ * DOC: Beacon filter support
+ *
+ * Some hardware has beacon filter support to reduce host CPU wakeups,
+ * which reduces system power consumption. It usually works so that the
+ * firmware creates a checksum of the beacon but omits all constantly
+ * changing elements (TSF, TIM etc). Whenever the checksum changes, the
+ * beacon is forwarded to the host, otherwise it will just be dropped. That
+ * way the host will only receive beacons where some relevant information
+ * (for example ERP protection or WMM settings) has changed.
+ *
+ * Beacon filter support is advertised with the %IEEE80211_HW_BEACON_FILTER
+ * hardware capability. The driver needs to enable beacon filter support
+ * whenever power save is enabled, that is %IEEE80211_CONF_PS is set. When
+ * power save is enabled, the stack will not check for beacon loss and the
+ * driver needs to notify about loss of beacons with ieee80211_beacon_loss().
+ *
+ * The time (or number of beacons missed) until the firmware notifies the
+ * driver of a beacon loss event (which in turn causes the driver to call
+ * ieee80211_beacon_loss()) should be configurable and will be controlled
+ * by mac80211 and the roaming algorithm in the future.
+ *
+ * Since there may be constantly changing information elements that nothing
+ * in the software stack cares about, we will, in the future, have mac80211
+ * tell the driver which information elements are interesting in the sense
+ * that we want to see changes in them. This will include
+ * - a list of information element IDs
+ * - a list of OUIs for the vendor information element
+ *
+ * Ideally, the hardware would filter out any beacons without changes in the
+ * requested elements, but if it cannot support that it may, at the expense
+ * of some efficiency, filter out only a subset. For example, if the device
+ * doesn't support checking for OUIs it should pass up all changes in all
+ * vendor information elements.
+ *
+ * Note that change, for the sake of simplification, also includes
+ * information elements appearing or disappearing from the beacon.
+ *
+ * Some hardware supports an "ignore list" instead; just make sure nothing
+ * that was requested is on the ignore list, and include commonly changing
+ * information element IDs in the ignore list, for example 11 (BSS load) and
+ * the various vendor-assigned IEs with unknown contents (128, 129, 133-136,
+ * 149, 150, 155, 156, 173, 176, 178, 179, 219); for forward compatibility
+ * it could also include some currently unused IDs.
+ *
+ * In addition to these capabilities, hardware should support notifying the
+ * host of changes in the beacon RSSI. This is relevant to implement roaming
+ * when no traffic is flowing (when traffic is flowing we see the RSSI of
+ * the received data packets). This can consist of notifying the host when
+ * the RSSI changes significantly or when it drops below or rises above
+ * configurable thresholds. In the future these thresholds will also be
+ * configured by mac80211 (which gets them from userspace) to implement
+ * them as the roaming algorithm requires.
+ *
+ * If the hardware cannot implement this, the driver should ask it to
+ * periodically pass beacon frames to the host so that software can do the
+ * signal strength threshold checking.
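+ *
+ * As an illustrative sketch (not part of mac80211; the mydrv_* names and
+ * the firmware interface are hypothetical, and priv->vif is assumed to be
+ * the interface saved in add_interface()), a driver could enable its
+ * beacon filter whenever powersave is turned on and report a firmware
+ * beacon-loss event back to the stack:
+ *
+ *	static int mydrv_config(struct ieee80211_hw *hw, u32 changed)
+ *	{
+ *		if (changed & IEEE80211_CONF_CHANGE_PS)
+ *			mydrv_fw_set_beacon_filter(hw->priv,
+ *					hw->conf.flags & IEEE80211_CONF_PS);
+ *		return 0;
+ *	}
+ *
+ *	static void mydrv_fw_beacon_loss_event(struct mydrv_priv *priv)
+ *	{
+ *		ieee80211_beacon_loss(priv->vif);
+ *	}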
+ */ + +/** + * DOC: Spatial multiplexing power save + * + * SMPS (Spatial multiplexing power save) is a mechanism to conserve + * power in an 802.11n implementation. For details on the mechanism + * and rationale, please refer to 802.11 (as amended by 802.11n-2009) + * "11.2.3 SM power save". + * + * The mac80211 implementation is capable of sending action frames + * to update the AP about the station's SMPS mode, and will instruct + * the driver to enter the specific mode. It will also announce the + * requested SMPS mode during the association handshake. Hardware + * support for this feature is required, and can be indicated by + * hardware flags. + * + * The default mode will be "automatic", which nl80211/cfg80211 + * defines to be dynamic SMPS in (regular) powersave, and SMPS + * turned off otherwise. + * + * To support this feature, the driver must set the appropriate + * hardware support flags, and handle the SMPS flag to the config() + * operation. It will then with this mechanism be instructed to + * enter the requested SMPS mode while associated to an HT AP. + */ + +/** + * DOC: Frame filtering + * + * mac80211 requires to see many management frames for proper + * operation, and users may want to see many more frames when + * in monitor mode. However, for best CPU usage and power consumption, + * having as few frames as possible percolate through the stack is + * desirable. Hence, the hardware should filter as much as possible. + * + * To achieve this, mac80211 uses filter flags (see below) to tell + * the driver's configure_filter() function which frames should be + * passed to mac80211 and which should be filtered out. + * + * Before configure_filter() is invoked, the prepare_multicast() + * callback is invoked with the parameters @mc_count and @mc_list + * for the combined multicast address list of all virtual interfaces. + * It's use is optional, and it returns a u64 that is passed to + * configure_filter(). Additionally, configure_filter() has the + * arguments @changed_flags telling which flags were changed and + * @total_flags with the new flag states. + * + * If your device has no multicast address filters your driver will + * need to check both the %FIF_ALLMULTI flag and the @mc_count + * parameter to see whether multicast frames should be accepted + * or dropped. + * + * All unsupported flags in @total_flags must be cleared. + * Hardware does not support a flag if it is incapable of _passing_ + * the frame to the stack. Otherwise the driver must ignore + * the flag, but not clear it. + * You must _only_ clear the flag (announce no support for the + * flag to mac80211) if you are not able to pass the packet type + * to the stack (so the hardware always filters it). + * So for example, you should clear @FIF_CONTROL, if your hardware + * always filters control frames. If your hardware always passes + * control frames to the kernel and is incapable of filtering them, + * you do _not_ clear the @FIF_CONTROL flag. + * This rule applies to all other FIF flags as well. + */ + +/** + * enum ieee80211_filter_flags - hardware filter flags + * + * These flags determine what the filter in hardware should be + * programmed to let through and what should not be passed to the + * stack. It is always safe to pass more frames than requested, + * but this has negative impact on power consumption. + * + * @FIF_PROMISC_IN_BSS: promiscuous mode within your BSS, + * think of the BSS as your network segment and then this corresponds + * to the regular ethernet device promiscuous mode. 
+ * + * @FIF_ALLMULTI: pass all multicast frames, this is used if requested + * by the user or if the hardware is not capable of filtering by + * multicast address. + * + * @FIF_FCSFAIL: pass frames with failed FCS (but you need to set the + * %RX_FLAG_FAILED_FCS_CRC for them) + * + * @FIF_PLCPFAIL: pass frames with failed PLCP CRC (but you need to set + * the %RX_FLAG_FAILED_PLCP_CRC for them + * + * @FIF_BCN_PRBRESP_PROMISC: This flag is set during scanning to indicate + * to the hardware that it should not filter beacons or probe responses + * by BSSID. Filtering them can greatly reduce the amount of processing + * mac80211 needs to do and the amount of CPU wakeups, so you should + * honour this flag if possible. + * + * @FIF_CONTROL: pass control frames (except for PS Poll), if PROMISC_IN_BSS + * is not set then only those addressed to this station. + * + * @FIF_OTHER_BSS: pass frames destined to other BSSes + * + * @FIF_PSPOLL: pass PS Poll frames, if PROMISC_IN_BSS is not set then only + * those addressed to this station. + */ +enum ieee80211_filter_flags { + FIF_PROMISC_IN_BSS = 1<<0, + FIF_ALLMULTI = 1<<1, + FIF_FCSFAIL = 1<<2, + FIF_PLCPFAIL = 1<<3, + FIF_BCN_PRBRESP_PROMISC = 1<<4, + FIF_CONTROL = 1<<5, + FIF_OTHER_BSS = 1<<6, + FIF_PSPOLL = 1<<7, +}; + +/** + * enum ieee80211_ampdu_mlme_action - A-MPDU actions + * + * These flags are used with the ampdu_action() callback in + * &struct ieee80211_ops to indicate which action is needed. + * + * Note that drivers MUST be able to deal with a TX aggregation + * session being stopped even before they OK'ed starting it by + * calling ieee80211_start_tx_ba_cb(_irqsafe), because the peer + * might receive the addBA frame and send a delBA right away! + * + * @IEEE80211_AMPDU_RX_START: start Rx aggregation + * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation + * @IEEE80211_AMPDU_TX_START: start Tx aggregation + * @IEEE80211_AMPDU_TX_STOP: stop Tx aggregation + * @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational + */ +enum ieee80211_ampdu_mlme_action { + IEEE80211_AMPDU_RX_START, + IEEE80211_AMPDU_RX_STOP, + IEEE80211_AMPDU_TX_START, + IEEE80211_AMPDU_TX_STOP, + IEEE80211_AMPDU_TX_OPERATIONAL, +}; + +/** + * struct ieee80211_ops - callbacks from mac80211 to the driver + * + * This structure contains various callbacks that the driver may + * handle or, in some cases, must handle, for example to configure + * the hardware to a new channel or to transmit a frame. + * + * @tx: Handler that 802.11 module calls for each transmitted frame. + * skb contains the buffer starting from the IEEE 802.11 header. + * The low-level driver should send the frame out based on + * configuration in the TX control data. This handler should, + * preferably, never fail and stop queues appropriately, more + * importantly, however, it must never fail for A-MPDU-queues. + * This function should return NETDEV_TX_OK except in very + * limited cases. + * Must be implemented and atomic. + * + * @start: Called before the first netdevice attached to the hardware + * is enabled. This should turn on the hardware and must turn on + * frame reception (for possibly enabled monitor interfaces.) + * Returns negative error codes, these may be seen in userspace, + * or zero. + * When the device is started it should not have a MAC address + * to avoid acknowledging frames before a non-monitor device + * is added. + * Must be implemented and can sleep. + * + * @stop: Called after last netdevice attached to the hardware + * is disabled. 
This should turn off the hardware (at least + * it must turn off frame reception.) + * May be called right after add_interface if that rejects + * an interface. If you added any work onto the mac80211 workqueue + * you should ensure to cancel it on this callback. + * Must be implemented and can sleep. + * + * @add_interface: Called when a netdevice attached to the hardware is + * enabled. Because it is not called for monitor mode devices, @start + * and @stop must be implemented. + * The driver should perform any initialization it needs before + * the device can be enabled. The initial configuration for the + * interface is given in the conf parameter. + * The callback may refuse to add an interface by returning a + * negative error code (which will be seen in userspace.) + * Must be implemented and can sleep. + * + * @remove_interface: Notifies a driver that an interface is going down. + * The @stop callback is called after this if it is the last interface + * and no monitor interfaces are present. + * When all interfaces are removed, the MAC address in the hardware + * must be cleared so the device no longer acknowledges packets, + * the mac_addr member of the conf structure is, however, set to the + * MAC address of the device going away. + * Hence, this callback must be implemented. It can sleep. + * + * @config: Handler for configuration requests. IEEE 802.11 code calls this + * function to change hardware configuration, e.g., channel. + * This function should never fail but returns a negative error code + * if it does. The callback can sleep. + * + * @bss_info_changed: Handler for configuration requests related to BSS + * parameters that may vary during BSS's lifespan, and may affect low + * level driver (e.g. assoc/disassoc status, erp parameters). + * This function should not be used if no BSS has been set, unless + * for association indication. The @changed parameter indicates which + * of the bss parameters has changed when a call is made. The callback + * can sleep. + * + * @prepare_multicast: Prepare for multicast filter configuration. + * This callback is optional, and its return value is passed + * to configure_filter(). This callback must be atomic. + * + * @configure_filter: Configure the device's RX filter. + * See the section "Frame filtering" for more information. + * This callback must be implemented and can sleep. + * + * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit + * must be set or cleared for a given STA. Must be atomic. + * + * @set_key: See the section "Hardware crypto acceleration" + * This callback is only called between add_interface and + * remove_interface calls, i.e. while the given virtual interface + * is enabled. + * Returns a negative error code if the key can't be added. + * The callback can sleep. + * + * @update_tkip_key: See the section "Hardware crypto acceleration" + * This callback will be called in the context of Rx. Called for drivers + * which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY. + * The callback must be atomic. + * + * @hw_scan: Ask the hardware to service the scan request, no need to start + * the scan state machine in stack. The scan must honour the channel + * configuration done by the regulatory agent in the wiphy's + * registered bands. The hardware (or the driver) needs to make sure + * that power save is disabled. 
+ * The @req ie/ie_len members are rewritten by mac80211 to contain the + * entire IEs after the SSID, so that drivers need not look at these + * at all but just send them after the SSID -- mac80211 includes the + * (extended) supported rates and HT information (where applicable). + * When the scan finishes, ieee80211_scan_completed() must be called; + * note that it also must be called when the scan cannot finish due to + * any error unless this callback returned a negative error code. + * The callback can sleep. + * + * @sw_scan_start: Notifier function that is called just before a software scan + * is started. Can be NULL, if the driver doesn't need this notification. + * The callback can sleep. + * + * @sw_scan_complete: Notifier function that is called just after a + * software scan finished. Can be NULL, if the driver doesn't need + * this notification. + * The callback can sleep. + * + * @get_stats: Return low-level statistics. + * Returns zero if statistics are available. + * The callback can sleep. + * + * @get_tkip_seq: If your device implements TKIP encryption in hardware this + * callback should be provided to read the TKIP transmit IVs (both IV32 + * and IV16) for the given key from hardware. + * The callback must be atomic. + * + * @set_rts_threshold: Configuration of RTS threshold (if device needs it) + * The callback can sleep. + * + * @sta_add: Notifies low level driver about addition of an associated station, + * AP, IBSS/WDS/mesh peer etc. This callback can sleep. + * + * @sta_remove: Notifies low level driver about removal of an associated + * station, AP, IBSS/WDS/mesh peer etc. This callback can sleep. + * + * @sta_notify: Notifies low level driver about power state transition of an + * associated station, AP, IBSS/WDS/mesh peer etc. Must be atomic. + * + * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), + * bursting) for a hardware TX queue. + * Returns a negative error code on failure. + * The callback can sleep. + * + * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently, + * this is only used for IBSS mode BSSID merging and debugging. Is not a + * required function. + * The callback can sleep. + * + * @set_tsf: Set the TSF timer to the specified value in the firmware/hardware. + * Currently, this is only used for IBSS mode debugging. Is not a + * required function. + * The callback can sleep. + * + * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize + * with other STAs in the IBSS. This is only used in IBSS mode. This + * function is optional if the firmware/hardware takes full care of + * TSF synchronization. + * The callback can sleep. + * + * @tx_last_beacon: Determine whether the last IBSS beacon was sent by us. + * This is needed only for IBSS mode and the result of this function is + * used to determine whether to reply to Probe Requests. + * Returns non-zero if this device sent the last beacon. + * The callback can sleep. + * + * @ampdu_action: Perform a certain A-MPDU action + * The RA/TID combination determines the destination and TID we want + * the ampdu action to be performed for. The action is defined through + * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn) + * is the first frame we expect to perform the action on. Notice + * that TX/RX_STOP can pass NULL for this parameter. + * Returns a negative error code on failure. + * The callback must be atomic. + * + * @rfkill_poll: Poll rfkill hardware state. 
If you need this, you also + * need to set wiphy->rfkill_poll to %true before registration, + * and need to call wiphy_rfkill_set_hw_state() in the callback. + * The callback can sleep. + * + * @set_coverage_class: Set slot time for given coverage class as specified + * in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout + * accordingly. This callback is not required and may sleep. + * + * @testmode_cmd: Implement a cfg80211 test mode command. + * The callback can sleep. + * + * @flush: Flush all pending frames from the hardware queue, making sure + * that the hardware queues are empty. If the parameter @drop is set + * to %true, pending frames may be dropped. The callback can sleep. + */ +struct ieee80211_ops { + int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); + int (*start)(struct ieee80211_hw *hw); + void (*stop)(struct ieee80211_hw *hw); + int (*add_interface)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + void (*remove_interface)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + int (*config)(struct ieee80211_hw *hw, u32 changed); + void (*bss_info_changed)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed); + u64 (*prepare_multicast)(struct ieee80211_hw *hw, + int mc_count, struct dev_addr_list *mc_list); + void (*configure_filter)(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast); + int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set); + int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); + void (*update_tkip_key)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *conf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key); + int (*hw_scan)(struct ieee80211_hw *hw, + struct cfg80211_scan_request *req); + void (*sw_scan_start)(struct ieee80211_hw *hw); + void (*sw_scan_complete)(struct ieee80211_hw *hw); + int (*get_stats)(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats); + void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx, + u32 *iv32, u16 *iv16); + int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value); + int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd, struct ieee80211_sta *sta); + int (*conf_tx)(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params); + u64 (*get_tsf)(struct ieee80211_hw *hw); + void (*set_tsf)(struct ieee80211_hw *hw, u64 tsf); + void (*reset_tsf)(struct ieee80211_hw *hw); + int (*tx_last_beacon)(struct ieee80211_hw *hw); + int (*ampdu_action)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn); + + void (*rfkill_poll)(struct ieee80211_hw *hw); + void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class); +#ifdef CONFIG_NL80211_TESTMODE + int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len); +#endif + void (*flush)(struct ieee80211_hw *hw, bool drop); +}; + +/** + * ieee80211_alloc_hw - Allocate a new hardware device + * + * This must be called once for each hardware device. 
The returned pointer + * must be used to refer to this device when calling other functions. + * mac80211 allocates a private data area for the driver pointed to by + * @priv in &struct ieee80211_hw, the size of this area is given as + * @priv_data_len. + * + * @priv_data_len: length of private data + * @ops: callbacks for this device + */ +struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, + const struct ieee80211_ops *ops); + +/** + * ieee80211_register_hw - Register hardware device + * + * You must call this function before any other functions in + * mac80211. Note that before a hardware can be registered, you + * need to fill the contained wiphy's information. + * + * @hw: the device to register as returned by ieee80211_alloc_hw() + */ +int ieee80211_register_hw(struct ieee80211_hw *hw); + +#ifdef CONFIG_MAC80211_LEDS +extern char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw); +extern char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw); +extern char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw); +extern char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw); +#endif +/** + * ieee80211_get_tx_led_name - get name of TX LED + * + * mac80211 creates a transmit LED trigger for each wireless hardware + * that can be used to drive LEDs if your driver registers a LED device. + * This function returns the name (or %NULL if not configured for LEDs) + * of the trigger so you can automatically link the LED device. + * + * @hw: the hardware to get the LED trigger name for + */ +static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw) +{ +#ifdef CONFIG_MAC80211_LEDS + return __ieee80211_get_tx_led_name(hw); +#else + return NULL; +#endif +} + +/** + * ieee80211_get_rx_led_name - get name of RX LED + * + * mac80211 creates a receive LED trigger for each wireless hardware + * that can be used to drive LEDs if your driver registers a LED device. + * This function returns the name (or %NULL if not configured for LEDs) + * of the trigger so you can automatically link the LED device. + * + * @hw: the hardware to get the LED trigger name for + */ +static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw) +{ +#ifdef CONFIG_MAC80211_LEDS + return __ieee80211_get_rx_led_name(hw); +#else + return NULL; +#endif +} + +/** + * ieee80211_get_assoc_led_name - get name of association LED + * + * mac80211 creates a association LED trigger for each wireless hardware + * that can be used to drive LEDs if your driver registers a LED device. + * This function returns the name (or %NULL if not configured for LEDs) + * of the trigger so you can automatically link the LED device. + * + * @hw: the hardware to get the LED trigger name for + */ +static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) +{ +#ifdef CONFIG_MAC80211_LEDS + return __ieee80211_get_assoc_led_name(hw); +#else + return NULL; +#endif +} + +/** + * ieee80211_get_radio_led_name - get name of radio LED + * + * mac80211 creates a radio change LED trigger for each wireless hardware + * that can be used to drive LEDs if your driver registers a LED device. + * This function returns the name (or %NULL if not configured for LEDs) + * of the trigger so you can automatically link the LED device. 
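+ *
+ * As an illustrative sketch (not part of mac80211; the priv->led field and
+ * LED name are hypothetical, led_classdev_register() is the generic LED
+ * class API), a driver could link the trigger to its LED device like this:
+ *
+ *	priv->led.name = "mydrv-radio";
+ *	priv->led.default_trigger = ieee80211_get_radio_led_name(hw);
+ *	led_classdev_register(wiphy_dev(hw->wiphy), &priv->led);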
+ * + * @hw: the hardware to get the LED trigger name for + */ +static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw) +{ +#ifdef CONFIG_MAC80211_LEDS + return __ieee80211_get_radio_led_name(hw); +#else + return NULL; +#endif +} + +/** + * ieee80211_unregister_hw - Unregister a hardware device + * + * This function instructs mac80211 to free allocated resources + * and unregister netdevices from the networking subsystem. + * + * @hw: the hardware to unregister + */ +void ieee80211_unregister_hw(struct ieee80211_hw *hw); + +/** + * ieee80211_free_hw - free hardware descriptor + * + * This function frees everything that was allocated, including the + * private data for the driver. You must call ieee80211_unregister_hw() + * before calling this function. + * + * @hw: the hardware to free + */ +void ieee80211_free_hw(struct ieee80211_hw *hw); + +/** + * ieee80211_restart_hw - restart hardware completely + * + * Call this function when the hardware was restarted for some reason + * (hardware error, ...) and the driver is unable to restore its state + * by itself. mac80211 assumes that at this point the driver/hardware + * is completely uninitialised and stopped, it starts the process by + * calling the ->start() operation. The driver will need to reset all + * internal state that it has prior to calling this function. + * + * @hw: the hardware to restart + */ +void ieee80211_restart_hw(struct ieee80211_hw *hw); + +/** + * ieee80211_rx - receive frame + * + * Use this function to hand received frames to mac80211. The receive + * buffer in @skb must start with an IEEE 802.11 header. + * + * This function may not be called in IRQ context. Calls to this function + * for a single hardware must be synchronized against each other. Calls to + * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be + * mixed for a single hardware. + * + * In process context use instead ieee80211_rx_ni(). + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac80211 after this call + */ +void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb); + +/** + * ieee80211_rx_irqsafe - receive frame + * + * Like ieee80211_rx() but can be called in IRQ context + * (internally defers to a tasklet.) + * + * Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not + * be mixed for a single hardware. + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac80211 after this call + */ +void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb); + +/** + * ieee80211_rx_ni - receive frame (in process context) + * + * Like ieee80211_rx() but can be called in process context + * (internally disables bottom halves). + * + * Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may + * not be mixed for a single hardware. + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac80211 after this call + */ +static inline void ieee80211_rx_ni(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + local_bh_disable(); + ieee80211_rx(hw, skb); + local_bh_enable(); +} + +/* + * The TX headroom reserved by mac80211 for its own tx_status functions. + * This is enough for the radiotap header. + */ +#define IEEE80211_TX_STATUS_HEADROOM 13 + +/** + * ieee80211_tx_status - transmit status callback + * + * Call this function for all transmitted frames after they have been + * transmitted. 
It is permissible to not call this function for + * multicast frames but this can affect statistics. + * + * This function may not be called in IRQ context. Calls to this function + * for a single hardware must be synchronized against each other. Calls + * to this function and ieee80211_tx_status_irqsafe() may not be mixed + * for a single hardware. + * + * @hw: the hardware the frame was transmitted by + * @skb: the frame that was transmitted, owned by mac80211 after this call + */ +void ieee80211_tx_status(struct ieee80211_hw *hw, + struct sk_buff *skb); + +/** + * ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback + * + * Like ieee80211_tx_status() but can be called in IRQ context + * (internally defers to a tasklet.) + * + * Calls to this function and ieee80211_tx_status() may not be mixed for a + * single hardware. + * + * @hw: the hardware the frame was transmitted by + * @skb: the frame that was transmitted, owned by mac80211 after this call + */ +void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, + struct sk_buff *skb); + +/** + * ieee80211_beacon_get_tim - beacon generation function + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @tim_offset: pointer to variable that will receive the TIM IE offset. + * Set to 0 if invalid (in non-AP modes). + * @tim_length: pointer to variable that will receive the TIM IE length, + * (including the ID and length bytes!). + * Set to 0 if invalid (in non-AP modes). + * + * If the driver implements beaconing modes, it must use this function to + * obtain the beacon frame/template. + * + * If the beacon frames are generated by the host system (i.e., not in + * hardware/firmware), the driver uses this function to get each beacon + * frame from mac80211 -- it is responsible for calling this function + * before the beacon is needed (e.g. based on hardware interrupt). + * + * If the beacon frames are generated by the device, then the driver + * must use the returned beacon as the template and change the TIM IE + * according to the current DTIM parameters/TIM bitmap. + * + * The driver is responsible for freeing the returned skb. + */ +struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 *tim_offset, u16 *tim_length); + +/** + * ieee80211_beacon_get - beacon generation function + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * See ieee80211_beacon_get_tim(). + */ +static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + return ieee80211_beacon_get_tim(hw, vif, NULL, NULL); +} + +/** + * ieee80211_pspoll_get - retrieve a PS Poll template + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Creates a PS Poll a template which can, for example, uploaded to + * hardware. The template must be updated after association so that correct + * AID, BSSID and MAC address is used. + * + * Note: Caller (or hardware) is responsible for setting the + * &IEEE80211_FCTL_PM bit. + */ +struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +/** + * ieee80211_nullfunc_get - retrieve a nullfunc template + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. 
+ * + * Creates a Nullfunc template which can, for example, uploaded to + * hardware. The template must be updated after association so that correct + * BSSID and address is used. + * + * Note: Caller (or hardware) is responsible for setting the + * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields. + */ +struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +/** + * ieee80211_probereq_get - retrieve a Probe Request template + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @ssid: SSID buffer + * @ssid_len: length of SSID + * @ie: buffer containing all IEs except SSID for the template + * @ie_len: length of the IE buffer + * + * Creates a Probe Request template which can, for example, be uploaded to + * hardware. + */ +struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len); + +/** + * ieee80211_rts_get - RTS frame generation function + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @frame: pointer to the frame that is going to be protected by the RTS. + * @frame_len: the frame length (in octets). + * @frame_txctl: &struct ieee80211_tx_info of the frame. + * @rts: The buffer where to store the RTS frame. + * + * If the RTS frames are generated by the host system (i.e., not in + * hardware/firmware), the low-level driver uses this function to receive + * the next RTS frame from the 802.11 code. The low-level is responsible + * for calling this function before and RTS frame is needed. + */ +void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const void *frame, size_t frame_len, + const struct ieee80211_tx_info *frame_txctl, + struct ieee80211_rts *rts); + +/** + * ieee80211_rts_duration - Get the duration field for an RTS frame + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @frame_len: the length of the frame that is going to be protected by the RTS. + * @frame_txctl: &struct ieee80211_tx_info of the frame. + * + * If the RTS is generated in firmware, but the host system must provide + * the duration field, the low-level driver uses this function to receive + * the duration field value in little-endian byteorder. + */ +__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, size_t frame_len, + const struct ieee80211_tx_info *frame_txctl); + +/** + * ieee80211_ctstoself_get - CTS-to-self frame generation function + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @frame: pointer to the frame that is going to be protected by the CTS-to-self. + * @frame_len: the frame length (in octets). + * @frame_txctl: &struct ieee80211_tx_info of the frame. + * @cts: The buffer where to store the CTS-to-self frame. + * + * If the CTS-to-self frames are generated by the host system (i.e., not in + * hardware/firmware), the low-level driver uses this function to receive + * the next CTS-to-self frame from the 802.11 code. The low-level is responsible + * for calling this function before and CTS-to-self frame is needed. 
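+ *
+ * Hedged usage sketch (not part of the original header), assuming frame,
+ * frame_len and the frame's tx info are already at hand and
+ * mydrv_queue_protection_frame() is a hypothetical driver helper:
+ *
+ *	struct ieee80211_cts cts;
+ *
+ *	ieee80211_ctstoself_get(hw, vif, frame, frame_len, info, &cts);
+ *	mydrv_queue_protection_frame(priv, &cts, sizeof(cts));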
+ */ +void ieee80211_ctstoself_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const void *frame, size_t frame_len, + const struct ieee80211_tx_info *frame_txctl, + struct ieee80211_cts *cts); + +/** + * ieee80211_ctstoself_duration - Get the duration field for a CTS-to-self frame + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @frame_len: the length of the frame that is going to be protected by the CTS-to-self. + * @frame_txctl: &struct ieee80211_tx_info of the frame. + * + * If the CTS-to-self is generated in firmware, but the host system must provide + * the duration field, the low-level driver uses this function to receive + * the duration field value in little-endian byteorder. + */ +__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + size_t frame_len, + const struct ieee80211_tx_info *frame_txctl); + +/** + * ieee80211_generic_frame_duration - Calculate the duration field for a frame + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @frame_len: the length of the frame. + * @rate: the rate at which the frame is going to be transmitted. + * + * Calculate the duration field of some generic frame, given its + * length and transmission rate (in 100kbps). + */ +__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + size_t frame_len, + struct ieee80211_rate *rate); + +/** + * ieee80211_get_buffered_bc - accessing buffered broadcast and multicast frames + * @hw: pointer as obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Function for accessing buffered broadcast and multicast frames. If + * hardware/firmware does not implement buffering of broadcast/multicast + * frames when power saving is used, 802.11 code buffers them in the host + * memory. The low-level driver uses this function to fetch next buffered + * frame. In most cases, this is used when generating beacon frame. This + * function returns a pointer to the next buffered skb or NULL if no more + * buffered frames are available. + * + * Note: buffered frames are returned only after DTIM beacon frame was + * generated with ieee80211_beacon_get() and the low-level driver must thus + * call ieee80211_beacon_get() first. ieee80211_get_buffered_bc() returns + * NULL if the previous generated beacon was not DTIM, so the low-level driver + * does not need to check for DTIM beacons separately and should be able to + * use common code for all beacons. + */ +struct sk_buff * +ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + +/** + * ieee80211_get_tkip_key - get a TKIP rc4 for skb + * + * This function computes a TKIP rc4 key for an skb. It computes + * a phase 1 key if needed (iv16 wraps around). This function is to + * be used by drivers which can do HW encryption but need to compute + * to phase 1/2 key in SW. + * + * @keyconf: the parameter passed with the set key + * @skb: the skb for which the key is needed + * @type: TBD + * @key: a buffer to which the key will be written + */ +void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, + struct sk_buff *skb, + enum ieee80211_tkip_key_type type, u8 *key); +/** + * ieee80211_wake_queue - wake specific queue + * @hw: pointer as obtained from ieee80211_alloc_hw(). + * @queue: queue number (counted from zero). 
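+ *
+ * Flow-control sketch (illustration only, not from the original header):
+ * a driver typically stops a queue when its hardware TX ring fills up and
+ * wakes it again from the TX completion path; ring and txq_id are
+ * hypothetical driver state:
+ *
+ *	if (mydrv_ring_full(ring))
+ *		ieee80211_stop_queue(hw, txq_id);
+ *	...
+ *	if (mydrv_ring_has_room(ring))
+ *		ieee80211_wake_queue(hw, txq_id);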
+ * + * Drivers should use this function instead of netif_wake_queue. + */ +void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue); + +/** + * ieee80211_stop_queue - stop specific queue + * @hw: pointer as obtained from ieee80211_alloc_hw(). + * @queue: queue number (counted from zero). + * + * Drivers should use this function instead of netif_stop_queue. + */ +void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue); + +/** + * ieee80211_queue_stopped - test status of the queue + * @hw: pointer as obtained from ieee80211_alloc_hw(). + * @queue: queue number (counted from zero). + * + * Drivers should use this function instead of netif_stop_queue. + */ + +int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue); + +/** + * ieee80211_stop_queues - stop all queues + * @hw: pointer as obtained from ieee80211_alloc_hw(). + * + * Drivers should use this function instead of netif_stop_queue. + */ +void ieee80211_stop_queues(struct ieee80211_hw *hw); + +/** + * ieee80211_wake_queues - wake all queues + * @hw: pointer as obtained from ieee80211_alloc_hw(). + * + * Drivers should use this function instead of netif_wake_queue. + */ +void ieee80211_wake_queues(struct ieee80211_hw *hw); + +/** + * ieee80211_scan_completed - completed hardware scan + * + * When hardware scan offload is used (i.e. the hw_scan() callback is + * assigned) this function needs to be called by the driver to notify + * mac80211 that the scan finished. + * + * @hw: the hardware that finished the scan + * @aborted: set to true if scan was aborted + */ +void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted); + +/** + * ieee80211_iterate_active_interfaces - iterate active interfaces + * + * This function iterates over the interfaces associated with a given + * hardware that are currently active and calls the callback for them. + * This function allows the iterator function to sleep, when the iterator + * function is atomic @ieee80211_iterate_active_interfaces_atomic can + * be used. + * + * @hw: the hardware struct of which the interfaces should be iterated over + * @iterator: the iterator function to call + * @data: first argument of the iterator function + */ +void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, + void (*iterator)(void *data, u8 *mac, + struct ieee80211_vif *vif), + void *data); + +/** + * ieee80211_iterate_active_interfaces_atomic - iterate active interfaces + * + * This function iterates over the interfaces associated with a given + * hardware that are currently active and calls the callback for them. + * This function requires the iterator callback function to be atomic, + * if that is not desired, use @ieee80211_iterate_active_interfaces instead. + * + * @hw: the hardware struct of which the interfaces should be iterated over + * @iterator: the iterator function to call, cannot sleep + * @data: first argument of the iterator function + */ +void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw, + void (*iterator)(void *data, + u8 *mac, + struct ieee80211_vif *vif), + void *data); + +/** + * ieee80211_queue_work - add work onto the mac80211 workqueue + * + * Drivers and mac80211 use this to add work onto the mac80211 workqueue. + * This helper ensures drivers are not queueing work when they should not be. 
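+ *
+ * Minimal sketch (an assumption, not part of the original header),
+ * queueing a work item that was set up earlier with INIT_WORK() in
+ * hypothetical driver private data:
+ *
+ *	ieee80211_queue_work(hw, &priv->restart_work);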
+ * + * @hw: the hardware struct for the interface we are adding work for + * @work: the work we want to add onto the mac80211 workqueue + */ +void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work); + +/** + * ieee80211_queue_delayed_work - add work onto the mac80211 workqueue + * + * Drivers and mac80211 use this to queue delayed work onto the mac80211 + * workqueue. + * + * @hw: the hardware struct for the interface we are adding work for + * @dwork: delayable work to queue onto the mac80211 workqueue + * @delay: number of jiffies to wait before queueing + */ +void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, + struct delayed_work *dwork, + unsigned long delay); + +/** + * ieee80211_start_tx_ba_session - Start a tx Block Ack session. + * @sta: the station for which to start a BA session + * @tid: the TID to BA on. + * + * Return: success if addBA request was sent, failure otherwise + * + * Although mac80211/low level driver/user space application can estimate + * the need to start aggregation on a certain RA/TID, the session level + * will be managed by the mac80211. + */ +int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid); + +/** + * ieee80211_start_tx_ba_cb - low level driver ready to aggregate. + * @vif: &struct ieee80211_vif pointer from the add_interface callback + * @ra: receiver address of the BA session recipient. + * @tid: the TID to BA on. + * + * This function must be called by low level driver once it has + * finished with preparations for the BA session. + */ +void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); + +/** + * ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate. + * @vif: &struct ieee80211_vif pointer from the add_interface callback + * @ra: receiver address of the BA session recipient. + * @tid: the TID to BA on. + * + * This function must be called by low level driver once it has + * finished with preparations for the BA session. + * This version of the function is IRQ-safe. + */ +void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, + u16 tid); + +/** + * ieee80211_stop_tx_ba_session - Stop a Block Ack session. + * @sta: the station whose BA session to stop + * @tid: the TID to stop BA. + * @initiator: if indicates initiator DELBA frame will be sent. + * + * Return: error if no sta with matching da found, success otherwise + * + * Although mac80211/low level driver/user space application can estimate + * the need to stop aggregation on a certain RA/TID, the session level + * will be managed by the mac80211. + */ +int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid, + enum ieee80211_back_parties initiator); + +/** + * ieee80211_stop_tx_ba_cb - low level driver ready to stop aggregate. + * @vif: &struct ieee80211_vif pointer from the add_interface callback + * @ra: receiver address of the BA session recipient. + * @tid: the desired TID to BA on. + * + * This function must be called by low level driver once it has + * finished with preparations for the BA session tear down. + */ +void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); + +/** + * ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate. + * @vif: &struct ieee80211_vif pointer from the add_interface callback + * @ra: receiver address of the BA session recipient. + * @tid: the desired TID to BA on. + * + * This function must be called by low level driver once it has + * finished with preparations for the BA session tear down. 
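+ *
+ * Sketch only (an assumption, not part of the original header): a driver
+ * that needs no deferred teardown work may complete the stop request
+ * straight from its ampdu_action() callback, with mydrv_flush_aggr_queue()
+ * standing in for driver-specific cleanup:
+ *
+ *	case IEEE80211_AMPDU_TX_STOP:
+ *		mydrv_flush_aggr_queue(priv, tid);
+ *		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ *		break;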
+ * This version of the function is IRQ-safe. + */ +void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra, + u16 tid); + +/** + * ieee80211_find_sta - find a station + * + * @vif: virtual interface to look for station on + * @addr: station's address + * + * This function must be called under RCU lock and the + * resulting pointer is only valid under RCU lock as well. + */ +struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, + const u8 *addr); + +/** + * ieee80211_find_sta_by_hw - find a station on hardware + * + * @hw: pointer as obtained from ieee80211_alloc_hw() + * @addr: station's address + * + * This function must be called under RCU lock and the + * resulting pointer is only valid under RCU lock as well. + * + * NOTE: This function should not be used! When mac80211 is converted + * internally to properly keep track of stations on multiple + * virtual interfaces, it will not always know which station to + * return here since a single address might be used by multiple + * logical stations (e.g. consider a station connecting to another + * BSSID on the same AP hardware without disconnecting first). + * + * DO NOT USE THIS FUNCTION. + */ +struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw, + const u8 *addr); + +/** + * ieee80211_sta_block_awake - block station from waking up + * @hw: the hardware + * @pubsta: the station + * @block: whether to block or unblock + * + * Some devices require that all frames that are on the queues + * for a specific station that went to sleep are flushed before + * a poll response or frames after the station woke up can be + * delivered to that it. Note that such frames must be rejected + * by the driver as filtered, with the appropriate status flag. + * + * This function allows implementing this mode in a race-free + * manner. + * + * To do this, a driver must keep track of the number of frames + * still enqueued for a specific station. If this number is not + * zero when the station goes to sleep, the driver must call + * this function to force mac80211 to consider the station to + * be asleep regardless of the station's actual state. Once the + * number of outstanding frames reaches zero, the driver must + * call this function again to unblock the station. That will + * cause mac80211 to be able to send ps-poll responses, and if + * the station queried in the meantime then frames will also + * be sent out as a result of this. Additionally, the driver + * will be notified that the station woke up some time after + * it is unblocked, regardless of whether the station actually + * woke up while blocked or not. + */ +void ieee80211_sta_block_awake(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, bool block); + +/** + * ieee80211_beacon_loss - inform hardware does not receive beacons + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * When beacon filtering is enabled with IEEE80211_HW_BEACON_FILTERING and + * IEEE80211_CONF_PS is set, the driver needs to inform whenever the + * hardware is not receiving beacons with this function. + */ +void ieee80211_beacon_loss(struct ieee80211_vif *vif); + +/* Rate control API */ + +/** + * enum rate_control_changed - flags to indicate which parameter changed + * + * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have + * changed, rate control algorithm can update its internal state if needed. 
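+ *
+ * Illustration (assumed pattern, not from the original header): a rate
+ * control algorithm's rate_update() callback can key off this flag, with
+ * myrc_update_ht_caps() standing in for algorithm-specific recalculation:
+ *
+ *	if (changed & IEEE80211_RC_HT_CHANGED)
+ *		myrc_update_ht_caps(priv_sta, sband, oper_chan_type);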
+ */ +enum rate_control_changed { + IEEE80211_RC_HT_CHANGED = BIT(0) +}; + +/** + * struct ieee80211_tx_rate_control - rate control information for/from RC algo + * + * @hw: The hardware the algorithm is invoked for. + * @sband: The band this frame is being transmitted on. + * @bss_conf: the current BSS configuration + * @reported_rate: The rate control algorithm can fill this in to indicate + * which rate should be reported to userspace as the current rate and + * used for rate calculations in the mesh network. + * @rts: whether RTS will be used for this frame because it is longer than the + * RTS threshold + * @short_preamble: whether mac80211 will request short-preamble transmission + * if the selected rate supports it + * @max_rate_idx: user-requested maximum rate (not MCS for now) + * (deprecated; this will be removed once drivers get updated to use + * rate_idx_mask) + * @rate_idx_mask: user-requested rate mask (not MCS for now) + * @skb: the skb that will be transmitted, the control information in it needs + * to be filled in + * @ap: whether this frame is sent out in AP mode + */ +struct ieee80211_tx_rate_control { + struct ieee80211_hw *hw; + struct ieee80211_supported_band *sband; + struct ieee80211_bss_conf *bss_conf; + struct sk_buff *skb; + struct ieee80211_tx_rate reported_rate; + bool rts, short_preamble; + u8 max_rate_idx; + u32 rate_idx_mask; + bool ap; +}; + +struct rate_control_ops { + struct module *module; + const char *name; + void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir); + void (*free)(void *priv); + + void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp); + void (*rate_init)(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta); + void (*rate_update)(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, + void *priv_sta, u32 changed, + enum nl80211_channel_type oper_chan_type); + void (*free_sta)(void *priv, struct ieee80211_sta *sta, + void *priv_sta); + + void (*tx_status)(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb); + void (*get_rate)(void *priv, struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_rate_control *txrc); + + void (*add_sta_debugfs)(void *priv, void *priv_sta, + struct dentry *dir); + void (*remove_sta_debugfs)(void *priv, void *priv_sta); +}; + +static inline int rate_supported(struct ieee80211_sta *sta, + enum ieee80211_band band, + int index) +{ + return (sta == NULL || sta->supp_rates[band] & BIT(index)); +} + +/** + * rate_control_send_low - helper for drivers for management/no-ack frames + * + * Rate control algorithms that agree to use the lowest rate to + * send management frames and NO_ACK data with the respective hw + * retries should use this in the beginning of their mac80211 get_rate + * callback. If true is returned the rate control can simply return. + * If false is returned we guarantee that sta and sta and priv_sta is + * not null. + * + * Rate control algorithms wishing to do more intelligent selection of + * rate for multicast/broadcast frames may choose to not use this. + * + * @sta: &struct ieee80211_sta pointer to the target destination. Note + * that this may be null. + * @priv_sta: private rate control structure. This may be null. + * @txrc: rate control information we sholud populate for mac80211. 
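+ *
+ * Typical use, sketched here as an assumption rather than taken from the
+ * original header, at the top of an algorithm's get_rate() callback
+ * (myrc_select_data_rate() is hypothetical):
+ *
+ *	static void myrc_get_rate(void *priv, struct ieee80211_sta *sta,
+ *				  void *priv_sta,
+ *				  struct ieee80211_tx_rate_control *txrc)
+ *	{
+ *		if (rate_control_send_low(sta, priv_sta, txrc))
+ *			return;
+ *		myrc_select_data_rate(priv, priv_sta, txrc);
+ *	}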
+ */ +bool rate_control_send_low(struct ieee80211_sta *sta, + void *priv_sta, + struct ieee80211_tx_rate_control *txrc); + + +static inline s8 +rate_lowest_index(struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta) +{ + int i; + + for (i = 0; i < sband->n_bitrates; i++) + if (rate_supported(sta, sband->band, i)) + return i; + + /* warn when we cannot find a rate. */ + WARN_ON(1); + + return 0; +} + +static inline +bool rate_usable_index_exists(struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta) +{ + unsigned int i; + + for (i = 0; i < sband->n_bitrates; i++) + if (rate_supported(sta, sband->band, i)) + return true; + return false; +} + +int ieee80211_rate_control_register(struct rate_control_ops *ops); +void ieee80211_rate_control_unregister(struct rate_control_ops *ops); + +static inline bool +conf_is_ht20(struct ieee80211_conf *conf) +{ + return conf->channel_type == NL80211_CHAN_HT20; +} + +static inline bool +conf_is_ht40_minus(struct ieee80211_conf *conf) +{ + return conf->channel_type == NL80211_CHAN_HT40MINUS; +} + +static inline bool +conf_is_ht40_plus(struct ieee80211_conf *conf) +{ + return conf->channel_type == NL80211_CHAN_HT40PLUS; +} + +static inline bool +conf_is_ht40(struct ieee80211_conf *conf) +{ + return conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf); +} + +static inline bool +conf_is_ht(struct ieee80211_conf *conf) +{ + return conf->channel_type != NL80211_CHAN_NO_HT; +} + +#endif /* MAC80211_H */ diff --git a/include/net/regulatory.h b/include/net/regulatory.h new file mode 100644 index 0000000..f873ee3 --- /dev/null +++ b/include/net/regulatory.h @@ -0,0 +1,102 @@ +#ifndef __NET_REGULATORY_H +#define __NET_REGULATORY_H +/* + * regulatory support structures + * + * Copyright 2008-2009 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + + +/** + * enum environment_cap - Environment parsed from country IE + * @ENVIRON_ANY: indicates country IE applies to both indoor and + * outdoor operation. + * @ENVIRON_INDOOR: indicates country IE applies only to indoor operation + * @ENVIRON_OUTDOOR: indicates country IE applies only to outdoor operation + */ +enum environment_cap { + ENVIRON_ANY, + ENVIRON_INDOOR, + ENVIRON_OUTDOOR, +}; + +/** + * struct regulatory_request - used to keep track of regulatory requests + * + * @wiphy_idx: this is set if this request's initiator is + * %REGDOM_SET_BY_COUNTRY_IE or %REGDOM_SET_BY_DRIVER. This + * can be used by the wireless core to deal with conflicts + * and potentially inform users of which devices specifically + * cased the conflicts. + * @initiator: indicates who sent this request, could be any of + * of those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*) + * @alpha2: the ISO / IEC 3166 alpha2 country code of the requested + * regulatory domain. We have a few special codes: + * 00 - World regulatory domain + * 99 - built by driver but a specific alpha2 cannot be determined + * 98 - result of an intersection between two regulatory domains + * 97 - regulatory domain has not yet been configured + * @intersect: indicates whether the wireless core should intersect + * the requested regulatory domain with the presently set regulatory + * domain. 
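+ *	As an added illustration (not in the original comment): if the
+ *	current domain allows channels 1-13 and the request allows only
+ *	channels 1-11, the intersected result permits channels 1-11 and
+ *	takes the more restrictive power limits of the two domains.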
+ * @country_ie_checksum: checksum of the last processed and accepted + * country IE + * @country_ie_env: lets us know if the AP is telling us we are outdoor, + * indoor, or if it doesn't matter + * @list: used to insert into the reg_requests_list linked list + */ +struct regulatory_request { + int wiphy_idx; + enum nl80211_reg_initiator initiator; + char alpha2[2]; + bool intersect; + u32 country_ie_checksum; + enum environment_cap country_ie_env; + struct list_head list; +}; + +struct ieee80211_freq_range { + u32 start_freq_khz; + u32 end_freq_khz; + u32 max_bandwidth_khz; +}; + +struct ieee80211_power_rule { + u32 max_antenna_gain; + u32 max_eirp; +}; + +struct ieee80211_reg_rule { + struct ieee80211_freq_range freq_range; + struct ieee80211_power_rule power_rule; + u32 flags; +}; + +struct ieee80211_regdomain { + u32 n_reg_rules; + char alpha2[2]; + struct ieee80211_reg_rule reg_rules[]; +}; + +#define MHZ_TO_KHZ(freq) ((freq) * 1000) +#define KHZ_TO_MHZ(freq) ((freq) / 1000) +#define DBI_TO_MBI(gain) ((gain) * 100) +#define MBI_TO_DBI(gain) ((gain) / 100) +#define DBM_TO_MBM(gain) ((gain) * 100) +#define MBM_TO_DBM(gain) ((gain) / 100) + +#define REG_RULE(start, end, bw, gain, eirp, reg_flags) \ +{ \ + .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \ + .freq_range.end_freq_khz = MHZ_TO_KHZ(end), \ + .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw), \ + .power_rule.max_antenna_gain = DBI_TO_MBI(gain),\ + .power_rule.max_eirp = DBM_TO_MBM(eirp), \ + .flags = reg_flags, \ +} + +#endif diff --git a/include/net/wext.h b/include/net/wext.h new file mode 100644 index 0000000..4f6e742 --- /dev/null +++ b/include/net/wext.h @@ -0,0 +1,60 @@ +#ifndef __NET_WEXT_H +#define __NET_WEXT_H + +#include + +struct net; + +#ifdef CONFIG_WEXT_CORE +extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, + void __user *arg); +extern int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, + unsigned long arg); + +extern struct iw_statistics *get_wireless_stats(struct net_device *dev); +extern int call_commit_handler(struct net_device *dev); +#else +static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, + void __user *arg) +{ + return -EINVAL; +} +static inline int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, + unsigned long arg) +{ + return -EINVAL; +} +#endif + +#ifdef CONFIG_WEXT_PROC +extern int wext_proc_init(struct net *net); +extern void wext_proc_exit(struct net *net); +#else +static inline int wext_proc_init(struct net *net) +{ + return 0; +} +static inline void wext_proc_exit(struct net *net) +{ + return; +} +#endif + +#ifdef CONFIG_WEXT_PRIV +int ioctl_private_call(struct net_device *dev, struct iwreq *iwr, + unsigned int cmd, struct iw_request_info *info, + iw_handler handler); +int compat_private_call(struct net_device *dev, struct iwreq *iwr, + unsigned int cmd, struct iw_request_info *info, + iw_handler handler); +int iw_handler_get_private(struct net_device * dev, + struct iw_request_info * info, + union iwreq_data * wrqu, + char * extra); +#else +#define ioctl_private_call NULL +#define compat_private_call NULL +#endif + + +#endif /* __NET_WEXT_H */ diff --git a/linux-next-cherry-picks/README b/linux-next-cherry-picks/README new file mode 100644 index 0000000..f579040 --- /dev/null +++ b/linux-next-cherry-picks/README @@ -0,0 +1,16 @@ +compat-wireless linux-next chery picked patches +=============================================== + +We work hard to get patches in time onto the stable +tree but sometimes a 
few things slip out, and sometimes a +stable fix is simply too big in size to be merged into +stable. In such cases though we do believe some of these +patches are still relatively important to either enable new +hardware which escaped the late rc cycles or to correct some +behaviour which might be too late for stable. We apply +these patches by default as they will be supported on these +releases. + +The larger the number of patches you see in this directory +the more we should be ashamed. We should strive to reduce this +to 0 all the time. diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile new file mode 100644 index 0000000..d1e433f --- /dev/null +++ b/net/bluetooth/Makefile @@ -0,0 +1,13 @@ +# +# Makefile for the Linux Bluetooth subsystem. +# + +obj-$(CONFIG_BT) += bluetooth.o +obj-$(CONFIG_BT_L2CAP) += l2cap.o +obj-$(CONFIG_BT_SCO) += sco.o +obj-$(CONFIG_BT_RFCOMM) += rfcomm/ +obj-$(CONFIG_BT_BNEP) += bnep/ +obj-$(CONFIG_BT_CMTP) += cmtp/ +obj-$(CONFIG_BT_HIDP) += hidp/ + +bluetooth-objs := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o hci_sysfs.o lib.o diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c new file mode 100644 index 0000000..2594d00 --- /dev/null +++ b/net/bluetooth/af_bluetooth.c @@ -0,0 +1,471 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth address family and sockets. 
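+
+   This file provides the common AF_BLUETOOTH socket infrastructure:
+   protocol registration (bt_sock_register/bt_sock_unregister), socket
+   creation dispatch, the accept-queue helpers, and the shared recvmsg,
+   poll, ioctl and state-wait implementations.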
*/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define VERSION "2.15" + +/* Bluetooth sockets */ +#define BT_MAX_PROTO 8 +static const struct net_proto_family *bt_proto[BT_MAX_PROTO]; +static DEFINE_RWLOCK(bt_proto_lock); + +static struct lock_class_key bt_lock_key[BT_MAX_PROTO]; +static const char *const bt_key_strings[BT_MAX_PROTO] = { + "sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP", + "sk_lock-AF_BLUETOOTH-BTPROTO_HCI", + "sk_lock-AF_BLUETOOTH-BTPROTO_SCO", + "sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM", + "sk_lock-AF_BLUETOOTH-BTPROTO_BNEP", + "sk_lock-AF_BLUETOOTH-BTPROTO_CMTP", + "sk_lock-AF_BLUETOOTH-BTPROTO_HIDP", + "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP", +}; + +static struct lock_class_key bt_slock_key[BT_MAX_PROTO]; +static const char *const bt_slock_key_strings[BT_MAX_PROTO] = { + "slock-AF_BLUETOOTH-BTPROTO_L2CAP", + "slock-AF_BLUETOOTH-BTPROTO_HCI", + "slock-AF_BLUETOOTH-BTPROTO_SCO", + "slock-AF_BLUETOOTH-BTPROTO_RFCOMM", + "slock-AF_BLUETOOTH-BTPROTO_BNEP", + "slock-AF_BLUETOOTH-BTPROTO_CMTP", + "slock-AF_BLUETOOTH-BTPROTO_HIDP", + "slock-AF_BLUETOOTH-BTPROTO_AVDTP", +}; + +static inline void bt_sock_reclassify_lock(struct socket *sock, int proto) +{ + struct sock *sk = sock->sk; + + if (!sk) + return; + + BUG_ON(sock_owned_by_user(sk)); + + sock_lock_init_class_and_name(sk, + bt_slock_key_strings[proto], &bt_slock_key[proto], + bt_key_strings[proto], &bt_lock_key[proto]); +} + +int bt_sock_register(int proto, const struct net_proto_family *ops) +{ + int err = 0; + + if (proto < 0 || proto >= BT_MAX_PROTO) + return -EINVAL; + + write_lock(&bt_proto_lock); + + if (bt_proto[proto]) + err = -EEXIST; + else + bt_proto[proto] = ops; + + write_unlock(&bt_proto_lock); + + return err; +} +EXPORT_SYMBOL(bt_sock_register); + +int bt_sock_unregister(int proto) +{ + int err = 0; + + if (proto < 0 || proto >= BT_MAX_PROTO) + return -EINVAL; + + write_lock(&bt_proto_lock); + + if (!bt_proto[proto]) + err = -ENOENT; + else + bt_proto[proto] = NULL; + + write_unlock(&bt_proto_lock); + + return err; +} +EXPORT_SYMBOL(bt_sock_unregister); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) +static int bt_sock_create(struct net *net, struct socket *sock, int proto, + int kern) +#else +static int bt_sock_create(struct net *net, struct socket *sock, int proto) +#endif +{ + int err; + + if (net != &init_net) + return -EAFNOSUPPORT; + + if (proto < 0 || proto >= BT_MAX_PROTO) + return -EINVAL; + + if (!bt_proto[proto]) + request_module("bt-proto-%d", proto); + + err = -EPROTONOSUPPORT; + + read_lock(&bt_proto_lock); + + if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + err = bt_proto[proto]->create(net, sock, proto, kern); +#else + err = bt_proto[proto]->create(net, sock, proto); +#endif + bt_sock_reclassify_lock(sock, proto); + module_put(bt_proto[proto]->owner); + } + + read_unlock(&bt_proto_lock); + + return err; +} + +void bt_sock_link(struct bt_sock_list *l, struct sock *sk) +{ + write_lock_bh(&l->lock); + sk_add_node(sk, &l->head); + write_unlock_bh(&l->lock); +} +EXPORT_SYMBOL(bt_sock_link); + +void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk) +{ + write_lock_bh(&l->lock); + sk_del_node_init(sk); + write_unlock_bh(&l->lock); +} +EXPORT_SYMBOL(bt_sock_unlink); + +void bt_accept_enqueue(struct sock *parent, struct sock *sk) +{ + BT_DBG("parent %p, sk %p", parent, sk); + + sock_hold(sk); + list_add_tail(&bt_sk(sk)->accept_q, 
&bt_sk(parent)->accept_q); + bt_sk(sk)->parent = parent; + parent->sk_ack_backlog++; +} +EXPORT_SYMBOL(bt_accept_enqueue); + +void bt_accept_unlink(struct sock *sk) +{ + BT_DBG("sk %p state %d", sk, sk->sk_state); + + list_del_init(&bt_sk(sk)->accept_q); + bt_sk(sk)->parent->sk_ack_backlog--; + bt_sk(sk)->parent = NULL; + sock_put(sk); +} +EXPORT_SYMBOL(bt_accept_unlink); + +struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) +{ + struct list_head *p, *n; + struct sock *sk; + + BT_DBG("parent %p", parent); + + list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { + sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); + + lock_sock(sk); + + /* FIXME: Is this check still needed */ + if (sk->sk_state == BT_CLOSED) { + release_sock(sk); + bt_accept_unlink(sk); + continue; + } + + if (sk->sk_state == BT_CONNECTED || !newsock || + bt_sk(parent)->defer_setup) { + bt_accept_unlink(sk); + if (newsock) + sock_graft(sk, newsock); + release_sock(sk); + return sk; + } + + release_sock(sk); + } + return NULL; +} +EXPORT_SYMBOL(bt_accept_dequeue); + +int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len, int flags) +{ + int noblock = flags & MSG_DONTWAIT; + struct sock *sk = sock->sk; + struct sk_buff *skb; + size_t copied; + int err; + + BT_DBG("sock %p sk %p len %zu", sock, sk, len); + + if (flags & (MSG_OOB)) + return -EOPNOTSUPP; + + if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) { + if (sk->sk_shutdown & RCV_SHUTDOWN) + return 0; + return err; + } + + msg->msg_namelen = 0; + + copied = skb->len; + if (len < copied) { + msg->msg_flags |= MSG_TRUNC; + copied = len; + } + + skb_reset_transport_header(skb); + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + if (err == 0) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + sock_recv_ts_and_drops(msg, sk, skb); +#else + sock_recv_timestamp(msg, sk, skb); +#endif + + skb_free_datagram(sk, skb); + + return err ? 
: copied; +} +EXPORT_SYMBOL(bt_sock_recvmsg); + +static inline unsigned int bt_accept_poll(struct sock *parent) +{ + struct list_head *p, *n; + struct sock *sk; + + list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { + sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); + if (sk->sk_state == BT_CONNECTED || + (bt_sk(parent)->defer_setup && + sk->sk_state == BT_CONNECT2)) + return POLLIN | POLLRDNORM; + } + + return 0; +} + +unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait) +{ + struct sock *sk = sock->sk; + unsigned int mask = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + poll_wait(file, sk->sk_sleep, wait); + + if (sk->sk_state == BT_LISTEN) + return bt_accept_poll(sk); + + if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) + mask |= POLLERR; + + if (sk->sk_shutdown & RCV_SHUTDOWN) + mask |= POLLRDHUP; + + if (sk->sk_shutdown == SHUTDOWN_MASK) + mask |= POLLHUP; + + if (!skb_queue_empty(&sk->sk_receive_queue) || + (sk->sk_shutdown & RCV_SHUTDOWN)) + mask |= POLLIN | POLLRDNORM; + + if (sk->sk_state == BT_CLOSED) + mask |= POLLHUP; + + if (sk->sk_state == BT_CONNECT || + sk->sk_state == BT_CONNECT2 || + sk->sk_state == BT_CONFIG) + return mask; + + if (sock_writeable(sk)) + mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + else + set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + + return mask; +} +EXPORT_SYMBOL(bt_sock_poll); + +int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct sock *sk = sock->sk; + struct sk_buff *skb; + long amount; + int err; + + BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); + + switch (cmd) { + case TIOCOUTQ: + if (sk->sk_state == BT_LISTEN) + return -EINVAL; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) + amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); +#else + amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); +#endif + if (amount < 0) + amount = 0; + err = put_user(amount, (int __user *) arg); + break; + + case TIOCINQ: + if (sk->sk_state == BT_LISTEN) + return -EINVAL; + + lock_sock(sk); + skb = skb_peek(&sk->sk_receive_queue); + amount = skb ? 
skb->len : 0; + release_sock(sk); + err = put_user(amount, (int __user *) arg); + break; + + case SIOCGSTAMP: + err = sock_get_timestamp(sk, (struct timeval __user *) arg); + break; + + case SIOCGSTAMPNS: + err = sock_get_timestampns(sk, (struct timespec __user *) arg); + break; + + default: + err = -ENOIOCTLCMD; + break; + } + + return err; +} +EXPORT_SYMBOL(bt_sock_ioctl); + +int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) +{ + DECLARE_WAITQUEUE(wait, current); + int err = 0; + + BT_DBG("sk %p", sk); + + add_wait_queue(sk->sk_sleep, &wait); + while (sk->sk_state != state) { + set_current_state(TASK_INTERRUPTIBLE); + + if (!timeo) { + err = -EINPROGRESS; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); + + err = sock_error(sk); + if (err) + break; + } + set_current_state(TASK_RUNNING); + remove_wait_queue(sk->sk_sleep, &wait); + return err; +} +EXPORT_SYMBOL(bt_sock_wait_state); + +static struct net_proto_family bt_sock_family_ops = { + .owner = THIS_MODULE, + .family = PF_BLUETOOTH, + .create = bt_sock_create, +}; + +static int __init bt_init(void) +{ + int err; + + BT_INFO("Core ver %s", VERSION); + + err = bt_sysfs_init(); + if (err < 0) + return err; + + err = sock_register(&bt_sock_family_ops); + if (err < 0) { + bt_sysfs_cleanup(); + return err; + } + + BT_INFO("HCI device and connection manager initialized"); + + hci_sock_init(); + + return 0; +} + +static void __exit bt_exit(void) +{ + hci_sock_cleanup(); + + sock_unregister(PF_BLUETOOTH); + + bt_sysfs_cleanup(); +} + +subsys_initcall(bt_init); +module_exit(bt_exit); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth Core ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETPROTO(PF_BLUETOOTH); diff --git a/net/bluetooth/bnep/Makefile b/net/bluetooth/bnep/Makefile new file mode 100644 index 0000000..c7821e7 --- /dev/null +++ b/net/bluetooth/bnep/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Linux Bluetooth BNEP layer. +# + +obj-$(CONFIG_BT_BNEP) += bnep.o + +bnep-objs := core.o sock.o netdev.o diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h new file mode 100644 index 0000000..0d9e506 --- /dev/null +++ b/net/bluetooth/bnep/bnep.h @@ -0,0 +1,179 @@ +/* + BNEP protocol definition for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002 Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License, version 2, as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#ifndef _BNEP_H +#define _BNEP_H + +#include +#include +#include + +// Limits +#define BNEP_MAX_PROTO_FILTERS 5 +#define BNEP_MAX_MULTICAST_FILTERS 20 + +// UUIDs +#define BNEP_BASE_UUID 0x0000000000001000800000805F9B34FB +#define BNEP_UUID16 0x02 +#define BNEP_UUID32 0x04 +#define BNEP_UUID128 0x16 + +#define BNEP_SVC_PANU 0x1115 +#define BNEP_SVC_NAP 0x1116 +#define BNEP_SVC_GN 0x1117 + +// Packet types +#define BNEP_GENERAL 0x00 +#define BNEP_CONTROL 0x01 +#define BNEP_COMPRESSED 0x02 +#define BNEP_COMPRESSED_SRC_ONLY 0x03 +#define BNEP_COMPRESSED_DST_ONLY 0x04 + +// Control types +#define BNEP_CMD_NOT_UNDERSTOOD 0x00 +#define BNEP_SETUP_CONN_REQ 0x01 +#define BNEP_SETUP_CONN_RSP 0x02 +#define BNEP_FILTER_NET_TYPE_SET 0x03 +#define BNEP_FILTER_NET_TYPE_RSP 0x04 +#define BNEP_FILTER_MULTI_ADDR_SET 0x05 +#define BNEP_FILTER_MULTI_ADDR_RSP 0x06 + +// Extension types +#define BNEP_EXT_CONTROL 0x00 + +// Response messages +#define BNEP_SUCCESS 0x00 + +#define BNEP_CONN_INVALID_DST 0x01 +#define BNEP_CONN_INVALID_SRC 0x02 +#define BNEP_CONN_INVALID_SVC 0x03 +#define BNEP_CONN_NOT_ALLOWED 0x04 + +#define BNEP_FILTER_UNSUPPORTED_REQ 0x01 +#define BNEP_FILTER_INVALID_RANGE 0x02 +#define BNEP_FILTER_INVALID_MCADDR 0x02 +#define BNEP_FILTER_LIMIT_REACHED 0x03 +#define BNEP_FILTER_DENIED_SECURITY 0x04 + +// L2CAP settings +#define BNEP_MTU 1691 +#define BNEP_PSM 0x0f +#define BNEP_FLUSH_TO 0xffff +#define BNEP_CONNECT_TO 15 +#define BNEP_FILTER_TO 15 + +// Headers +#define BNEP_TYPE_MASK 0x7f +#define BNEP_EXT_HEADER 0x80 + +struct bnep_setup_conn_req { + __u8 type; + __u8 ctrl; + __u8 uuid_size; + __u8 service[0]; +} __attribute__((packed)); + +struct bnep_set_filter_req { + __u8 type; + __u8 ctrl; + __be16 len; + __u8 list[0]; +} __attribute__((packed)); + +struct bnep_control_rsp { + __u8 type; + __u8 ctrl; + __be16 resp; +} __attribute__((packed)); + +struct bnep_ext_hdr { + __u8 type; + __u8 len; + __u8 data[0]; +} __attribute__((packed)); + +/* BNEP ioctl defines */ +#define BNEPCONNADD _IOW('B', 200, int) +#define BNEPCONNDEL _IOW('B', 201, int) +#define BNEPGETCONNLIST _IOR('B', 210, int) +#define BNEPGETCONNINFO _IOR('B', 211, int) + +struct bnep_connadd_req { + int sock; // Connected socket + __u32 flags; + __u16 role; + char device[16]; // Name of the Ethernet device +}; + +struct bnep_conndel_req { + __u32 flags; + __u8 dst[ETH_ALEN]; +}; + +struct bnep_conninfo { + __u32 flags; + __u16 role; + __u16 state; + __u8 dst[ETH_ALEN]; + char device[16]; +}; + +struct bnep_connlist_req { + __u32 cnum; + struct bnep_conninfo __user *ci; +}; + +struct bnep_proto_filter { + __u16 start; + __u16 end; +}; + +int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock); +int bnep_del_connection(struct bnep_conndel_req *req); +int bnep_get_connlist(struct bnep_connlist_req *req); +int bnep_get_conninfo(struct bnep_conninfo *ci); + +// BNEP sessions +struct bnep_session { + struct list_head list; + + unsigned int role; + unsigned long state; + unsigned long flags; + atomic_t killed; + + struct ethhdr eh; + struct msghdr msg; + + struct bnep_proto_filter proto_filter[BNEP_MAX_PROTO_FILTERS]; + unsigned long long mc_filter; + + struct socket *sock; + struct net_device *dev; +}; + +void bnep_net_setup(struct net_device *dev); +int bnep_sock_init(void); +void 
bnep_sock_cleanup(void); + +static inline int bnep_mc_hash(__u8 *addr) +{ + return (crc32_be(~0, addr, ETH_ALEN) >> 26); +} + +#endif diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c new file mode 100644 index 0000000..191d168 --- /dev/null +++ b/net/bluetooth/bnep/core.c @@ -0,0 +1,747 @@ +/* + BNEP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2001-2002 Inventel Systemes + Written 2001-2002 by + Clément Moreau + David Libault + + Copyright (C) 2002 Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include + +#include "bnep.h" + +#define VERSION "1.3" + +static int compress_src = 1; +static int compress_dst = 1; + +static LIST_HEAD(bnep_session_list); +static DECLARE_RWSEM(bnep_session_sem); + +static struct bnep_session *__bnep_get_session(u8 *dst) +{ + struct bnep_session *s; + struct list_head *p; + + BT_DBG(""); + + list_for_each(p, &bnep_session_list) { + s = list_entry(p, struct bnep_session, list); + if (!compare_ether_addr(dst, s->eh.h_source)) + return s; + } + return NULL; +} + +static void __bnep_link_session(struct bnep_session *s) +{ + /* It's safe to call __module_get() here because sessions are added + by the socket layer which has to hold the reference to this module. 
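+	   The matching module_put() is done in __bnep_unlink_session() when
+	   the session is removed from the list.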
+ */ + __module_get(THIS_MODULE); + list_add(&s->list, &bnep_session_list); +} + +static void __bnep_unlink_session(struct bnep_session *s) +{ + list_del(&s->list); + module_put(THIS_MODULE); +} + +static int bnep_send(struct bnep_session *s, void *data, size_t len) +{ + struct socket *sock = s->sock; + struct kvec iv = { data, len }; + + return kernel_sendmsg(sock, &s->msg, &iv, 1, len); +} + +static int bnep_send_rsp(struct bnep_session *s, u8 ctrl, u16 resp) +{ + struct bnep_control_rsp rsp; + rsp.type = BNEP_CONTROL; + rsp.ctrl = ctrl; + rsp.resp = htons(resp); + return bnep_send(s, &rsp, sizeof(rsp)); +} + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER +static inline void bnep_set_default_proto_filter(struct bnep_session *s) +{ + /* (IPv4, ARP) */ + s->proto_filter[0].start = ETH_P_IP; + s->proto_filter[0].end = ETH_P_ARP; + /* (RARP, AppleTalk) */ + s->proto_filter[1].start = ETH_P_RARP; + s->proto_filter[1].end = ETH_P_AARP; + /* (IPX, IPv6) */ + s->proto_filter[2].start = ETH_P_IPX; + s->proto_filter[2].end = ETH_P_IPV6; +} +#endif + +static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len) +{ + int n; + + if (len < 2) + return -EILSEQ; + + n = get_unaligned_be16(data); + data++; len -= 2; + + if (len < n) + return -EILSEQ; + + BT_DBG("filter len %d", n); + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER + n /= 4; + if (n <= BNEP_MAX_PROTO_FILTERS) { + struct bnep_proto_filter *f = s->proto_filter; + int i; + + for (i = 0; i < n; i++) { + f[i].start = get_unaligned_be16(data++); + f[i].end = get_unaligned_be16(data++); + + BT_DBG("proto filter start %d end %d", + f[i].start, f[i].end); + } + + if (i < BNEP_MAX_PROTO_FILTERS) + memset(f + i, 0, sizeof(*f)); + + if (n == 0) + bnep_set_default_proto_filter(s); + + bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_SUCCESS); + } else { + bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_FILTER_LIMIT_REACHED); + } +#else + bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_FILTER_UNSUPPORTED_REQ); +#endif + return 0; +} + +static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len) +{ + int n; + + if (len < 2) + return -EILSEQ; + + n = get_unaligned_be16(data); + data += 2; len -= 2; + + if (len < n) + return -EILSEQ; + + BT_DBG("filter len %d", n); + +#ifdef CONFIG_BT_BNEP_MC_FILTER + n /= (ETH_ALEN * 2); + + if (n > 0) { + s->mc_filter = 0; + + /* Always send broadcast */ + set_bit(bnep_mc_hash(s->dev->broadcast), (ulong *) &s->mc_filter); + + /* Add address ranges to the multicast hash */ + for (; n > 0; n--) { + u8 a1[6], *a2; + + memcpy(a1, data, ETH_ALEN); data += ETH_ALEN; + a2 = data; data += ETH_ALEN; + + BT_DBG("mc filter %s -> %s", + batostr((void *) a1), batostr((void *) a2)); + + #define INCA(a) { int i = 5; while (i >=0 && ++a[i--] == 0); } + + /* Iterate from a1 to a2 */ + set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); + while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) { + INCA(a1); + set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); + } + } + } + + BT_DBG("mc filter hash 0x%llx", s->mc_filter); + + bnep_send_rsp(s, BNEP_FILTER_MULTI_ADDR_RSP, BNEP_SUCCESS); +#else + bnep_send_rsp(s, BNEP_FILTER_MULTI_ADDR_RSP, BNEP_FILTER_UNSUPPORTED_REQ); +#endif + return 0; +} + +static int bnep_rx_control(struct bnep_session *s, void *data, int len) +{ + u8 cmd = *(u8 *)data; + int err = 0; + + data++; len--; + + switch (cmd) { + case BNEP_CMD_NOT_UNDERSTOOD: + case BNEP_SETUP_CONN_RSP: + case BNEP_FILTER_NET_TYPE_RSP: + case BNEP_FILTER_MULTI_ADDR_RSP: + /* Ignore these for now */ + break; + + case 
BNEP_FILTER_NET_TYPE_SET: + err = bnep_ctrl_set_netfilter(s, data, len); + break; + + case BNEP_FILTER_MULTI_ADDR_SET: + err = bnep_ctrl_set_mcfilter(s, data, len); + break; + + case BNEP_SETUP_CONN_REQ: + err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, BNEP_CONN_NOT_ALLOWED); + break; + + default: { + u8 pkt[3]; + pkt[0] = BNEP_CONTROL; + pkt[1] = BNEP_CMD_NOT_UNDERSTOOD; + pkt[2] = cmd; + bnep_send(s, pkt, sizeof(pkt)); + } + break; + } + + return err; +} + +static int bnep_rx_extension(struct bnep_session *s, struct sk_buff *skb) +{ + struct bnep_ext_hdr *h; + int err = 0; + + do { + h = (void *) skb->data; + if (!skb_pull(skb, sizeof(*h))) { + err = -EILSEQ; + break; + } + + BT_DBG("type 0x%x len %d", h->type, h->len); + + switch (h->type & BNEP_TYPE_MASK) { + case BNEP_EXT_CONTROL: + bnep_rx_control(s, skb->data, skb->len); + break; + + default: + /* Unknown extension, skip it. */ + break; + } + + if (!skb_pull(skb, h->len)) { + err = -EILSEQ; + break; + } + } while (!err && (h->type & BNEP_EXT_HEADER)); + + return err; +} + +static u8 __bnep_rx_hlen[] = { + ETH_HLEN, /* BNEP_GENERAL */ + 0, /* BNEP_CONTROL */ + 2, /* BNEP_COMPRESSED */ + ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */ + ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ +}; +#define BNEP_RX_TYPES (sizeof(__bnep_rx_hlen) - 1) + +static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) +{ + struct net_device *dev = s->dev; + struct sk_buff *nskb; + u8 type; + + dev->stats.rx_bytes += skb->len; + + type = *(u8 *) skb->data; skb_pull(skb, 1); + + if ((type & BNEP_TYPE_MASK) > BNEP_RX_TYPES) + goto badframe; + + if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) { + bnep_rx_control(s, skb->data, skb->len); + kfree_skb(skb); + return 0; + } + + skb_reset_mac_header(skb); + + /* Verify and pull out header */ + if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK])) + goto badframe; + + s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); + + if (type & BNEP_EXT_HEADER) { + if (bnep_rx_extension(s, skb) < 0) + goto badframe; + } + + /* Strip 802.1p header */ + if (ntohs(s->eh.h_proto) == 0x8100) { + if (!skb_pull(skb, 4)) + goto badframe; + s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); + } + + /* We have to alloc new skb and copy data here :(. Because original skb + * may not be modified and because of the alignment requirements. 
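+	 * (The skb_reserve(nskb, 2) below offsets the rebuilt Ethernet
+	 * header by two bytes so that the IP header following the 14-byte
+	 * Ethernet header ends up 4-byte aligned, the usual NET_IP_ALIGN
+	 * convention.)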
*/ + nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); + if (!nskb) { + dev->stats.rx_dropped++; + kfree_skb(skb); + return -ENOMEM; + } + skb_reserve(nskb, 2); + + /* Decompress header and construct ether frame */ + switch (type & BNEP_TYPE_MASK) { + case BNEP_COMPRESSED: + memcpy(__skb_put(nskb, ETH_HLEN), &s->eh, ETH_HLEN); + break; + + case BNEP_COMPRESSED_SRC_ONLY: + memcpy(__skb_put(nskb, ETH_ALEN), s->eh.h_dest, ETH_ALEN); + memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), ETH_ALEN); + put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); + break; + + case BNEP_COMPRESSED_DST_ONLY: + memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), + ETH_ALEN); + memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source, + ETH_ALEN + 2); + break; + + case BNEP_GENERAL: + memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb), + ETH_ALEN * 2); + put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); + break; + } + + skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); + kfree_skb(skb); + + dev->stats.rx_packets++; + nskb->ip_summed = CHECKSUM_NONE; + nskb->protocol = eth_type_trans(nskb, dev); + netif_rx_ni(nskb); + return 0; + +badframe: + dev->stats.rx_errors++; + kfree_skb(skb); + return 0; +} + +static u8 __bnep_tx_types[] = { + BNEP_GENERAL, + BNEP_COMPRESSED_SRC_ONLY, + BNEP_COMPRESSED_DST_ONLY, + BNEP_COMPRESSED +}; + +static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) +{ + struct ethhdr *eh = (void *) skb->data; + struct socket *sock = s->sock; + struct kvec iv[3]; + int len = 0, il = 0; + u8 type = 0; + + BT_DBG("skb %p dev %p type %d", skb, skb->dev, skb->pkt_type); + + if (!skb->dev) { + /* Control frame sent by us */ + goto send; + } + + iv[il++] = (struct kvec) { &type, 1 }; + len++; + + if (compress_src && !compare_ether_addr(eh->h_dest, s->eh.h_source)) + type |= 0x01; + + if (compress_dst && !compare_ether_addr(eh->h_source, s->eh.h_dest)) + type |= 0x02; + + if (type) + skb_pull(skb, ETH_ALEN * 2); + + type = __bnep_tx_types[type]; + switch (type) { + case BNEP_COMPRESSED_SRC_ONLY: + iv[il++] = (struct kvec) { eh->h_source, ETH_ALEN }; + len += ETH_ALEN; + break; + + case BNEP_COMPRESSED_DST_ONLY: + iv[il++] = (struct kvec) { eh->h_dest, ETH_ALEN }; + len += ETH_ALEN; + break; + } + +send: + iv[il++] = (struct kvec) { skb->data, skb->len }; + len += skb->len; + + /* FIXME: linearize skb */ + { + len = kernel_sendmsg(sock, &s->msg, iv, il, len); + } + kfree_skb(skb); + + if (len > 0) { + s->dev->stats.tx_bytes += len; + s->dev->stats.tx_packets++; + return 0; + } + + return len; +} + +static int bnep_session(void *arg) +{ + struct bnep_session *s = arg; + struct net_device *dev = s->dev; + struct sock *sk = s->sock->sk; + struct sk_buff *skb; + wait_queue_t wait; + + BT_DBG(""); + + daemonize("kbnepd %s", dev->name); + set_user_nice(current, -15); + + init_waitqueue_entry(&wait, current); + add_wait_queue(sk->sk_sleep, &wait); + while (!atomic_read(&s->killed)) { + set_current_state(TASK_INTERRUPTIBLE); + + // RX + while ((skb = skb_dequeue(&sk->sk_receive_queue))) { + skb_orphan(skb); + bnep_rx_frame(s, skb); + } + + if (sk->sk_state != BT_CONNECTED) + break; + + // TX + while ((skb = skb_dequeue(&sk->sk_write_queue))) + if (bnep_tx_frame(s, skb)) + break; + netif_wake_queue(dev); + + schedule(); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(sk->sk_sleep, &wait); + + /* Cleanup session */ + down_write(&bnep_session_sem); + + /* Delete network device */ + unregister_netdev(dev); + + /* Wakeup user-space 
polling for socket errors */ + s->sock->sk->sk_err = EUNATCH; + + wake_up_interruptible(s->sock->sk->sk_sleep); + + /* Release the socket */ + fput(s->sock->file); + + __bnep_unlink_session(s); + + up_write(&bnep_session_sem); + free_netdev(dev); + return 0; +} + +static struct device *bnep_get_device(struct bnep_session *session) +{ + bdaddr_t *src = &bt_sk(session->sock->sk)->src; + bdaddr_t *dst = &bt_sk(session->sock->sk)->dst; + struct hci_dev *hdev; + struct hci_conn *conn; + + hdev = hci_get_route(dst, src); + if (!hdev) + return NULL; + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); + + hci_dev_put(hdev); + + return conn ? &conn->dev : NULL; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) +static struct device_type bnep_type = { + .name = "bluetooth", +}; +#endif + +int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) +{ + struct net_device *dev; + struct bnep_session *s, *ss; + u8 dst[ETH_ALEN], src[ETH_ALEN]; + int err; + + BT_DBG(""); + + baswap((void *) dst, &bt_sk(sock->sk)->dst); + baswap((void *) src, &bt_sk(sock->sk)->src); + + /* session struct allocated as private part of net_device */ + dev = alloc_netdev(sizeof(struct bnep_session), + (*req->device) ? req->device : "bnep%d", + bnep_net_setup); + if (!dev) + return -ENOMEM; + + down_write(&bnep_session_sem); + + ss = __bnep_get_session(dst); + if (ss && ss->state == BT_CONNECTED) { + err = -EEXIST; + goto failed; + } + + s = netdev_priv(dev); + + /* This is rx header therefore addresses are swapped. + * ie eh.h_dest is our local address. */ + memcpy(s->eh.h_dest, &src, ETH_ALEN); + memcpy(s->eh.h_source, &dst, ETH_ALEN); + memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN); + + s->dev = dev; + s->sock = sock; + s->role = req->role; + s->state = BT_CONNECTED; + + s->msg.msg_flags = MSG_NOSIGNAL; + +#ifdef CONFIG_BT_BNEP_MC_FILTER + /* Set default mc filter */ + set_bit(bnep_mc_hash(dev->broadcast), (ulong *) &s->mc_filter); +#endif + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER + /* Set default protocol filter */ + bnep_set_default_proto_filter(s); +#endif + + SET_NETDEV_DEV(dev, bnep_get_device(s)); + SET_NETDEV_DEVTYPE(dev, &bnep_type); + + err = register_netdev(dev); + if (err) { + goto failed; + } + + __bnep_link_session(s); + + err = kernel_thread(bnep_session, s, CLONE_KERNEL); + if (err < 0) { + /* Session thread start failed, gotta cleanup. */ + unregister_netdev(dev); + __bnep_unlink_session(s); + goto failed; + } + + up_write(&bnep_session_sem); + strcpy(req->device, dev->name); + return 0; + +failed: + up_write(&bnep_session_sem); + free_netdev(dev); + return err; +} + +int bnep_del_connection(struct bnep_conndel_req *req) +{ + struct bnep_session *s; + int err = 0; + + BT_DBG(""); + + down_read(&bnep_session_sem); + + s = __bnep_get_session(req->dst); + if (s) { + /* Wakeup user-space which is polling for socket errors. 
+ * This is temporary hack until we have shutdown in L2CAP */ + s->sock->sk->sk_err = EUNATCH; + + /* Kill session thread */ + atomic_inc(&s->killed); + wake_up_interruptible(s->sock->sk->sk_sleep); + } else + err = -ENOENT; + + up_read(&bnep_session_sem); + return err; +} + +static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s) +{ + memcpy(ci->dst, s->eh.h_source, ETH_ALEN); + strcpy(ci->device, s->dev->name); + ci->flags = s->flags; + ci->state = s->state; + ci->role = s->role; +} + +int bnep_get_connlist(struct bnep_connlist_req *req) +{ + struct list_head *p; + int err = 0, n = 0; + + down_read(&bnep_session_sem); + + list_for_each(p, &bnep_session_list) { + struct bnep_session *s; + struct bnep_conninfo ci; + + s = list_entry(p, struct bnep_session, list); + + __bnep_copy_ci(&ci, s); + + if (copy_to_user(req->ci, &ci, sizeof(ci))) { + err = -EFAULT; + break; + } + + if (++n >= req->cnum) + break; + + req->ci++; + } + req->cnum = n; + + up_read(&bnep_session_sem); + return err; +} + +int bnep_get_conninfo(struct bnep_conninfo *ci) +{ + struct bnep_session *s; + int err = 0; + + down_read(&bnep_session_sem); + + s = __bnep_get_session(ci->dst); + if (s) + __bnep_copy_ci(ci, s); + else + err = -ENOENT; + + up_read(&bnep_session_sem); + return err; +} + +static int __init bnep_init(void) +{ + char flt[50] = ""; + + l2cap_load(); + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER + strcat(flt, "protocol "); +#endif + +#ifdef CONFIG_BT_BNEP_MC_FILTER + strcat(flt, "multicast"); +#endif + + BT_INFO("BNEP (Ethernet Emulation) ver %s", VERSION); + if (flt[0]) + BT_INFO("BNEP filters: %s", flt); + + bnep_sock_init(); + return 0; +} + +static void __exit bnep_exit(void) +{ + bnep_sock_cleanup(); +} + +module_init(bnep_init); +module_exit(bnep_exit); + +module_param(compress_src, bool, 0644); +MODULE_PARM_DESC(compress_src, "Compress sources headers"); + +module_param(compress_dst, bool, 0644); +MODULE_PARM_DESC(compress_dst, "Compress destination headers"); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth BNEP ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-4"); diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c new file mode 100644 index 0000000..b28e94c --- /dev/null +++ b/net/bluetooth/bnep/netdev.c @@ -0,0 +1,260 @@ +/* + BNEP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2001-2002 Inventel Systemes + Written 2001-2002 by + Clément Moreau + David Libault + + Copyright (C) 2002 Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
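+
+   netdev.c implements the virtual Ethernet interface (bnep%d) bound to a
+   BNEP session: transmitted frames are run through the multicast and
+   protocol filters, queued on the session socket's sk_write_queue and
+   handed to the session thread, which stops the net queue once
+   BNEP_TX_QUEUE_LEN frames are pending and restarts it after draining.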
+*/ + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "bnep.h" + +#define BNEP_TX_QUEUE_LEN 20 + +static int bnep_net_open(struct net_device *dev) +{ + netif_start_queue(dev); + return 0; +} + +static int bnep_net_close(struct net_device *dev) +{ + netif_stop_queue(dev); + return 0; +} + +static void bnep_net_set_mc_list(struct net_device *dev) +{ +#ifdef CONFIG_BT_BNEP_MC_FILTER + struct bnep_session *s = netdev_priv(dev); + struct sock *sk = s->sock->sk; + struct bnep_set_filter_req *r; + struct sk_buff *skb; + int size; + + BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev)); + + size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; + skb = alloc_skb(size, GFP_ATOMIC); + if (!skb) { + BT_ERR("%s Multicast list allocation failed", dev->name); + return; + } + + r = (void *) skb->data; + __skb_put(skb, sizeof(*r)); + + r->type = BNEP_CONTROL; + r->ctrl = BNEP_FILTER_MULTI_ADDR_SET; + + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + u8 start[ETH_ALEN] = { 0x01 }; + + /* Request all addresses */ + memcpy(__skb_put(skb, ETH_ALEN), start, ETH_ALEN); + memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); + r->len = htons(ETH_ALEN * 2); + } else { + struct dev_mc_list *dmi = dev->mc_list; + int i, len = skb->len; + + if (dev->flags & IFF_BROADCAST) { + memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); + memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); + } + + /* FIXME: We should group addresses here. */ + + for (i = 0; + i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS; + i++) { + memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); + memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); + dmi = dmi->next; + } + r->len = htons(skb->len - len); + } + + skb_queue_tail(&sk->sk_write_queue, skb); + wake_up_interruptible(sk->sk_sleep); +#endif +} + +static int bnep_net_set_mac_addr(struct net_device *dev, void *arg) +{ + BT_DBG("%s", dev->name); + return 0; +} + +static void bnep_net_timeout(struct net_device *dev) +{ + BT_DBG("net_timeout"); + netif_wake_queue(dev); +} + +#ifdef CONFIG_BT_BNEP_MC_FILTER +static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) +{ + struct ethhdr *eh = (void *) skb->data; + + if ((eh->h_dest[0] & 1) && !test_bit(bnep_mc_hash(eh->h_dest), (ulong *) &s->mc_filter)) + return 1; + return 0; +} +#endif + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER +/* Determine ether protocol. Based on eth_type_trans. 
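+ * Values of h_proto at or above 1536 (0x0600) are genuine EtherTypes;
+ * a raw 0xFFFF length word identifies a NetWare-style raw 802.3 frame;
+ * everything else is treated as 802.2 LLC, mirroring eth_type_trans().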
*/ +static inline u16 bnep_net_eth_proto(struct sk_buff *skb) +{ + struct ethhdr *eh = (void *) skb->data; + u16 proto = ntohs(eh->h_proto); + + if (proto >= 1536) + return proto; + + if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) + return ETH_P_802_3; + + return ETH_P_802_2; +} + +static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) +{ + u16 proto = bnep_net_eth_proto(skb); + struct bnep_proto_filter *f = s->proto_filter; + int i; + + for (i = 0; i < BNEP_MAX_PROTO_FILTERS && f[i].end; i++) { + if (proto >= f[i].start && proto <= f[i].end) + return 0; + } + + BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto); + return 1; +} +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) +static netdev_tx_t bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) +#else +static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) +#endif + +{ + struct bnep_session *s = netdev_priv(dev); + struct sock *sk = s->sock->sk; + + BT_DBG("skb %p, dev %p", skb, dev); + +#ifdef CONFIG_BT_BNEP_MC_FILTER + if (bnep_net_mc_filter(skb, s)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } +#endif + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER + if (bnep_net_proto_filter(skb, s)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } +#endif + + /* + * We cannot send L2CAP packets from here as we are potentially in a bh. + * So we have to queue them and wake up session thread which is sleeping + * on the sk->sk_sleep. + */ + dev->trans_start = jiffies; + skb_queue_tail(&sk->sk_write_queue, skb); + wake_up_interruptible(sk->sk_sleep); + + if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { + BT_DBG("tx queue is full"); + + /* Stop queuing. + * Session thread will do netif_wake_queue() */ + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,28)) +static const struct net_device_ops bnep_netdev_ops = { + .ndo_open = bnep_net_open, + .ndo_stop = bnep_net_close, + .ndo_start_xmit = bnep_net_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = bnep_net_set_mc_list, + .ndo_set_mac_address = bnep_net_set_mac_addr, + .ndo_tx_timeout = bnep_net_timeout, + .ndo_change_mtu = eth_change_mtu, + +}; +#else +static struct net_device_stats *bnep_net_get_stats(struct net_device *dev) +{ + return &dev->stats; +} +static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return -EINVAL; +} +#endif + +void bnep_net_setup(struct net_device *dev) +{ + + memset(dev->broadcast, 0xff, ETH_ALEN); + dev->addr_len = ETH_ALEN; + + ether_setup(dev); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,28)) + dev->netdev_ops = &bnep_netdev_ops; +#else + dev->open = bnep_net_open; + dev->stop = bnep_net_close; + dev->hard_start_xmit = bnep_net_xmit; + dev->get_stats = bnep_net_get_stats; + dev->do_ioctl = bnep_net_ioctl; + dev->set_mac_address = bnep_net_set_mac_addr; + dev->set_multicast_list = bnep_net_set_mc_list; + dev->tx_timeout = bnep_net_timeout; +#endif + + dev->watchdog_timeo = HZ * 2; +} diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c new file mode 100644 index 0000000..3f67022 --- /dev/null +++ b/net/bluetooth/bnep/sock.c @@ -0,0 +1,262 @@ +/* + BNEP implementation for Linux Bluetooth stack (BlueZ). 
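+
+   This file implements the BNEP control socket (BTPROTO_BNEP) used to
+   create and tear down BNEP network interfaces.  A minimal user-space
+   sketch, error handling omitted; l2cap_fd is assumed to be an L2CAP
+   socket already connected to the remote BNEP service (PSM 15):
+
+        struct bnep_connadd_req req = { .sock = l2cap_fd,
+                                        .role = BNEP_SVC_PANU };
+        int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_BNEP);
+        ioctl(ctl, BNEPCONNADD, &req);
+
+   On success req.device holds the name of the newly registered interface.
+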
+ Copyright (C) 2001-2002 Inventel Systemes + Written 2001-2002 by + David Libault + + Copyright (C) 2002 Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "bnep.h" + +static int bnep_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!sk) + return 0; + + sock_orphan(sk); + sock_put(sk); + return 0; +} + +static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct bnep_connlist_req cl; + struct bnep_connadd_req ca; + struct bnep_conndel_req cd; + struct bnep_conninfo ci; + struct socket *nsock; + void __user *argp = (void __user *)arg; + int err; + + BT_DBG("cmd %x arg %lx", cmd, arg); + + switch (cmd) { + case BNEPCONNADD: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + + if (copy_from_user(&ca, argp, sizeof(ca))) + return -EFAULT; + + nsock = sockfd_lookup(ca.sock, &err); + if (!nsock) + return err; + + if (nsock->sk->sk_state != BT_CONNECTED) { + sockfd_put(nsock); + return -EBADFD; + } + + err = bnep_add_connection(&ca, nsock); + if (!err) { + if (copy_to_user(argp, &ca, sizeof(ca))) + err = -EFAULT; + } else + sockfd_put(nsock); + + return err; + + case BNEPCONNDEL: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + + if (copy_from_user(&cd, argp, sizeof(cd))) + return -EFAULT; + + return bnep_del_connection(&cd); + + case BNEPGETCONNLIST: + if (copy_from_user(&cl, argp, sizeof(cl))) + return -EFAULT; + + if (cl.cnum <= 0) + return -EINVAL; + + err = bnep_get_connlist(&cl); + if (!err && copy_to_user(argp, &cl, sizeof(cl))) + return -EFAULT; + + return err; + + case BNEPGETCONNINFO: + if (copy_from_user(&ci, argp, sizeof(ci))) + return -EFAULT; + + err = bnep_get_conninfo(&ci); + if (!err && copy_to_user(argp, &ci, sizeof(ci))) + return -EFAULT; + + return err; + + default: + return -EINVAL; + } + + return 0; +} + +#ifdef CONFIG_COMPAT +static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + if (cmd == BNEPGETCONNLIST) { + struct bnep_connlist_req cl; + uint32_t uci; + int err; + + if (get_user(cl.cnum, (uint32_t __user *) arg) || + get_user(uci, (u32 __user *) (arg + 4))) + return -EFAULT; + + cl.ci = compat_ptr(uci); + + if (cl.cnum <= 0) + return -EINVAL; + + err = bnep_get_connlist(&cl); + + if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) + err = -EFAULT; + + return err; + } + + return bnep_sock_ioctl(sock, cmd, arg); +} +#endif + +static const 
struct proto_ops bnep_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = bnep_sock_release, + .ioctl = bnep_sock_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = bnep_sock_compat_ioctl, +#endif + .bind = sock_no_bind, + .getname = sock_no_getname, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .poll = sock_no_poll, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .mmap = sock_no_mmap +}; + +static struct proto bnep_proto = { + .name = "BNEP", + .owner = THIS_MODULE, + .obj_size = sizeof(struct bt_sock) +}; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) +static int bnep_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +#else +static int bnep_sock_create(struct net *net, struct socket *sock, int protocol) +#endif +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + if (sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + + sock->ops = &bnep_sock_ops; + + sock->state = SS_UNCONNECTED; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = protocol; + sk->sk_state = BT_OPEN; + + return 0; +} + +static const struct net_proto_family bnep_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = bnep_sock_create +}; + +int __init bnep_sock_init(void) +{ + int err; + + err = proto_register(&bnep_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops); + if (err < 0) + goto error; + + return 0; + +error: + BT_ERR("Can't register BNEP socket"); + proto_unregister(&bnep_proto); + return err; +} + +void __exit bnep_sock_cleanup(void) +{ + if (bt_sock_unregister(BTPROTO_BNEP) < 0) + BT_ERR("Can't unregister BNEP socket"); + + proto_unregister(&bnep_proto); +} diff --git a/net/bluetooth/cmtp/Makefile b/net/bluetooth/cmtp/Makefile new file mode 100644 index 0000000..890a9a5 --- /dev/null +++ b/net/bluetooth/cmtp/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Linux Bluetooth CMTP layer +# + +obj-$(CONFIG_BT_CMTP) += cmtp.o + +cmtp-objs := core.o sock.o capi.o diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c new file mode 100644 index 0000000..bc420ac --- /dev/null +++ b/net/bluetooth/cmtp/capi.c @@ -0,0 +1,664 @@ +/* + CMTP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002-2003 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
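+
+   capi.c registers a virtual controller with the kernel CAPI core and
+   tunnels CAPI messages over the CMTP session.  The Bluetooth-specific
+   interoperability messages (command 0x20) are used to register and
+   release applications and to fetch the remote device's profile,
+   manufacturer, version and serial number before the controller is
+   marked ready.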
+*/ + +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "cmtp.h" + +#define CAPI_INTEROPERABILITY 0x20 + +#define CAPI_INTEROPERABILITY_REQ CAPICMD(CAPI_INTEROPERABILITY, CAPI_REQ) +#define CAPI_INTEROPERABILITY_CONF CAPICMD(CAPI_INTEROPERABILITY, CAPI_CONF) +#define CAPI_INTEROPERABILITY_IND CAPICMD(CAPI_INTEROPERABILITY, CAPI_IND) +#define CAPI_INTEROPERABILITY_RESP CAPICMD(CAPI_INTEROPERABILITY, CAPI_RESP) + +#define CAPI_INTEROPERABILITY_REQ_LEN (CAPI_MSG_BASELEN + 2) +#define CAPI_INTEROPERABILITY_CONF_LEN (CAPI_MSG_BASELEN + 4) +#define CAPI_INTEROPERABILITY_IND_LEN (CAPI_MSG_BASELEN + 2) +#define CAPI_INTEROPERABILITY_RESP_LEN (CAPI_MSG_BASELEN + 2) + +#define CAPI_FUNCTION_REGISTER 0 +#define CAPI_FUNCTION_RELEASE 1 +#define CAPI_FUNCTION_GET_PROFILE 2 +#define CAPI_FUNCTION_GET_MANUFACTURER 3 +#define CAPI_FUNCTION_GET_VERSION 4 +#define CAPI_FUNCTION_GET_SERIAL_NUMBER 5 +#define CAPI_FUNCTION_MANUFACTURER 6 +#define CAPI_FUNCTION_LOOPBACK 7 + + +#define CMTP_MSGNUM 1 +#define CMTP_APPLID 2 +#define CMTP_MAPPING 3 + +static struct cmtp_application *cmtp_application_add(struct cmtp_session *session, __u16 appl) +{ + struct cmtp_application *app = kzalloc(sizeof(*app), GFP_KERNEL); + + BT_DBG("session %p application %p appl %d", session, app, appl); + + if (!app) + return NULL; + + app->state = BT_OPEN; + app->appl = appl; + + list_add_tail(&app->list, &session->applications); + + return app; +} + +static void cmtp_application_del(struct cmtp_session *session, struct cmtp_application *app) +{ + BT_DBG("session %p application %p", session, app); + + if (app) { + list_del(&app->list); + kfree(app); + } +} + +static struct cmtp_application *cmtp_application_get(struct cmtp_session *session, int pattern, __u16 value) +{ + struct cmtp_application *app; + struct list_head *p, *n; + + list_for_each_safe(p, n, &session->applications) { + app = list_entry(p, struct cmtp_application, list); + switch (pattern) { + case CMTP_MSGNUM: + if (app->msgnum == value) + return app; + break; + case CMTP_APPLID: + if (app->appl == value) + return app; + break; + case CMTP_MAPPING: + if (app->mapping == value) + return app; + break; + } + } + + return NULL; +} + +static int cmtp_msgnum_get(struct cmtp_session *session) +{ + session->msgnum++; + + if ((session->msgnum & 0xff) > 200) + session->msgnum = CMTP_INITIAL_MSGNUM + 1; + + return session->msgnum; +} + +static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb) +{ + struct cmtp_scb *scb = (void *) skb->cb; + + BT_DBG("session %p skb %p len %d", session, skb, skb->len); + + scb->id = -1; + scb->data = (CAPIMSG_COMMAND(skb->data) == CAPI_DATA_B3); + + skb_queue_tail(&session->transmit, skb); + + cmtp_schedule(session); +} + +static void cmtp_send_interopmsg(struct cmtp_session *session, + __u8 subcmd, __u16 appl, __u16 msgnum, + __u16 function, unsigned char *buf, int len) +{ + struct sk_buff *skb; + unsigned char *s; + + BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum); + + if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) { + BT_ERR("Can't allocate memory for interoperability packet"); + return; + } + + s = skb_put(skb, CAPI_MSG_BASELEN + 6 + len); + + capimsg_setu16(s, 0, CAPI_MSG_BASELEN + 6 + len); + capimsg_setu16(s, 2, appl); + capimsg_setu8 (s, 4, 
CAPI_INTEROPERABILITY); + capimsg_setu8 (s, 5, subcmd); + capimsg_setu16(s, 6, msgnum); + + /* Interoperability selector (Bluetooth Device Management) */ + capimsg_setu16(s, 8, 0x0001); + + capimsg_setu8 (s, 10, 3 + len); + capimsg_setu16(s, 11, function); + capimsg_setu8 (s, 13, len); + + if (len > 0) + memcpy(s + 14, buf, len); + + cmtp_send_capimsg(session, skb); +} + +static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *skb) +{ + struct capi_ctr *ctrl = &session->ctrl; + struct cmtp_application *application; + __u16 appl, msgnum, func, info; + __u32 controller; + + BT_DBG("session %p skb %p len %d", session, skb, skb->len); + + switch (CAPIMSG_SUBCOMMAND(skb->data)) { + case CAPI_CONF: + if (skb->len < CAPI_MSG_BASELEN + 10) + break; + + func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 5); + info = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 8); + + switch (func) { + case CAPI_FUNCTION_REGISTER: + msgnum = CAPIMSG_MSGID(skb->data); + + application = cmtp_application_get(session, CMTP_MSGNUM, msgnum); + if (application) { + application->state = BT_CONNECTED; + application->msgnum = 0; + application->mapping = CAPIMSG_APPID(skb->data); + wake_up_interruptible(&session->wait); + } + + break; + + case CAPI_FUNCTION_RELEASE: + appl = CAPIMSG_APPID(skb->data); + + application = cmtp_application_get(session, CMTP_MAPPING, appl); + if (application) { + application->state = BT_CLOSED; + application->msgnum = 0; + wake_up_interruptible(&session->wait); + } + + break; + + case CAPI_FUNCTION_GET_PROFILE: + if (skb->len < CAPI_MSG_BASELEN + 11 + sizeof(capi_profile)) + break; + + controller = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 11); + msgnum = CAPIMSG_MSGID(skb->data); + + if (!info && (msgnum == CMTP_INITIAL_MSGNUM)) { + session->ncontroller = controller; + wake_up_interruptible(&session->wait); + break; + } + + if (!info && ctrl) { + memcpy(&ctrl->profile, + skb->data + CAPI_MSG_BASELEN + 11, + sizeof(capi_profile)); + session->state = BT_CONNECTED; + capi_ctr_ready(ctrl); + } + + break; + + case CAPI_FUNCTION_GET_MANUFACTURER: + if (skb->len < CAPI_MSG_BASELEN + 15) + break; + + controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 10); + + if (!info && ctrl) { + int len = min_t(uint, CAPI_MANUFACTURER_LEN, + skb->data[CAPI_MSG_BASELEN + 14]); + + memset(ctrl->manu, 0, CAPI_MANUFACTURER_LEN); + strncpy(ctrl->manu, + skb->data + CAPI_MSG_BASELEN + 15, len); + } + + break; + + case CAPI_FUNCTION_GET_VERSION: + if (skb->len < CAPI_MSG_BASELEN + 32) + break; + + controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12); + + if (!info && ctrl) { + ctrl->version.majorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 16); + ctrl->version.minorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 20); + ctrl->version.majormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 24); + ctrl->version.minormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 28); + } + + break; + + case CAPI_FUNCTION_GET_SERIAL_NUMBER: + if (skb->len < CAPI_MSG_BASELEN + 17) + break; + + controller = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 12); + + if (!info && ctrl) { + int len = min_t(uint, CAPI_SERIAL_LEN, + skb->data[CAPI_MSG_BASELEN + 16]); + + memset(ctrl->serial, 0, CAPI_SERIAL_LEN); + strncpy(ctrl->serial, + skb->data + CAPI_MSG_BASELEN + 17, len); + } + + break; + } + + break; + + case CAPI_IND: + if (skb->len < CAPI_MSG_BASELEN + 6) + break; + + func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 3); + + if (func == CAPI_FUNCTION_LOOPBACK) { + int len = min_t(uint, skb->len - 
CAPI_MSG_BASELEN - 6, + skb->data[CAPI_MSG_BASELEN + 5]); + appl = CAPIMSG_APPID(skb->data); + msgnum = CAPIMSG_MSGID(skb->data); + cmtp_send_interopmsg(session, CAPI_RESP, appl, msgnum, func, + skb->data + CAPI_MSG_BASELEN + 6, len); + } + + break; + } + + kfree_skb(skb); +} + +void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb) +{ + struct capi_ctr *ctrl = &session->ctrl; + struct cmtp_application *application; + __u16 cmd, appl; + __u32 contr; + + BT_DBG("session %p skb %p len %d", session, skb, skb->len); + + if (skb->len < CAPI_MSG_BASELEN) + return; + + if (CAPIMSG_COMMAND(skb->data) == CAPI_INTEROPERABILITY) { + cmtp_recv_interopmsg(session, skb); + return; + } + + if (session->flags & (1 << CMTP_LOOPBACK)) { + kfree_skb(skb); + return; + } + + cmd = CAPICMD(CAPIMSG_COMMAND(skb->data), CAPIMSG_SUBCOMMAND(skb->data)); + appl = CAPIMSG_APPID(skb->data); + contr = CAPIMSG_CONTROL(skb->data); + + application = cmtp_application_get(session, CMTP_MAPPING, appl); + if (application) { + appl = application->appl; + CAPIMSG_SETAPPID(skb->data, appl); + } else { + BT_ERR("Can't find application with id %d", appl); + kfree_skb(skb); + return; + } + + if ((contr & 0x7f) == 0x01) { + contr = (contr & 0xffffff80) | session->num; + CAPIMSG_SETCONTROL(skb->data, contr); + } + + if (!ctrl) { + BT_ERR("Can't find controller %d for message", session->num); + kfree_skb(skb); + return; + } + + capi_ctr_handle_message(ctrl, appl, skb); +} + +static int cmtp_load_firmware(struct capi_ctr *ctrl, capiloaddata *data) +{ + BT_DBG("ctrl %p data %p", ctrl, data); + + return 0; +} + +static void cmtp_reset_ctr(struct capi_ctr *ctrl) +{ + struct cmtp_session *session = ctrl->driverdata; + + BT_DBG("ctrl %p", ctrl); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) + capi_ctr_down(ctrl); +#else + capi_ctr_reseted(ctrl); +#endif + + atomic_inc(&session->terminate); + cmtp_schedule(session); +} + +static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp) +{ + DECLARE_WAITQUEUE(wait, current); + struct cmtp_session *session = ctrl->driverdata; + struct cmtp_application *application; + unsigned long timeo = CMTP_INTEROP_TIMEOUT; + unsigned char buf[8]; + int err = 0, nconn, want = rp->level3cnt; + + BT_DBG("ctrl %p appl %d level3cnt %d datablkcnt %d datablklen %d", + ctrl, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen); + + application = cmtp_application_add(session, appl); + if (!application) { + BT_ERR("Can't allocate memory for new application"); + return; + } + + if (want < 0) + nconn = ctrl->profile.nbchannel * -want; + else + nconn = want; + + if (nconn == 0) + nconn = ctrl->profile.nbchannel; + + capimsg_setu16(buf, 0, nconn); + capimsg_setu16(buf, 2, rp->datablkcnt); + capimsg_setu16(buf, 4, rp->datablklen); + + application->state = BT_CONFIG; + application->msgnum = cmtp_msgnum_get(session); + + cmtp_send_interopmsg(session, CAPI_REQ, 0x0000, application->msgnum, + CAPI_FUNCTION_REGISTER, buf, 6); + + add_wait_queue(&session->wait, &wait); + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + + if (!timeo) { + err = -EAGAIN; + break; + } + + if (application->state == BT_CLOSED) { + err = -application->err; + break; + } + + if (application->state == BT_CONNECTED) + break; + + if (signal_pending(current)) { + err = -EINTR; + break; + } + + timeo = schedule_timeout(timeo); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&session->wait, &wait); + + if (err) { + cmtp_application_del(session, application); + return; + } +} + +static void 
cmtp_release_appl(struct capi_ctr *ctrl, __u16 appl) +{ + struct cmtp_session *session = ctrl->driverdata; + struct cmtp_application *application; + + BT_DBG("ctrl %p appl %d", ctrl, appl); + + application = cmtp_application_get(session, CMTP_APPLID, appl); + if (!application) { + BT_ERR("Can't find application"); + return; + } + + application->msgnum = cmtp_msgnum_get(session); + + cmtp_send_interopmsg(session, CAPI_REQ, application->mapping, application->msgnum, + CAPI_FUNCTION_RELEASE, NULL, 0); + + wait_event_interruptible_timeout(session->wait, + (application->state == BT_CLOSED), CMTP_INTEROP_TIMEOUT); + + cmtp_application_del(session, application); +} + +static u16 cmtp_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) +{ + struct cmtp_session *session = ctrl->driverdata; + struct cmtp_application *application; + __u16 appl; + __u32 contr; + + BT_DBG("ctrl %p skb %p", ctrl, skb); + + appl = CAPIMSG_APPID(skb->data); + contr = CAPIMSG_CONTROL(skb->data); + + application = cmtp_application_get(session, CMTP_APPLID, appl); + if ((!application) || (application->state != BT_CONNECTED)) { + BT_ERR("Can't find application with id %d", appl); + return CAPI_ILLAPPNR; + } + + CAPIMSG_SETAPPID(skb->data, application->mapping); + + if ((contr & 0x7f) == session->num) { + contr = (contr & 0xffffff80) | 0x01; + CAPIMSG_SETCONTROL(skb->data, contr); + } + + cmtp_send_capimsg(session, skb); + + return CAPI_NOERROR; +} + +static char *cmtp_procinfo(struct capi_ctr *ctrl) +{ + return "CAPI Message Transport Protocol"; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) +static int cmtp_proc_show(struct seq_file *m, void *v) +{ + struct capi_ctr *ctrl = m->private; + struct cmtp_session *session = ctrl->driverdata; + struct cmtp_application *app; + struct list_head *p, *n; + + seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl)); + seq_printf(m, "addr %s\n", session->name); + seq_printf(m, "ctrl %d\n", session->num); + + list_for_each_safe(p, n, &session->applications) { + app = list_entry(p, struct cmtp_application, list); + seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping); + } + + return 0; +} + +static int cmtp_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, cmtp_proc_show, PDE(inode)->data); +} + +static const struct file_operations cmtp_proc_fops = { + .owner = THIS_MODULE, + .open = cmtp_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#else + +static int cmtp_ctr_read_proc(char *page, char **start, off_t off, int count, int *eof, struct capi_ctr *ctrl) +{ + struct cmtp_session *session = ctrl->driverdata; + struct cmtp_application *app; + struct list_head *p, *n; + int len = 0; + + len += sprintf(page + len, "%s\n\n", cmtp_procinfo(ctrl)); + len += sprintf(page + len, "addr %s\n", session->name); + len += sprintf(page + len, "ctrl %d\n", session->num); + + list_for_each_safe(p, n, &session->applications) { + app = list_entry(p, struct cmtp_application, list); + len += sprintf(page + len, "appl %d -> %d\n", app->appl, app->mapping); + } + + if (off + count >= len) + *eof = 1; + + if (len < off) + return 0; + + *start = page + off; + + return ((count < len - off) ? 
count : len - off); +} +#endif + +int cmtp_attach_device(struct cmtp_session *session) +{ + unsigned char buf[4]; + long ret; + + BT_DBG("session %p", session); + + capimsg_setu32(buf, 0, 0); + + cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, CMTP_INITIAL_MSGNUM, + CAPI_FUNCTION_GET_PROFILE, buf, 4); + + ret = wait_event_interruptible_timeout(session->wait, + session->ncontroller, CMTP_INTEROP_TIMEOUT); + + BT_INFO("Found %d CAPI controller(s) on device %s", session->ncontroller, session->name); + + if (!ret) + return -ETIMEDOUT; + + if (!session->ncontroller) + return -ENODEV; + + if (session->ncontroller > 1) + BT_INFO("Setting up only CAPI controller 1"); + + session->ctrl.owner = THIS_MODULE; + session->ctrl.driverdata = session; + strcpy(session->ctrl.name, session->name); + + session->ctrl.driver_name = "cmtp"; + session->ctrl.load_firmware = cmtp_load_firmware; + session->ctrl.reset_ctr = cmtp_reset_ctr; + session->ctrl.register_appl = cmtp_register_appl; + session->ctrl.release_appl = cmtp_release_appl; + session->ctrl.send_message = cmtp_send_message; + + session->ctrl.procinfo = cmtp_procinfo; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) + session->ctrl.proc_fops = &cmtp_proc_fops; +#else + session->ctrl.ctr_read_proc = cmtp_ctr_read_proc; +#endif + + if (attach_capi_ctr(&session->ctrl) < 0) { + BT_ERR("Can't attach new controller"); + return -EBUSY; + } + + session->num = session->ctrl.cnr; + + BT_DBG("session %p num %d", session, session->num); + + capimsg_setu32(buf, 0, 1); + + cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), + CAPI_FUNCTION_GET_MANUFACTURER, buf, 4); + + cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), + CAPI_FUNCTION_GET_VERSION, buf, 4); + + cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), + CAPI_FUNCTION_GET_SERIAL_NUMBER, buf, 4); + + cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), + CAPI_FUNCTION_GET_PROFILE, buf, 4); + + return 0; +} + +void cmtp_detach_device(struct cmtp_session *session) +{ + BT_DBG("session %p", session); + + detach_capi_ctr(&session->ctrl); +} diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h new file mode 100644 index 0000000..e4663aa --- /dev/null +++ b/net/bluetooth/cmtp/cmtp.h @@ -0,0 +1,135 @@ +/* + CMTP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002-2003 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
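+
+   cmtp.h defines the CMTP control ioctls (mirroring the BNEP ones) and
+   the per-session state: the L2CAP socket, the CAPI controller, the
+   transmit queue and sixteen reassembly buffers indexed by the 4-bit
+   block id carried in each CMTP frame header.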
+*/ + +#ifndef __CMTP_H +#define __CMTP_H + +#include +#include + +#define BTNAMSIZ 18 + +/* CMTP ioctl defines */ +#define CMTPCONNADD _IOW('C', 200, int) +#define CMTPCONNDEL _IOW('C', 201, int) +#define CMTPGETCONNLIST _IOR('C', 210, int) +#define CMTPGETCONNINFO _IOR('C', 211, int) + +#define CMTP_LOOPBACK 0 + +struct cmtp_connadd_req { + int sock; // Connected socket + __u32 flags; +}; + +struct cmtp_conndel_req { + bdaddr_t bdaddr; + __u32 flags; +}; + +struct cmtp_conninfo { + bdaddr_t bdaddr; + __u32 flags; + __u16 state; + int num; +}; + +struct cmtp_connlist_req { + __u32 cnum; + struct cmtp_conninfo __user *ci; +}; + +int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock); +int cmtp_del_connection(struct cmtp_conndel_req *req); +int cmtp_get_connlist(struct cmtp_connlist_req *req); +int cmtp_get_conninfo(struct cmtp_conninfo *ci); + +/* CMTP session defines */ +#define CMTP_INTEROP_TIMEOUT (HZ * 5) +#define CMTP_INITIAL_MSGNUM 0xff00 + +struct cmtp_session { + struct list_head list; + + struct socket *sock; + + bdaddr_t bdaddr; + + unsigned long state; + unsigned long flags; + + uint mtu; + + char name[BTNAMSIZ]; + + atomic_t terminate; + + wait_queue_head_t wait; + + int ncontroller; + int num; + struct capi_ctr ctrl; + + struct list_head applications; + + unsigned long blockids; + int msgnum; + + struct sk_buff_head transmit; + + struct sk_buff *reassembly[16]; +}; + +struct cmtp_application { + struct list_head list; + + unsigned long state; + int err; + + __u16 appl; + __u16 mapping; + + __u16 msgnum; +}; + +struct cmtp_scb { + int id; + int data; +}; + +int cmtp_attach_device(struct cmtp_session *session); +void cmtp_detach_device(struct cmtp_session *session); + +void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb); + +static inline void cmtp_schedule(struct cmtp_session *session) +{ + struct sock *sk = session->sock->sk; + + wake_up_interruptible(sk->sk_sleep); +} + +/* CMTP init defines */ +int cmtp_init_sockets(void); +void cmtp_cleanup_sockets(void); + +#endif /* __CMTP_H */ diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c new file mode 100644 index 0000000..0073ec8 --- /dev/null +++ b/net/bluetooth/cmtp/core.c @@ -0,0 +1,493 @@ +/* + CMTP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002-2003 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
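+
+   core.c runs one kcmtpd_ctr_%d kernel thread per session.  CMTP frames
+   carry a small multiplexing header: bits 7-6 select the length encoding
+   (0x40 = one length byte, 0x80 = two length bytes, little endian,
+   otherwise no payload), bits 5-2 hold the block id used to reassemble
+   split CAPI messages, and bits 1-0 mark a final (0x00) or partial (0x01)
+   chunk; any other ending code discards the pending reassembly buffer.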
+*/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "cmtp.h" + +#define VERSION "1.0" + +static DECLARE_RWSEM(cmtp_session_sem); +static LIST_HEAD(cmtp_session_list); + +static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr) +{ + struct cmtp_session *session; + struct list_head *p; + + BT_DBG(""); + + list_for_each(p, &cmtp_session_list) { + session = list_entry(p, struct cmtp_session, list); + if (!bacmp(bdaddr, &session->bdaddr)) + return session; + } + return NULL; +} + +static void __cmtp_link_session(struct cmtp_session *session) +{ + __module_get(THIS_MODULE); + list_add(&session->list, &cmtp_session_list); +} + +static void __cmtp_unlink_session(struct cmtp_session *session) +{ + list_del(&session->list); + module_put(THIS_MODULE); +} + +static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci) +{ + bacpy(&ci->bdaddr, &session->bdaddr); + + ci->flags = session->flags; + ci->state = session->state; + + ci->num = session->num; +} + + +static inline int cmtp_alloc_block_id(struct cmtp_session *session) +{ + int i, id = -1; + + for (i = 0; i < 16; i++) + if (!test_and_set_bit(i, &session->blockids)) { + id = i; + break; + } + + return id; +} + +static inline void cmtp_free_block_id(struct cmtp_session *session, int id) +{ + clear_bit(id, &session->blockids); +} + +static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const unsigned char *buf, int count) +{ + struct sk_buff *skb = session->reassembly[id], *nskb; + int size; + + BT_DBG("session %p buf %p count %d", session, buf, count); + + size = (skb) ? skb->len + count : count; + + if (!(nskb = alloc_skb(size, GFP_ATOMIC))) { + BT_ERR("Can't allocate memory for CAPI message"); + return; + } + + if (skb && (skb->len > 0)) + skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len); + + memcpy(skb_put(nskb, count), buf, count); + + session->reassembly[id] = nskb; + + kfree_skb(skb); +} + +static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *skb) +{ + __u8 hdr, hdrlen, id; + __u16 len; + + BT_DBG("session %p skb %p len %d", session, skb, skb->len); + + while (skb->len > 0) { + hdr = skb->data[0]; + + switch (hdr & 0xc0) { + case 0x40: + hdrlen = 2; + len = skb->data[1]; + break; + case 0x80: + hdrlen = 3; + len = skb->data[1] | (skb->data[2] << 8); + break; + default: + hdrlen = 1; + len = 0; + break; + } + + id = (hdr & 0x3c) >> 2; + + BT_DBG("hdr 0x%02x hdrlen %d len %d id %d", hdr, hdrlen, len, id); + + if (hdrlen + len > skb->len) { + BT_ERR("Wrong size or header information in CMTP frame"); + break; + } + + if (len == 0) { + skb_pull(skb, hdrlen); + continue; + } + + switch (hdr & 0x03) { + case 0x00: + cmtp_add_msgpart(session, id, skb->data + hdrlen, len); + cmtp_recv_capimsg(session, session->reassembly[id]); + session->reassembly[id] = NULL; + break; + case 0x01: + cmtp_add_msgpart(session, id, skb->data + hdrlen, len); + break; + default: + if (session->reassembly[id] != NULL) + kfree_skb(session->reassembly[id]); + session->reassembly[id] = NULL; + break; + } + + skb_pull(skb, hdrlen + len); + } + + kfree_skb(skb); + return 0; +} + +static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, int len) +{ + struct socket *sock = session->sock; + struct kvec iv = { data, len }; + struct msghdr msg; + + BT_DBG("session %p data %p len %d", session, data, len); + + if 
(!len) + return 0; + + memset(&msg, 0, sizeof(msg)); + + return kernel_sendmsg(sock, &msg, &iv, 1, len); +} + +static void cmtp_process_transmit(struct cmtp_session *session) +{ + struct sk_buff *skb, *nskb; + unsigned char *hdr; + unsigned int size, tail; + + BT_DBG("session %p", session); + + if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) { + BT_ERR("Can't allocate memory for new frame"); + return; + } + + while ((skb = skb_dequeue(&session->transmit))) { + struct cmtp_scb *scb = (void *) skb->cb; + + if ((tail = (session->mtu - nskb->len)) < 5) { + cmtp_send_frame(session, nskb->data, nskb->len); + skb_trim(nskb, 0); + tail = session->mtu; + } + + size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len); + + if ((scb->id < 0) && ((scb->id = cmtp_alloc_block_id(session)) < 0)) { + skb_queue_head(&session->transmit, skb); + break; + } + + if (size < 256) { + hdr = skb_put(nskb, 2); + hdr[0] = 0x40 + | ((scb->id << 2) & 0x3c) + | ((skb->len == size) ? 0x00 : 0x01); + hdr[1] = size; + } else { + hdr = skb_put(nskb, 3); + hdr[0] = 0x80 + | ((scb->id << 2) & 0x3c) + | ((skb->len == size) ? 0x00 : 0x01); + hdr[1] = size & 0xff; + hdr[2] = size >> 8; + } + + skb_copy_from_linear_data(skb, skb_put(nskb, size), size); + skb_pull(skb, size); + + if (skb->len > 0) { + skb_queue_head(&session->transmit, skb); + } else { + cmtp_free_block_id(session, scb->id); + if (scb->data) { + cmtp_send_frame(session, nskb->data, nskb->len); + skb_trim(nskb, 0); + } + kfree_skb(skb); + } + } + + cmtp_send_frame(session, nskb->data, nskb->len); + + kfree_skb(nskb); +} + +static int cmtp_session(void *arg) +{ + struct cmtp_session *session = arg; + struct sock *sk = session->sock->sk; + struct sk_buff *skb; + wait_queue_t wait; + + BT_DBG("session %p", session); + + daemonize("kcmtpd_ctr_%d", session->num); + set_user_nice(current, -15); + + init_waitqueue_entry(&wait, current); + add_wait_queue(sk->sk_sleep, &wait); + while (!atomic_read(&session->terminate)) { + set_current_state(TASK_INTERRUPTIBLE); + + if (sk->sk_state != BT_CONNECTED) + break; + + while ((skb = skb_dequeue(&sk->sk_receive_queue))) { + skb_orphan(skb); + cmtp_recv_frame(session, skb); + } + + cmtp_process_transmit(session); + + schedule(); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(sk->sk_sleep, &wait); + + down_write(&cmtp_session_sem); + + if (!(session->flags & (1 << CMTP_LOOPBACK))) + cmtp_detach_device(session); + + fput(session->sock->file); + + __cmtp_unlink_session(session); + + up_write(&cmtp_session_sem); + + kfree(session); + return 0; +} + +int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) +{ + struct cmtp_session *session, *s; + bdaddr_t src, dst; + int i, err; + + BT_DBG(""); + + baswap(&src, &bt_sk(sock->sk)->src); + baswap(&dst, &bt_sk(sock->sk)->dst); + + session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); + if (!session) + return -ENOMEM; + + down_write(&cmtp_session_sem); + + s = __cmtp_get_session(&bt_sk(sock->sk)->dst); + if (s && s->state == BT_CONNECTED) { + err = -EEXIST; + goto failed; + } + + bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst); + + session->mtu = min_t(uint, l2cap_pi(sock->sk)->omtu, l2cap_pi(sock->sk)->imtu); + + BT_DBG("mtu %d", session->mtu); + + sprintf(session->name, "%s", batostr(&dst)); + + session->sock = sock; + session->state = BT_CONFIG; + + init_waitqueue_head(&session->wait); + + session->msgnum = CMTP_INITIAL_MSGNUM; + + INIT_LIST_HEAD(&session->applications); + + skb_queue_head_init(&session->transmit); + + for (i = 0; i < 16; 
i++) + session->reassembly[i] = NULL; + + session->flags = req->flags; + + __cmtp_link_session(session); + + err = kernel_thread(cmtp_session, session, CLONE_KERNEL); + if (err < 0) + goto unlink; + + if (!(session->flags & (1 << CMTP_LOOPBACK))) { + err = cmtp_attach_device(session); + if (err < 0) + goto detach; + } + + up_write(&cmtp_session_sem); + return 0; + +detach: + cmtp_detach_device(session); + +unlink: + __cmtp_unlink_session(session); + +failed: + up_write(&cmtp_session_sem); + kfree(session); + return err; +} + +int cmtp_del_connection(struct cmtp_conndel_req *req) +{ + struct cmtp_session *session; + int err = 0; + + BT_DBG(""); + + down_read(&cmtp_session_sem); + + session = __cmtp_get_session(&req->bdaddr); + if (session) { + /* Flush the transmit queue */ + skb_queue_purge(&session->transmit); + + /* Kill session thread */ + atomic_inc(&session->terminate); + cmtp_schedule(session); + } else + err = -ENOENT; + + up_read(&cmtp_session_sem); + return err; +} + +int cmtp_get_connlist(struct cmtp_connlist_req *req) +{ + struct list_head *p; + int err = 0, n = 0; + + BT_DBG(""); + + down_read(&cmtp_session_sem); + + list_for_each(p, &cmtp_session_list) { + struct cmtp_session *session; + struct cmtp_conninfo ci; + + session = list_entry(p, struct cmtp_session, list); + + __cmtp_copy_session(session, &ci); + + if (copy_to_user(req->ci, &ci, sizeof(ci))) { + err = -EFAULT; + break; + } + + if (++n >= req->cnum) + break; + + req->ci++; + } + req->cnum = n; + + up_read(&cmtp_session_sem); + return err; +} + +int cmtp_get_conninfo(struct cmtp_conninfo *ci) +{ + struct cmtp_session *session; + int err = 0; + + down_read(&cmtp_session_sem); + + session = __cmtp_get_session(&ci->bdaddr); + if (session) + __cmtp_copy_session(session, ci); + else + err = -ENOENT; + + up_read(&cmtp_session_sem); + return err; +} + + +static int __init cmtp_init(void) +{ + l2cap_load(); + + BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); + + cmtp_init_sockets(); + + return 0; +} + +static void __exit cmtp_exit(void) +{ + cmtp_cleanup_sockets(); +} + +module_init(cmtp_init); +module_exit(cmtp_exit); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth CMTP ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-5"); diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c new file mode 100644 index 0000000..d322d88 --- /dev/null +++ b/net/bluetooth/cmtp/sock.c @@ -0,0 +1,257 @@ +/* + CMTP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002-2003 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
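+
+   sock.c exposes the CMTP control socket (BTPROTO_CMTP).  As with BNEP,
+   user space connects an L2CAP socket to the remote CMTP service and
+   hands the descriptor to the kernel; a rough sketch, error handling
+   omitted and l2cap_fd assumed to be the already connected L2CAP socket:
+
+        struct cmtp_connadd_req req = { .sock = l2cap_fd, .flags = 0 };
+        int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_CMTP);
+        ioctl(ctl, CMTPCONNADD, &req);
+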
+*/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "cmtp.h" + +static int cmtp_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!sk) + return 0; + + sock_orphan(sk); + sock_put(sk); + + return 0; +} + +static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct cmtp_connadd_req ca; + struct cmtp_conndel_req cd; + struct cmtp_connlist_req cl; + struct cmtp_conninfo ci; + struct socket *nsock; + void __user *argp = (void __user *)arg; + int err; + + BT_DBG("cmd %x arg %lx", cmd, arg); + + switch (cmd) { + case CMTPCONNADD: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + + if (copy_from_user(&ca, argp, sizeof(ca))) + return -EFAULT; + + nsock = sockfd_lookup(ca.sock, &err); + if (!nsock) + return err; + + if (nsock->sk->sk_state != BT_CONNECTED) { + sockfd_put(nsock); + return -EBADFD; + } + + err = cmtp_add_connection(&ca, nsock); + if (!err) { + if (copy_to_user(argp, &ca, sizeof(ca))) + err = -EFAULT; + } else + sockfd_put(nsock); + + return err; + + case CMTPCONNDEL: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + + if (copy_from_user(&cd, argp, sizeof(cd))) + return -EFAULT; + + return cmtp_del_connection(&cd); + + case CMTPGETCONNLIST: + if (copy_from_user(&cl, argp, sizeof(cl))) + return -EFAULT; + + if (cl.cnum <= 0) + return -EINVAL; + + err = cmtp_get_connlist(&cl); + if (!err && copy_to_user(argp, &cl, sizeof(cl))) + return -EFAULT; + + return err; + + case CMTPGETCONNINFO: + if (copy_from_user(&ci, argp, sizeof(ci))) + return -EFAULT; + + err = cmtp_get_conninfo(&ci); + if (!err && copy_to_user(argp, &ci, sizeof(ci))) + return -EFAULT; + + return err; + } + + return -EINVAL; +} + +#ifdef CONFIG_COMPAT +static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + if (cmd == CMTPGETCONNLIST) { + struct cmtp_connlist_req cl; + uint32_t uci; + int err; + + if (get_user(cl.cnum, (uint32_t __user *) arg) || + get_user(uci, (u32 __user *) (arg + 4))) + return -EFAULT; + + cl.ci = compat_ptr(uci); + + if (cl.cnum <= 0) + return -EINVAL; + + err = cmtp_get_connlist(&cl); + + if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) + err = -EFAULT; + + return err; + } + + return cmtp_sock_ioctl(sock, cmd, arg); +} +#endif + +static const struct proto_ops cmtp_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = cmtp_sock_release, + .ioctl = cmtp_sock_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = cmtp_sock_compat_ioctl, +#endif + .bind = sock_no_bind, + .getname = sock_no_getname, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .poll = sock_no_poll, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .mmap = sock_no_mmap +}; + +static struct proto cmtp_proto = { + .name = "CMTP", + .owner = THIS_MODULE, + .obj_size = sizeof(struct bt_sock) +}; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) +static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +#else +static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol) +#endif +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + if (sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sk = 
sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + + sock->ops = &cmtp_sock_ops; + + sock->state = SS_UNCONNECTED; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = protocol; + sk->sk_state = BT_OPEN; + + return 0; +} + +static const struct net_proto_family cmtp_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = cmtp_sock_create +}; + +int cmtp_init_sockets(void) +{ + int err; + + err = proto_register(&cmtp_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops); + if (err < 0) + goto error; + + return 0; + +error: + BT_ERR("Can't register CMTP socket"); + proto_unregister(&cmtp_proto); + return err; +} + +void cmtp_cleanup_sockets(void) +{ + if (bt_sock_unregister(BTPROTO_CMTP) < 0) + BT_ERR("Can't unregister CMTP socket"); + + proto_unregister(&cmtp_proto); +} diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c new file mode 100644 index 0000000..b10e3cd --- /dev/null +++ b/net/bluetooth/hci_conn.c @@ -0,0 +1,705 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth HCI connection handling. 
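+ *
+ * Connections are reference counted with hci_conn_hold()/hci_conn_put();
+ * hci_conn_timeout() only tears a link down once the count has dropped
+ * to zero, and idle_timer drops an active ACL link into sniff mode after
+ * hdev->idle_timeout milliseconds.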
*/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +void hci_acl_connect(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct inquiry_entry *ie; + struct hci_cp_create_conn cp; + + BT_DBG("%p", conn); + + conn->state = BT_CONNECT; + conn->out = 1; + + conn->link_mode = HCI_LM_MASTER; + + conn->attempt++; + + conn->link_policy = hdev->link_policy; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.pscan_rep_mode = 0x02; + + if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) { + if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { + cp.pscan_rep_mode = ie->data.pscan_rep_mode; + cp.pscan_mode = ie->data.pscan_mode; + cp.clock_offset = ie->data.clock_offset | + cpu_to_le16(0x8000); + } + + memcpy(conn->dev_class, ie->data.dev_class, 3); + conn->ssp_mode = ie->data.ssp_mode; + } + + cp.pkt_type = cpu_to_le16(conn->pkt_type); + if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) + cp.role_switch = 0x01; + else + cp.role_switch = 0x00; + + hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp); +} + +static void hci_acl_connect_cancel(struct hci_conn *conn) +{ + struct hci_cp_create_conn_cancel cp; + + BT_DBG("%p", conn); + + if (conn->hdev->hci_ver < 2) + return; + + bacpy(&cp.bdaddr, &conn->dst); + hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp); +} + +void hci_acl_disconn(struct hci_conn *conn, __u8 reason) +{ + struct hci_cp_disconnect cp; + + BT_DBG("%p", conn); + + conn->state = BT_DISCONN; + + cp.handle = cpu_to_le16(conn->handle); + cp.reason = reason; + hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); +} + +void hci_add_sco(struct hci_conn *conn, __u16 handle) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_add_sco cp; + + BT_DBG("%p", conn); + + conn->state = BT_CONNECT; + conn->out = 1; + + conn->attempt++; + + cp.handle = cpu_to_le16(handle); + cp.pkt_type = cpu_to_le16(conn->pkt_type); + + hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); +} + +void hci_setup_sync(struct hci_conn *conn, __u16 handle) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_setup_sync_conn cp; + + BT_DBG("%p", conn); + + conn->state = BT_CONNECT; + conn->out = 1; + + conn->attempt++; + + cp.handle = cpu_to_le16(handle); + cp.pkt_type = cpu_to_le16(conn->pkt_type); + + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.max_latency = cpu_to_le16(0xffff); + cp.voice_setting = cpu_to_le16(hdev->voice_setting); + cp.retrans_effort = 0xff; + + hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); +} + +static void hci_conn_timeout(unsigned long arg) +{ + struct hci_conn *conn = (void *) arg; + struct hci_dev *hdev = conn->hdev; + __u8 reason; + + BT_DBG("conn %p state %d", conn, conn->state); + + if (atomic_read(&conn->refcnt)) + return; + + hci_dev_lock(hdev); + + switch (conn->state) { + case BT_CONNECT: + case BT_CONNECT2: + if (conn->type == ACL_LINK && conn->out) + hci_acl_connect_cancel(conn); + break; + case BT_CONFIG: + case BT_CONNECTED: + reason = hci_proto_disconn_ind(conn); + hci_acl_disconn(conn, reason); + break; + default: + conn->state = BT_CLOSED; + break; + } + + hci_dev_unlock(hdev); +} + +static void hci_conn_idle(unsigned long arg) +{ + struct hci_conn *conn = (void *) arg; + + BT_DBG("conn %p mode %d", conn, conn->mode); + + hci_conn_enter_sniff_mode(conn); +} + +struct hci_conn *hci_conn_add(struct 
hci_dev *hdev, int type, bdaddr_t *dst) +{ + struct hci_conn *conn; + + BT_DBG("%s dst %s", hdev->name, batostr(dst)); + + conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC); + if (!conn) + return NULL; + + bacpy(&conn->dst, dst); + conn->hdev = hdev; + conn->type = type; + conn->mode = HCI_CM_ACTIVE; + conn->state = BT_OPEN; + conn->auth_type = HCI_AT_GENERAL_BONDING; + + conn->power_save = 1; + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + + switch (type) { + case ACL_LINK: + conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; + break; + case SCO_LINK: + if (lmp_esco_capable(hdev)) + conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | + (hdev->esco_type & EDR_ESCO_MASK); + else + conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK; + break; + case ESCO_LINK: + conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK; + break; + } + + skb_queue_head_init(&conn->data_q); + + setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn); + setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); + + atomic_set(&conn->refcnt, 0); + + hci_dev_hold(hdev); + + tasklet_disable(&hdev->tx_task); + + hci_conn_hash_add(hdev, conn); + if (hdev->notify) + hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); + + atomic_set(&conn->devref, 0); + + hci_conn_init_sysfs(conn); + + tasklet_enable(&hdev->tx_task); + + return conn; +} + +int hci_conn_del(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle); + + del_timer(&conn->idle_timer); + + del_timer(&conn->disc_timer); + + if (conn->type == ACL_LINK) { + struct hci_conn *sco = conn->link; + if (sco) + sco->link = NULL; + + /* Unacked frames */ + hdev->acl_cnt += conn->sent; + } else { + struct hci_conn *acl = conn->link; + if (acl) { + acl->link = NULL; + hci_conn_put(acl); + } + } + + tasklet_disable(&hdev->tx_task); + + hci_conn_hash_del(hdev, conn); + if (hdev->notify) + hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); + + tasklet_enable(&hdev->tx_task); + + skb_queue_purge(&conn->data_q); + + hci_conn_put_device(conn); + + hci_dev_put(hdev); + + return 0; +} + +struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) +{ + int use_src = bacmp(src, BDADDR_ANY); + struct hci_dev *hdev = NULL; + struct list_head *p; + + BT_DBG("%s -> %s", batostr(src), batostr(dst)); + + read_lock_bh(&hci_dev_list_lock); + + list_for_each(p, &hci_dev_list) { + struct hci_dev *d = list_entry(p, struct hci_dev, list); + + if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) + continue; + + /* Simple routing: + * No source address - find interface with bdaddr != dst + * Source address - find interface with bdaddr == src + */ + + if (use_src) { + if (!bacmp(&d->bdaddr, src)) { + hdev = d; break; + } + } else { + if (bacmp(&d->bdaddr, dst)) { + hdev = d; break; + } + } + } + + if (hdev) + hdev = hci_dev_hold(hdev); + + read_unlock_bh(&hci_dev_list_lock); + return hdev; +} +EXPORT_SYMBOL(hci_get_route); + +/* Create SCO or ACL connection. 
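+ *
+ * The ACL link is looked up or created first; for SCO/eSCO the audio
+ * channel is chained to it via acl->link and sco->link, and the
+ * synchronous connection is only requested once the ACL link has
+ * reached BT_CONNECTED (using eSCO setup when the adapter supports it).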
+ * Device _must_ be locked */ +struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) +{ + struct hci_conn *acl; + struct hci_conn *sco; + + BT_DBG("%s dst %s", hdev->name, batostr(dst)); + + if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) { + if (!(acl = hci_conn_add(hdev, ACL_LINK, dst))) + return NULL; + } + + hci_conn_hold(acl); + + if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { + acl->sec_level = sec_level; + acl->auth_type = auth_type; + hci_acl_connect(acl); + } + + if (type == ACL_LINK) + return acl; + + if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) { + if (!(sco = hci_conn_add(hdev, type, dst))) { + hci_conn_put(acl); + return NULL; + } + } + + acl->link = sco; + sco->link = acl; + + hci_conn_hold(sco); + + if (acl->state == BT_CONNECTED && + (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { + acl->power_save = 1; + hci_conn_enter_active_mode(acl); + + if (lmp_esco_capable(hdev)) + hci_setup_sync(sco, acl->handle); + else + hci_add_sco(sco, acl->handle); + } + + return sco; +} +EXPORT_SYMBOL(hci_connect); + +/* Check link security requirement */ +int hci_conn_check_link_mode(struct hci_conn *conn) +{ + BT_DBG("conn %p", conn); + + if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 && + !(conn->link_mode & HCI_LM_ENCRYPT)) + return 0; + + return 1; +} +EXPORT_SYMBOL(hci_conn_check_link_mode); + +/* Authenticate remote device */ +static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) +{ + BT_DBG("conn %p", conn); + + if (sec_level > conn->sec_level) + conn->sec_level = sec_level; + else if (conn->link_mode & HCI_LM_AUTH) + return 1; + + conn->auth_type = auth_type; + + if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { + struct hci_cp_auth_requested cp; + cp.handle = cpu_to_le16(conn->handle); + hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, + sizeof(cp), &cp); + } + + return 0; +} + +/* Enable security */ +int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) +{ + BT_DBG("conn %p", conn); + + if (sec_level == BT_SECURITY_SDP) + return 1; + + if (sec_level == BT_SECURITY_LOW && + (!conn->ssp_mode || !conn->hdev->ssp_mode)) + return 1; + + if (conn->link_mode & HCI_LM_ENCRYPT) + return hci_conn_auth(conn, sec_level, auth_type); + + if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) + return 0; + + if (hci_conn_auth(conn, sec_level, auth_type)) { + struct hci_cp_set_conn_encrypt cp; + cp.handle = cpu_to_le16(conn->handle); + cp.encrypt = 1; + hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, + sizeof(cp), &cp); + } + + return 0; +} +EXPORT_SYMBOL(hci_conn_security); + +/* Change link key */ +int hci_conn_change_link_key(struct hci_conn *conn) +{ + BT_DBG("conn %p", conn); + + if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { + struct hci_cp_change_conn_link_key cp; + cp.handle = cpu_to_le16(conn->handle); + hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, + sizeof(cp), &cp); + } + + return 0; +} +EXPORT_SYMBOL(hci_conn_change_link_key); + +/* Switch role */ +int hci_conn_switch_role(struct hci_conn *conn, __u8 role) +{ + BT_DBG("conn %p", conn); + + if (!role && conn->link_mode & HCI_LM_MASTER) + return 1; + + if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) { + struct hci_cp_switch_role cp; + bacpy(&cp.bdaddr, &conn->dst); + cp.role = role; + hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp); + } + + return 0; +} +EXPORT_SYMBOL(hci_conn_switch_role); + +/* Enter active mode */ +void 
hci_conn_enter_active_mode(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p mode %d", conn, conn->mode); + + if (test_bit(HCI_RAW, &hdev->flags)) + return; + + if (conn->mode != HCI_CM_SNIFF || !conn->power_save) + goto timer; + + if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { + struct hci_cp_exit_sniff_mode cp; + cp.handle = cpu_to_le16(conn->handle); + hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp); + } + +timer: + if (hdev->idle_timeout > 0) + mod_timer(&conn->idle_timer, + jiffies + msecs_to_jiffies(hdev->idle_timeout)); +} + +/* Enter sniff mode */ +void hci_conn_enter_sniff_mode(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p mode %d", conn, conn->mode); + + if (test_bit(HCI_RAW, &hdev->flags)) + return; + + if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) + return; + + if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF)) + return; + + if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { + struct hci_cp_sniff_subrate cp; + cp.handle = cpu_to_le16(conn->handle); + cp.max_latency = cpu_to_le16(0); + cp.min_remote_timeout = cpu_to_le16(0); + cp.min_local_timeout = cpu_to_le16(0); + hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); + } + + if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { + struct hci_cp_sniff_mode cp; + cp.handle = cpu_to_le16(conn->handle); + cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); + cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); + cp.attempt = cpu_to_le16(4); + cp.timeout = cpu_to_le16(1); + hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); + } +} + +/* Drop all connection on the device */ +void hci_conn_hash_flush(struct hci_dev *hdev) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct list_head *p; + + BT_DBG("hdev %s", hdev->name); + + p = h->list.next; + while (p != &h->list) { + struct hci_conn *c; + + c = list_entry(p, struct hci_conn, list); + p = p->next; + + c->state = BT_CLOSED; + + hci_proto_disconn_cfm(c, 0x16); + hci_conn_del(c); + } +} + +/* Check pending connect attempts */ +void hci_conn_check_pending(struct hci_dev *hdev) +{ + struct hci_conn *conn; + + BT_DBG("hdev %s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); + if (conn) + hci_acl_connect(conn); + + hci_dev_unlock(hdev); +} + +void hci_conn_hold_device(struct hci_conn *conn) +{ + atomic_inc(&conn->devref); +} +EXPORT_SYMBOL(hci_conn_hold_device); + +void hci_conn_put_device(struct hci_conn *conn) +{ + if (atomic_dec_and_test(&conn->devref)) + hci_conn_del_sysfs(conn); +} +EXPORT_SYMBOL(hci_conn_put_device); + +int hci_get_conn_list(void __user *arg) +{ + struct hci_conn_list_req req, *cl; + struct hci_conn_info *ci; + struct hci_dev *hdev; + struct list_head *p; + int n = 0, size, err; + + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + + if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci)) + return -EINVAL; + + size = sizeof(req) + req.conn_num * sizeof(*ci); + + if (!(cl = kmalloc(size, GFP_KERNEL))) + return -ENOMEM; + + if (!(hdev = hci_dev_get(req.dev_id))) { + kfree(cl); + return -ENODEV; + } + + ci = cl->conn_info; + + hci_dev_lock_bh(hdev); + list_for_each(p, &hdev->conn_hash.list) { + register struct hci_conn *c; + c = list_entry(p, struct hci_conn, list); + + bacpy(&(ci + n)->bdaddr, &c->dst); + (ci + n)->handle = c->handle; + (ci + n)->type = c->type; + (ci + n)->out = c->out; + (ci + n)->state 
= c->state; + (ci + n)->link_mode = c->link_mode; + if (++n >= req.conn_num) + break; + } + hci_dev_unlock_bh(hdev); + + cl->dev_id = hdev->id; + cl->conn_num = n; + size = sizeof(req) + n * sizeof(*ci); + + hci_dev_put(hdev); + + err = copy_to_user(arg, cl, size); + kfree(cl); + + return err ? -EFAULT : 0; +} + +int hci_get_conn_info(struct hci_dev *hdev, void __user *arg) +{ + struct hci_conn_info_req req; + struct hci_conn_info ci; + struct hci_conn *conn; + char __user *ptr = arg + sizeof(req); + + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + + hci_dev_lock_bh(hdev); + conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr); + if (conn) { + bacpy(&ci.bdaddr, &conn->dst); + ci.handle = conn->handle; + ci.type = conn->type; + ci.out = conn->out; + ci.state = conn->state; + ci.link_mode = conn->link_mode; + } + hci_dev_unlock_bh(hdev); + + if (!conn) + return -ENOENT; + + return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0; +} + +int hci_get_auth_info(struct hci_dev *hdev, void __user *arg) +{ + struct hci_auth_info_req req; + struct hci_conn *conn; + + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + + hci_dev_lock_bh(hdev); + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr); + if (conn) + req.type = conn->auth_type; + hci_dev_unlock_bh(hdev); + + if (!conn) + return -ENOENT; + + return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0; +} diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c new file mode 100644 index 0000000..5e8fd44 --- /dev/null +++ b/net/bluetooth/hci_core.c @@ -0,0 +1,1656 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth HCI core. 
*/ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) +#include +#else +#include +#endif + +#include + +#include +#include +#include + +#include +#include + +static void hci_cmd_task(unsigned long arg); +static void hci_rx_task(unsigned long arg); +static void hci_tx_task(unsigned long arg); +static void hci_notify(struct hci_dev *hdev, int event); + +static DEFINE_RWLOCK(hci_task_lock); + +/* HCI device list */ +LIST_HEAD(hci_dev_list); +DEFINE_RWLOCK(hci_dev_list_lock); + +/* HCI callback list */ +LIST_HEAD(hci_cb_list); +DEFINE_RWLOCK(hci_cb_list_lock); + +/* HCI protocols */ +#define HCI_MAX_PROTO 2 +struct hci_proto *hci_proto[HCI_MAX_PROTO]; + +/* HCI notifiers list */ +static ATOMIC_NOTIFIER_HEAD(hci_notifier); + +/* ---- HCI notifications ---- */ + +int hci_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&hci_notifier, nb); +} + +int hci_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&hci_notifier, nb); +} + +static void hci_notify(struct hci_dev *hdev, int event) +{ + atomic_notifier_call_chain(&hci_notifier, event, hdev); +} + +/* ---- HCI requests ---- */ + +void hci_req_complete(struct hci_dev *hdev, int result) +{ + BT_DBG("%s result 0x%2.2x", hdev->name, result); + + if (hdev->req_status == HCI_REQ_PEND) { + hdev->req_result = result; + hdev->req_status = HCI_REQ_DONE; + wake_up_interruptible(&hdev->req_wait_q); + } +} + +static void hci_req_cancel(struct hci_dev *hdev, int err) +{ + BT_DBG("%s err 0x%2.2x", hdev->name, err); + + if (hdev->req_status == HCI_REQ_PEND) { + hdev->req_result = err; + hdev->req_status = HCI_REQ_CANCELED; + wake_up_interruptible(&hdev->req_wait_q); + } +} + +/* Execute request and wait for completion. 
*/ +static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), + unsigned long opt, __u32 timeout) +{ + DECLARE_WAITQUEUE(wait, current); + int err = 0; + + BT_DBG("%s start", hdev->name); + + hdev->req_status = HCI_REQ_PEND; + + add_wait_queue(&hdev->req_wait_q, &wait); + set_current_state(TASK_INTERRUPTIBLE); + + req(hdev, opt); + schedule_timeout(timeout); + + remove_wait_queue(&hdev->req_wait_q, &wait); + + if (signal_pending(current)) + return -EINTR; + + switch (hdev->req_status) { + case HCI_REQ_DONE: + err = -bt_err(hdev->req_result); + break; + + case HCI_REQ_CANCELED: + err = -hdev->req_result; + break; + + default: + err = -ETIMEDOUT; + break; + } + + hdev->req_status = hdev->req_result = 0; + + BT_DBG("%s end: err %d", hdev->name, err); + + return err; +} + +static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), + unsigned long opt, __u32 timeout) +{ + int ret; + + if (!test_bit(HCI_UP, &hdev->flags)) + return -ENETDOWN; + + /* Serialize all requests */ + hci_req_lock(hdev); + ret = __hci_request(hdev, req, opt, timeout); + hci_req_unlock(hdev); + + return ret; +} + +static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) +{ + BT_DBG("%s %ld", hdev->name, opt); + + /* Reset device */ + hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); +} + +static void hci_init_req(struct hci_dev *hdev, unsigned long opt) +{ + struct sk_buff *skb; + __le16 param; + __u8 flt_type; + + BT_DBG("%s %ld", hdev->name, opt); + + /* Driver initialization */ + + /* Special commands */ + while ((skb = skb_dequeue(&hdev->driver_init))) { + bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; + skb->dev = (void *) hdev; + + skb_queue_tail(&hdev->cmd_q, skb); + tasklet_schedule(&hdev->cmd_task); + } + skb_queue_purge(&hdev->driver_init); + + /* Mandatory initialization */ + + /* Reset */ + if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) + hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); + + /* Read Local Supported Features */ + hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); + + /* Read Local Version */ + hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); + + /* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/ + hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); + +#if 0 + /* Host buffer size */ + { + struct hci_cp_host_buffer_size cp; + cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE); + cp.sco_mtu = HCI_MAX_SCO_SIZE; + cp.acl_max_pkt = cpu_to_le16(0xffff); + cp.sco_max_pkt = cpu_to_le16(0xffff); + hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp); + } +#endif + + /* Read BD Address */ + hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); + + /* Read Class of Device */ + hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); + + /* Read Local Name */ + hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL); + + /* Read Voice Setting */ + hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL); + + /* Optional initialization */ + + /* Clear Event Filters */ + flt_type = HCI_FLT_CLEAR_ALL; + hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); + + /* Page timeout ~20 secs */ + param = cpu_to_le16(0x8000); + hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, ¶m); + + /* Connection accept timeout ~20 secs */ + param = cpu_to_le16(0x7d00); + hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); +} + +static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) +{ + __u8 scan = opt; + + BT_DBG("%s %x", hdev->name, scan); + + /* Inquiry and Page scans */ + hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); +} + +static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) +{ + __u8 auth = opt; + + BT_DBG("%s %x", hdev->name, auth); + + /* Authentication */ + hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); +} + +static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) +{ + __u8 encrypt = opt; + + BT_DBG("%s %x", hdev->name, encrypt); + + /* Encryption */ + hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); +} + +static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt) +{ + __le16 policy = cpu_to_le16(opt); + + BT_DBG("%s %x", hdev->name, policy); + + /* Default link policy */ + hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); +} + +/* Get HCI device by index. + * Device is held on return. */ +struct hci_dev *hci_dev_get(int index) +{ + struct hci_dev *hdev = NULL; + struct list_head *p; + + BT_DBG("%d", index); + + if (index < 0) + return NULL; + + read_lock(&hci_dev_list_lock); + list_for_each(p, &hci_dev_list) { + struct hci_dev *d = list_entry(p, struct hci_dev, list); + if (d->id == index) { + hdev = hci_dev_hold(d); + break; + } + } + read_unlock(&hci_dev_list_lock); + return hdev; +} + +/* ---- Inquiry support ---- */ +static void inquiry_cache_flush(struct hci_dev *hdev) +{ + struct inquiry_cache *cache = &hdev->inq_cache; + struct inquiry_entry *next = cache->list, *e; + + BT_DBG("cache %p", cache); + + cache->list = NULL; + while ((e = next)) { + next = e->next; + kfree(e); + } +} + +struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) +{ + struct inquiry_cache *cache = &hdev->inq_cache; + struct inquiry_entry *e; + + BT_DBG("cache %p, %s", cache, batostr(bdaddr)); + + for (e = cache->list; e; e = e->next) + if (!bacmp(&e->data.bdaddr, bdaddr)) + break; + return e; +} + +void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data) +{ + struct inquiry_cache *cache = &hdev->inq_cache; + struct inquiry_entry *e; + + BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr)); + + if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) { + /* Entry not in the cache. Add new one. 
*/ + if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC))) + return; + e->next = cache->list; + cache->list = e; + } + + memcpy(&e->data, data, sizeof(*data)); + e->timestamp = jiffies; + cache->timestamp = jiffies; +} + +static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) +{ + struct inquiry_cache *cache = &hdev->inq_cache; + struct inquiry_info *info = (struct inquiry_info *) buf; + struct inquiry_entry *e; + int copied = 0; + + for (e = cache->list; e && copied < num; e = e->next, copied++) { + struct inquiry_data *data = &e->data; + bacpy(&info->bdaddr, &data->bdaddr); + info->pscan_rep_mode = data->pscan_rep_mode; + info->pscan_period_mode = data->pscan_period_mode; + info->pscan_mode = data->pscan_mode; + memcpy(info->dev_class, data->dev_class, 3); + info->clock_offset = data->clock_offset; + info++; + } + + BT_DBG("cache %p, copied %d", cache, copied); + return copied; +} + +static void hci_inq_req(struct hci_dev *hdev, unsigned long opt) +{ + struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; + struct hci_cp_inquiry cp; + + BT_DBG("%s", hdev->name); + + if (test_bit(HCI_INQUIRY, &hdev->flags)) + return; + + /* Start Inquiry */ + memcpy(&cp.lap, &ir->lap, 3); + cp.length = ir->length; + cp.num_rsp = ir->num_rsp; + hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); +} + +int hci_inquiry(void __user *arg) +{ + __u8 __user *ptr = arg; + struct hci_inquiry_req ir; + struct hci_dev *hdev; + int err = 0, do_inquiry = 0, max_rsp; + long timeo; + __u8 *buf; + + if (copy_from_user(&ir, ptr, sizeof(ir))) + return -EFAULT; + + if (!(hdev = hci_dev_get(ir.dev_id))) + return -ENODEV; + + hci_dev_lock_bh(hdev); + if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || + inquiry_cache_empty(hdev) || + ir.flags & IREQ_CACHE_FLUSH) { + inquiry_cache_flush(hdev); + do_inquiry = 1; + } + hci_dev_unlock_bh(hdev); + + timeo = ir.length * msecs_to_jiffies(2000); + if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0) + goto done; + + /* for unlimited number of responses we will use buffer with 255 entries */ + max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; + + /* cache_dump can't sleep. Therefore we allocate temp buffer and then + * copy it to the user space. 
+ */ + if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) { + err = -ENOMEM; + goto done; + } + + hci_dev_lock_bh(hdev); + ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); + hci_dev_unlock_bh(hdev); + + BT_DBG("num_rsp %d", ir.num_rsp); + + if (!copy_to_user(ptr, &ir, sizeof(ir))) { + ptr += sizeof(ir); + if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * + ir.num_rsp)) + err = -EFAULT; + } else + err = -EFAULT; + + kfree(buf); + +done: + hci_dev_put(hdev); + return err; +} + +/* ---- HCI ioctl helpers ---- */ + +int hci_dev_open(__u16 dev) +{ + struct hci_dev *hdev; + int ret = 0; + + if (!(hdev = hci_dev_get(dev))) + return -ENODEV; + + BT_DBG("%s %p", hdev->name, hdev); + + hci_req_lock(hdev); + + if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { + ret = -ERFKILL; + goto done; + } + + if (test_bit(HCI_UP, &hdev->flags)) { + ret = -EALREADY; + goto done; + } + + if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + set_bit(HCI_RAW, &hdev->flags); + + /* Treat all non BR/EDR controllers as raw devices for now */ + if (hdev->dev_type != HCI_BREDR) + set_bit(HCI_RAW, &hdev->flags); + + if (hdev->open(hdev)) { + ret = -EIO; + goto done; + } + + if (!test_bit(HCI_RAW, &hdev->flags)) { + atomic_set(&hdev->cmd_cnt, 1); + set_bit(HCI_INIT, &hdev->flags); + + //__hci_request(hdev, hci_reset_req, 0, HZ); + ret = __hci_request(hdev, hci_init_req, 0, + msecs_to_jiffies(HCI_INIT_TIMEOUT)); + + clear_bit(HCI_INIT, &hdev->flags); + } + + if (!ret) { + hci_dev_hold(hdev); + set_bit(HCI_UP, &hdev->flags); + hci_notify(hdev, HCI_DEV_UP); + } else { + /* Init failed, cleanup */ + tasklet_kill(&hdev->rx_task); + tasklet_kill(&hdev->tx_task); + tasklet_kill(&hdev->cmd_task); + + skb_queue_purge(&hdev->cmd_q); + skb_queue_purge(&hdev->rx_q); + + if (hdev->flush) + hdev->flush(hdev); + + if (hdev->sent_cmd) { + kfree_skb(hdev->sent_cmd); + hdev->sent_cmd = NULL; + } + + hdev->close(hdev); + hdev->flags = 0; + } + +done: + hci_req_unlock(hdev); + hci_dev_put(hdev); + return ret; +} + +static int hci_dev_do_close(struct hci_dev *hdev) +{ + BT_DBG("%s %p", hdev->name, hdev); + + hci_req_cancel(hdev, ENODEV); + hci_req_lock(hdev); + + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { + hci_req_unlock(hdev); + return 0; + } + + /* Kill RX and TX tasks */ + tasklet_kill(&hdev->rx_task); + tasklet_kill(&hdev->tx_task); + + hci_dev_lock_bh(hdev); + inquiry_cache_flush(hdev); + hci_conn_hash_flush(hdev); + hci_dev_unlock_bh(hdev); + + hci_notify(hdev, HCI_DEV_DOWN); + + if (hdev->flush) + hdev->flush(hdev); + + /* Reset device */ + skb_queue_purge(&hdev->cmd_q); + atomic_set(&hdev->cmd_cnt, 1); + if (!test_bit(HCI_RAW, &hdev->flags)) { + set_bit(HCI_INIT, &hdev->flags); + __hci_request(hdev, hci_reset_req, 0, + msecs_to_jiffies(250)); + clear_bit(HCI_INIT, &hdev->flags); + } + + /* Kill cmd task */ + tasklet_kill(&hdev->cmd_task); + + /* Drop queues */ + skb_queue_purge(&hdev->rx_q); + skb_queue_purge(&hdev->cmd_q); + skb_queue_purge(&hdev->raw_q); + + /* Drop last sent command */ + if (hdev->sent_cmd) { + kfree_skb(hdev->sent_cmd); + hdev->sent_cmd = NULL; + } + + /* After this point our queues are empty + * and no tasks are scheduled. 
*/ + hdev->close(hdev); + + /* Clear flags */ + hdev->flags = 0; + + hci_req_unlock(hdev); + + hci_dev_put(hdev); + return 0; +} + +int hci_dev_close(__u16 dev) +{ + struct hci_dev *hdev; + int err; + + if (!(hdev = hci_dev_get(dev))) + return -ENODEV; + err = hci_dev_do_close(hdev); + hci_dev_put(hdev); + return err; +} + +int hci_dev_reset(__u16 dev) +{ + struct hci_dev *hdev; + int ret = 0; + + if (!(hdev = hci_dev_get(dev))) + return -ENODEV; + + hci_req_lock(hdev); + tasklet_disable(&hdev->tx_task); + + if (!test_bit(HCI_UP, &hdev->flags)) + goto done; + + /* Drop queues */ + skb_queue_purge(&hdev->rx_q); + skb_queue_purge(&hdev->cmd_q); + + hci_dev_lock_bh(hdev); + inquiry_cache_flush(hdev); + hci_conn_hash_flush(hdev); + hci_dev_unlock_bh(hdev); + + if (hdev->flush) + hdev->flush(hdev); + + atomic_set(&hdev->cmd_cnt, 1); + hdev->acl_cnt = 0; hdev->sco_cnt = 0; + + if (!test_bit(HCI_RAW, &hdev->flags)) + ret = __hci_request(hdev, hci_reset_req, 0, + msecs_to_jiffies(HCI_INIT_TIMEOUT)); + +done: + tasklet_enable(&hdev->tx_task); + hci_req_unlock(hdev); + hci_dev_put(hdev); + return ret; +} + +int hci_dev_reset_stat(__u16 dev) +{ + struct hci_dev *hdev; + int ret = 0; + + if (!(hdev = hci_dev_get(dev))) + return -ENODEV; + + memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); + + hci_dev_put(hdev); + + return ret; +} + +int hci_dev_cmd(unsigned int cmd, void __user *arg) +{ + struct hci_dev *hdev; + struct hci_dev_req dr; + int err = 0; + + if (copy_from_user(&dr, arg, sizeof(dr))) + return -EFAULT; + + if (!(hdev = hci_dev_get(dr.dev_id))) + return -ENODEV; + + switch (cmd) { + case HCISETAUTH: + err = hci_request(hdev, hci_auth_req, dr.dev_opt, + msecs_to_jiffies(HCI_INIT_TIMEOUT)); + break; + + case HCISETENCRYPT: + if (!lmp_encrypt_capable(hdev)) { + err = -EOPNOTSUPP; + break; + } + + if (!test_bit(HCI_AUTH, &hdev->flags)) { + /* Auth must be enabled first */ + err = hci_request(hdev, hci_auth_req, dr.dev_opt, + msecs_to_jiffies(HCI_INIT_TIMEOUT)); + if (err) + break; + } + + err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, + msecs_to_jiffies(HCI_INIT_TIMEOUT)); + break; + + case HCISETSCAN: + err = hci_request(hdev, hci_scan_req, dr.dev_opt, + msecs_to_jiffies(HCI_INIT_TIMEOUT)); + break; + + case HCISETLINKPOL: + err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, + msecs_to_jiffies(HCI_INIT_TIMEOUT)); + break; + + case HCISETLINKMODE: + hdev->link_mode = ((__u16) dr.dev_opt) & + (HCI_LM_MASTER | HCI_LM_ACCEPT); + break; + + case HCISETPTYPE: + hdev->pkt_type = (__u16) dr.dev_opt; + break; + + case HCISETACLMTU: + hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); + hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); + break; + + case HCISETSCOMTU: + hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); + hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); + break; + + default: + err = -EINVAL; + break; + } + + hci_dev_put(hdev); + return err; +} + +int hci_get_dev_list(void __user *arg) +{ + struct hci_dev_list_req *dl; + struct hci_dev_req *dr; + struct list_head *p; + int n = 0, size, err; + __u16 dev_num; + + if (get_user(dev_num, (__u16 __user *) arg)) + return -EFAULT; + + if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) + return -EINVAL; + + size = sizeof(*dl) + dev_num * sizeof(*dr); + + if (!(dl = kzalloc(size, GFP_KERNEL))) + return -ENOMEM; + + dr = dl->dev_req; + + read_lock_bh(&hci_dev_list_lock); + list_for_each(p, &hci_dev_list) { + struct hci_dev *hdev; + hdev = list_entry(p, struct hci_dev, list); + (dr + n)->dev_id = hdev->id; + (dr + n)->dev_opt = hdev->flags; + if 
(++n >= dev_num) + break; + } + read_unlock_bh(&hci_dev_list_lock); + + dl->dev_num = n; + size = sizeof(*dl) + n * sizeof(*dr); + + err = copy_to_user(arg, dl, size); + kfree(dl); + + return err ? -EFAULT : 0; +} + +int hci_get_dev_info(void __user *arg) +{ + struct hci_dev *hdev; + struct hci_dev_info di; + int err = 0; + + if (copy_from_user(&di, arg, sizeof(di))) + return -EFAULT; + + if (!(hdev = hci_dev_get(di.dev_id))) + return -ENODEV; + + strcpy(di.name, hdev->name); + di.bdaddr = hdev->bdaddr; + di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4); + di.flags = hdev->flags; + di.pkt_type = hdev->pkt_type; + di.acl_mtu = hdev->acl_mtu; + di.acl_pkts = hdev->acl_pkts; + di.sco_mtu = hdev->sco_mtu; + di.sco_pkts = hdev->sco_pkts; + di.link_policy = hdev->link_policy; + di.link_mode = hdev->link_mode; + + memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); + memcpy(&di.features, &hdev->features, sizeof(di.features)); + + if (copy_to_user(arg, &di, sizeof(di))) + err = -EFAULT; + + hci_dev_put(hdev); + + return err; +} + +/* ---- Interface to HCI drivers ---- */ + +static int hci_rfkill_set_block(void *data, bool blocked) +{ + struct hci_dev *hdev = data; + + BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); + + if (!blocked) + return 0; + + hci_dev_do_close(hdev); + + return 0; +} + +static const struct rfkill_ops hci_rfkill_ops = { + .set_block = hci_rfkill_set_block, +}; + +/* Alloc HCI device */ +struct hci_dev *hci_alloc_dev(void) +{ + struct hci_dev *hdev; + + hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL); + if (!hdev) + return NULL; + + skb_queue_head_init(&hdev->driver_init); + + return hdev; +} +EXPORT_SYMBOL(hci_alloc_dev); + +/* Free HCI device */ +void hci_free_dev(struct hci_dev *hdev) +{ + skb_queue_purge(&hdev->driver_init); + + /* will free via device release */ + put_device(&hdev->dev); +} +EXPORT_SYMBOL(hci_free_dev); + +/* Register HCI device */ +int hci_register_dev(struct hci_dev *hdev) +{ + struct list_head *head = &hci_dev_list, *p; + int i, id = 0; + + BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name, + hdev->bus, hdev->owner); + + if (!hdev->open || !hdev->close || !hdev->destruct) + return -EINVAL; + + write_lock_bh(&hci_dev_list_lock); + + /* Find first available device id */ + list_for_each(p, &hci_dev_list) { + if (list_entry(p, struct hci_dev, list)->id != id) + break; + head = p; id++; + } + + sprintf(hdev->name, "hci%d", id); + hdev->id = id; + list_add(&hdev->list, head); + + atomic_set(&hdev->refcnt, 1); + spin_lock_init(&hdev->lock); + + hdev->flags = 0; + hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); + hdev->esco_type = (ESCO_HV1); + hdev->link_mode = (HCI_LM_ACCEPT); + + hdev->idle_timeout = 0; + hdev->sniff_max_interval = 800; + hdev->sniff_min_interval = 80; + + tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev); + tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev); + tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev); + + skb_queue_head_init(&hdev->rx_q); + skb_queue_head_init(&hdev->cmd_q); + skb_queue_head_init(&hdev->raw_q); + + for (i = 0; i < 3; i++) + hdev->reassembly[i] = NULL; + + init_waitqueue_head(&hdev->req_wait_q); + mutex_init(&hdev->req_lock); + + inquiry_cache_init(hdev); + + hci_conn_hash_init(hdev); + + memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); + + atomic_set(&hdev->promisc, 0); + + write_unlock_bh(&hci_dev_list_lock); + + hci_register_sysfs(hdev); + + hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, + RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); + if 
(hdev->rfkill) { + if (rfkill_register(hdev->rfkill) < 0) { + rfkill_destroy(hdev->rfkill); + hdev->rfkill = NULL; + } + } + + hci_notify(hdev, HCI_DEV_REG); + + return id; +} +EXPORT_SYMBOL(hci_register_dev); + +/* Unregister HCI device */ +int hci_unregister_dev(struct hci_dev *hdev) +{ + int i; + + BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + + write_lock_bh(&hci_dev_list_lock); + list_del(&hdev->list); + write_unlock_bh(&hci_dev_list_lock); + + hci_dev_do_close(hdev); + + for (i = 0; i < 3; i++) + kfree_skb(hdev->reassembly[i]); + + hci_notify(hdev, HCI_DEV_UNREG); + + if (hdev->rfkill) { + rfkill_unregister(hdev->rfkill); + rfkill_destroy(hdev->rfkill); + } + + hci_unregister_sysfs(hdev); + + __hci_dev_put(hdev); + + return 0; +} +EXPORT_SYMBOL(hci_unregister_dev); + +/* Suspend HCI device */ +int hci_suspend_dev(struct hci_dev *hdev) +{ + hci_notify(hdev, HCI_DEV_SUSPEND); + return 0; +} +EXPORT_SYMBOL(hci_suspend_dev); + +/* Resume HCI device */ +int hci_resume_dev(struct hci_dev *hdev) +{ + hci_notify(hdev, HCI_DEV_RESUME); + return 0; +} +EXPORT_SYMBOL(hci_resume_dev); + +/* Receive frame from HCI drivers */ +int hci_recv_frame(struct sk_buff *skb) +{ + struct hci_dev *hdev = (struct hci_dev *) skb->dev; + if (!hdev || (!test_bit(HCI_UP, &hdev->flags) + && !test_bit(HCI_INIT, &hdev->flags))) { + kfree_skb(skb); + return -ENXIO; + } + + /* Incomming skb */ + bt_cb(skb)->incoming = 1; + + /* Time stamp */ + __net_timestamp(skb); + + /* Queue frame for rx task */ + skb_queue_tail(&hdev->rx_q, skb); + tasklet_schedule(&hdev->rx_task); + + return 0; +} +EXPORT_SYMBOL(hci_recv_frame); + +/* Receive packet type fragment */ +#define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2]) + +int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count) +{ + if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) + return -EILSEQ; + + while (count) { + struct sk_buff *skb = __reassembly(hdev, type); + struct { int expect; } *scb; + int len = 0; + + if (!skb) { + /* Start of the frame */ + + switch (type) { + case HCI_EVENT_PKT: + if (count >= HCI_EVENT_HDR_SIZE) { + struct hci_event_hdr *h = data; + len = HCI_EVENT_HDR_SIZE + h->plen; + } else + return -EILSEQ; + break; + + case HCI_ACLDATA_PKT: + if (count >= HCI_ACL_HDR_SIZE) { + struct hci_acl_hdr *h = data; + len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen); + } else + return -EILSEQ; + break; + + case HCI_SCODATA_PKT: + if (count >= HCI_SCO_HDR_SIZE) { + struct hci_sco_hdr *h = data; + len = HCI_SCO_HDR_SIZE + h->dlen; + } else + return -EILSEQ; + break; + } + + skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!skb) { + BT_ERR("%s no memory for packet", hdev->name); + return -ENOMEM; + } + + skb->dev = (void *) hdev; + bt_cb(skb)->pkt_type = type; + + __reassembly(hdev, type) = skb; + + scb = (void *) skb->cb; + scb->expect = len; + } else { + /* Continuation */ + + scb = (void *) skb->cb; + len = scb->expect; + } + + len = min(len, count); + + memcpy(skb_put(skb, len), data, len); + + scb->expect -= len; + + if (scb->expect == 0) { + /* Complete frame */ + + __reassembly(hdev, type) = NULL; + + bt_cb(skb)->pkt_type = type; + hci_recv_frame(skb); + } + + count -= len; data += len; + } + + return 0; +} +EXPORT_SYMBOL(hci_recv_fragment); + +/* ---- Interface to upper protocols ---- */ + +/* Register/Unregister protocols. + * hci_task_lock is used to ensure that no tasks are running. 
*/ +int hci_register_proto(struct hci_proto *hp) +{ + int err = 0; + + BT_DBG("%p name %s id %d", hp, hp->name, hp->id); + + if (hp->id >= HCI_MAX_PROTO) + return -EINVAL; + + write_lock_bh(&hci_task_lock); + + if (!hci_proto[hp->id]) + hci_proto[hp->id] = hp; + else + err = -EEXIST; + + write_unlock_bh(&hci_task_lock); + + return err; +} +EXPORT_SYMBOL(hci_register_proto); + +int hci_unregister_proto(struct hci_proto *hp) +{ + int err = 0; + + BT_DBG("%p name %s id %d", hp, hp->name, hp->id); + + if (hp->id >= HCI_MAX_PROTO) + return -EINVAL; + + write_lock_bh(&hci_task_lock); + + if (hci_proto[hp->id]) + hci_proto[hp->id] = NULL; + else + err = -ENOENT; + + write_unlock_bh(&hci_task_lock); + + return err; +} +EXPORT_SYMBOL(hci_unregister_proto); + +int hci_register_cb(struct hci_cb *cb) +{ + BT_DBG("%p name %s", cb, cb->name); + + write_lock_bh(&hci_cb_list_lock); + list_add(&cb->list, &hci_cb_list); + write_unlock_bh(&hci_cb_list_lock); + + return 0; +} +EXPORT_SYMBOL(hci_register_cb); + +int hci_unregister_cb(struct hci_cb *cb) +{ + BT_DBG("%p name %s", cb, cb->name); + + write_lock_bh(&hci_cb_list_lock); + list_del(&cb->list); + write_unlock_bh(&hci_cb_list_lock); + + return 0; +} +EXPORT_SYMBOL(hci_unregister_cb); + +static int hci_send_frame(struct sk_buff *skb) +{ + struct hci_dev *hdev = (struct hci_dev *) skb->dev; + + if (!hdev) { + kfree_skb(skb); + return -ENODEV; + } + + BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); + + if (atomic_read(&hdev->promisc)) { + /* Time stamp */ + __net_timestamp(skb); + + hci_send_to_sock(hdev, skb); + } + + /* Get rid of skb owner, prior to sending to the driver. */ + skb_orphan(skb); + + return hdev->send(skb); +} + +/* Send HCI command */ +int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param) +{ + int len = HCI_COMMAND_HDR_SIZE + plen; + struct hci_command_hdr *hdr; + struct sk_buff *skb; + + BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen); + + skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!skb) { + BT_ERR("%s no memory for command", hdev->name); + return -ENOMEM; + } + + hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); + hdr->opcode = cpu_to_le16(opcode); + hdr->plen = plen; + + if (plen) + memcpy(skb_put(skb, plen), param, plen); + + BT_DBG("skb len %d", skb->len); + + bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; + skb->dev = (void *) hdev; + + skb_queue_tail(&hdev->cmd_q, skb); + tasklet_schedule(&hdev->cmd_task); + + return 0; +} + +/* Get data from the previously sent command */ +void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) +{ + struct hci_command_hdr *hdr; + + if (!hdev->sent_cmd) + return NULL; + + hdr = (void *) hdev->sent_cmd->data; + + if (hdr->opcode != cpu_to_le16(opcode)) + return NULL; + + BT_DBG("%s opcode 0x%x", hdev->name, opcode); + + return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; +} + +/* Send ACL data */ +static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) +{ + struct hci_acl_hdr *hdr; + int len = skb->len; + + skb_push(skb, HCI_ACL_HDR_SIZE); + skb_reset_transport_header(skb); + hdr = (struct hci_acl_hdr *)skb_transport_header(skb); + hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); + hdr->dlen = cpu_to_le16(len); +} + +int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) +{ + struct hci_dev *hdev = conn->hdev; + struct sk_buff *list; + + BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags); + + skb->dev = (void *) hdev; + bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; + 
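+	/* Head skb gets an ACL header flagged ACL_START; any fragments chained on frag_list are given ACL_CONT headers below. */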
hci_add_acl_hdr(skb, conn->handle, flags | ACL_START); + + if (!(list = skb_shinfo(skb)->frag_list)) { + /* Non fragmented */ + BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); + + skb_queue_tail(&conn->data_q, skb); + } else { + /* Fragmented */ + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + skb_shinfo(skb)->frag_list = NULL; + + /* Queue all fragments atomically */ + spin_lock_bh(&conn->data_q.lock); + + __skb_queue_tail(&conn->data_q, skb); + do { + skb = list; list = list->next; + + skb->dev = (void *) hdev; + bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; + hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT); + + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + __skb_queue_tail(&conn->data_q, skb); + } while (list); + + spin_unlock_bh(&conn->data_q.lock); + } + + tasklet_schedule(&hdev->tx_task); + + return 0; +} +EXPORT_SYMBOL(hci_send_acl); + +/* Send SCO data */ +int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_sco_hdr hdr; + + BT_DBG("%s len %d", hdev->name, skb->len); + + if (skb->len > hdev->sco_mtu) { + kfree_skb(skb); + return -EINVAL; + } + + hdr.handle = cpu_to_le16(conn->handle); + hdr.dlen = skb->len; + + skb_push(skb, HCI_SCO_HDR_SIZE); + skb_reset_transport_header(skb); + memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); + + skb->dev = (void *) hdev; + bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; + + skb_queue_tail(&conn->data_q, skb); + tasklet_schedule(&hdev->tx_task); + + return 0; +} +EXPORT_SYMBOL(hci_send_sco); + +/* ---- HCI TX task (outgoing data) ---- */ + +/* HCI Connection scheduler */ +static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *conn = NULL; + int num = 0, min = ~0; + struct list_head *p; + + /* We don't have to lock device here. Connections are always + * added and removed with TX task disabled. */ + list_for_each(p, &h->list) { + struct hci_conn *c; + c = list_entry(p, struct hci_conn, list); + + if (c->type != type || skb_queue_empty(&c->data_q)) + continue; + + if (c->state != BT_CONNECTED && c->state != BT_CONFIG) + continue; + + num++; + + if (c->sent < min) { + min = c->sent; + conn = c; + } + } + + if (conn) { + int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt); + int q = cnt / num; + *quote = q ? 
q : 1; + } else + *quote = 0; + + BT_DBG("conn %p quote %d", conn, *quote); + return conn; +} + +static inline void hci_acl_tx_to(struct hci_dev *hdev) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct list_head *p; + struct hci_conn *c; + + BT_ERR("%s ACL tx timeout", hdev->name); + + /* Kill stalled connections */ + list_for_each(p, &h->list) { + c = list_entry(p, struct hci_conn, list); + if (c->type == ACL_LINK && c->sent) { + BT_ERR("%s killing stalled ACL connection %s", + hdev->name, batostr(&c->dst)); + hci_acl_disconn(c, 0x13); + } + } +} + +static inline void hci_sched_acl(struct hci_dev *hdev) +{ + struct hci_conn *conn; + struct sk_buff *skb; + int quote; + + BT_DBG("%s", hdev->name); + + if (!test_bit(HCI_RAW, &hdev->flags)) { + /* ACL tx timeout must be longer than maximum + * link supervision timeout (40.9 seconds) */ + if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45)) + hci_acl_tx_to(hdev); + } + + while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, "e))) { + while (quote-- && (skb = skb_dequeue(&conn->data_q))) { + BT_DBG("skb %p len %d", skb, skb->len); + + hci_conn_enter_active_mode(conn); + + hci_send_frame(skb); + hdev->acl_last_tx = jiffies; + + hdev->acl_cnt--; + conn->sent++; + } + } +} + +/* Schedule SCO */ +static inline void hci_sched_sco(struct hci_dev *hdev) +{ + struct hci_conn *conn; + struct sk_buff *skb; + int quote; + + BT_DBG("%s", hdev->name); + + while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) { + while (quote-- && (skb = skb_dequeue(&conn->data_q))) { + BT_DBG("skb %p len %d", skb, skb->len); + hci_send_frame(skb); + + conn->sent++; + if (conn->sent == ~0) + conn->sent = 0; + } + } +} + +static inline void hci_sched_esco(struct hci_dev *hdev) +{ + struct hci_conn *conn; + struct sk_buff *skb; + int quote; + + BT_DBG("%s", hdev->name); + + while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) { + while (quote-- && (skb = skb_dequeue(&conn->data_q))) { + BT_DBG("skb %p len %d", skb, skb->len); + hci_send_frame(skb); + + conn->sent++; + if (conn->sent == ~0) + conn->sent = 0; + } + } +} + +static void hci_tx_task(unsigned long arg) +{ + struct hci_dev *hdev = (struct hci_dev *) arg; + struct sk_buff *skb; + + read_lock(&hci_task_lock); + + BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt); + + /* Schedule queues and send stuff to HCI driver */ + + hci_sched_acl(hdev); + + hci_sched_sco(hdev); + + hci_sched_esco(hdev); + + /* Send next queued raw (unknown type) packet */ + while ((skb = skb_dequeue(&hdev->raw_q))) + hci_send_frame(skb); + + read_unlock(&hci_task_lock); +} + +/* ----- HCI RX task (incoming data proccessing) ----- */ + +/* ACL data packet */ +static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_acl_hdr *hdr = (void *) skb->data; + struct hci_conn *conn; + __u16 handle, flags; + + skb_pull(skb, HCI_ACL_HDR_SIZE); + + handle = __le16_to_cpu(hdr->handle); + flags = hci_flags(handle); + handle = hci_handle(handle); + + BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); + + hdev->stat.acl_rx++; + + hci_dev_lock(hdev); + conn = hci_conn_hash_lookup_handle(hdev, handle); + hci_dev_unlock(hdev); + + if (conn) { + register struct hci_proto *hp; + + hci_conn_enter_active_mode(conn); + + /* Send to upper protocol */ + if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) { + hp->recv_acldata(conn, skb, flags); + return; + } + } else { + BT_ERR("%s ACL packet for unknown connection 
handle %d", + hdev->name, handle); + } + + kfree_skb(skb); +} + +/* SCO data packet */ +static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_sco_hdr *hdr = (void *) skb->data; + struct hci_conn *conn; + __u16 handle; + + skb_pull(skb, HCI_SCO_HDR_SIZE); + + handle = __le16_to_cpu(hdr->handle); + + BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle); + + hdev->stat.sco_rx++; + + hci_dev_lock(hdev); + conn = hci_conn_hash_lookup_handle(hdev, handle); + hci_dev_unlock(hdev); + + if (conn) { + register struct hci_proto *hp; + + /* Send to upper protocol */ + if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) { + hp->recv_scodata(conn, skb); + return; + } + } else { + BT_ERR("%s SCO packet for unknown connection handle %d", + hdev->name, handle); + } + + kfree_skb(skb); +} + +static void hci_rx_task(unsigned long arg) +{ + struct hci_dev *hdev = (struct hci_dev *) arg; + struct sk_buff *skb; + + BT_DBG("%s", hdev->name); + + read_lock(&hci_task_lock); + + while ((skb = skb_dequeue(&hdev->rx_q))) { + if (atomic_read(&hdev->promisc)) { + /* Send copy to the sockets */ + hci_send_to_sock(hdev, skb); + } + + if (test_bit(HCI_RAW, &hdev->flags)) { + kfree_skb(skb); + continue; + } + + if (test_bit(HCI_INIT, &hdev->flags)) { + /* Don't process data packets in this states. */ + switch (bt_cb(skb)->pkt_type) { + case HCI_ACLDATA_PKT: + case HCI_SCODATA_PKT: + kfree_skb(skb); + continue; + } + } + + /* Process frame */ + switch (bt_cb(skb)->pkt_type) { + case HCI_EVENT_PKT: + hci_event_packet(hdev, skb); + break; + + case HCI_ACLDATA_PKT: + BT_DBG("%s ACL data packet", hdev->name); + hci_acldata_packet(hdev, skb); + break; + + case HCI_SCODATA_PKT: + BT_DBG("%s SCO data packet", hdev->name); + hci_scodata_packet(hdev, skb); + break; + + default: + kfree_skb(skb); + break; + } + } + + read_unlock(&hci_task_lock); +} + +static void hci_cmd_task(unsigned long arg) +{ + struct hci_dev *hdev = (struct hci_dev *) arg; + struct sk_buff *skb; + + BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); + + if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) { + BT_ERR("%s command tx timeout", hdev->name); + atomic_set(&hdev->cmd_cnt, 1); + } + + /* Send queued commands */ + if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) { + kfree_skb(hdev->sent_cmd); + + if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) { + atomic_dec(&hdev->cmd_cnt); + hci_send_frame(skb); + hdev->cmd_last_tx = jiffies; + } else { + skb_queue_head(&hdev->cmd_q, skb); + tasklet_schedule(&hdev->cmd_task); + } + } +} diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c new file mode 100644 index 0000000..6c57fc7 --- /dev/null +++ b/net/bluetooth/hci_event.c @@ -0,0 +1,1994 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth HCI event handling. */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +/* Handle HCI Event packets */ + +static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (status) + return; + + clear_bit(HCI_INQUIRY, &hdev->flags); + + hci_req_complete(hdev, status); + + hci_conn_check_pending(hdev); +} + +static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (status) + return; + + clear_bit(HCI_INQUIRY, &hdev->flags); + + hci_conn_check_pending(hdev); +} + +static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + +static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_role_discovery *rp = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) { + if (rp->role) + conn->link_mode &= ~HCI_LM_MASTER; + else + conn->link_mode |= HCI_LM_MASTER; + } + + hci_dev_unlock(hdev); +} + +static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_link_policy *rp = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) + conn->link_policy = __le16_to_cpu(rp->policy); + + hci_dev_unlock(hdev); +} + +static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_write_link_policy *rp = (void *) skb->data; + struct hci_conn *conn; + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); + if (!sent) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) + conn->link_policy = get_unaligned_le16(sent + 2); + + hci_dev_unlock(hdev); +} + +static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_def_link_policy *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + hdev->link_policy = __le16_to_cpu(rp->policy); +} + +static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); + if (!sent) + return; + + if 
(!status) + hdev->link_policy = get_unaligned_le16(sent); + + hci_req_complete(hdev, status); +} + +static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%x", hdev->name, status); + + hci_req_complete(hdev, status); +} + +static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (status) + return; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); + if (!sent) + return; + + memcpy(hdev->dev_name, sent, 248); +} + +static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_local_name *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + memcpy(hdev->dev_name, rp->name, 248); +} + +static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); + if (!sent) + return; + + if (!status) { + __u8 param = *((__u8 *) sent); + + if (param == AUTH_ENABLED) + set_bit(HCI_AUTH, &hdev->flags); + else + clear_bit(HCI_AUTH, &hdev->flags); + } + + hci_req_complete(hdev, status); +} + +static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); + if (!sent) + return; + + if (!status) { + __u8 param = *((__u8 *) sent); + + if (param) + set_bit(HCI_ENCRYPT, &hdev->flags); + else + clear_bit(HCI_ENCRYPT, &hdev->flags); + } + + hci_req_complete(hdev, status); +} + +static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); + if (!sent) + return; + + if (!status) { + __u8 param = *((__u8 *) sent); + + clear_bit(HCI_PSCAN, &hdev->flags); + clear_bit(HCI_ISCAN, &hdev->flags); + + if (param & SCAN_INQUIRY) + set_bit(HCI_ISCAN, &hdev->flags); + + if (param & SCAN_PAGE) + set_bit(HCI_PSCAN, &hdev->flags); + } + + hci_req_complete(hdev, status); +} + +static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_class_of_dev *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + memcpy(hdev->dev_class, rp->dev_class, 3); + + BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, + hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); +} + +static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (status) + return; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); + if (!sent) + return; + + memcpy(hdev->dev_class, sent, 3); +} + +static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_voice_setting *rp = (void *) skb->data; + __u16 setting; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + setting = __le16_to_cpu(rp->voice_setting); + + if (hdev->voice_setting == setting) + return; + + hdev->voice_setting = setting; 
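+	/* Give the driver a chance to react to the new voice setting; the TX tasklet is paused while the notify callback runs. */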
+ + BT_DBG("%s voice setting 0x%04x", hdev->name, setting); + + if (hdev->notify) { + tasklet_disable(&hdev->tx_task); + hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); + tasklet_enable(&hdev->tx_task); + } +} + +static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + __u16 setting; + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (status) + return; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); + if (!sent) + return; + + setting = get_unaligned_le16(sent); + + if (hdev->voice_setting == setting) + return; + + hdev->voice_setting = setting; + + BT_DBG("%s voice setting 0x%04x", hdev->name, setting); + + if (hdev->notify) { + tasklet_disable(&hdev->tx_task); + hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); + tasklet_enable(&hdev->tx_task); + } +} + +static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%x", hdev->name, status); + + hci_req_complete(hdev, status); +} + +static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_ssp_mode *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + hdev->ssp_mode = rp->mode; +} + +static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + void *sent; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (status) + return; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); + if (!sent) + return; + + hdev->ssp_mode = *((__u8 *) sent); +} + +static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_local_version *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + hdev->hci_ver = rp->hci_ver; + hdev->hci_rev = __le16_to_cpu(rp->hci_rev); + hdev->manufacturer = __le16_to_cpu(rp->manufacturer); + + BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, + hdev->manufacturer, + hdev->hci_ver, hdev->hci_rev); +} + +static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_local_commands *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); +} + +static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_local_features *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + memcpy(hdev->features, rp->features, 8); + + /* Adjust default settings according to features + * supported by device. 
*/ + + if (hdev->features[0] & LMP_3SLOT) + hdev->pkt_type |= (HCI_DM3 | HCI_DH3); + + if (hdev->features[0] & LMP_5SLOT) + hdev->pkt_type |= (HCI_DM5 | HCI_DH5); + + if (hdev->features[1] & LMP_HV2) { + hdev->pkt_type |= (HCI_HV2); + hdev->esco_type |= (ESCO_HV2); + } + + if (hdev->features[1] & LMP_HV3) { + hdev->pkt_type |= (HCI_HV3); + hdev->esco_type |= (ESCO_HV3); + } + + if (hdev->features[3] & LMP_ESCO) + hdev->esco_type |= (ESCO_EV3); + + if (hdev->features[4] & LMP_EV4) + hdev->esco_type |= (ESCO_EV4); + + if (hdev->features[4] & LMP_EV5) + hdev->esco_type |= (ESCO_EV5); + + if (hdev->features[5] & LMP_EDR_ESCO_2M) + hdev->esco_type |= (ESCO_2EV3); + + if (hdev->features[5] & LMP_EDR_ESCO_3M) + hdev->esco_type |= (ESCO_3EV3); + + if (hdev->features[5] & LMP_EDR_3S_ESCO) + hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); + + BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, + hdev->features[0], hdev->features[1], + hdev->features[2], hdev->features[3], + hdev->features[4], hdev->features[5], + hdev->features[6], hdev->features[7]); +} + +static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_buffer_size *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); + hdev->sco_mtu = rp->sco_mtu; + hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); + hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); + + if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { + hdev->sco_mtu = 64; + hdev->sco_pkts = 8; + } + + hdev->acl_cnt = hdev->acl_pkts; + hdev->sco_cnt = hdev->sco_pkts; + + BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, + hdev->acl_mtu, hdev->acl_pkts, + hdev->sco_mtu, hdev->sco_pkts); +} + +static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_rp_read_bd_addr *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (!rp->status) + bacpy(&hdev->bdaddr, &rp->bdaddr); + + hci_req_complete(hdev, rp->status); +} + +static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) +{ + BT_DBG("%s status 0x%x", hdev->name, status); + + if (status) { + hci_req_complete(hdev, status); + + hci_conn_check_pending(hdev); + } else + set_bit(HCI_INQUIRY, &hdev->flags); +} + +static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_create_conn *cp; + struct hci_conn *conn; + + BT_DBG("%s status 0x%x", hdev->name, status); + + cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); + + BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn); + + if (status) { + if (conn && conn->state == BT_CONNECT) { + if (status != 0x0c || conn->attempt > 2) { + conn->state = BT_CLOSED; + hci_proto_connect_cfm(conn, status); + hci_conn_del(conn); + } else + conn->state = BT_CONNECT2; + } + } else { + if (!conn) { + conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr); + if (conn) { + conn->out = 1; + conn->link_mode |= HCI_LM_MASTER; + } else + BT_ERR("No memmory for new connection"); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_add_sco *cp; + struct hci_conn *acl, *sco; + __u16 handle; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); + if (!cp) + return; + + handle = 
__le16_to_cpu(cp->handle); + + BT_DBG("%s handle %d", hdev->name, handle); + + hci_dev_lock(hdev); + + acl = hci_conn_hash_lookup_handle(hdev, handle); + if (acl && (sco = acl->link)) { + sco->state = BT_CLOSED; + + hci_proto_connect_cfm(sco, status); + hci_conn_del(sco); + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_auth_requested *cp; + struct hci_conn *conn; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + if (conn->state == BT_CONFIG) { + hci_proto_connect_cfm(conn, status); + hci_conn_put(conn); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_set_conn_encrypt *cp; + struct hci_conn *conn; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + if (conn->state == BT_CONFIG) { + hci_proto_connect_cfm(conn, status); + hci_conn_put(conn); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) +{ + BT_DBG("%s status 0x%x", hdev->name, status); +} + +static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_read_remote_features *cp; + struct hci_conn *conn; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + if (conn->state == BT_CONFIG) { + hci_proto_connect_cfm(conn, status); + hci_conn_put(conn); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_read_remote_ext_features *cp; + struct hci_conn *conn; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + if (conn->state == BT_CONFIG) { + hci_proto_connect_cfm(conn, status); + hci_conn_put(conn); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_setup_sync_conn *cp; + struct hci_conn *acl, *sco; + __u16 handle; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); + if (!cp) + return; + + handle = __le16_to_cpu(cp->handle); + + BT_DBG("%s handle %d", hdev->name, handle); + + hci_dev_lock(hdev); + + acl = hci_conn_hash_lookup_handle(hdev, handle); + if (acl && (sco = acl->link)) { + sco->state = BT_CLOSED; + + hci_proto_connect_cfm(sco, status); + hci_conn_del(sco); + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_sniff_mode *cp; + struct hci_conn *conn; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); + if (!cp) + return; + + 
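+ /* Sniff mode command failed: clear the pending mode-change flag on the affected connection */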
hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) + clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); + + hci_dev_unlock(hdev); +} + +static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_exit_sniff_mode *cp; + struct hci_conn *conn; + + BT_DBG("%s status 0x%x", hdev->name, status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) + clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); + + hci_dev_unlock(hdev); +} + +static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status %d", hdev->name, status); + + clear_bit(HCI_INQUIRY, &hdev->flags); + + hci_req_complete(hdev, status); + + hci_conn_check_pending(hdev); +} + +static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct inquiry_data data; + struct inquiry_info *info = (void *) (skb->data + 1); + int num_rsp = *((__u8 *) skb->data); + + BT_DBG("%s num_rsp %d", hdev->name, num_rsp); + + if (!num_rsp) + return; + + hci_dev_lock(hdev); + + for (; num_rsp; num_rsp--) { + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = info->pscan_mode; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = 0x00; + data.ssp_mode = 0x00; + info++; + hci_inquiry_cache_update(hdev, &data); + } + + hci_dev_unlock(hdev); +} + +static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_conn_complete *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); + if (!conn) { + if (ev->link_type != SCO_LINK) + goto unlock; + + conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); + if (!conn) + goto unlock; + + conn->type = SCO_LINK; + } + + if (!ev->status) { + conn->handle = __le16_to_cpu(ev->handle); + + if (conn->type == ACL_LINK) { + conn->state = BT_CONFIG; + hci_conn_hold(conn); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + } else + conn->state = BT_CONNECTED; + + hci_conn_hold_device(conn); + hci_conn_add_sysfs(conn); + + if (test_bit(HCI_AUTH, &hdev->flags)) + conn->link_mode |= HCI_LM_AUTH; + + if (test_bit(HCI_ENCRYPT, &hdev->flags)) + conn->link_mode |= HCI_LM_ENCRYPT; + + /* Get remote features */ + if (conn->type == ACL_LINK) { + struct hci_cp_read_remote_features cp; + cp.handle = ev->handle; + hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, + sizeof(cp), &cp); + } + + /* Set packet type for incoming connection */ + if (!conn->out && hdev->hci_ver < 3) { + struct hci_cp_change_conn_ptype cp; + cp.handle = ev->handle; + cp.pkt_type = cpu_to_le16(conn->pkt_type); + hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, + sizeof(cp), &cp); + } + } else + conn->state = BT_CLOSED; + + if (conn->type == ACL_LINK) { + struct hci_conn *sco = conn->link; + if (sco) { + if (!ev->status) { + if (lmp_esco_capable(hdev)) + hci_setup_sync(sco, conn->handle); + else + hci_add_sco(sco, conn->handle); + } else { + hci_proto_connect_cfm(sco, ev->status); + hci_conn_del(sco); + } + } + } + + if (ev->status) { + hci_proto_connect_cfm(conn, ev->status); + hci_conn_del(conn); + } else if 
(ev->link_type != ACL_LINK) + hci_proto_connect_cfm(conn, ev->status); + +unlock: + hci_dev_unlock(hdev); + + hci_conn_check_pending(hdev); +} + +static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_conn_request *ev = (void *) skb->data; + int mask = hdev->link_mode; + + BT_DBG("%s bdaddr %s type 0x%x", hdev->name, + batostr(&ev->bdaddr), ev->link_type); + + mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); + + if (mask & HCI_LM_ACCEPT) { + /* Connection accepted */ + struct inquiry_entry *ie; + struct hci_conn *conn; + + hci_dev_lock(hdev); + + if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr))) + memcpy(ie->data.dev_class, ev->dev_class, 3); + + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); + if (!conn) { + if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { + BT_ERR("No memory for new connection"); + hci_dev_unlock(hdev); + return; + } + } + + memcpy(conn->dev_class, ev->dev_class, 3); + conn->state = BT_CONNECT; + + hci_dev_unlock(hdev); + + if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) { + struct hci_cp_accept_conn_req cp; + + bacpy(&cp.bdaddr, &ev->bdaddr); + + if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) + cp.role = 0x00; /* Become master */ + else + cp.role = 0x01; /* Remain slave */ + + hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, + sizeof(cp), &cp); + } else { + struct hci_cp_accept_sync_conn_req cp; + + bacpy(&cp.bdaddr, &ev->bdaddr); + cp.pkt_type = cpu_to_le16(conn->pkt_type); + + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.max_latency = cpu_to_le16(0xffff); + cp.content_format = cpu_to_le16(hdev->voice_setting); + cp.retrans_effort = 0xff; + + hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, + sizeof(cp), &cp); + } + } else { + /* Connection rejected */ + struct hci_cp_reject_conn_req cp; + + bacpy(&cp.bdaddr, &ev->bdaddr); + cp.reason = 0x0f; + hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); + } +} + +static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_disconn_complete *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + if (ev->status) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + conn->state = BT_CLOSED; + + hci_proto_disconn_cfm(conn, ev->reason); + hci_conn_del(conn); + } + + hci_dev_unlock(hdev); +} + +static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_auth_complete *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + if (!ev->status) + conn->link_mode |= HCI_LM_AUTH; + + clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); + + if (conn->state == BT_CONFIG) { + if (!ev->status && hdev->ssp_mode > 0 && + conn->ssp_mode > 0) { + struct hci_cp_set_conn_encrypt cp; + cp.handle = ev->handle; + cp.encrypt = 0x01; + hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, + sizeof(cp), &cp); + } else { + conn->state = BT_CONNECTED; + hci_proto_connect_cfm(conn, ev->status); + hci_conn_put(conn); + } + } else { + hci_auth_cfm(conn, ev->status); + + hci_conn_hold(conn); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + hci_conn_put(conn); + } + + if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { + if (!ev->status) { + struct 
hci_cp_set_conn_encrypt cp; + cp.handle = ev->handle; + cp.encrypt = 0x01; + hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, + sizeof(cp), &cp); + } else { + clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); + hci_encrypt_cfm(conn, ev->status, 0x00); + } + } + } + + hci_dev_unlock(hdev); +} + +static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); + + hci_conn_check_pending(hdev); +} + +static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_encrypt_change *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + if (!ev->status) { + if (ev->encrypt) { + /* Encryption implies authentication */ + conn->link_mode |= HCI_LM_AUTH; + conn->link_mode |= HCI_LM_ENCRYPT; + } else + conn->link_mode &= ~HCI_LM_ENCRYPT; + } + + clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); + + if (conn->state == BT_CONFIG) { + if (!ev->status) + conn->state = BT_CONNECTED; + + hci_proto_connect_cfm(conn, ev->status); + hci_conn_put(conn); + } else + hci_encrypt_cfm(conn, ev->status, ev->encrypt); + } + + hci_dev_unlock(hdev); +} + +static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_change_link_key_complete *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + if (!ev->status) + conn->link_mode |= HCI_LM_SECURE; + + clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); + + hci_key_change_cfm(conn, ev->status); + } + + hci_dev_unlock(hdev); +} + +static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_remote_features *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + if (!ev->status) + memcpy(conn->features, ev->features, 8); + + if (conn->state == BT_CONFIG) { + if (!ev->status && lmp_ssp_capable(hdev) && + lmp_ssp_capable(conn)) { + struct hci_cp_read_remote_ext_features cp; + cp.handle = ev->handle; + cp.page = 0x01; + hci_send_cmd(hdev, + HCI_OP_READ_REMOTE_EXT_FEATURES, + sizeof(cp), &cp); + } else { + conn->state = BT_CONNECTED; + hci_proto_connect_cfm(conn, ev->status); + hci_conn_put(conn); + } + } + } + + hci_dev_unlock(hdev); +} + +static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + +static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + +static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_cmd_complete *ev = (void *) skb->data; + __u16 opcode; + + skb_pull(skb, sizeof(*ev)); + + opcode = __le16_to_cpu(ev->opcode); + + switch (opcode) { + case HCI_OP_INQUIRY_CANCEL: + hci_cc_inquiry_cancel(hdev, skb); + break; + + case HCI_OP_EXIT_PERIODIC_INQ: + hci_cc_exit_periodic_inq(hdev, skb); + break; + + case HCI_OP_REMOTE_NAME_REQ_CANCEL: + hci_cc_remote_name_req_cancel(hdev, skb); + break; + + case HCI_OP_ROLE_DISCOVERY: + hci_cc_role_discovery(hdev, skb); + break; + + case HCI_OP_READ_LINK_POLICY: + hci_cc_read_link_policy(hdev, skb); + break; + + case 
HCI_OP_WRITE_LINK_POLICY: + hci_cc_write_link_policy(hdev, skb); + break; + + case HCI_OP_READ_DEF_LINK_POLICY: + hci_cc_read_def_link_policy(hdev, skb); + break; + + case HCI_OP_WRITE_DEF_LINK_POLICY: + hci_cc_write_def_link_policy(hdev, skb); + break; + + case HCI_OP_RESET: + hci_cc_reset(hdev, skb); + break; + + case HCI_OP_WRITE_LOCAL_NAME: + hci_cc_write_local_name(hdev, skb); + break; + + case HCI_OP_READ_LOCAL_NAME: + hci_cc_read_local_name(hdev, skb); + break; + + case HCI_OP_WRITE_AUTH_ENABLE: + hci_cc_write_auth_enable(hdev, skb); + break; + + case HCI_OP_WRITE_ENCRYPT_MODE: + hci_cc_write_encrypt_mode(hdev, skb); + break; + + case HCI_OP_WRITE_SCAN_ENABLE: + hci_cc_write_scan_enable(hdev, skb); + break; + + case HCI_OP_READ_CLASS_OF_DEV: + hci_cc_read_class_of_dev(hdev, skb); + break; + + case HCI_OP_WRITE_CLASS_OF_DEV: + hci_cc_write_class_of_dev(hdev, skb); + break; + + case HCI_OP_READ_VOICE_SETTING: + hci_cc_read_voice_setting(hdev, skb); + break; + + case HCI_OP_WRITE_VOICE_SETTING: + hci_cc_write_voice_setting(hdev, skb); + break; + + case HCI_OP_HOST_BUFFER_SIZE: + hci_cc_host_buffer_size(hdev, skb); + break; + + case HCI_OP_READ_SSP_MODE: + hci_cc_read_ssp_mode(hdev, skb); + break; + + case HCI_OP_WRITE_SSP_MODE: + hci_cc_write_ssp_mode(hdev, skb); + break; + + case HCI_OP_READ_LOCAL_VERSION: + hci_cc_read_local_version(hdev, skb); + break; + + case HCI_OP_READ_LOCAL_COMMANDS: + hci_cc_read_local_commands(hdev, skb); + break; + + case HCI_OP_READ_LOCAL_FEATURES: + hci_cc_read_local_features(hdev, skb); + break; + + case HCI_OP_READ_BUFFER_SIZE: + hci_cc_read_buffer_size(hdev, skb); + break; + + case HCI_OP_READ_BD_ADDR: + hci_cc_read_bd_addr(hdev, skb); + break; + + default: + BT_DBG("%s opcode 0x%x", hdev->name, opcode); + break; + } + + if (ev->ncmd) { + atomic_set(&hdev->cmd_cnt, 1); + if (!skb_queue_empty(&hdev->cmd_q)) + tasklet_schedule(&hdev->cmd_task); + } +} + +static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_cmd_status *ev = (void *) skb->data; + __u16 opcode; + + skb_pull(skb, sizeof(*ev)); + + opcode = __le16_to_cpu(ev->opcode); + + switch (opcode) { + case HCI_OP_INQUIRY: + hci_cs_inquiry(hdev, ev->status); + break; + + case HCI_OP_CREATE_CONN: + hci_cs_create_conn(hdev, ev->status); + break; + + case HCI_OP_ADD_SCO: + hci_cs_add_sco(hdev, ev->status); + break; + + case HCI_OP_AUTH_REQUESTED: + hci_cs_auth_requested(hdev, ev->status); + break; + + case HCI_OP_SET_CONN_ENCRYPT: + hci_cs_set_conn_encrypt(hdev, ev->status); + break; + + case HCI_OP_REMOTE_NAME_REQ: + hci_cs_remote_name_req(hdev, ev->status); + break; + + case HCI_OP_READ_REMOTE_FEATURES: + hci_cs_read_remote_features(hdev, ev->status); + break; + + case HCI_OP_READ_REMOTE_EXT_FEATURES: + hci_cs_read_remote_ext_features(hdev, ev->status); + break; + + case HCI_OP_SETUP_SYNC_CONN: + hci_cs_setup_sync_conn(hdev, ev->status); + break; + + case HCI_OP_SNIFF_MODE: + hci_cs_sniff_mode(hdev, ev->status); + break; + + case HCI_OP_EXIT_SNIFF_MODE: + hci_cs_exit_sniff_mode(hdev, ev->status); + break; + + default: + BT_DBG("%s opcode 0x%x", hdev->name, opcode); + break; + } + + if (ev->ncmd) { + atomic_set(&hdev->cmd_cnt, 1); + if (!skb_queue_empty(&hdev->cmd_q)) + tasklet_schedule(&hdev->cmd_task); + } +} + +static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_role_change *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = 
hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn) { + if (!ev->status) { + if (ev->role) + conn->link_mode &= ~HCI_LM_MASTER; + else + conn->link_mode |= HCI_LM_MASTER; + } + + clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend); + + hci_role_switch_cfm(conn, ev->status, ev->role); + } + + hci_dev_unlock(hdev); +} + +static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_num_comp_pkts *ev = (void *) skb->data; + __le16 *ptr; + int i; + + skb_pull(skb, sizeof(*ev)); + + BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); + + if (skb->len < ev->num_hndl * 4) { + BT_DBG("%s bad parameters", hdev->name); + return; + } + + tasklet_disable(&hdev->tx_task); + + for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { + struct hci_conn *conn; + __u16 handle, count; + + handle = get_unaligned_le16(ptr++); + count = get_unaligned_le16(ptr++); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (conn) { + conn->sent -= count; + + if (conn->type == ACL_LINK) { + if ((hdev->acl_cnt += count) > hdev->acl_pkts) + hdev->acl_cnt = hdev->acl_pkts; + } else { + if ((hdev->sco_cnt += count) > hdev->sco_pkts) + hdev->sco_cnt = hdev->sco_pkts; + } + } + } + + tasklet_schedule(&hdev->tx_task); + + tasklet_enable(&hdev->tx_task); +} + +static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_mode_change *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + conn->mode = ev->mode; + conn->interval = __le16_to_cpu(ev->interval); + + if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { + if (conn->mode == HCI_CM_ACTIVE) + conn->power_save = 1; + else + conn->power_save = 0; + } + } + + hci_dev_unlock(hdev); +} + +static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_pin_code_req *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn && conn->state == BT_CONNECTED) { + hci_conn_hold(conn); + conn->disc_timeout = HCI_PAIRING_TIMEOUT; + hci_conn_put(conn); + } + + hci_dev_unlock(hdev); +} + +static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + +static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_link_key_notify *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn) { + hci_conn_hold(conn); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + hci_conn_put(conn); + } + + hci_dev_unlock(hdev); +} + +static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_clock_offset *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn && !ev->status) { + struct inquiry_entry *ie; + + if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) { + ie->data.clock_offset = ev->clock_offset; + ie->timestamp = jiffies; + } + } + + hci_dev_unlock(hdev); +} + +static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + 
struct hci_ev_pkt_type_change *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn && !ev->status) + conn->pkt_type = __le16_to_cpu(ev->pkt_type); + + hci_dev_unlock(hdev); +} + +static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; + struct inquiry_entry *ie; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr))) { + ie->data.pscan_rep_mode = ev->pscan_rep_mode; + ie->timestamp = jiffies; + } + + hci_dev_unlock(hdev); +} + +static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct inquiry_data data; + int num_rsp = *((__u8 *) skb->data); + + BT_DBG("%s num_rsp %d", hdev->name, num_rsp); + + if (!num_rsp) + return; + + hci_dev_lock(hdev); + + if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { + struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1); + + for (; num_rsp; num_rsp--) { + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = info->pscan_mode; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = info->rssi; + data.ssp_mode = 0x00; + info++; + hci_inquiry_cache_update(hdev, &data); + } + } else { + struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); + + for (; num_rsp; num_rsp--) { + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = 0x00; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = info->rssi; + data.ssp_mode = 0x00; + info++; + hci_inquiry_cache_update(hdev, &data); + } + } + + hci_dev_unlock(hdev); +} + +static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_remote_ext_features *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + if (!ev->status && ev->page == 0x01) { + struct inquiry_entry *ie; + + if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) + ie->data.ssp_mode = (ev->features[0] & 0x01); + + conn->ssp_mode = (ev->features[0] & 0x01); + } + + if (conn->state == BT_CONFIG) { + if (!ev->status && hdev->ssp_mode > 0 && + conn->ssp_mode > 0 && conn->out && + conn->sec_level != BT_SECURITY_SDP) { + struct hci_cp_auth_requested cp; + cp.handle = ev->handle; + hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, + sizeof(cp), &cp); + } else { + conn->state = BT_CONNECTED; + hci_proto_connect_cfm(conn, ev->status); + hci_conn_put(conn); + } + } + } + + hci_dev_unlock(hdev); +} + +static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_sync_conn_complete *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); + if (!conn) { + if (ev->link_type == ESCO_LINK) + goto unlock; + + conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); + if (!conn) + goto unlock; + + conn->type = SCO_LINK; + } 
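+ /* Act on the reported setup status: success registers the SCO link, selected failure codes below trigger a single retry */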
+ + switch (ev->status) { + case 0x00: + conn->handle = __le16_to_cpu(ev->handle); + conn->state = BT_CONNECTED; + + hci_conn_hold_device(conn); + hci_conn_add_sysfs(conn); + break; + + case 0x11: /* Unsupported Feature or Parameter Value */ + case 0x1c: /* SCO interval rejected */ + case 0x1a: /* Unsupported Remote Feature */ + case 0x1f: /* Unspecified error */ + if (conn->out && conn->attempt < 2) { + conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | + (hdev->esco_type & EDR_ESCO_MASK); + hci_setup_sync(conn, conn->link->handle); + goto unlock; + } + /* fall through */ + + default: + conn->state = BT_CLOSED; + break; + } + + hci_proto_connect_cfm(conn, ev->status); + if (ev->status) + hci_conn_del(conn); + +unlock: + hci_dev_unlock(hdev); +} + +static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + BT_DBG("%s", hdev->name); +} + +static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_sniff_subrate *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status %d", hdev->name, ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + } + + hci_dev_unlock(hdev); +} + +static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct inquiry_data data; + struct extended_inquiry_info *info = (void *) (skb->data + 1); + int num_rsp = *((__u8 *) skb->data); + + BT_DBG("%s num_rsp %d", hdev->name, num_rsp); + + if (!num_rsp) + return; + + hci_dev_lock(hdev); + + for (; num_rsp; num_rsp--) { + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = 0x00; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = info->rssi; + data.ssp_mode = 0x01; + info++; + hci_inquiry_cache_update(hdev, &data); + } + + hci_dev_unlock(hdev); +} + +static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_io_capa_request *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn) + hci_conn_hold(conn); + + hci_dev_unlock(hdev); +} + +static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_simple_pair_complete *ev = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn) + hci_conn_put(conn); + + hci_dev_unlock(hdev); +} + +static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_ev_remote_host_features *ev = (void *) skb->data; + struct inquiry_entry *ie; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr))) + ie->data.ssp_mode = (ev->features[0] & 0x01); + + hci_dev_unlock(hdev); +} + +void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_event_hdr *hdr = (void *) skb->data; + __u8 event = hdr->evt; + + skb_pull(skb, HCI_EVENT_HDR_SIZE); + + switch (event) { + case HCI_EV_INQUIRY_COMPLETE: + hci_inquiry_complete_evt(hdev, skb); + break; + + case HCI_EV_INQUIRY_RESULT: + hci_inquiry_result_evt(hdev, skb); + break; + + case HCI_EV_CONN_COMPLETE: + hci_conn_complete_evt(hdev, skb); + break; + + case 
HCI_EV_CONN_REQUEST: + hci_conn_request_evt(hdev, skb); + break; + + case HCI_EV_DISCONN_COMPLETE: + hci_disconn_complete_evt(hdev, skb); + break; + + case HCI_EV_AUTH_COMPLETE: + hci_auth_complete_evt(hdev, skb); + break; + + case HCI_EV_REMOTE_NAME: + hci_remote_name_evt(hdev, skb); + break; + + case HCI_EV_ENCRYPT_CHANGE: + hci_encrypt_change_evt(hdev, skb); + break; + + case HCI_EV_CHANGE_LINK_KEY_COMPLETE: + hci_change_link_key_complete_evt(hdev, skb); + break; + + case HCI_EV_REMOTE_FEATURES: + hci_remote_features_evt(hdev, skb); + break; + + case HCI_EV_REMOTE_VERSION: + hci_remote_version_evt(hdev, skb); + break; + + case HCI_EV_QOS_SETUP_COMPLETE: + hci_qos_setup_complete_evt(hdev, skb); + break; + + case HCI_EV_CMD_COMPLETE: + hci_cmd_complete_evt(hdev, skb); + break; + + case HCI_EV_CMD_STATUS: + hci_cmd_status_evt(hdev, skb); + break; + + case HCI_EV_ROLE_CHANGE: + hci_role_change_evt(hdev, skb); + break; + + case HCI_EV_NUM_COMP_PKTS: + hci_num_comp_pkts_evt(hdev, skb); + break; + + case HCI_EV_MODE_CHANGE: + hci_mode_change_evt(hdev, skb); + break; + + case HCI_EV_PIN_CODE_REQ: + hci_pin_code_request_evt(hdev, skb); + break; + + case HCI_EV_LINK_KEY_REQ: + hci_link_key_request_evt(hdev, skb); + break; + + case HCI_EV_LINK_KEY_NOTIFY: + hci_link_key_notify_evt(hdev, skb); + break; + + case HCI_EV_CLOCK_OFFSET: + hci_clock_offset_evt(hdev, skb); + break; + + case HCI_EV_PKT_TYPE_CHANGE: + hci_pkt_type_change_evt(hdev, skb); + break; + + case HCI_EV_PSCAN_REP_MODE: + hci_pscan_rep_mode_evt(hdev, skb); + break; + + case HCI_EV_INQUIRY_RESULT_WITH_RSSI: + hci_inquiry_result_with_rssi_evt(hdev, skb); + break; + + case HCI_EV_REMOTE_EXT_FEATURES: + hci_remote_ext_features_evt(hdev, skb); + break; + + case HCI_EV_SYNC_CONN_COMPLETE: + hci_sync_conn_complete_evt(hdev, skb); + break; + + case HCI_EV_SYNC_CONN_CHANGED: + hci_sync_conn_changed_evt(hdev, skb); + break; + + case HCI_EV_SNIFF_SUBRATE: + hci_sniff_subrate_evt(hdev, skb); + break; + + case HCI_EV_EXTENDED_INQUIRY_RESULT: + hci_extended_inquiry_result_evt(hdev, skb); + break; + + case HCI_EV_IO_CAPA_REQUEST: + hci_io_capa_request_evt(hdev, skb); + break; + + case HCI_EV_SIMPLE_PAIR_COMPLETE: + hci_simple_pair_complete_evt(hdev, skb); + break; + + case HCI_EV_REMOTE_HOST_FEATURES: + hci_remote_host_features_evt(hdev, skb); + break; + + default: + BT_DBG("%s event 0x%x", hdev->name, event); + break; + } + + kfree_skb(skb); + hdev->stat.evt_rx++; +} + +/* Generate internal stack event */ +void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data) +{ + struct hci_event_hdr *hdr; + struct hci_ev_stack_internal *ev; + struct sk_buff *skb; + + skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC); + if (!skb) + return; + + hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE); + hdr->evt = HCI_EV_STACK_INTERNAL; + hdr->plen = sizeof(*ev) + dlen; + + ev = (void *) skb_put(skb, sizeof(*ev) + dlen); + ev->type = type; + memcpy(ev->data, data, dlen); + + bt_cb(skb)->incoming = 1; + __net_timestamp(skb); + + bt_cb(skb)->pkt_type = HCI_EVENT_PKT; + skb->dev = (void *) hdev; + hci_send_to_sock(hdev, skb); + kfree_skb(skb); +} diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c new file mode 100644 index 0000000..65388bd --- /dev/null +++ b/net/bluetooth/hci_sock.c @@ -0,0 +1,748 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under 
the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth HCI sockets. */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +/* ----- HCI socket interface ----- */ + +static inline int hci_test_bit(int nr, void *addr) +{ + return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31)); +} + +/* Security filter */ +static struct hci_sec_filter hci_sec_filter = { + /* Packet types */ + 0x10, + /* Events */ + { 0x1000d9fe, 0x0000b00c }, + /* Commands */ + { + { 0x0 }, + /* OGF_LINK_CTL */ + { 0xbe000006, 0x00000001, 0x00000000, 0x00 }, + /* OGF_LINK_POLICY */ + { 0x00005200, 0x00000000, 0x00000000, 0x00 }, + /* OGF_HOST_CTL */ + { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 }, + /* OGF_INFO_PARAM */ + { 0x000002be, 0x00000000, 0x00000000, 0x00 }, + /* OGF_STATUS_PARAM */ + { 0x000000ea, 0x00000000, 0x00000000, 0x00 } + } +}; + +static struct bt_sock_list hci_sk_list = { + .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock) +}; + +/* Send frame to RAW socket */ +void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct sock *sk; + struct hlist_node *node; + + BT_DBG("hdev %p len %d", hdev, skb->len); + + read_lock(&hci_sk_list.lock); + sk_for_each(sk, node, &hci_sk_list.head) { + struct hci_filter *flt; + struct sk_buff *nskb; + + if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) + continue; + + /* Don't send frame to the socket it came from */ + if (skb->sk == sk) + continue; + + /* Apply filter */ + flt = &hci_pi(sk)->filter; + + if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? 
+ 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask)) + continue; + + if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { + register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); + + if (!hci_test_bit(evt, &flt->event_mask)) + continue; + + if (flt->opcode && + ((evt == HCI_EV_CMD_COMPLETE && + flt->opcode != + get_unaligned((__le16 *)(skb->data + 3))) || + (evt == HCI_EV_CMD_STATUS && + flt->opcode != + get_unaligned((__le16 *)(skb->data + 4))))) + continue; + } + + if (!(nskb = skb_clone(skb, GFP_ATOMIC))) + continue; + + /* Put type byte before the data */ + memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1); + + if (sock_queue_rcv_skb(sk, nskb)) + kfree_skb(nskb); + } + read_unlock(&hci_sk_list.lock); +} + +static int hci_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct hci_dev *hdev; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!sk) + return 0; + + hdev = hci_pi(sk)->hdev; + + bt_sock_unlink(&hci_sk_list, sk); + + if (hdev) { + atomic_dec(&hdev->promisc); + hci_dev_put(hdev); + } + + sock_orphan(sk); + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); + + sock_put(sk); + return 0; +} + +/* Ioctls that require bound socket */ +static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) +{ + struct hci_dev *hdev = hci_pi(sk)->hdev; + + if (!hdev) + return -EBADFD; + + switch (cmd) { + case HCISETRAW: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + + if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + return -EPERM; + + if (arg) + set_bit(HCI_RAW, &hdev->flags); + else + clear_bit(HCI_RAW, &hdev->flags); + + return 0; + + case HCIGETCONNINFO: + return hci_get_conn_info(hdev, (void __user *) arg); + + case HCIGETAUTHINFO: + return hci_get_auth_info(hdev, (void __user *) arg); + + default: + if (hdev->ioctl) + return hdev->ioctl(hdev, cmd, arg); + return -EINVAL; + } +} + +static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct sock *sk = sock->sk; + void __user *argp = (void __user *) arg; + int err; + + BT_DBG("cmd %x arg %lx", cmd, arg); + + switch (cmd) { + case HCIGETDEVLIST: + return hci_get_dev_list(argp); + + case HCIGETDEVINFO: + return hci_get_dev_info(argp); + + case HCIGETCONNLIST: + return hci_get_conn_list(argp); + + case HCIDEVUP: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_dev_open(arg); + + case HCIDEVDOWN: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_dev_close(arg); + + case HCIDEVRESET: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_dev_reset(arg); + + case HCIDEVRESTAT: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_dev_reset_stat(arg); + + case HCISETSCAN: + case HCISETAUTH: + case HCISETENCRYPT: + case HCISETPTYPE: + case HCISETLINKPOL: + case HCISETLINKMODE: + case HCISETACLMTU: + case HCISETSCOMTU: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_dev_cmd(cmd, argp); + + case HCIINQUIRY: + return hci_inquiry(argp); + + default: + lock_sock(sk); + err = hci_sock_bound_ioctl(sk, cmd, arg); + release_sock(sk); + return err; + } +} + +static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) +{ + struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; + struct sock *sk = sock->sk; + struct hci_dev *hdev = NULL; + int err = 0; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!haddr || haddr->hci_family != AF_BLUETOOTH) + return -EINVAL; + + lock_sock(sk); + + if (hci_pi(sk)->hdev) { + err = -EALREADY; + goto done; + } 
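+ /* No device bound yet: look up the requested adapter (if any) and bump its promiscuous count before binding */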
+ + if (haddr->hci_dev != HCI_DEV_NONE) { + if (!(hdev = hci_dev_get(haddr->hci_dev))) { + err = -ENODEV; + goto done; + } + + atomic_inc(&hdev->promisc); + } + + hci_pi(sk)->hdev = hdev; + sk->sk_state = BT_BOUND; + +done: + release_sock(sk); + return err; +} + +static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) +{ + struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; + struct sock *sk = sock->sk; + struct hci_dev *hdev = hci_pi(sk)->hdev; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!hdev) + return -EBADFD; + + lock_sock(sk); + + *addr_len = sizeof(*haddr); + haddr->hci_family = AF_BLUETOOTH; + haddr->hci_dev = hdev->id; + + release_sock(sk); + return 0; +} + +static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) +{ + __u32 mask = hci_pi(sk)->cmsg_mask; + + if (mask & HCI_CMSG_DIR) { + int incoming = bt_cb(skb)->incoming; + put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); + } + + if (mask & HCI_CMSG_TSTAMP) { +#ifdef CONFIG_COMPAT + struct compat_timeval ctv; +#endif + struct timeval tv; + void *data; + int len; + + skb_get_timestamp(skb, &tv); + + data = &tv; + len = sizeof(tv); +#ifdef CONFIG_COMPAT + if (msg->msg_flags & MSG_CMSG_COMPAT) { + ctv.tv_sec = tv.tv_sec; + ctv.tv_usec = tv.tv_usec; + data = &ctv; + len = sizeof(ctv); + } +#endif + + put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data); + } +} + +static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len, int flags) +{ + int noblock = flags & MSG_DONTWAIT; + struct sock *sk = sock->sk; + struct sk_buff *skb; + int copied, err; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (flags & (MSG_OOB)) + return -EOPNOTSUPP; + + if (sk->sk_state == BT_CLOSED) + return 0; + + if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) + return err; + + msg->msg_namelen = 0; + + copied = skb->len; + if (len < copied) { + msg->msg_flags |= MSG_TRUNC; + copied = len; + } + + skb_reset_transport_header(skb); + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + + hci_sock_cmsg(sk, msg, skb); + + skb_free_datagram(sk, skb); + + return err ? 
: copied; +} + +static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len) +{ + struct sock *sk = sock->sk; + struct hci_dev *hdev; + struct sk_buff *skb; + int err; + + BT_DBG("sock %p sk %p", sock, sk); + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE)) + return -EINVAL; + + if (len < 4 || len > HCI_MAX_FRAME_SIZE) + return -EINVAL; + + lock_sock(sk); + + if (!(hdev = hci_pi(sk)->hdev)) { + err = -EBADFD; + goto done; + } + + if (!test_bit(HCI_UP, &hdev->flags)) { + err = -ENETDOWN; + goto done; + } + + if (!(skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err))) + goto done; + + if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { + err = -EFAULT; + goto drop; + } + + bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); + skb_pull(skb, 1); + skb->dev = (void *) hdev; + + if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { + u16 opcode = get_unaligned_le16(skb->data); + u16 ogf = hci_opcode_ogf(opcode); + u16 ocf = hci_opcode_ocf(opcode); + + if (((ogf > HCI_SFLT_MAX_OGF) || + !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && + !capable(CAP_NET_RAW)) { + err = -EPERM; + goto drop; + } + + if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) { + skb_queue_tail(&hdev->raw_q, skb); + tasklet_schedule(&hdev->tx_task); + } else { + skb_queue_tail(&hdev->cmd_q, skb); + tasklet_schedule(&hdev->cmd_task); + } + } else { + if (!capable(CAP_NET_RAW)) { + err = -EPERM; + goto drop; + } + + skb_queue_tail(&hdev->raw_q, skb); + tasklet_schedule(&hdev->tx_task); + } + + err = len; + +done: + release_sock(sk); + return err; + +drop: + kfree_skb(skb); + goto done; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) +static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len) +#else +static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int len) +#endif +{ + struct hci_ufilter uf = { .opcode = 0 }; + struct sock *sk = sock->sk; + int err = 0, opt = 0; + + BT_DBG("sk %p, opt %d", sk, optname); + + lock_sock(sk); + + switch (optname) { + case HCI_DATA_DIR: + if (get_user(opt, (int __user *)optval)) { + err = -EFAULT; + break; + } + + if (opt) + hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR; + else + hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR; + break; + + case HCI_TIME_STAMP: + if (get_user(opt, (int __user *)optval)) { + err = -EFAULT; + break; + } + + if (opt) + hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP; + else + hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP; + break; + + case HCI_FILTER: + { + struct hci_filter *f = &hci_pi(sk)->filter; + + uf.type_mask = f->type_mask; + uf.opcode = f->opcode; + uf.event_mask[0] = *((u32 *) f->event_mask + 0); + uf.event_mask[1] = *((u32 *) f->event_mask + 1); + } + + len = min_t(unsigned int, len, sizeof(uf)); + if (copy_from_user(&uf, optval, len)) { + err = -EFAULT; + break; + } + + if (!capable(CAP_NET_RAW)) { + uf.type_mask &= hci_sec_filter.type_mask; + uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0); + uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1); + } + + { + struct hci_filter *f = &hci_pi(sk)->filter; + + f->type_mask = uf.type_mask; + f->opcode = uf.opcode; + *((u32 *) f->event_mask + 0) = uf.event_mask[0]; + *((u32 *) f->event_mask + 1) = uf.event_mask[1]; + } + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int 
hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) +{ + struct hci_ufilter uf; + struct sock *sk = sock->sk; + int len, opt; + + if (get_user(len, optlen)) + return -EFAULT; + + switch (optname) { + case HCI_DATA_DIR: + if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) + opt = 1; + else + opt = 0; + + if (put_user(opt, optval)) + return -EFAULT; + break; + + case HCI_TIME_STAMP: + if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP) + opt = 1; + else + opt = 0; + + if (put_user(opt, optval)) + return -EFAULT; + break; + + case HCI_FILTER: + { + struct hci_filter *f = &hci_pi(sk)->filter; + + uf.type_mask = f->type_mask; + uf.opcode = f->opcode; + uf.event_mask[0] = *((u32 *) f->event_mask + 0); + uf.event_mask[1] = *((u32 *) f->event_mask + 1); + } + + len = min_t(unsigned int, len, sizeof(uf)); + if (copy_to_user(optval, &uf, len)) + return -EFAULT; + break; + + default: + return -ENOPROTOOPT; + break; + } + + return 0; +} + +static const struct proto_ops hci_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = hci_sock_release, + .bind = hci_sock_bind, + .getname = hci_sock_getname, + .sendmsg = hci_sock_sendmsg, + .recvmsg = hci_sock_recvmsg, + .ioctl = hci_sock_ioctl, + .poll = datagram_poll, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = hci_sock_setsockopt, + .getsockopt = hci_sock_getsockopt, + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .mmap = sock_no_mmap +}; + +static struct proto hci_sk_proto = { + .name = "HCI", + .owner = THIS_MODULE, + .obj_size = sizeof(struct hci_pinfo) +}; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) +static int hci_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +#else +static int hci_sock_create(struct net *net, struct socket *sock, int protocol) +#endif +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + if (sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sock->ops = &hci_sock_ops; + + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = protocol; + + sock->state = SS_UNCONNECTED; + sk->sk_state = BT_OPEN; + + bt_sock_link(&hci_sk_list, sk); + return 0; +} + +static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + struct hci_dev *hdev = (struct hci_dev *) ptr; + struct hci_ev_si_device ev; + + BT_DBG("hdev %s event %ld", hdev->name, event); + + /* Send event to sockets */ + ev.event = event; + ev.dev_id = hdev->id; + hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev); + + if (event == HCI_DEV_UNREG) { + struct sock *sk; + struct hlist_node *node; + + /* Detach sockets from device */ + read_lock(&hci_sk_list.lock); + sk_for_each(sk, node, &hci_sk_list.head) { + local_bh_disable(); + bh_lock_sock_nested(sk); + if (hci_pi(sk)->hdev == hdev) { + hci_pi(sk)->hdev = NULL; + sk->sk_err = EPIPE; + sk->sk_state = BT_OPEN; + sk->sk_state_change(sk); + + hci_dev_put(hdev); + } + bh_unlock_sock(sk); + local_bh_enable(); + } + read_unlock(&hci_sk_list.lock); + } + + return NOTIFY_DONE; +} + +static const struct net_proto_family hci_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = hci_sock_create, +}; + +static struct notifier_block hci_sock_nblock = { + .notifier_call = hci_sock_dev_event +}; + +int __init hci_sock_init(void) +{ + int err; + + err = proto_register(&hci_sk_proto, 0); 
+ if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops); + if (err < 0) + goto error; + + hci_register_notifier(&hci_sock_nblock); + + BT_INFO("HCI socket layer initialized"); + + return 0; + +error: + BT_ERR("HCI socket registration failed"); + proto_unregister(&hci_sk_proto); + return err; +} + +void __exit hci_sock_cleanup(void) +{ + if (bt_sock_unregister(BTPROTO_HCI) < 0) + BT_ERR("HCI socket unregistration failed"); + + hci_unregister_notifier(&hci_sock_nblock); + + proto_unregister(&hci_sk_proto); +} diff --git a/net/bluetooth/hci_sock.c.orig b/net/bluetooth/hci_sock.c.orig new file mode 100644 index 0000000..38f08f6 --- /dev/null +++ b/net/bluetooth/hci_sock.c.orig @@ -0,0 +1,740 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth HCI sockets. */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +/* ----- HCI socket interface ----- */ + +static inline int hci_test_bit(int nr, void *addr) +{ + return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31)); +} + +/* Security filter */ +static struct hci_sec_filter hci_sec_filter = { + /* Packet types */ + 0x10, + /* Events */ + { 0x1000d9fe, 0x0000b00c }, + /* Commands */ + { + { 0x0 }, + /* OGF_LINK_CTL */ + { 0xbe000006, 0x00000001, 0x00000000, 0x00 }, + /* OGF_LINK_POLICY */ + { 0x00005200, 0x00000000, 0x00000000, 0x00 }, + /* OGF_HOST_CTL */ + { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 }, + /* OGF_INFO_PARAM */ + { 0x000002be, 0x00000000, 0x00000000, 0x00 }, + /* OGF_STATUS_PARAM */ + { 0x000000ea, 0x00000000, 0x00000000, 0x00 } + } +}; + +static struct bt_sock_list hci_sk_list = { + .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock) +}; + +/* Send frame to RAW socket */ +void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct sock *sk; + struct hlist_node *node; + + BT_DBG("hdev %p len %d", hdev, skb->len); + + read_lock(&hci_sk_list.lock); + sk_for_each(sk, node, &hci_sk_list.head) { + struct hci_filter *flt; + struct sk_buff *nskb; + + if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) + continue; + + /* Don't send frame to the socket it came from */ + if (skb->sk == sk) + continue; + + /* Apply filter */ + flt = &hci_pi(sk)->filter; + + if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? 
+ 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask)) + continue; + + if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { + register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); + + if (!hci_test_bit(evt, &flt->event_mask)) + continue; + + if (flt->opcode && + ((evt == HCI_EV_CMD_COMPLETE && + flt->opcode != + get_unaligned((__le16 *)(skb->data + 3))) || + (evt == HCI_EV_CMD_STATUS && + flt->opcode != + get_unaligned((__le16 *)(skb->data + 4))))) + continue; + } + + if (!(nskb = skb_clone(skb, GFP_ATOMIC))) + continue; + + /* Put type byte before the data */ + memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1); + + if (sock_queue_rcv_skb(sk, nskb)) + kfree_skb(nskb); + } + read_unlock(&hci_sk_list.lock); +} + +static int hci_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct hci_dev *hdev; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!sk) + return 0; + + hdev = hci_pi(sk)->hdev; + + bt_sock_unlink(&hci_sk_list, sk); + + if (hdev) { + atomic_dec(&hdev->promisc); + hci_dev_put(hdev); + } + + sock_orphan(sk); + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); + + sock_put(sk); + return 0; +} + +/* Ioctls that require bound socket */ +static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) +{ + struct hci_dev *hdev = hci_pi(sk)->hdev; + + if (!hdev) + return -EBADFD; + + switch (cmd) { + case HCISETRAW: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + + if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + return -EPERM; + + if (arg) + set_bit(HCI_RAW, &hdev->flags); + else + clear_bit(HCI_RAW, &hdev->flags); + + return 0; + + case HCIGETCONNINFO: + return hci_get_conn_info(hdev, (void __user *) arg); + + case HCIGETAUTHINFO: + return hci_get_auth_info(hdev, (void __user *) arg); + + default: + if (hdev->ioctl) + return hdev->ioctl(hdev, cmd, arg); + return -EINVAL; + } +} + +static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct sock *sk = sock->sk; + void __user *argp = (void __user *) arg; + int err; + + BT_DBG("cmd %x arg %lx", cmd, arg); + + switch (cmd) { + case HCIGETDEVLIST: + return hci_get_dev_list(argp); + + case HCIGETDEVINFO: + return hci_get_dev_info(argp); + + case HCIGETCONNLIST: + return hci_get_conn_list(argp); + + case HCIDEVUP: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_dev_open(arg); + + case HCIDEVDOWN: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_dev_close(arg); + + case HCIDEVRESET: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_dev_reset(arg); + + case HCIDEVRESTAT: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_dev_reset_stat(arg); + + case HCISETSCAN: + case HCISETAUTH: + case HCISETENCRYPT: + case HCISETPTYPE: + case HCISETLINKPOL: + case HCISETLINKMODE: + case HCISETACLMTU: + case HCISETSCOMTU: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + return hci_dev_cmd(cmd, argp); + + case HCIINQUIRY: + return hci_inquiry(argp); + + default: + lock_sock(sk); + err = hci_sock_bound_ioctl(sk, cmd, arg); + release_sock(sk); + return err; + } +} + +static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) +{ + struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; + struct sock *sk = sock->sk; + struct hci_dev *hdev = NULL; + int err = 0; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!haddr || haddr->hci_family != AF_BLUETOOTH) + return -EINVAL; + + lock_sock(sk); + + if (hci_pi(sk)->hdev) { + err = -EALREADY; + goto done; + } 
+ + if (haddr->hci_dev != HCI_DEV_NONE) { + if (!(hdev = hci_dev_get(haddr->hci_dev))) { + err = -ENODEV; + goto done; + } + + atomic_inc(&hdev->promisc); + } + + hci_pi(sk)->hdev = hdev; + sk->sk_state = BT_BOUND; + +done: + release_sock(sk); + return err; +} + +static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) +{ + struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; + struct sock *sk = sock->sk; + struct hci_dev *hdev = hci_pi(sk)->hdev; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!hdev) + return -EBADFD; + + lock_sock(sk); + + *addr_len = sizeof(*haddr); + haddr->hci_family = AF_BLUETOOTH; + haddr->hci_dev = hdev->id; + + release_sock(sk); + return 0; +} + +static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) +{ + __u32 mask = hci_pi(sk)->cmsg_mask; + + if (mask & HCI_CMSG_DIR) { + int incoming = bt_cb(skb)->incoming; + put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); + } + + if (mask & HCI_CMSG_TSTAMP) { +#ifdef CONFIG_COMPAT + struct compat_timeval ctv; +#endif + struct timeval tv; + void *data; + int len; + + skb_get_timestamp(skb, &tv); + + data = &tv; + len = sizeof(tv); +#ifdef CONFIG_COMPAT + if (msg->msg_flags & MSG_CMSG_COMPAT) { + ctv.tv_sec = tv.tv_sec; + ctv.tv_usec = tv.tv_usec; + data = &ctv; + len = sizeof(ctv); + } +#endif + + put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data); + } +} + +static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len, int flags) +{ + int noblock = flags & MSG_DONTWAIT; + struct sock *sk = sock->sk; + struct sk_buff *skb; + int copied, err; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (flags & (MSG_OOB)) + return -EOPNOTSUPP; + + if (sk->sk_state == BT_CLOSED) + return 0; + + if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) + return err; + + msg->msg_namelen = 0; + + copied = skb->len; + if (len < copied) { + msg->msg_flags |= MSG_TRUNC; + copied = len; + } + + skb_reset_transport_header(skb); + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + + hci_sock_cmsg(sk, msg, skb); + + skb_free_datagram(sk, skb); + + return err ? 
: copied; +} + +static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len) +{ + struct sock *sk = sock->sk; + struct hci_dev *hdev; + struct sk_buff *skb; + int err; + + BT_DBG("sock %p sk %p", sock, sk); + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE)) + return -EINVAL; + + if (len < 4 || len > HCI_MAX_FRAME_SIZE) + return -EINVAL; + + lock_sock(sk); + + if (!(hdev = hci_pi(sk)->hdev)) { + err = -EBADFD; + goto done; + } + + if (!test_bit(HCI_UP, &hdev->flags)) { + err = -ENETDOWN; + goto done; + } + + if (!(skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err))) + goto done; + + if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { + err = -EFAULT; + goto drop; + } + + bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); + skb_pull(skb, 1); + skb->dev = (void *) hdev; + + if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { + u16 opcode = get_unaligned_le16(skb->data); + u16 ogf = hci_opcode_ogf(opcode); + u16 ocf = hci_opcode_ocf(opcode); + + if (((ogf > HCI_SFLT_MAX_OGF) || + !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && + !capable(CAP_NET_RAW)) { + err = -EPERM; + goto drop; + } + + if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) { + skb_queue_tail(&hdev->raw_q, skb); + tasklet_schedule(&hdev->tx_task); + } else { + skb_queue_tail(&hdev->cmd_q, skb); + tasklet_schedule(&hdev->cmd_task); + } + } else { + if (!capable(CAP_NET_RAW)) { + err = -EPERM; + goto drop; + } + + skb_queue_tail(&hdev->raw_q, skb); + tasklet_schedule(&hdev->tx_task); + } + + err = len; + +done: + release_sock(sk); + return err; + +drop: + kfree_skb(skb); + goto done; +} + +static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len) +{ + struct hci_ufilter uf = { .opcode = 0 }; + struct sock *sk = sock->sk; + int err = 0, opt = 0; + + BT_DBG("sk %p, opt %d", sk, optname); + + lock_sock(sk); + + switch (optname) { + case HCI_DATA_DIR: + if (get_user(opt, (int __user *)optval)) { + err = -EFAULT; + break; + } + + if (opt) + hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR; + else + hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR; + break; + + case HCI_TIME_STAMP: + if (get_user(opt, (int __user *)optval)) { + err = -EFAULT; + break; + } + + if (opt) + hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP; + else + hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP; + break; + + case HCI_FILTER: + { + struct hci_filter *f = &hci_pi(sk)->filter; + + uf.type_mask = f->type_mask; + uf.opcode = f->opcode; + uf.event_mask[0] = *((u32 *) f->event_mask + 0); + uf.event_mask[1] = *((u32 *) f->event_mask + 1); + } + + len = min_t(unsigned int, len, sizeof(uf)); + if (copy_from_user(&uf, optval, len)) { + err = -EFAULT; + break; + } + + if (!capable(CAP_NET_RAW)) { + uf.type_mask &= hci_sec_filter.type_mask; + uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0); + uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1); + } + + { + struct hci_filter *f = &hci_pi(sk)->filter; + + f->type_mask = uf.type_mask; + f->opcode = uf.opcode; + *((u32 *) f->event_mask + 0) = uf.event_mask[0]; + *((u32 *) f->event_mask + 1) = uf.event_mask[1]; + } + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) +{ + struct hci_ufilter uf; + struct sock *sk = sock->sk; + int len, opt; + + if 
(get_user(len, optlen)) + return -EFAULT; + + switch (optname) { + case HCI_DATA_DIR: + if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) + opt = 1; + else + opt = 0; + + if (put_user(opt, optval)) + return -EFAULT; + break; + + case HCI_TIME_STAMP: + if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP) + opt = 1; + else + opt = 0; + + if (put_user(opt, optval)) + return -EFAULT; + break; + + case HCI_FILTER: + { + struct hci_filter *f = &hci_pi(sk)->filter; + + uf.type_mask = f->type_mask; + uf.opcode = f->opcode; + uf.event_mask[0] = *((u32 *) f->event_mask + 0); + uf.event_mask[1] = *((u32 *) f->event_mask + 1); + } + + len = min_t(unsigned int, len, sizeof(uf)); + if (copy_to_user(optval, &uf, len)) + return -EFAULT; + break; + + default: + return -ENOPROTOOPT; + break; + } + + return 0; +} + +static const struct proto_ops hci_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = hci_sock_release, + .bind = hci_sock_bind, + .getname = hci_sock_getname, + .sendmsg = hci_sock_sendmsg, + .recvmsg = hci_sock_recvmsg, + .ioctl = hci_sock_ioctl, + .poll = datagram_poll, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = hci_sock_setsockopt, + .getsockopt = hci_sock_getsockopt, + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .mmap = sock_no_mmap +}; + +static struct proto hci_sk_proto = { + .name = "HCI", + .owner = THIS_MODULE, + .obj_size = sizeof(struct hci_pinfo) +}; + +static int hci_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + if (sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sock->ops = &hci_sock_ops; + + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = protocol; + + sock->state = SS_UNCONNECTED; + sk->sk_state = BT_OPEN; + + bt_sock_link(&hci_sk_list, sk); + return 0; +} + +static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + struct hci_dev *hdev = (struct hci_dev *) ptr; + struct hci_ev_si_device ev; + + BT_DBG("hdev %s event %ld", hdev->name, event); + + /* Send event to sockets */ + ev.event = event; + ev.dev_id = hdev->id; + hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev); + + if (event == HCI_DEV_UNREG) { + struct sock *sk; + struct hlist_node *node; + + /* Detach sockets from device */ + read_lock(&hci_sk_list.lock); + sk_for_each(sk, node, &hci_sk_list.head) { + local_bh_disable(); + bh_lock_sock_nested(sk); + if (hci_pi(sk)->hdev == hdev) { + hci_pi(sk)->hdev = NULL; + sk->sk_err = EPIPE; + sk->sk_state = BT_OPEN; + sk->sk_state_change(sk); + + hci_dev_put(hdev); + } + bh_unlock_sock(sk); + local_bh_enable(); + } + read_unlock(&hci_sk_list.lock); + } + + return NOTIFY_DONE; +} + +static const struct net_proto_family hci_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = hci_sock_create, +}; + +static struct notifier_block hci_sock_nblock = { + .notifier_call = hci_sock_dev_event +}; + +int __init hci_sock_init(void) +{ + int err; + + err = proto_register(&hci_sk_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops); + if (err < 0) + goto error; + + hci_register_notifier(&hci_sock_nblock); + + BT_INFO("HCI socket layer initialized"); + + return 0; + +error: + BT_ERR("HCI socket registration failed"); + proto_unregister(&hci_sk_proto); + return err; +} + 
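/*
 * Editor's illustration (not part of the patch): a minimal user-space sketch
 * of the raw HCI socket interface implemented above. It assumes the BlueZ
 * headers <bluetooth/bluetooth.h> and <bluetooth/hci.h> for AF_BLUETOOTH,
 * BTPROTO_HCI, SOL_HCI, HCI_FILTER, HCI_MAX_FRAME_SIZE, struct sockaddr_hci
 * and struct hci_filter (whose layout mirrors the struct hci_ufilter handled
 * by hci_sock_setsockopt() above), and assumes an hci0 device is present.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct sockaddr_hci addr;
	struct hci_filter flt;
	unsigned char buf[HCI_MAX_FRAME_SIZE];
	int len;

	/* SOCK_RAW is the only socket type hci_sock_create() accepts. */
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (fd < 0)
		return 1;

	/* Bind to hci0; this is the path through hci_sock_bind() above. */
	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = 0;
	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(fd);
		return 1;
	}

	/* Accept every packet type and event; for callers without
	 * CAP_NET_RAW, hci_sock_setsockopt() narrows this against
	 * hci_sec_filter. */
	memset(&flt, 0xff, sizeof(flt));
	flt.opcode = 0;
	if (setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0) {
		close(fd);
		return 1;
	}

	/* Each read() returns one frame; byte 0 is the packet type that
	 * hci_sock_sendmsg() strips into bt_cb(skb)->pkt_type on the
	 * transmit side. */
	len = read(fd, buf, sizeof(buf));
	if (len > 0)
		printf("frame of %d bytes, packet type 0x%02x\n", len, buf[0]);

	close(fd);
	return 0;
}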
+void __exit hci_sock_cleanup(void) +{ + if (bt_sock_unregister(BTPROTO_HCI) < 0) + BT_ERR("HCI socket unregistration failed"); + + hci_unregister_notifier(&hci_sock_nblock); + + proto_unregister(&hci_sk_proto); +} diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c new file mode 100644 index 0000000..65c1898 --- /dev/null +++ b/net/bluetooth/hci_sysfs.c @@ -0,0 +1,524 @@ +/* Bluetooth HCI driver model support. */ + +#include +#include +#include +#include + +#include +#include + +struct class *bt_class = NULL; +EXPORT_SYMBOL_GPL(bt_class); + +struct dentry *bt_debugfs = NULL; +EXPORT_SYMBOL_GPL(bt_debugfs); + +static struct workqueue_struct *bt_workq; + +static inline char *link_typetostr(int type) +{ + switch (type) { + case ACL_LINK: + return "ACL"; + case SCO_LINK: + return "SCO"; + case ESCO_LINK: + return "eSCO"; + default: + return "UNKNOWN"; + } +} + +static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_conn *conn = dev_get_drvdata(dev); + return sprintf(buf, "%s\n", link_typetostr(conn->type)); +} + +static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_conn *conn = dev_get_drvdata(dev); + bdaddr_t bdaddr; + baswap(&bdaddr, &conn->dst); + return sprintf(buf, "%s\n", batostr(&bdaddr)); +} + +static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_conn *conn = dev_get_drvdata(dev); + + return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + conn->features[0], conn->features[1], + conn->features[2], conn->features[3], + conn->features[4], conn->features[5], + conn->features[6], conn->features[7]); +} + +#define LINK_ATTR(_name,_mode,_show,_store) \ +struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store) + +static LINK_ATTR(type, S_IRUGO, show_link_type, NULL); +static LINK_ATTR(address, S_IRUGO, show_link_address, NULL); +static LINK_ATTR(features, S_IRUGO, show_link_features, NULL); + +static struct attribute *bt_link_attrs[] = { + &link_attr_type.attr, + &link_attr_address.attr, + &link_attr_features.attr, + NULL +}; + +static struct attribute_group bt_link_group = { + .attrs = bt_link_attrs, +}; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) +static const struct attribute_group *bt_link_groups[] = { +#else +static struct attribute_group *bt_link_groups[] = { +#endif + &bt_link_group, + NULL +}; + +static void bt_link_release(struct device *dev) +{ + void *data = dev_get_drvdata(dev); + kfree(data); +} + +static struct device_type bt_link = { + .name = "link", + .groups = bt_link_groups, + .release = bt_link_release, +}; + +static void add_conn(struct work_struct *work) +{ + struct hci_conn *conn = container_of(work, struct hci_conn, work_add); + struct hci_dev *hdev = conn->hdev; + + dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); + + dev_set_drvdata(&conn->dev, conn); + + if (device_add(&conn->dev) < 0) { + BT_ERR("Failed to register connection device"); + return; + } + + hci_dev_hold(hdev); +} + +/* + * The rfcomm tty device will possibly retain even when conn + * is down, and sysfs doesn't support move zombie device, + * so we should move the device before conn device is destroyed. 
+ */ +static int __match_tty(struct device *dev, void *data) +{ + return !strncmp(dev_name(dev), "rfcomm", 6); +} + +static void del_conn(struct work_struct *work) +{ + struct hci_conn *conn = container_of(work, struct hci_conn, work_del); + struct hci_dev *hdev = conn->hdev; + + if (!device_is_registered(&conn->dev)) + return; + + while (1) { + struct device *dev; + + dev = device_find_child(&conn->dev, NULL, __match_tty); + if (!dev) + break; +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)) + device_move(dev, NULL, DPM_ORDER_DEV_LAST); +#else + device_move(dev, NULL); +#endif + put_device(dev); + } + + device_del(&conn->dev); + put_device(&conn->dev); + + hci_dev_put(hdev); +} + +void hci_conn_init_sysfs(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p", conn); + + conn->dev.type = &bt_link; + conn->dev.class = bt_class; + conn->dev.parent = &hdev->dev; + + device_initialize(&conn->dev); + + INIT_WORK(&conn->work_add, add_conn); + INIT_WORK(&conn->work_del, del_conn); +} + +void hci_conn_add_sysfs(struct hci_conn *conn) +{ + BT_DBG("conn %p", conn); + + queue_work(bt_workq, &conn->work_add); +} + +void hci_conn_del_sysfs(struct hci_conn *conn) +{ + BT_DBG("conn %p", conn); + + queue_work(bt_workq, &conn->work_del); +} + +static inline char *host_bustostr(int bus) +{ + switch (bus) { + case HCI_VIRTUAL: + return "VIRTUAL"; + case HCI_USB: + return "USB"; + case HCI_PCCARD: + return "PCCARD"; + case HCI_UART: + return "UART"; + case HCI_RS232: + return "RS232"; + case HCI_PCI: + return "PCI"; + case HCI_SDIO: + return "SDIO"; + default: + return "UNKNOWN"; + } +} + +static inline char *host_typetostr(int type) +{ + switch (type) { + case HCI_BREDR: + return "BR/EDR"; + case HCI_80211: + return "802.11"; + default: + return "UNKNOWN"; + } +} + +static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); +} + +static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type)); +} + +static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + char name[249]; + int i; + + for (i = 0; i < 248; i++) + name[i] = hdev->dev_name[i]; + + name[248] = '\0'; + return sprintf(buf, "%s\n", name); +} + +static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + return sprintf(buf, "0x%.2x%.2x%.2x\n", + hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); +} + +static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + bdaddr_t bdaddr; + baswap(&bdaddr, &hdev->bdaddr); + return sprintf(buf, "%s\n", batostr(&bdaddr)); +} + +static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + hdev->features[0], hdev->features[1], + hdev->features[2], hdev->features[3], + hdev->features[4], hdev->features[5], + hdev->features[6], hdev->features[7]); +} + +static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", 
hdev->manufacturer); +} + +static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", hdev->hci_ver); +} + +static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", hdev->hci_rev); +} + +static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", hdev->idle_timeout); +} + +static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + char *ptr; + __u32 val; + + val = simple_strtoul(buf, &ptr, 10); + if (ptr == buf) + return -EINVAL; + + if (val != 0 && (val < 500 || val > 3600000)) + return -EINVAL; + + hdev->idle_timeout = val; + + return count; +} + +static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", hdev->sniff_max_interval); +} + +static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + char *ptr; + __u16 val; + + val = simple_strtoul(buf, &ptr, 10); + if (ptr == buf) + return -EINVAL; + + if (val < 0x0002 || val > 0xFFFE || val % 2) + return -EINVAL; + + if (val < hdev->sniff_min_interval) + return -EINVAL; + + hdev->sniff_max_interval = val; + + return count; +} + +static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", hdev->sniff_min_interval); +} + +static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct hci_dev *hdev = dev_get_drvdata(dev); + char *ptr; + __u16 val; + + val = simple_strtoul(buf, &ptr, 10); + if (ptr == buf) + return -EINVAL; + + if (val < 0x0002 || val > 0xFFFE || val % 2) + return -EINVAL; + + if (val > hdev->sniff_max_interval) + return -EINVAL; + + hdev->sniff_min_interval = val; + + return count; +} + +static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL); +static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); +static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); +static DEVICE_ATTR(class, S_IRUGO, show_class, NULL); +static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); +static DEVICE_ATTR(features, S_IRUGO, show_features, NULL); +static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL); +static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); +static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); + +static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, + show_idle_timeout, store_idle_timeout); +static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, + show_sniff_max_interval, store_sniff_max_interval); +static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, + show_sniff_min_interval, store_sniff_min_interval); + +static struct attribute *bt_host_attrs[] = { + &dev_attr_bus.attr, + &dev_attr_type.attr, + &dev_attr_name.attr, + &dev_attr_class.attr, + &dev_attr_address.attr, + &dev_attr_features.attr, + &dev_attr_manufacturer.attr, + &dev_attr_hci_version.attr, + &dev_attr_hci_revision.attr, + &dev_attr_idle_timeout.attr, + 
&dev_attr_sniff_max_interval.attr, + &dev_attr_sniff_min_interval.attr, + NULL +}; + +static struct attribute_group bt_host_group = { + .attrs = bt_host_attrs, +}; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) +static const struct attribute_group *bt_host_groups[] = { +#else +static struct attribute_group *bt_host_groups[] = { +#endif + &bt_host_group, + NULL +}; + +static void bt_host_release(struct device *dev) +{ + void *data = dev_get_drvdata(dev); + kfree(data); +} + +static struct device_type bt_host = { + .name = "host", + .groups = bt_host_groups, + .release = bt_host_release, +}; + +static int inquiry_cache_show(struct seq_file *f, void *p) +{ + struct hci_dev *hdev = f->private; + struct inquiry_cache *cache = &hdev->inq_cache; + struct inquiry_entry *e; + + hci_dev_lock_bh(hdev); + + for (e = cache->list; e; e = e->next) { + struct inquiry_data *data = &e->data; + bdaddr_t bdaddr; + baswap(&bdaddr, &data->bdaddr); + seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", + batostr(&bdaddr), + data->pscan_rep_mode, data->pscan_period_mode, + data->pscan_mode, data->dev_class[2], + data->dev_class[1], data->dev_class[0], + __le16_to_cpu(data->clock_offset), + data->rssi, data->ssp_mode, e->timestamp); + } + + hci_dev_unlock_bh(hdev); + + return 0; +} + +static int inquiry_cache_open(struct inode *inode, struct file *file) +{ + return single_open(file, inquiry_cache_show, inode->i_private); +} + +static const struct file_operations inquiry_cache_fops = { + .open = inquiry_cache_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int hci_register_sysfs(struct hci_dev *hdev) +{ + struct device *dev = &hdev->dev; + int err; + + BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + + dev->type = &bt_host; + dev->class = bt_class; + dev->parent = hdev->parent; + + dev_set_name(dev, "%s", hdev->name); + + dev_set_drvdata(dev, hdev); + + err = device_register(dev); + if (err < 0) + return err; + + if (!bt_debugfs) + return 0; + + hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); + if (!hdev->debugfs) + return 0; + + debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, + hdev, &inquiry_cache_fops); + + return 0; +} + +void hci_unregister_sysfs(struct hci_dev *hdev) +{ + BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + + debugfs_remove_recursive(hdev->debugfs); + + device_del(&hdev->dev); +} + +int __init bt_sysfs_init(void) +{ + bt_workq = create_singlethread_workqueue("bluetooth"); + if (!bt_workq) + return -ENOMEM; + + bt_debugfs = debugfs_create_dir("bluetooth", NULL); + + bt_class = class_create(THIS_MODULE, "bluetooth"); + if (IS_ERR(bt_class)) { + destroy_workqueue(bt_workq); + return PTR_ERR(bt_class); + } + + return 0; +} + +void bt_sysfs_cleanup(void) +{ + class_destroy(bt_class); + + debugfs_remove_recursive(bt_debugfs); + + destroy_workqueue(bt_workq); +}
diff --git a/net/bluetooth/hidp/Makefile b/net/bluetooth/hidp/Makefile new file mode 100644 index 0000000..a9ee115 --- /dev/null +++ b/net/bluetooth/hidp/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Linux Bluetooth HIDP layer +# + +obj-$(CONFIG_BT_HIDP) += hidp.o + +hidp-objs := core.o sock.o diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c new file mode 100644 index 0000000..6734b07 --- /dev/null +++ b/net/bluetooth/hidp/core.c @@ -0,0 +1,1186 @@ +/* + HIDP implementation for Linux Bluetooth stack (BlueZ). 
+ Copyright (C) 2003-2004 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "hidp.h" + +#define VERSION "1.2" + +static DECLARE_RWSEM(hidp_session_sem); +static LIST_HEAD(hidp_session_list); + +static unsigned char hidp_keycode[256] = { + 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, + 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, + 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106, + 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, + 72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190, + 191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113, + 115,114, 0, 0, 0,121, 0, 89, 93,124, 92, 94, 95, 0, 0, 0, + 122,123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113, + 150,158,159,128,136,177,178,176,142,152,173,140 +}; + +static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; + +static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr) +{ + struct hidp_session *session; + struct list_head *p; + + BT_DBG(""); + + list_for_each(p, &hidp_session_list) { + session = list_entry(p, struct hidp_session, list); + if (!bacmp(bdaddr, &session->bdaddr)) + return session; + } + return NULL; +} + +static void __hidp_link_session(struct hidp_session *session) +{ + __module_get(THIS_MODULE); + list_add(&session->list, &hidp_session_list); + + hci_conn_hold_device(session->conn); +} + +static void __hidp_unlink_session(struct hidp_session *session) +{ + hci_conn_put_device(session->conn); + + list_del(&session->list); + module_put(THIS_MODULE); +} + +static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) +{ + bacpy(&ci->bdaddr, &session->bdaddr); + + ci->flags = session->flags; + ci->state = session->state; + + ci->vendor = 0x0000; + ci->product = 0x0000; + ci->version = 0x0000; + memset(ci->name, 0, 128); + + if (session->input) { + ci->vendor = session->input->id.vendor; + ci->product = session->input->id.product; + ci->version = session->input->id.version; 
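+ /* Fall back to a generic label when the boot device registered no name;
+ if a full HID descriptor is attached, the session->hid block below
+ overrides these identity fields. */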
+ if (session->input->name) + strncpy(ci->name, session->input->name, 128); + else + strncpy(ci->name, "HID Boot Device", 128); + } + + if (session->hid) { + ci->vendor = session->hid->vendor; + ci->product = session->hid->product; + ci->version = session->hid->version; + strncpy(ci->name, session->hid->name, 128); + } +} + +static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev, + unsigned int type, unsigned int code, int value) +{ + unsigned char newleds; + struct sk_buff *skb; + + BT_DBG("session %p type %d code %d value %d", session, type, code, value); + + if (type != EV_LED) + return -1; + + newleds = (!!test_bit(LED_KANA, dev->led) << 3) | + (!!test_bit(LED_COMPOSE, dev->led) << 3) | + (!!test_bit(LED_SCROLLL, dev->led) << 2) | + (!!test_bit(LED_CAPSL, dev->led) << 1) | + (!!test_bit(LED_NUML, dev->led)); + + if (session->leds == newleds) + return 0; + + session->leds = newleds; + + if (!(skb = alloc_skb(3, GFP_ATOMIC))) { + BT_ERR("Can't allocate memory for new frame"); + return -ENOMEM; + } + + *skb_put(skb, 1) = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; + *skb_put(skb, 1) = 0x01; + *skb_put(skb, 1) = newleds; + + skb_queue_tail(&session->intr_transmit, skb); + + hidp_schedule(session); + + return 0; +} + +static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) +{ + struct hid_device *hid = input_get_drvdata(dev); + struct hidp_session *session = hid->driver_data; + + return hidp_queue_event(session, dev, type, code, value); +} + +static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) +{ + struct hidp_session *session = input_get_drvdata(dev); + + return hidp_queue_event(session, dev, type, code, value); +} + +static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb) +{ + struct input_dev *dev = session->input; + unsigned char *keys = session->keys; + unsigned char *udata = skb->data + 1; + signed char *sdata = skb->data + 1; + int i, size = skb->len - 1; + + switch (skb->data[0]) { + case 0x01: /* Keyboard report */ + for (i = 0; i < 8; i++) + input_report_key(dev, hidp_keycode[i + 224], (udata[0] >> i) & 1); + + /* If all the key codes have been set to 0x01, it means + * too many keys were pressed at the same time. 
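+ * (0x01 is the HID boot keyboard ErrorRollOver code, so the report
+ * carries no usable key state and is skipped).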
*/ + if (!memcmp(udata + 2, hidp_mkeyspat, 6)) + break; + + for (i = 2; i < 8; i++) { + if (keys[i] > 3 && memscan(udata + 2, keys[i], 6) == udata + 8) { + if (hidp_keycode[keys[i]]) + input_report_key(dev, hidp_keycode[keys[i]], 0); + else + BT_ERR("Unknown key (scancode %#x) released.", keys[i]); + } + + if (udata[i] > 3 && memscan(keys + 2, udata[i], 6) == keys + 8) { + if (hidp_keycode[udata[i]]) + input_report_key(dev, hidp_keycode[udata[i]], 1); + else + BT_ERR("Unknown key (scancode %#x) pressed.", udata[i]); + } + } + + memcpy(keys, udata, 8); + break; + + case 0x02: /* Mouse report */ + input_report_key(dev, BTN_LEFT, sdata[0] & 0x01); + input_report_key(dev, BTN_RIGHT, sdata[0] & 0x02); + input_report_key(dev, BTN_MIDDLE, sdata[0] & 0x04); + input_report_key(dev, BTN_SIDE, sdata[0] & 0x08); + input_report_key(dev, BTN_EXTRA, sdata[0] & 0x10); + + input_report_rel(dev, REL_X, sdata[1]); + input_report_rel(dev, REL_Y, sdata[2]); + + if (size > 3) + input_report_rel(dev, REL_WHEEL, sdata[3]); + break; + } + + input_sync(dev); +} + +static int __hidp_send_ctrl_message(struct hidp_session *session, + unsigned char hdr, unsigned char *data, int size) +{ + struct sk_buff *skb; + + BT_DBG("session %p data %p size %d", session, data, size); + + if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { + BT_ERR("Can't allocate memory for new frame"); + return -ENOMEM; + } + + *skb_put(skb, 1) = hdr; + if (data && size > 0) + memcpy(skb_put(skb, size), data, size); + + skb_queue_tail(&session->ctrl_transmit, skb); + + return 0; +} + +static inline int hidp_send_ctrl_message(struct hidp_session *session, + unsigned char hdr, unsigned char *data, int size) +{ + int err; + + err = __hidp_send_ctrl_message(session, hdr, data, size); + + hidp_schedule(session); + + return err; +} + +static int hidp_queue_report(struct hidp_session *session, + unsigned char *data, int size) +{ + struct sk_buff *skb; + + BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size); + + if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { + BT_ERR("Can't allocate memory for new frame"); + return -ENOMEM; + } + + *skb_put(skb, 1) = 0xa2; + if (size > 0) + memcpy(skb_put(skb, size), data, size); + + skb_queue_tail(&session->intr_transmit, skb); + + hidp_schedule(session); + + return 0; +} + +static int hidp_send_report(struct hidp_session *session, struct hid_report *report) +{ + unsigned char buf[32]; + int rsize; + + rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0); + if (rsize > sizeof(buf)) + return -EIO; + + hid_output_report(report, buf); + + return hidp_queue_report(session, buf, rsize); +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, + unsigned char report_type) +{ + switch (report_type) { + case HID_FEATURE_REPORT: + report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE; + break; + case HID_OUTPUT_REPORT: + report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; + break; + default: + return -EINVAL; + } + + if (hidp_send_ctrl_message(hid->driver_data, report_type, + data, count)) + return -ENOMEM; + return count; +} +#endif + +static void hidp_idle_timeout(unsigned long arg) +{ + struct hidp_session *session = (struct hidp_session *) arg; + + atomic_inc(&session->terminate); + hidp_schedule(session); +} + +static void hidp_set_timer(struct hidp_session *session) +{ + if (session->idle_to > 0) + mod_timer(&session->timer, jiffies + HZ * session->idle_to); +} + +static inline void 
hidp_del_timer(struct hidp_session *session) +{ + if (session->idle_to > 0) + del_timer(&session->timer); +} + +static void hidp_process_handshake(struct hidp_session *session, + unsigned char param) +{ + BT_DBG("session %p param 0x%02x", session, param); + + switch (param) { + case HIDP_HSHK_SUCCESSFUL: + /* FIXME: Call into SET_ GET_ handlers here */ + break; + + case HIDP_HSHK_NOT_READY: + case HIDP_HSHK_ERR_INVALID_REPORT_ID: + case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: + case HIDP_HSHK_ERR_INVALID_PARAMETER: + /* FIXME: Call into SET_ GET_ handlers here */ + break; + + case HIDP_HSHK_ERR_UNKNOWN: + break; + + case HIDP_HSHK_ERR_FATAL: + /* Device requests a reboot, as this is the only way this error + * can be recovered. */ + __hidp_send_ctrl_message(session, + HIDP_TRANS_HID_CONTROL | HIDP_CTRL_SOFT_RESET, NULL, 0); + break; + + default: + __hidp_send_ctrl_message(session, + HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); + break; + } +} + +static void hidp_process_hid_control(struct hidp_session *session, + unsigned char param) +{ + BT_DBG("session %p param 0x%02x", session, param); + + if (param == HIDP_CTRL_VIRTUAL_CABLE_UNPLUG) { + /* Flush the transmit queues */ + skb_queue_purge(&session->ctrl_transmit); + skb_queue_purge(&session->intr_transmit); + + /* Kill session thread */ + atomic_inc(&session->terminate); + hidp_schedule(session); + } +} + +static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb, + unsigned char param) +{ + BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param); + + switch (param) { + case HIDP_DATA_RTYPE_INPUT: + hidp_set_timer(session); + + if (session->input) + hidp_input_report(session, skb); + + if (session->hid) + hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); + + break; + + case HIDP_DATA_RTYPE_OTHER: + case HIDP_DATA_RTYPE_OUPUT: + case HIDP_DATA_RTYPE_FEATURE: + break; + + default: + __hidp_send_ctrl_message(session, + HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); + } +} + +static void hidp_recv_ctrl_frame(struct hidp_session *session, + struct sk_buff *skb) +{ + unsigned char hdr, type, param; + + BT_DBG("session %p skb %p len %d", session, skb, skb->len); + + hdr = skb->data[0]; + skb_pull(skb, 1); + + type = hdr & HIDP_HEADER_TRANS_MASK; + param = hdr & HIDP_HEADER_PARAM_MASK; + + switch (type) { + case HIDP_TRANS_HANDSHAKE: + hidp_process_handshake(session, param); + break; + + case HIDP_TRANS_HID_CONTROL: + hidp_process_hid_control(session, param); + break; + + case HIDP_TRANS_DATA: + hidp_process_data(session, skb, param); + break; + + default: + __hidp_send_ctrl_message(session, + HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_UNSUPPORTED_REQUEST, NULL, 0); + break; + } + + kfree_skb(skb); +} + +static void hidp_recv_intr_frame(struct hidp_session *session, + struct sk_buff *skb) +{ + unsigned char hdr; + + BT_DBG("session %p skb %p len %d", session, skb, skb->len); + + hdr = skb->data[0]; + skb_pull(skb, 1); + + if (hdr == (HIDP_TRANS_DATA | HIDP_DATA_RTYPE_INPUT)) { + hidp_set_timer(session); + + if (session->input) + hidp_input_report(session, skb); + + if (session->hid) { + hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1); + BT_DBG("report len %d", skb->len); + } + } else { + BT_DBG("Unsupported protocol header 0x%02x", hdr); + } + + kfree_skb(skb); +} + +static int hidp_send_frame(struct socket *sock, unsigned char *data, int len) +{ + struct kvec iv = { data, len }; + struct msghdr msg; + + BT_DBG("sock %p data 
%p len %d", sock, data, len); + + if (!len) + return 0; + + memset(&msg, 0, sizeof(msg)); + + return kernel_sendmsg(sock, &msg, &iv, 1, len); +} + +static void hidp_process_transmit(struct hidp_session *session) +{ + struct sk_buff *skb; + + BT_DBG("session %p", session); + + while ((skb = skb_dequeue(&session->ctrl_transmit))) { + if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { + skb_queue_head(&session->ctrl_transmit, skb); + break; + } + + hidp_set_timer(session); + kfree_skb(skb); + } + + while ((skb = skb_dequeue(&session->intr_transmit))) { + if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { + skb_queue_head(&session->intr_transmit, skb); + break; + } + + hidp_set_timer(session); + kfree_skb(skb); + } +} + +static int hidp_session(void *arg) +{ + struct hidp_session *session = arg; + struct sock *ctrl_sk = session->ctrl_sock->sk; + struct sock *intr_sk = session->intr_sock->sk; + struct sk_buff *skb; + int vendor = 0x0000, product = 0x0000; + wait_queue_t ctrl_wait, intr_wait; + + BT_DBG("session %p", session); + + if (session->input) { + vendor = session->input->id.vendor; + product = session->input->id.product; + } + + if (session->hid) { + vendor = session->hid->vendor; + product = session->hid->product; + } + + daemonize("khidpd_%04x%04x", vendor, product); + set_user_nice(current, -15); + + init_waitqueue_entry(&ctrl_wait, current); + init_waitqueue_entry(&intr_wait, current); + add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); + add_wait_queue(intr_sk->sk_sleep, &intr_wait); + while (!atomic_read(&session->terminate)) { + set_current_state(TASK_INTERRUPTIBLE); + + if (ctrl_sk->sk_state != BT_CONNECTED || intr_sk->sk_state != BT_CONNECTED) + break; + + while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { + skb_orphan(skb); + hidp_recv_ctrl_frame(session, skb); + } + + while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { + skb_orphan(skb); + hidp_recv_intr_frame(session, skb); + } + + hidp_process_transmit(session); + + schedule(); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(intr_sk->sk_sleep, &intr_wait); + remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); + + down_write(&hidp_session_sem); + + hidp_del_timer(session); + + if (session->input) { + input_unregister_device(session->input); + session->input = NULL; + } + + if (session->hid) { +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + hid_destroy_device(session->hid); + session->hid = NULL; +#else + if (session->hid->claimed & HID_CLAIMED_INPUT) + hidinput_disconnect(session->hid); + hid_free_device(session->hid); +#endif + } + + /* Wakeup user-space polling for socket errors */ + session->intr_sock->sk->sk_err = EUNATCH; + session->ctrl_sock->sk->sk_err = EUNATCH; + + hidp_schedule(session); + + fput(session->intr_sock->file); + + wait_event_timeout(*(ctrl_sk->sk_sleep), + (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500)); + + fput(session->ctrl_sock->file); + + __hidp_unlink_session(session); + + up_write(&hidp_session_sem); + + kfree(session); + return 0; +} + +static struct device *hidp_get_device(struct hidp_session *session) +{ + bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; + bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; + struct device *device = NULL; + struct hci_dev *hdev; + + hdev = hci_get_route(dst, src); + if (!hdev) + return NULL; + + session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); + if (session->conn) + device = &session->conn->dev; + + hci_dev_put(hdev); + + return device; +} + +static int hidp_setup_input(struct 
hidp_session *session, + struct hidp_connadd_req *req) +{ + struct input_dev *input; + int err, i; + + input = input_allocate_device(); + if (!input) + return -ENOMEM; + + session->input = input; + + input_set_drvdata(input, session); + + input->name = "Bluetooth HID Boot Protocol Device"; + + input->id.bustype = BUS_BLUETOOTH; + input->id.vendor = req->vendor; + input->id.product = req->product; + input->id.version = req->version; + + if (req->subclass & 0x40) { + set_bit(EV_KEY, input->evbit); + set_bit(EV_LED, input->evbit); + set_bit(EV_REP, input->evbit); + + set_bit(LED_NUML, input->ledbit); + set_bit(LED_CAPSL, input->ledbit); + set_bit(LED_SCROLLL, input->ledbit); + set_bit(LED_COMPOSE, input->ledbit); + set_bit(LED_KANA, input->ledbit); + + for (i = 0; i < sizeof(hidp_keycode); i++) + set_bit(hidp_keycode[i], input->keybit); + clear_bit(0, input->keybit); + } + + if (req->subclass & 0x80) { + input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); + input->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | + BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); + input->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); + input->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_SIDE) | + BIT_MASK(BTN_EXTRA); + input->relbit[0] |= BIT_MASK(REL_WHEEL); + } + + input->dev.parent = hidp_get_device(session); + + input->event = hidp_input_event; + + err = input_register_device(input); + if (err < 0) { + hci_conn_put_device(session->conn); + return err; + } + + return 0; +} + +static int hidp_open(struct hid_device *hid) +{ + return 0; +} + +static void hidp_close(struct hid_device *hid) +{ +} + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) +static const struct { + __u16 idVendor; + __u16 idProduct; + unsigned quirks; +} hidp_blacklist[] = { + /* Apple wireless Mighty Mouse */ + { 0x05ac, 0x030c, HID_QUIRK_MIGHTYMOUSE | HID_QUIRK_INVERT_HWHEEL }, + + { } /* Terminating entry */ +}; +static void hidp_setup_quirks(struct hid_device *hid) +{ + unsigned int n; + + for (n = 0; hidp_blacklist[n].idVendor; n++) + if (hidp_blacklist[n].idVendor == le16_to_cpu(hid->vendor) && + hidp_blacklist[n].idProduct == le16_to_cpu(hid->product)) + hid->quirks = hidp_blacklist[n].quirks; +} + +static void hidp_setup_hid(struct hidp_session *session, + struct hidp_connadd_req *req) +{ + struct hid_device *hid = session->hid; + struct hid_report *report; + bdaddr_t src, dst; + + session->hid = hid; + + hid->driver_data = session; + + baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); + baswap(&dst, &bt_sk(session->ctrl_sock->sk)->dst); + + hid->bus = BUS_BLUETOOTH; + hid->vendor = req->vendor; + hid->product = req->product; + hid->version = req->version; + hid->country = req->country; + + strncpy(hid->name, req->name, 128); + strncpy(hid->phys, batostr(&src), 64); + strncpy(hid->uniq, batostr(&dst), 64); + + hid->dev = hidp_get_device(session); + hid->hid_open = hidp_open; + hid->hid_close = hidp_close; + + hid->hidinput_input_event = hidp_hidinput_event; + + hidp_setup_quirks(hid); + + list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].report_list, list) + hidp_send_report(session, report); + + list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].report_list, list) + hidp_send_report(session, report); + + if (hidinput_connect(hid) == 0) + hid->claimed |= HID_CLAIMED_INPUT; +} +#else + +static int hidp_parse(struct hid_device *hid) +{ + struct hidp_session *session = hid->driver_data; + + return hid_parse_report(session->hid, session->rd_data, + session->rd_size); +} + +static int hidp_start(struct 
hid_device *hid) +{ + struct hidp_session *session = hid->driver_data; + struct hid_report *report; + + list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT]. + report_list, list) + hidp_send_report(session, report); + + list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT]. + report_list, list) + hidp_send_report(session, report); + + return 0; +} + +static void hidp_stop(struct hid_device *hid) +{ + struct hidp_session *session = hid->driver_data; + + skb_queue_purge(&session->ctrl_transmit); + skb_queue_purge(&session->intr_transmit); + + hid->claimed = 0; +} + +static struct hid_ll_driver hidp_hid_driver = { + .parse = hidp_parse, + .start = hidp_start, + .stop = hidp_stop, + .open = hidp_open, + .close = hidp_close, + .hidinput_input_event = hidp_hidinput_event, +}; + +static int hidp_setup_hid(struct hidp_session *session, + struct hidp_connadd_req *req) +{ + struct hid_device *hid; + bdaddr_t src, dst; + int err; + + session->rd_data = kzalloc(req->rd_size, GFP_KERNEL); + if (!session->rd_data) + return -ENOMEM; + + if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) { + err = -EFAULT; + goto fault; + } + session->rd_size = req->rd_size; + + hid = hid_allocate_device(); + if (IS_ERR(hid)) { + err = PTR_ERR(hid); + goto fault; + } + + session->hid = hid; + + hid->driver_data = session; + + baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); + baswap(&dst, &bt_sk(session->ctrl_sock->sk)->dst); + + hid->bus = BUS_BLUETOOTH; + hid->vendor = req->vendor; + hid->product = req->product; + hid->version = req->version; + hid->country = req->country; + + strncpy(hid->name, req->name, 128); + strncpy(hid->phys, batostr(&src), 64); + strncpy(hid->uniq, batostr(&dst), 64); + + hid->dev.parent = hidp_get_device(session); + hid->ll_driver = &hidp_hid_driver; + + hid->hid_output_raw_report = hidp_output_raw_report; + + err = hid_add_device(hid); + if (err < 0) + goto failed; + + return 0; + +failed: + hid_destroy_device(hid); + session->hid = NULL; + +fault: + kfree(session->rd_data); + session->rd_data = NULL; + + return err; +} +#endif + +int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) +{ + struct hidp_session *session, *s; + int err; + + BT_DBG(""); + + if (bacmp(&bt_sk(ctrl_sock->sk)->src, &bt_sk(intr_sock->sk)->src) || + bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst)) + return -ENOTUNIQ; + + session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL); + if (!session) + return -ENOMEM; + + BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size); + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) + if (req->rd_size > 0) { + unsigned char *buf = kmalloc(req->rd_size, GFP_KERNEL); + + if (!buf) { + kfree(session); + return -ENOMEM; + } + + if (copy_from_user(buf, req->rd_data, req->rd_size)) { + kfree(buf); + kfree(session); + return -EFAULT; + } + + session->hid = hid_parse_report(buf, req->rd_size); + + kfree(buf); + + if (!session->hid) { + kfree(session); + return -EINVAL; + } + } + + if (!session->hid) { + session->input = input_allocate_device(); + if (!session->input) { + kfree(session); + return -ENOMEM; + } + } +#endif + down_write(&hidp_session_sem); + + s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst); + if (s && s->state == BT_CONNECTED) { + err = -EEXIST; + goto failed; + } + + bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); + + session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu); + session->intr_mtu = min_t(uint, 
l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu); + + BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu); + + session->ctrl_sock = ctrl_sock; + session->intr_sock = intr_sock; + session->state = BT_CONNECTED; + + setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session); + + skb_queue_head_init(&session->ctrl_transmit); + skb_queue_head_init(&session->intr_transmit); + + session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); + session->idle_to = req->idle_to; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + if (req->rd_size > 0) { + err = hidp_setup_hid(session, req); + if (err && err != -ENODEV) + goto purge; + } + + if (!session->hid) { + err = hidp_setup_input(session, req); + if (err < 0) + goto purge; + } +#else + if (session->input) { + err = hidp_setup_input(session, req); + if (err < 0) + goto failed; + } + + if (session->hid) + hidp_setup_hid(session, req); +#endif + + __hidp_link_session(session); + + hidp_set_timer(session); + + err = kernel_thread(hidp_session, session, CLONE_KERNEL); + if (err < 0) + goto unlink; + + if (session->input) { + hidp_send_ctrl_message(session, + HIDP_TRANS_SET_PROTOCOL | HIDP_PROTO_BOOT, NULL, 0); + session->flags |= (1 << HIDP_BOOT_PROTOCOL_MODE); + + session->leds = 0xff; + hidp_input_event(session->input, EV_LED, 0, 0); + } + + up_write(&hidp_session_sem); + return 0; + +unlink: + hidp_del_timer(session); + + __hidp_unlink_session(session); + + if (session->input) { + input_unregister_device(session->input); + session->input = NULL; + } + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + if (session->hid) { + hid_destroy_device(session->hid); + session->hid = NULL; + } + + kfree(session->rd_data); + session->rd_data = NULL; + +purge: + skb_queue_purge(&session->ctrl_transmit); + skb_queue_purge(&session->intr_transmit); +#endif + +failed: + up_write(&hidp_session_sem); + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) + if (session->hid) + hid_free_device(session->hid); +#endif + input_free_device(session->input); + kfree(session); + return err; +} + +int hidp_del_connection(struct hidp_conndel_req *req) +{ + struct hidp_session *session; + int err = 0; + + BT_DBG(""); + + down_read(&hidp_session_sem); + + session = __hidp_get_session(&req->bdaddr); + if (session) { + if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG)) { + hidp_send_ctrl_message(session, + HIDP_TRANS_HID_CONTROL | HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, NULL, 0); + } else { + /* Flush the transmit queues */ + skb_queue_purge(&session->ctrl_transmit); + skb_queue_purge(&session->intr_transmit); + + /* Wakeup user-space polling for socket errors */ + session->intr_sock->sk->sk_err = EUNATCH; + session->ctrl_sock->sk->sk_err = EUNATCH; + + /* Kill session thread */ + atomic_inc(&session->terminate); + hidp_schedule(session); + } + } else + err = -ENOENT; + + up_read(&hidp_session_sem); + return err; +} + +int hidp_get_connlist(struct hidp_connlist_req *req) +{ + struct list_head *p; + int err = 0, n = 0; + + BT_DBG(""); + + down_read(&hidp_session_sem); + + list_for_each(p, &hidp_session_list) { + struct hidp_session *session; + struct hidp_conninfo ci; + + session = list_entry(p, struct hidp_session, list); + + __hidp_copy_session(session, &ci); + + if (copy_to_user(req->ci, &ci, sizeof(ci))) { + err = -EFAULT; + break; + } + + if (++n >= req->cnum) + break; + + req->ci++; + } + req->cnum = n; + + up_read(&hidp_session_sem); + return err; +} + +int hidp_get_conninfo(struct hidp_conninfo *ci) +{ + struct hidp_session 
*session; + int err = 0; + + down_read(&hidp_session_sem); + + session = __hidp_get_session(&ci->bdaddr); + if (session) + __hidp_copy_session(session, ci); + else + err = -ENOENT; + + up_read(&hidp_session_sem); + return err; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +static const struct hid_device_id hidp_table[] = { + { HID_BLUETOOTH_DEVICE(HID_ANY_ID, HID_ANY_ID) }, + { } +}; + +static struct hid_driver hidp_driver = { + .name = "generic-bluetooth", + .id_table = hidp_table, +}; +#endif + +static int __init hidp_init(void) +{ + int ret; + + l2cap_load(); + + BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + ret = hid_register_driver(&hidp_driver); + if (ret) + goto err; +#endif + + ret = hidp_init_sockets(); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + if (ret) + goto err_drv; + + return 0; +err_drv: + hid_unregister_driver(&hidp_driver); +err: +#endif + return ret; +} + +static void __exit hidp_exit(void) +{ + hidp_cleanup_sockets(); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + hid_unregister_driver(&hidp_driver); +#endif +} + +module_init(hidp_init); +module_exit(hidp_exit); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth HIDP ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-6"); diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h new file mode 100644 index 0000000..a4e215d --- /dev/null +++ b/net/bluetooth/hidp/hidp.h @@ -0,0 +1,175 @@ +/* + HIDP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2003-2004 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
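hidp_init() above has the usual two-stage bring-up: the HID driver is registered first, the HIDP sockets second, and a failure in the second stage unregisters the first before the error is returned. A compilable sketch of that shape, using placeholder register_driver()/register_sockets() hooks rather than the real kernel calls:

#include <stdio.h>

/* Hypothetical hooks standing in for hid_register_driver() and
 * hidp_init_sockets(); each returns 0 on success. */
static int register_driver(void)    { return 0; }
static void unregister_driver(void) { }
static int register_sockets(void)   { return 0; }

/* Same shape as hidp_init(): register step 1, then step 2, and on a
 * step-2 failure unwind step 1 before propagating the error. */
static int subsys_init(void)
{
        int ret;

        ret = register_driver();
        if (ret)
                goto err;

        ret = register_sockets();
        if (ret)
                goto err_drv;

        return 0;

err_drv:
        unregister_driver();
err:
        return ret;
}

int main(void)
{
        printf("init: %d\n", subsys_init());
        return 0;
}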
+*/ + +#ifndef __HIDP_H +#define __HIDP_H + +#include +#include + +/* HIDP header masks */ +#define HIDP_HEADER_TRANS_MASK 0xf0 +#define HIDP_HEADER_PARAM_MASK 0x0f + +/* HIDP transaction types */ +#define HIDP_TRANS_HANDSHAKE 0x00 +#define HIDP_TRANS_HID_CONTROL 0x10 +#define HIDP_TRANS_GET_REPORT 0x40 +#define HIDP_TRANS_SET_REPORT 0x50 +#define HIDP_TRANS_GET_PROTOCOL 0x60 +#define HIDP_TRANS_SET_PROTOCOL 0x70 +#define HIDP_TRANS_GET_IDLE 0x80 +#define HIDP_TRANS_SET_IDLE 0x90 +#define HIDP_TRANS_DATA 0xa0 +#define HIDP_TRANS_DATC 0xb0 + +/* HIDP handshake results */ +#define HIDP_HSHK_SUCCESSFUL 0x00 +#define HIDP_HSHK_NOT_READY 0x01 +#define HIDP_HSHK_ERR_INVALID_REPORT_ID 0x02 +#define HIDP_HSHK_ERR_UNSUPPORTED_REQUEST 0x03 +#define HIDP_HSHK_ERR_INVALID_PARAMETER 0x04 +#define HIDP_HSHK_ERR_UNKNOWN 0x0e +#define HIDP_HSHK_ERR_FATAL 0x0f + +/* HIDP control operation parameters */ +#define HIDP_CTRL_NOP 0x00 +#define HIDP_CTRL_HARD_RESET 0x01 +#define HIDP_CTRL_SOFT_RESET 0x02 +#define HIDP_CTRL_SUSPEND 0x03 +#define HIDP_CTRL_EXIT_SUSPEND 0x04 +#define HIDP_CTRL_VIRTUAL_CABLE_UNPLUG 0x05 + +/* HIDP data transaction headers */ +#define HIDP_DATA_RTYPE_MASK 0x03 +#define HIDP_DATA_RSRVD_MASK 0x0c +#define HIDP_DATA_RTYPE_OTHER 0x00 +#define HIDP_DATA_RTYPE_INPUT 0x01 +#define HIDP_DATA_RTYPE_OUPUT 0x02 +#define HIDP_DATA_RTYPE_FEATURE 0x03 + +/* HIDP protocol header parameters */ +#define HIDP_PROTO_BOOT 0x00 +#define HIDP_PROTO_REPORT 0x01 + +/* HIDP ioctl defines */ +#define HIDPCONNADD _IOW('H', 200, int) +#define HIDPCONNDEL _IOW('H', 201, int) +#define HIDPGETCONNLIST _IOR('H', 210, int) +#define HIDPGETCONNINFO _IOR('H', 211, int) + +#define HIDP_VIRTUAL_CABLE_UNPLUG 0 +#define HIDP_BOOT_PROTOCOL_MODE 1 +#define HIDP_BLUETOOTH_VENDOR_ID 9 + +struct hidp_connadd_req { + int ctrl_sock; // Connected control socket + int intr_sock; // Connteted interrupt socket + __u16 parser; + __u16 rd_size; + __u8 __user *rd_data; + __u8 country; + __u8 subclass; + __u16 vendor; + __u16 product; + __u16 version; + __u32 flags; + __u32 idle_to; + char name[128]; +}; + +struct hidp_conndel_req { + bdaddr_t bdaddr; + __u32 flags; +}; + +struct hidp_conninfo { + bdaddr_t bdaddr; + __u32 flags; + __u16 state; + __u16 vendor; + __u16 product; + __u16 version; + char name[128]; +}; + +struct hidp_connlist_req { + __u32 cnum; + struct hidp_conninfo __user *ci; +}; + +int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock); +int hidp_del_connection(struct hidp_conndel_req *req); +int hidp_get_connlist(struct hidp_connlist_req *req); +int hidp_get_conninfo(struct hidp_conninfo *ci); + +/* HIDP session defines */ +struct hidp_session { + struct list_head list; + + struct hci_conn *conn; + + struct socket *ctrl_sock; + struct socket *intr_sock; + + bdaddr_t bdaddr; + + unsigned long state; + unsigned long flags; + unsigned long idle_to; + + uint ctrl_mtu; + uint intr_mtu; + + atomic_t terminate; + + unsigned char keys[8]; + unsigned char leds; + + struct input_dev *input; + + struct hid_device *hid; + + struct timer_list timer; + + struct sk_buff_head ctrl_transmit; + struct sk_buff_head intr_transmit; + + /* Report descriptor */ + __u8 *rd_data; + uint rd_size; +}; + +static inline void hidp_schedule(struct hidp_session *session) +{ + struct sock *ctrl_sk = session->ctrl_sock->sk; + struct sock *intr_sk = session->intr_sock->sk; + + wake_up_interruptible(ctrl_sk->sk_sleep); + wake_up_interruptible(intr_sk->sk_sleep); +} + +/* HIDP init defines */ +extern int 
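Every HIDP PDU starts with one byte whose upper nibble is the transaction type and whose lower nibble is a transaction-specific parameter, which is what the two header masks above express. The following stand-alone snippet (constants copied from the header above, not kernel code) shows how such a byte is built and split, including the single-byte SET_PROTOCOL | BOOT request that hidp_add_connection() sends:

#include <stdio.h>

/* Values mirrored from the hidp.h definitions above. */
#define HIDP_HEADER_TRANS_MASK  0xf0
#define HIDP_HEADER_PARAM_MASK  0x0f
#define HIDP_TRANS_DATA         0xa0
#define HIDP_TRANS_SET_PROTOCOL 0x70
#define HIDP_PROTO_BOOT         0x00
#define HIDP_DATA_RTYPE_OUPUT   0x02   /* spelling as in the header */

int main(void)
{
        /* First byte of a DATA transaction carrying an output report. */
        unsigned char hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;

        /* The receiver splits the byte back with the two masks. */
        unsigned char trans = hdr & HIDP_HEADER_TRANS_MASK;
        unsigned char param = hdr & HIDP_HEADER_PARAM_MASK;

        printf("hdr=0x%02x trans=0x%02x param=0x%02x\n", hdr, trans, param);

        /* hidp_add_connection() sends exactly this kind of one-byte
         * request when it switches a device to boot protocol. */
        printf("set-proto-boot=0x%02x\n",
               HIDP_TRANS_SET_PROTOCOL | HIDP_PROTO_BOOT);
        return 0;
}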
__init hidp_init_sockets(void); +extern void __exit hidp_cleanup_sockets(void); + +#endif /* __HIDP_H */ diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c new file mode 100644 index 0000000..1ca957a --- /dev/null +++ b/net/bluetooth/hidp/sock.c @@ -0,0 +1,308 @@ +/* + HIDP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2003-2004 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hidp.h" + +static int hidp_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!sk) + return 0; + + sock_orphan(sk); + sock_put(sk); + + return 0; +} + +static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *) arg; + struct hidp_connadd_req ca; + struct hidp_conndel_req cd; + struct hidp_connlist_req cl; + struct hidp_conninfo ci; + struct socket *csock; + struct socket *isock; + int err; + + BT_DBG("cmd %x arg %lx", cmd, arg); + + switch (cmd) { + case HIDPCONNADD: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + + if (copy_from_user(&ca, argp, sizeof(ca))) + return -EFAULT; + + csock = sockfd_lookup(ca.ctrl_sock, &err); + if (!csock) + return err; + + isock = sockfd_lookup(ca.intr_sock, &err); + if (!isock) { + sockfd_put(csock); + return err; + } + + if (csock->sk->sk_state != BT_CONNECTED || isock->sk->sk_state != BT_CONNECTED) { + sockfd_put(csock); + sockfd_put(isock); + return -EBADFD; + } + + err = hidp_add_connection(&ca, csock, isock); + if (!err) { + if (copy_to_user(argp, &ca, sizeof(ca))) + err = -EFAULT; + } else { + sockfd_put(csock); + sockfd_put(isock); + } + + return err; + + case HIDPCONNDEL: + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + + if (copy_from_user(&cd, argp, sizeof(cd))) + return -EFAULT; + + return hidp_del_connection(&cd); + + case HIDPGETCONNLIST: + if (copy_from_user(&cl, argp, sizeof(cl))) + return -EFAULT; + + if (cl.cnum <= 0) + return -EINVAL; + + err = hidp_get_connlist(&cl); + if (!err && copy_to_user(argp, &cl, sizeof(cl))) + return -EFAULT; + + return err; + + case HIDPGETCONNINFO: + if (copy_from_user(&ci, argp, sizeof(ci))) + return -EFAULT; + + err = hidp_get_conninfo(&ci); + if (!err && copy_to_user(argp, &ci, sizeof(ci))) + return -EFAULT; + + return err; + } + + return -EINVAL; +} + +#ifdef CONFIG_COMPAT +struct compat_hidp_connadd_req { + int ctrl_sock; // Connected control socket + int intr_sock; // Connteted interrupt socket + __u16 
parser; + __u16 rd_size; + compat_uptr_t rd_data; + __u8 country; + __u8 subclass; + __u16 vendor; + __u16 product; + __u16 version; + __u32 flags; + __u32 idle_to; + char name[128]; +}; + +static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + if (cmd == HIDPGETCONNLIST) { + struct hidp_connlist_req cl; + uint32_t uci; + int err; + + if (get_user(cl.cnum, (uint32_t __user *) arg) || + get_user(uci, (u32 __user *) (arg + 4))) + return -EFAULT; + + cl.ci = compat_ptr(uci); + + if (cl.cnum <= 0) + return -EINVAL; + + err = hidp_get_connlist(&cl); + + if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) + err = -EFAULT; + + return err; + } else if (cmd == HIDPCONNADD) { + struct compat_hidp_connadd_req ca; + struct hidp_connadd_req __user *uca; + + uca = compat_alloc_user_space(sizeof(*uca)); + + if (copy_from_user(&ca, (void __user *) arg, sizeof(ca))) + return -EFAULT; + + if (put_user(ca.ctrl_sock, &uca->ctrl_sock) || + put_user(ca.intr_sock, &uca->intr_sock) || + put_user(ca.parser, &uca->parser) || + put_user(ca.rd_size, &uca->rd_size) || + put_user(compat_ptr(ca.rd_data), &uca->rd_data) || + put_user(ca.country, &uca->country) || + put_user(ca.subclass, &uca->subclass) || + put_user(ca.vendor, &uca->vendor) || + put_user(ca.product, &uca->product) || + put_user(ca.version, &uca->version) || + put_user(ca.flags, &uca->flags) || + put_user(ca.idle_to, &uca->idle_to) || + copy_to_user(&uca->name[0], &ca.name[0], 128)) + return -EFAULT; + + arg = (unsigned long) uca; + + /* Fall through. We don't actually write back any _changes_ + to the structure anyway, so there's no need to copy back + into the original compat version */ + } + + return hidp_sock_ioctl(sock, cmd, arg); +} +#endif + +static const struct proto_ops hidp_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = hidp_sock_release, + .ioctl = hidp_sock_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = hidp_sock_compat_ioctl, +#endif + .bind = sock_no_bind, + .getname = sock_no_getname, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .poll = sock_no_poll, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .mmap = sock_no_mmap +}; + +static struct proto hidp_proto = { + .name = "HIDP", + .owner = THIS_MODULE, + .obj_size = sizeof(struct bt_sock) +}; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) +static int hidp_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +#else +static int hidp_sock_create(struct net *net, struct socket *sock, int protocol) +#endif +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + if (sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + + sock->ops = &hidp_sock_ops; + + sock->state = SS_UNCONNECTED; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = protocol; + sk->sk_state = BT_OPEN; + + return 0; +} + +static const struct net_proto_family hidp_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = hidp_sock_create +}; + +int __init hidp_init_sockets(void) +{ + int err; + + err = proto_register(&hidp_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops); + if (err < 0) + goto error; + + return 0; + +error: + 
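hidp_sock_compat_ioctl() above exists because a 32-bit process lays out struct hidp_connadd_req with a 4-byte rd_data pointer, so a 64-bit kernel has to rebuild the request with a native pointer before handing it to the normal HIDPCONNADD path. A trimmed-down user-space illustration of that layout difference and of the pointer widening that compat_ptr() performs (the struct and function names here are made up for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Native request: a real pointer to the report descriptor. */
struct req_native {
        uint16_t rd_size;
        uint8_t *rd_data;
};

/* What a 32-bit caller lays out: the pointer member is a plain 32-bit
 * value, like the compat_uptr_t rd_data field above. */
struct req_compat {
        uint16_t rd_size;
        uint32_t rd_data;
};

/* Widen the 32-bit user "pointer" into a native one, which is morally
 * what compat_ptr() does before the request is passed on. */
static struct req_native translate(const struct req_compat *c)
{
        struct req_native n;

        n.rd_size = c->rd_size;
        n.rd_data = (uint8_t *)(uintptr_t)c->rd_data;
        return n;
}

int main(void)
{
        struct req_compat c = { .rd_size = 4, .rd_data = 0x1000 };
        struct req_native n = translate(&c);

        printf("compat: %zu bytes, native: %zu bytes, rd_data=%p\n",
               sizeof(c), sizeof(n), (void *)n.rd_data);
        return 0;
}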
BT_ERR("Can't register HIDP socket"); + proto_unregister(&hidp_proto); + return err; +} + +void __exit hidp_cleanup_sockets(void) +{ + if (bt_sock_unregister(BTPROTO_HIDP) < 0) + BT_ERR("Can't unregister HIDP socket"); + + proto_unregister(&hidp_proto); +} diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c new file mode 100644 index 0000000..b86413b --- /dev/null +++ b/net/bluetooth/l2cap.c @@ -0,0 +1,4084 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth L2CAP core and sockets. */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#define VERSION "2.14" + +static int enable_ertm = 0; +static int max_transmit = L2CAP_DEFAULT_MAX_TX; + +static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; +static u8 l2cap_fixed_chan[8] = { 0x02, }; + +static const struct proto_ops l2cap_sock_ops; + +static struct bt_sock_list l2cap_sk_list = { + .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock) +}; + +static void __l2cap_sock_close(struct sock *sk, int reason); +static void l2cap_sock_close(struct sock *sk); +static void l2cap_sock_kill(struct sock *sk); + +static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, + u8 code, u8 ident, u16 dlen, void *data); + +/* ---- L2CAP timers ---- */ +static void l2cap_sock_timeout(unsigned long arg) +{ + struct sock *sk = (struct sock *) arg; + int reason; + + BT_DBG("sock %p state %d", sk, sk->sk_state); + + bh_lock_sock(sk); + + if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG) + reason = ECONNREFUSED; + else if (sk->sk_state == BT_CONNECT && + l2cap_pi(sk)->sec_level != BT_SECURITY_SDP) + reason = ECONNREFUSED; + else + reason = ETIMEDOUT; + + __l2cap_sock_close(sk, reason); + + bh_unlock_sock(sk); + + l2cap_sock_kill(sk); + sock_put(sk); +} + +static void l2cap_sock_set_timer(struct sock *sk, long timeout) +{ + BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout); + sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout); +} + +static void l2cap_sock_clear_timer(struct sock *sk) +{ + BT_DBG("sock %p state %d", sk, sk->sk_state); + sk_stop_timer(sk, &sk->sk_timer); +} + +/* ---- L2CAP channels ---- */ +static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid) +{ + struct sock *s; + for (s = l->head; s; s = l2cap_pi(s)->next_c) { + if (l2cap_pi(s)->dcid == cid) + break; + } + return s; +} + 
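l2cap_sock_timeout() above maps the channel state at the moment the timer fires onto the error that user space will see: a refused connection while the channel is still being brought up, a plain timeout otherwise, with an exception for the SDP security level. A pure-function restatement of that decision (the enum and the sdp_probe flag are illustrative names, not the kernel's):

#include <errno.h>
#include <stdio.h>

enum chan_state { ST_CONNECT, ST_CONNECT2, ST_CONFIG, ST_CONNECTED, ST_DISCONN };

/* Same policy as l2cap_sock_timeout(): a timer firing while the
 * channel is connected/configuring, or while an ordinary (non-SDP)
 * connect is pending, reads as "connection refused"; everything else
 * is reported as a timeout. */
static int timeout_errno(enum chan_state state, int sdp_probe)
{
        if (state == ST_CONNECTED || state == ST_CONFIG)
                return ECONNREFUSED;
        if (state == ST_CONNECT && !sdp_probe)
                return ECONNREFUSED;
        return ETIMEDOUT;
}

int main(void)
{
        printf("%d %d\n",
               timeout_errno(ST_CONFIG, 0), timeout_errno(ST_DISCONN, 0));
        return 0;
}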
+static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) +{ + struct sock *s; + for (s = l->head; s; s = l2cap_pi(s)->next_c) { + if (l2cap_pi(s)->scid == cid) + break; + } + return s; +} + +/* Find channel with given SCID. + * Returns locked socket */ +static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) +{ + struct sock *s; + read_lock(&l->lock); + s = __l2cap_get_chan_by_scid(l, cid); + if (s) + bh_lock_sock(s); + read_unlock(&l->lock); + return s; +} + +static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) +{ + struct sock *s; + for (s = l->head; s; s = l2cap_pi(s)->next_c) { + if (l2cap_pi(s)->ident == ident) + break; + } + return s; +} + +static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) +{ + struct sock *s; + read_lock(&l->lock); + s = __l2cap_get_chan_by_ident(l, ident); + if (s) + bh_lock_sock(s); + read_unlock(&l->lock); + return s; +} + +static u16 l2cap_alloc_cid(struct l2cap_chan_list *l) +{ + u16 cid = L2CAP_CID_DYN_START; + + for (; cid < L2CAP_CID_DYN_END; cid++) { + if (!__l2cap_get_chan_by_scid(l, cid)) + return cid; + } + + return 0; +} + +static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk) +{ + sock_hold(sk); + + if (l->head) + l2cap_pi(l->head)->prev_c = sk; + + l2cap_pi(sk)->next_c = l->head; + l2cap_pi(sk)->prev_c = NULL; + l->head = sk; +} + +static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk) +{ + struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c; + + write_lock_bh(&l->lock); + if (sk == l->head) + l->head = next; + + if (next) + l2cap_pi(next)->prev_c = prev; + if (prev) + l2cap_pi(prev)->next_c = next; + write_unlock_bh(&l->lock); + + __sock_put(sk); +} + +static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) +{ + struct l2cap_chan_list *l = &conn->chan_list; + + BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, + l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid); + + conn->disc_reason = 0x13; + + l2cap_pi(sk)->conn = conn; + + if (sk->sk_type == SOCK_SEQPACKET) { + /* Alloc CID for connection-oriented socket */ + l2cap_pi(sk)->scid = l2cap_alloc_cid(l); + } else if (sk->sk_type == SOCK_DGRAM) { + /* Connectionless socket */ + l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS; + l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS; + l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; + } else { + /* Raw socket can send/recv signalling messages only */ + l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING; + l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING; + l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; + } + + __l2cap_chan_link(l, sk); + + if (parent) + bt_accept_enqueue(parent, sk); +} + +/* Delete channel. + * Must be called on the locked socket. 
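l2cap_alloc_cid() above hands a new connection-oriented channel the first source CID that no other channel on the same ACL link is using, by linearly scanning the dynamic range. A stand-alone sketch of that scan; the 0x0040 lower bound is the start of the dynamically allocated CID space in the L2CAP spec, but the exact L2CAP_CID_DYN_* macro values are not shown in this patch, so treat the bounds here as assumptions:

#include <stdint.h>
#include <stdio.h>

#define CID_DYN_START 0x0040   /* first dynamically allocated CID (assumed) */
#define CID_DYN_END   0xffff   /* upper bound of the scan (assumed) */

/* Return the first CID in the dynamic range that no entry of used[]
 * occupies, or 0 if the range is exhausted - the same linear scan
 * l2cap_alloc_cid() performs over the connection's channel list. */
static uint16_t alloc_cid(const uint16_t *used, int nused)
{
        uint32_t cid;

        for (cid = CID_DYN_START; cid < CID_DYN_END; cid++) {
                int i, taken = 0;

                for (i = 0; i < nused; i++)
                        if (used[i] == cid)
                                taken = 1;
                if (!taken)
                        return (uint16_t)cid;
        }
        return 0;
}

int main(void)
{
        uint16_t used[2] = { 0x0040, 0x0041 };

        printf("next free CID: 0x%04x\n", (unsigned)alloc_cid(used, 2));
        return 0;
}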
*/ +static void l2cap_chan_del(struct sock *sk, int err) +{ + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + struct sock *parent = bt_sk(sk)->parent; + + l2cap_sock_clear_timer(sk); + + BT_DBG("sk %p, conn %p, err %d", sk, conn, err); + + if (conn) { + /* Unlink from channel list */ + l2cap_chan_unlink(&conn->chan_list, sk); + l2cap_pi(sk)->conn = NULL; + hci_conn_put(conn->hcon); + } + + sk->sk_state = BT_CLOSED; + sock_set_flag(sk, SOCK_ZAPPED); + + if (err) + sk->sk_err = err; + + if (parent) { + bt_accept_unlink(sk); + parent->sk_data_ready(parent, 0); + } else + sk->sk_state_change(sk); +} + +/* Service level security */ +static inline int l2cap_check_security(struct sock *sk) +{ + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + __u8 auth_type; + + if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { + if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) + auth_type = HCI_AT_NO_BONDING_MITM; + else + auth_type = HCI_AT_NO_BONDING; + + if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) + l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; + } else { + switch (l2cap_pi(sk)->sec_level) { + case BT_SECURITY_HIGH: + auth_type = HCI_AT_GENERAL_BONDING_MITM; + break; + case BT_SECURITY_MEDIUM: + auth_type = HCI_AT_GENERAL_BONDING; + break; + default: + auth_type = HCI_AT_NO_BONDING; + break; + } + } + + return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level, + auth_type); +} + +static inline u8 l2cap_get_ident(struct l2cap_conn *conn) +{ + u8 id; + + /* Get next available identificator. + * 1 - 128 are used by kernel. + * 129 - 199 are reserved. + * 200 - 254 are used by utilities like l2ping, etc. + */ + + spin_lock_bh(&conn->lock); + + if (++conn->tx_ident > 128) + conn->tx_ident = 1; + + id = conn->tx_ident; + + spin_unlock_bh(&conn->lock); + + return id; +} + +static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) +{ + struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); + + BT_DBG("code 0x%2.2x", code); + + if (!skb) + return -ENOMEM; + + return hci_send_acl(conn->hcon, skb, 0); +} + +static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) +{ + struct sk_buff *skb; + struct l2cap_hdr *lh; + struct l2cap_conn *conn = pi->conn; + int count, hlen = L2CAP_HDR_SIZE + 2; + + if (pi->fcs == L2CAP_FCS_CRC16) + hlen += 2; + + BT_DBG("pi %p, control 0x%2.2x", pi, control); + + count = min_t(unsigned int, conn->mtu, hlen); + control |= L2CAP_CTRL_FRAME_TYPE; + + skb = bt_skb_alloc(count, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); + lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); + lh->cid = cpu_to_le16(pi->dcid); + put_unaligned_le16(control, skb_put(skb, 2)); + + if (pi->fcs == L2CAP_FCS_CRC16) { + u16 fcs = crc16(0, (u8 *)lh, count - 2); + put_unaligned_le16(fcs, skb_put(skb, 2)); + } + + return hci_send_acl(pi->conn->hcon, skb, 0); +} + +static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control) +{ + if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) + control |= L2CAP_SUPER_RCV_NOT_READY; + else + control |= L2CAP_SUPER_RCV_READY; + + control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; + + return l2cap_send_sframe(pi, control); +} + +static void l2cap_do_start(struct sock *sk) +{ + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { + if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) + return; + + if (l2cap_check_security(sk)) { + struct l2cap_conn_req req; + req.scid = cpu_to_le16(l2cap_pi(sk)->scid); + 
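l2cap_get_ident() above allocates the identifier that ties an L2CAP signalling response back to its request, cycling through 1 to 128 and leaving the higher values to reserved use and to utilities such as l2ping, exactly as its comment says. A tiny lock-free restatement of the wrap-around (the kernel version takes a spinlock around the same two lines):

#include <stdio.h>

/* Signalling command identifiers: 1-128 belong to the kernel, 129-199
 * are reserved, 200-254 are left to user-space tools. */
static unsigned char next_ident(unsigned char *tx_ident)
{
        if (++(*tx_ident) > 128)
                *tx_ident = 1;
        return *tx_ident;
}

int main(void)
{
        unsigned char ident = 127;   /* pretend 127 commands were sent */
        int i;

        for (i = 0; i < 4; i++)
                printf("%u ", next_ident(&ident));   /* prints: 128 1 2 3 */
        printf("\n");
        return 0;
}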
req.psm = l2cap_pi(sk)->psm; + + l2cap_pi(sk)->ident = l2cap_get_ident(conn); + + l2cap_send_cmd(conn, l2cap_pi(sk)->ident, + L2CAP_CONN_REQ, sizeof(req), &req); + } + } else { + struct l2cap_info_req req; + req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; + conn->info_ident = l2cap_get_ident(conn); + + mod_timer(&conn->info_timer, jiffies + + msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); + + l2cap_send_cmd(conn, conn->info_ident, + L2CAP_INFO_REQ, sizeof(req), &req); + } +} + +static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk) +{ + struct l2cap_disconn_req req; + + req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); + req.scid = cpu_to_le16(l2cap_pi(sk)->scid); + l2cap_send_cmd(conn, l2cap_get_ident(conn), + L2CAP_DISCONN_REQ, sizeof(req), &req); +} + +/* ---- L2CAP connections ---- */ +static void l2cap_conn_start(struct l2cap_conn *conn) +{ + struct l2cap_chan_list *l = &conn->chan_list; + struct sock *sk; + + BT_DBG("conn %p", conn); + + read_lock(&l->lock); + + for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { + bh_lock_sock(sk); + + if (sk->sk_type != SOCK_SEQPACKET) { + bh_unlock_sock(sk); + continue; + } + + if (sk->sk_state == BT_CONNECT) { + if (l2cap_check_security(sk)) { + struct l2cap_conn_req req; + req.scid = cpu_to_le16(l2cap_pi(sk)->scid); + req.psm = l2cap_pi(sk)->psm; + + l2cap_pi(sk)->ident = l2cap_get_ident(conn); + + l2cap_send_cmd(conn, l2cap_pi(sk)->ident, + L2CAP_CONN_REQ, sizeof(req), &req); + } + } else if (sk->sk_state == BT_CONNECT2) { + struct l2cap_conn_rsp rsp; + rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); + rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); + + if (l2cap_check_security(sk)) { + if (bt_sk(sk)->defer_setup) { + struct sock *parent = bt_sk(sk)->parent; + rsp.result = cpu_to_le16(L2CAP_CR_PEND); + rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); + parent->sk_data_ready(parent, 0); + + } else { + sk->sk_state = BT_CONFIG; + rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + } + } else { + rsp.result = cpu_to_le16(L2CAP_CR_PEND); + rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); + } + + l2cap_send_cmd(conn, l2cap_pi(sk)->ident, + L2CAP_CONN_RSP, sizeof(rsp), &rsp); + } + + bh_unlock_sock(sk); + } + + read_unlock(&l->lock); +} + +static void l2cap_conn_ready(struct l2cap_conn *conn) +{ + struct l2cap_chan_list *l = &conn->chan_list; + struct sock *sk; + + BT_DBG("conn %p", conn); + + read_lock(&l->lock); + + for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { + bh_lock_sock(sk); + + if (sk->sk_type != SOCK_SEQPACKET) { + l2cap_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + sk->sk_state_change(sk); + } else if (sk->sk_state == BT_CONNECT) + l2cap_do_start(sk); + + bh_unlock_sock(sk); + } + + read_unlock(&l->lock); +} + +/* Notify sockets that we cannot guaranty reliability anymore */ +static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) +{ + struct l2cap_chan_list *l = &conn->chan_list; + struct sock *sk; + + BT_DBG("conn %p", conn); + + read_lock(&l->lock); + + for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { + if (l2cap_pi(sk)->force_reliable) + sk->sk_err = err; + } + + read_unlock(&l->lock); +} + +static void l2cap_info_timeout(unsigned long arg) +{ + struct l2cap_conn *conn = (void *) arg; + + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; + conn->info_ident = 0; + + l2cap_conn_start(conn); +} + +static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) +{ + struct l2cap_conn *conn = hcon->l2cap_data; + 
+ if (conn || status) + return conn; + + conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); + if (!conn) + return NULL; + + hcon->l2cap_data = conn; + conn->hcon = hcon; + + BT_DBG("hcon %p conn %p", hcon, conn); + + conn->mtu = hcon->hdev->acl_mtu; + conn->src = &hcon->hdev->bdaddr; + conn->dst = &hcon->dst; + + conn->feat_mask = 0; + + spin_lock_init(&conn->lock); + rwlock_init(&conn->chan_list.lock); + + setup_timer(&conn->info_timer, l2cap_info_timeout, + (unsigned long) conn); + + conn->disc_reason = 0x13; + + return conn; +} + +static void l2cap_conn_del(struct hci_conn *hcon, int err) +{ + struct l2cap_conn *conn = hcon->l2cap_data; + struct sock *sk; + + if (!conn) + return; + + BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); + + kfree_skb(conn->rx_skb); + + /* Kill channels */ + while ((sk = conn->chan_list.head)) { + bh_lock_sock(sk); + l2cap_chan_del(sk, err); + bh_unlock_sock(sk); + l2cap_sock_kill(sk); + } + + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) + del_timer_sync(&conn->info_timer); + + hcon->l2cap_data = NULL; + kfree(conn); +} + +static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) +{ + struct l2cap_chan_list *l = &conn->chan_list; + write_lock_bh(&l->lock); + __l2cap_chan_add(conn, sk, parent); + write_unlock_bh(&l->lock); +} + +/* ---- Socket interface ---- */ +static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src) +{ + struct sock *sk; + struct hlist_node *node; + sk_for_each(sk, node, &l2cap_sk_list.head) + if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src)) + goto found; + sk = NULL; +found: + return sk; +} + +/* Find socket with psm and source bdaddr. + * Returns closest match. + */ +static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src) +{ + struct sock *sk = NULL, *sk1 = NULL; + struct hlist_node *node; + + sk_for_each(sk, node, &l2cap_sk_list.head) { + if (state && sk->sk_state != state) + continue; + + if (l2cap_pi(sk)->psm == psm) { + /* Exact match. */ + if (!bacmp(&bt_sk(sk)->src, src)) + break; + + /* Closest match */ + if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) + sk1 = sk; + } + } + return node ? sk : sk1; +} + +/* Find socket with given address (psm, src). + * Returns locked socket */ +static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src) +{ + struct sock *s; + read_lock(&l2cap_sk_list.lock); + s = __l2cap_get_sock_by_psm(state, psm, src); + if (s) + bh_lock_sock(s); + read_unlock(&l2cap_sk_list.lock); + return s; +} + +static void l2cap_sock_destruct(struct sock *sk) +{ + BT_DBG("sk %p", sk); + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + +static void l2cap_sock_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + BT_DBG("parent %p", parent); + + /* Close not yet accepted channels */ + while ((sk = bt_accept_dequeue(parent, NULL))) + l2cap_sock_close(sk); + + parent->sk_state = BT_CLOSED; + sock_set_flag(parent, SOCK_ZAPPED); +} + +/* Kill socket (only if zapped and orphan) + * Must be called on unlocked socket. 
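__l2cap_get_sock_by_psm() above resolves an incoming connection request to a listening socket: an exact (PSM, source address) match wins, otherwise a socket bound to the same PSM on BDADDR_ANY is accepted as the closest match. A self-contained sketch of that policy over a plain array (bdaddr and struct listener are made-up stand-ins for the kernel types):

#include <stdio.h>
#include <string.h>

/* Simplified 6-byte Bluetooth address; all-zero stands in for BDADDR_ANY. */
typedef struct { unsigned char b[6]; } bdaddr;

struct listener {
        unsigned short psm;
        bdaddr src;
};

static int addr_any(const bdaddr *a)
{
        static const bdaddr any;   /* zero-initialised */
        return memcmp(a, &any, sizeof(any)) == 0;
}

/* Exact (psm, src) match wins immediately; otherwise remember a
 * wildcard listener on that PSM as the closest match. */
static const struct listener *lookup(const struct listener *tbl, int n,
                                     unsigned short psm, const bdaddr *src)
{
        const struct listener *wildcard = NULL;
        int i;

        for (i = 0; i < n; i++) {
                if (tbl[i].psm != psm)
                        continue;
                if (memcmp(&tbl[i].src, src, sizeof(*src)) == 0)
                        return &tbl[i];          /* exact match */
                if (addr_any(&tbl[i].src))
                        wildcard = &tbl[i];      /* closest match */
        }
        return wildcard;
}

int main(void)
{
        struct listener tbl[2] = {
                { 0x1001, { { 0 } } },                  /* wildcard listener */
                { 0x1001, { { 1, 2, 3, 4, 5, 6 } } },   /* bound listener    */
        };
        bdaddr peer = { { 9, 9, 9, 9, 9, 9 } };

        printf("match: %s\n", lookup(tbl, 2, 0x1001, &peer) == &tbl[0]
               ? "wildcard" : "exact/none");
        return 0;
}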
+ */ +static void l2cap_sock_kill(struct sock *sk) +{ + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + return; + + BT_DBG("sk %p state %d", sk, sk->sk_state); + + /* Kill poor orphan */ + bt_sock_unlink(&l2cap_sk_list, sk); + sock_set_flag(sk, SOCK_DEAD); + sock_put(sk); +} + +static void __l2cap_sock_close(struct sock *sk, int reason) +{ + BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); + + switch (sk->sk_state) { + case BT_LISTEN: + l2cap_sock_cleanup_listen(sk); + break; + + case BT_CONNECTED: + case BT_CONFIG: + if (sk->sk_type == SOCK_SEQPACKET) { + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + + sk->sk_state = BT_DISCONN; + l2cap_sock_set_timer(sk, sk->sk_sndtimeo); + l2cap_send_disconn_req(conn, sk); + } else + l2cap_chan_del(sk, reason); + break; + + case BT_CONNECT2: + if (sk->sk_type == SOCK_SEQPACKET) { + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + struct l2cap_conn_rsp rsp; + __u16 result; + + if (bt_sk(sk)->defer_setup) + result = L2CAP_CR_SEC_BLOCK; + else + result = L2CAP_CR_BAD_PSM; + + rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); + rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); + rsp.result = cpu_to_le16(result); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + l2cap_send_cmd(conn, l2cap_pi(sk)->ident, + L2CAP_CONN_RSP, sizeof(rsp), &rsp); + } else + l2cap_chan_del(sk, reason); + break; + + case BT_CONNECT: + case BT_DISCONN: + l2cap_chan_del(sk, reason); + break; + + default: + sock_set_flag(sk, SOCK_ZAPPED); + break; + } +} + +/* Must be called on unlocked socket. */ +static void l2cap_sock_close(struct sock *sk) +{ + l2cap_sock_clear_timer(sk); + lock_sock(sk); + __l2cap_sock_close(sk, ECONNRESET); + release_sock(sk); + l2cap_sock_kill(sk); +} + +static void l2cap_sock_init(struct sock *sk, struct sock *parent) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + + BT_DBG("sk %p", sk); + + if (parent) { + sk->sk_type = parent->sk_type; + bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup; + + pi->imtu = l2cap_pi(parent)->imtu; + pi->omtu = l2cap_pi(parent)->omtu; + pi->mode = l2cap_pi(parent)->mode; + pi->fcs = l2cap_pi(parent)->fcs; + pi->sec_level = l2cap_pi(parent)->sec_level; + pi->role_switch = l2cap_pi(parent)->role_switch; + pi->force_reliable = l2cap_pi(parent)->force_reliable; + } else { + pi->imtu = L2CAP_DEFAULT_MTU; + pi->omtu = 0; + pi->mode = L2CAP_MODE_BASIC; + pi->fcs = L2CAP_FCS_CRC16; + pi->sec_level = BT_SECURITY_LOW; + pi->role_switch = 0; + pi->force_reliable = 0; + } + + /* Default config options */ + pi->conf_len = 0; + pi->flush_to = L2CAP_DEFAULT_FLUSH_TO; + skb_queue_head_init(TX_QUEUE(sk)); + skb_queue_head_init(SREJ_QUEUE(sk)); + INIT_LIST_HEAD(SREJ_LIST(sk)); +} + +static struct proto l2cap_proto = { + .name = "L2CAP", + .owner = THIS_MODULE, + .obj_size = sizeof(struct l2cap_pinfo) +}; + +static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto); + if (!sk) + return NULL; + + sock_init_data(sock, sk); + INIT_LIST_HEAD(&bt_sk(sk)->accept_q); + + sk->sk_destruct = l2cap_sock_destruct; + sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT); + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = proto; + sk->sk_state = BT_OPEN; + + setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk); + + bt_sock_link(&l2cap_sk_list, sk); + return sk; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) +static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) 
+#else +static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol) +#endif +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + sock->state = SS_UNCONNECTED; + + if (sock->type != SOCK_SEQPACKET && + sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) +#else + if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW)) +#endif + return -EPERM; + + sock->ops = &l2cap_sock_ops; + + sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC); + if (!sk) + return -ENOMEM; + + l2cap_sock_init(sk, NULL); + return 0; +} + +static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) +{ + struct sock *sk = sock->sk; + struct sockaddr_l2 la; + int len, err = 0; + + BT_DBG("sk %p", sk); + + if (!addr || addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + memset(&la, 0, sizeof(la)); + len = min_t(unsigned int, sizeof(la), alen); + memcpy(&la, addr, len); + + if (la.l2_cid) + return -EINVAL; + + lock_sock(sk); + + if (sk->sk_state != BT_OPEN) { + err = -EBADFD; + goto done; + } + + if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 && + !capable(CAP_NET_BIND_SERVICE)) { + err = -EACCES; + goto done; + } + + write_lock_bh(&l2cap_sk_list.lock); + + if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) { + err = -EADDRINUSE; + } else { + /* Save source address */ + bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); + l2cap_pi(sk)->psm = la.l2_psm; + l2cap_pi(sk)->sport = la.l2_psm; + sk->sk_state = BT_BOUND; + + if (__le16_to_cpu(la.l2_psm) == 0x0001 || + __le16_to_cpu(la.l2_psm) == 0x0003) + l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; + } + + write_unlock_bh(&l2cap_sk_list.lock); + +done: + release_sock(sk); + return err; +} + +static int l2cap_do_connect(struct sock *sk) +{ + bdaddr_t *src = &bt_sk(sk)->src; + bdaddr_t *dst = &bt_sk(sk)->dst; + struct l2cap_conn *conn; + struct hci_conn *hcon; + struct hci_dev *hdev; + __u8 auth_type; + int err; + + BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), + l2cap_pi(sk)->psm); + + hdev = hci_get_route(dst, src); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock_bh(hdev); + + err = -ENOMEM; + + if (sk->sk_type == SOCK_RAW) { + switch (l2cap_pi(sk)->sec_level) { + case BT_SECURITY_HIGH: + auth_type = HCI_AT_DEDICATED_BONDING_MITM; + break; + case BT_SECURITY_MEDIUM: + auth_type = HCI_AT_DEDICATED_BONDING; + break; + default: + auth_type = HCI_AT_NO_BONDING; + break; + } + } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { + if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) + auth_type = HCI_AT_NO_BONDING_MITM; + else + auth_type = HCI_AT_NO_BONDING; + + if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) + l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; + } else { + switch (l2cap_pi(sk)->sec_level) { + case BT_SECURITY_HIGH: + auth_type = HCI_AT_GENERAL_BONDING_MITM; + break; + case BT_SECURITY_MEDIUM: + auth_type = HCI_AT_GENERAL_BONDING; + break; + default: + auth_type = HCI_AT_NO_BONDING; + break; + } + } + + hcon = hci_connect(hdev, ACL_LINK, dst, + l2cap_pi(sk)->sec_level, auth_type); + if (!hcon) + goto done; + + conn = l2cap_conn_add(hcon, 0); + if (!conn) { + hci_conn_put(hcon); + goto done; + } + + err = 0; + + /* Update source addr of the socket */ + bacpy(src, conn->src); + + l2cap_chan_add(conn, sk, NULL); + + sk->sk_state = BT_CONNECT; + l2cap_sock_set_timer(sk, sk->sk_sndtimeo); + + if (hcon->state == BT_CONNECTED) { + if (sk->sk_type != SOCK_SEQPACKET) { + 
l2cap_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + } else + l2cap_do_start(sk); + } + +done: + hci_dev_unlock_bh(hdev); + hci_dev_put(hdev); + return err; +} + +static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) +{ + struct sock *sk = sock->sk; + struct sockaddr_l2 la; + int len, err = 0; + + BT_DBG("sk %p", sk); + + if (!addr || addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + memset(&la, 0, sizeof(la)); + len = min_t(unsigned int, sizeof(la), alen); + memcpy(&la, addr, len); + + if (la.l2_cid) + return -EINVAL; + + lock_sock(sk); + + if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) { + err = -EINVAL; + goto done; + } + + switch (l2cap_pi(sk)->mode) { + case L2CAP_MODE_BASIC: + break; + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + if (enable_ertm) + break; + /* fall through */ + default: + err = -ENOTSUPP; + goto done; + } + + switch (sk->sk_state) { + case BT_CONNECT: + case BT_CONNECT2: + case BT_CONFIG: + /* Already connecting */ + goto wait; + + case BT_CONNECTED: + /* Already connected */ + goto done; + + case BT_OPEN: + case BT_BOUND: + /* Can connect */ + break; + + default: + err = -EBADFD; + goto done; + } + + /* Set destination address and psm */ + bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); + l2cap_pi(sk)->psm = la.l2_psm; + + err = l2cap_do_connect(sk); + if (err) + goto done; + +wait: + err = bt_sock_wait_state(sk, BT_CONNECTED, + sock_sndtimeo(sk, flags & O_NONBLOCK)); +done: + release_sock(sk); + return err; +} + +static int l2cap_sock_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p backlog %d", sk, backlog); + + lock_sock(sk); + + if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) { + err = -EBADFD; + goto done; + } + + switch (l2cap_pi(sk)->mode) { + case L2CAP_MODE_BASIC: + break; + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + if (enable_ertm) + break; + /* fall through */ + default: + err = -ENOTSUPP; + goto done; + } + + if (!l2cap_pi(sk)->psm) { + bdaddr_t *src = &bt_sk(sk)->src; + u16 psm; + + err = -EINVAL; + + write_lock_bh(&l2cap_sk_list.lock); + + for (psm = 0x1001; psm < 0x1100; psm += 2) + if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) { + l2cap_pi(sk)->psm = cpu_to_le16(psm); + l2cap_pi(sk)->sport = cpu_to_le16(psm); + err = 0; + break; + } + + write_unlock_bh(&l2cap_sk_list.lock); + + if (err < 0) + goto done; + } + + sk->sk_max_ack_backlog = backlog; + sk->sk_ack_backlog = 0; + sk->sk_state = BT_LISTEN; + +done: + release_sock(sk); + return err; +} + +static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags) +{ + DECLARE_WAITQUEUE(wait, current); + struct sock *sk = sock->sk, *nsk; + long timeo; + int err = 0; + + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + goto done; + } + + timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + + BT_DBG("sk %p timeo %ld", sk, timeo); + + /* Wait for an incoming connection. (wake-one). 
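When l2cap_sock_listen() above is called on a socket that was never bound to a PSM, it picks one automatically by scanning the odd values 0x1001, 0x1003, ... 0x10ff for one that no other socket owns (valid dynamic PSMs have an odd least-significant octet, hence the step of 2). A stand-alone version of that scan over a plain array of already-bound PSMs:

#include <stdio.h>

/* Pick the first free odd PSM in 0x1001-0x10ff, or 0 if none is left,
 * mirroring the autobind loop in l2cap_sock_listen(). */
static unsigned short autobind_psm(const unsigned short *bound, int n)
{
        unsigned short psm;

        for (psm = 0x1001; psm < 0x1100; psm += 2) {
                int i, taken = 0;

                for (i = 0; i < n; i++)
                        if (bound[i] == psm)
                                taken = 1;
                if (!taken)
                        return psm;
        }
        return 0;   /* range exhausted */
}

int main(void)
{
        unsigned short bound[2] = { 0x1001, 0x1003 };

        printf("autobound PSM: 0x%04x\n", (unsigned)autobind_psm(bound, 2));
        return 0;
}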
*/ + add_wait_queue_exclusive(sk->sk_sleep, &wait); + while (!(nsk = bt_accept_dequeue(sk, newsock))) { + set_current_state(TASK_INTERRUPTIBLE); + if (!timeo) { + err = -EAGAIN; + break; + } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + } + set_current_state(TASK_RUNNING); + remove_wait_queue(sk->sk_sleep, &wait); + + if (err) + goto done; + + newsock->state = SS_CONNECTED; + + BT_DBG("new socket %p", nsk); + +done: + release_sock(sk); + return err; +} + +static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) +{ + struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; + struct sock *sk = sock->sk; + + BT_DBG("sock %p, sk %p", sock, sk); + + addr->sa_family = AF_BLUETOOTH; + *len = sizeof(struct sockaddr_l2); + + if (peer) { + la->l2_psm = l2cap_pi(sk)->psm; + bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst); + la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid); + } else { + la->l2_psm = l2cap_pi(sk)->sport; + bacpy(&la->l2_bdaddr, &bt_sk(sk)->src); + la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid); + } + + return 0; +} + +static void l2cap_monitor_timeout(unsigned long arg) +{ + struct sock *sk = (void *) arg; + u16 control; + + bh_lock_sock(sk); + if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { + l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk); + bh_unlock_sock(sk); + return; + } + + l2cap_pi(sk)->retry_count++; + __mod_monitor_timer(); + + control = L2CAP_CTRL_POLL; + l2cap_send_rr_or_rnr(l2cap_pi(sk), control); + bh_unlock_sock(sk); +} + +static void l2cap_retrans_timeout(unsigned long arg) +{ + struct sock *sk = (void *) arg; + u16 control; + + bh_lock_sock(sk); + l2cap_pi(sk)->retry_count = 1; + __mod_monitor_timer(); + + l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F; + + control = L2CAP_CTRL_POLL; + l2cap_send_rr_or_rnr(l2cap_pi(sk), control); + bh_unlock_sock(sk); +} + +static void l2cap_drop_acked_frames(struct sock *sk) +{ + struct sk_buff *skb; + + while ((skb = skb_peek(TX_QUEUE(sk)))) { + if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq) + break; + + skb = skb_dequeue(TX_QUEUE(sk)); + kfree_skb(skb); + + l2cap_pi(sk)->unacked_frames--; + } + + if (!l2cap_pi(sk)->unacked_frames) + del_timer(&l2cap_pi(sk)->retrans_timer); + + return; +} + +static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + int err; + + BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); + + err = hci_send_acl(pi->conn->hcon, skb, 0); + if (err < 0) + kfree_skb(skb); + + return err; +} + +static int l2cap_streaming_send(struct sock *sk) +{ + struct sk_buff *skb, *tx_skb; + struct l2cap_pinfo *pi = l2cap_pi(sk); + u16 control, fcs; + int err; + + while ((skb = sk->sk_send_head)) { + tx_skb = skb_clone(skb, GFP_ATOMIC); + + control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); + control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; + put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); + + if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { + fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); + put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); + } + + err = l2cap_do_send(sk, tx_skb); + if (err < 0) { + l2cap_send_disconn_req(pi->conn, sk); + return err; + } + + pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; + + if (skb_queue_is_last(TX_QUEUE(sk), skb)) + sk->sk_send_head = NULL; + else + 
sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); + + skb = skb_dequeue(TX_QUEUE(sk)); + kfree_skb(skb); + } + return 0; +} + +static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + struct sk_buff *skb, *tx_skb; + u16 control, fcs; + int err; + + skb = skb_peek(TX_QUEUE(sk)); + do { + if (bt_cb(skb)->tx_seq != tx_seq) { + if (skb_queue_is_last(TX_QUEUE(sk), skb)) + break; + skb = skb_queue_next(TX_QUEUE(sk), skb); + continue; + } + + if (pi->remote_max_tx && + bt_cb(skb)->retries == pi->remote_max_tx) { + l2cap_send_disconn_req(pi->conn, sk); + break; + } + + tx_skb = skb_clone(skb, GFP_ATOMIC); + bt_cb(skb)->retries++; + control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); + control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) + | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); + put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); + + if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { + fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); + put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); + } + + err = l2cap_do_send(sk, tx_skb); + if (err < 0) { + l2cap_send_disconn_req(pi->conn, sk); + return err; + } + break; + } while(1); + return 0; +} + +static int l2cap_ertm_send(struct sock *sk) +{ + struct sk_buff *skb, *tx_skb; + struct l2cap_pinfo *pi = l2cap_pi(sk); + u16 control, fcs; + int err; + + if (pi->conn_state & L2CAP_CONN_WAIT_F) + return 0; + + while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) && + !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) { + + if (pi->remote_max_tx && + bt_cb(skb)->retries == pi->remote_max_tx) { + l2cap_send_disconn_req(pi->conn, sk); + break; + } + + tx_skb = skb_clone(skb, GFP_ATOMIC); + + bt_cb(skb)->retries++; + + control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); + control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) + | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); + put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); + + + if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { + fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); + put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); + } + + err = l2cap_do_send(sk, tx_skb); + if (err < 0) { + l2cap_send_disconn_req(pi->conn, sk); + return err; + } + __mod_retrans_timer(); + + bt_cb(skb)->tx_seq = pi->next_tx_seq; + pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; + + pi->unacked_frames++; + + if (skb_queue_is_last(TX_QUEUE(sk), skb)) + sk->sk_send_head = NULL; + else + sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); + } + + return 0; +} + +static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb) +{ + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + struct sk_buff **frag; + int err, sent = 0; + + if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { + return -EFAULT; + } + + sent += count; + len -= count; + + /* Continuation fragments (no L2CAP header) */ + frag = &skb_shinfo(skb)->frag_list; + while (len) { + count = min_t(unsigned int, conn->mtu, len); + + *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err); + if (!*frag) + return -EFAULT; + if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) + return -EFAULT; + + sent += count; + len -= count; + + frag = &(*frag)->next; + } + + return sent; +} + +static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len) +{ + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + struct sk_buff *skb; + int err, count, hlen = L2CAP_HDR_SIZE + 2; + 
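When a channel has negotiated L2CAP_FCS_CRC16, the frame builders above leave two spare bytes at the tail and fill them with crc16(0, data, len - 2) in little-endian order. The sketch below assumes the conventional CRC-16 polynomial x^16 + x^15 + x^2 + 1 processed LSB-first (0xA001), which is what the kernel's crc16() helper implements; the frame bytes in main() are made up:

#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC-16, LSB-first, zero seed. */
static uint16_t crc16_sketch(uint16_t crc, const uint8_t *buf, size_t len)
{
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
        }
        return crc;
}

int main(void)
{
        /* A made-up frame with 2 spare bytes at the end for the FCS,
         * mirroring "fcs = crc16(0, data, len - 2)" in the code above. */
        uint8_t frame[8] = { 0x04, 0x00, 0x40, 0x00, 0x01, 0x02, 0, 0 };
        uint16_t fcs = crc16_sketch(0, frame, sizeof(frame) - 2);

        frame[6] = fcs & 0xff;     /* little-endian, like put_unaligned_le16 */
        frame[7] = fcs >> 8;
        printf("fcs=0x%04x\n", fcs);
        return 0;
}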
struct l2cap_hdr *lh; + + BT_DBG("sk %p len %d", sk, (int)len); + + count = min_t(unsigned int, (conn->mtu - hlen), len); + skb = bt_skb_send_alloc(sk, count + hlen, + msg->msg_flags & MSG_DONTWAIT, &err); + if (!skb) + return ERR_PTR(-ENOMEM); + + /* Create L2CAP header */ + lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); + lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); + lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); + put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2)); + + err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); + if (unlikely(err < 0)) { + kfree_skb(skb); + return ERR_PTR(err); + } + return skb; +} + +static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len) +{ + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + struct sk_buff *skb; + int err, count, hlen = L2CAP_HDR_SIZE; + struct l2cap_hdr *lh; + + BT_DBG("sk %p len %d", sk, (int)len); + + count = min_t(unsigned int, (conn->mtu - hlen), len); + skb = bt_skb_send_alloc(sk, count + hlen, + msg->msg_flags & MSG_DONTWAIT, &err); + if (!skb) + return ERR_PTR(-ENOMEM); + + /* Create L2CAP header */ + lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); + lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); + lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); + + err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); + if (unlikely(err < 0)) { + kfree_skb(skb); + return ERR_PTR(err); + } + return skb; +} + +static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen) +{ + struct l2cap_conn *conn = l2cap_pi(sk)->conn; + struct sk_buff *skb; + int err, count, hlen = L2CAP_HDR_SIZE + 2; + struct l2cap_hdr *lh; + + BT_DBG("sk %p len %d", sk, (int)len); + + if (sdulen) + hlen += 2; + + if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) + hlen += 2; + + count = min_t(unsigned int, (conn->mtu - hlen), len); + skb = bt_skb_send_alloc(sk, count + hlen, + msg->msg_flags & MSG_DONTWAIT, &err); + if (!skb) + return ERR_PTR(-ENOMEM); + + /* Create L2CAP header */ + lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); + lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); + lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); + put_unaligned_le16(control, skb_put(skb, 2)); + if (sdulen) + put_unaligned_le16(sdulen, skb_put(skb, 2)); + + err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); + if (unlikely(err < 0)) { + kfree_skb(skb); + return ERR_PTR(err); + } + + if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) + put_unaligned_le16(0, skb_put(skb, 2)); + + bt_cb(skb)->retries = 0; + return skb; +} + +static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + struct sk_buff *skb; + struct sk_buff_head sar_queue; + u16 control; + size_t size = 0; + + __skb_queue_head_init(&sar_queue); + control = L2CAP_SDU_START; + skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + __skb_queue_tail(&sar_queue, skb); + len -= pi->max_pdu_size; + size +=pi->max_pdu_size; + control = 0; + + while (len > 0) { + size_t buflen; + + if (len > pi->max_pdu_size) { + control |= L2CAP_SDU_CONTINUE; + buflen = pi->max_pdu_size; + } else { + control |= L2CAP_SDU_END; + buflen = len; + } + + skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0); + if (IS_ERR(skb)) { + skb_queue_purge(&sar_queue); + return PTR_ERR(skb); + } + + __skb_queue_tail(&sar_queue, skb); + len -= buflen; + size += buflen; + control = 0; + } + 
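l2cap_sar_segment_sdu() above is the segmentation half of ERTM/streaming mode: an SDU that does not fit into one PDU is emitted as a START segment (which also carries the total SDU length), any number of CONTINUE segments, and an END segment, each carrying at most max_pdu_size bytes of payload. A stand-alone sketch that only computes the cut points and labels, without building any skbs:

#include <stddef.h>
#include <stdio.h>

/* Print how an SDU of sdu_len bytes is cut into segments of at most
 * max_pdu bytes, following the same walk as l2cap_sar_segment_sdu(). */
static void segment(size_t sdu_len, size_t max_pdu)
{
        size_t off = 0;

        if (sdu_len <= max_pdu) {
                printf("UNSEGMENTED len=%zu\n", sdu_len);
                return;
        }

        printf("START    off=%zu len=%zu (sdu=%zu)\n", off, max_pdu, sdu_len);
        off += max_pdu;

        while (sdu_len - off > max_pdu) {
                printf("CONTINUE off=%zu len=%zu\n", off, max_pdu);
                off += max_pdu;
        }
        printf("END      off=%zu len=%zu\n", off, sdu_len - off);
}

int main(void)
{
        segment(100, 48);   /* START(48) CONTINUE(48) END(4) */
        segment(40, 48);    /* fits in one unsegmented I-frame */
        return 0;
}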
skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk)); + if (sk->sk_send_head == NULL) + sk->sk_send_head = sar_queue.next; + + return size; +} + +static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) +{ + struct sock *sk = sock->sk; + struct l2cap_pinfo *pi = l2cap_pi(sk); + struct sk_buff *skb; + u16 control; + int err; + + BT_DBG("sock %p, sk %p", sock, sk); + + err = sock_error(sk); + if (err) + return err; + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + /* Check outgoing MTU */ + if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC && + len > pi->omtu) + return -EINVAL; + + lock_sock(sk); + + if (sk->sk_state != BT_CONNECTED) { + err = -ENOTCONN; + goto done; + } + + /* Connectionless channel */ + if (sk->sk_type == SOCK_DGRAM) { + skb = l2cap_create_connless_pdu(sk, msg, len); + err = l2cap_do_send(sk, skb); + goto done; + } + + switch (pi->mode) { + case L2CAP_MODE_BASIC: + /* Create a basic PDU */ + skb = l2cap_create_basic_pdu(sk, msg, len); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + goto done; + } + + err = l2cap_do_send(sk, skb); + if (!err) + err = len; + break; + + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + /* Entire SDU fits into one PDU */ + if (len <= pi->max_pdu_size) { + control = L2CAP_SDU_UNSEGMENTED; + skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + goto done; + } + __skb_queue_tail(TX_QUEUE(sk), skb); + if (sk->sk_send_head == NULL) + sk->sk_send_head = skb; + } else { + /* Segment SDU into multiples PDUs */ + err = l2cap_sar_segment_sdu(sk, msg, len); + if (err < 0) + goto done; + } + + if (pi->mode == L2CAP_MODE_STREAMING) + err = l2cap_streaming_send(sk); + else + err = l2cap_ertm_send(sk); + + if (!err) + err = len; + break; + + default: + BT_DBG("bad state %1.1x", pi->mode); + err = -EINVAL; + } + +done: + release_sock(sk); + return err; +} + +static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) +{ + struct sock *sk = sock->sk; + + lock_sock(sk); + + if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { + struct l2cap_conn_rsp rsp; + + sk->sk_state = BT_CONFIG; + + rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); + rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); + rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident, + L2CAP_CONN_RSP, sizeof(rsp), &rsp); + + release_sock(sk); + return 0; + } + + release_sock(sk); + + return bt_sock_recvmsg(iocb, sock, msg, len, flags); +} + +static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct l2cap_options opts; + int len, err = 0; + u32 opt; + + BT_DBG("sk %p", sk); + + lock_sock(sk); + + switch (optname) { + case L2CAP_OPTIONS: + opts.imtu = l2cap_pi(sk)->imtu; + opts.omtu = l2cap_pi(sk)->omtu; + opts.flush_to = l2cap_pi(sk)->flush_to; + opts.mode = l2cap_pi(sk)->mode; + opts.fcs = l2cap_pi(sk)->fcs; + + len = min_t(unsigned int, sizeof(opts), optlen); + if (copy_from_user((char *) &opts, optval, len)) { + err = -EFAULT; + break; + } + + l2cap_pi(sk)->imtu = opts.imtu; + l2cap_pi(sk)->omtu = opts.omtu; + l2cap_pi(sk)->mode = opts.mode; + l2cap_pi(sk)->fcs = opts.fcs; + break; + + case L2CAP_LM: + if (get_user(opt, (u32 __user *) optval)) { + err = -EFAULT; + break; + } + + if (opt & L2CAP_LM_AUTH) + l2cap_pi(sk)->sec_level = BT_SECURITY_LOW; + 
if (opt & L2CAP_LM_ENCRYPT) + l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM; + if (opt & L2CAP_LM_SECURE) + l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH; + + l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER); + l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE); + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) +static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) +#else +static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) +#endif +{ + struct sock *sk = sock->sk; + struct bt_security sec; + int len, err = 0; + u32 opt; + + BT_DBG("sk %p", sk); + + if (level == SOL_L2CAP) + return l2cap_sock_setsockopt_old(sock, optname, optval, optlen); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + lock_sock(sk); + + switch (optname) { + case BT_SECURITY: + if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) { + err = -EINVAL; + break; + } + + sec.level = BT_SECURITY_LOW; + + len = min_t(unsigned int, sizeof(sec), optlen); + if (copy_from_user((char *) &sec, optval, len)) { + err = -EFAULT; + break; + } + + if (sec.level < BT_SECURITY_LOW || + sec.level > BT_SECURITY_HIGH) { + err = -EINVAL; + break; + } + + l2cap_pi(sk)->sec_level = sec.level; + break; + + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (get_user(opt, (u32 __user *) optval)) { + err = -EFAULT; + break; + } + + bt_sk(sk)->defer_setup = opt; + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct l2cap_options opts; + struct l2cap_conninfo cinfo; + int len, err = 0; + u32 opt; + + BT_DBG("sk %p", sk); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case L2CAP_OPTIONS: + opts.imtu = l2cap_pi(sk)->imtu; + opts.omtu = l2cap_pi(sk)->omtu; + opts.flush_to = l2cap_pi(sk)->flush_to; + opts.mode = l2cap_pi(sk)->mode; + opts.fcs = l2cap_pi(sk)->fcs; + + len = min_t(unsigned int, len, sizeof(opts)); + if (copy_to_user(optval, (char *) &opts, len)) + err = -EFAULT; + + break; + + case L2CAP_LM: + switch (l2cap_pi(sk)->sec_level) { + case BT_SECURITY_LOW: + opt = L2CAP_LM_AUTH; + break; + case BT_SECURITY_MEDIUM: + opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT; + break; + case BT_SECURITY_HIGH: + opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | + L2CAP_LM_SECURE; + break; + default: + opt = 0; + break; + } + + if (l2cap_pi(sk)->role_switch) + opt |= L2CAP_LM_MASTER; + + if (l2cap_pi(sk)->force_reliable) + opt |= L2CAP_LM_RELIABLE; + + if (put_user(opt, (u32 __user *) optval)) + err = -EFAULT; + break; + + case L2CAP_CONNINFO: + if (sk->sk_state != BT_CONNECTED && + !(sk->sk_state == BT_CONNECT2 && + bt_sk(sk)->defer_setup)) { + err = -ENOTCONN; + break; + } + + cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle; + memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3); + + len = min_t(unsigned int, len, sizeof(cinfo)); + if (copy_to_user(optval, (char *) &cinfo, len)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) 
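The two *_old socket-option handlers above keep the legacy L2CAP_LM bitmask interface alive by translating it to and from the newer BT_SECURITY levels. The sketch below restates both directions; the LM_* bit values and the enum are illustrative placeholders, not the numeric constants from the Bluetooth headers:

#include <stdio.h>

/* Illustrative values only. */
#define LM_AUTH    0x01
#define LM_ENCRYPT 0x02
#define LM_SECURE  0x04

enum sec_level { SEC_LOW, SEC_MEDIUM, SEC_HIGH };

/* Old-style link-mode flags -> security level, as in
 * l2cap_sock_setsockopt_old(): the strongest flag present wins. */
static enum sec_level lm_to_sec(unsigned flags, enum sec_level current)
{
        if (flags & LM_AUTH)
                current = SEC_LOW;
        if (flags & LM_ENCRYPT)
                current = SEC_MEDIUM;
        if (flags & LM_SECURE)
                current = SEC_HIGH;
        return current;
}

/* Security level -> link-mode flags, as in l2cap_sock_getsockopt_old(). */
static unsigned sec_to_lm(enum sec_level level)
{
        switch (level) {
        case SEC_LOW:    return LM_AUTH;
        case SEC_MEDIUM: return LM_AUTH | LM_ENCRYPT;
        case SEC_HIGH:   return LM_AUTH | LM_ENCRYPT | LM_SECURE;
        }
        return 0;
}

int main(void)
{
        printf("%d 0x%x\n",
               lm_to_sec(LM_AUTH | LM_ENCRYPT, SEC_LOW), sec_to_lm(SEC_HIGH));
        return 0;
}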
+{ + struct sock *sk = sock->sk; + struct bt_security sec; + int len, err = 0; + + BT_DBG("sk %p", sk); + + if (level == SOL_L2CAP) + return l2cap_sock_getsockopt_old(sock, optname, optval, optlen); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case BT_SECURITY: + if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) { + err = -EINVAL; + break; + } + + sec.level = l2cap_pi(sk)->sec_level; + + len = min_t(unsigned int, len, sizeof(sec)); + if (copy_to_user(optval, (char *) &sec, len)) + err = -EFAULT; + + break; + + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int l2cap_sock_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + lock_sock(sk); + if (!sk->sk_shutdown) { + sk->sk_shutdown = SHUTDOWN_MASK; + l2cap_sock_clear_timer(sk); + __l2cap_sock_close(sk, 0); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) + err = bt_sock_wait_state(sk, BT_CLOSED, + sk->sk_lingertime); + } + release_sock(sk); + return err; +} + +static int l2cap_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int err; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + err = l2cap_sock_shutdown(sock, 2); + + sock_orphan(sk); + l2cap_sock_kill(sk); + return err; +} + +static void l2cap_chan_ready(struct sock *sk) +{ + struct sock *parent = bt_sk(sk)->parent; + + BT_DBG("sk %p, parent %p", sk, parent); + + l2cap_pi(sk)->conf_state = 0; + l2cap_sock_clear_timer(sk); + + if (!parent) { + /* Outgoing channel. + * Wake up socket sleeping on connect. + */ + sk->sk_state = BT_CONNECTED; + sk->sk_state_change(sk); + } else { + /* Incoming channel. + * Wake up socket sleeping on accept. 
+ */ + parent->sk_data_ready(parent, 0); + } +} + +/* Copy frame to all raw sockets on that connection */ +static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct l2cap_chan_list *l = &conn->chan_list; + struct sk_buff *nskb; + struct sock *sk; + + BT_DBG("conn %p", conn); + + read_lock(&l->lock); + for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { + if (sk->sk_type != SOCK_RAW) + continue; + + /* Don't send frame to the socket it came from */ + if (skb->sk == sk) + continue; + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + continue; + + if (sock_queue_rcv_skb(sk, nskb)) + kfree_skb(nskb); + } + read_unlock(&l->lock); +} + +/* ---- L2CAP signalling commands ---- */ +static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, + u8 code, u8 ident, u16 dlen, void *data) +{ + struct sk_buff *skb, **frag; + struct l2cap_cmd_hdr *cmd; + struct l2cap_hdr *lh; + int len, count; + + BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", + conn, code, ident, dlen); + + len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; + count = min_t(unsigned int, conn->mtu, len); + + skb = bt_skb_alloc(count, GFP_ATOMIC); + if (!skb) + return NULL; + + lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); + lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); + lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); + + cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); + cmd->code = code; + cmd->ident = ident; + cmd->len = cpu_to_le16(dlen); + + if (dlen) { + count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE; + memcpy(skb_put(skb, count), data, count); + data += count; + } + + len -= skb->len; + + /* Continuation fragments (no L2CAP header) */ + frag = &skb_shinfo(skb)->frag_list; + while (len) { + count = min_t(unsigned int, conn->mtu, len); + + *frag = bt_skb_alloc(count, GFP_ATOMIC); + if (!*frag) + goto fail; + + memcpy(skb_put(*frag, count), data, count); + + len -= count; + data += count; + + frag = &(*frag)->next; + } + + return skb; + +fail: + kfree_skb(skb); + return NULL; +} + +static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val) +{ + struct l2cap_conf_opt *opt = *ptr; + int len; + + len = L2CAP_CONF_OPT_SIZE + opt->len; + *ptr += len; + + *type = opt->type; + *olen = opt->len; + + switch (opt->len) { + case 1: + *val = *((u8 *) opt->val); + break; + + case 2: + *val = __le16_to_cpu(*((__le16 *) opt->val)); + break; + + case 4: + *val = __le32_to_cpu(*((__le32 *) opt->val)); + break; + + default: + *val = (unsigned long) opt->val; + break; + } + + BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val); + return len; +} + +static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val) +{ + struct l2cap_conf_opt *opt = *ptr; + + BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val); + + opt->type = type; + opt->len = len; + + switch (len) { + case 1: + *((u8 *) opt->val) = val; + break; + + case 2: + *((__le16 *) opt->val) = cpu_to_le16(val); + break; + + case 4: + *((__le32 *) opt->val) = cpu_to_le32(val); + break; + + default: + memcpy(opt->val, (void *) val, len); + break; + } + + *ptr += L2CAP_CONF_OPT_SIZE + len; +} + +static inline void l2cap_ertm_init(struct sock *sk) +{ + l2cap_pi(sk)->expected_ack_seq = 0; + l2cap_pi(sk)->unacked_frames = 0; + l2cap_pi(sk)->buffer_seq = 0; + l2cap_pi(sk)->num_to_ack = 0; + + setup_timer(&l2cap_pi(sk)->retrans_timer, + l2cap_retrans_timeout, (unsigned long) sk); + setup_timer(&l2cap_pi(sk)->monitor_timer, + l2cap_monitor_timeout, (unsigned long) sk); + + 
__skb_queue_head_init(SREJ_QUEUE(sk)); +} + +static int l2cap_mode_supported(__u8 mode, __u32 feat_mask) +{ + u32 local_feat_mask = l2cap_feat_mask; + if (enable_ertm) + local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; + + switch (mode) { + case L2CAP_MODE_ERTM: + return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; + case L2CAP_MODE_STREAMING: + return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; + default: + return 0x00; + } +} + +static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) +{ + switch (mode) { + case L2CAP_MODE_STREAMING: + case L2CAP_MODE_ERTM: + if (l2cap_mode_supported(mode, remote_feat_mask)) + return mode; + /* fall through */ + default: + return L2CAP_MODE_BASIC; + } +} + +static int l2cap_build_conf_req(struct sock *sk, void *data) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + struct l2cap_conf_req *req = data; + struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; + void *ptr = req->data; + + BT_DBG("sk %p", sk); + + if (pi->num_conf_req || pi->num_conf_rsp) + goto done; + + switch (pi->mode) { + case L2CAP_MODE_STREAMING: + case L2CAP_MODE_ERTM: + pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; + if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) + l2cap_send_disconn_req(pi->conn, sk); + break; + default: + pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); + break; + } + +done: + switch (pi->mode) { + case L2CAP_MODE_BASIC: + if (pi->imtu != L2CAP_DEFAULT_MTU) + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); + break; + + case L2CAP_MODE_ERTM: + rfc.mode = L2CAP_MODE_ERTM; + rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; + rfc.max_transmit = max_transmit; + rfc.retrans_timeout = 0; + rfc.monitor_timeout = 0; + rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, + sizeof(rfc), (unsigned long) &rfc); + + if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) + break; + + if (pi->fcs == L2CAP_FCS_NONE || + pi->conf_state & L2CAP_CONF_NO_FCS_RECV) { + pi->fcs = L2CAP_FCS_NONE; + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs); + } + break; + + case L2CAP_MODE_STREAMING: + rfc.mode = L2CAP_MODE_STREAMING; + rfc.txwin_size = 0; + rfc.max_transmit = 0; + rfc.retrans_timeout = 0; + rfc.monitor_timeout = 0; + rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, + sizeof(rfc), (unsigned long) &rfc); + + if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) + break; + + if (pi->fcs == L2CAP_FCS_NONE || + pi->conf_state & L2CAP_CONF_NO_FCS_RECV) { + pi->fcs = L2CAP_FCS_NONE; + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs); + } + break; + } + + /* FIXME: Need actual value of the flush timeout */ + //if (flush_to != L2CAP_DEFAULT_FLUSH_TO) + // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to); + + req->dcid = cpu_to_le16(pi->dcid); + req->flags = cpu_to_le16(0); + + return ptr - data; +} + +static int l2cap_parse_conf_req(struct sock *sk, void *data) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + struct l2cap_conf_rsp *rsp = data; + void *ptr = rsp->data; + void *req = pi->conf_req; + int len = pi->conf_len; + int type, hint, olen; + unsigned long val; + struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; + u16 mtu = L2CAP_DEFAULT_MTU; + u16 result = L2CAP_CONF_SUCCESS; + + BT_DBG("sk %p", sk); + + while (len >= L2CAP_CONF_OPT_SIZE) { + len -= l2cap_get_conf_opt(&req, &type, &olen, &val); + + hint = type & L2CAP_CONF_HINT; + type &= L2CAP_CONF_MASK; + + switch (type) { + case L2CAP_CONF_MTU: + mtu = val; + 
break; + + case L2CAP_CONF_FLUSH_TO: + pi->flush_to = val; + break; + + case L2CAP_CONF_QOS: + break; + + case L2CAP_CONF_RFC: + if (olen == sizeof(rfc)) + memcpy(&rfc, (void *) val, olen); + break; + + case L2CAP_CONF_FCS: + if (val == L2CAP_FCS_NONE) + pi->conf_state |= L2CAP_CONF_NO_FCS_RECV; + + break; + + default: + if (hint) + break; + + result = L2CAP_CONF_UNKNOWN; + *((u8 *) ptr++) = type; + break; + } + } + + if (pi->num_conf_rsp || pi->num_conf_req) + goto done; + + switch (pi->mode) { + case L2CAP_MODE_STREAMING: + case L2CAP_MODE_ERTM: + pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; + if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) + return -ECONNREFUSED; + break; + default: + pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); + break; + } + +done: + if (pi->mode != rfc.mode) { + result = L2CAP_CONF_UNACCEPT; + rfc.mode = pi->mode; + + if (pi->num_conf_rsp == 1) + return -ECONNREFUSED; + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, + sizeof(rfc), (unsigned long) &rfc); + } + + + if (result == L2CAP_CONF_SUCCESS) { + /* Configure output options and let the other side know + * which ones we don't like. */ + + if (mtu < L2CAP_DEFAULT_MIN_MTU) + result = L2CAP_CONF_UNACCEPT; + else { + pi->omtu = mtu; + pi->conf_state |= L2CAP_CONF_MTU_DONE; + } + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); + + switch (rfc.mode) { + case L2CAP_MODE_BASIC: + pi->fcs = L2CAP_FCS_NONE; + pi->conf_state |= L2CAP_CONF_MODE_DONE; + break; + + case L2CAP_MODE_ERTM: + pi->remote_tx_win = rfc.txwin_size; + pi->remote_max_tx = rfc.max_transmit; + pi->max_pdu_size = rfc.max_pdu_size; + + rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; + rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; + + pi->conf_state |= L2CAP_CONF_MODE_DONE; + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, + sizeof(rfc), (unsigned long) &rfc); + + break; + + case L2CAP_MODE_STREAMING: + pi->remote_tx_win = rfc.txwin_size; + pi->max_pdu_size = rfc.max_pdu_size; + + pi->conf_state |= L2CAP_CONF_MODE_DONE; + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, + sizeof(rfc), (unsigned long) &rfc); + + break; + + default: + result = L2CAP_CONF_UNACCEPT; + + memset(&rfc, 0, sizeof(rfc)); + rfc.mode = pi->mode; + } + + if (result == L2CAP_CONF_SUCCESS) + pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; + } + rsp->scid = cpu_to_le16(pi->dcid); + rsp->result = cpu_to_le16(result); + rsp->flags = cpu_to_le16(0x0000); + + return ptr - data; +} + +static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + struct l2cap_conf_req *req = data; + void *ptr = req->data; + int type, olen; + unsigned long val; + struct l2cap_conf_rfc rfc; + + BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data); + + while (len >= L2CAP_CONF_OPT_SIZE) { + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); + + switch (type) { + case L2CAP_CONF_MTU: + if (val < L2CAP_DEFAULT_MIN_MTU) { + *result = L2CAP_CONF_UNACCEPT; + pi->omtu = L2CAP_DEFAULT_MIN_MTU; + } else + pi->omtu = val; + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); + break; + + case L2CAP_CONF_FLUSH_TO: + pi->flush_to = val; + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, + 2, pi->flush_to); + break; + + case L2CAP_CONF_RFC: + if (olen == sizeof(rfc)) + memcpy(&rfc, (void *)val, olen); + + if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) && + rfc.mode != pi->mode) + return -ECONNREFUSED; + + pi->mode = rfc.mode; + pi->fcs = 0; + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, + sizeof(rfc), (unsigned long) &rfc); + 
break; + } + } + + if (*result == L2CAP_CONF_SUCCESS) { + switch (rfc.mode) { + case L2CAP_MODE_ERTM: + pi->remote_tx_win = rfc.txwin_size; + pi->retrans_timeout = rfc.retrans_timeout; + pi->monitor_timeout = rfc.monitor_timeout; + pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size); + break; + case L2CAP_MODE_STREAMING: + pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size); + break; + } + } + + req->dcid = cpu_to_le16(pi->dcid); + req->flags = cpu_to_le16(0x0000); + + return ptr - data; +} + +static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags) +{ + struct l2cap_conf_rsp *rsp = data; + void *ptr = rsp->data; + + BT_DBG("sk %p", sk); + + rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid); + rsp->result = cpu_to_le16(result); + rsp->flags = cpu_to_le16(flags); + + return ptr - data; +} + +static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) +{ + struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data; + + if (rej->reason != 0x0000) + return 0; + + if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && + cmd->ident == conn->info_ident) { + del_timer(&conn->info_timer); + + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; + conn->info_ident = 0; + + l2cap_conn_start(conn); + } + + return 0; +} + +static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) +{ + struct l2cap_chan_list *list = &conn->chan_list; + struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; + struct l2cap_conn_rsp rsp; + struct sock *sk, *parent; + int result, status = L2CAP_CS_NO_INFO; + + u16 dcid = 0, scid = __le16_to_cpu(req->scid); + __le16 psm = req->psm; + + BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid); + + /* Check if we have socket listening on psm */ + parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src); + if (!parent) { + result = L2CAP_CR_BAD_PSM; + goto sendresp; + } + + /* Check if the ACL is secure enough (if not SDP) */ + if (psm != cpu_to_le16(0x0001) && + !hci_conn_check_link_mode(conn->hcon)) { + conn->disc_reason = 0x05; + result = L2CAP_CR_SEC_BLOCK; + goto response; + } + + result = L2CAP_CR_NO_MEM; + + /* Check for backlog size */ + if (sk_acceptq_is_full(parent)) { + BT_DBG("backlog full %d", parent->sk_ack_backlog); + goto response; + } + + sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC); + if (!sk) + goto response; + + write_lock_bh(&list->lock); + + /* Check if we already have channel with that dcid */ + if (__l2cap_get_chan_by_dcid(list, scid)) { + write_unlock_bh(&list->lock); + sock_set_flag(sk, SOCK_ZAPPED); + l2cap_sock_kill(sk); + goto response; + } + + hci_conn_hold(conn->hcon); + + l2cap_sock_init(sk, parent); + bacpy(&bt_sk(sk)->src, conn->src); + bacpy(&bt_sk(sk)->dst, conn->dst); + l2cap_pi(sk)->psm = psm; + l2cap_pi(sk)->dcid = scid; + + __l2cap_chan_add(conn, sk, parent); + dcid = l2cap_pi(sk)->scid; + + l2cap_sock_set_timer(sk, sk->sk_sndtimeo); + + l2cap_pi(sk)->ident = cmd->ident; + + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { + if (l2cap_check_security(sk)) { + if (bt_sk(sk)->defer_setup) { + sk->sk_state = BT_CONNECT2; + result = L2CAP_CR_PEND; + status = L2CAP_CS_AUTHOR_PEND; + parent->sk_data_ready(parent, 0); + } else { + sk->sk_state = BT_CONFIG; + result = L2CAP_CR_SUCCESS; + status = L2CAP_CS_NO_INFO; + } + } else { + sk->sk_state = BT_CONNECT2; + result = L2CAP_CR_PEND; + status = L2CAP_CS_AUTHEN_PEND; + } + } else { + sk->sk_state = BT_CONNECT2; + result = L2CAP_CR_PEND; + status = L2CAP_CS_NO_INFO; + } + + 
write_unlock_bh(&list->lock); + +response: + bh_unlock_sock(parent); + +sendresp: + rsp.scid = cpu_to_le16(scid); + rsp.dcid = cpu_to_le16(dcid); + rsp.result = cpu_to_le16(result); + rsp.status = cpu_to_le16(status); + l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); + + if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { + struct l2cap_info_req info; + info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; + conn->info_ident = l2cap_get_ident(conn); + + mod_timer(&conn->info_timer, jiffies + + msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); + + l2cap_send_cmd(conn, conn->info_ident, + L2CAP_INFO_REQ, sizeof(info), &info); + } + + return 0; +} + +static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) +{ + struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; + u16 scid, dcid, result, status; + struct sock *sk; + u8 req[128]; + + scid = __le16_to_cpu(rsp->scid); + dcid = __le16_to_cpu(rsp->dcid); + result = __le16_to_cpu(rsp->result); + status = __le16_to_cpu(rsp->status); + + BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status); + + if (scid) { + sk = l2cap_get_chan_by_scid(&conn->chan_list, scid); + if (!sk) + return 0; + } else { + sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident); + if (!sk) + return 0; + } + + switch (result) { + case L2CAP_CR_SUCCESS: + sk->sk_state = BT_CONFIG; + l2cap_pi(sk)->ident = 0; + l2cap_pi(sk)->dcid = dcid; + l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; + + l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND; + + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, + l2cap_build_conf_req(sk, req), req); + l2cap_pi(sk)->num_conf_req++; + break; + + case L2CAP_CR_PEND: + l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; + break; + + default: + l2cap_chan_del(sk, ECONNREFUSED); + break; + } + + bh_unlock_sock(sk); + return 0; +} + +static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) +{ + struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; + u16 dcid, flags; + u8 rsp[64]; + struct sock *sk; + int len; + + dcid = __le16_to_cpu(req->dcid); + flags = __le16_to_cpu(req->flags); + + BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); + + sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid); + if (!sk) + return -ENOENT; + + if (sk->sk_state == BT_DISCONN) + goto unlock; + + /* Reject if config buffer is too small. */ + len = cmd_len - sizeof(*req); + if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) { + l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, + l2cap_build_conf_rsp(sk, rsp, + L2CAP_CONF_REJECT, flags), rsp); + goto unlock; + } + + /* Store config. */ + memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len); + l2cap_pi(sk)->conf_len += len; + + if (flags & 0x0001) { + /* Incomplete config. Send empty response. */ + l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, + l2cap_build_conf_rsp(sk, rsp, + L2CAP_CONF_SUCCESS, 0x0001), rsp); + goto unlock; + } + + /* Complete config. */ + len = l2cap_parse_conf_req(sk, rsp); + if (len < 0) { + l2cap_send_disconn_req(conn, sk); + goto unlock; + } + + l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); + l2cap_pi(sk)->num_conf_rsp++; + + /* Reset config buffer. 
*/ + l2cap_pi(sk)->conf_len = 0; + + if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE)) + goto unlock; + + if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { + if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) || + l2cap_pi(sk)->fcs != L2CAP_FCS_NONE) + l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16; + + sk->sk_state = BT_CONNECTED; + + l2cap_pi(sk)->next_tx_seq = 0; + l2cap_pi(sk)->expected_tx_seq = 0; + __skb_queue_head_init(TX_QUEUE(sk)); + if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) + l2cap_ertm_init(sk); + + l2cap_chan_ready(sk); + goto unlock; + } + + if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { + u8 buf[64]; + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, + l2cap_build_conf_req(sk, buf), buf); + l2cap_pi(sk)->num_conf_req++; + } + +unlock: + bh_unlock_sock(sk); + return 0; +} + +static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) +{ + struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; + u16 scid, flags, result; + struct sock *sk; + + scid = __le16_to_cpu(rsp->scid); + flags = __le16_to_cpu(rsp->flags); + result = __le16_to_cpu(rsp->result); + + BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", + scid, flags, result); + + sk = l2cap_get_chan_by_scid(&conn->chan_list, scid); + if (!sk) + return 0; + + switch (result) { + case L2CAP_CONF_SUCCESS: + break; + + case L2CAP_CONF_UNACCEPT: + if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { + int len = cmd->len - sizeof(*rsp); + char req[64]; + + /* throw out any old stored conf requests */ + result = L2CAP_CONF_SUCCESS; + len = l2cap_parse_conf_rsp(sk, rsp->data, + len, req, &result); + if (len < 0) { + l2cap_send_disconn_req(conn, sk); + goto done; + } + + l2cap_send_cmd(conn, l2cap_get_ident(conn), + L2CAP_CONF_REQ, len, req); + l2cap_pi(sk)->num_conf_req++; + if (result != L2CAP_CONF_SUCCESS) + goto done; + break; + } + + default: + sk->sk_state = BT_DISCONN; + sk->sk_err = ECONNRESET; + l2cap_sock_set_timer(sk, HZ * 5); + l2cap_send_disconn_req(conn, sk); + goto done; + } + + if (flags & 0x01) + goto done; + + l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; + + if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { + if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) || + l2cap_pi(sk)->fcs != L2CAP_FCS_NONE) + l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16; + + sk->sk_state = BT_CONNECTED; + l2cap_pi(sk)->next_tx_seq = 0; + l2cap_pi(sk)->expected_tx_seq = 0; + __skb_queue_head_init(TX_QUEUE(sk)); + if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) + l2cap_ertm_init(sk); + + l2cap_chan_ready(sk); + } + +done: + bh_unlock_sock(sk); + return 0; +} + +static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) +{ + struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; + struct l2cap_disconn_rsp rsp; + u16 dcid, scid; + struct sock *sk; + + scid = __le16_to_cpu(req->scid); + dcid = __le16_to_cpu(req->dcid); + + BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); + + sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid); + if (!sk) + return 0; + + rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); + rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); + l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); + + sk->sk_shutdown = SHUTDOWN_MASK; + + skb_queue_purge(TX_QUEUE(sk)); + + if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { + skb_queue_purge(SREJ_QUEUE(sk)); + del_timer(&l2cap_pi(sk)->retrans_timer); + del_timer(&l2cap_pi(sk)->monitor_timer); + } + + l2cap_chan_del(sk, ECONNRESET); + 
bh_unlock_sock(sk); + + l2cap_sock_kill(sk); + return 0; +} + +static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) +{ + struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; + u16 dcid, scid; + struct sock *sk; + + scid = __le16_to_cpu(rsp->scid); + dcid = __le16_to_cpu(rsp->dcid); + + BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); + + sk = l2cap_get_chan_by_scid(&conn->chan_list, scid); + if (!sk) + return 0; + + skb_queue_purge(TX_QUEUE(sk)); + + if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { + skb_queue_purge(SREJ_QUEUE(sk)); + del_timer(&l2cap_pi(sk)->retrans_timer); + del_timer(&l2cap_pi(sk)->monitor_timer); + } + + l2cap_chan_del(sk, 0); + bh_unlock_sock(sk); + + l2cap_sock_kill(sk); + return 0; +} + +static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) +{ + struct l2cap_info_req *req = (struct l2cap_info_req *) data; + u16 type; + + type = __le16_to_cpu(req->type); + + BT_DBG("type 0x%4.4x", type); + + if (type == L2CAP_IT_FEAT_MASK) { + u8 buf[8]; + u32 feat_mask = l2cap_feat_mask; + struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; + rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); + if (enable_ertm) + feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING + | L2CAP_FEAT_FCS; + put_unaligned_le32(feat_mask, rsp->data); + l2cap_send_cmd(conn, cmd->ident, + L2CAP_INFO_RSP, sizeof(buf), buf); + } else if (type == L2CAP_IT_FIXED_CHAN) { + u8 buf[12]; + struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; + rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); + rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); + memcpy(buf + 4, l2cap_fixed_chan, 8); + l2cap_send_cmd(conn, cmd->ident, + L2CAP_INFO_RSP, sizeof(buf), buf); + } else { + struct l2cap_info_rsp rsp; + rsp.type = cpu_to_le16(type); + rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); + l2cap_send_cmd(conn, cmd->ident, + L2CAP_INFO_RSP, sizeof(rsp), &rsp); + } + + return 0; +} + +static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) +{ + struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; + u16 type, result; + + type = __le16_to_cpu(rsp->type); + result = __le16_to_cpu(rsp->result); + + BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); + + del_timer(&conn->info_timer); + + if (type == L2CAP_IT_FEAT_MASK) { + conn->feat_mask = get_unaligned_le32(rsp->data); + + if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { + struct l2cap_info_req req; + req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); + + conn->info_ident = l2cap_get_ident(conn); + + l2cap_send_cmd(conn, conn->info_ident, + L2CAP_INFO_REQ, sizeof(req), &req); + } else { + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; + conn->info_ident = 0; + + l2cap_conn_start(conn); + } + } else if (type == L2CAP_IT_FIXED_CHAN) { + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; + conn->info_ident = 0; + + l2cap_conn_start(conn); + } + + return 0; +} + +static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) +{ + u8 *data = skb->data; + int len = skb->len; + struct l2cap_cmd_hdr cmd; + int err = 0; + + l2cap_raw_recv(conn, skb); + + while (len >= L2CAP_CMD_HDR_SIZE) { + u16 cmd_len; + memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE); + data += L2CAP_CMD_HDR_SIZE; + len -= L2CAP_CMD_HDR_SIZE; + + cmd_len = le16_to_cpu(cmd.len); + + BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident); + + if (cmd_len > len || !cmd.ident) { + BT_DBG("corrupted 
command"); + break; + } + + switch (cmd.code) { + case L2CAP_COMMAND_REJ: + l2cap_command_rej(conn, &cmd, data); + break; + + case L2CAP_CONN_REQ: + err = l2cap_connect_req(conn, &cmd, data); + break; + + case L2CAP_CONN_RSP: + err = l2cap_connect_rsp(conn, &cmd, data); + break; + + case L2CAP_CONF_REQ: + err = l2cap_config_req(conn, &cmd, cmd_len, data); + break; + + case L2CAP_CONF_RSP: + err = l2cap_config_rsp(conn, &cmd, data); + break; + + case L2CAP_DISCONN_REQ: + err = l2cap_disconnect_req(conn, &cmd, data); + break; + + case L2CAP_DISCONN_RSP: + err = l2cap_disconnect_rsp(conn, &cmd, data); + break; + + case L2CAP_ECHO_REQ: + l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data); + break; + + case L2CAP_ECHO_RSP: + break; + + case L2CAP_INFO_REQ: + err = l2cap_information_req(conn, &cmd, data); + break; + + case L2CAP_INFO_RSP: + err = l2cap_information_rsp(conn, &cmd, data); + break; + + default: + BT_ERR("Unknown signaling command 0x%2.2x", cmd.code); + err = -EINVAL; + break; + } + + if (err) { + struct l2cap_cmd_rej rej; + BT_DBG("error %d", err); + + /* FIXME: Map err to a valid reason */ + rej.reason = cpu_to_le16(0); + l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); + } + + data += cmd_len; + len -= cmd_len; + } + + kfree_skb(skb); +} + +static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb) +{ + u16 our_fcs, rcv_fcs; + int hdr_size = L2CAP_HDR_SIZE + 2; + + if (pi->fcs == L2CAP_FCS_CRC16) { + skb_trim(skb, skb->len - 2); + rcv_fcs = get_unaligned_le16(skb->data + skb->len); + our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); + + if (our_fcs != rcv_fcs) + return -EINVAL; + } + return 0; +} + +static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar) +{ + struct sk_buff *next_skb; + + bt_cb(skb)->tx_seq = tx_seq; + bt_cb(skb)->sar = sar; + + next_skb = skb_peek(SREJ_QUEUE(sk)); + if (!next_skb) { + __skb_queue_tail(SREJ_QUEUE(sk), skb); + return; + } + + do { + if (bt_cb(next_skb)->tx_seq > tx_seq) { + __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb); + return; + } + + if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb)) + break; + + } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb))); + + __skb_queue_tail(SREJ_QUEUE(sk), skb); +} + +static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + struct sk_buff *_skb; + int err = -EINVAL; + + switch (control & L2CAP_CTRL_SAR) { + case L2CAP_SDU_UNSEGMENTED: + if (pi->conn_state & L2CAP_CONN_SAR_SDU) { + kfree_skb(pi->sdu); + break; + } + + err = sock_queue_rcv_skb(sk, skb); + if (!err) + return 0; + + break; + + case L2CAP_SDU_START: + if (pi->conn_state & L2CAP_CONN_SAR_SDU) { + kfree_skb(pi->sdu); + break; + } + + pi->sdu_len = get_unaligned_le16(skb->data); + skb_pull(skb, 2); + + pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC); + if (!pi->sdu) { + err = -ENOMEM; + break; + } + + memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); + + pi->conn_state |= L2CAP_CONN_SAR_SDU; + pi->partial_sdu_len = skb->len; + err = 0; + break; + + case L2CAP_SDU_CONTINUE: + if (!(pi->conn_state & L2CAP_CONN_SAR_SDU)) + break; + + memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); + + pi->partial_sdu_len += skb->len; + if (pi->partial_sdu_len > pi->sdu_len) + kfree_skb(pi->sdu); + else + err = 0; + + break; + + case L2CAP_SDU_END: + if (!(pi->conn_state & L2CAP_CONN_SAR_SDU)) + break; + + memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); + + 
pi->conn_state &= ~L2CAP_CONN_SAR_SDU; + pi->partial_sdu_len += skb->len; + + if (pi->partial_sdu_len == pi->sdu_len) { + _skb = skb_clone(pi->sdu, GFP_ATOMIC); + err = sock_queue_rcv_skb(sk, _skb); + if (err < 0) + kfree_skb(_skb); + } + kfree_skb(pi->sdu); + err = 0; + + break; + } + + kfree_skb(skb); + return err; +} + +static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq) +{ + struct sk_buff *skb; + u16 control = 0; + + while((skb = skb_peek(SREJ_QUEUE(sk)))) { + if (bt_cb(skb)->tx_seq != tx_seq) + break; + + skb = skb_dequeue(SREJ_QUEUE(sk)); + control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; + l2cap_sar_reassembly_sdu(sk, skb, control); + l2cap_pi(sk)->buffer_seq_srej = + (l2cap_pi(sk)->buffer_seq_srej + 1) % 64; + tx_seq++; + } +} + +static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + struct srej_list *l, *tmp; + u16 control; + + list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) { + if (l->tx_seq == tx_seq) { + list_del(&l->list); + kfree(l); + return; + } + control = L2CAP_SUPER_SELECT_REJECT; + control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; + l2cap_send_sframe(pi, control); + list_del(&l->list); + list_add_tail(&l->list, SREJ_LIST(sk)); + } +} + +static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + struct srej_list *new; + u16 control; + + while (tx_seq != pi->expected_tx_seq) { + control = L2CAP_SUPER_SELECT_REJECT; + control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; + if (pi->conn_state & L2CAP_CONN_SEND_PBIT) { + control |= L2CAP_CTRL_POLL; + pi->conn_state &= ~L2CAP_CONN_SEND_PBIT; + } + l2cap_send_sframe(pi, control); + + new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); + new->tx_seq = pi->expected_tx_seq++; + list_add_tail(&new->list, SREJ_LIST(sk)); + } + pi->expected_tx_seq++; +} + +static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + u8 tx_seq = __get_txseq(rx_control); + u8 req_seq = __get_reqseq(rx_control); + u16 tx_control = 0; + u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; + int err = 0; + + BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); + + pi->expected_ack_seq = req_seq; + l2cap_drop_acked_frames(sk); + + if (tx_seq == pi->expected_tx_seq) + goto expected; + + if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { + struct srej_list *first; + + first = list_first_entry(SREJ_LIST(sk), + struct srej_list, list); + if (tx_seq == first->tx_seq) { + l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); + l2cap_check_srej_gap(sk, tx_seq); + + list_del(&first->list); + kfree(first); + + if (list_empty(SREJ_LIST(sk))) { + pi->buffer_seq = pi->buffer_seq_srej; + pi->conn_state &= ~L2CAP_CONN_SREJ_SENT; + } + } else { + struct srej_list *l; + l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); + + list_for_each_entry(l, SREJ_LIST(sk), list) { + if (l->tx_seq == tx_seq) { + l2cap_resend_srejframe(sk, tx_seq); + return 0; + } + } + l2cap_send_srejframe(sk, tx_seq); + } + } else { + pi->conn_state |= L2CAP_CONN_SREJ_SENT; + + INIT_LIST_HEAD(SREJ_LIST(sk)); + pi->buffer_seq_srej = pi->buffer_seq; + + __skb_queue_head_init(SREJ_QUEUE(sk)); + l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); + + pi->conn_state |= L2CAP_CONN_SEND_PBIT; + + l2cap_send_srejframe(sk, tx_seq); + } + return 0; + +expected: + pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; + + if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { + l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); + 
return 0; + } + + if (rx_control & L2CAP_CTRL_FINAL) { + if (pi->conn_state & L2CAP_CONN_REJ_ACT) + pi->conn_state &= ~L2CAP_CONN_REJ_ACT; + else { + sk->sk_send_head = TX_QUEUE(sk)->next; + pi->next_tx_seq = pi->expected_ack_seq; + l2cap_ertm_send(sk); + } + } + + pi->buffer_seq = (pi->buffer_seq + 1) % 64; + + err = l2cap_sar_reassembly_sdu(sk, skb, rx_control); + if (err < 0) + return err; + + pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK; + if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) { + tx_control |= L2CAP_SUPER_RCV_READY; + tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; + l2cap_send_sframe(pi, tx_control); + } + return 0; +} + +static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) +{ + struct l2cap_pinfo *pi = l2cap_pi(sk); + u8 tx_seq = __get_reqseq(rx_control); + + BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); + + switch (rx_control & L2CAP_CTRL_SUPERVISE) { + case L2CAP_SUPER_RCV_READY: + if (rx_control & L2CAP_CTRL_POLL) { + u16 control = L2CAP_CTRL_FINAL; + control |= L2CAP_SUPER_RCV_READY | + (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT); + l2cap_send_sframe(l2cap_pi(sk), control); + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; + + } else if (rx_control & L2CAP_CTRL_FINAL) { + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; + pi->expected_ack_seq = tx_seq; + l2cap_drop_acked_frames(sk); + + if (pi->conn_state & L2CAP_CONN_REJ_ACT) + pi->conn_state &= ~L2CAP_CONN_REJ_ACT; + else { + sk->sk_send_head = TX_QUEUE(sk)->next; + pi->next_tx_seq = pi->expected_ack_seq; + l2cap_ertm_send(sk); + } + + if (!(pi->conn_state & L2CAP_CONN_WAIT_F)) + break; + + pi->conn_state &= ~L2CAP_CONN_WAIT_F; + del_timer(&pi->monitor_timer); + + if (pi->unacked_frames > 0) + __mod_retrans_timer(); + } else { + pi->expected_ack_seq = tx_seq; + l2cap_drop_acked_frames(sk); + + if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && + (pi->unacked_frames > 0)) + __mod_retrans_timer(); + + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; + l2cap_ertm_send(sk); + } + break; + + case L2CAP_SUPER_REJECT: + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; + + pi->expected_ack_seq = __get_reqseq(rx_control); + l2cap_drop_acked_frames(sk); + + if (rx_control & L2CAP_CTRL_FINAL) { + if (pi->conn_state & L2CAP_CONN_REJ_ACT) + pi->conn_state &= ~L2CAP_CONN_REJ_ACT; + else { + sk->sk_send_head = TX_QUEUE(sk)->next; + pi->next_tx_seq = pi->expected_ack_seq; + l2cap_ertm_send(sk); + } + } else { + sk->sk_send_head = TX_QUEUE(sk)->next; + pi->next_tx_seq = pi->expected_ack_seq; + l2cap_ertm_send(sk); + + if (pi->conn_state & L2CAP_CONN_WAIT_F) { + pi->srej_save_reqseq = tx_seq; + pi->conn_state |= L2CAP_CONN_REJ_ACT; + } + } + + break; + + case L2CAP_SUPER_SELECT_REJECT: + pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; + + if (rx_control & L2CAP_CTRL_POLL) { + pi->expected_ack_seq = tx_seq; + l2cap_drop_acked_frames(sk); + l2cap_retransmit_frame(sk, tx_seq); + l2cap_ertm_send(sk); + if (pi->conn_state & L2CAP_CONN_WAIT_F) { + pi->srej_save_reqseq = tx_seq; + pi->conn_state |= L2CAP_CONN_SREJ_ACT; + } + } else if (rx_control & L2CAP_CTRL_FINAL) { + if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) && + pi->srej_save_reqseq == tx_seq) + pi->conn_state &= ~L2CAP_CONN_SREJ_ACT; + else + l2cap_retransmit_frame(sk, tx_seq); + } + else { + l2cap_retransmit_frame(sk, tx_seq); + if (pi->conn_state & L2CAP_CONN_WAIT_F) { + pi->srej_save_reqseq = tx_seq; + pi->conn_state |= L2CAP_CONN_SREJ_ACT; + } + } + break; + + case L2CAP_SUPER_RCV_NOT_READY: + pi->conn_state |= 
L2CAP_CONN_REMOTE_BUSY; + pi->expected_ack_seq = tx_seq; + l2cap_drop_acked_frames(sk); + + del_timer(&l2cap_pi(sk)->retrans_timer); + if (rx_control & L2CAP_CTRL_POLL) { + u16 control = L2CAP_CTRL_FINAL; + l2cap_send_rr_or_rnr(l2cap_pi(sk), control); + } + break; + } + + return 0; +} + +static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) +{ + struct sock *sk; + struct l2cap_pinfo *pi; + u16 control, len; + u8 tx_seq; + + sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); + if (!sk) { + BT_DBG("unknown cid 0x%4.4x", cid); + goto drop; + } + + pi = l2cap_pi(sk); + + BT_DBG("sk %p, len %d", sk, skb->len); + + if (sk->sk_state != BT_CONNECTED) + goto drop; + + switch (pi->mode) { + case L2CAP_MODE_BASIC: + /* If socket recv buffers overflows we drop data here + * which is *bad* because L2CAP has to be reliable. + * But we don't have any other choice. L2CAP doesn't + * provide flow control mechanism. */ + + if (pi->imtu < skb->len) + goto drop; + + if (!sock_queue_rcv_skb(sk, skb)) + goto done; + break; + + case L2CAP_MODE_ERTM: + control = get_unaligned_le16(skb->data); + skb_pull(skb, 2); + len = skb->len; + + if (__is_sar_start(control)) + len -= 2; + + if (pi->fcs == L2CAP_FCS_CRC16) + len -= 2; + + /* + * We can just drop the corrupted I-frame here. + * Receiver will miss it and start proper recovery + * procedures and ask retransmission. + */ + if (len > L2CAP_DEFAULT_MAX_PDU_SIZE) + goto drop; + + if (l2cap_check_fcs(pi, skb)) + goto drop; + + if (__is_iframe(control)) + l2cap_data_channel_iframe(sk, control, skb); + else + l2cap_data_channel_sframe(sk, control, skb); + + goto done; + + case L2CAP_MODE_STREAMING: + control = get_unaligned_le16(skb->data); + skb_pull(skb, 2); + len = skb->len; + + if (__is_sar_start(control)) + len -= 2; + + if (pi->fcs == L2CAP_FCS_CRC16) + len -= 2; + + if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control)) + goto drop; + + if (l2cap_check_fcs(pi, skb)) + goto drop; + + tx_seq = __get_txseq(control); + + if (pi->expected_tx_seq == tx_seq) + pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; + else + pi->expected_tx_seq = tx_seq + 1; + + l2cap_sar_reassembly_sdu(sk, skb, control); + + goto done; + + default: + BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode); + break; + } + +drop: + kfree_skb(skb); + +done: + if (sk) + bh_unlock_sock(sk); + + return 0; +} + +static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) +{ + struct sock *sk; + + sk = l2cap_get_sock_by_psm(0, psm, conn->src); + if (!sk) + goto drop; + + BT_DBG("sk %p, len %d", sk, skb->len); + + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) + goto drop; + + if (l2cap_pi(sk)->imtu < skb->len) + goto drop; + + if (!sock_queue_rcv_skb(sk, skb)) + goto done; + +drop: + kfree_skb(skb); + +done: + if (sk) + bh_unlock_sock(sk); + return 0; +} + +static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct l2cap_hdr *lh = (void *) skb->data; + u16 cid, len; + __le16 psm; + + skb_pull(skb, L2CAP_HDR_SIZE); + cid = __le16_to_cpu(lh->cid); + len = __le16_to_cpu(lh->len); + + if (len != skb->len) { + kfree_skb(skb); + return; + } + + BT_DBG("len %d, cid 0x%4.4x", len, cid); + + switch (cid) { + case L2CAP_CID_SIGNALING: + l2cap_sig_channel(conn, skb); + break; + + case L2CAP_CID_CONN_LESS: + psm = get_unaligned_le16(skb->data); + skb_pull(skb, 2); + l2cap_conless_channel(conn, psm, skb); + break; + + default: + l2cap_data_channel(conn, cid, skb); + break; + } +} + 
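
The setsockopt()/getsockopt() handlers earlier in this file are the user-visible surface of the receive and configuration paths above. As an illustration only — not part of the patch — here is a minimal, hypothetical userspace sketch of how a client might request an encrypted L2CAP connection through the BT_SECURITY option this code implements. It assumes the standard libbluetooth headers (<bluetooth/bluetooth.h>, <bluetooth/l2cap.h>) and linking with -lbluetooth; the helper name and its parameters are invented for the example, and error handling is kept minimal.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

/* Connect to "dst" (e.g. "00:11:22:33:44:55") on the given PSM over an
 * encrypted link. BT_SECURITY_MEDIUM is stored as sec_level in l2cap_pinfo,
 * which the legacy L2CAP_LM getsockopt above reports back as AUTH|ENCRYPT. */
static int l2cap_connect_secure(const char *dst, unsigned short psm)
{
        struct sockaddr_l2 addr;
        struct bt_security sec;
        int sk;

        sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
        if (sk < 0)
                return -1;

        /* Ask the kernel for an authenticated, encrypted link. */
        memset(&sec, 0, sizeof(sec));
        sec.level = BT_SECURITY_MEDIUM;
        if (setsockopt(sk, SOL_BLUETOOTH, BT_SECURITY, &sec, sizeof(sec)) < 0)
                goto fail;

        memset(&addr, 0, sizeof(addr));
        addr.l2_family = AF_BLUETOOTH;
        addr.l2_psm = htobs(psm);
        str2ba(dst, &addr.l2_bdaddr);

        if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0)
                goto fail;

        return sk;

fail:
        close(sk);
        return -1;
}
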
+/* ---- L2CAP interface with lower layer (HCI) ---- */ + +static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +{ + int exact = 0, lm1 = 0, lm2 = 0; + register struct sock *sk; + struct hlist_node *node; + + if (type != ACL_LINK) + return 0; + + BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); + + /* Find listening sockets and check their link_mode */ + read_lock(&l2cap_sk_list.lock); + sk_for_each(sk, node, &l2cap_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { + lm1 |= HCI_LM_ACCEPT; + if (l2cap_pi(sk)->role_switch) + lm1 |= HCI_LM_MASTER; + exact++; + } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { + lm2 |= HCI_LM_ACCEPT; + if (l2cap_pi(sk)->role_switch) + lm2 |= HCI_LM_MASTER; + } + } + read_unlock(&l2cap_sk_list.lock); + + return exact ? lm1 : lm2; +} + +static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) +{ + struct l2cap_conn *conn; + + BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); + + if (hcon->type != ACL_LINK) + return 0; + + if (!status) { + conn = l2cap_conn_add(hcon, status); + if (conn) + l2cap_conn_ready(conn); + } else + l2cap_conn_del(hcon, bt_err(status)); + + return 0; +} + +static int l2cap_disconn_ind(struct hci_conn *hcon) +{ + struct l2cap_conn *conn = hcon->l2cap_data; + + BT_DBG("hcon %p", hcon); + + if (hcon->type != ACL_LINK || !conn) + return 0x13; + + return conn->disc_reason; +} + +static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) +{ + BT_DBG("hcon %p reason %d", hcon, reason); + + if (hcon->type != ACL_LINK) + return 0; + + l2cap_conn_del(hcon, bt_err(reason)); + + return 0; +} + +static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt) +{ + if (sk->sk_type != SOCK_SEQPACKET) + return; + + if (encrypt == 0x00) { + if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) { + l2cap_sock_clear_timer(sk); + l2cap_sock_set_timer(sk, HZ * 5); + } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) + __l2cap_sock_close(sk, ECONNREFUSED); + } else { + if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) + l2cap_sock_clear_timer(sk); + } +} + +static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) +{ + struct l2cap_chan_list *l; + struct l2cap_conn *conn = hcon->l2cap_data; + struct sock *sk; + + if (!conn) + return 0; + + l = &conn->chan_list; + + BT_DBG("conn %p", conn); + + read_lock(&l->lock); + + for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { + bh_lock_sock(sk); + + if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) { + bh_unlock_sock(sk); + continue; + } + + if (!status && (sk->sk_state == BT_CONNECTED || + sk->sk_state == BT_CONFIG)) { + l2cap_check_encryption(sk, encrypt); + bh_unlock_sock(sk); + continue; + } + + if (sk->sk_state == BT_CONNECT) { + if (!status) { + struct l2cap_conn_req req; + req.scid = cpu_to_le16(l2cap_pi(sk)->scid); + req.psm = l2cap_pi(sk)->psm; + + l2cap_pi(sk)->ident = l2cap_get_ident(conn); + + l2cap_send_cmd(conn, l2cap_pi(sk)->ident, + L2CAP_CONN_REQ, sizeof(req), &req); + } else { + l2cap_sock_clear_timer(sk); + l2cap_sock_set_timer(sk, HZ / 10); + } + } else if (sk->sk_state == BT_CONNECT2) { + struct l2cap_conn_rsp rsp; + __u16 result; + + if (!status) { + sk->sk_state = BT_CONFIG; + result = L2CAP_CR_SUCCESS; + } else { + sk->sk_state = BT_DISCONN; + l2cap_sock_set_timer(sk, HZ / 10); + result = L2CAP_CR_SEC_BLOCK; + } + + rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); + rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); + rsp.result = 
cpu_to_le16(result); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + l2cap_send_cmd(conn, l2cap_pi(sk)->ident, + L2CAP_CONN_RSP, sizeof(rsp), &rsp); + } + + bh_unlock_sock(sk); + } + + read_unlock(&l->lock); + + return 0; +} + +static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) +{ + struct l2cap_conn *conn = hcon->l2cap_data; + + if (!conn && !(conn = l2cap_conn_add(hcon, 0))) + goto drop; + + BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); + + if (flags & ACL_START) { + struct l2cap_hdr *hdr; + int len; + + if (conn->rx_len) { + BT_ERR("Unexpected start frame (len %d)", skb->len); + kfree_skb(conn->rx_skb); + conn->rx_skb = NULL; + conn->rx_len = 0; + l2cap_conn_unreliable(conn, ECOMM); + } + + if (skb->len < 2) { + BT_ERR("Frame is too short (len %d)", skb->len); + l2cap_conn_unreliable(conn, ECOMM); + goto drop; + } + + hdr = (struct l2cap_hdr *) skb->data; + len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE; + + if (len == skb->len) { + /* Complete frame received */ + l2cap_recv_frame(conn, skb); + return 0; + } + + BT_DBG("Start: total len %d, frag len %d", len, skb->len); + + if (skb->len > len) { + BT_ERR("Frame is too long (len %d, expected len %d)", + skb->len, len); + l2cap_conn_unreliable(conn, ECOMM); + goto drop; + } + + /* Allocate skb for the complete frame (with header) */ + conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!conn->rx_skb) + goto drop; + + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len = len - skb->len; + } else { + BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); + + if (!conn->rx_len) { + BT_ERR("Unexpected continuation frame (len %d)", skb->len); + l2cap_conn_unreliable(conn, ECOMM); + goto drop; + } + + if (skb->len > conn->rx_len) { + BT_ERR("Fragment is too long (len %d, expected %d)", + skb->len, conn->rx_len); + kfree_skb(conn->rx_skb); + conn->rx_skb = NULL; + conn->rx_len = 0; + l2cap_conn_unreliable(conn, ECOMM); + goto drop; + } + + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len -= skb->len; + + if (!conn->rx_len) { + /* Complete frame received */ + l2cap_recv_frame(conn, conn->rx_skb); + conn->rx_skb = NULL; + } + } + +drop: + kfree_skb(skb); + return 0; +} + +static ssize_t l2cap_sysfs_show(struct class *dev, + struct class_attribute *attr, + char *buf) +{ + struct sock *sk; + struct hlist_node *node; + char *str = buf; + + read_lock_bh(&l2cap_sk_list.lock); + + sk_for_each(sk, node, &l2cap_sk_list.head) { + struct l2cap_pinfo *pi = l2cap_pi(sk); + + str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), + sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, + pi->dcid, pi->imtu, pi->omtu, pi->sec_level); + } + + read_unlock_bh(&l2cap_sk_list.lock); + + return str - buf; +} + +static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL); + +static const struct proto_ops l2cap_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = l2cap_sock_release, + .bind = l2cap_sock_bind, + .connect = l2cap_sock_connect, + .listen = l2cap_sock_listen, + .accept = l2cap_sock_accept, + .getname = l2cap_sock_getname, + .sendmsg = l2cap_sock_sendmsg, + .recvmsg = l2cap_sock_recvmsg, + .poll = bt_sock_poll, + .ioctl = bt_sock_ioctl, + .mmap = sock_no_mmap, + .socketpair = sock_no_socketpair, + .shutdown = l2cap_sock_shutdown, + .setsockopt = l2cap_sock_setsockopt, + .getsockopt = l2cap_sock_getsockopt +}; + +static const struct 
net_proto_family l2cap_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = l2cap_sock_create, +}; + +static struct hci_proto l2cap_hci_proto = { + .name = "L2CAP", + .id = HCI_PROTO_L2CAP, + .connect_ind = l2cap_connect_ind, + .connect_cfm = l2cap_connect_cfm, + .disconn_ind = l2cap_disconn_ind, + .disconn_cfm = l2cap_disconn_cfm, + .security_cfm = l2cap_security_cfm, + .recv_acldata = l2cap_recv_acldata +}; + +static int __init l2cap_init(void) +{ + int err; + + err = proto_register(&l2cap_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops); + if (err < 0) { + BT_ERR("L2CAP socket registration failed"); + goto error; + } + + err = hci_register_proto(&l2cap_hci_proto); + if (err < 0) { + BT_ERR("L2CAP protocol registration failed"); + bt_sock_unregister(BTPROTO_L2CAP); + goto error; + } + + if (class_create_file(bt_class, &class_attr_l2cap) < 0) + BT_ERR("Failed to create L2CAP info file"); + + BT_INFO("L2CAP ver %s", VERSION); + BT_INFO("L2CAP socket layer initialized"); + + return 0; + +error: + proto_unregister(&l2cap_proto); + return err; +} + +static void __exit l2cap_exit(void) +{ + class_remove_file(bt_class, &class_attr_l2cap); + + if (bt_sock_unregister(BTPROTO_L2CAP) < 0) + BT_ERR("L2CAP socket unregistration failed"); + + if (hci_unregister_proto(&l2cap_hci_proto) < 0) + BT_ERR("L2CAP protocol unregistration failed"); + + proto_unregister(&l2cap_proto); +} + +void l2cap_load(void) +{ + /* Dummy function to trigger automatic L2CAP module loading by + * other modules that use L2CAP sockets but don't use any other + * symbols from it. */ + return; +} +EXPORT_SYMBOL(l2cap_load); + +module_init(l2cap_init); +module_exit(l2cap_exit); + +module_param(enable_ertm, bool, 0644); +MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode"); + +module_param(max_transmit, uint, 0644); +MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)"); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-0"); diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c new file mode 100644 index 0000000..ad2af58 --- /dev/null +++ b/net/bluetooth/lib.c @@ -0,0 +1,152 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth kernel library. 
*/ + +#include + +#include +#include +#include +#include + +#include + +void baswap(bdaddr_t *dst, bdaddr_t *src) +{ + unsigned char *d = (unsigned char *) dst; + unsigned char *s = (unsigned char *) src; + unsigned int i; + + for (i = 0; i < 6; i++) + d[i] = s[5 - i]; +} +EXPORT_SYMBOL(baswap); + +char *batostr(bdaddr_t *ba) +{ + static char str[2][18]; + static int i = 1; + + i ^= 1; + sprintf(str[i], "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X", + ba->b[0], ba->b[1], ba->b[2], + ba->b[3], ba->b[4], ba->b[5]); + + return str[i]; +} +EXPORT_SYMBOL(batostr); + +/* Bluetooth error codes to Unix errno mapping */ +int bt_err(__u16 code) +{ + switch (code) { + case 0: + return 0; + + case 0x01: + return EBADRQC; + + case 0x02: + return ENOTCONN; + + case 0x03: + return EIO; + + case 0x04: + return EHOSTDOWN; + + case 0x05: + return EACCES; + + case 0x06: + return EBADE; + + case 0x07: + return ENOMEM; + + case 0x08: + return ETIMEDOUT; + + case 0x09: + return EMLINK; + + case 0x0a: + return EMLINK; + + case 0x0b: + return EALREADY; + + case 0x0c: + return EBUSY; + + case 0x0d: + case 0x0e: + case 0x0f: + return ECONNREFUSED; + + case 0x10: + return ETIMEDOUT; + + case 0x11: + case 0x27: + case 0x29: + case 0x20: + return EOPNOTSUPP; + + case 0x12: + return EINVAL; + + case 0x13: + case 0x14: + case 0x15: + return ECONNRESET; + + case 0x16: + return ECONNABORTED; + + case 0x17: + return ELOOP; + + case 0x18: + return EACCES; + + case 0x1a: + return EPROTONOSUPPORT; + + case 0x1b: + return ECONNREFUSED; + + case 0x19: + case 0x1e: + case 0x23: + case 0x24: + case 0x25: + return EPROTO; + + default: + return ENOSYS; + } +} +EXPORT_SYMBOL(bt_err); diff --git a/net/bluetooth/rfcomm/Makefile b/net/bluetooth/rfcomm/Makefile new file mode 100644 index 0000000..fe07988 --- /dev/null +++ b/net/bluetooth/rfcomm/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Linux Bluetooth RFCOMM layer. +# + +obj-$(CONFIG_BT_RFCOMM) += rfcomm.o + +rfcomm-y := core.o sock.o +rfcomm-$(CONFIG_BT_RFCOMM_TTY) += tty.o diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c new file mode 100644 index 0000000..db8a68e --- /dev/null +++ b/net/bluetooth/rfcomm/core.c @@ -0,0 +1,2204 @@ +/* + RFCOMM implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002 Maxim Krasnyansky + Copyright (C) 2002 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* + * Bluetooth RFCOMM core. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#define VERSION "1.11" + +static int disable_cfc = 0; +static int channel_mtu = -1; +static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; +static int l2cap_ertm = 0; + +static struct task_struct *rfcomm_thread; + +static DEFINE_MUTEX(rfcomm_mutex); +#define rfcomm_lock() mutex_lock(&rfcomm_mutex) +#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex) + +static unsigned long rfcomm_event; + +static LIST_HEAD(session_list); + +static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len); +static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci); +static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci); +static int rfcomm_queue_disc(struct rfcomm_dlc *d); +static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type); +static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d); +static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig); +static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len); +static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits); +static void rfcomm_make_uih(struct sk_buff *skb, u8 addr); + +static void rfcomm_process_connect(struct rfcomm_session *s); + +static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err); +static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); +static void rfcomm_session_del(struct rfcomm_session *s); + +/* ---- RFCOMM frame parsing macros ---- */ +#define __get_dlci(b) ((b & 0xfc) >> 2) +#define __get_channel(b) ((b & 0xf8) >> 3) +#define __get_dir(b) ((b & 0x04) >> 2) +#define __get_type(b) ((b & 0xef)) + +#define __test_ea(b) ((b & 0x01)) +#define __test_cr(b) ((b & 0x02)) +#define __test_pf(b) ((b & 0x10)) + +#define __addr(cr, dlci) (((dlci & 0x3f) << 2) | (cr << 1) | 0x01) +#define __ctrl(type, pf) (((type & 0xef) | (pf << 4))) +#define __dlci(dir, chn) (((chn & 0x1f) << 1) | dir) +#define __srv_channel(dlci) (dlci >> 1) +#define __dir(dlci) (dlci & 0x01) + +#define __len8(len) (((len) << 1) | 1) +#define __len16(len) ((len) << 1) + +/* MCC macros */ +#define __mcc_type(cr, type) (((type << 2) | (cr << 1) | 0x01)) +#define __get_mcc_type(b) ((b & 0xfc) >> 2) +#define __get_mcc_len(b) ((b & 0xfe) >> 1) + +/* RPN macros */ +#define __rpn_line_settings(data, stop, parity) ((data & 0x3) | ((stop & 0x1) << 2) | ((parity & 0x7) << 3)) +#define __get_rpn_data_bits(line) ((line) & 0x3) +#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) +#define __get_rpn_parity(line) (((line) >> 3) & 0x7) + +static inline void rfcomm_schedule(uint event) +{ + if (!rfcomm_thread) + return; + //set_bit(event, &rfcomm_event); + set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); + wake_up_process(rfcomm_thread); +} + +static inline void rfcomm_session_put(struct rfcomm_session *s) +{ + if (atomic_dec_and_test(&s->refcnt)) + rfcomm_session_del(s); +} + +/* ---- RFCOMM FCS computation ---- */ + +/* reversed, 8-bit, poly=0x07 */ +static unsigned char rfcomm_crc_table[256] = { + 0x00, 0x91, 0xe3, 0x72, 0x07, 0x96, 0xe4, 0x75, + 0x0e, 0x9f, 0xed, 0x7c, 0x09, 0x98, 0xea, 0x7b, + 0x1c, 0x8d, 0xff, 0x6e, 0x1b, 0x8a, 0xf8, 0x69, + 0x12, 0x83, 0xf1, 0x60, 0x15, 0x84, 0xf6, 0x67, + + 0x38, 0xa9, 0xdb, 0x4a, 0x3f, 0xae, 0xdc, 0x4d, + 0x36, 0xa7, 0xd5, 0x44, 0x31, 0xa0, 0xd2, 0x43, + 0x24, 0xb5, 0xc7, 0x56, 
0x23, 0xb2, 0xc0, 0x51, + 0x2a, 0xbb, 0xc9, 0x58, 0x2d, 0xbc, 0xce, 0x5f, + + 0x70, 0xe1, 0x93, 0x02, 0x77, 0xe6, 0x94, 0x05, + 0x7e, 0xef, 0x9d, 0x0c, 0x79, 0xe8, 0x9a, 0x0b, + 0x6c, 0xfd, 0x8f, 0x1e, 0x6b, 0xfa, 0x88, 0x19, + 0x62, 0xf3, 0x81, 0x10, 0x65, 0xf4, 0x86, 0x17, + + 0x48, 0xd9, 0xab, 0x3a, 0x4f, 0xde, 0xac, 0x3d, + 0x46, 0xd7, 0xa5, 0x34, 0x41, 0xd0, 0xa2, 0x33, + 0x54, 0xc5, 0xb7, 0x26, 0x53, 0xc2, 0xb0, 0x21, + 0x5a, 0xcb, 0xb9, 0x28, 0x5d, 0xcc, 0xbe, 0x2f, + + 0xe0, 0x71, 0x03, 0x92, 0xe7, 0x76, 0x04, 0x95, + 0xee, 0x7f, 0x0d, 0x9c, 0xe9, 0x78, 0x0a, 0x9b, + 0xfc, 0x6d, 0x1f, 0x8e, 0xfb, 0x6a, 0x18, 0x89, + 0xf2, 0x63, 0x11, 0x80, 0xf5, 0x64, 0x16, 0x87, + + 0xd8, 0x49, 0x3b, 0xaa, 0xdf, 0x4e, 0x3c, 0xad, + 0xd6, 0x47, 0x35, 0xa4, 0xd1, 0x40, 0x32, 0xa3, + 0xc4, 0x55, 0x27, 0xb6, 0xc3, 0x52, 0x20, 0xb1, + 0xca, 0x5b, 0x29, 0xb8, 0xcd, 0x5c, 0x2e, 0xbf, + + 0x90, 0x01, 0x73, 0xe2, 0x97, 0x06, 0x74, 0xe5, + 0x9e, 0x0f, 0x7d, 0xec, 0x99, 0x08, 0x7a, 0xeb, + 0x8c, 0x1d, 0x6f, 0xfe, 0x8b, 0x1a, 0x68, 0xf9, + 0x82, 0x13, 0x61, 0xf0, 0x85, 0x14, 0x66, 0xf7, + + 0xa8, 0x39, 0x4b, 0xda, 0xaf, 0x3e, 0x4c, 0xdd, + 0xa6, 0x37, 0x45, 0xd4, 0xa1, 0x30, 0x42, 0xd3, + 0xb4, 0x25, 0x57, 0xc6, 0xb3, 0x22, 0x50, 0xc1, + 0xba, 0x2b, 0x59, 0xc8, 0xbd, 0x2c, 0x5e, 0xcf +}; + +/* CRC on 2 bytes */ +#define __crc(data) (rfcomm_crc_table[rfcomm_crc_table[0xff ^ data[0]] ^ data[1]]) + +/* FCS on 2 bytes */ +static inline u8 __fcs(u8 *data) +{ + return (0xff - __crc(data)); +} + +/* FCS on 3 bytes */ +static inline u8 __fcs2(u8 *data) +{ + return (0xff - rfcomm_crc_table[__crc(data) ^ data[2]]); +} + +/* Check FCS */ +static inline int __check_fcs(u8 *data, int type, u8 fcs) +{ + u8 f = __crc(data); + + if (type != RFCOMM_UIH) + f = rfcomm_crc_table[f ^ data[2]]; + + return rfcomm_crc_table[f ^ fcs] != 0xcf; +} + +/* ---- L2CAP callbacks ---- */ +static void rfcomm_l2state_change(struct sock *sk) +{ + BT_DBG("%p state %d", sk, sk->sk_state); + rfcomm_schedule(RFCOMM_SCHED_STATE); +} + +static void rfcomm_l2data_ready(struct sock *sk, int bytes) +{ + BT_DBG("%p bytes %d", sk, bytes); + rfcomm_schedule(RFCOMM_SCHED_RX); +} + +static int rfcomm_l2sock_create(struct socket **sock) +{ + int err; + + BT_DBG(""); + + err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock); + if (!err) { + struct sock *sk = (*sock)->sk; + sk->sk_data_ready = rfcomm_l2data_ready; + sk->sk_state_change = rfcomm_l2state_change; + } + return err; +} + +static inline int rfcomm_check_security(struct rfcomm_dlc *d) +{ + struct sock *sk = d->session->sock->sk; + __u8 auth_type; + + switch (d->sec_level) { + case BT_SECURITY_HIGH: + auth_type = HCI_AT_GENERAL_BONDING_MITM; + break; + case BT_SECURITY_MEDIUM: + auth_type = HCI_AT_GENERAL_BONDING; + break; + default: + auth_type = HCI_AT_NO_BONDING; + break; + } + + return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level, + auth_type); +} + +static void rfcomm_session_timeout(unsigned long arg) +{ + struct rfcomm_session *s = (void *) arg; + + BT_DBG("session %p state %ld", s, s->state); + + set_bit(RFCOMM_TIMED_OUT, &s->flags); + rfcomm_schedule(RFCOMM_SCHED_TIMEO); +} + +static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout) +{ + BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout); + + if (!mod_timer(&s->timer, jiffies + timeout)) + rfcomm_session_hold(s); +} + +static void rfcomm_session_clear_timer(struct rfcomm_session *s) +{ + BT_DBG("session %p state %ld", s, s->state); + + if (timer_pending(&s->timer) && 
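+ /* The hold taken when the timer was armed in rfcomm_session_set_timer()
+  * is dropped only if a still pending timer is really cancelled here;
+  * if it already fired, the timeout path releases the reference instead. */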
del_timer(&s->timer)) + rfcomm_session_put(s); +} + +/* ---- RFCOMM DLCs ---- */ +static void rfcomm_dlc_timeout(unsigned long arg) +{ + struct rfcomm_dlc *d = (void *) arg; + + BT_DBG("dlc %p state %ld", d, d->state); + + set_bit(RFCOMM_TIMED_OUT, &d->flags); + rfcomm_dlc_put(d); + rfcomm_schedule(RFCOMM_SCHED_TIMEO); +} + +static void rfcomm_dlc_set_timer(struct rfcomm_dlc *d, long timeout) +{ + BT_DBG("dlc %p state %ld timeout %ld", d, d->state, timeout); + + if (!mod_timer(&d->timer, jiffies + timeout)) + rfcomm_dlc_hold(d); +} + +static void rfcomm_dlc_clear_timer(struct rfcomm_dlc *d) +{ + BT_DBG("dlc %p state %ld", d, d->state); + + if (timer_pending(&d->timer) && del_timer(&d->timer)) + rfcomm_dlc_put(d); +} + +static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d) +{ + BT_DBG("%p", d); + + d->state = BT_OPEN; + d->flags = 0; + d->mscex = 0; + d->mtu = RFCOMM_DEFAULT_MTU; + d->v24_sig = RFCOMM_V24_RTC | RFCOMM_V24_RTR | RFCOMM_V24_DV; + + d->cfc = RFCOMM_CFC_DISABLED; + d->rx_credits = RFCOMM_DEFAULT_CREDITS; +} + +struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio) +{ + struct rfcomm_dlc *d = kzalloc(sizeof(*d), prio); + + if (!d) + return NULL; + + setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d); + + skb_queue_head_init(&d->tx_queue); + spin_lock_init(&d->lock); + atomic_set(&d->refcnt, 1); + + rfcomm_dlc_clear_state(d); + + BT_DBG("%p", d); + + return d; +} + +void rfcomm_dlc_free(struct rfcomm_dlc *d) +{ + BT_DBG("%p", d); + + skb_queue_purge(&d->tx_queue); + kfree(d); +} + +static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d) +{ + BT_DBG("dlc %p session %p", d, s); + + rfcomm_session_hold(s); + + rfcomm_session_clear_timer(s); + rfcomm_dlc_hold(d); + list_add(&d->list, &s->dlcs); + d->session = s; +} + +static void rfcomm_dlc_unlink(struct rfcomm_dlc *d) +{ + struct rfcomm_session *s = d->session; + + BT_DBG("dlc %p refcnt %d session %p", d, atomic_read(&d->refcnt), s); + + list_del(&d->list); + d->session = NULL; + rfcomm_dlc_put(d); + + if (list_empty(&s->dlcs)) + rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT); + + rfcomm_session_put(s); +} + +static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_dlc *d; + struct list_head *p; + + list_for_each(p, &s->dlcs) { + d = list_entry(p, struct rfcomm_dlc, list); + if (d->dlci == dlci) + return d; + } + return NULL; +} + +static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel) +{ + struct rfcomm_session *s; + int err = 0; + u8 dlci; + + BT_DBG("dlc %p state %ld %s %s channel %d", + d, d->state, batostr(src), batostr(dst), channel); + + if (channel < 1 || channel > 30) + return -EINVAL; + + if (d->state != BT_OPEN && d->state != BT_CLOSED) + return 0; + + s = rfcomm_session_get(src, dst); + if (!s) { + s = rfcomm_session_create(src, dst, &err); + if (!s) + return err; + } + + dlci = __dlci(!s->initiator, channel); + + /* Check if DLCI already exists */ + if (rfcomm_dlc_get(s, dlci)) + return -EBUSY; + + rfcomm_dlc_clear_state(d); + + d->dlci = dlci; + d->addr = __addr(s->initiator, dlci); + d->priority = 7; + + d->state = BT_CONFIG; + rfcomm_dlc_link(s, d); + + d->out = 1; + + d->mtu = s->mtu; + d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 
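+ /* Until the first PN exchange the session does not know whether the peer
+  * supports credit based flow control, so the DLC starts with CFC off
+  * here; rfcomm_apply_pn() overwrites d->cfc from the PN response and
+  * records the negotiated setting in s->cfc. */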
0 : s->cfc; + + if (s->state == BT_CONNECTED) { + if (rfcomm_check_security(d)) + rfcomm_send_pn(s, 1, d); + else + set_bit(RFCOMM_AUTH_PENDING, &d->flags); + } + + rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); + + return 0; +} + +int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel) +{ + int r; + + rfcomm_lock(); + + r = __rfcomm_dlc_open(d, src, dst, channel); + + rfcomm_unlock(); + return r; +} + +static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err) +{ + struct rfcomm_session *s = d->session; + if (!s) + return 0; + + BT_DBG("dlc %p state %ld dlci %d err %d session %p", + d, d->state, d->dlci, err, s); + + switch (d->state) { + case BT_CONNECT: + case BT_CONFIG: + if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { + set_bit(RFCOMM_AUTH_REJECT, &d->flags); + rfcomm_schedule(RFCOMM_SCHED_AUTH); + break; + } + /* Fall through */ + + case BT_CONNECTED: + d->state = BT_DISCONN; + if (skb_queue_empty(&d->tx_queue)) { + rfcomm_send_disc(s, d->dlci); + rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT); + } else { + rfcomm_queue_disc(d); + rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT * 2); + } + break; + + case BT_OPEN: + case BT_CONNECT2: + if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { + set_bit(RFCOMM_AUTH_REJECT, &d->flags); + rfcomm_schedule(RFCOMM_SCHED_AUTH); + break; + } + /* Fall through */ + + default: + rfcomm_dlc_clear_timer(d); + + rfcomm_dlc_lock(d); + d->state = BT_CLOSED; + d->state_change(d, err); + rfcomm_dlc_unlock(d); + + skb_queue_purge(&d->tx_queue); + rfcomm_dlc_unlink(d); + } + + return 0; +} + +int rfcomm_dlc_close(struct rfcomm_dlc *d, int err) +{ + int r; + + rfcomm_lock(); + + r = __rfcomm_dlc_close(d, err); + + rfcomm_unlock(); + return r; +} + +int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) +{ + int len = skb->len; + + if (d->state != BT_CONNECTED) + return -ENOTCONN; + + BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len); + + if (len > d->mtu) + return -EINVAL; + + rfcomm_make_uih(skb, d->addr); + skb_queue_tail(&d->tx_queue, skb); + + if (!test_bit(RFCOMM_TX_THROTTLED, &d->flags)) + rfcomm_schedule(RFCOMM_SCHED_TX); + return len; +} + +void __rfcomm_dlc_throttle(struct rfcomm_dlc *d) +{ + BT_DBG("dlc %p state %ld", d, d->state); + + if (!d->cfc) { + d->v24_sig |= RFCOMM_V24_FC; + set_bit(RFCOMM_MSC_PENDING, &d->flags); + } + rfcomm_schedule(RFCOMM_SCHED_TX); +} + +void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d) +{ + BT_DBG("dlc %p state %ld", d, d->state); + + if (!d->cfc) { + d->v24_sig &= ~RFCOMM_V24_FC; + set_bit(RFCOMM_MSC_PENDING, &d->flags); + } + rfcomm_schedule(RFCOMM_SCHED_TX); +} + +/* + Set/get modem status functions use _local_ status i.e. what we report + to the other side. + Remote status is provided by dlc->modem_status() callback. 
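+ A set call only latches the new signals and raises RFCOMM_MSC_PENDING;
+ the MSC frame that actually carries them to the peer is sent later from
+ rfcomm_process_tx().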
+ */ +int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig) +{ + BT_DBG("dlc %p state %ld v24_sig 0x%x", + d, d->state, v24_sig); + + if (test_bit(RFCOMM_RX_THROTTLED, &d->flags)) + v24_sig |= RFCOMM_V24_FC; + else + v24_sig &= ~RFCOMM_V24_FC; + + d->v24_sig = v24_sig; + + if (!test_and_set_bit(RFCOMM_MSC_PENDING, &d->flags)) + rfcomm_schedule(RFCOMM_SCHED_TX); + + return 0; +} + +int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig) +{ + BT_DBG("dlc %p state %ld v24_sig 0x%x", + d, d->state, d->v24_sig); + + *v24_sig = d->v24_sig; + return 0; +} + +/* ---- RFCOMM sessions ---- */ +static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state) +{ + struct rfcomm_session *s = kzalloc(sizeof(*s), GFP_KERNEL); + + if (!s) + return NULL; + + BT_DBG("session %p sock %p", s, sock); + + setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s); + + INIT_LIST_HEAD(&s->dlcs); + s->state = state; + s->sock = sock; + + s->mtu = RFCOMM_DEFAULT_MTU; + s->cfc = disable_cfc ? RFCOMM_CFC_DISABLED : RFCOMM_CFC_UNKNOWN; + + /* Do not increment module usage count for listening sessions. + * Otherwise we won't be able to unload the module. */ + if (state != BT_LISTEN) + if (!try_module_get(THIS_MODULE)) { + kfree(s); + return NULL; + } + + list_add(&s->list, &session_list); + + return s; +} + +static void rfcomm_session_del(struct rfcomm_session *s) +{ + int state = s->state; + + BT_DBG("session %p state %ld", s, s->state); + + list_del(&s->list); + + if (state == BT_CONNECTED) + rfcomm_send_disc(s, 0); + + rfcomm_session_clear_timer(s); + sock_release(s->sock); + kfree(s); + + if (state != BT_LISTEN) + module_put(THIS_MODULE); +} + +static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst) +{ + struct rfcomm_session *s; + struct list_head *p, *n; + struct bt_sock *sk; + list_for_each_safe(p, n, &session_list) { + s = list_entry(p, struct rfcomm_session, list); + sk = bt_sk(s->sock->sk); + + if ((!bacmp(src, BDADDR_ANY) || !bacmp(&sk->src, src)) && + !bacmp(&sk->dst, dst)) + return s; + } + return NULL; +} + +static void rfcomm_session_close(struct rfcomm_session *s, int err) +{ + struct rfcomm_dlc *d; + struct list_head *p, *n; + + BT_DBG("session %p state %ld err %d", s, s->state, err); + + rfcomm_session_hold(s); + + s->state = BT_CLOSED; + + /* Close all dlcs */ + list_for_each_safe(p, n, &s->dlcs) { + d = list_entry(p, struct rfcomm_dlc, list); + d->state = BT_CLOSED; + __rfcomm_dlc_close(d, err); + } + + rfcomm_session_clear_timer(s); + rfcomm_session_put(s); +} + +static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err) +{ + struct rfcomm_session *s = NULL; + struct sockaddr_l2 addr; + struct socket *sock; + struct sock *sk; + + BT_DBG("%s %s", batostr(src), batostr(dst)); + + *err = rfcomm_l2sock_create(&sock); + if (*err < 0) + return NULL; + + bacpy(&addr.l2_bdaddr, src); + addr.l2_family = AF_BLUETOOTH; + addr.l2_psm = 0; + addr.l2_cid = 0; + *err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); + if (*err < 0) + goto failed; + + /* Set L2CAP options */ + sk = sock->sk; + lock_sock(sk); + l2cap_pi(sk)->imtu = l2cap_mtu; + if (l2cap_ertm) + l2cap_pi(sk)->mode = L2CAP_MODE_ERTM; + release_sock(sk); + + s = rfcomm_session_add(sock, BT_BOUND); + if (!s) { + *err = -ENOMEM; + goto failed; + } + + s->initiator = 1; + + bacpy(&addr.l2_bdaddr, dst); + addr.l2_family = AF_BLUETOOTH; + addr.l2_psm = cpu_to_le16(RFCOMM_PSM); + addr.l2_cid = 0; + *err = kernel_connect(sock, (struct sockaddr 
*) &addr, sizeof(addr), O_NONBLOCK); + if (*err == 0 || *err == -EINPROGRESS) + return s; + + rfcomm_session_del(s); + return NULL; + +failed: + sock_release(sock); + return NULL; +} + +void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst) +{ + struct sock *sk = s->sock->sk; + if (src) + bacpy(src, &bt_sk(sk)->src); + if (dst) + bacpy(dst, &bt_sk(sk)->dst); +} + +/* ---- RFCOMM frame sending ---- */ +static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len) +{ + struct socket *sock = s->sock; + struct kvec iv = { data, len }; + struct msghdr msg; + + BT_DBG("session %p len %d", s, len); + + memset(&msg, 0, sizeof(msg)); + + return kernel_sendmsg(sock, &msg, &iv, 1, len); +} + +static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_cmd cmd; + + BT_DBG("%p dlci %d", s, dlci); + + cmd.addr = __addr(s->initiator, dlci); + cmd.ctrl = __ctrl(RFCOMM_SABM, 1); + cmd.len = __len8(0); + cmd.fcs = __fcs2((u8 *) &cmd); + + return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); +} + +static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_cmd cmd; + + BT_DBG("%p dlci %d", s, dlci); + + cmd.addr = __addr(!s->initiator, dlci); + cmd.ctrl = __ctrl(RFCOMM_UA, 1); + cmd.len = __len8(0); + cmd.fcs = __fcs2((u8 *) &cmd); + + return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); +} + +static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_cmd cmd; + + BT_DBG("%p dlci %d", s, dlci); + + cmd.addr = __addr(s->initiator, dlci); + cmd.ctrl = __ctrl(RFCOMM_DISC, 1); + cmd.len = __len8(0); + cmd.fcs = __fcs2((u8 *) &cmd); + + return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); +} + +static int rfcomm_queue_disc(struct rfcomm_dlc *d) +{ + struct rfcomm_cmd *cmd; + struct sk_buff *skb; + + BT_DBG("dlc %p dlci %d", d, d->dlci); + + skb = alloc_skb(sizeof(*cmd), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + cmd = (void *) __skb_put(skb, sizeof(*cmd)); + cmd->addr = d->addr; + cmd->ctrl = __ctrl(RFCOMM_DISC, 1); + cmd->len = __len8(0); + cmd->fcs = __fcs2((u8 *) cmd); + + skb_queue_tail(&d->tx_queue, skb); + rfcomm_schedule(RFCOMM_SCHED_TX); + return 0; +} + +static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_cmd cmd; + + BT_DBG("%p dlci %d", s, dlci); + + cmd.addr = __addr(!s->initiator, dlci); + cmd.ctrl = __ctrl(RFCOMM_DM, 1); + cmd.len = __len8(0); + cmd.fcs = __fcs2((u8 *) &cmd); + + return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); +} + +static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d type %d", s, cr, type); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc) + 1); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_NSC); + mcc->len = __len8(1); + + /* Type that we didn't like */ + *ptr = __mcc_type(cr, type); ptr++; + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + struct rfcomm_pn *pn; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d dlci %d mtu %d", s, cr, d->dlci, d->mtu); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc) + 
sizeof(*pn)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_PN); + mcc->len = __len8(sizeof(*pn)); + + pn = (void *) ptr; ptr += sizeof(*pn); + pn->dlci = d->dlci; + pn->priority = d->priority; + pn->ack_timer = 0; + pn->max_retrans = 0; + + if (s->cfc) { + pn->flow_ctrl = cr ? 0xf0 : 0xe0; + pn->credits = RFCOMM_DEFAULT_CREDITS; + } else { + pn->flow_ctrl = 0; + pn->credits = 0; + } + + if (cr && channel_mtu >= 0) + pn->mtu = cpu_to_le16(channel_mtu); + else + pn->mtu = cpu_to_le16(d->mtu); + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, + u8 bit_rate, u8 data_bits, u8 stop_bits, + u8 parity, u8 flow_ctrl_settings, + u8 xon_char, u8 xoff_char, u16 param_mask) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + struct rfcomm_rpn *rpn; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d dlci %d bit_r 0x%x data_b 0x%x stop_b 0x%x parity 0x%x" + " flwc_s 0x%x xon_c 0x%x xoff_c 0x%x p_mask 0x%x", + s, cr, dlci, bit_rate, data_bits, stop_bits, parity, + flow_ctrl_settings, xon_char, xoff_char, param_mask); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc) + sizeof(*rpn)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_RPN); + mcc->len = __len8(sizeof(*rpn)); + + rpn = (void *) ptr; ptr += sizeof(*rpn); + rpn->dlci = __addr(1, dlci); + rpn->bit_rate = bit_rate; + rpn->line_settings = __rpn_line_settings(data_bits, stop_bits, parity); + rpn->flow_ctrl = flow_ctrl_settings; + rpn->xon_char = xon_char; + rpn->xoff_char = xoff_char; + rpn->param_mask = cpu_to_le16(param_mask); + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + struct rfcomm_rls *rls; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d status 0x%x", s, cr, status); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc) + sizeof(*rls)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_RLS); + mcc->len = __len8(sizeof(*rls)); + + rls = (void *) ptr; ptr += sizeof(*rls); + rls->dlci = __addr(1, dlci); + rls->status = status; + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + struct rfcomm_msc *msc; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d v24 0x%x", s, cr, v24_sig); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc) + sizeof(*msc)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_MSC); + mcc->len = __len8(sizeof(*msc)); + + msc = (void *) ptr; ptr += sizeof(*msc); + msc->dlci = __addr(1, dlci); + msc->v24_sig = v24_sig | 0x01; + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d", s, cr); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + 
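+ /* Like every multiplexer control command, FCOFF rides in a UIH frame on
+  * DLCI 0: address, control and length octets, then the MCC type/length
+  * pair, closed by an FCS computed over the first two header bytes.  The
+  * short length form is used, e.g. __len8(sizeof(*mcc)) == (2 << 1) | 1. */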
hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_FCOFF); + mcc->len = __len8(0); + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_fcon(struct rfcomm_session *s, int cr) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d", s, cr); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_FCON); + mcc->len = __len8(0); + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len) +{ + struct socket *sock = s->sock; + struct kvec iv[3]; + struct msghdr msg; + unsigned char hdr[5], crc[1]; + + if (len > 125) + return -EINVAL; + + BT_DBG("%p cr %d", s, cr); + + hdr[0] = __addr(s->initiator, 0); + hdr[1] = __ctrl(RFCOMM_UIH, 0); + hdr[2] = 0x01 | ((len + 2) << 1); + hdr[3] = 0x01 | ((cr & 0x01) << 1) | (RFCOMM_TEST << 2); + hdr[4] = 0x01 | (len << 1); + + crc[0] = __fcs(hdr); + + iv[0].iov_base = hdr; + iv[0].iov_len = 5; + iv[1].iov_base = pattern; + iv[1].iov_len = len; + iv[2].iov_base = crc; + iv[2].iov_len = 1; + + memset(&msg, 0, sizeof(msg)); + + return kernel_sendmsg(sock, &msg, iv, 3, 6 + len); +} + +static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits) +{ + struct rfcomm_hdr *hdr; + u8 buf[16], *ptr = buf; + + BT_DBG("%p addr %d credits %d", s, addr, credits); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = addr; + hdr->ctrl = __ctrl(RFCOMM_UIH, 1); + hdr->len = __len8(0); + + *ptr = credits; ptr++; + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static void rfcomm_make_uih(struct sk_buff *skb, u8 addr) +{ + struct rfcomm_hdr *hdr; + int len = skb->len; + u8 *crc; + + if (len > 127) { + hdr = (void *) skb_push(skb, 4); + put_unaligned(cpu_to_le16(__len16(len)), (__le16 *) &hdr->len); + } else { + hdr = (void *) skb_push(skb, 3); + hdr->len = __len8(len); + } + hdr->addr = addr; + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + + crc = skb_put(skb, 1); + *crc = __fcs((void *) hdr); +} + +/* ---- RFCOMM frame reception ---- */ +static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) +{ + BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); + + if (dlci) { + /* Data channel */ + struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); + if (!d) { + rfcomm_send_dm(s, dlci); + return 0; + } + + switch (d->state) { + case BT_CONNECT: + rfcomm_dlc_clear_timer(d); + + rfcomm_dlc_lock(d); + d->state = BT_CONNECTED; + d->state_change(d, 0); + rfcomm_dlc_unlock(d); + + rfcomm_send_msc(s, 1, dlci, d->v24_sig); + break; + + case BT_DISCONN: + d->state = BT_CLOSED; + __rfcomm_dlc_close(d, 0); + + if (list_empty(&s->dlcs)) { + s->state = BT_DISCONN; + rfcomm_send_disc(s, 0); + } + + break; + } + } else { + /* Control channel */ + switch (s->state) { + case BT_CONNECT: + s->state = BT_CONNECTED; + rfcomm_process_connect(s); + break; + + case BT_DISCONN: + /* When socket is closed and we are not RFCOMM + * initiator rfcomm_process_rx already calls + * rfcomm_session_put() */ + if (s->sock->sk->sk_state != BT_CLOSED) + rfcomm_session_put(s); + break; + } + } + return 0; +} + +static int rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci) +{ + int err = 0; 
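+ /* DM is the peer's "disconnected mode" reply: the DLCI is refused or
+  * already gone.  A DLC that was still being set up reports ECONNREFUSED
+  * to its owner, an established one reports ECONNRESET. */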
+ + BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); + + if (dlci) { + /* Data DLC */ + struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); + if (d) { + if (d->state == BT_CONNECT || d->state == BT_CONFIG) + err = ECONNREFUSED; + else + err = ECONNRESET; + + d->state = BT_CLOSED; + __rfcomm_dlc_close(d, err); + } + } else { + if (s->state == BT_CONNECT) + err = ECONNREFUSED; + else + err = ECONNRESET; + + s->state = BT_CLOSED; + rfcomm_session_close(s, err); + } + return 0; +} + +static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci) +{ + int err = 0; + + BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); + + if (dlci) { + struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); + if (d) { + rfcomm_send_ua(s, dlci); + + if (d->state == BT_CONNECT || d->state == BT_CONFIG) + err = ECONNREFUSED; + else + err = ECONNRESET; + + d->state = BT_CLOSED; + __rfcomm_dlc_close(d, err); + } else + rfcomm_send_dm(s, dlci); + + } else { + rfcomm_send_ua(s, 0); + + if (s->state == BT_CONNECT) + err = ECONNREFUSED; + else + err = ECONNRESET; + + s->state = BT_CLOSED; + rfcomm_session_close(s, err); + } + + return 0; +} + +void rfcomm_dlc_accept(struct rfcomm_dlc *d) +{ + struct sock *sk = d->session->sock->sk; + + BT_DBG("dlc %p", d); + + rfcomm_send_ua(d->session, d->dlci); + + rfcomm_dlc_clear_timer(d); + + rfcomm_dlc_lock(d); + d->state = BT_CONNECTED; + d->state_change(d, 0); + rfcomm_dlc_unlock(d); + + if (d->role_switch) + hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00); + + rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); +} + +static void rfcomm_check_accept(struct rfcomm_dlc *d) +{ + if (rfcomm_check_security(d)) { + if (d->defer_setup) { + set_bit(RFCOMM_DEFER_SETUP, &d->flags); + rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); + + rfcomm_dlc_lock(d); + d->state = BT_CONNECT2; + d->state_change(d, 0); + rfcomm_dlc_unlock(d); + } else + rfcomm_dlc_accept(d); + } else { + set_bit(RFCOMM_AUTH_PENDING, &d->flags); + rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); + } +} + +static int rfcomm_recv_sabm(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_dlc *d; + u8 channel; + + BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); + + if (!dlci) { + rfcomm_send_ua(s, 0); + + if (s->state == BT_OPEN) { + s->state = BT_CONNECTED; + rfcomm_process_connect(s); + } + return 0; + } + + /* Check if DLC exists */ + d = rfcomm_dlc_get(s, dlci); + if (d) { + if (d->state == BT_OPEN) { + /* DLC was previously opened by PN request */ + rfcomm_check_accept(d); + } + return 0; + } + + /* Notify socket layer about incoming connection */ + channel = __srv_channel(dlci); + if (rfcomm_connect_ind(s, channel, &d)) { + d->dlci = dlci; + d->addr = __addr(s->initiator, dlci); + rfcomm_dlc_link(s, d); + + rfcomm_check_accept(d); + } else { + rfcomm_send_dm(s, dlci); + } + + return 0; +} + +static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn) +{ + struct rfcomm_session *s = d->session; + + BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d", + d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits); + + if ((pn->flow_ctrl == 0xf0 && s->cfc != RFCOMM_CFC_DISABLED) || + pn->flow_ctrl == 0xe0) { + d->cfc = RFCOMM_CFC_ENABLED; + d->tx_credits = pn->credits; + } else { + d->cfc = RFCOMM_CFC_DISABLED; + set_bit(RFCOMM_TX_THROTTLED, &d->flags); + } + + if (s->cfc == RFCOMM_CFC_UNKNOWN) + s->cfc = d->cfc; + + d->priority = pn->priority; + + d->mtu = __le16_to_cpu(pn->mtu); + + if (cr && d->mtu > s->mtu) + d->mtu = s->mtu; + + return 0; +} + +static int 
rfcomm_recv_pn(struct rfcomm_session *s, int cr, struct sk_buff *skb) +{ + struct rfcomm_pn *pn = (void *) skb->data; + struct rfcomm_dlc *d; + u8 dlci = pn->dlci; + + BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); + + if (!dlci) + return 0; + + d = rfcomm_dlc_get(s, dlci); + if (d) { + if (cr) { + /* PN request */ + rfcomm_apply_pn(d, cr, pn); + rfcomm_send_pn(s, 0, d); + } else { + /* PN response */ + switch (d->state) { + case BT_CONFIG: + rfcomm_apply_pn(d, cr, pn); + + d->state = BT_CONNECT; + rfcomm_send_sabm(s, d->dlci); + break; + } + } + } else { + u8 channel = __srv_channel(dlci); + + if (!cr) + return 0; + + /* PN request for non existing DLC. + * Assume incoming connection. */ + if (rfcomm_connect_ind(s, channel, &d)) { + d->dlci = dlci; + d->addr = __addr(s->initiator, dlci); + rfcomm_dlc_link(s, d); + + rfcomm_apply_pn(d, cr, pn); + + d->state = BT_OPEN; + rfcomm_send_pn(s, 0, d); + } else { + rfcomm_send_dm(s, dlci); + } + } + return 0; +} + +static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_buff *skb) +{ + struct rfcomm_rpn *rpn = (void *) skb->data; + u8 dlci = __get_dlci(rpn->dlci); + + u8 bit_rate = 0; + u8 data_bits = 0; + u8 stop_bits = 0; + u8 parity = 0; + u8 flow_ctrl = 0; + u8 xon_char = 0; + u8 xoff_char = 0; + u16 rpn_mask = RFCOMM_RPN_PM_ALL; + + BT_DBG("dlci %d cr %d len 0x%x bitr 0x%x line 0x%x flow 0x%x xonc 0x%x xoffc 0x%x pm 0x%x", + dlci, cr, len, rpn->bit_rate, rpn->line_settings, rpn->flow_ctrl, + rpn->xon_char, rpn->xoff_char, rpn->param_mask); + + if (!cr) + return 0; + + if (len == 1) { + /* This is a request, return default settings */ + bit_rate = RFCOMM_RPN_BR_115200; + data_bits = RFCOMM_RPN_DATA_8; + stop_bits = RFCOMM_RPN_STOP_1; + parity = RFCOMM_RPN_PARITY_NONE; + flow_ctrl = RFCOMM_RPN_FLOW_NONE; + xon_char = RFCOMM_RPN_XON_CHAR; + xoff_char = RFCOMM_RPN_XOFF_CHAR; + goto rpn_out; + } + + /* Check for sane values, ignore/accept bit_rate, 8 bits, 1 stop bit, + * no parity, no flow control lines, normal XON/XOFF chars */ + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_BITRATE)) { + bit_rate = rpn->bit_rate; + if (bit_rate != RFCOMM_RPN_BR_115200) { + BT_DBG("RPN bit rate mismatch 0x%x", bit_rate); + bit_rate = RFCOMM_RPN_BR_115200; + rpn_mask ^= RFCOMM_RPN_PM_BITRATE; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_DATA)) { + data_bits = __get_rpn_data_bits(rpn->line_settings); + if (data_bits != RFCOMM_RPN_DATA_8) { + BT_DBG("RPN data bits mismatch 0x%x", data_bits); + data_bits = RFCOMM_RPN_DATA_8; + rpn_mask ^= RFCOMM_RPN_PM_DATA; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_STOP)) { + stop_bits = __get_rpn_stop_bits(rpn->line_settings); + if (stop_bits != RFCOMM_RPN_STOP_1) { + BT_DBG("RPN stop bits mismatch 0x%x", stop_bits); + stop_bits = RFCOMM_RPN_STOP_1; + rpn_mask ^= RFCOMM_RPN_PM_STOP; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_PARITY)) { + parity = __get_rpn_parity(rpn->line_settings); + if (parity != RFCOMM_RPN_PARITY_NONE) { + BT_DBG("RPN parity mismatch 0x%x", parity); + parity = RFCOMM_RPN_PARITY_NONE; + rpn_mask ^= RFCOMM_RPN_PM_PARITY; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_FLOW)) { + flow_ctrl = rpn->flow_ctrl; + if (flow_ctrl != RFCOMM_RPN_FLOW_NONE) { + BT_DBG("RPN flow ctrl mismatch 0x%x", flow_ctrl); + flow_ctrl = RFCOMM_RPN_FLOW_NONE; + rpn_mask ^= RFCOMM_RPN_PM_FLOW; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_XON)) { + xon_char = rpn->xon_char; + if (xon_char != RFCOMM_RPN_XON_CHAR) { + 
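+ /* Same pattern as the parameters above: a value we do not accept is
+  * replaced by our default and its bit is cleared from rpn_mask, so the
+  * RPN response tells the peer exactly which settings were rejected. */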
BT_DBG("RPN XON char mismatch 0x%x", xon_char); + xon_char = RFCOMM_RPN_XON_CHAR; + rpn_mask ^= RFCOMM_RPN_PM_XON; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_XOFF)) { + xoff_char = rpn->xoff_char; + if (xoff_char != RFCOMM_RPN_XOFF_CHAR) { + BT_DBG("RPN XOFF char mismatch 0x%x", xoff_char); + xoff_char = RFCOMM_RPN_XOFF_CHAR; + rpn_mask ^= RFCOMM_RPN_PM_XOFF; + } + } + +rpn_out: + rfcomm_send_rpn(s, 0, dlci, bit_rate, data_bits, stop_bits, + parity, flow_ctrl, xon_char, xoff_char, rpn_mask); + + return 0; +} + +static int rfcomm_recv_rls(struct rfcomm_session *s, int cr, struct sk_buff *skb) +{ + struct rfcomm_rls *rls = (void *) skb->data; + u8 dlci = __get_dlci(rls->dlci); + + BT_DBG("dlci %d cr %d status 0x%x", dlci, cr, rls->status); + + if (!cr) + return 0; + + /* We should probably do something with this information here. But + * for now it's sufficient just to reply -- Bluetooth 1.1 says it's + * mandatory to recognise and respond to RLS */ + + rfcomm_send_rls(s, 0, dlci, rls->status); + + return 0; +} + +static int rfcomm_recv_msc(struct rfcomm_session *s, int cr, struct sk_buff *skb) +{ + struct rfcomm_msc *msc = (void *) skb->data; + struct rfcomm_dlc *d; + u8 dlci = __get_dlci(msc->dlci); + + BT_DBG("dlci %d cr %d v24 0x%x", dlci, cr, msc->v24_sig); + + d = rfcomm_dlc_get(s, dlci); + if (!d) + return 0; + + if (cr) { + if (msc->v24_sig & RFCOMM_V24_FC && !d->cfc) + set_bit(RFCOMM_TX_THROTTLED, &d->flags); + else + clear_bit(RFCOMM_TX_THROTTLED, &d->flags); + + rfcomm_dlc_lock(d); + + d->remote_v24_sig = msc->v24_sig; + + if (d->modem_status) + d->modem_status(d, msc->v24_sig); + + rfcomm_dlc_unlock(d); + + rfcomm_send_msc(s, 0, dlci, msc->v24_sig); + + d->mscex |= RFCOMM_MSCEX_RX; + } else + d->mscex |= RFCOMM_MSCEX_TX; + + return 0; +} + +static int rfcomm_recv_mcc(struct rfcomm_session *s, struct sk_buff *skb) +{ + struct rfcomm_mcc *mcc = (void *) skb->data; + u8 type, cr, len; + + cr = __test_cr(mcc->type); + type = __get_mcc_type(mcc->type); + len = __get_mcc_len(mcc->len); + + BT_DBG("%p type 0x%x cr %d", s, type, cr); + + skb_pull(skb, 2); + + switch (type) { + case RFCOMM_PN: + rfcomm_recv_pn(s, cr, skb); + break; + + case RFCOMM_RPN: + rfcomm_recv_rpn(s, cr, len, skb); + break; + + case RFCOMM_RLS: + rfcomm_recv_rls(s, cr, skb); + break; + + case RFCOMM_MSC: + rfcomm_recv_msc(s, cr, skb); + break; + + case RFCOMM_FCOFF: + if (cr) { + set_bit(RFCOMM_TX_THROTTLED, &s->flags); + rfcomm_send_fcoff(s, 0); + } + break; + + case RFCOMM_FCON: + if (cr) { + clear_bit(RFCOMM_TX_THROTTLED, &s->flags); + rfcomm_send_fcon(s, 0); + } + break; + + case RFCOMM_TEST: + if (cr) + rfcomm_send_test(s, 0, skb->data, skb->len); + break; + + case RFCOMM_NSC: + break; + + default: + BT_ERR("Unknown control type 0x%02x", type); + rfcomm_send_nsc(s, cr, type); + break; + } + return 0; +} + +static int rfcomm_recv_data(struct rfcomm_session *s, u8 dlci, int pf, struct sk_buff *skb) +{ + struct rfcomm_dlc *d; + + BT_DBG("session %p state %ld dlci %d pf %d", s, s->state, dlci, pf); + + d = rfcomm_dlc_get(s, dlci); + if (!d) { + rfcomm_send_dm(s, dlci); + goto drop; + } + + if (pf && d->cfc) { + u8 credits = *(u8 *) skb->data; skb_pull(skb, 1); + + d->tx_credits += credits; + if (d->tx_credits) + clear_bit(RFCOMM_TX_THROTTLED, &d->flags); + } + + if (skb->len && d->state == BT_CONNECTED) { + rfcomm_dlc_lock(d); + d->rx_credits--; + d->data_ready(d, skb); + rfcomm_dlc_unlock(d); + return 0; + } + +drop: + kfree_skb(skb); + return 0; +} + +static int rfcomm_recv_frame(struct 
rfcomm_session *s, struct sk_buff *skb) +{ + struct rfcomm_hdr *hdr = (void *) skb->data; + u8 type, dlci, fcs; + + dlci = __get_dlci(hdr->addr); + type = __get_type(hdr->ctrl); + + /* Trim FCS */ + skb->len--; skb->tail--; + fcs = *(u8 *)skb_tail_pointer(skb); + + if (__check_fcs(skb->data, type, fcs)) { + BT_ERR("bad checksum in packet"); + kfree_skb(skb); + return -EILSEQ; + } + + if (__test_ea(hdr->len)) + skb_pull(skb, 3); + else + skb_pull(skb, 4); + + switch (type) { + case RFCOMM_SABM: + if (__test_pf(hdr->ctrl)) + rfcomm_recv_sabm(s, dlci); + break; + + case RFCOMM_DISC: + if (__test_pf(hdr->ctrl)) + rfcomm_recv_disc(s, dlci); + break; + + case RFCOMM_UA: + if (__test_pf(hdr->ctrl)) + rfcomm_recv_ua(s, dlci); + break; + + case RFCOMM_DM: + rfcomm_recv_dm(s, dlci); + break; + + case RFCOMM_UIH: + if (dlci) + return rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb); + + rfcomm_recv_mcc(s, skb); + break; + + default: + BT_ERR("Unknown packet type 0x%02x\n", type); + break; + } + kfree_skb(skb); + return 0; +} + +/* ---- Connection and data processing ---- */ + +static void rfcomm_process_connect(struct rfcomm_session *s) +{ + struct rfcomm_dlc *d; + struct list_head *p, *n; + + BT_DBG("session %p state %ld", s, s->state); + + list_for_each_safe(p, n, &s->dlcs) { + d = list_entry(p, struct rfcomm_dlc, list); + if (d->state == BT_CONFIG) { + d->mtu = s->mtu; + if (rfcomm_check_security(d)) { + rfcomm_send_pn(s, 1, d); + } else { + set_bit(RFCOMM_AUTH_PENDING, &d->flags); + rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); + } + } + } +} + +/* Send data queued for the DLC. + * Return number of frames left in the queue. + */ +static inline int rfcomm_process_tx(struct rfcomm_dlc *d) +{ + struct sk_buff *skb; + int err; + + BT_DBG("dlc %p state %ld cfc %d rx_credits %d tx_credits %d", + d, d->state, d->cfc, d->rx_credits, d->tx_credits); + + /* Send pending MSC */ + if (test_and_clear_bit(RFCOMM_MSC_PENDING, &d->flags)) + rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); + + if (d->cfc) { + /* CFC enabled. + * Give them some credits */ + if (!test_bit(RFCOMM_RX_THROTTLED, &d->flags) && + d->rx_credits <= (d->cfc >> 2)) { + rfcomm_send_credits(d->session, d->addr, d->cfc - d->rx_credits); + d->rx_credits = d->cfc; + } + } else { + /* CFC disabled. + * Give ourselves some credits */ + d->tx_credits = 5; + } + + if (test_bit(RFCOMM_TX_THROTTLED, &d->flags)) + return skb_queue_len(&d->tx_queue); + + while (d->tx_credits && (skb = skb_dequeue(&d->tx_queue))) { + err = rfcomm_send_frame(d->session, skb->data, skb->len); + if (err < 0) { + skb_queue_head(&d->tx_queue, skb); + break; + } + kfree_skb(skb); + d->tx_credits--; + } + + if (d->cfc && !d->tx_credits) { + /* We're out of TX credits. + * Set TX_THROTTLED flag to avoid unnesary wakeups by dlc_send. 
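+ * The flag is cleared again in rfcomm_recv_data() as soon as the remote
+ * end grants fresh credits.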
*/ + set_bit(RFCOMM_TX_THROTTLED, &d->flags); + } + + return skb_queue_len(&d->tx_queue); +} + +static inline void rfcomm_process_dlcs(struct rfcomm_session *s) +{ + struct rfcomm_dlc *d; + struct list_head *p, *n; + + BT_DBG("session %p state %ld", s, s->state); + + list_for_each_safe(p, n, &s->dlcs) { + d = list_entry(p, struct rfcomm_dlc, list); + + if (test_bit(RFCOMM_TIMED_OUT, &d->flags)) { + __rfcomm_dlc_close(d, ETIMEDOUT); + continue; + } + + if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) { + rfcomm_dlc_clear_timer(d); + if (d->out) { + rfcomm_send_pn(s, 1, d); + rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); + } else { + if (d->defer_setup) { + set_bit(RFCOMM_DEFER_SETUP, &d->flags); + rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); + + rfcomm_dlc_lock(d); + d->state = BT_CONNECT2; + d->state_change(d, 0); + rfcomm_dlc_unlock(d); + } else + rfcomm_dlc_accept(d); + } + continue; + } else if (test_and_clear_bit(RFCOMM_AUTH_REJECT, &d->flags)) { + rfcomm_dlc_clear_timer(d); + if (!d->out) + rfcomm_send_dm(s, d->dlci); + else + d->state = BT_CLOSED; + __rfcomm_dlc_close(d, ECONNREFUSED); + continue; + } + + if (test_bit(RFCOMM_SEC_PENDING, &d->flags)) + continue; + + if (test_bit(RFCOMM_TX_THROTTLED, &s->flags)) + continue; + + if ((d->state == BT_CONNECTED || d->state == BT_DISCONN) && + d->mscex == RFCOMM_MSCEX_OK) + rfcomm_process_tx(d); + } +} + +static inline void rfcomm_process_rx(struct rfcomm_session *s) +{ + struct socket *sock = s->sock; + struct sock *sk = sock->sk; + struct sk_buff *skb; + + BT_DBG("session %p state %ld qlen %d", s, s->state, skb_queue_len(&sk->sk_receive_queue)); + + /* Get data directly from socket receive queue without copying it. */ + while ((skb = skb_dequeue(&sk->sk_receive_queue))) { + skb_orphan(skb); + rfcomm_recv_frame(s, skb); + } + + if (sk->sk_state == BT_CLOSED) { + if (!s->initiator) + rfcomm_session_put(s); + + rfcomm_session_close(s, sk->sk_err); + } +} + +static inline void rfcomm_accept_connection(struct rfcomm_session *s) +{ + struct socket *sock = s->sock, *nsock; + int err; + + /* Fast check for a new connection. + * Avoids unnesesary socket allocations. */ + if (list_empty(&bt_sk(sock->sk)->accept_q)) + return; + + BT_DBG("session %p", s); + + err = kernel_accept(sock, &nsock, O_NONBLOCK); + if (err < 0) + return; + + /* Set our callbacks */ + nsock->sk->sk_data_ready = rfcomm_l2data_ready; + nsock->sk->sk_state_change = rfcomm_l2state_change; + + s = rfcomm_session_add(nsock, BT_OPEN); + if (s) { + rfcomm_session_hold(s); + + /* We should adjust MTU on incoming sessions. + * L2CAP MTU minus UIH header and FCS. */ + s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5; + + rfcomm_schedule(RFCOMM_SCHED_RX); + } else + sock_release(nsock); +} + +static inline void rfcomm_check_connection(struct rfcomm_session *s) +{ + struct sock *sk = s->sock->sk; + + BT_DBG("%p state %ld", s, s->state); + + switch(sk->sk_state) { + case BT_CONNECTED: + s->state = BT_CONNECT; + + /* We can adjust MTU on outgoing sessions. + * L2CAP MTU minus UIH header and FCS. 
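+ * That is one address octet, one control octet, a worst case two octet
+ * length field and the FCS, hence the 5 bytes subtracted below.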
*/ + s->mtu = min(l2cap_pi(sk)->omtu, l2cap_pi(sk)->imtu) - 5; + + rfcomm_send_sabm(s, 0); + break; + + case BT_CLOSED: + s->state = BT_CLOSED; + rfcomm_session_close(s, sk->sk_err); + break; + } +} + +static inline void rfcomm_process_sessions(void) +{ + struct list_head *p, *n; + + rfcomm_lock(); + + list_for_each_safe(p, n, &session_list) { + struct rfcomm_session *s; + s = list_entry(p, struct rfcomm_session, list); + + if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { + s->state = BT_DISCONN; + rfcomm_send_disc(s, 0); + rfcomm_session_put(s); + continue; + } + + if (s->state == BT_LISTEN) { + rfcomm_accept_connection(s); + continue; + } + + rfcomm_session_hold(s); + + switch (s->state) { + case BT_BOUND: + rfcomm_check_connection(s); + break; + + default: + rfcomm_process_rx(s); + break; + } + + rfcomm_process_dlcs(s); + + rfcomm_session_put(s); + } + + rfcomm_unlock(); +} + +static int rfcomm_add_listener(bdaddr_t *ba) +{ + struct sockaddr_l2 addr; + struct socket *sock; + struct sock *sk; + struct rfcomm_session *s; + int err = 0; + + /* Create socket */ + err = rfcomm_l2sock_create(&sock); + if (err < 0) { + BT_ERR("Create socket failed %d", err); + return err; + } + + /* Bind socket */ + bacpy(&addr.l2_bdaddr, ba); + addr.l2_family = AF_BLUETOOTH; + addr.l2_psm = cpu_to_le16(RFCOMM_PSM); + addr.l2_cid = 0; + err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); + if (err < 0) { + BT_ERR("Bind failed %d", err); + goto failed; + } + + /* Set L2CAP options */ + sk = sock->sk; + lock_sock(sk); + l2cap_pi(sk)->imtu = l2cap_mtu; + release_sock(sk); + + /* Start listening on the socket */ + err = kernel_listen(sock, 10); + if (err) { + BT_ERR("Listen failed %d", err); + goto failed; + } + + /* Add listening session */ + s = rfcomm_session_add(sock, BT_LISTEN); + if (!s) + goto failed; + + rfcomm_session_hold(s); + return 0; +failed: + sock_release(sock); + return err; +} + +static void rfcomm_kill_listener(void) +{ + struct rfcomm_session *s; + struct list_head *p, *n; + + BT_DBG(""); + + list_for_each_safe(p, n, &session_list) { + s = list_entry(p, struct rfcomm_session, list); + rfcomm_session_del(s); + } +} + +static int rfcomm_run(void *unused) +{ + BT_DBG(""); + + set_user_nice(current, -10); + + rfcomm_add_listener(BDADDR_ANY); + + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) { + /* No pending events. Let's sleep. + * Incoming connections and data will wake us up. 
*/ + schedule(); + } + set_current_state(TASK_RUNNING); + + /* Process stuff */ + clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); + rfcomm_process_sessions(); + } + + rfcomm_kill_listener(); + + return 0; +} + +static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt) +{ + struct rfcomm_session *s; + struct rfcomm_dlc *d; + struct list_head *p, *n; + + BT_DBG("conn %p status 0x%02x encrypt 0x%02x", conn, status, encrypt); + + s = rfcomm_session_get(&conn->hdev->bdaddr, &conn->dst); + if (!s) + return; + + rfcomm_session_hold(s); + + list_for_each_safe(p, n, &s->dlcs) { + d = list_entry(p, struct rfcomm_dlc, list); + + if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) { + rfcomm_dlc_clear_timer(d); + if (status || encrypt == 0x00) { + __rfcomm_dlc_close(d, ECONNREFUSED); + continue; + } + } + + if (d->state == BT_CONNECTED && !status && encrypt == 0x00) { + if (d->sec_level == BT_SECURITY_MEDIUM) { + set_bit(RFCOMM_SEC_PENDING, &d->flags); + rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); + continue; + } else if (d->sec_level == BT_SECURITY_HIGH) { + __rfcomm_dlc_close(d, ECONNREFUSED); + continue; + } + } + + if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) + continue; + + if (!status) + set_bit(RFCOMM_AUTH_ACCEPT, &d->flags); + else + set_bit(RFCOMM_AUTH_REJECT, &d->flags); + } + + rfcomm_session_put(s); + + rfcomm_schedule(RFCOMM_SCHED_AUTH); +} + +static struct hci_cb rfcomm_cb = { + .name = "RFCOMM", + .security_cfm = rfcomm_security_cfm +}; + +static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, + struct class_attribute *attr, + char *buf) +{ + struct rfcomm_session *s; + struct list_head *pp, *p; + char *str = buf; + + rfcomm_lock(); + + list_for_each(p, &session_list) { + s = list_entry(p, struct rfcomm_session, list); + list_for_each(pp, &s->dlcs) { + struct sock *sk = s->sock->sk; + struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); + + str += sprintf(str, "%s %s %ld %d %d %d %d\n", + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), + d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); + } + } + + rfcomm_unlock(); + + return (str - buf); +} + +static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); + +/* ---- Initialization ---- */ +static int __init rfcomm_init(void) +{ + int err; + + l2cap_load(); + + hci_register_cb(&rfcomm_cb); + + rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); + if (IS_ERR(rfcomm_thread)) { + err = PTR_ERR(rfcomm_thread); + goto unregister; + } + + if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) + BT_ERR("Failed to create RFCOMM info file"); + + err = rfcomm_init_ttys(); + if (err < 0) + goto stop; + + err = rfcomm_init_sockets(); + if (err < 0) + goto cleanup; + + BT_INFO("RFCOMM ver %s", VERSION); + + return 0; + +cleanup: + rfcomm_cleanup_ttys(); + +stop: + kthread_stop(rfcomm_thread); + +unregister: + hci_unregister_cb(&rfcomm_cb); + + return err; +} + +static void __exit rfcomm_exit(void) +{ + class_remove_file(bt_class, &class_attr_rfcomm_dlc); + + hci_unregister_cb(&rfcomm_cb); + + kthread_stop(rfcomm_thread); + + rfcomm_cleanup_ttys(); + + rfcomm_cleanup_sockets(); +} + +module_init(rfcomm_init); +module_exit(rfcomm_exit); + +module_param(disable_cfc, bool, 0644); +MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); + +module_param(channel_mtu, int, 0644); +MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel"); + +module_param(l2cap_mtu, uint, 0644); +MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); + 
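+/* These are ordinary module parameters: they can be given at load time,
+ * e.g. "modprobe rfcomm l2cap_mtu=1013 channel_mtu=1011" (values shown
+ * purely as an illustration), or changed later through
+ * /sys/module/rfcomm/parameters/ thanks to the 0644 permissions. */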
+module_param(l2cap_ertm, bool, 0644); +MODULE_PARM_DESC(l2cap_ertm, "Use L2CAP ERTM mode for connection"); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-3"); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c new file mode 100644 index 0000000..170b626 --- /dev/null +++ b/net/bluetooth/rfcomm/sock.c @@ -0,0 +1,1159 @@ +/* + RFCOMM implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002 Maxim Krasnyansky + Copyright (C) 2002 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* + * RFCOMM sockets. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +static const struct proto_ops rfcomm_sock_ops; + +static struct bt_sock_list rfcomm_sk_list = { + .lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock) +}; + +static void rfcomm_sock_close(struct sock *sk); +static void rfcomm_sock_kill(struct sock *sk); + +/* ---- DLC callbacks ---- + * + * called under rfcomm_dlc_lock() + */ +static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) +{ + struct sock *sk = d->owner; + if (!sk) + return; + + atomic_add(skb->len, &sk->sk_rmem_alloc); + skb_queue_tail(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk, skb->len); + + if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) + rfcomm_dlc_throttle(d); +} + +static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) +{ + struct sock *sk = d->owner, *parent; + if (!sk) + return; + + BT_DBG("dlc %p state %ld err %d", d, d->state, err); + + bh_lock_sock(sk); + + if (err) + sk->sk_err = err; + + sk->sk_state = d->state; + + parent = bt_sk(sk)->parent; + if (parent) { + if (d->state == BT_CLOSED) { + sock_set_flag(sk, SOCK_ZAPPED); + bt_accept_unlink(sk); + } + parent->sk_data_ready(parent, 0); + } else { + if (d->state == BT_CONNECTED) + rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL); + sk->sk_state_change(sk); + } + + bh_unlock_sock(sk); + + if (parent && sock_flag(sk, SOCK_ZAPPED)) { + /* We have to drop DLC lock here, otherwise + * rfcomm_sock_destruct() will dead lock. 
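+ * rfcomm_sock_kill() may drop the last socket reference and run
+ * rfcomm_sock_destruct(), which takes this same DLC lock.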
*/ + rfcomm_dlc_unlock(d); + rfcomm_sock_kill(sk); + rfcomm_dlc_lock(d); + } +} + +/* ---- Socket functions ---- */ +static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src) +{ + struct sock *sk = NULL; + struct hlist_node *node; + + sk_for_each(sk, node, &rfcomm_sk_list.head) { + if (rfcomm_pi(sk)->channel == channel && + !bacmp(&bt_sk(sk)->src, src)) + break; + } + + return node ? sk : NULL; +} + +/* Find socket with channel and source bdaddr. + * Returns closest match. + */ +static struct sock *__rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) +{ + struct sock *sk = NULL, *sk1 = NULL; + struct hlist_node *node; + + sk_for_each(sk, node, &rfcomm_sk_list.head) { + if (state && sk->sk_state != state) + continue; + + if (rfcomm_pi(sk)->channel == channel) { + /* Exact match. */ + if (!bacmp(&bt_sk(sk)->src, src)) + break; + + /* Closest match */ + if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) + sk1 = sk; + } + } + return node ? sk : sk1; +} + +/* Find socket with given address (channel, src). + * Returns locked socket */ +static inline struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) +{ + struct sock *s; + read_lock(&rfcomm_sk_list.lock); + s = __rfcomm_get_sock_by_channel(state, channel, src); + if (s) bh_lock_sock(s); + read_unlock(&rfcomm_sk_list.lock); + return s; +} + +static void rfcomm_sock_destruct(struct sock *sk) +{ + struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; + + BT_DBG("sk %p dlc %p", sk, d); + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); + + rfcomm_dlc_lock(d); + rfcomm_pi(sk)->dlc = NULL; + + /* Detach DLC if it's owned by this socket */ + if (d->owner == sk) + d->owner = NULL; + rfcomm_dlc_unlock(d); + + rfcomm_dlc_put(d); +} + +static void rfcomm_sock_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + BT_DBG("parent %p", parent); + + /* Close not yet accepted dlcs */ + while ((sk = bt_accept_dequeue(parent, NULL))) { + rfcomm_sock_close(sk); + rfcomm_sock_kill(sk); + } + + parent->sk_state = BT_CLOSED; + sock_set_flag(parent, SOCK_ZAPPED); +} + +/* Kill socket (only if zapped and orphan) + * Must be called on unlocked socket. + */ +static void rfcomm_sock_kill(struct sock *sk) +{ + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + return; + + BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt)); + + /* Kill poor orphan */ + bt_sock_unlink(&rfcomm_sk_list, sk); + sock_set_flag(sk, SOCK_DEAD); + sock_put(sk); +} + +static void __rfcomm_sock_close(struct sock *sk) +{ + struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; + + BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); + + switch (sk->sk_state) { + case BT_LISTEN: + rfcomm_sock_cleanup_listen(sk); + break; + + case BT_CONNECT: + case BT_CONNECT2: + case BT_CONFIG: + case BT_CONNECTED: + rfcomm_dlc_close(d, 0); + + default: + sock_set_flag(sk, SOCK_ZAPPED); + break; + } +} + +/* Close socket. + * Must be called on unlocked socket. 
+ */ +static void rfcomm_sock_close(struct sock *sk) +{ + lock_sock(sk); + __rfcomm_sock_close(sk); + release_sock(sk); +} + +static void rfcomm_sock_init(struct sock *sk, struct sock *parent) +{ + struct rfcomm_pinfo *pi = rfcomm_pi(sk); + + BT_DBG("sk %p", sk); + + if (parent) { + sk->sk_type = parent->sk_type; + pi->dlc->defer_setup = bt_sk(parent)->defer_setup; + + pi->sec_level = rfcomm_pi(parent)->sec_level; + pi->role_switch = rfcomm_pi(parent)->role_switch; + } else { + pi->dlc->defer_setup = 0; + + pi->sec_level = BT_SECURITY_LOW; + pi->role_switch = 0; + } + + pi->dlc->sec_level = pi->sec_level; + pi->dlc->role_switch = pi->role_switch; +} + +static struct proto rfcomm_proto = { + .name = "RFCOMM", + .owner = THIS_MODULE, + .obj_size = sizeof(struct rfcomm_pinfo) +}; + +static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) +{ + struct rfcomm_dlc *d; + struct sock *sk; + + sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto); + if (!sk) + return NULL; + + sock_init_data(sock, sk); + INIT_LIST_HEAD(&bt_sk(sk)->accept_q); + + d = rfcomm_dlc_alloc(prio); + if (!d) { + sk_free(sk); + return NULL; + } + + d->data_ready = rfcomm_sk_data_ready; + d->state_change = rfcomm_sk_state_change; + + rfcomm_pi(sk)->dlc = d; + d->owner = sk; + + sk->sk_destruct = rfcomm_sock_destruct; + sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT; + + sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; + sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = proto; + sk->sk_state = BT_OPEN; + + bt_sock_link(&rfcomm_sk_list, sk); + + BT_DBG("sk %p", sk); + return sk; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) +static int rfcomm_sock_create(struct net *net, struct socket *sock, + int protocol, int kern) +#else +static int rfcomm_sock_create(struct net *net, struct socket *sock, + int protocol) +#endif +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + sock->state = SS_UNCONNECTED; + + if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sock->ops = &rfcomm_sock_ops; + + sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC); + if (!sk) + return -ENOMEM; + + rfcomm_sock_init(sk, NULL); + return 0; +} + +static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) +{ + struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p %s", sk, batostr(&sa->rc_bdaddr)); + + if (!addr || addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + lock_sock(sk); + + if (sk->sk_state != BT_OPEN) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + goto done; + } + + write_lock_bh(&rfcomm_sk_list.lock); + + if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) { + err = -EADDRINUSE; + } else { + /* Save source address */ + bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr); + rfcomm_pi(sk)->channel = sa->rc_channel; + sk->sk_state = BT_BOUND; + } + + write_unlock_bh(&rfcomm_sk_list.lock); + +done: + release_sock(sk); + return err; +} + +static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) +{ + struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; + struct sock *sk = sock->sk; + struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; + int err = 0; + + BT_DBG("sk %p", sk); + + if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc)) + return -EINVAL; + + lock_sock(sk); + + if 
(sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + goto done; + } + + sk->sk_state = BT_CONNECT; + bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr); + rfcomm_pi(sk)->channel = sa->rc_channel; + + d->sec_level = rfcomm_pi(sk)->sec_level; + d->role_switch = rfcomm_pi(sk)->role_switch; + + err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel); + if (!err) + err = bt_sock_wait_state(sk, BT_CONNECTED, + sock_sndtimeo(sk, flags & O_NONBLOCK)); + +done: + release_sock(sk); + return err; +} + +static int rfcomm_sock_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p backlog %d", sk, backlog); + + lock_sock(sk); + + if (sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + goto done; + } + + if (!rfcomm_pi(sk)->channel) { + bdaddr_t *src = &bt_sk(sk)->src; + u8 channel; + + err = -EINVAL; + + write_lock_bh(&rfcomm_sk_list.lock); + + for (channel = 1; channel < 31; channel++) + if (!__rfcomm_get_sock_by_addr(channel, src)) { + rfcomm_pi(sk)->channel = channel; + err = 0; + break; + } + + write_unlock_bh(&rfcomm_sk_list.lock); + + if (err < 0) + goto done; + } + + sk->sk_max_ack_backlog = backlog; + sk->sk_ack_backlog = 0; + sk->sk_state = BT_LISTEN; + +done: + release_sock(sk); + return err; +} + +static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags) +{ + DECLARE_WAITQUEUE(wait, current); + struct sock *sk = sock->sk, *nsk; + long timeo; + int err = 0; + + lock_sock(sk); + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + goto done; + } + + timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + + BT_DBG("sk %p timeo %ld", sk, timeo); + + /* Wait for an incoming connection. (wake-one). 
*/ + add_wait_queue_exclusive(sk->sk_sleep, &wait); + while (!(nsk = bt_accept_dequeue(sk, newsock))) { + set_current_state(TASK_INTERRUPTIBLE); + if (!timeo) { + err = -EAGAIN; + break; + } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + } + set_current_state(TASK_RUNNING); + remove_wait_queue(sk->sk_sleep, &wait); + + if (err) + goto done; + + newsock->state = SS_CONNECTED; + + BT_DBG("new socket %p", nsk); + +done: + release_sock(sk); + return err; +} + +static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) +{ + struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; + struct sock *sk = sock->sk; + + BT_DBG("sock %p, sk %p", sock, sk); + + sa->rc_family = AF_BLUETOOTH; + sa->rc_channel = rfcomm_pi(sk)->channel; + if (peer) + bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst); + else + bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src); + + *len = sizeof(struct sockaddr_rc); + return 0; +} + +static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len) +{ + struct sock *sk = sock->sk; + struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; + struct sk_buff *skb; + int sent = 0; + + if (test_bit(RFCOMM_DEFER_SETUP, &d->flags)) + return -ENOTCONN; + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + if (sk->sk_shutdown & SEND_SHUTDOWN) + return -EPIPE; + + BT_DBG("sock %p, sk %p", sock, sk); + + lock_sock(sk); + + while (len) { + size_t size = min_t(size_t, len, d->mtu); + int err; + + skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE, + msg->msg_flags & MSG_DONTWAIT, &err); + if (!skb) { + if (sent == 0) + sent = err; + break; + } + skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); + + err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); + if (err) { + kfree_skb(skb); + if (sent == 0) + sent = err; + break; + } + + err = rfcomm_dlc_send(d, skb); + if (err < 0) { + kfree_skb(skb); + if (sent == 0) + sent = err; + break; + } + + sent += size; + len -= size; + } + + release_sock(sk); + + return sent; +} + +static long rfcomm_sock_data_wait(struct sock *sk, long timeo) +{ + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue(sk->sk_sleep, &wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + if (!skb_queue_empty(&sk->sk_receive_queue) || + sk->sk_err || + (sk->sk_shutdown & RCV_SHUTDOWN) || + signal_pending(current) || + !timeo) + break; + + set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); + clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); + } + + __set_current_state(TASK_RUNNING); + remove_wait_queue(sk->sk_sleep, &wait); + return timeo; +} + +static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t size, int flags) +{ + struct sock *sk = sock->sk; + struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; + int err = 0; + size_t target, copied = 0; + long timeo; + + if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { + rfcomm_dlc_accept(d); + return 0; + } + + if (flags & MSG_OOB) + return -EOPNOTSUPP; + + msg->msg_namelen = 0; + + BT_DBG("sk %p size %zu", sk, size); + + lock_sock(sk); + + target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); + + do { + struct sk_buff *skb; + int chunk; + + skb = skb_dequeue(&sk->sk_receive_queue); + if (!skb) { + if (copied >= target) + 
break; + + if ((err = sock_error(sk)) != 0) + break; + if (sk->sk_shutdown & RCV_SHUTDOWN) + break; + + err = -EAGAIN; + if (!timeo) + break; + + timeo = rfcomm_sock_data_wait(sk, timeo); + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + goto out; + } + continue; + } + + chunk = min_t(unsigned int, skb->len, size); + if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { + skb_queue_head(&sk->sk_receive_queue, skb); + if (!copied) + copied = -EFAULT; + break; + } + copied += chunk; + size -= chunk; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + sock_recv_ts_and_drops(msg, sk, skb); +#else + sock_recv_timestamp(msg, sk, skb); +#endif + + if (!(flags & MSG_PEEK)) { + atomic_sub(chunk, &sk->sk_rmem_alloc); + + skb_pull(skb, chunk); + if (skb->len) { + skb_queue_head(&sk->sk_receive_queue, skb); + break; + } + kfree_skb(skb); + + } else { + /* put message back and return */ + skb_queue_head(&sk->sk_receive_queue, skb); + break; + } + } while (size); + +out: + if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) + rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc); + + release_sock(sk); + return copied ? : err; +} + +static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + int err = 0; + u32 opt; + + BT_DBG("sk %p", sk); + + lock_sock(sk); + + switch (optname) { + case RFCOMM_LM: + if (get_user(opt, (u32 __user *) optval)) { + err = -EFAULT; + break; + } + + if (opt & RFCOMM_LM_AUTH) + rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW; + if (opt & RFCOMM_LM_ENCRYPT) + rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM; + if (opt & RFCOMM_LM_SECURE) + rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH; + + rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER); + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) +static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) +#else +static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) +#endif +{ + struct sock *sk = sock->sk; + struct bt_security sec; + int len, err = 0; + u32 opt; + + BT_DBG("sk %p", sk); + + if (level == SOL_RFCOMM) + return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + lock_sock(sk); + + switch (optname) { + case BT_SECURITY: + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + break; + } + + sec.level = BT_SECURITY_LOW; + + len = min_t(unsigned int, sizeof(sec), optlen); + if (copy_from_user((char *) &sec, optval, len)) { + err = -EFAULT; + break; + } + + if (sec.level > BT_SECURITY_HIGH) { + err = -EINVAL; + break; + } + + rfcomm_pi(sk)->sec_level = sec.level; + break; + + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (get_user(opt, (u32 __user *) optval)) { + err = -EFAULT; + break; + } + + bt_sk(sk)->defer_setup = opt; + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct sock *l2cap_sk; + struct rfcomm_conninfo cinfo; + int len, err = 0; + u32 opt; + + BT_DBG("sk %p", sk); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case 
RFCOMM_LM: + switch (rfcomm_pi(sk)->sec_level) { + case BT_SECURITY_LOW: + opt = RFCOMM_LM_AUTH; + break; + case BT_SECURITY_MEDIUM: + opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT; + break; + case BT_SECURITY_HIGH: + opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | + RFCOMM_LM_SECURE; + break; + default: + opt = 0; + break; + } + + if (rfcomm_pi(sk)->role_switch) + opt |= RFCOMM_LM_MASTER; + + if (put_user(opt, (u32 __user *) optval)) + err = -EFAULT; + break; + + case RFCOMM_CONNINFO: + if (sk->sk_state != BT_CONNECTED && + !rfcomm_pi(sk)->dlc->defer_setup) { + err = -ENOTCONN; + break; + } + + l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; + + cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle; + memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3); + + len = min_t(unsigned int, len, sizeof(cinfo)); + if (copy_to_user(optval, (char *) &cinfo, len)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct bt_security sec; + int len, err = 0; + + BT_DBG("sk %p", sk); + + if (level == SOL_RFCOMM) + return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case BT_SECURITY: + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + break; + } + + sec.level = rfcomm_pi(sk)->sec_level; + + len = min_t(unsigned int, len, sizeof(sec)); + if (copy_to_user(optval, (char *) &sec, len)) + err = -EFAULT; + + break; + + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct sock *sk __maybe_unused = sock->sk; + int err; + + BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); + + err = bt_sock_ioctl(sock, cmd, arg); + + if (err == -ENOIOCTLCMD) { +#ifdef CONFIG_BT_RFCOMM_TTY + lock_sock(sk); + err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg); + release_sock(sk); +#else + err = -EOPNOTSUPP; +#endif + } + + return err; +} + +static int rfcomm_sock_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) return 0; + + lock_sock(sk); + if (!sk->sk_shutdown) { + sk->sk_shutdown = SHUTDOWN_MASK; + __rfcomm_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); + } + release_sock(sk); + return err; +} + +static int rfcomm_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int err; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + err = rfcomm_sock_shutdown(sock, 2); + + sock_orphan(sk); + rfcomm_sock_kill(sk); + return err; +} + +/* ---- RFCOMM core layer callbacks ---- + * + * called under rfcomm_lock() + */ +int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d) +{ + struct sock *sk, *parent; + bdaddr_t src, dst; + int result = 0; + + BT_DBG("session %p channel %d", s, channel); + + rfcomm_session_getaddr(s, &src, &dst); + + /* Check if we have socket listening on 
channel */ + parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src); + if (!parent) + return 0; + + /* Check for backlog size */ + if (sk_acceptq_is_full(parent)) { + BT_DBG("backlog full %d", parent->sk_ack_backlog); + goto done; + } + + sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC); + if (!sk) + goto done; + + rfcomm_sock_init(sk, parent); + bacpy(&bt_sk(sk)->src, &src); + bacpy(&bt_sk(sk)->dst, &dst); + rfcomm_pi(sk)->channel = channel; + + sk->sk_state = BT_CONFIG; + bt_accept_enqueue(parent, sk); + + /* Accept connection and return socket DLC */ + *d = rfcomm_pi(sk)->dlc; + result = 1; + +done: + bh_unlock_sock(parent); + + if (bt_sk(parent)->defer_setup) + parent->sk_state_change(parent); + + return result; +} + +static ssize_t rfcomm_sock_sysfs_show(struct class *dev, + struct class_attribute *attr, + char *buf) +{ + struct sock *sk; + struct hlist_node *node; + char *str = buf; + + read_lock_bh(&rfcomm_sk_list.lock); + + sk_for_each(sk, node, &rfcomm_sk_list.head) { + str += sprintf(str, "%s %s %d %d\n", + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), + sk->sk_state, rfcomm_pi(sk)->channel); + } + + read_unlock_bh(&rfcomm_sk_list.lock); + + return (str - buf); +} + +static CLASS_ATTR(rfcomm, S_IRUGO, rfcomm_sock_sysfs_show, NULL); + +static const struct proto_ops rfcomm_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = rfcomm_sock_release, + .bind = rfcomm_sock_bind, + .connect = rfcomm_sock_connect, + .listen = rfcomm_sock_listen, + .accept = rfcomm_sock_accept, + .getname = rfcomm_sock_getname, + .sendmsg = rfcomm_sock_sendmsg, + .recvmsg = rfcomm_sock_recvmsg, + .shutdown = rfcomm_sock_shutdown, + .setsockopt = rfcomm_sock_setsockopt, + .getsockopt = rfcomm_sock_getsockopt, + .ioctl = rfcomm_sock_ioctl, + .poll = bt_sock_poll, + .socketpair = sock_no_socketpair, + .mmap = sock_no_mmap +}; + +static const struct net_proto_family rfcomm_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = rfcomm_sock_create +}; + +int __init rfcomm_init_sockets(void) +{ + int err; + + err = proto_register(&rfcomm_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops); + if (err < 0) + goto error; + + if (class_create_file(bt_class, &class_attr_rfcomm) < 0) + BT_ERR("Failed to create RFCOMM info file"); + + BT_INFO("RFCOMM socket layer initialized"); + + return 0; + +error: + BT_ERR("RFCOMM socket layer registration failed"); + proto_unregister(&rfcomm_proto); + return err; +} + +void rfcomm_cleanup_sockets(void) +{ + class_remove_file(bt_class, &class_attr_rfcomm); + + if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) + BT_ERR("RFCOMM socket layer unregistration failed"); + + proto_unregister(&rfcomm_proto); +} diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c new file mode 100644 index 0000000..6ada289 --- /dev/null +++ b/net/bluetooth/rfcomm/tty.c @@ -0,0 +1,1200 @@ +/* + RFCOMM implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002 Maxim Krasnyansky + Copyright (C) 2002 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* + * RFCOMM TTY. + */ + +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#define RFCOMM_TTY_MAGIC 0x6d02 /* magic number for rfcomm struct */ +#define RFCOMM_TTY_PORTS RFCOMM_MAX_DEV /* whole lotta rfcomm devices */ +#define RFCOMM_TTY_MAJOR 216 /* device node major id of the usb/bluetooth.c driver */ +#define RFCOMM_TTY_MINOR 0 + +static struct tty_driver *rfcomm_tty_driver; + +struct rfcomm_dev { + struct list_head list; + atomic_t refcnt; + + char name[12]; + int id; + unsigned long flags; + atomic_t opened; + int err; + + bdaddr_t src; + bdaddr_t dst; + u8 channel; + + uint modem_status; + + struct rfcomm_dlc *dlc; + struct tty_struct *tty; + wait_queue_head_t wait; + struct tasklet_struct wakeup_task; + + struct device *tty_dev; + + atomic_t wmem_alloc; + + struct sk_buff_head pending; +}; + +static LIST_HEAD(rfcomm_dev_list); +static DEFINE_RWLOCK(rfcomm_dev_lock); + +static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb); +static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); +static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); + +static void rfcomm_tty_wakeup(unsigned long arg); + +/* ---- Device functions ---- */ +static void rfcomm_dev_destruct(struct rfcomm_dev *dev) +{ + struct rfcomm_dlc *dlc = dev->dlc; + + BT_DBG("dev %p dlc %p", dev, dlc); + + /* Refcount should only hit zero when called from rfcomm_dev_del() + which will have taken us off the list. Everything else are + refcounting bugs. */ + BUG_ON(!list_empty(&dev->list)); + + rfcomm_dlc_lock(dlc); + /* Detach DLC if it's owned by this dev */ + if (dlc->owner == dev) + dlc->owner = NULL; + rfcomm_dlc_unlock(dlc); + + rfcomm_dlc_put(dlc); + + tty_unregister_device(rfcomm_tty_driver, dev->id); + + kfree(dev); + + /* It's safe to call module_put() here because socket still + holds reference to this module. */ + module_put(THIS_MODULE); +} + +static inline void rfcomm_dev_hold(struct rfcomm_dev *dev) +{ + atomic_inc(&dev->refcnt); +} + +static inline void rfcomm_dev_put(struct rfcomm_dev *dev) +{ + /* The reason this isn't actually a race, as you no + doubt have a little voice screaming at you in your + head, is that the refcount should never actually + reach zero unless the device has already been taken + off the list, in rfcomm_dev_del(). And if that's not + true, we'll hit the BUG() in rfcomm_dev_destruct() + anyway. 
*/ + if (atomic_dec_and_test(&dev->refcnt)) + rfcomm_dev_destruct(dev); +} + +static struct rfcomm_dev *__rfcomm_dev_get(int id) +{ + struct rfcomm_dev *dev; + struct list_head *p; + + list_for_each(p, &rfcomm_dev_list) { + dev = list_entry(p, struct rfcomm_dev, list); + if (dev->id == id) + return dev; + } + + return NULL; +} + +static inline struct rfcomm_dev *rfcomm_dev_get(int id) +{ + struct rfcomm_dev *dev; + + read_lock(&rfcomm_dev_lock); + + dev = __rfcomm_dev_get(id); + + if (dev) { + if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) + dev = NULL; + else + rfcomm_dev_hold(dev); + } + + read_unlock(&rfcomm_dev_lock); + + return dev; +} + +static struct device *rfcomm_get_device(struct rfcomm_dev *dev) +{ + struct hci_dev *hdev; + struct hci_conn *conn; + + hdev = hci_get_route(&dev->dst, &dev->src); + if (!hdev) + return NULL; + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst); + + hci_dev_put(hdev); + + return conn ? &conn->dev : NULL; +} + +static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf) +{ + struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); + bdaddr_t bdaddr; + baswap(&bdaddr, &dev->dst); + return sprintf(buf, "%s\n", batostr(&bdaddr)); +} + +static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf) +{ + struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); + return sprintf(buf, "%d\n", dev->channel); +} + +static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); +static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); + +static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) +{ + struct rfcomm_dev *dev; + struct list_head *head = &rfcomm_dev_list, *p; + int err = 0; + + BT_DBG("id %d channel %d", req->dev_id, req->channel); + + dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + write_lock_bh(&rfcomm_dev_lock); + + if (req->dev_id < 0) { + dev->id = 0; + + list_for_each(p, &rfcomm_dev_list) { + if (list_entry(p, struct rfcomm_dev, list)->id != dev->id) + break; + + dev->id++; + head = p; + } + } else { + dev->id = req->dev_id; + + list_for_each(p, &rfcomm_dev_list) { + struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list); + + if (entry->id == dev->id) { + err = -EADDRINUSE; + goto out; + } + + if (entry->id > dev->id - 1) + break; + + head = p; + } + } + + if ((dev->id < 0) || (dev->id > RFCOMM_MAX_DEV - 1)) { + err = -ENFILE; + goto out; + } + + sprintf(dev->name, "rfcomm%d", dev->id); + + list_add(&dev->list, head); + atomic_set(&dev->refcnt, 1); + + bacpy(&dev->src, &req->src); + bacpy(&dev->dst, &req->dst); + dev->channel = req->channel; + + dev->flags = req->flags & + ((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC)); + + atomic_set(&dev->opened, 0); + + init_waitqueue_head(&dev->wait); + tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev); + + skb_queue_head_init(&dev->pending); + + rfcomm_dlc_lock(dlc); + + if (req->flags & (1 << RFCOMM_REUSE_DLC)) { + struct sock *sk = dlc->owner; + struct sk_buff *skb; + + BUG_ON(!sk); + + rfcomm_dlc_throttle(dlc); + + while ((skb = skb_dequeue(&sk->sk_receive_queue))) { + skb_orphan(skb); + skb_queue_tail(&dev->pending, skb); + atomic_sub(skb->len, &sk->sk_rmem_alloc); + } + } + + dlc->data_ready = rfcomm_dev_data_ready; + dlc->state_change = rfcomm_dev_state_change; + dlc->modem_status = rfcomm_dev_modem_status; + + dlc->owner = dev; + dev->dlc = dlc; + + rfcomm_dev_modem_status(dlc, dlc->remote_v24_sig); + + rfcomm_dlc_unlock(dlc); + + /* It's 
safe to call __module_get() here because socket already + holds reference to this module. */ + __module_get(THIS_MODULE); + +out: + write_unlock_bh(&rfcomm_dev_lock); + + if (err < 0) + goto free; + + dev->tty_dev = tty_register_device(rfcomm_tty_driver, dev->id, NULL); + + if (IS_ERR(dev->tty_dev)) { + err = PTR_ERR(dev->tty_dev); + list_del(&dev->list); + goto free; + } + + dev_set_drvdata(dev->tty_dev, dev); + + if (device_create_file(dev->tty_dev, &dev_attr_address) < 0) + BT_ERR("Failed to create address attribute"); + + if (device_create_file(dev->tty_dev, &dev_attr_channel) < 0) + BT_ERR("Failed to create channel attribute"); + + return dev->id; + +free: + kfree(dev); + return err; +} + +static void rfcomm_dev_del(struct rfcomm_dev *dev) +{ + BT_DBG("dev %p", dev); + + BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)); + + if (atomic_read(&dev->opened) > 0) + return; + + write_lock_bh(&rfcomm_dev_lock); + list_del_init(&dev->list); + write_unlock_bh(&rfcomm_dev_lock); + + rfcomm_dev_put(dev); +} + +/* ---- Send buffer ---- */ +static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc) +{ + /* We can't let it be zero, because we don't get a callback + when tx_credits becomes nonzero, hence we'd never wake up */ + return dlc->mtu * (dlc->tx_credits?:1); +} + +static void rfcomm_wfree(struct sk_buff *skb) +{ + struct rfcomm_dev *dev = (void *) skb->sk; + atomic_sub(skb->truesize, &dev->wmem_alloc); + if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) + tasklet_schedule(&dev->wakeup_task); + rfcomm_dev_put(dev); +} + +static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) +{ + rfcomm_dev_hold(dev); + atomic_add(skb->truesize, &dev->wmem_alloc); + skb->sk = (void *) dev; + skb->destructor = rfcomm_wfree; +} + +static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority) +{ + if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) { + struct sk_buff *skb = alloc_skb(size, priority); + if (skb) { + rfcomm_set_owner_w(skb, dev); + return skb; + } + } + return NULL; +} + +/* ---- Device IOCTLs ---- */ + +#define NOCAP_FLAGS ((1 << RFCOMM_REUSE_DLC) | (1 << RFCOMM_RELEASE_ONHUP)) + +static int rfcomm_create_dev(struct sock *sk, void __user *arg) +{ + struct rfcomm_dev_req req; + struct rfcomm_dlc *dlc; + int id; + + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + + BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags); + + if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) + return -EPERM; + + if (req.flags & (1 << RFCOMM_REUSE_DLC)) { + /* Socket must be connected */ + if (sk->sk_state != BT_CONNECTED) + return -EBADFD; + + dlc = rfcomm_pi(sk)->dlc; + rfcomm_dlc_hold(dlc); + } else { + dlc = rfcomm_dlc_alloc(GFP_KERNEL); + if (!dlc) + return -ENOMEM; + } + + id = rfcomm_dev_add(&req, dlc); + if (id < 0) { + rfcomm_dlc_put(dlc); + return id; + } + + if (req.flags & (1 << RFCOMM_REUSE_DLC)) { + /* DLC is now used by device. 
+ * Socket must be disconnected */ + sk->sk_state = BT_CLOSED; + } + + return id; +} + +static int rfcomm_release_dev(void __user *arg) +{ + struct rfcomm_dev_req req; + struct rfcomm_dev *dev; + + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + + BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags); + + if (!(dev = rfcomm_dev_get(req.dev_id))) + return -ENODEV; + + if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) { + rfcomm_dev_put(dev); + return -EPERM; + } + + if (req.flags & (1 << RFCOMM_HANGUP_NOW)) + rfcomm_dlc_close(dev->dlc, 0); + + /* Shut down TTY synchronously before freeing rfcomm_dev */ + if (dev->tty) + tty_vhangup(dev->tty); + + if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) + rfcomm_dev_del(dev); + rfcomm_dev_put(dev); + return 0; +} + +static int rfcomm_get_dev_list(void __user *arg) +{ + struct rfcomm_dev_list_req *dl; + struct rfcomm_dev_info *di; + struct list_head *p; + int n = 0, size, err; + u16 dev_num; + + BT_DBG(""); + + if (get_user(dev_num, (u16 __user *) arg)) + return -EFAULT; + + if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di)) + return -EINVAL; + + size = sizeof(*dl) + dev_num * sizeof(*di); + + if (!(dl = kmalloc(size, GFP_KERNEL))) + return -ENOMEM; + + di = dl->dev_info; + + read_lock_bh(&rfcomm_dev_lock); + + list_for_each(p, &rfcomm_dev_list) { + struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list); + if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) + continue; + (di + n)->id = dev->id; + (di + n)->flags = dev->flags; + (di + n)->state = dev->dlc->state; + (di + n)->channel = dev->channel; + bacpy(&(di + n)->src, &dev->src); + bacpy(&(di + n)->dst, &dev->dst); + if (++n >= dev_num) + break; + } + + read_unlock_bh(&rfcomm_dev_lock); + + dl->dev_num = n; + size = sizeof(*dl) + n * sizeof(*di); + + err = copy_to_user(arg, dl, size); + kfree(dl); + + return err ? 
-EFAULT : 0; +} + +static int rfcomm_get_dev_info(void __user *arg) +{ + struct rfcomm_dev *dev; + struct rfcomm_dev_info di; + int err = 0; + + BT_DBG(""); + + if (copy_from_user(&di, arg, sizeof(di))) + return -EFAULT; + + if (!(dev = rfcomm_dev_get(di.id))) + return -ENODEV; + + di.flags = dev->flags; + di.channel = dev->channel; + di.state = dev->dlc->state; + bacpy(&di.src, &dev->src); + bacpy(&di.dst, &dev->dst); + + if (copy_to_user(arg, &di, sizeof(di))) + err = -EFAULT; + + rfcomm_dev_put(dev); + return err; +} + +int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) +{ + BT_DBG("cmd %d arg %p", cmd, arg); + + switch (cmd) { + case RFCOMMCREATEDEV: + return rfcomm_create_dev(sk, arg); + + case RFCOMMRELEASEDEV: + return rfcomm_release_dev(arg); + + case RFCOMMGETDEVLIST: + return rfcomm_get_dev_list(arg); + + case RFCOMMGETDEVINFO: + return rfcomm_get_dev_info(arg); + } + + return -EINVAL; +} + +/* ---- DLC callbacks ---- */ +static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb) +{ + struct rfcomm_dev *dev = dlc->owner; + struct tty_struct *tty; + + if (!dev) { + kfree_skb(skb); + return; + } + + if (!(tty = dev->tty) || !skb_queue_empty(&dev->pending)) { + skb_queue_tail(&dev->pending, skb); + return; + } + + BT_DBG("dlc %p tty %p len %d", dlc, tty, skb->len); + + tty_insert_flip_string(tty, skb->data, skb->len); + tty_flip_buffer_push(tty); + + kfree_skb(skb); +} + +static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) +{ + struct rfcomm_dev *dev = dlc->owner; + if (!dev) + return; + + BT_DBG("dlc %p dev %p err %d", dlc, dev, err); + + dev->err = err; + wake_up_interruptible(&dev->wait); + + if (dlc->state == BT_CLOSED) { + if (!dev->tty) { + if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { + /* Drop DLC lock here to avoid deadlock + * 1. rfcomm_dev_get will take rfcomm_dev_lock + * but in rfcomm_dev_add there's lock order: + * rfcomm_dev_lock -> dlc lock + * 2. rfcomm_dev_put will deadlock if it's + * the last reference + */ + rfcomm_dlc_unlock(dlc); + if (rfcomm_dev_get(dev->id) == NULL) { + rfcomm_dlc_lock(dlc); + return; + } + + rfcomm_dev_del(dev); + rfcomm_dev_put(dev); + rfcomm_dlc_lock(dlc); + } + } else + tty_hangup(dev->tty); + } +} + +static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) +{ + struct rfcomm_dev *dev = dlc->owner; + if (!dev) + return; + + BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig); + + if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) { + if (dev->tty && !C_CLOCAL(dev->tty)) + tty_hangup(dev->tty); + } + + dev->modem_status = + ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | + ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) | + ((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) | + ((v24_sig & RFCOMM_V24_DV) ? 
TIOCM_CD : 0); +} + +/* ---- TTY functions ---- */ +static void rfcomm_tty_wakeup(unsigned long arg) +{ + struct rfcomm_dev *dev = (void *) arg; + struct tty_struct *tty = dev->tty; + if (!tty) + return; + + BT_DBG("dev %p tty %p", dev, tty); + tty_wakeup(tty); +} + +static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev) +{ + struct tty_struct *tty = dev->tty; + struct sk_buff *skb; + int inserted = 0; + + if (!tty) + return; + + BT_DBG("dev %p tty %p", dev, tty); + + rfcomm_dlc_lock(dev->dlc); + + while ((skb = skb_dequeue(&dev->pending))) { + inserted += tty_insert_flip_string(tty, skb->data, skb->len); + kfree_skb(skb); + } + + rfcomm_dlc_unlock(dev->dlc); + + if (inserted > 0) + tty_flip_buffer_push(tty); +} + +static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) +{ + DECLARE_WAITQUEUE(wait, current); + struct rfcomm_dev *dev; + struct rfcomm_dlc *dlc; + int err, id; + + id = tty->index; + + BT_DBG("tty %p id %d", tty, id); + + /* We don't leak this refcount. For reasons which are not entirely + clear, the TTY layer will call our ->close() method even if the + open fails. We decrease the refcount there, and decreasing it + here too would cause breakage. */ + dev = rfcomm_dev_get(id); + if (!dev) + return -ENODEV; + + BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst), + dev->channel, atomic_read(&dev->opened)); + + if (atomic_inc_return(&dev->opened) > 1) + return 0; + + dlc = dev->dlc; + + /* Attach TTY and open DLC */ + + rfcomm_dlc_lock(dlc); + tty->driver_data = dev; + dev->tty = tty; + rfcomm_dlc_unlock(dlc); + set_bit(RFCOMM_TTY_ATTACHED, &dev->flags); + + err = rfcomm_dlc_open(dlc, &dev->src, &dev->dst, dev->channel); + if (err < 0) + return err; + + /* Wait for DLC to connect */ + add_wait_queue(&dev->wait, &wait); + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + + if (dlc->state == BT_CLOSED) { + err = -dev->err; + break; + } + + if (dlc->state == BT_CONNECTED) + break; + + if (signal_pending(current)) { + err = -EINTR; + break; + } + + schedule(); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&dev->wait, &wait); + + if (err == 0) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)) + device_move(dev->tty_dev, rfcomm_get_device(dev), + DPM_ORDER_DEV_AFTER_PARENT); +#else + device_move(dev->tty_dev, rfcomm_get_device(dev)); +#endif + + rfcomm_tty_copy_pending(dev); + + rfcomm_dlc_unthrottle(dev->dlc); + + return err; +} + +static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + if (!dev) + return; + + BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, + atomic_read(&dev->opened)); + + if (atomic_dec_and_test(&dev->opened)) { + if (dev->tty_dev->parent) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)) + device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); +#else + device_move(dev->tty_dev, NULL); +#endif + + /* Close DLC and detach TTY */ + rfcomm_dlc_close(dev->dlc, 0); + + clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); + tasklet_kill(&dev->wakeup_task); + + rfcomm_dlc_lock(dev->dlc); + tty->driver_data = NULL; + dev->tty = NULL; + rfcomm_dlc_unlock(dev->dlc); + + if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) { + write_lock_bh(&rfcomm_dev_lock); + list_del_init(&dev->list); + write_unlock_bh(&rfcomm_dev_lock); + + rfcomm_dev_put(dev); + } + } + + rfcomm_dev_put(dev); +} + +static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) 
tty->driver_data; + struct rfcomm_dlc *dlc = dev->dlc; + struct sk_buff *skb; + int err = 0, sent = 0, size; + + BT_DBG("tty %p count %d", tty, count); + + while (count) { + size = min_t(uint, count, dlc->mtu); + + skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC); + + if (!skb) + break; + + skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); + + memcpy(skb_put(skb, size), buf + sent, size); + + if ((err = rfcomm_dlc_send(dlc, skb)) < 0) { + kfree_skb(skb); + break; + } + + sent += size; + count -= size; + } + + return sent ? sent : err; +} + +static int rfcomm_tty_write_room(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + int room; + + BT_DBG("tty %p", tty); + + if (!dev || !dev->dlc) + return 0; + + room = rfcomm_room(dev->dlc) - atomic_read(&dev->wmem_alloc); + if (room < 0) + room = 0; + + return room; +} + +static int rfcomm_tty_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, unsigned long arg) +{ + BT_DBG("tty %p cmd 0x%02x", tty, cmd); + + switch (cmd) { + case TCGETS: + BT_DBG("TCGETS is not supported"); + return -ENOIOCTLCMD; + + case TCSETS: + BT_DBG("TCSETS is not supported"); + return -ENOIOCTLCMD; + + case TIOCMIWAIT: + BT_DBG("TIOCMIWAIT"); + break; + + case TIOCGICOUNT: + BT_DBG("TIOCGICOUNT"); + break; + + case TIOCGSERIAL: + BT_ERR("TIOCGSERIAL is not supported"); + return -ENOIOCTLCMD; + + case TIOCSSERIAL: + BT_ERR("TIOCSSERIAL is not supported"); + return -ENOIOCTLCMD; + + case TIOCSERGSTRUCT: + BT_ERR("TIOCSERGSTRUCT is not supported"); + return -ENOIOCTLCMD; + + case TIOCSERGETLSR: + BT_ERR("TIOCSERGETLSR is not supported"); + return -ENOIOCTLCMD; + + case TIOCSERCONFIG: + BT_ERR("TIOCSERCONFIG is not supported"); + return -ENOIOCTLCMD; + + default: + return -ENOIOCTLCMD; /* ioctls which we must ignore */ + + } + + return -ENOIOCTLCMD; +} + +static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old) +{ + struct ktermios *new = tty->termios; + int old_baud_rate = tty_termios_baud_rate(old); + int new_baud_rate = tty_termios_baud_rate(new); + + u8 baud, data_bits, stop_bits, parity, x_on, x_off; + u16 changes = 0; + + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p termios %p", tty, old); + + if (!dev || !dev->dlc || !dev->dlc->session) + return; + + /* Handle turning off CRTSCTS */ + if ((old->c_cflag & CRTSCTS) && !(new->c_cflag & CRTSCTS)) + BT_DBG("Turning off CRTSCTS unsupported"); + + /* Parity on/off and when on, odd/even */ + if (((old->c_cflag & PARENB) != (new->c_cflag & PARENB)) || + ((old->c_cflag & PARODD) != (new->c_cflag & PARODD)) ) { + changes |= RFCOMM_RPN_PM_PARITY; + BT_DBG("Parity change detected."); + } + + /* Mark and space parity are not supported! 
*/ + if (new->c_cflag & PARENB) { + if (new->c_cflag & PARODD) { + BT_DBG("Parity is ODD"); + parity = RFCOMM_RPN_PARITY_ODD; + } else { + BT_DBG("Parity is EVEN"); + parity = RFCOMM_RPN_PARITY_EVEN; + } + } else { + BT_DBG("Parity is OFF"); + parity = RFCOMM_RPN_PARITY_NONE; + } + + /* Setting the x_on / x_off characters */ + if (old->c_cc[VSTOP] != new->c_cc[VSTOP]) { + BT_DBG("XOFF custom"); + x_on = new->c_cc[VSTOP]; + changes |= RFCOMM_RPN_PM_XON; + } else { + BT_DBG("XOFF default"); + x_on = RFCOMM_RPN_XON_CHAR; + } + + if (old->c_cc[VSTART] != new->c_cc[VSTART]) { + BT_DBG("XON custom"); + x_off = new->c_cc[VSTART]; + changes |= RFCOMM_RPN_PM_XOFF; + } else { + BT_DBG("XON default"); + x_off = RFCOMM_RPN_XOFF_CHAR; + } + + /* Handle setting of stop bits */ + if ((old->c_cflag & CSTOPB) != (new->c_cflag & CSTOPB)) + changes |= RFCOMM_RPN_PM_STOP; + + /* POSIX does not support 1.5 stop bits and RFCOMM does not + * support 2 stop bits. So a request for 2 stop bits gets + * translated to 1.5 stop bits */ + if (new->c_cflag & CSTOPB) { + stop_bits = RFCOMM_RPN_STOP_15; + } else { + stop_bits = RFCOMM_RPN_STOP_1; + } + + /* Handle number of data bits [5-8] */ + if ((old->c_cflag & CSIZE) != (new->c_cflag & CSIZE)) + changes |= RFCOMM_RPN_PM_DATA; + + switch (new->c_cflag & CSIZE) { + case CS5: + data_bits = RFCOMM_RPN_DATA_5; + break; + case CS6: + data_bits = RFCOMM_RPN_DATA_6; + break; + case CS7: + data_bits = RFCOMM_RPN_DATA_7; + break; + case CS8: + data_bits = RFCOMM_RPN_DATA_8; + break; + default: + data_bits = RFCOMM_RPN_DATA_8; + break; + } + + /* Handle baudrate settings */ + if (old_baud_rate != new_baud_rate) + changes |= RFCOMM_RPN_PM_BITRATE; + + switch (new_baud_rate) { + case 2400: + baud = RFCOMM_RPN_BR_2400; + break; + case 4800: + baud = RFCOMM_RPN_BR_4800; + break; + case 7200: + baud = RFCOMM_RPN_BR_7200; + break; + case 9600: + baud = RFCOMM_RPN_BR_9600; + break; + case 19200: + baud = RFCOMM_RPN_BR_19200; + break; + case 38400: + baud = RFCOMM_RPN_BR_38400; + break; + case 57600: + baud = RFCOMM_RPN_BR_57600; + break; + case 115200: + baud = RFCOMM_RPN_BR_115200; + break; + case 230400: + baud = RFCOMM_RPN_BR_230400; + break; + default: + /* 9600 is standard according to the RFCOMM specification */ + baud = RFCOMM_RPN_BR_9600; + break; + + } + + if (changes) + rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud, + data_bits, stop_bits, parity, + RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes); + + return; +} + +static void rfcomm_tty_throttle(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + rfcomm_dlc_throttle(dev->dlc); +} + +static void rfcomm_tty_unthrottle(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + rfcomm_dlc_unthrottle(dev->dlc); +} + +static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + if (!dev || !dev->dlc) + return 0; + + if (!skb_queue_empty(&dev->dlc->tx_queue)) + return dev->dlc->mtu; + + return 0; +} + +static void rfcomm_tty_flush_buffer(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + if (!dev || !dev->dlc) + return; + + skb_queue_purge(&dev->dlc->tx_queue); + tty_wakeup(tty); +} + +static void rfcomm_tty_send_xchar(struct tty_struct *tty, char ch) +{ + 
BT_DBG("tty %p ch %c", tty, ch); +} + +static void rfcomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) +{ + BT_DBG("tty %p timeout %d", tty, timeout); +} + +static void rfcomm_tty_hangup(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + if (!dev) + return; + + rfcomm_tty_flush_buffer(tty); + + if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { + if (rfcomm_dev_get(dev->id) == NULL) + return; + rfcomm_dev_del(dev); + rfcomm_dev_put(dev); + } +} + +static int rfcomm_tty_tiocmget(struct tty_struct *tty, struct file *filp) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + return dev->modem_status; +} + +static int rfcomm_tty_tiocmset(struct tty_struct *tty, struct file *filp, unsigned int set, unsigned int clear) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dlc *dlc = dev->dlc; + u8 v24_sig; + + BT_DBG("tty %p dev %p set 0x%02x clear 0x%02x", tty, dev, set, clear); + + rfcomm_dlc_get_modem_status(dlc, &v24_sig); + + if (set & TIOCM_DSR || set & TIOCM_DTR) + v24_sig |= RFCOMM_V24_RTC; + if (set & TIOCM_RTS || set & TIOCM_CTS) + v24_sig |= RFCOMM_V24_RTR; + if (set & TIOCM_RI) + v24_sig |= RFCOMM_V24_IC; + if (set & TIOCM_CD) + v24_sig |= RFCOMM_V24_DV; + + if (clear & TIOCM_DSR || clear & TIOCM_DTR) + v24_sig &= ~RFCOMM_V24_RTC; + if (clear & TIOCM_RTS || clear & TIOCM_CTS) + v24_sig &= ~RFCOMM_V24_RTR; + if (clear & TIOCM_RI) + v24_sig &= ~RFCOMM_V24_IC; + if (clear & TIOCM_CD) + v24_sig &= ~RFCOMM_V24_DV; + + rfcomm_dlc_set_modem_status(dlc, v24_sig); + + return 0; +} + +/* ---- TTY structure ---- */ + +static const struct tty_operations rfcomm_ops = { + .open = rfcomm_tty_open, + .close = rfcomm_tty_close, + .write = rfcomm_tty_write, + .write_room = rfcomm_tty_write_room, + .chars_in_buffer = rfcomm_tty_chars_in_buffer, + .flush_buffer = rfcomm_tty_flush_buffer, + .ioctl = rfcomm_tty_ioctl, + .throttle = rfcomm_tty_throttle, + .unthrottle = rfcomm_tty_unthrottle, + .set_termios = rfcomm_tty_set_termios, + .send_xchar = rfcomm_tty_send_xchar, + .hangup = rfcomm_tty_hangup, + .wait_until_sent = rfcomm_tty_wait_until_sent, + .tiocmget = rfcomm_tty_tiocmget, + .tiocmset = rfcomm_tty_tiocmset, +}; + +int rfcomm_init_ttys(void) +{ + rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS); + if (!rfcomm_tty_driver) + return -1; + + rfcomm_tty_driver->owner = THIS_MODULE; + rfcomm_tty_driver->driver_name = "rfcomm"; + rfcomm_tty_driver->name = "rfcomm"; + rfcomm_tty_driver->major = RFCOMM_TTY_MAJOR; + rfcomm_tty_driver->minor_start = RFCOMM_TTY_MINOR; + rfcomm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL; + rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + rfcomm_tty_driver->init_termios = tty_std_termios; + rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; + rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; + tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); + + if (tty_register_driver(rfcomm_tty_driver)) { + BT_ERR("Can't register RFCOMM TTY driver"); + put_tty_driver(rfcomm_tty_driver); + return -1; + } + + BT_INFO("RFCOMM TTY layer initialized"); + + return 0; +} + +void rfcomm_cleanup_ttys(void) +{ + tty_unregister_driver(rfcomm_tty_driver); + put_tty_driver(rfcomm_tty_driver); +} diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c new file mode 100644 index 0000000..0ec77b7 --- 
/dev/null +++ b/net/bluetooth/sco.c @@ -0,0 +1,1079 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth SCO sockets. */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#define VERSION "0.6" + +static int disable_esco = 0; + +static const struct proto_ops sco_sock_ops; + +static struct bt_sock_list sco_sk_list = { + .lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock) +}; + +static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent); +static void sco_chan_del(struct sock *sk, int err); + +static int sco_conn_del(struct hci_conn *conn, int err); + +static void sco_sock_close(struct sock *sk); +static void sco_sock_kill(struct sock *sk); + +/* ---- SCO timers ---- */ +static void sco_sock_timeout(unsigned long arg) +{ + struct sock *sk = (struct sock *) arg; + + BT_DBG("sock %p state %d", sk, sk->sk_state); + + bh_lock_sock(sk); + sk->sk_err = ETIMEDOUT; + sk->sk_state_change(sk); + bh_unlock_sock(sk); + + sco_sock_kill(sk); + sock_put(sk); +} + +static void sco_sock_set_timer(struct sock *sk, long timeout) +{ + BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); + sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout); +} + +static void sco_sock_clear_timer(struct sock *sk) +{ + BT_DBG("sock %p state %d", sk, sk->sk_state); + sk_stop_timer(sk, &sk->sk_timer); +} + +/* ---- SCO connections ---- */ +static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status) +{ + struct hci_dev *hdev = hcon->hdev; + struct sco_conn *conn = hcon->sco_data; + + if (conn || status) + return conn; + + conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC); + if (!conn) + return NULL; + + spin_lock_init(&conn->lock); + + hcon->sco_data = conn; + conn->hcon = hcon; + + conn->src = &hdev->bdaddr; + conn->dst = &hcon->dst; + + if (hdev->sco_mtu > 0) + conn->mtu = hdev->sco_mtu; + else + conn->mtu = 60; + + BT_DBG("hcon %p conn %p", hcon, conn); + + return conn; +} + +static inline struct sock *sco_chan_get(struct sco_conn *conn) +{ + struct sock *sk = NULL; + sco_conn_lock(conn); + sk = conn->sk; + sco_conn_unlock(conn); + return sk; +} + +static int sco_conn_del(struct hci_conn *hcon, int err) +{ + struct sco_conn *conn; + struct sock *sk; + + if (!(conn = hcon->sco_data)) + return 0; + + BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); + + /* Kill socket */ + if ((sk = 
sco_chan_get(conn))) { + bh_lock_sock(sk); + sco_sock_clear_timer(sk); + sco_chan_del(sk, err); + bh_unlock_sock(sk); + sco_sock_kill(sk); + } + + hcon->sco_data = NULL; + kfree(conn); + return 0; +} + +static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) +{ + int err = 0; + + sco_conn_lock(conn); + if (conn->sk) { + err = -EBUSY; + } else { + __sco_chan_add(conn, sk, parent); + } + sco_conn_unlock(conn); + return err; +} + +static int sco_connect(struct sock *sk) +{ + bdaddr_t *src = &bt_sk(sk)->src; + bdaddr_t *dst = &bt_sk(sk)->dst; + struct sco_conn *conn; + struct hci_conn *hcon; + struct hci_dev *hdev; + int err, type; + + BT_DBG("%s -> %s", batostr(src), batostr(dst)); + + if (!(hdev = hci_get_route(dst, src))) + return -EHOSTUNREACH; + + hci_dev_lock_bh(hdev); + + err = -ENOMEM; + + if (lmp_esco_capable(hdev) && !disable_esco) + type = ESCO_LINK; + else + type = SCO_LINK; + + hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); + if (!hcon) + goto done; + + conn = sco_conn_add(hcon, 0); + if (!conn) { + hci_conn_put(hcon); + goto done; + } + + /* Update source addr of the socket */ + bacpy(src, conn->src); + + err = sco_chan_add(conn, sk, NULL); + if (err) + goto done; + + if (hcon->state == BT_CONNECTED) { + sco_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + } else { + sk->sk_state = BT_CONNECT; + sco_sock_set_timer(sk, sk->sk_sndtimeo); + } + +done: + hci_dev_unlock_bh(hdev); + hci_dev_put(hdev); + return err; +} + +static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) +{ + struct sco_conn *conn = sco_pi(sk)->conn; + struct sk_buff *skb; + int err, count; + + /* Check outgoing MTU */ + if (len > conn->mtu) + return -EINVAL; + + BT_DBG("sk %p len %d", sk, len); + + count = min_t(unsigned int, conn->mtu, len); + if (!(skb = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err))) + return err; + + if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { + err = -EFAULT; + goto fail; + } + + if ((err = hci_send_sco(conn->hcon, skb)) < 0) + return err; + + return count; + +fail: + kfree_skb(skb); + return err; +} + +static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) +{ + struct sock *sk = sco_chan_get(conn); + + if (!sk) + goto drop; + + BT_DBG("sk %p len %d", sk, skb->len); + + if (sk->sk_state != BT_CONNECTED) + goto drop; + + if (!sock_queue_rcv_skb(sk, skb)) + return; + +drop: + kfree_skb(skb); + return; +} + +/* -------- Socket interface ---------- */ +static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba) +{ + struct sock *sk; + struct hlist_node *node; + + sk_for_each(sk, node, &sco_sk_list.head) + if (!bacmp(&bt_sk(sk)->src, ba)) + goto found; + sk = NULL; +found: + return sk; +} + +/* Find socket listening on source bdaddr. + * Returns closest match. + */ +static struct sock *sco_get_sock_listen(bdaddr_t *src) +{ + struct sock *sk = NULL, *sk1 = NULL; + struct hlist_node *node; + + read_lock(&sco_sk_list.lock); + + sk_for_each(sk, node, &sco_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + /* Exact match. */ + if (!bacmp(&bt_sk(sk)->src, src)) + break; + + /* Closest match */ + if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) + sk1 = sk; + } + + read_unlock(&sco_sk_list.lock); + + return node ? 
sk : sk1; +} + +static void sco_sock_destruct(struct sock *sk) +{ + BT_DBG("sk %p", sk); + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + +static void sco_sock_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + BT_DBG("parent %p", parent); + + /* Close not yet accepted channels */ + while ((sk = bt_accept_dequeue(parent, NULL))) { + sco_sock_close(sk); + sco_sock_kill(sk); + } + + parent->sk_state = BT_CLOSED; + sock_set_flag(parent, SOCK_ZAPPED); +} + +/* Kill socket (only if zapped and orphan) + * Must be called on unlocked socket. + */ +static void sco_sock_kill(struct sock *sk) +{ + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + return; + + BT_DBG("sk %p state %d", sk, sk->sk_state); + + /* Kill poor orphan */ + bt_sock_unlink(&sco_sk_list, sk); + sock_set_flag(sk, SOCK_DEAD); + sock_put(sk); +} + +static void __sco_sock_close(struct sock *sk) +{ + BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); + + switch (sk->sk_state) { + case BT_LISTEN: + sco_sock_cleanup_listen(sk); + break; + + case BT_CONNECTED: + case BT_CONFIG: + case BT_CONNECT: + case BT_DISCONN: + sco_chan_del(sk, ECONNRESET); + break; + + default: + sock_set_flag(sk, SOCK_ZAPPED); + break; + } +} + +/* Must be called on unlocked socket. */ +static void sco_sock_close(struct sock *sk) +{ + sco_sock_clear_timer(sk); + lock_sock(sk); + __sco_sock_close(sk); + release_sock(sk); + sco_sock_kill(sk); +} + +static void sco_sock_init(struct sock *sk, struct sock *parent) +{ + BT_DBG("sk %p", sk); + + if (parent) + sk->sk_type = parent->sk_type; +} + +static struct proto sco_proto = { + .name = "SCO", + .owner = THIS_MODULE, + .obj_size = sizeof(struct sco_pinfo) +}; + +static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto); + if (!sk) + return NULL; + + sock_init_data(sock, sk); + INIT_LIST_HEAD(&bt_sk(sk)->accept_q); + + sk->sk_destruct = sco_sock_destruct; + sk->sk_sndtimeo = SCO_CONN_TIMEOUT; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = proto; + sk->sk_state = BT_OPEN; + + setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk); + + bt_sock_link(&sco_sk_list, sk); + return sk; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) +static int sco_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +#else +static int sco_sock_create(struct net *net, struct socket *sock, int protocol) +#endif +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + sock->state = SS_UNCONNECTED; + + if (sock->type != SOCK_SEQPACKET) + return -ESOCKTNOSUPPORT; + + sock->ops = &sco_sock_ops; + + sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC); + if (!sk) + return -ENOMEM; + + sco_sock_init(sk, NULL); + return 0; +} + +static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) +{ + struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; + struct sock *sk = sock->sk; + bdaddr_t *src = &sa->sco_bdaddr; + int err = 0; + + BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr)); + + if (!addr || addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + lock_sock(sk); + + if (sk->sk_state != BT_OPEN) { + err = -EBADFD; + goto done; + } + + write_lock_bh(&sco_sk_list.lock); + + if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) { + err = -EADDRINUSE; + } else { + /* Save source address */ + bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr); + sk->sk_state = BT_BOUND; + } + + 
write_unlock_bh(&sco_sk_list.lock); + +done: + release_sock(sk); + return err; +} + +static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) +{ + struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; + struct sock *sk = sock->sk; + int err = 0; + + + BT_DBG("sk %p", sk); + + if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco)) + return -EINVAL; + + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) + return -EBADFD; + + if (sk->sk_type != SOCK_SEQPACKET) + return -EINVAL; + + lock_sock(sk); + + /* Set destination address and psm */ + bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr); + + if ((err = sco_connect(sk))) + goto done; + + err = bt_sock_wait_state(sk, BT_CONNECTED, + sock_sndtimeo(sk, flags & O_NONBLOCK)); + +done: + release_sock(sk); + return err; +} + +static int sco_sock_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p backlog %d", sk, backlog); + + lock_sock(sk); + + if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) { + err = -EBADFD; + goto done; + } + + sk->sk_max_ack_backlog = backlog; + sk->sk_ack_backlog = 0; + sk->sk_state = BT_LISTEN; + +done: + release_sock(sk); + return err; +} + +static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags) +{ + DECLARE_WAITQUEUE(wait, current); + struct sock *sk = sock->sk, *ch; + long timeo; + int err = 0; + + lock_sock(sk); + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + goto done; + } + + timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + + BT_DBG("sk %p timeo %ld", sk, timeo); + + /* Wait for an incoming connection. (wake-one). */ + add_wait_queue_exclusive(sk->sk_sleep, &wait); + while (!(ch = bt_accept_dequeue(sk, newsock))) { + set_current_state(TASK_INTERRUPTIBLE); + if (!timeo) { + err = -EAGAIN; + break; + } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + } + set_current_state(TASK_RUNNING); + remove_wait_queue(sk->sk_sleep, &wait); + + if (err) + goto done; + + newsock->state = SS_CONNECTED; + + BT_DBG("new socket %p", ch); + +done: + release_sock(sk); + return err; +} + +static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) +{ + struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; + struct sock *sk = sock->sk; + + BT_DBG("sock %p, sk %p", sock, sk); + + addr->sa_family = AF_BLUETOOTH; + *len = sizeof(struct sockaddr_sco); + + if (peer) + bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst); + else + bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src); + + return 0; +} + +static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + err = sock_error(sk); + if (err) + return err; + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + lock_sock(sk); + + if (sk->sk_state == BT_CONNECTED) + err = sco_send_frame(sk, msg, len); + else + err = -ENOTCONN; + + release_sock(sk); + return err; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) +static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) +#else +static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) +#endif +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p", 
sk); + + lock_sock(sk); + + switch (optname) { + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct sco_options opts; + struct sco_conninfo cinfo; + int len, err = 0; + + BT_DBG("sk %p", sk); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case SCO_OPTIONS: + if (sk->sk_state != BT_CONNECTED) { + err = -ENOTCONN; + break; + } + + opts.mtu = sco_pi(sk)->conn->mtu; + + BT_DBG("mtu %d", opts.mtu); + + len = min_t(unsigned int, len, sizeof(opts)); + if (copy_to_user(optval, (char *)&opts, len)) + err = -EFAULT; + + break; + + case SCO_CONNINFO: + if (sk->sk_state != BT_CONNECTED) { + err = -ENOTCONN; + break; + } + + cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; + memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); + + len = min_t(unsigned int, len, sizeof(cinfo)); + if (copy_to_user(optval, (char *)&cinfo, len)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + int len, err = 0; + + BT_DBG("sk %p", sk); + + if (level == SOL_SCO) + return sco_sock_getsockopt_old(sock, optname, optval, optlen); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int sco_sock_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + lock_sock(sk); + if (!sk->sk_shutdown) { + sk->sk_shutdown = SHUTDOWN_MASK; + sco_sock_clear_timer(sk); + __sco_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) + err = bt_sock_wait_state(sk, BT_CLOSED, + sk->sk_lingertime); + } + release_sock(sk); + return err; +} + +static int sco_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + sco_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) { + lock_sock(sk); + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); + release_sock(sk); + } + + sock_orphan(sk); + sco_sock_kill(sk); + return err; +} + +static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) +{ + BT_DBG("conn %p", conn); + + sco_pi(sk)->conn = conn; + conn->sk = sk; + + if (parent) + bt_accept_enqueue(parent, sk); +} + +/* Delete channel. + * Must be called on the locked socket. 
*/ +static void sco_chan_del(struct sock *sk, int err) +{ + struct sco_conn *conn; + + conn = sco_pi(sk)->conn; + + BT_DBG("sk %p, conn %p, err %d", sk, conn, err); + + if (conn) { + sco_conn_lock(conn); + conn->sk = NULL; + sco_pi(sk)->conn = NULL; + sco_conn_unlock(conn); + hci_conn_put(conn->hcon); + } + + sk->sk_state = BT_CLOSED; + sk->sk_err = err; + sk->sk_state_change(sk); + + sock_set_flag(sk, SOCK_ZAPPED); +} + +static void sco_conn_ready(struct sco_conn *conn) +{ + struct sock *parent, *sk; + + BT_DBG("conn %p", conn); + + sco_conn_lock(conn); + + if ((sk = conn->sk)) { + sco_sock_clear_timer(sk); + bh_lock_sock(sk); + sk->sk_state = BT_CONNECTED; + sk->sk_state_change(sk); + bh_unlock_sock(sk); + } else { + parent = sco_get_sock_listen(conn->src); + if (!parent) + goto done; + + bh_lock_sock(parent); + + sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC); + if (!sk) { + bh_unlock_sock(parent); + goto done; + } + + sco_sock_init(sk, parent); + + bacpy(&bt_sk(sk)->src, conn->src); + bacpy(&bt_sk(sk)->dst, conn->dst); + + hci_conn_hold(conn->hcon); + __sco_chan_add(conn, sk, parent); + + sk->sk_state = BT_CONNECTED; + + /* Wake up parent */ + parent->sk_data_ready(parent, 1); + + bh_unlock_sock(parent); + } + +done: + sco_conn_unlock(conn); +} + +/* ----- SCO interface with lower layer (HCI) ----- */ +static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) +{ + register struct sock *sk; + struct hlist_node *node; + int lm = 0; + + if (type != SCO_LINK && type != ESCO_LINK) + return 0; + + BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); + + /* Find listening sockets */ + read_lock(&sco_sk_list.lock); + sk_for_each(sk, node, &sco_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || + !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { + lm |= HCI_LM_ACCEPT; + break; + } + } + read_unlock(&sco_sk_list.lock); + + return lm; +} + +static int sco_connect_cfm(struct hci_conn *hcon, __u8 status) +{ + BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); + + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return 0; + + if (!status) { + struct sco_conn *conn; + + conn = sco_conn_add(hcon, status); + if (conn) + sco_conn_ready(conn); + } else + sco_conn_del(hcon, bt_err(status)); + + return 0; +} + +static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) +{ + BT_DBG("hcon %p reason %d", hcon, reason); + + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return 0; + + sco_conn_del(hcon, bt_err(reason)); + + return 0; +} + +static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) +{ + struct sco_conn *conn = hcon->sco_data; + + if (!conn) + goto drop; + + BT_DBG("conn %p len %d", conn, skb->len); + + if (skb->len) { + sco_recv_frame(conn, skb); + return 0; + } + +drop: + kfree_skb(skb); + return 0; +} + +static ssize_t sco_sysfs_show(struct class *dev, + struct class_attribute *attr, + char *buf) +{ + struct sock *sk; + struct hlist_node *node; + char *str = buf; + + read_lock_bh(&sco_sk_list.lock); + + sk_for_each(sk, node, &sco_sk_list.head) { + str += sprintf(str, "%s %s %d\n", + batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), + sk->sk_state); + } + + read_unlock_bh(&sco_sk_list.lock); + + return (str - buf); +} + +static CLASS_ATTR(sco, S_IRUGO, sco_sysfs_show, NULL); + +static const struct proto_ops sco_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = sco_sock_release, + .bind = sco_sock_bind, + .connect = 
sco_sock_connect, + .listen = sco_sock_listen, + .accept = sco_sock_accept, + .getname = sco_sock_getname, + .sendmsg = sco_sock_sendmsg, + .recvmsg = bt_sock_recvmsg, + .poll = bt_sock_poll, + .ioctl = bt_sock_ioctl, + .mmap = sock_no_mmap, + .socketpair = sock_no_socketpair, + .shutdown = sco_sock_shutdown, + .setsockopt = sco_sock_setsockopt, + .getsockopt = sco_sock_getsockopt +}; + +static const struct net_proto_family sco_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = sco_sock_create, +}; + +static struct hci_proto sco_hci_proto = { + .name = "SCO", + .id = HCI_PROTO_SCO, + .connect_ind = sco_connect_ind, + .connect_cfm = sco_connect_cfm, + .disconn_cfm = sco_disconn_cfm, + .recv_scodata = sco_recv_scodata +}; + +static int __init sco_init(void) +{ + int err; + + err = proto_register(&sco_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops); + if (err < 0) { + BT_ERR("SCO socket registration failed"); + goto error; + } + + err = hci_register_proto(&sco_hci_proto); + if (err < 0) { + BT_ERR("SCO protocol registration failed"); + bt_sock_unregister(BTPROTO_SCO); + goto error; + } + + if (class_create_file(bt_class, &class_attr_sco) < 0) + BT_ERR("Failed to create SCO info file"); + + BT_INFO("SCO (Voice Link) ver %s", VERSION); + BT_INFO("SCO socket layer initialized"); + + return 0; + +error: + proto_unregister(&sco_proto); + return err; +} + +static void __exit sco_exit(void) +{ + class_remove_file(bt_class, &class_attr_sco); + + if (bt_sock_unregister(BTPROTO_SCO) < 0) + BT_ERR("SCO socket unregistration failed"); + + if (hci_unregister_proto(&sco_hci_proto) < 0) + BT_ERR("SCO protocol unregistration failed"); + + proto_unregister(&sco_proto); +} + +module_init(sco_init); +module_exit(sco_exit); + +module_param(disable_esco, bool, 0644); +MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation"); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-2"); diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile new file mode 100644 index 0000000..0442029 --- /dev/null +++ b/net/mac80211/Makefile @@ -0,0 +1,56 @@ +obj-$(CONFIG_MAC80211) += mac80211.o + +# mac80211 objects +mac80211-y := \ + main.o status.o \ + sta_info.o \ + wep.o \ + wpa.o \ + scan.o offchannel.o \ + ht.o agg-tx.o agg-rx.o \ + ibss.o \ + mlme.o work.o \ + iface.o \ + rate.o \ + michael.o \ + tkip.o \ + aes_ccm.o \ + aes_cmac.o \ + cfg.o \ + rx.o \ + spectmgmt.o \ + tx.o \ + key.o \ + util.o \ + wme.o \ + event.o + +mac80211-$(CONFIG_MAC80211_LEDS) += led.o +mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ + debugfs.o \ + debugfs_sta.o \ + debugfs_netdev.o \ + debugfs_key.o + +mac80211-$(CONFIG_MAC80211_MESH) += \ + mesh.o \ + mesh_pathtbl.o \ + mesh_plink.o \ + mesh_hwmp.o + +mac80211-$(CONFIG_PM) += pm.o + +mac80211-$(CONFIG_MAC80211_DRIVER_API_TRACER) += driver-trace.o +CFLAGS_driver-trace.o := -I$(src) + +# objects for PID algorithm +rc80211_pid-y := rc80211_pid_algo.o +rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o + +rc80211_minstrel-y := rc80211_minstrel.o +rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o + +mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y) +mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c new file mode 100644 index 0000000..a87cb3b --- /dev/null 
+++ b/net/mac80211/aes_ccm.c @@ -0,0 +1,154 @@ +/* + * Copyright 2003-2004, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +#include +#include "key.h" +#include "aes_ccm.h" + +static void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *scratch, u8 *a) +{ + int i; + u8 *b_0, *aad, *b, *s_0; + + b_0 = scratch + 3 * AES_BLOCK_LEN; + aad = scratch + 4 * AES_BLOCK_LEN; + b = scratch; + s_0 = scratch + AES_BLOCK_LEN; + + crypto_cipher_encrypt_one(tfm, b, b_0); + + /* Extra Authenticate-only data (always two AES blocks) */ + for (i = 0; i < AES_BLOCK_LEN; i++) + aad[i] ^= b[i]; + crypto_cipher_encrypt_one(tfm, b, aad); + + aad += AES_BLOCK_LEN; + + for (i = 0; i < AES_BLOCK_LEN; i++) + aad[i] ^= b[i]; + crypto_cipher_encrypt_one(tfm, a, aad); + + /* Mask out bits from auth-only-b_0 */ + b_0[0] &= 0x07; + + /* S_0 is used to encrypt T (= MIC) */ + b_0[14] = 0; + b_0[15] = 0; + crypto_cipher_encrypt_one(tfm, s_0, b_0); +} + + +void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, + u8 *data, size_t data_len, + u8 *cdata, u8 *mic) +{ + int i, j, last_len, num_blocks; + u8 *pos, *cpos, *b, *s_0, *e, *b_0, *aad; + + b = scratch; + s_0 = scratch + AES_BLOCK_LEN; + e = scratch + 2 * AES_BLOCK_LEN; + b_0 = scratch + 3 * AES_BLOCK_LEN; + aad = scratch + 4 * AES_BLOCK_LEN; + + num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); + last_len = data_len % AES_BLOCK_LEN; + aes_ccm_prepare(tfm, scratch, b); + + /* Process payload blocks */ + pos = data; + cpos = cdata; + for (j = 1; j <= num_blocks; j++) { + int blen = (j == num_blocks && last_len) ? + last_len : AES_BLOCK_LEN; + + /* Authentication followed by encryption */ + for (i = 0; i < blen; i++) + b[i] ^= pos[i]; + crypto_cipher_encrypt_one(tfm, b, b); + + b_0[14] = (j >> 8) & 0xff; + b_0[15] = j & 0xff; + crypto_cipher_encrypt_one(tfm, e, b_0); + for (i = 0; i < blen; i++) + *cpos++ = *pos++ ^ e[i]; + } + + for (i = 0; i < CCMP_MIC_LEN; i++) + mic[i] = b[i] ^ s_0[i]; +} + + +int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, + u8 *cdata, size_t data_len, u8 *mic, u8 *data) +{ + int i, j, last_len, num_blocks; + u8 *pos, *cpos, *b, *s_0, *a, *b_0, *aad; + + b = scratch; + s_0 = scratch + AES_BLOCK_LEN; + a = scratch + 2 * AES_BLOCK_LEN; + b_0 = scratch + 3 * AES_BLOCK_LEN; + aad = scratch + 4 * AES_BLOCK_LEN; + + num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); + last_len = data_len % AES_BLOCK_LEN; + aes_ccm_prepare(tfm, scratch, a); + + /* Process payload blocks */ + cpos = cdata; + pos = data; + for (j = 1; j <= num_blocks; j++) { + int blen = (j == num_blocks && last_len) ? 
+ last_len : AES_BLOCK_LEN; + + /* Decryption followed by authentication */ + b_0[14] = (j >> 8) & 0xff; + b_0[15] = j & 0xff; + crypto_cipher_encrypt_one(tfm, b, b_0); + for (i = 0; i < blen; i++) { + *pos = *cpos++ ^ b[i]; + a[i] ^= *pos++; + } + crypto_cipher_encrypt_one(tfm, a, a); + } + + for (i = 0; i < CCMP_MIC_LEN; i++) { + if ((mic[i] ^ s_0[i]) != a[i]) + return -1; + } + + return 0; +} + + +struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]) +{ + struct crypto_cipher *tfm; + + tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return NULL; + + crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN); + + return tfm; +} + + +void ieee80211_aes_key_free(struct crypto_cipher *tfm) +{ + if (tfm) + crypto_free_cipher(tfm); +} diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h new file mode 100644 index 0000000..6e7820e --- /dev/null +++ b/net/mac80211/aes_ccm.h @@ -0,0 +1,26 @@ +/* + * Copyright 2003-2004, Instant802 Networks, Inc. + * Copyright 2006, Devicescape Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef AES_CCM_H +#define AES_CCM_H + +#include + +#define AES_BLOCK_LEN 16 + +struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]); +void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, + u8 *data, size_t data_len, + u8 *cdata, u8 *mic); +int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, + u8 *cdata, size_t data_len, + u8 *mic, u8 *data); +void ieee80211_aes_key_free(struct crypto_cipher *tfm); + +#endif /* AES_CCM_H */ diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c new file mode 100644 index 0000000..3d097b3 --- /dev/null +++ b/net/mac80211/aes_cmac.c @@ -0,0 +1,135 @@ +/* + * AES-128-CMAC with TLen 16 for IEEE 802.11w BIP + * Copyright 2008, Jouni Malinen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include +#include "key.h" +#include "aes_cmac.h" + +#define AES_BLOCK_SIZE 16 +#define AES_CMAC_KEY_LEN 16 +#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */ +#define AAD_LEN 20 + + +static void gf_mulx(u8 *pad) +{ + int i, carry; + + carry = pad[0] & 0x80; + for (i = 0; i < AES_BLOCK_SIZE - 1; i++) + pad[i] = (pad[i] << 1) | (pad[i + 1] >> 7); + pad[AES_BLOCK_SIZE - 1] <<= 1; + if (carry) + pad[AES_BLOCK_SIZE - 1] ^= 0x87; +} + + +static void aes_128_cmac_vector(struct crypto_cipher *tfm, u8 *scratch, + size_t num_elem, + const u8 *addr[], const size_t *len, u8 *mac) +{ + u8 *cbc, *pad; + const u8 *pos, *end; + size_t i, e, left, total_len; + + cbc = scratch; + pad = scratch + AES_BLOCK_SIZE; + + memset(cbc, 0, AES_BLOCK_SIZE); + + total_len = 0; + for (e = 0; e < num_elem; e++) + total_len += len[e]; + left = total_len; + + e = 0; + pos = addr[0]; + end = pos + len[0]; + + while (left >= AES_BLOCK_SIZE) { + for (i = 0; i < AES_BLOCK_SIZE; i++) { + cbc[i] ^= *pos++; + if (pos >= end) { + e++; + pos = addr[e]; + end = pos + len[e]; + } + } + if (left > AES_BLOCK_SIZE) + crypto_cipher_encrypt_one(tfm, cbc, cbc); + left -= AES_BLOCK_SIZE; + } + + memset(pad, 0, AES_BLOCK_SIZE); + crypto_cipher_encrypt_one(tfm, pad, pad); + gf_mulx(pad); + + if (left || total_len == 0) { + for (i = 0; i < left; i++) { + cbc[i] ^= *pos++; + if (pos >= end) { + e++; + pos = addr[e]; + end = pos + len[e]; + } + } + cbc[left] ^= 0x80; + gf_mulx(pad); + } + + for (i = 0; i < AES_BLOCK_SIZE; i++) + pad[i] ^= cbc[i]; + crypto_cipher_encrypt_one(tfm, pad, pad); + memcpy(mac, pad, CMAC_TLEN); +} + + +void ieee80211_aes_cmac(struct crypto_cipher *tfm, u8 *scratch, const u8 *aad, + const u8 *data, size_t data_len, u8 *mic) +{ + const u8 *addr[3]; + size_t len[3]; + u8 zero[CMAC_TLEN]; + + memset(zero, 0, CMAC_TLEN); + addr[0] = aad; + len[0] = AAD_LEN; + addr[1] = data; + len[1] = data_len - CMAC_TLEN; + addr[2] = zero; + len[2] = CMAC_TLEN; + + aes_128_cmac_vector(tfm, scratch, 3, addr, len, mic); +} + + +struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[]) +{ + struct crypto_cipher *tfm; + + tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return NULL; + + crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN); + + return tfm; +} + + +void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm) +{ + if (tfm) + crypto_free_cipher(tfm); +} diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h new file mode 100644 index 0000000..0eb9a48 --- /dev/null +++ b/net/mac80211/aes_cmac.h @@ -0,0 +1,19 @@ +/* + * Copyright 2008, Jouni Malinen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef AES_CMAC_H +#define AES_CMAC_H + +#include + +struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[]); +void ieee80211_aes_cmac(struct crypto_cipher *tfm, u8 *scratch, const u8 *aad, + const u8 *data, size_t data_len, u8 *mic); +void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm); + +#endif /* AES_CMAC_H */ diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c new file mode 100644 index 0000000..a978e66 --- /dev/null +++ b/net/mac80211/agg-rx.c @@ -0,0 +1,310 @@ +/* + * HT handling + * + * Copyright 2003, Jouni Malinen + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. 
+ * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * Copyright 2007-2008, Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include "ieee80211_i.h" +#include "driver-ops.h" + +void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, + u16 initiator, u16 reason) +{ + struct ieee80211_local *local = sta->local; + int i; + + /* check if TID is in operational state */ + spin_lock_bh(&sta->lock); + if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) { + spin_unlock_bh(&sta->lock); + return; + } + + sta->ampdu_mlme.tid_state_rx[tid] = + HT_AGG_STATE_REQ_STOP_BA_MSK | + (initiator << HT_AGG_STATE_INITIATOR_SHIFT); + spin_unlock_bh(&sta->lock); + +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", + sta->sta.addr, tid); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + + if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, + &sta->sta, tid, NULL)) + printk(KERN_DEBUG "HW problem - can not stop rx " + "aggregation for tid %d\n", tid); + + /* shutdown timer has not expired */ + if (initiator != WLAN_BACK_TIMER) + del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer); + + /* check if this is a self generated aggregation halt */ + if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) + ieee80211_send_delba(sta->sdata, sta->sta.addr, + tid, 0, reason); + + /* free the reordering buffer */ + for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) { + if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) { + /* release the reordered frames */ + dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]); + sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--; + sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL; + } + } + + spin_lock_bh(&sta->lock); + /* free resources */ + kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); + kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_time); + + if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) { + kfree(sta->ampdu_mlme.tid_rx[tid]); + sta->ampdu_mlme.tid_rx[tid] = NULL; + } + + sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE; + spin_unlock_bh(&sta->lock); +} + +void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, + u16 initiator, u16 reason) +{ + struct sta_info *sta; + + rcu_read_lock(); + + sta = sta_info_get(sdata, ra); + if (!sta) { + rcu_read_unlock(); + return; + } + + __ieee80211_stop_rx_ba_session(sta, tid, initiator, reason); + + rcu_read_unlock(); +} + +/* + * After accepting the AddBA Request we activated a timer, + * resetting it after each frame that arrives from the originator. + * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. 
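+ *
+ * Editorial illustration (not part of the original patch): the expiry
+ * handler below performs the teardown on the timer's behalf, i.e.
+ *
+ *	ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
+ *					 (u16)*ptid, WLAN_BACK_TIMER,
+ *					 WLAN_REASON_QSTA_TIMEOUT);
+ *
+ * which is why __ieee80211_stop_rx_ba_session() above skips
+ * del_timer_sync() when the initiator is WLAN_BACK_TIMER.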
+ */ +static void sta_rx_agg_session_timer_expired(unsigned long data) +{ + /* not an elegant detour, but there is no choice as the timer passes + * only one argument, and various sta_info are needed here, so init + * flow in sta_info_create gives the TID as data, while the timer_to_id + * array gives the sta through container_of */ + u8 *ptid = (u8 *)data; + u8 *timer_to_id = ptid - *ptid; + struct sta_info *sta = container_of(timer_to_id, struct sta_info, + timer_to_tid[0]); + +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); +#endif + ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, + (u16)*ptid, WLAN_BACK_TIMER, + WLAN_REASON_QSTA_TIMEOUT); +} + +static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, + u8 dialog_token, u16 status, u16 policy, + u16 buf_size, u16 timeout) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + u16 capab; + + skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); + + if (!skb) { + printk(KERN_DEBUG "%s: failed to allocate buffer " + "for addba resp frame\n", sdata->name); + return; + } + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); + memset(mgmt, 0, 24); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + else if (sdata->vif.type == NL80211_IFTYPE_STATION) + memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); + + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); + mgmt->u.action.category = WLAN_CATEGORY_BACK; + mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; + mgmt->u.action.u.addba_resp.dialog_token = dialog_token; + + capab = (u16)(policy << 1); /* bit 1 aggregation policy */ + capab |= (u16)(tid << 2); /* bit 5:2 TID number */ + capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */ + + mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); + mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); + mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); + + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_process_addba_request(struct ieee80211_local *local, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + struct ieee80211_hw *hw = &local->hw; + struct ieee80211_conf *conf = &hw->conf; + struct tid_ampdu_rx *tid_agg_rx; + u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status; + u8 dialog_token; + int ret = -EOPNOTSUPP; + + /* extract session parameters from addba request frame */ + dialog_token = mgmt->u.action.u.addba_req.dialog_token; + timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); + start_seq_num = + le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4; + + capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); + ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1; + tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; + buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; + + status = WLAN_STATUS_REQUEST_DECLINED; + + if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Suspend in progress. 
" + "Denying ADDBA request\n"); +#endif + goto end_no_lock; + } + + /* sanity check for incoming parameters: + * check if configuration can support the BA policy + * and if buffer size does not exceeds max value */ + /* XXX: check own ht delayed BA capability?? */ + if (((ba_policy != 1) && + (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || + (buf_size > IEEE80211_MAX_AMPDU_BUF)) { + status = WLAN_STATUS_INVALID_QOS_PARAM; +#ifdef CONFIG_MAC80211_HT_DEBUG + if (net_ratelimit()) + printk(KERN_DEBUG "AddBA Req with bad params from " + "%pM on tid %u. policy %d, buffer size %d\n", + mgmt->sa, tid, ba_policy, + buf_size); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + goto end_no_lock; + } + /* determine default buffer size */ + if (buf_size == 0) { + struct ieee80211_supported_band *sband; + + sband = local->hw.wiphy->bands[conf->channel->band]; + buf_size = IEEE80211_MIN_AMPDU_BUF; + buf_size = buf_size << sband->ht_cap.ampdu_factor; + } + + + /* examine state machine */ + spin_lock_bh(&sta->lock); + + if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { +#ifdef CONFIG_MAC80211_HT_DEBUG + if (net_ratelimit()) + printk(KERN_DEBUG "unexpected AddBA Req from " + "%pM on tid %u\n", + mgmt->sa, tid); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + goto end; + } + + /* prepare A-MPDU MLME for Rx aggregation */ + sta->ampdu_mlme.tid_rx[tid] = + kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); + if (!sta->ampdu_mlme.tid_rx[tid]) { +#ifdef CONFIG_MAC80211_HT_DEBUG + if (net_ratelimit()) + printk(KERN_ERR "allocate rx mlme to tid %d failed\n", + tid); +#endif + goto end; + } + /* rx timer */ + sta->ampdu_mlme.tid_rx[tid]->session_timer.function = + sta_rx_agg_session_timer_expired; + sta->ampdu_mlme.tid_rx[tid]->session_timer.data = + (unsigned long)&sta->timer_to_tid[tid]; + init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer); + + tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; + + /* prepare reordering buffer */ + tid_agg_rx->reorder_buf = + kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC); + tid_agg_rx->reorder_time = + kcalloc(buf_size, sizeof(unsigned long), GFP_ATOMIC); + if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) { +#ifdef CONFIG_MAC80211_HT_DEBUG + if (net_ratelimit()) + printk(KERN_ERR "can not allocate reordering buffer " + "to tid %d\n", tid); +#endif + kfree(tid_agg_rx->reorder_buf); + kfree(tid_agg_rx->reorder_time); + kfree(sta->ampdu_mlme.tid_rx[tid]); + sta->ampdu_mlme.tid_rx[tid] = NULL; + goto end; + } + + ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START, + &sta->sta, tid, &start_seq_num); +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + + if (ret) { + kfree(tid_agg_rx->reorder_buf); + kfree(tid_agg_rx); + sta->ampdu_mlme.tid_rx[tid] = NULL; + goto end; + } + + /* change state and send addba resp */ + sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL; + tid_agg_rx->dialog_token = dialog_token; + tid_agg_rx->ssn = start_seq_num; + tid_agg_rx->head_seq_num = start_seq_num; + tid_agg_rx->buf_size = buf_size; + tid_agg_rx->timeout = timeout; + tid_agg_rx->stored_mpdu_num = 0; + status = WLAN_STATUS_SUCCESS; +end: + spin_unlock_bh(&sta->lock); + +end_no_lock: + ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, + dialog_token, status, 1, buf_size, timeout); +} diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c new file mode 100644 index 0000000..5538e1b --- /dev/null +++ b/net/mac80211/agg-tx.c @@ -0,0 +1,696 @@ +/* + * HT 
handling + * + * Copyright 2003, Jouni Malinen + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * Copyright 2007-2009, Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "wme.h" + +/** + * DOC: TX aggregation + * + * Aggregation on the TX side requires setting the hardware flag + * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues + * hardware parameter to the number of hardware AMPDU queues. If there are no + * hardware queues then the driver will (currently) have to do all frame + * buffering. + * + * When TX aggregation is started by some subsystem (usually the rate control + * algorithm would be appropriate) by calling the + * ieee80211_start_tx_ba_session() function, the driver will be notified via + * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action. + * + * In response to that, the driver is later required to call the + * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe()) + * function, which will start the aggregation session. + * + * Similarly, when the aggregation session is stopped by + * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will + * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the + * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb() + * (or ieee80211_stop_tx_ba_cb_irqsafe()). + */ + +static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, + const u8 *da, u16 tid, + u8 dialog_token, u16 start_seq_num, + u16 agg_size, u16 timeout) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + u16 capab; + + skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); + + if (!skb) { + printk(KERN_ERR "%s: failed to allocate buffer " + "for addba request frame\n", sdata->name); + return; + } + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); + memset(mgmt, 0, 24); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + else if (sdata->vif.type == NL80211_IFTYPE_STATION) + memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); + + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); + + mgmt->u.action.category = WLAN_CATEGORY_BACK; + mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; + + mgmt->u.action.u.addba_req.dialog_token = dialog_token; + capab = (u16)(1 << 1); /* bit 1 aggregation policy */ + capab |= (u16)(tid << 2); /* bit 5:2 TID number */ + capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */ + + mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); + + mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout); + mgmt->u.action.u.addba_req.start_seq_num = + cpu_to_le16(start_seq_num << 4); + + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn) +{ + struct ieee80211_local *local = sdata->local; + struct 
sk_buff *skb; + struct ieee80211_bar *bar; + u16 bar_control = 0; + + skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); + if (!skb) { + printk(KERN_ERR "%s: failed to allocate buffer for " + "bar frame\n", sdata->name); + return; + } + skb_reserve(skb, local->hw.extra_tx_headroom); + bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); + memset(bar, 0, sizeof(*bar)); + bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | + IEEE80211_STYPE_BACK_REQ); + memcpy(bar->ra, ra, ETH_ALEN); + memcpy(bar->ta, sdata->vif.addr, ETH_ALEN); + bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; + bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; + bar_control |= (u16)(tid << 12); + bar->control = cpu_to_le16(bar_control); + bar->start_seq_num = cpu_to_le16(ssn); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, skb); +} + +int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + enum ieee80211_back_parties initiator) +{ + struct ieee80211_local *local = sta->local; + int ret; + u8 *state; + +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n", + sta->sta.addr, tid); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + + state = &sta->ampdu_mlme.tid_state_tx[tid]; + + if (*state == HT_AGG_STATE_OPERATIONAL) + sta->ampdu_mlme.addba_req_num[tid] = 0; + + *state = HT_AGG_STATE_REQ_STOP_BA_MSK | + (initiator << HT_AGG_STATE_INITIATOR_SHIFT); + + ret = drv_ampdu_action(local, sta->sdata, + IEEE80211_AMPDU_TX_STOP, + &sta->sta, tid, NULL); + + /* HW shall not deny going back to legacy */ + if (WARN_ON(ret)) { + /* + * We may have pending packets get stuck in this case... + * Not bothering with a workaround for now. + */ + } + + return ret; +} + +/* + * After sending add Block Ack request we activated a timer until + * add Block Ack response will arrive from the recipient. + * If this timer expires sta_addba_resp_timer_expired will be executed. 
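+ *
+ * Editorial illustration (not part of the original patch): the timer is
+ * armed at the end of ieee80211_start_tx_ba_session() below, roughly
+ *
+ *	tid_tx->addba_resp_timer.expires = jiffies + ADDBA_RESP_INTERVAL;
+ *	add_timer(&tid_tx->addba_resp_timer);
+ *
+ * where tid_tx is shorthand for sta->ampdu_mlme.tid_tx[tid]; a response
+ * with a matching dialog token deletes it again in
+ * ieee80211_process_addba_resp().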
+ */ +static void sta_addba_resp_timer_expired(unsigned long data) +{ + /* not an elegant detour, but there is no choice as the timer passes + * only one argument, and both sta_info and TID are needed, so init + * flow in sta_info_create gives the TID as data, while the timer_to_id + * array gives the sta through container_of */ + u16 tid = *(u8 *)data; + struct sta_info *sta = container_of((void *)data, + struct sta_info, timer_to_tid[tid]); + u8 *state; + + state = &sta->ampdu_mlme.tid_state_tx[tid]; + + /* check if the TID waits for addBA response */ + spin_lock_bh(&sta->lock); + if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK | + HT_AGG_STATE_REQ_STOP_BA_MSK)) != + HT_ADDBA_REQUESTED_MSK) { + spin_unlock_bh(&sta->lock); + *state = HT_AGG_STATE_IDLE; +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "timer expired on tid %d but we are not " + "(or no longer) expecting addBA response there", + tid); +#endif + return; + } + +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); +#endif + + ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); + spin_unlock_bh(&sta->lock); +} + +static inline int ieee80211_ac_from_tid(int tid) +{ + return ieee802_1d_to_ac[tid & 7]; +} + +int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + u8 *state; + int ret = 0; + u16 start_seq_num; + + if (WARN_ON(!local->ops->ampdu_action)) + return -EINVAL; + + if ((tid >= STA_TID_NUM) || + !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) + return -EINVAL; + +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n", + pubsta->addr, tid); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + + /* + * The aggregation code is not prepared to handle + * anything but STA/AP due to the BSSID handling. + * IBSS could work in the code but isn't supported + * by drivers or the standard. + */ + if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_AP) + return -EINVAL; + + if (test_sta_flags(sta, WLAN_STA_DISASSOC)) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Disassociation is in progress. " + "Denying BA session request\n"); +#endif + return -EINVAL; + } + + if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Suspend in progress. " + "Denying BA session request\n"); +#endif + return -EINVAL; + } + + spin_lock_bh(&sta->lock); + spin_lock(&local->ampdu_lock); + + /* we have tried too many times, receiver does not want A-MPDU */ + if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { + ret = -EBUSY; + goto err_unlock_sta; + } + + state = &sta->ampdu_mlme.tid_state_tx[tid]; + /* check if the TID is not in aggregation flow already */ + if (*state != HT_AGG_STATE_IDLE) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "BA request denied - session is not " + "idle on tid %u\n", tid); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + ret = -EAGAIN; + goto err_unlock_sta; + } + + /* + * While we're asking the driver about the aggregation, + * stop the AC queue so that we don't have to worry + * about frames that came in while we were doing that, + * which would require us to put them to the AC pending + * afterwards which just makes the code more complex. 
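+ *
+ * Editorial note (not part of the original patch): the stop/wake pair is
+ * balanced on every path, roughly
+ *
+ *	ieee80211_stop_queue_by_reason(&local->hw, queue, ...AGGREGATION);
+ *	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, ...);
+ *	ieee80211_wake_queue_by_reason(&local->hw, queue, ...AGGREGATION);
+ *
+ * with the wake also reachable through the err_wake_queue error label.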
+ */ + ieee80211_stop_queue_by_reason( + &local->hw, ieee80211_ac_from_tid(tid), + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + + /* prepare A-MPDU MLME for Tx aggregation */ + sta->ampdu_mlme.tid_tx[tid] = + kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); + if (!sta->ampdu_mlme.tid_tx[tid]) { +#ifdef CONFIG_MAC80211_HT_DEBUG + if (net_ratelimit()) + printk(KERN_ERR "allocate tx mlme to tid %d failed\n", + tid); +#endif + ret = -ENOMEM; + goto err_wake_queue; + } + + skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending); + + /* Tx timer */ + sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = + sta_addba_resp_timer_expired; + sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data = + (unsigned long)&sta->timer_to_tid[tid]; + init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); + + /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the + * call back right away, it must see that the flow has begun */ + *state |= HT_ADDBA_REQUESTED_MSK; + + start_seq_num = sta->tid_seq[tid] >> 4; + + ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, + pubsta, tid, &start_seq_num); + + if (ret) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "BA request denied - HW unavailable for" + " tid %d\n", tid); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + *state = HT_AGG_STATE_IDLE; + goto err_free; + } + + /* Driver vetoed or OKed, but we can take packets again now */ + ieee80211_wake_queue_by_reason( + &local->hw, ieee80211_ac_from_tid(tid), + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + + spin_unlock(&local->ampdu_lock); + spin_unlock_bh(&sta->lock); + + /* send an addBA request */ + sta->ampdu_mlme.dialog_token_allocator++; + sta->ampdu_mlme.tid_tx[tid]->dialog_token = + sta->ampdu_mlme.dialog_token_allocator; + sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; + + ieee80211_send_addba_request(sdata, pubsta->addr, tid, + sta->ampdu_mlme.tid_tx[tid]->dialog_token, + sta->ampdu_mlme.tid_tx[tid]->ssn, + 0x40, 5000); + sta->ampdu_mlme.addba_req_num[tid]++; + /* activate the timer for the recipient's addBA response */ + sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = + jiffies + ADDBA_RESP_INTERVAL; + add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); +#endif + return 0; + + err_free: + kfree(sta->ampdu_mlme.tid_tx[tid]); + sta->ampdu_mlme.tid_tx[tid] = NULL; + err_wake_queue: + ieee80211_wake_queue_by_reason( + &local->hw, ieee80211_ac_from_tid(tid), + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + err_unlock_sta: + spin_unlock(&local->ampdu_lock); + spin_unlock_bh(&sta->lock); + return ret; +} +EXPORT_SYMBOL(ieee80211_start_tx_ba_session); + +/* + * splice packets from the STA's pending to the local pending, + * requires a call to ieee80211_agg_splice_finish and holding + * local->ampdu_lock across both calls. 
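+ *
+ * Editorial illustration (not part of the original patch): the expected
+ * calling pattern, as used by ieee80211_agg_tx_operational() below, is
+ *
+ *	spin_lock(&local->ampdu_lock);
+ *	ieee80211_agg_splice_packets(local, sta, tid);
+ *	ieee80211_agg_splice_finish(local, sta, tid);
+ *	spin_unlock(&local->ampdu_lock);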
+ */ +static void ieee80211_agg_splice_packets(struct ieee80211_local *local, + struct sta_info *sta, u16 tid) +{ + unsigned long flags; + u16 queue = ieee80211_ac_from_tid(tid); + + ieee80211_stop_queue_by_reason( + &local->hw, queue, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); + + if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)) + return; + + if (WARN(!sta->ampdu_mlme.tid_tx[tid], + "TID %d gone but expected when splicing aggregates from" + "the pending queue\n", tid)) + return; + + if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) { + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + /* copy over remaining packets */ + skb_queue_splice_tail_init( + &sta->ampdu_mlme.tid_tx[tid]->pending, + &local->pending[queue]); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + } +} + +static void ieee80211_agg_splice_finish(struct ieee80211_local *local, + struct sta_info *sta, u16 tid) +{ + u16 queue = ieee80211_ac_from_tid(tid); + + ieee80211_wake_queue_by_reason( + &local->hw, queue, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION); +} + +/* caller must hold sta->lock */ +static void ieee80211_agg_tx_operational(struct ieee80211_local *local, + struct sta_info *sta, u16 tid) +{ +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); +#endif + + spin_lock(&local->ampdu_lock); + ieee80211_agg_splice_packets(local, sta, tid); + /* + * NB: we rely on sta->lock being taken in the TX + * processing here when adding to the pending queue, + * otherwise we could only change the state of the + * session to OPERATIONAL _here_. + */ + ieee80211_agg_splice_finish(local, sta, tid); + spin_unlock(&local->ampdu_lock); + + drv_ampdu_action(local, sta->sdata, + IEEE80211_AMPDU_TX_OPERATIONAL, + &sta->sta, tid, NULL); +} + +void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + u8 *state; + + if (tid >= STA_TID_NUM) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", + tid, STA_TID_NUM); +#endif + return; + } + + rcu_read_lock(); + sta = sta_info_get(sdata, ra); + if (!sta) { + rcu_read_unlock(); +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Could not find station: %pM\n", ra); +#endif + return; + } + + state = &sta->ampdu_mlme.tid_state_tx[tid]; + spin_lock_bh(&sta->lock); + + if (WARN_ON(!(*state & HT_ADDBA_REQUESTED_MSK))) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", + *state); +#endif + spin_unlock_bh(&sta->lock); + rcu_read_unlock(); + return; + } + + if (WARN_ON(*state & HT_ADDBA_DRV_READY_MSK)) + goto out; + + *state |= HT_ADDBA_DRV_READY_MSK; + + if (*state == HT_AGG_STATE_OPERATIONAL) + ieee80211_agg_tx_operational(local, sta, tid); + + out: + spin_unlock_bh(&sta->lock); + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); + +void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, + const u8 *ra, u16 tid) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct ieee80211_ra_tid *ra_tid; + struct sk_buff *skb = dev_alloc_skb(0); + + if (unlikely(!skb)) { +#ifdef CONFIG_MAC80211_HT_DEBUG + if (net_ratelimit()) + printk(KERN_WARNING "%s: Not enough memory, " + "dropping start BA session", sdata->name); +#endif + return; + } + ra_tid = (struct ieee80211_ra_tid *) &skb->cb; + 
memcpy(&ra_tid->ra, ra, ETH_ALEN); + ra_tid->tid = tid; + ra_tid->vif = vif; + + skb->pkt_type = IEEE80211_ADDBA_MSG; + skb_queue_tail(&local->skb_queue, skb); + tasklet_schedule(&local->tasklet); +} +EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); + +int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + enum ieee80211_back_parties initiator) +{ + u8 *state; + int ret; + + /* check if the TID is in aggregation */ + state = &sta->ampdu_mlme.tid_state_tx[tid]; + spin_lock_bh(&sta->lock); + + if (*state != HT_AGG_STATE_OPERATIONAL) { + ret = -ENOENT; + goto unlock; + } + + ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator); + + unlock: + spin_unlock_bh(&sta->lock); + return ret; +} + +int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, + enum ieee80211_back_parties initiator) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + + if (!local->ops->ampdu_action) + return -EINVAL; + + if (tid >= STA_TID_NUM) + return -EINVAL; + + return __ieee80211_stop_tx_ba_session(sta, tid, initiator); +} +EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); + +void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + u8 *state; + + if (tid >= STA_TID_NUM) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", + tid, STA_TID_NUM); +#endif + return; + } + +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n", + ra, tid); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + + rcu_read_lock(); + sta = sta_info_get(sdata, ra); + if (!sta) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "Could not find station: %pM\n", ra); +#endif + rcu_read_unlock(); + return; + } + state = &sta->ampdu_mlme.tid_state_tx[tid]; + + /* NOTE: no need to use sta->lock in this state check, as + * ieee80211_stop_tx_ba_session will let only one stop call to + * pass through per sta/tid + */ + if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); +#endif + rcu_read_unlock(); + return; + } + + if (*state & HT_AGG_STATE_INITIATOR_MSK) + ieee80211_send_delba(sta->sdata, ra, tid, + WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); + + spin_lock_bh(&sta->lock); + spin_lock(&local->ampdu_lock); + + ieee80211_agg_splice_packets(local, sta, tid); + + *state = HT_AGG_STATE_IDLE; + /* from now on packets are no longer put onto sta->pending */ + kfree(sta->ampdu_mlme.tid_tx[tid]); + sta->ampdu_mlme.tid_tx[tid] = NULL; + + ieee80211_agg_splice_finish(local, sta, tid); + + spin_unlock(&local->ampdu_lock); + spin_unlock_bh(&sta->lock); + + rcu_read_unlock(); +} +EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); + +void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, + const u8 *ra, u16 tid) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct ieee80211_ra_tid *ra_tid; + struct sk_buff *skb = dev_alloc_skb(0); + + if (unlikely(!skb)) { +#ifdef CONFIG_MAC80211_HT_DEBUG + if (net_ratelimit()) + printk(KERN_WARNING "%s: Not enough memory, " + "dropping stop BA session", sdata->name); +#endif + return; + } + ra_tid = (struct ieee80211_ra_tid *) &skb->cb; + memcpy(&ra_tid->ra, ra, ETH_ALEN); + ra_tid->tid = tid; + ra_tid->vif = 
vif; + + skb->pkt_type = IEEE80211_DELBA_MSG; + skb_queue_tail(&local->skb_queue, skb); + tasklet_schedule(&local->tasklet); +} +EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); + + +void ieee80211_process_addba_resp(struct ieee80211_local *local, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + u16 capab, tid; + u8 *state; + + capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); + tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; + + state = &sta->ampdu_mlme.tid_state_tx[tid]; + + spin_lock_bh(&sta->lock); + + if (!(*state & HT_ADDBA_REQUESTED_MSK)) + goto out; + + if (mgmt->u.action.u.addba_resp.dialog_token != + sta->ampdu_mlme.tid_tx[tid]->dialog_token) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + goto out; + } + + del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); + +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + + if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) + == WLAN_STATUS_SUCCESS) { + u8 curstate = *state; + + *state |= HT_ADDBA_RECEIVED_MSK; + + if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL) + ieee80211_agg_tx_operational(local, sta, tid); + + sta->ampdu_mlme.addba_req_num[tid] = 0; + } else { + ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); + } + + out: + spin_unlock_bh(&sta->lock); +} diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c new file mode 100644 index 0000000..f6e14cb --- /dev/null +++ b/net/mac80211/cfg.c @@ -0,0 +1,1511 @@ +/* + * mac80211 configuration hooks for cfg80211 + * + * Copyright 2006-2010 Johannes Berg + * + * This file is GPLv2 as found in COPYING. 
+ */ + +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +#include +#endif +#include +#include +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "cfg.h" +#include "rate.h" +#include "mesh.h" + +static bool nl80211_type_check(enum nl80211_iftype type) +{ + switch (type) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_MONITOR: +#ifdef CONFIG_MAC80211_MESH + case NL80211_IFTYPE_MESH_POINT: +#endif + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + return true; + default: + return false; + } +} + +static bool nl80211_params_check(enum nl80211_iftype type, + struct vif_params *params) +{ + if (!nl80211_type_check(type)) + return false; + + return true; +} + +static int ieee80211_add_iface(struct wiphy *wiphy, char *name, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct net_device *dev; + struct ieee80211_sub_if_data *sdata; + int err; + + if (!nl80211_params_check(type, params)) + return -EINVAL; + + err = ieee80211_if_add(local, name, &dev, type, params); + if (err || type != NL80211_IFTYPE_MONITOR || !flags) + return err; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + sdata->u.mntr_flags = *flags; + return 0; +} + +static int ieee80211_del_iface(struct wiphy *wiphy, struct net_device *dev) +{ + ieee80211_if_remove(IEEE80211_DEV_TO_SUB_IF(dev)); + + return 0; +} + +static int ieee80211_change_iface(struct wiphy *wiphy, + struct net_device *dev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int ret; + + if (ieee80211_sdata_running(sdata)) + return -EBUSY; + + if (!nl80211_params_check(type, params)) + return -EINVAL; + + ret = ieee80211_if_change_type(sdata, type); + if (ret) + return ret; + + if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len) + ieee80211_sdata_set_mesh_id(sdata, + params->mesh_id_len, + params->mesh_id); + + if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags) + return 0; + + if (type == NL80211_IFTYPE_AP_VLAN && + params && params->use_4addr == 0) + rcu_assign_pointer(sdata->u.vlan.sta, NULL); + else if (type == NL80211_IFTYPE_STATION && + params && params->use_4addr >= 0) + sdata->u.mgd.use_4addr = params->use_4addr; + + sdata->u.mntr_flags = *flags; + return 0; +} + +static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, const u8 *mac_addr, + struct key_params *params) +{ + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta = NULL; + enum ieee80211_key_alg alg; + struct ieee80211_key *key; + int err; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + alg = ALG_WEP; + break; + case WLAN_CIPHER_SUITE_TKIP: + alg = ALG_TKIP; + break; + case WLAN_CIPHER_SUITE_CCMP: + alg = ALG_CCMP; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + alg = ALG_AES_CMAC; + break; + default: + return -EINVAL; + } + + key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key, + params->seq_len, params->seq); + if (!key) + return -ENOMEM; + + rcu_read_lock(); + + if (mac_addr) { + sta = sta_info_get_bss(sdata, mac_addr); + if (!sta) { + ieee80211_key_free(key); + err = -ENOENT; + goto out_unlock; + } + } + + ieee80211_key_link(key, sdata, sta); + + err = 0; + out_unlock: + rcu_read_unlock(); + + return err; +} + +static int ieee80211_del_key(struct wiphy *wiphy, 
struct net_device *dev, + u8 key_idx, const u8 *mac_addr) +{ + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + int ret; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + + if (mac_addr) { + ret = -ENOENT; + + sta = sta_info_get_bss(sdata, mac_addr); + if (!sta) + goto out_unlock; + + if (sta->key) { + ieee80211_key_free(sta->key); + WARN_ON(sta->key); + ret = 0; + } + + goto out_unlock; + } + + if (!sdata->keys[key_idx]) { + ret = -ENOENT; + goto out_unlock; + } + + ieee80211_key_free(sdata->keys[key_idx]); + WARN_ON(sdata->keys[key_idx]); + + ret = 0; + out_unlock: + rcu_read_unlock(); + + return ret; +} + +static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, const u8 *mac_addr, void *cookie, + void (*callback)(void *cookie, + struct key_params *params)) +{ + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta = NULL; + u8 seq[6] = {0}; + struct key_params params; + struct ieee80211_key *key; + u32 iv32; + u16 iv16; + int err = -ENOENT; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + + if (mac_addr) { + sta = sta_info_get_bss(sdata, mac_addr); + if (!sta) + goto out; + + key = sta->key; + } else + key = sdata->keys[key_idx]; + + if (!key) + goto out; + + memset(¶ms, 0, sizeof(params)); + + switch (key->conf.alg) { + case ALG_TKIP: + params.cipher = WLAN_CIPHER_SUITE_TKIP; + + iv32 = key->u.tkip.tx.iv32; + iv16 = key->u.tkip.tx.iv16; + + if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) + drv_get_tkip_seq(sdata->local, + key->conf.hw_key_idx, + &iv32, &iv16); + + seq[0] = iv16 & 0xff; + seq[1] = (iv16 >> 8) & 0xff; + seq[2] = iv32 & 0xff; + seq[3] = (iv32 >> 8) & 0xff; + seq[4] = (iv32 >> 16) & 0xff; + seq[5] = (iv32 >> 24) & 0xff; + params.seq = seq; + params.seq_len = 6; + break; + case ALG_CCMP: + params.cipher = WLAN_CIPHER_SUITE_CCMP; + seq[0] = key->u.ccmp.tx_pn[5]; + seq[1] = key->u.ccmp.tx_pn[4]; + seq[2] = key->u.ccmp.tx_pn[3]; + seq[3] = key->u.ccmp.tx_pn[2]; + seq[4] = key->u.ccmp.tx_pn[1]; + seq[5] = key->u.ccmp.tx_pn[0]; + params.seq = seq; + params.seq_len = 6; + break; + case ALG_WEP: + if (key->conf.keylen == 5) + params.cipher = WLAN_CIPHER_SUITE_WEP40; + else + params.cipher = WLAN_CIPHER_SUITE_WEP104; + break; + case ALG_AES_CMAC: + params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; + seq[0] = key->u.aes_cmac.tx_pn[5]; + seq[1] = key->u.aes_cmac.tx_pn[4]; + seq[2] = key->u.aes_cmac.tx_pn[3]; + seq[3] = key->u.aes_cmac.tx_pn[2]; + seq[4] = key->u.aes_cmac.tx_pn[1]; + seq[5] = key->u.aes_cmac.tx_pn[0]; + params.seq = seq; + params.seq_len = 6; + break; + } + + params.key = key->conf.key; + params.key_len = key->conf.keylen; + + callback(cookie, ¶ms); + err = 0; + + out: + rcu_read_unlock(); + return err; +} + +static int ieee80211_config_default_key(struct wiphy *wiphy, + struct net_device *dev, + u8 key_idx) +{ + struct ieee80211_sub_if_data *sdata; + + rcu_read_lock(); + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + ieee80211_set_default_key(sdata, key_idx); + + rcu_read_unlock(); + + return 0; +} + +static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy, + struct net_device *dev, + u8 key_idx) +{ + struct ieee80211_sub_if_data *sdata; + + rcu_read_lock(); + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + ieee80211_set_default_mgmt_key(sdata, key_idx); + + rcu_read_unlock(); + + return 0; +} + +static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + + sinfo->generation = sdata->local->sta_generation; + + 
sinfo->filled = STATION_INFO_INACTIVE_TIME | + STATION_INFO_RX_BYTES | + STATION_INFO_TX_BYTES | + STATION_INFO_RX_PACKETS | + STATION_INFO_TX_PACKETS | + STATION_INFO_TX_BITRATE; + + sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); + sinfo->rx_bytes = sta->rx_bytes; + sinfo->tx_bytes = sta->tx_bytes; + sinfo->rx_packets = sta->rx_packets; + sinfo->tx_packets = sta->tx_packets; + + if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) || + (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) { + sinfo->filled |= STATION_INFO_SIGNAL; + sinfo->signal = (s8)sta->last_signal; + } + + sinfo->txrate.flags = 0; + if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS) + sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; + if (sta->last_tx_rate.flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; + if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI) + sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + + if (!(sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)) { + struct ieee80211_supported_band *sband; + sband = sta->local->hw.wiphy->bands[ + sta->local->hw.conf.channel->band]; + sinfo->txrate.legacy = + sband->bitrates[sta->last_tx_rate.idx].bitrate; + } else + sinfo->txrate.mcs = sta->last_tx_rate.idx; + + if (ieee80211_vif_is_mesh(&sdata->vif)) { +#ifdef CONFIG_MAC80211_MESH + sinfo->filled |= STATION_INFO_LLID | + STATION_INFO_PLID | + STATION_INFO_PLINK_STATE; + + sinfo->llid = le16_to_cpu(sta->llid); + sinfo->plid = le16_to_cpu(sta->plid); + sinfo->plink_state = sta->plink_state; +#endif + } +} + + +static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct sta_info *sta; + int ret = -ENOENT; + + rcu_read_lock(); + + sta = sta_info_get_by_idx(sdata, idx); + if (sta) { + ret = 0; + memcpy(mac, sta->sta.addr, ETH_ALEN); + sta_set_sinfo(sta, sinfo); + } + + rcu_read_unlock(); + + return ret; +} + +static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_info *sinfo) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct sta_info *sta; + int ret = -ENOENT; + + rcu_read_lock(); + + sta = sta_info_get_bss(sdata, mac); + if (sta) { + ret = 0; + sta_set_sinfo(sta, sinfo); + } + + rcu_read_unlock(); + + return ret; +} + +/* + * This handles both adding a beacon and setting new beacon info + */ +static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata, + struct beacon_parameters *params) +{ + struct beacon_data *new, *old; + int new_head_len, new_tail_len; + int size; + int err = -EINVAL; + + old = sdata->u.ap.beacon; + + /* head must not be zero-length */ + if (params->head && !params->head_len) + return -EINVAL; + + /* + * This is a kludge. beacon interval should really be part + * of the beacon information. + */ + if (params->interval && + (sdata->vif.bss_conf.beacon_int != params->interval)) { + sdata->vif.bss_conf.beacon_int = params->interval; + ieee80211_bss_info_change_notify(sdata, + BSS_CHANGED_BEACON_INT); + } + + /* Need to have a beacon head if we don't have one yet */ + if (!params->head && !old) + return err; + + /* sorry, no way to start beaconing without dtim period */ + if (!params->dtim_period && !old) + return err; + + /* new or old head? */ + if (params->head) + new_head_len = params->head_len; + else + new_head_len = old->head_len; + + /* new or old tail? 
*/ + if (params->tail || !old) + /* params->tail_len will be zero for !params->tail */ + new_tail_len = params->tail_len; + else + new_tail_len = old->tail_len; + + size = sizeof(*new) + new_head_len + new_tail_len; + + new = kzalloc(size, GFP_KERNEL); + if (!new) + return -ENOMEM; + + /* start filling the new info now */ + + /* new or old dtim period? */ + if (params->dtim_period) + new->dtim_period = params->dtim_period; + else + new->dtim_period = old->dtim_period; + + /* + * pointers go into the block we allocated, + * memory is | beacon_data | head | tail | + */ + new->head = ((u8 *) new) + sizeof(*new); + new->tail = new->head + new_head_len; + new->head_len = new_head_len; + new->tail_len = new_tail_len; + + /* copy in head */ + if (params->head) + memcpy(new->head, params->head, new_head_len); + else + memcpy(new->head, old->head, new_head_len); + + /* copy in optional tail */ + if (params->tail) + memcpy(new->tail, params->tail, new_tail_len); + else + if (old) + memcpy(new->tail, old->tail, new_tail_len); + + sdata->vif.bss_conf.dtim_period = new->dtim_period; + + rcu_assign_pointer(sdata->u.ap.beacon, new); + + synchronize_rcu(); + + kfree(old); + + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_BEACON); + return 0; +} + +static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *params) +{ + struct ieee80211_sub_if_data *sdata; + struct beacon_data *old; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + old = sdata->u.ap.beacon; + + if (old) + return -EALREADY; + + return ieee80211_config_beacon(sdata, params); +} + +static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *params) +{ + struct ieee80211_sub_if_data *sdata; + struct beacon_data *old; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + old = sdata->u.ap.beacon; + + if (!old) + return -ENOENT; + + return ieee80211_config_beacon(sdata, params); +} + +static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata; + struct beacon_data *old; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + old = sdata->u.ap.beacon; + + if (!old) + return -ENOENT; + + rcu_assign_pointer(sdata->u.ap.beacon, NULL); + synchronize_rcu(); + kfree(old); + + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); + return 0; +} + +/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ +struct iapp_layer2_update { + u8 da[ETH_ALEN]; /* broadcast */ + u8 sa[ETH_ALEN]; /* STA addr */ + __be16 len; /* 6 */ + u8 dsap; /* 0 */ + u8 ssap; /* 0 */ + u8 control; + u8 xid_info[3]; +} __attribute__ ((packed)); + +static void ieee80211_send_layer2_update(struct sta_info *sta) +{ + struct iapp_layer2_update *msg; + struct sk_buff *skb; + + /* Send Level 2 Update Frame to update forwarding tables in layer 2 + * bridge devices */ + + skb = dev_alloc_skb(sizeof(*msg)); + if (!skb) + return; + msg = (struct iapp_layer2_update *)skb_put(skb, sizeof(*msg)); + + /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID) + * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ + + memset(msg->da, 0xff, ETH_ALEN); + memcpy(msg->sa, sta->sta.addr, ETH_ALEN); + msg->len = htons(6); + msg->dsap = 0; + msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */ + msg->control = 0xaf; /* XID response lsb.1111F101. 
+ * F=0 (no poll command; unsolicited frame) */ + msg->xid_info[0] = 0x81; /* XID format identifier */ + msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */ + msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */ + + skb->dev = sta->sdata->dev; + skb->protocol = eth_type_trans(skb, sta->sdata->dev); + memset(skb->cb, 0, sizeof(skb->cb)); + netif_rx(skb); +} + +static void sta_apply_parameters(struct ieee80211_local *local, + struct sta_info *sta, + struct station_parameters *params) +{ + u32 rates; + int i, j; + struct ieee80211_supported_band *sband; + struct ieee80211_sub_if_data *sdata = sta->sdata; + u32 mask, set; + + sband = local->hw.wiphy->bands[local->oper_channel->band]; + + spin_lock_bh(&sta->lock); + mask = params->sta_flags_mask; + set = params->sta_flags_set; + + if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { + sta->flags &= ~WLAN_STA_AUTHORIZED; + if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) + sta->flags |= WLAN_STA_AUTHORIZED; + } + + if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) { + sta->flags &= ~WLAN_STA_SHORT_PREAMBLE; + if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) + sta->flags |= WLAN_STA_SHORT_PREAMBLE; + } + + if (mask & BIT(NL80211_STA_FLAG_WME)) { + sta->flags &= ~WLAN_STA_WME; + if (set & BIT(NL80211_STA_FLAG_WME)) + sta->flags |= WLAN_STA_WME; + } + + if (mask & BIT(NL80211_STA_FLAG_MFP)) { + sta->flags &= ~WLAN_STA_MFP; + if (set & BIT(NL80211_STA_FLAG_MFP)) + sta->flags |= WLAN_STA_MFP; + } + spin_unlock_bh(&sta->lock); + + /* + * cfg80211 validates this (1-2007) and allows setting the AID + * only when creating a new station entry + */ + if (params->aid) + sta->sta.aid = params->aid; + + /* + * FIXME: updating the following information is racy when this + * function is called from ieee80211_change_station(). + * However, all this information should be static so + * maybe we should just reject attemps to change it. 
+ */ + + if (params->listen_interval >= 0) + sta->listen_interval = params->listen_interval; + + if (params->supported_rates) { + rates = 0; + + for (i = 0; i < params->supported_rates_len; i++) { + int rate = (params->supported_rates[i] & 0x7f) * 5; + for (j = 0; j < sband->n_bitrates; j++) { + if (sband->bitrates[j].bitrate == rate) + rates |= BIT(j); + } + } + sta->sta.supp_rates[local->oper_channel->band] = rates; + } + + if (params->ht_capa) + ieee80211_ht_cap_ie_to_sta_ht_cap(sband, + params->ht_capa, + &sta->sta.ht_cap); + + if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) { + switch (params->plink_action) { + case PLINK_ACTION_OPEN: + mesh_plink_open(sta); + break; + case PLINK_ACTION_BLOCK: + mesh_plink_block(sta); + break; + } + } +} + +static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_parameters *params) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct sta_info *sta; + struct ieee80211_sub_if_data *sdata; + int err; + int layer2_update; + + if (params->vlan) { + sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); + + if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_AP) + return -EINVAL; + } else + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (compare_ether_addr(mac, sdata->vif.addr) == 0) + return -EINVAL; + + if (is_multicast_ether_addr(mac)) + return -EINVAL; + + sta = sta_info_alloc(sdata, mac, GFP_KERNEL); + if (!sta) + return -ENOMEM; + + sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; + + sta_apply_parameters(local, sta, params); + + rate_control_rate_init(sta); + + layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_AP; + + err = sta_info_insert_rcu(sta); + if (err) { + rcu_read_unlock(); + return err; + } + + if (layer2_update) + ieee80211_send_layer2_update(sta); + + rcu_read_unlock(); + + return 0; +} + +static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_sub_if_data *sdata; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (mac) + return sta_info_destroy_addr_bss(sdata, mac); + + sta_info_flush(local, sdata); + return 0; +} + +static int ieee80211_change_station(struct wiphy *wiphy, + struct net_device *dev, + u8 *mac, + struct station_parameters *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = wiphy_priv(wiphy); + struct sta_info *sta; + struct ieee80211_sub_if_data *vlansdata; + + rcu_read_lock(); + + sta = sta_info_get_bss(sdata, mac); + if (!sta) { + rcu_read_unlock(); + return -ENOENT; + } + + if (params->vlan && params->vlan != sta->sdata->dev) { + vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); + + if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN && + vlansdata->vif.type != NL80211_IFTYPE_AP) { + rcu_read_unlock(); + return -EINVAL; + } + + if (params->vlan->ieee80211_ptr->use_4addr) { + if (vlansdata->u.vlan.sta) { + rcu_read_unlock(); + return -EBUSY; + } + + rcu_assign_pointer(vlansdata->u.vlan.sta, sta); + } + + sta->sdata = vlansdata; + ieee80211_send_layer2_update(sta); + } + + sta_apply_parameters(local, sta, params); + + rcu_read_unlock(); + + return 0; +} + +#ifdef CONFIG_MAC80211_MESH +static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *next_hop) +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + struct sta_info *sta; + int err; + + sdata = 
IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + sta = sta_info_get(sdata, next_hop); + if (!sta) { + rcu_read_unlock(); + return -ENOENT; + } + + err = mesh_path_add(dst, sdata); + if (err) { + rcu_read_unlock(); + return err; + } + + mpath = mesh_path_lookup(dst, sdata); + if (!mpath) { + rcu_read_unlock(); + return -ENXIO; + } + mesh_path_fix_nexthop(mpath, sta); + + rcu_read_unlock(); + return 0; +} + +static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, + u8 *dst) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (dst) + return mesh_path_del(dst, sdata); + + mesh_path_flush(sdata); + return 0; +} + +static int ieee80211_change_mpath(struct wiphy *wiphy, + struct net_device *dev, + u8 *dst, u8 *next_hop) +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + struct sta_info *sta; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + + sta = sta_info_get(sdata, next_hop); + if (!sta) { + rcu_read_unlock(); + return -ENOENT; + } + + mpath = mesh_path_lookup(dst, sdata); + if (!mpath) { + rcu_read_unlock(); + return -ENOENT; + } + + mesh_path_fix_nexthop(mpath, sta); + + rcu_read_unlock(); + return 0; +} + +static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, + struct mpath_info *pinfo) +{ + if (mpath->next_hop) + memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN); + else + memset(next_hop, 0, ETH_ALEN); + + pinfo->generation = mesh_paths_generation; + + pinfo->filled = MPATH_INFO_FRAME_QLEN | + MPATH_INFO_SN | + MPATH_INFO_METRIC | + MPATH_INFO_EXPTIME | + MPATH_INFO_DISCOVERY_TIMEOUT | + MPATH_INFO_DISCOVERY_RETRIES | + MPATH_INFO_FLAGS; + + pinfo->frame_qlen = mpath->frame_queue.qlen; + pinfo->sn = mpath->sn; + pinfo->metric = mpath->metric; + if (time_before(jiffies, mpath->exp_time)) + pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies); + pinfo->discovery_timeout = + jiffies_to_msecs(mpath->discovery_timeout); + pinfo->discovery_retries = mpath->discovery_retries; + pinfo->flags = 0; + if (mpath->flags & MESH_PATH_ACTIVE) + pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; + if (mpath->flags & MESH_PATH_RESOLVING) + pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; + if (mpath->flags & MESH_PATH_SN_VALID) + pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID; + if (mpath->flags & MESH_PATH_FIXED) + pinfo->flags |= NL80211_MPATH_FLAG_FIXED; + if (mpath->flags & MESH_PATH_RESOLVING) + pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; + + pinfo->flags = mpath->flags; +} + +static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *next_hop, struct mpath_info *pinfo) + +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + mpath = mesh_path_lookup(dst, sdata); + if (!mpath) { + rcu_read_unlock(); + return -ENOENT; + } + memcpy(dst, mpath->dst, ETH_ALEN); + mpath_set_pinfo(mpath, next_hop, pinfo); + rcu_read_unlock(); + return 0; +} + +static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *next_hop, + struct mpath_info *pinfo) +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + rcu_read_lock(); + mpath = mesh_path_lookup_by_idx(idx, sdata); + if (!mpath) { + rcu_read_unlock(); + return -ENOENT; + } + memcpy(dst, mpath->dst, ETH_ALEN); + mpath_set_pinfo(mpath, next_hop, pinfo); + rcu_read_unlock(); + return 0; +} + +static int ieee80211_get_mesh_params(struct wiphy *wiphy, + struct 
net_device *dev, + struct mesh_config *conf) +{ + struct ieee80211_sub_if_data *sdata; + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + memcpy(conf, &(sdata->u.mesh.mshcfg), sizeof(struct mesh_config)); + return 0; +} + +static inline bool _chg_mesh_attr(enum nl80211_meshconf_params parm, u32 mask) +{ + return (mask >> (parm-1)) & 0x1; +} + +static int ieee80211_set_mesh_params(struct wiphy *wiphy, + struct net_device *dev, + const struct mesh_config *nconf, u32 mask) +{ + struct mesh_config *conf; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_if_mesh *ifmsh; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + ifmsh = &sdata->u.mesh; + + /* Set the config options which we are interested in setting */ + conf = &(sdata->u.mesh.mshcfg); + if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask)) + conf->dot11MeshRetryTimeout = nconf->dot11MeshRetryTimeout; + if (_chg_mesh_attr(NL80211_MESHCONF_CONFIRM_TIMEOUT, mask)) + conf->dot11MeshConfirmTimeout = nconf->dot11MeshConfirmTimeout; + if (_chg_mesh_attr(NL80211_MESHCONF_HOLDING_TIMEOUT, mask)) + conf->dot11MeshHoldingTimeout = nconf->dot11MeshHoldingTimeout; + if (_chg_mesh_attr(NL80211_MESHCONF_MAX_PEER_LINKS, mask)) + conf->dot11MeshMaxPeerLinks = nconf->dot11MeshMaxPeerLinks; + if (_chg_mesh_attr(NL80211_MESHCONF_MAX_RETRIES, mask)) + conf->dot11MeshMaxRetries = nconf->dot11MeshMaxRetries; + if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask)) + conf->dot11MeshTTL = nconf->dot11MeshTTL; + if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) + conf->auto_open_plinks = nconf->auto_open_plinks; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask)) + conf->dot11MeshHWMPmaxPREQretries = + nconf->dot11MeshHWMPmaxPREQretries; + if (_chg_mesh_attr(NL80211_MESHCONF_PATH_REFRESH_TIME, mask)) + conf->path_refresh_time = nconf->path_refresh_time; + if (_chg_mesh_attr(NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, mask)) + conf->min_discovery_timeout = nconf->min_discovery_timeout; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, mask)) + conf->dot11MeshHWMPactivePathTimeout = + nconf->dot11MeshHWMPactivePathTimeout; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask)) + conf->dot11MeshHWMPpreqMinInterval = + nconf->dot11MeshHWMPpreqMinInterval; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, + mask)) + conf->dot11MeshHWMPnetDiameterTraversalTime = + nconf->dot11MeshHWMPnetDiameterTraversalTime; + if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOTMODE, mask)) { + conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode; + ieee80211_mesh_root_setup(ifmsh); + } + return 0; +} + +#endif + +static int ieee80211_change_bss(struct wiphy *wiphy, + struct net_device *dev, + struct bss_parameters *params) +{ + struct ieee80211_sub_if_data *sdata; + u32 changed = 0; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (params->use_cts_prot >= 0) { + sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot; + changed |= BSS_CHANGED_ERP_CTS_PROT; + } + if (params->use_short_preamble >= 0) { + sdata->vif.bss_conf.use_short_preamble = + params->use_short_preamble; + changed |= BSS_CHANGED_ERP_PREAMBLE; + } + + if (!sdata->vif.bss_conf.use_short_slot && + sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) { + sdata->vif.bss_conf.use_short_slot = true; + changed |= BSS_CHANGED_ERP_SLOT; + } + + if (params->use_short_slot_time >= 0) { + sdata->vif.bss_conf.use_short_slot = + params->use_short_slot_time; + changed |= BSS_CHANGED_ERP_SLOT; + } + + if (params->basic_rates) { + int i, j; + u32 rates = 0; + struct 
ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_supported_band *sband = + wiphy->bands[local->oper_channel->band]; + + for (i = 0; i < params->basic_rates_len; i++) { + int rate = (params->basic_rates[i] & 0x7f) * 5; + for (j = 0; j < sband->n_bitrates; j++) { + if (sband->bitrates[j].bitrate == rate) + rates |= BIT(j); + } + } + sdata->vif.bss_conf.basic_rates = rates; + changed |= BSS_CHANGED_BASIC_RATES; + } + + ieee80211_bss_info_change_notify(sdata, changed); + + return 0; +} + +static int ieee80211_set_txq_params(struct wiphy *wiphy, + struct ieee80211_txq_params *params) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_tx_queue_params p; + + if (!local->ops->conf_tx) + return -EOPNOTSUPP; + + memset(&p, 0, sizeof(p)); + p.aifs = params->aifs; + p.cw_max = params->cwmax; + p.cw_min = params->cwmin; + p.txop = params->txop; + + /* + * Setting tx queue params disables u-apsd because it's only + * called in master mode. + */ + p.uapsd = false; + + if (drv_conf_tx(local, params->queue, &p)) { + printk(KERN_DEBUG "%s: failed to set TX queue " + "parameters for queue %d\n", + wiphy_name(local->hw.wiphy), params->queue); + return -EINVAL; + } + + return 0; +} + +static int ieee80211_set_channel(struct wiphy *wiphy, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + local->oper_channel = chan; + local->oper_channel_type = channel_type; + + return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); +} + +#ifdef CONFIG_PM +static int ieee80211_suspend(struct wiphy *wiphy) +{ + return __ieee80211_suspend(wiphy_priv(wiphy)); +} + +static int ieee80211_resume(struct wiphy *wiphy) +{ + return __ieee80211_resume(wiphy_priv(wiphy)); +} +#else +#define ieee80211_suspend NULL +#define ieee80211_resume NULL +#endif + +static int ieee80211_scan(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_scan_request *req) +{ + struct ieee80211_sub_if_data *sdata; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_ADHOC && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && + (sdata->vif.type != NL80211_IFTYPE_AP || sdata->u.ap.beacon)) + return -EOPNOTSUPP; + + return ieee80211_request_scan(sdata, req); +} + +static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_auth_request *req) +{ + return ieee80211_mgd_auth(IEEE80211_DEV_TO_SUB_IF(dev), req); +} + +static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_assoc_request *req) +{ + return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); +} + +static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_deauth_request *req, + void *cookie) +{ + return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), + req, cookie); +} + +static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_disassoc_request *req, + void *cookie) +{ + return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), + req, cookie); +} + +static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + return ieee80211_ibss_join(sdata, params); +} + +static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + return 
ieee80211_ibss_leave(sdata); +} + +static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + int err; + + if (changed & WIPHY_PARAM_COVERAGE_CLASS) { + err = drv_set_coverage_class(local, wiphy->coverage_class); + + if (err) + return err; + } + + if (changed & WIPHY_PARAM_RTS_THRESHOLD) { + err = drv_set_rts_threshold(local, wiphy->rts_threshold); + + if (err) + return err; + } + + if (changed & WIPHY_PARAM_RETRY_SHORT) + local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; + if (changed & WIPHY_PARAM_RETRY_LONG) + local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; + if (changed & + (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG)) + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS); + + return 0; +} + +static int ieee80211_set_tx_power(struct wiphy *wiphy, + enum tx_power_setting type, int dbm) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct ieee80211_channel *chan = local->hw.conf.channel; + u32 changes = 0; + + switch (type) { + case TX_POWER_AUTOMATIC: + local->user_power_level = -1; + break; + case TX_POWER_LIMITED: + if (dbm < 0) + return -EINVAL; + local->user_power_level = dbm; + break; + case TX_POWER_FIXED: + if (dbm < 0) + return -EINVAL; + /* TODO: move to cfg80211 when it knows the channel */ + if (dbm > chan->max_power) + return -EINVAL; + local->user_power_level = dbm; + break; + } + + ieee80211_hw_config(local, changes); + + return 0; +} + +static int ieee80211_get_tx_power(struct wiphy *wiphy, int *dbm) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + *dbm = local->hw.conf.power_level; + + return 0; +} + +static int ieee80211_set_wds_peer(struct wiphy *wiphy, struct net_device *dev, + u8 *addr) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + memcpy(&sdata->u.wds.remote_addr, addr, ETH_ALEN); + + return 0; +} + +static void ieee80211_rfkill_poll(struct wiphy *wiphy) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + drv_rfkill_poll(local); +} + +#ifdef CONFIG_NL80211_TESTMODE +static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + if (!local->ops->testmode_cmd) + return -EOPNOTSUPP; + + return local->ops->testmode_cmd(&local->hw, data, len); +} +#endif + +int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps_mode) +{ + const u8 *ap; + enum ieee80211_smps_mode old_req; + int err; + + old_req = sdata->u.mgd.req_smps; + sdata->u.mgd.req_smps = smps_mode; + + if (old_req == smps_mode && + smps_mode != IEEE80211_SMPS_AUTOMATIC) + return 0; + + /* + * If not associated, or current association is not an HT + * association, there's no need to send an action frame. 
+ */ + if (!sdata->u.mgd.associated || + sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) { + mutex_lock(&sdata->local->iflist_mtx); + ieee80211_recalc_smps(sdata->local, sdata); + mutex_unlock(&sdata->local->iflist_mtx); + return 0; + } + + ap = sdata->u.mgd.associated->bssid; + + if (smps_mode == IEEE80211_SMPS_AUTOMATIC) { + if (sdata->u.mgd.powersave) + smps_mode = IEEE80211_SMPS_DYNAMIC; + else + smps_mode = IEEE80211_SMPS_OFF; + } + + /* send SM PS frame to AP */ + err = ieee80211_send_smps_action(sdata, smps_mode, + ap, ap); + if (err) + sdata->u.mgd.req_smps = old_req; + + return err; +} + +static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, + bool enabled, int timeout) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_conf *conf = &local->hw.conf; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) + return -EOPNOTSUPP; + + if (enabled == sdata->u.mgd.powersave && + timeout == conf->dynamic_ps_timeout) + return 0; + + sdata->u.mgd.powersave = enabled; + conf->dynamic_ps_timeout = timeout; + + /* no change, but if automatic follow powersave */ + mutex_lock(&sdata->u.mgd.mtx); + __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps); + mutex_unlock(&sdata->u.mgd.mtx); + + if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + + ieee80211_recalc_ps(local, -1); + + return 0; +} + +static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr, + const struct cfg80211_bitrate_mask *mask) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + int i; + + /* + * This _could_ be supported by providing a hook for + * drivers for this function, but at this point it + * doesn't seem worth bothering. 
+ */ + if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) + return -EOPNOTSUPP; + + + for (i = 0; i < IEEE80211_NUM_BANDS; i++) + sdata->rc_rateidx_mask[i] = mask->control[i].legacy; + + return 0; +} + +static int ieee80211_remain_on_channel(struct wiphy *wiphy, + struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int duration, + u64 *cookie) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + return ieee80211_wk_remain_on_channel(sdata, chan, channel_type, + duration, cookie); +} + +static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, + struct net_device *dev, + u64 cookie) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + return ieee80211_wk_cancel_remain_on_channel(sdata, cookie); +} + +static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + const u8 *buf, size_t len, u64 *cookie) +{ + return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan, + channel_type, buf, len, cookie); +} + +struct cfg80211_ops mac80211_config_ops = { + .add_virtual_intf = ieee80211_add_iface, + .del_virtual_intf = ieee80211_del_iface, + .change_virtual_intf = ieee80211_change_iface, + .add_key = ieee80211_add_key, + .del_key = ieee80211_del_key, + .get_key = ieee80211_get_key, + .set_default_key = ieee80211_config_default_key, + .set_default_mgmt_key = ieee80211_config_default_mgmt_key, + .add_beacon = ieee80211_add_beacon, + .set_beacon = ieee80211_set_beacon, + .del_beacon = ieee80211_del_beacon, + .add_station = ieee80211_add_station, + .del_station = ieee80211_del_station, + .change_station = ieee80211_change_station, + .get_station = ieee80211_get_station, + .dump_station = ieee80211_dump_station, +#ifdef CONFIG_MAC80211_MESH + .add_mpath = ieee80211_add_mpath, + .del_mpath = ieee80211_del_mpath, + .change_mpath = ieee80211_change_mpath, + .get_mpath = ieee80211_get_mpath, + .dump_mpath = ieee80211_dump_mpath, + .set_mesh_params = ieee80211_set_mesh_params, + .get_mesh_params = ieee80211_get_mesh_params, +#endif + .change_bss = ieee80211_change_bss, + .set_txq_params = ieee80211_set_txq_params, + .set_channel = ieee80211_set_channel, + .suspend = ieee80211_suspend, + .resume = ieee80211_resume, + .scan = ieee80211_scan, + .auth = ieee80211_auth, + .assoc = ieee80211_assoc, + .deauth = ieee80211_deauth, + .disassoc = ieee80211_disassoc, + .join_ibss = ieee80211_join_ibss, + .leave_ibss = ieee80211_leave_ibss, + .set_wiphy_params = ieee80211_set_wiphy_params, + .set_tx_power = ieee80211_set_tx_power, + .get_tx_power = ieee80211_get_tx_power, + .set_wds_peer = ieee80211_set_wds_peer, + .rfkill_poll = ieee80211_rfkill_poll, + CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) + .set_power_mgmt = ieee80211_set_power_mgmt, + .set_bitrate_mask = ieee80211_set_bitrate_mask, + .remain_on_channel = ieee80211_remain_on_channel, + .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, + .action = ieee80211_action, +}; diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h new file mode 100644 index 0000000..7d7879f --- /dev/null +++ b/net/mac80211/cfg.h @@ -0,0 +1,9 @@ +/* + * mac80211 configuration hooks for cfg80211 + */ +#ifndef __CFG_H +#define __CFG_H + +extern struct cfg80211_ops mac80211_config_ops; + +#endif /* __CFG_H */ diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c new file mode 100644 index 0000000..637929b --- /dev/null +++ b/net/mac80211/debugfs.c @@ -0,0 +1,485 
@@ + +/* + * mac80211 debugfs for wireless PHYs + * + * Copyright 2007 Johannes Berg + * + * GPLv2 + * + */ + +#include <linux/debugfs.h> +#include <linux/rtnetlink.h> +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "debugfs.h" + +int mac80211_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ +static ssize_t name## _read(struct file *file, char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct ieee80211_local *local = file->private_data; \ + char buf[buflen]; \ + int res; \ + \ + res = scnprintf(buf, buflen, fmt "\n", ##value); \ + return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ +} \ + \ +static const struct file_operations name## _ops = { \ + .read = name## _read, \ + .open = mac80211_open_file_generic, \ +}; + +#define DEBUGFS_ADD(name) \ + debugfs_create_file(#name, 0400, phyd, local, &name## _ops); + +#define DEBUGFS_ADD_MODE(name, mode) \ + debugfs_create_file(#name, mode, phyd, local, &name## _ops); + + +DEBUGFS_READONLY_FILE(frequency, 20, "%d", + local->hw.conf.channel->center_freq); +DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d", + local->total_ps_buffered); +DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x", + local->wep_iv & 0xffffff); +DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", + local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver"); + +static ssize_t tsf_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + u64 tsf; + char buf[100]; + + tsf = drv_get_tsf(local); + + snprintf(buf, sizeof(buf), "0x%016llx\n", (unsigned long long) tsf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, 19); +} + +static ssize_t tsf_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + unsigned long long tsf; + char buf[100]; + size_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + + if (strncmp(buf, "reset", 5) == 0) { + if (local->ops->reset_tsf) { + drv_reset_tsf(local); + printk(KERN_INFO "%s: debugfs reset TSF\n", wiphy_name(local->hw.wiphy)); + } + } else { + tsf = simple_strtoul(buf, NULL, 0); + if (local->ops->set_tsf) { + drv_set_tsf(local, tsf); + printk(KERN_INFO "%s: debugfs set TSF to %#018llx\n", wiphy_name(local->hw.wiphy), tsf); + } + } + + return count; +} + +static const struct file_operations tsf_ops = { + .read = tsf_read, + .write = tsf_write, + .open = mac80211_open_file_generic +}; + +static ssize_t reset_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + + rtnl_lock(); + __ieee80211_suspend(&local->hw); + __ieee80211_resume(&local->hw); + rtnl_unlock(); + + return count; +} + +static const struct file_operations reset_ops = { + .write = reset_write, + .open = mac80211_open_file_generic, +}; + +static ssize_t noack_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + int res; + char buf[10]; + + res = scnprintf(buf, sizeof(buf), "%d\n", local->wifi_wme_noack_test); + + return simple_read_from_buffer(user_buf, count, ppos, buf, res); +} + +static ssize_t noack_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local =
file->private_data; + char buf[10]; + size_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + + local->wifi_wme_noack_test = !!simple_strtoul(buf, NULL, 0); + + return count; +} + +static const struct file_operations noack_ops = { + .read = noack_read, + .write = noack_write, + .open = mac80211_open_file_generic +}; + +static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + int res; + char buf[10]; + + res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_queues); + + return simple_read_from_buffer(user_buf, count, ppos, buf, res); +} + +static ssize_t uapsd_queues_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + unsigned long val; + char buf[10]; + size_t len; + int ret; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + + ret = strict_strtoul(buf, 0, &val); + + if (ret) + return -EINVAL; + + if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) + return -ERANGE; + + local->uapsd_queues = val; + + return count; +} + +static const struct file_operations uapsd_queues_ops = { + .read = uapsd_queues_read, + .write = uapsd_queues_write, + .open = mac80211_open_file_generic +}; + +static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + int res; + char buf[10]; + + res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_max_sp_len); + + return simple_read_from_buffer(user_buf, count, ppos, buf, res); +} + +static ssize_t uapsd_max_sp_len_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + unsigned long val; + char buf[10]; + size_t len; + int ret; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + buf[len] = '\0'; + + ret = strict_strtoul(buf, 0, &val); + + if (ret) + return -EINVAL; + + if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK) + return -ERANGE; + + local->uapsd_max_sp_len = val; + + return count; +} + +static const struct file_operations uapsd_max_sp_len_ops = { + .read = uapsd_max_sp_len_read, + .write = uapsd_max_sp_len_write, + .open = mac80211_open_file_generic +}; + +static ssize_t channel_type_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + const char *buf; + + switch (local->hw.conf.channel_type) { + case NL80211_CHAN_NO_HT: + buf = "no ht\n"; + break; + case NL80211_CHAN_HT20: + buf = "ht20\n"; + break; + case NL80211_CHAN_HT40MINUS: + buf = "ht40-\n"; + break; + case NL80211_CHAN_HT40PLUS: + buf = "ht40+\n"; + break; + default: + buf = "???"; + break; + } + + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); +} + +static const struct file_operations channel_type_ops = { + .read = channel_type_read, + .open = mac80211_open_file_generic +}; + +static ssize_t queues_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + unsigned long flags; + char buf[IEEE80211_MAX_QUEUES * 20]; + int q, res = 0; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + for (q = 0; q < local->hw.queues; q++) + 
res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q, + local->queue_stop_reasons[q], + skb_queue_len(&local->pending[q])); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + return simple_read_from_buffer(user_buf, count, ppos, buf, res); +} + +static const struct file_operations queues_ops = { + .read = queues_read, + .open = mac80211_open_file_generic +}; + +/* statistics stuff */ + +#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ + DEBUGFS_READONLY_FILE(stats_ ##name, buflen, fmt, ##value) + +static ssize_t format_devstat_counter(struct ieee80211_local *local, + char __user *userbuf, + size_t count, loff_t *ppos, + int (*printvalue)(struct ieee80211_low_level_stats *stats, char *buf, + int buflen)) +{ + struct ieee80211_low_level_stats stats; + char buf[20]; + int res; + + rtnl_lock(); + res = drv_get_stats(local, &stats); + rtnl_unlock(); + if (res) + return res; + res = printvalue(&stats, buf, sizeof(buf)); + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} + +#define DEBUGFS_DEVSTATS_FILE(name) \ +static int print_devstats_##name(struct ieee80211_low_level_stats *stats,\ + char *buf, int buflen) \ +{ \ + return scnprintf(buf, buflen, "%u\n", stats->name); \ +} \ +static ssize_t stats_ ##name## _read(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + return format_devstat_counter(file->private_data, \ + userbuf, \ + count, \ + ppos, \ + print_devstats_##name); \ +} \ + \ +static const struct file_operations stats_ ##name## _ops = { \ + .read = stats_ ##name## _read, \ + .open = mac80211_open_file_generic, \ +}; + +#define DEBUGFS_STATS_ADD(name) \ + debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); + +DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u", + local->dot11TransmittedFragmentCount); +DEBUGFS_STATS_FILE(multicast_transmitted_frame_count, 20, "%u", + local->dot11MulticastTransmittedFrameCount); +DEBUGFS_STATS_FILE(failed_count, 20, "%u", + local->dot11FailedCount); +DEBUGFS_STATS_FILE(retry_count, 20, "%u", + local->dot11RetryCount); +DEBUGFS_STATS_FILE(multiple_retry_count, 20, "%u", + local->dot11MultipleRetryCount); +DEBUGFS_STATS_FILE(frame_duplicate_count, 20, "%u", + local->dot11FrameDuplicateCount); +DEBUGFS_STATS_FILE(received_fragment_count, 20, "%u", + local->dot11ReceivedFragmentCount); +DEBUGFS_STATS_FILE(multicast_received_frame_count, 20, "%u", + local->dot11MulticastReceivedFrameCount); +DEBUGFS_STATS_FILE(transmitted_frame_count, 20, "%u", + local->dot11TransmittedFrameCount); +#ifdef CONFIG_MAC80211_DEBUG_COUNTERS +DEBUGFS_STATS_FILE(tx_handlers_drop, 20, "%u", + local->tx_handlers_drop); +DEBUGFS_STATS_FILE(tx_handlers_queued, 20, "%u", + local->tx_handlers_queued); +DEBUGFS_STATS_FILE(tx_handlers_drop_unencrypted, 20, "%u", + local->tx_handlers_drop_unencrypted); +DEBUGFS_STATS_FILE(tx_handlers_drop_fragment, 20, "%u", + local->tx_handlers_drop_fragment); +DEBUGFS_STATS_FILE(tx_handlers_drop_wep, 20, "%u", + local->tx_handlers_drop_wep); +DEBUGFS_STATS_FILE(tx_handlers_drop_not_assoc, 20, "%u", + local->tx_handlers_drop_not_assoc); +DEBUGFS_STATS_FILE(tx_handlers_drop_unauth_port, 20, "%u", + local->tx_handlers_drop_unauth_port); +DEBUGFS_STATS_FILE(rx_handlers_drop, 20, "%u", + local->rx_handlers_drop); +DEBUGFS_STATS_FILE(rx_handlers_queued, 20, "%u", + local->rx_handlers_queued); +DEBUGFS_STATS_FILE(rx_handlers_drop_nullfunc, 20, "%u", + local->rx_handlers_drop_nullfunc); +DEBUGFS_STATS_FILE(rx_handlers_drop_defrag, 20, "%u", + 
local->rx_handlers_drop_defrag); +DEBUGFS_STATS_FILE(rx_handlers_drop_short, 20, "%u", + local->rx_handlers_drop_short); +DEBUGFS_STATS_FILE(rx_handlers_drop_passive_scan, 20, "%u", + local->rx_handlers_drop_passive_scan); +DEBUGFS_STATS_FILE(tx_expand_skb_head, 20, "%u", + local->tx_expand_skb_head); +DEBUGFS_STATS_FILE(tx_expand_skb_head_cloned, 20, "%u", + local->tx_expand_skb_head_cloned); +DEBUGFS_STATS_FILE(rx_expand_skb_head, 20, "%u", + local->rx_expand_skb_head); +DEBUGFS_STATS_FILE(rx_expand_skb_head2, 20, "%u", + local->rx_expand_skb_head2); +DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u", + local->rx_handlers_fragments); +DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u", + local->tx_status_drop); + +#endif + +DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); +DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); +DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); +DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); + + +void debugfs_hw_add(struct ieee80211_local *local) +{ + struct dentry *phyd = local->hw.wiphy->debugfsdir; + struct dentry *statsd; + + if (!phyd) + return; + + local->debugfs.stations = debugfs_create_dir("stations", phyd); + local->debugfs.keys = debugfs_create_dir("keys", phyd); + + DEBUGFS_ADD(frequency); + DEBUGFS_ADD(total_ps_buffered); + DEBUGFS_ADD(wep_iv); + DEBUGFS_ADD(tsf); + DEBUGFS_ADD(queues); + DEBUGFS_ADD_MODE(reset, 0200); + DEBUGFS_ADD(noack); + DEBUGFS_ADD(uapsd_queues); + DEBUGFS_ADD(uapsd_max_sp_len); + DEBUGFS_ADD(channel_type); + + statsd = debugfs_create_dir("statistics", phyd); + + /* if the dir failed, don't put all the other things into the root! */ + if (!statsd) + return; + + DEBUGFS_STATS_ADD(transmitted_fragment_count); + DEBUGFS_STATS_ADD(multicast_transmitted_frame_count); + DEBUGFS_STATS_ADD(failed_count); + DEBUGFS_STATS_ADD(retry_count); + DEBUGFS_STATS_ADD(multiple_retry_count); + DEBUGFS_STATS_ADD(frame_duplicate_count); + DEBUGFS_STATS_ADD(received_fragment_count); + DEBUGFS_STATS_ADD(multicast_received_frame_count); + DEBUGFS_STATS_ADD(transmitted_frame_count); +#ifdef CONFIG_MAC80211_DEBUG_COUNTERS + DEBUGFS_STATS_ADD(tx_handlers_drop); + DEBUGFS_STATS_ADD(tx_handlers_queued); + DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted); + DEBUGFS_STATS_ADD(tx_handlers_drop_fragment); + DEBUGFS_STATS_ADD(tx_handlers_drop_wep); + DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc); + DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port); + DEBUGFS_STATS_ADD(rx_handlers_drop); + DEBUGFS_STATS_ADD(rx_handlers_queued); + DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc); + DEBUGFS_STATS_ADD(rx_handlers_drop_defrag); + DEBUGFS_STATS_ADD(rx_handlers_drop_short); + DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan); + DEBUGFS_STATS_ADD(tx_expand_skb_head); + DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned); + DEBUGFS_STATS_ADD(rx_expand_skb_head); + DEBUGFS_STATS_ADD(rx_expand_skb_head2); + DEBUGFS_STATS_ADD(rx_handlers_fragments); + DEBUGFS_STATS_ADD(tx_status_drop); +#endif + DEBUGFS_STATS_ADD(dot11ACKFailureCount); + DEBUGFS_STATS_ADD(dot11RTSFailureCount); + DEBUGFS_STATS_ADD(dot11FCSErrorCount); + DEBUGFS_STATS_ADD(dot11RTSSuccessCount); +} diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h new file mode 100644 index 0000000..68e6a20 --- /dev/null +++ b/net/mac80211/debugfs.h @@ -0,0 +1,14 @@ +#ifndef __MAC80211_DEBUGFS_H +#define __MAC80211_DEBUGFS_H + +#ifdef CONFIG_MAC80211_DEBUGFS +extern void debugfs_hw_add(struct ieee80211_local *local); +extern int mac80211_open_file_generic(struct inode *inode, struct file *file); +#else +static inline void debugfs_hw_add(struct 
ieee80211_local *local) +{ + return; +} +#endif + +#endif /* __MAC80211_DEBUGFS_H */ diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c new file mode 100644 index 0000000..d12e743 --- /dev/null +++ b/net/mac80211/debugfs_key.c @@ -0,0 +1,345 @@ +/* + * Copyright 2003-2005 Devicescape Software, Inc. + * Copyright (c) 2006 Jiri Benc + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kobject.h> +#include "ieee80211_i.h" +#include "key.h" +#include "debugfs.h" +#include "debugfs_key.h" + +#define KEY_READ(name, prop, buflen, format_string) \ +static ssize_t key_##name##_read(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + char buf[buflen]; \ + struct ieee80211_key *key = file->private_data; \ + int res = scnprintf(buf, buflen, format_string, key->prop); \ + return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ +} +#define KEY_READ_D(name) KEY_READ(name, name, 20, "%d\n") +#define KEY_READ_X(name) KEY_READ(name, name, 20, "0x%x\n") + +#define KEY_OPS(name) \ +static const struct file_operations key_ ##name## _ops = { \ + .read = key_##name##_read, \ + .open = mac80211_open_file_generic, \ +} + +#define KEY_FILE(name, format) \ + KEY_READ_##format(name) \ + KEY_OPS(name) + +#define KEY_CONF_READ(name, buflen, format_string) \ + KEY_READ(conf_##name, conf.name, buflen, format_string) +#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, 20, "%d\n") + +#define KEY_CONF_OPS(name) \ +static const struct file_operations key_ ##name## _ops = { \ + .read = key_conf_##name##_read, \ + .open = mac80211_open_file_generic, \ +} + +#define KEY_CONF_FILE(name, format) \ + KEY_CONF_READ_##format(name) \ + KEY_CONF_OPS(name) + +KEY_CONF_FILE(keylen, D); +KEY_CONF_FILE(keyidx, D); +KEY_CONF_FILE(hw_key_idx, D); +KEY_FILE(flags, X); +KEY_FILE(tx_rx_count, D); +KEY_READ(ifindex, sdata->name, IFNAMSIZ + 2, "%s\n"); +KEY_OPS(ifindex); + +static ssize_t key_algorithm_read(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + char *alg; + struct ieee80211_key *key = file->private_data; + + switch (key->conf.alg) { + case ALG_WEP: + alg = "WEP\n"; + break; + case ALG_TKIP: + alg = "TKIP\n"; + break; + case ALG_CCMP: + alg = "CCMP\n"; + break; + case ALG_AES_CMAC: + alg = "AES-128-CMAC\n"; + break; + default: + return 0; + } + return simple_read_from_buffer(userbuf, count, ppos, alg, strlen(alg)); +} +KEY_OPS(algorithm); + +static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + const u8 *tpn; + char buf[20]; + int len; + struct ieee80211_key *key = file->private_data; + + switch (key->conf.alg) { + case ALG_WEP: + len = scnprintf(buf, sizeof(buf), "\n"); + break; + case ALG_TKIP: + len = scnprintf(buf, sizeof(buf), "%08x %04x\n", + key->u.tkip.tx.iv32, + key->u.tkip.tx.iv16); + break; + case ALG_CCMP: + tpn = key->u.ccmp.tx_pn; + len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", + tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], tpn[5]); + break; + case ALG_AES_CMAC: + tpn = key->u.aes_cmac.tx_pn; + len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", + tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], + tpn[5]); + break; + default: + return 0; + } + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} +KEY_OPS(tx_spec); + +static ssize_t key_rx_spec_read(struct file *file, char
__user *userbuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + char buf[14*NUM_RX_DATA_QUEUES+1], *p = buf; + int i, len; + const u8 *rpn; + + switch (key->conf.alg) { + case ALG_WEP: + len = scnprintf(buf, sizeof(buf), "\n"); + break; + case ALG_TKIP: + for (i = 0; i < NUM_RX_DATA_QUEUES; i++) + p += scnprintf(p, sizeof(buf)+buf-p, + "%08x %04x\n", + key->u.tkip.rx[i].iv32, + key->u.tkip.rx[i].iv16); + len = p - buf; + break; + case ALG_CCMP: + for (i = 0; i < NUM_RX_DATA_QUEUES; i++) { + rpn = key->u.ccmp.rx_pn[i]; + p += scnprintf(p, sizeof(buf)+buf-p, + "%02x%02x%02x%02x%02x%02x\n", + rpn[0], rpn[1], rpn[2], + rpn[3], rpn[4], rpn[5]); + } + len = p - buf; + break; + case ALG_AES_CMAC: + rpn = key->u.aes_cmac.rx_pn; + p += scnprintf(p, sizeof(buf)+buf-p, + "%02x%02x%02x%02x%02x%02x\n", + rpn[0], rpn[1], rpn[2], + rpn[3], rpn[4], rpn[5]); + len = p - buf; + break; + default: + return 0; + } + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} +KEY_OPS(rx_spec); + +static ssize_t key_replays_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + char buf[20]; + int len; + + switch (key->conf.alg) { + case ALG_CCMP: + len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays); + break; + case ALG_AES_CMAC: + len = scnprintf(buf, sizeof(buf), "%u\n", + key->u.aes_cmac.replays); + break; + default: + return 0; + } + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} +KEY_OPS(replays); + +static ssize_t key_icverrors_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + char buf[20]; + int len; + + switch (key->conf.alg) { + case ALG_AES_CMAC: + len = scnprintf(buf, sizeof(buf), "%u\n", + key->u.aes_cmac.icverrors); + break; + default: + return 0; + } + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} +KEY_OPS(icverrors); + +static ssize_t key_key_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_key *key = file->private_data; + int i, res, bufsize = 2 * key->conf.keylen + 2; + char *buf = kmalloc(bufsize, GFP_KERNEL); + char *p = buf; + + for (i = 0; i < key->conf.keylen; i++) + p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]); + p += scnprintf(p, bufsize+buf-p, "\n"); + res = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return res; +} +KEY_OPS(key); + +#define DEBUGFS_ADD(name) \ + debugfs_create_file(#name, 0400, key->debugfs.dir, \ + key, &key_##name##_ops); + +void ieee80211_debugfs_key_add(struct ieee80211_key *key) + { + static int keycount; + char buf[50]; + struct sta_info *sta; + + if (!key->local->debugfs.keys) + return; + + sprintf(buf, "%d", keycount); + key->debugfs.cnt = keycount; + keycount++; + key->debugfs.dir = debugfs_create_dir(buf, + key->local->debugfs.keys); + + if (!key->debugfs.dir) + return; + + rcu_read_lock(); + sta = rcu_dereference(key->sta); + if (sta) + sprintf(buf, "../../stations/%pM", sta->sta.addr); + rcu_read_unlock(); + + /* using sta as a boolean is fine outside RCU lock */ + if (sta) + key->debugfs.stalink = + debugfs_create_symlink("station", key->debugfs.dir, buf); + + DEBUGFS_ADD(keylen); + DEBUGFS_ADD(flags); + DEBUGFS_ADD(keyidx); + DEBUGFS_ADD(hw_key_idx); + DEBUGFS_ADD(tx_rx_count); + DEBUGFS_ADD(algorithm); + DEBUGFS_ADD(tx_spec); + DEBUGFS_ADD(rx_spec); + DEBUGFS_ADD(replays); + DEBUGFS_ADD(icverrors); + 
DEBUGFS_ADD(key); + DEBUGFS_ADD(ifindex); +}; + +void ieee80211_debugfs_key_remove(struct ieee80211_key *key) +{ + if (!key) + return; + + debugfs_remove_recursive(key->debugfs.dir); + key->debugfs.dir = NULL; +} +void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) +{ + char buf[50]; + struct ieee80211_key *key; + + if (!sdata->debugfs.dir) + return; + + /* this is running under the key lock */ + + key = sdata->default_key; + if (key) { + sprintf(buf, "../keys/%d", key->debugfs.cnt); + sdata->debugfs.default_key = + debugfs_create_symlink("default_key", + sdata->debugfs.dir, buf); + } else + ieee80211_debugfs_key_remove_default(sdata); +} + +void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata) +{ + if (!sdata) + return; + + debugfs_remove(sdata->debugfs.default_key); + sdata->debugfs.default_key = NULL; +} + +void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata) +{ + char buf[50]; + struct ieee80211_key *key; + + if (!sdata->debugfs.dir) + return; + + /* this is running under the key lock */ + + key = sdata->default_mgmt_key; + if (key) { + sprintf(buf, "../keys/%d", key->debugfs.cnt); + sdata->debugfs.default_mgmt_key = + debugfs_create_symlink("default_mgmt_key", + sdata->debugfs.dir, buf); + } else + ieee80211_debugfs_key_remove_mgmt_default(sdata); +} + +void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sdata) +{ + if (!sdata) + return; + + debugfs_remove(sdata->debugfs.default_mgmt_key); + sdata->debugfs.default_mgmt_key = NULL; +} + +void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, + struct sta_info *sta) +{ + debugfs_remove(key->debugfs.stalink); + key->debugfs.stalink = NULL; +} diff --git a/net/mac80211/debugfs_key.h b/net/mac80211/debugfs_key.h new file mode 100644 index 0000000..54717b4 --- /dev/null +++ b/net/mac80211/debugfs_key.h @@ -0,0 +1,37 @@ +#ifndef __MAC80211_DEBUGFS_KEY_H +#define __MAC80211_DEBUGFS_KEY_H + +#ifdef CONFIG_MAC80211_DEBUGFS +void ieee80211_debugfs_key_add(struct ieee80211_key *key); +void ieee80211_debugfs_key_remove(struct ieee80211_key *key); +void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_key_add_mgmt_default( + struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_key_remove_mgmt_default( + struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, + struct sta_info *sta); +#else +static inline void ieee80211_debugfs_key_add(struct ieee80211_key *key) +{} +static inline void ieee80211_debugfs_key_remove(struct ieee80211_key *key) +{} +static inline void ieee80211_debugfs_key_add_default( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_key_remove_default( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_key_add_mgmt_default( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_key_remove_mgmt_default( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, + struct sta_info *sta) +{} +#endif + +#endif /* __MAC80211_DEBUGFS_KEY_H */ diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c new file mode 100644 index 0000000..9affe2c --- /dev/null +++ b/net/mac80211/debugfs_netdev.c @@ -0,0 +1,421 @@ +/* + * Copyright (c) 2006 Jiri Benc + * Copyright 2007 Johannes Berg + * + * This 
program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ieee80211_i.h" +#include "rate.h" +#include "debugfs.h" +#include "debugfs_netdev.h" + +static ssize_t ieee80211_if_read( + struct ieee80211_sub_if_data *sdata, + char __user *userbuf, + size_t count, loff_t *ppos, + ssize_t (*format)(const struct ieee80211_sub_if_data *, char *, int)) +{ + char buf[70]; + ssize_t ret = -EINVAL; + + read_lock(&dev_base_lock); + if (sdata->dev->reg_state == NETREG_REGISTERED) + ret = (*format)(sdata, buf, sizeof(buf)); + read_unlock(&dev_base_lock); + + if (ret != -EINVAL) + ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret); + + return ret; +} + +static ssize_t ieee80211_if_write( + struct ieee80211_sub_if_data *sdata, + const char __user *userbuf, + size_t count, loff_t *ppos, + ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int)) +{ + u8 *buf; + ssize_t ret = -ENODEV; + + buf = kzalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, userbuf, count)) + return -EFAULT; + + rtnl_lock(); + if (sdata->dev->reg_state == NETREG_REGISTERED) + ret = (*write)(sdata, buf, count); + rtnl_unlock(); + + return ret; +} + +#define IEEE80211_IF_FMT(name, field, format_string) \ +static ssize_t ieee80211_if_fmt_##name( \ + const struct ieee80211_sub_if_data *sdata, char *buf, \ + int buflen) \ +{ \ + return scnprintf(buf, buflen, format_string, sdata->field); \ +} +#define IEEE80211_IF_FMT_DEC(name, field) \ + IEEE80211_IF_FMT(name, field, "%d\n") +#define IEEE80211_IF_FMT_HEX(name, field) \ + IEEE80211_IF_FMT(name, field, "%#x\n") +#define IEEE80211_IF_FMT_SIZE(name, field) \ + IEEE80211_IF_FMT(name, field, "%zd\n") + +#define IEEE80211_IF_FMT_ATOMIC(name, field) \ +static ssize_t ieee80211_if_fmt_##name( \ + const struct ieee80211_sub_if_data *sdata, \ + char *buf, int buflen) \ +{ \ + return scnprintf(buf, buflen, "%d\n", atomic_read(&sdata->field));\ +} + +#define IEEE80211_IF_FMT_MAC(name, field) \ +static ssize_t ieee80211_if_fmt_##name( \ + const struct ieee80211_sub_if_data *sdata, char *buf, \ + int buflen) \ +{ \ + return scnprintf(buf, buflen, "%pM\n", sdata->field); \ +} + +#define __IEEE80211_IF_FILE(name, _write) \ +static ssize_t ieee80211_if_read_##name(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + return ieee80211_if_read(file->private_data, \ + userbuf, count, ppos, \ + ieee80211_if_fmt_##name); \ +} \ +static const struct file_operations name##_ops = { \ + .read = ieee80211_if_read_##name, \ + .write = (_write), \ + .open = mac80211_open_file_generic, \ +} + +#define __IEEE80211_IF_FILE_W(name) \ +static ssize_t ieee80211_if_write_##name(struct file *file, \ + const char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + return ieee80211_if_write(file->private_data, userbuf, count, \ + ppos, ieee80211_if_parse_##name); \ +} \ +__IEEE80211_IF_FILE(name, ieee80211_if_write_##name) + + +#define IEEE80211_IF_FILE(name, field, format) \ + IEEE80211_IF_FMT_##format(name, field) \ + __IEEE80211_IF_FILE(name, NULL) + +/* common attributes */ +IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); +IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ], + HEX); +IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ], + HEX); + 
+/* STA attributes */ +IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); +IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); + +static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps_mode) +{ + struct ieee80211_local *local = sdata->local; + int err; + + if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) && + smps_mode == IEEE80211_SMPS_STATIC) + return -EINVAL; + + /* auto should be dynamic if in PS mode */ + if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) && + (smps_mode == IEEE80211_SMPS_DYNAMIC || + smps_mode == IEEE80211_SMPS_AUTOMATIC)) + return -EINVAL; + + /* supported only on managed interfaces for now */ + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + mutex_lock(&local->iflist_mtx); + err = __ieee80211_request_smps(sdata, smps_mode); + mutex_unlock(&local->iflist_mtx); + + return err; +} + +static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = { + [IEEE80211_SMPS_AUTOMATIC] = "auto", + [IEEE80211_SMPS_OFF] = "off", + [IEEE80211_SMPS_STATIC] = "static", + [IEEE80211_SMPS_DYNAMIC] = "dynamic", +}; + +static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata, + char *buf, int buflen) +{ + if (sdata->vif.type != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + return snprintf(buf, buflen, "request: %s\nused: %s\n", + smps_modes[sdata->u.mgd.req_smps], + smps_modes[sdata->u.mgd.ap_smps]); +} + +static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata, + const char *buf, int buflen) +{ + enum ieee80211_smps_mode mode; + + for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) { + if (strncmp(buf, smps_modes[mode], buflen) == 0) { + int err = ieee80211_set_smps(sdata, mode); + if (!err) + return buflen; + return err; + } + } + + return -EINVAL; +} + +__IEEE80211_IF_FILE_W(smps); + +/* AP attributes */ +IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); +IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); + +static ssize_t ieee80211_if_fmt_num_buffered_multicast( + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +{ + return scnprintf(buf, buflen, "%u\n", + skb_queue_len(&sdata->u.ap.ps_bc_buf)); +} +__IEEE80211_IF_FILE(num_buffered_multicast, NULL); + +/* WDS attributes */ +IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); + +#ifdef CONFIG_MAC80211_MESH +/* Mesh stats attributes */ +IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC); +IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC); +IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); +IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); +IEEE80211_IF_FILE(dropped_frames_no_route, + u.mesh.mshstats.dropped_frames_no_route, DEC); +IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC); + +/* Mesh parameters */ +IEEE80211_IF_FILE(dot11MeshMaxRetries, + u.mesh.mshcfg.dot11MeshMaxRetries, DEC); +IEEE80211_IF_FILE(dot11MeshRetryTimeout, + u.mesh.mshcfg.dot11MeshRetryTimeout, DEC); +IEEE80211_IF_FILE(dot11MeshConfirmTimeout, + u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC); +IEEE80211_IF_FILE(dot11MeshHoldingTimeout, + u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC); +IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC); +IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC); +IEEE80211_IF_FILE(dot11MeshMaxPeerLinks, + u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout, + u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC); 
+IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval, + u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime, + u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries, + u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC); +IEEE80211_IF_FILE(path_refresh_time, + u.mesh.mshcfg.path_refresh_time, DEC); +IEEE80211_IF_FILE(min_discovery_timeout, + u.mesh.mshcfg.min_discovery_timeout, DEC); +IEEE80211_IF_FILE(dot11MeshHWMPRootMode, + u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC); +#endif + + +#define DEBUGFS_ADD(name) \ + debugfs_create_file(#name, 0400, sdata->debugfs.dir, \ + sdata, &name##_ops); + +#define DEBUGFS_ADD_MODE(name, mode) \ + debugfs_create_file(#name, mode, sdata->debugfs.dir, \ + sdata, &name##_ops); + +static void add_sta_files(struct ieee80211_sub_if_data *sdata) +{ + DEBUGFS_ADD(drop_unencrypted); + DEBUGFS_ADD(rc_rateidx_mask_2ghz); + DEBUGFS_ADD(rc_rateidx_mask_5ghz); + + DEBUGFS_ADD(bssid); + DEBUGFS_ADD(aid); + DEBUGFS_ADD_MODE(smps, 0600); +} + +static void add_ap_files(struct ieee80211_sub_if_data *sdata) +{ + DEBUGFS_ADD(drop_unencrypted); + DEBUGFS_ADD(rc_rateidx_mask_2ghz); + DEBUGFS_ADD(rc_rateidx_mask_5ghz); + + DEBUGFS_ADD(num_sta_ps); + DEBUGFS_ADD(dtim_count); + DEBUGFS_ADD(num_buffered_multicast); +} + +static void add_wds_files(struct ieee80211_sub_if_data *sdata) +{ + DEBUGFS_ADD(drop_unencrypted); + DEBUGFS_ADD(rc_rateidx_mask_2ghz); + DEBUGFS_ADD(rc_rateidx_mask_5ghz); + + DEBUGFS_ADD(peer); +} + +static void add_vlan_files(struct ieee80211_sub_if_data *sdata) +{ + DEBUGFS_ADD(drop_unencrypted); + DEBUGFS_ADD(rc_rateidx_mask_2ghz); + DEBUGFS_ADD(rc_rateidx_mask_5ghz); +} + +static void add_monitor_files(struct ieee80211_sub_if_data *sdata) +{ +} + +#ifdef CONFIG_MAC80211_MESH + +static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) +{ + struct dentry *dir = debugfs_create_dir("mesh_stats", + sdata->debugfs.dir); + +#define MESHSTATS_ADD(name)\ + debugfs_create_file(#name, 0400, dir, sdata, &name##_ops); + + MESHSTATS_ADD(fwded_mcast); + MESHSTATS_ADD(fwded_unicast); + MESHSTATS_ADD(fwded_frames); + MESHSTATS_ADD(dropped_frames_ttl); + MESHSTATS_ADD(dropped_frames_no_route); + MESHSTATS_ADD(estab_plinks); +#undef MESHSTATS_ADD +} + +static void add_mesh_config(struct ieee80211_sub_if_data *sdata) +{ + struct dentry *dir = debugfs_create_dir("mesh_config", + sdata->debugfs.dir); + +#define MESHPARAMS_ADD(name) \ + debugfs_create_file(#name, 0600, dir, sdata, &name##_ops); + + MESHPARAMS_ADD(dot11MeshMaxRetries); + MESHPARAMS_ADD(dot11MeshRetryTimeout); + MESHPARAMS_ADD(dot11MeshConfirmTimeout); + MESHPARAMS_ADD(dot11MeshHoldingTimeout); + MESHPARAMS_ADD(dot11MeshTTL); + MESHPARAMS_ADD(auto_open_plinks); + MESHPARAMS_ADD(dot11MeshMaxPeerLinks); + MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout); + MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval); + MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime); + MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries); + MESHPARAMS_ADD(path_refresh_time); + MESHPARAMS_ADD(min_discovery_timeout); + +#undef MESHPARAMS_ADD +} +#endif + +static void add_files(struct ieee80211_sub_if_data *sdata) +{ + if (!sdata->debugfs.dir) + return; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_MESH_POINT: +#ifdef CONFIG_MAC80211_MESH + add_mesh_stats(sdata); + add_mesh_config(sdata); +#endif + break; + case NL80211_IFTYPE_STATION: + add_sta_files(sdata); + break; + case NL80211_IFTYPE_ADHOC: + /* XXX */ + break; + case NL80211_IFTYPE_AP: + 
add_ap_files(sdata); + break; + case NL80211_IFTYPE_WDS: + add_wds_files(sdata); + break; + case NL80211_IFTYPE_MONITOR: + add_monitor_files(sdata); + break; + case NL80211_IFTYPE_AP_VLAN: + add_vlan_files(sdata); + break; + default: + break; + } +} + +void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) +{ + char buf[10+IFNAMSIZ]; + + sprintf(buf, "netdev:%s", sdata->name); + sdata->debugfs.dir = debugfs_create_dir(buf, + sdata->local->hw.wiphy->debugfsdir); + add_files(sdata); +} + +void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) +{ + if (!sdata->debugfs.dir) + return; + + debugfs_remove_recursive(sdata->debugfs.dir); + sdata->debugfs.dir = NULL; +} + +void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) +{ + struct dentry *dir; + char buf[10 + IFNAMSIZ]; + + dir = sdata->debugfs.dir; + + if (!dir) + return; + + sprintf(buf, "netdev:%s", sdata->name); + if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf)) + printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs " + "dir to %s\n", buf); +} diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h new file mode 100644 index 0000000..79025e7 --- /dev/null +++ b/net/mac80211/debugfs_netdev.h @@ -0,0 +1,22 @@ +/* routines exported for debugfs handling */ + +#ifndef __IEEE80211_DEBUGFS_NETDEV_H +#define __IEEE80211_DEBUGFS_NETDEV_H + +#ifdef CONFIG_MAC80211_DEBUGFS +void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata); +void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata); +#else +static inline void ieee80211_debugfs_add_netdev( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_remove_netdev( + struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_debugfs_rename_netdev( + struct ieee80211_sub_if_data *sdata) +{} +#endif + +#endif /* __IEEE80211_DEBUGFS_NETDEV_H */ diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c new file mode 100644 index 0000000..d92800b --- /dev/null +++ b/net/mac80211/debugfs_sta.c @@ -0,0 +1,301 @@ +/* + * Copyright 2003-2005 Devicescape Software, Inc. + * Copyright (c) 2006 Jiri Benc + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include "ieee80211_i.h" +#include "debugfs.h" +#include "debugfs_sta.h" +#include "sta_info.h" + +/* sta attributtes */ + +#define STA_READ(name, buflen, field, format_string) \ +static ssize_t sta_ ##name## _read(struct file *file, \ + char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + int res; \ + struct sta_info *sta = file->private_data; \ + char buf[buflen]; \ + res = scnprintf(buf, buflen, format_string, sta->field); \ + return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ +} +#define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n") +#define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n") +#define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n") +#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") + +#define STA_OPS(name) \ +static const struct file_operations sta_ ##name## _ops = { \ + .read = sta_##name##_read, \ + .open = mac80211_open_file_generic, \ +} + +#define STA_FILE(name, field, format) \ + STA_READ_##format(name, field) \ + STA_OPS(name) + +STA_FILE(aid, sta.aid, D); +STA_FILE(dev, sdata->name, S); +STA_FILE(rx_packets, rx_packets, LU); +STA_FILE(tx_packets, tx_packets, LU); +STA_FILE(rx_bytes, rx_bytes, LU); +STA_FILE(tx_bytes, tx_bytes, LU); +STA_FILE(rx_duplicates, num_duplicates, LU); +STA_FILE(rx_fragments, rx_fragments, LU); +STA_FILE(rx_dropped, rx_dropped, LU); +STA_FILE(tx_fragments, tx_fragments, LU); +STA_FILE(tx_filtered, tx_filtered_count, LU); +STA_FILE(tx_retry_failed, tx_retry_failed, LU); +STA_FILE(tx_retry_count, tx_retry_count, LU); +STA_FILE(last_signal, last_signal, D); +STA_FILE(last_noise, last_noise, D); +STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); + +static ssize_t sta_flags_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char buf[100]; + struct sta_info *sta = file->private_data; + u32 staflags = get_sta_flags(sta); + int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s", + staflags & WLAN_STA_AUTH ? "AUTH\n" : "", + staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "", + staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "", + staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "", + staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", + staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", + staflags & WLAN_STA_WME ? "WME\n" : "", + staflags & WLAN_STA_WDS ? "WDS\n" : "", + staflags & WLAN_STA_MFP ? 
"MFP\n" : ""); + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} +STA_OPS(flags); + +static ssize_t sta_num_ps_buf_frames_read(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + char buf[20]; + struct sta_info *sta = file->private_data; + int res = scnprintf(buf, sizeof(buf), "%u\n", + skb_queue_len(&sta->ps_tx_buf)); + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} +STA_OPS(num_ps_buf_frames); + +static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char buf[20]; + struct sta_info *sta = file->private_data; + int res = scnprintf(buf, sizeof(buf), "%d\n", + jiffies_to_msecs(jiffies - sta->last_rx)); + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} +STA_OPS(inactive_ms); + +static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char buf[15*NUM_RX_DATA_QUEUES], *p = buf; + int i; + struct sta_info *sta = file->private_data; + for (i = 0; i < NUM_RX_DATA_QUEUES; i++) + p += scnprintf(p, sizeof(buf)+buf-p, "%x ", + le16_to_cpu(sta->last_seq_ctrl[i])); + p += scnprintf(p, sizeof(buf)+buf-p, "\n"); + return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); +} +STA_OPS(last_seq_ctrl); + +static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char buf[64 + STA_TID_NUM * 40], *p = buf; + int i; + struct sta_info *sta = file->private_data; + + spin_lock_bh(&sta->lock); + p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", + sta->ampdu_mlme.dialog_token_allocator + 1); + p += scnprintf(p, sizeof(buf) + buf - p, + "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n"); + for (i = 0; i < STA_TID_NUM; i++) { + p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); + p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", + sta->ampdu_mlme.tid_state_rx[i]); + p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", + sta->ampdu_mlme.tid_state_rx[i] ? + sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); + p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", + sta->ampdu_mlme.tid_state_rx[i] ? + sta->ampdu_mlme.tid_rx[i]->ssn : 0); + + p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", + sta->ampdu_mlme.tid_state_tx[i]); + p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", + sta->ampdu_mlme.tid_state_tx[i] ? + sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); + p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", + sta->ampdu_mlme.tid_state_tx[i] ? + sta->ampdu_mlme.tid_tx[i]->ssn : 0); + p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", + sta->ampdu_mlme.tid_state_tx[i] ? + skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); + p += scnprintf(p, sizeof(buf) + buf - p, "\n"); + } + spin_unlock_bh(&sta->lock); + + return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); +} +STA_OPS(agg_status); + +static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ +#define PRINT_HT_CAP(_cond, _str) \ + do { \ + if (_cond) \ + p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \ + } while (0) + char buf[512], *p = buf; + int i; + struct sta_info *sta = file->private_data; + struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; + + p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", + htc->ht_supported ? 
"" : "not "); + if (htc->ht_supported) { + p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap); + + PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDCP"); + PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); + PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20"); + + PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save"); + PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save"); + PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled"); + + PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield"); + PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI"); + PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI"); + PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC"); + + PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC"); + PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream"); + PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams"); + PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams"); + + PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack"); + + PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: " + "3839 bytes"); + PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: " + "7935 bytes"); + + /* + * For beacons and probe response this would mean the BSS + * does or does not allow the usage of DSSS/CCK HT40. + * Otherwise it means the STA does or does not use + * DSSS/CCK HT40. + */ + PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40"); + PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40"); + + /* BIT(13) is reserved */ + + PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant"); + + PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection"); + + p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n", + htc->ampdu_factor, htc->ampdu_density); + p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:"); + + for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) + p += scnprintf(p, sizeof(buf)+buf-p, " %.2x", + htc->mcs.rx_mask[i]); + p += scnprintf(p, sizeof(buf)+buf-p, "\n"); + + /* If not set this is meaningless */ + if (le16_to_cpu(htc->mcs.rx_highest)) { + p += scnprintf(p, sizeof(buf)+buf-p, + "MCS rx highest: %d Mbps\n", + le16_to_cpu(htc->mcs.rx_highest)); + } + + p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n", + htc->mcs.tx_params); + } + + return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); +} +STA_OPS(ht_capa); + +#define DEBUGFS_ADD(name) \ + debugfs_create_file(#name, 0400, \ + sta->debugfs.dir, sta, &sta_ ##name## _ops); + + +void ieee80211_sta_debugfs_add(struct sta_info *sta) +{ + struct dentry *stations_dir = sta->local->debugfs.stations; + u8 mac[3*ETH_ALEN]; + + sta->debugfs.add_has_run = true; + + if (!stations_dir) + return; + + snprintf(mac, sizeof(mac), "%pM", sta->sta.addr); + + /* + * This might fail due to a race condition: + * When mac80211 unlinks a station, the debugfs entries + * remain, but it is already possible to link a new + * station with the same address which triggers adding + * it to debugfs; therefore, if the old station isn't + * destroyed quickly enough the old station's debugfs + * dir might still be around. 
+ */ + sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); + if (!sta->debugfs.dir) + return; + + DEBUGFS_ADD(flags); + DEBUGFS_ADD(num_ps_buf_frames); + DEBUGFS_ADD(inactive_ms); + DEBUGFS_ADD(last_seq_ctrl); + DEBUGFS_ADD(agg_status); + DEBUGFS_ADD(dev); + DEBUGFS_ADD(rx_packets); + DEBUGFS_ADD(tx_packets); + DEBUGFS_ADD(rx_bytes); + DEBUGFS_ADD(tx_bytes); + DEBUGFS_ADD(rx_duplicates); + DEBUGFS_ADD(rx_fragments); + DEBUGFS_ADD(rx_dropped); + DEBUGFS_ADD(tx_fragments); + DEBUGFS_ADD(tx_filtered); + DEBUGFS_ADD(tx_retry_failed); + DEBUGFS_ADD(tx_retry_count); + DEBUGFS_ADD(last_signal); + DEBUGFS_ADD(last_noise); + DEBUGFS_ADD(wep_weak_iv_count); + DEBUGFS_ADD(ht_capa); +} + +void ieee80211_sta_debugfs_remove(struct sta_info *sta) +{ + debugfs_remove_recursive(sta->debugfs.dir); + sta->debugfs.dir = NULL; +} diff --git a/net/mac80211/debugfs_sta.h b/net/mac80211/debugfs_sta.h new file mode 100644 index 0000000..8b60890 --- /dev/null +++ b/net/mac80211/debugfs_sta.h @@ -0,0 +1,14 @@ +#ifndef __MAC80211_DEBUGFS_STA_H +#define __MAC80211_DEBUGFS_STA_H + +#include "sta_info.h" + +#ifdef CONFIG_MAC80211_DEBUGFS +void ieee80211_sta_debugfs_add(struct sta_info *sta); +void ieee80211_sta_debugfs_remove(struct sta_info *sta); +#else +static inline void ieee80211_sta_debugfs_add(struct sta_info *sta) {} +static inline void ieee80211_sta_debugfs_remove(struct sta_info *sta) {} +#endif + +#endif /* __MAC80211_DEBUGFS_STA_H */ diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h new file mode 100644 index 0000000..c3d8440 --- /dev/null +++ b/net/mac80211/driver-ops.h @@ -0,0 +1,366 @@ +#ifndef __MAC80211_DRIVER_OPS +#define __MAC80211_DRIVER_OPS + +#include +#include "ieee80211_i.h" +#include "driver-trace.h" + +static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb) +{ + return local->ops->tx(&local->hw, skb); +} + +static inline int drv_start(struct ieee80211_local *local) +{ + int ret; + + might_sleep(); + + local->started = true; + smp_mb(); + ret = local->ops->start(&local->hw); + trace_drv_start(local, ret); + return ret; +} + +static inline void drv_stop(struct ieee80211_local *local) +{ + might_sleep(); + + local->ops->stop(&local->hw); + trace_drv_stop(local); + + /* sync away all work on the tasklet before clearing started */ + tasklet_disable(&local->tasklet); + tasklet_enable(&local->tasklet); + + barrier(); + + local->started = false; +} + +static inline int drv_add_interface(struct ieee80211_local *local, + struct ieee80211_vif *vif) +{ + int ret; + + might_sleep(); + + ret = local->ops->add_interface(&local->hw, vif); + trace_drv_add_interface(local, vif_to_sdata(vif), ret); + return ret; +} + +static inline void drv_remove_interface(struct ieee80211_local *local, + struct ieee80211_vif *vif) +{ + might_sleep(); + + local->ops->remove_interface(&local->hw, vif); + trace_drv_remove_interface(local, vif_to_sdata(vif)); +} + +static inline int drv_config(struct ieee80211_local *local, u32 changed) +{ + int ret; + + might_sleep(); + + ret = local->ops->config(&local->hw, changed); + trace_drv_config(local, changed, ret); + return ret; +} + +static inline void drv_bss_info_changed(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *info, + u32 changed) +{ + might_sleep(); + + if (local->ops->bss_info_changed) + local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed); + trace_drv_bss_info_changed(local, sdata, info, changed); +} + +static inline u64 drv_prepare_multicast(struct ieee80211_local 
*local, + int mc_count, + struct dev_addr_list *mc_list) +{ + u64 ret = 0; + + if (local->ops->prepare_multicast) + ret = local->ops->prepare_multicast(&local->hw, mc_count, + mc_list); + + trace_drv_prepare_multicast(local, mc_count, ret); + + return ret; +} + +static inline void drv_configure_filter(struct ieee80211_local *local, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + might_sleep(); + + local->ops->configure_filter(&local->hw, changed_flags, total_flags, + multicast); + trace_drv_configure_filter(local, changed_flags, total_flags, + multicast); +} + +static inline int drv_set_tim(struct ieee80211_local *local, + struct ieee80211_sta *sta, bool set) +{ + int ret = 0; + if (local->ops->set_tim) + ret = local->ops->set_tim(&local->hw, sta, set); + trace_drv_set_tim(local, sta, set, ret); + return ret; +} + +static inline int drv_set_key(struct ieee80211_local *local, + enum set_key_cmd cmd, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + int ret; + + might_sleep(); + + ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); + trace_drv_set_key(local, cmd, sdata, sta, key, ret); + return ret; +} + +static inline void drv_update_tkip_key(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_key_conf *conf, + struct sta_info *sta, u32 iv32, + u16 *phase1key) +{ + struct ieee80211_sta *ista = NULL; + + if (sta) + ista = &sta->sta; + + if (local->ops->update_tkip_key) + local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, + ista, iv32, phase1key); + trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); +} + +static inline int drv_hw_scan(struct ieee80211_local *local, + struct cfg80211_scan_request *req) +{ + int ret; + + might_sleep(); + + ret = local->ops->hw_scan(&local->hw, req); + trace_drv_hw_scan(local, req, ret); + return ret; +} + +static inline void drv_sw_scan_start(struct ieee80211_local *local) +{ + might_sleep(); + + if (local->ops->sw_scan_start) + local->ops->sw_scan_start(&local->hw); + trace_drv_sw_scan_start(local); +} + +static inline void drv_sw_scan_complete(struct ieee80211_local *local) +{ + might_sleep(); + + if (local->ops->sw_scan_complete) + local->ops->sw_scan_complete(&local->hw); + trace_drv_sw_scan_complete(local); +} + +static inline int drv_get_stats(struct ieee80211_local *local, + struct ieee80211_low_level_stats *stats) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (local->ops->get_stats) + ret = local->ops->get_stats(&local->hw, stats); + trace_drv_get_stats(local, stats, ret); + + return ret; +} + +static inline void drv_get_tkip_seq(struct ieee80211_local *local, + u8 hw_key_idx, u32 *iv32, u16 *iv16) +{ + if (local->ops->get_tkip_seq) + local->ops->get_tkip_seq(&local->hw, hw_key_idx, iv32, iv16); + trace_drv_get_tkip_seq(local, hw_key_idx, iv32, iv16); +} + +static inline int drv_set_rts_threshold(struct ieee80211_local *local, + u32 value) +{ + int ret = 0; + + might_sleep(); + + if (local->ops->set_rts_threshold) + ret = local->ops->set_rts_threshold(&local->hw, value); + trace_drv_set_rts_threshold(local, value, ret); + return ret; +} + +static inline int drv_set_coverage_class(struct ieee80211_local *local, + u8 value) +{ + int ret = 0; + might_sleep(); + + if (local->ops->set_coverage_class) + local->ops->set_coverage_class(&local->hw, value); + else + ret = -EOPNOTSUPP; + + trace_drv_set_coverage_class(local, value, ret); + return ret; +} + +static inline void drv_sta_notify(struct 
ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + if (local->ops->sta_notify) + local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta); + trace_drv_sta_notify(local, sdata, cmd, sta); +} + +static inline int drv_sta_add(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta) +{ + int ret = 0; + + might_sleep(); + + if (local->ops->sta_add) + ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); + else if (local->ops->sta_notify) + local->ops->sta_notify(&local->hw, &sdata->vif, + STA_NOTIFY_ADD, sta); + + trace_drv_sta_add(local, sdata, sta, ret); + + return ret; +} + +static inline void drv_sta_remove(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta) +{ + might_sleep(); + + if (local->ops->sta_remove) + local->ops->sta_remove(&local->hw, &sdata->vif, sta); + else if (local->ops->sta_notify) + local->ops->sta_notify(&local->hw, &sdata->vif, + STA_NOTIFY_REMOVE, sta); + + trace_drv_sta_remove(local, sdata, sta); +} + +static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (local->ops->conf_tx) + ret = local->ops->conf_tx(&local->hw, queue, params); + trace_drv_conf_tx(local, queue, params, ret); + return ret; +} + +static inline u64 drv_get_tsf(struct ieee80211_local *local) +{ + u64 ret = -1ULL; + + might_sleep(); + + if (local->ops->get_tsf) + ret = local->ops->get_tsf(&local->hw); + trace_drv_get_tsf(local, ret); + return ret; +} + +static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) +{ + might_sleep(); + + if (local->ops->set_tsf) + local->ops->set_tsf(&local->hw, tsf); + trace_drv_set_tsf(local, tsf); +} + +static inline void drv_reset_tsf(struct ieee80211_local *local) +{ + might_sleep(); + + if (local->ops->reset_tsf) + local->ops->reset_tsf(&local->hw); + trace_drv_reset_tsf(local); +} + +static inline int drv_tx_last_beacon(struct ieee80211_local *local) +{ + int ret = 1; + + might_sleep(); + + if (local->ops->tx_last_beacon) + ret = local->ops->tx_last_beacon(&local->hw); + trace_drv_tx_last_beacon(local, ret); + return ret; +} + +static inline int drv_ampdu_action(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, + u16 *ssn) +{ + int ret = -EOPNOTSUPP; + if (local->ops->ampdu_action) + ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, + sta, tid, ssn); + trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret); + return ret; +} + + +static inline void drv_rfkill_poll(struct ieee80211_local *local) +{ + might_sleep(); + + if (local->ops->rfkill_poll) + local->ops->rfkill_poll(&local->hw); +} + +static inline void drv_flush(struct ieee80211_local *local, bool drop) +{ + might_sleep(); + + trace_drv_flush(local, drop); + if (local->ops->flush) + local->ops->flush(&local->hw, drop); +} +#endif /* __MAC80211_DRIVER_OPS */ diff --git a/net/mac80211/driver-trace.c b/net/mac80211/driver-trace.c new file mode 100644 index 0000000..8ed8711 --- /dev/null +++ b/net/mac80211/driver-trace.c @@ -0,0 +1,9 @@ +/* bug in tracepoint.h, it should include this */ +#include + +/* sparse isn't too happy with all macros... 
*/ +#ifndef __CHECKER__ +#include "driver-ops.h" +#define CREATE_TRACE_POINTS +#include "driver-trace.h" +#endif diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h new file mode 100644 index 0000000..1e07abb --- /dev/null +++ b/net/mac80211/driver-trace.h @@ -0,0 +1,779 @@ +#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __MAC80211_DRIVER_TRACE + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#include +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) */ +#include +#include "ieee80211_i.h" + +#if !defined(CONFIG_MAC80211_DRIVER_API_TRACER) || defined(__CHECKER__) +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#endif + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mac80211 + +#define MAXNAME 32 +#define LOCAL_ENTRY __array(char, wiphy_name, 32) +#define LOCAL_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(local->hw.wiphy), MAXNAME) +#define LOCAL_PR_FMT "%s" +#define LOCAL_PR_ARG __entry->wiphy_name + +#define STA_ENTRY __array(char, sta_addr, ETH_ALEN) +#define STA_ASSIGN (sta ? memcpy(__entry->sta_addr, sta->addr, ETH_ALEN) : memset(__entry->sta_addr, 0, ETH_ALEN)) +#define STA_PR_FMT " sta:%pM" +#define STA_PR_ARG __entry->sta_addr + +#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \ + __string(vif_name, sdata->dev ? sdata->dev->name : "") +#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \ + __assign_str(vif_name, sdata->dev ? sdata->dev->name : "") +#define VIF_PR_FMT " vif:%s(%d)" +#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type + +TRACE_EVENT(drv_start, + TP_PROTO(struct ieee80211_local *local, int ret), + + TP_ARGS(local, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) +); + +TRACE_EVENT(drv_stop, + TP_PROTO(struct ieee80211_local *local), + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) +); + +TRACE_EVENT(drv_add_interface, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + int ret), + + TP_ARGS(local, sdata, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __array(char, addr, 6) + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + memcpy(__entry->addr, sdata->vif.addr, 6); + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " addr:%pM ret:%d", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr, __entry->ret + ) +); + +TRACE_EVENT(drv_remove_interface, + TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), + + TP_ARGS(local, sdata), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __array(char, addr, 6) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + memcpy(__entry->addr, sdata->vif.addr, 6); + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " addr:%pM", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr + ) +); + +TRACE_EVENT(drv_config, + TP_PROTO(struct ieee80211_local *local, + u32 changed, + int ret), + + TP_ARGS(local, changed, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u32, changed) + __field(int, ret) + __field(u32, flags) + __field(int, power_level) + __field(int, dynamic_ps_timeout) + __field(int, max_sleep_period) + __field(u16, listen_interval) + __field(u8, long_frame_max_tx_count) + __field(u8, short_frame_max_tx_count) + __field(int, center_freq) + 
__field(int, channel_type) + __field(int, smps) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->changed = changed; + __entry->ret = ret; + __entry->flags = local->hw.conf.flags; + __entry->power_level = local->hw.conf.power_level; + __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; + __entry->max_sleep_period = local->hw.conf.max_sleep_period; + __entry->listen_interval = local->hw.conf.listen_interval; + __entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count; + __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count; + __entry->center_freq = local->hw.conf.channel->center_freq; + __entry->channel_type = local->hw.conf.channel_type; + __entry->smps = local->hw.conf.smps_mode; + ), + + TP_printk( + LOCAL_PR_FMT " ch:%#x freq:%d ret:%d", + LOCAL_PR_ARG, __entry->changed, __entry->center_freq, __entry->ret + ) +); + +TRACE_EVENT(drv_bss_info_changed, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss_conf *info, + u32 changed), + + TP_ARGS(local, sdata, info, changed), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + __field(bool, assoc) + __field(u16, aid) + __field(bool, cts) + __field(bool, shortpre) + __field(bool, shortslot) + __field(u8, dtimper) + __field(u16, bcnint) + __field(u16, assoc_cap) + __field(u64, timestamp) + __field(u32, basic_rates) + __field(u32, changed) + __field(bool, enable_beacon) + __field(u16, ht_operation_mode) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + __entry->changed = changed; + __entry->aid = info->aid; + __entry->assoc = info->assoc; + __entry->shortpre = info->use_short_preamble; + __entry->cts = info->use_cts_prot; + __entry->shortslot = info->use_short_slot; + __entry->dtimper = info->dtim_period; + __entry->bcnint = info->beacon_int; + __entry->assoc_cap = info->assoc_capability; + __entry->timestamp = info->timestamp; + __entry->basic_rates = info->basic_rates; + __entry->enable_beacon = info->enable_beacon; + __entry->ht_operation_mode = info->ht_operation_mode; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT " changed:%#x", + LOCAL_PR_ARG, VIF_PR_ARG, __entry->changed + ) +); + +TRACE_EVENT(drv_prepare_multicast, + TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret), + + TP_ARGS(local, mc_count, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, mc_count) + __field(u64, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->mc_count = mc_count; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT " prepare mc (%d): %llx", + LOCAL_PR_ARG, __entry->mc_count, + (unsigned long long) __entry->ret + ) +); + +TRACE_EVENT(drv_configure_filter, + TP_PROTO(struct ieee80211_local *local, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast), + + TP_ARGS(local, changed_flags, total_flags, multicast), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(unsigned int, changed) + __field(unsigned int, total) + __field(u64, multicast) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->changed = changed_flags; + __entry->total = *total_flags; + __entry->multicast = multicast; + ), + + TP_printk( + LOCAL_PR_FMT " changed:%#x total:%#x", + LOCAL_PR_ARG, __entry->changed, __entry->total + ) +); + +TRACE_EVENT(drv_set_tim, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, bool set, int ret), + + TP_ARGS(local, sta, set, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(bool, set) + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + __entry->set = set; + 
__entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT " set:%d ret:%d", + LOCAL_PR_ARG, STA_PR_FMT, __entry->set, __entry->ret + ) +); + +TRACE_EVENT(drv_set_key, + TP_PROTO(struct ieee80211_local *local, + enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int ret), + + TP_ARGS(local, cmd, sdata, sta, key, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(enum ieee80211_key_alg, alg) + __field(u8, hw_key_idx) + __field(u8, flags) + __field(s8, keyidx) + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->alg = key->alg; + __entry->flags = key->flags; + __entry->keyidx = key->keyidx; + __entry->hw_key_idx = key->hw_key_idx; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret + ) +); + +TRACE_EVENT(drv_update_tkip_key, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_key_conf *conf, + struct ieee80211_sta *sta, u32 iv32), + + TP_ARGS(local, sdata, conf, sta, iv32), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u32, iv32) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->iv32 = iv32; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " iv32:%#x", + LOCAL_PR_ARG,VIF_PR_ARG,STA_PR_ARG, __entry->iv32 + ) +); + +TRACE_EVENT(drv_hw_scan, + TP_PROTO(struct ieee80211_local *local, + struct cfg80211_scan_request *req, int ret), + + TP_ARGS(local, req, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT " ret:%d", + LOCAL_PR_ARG, __entry->ret + ) +); + +TRACE_EVENT(drv_sw_scan_start, + TP_PROTO(struct ieee80211_local *local), + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) +); + +TRACE_EVENT(drv_sw_scan_complete, + TP_PROTO(struct ieee80211_local *local), + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) +); + +TRACE_EVENT(drv_get_stats, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_low_level_stats *stats, + int ret), + + TP_ARGS(local, stats, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, ret) + __field(unsigned int, ackfail) + __field(unsigned int, rtsfail) + __field(unsigned int, fcserr) + __field(unsigned int, rtssucc) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + __entry->ackfail = stats->dot11ACKFailureCount; + __entry->rtsfail = stats->dot11RTSFailureCount; + __entry->fcserr = stats->dot11FCSErrorCount; + __entry->rtssucc = stats->dot11RTSSuccessCount; + ), + + TP_printk( + LOCAL_PR_FMT " ret:%d", + LOCAL_PR_ARG, __entry->ret + ) +); + +TRACE_EVENT(drv_get_tkip_seq, + TP_PROTO(struct ieee80211_local *local, + u8 hw_key_idx, u32 *iv32, u16 *iv16), + + TP_ARGS(local, hw_key_idx, iv32, iv16), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u8, hw_key_idx) + __field(u32, iv32) + __field(u16, iv16) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->hw_key_idx = hw_key_idx; + __entry->iv32 = *iv32; + __entry->iv16 = *iv16; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) +); + +TRACE_EVENT(drv_set_rts_threshold, + TP_PROTO(struct ieee80211_local *local, u32 value, int ret), + + TP_ARGS(local, value, 
ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u32, value) + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + __entry->value = value; + ), + + TP_printk( + LOCAL_PR_FMT " value:%d ret:%d", + LOCAL_PR_ARG, __entry->value, __entry->ret + ) +); + +TRACE_EVENT(drv_set_coverage_class, + TP_PROTO(struct ieee80211_local *local, u8 value, int ret), + + TP_ARGS(local, value, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u8, value) + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + __entry->value = value; + ), + + TP_printk( + LOCAL_PR_FMT " value:%d ret:%d", + LOCAL_PR_ARG, __entry->value, __entry->ret + ) +); + +TRACE_EVENT(drv_sta_notify, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta), + + TP_ARGS(local, sdata, cmd, sta), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(u32, cmd) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->cmd = cmd; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " cmd:%d", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->cmd + ) +); + +TRACE_EVENT(drv_sta_add, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, int ret), + + TP_ARGS(local, sdata, sta, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret + ) +); + +TRACE_EVENT(drv_sta_remove, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta), + + TP_ARGS(local, sdata, sta), + + TP_STRUCT__entry( + LOCAL_ENTRY + VIF_ENTRY + STA_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG + ) +); + +TRACE_EVENT(drv_conf_tx, + TP_PROTO(struct ieee80211_local *local, u16 queue, + const struct ieee80211_tx_queue_params *params, + int ret), + + TP_ARGS(local, queue, params, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u16, queue) + __field(u16, txop) + __field(u16, cw_min) + __field(u16, cw_max) + __field(u8, aifs) + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->queue = queue; + __entry->ret = ret; + __entry->txop = params->txop; + __entry->cw_max = params->cw_max; + __entry->cw_min = params->cw_min; + __entry->aifs = params->aifs; + ), + + TP_printk( + LOCAL_PR_FMT " queue:%d ret:%d", + LOCAL_PR_ARG, __entry->queue, __entry->ret + ) +); + +TRACE_EVENT(drv_get_tsf, + TP_PROTO(struct ieee80211_local *local, u64 ret), + + TP_ARGS(local, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u64, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT " ret:%llu", + LOCAL_PR_ARG, (unsigned long long)__entry->ret + ) +); + +TRACE_EVENT(drv_set_tsf, + TP_PROTO(struct ieee80211_local *local, u64 tsf), + + TP_ARGS(local, tsf), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(u64, tsf) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->tsf = tsf; + ), + + TP_printk( + LOCAL_PR_FMT " tsf:%llu", + LOCAL_PR_ARG, (unsigned long long)__entry->tsf + ) +); + +TRACE_EVENT(drv_reset_tsf, + TP_PROTO(struct ieee80211_local *local), + + TP_ARGS(local), + + TP_STRUCT__entry( + LOCAL_ENTRY 
+ ), + + TP_fast_assign( + LOCAL_ASSIGN; + ), + + TP_printk( + LOCAL_PR_FMT, LOCAL_PR_ARG + ) +); + +TRACE_EVENT(drv_tx_last_beacon, + TP_PROTO(struct ieee80211_local *local, int ret), + + TP_ARGS(local, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(int, ret) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->ret = ret; + ), + + TP_printk( + LOCAL_PR_FMT " ret:%d", + LOCAL_PR_ARG, __entry->ret + ) +); + +TRACE_EVENT(drv_ampdu_action, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, + u16 *ssn, int ret), + + TP_ARGS(local, sdata, action, sta, tid, ssn, ret), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(u32, action) + __field(u16, tid) + __field(u16, ssn) + __field(int, ret) + VIF_ENTRY + ), + + TP_fast_assign( + LOCAL_ASSIGN; + VIF_ASSIGN; + STA_ASSIGN; + __entry->ret = ret; + __entry->action = action; + __entry->tid = tid; + __entry->ssn = ssn ? *ssn : 0; + ), + + TP_printk( + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d", + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret + ) +); + +TRACE_EVENT(drv_flush, + TP_PROTO(struct ieee80211_local *local, bool drop), + + TP_ARGS(local, drop), + + TP_STRUCT__entry( + LOCAL_ENTRY + __field(bool, drop) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + __entry->drop = drop; + ), + + TP_printk( + LOCAL_PR_FMT " drop:%d", + LOCAL_PR_ARG, __entry->drop + ) +); +#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE driver-trace +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) +#include +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) */ diff --git a/net/mac80211/event.c b/net/mac80211/event.c new file mode 100644 index 0000000..01ae759 --- /dev/null +++ b/net/mac80211/event.c @@ -0,0 +1,27 @@ +/* + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * mac80211 - events + */ +#include +#include "ieee80211_i.h" + +/* + * Indicate a failed Michael MIC to userspace. If the caller knows the TSC of + * the frame that generated the MIC failure (i.e., if it was provided by the + * driver or is still in the frame), it should provide that information. + */ +void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, + struct ieee80211_hdr *hdr, const u8 *tsc, + gfp_t gfp) +{ + cfg80211_michael_mic_failure(sdata->dev, hdr->addr2, + (hdr->addr1[0] & 0x01) ? + NL80211_KEYTYPE_GROUP : + NL80211_KEYTYPE_PAIRWISE, + keyidx, tsc, gfp); +} diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c new file mode 100644 index 0000000..bb677a7 --- /dev/null +++ b/net/mac80211/ht.c @@ -0,0 +1,234 @@ +/* + * HT handling + * + * Copyright 2003, Jouni Malinen + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * Copyright 2007-2008, Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include "ieee80211_i.h" +#include "rate.h" + +void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, + struct ieee80211_ht_cap *ht_cap_ie, + struct ieee80211_sta_ht_cap *ht_cap) +{ + u8 ampdu_info, tx_mcs_set_cap; + int i, max_tx_streams; + + BUG_ON(!ht_cap); + + memset(ht_cap, 0, sizeof(*ht_cap)); + + if (!ht_cap_ie) + return; + + ht_cap->ht_supported = true; + + /* + * The bits listed in this expression should be + * the same for the peer and us, if the station + * advertises more then we can't use those thus + * we mask them out. + */ + ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & + (sband->ht_cap.cap | + ~(IEEE80211_HT_CAP_LDPC_CODING | + IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_DSSSCCK40)); + /* + * The STBC bits are asymmetric -- if we don't have + * TX then mask out the peer's RX and vice versa. + */ + if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)) + ht_cap->cap &= ~IEEE80211_HT_CAP_RX_STBC; + if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)) + ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC; + + ampdu_info = ht_cap_ie->ampdu_params_info; + ht_cap->ampdu_factor = + ampdu_info & IEEE80211_HT_AMPDU_PARM_FACTOR; + ht_cap->ampdu_density = + (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2; + + /* own MCS TX capabilities */ + tx_mcs_set_cap = sband->ht_cap.mcs.tx_params; + + /* can we TX with MCS rates? */ + if (!(tx_mcs_set_cap & IEEE80211_HT_MCS_TX_DEFINED)) + return; + + /* Counting from 0, therefore +1 */ + if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_RX_DIFF) + max_tx_streams = + ((tx_mcs_set_cap & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) + >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1; + else + max_tx_streams = IEEE80211_HT_MCS_TX_MAX_STREAMS; + + /* + * 802.11n D5.0 20.3.5 / 20.6 says: + * - indices 0 to 7 and 32 are single spatial stream + * - 8 to 31 are multiple spatial streams using equal modulation + * [8..15 for two streams, 16..23 for three and 24..31 for four] + * - remainder are multiple spatial streams using unequal modulation + */ + for (i = 0; i < max_tx_streams; i++) + ht_cap->mcs.rx_mask[i] = + sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; + + if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION) + for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE; + i < IEEE80211_HT_MCS_MASK_LEN; i++) + ht_cap->mcs.rx_mask[i] = + sband->ht_cap.mcs.rx_mask[i] & + ht_cap_ie->mcs.rx_mask[i]; + + /* handle MCS rate 32 too */ + if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) + ht_cap->mcs.rx_mask[32/8] |= 1; +} + +void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta) +{ + int i; + + for (i = 0; i < STA_TID_NUM; i++) { + __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR); + __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, + WLAN_REASON_QSTA_LEAVE_QBSS); + } +} + +void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, + const u8 *da, u16 tid, + u16 initiator, u16 reason_code) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + u16 params; + + skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); + + if (!skb) { + printk(KERN_ERR "%s: failed to allocate buffer " + "for delba frame\n", sdata->name); + return; + } + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); + memset(mgmt, 0, 24); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, 
sdata->vif.addr, ETH_ALEN); + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + else if (sdata->vif.type == NL80211_IFTYPE_STATION) + memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); + + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); + + mgmt->u.action.category = WLAN_CATEGORY_BACK; + mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; + params = (u16)(initiator << 11); /* bit 11 initiator */ + params |= (u16)(tid << 12); /* bit 15:12 TID number */ + + mgmt->u.action.u.delba.params = cpu_to_le16(params); + mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); + + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, size_t len) +{ + u16 tid, params; + u16 initiator; + + params = le16_to_cpu(mgmt->u.action.u.delba.params); + tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; + initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; + +#ifdef CONFIG_MAC80211_HT_DEBUG + if (net_ratelimit()) + printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n", + mgmt->sa, initiator ? "initiator" : "recipient", tid, + le16_to_cpu(mgmt->u.action.u.delba.reason_code)); +#endif /* CONFIG_MAC80211_HT_DEBUG */ + + if (initiator == WLAN_BACK_INITIATOR) + ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid, + WLAN_BACK_INITIATOR, 0); + else { /* WLAN_BACK_RECIPIENT */ + spin_lock_bh(&sta->lock); + if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK) + ___ieee80211_stop_tx_ba_session(sta, tid, + WLAN_BACK_RECIPIENT); + spin_unlock_bh(&sta->lock); + } +} + +int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps, const u8 *da, + const u8 *bssid) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *action_frame; + + /* 27 = header + category + action + smps mode */ + skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, local->hw.extra_tx_headroom); + action_frame = (void *)skb_put(skb, 27); + memcpy(action_frame->da, da, ETH_ALEN); + memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN); + memcpy(action_frame->bssid, bssid, ETH_ALEN); + action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + action_frame->u.action.category = WLAN_CATEGORY_HT; + action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS; + switch (smps) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_NUM_MODES: + WARN_ON(1); + case IEEE80211_SMPS_OFF: + action_frame->u.action.u.ht_smps.smps_control = + WLAN_HT_SMPS_CONTROL_DISABLED; + break; + case IEEE80211_SMPS_STATIC: + action_frame->u.action.u.ht_smps.smps_control = + WLAN_HT_SMPS_CONTROL_STATIC; + break; + case IEEE80211_SMPS_DYNAMIC: + action_frame->u.action.u.ht_smps.smps_control = + WLAN_HT_SMPS_CONTROL_DYNAMIC; + break; + } + + /* we'll do more on status of this frame */ + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + ieee80211_tx_skb(sdata, skb); + + return 0; +} diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c new file mode 100644 index 0000000..f3e9424 --- /dev/null +++ b/net/mac80211/ibss.c @@ -0,0 +1,964 @@ +/* + * IBSS mode implementation + * Copyright 2003-2008, Jouni Malinen + * Copyright 2004, Instant802 Networks, Inc. 
+ * Copyright 2005, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * Copyright 2009, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" + +#define IEEE80211_SCAN_INTERVAL (2 * HZ) +#define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) +#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) + +#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) +#define IEEE80211_IBSS_MERGE_DELAY 0x400000 +#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) + +#define IEEE80211_IBSS_MAX_STA_ENTRIES 128 + + +static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + u16 auth_alg, auth_transaction, status_code; + + if (len < 24 + 6) + return; + + auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); + auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); + status_code = le16_to_cpu(mgmt->u.auth.status_code); + + /* + * IEEE 802.11 standard does not require authentication in IBSS + * networks and most implementations do not seem to use it. + * However, try to reply to authentication attempts if someone + * has actually implemented this. + */ + if (auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1) + ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0, + sdata->u.ibss.bssid, NULL, 0, 0); +} + +static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, const int beacon_int, + struct ieee80211_channel *chan, + const u32 basic_rates, + const u16 capability, u64 tsf) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + int rates, i; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + u8 *pos; + struct ieee80211_supported_band *sband; + struct cfg80211_bss *bss; + u32 bss_change; + u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; + + /* Reset own TSF to allow time synchronization work. */ + drv_reset_tsf(local); + + skb = ifibss->skb; + rcu_assign_pointer(ifibss->presp, NULL); + synchronize_rcu(); + skb->data = skb->head; + skb->len = 0; + skb_reset_tail_pointer(skb); + skb_reserve(skb, sdata->local->hw.extra_tx_headroom); + + if (memcmp(ifibss->bssid, bssid, ETH_ALEN)) + sta_info_flush(sdata->local, sdata); + + memcpy(ifibss->bssid, bssid, ETH_ALEN); + + sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 
1 : 0; + + local->oper_channel = chan; + local->oper_channel_type = NL80211_CHAN_NO_HT; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + + sband = local->hw.wiphy->bands[chan->band]; + + /* build supported rates array */ + pos = supp_rates; + for (i = 0; i < sband->n_bitrates; i++) { + int rate = sband->bitrates[i].bitrate; + u8 basic = 0; + if (basic_rates & BIT(i)) + basic = 0x80; + *pos++ = basic | (u8) (rate / 5); + } + + /* Build IBSS probe response */ + mgmt = (void *) skb_put(skb, 24 + sizeof(mgmt->u.beacon)); + memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + memset(mgmt->da, 0xff, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); + mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int); + mgmt->u.beacon.timestamp = cpu_to_le64(tsf); + mgmt->u.beacon.capab_info = cpu_to_le16(capability); + + pos = skb_put(skb, 2 + ifibss->ssid_len); + *pos++ = WLAN_EID_SSID; + *pos++ = ifibss->ssid_len; + memcpy(pos, ifibss->ssid, ifibss->ssid_len); + + rates = sband->n_bitrates; + if (rates > 8) + rates = 8; + pos = skb_put(skb, 2 + rates); + *pos++ = WLAN_EID_SUPP_RATES; + *pos++ = rates; + memcpy(pos, supp_rates, rates); + + if (sband->band == IEEE80211_BAND_2GHZ) { + pos = skb_put(skb, 2 + 1); + *pos++ = WLAN_EID_DS_PARAMS; + *pos++ = 1; + *pos++ = ieee80211_frequency_to_channel(chan->center_freq); + } + + pos = skb_put(skb, 2 + 2); + *pos++ = WLAN_EID_IBSS_PARAMS; + *pos++ = 2; + /* FIX: set ATIM window based on scan results */ + *pos++ = 0; + *pos++ = 0; + + if (sband->n_bitrates > 8) { + rates = sband->n_bitrates - 8; + pos = skb_put(skb, 2 + rates); + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos++ = rates; + memcpy(pos, &supp_rates[8], rates); + } + + if (ifibss->ie_len) + memcpy(skb_put(skb, ifibss->ie_len), + ifibss->ie, ifibss->ie_len); + + rcu_assign_pointer(ifibss->presp, skb); + + sdata->vif.bss_conf.beacon_int = beacon_int; + bss_change = BSS_CHANGED_BEACON_INT; + bss_change |= ieee80211_reset_erp_info(sdata); + bss_change |= BSS_CHANGED_BSSID; + bss_change |= BSS_CHANGED_BEACON; + bss_change |= BSS_CHANGED_BEACON_ENABLED; + ieee80211_bss_info_change_notify(sdata, bss_change); + + ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates); + + ifibss->state = IEEE80211_IBSS_MLME_JOINED; + mod_timer(&ifibss->timer, + round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); + + bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel, + mgmt, skb->len, 0, GFP_KERNEL); + cfg80211_put_bss(bss); + cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL); +} + +static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, + struct ieee80211_bss *bss) +{ + struct cfg80211_bss *cbss = + container_of((void *)bss, struct cfg80211_bss, priv); + struct ieee80211_supported_band *sband; + u32 basic_rates; + int i, j; + u16 beacon_int = cbss->beacon_interval; + + if (beacon_int < 10) + beacon_int = 10; + + sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; + + basic_rates = 0; + + for (i = 0; i < bss->supp_rates_len; i++) { + int rate = (bss->supp_rates[i] & 0x7f) * 5; + bool is_basic = !!(bss->supp_rates[i] & 0x80); + + for (j = 0; j < sband->n_bitrates; j++) { + if (sband->bitrates[j].bitrate == rate) { + if (is_basic) + basic_rates |= BIT(j); + break; + } + } + } + + __ieee80211_sta_join_ibss(sdata, cbss->bssid, + beacon_int, + cbss->channel, + basic_rates, + cbss->capability, + cbss->tsf); +} + 
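Both the rate-IE construction in __ieee80211_sta_join_ibss() and the parsing loop above rely on the 802.11 Supported Rates encoding: the low seven bits of each octet carry the rate in 500 kbps units, bit 7 marks a basic (mandatory) rate, and mac80211 bitrate fields are in 100 kbps units, hence the multiply/divide by 5 in the code. The following is a minimal sketch of the two conversions with hypothetical helper names, shown purely for illustration and not part of mac80211 itself:

#include <linux/types.h>

/* Encode a mac80211 bitrate (100 kbps units) as a Supported Rates octet. */
static inline u8 rate_ie_octet(u16 bitrate_100kbps, bool basic)
{
	return (basic ? 0x80 : 0x00) | (u8)(bitrate_100kbps / 5);
}

/* Decode a Supported Rates octet back to 100 kbps units. */
static inline u16 rate_ie_to_bitrate(u8 octet)
{
	return (octet & 0x7f) * 5;	/* e.g. 0x96 -> 110, i.e. 11 Mbps, basic */
}

For example, the familiar basic-rate octet 0x96 decodes to 110 (11 Mbps), which is exactly the mapping ieee80211_sta_join_ibss() uses when it matches IE octets against sband->bitrates[] to build the basic_rates bitmap.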
+static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee80211_rx_status *rx_status, + struct ieee802_11_elems *elems, + bool beacon) +{ + struct ieee80211_local *local = sdata->local; + int freq; + struct cfg80211_bss *cbss; + struct ieee80211_bss *bss; + struct sta_info *sta; + struct ieee80211_channel *channel; + u64 beacon_timestamp, rx_timestamp; + u32 supp_rates = 0; + enum ieee80211_band band = rx_status->band; + + if (elems->ds_params && elems->ds_params_len == 1) + freq = ieee80211_channel_to_frequency(elems->ds_params[0]); + else + freq = rx_status->freq; + + channel = ieee80211_get_channel(local->hw.wiphy, freq); + + if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) + return; + + if (sdata->vif.type == NL80211_IFTYPE_ADHOC && elems->supp_rates && + memcmp(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) { + supp_rates = ieee80211_sta_get_rates(local, elems, band); + + rcu_read_lock(); + + sta = sta_info_get(sdata, mgmt->sa); + if (sta) { + u32 prev_rates; + + prev_rates = sta->sta.supp_rates[band]; + /* make sure mandatory rates are always added */ + sta->sta.supp_rates[band] = supp_rates | + ieee80211_mandatory_rates(local, band); + +#ifdef CONFIG_MAC80211_IBSS_DEBUG + if (sta->sta.supp_rates[band] != prev_rates) + printk(KERN_DEBUG "%s: updated supp_rates set " + "for %pM based on beacon info (0x%llx | " + "0x%llx -> 0x%llx)\n", + sdata->name, + sta->sta.addr, + (unsigned long long) prev_rates, + (unsigned long long) supp_rates, + (unsigned long long) sta->sta.supp_rates[band]); +#endif + rcu_read_unlock(); + } else { + rcu_read_unlock(); + ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, + supp_rates, GFP_KERNEL); + } + } + + bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, + channel, beacon); + if (!bss) + return; + + cbss = container_of((void *)bss, struct cfg80211_bss, priv); + + /* was just updated in ieee80211_bss_info_update */ + beacon_timestamp = cbss->tsf; + + /* check if we need to merge IBSS */ + + /* we use a fixed BSSID */ + if (sdata->u.ibss.fixed_bssid) + goto put_bss; + + /* not an IBSS */ + if (!(cbss->capability & WLAN_CAPABILITY_IBSS)) + goto put_bss; + + /* different channel */ + if (cbss->channel != local->oper_channel) + goto put_bss; + + /* different SSID */ + if (elems->ssid_len != sdata->u.ibss.ssid_len || + memcmp(elems->ssid, sdata->u.ibss.ssid, + sdata->u.ibss.ssid_len)) + goto put_bss; + + /* same BSSID */ + if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) + goto put_bss; + + if (rx_status->flag & RX_FLAG_TSFT) { + /* + * For correct IBSS merging we need mactime; since mactime is + * defined as the time the first data symbol of the frame hits + * the PHY, and the timestamp of the beacon is defined as "the + * time that the data symbol containing the first bit of the + * timestamp is transmitted to the PHY plus the transmitting + * STA's delays through its local PHY from the MAC-PHY + * interface to its interface with the WM" (802.11 11.1.2) + * - equals the time this bit arrives at the receiver - we have + * to take into account the offset between the two. + * + * E.g. at 1 MBit that means mactime is 192 usec earlier + * (=24 bytes * 8 usecs/byte) than the beacon timestamp. 
+ */ + int rate; + + if (rx_status->flag & RX_FLAG_HT) + rate = 65; /* TODO: HT rates */ + else + rate = local->hw.wiphy->bands[band]-> + bitrates[rx_status->rate_idx].bitrate; + + rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); + } else { + /* + * second best option: get current TSF + * (will return -1 if not supported) + */ + rx_timestamp = drv_get_tsf(local); + } + +#ifdef CONFIG_MAC80211_IBSS_DEBUG + printk(KERN_DEBUG "RX beacon SA=%pM BSSID=" + "%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n", + mgmt->sa, mgmt->bssid, + (unsigned long long)rx_timestamp, + (unsigned long long)beacon_timestamp, + (unsigned long long)(rx_timestamp - beacon_timestamp), + jiffies); +#endif + + /* give slow hardware some time to do the TSF sync */ + if (rx_timestamp < IEEE80211_IBSS_MERGE_DELAY) + goto put_bss; + + if (beacon_timestamp > rx_timestamp) { +#ifdef CONFIG_MAC80211_IBSS_DEBUG + printk(KERN_DEBUG "%s: beacon TSF higher than " + "local TSF - IBSS merge with BSSID %pM\n", + sdata->name, mgmt->bssid); +#endif + ieee80211_sta_join_ibss(sdata, bss); + ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, + supp_rates, GFP_KERNEL); + } + + put_bss: + ieee80211_rx_bss_put(local, bss); +} + +/* + * Add a new IBSS station, will also be called by the RX code when, + * in IBSS mode, receiving a frame from a yet-unknown station, hence + * must be callable in atomic context. + */ +struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, + u8 *bssid,u8 *addr, u32 supp_rates, + gfp_t gfp) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + int band = local->hw.conf.channel->band; + + /* + * XXX: Consider removing the least recently used entry and + * allow new one to be added. + */ + if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", + sdata->name, addr); + return NULL; + } + + if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) + return NULL; + + if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) + return NULL; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n", + wiphy_name(local->hw.wiphy), addr, sdata->name); +#endif + + sta = sta_info_alloc(sdata, addr, gfp); + if (!sta) + return NULL; + + set_sta_flags(sta, WLAN_STA_AUTHORIZED); + + /* make sure mandatory rates are always added */ + sta->sta.supp_rates[band] = supp_rates | + ieee80211_mandatory_rates(local, band); + + rate_control_rate_init(sta); + + /* If it fails, maybe we raced another insertion? 
*/ + if (sta_info_insert(sta)) + return sta_info_get(sdata, addr); + return sta; +} + +static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + int active = 0; + struct sta_info *sta; + + rcu_read_lock(); + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sta->sdata == sdata && + time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL, + jiffies)) { + active++; + break; + } + } + + rcu_read_unlock(); + + return active; +} + +/* + * This function is called with state == IEEE80211_IBSS_MLME_JOINED + */ + +static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + + mod_timer(&ifibss->timer, + round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); + + ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT); + + if (time_before(jiffies, ifibss->last_scan_completed + + IEEE80211_IBSS_MERGE_INTERVAL)) + return; + + if (ieee80211_sta_active_ibss(sdata)) + return; + + if (ifibss->fixed_channel) + return; + + printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " + "IBSS networks with same SSID (merge)\n", sdata->name); + + ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len); +} + +static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + u8 bssid[ETH_ALEN]; + u16 capability; + int i; + + if (ifibss->fixed_bssid) { + memcpy(bssid, ifibss->bssid, ETH_ALEN); + } else { + /* Generate random, not broadcast, locally administered BSSID. Mix in + * own MAC address to make sure that devices that do not have proper + * random number generator get different BSSID. 
*/ + get_random_bytes(bssid, ETH_ALEN); + for (i = 0; i < ETH_ALEN; i++) + bssid[i] ^= sdata->vif.addr[i]; + bssid[0] &= ~0x01; + bssid[0] |= 0x02; + } + + printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n", + sdata->name, bssid); + + sband = local->hw.wiphy->bands[ifibss->channel->band]; + + capability = WLAN_CAPABILITY_IBSS; + + if (ifibss->privacy) + capability |= WLAN_CAPABILITY_PRIVACY; + else + sdata->drop_unencrypted = 0; + + __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, + ifibss->channel, 3, /* first two are basic */ + capability, 0); +} + +/* + * This function is called with state == IEEE80211_IBSS_MLME_SEARCH + */ + +static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + struct cfg80211_bss *cbss; + struct ieee80211_channel *chan = NULL; + const u8 *bssid = NULL; + int active_ibss; + u16 capability; + + active_ibss = ieee80211_sta_active_ibss(sdata); +#ifdef CONFIG_MAC80211_IBSS_DEBUG + printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", + sdata->name, active_ibss); +#endif /* CONFIG_MAC80211_IBSS_DEBUG */ + + if (active_ibss) + return; + + capability = WLAN_CAPABILITY_IBSS; + if (ifibss->privacy) + capability |= WLAN_CAPABILITY_PRIVACY; + if (ifibss->fixed_bssid) + bssid = ifibss->bssid; + if (ifibss->fixed_channel) + chan = ifibss->channel; + if (!is_zero_ether_addr(ifibss->bssid)) + bssid = ifibss->bssid; + cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid, + ifibss->ssid, ifibss->ssid_len, + WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY, + capability); + + if (cbss) { + struct ieee80211_bss *bss; + + bss = (void *)cbss->priv; +#ifdef CONFIG_MAC80211_IBSS_DEBUG + printk(KERN_DEBUG " sta_find_ibss: selected %pM current " + "%pM\n", cbss->bssid, ifibss->bssid); +#endif /* CONFIG_MAC80211_IBSS_DEBUG */ + + printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" + " based on configured SSID\n", + sdata->name, cbss->bssid); + + ieee80211_sta_join_ibss(sdata, bss); + ieee80211_rx_bss_put(local, bss); + return; + } + +#ifdef CONFIG_MAC80211_IBSS_DEBUG + printk(KERN_DEBUG " did not try to join ibss\n"); +#endif /* CONFIG_MAC80211_IBSS_DEBUG */ + + /* Selected IBSS not found in current scan results - try to scan */ + if (time_after(jiffies, ifibss->last_scan_completed + + IEEE80211_SCAN_INTERVAL)) { + printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " + "join\n", sdata->name); + + ieee80211_request_internal_scan(sdata, ifibss->ssid, + ifibss->ssid_len); + } else { + int interval = IEEE80211_SCAN_INTERVAL; + + if (time_after(jiffies, ifibss->ibss_join_req + + IEEE80211_IBSS_JOIN_TIMEOUT)) { + if (!(local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)) { + ieee80211_sta_create_ibss(sdata); + return; + } + printk(KERN_DEBUG "%s: IBSS not allowed on" + " %d MHz\n", sdata->name, + local->hw.conf.channel->center_freq); + + /* No IBSS found - decrease scan interval and continue + * scanning. 
*/ + interval = IEEE80211_SCAN_INTERVAL_SLOW; + } + + mod_timer(&ifibss->timer, + round_jiffies(jiffies + interval)); + } +} + +static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + int tx_last_beacon; + struct sk_buff *skb; + struct ieee80211_mgmt *resp; + u8 *pos, *end; + + if (ifibss->state != IEEE80211_IBSS_MLME_JOINED || + len < 24 + 2 || !ifibss->presp) + return; + + tx_last_beacon = drv_tx_last_beacon(local); + +#ifdef CONFIG_MAC80211_IBSS_DEBUG + printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM" + " (tx_last_beacon=%d)\n", + sdata->name, mgmt->sa, mgmt->da, + mgmt->bssid, tx_last_beacon); +#endif /* CONFIG_MAC80211_IBSS_DEBUG */ + + if (!tx_last_beacon) + return; + + if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 && + memcmp(mgmt->bssid, "\xff\xff\xff\xff\xff\xff", ETH_ALEN) != 0) + return; + + end = ((u8 *) mgmt) + len; + pos = mgmt->u.probe_req.variable; + if (pos[0] != WLAN_EID_SSID || + pos + 2 + pos[1] > end) { +#ifdef CONFIG_MAC80211_IBSS_DEBUG + printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " + "from %pM\n", + sdata->name, mgmt->sa); +#endif + return; + } + if (pos[1] != 0 && + (pos[1] != ifibss->ssid_len || + memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { + /* Ignore ProbeReq for foreign SSID */ + return; + } + + /* Reply with ProbeResp */ + skb = skb_copy(ifibss->presp, GFP_KERNEL); + if (!skb) + return; + + resp = (struct ieee80211_mgmt *) skb->data; + memcpy(resp->da, mgmt->sa, ETH_ALEN); +#ifdef CONFIG_MAC80211_IBSS_DEBUG + printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n", + sdata->name, resp->da); +#endif /* CONFIG_MAC80211_IBSS_DEBUG */ + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, skb); +} + +static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee80211_rx_status *rx_status) +{ + size_t baselen; + struct ieee802_11_elems elems; + + if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN)) + return; /* ignore ProbeResp to foreign address */ + + baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; + if (baselen > len) + return; + + ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, + &elems); + + ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); +} + +static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee80211_rx_status *rx_status) +{ + size_t baselen; + struct ieee802_11_elems elems; + + /* Process beacon from the current BSS */ + baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; + if (baselen > len) + return; + + ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); + + ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true); +} + +static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status; + struct ieee80211_mgmt *mgmt; + u16 fc; + + rx_status = IEEE80211_SKB_RXCB(skb); + mgmt = (struct ieee80211_mgmt *) skb->data; + fc = le16_to_cpu(mgmt->frame_control); + + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_PROBE_REQ: + ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len); + break; + case IEEE80211_STYPE_PROBE_RESP: + ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len, + rx_status); + break; + case 
IEEE80211_STYPE_BEACON: + ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, + rx_status); + break; + case IEEE80211_STYPE_AUTH: + ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); + break; + } + + kfree_skb(skb); +} + +static void ieee80211_ibss_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, u.ibss.work); + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_ibss *ifibss; + struct sk_buff *skb; + + if (WARN_ON(local->suspended)) + return; + + if (!ieee80211_sdata_running(sdata)) + return; + + if (local->scanning) + return; + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC)) + return; + ifibss = &sdata->u.ibss; + + while ((skb = skb_dequeue(&ifibss->skb_queue))) + ieee80211_ibss_rx_queued_mgmt(sdata, skb); + + if (!test_and_clear_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request)) + return; + + switch (ifibss->state) { + case IEEE80211_IBSS_MLME_SEARCH: + ieee80211_sta_find_ibss(sdata); + break; + case IEEE80211_IBSS_MLME_JOINED: + ieee80211_sta_merge_ibss(sdata); + break; + default: + WARN_ON(1); + break; + } +} + +static void ieee80211_ibss_timer(unsigned long data) +{ + struct ieee80211_sub_if_data *sdata = + (struct ieee80211_sub_if_data *) data; + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_local *local = sdata->local; + + if (local->quiescing) { + ifibss->timer_running = true; + return; + } + + set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); + ieee80211_queue_work(&local->hw, &ifibss->work); +} + +#ifdef CONFIG_PM +void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + + cancel_work_sync(&ifibss->work); + if (del_timer_sync(&ifibss->timer)) + ifibss->timer_running = true; +} + +void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + + if (ifibss->timer_running) { + add_timer(&ifibss->timer); + ifibss->timer_running = false; + } +} +#endif + +void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + + INIT_WORK(&ifibss->work, ieee80211_ibss_work); + setup_timer(&ifibss->timer, ieee80211_ibss_timer, + (unsigned long) sdata); + skb_queue_head_init(&ifibss->skb_queue); +} + +/* scan finished notification */ +void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + if (sdata->vif.type != NL80211_IFTYPE_ADHOC) + continue; + if (!sdata->u.ibss.ssid_len) + continue; + sdata->u.ibss.last_scan_completed = jiffies; + mod_timer(&sdata->u.ibss.timer, 0); + } + mutex_unlock(&local->iflist_mtx); +} + +ieee80211_rx_result +ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgmt *mgmt; + u16 fc; + + if (skb->len < 24) + return RX_DROP_MONITOR; + + mgmt = (struct ieee80211_mgmt *) skb->data; + fc = le16_to_cpu(mgmt->frame_control); + + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_BEACON: + case IEEE80211_STYPE_PROBE_REQ: + case IEEE80211_STYPE_AUTH: + skb_queue_tail(&sdata->u.ibss.skb_queue, skb); + ieee80211_queue_work(&local->hw, &sdata->u.ibss.work); + return RX_QUEUED; + } + + return RX_DROP_MONITOR; +} + +int 
ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, + struct cfg80211_ibss_params *params) +{ + struct sk_buff *skb; + + if (params->bssid) { + memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN); + sdata->u.ibss.fixed_bssid = true; + } else + sdata->u.ibss.fixed_bssid = false; + + sdata->u.ibss.privacy = params->privacy; + + sdata->vif.bss_conf.beacon_int = params->beacon_interval; + + sdata->u.ibss.channel = params->channel; + sdata->u.ibss.fixed_channel = params->channel_fixed; + + if (params->ie) { + sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len, + GFP_KERNEL); + if (sdata->u.ibss.ie) + sdata->u.ibss.ie_len = params->ie_len; + } + + skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + + 36 /* bitrates */ + + 34 /* SSID */ + + 3 /* DS params */ + + 4 /* IBSS params */ + + params->ie_len); + if (!skb) + return -ENOMEM; + + sdata->u.ibss.skb = skb; + sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; + sdata->u.ibss.ibss_join_req = jiffies; + + memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN); + + /* + * The ssid_len setting below is used to see whether + * we are active, and we need all other settings + * before that may get visible. + */ + mb(); + + sdata->u.ibss.ssid_len = params->ssid_len; + + ieee80211_recalc_idle(sdata->local); + + set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); + ieee80211_queue_work(&sdata->local->hw, &sdata->u.ibss.work); + + return 0; +} + +int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) +{ + struct sk_buff *skb; + + del_timer_sync(&sdata->u.ibss.timer); + clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); + cancel_work_sync(&sdata->u.ibss.work); + clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); + + sta_info_flush(sdata->local, sdata); + + /* remove beacon */ + kfree(sdata->u.ibss.ie); + skb = sdata->u.ibss.presp; + rcu_assign_pointer(sdata->u.ibss.presp, NULL); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); + synchronize_rcu(); + kfree_skb(skb); + + skb_queue_purge(&sdata->u.ibss.skb_queue); + memset(sdata->u.ibss.bssid, 0, ETH_ALEN); + sdata->u.ibss.ssid_len = 0; + + ieee80211_recalc_idle(sdata->local); + + return 0; +} diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h new file mode 100644 index 0000000..241533e --- /dev/null +++ b/net/mac80211/ieee80211_i.h @@ -0,0 +1,1219 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007-2010 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef IEEE80211_I_H +#define IEEE80211_I_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "key.h" +#include "sta_info.h" + +struct ieee80211_local; + +/* Maximum number of broadcast/multicast frames to buffer when some of the + * associated stations are using power saving. */ +#define AP_MAX_BC_BUFFER 128 + +/* Maximum number of frames buffered to all STAs, including multicast frames. + * Note: increasing this limit increases the potential memory requirement. Each + * frame can be up to about 2 kB long. */ +#define TOTAL_MAX_TX_BUFFER 512 + +/* Required encryption head and tailroom */ +#define IEEE80211_ENCRYPT_HEADROOM 8 +#define IEEE80211_ENCRYPT_TAILROOM 18 + +/* IEEE 802.11 (Ch. 
9.5 Defragmentation) requires support for concurrent + * reception of at least three fragmented frames. This limit can be increased + * by changing this define, at the cost of slower frame reassembly and + * increased memory use (about 2 kB of RAM per entry). */ +#define IEEE80211_FRAGMENT_MAX 4 + +/* + * Time after which we ignore scan results and no longer report/use + * them in any way. + */ +#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ) + +#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024)) + +#define IEEE80211_DEFAULT_UAPSD_QUEUES \ + (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \ + IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \ + IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + +#define IEEE80211_DEFAULT_MAX_SP_LEN \ + IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL + +struct ieee80211_fragment_entry { + unsigned long first_frag_time; + unsigned int seq; + unsigned int rx_queue; + unsigned int last_frag; + unsigned int extra_len; + struct sk_buff_head skb_list; + int ccmp; /* Whether fragments were encrypted with CCMP */ + u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ +}; + + +struct ieee80211_bss { + /* don't want to look up all the time */ + size_t ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + + u8 dtim_period; + + bool wmm_used; + bool uapsd_supported; + + unsigned long last_probe_resp; + +#ifdef CONFIG_MAC80211_MESH + u8 *mesh_id; + size_t mesh_id_len; + u8 *mesh_cfg; +#endif + +#define IEEE80211_MAX_SUPP_RATES 32 + u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; + size_t supp_rates_len; + + /* + * During assocation, we save an ERP value from a probe response so + * that we can feed ERP info to the driver when handling the + * association completes. these fields probably won't be up-to-date + * otherwise, you probably don't want to use them. + */ + bool has_erp_value; + u8 erp_value; +}; + +static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss) +{ +#ifdef CONFIG_MAC80211_MESH + return bss->mesh_cfg; +#endif + return NULL; +} + +static inline u8 *bss_mesh_id(struct ieee80211_bss *bss) +{ +#ifdef CONFIG_MAC80211_MESH + return bss->mesh_id; +#endif + return NULL; +} + +static inline u8 bss_mesh_id_len(struct ieee80211_bss *bss) +{ +#ifdef CONFIG_MAC80211_MESH + return bss->mesh_id_len; +#endif + return 0; +} + + +typedef unsigned __bitwise__ ieee80211_tx_result; +#define TX_CONTINUE ((__force ieee80211_tx_result) 0u) +#define TX_DROP ((__force ieee80211_tx_result) 1u) +#define TX_QUEUED ((__force ieee80211_tx_result) 2u) + +#define IEEE80211_TX_FRAGMENTED BIT(0) +#define IEEE80211_TX_UNICAST BIT(1) +#define IEEE80211_TX_PS_BUFFERED BIT(2) + +struct ieee80211_tx_data { + struct sk_buff *skb; + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + struct ieee80211_key *key; + + struct ieee80211_channel *channel; + + u16 ethertype; + unsigned int flags; +}; + + +typedef unsigned __bitwise__ ieee80211_rx_result; +#define RX_CONTINUE ((__force ieee80211_rx_result) 0u) +#define RX_DROP_UNUSABLE ((__force ieee80211_rx_result) 1u) +#define RX_DROP_MONITOR ((__force ieee80211_rx_result) 2u) +#define RX_QUEUED ((__force ieee80211_rx_result) 3u) + +#define IEEE80211_RX_IN_SCAN BIT(0) +/* frame is destined to interface currently processed (incl. 
multicast frames) */ +#define IEEE80211_RX_RA_MATCH BIT(1) +#define IEEE80211_RX_AMSDU BIT(2) +#define IEEE80211_RX_FRAGMENTED BIT(3) +/* only add flags here that do not change with subframes of an aMPDU */ + +struct ieee80211_rx_data { + struct sk_buff *skb; + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + struct ieee80211_key *key; + + unsigned int flags; + int queue; + u32 tkip_iv32; + u16 tkip_iv16; +}; + +struct beacon_data { + u8 *head, *tail; + int head_len, tail_len; + int dtim_period; +}; + +struct ieee80211_if_ap { + struct beacon_data *beacon; + + struct list_head vlans; + + /* yes, this looks ugly, but guarantees that we can later use + * bitmap_empty :) + * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */ + u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; + struct sk_buff_head ps_bc_buf; + atomic_t num_sta_ps; /* number of stations in PS mode */ + int dtim_count; +}; + +struct ieee80211_if_wds { + struct sta_info *sta; + u8 remote_addr[ETH_ALEN]; +}; + +struct ieee80211_if_vlan { + struct list_head list; + + /* used for all tx if the VLAN is configured to 4-addr mode */ + struct sta_info *sta; +}; + +struct mesh_stats { + __u32 fwded_mcast; /* Mesh forwarded multicast frames */ + __u32 fwded_unicast; /* Mesh forwarded unicast frames */ + __u32 fwded_frames; /* Mesh total forwarded frames */ + __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ + __u32 dropped_frames_no_route; /* Not transmitted, no route found */ + atomic_t estab_plinks; +}; + +#define PREQ_Q_F_START 0x1 +#define PREQ_Q_F_REFRESH 0x2 +struct mesh_preq_queue { + struct list_head list; + u8 dst[ETH_ALEN]; + u8 flags; +}; + +enum ieee80211_work_type { + IEEE80211_WORK_ABORT, + IEEE80211_WORK_DIRECT_PROBE, + IEEE80211_WORK_AUTH, + IEEE80211_WORK_ASSOC, + IEEE80211_WORK_REMAIN_ON_CHANNEL, +}; + +/** + * enum work_done_result - indicates what to do after work was done + * + * @WORK_DONE_DESTROY: This work item is no longer needed, destroy. + * @WORK_DONE_REQUEUE: This work item was reset to be reused, and + * should be requeued. 
+ */ +enum work_done_result { + WORK_DONE_DESTROY, + WORK_DONE_REQUEUE, +}; + +struct ieee80211_work { + struct list_head list; + + struct rcu_head rcu_head; + + struct ieee80211_sub_if_data *sdata; + + enum work_done_result (*done)(struct ieee80211_work *wk, + struct sk_buff *skb); + + struct ieee80211_channel *chan; + enum nl80211_channel_type chan_type; + + unsigned long timeout; + enum ieee80211_work_type type; + + u8 filter_ta[ETH_ALEN]; + + bool started; + + union { + struct { + int tries; + u16 algorithm, transaction; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; + u8 key[WLAN_KEY_LEN_WEP104]; + u8 key_len, key_idx; + bool privacy; + } probe_auth; + struct { + struct cfg80211_bss *bss; + const u8 *supp_rates; + const u8 *ht_information_ie; + enum ieee80211_smps_mode smps; + int tries; + u16 capability; + u8 prev_bssid[ETH_ALEN]; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; + u8 supp_rates_len; + bool wmm_used, use_11n, uapsd_used; + } assoc; + struct { + u32 duration; + } remain; + }; + + int ie_len; + /* must be last */ + u8 ie[0]; +}; + +/* flags used in struct ieee80211_if_managed.flags */ +enum ieee80211_sta_flags { + IEEE80211_STA_BEACON_POLL = BIT(0), + IEEE80211_STA_CONNECTION_POLL = BIT(1), + IEEE80211_STA_CONTROL_PORT = BIT(2), + IEEE80211_STA_DISABLE_11N = BIT(4), + IEEE80211_STA_CSA_RECEIVED = BIT(5), + IEEE80211_STA_MFP_ENABLED = BIT(6), + IEEE80211_STA_UAPSD_ENABLED = BIT(7), + IEEE80211_STA_NULLFUNC_ACKED = BIT(8), +}; + +struct ieee80211_if_managed { + struct timer_list timer; + struct timer_list conn_mon_timer; + struct timer_list bcn_mon_timer; + struct timer_list chswitch_timer; + struct work_struct work; + struct work_struct monitor_work; + struct work_struct chswitch_work; + struct work_struct beacon_loss_work; + + unsigned long probe_timeout; + int probe_send_count; + + struct mutex mtx; + struct cfg80211_bss *associated; + + u8 bssid[ETH_ALEN]; + + u16 aid; + + struct sk_buff_head skb_queue; + + unsigned long timers_running; /* used for quiesce/restart */ + bool powersave; /* powersave requested for this iface */ + enum ieee80211_smps_mode req_smps, /* requested smps mode */ + ap_smps; /* smps mode AP thinks we're in */ + + unsigned int flags; + + u32 beacon_crc; + + enum { + IEEE80211_MFP_DISABLED, + IEEE80211_MFP_OPTIONAL, + IEEE80211_MFP_REQUIRED + } mfp; /* management frame protection */ + + int wmm_last_param_set; + + u8 use_4addr; +}; + +enum ieee80211_ibss_request { + IEEE80211_IBSS_REQ_RUN = 0, +}; + +struct ieee80211_if_ibss { + struct timer_list timer; + struct work_struct work; + + struct sk_buff_head skb_queue; + + unsigned long request; + unsigned long last_scan_completed; + + bool timer_running; + + bool fixed_bssid; + bool fixed_channel; + bool privacy; + + u8 bssid[ETH_ALEN]; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len, ie_len; + u8 *ie; + struct ieee80211_channel *channel; + + unsigned long ibss_join_req; + /* probe response/beacon for IBSS */ + struct sk_buff *presp, *skb; + + enum { + IEEE80211_IBSS_MLME_SEARCH, + IEEE80211_IBSS_MLME_JOINED, + } state; +}; + +struct ieee80211_if_mesh { + struct work_struct work; + struct timer_list housekeeping_timer; + struct timer_list mesh_path_timer; + struct timer_list mesh_path_root_timer; + struct sk_buff_head skb_queue; + + unsigned long timers_running; + + unsigned long wrkq_flags; + + u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; + size_t mesh_id_len; + /* Active Path Selection Protocol Identifier */ + u8 mesh_pp_id; + /* Active Path Selection Metric Identifier */ + u8 mesh_pm_id; + /* 
Congestion Control Mode Identifier */ + u8 mesh_cc_id; + /* Synchronization Protocol Identifier */ + u8 mesh_sp_id; + /* Authentication Protocol Identifier */ + u8 mesh_auth_id; + /* Local mesh Sequence Number */ + u32 sn; + /* Last used PREQ ID */ + u32 preq_id; + atomic_t mpaths; + /* Timestamp of last SN update */ + unsigned long last_sn_update; + /* Timestamp of last SN sent */ + unsigned long last_preq; + struct mesh_rmc *rmc; + spinlock_t mesh_preq_queue_lock; + struct mesh_preq_queue preq_queue; + int preq_queue_len; + struct mesh_stats mshstats; + struct mesh_config mshcfg; + u32 mesh_seqnum; + bool accepting_plinks; +}; + +#ifdef CONFIG_MAC80211_MESH +#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \ + do { (msh)->mshstats.name++; } while (0) +#else +#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \ + do { } while (0) +#endif + +/** + * enum ieee80211_sub_if_data_flags - virtual interface flags + * + * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets + * @IEEE80211_SDATA_PROMISC: interface is promisc + * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode + * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between + * associated stations and deliver multicast frames both + * back to wireless media and to the local net stack. + */ +enum ieee80211_sub_if_data_flags { + IEEE80211_SDATA_ALLMULTI = BIT(0), + IEEE80211_SDATA_PROMISC = BIT(1), + IEEE80211_SDATA_OPERATING_GMODE = BIT(2), + IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3), +}; + +struct ieee80211_sub_if_data { + struct list_head list; + + struct wireless_dev wdev; + + /* keys */ + struct list_head key_list; + + struct net_device *dev; + struct ieee80211_local *local; + + unsigned int flags; + + int drop_unencrypted; + + char name[IFNAMSIZ]; + + /* + * keep track of whether the HT opmode (stored in + * vif.bss_info.ht_operation_mode) is valid. + */ + bool ht_opmode_valid; + + /* Fragment table for host-based reassembly */ + struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; + unsigned int fragment_next; + +#define NUM_DEFAULT_KEYS 4 +#define NUM_DEFAULT_MGMT_KEYS 2 + struct ieee80211_key *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS]; + struct ieee80211_key *default_key; + struct ieee80211_key *default_mgmt_key; + + u16 sequence_number; + + /* + * AP this belongs to: self in AP mode and + * corresponding AP in VLAN mode, NULL for + * all others (might be needed later in IBSS) + */ + struct ieee80211_if_ap *bss; + + /* bitmap of allowed (non-MCS) rate indexes for rate control */ + u32 rc_rateidx_mask[IEEE80211_NUM_BANDS]; + + union { + struct ieee80211_if_ap ap; + struct ieee80211_if_wds wds; + struct ieee80211_if_vlan vlan; + struct ieee80211_if_managed mgd; + struct ieee80211_if_ibss ibss; +#ifdef CONFIG_MAC80211_MESH + struct ieee80211_if_mesh mesh; +#endif + u32 mntr_flags; + } u; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct { + struct dentry *dir; + struct dentry *default_key; + struct dentry *default_mgmt_key; + } debugfs; +#endif + /* must be last, dynamically sized area in this! 
*/ + struct ieee80211_vif vif; +}; + +static inline +struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) +{ + return container_of(p, struct ieee80211_sub_if_data, vif); +} + +static inline void +ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata, + u8 mesh_id_len, u8 *mesh_id) +{ +#ifdef CONFIG_MAC80211_MESH + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + ifmsh->mesh_id_len = mesh_id_len; + memcpy(ifmsh->mesh_id, mesh_id, mesh_id_len); +#else + WARN_ON(1); +#endif +} + +enum { + IEEE80211_RX_MSG = 1, + IEEE80211_TX_STATUS_MSG = 2, + IEEE80211_DELBA_MSG = 3, + IEEE80211_ADDBA_MSG = 4, +}; + +enum queue_stop_reason { + IEEE80211_QUEUE_STOP_REASON_DRIVER, + IEEE80211_QUEUE_STOP_REASON_PS, + IEEE80211_QUEUE_STOP_REASON_CSA, + IEEE80211_QUEUE_STOP_REASON_AGGREGATION, + IEEE80211_QUEUE_STOP_REASON_SUSPEND, + IEEE80211_QUEUE_STOP_REASON_SKB_ADD, +}; + +/** + * mac80211 scan flags - currently active scan mode + * + * @SCAN_SW_SCANNING: We're currently in the process of scanning but may as + * well be on the operating channel + * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to + * determine if we are on the operating channel or not + * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning, + * gets only set in conjunction with SCAN_SW_SCANNING + */ +enum { + SCAN_SW_SCANNING, + SCAN_HW_SCANNING, + SCAN_OFF_CHANNEL, +}; + +/** + * enum mac80211_scan_state - scan state machine states + * + * @SCAN_DECISION: Main entry point to the scan state machine, this state + * determines if we should keep on scanning or switch back to the + * operating channel + * @SCAN_SET_CHANNEL: Set the next channel to be scanned + * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses + * @SCAN_LEAVE_OPER_CHANNEL: Leave the operating channel, notify the AP + * about us leaving the channel and stop all associated STA interfaces + * @SCAN_ENTER_OPER_CHANNEL: Enter the operating channel again, notify the + * AP about us being back and restart all associated STA interfaces + */ +enum mac80211_scan_state { + SCAN_DECISION, + SCAN_SET_CHANNEL, + SCAN_SEND_PROBE, + SCAN_LEAVE_OPER_CHANNEL, + SCAN_ENTER_OPER_CHANNEL, +}; + +struct ieee80211_local { + /* embed the driver visible part. + * don't cast (use the static inlines below), but we keep + * it first anyway so they become a no-op */ + struct ieee80211_hw hw; + + const struct ieee80211_ops *ops; + + /* + * work stuff, potentially off-channel (in the future) + */ + struct mutex work_mtx; + struct list_head work_list; + struct timer_list work_timer; + struct work_struct work_work; + struct sk_buff_head work_skb_queue; + + /* + * private workqueue to mac80211. 
mac80211 makes this accessible + * via ieee80211_queue_work() + */ + struct workqueue_struct *workqueue; + + unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES]; + /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ + spinlock_t queue_stop_reason_lock; + + int open_count; + int monitors, cooked_mntrs; + /* number of interfaces with corresponding FIF_ flags */ + int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll; + unsigned int filter_flags; /* FIF_* */ + + /* protects the aggregated multicast list and filter calls */ + spinlock_t filter_lock; + + /* used for uploading changed mc list */ + struct work_struct reconfig_filter; + + /* used to reconfigure hardware SM PS */ + struct work_struct recalc_smps; + + /* aggregated multicast list */ + struct dev_addr_list *mc_list; + int mc_count; + + bool tim_in_locked_section; /* see ieee80211_beacon_get() */ + + /* + * suspended is true if we finished all the suspend _and_ we have + * not yet come up from resume. This is to be used by mac80211 + * to ensure driver sanity during suspend and mac80211's own + * sanity. It can eventually be used for WoW as well. + */ + bool suspended; + + /* + * Resuming is true while suspended, but when we're reprogramming the + * hardware -- at that time it's allowed to use ieee80211_queue_work() + * again even though some other parts of the stack are still suspended + * and we still drop received frames to avoid waking the stack. + */ + bool resuming; + + /* + * quiescing is true during the suspend process _only_ to + * ease timer cancelling etc. + */ + bool quiescing; + + /* device is started */ + bool started; + + int tx_headroom; /* required headroom for hardware/radiotap */ + + /* Tasklet and skb queue to process calls from IRQ mode. All frames + * added to skb_queue will be processed, but frames in + * skb_queue_unreliable may be dropped if the total length of these + * queues increases over the limit. */ +#define IEEE80211_IRQSAFE_QUEUE_LIMIT 128 + struct tasklet_struct tasklet; + struct sk_buff_head skb_queue; + struct sk_buff_head skb_queue_unreliable; + + /* Station data */ + /* + * The mutex only protects the list and counter, + * reads are done in RCU. + * Additionally, the lock protects the hash table, + * the pending list and each BSS's TIM bitmap. + */ + struct mutex sta_mtx; + spinlock_t sta_lock; + unsigned long num_sta; + struct list_head sta_list, sta_pending_list; + struct sta_info *sta_hash[STA_HASH_SIZE]; + struct timer_list sta_cleanup; + struct work_struct sta_finish_work; + int sta_generation; + + struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; + struct tasklet_struct tx_pending_tasklet; + + /* + * This lock is used to prevent concurrent A-MPDU + * session start/stop processing, this thus also + * synchronises the ->ampdu_action() callback to + * drivers and limits it to one at a time. + */ + spinlock_t ampdu_lock; + + /* number of interfaces with corresponding IFF_ flags */ + atomic_t iff_allmultis, iff_promiscs; + + struct rate_control_ref *rate_ctrl; + + struct crypto_blkcipher *wep_tx_tfm; + struct crypto_blkcipher *wep_rx_tfm; + u32 wep_iv; + + /* see iface.c */ + struct list_head interfaces; + struct mutex iflist_mtx; + + /* + * Key lock, protects sdata's key_list and sta_info's + * key pointers (write access, they're RCU.) 
+ */ + spinlock_t key_lock; + + + /* Scanning and BSS list */ + struct mutex scan_mtx; + unsigned long scanning; + struct cfg80211_ssid scan_ssid; + struct cfg80211_scan_request *int_scan_req; + struct cfg80211_scan_request *scan_req, *hw_scan_req; + struct ieee80211_channel *scan_channel; + enum ieee80211_band hw_scan_band; + int scan_channel_idx; + int scan_ies_len; + + enum mac80211_scan_state next_scan_state; + struct delayed_work scan_work; + struct ieee80211_sub_if_data *scan_sdata; + enum nl80211_channel_type oper_channel_type; + struct ieee80211_channel *oper_channel, *csa_channel; + + /* Temporary remain-on-channel for off-channel operations */ + struct ieee80211_channel *tmp_channel; + enum nl80211_channel_type tmp_channel_type; + + /* SNMP counters */ + /* dot11CountersTable */ + u32 dot11TransmittedFragmentCount; + u32 dot11MulticastTransmittedFrameCount; + u32 dot11FailedCount; + u32 dot11RetryCount; + u32 dot11MultipleRetryCount; + u32 dot11FrameDuplicateCount; + u32 dot11ReceivedFragmentCount; + u32 dot11MulticastReceivedFrameCount; + u32 dot11TransmittedFrameCount; + +#ifdef CONFIG_MAC80211_LEDS + int tx_led_counter, rx_led_counter; + struct led_trigger *tx_led, *rx_led, *assoc_led, *radio_led; + char tx_led_name[32], rx_led_name[32], + assoc_led_name[32], radio_led_name[32]; +#endif + +#ifdef CONFIG_MAC80211_DEBUG_COUNTERS + /* TX/RX handler statistics */ + unsigned int tx_handlers_drop; + unsigned int tx_handlers_queued; + unsigned int tx_handlers_drop_unencrypted; + unsigned int tx_handlers_drop_fragment; + unsigned int tx_handlers_drop_wep; + unsigned int tx_handlers_drop_not_assoc; + unsigned int tx_handlers_drop_unauth_port; + unsigned int rx_handlers_drop; + unsigned int rx_handlers_queued; + unsigned int rx_handlers_drop_nullfunc; + unsigned int rx_handlers_drop_defrag; + unsigned int rx_handlers_drop_short; + unsigned int rx_handlers_drop_passive_scan; + unsigned int tx_expand_skb_head; + unsigned int tx_expand_skb_head_cloned; + unsigned int rx_expand_skb_head; + unsigned int rx_expand_skb_head2; + unsigned int rx_handlers_fragments; + unsigned int tx_status_drop; +#define I802_DEBUG_INC(c) (c)++ +#else /* CONFIG_MAC80211_DEBUG_COUNTERS */ +#define I802_DEBUG_INC(c) do { } while (0) +#endif /* CONFIG_MAC80211_DEBUG_COUNTERS */ + + + int total_ps_buffered; /* total number of all buffered unicast and + * multicast packets for power saving stations + */ + int wifi_wme_noack_test; + unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ + + /* + * Bitmask of enabled u-apsd queues, + * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association + * to take effect. + */ + unsigned int uapsd_queues; + + /* + * Maximum number of buffered frames AP can deliver during a + * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar. + * Needs a new association to take effect. + */ + unsigned int uapsd_max_sp_len; + + bool pspolling; + bool offchannel_ps_enabled; + /* + * PS can only be enabled when we have exactly one managed + * interface (and monitors) in PS, this then points there. 
+ */ + struct ieee80211_sub_if_data *ps_sdata; + struct work_struct dynamic_ps_enable_work; + struct work_struct dynamic_ps_disable_work; + struct timer_list dynamic_ps_timer; + struct notifier_block network_latency_notifier; + + int user_power_level; /* in dBm */ + int power_constr_level; /* in dBm */ + + enum ieee80211_smps_mode smps_mode; + + struct work_struct restart_work; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct local_debugfsdentries { + struct dentry *rcdir; + struct dentry *stations; + struct dentry *keys; + } debugfs; +#endif +}; + +static inline struct ieee80211_sub_if_data * +IEEE80211_DEV_TO_SUB_IF(struct net_device *dev) +{ + return netdev_priv(dev); +} + +/* this struct represents 802.11n's RA/TID combination along with our vif */ +struct ieee80211_ra_tid { + struct ieee80211_vif *vif; + u8 ra[ETH_ALEN]; + u16 tid; +}; + +/* Parsed Information Elements */ +struct ieee802_11_elems { + u8 *ie_start; + size_t total_len; + + /* pointers to IEs */ + u8 *ssid; + u8 *supp_rates; + u8 *fh_params; + u8 *ds_params; + u8 *cf_params; + struct ieee80211_tim_ie *tim; + u8 *ibss_params; + u8 *challenge; + u8 *wpa; + u8 *rsn; + u8 *erp_info; + u8 *ext_supp_rates; + u8 *wmm_info; + u8 *wmm_param; + struct ieee80211_ht_cap *ht_cap_elem; + struct ieee80211_ht_info *ht_info_elem; + struct ieee80211_meshconf_ie *mesh_config; + u8 *mesh_id; + u8 *peer_link; + u8 *preq; + u8 *prep; + u8 *perr; + struct ieee80211_rann_ie *rann; + u8 *ch_switch_elem; + u8 *country_elem; + u8 *pwr_constr_elem; + u8 *quiet_elem; /* first quite element */ + u8 *timeout_int; + + /* length of them, respectively */ + u8 ssid_len; + u8 supp_rates_len; + u8 fh_params_len; + u8 ds_params_len; + u8 cf_params_len; + u8 tim_len; + u8 ibss_params_len; + u8 challenge_len; + u8 wpa_len; + u8 rsn_len; + u8 erp_info_len; + u8 ext_supp_rates_len; + u8 wmm_info_len; + u8 wmm_param_len; + u8 mesh_id_len; + u8 peer_link_len; + u8 preq_len; + u8 prep_len; + u8 perr_len; + u8 ch_switch_elem_len; + u8 country_elem_len; + u8 pwr_constr_elem_len; + u8 quiet_elem_len; + u8 num_of_quiet_elem; /* can be more the one */ + u8 timeout_int_len; +}; + +static inline struct ieee80211_local *hw_to_local( + struct ieee80211_hw *hw) +{ + return container_of(hw, struct ieee80211_local, hw); +} + +static inline struct ieee80211_hw *local_to_hw( + struct ieee80211_local *local) +{ + return &local->hw; +} + + +static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) +{ + return compare_ether_addr(raddr, addr) == 0 || + is_broadcast_ether_addr(raddr); +} + + +int ieee80211_hw_config(struct ieee80211_local *local, u32 changed); +void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); +void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, + u32 changed); +void ieee80211_configure_filter(struct ieee80211_local *local); +u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); + +extern bool ieee80211_disable_40mhz_24ghz; + +/* STA code */ +void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); +int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, + struct cfg80211_auth_request *req); +int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, + struct cfg80211_assoc_request *req); +int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, + struct cfg80211_deauth_request *req, + void *cookie); +int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, + struct cfg80211_disassoc_request *req, + void *cookie); +int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, + 
struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + const u8 *buf, size_t len, u64 *cookie); +ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +void ieee80211_send_pspoll(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); +void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); +int ieee80211_max_network_latency(struct notifier_block *nb, + unsigned long data, void *dummy); +void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel_sw_ie *sw_elem, + struct ieee80211_bss *bss); +void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); + +/* IBSS code */ +void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); +void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); +ieee80211_rx_result +ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); +struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, + u8 *bssid, u8 *addr, u32 supp_rates, + gfp_t gfp); +int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, + struct cfg80211_ibss_params *params); +int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); +void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata); +void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata); + +/* scan/BSS handling */ +void ieee80211_scan_work(struct work_struct *work); +int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, + const u8 *ssid, u8 ssid_len); +int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, + struct cfg80211_scan_request *req); +void ieee80211_scan_cancel(struct ieee80211_local *local); +ieee80211_rx_result +ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); + +void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); +struct ieee80211_bss * +ieee80211_bss_info_update(struct ieee80211_local *local, + struct ieee80211_rx_status *rx_status, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee802_11_elems *elems, + struct ieee80211_channel *channel, + bool beacon); +struct ieee80211_bss * +ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, + u8 *ssid, u8 ssid_len); +void ieee80211_rx_bss_put(struct ieee80211_local *local, + struct ieee80211_bss *bss); + +/* off-channel helpers */ +void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local); +void ieee80211_offchannel_stop_station(struct ieee80211_local *local); +void ieee80211_offchannel_return(struct ieee80211_local *local, + bool enable_beaconing); + +/* interface handling */ +int ieee80211_iface_init(void); +void ieee80211_iface_exit(void); +int ieee80211_if_add(struct ieee80211_local *local, const char *name, + struct net_device **new_dev, enum nl80211_iftype type, + struct vif_params *params); +int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type); +void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); +void ieee80211_remove_interfaces(struct ieee80211_local *local); +u32 __ieee80211_recalc_idle(struct ieee80211_local *local); +void ieee80211_recalc_idle(struct ieee80211_local *local); + +static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) +{ + return netif_running(sdata->dev); +} + +/* tx handling */ +void ieee80211_clear_tx_pending(struct ieee80211_local *local); +void 
ieee80211_tx_pending(unsigned long data); +netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, + struct net_device *dev); +netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, + struct net_device *dev); + +/* + * radiotap header for status frames + */ +struct ieee80211_tx_status_rtap_hdr { + struct ieee80211_radiotap_header hdr; + u8 rate; + u8 padding_for_rate; + __le16 tx_flags; + u8 data_retries; +} __attribute__ ((packed)); + + +/* HT */ +void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, + struct ieee80211_ht_cap *ht_cap_ie, + struct ieee80211_sta_ht_cap *ht_cap); +void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn); +void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, + const u8 *da, u16 tid, + u16 initiator, u16 reason_code); +int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps, const u8 *da, + const u8 *bssid); + +void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da, + u16 tid, u16 initiator, u16 reason); +void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, + u16 initiator, u16 reason); +void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta); +void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, size_t len); +void ieee80211_process_addba_resp(struct ieee80211_local *local, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, + size_t len); +void ieee80211_process_addba_request(struct ieee80211_local *local, + struct sta_info *sta, + struct ieee80211_mgmt *mgmt, + size_t len); + +int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + enum ieee80211_back_parties initiator); +int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, + enum ieee80211_back_parties initiator); + +/* Spectrum management */ +void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len); + +/* Suspend/resume and hw reconfiguration */ +int ieee80211_reconfig(struct ieee80211_local *local); +void ieee80211_stop_device(struct ieee80211_local *local); + +#ifdef CONFIG_PM +int __ieee80211_suspend(struct ieee80211_hw *hw); + +static inline int __ieee80211_resume(struct ieee80211_hw *hw) +{ + return ieee80211_reconfig(hw_to_local(hw)); +} +#else +static inline int __ieee80211_suspend(struct ieee80211_hw *hw) +{ + return 0; +} + +static inline int __ieee80211_resume(struct ieee80211_hw *hw) +{ + return 0; +} +#endif + +/* utility functions/constants */ +extern void *mac80211_wiphy_privid; /* for wiphy privid */ +u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, + enum nl80211_iftype type); +int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, + int rate, int erp, int short_preamble); +void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, + struct ieee80211_hdr *hdr, const u8 *tsc, + gfp_t gfp); +void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); +void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); +void ieee802_11_parse_elems(u8 *start, size_t len, + struct ieee802_11_elems *elems); +u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, + struct ieee802_11_elems *elems, + u64 filter, u32 crc); +u32 ieee80211_mandatory_rates(struct ieee80211_local *local, + enum ieee80211_band band); + +void ieee80211_dynamic_ps_enable_work(struct work_struct *work); +void 
ieee80211_dynamic_ps_disable_work(struct work_struct *work); +void ieee80211_dynamic_ps_timer(unsigned long data); +void ieee80211_send_nullfunc(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + int powersave); +void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_hdr *hdr); +void ieee80211_beacon_loss_work(struct work_struct *work); + +void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, + enum queue_stop_reason reason); +void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, + enum queue_stop_reason reason); +void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason); +void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason); +void ieee80211_add_pending_skb(struct ieee80211_local *local, + struct sk_buff *skb); +int ieee80211_add_pending_skbs(struct ieee80211_local *local, + struct sk_buff_head *skbs); + +void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, + u16 transaction, u16 auth_alg, + u8 *extra, size_t extra_len, const u8 *bssid, + const u8 *key, u8 key_len, u8 key_idx); +int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, + const u8 *ie, size_t ie_len, + enum ieee80211_band band); +void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len); + +void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, + const size_t supp_rates_len, + const u8 *supp_rates); +u32 ieee80211_sta_get_rates(struct ieee80211_local *local, + struct ieee802_11_elems *elems, + enum ieee80211_band band); +int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, + enum ieee80211_smps_mode smps_mode); +void ieee80211_recalc_smps(struct ieee80211_local *local, + struct ieee80211_sub_if_data *forsdata); + +size_t ieee80211_ie_split(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, size_t offset); +size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); + +/* internal work items */ +void ieee80211_work_init(struct ieee80211_local *local); +void ieee80211_add_work(struct ieee80211_work *wk); +void free_work(struct ieee80211_work *wk); +void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata); +ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int duration, u64 *cookie); +int ieee80211_wk_cancel_remain_on_channel( + struct ieee80211_sub_if_data *sdata, u64 cookie); + +#ifdef CONFIG_MAC80211_NOINLINE +#define debug_noinline noinline +#else +#define debug_noinline +#endif + +#endif /* IEEE80211_I_H */ diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c new file mode 100644 index 0000000..9fe5d6d --- /dev/null +++ b/net/mac80211/iface.c @@ -0,0 +1,1111 @@ +/* + * Interface handling (except master interface) + * + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright (c) 2006 Jiri Benc + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include "ieee80211_i.h" +#include "sta_info.h" +#include "debugfs_netdev.h" +#include "mesh.h" +#include "led.h" +#include "driver-ops.h" +#include "wme.h" + +/** + * DOC: Interface list locking + * + * The interface list in each struct ieee80211_local is protected + * three-fold: + * + * (1) modifications may only be done under the RTNL + * (2) modifications and readers are protected against each other by + * the iflist_mtx. + * (3) modifications are done in an RCU manner so atomic readers + * can traverse the list in RCU-safe blocks. + * + * As a consequence, reads (traversals) of the list can be protected + * by either the RTNL, the iflist_mtx or RCU. + */ + + +static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) +{ + int meshhdrlen; + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0; + + /* FIX: what would be proper limits for MTU? + * This interface uses 802.3 frames. */ + if (new_mtu < 256 || + new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { + return -EINVAL; + } + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + dev->mtu = new_mtu; + return 0; +} + +static int ieee80211_change_mac(struct net_device *dev, void *addr) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct sockaddr *sa = addr; + int ret; + + if (ieee80211_sdata_running(sdata)) + return -EBUSY; + + ret = eth_mac_addr(dev, sa); + + if (ret == 0) + memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN); + + return ret; +} + +static inline int identical_mac_addr_allowed(int type1, int type2) +{ + return type1 == NL80211_IFTYPE_MONITOR || + type2 == NL80211_IFTYPE_MONITOR || + (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) || + (type1 == NL80211_IFTYPE_WDS && + (type2 == NL80211_IFTYPE_WDS || + type2 == NL80211_IFTYPE_AP)) || + (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) || + (type1 == NL80211_IFTYPE_AP_VLAN && + (type2 == NL80211_IFTYPE_AP || + type2 == NL80211_IFTYPE_AP_VLAN)); +} + +static int ieee80211_open(struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_sub_if_data *nsdata; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + u32 changed = 0; + int res; + u32 hw_reconf_flags = 0; + u8 null_addr[ETH_ALEN] = {0}; + + /* fail early if user set an invalid address */ + if (compare_ether_addr(dev->dev_addr, null_addr) && + !is_valid_ether_addr(dev->dev_addr)) + return -EADDRNOTAVAIL; + + /* we hold the RTNL here so can safely walk the list */ + list_for_each_entry(nsdata, &local->interfaces, list) { + struct net_device *ndev = nsdata->dev; + + if (ndev != dev && ieee80211_sdata_running(nsdata)) { + /* + * Allow only a single IBSS interface to be up at any + * time. This is restricted because beacon distribution + * cannot work properly if both are in the same IBSS. + * + * To remove this restriction we'd have to disallow them + * from setting the same SSID on different IBSS interfaces + * belonging to the same hardware. Then, however, we're + * faced with having to adopt two different TSF timers... + */ + if (sdata->vif.type == NL80211_IFTYPE_ADHOC && + nsdata->vif.type == NL80211_IFTYPE_ADHOC) + return -EBUSY; + + /* + * The remaining checks are only performed for interfaces + * with the same MAC address. 
+ */ + if (compare_ether_addr(dev->dev_addr, ndev->dev_addr)) + continue; + + /* + * check whether it may have the same address + */ + if (!identical_mac_addr_allowed(sdata->vif.type, + nsdata->vif.type)) + return -ENOTUNIQ; + + /* + * can only add VLANs to enabled APs + */ + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + nsdata->vif.type == NL80211_IFTYPE_AP) + sdata->bss = &nsdata->u.ap; + } + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_WDS: + if (!is_valid_ether_addr(sdata->u.wds.remote_addr)) + return -ENOLINK; + break; + case NL80211_IFTYPE_AP_VLAN: + if (!sdata->bss) + return -ENOLINK; + list_add(&sdata->u.vlan.list, &sdata->bss->vlans); + break; + case NL80211_IFTYPE_AP: + sdata->bss = &sdata->u.ap; + break; + case NL80211_IFTYPE_MESH_POINT: + if (!ieee80211_vif_is_mesh(&sdata->vif)) + break; + /* mesh ifaces must set allmulti to forward mcast traffic */ + atomic_inc(&local->iff_allmultis); + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_ADHOC: + /* no special treatment */ + break; + case NL80211_IFTYPE_UNSPECIFIED: + case __NL80211_IFTYPE_AFTER_LAST: + /* cannot happen */ + WARN_ON(1); + break; + } + + if (local->open_count == 0) { + res = drv_start(local); + if (res) + goto err_del_bss; + /* we're brought up, everything changes */ + hw_reconf_flags = ~0; + ieee80211_led_radio(local, true); + } + + /* + * Check all interfaces and copy the hopefully now-present + * MAC address to those that have the special null one. + */ + list_for_each_entry(nsdata, &local->interfaces, list) { + struct net_device *ndev = nsdata->dev; + + /* + * No need to check running since we do not allow + * it to start up with this invalid address. + */ + if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) { + memcpy(ndev->dev_addr, + local->hw.wiphy->perm_addr, + ETH_ALEN); + memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN); + } + } + + /* + * Validate the MAC address for this device. 
+ */ + if (!is_valid_ether_addr(dev->dev_addr)) { + if (!local->open_count) + drv_stop(local); + return -EADDRNOTAVAIL; + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + /* no need to tell driver */ + break; + case NL80211_IFTYPE_MONITOR: + if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { + local->cooked_mntrs++; + break; + } + + /* must be before the call to ieee80211_configure_filter */ + local->monitors++; + if (local->monitors == 1) { + local->hw.conf.flags |= IEEE80211_CONF_MONITOR; + hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; + } + + if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) + local->fif_fcsfail++; + if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) + local->fif_plcpfail++; + if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) { + local->fif_control++; + local->fif_pspoll++; + } + if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) + local->fif_other_bss++; + + ieee80211_configure_filter(local); + break; + default: + res = drv_add_interface(local, &sdata->vif); + if (res) + goto err_stop; + + if (ieee80211_vif_is_mesh(&sdata->vif)) { + local->fif_other_bss++; + ieee80211_configure_filter(local); + + ieee80211_start_mesh(sdata); + } else if (sdata->vif.type == NL80211_IFTYPE_AP) { + local->fif_pspoll++; + + ieee80211_configure_filter(local); + } + + changed |= ieee80211_reset_erp_info(sdata); + ieee80211_bss_info_change_notify(sdata, changed); + ieee80211_enable_keys(sdata); + + if (sdata->vif.type == NL80211_IFTYPE_STATION) + netif_carrier_off(dev); + else + netif_carrier_on(dev); + } + + if (sdata->vif.type == NL80211_IFTYPE_WDS) { + /* Create STA entry for the WDS peer */ + sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr, + GFP_KERNEL); + if (!sta) { + res = -ENOMEM; + goto err_del_interface; + } + + /* no locking required since STA is not live yet */ + sta->flags |= WLAN_STA_AUTHORIZED; + + res = sta_info_insert(sta); + if (res) { + /* STA has been freed */ + goto err_del_interface; + } + } + + /* + * set_multicast_list will be invoked by the networking core + * which will check whether any increments here were done in + * error and sync them down to the hardware as filter flags. + */ + if (sdata->flags & IEEE80211_SDATA_ALLMULTI) + atomic_inc(&local->iff_allmultis); + + if (sdata->flags & IEEE80211_SDATA_PROMISC) + atomic_inc(&local->iff_promiscs); + + hw_reconf_flags |= __ieee80211_recalc_idle(local); + + local->open_count++; + if (hw_reconf_flags) { + ieee80211_hw_config(local, hw_reconf_flags); + /* + * set default queue parameters so drivers don't + * need to initialise the hardware if the hardware + * doesn't start up with sane defaults + */ + ieee80211_set_wmm_default(sdata); + } + + ieee80211_recalc_ps(local, -1); + + /* + * ieee80211_sta_work is disabled while network interface + * is down. Therefore, some configuration changes may not + * yet be effective. Trigger execution of ieee80211_sta_work + * to fix this. 
+ */ + if (sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); + + netif_tx_start_all_queues(dev); + + return 0; + err_del_interface: + drv_remove_interface(local, &sdata->vif); + err_stop: + if (!local->open_count) + drv_stop(local); + err_del_bss: + sdata->bss = NULL; + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + list_del(&sdata->u.vlan.list); + return res; +} + +static int ieee80211_stop(struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + unsigned long flags; + struct sk_buff *skb, *tmp; + u32 hw_reconf_flags = 0; + int i; + + /* + * Stop TX on this interface first. + */ + netif_tx_stop_all_queues(dev); + + /* + * Purge work for this interface. + */ + ieee80211_work_purge(sdata); + + /* + * Now delete all active aggregation sessions. + */ + rcu_read_lock(); + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sta->sdata == sdata) + ieee80211_sta_tear_down_BA_sessions(sta); + } + + rcu_read_unlock(); + + /* + * Remove all stations associated with this interface. + * + * This must be done before calling ops->remove_interface() + * because otherwise we can later invoke ops->sta_notify() + * whenever the STAs are removed, and that invalidates driver + * assumptions about always getting a vif pointer that is valid + * (because if we remove a STA after ops->remove_interface() + * the driver will have removed the vif info already!) + * + * We could relax this and only unlink the stations from the + * hash table and list but keep them on a per-sdata list that + * will be inserted back again when the interface is brought + * up again, but I don't currently see a use case for that, + * except with WDS which gets a STA entry created when it is + * brought up. + */ + sta_info_flush(local, sdata); + + /* + * Don't count this interface for promisc/allmulti while it + * is down. dev_mc_unsync() will invoke set_multicast_list + * on the master interface which will sync these down to the + * hardware as filter flags. 
+ */ + if (sdata->flags & IEEE80211_SDATA_ALLMULTI) + atomic_dec(&local->iff_allmultis); + + if (sdata->flags & IEEE80211_SDATA_PROMISC) + atomic_dec(&local->iff_promiscs); + + if (sdata->vif.type == NL80211_IFTYPE_AP) + local->fif_pspoll--; + + netif_addr_lock_bh(dev); + spin_lock_bh(&local->filter_lock); + __dev_addr_unsync(&local->mc_list, &local->mc_count, + &dev->mc_list, &dev->mc_count); + spin_unlock_bh(&local->filter_lock); + netif_addr_unlock_bh(dev); + + ieee80211_configure_filter(local); + + del_timer_sync(&local->dynamic_ps_timer); + cancel_work_sync(&local->dynamic_ps_enable_work); + + /* APs need special treatment */ + if (sdata->vif.type == NL80211_IFTYPE_AP) { + struct ieee80211_sub_if_data *vlan, *tmpsdata; + struct beacon_data *old_beacon = sdata->u.ap.beacon; + + /* remove beacon */ + rcu_assign_pointer(sdata->u.ap.beacon, NULL); + synchronize_rcu(); + kfree(old_beacon); + + /* down all dependent devices, that is VLANs */ + list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, + u.vlan.list) + dev_close(vlan->dev); + WARN_ON(!list_empty(&sdata->u.ap.vlans)); + } + + local->open_count--; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + list_del(&sdata->u.vlan.list); + /* no need to tell driver */ + break; + case NL80211_IFTYPE_MONITOR: + if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { + local->cooked_mntrs--; + break; + } + + local->monitors--; + if (local->monitors == 0) { + local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; + hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; + } + + if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) + local->fif_fcsfail--; + if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) + local->fif_plcpfail--; + if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) { + local->fif_pspoll--; + local->fif_control--; + } + if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) + local->fif_other_bss--; + + ieee80211_configure_filter(local); + break; + case NL80211_IFTYPE_STATION: + del_timer_sync(&sdata->u.mgd.chswitch_timer); + del_timer_sync(&sdata->u.mgd.timer); + del_timer_sync(&sdata->u.mgd.conn_mon_timer); + del_timer_sync(&sdata->u.mgd.bcn_mon_timer); + /* + * If any of the timers fired while we waited for it, it will + * have queued its work. Now the work will be running again + * but will not rearm the timer again because it checks + * whether the interface is running, which, at this point, + * it no longer is. + */ + cancel_work_sync(&sdata->u.mgd.work); + cancel_work_sync(&sdata->u.mgd.chswitch_work); + cancel_work_sync(&sdata->u.mgd.monitor_work); + cancel_work_sync(&sdata->u.mgd.beacon_loss_work); + + /* + * When we get here, the interface is marked down. + * Call synchronize_rcu() to wait for the RX path + * should it be using the interface and enqueuing + * frames at this very time on another CPU. 
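Note: the beacon removal in the AP branch above (clear the pointer, synchronize_rcu(), then kfree()) is the classic RCU unpublish-then-free sequence. Reduced to its essentials, with illustrative names that are not part of this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_cfg {
	int value;
};

static struct my_cfg *active_cfg;	/* readers dereference this under RCU */

static int read_value(void)
{
	struct my_cfg *cfg;
	int val = 0;

	rcu_read_lock();
	cfg = rcu_dereference(active_cfg);
	if (cfg)
		val = cfg->value;
	rcu_read_unlock();

	return val;
}

static void retract_cfg(void)
{
	struct my_cfg *old = active_cfg;

	rcu_assign_pointer(active_cfg, NULL);	/* unpublish */
	synchronize_rcu();			/* wait out all current readers */
	kfree(old);				/* nobody can still reach it now */
}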
+ */ + synchronize_rcu(); + skb_queue_purge(&sdata->u.mgd.skb_queue); + /* fall through */ + case NL80211_IFTYPE_ADHOC: + if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { + del_timer_sync(&sdata->u.ibss.timer); + cancel_work_sync(&sdata->u.ibss.work); + synchronize_rcu(); + skb_queue_purge(&sdata->u.ibss.skb_queue); + } + /* fall through */ + case NL80211_IFTYPE_MESH_POINT: + if (ieee80211_vif_is_mesh(&sdata->vif)) { + /* other_bss and allmulti are always set on mesh + * ifaces */ + local->fif_other_bss--; + atomic_dec(&local->iff_allmultis); + + ieee80211_configure_filter(local); + + ieee80211_stop_mesh(sdata); + } + /* fall through */ + default: + if (local->scan_sdata == sdata) + ieee80211_scan_cancel(local); + + /* + * Disable beaconing for AP and mesh, IBSS can't + * still be joined to a network at this point. + */ + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT) { + ieee80211_bss_info_change_notify(sdata, + BSS_CHANGED_BEACON_ENABLED); + } + + /* disable all keys for as long as this netdev is down */ + ieee80211_disable_keys(sdata); + drv_remove_interface(local, &sdata->vif); + } + + sdata->bss = NULL; + + hw_reconf_flags |= __ieee80211_recalc_idle(local); + + ieee80211_recalc_ps(local, -1); + + if (local->open_count == 0) { + ieee80211_clear_tx_pending(local); + ieee80211_stop_device(local); + + /* no reconfiguring after stop! */ + hw_reconf_flags = 0; + } + + /* do after stop to avoid reconfiguring when we stop anyway */ + if (hw_reconf_flags) + ieee80211_hw_config(local, hw_reconf_flags); + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { + skb_queue_walk_safe(&local->pending[i], skb, tmp) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + if (info->control.vif == &sdata->vif) { + __skb_unlink(skb, &local->pending[i]); + dev_kfree_skb_irq(skb); + } + } + } + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + return 0; +} + +static void ieee80211_set_multicast_list(struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + int allmulti, promisc, sdata_allmulti, sdata_promisc; + + allmulti = !!(dev->flags & IFF_ALLMULTI); + promisc = !!(dev->flags & IFF_PROMISC); + sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI); + sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC); + + if (allmulti != sdata_allmulti) { + if (dev->flags & IFF_ALLMULTI) + atomic_inc(&local->iff_allmultis); + else + atomic_dec(&local->iff_allmultis); + sdata->flags ^= IEEE80211_SDATA_ALLMULTI; + } + + if (promisc != sdata_promisc) { + if (dev->flags & IFF_PROMISC) + atomic_inc(&local->iff_promiscs); + else + atomic_dec(&local->iff_promiscs); + sdata->flags ^= IEEE80211_SDATA_PROMISC; + } + spin_lock_bh(&local->filter_lock); + __dev_addr_sync(&local->mc_list, &local->mc_count, + &dev->mc_list, &dev->mc_count); + spin_unlock_bh(&local->filter_lock); + ieee80211_queue_work(&local->hw, &local->reconfig_filter); +} + +/* + * Called when the netdev is removed or, by the code below, before + * the interface type changes. 
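Note: ieee80211_set_multicast_list() above keeps each interface's SDATA_ALLMULTI/PROMISC flag and a device-wide atomic counter in lock-step, and ieee80211_configure_filter() later turns a non-zero counter into a FIF_* filter flag. The same bookkeeping in isolation, a sketch with illustrative names (the <linux/atomic.h> header is assumed from current kernels; older trees get atomic_t via <asm/atomic.h>):

#include <linux/atomic.h>
#include <linux/types.h>

#define MY_FLAG_ALLMULTI	0x1

static atomic_t iff_allmultis = ATOMIC_INIT(0);	/* one per device */

/* per-interface toggle: keep the device counter in sync with the flag */
static void sync_allmulti(unsigned int *if_flags, bool want_allmulti)
{
	bool have_allmulti = *if_flags & MY_FLAG_ALLMULTI;

	if (want_allmulti == have_allmulti)
		return;

	if (want_allmulti)
		atomic_inc(&iff_allmultis);
	else
		atomic_dec(&iff_allmultis);

	*if_flags ^= MY_FLAG_ALLMULTI;
}

/* filter recalculation: any interface wanting allmulti enables it globally */
static bool need_allmulti(void)
{
	return atomic_read(&iff_allmultis) > 0;
}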
+ */ +static void ieee80211_teardown_sdata(struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct beacon_data *beacon; + struct sk_buff *skb; + int flushed; + int i; + + /* free extra data */ + ieee80211_free_keys(sdata); + + ieee80211_debugfs_remove_netdev(sdata); + + for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) + __skb_queue_purge(&sdata->fragments[i].skb_list); + sdata->fragment_next = 0; + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + beacon = sdata->u.ap.beacon; + rcu_assign_pointer(sdata->u.ap.beacon, NULL); + synchronize_rcu(); + kfree(beacon); + + while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { + local->total_ps_buffered--; + dev_kfree_skb(skb); + } + + break; + case NL80211_IFTYPE_MESH_POINT: + if (ieee80211_vif_is_mesh(&sdata->vif)) + mesh_rmc_free(sdata); + break; + case NL80211_IFTYPE_ADHOC: + if (WARN_ON(sdata->u.ibss.presp)) + kfree_skb(sdata->u.ibss.presp); + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_MONITOR: + break; + case NL80211_IFTYPE_UNSPECIFIED: + case __NL80211_IFTYPE_AFTER_LAST: + BUG(); + break; + } + + flushed = sta_info_flush(local, sdata); + WARN_ON(flushed); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) +static u16 ieee80211_netdev_select_queue(struct net_device *dev, + struct sk_buff *skb) +{ + return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops ieee80211_dataif_ops = { + .ndo_open = ieee80211_open, + .ndo_stop = ieee80211_stop, + .ndo_uninit = ieee80211_teardown_sdata, + .ndo_start_xmit = ieee80211_subif_start_xmit, + .ndo_set_multicast_list = ieee80211_set_multicast_list, + .ndo_change_mtu = ieee80211_change_mtu, + .ndo_set_mac_address = ieee80211_change_mac, + .ndo_select_queue = ieee80211_netdev_select_queue, +}; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) +static u16 ieee80211_monitor_select_queue(struct net_device *dev, + struct sk_buff *skb) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct ieee80211_hdr *hdr; + struct ieee80211_radiotap_header *rtap = (void *)skb->data; + u8 *p; + + if (local->hw.queues < 4) + return 0; + + if (skb->len < 4 || + skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */) + return 0; /* doesn't matter, frame will be dropped */ + + hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); + + if (!ieee80211_is_data(hdr->frame_control)) { + skb->priority = 7; + return ieee802_1d_to_ac[skb->priority]; + } + if (!ieee80211_is_data_qos(hdr->frame_control)) { + skb->priority = 0; + return ieee802_1d_to_ac[skb->priority]; + } + + p = ieee80211_get_qos_ctl(hdr); + skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; + + return ieee80211_downgrade_queue(local, skb); +} +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const struct net_device_ops ieee80211_monitorif_ops = { + .ndo_open = ieee80211_open, + .ndo_stop = ieee80211_stop, + .ndo_uninit = ieee80211_teardown_sdata, + .ndo_start_xmit = ieee80211_monitor_start_xmit, + .ndo_set_multicast_list = ieee80211_set_multicast_list, + .ndo_change_mtu = ieee80211_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_select_queue = ieee80211_monitor_select_queue, +}; +#endif + +static void ieee80211_if_setup(struct net_device *dev) +{ + 
ether_setup(dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + dev->netdev_ops = &ieee80211_dataif_ops; +#else + dev->hard_start_xmit = ieee80211_subif_start_xmit; + dev->set_multicast_list = ieee80211_set_multicast_list; + dev->change_mtu = ieee80211_change_mtu; + dev->set_mac_address = ieee80211_change_mac; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + dev->select_queue = ieee80211_netdev_select_queue; +#endif + dev->open = ieee80211_open; + dev->stop = ieee80211_stop; + /* we will validate the address ourselves in ->open */ + dev->validate_addr = NULL; +#endif + dev->destructor = free_netdev; +} + + +/* + * Helper function to initialise an interface to a specific type. + */ +static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type) +{ + /* clear type-dependent union */ + memset(&sdata->u, 0, sizeof(sdata->u)); + + /* and set some type-dependent values */ + sdata->vif.type = type; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + sdata->dev->netdev_ops = &ieee80211_dataif_ops; +#else + sdata->dev->hard_start_xmit = ieee80211_subif_start_xmit; +#endif + sdata->wdev.iftype = type; + + /* only monitor differs */ + sdata->dev->type = ARPHRD_ETHER; + + switch (type) { + case NL80211_IFTYPE_AP: + skb_queue_head_init(&sdata->u.ap.ps_bc_buf); + INIT_LIST_HEAD(&sdata->u.ap.vlans); + break; + case NL80211_IFTYPE_STATION: + ieee80211_sta_setup_sdata(sdata); + break; + case NL80211_IFTYPE_ADHOC: + ieee80211_ibss_setup_sdata(sdata); + break; + case NL80211_IFTYPE_MESH_POINT: + if (ieee80211_vif_is_mesh(&sdata->vif)) + ieee80211_mesh_init_sdata(sdata); + break; + case NL80211_IFTYPE_MONITOR: + sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + sdata->dev->netdev_ops = &ieee80211_monitorif_ops; +#else + sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + sdata->dev->select_queue = ieee80211_monitor_select_queue; +#endif + sdata->dev->set_mac_address = eth_mac_addr; +#endif + sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | + MONITOR_FLAG_OTHER_BSS; + break; + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_AP_VLAN: + break; + case NL80211_IFTYPE_UNSPECIFIED: + case __NL80211_IFTYPE_AFTER_LAST: + BUG(); + break; + } + + ieee80211_debugfs_add_netdev(sdata); +} + +int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, + enum nl80211_iftype type) +{ + ASSERT_RTNL(); + + if (type == sdata->vif.type) + return 0; + + /* Setting ad-hoc mode on non-IBSS channel is not supported. */ + if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS && + type == NL80211_IFTYPE_ADHOC) + return -EOPNOTSUPP; + + /* + * We could, here, on changes between IBSS/STA/MESH modes, + * invoke an MLME function instead that disassociates etc. + * and goes into the requested mode. + */ + + if (ieee80211_sdata_running(sdata)) + return -EBUSY; + + /* Purge and reset type-dependent state. 
*/ + ieee80211_teardown_sdata(sdata->dev); + ieee80211_setup_sdata(sdata, type); + + /* reset some values that shouldn't be kept across type changes */ + sdata->vif.bss_conf.basic_rates = + ieee80211_mandatory_rates(sdata->local, + sdata->local->hw.conf.channel->band); + sdata->drop_unencrypted = 0; + if (type == NL80211_IFTYPE_STATION) + sdata->u.mgd.use_4addr = false; + + return 0; +} + +int ieee80211_if_add(struct ieee80211_local *local, const char *name, + struct net_device **new_dev, enum nl80211_iftype type, + struct vif_params *params) +{ + struct net_device *ndev; + struct ieee80211_sub_if_data *sdata = NULL; + int ret, i; + + ASSERT_RTNL(); + + ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size, + name, ieee80211_if_setup, local->hw.queues); + if (!ndev) + return -ENOMEM; + dev_net_set(ndev, wiphy_net(local->hw.wiphy)); + +/* This is an optimization, just ignore for older kernels */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) + ndev->needed_headroom = local->tx_headroom + + 4*6 /* four MAC addresses */ + + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ + + 6 /* mesh */ + + 8 /* rfc1042/bridge tunnel */ + - ETH_HLEN /* ethernet hard_header_len */ + + IEEE80211_ENCRYPT_HEADROOM; + ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; +#endif + + ret = dev_alloc_name(ndev, ndev->name); + if (ret < 0) + goto fail; + + memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); + memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN); + SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); + + /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ + sdata = netdev_priv(ndev); + ndev->ieee80211_ptr = &sdata->wdev; + memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN); + memcpy(sdata->name, ndev->name, IFNAMSIZ); + + /* initialise type-independent data */ + sdata->wdev.wiphy = local->hw.wiphy; + sdata->local = local; + sdata->dev = ndev; + + for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) + skb_queue_head_init(&sdata->fragments[i].skb_list); + + INIT_LIST_HEAD(&sdata->key_list); + + for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + struct ieee80211_supported_band *sband; + sband = local->hw.wiphy->bands[i]; + sdata->rc_rateidx_mask[i] = + sband ? (1 << sband->n_bitrates) - 1 : 0; + } + + /* setup type-dependent data */ + ieee80211_setup_sdata(sdata, type); + + if (params) { + ndev->ieee80211_ptr->use_4addr = params->use_4addr; + if (type == NL80211_IFTYPE_STATION) + sdata->u.mgd.use_4addr = params->use_4addr; + } + + ret = register_netdevice(ndev); + if (ret) + goto fail; + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,28)) + ndev->uninit = ieee80211_teardown_sdata; +#endif + + if (ieee80211_vif_is_mesh(&sdata->vif) && + params && params->mesh_id_len) + ieee80211_sdata_set_mesh_id(sdata, + params->mesh_id_len, + params->mesh_id); + + mutex_lock(&local->iflist_mtx); + list_add_tail_rcu(&sdata->list, &local->interfaces); + mutex_unlock(&local->iflist_mtx); + + if (new_dev) + *new_dev = ndev; + + return 0; + + fail: + free_netdev(ndev); + return ret; +} + +void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) +{ + ASSERT_RTNL(); + + mutex_lock(&sdata->local->iflist_mtx); + list_del_rcu(&sdata->list); + mutex_unlock(&sdata->local->iflist_mtx); + + synchronize_rcu(); + unregister_netdevice(sdata->dev); +} + +/* + * Remove all interfaces, may only be called at hardware unregistration + * time because it doesn't do RCU-safe list removals. 
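Note: ieee80211_if_add() above relies on the net_device private-area idiom: the sub_if_data is carved out of the same allocation as the net_device, so netdev_priv() is plain pointer arithmetic with no extra lifetime to manage. A stripped-down sketch, assuming the 2.6-era four-argument alloc_netdev_mq() used in this patch and the pre-4.12 dev->destructor field; my_priv and my_if_add are hypothetical:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct my_priv {
	int some_state;	/* per-interface state lives inside the netdev allocation */
};

static void my_if_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->destructor = free_netdev;	/* freed when the last reference is dropped */
}

static struct net_device *my_if_add(const char *name, unsigned int queues)
{
	struct net_device *ndev;
	struct my_priv *priv;
	int ret;

	/* the private area is allocated together with the net_device */
	ndev = alloc_netdev_mq(sizeof(*priv), name, my_if_setup, queues);
	if (!ndev)
		return NULL;

	priv = netdev_priv(ndev);	/* points into that same allocation */
	priv->some_state = 0;

	ret = register_netdevice(ndev);	/* caller must hold the RTNL */
	if (ret) {
		free_netdev(ndev);
		return NULL;
	}

	return ndev;
}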
+ */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) +void ieee80211_remove_interfaces(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata, *tmp; + LIST_HEAD(unreg_list); + + ASSERT_RTNL(); + + mutex_lock(&local->iflist_mtx); + list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { + list_del(&sdata->list); + + unregister_netdevice_queue(sdata->dev, &unreg_list); + } + mutex_unlock(&local->iflist_mtx); + unregister_netdevice_many(&unreg_list); +} +#else +void ieee80211_remove_interfaces(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata, *tmp; + + ASSERT_RTNL(); + + list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { + mutex_lock(&local->iflist_mtx); + list_del(&sdata->list); + mutex_unlock(&local->iflist_mtx); + + unregister_netdevice(sdata->dev); + } +} +#endif + +static u32 ieee80211_idle_off(struct ieee80211_local *local, + const char *reason) +{ + if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) + return 0; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: device no longer idle - %s\n", + wiphy_name(local->hw.wiphy), reason); +#endif + + local->hw.conf.flags &= ~IEEE80211_CONF_IDLE; + return IEEE80211_CONF_CHANGE_IDLE; +} + +static u32 ieee80211_idle_on(struct ieee80211_local *local) +{ + if (local->hw.conf.flags & IEEE80211_CONF_IDLE) + return 0; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: device now idle\n", + wiphy_name(local->hw.wiphy)); +#endif + + drv_flush(local, false); + + local->hw.conf.flags |= IEEE80211_CONF_IDLE; + return IEEE80211_CONF_CHANGE_IDLE; +} + +u32 __ieee80211_recalc_idle(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + int count = 0; + + if (!list_empty(&local->work_list)) + return ieee80211_idle_off(local, "working"); + + if (local->scanning) + return ieee80211_idle_off(local, "scanning"); + + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + /* do not count disabled managed interfaces */ + if (sdata->vif.type == NL80211_IFTYPE_STATION && + !sdata->u.mgd.associated) + continue; + /* do not count unused IBSS interfaces */ + if (sdata->vif.type == NL80211_IFTYPE_ADHOC && + !sdata->u.ibss.ssid_len) + continue; + /* count everything else */ + count++; + } + + if (!count) + return ieee80211_idle_on(local); + else + return ieee80211_idle_off(local, "in use"); + + return 0; +} + +void ieee80211_recalc_idle(struct ieee80211_local *local) +{ + u32 chg; + + mutex_lock(&local->iflist_mtx); + chg = __ieee80211_recalc_idle(local); + mutex_unlock(&local->iflist_mtx); + if (chg) + ieee80211_hw_config(local, chg); +} + +static int netdev_notify(struct notifier_block *nb, + unsigned long state, + void *ndev) +{ + struct net_device *dev = ndev; + struct ieee80211_sub_if_data *sdata; + + if (state != NETDEV_CHANGENAME) + return 0; + + if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) + return 0; + + if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) + return 0; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + memcpy(sdata->name, dev->name, IFNAMSIZ); + + ieee80211_debugfs_rename_netdev(sdata); + return 0; +} + +static struct notifier_block mac80211_netdev_notifier = { + .notifier_call = netdev_notify, +}; + +int ieee80211_iface_init(void) +{ + return register_netdevice_notifier(&mac80211_netdev_notifier); +} + +void ieee80211_iface_exit(void) +{ + unregister_netdevice_notifier(&mac80211_netdev_notifier); +} diff --git a/net/mac80211/key.c b/net/mac80211/key.c new file mode 
100644 index 0000000..bdf03f5 --- /dev/null +++ b/net/mac80211/key.c @@ -0,0 +1,629 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007-2008 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "debugfs_key.h" +#include "aes_ccm.h" +#include "aes_cmac.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) +#include +#endif + +/** + * DOC: Key handling basics + * + * Key handling in mac80211 is done based on per-interface (sub_if_data) + * keys and per-station keys. Since each station belongs to an interface, + * each station key also belongs to that interface. + * + * Hardware acceleration is done on a best-effort basis, for each key + * that is eligible the hardware is asked to enable that key but if + * it cannot do that they key is simply kept for software encryption. + * There is currently no way of knowing this except by looking into + * debugfs. + * + * All key operations are protected internally so you can call them at + * any time. + * + * Within mac80211, key references are, just as STA structure references, + * protected by RCU. Note, however, that some things are unprotected, + * namely the key->sta dereferences within the hardware acceleration + * functions. This means that sta_info_destroy() must flush the key todo + * list. + * + * All the direct key list manipulation functions must not sleep because + * they can operate on STA info structs that are protected by RCU. + */ + +static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + +/* key mutex: used to synchronise todo runners */ +static DEFINE_MUTEX(key_mutex); +static DEFINE_SPINLOCK(todo_lock); +static LIST_HEAD(todo_list); + +static void key_todo(struct work_struct *work) +{ + ieee80211_key_todo(); +} + +static DECLARE_WORK(todo_work, key_todo); + +/** + * add_todo - add todo item for a key + * + * @key: key to add to do item for + * @flag: todo flag(s) + * + * Must be called with IRQs or softirqs disabled. + */ +static void add_todo(struct ieee80211_key *key, u32 flag) +{ + if (!key) + return; + + spin_lock(&todo_lock); + key->flags |= flag; + /* + * Remove again if already on the list so that we move it to the end. + */ + if (!list_empty(&key->todo)) + list_del(&key->todo); + list_add_tail(&key->todo, &todo_list); + schedule_work(&todo_work); + spin_unlock(&todo_lock); +} + +/** + * ieee80211_key_lock - lock the mac80211 key operation lock + * + * This locks the (global) mac80211 key operation lock, all + * key operations must be done under this lock. 
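Note: the "best-effort" hardware acceleration described in the key-handling DOC above works because drv_set_key() is allowed to fail: a driver refuses keys it cannot offload and mac80211 keeps them in software. A hypothetical driver callback against the ALG_*-based set_key() API this tree uses (later kernels replaced the alg field with cipher suites):

#include <linux/errno.h>
#include <net/mac80211.h>

/* hypothetical driver: offload only CCMP, leave everything else to software */
static int my_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		      struct ieee80211_key_conf *key)
{
	if (key->alg != ALG_CCMP)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SET_KEY:
		/* program key->key / key->keyidx into the hardware here */
		return 0;
	case DISABLE_KEY:
		/* remove the key from the hardware here */
		return 0;
	}

	return -EINVAL;
}

As ieee80211_key_enable_hw_accel() below shows, -EOPNOTSUPP (and -ENOSPC) from the driver is tolerated silently; only other errors are logged.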
+ */ +static void ieee80211_key_lock(void) +{ + mutex_lock(&key_mutex); +} + +/** + * ieee80211_key_unlock - unlock the mac80211 key operation lock + */ +static void ieee80211_key_unlock(void) +{ + mutex_unlock(&key_mutex); +} + +static void assert_key_lock(void) +{ + WARN_ON(!mutex_is_locked(&key_mutex)); +} + +static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key) +{ + if (key->sta) + return &key->sta->sta; + + return NULL; +} + +static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_sta *sta; + int ret; + + assert_key_lock(); + might_sleep(); + + if (!key->local->ops->set_key) + return; + + sta = get_sta_for_key(key); + + sdata = key->sdata; + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + + ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); + + if (!ret) { + spin_lock_bh(&todo_lock); + key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; + spin_unlock_bh(&todo_lock); + } + + if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) + printk(KERN_ERR "mac80211-%s: failed to set key " + "(%d, %pM) to hardware (%d)\n", + wiphy_name(key->local->hw.wiphy), + key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); +} + +static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_sta *sta; + int ret; + + assert_key_lock(); + might_sleep(); + + if (!key || !key->local->ops->set_key) + return; + + spin_lock_bh(&todo_lock); + if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { + spin_unlock_bh(&todo_lock); + return; + } + spin_unlock_bh(&todo_lock); + + sta = get_sta_for_key(key); + sdata = key->sdata; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + + ret = drv_set_key(key->local, DISABLE_KEY, sdata, + sta, &key->conf); + + if (ret) + printk(KERN_ERR "mac80211-%s: failed to remove key " + "(%d, %pM) from hardware (%d)\n", + wiphy_name(key->local->hw.wiphy), + key->conf.keyidx, sta ? 
sta->addr : bcast_addr, ret); + + spin_lock_bh(&todo_lock); + key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; + spin_unlock_bh(&todo_lock); +} + +static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, + int idx) +{ + struct ieee80211_key *key = NULL; + + if (idx >= 0 && idx < NUM_DEFAULT_KEYS) + key = sdata->keys[idx]; + + rcu_assign_pointer(sdata->default_key, key); + + if (key) + add_todo(key, KEY_FLAG_TODO_DEFKEY); +} + +void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&sdata->local->key_lock, flags); + __ieee80211_set_default_key(sdata, idx); + spin_unlock_irqrestore(&sdata->local->key_lock, flags); +} + +static void +__ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx) +{ + struct ieee80211_key *key = NULL; + + if (idx >= NUM_DEFAULT_KEYS && + idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) + key = sdata->keys[idx]; + + rcu_assign_pointer(sdata->default_mgmt_key, key); + + if (key) + add_todo(key, KEY_FLAG_TODO_DEFMGMTKEY); +} + +void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, + int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&sdata->local->key_lock, flags); + __ieee80211_set_default_mgmt_key(sdata, idx); + spin_unlock_irqrestore(&sdata->local->key_lock, flags); +} + + +static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_key *old, + struct ieee80211_key *new) +{ + int idx, defkey, defmgmtkey; + + if (new) + list_add(&new->list, &sdata->key_list); + + if (sta) { + rcu_assign_pointer(sta->key, new); + } else { + WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx); + + if (old) + idx = old->conf.keyidx; + else + idx = new->conf.keyidx; + + defkey = old && sdata->default_key == old; + defmgmtkey = old && sdata->default_mgmt_key == old; + + if (defkey && !new) + __ieee80211_set_default_key(sdata, -1); + if (defmgmtkey && !new) + __ieee80211_set_default_mgmt_key(sdata, -1); + + rcu_assign_pointer(sdata->keys[idx], new); + if (defkey && new) + __ieee80211_set_default_key(sdata, new->conf.keyidx); + if (defmgmtkey && new) + __ieee80211_set_default_mgmt_key(sdata, + new->conf.keyidx); + } + + if (old) { + /* + * We'll use an empty list to indicate that the key + * has already been removed. + */ + list_del_init(&old->list); + } +} + +struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, + int idx, + size_t key_len, + const u8 *key_data, + size_t seq_len, const u8 *seq) +{ + struct ieee80211_key *key; + int i, j; + + BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS); + + key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL); + if (!key) + return NULL; + + /* + * Default to software encryption; we'll later upload the + * key to the hardware if possible. 
+ */ + key->conf.flags = 0; + key->flags = 0; + + key->conf.alg = alg; + key->conf.keyidx = idx; + key->conf.keylen = key_len; + switch (alg) { + case ALG_WEP: + key->conf.iv_len = WEP_IV_LEN; + key->conf.icv_len = WEP_ICV_LEN; + break; + case ALG_TKIP: + key->conf.iv_len = TKIP_IV_LEN; + key->conf.icv_len = TKIP_ICV_LEN; + if (seq) { + for (i = 0; i < NUM_RX_DATA_QUEUES; i++) { + key->u.tkip.rx[i].iv32 = + get_unaligned_le32(&seq[2]); + key->u.tkip.rx[i].iv16 = + get_unaligned_le16(seq); + } + } + break; + case ALG_CCMP: + key->conf.iv_len = CCMP_HDR_LEN; + key->conf.icv_len = CCMP_MIC_LEN; + if (seq) { + for (i = 0; i < NUM_RX_DATA_QUEUES; i++) + for (j = 0; j < CCMP_PN_LEN; j++) + key->u.ccmp.rx_pn[i][j] = + seq[CCMP_PN_LEN - j - 1]; + } + break; + case ALG_AES_CMAC: + key->conf.iv_len = 0; + key->conf.icv_len = sizeof(struct ieee80211_mmie); + if (seq) + for (j = 0; j < 6; j++) + key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1]; + break; + } + memcpy(key->conf.key, key_data, key_len); + INIT_LIST_HEAD(&key->list); + INIT_LIST_HEAD(&key->todo); + + if (alg == ALG_CCMP) { + /* + * Initialize AES key state here as an optimization so that + * it does not need to be initialized for every packet. + */ + key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data); + if (!key->u.ccmp.tfm) { + kfree(key); + return NULL; + } + } + + if (alg == ALG_AES_CMAC) { + /* + * Initialize AES key state here as an optimization so that + * it does not need to be initialized for every packet. + */ + key->u.aes_cmac.tfm = + ieee80211_aes_cmac_key_setup(key_data); + if (!key->u.aes_cmac.tfm) { + kfree(key); + return NULL; + } + } + + return key; +} + +void ieee80211_key_link(struct ieee80211_key *key, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + struct ieee80211_key *old_key; + unsigned long flags; + int idx; + + BUG_ON(!sdata); + BUG_ON(!key); + + idx = key->conf.keyidx; + key->local = sdata->local; + key->sdata = sdata; + key->sta = sta; + + if (sta) { + /* + * some hardware cannot handle TKIP with QoS, so + * we indicate whether QoS could be in use. + */ + if (test_sta_flags(sta, WLAN_STA_WME)) + key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA; + + /* + * This key is for a specific sta interface, + * inform the driver that it should try to store + * this key as pairwise key. + */ + key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; + } else { + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + struct sta_info *ap; + + /* + * We're getting a sta pointer in, + * so must be under RCU read lock. + */ + + /* same here, the AP could be using QoS */ + ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid); + if (ap) { + if (test_sta_flags(ap, WLAN_STA_WME)) + key->conf.flags |= + IEEE80211_KEY_FLAG_WMM_STA; + } + } + } + + spin_lock_irqsave(&sdata->local->key_lock, flags); + + if (sta) + old_key = sta->key; + else + old_key = sdata->keys[idx]; + + __ieee80211_key_replace(sdata, sta, old_key, key); + + /* free old key later */ + add_todo(old_key, KEY_FLAG_TODO_DELETE); + + add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS); + if (ieee80211_sdata_running(sdata)) + add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD); + + spin_unlock_irqrestore(&sdata->local->key_lock, flags); +} + +static void __ieee80211_key_free(struct ieee80211_key *key) +{ + /* + * Replace key with nothingness if it was ever used. 
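Note: the seq buffer consumed by ieee80211_key_alloc() above is little-endian for TKIP: IV16 in bytes 0-1, IV32 in bytes 2-5. The same parsing as a stand-alone helper with a hypothetical name:

#include <linux/types.h>
#include <asm/unaligned.h>

static void parse_tkip_rsc(const u8 *seq, u16 *iv16, u32 *iv32)
{
	*iv16 = get_unaligned_le16(seq);	/* bytes 0-1 */
	*iv32 = get_unaligned_le32(&seq[2]);	/* bytes 2-5 */
}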
+ */ + if (key->sdata) + __ieee80211_key_replace(key->sdata, key->sta, + key, NULL); + + add_todo(key, KEY_FLAG_TODO_DELETE); +} + +void ieee80211_key_free(struct ieee80211_key *key) +{ + unsigned long flags; + + if (!key) + return; + + if (!key->sdata) { + /* The key has not been linked yet, simply free it + * and don't Oops */ + if (key->conf.alg == ALG_CCMP) + ieee80211_aes_key_free(key->u.ccmp.tfm); + kfree(key); + return; + } + + spin_lock_irqsave(&key->sdata->local->key_lock, flags); + __ieee80211_key_free(key); + spin_unlock_irqrestore(&key->sdata->local->key_lock, flags); +} + +/* + * To be safe against concurrent manipulations of the list (which shouldn't + * actually happen) we need to hold the spinlock. But under the spinlock we + * can't actually do much, so we defer processing to the todo list. Then run + * the todo list to be sure the operation and possibly previously pending + * operations are completed. + */ +static void ieee80211_todo_for_each_key(struct ieee80211_sub_if_data *sdata, + u32 todo_flags) +{ + struct ieee80211_key *key; + unsigned long flags; + + might_sleep(); + + spin_lock_irqsave(&sdata->local->key_lock, flags); + list_for_each_entry(key, &sdata->key_list, list) + add_todo(key, todo_flags); + spin_unlock_irqrestore(&sdata->local->key_lock, flags); + + ieee80211_key_todo(); +} + +void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) +{ + ASSERT_RTNL(); + + if (WARN_ON(!ieee80211_sdata_running(sdata))) + return; + + ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD); +} + +void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) +{ + ASSERT_RTNL(); + + ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_REMOVE); +} + +static void __ieee80211_key_destroy(struct ieee80211_key *key) +{ + if (!key) + return; + + ieee80211_key_disable_hw_accel(key); + + if (key->conf.alg == ALG_CCMP) + ieee80211_aes_key_free(key->u.ccmp.tfm); + if (key->conf.alg == ALG_AES_CMAC) + ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); + ieee80211_debugfs_key_remove(key); + + kfree(key); +} + +static void __ieee80211_key_todo(void) +{ + struct ieee80211_key *key; + bool work_done; + u32 todoflags; + + /* + * NB: sta_info_destroy relies on this! 
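Note: the todo machinery used throughout this file (add_todo() feeding __ieee80211_key_todo()) is a generic "queue under a spinlock, process from a work item" pattern. A self-contained sketch of just that mechanism, with illustrative names and a single MY_TODO_FREE flag standing in for the per-key flag handling:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define MY_TODO_FREE	0x1

struct my_item {
	struct list_head todo;
	unsigned long flags;	/* pending operations, protected by my_todo_lock */
};

static LIST_HEAD(my_todo_list);
static DEFINE_SPINLOCK(my_todo_lock);

static void my_todo_work(struct work_struct *work);
static DECLARE_WORK(my_work, my_todo_work);

/* callable from process or BH context: queue the item and kick the worker */
static void my_add_todo(struct my_item *item, unsigned long flag)
{
	spin_lock_bh(&my_todo_lock);
	item->flags |= flag;
	if (!list_empty(&item->todo))
		list_del(&item->todo);		/* requeue at the tail */
	list_add_tail(&item->todo, &my_todo_list);
	schedule_work(&my_work);
	spin_unlock_bh(&my_todo_lock);
}

/* process context: drain the list, dropping the lock around each item */
static void my_todo_work(struct work_struct *work)
{
	struct my_item *item;
	unsigned long flags;

	spin_lock_bh(&my_todo_lock);
	while (!list_empty(&my_todo_list)) {
		item = list_first_entry(&my_todo_list, struct my_item, todo);
		list_del_init(&item->todo);
		flags = item->flags;
		item->flags = 0;
		spin_unlock_bh(&my_todo_lock);

		if (flags & MY_TODO_FREE)
			kfree(item);		/* safe: it is off the list now */

		spin_lock_bh(&my_todo_lock);
	}
	spin_unlock_bh(&my_todo_lock);
}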
+ */ + synchronize_rcu(); + + spin_lock_bh(&todo_lock); + while (!list_empty(&todo_list)) { + key = list_first_entry(&todo_list, struct ieee80211_key, todo); + list_del_init(&key->todo); + todoflags = key->flags & (KEY_FLAG_TODO_ADD_DEBUGFS | + KEY_FLAG_TODO_DEFKEY | + KEY_FLAG_TODO_DEFMGMTKEY | + KEY_FLAG_TODO_HWACCEL_ADD | + KEY_FLAG_TODO_HWACCEL_REMOVE | + KEY_FLAG_TODO_DELETE); + key->flags &= ~todoflags; + spin_unlock_bh(&todo_lock); + + work_done = false; + + if (todoflags & KEY_FLAG_TODO_ADD_DEBUGFS) { + ieee80211_debugfs_key_add(key); + work_done = true; + } + if (todoflags & KEY_FLAG_TODO_DEFKEY) { + ieee80211_debugfs_key_remove_default(key->sdata); + ieee80211_debugfs_key_add_default(key->sdata); + work_done = true; + } + if (todoflags & KEY_FLAG_TODO_DEFMGMTKEY) { + ieee80211_debugfs_key_remove_mgmt_default(key->sdata); + ieee80211_debugfs_key_add_mgmt_default(key->sdata); + work_done = true; + } + if (todoflags & KEY_FLAG_TODO_HWACCEL_ADD) { + ieee80211_key_enable_hw_accel(key); + work_done = true; + } + if (todoflags & KEY_FLAG_TODO_HWACCEL_REMOVE) { + ieee80211_key_disable_hw_accel(key); + work_done = true; + } + if (todoflags & KEY_FLAG_TODO_DELETE) { + __ieee80211_key_destroy(key); + work_done = true; + } + + WARN_ON(!work_done); + + spin_lock_bh(&todo_lock); + } + spin_unlock_bh(&todo_lock); +} + +void ieee80211_key_todo(void) +{ + ieee80211_key_lock(); + __ieee80211_key_todo(); + ieee80211_key_unlock(); +} + +void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_key *key, *tmp; + unsigned long flags; + + ieee80211_key_lock(); + + ieee80211_debugfs_key_remove_default(sdata); + ieee80211_debugfs_key_remove_mgmt_default(sdata); + + spin_lock_irqsave(&sdata->local->key_lock, flags); + list_for_each_entry_safe(key, tmp, &sdata->key_list, list) + __ieee80211_key_free(key); + spin_unlock_irqrestore(&sdata->local->key_lock, flags); + + __ieee80211_key_todo(); + + ieee80211_key_unlock(); +} diff --git a/net/mac80211/key.h b/net/mac80211/key.h new file mode 100644 index 0000000..bdc2968 --- /dev/null +++ b/net/mac80211/key.h @@ -0,0 +1,160 @@ +/* + * Copyright 2002-2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef IEEE80211_KEY_H +#define IEEE80211_KEY_H + +#include +#include +#include +#include +#include + +#define WEP_IV_LEN 4 +#define WEP_ICV_LEN 4 +#define ALG_TKIP_KEY_LEN 32 +#define ALG_CCMP_KEY_LEN 16 +#define CCMP_HDR_LEN 8 +#define CCMP_MIC_LEN 8 +#define CCMP_TK_LEN 16 +#define CCMP_PN_LEN 6 +#define TKIP_IV_LEN 8 +#define TKIP_ICV_LEN 4 + +#define NUM_RX_DATA_QUEUES 17 + +struct ieee80211_local; +struct ieee80211_sub_if_data; +struct sta_info; + +/** + * enum ieee80211_internal_key_flags - internal key flags + * + * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present + * in the hardware for TX crypto hardware acceleration. + * @KEY_FLAG_TODO_DELETE: Key is marked for deletion and will, after an + * RCU grace period, no longer be reachable other than from the + * todo list. + * @KEY_FLAG_TODO_HWACCEL_ADD: Key needs to be added to hardware acceleration. + * @KEY_FLAG_TODO_HWACCEL_REMOVE: Key needs to be removed from hardware + * acceleration. + * @KEY_FLAG_TODO_DEFKEY: Key is default key and debugfs needs to be updated. + * @KEY_FLAG_TODO_ADD_DEBUGFS: Key needs to be added to debugfs. 
+ * @KEY_FLAG_TODO_DEFMGMTKEY: Key is default management key and debugfs needs + * to be updated. + */ +enum ieee80211_internal_key_flags { + KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), + KEY_FLAG_TODO_DELETE = BIT(1), + KEY_FLAG_TODO_HWACCEL_ADD = BIT(2), + KEY_FLAG_TODO_HWACCEL_REMOVE = BIT(3), + KEY_FLAG_TODO_DEFKEY = BIT(4), + KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5), + KEY_FLAG_TODO_DEFMGMTKEY = BIT(6), +}; + +enum ieee80211_internal_tkip_state { + TKIP_STATE_NOT_INIT, + TKIP_STATE_PHASE1_DONE, + TKIP_STATE_PHASE1_HW_UPLOADED, +}; + +struct tkip_ctx { + u32 iv32; + u16 iv16; + u16 p1k[5]; + enum ieee80211_internal_tkip_state state; +}; + +struct ieee80211_key { + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + + /* for sdata list */ + struct list_head list; + /* for todo list */ + struct list_head todo; + + /* protected by todo lock! */ + unsigned int flags; + + union { + struct { + /* last used TSC */ + struct tkip_ctx tx; + + /* last received RSC */ + struct tkip_ctx rx[NUM_RX_DATA_QUEUES]; + } tkip; + struct { + u8 tx_pn[6]; + u8 rx_pn[NUM_RX_DATA_QUEUES][6]; + struct crypto_cipher *tfm; + u32 replays; /* dot11RSNAStatsCCMPReplays */ + /* scratch buffers for virt_to_page() (crypto API) */ +#ifndef AES_BLOCK_LEN +#define AES_BLOCK_LEN 16 +#endif + u8 tx_crypto_buf[6 * AES_BLOCK_LEN]; + u8 rx_crypto_buf[6 * AES_BLOCK_LEN]; + } ccmp; + struct { + u8 tx_pn[6]; + u8 rx_pn[6]; + struct crypto_cipher *tfm; + u32 replays; /* dot11RSNAStatsCMACReplays */ + u32 icverrors; /* dot11RSNAStatsCMACICVErrors */ + /* scratch buffers for virt_to_page() (crypto API) */ + u8 tx_crypto_buf[2 * AES_BLOCK_LEN]; + u8 rx_crypto_buf[2 * AES_BLOCK_LEN]; + } aes_cmac; + } u; + + /* number of times this key has been used */ + int tx_rx_count; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct { + struct dentry *stalink; + struct dentry *dir; + int cnt; + } debugfs; +#endif + + /* + * key config, must be last because it contains key + * material as variable length member + */ + struct ieee80211_key_conf conf; +}; + +struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, + int idx, + size_t key_len, + const u8 *key_data, + size_t seq_len, const u8 *seq); +/* + * Insert a key into data structures (sdata, sta if necessary) + * to make it used, free old key. + */ +void ieee80211_key_link(struct ieee80211_key *key, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta); +void ieee80211_key_free(struct ieee80211_key *key); +void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); +void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, + int idx); +void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); +void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); +void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); + +void ieee80211_key_todo(void); + +#endif /* IEEE80211_KEY_H */ diff --git a/net/mac80211/led.c b/net/mac80211/led.c new file mode 100644 index 0000000..162a643 --- /dev/null +++ b/net/mac80211/led.c @@ -0,0 +1,161 @@ +/* + * Copyright 2006, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* just for IFNAMSIZ */ +#include +#include "led.h" + +void ieee80211_led_rx(struct ieee80211_local *local) +{ + if (unlikely(!local->rx_led)) + return; + if (local->rx_led_counter++ % 2 == 0) + led_trigger_event(local->rx_led, LED_OFF); + else + led_trigger_event(local->rx_led, LED_FULL); +} + +/* q is 1 if a packet was enqueued, 0 if it has been transmitted */ +void ieee80211_led_tx(struct ieee80211_local *local, int q) +{ + if (unlikely(!local->tx_led)) + return; + /* not sure how this is supposed to work ... */ + local->tx_led_counter += 2*q-1; + if (local->tx_led_counter % 2 == 0) + led_trigger_event(local->tx_led, LED_OFF); + else + led_trigger_event(local->tx_led, LED_FULL); +} + +void ieee80211_led_assoc(struct ieee80211_local *local, bool associated) +{ + if (unlikely(!local->assoc_led)) + return; + if (associated) + led_trigger_event(local->assoc_led, LED_FULL); + else + led_trigger_event(local->assoc_led, LED_OFF); +} + +void ieee80211_led_radio(struct ieee80211_local *local, bool enabled) +{ + if (unlikely(!local->radio_led)) + return; + if (enabled) + led_trigger_event(local->radio_led, LED_FULL); + else + led_trigger_event(local->radio_led, LED_OFF); +} + +void ieee80211_led_init(struct ieee80211_local *local) +{ + local->rx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); + if (local->rx_led) { + snprintf(local->rx_led_name, sizeof(local->rx_led_name), + "%srx", wiphy_name(local->hw.wiphy)); + local->rx_led->name = local->rx_led_name; + if (led_trigger_register(local->rx_led)) { + kfree(local->rx_led); + local->rx_led = NULL; + } + } + + local->tx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); + if (local->tx_led) { + snprintf(local->tx_led_name, sizeof(local->tx_led_name), + "%stx", wiphy_name(local->hw.wiphy)); + local->tx_led->name = local->tx_led_name; + if (led_trigger_register(local->tx_led)) { + kfree(local->tx_led); + local->tx_led = NULL; + } + } + + local->assoc_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); + if (local->assoc_led) { + snprintf(local->assoc_led_name, sizeof(local->assoc_led_name), + "%sassoc", wiphy_name(local->hw.wiphy)); + local->assoc_led->name = local->assoc_led_name; + if (led_trigger_register(local->assoc_led)) { + kfree(local->assoc_led); + local->assoc_led = NULL; + } + } + + local->radio_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); + if (local->radio_led) { + snprintf(local->radio_led_name, sizeof(local->radio_led_name), + "%sradio", wiphy_name(local->hw.wiphy)); + local->radio_led->name = local->radio_led_name; + if (led_trigger_register(local->radio_led)) { + kfree(local->radio_led); + local->radio_led = NULL; + } + } +} + +void ieee80211_led_exit(struct ieee80211_local *local) +{ + if (local->radio_led) { + led_trigger_unregister(local->radio_led); + kfree(local->radio_led); + } + if (local->assoc_led) { + led_trigger_unregister(local->assoc_led); + kfree(local->assoc_led); + } + if (local->tx_led) { + led_trigger_unregister(local->tx_led); + kfree(local->tx_led); + } + if (local->rx_led) { + led_trigger_unregister(local->rx_led); + kfree(local->rx_led); + } +} + +char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (local->radio_led) + return local->radio_led_name; + return NULL; +} +EXPORT_SYMBOL(__ieee80211_get_radio_led_name); + +char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (local->assoc_led) + return local->assoc_led_name; + return NULL; +} 
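Note: the led_trigger registration above is only half of the picture; a driver picks the trigger names up through the ieee80211_get_*_led_name() wrappers in mac80211.h and plugs them into an LED class device as its default trigger. A hypothetical driver-side sketch:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <net/mac80211.h>

struct my_led {
	struct led_classdev cdev;
	char name[32];
};

static void my_led_brightness_set(struct led_classdev *cdev,
				  enum led_brightness brightness)
{
	/* drive the actual LED here */
}

/* bind a driver LED to mac80211's TX activity trigger */
static int my_register_tx_led(struct ieee80211_hw *hw, struct device *dev,
			      struct my_led *led)
{
	snprintf(led->name, sizeof(led->name), "mydrv-%s::tx",
		 wiphy_name(hw->wiphy));
	led->cdev.name = led->name;
	led->cdev.brightness_set = my_led_brightness_set;
	led->cdev.default_trigger = ieee80211_get_tx_led_name(hw);

	return led_classdev_register(dev, &led->cdev);
}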
+EXPORT_SYMBOL(__ieee80211_get_assoc_led_name); + +char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (local->tx_led) + return local->tx_led_name; + return NULL; +} +EXPORT_SYMBOL(__ieee80211_get_tx_led_name); + +char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (local->rx_led) + return local->rx_led_name; + return NULL; +} +EXPORT_SYMBOL(__ieee80211_get_rx_led_name); diff --git a/net/mac80211/led.h b/net/mac80211/led.h new file mode 100644 index 0000000..77b1e1b --- /dev/null +++ b/net/mac80211/led.h @@ -0,0 +1,44 @@ +/* + * Copyright 2006, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include "ieee80211_i.h" + +#ifdef CONFIG_MAC80211_LEDS +extern void ieee80211_led_rx(struct ieee80211_local *local); +extern void ieee80211_led_tx(struct ieee80211_local *local, int q); +extern void ieee80211_led_assoc(struct ieee80211_local *local, + bool associated); +extern void ieee80211_led_radio(struct ieee80211_local *local, + bool enabled); +extern void ieee80211_led_init(struct ieee80211_local *local); +extern void ieee80211_led_exit(struct ieee80211_local *local); +#else +static inline void ieee80211_led_rx(struct ieee80211_local *local) +{ +} +static inline void ieee80211_led_tx(struct ieee80211_local *local, int q) +{ +} +static inline void ieee80211_led_assoc(struct ieee80211_local *local, + bool associated) +{ +} +static inline void ieee80211_led_radio(struct ieee80211_local *local, + bool enabled) +{ +} +static inline void ieee80211_led_init(struct ieee80211_local *local) +{ +} +static inline void ieee80211_led_exit(struct ieee80211_local *local) +{ +} +#endif diff --git a/net/mac80211/main.c b/net/mac80211/main.c new file mode 100644 index 0000000..f7455e7 --- /dev/null +++ b/net/mac80211/main.c @@ -0,0 +1,756 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +#include +#endif +#include + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "mesh.h" +#include "wep.h" +#include "led.h" +#include "cfg.h" +#include "debugfs.h" + + +bool ieee80211_disable_40mhz_24ghz; +module_param(ieee80211_disable_40mhz_24ghz, bool, 0644); +MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz, + "Disable 40MHz support in the 2.4GHz band"); + +void ieee80211_configure_filter(struct ieee80211_local *local) +{ + u64 mc; + unsigned int changed_flags; + unsigned int new_flags = 0; + + if (atomic_read(&local->iff_promiscs)) + new_flags |= FIF_PROMISC_IN_BSS; + + if (atomic_read(&local->iff_allmultis)) + new_flags |= FIF_ALLMULTI; + + if (local->monitors || local->scanning) + new_flags |= FIF_BCN_PRBRESP_PROMISC; + + if (local->fif_fcsfail) + new_flags |= FIF_FCSFAIL; + + if (local->fif_plcpfail) + new_flags |= FIF_PLCPFAIL; + + if (local->fif_control) + new_flags |= FIF_CONTROL; + + if (local->fif_other_bss) + new_flags |= FIF_OTHER_BSS; + + if (local->fif_pspoll) + new_flags |= FIF_PSPOLL; + + spin_lock_bh(&local->filter_lock); + changed_flags = local->filter_flags ^ new_flags; + + mc = drv_prepare_multicast(local, local->mc_count, local->mc_list); + spin_unlock_bh(&local->filter_lock); + + /* be a bit nasty */ + new_flags |= (1<<31); + + drv_configure_filter(local, changed_flags, &new_flags, mc); + + WARN_ON(new_flags & (1<<31)); + + local->filter_flags = new_flags & ~(1<<31); +} + +static void ieee80211_reconfig_filter(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, reconfig_filter); + + ieee80211_configure_filter(local); +} + +int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) +{ + struct ieee80211_channel *chan, *scan_chan; + int ret = 0; + int power; + enum nl80211_channel_type channel_type; + + might_sleep(); + + scan_chan = local->scan_channel; + + if (scan_chan) { + chan = scan_chan; + channel_type = NL80211_CHAN_NO_HT; + } else if (local->tmp_channel) { + chan = scan_chan = local->tmp_channel; + channel_type = local->tmp_channel_type; + } else { + chan = local->oper_channel; + channel_type = local->oper_channel_type; + } + + if (chan != local->hw.conf.channel || + channel_type != local->hw.conf.channel_type) { + local->hw.conf.channel = chan; + local->hw.conf.channel_type = channel_type; + changed |= IEEE80211_CONF_CHANGE_CHANNEL; + } + + if (!conf_is_ht(&local->hw.conf)) { + /* + * mac80211.h documents that this is only valid + * when the channel is set to an HT type, and + * that otherwise STATIC is used. + */ + local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC; + } else if (local->hw.conf.smps_mode != local->smps_mode) { + local->hw.conf.smps_mode = local->smps_mode; + changed |= IEEE80211_CONF_CHANGE_SMPS; + } + + if (scan_chan) + power = chan->max_power; + else + power = local->power_constr_level ? 
+ (chan->max_power - local->power_constr_level) : + chan->max_power; + + if (local->user_power_level >= 0) + power = min(power, local->user_power_level); + + if (local->hw.conf.power_level != power) { + changed |= IEEE80211_CONF_CHANGE_POWER; + local->hw.conf.power_level = power; + } + + if (changed && local->open_count) { + ret = drv_config(local, changed); + /* + * Goal: + * HW reconfiguration should never fail, the driver has told + * us what it can support so it should live up to that promise. + * + * Current status: + * rfkill is not integrated with mac80211 and a + * configuration command can thus fail if hardware rfkill + * is enabled + * + * FIXME: integrate rfkill with mac80211 and then add this + * WARN_ON() back + * + */ + /* WARN_ON(ret); */ + } + + return ret; +} + +void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, + u32 changed) +{ + struct ieee80211_local *local = sdata->local; + static const u8 zero[ETH_ALEN] = { 0 }; + + if (!changed) + return; + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* + * While not associated, claim a BSSID of all-zeroes + * so that drivers don't do any weird things with the + * BSSID at that time. + */ + if (sdata->vif.bss_conf.assoc) + sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid; + else + sdata->vif.bss_conf.bssid = zero; + } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) + sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; + else if (sdata->vif.type == NL80211_IFTYPE_AP) + sdata->vif.bss_conf.bssid = sdata->vif.addr; + else if (ieee80211_vif_is_mesh(&sdata->vif)) { + sdata->vif.bss_conf.bssid = zero; + } else { + WARN_ON(1); + return; + } + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + break; + default: + /* do not warn to simplify caller in scan.c */ + changed &= ~BSS_CHANGED_BEACON_ENABLED; + if (WARN_ON(changed & BSS_CHANGED_BEACON)) + return; + break; + } + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + if (local->quiescing || !ieee80211_sdata_running(sdata) || + test_bit(SCAN_SW_SCANNING, &local->scanning)) { + sdata->vif.bss_conf.enable_beacon = false; + } else { + /* + * Beacon should be enabled, but AP mode must + * check whether there is a beacon configured. + */ + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP: + sdata->vif.bss_conf.enable_beacon = + !!rcu_dereference(sdata->u.ap.beacon); + break; + case NL80211_IFTYPE_ADHOC: + sdata->vif.bss_conf.enable_beacon = + !!rcu_dereference(sdata->u.ibss.presp); + break; + case NL80211_IFTYPE_MESH_POINT: + sdata->vif.bss_conf.enable_beacon = true; + break; + default: + /* not reached */ + WARN_ON(1); + break; + } + } + } + + drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed); +} + +u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) +{ + sdata->vif.bss_conf.use_cts_prot = false; + sdata->vif.bss_conf.use_short_preamble = false; + sdata->vif.bss_conf.use_short_slot = false; + return BSS_CHANGED_ERP_CTS_PROT | + BSS_CHANGED_ERP_PREAMBLE | + BSS_CHANGED_ERP_SLOT; +} + +static void ieee80211_tasklet_handler(unsigned long data) +{ + struct ieee80211_local *local = (struct ieee80211_local *) data; + struct sk_buff *skb; + struct ieee80211_ra_tid *ra_tid; + + while ((skb = skb_dequeue(&local->skb_queue)) || + (skb = skb_dequeue(&local->skb_queue_unreliable))) { + switch (skb->pkt_type) { + case IEEE80211_RX_MSG: + /* Clear skb->pkt_type in order to not confuse kernel + * netstack. 
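Note: the 1<<31 canary in ieee80211_configure_filter() above exists to catch drivers that ignore the *total_flags contract: the callback must write back only the FIF_* bits the hardware will really honour, clearing anything it does not recognise (including the canary). A hypothetical driver callback against the configure_filter() signature this tree uses:

#include <net/mac80211.h>

/* filters this hypothetical hardware can actually honour */
#define MY_SUPPORTED_FIF	(FIF_ALLMULTI | FIF_FCSFAIL | FIF_OTHER_BSS)

static void my_configure_filter(struct ieee80211_hw *hw,
				unsigned int changed_flags,
				unsigned int *total_flags,
				u64 multicast)
{
	/* report back only what the hardware will really do */
	*total_flags &= MY_SUPPORTED_FIF;

	/* ... program the hardware RX filter from *total_flags here ... */
}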
*/ + skb->pkt_type = 0; + ieee80211_rx(local_to_hw(local), skb); + break; + case IEEE80211_TX_STATUS_MSG: + skb->pkt_type = 0; + ieee80211_tx_status(local_to_hw(local), skb); + break; + case IEEE80211_DELBA_MSG: + ra_tid = (struct ieee80211_ra_tid *) &skb->cb; + ieee80211_stop_tx_ba_cb(ra_tid->vif, ra_tid->ra, + ra_tid->tid); + dev_kfree_skb(skb); + break; + case IEEE80211_ADDBA_MSG: + ra_tid = (struct ieee80211_ra_tid *) &skb->cb; + ieee80211_start_tx_ba_cb(ra_tid->vif, ra_tid->ra, + ra_tid->tid); + dev_kfree_skb(skb); + break ; + default: + WARN(1, "mac80211: Packet is of unknown type %d\n", + skb->pkt_type); + dev_kfree_skb(skb); + break; + } + } +} + +static void ieee80211_restart_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, restart_work); + + rtnl_lock(); + ieee80211_reconfig(local); + rtnl_unlock(); +} + +void ieee80211_restart_hw(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + /* use this reason, __ieee80211_resume will unblock it */ + ieee80211_stop_queues_by_reason(hw, + IEEE80211_QUEUE_STOP_REASON_SUSPEND); + + schedule_work(&local->restart_work); +} +EXPORT_SYMBOL(ieee80211_restart_hw); + +static void ieee80211_recalc_smps_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, recalc_smps); + + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_smps(local, NULL); + mutex_unlock(&local->iflist_mtx); +} + +struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, + const struct ieee80211_ops *ops) +{ + struct ieee80211_local *local; + int priv_size, i; + struct wiphy *wiphy; + + /* Ensure 32-byte alignment of our private data and hw private data. + * We use the wiphy priv data for both our ieee80211_local and for + * the driver's private data + * + * In memory it'll be like this: + * + * +-------------------------+ + * | struct wiphy | + * +-------------------------+ + * | struct ieee80211_local | + * +-------------------------+ + * | driver's private data | + * +-------------------------+ + * + */ + priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len; + + wiphy = wiphy_new(&mac80211_config_ops, priv_size); + + if (!wiphy) + return NULL; + + wiphy->flags |= WIPHY_FLAG_NETNS_OK | + WIPHY_FLAG_4ADDR_AP | + WIPHY_FLAG_4ADDR_STATION; + wiphy->privid = mac80211_wiphy_privid; + + wiphy->bss_priv_size = sizeof(struct ieee80211_bss); + + local = wiphy_priv(wiphy); + + local->hw.wiphy = wiphy; + + local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); + + BUG_ON(!ops->tx); + BUG_ON(!ops->start); + BUG_ON(!ops->stop); + BUG_ON(!ops->config); + BUG_ON(!ops->add_interface); + BUG_ON(!ops->remove_interface); + BUG_ON(!ops->configure_filter); + local->ops = ops; + + /* set up some defaults */ + local->hw.queues = 1; + local->hw.max_rates = 1; + local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; + local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; + local->user_power_level = -1; + local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES; + local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; + + INIT_LIST_HEAD(&local->interfaces); + mutex_init(&local->iflist_mtx); + mutex_init(&local->scan_mtx); + + spin_lock_init(&local->key_lock); + spin_lock_init(&local->filter_lock); + spin_lock_init(&local->queue_stop_reason_lock); + + INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); + + ieee80211_work_init(local); + + INIT_WORK(&local->restart_work, ieee80211_restart_work); + + 
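+ /*
+ * Worked example of the priv_size layout pictured above, assuming
+ * NETDEV_ALIGN == 32 and, purely for illustration,
+ * sizeof(struct ieee80211_local) == 1000: ALIGN(1000, 32) == 1024,
+ * so the driver's private area starts at offset 1024 from local and
+ * keeps the same 32-byte alignment as ieee80211_local itself.
+ */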
INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); + INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work); + local->smps_mode = IEEE80211_SMPS_OFF; + + INIT_WORK(&local->dynamic_ps_enable_work, + ieee80211_dynamic_ps_enable_work); + INIT_WORK(&local->dynamic_ps_disable_work, + ieee80211_dynamic_ps_disable_work); + setup_timer(&local->dynamic_ps_timer, + ieee80211_dynamic_ps_timer, (unsigned long) local); + + sta_info_init(local); + + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) + skb_queue_head_init(&local->pending[i]); + tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, + (unsigned long)local); + + tasklet_init(&local->tasklet, + ieee80211_tasklet_handler, + (unsigned long) local); + + skb_queue_head_init(&local->skb_queue); + skb_queue_head_init(&local->skb_queue_unreliable); + + spin_lock_init(&local->ampdu_lock); + + return local_to_hw(local); +} +EXPORT_SYMBOL(ieee80211_alloc_hw); + +int ieee80211_register_hw(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + int result; + enum ieee80211_band band; + int channels, i, j, max_bitrates; + bool supp_ht; + static const u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + + /* keep last -- depends on hw flags! */ + WLAN_CIPHER_SUITE_AES_CMAC + }; + + /* + * generic code guarantees at least one band, + * set this very early because much code assumes + * that hw.conf.channel is assigned + */ + channels = 0; + max_bitrates = 0; + supp_ht = false; + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + struct ieee80211_supported_band *sband; + + sband = local->hw.wiphy->bands[band]; + if (!sband) + continue; + if (!local->oper_channel) { + /* init channel we're on */ + local->hw.conf.channel = + local->oper_channel = &sband->channels[0]; + local->hw.conf.channel_type = NL80211_CHAN_NO_HT; + } + channels += sband->n_channels; + + if (max_bitrates < sband->n_bitrates) + max_bitrates = sband->n_bitrates; + supp_ht = supp_ht || sband->ht_cap.ht_supported; + } + + local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + + sizeof(void *) * channels, GFP_KERNEL); + if (!local->int_scan_req) + return -ENOMEM; + + /* if low-level driver supports AP, we also support VLAN */ + if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) + local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); + + /* mac80211 always supports monitor */ + local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); + + if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) + local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) + local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; + + WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) + && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK), + "U-APSD not supported with HW_PS_NULLFUNC_STACK\n"); + + /* + * Calculate scan IE length -- we need this to alloc + * memory and to subtract from the driver limit. It + * includes the (extended) supported rates and HT + * information -- SSID is the driver's responsibility. + */ + local->scan_ies_len = 4 + max_bitrates; /* (ext) supp rates */ + if (supp_ht) + local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); + + if (!local->ops->hw_scan) { + /* For hw_scan, driver needs to set these up. 
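+ * Without a hw_scan callback mac80211 performs the scan in software,
+ * so sane defaults are filled in here instead.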
*/ + local->hw.wiphy->max_scan_ssids = 4; + local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + } + + /* + * If the driver supports any scan IEs, then assume the + * limit includes the IEs mac80211 will add, otherwise + * leave it at zero and let the driver sort it out; we + * still pass our IEs to the driver but userspace will + * not be allowed to in that case. + */ + if (local->hw.wiphy->max_scan_ie_len) + local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; + + local->hw.wiphy->cipher_suites = cipher_suites; + local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE)) + local->hw.wiphy->n_cipher_suites--; + + result = wiphy_register(local->hw.wiphy); + if (result < 0) + goto fail_wiphy_register; + + /* + * We use the number of queues for feature tests (QoS, HT) internally + * so restrict them appropriately. + */ + if (hw->queues > IEEE80211_MAX_QUEUES) + hw->queues = IEEE80211_MAX_QUEUES; + + local->workqueue = + create_singlethread_workqueue(wiphy_name(local->hw.wiphy)); + if (!local->workqueue) { + result = -ENOMEM; + goto fail_workqueue; + } + + /* + * The hardware needs headroom for sending the frame, + * and we need some headroom for passing the frame to monitor + * interfaces, but never both at the same time. + */ + BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM != + sizeof(struct ieee80211_tx_status_rtap_hdr)); + local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, + sizeof(struct ieee80211_tx_status_rtap_hdr)); + + debugfs_hw_add(local); + + /* + * if the driver doesn't specify a max listen interval we + * use 5 which should be a safe default + */ + if (local->hw.max_listen_interval == 0) + local->hw.max_listen_interval = 5; + + local->hw.conf.listen_interval = local->hw.max_listen_interval; + + result = sta_info_start(local); + if (result < 0) + goto fail_sta_info; + + result = ieee80211_wep_init(local); + if (result < 0) { + printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", + wiphy_name(local->hw.wiphy), result); + goto fail_wep; + } + + rtnl_lock(); + + result = ieee80211_init_rate_ctrl_alg(local, + hw->rate_control_algorithm); + if (result < 0) { + printk(KERN_DEBUG "%s: Failed to initialize rate control " + "algorithm\n", wiphy_name(local->hw.wiphy)); + goto fail_rate; + } + + /* add one default STA interface if supported */ + if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { + result = ieee80211_if_add(local, "wlan%d", NULL, + NL80211_IFTYPE_STATION, NULL); + if (result) + printk(KERN_WARNING "%s: Failed to add default virtual iface\n", + wiphy_name(local->hw.wiphy)); + } + + rtnl_unlock(); + + ieee80211_led_init(local); + + /* alloc internal scan request */ + i = 0; + local->int_scan_req->ssids = &local->scan_ssid; + local->int_scan_req->n_ssids = 1; + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (!hw->wiphy->bands[band]) + continue; + for (j = 0; j < hw->wiphy->bands[band]->n_channels; j++) { + local->int_scan_req->channels[i] = + &hw->wiphy->bands[band]->channels[j]; + i++; + } + } + local->int_scan_req->n_channels = i; + + local->network_latency_notifier.notifier_call = + ieee80211_max_network_latency; + result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, + &local->network_latency_notifier); + + if (result) { + rtnl_lock(); + goto fail_pm_qos; + } + + return 0; + + fail_pm_qos: + ieee80211_led_exit(local); + ieee80211_remove_interfaces(local); + fail_rate: + rtnl_unlock(); + ieee80211_wep_free(local); + fail_wep: + sta_info_stop(local); + 
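+ /*
+ * The fail_* labels below unwind registration in reverse order of
+ * setup, each entry point falling through to undo every step that had
+ * already succeeded, so a partially completed registration does not
+ * leak the workqueue, the wiphy registration or the internal scan
+ * request.
+ */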
fail_sta_info: + destroy_workqueue(local->workqueue); + fail_workqueue: + wiphy_unregister(local->hw.wiphy); + fail_wiphy_register: + kfree(local->int_scan_req); + return result; +} +EXPORT_SYMBOL(ieee80211_register_hw); + +void ieee80211_unregister_hw(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + tasklet_kill(&local->tx_pending_tasklet); + tasklet_kill(&local->tasklet); + + pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, + &local->network_latency_notifier); + + rtnl_lock(); + + /* + * At this point, interface list manipulations are fine + * because the driver cannot be handing us frames any + * more and the tasklet is killed. + */ + ieee80211_remove_interfaces(local); + + rtnl_unlock(); + + cancel_work_sync(&local->reconfig_filter); + + ieee80211_clear_tx_pending(local); + sta_info_stop(local); + rate_control_deinitialize(local); + + if (skb_queue_len(&local->skb_queue) || + skb_queue_len(&local->skb_queue_unreliable)) + printk(KERN_WARNING "%s: skb_queue not empty\n", + wiphy_name(local->hw.wiphy)); + skb_queue_purge(&local->skb_queue); + skb_queue_purge(&local->skb_queue_unreliable); + + destroy_workqueue(local->workqueue); + wiphy_unregister(local->hw.wiphy); + ieee80211_wep_free(local); + ieee80211_led_exit(local); + kfree(local->int_scan_req); +} +EXPORT_SYMBOL(ieee80211_unregister_hw); + +void ieee80211_free_hw(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + mutex_destroy(&local->iflist_mtx); + mutex_destroy(&local->scan_mtx); + + wiphy_free(local->hw.wiphy); +} +EXPORT_SYMBOL(ieee80211_free_hw); + +static int __init ieee80211_init(void) +{ + struct sk_buff *skb; + int ret; + + BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb)); + BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) + + IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb)); + + ret = rc80211_minstrel_init(); + if (ret) + return ret; + + ret = rc80211_pid_init(); + if (ret) + goto err_pid; + + ret = ieee80211_iface_init(); + if (ret) + goto err_netdev; + + return 0; + err_netdev: + rc80211_pid_exit(); + err_pid: + rc80211_minstrel_exit(); + + return ret; +} + +static void __exit ieee80211_exit(void) +{ + rc80211_pid_exit(); + rc80211_minstrel_exit(); + + /* + * For key todo, it'll be empty by now but the work + * might still be scheduled. + */ + flush_scheduled_work(); + + if (mesh_allocated) + ieee80211s_stop(); + + ieee80211_iface_exit(); +} + + +subsys_initcall(ieee80211_init); +module_exit(ieee80211_exit); + +MODULE_DESCRIPTION("IEEE 802.11 subsystem"); +MODULE_LICENSE("GPL"); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c new file mode 100644 index 0000000..61080c5 --- /dev/null +++ b/net/mac80211/mesh.c @@ -0,0 +1,763 @@ +/* + * Copyright (c) 2008, 2009 open80211s Ltd. + * Authors: Luis Carlos Cobo + * Javier Cardona + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include "ieee80211_i.h" +#include "mesh.h" + +#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) +#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) +#define IEEE80211_MESH_RANN_INTERVAL (1 * HZ) + +#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01 +#define MESHCONF_CAPAB_FORWARDING 0x08 + +#define TMR_RUNNING_HK 0 +#define TMR_RUNNING_MP 1 +#define TMR_RUNNING_MPR 2 + +int mesh_allocated; +static struct kmem_cache *rm_cache; + +void ieee80211s_init(void) +{ + mesh_pathtbl_init(); + mesh_allocated = 1; + rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry), + 0, 0, NULL); +} + +void ieee80211s_stop(void) +{ + mesh_pathtbl_unregister(); + kmem_cache_destroy(rm_cache); +} + +static void ieee80211_mesh_housekeeping_timer(unsigned long data) +{ + struct ieee80211_sub_if_data *sdata = (void *) data; + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); + + if (local->quiescing) { + set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); + return; + } + + ieee80211_queue_work(&local->hw, &ifmsh->work); +} + +/** + * mesh_matches_local - check if the config of a mesh point matches ours + * + * @ie: information elements of a management frame from the mesh peer + * @sdata: local mesh subif + * + * This function checks if the mesh configuration of a mesh point matches the + * local mesh configuration, i.e. if both nodes belong to the same mesh network. + */ +bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + /* + * As support for each feature is added, check for matching + * - On mesh config capabilities + * - Power Save Support En + * - Sync support enabled + * - Sync support active + * - Sync support required from peer + * - MDA enabled + * - Power management control on fc + */ + if (ifmsh->mesh_id_len == ie->mesh_id_len && + memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && + (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) && + (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) && + (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) && + (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) && + (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)) + return true; + + return false; +} + +/** + * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links + * + * @ie: information elements of a management frame from the mesh peer + */ +bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) +{ + return (ie->mesh_config->meshconf_cap & + MESHCONF_CAPAB_ACCEPT_PLINKS) != 0; +} + +/** + * mesh_accept_plinks_update: update accepting_plink in local mesh beacons + * + * @sdata: mesh interface in which mesh beacons are going to be updated + */ +void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) +{ + bool free_plinks; + + /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0, + * the mesh interface might be able to establish plinks with peers that + * are already on the table but are not on PLINK_ESTAB state. 
However, + * in general the mesh interface is not accepting peer link requests + * from new peers, and that must be reflected in the beacon + */ + free_plinks = mesh_plink_availables(sdata); + + if (free_plinks != sdata->u.mesh.accepting_plinks) + ieee80211_mesh_housekeeping_timer((unsigned long) sdata); +} + +void mesh_ids_set_default(struct ieee80211_if_mesh *sta) +{ + sta->mesh_pp_id = 0; /* HWMP */ + sta->mesh_pm_id = 0; /* Airtime */ + sta->mesh_cc_id = 0; /* Disabled */ + sta->mesh_sp_id = 0; /* Neighbor Offset */ + sta->mesh_auth_id = 0; /* Disabled */ +} + +int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) +{ + int i; + + sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); + if (!sdata->u.mesh.rmc) + return -ENOMEM; + sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1; + for (i = 0; i < RMC_BUCKETS; i++) + INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list); + return 0; +} + +void mesh_rmc_free(struct ieee80211_sub_if_data *sdata) +{ + struct mesh_rmc *rmc = sdata->u.mesh.rmc; + struct rmc_entry *p, *n; + int i; + + if (!sdata->u.mesh.rmc) + return; + + for (i = 0; i < RMC_BUCKETS; i++) + list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) { + list_del(&p->list); + kmem_cache_free(rm_cache, p); + } + + kfree(rmc); + sdata->u.mesh.rmc = NULL; +} + +/** + * mesh_rmc_check - Check frame in recent multicast cache and add if absent. + * + * @sa: source address + * @mesh_hdr: mesh_header + * + * Returns: 0 if the frame is not in the cache, nonzero otherwise. + * + * Checks using the source address and the mesh sequence number if we have + * received this frame lately. If the frame is not in the cache, it is added to + * it. + */ +int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, + struct ieee80211_sub_if_data *sdata) +{ + struct mesh_rmc *rmc = sdata->u.mesh.rmc; + u32 seqnum = 0; + int entries = 0; + u8 idx; + struct rmc_entry *p, *n; + + /* Don't care about endianness since only match matters */ + memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum)); + idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask; + list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) { + ++entries; + if (time_after(jiffies, p->exp_time) || + (entries == RMC_QUEUE_MAX_LEN)) { + list_del(&p->list); + kmem_cache_free(rm_cache, p); + --entries; + } else if ((seqnum == p->seqnum) && + (memcmp(sa, p->sa, ETH_ALEN) == 0)) + return -1; + } + + p = kmem_cache_alloc(rm_cache, GFP_ATOMIC); + if (!p) { + printk(KERN_DEBUG "o11s: could not allocate RMC entry\n"); + return 0; + } + p->seqnum = seqnum; + p->exp_time = jiffies + RMC_TIMEOUT; + memcpy(p->sa, sa, ETH_ALEN); + list_add(&p->list, &rmc->bucket[idx].list); + return 0; +} + +void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + u8 *pos; + int len, i, rate; + u8 neighbors; + + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + len = sband->n_bitrates; + if (len > 8) + len = 8; + pos = skb_put(skb, len + 2); + *pos++ = WLAN_EID_SUPP_RATES; + *pos++ = len; + for (i = 0; i < len; i++) { + rate = sband->bitrates[i].bitrate; + *pos++ = (u8) (rate / 5); + } + + if (sband->n_bitrates > len) { + pos = skb_put(skb, sband->n_bitrates - len + 2); + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos++ = sband->n_bitrates - len; + for (i = len; i < sband->n_bitrates; i++) { + rate = sband->bitrates[i].bitrate; + *pos++ = (u8) (rate / 5); + } + } + + if (sband->band == IEEE80211_BAND_2GHZ) { + pos = skb_put(skb, 2 
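+ /*
+ * Two octets of IE header (element ID and length) plus a one-octet
+ * DS Parameter Set body carrying the current channel number.  The
+ * rates above are written in the 802.11 unit of 500 kbps, hence the
+ * division by 5 of mac80211's 100 kbps bitrate values.
+ */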
+ 1); + *pos++ = WLAN_EID_DS_PARAMS; + *pos++ = 1; + *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq); + } + + pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len); + *pos++ = WLAN_EID_MESH_ID; + *pos++ = sdata->u.mesh.mesh_id_len; + if (sdata->u.mesh.mesh_id_len) + memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len); + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_meshconf_ie)); + *pos++ = WLAN_EID_MESH_CONFIG; + *pos++ = sizeof(struct ieee80211_meshconf_ie); + + /* Active path selection protocol ID */ + *pos++ = sdata->u.mesh.mesh_pp_id; + + /* Active path selection metric ID */ + *pos++ = sdata->u.mesh.mesh_pm_id; + + /* Congestion control mode identifier */ + *pos++ = sdata->u.mesh.mesh_cc_id; + + /* Synchronization protocol identifier */ + *pos++ = sdata->u.mesh.mesh_sp_id; + + /* Authentication Protocol identifier */ + *pos++ = sdata->u.mesh.mesh_auth_id; + + /* Mesh Formation Info - number of neighbors */ + neighbors = atomic_read(&sdata->u.mesh.mshstats.estab_plinks); + /* Number of neighbor mesh STAs or 15 whichever is smaller */ + neighbors = (neighbors > 15) ? 15 : neighbors; + *pos++ = neighbors << 1; + + /* Mesh capability */ + sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata); + *pos = MESHCONF_CAPAB_FORWARDING; + *pos++ |= sdata->u.mesh.accepting_plinks ? + MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; + *pos++ = 0x00; + + return; +} + +u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) +{ + /* Use last four bytes of hw addr and interface index as hash index */ + return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd) + & tbl->hash_mask; +} + +struct mesh_table *mesh_table_alloc(int size_order) +{ + int i; + struct mesh_table *newtbl; + + newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL); + if (!newtbl) + return NULL; + + newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * + (1 << size_order), GFP_KERNEL); + + if (!newtbl->hash_buckets) { + kfree(newtbl); + return NULL; + } + + newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * + (1 << size_order), GFP_KERNEL); + if (!newtbl->hashwlock) { + kfree(newtbl->hash_buckets); + kfree(newtbl); + return NULL; + } + + newtbl->size_order = size_order; + newtbl->hash_mask = (1 << size_order) - 1; + atomic_set(&newtbl->entries, 0); + get_random_bytes(&newtbl->hash_rnd, + sizeof(newtbl->hash_rnd)); + for (i = 0; i <= newtbl->hash_mask; i++) + spin_lock_init(&newtbl->hashwlock[i]); + + return newtbl; +} + + +static void ieee80211_mesh_path_timer(unsigned long data) +{ + struct ieee80211_sub_if_data *sdata = + (struct ieee80211_sub_if_data *) data; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + + if (local->quiescing) { + set_bit(TMR_RUNNING_MP, &ifmsh->timers_running); + return; + } + + ieee80211_queue_work(&local->hw, &ifmsh->work); +} + +static void ieee80211_mesh_path_root_timer(unsigned long data) +{ + struct ieee80211_sub_if_data *sdata = + (struct ieee80211_sub_if_data *) data; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + + set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); + + if (local->quiescing) { + set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running); + return; + } + + ieee80211_queue_work(&local->hw, &ifmsh->work); +} + +void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) +{ + if (ifmsh->mshcfg.dot11MeshHWMPRootMode) + set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); + else { + clear_bit(MESH_WORK_ROOT, 
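+ /*
+ * Root mode was switched off: drop any pending MESH_WORK_ROOT request
+ * and (below) stop the RANN timer, so no further root announcements
+ * go out until dot11MeshHWMPRootMode is enabled again and
+ * ieee80211_mesh_rootpath() re-arms the timer.
+ */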
&ifmsh->wrkq_flags); + /* stop running timer */ + del_timer_sync(&ifmsh->mesh_path_root_timer); + } +} + +/** + * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame + * @hdr: 802.11 frame header + * @fc: frame control field + * @meshda: destination address in the mesh + * @meshsa: source address address in the mesh. Same as TA, as frame is + * locally originated. + * + * Return the length of the 802.11 (does not include a mesh control header) + */ +int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, + const u8 *meshda, const u8 *meshsa) +{ + if (is_multicast_ether_addr(meshda)) { + *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); + /* DA TA SA */ + memcpy(hdr->addr1, meshda, ETH_ALEN); + memcpy(hdr->addr2, meshsa, ETH_ALEN); + memcpy(hdr->addr3, meshsa, ETH_ALEN); + return 24; + } else { + *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | + IEEE80211_FCTL_TODS); + /* RA TA DA SA */ + memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */ + memcpy(hdr->addr2, meshsa, ETH_ALEN); + memcpy(hdr->addr3, meshda, ETH_ALEN); + memcpy(hdr->addr4, meshsa, ETH_ALEN); + return 30; + } +} + +/** + * ieee80211_new_mesh_header - create a new mesh header + * @meshhdr: uninitialized mesh header + * @sdata: mesh interface to be used + * @addr4: addr4 of the mesh frame (1st in ae header) + * may be NULL + * @addr5: addr5 of the mesh frame (1st or 2nd in ae header) + * may be NULL unless addr6 is present + * @addr6: addr6 of the mesh frame (2nd or 3rd in ae header) + * may be NULL unless addr5 is present + * + * Return the header length. + */ +int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, + struct ieee80211_sub_if_data *sdata, char *addr4, + char *addr5, char *addr6) +{ + int aelen = 0; + memset(meshhdr, 0, sizeof(*meshhdr)); + meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; + put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); + sdata->u.mesh.mesh_seqnum++; + if (addr4) { + meshhdr->flags |= MESH_FLAGS_AE_A4; + aelen += ETH_ALEN; + memcpy(meshhdr->eaddr1, addr4, ETH_ALEN); + } + if (addr5 && addr6) { + meshhdr->flags |= MESH_FLAGS_AE_A5_A6; + aelen += 2 * ETH_ALEN; + if (!addr4) { + memcpy(meshhdr->eaddr1, addr5, ETH_ALEN); + memcpy(meshhdr->eaddr2, addr6, ETH_ALEN); + } else { + memcpy(meshhdr->eaddr2, addr5, ETH_ALEN); + memcpy(meshhdr->eaddr3, addr6, ETH_ALEN); + } + } + return 6 + aelen; +} + +static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata, + struct ieee80211_if_mesh *ifmsh) +{ + bool free_plinks; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: running mesh housekeeping\n", + sdata->name); +#endif + + ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); + mesh_path_expire(sdata); + + free_plinks = mesh_plink_availables(sdata); + if (free_plinks != sdata->u.mesh.accepting_plinks) + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); + + mod_timer(&ifmsh->housekeeping_timer, + round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); +} + +static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + mesh_path_tx_root_frame(sdata); + mod_timer(&ifmsh->mesh_path_root_timer, + round_jiffies(jiffies + IEEE80211_MESH_RANN_INTERVAL)); +} + +#ifdef CONFIG_PM +void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + /* might restart the timer but that doesn't matter */ + cancel_work_sync(&ifmsh->work); + + /* use atomic 
bitops in case both timers fire at the same time */ + + if (del_timer_sync(&ifmsh->housekeeping_timer)) + set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); + if (del_timer_sync(&ifmsh->mesh_path_timer)) + set_bit(TMR_RUNNING_MP, &ifmsh->timers_running); + if (del_timer_sync(&ifmsh->mesh_path_root_timer)) + set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running); +} + +void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + if (test_and_clear_bit(TMR_RUNNING_HK, &ifmsh->timers_running)) + add_timer(&ifmsh->housekeeping_timer); + if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running)) + add_timer(&ifmsh->mesh_path_timer); + if (test_and_clear_bit(TMR_RUNNING_MPR, &ifmsh->timers_running)) + add_timer(&ifmsh->mesh_path_root_timer); + ieee80211_mesh_root_setup(ifmsh); +} +#endif + +void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + + set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); + ieee80211_mesh_root_setup(ifmsh); + ieee80211_queue_work(&local->hw, &ifmsh->work); + sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_BEACON_INT); +} + +void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) +{ + del_timer_sync(&sdata->u.mesh.housekeeping_timer); + del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); + /* + * If the timer fired while we waited for it, it will have + * requeued the work. Now the work will be running again + * but will not rearm the timer again because it checks + * whether the interface is running, which, at this point, + * it no longer is. + */ + cancel_work_sync(&sdata->u.mesh.work); + + /* + * When we get here, the interface is marked down. + * Call synchronize_rcu() to wait for the RX path + * should it be using the interface and enqueuing + * frames at this very time on another CPU. 
+ */ + rcu_barrier(); /* Wait for RX path and call_rcu()'s */ + skb_queue_purge(&sdata->u.mesh.skb_queue); +} + +static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, + u16 stype, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_local *local = sdata->local; + struct ieee802_11_elems elems; + struct ieee80211_channel *channel; + u32 supp_rates = 0; + size_t baselen; + int freq; + enum ieee80211_band band = rx_status->band; + + /* ignore ProbeResp to foreign address */ + if (stype == IEEE80211_STYPE_PROBE_RESP && + compare_ether_addr(mgmt->da, sdata->vif.addr)) + return; + + baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; + if (baselen > len) + return; + + ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, + &elems); + + if (elems.ds_params && elems.ds_params_len == 1) + freq = ieee80211_channel_to_frequency(elems.ds_params[0]); + else + freq = rx_status->freq; + + channel = ieee80211_get_channel(local->hw.wiphy, freq); + + if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) + return; + + if (elems.mesh_id && elems.mesh_config && + mesh_matches_local(&elems, sdata)) { + supp_rates = ieee80211_sta_get_rates(local, &elems, band); + + mesh_neighbour_update(mgmt->sa, supp_rates, sdata, + mesh_peer_accepts_plinks(&elems)); + } +} + +static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee80211_rx_status *rx_status) +{ + switch (mgmt->u.action.category) { + case MESH_PLINK_CATEGORY: + mesh_rx_plink_frame(sdata, mgmt, len, rx_status); + break; + case MESH_PATH_SEL_CATEGORY: + mesh_rx_path_sel_frame(sdata, mgmt, len); + break; + } +} + +static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status; + struct ieee80211_if_mesh *ifmsh; + struct ieee80211_mgmt *mgmt; + u16 stype; + + ifmsh = &sdata->u.mesh; + + rx_status = IEEE80211_SKB_RXCB(skb); + mgmt = (struct ieee80211_mgmt *) skb->data; + stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; + + switch (stype) { + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_BEACON: + ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len, + rx_status); + break; + case IEEE80211_STYPE_ACTION: + ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); + break; + } + + kfree_skb(skb); +} + +static void ieee80211_mesh_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, u.mesh.work); + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct sk_buff *skb; + + if (!ieee80211_sdata_running(sdata)) + return; + + if (local->scanning) + return; + + while ((skb = skb_dequeue(&ifmsh->skb_queue))) + ieee80211_mesh_rx_queued_mgmt(sdata, skb); + + if (ifmsh->preq_queue_len && + time_after(jiffies, + ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) + mesh_path_start_discovery(sdata); + + if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) + mesh_mpath_table_grow(); + + if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) + mesh_mpp_table_grow(); + + if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) + ieee80211_mesh_housekeeping(sdata, ifmsh); + + if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags)) + ieee80211_mesh_rootpath(sdata); +} + +void 
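+/*
+ * Note on ieee80211_mesh_work() above: timers and the RX path only set
+ * wrkq_flags bits and queue the work item, so all housekeeping runs in
+ * process context there.  Both table-grow checks test
+ * MESH_WORK_GROW_MPATH_TABLE; the second one, which calls
+ * mesh_mpp_table_grow(), was presumably meant to test
+ * MESH_WORK_GROW_MPP_TABLE instead.
+ */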
ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) + if (ieee80211_vif_is_mesh(&sdata->vif)) + ieee80211_queue_work(&local->hw, &sdata->u.mesh.work); + rcu_read_unlock(); +} + +void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + INIT_WORK(&ifmsh->work, ieee80211_mesh_work); + setup_timer(&ifmsh->housekeeping_timer, + ieee80211_mesh_housekeeping_timer, + (unsigned long) sdata); + skb_queue_head_init(&sdata->u.mesh.skb_queue); + + ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; + ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; + ifmsh->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T; + ifmsh->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR; + ifmsh->mshcfg.dot11MeshTTL = MESH_TTL; + ifmsh->mshcfg.auto_open_plinks = true; + ifmsh->mshcfg.dot11MeshMaxPeerLinks = + MESH_MAX_ESTAB_PLINKS; + ifmsh->mshcfg.dot11MeshHWMPactivePathTimeout = + MESH_PATH_TIMEOUT; + ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval = + MESH_PREQ_MIN_INT; + ifmsh->mshcfg.dot11MeshHWMPnetDiameterTraversalTime = + MESH_DIAM_TRAVERSAL_TIME; + ifmsh->mshcfg.dot11MeshHWMPmaxPREQretries = + MESH_MAX_PREQ_RETRIES; + ifmsh->mshcfg.path_refresh_time = + MESH_PATH_REFRESH_TIME; + ifmsh->mshcfg.min_discovery_timeout = + MESH_MIN_DISCOVERY_TIMEOUT; + ifmsh->accepting_plinks = true; + ifmsh->preq_id = 0; + ifmsh->sn = 0; + atomic_set(&ifmsh->mpaths, 0); + mesh_rmc_init(sdata); + ifmsh->last_preq = jiffies; + /* Allocate all mesh structures when creating the first mesh interface. */ + if (!mesh_allocated) + ieee80211s_init(); + mesh_ids_set_default(ifmsh); + setup_timer(&ifmsh->mesh_path_timer, + ieee80211_mesh_path_timer, + (unsigned long) sdata); + setup_timer(&ifmsh->mesh_path_root_timer, + ieee80211_mesh_path_root_timer, + (unsigned long) sdata); + INIT_LIST_HEAD(&ifmsh->preq_queue.list); + spin_lock_init(&ifmsh->mesh_preq_queue_lock); +} + +ieee80211_rx_result +ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_mgmt *mgmt; + u16 fc; + + if (skb->len < 24) + return RX_DROP_MONITOR; + + mgmt = (struct ieee80211_mgmt *) skb->data; + fc = le16_to_cpu(mgmt->frame_control); + + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_ACTION: + if (skb->len < IEEE80211_MIN_ACTION_SIZE) + return RX_DROP_MONITOR; + /* fall through */ + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_BEACON: + skb_queue_tail(&ifmsh->skb_queue, skb); + ieee80211_queue_work(&local->hw, &ifmsh->work); + return RX_QUEUED; + } + + return RX_CONTINUE; +} diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h new file mode 100644 index 0000000..85562c5 --- /dev/null +++ b/net/mac80211/mesh.h @@ -0,0 +1,347 @@ +/* + * Copyright (c) 2008, 2009 open80211s Ltd. + * Authors: Luis Carlos Cobo + * Javier Cardona + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef IEEE80211S_H +#define IEEE80211S_H + +#include +#include +#include +#include "ieee80211_i.h" + + +/* Data structures */ + +/** + * enum mesh_path_flags - mac80211 mesh path flags + * + * + * + * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding + * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path + * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence + * number + * @MESH_PATH_FIXED: the mesh path has been manually set and should not be + * modified + * @MESH_PATH_RESOLVED: the mesh path can has been resolved + * + * MESH_PATH_RESOLVED is used by the mesh path timer to + * decide when to stop or cancel the mesh path discovery. + */ +enum mesh_path_flags { + MESH_PATH_ACTIVE = BIT(0), + MESH_PATH_RESOLVING = BIT(1), + MESH_PATH_SN_VALID = BIT(2), + MESH_PATH_FIXED = BIT(3), + MESH_PATH_RESOLVED = BIT(4), +}; + +/** + * enum mesh_deferred_task_flags - mac80211 mesh deferred tasks + * + * + * + * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks + * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs + * to grow. + * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to + * grow + * @MESH_WORK_ROOT: the mesh root station needs to send a frame + */ +enum mesh_deferred_task_flags { + MESH_WORK_HOUSEKEEPING, + MESH_WORK_GROW_MPATH_TABLE, + MESH_WORK_GROW_MPP_TABLE, + MESH_WORK_ROOT, +}; + +/** + * struct mesh_path - mac80211 mesh path structure + * + * @dst: mesh path destination mac address + * @sdata: mesh subif + * @next_hop: mesh neighbor to which frames for this destination will be + * forwarded + * @timer: mesh path discovery timer + * @frame_queue: pending queue for frames sent to this destination while the + * path is unresolved + * @sn: target sequence number + * @metric: current metric to this destination + * @hop_count: hops to destination + * @exp_time: in jiffies, when the path will expire or when it expired + * @discovery_timeout: timeout (lapse in jiffies) used for the last discovery + * retry + * @discovery_retries: number of discovery retries + * @flags: mesh path flags, as specified on &enum mesh_path_flags + * @state_lock: mesh path state lock + * + * + * The combination of dst and sdata is unique in the mesh path table. Since the + * next_hop STA is only protected by RCU as well, deleting the STA must also + * remove/substitute the mesh_path structure and wait until that is no longer + * reachable before destroying the STA completely. 
+ */ +struct mesh_path { + u8 dst[ETH_ALEN]; + u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ + struct ieee80211_sub_if_data *sdata; + struct sta_info *next_hop; + struct timer_list timer; + struct sk_buff_head frame_queue; + struct rcu_head rcu; + u32 sn; + u32 metric; + u8 hop_count; + unsigned long exp_time; + u32 discovery_timeout; + u8 discovery_retries; + enum mesh_path_flags flags; + spinlock_t state_lock; +}; + +/** + * struct mesh_table + * + * @hash_buckets: array of hash buckets of the table + * @hashwlock: array of locks to protect write operations, one per bucket + * @hash_mask: 2^size_order - 1, used to compute hash idx + * @hash_rnd: random value used for hash computations + * @entries: number of entries in the table + * @free_node: function to free nodes of the table + * @copy_node: fuction to copy nodes of the table + * @size_order: determines size of the table, there will be 2^size_order hash + * buckets + * @mean_chain_len: maximum average length for the hash buckets' list, if it is + * reached, the table will grow + */ +struct mesh_table { + /* Number of buckets will be 2^N */ + struct hlist_head *hash_buckets; + spinlock_t *hashwlock; /* One per bucket, for add/del */ + unsigned int hash_mask; /* (2^size_order) - 1 */ + __u32 hash_rnd; /* Used for hash generation */ + atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ + void (*free_node) (struct hlist_node *p, bool free_leafs); + int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); + int size_order; + int mean_chain_len; +}; + +/* Recent multicast cache */ +/* RMC_BUCKETS must be a power of 2, maximum 256 */ +#define RMC_BUCKETS 256 +#define RMC_QUEUE_MAX_LEN 4 +#define RMC_TIMEOUT (3 * HZ) + +/** + * struct rmc_entry - entry in the Recent Multicast Cache + * + * @seqnum: mesh sequence number of the frame + * @exp_time: expiration time of the entry, in jiffies + * @sa: source address of the frame + * + * The Recent Multicast Cache keeps track of the latest multicast frames that + * have been received by a mesh interface and discards received multicast frames + * that are found in the cache. + */ +struct rmc_entry { + struct list_head list; + u32 seqnum; + unsigned long exp_time; + u8 sa[ETH_ALEN]; +}; + +struct mesh_rmc { + struct rmc_entry bucket[RMC_BUCKETS]; + u32 idx_mask; +}; + + +/* + * MESH_CFG_COMP_LEN Includes: + * - Active path selection protocol ID. + * - Active path selection metric ID. + * - Congestion control mode identifier. + * - Channel precedence. + * Does not include mesh capabilities, which may vary across nodes in the same + * mesh + */ +#define MESH_CFG_CMP_LEN (IEEE80211_MESH_CONFIG_LEN - 2) + +/* Default values, timeouts in ms */ +#define MESH_TTL 31 +#define MESH_MAX_RETR 3 +#define MESH_RET_T 100 +#define MESH_CONF_T 100 +#define MESH_HOLD_T 100 + +#define MESH_PATH_TIMEOUT 5000 +/* Minimum interval between two consecutive PREQs originated by the same + * interface + */ +#define MESH_PREQ_MIN_INT 10 +#define MESH_DIAM_TRAVERSAL_TIME 50 +/* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before + * timing out. This way it will remain ACTIVE and no data frames will be + * unnecesarily held in the pending queue. 
+ */ +#define MESH_PATH_REFRESH_TIME 1000 +#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) +#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */ + +#define MESH_MAX_PREQ_RETRIES 4 +#define MESH_PATH_EXPIRE (600 * HZ) + +/* Default maximum number of established plinks per interface */ +#define MESH_MAX_ESTAB_PLINKS 32 + +/* Default maximum number of plinks per interface */ +#define MESH_MAX_PLINKS 256 + +/* Maximum number of paths per interface */ +#define MESH_MAX_MPATHS 1024 + +/* Pending ANA approval */ +#define MESH_PLINK_CATEGORY 30 +#define MESH_PATH_SEL_CATEGORY 32 +#define MESH_PATH_SEL_ACTION 0 + +/* PERR reason codes */ +#define PEER_RCODE_UNSPECIFIED 11 +#define PERR_RCODE_NO_ROUTE 12 +#define PERR_RCODE_DEST_UNREACH 13 + +/* Public interfaces */ +/* Various */ +int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, + const u8 *da, const u8 *sa); +int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, + struct ieee80211_sub_if_data *sdata, char *addr4, + char *addr5, char *addr6); +int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, + struct ieee80211_sub_if_data *sdata); +bool mesh_matches_local(struct ieee802_11_elems *ie, + struct ieee80211_sub_if_data *sdata); +void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); +void mesh_mgmt_ies_add(struct sk_buff *skb, + struct ieee80211_sub_if_data *sdata); +void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); +int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); +void ieee80211s_init(void); +void ieee80211s_update_metric(struct ieee80211_local *local, + struct sta_info *stainfo, struct sk_buff *skb); +void ieee80211s_stop(void); +void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); +ieee80211_rx_result +ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); +void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); +void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); +void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); + +/* Mesh paths */ +int mesh_nexthop_lookup(struct sk_buff *skb, + struct ieee80211_sub_if_data *sdata); +void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata); +struct mesh_path *mesh_path_lookup(u8 *dst, + struct ieee80211_sub_if_data *sdata); +struct mesh_path *mpp_path_lookup(u8 *dst, + struct ieee80211_sub_if_data *sdata); +int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata); +struct mesh_path *mesh_path_lookup_by_idx(int idx, + struct ieee80211_sub_if_data *sdata); +void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); +void mesh_path_expire(struct ieee80211_sub_if_data *sdata); +void mesh_path_flush(struct ieee80211_sub_if_data *sdata); +void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len); +int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); +/* Mesh plinks */ +void mesh_neighbour_update(u8 *hw_addr, u32 rates, + struct ieee80211_sub_if_data *sdata, bool add); +bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); +void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); +void mesh_plink_broken(struct sta_info *sta); +void mesh_plink_deactivate(struct sta_info *sta); +int mesh_plink_open(struct sta_info *sta); +void mesh_plink_block(struct sta_info *sta); +void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status); + +/* Private 
interfaces */ +/* Mesh tables */ +struct mesh_table *mesh_table_alloc(int size_order); +void mesh_table_free(struct mesh_table *tbl, bool free_leafs); +void mesh_mpath_table_grow(void); +void mesh_mpp_table_grow(void); +u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, + struct mesh_table *tbl); +/* Mesh paths */ +int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode, + const u8 *ra, struct ieee80211_sub_if_data *sdata); +void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); +void mesh_path_flush_pending(struct mesh_path *mpath); +void mesh_path_tx_pending(struct mesh_path *mpath); +int mesh_pathtbl_init(void); +void mesh_pathtbl_unregister(void); +int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); +void mesh_path_timer(unsigned long data); +void mesh_path_flush_by_nexthop(struct sta_info *sta); +void mesh_path_discard_frame(struct sk_buff *skb, + struct ieee80211_sub_if_data *sdata); +void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); +void mesh_path_restart(struct ieee80211_sub_if_data *sdata); +void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); + +extern int mesh_paths_generation; + +#ifdef CONFIG_MAC80211_MESH +extern int mesh_allocated; + +static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) +{ + return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks - + atomic_read(&sdata->u.mesh.mshstats.estab_plinks); +} + +static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) +{ + return (min_t(long, mesh_plink_free_count(sdata), + MESH_MAX_PLINKS - sdata->local->num_sta)) > 0; +} + +static inline void mesh_path_activate(struct mesh_path *mpath) +{ + mpath->flags |= MESH_PATH_ACTIVE | MESH_PATH_RESOLVED; +} + +#define for_each_mesh_entry(x, p, node, i) \ + for (i = 0; i <= x->hash_mask; i++) \ + hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list) + +void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); + +void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); +void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata); +void mesh_plink_quiesce(struct sta_info *sta); +void mesh_plink_restart(struct sta_info *sta); +#else +#define mesh_allocated 0 +static inline void +ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} +static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) +{} +static inline void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata) +{} +static inline void mesh_plink_quiesce(struct sta_info *sta) {} +static inline void mesh_plink_restart(struct sta_info *sta) {} +#endif + +#endif /* IEEE80211S_H */ diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c new file mode 100644 index 0000000..ce84237 --- /dev/null +++ b/net/mac80211/mesh_hwmp.c @@ -0,0 +1,1017 @@ +/* + * Copyright (c) 2008, 2009 open80211s Ltd. + * Author: Luis Carlos Cobo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "mesh.h" + +#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG +#define mhwmp_dbg(fmt, args...) printk(KERN_DEBUG "Mesh HWMP: " fmt, ##args) +#else +#define mhwmp_dbg(fmt, args...) 
do { (void)(0); } while (0) +#endif + +#define TEST_FRAME_LEN 8192 +#define MAX_METRIC 0xffffffff +#define ARITH_SHIFT 8 + +/* Number of frames buffered per destination for unresolved destinations */ +#define MESH_FRAME_QUEUE_LEN 10 +#define MAX_PREQ_QUEUE_LEN 64 + +/* Destination only */ +#define MP_F_DO 0x1 +/* Reply and forward */ +#define MP_F_RF 0x2 +/* Unknown Sequence Number */ +#define MP_F_USN 0x01 +/* Reason code Present */ +#define MP_F_RCODE 0x02 + +static void mesh_queue_preq(struct mesh_path *, u8); + +static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) +{ + if (ae) + offset += 6; + return get_unaligned_le32(preq_elem + offset); +} + +static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae) +{ + if (ae) + offset += 6; + return get_unaligned_le16(preq_elem + offset); +} + +/* HWMP IE processing macros */ +#define AE_F (1<<6) +#define AE_F_SET(x) (*x & AE_F) +#define PREQ_IE_FLAGS(x) (*(x)) +#define PREQ_IE_HOPCOUNT(x) (*(x + 1)) +#define PREQ_IE_TTL(x) (*(x + 2)) +#define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0) +#define PREQ_IE_ORIG_ADDR(x) (x + 7) +#define PREQ_IE_ORIG_SN(x) u32_field_get(x, 13, 0); +#define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x)); +#define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x)); +#define PREQ_IE_TARGET_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26)) +#define PREQ_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27) +#define PREQ_IE_TARGET_SN(x) u32_field_get(x, 33, AE_F_SET(x)); + + +#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x) +#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x) +#define PREP_IE_TTL(x) PREQ_IE_TTL(x) +#define PREP_IE_ORIG_ADDR(x) (x + 3) +#define PREP_IE_ORIG_SN(x) u32_field_get(x, 9, 0); +#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x)); +#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x)); +#define PREP_IE_TARGET_ADDR(x) (AE_F_SET(x) ? 
x + 27 : x + 21) +#define PREP_IE_TARGET_SN(x) u32_field_get(x, 27, AE_F_SET(x)); + +#define PERR_IE_TTL(x) (*(x)) +#define PERR_IE_TARGET_FLAGS(x) (*(x + 2)) +#define PERR_IE_TARGET_ADDR(x) (x + 3) +#define PERR_IE_TARGET_SN(x) u32_field_get(x, 9, 0); +#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0); + +#define MSEC_TO_TU(x) (x*1000/1024) +#define SN_GT(x, y) ((long) (y) - (long) (x) < 0) +#define SN_LT(x, y) ((long) (x) - (long) (y) < 0) + +#define net_traversal_jiffies(s) \ + msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) +#define default_lifetime(s) \ + MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout) +#define min_preq_int_jiff(s) \ + (msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval)) +#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries) +#define disc_timeout_jiff(s) \ + msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout) + +enum mpath_frame_type { + MPATH_PREQ = 0, + MPATH_PREP, + MPATH_PERR, + MPATH_RANN +}; + +static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + +static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, + u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target, + __le32 target_sn, const u8 *da, u8 hop_count, u8 ttl, + __le32 lifetime, __le32 metric, __le32 preq_id, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); + struct ieee80211_mgmt *mgmt; + u8 *pos; + int ie_len; + + if (!skb) + return -1; + skb_reserve(skb, local->hw.extra_tx_headroom); + /* 25 is the size of the common mgmt part (24) plus the size of the + * common action part (1) + */ + mgmt = (struct ieee80211_mgmt *) + skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); + memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + /* BSSID == SA */ + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; + mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; + + switch (action) { + case MPATH_PREQ: + mhwmp_dbg("sending PREQ to %pM\n", target); + ie_len = 37; + pos = skb_put(skb, 2 + ie_len); + *pos++ = WLAN_EID_PREQ; + break; + case MPATH_PREP: + mhwmp_dbg("sending PREP to %pM\n", target); + ie_len = 31; + pos = skb_put(skb, 2 + ie_len); + *pos++ = WLAN_EID_PREP; + break; + case MPATH_RANN: + mhwmp_dbg("sending RANN from %pM\n", orig_addr); + ie_len = sizeof(struct ieee80211_rann_ie); + pos = skb_put(skb, 2 + ie_len); + *pos++ = WLAN_EID_RANN; + break; + default: + kfree_skb(skb); + return -ENOTSUPP; + break; + } + *pos++ = ie_len; + *pos++ = flags; + *pos++ = hop_count; + *pos++ = ttl; + if (action == MPATH_PREQ) { + memcpy(pos, &preq_id, 4); + pos += 4; + } + memcpy(pos, orig_addr, ETH_ALEN); + pos += ETH_ALEN; + memcpy(pos, &orig_sn, 4); + pos += 4; + if (action != MPATH_RANN) { + memcpy(pos, &lifetime, 4); + pos += 4; + } + memcpy(pos, &metric, 4); + pos += 4; + if (action == MPATH_PREQ) { + /* destination count */ + *pos++ = 1; + *pos++ = target_flags; + } + if (action != MPATH_RANN) { + memcpy(pos, target, ETH_ALEN); + pos += ETH_ALEN; + memcpy(pos, &target_sn, 4); + } + + ieee80211_tx_skb(sdata, skb); + return 0; +} + +/** + * mesh_send_path error - Sends a PERR mesh management frame + * + * @target: broken destination + 
* @target_sn: SN of the broken destination + * @target_rcode: reason code for this PERR + * @ra: node this frame is addressed to + */ +int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, + __le16 target_rcode, const u8 *ra, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); + struct ieee80211_mgmt *mgmt; + u8 *pos; + int ie_len; + + if (!skb) + return -1; + skb_reserve(skb, local->hw.extra_tx_headroom); + /* 25 is the size of the common mgmt part (24) plus the size of the + * common action part (1) + */ + mgmt = (struct ieee80211_mgmt *) + skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); + memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + memcpy(mgmt->da, ra, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + /* BSSID is left zeroed, wildcard value */ + mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; + mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; + ie_len = 15; + pos = skb_put(skb, 2 + ie_len); + *pos++ = WLAN_EID_PERR; + *pos++ = ie_len; + /* ttl */ + *pos++ = MESH_TTL; + /* number of destinations */ + *pos++ = 1; + /* + * flags bit, bit 1 is unset if we know the sequence number and + * bit 2 is set if we have a reason code + */ + *pos = 0; + if (!target_sn) + *pos |= MP_F_USN; + if (target_rcode) + *pos |= MP_F_RCODE; + pos++; + memcpy(pos, target, ETH_ALEN); + pos += ETH_ALEN; + memcpy(pos, &target_sn, 4); + pos += 4; + memcpy(pos, &target_rcode, 2); + + ieee80211_tx_skb(sdata, skb); + return 0; +} + +void ieee80211s_update_metric(struct ieee80211_local *local, + struct sta_info *stainfo, struct sk_buff *skb) +{ + struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + int failed; + + if (!ieee80211_is_data(hdr->frame_control)) + return; + + failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK); + + /* moving average, scaled to 100 */ + stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed); + if (stainfo->fail_avg > 95) + mesh_plink_broken(stainfo); +} + +static u32 airtime_link_metric_get(struct ieee80211_local *local, + struct sta_info *sta) +{ + struct ieee80211_supported_band *sband; + /* This should be adjusted for each device */ + int device_constant = 1 << ARITH_SHIFT; + int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT; + int s_unit = 1 << ARITH_SHIFT; + int rate, err; + u32 tx_time, estimated_retx; + u64 result; + + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + + if (sta->fail_avg >= 100) + return MAX_METRIC; + + if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS) + return MAX_METRIC; + + err = (sta->fail_avg << ARITH_SHIFT) / 100; + + /* bitrate is in units of 100 Kbps, while we need rate in units of + * 1Mbps. This will be corrected on tx_time computation. 
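+ *
+ * Worked example with assumed values: last_tx_rate of 54 Mbps
+ * (rate == 540) and fail_avg == 10 give err = (10 << 8) / 100 = 25,
+ * so estimated_retx = 65536 / (256 - 25) = 283 and
+ * tx_time = 256 + (10 * 2097152) / 540 = 39092, for a final metric of
+ * (39092 * 283) >> 16 = 168.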
+ */ + rate = sband->bitrates[sta->last_tx_rate.idx].bitrate; + tx_time = (device_constant + 10 * test_frame_len / rate); + estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); + result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ; + return (u32)result; +} + +/** + * hwmp_route_info_get - Update routing info to originator and transmitter + * + * @sdata: local mesh subif + * @mgmt: mesh management frame + * @hwmp_ie: hwmp information element (PREP or PREQ) + * + * This function updates the path routing information to the originator and the + * transmitter of a HWMP PREQ or PREP frame. + * + * Returns: metric to frame originator or 0 if the frame should not be further + * processed + * + * Notes: this function is the only place (besides user-provided info) where + * path routing information is updated. + */ +static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + u8 *hwmp_ie, enum mpath_frame_type action) +{ + struct ieee80211_local *local = sdata->local; + struct mesh_path *mpath; + struct sta_info *sta; + bool fresh_info; + u8 *orig_addr, *ta; + u32 orig_sn, orig_metric; + unsigned long orig_lifetime, exp_time; + u32 last_hop_metric, new_metric; + bool process = true; + + rcu_read_lock(); + sta = sta_info_get(sdata, mgmt->sa); + if (!sta) { + rcu_read_unlock(); + return 0; + } + + last_hop_metric = airtime_link_metric_get(local, sta); + /* Update and check originator routing info */ + fresh_info = true; + + switch (action) { + case MPATH_PREQ: + orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie); + orig_sn = PREQ_IE_ORIG_SN(hwmp_ie); + orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie); + orig_metric = PREQ_IE_METRIC(hwmp_ie); + break; + case MPATH_PREP: + /* Originator here refers to the MP that was the destination in + * the Path Request. The draft refers to that MP as the + * destination address, even though usually it is the origin of + * the PREP frame. We divert from the nomenclature in the draft + * so that we can easily use a single function to gather path + * information from both PREQ and PREP frames. + */ + orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie); + orig_sn = PREP_IE_ORIG_SN(hwmp_ie); + orig_lifetime = PREP_IE_LIFETIME(hwmp_ie); + orig_metric = PREP_IE_METRIC(hwmp_ie); + break; + default: + rcu_read_unlock(); + return 0; + } + new_metric = orig_metric + last_hop_metric; + if (new_metric < orig_metric) + new_metric = MAX_METRIC; + exp_time = TU_TO_EXP_TIME(orig_lifetime); + + if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) { + /* This MP is the originator, we are not interested in this + * frame, except for updating transmitter's path info. + */ + process = false; + fresh_info = false; + } else { + mpath = mesh_path_lookup(orig_addr, sdata); + if (mpath) { + spin_lock_bh(&mpath->state_lock); + if (mpath->flags & MESH_PATH_FIXED) + fresh_info = false; + else if ((mpath->flags & MESH_PATH_ACTIVE) && + (mpath->flags & MESH_PATH_SN_VALID)) { + if (SN_GT(mpath->sn, orig_sn) || + (mpath->sn == orig_sn && + action == MPATH_PREQ && + new_metric > mpath->metric)) { + process = false; + fresh_info = false; + } + } + } else { + mesh_path_add(orig_addr, sdata); + mpath = mesh_path_lookup(orig_addr, sdata); + if (!mpath) { + rcu_read_unlock(); + return 0; + } + spin_lock_bh(&mpath->state_lock); + } + + if (fresh_info) { + mesh_path_assign_nexthop(mpath, sta); + mpath->flags |= MESH_PATH_SN_VALID; + mpath->metric = new_metric; + mpath->sn = orig_sn; + mpath->exp_time = time_after(mpath->exp_time, exp_time) + ? 
mpath->exp_time : exp_time; + mesh_path_activate(mpath); + spin_unlock_bh(&mpath->state_lock); + mesh_path_tx_pending(mpath); + /* draft says preq_id should be saved to, but there does + * not seem to be any use for it, skipping by now + */ + } else + spin_unlock_bh(&mpath->state_lock); + } + + /* Update and check transmitter routing info */ + ta = mgmt->sa; + if (memcmp(orig_addr, ta, ETH_ALEN) == 0) + fresh_info = false; + else { + fresh_info = true; + + mpath = mesh_path_lookup(ta, sdata); + if (mpath) { + spin_lock_bh(&mpath->state_lock); + if ((mpath->flags & MESH_PATH_FIXED) || + ((mpath->flags & MESH_PATH_ACTIVE) && + (last_hop_metric > mpath->metric))) + fresh_info = false; + } else { + mesh_path_add(ta, sdata); + mpath = mesh_path_lookup(ta, sdata); + if (!mpath) { + rcu_read_unlock(); + return 0; + } + spin_lock_bh(&mpath->state_lock); + } + + if (fresh_info) { + mesh_path_assign_nexthop(mpath, sta); + mpath->flags &= ~MESH_PATH_SN_VALID; + mpath->metric = last_hop_metric; + mpath->exp_time = time_after(mpath->exp_time, exp_time) + ? mpath->exp_time : exp_time; + mesh_path_activate(mpath); + spin_unlock_bh(&mpath->state_lock); + mesh_path_tx_pending(mpath); + } else + spin_unlock_bh(&mpath->state_lock); + } + + rcu_read_unlock(); + + return process ? new_metric : 0; +} + +static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + u8 *preq_elem, u32 metric) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_path *mpath; + u8 *target_addr, *orig_addr; + u8 target_flags, ttl; + u32 orig_sn, target_sn, lifetime; + bool reply = false; + bool forward = true; + + /* Update target SN, if present */ + target_addr = PREQ_IE_TARGET_ADDR(preq_elem); + orig_addr = PREQ_IE_ORIG_ADDR(preq_elem); + target_sn = PREQ_IE_TARGET_SN(preq_elem); + orig_sn = PREQ_IE_ORIG_SN(preq_elem); + target_flags = PREQ_IE_TARGET_F(preq_elem); + + mhwmp_dbg("received PREQ from %pM\n", orig_addr); + + if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) { + mhwmp_dbg("PREQ is for us\n"); + forward = false; + reply = true; + metric = 0; + if (time_after(jiffies, ifmsh->last_sn_update + + net_traversal_jiffies(sdata)) || + time_before(jiffies, ifmsh->last_sn_update)) { + target_sn = ++ifmsh->sn; + ifmsh->last_sn_update = jiffies; + } + } else { + rcu_read_lock(); + mpath = mesh_path_lookup(target_addr, sdata); + if (mpath) { + if ((!(mpath->flags & MESH_PATH_SN_VALID)) || + SN_LT(mpath->sn, target_sn)) { + mpath->sn = target_sn; + mpath->flags |= MESH_PATH_SN_VALID; + } else if ((!(target_flags & MP_F_DO)) && + (mpath->flags & MESH_PATH_ACTIVE)) { + reply = true; + metric = mpath->metric; + target_sn = mpath->sn; + if (target_flags & MP_F_RF) + target_flags |= MP_F_DO; + else + forward = false; + } + } + rcu_read_unlock(); + } + + if (reply) { + lifetime = PREQ_IE_LIFETIME(preq_elem); + ttl = ifmsh->mshcfg.dot11MeshTTL; + if (ttl != 0) { + mhwmp_dbg("replying to the PREQ\n"); + mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr, + cpu_to_le32(target_sn), 0, orig_addr, + cpu_to_le32(orig_sn), mgmt->sa, 0, ttl, + cpu_to_le32(lifetime), cpu_to_le32(metric), + 0, sdata); + } else + ifmsh->mshstats.dropped_frames_ttl++; + } + + if (forward) { + u32 preq_id; + u8 hopcount, flags; + + ttl = PREQ_IE_TTL(preq_elem); + lifetime = PREQ_IE_LIFETIME(preq_elem); + if (ttl <= 1) { + ifmsh->mshstats.dropped_frames_ttl++; + return; + } + mhwmp_dbg("forwarding the PREQ from %pM\n", orig_addr); + --ttl; + flags = PREQ_IE_FLAGS(preq_elem); + preq_id = 
PREQ_IE_PREQ_ID(preq_elem); + hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; + mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, + cpu_to_le32(orig_sn), target_flags, target_addr, + cpu_to_le32(target_sn), broadcast_addr, + hopcount, ttl, cpu_to_le32(lifetime), + cpu_to_le32(metric), cpu_to_le32(preq_id), + sdata); + ifmsh->mshstats.fwded_mcast++; + ifmsh->mshstats.fwded_frames++; + } +} + + +static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + u8 *prep_elem, u32 metric) +{ + struct mesh_path *mpath; + u8 *target_addr, *orig_addr; + u8 ttl, hopcount, flags; + u8 next_hop[ETH_ALEN]; + u32 target_sn, orig_sn, lifetime; + + mhwmp_dbg("received PREP from %pM\n", PREP_IE_ORIG_ADDR(prep_elem)); + + /* Note that we divert from the draft nomenclature and denominate + * destination to what the draft refers to as originator. So in this + * function destination refers to the final destination of the PREP, + * which corresponds with the originator of the PREQ which this PREP + * replies to + */ + target_addr = PREP_IE_TARGET_ADDR(prep_elem); + if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) + /* destination, no forwarding required */ + return; + + ttl = PREP_IE_TTL(prep_elem); + if (ttl <= 1) { + sdata->u.mesh.mshstats.dropped_frames_ttl++; + return; + } + + rcu_read_lock(); + mpath = mesh_path_lookup(target_addr, sdata); + if (mpath) + spin_lock_bh(&mpath->state_lock); + else + goto fail; + if (!(mpath->flags & MESH_PATH_ACTIVE)) { + spin_unlock_bh(&mpath->state_lock); + goto fail; + } + memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN); + spin_unlock_bh(&mpath->state_lock); + --ttl; + flags = PREP_IE_FLAGS(prep_elem); + lifetime = PREP_IE_LIFETIME(prep_elem); + hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1; + orig_addr = PREP_IE_ORIG_ADDR(prep_elem); + target_sn = PREP_IE_TARGET_SN(prep_elem); + orig_sn = PREP_IE_ORIG_SN(prep_elem); + + mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, + cpu_to_le32(orig_sn), 0, target_addr, + cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount, + ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), + 0, sdata); + rcu_read_unlock(); + + sdata->u.mesh.mshstats.fwded_unicast++; + sdata->u.mesh.mshstats.fwded_frames++; + return; + +fail: + rcu_read_unlock(); + sdata->u.mesh.mshstats.dropped_frames_no_route++; + return; +} + +static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, u8 *perr_elem) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_path *mpath; + u8 ttl; + u8 *ta, *target_addr; + u8 target_flags; + u32 target_sn; + u16 target_rcode; + + ta = mgmt->sa; + ttl = PERR_IE_TTL(perr_elem); + if (ttl <= 1) { + ifmsh->mshstats.dropped_frames_ttl++; + return; + } + ttl--; + target_flags = PERR_IE_TARGET_FLAGS(perr_elem); + target_addr = PERR_IE_TARGET_ADDR(perr_elem); + target_sn = PERR_IE_TARGET_SN(perr_elem); + target_rcode = PERR_IE_TARGET_RCODE(perr_elem); + + rcu_read_lock(); + mpath = mesh_path_lookup(target_addr, sdata); + if (mpath) { + spin_lock_bh(&mpath->state_lock); + if (mpath->flags & MESH_PATH_ACTIVE && + memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 && + (!(mpath->flags & MESH_PATH_SN_VALID) || + SN_GT(target_sn, mpath->sn))) { + mpath->flags &= ~MESH_PATH_ACTIVE; + mpath->sn = target_sn; + spin_unlock_bh(&mpath->state_lock); + mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn), + cpu_to_le16(target_rcode), + broadcast_addr, sdata); + } else + spin_unlock_bh(&mpath->state_lock); + } + rcu_read_unlock();
+} + +static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + struct ieee80211_rann_ie *rann) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_path *mpath; + u8 *ta; + u8 ttl, flags, hopcount; + u8 *orig_addr; + u32 orig_sn, metric; + + ta = mgmt->sa; + ttl = rann->rann_ttl; + if (ttl <= 1) { + ifmsh->mshstats.dropped_frames_ttl++; + return; + } + ttl--; + flags = rann->rann_flags; + orig_addr = rann->rann_addr; + orig_sn = rann->rann_seq; + hopcount = rann->rann_hopcount; + hopcount++; + metric = rann->rann_metric; + mhwmp_dbg("received RANN from %pM\n", orig_addr); + + rcu_read_lock(); + mpath = mesh_path_lookup(orig_addr, sdata); + if (!mpath) { + mesh_path_add(orig_addr, sdata); + mpath = mesh_path_lookup(orig_addr, sdata); + if (!mpath) { + rcu_read_unlock(); + sdata->u.mesh.mshstats.dropped_frames_no_route++; + return; + } + mesh_queue_preq(mpath, + PREQ_Q_F_START | PREQ_Q_F_REFRESH); + } + if (mpath->sn < orig_sn) { + mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, + cpu_to_le32(orig_sn), + 0, NULL, 0, broadcast_addr, + hopcount, ttl, 0, + cpu_to_le32(metric + mpath->metric), + 0, sdata); + mpath->sn = orig_sn; + } + rcu_read_unlock(); +} + + +void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + struct ieee802_11_elems elems; + size_t baselen; + u32 last_hop_metric; + + /* need action_code */ + if (len < IEEE80211_MIN_ACTION_SIZE + 1) + return; + + baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; + ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, + len - baselen, &elems); + + if (elems.preq) { + if (elems.preq_len != 37) + /* Right now we support just 1 destination and no AE */ + return; + last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq, + MPATH_PREQ); + if (last_hop_metric) + hwmp_preq_frame_process(sdata, mgmt, elems.preq, + last_hop_metric); + } + if (elems.prep) { + if (elems.prep_len != 31) + /* Right now we support no AE */ + return; + last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep, + MPATH_PREP); + if (last_hop_metric) + hwmp_prep_frame_process(sdata, mgmt, elems.prep, + last_hop_metric); + } + if (elems.perr) { + if (elems.perr_len != 15) + /* Right now we support only one destination per PERR */ + return; + hwmp_perr_frame_process(sdata, mgmt, elems.perr); + } + if (elems.rann) + hwmp_rann_frame_process(sdata, mgmt, elems.rann); +} + +/** + * mesh_queue_preq - queue a PREQ to a given destination + * + * @mpath: mesh path to discover + * @flags: special attributes of the PREQ to be sent + * + * Locking: the function must be called from within a rcu read lock block. 
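+ * The queue entry is allocated with GFP_ATOMIC and the queue is protected by + * mesh_preq_queue_lock; if the queue is already full (MAX_PREQ_QUEUE_LEN) the + * request is dropped.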
+ * + */ +static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) +{ + struct ieee80211_sub_if_data *sdata = mpath->sdata; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_preq_queue *preq_node; + + preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC); + if (!preq_node) { + mhwmp_dbg("could not allocate PREQ node\n"); + return; + } + + spin_lock(&ifmsh->mesh_preq_queue_lock); + if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) { + spin_unlock(&ifmsh->mesh_preq_queue_lock); + kfree(preq_node); + if (printk_ratelimit()) + mhwmp_dbg("PREQ node queue full\n"); + return; + } + + memcpy(preq_node->dst, mpath->dst, ETH_ALEN); + preq_node->flags = flags; + + list_add_tail(&preq_node->list, &ifmsh->preq_queue.list); + ++ifmsh->preq_queue_len; + spin_unlock(&ifmsh->mesh_preq_queue_lock); + + if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) + ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); + + else if (time_before(jiffies, ifmsh->last_preq)) { + /* avoid long wait if did not send preqs for a long time + * and jiffies wrapped around + */ + ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; + ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); + } else + mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + + min_preq_int_jiff(sdata)); +} + +/** + * mesh_path_start_discovery - launch a path discovery from the PREQ queue + * + * @sdata: local mesh subif + */ +void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_preq_queue *preq_node; + struct mesh_path *mpath; + u8 ttl, target_flags; + u32 lifetime; + + spin_lock_bh(&ifmsh->mesh_preq_queue_lock); + if (!ifmsh->preq_queue_len || + time_before(jiffies, ifmsh->last_preq + + min_preq_int_jiff(sdata))) { + spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); + return; + } + + preq_node = list_first_entry(&ifmsh->preq_queue.list, + struct mesh_preq_queue, list); + list_del(&preq_node->list); + --ifmsh->preq_queue_len; + spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); + + rcu_read_lock(); + mpath = mesh_path_lookup(preq_node->dst, sdata); + if (!mpath) + goto enddiscovery; + + spin_lock_bh(&mpath->state_lock); + if (preq_node->flags & PREQ_Q_F_START) { + if (mpath->flags & MESH_PATH_RESOLVING) { + spin_unlock_bh(&mpath->state_lock); + goto enddiscovery; + } else { + mpath->flags &= ~MESH_PATH_RESOLVED; + mpath->flags |= MESH_PATH_RESOLVING; + mpath->discovery_retries = 0; + mpath->discovery_timeout = disc_timeout_jiff(sdata); + } + } else if (!(mpath->flags & MESH_PATH_RESOLVING) || + mpath->flags & MESH_PATH_RESOLVED) { + mpath->flags &= ~MESH_PATH_RESOLVING; + spin_unlock_bh(&mpath->state_lock); + goto enddiscovery; + } + + ifmsh->last_preq = jiffies; + + if (time_after(jiffies, ifmsh->last_sn_update + + net_traversal_jiffies(sdata)) || + time_before(jiffies, ifmsh->last_sn_update)) { + ++ifmsh->sn; + sdata->u.mesh.last_sn_update = jiffies; + } + lifetime = default_lifetime(sdata); + ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; + if (ttl == 0) { + sdata->u.mesh.mshstats.dropped_frames_ttl++; + spin_unlock_bh(&mpath->state_lock); + goto enddiscovery; + } + + if (preq_node->flags & PREQ_Q_F_REFRESH) + target_flags = MP_F_DO; + else + target_flags = MP_F_RF; + + spin_unlock_bh(&mpath->state_lock); + mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, + cpu_to_le32(ifmsh->sn), target_flags, mpath->dst, + cpu_to_le32(mpath->sn), broadcast_addr, 0, + ttl, cpu_to_le32(lifetime), 0, + cpu_to_le32(ifmsh->preq_id++), 
sdata); + mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); + +enddiscovery: + rcu_read_unlock(); + kfree(preq_node); +} + +/** + * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame + * + * @skb: 802.11 frame to be sent + * @sdata: network subif the frame will be sent through + * + * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is + * found, the function will start a path discovery and queue the frame so it is + * sent when the path is resolved. This means the caller must not free the skb + * in this case. + */ +int mesh_nexthop_lookup(struct sk_buff *skb, + struct ieee80211_sub_if_data *sdata) +{ + struct sk_buff *skb_to_free = NULL; + struct mesh_path *mpath; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u8 *target_addr = hdr->addr3; + int err = 0; + + rcu_read_lock(); + mpath = mesh_path_lookup(target_addr, sdata); + + if (!mpath) { + mesh_path_add(target_addr, sdata); + mpath = mesh_path_lookup(target_addr, sdata); + if (!mpath) { + sdata->u.mesh.mshstats.dropped_frames_no_route++; + err = -ENOSPC; + goto endlookup; + } + } + + if (mpath->flags & MESH_PATH_ACTIVE) { + if (time_after(jiffies, + mpath->exp_time - + msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && + !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) && + !(mpath->flags & MESH_PATH_RESOLVING) && + !(mpath->flags & MESH_PATH_FIXED)) { + mesh_queue_preq(mpath, + PREQ_Q_F_START | PREQ_Q_F_REFRESH); + } + memcpy(hdr->addr1, mpath->next_hop->sta.addr, ETH_ALEN); + } else { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + if (!(mpath->flags & MESH_PATH_RESOLVING)) { + /* Start discovery only if it is not running yet */ + mesh_queue_preq(mpath, PREQ_Q_F_START); + } + + if (skb_queue_len(&mpath->frame_queue) >= + MESH_FRAME_QUEUE_LEN) + skb_to_free = skb_dequeue(&mpath->frame_queue); + + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; + skb_queue_tail(&mpath->frame_queue, skb); + if (skb_to_free) + mesh_path_discard_frame(skb_to_free, sdata); + err = -ENOENT; + } + +endlookup: + rcu_read_unlock(); + return err; +} + +void mesh_path_timer(unsigned long data) +{ + struct ieee80211_sub_if_data *sdata; + struct mesh_path *mpath; + + rcu_read_lock(); + mpath = (struct mesh_path *) data; + mpath = rcu_dereference(mpath); + if (!mpath) + goto endmpathtimer; + sdata = mpath->sdata; + + if (sdata->local->quiescing) { + rcu_read_unlock(); + return; + } + + spin_lock_bh(&mpath->state_lock); + if (mpath->flags & MESH_PATH_RESOLVED || + (!(mpath->flags & MESH_PATH_RESOLVING))) + mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); + else if (mpath->discovery_retries < max_preq_retries(sdata)) { + ++mpath->discovery_retries; + mpath->discovery_timeout *= 2; + mesh_queue_preq(mpath, 0); + } else { + mpath->flags = 0; + mpath->exp_time = jiffies; + mesh_path_flush_pending(mpath); + } + + spin_unlock_bh(&mpath->state_lock); +endmpathtimer: + rcu_read_unlock(); +} + +void +mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + + mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr, + cpu_to_le32(++ifmsh->sn), + 0, NULL, 0, broadcast_addr, + 0, MESH_TTL, 0, 0, 0, sdata); +} diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c new file mode 100644 index 0000000..2312efe --- /dev/null +++ b/net/mac80211/mesh_pathtbl.c @@ -0,0 +1,742 @@ +/* + * Copyright (c) 2008, 2009 open80211s Ltd. 
+ * Author: Luis Carlos Cobo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include "ieee80211_i.h" +#include "mesh.h" + +/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ +#define INIT_PATHS_SIZE_ORDER 2 + +/* Keep the mean chain length below this constant */ +#define MEAN_CHAIN_LEN 2 + +#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \ + time_after(jiffies, mpath->exp_time) && \ + !(mpath->flags & MESH_PATH_FIXED)) + +struct mpath_node { + struct hlist_node list; + struct rcu_head rcu; + /* This indirection allows two different tables to point to the same + * mesh_path structure, useful when resizing + */ + struct mesh_path *mpath; +}; + +static struct mesh_table *mesh_paths; +static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ + +int mesh_paths_generation; +static void __mesh_table_free(struct mesh_table *tbl) +{ + kfree(tbl->hash_buckets); + kfree(tbl->hashwlock); + kfree(tbl); +} + +void mesh_table_free(struct mesh_table *tbl, bool free_leafs) +{ + struct hlist_head *mesh_hash; + struct hlist_node *p, *q; + int i; + + mesh_hash = tbl->hash_buckets; + for (i = 0; i <= tbl->hash_mask; i++) { + spin_lock(&tbl->hashwlock[i]); + hlist_for_each_safe(p, q, &mesh_hash[i]) { + tbl->free_node(p, free_leafs); + atomic_dec(&tbl->entries); + } + spin_unlock(&tbl->hashwlock[i]); + } + __mesh_table_free(tbl); +} + +static struct mesh_table *mesh_table_grow(struct mesh_table *tbl) +{ + struct mesh_table *newtbl; + struct hlist_head *oldhash; + struct hlist_node *p, *q; + int i; + + if (atomic_read(&tbl->entries) + < tbl->mean_chain_len * (tbl->hash_mask + 1)) + goto endgrow; + + newtbl = mesh_table_alloc(tbl->size_order + 1); + if (!newtbl) + goto endgrow; + + newtbl->free_node = tbl->free_node; + newtbl->mean_chain_len = tbl->mean_chain_len; + newtbl->copy_node = tbl->copy_node; + atomic_set(&newtbl->entries, atomic_read(&tbl->entries)); + + oldhash = tbl->hash_buckets; + for (i = 0; i <= tbl->hash_mask; i++) + hlist_for_each(p, &oldhash[i]) + if (tbl->copy_node(p, newtbl) < 0) + goto errcopy; + + return newtbl; + +errcopy: + for (i = 0; i <= newtbl->hash_mask; i++) { + hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) + tbl->free_node(p, 0); + } + __mesh_table_free(newtbl); +endgrow: + return NULL; +} + + +/* This lock will have the grow table function as writer and add / delete nodes + * as readers. When reading the table (i.e. 
doing lookups) we are well protected + * by RCU + */ +static DEFINE_RWLOCK(pathtbl_resize_lock); + +/** + * + * mesh_path_assign_nexthop - update mesh path next hop + * + * @mpath: mesh path to update + * @sta: next hop to assign + * + * Locking: mpath->state_lock must be held when calling this function + */ +void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) +{ + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + struct sk_buff_head tmpq; + unsigned long flags; + + rcu_assign_pointer(mpath->next_hop, sta); + + __skb_queue_head_init(&tmpq); + + spin_lock_irqsave(&mpath->frame_queue.lock, flags); + + while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { + hdr = (struct ieee80211_hdr *) skb->data; + memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); + __skb_queue_tail(&tmpq, skb); + } + + skb_queue_splice(&tmpq, &mpath->frame_queue); + spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); +} + + +/** + * mesh_path_lookup - look up a path in the mesh path table + * @dst: hardware address (ETH_ALEN length) of destination + * @sdata: local subif + * + * Returns: pointer to the mesh path structure, or NULL if not found + * + * Locking: must be called within a read rcu section. + */ +struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) +{ + struct mesh_path *mpath; + struct hlist_node *n; + struct hlist_head *bucket; + struct mesh_table *tbl; + struct mpath_node *node; + + tbl = rcu_dereference(mesh_paths); + + bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; + hlist_for_each_entry_rcu(node, n, bucket, list) { + mpath = node->mpath; + if (mpath->sdata == sdata && + memcmp(dst, mpath->dst, ETH_ALEN) == 0) { + if (MPATH_EXPIRED(mpath)) { + spin_lock_bh(&mpath->state_lock); + if (MPATH_EXPIRED(mpath)) + mpath->flags &= ~MESH_PATH_ACTIVE; + spin_unlock_bh(&mpath->state_lock); + } + return mpath; + } + } + return NULL; +} + +struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) +{ + struct mesh_path *mpath; + struct hlist_node *n; + struct hlist_head *bucket; + struct mesh_table *tbl; + struct mpath_node *node; + + tbl = rcu_dereference(mpp_paths); + + bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; + hlist_for_each_entry_rcu(node, n, bucket, list) { + mpath = node->mpath; + if (mpath->sdata == sdata && + memcmp(dst, mpath->dst, ETH_ALEN) == 0) { + if (MPATH_EXPIRED(mpath)) { + spin_lock_bh(&mpath->state_lock); + if (MPATH_EXPIRED(mpath)) + mpath->flags &= ~MESH_PATH_ACTIVE; + spin_unlock_bh(&mpath->state_lock); + } + return mpath; + } + } + return NULL; +} + + +/** + * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index + * @idx: index + * @sdata: local subif, or NULL for all entries + * + * Returns: pointer to the mesh path structure, or NULL if not found. + * + * Locking: must be called within a read rcu section. 
+ */ +struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) +{ + struct mpath_node *node; + struct hlist_node *p; + int i; + int j = 0; + + for_each_mesh_entry(mesh_paths, p, node, i) { + if (sdata && node->mpath->sdata != sdata) + continue; + if (j++ == idx) { + if (MPATH_EXPIRED(node->mpath)) { + spin_lock_bh(&node->mpath->state_lock); + if (MPATH_EXPIRED(node->mpath)) + node->mpath->flags &= ~MESH_PATH_ACTIVE; + spin_unlock_bh(&node->mpath->state_lock); + } + return node->mpath; + } + } + + return NULL; +} + +/** + * mesh_path_add - allocate and add a new path to the mesh path table + * @addr: destination address of the path (ETH_ALEN length) + * @sdata: local subif + * + * Returns: 0 on success + * + * State: the initial state of the new path is set to 0 + */ +int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + struct mesh_path *mpath, *new_mpath; + struct mpath_node *node, *new_node; + struct hlist_head *bucket; + struct hlist_node *n; + int grow = 0; + int err = 0; + u32 hash_idx; + + if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) + /* never add ourselves as neighbours */ + return -ENOTSUPP; + + if (is_multicast_ether_addr(dst)) + return -ENOTSUPP; + + if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) + return -ENOSPC; + + err = -ENOMEM; + new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); + if (!new_mpath) + goto err_path_alloc; + + new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); + if (!new_node) + goto err_node_alloc; + + read_lock(&pathtbl_resize_lock); + memcpy(new_mpath->dst, dst, ETH_ALEN); + new_mpath->sdata = sdata; + new_mpath->flags = 0; + skb_queue_head_init(&new_mpath->frame_queue); + new_node->mpath = new_mpath; + new_mpath->timer.data = (unsigned long) new_mpath; + new_mpath->timer.function = mesh_path_timer; + new_mpath->exp_time = jiffies; + spin_lock_init(&new_mpath->state_lock); + init_timer(&new_mpath->timer); + + hash_idx = mesh_table_hash(dst, sdata, mesh_paths); + bucket = &mesh_paths->hash_buckets[hash_idx]; + + spin_lock(&mesh_paths->hashwlock[hash_idx]); + + err = -EEXIST; + hlist_for_each_entry(node, n, bucket, list) { + mpath = node->mpath; + if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) + goto err_exists; + } + + hlist_add_head_rcu(&new_node->list, bucket); + if (atomic_inc_return(&mesh_paths->entries) >= + mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) + grow = 1; + + mesh_paths_generation++; + + spin_unlock(&mesh_paths->hashwlock[hash_idx]); + read_unlock(&pathtbl_resize_lock); + if (grow) { + set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); + ieee80211_queue_work(&local->hw, &ifmsh->work); + } + return 0; + +err_exists: + spin_unlock(&mesh_paths->hashwlock[hash_idx]); + read_unlock(&pathtbl_resize_lock); + kfree(new_node); +err_node_alloc: + kfree(new_mpath); +err_path_alloc: + atomic_dec(&sdata->u.mesh.mpaths); + return err; +} + +void mesh_mpath_table_grow(void) +{ + struct mesh_table *oldtbl, *newtbl; + + write_lock(&pathtbl_resize_lock); + oldtbl = mesh_paths; + newtbl = mesh_table_grow(mesh_paths); + if (!newtbl) { + write_unlock(&pathtbl_resize_lock); + return; + } + rcu_assign_pointer(mesh_paths, newtbl); + write_unlock(&pathtbl_resize_lock); + + synchronize_rcu(); + mesh_table_free(oldtbl, false); +} + +void mesh_mpp_table_grow(void) +{ + struct mesh_table *oldtbl, *newtbl; + + write_lock(&pathtbl_resize_lock); + 
oldtbl = mpp_paths; + newtbl = mesh_table_grow(mpp_paths); + if (!newtbl) { + write_unlock(&pathtbl_resize_lock); + return; + } + rcu_assign_pointer(mpp_paths, newtbl); + write_unlock(&pathtbl_resize_lock); + + synchronize_rcu(); + mesh_table_free(oldtbl, false); +} + +int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; + struct mesh_path *mpath, *new_mpath; + struct mpath_node *node, *new_node; + struct hlist_head *bucket; + struct hlist_node *n; + int grow = 0; + int err = 0; + u32 hash_idx; + + if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) + /* never add ourselves as neighbours */ + return -ENOTSUPP; + + if (is_multicast_ether_addr(dst)) + return -ENOTSUPP; + + err = -ENOMEM; + new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); + if (!new_mpath) + goto err_path_alloc; + + new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); + if (!new_node) + goto err_node_alloc; + + read_lock(&pathtbl_resize_lock); + memcpy(new_mpath->dst, dst, ETH_ALEN); + memcpy(new_mpath->mpp, mpp, ETH_ALEN); + new_mpath->sdata = sdata; + new_mpath->flags = 0; + skb_queue_head_init(&new_mpath->frame_queue); + new_node->mpath = new_mpath; + new_mpath->exp_time = jiffies; + spin_lock_init(&new_mpath->state_lock); + + hash_idx = mesh_table_hash(dst, sdata, mpp_paths); + bucket = &mpp_paths->hash_buckets[hash_idx]; + + spin_lock(&mpp_paths->hashwlock[hash_idx]); + + err = -EEXIST; + hlist_for_each_entry(node, n, bucket, list) { + mpath = node->mpath; + if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) + goto err_exists; + } + + hlist_add_head_rcu(&new_node->list, bucket); + if (atomic_inc_return(&mpp_paths->entries) >= + mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) + grow = 1; + + spin_unlock(&mpp_paths->hashwlock[hash_idx]); + read_unlock(&pathtbl_resize_lock); + if (grow) { + set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); + ieee80211_queue_work(&local->hw, &ifmsh->work); + } + return 0; + +err_exists: + spin_unlock(&mpp_paths->hashwlock[hash_idx]); + read_unlock(&pathtbl_resize_lock); + kfree(new_node); +err_node_alloc: + kfree(new_mpath); +err_path_alloc: + return err; +} + + +/** + * mesh_plink_broken - deactivates paths and sends perr when a link breaks + * + * @sta: broken peer link + * + * This function must be called from the rate control algorithm if enough + * delivery errors suggest that a peer link is no longer usable. 
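+ * Every active, non-fixed path that uses this peer as next hop is marked + * inactive and a PERR is broadcast for it.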
+ */ +void mesh_plink_broken(struct sta_info *sta) +{ + static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct mesh_path *mpath; + struct mpath_node *node; + struct hlist_node *p; + struct ieee80211_sub_if_data *sdata = sta->sdata; + int i; + + rcu_read_lock(); + for_each_mesh_entry(mesh_paths, p, node, i) { + mpath = node->mpath; + spin_lock_bh(&mpath->state_lock); + if (mpath->next_hop == sta && + mpath->flags & MESH_PATH_ACTIVE && + !(mpath->flags & MESH_PATH_FIXED)) { + mpath->flags &= ~MESH_PATH_ACTIVE; + ++mpath->sn; + spin_unlock_bh(&mpath->state_lock); + mesh_path_error_tx(MESH_TTL, mpath->dst, + cpu_to_le32(mpath->sn), + cpu_to_le16(PERR_RCODE_DEST_UNREACH), + bcast, sdata); + } else + spin_unlock_bh(&mpath->state_lock); + } + rcu_read_unlock(); +} + +/** + * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches + * + * @sta - mesh peer to match + * + * RCU notes: this function is called when a mesh plink transitions from + * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that + * allows path creation. This will happen before the sta can be freed (because + * sta_info_destroy() calls this) so any reader in a rcu read block will be + * protected against the plink disappearing. + */ +void mesh_path_flush_by_nexthop(struct sta_info *sta) +{ + struct mesh_path *mpath; + struct mpath_node *node; + struct hlist_node *p; + int i; + + for_each_mesh_entry(mesh_paths, p, node, i) { + mpath = node->mpath; + if (mpath->next_hop == sta) + mesh_path_del(mpath->dst, mpath->sdata); + } +} + +void mesh_path_flush(struct ieee80211_sub_if_data *sdata) +{ + struct mesh_path *mpath; + struct mpath_node *node; + struct hlist_node *p; + int i; + + for_each_mesh_entry(mesh_paths, p, node, i) { + mpath = node->mpath; + if (mpath->sdata == sdata) + mesh_path_del(mpath->dst, mpath->sdata); + } +} + +static void mesh_path_node_reclaim(struct rcu_head *rp) +{ + struct mpath_node *node = container_of(rp, struct mpath_node, rcu); + struct ieee80211_sub_if_data *sdata = node->mpath->sdata; + + del_timer_sync(&node->mpath->timer); + atomic_dec(&sdata->u.mesh.mpaths); + kfree(node->mpath); + kfree(node); +} + +/** + * mesh_path_del - delete a mesh path from the table + * + * @addr: dst address (ETH_ALEN length) + * @sdata: local subif + * + * Returns: 0 if successful + */ +int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) +{ + struct mesh_path *mpath; + struct mpath_node *node; + struct hlist_head *bucket; + struct hlist_node *n; + int hash_idx; + int err = 0; + + read_lock(&pathtbl_resize_lock); + hash_idx = mesh_table_hash(addr, sdata, mesh_paths); + bucket = &mesh_paths->hash_buckets[hash_idx]; + + spin_lock(&mesh_paths->hashwlock[hash_idx]); + hlist_for_each_entry(node, n, bucket, list) { + mpath = node->mpath; + if (mpath->sdata == sdata && + memcmp(addr, mpath->dst, ETH_ALEN) == 0) { + spin_lock_bh(&mpath->state_lock); + mpath->flags |= MESH_PATH_RESOLVING; + hlist_del_rcu(&node->list); + call_rcu(&node->rcu, mesh_path_node_reclaim); + atomic_dec(&mesh_paths->entries); + spin_unlock_bh(&mpath->state_lock); + goto enddel; + } + } + + err = -ENXIO; +enddel: + mesh_paths_generation++; + spin_unlock(&mesh_paths->hashwlock[hash_idx]); + read_unlock(&pathtbl_resize_lock); + return err; +} + +/** + * mesh_path_tx_pending - sends pending frames in a mesh path queue + * + * @mpath: mesh path to activate + * + * Locking: the state_lock of the mpath structure must NOT be held when calling + * this function. 
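+ * The queued frames are handed back to mac80211 for transmission only if + * the path is currently active.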
+ */ +void mesh_path_tx_pending(struct mesh_path *mpath) +{ + if (mpath->flags & MESH_PATH_ACTIVE) + ieee80211_add_pending_skbs(mpath->sdata->local, + &mpath->frame_queue); +} + +/** + * mesh_path_discard_frame - discard a frame whose path could not be resolved + * + * @skb: frame to discard + * @sdata: network subif the frame was to be sent through + * + * If the frame was being forwarded from another MP, a PERR frame will be sent + * to the precursor. The precursor's address (i.e. the previous hop) was saved + * in addr1 of the frame-to-be-forwarded, and would only be overwritten once + * the destination is successfully resolved. + * + * Locking: the function must be called within a rcu_read_lock region + */ +void mesh_path_discard_frame(struct sk_buff *skb, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct mesh_path *mpath; + u32 sn = 0; + + if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) { + u8 *ra, *da; + + da = hdr->addr3; + ra = hdr->addr1; + mpath = mesh_path_lookup(da, sdata); + if (mpath) + sn = ++mpath->sn; + mesh_path_error_tx(MESH_TTL, skb->data, cpu_to_le32(sn), + cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata); + } + + kfree_skb(skb); + sdata->u.mesh.mshstats.dropped_frames_no_route++; +} + +/** + * mesh_path_flush_pending - free the pending queue of a mesh path + * + * @mpath: mesh path whose queue has to be freed + * + * Locking: the function must be called within a rcu_read_lock region + */ +void mesh_path_flush_pending(struct mesh_path *mpath) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(&mpath->frame_queue)) && + (mpath->flags & MESH_PATH_ACTIVE)) + mesh_path_discard_frame(skb, mpath->sdata); +} + +/** + * mesh_path_fix_nexthop - force a specific next hop for a mesh path + * + * @mpath: the mesh path to modify + * @next_hop: the next hop to force + * + * Locking: this function must be called holding mpath->state_lock + */ +void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) +{ + spin_lock_bh(&mpath->state_lock); + mesh_path_assign_nexthop(mpath, next_hop); + mpath->sn = 0xffff; + mpath->metric = 0; + mpath->hop_count = 0; + mpath->exp_time = 0; + mpath->flags |= MESH_PATH_FIXED; + mesh_path_activate(mpath); + spin_unlock_bh(&mpath->state_lock); + mesh_path_tx_pending(mpath); +} + +static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) +{ + struct mesh_path *mpath; + struct mpath_node *node = hlist_entry(p, struct mpath_node, list); + mpath = node->mpath; + hlist_del_rcu(p); + if (free_leafs) + kfree(mpath); + kfree(node); +} + +static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) +{ + struct mesh_path *mpath; + struct mpath_node *node, *new_node; + u32 hash_idx; + + new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); + if (new_node == NULL) + return -ENOMEM; + + node = hlist_entry(p, struct mpath_node, list); + mpath = node->mpath; + new_node->mpath = mpath; + hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl); + hlist_add_head(&new_node->list, + &newtbl->hash_buckets[hash_idx]); + return 0; +} + +int mesh_pathtbl_init(void) +{ + mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); + if (!mesh_paths) + return -ENOMEM; + mesh_paths->free_node = &mesh_path_node_free; + mesh_paths->copy_node = &mesh_path_node_copy; + mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; + + mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); + if (!mpp_paths) { + mesh_table_free(mesh_paths, true); + return -ENOMEM; + } +
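/* the MPP (proxy) table shares the node callbacks of the mesh paths table */ +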
mpp_paths->free_node = &mesh_path_node_free; + mpp_paths->copy_node = &mesh_path_node_copy; + mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; + + return 0; +} + +void mesh_path_expire(struct ieee80211_sub_if_data *sdata) +{ + struct mesh_path *mpath; + struct mpath_node *node; + struct hlist_node *p; + int i; + + read_lock(&pathtbl_resize_lock); + for_each_mesh_entry(mesh_paths, p, node, i) { + if (node->mpath->sdata != sdata) + continue; + mpath = node->mpath; + spin_lock_bh(&mpath->state_lock); + if ((!(mpath->flags & MESH_PATH_RESOLVING)) && + (!(mpath->flags & MESH_PATH_FIXED)) && + time_after(jiffies, + mpath->exp_time + MESH_PATH_EXPIRE)) { + spin_unlock_bh(&mpath->state_lock); + mesh_path_del(mpath->dst, mpath->sdata); + } else + spin_unlock_bh(&mpath->state_lock); + } + read_unlock(&pathtbl_resize_lock); +} + +void mesh_pathtbl_unregister(void) +{ + mesh_table_free(mesh_paths, true); + mesh_table_free(mpp_paths, true); +} diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c new file mode 100644 index 0000000..1a29c4a --- /dev/null +++ b/net/mac80211/mesh_plink.c @@ -0,0 +1,754 @@ +/* + * Copyright (c) 2008, 2009 open80211s Ltd. + * Author: Luis Carlos Cobo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include "ieee80211_i.h" +#include "rate.h" +#include "mesh.h" + +#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG +#define mpl_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args) +#else +#define mpl_dbg(fmt, args...) do { (void)(0); } while (0) +#endif + +#define PLINK_GET_LLID(p) (p + 4) +#define PLINK_GET_PLID(p) (p + 6) + +#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ + jiffies + HZ * t / 1000)) + +/* Peer link cancel reasons, all subject to ANA approval */ +#define MESH_LINK_CANCELLED 2 +#define MESH_MAX_NEIGHBORS 3 +#define MESH_CAPABILITY_POLICY_VIOLATION 4 +#define MESH_CLOSE_RCVD 5 +#define MESH_MAX_RETRIES 6 +#define MESH_CONFIRM_TIMEOUT 7 +#define MESH_SECURITY_ROLE_NEGOTIATION_DIFFERS 8 +#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9 +#define MESH_SECURITY_FAILED_VERIFICATION 10 + +#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries) +#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout) +#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout) +#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout) +#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks) + +enum plink_frame_type { + PLINK_OPEN = 0, + PLINK_CONFIRM, + PLINK_CLOSE +}; + +enum plink_event { + PLINK_UNDEFINED, + OPN_ACPT, + OPN_RJCT, + OPN_IGNR, + CNF_ACPT, + CNF_RJCT, + CNF_IGNR, + CLS_ACPT, + CLS_IGNR +}; + +static inline +void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) +{ + atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); + mesh_accept_plinks_update(sdata); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); +} + +static inline +void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) +{ + atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); + mesh_accept_plinks_update(sdata); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); +} + +/** + * mesh_plink_fsm_restart - restart a mesh peer link finite state machine + * + * @sta: mesh peer link to restart + * + * Locking: this function must be called holding sta->lock + */ +static inline void mesh_plink_fsm_restart(struct 
sta_info *sta) +{ + sta->plink_state = PLINK_LISTEN; + sta->llid = sta->plid = sta->reason = 0; + sta->plink_retries = 0; +} + +/* + * NOTE: This is just an alias for sta_info_alloc(), see notes + * on it in the lifecycle management section! + */ +static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, + u8 *hw_addr, u32 rates) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + if (local->num_sta >= MESH_MAX_PLINKS) + return NULL; + + sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); + if (!sta) + return NULL; + + sta->flags = WLAN_STA_AUTHORIZED; + sta->sta.supp_rates[local->hw.conf.channel->band] = rates; + rate_control_rate_init(sta); + + return sta; +} + +/** + * __mesh_plink_deactivate - deactivate mesh peer link + * + * @sta: mesh peer link to deactivate + * + * All mesh paths with this peer as next hop will be flushed + * + * Locking: the caller must hold sta->lock + */ +static void __mesh_plink_deactivate(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + + if (sta->plink_state == PLINK_ESTAB) + mesh_plink_dec_estab_count(sdata); + sta->plink_state = PLINK_BLOCKED; + mesh_path_flush_by_nexthop(sta); +} + +/** + * mesh_plink_deactivate - deactivate mesh peer link + * + * @sta: mesh peer link to deactivate + * + * All mesh paths with this peer as next hop will be flushed + */ +void mesh_plink_deactivate(struct sta_info *sta) +{ + spin_lock_bh(&sta->lock); + __mesh_plink_deactivate(sta); + spin_unlock_bh(&sta->lock); +} + +static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, + enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, + __le16 reason) { + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); + struct ieee80211_mgmt *mgmt; + bool include_plid = false; + static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A }; + u8 *pos; + int ie_len; + + if (!skb) + return -1; + skb_reserve(skb, local->hw.extra_tx_headroom); + /* 25 is the size of the common mgmt part (24) plus the size of the + * common action part (1) + */ + mgmt = (struct ieee80211_mgmt *) + skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); + memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + /* BSSID is left zeroed, wildcard value */ + mgmt->u.action.category = MESH_PLINK_CATEGORY; + mgmt->u.action.u.plink_action.action_code = action; + + if (action == PLINK_CLOSE) + mgmt->u.action.u.plink_action.aux = reason; + else { + mgmt->u.action.u.plink_action.aux = cpu_to_le16(0x0); + if (action == PLINK_CONFIRM) { + pos = skb_put(skb, 4); + /* two-byte status code followed by two-byte AID */ + memset(pos, 0, 2); + memcpy(pos + 2, &plid, 2); + } + mesh_mgmt_ies_add(skb, sdata); + } + + /* Add Peer Link Management element */ + switch (action) { + case PLINK_OPEN: + ie_len = 6; + break; + case PLINK_CONFIRM: + ie_len = 8; + include_plid = true; + break; + case PLINK_CLOSE: + default: + if (!plid) + ie_len = 8; + else { + ie_len = 10; + include_plid = true; + } + break; + } + + pos = skb_put(skb, 2 + ie_len); + *pos++ = WLAN_EID_PEER_LINK; + *pos++ = ie_len; + memcpy(pos, meshpeeringproto, sizeof(meshpeeringproto)); + pos += 4; + memcpy(pos, &llid, 2); + if (include_plid) { + pos += 2; + memcpy(pos, &plid, 2); + } + if (action == PLINK_CLOSE) { + pos += 2; + memcpy(pos,
&reason, 2); + } + + ieee80211_tx_skb(sdata, skb); + return 0; +} + +void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data *sdata, + bool peer_accepting_plinks) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + rcu_read_lock(); + + sta = sta_info_get(sdata, hw_addr); + if (!sta) { + rcu_read_unlock(); + + sta = mesh_plink_alloc(sdata, hw_addr, rates); + if (!sta) + return; + if (sta_info_insert_rcu(sta)) { + rcu_read_unlock(); + return; + } + } + + sta->last_rx = jiffies; + sta->sta.supp_rates[local->hw.conf.channel->band] = rates; + if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN && + sdata->u.mesh.accepting_plinks && + sdata->u.mesh.mshcfg.auto_open_plinks) + mesh_plink_open(sta); + + rcu_read_unlock(); +} + +static void mesh_plink_timer(unsigned long data) +{ + struct sta_info *sta; + __le16 llid, plid, reason; + struct ieee80211_sub_if_data *sdata; + + /* + * This STA is valid because sta_info_destroy() will + * del_timer_sync() this timer after having made sure + * it cannot be readded (by deleting the plink.) + */ + sta = (struct sta_info *) data; + + if (sta->sdata->local->quiescing) { + sta->plink_timer_was_running = true; + return; + } + + spin_lock_bh(&sta->lock); + if (sta->ignore_plink_timer) { + sta->ignore_plink_timer = false; + spin_unlock_bh(&sta->lock); + return; + } + mpl_dbg("Mesh plink timer for %pM fired on state %d\n", + sta->sta.addr, sta->plink_state); + reason = 0; + llid = sta->llid; + plid = sta->plid; + sdata = sta->sdata; + + switch (sta->plink_state) { + case PLINK_OPN_RCVD: + case PLINK_OPN_SNT: + /* retry timer */ + if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { + u32 rand; + mpl_dbg("Mesh plink for %pM (retry, timeout): %d %d\n", + sta->sta.addr, sta->plink_retries, + sta->plink_timeout); + get_random_bytes(&rand, sizeof(u32)); + sta->plink_timeout = sta->plink_timeout + + rand % sta->plink_timeout; + ++sta->plink_retries; + mod_plink_timer(sta, sta->plink_timeout); + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, + 0, 0); + break; + } + reason = cpu_to_le16(MESH_MAX_RETRIES); + /* fall through on else */ + case PLINK_CNF_RCVD: + /* confirm timer */ + if (!reason) + reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); + sta->plink_state = PLINK_HOLDING; + mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, + reason); + break; + case PLINK_HOLDING: + /* holding timer */ + del_timer(&sta->plink_timer); + mesh_plink_fsm_restart(sta); + spin_unlock_bh(&sta->lock); + break; + default: + spin_unlock_bh(&sta->lock); + break; + } +} + +#ifdef CONFIG_PM +void mesh_plink_quiesce(struct sta_info *sta) +{ + if (del_timer_sync(&sta->plink_timer)) + sta->plink_timer_was_running = true; +} + +void mesh_plink_restart(struct sta_info *sta) +{ + if (sta->plink_timer_was_running) { + add_timer(&sta->plink_timer); + sta->plink_timer_was_running = false; + } +} +#endif + +static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout) +{ + sta->plink_timer.expires = jiffies + (HZ * timeout / 1000); + sta->plink_timer.data = (unsigned long) sta; + sta->plink_timer.function = mesh_plink_timer; + sta->plink_timeout = timeout; + add_timer(&sta->plink_timer); +} + +int mesh_plink_open(struct sta_info *sta) +{ + __le16 llid; + struct ieee80211_sub_if_data *sdata = sta->sdata; + + spin_lock_bh(&sta->lock); + get_random_bytes(&llid, 2); + sta->llid = llid; + if 
(sta->plink_state != PLINK_LISTEN) { + spin_unlock_bh(&sta->lock); + return -EBUSY; + } + sta->plink_state = PLINK_OPN_SNT; + mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); + spin_unlock_bh(&sta->lock); + mpl_dbg("Mesh plink: starting establishment with %pM\n", + sta->sta.addr); + + return mesh_plink_frame_tx(sdata, PLINK_OPEN, + sta->sta.addr, llid, 0, 0); +} + +void mesh_plink_block(struct sta_info *sta) +{ + spin_lock_bh(&sta->lock); + __mesh_plink_deactivate(sta); + sta->plink_state = PLINK_BLOCKED; + spin_unlock_bh(&sta->lock); +} + + +void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, + size_t len, struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_local *local = sdata->local; + struct ieee802_11_elems elems; + struct sta_info *sta; + enum plink_event event; + enum plink_frame_type ftype; + size_t baselen; + u8 ie_len; + u8 *baseaddr; + __le16 plid, llid, reason; +#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG + static const char *mplstates[] = { + [PLINK_LISTEN] = "LISTEN", + [PLINK_OPN_SNT] = "OPN-SNT", + [PLINK_OPN_RCVD] = "OPN-RCVD", + [PLINK_CNF_RCVD] = "CNF_RCVD", + [PLINK_ESTAB] = "ESTAB", + [PLINK_HOLDING] = "HOLDING", + [PLINK_BLOCKED] = "BLOCKED" + }; +#endif + + /* need action_code, aux */ + if (len < IEEE80211_MIN_ACTION_SIZE + 3) + return; + + if (is_multicast_ether_addr(mgmt->da)) { + mpl_dbg("Mesh plink: ignore frame from multicast address"); + return; + } + + baseaddr = mgmt->u.action.u.plink_action.variable; + baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt; + if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) { + baseaddr += 4; + baselen += 4; + } + ieee802_11_parse_elems(baseaddr, len - baselen, &elems); + if (!elems.peer_link) { + mpl_dbg("Mesh plink: missing necessary peer link ie\n"); + return; + } + + ftype = mgmt->u.action.u.plink_action.action_code; + ie_len = elems.peer_link_len; + if ((ftype == PLINK_OPEN && ie_len != 6) || + (ftype == PLINK_CONFIRM && ie_len != 8) || + (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) { + mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n", + ftype, ie_len); + return; + } + + if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) { + mpl_dbg("Mesh plink: missing necessary ie\n"); + return; + } + /* Note the lines below are correct, the llid in the frame is the plid + * from the point of view of this host. + */ + memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2); + if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10)) + memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2); + + rcu_read_lock(); + + sta = sta_info_get(sdata, mgmt->sa); + if (!sta && ftype != PLINK_OPEN) { + mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); + rcu_read_unlock(); + return; + } + + if (sta && sta->plink_state == PLINK_BLOCKED) { + rcu_read_unlock(); + return; + } + + /* Now we will figure out the appropriate event... 
*/ + event = PLINK_UNDEFINED; + if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) { + switch (ftype) { + case PLINK_OPEN: + event = OPN_RJCT; + break; + case PLINK_CONFIRM: + event = CNF_RJCT; + break; + case PLINK_CLOSE: + /* avoid warning */ + break; + } + spin_lock_bh(&sta->lock); + } else if (!sta) { + /* ftype == PLINK_OPEN */ + u32 rates; + + rcu_read_unlock(); + + if (!mesh_plink_free_count(sdata)) { + mpl_dbg("Mesh plink error: no more free plinks\n"); + return; + } + + rates = ieee80211_sta_get_rates(local, &elems, rx_status->band); + sta = mesh_plink_alloc(sdata, mgmt->sa, rates); + if (!sta) { + mpl_dbg("Mesh plink error: plink table full\n"); + return; + } + if (sta_info_insert_rcu(sta)) { + rcu_read_unlock(); + return; + } + event = OPN_ACPT; + spin_lock_bh(&sta->lock); + } else { + spin_lock_bh(&sta->lock); + switch (ftype) { + case PLINK_OPEN: + if (!mesh_plink_free_count(sdata) || + (sta->plid && sta->plid != plid)) + event = OPN_IGNR; + else + event = OPN_ACPT; + break; + case PLINK_CONFIRM: + if (!mesh_plink_free_count(sdata) || + (sta->llid != llid || sta->plid != plid)) + event = CNF_IGNR; + else + event = CNF_ACPT; + break; + case PLINK_CLOSE: + if (sta->plink_state == PLINK_ESTAB) + /* Do not check for llid or plid. This does not + * follow the standard but since multiple plinks + * per sta are not supported, it is necessary in + * order to avoid a livelock when MP A sees an + * establish peer link to MP B but MP B does not + * see it. This can be caused by a timeout in + * B's peer link establishment or B beign + * restarted. + */ + event = CLS_ACPT; + else if (sta->plid != plid) + event = CLS_IGNR; + else if (ie_len == 7 && sta->llid != llid) + event = CLS_IGNR; + else + event = CLS_ACPT; + break; + default: + mpl_dbg("Mesh plink: unknown frame subtype\n"); + spin_unlock_bh(&sta->lock); + rcu_read_unlock(); + return; + } + } + + mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n", + mgmt->sa, mplstates[sta->plink_state], + le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), + event); + reason = 0; + switch (sta->plink_state) { + /* spin_unlock as soon as state is updated at each case */ + case PLINK_LISTEN: + switch (event) { + case CLS_ACPT: + mesh_plink_fsm_restart(sta); + spin_unlock_bh(&sta->lock); + break; + case OPN_ACPT: + sta->plink_state = PLINK_OPN_RCVD; + sta->plid = plid; + get_random_bytes(&llid, 2); + sta->llid = llid; + mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, + 0, 0); + mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, + llid, plid, 0); + break; + default: + spin_unlock_bh(&sta->lock); + break; + } + break; + + case PLINK_OPN_SNT: + switch (event) { + case OPN_RJCT: + case CNF_RJCT: + reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); + case CLS_ACPT: + if (!reason) + reason = cpu_to_le16(MESH_CLOSE_RCVD); + sta->reason = reason; + sta->plink_state = PLINK_HOLDING; + if (!mod_plink_timer(sta, + dot11MeshHoldingTimeout(sdata))) + sta->ignore_plink_timer = true; + + llid = sta->llid; + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, + plid, reason); + break; + case OPN_ACPT: + /* retry timer is left untouched */ + sta->plink_state = PLINK_OPN_RCVD; + sta->plid = plid; + llid = sta->llid; + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, + plid, 0); + break; + case CNF_ACPT: + sta->plink_state = PLINK_CNF_RCVD; + if 
(!mod_plink_timer(sta, + dot11MeshConfirmTimeout(sdata))) + sta->ignore_plink_timer = true; + + spin_unlock_bh(&sta->lock); + break; + default: + spin_unlock_bh(&sta->lock); + break; + } + break; + + case PLINK_OPN_RCVD: + switch (event) { + case OPN_RJCT: + case CNF_RJCT: + reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); + case CLS_ACPT: + if (!reason) + reason = cpu_to_le16(MESH_CLOSE_RCVD); + sta->reason = reason; + sta->plink_state = PLINK_HOLDING; + if (!mod_plink_timer(sta, + dot11MeshHoldingTimeout(sdata))) + sta->ignore_plink_timer = true; + + llid = sta->llid; + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, + plid, reason); + break; + case OPN_ACPT: + llid = sta->llid; + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, + plid, 0); + break; + case CNF_ACPT: + del_timer(&sta->plink_timer); + sta->plink_state = PLINK_ESTAB; + mesh_plink_inc_estab_count(sdata); + spin_unlock_bh(&sta->lock); + mpl_dbg("Mesh plink with %pM ESTABLISHED\n", + sta->sta.addr); + break; + default: + spin_unlock_bh(&sta->lock); + break; + } + break; + + case PLINK_CNF_RCVD: + switch (event) { + case OPN_RJCT: + case CNF_RJCT: + reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION); + case CLS_ACPT: + if (!reason) + reason = cpu_to_le16(MESH_CLOSE_RCVD); + sta->reason = reason; + sta->plink_state = PLINK_HOLDING; + if (!mod_plink_timer(sta, + dot11MeshHoldingTimeout(sdata))) + sta->ignore_plink_timer = true; + + llid = sta->llid; + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, + plid, reason); + break; + case OPN_ACPT: + del_timer(&sta->plink_timer); + sta->plink_state = PLINK_ESTAB; + mesh_plink_inc_estab_count(sdata); + spin_unlock_bh(&sta->lock); + mpl_dbg("Mesh plink with %pM ESTABLISHED\n", + sta->sta.addr); + mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, + plid, 0); + break; + default: + spin_unlock_bh(&sta->lock); + break; + } + break; + + case PLINK_ESTAB: + switch (event) { + case CLS_ACPT: + reason = cpu_to_le16(MESH_CLOSE_RCVD); + sta->reason = reason; + __mesh_plink_deactivate(sta); + sta->plink_state = PLINK_HOLDING; + llid = sta->llid; + mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, + plid, reason); + break; + case OPN_ACPT: + llid = sta->llid; + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, + plid, 0); + break; + default: + spin_unlock_bh(&sta->lock); + break; + } + break; + case PLINK_HOLDING: + switch (event) { + case CLS_ACPT: + if (del_timer(&sta->plink_timer)) + sta->ignore_plink_timer = 1; + mesh_plink_fsm_restart(sta); + spin_unlock_bh(&sta->lock); + break; + case OPN_ACPT: + case CNF_ACPT: + case OPN_RJCT: + case CNF_RJCT: + llid = sta->llid; + reason = sta->reason; + spin_unlock_bh(&sta->lock); + mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, + llid, plid, reason); + break; + default: + spin_unlock_bh(&sta->lock); + } + break; + default: + /* should not get here, PLINK_BLOCKED is dealt with at the + * beginning of the function + */ + spin_unlock_bh(&sta->lock); + break; + } + + rcu_read_unlock(); +} diff --git a/net/mac80211/michael.c b/net/mac80211/michael.c new file mode 100644 index 0000000..408649b --- /dev/null +++ b/net/mac80211/michael.c @@ -0,0 +1,86 @@ +/* + * Michael MIC implementation - optimized for TKIP MIC operations + * Copyright 2002-2003, Instant802 Networks, 
Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include + +#include "michael.h" + +static void michael_block(struct michael_mic_ctx *mctx, u32 val) +{ + mctx->l ^= val; + mctx->r ^= rol32(mctx->l, 17); + mctx->l += mctx->r; + mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) | + ((mctx->l & 0x00ff00ff) << 8); + mctx->l += mctx->r; + mctx->r ^= rol32(mctx->l, 3); + mctx->l += mctx->r; + mctx->r ^= ror32(mctx->l, 2); + mctx->l += mctx->r; +} + +static void michael_mic_hdr(struct michael_mic_ctx *mctx, const u8 *key, + struct ieee80211_hdr *hdr) +{ + u8 *da, *sa, tid; + + da = ieee80211_get_DA(hdr); + sa = ieee80211_get_SA(hdr); + if (ieee80211_is_data_qos(hdr->frame_control)) + tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + else + tid = 0; + + mctx->l = get_unaligned_le32(key); + mctx->r = get_unaligned_le32(key + 4); + + /* + * A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC + * calculation, but it is _not_ transmitted + */ + michael_block(mctx, get_unaligned_le32(da)); + michael_block(mctx, get_unaligned_le16(&da[4]) | + (get_unaligned_le16(sa) << 16)); + michael_block(mctx, get_unaligned_le32(&sa[2])); + michael_block(mctx, tid); +} + +void michael_mic(const u8 *key, struct ieee80211_hdr *hdr, + const u8 *data, size_t data_len, u8 *mic) +{ + u32 val; + size_t block, blocks, left; + struct michael_mic_ctx mctx; + + michael_mic_hdr(&mctx, key, hdr); + + /* Real data */ + blocks = data_len / 4; + left = data_len % 4; + + for (block = 0; block < blocks; block++) + michael_block(&mctx, get_unaligned_le32(&data[block * 4])); + + /* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make + * total length a multiple of 4. */ + val = 0x5a; + while (left > 0) { + val <<= 8; + left--; + val |= data[blocks * 4 + left]; + } + + michael_block(&mctx, val); + michael_block(&mctx, 0); + + put_unaligned_le32(mctx.l, mic); + put_unaligned_le32(mctx.r, mic + 4); +} diff --git a/net/mac80211/michael.h b/net/mac80211/michael.h new file mode 100644 index 0000000..3b848da --- /dev/null +++ b/net/mac80211/michael.h @@ -0,0 +1,24 @@ +/* + * Michael MIC implementation - optimized for TKIP MIC operations + * Copyright 2002-2003, Instant802 Networks, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MICHAEL_H +#define MICHAEL_H + +#include + +#define MICHAEL_MIC_LEN 8 + +struct michael_mic_ctx { + u32 l, r; +}; + +void michael_mic(const u8 *key, struct ieee80211_hdr *hdr, + const u8 *data, size_t data_len, u8 *mic); + +#endif /* MICHAEL_H */ diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c new file mode 100644 index 0000000..0ab284c --- /dev/null +++ b/net/mac80211/mlme.c @@ -0,0 +1,2134 @@ +/* + * BSS client mode implementation + * Copyright 2003-2008, Jouni Malinen + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
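The michael_mic() routine above folds the MSDU into two 32-bit halves (l, r) with rotates, byte swaps and additions, then appends a 0x5a marker byte plus zero padding before two final block rounds. The following standalone userspace sketch (editor's illustration, not part of the patch; the key words and payload bytes are made up, and the DA/SA/TID pseudo-header step is omitted) reproduces the block mixing and tail padding so the data flow can be compiled and inspected in isolation:

#include <stdio.h>
#include <stdint.h>

struct mic_ctx { uint32_t l, r; };

static uint32_t rol32(uint32_t v, int s) { return (v << s) | (v >> (32 - s)); }
static uint32_t ror32(uint32_t v, int s) { return (v >> s) | (v << (32 - s)); }

/* same mixing as michael_block() above */
static void block(struct mic_ctx *m, uint32_t val)
{
	m->l ^= val;
	m->r ^= rol32(m->l, 17);
	m->l += m->r;
	m->r ^= ((m->l & 0xff00ff00) >> 8) | ((m->l & 0x00ff00ff) << 8);
	m->l += m->r;
	m->r ^= rol32(m->l, 3);
	m->l += m->r;
	m->r ^= ror32(m->l, 2);
	m->l += m->r;
}

int main(void)
{
	/* hypothetical key halves; the real code loads them little-endian
	 * from the 8-byte TKIP MIC key and then mixes in the DA/SA/TID
	 * pseudo-header, which this sketch skips */
	struct mic_ctx m = { .l = 0x01020304, .r = 0x05060708 };
	const uint8_t data[] = { 0xde, 0xad, 0xbe, 0xef, 0x42 };
	size_t blocks = sizeof(data) / 4, left = sizeof(data) % 4, i;
	uint32_t val;

	for (i = 0; i < blocks; i++)
		block(&m, (uint32_t)data[i * 4] |
			  (uint32_t)data[i * 4 + 1] << 8 |
			  (uint32_t)data[i * 4 + 2] << 16 |
			  (uint32_t)data[i * 4 + 3] << 24);

	/* tail: 0x5a marker plus the leftover 0..3 bytes, then a zero block */
	val = 0x5a;
	while (left > 0) {
		val <<= 8;
		left--;
		val |= data[blocks * 4 + left];
	}
	block(&m, val);
	block(&m, 0);

	printf("MIC words: %08x %08x\n", m.l, m.r);
	return 0;
}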
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "led.h" + +#define IEEE80211_MAX_PROBE_TRIES 5 + +/* + * beacon loss detection timeout + * XXX: should depend on beacon interval + */ +#define IEEE80211_BEACON_LOSS_TIME (2 * HZ) +/* + * Time the connection can be idle before we probe + * it to see if we can still talk to the AP. + */ +#define IEEE80211_CONNECTION_IDLE_TIME (30 * HZ) +/* + * Time we wait for a probe response after sending + * a probe request because of beacon loss or for + * checking the connection still works. + */ +#define IEEE80211_PROBE_WAIT (HZ / 2) + +#define TMR_RUNNING_TIMER 0 +#define TMR_RUNNING_CHANSW 1 + +/* + * All cfg80211 functions have to be called outside a locked + * section so that they can acquire a lock themselves... This + * is much simpler than queuing up things in cfg80211, but we + * do need some indirection for that here. + */ +enum rx_mgmt_action { + /* no action required */ + RX_MGMT_NONE, + + /* caller must call cfg80211_send_rx_auth() */ + RX_MGMT_CFG80211_AUTH, + + /* caller must call cfg80211_send_rx_assoc() */ + RX_MGMT_CFG80211_ASSOC, + + /* caller must call cfg80211_send_deauth() */ + RX_MGMT_CFG80211_DEAUTH, + + /* caller must call cfg80211_send_disassoc() */ + RX_MGMT_CFG80211_DISASSOC, + + /* caller must tell cfg80211 about internal error */ + RX_MGMT_CFG80211_ASSOC_ERROR, +}; + +/* utils */ +static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd) +{ + WARN_ON(!mutex_is_locked(&ifmgd->mtx)); +} + +/* + * We can have multiple work items (and connection probing) + * scheduling this timer, but we need to take care to only + * reschedule it when it should fire _earlier_ than it was + * asked for before, or if it's not pending right now. This + * function ensures that. Note that it then is required to + * run this function for all timeouts after the first one + * has happened -- the work that runs from this timer will + * do that. + */ +static void run_again(struct ieee80211_if_managed *ifmgd, + unsigned long timeout) +{ + ASSERT_MGD_MTX(ifmgd); + + if (!timer_pending(&ifmgd->timer) || + time_before(timeout, ifmgd->timer.expires)) + mod_timer(&ifmgd->timer, timeout); +} + +static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata) +{ + if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER) + return; + + mod_timer(&sdata->u.mgd.bcn_mon_timer, + round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME)); +} + +static int ecw2cw(int ecw) +{ + return (1 << ecw) - 1; +} + +/* + * ieee80211_enable_ht should be called only after the operating band + * has been determined as ht configuration depends on the hw's + * HT abilities for a specific band. 
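ecw2cw() above expands the exponent form carried in WMM parameter records into an actual contention window, CW = 2^ECW - 1. A tiny standalone check with common default values (editor's illustration; the numbers are ordinary WMM defaults, not taken from the patch):

#include <stdio.h>

static int ecw2cw(int ecw)
{
	return (1 << ecw) - 1;		/* CW = 2^ECW - 1 */
}

int main(void)
{
	/* typical best-effort queue parameters */
	int ecw_min = 4, ecw_max = 10;

	printf("ECWmin %d -> CWmin %d\n", ecw_min, ecw2cw(ecw_min));	/* 15 */
	printf("ECWmax %d -> CWmax %d\n", ecw_max, ecw2cw(ecw_max));	/* 1023 */
	return 0;
}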
+ */ +static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, + struct ieee80211_ht_info *hti, + const u8 *bssid, u16 ap_ht_cap_flags) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + struct sta_info *sta; + u32 changed = 0; + u16 ht_opmode; + bool enable_ht = true, ht_changed; + enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; + + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + + /* HT is not supported */ + if (!sband->ht_cap.ht_supported) + enable_ht = false; + + /* check that channel matches the right operating channel */ + if (local->hw.conf.channel->center_freq != + ieee80211_channel_to_frequency(hti->control_chan)) + enable_ht = false; + + if (enable_ht) { + channel_type = NL80211_CHAN_HT20; + + if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && + (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && + (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { + switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + if (!(local->hw.conf.channel->flags & + IEEE80211_CHAN_NO_HT40PLUS)) + channel_type = NL80211_CHAN_HT40PLUS; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + if (!(local->hw.conf.channel->flags & + IEEE80211_CHAN_NO_HT40MINUS)) + channel_type = NL80211_CHAN_HT40MINUS; + break; + } + } + } + + ht_changed = conf_is_ht(&local->hw.conf) != enable_ht || + channel_type != local->hw.conf.channel_type; + + local->oper_channel_type = channel_type; + + if (ht_changed) { + /* channel_type change automatically detected */ + ieee80211_hw_config(local, 0); + + rcu_read_lock(); + sta = sta_info_get(sdata, bssid); + if (sta) + rate_control_rate_update(local, sband, sta, + IEEE80211_RC_HT_CHANGED, + local->oper_channel_type); + rcu_read_unlock(); + } + + /* disable HT */ + if (!enable_ht) + return 0; + + ht_opmode = le16_to_cpu(hti->operation_mode); + + /* if bss configuration changed store the new one */ + if (!sdata->ht_opmode_valid || + sdata->vif.bss_conf.ht_operation_mode != ht_opmode) { + changed |= BSS_CHANGED_HT; + sdata->vif.bss_conf.ht_operation_mode = ht_opmode; + sdata->ht_opmode_valid = true; + } + + return changed; +} + +/* frame sending functions */ + +static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, + const u8 *bssid, u16 stype, u16 reason, + void *cookie) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); + if (!skb) { + printk(KERN_DEBUG "%s: failed to allocate buffer for " + "deauth/disassoc frame\n", sdata->name); + return; + } + skb_reserve(skb, local->hw.extra_tx_headroom); + + mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); + memset(mgmt, 0, 24); + memcpy(mgmt->da, bssid, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, bssid, ETH_ALEN); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); + skb_put(skb, 2); + /* u.deauth.reason_code == u.disassoc.reason_code */ + mgmt->u.deauth.reason_code = cpu_to_le16(reason); + + if (stype == IEEE80211_STYPE_DEAUTH) + if (cookie) + __cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); + else + cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); + else + if (cookie) + __cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); + else + cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); + if (!(ifmgd->flags & 
IEEE80211_STA_MFP_ENABLED)) + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_send_pspoll(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_pspoll *pspoll; + struct sk_buff *skb; + + skb = ieee80211_pspoll_get(&local->hw, &sdata->vif); + if (!skb) + return; + + pspoll = (struct ieee80211_pspoll *) skb->data; + pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_send_nullfunc(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + int powersave) +{ + struct sk_buff *skb; + struct ieee80211_hdr_3addr *nullfunc; + + skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif); + if (!skb) + return; + + nullfunc = (struct ieee80211_hdr_3addr *) skb->data; + if (powersave) + nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, skb); +} + +static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + struct sk_buff *skb; + struct ieee80211_hdr *nullfunc; + __le16 fc; + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) + return; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30); + if (!skb) { + printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr " + "nullfunc frame\n", sdata->name); + return; + } + skb_reserve(skb, local->hw.extra_tx_headroom); + + nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30); + memset(nullfunc, 0, 30); + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); + nullfunc->frame_control = fc; + memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN); + memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); + memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN); + memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, skb); +} + +/* spectrum management related things */ +static void ieee80211_chswitch_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work); + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + if (!ieee80211_sdata_running(sdata)) + return; + + mutex_lock(&ifmgd->mtx); + if (!ifmgd->associated) + goto out; + + sdata->local->oper_channel = sdata->local->csa_channel; + ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL); + + /* XXX: shouldn't really modify cfg80211-owned data! 
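The nullfunc helpers above differ only in which frame-control bits they set: the power-save variant sets the PM flag on a 3-address data/nullfunc frame, while the 4-address variant sets both ToDS and FromDS so the AP can create a STA VLAN. The sketch below shows the resulting frame-control words (editor's illustration; the #defines are local mirrors of the usual linux/ieee80211.h values so the example is self-contained):

#include <stdio.h>
#include <stdint.h>

/* local mirrors of the frame-control constants used above */
#define FTYPE_DATA	0x0008
#define STYPE_NULLFUNC	0x0048
#define FCTL_TODS	0x0100
#define FCTL_FROMDS	0x0200
#define FCTL_PM		0x1000

int main(void)
{
	/* 3-address nullfunc with the power-management bit set, i.e. what
	 * ieee80211_send_nullfunc(..., powersave = 1) puts on the air */
	uint16_t ps_fc = FTYPE_DATA | STYPE_NULLFUNC | FCTL_PM;

	/* 4-address variant: ToDS and FromDS both set so the AP knows we
	 * want 4-address (STA VLAN) operation */
	uint16_t fouraddr_fc = FTYPE_DATA | STYPE_NULLFUNC |
			       FCTL_TODS | FCTL_FROMDS;

	printf("PS nullfunc     fc = 0x%04x\n", ps_fc);		/* 0x1048 */
	printf("4-addr nullfunc fc = 0x%04x\n", fouraddr_fc);	/* 0x0348 */
	return 0;
}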
*/ + ifmgd->associated->channel = sdata->local->oper_channel; + + ieee80211_wake_queues_by_reason(&sdata->local->hw, + IEEE80211_QUEUE_STOP_REASON_CSA); + out: + ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; + mutex_unlock(&ifmgd->mtx); +} + +static void ieee80211_chswitch_timer(unsigned long data) +{ + struct ieee80211_sub_if_data *sdata = + (struct ieee80211_sub_if_data *) data; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + if (sdata->local->quiescing) { + set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); + return; + } + + ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); +} + +void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel_sw_ie *sw_elem, + struct ieee80211_bss *bss) +{ + struct cfg80211_bss *cbss = + container_of((void *)bss, struct cfg80211_bss, priv); + struct ieee80211_channel *new_ch; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num); + + ASSERT_MGD_MTX(ifmgd); + + if (!ifmgd->associated) + return; + + if (sdata->local->scanning) + return; + + /* Disregard subsequent beacons if we are already running a timer + processing a CSA */ + + if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED) + return; + + new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq); + if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED) + return; + + sdata->local->csa_channel = new_ch; + + if (sw_elem->count <= 1) { + ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); + } else { + ieee80211_stop_queues_by_reason(&sdata->local->hw, + IEEE80211_QUEUE_STOP_REASON_CSA); + ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; + mod_timer(&ifmgd->chswitch_timer, + jiffies + + msecs_to_jiffies(sw_elem->count * + cbss->beacon_interval)); + } +} + +static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, + u16 capab_info, u8 *pwr_constr_elem, + u8 pwr_constr_elem_len) +{ + struct ieee80211_conf *conf = &sdata->local->hw.conf; + + if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT)) + return; + + /* Power constraint IE length should be 1 octet */ + if (pwr_constr_elem_len != 1) + return; + + if ((*pwr_constr_elem <= conf->channel->max_power) && + (*pwr_constr_elem != sdata->local->power_constr_level)) { + sdata->local->power_constr_level = *pwr_constr_elem; + ieee80211_hw_config(sdata->local, 0); + } +} + +/* powersave */ +static void ieee80211_enable_ps(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_conf *conf = &local->hw.conf; + + /* + * If we are scanning right now then the parameters will + * take effect when scan finishes. 
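ieee80211_sta_process_chanswitch() above converts the announced channel number to a frequency and, when the countdown is longer than one beacon, arms chswitch_timer roughly count beacon intervals ahead. A standalone sketch with illustrative numbers (editor's addition; the simplified channel-to-frequency mapping is the conventional 2.4/5 GHz formula, and one TU is 1024 microseconds):

#include <stdio.h>

/* simplified mapping: 2.4 GHz channels 1-13 are 2407 + 5 MHz * channel,
 * channel 14 is 2484 MHz, 5 GHz channels are 5000 + 5 MHz * channel */
static int channel_to_freq(int chan)
{
	if (chan == 14)
		return 2484;
	if (chan < 14)
		return 2407 + chan * 5;
	return 5000 + chan * 5;
}

int main(void)
{
	int new_ch_num = 11;	/* channel number from the CSA element */
	int csa_count = 5;	/* beacons remaining before the switch */
	int beacon_int = 100;	/* beacon interval in TUs; 1 TU = 1024 us */

	printf("switch to %d MHz\n", channel_to_freq(new_ch_num));	/* 2462 */
	/* the mod_timer() call above treats the product as milliseconds */
	printf("timer armed ~%d ms ahead\n", csa_count * beacon_int);
	printf("exact beacon span: %d us\n", csa_count * beacon_int * 1024);
	return 0;
}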
+ */ + if (local->scanning) + return; + + if (conf->dynamic_ps_timeout > 0 && + !(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)) { + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(conf->dynamic_ps_timeout)); + } else { + if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) + ieee80211_send_nullfunc(local, sdata, 1); + + if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) { + conf->flags |= IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } + } +} + +static void ieee80211_change_ps(struct ieee80211_local *local) +{ + struct ieee80211_conf *conf = &local->hw.conf; + + if (local->ps_sdata) { + ieee80211_enable_ps(local, local->ps_sdata); + } else if (conf->flags & IEEE80211_CONF_PS) { + conf->flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + del_timer_sync(&local->dynamic_ps_timer); + cancel_work_sync(&local->dynamic_ps_enable_work); + } +} + +/* need to hold RTNL or interface lock */ +void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) +{ + struct ieee80211_sub_if_data *sdata, *found = NULL; + int count = 0; + + if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) { + local->ps_sdata = NULL; + return; + } + + if (!list_empty(&local->work_list)) { + local->ps_sdata = NULL; + goto change; + } + + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + if (sdata->vif.type != NL80211_IFTYPE_STATION) + continue; + found = sdata; + count++; + } + + if (count == 1 && found->u.mgd.powersave && + found->u.mgd.associated && + found->u.mgd.associated->beacon_ies && + !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL | + IEEE80211_STA_CONNECTION_POLL))) { + s32 beaconint_us; + + if (latency < 0) + latency = pm_qos_requirement(PM_QOS_NETWORK_LATENCY); + + beaconint_us = ieee80211_tu_to_usec( + found->vif.bss_conf.beacon_int); + + if (beaconint_us > latency) { + local->ps_sdata = NULL; + } else { + struct ieee80211_bss *bss; + int maxslp = 1; + u8 dtimper; + + bss = (void *)found->u.mgd.associated->priv; + dtimper = bss->dtim_period; + + /* If the TIM IE is invalid, pretend the value is 1 */ + if (!dtimper) + dtimper = 1; + else if (dtimper > 1) + maxslp = min_t(int, dtimper, + latency / beaconint_us); + + local->hw.conf.max_sleep_period = maxslp; + local->hw.conf.ps_dtim_period = dtimper; + local->ps_sdata = found; + } + } else { + local->ps_sdata = NULL; + } + + change: + ieee80211_change_ps(local); +} + +void ieee80211_dynamic_ps_disable_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, + dynamic_ps_disable_work); + + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } + + ieee80211_wake_queues_by_reason(&local->hw, + IEEE80211_QUEUE_STOP_REASON_PS); +} + +void ieee80211_dynamic_ps_enable_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, + dynamic_ps_enable_work); + struct ieee80211_sub_if_data *sdata = local->ps_sdata; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + /* can only happen when PS was just disabled anyway */ + if (!sdata) + return; + + if (local->hw.conf.flags & IEEE80211_CONF_PS) + return; + + if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && + (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) + ieee80211_send_nullfunc(local, sdata, 1); + + if (!(local->hw.flags & 
IEEE80211_HW_REPORTS_TX_ACK_STATUS) || + (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) { + ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; + local->hw.conf.flags |= IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } +} + +void ieee80211_dynamic_ps_timer(unsigned long data) +{ + struct ieee80211_local *local = (void *) data; + + if (local->quiescing || local->suspended) + return; + + ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work); +} + +/* MLME */ +static void ieee80211_sta_wmm_params(struct ieee80211_local *local, + struct ieee80211_if_managed *ifmgd, + u8 *wmm_param, size_t wmm_param_len) +{ + struct ieee80211_tx_queue_params params; + size_t left; + int count; + u8 *pos, uapsd_queues = 0; + + if (local->hw.queues < 4) + return; + + if (!wmm_param) + return; + + if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) + return; + + if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) + uapsd_queues = local->uapsd_queues; + + count = wmm_param[6] & 0x0f; + if (count == ifmgd->wmm_last_param_set) + return; + ifmgd->wmm_last_param_set = count; + + pos = wmm_param + 8; + left = wmm_param_len - 8; + + memset(¶ms, 0, sizeof(params)); + + local->wmm_acm = 0; + for (; left >= 4; left -= 4, pos += 4) { + int aci = (pos[0] >> 5) & 0x03; + int acm = (pos[0] >> 4) & 0x01; + bool uapsd = false; + int queue; + + switch (aci) { + case 1: /* AC_BK */ + queue = 3; + if (acm) + local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + uapsd = true; + break; + case 2: /* AC_VI */ + queue = 1; + if (acm) + local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) + uapsd = true; + break; + case 3: /* AC_VO */ + queue = 0; + if (acm) + local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + uapsd = true; + break; + case 0: /* AC_BE */ + default: + queue = 2; + if (acm) + local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) + uapsd = true; + break; + } + + params.aifs = pos[0] & 0x0f; + params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); + params.cw_min = ecw2cw(pos[1] & 0x0f); + params.txop = get_unaligned_le16(pos + 2); + params.uapsd = uapsd; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " + "cWmin=%d cWmax=%d txop=%d uapsd=%d\n", + wiphy_name(local->hw.wiphy), queue, aci, acm, + params.aifs, params.cw_min, params.cw_max, params.txop, + params.uapsd); +#endif + if (drv_conf_tx(local, queue, ¶ms) && local->ops->conf_tx) + printk(KERN_DEBUG "%s: failed to set TX queue " + "parameters for queue %d\n", + wiphy_name(local->hw.wiphy), queue); + } +} + +static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, + u16 capab, bool erp_valid, u8 erp) +{ + struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; + u32 changed = 0; + bool use_protection; + bool use_short_preamble; + bool use_short_slot; + + if (erp_valid) { + use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0; + use_short_preamble = (erp & WLAN_ERP_BARKER_PREAMBLE) == 0; + } else { + use_protection = false; + use_short_preamble = !!(capab & WLAN_CAPABILITY_SHORT_PREAMBLE); + } + + use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); + if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) + use_short_slot = true; + + if (use_protection != bss_conf->use_cts_prot) { + bss_conf->use_cts_prot = use_protection; + changed |= 
BSS_CHANGED_ERP_CTS_PROT; + } + + if (use_short_preamble != bss_conf->use_short_preamble) { + bss_conf->use_short_preamble = use_short_preamble; + changed |= BSS_CHANGED_ERP_PREAMBLE; + } + + if (use_short_slot != bss_conf->use_short_slot) { + bss_conf->use_short_slot = use_short_slot; + changed |= BSS_CHANGED_ERP_SLOT; + } + + return changed; +} + +static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, + struct cfg80211_bss *cbss, + u32 bss_info_changed) +{ + struct ieee80211_bss *bss = (void *)cbss->priv; + struct ieee80211_local *local = sdata->local; + + bss_info_changed |= BSS_CHANGED_ASSOC; + /* set timing information */ + sdata->vif.bss_conf.beacon_int = cbss->beacon_interval; + sdata->vif.bss_conf.timestamp = cbss->tsf; + + bss_info_changed |= BSS_CHANGED_BEACON_INT; + bss_info_changed |= ieee80211_handle_bss_capability(sdata, + cbss->capability, bss->has_erp_value, bss->erp_value); + + sdata->u.mgd.associated = cbss; + memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN); + + /* just to be sure */ + sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | + IEEE80211_STA_BEACON_POLL); + + /* + * Always handle WMM once after association regardless + * of the first value the AP uses. Setting -1 here has + * that effect because the AP values is an unsigned + * 4-bit value. + */ + sdata->u.mgd.wmm_last_param_set = -1; + + ieee80211_led_assoc(local, 1); + + sdata->vif.bss_conf.assoc = 1; + /* + * For now just always ask the driver to update the basic rateset + * when we have associated, we aren't checking whether it actually + * changed or not. + */ + bss_info_changed |= BSS_CHANGED_BASIC_RATES; + + /* And the BSSID changed - we're associated now */ + bss_info_changed |= BSS_CHANGED_BSSID; + + ieee80211_bss_info_change_notify(sdata, bss_info_changed); + + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_ps(local, -1); + ieee80211_recalc_smps(local, sdata); + mutex_unlock(&local->iflist_mtx); + + netif_tx_start_all_queues(sdata->dev); + netif_carrier_on(sdata->dev); +} + +static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + u32 changed = 0, config_changed = 0; + u8 bssid[ETH_ALEN]; + + ASSERT_MGD_MTX(ifmgd); + + if (WARN_ON(!ifmgd->associated)) + return; + + memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); + + ifmgd->associated = NULL; + memset(ifmgd->bssid, 0, ETH_ALEN); + + /* + * we need to commit the associated = NULL change because the + * scan code uses that to determine whether this iface should + * go to/wake up from powersave or not -- and could otherwise + * wake the queues erroneously. + */ + smp_mb(); + + /* + * Thus, we can only afterwards stop the queues -- to account + * for the case where another CPU is finishing a scan at this + * time -- we don't want the scan code to enable queues. 
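The wmm_last_param_set = -1 assignment in ieee80211_set_associated() above works because the WMM parameter set count is a 4-bit field: the sentinel can never match, so the first parameter set seen after (re)association is always programmed, while identical repeats in later beacons are skipped. A small sketch of that comparison (editor's illustration; the QoS info byte is hypothetical):

#include <stdio.h>

int main(void)
{
	int wmm_last_param_set = -1;	/* sentinel installed at association */
	unsigned char qos_info = 0x80;	/* hypothetical QoS info byte from the AP's WMM IE */
	int count = qos_info & 0x0f;	/* 4-bit parameter set count, here 0 */

	/* -1 can never equal a 0..15 count, so the first set is applied */
	if (count != wmm_last_param_set) {
		printf("applying WMM parameter set %d\n", count);
		wmm_last_param_set = count;
	}

	/* a later beacon repeating the same count changes nothing */
	if (count == wmm_last_param_set)
		printf("parameter set %d unchanged, skipped\n", count);
	return 0;
}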
+ */ + + netif_tx_stop_all_queues(sdata->dev); + netif_carrier_off(sdata->dev); + + rcu_read_lock(); + sta = sta_info_get(sdata, bssid); + if (sta) { + set_sta_flags(sta, WLAN_STA_DISASSOC); + ieee80211_sta_tear_down_BA_sessions(sta); + } + rcu_read_unlock(); + + changed |= ieee80211_reset_erp_info(sdata); + + ieee80211_led_assoc(local, 0); + changed |= BSS_CHANGED_ASSOC; + sdata->vif.bss_conf.assoc = false; + + ieee80211_set_wmm_default(sdata); + + /* channel(_type) changes are handled by ieee80211_hw_config */ + local->oper_channel_type = NL80211_CHAN_NO_HT; + + /* on the next assoc, re-program HT parameters */ + sdata->ht_opmode_valid = false; + + local->power_constr_level = 0; + + del_timer_sync(&local->dynamic_ps_timer); + cancel_work_sync(&local->dynamic_ps_enable_work); + + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + config_changed |= IEEE80211_CONF_CHANGE_PS; + } + + ieee80211_hw_config(local, config_changed); + + /* And the BSSID changed -- not very interesting here */ + changed |= BSS_CHANGED_BSSID; + ieee80211_bss_info_change_notify(sdata, changed); + + sta_info_destroy_addr(sdata, bssid); +} + +void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, + struct ieee80211_hdr *hdr) +{ + /* + * We can postpone the mgd.timer whenever receiving unicast frames + * from AP because we know that the connection is working both ways + * at that time. But multicast frames (and hence also beacons) must + * be ignored here, because we need to trigger the timer during + * data idle periods for sending the periodic probe request to the + * AP we're connected to. + */ + if (is_multicast_ether_addr(hdr->addr1)) + return; + + mod_timer(&sdata->u.mgd.conn_mon_timer, + round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); +} + +static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + const u8 *ssid; + + ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); + ieee80211_send_probe_req(sdata, ifmgd->associated->bssid, + ssid + 2, ssid[1], NULL, 0); + + ifmgd->probe_send_count++; + ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT; + run_again(ifmgd, ifmgd->probe_timeout); +} + +static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, + bool beacon) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + bool already = false; + + if (!ieee80211_sdata_running(sdata)) + return; + + if (sdata->local->scanning) + return; + + if (sdata->local->tmp_channel) + return; + + mutex_lock(&ifmgd->mtx); + + if (!ifmgd->associated) + goto out; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (beacon && net_ratelimit()) + printk(KERN_DEBUG "%s: detected beacon loss from AP " + "- sending probe request\n", sdata->name); +#endif + + /* + * The driver/our work has already reported this event or the + * connection monitoring has kicked in and we have already sent + * a probe request. Or maybe the AP died and the driver keeps + * reporting until we disassociate... + * + * In either case we have to ignore the current call to this + * function (except for setting the correct probe reason bit) + * because otherwise we would reset the timer every time and + * never check whether we received a probe response! 
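The connection monitoring above is driven by three timeouts: beacon loss fires after IEEE80211_BEACON_LOSS_TIME, data idleness after IEEE80211_CONNECTION_IDLE_TIME, and each probe request gets IEEE80211_PROBE_WAIT before the next of up to IEEE80211_MAX_PROBE_TRIES attempts. The rough arithmetic below (editor's sketch; HZ is assumed to be 1000 purely so the output reads as milliseconds) shows the approximate worst case from the last received beacon to the deauth sent by the STA work function:

#include <stdio.h>

#define HZ			1000	/* assumed tick rate, for readable output only */

#define BEACON_LOSS_TIME	(2 * HZ)
#define CONNECTION_IDLE_TIME	(30 * HZ)
#define PROBE_WAIT		(HZ / 2)
#define MAX_PROBE_TRIES		5

int main(void)
{
	/* beacon loss trigger plus the full probe budget before deauth */
	int worst_case = BEACON_LOSS_TIME + MAX_PROBE_TRIES * PROBE_WAIT;

	printf("beacon loss trigger  : %d ms\n", BEACON_LOSS_TIME);
	printf("data idle probe      : %d ms\n", CONNECTION_IDLE_TIME);
	printf("per-probe wait       : %d ms, up to %d tries\n",
	       PROBE_WAIT, MAX_PROBE_TRIES);
	printf("worst case to deauth : ~%d ms after the last beacon\n",
	       worst_case);
	return 0;
}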
+ */ + if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | + IEEE80211_STA_CONNECTION_POLL)) + already = true; + + if (beacon) + ifmgd->flags |= IEEE80211_STA_BEACON_POLL; + else + ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; + + if (already) + goto out; + + mutex_lock(&sdata->local->iflist_mtx); + ieee80211_recalc_ps(sdata->local, -1); + mutex_unlock(&sdata->local->iflist_mtx); + + ifmgd->probe_send_count = 0; + ieee80211_mgd_probe_ap_send(sdata); + out: + mutex_unlock(&ifmgd->mtx); +} + +void ieee80211_beacon_loss_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.mgd.beacon_loss_work); + + ieee80211_mgd_probe_ap(sdata, true); +} + +void ieee80211_beacon_loss(struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work); +} +EXPORT_SYMBOL(ieee80211_beacon_loss); + +static enum rx_mgmt_action __must_check +ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + const u8 *bssid = NULL; + u16 reason_code; + + if (len < 24 + 2) + return RX_MGMT_NONE; + + ASSERT_MGD_MTX(ifmgd); + + bssid = ifmgd->associated->bssid; + + reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); + + printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", + sdata->name, bssid, reason_code); + + ieee80211_set_disassoc(sdata); + ieee80211_recalc_idle(sdata->local); + + return RX_MGMT_CFG80211_DEAUTH; +} + + +static enum rx_mgmt_action __must_check +ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + u16 reason_code; + + if (len < 24 + 2) + return RX_MGMT_NONE; + + ASSERT_MGD_MTX(ifmgd); + + if (WARN_ON(!ifmgd->associated)) + return RX_MGMT_NONE; + + if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN))) + return RX_MGMT_NONE; + + reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); + + printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", + sdata->name, mgmt->sa, reason_code); + + ieee80211_set_disassoc(sdata); + ieee80211_recalc_idle(sdata->local); + return RX_MGMT_CFG80211_DISASSOC; +} + + +static bool ieee80211_assoc_success(struct ieee80211_work *wk, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee80211_sub_if_data *sdata = wk->sdata; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + struct sta_info *sta; + struct cfg80211_bss *cbss = wk->assoc.bss; + u8 *pos; + u32 rates, basic_rates; + u16 capab_info, aid; + struct ieee802_11_elems elems; + struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; + u32 changed = 0; + int i, j, err; + bool have_higher_than_11mbit = false; + u16 ap_ht_cap_flags; + + /* AssocResp and ReassocResp have identical structure */ + + aid = le16_to_cpu(mgmt->u.assoc_resp.aid); + capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); + + if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) + printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " + "set\n", sdata->name, aid); + aid &= ~(BIT(15) | BIT(14)); + + pos = mgmt->u.assoc_resp.variable; + ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); + + if (!elems.supp_rates) { + printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", + sdata->name); + return false; + } + + 
ifmgd->aid = aid; + + sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL); + if (!sta) { + printk(KERN_DEBUG "%s: failed to alloc STA entry for" + " the AP\n", sdata->name); + return false; + } + + set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | + WLAN_STA_ASSOC_AP); + if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) + set_sta_flags(sta, WLAN_STA_AUTHORIZED); + + rates = 0; + basic_rates = 0; + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + + for (i = 0; i < elems.supp_rates_len; i++) { + int rate = (elems.supp_rates[i] & 0x7f) * 5; + bool is_basic = !!(elems.supp_rates[i] & 0x80); + + if (rate > 110) + have_higher_than_11mbit = true; + + for (j = 0; j < sband->n_bitrates; j++) { + if (sband->bitrates[j].bitrate == rate) { + rates |= BIT(j); + if (is_basic) + basic_rates |= BIT(j); + break; + } + } + } + + for (i = 0; i < elems.ext_supp_rates_len; i++) { + int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; + bool is_basic = !!(elems.ext_supp_rates[i] & 0x80); + + if (rate > 110) + have_higher_than_11mbit = true; + + for (j = 0; j < sband->n_bitrates; j++) { + if (sband->bitrates[j].bitrate == rate) { + rates |= BIT(j); + if (is_basic) + basic_rates |= BIT(j); + break; + } + } + } + + sta->sta.supp_rates[local->hw.conf.channel->band] = rates; + sdata->vif.bss_conf.basic_rates = basic_rates; + + /* cf. IEEE 802.11 9.2.12 */ + if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && + have_higher_than_11mbit) + sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; + else + sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; + + if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) + ieee80211_ht_cap_ie_to_sta_ht_cap(sband, + elems.ht_cap_elem, &sta->sta.ht_cap); + + ap_ht_cap_flags = sta->sta.ht_cap.cap; + + rate_control_rate_init(sta); + + if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) + set_sta_flags(sta, WLAN_STA_MFP); + + if (elems.wmm_param) + set_sta_flags(sta, WLAN_STA_WME); + + err = sta_info_insert(sta); + sta = NULL; + if (err) { + printk(KERN_DEBUG "%s: failed to insert STA entry for" + " the AP (error %d)\n", sdata->name, err); + return false; + } + + if (elems.wmm_param) + ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, + elems.wmm_param_len); + else + ieee80211_set_wmm_default(sdata); + + local->oper_channel = wk->chan; + + if (elems.ht_info_elem && elems.wmm_param && + (sdata->local->hw.queues >= 4) && + !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) + changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, + cbss->bssid, ap_ht_cap_flags); + + /* set AID and assoc capability, + * ieee80211_set_associated() will tell the driver */ + bss_conf->aid = aid; + bss_conf->assoc_capability = capab_info; + ieee80211_set_associated(sdata, cbss, changed); + + /* + * If we're using 4-addr mode, let the AP know that we're + * doing so, so that it can create the STA VLAN on its side + */ + if (ifmgd->use_4addr) + ieee80211_send_4addr_nullfunc(local, sdata); + + /* + * Start timer to probe the connection to the AP now. + * Also start the timer that will detect beacon loss. 
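The rate loops in ieee80211_assoc_success() above decode the (Extended) Supported Rates elements: bit 7 marks a basic rate and the low seven bits give the rate in 500 kb/s units, which the code scales by 5 into the 100 kb/s units mac80211 bitrates use; any rate above 110 (11 Mbit/s) on 2.4 GHz switches the interface into G mode. A standalone decoder for a typical element body (editor's illustration; the byte values are a common but hypothetical AP advertisement):

#include <stdio.h>

int main(void)
{
	/* hypothetical Supported Rates element body:
	 * bit 7 = basic rate, low 7 bits = rate in 500 kb/s units */
	unsigned char supp_rates[] = { 0x82, 0x84, 0x8b, 0x96,
				       0x0c, 0x12, 0x18, 0x24 };
	unsigned int i;

	for (i = 0; i < sizeof(supp_rates); i++) {
		/* scaled by 5 into 100 kb/s units, as mac80211 stores bitrates */
		int rate = (supp_rates[i] & 0x7f) * 5;
		int basic = !!(supp_rates[i] & 0x80);

		printf("%2d.%d Mbit/s%s%s\n", rate / 10, rate % 10,
		       basic ? " (basic)" : "",
		       rate > 110 ? "  <- forces G mode on 2.4 GHz" : "");
	}
	return 0;
}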
+ */ + ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); + mod_beacon_timer(sdata); + + return true; +} + + +static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee80211_rx_status *rx_status, + struct ieee802_11_elems *elems, + bool beacon) +{ + struct ieee80211_local *local = sdata->local; + int freq; + struct ieee80211_bss *bss; + struct ieee80211_channel *channel; + bool need_ps = false; + + if (sdata->u.mgd.associated) { + bss = (void *)sdata->u.mgd.associated->priv; + /* not previously set so we may need to recalc */ + need_ps = !bss->dtim_period; + } + + if (elems->ds_params && elems->ds_params_len == 1) + freq = ieee80211_channel_to_frequency(elems->ds_params[0]); + else + freq = rx_status->freq; + + channel = ieee80211_get_channel(local->hw.wiphy, freq); + + if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) + return; + + bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, + channel, beacon); + if (bss) + ieee80211_rx_bss_put(local, bss); + + if (!sdata->u.mgd.associated) + return; + + if (need_ps) { + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_ps(local, -1); + mutex_unlock(&local->iflist_mtx); + } + + if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) && + (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, + ETH_ALEN) == 0)) { + struct ieee80211_channel_sw_ie *sw_elem = + (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; + ieee80211_sta_process_chanswitch(sdata, sw_elem, bss); + } +} + + +static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + struct ieee80211_if_managed *ifmgd; + struct ieee80211_rx_status *rx_status = (void *) skb->cb; + size_t baselen, len = skb->len; + struct ieee802_11_elems elems; + + ifmgd = &sdata->u.mgd; + + ASSERT_MGD_MTX(ifmgd); + + if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN)) + return; /* ignore ProbeResp to foreign address */ + + baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; + if (baselen > len) + return; + + ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, + &elems); + + ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); + + if (ifmgd->associated && + memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0 && + ifmgd->flags & (IEEE80211_STA_BEACON_POLL | + IEEE80211_STA_CONNECTION_POLL)) { + ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | + IEEE80211_STA_BEACON_POLL); + mutex_lock(&sdata->local->iflist_mtx); + ieee80211_recalc_ps(sdata->local, -1); + mutex_unlock(&sdata->local->iflist_mtx); + /* + * We've received a probe response, but are not sure whether + * we have or will be receiving any beacons or data, so let's + * schedule the timers again, just in case. + */ + mod_beacon_timer(sdata); + mod_timer(&ifmgd->conn_mon_timer, + round_jiffies_up(jiffies + + IEEE80211_CONNECTION_IDLE_TIME)); + } +} + +/* + * This is the canonical list of information elements we care about, + * the filter code also gives us all changes to the Microsoft OUI + * (00:50:F2) vendor IE which is used for WMM which we need to track. + * + * We implement beacon filtering in software since that means we can + * avoid processing the frame here and in cfg80211, and userspace + * will not be able to tell whether the hardware supports it or not. + * + * XXX: This list needs to be dynamic -- userspace needs to be able to + * add items it requires. 
It also needs to be able to tell us to + * look out for other vendor IEs. + */ +static const u64 care_about_ies = + (1ULL << WLAN_EID_COUNTRY) | + (1ULL << WLAN_EID_ERP_INFO) | + (1ULL << WLAN_EID_CHANNEL_SWITCH) | + (1ULL << WLAN_EID_PWR_CONSTRAINT) | + (1ULL << WLAN_EID_HT_CAPABILITY) | + (1ULL << WLAN_EID_HT_INFORMATION); + +static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + size_t baselen; + struct ieee802_11_elems elems; + struct ieee80211_local *local = sdata->local; + u32 changed = 0; + bool erp_valid, directed_tim = false; + u8 erp_value = 0; + u32 ncrc; + u8 *bssid; + + ASSERT_MGD_MTX(ifmgd); + + /* Process beacon from the current BSS */ + baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; + if (baselen > len) + return; + + if (rx_status->freq != local->hw.conf.channel->center_freq) + return; + + /* + * We might have received a number of frames, among them a + * disassoc frame and a beacon... + */ + if (!ifmgd->associated) + return; + + bssid = ifmgd->associated->bssid; + + /* + * And in theory even frames from a different AP we were just + * associated to a split-second ago! + */ + if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0) + return; + + if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: cancelling probereq poll due " + "to a received beacon\n", sdata->name); + } +#endif + ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_ps(local, -1); + mutex_unlock(&local->iflist_mtx); + } + + /* + * Push the beacon loss detection into the future since + * we are processing a beacon from the AP just now. + */ + mod_beacon_timer(sdata); + + ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); + ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable, + len - baselen, &elems, + care_about_ies, ncrc); + + if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) + directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len, + ifmgd->aid); + + if (ncrc != ifmgd->beacon_crc) { + ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, + true); + + ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, + elems.wmm_param_len); + } + + if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { + if (directed_tim) { + if (local->hw.conf.dynamic_ps_timeout > 0) { + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, + IEEE80211_CONF_CHANGE_PS); + ieee80211_send_nullfunc(local, sdata, 0); + } else { + local->pspolling = true; + + /* + * Here is assumed that the driver will be + * able to send ps-poll frame and receive a + * response even though power save mode is + * enabled, but some drivers might require + * to disable power save here. This needs + * to be investigated. 
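The beacon path above hashes only the elements whose IDs are set in care_about_ies (plus the WMM vendor IE, per the comment), so beacons that merely tick the timestamp or TIM bitmap do not force a full reprocess; the TIM is still parsed separately for the directed-TIM power-save check. The sketch below mirrors the bitmap test (editor's illustration; the element ID numbers are the standard 802.11 ones, restated locally so the example is self-contained):

#include <stdio.h>
#include <stdint.h>

/* local mirrors of the element IDs named in care_about_ies */
enum {
	EID_TIM			= 5,
	EID_COUNTRY		= 7,
	EID_PWR_CONSTRAINT	= 32,
	EID_CHANNEL_SWITCH	= 37,
	EID_ERP_INFO		= 42,
	EID_HT_CAPABILITY	= 45,
	EID_HT_INFORMATION	= 61,
};

static const uint64_t care_about_ies =
	(1ULL << EID_COUNTRY) |
	(1ULL << EID_ERP_INFO) |
	(1ULL << EID_CHANNEL_SWITCH) |
	(1ULL << EID_PWR_CONSTRAINT) |
	(1ULL << EID_HT_CAPABILITY) |
	(1ULL << EID_HT_INFORMATION);

/* only elements in the set feed the beacon CRC, so beacons that merely
 * update the timestamp or the TIM bitmap don't force a full reprocess */
static int eid_feeds_crc(uint8_t eid)
{
	return eid < 64 && (care_about_ies & (1ULL << eid)) != 0;
}

int main(void)
{
	printf("ERP info (42): %s\n", eid_feeds_crc(EID_ERP_INFO) ? "tracked" : "ignored");
	printf("TIM      (5) : %s\n", eid_feeds_crc(EID_TIM) ? "tracked" : "ignored");
	return 0;
}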
+ */ + ieee80211_send_pspoll(local, sdata); + } + } + } + + if (ncrc == ifmgd->beacon_crc) + return; + ifmgd->beacon_crc = ncrc; + + if (elems.erp_info && elems.erp_info_len >= 1) { + erp_valid = true; + erp_value = elems.erp_info[0]; + } else { + erp_valid = false; + } + changed |= ieee80211_handle_bss_capability(sdata, + le16_to_cpu(mgmt->u.beacon.capab_info), + erp_valid, erp_value); + + + if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && + !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { + struct sta_info *sta; + struct ieee80211_supported_band *sband; + u16 ap_ht_cap_flags; + + rcu_read_lock(); + + sta = sta_info_get(sdata, bssid); + if (WARN_ON(!sta)) { + rcu_read_unlock(); + return; + } + + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + + ieee80211_ht_cap_ie_to_sta_ht_cap(sband, + elems.ht_cap_elem, &sta->sta.ht_cap); + + ap_ht_cap_flags = sta->sta.ht_cap.cap; + + rcu_read_unlock(); + + changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, + bssid, ap_ht_cap_flags); + } + + /* Note: country IE parsing is done for us by cfg80211 */ + if (elems.country_elem) { + /* TODO: IBSS also needs this */ + if (elems.pwr_constr_elem) + ieee80211_handle_pwr_constr(sdata, + le16_to_cpu(mgmt->u.probe_resp.capab_info), + elems.pwr_constr_elem, + elems.pwr_constr_elem_len); + } + + ieee80211_bss_info_change_notify(sdata, changed); +} + +ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgmt *mgmt; + u16 fc; + + if (skb->len < 24) + return RX_DROP_MONITOR; + + mgmt = (struct ieee80211_mgmt *) skb->data; + fc = le16_to_cpu(mgmt->frame_control); + + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_BEACON: + case IEEE80211_STYPE_DEAUTH: + case IEEE80211_STYPE_DISASSOC: + case IEEE80211_STYPE_ACTION: + skb_queue_tail(&sdata->u.mgd.skb_queue, skb); + ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); + return RX_QUEUED; + } + + return RX_DROP_MONITOR; +} + +static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_rx_status *rx_status; + struct ieee80211_mgmt *mgmt; + enum rx_mgmt_action rma = RX_MGMT_NONE; + u16 fc; + + rx_status = (struct ieee80211_rx_status *) skb->cb; + mgmt = (struct ieee80211_mgmt *) skb->data; + fc = le16_to_cpu(mgmt->frame_control); + + mutex_lock(&ifmgd->mtx); + + if (ifmgd->associated && + memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) { + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_BEACON: + ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, + rx_status); + break; + case IEEE80211_STYPE_PROBE_RESP: + ieee80211_rx_mgmt_probe_resp(sdata, skb); + break; + case IEEE80211_STYPE_DEAUTH: + rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); + break; + case IEEE80211_STYPE_DISASSOC: + rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); + break; + case IEEE80211_STYPE_ACTION: + if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) + break; + + ieee80211_sta_process_chanswitch(sdata, + &mgmt->u.action.u.chan_switch.sw_elem, + (void *)ifmgd->associated->priv); + break; + } + mutex_unlock(&ifmgd->mtx); + + switch (rma) { + case RX_MGMT_NONE: + /* no action */ + break; + case RX_MGMT_CFG80211_DEAUTH: + cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); + break; + case RX_MGMT_CFG80211_DISASSOC: + 
cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); + break; + default: + WARN(1, "unexpected: %d", rma); + } + goto out; + } + + mutex_unlock(&ifmgd->mtx); + + if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && + (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) + cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); + + out: + kfree_skb(skb); +} + +static void ieee80211_sta_timer(unsigned long data) +{ + struct ieee80211_sub_if_data *sdata = + (struct ieee80211_sub_if_data *) data; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + + if (local->quiescing) { + set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); + return; + } + + ieee80211_queue_work(&local->hw, &ifmgd->work); +} + +static void ieee80211_sta_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, u.mgd.work); + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd; + struct sk_buff *skb; + + if (!ieee80211_sdata_running(sdata)) + return; + + if (local->scanning) + return; + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) + return; + + /* + * ieee80211_queue_work() should have picked up most cases, + * here we'll pick the the rest. + */ + if (WARN(local->suspended, "STA MLME work scheduled while " + "going to suspend\n")) + return; + + ifmgd = &sdata->u.mgd; + + /* first process frames to avoid timing out while a frame is pending */ + while ((skb = skb_dequeue(&ifmgd->skb_queue))) + ieee80211_sta_rx_queued_mgmt(sdata, skb); + + /* then process the rest of the work */ + mutex_lock(&ifmgd->mtx); + + if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | + IEEE80211_STA_CONNECTION_POLL) && + ifmgd->associated) { + u8 bssid[ETH_ALEN]; + + memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); + if (time_is_after_jiffies(ifmgd->probe_timeout)) + run_again(ifmgd, ifmgd->probe_timeout); + + else if (ifmgd->probe_send_count < IEEE80211_MAX_PROBE_TRIES) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "No probe response from AP %pM" + " after %dms, try %d\n", bssid, + (1000 * IEEE80211_PROBE_WAIT)/HZ, + ifmgd->probe_send_count); +#endif + ieee80211_mgd_probe_ap_send(sdata); + } else { + /* + * We actually lost the connection ... or did we? + * Let's make sure! + */ + ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | + IEEE80211_STA_BEACON_POLL); + printk(KERN_DEBUG "No probe response from AP %pM" + " after %dms, disconnecting.\n", + bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); + ieee80211_set_disassoc(sdata); + ieee80211_recalc_idle(local); + mutex_unlock(&ifmgd->mtx); + /* + * must be outside lock due to cfg80211, + * but that's not a problem. 
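ieee80211_sta_rx_mgmt() above queues only a handful of management subtypes for the MLME work function, keyed off the frame-control subtype bits; everything else falls through to the monitor path. A standalone classifier showing that dispatch (editor's illustration; the masks and subtype values are local mirrors of the usual IEEE 802.11 frame-control encoding):

#include <stdio.h>
#include <stdint.h>

/* local mirrors of the frame-control masks and management subtypes */
#define FCTL_FTYPE		0x000c
#define FCTL_STYPE		0x00f0
#define FTYPE_MGMT		0x0000
#define STYPE_PROBE_RESP	0x0050
#define STYPE_BEACON		0x0080
#define STYPE_DISASSOC		0x00a0
#define STYPE_DEAUTH		0x00c0
#define STYPE_ACTION		0x00d0

static const char *classify(uint16_t fc)
{
	if ((fc & FCTL_FTYPE) != FTYPE_MGMT)
		return "not management";

	switch (fc & FCTL_STYPE) {
	case STYPE_BEACON:	return "beacon (queued)";
	case STYPE_PROBE_RESP:	return "probe response (queued)";
	case STYPE_DEAUTH:	return "deauth (queued)";
	case STYPE_DISASSOC:	return "disassoc (queued)";
	case STYPE_ACTION:	return "action (queued)";
	default:		return "dropped to monitor";
	}
}

int main(void)
{
	uint16_t beacon_fc = FTYPE_MGMT | STYPE_BEACON;
	uint16_t deauth_fc = FTYPE_MGMT | STYPE_DEAUTH;

	printf("0x%04x -> %s\n", beacon_fc, classify(beacon_fc));
	printf("0x%04x -> %s\n", deauth_fc, classify(deauth_fc));
	return 0;
}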
+ */ + ieee80211_send_deauth_disassoc(sdata, bssid, + IEEE80211_STYPE_DEAUTH, + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, + NULL); + mutex_lock(&ifmgd->mtx); + } + } + + mutex_unlock(&ifmgd->mtx); +} + +static void ieee80211_sta_bcn_mon_timer(unsigned long data) +{ + struct ieee80211_sub_if_data *sdata = + (struct ieee80211_sub_if_data *) data; + struct ieee80211_local *local = sdata->local; + + if (local->quiescing) + return; + + ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work); +} + +static void ieee80211_sta_conn_mon_timer(unsigned long data) +{ + struct ieee80211_sub_if_data *sdata = + (struct ieee80211_sub_if_data *) data; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_local *local = sdata->local; + + if (local->quiescing) + return; + + ieee80211_queue_work(&local->hw, &ifmgd->monitor_work); +} + +static void ieee80211_sta_monitor_work(struct work_struct *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.mgd.monitor_work); + + ieee80211_mgd_probe_ap(sdata, false); +} + +static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) +{ + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL | + IEEE80211_STA_CONNECTION_POLL); + + /* let's probe the connection once */ + ieee80211_queue_work(&sdata->local->hw, + &sdata->u.mgd.monitor_work); + /* and do all the other regular work too */ + ieee80211_queue_work(&sdata->local->hw, + &sdata->u.mgd.work); + } +} + +#ifdef CONFIG_PM +void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + /* + * we need to use atomic bitops for the running bits + * only because both timers might fire at the same + * time -- the code here is properly synchronised. 
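The ieee80211_sta_quiesce()/ieee80211_sta_restart() pair here records, in timers_running, which timers del_timer_sync() actually found pending, so that only those are re-armed after resume. A userspace sketch of that bookkeeping (editor's illustration; plain helpers stand in for the kernel's set_bit()/test_and_clear_bit(), and the pending flags are invented):

#include <stdio.h>

#define TMR_RUNNING_TIMER	0
#define TMR_RUNNING_CHANSW	1

/* plain stand-ins for the kernel's set_bit()/test_and_clear_bit() */
static void set_bit_ul(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int test_and_clear_bit_ul(int nr, unsigned long *addr)
{
	int was_set = !!(*addr & (1UL << nr));

	*addr &= ~(1UL << nr);
	return was_set;
}

int main(void)
{
	unsigned long timers_running = 0;

	/* pretend del_timer_sync() found the MLME timer pending but not the
	 * channel-switch timer when the interface was quiesced */
	int timer_pending = 1, chansw_pending = 0;

	if (timer_pending)
		set_bit_ul(TMR_RUNNING_TIMER, &timers_running);
	if (chansw_pending)
		set_bit_ul(TMR_RUNNING_CHANSW, &timers_running);

	/* on restart, only timers that were actually pending are re-armed */
	if (test_and_clear_bit_ul(TMR_RUNNING_TIMER, &timers_running))
		printf("re-arming ifmgd->timer\n");
	if (test_and_clear_bit_ul(TMR_RUNNING_CHANSW, &timers_running))
		printf("re-arming ifmgd->chswitch_timer\n");
	return 0;
}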
+ */ + + cancel_work_sync(&ifmgd->work); + cancel_work_sync(&ifmgd->beacon_loss_work); + if (del_timer_sync(&ifmgd->timer)) + set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); + + cancel_work_sync(&ifmgd->chswitch_work); + if (del_timer_sync(&ifmgd->chswitch_timer)) + set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); + + cancel_work_sync(&ifmgd->monitor_work); + /* these will just be re-established on connection */ + del_timer_sync(&ifmgd->conn_mon_timer); + del_timer_sync(&ifmgd->bcn_mon_timer); +} + +void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running)) + add_timer(&ifmgd->timer); + if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) + add_timer(&ifmgd->chswitch_timer); +} +#endif + +/* interface setup */ +void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd; + + ifmgd = &sdata->u.mgd; + INIT_WORK(&ifmgd->work, ieee80211_sta_work); + INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); + INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); + INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work); + setup_timer(&ifmgd->timer, ieee80211_sta_timer, + (unsigned long) sdata); + setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, + (unsigned long) sdata); + setup_timer(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, + (unsigned long) sdata); + setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, + (unsigned long) sdata); + skb_queue_head_init(&ifmgd->skb_queue); + + ifmgd->flags = 0; + + mutex_init(&ifmgd->mtx); + + if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) + ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC; + else + ifmgd->req_smps = IEEE80211_SMPS_OFF; +} + +/* scan finished notification */ +void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata = local->scan_sdata; + + /* Restart STA timers */ + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) + ieee80211_restart_sta_timer(sdata); + rcu_read_unlock(); +} + +int ieee80211_max_network_latency(struct notifier_block *nb, + unsigned long data, void *dummy) +{ + s32 latency_usec = (s32) data; + struct ieee80211_local *local = + container_of(nb, struct ieee80211_local, + network_latency_notifier); + + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_ps(local, latency_usec); + mutex_unlock(&local->iflist_mtx); + + return 0; +} + +/* config hooks */ +static enum work_done_result +ieee80211_probe_auth_done(struct ieee80211_work *wk, + struct sk_buff *skb) +{ + if (!skb) { + cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta); + return WORK_DONE_DESTROY; + } + + if (wk->type == IEEE80211_WORK_AUTH) { + cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len); + return WORK_DONE_DESTROY; + } + + mutex_lock(&wk->sdata->u.mgd.mtx); + ieee80211_rx_mgmt_probe_resp(wk->sdata, skb); + mutex_unlock(&wk->sdata->u.mgd.mtx); + + wk->type = IEEE80211_WORK_AUTH; + wk->probe_auth.tries = 0; + return WORK_DONE_REQUEUE; +} + +int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, + struct cfg80211_auth_request *req) +{ + const u8 *ssid; + struct ieee80211_work *wk; + u16 auth_alg; + + switch (req->auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + auth_alg = WLAN_AUTH_OPEN; + break; + case NL80211_AUTHTYPE_SHARED_KEY: + auth_alg = WLAN_AUTH_SHARED_KEY; + break; + case NL80211_AUTHTYPE_FT: + 
auth_alg = WLAN_AUTH_FT; + break; + case NL80211_AUTHTYPE_NETWORK_EAP: + auth_alg = WLAN_AUTH_LEAP; + break; + default: + return -EOPNOTSUPP; + } + + wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL); + if (!wk) + return -ENOMEM; + + memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN); + + if (req->ie && req->ie_len) { + memcpy(wk->ie, req->ie, req->ie_len); + wk->ie_len = req->ie_len; + } + + if (req->key && req->key_len) { + wk->probe_auth.key_len = req->key_len; + wk->probe_auth.key_idx = req->key_idx; + memcpy(wk->probe_auth.key, req->key, req->key_len); + } + + ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); + memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]); + wk->probe_auth.ssid_len = ssid[1]; + + wk->probe_auth.algorithm = auth_alg; + wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY; + + /* if we already have a probe, don't probe again */ + if (req->bss->proberesp_ies) + wk->type = IEEE80211_WORK_AUTH; + else + wk->type = IEEE80211_WORK_DIRECT_PROBE; + wk->chan = req->bss->channel; + wk->sdata = sdata; + wk->done = ieee80211_probe_auth_done; + + ieee80211_add_work(wk); + return 0; +} + +static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt; + u16 status; + + if (!skb) { + cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta); + return WORK_DONE_DESTROY; + } + + mgmt = (void *)skb->data; + status = le16_to_cpu(mgmt->u.assoc_resp.status_code); + + if (status == WLAN_STATUS_SUCCESS) { + mutex_lock(&wk->sdata->u.mgd.mtx); + if (!ieee80211_assoc_success(wk, mgmt, skb->len)) { + mutex_unlock(&wk->sdata->u.mgd.mtx); + /* oops -- internal error -- send timeout for now */ + cfg80211_send_assoc_timeout(wk->sdata->dev, + wk->filter_ta); + return WORK_DONE_DESTROY; + } + mutex_unlock(&wk->sdata->u.mgd.mtx); + } + + cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len); + return WORK_DONE_DESTROY; +} + +int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, + struct cfg80211_assoc_request *req) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_bss *bss = (void *)req->bss->priv; + struct ieee80211_work *wk; + const u8 *ssid; + int i; + + mutex_lock(&ifmgd->mtx); + if (ifmgd->associated) { + if (!req->prev_bssid || + memcmp(req->prev_bssid, ifmgd->associated->bssid, + ETH_ALEN)) { + /* + * We are already associated and the request was not a + * reassociation request from the current BSS, so + * reject it. 
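ieee80211_mgd_auth() above maps the nl80211 auth types onto on-air authentication algorithm numbers before queuing the auth/probe work item. The mapping restated as a standalone function (editor's illustration; the algorithm numbers are the standard ones, 0/1/2 from IEEE 802.11 and 128 for Cisco Network-EAP/LEAP, and the enum names are local stand-ins):

#include <stdio.h>

/* on-air authentication algorithm numbers */
enum { AUTH_OPEN = 0, AUTH_SHARED_KEY = 1, AUTH_FT = 2, AUTH_LEAP = 128 };

/* local stand-ins for the nl80211 auth types handled above */
enum authtype { OPEN_SYSTEM, SHARED_KEY, FT, NETWORK_EAP, OTHER };

static int authtype_to_alg(enum authtype t)
{
	switch (t) {
	case OPEN_SYSTEM:	return AUTH_OPEN;
	case SHARED_KEY:	return AUTH_SHARED_KEY;
	case FT:		return AUTH_FT;
	case NETWORK_EAP:	return AUTH_LEAP;
	default:		return -1;	/* -EOPNOTSUPP in the real code */
	}
}

int main(void)
{
	printf("open system -> alg %d\n", authtype_to_alg(OPEN_SYSTEM));
	printf("network EAP -> alg %d\n", authtype_to_alg(NETWORK_EAP));
	printf("unsupported -> %d\n", authtype_to_alg(OTHER));
	return 0;
}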
+ */ + mutex_unlock(&ifmgd->mtx); + return -EALREADY; + } + + /* Trying to reassociate - clear previous association state */ + ieee80211_set_disassoc(sdata); + } + mutex_unlock(&ifmgd->mtx); + + wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL); + if (!wk) + return -ENOMEM; + + ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; + ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED; + + for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) + if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || + req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || + req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) + ifmgd->flags |= IEEE80211_STA_DISABLE_11N; + + + if (req->ie && req->ie_len) { + memcpy(wk->ie, req->ie, req->ie_len); + wk->ie_len = req->ie_len; + } else + wk->ie_len = 0; + + wk->assoc.bss = req->bss; + + memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN); + + /* new association always uses requested smps mode */ + if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) { + if (ifmgd->powersave) + ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC; + else + ifmgd->ap_smps = IEEE80211_SMPS_OFF; + } else + ifmgd->ap_smps = ifmgd->req_smps; + + wk->assoc.smps = ifmgd->ap_smps; + /* + * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode. + * We still associate in non-HT mode (11a/b/g) if any one of these + * ciphers is configured as pairwise. + * We can set this to true for non-11n hardware, that'll be checked + * separately along with the peer capabilities. + */ + wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N); + wk->assoc.capability = req->bss->capability; + wk->assoc.wmm_used = bss->wmm_used; + wk->assoc.supp_rates = bss->supp_rates; + wk->assoc.supp_rates_len = bss->supp_rates_len; + wk->assoc.ht_information_ie = + ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION); + + if (bss->wmm_used && bss->uapsd_supported && + (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { + wk->assoc.uapsd_used = true; + ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED; + } else { + wk->assoc.uapsd_used = false; + ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED; + } + + ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); + memcpy(wk->assoc.ssid, ssid + 2, ssid[1]); + wk->assoc.ssid_len = ssid[1]; + + if (req->prev_bssid) + memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN); + + wk->type = IEEE80211_WORK_ASSOC; + wk->chan = req->bss->channel; + wk->sdata = sdata; + wk->done = ieee80211_assoc_done; + + if (req->use_mfp) { + ifmgd->mfp = IEEE80211_MFP_REQUIRED; + ifmgd->flags |= IEEE80211_STA_MFP_ENABLED; + } else { + ifmgd->mfp = IEEE80211_MFP_DISABLED; + ifmgd->flags &= ~IEEE80211_STA_MFP_ENABLED; + } + + if (req->crypto.control_port) + ifmgd->flags |= IEEE80211_STA_CONTROL_PORT; + else + ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; + + ieee80211_add_work(wk); + return 0; +} + +int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, + struct cfg80211_deauth_request *req, + void *cookie) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct ieee80211_work *wk; + const u8 *bssid = req->bss->bssid; + + mutex_lock(&ifmgd->mtx); + + if (ifmgd->associated == req->bss) { + bssid = req->bss->bssid; + ieee80211_set_disassoc(sdata); + mutex_unlock(&ifmgd->mtx); + } else { + bool not_auth_yet = false; + + mutex_unlock(&ifmgd->mtx); + + mutex_lock(&local->work_mtx); + list_for_each_entry(wk, &local->work_list, list) { + if (wk->sdata != sdata) + continue; + + if (wk->type != IEEE80211_WORK_DIRECT_PROBE && + wk->type != 
IEEE80211_WORK_AUTH) + continue; + + if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) + continue; + + not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE; + list_del_rcu(&wk->list); + free_work(wk); + break; + } + mutex_unlock(&local->work_mtx); + + /* + * If somebody requests authentication and we haven't + * sent out an auth frame yet there's no need to send + * out a deauth frame either. If the state was PROBE, + * then this is the case. If it's AUTH we have sent a + * frame, and if it's IDLE we have completed the auth + * process already. + */ + if (not_auth_yet) { + __cfg80211_auth_canceled(sdata->dev, bssid); + return 0; + } + } + + printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", + sdata->name, bssid, req->reason_code); + + ieee80211_send_deauth_disassoc(sdata, bssid, + IEEE80211_STYPE_DEAUTH, req->reason_code, + cookie); + + ieee80211_recalc_idle(sdata->local); + + return 0; +} + +int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, + struct cfg80211_disassoc_request *req, + void *cookie) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + mutex_lock(&ifmgd->mtx); + + /* + * cfg80211 should catch this ... but it's racy since + * we can receive a disassoc frame, process it, hand it + * to cfg80211 while that's in a locked section already + * trying to tell us that the user wants to disconnect. + */ + if (ifmgd->associated != req->bss) { + mutex_unlock(&ifmgd->mtx); + return -ENOLINK; + } + + printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n", + sdata->name, req->bss->bssid, req->reason_code); + + ieee80211_set_disassoc(sdata); + + mutex_unlock(&ifmgd->mtx); + + ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, + IEEE80211_STYPE_DISASSOC, req->reason_code, + cookie); + + ieee80211_recalc_idle(sdata->local); + + return 0; +} + +int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + const u8 *buf, size_t len, u64 *cookie) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct sk_buff *skb; + + /* Check that we are on the requested channel for transmission */ + if ((chan != local->tmp_channel || + channel_type != local->tmp_channel_type) && + (chan != local->oper_channel || + channel_type != local->oper_channel_type)) + return -EBUSY; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); + if (!skb) + return -ENOMEM; + skb_reserve(skb, local->hw.extra_tx_headroom); + + memcpy(skb_put(skb, len), buf, len); + + if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED)) + IEEE80211_SKB_CB(skb)->flags |= + IEEE80211_TX_INTFL_DONT_ENCRYPT; + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX | + IEEE80211_TX_CTL_REQ_TX_STATUS; + skb->dev = sdata->dev; + ieee80211_tx_skb(sdata, skb); + + *cookie = (unsigned long) skb; + return 0; +} diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c new file mode 100644 index 0000000..c36b191 --- /dev/null +++ b/net/mac80211/offchannel.c @@ -0,0 +1,170 @@ +/* + * Off-channel operation helpers + * + * Copyright 2003, Jouni Malinen + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. 
+ * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * Copyright 2009 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include "ieee80211_i.h" + +/* + * inform AP that we will go to sleep so that it will buffer the frames + * while we scan + */ +static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + + local->offchannel_ps_enabled = false; + + /* FIXME: what to do when local->pspolling is true? */ + + del_timer_sync(&local->dynamic_ps_timer); + cancel_work_sync(&local->dynamic_ps_enable_work); + + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + local->offchannel_ps_enabled = true; + local->hw.conf.flags &= ~IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } + + if (!(local->offchannel_ps_enabled) || + !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) + /* + * If power save was enabled, no need to send a nullfunc + * frame because AP knows that we are sleeping. But if the + * hardware is creating the nullfunc frame for power save + * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not + * enabled) and power save was enabled, the firmware just + * sent a null frame with power save disabled. So we need + * to send a new nullfunc frame to inform the AP that we + * are again sleeping. + */ + ieee80211_send_nullfunc(local, sdata, 1); +} + +/* inform AP that we are awake again, unless power save is enabled */ +static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + + if (!local->ps_sdata) + ieee80211_send_nullfunc(local, sdata, 0); + else if (local->offchannel_ps_enabled) { + /* + * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware + * will send a nullfunc frame with the powersave bit set + * even though the AP already knows that we are sleeping. + * This could be avoided by sending a null frame with power + * save bit disabled before enabling the power save, but + * this doesn't gain anything. + * + * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need + * to send a nullfunc frame because AP already knows that + * we are sleeping, let's just enable power save mode in + * hardware. + */ + local->hw.conf.flags |= IEEE80211_CONF_PS; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); + } else if (local->hw.conf.dynamic_ps_timeout > 0) { + /* + * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer + * had been running before leaving the operating channel, + * restart the timer now and send a nullfunc frame to inform + * the AP that we are awake. 
+ */ + ieee80211_send_nullfunc(local, sdata, 0); + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); + } +} + +void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + /* disable beaconing */ + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_ADHOC || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + ieee80211_bss_info_change_notify( + sdata, BSS_CHANGED_BEACON_ENABLED); + + /* + * only handle non-STA interfaces here, STA interfaces + * are handled in ieee80211_offchannel_stop_station(), + * e.g., from the background scan state machine. + * + * In addition, do not stop monitor interface to allow it to be + * used from user space controlled off-channel operations. + */ + if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_MONITOR) + netif_tx_stop_all_queues(sdata->dev); + } + mutex_unlock(&local->iflist_mtx); +} + +void ieee80211_offchannel_stop_station(struct ieee80211_local *local) +{ + struct ieee80211_sub_if_data *sdata; + + /* + * notify the AP about us leaving the channel and stop all STA interfaces + */ + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + netif_tx_stop_all_queues(sdata->dev); + if (sdata->u.mgd.associated) + ieee80211_offchannel_ps_enable(sdata); + } + } + mutex_unlock(&local->iflist_mtx); +} + +void ieee80211_offchannel_return(struct ieee80211_local *local, + bool enable_beaconing) +{ + struct ieee80211_sub_if_data *sdata; + + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + /* Tell AP we're back */ + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + if (sdata->u.mgd.associated) + ieee80211_offchannel_ps_disable(sdata); + } + + if (sdata->vif.type != NL80211_IFTYPE_MONITOR) + netif_tx_wake_all_queues(sdata->dev); + + /* re-enable beaconing */ + if (enable_beaconing && + (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_ADHOC || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT)) + ieee80211_bss_info_change_notify( + sdata, BSS_CHANGED_BEACON_ENABLED); + } + mutex_unlock(&local->iflist_mtx); +} diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c new file mode 100644 index 0000000..0e64484 --- /dev/null +++ b/net/mac80211/pm.c @@ -0,0 +1,119 @@ +#include +#include + +#include "ieee80211_i.h" +#include "mesh.h" +#include "driver-ops.h" +#include "led.h" + +int __ieee80211_suspend(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + + ieee80211_scan_cancel(local); + + ieee80211_stop_queues_by_reason(hw, + IEEE80211_QUEUE_STOP_REASON_SUSPEND); + + /* flush out all packets */ + synchronize_net(); + + local->quiescing = true; + /* make quiescing visible to timers everywhere */ + mb(); + + flush_workqueue(local->workqueue); + + /* Don't try to run timers while suspended. */ + del_timer_sync(&local->sta_cleanup); + + /* + * Note that this particular timer doesn't need to be + * restarted at resume. 
+ */ + cancel_work_sync(&local->dynamic_ps_enable_work); + del_timer_sync(&local->dynamic_ps_timer); + + /* disable keys */ + list_for_each_entry(sdata, &local->interfaces, list) + ieee80211_disable_keys(sdata); + + /* Tear down aggregation sessions */ + + rcu_read_lock(); + + if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { + list_for_each_entry_rcu(sta, &local->sta_list, list) { + set_sta_flags(sta, WLAN_STA_SUSPEND); + ieee80211_sta_tear_down_BA_sessions(sta); + } + } + + rcu_read_unlock(); + + /* remove STAs */ + mutex_lock(&local->sta_mtx); + list_for_each_entry(sta, &local->sta_list, list) { + if (sta->uploaded) { + sdata = sta->sdata; + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + + drv_sta_remove(local, sdata, &sta->sta); + } + + mesh_plink_quiesce(sta); + } + mutex_unlock(&local->sta_mtx); + + /* remove all interfaces */ + list_for_each_entry(sdata, &local->interfaces, list) { + switch(sdata->vif.type) { + case NL80211_IFTYPE_STATION: + ieee80211_sta_quiesce(sdata); + break; + case NL80211_IFTYPE_ADHOC: + ieee80211_ibss_quiesce(sdata); + break; + case NL80211_IFTYPE_MESH_POINT: + ieee80211_mesh_quiesce(sdata); + break; + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_MONITOR: + /* don't tell driver about this */ + continue; + default: + break; + } + + if (!ieee80211_sdata_running(sdata)) + continue; + + /* disable beaconing */ + ieee80211_bss_info_change_notify(sdata, + BSS_CHANGED_BEACON_ENABLED); + + drv_remove_interface(local, &sdata->vif); + } + + /* stop hardware - this must stop RX */ + if (local->open_count) + ieee80211_stop_device(local); + + local->suspended = true; + /* need suspended to be visible before quiescing is false */ + barrier(); + local->quiescing = false; + + return 0; +} + +/* + * __ieee80211_resume() is a static inline which just calls + * ieee80211_reconfig(), which is also needed for hardware + * hang/firmware failure/etc. recovery. + */ diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c new file mode 100644 index 0000000..4df1085 --- /dev/null +++ b/net/mac80211/rate.c @@ -0,0 +1,399 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright (c) 2006 Jiri Benc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include "rate.h" +#include "ieee80211_i.h" +#include "debugfs.h" + +struct rate_control_alg { + struct list_head list; + struct rate_control_ops *ops; +}; + +static LIST_HEAD(rate_ctrl_algs); +static DEFINE_MUTEX(rate_ctrl_mutex); + +static char *ieee80211_default_rc_algo = CONFIG_COMPAT_MAC80211_RC_DEFAULT; +module_param(ieee80211_default_rc_algo, charp, 0644); +MODULE_PARM_DESC(ieee80211_default_rc_algo, + "Default rate control algorithm for mac80211 to use"); + +int ieee80211_rate_control_register(struct rate_control_ops *ops) +{ + struct rate_control_alg *alg; + + if (!ops->name) + return -EINVAL; + + mutex_lock(&rate_ctrl_mutex); + list_for_each_entry(alg, &rate_ctrl_algs, list) { + if (!strcmp(alg->ops->name, ops->name)) { + /* don't register an algorithm twice */ + WARN_ON(1); + mutex_unlock(&rate_ctrl_mutex); + return -EALREADY; + } + } + + alg = kzalloc(sizeof(*alg), GFP_KERNEL); + if (alg == NULL) { + mutex_unlock(&rate_ctrl_mutex); + return -ENOMEM; + } + alg->ops = ops; + + list_add_tail(&alg->list, &rate_ctrl_algs); + mutex_unlock(&rate_ctrl_mutex); + + return 0; +} +EXPORT_SYMBOL(ieee80211_rate_control_register); + +void ieee80211_rate_control_unregister(struct rate_control_ops *ops) +{ + struct rate_control_alg *alg; + + mutex_lock(&rate_ctrl_mutex); + list_for_each_entry(alg, &rate_ctrl_algs, list) { + if (alg->ops == ops) { + list_del(&alg->list); + kfree(alg); + break; + } + } + mutex_unlock(&rate_ctrl_mutex); +} +EXPORT_SYMBOL(ieee80211_rate_control_unregister); + +static struct rate_control_ops * +ieee80211_try_rate_control_ops_get(const char *name) +{ + struct rate_control_alg *alg; + struct rate_control_ops *ops = NULL; + + if (!name) + return NULL; + + mutex_lock(&rate_ctrl_mutex); + list_for_each_entry(alg, &rate_ctrl_algs, list) { + if (!strcmp(alg->ops->name, name)) + if (try_module_get(alg->ops->module)) { + ops = alg->ops; + break; + } + } + mutex_unlock(&rate_ctrl_mutex); + return ops; +} + +/* Get the rate control algorithm. 
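+ * If no algorithm with the requested name is registered, request_module() is tried, then the ieee80211_default_rc_algo module parameter, and finally the built-in CONFIG_COMPAT_MAC80211_RC_DEFAULT.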
*/ +static struct rate_control_ops * +ieee80211_rate_control_ops_get(const char *name) +{ + struct rate_control_ops *ops; + const char *alg_name; + + if (!name) + alg_name = ieee80211_default_rc_algo; + else + alg_name = name; + + ops = ieee80211_try_rate_control_ops_get(alg_name); + if (!ops) { + request_module("rc80211_%s", alg_name); + ops = ieee80211_try_rate_control_ops_get(alg_name); + } + if (!ops && name) + /* try default if specific alg requested but not found */ + ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo); + + /* try built-in one if specific alg requested but not found */ + if (!ops && strlen(CONFIG_COMPAT_MAC80211_RC_DEFAULT)) + ops = ieee80211_try_rate_control_ops_get(CONFIG_COMPAT_MAC80211_RC_DEFAULT); + + return ops; +} + +static void ieee80211_rate_control_ops_put(struct rate_control_ops *ops) +{ + module_put(ops->module); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static ssize_t rcname_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct rate_control_ref *ref = file->private_data; + int len = strlen(ref->ops->name); + + return simple_read_from_buffer(userbuf, count, ppos, + ref->ops->name, len); +} + +static const struct file_operations rcname_ops = { + .read = rcname_read, + .open = mac80211_open_file_generic, +}; +#endif + +static struct rate_control_ref *rate_control_alloc(const char *name, + struct ieee80211_local *local) +{ + struct dentry *debugfsdir = NULL; + struct rate_control_ref *ref; + + ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); + if (!ref) + goto fail_ref; + kref_init(&ref->kref); + ref->local = local; + ref->ops = ieee80211_rate_control_ops_get(name); + if (!ref->ops) + goto fail_ops; + +#ifdef CONFIG_MAC80211_DEBUGFS + debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); + local->debugfs.rcdir = debugfsdir; + debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops); +#endif + + ref->priv = ref->ops->alloc(&local->hw, debugfsdir); + if (!ref->priv) + goto fail_priv; + return ref; + +fail_priv: + ieee80211_rate_control_ops_put(ref->ops); +fail_ops: + kfree(ref); +fail_ref: + return NULL; +} + +static void rate_control_release(struct kref *kref) +{ + struct rate_control_ref *ctrl_ref; + + ctrl_ref = container_of(kref, struct rate_control_ref, kref); + ctrl_ref->ops->free(ctrl_ref->priv); + +#ifdef CONFIG_MAC80211_DEBUGFS + debugfs_remove_recursive(ctrl_ref->local->debugfs.rcdir); + ctrl_ref->local->debugfs.rcdir = NULL; +#endif + + ieee80211_rate_control_ops_put(ctrl_ref->ops); + kfree(ctrl_ref); +} + +static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc) +{ + struct sk_buff *skb = txrc->skb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + __le16 fc; + + fc = hdr->frame_control; + + return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc)); +} + +static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx) +{ + u8 i; + + if (basic_rates == 0) + return; /* assume basic rates unknown and accept rate */ + if (*idx < 0) + return; + if (basic_rates & (1 << *idx)) + return; /* selected rate is a basic rate */ + + for (i = *idx + 1; i <= max_rate_idx; i++) { + if (basic_rates & (1 << i)) { + *idx = i; + return; + } + } + + /* could not find a basic rate; use original selection */ +} + +bool rate_control_send_low(struct ieee80211_sta *sta, + void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + struct ieee80211_tx_info *info = 
IEEE80211_SKB_CB(txrc->skb); + + if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) { + info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta); + info->control.rates[0].count = + (info->flags & IEEE80211_TX_CTL_NO_ACK) ? + 1 : txrc->hw->max_rate_tries; + if (!sta && txrc->ap) + rc_send_low_broadcast(&info->control.rates[0].idx, + txrc->bss_conf->basic_rates, + txrc->sband->n_bitrates); + return true; + } + return false; +} +EXPORT_SYMBOL(rate_control_send_low); + +static void rate_idx_match_mask(struct ieee80211_tx_rate *rate, + int n_bitrates, u32 mask) +{ + int j; + + /* See whether the selected rate or anything below it is allowed. */ + for (j = rate->idx; j >= 0; j--) { + if (mask & (1 << j)) { + /* Okay, found a suitable rate. Use it. */ + rate->idx = j; + return; + } + } + + /* Try to find a higher rate that would be allowed */ + for (j = rate->idx + 1; j < n_bitrates; j++) { + if (mask & (1 << j)) { + /* Okay, found a suitable rate. Use it. */ + rate->idx = j; + return; + } + } + + /* + * Uh.. No suitable rate exists. This should not really happen with + * sane TX rate mask configurations. However, should someone manage to + * configure supported rates and TX rate mask in incompatible way, + * allow the frame to be transmitted with whatever the rate control + * selected. + */ +} + +void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_tx_rate_control *txrc) +{ + struct rate_control_ref *ref = sdata->local->rate_ctrl; + void *priv_sta = NULL; + struct ieee80211_sta *ista = NULL; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); + int i; + u32 mask; + + if (sta) { + ista = &sta->sta; + priv_sta = sta->rate_ctrl_priv; + } + + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + info->control.rates[i].idx = -1; + info->control.rates[i].flags = 0; + info->control.rates[i].count = 1; + } + + if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) + return; + + ref->ops->get_rate(ref->priv, ista, priv_sta, txrc); + + /* + * Try to enforce the rateidx mask the user wanted. skip this if the + * default mask (allow all rates) is used to save some processing for + * the common case. + */ + mask = sdata->rc_rateidx_mask[info->band]; + if (mask != (1 << txrc->sband->n_bitrates) - 1) { + if (sta) { + /* Filter out rates that the STA does not support */ + mask &= sta->sta.supp_rates[info->band]; + } + /* + * Make sure the rate index selected for each TX rate is + * included in the configured mask and change the rate indexes + * if needed. 
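+ * For example, with a legacy rate mask of 0x00f a selected rate index of 6 is moved down to index 3, the highest allowed rate at or below it; entries flagged IEEE80211_TX_RC_MCS are skipped by the caller.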
+ */ + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + /* Rate masking supports only legacy rates for now */ + if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) + continue; + rate_idx_match_mask(&info->control.rates[i], + txrc->sband->n_bitrates, mask); + } + } + + BUG_ON(info->control.rates[0].idx < 0); +} + +struct rate_control_ref *rate_control_get(struct rate_control_ref *ref) +{ + kref_get(&ref->kref); + return ref; +} + +void rate_control_put(struct rate_control_ref *ref) +{ + kref_put(&ref->kref, rate_control_release); +} + +int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, + const char *name) +{ + struct rate_control_ref *ref, *old; + + ASSERT_RTNL(); + + if (local->open_count) + return -EBUSY; + + if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { + if (WARN_ON(!local->ops->set_rts_threshold)) + return -EINVAL; + return 0; + } + + ref = rate_control_alloc(name, local); + if (!ref) { + printk(KERN_WARNING "%s: Failed to select rate control " + "algorithm\n", wiphy_name(local->hw.wiphy)); + return -ENOENT; + } + + old = local->rate_ctrl; + local->rate_ctrl = ref; + if (old) { + rate_control_put(old); + sta_info_flush(local, NULL); + } + + printk(KERN_DEBUG "%s: Selected rate control " + "algorithm '%s'\n", wiphy_name(local->hw.wiphy), + ref->ops->name); + + return 0; +} + +void rate_control_deinitialize(struct ieee80211_local *local) +{ + struct rate_control_ref *ref; + + ref = local->rate_ctrl; + + if (!ref) + return; + + local->rate_ctrl = NULL; + rate_control_put(ref); +} + diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h new file mode 100644 index 0000000..065a961 --- /dev/null +++ b/net/mac80211/rate.h @@ -0,0 +1,151 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright (c) 2006 Jiri Benc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef IEEE80211_RATE_H +#define IEEE80211_RATE_H + +#include +#include +#include +#include +#include +#include "ieee80211_i.h" +#include "sta_info.h" + +struct rate_control_ref { + struct ieee80211_local *local; + struct rate_control_ops *ops; + void *priv; + struct kref kref; +}; + +void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + struct ieee80211_tx_rate_control *txrc); +struct rate_control_ref *rate_control_get(struct rate_control_ref *ref); +void rate_control_put(struct rate_control_ref *ref); + +static inline void rate_control_tx_status(struct ieee80211_local *local, + struct ieee80211_supported_band *sband, + struct sta_info *sta, + struct sk_buff *skb) +{ + struct rate_control_ref *ref = local->rate_ctrl; + struct ieee80211_sta *ista = &sta->sta; + void *priv_sta = sta->rate_ctrl_priv; + + if (!ref) + return; + + ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); +} + + +static inline void rate_control_rate_init(struct sta_info *sta) +{ + struct ieee80211_local *local = sta->sdata->local; + struct rate_control_ref *ref = sta->rate_ctrl; + struct ieee80211_sta *ista = &sta->sta; + void *priv_sta = sta->rate_ctrl_priv; + struct ieee80211_supported_band *sband; + + if (!ref) + return; + + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + + ref->ops->rate_init(ref->priv, sband, ista, priv_sta); +} + +static inline void rate_control_rate_update(struct ieee80211_local *local, + struct ieee80211_supported_band *sband, + struct sta_info *sta, u32 changed, + enum nl80211_channel_type oper_chan_type) +{ + struct rate_control_ref *ref = local->rate_ctrl; + struct ieee80211_sta *ista = &sta->sta; + void *priv_sta = sta->rate_ctrl_priv; + + if (ref && ref->ops->rate_update) + ref->ops->rate_update(ref->priv, sband, ista, + priv_sta, changed, oper_chan_type); +} + +static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, + struct ieee80211_sta *sta, + gfp_t gfp) +{ + return ref->ops->alloc_sta(ref->priv, sta, gfp); +} + +static inline void rate_control_free_sta(struct sta_info *sta) +{ + struct rate_control_ref *ref = sta->rate_ctrl; + struct ieee80211_sta *ista = &sta->sta; + void *priv_sta = sta->rate_ctrl_priv; + + ref->ops->free_sta(ref->priv, ista, priv_sta); +} + +static inline void rate_control_add_sta_debugfs(struct sta_info *sta) +{ +#ifdef CONFIG_MAC80211_DEBUGFS + struct rate_control_ref *ref = sta->rate_ctrl; + if (ref && sta->debugfs.dir && ref->ops->add_sta_debugfs) + ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv, + sta->debugfs.dir); +#endif +} + +static inline void rate_control_remove_sta_debugfs(struct sta_info *sta) +{ +#ifdef CONFIG_MAC80211_DEBUGFS + struct rate_control_ref *ref = sta->rate_ctrl; + if (ref && ref->ops->remove_sta_debugfs) + ref->ops->remove_sta_debugfs(ref->priv, sta->rate_ctrl_priv); +#endif +} + +/* Get a reference to the rate control algorithm. If `name' is NULL, get the + * first available algorithm. 
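+ * (with this implementation that is the default algorithm named by the ieee80211_default_rc_algo module parameter in rate.c)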
*/ +int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, + const char *name); +void rate_control_deinitialize(struct ieee80211_local *local); + + +/* Rate control algorithms */ +#ifdef CONFIG_MAC80211_RC_PID +extern int rc80211_pid_init(void); +extern void rc80211_pid_exit(void); +#else +static inline int rc80211_pid_init(void) +{ + return 0; +} +static inline void rc80211_pid_exit(void) +{ +} +#endif + +#ifdef CONFIG_MAC80211_RC_MINSTREL +extern int rc80211_minstrel_init(void); +extern void rc80211_minstrel_exit(void); +#else +static inline int rc80211_minstrel_init(void) +{ + return 0; +} +static inline void rc80211_minstrel_exit(void) +{ +} +#endif + + +#endif /* IEEE80211_RATE_H */ diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c new file mode 100644 index 0000000..6e5d68b --- /dev/null +++ b/net/mac80211/rc80211_minstrel.c @@ -0,0 +1,570 @@ +/* + * Copyright (C) 2008 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based on minstrel.c: + * Copyright (C) 2005-2007 Derek Smithies + * Sponsored by Indranet Technologies Ltd + * + * Based on sample.c: + * Copyright (c) 2005 John Bicket + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include "rate.h" +#include "rc80211_minstrel.h" + +#define SAMPLE_COLUMNS 10 +#define SAMPLE_TBL(_mi, _idx, _col) \ + _mi->sample_table[(_idx * SAMPLE_COLUMNS) + _col] + +/* convert mac80211 rate index to local array index */ +static inline int +rix_to_ndx(struct minstrel_sta_info *mi, int rix) +{ + int i = rix; + for (i = rix; i >= 0; i--) + if (mi->r[i].rix == rix) + break; + WARN_ON(i < 0); + return i; +} + +static void +minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi) +{ + u32 max_tp = 0, index_max_tp = 0, index_max_tp2 = 0; + u32 max_prob = 0, index_max_prob = 0; + u32 usecs; + u32 p; + int i; + + mi->stats_update = jiffies; + for (i = 0; i < mi->n_rates; i++) { + struct minstrel_rate *mr = &mi->r[i]; + + usecs = mr->perfect_tx_time; + if (!usecs) + usecs = 1000000; + + /* To avoid rounding issues, probabilities scale from 0 (0%) + * to 18000 (100%) */ + if (mr->attempts) { + p = (mr->success * 18000) / mr->attempts; + mr->succ_hist += mr->success; + mr->att_hist += mr->attempts; + mr->cur_prob = p; + p = ((p * (100 - mp->ewma_level)) + (mr->probability * + mp->ewma_level)) / 100; + mr->probability = p; + mr->cur_tp = p * (1000000 / usecs); + } + + mr->last_success = mr->success; + mr->last_attempts = mr->attempts; + mr->success = 0; + mr->attempts = 0; + + /* Sample less often below the 10% chance of success. + * Sample less often above the 95% chance of success. */ + if ((mr->probability > 17100) || (mr->probability < 1800)) { + mr->adjusted_retry_count = mr->retry_count >> 1; + if (mr->adjusted_retry_count > 2) + mr->adjusted_retry_count = 2; + mr->sample_limit = 4; + } else { + mr->sample_limit = -1; + mr->adjusted_retry_count = mr->retry_count; + } + if (!mr->adjusted_retry_count) + mr->adjusted_retry_count = 2; + } + + for (i = 0; i < mi->n_rates; i++) { + struct minstrel_rate *mr = &mi->r[i]; + if (max_tp < mr->cur_tp) { + index_max_tp = i; + max_tp = mr->cur_tp; + } + if (max_prob < mr->probability) { + index_max_prob = i; + max_prob = mr->probability; + } + } + + max_tp = 0; + for (i = 0; i < mi->n_rates; i++) { + struct minstrel_rate *mr = &mi->r[i]; + + if (i == index_max_tp) + continue; + + if (max_tp < mr->cur_tp) { + index_max_tp2 = i; + max_tp = mr->cur_tp; + } + } + mi->max_tp_rate = index_max_tp; + mi->max_tp_rate2 = index_max_tp2; + mi->max_prob_rate = index_max_prob; +} + +static void +minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + struct minstrel_sta_info *mi = priv_sta; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *ar = info->status.rates; + int i, ndx; + int success; + + success = !!(info->flags & IEEE80211_TX_STAT_ACK); + + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + if (ar[i].idx < 0) + break; + + ndx = rix_to_ndx(mi, ar[i].idx); + if (ndx < 0) + continue; + + mi->r[ndx].attempts += ar[i].count; + + if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0)) + mi->r[ndx].success += success; + } + + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0)) + mi->sample_count++; + + if (mi->sample_deferred > 0) + mi->sample_deferred--; +} + + +static inline unsigned int +minstrel_get_retry_count(struct minstrel_rate *mr, + struct ieee80211_tx_info *info) +{ + unsigned int retry = mr->adjusted_retry_count; + + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) + retry = max(2U, 
min(mr->retry_count_rtscts, retry)); + else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) + retry = max(2U, min(mr->retry_count_cts, retry)); + return retry; +} + + +static int +minstrel_get_next_sample(struct minstrel_sta_info *mi) +{ + unsigned int sample_ndx; + sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column); + mi->sample_idx++; + if ((int) mi->sample_idx > (mi->n_rates - 2)) { + mi->sample_idx = 0; + mi->sample_column++; + if (mi->sample_column >= SAMPLE_COLUMNS) + mi->sample_column = 0; + } + return sample_ndx; +} + +static void +minstrel_get_rate(void *priv, struct ieee80211_sta *sta, + void *priv_sta, struct ieee80211_tx_rate_control *txrc) +{ + struct sk_buff *skb = txrc->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct minstrel_sta_info *mi = priv_sta; + struct minstrel_priv *mp = priv; + struct ieee80211_tx_rate *ar = info->control.rates; + unsigned int ndx, sample_ndx = 0; + bool mrr; + bool sample_slower = false; + bool sample = false; + int i, delta; + int mrr_ndx[3]; + int sample_rate; + + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot; + + if (time_after(jiffies, mi->stats_update + (mp->update_interval * + HZ) / 1000)) + minstrel_update_stats(mp, mi); + + ndx = mi->max_tp_rate; + + if (mrr) + sample_rate = mp->lookaround_rate_mrr; + else + sample_rate = mp->lookaround_rate; + + mi->packet_count++; + delta = (mi->packet_count * sample_rate / 100) - + (mi->sample_count + mi->sample_deferred / 2); + + /* delta > 0: sampling required */ + if ((delta > 0) && (mrr || !mi->prev_sample)) { + struct minstrel_rate *msr; + if (mi->packet_count >= 10000) { + mi->sample_deferred = 0; + mi->sample_count = 0; + mi->packet_count = 0; + } else if (delta > mi->n_rates * 2) { + /* With multi-rate retry, not every planned sample + * attempt actually gets used, due to the way the retry + * chain is set up - [max_tp,sample,prob,lowest] for + * sample_rate < max_tp. + * + * If there's too much sampling backlog and the link + * starts getting worse, minstrel would start bursting + * out lots of sampling frames, which would result + * in a large throughput loss. */ + mi->sample_count += (delta - mi->n_rates * 2); + } + + sample_ndx = minstrel_get_next_sample(mi); + msr = &mi->r[sample_ndx]; + sample = true; + sample_slower = mrr && (msr->perfect_tx_time > + mi->r[ndx].perfect_tx_time); + + if (!sample_slower) { + if (msr->sample_limit != 0) { + ndx = sample_ndx; + mi->sample_count++; + if (msr->sample_limit > 0) + msr->sample_limit--; + } else { + sample = false; + } + } else { + /* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark + * packets that have the sampling rate deferred to the + * second MRR stage. Increase the sample counter only + * if the deferred sample rate was actually used. 
+ * Use the sample_deferred counter to make sure that + * the sampling is not done in large bursts */ + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; + mi->sample_deferred++; + } + } + mi->prev_sample = sample; + + /* If we're not using MRR and the sampling rate already + * has a probability of >95%, we shouldn't be attempting + * to use it, as this only wastes precious airtime */ + if (!mrr && sample && (mi->r[ndx].probability > 17100)) + ndx = mi->max_tp_rate; + + ar[0].idx = mi->r[ndx].rix; + ar[0].count = minstrel_get_retry_count(&mi->r[ndx], info); + + if (!mrr) { + if (!sample) + ar[0].count = mp->max_retry; + ar[1].idx = mi->lowest_rix; + ar[1].count = mp->max_retry; + return; + } + + /* MRR setup */ + if (sample) { + if (sample_slower) + mrr_ndx[0] = sample_ndx; + else + mrr_ndx[0] = mi->max_tp_rate; + } else { + mrr_ndx[0] = mi->max_tp_rate2; + } + mrr_ndx[1] = mi->max_prob_rate; + mrr_ndx[2] = 0; + for (i = 1; i < 4; i++) { + ar[i].idx = mi->r[mrr_ndx[i - 1]].rix; + ar[i].count = mi->r[mrr_ndx[i - 1]].adjusted_retry_count; + } +} + + +static void +calc_rate_durations(struct minstrel_sta_info *mi, struct ieee80211_local *local, + struct minstrel_rate *d, struct ieee80211_rate *rate) +{ + int erp = !!(rate->flags & IEEE80211_RATE_ERP_G); + + d->perfect_tx_time = ieee80211_frame_duration(local, 1200, + rate->bitrate, erp, 1); + d->ack_time = ieee80211_frame_duration(local, 10, + rate->bitrate, erp, 1); +} + +static void +init_sample_table(struct minstrel_sta_info *mi) +{ + unsigned int i, col, new_idx; + unsigned int n_srates = mi->n_rates - 1; + u8 rnd[8]; + + mi->sample_column = 0; + mi->sample_idx = 0; + memset(mi->sample_table, 0, SAMPLE_COLUMNS * mi->n_rates); + + for (col = 0; col < SAMPLE_COLUMNS; col++) { + for (i = 0; i < n_srates; i++) { + get_random_bytes(rnd, sizeof(rnd)); + new_idx = (i + rnd[i & 7]) % n_srates; + + while (SAMPLE_TBL(mi, new_idx, col) != 0) + new_idx = (new_idx + 1) % n_srates; + + /* Don't sample the slowest rate (i.e. slowest base + * rate). 
We must presume that the slowest rate works + * fine, or else other management frames will also be + * failing and the link will break */ + SAMPLE_TBL(mi, new_idx, col) = i + 1; + } + } +} + +static void +minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ + struct minstrel_sta_info *mi = priv_sta; + struct minstrel_priv *mp = priv; + struct ieee80211_local *local = hw_to_local(mp->hw); + struct ieee80211_rate *ctl_rate; + unsigned int i, n = 0; + unsigned int t_slot = 9; /* FIXME: get real slot time */ + + mi->lowest_rix = rate_lowest_index(sband, sta); + ctl_rate = &sband->bitrates[mi->lowest_rix]; + mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate, + !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1); + + for (i = 0; i < sband->n_bitrates; i++) { + struct minstrel_rate *mr = &mi->r[n]; + unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0; + unsigned int tx_time_single; + unsigned int cw = mp->cw_min; + + if (!rate_supported(sta, sband->band, i)) + continue; + n++; + memset(mr, 0, sizeof(*mr)); + + mr->rix = i; + mr->bitrate = sband->bitrates[i].bitrate / 5; + calc_rate_durations(mi, local, mr, + &sband->bitrates[i]); + + /* calculate maximum number of retransmissions before + * fallback (based on maximum segment size) */ + mr->sample_limit = -1; + mr->retry_count = 1; + mr->retry_count_cts = 1; + mr->retry_count_rtscts = 1; + tx_time = mr->perfect_tx_time + mi->sp_ack_dur; + do { + /* add one retransmission */ + tx_time_single = mr->ack_time + mr->perfect_tx_time; + + /* contention window */ + tx_time_single += t_slot + min(cw, mp->cw_max); + cw = (cw << 1) | 1; + + tx_time += tx_time_single; + tx_time_cts += tx_time_single + mi->sp_ack_dur; + tx_time_rtscts += tx_time_single + 2 * mi->sp_ack_dur; + if ((tx_time_cts < mp->segment_size) && + (mr->retry_count_cts < mp->max_retry)) + mr->retry_count_cts++; + if ((tx_time_rtscts < mp->segment_size) && + (mr->retry_count_rtscts < mp->max_retry)) + mr->retry_count_rtscts++; + } while ((tx_time < mp->segment_size) && + (++mr->retry_count < mp->max_retry)); + mr->adjusted_retry_count = mr->retry_count; + } + + for (i = n; i < sband->n_bitrates; i++) { + struct minstrel_rate *mr = &mi->r[i]; + mr->rix = -1; + } + + mi->n_rates = n; + mi->stats_update = jiffies; + + init_sample_table(mi); +} + +static void * +minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) +{ + struct ieee80211_supported_band *sband; + struct minstrel_sta_info *mi; + struct minstrel_priv *mp = priv; + struct ieee80211_hw *hw = mp->hw; + int max_rates = 0; + int i; + + mi = kzalloc(sizeof(struct minstrel_sta_info), gfp); + if (!mi) + return NULL; + + for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + sband = hw->wiphy->bands[i]; + if (sband && sband->n_bitrates > max_rates) + max_rates = sband->n_bitrates; + } + + mi->r = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp); + if (!mi->r) + goto error; + + mi->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp); + if (!mi->sample_table) + goto error1; + + mi->stats_update = jiffies; + return mi; + +error1: + kfree(mi->r); +error: + kfree(mi); + return NULL; +} + +static void +minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta) +{ + struct minstrel_sta_info *mi = priv_sta; + + kfree(mi->sample_table); + kfree(mi->r); + kfree(mi); +} + +static void * +minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + struct minstrel_priv *mp; + + mp = kzalloc(sizeof(struct minstrel_priv), 
GFP_ATOMIC); + if (!mp) + return NULL; + + /* contention window settings + * Just an approximation. Using the per-queue values would complicate + * the calculations and is probably unnecessary */ + mp->cw_min = 15; + mp->cw_max = 1023; + + /* number of packets (in %) to use for sampling other rates + * sample less often for non-mrr packets, because the overhead + * is much higher than with mrr */ + mp->lookaround_rate = 5; + mp->lookaround_rate_mrr = 10; + + /* moving average weight for EWMA */ + mp->ewma_level = 75; + + /* maximum time that the hw is allowed to stay in one MRR segment */ + mp->segment_size = 6000; + + if (hw->max_rate_tries > 0) + mp->max_retry = hw->max_rate_tries; + else + /* safe default, does not necessarily have to match hw properties */ + mp->max_retry = 7; + + if (hw->max_rates >= 4) + mp->has_mrr = true; + + mp->hw = hw; + mp->update_interval = 100; + + return mp; +} + +static void +minstrel_free(void *priv) +{ + kfree(priv); +} + +static struct rate_control_ops mac80211_minstrel = { + .name = "minstrel", + .tx_status = minstrel_tx_status, + .get_rate = minstrel_get_rate, + .rate_init = minstrel_rate_init, + .alloc = minstrel_alloc, + .free = minstrel_free, + .alloc_sta = minstrel_alloc_sta, + .free_sta = minstrel_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = minstrel_add_sta_debugfs, + .remove_sta_debugfs = minstrel_remove_sta_debugfs, +#endif +}; + +int __init +rc80211_minstrel_init(void) +{ + return ieee80211_rate_control_register(&mac80211_minstrel); +} + +void +rc80211_minstrel_exit(void) +{ + ieee80211_rate_control_unregister(&mac80211_minstrel); +} + diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h new file mode 100644 index 0000000..38bf416 --- /dev/null +++ b/net/mac80211/rc80211_minstrel.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2008 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __RC_MINSTREL_H +#define __RC_MINSTREL_H + +struct minstrel_rate { + int bitrate; + int rix; + + unsigned int perfect_tx_time; + unsigned int ack_time; + + int sample_limit; + unsigned int retry_count; + unsigned int retry_count_cts; + unsigned int retry_count_rtscts; + unsigned int adjusted_retry_count; + + u32 success; + u32 attempts; + u32 last_attempts; + u32 last_success; + + /* parts per thousand */ + u32 cur_prob; + u32 probability; + + /* per-rate throughput */ + u32 cur_tp; + + u64 succ_hist; + u64 att_hist; +}; + +struct minstrel_sta_info { + unsigned long stats_update; + unsigned int sp_ack_dur; + unsigned int rate_avg; + + unsigned int lowest_rix; + + unsigned int max_tp_rate; + unsigned int max_tp_rate2; + unsigned int max_prob_rate; + unsigned int packet_count; + unsigned int sample_count; + int sample_deferred; + + unsigned int sample_idx; + unsigned int sample_column; + + int n_rates; + struct minstrel_rate *r; + bool prev_sample; + + /* sampling table */ + u8 *sample_table; + +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *dbg_stats; +#endif +}; + +struct minstrel_priv { + struct ieee80211_hw *hw; + bool has_mrr; + unsigned int cw_min; + unsigned int cw_max; + unsigned int max_retry; + unsigned int ewma_level; + unsigned int segment_size; + unsigned int update_interval; + unsigned int lookaround_rate; + unsigned int lookaround_rate_mrr; +}; + +void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); +void minstrel_remove_sta_debugfs(void *priv, void *priv_sta); + +#endif diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c new file mode 100644 index 0000000..a715d94 --- /dev/null +++ b/net/mac80211/rc80211_minstrel_debugfs.c @@ -0,0 +1,164 @@ +/* + * Copyright (C) 2008 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based on minstrel.c: + * Copyright (C) 2005-2007 Derek Smithies + * Sponsored by Indranet Technologies Ltd + * + * Based on sample.c: + * Copyright (c) 2005 John Bicket + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + */ +#include +#include +#include +#include +#include +#include +#include "rc80211_minstrel.h" + +struct minstrel_stats_info { + struct minstrel_sta_info *mi; + char buf[4096]; + size_t len; +}; + +static int +minstrel_stats_open(struct inode *inode, struct file *file) +{ + struct minstrel_sta_info *mi = inode->i_private; + struct minstrel_stats_info *ms; + unsigned int i, tp, prob, eprob; + char *p; + + ms = kmalloc(sizeof(*ms), GFP_KERNEL); + if (!ms) + return -ENOMEM; + + file->private_data = ms; + p = ms->buf; + p += sprintf(p, "rate throughput ewma prob this prob " + "this succ/attempt success attempts\n"); + for (i = 0; i < mi->n_rates; i++) { + struct minstrel_rate *mr = &mi->r[i]; + + *(p++) = (i == mi->max_tp_rate) ? 'T' : ' '; + *(p++) = (i == mi->max_tp_rate2) ? 't' : ' '; + *(p++) = (i == mi->max_prob_rate) ? 'P' : ' '; + p += sprintf(p, "%3u%s", mr->bitrate / 2, + (mr->bitrate & 1 ? ".5" : " ")); + + tp = mr->cur_tp / ((18000 << 10) / 96); + prob = mr->cur_prob / 18; + eprob = mr->probability / 18; + + p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " + "%3u(%3u) %8llu %8llu\n", + tp / 10, tp % 10, + eprob / 10, eprob % 10, + prob / 10, prob % 10, + mr->last_success, + mr->last_attempts, + (unsigned long long)mr->succ_hist, + (unsigned long long)mr->att_hist); + } + p += sprintf(p, "\nTotal packet count:: ideal %d " + "lookaround %d\n\n", + mi->packet_count - mi->sample_count, + mi->sample_count); + ms->len = p - ms->buf; + + return 0; +} + +static ssize_t +minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *o) +{ + struct minstrel_stats_info *ms; + char *src; + + ms = file->private_data; + src = ms->buf; + + len = min(len, ms->len); + if (len <= *o) + return 0; + + src += *o; + len -= *o; + *o += len; + + if (copy_to_user(buf, src, len)) + return -EFAULT; + + return len; +} + +static int +minstrel_stats_release(struct inode *inode, struct file *file) +{ + struct minstrel_stats_info *ms = file->private_data; + + kfree(ms); + + return 0; +} + +static const struct file_operations minstrel_stat_fops = { + .owner = THIS_MODULE, + .open = minstrel_stats_open, + .read = minstrel_stats_read, + .release = minstrel_stats_release, +}; + +void +minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir) +{ + struct minstrel_sta_info *mi = priv_sta; + + mi->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, mi, + &minstrel_stat_fops); +} + +void +minstrel_remove_sta_debugfs(void *priv, void *priv_sta) +{ + struct minstrel_sta_info *mi = priv_sta; + + debugfs_remove(mi->dbg_stats); +} diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h new file mode 100644 index 0000000..1a873f0 --- /dev/null +++ b/net/mac80211/rc80211_pid.h @@ -0,0 +1,281 @@ +/* + * Copyright 2007, Mattias Nissler + * Copyright 2007, Stefano Brivio + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef RC80211_PID_H +#define RC80211_PID_H + +/* Sampling period for measuring percentage of failed frames in ms. */ +#define RC_PID_INTERVAL 125 + +/* Exponential averaging smoothness (used for I part of PID controller) */ +#define RC_PID_SMOOTHING_SHIFT 3 +#define RC_PID_SMOOTHING (1 << RC_PID_SMOOTHING_SHIFT) + +/* Sharpening factor (used for D part of PID controller) */ +#define RC_PID_SHARPENING_FACTOR 0 +#define RC_PID_SHARPENING_DURATION 0 + +/* Fixed point arithmetic shifting amount. */ +#define RC_PID_ARITH_SHIFT 8 + +/* Fixed point arithmetic factor. */ +#define RC_PID_ARITH_FACTOR (1 << RC_PID_ARITH_SHIFT) + +/* Proportional PID component coefficient. */ +#define RC_PID_COEFF_P 15 +/* Integral PID component coefficient. */ +#define RC_PID_COEFF_I 9 +/* Derivative PID component coefficient. */ +#define RC_PID_COEFF_D 15 + +/* Target failed frames rate for the PID controller. NB: This effectively gives + * the maximum failed frames percentage we're willing to accept. If the wireless + * link quality is good, the controller will fail to adjust failed frames + * percentage to the target. This is intentional. + */ +#define RC_PID_TARGET_PF 14 + +/* Rate behaviour normalization quantity over time. */ +#define RC_PID_NORM_OFFSET 3 + +/* Push high rates right after loading. */ +#define RC_PID_FAST_START 0 + +/* Arithmetic right shift for positive and negative values for ISO C. */ +#define RC_PID_DO_ARITH_RIGHT_SHIFT(x, y) \ + ((x) < 0 ? -((-(x)) >> (y)) : (x) >> (y)) + +enum rc_pid_event_type { + RC_PID_EVENT_TYPE_TX_STATUS, + RC_PID_EVENT_TYPE_RATE_CHANGE, + RC_PID_EVENT_TYPE_TX_RATE, + RC_PID_EVENT_TYPE_PF_SAMPLE, +}; + +union rc_pid_event_data { + /* RC_PID_EVENT_TX_STATUS */ + struct { + u32 flags; + struct ieee80211_tx_info tx_status; + }; + /* RC_PID_EVENT_TYPE_RATE_CHANGE */ + /* RC_PID_EVENT_TYPE_TX_RATE */ + struct { + int index; + int rate; + }; + /* RC_PID_EVENT_TYPE_PF_SAMPLE */ + struct { + s32 pf_sample; + s32 prop_err; + s32 int_err; + s32 der_err; + }; +}; + +struct rc_pid_event { + /* The time when the event occurred */ + unsigned long timestamp; + + /* Event ID number */ + unsigned int id; + + /* Type of event */ + enum rc_pid_event_type type; + + /* type specific data */ + union rc_pid_event_data data; +}; + +/* Size of the event ring buffer. */ +#define RC_PID_EVENT_RING_SIZE 32 + +struct rc_pid_event_buffer { + /* Counter that generates event IDs */ + unsigned int ev_count; + + /* Ring buffer of events */ + struct rc_pid_event ring[RC_PID_EVENT_RING_SIZE]; + + /* Index to the entry in events_buf to be reused */ + unsigned int next_entry; + + /* Lock that guards against concurrent access to this buffer struct */ + spinlock_t lock; + + /* Wait queue for poll/select and blocking I/O */ + wait_queue_head_t waitqueue; +}; + +struct rc_pid_events_file_info { + /* The event buffer we read */ + struct rc_pid_event_buffer *events; + + /* The next entry we should read */ + unsigned int next_entry; +}; + +/** + * struct rc_pid_debugfs_entries - tunable parameters + * + * Algorithm parameters, tunable via debugfs. + * @target: target percentage for failed frames + * @sampling_period: error sampling interval in milliseconds + * @coeff_p: absolute value of the proportional coefficient + * @coeff_i: absolute value of the integral coefficient + * @coeff_d: absolute value of the derivative coefficient + * @smoothing_shift: absolute value of the integral smoothing factor (i.e.
amount of smoothing introduced by the exponential moving average) + * @sharpen_factor: absolute value of the derivative sharpening factor (i.e. + * amount of emphasis given to the derivative term after low activity + * events) + * @sharpen_duration: duration of the sharpening effect after the detected low + * activity event, relative to sampling_period + * @norm_offset: amount of normalization periodically performed on the learnt + * rate behaviour values (lower means we should trust more what we learnt + * about behaviour of rates, higher means we should trust more the natural + * ordering of rates) + */ +struct rc_pid_debugfs_entries { + struct dentry *target; + struct dentry *sampling_period; + struct dentry *coeff_p; + struct dentry *coeff_i; + struct dentry *coeff_d; + struct dentry *smoothing_shift; + struct dentry *sharpen_factor; + struct dentry *sharpen_duration; + struct dentry *norm_offset; +}; + +void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf, + struct ieee80211_tx_info *stat); + +void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf, + int index, int rate); + +void rate_control_pid_event_tx_rate(struct rc_pid_event_buffer *buf, + int index, int rate); + +void rate_control_pid_event_pf_sample(struct rc_pid_event_buffer *buf, + s32 pf_sample, s32 prop_err, + s32 int_err, s32 der_err); + +void rate_control_pid_add_sta_debugfs(void *priv, void *priv_sta, + struct dentry *dir); + +void rate_control_pid_remove_sta_debugfs(void *priv, void *priv_sta); + +struct rc_pid_sta_info { + unsigned long last_change; + unsigned long last_sample; + + u32 tx_num_failed; + u32 tx_num_xmit; + + int txrate_idx; + + /* Average failed frames percentage error (i.e. actual vs. target + * percentage), scaled by RC_PID_SMOOTHING. This value is computed + * using an exponential weighted average technique: + * + * (RC_PID_SMOOTHING - 1) * err_avg_old + err + * err_avg = ------------------------------------------ + * RC_PID_SMOOTHING + * + * where err_avg is the new approximation, err_avg_old the previous one + * and err is the error w.r.t. the current failed frames percentage + * sample. Note that the bigger RC_PID_SMOOTHING the more weight is + * given to the previous estimate, resulting in smoother behavior (i.e. + * corresponding to a longer integration window). + * + * For computation, we actually don't use the above formula, but this + * one: + * + * err_avg_scaled = err_avg_old_scaled - err_avg_old + err + * + * where: + * err_avg_scaled = err_avg * RC_PID_SMOOTHING + * err_avg_old_scaled = err_avg_old * RC_PID_SMOOTHING + * + * This avoids floating point numbers and the err_avg value can + * easily be obtained by shifting err_avg_scaled right by + * RC_PID_SMOOTHING_SHIFT. + */ + s32 err_avg_sc; + + /* Last failed frames percentage sample. */ + u32 last_pf; + + /* Sharpening needed. */ + u8 sharp_cnt; + +#ifdef CONFIG_MAC80211_DEBUGFS + /* Event buffer */ + struct rc_pid_event_buffer events; + + /* Events debugfs file entry */ + struct dentry *events_entry; +#endif +}; + +/* Algorithm parameters. We keep them on a per-algorithm basis, so they can + * be tuned individually for each interface. + */ +struct rc_pid_rateinfo { + + /* Map sorted rates to rates in ieee80211_hw_mode. */ + int index; + + /* Map rates in ieee80211_hw_mode to sorted rates. */ + int rev_index; + + /* Did we do any measurement on this rate? */ + bool valid; + + /* Comparison with the lowest rate. 
*/ + int diff; +}; + +struct rc_pid_info { + + /* The failed frames percentage target. */ + unsigned int target; + + /* Rate at which failed frames percentage is sampled in 0.001s. */ + unsigned int sampling_period; + + /* P, I and D coefficients. */ + int coeff_p; + int coeff_i; + int coeff_d; + + /* Exponential averaging shift. */ + unsigned int smoothing_shift; + + /* Sharpening factor and duration. */ + unsigned int sharpen_factor; + unsigned int sharpen_duration; + + /* Normalization offset. */ + unsigned int norm_offset; + + /* Rates information. */ + struct rc_pid_rateinfo *rinfo; + + /* Index of the last used rate. */ + int oldrate; + +#ifdef CONFIG_MAC80211_DEBUGFS + /* Debugfs entries created for the parameters above. */ + struct rc_pid_debugfs_entries dentries; +#endif +}; + +#endif /* RC80211_PID_H */ diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c new file mode 100644 index 0000000..2652a37 --- /dev/null +++ b/net/mac80211/rc80211_pid_algo.c @@ -0,0 +1,476 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright 2007, Mattias Nissler + * Copyright 2007-2008, Stefano Brivio + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include "rate.h" +#include "mesh.h" +#include "rc80211_pid.h" + + +/* This is an implementation of a TX rate control algorithm that uses a PID + * controller. Given a target failed frames rate, the controller decides about + * TX rate changes to meet the target failed frames rate. + * + * The controller basically computes the following: + * + * adj = CP * err + CI * err_avg + CD * (err - last_err) * (1 + sharpening) + * + * where + * adj adjustment value that is used to switch TX rate (see below) + * err current error: target vs. current failed frames percentage + * last_err last error + * err_avg average (i.e. poor man's integral) of recent errors + * sharpening non-zero when fast response is needed (i.e. right after + * association or no frames sent for a long time), heading + * to zero over time + * CP Proportional coefficient + * CI Integral coefficient + * CD Derivative coefficient + * + * CP, CI, CD are subject to careful tuning. + * + * The integral component uses a exponential moving average approach instead of + * an actual sliding window. The advantage is that we don't need to keep an + * array of the last N error values and computation is easier. + * + * Once we have the adj value, we map it to a rate by means of a learning + * algorithm. This algorithm keeps the state of the percentual failed frames + * difference between rates. The behaviour of the lowest available rate is kept + * as a reference value, and every time we switch between two rates, we compute + * the difference between the failed frames each rate exhibited. By doing so, + * we compare behaviours which different rates exhibited in adjacent timeslices, + * thus the comparison is minimally affected by external conditions. This + * difference gets propagated to the whole set of measurements, so that the + * reference is always the same. Periodically, we normalize this set so that + * recent events weigh the most. By comparing the adj value with this set, we + * avoid pejorative switches to lower rates and allow for switches to higher + * rates if they behaved well. 
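+ * The sign convention is that a positive adj asks for a switch towards a faster rate and a negative adj towards a slower one.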
+ * + * Note that for the computations we use a fixed-point representation to avoid + * floating point arithmetic. Hence, all values are shifted left by + * RC_PID_ARITH_SHIFT. + */ + + +/* Adjust the rate while ensuring that we won't switch to a lower rate if it + * exhibited a worse failed frames behaviour and we'll choose the highest rate + * whose failed frames behaviour is not worse than the one of the original rate + * target. While at it, check that the new rate is valid. */ +static void rate_control_pid_adjust_rate(struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, + struct rc_pid_sta_info *spinfo, int adj, + struct rc_pid_rateinfo *rinfo) +{ + int cur_sorted, new_sorted, probe, tmp, n_bitrates, band; + int cur = spinfo->txrate_idx; + + band = sband->band; + n_bitrates = sband->n_bitrates; + + /* Map passed arguments to sorted values. */ + cur_sorted = rinfo[cur].rev_index; + new_sorted = cur_sorted + adj; + + /* Check limits. */ + if (new_sorted < 0) + new_sorted = rinfo[0].rev_index; + else if (new_sorted >= n_bitrates) + new_sorted = rinfo[n_bitrates - 1].rev_index; + + tmp = new_sorted; + + if (adj < 0) { + /* Ensure that the rate decrease isn't disadvantageous. */ + for (probe = cur_sorted; probe >= new_sorted; probe--) + if (rinfo[probe].diff <= rinfo[cur_sorted].diff && + rate_supported(sta, band, rinfo[probe].index)) + tmp = probe; + } else { + /* Look for rate increase with zero (or below) cost. */ + for (probe = new_sorted + 1; probe < n_bitrates; probe++) + if (rinfo[probe].diff <= rinfo[new_sorted].diff && + rate_supported(sta, band, rinfo[probe].index)) + tmp = probe; + } + + /* Fit the rate found to the nearest supported rate. */ + do { + if (rate_supported(sta, band, rinfo[tmp].index)) { + spinfo->txrate_idx = rinfo[tmp].index; + break; + } + if (adj < 0) + tmp--; + else + tmp++; + } while (tmp < n_bitrates && tmp >= 0); + +#ifdef CONFIG_MAC80211_DEBUGFS + rate_control_pid_event_rate_change(&spinfo->events, + spinfo->txrate_idx, + sband->bitrates[spinfo->txrate_idx].bitrate); +#endif +} + +/* Normalize the failed frames per-rate differences. */ +static void rate_control_pid_normalize(struct rc_pid_info *pinfo, int l) +{ + int i, norm_offset = pinfo->norm_offset; + struct rc_pid_rateinfo *r = pinfo->rinfo; + + if (r[0].diff > norm_offset) + r[0].diff -= norm_offset; + else if (r[0].diff < -norm_offset) + r[0].diff += norm_offset; + for (i = 0; i < l - 1; i++) + if (r[i + 1].diff > r[i].diff + norm_offset) + r[i + 1].diff -= norm_offset; + else if (r[i + 1].diff <= r[i].diff) + r[i + 1].diff += norm_offset; +} + +static void rate_control_pid_sample(struct rc_pid_info *pinfo, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, + struct rc_pid_sta_info *spinfo) +{ + struct rc_pid_rateinfo *rinfo = pinfo->rinfo; + u32 pf; + s32 err_avg; + u32 err_prop; + u32 err_int; + u32 err_der; + int adj, i, j, tmp; + unsigned long period; + + /* In case nothing happened during the previous control interval, turn + * the sharpening factor on. */ + period = msecs_to_jiffies(pinfo->sampling_period); + if (jiffies - spinfo->last_sample > 2 * period) + spinfo->sharp_cnt = pinfo->sharpen_duration; + + spinfo->last_sample = jiffies; + + /* This should never happen, but in case, we assume the old sample is + * still a good measurement and copy it. 
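A second standalone experiment, again not part of the patch: rate_control_pid_normalize() above is small enough to lift into a plain userspace program, which makes it easy to watch norm_offset slowly pull the learnt per-rate differences back toward the natural ordering of the rates when no fresh measurements arrive.

#include <stdio.h>

#define NORM_OFFSET 3   /* invented value for the example */

static void normalize(int *diff, int n)
{
        int i;

        /* nudge the reference (lowest) rate back toward zero */
        if (diff[0] > NORM_OFFSET)
                diff[0] -= NORM_OFFSET;
        else if (diff[0] < -NORM_OFFSET)
                diff[0] += NORM_OFFSET;

        /* and keep each rate from drifting too far from its neighbour */
        for (i = 0; i < n - 1; i++) {
                if (diff[i + 1] > diff[i] + NORM_OFFSET)
                        diff[i + 1] -= NORM_OFFSET;
                else if (diff[i + 1] <= diff[i])
                        diff[i + 1] += NORM_OFFSET;
        }
}

int main(void)
{
        int diff[] = { 10, -40, 5, 70 };        /* made-up learnt differences */
        int round, i;

        for (round = 0; round < 3; round++) {
                normalize(diff, 4);
                for (i = 0; i < 4; i++)
                        printf("%4d", diff[i]);
                printf("\n");
        }
        return 0;
}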
*/ + if (unlikely(spinfo->tx_num_xmit == 0)) + pf = spinfo->last_pf; + else + pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; + + spinfo->tx_num_xmit = 0; + spinfo->tx_num_failed = 0; + + /* If we just switched rate, update the rate behaviour info. */ + if (pinfo->oldrate != spinfo->txrate_idx) { + + i = rinfo[pinfo->oldrate].rev_index; + j = rinfo[spinfo->txrate_idx].rev_index; + + tmp = (pf - spinfo->last_pf); + tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT); + + rinfo[j].diff = rinfo[i].diff + tmp; + pinfo->oldrate = spinfo->txrate_idx; + } + rate_control_pid_normalize(pinfo, sband->n_bitrates); + + /* Compute the proportional, integral and derivative errors. */ + err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT; + + err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift; + spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop; + err_int = spinfo->err_avg_sc >> pinfo->smoothing_shift; + + err_der = (pf - spinfo->last_pf) * + (1 + pinfo->sharpen_factor * spinfo->sharp_cnt); + spinfo->last_pf = pf; + if (spinfo->sharp_cnt) + spinfo->sharp_cnt--; + +#ifdef CONFIG_MAC80211_DEBUGFS + rate_control_pid_event_pf_sample(&spinfo->events, pf, err_prop, err_int, + err_der); +#endif + + /* Compute the controller output. */ + adj = (err_prop * pinfo->coeff_p + err_int * pinfo->coeff_i + + err_der * pinfo->coeff_d); + adj = RC_PID_DO_ARITH_RIGHT_SHIFT(adj, 2 * RC_PID_ARITH_SHIFT); + + /* Change rate. */ + if (adj) + rate_control_pid_adjust_rate(sband, sta, spinfo, adj, rinfo); +} + +static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + struct rc_pid_info *pinfo = priv; + struct rc_pid_sta_info *spinfo = priv_sta; + unsigned long period; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (!spinfo) + return; + + /* Ignore all frames that were sent with a different rate than the rate + * we currently advise mac80211 to use. */ + if (info->status.rates[0].idx != spinfo->txrate_idx) + return; + + spinfo->tx_num_xmit++; + +#ifdef CONFIG_MAC80211_DEBUGFS + rate_control_pid_event_tx_status(&spinfo->events, info); +#endif + + /* We count frames that totally failed to be transmitted as two bad + * frames, those that made it out but had some retries as one good and + * one bad frame. */ + if (!(info->flags & IEEE80211_TX_STAT_ACK)) { + spinfo->tx_num_failed += 2; + spinfo->tx_num_xmit++; + } else if (info->status.rates[0].count > 1) { + spinfo->tx_num_failed++; + spinfo->tx_num_xmit++; + } + + /* Update PID controller state. */ + period = msecs_to_jiffies(pinfo->sampling_period); + if (time_after(jiffies, spinfo->last_sample + period)) + rate_control_pid_sample(pinfo, sband, sta, spinfo); +} + +static void +rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta, + void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + struct sk_buff *skb = txrc->skb; + struct ieee80211_supported_band *sband = txrc->sband; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct rc_pid_sta_info *spinfo = priv_sta; + int rateidx; + + if (txrc->rts) + info->control.rates[0].count = + txrc->hw->conf.long_frame_max_tx_count; + else + info->control.rates[0].count = + txrc->hw->conf.short_frame_max_tx_count; + + /* Send management frames and NO_ACK data using lowest rate. 
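Aside, not part of the patch: the err_avg_sc update in rate_control_pid_sample() above is the scaled moving average promised by the header comment, avg_new = ((2^S - 1) * avg_old + sample) / 2^S, kept pre-multiplied by 2^S so that one subtraction and one addition suffice. A standalone version with an invented smoothing shift:

#include <stdio.h>

#define SMOOTHING_SHIFT 3       /* assumed value, plays the role of RC_PID_SMOOTHING_SHIFT */

static int avg_scaled;          /* the average multiplied by 2^SMOOTHING_SHIFT */

static int ewma_update(int sample)
{
        int avg = avg_scaled >> SMOOTHING_SHIFT;

        avg_scaled = avg_scaled - avg + sample;
        return avg_scaled >> SMOOTHING_SHIFT;
}

int main(void)
{
        int samples[] = { 0, 0, 80, 80, 80, 80, 80, 80 };
        unsigned int i;

        /* the average creeps toward 80, faster for smaller shifts */
        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("sample %2d -> avg %d\n", samples[i],
                       ewma_update(samples[i]));
        return 0;
}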
*/ + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + rateidx = spinfo->txrate_idx; + + if (rateidx >= sband->n_bitrates) + rateidx = sband->n_bitrates - 1; + + info->control.rates[0].idx = rateidx; + +#ifdef CONFIG_MAC80211_DEBUGFS + rate_control_pid_event_tx_rate(&spinfo->events, + rateidx, sband->bitrates[rateidx].bitrate); +#endif +} + +static void +rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ + struct rc_pid_sta_info *spinfo = priv_sta; + struct rc_pid_info *pinfo = priv; + struct rc_pid_rateinfo *rinfo = pinfo->rinfo; + int i, j, tmp; + bool s; + + /* TODO: This routine should consider using RSSI from previous packets + * as we need to have IEEE 802.1X auth succeed immediately after assoc.. + * Until that method is implemented, we will use the lowest supported + * rate as a workaround. */ + + /* Sort the rates. This is optimized for the most common case (i.e. + * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed + * mapping too. */ + for (i = 0; i < sband->n_bitrates; i++) { + rinfo[i].index = i; + rinfo[i].rev_index = i; + if (RC_PID_FAST_START) + rinfo[i].diff = 0; + else + rinfo[i].diff = i * pinfo->norm_offset; + } + for (i = 1; i < sband->n_bitrates; i++) { + s = 0; + for (j = 0; j < sband->n_bitrates - i; j++) + if (unlikely(sband->bitrates[rinfo[j].index].bitrate > + sband->bitrates[rinfo[j + 1].index].bitrate)) { + tmp = rinfo[j].index; + rinfo[j].index = rinfo[j + 1].index; + rinfo[j + 1].index = tmp; + rinfo[rinfo[j].index].rev_index = j; + rinfo[rinfo[j + 1].index].rev_index = j + 1; + s = 1; + } + if (!s) + break; + } + + spinfo->txrate_idx = rate_lowest_index(sband, sta); +} + +static void *rate_control_pid_alloc(struct ieee80211_hw *hw, + struct dentry *debugfsdir) +{ + struct rc_pid_info *pinfo; + struct rc_pid_rateinfo *rinfo; + struct ieee80211_supported_band *sband; + int i, max_rates = 0; +#ifdef CONFIG_MAC80211_DEBUGFS + struct rc_pid_debugfs_entries *de; +#endif + + pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); + if (!pinfo) + return NULL; + + for (i = 0; i < IEEE80211_NUM_BANDS; i++) { + sband = hw->wiphy->bands[i]; + if (sband && sband->n_bitrates > max_rates) + max_rates = sband->n_bitrates; + } + + rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC); + if (!rinfo) { + kfree(pinfo); + return NULL; + } + + pinfo->target = RC_PID_TARGET_PF; + pinfo->sampling_period = RC_PID_INTERVAL; + pinfo->coeff_p = RC_PID_COEFF_P; + pinfo->coeff_i = RC_PID_COEFF_I; + pinfo->coeff_d = RC_PID_COEFF_D; + pinfo->smoothing_shift = RC_PID_SMOOTHING_SHIFT; + pinfo->sharpen_factor = RC_PID_SHARPENING_FACTOR; + pinfo->sharpen_duration = RC_PID_SHARPENING_DURATION; + pinfo->norm_offset = RC_PID_NORM_OFFSET; + pinfo->rinfo = rinfo; + pinfo->oldrate = 0; + +#ifdef CONFIG_MAC80211_DEBUGFS + de = &pinfo->dentries; + de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, + debugfsdir, &pinfo->target); + de->sampling_period = debugfs_create_u32("sampling_period", + S_IRUSR | S_IWUSR, debugfsdir, + &pinfo->sampling_period); + de->coeff_p = debugfs_create_u32("coeff_p", S_IRUSR | S_IWUSR, + debugfsdir, (u32 *)&pinfo->coeff_p); + de->coeff_i = debugfs_create_u32("coeff_i", S_IRUSR | S_IWUSR, + debugfsdir, (u32 *)&pinfo->coeff_i); + de->coeff_d = debugfs_create_u32("coeff_d", S_IRUSR | S_IWUSR, + debugfsdir, (u32 *)&pinfo->coeff_d); + de->smoothing_shift = debugfs_create_u32("smoothing_shift", + S_IRUSR | S_IWUSR, debugfsdir, + &pinfo->smoothing_shift); + de->sharpen_factor 
= debugfs_create_u32("sharpen_factor", + S_IRUSR | S_IWUSR, debugfsdir, + &pinfo->sharpen_factor); + de->sharpen_duration = debugfs_create_u32("sharpen_duration", + S_IRUSR | S_IWUSR, debugfsdir, + &pinfo->sharpen_duration); + de->norm_offset = debugfs_create_u32("norm_offset", + S_IRUSR | S_IWUSR, debugfsdir, + &pinfo->norm_offset); +#endif + + return pinfo; +} + +static void rate_control_pid_free(void *priv) +{ + struct rc_pid_info *pinfo = priv; +#ifdef CONFIG_MAC80211_DEBUGFS + struct rc_pid_debugfs_entries *de = &pinfo->dentries; + + debugfs_remove(de->norm_offset); + debugfs_remove(de->sharpen_duration); + debugfs_remove(de->sharpen_factor); + debugfs_remove(de->smoothing_shift); + debugfs_remove(de->coeff_d); + debugfs_remove(de->coeff_i); + debugfs_remove(de->coeff_p); + debugfs_remove(de->sampling_period); + debugfs_remove(de->target); +#endif + + kfree(pinfo->rinfo); + kfree(pinfo); +} + +static void *rate_control_pid_alloc_sta(void *priv, struct ieee80211_sta *sta, + gfp_t gfp) +{ + struct rc_pid_sta_info *spinfo; + + spinfo = kzalloc(sizeof(*spinfo), gfp); + if (spinfo == NULL) + return NULL; + + spinfo->last_sample = jiffies; + +#ifdef CONFIG_MAC80211_DEBUGFS + spin_lock_init(&spinfo->events.lock); + init_waitqueue_head(&spinfo->events.waitqueue); +#endif + + return spinfo; +} + +static void rate_control_pid_free_sta(void *priv, struct ieee80211_sta *sta, + void *priv_sta) +{ + kfree(priv_sta); +} + +static struct rate_control_ops mac80211_rcpid = { + .name = "pid", + .tx_status = rate_control_pid_tx_status, + .get_rate = rate_control_pid_get_rate, + .rate_init = rate_control_pid_rate_init, + .alloc = rate_control_pid_alloc, + .free = rate_control_pid_free, + .alloc_sta = rate_control_pid_alloc_sta, + .free_sta = rate_control_pid_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = rate_control_pid_add_sta_debugfs, + .remove_sta_debugfs = rate_control_pid_remove_sta_debugfs, +#endif +}; + +int __init rc80211_pid_init(void) +{ + return ieee80211_rate_control_register(&mac80211_rcpid); +} + +void rc80211_pid_exit(void) +{ + ieee80211_rate_control_unregister(&mac80211_rcpid); +} diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c new file mode 100644 index 0000000..4566705 --- /dev/null +++ b/net/mac80211/rc80211_pid_debugfs.c @@ -0,0 +1,225 @@ +/* + * Copyright 2007, Mattias Nissler + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include "rate.h" + +#include "rc80211_pid.h" + +static void rate_control_pid_event(struct rc_pid_event_buffer *buf, + enum rc_pid_event_type type, + union rc_pid_event_data *data) +{ + struct rc_pid_event *ev; + unsigned long status; + + spin_lock_irqsave(&buf->lock, status); + ev = &(buf->ring[buf->next_entry]); + buf->next_entry = (buf->next_entry + 1) % RC_PID_EVENT_RING_SIZE; + + ev->timestamp = jiffies; + ev->id = buf->ev_count++; + ev->type = type; + ev->data = *data; + + spin_unlock_irqrestore(&buf->lock, status); + + wake_up_all(&buf->waitqueue); +} + +void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf, + struct ieee80211_tx_info *stat) +{ + union rc_pid_event_data evd; + + evd.flags = stat->flags; + memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_info)); + rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_STATUS, &evd); +} + +void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf, + int index, int rate) +{ + union rc_pid_event_data evd; + + evd.index = index; + evd.rate = rate; + rate_control_pid_event(buf, RC_PID_EVENT_TYPE_RATE_CHANGE, &evd); +} + +void rate_control_pid_event_tx_rate(struct rc_pid_event_buffer *buf, + int index, int rate) +{ + union rc_pid_event_data evd; + + evd.index = index; + evd.rate = rate; + rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_RATE, &evd); +} + +void rate_control_pid_event_pf_sample(struct rc_pid_event_buffer *buf, + s32 pf_sample, s32 prop_err, + s32 int_err, s32 der_err) +{ + union rc_pid_event_data evd; + + evd.pf_sample = pf_sample; + evd.prop_err = prop_err; + evd.int_err = int_err; + evd.der_err = der_err; + rate_control_pid_event(buf, RC_PID_EVENT_TYPE_PF_SAMPLE, &evd); +} + +static int rate_control_pid_events_open(struct inode *inode, struct file *file) +{ + struct rc_pid_sta_info *sinfo = inode->i_private; + struct rc_pid_event_buffer *events = &sinfo->events; + struct rc_pid_events_file_info *file_info; + unsigned long status; + + /* Allocate a state struct */ + file_info = kmalloc(sizeof(*file_info), GFP_KERNEL); + if (file_info == NULL) + return -ENOMEM; + + spin_lock_irqsave(&events->lock, status); + + file_info->next_entry = events->next_entry; + file_info->events = events; + + spin_unlock_irqrestore(&events->lock, status); + + file->private_data = file_info; + + return 0; +} + +static int rate_control_pid_events_release(struct inode *inode, + struct file *file) +{ + struct rc_pid_events_file_info *file_info = file->private_data; + + kfree(file_info); + + return 0; +} + +static unsigned int rate_control_pid_events_poll(struct file *file, + poll_table *wait) +{ + struct rc_pid_events_file_info *file_info = file->private_data; + + poll_wait(file, &file_info->events->waitqueue, wait); + + return POLLIN | POLLRDNORM; +} + +#define RC_PID_PRINT_BUF_SIZE 64 + +static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf, + size_t length, loff_t *offset) +{ + struct rc_pid_events_file_info *file_info = file->private_data; + struct rc_pid_event_buffer *events = file_info->events; + struct rc_pid_event *ev; + char pb[RC_PID_PRINT_BUF_SIZE]; + int ret; + int p; + unsigned long status; + + /* Check if there is something to read. 
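Aside, not part of the patch: the ring buffer above feeds the per-station rc_pid_events file created a little further down by rate_control_pid_add_sta_debugfs(); each read() hands back one formatted event and blocks until a new one arrives. A minimal userspace consumer could look like this, where the phy, interface and station MAC in the path are placeholders; stop it with Ctrl-C.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/ieee80211/phy0/"
                           "netdev:wlan0/stations/00:11:22:33:44:55/rc_pid_events";
        char line[64];          /* matches RC_PID_PRINT_BUF_SIZE above */
        ssize_t n;
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
                perror(path);
                return 1;
        }

        /* each read() returns one line, formatted as "<id> <jiffies> <event> ..." */
        while ((n = read(fd, line, sizeof(line))) > 0)
                fwrite(line, 1, (size_t)n, stdout);

        close(fd);
        return 0;
}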
*/ + if (events->next_entry == file_info->next_entry) { + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + /* Wait */ + ret = wait_event_interruptible(events->waitqueue, + events->next_entry != file_info->next_entry); + + if (ret) + return ret; + } + + /* Write out one event per call. I don't care whether it's a little + * inefficient, this is debugging code anyway. */ + spin_lock_irqsave(&events->lock, status); + + /* Get an event */ + ev = &(events->ring[file_info->next_entry]); + file_info->next_entry = (file_info->next_entry + 1) % + RC_PID_EVENT_RING_SIZE; + + /* Print information about the event. Note that userpace needs to + * provide large enough buffers. */ + length = length < RC_PID_PRINT_BUF_SIZE ? + length : RC_PID_PRINT_BUF_SIZE; + p = snprintf(pb, length, "%u %lu ", ev->id, ev->timestamp); + switch (ev->type) { + case RC_PID_EVENT_TYPE_TX_STATUS: + p += snprintf(pb + p, length - p, "tx_status %u %u", + !(ev->data.flags & IEEE80211_TX_STAT_ACK), + ev->data.tx_status.status.rates[0].idx); + break; + case RC_PID_EVENT_TYPE_RATE_CHANGE: + p += snprintf(pb + p, length - p, "rate_change %d %d", + ev->data.index, ev->data.rate); + break; + case RC_PID_EVENT_TYPE_TX_RATE: + p += snprintf(pb + p, length - p, "tx_rate %d %d", + ev->data.index, ev->data.rate); + break; + case RC_PID_EVENT_TYPE_PF_SAMPLE: + p += snprintf(pb + p, length - p, + "pf_sample %d %d %d %d", + ev->data.pf_sample, ev->data.prop_err, + ev->data.int_err, ev->data.der_err); + break; + } + p += snprintf(pb + p, length - p, "\n"); + + spin_unlock_irqrestore(&events->lock, status); + + if (copy_to_user(buf, pb, p)) + return -EFAULT; + + return p; +} + +#undef RC_PID_PRINT_BUF_SIZE + +static const struct file_operations rc_pid_fop_events = { + .owner = THIS_MODULE, + .read = rate_control_pid_events_read, + .poll = rate_control_pid_events_poll, + .open = rate_control_pid_events_open, + .release = rate_control_pid_events_release, +}; + +void rate_control_pid_add_sta_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct rc_pid_sta_info *spinfo = priv_sta; + + spinfo->events_entry = debugfs_create_file("rc_pid_events", S_IRUGO, + dir, spinfo, + &rc_pid_fop_events); +} + +void rate_control_pid_remove_sta_debugfs(void *priv, void *priv_sta) +{ + struct rc_pid_sta_info *spinfo = priv_sta; + + debugfs_remove(spinfo->events_entry); +} diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c new file mode 100644 index 0000000..0e034a0 --- /dev/null +++ b/net/mac80211/rx.c @@ -0,0 +1,2587 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007-2010 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "led.h" +#include "mesh.h" +#include "wep.h" +#include "wpa.h" +#include "tkip.h" +#include "wme.h" + +/* + * monitor mode reception + * + * This function cleans up the SKB, i.e. it removes all the stuff + * only useful for monitoring. 
+ */ +static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, + struct sk_buff *skb) +{ + if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { + if (likely(skb->len > FCS_LEN)) + skb_trim(skb, skb->len - FCS_LEN); + else { + /* driver bug */ + WARN_ON(1); + dev_kfree_skb(skb); + skb = NULL; + } + } + + return skb; +} + +static inline int should_drop_frame(struct sk_buff *skb, + int present_fcs_len) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) + return 1; + if (unlikely(skb->len < 16 + present_fcs_len)) + return 1; + if (ieee80211_is_ctl(hdr->frame_control) && + !ieee80211_is_pspoll(hdr->frame_control) && + !ieee80211_is_back_req(hdr->frame_control)) + return 1; + return 0; +} + +static int +ieee80211_rx_radiotap_len(struct ieee80211_local *local, + struct ieee80211_rx_status *status) +{ + int len; + + /* always present fields */ + len = sizeof(struct ieee80211_radiotap_header) + 9; + + if (status->flag & RX_FLAG_TSFT) + len += 8; + if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) + len += 1; + if (local->hw.flags & IEEE80211_HW_NOISE_DBM) + len += 1; + + if (len & 1) /* padding for RX_FLAGS if necessary */ + len++; + + return len; +} + +/* + * ieee80211_add_rx_radiotap_header - add radiotap header + * + * add a radiotap header containing all the fields which the hardware provided. + */ +static void +ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, + struct sk_buff *skb, + struct ieee80211_rate *rate, + int rtap_len) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_radiotap_header *rthdr; + unsigned char *pos; + u16 rx_flags = 0; + + rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); + memset(rthdr, 0, rtap_len); + + /* radiotap header, set always present flags */ + rthdr->it_present = + cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_CHANNEL) | + (1 << IEEE80211_RADIOTAP_ANTENNA) | + (1 << IEEE80211_RADIOTAP_RX_FLAGS)); + rthdr->it_len = cpu_to_le16(rtap_len); + + pos = (unsigned char *)(rthdr+1); + + /* the order of the following fields is important */ + + /* IEEE80211_RADIOTAP_TSFT */ + if (status->flag & RX_FLAG_TSFT) { + put_unaligned_le64(status->mactime, pos); + rthdr->it_present |= + cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); + pos += 8; + } + + /* IEEE80211_RADIOTAP_FLAGS */ + if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) + *pos |= IEEE80211_RADIOTAP_F_FCS; + if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) + *pos |= IEEE80211_RADIOTAP_F_BADFCS; + if (status->flag & RX_FLAG_SHORTPRE) + *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; + pos++; + + /* IEEE80211_RADIOTAP_RATE */ + if (status->flag & RX_FLAG_HT) { + /* + * TODO: add following information into radiotap header once + * suitable fields are defined for it: + * - MCS index (status->rate_idx) + * - HT40 (status->flag & RX_FLAG_40MHZ) + * - short-GI (status->flag & RX_FLAG_SHORT_GI) + */ + *pos = 0; + } else { + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); + *pos = rate->bitrate / 5; + } + pos++; + + /* IEEE80211_RADIOTAP_CHANNEL */ + put_unaligned_le16(status->freq, pos); + pos += 2; + if (status->band == IEEE80211_BAND_5GHZ) + put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, + pos); + else if (status->flag & RX_FLAG_HT) + put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, + pos); + else if 
(rate->flags & IEEE80211_RATE_ERP_G) + put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, + pos); + else + put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, + pos); + pos += 2; + + /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ + if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { + *pos = status->signal; + rthdr->it_present |= + cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); + pos++; + } + + /* IEEE80211_RADIOTAP_DBM_ANTNOISE */ + if (local->hw.flags & IEEE80211_HW_NOISE_DBM) { + *pos = status->noise; + rthdr->it_present |= + cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE); + pos++; + } + + /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ + + /* IEEE80211_RADIOTAP_ANTENNA */ + *pos = status->antenna; + pos++; + + /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ + + /* IEEE80211_RADIOTAP_RX_FLAGS */ + /* ensure 2 byte alignment for the 2 byte field as required */ + if ((pos - (u8 *)rthdr) & 1) + pos++; + if (status->flag & RX_FLAG_FAILED_PLCP_CRC) + rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; + put_unaligned_le16(rx_flags, pos); + pos += 2; +} + +/* + * This function copies a received frame to all monitor interfaces and + * returns a cleaned-up SKB that no longer includes the FCS nor the + * radiotap header the driver might have added. + */ +static struct sk_buff * +ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, + struct ieee80211_rate *rate) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); + struct ieee80211_sub_if_data *sdata; + int needed_headroom = 0; + struct sk_buff *skb, *skb2; + struct net_device *prev_dev = NULL; + int present_fcs_len = 0; + + /* + * First, we may need to make a copy of the skb because + * (1) we need to modify it for radiotap (if not present), and + * (2) the other RX handlers will modify the skb we got. + * + * We don't need to, of course, if we aren't going to return + * the SKB because it has a bad FCS/PLCP checksum. + */ + + /* room for the radiotap header based on driver features */ + needed_headroom = ieee80211_rx_radiotap_len(local, status); + + if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) + present_fcs_len = FCS_LEN; + + if (!local->monitors) { + if (should_drop_frame(origskb, present_fcs_len)) { + dev_kfree_skb(origskb); + return NULL; + } + + return remove_monitor_info(local, origskb); + } + + if (should_drop_frame(origskb, present_fcs_len)) { + /* only need to expand headroom if necessary */ + skb = origskb; + origskb = NULL; + + /* + * This shouldn't trigger often because most devices have an + * RX header they pull before we get here, and that should + * be big enough for our radiotap information. We should + * probably export the length to drivers so that we can have + * them allocate enough headroom to start with. + */ + if (skb_headroom(skb) < needed_headroom && + pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + return NULL; + } + } else { + /* + * Need to make a copy and possibly remove radiotap header + * and FCS from the original. 
+ */ + skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); + + origskb = remove_monitor_info(local, origskb); + + if (!skb) + return origskb; + } + + /* prepend radiotap information */ + ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); + + skb_reset_mac_header(skb); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (sdata->vif.type != NL80211_IFTYPE_MONITOR) + continue; + + if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) + continue; + + if (!ieee80211_sdata_running(sdata)) + continue; + + if (prev_dev) { + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) { + skb2->dev = prev_dev; + netif_rx(skb2); + } + } + + prev_dev = sdata->dev; + sdata->dev->stats.rx_packets++; + sdata->dev->stats.rx_bytes += skb->len; + } + + if (prev_dev) { + skb->dev = prev_dev; + netif_rx(skb); + } else + dev_kfree_skb(skb); + + return origskb; +} + + +static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + int tid; + + /* does the frame have a qos control field? */ + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + /* frame has qos control */ + tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) + rx->flags |= IEEE80211_RX_AMSDU; + else + rx->flags &= ~IEEE80211_RX_AMSDU; + } else { + /* + * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): + * + * Sequence numbers for management frames, QoS data + * frames with a broadcast/multicast address in the + * Address 1 field, and all non-QoS data frames sent + * by QoS STAs are assigned using an additional single + * modulo-4096 counter, [...] + * + * We also use that counter for non-QoS STAs. + */ + tid = NUM_RX_DATA_QUEUES - 1; + } + + rx->queue = tid; + /* Set skb->priority to 1d tag if highest order bit of TID is not set. + * For now, set skb->priority to 0 for other cases. */ + rx->skb->priority = (tid > 7) ? 0 : tid; +} + +/** + * DOC: Packet alignment + * + * Drivers always need to pass packets that are aligned to two-byte boundaries + * to the stack. + * + * Additionally, should, if possible, align the payload data in a way that + * guarantees that the contained IP header is aligned to a four-byte + * boundary. In the case of regular frames, this simply means aligning the + * payload to a four-byte boundary (because either the IP header is directly + * contained, or IV/RFC1042 headers that have a length divisible by four are + * in front of it). If the payload data is not properly aligned and the + * architecture doesn't support efficient unaligned operations, mac80211 + * will align the data. + * + * With A-MSDU frames, however, the payload data address must yield two modulo + * four because there are 14-byte 802.3 headers within the A-MSDU frames that + * push the IP header further back to a multiple of four again. Thankfully, the + * specs were sane enough this time around to require padding each A-MSDU + * subframe to a length that is a multiple of four. + * + * Padding like Atheros hardware adds which is inbetween the 802.11 header and + * the payload is not supported, the driver is required to move the 802.11 + * header to be directly in front of the payload in that case. 
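Aside, not part of the patch: the rules in the "Packet alignment" DOC block above reduce to modular arithmetic. Since IVs and the RFC 1042 header are multiples of four bytes, the embedded IP header is 4-byte aligned exactly when the byte following the 802.11 header is, and for A-MSDUs that byte wants to sit at 2 mod 4 because a 14-byte 802.3 subframe header still follows. The buffer addresses below are arbitrary example values.

#include <stdio.h>

static unsigned int payload_mod4(unsigned long hdr_addr, unsigned int hdrlen)
{
        return (hdr_addr + hdrlen) & 3;
}

int main(void)
{
        /* common data header lengths: basic, QoS, 4-address, 4-address QoS */
        unsigned int hdrlen[] = { 24, 26, 30, 32 };
        unsigned long addr;
        unsigned int i;

        for (addr = 0x1000; addr <= 0x1002; addr += 2)  /* 2-byte aligned starts */
                for (i = 0; i < 4; i++)
                        printf("hdr at ..%lu, len %2u -> payload mod 4 = %u%s\n",
                               addr & 3, hdrlen[i],
                               payload_mod4(addr, hdrlen[i]),
                               payload_mod4(addr, hdrlen[i]) == 0 ?
                               " (IP header aligned)" : "");
        return 0;
}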
+ */ +static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) +{ +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + WARN_ONCE((unsigned long)rx->skb->data & 1, + "unaligned packet at 0x%p\n", rx->skb->data); +#endif +} + + +/* rx handlers */ + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) +{ + struct ieee80211_local *local = rx->local; + struct sk_buff *skb = rx->skb; + + if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning))) + return ieee80211_scan_rx(rx->sdata, skb); + + if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) && + (rx->flags & IEEE80211_RX_IN_SCAN))) { + /* drop all the other packets during a software scan anyway */ + if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED) + dev_kfree_skb(skb); + return RX_QUEUED; + } + + if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) { + /* scanning finished during invoking of handlers */ + I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); + return RX_DROP_UNUSABLE; + } + + return RX_CONTINUE; +} + + +static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1)) + return 0; + + return ieee80211_is_robust_mgmt_frame(hdr); +} + + +static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1)) + return 0; + + return ieee80211_is_robust_mgmt_frame(hdr); +} + + +/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */ +static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) +{ + struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; + struct ieee80211_mmie *mmie; + + if (skb->len < 24 + sizeof(*mmie) || + !is_multicast_ether_addr(hdr->da)) + return -1; + + if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr)) + return -1; /* not a robust management frame */ + + mmie = (struct ieee80211_mmie *) + (skb->data + skb->len - sizeof(*mmie)); + if (mmie->element_id != WLAN_EID_MMIE || + mmie->length != sizeof(*mmie) - 2) + return -1; + + return le16_to_cpu(mmie->key_id); +} + + +static ieee80211_rx_result +ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + char *dev_addr = rx->sdata->vif.addr; + + if (ieee80211_is_data(hdr->frame_control)) { + if (is_multicast_ether_addr(hdr->addr1)) { + if (ieee80211_has_tods(hdr->frame_control) || + !ieee80211_has_fromds(hdr->frame_control)) + return RX_DROP_MONITOR; + if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0) + return RX_DROP_MONITOR; + } else { + if (!ieee80211_has_a4(hdr->frame_control)) + return RX_DROP_MONITOR; + if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0) + return RX_DROP_MONITOR; + } + } + + /* If there is not an established peer link and this is not a peer link + * establisment frame, beacon or probe, drop the frame. 
+ */ + + if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) { + struct ieee80211_mgmt *mgmt; + + if (!ieee80211_is_mgmt(hdr->frame_control)) + return RX_DROP_MONITOR; + + if (ieee80211_is_action(hdr->frame_control)) { + mgmt = (struct ieee80211_mgmt *)hdr; + if (mgmt->u.action.category != MESH_PLINK_CATEGORY) + return RX_DROP_MONITOR; + return RX_CONTINUE; + } + + if (ieee80211_is_probe_req(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control)) + return RX_CONTINUE; + + return RX_DROP_MONITOR; + + } + +#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l)) + + if (ieee80211_is_data(hdr->frame_control) && + is_multicast_ether_addr(hdr->addr1) && + mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata)) + return RX_DROP_MONITOR; +#undef msh_h_get + + return RX_CONTINUE; +} + +#define SEQ_MODULO 0x1000 +#define SEQ_MASK 0xfff + +static inline int seq_less(u16 sq1, u16 sq2) +{ + return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1); +} + +static inline u16 seq_inc(u16 sq) +{ + return (sq + 1) & SEQ_MASK; +} + +static inline u16 seq_sub(u16 sq1, u16 sq2) +{ + return (sq1 - sq2) & SEQ_MASK; +} + + +static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw, + struct tid_ampdu_rx *tid_agg_rx, + int index, + struct sk_buff_head *frames) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_rate *rate = NULL; + struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; + struct ieee80211_rx_status *status; + + if (!skb) + goto no_frame; + + status = IEEE80211_SKB_RXCB(skb); + + /* release the reordered frames to stack */ + sband = hw->wiphy->bands[status->band]; + if (!(status->flag & RX_FLAG_HT)) + rate = &sband->bitrates[status->rate_idx]; + tid_agg_rx->stored_mpdu_num--; + tid_agg_rx->reorder_buf[index] = NULL; + __skb_queue_tail(frames, skb); + +no_frame: + tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); +} + +static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw, + struct tid_ampdu_rx *tid_agg_rx, + u16 head_seq_num, + struct sk_buff_head *frames) +{ + int index; + + while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) { + index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % + tid_agg_rx->buf_size; + ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames); + } +} + +/* + * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If + * the skb was added to the buffer longer than this time ago, the earlier + * frames that have not yet been received are assumed to be lost and the skb + * can be released for processing. This may also release other skb's from the + * reorder buffer if there are no additional gaps between the frames. + */ +#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) + +/* + * As this function belongs to the RX path it must be under + * rcu_read_lock protection. It returns false if the frame + * can be processed immediately, true if it was consumed. 
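Aside, not part of the patch: the reorder logic that follows leans entirely on the modulo-4096 helpers defined above. Copied into a standalone program they show that the ordering test keeps working across the 4095 to 0 wrap:

#include <stdio.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0xfff

static int seq_less(unsigned int sq1, unsigned int sq2)
{
        return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static unsigned int seq_sub(unsigned int sq1, unsigned int sq2)
{
        return (sq1 - sq2) & SEQ_MASK;
}

int main(void)
{
        printf("seq_less(4090, 5) = %d\n", seq_less(4090, 5)); /* 1: 4090 counts as older */
        printf("seq_less(5, 4090) = %d\n", seq_less(5, 4090)); /* 0 */
        printf("seq_sub(5, 4090)  = %u\n", seq_sub(5, 4090));  /* 11: distance after the wrap */
        return 0;
}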
+ */ +static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, + struct tid_ampdu_rx *tid_agg_rx, + struct sk_buff *skb, + struct sk_buff_head *frames) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u16 sc = le16_to_cpu(hdr->seq_ctrl); + u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; + u16 head_seq_num, buf_size; + int index; + + buf_size = tid_agg_rx->buf_size; + head_seq_num = tid_agg_rx->head_seq_num; + + /* frame with out of date sequence number */ + if (seq_less(mpdu_seq_num, head_seq_num)) { + dev_kfree_skb(skb); + return true; + } + + /* + * If frame the sequence number exceeds our buffering window + * size release some previous frames to make room for this one. + */ + if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) { + head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size)); + /* release stored frames up to new head to stack */ + ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num, + frames); + } + + /* Now the new frame is always in the range of the reordering buffer */ + + index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size; + + /* check if we already stored this frame */ + if (tid_agg_rx->reorder_buf[index]) { + dev_kfree_skb(skb); + return true; + } + + /* + * If the current MPDU is in the right order and nothing else + * is stored we can process it directly, no need to buffer it. + */ + if (mpdu_seq_num == tid_agg_rx->head_seq_num && + tid_agg_rx->stored_mpdu_num == 0) { + tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); + return false; + } + + /* put the frame in the reordering buffer */ + tid_agg_rx->reorder_buf[index] = skb; + tid_agg_rx->reorder_time[index] = jiffies; + tid_agg_rx->stored_mpdu_num++; + /* release the buffer until next missing frame */ + index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % + tid_agg_rx->buf_size; + if (!tid_agg_rx->reorder_buf[index] && + tid_agg_rx->stored_mpdu_num > 1) { + /* + * No buffers ready to be released, but check whether any + * frames in the reorder buffer have timed out. + */ + int j; + int skipped = 1; + for (j = (index + 1) % tid_agg_rx->buf_size; j != index; + j = (j + 1) % tid_agg_rx->buf_size) { + if (!tid_agg_rx->reorder_buf[j]) { + skipped++; + continue; + } + if (!time_after(jiffies, tid_agg_rx->reorder_time[j] + + HT_RX_REORDER_BUF_TIMEOUT)) + break; + +#ifdef CONFIG_MAC80211_HT_DEBUG + if (net_ratelimit()) + printk(KERN_DEBUG "%s: release an RX reorder " + "frame due to timeout on earlier " + "frames\n", + wiphy_name(hw->wiphy)); +#endif + ieee80211_release_reorder_frame(hw, tid_agg_rx, + j, frames); + + /* + * Increment the head seq# also for the skipped slots. + */ + tid_agg_rx->head_seq_num = + (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK; + skipped = 0; + } + } else while (tid_agg_rx->reorder_buf[index]) { + ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames); + index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % + tid_agg_rx->buf_size; + } + + return true; +} + +/* + * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns + * true if the MPDU was buffered, false if it should be processed. 
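Aside, not part of the patch: a deliberately simplified model of the decisions ieee80211_sta_manage_reorder_buf() above takes for each MPDU; it drops frames that fall behind the window head, passes the expected frame straight through when nothing is buffered, and parks everything else in the slot derived from its distance to the starting sequence number. Releasing buffered frames once a gap closes is left out to keep it short, and the session numbers are invented.

#include <stdio.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0xfff
#define BUF_SIZE   8

static unsigned int ssn = 100, head = 100;      /* example BA session */
static unsigned int stored;

static void handle(unsigned int seq)
{
        unsigned int index = ((seq - ssn) & SEQ_MASK) % BUF_SIZE;

        if (((seq - head) & SEQ_MASK) > (SEQ_MODULO >> 1)) {
                printf("seq %u: behind the window, drop\n", seq);
        } else if (seq == head && !stored) {
                head = (head + 1) & SEQ_MASK;
                printf("seq %u: in order, process immediately\n", seq);
        } else {
                stored++;
                printf("seq %u: hold in reorder slot %u\n", seq, index);
        }
}

int main(void)
{
        unsigned int frames[] = { 100, 102, 101, 99 };
        unsigned int i;

        for (i = 0; i < 4; i++)
                handle(frames[i]);
        return 0;
}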
+ */ +static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, + struct sk_buff_head *frames) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_local *local = rx->local; + struct ieee80211_hw *hw = &local->hw; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct sta_info *sta = rx->sta; + struct tid_ampdu_rx *tid_agg_rx; + u16 sc; + int tid; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + goto dont_reorder; + + /* + * filter the QoS data rx stream according to + * STA/TID and check if this STA/TID is on aggregation + */ + + if (!sta) + goto dont_reorder; + + tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + + if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) + goto dont_reorder; + + tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; + + /* qos null data frames are excluded */ + if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) + goto dont_reorder; + + /* new, potentially un-ordered, ampdu frame - process it */ + + /* reset session timer */ + if (tid_agg_rx->timeout) + mod_timer(&tid_agg_rx->session_timer, + TU_TO_EXP_TIME(tid_agg_rx->timeout)); + + /* if this mpdu is fragmented - terminate rx aggregation session */ + sc = le16_to_cpu(hdr->seq_ctrl); + if (sc & IEEE80211_SCTL_FRAG) { + ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, + tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); + dev_kfree_skb(skb); + return; + } + + if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) + return; + + dont_reorder: + __skb_queue_tail(frames, skb); +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_check(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + + /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ + if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { + if (unlikely(ieee80211_has_retry(hdr->frame_control) && + rx->sta->last_seq_ctrl[rx->queue] == + hdr->seq_ctrl)) { + if (rx->flags & IEEE80211_RX_RA_MATCH) { + rx->local->dot11FrameDuplicateCount++; + rx->sta->num_duplicates++; + } + return RX_DROP_MONITOR; + } else + rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl; + } + + if (unlikely(rx->skb->len < 16)) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_short); + return RX_DROP_MONITOR; + } + + /* Drop disallowed frame classes based on STA auth/assoc state; + * IEEE 802.11, Chap 5.5. + * + * mac80211 filters only based on association state, i.e. it drops + * Class 3 frames from not associated stations. hostapd sends + * deauth/disassoc frames when needed. In addition, hostapd is + * responsible for filtering on both auth and assoc states. + */ + + if (ieee80211_vif_is_mesh(&rx->sdata->vif)) + return ieee80211_rx_mesh_check(rx); + + if (unlikely((ieee80211_is_data(hdr->frame_control) || + ieee80211_is_pspoll(hdr->frame_control)) && + rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && + (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) { + if ((!ieee80211_has_fromds(hdr->frame_control) && + !ieee80211_has_tods(hdr->frame_control) && + ieee80211_is_data(hdr->frame_control)) || + !(rx->flags & IEEE80211_RX_RA_MATCH)) { + /* Drop IBSS frames and frames for other hosts + * silently. 
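Aside, not part of the patch: the duplicate filter at the top of ieee80211_rx_h_check() above only needs the retry bit and the last sequence-control value remembered per receive queue, which is easy to mirror in a few lines.

#include <stdio.h>

#define NUM_QUEUES  16
#define FCTL_RETRY  0x0800      /* same bit as IEEE80211_FCTL_RETRY */

static unsigned short last_seq_ctrl[NUM_QUEUES];

static int is_duplicate(unsigned short fc, unsigned short seq_ctrl,
                        unsigned int queue)
{
        if ((fc & FCTL_RETRY) && last_seq_ctrl[queue] == seq_ctrl)
                return 1;       /* retransmission of the frame we already saw */

        last_seq_ctrl[queue] = seq_ctrl;
        return 0;
}

int main(void)
{
        /* the same frame twice, the second copy flagged as a retry */
        printf("%d\n", is_duplicate(0x0008, 0x1230, 0));               /* 0 */
        printf("%d\n", is_duplicate(0x0008 | FCTL_RETRY, 0x1230, 0));  /* 1 */
        return 0;
}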
*/ + return RX_DROP_MONITOR; + } + + return RX_DROP_MONITOR; + } + + return RX_CONTINUE; +} + + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int keyidx; + int hdrlen; + ieee80211_rx_result result = RX_DROP_UNUSABLE; + struct ieee80211_key *stakey = NULL; + int mmie_keyidx = -1; + + /* + * Key selection 101 + * + * There are four types of keys: + * - GTK (group keys) + * - IGTK (group keys for management frames) + * - PTK (pairwise keys) + * - STK (station-to-station pairwise keys) + * + * When selecting a key, we have to distinguish between multicast + * (including broadcast) and unicast frames, the latter can only + * use PTKs and STKs while the former always use GTKs and IGTKs. + * Unless, of course, actual WEP keys ("pre-RSNA") are used, then + * unicast frames can also use key indices like GTKs. Hence, if we + * don't have a PTK/STK we check the key index for a WEP key. + * + * Note that in a regular BSS, multicast frames are sent by the + * AP only, associated stations unicast the frame to the AP first + * which then multicasts it on their behalf. + * + * There is also a slight problem in IBSS mode: GTKs are negotiated + * with each station, that is something we don't currently handle. + * The spec seems to expect that one negotiates the same key with + * every station but there's no such requirement; VLANs could be + * possible. + */ + + /* + * No point in finding a key and decrypting if the frame is neither + * addressed to us nor a multicast frame. + */ + if (!(rx->flags & IEEE80211_RX_RA_MATCH)) + return RX_CONTINUE; + + /* start without a key */ + rx->key = NULL; + + if (rx->sta) + stakey = rcu_dereference(rx->sta->key); + + if (!ieee80211_has_protected(hdr->frame_control)) + mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); + + if (!is_multicast_ether_addr(hdr->addr1) && stakey) { + rx->key = stakey; + /* Skip decryption if the frame is not protected. */ + if (!ieee80211_has_protected(hdr->frame_control)) + return RX_CONTINUE; + } else if (mmie_keyidx >= 0) { + /* Broadcast/multicast robust management frame / BIP */ + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + + if (mmie_keyidx < NUM_DEFAULT_KEYS || + mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) + return RX_DROP_MONITOR; /* unexpected BIP keyidx */ + rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); + } else if (!ieee80211_has_protected(hdr->frame_control)) { + /* + * The frame was not protected, so skip decryption. However, we + * need to set rx->key if there is a key that could have been + * used so that the frame may be dropped if encryption would + * have been expected. + */ + struct ieee80211_key *key = NULL; + if (ieee80211_is_mgmt(hdr->frame_control) && + is_multicast_ether_addr(hdr->addr1) && + (key = rcu_dereference(rx->sdata->default_mgmt_key))) + rx->key = key; + else if ((key = rcu_dereference(rx->sdata->default_key))) + rx->key = key; + return RX_CONTINUE; + } else { + /* + * The device doesn't give us the IV so we won't be + * able to look up the key. That's ok though, we + * don't need to decrypt the frame, we just won't + * be able to keep statistics accurate. + * Except for key threshold notifications, should + * we somehow allow the driver to tell us which key + * the hardware used if this flag is set? 
+ */ + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (rx->skb->len < 8 + hdrlen) + return RX_DROP_UNUSABLE; /* TODO: count this? */ + + /* + * no need to call ieee80211_wep_get_keyidx, + * it verifies a bunch of things we've done already + */ + keyidx = rx->skb->data[hdrlen + 3] >> 6; + + rx->key = rcu_dereference(rx->sdata->keys[keyidx]); + + /* + * RSNA-protected unicast frames should always be sent with + * pairwise or station-to-station keys, but for WEP we allow + * using a key index as well. + */ + if (rx->key && rx->key->conf.alg != ALG_WEP && + !is_multicast_ether_addr(hdr->addr1)) + rx->key = NULL; + } + + if (rx->key) { + rx->key->tx_rx_count++; + /* TODO: add threshold stuff again */ + } else { + return RX_DROP_MONITOR; + } + + /* Check for weak IVs if possible */ + if (rx->sta && rx->key->conf.alg == ALG_WEP && + ieee80211_is_data(hdr->frame_control) && + (!(status->flag & RX_FLAG_IV_STRIPPED) || + !(status->flag & RX_FLAG_DECRYPTED)) && + ieee80211_wep_is_weak_iv(rx->skb, rx->key)) + rx->sta->wep_weak_iv_count++; + + switch (rx->key->conf.alg) { + case ALG_WEP: + result = ieee80211_crypto_wep_decrypt(rx); + break; + case ALG_TKIP: + result = ieee80211_crypto_tkip_decrypt(rx); + break; + case ALG_CCMP: + result = ieee80211_crypto_ccmp_decrypt(rx); + break; + case ALG_AES_CMAC: + result = ieee80211_crypto_aes_cmac_decrypt(rx); + break; + } + + /* either the frame has been decrypted or will be dropped */ + status->flag |= RX_FLAG_DECRYPTED; + + return result; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx) +{ + struct ieee80211_local *local; + struct ieee80211_hdr *hdr; + struct sk_buff *skb; + + local = rx->local; + skb = rx->skb; + hdr = (struct ieee80211_hdr *) skb->data; + + if (!local->pspolling) + return RX_CONTINUE; + + if (!ieee80211_has_fromds(hdr->frame_control)) + /* this is not from AP */ + return RX_CONTINUE; + + if (!ieee80211_is_data(hdr->frame_control)) + return RX_CONTINUE; + + if (!ieee80211_has_moredata(hdr->frame_control)) { + /* AP has no more frames buffered for us */ + local->pspolling = false; + return RX_CONTINUE; + } + + /* more data bit is set, let's request a new frame from the AP */ + ieee80211_send_pspoll(local, rx->sdata); + + return RX_CONTINUE; +} + +static void ap_sta_ps_start(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + + atomic_inc(&sdata->bss->num_sta_ps); + set_sta_flags(sta, WLAN_STA_PS_STA); + drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", + sdata->name, sta->sta.addr, sta->sta.aid); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ +} + +static void ap_sta_ps_end(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + + atomic_dec(&sdata->bss->num_sta_ps); + + clear_sta_flags(sta, WLAN_STA_PS_STA); + +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", + sdata->name, sta->sta.addr, sta->sta.aid); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + + if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) { +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", + sdata->name, sta->sta.addr, sta->sta.aid); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + return; 
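Aside, not part of the patch: when the hardware leaves the IV in place, the key index is recovered from the top two bits of the fourth IV octet, which is all the keyidx = rx->skb->data[hdrlen + 3] >> 6 line above does. A tiny demonstration on a fabricated buffer:

#include <stdio.h>

int main(void)
{
        /* fake frame: 24-byte 802.11 header followed by a 4-byte WEP IV */
        unsigned char frame[28] = { 0 };
        unsigned int hdrlen = 24;

        frame[hdrlen + 3] = 2 << 6;     /* pretend key index 2 was used */

        printf("key index = %u\n", frame[hdrlen + 3] >> 6);     /* 2 */
        return 0;
}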
+ } + + ieee80211_sta_ps_deliver_wakeup(sta); +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) +{ + struct sta_info *sta = rx->sta; + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (!sta) + return RX_CONTINUE; + + /* + * Update last_rx only for IBSS packets which are for the current + * BSSID to avoid keeping the current IBSS network alive in cases + * where other STAs start using different BSSID. + */ + if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { + u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, + NL80211_IFTYPE_ADHOC); + if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) + sta->last_rx = jiffies; + } else if (!is_multicast_ether_addr(hdr->addr1)) { + /* + * Mesh beacons will update last_rx when if they are found to + * match the current local configuration when processed. + */ + sta->last_rx = jiffies; + } + + if (!(rx->flags & IEEE80211_RX_RA_MATCH)) + return RX_CONTINUE; + + if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_sta_rx_notify(rx->sdata, hdr); + + sta->rx_fragments++; + sta->rx_bytes += rx->skb->len; + sta->last_signal = status->signal; + sta->last_noise = status->noise; + + /* + * Change STA power saving mode only at the end of a frame + * exchange sequence. + */ + if (!ieee80211_has_morefrags(hdr->frame_control) && + (rx->sdata->vif.type == NL80211_IFTYPE_AP || + rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { + if (test_sta_flags(sta, WLAN_STA_PS_STA)) { + /* + * Ignore doze->wake transitions that are + * indicated by non-data frames, the standard + * is unclear here, but for example going to + * PS mode and then scanning would cause a + * doze->wake transition for the probe request, + * and that is clearly undesirable. + */ + if (ieee80211_is_data(hdr->frame_control) && + !ieee80211_has_pm(hdr->frame_control)) + ap_sta_ps_end(sta); + } else { + if (ieee80211_has_pm(hdr->frame_control)) + ap_sta_ps_start(sta); + } + } + + /* + * Drop (qos-)data::nullfunc frames silently, since they + * are used only to control station power saving mode. + */ + if (ieee80211_is_nullfunc(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control)) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); + + /* + * If we receive a 4-addr nullfunc frame from a STA + * that was not moved to a 4-addr STA vlan yet, drop + * the frame to the monitor interface, to make sure + * that hostapd sees it + */ + if (ieee80211_has_a4(hdr->frame_control) && + (rx->sdata->vif.type == NL80211_IFTYPE_AP || + (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + !rx->sdata->u.vlan.sta))) + return RX_DROP_MONITOR; + /* + * Update counter and free packet here to avoid + * counting this as a dropped packed. 
+ */ + sta->rx_packets++; + dev_kfree_skb(rx->skb); + return RX_QUEUED; + } + + return RX_CONTINUE; +} /* ieee80211_rx_h_sta_process */ + +static inline struct ieee80211_fragment_entry * +ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, + unsigned int frag, unsigned int seq, int rx_queue, + struct sk_buff **skb) +{ + struct ieee80211_fragment_entry *entry; + int idx; + + idx = sdata->fragment_next; + entry = &sdata->fragments[sdata->fragment_next++]; + if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) + sdata->fragment_next = 0; + + if (!skb_queue_empty(&entry->skb_list)) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) entry->skb_list.next->data; + printk(KERN_DEBUG "%s: RX reassembly removed oldest " + "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " + "addr1=%pM addr2=%pM\n", + sdata->name, idx, + jiffies - entry->first_frag_time, entry->seq, + entry->last_frag, hdr->addr1, hdr->addr2); +#endif + __skb_queue_purge(&entry->skb_list); + } + + __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ + *skb = NULL; + entry->first_frag_time = jiffies; + entry->seq = seq; + entry->rx_queue = rx_queue; + entry->last_frag = frag; + entry->ccmp = 0; + entry->extra_len = 0; + + return entry; +} + +static inline struct ieee80211_fragment_entry * +ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, + unsigned int frag, unsigned int seq, + int rx_queue, struct ieee80211_hdr *hdr) +{ + struct ieee80211_fragment_entry *entry; + int i, idx; + + idx = sdata->fragment_next; + for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { + struct ieee80211_hdr *f_hdr; + + idx--; + if (idx < 0) + idx = IEEE80211_FRAGMENT_MAX - 1; + + entry = &sdata->fragments[idx]; + if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || + entry->rx_queue != rx_queue || + entry->last_frag + 1 != frag) + continue; + + f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data; + + /* + * Check ftype and addresses are equal, else check next fragment + */ + if (((hdr->frame_control ^ f_hdr->frame_control) & + cpu_to_le16(IEEE80211_FCTL_FTYPE)) || + compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || + compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) + continue; + + if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { + __skb_queue_purge(&entry->skb_list); + continue; + } + return entry; + } + + return NULL; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr; + u16 sc; + __le16 fc; + unsigned int frag, seq; + struct ieee80211_fragment_entry *entry; + struct sk_buff *skb; + + hdr = (struct ieee80211_hdr *)rx->skb->data; + fc = hdr->frame_control; + sc = le16_to_cpu(hdr->seq_ctrl); + frag = sc & IEEE80211_SCTL_FRAG; + + if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || + (rx->skb)->len < 24 || + is_multicast_ether_addr(hdr->addr1))) { + /* not fragmented */ + goto out; + } + I802_DEBUG_INC(rx->local->rx_handlers_fragments); + + seq = (sc & IEEE80211_SCTL_SEQ) >> 4; + + if (frag == 0) { + /* This is the first fragment of a new frame. */ + entry = ieee80211_reassemble_add(rx->sdata, frag, seq, + rx->queue, &(rx->skb)); + if (rx->key && rx->key->conf.alg == ALG_CCMP && + ieee80211_has_protected(fc)) { + /* Store CCMP PN so that we can verify that the next + * fragment has a sequential PN value. 
*/ + entry->ccmp = 1; + memcpy(entry->last_pn, + rx->key->u.ccmp.rx_pn[rx->queue], + CCMP_PN_LEN); + } + return RX_QUEUED; + } + + /* This is a fragment for a frame that should already be pending in + * fragment cache. Add this fragment to the end of the pending entry. + */ + entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr); + if (!entry) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); + return RX_DROP_MONITOR; + } + + /* Verify that MPDUs within one MSDU have sequential PN values. + * (IEEE 802.11i, 8.3.3.4.5) */ + if (entry->ccmp) { + int i; + u8 pn[CCMP_PN_LEN], *rpn; + if (!rx->key || rx->key->conf.alg != ALG_CCMP) + return RX_DROP_UNUSABLE; + memcpy(pn, entry->last_pn, CCMP_PN_LEN); + for (i = CCMP_PN_LEN - 1; i >= 0; i--) { + pn[i]++; + if (pn[i]) + break; + } + rpn = rx->key->u.ccmp.rx_pn[rx->queue]; + if (memcmp(pn, rpn, CCMP_PN_LEN)) + return RX_DROP_UNUSABLE; + memcpy(entry->last_pn, pn, CCMP_PN_LEN); + } + + skb_pull(rx->skb, ieee80211_hdrlen(fc)); + __skb_queue_tail(&entry->skb_list, rx->skb); + entry->last_frag = frag; + entry->extra_len += rx->skb->len; + if (ieee80211_has_morefrags(fc)) { + rx->skb = NULL; + return RX_QUEUED; + } + + rx->skb = __skb_dequeue(&entry->skb_list); + if (skb_tailroom(rx->skb) < entry->extra_len) { + I802_DEBUG_INC(rx->local->rx_expand_skb_head2); + if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, + GFP_ATOMIC))) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); + __skb_queue_purge(&entry->skb_list); + return RX_DROP_UNUSABLE; + } + } + while ((skb = __skb_dequeue(&entry->skb_list))) { + memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); + dev_kfree_skb(skb); + } + + /* Complete frame has been reassembled - process it now */ + rx->flags |= IEEE80211_RX_FRAGMENTED; + + out: + if (rx->sta) + rx->sta->rx_packets++; + if (is_multicast_ether_addr(hdr->addr1)) + rx->local->dot11MulticastReceivedFrameCount++; + else + ieee80211_led_rx(rx->local); + return RX_CONTINUE; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control; + + if (likely(!rx->sta || !ieee80211_is_pspoll(fc) || + !(rx->flags & IEEE80211_RX_RA_MATCH))) + return RX_CONTINUE; + + if ((sdata->vif.type != NL80211_IFTYPE_AP) && + (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) + return RX_DROP_UNUSABLE; + + if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER)) + ieee80211_sta_ps_deliver_poll_response(rx->sta); + else + set_sta_flags(rx->sta, WLAN_STA_PSPOLL); + + /* Free PS Poll skb here instead of returning RX_DROP that would + * count as an dropped frame. 
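Aside, not part of the patch: the defragmentation path above insists that every CCMP-protected fragment carry last_pn + 1, and the increment is done byte by byte with carry, most significant byte first in the array. The same loop, standalone:

#include <stdio.h>

#define PN_LEN 6

static void pn_inc(unsigned char *pn)
{
        int i;

        for (i = PN_LEN - 1; i >= 0; i--) {
                pn[i]++;
                if (pn[i])      /* keep carrying only while a byte wraps to 0 */
                        break;
        }
}

int main(void)
{
        unsigned char pn[PN_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0xff };
        int i;

        pn_inc(pn);     /* expect 00 00 00 00 02 00 */
        for (i = 0; i < PN_LEN; i++)
                printf("%02x ", pn[i]);
        printf("\n");
        return 0;
}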
*/ + dev_kfree_skb(rx->skb); + + return RX_QUEUED; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) +{ + u8 *data = rx->skb->data; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return RX_CONTINUE; + + /* remove the qos control field, update frame type and meta-data */ + memmove(data + IEEE80211_QOS_CTL_LEN, data, + ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); + hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); + /* change frame type to non QOS */ + hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); + + return RX_CONTINUE; +} + +static int +ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) +{ + if (unlikely(!rx->sta || + !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED))) + return -EACCES; + + return 0; +} + +static int +ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + + /* + * Pass through unencrypted frames if the hardware has + * decrypted them already. + */ + if (status->flag & RX_FLAG_DECRYPTED) + return 0; + + /* Drop unencrypted frames if key is set. */ + if (unlikely(!ieee80211_has_protected(fc) && + !ieee80211_is_nullfunc(fc) && + ieee80211_is_data(fc) && + (rx->key || rx->sdata->drop_unencrypted))) + return -EACCES; + + return 0; +} + +static int +ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + __le16 fc = hdr->frame_control; + int res; + + res = ieee80211_drop_unencrypted(rx, fc); + if (unlikely(res)) + return res; + + if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) { + if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && + rx->key)) + return -EACCES; + /* BIP does not use Protected field, so need to check MMIE */ + if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && + ieee80211_get_mmie_keyidx(rx->skb) < 0 && + rx->key)) + return -EACCES; + /* + * When using MFP, Action frames are not allowed prior to + * having configured keys. + */ + if (unlikely(ieee80211_is_action(fc) && !rx->key && + ieee80211_is_robust_mgmt_frame( + (struct ieee80211_hdr *) rx->skb->data))) + return -EACCES; + } + + return 0; +} + +static int +__ieee80211_data_to_8023(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + + if (ieee80211_has_a4(hdr->frame_control) && + sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) + return -1; + + if (is_multicast_ether_addr(hdr->addr1) && + ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) || + (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr))) + return -1; + + return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); +} + +/* + * requires that rx->skb is a frame with ethernet header + */ +static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) +{ + static const u8 pae_group_addr[ETH_ALEN] __aligned(2) + = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; + struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; + + /* + * Allow EAPOL frames to us/the PAE group address regardless + * of whether the frame was encrypted or not. 
+ */ + if (ehdr->h_proto == htons(ETH_P_PAE) && + (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || + compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) + return true; + + if (ieee80211_802_1x_port_control(rx) || + ieee80211_drop_unencrypted(rx, fc)) + return false; + + return true; +} + +/* + * requires that rx->skb is a frame with ethernet header + */ +static void +ieee80211_deliver_skb(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct net_device *dev = sdata->dev; + struct sk_buff *skb, *xmit_skb; + struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; + struct sta_info *dsta; + + skb = rx->skb; + xmit_skb = NULL; + + if ((sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && + !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && + (rx->flags & IEEE80211_RX_RA_MATCH) && + (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { + if (is_multicast_ether_addr(ehdr->h_dest)) { + /* + * send multicast frames both to higher layers in + * local net stack and back to the wireless medium + */ + xmit_skb = skb_copy(skb, GFP_ATOMIC); + if (!xmit_skb && net_ratelimit()) + printk(KERN_DEBUG "%s: failed to clone " + "multicast frame\n", dev->name); + } else { + dsta = sta_info_get(sdata, skb->data); + if (dsta) { + /* + * The destination station is associated to + * this AP (in this VLAN), so send the frame + * directly to it and do not pass it to local + * net stack. + */ + xmit_skb = skb; + skb = NULL; + } + } + } + + if (skb) { + int align __maybe_unused; + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* + * 'align' will only take the values 0 or 2 here + * since all frames are required to be aligned + * to 2-byte boundaries when being passed to + * mac80211. That also explains the __skb_push() + * below. 
+ */ + align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3; + if (align) { + if (WARN_ON(skb_headroom(skb) < 3)) { + dev_kfree_skb(skb); + skb = NULL; + } else { + u8 *data = skb->data; + size_t len = skb_headlen(skb); + skb->data -= align; + memmove(skb->data, data, len); + skb_set_tail_pointer(skb, len); + } + } +#endif + + if (skb) { + /* deliver to local stack */ + skb->protocol = eth_type_trans(skb, dev); + memset(skb->cb, 0, sizeof(skb->cb)); + netif_rx(skb); + } + } + + if (xmit_skb) { + /* send to wireless media */ + xmit_skb->protocol = htons(ETH_P_802_3); + skb_reset_network_header(xmit_skb); + skb_reset_mac_header(xmit_skb); + dev_queue_xmit(xmit_skb); + } +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) +{ + struct net_device *dev = rx->sdata->dev; + struct sk_buff *skb = rx->skb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + __le16 fc = hdr->frame_control; + struct sk_buff_head frame_list; + + if (unlikely(!ieee80211_is_data(fc))) + return RX_CONTINUE; + + if (unlikely(!ieee80211_is_data_present(fc))) + return RX_DROP_MONITOR; + + if (!(rx->flags & IEEE80211_RX_AMSDU)) + return RX_CONTINUE; + + if (ieee80211_has_a4(hdr->frame_control) && + rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + !rx->sdata->u.vlan.sta) + return RX_DROP_UNUSABLE; + + if (is_multicast_ether_addr(hdr->addr1) && + ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + rx->sdata->u.vlan.sta) || + (rx->sdata->vif.type == NL80211_IFTYPE_STATION && + rx->sdata->u.mgd.use_4addr))) + return RX_DROP_UNUSABLE; + + skb->dev = dev; + __skb_queue_head_init(&frame_list); + + ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, + rx->sdata->vif.type, + rx->local->hw.extra_tx_headroom); + + while (!skb_queue_empty(&frame_list)) { + rx->skb = __skb_dequeue(&frame_list); + + if (!ieee80211_frame_allowed(rx, fc)) { + dev_kfree_skb(rx->skb); + continue; + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += rx->skb->len; + + ieee80211_deliver_skb(rx); + } + + return RX_QUEUED; +} + +#ifdef CONFIG_MAC80211_MESH +static ieee80211_rx_result +ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr; + struct ieee80211s_hdr *mesh_hdr; + unsigned int hdrlen; + struct sk_buff *skb = rx->skb, *fwd_skb; + struct ieee80211_local *local = rx->local; + struct ieee80211_sub_if_data *sdata = rx->sdata; + + hdr = (struct ieee80211_hdr *) skb->data; + hdrlen = ieee80211_hdrlen(hdr->frame_control); + mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); + + if (!ieee80211_is_data(hdr->frame_control)) + return RX_CONTINUE; + + if (!mesh_hdr->ttl) + /* illegal frame */ + return RX_DROP_MONITOR; + + if (mesh_hdr->flags & MESH_FLAGS_AE) { + struct mesh_path *mppath; + char *proxied_addr; + char *mpp_addr; + + if (is_multicast_ether_addr(hdr->addr1)) { + mpp_addr = hdr->addr3; + proxied_addr = mesh_hdr->eaddr1; + } else { + mpp_addr = hdr->addr4; + proxied_addr = mesh_hdr->eaddr2; + } + + rcu_read_lock(); + mppath = mpp_path_lookup(proxied_addr, sdata); + if (!mppath) { + mpp_path_add(proxied_addr, mpp_addr, sdata); + } else { + spin_lock_bh(&mppath->state_lock); + if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) + memcpy(mppath->mpp, mpp_addr, ETH_ALEN); + spin_unlock_bh(&mppath->state_lock); + } + rcu_read_unlock(); + } + + /* Frame has reached destination. 
Don't forward */ + if (!is_multicast_ether_addr(hdr->addr1) && + compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0) + return RX_CONTINUE; + + mesh_hdr->ttl--; + + if (rx->flags & IEEE80211_RX_RA_MATCH) { + if (!mesh_hdr->ttl) + IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh, + dropped_frames_ttl); + else { + struct ieee80211_hdr *fwd_hdr; + struct ieee80211_tx_info *info; + + fwd_skb = skb_copy(skb, GFP_ATOMIC); + + if (!fwd_skb && net_ratelimit()) + printk(KERN_DEBUG "%s: failed to clone mesh frame\n", + sdata->name); + + fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; + memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); + info = IEEE80211_SKB_CB(fwd_skb); + memset(info, 0, sizeof(*info)); + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; + info->control.vif = &rx->sdata->vif; + skb_set_queue_mapping(skb, + ieee80211_select_queue(rx->sdata, fwd_skb)); + ieee80211_set_qos_hdr(local, skb); + if (is_multicast_ether_addr(fwd_hdr->addr1)) + IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, + fwded_mcast); + else { + int err; + /* + * Save TA to addr1 to send TA a path error if a + * suitable next hop is not found + */ + memcpy(fwd_hdr->addr1, fwd_hdr->addr2, + ETH_ALEN); + err = mesh_nexthop_lookup(fwd_skb, sdata); + /* Failed to immediately resolve next hop: + * fwded frame was dropped or will be added + * later to the pending skb queue. */ + if (err) + return RX_DROP_MONITOR; + + IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, + fwded_unicast); + } + IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, + fwded_frames); + ieee80211_add_pending_skb(local, fwd_skb); + } + } + + if (is_multicast_ether_addr(hdr->addr1) || + sdata->dev->flags & IFF_PROMISC) + return RX_CONTINUE; + else + return RX_DROP_MONITOR; +} +#endif + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_data(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_local *local = rx->local; + struct net_device *dev = sdata->dev; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + __le16 fc = hdr->frame_control; + int err; + + if (unlikely(!ieee80211_is_data(hdr->frame_control))) + return RX_CONTINUE; + + if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) + return RX_DROP_MONITOR; + + /* + * Allow the cooked monitor interface of an AP to see 4-addr frames so + * that a 4-addr station can be detected and moved into a separate VLAN + */ + if (ieee80211_has_a4(hdr->frame_control) && + sdata->vif.type == NL80211_IFTYPE_AP) + return RX_DROP_MONITOR; + + err = __ieee80211_data_to_8023(rx); + if (unlikely(err)) + return RX_DROP_UNUSABLE; + + if (!ieee80211_frame_allowed(rx, fc)) + return RX_DROP_MONITOR; + + rx->skb->dev = dev; + + dev->stats.rx_packets++; + dev->stats.rx_bytes += rx->skb->len; + + if (ieee80211_is_data(hdr->frame_control) && + !is_multicast_ether_addr(hdr->addr1) && + local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) { + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); + } + + ieee80211_deliver_skb(rx); + + return RX_QUEUED; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) +{ + struct ieee80211_local *local = rx->local; + struct ieee80211_hw *hw = &local->hw; + struct sk_buff *skb = rx->skb; + struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; + struct tid_ampdu_rx *tid_agg_rx; + u16 start_seq_num; + u16 tid; + + if (likely(!ieee80211_is_ctl(bar->frame_control))) + return RX_CONTINUE; + + if 
(ieee80211_is_back_req(bar->frame_control)) { + if (!rx->sta) + return RX_DROP_MONITOR; + tid = le16_to_cpu(bar->control) >> 12; + if (rx->sta->ampdu_mlme.tid_state_rx[tid] + != HT_AGG_STATE_OPERATIONAL) + return RX_DROP_MONITOR; + tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; + + start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; + + /* reset session timer */ + if (tid_agg_rx->timeout) + mod_timer(&tid_agg_rx->session_timer, + TU_TO_EXP_TIME(tid_agg_rx->timeout)); + + /* release stored frames up to start of BAR */ + ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, + frames); + kfree_skb(skb); + return RX_QUEUED; + } + + return RX_CONTINUE; +} + +static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *resp; + + if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) { + /* Not to own unicast address */ + return; + } + + if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || + compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { + /* Not from the current AP or not associated yet. */ + return; + } + + if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { + /* Too short SA Query request frame */ + return; + } + + skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); + if (skb == NULL) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + resp = (struct ieee80211_mgmt *) skb_put(skb, 24); + memset(resp, 0, 24); + memcpy(resp->da, mgmt->sa, ETH_ALEN); + memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); + memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); + resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); + resp->u.action.category = WLAN_CATEGORY_SA_QUERY; + resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; + memcpy(resp->u.action.u.sa_query.trans_id, + mgmt->u.action.u.sa_query.trans_id, + WLAN_SA_QUERY_TR_ID_LEN); + + ieee80211_tx_skb(sdata, skb); +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_action(struct ieee80211_rx_data *rx) +{ + struct ieee80211_local *local = rx->local; + struct ieee80211_sub_if_data *sdata = rx->sdata; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; + struct sk_buff *nskb; + struct ieee80211_rx_status *status; + int len = rx->skb->len; + + if (!ieee80211_is_action(mgmt->frame_control)) + return RX_CONTINUE; + + /* drop too small frames */ + if (len < IEEE80211_MIN_ACTION_SIZE) + return RX_DROP_UNUSABLE; + + if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) + return RX_DROP_UNUSABLE; + + if (!(rx->flags & IEEE80211_RX_RA_MATCH)) + return RX_DROP_UNUSABLE; + + if (ieee80211_drop_unencrypted_mgmt(rx)) + return RX_DROP_UNUSABLE; + + switch (mgmt->u.action.category) { + case WLAN_CATEGORY_BACK: + /* + * The aggregation code is not prepared to handle + * anything but STA/AP due to the BSSID handling; + * IBSS could work in the code but isn't supported + * by drivers or the standard. 
+ */ + if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_AP) + break; + + /* verify action_code is present */ + if (len < IEEE80211_MIN_ACTION_SIZE + 1) + break; + + switch (mgmt->u.action.u.addba_req.action_code) { + case WLAN_ACTION_ADDBA_REQ: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.addba_req))) + return RX_DROP_MONITOR; + ieee80211_process_addba_request(local, rx->sta, mgmt, len); + goto handled; + case WLAN_ACTION_ADDBA_RESP: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.addba_resp))) + break; + ieee80211_process_addba_resp(local, rx->sta, mgmt, len); + goto handled; + case WLAN_ACTION_DELBA: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.delba))) + break; + ieee80211_process_delba(sdata, rx->sta, mgmt, len); + goto handled; + } + break; + case WLAN_CATEGORY_SPECTRUM_MGMT: + if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) + break; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + break; + + /* verify action_code is present */ + if (len < IEEE80211_MIN_ACTION_SIZE + 1) + break; + + switch (mgmt->u.action.u.measurement.action_code) { + case WLAN_ACTION_SPCT_MSR_REQ: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.measurement))) + break; + ieee80211_process_measurement_req(sdata, mgmt, len); + goto handled; + case WLAN_ACTION_SPCT_CHL_SWITCH: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.chan_switch))) + break; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) + break; + + if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) + break; + + return ieee80211_sta_rx_mgmt(sdata, rx->skb); + } + break; + case WLAN_CATEGORY_SA_QUERY: + if (len < (IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.sa_query))) + break; + + switch (mgmt->u.action.u.sa_query.action) { + case WLAN_ACTION_SA_QUERY_REQUEST: + if (sdata->vif.type != NL80211_IFTYPE_STATION) + break; + ieee80211_process_sa_query_req(sdata, mgmt, len); + goto handled; + } + break; + } + + /* + * For AP mode, hostapd is responsible for handling any action + * frames that we didn't handle, including returning unknown + * ones. For all other modes we will return them to the sender, + * setting the 0x80 bit in the action category, as required by + * 802.11-2007 7.3.1.11. + */ + if (sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + return RX_DROP_MONITOR; + + /* + * Getting here means the kernel doesn't know how to handle + * it, but maybe userspace does ... include returned frames + * so userspace can register for those to know whether ones + * it transmitted were processed or returned. 
+ */ + status = IEEE80211_SKB_RXCB(rx->skb); + + if (sdata->vif.type == NL80211_IFTYPE_STATION && + cfg80211_rx_action(rx->sdata->dev, status->freq, + rx->skb->data, rx->skb->len, + GFP_ATOMIC)) + goto handled; + + /* do not return rejected action frames */ + if (mgmt->u.action.category & 0x80) + return RX_DROP_UNUSABLE; + + nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, + GFP_ATOMIC); + if (nskb) { + struct ieee80211_mgmt *mgmt = (void *)nskb->data; + + mgmt->u.action.category |= 0x80; + memcpy(mgmt->da, mgmt->sa, ETH_ALEN); + memcpy(mgmt->sa, rx->sdata->vif.addr, ETH_ALEN); + + memset(nskb->cb, 0, sizeof(nskb->cb)); + + ieee80211_tx_skb(rx->sdata, nskb); + } + + handled: + if (rx->sta) + rx->sta->rx_packets++; + dev_kfree_skb(rx->skb); + return RX_QUEUED; +} + +static ieee80211_rx_result debug_noinline +ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) +{ + struct ieee80211_sub_if_data *sdata = rx->sdata; + ieee80211_rx_result rxs; + + if (!(rx->flags & IEEE80211_RX_RA_MATCH)) + return RX_DROP_MONITOR; + + if (ieee80211_drop_unencrypted_mgmt(rx)) + return RX_DROP_UNUSABLE; + + rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb); + if (rxs != RX_CONTINUE) + return rxs; + + if (ieee80211_vif_is_mesh(&sdata->vif)) + return ieee80211_mesh_rx_mgmt(sdata, rx->skb); + + if (sdata->vif.type == NL80211_IFTYPE_ADHOC) + return ieee80211_ibss_rx_mgmt(sdata, rx->skb); + + if (sdata->vif.type == NL80211_IFTYPE_STATION) + return ieee80211_sta_rx_mgmt(sdata, rx->skb); + + return RX_DROP_MONITOR; +} + +static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, + struct ieee80211_rx_data *rx) +{ + int keyidx; + unsigned int hdrlen; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (rx->skb->len >= hdrlen + 4) + keyidx = rx->skb->data[hdrlen + 3] >> 6; + else + keyidx = -1; + + if (!rx->sta) { + /* + * Some hardware seem to generate incorrect Michael MIC + * reports; ignore them to avoid triggering countermeasures. + */ + return; + } + + if (!ieee80211_has_protected(hdr->frame_control)) + return; + + if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) { + /* + * APs with pairwise keys should never receive Michael MIC + * errors for non-zero keyidx because these are reserved for + * group keys and only the AP is sending real multicast + * frames in the BSS. 
+ */ + return; + } + + if (!ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_auth(hdr->frame_control)) + return; + + mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL, + GFP_ATOMIC); +} + +/* TODO: use IEEE80211_RX_FRAGMENTED */ +static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, + struct ieee80211_rate *rate) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local = rx->local; + struct ieee80211_rtap_hdr { + struct ieee80211_radiotap_header hdr; + u8 flags; + u8 rate_or_pad; + __le16 chan_freq; + __le16 chan_flags; + } __attribute__ ((packed)) *rthdr; + struct sk_buff *skb = rx->skb, *skb2; + struct net_device *prev_dev = NULL; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + + if (status->flag & RX_FLAG_INTERNAL_CMTR) + goto out_free_skb; + + if (skb_headroom(skb) < sizeof(*rthdr) && + pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) + goto out_free_skb; + + rthdr = (void *)skb_push(skb, sizeof(*rthdr)); + memset(rthdr, 0, sizeof(*rthdr)); + rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr)); + rthdr->hdr.it_present = + cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_CHANNEL)); + + if (rate) { + rthdr->rate_or_pad = rate->bitrate / 5; + rthdr->hdr.it_present |= + cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); + } + rthdr->chan_freq = cpu_to_le16(status->freq); + + if (status->band == IEEE80211_BAND_5GHZ) + rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM | + IEEE80211_CHAN_5GHZ); + else + rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN | + IEEE80211_CHAN_2GHZ); + + skb_set_mac_header(skb, 0); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type != NL80211_IFTYPE_MONITOR || + !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) + continue; + + if (prev_dev) { + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) { + skb2->dev = prev_dev; + netif_rx(skb2); + } + } + + prev_dev = sdata->dev; + sdata->dev->stats.rx_packets++; + sdata->dev->stats.rx_bytes += skb->len; + } + + if (prev_dev) { + skb->dev = prev_dev; + netif_rx(skb); + skb = NULL; + } else + goto out_free_skb; + + status->flag |= RX_FLAG_INTERNAL_CMTR; + return; + + out_free_skb: + dev_kfree_skb(skb); +} + + +static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, + struct ieee80211_rx_data *rx, + struct sk_buff *skb, + struct ieee80211_rate *rate) +{ + struct sk_buff_head reorder_release; + ieee80211_rx_result res = RX_DROP_MONITOR; + + __skb_queue_head_init(&reorder_release); + + rx->skb = skb; + rx->sdata = sdata; + +#define CALL_RXH(rxh) \ + do { \ + res = rxh(rx); \ + if (res != RX_CONTINUE) \ + goto rxh_next; \ + } while (0); + + /* + * NB: the rxh_next label works even if we jump + * to it from here because then the list will + * be empty, which is a trivial check + */ + CALL_RXH(ieee80211_rx_h_passive_scan) + CALL_RXH(ieee80211_rx_h_check) + + ieee80211_rx_reorder_ampdu(rx, &reorder_release); + + while ((skb = __skb_dequeue(&reorder_release))) { + /* + * all the other fields are valid across frames + * that belong to an aMPDU since they are on the + * same TID from the same station + */ + rx->skb = skb; + + CALL_RXH(ieee80211_rx_h_decrypt) + CALL_RXH(ieee80211_rx_h_check_more_data) + CALL_RXH(ieee80211_rx_h_sta_process) + CALL_RXH(ieee80211_rx_h_defragment) + CALL_RXH(ieee80211_rx_h_ps_poll) + 
CALL_RXH(ieee80211_rx_h_michael_mic_verify) + /* must be after MMIC verify so header is counted in MPDU mic */ + CALL_RXH(ieee80211_rx_h_remove_qos_control) + CALL_RXH(ieee80211_rx_h_amsdu) +#ifdef CONFIG_MAC80211_MESH + if (ieee80211_vif_is_mesh(&sdata->vif)) + CALL_RXH(ieee80211_rx_h_mesh_fwding); +#endif + CALL_RXH(ieee80211_rx_h_data) + + /* special treatment -- needs the queue */ + res = ieee80211_rx_h_ctrl(rx, &reorder_release); + if (res != RX_CONTINUE) + goto rxh_next; + + CALL_RXH(ieee80211_rx_h_action) + CALL_RXH(ieee80211_rx_h_mgmt) + +#undef CALL_RXH + + rxh_next: + switch (res) { + case RX_DROP_MONITOR: + I802_DEBUG_INC(sdata->local->rx_handlers_drop); + if (rx->sta) + rx->sta->rx_dropped++; + /* fall through */ + case RX_CONTINUE: + ieee80211_rx_cooked_monitor(rx, rate); + break; + case RX_DROP_UNUSABLE: + I802_DEBUG_INC(sdata->local->rx_handlers_drop); + if (rx->sta) + rx->sta->rx_dropped++; + dev_kfree_skb(rx->skb); + break; + case RX_QUEUED: + I802_DEBUG_INC(sdata->local->rx_handlers_queued); + break; + } + } +} + +/* main receive path */ + +static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, + struct ieee80211_rx_data *rx, + struct ieee80211_hdr *hdr) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); + int multicast = is_multicast_ether_addr(hdr->addr1); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + if (!bssid && !sdata->u.mgd.use_4addr) + return 0; + if (!multicast && + compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { + if (!(sdata->dev->flags & IFF_PROMISC)) + return 0; + rx->flags &= ~IEEE80211_RX_RA_MATCH; + } + break; + case NL80211_IFTYPE_ADHOC: + if (!bssid) + return 0; + if (ieee80211_is_beacon(hdr->frame_control)) { + return 1; + } + else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { + if (!(rx->flags & IEEE80211_RX_IN_SCAN)) + return 0; + rx->flags &= ~IEEE80211_RX_RA_MATCH; + } else if (!multicast && + compare_ether_addr(sdata->vif.addr, + hdr->addr1) != 0) { + if (!(sdata->dev->flags & IFF_PROMISC)) + return 0; + rx->flags &= ~IEEE80211_RX_RA_MATCH; + } else if (!rx->sta) { + int rate_idx; + if (status->flag & RX_FLAG_HT) + rate_idx = 0; /* TODO: HT rates */ + else + rate_idx = status->rate_idx; + rx->sta = ieee80211_ibss_add_sta(sdata, bssid, + hdr->addr2, BIT(rate_idx), GFP_ATOMIC); + } + break; + case NL80211_IFTYPE_MESH_POINT: + if (!multicast && + compare_ether_addr(sdata->vif.addr, + hdr->addr1) != 0) { + if (!(sdata->dev->flags & IFF_PROMISC)) + return 0; + + rx->flags &= ~IEEE80211_RX_RA_MATCH; + } + break; + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_AP: + if (!bssid) { + if (compare_ether_addr(sdata->vif.addr, + hdr->addr1)) + return 0; + } else if (!ieee80211_bssid_match(bssid, + sdata->vif.addr)) { + if (!(rx->flags & IEEE80211_RX_IN_SCAN)) + return 0; + rx->flags &= ~IEEE80211_RX_RA_MATCH; + } + break; + case NL80211_IFTYPE_WDS: + if (bssid || !ieee80211_is_data(hdr->frame_control)) + return 0; + if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) + return 0; + break; + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_UNSPECIFIED: + case __NL80211_IFTYPE_AFTER_LAST: + /* should never get here */ + WARN_ON(1); + break; + } + + return 1; +} + +/* + * This is the actual Rx frames handler. As it belongs to the Rx path it must + * be called with rcu_read_lock protection.
+ */ +static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ieee80211_rate *rate) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; + struct ieee80211_hdr *hdr; + struct ieee80211_rx_data rx; + int prepares; + struct ieee80211_sub_if_data *prev = NULL; + struct sk_buff *skb_new; + struct sta_info *sta, *tmp; + bool found_sta = false; + + hdr = (struct ieee80211_hdr *)skb->data; + memset(&rx, 0, sizeof(rx)); + rx.skb = skb; + rx.local = local; + + if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control)) + local->dot11ReceivedFragmentCount++; + + if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || + test_bit(SCAN_OFF_CHANNEL, &local->scanning))) + rx.flags |= IEEE80211_RX_IN_SCAN; + + ieee80211_parse_qos(&rx); + ieee80211_verify_alignment(&rx); + + if (ieee80211_is_data(hdr->frame_control)) { + for_each_sta_info(local, hdr->addr2, sta, tmp) { + rx.sta = sta; + found_sta = true; + rx.sdata = sta->sdata; + + rx.flags |= IEEE80211_RX_RA_MATCH; + prepares = prepare_for_handlers(rx.sdata, &rx, hdr); + if (prepares) { + if (status->flag & RX_FLAG_MMIC_ERROR) { + if (rx.flags & IEEE80211_RX_RA_MATCH) + ieee80211_rx_michael_mic_report(hdr, &rx); + } else + prev = rx.sdata; + } + } + } + if (!found_sta) { + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_MONITOR || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + continue; + + /* + * frame is destined for this interface, but if it's + * not also for the previous one we handle that after + * the loop to avoid copying the SKB once too much + */ + + if (!prev) { + prev = sdata; + continue; + } + + rx.sta = sta_info_get_bss(prev, hdr->addr2); + + rx.flags |= IEEE80211_RX_RA_MATCH; + prepares = prepare_for_handlers(prev, &rx, hdr); + + if (!prepares) + goto next; + + if (status->flag & RX_FLAG_MMIC_ERROR) { + rx.sdata = prev; + if (rx.flags & IEEE80211_RX_RA_MATCH) + ieee80211_rx_michael_mic_report(hdr, + &rx); + goto next; + } + + /* + * frame was destined for the previous interface + * so invoke RX handlers for it + */ + + skb_new = skb_copy(skb, GFP_ATOMIC); + if (!skb_new) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: failed to copy " + "multicast frame for %s\n", + wiphy_name(local->hw.wiphy), + prev->name); + goto next; + } + ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate); +next: + prev = sdata; + } + + if (prev) { + rx.sta = sta_info_get_bss(prev, hdr->addr2); + + rx.flags |= IEEE80211_RX_RA_MATCH; + prepares = prepare_for_handlers(prev, &rx, hdr); + + if (!prepares) + prev = NULL; + } + } + if (prev) + ieee80211_invoke_rx_handlers(prev, &rx, skb, rate); + else + dev_kfree_skb(skb); +} + +/* + * This is the receive path handler. It is called by a low level driver when an + * 802.11 MPDU is received from the hardware. 
+ */ +void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rate *rate = NULL; + struct ieee80211_supported_band *sband; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + + WARN_ON_ONCE(softirq_count() == 0); + + if (WARN_ON(status->band < 0 || + status->band >= IEEE80211_NUM_BANDS)) + goto drop; + + sband = local->hw.wiphy->bands[status->band]; + if (WARN_ON(!sband)) + goto drop; + + /* + * If we're suspending, it is possible although not too likely + * that we'd be receiving frames after having already partially + * quiesced the stack. We can't process such frames then since + * that might, for example, cause stations to be added or other + * driver callbacks be invoked. + */ + if (unlikely(local->quiescing || local->suspended)) + goto drop; + + /* + * The same happens when we're not even started, + * but that's worth a warning. + */ + if (WARN_ON(!local->started)) + goto drop; + + if (status->flag & RX_FLAG_HT) { + /* + * rate_idx is MCS index, which can be [0-76] as documented on: + * + * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n + * + * Anything else would be some sort of driver or hardware error. + * The driver should catch hardware errors. + */ + if (WARN((status->rate_idx < 0 || + status->rate_idx > 76), + "Rate marked as an HT rate but passed " + "status->rate_idx is not " + "an MCS index [0-76]: %d (0x%02x)\n", + status->rate_idx, + status->rate_idx)) + goto drop; + } else { + if (WARN_ON(status->rate_idx < 0 || + status->rate_idx >= sband->n_bitrates)) + goto drop; + rate = &sband->bitrates[status->rate_idx]; + } + + /* + * key references and virtual interfaces are protected using RCU + * and this requires that we are in a read-side RCU section during + * receive processing + */ + rcu_read_lock(); + + /* + * Frames with failed FCS/PLCP checksum are not returned, + * all other frames are returned without radiotap header + * if it was previously present. + * Also, frames with less than 16 bytes are dropped. + */ + skb = ieee80211_rx_monitor(local, skb, rate); + if (!skb) { + rcu_read_unlock(); + return; + } + + __ieee80211_rx_handle_packet(hw, skb, rate); + + rcu_read_unlock(); + + return; + drop: + kfree_skb(skb); +} +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) +EXPORT_SYMBOL(ieee80211_rx); +#else +EXPORT_SYMBOL(mac80211_ieee80211_rx); +#endif + + +/* This is a version of the rx handler that can be called from hard irq + * context. Post the skb on the queue and schedule the tasklet */ +void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_local *local = hw_to_local(hw); + + BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); + + skb->pkt_type = IEEE80211_RX_MSG; + skb_queue_tail(&local->skb_queue, skb); + tasklet_schedule(&local->tasklet); +} +EXPORT_SYMBOL(ieee80211_rx_irqsafe); diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c new file mode 100644 index 0000000..b822dce --- /dev/null +++ b/net/mac80211/scan.c @@ -0,0 +1,706 @@ +/* + * Scanning implementation + * + * Copyright 2003, Jouni Malinen + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "mesh.h" + +#define IEEE80211_PROBE_DELAY (HZ / 33) +#define IEEE80211_CHANNEL_TIME (HZ / 33) +#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8) + +struct ieee80211_bss * +ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, + u8 *ssid, u8 ssid_len) +{ + struct cfg80211_bss *cbss; + + cbss = cfg80211_get_bss(local->hw.wiphy, + ieee80211_get_channel(local->hw.wiphy, freq), + bssid, ssid, ssid_len, 0, 0); + if (!cbss) + return NULL; + return (void *)cbss->priv; +} + +static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss) +{ + struct ieee80211_bss *bss = (void *)cbss->priv; + + kfree(bss_mesh_id(bss)); + kfree(bss_mesh_cfg(bss)); +} + +void ieee80211_rx_bss_put(struct ieee80211_local *local, + struct ieee80211_bss *bss) +{ + if (!bss) + return; + cfg80211_put_bss(container_of((void *)bss, struct cfg80211_bss, priv)); +} + +static bool is_uapsd_supported(struct ieee802_11_elems *elems) +{ + u8 qos_info; + + if (elems->wmm_info && elems->wmm_info_len == 7 + && elems->wmm_info[5] == 1) + qos_info = elems->wmm_info[6]; + else if (elems->wmm_param && elems->wmm_param_len == 24 + && elems->wmm_param[5] == 1) + qos_info = elems->wmm_param[6]; + else + /* no valid wmm information or parameter element found */ + return false; + + return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD; +} + +struct ieee80211_bss * +ieee80211_bss_info_update(struct ieee80211_local *local, + struct ieee80211_rx_status *rx_status, + struct ieee80211_mgmt *mgmt, + size_t len, + struct ieee802_11_elems *elems, + struct ieee80211_channel *channel, + bool beacon) +{ + struct cfg80211_bss *cbss; + struct ieee80211_bss *bss; + int clen; + s32 signal = 0; + + if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) + signal = rx_status->signal * 100; + else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) + signal = (rx_status->signal * 100) / local->hw.max_signal; + + cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel, + mgmt, len, signal, GFP_ATOMIC); + + if (!cbss) + return NULL; + + cbss->free_priv = ieee80211_rx_bss_free; + bss = (void *)cbss->priv; + + /* save the ERP value so that it is available at association time */ + if (elems->erp_info && elems->erp_info_len >= 1) { + bss->erp_value = elems->erp_info[0]; + bss->has_erp_value = 1; + } + + if (elems->tim) { + struct ieee80211_tim_ie *tim_ie = + (struct ieee80211_tim_ie *)elems->tim; + bss->dtim_period = tim_ie->dtim_period; + } + + bss->supp_rates_len = 0; + if (elems->supp_rates) { + clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; + if (clen > elems->supp_rates_len) + clen = elems->supp_rates_len; + memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates, + clen); + bss->supp_rates_len += clen; + } + if (elems->ext_supp_rates) { + clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; + if (clen > elems->ext_supp_rates_len) + clen = elems->ext_supp_rates_len; + memcpy(&bss->supp_rates[bss->supp_rates_len], + elems->ext_supp_rates, clen); + bss->supp_rates_len += clen; + } + + bss->wmm_used = elems->wmm_param || elems->wmm_info; + bss->uapsd_supported = is_uapsd_supported(elems); + + if (!beacon) + bss->last_probe_resp = jiffies; + + return bss; +} + +ieee80211_rx_result +ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_mgmt *mgmt; + struct ieee80211_bss *bss; + u8 *elements; + struct ieee80211_channel *channel; + size_t 
baselen; + int freq; + __le16 fc; + bool presp, beacon = false; + struct ieee802_11_elems elems; + + if (skb->len < 2) + return RX_DROP_UNUSABLE; + + mgmt = (struct ieee80211_mgmt *) skb->data; + fc = mgmt->frame_control; + + if (ieee80211_is_ctl(fc)) + return RX_CONTINUE; + + if (skb->len < 24) + return RX_DROP_MONITOR; + + presp = ieee80211_is_probe_resp(fc); + if (presp) { + /* ignore ProbeResp to foreign address */ + if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN)) + return RX_DROP_MONITOR; + + presp = true; + elements = mgmt->u.probe_resp.variable; + baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + } else { + beacon = ieee80211_is_beacon(fc); + baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable); + elements = mgmt->u.beacon.variable; + } + + if (!presp && !beacon) + return RX_CONTINUE; + + if (baselen > skb->len) + return RX_DROP_MONITOR; + + ieee802_11_parse_elems(elements, skb->len - baselen, &elems); + + if (elems.ds_params && elems.ds_params_len == 1) + freq = ieee80211_channel_to_frequency(elems.ds_params[0]); + else + freq = rx_status->freq; + + channel = ieee80211_get_channel(sdata->local->hw.wiphy, freq); + + if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) + return RX_DROP_MONITOR; + + bss = ieee80211_bss_info_update(sdata->local, rx_status, + mgmt, skb->len, &elems, + channel, beacon); + if (bss) + ieee80211_rx_bss_put(sdata->local, bss); + + dev_kfree_skb(skb); + return RX_QUEUED; +} + +/* return false if no more work */ +static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) +{ + struct cfg80211_scan_request *req = local->scan_req; + enum ieee80211_band band; + int i, ielen, n_chans; + + do { + if (local->hw_scan_band == IEEE80211_NUM_BANDS) + return false; + + band = local->hw_scan_band; + n_chans = 0; + for (i = 0; i < req->n_channels; i++) { + if (req->channels[i]->band == band) { + local->hw_scan_req->channels[n_chans] = + req->channels[i]; + n_chans++; + } + } + + local->hw_scan_band++; + } while (!n_chans); + + local->hw_scan_req->n_channels = n_chans; + + ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie, + req->ie, req->ie_len, band); + local->hw_scan_req->ie_len = ielen; + + return true; +} + +void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) +{ + struct ieee80211_local *local = hw_to_local(hw); + bool was_hw_scan; + + mutex_lock(&local->scan_mtx); + + /* + * It's ok to abort a not-yet-running scan (that + * we have one at all will be verified by checking + * local->scan_req next), but not to complete it + * successfully. 
+ */ + if (WARN_ON(!local->scanning && !aborted)) + aborted = true; + + if (WARN_ON(!local->scan_req)) { + mutex_unlock(&local->scan_mtx); + return; + } + + was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning); + if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) { + ieee80211_queue_delayed_work(&local->hw, + &local->scan_work, 0); + mutex_unlock(&local->scan_mtx); + return; + } + + kfree(local->hw_scan_req); + local->hw_scan_req = NULL; + + if (local->scan_req != local->int_scan_req) + cfg80211_scan_done(local->scan_req, aborted); + local->scan_req = NULL; + local->scan_sdata = NULL; + + local->scanning = 0; + local->scan_channel = NULL; + + /* we only have to protect scan_req and hw/sw scan */ + mutex_unlock(&local->scan_mtx); + + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + if (was_hw_scan) + goto done; + + ieee80211_configure_filter(local); + + drv_sw_scan_complete(local); + + ieee80211_offchannel_return(local, true); + + done: + ieee80211_recalc_idle(local); + ieee80211_mlme_notify_scan_completed(local); + ieee80211_ibss_notify_scan_completed(local); + ieee80211_mesh_notify_scan_completed(local); + ieee80211_queue_work(&local->hw, &local->work_work); +} +EXPORT_SYMBOL(ieee80211_scan_completed); + +static int ieee80211_start_sw_scan(struct ieee80211_local *local) +{ + /* + * Hardware/driver doesn't support hw_scan, so use software + * scanning instead. First send a nullfunc frame with power save + * bit on so that AP will buffer the frames for us while we are not + * listening, then send probe requests to each channel and wait for + * the responses. After all channels are scanned, tune back to the + * original channel and send a nullfunc frame with power save bit + * off to trigger the AP to send us all the buffered frames. + * + * Note that while local->sw_scanning is true everything else but + * nullfunc frames and probe requests will be dropped in + * ieee80211_tx_h_check_assoc(). + */ + drv_sw_scan_start(local); + + ieee80211_offchannel_stop_beaconing(local); + + local->next_scan_state = SCAN_DECISION; + local->scan_channel_idx = 0; + + drv_flush(local, false); + + ieee80211_configure_filter(local); + + ieee80211_queue_delayed_work(&local->hw, + &local->scan_work, + IEEE80211_CHANNEL_TIME); + + return 0; +} + + +static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, + struct cfg80211_scan_request *req) +{ + struct ieee80211_local *local = sdata->local; + int rc; + + if (local->scan_req) + return -EBUSY; + + if (!list_empty(&local->work_list)) { + /* wait for the work to finish/time out */ + local->scan_req = req; + local->scan_sdata = sdata; + return 0; + } + + if (local->ops->hw_scan) { + u8 *ies; + + local->hw_scan_req = kmalloc( + sizeof(*local->hw_scan_req) + + req->n_channels * sizeof(req->channels[0]) + + 2 + IEEE80211_MAX_SSID_LEN + local->scan_ies_len + + req->ie_len, GFP_KERNEL); + if (!local->hw_scan_req) + return -ENOMEM; + + local->hw_scan_req->ssids = req->ssids; + local->hw_scan_req->n_ssids = req->n_ssids; + ies = (u8 *)local->hw_scan_req + + sizeof(*local->hw_scan_req) + + req->n_channels * sizeof(req->channels[0]); + local->hw_scan_req->ie = ies; + + local->hw_scan_band = 0; + + /* + * After allocating local->hw_scan_req, we must + * go through until ieee80211_prep_hw_scan(), so + * anything that might be changed here and leave + * this function early must not go after this + * allocation. 
+ */ + } + + local->scan_req = req; + local->scan_sdata = sdata; + + if (local->ops->hw_scan) + __set_bit(SCAN_HW_SCANNING, &local->scanning); + else + __set_bit(SCAN_SW_SCANNING, &local->scanning); + + /* + * Kicking off the scan need not be protected, + * only the scan variable stuff, since now + * local->scan_req is assigned and other callers + * will abort their scan attempts. + * + * This avoids too many locking dependencies + * so that the scan completed calls have more + * locking freedom. + */ + + ieee80211_recalc_idle(local); + mutex_unlock(&local->scan_mtx); + + if (local->ops->hw_scan) { + WARN_ON(!ieee80211_prep_hw_scan(local)); + rc = drv_hw_scan(local, local->hw_scan_req); + } else + rc = ieee80211_start_sw_scan(local); + + mutex_lock(&local->scan_mtx); + + if (rc) { + kfree(local->hw_scan_req); + local->hw_scan_req = NULL; + local->scanning = 0; + + ieee80211_recalc_idle(local); + + local->scan_req = NULL; + local->scan_sdata = NULL; + } + + return rc; +} + +static int ieee80211_scan_state_decision(struct ieee80211_local *local, + unsigned long *next_delay) +{ + bool associated = false; + struct ieee80211_sub_if_data *sdata; + + /* if no more bands/channels left, complete scan and advance to the idle state */ + if (local->scan_channel_idx >= local->scan_req->n_channels) { + ieee80211_scan_completed(&local->hw, false); + return 1; + } + + /* check if at least one STA interface is associated */ + mutex_lock(&local->iflist_mtx); + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + if (sdata->u.mgd.associated) { + associated = true; + break; + } + } + } + mutex_unlock(&local->iflist_mtx); + + if (local->scan_channel) { + /* + * we're currently scanning a different channel, let's + * switch back to the operating channel now if at least + * one interface is associated. Otherwise just scan the + * next channel + */ + if (associated) + local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; + else + local->next_scan_state = SCAN_SET_CHANNEL; + } else { + /* + * we're on the operating channel currently, let's + * leave that channel now to scan another one + */ + local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL; + } + + *next_delay = 0; + return 0; +} + +static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local, + unsigned long *next_delay) +{ + ieee80211_offchannel_stop_station(local); + + __set_bit(SCAN_OFF_CHANNEL, &local->scanning); + + /* + * What if the nullfunc frames didn't arrive? + */ + drv_flush(local, false); + if (local->ops->flush) + *next_delay = 0; + else + *next_delay = HZ / 10; + + /* advance to the next channel to be scanned */ + local->next_scan_state = SCAN_SET_CHANNEL; +} + +static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local, + unsigned long *next_delay) +{ + /* switch back to the operating channel */ + local->scan_channel = NULL; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + + /* + * Only re-enable station mode interface now; beaconing will be + * re-enabled once the full scan has been completed. 
+ */ + ieee80211_offchannel_return(local, false); + + __clear_bit(SCAN_OFF_CHANNEL, &local->scanning); + + *next_delay = HZ / 5; + local->next_scan_state = SCAN_DECISION; +} + +static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, + unsigned long *next_delay) +{ + int skip; + struct ieee80211_channel *chan; + + skip = 0; + chan = local->scan_req->channels[local->scan_channel_idx]; + + local->scan_channel = chan; + if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) + skip = 1; + + /* advance state machine to next channel/band */ + local->scan_channel_idx++; + + if (skip) { + /* if we skip this channel return to the decision state */ + local->next_scan_state = SCAN_DECISION; + return; + } + + /* + * Probe delay is used to update the NAV, cf. 11.1.3.2.2 + * (which unfortunately doesn't say _why_ step a) is done, + * but it waits for the probe delay or until a frame is + * received - and the received frame would update the NAV). + * For now, we do not support waiting until a frame is + * received. + * + * In any case, it is not necessary for a passive scan. + */ + if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN || + !local->scan_req->n_ssids) { + *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; + local->next_scan_state = SCAN_DECISION; + return; + } + + /* active scan, send probes */ + *next_delay = IEEE80211_PROBE_DELAY; + local->next_scan_state = SCAN_SEND_PROBE; +} + +static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, + unsigned long *next_delay) +{ + int i; + struct ieee80211_sub_if_data *sdata = local->scan_sdata; + + for (i = 0; i < local->scan_req->n_ssids; i++) + ieee80211_send_probe_req( + sdata, NULL, + local->scan_req->ssids[i].ssid, + local->scan_req->ssids[i].ssid_len, + local->scan_req->ie, local->scan_req->ie_len); + + /* + * After sending probe requests, wait for probe responses + * on the channel. + */ + *next_delay = IEEE80211_CHANNEL_TIME; + local->next_scan_state = SCAN_DECISION; +} + +void ieee80211_scan_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, scan_work.work); + struct ieee80211_sub_if_data *sdata = local->scan_sdata; + unsigned long next_delay = 0; + + mutex_lock(&local->scan_mtx); + if (!sdata || !local->scan_req) { + mutex_unlock(&local->scan_mtx); + return; + } + + if (local->hw_scan_req) { + int rc = drv_hw_scan(local, local->hw_scan_req); + mutex_unlock(&local->scan_mtx); + if (rc) + ieee80211_scan_completed(&local->hw, true); + return; + } + + if (local->scan_req && !local->scanning) { + struct cfg80211_scan_request *req = local->scan_req; + int rc; + + local->scan_req = NULL; + local->scan_sdata = NULL; + + rc = __ieee80211_start_scan(sdata, req); + mutex_unlock(&local->scan_mtx); + + if (rc) + ieee80211_scan_completed(&local->hw, true); + return; + } + + mutex_unlock(&local->scan_mtx); + + /* + * Avoid re-scheduling when the sdata is going away. 
+ */ + if (!ieee80211_sdata_running(sdata)) { + ieee80211_scan_completed(&local->hw, true); + return; + } + + /* + * as long as no delay is required advance immediately + * without scheduling a new work + */ + do { + switch (local->next_scan_state) { + case SCAN_DECISION: + if (ieee80211_scan_state_decision(local, &next_delay)) + return; + break; + case SCAN_SET_CHANNEL: + ieee80211_scan_state_set_channel(local, &next_delay); + break; + case SCAN_SEND_PROBE: + ieee80211_scan_state_send_probe(local, &next_delay); + break; + case SCAN_LEAVE_OPER_CHANNEL: + ieee80211_scan_state_leave_oper_channel(local, &next_delay); + break; + case SCAN_ENTER_OPER_CHANNEL: + ieee80211_scan_state_enter_oper_channel(local, &next_delay); + break; + } + } while (next_delay == 0); + + ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay); +} + +int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, + struct cfg80211_scan_request *req) +{ + int res; + + mutex_lock(&sdata->local->scan_mtx); + res = __ieee80211_start_scan(sdata, req); + mutex_unlock(&sdata->local->scan_mtx); + + return res; +} + +int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, + const u8 *ssid, u8 ssid_len) +{ + struct ieee80211_local *local = sdata->local; + int ret = -EBUSY; + + mutex_lock(&local->scan_mtx); + + /* busy scanning */ + if (local->scan_req) + goto unlock; + + memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN); + local->int_scan_req->ssids[0].ssid_len = ssid_len; + + ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req); + unlock: + mutex_unlock(&local->scan_mtx); + return ret; +} + +void ieee80211_scan_cancel(struct ieee80211_local *local) +{ + bool abortscan; + + cancel_delayed_work_sync(&local->scan_work); + + /* + * Only call this function when a scan can't be + * queued -- mostly at suspend under RTNL. + */ + mutex_lock(&local->scan_mtx); + abortscan = test_bit(SCAN_SW_SCANNING, &local->scanning) || + (!local->scanning && local->scan_req); + mutex_unlock(&local->scan_mtx); + + if (abortscan) + ieee80211_scan_completed(&local->hw, true); +} diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c new file mode 100644 index 0000000..7733f66 --- /dev/null +++ b/net/mac80211/spectmgmt.c @@ -0,0 +1,86 @@ +/* + * spectrum management + * + * Copyright 2003, Jouni Malinen + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * Copyright 2007-2008, Intel Corporation + * Copyright 2008, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include "ieee80211_i.h" +#include "sta_info.h" +#include "wme.h" + +static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata, + struct ieee80211_msrment_ie *request_ie, + const u8 *da, const u8 *bssid, + u8 dialog_token) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *msr_report; + + skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + + sizeof(struct ieee80211_msrment_ie)); + + if (!skb) { + printk(KERN_ERR "%s: failed to allocate buffer for " + "measurement report frame\n", sdata->name); + return; + } + + skb_reserve(skb, local->hw.extra_tx_headroom); + msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); + memset(msr_report, 0, 24); + memcpy(msr_report->da, da, ETH_ALEN); + memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN); + memcpy(msr_report->bssid, bssid, ETH_ALEN); + msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + + skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement)); + msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; + msr_report->u.action.u.measurement.action_code = + WLAN_ACTION_SPCT_MSR_RPRT; + msr_report->u.action.u.measurement.dialog_token = dialog_token; + + msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT; + msr_report->u.action.u.measurement.length = + sizeof(struct ieee80211_msrment_ie); + + memset(&msr_report->u.action.u.measurement.msr_elem, 0, + sizeof(struct ieee80211_msrment_ie)); + msr_report->u.action.u.measurement.msr_elem.token = request_ie->token; + msr_report->u.action.u.measurement.msr_elem.mode |= + IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; + msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; + + ieee80211_tx_skb(sdata, skb); +} + +void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + /* + * Ignoring measurement request is spec violation. + * Mandatory measurements must be reported optional + * measurements might be refused or reported incapable + * For now just refuse + * TODO: Answer basic measurement as unmeasured + */ + ieee80211_send_refuse_measurement_request(sdata, + &mgmt->u.action.u.measurement.msr_elem, + mgmt->sa, mgmt->bssid, + mgmt->u.action.u.measurement.dialog_token); +} diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c new file mode 100644 index 0000000..211c475 --- /dev/null +++ b/net/mac80211/sta_info.c @@ -0,0 +1,987 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2006-2007 Jiri Benc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "sta_info.h" +#include "debugfs_sta.h" +#include "mesh.h" + +/** + * DOC: STA information lifetime rules + * + * STA info structures (&struct sta_info) are managed in a hash table + * for faster lookup and a list for iteration. They are managed using + * RCU, i.e. access to the list and hash table is protected by RCU. + * + * Upon allocating a STA info structure with sta_info_alloc(), the caller + * owns that structure. 
It must then insert it into the hash table using + * either sta_info_insert() or sta_info_insert_rcu(); only in the latter + * case (which acquires an rcu read section but must not be called from + * within one) will the pointer still be valid after the call. Note that + * the caller may not do much with the STA info before inserting it, in + * particular, it may not start any mesh peer link management or add + * encryption keys. + * + * When the insertion fails (sta_info_insert() returns non-zero), the + * structure will have been freed by sta_info_insert()! + * + * Station entries are added by mac80211 when you establish a link with a + * peer. This means different things for the different types of interfaces + * we support. For a regular station this means we add the AP sta when we + * receive an association response from the AP. For IBSS this occurs when we + * get to know about a peer on the same IBSS. For WDS we add the sta for + * the peer immediately upon device open. When using AP mode we add stations + * upon request from userspace through nl80211. + * + * In order to remove a STA info structure, various sta_info_destroy_*() + * calls are available. + * + * There is no concept of ownership on a STA entry, each structure is + * owned by the global hash table/list until it is removed. All users of + * the structure need to be RCU protected so that the structure won't be + * freed before they are done using it. + */ + +/* Caller must hold local->sta_lock */ +static int sta_info_hash_del(struct ieee80211_local *local, + struct sta_info *sta) +{ + struct sta_info *s; + + s = local->sta_hash[STA_HASH(sta->sta.addr)]; + if (!s) + return -ENOENT; + if (s == sta) { + rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], + s->hnext); + return 0; + } + + while (s->hnext && s->hnext != sta) + s = s->hnext; + if (s->hnext) { + rcu_assign_pointer(s->hnext, sta->hnext); + return 0; + } + + return -ENOENT; +} + +/* protected by RCU */ +struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, + const u8 *addr) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); + while (sta) { + if (sta->sdata == sdata && + memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) + break; + sta = rcu_dereference(sta->hnext); + } + return sta; +} + +/* + * Get sta info either from the specified interface + * or from one of its vlans + */ +struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, + const u8 *addr) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); + while (sta) { + if ((sta->sdata == sdata || + sta->sdata->bss == sdata->bss) && + memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) + break; + sta = rcu_dereference(sta->hnext); + } + return sta; +} + +struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, + int idx) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + int i = 0; + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (sdata != sta->sdata) + continue; + if (i < idx) { + ++i; + continue; + } + return sta; + } + + return NULL; +} + +/** + * __sta_info_free - internal STA free helper + * + * @local: pointer to the global information + * @sta: STA info to free + * + * This function must undo everything done by sta_info_alloc() + * that may happen before sta_info_insert().
+ */ +static void __sta_info_free(struct ieee80211_local *local, + struct sta_info *sta) +{ + if (sta->rate_ctrl) { + rate_control_free_sta(sta); + rate_control_put(sta->rate_ctrl); + } + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: Destroyed STA %pM\n", + wiphy_name(local->hw.wiphy), sta->sta.addr); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + + kfree(sta); +} + +/* Caller must hold local->sta_lock */ +static void sta_info_hash_add(struct ieee80211_local *local, + struct sta_info *sta) +{ + sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)]; + rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta); +} + +static void sta_unblock(struct work_struct *wk) +{ + struct sta_info *sta; + + sta = container_of(wk, struct sta_info, drv_unblock_wk); + + if (sta->dead) + return; + + if (!test_sta_flags(sta, WLAN_STA_PS_STA)) + ieee80211_sta_ps_deliver_wakeup(sta); + else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) + ieee80211_sta_ps_deliver_poll_response(sta); +} + +static int sta_prepare_rate_control(struct ieee80211_local *local, + struct sta_info *sta, gfp_t gfp) +{ + if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) + return 0; + + sta->rate_ctrl = rate_control_get(local->rate_ctrl); + sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, + &sta->sta, gfp); + if (!sta->rate_ctrl_priv) { + rate_control_put(sta->rate_ctrl); + return -ENOMEM; + } + + return 0; +} + +struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, + u8 *addr, gfp_t gfp) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + int i; + + sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp); + if (!sta) + return NULL; + + spin_lock_init(&sta->lock); + spin_lock_init(&sta->flaglock); + INIT_WORK(&sta->drv_unblock_wk, sta_unblock); + + memcpy(sta->sta.addr, addr, ETH_ALEN); + sta->local = local; + sta->sdata = sdata; + + if (sta_prepare_rate_control(local, sta, gfp)) { + kfree(sta); + return NULL; + } + + for (i = 0; i < STA_TID_NUM; i++) { + /* timer_to_tid must be initialized with identity mapping to + * enable session_timer's data differentiation. 
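+ * The RX session timer is armed with the address of one of these
+ * array entries as its data, so its expiry handler can recover the
+ * TID (and from it the owning sta_info) by dereferencing that pointer;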
refer to + * sta_rx_agg_session_timer_expired for useage */ + sta->timer_to_tid[i] = i; + /* rx */ + sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; + sta->ampdu_mlme.tid_rx[i] = NULL; + /* tx */ + sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE; + sta->ampdu_mlme.tid_tx[i] = NULL; + sta->ampdu_mlme.addba_req_num[i] = 0; + } + skb_queue_head_init(&sta->ps_tx_buf); + skb_queue_head_init(&sta->tx_filtered); + + for (i = 0; i < NUM_RX_DATA_QUEUES; i++) + sta->last_seq_ctrl[i] = cpu_to_le16(USHORT_MAX); + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: Allocated STA %pM\n", + wiphy_name(local->hw.wiphy), sta->sta.addr); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + +#ifdef CONFIG_MAC80211_MESH + sta->plink_state = PLINK_LISTEN; + init_timer(&sta->plink_timer); +#endif + + return sta; +} + +static int sta_info_finish_insert(struct sta_info *sta, bool async) +{ + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct station_info sinfo; + unsigned long flags; + int err = 0; + + WARN_ON(!mutex_is_locked(&local->sta_mtx)); + + /* notify driver */ + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + err = drv_sta_add(local, sdata, &sta->sta); + if (err) { + if (!async) + return err; + printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)" + " - keeping it anyway.\n", + sdata->name, sta->sta.addr, err); + } else { + sta->uploaded = true; +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (async) + printk(KERN_DEBUG "%s: Finished adding IBSS STA %pM\n", + wiphy_name(local->hw.wiphy), sta->sta.addr); +#endif + } + + sdata = sta->sdata; + + if (!async) { + local->num_sta++; + local->sta_generation++; + smp_mb(); + + /* make the station visible */ + spin_lock_irqsave(&local->sta_lock, flags); + sta_info_hash_add(local, sta); + spin_unlock_irqrestore(&local->sta_lock, flags); + } + + list_add(&sta->list, &local->sta_list); + + ieee80211_sta_debugfs_add(sta); + rate_control_add_sta_debugfs(sta); + + sinfo.filled = 0; + sinfo.generation = local->sta_generation; + cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL); + + + return 0; +} + +static void sta_info_finish_pending(struct ieee80211_local *local) +{ + struct sta_info *sta; + unsigned long flags; + + spin_lock_irqsave(&local->sta_lock, flags); + while (!list_empty(&local->sta_pending_list)) { + sta = list_first_entry(&local->sta_pending_list, + struct sta_info, list); + list_del(&sta->list); + spin_unlock_irqrestore(&local->sta_lock, flags); + + sta_info_finish_insert(sta, true); + + spin_lock_irqsave(&local->sta_lock, flags); + } + spin_unlock_irqrestore(&local->sta_lock, flags); +} + +static void sta_info_finish_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, sta_finish_work); + + mutex_lock(&local->sta_mtx); + sta_info_finish_pending(local); + mutex_unlock(&local->sta_mtx); +} + +int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) +{ + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + unsigned long flags; + int err = 0; + + /* + * Can't be a WARN_ON because it can be triggered through a race: + * something inserts a STA (on one CPU) without holding the RTNL + * and another CPU turns off the net device. 
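+ * In that case the check below simply fails the insertion with
+ * -ENETDOWN instead of warning.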
+ */ + if (unlikely(!ieee80211_sdata_running(sdata))) { + err = -ENETDOWN; + rcu_read_lock(); + goto out_free; + } + + if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 || + is_multicast_ether_addr(sta->sta.addr))) { + err = -EINVAL; + rcu_read_lock(); + goto out_free; + } + + /* + * In ad-hoc mode, we sometimes need to insert stations + * from tasklet context from the RX path. To avoid races, + * always do so in that case -- see the comment below. + */ + if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { + spin_lock_irqsave(&local->sta_lock, flags); + /* check if STA exists already */ + if (sta_info_get_bss(sdata, sta->sta.addr)) { + spin_unlock_irqrestore(&local->sta_lock, flags); + rcu_read_lock(); + err = -EEXIST; + goto out_free; + } + + local->num_sta++; + local->sta_generation++; + smp_mb(); + sta_info_hash_add(local, sta); + + list_add_tail(&sta->list, &local->sta_pending_list); + + rcu_read_lock(); + spin_unlock_irqrestore(&local->sta_lock, flags); + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: Added IBSS STA %pM\n", + wiphy_name(local->hw.wiphy), sta->sta.addr); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + + ieee80211_queue_work(&local->hw, &local->sta_finish_work); + + return 0; + } + + /* + * On first glance, this will look racy, because the code + * below this point, which inserts a station with sleeping, + * unlocks the sta_lock between checking existence in the + * hash table and inserting into it. + * + * However, it is not racy against itself because it keeps + * the mutex locked. It still seems to race against the + * above code that atomically inserts the station... That, + * however, is not true because the above code can only + * be invoked for IBSS interfaces, and the below code will + * not be -- and the two do not race against each other as + * the hash table also keys off the interface. + */ + + might_sleep(); + + mutex_lock(&local->sta_mtx); + + spin_lock_irqsave(&local->sta_lock, flags); + /* check if STA exists already */ + if (sta_info_get_bss(sdata, sta->sta.addr)) { + spin_unlock_irqrestore(&local->sta_lock, flags); + rcu_read_lock(); + err = -EEXIST; + goto out_free; + } + + spin_unlock_irqrestore(&local->sta_lock, flags); + + err = sta_info_finish_insert(sta, false); + if (err) { + mutex_unlock(&local->sta_mtx); + rcu_read_lock(); + goto out_free; + } + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: Inserted STA %pM\n", + wiphy_name(local->hw.wiphy), sta->sta.addr); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + + /* move reference to rcu-protected */ + rcu_read_lock(); + mutex_unlock(&local->sta_mtx); + + if (ieee80211_vif_is_mesh(&sdata->vif)) + mesh_accept_plinks_update(sdata); + + return 0; + out_free: + BUG_ON(!err); + __sta_info_free(local, sta); + return err; +} + +int sta_info_insert(struct sta_info *sta) +{ + int err = sta_info_insert_rcu(sta); + + rcu_read_unlock(); + + return err; +} + +static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) +{ + /* + * This format has been mandated by the IEEE specifications, + * so this line may not be changed to use the __set_bit() format. + */ + bss->tim[aid / 8] |= (1 << (aid % 8)); +} + +static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid) +{ + /* + * This format has been mandated by the IEEE specifications, + * so this line may not be changed to use the __clear_bit() format. 
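+ * As a worked example: AID 13 lives in octet 13 / 8 == 1 of the TIM
+ * bitmap and is cleared below by masking out bit 13 % 8 == 5 there.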
+ */ + bss->tim[aid / 8] &= ~(1 << (aid % 8)); +} + +static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss, + struct sta_info *sta) +{ + BUG_ON(!bss); + + __bss_tim_set(bss, sta->sta.aid); + + if (sta->local->ops->set_tim) { + sta->local->tim_in_locked_section = true; + drv_set_tim(sta->local, &sta->sta, true); + sta->local->tim_in_locked_section = false; + } +} + +void sta_info_set_tim_bit(struct sta_info *sta) +{ + unsigned long flags; + + BUG_ON(!sta->sdata->bss); + + spin_lock_irqsave(&sta->local->sta_lock, flags); + __sta_info_set_tim_bit(sta->sdata->bss, sta); + spin_unlock_irqrestore(&sta->local->sta_lock, flags); +} + +static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss, + struct sta_info *sta) +{ + BUG_ON(!bss); + + __bss_tim_clear(bss, sta->sta.aid); + + if (sta->local->ops->set_tim) { + sta->local->tim_in_locked_section = true; + drv_set_tim(sta->local, &sta->sta, false); + sta->local->tim_in_locked_section = false; + } +} + +void sta_info_clear_tim_bit(struct sta_info *sta) +{ + unsigned long flags; + + BUG_ON(!sta->sdata->bss); + + spin_lock_irqsave(&sta->local->sta_lock, flags); + __sta_info_clear_tim_bit(sta->sdata->bss, sta); + spin_unlock_irqrestore(&sta->local->sta_lock, flags); +} + +static int sta_info_buffer_expired(struct sta_info *sta, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info; + int timeout; + + if (!skb) + return 0; + + info = IEEE80211_SKB_CB(skb); + + /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ + timeout = (sta->listen_interval * + sta->sdata->vif.bss_conf.beacon_int * + 32 / 15625) * HZ; + if (timeout < STA_TX_BUFFER_EXPIRE) + timeout = STA_TX_BUFFER_EXPIRE; + return time_after(jiffies, info->control.jiffies + timeout); +} + + +static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, + struct sta_info *sta) +{ + unsigned long flags; + struct sk_buff *skb; + struct ieee80211_sub_if_data *sdata; + + if (skb_queue_empty(&sta->ps_tx_buf)) + return; + + for (;;) { + spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); + skb = skb_peek(&sta->ps_tx_buf); + if (sta_info_buffer_expired(sta, skb)) + skb = __skb_dequeue(&sta->ps_tx_buf); + else + skb = NULL; + spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags); + + if (!skb) + break; + + sdata = sta->sdata; + local->total_ps_buffered--; +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n", + sta->sta.addr); +#endif + dev_kfree_skb(skb); + + if (skb_queue_empty(&sta->ps_tx_buf)) + sta_info_clear_tim_bit(sta); + } +} + +static int __must_check __sta_info_destroy(struct sta_info *sta) +{ + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct sk_buff *skb; + unsigned long flags; + int ret, i; + + might_sleep(); + + if (!sta) + return -ENOENT; + + local = sta->local; + sdata = sta->sdata; + + spin_lock_irqsave(&local->sta_lock, flags); + ret = sta_info_hash_del(local, sta); + /* this might still be the pending list ... which is fine */ + if (!ret) + list_del(&sta->list); + spin_unlock_irqrestore(&local->sta_lock, flags); + if (ret) + return ret; + + if (sta->key) { + ieee80211_key_free(sta->key); + /* + * We have only unlinked the key, and actually destroying it + * may mean it is removed from hardware which requires that + * the key->sta pointer is still valid, so flush the key todo + * list here. + * + * ieee80211_key_todo() will synchronize_rcu() so after this + * nothing can reference this sta struct any more. 
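+ * The WARN_ON() after the call below checks that the station's key
+ * pointer really has been cleared by then.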
+ */ + ieee80211_key_todo(); + + WARN_ON(sta->key); + } + + sta->dead = true; + + if (test_and_clear_sta_flags(sta, + WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) { + BUG_ON(!sdata->bss); + + atomic_dec(&sdata->bss->num_sta_ps); + __sta_info_clear_tim_bit(sdata->bss, sta); + } + + local->num_sta--; + local->sta_generation++; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + rcu_assign_pointer(sdata->u.vlan.sta, NULL); + + if (sta->uploaded) { + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + drv_sta_remove(local, sdata, &sta->sta); + sdata = sta->sdata; + } + +#ifdef CONFIG_MAC80211_MESH + if (ieee80211_vif_is_mesh(&sdata->vif)) { + mesh_accept_plinks_update(sdata); + del_timer(&sta->plink_timer); + } +#endif + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: Removed STA %pM\n", + wiphy_name(local->hw.wiphy), sta->sta.addr); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + cancel_work_sync(&sta->drv_unblock_wk); + + rate_control_remove_sta_debugfs(sta); + ieee80211_sta_debugfs_remove(sta); + +#ifdef CONFIG_MAC80211_MESH + if (ieee80211_vif_is_mesh(&sta->sdata->vif)) { + mesh_plink_deactivate(sta); + del_timer_sync(&sta->plink_timer); + } +#endif + + while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { + local->total_ps_buffered--; + dev_kfree_skb_any(skb); + } + + while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) + dev_kfree_skb_any(skb); + + for (i = 0; i < STA_TID_NUM; i++) { + struct tid_ampdu_rx *tid_rx; + struct tid_ampdu_tx *tid_tx; + + spin_lock_bh(&sta->lock); + tid_rx = sta->ampdu_mlme.tid_rx[i]; + /* Make sure timer won't free the tid_rx struct, see below */ + if (tid_rx) + tid_rx->shutdown = true; + + spin_unlock_bh(&sta->lock); + + /* + * Outside spinlock - shutdown is true now so that the timer + * won't free tid_rx, we have to do that now. Can't let the + * timer do it because we have to sync the timer outside the + * lock that it takes itself. + */ + if (tid_rx) { + del_timer_sync(&tid_rx->session_timer); + kfree(tid_rx); + } + + /* + * No need to do such complications for TX agg sessions, the + * path leading to freeing the tid_tx struct goes via a call + * from the driver, and thus needs to look up the sta struct + * again, which cannot be found when we get here. Hence, we + * just need to delete the timer and free the aggregation + * info; we won't be telling the peer about it then but that + * doesn't matter if we're not talking to it again anyway. + */ + tid_tx = sta->ampdu_mlme.tid_tx[i]; + if (tid_tx) { + del_timer_sync(&tid_tx->addba_resp_timer); + /* + * STA removed while aggregation session being + * started? Bit odd, but purge frames anyway. 
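+ * skb_queue_purge() below frees every frame still sitting on the
+ * session's pending queue before the state itself is released.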
+ */ + skb_queue_purge(&tid_tx->pending); + kfree(tid_tx); + } + } + + __sta_info_free(local, sta); + + return 0; +} + +int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr) +{ + struct sta_info *sta; + int ret; + + mutex_lock(&sdata->local->sta_mtx); + sta = sta_info_get(sdata, addr); + ret = __sta_info_destroy(sta); + mutex_unlock(&sdata->local->sta_mtx); + + return ret; +} + +int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, + const u8 *addr) +{ + struct sta_info *sta; + int ret; + + mutex_lock(&sdata->local->sta_mtx); + sta = sta_info_get_bss(sdata, addr); + ret = __sta_info_destroy(sta); + mutex_unlock(&sdata->local->sta_mtx); + + return ret; +} + +static void sta_info_cleanup(unsigned long data) +{ + struct ieee80211_local *local = (struct ieee80211_local *) data; + struct sta_info *sta; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, list) + sta_info_cleanup_expire_buffered(local, sta); + rcu_read_unlock(); + + if (local->quiescing) + return; + + local->sta_cleanup.expires = + round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); + add_timer(&local->sta_cleanup); +} + +void sta_info_init(struct ieee80211_local *local) +{ + spin_lock_init(&local->sta_lock); + mutex_init(&local->sta_mtx); + INIT_LIST_HEAD(&local->sta_list); + INIT_LIST_HEAD(&local->sta_pending_list); + INIT_WORK(&local->sta_finish_work, sta_info_finish_work); + + setup_timer(&local->sta_cleanup, sta_info_cleanup, + (unsigned long)local); + local->sta_cleanup.expires = + round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); +} + +int sta_info_start(struct ieee80211_local *local) +{ + add_timer(&local->sta_cleanup); + return 0; +} + +void sta_info_stop(struct ieee80211_local *local) +{ + del_timer(&local->sta_cleanup); + sta_info_flush(local, NULL); +} + +/** + * sta_info_flush - flush matching STA entries from the STA table + * + * Returns the number of removed STA entries. + * + * @local: local interface data + * @sdata: matching rule for the net device (sta->dev) or %NULL to match all STAs + */ +int sta_info_flush(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata) +{ + struct sta_info *sta, *tmp; + int ret = 0; + + might_sleep(); + + mutex_lock(&local->sta_mtx); + + sta_info_finish_pending(local); + + list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { + if (!sdata || sdata == sta->sdata) + WARN_ON(__sta_info_destroy(sta)); + } + mutex_unlock(&local->sta_mtx); + + return ret; +} + +void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, + unsigned long exp_time) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta, *tmp; + + mutex_lock(&local->sta_mtx); + list_for_each_entry_safe(sta, tmp, &local->sta_list, list) + if (time_after(jiffies, sta->last_rx + exp_time)) { +#ifdef CONFIG_MAC80211_IBSS_DEBUG + printk(KERN_DEBUG "%s: expiring inactive STA %pM\n", + sdata->name, sta->sta.addr); +#endif + WARN_ON(__sta_info_destroy(sta)); + } + mutex_unlock(&local->sta_mtx); +} + +struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw, + const u8 *addr) +{ + struct sta_info *sta, *nxt; + + /* Just return a random station ... first in list ... 
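+ * that matches the address; with several virtual interfaces on the
+ * same hardware the entry returned may belong to any one of them.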
*/ + for_each_sta_info(hw_to_local(hw), addr, sta, nxt) + return &sta->sta; + return NULL; +} +EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw); + +struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, + const u8 *addr) +{ + struct ieee80211_sub_if_data *sdata; + + if (!vif) + return NULL; + + sdata = vif_to_sdata(vif); + + return ieee80211_find_sta_by_hw(&sdata->local->hw, addr); +} +EXPORT_SYMBOL(ieee80211_find_sta); + +/* powersave support code */ +void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + int sent, buffered; + + drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); + + if (!skb_queue_empty(&sta->ps_tx_buf)) + sta_info_clear_tim_bit(sta); + + /* Send all buffered frames to the station */ + sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered); + buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf); + sent += buffered; + local->total_ps_buffered -= buffered; + +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " + "since STA not sleeping anymore\n", sdata->name, + sta->sta.addr, sta->sta.aid, sent - buffered, buffered); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ +} + +void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + int no_pending_pkts; + + skb = skb_dequeue(&sta->tx_filtered); + if (!skb) { + skb = skb_dequeue(&sta->ps_tx_buf); + if (skb) + local->total_ps_buffered--; + } + no_pending_pkts = skb_queue_empty(&sta->tx_filtered) && + skb_queue_empty(&sta->ps_tx_buf); + + if (skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) skb->data; + + /* + * Tell TX path to send this frame even though the STA may + * still remain is PS mode after this frame exchange. + */ + info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE; + +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n", + sta->sta.addr, sta->sta.aid, + skb_queue_len(&sta->ps_tx_buf)); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + + /* Use MoreData flag to indicate whether there are more + * buffered frames for this STA */ + if (no_pending_pkts) + hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); + else + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); + + ieee80211_add_pending_skb(local, skb); + + if (no_pending_pkts) + sta_info_clear_tim_bit(sta); +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + } else { + /* + * FIXME: This can be the result of a race condition between + * us expiring a frame and the station polling for it. + * Should we send it a null-func frame indicating we + * have nothing buffered for it? 
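+ * For now the condition is only logged below and no frame is sent
+ * in response.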
+ */ + printk(KERN_DEBUG "%s: STA %pM sent PS Poll even " + "though there are no buffered frames for it\n", + sdata->name, sta->sta.addr); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + } +} + +void ieee80211_sta_block_awake(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, bool block) +{ + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + + if (block) + set_sta_flags(sta, WLAN_STA_PS_DRIVER); + else + ieee80211_queue_work(hw, &sta->drv_unblock_wk); +} +EXPORT_SYMBOL(ieee80211_sta_block_awake); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h new file mode 100644 index 0000000..822d845 --- /dev/null +++ b/net/mac80211/sta_info.h @@ -0,0 +1,474 @@ +/* + * Copyright 2002-2005, Devicescape Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef STA_INFO_H +#define STA_INFO_H + +#include +#include +#include +#include +#include "key.h" + +/** + * enum ieee80211_sta_info_flags - Stations flags + * + * These flags are used with &struct sta_info's @flags member. + * + * @WLAN_STA_AUTH: Station is authenticated. + * @WLAN_STA_ASSOC: Station is associated. + * @WLAN_STA_PS_STA: Station is in power-save mode + * @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic. + * This bit is always checked so needs to be enabled for all stations + * when virtual port control is not in use. + * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble + * frames. + * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP. + * @WLAN_STA_WME: Station is a QoS-STA. + * @WLAN_STA_WDS: Station is one of our WDS peers. + * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the + * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next + * frame to this station is transmitted. + * @WLAN_STA_MFP: Management frame protection is used with this STA. + * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle. + * Used to deny ADDBA requests (both TX and RX). + * @WLAN_STA_PS_DRIVER: driver requires keeping this station in + * power-save mode logically to flush frames that might still + * be in the queues + * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping + * station in power-save mode, reply when the driver unblocks. + * @WLAN_STA_DISASSOC: Disassociation in progress. + * This is used to reject TX BA session requests when disassociation + * is in progress. 
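+ *
+ * These flags are normally read and modified under the per-station
+ * flaglock spinlock through the set_sta_flags(), clear_sta_flags() and
+ * test_sta_flags() helpers defined later in this header.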
+ */ +enum ieee80211_sta_info_flags { + WLAN_STA_AUTH = 1<<0, + WLAN_STA_ASSOC = 1<<1, + WLAN_STA_PS_STA = 1<<2, + WLAN_STA_AUTHORIZED = 1<<3, + WLAN_STA_SHORT_PREAMBLE = 1<<4, + WLAN_STA_ASSOC_AP = 1<<5, + WLAN_STA_WME = 1<<6, + WLAN_STA_WDS = 1<<7, + WLAN_STA_CLEAR_PS_FILT = 1<<9, + WLAN_STA_MFP = 1<<10, + WLAN_STA_SUSPEND = 1<<11, + WLAN_STA_PS_DRIVER = 1<<12, + WLAN_STA_PSPOLL = 1<<13, + WLAN_STA_DISASSOC = 1<<14, +}; + +#define STA_TID_NUM 16 +#define ADDBA_RESP_INTERVAL HZ +#define HT_AGG_MAX_RETRIES (0x3) + +#define HT_AGG_STATE_INITIATOR_SHIFT (4) + +#define HT_ADDBA_REQUESTED_MSK BIT(0) +#define HT_ADDBA_DRV_READY_MSK BIT(1) +#define HT_ADDBA_RECEIVED_MSK BIT(2) +#define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3) +#define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT) +#define HT_AGG_STATE_IDLE (0x0) +#define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \ + HT_ADDBA_DRV_READY_MSK | \ + HT_ADDBA_RECEIVED_MSK) + +/** + * struct tid_ampdu_tx - TID aggregation information (Tx). + * + * @addba_resp_timer: timer for peer's response to addba request + * @pending: pending frames queue -- use sta's spinlock to protect + * @ssn: Starting Sequence Number expected to be aggregated. + * @dialog_token: dialog token for aggregation session + */ +struct tid_ampdu_tx { + struct timer_list addba_resp_timer; + struct sk_buff_head pending; + u16 ssn; + u8 dialog_token; +}; + +/** + * struct tid_ampdu_rx - TID aggregation information (Rx). + * + * @reorder_buf: buffer to reorder incoming aggregated MPDUs + * @reorder_time: jiffies when skb was added + * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) + * @head_seq_num: head sequence number in reordering buffer. + * @stored_mpdu_num: number of MPDUs in reordering buffer + * @ssn: Starting Sequence Number expected to be aggregated. + * @buf_size: buffer size for incoming A-MPDUs + * @timeout: reset timer value (in TUs). + * @dialog_token: dialog token for aggregation session + * @shutdown: this session is being shut down due to STA removal + */ +struct tid_ampdu_rx { + struct sk_buff **reorder_buf; + unsigned long *reorder_time; + struct timer_list session_timer; + u16 head_seq_num; + u16 stored_mpdu_num; + u16 ssn; + u16 buf_size; + u16 timeout; + u8 dialog_token; + bool shutdown; +}; + +/** + * enum plink_state - state of a mesh peer link finite state machine + * + * @PLINK_LISTEN: initial state, considered the implicit state of non existant + * mesh peer links + * @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer + * @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer + * @PLINK_CNF_RCVD: mesh plink confirm frame has been received from this mesh + * peer + * @PLINK_ESTAB: mesh peer link is established + * @PLINK_HOLDING: mesh peer link is being closed or cancelled + * @PLINK_BLOCKED: all frames transmitted from this mesh plink are discarded + */ +enum plink_state { + PLINK_LISTEN, + PLINK_OPN_SNT, + PLINK_OPN_RCVD, + PLINK_CNF_RCVD, + PLINK_ESTAB, + PLINK_HOLDING, + PLINK_BLOCKED +}; + +/** + * struct sta_ampdu_mlme - STA aggregation information. + * + * @tid_state_rx: TID's state in Rx session state machine. + * @tid_rx: aggregation info for Rx per TID + * @tid_state_tx: TID's state in Tx session state machine. + * @tid_tx: aggregation info for Tx per TID + * @addba_req_num: number of times addBA request has been sent. 
+ * @dialog_token_allocator: dialog token enumerator for each new session; + */ +struct sta_ampdu_mlme { + /* rx */ + u8 tid_state_rx[STA_TID_NUM]; + struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; + /* tx */ + u8 tid_state_tx[STA_TID_NUM]; + struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; + u8 addba_req_num[STA_TID_NUM]; + u8 dialog_token_allocator; +}; + + +/** + * struct sta_info - STA information + * + * This structure collects information about a station that + * mac80211 is communicating with. + * + * @list: global linked list entry + * @hnext: hash table linked list pointer + * @local: pointer to the global information + * @sdata: virtual interface this station belongs to + * @key: peer key negotiated with this station, if any + * @rate_ctrl: rate control algorithm reference + * @rate_ctrl_priv: rate control private per-STA pointer + * @last_tx_rate: rate used for last transmit, to report to userspace as + * "the" transmit rate + * @lock: used for locking all fields that require locking, see comments + * in the header file. + * @flaglock: spinlock for flags accesses + * @drv_unblock_wk: used for driver PS unblocking + * @listen_interval: listen interval of this station, when we're acting as AP + * @flags: STA flags, see &enum ieee80211_sta_info_flags + * @ps_tx_buf: buffer of frames to transmit to this station + * when it leaves power saving state + * @tx_filtered: buffer of frames we already tried to transmit + * but were filtered by hardware due to STA having entered + * power saving state + * @rx_packets: Number of MSDUs received from this STA + * @rx_bytes: Number of bytes received from this STA + * @wep_weak_iv_count: number of weak WEP IVs received from this station + * @last_rx: time (in jiffies) when last frame was received from this STA + * @num_duplicates: number of duplicate frames received from this STA + * @rx_fragments: number of received MPDUs + * @rx_dropped: number of dropped MPDUs from this STA + * @last_signal: signal of last received frame from this STA + * @last_noise: noise of last received frame from this STA + * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) + * @tx_filtered_count: number of frames the hardware filtered for this STA + * @tx_retry_failed: number of frames that failed retry + * @tx_retry_count: total number of retries for frames to this STA + * @fail_avg: moving percentage of failed MSDUs + * @tx_packets: number of RX/TX MSDUs + * @tx_bytes: number of bytes transmitted to this STA + * @tx_fragments: number of transmitted MPDUs + * @tid_seq: per-TID sequence numbers for sending to this STA + * @ampdu_mlme: A-MPDU state machine state + * @timer_to_tid: identity mapping to ID timers + * @llid: Local link ID + * @plid: Peer link ID + * @reason: Cancel reason on PLINK_HOLDING state + * @plink_retries: Retries in establishment + * @ignore_plink_timer: ignore the peer-link timer (used internally) + * @plink_state: peer link state + * @plink_timeout: timeout of peer link + * @plink_timer: peer link watch timer + * @plink_timer_was_running: used by suspend/resume to restore timers + * @debugfs: debug filesystem info + * @sta: station information we share with the driver + * @dead: set to true when sta is unlinked + * @uploaded: set to true when sta is uploaded to the driver + */ +struct sta_info { + /* General information, mostly static */ + struct list_head list; + struct sta_info *hnext; + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_key *key; + struct rate_control_ref *rate_ctrl; + void 
*rate_ctrl_priv; + spinlock_t lock; + spinlock_t flaglock; + + struct work_struct drv_unblock_wk; + + u16 listen_interval; + + bool dead; + + bool uploaded; + + /* + * frequently updated, locked with own spinlock (flaglock), + * use the accessors defined below + */ + u32 flags; + + /* + * STA powersave frame queues, no more than the internal + * locking required. + */ + struct sk_buff_head ps_tx_buf; + struct sk_buff_head tx_filtered; + + /* Updated from RX path only, no locking requirements */ + unsigned long rx_packets, rx_bytes; + unsigned long wep_weak_iv_count; + unsigned long last_rx; + unsigned long num_duplicates; + unsigned long rx_fragments; + unsigned long rx_dropped; + int last_signal; + int last_noise; + __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; + + /* Updated from TX status path only, no locking requirements */ + unsigned long tx_filtered_count; + unsigned long tx_retry_failed, tx_retry_count; + /* moving percentage of failed MSDUs */ + unsigned int fail_avg; + + /* Updated from TX path only, no locking requirements */ + unsigned long tx_packets; + unsigned long tx_bytes; + unsigned long tx_fragments; + struct ieee80211_tx_rate last_tx_rate; + u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; + + /* + * Aggregation information, locked with lock. + */ + struct sta_ampdu_mlme ampdu_mlme; + u8 timer_to_tid[STA_TID_NUM]; + +#ifdef CONFIG_MAC80211_MESH + /* + * Mesh peer link attributes + * TODO: move to a sub-structure that is referenced with pointer? + */ + __le16 llid; + __le16 plid; + __le16 reason; + u8 plink_retries; + bool ignore_plink_timer; + bool plink_timer_was_running; + enum plink_state plink_state; + u32 plink_timeout; + struct timer_list plink_timer; +#endif + +#ifdef CONFIG_MAC80211_DEBUGFS + struct sta_info_debugfsdentries { + struct dentry *dir; + bool add_has_run; + } debugfs; +#endif + + /* keep last! */ + struct ieee80211_sta sta; +}; + +static inline enum plink_state sta_plink_state(struct sta_info *sta) +{ +#ifdef CONFIG_MAC80211_MESH + return sta->plink_state; +#endif + return PLINK_LISTEN; +} + +static inline void set_sta_flags(struct sta_info *sta, const u32 flags) +{ + unsigned long irqfl; + + spin_lock_irqsave(&sta->flaglock, irqfl); + sta->flags |= flags; + spin_unlock_irqrestore(&sta->flaglock, irqfl); +} + +static inline void clear_sta_flags(struct sta_info *sta, const u32 flags) +{ + unsigned long irqfl; + + spin_lock_irqsave(&sta->flaglock, irqfl); + sta->flags &= ~flags; + spin_unlock_irqrestore(&sta->flaglock, irqfl); +} + +static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags) +{ + u32 ret; + unsigned long irqfl; + + spin_lock_irqsave(&sta->flaglock, irqfl); + ret = sta->flags & flags; + spin_unlock_irqrestore(&sta->flaglock, irqfl); + + return ret; +} + +static inline u32 test_and_clear_sta_flags(struct sta_info *sta, + const u32 flags) +{ + u32 ret; + unsigned long irqfl; + + spin_lock_irqsave(&sta->flaglock, irqfl); + ret = sta->flags & flags; + sta->flags &= ~flags; + spin_unlock_irqrestore(&sta->flaglock, irqfl); + + return ret; +} + +static inline u32 get_sta_flags(struct sta_info *sta) +{ + u32 ret; + unsigned long irqfl; + + spin_lock_irqsave(&sta->flaglock, irqfl); + ret = sta->flags; + spin_unlock_irqrestore(&sta->flaglock, irqfl); + + return ret; +} + + + +#define STA_HASH_SIZE 256 +#define STA_HASH(sta) (sta[5]) + + +/* Maximum number of frames to buffer per power saving station */ +#define STA_MAX_TX_BUFFER 128 + +/* Minimum buffered frame expiry time. 
If STA uses listen interval that is + * smaller than this value, the minimum value here is used instead. */ +#define STA_TX_BUFFER_EXPIRE (10 * HZ) + +/* How often station data is cleaned up (e.g., expiration of buffered frames) + */ +#define STA_INFO_CLEANUP_INTERVAL (10 * HZ) + +/* + * Get a STA info, must be under RCU read lock. + */ +struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, + const u8 *addr); + +struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, + const u8 *addr); + +static inline +void for_each_sta_info_type_check(struct ieee80211_local *local, + const u8 *addr, + struct sta_info *sta, + struct sta_info *nxt) +{ +} + +#define for_each_sta_info(local, _addr, sta, nxt) \ + for ( /* initialise loop */ \ + sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\ + nxt = sta ? rcu_dereference(sta->hnext) : NULL; \ + /* typecheck */ \ + for_each_sta_info_type_check(local, (_addr), sta, nxt), \ + /* continue condition */ \ + sta; \ + /* advance loop */ \ + sta = nxt, \ + nxt = sta ? rcu_dereference(sta->hnext) : NULL \ + ) \ + /* compare address and run code only if it matches */ \ + if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0) + +/* + * Get STA info by index, BROKEN! + */ +struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, + int idx); +/* + * Create a new STA info, caller owns returned structure + * until sta_info_insert(). + */ +struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, + u8 *addr, gfp_t gfp); +/* + * Insert STA info into hash table/list, returns zero or a + * -EEXIST if (if the same MAC address is already present). + * + * Calling the non-rcu version makes the caller relinquish, + * the _rcu version calls read_lock_rcu() and must be called + * without it held. + */ +int sta_info_insert(struct sta_info *sta); +int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU); +int sta_info_insert_atomic(struct sta_info *sta); + +int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, + const u8 *addr); +int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, + const u8 *addr); + +void sta_info_set_tim_bit(struct sta_info *sta); +void sta_info_clear_tim_bit(struct sta_info *sta); + +void sta_info_init(struct ieee80211_local *local); +int sta_info_start(struct ieee80211_local *local); +void sta_info_stop(struct ieee80211_local *local); +int sta_info_flush(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, + unsigned long exp_time); + +void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta); +void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta); + +#endif /* STA_INFO_H */ diff --git a/net/mac80211/status.c b/net/mac80211/status.c new file mode 100644 index 0000000..56d5b9a --- /dev/null +++ b/net/mac80211/status.c @@ -0,0 +1,388 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2008-2010 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include "ieee80211_i.h" +#include "rate.h" +#include "mesh.h" +#include "led.h" + + +void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int tmp; + + skb->pkt_type = IEEE80211_TX_STATUS_MSG; + skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ? + &local->skb_queue : &local->skb_queue_unreliable, skb); + tmp = skb_queue_len(&local->skb_queue) + + skb_queue_len(&local->skb_queue_unreliable); + while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT && + (skb = skb_dequeue(&local->skb_queue_unreliable))) { + dev_kfree_skb_irq(skb); + tmp--; + I802_DEBUG_INC(local->tx_status_drop); + } + tasklet_schedule(&local->tasklet); +} +EXPORT_SYMBOL(ieee80211_tx_status_irqsafe); + +static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, + struct sta_info *sta, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + /* + * This skb 'survived' a round-trip through the driver, and + * hopefully the driver didn't mangle it too badly. However, + * we can definitely not rely on the the control information + * being correct. Clear it so we don't get junk there, and + * indicate that it needs new processing, but must not be + * modified/encrypted again. + */ + memset(&info->control, 0, sizeof(info->control)); + + info->control.jiffies = jiffies; + info->control.vif = &sta->sdata->vif; + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING | + IEEE80211_TX_INTFL_RETRANSMISSION; + + sta->tx_filtered_count++; + + /* + * Clear the TX filter mask for this STA when sending the next + * packet. If the STA went to power save mode, this will happen + * when it wakes up for the next time. + */ + set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT); + + /* + * This code races in the following way: + * + * (1) STA sends frame indicating it will go to sleep and does so + * (2) hardware/firmware adds STA to filter list, passes frame up + * (3) hardware/firmware processes TX fifo and suppresses a frame + * (4) we get TX status before having processed the frame and + * knowing that the STA has gone to sleep. + * + * This is actually quite unlikely even when both those events are + * processed from interrupts coming in quickly after one another or + * even at the same time because we queue both TX status events and + * RX frames to be processed by a tasklet and process them in the + * same order that they were received or TX status last. Hence, there + * is no race as long as the frame RX is processed before the next TX + * status, which drivers can ensure, see below. + * + * Note that this can only happen if the hardware or firmware can + * actually add STAs to the filter list, if this is done by the + * driver in response to set_tim() (which will only reduce the race + * this whole filtering tries to solve, not completely solve it) + * this situation cannot happen. + * + * To completely solve this race drivers need to make sure that they + * (a) don't mix the irq-safe/not irq-safe TX status/RX processing + * functions and + * (b) always process RX events before TX status events if ordering + * can be unknown, for example with different interrupt status + * bits. 
+ */ + if (test_sta_flags(sta, WLAN_STA_PS_STA) && + skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { + skb_queue_tail(&sta->tx_filtered, skb); + return; + } + + if (!test_sta_flags(sta, WLAN_STA_PS_STA) && + !(info->flags & IEEE80211_TX_INTFL_RETRIED)) { + /* Software retry the packet once */ + info->flags |= IEEE80211_TX_INTFL_RETRIED; + ieee80211_add_pending_skb(local, skb); + return; + } + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (net_ratelimit()) + printk(KERN_DEBUG "%s: dropped TX filtered frame, " + "queue_len=%d PS=%d @%lu\n", + wiphy_name(local->hw.wiphy), + skb_queue_len(&sta->tx_filtered), + !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies); +#endif + dev_kfree_skb(skb); +} + +static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *) skb->data; + struct ieee80211_local *local = sta->local; + struct ieee80211_sub_if_data *sdata = sta->sdata; + + if (ieee80211_is_action(mgmt->frame_control) && + sdata->vif.type == NL80211_IFTYPE_STATION && + mgmt->u.action.category == WLAN_CATEGORY_HT && + mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) { + /* + * This update looks racy, but isn't -- if we come + * here we've definitely got a station that we're + * talking to, and on a managed interface that can + * only be the AP. And the only other place updating + * this variable is before we're associated. + */ + switch (mgmt->u.action.u.ht_smps.smps_control) { + case WLAN_HT_SMPS_CONTROL_DYNAMIC: + sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC; + break; + case WLAN_HT_SMPS_CONTROL_STATIC: + sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC; + break; + case WLAN_HT_SMPS_CONTROL_DISABLED: + default: /* shouldn't happen since we don't send that */ + sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF; + break; + } + + ieee80211_queue_work(&local->hw, &local->recalc_smps); + } +} + +void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct sk_buff *skb2; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u16 frag, type; + __le16 fc; + struct ieee80211_supported_band *sband; + struct ieee80211_tx_status_rtap_hdr *rthdr; + struct ieee80211_sub_if_data *sdata; + struct net_device *prev_dev = NULL; + struct sta_info *sta, *tmp; + int retry_count = -1, i; + bool injected; + + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + /* the HW cannot have attempted that rate */ + if (i >= hw->max_rates) { + info->status.rates[i].idx = -1; + info->status.rates[i].count = 0; + } + + retry_count += info->status.rates[i].count; + } + if (retry_count < 0) + retry_count = 0; + + rcu_read_lock(); + + sband = local->hw.wiphy->bands[info->band]; + fc = hdr->frame_control; + + for_each_sta_info(local, hdr->addr1, sta, tmp) { + /* skip wrong virtual interface */ + if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN)) + continue; + + if (!(info->flags & IEEE80211_TX_STAT_ACK) && + test_sta_flags(sta, WLAN_STA_PS_STA)) { + /* + * The STA is in power save mode, so assume + * that this TX packet failed because of that. 
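+ * ieee80211_handle_filtered_frame() will queue it on the STA's
+ * tx_filtered list (or drop it if that queue is full) so that it can
+ * be sent again once the STA wakes up.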
+ */ + ieee80211_handle_filtered_frame(local, sta, skb); + rcu_read_unlock(); + return; + } + + if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && + (ieee80211_is_data_qos(fc))) { + u16 tid, ssn; + u8 *qc; + + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) + & IEEE80211_SCTL_SEQ); + ieee80211_send_bar(sta->sdata, hdr->addr1, + tid, ssn); + } + + if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { + ieee80211_handle_filtered_frame(local, sta, skb); + rcu_read_unlock(); + return; + } else { + if (!(info->flags & IEEE80211_TX_STAT_ACK)) + sta->tx_retry_failed++; + sta->tx_retry_count += retry_count; + } + + rate_control_tx_status(local, sband, sta, skb); + if (ieee80211_vif_is_mesh(&sta->sdata->vif)) + ieee80211s_update_metric(local, sta, skb); + + if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && + (info->flags & IEEE80211_TX_STAT_ACK)) + ieee80211_frame_acked(sta, skb); + } + + rcu_read_unlock(); + + ieee80211_led_tx(local, 0); + + /* SNMP counters + * Fragments are passed to low-level drivers as separate skbs, so these + * are actually fragments, not frames. Update frame counters only for + * the first fragment of the frame. */ + + frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; + type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE; + + if (info->flags & IEEE80211_TX_STAT_ACK) { + if (frag == 0) { + local->dot11TransmittedFrameCount++; + if (is_multicast_ether_addr(hdr->addr1)) + local->dot11MulticastTransmittedFrameCount++; + if (retry_count > 0) + local->dot11RetryCount++; + if (retry_count > 1) + local->dot11MultipleRetryCount++; + } + + /* This counter shall be incremented for an acknowledged MPDU + * with an individual address in the address 1 field or an MPDU + * with a multicast address in the address 1 field of type Data + * or Management. */ + if (!is_multicast_ether_addr(hdr->addr1) || + type == IEEE80211_FTYPE_DATA || + type == IEEE80211_FTYPE_MGMT) + local->dot11TransmittedFragmentCount++; + } else { + if (frag == 0) + local->dot11FailedCount++; + } + + if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) && + (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) && + !(info->flags & IEEE80211_TX_CTL_INJECTED) && + local->ps_sdata && !(local->scanning)) { + if (info->flags & IEEE80211_TX_STAT_ACK) { + local->ps_sdata->u.mgd.flags |= + IEEE80211_STA_NULLFUNC_ACKED; + ieee80211_queue_work(&local->hw, + &local->dynamic_ps_enable_work); + } else + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(10)); + } + + if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) + cfg80211_action_tx_status( + skb->dev, (unsigned long) skb, skb->data, skb->len, + !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); + + /* this was a transmitted frame, but now we want to reuse it */ + skb_orphan(skb); + + /* + * This is a bit racy but we can avoid a lot of work + * with this test... 
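+ * If a monitor interface appears right after the check, it merely
+ * misses this one status frame.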
+ */ + if (!local->monitors && !local->cooked_mntrs) { + dev_kfree_skb(skb); + return; + } + + /* send frame to monitor interfaces now */ + + if (skb_headroom(skb) < sizeof(*rthdr)) { + printk(KERN_ERR "ieee80211_tx_status: headroom too small\n"); + dev_kfree_skb(skb); + return; + } + + rthdr = (struct ieee80211_tx_status_rtap_hdr *) + skb_push(skb, sizeof(*rthdr)); + + memset(rthdr, 0, sizeof(*rthdr)); + rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr)); + rthdr->hdr.it_present = + cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) | + (1 << IEEE80211_RADIOTAP_DATA_RETRIES) | + (1 << IEEE80211_RADIOTAP_RATE)); + + if (!(info->flags & IEEE80211_TX_STAT_ACK) && + !is_multicast_ether_addr(hdr->addr1)) + rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL); + + /* + * XXX: Once radiotap gets the bitmap reset thing the vendor + * extensions proposal contains, we can actually report + * the whole set of tries we did. + */ + if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || + (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) + rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS); + else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) + rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS); + if (info->status.rates[0].idx >= 0 && + !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) + rthdr->rate = sband->bitrates[ + info->status.rates[0].idx].bitrate / 5; + + /* for now report the total retry_count */ + rthdr->data_retries = retry_count; + + /* Need to make a copy before skb->cb gets cleared */ + injected = !!(info->flags & IEEE80211_TX_CTL_INJECTED); + + /* XXX: is this sufficient for BPF? */ + skb_set_mac_header(skb, 0); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + memset(skb->cb, 0, sizeof(skb->cb)); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { + if (!ieee80211_sdata_running(sdata)) + continue; + + if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && + !injected && + (type == IEEE80211_FTYPE_DATA)) + continue; + + if (prev_dev) { + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) { + skb2->dev = prev_dev; + netif_rx(skb2); + } + } + + prev_dev = sdata->dev; + } + } + if (prev_dev) { + skb->dev = prev_dev; + netif_rx(skb); + skb = NULL; + } + rcu_read_unlock(); + dev_kfree_skb(skb); +} +EXPORT_SYMBOL(ieee80211_tx_status); diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c new file mode 100644 index 0000000..7ef491e --- /dev/null +++ b/net/mac80211/tkip.c @@ -0,0 +1,342 @@ +/* + * Copyright 2002-2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include + +#include +#include "driver-ops.h" +#include "key.h" +#include "tkip.h" +#include "wep.h" + +#define PHASE1_LOOP_COUNT 8 + +/* + * 2-byte by 2-byte subset of the full AES S-box table; second part of this + * table is identical to first part but byte-swapped + */ +static const u16 tkip_sbox[256] = +{ + 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, + 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, + 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, + 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, + 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, + 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, + 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, + 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, + 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, + 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, + 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, + 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, + 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, + 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, + 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, + 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, + 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, + 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, + 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, + 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, + 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, + 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, + 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, + 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, + 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, + 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, + 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, + 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, + 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, + 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, + 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, + 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, +}; + +static u16 tkipS(u16 val) +{ + return tkip_sbox[val & 0xff] ^ swab16(tkip_sbox[val >> 8]); +} + +static u8 *write_tkip_iv(u8 *pos, u16 iv16) +{ + *pos++ = iv16 >> 8; + *pos++ = ((iv16 >> 8) | 0x20) & 0x7f; + *pos++ = iv16 & 0xFF; + return pos; +} + +/* + * P1K := Phase1(TA, TK, TSC) + * TA = transmitter address (48 bits) + * TK = dot11DefaultKeyValue or dot11KeyMappingValue (128 bits) + * TSC = TKIP sequence counter (48 bits, only 32 msb bits used) + * P1K: 80 bits + */ +static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx, + const u8 *ta, u32 tsc_IV32) +{ + int i, j; + u16 *p1k = ctx->p1k; + + p1k[0] = tsc_IV32 & 0xFFFF; + p1k[1] = tsc_IV32 >> 16; + p1k[2] = get_unaligned_le16(ta + 0); + p1k[3] = get_unaligned_le16(ta + 2); + p1k[4] = get_unaligned_le16(ta + 4); + + for (i = 0; i < PHASE1_LOOP_COUNT; i++) { + j = 2 * (i & 1); + p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j)); + p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j)); + p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j)); + p1k[3] += tkipS(p1k[2] ^ 
get_unaligned_le16(tk + 12 + j)); + p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; + } + ctx->state = TKIP_STATE_PHASE1_DONE; +} + +static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx, + u16 tsc_IV16, u8 *rc4key) +{ + u16 ppk[6]; + const u16 *p1k = ctx->p1k; + int i; + + ppk[0] = p1k[0]; + ppk[1] = p1k[1]; + ppk[2] = p1k[2]; + ppk[3] = p1k[3]; + ppk[4] = p1k[4]; + ppk[5] = p1k[4] + tsc_IV16; + + ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0)); + ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2)); + ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4)); + ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6)); + ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8)); + ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10)); + ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1); + ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1); + ppk[2] += ror16(ppk[1], 1); + ppk[3] += ror16(ppk[2], 1); + ppk[4] += ror16(ppk[3], 1); + ppk[5] += ror16(ppk[4], 1); + + rc4key = write_tkip_iv(rc4key, tsc_IV16); + *rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF; + + for (i = 0; i < 6; i++) + put_unaligned_le16(ppk[i], rc4key + 2 * i); +} + +/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets + * of the IV. Returns pointer to the octet following IVs (i.e., beginning of + * the packet payload). */ +u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16) +{ + pos = write_tkip_iv(pos, iv16); + *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */; + put_unaligned_le32(key->u.tkip.tx.iv32, pos); + return pos + 4; +} + +void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, + struct sk_buff *skb, enum ieee80211_tkip_key_type type, + u8 *outkey) +{ + struct ieee80211_key *key = (struct ieee80211_key *) + container_of(keyconf, struct ieee80211_key, conf); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + u8 *data; + const u8 *tk; + struct tkip_ctx *ctx; + u16 iv16; + u32 iv32; + + data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); + iv16 = data[2] | (data[0] << 8); + iv32 = get_unaligned_le32(&data[4]); + + tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; + ctx = &key->u.tkip.tx; + +#ifdef CONFIG_MAC80211_TKIP_DEBUG + printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n", + iv16, iv32); + + if (iv32 != ctx->iv32) { + printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n", + iv32, ctx->iv32); + printk(KERN_DEBUG "Wrap around of iv16 in the middle of a " + "fragmented packet\n"); + } +#endif + + /* Update the p1k only when the iv16 in the packet wraps around, this + * might occur after the wrap around of iv16 in the key in case of + * fragmented packets. */ + if (iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT) + tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32); + + if (type == IEEE80211_TKIP_P1_KEY) { + memcpy(outkey, ctx->p1k, sizeof(u16) * 5); + return; + } + + tkip_mixing_phase2(tk, ctx, iv16, outkey); +} +EXPORT_SYMBOL(ieee80211_get_tkip_key); + +/* + * Encrypt packet payload with TKIP using @key. @pos is a pointer to the + * beginning of the buffer containing payload. This payload must include + * the IV/Ext.IV and space for (taildroom) four octets for ICV. + * @payload_len is the length of payload (_not_ including IV/ICV length). + * @ta is the transmitter addresses. 
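+ * The per-packet RC4 key is derived below with tkip_mixing_phase1()
+ * (re-run only when iv16 has wrapped to zero or the context is not
+ * yet initialised) followed by tkip_mixing_phase2().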
+ */ +void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, + struct ieee80211_key *key, + u8 *pos, size_t payload_len, u8 *ta) +{ + u8 rc4key[16]; + struct tkip_ctx *ctx = &key->u.tkip.tx; + const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; + + /* Calculate per-packet key */ + if (ctx->iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT) + tkip_mixing_phase1(tk, ctx, ta, ctx->iv32); + + tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key); + + ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); +} + +/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the + * beginning of the buffer containing IEEE 802.11 header payload, i.e., + * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the + * length of payload, including IV, Ext. IV, MIC, ICV. */ +int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, + struct ieee80211_key *key, + u8 *payload, size_t payload_len, u8 *ta, + u8 *ra, int only_iv, int queue, + u32 *out_iv32, u16 *out_iv16) +{ + u32 iv32; + u32 iv16; + u8 rc4key[16], keyid, *pos = payload; + int res; + const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; + + if (payload_len < 12) + return -1; + + iv16 = (pos[0] << 8) | pos[2]; + keyid = pos[3]; + iv32 = get_unaligned_le32(pos + 4); + pos += 8; +#ifdef CONFIG_MAC80211_TKIP_DEBUG + { + int i; + printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len); + for (i = 0; i < payload_len; i++) + printk(" %02x", payload[i]); + printk("\n"); + printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n", + iv16, iv32); + } +#endif + + if (!(keyid & (1 << 5))) + return TKIP_DECRYPT_NO_EXT_IV; + + if ((keyid >> 6) != key->conf.keyidx) + return TKIP_DECRYPT_INVALID_KEYIDX; + + if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT && + (iv32 < key->u.tkip.rx[queue].iv32 || + (iv32 == key->u.tkip.rx[queue].iv32 && + iv16 <= key->u.tkip.rx[queue].iv16))) { +#ifdef CONFIG_MAC80211_TKIP_DEBUG + printk(KERN_DEBUG "TKIP replay detected for RX frame from " + "%pM (RX IV (%04x,%02x) <= prev. 
IV (%04x,%02x)\n", + ta, + iv32, iv16, key->u.tkip.rx[queue].iv32, + key->u.tkip.rx[queue].iv16); +#endif + return TKIP_DECRYPT_REPLAY; + } + + if (only_iv) { + res = TKIP_DECRYPT_OK; + key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED; + goto done; + } + + if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT || + key->u.tkip.rx[queue].iv32 != iv32) { + /* IV16 wrapped around - perform TKIP phase 1 */ + tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); +#ifdef CONFIG_MAC80211_TKIP_DEBUG + { + int i; + u8 key_offset = NL80211_TKIP_DATA_OFFSET_ENCR_KEY; + printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%pM" + " TK=", ta); + for (i = 0; i < 16; i++) + printk("%02x ", + key->conf.key[key_offset + i]); + printk("\n"); + printk(KERN_DEBUG "TKIP decrypt: P1K="); + for (i = 0; i < 5; i++) + printk("%04x ", key->u.tkip.rx[queue].p1k[i]); + printk("\n"); + } +#endif + } + if (key->local->ops->update_tkip_key && + key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && + key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) { + struct ieee80211_sub_if_data *sdata = key->sdata; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(key->sdata->bss, + struct ieee80211_sub_if_data, u.ap); + drv_update_tkip_key(key->local, sdata, &key->conf, key->sta, + iv32, key->u.tkip.rx[queue].p1k); + key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED; + } + + tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); +#ifdef CONFIG_MAC80211_TKIP_DEBUG + { + int i; + printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key="); + for (i = 0; i < 16; i++) + printk("%02x ", rc4key[i]); + printk("\n"); + } +#endif + + res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); + done: + if (res == TKIP_DECRYPT_OK) { + /* + * Record previously received IV, will be copied into the + * key information after MIC verification. It is possible + * that we don't catch replays of fragments but that's ok + * because the Michael MIC verication will then fail. + */ + *out_iv32 = iv32; + *out_iv16 = iv16; + } + + return res; +} diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h new file mode 100644 index 0000000..d471438 --- /dev/null +++ b/net/mac80211/tkip.h @@ -0,0 +1,33 @@ +/* + * Copyright 2002-2004, Instant802 Networks, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef TKIP_H +#define TKIP_H + +#include +#include +#include "key.h" + +u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16); + +void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, + struct ieee80211_key *key, + u8 *pos, size_t payload_len, u8 *ta); +enum { + TKIP_DECRYPT_OK = 0, + TKIP_DECRYPT_NO_EXT_IV = -1, + TKIP_DECRYPT_INVALID_KEYIDX = -2, + TKIP_DECRYPT_REPLAY = -3, +}; +int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, + struct ieee80211_key *key, + u8 *payload, size_t payload_len, u8 *ta, + u8 *ra, int only_iv, int queue, + u32 *out_iv32, u16 *out_iv16); + +#endif /* TKIP_H */ diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c new file mode 100644 index 0000000..c9d219b --- /dev/null +++ b/net/mac80211/tx.c @@ -0,0 +1,2493 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. 
+ * Copyright 2006-2007 Jiri Benc + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * + * Transmit and frame generation functions. + */ + +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +#include +#endif +#include +#include +#include +#include + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "led.h" +#include "mesh.h" +#include "wep.h" +#include "wpa.h" +#include "wme.h" +#include "rate.h" + +#define IEEE80211_TX_OK 0 +#define IEEE80211_TX_AGAIN 1 +#define IEEE80211_TX_PENDING 2 + +/* misc utils */ + +static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, + int next_frag_len) +{ + int rate, mrate, erp, dur, i; + struct ieee80211_rate *txrate; + struct ieee80211_local *local = tx->local; + struct ieee80211_supported_band *sband; + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + + /* assume HW handles this */ + if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) + return 0; + + /* uh huh? */ + if (WARN_ON_ONCE(info->control.rates[0].idx < 0)) + return 0; + + sband = local->hw.wiphy->bands[tx->channel->band]; + txrate = &sband->bitrates[info->control.rates[0].idx]; + + erp = txrate->flags & IEEE80211_RATE_ERP_G; + + /* + * data and mgmt (except PS Poll): + * - during CFP: 32768 + * - during contention period: + * if addr1 is group address: 0 + * if more fragments = 0 and addr1 is individual address: time to + * transmit one ACK plus SIFS + * if more fragments = 1 and addr1 is individual address: time to + * transmit next fragment plus 2 x ACK plus 3 x SIFS + * + * IEEE 802.11, 9.6: + * - control response frame (CTS or ACK) shall be transmitted using the + * same rate as the immediately previous frame in the frame exchange + * sequence, if this rate belongs to the PHY mandatory rates, or else + * at the highest possible rate belonging to the PHY rates in the + * BSSBasicRateSet + */ + hdr = (struct ieee80211_hdr *)tx->skb->data; + if (ieee80211_is_ctl(hdr->frame_control)) { + /* TODO: These control frames are not currently sent by + * mac80211, but should they be implemented, this function + * needs to be updated to support duration field calculation. + * + * RTS: time needed to transmit pending data/mgmt frame plus + * one CTS frame plus one ACK frame plus 3 x SIFS + * CTS: duration of immediately previous RTS minus time + * required to transmit CTS and its SIFS + * ACK: 0 if immediately previous directed data/mgmt had + * more=0, with more=1 duration in ACK frame is duration + * from previous frame minus time needed to transmit ACK + * and its SIFS + * PS Poll: BIT(15) | BIT(14) | aid + */ + return 0; + } + + /* data/mgmt */ + if (0 /* FIX: data/mgmt during CFP */) + return cpu_to_le16(32768); + + if (group_addr) /* Group address as the destination - no ACK */ + return 0; + + /* Individual destination address: + * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes) + * CTS and ACK frames shall be transmitted using the highest rate in + * basic rate set that is less than or equal to the rate of the + * immediately previous frame and that is using the same modulation + * (CCK or OFDM). If no basic rate set matches with these requirements, + * the highest mandatory rate of the PHY that is less than or equal to + * the rate of the previous frame is used. 
+ * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps + */ + rate = -1; + /* use lowest available if everything fails */ + mrate = sband->bitrates[0].bitrate; + for (i = 0; i < sband->n_bitrates; i++) { + struct ieee80211_rate *r = &sband->bitrates[i]; + + if (r->bitrate > txrate->bitrate) + break; + + if (tx->sdata->vif.bss_conf.basic_rates & BIT(i)) + rate = r->bitrate; + + switch (sband->band) { + case IEEE80211_BAND_2GHZ: { + u32 flag; + if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) + flag = IEEE80211_RATE_MANDATORY_G; + else + flag = IEEE80211_RATE_MANDATORY_B; + if (r->flags & flag) + mrate = r->bitrate; + break; + } + case IEEE80211_BAND_5GHZ: + if (r->flags & IEEE80211_RATE_MANDATORY_A) + mrate = r->bitrate; + break; + case IEEE80211_NUM_BANDS: + WARN_ON(1); + break; + } + } + if (rate == -1) { + /* No matching basic rate found; use highest suitable mandatory + * PHY rate */ + rate = mrate; + } + + /* Time needed to transmit ACK + * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up + * to closest integer */ + + dur = ieee80211_frame_duration(local, 10, rate, erp, + tx->sdata->vif.bss_conf.use_short_preamble); + + if (next_frag_len) { + /* Frame is fragmented: duration increases with time needed to + * transmit next fragment plus ACK and 2 x SIFS. */ + dur *= 2; /* ACK + SIFS */ + /* next fragment */ + dur += ieee80211_frame_duration(local, next_frag_len, + txrate->bitrate, erp, + tx->sdata->vif.bss_conf.use_short_preamble); + } + + return cpu_to_le16(dur); +} + +static int inline is_ieee80211_device(struct ieee80211_local *local, + struct net_device *dev) +{ + return local == wdev_priv(dev->ieee80211_ptr); +} + +/* tx handlers */ +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) +{ + struct ieee80211_local *local = tx->local; + struct ieee80211_if_managed *ifmgd; + + /* driver doesn't support power save */ + if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) + return TX_CONTINUE; + + /* hardware does dynamic power save */ + if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) + return TX_CONTINUE; + + /* dynamic power save disabled */ + if (local->hw.conf.dynamic_ps_timeout <= 0) + return TX_CONTINUE; + + /* we are scanning, don't enable power save */ + if (local->scanning) + return TX_CONTINUE; + + if (!local->ps_sdata) + return TX_CONTINUE; + + /* No point if we're going to suspend */ + if (local->quiescing) + return TX_CONTINUE; + + /* dynamic ps is supported only in managed mode */ + if (tx->sdata->vif.type != NL80211_IFTYPE_STATION) + return TX_CONTINUE; + + ifmgd = &tx->sdata->u.mgd; + + /* + * Don't wakeup from power save if u-apsd is enabled, voip ac has + * u-apsd enabled and the frame is in voip class. This effectively + * means that even if all access categories have u-apsd enabled, in + * practise u-apsd is only used with the voip ac. This is a + * workaround for the case when received voip class packets do not + * have correct qos tag for some reason, due the network or the + * peer application. + * + * Note: local->uapsd_queues access is racy here. If the value is + * changed via debugfs, user needs to reassociate manually to have + * everything in sync. 
+ */ + if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) + && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + && skb_get_queue_mapping(tx->skb) == 0) + return TX_CONTINUE; + + if (local->hw.conf.flags & IEEE80211_CONF_PS) { + ieee80211_stop_queues_by_reason(&local->hw, + IEEE80211_QUEUE_STOP_REASON_PS); + ieee80211_queue_work(&local->hw, + &local->dynamic_ps_disable_work); + } + + mod_timer(&local->dynamic_ps_timer, jiffies + + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) +{ + + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + u32 sta_flags; + + if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) + return TX_CONTINUE; + + if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) && + !ieee80211_is_probe_req(hdr->frame_control) && + !ieee80211_is_nullfunc(hdr->frame_control)) + /* + * When software scanning only nullfunc frames (to notify + * the sleep state to the AP) and probe requests (for the + * active scan) are allowed, all other frames should not be + * sent and we should not get here, but if we do + * nonetheless, drop them to avoid sending them + * off-channel. See the link below and + * ieee80211_start_scan() for more. + * + * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089 + */ + return TX_DROP; + + if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) + return TX_CONTINUE; + + if (tx->flags & IEEE80211_TX_PS_BUFFERED) + return TX_CONTINUE; + + sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0; + + if (likely(tx->flags & IEEE80211_TX_UNICAST)) { + if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && + tx->sdata->vif.type != NL80211_IFTYPE_ADHOC && + ieee80211_is_data(hdr->frame_control))) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: dropped data frame to not " + "associated station %pM\n", + tx->sdata->name, hdr->addr1); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); + return TX_DROP; + } + } else { + if (unlikely(ieee80211_is_data(hdr->frame_control) && + tx->local->num_sta == 0 && + tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) { + /* + * No associated STAs - no need to send multicast + * frames. + */ + return TX_DROP; + } + return TX_CONTINUE; + } + + return TX_CONTINUE; +} + +/* This function is called whenever the AP is about to exceed the maximum limit + * of buffered frames for power saving STAs. This situation should not really + * happen often during normal operation, so dropping the oldest buffered packet + * from each queue should be OK to make some room for new frames. 
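/*
 * Minimal sketch of the "drop the oldest frame when the buffer is full"
 * policy used by the power-save buffering code below; ps_ring_push() and its
 * fixed 8-entry ring are illustrative stand-ins, not mac80211 structures.
 */
#include <stdio.h>

#define PS_BUF_SIZE 8

struct ps_ring {
	int frames[PS_BUF_SIZE];	/* stand-in for queued sk_buffs */
	unsigned int head, len;
};

static int ps_ring_push(struct ps_ring *r, int frame)
{
	int dropped = -1;

	if (r->len == PS_BUF_SIZE) {
		/* full: purge the oldest entry to make room */
		dropped = r->frames[r->head];
		r->head = (r->head + 1) % PS_BUF_SIZE;
		r->len--;
	}
	r->frames[(r->head + r->len) % PS_BUF_SIZE] = frame;
	r->len++;
	return dropped;		/* -1 if nothing had to be dropped */
}

int main(void)
{
	struct ps_ring r = { .head = 0, .len = 0 };
	int i;

	for (i = 0; i < 10; i++) {
		int dropped = ps_ring_push(&r, i);

		if (dropped >= 0)
			printf("buffer full, dropped oldest frame %d\n",
			       dropped);
	}
	return 0;
}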
*/ +static void purge_old_ps_buffers(struct ieee80211_local *local) +{ + int total = 0, purged = 0; + struct sk_buff *skb; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + + /* + * virtual interfaces are protected by RCU + */ + rcu_read_lock(); + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + struct ieee80211_if_ap *ap; + if (sdata->vif.type != NL80211_IFTYPE_AP) + continue; + ap = &sdata->u.ap; + skb = skb_dequeue(&ap->ps_bc_buf); + if (skb) { + purged++; + dev_kfree_skb(skb); + } + total += skb_queue_len(&ap->ps_bc_buf); + } + + list_for_each_entry_rcu(sta, &local->sta_list, list) { + skb = skb_dequeue(&sta->ps_tx_buf); + if (skb) { + purged++; + dev_kfree_skb(skb); + } + total += skb_queue_len(&sta->ps_tx_buf); + } + + rcu_read_unlock(); + + local->total_ps_buffered = total; +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", + wiphy_name(local->hw.wiphy), purged); +#endif +} + +static ieee80211_tx_result +ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + + /* + * broadcast/multicast frame + * + * If any of the associated stations is in power save mode, + * the frame is buffered to be sent after DTIM beacon frame. + * This is done either by the hardware or us. + */ + + /* powersaving STAs only in AP/VLAN mode */ + if (!tx->sdata->bss) + return TX_CONTINUE; + + /* no buffering for ordered frames */ + if (ieee80211_has_order(hdr->frame_control)) + return TX_CONTINUE; + + /* no stations in PS mode */ + if (!atomic_read(&tx->sdata->bss->num_sta_ps)) + return TX_CONTINUE; + + info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; + + /* device releases frame after DTIM beacon */ + if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) + return TX_CONTINUE; + + /* buffered in mac80211 */ + if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) + purge_old_ps_buffers(tx->local); + + if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) { +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + if (net_ratelimit()) + printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n", + tx->sdata->name); +#endif + dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); + } else + tx->local->total_ps_buffered++; + + skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); + + return TX_QUEUED; +} + +static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta, + struct sk_buff *skb) +{ + if (!ieee80211_is_mgmt(fc)) + return 0; + + if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP)) + return 0; + + if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) + skb->data)) + return 0; + + return 1; +} + +static ieee80211_tx_result +ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) +{ + struct sta_info *sta = tx->sta; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + u32 staflags; + + if (unlikely(!sta || + ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_auth(hdr->frame_control) || + ieee80211_is_assoc_resp(hdr->frame_control) || + ieee80211_is_reassoc_resp(hdr->frame_control))) + return TX_CONTINUE; + + staflags = get_sta_flags(sta); + + if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) && + !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) { +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "STA %pM aid %d: PS buffer 
(entries " + "before %d)\n", + sta->sta.addr, sta->sta.aid, + skb_queue_len(&sta->ps_tx_buf)); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) + purge_old_ps_buffers(tx->local); + if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { + struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: STA %pM TX " + "buffer full - dropping oldest frame\n", + tx->sdata->name, sta->sta.addr); + } +#endif + dev_kfree_skb(old); + } else + tx->local->total_ps_buffered++; + + /* + * Queue frame to be sent after STA wakes up/polls, + * but don't set the TIM bit if the driver is blocking + * wakeup or poll response transmissions anyway. + */ + if (skb_queue_empty(&sta->ps_tx_buf) && + !(staflags & WLAN_STA_PS_DRIVER)) + sta_info_set_tim_bit(sta); + + info->control.jiffies = jiffies; + info->control.vif = &tx->sdata->vif; + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; + skb_queue_tail(&sta->ps_tx_buf, tx->skb); + return TX_QUEUED; + } +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + else if (unlikely(staflags & WLAN_STA_PS_STA)) { + printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll " + "set -> send frame\n", tx->sdata->name, + sta->sta.addr); + } +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) +{ + if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) + return TX_CONTINUE; + + if (tx->flags & IEEE80211_TX_UNICAST) + return ieee80211_tx_h_unicast_ps_buf(tx); + else + return ieee80211_tx_h_multicast_ps_buf(tx); +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) +{ + struct ieee80211_key *key = NULL; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + + if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) + tx->key = NULL; + else if (tx->sta && (key = rcu_dereference(tx->sta->key))) + tx->key = key; + else if (ieee80211_is_mgmt(hdr->frame_control) && + (key = rcu_dereference(tx->sdata->default_mgmt_key))) + tx->key = key; + else if ((key = rcu_dereference(tx->sdata->default_key))) + tx->key = key; + else if (tx->sdata->drop_unencrypted && + (tx->skb->protocol != cpu_to_be16(ETH_P_PAE)) && + !(info->flags & IEEE80211_TX_CTL_INJECTED) && + (!ieee80211_is_robust_mgmt_frame(hdr) || + (ieee80211_is_action(hdr->frame_control) && + tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) { + I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); + return TX_DROP; + } else + tx->key = NULL; + + if (tx->key) { + bool skip_hw = false; + + tx->key->tx_rx_count++; + /* TODO: add threshold stuff again */ + + switch (tx->key->conf.alg) { + case ALG_WEP: + if (ieee80211_is_auth(hdr->frame_control)) + break; + case ALG_TKIP: + if (!ieee80211_is_data_present(hdr->frame_control)) + tx->key = NULL; + break; + case ALG_CCMP: + if (!ieee80211_is_data_present(hdr->frame_control) && + !ieee80211_use_mfp(hdr->frame_control, tx->sta, + tx->skb)) + tx->key = NULL; + else + skip_hw = (tx->key->conf.flags & + IEEE80211_KEY_FLAG_SW_MGMT) && + ieee80211_is_mgmt(hdr->frame_control); + break; + case ALG_AES_CMAC: + if (!ieee80211_is_mgmt(hdr->frame_control)) + tx->key = NULL; + break; + } + + if (!skip_hw && tx->key && + tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) + info->control.hw_key = &tx->key->conf; + } + + return 
TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_sta(struct ieee80211_tx_data *tx) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + + if (tx->sta && tx->sta->uploaded) + info->control.sta = &tx->sta->sta; + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (void *)tx->skb->data; + struct ieee80211_supported_band *sband; + struct ieee80211_rate *rate; + int i, len; + bool inval = false, rts = false, short_preamble = false; + struct ieee80211_tx_rate_control txrc; + u32 sta_flags; + + memset(&txrc, 0, sizeof(txrc)); + + sband = tx->local->hw.wiphy->bands[tx->channel->band]; + + len = min_t(int, tx->skb->len + FCS_LEN, + tx->local->hw.wiphy->frag_threshold); + + /* set up the tx rate control struct we give the RC algo */ + txrc.hw = local_to_hw(tx->local); + txrc.sband = sband; + txrc.bss_conf = &tx->sdata->vif.bss_conf; + txrc.skb = tx->skb; + txrc.reported_rate.idx = -1; + txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band]; + if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1) + txrc.max_rate_idx = -1; + else + txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; + txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP; + + /* set up RTS protection if desired */ + if (len > tx->local->hw.wiphy->rts_threshold) { + txrc.rts = rts = true; + } + + /* + * Use short preamble if the BSS can handle it, but not for + * management frames unless we know the receiver can handle + * that -- the management frame might be to a station that + * just wants a probe response. + */ + if (tx->sdata->vif.bss_conf.use_short_preamble && + (ieee80211_is_data(hdr->frame_control) || + (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE)))) + txrc.short_preamble = short_preamble = true; + + sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0; + + /* + * Lets not bother rate control if we're associated and cannot + * talk to the sta. This should not happen. + */ + if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && + (sta_flags & WLAN_STA_ASSOC) && + !rate_usable_index_exists(sband, &tx->sta->sta), + "%s: Dropped data frame as no usable bitrate found while " + "scanning and associated. Target station: " + "%pM on %d GHz band\n", + tx->sdata->name, hdr->addr1, + tx->channel->band ? 5 : 2)) + return TX_DROP; + + /* + * If we're associated with the sta at this point we know we can at + * least send the frame at the lowest bit rate. + */ + rate_control_get_rate(tx->sdata, tx->sta, &txrc); + + if (unlikely(info->control.rates[0].idx < 0)) + return TX_DROP; + + if (txrc.reported_rate.idx < 0) + txrc.reported_rate = info->control.rates[0]; + + if (tx->sta) + tx->sta->last_tx_rate = txrc.reported_rate; + + if (unlikely(!info->control.rates[0].count)) + info->control.rates[0].count = 1; + + if (WARN_ON_ONCE((info->control.rates[0].count > 1) && + (info->flags & IEEE80211_TX_CTL_NO_ACK))) + info->control.rates[0].count = 1; + + if (is_multicast_ether_addr(hdr->addr1)) { + /* + * XXX: verify the rate is in the basic rateset + */ + return TX_CONTINUE; + } + + /* + * set up the RTS/CTS rate as the fastest basic rate + * that is not faster than the data rate + * + * XXX: Should this check all retry rates? 
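/*
 * Standalone sketch of the RTS/CTS rate selection described above: pick the
 * fastest rate that is both in the basic rate set and no faster than the
 * data rate. Bitrates are in units of 100 kbit/s as in mac80211; the table
 * below is an arbitrary 802.11b/g-style example, not driver data.
 */
#include <stdio.h>

static const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180,
				240, 360, 480, 540 };
#define N_BITRATES (int)(sizeof(bitrates) / sizeof(bitrates[0]))

static int rts_cts_rate_idx(unsigned int basic_rates, int data_rate_idx)
{
	int i, baserate = 0;

	for (i = 0; i < N_BITRATES; i++) {
		if (!(basic_rates & (1u << i)))
			continue;		/* not a basic rate */
		if (bitrates[i] > bitrates[data_rate_idx])
			continue;		/* faster than the data rate */
		if (bitrates[i] > bitrates[baserate])
			baserate = i;		/* best candidate so far */
	}
	return baserate;
}

int main(void)
{
	/* basic rates 1, 2, 5.5, 11, 6, 12, 24 Mbit/s (indices 0-4, 6, 8) */
	unsigned int basic = 0x15f;
	int idx = rts_cts_rate_idx(basic, 9);	/* data rate 36 Mbit/s */

	printf("RTS/CTS rate: %d.%d Mbit/s\n",
	       bitrates[idx] / 10, bitrates[idx] % 10);
	return 0;
}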
+ */ + if (!(info->control.rates[0].flags & IEEE80211_TX_RC_MCS)) { + s8 baserate = 0; + + rate = &sband->bitrates[info->control.rates[0].idx]; + + for (i = 0; i < sband->n_bitrates; i++) { + /* must be a basic rate */ + if (!(tx->sdata->vif.bss_conf.basic_rates & BIT(i))) + continue; + /* must not be faster than the data rate */ + if (sband->bitrates[i].bitrate > rate->bitrate) + continue; + /* maximum */ + if (sband->bitrates[baserate].bitrate < + sband->bitrates[i].bitrate) + baserate = i; + } + + info->control.rts_cts_rate_idx = baserate; + } + + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + /* + * make sure there's no valid rate following + * an invalid one, just in case drivers don't + * take the API seriously to stop at -1. + */ + if (inval) { + info->control.rates[i].idx = -1; + continue; + } + if (info->control.rates[i].idx < 0) { + inval = true; + continue; + } + + /* + * For now assume MCS is already set up correctly, this + * needs to be fixed. + */ + if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) { + WARN_ON(info->control.rates[i].idx > 76); + continue; + } + + /* set up RTS protection if desired */ + if (rts) + info->control.rates[i].flags |= + IEEE80211_TX_RC_USE_RTS_CTS; + + /* RC is busted */ + if (WARN_ON_ONCE(info->control.rates[i].idx >= + sband->n_bitrates)) { + info->control.rates[i].idx = -1; + continue; + } + + rate = &sband->bitrates[info->control.rates[i].idx]; + + /* set up short preamble */ + if (short_preamble && + rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) + info->control.rates[i].flags |= + IEEE80211_TX_RC_USE_SHORT_PREAMBLE; + + /* set up G protection */ + if (!rts && tx->sdata->vif.bss_conf.use_cts_prot && + rate->flags & IEEE80211_RATE_ERP_G) + info->control.rates[i].flags |= + IEEE80211_TX_RC_USE_CTS_PROTECT; + } + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + u16 *seq; + u8 *qc; + int tid; + + /* + * Packet injection may want to control the sequence + * number, if we have no matching interface then we + * neither assign one ourselves nor ask the driver to. + */ + if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR)) + return TX_CONTINUE; + + if (unlikely(ieee80211_is_ctl(hdr->frame_control))) + return TX_CONTINUE; + + if (ieee80211_hdrlen(hdr->frame_control) < 24) + return TX_CONTINUE; + + /* + * Anything but QoS data that has a sequence number field + * (is long enough) gets a sequence number from the global + * counter. + */ + if (!ieee80211_is_data_qos(hdr->frame_control)) { + /* driver should assign sequence number */ + info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; + /* for pure STA mode without beacons, we can do it */ + hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number); + tx->sdata->sequence_number += 0x10; + return TX_CONTINUE; + } + + /* + * This should be true for injected/management frames only, for + * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ + * above since they are not QoS-data frames. + */ + if (!tx->sta) + return TX_CONTINUE; + + /* include per-STA, per-TID sequence counter */ + + qc = ieee80211_get_qos_ctl(hdr); + tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + seq = &tx->sta->tid_seq[tid]; + + hdr->seq_ctrl = cpu_to_le16(*seq); + + /* Increase the sequence number. 
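/*
 * Sketch of the 802.11 Sequence Control packing behind the "+ 0x10" step
 * below: the fragment number lives in bits 0-3 and the sequence number in
 * bits 4-15, so adding 0x10 and masking with 0xfff0 advances and wraps the
 * sequence number without touching the fragment field.
 */
#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000f
#define SCTL_SEQ  0xfff0

static uint16_t next_seq_ctrl(uint16_t seq_ctrl, uint8_t fragnum)
{
	uint16_t seq = (seq_ctrl + 0x10) & SCTL_SEQ;	/* wraps at 4096 */

	return seq | (fragnum & SCTL_FRAG);
}

int main(void)
{
	uint16_t sc = 0xfff0;	/* sequence number 4095, fragment 0 */

	sc = next_seq_ctrl(sc, 0);
	printf("seq=%u frag=%u\n", sc >> 4, sc & SCTL_FRAG);	/* seq=0 frag=0 */
	return 0;
}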
*/ + *seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ; + + return TX_CONTINUE; +} + +static int ieee80211_fragment(struct ieee80211_local *local, + struct sk_buff *skb, int hdrlen, + int frag_threshold) +{ + struct sk_buff *tail = skb, *tmp; + int per_fragm = frag_threshold - hdrlen - FCS_LEN; + int pos = hdrlen + per_fragm; + int rem = skb->len - hdrlen - per_fragm; + + if (WARN_ON(rem < 0)) + return -EINVAL; + + while (rem) { + int fraglen = per_fragm; + + if (fraglen > rem) + fraglen = rem; + rem -= fraglen; + tmp = dev_alloc_skb(local->tx_headroom + + frag_threshold + + IEEE80211_ENCRYPT_HEADROOM + + IEEE80211_ENCRYPT_TAILROOM); + if (!tmp) + return -ENOMEM; + tail->next = tmp; + tail = tmp; + skb_reserve(tmp, local->tx_headroom + + IEEE80211_ENCRYPT_HEADROOM); + /* copy control information */ + memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); + skb_copy_queue_mapping(tmp, skb); + tmp->priority = skb->priority; + tmp->dev = skb->dev; + + /* copy header and data */ + memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen); + memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen); + + pos += fraglen; + } + + skb->len = hdrlen + per_fragm; + return 0; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb = tx->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; + int frag_threshold = tx->local->hw.wiphy->frag_threshold; + int hdrlen; + int fragnum; + + if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) + return TX_CONTINUE; + + /* + * Warn when submitting a fragmented A-MPDU frame and drop it. + * This scenario is handled in ieee80211_tx_prepare but extra + * caution taken here as fragmented ampdu may cause Tx stop. + */ + if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) + return TX_DROP; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + /* internal error, why is TX_FRAGMENTED set? */ + if (WARN_ON(skb->len + FCS_LEN <= frag_threshold)) + return TX_DROP; + + /* + * Now fragment the frame. This will allocate all the fragments and + * chain them (using skb as the first fragment) to skb->next. + * During transmission, we will remove the successfully transmitted + * fragments from this list. When the low-level driver rejects one + * of the fragments then we will simply pretend to accept the skb + * but store it away as pending. + */ + if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold)) + return TX_DROP; + + /* update duration/seq/flags of fragments */ + fragnum = 0; + do { + int next_len; + const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); + + hdr = (void *)skb->data; + info = IEEE80211_SKB_CB(skb); + + if (skb->next) { + hdr->frame_control |= morefrags; + next_len = skb->next->len; + /* + * No multi-rate retries for fragmented frames, that + * would completely throw off the NAV at other STAs. 
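/*
 * Sketch of the fragment sizing used by ieee80211_fragment() above: each
 * fragment carries at most frag_threshold - hdrlen - FCS_LEN octets of
 * payload, and the first fragment stays in the original buffer. The numbers
 * in main() are arbitrary examples, not values taken from the driver.
 */
#include <stdio.h>

#define FCS_LEN 4

static int fragment_plan(int frame_len, int hdrlen, int frag_threshold)
{
	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
	int payload = frame_len - hdrlen;
	int nfrags, last;

	if (per_fragm <= 0 || payload <= per_fragm)
		return 1;			/* no fragmentation needed */

	nfrags = (payload + per_fragm - 1) / per_fragm;
	last = payload - (nfrags - 1) * per_fragm;
	printf("%d fragments, up to %d payload octets each, %d in the last\n",
	       nfrags, per_fragm, last);
	return nfrags;
}

int main(void)
{
	fragment_plan(3000, 24, 1536);	/* 24-byte header, 1536-byte threshold */
	return 0;
}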
+ */ + info->control.rates[1].idx = -1; + info->control.rates[2].idx = -1; + info->control.rates[3].idx = -1; + info->control.rates[4].idx = -1; + BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5); + info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; + } else { + hdr->frame_control &= ~morefrags; + next_len = 0; + } + hdr->duration_id = ieee80211_duration(tx, 0, next_len); + hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); + fragnum++; + } while ((skb = skb->next)); + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb = tx->skb; + + if (!tx->sta) + return TX_CONTINUE; + + tx->sta->tx_packets++; + do { + tx->sta->tx_fragments++; + tx->sta->tx_bytes += skb->len; + } while ((skb = skb->next)); + + return TX_CONTINUE; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) +{ + if (!tx->key) + return TX_CONTINUE; + + switch (tx->key->conf.alg) { + case ALG_WEP: + return ieee80211_crypto_wep_encrypt(tx); + case ALG_TKIP: + return ieee80211_crypto_tkip_encrypt(tx); + case ALG_CCMP: + return ieee80211_crypto_ccmp_encrypt(tx); + case ALG_AES_CMAC: + return ieee80211_crypto_aes_cmac_encrypt(tx); + } + + /* not reached */ + WARN_ON(1); + return TX_DROP; +} + +static ieee80211_tx_result debug_noinline +ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb = tx->skb; + struct ieee80211_hdr *hdr; + int next_len; + bool group_addr; + + do { + hdr = (void *) skb->data; + if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) + break; /* must not overwrite AID */ + next_len = skb->next ? skb->next->len : 0; + group_addr = is_multicast_ether_addr(hdr->addr1); + + hdr->duration_id = + ieee80211_duration(tx, group_addr, next_len); + } while ((skb = skb->next)); + + return TX_CONTINUE; +} + +/* actual transmit path */ + +/* + * deal with packet injection down monitor interface + * with Radiotap Header -- only called for monitor mode interface + */ +static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, + struct sk_buff *skb) +{ + /* + * this is the moment to interpret and discard the radiotap header that + * must be at the start of the packet injected in Monitor mode + * + * Need to take some care with endian-ness since radiotap + * args are little-endian + */ + + struct ieee80211_radiotap_iterator iterator; + struct ieee80211_radiotap_header *rthdr = + (struct ieee80211_radiotap_header *) skb->data; + struct ieee80211_supported_band *sband; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len, + NULL); + + sband = tx->local->hw.wiphy->bands[tx->channel->band]; + + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + tx->flags &= ~IEEE80211_TX_FRAGMENTED; + + /* + * for every radiotap entry that is present + * (ieee80211_radiotap_iterator_next returns -ENOENT when no more + * entries present, or -EINVAL on error) + */ + + while (!ret) { + ret = ieee80211_radiotap_iterator_next(&iterator); + + if (ret) + continue; + + /* see if this argument is something we can use */ + switch (iterator.this_arg_index) { + /* + * You must take care when dereferencing iterator.this_arg + * for multibyte types... the pointer is not aligned. Use + * get_unaligned((type *)iterator.this_arg) to dereference + * iterator.this_arg for type "type" safely on all arches. 
+ */ + case IEEE80211_RADIOTAP_FLAGS: + if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { + /* + * this indicates that the skb we have been + * handed has the 32-bit FCS CRC at the end... + * we should react to that by snipping it off + * because it will be recomputed and added + * on transmission + */ + if (skb->len < (iterator._max_length + FCS_LEN)) + return false; + + skb_trim(skb, skb->len - FCS_LEN); + } + if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) + info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT; + if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) + tx->flags |= IEEE80211_TX_FRAGMENTED; + break; + + /* + * Please update the file + * Documentation/networking/mac80211-injection.txt + * when parsing new fields here. + */ + + default: + break; + } + } + + if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ + return false; + + /* + * remove the radiotap header + * iterator->_max_length was sanity-checked against + * skb->len by iterator init + */ + skb_pull(skb, iterator._max_length); + + return true; +} + +/* + * initialises @tx + */ +static ieee80211_tx_result +ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, + struct ieee80211_tx_data *tx, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int hdrlen, tid; + u8 *qc, *state; + bool queued = false; + + memset(tx, 0, sizeof(*tx)); + tx->skb = skb; + tx->local = local; + tx->sdata = sdata; + tx->channel = local->hw.conf.channel; + /* + * Set this flag (used below to indicate "automatic fragmentation"), + * it will be cleared/left by radiotap as desired. + */ + tx->flags |= IEEE80211_TX_FRAGMENTED; + + /* process and remove the injection radiotap header */ + if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) { + if (!__ieee80211_parse_tx_radiotap(tx, skb)) + return TX_DROP; + + /* + * __ieee80211_parse_tx_radiotap has now removed + * the radiotap header that was present and pre-filled + * 'tx' with tx control information. + */ + info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP; + } + + /* + * If this flag is set to true anywhere, and we get here, + * we are doing the needed processing, so remove the flag + * now. + */ + info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING; + + hdr = (struct ieee80211_hdr *) skb->data; + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + tx->sta = rcu_dereference(sdata->u.vlan.sta); + if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) + return TX_DROP; + } else if (info->flags & IEEE80211_TX_CTL_INJECTED) { + tx->sta = sta_info_get_bss(sdata, hdr->addr1); + } + if (!tx->sta) + tx->sta = sta_info_get(sdata, hdr->addr1); + + if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && + (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { + unsigned long flags; + struct tid_ampdu_tx *tid_tx; + + qc = ieee80211_get_qos_ctl(hdr); + tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + + spin_lock_irqsave(&tx->sta->lock, flags); + /* + * XXX: This spinlock could be fairly expensive, but see the + * comment in agg-tx.c:ieee80211_agg_tx_operational(). + * One way to solve this would be to do something RCU-like + * for managing the tid_tx struct and using atomic bitops + * for the actual state -- by introducing an actual + * 'operational' bit that would be possible. 
It would + * require changing ieee80211_agg_tx_operational() to + * set that bit, and changing the way tid_tx is managed + * everywhere, including races between that bit and + * tid_tx going away (tid_tx being added can be easily + * committed to memory before the 'operational' bit). + */ + tid_tx = tx->sta->ampdu_mlme.tid_tx[tid]; + state = &tx->sta->ampdu_mlme.tid_state_tx[tid]; + if (*state == HT_AGG_STATE_OPERATIONAL) { + info->flags |= IEEE80211_TX_CTL_AMPDU; + } else if (*state != HT_AGG_STATE_IDLE) { + /* in progress */ + queued = true; + info->control.vif = &sdata->vif; + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; + __skb_queue_tail(&tid_tx->pending, skb); + } + spin_unlock_irqrestore(&tx->sta->lock, flags); + + if (unlikely(queued)) + return TX_QUEUED; + } + + if (is_multicast_ether_addr(hdr->addr1)) { + tx->flags &= ~IEEE80211_TX_UNICAST; + info->flags |= IEEE80211_TX_CTL_NO_ACK; + } else { + tx->flags |= IEEE80211_TX_UNICAST; + if (unlikely(local->wifi_wme_noack_test)) + info->flags |= IEEE80211_TX_CTL_NO_ACK; + else + info->flags &= ~IEEE80211_TX_CTL_NO_ACK; + } + + if (tx->flags & IEEE80211_TX_FRAGMENTED) { + if ((tx->flags & IEEE80211_TX_UNICAST) && + skb->len + FCS_LEN > local->hw.wiphy->frag_threshold && + !(info->flags & IEEE80211_TX_CTL_AMPDU)) + tx->flags |= IEEE80211_TX_FRAGMENTED; + else + tx->flags &= ~IEEE80211_TX_FRAGMENTED; + } + + if (!tx->sta) + info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; + else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) + info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { + u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; + tx->ethertype = (pos[0] << 8) | pos[1]; + } + info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; + + return TX_CONTINUE; +} + +static int __ieee80211_tx(struct ieee80211_local *local, + struct sk_buff **skbp, + struct sta_info *sta, + bool txpending) +{ + struct sk_buff *skb = *skbp, *next; + struct ieee80211_tx_info *info; + struct ieee80211_sub_if_data *sdata; + unsigned long flags; + int ret, len; + bool fragm = false; + + while (skb) { + int q = skb_get_queue_mapping(skb); + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + ret = IEEE80211_TX_OK; + if (local->queue_stop_reasons[q] || + (!txpending && !skb_queue_empty(&local->pending[q]))) + ret = IEEE80211_TX_PENDING; + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + if (ret != IEEE80211_TX_OK) + return ret; + + info = IEEE80211_SKB_CB(skb); + + if (fragm) + info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | + IEEE80211_TX_CTL_FIRST_FRAGMENT); + + next = skb->next; + len = skb->len; + + if (next) + info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; + + sdata = vif_to_sdata(info->control.vif); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_MONITOR: + info->control.vif = NULL; + break; + case NL80211_IFTYPE_AP_VLAN: + info->control.vif = &container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap)->vif; + break; + default: + /* keep */ + break; + } + + ret = drv_tx(local, skb); + if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) { + dev_kfree_skb(skb); + ret = NETDEV_TX_OK; + } + if (ret != NETDEV_TX_OK) { + info->control.vif = &sdata->vif; + return IEEE80211_TX_AGAIN; + } + + *skbp = skb = next; + ieee80211_led_tx(local, 1); + fragm = true; + } + + return IEEE80211_TX_OK; +} + +/* + * Invoke TX handlers, return 0 on success and non-zero if the + * frame was dropped or queued. 
+ */ +static int invoke_tx_handlers(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb = tx->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + ieee80211_tx_result res = TX_DROP; + +#define CALL_TXH(txh) \ + do { \ + res = txh(tx); \ + if (res != TX_CONTINUE) \ + goto txh_done; \ + } while (0) + + CALL_TXH(ieee80211_tx_h_dynamic_ps); + CALL_TXH(ieee80211_tx_h_check_assoc); + CALL_TXH(ieee80211_tx_h_ps_buf); + CALL_TXH(ieee80211_tx_h_select_key); + CALL_TXH(ieee80211_tx_h_sta); + if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) + CALL_TXH(ieee80211_tx_h_rate_ctrl); + + if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) + goto txh_done; + + CALL_TXH(ieee80211_tx_h_michael_mic_add); + CALL_TXH(ieee80211_tx_h_sequence); + CALL_TXH(ieee80211_tx_h_fragment); + /* handlers after fragment must be aware of tx info fragmentation! */ + CALL_TXH(ieee80211_tx_h_stats); + CALL_TXH(ieee80211_tx_h_encrypt); + CALL_TXH(ieee80211_tx_h_calculate_duration); +#undef CALL_TXH + + txh_done: + if (unlikely(res == TX_DROP)) { + I802_DEBUG_INC(tx->local->tx_handlers_drop); + while (skb) { + struct sk_buff *next; + + next = skb->next; + dev_kfree_skb(skb); + skb = next; + } + return -1; + } else if (unlikely(res == TX_QUEUED)) { + I802_DEBUG_INC(tx->local->tx_handlers_queued); + return -1; + } + + return 0; +} + +static void ieee80211_tx(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, bool txpending) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_data tx; + ieee80211_tx_result res_prepare; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct sk_buff *next; + unsigned long flags; + int ret, retries; + u16 queue; + + queue = skb_get_queue_mapping(skb); + + if (unlikely(skb->len < 10)) { + dev_kfree_skb(skb); + return; + } + + rcu_read_lock(); + + /* initialises tx */ + res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); + + if (unlikely(res_prepare == TX_DROP)) { + dev_kfree_skb(skb); + rcu_read_unlock(); + return; + } else if (unlikely(res_prepare == TX_QUEUED)) { + rcu_read_unlock(); + return; + } + + tx.channel = local->hw.conf.channel; + info->band = tx.channel->band; + + if (invoke_tx_handlers(&tx)) + goto out; + + retries = 0; + retry: + ret = __ieee80211_tx(local, &tx.skb, tx.sta, txpending); + switch (ret) { + case IEEE80211_TX_OK: + break; + case IEEE80211_TX_AGAIN: + /* + * Since there are no fragmented frames on A-MPDU + * queues, there's no reason for a driver to reject + * a frame there, warn and drop it. 
+ */ + if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) + goto drop; + /* fall through */ + case IEEE80211_TX_PENDING: + skb = tx.skb; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + + if (local->queue_stop_reasons[queue] || + !skb_queue_empty(&local->pending[queue])) { + /* + * if queue is stopped, queue up frames for later + * transmission from the tasklet + */ + do { + next = skb->next; + skb->next = NULL; + if (unlikely(txpending)) + __skb_queue_head(&local->pending[queue], + skb); + else + __skb_queue_tail(&local->pending[queue], + skb); + } while ((skb = next)); + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, + flags); + } else { + /* + * otherwise retry, but this is a race condition or + * a driver bug (which we warn about if it persists) + */ + spin_unlock_irqrestore(&local->queue_stop_reason_lock, + flags); + + retries++; + if (WARN(retries > 10, "tx refused but queue active\n")) + goto drop; + goto retry; + } + } + out: + rcu_read_unlock(); + return; + + drop: + rcu_read_unlock(); + + skb = tx.skb; + while (skb) { + next = skb->next; + dev_kfree_skb(skb); + skb = next; + } +} + +/* device xmit handlers */ + +static int ieee80211_skb_resize(struct ieee80211_local *local, + struct sk_buff *skb, + int head_need, bool may_encrypt) +{ + int tail_need = 0; + + /* + * This could be optimised, devices that do full hardware + * crypto (including TKIP MMIC) need no tailroom... But we + * have no drivers for such devices currently. + */ + if (may_encrypt) { + tail_need = IEEE80211_ENCRYPT_TAILROOM; + tail_need -= skb_tailroom(skb); + tail_need = max_t(int, tail_need, 0); + } + + if (head_need || tail_need) { + /* Sorry. Can't account for this any more */ + skb_orphan(skb); + } + + if (skb_header_cloned(skb)) + I802_DEBUG_INC(local->tx_expand_skb_head_cloned); + else + I802_DEBUG_INC(local->tx_expand_skb_head); + + if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) { + printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n", + wiphy_name(local->hw.wiphy)); + return -ENOMEM; + } + + /* update truesize too */ + skb->truesize += head_need + tail_need; + + return 0; +} + +static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_sub_if_data *tmp_sdata; + int headroom; + bool may_encrypt; + + rcu_read_lock(); + + if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) { + int hdrlen; + u16 len_rthdr; + + info->flags |= IEEE80211_TX_CTL_INJECTED | + IEEE80211_TX_INTFL_HAS_RADIOTAP; + + len_rthdr = ieee80211_get_radiotap_len(skb->data); + hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + /* check the header is complete in the frame */ + if (likely(skb->len >= len_rthdr + hdrlen)) { + /* + * We process outgoing injected frames that have a + * local address we handle as though they are our + * own frames. + * This code here isn't entirely correct, the local + * MAC address is not necessarily enough to find + * the interface to use; for that proper VLAN/WDS + * support we will need a different mechanism. 
+ */ + + list_for_each_entry_rcu(tmp_sdata, &local->interfaces, + list) { + if (!ieee80211_sdata_running(tmp_sdata)) + continue; + if (tmp_sdata->vif.type != NL80211_IFTYPE_AP) + continue; + if (compare_ether_addr(tmp_sdata->vif.addr, + hdr->addr2) == 0) { + sdata = tmp_sdata; + break; + } + } + } + } + + may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); + + headroom = local->tx_headroom; + if (may_encrypt) + headroom += IEEE80211_ENCRYPT_HEADROOM; + headroom -= skb_headroom(skb); + headroom = max_t(int, 0, headroom); + + if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) { + dev_kfree_skb(skb); + rcu_read_unlock(); + return; + } + + info->control.vif = &sdata->vif; + + if (ieee80211_vif_is_mesh(&sdata->vif) && + ieee80211_is_data(hdr->frame_control) && + !is_multicast_ether_addr(hdr->addr1)) + if (mesh_nexthop_lookup(skb, sdata)) { + /* skb queued: don't free */ + rcu_read_unlock(); + return; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) + /* Older kernels do not have the select_queue callback */ + skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb)); +#endif + ieee80211_set_qos_hdr(local, skb); + ieee80211_tx(sdata, skb, false); + rcu_read_unlock(); +} + +netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_channel *chan = local->hw.conf.channel; + struct ieee80211_radiotap_header *prthdr = + (struct ieee80211_radiotap_header *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u16 len_rthdr; + + /* + * Frame injection is not allowed if beaconing is not allowed + * or if we need radar detection. Beaconing is usually not allowed when + * the mode or operation (Adhoc, AP, Mesh) does not support DFS. + * Passive scan is also used in world regulatory domains where + * your country is not known and as such it should be treated as + * NO TX unless the channel is explicitly allowed in which case + * your current regulatory domain would not have the passive scan + * flag. + * + * Since AP mode uses monitor interfaces to inject/TX management + * frames we can make AP mode the exception to this rule once it + * supports radar detection as its implementation can deal with + * radar detection by itself. We can do that later by adding a + * monitor flag interfaces used for AP support. + */ + if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR | + IEEE80211_CHAN_PASSIVE_SCAN))) + goto fail; + + /* check for not even having the fixed radiotap header part */ + if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) + goto fail; /* too short to be possibly valid */ + + /* is it a header version we can trust to find length from? */ + if (unlikely(prthdr->it_version)) + goto fail; /* only version 0 is supported */ + + /* then there must be a radiotap header with a length we can use */ + len_rthdr = ieee80211_get_radiotap_len(skb->data); + + /* does the skb contain enough to deliver on the alleged length? */ + if (unlikely(skb->len < len_rthdr)) + goto fail; /* skb too short for claimed rt header extent */ + + /* + * fix up the pointers accounting for the radiotap + * header still being in there. 
We are being given + * a precooked IEEE80211 header so no need for + * normal processing + */ + skb_set_mac_header(skb, len_rthdr); + /* + * these are just fixed to the end of the rt area since we + * don't have any better information and at this point, nobody cares + */ + skb_set_network_header(skb, len_rthdr); + skb_set_transport_header(skb, len_rthdr); + + memset(info, 0, sizeof(*info)); + + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + + /* pass the radiotap header up to xmit */ + ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb); + return NETDEV_TX_OK; + +fail: + dev_kfree_skb(skb); + return NETDEV_TX_OK; /* meaning, we dealt with the skb */ +} + +/** + * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type + * subinterfaces (wlan#, WDS, and VLAN interfaces) + * @skb: packet to be sent + * @dev: incoming interface + * + * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will + * not be freed, and caller is responsible for either retrying later or freeing + * skb). + * + * This function takes in an Ethernet header and encapsulates it with suitable + * IEEE 802.11 header based on which interface the packet is coming in. The + * encapsulated packet will then be passed to master interface, wlan#.11, for + * transmission (through low-level driver). + */ +netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int ret = NETDEV_TX_BUSY, head_need; + u16 ethertype, hdrlen, meshhdrlen = 0; + __le16 fc; + struct ieee80211_hdr hdr; + struct ieee80211s_hdr mesh_hdr; + const u8 *encaps_data; + int encaps_len, skip_header_bytes; + int nh_pos, h_pos; + struct sta_info *sta = NULL; + u32 sta_flags = 0; + + if (unlikely(skb->len < ETH_HLEN)) { + ret = NETDEV_TX_OK; + goto fail; + } + + nh_pos = skb_network_header(skb) - skb->data; + h_pos = skb_transport_header(skb) - skb->data; + + /* convert Ethernet header to proper 802.11 header (based on + * operation mode) */ + ethertype = (skb->data[12] << 8) | skb->data[13]; + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + rcu_read_lock(); + sta = rcu_dereference(sdata->u.vlan.sta); + if (sta) { + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); + /* RA TA DA SA */ + memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN); + memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); + memcpy(hdr.addr3, skb->data, ETH_ALEN); + memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); + hdrlen = 30; + sta_flags = get_sta_flags(sta); + } + rcu_read_unlock(); + if (sta) + break; + /* fall through */ + case NL80211_IFTYPE_AP: + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); + /* DA BSSID SA */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); + memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); + hdrlen = 24; + break; + case NL80211_IFTYPE_WDS: + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); + /* RA TA DA SA */ + memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); + memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); + memcpy(hdr.addr3, skb->data, ETH_ALEN); + memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); + hdrlen = 30; + break; +#ifdef CONFIG_MAC80211_MESH + case NL80211_IFTYPE_MESH_POINT: + if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { + /* Do not send frames with mesh_ttl == 0 */ + 
sdata->u.mesh.mshstats.dropped_frames_ttl++; + ret = NETDEV_TX_OK; + goto fail; + } + + if (compare_ether_addr(sdata->vif.addr, + skb->data + ETH_ALEN) == 0) { + hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, + skb->data, skb->data + ETH_ALEN); + meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, + sdata, NULL, NULL, NULL); + } else { + /* packet from other interface */ + struct mesh_path *mppath; + int is_mesh_mcast = 1; + const u8 *mesh_da; + + rcu_read_lock(); + if (is_multicast_ether_addr(skb->data)) + /* DA TA mSA AE:SA */ + mesh_da = skb->data; + else { + static const u8 bcast[ETH_ALEN] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + + mppath = mpp_path_lookup(skb->data, sdata); + if (mppath) { + /* RA TA mDA mSA AE:DA SA */ + mesh_da = mppath->mpp; + is_mesh_mcast = 0; + } else { + /* DA TA mSA AE:SA */ + mesh_da = bcast; + } + } + hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, + mesh_da, sdata->vif.addr); + rcu_read_unlock(); + if (is_mesh_mcast) + meshhdrlen = + ieee80211_new_mesh_header(&mesh_hdr, + sdata, + skb->data + ETH_ALEN, + NULL, + NULL); + else + meshhdrlen = + ieee80211_new_mesh_header(&mesh_hdr, + sdata, + NULL, + skb->data, + skb->data + ETH_ALEN); + + } + break; +#endif + case NL80211_IFTYPE_STATION: + memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN); + if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) { + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); + /* RA TA DA SA */ + memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); + memcpy(hdr.addr3, skb->data, ETH_ALEN); + memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); + hdrlen = 30; + } else { + fc |= cpu_to_le16(IEEE80211_FCTL_TODS); + /* BSSID SA DA */ + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + memcpy(hdr.addr3, skb->data, ETH_ALEN); + hdrlen = 24; + } + break; + case NL80211_IFTYPE_ADHOC: + /* DA SA BSSID */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN); + hdrlen = 24; + break; + default: + ret = NETDEV_TX_OK; + goto fail; + } + + /* + * There's no need to try to look up the destination + * if it is a multicast address (which can only happen + * in AP mode) + */ + if (!is_multicast_ether_addr(hdr.addr1)) { + rcu_read_lock(); + sta = sta_info_get(sdata, hdr.addr1); + if (sta) + sta_flags = get_sta_flags(sta); + rcu_read_unlock(); + } + + /* receiver and we are QoS enabled, use a QoS type frame */ + if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) { + fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); + hdrlen += 2; + } + + /* + * Drop unicast frames to unauthorised stations unless they are + * EAPOL frames from the local station. 
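/*
 * Simplified sketch of the "unauthorized port" rule described above: data
 * frames to a station that has not completed 802.1X authorization are
 * dropped, except for EAPOL frames sourced from our own address (the real
 * check below also exempts mesh interfaces and multicast destinations).
 * The helper name and constants here are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define ETH_P_PAE	0x888e	/* EAPOL ethertype */

static bool drop_unauthorized(bool sta_authorized, uint16_t ethertype,
			      const uint8_t *src, const uint8_t *own_addr)
{
	if (sta_authorized)
		return false;
	/* let the 802.1X handshake itself go out */
	if (ethertype == ETH_P_PAE && !memcmp(src, own_addr, ETH_ALEN))
		return false;
	return true;
}

int main(void)
{
	const uint8_t own[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("EAPOL before auth dropped? %d\n",
	       drop_unauthorized(false, ETH_P_PAE, own, own));	/* 0 */
	printf("data before auth dropped? %d\n",
	       drop_unauthorized(false, 0x0800, own, own));	/* 1 */
	return 0;
}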
+ */ + if (!ieee80211_vif_is_mesh(&sdata->vif) && + unlikely(!is_multicast_ether_addr(hdr.addr1) && + !(sta_flags & WLAN_STA_AUTHORIZED) && + !(ethertype == ETH_P_PAE && + compare_ether_addr(sdata->vif.addr, + skb->data + ETH_ALEN) == 0))) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (net_ratelimit()) + printk(KERN_DEBUG "%s: dropped frame to %pM" + " (unauthorized port)\n", dev->name, + hdr.addr1); +#endif + + I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); + + ret = NETDEV_TX_OK; + goto fail; + } + + hdr.frame_control = fc; + hdr.duration_id = 0; + hdr.seq_ctrl = 0; + + skip_header_bytes = ETH_HLEN; + if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { + encaps_data = bridge_tunnel_header; + encaps_len = sizeof(bridge_tunnel_header); + skip_header_bytes -= 2; + } else if (ethertype >= 0x600) { + encaps_data = rfc1042_header; + encaps_len = sizeof(rfc1042_header); + skip_header_bytes -= 2; + } else { + encaps_data = NULL; + encaps_len = 0; + } + + skb_pull(skb, skip_header_bytes); + nh_pos -= skip_header_bytes; + h_pos -= skip_header_bytes; + + head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); + + /* + * So we need to modify the skb header and hence need a copy of + * that. The head_need variable above doesn't, so far, include + * the needed header space that we don't need right away. If we + * can, then we don't reallocate right now but only after the + * frame arrives at the master device (if it does...) + * + * If we cannot, however, then we will reallocate to include all + * the ever needed space. Also, if we need to reallocate it anyway, + * make it big enough for everything we may ever need. + */ + + if (head_need > 0 || skb_cloned(skb)) { + head_need += IEEE80211_ENCRYPT_HEADROOM; + head_need += local->tx_headroom; + head_need = max_t(int, 0, head_need); + if (ieee80211_skb_resize(local, skb, head_need, true)) + goto fail; + } + + if (encaps_data) { + memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); + nh_pos += encaps_len; + h_pos += encaps_len; + } + + if (meshhdrlen > 0) { + memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen); + nh_pos += meshhdrlen; + h_pos += meshhdrlen; + } + + if (ieee80211_is_data_qos(fc)) { + __le16 *qos_control; + + qos_control = (__le16*) skb_push(skb, 2); + memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2); + /* + * Maybe we could actually set some fields here, for now just + * initialise to zero to indicate no special operation. + */ + *qos_control = 0; + } else + memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); + + nh_pos += hdrlen; + h_pos += hdrlen; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + /* Update skb pointers to various headers since this modified frame + * is going to go through Linux networking code that may potentially + * need things like pointer to IP header. */ + skb_set_mac_header(skb, 0); + skb_set_network_header(skb, nh_pos); + skb_set_transport_header(skb, h_pos); + + memset(info, 0, sizeof(*info)); + + dev->trans_start = jiffies; + ieee80211_xmit(sdata, skb); + + return NETDEV_TX_OK; + + fail: + if (ret == NETDEV_TX_OK) + dev_kfree_skb(skb); + + return ret; +} + + +/* + * ieee80211_clear_tx_pending may not be called in a context where + * it is possible that it packets could come in again. 
+ */ +void ieee80211_clear_tx_pending(struct ieee80211_local *local) +{ + int i; + + for (i = 0; i < local->hw.queues; i++) + skb_queue_purge(&local->pending[i]); +} + +static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + struct ieee80211_hdr *hdr; + int ret; + bool result = true; + + sdata = vif_to_sdata(info->control.vif); + + if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { + ieee80211_tx(sdata, skb, true); + } else { + hdr = (struct ieee80211_hdr *)skb->data; + sta = sta_info_get(sdata, hdr->addr1); + + ret = __ieee80211_tx(local, &skb, sta, true); + if (ret != IEEE80211_TX_OK) + result = false; + } + + return result; +} + +/* + * Transmit all pending packets. Called from tasklet. + */ +void ieee80211_tx_pending(unsigned long data) +{ + struct ieee80211_local *local = (struct ieee80211_local *)data; + unsigned long flags; + int i; + bool txok; + + rcu_read_lock(); + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + for (i = 0; i < local->hw.queues; i++) { + /* + * If queue is stopped by something other than due to pending + * frames, or we have no pending frames, proceed to next queue. + */ + if (local->queue_stop_reasons[i] || + skb_queue_empty(&local->pending[i])) + continue; + + while (!skb_queue_empty(&local->pending[i])) { + struct sk_buff *skb = __skb_dequeue(&local->pending[i]); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sub_if_data *sdata; + + if (WARN_ON(!info->control.vif)) { + kfree_skb(skb); + continue; + } + + sdata = vif_to_sdata(info->control.vif); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, + flags); + + txok = ieee80211_tx_pending_skb(local, skb); + if (!txok) + __skb_queue_head(&local->pending[i], skb); + spin_lock_irqsave(&local->queue_stop_reason_lock, + flags); + if (!txok) + break; + } + } + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + rcu_read_unlock(); +} + +/* functions for drivers to get certain frames */ + +static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss, + struct sk_buff *skb, + struct beacon_data *beacon) +{ + u8 *pos, *tim; + int aid0 = 0; + int i, have_bits = 0, n1, n2; + + /* Generate bitmap for TIM only if there are any STAs in power save + * mode. */ + if (atomic_read(&bss->num_sta_ps) > 0) + /* in the hope that this is faster than + * checking byte-for-byte */ + have_bits = !bitmap_empty((unsigned long*)bss->tim, + IEEE80211_MAX_AID+1); + + if (bss->dtim_count == 0) + bss->dtim_count = beacon->dtim_period - 1; + else + bss->dtim_count--; + + tim = pos = (u8 *) skb_put(skb, 6); + *pos++ = WLAN_EID_TIM; + *pos++ = 4; + *pos++ = bss->dtim_count; + *pos++ = beacon->dtim_period; + + if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf)) + aid0 = 1; + + if (have_bits) { + /* Find largest even number N1 so that bits numbered 1 through + * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits + * (N2 + 1) x 8 through 2007 are 0. 
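A worked example of the N1/N2 bounds described in the comment above may help: with AIDs 41 and 76 buffered, only octets 4 through 9 of the 251-octet virtual bitmap need to be carried, and the element length becomes N2 - N1 + 4. Standalone illustration only, mirroring the two scans that follow:

#include <stdio.h>

#define TIM_LEN 251	/* virtual bitmap covers AIDs 1..2007 */

int main(void)
{
	unsigned char tim[TIM_LEN] = { 0 };
	int i, n1 = 0, n2 = 0;

	tim[5] = 0x02;	/* AID 41 buffered: octet 5, bit 1 */
	tim[9] = 0x10;	/* AID 76 buffered: octet 9, bit 4 */

	for (i = 0; i < TIM_LEN; i++)
		if (tim[i]) { n1 = i & 0xfe; break; }	/* -> 4 (largest even index below first set octet) */
	n2 = n1;
	for (i = TIM_LEN - 1; i >= n1; i--)
		if (tim[i]) { n2 = i; break; }		/* -> 9 (last non-zero octet) */

	/* element carries octets n1..n2 only: length = n2 - n1 + 4 = 9 */
	printf("n1=%d n2=%d ie_len=%d\n", n1, n2, n2 - n1 + 4);
	return 0;
}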
*/ + n1 = 0; + for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { + if (bss->tim[i]) { + n1 = i & 0xfe; + break; + } + } + n2 = n1; + for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { + if (bss->tim[i]) { + n2 = i; + break; + } + } + + /* Bitmap control */ + *pos++ = n1 | aid0; + /* Part Virt Bitmap */ + memcpy(pos, bss->tim + n1, n2 - n1 + 1); + + tim[1] = n2 - n1 + 4; + skb_put(skb, n2 - n1); + } else { + *pos++ = aid0; /* Bitmap control */ + *pos++ = 0; /* Part Virt Bitmap */ + } +} + +struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 *tim_offset, u16 *tim_length) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct sk_buff *skb = NULL; + struct ieee80211_tx_info *info; + struct ieee80211_sub_if_data *sdata = NULL; + struct ieee80211_if_ap *ap = NULL; + struct beacon_data *beacon; + struct ieee80211_supported_band *sband; + enum ieee80211_band band = local->hw.conf.channel->band; + struct ieee80211_tx_rate_control txrc; + + sband = local->hw.wiphy->bands[band]; + + rcu_read_lock(); + + sdata = vif_to_sdata(vif); + + if (tim_offset) + *tim_offset = 0; + if (tim_length) + *tim_length = 0; + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + ap = &sdata->u.ap; + beacon = rcu_dereference(ap->beacon); + if (ap && beacon) { + /* + * headroom, head length, + * tail length and maximum TIM length + */ + skb = dev_alloc_skb(local->tx_headroom + + beacon->head_len + + beacon->tail_len + 256); + if (!skb) + goto out; + + skb_reserve(skb, local->tx_headroom); + memcpy(skb_put(skb, beacon->head_len), beacon->head, + beacon->head_len); + + /* + * Not very nice, but we want to allow the driver to call + * ieee80211_beacon_get() as a response to the set_tim() + * callback. That, however, is already invoked under the + * sta_lock to guarantee consistent and race-free update + * of the tim bitmap in mac80211 and the driver. 
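The dtim_count handling above decrements once per generated beacon and wraps back to dtim_period - 1, and buffered broadcast/multicast traffic is only released on beacons where the count reaches zero (see ieee80211_get_buffered_bc() further below). A small standalone sketch of that cycle, illustrative only:

#include <stdio.h>

int main(void)
{
	int dtim_period = 3, dtim_count = 0, beacon;

	for (beacon = 0; beacon < 7; beacon++) {
		/* same update rule as ieee80211_beacon_add_tim() above */
		if (dtim_count == 0)
			dtim_count = dtim_period - 1;
		else
			dtim_count--;

		printf("beacon %d: dtim_count=%d%s\n", beacon, dtim_count,
		       dtim_count == 0 ? "  <- DTIM, flush bc/mc buffer" : "");
	}
	return 0;
}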
+ */ + if (local->tim_in_locked_section) { + ieee80211_beacon_add_tim(ap, skb, beacon); + } else { + unsigned long flags; + + spin_lock_irqsave(&local->sta_lock, flags); + ieee80211_beacon_add_tim(ap, skb, beacon); + spin_unlock_irqrestore(&local->sta_lock, flags); + } + + if (tim_offset) + *tim_offset = beacon->head_len; + if (tim_length) + *tim_length = skb->len - beacon->head_len; + + if (beacon->tail) + memcpy(skb_put(skb, beacon->tail_len), + beacon->tail, beacon->tail_len); + } else + goto out; + } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { + struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; + struct ieee80211_hdr *hdr; + struct sk_buff *presp = rcu_dereference(ifibss->presp); + + if (!presp) + goto out; + + skb = skb_copy(presp, GFP_ATOMIC); + if (!skb) + goto out; + + hdr = (struct ieee80211_hdr *) skb->data; + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_BEACON); + } else if (ieee80211_vif_is_mesh(&sdata->vif)) { + struct ieee80211_mgmt *mgmt; + u8 *pos; + + /* headroom, head length, tail length and maximum TIM length */ + skb = dev_alloc_skb(local->tx_headroom + 400); + if (!skb) + goto out; + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = (struct ieee80211_mgmt *) + skb_put(skb, 24 + sizeof(mgmt->u.beacon)); + memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); + mgmt->frame_control = + cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); + memset(mgmt->da, 0xff, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); + mgmt->u.beacon.beacon_int = + cpu_to_le16(sdata->vif.bss_conf.beacon_int); + mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ + + pos = skb_put(skb, 2); + *pos++ = WLAN_EID_SSID; + *pos++ = 0x0; + + mesh_mgmt_ies_add(skb, sdata); + } else { + WARN_ON(1); + goto out; + } + + info = IEEE80211_SKB_CB(skb); + + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + info->flags |= IEEE80211_TX_CTL_NO_ACK; + info->band = band; + + memset(&txrc, 0, sizeof(txrc)); + txrc.hw = hw; + txrc.sband = sband; + txrc.bss_conf = &sdata->vif.bss_conf; + txrc.skb = skb; + txrc.reported_rate.idx = -1; + txrc.rate_idx_mask = sdata->rc_rateidx_mask[band]; + if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1) + txrc.max_rate_idx = -1; + else + txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1; + txrc.ap = true; + rate_control_get_rate(sdata, NULL, &txrc); + + info->control.vif = vif; + + info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; + info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; + out: + rcu_read_unlock(); + return skb; +} +EXPORT_SYMBOL(ieee80211_beacon_get_tim); + +struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_if_managed *ifmgd; + struct ieee80211_pspoll *pspoll; + struct ieee80211_local *local; + struct sk_buff *skb; + + if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) + return NULL; + + sdata = vif_to_sdata(vif); + ifmgd = &sdata->u.mgd; + local = sdata->local; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); + if (!skb) { + printk(KERN_DEBUG "%s: failed to allocate buffer for " + "pspoll template\n", sdata->name); + return NULL; + } + skb_reserve(skb, local->hw.extra_tx_headroom); + + pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll)); + memset(pspoll, 0, sizeof(*pspoll)); + pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | + IEEE80211_STYPE_PSPOLL); + pspoll->aid = cpu_to_le16(ifmgd->aid); + + /* aid in PS-Poll has its two MSBs 
each set to 1 */ + pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14); + + memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN); + memcpy(pspoll->ta, vif->addr, ETH_ALEN); + + return skb; +} +EXPORT_SYMBOL(ieee80211_pspoll_get); + +struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ieee80211_hdr_3addr *nullfunc; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_if_managed *ifmgd; + struct ieee80211_local *local; + struct sk_buff *skb; + + if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) + return NULL; + + sdata = vif_to_sdata(vif); + ifmgd = &sdata->u.mgd; + local = sdata->local; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc)); + if (!skb) { + printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " + "template\n", sdata->name); + return NULL; + } + skb_reserve(skb, local->hw.extra_tx_headroom); + + nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb, + sizeof(*nullfunc)); + memset(nullfunc, 0, sizeof(*nullfunc)); + nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_TODS); + memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN); + memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); + memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN); + + return skb; +} +EXPORT_SYMBOL(ieee80211_nullfunc_get); + +struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local; + struct ieee80211_hdr_3addr *hdr; + struct sk_buff *skb; + size_t ie_ssid_len; + u8 *pos; + + sdata = vif_to_sdata(vif); + local = sdata->local; + ie_ssid_len = 2 + ssid_len; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + + ie_ssid_len + ie_len); + if (!skb) { + printk(KERN_DEBUG "%s: failed to allocate buffer for probe " + "request template\n", sdata->name); + return NULL; + } + + skb_reserve(skb, local->hw.extra_tx_headroom); + + hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr)); + memset(hdr, 0, sizeof(*hdr)); + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_REQ); + memset(hdr->addr1, 0xff, ETH_ALEN); + memcpy(hdr->addr2, vif->addr, ETH_ALEN); + memset(hdr->addr3, 0xff, ETH_ALEN); + + pos = skb_put(skb, ie_ssid_len); + *pos++ = WLAN_EID_SSID; + *pos++ = ssid_len; + if (ssid) + memcpy(pos, ssid, ssid_len); + pos += ssid_len; + + if (ie) { + pos = skb_put(skb, ie_len); + memcpy(pos, ie, ie_len); + } + + return skb; +} +EXPORT_SYMBOL(ieee80211_probereq_get); + +void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const void *frame, size_t frame_len, + const struct ieee80211_tx_info *frame_txctl, + struct ieee80211_rts *rts) +{ + const struct ieee80211_hdr *hdr = frame; + + rts->frame_control = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); + rts->duration = ieee80211_rts_duration(hw, vif, frame_len, + frame_txctl); + memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); + memcpy(rts->ta, hdr->addr2, sizeof(rts->ta)); +} +EXPORT_SYMBOL(ieee80211_rts_get); + +void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const void *frame, size_t frame_len, + const struct ieee80211_tx_info *frame_txctl, + struct ieee80211_cts *cts) +{ + const struct ieee80211_hdr *hdr = frame; + + cts->frame_control = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); + cts->duration = ieee80211_ctstoself_duration(hw, vif, + frame_len, 
frame_txctl); + memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); +} +EXPORT_SYMBOL(ieee80211_ctstoself_get); + +struct sk_buff * +ieee80211_get_buffered_bc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct sk_buff *skb = NULL; + struct sta_info *sta; + struct ieee80211_tx_data tx; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_if_ap *bss = NULL; + struct beacon_data *beacon; + struct ieee80211_tx_info *info; + + sdata = vif_to_sdata(vif); + bss = &sdata->u.ap; + + rcu_read_lock(); + beacon = rcu_dereference(bss->beacon); + + if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head) + goto out; + + if (bss->dtim_count != 0) + goto out; /* send buffered bc/mc only after DTIM beacon */ + + while (1) { + skb = skb_dequeue(&bss->ps_bc_buf); + if (!skb) + goto out; + local->total_ps_buffered--; + + if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) skb->data; + /* more buffered multicast/broadcast frames ==> set + * MoreData flag in IEEE 802.11 header to inform PS + * STAs */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } + + if (!ieee80211_tx_prepare(sdata, &tx, skb)) + break; + dev_kfree_skb_any(skb); + } + + info = IEEE80211_SKB_CB(skb); + + sta = tx.sta; + tx.flags |= IEEE80211_TX_PS_BUFFERED; + tx.channel = local->hw.conf.channel; + info->band = tx.channel->band; + + if (invoke_tx_handlers(&tx)) + skb = NULL; + out: + rcu_read_unlock(); + + return skb; +} +EXPORT_SYMBOL(ieee80211_get_buffered_bc); + +void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) +{ + skb_set_mac_header(skb, 0); + skb_set_network_header(skb, 0); + skb_set_transport_header(skb, 0); + + /* send all internal mgmt frames on VO */ + skb_set_queue_mapping(skb, 0); + + /* + * The other path calling ieee80211_xmit is from the tasklet, + * and while we can handle concurrent transmissions locking + * requirements are that we do not come into tx with bhs on. + */ + local_bh_disable(); + ieee80211_xmit(sdata, skb); + local_bh_enable(); +} diff --git a/net/mac80211/util.c b/net/mac80211/util.c new file mode 100644 index 0000000..80f5556 --- /dev/null +++ b/net/mac80211/util.c @@ -0,0 +1,1394 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
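ieee80211_get_buffered_bc() above hands the driver one buffered broadcast/multicast frame per call after a DTIM beacon and sets the More Data bit whenever further frames remain queued, so power-saving stations keep listening. A standalone illustration of that flagging rule (host-order constants for readability only; on air the frame control field is little-endian):

#include <stdint.h>
#include <stdio.h>

#define FCTL_MOREDATA 0x2000	/* More Data bit in the frame control field */

int main(void)
{
	/* stand-in for bss->ps_bc_buf: three buffered frame-control words */
	uint16_t fc[3] = { 0x0008, 0x0008, 0x0008 };	/* plain data frames */
	int n = 3, i;

	for (i = 0; i < n; i++) {
		int more_queued = (i < n - 1);

		/* mirror ieee80211_get_buffered_bc(): flag every frame that
		 * still has buffered bc/mc traffic queued behind it */
		if (more_queued)
			fc[i] |= FCTL_MOREDATA;

		printf("frame %d: fc=0x%04x more=%d\n", i, fc[i], more_queued);
	}
	return 0;
}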
+ * + * utilities for mac80211 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +#include +#endif +#include +#include + +#include "ieee80211_i.h" +#include "driver-ops.h" +#include "rate.h" +#include "mesh.h" +#include "wme.h" +#include "led.h" +#include "wep.h" + +/* privid for wiphys to determine whether they belong to us or not */ +void *mac80211_wiphy_privid = &mac80211_wiphy_privid; + +struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy) +{ + struct ieee80211_local *local; + BUG_ON(!wiphy); + + local = wiphy_priv(wiphy); + return &local->hw; +} +EXPORT_SYMBOL(wiphy_to_ieee80211_hw); + +u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, + enum nl80211_iftype type) +{ + __le16 fc = hdr->frame_control; + + /* drop ACK/CTS frames and incorrect hdr len (ctrl) */ + if (len < 16) + return NULL; + + if (ieee80211_is_data(fc)) { + if (len < 24) /* drop incorrect hdr len (data) */ + return NULL; + + if (ieee80211_has_a4(fc)) + return NULL; + if (ieee80211_has_tods(fc)) + return hdr->addr1; + if (ieee80211_has_fromds(fc)) + return hdr->addr2; + + return hdr->addr3; + } + + if (ieee80211_is_mgmt(fc)) { + if (len < 24) /* drop incorrect hdr len (mgmt) */ + return NULL; + return hdr->addr3; + } + + if (ieee80211_is_ctl(fc)) { + if(ieee80211_is_pspoll(fc)) + return hdr->addr1; + + if (ieee80211_is_back_req(fc)) { + switch (type) { + case NL80211_IFTYPE_STATION: + return hdr->addr2; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + return hdr->addr1; + default: + break; /* fall through to the return */ + } + } + } + + return NULL; +} + +void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb = tx->skb; + struct ieee80211_hdr *hdr; + + do { + hdr = (struct ieee80211_hdr *) skb->data; + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } while ((skb = skb->next)); +} + +int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, + int rate, int erp, int short_preamble) +{ + int dur; + + /* calculate duration (in microseconds, rounded up to next higher + * integer if it includes a fractional microsecond) to send frame of + * len bytes (does not include FCS) at the given rate. Duration will + * also include SIFS. + * + * rate is in 100 kbps, so divident is multiplied by 10 in the + * DIV_ROUND_UP() operations. + */ + + if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) { + /* + * OFDM: + * + * N_DBPS = DATARATE x 4 + * N_SYM = Ceiling((16+8xLENGTH+6) / N_DBPS) + * (16 = SIGNAL time, 6 = tail bits) + * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext + * + * T_SYM = 4 usec + * 802.11a - 17.5.2: aSIFSTime = 16 usec + * 802.11g - 19.8.4: aSIFSTime = 10 usec + + * signal ext = 6 usec + */ + dur = 16; /* SIFS + signal ext */ + dur += 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */ + dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */ + dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10, + 4 * rate); /* T_SYM x N_SYM */ + } else { + /* + * 802.11b or 802.11g with 802.11b compatibility: + * 18.3.4: TXTIME = PreambleLength + PLCPHeaderTime + + * Ceiling(((LENGTH+PBCC)x8)/DATARATE). PBCC=0. + * + * 802.11 (DS): 15.3.3, 802.11b: 18.3.4 + * aSIFSTime = 10 usec + * aPreambleLength = 144 usec or 72 usec with short preamble + * aPLCPHeaderLength = 48 usec or 24 usec with short preamble + */ + dur = 10; /* aSIFSTime = 10 usec */ + dur += short_preamble ? 
(72 + 24) : (144 + 48); + + dur += DIV_ROUND_UP(8 * (len + 4) * 10, rate); + } + + return dur; +} + +/* Exported duration function for driver use */ +__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + size_t frame_len, + struct ieee80211_rate *rate) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; + u16 dur; + int erp; + bool short_preamble = false; + + erp = 0; + if (vif) { + sdata = vif_to_sdata(vif); + short_preamble = sdata->vif.bss_conf.use_short_preamble; + if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) + erp = rate->flags & IEEE80211_RATE_ERP_G; + } + + dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp, + short_preamble); + + return cpu_to_le16(dur); +} +EXPORT_SYMBOL(ieee80211_generic_frame_duration); + +__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, size_t frame_len, + const struct ieee80211_tx_info *frame_txctl) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rate *rate; + struct ieee80211_sub_if_data *sdata; + bool short_preamble; + int erp; + u16 dur; + struct ieee80211_supported_band *sband; + + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + + short_preamble = false; + + rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; + + erp = 0; + if (vif) { + sdata = vif_to_sdata(vif); + short_preamble = sdata->vif.bss_conf.use_short_preamble; + if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) + erp = rate->flags & IEEE80211_RATE_ERP_G; + } + + /* CTS duration */ + dur = ieee80211_frame_duration(local, 10, rate->bitrate, + erp, short_preamble); + /* Data frame duration */ + dur += ieee80211_frame_duration(local, frame_len, rate->bitrate, + erp, short_preamble); + /* ACK duration */ + dur += ieee80211_frame_duration(local, 10, rate->bitrate, + erp, short_preamble); + + return cpu_to_le16(dur); +} +EXPORT_SYMBOL(ieee80211_rts_duration); + +__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + size_t frame_len, + const struct ieee80211_tx_info *frame_txctl) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rate *rate; + struct ieee80211_sub_if_data *sdata; + bool short_preamble; + int erp; + u16 dur; + struct ieee80211_supported_band *sband; + + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + + short_preamble = false; + + rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; + erp = 0; + if (vif) { + sdata = vif_to_sdata(vif); + short_preamble = sdata->vif.bss_conf.use_short_preamble; + if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) + erp = rate->flags & IEEE80211_RATE_ERP_G; + } + + /* Data frame duration */ + dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, + erp, short_preamble); + if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) { + /* ACK duration */ + dur += ieee80211_frame_duration(local, 10, rate->bitrate, + erp, short_preamble); + } + + return cpu_to_le16(dur); +} +EXPORT_SYMBOL(ieee80211_ctstoself_duration); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) +static bool ieee80211_all_queues_started(struct ieee80211_hw *hw) +{ + unsigned int queue; + + for (queue = 0; queue < hw->queues; queue++) + if (ieee80211_queue_stopped(hw, queue)) + return false; + return true; +} +#endif + +static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data 
*sdata; + + if (WARN_ON(queue >= hw->queues)) + return; + + __clear_bit(reason, &local->queue_stop_reasons[queue]); + + if (local->queue_stop_reasons[queue] != 0) + /* someone still has this queue stopped */ + return; + + if (!skb_queue_empty(&local->pending[queue])) + tasklet_schedule(&local->tx_pending_tasklet); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) + netif_start_subqueue(sdata->dev, queue); +#else + if (ieee80211_all_queues_started(hw)) + netif_wake_queue(sdata->dev); +#endif + rcu_read_unlock(); +} + +void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason) +{ + struct ieee80211_local *local = hw_to_local(hw); + unsigned long flags; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + __ieee80211_wake_queue(hw, queue, reason); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) +{ + ieee80211_wake_queue_by_reason(hw, queue, + IEEE80211_QUEUE_STOP_REASON_DRIVER); +} +EXPORT_SYMBOL(ieee80211_wake_queue); + +static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; + + if (WARN_ON(queue >= hw->queues)) + return; + + __set_bit(reason, &local->queue_stop_reasons[queue]); + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue)); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) + netif_stop_subqueue(sdata->dev, queue); +#else + netif_stop_queue(sdata->dev); +#endif + rcu_read_unlock(); +} + +void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason) +{ + struct ieee80211_local *local = hw_to_local(hw); + unsigned long flags; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + __ieee80211_stop_queue(hw, queue, reason); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) +{ + ieee80211_stop_queue_by_reason(hw, queue, + IEEE80211_QUEUE_STOP_REASON_DRIVER); +} +EXPORT_SYMBOL(ieee80211_stop_queue); + +void ieee80211_add_pending_skb(struct ieee80211_local *local, + struct sk_buff *skb) +{ + struct ieee80211_hw *hw = &local->hw; + unsigned long flags; + int queue = skb_get_queue_mapping(skb); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (WARN_ON(!info->control.vif)) { + kfree_skb(skb); + return; + } + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); + __skb_queue_tail(&local->pending[queue], skb); + __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +int ieee80211_add_pending_skbs(struct ieee80211_local *local, + struct sk_buff_head *skbs) +{ + struct ieee80211_hw *hw = &local->hw; + struct sk_buff *skb; + unsigned long flags; + int queue, ret = 0, i; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + for (i = 0; i < hw->queues; i++) + __ieee80211_stop_queue(hw, i, + IEEE80211_QUEUE_STOP_REASON_SKB_ADD); + + while ((skb = 
skb_dequeue(skbs))) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (WARN_ON(!info->control.vif)) { + kfree_skb(skb); + continue; + } + + ret++; + queue = skb_get_queue_mapping(skb); + __skb_queue_tail(&local->pending[queue], skb); + } + + for (i = 0; i < hw->queues; i++) + __ieee80211_wake_queue(hw, i, + IEEE80211_QUEUE_STOP_REASON_SKB_ADD); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + return ret; +} + +void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, + enum queue_stop_reason reason) +{ + struct ieee80211_local *local = hw_to_local(hw); + unsigned long flags; + int i; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + + for (i = 0; i < hw->queues; i++) + __ieee80211_stop_queue(hw, i, reason); + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +void ieee80211_stop_queues(struct ieee80211_hw *hw) +{ + ieee80211_stop_queues_by_reason(hw, + IEEE80211_QUEUE_STOP_REASON_DRIVER); +} +EXPORT_SYMBOL(ieee80211_stop_queues); + +int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) +{ + struct ieee80211_local *local = hw_to_local(hw); + unsigned long flags; + int ret; + + if (WARN_ON(queue >= hw->queues)) + return true; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + ret = !!local->queue_stop_reasons[queue]; + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + return ret; +} +EXPORT_SYMBOL(ieee80211_queue_stopped); + +void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, + enum queue_stop_reason reason) +{ + struct ieee80211_local *local = hw_to_local(hw); + unsigned long flags; + int i; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + + for (i = 0; i < hw->queues; i++) + __ieee80211_wake_queue(hw, i, reason); + + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); +} + +void ieee80211_wake_queues(struct ieee80211_hw *hw) +{ + ieee80211_wake_queues_by_reason(hw, IEEE80211_QUEUE_STOP_REASON_DRIVER); +} +EXPORT_SYMBOL(ieee80211_wake_queues); + +void ieee80211_iterate_active_interfaces( + struct ieee80211_hw *hw, + void (*iterator)(void *data, u8 *mac, + struct ieee80211_vif *vif), + void *data) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; + + mutex_lock(&local->iflist_mtx); + + list_for_each_entry(sdata, &local->interfaces, list) { + switch (sdata->vif.type) { + case __NL80211_IFTYPE_AFTER_LAST: + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_AP_VLAN: + continue; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: + break; + } + if (ieee80211_sdata_running(sdata)) + iterator(data, sdata->vif.addr, + &sdata->vif); + } + + mutex_unlock(&local->iflist_mtx); +} +EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces); + +void ieee80211_iterate_active_interfaces_atomic( + struct ieee80211_hw *hw, + void (*iterator)(void *data, u8 *mac, + struct ieee80211_vif *vif), + void *data) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; + + rcu_read_lock(); + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + switch (sdata->vif.type) { + case __NL80211_IFTYPE_AFTER_LAST: + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_AP_VLAN: + continue; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: 
+ break; + } + if (ieee80211_sdata_running(sdata)) + iterator(data, sdata->vif.addr, + &sdata->vif); + } + + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); + +/* + * Nothing should have been stuffed into the workqueue during + * the suspend->resume cycle. If this WARN is seen then there + * is a bug with either the driver suspend or something in + * mac80211 stuffing into the workqueue which we haven't yet + * cleared during mac80211's suspend cycle. + */ +static bool ieee80211_can_queue_work(struct ieee80211_local *local) +{ + if (WARN(local->suspended && !local->resuming, + "queueing ieee80211 work while going to suspend\n")) + return false; + + return true; +} + +void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (!ieee80211_can_queue_work(local)) + return; + + queue_work(local->workqueue, work); +} +EXPORT_SYMBOL(ieee80211_queue_work); + +void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, + struct delayed_work *dwork, + unsigned long delay) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (!ieee80211_can_queue_work(local)) + return; + + queue_delayed_work(local->workqueue, dwork, delay); +} +EXPORT_SYMBOL(ieee80211_queue_delayed_work); + +void ieee802_11_parse_elems(u8 *start, size_t len, + struct ieee802_11_elems *elems) +{ + ieee802_11_parse_elems_crc(start, len, elems, 0, 0); +} + +u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, + struct ieee802_11_elems *elems, + u64 filter, u32 crc) +{ + size_t left = len; + u8 *pos = start; + bool calc_crc = filter != 0; + + memset(elems, 0, sizeof(*elems)); + elems->ie_start = start; + elems->total_len = len; + + while (left >= 2) { + u8 id, elen; + + id = *pos++; + elen = *pos++; + left -= 2; + + if (elen > left) + break; + + if (calc_crc && id < 64 && (filter & (1ULL << id))) + crc = crc32_be(crc, pos - 2, elen + 2); + + switch (id) { + case WLAN_EID_SSID: + elems->ssid = pos; + elems->ssid_len = elen; + break; + case WLAN_EID_SUPP_RATES: + elems->supp_rates = pos; + elems->supp_rates_len = elen; + break; + case WLAN_EID_FH_PARAMS: + elems->fh_params = pos; + elems->fh_params_len = elen; + break; + case WLAN_EID_DS_PARAMS: + elems->ds_params = pos; + elems->ds_params_len = elen; + break; + case WLAN_EID_CF_PARAMS: + elems->cf_params = pos; + elems->cf_params_len = elen; + break; + case WLAN_EID_TIM: + if (elen >= sizeof(struct ieee80211_tim_ie)) { + elems->tim = (void *)pos; + elems->tim_len = elen; + } + break; + case WLAN_EID_IBSS_PARAMS: + elems->ibss_params = pos; + elems->ibss_params_len = elen; + break; + case WLAN_EID_CHALLENGE: + elems->challenge = pos; + elems->challenge_len = elen; + break; + case WLAN_EID_VENDOR_SPECIFIC: + if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && + pos[2] == 0xf2) { + /* Microsoft OUI (00:50:F2) */ + + if (calc_crc) + crc = crc32_be(crc, pos - 2, elen + 2); + + if (pos[3] == 1) { + /* OUI Type 1 - WPA IE */ + elems->wpa = pos; + elems->wpa_len = elen; + } else if (elen >= 5 && pos[3] == 2) { + /* OUI Type 2 - WMM IE */ + if (pos[4] == 0) { + elems->wmm_info = pos; + elems->wmm_info_len = elen; + } else if (pos[4] == 1) { + elems->wmm_param = pos; + elems->wmm_param_len = elen; + } + } + } + break; + case WLAN_EID_RSN: + elems->rsn = pos; + elems->rsn_len = elen; + break; + case WLAN_EID_ERP_INFO: + elems->erp_info = pos; + elems->erp_info_len = elen; + break; + case WLAN_EID_EXT_SUPP_RATES: + elems->ext_supp_rates = pos; + elems->ext_supp_rates_len = 
elen; + break; + case WLAN_EID_HT_CAPABILITY: + if (elen >= sizeof(struct ieee80211_ht_cap)) + elems->ht_cap_elem = (void *)pos; + break; + case WLAN_EID_HT_INFORMATION: + if (elen >= sizeof(struct ieee80211_ht_info)) + elems->ht_info_elem = (void *)pos; + break; + case WLAN_EID_MESH_ID: + elems->mesh_id = pos; + elems->mesh_id_len = elen; + break; + case WLAN_EID_MESH_CONFIG: + if (elen >= sizeof(struct ieee80211_meshconf_ie)) + elems->mesh_config = (void *)pos; + break; + case WLAN_EID_PEER_LINK: + elems->peer_link = pos; + elems->peer_link_len = elen; + break; + case WLAN_EID_PREQ: + elems->preq = pos; + elems->preq_len = elen; + break; + case WLAN_EID_PREP: + elems->prep = pos; + elems->prep_len = elen; + break; + case WLAN_EID_PERR: + elems->perr = pos; + elems->perr_len = elen; + break; + case WLAN_EID_RANN: + if (elen >= sizeof(struct ieee80211_rann_ie)) + elems->rann = (void *)pos; + break; + case WLAN_EID_CHANNEL_SWITCH: + elems->ch_switch_elem = pos; + elems->ch_switch_elem_len = elen; + break; + case WLAN_EID_QUIET: + if (!elems->quiet_elem) { + elems->quiet_elem = pos; + elems->quiet_elem_len = elen; + } + elems->num_of_quiet_elem++; + break; + case WLAN_EID_COUNTRY: + elems->country_elem = pos; + elems->country_elem_len = elen; + break; + case WLAN_EID_PWR_CONSTRAINT: + elems->pwr_constr_elem = pos; + elems->pwr_constr_elem_len = elen; + break; + case WLAN_EID_TIMEOUT_INTERVAL: + elems->timeout_int = pos; + elems->timeout_int_len = elen; + break; + default: + break; + } + + left -= elen; + pos += elen; + } + + return crc; +} + +void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_queue_params qparam; + int queue; + bool use_11b; + int aCWmin, aCWmax; + + if (!local->ops->conf_tx) + return; + + memset(&qparam, 0, sizeof(qparam)); + + use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) && + !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); + + for (queue = 0; queue < local_to_hw(local)->queues; queue++) { + /* Set defaults according to 802.11-2007 Table 7-37 */ + aCWmax = 1023; + if (use_11b) + aCWmin = 31; + else + aCWmin = 15; + + switch (queue) { + case 3: /* AC_BK */ + qparam.cw_max = aCWmax; + qparam.cw_min = aCWmin; + qparam.txop = 0; + qparam.aifs = 7; + break; + default: /* never happens but let's not leave undefined */ + case 2: /* AC_BE */ + qparam.cw_max = aCWmax; + qparam.cw_min = aCWmin; + qparam.txop = 0; + qparam.aifs = 3; + break; + case 1: /* AC_VI */ + qparam.cw_max = aCWmin; + qparam.cw_min = (aCWmin + 1) / 2 - 1; + if (use_11b) + qparam.txop = 6016/32; + else + qparam.txop = 3008/32; + qparam.aifs = 2; + break; + case 0: /* AC_VO */ + qparam.cw_max = (aCWmin + 1) / 2 - 1; + qparam.cw_min = (aCWmin + 1) / 4 - 1; + if (use_11b) + qparam.txop = 3264/32; + else + qparam.txop = 1504/32; + qparam.aifs = 2; + break; + } + + qparam.uapsd = false; + + drv_conf_tx(local, queue, &qparam); + } +} + +void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, + const size_t supp_rates_len, + const u8 *supp_rates) +{ + struct ieee80211_local *local = sdata->local; + int i, have_higher_than_11mbit = 0; + + /* cf. 
IEEE 802.11 9.2.12 */ + for (i = 0; i < supp_rates_len; i++) + if ((supp_rates[i] & 0x7f) * 5 > 110) + have_higher_than_11mbit = 1; + + if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && + have_higher_than_11mbit) + sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; + else + sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; + + ieee80211_set_wmm_default(sdata); +} + +u32 ieee80211_mandatory_rates(struct ieee80211_local *local, + enum ieee80211_band band) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_rate *bitrates; + u32 mandatory_rates; + enum ieee80211_rate_flags mandatory_flag; + int i; + + sband = local->hw.wiphy->bands[band]; + if (!sband) { + WARN_ON(1); + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + } + + if (band == IEEE80211_BAND_2GHZ) + mandatory_flag = IEEE80211_RATE_MANDATORY_B; + else + mandatory_flag = IEEE80211_RATE_MANDATORY_A; + + bitrates = sband->bitrates; + mandatory_rates = 0; + for (i = 0; i < sband->n_bitrates; i++) + if (bitrates[i].flags & mandatory_flag) + mandatory_rates |= BIT(i); + return mandatory_rates; +} + +void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, + u16 transaction, u16 auth_alg, + u8 *extra, size_t extra_len, const u8 *bssid, + const u8 *key, u8 key_len, u8 key_idx) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + int err; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + + sizeof(*mgmt) + 6 + extra_len); + if (!skb) { + printk(KERN_DEBUG "%s: failed to allocate buffer for auth " + "frame\n", sdata->name); + return; + } + skb_reserve(skb, local->hw.extra_tx_headroom); + + mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); + memset(mgmt, 0, 24 + 6); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_AUTH); + memcpy(mgmt->da, bssid, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, bssid, ETH_ALEN); + mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); + mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); + mgmt->u.auth.status_code = cpu_to_le16(0); + if (extra) + memcpy(skb_put(skb, extra_len), extra, extra_len); + + if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) { + mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx); + WARN_ON(err); + } + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, skb); +} + +int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, + const u8 *ie, size_t ie_len, + enum ieee80211_band band) +{ + struct ieee80211_supported_band *sband; + u8 *pos; + size_t offset = 0, noffset; + int supp_rates_len, i; + + sband = local->hw.wiphy->bands[band]; + + pos = buffer; + + supp_rates_len = min_t(int, sband->n_bitrates, 8); + + *pos++ = WLAN_EID_SUPP_RATES; + *pos++ = supp_rates_len; + + for (i = 0; i < supp_rates_len; i++) { + int rate = sband->bitrates[i].bitrate; + *pos++ = (u8) (rate / 5); + } + + /* insert "request information" if in custom IEs */ + if (ie && ie_len) { + static const u8 before_extrates[] = { + WLAN_EID_SSID, + WLAN_EID_SUPP_RATES, + WLAN_EID_REQUEST, + }; + noffset = ieee80211_ie_split(ie, ie_len, + before_extrates, + ARRAY_SIZE(before_extrates), + offset); + memcpy(pos, ie + offset, noffset - offset); + pos += noffset - offset; + offset = noffset; + } + + if (sband->n_bitrates > i) { + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos++ = sband->n_bitrates - i; + + for (; i < sband->n_bitrates; i++) { + 
int rate = sband->bitrates[i].bitrate; + *pos++ = (u8) (rate / 5); + } + } + + /* insert custom IEs that go before HT */ + if (ie && ie_len) { + static const u8 before_ht[] = { + WLAN_EID_SSID, + WLAN_EID_SUPP_RATES, + WLAN_EID_REQUEST, + WLAN_EID_EXT_SUPP_RATES, + WLAN_EID_DS_PARAMS, + WLAN_EID_SUPPORTED_REGULATORY_CLASSES, + }; + noffset = ieee80211_ie_split(ie, ie_len, + before_ht, ARRAY_SIZE(before_ht), + offset); + memcpy(pos, ie + offset, noffset - offset); + pos += noffset - offset; + offset = noffset; + } + + if (sband->ht_cap.ht_supported) { + u16 cap = sband->ht_cap.cap; + __le16 tmp; + + if (ieee80211_disable_40mhz_24ghz && + sband->band == IEEE80211_BAND_2GHZ) { + cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + cap &= ~IEEE80211_HT_CAP_SGI_40; + } + + *pos++ = WLAN_EID_HT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_ht_cap); + memset(pos, 0, sizeof(struct ieee80211_ht_cap)); + tmp = cpu_to_le16(cap); + memcpy(pos, &tmp, sizeof(u16)); + pos += sizeof(u16); + *pos++ = sband->ht_cap.ampdu_factor | + (sband->ht_cap.ampdu_density << + IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); + memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); + pos += sizeof(sband->ht_cap.mcs); + pos += 2 + 4 + 1; /* ext info, BF cap, antsel */ + } + + /* + * If adding more here, adjust code in main.c + * that calculates local->scan_ies_len. + */ + + /* add any remaining custom IEs */ + if (ie && ie_len) { + noffset = ie_len; + memcpy(pos, ie + offset, noffset - offset); + pos += noffset - offset; + } + + return pos - buffer; +} + +void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, + const u8 *ssid, size_t ssid_len, + const u8 *ie, size_t ie_len) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct ieee80211_mgmt *mgmt; + size_t buf_len; + u8 *buf; + + /* FIXME: come up with a proper value */ + buf = kmalloc(200 + ie_len, GFP_KERNEL); + if (!buf) { + printk(KERN_DEBUG "%s: failed to allocate temporary IE " + "buffer\n", sdata->name); + return; + } + + buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, + local->hw.conf.channel->band); + + skb = ieee80211_probereq_get(&local->hw, &sdata->vif, + ssid, ssid_len, + buf, buf_len); + + if (dst) { + mgmt = (struct ieee80211_mgmt *) skb->data; + memcpy(mgmt->da, dst, ETH_ALEN); + memcpy(mgmt->bssid, dst, ETH_ALEN); + } + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, skb); + kfree(buf); +} + +u32 ieee80211_sta_get_rates(struct ieee80211_local *local, + struct ieee802_11_elems *elems, + enum ieee80211_band band) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_rate *bitrates; + size_t num_rates; + u32 supp_rates; + int i, j; + sband = local->hw.wiphy->bands[band]; + + if (!sband) { + WARN_ON(1); + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + } + + bitrates = sband->bitrates; + num_rates = sband->n_bitrates; + supp_rates = 0; + for (i = 0; i < elems->supp_rates_len + + elems->ext_supp_rates_len; i++) { + u8 rate = 0; + int own_rate; + if (i < elems->supp_rates_len) + rate = elems->supp_rates[i]; + else if (elems->ext_supp_rates) + rate = elems->ext_supp_rates + [i - elems->supp_rates_len]; + own_rate = 5 * (rate & 0x7f); + for (j = 0; j < num_rates; j++) + if (bitrates[j].bitrate == own_rate) + supp_rates |= BIT(j); + } + return supp_rates; +} + +void ieee80211_stop_device(struct ieee80211_local *local) +{ + ieee80211_led_radio(local, false); + + cancel_work_sync(&local->reconfig_filter); + + flush_workqueue(local->workqueue); + 
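Both ieee80211_build_preq_ies() and ieee80211_sta_get_rates() above rely on the Supported Rates IE encoding: each octet carries the rate in 500 kbps units in its low 7 bits, with the high bit marking a basic rate, while mac80211 keeps bitrates in 100 kbps units, hence the rate / 5 and 5 * (rate & 0x7f) conversions. A standalone round-trip sketch, illustrative only:

#include <stdint.h>
#include <stdio.h>

/* mac80211 keeps bitrates in units of 100 kbps */
static uint8_t rate_to_ie(int bitrate_100kbps, int basic)
{
	return (uint8_t)(bitrate_100kbps / 5) | (basic ? 0x80 : 0x00);
}

static int ie_to_rate(uint8_t octet)
{
	return 5 * (octet & 0x7f);	/* strip the basic-rate flag */
}

int main(void)
{
	uint8_t ie = rate_to_ie(540, 0);	/* 54 Mbit/s        -> 0x6c */
	uint8_t basic = rate_to_ie(10, 1);	/* 1 Mbit/s, basic  -> 0x82 */

	printf("54 Mbit/s       -> 0x%02x -> %d (x100 kbps)\n", ie, ie_to_rate(ie));
	printf("1 Mbit/s basic  -> 0x%02x -> %d (x100 kbps)\n", basic, ie_to_rate(basic));
	return 0;
}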
drv_stop(local); +} + +int ieee80211_reconfig(struct ieee80211_local *local) +{ + struct ieee80211_hw *hw = &local->hw; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + int res; + + if (local->suspended) + local->resuming = true; + + /* restart hardware */ + if (local->open_count) { + /* + * Upon resume hardware can sometimes be goofy due to + * various platform / driver / bus issues, so restarting + * the device may at times not work immediately. Propagate + * the error. + */ + res = drv_start(local); + if (res) { + WARN(local->suspended, "Harware became unavailable " + "upon resume. This is could be a software issue" + "prior to suspend or a hardware issue\n"); + return res; + } + + ieee80211_led_radio(local, true); + } + + /* add interfaces */ + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && + sdata->vif.type != NL80211_IFTYPE_MONITOR && + ieee80211_sdata_running(sdata)) + res = drv_add_interface(local, &sdata->vif); + } + + /* add STAs back */ + mutex_lock(&local->sta_mtx); + list_for_each_entry(sta, &local->sta_list, list) { + if (sta->uploaded) { + sdata = sta->sdata; + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + + WARN_ON(drv_sta_add(local, sdata, &sta->sta)); + } + } + mutex_unlock(&local->sta_mtx); + + /* Clear Suspend state so that ADDBA requests can be processed */ + + rcu_read_lock(); + + if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { + list_for_each_entry_rcu(sta, &local->sta_list, list) { + clear_sta_flags(sta, WLAN_STA_SUSPEND); + } + } + + rcu_read_unlock(); + + /* setup RTS threshold */ + drv_set_rts_threshold(local, hw->wiphy->rts_threshold); + + /* reconfigure hardware */ + ieee80211_hw_config(local, ~0); + + ieee80211_configure_filter(local); + + /* Finally also reconfigure all the BSS information */ + list_for_each_entry(sdata, &local->interfaces, list) { + u32 changed = ~0; + if (!ieee80211_sdata_running(sdata)) + continue; + switch (sdata->vif.type) { + case NL80211_IFTYPE_STATION: + /* disable beacon change bits */ + changed &= ~(BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED); + /* fall through */ + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + ieee80211_bss_info_change_notify(sdata, changed); + break; + case NL80211_IFTYPE_WDS: + break; + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_MONITOR: + /* ignore virtual */ + break; + case NL80211_IFTYPE_UNSPECIFIED: + case __NL80211_IFTYPE_AFTER_LAST: + WARN_ON(1); + break; + } + } + + rcu_read_lock(); + if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { + list_for_each_entry_rcu(sta, &local->sta_list, list) { + ieee80211_sta_tear_down_BA_sessions(sta); + } + } + rcu_read_unlock(); + + /* add back keys */ + list_for_each_entry(sdata, &local->interfaces, list) + if (ieee80211_sdata_running(sdata)) + ieee80211_enable_keys(sdata); + + ieee80211_wake_queues_by_reason(hw, + IEEE80211_QUEUE_STOP_REASON_SUSPEND); + + /* + * If this is for hw restart things are still running. + * We may want to change that later, however. 
+ */ + if (!local->suspended) + return 0; + +#ifdef CONFIG_PM + /* first set suspended false, then resuming */ + local->suspended = false; + mb(); + local->resuming = false; + + list_for_each_entry(sdata, &local->interfaces, list) { + switch(sdata->vif.type) { + case NL80211_IFTYPE_STATION: + ieee80211_sta_restart(sdata); + break; + case NL80211_IFTYPE_ADHOC: + ieee80211_ibss_restart(sdata); + break; + case NL80211_IFTYPE_MESH_POINT: + ieee80211_mesh_restart(sdata); + break; + default: + break; + } + } + + add_timer(&local->sta_cleanup); + + mutex_lock(&local->sta_mtx); + list_for_each_entry(sta, &local->sta_list, list) + mesh_plink_restart(sta); + mutex_unlock(&local->sta_mtx); +#else + WARN_ON(1); +#endif + return 0; +} + +static int check_mgd_smps(struct ieee80211_if_managed *ifmgd, + enum ieee80211_smps_mode *smps_mode) +{ + if (ifmgd->associated) { + *smps_mode = ifmgd->ap_smps; + + if (*smps_mode == IEEE80211_SMPS_AUTOMATIC) { + if (ifmgd->powersave) + *smps_mode = IEEE80211_SMPS_DYNAMIC; + else + *smps_mode = IEEE80211_SMPS_OFF; + } + + return 1; + } + + return 0; +} + +/* must hold iflist_mtx */ +void ieee80211_recalc_smps(struct ieee80211_local *local, + struct ieee80211_sub_if_data *forsdata) +{ + struct ieee80211_sub_if_data *sdata; + enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF; + int count = 0; + + if (forsdata) + WARN_ON(!mutex_is_locked(&forsdata->u.mgd.mtx)); + + WARN_ON(!mutex_is_locked(&local->iflist_mtx)); + + /* + * This function could be improved to handle multiple + * interfaces better, but right now it makes any + * non-station interfaces force SM PS to be turned + * off. If there are multiple station interfaces it + * could also use the best possible mode, e.g. if + * one is in static and the other in dynamic then + * dynamic is ok. + */ + + list_for_each_entry(sdata, &local->interfaces, list) { + if (!netif_running(sdata->dev)) + continue; + if (sdata->vif.type != NL80211_IFTYPE_STATION) + goto set; + if (sdata != forsdata) { + /* + * This nested is ok -- we are holding the iflist_mtx + * so can't get here twice or so. But it's required + * since normally we acquire it first and then the + * iflist_mtx. + */ + mutex_lock_nested(&sdata->u.mgd.mtx, SINGLE_DEPTH_NESTING); + count += check_mgd_smps(&sdata->u.mgd, &smps_mode); + mutex_unlock(&sdata->u.mgd.mtx); + } else + count += check_mgd_smps(&sdata->u.mgd, &smps_mode); + + if (count > 1) { + smps_mode = IEEE80211_SMPS_OFF; + break; + } + } + + if (smps_mode == local->smps_mode) + return; + + set: + local->smps_mode = smps_mode; + /* changed flag is auto-detected for this */ + ieee80211_hw_config(local, 0); +} + +static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id) +{ + int i; + + for (i = 0; i < n_ids; i++) + if (ids[i] == id) + return true; + return false; +} + +/** + * ieee80211_ie_split - split an IE buffer according to ordering + * + * @ies: the IE buffer + * @ielen: the length of the IE buffer + * @ids: an array with element IDs that are allowed before + * the split + * @n_ids: the size of the element ID array + * @offset: offset where to start splitting in the buffer + * + * This function splits an IE buffer by updating the @offset + * variable to point to the location where the buffer should be + * split. + * + * It assumes that the given IE buffer is well-formed, this + * has to be guaranteed by the caller! + * + * It also assumes that the IEs in the buffer are ordered + * correctly, if not the result of using this function will not + * be ordered correctly either, i.e. 
it does no reordering. + * + * The function returns the offset where the next part of the + * buffer starts, which may be @ielen if the entire (remainder) + * of the buffer should be used. + */ +size_t ieee80211_ie_split(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, size_t offset) +{ + size_t pos = offset; + + while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos])) + pos += 2 + ies[pos + 1]; + + return pos; +} + +size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset) +{ + size_t pos = offset; + + while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC) + pos += 2 + ies[pos + 1]; + + return pos; +} diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c new file mode 100644 index 0000000..5d745f2 --- /dev/null +++ b/net/mac80211/wep.c @@ -0,0 +1,340 @@ +/* + * Software WEP encryption implementation + * Copyright 2002, Jouni Malinen + * Copyright 2003, Instant802 Networks, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "ieee80211_i.h" +#include "wep.h" + + +int ieee80211_wep_init(struct ieee80211_local *local) +{ + /* start WEP IV from a random value */ + get_random_bytes(&local->wep_iv, WEP_IV_LEN); + + local->wep_tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(local->wep_tx_tfm)) + return PTR_ERR(local->wep_tx_tfm); + + local->wep_rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(local->wep_rx_tfm)) { + crypto_free_blkcipher(local->wep_tx_tfm); + return PTR_ERR(local->wep_rx_tfm); + } + + return 0; +} + +void ieee80211_wep_free(struct ieee80211_local *local) +{ + crypto_free_blkcipher(local->wep_tx_tfm); + crypto_free_blkcipher(local->wep_rx_tfm); +} + +static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) +{ + /* + * Fluhrer, Mantin, and Shamir have reported weaknesses in the + * key scheduling algorithm of RC4. At least IVs (KeyByte + 3, + * 0xff, N) can be used to speedup attacks, so avoid using them. 
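Concretely, the weak-IV check below treats a 24-bit IV as weak when its middle byte is 0xff and its top byte B falls in the range 3 <= B < 3 + keylen, i.e. the (B, 255, N) pattern from the Fluhrer-Mantin-Shamir analysis referenced above. A standalone worked example for a 104-bit (13-byte) key, illustrative only:

#include <stdint.h>
#include <stdio.h>

static int wep_weak_iv(uint32_t iv, int keylen)
{
	if ((iv & 0xff00) == 0xff00) {
		uint8_t B = (iv >> 16) & 0xff;
		if (B >= 3 && B < 3 + keylen)
			return 1;
	}
	return 0;
}

int main(void)
{
	int keylen = 13;	/* WEP-104 secret key length in bytes */

	/* IV bytes on air are (iv>>16, iv>>8, iv), as in ieee80211_wep_get_iv() */
	printf("IV 04:ff:12 weak: %d\n", wep_weak_iv(0x04ff12, keylen));	/* 1 */
	printf("IV 11:ff:12 weak: %d\n", wep_weak_iv(0x11ff12, keylen));	/* 0: 0x11 >= 3 + 13 */
	printf("IV 04:fe:12 weak: %d\n", wep_weak_iv(0x04fe12, keylen));	/* 0: middle byte not 0xff */
	return 0;
}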
+ */ + if ((iv & 0xff00) == 0xff00) { + u8 B = (iv >> 16) & 0xff; + if (B >= 3 && B < 3 + keylen) + return true; + } + return false; +} + + +static void ieee80211_wep_get_iv(struct ieee80211_local *local, + int keylen, int keyidx, u8 *iv) +{ + local->wep_iv++; + if (ieee80211_wep_weak_iv(local->wep_iv, keylen)) + local->wep_iv += 0x0100; + + if (!iv) + return; + + *iv++ = (local->wep_iv >> 16) & 0xff; + *iv++ = (local->wep_iv >> 8) & 0xff; + *iv++ = local->wep_iv & 0xff; + *iv++ = keyidx << 6; +} + + +static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, + struct sk_buff *skb, + int keylen, int keyidx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + unsigned int hdrlen; + u8 *newhdr; + + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + + if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN || + skb_headroom(skb) < WEP_IV_LEN)) + return NULL; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + newhdr = skb_push(skb, WEP_IV_LEN); + memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); + ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); + return newhdr + hdrlen; +} + + +static void ieee80211_wep_remove_iv(struct ieee80211_local *local, + struct sk_buff *skb, + struct ieee80211_key *key) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + unsigned int hdrlen; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, WEP_IV_LEN); +} + + +/* Perform WEP encryption using given key. data buffer must have tailroom + * for 4-byte ICV. data_len must not include this ICV. Note: this function + * does _not_ add IV. data = RC4(data | CRC32(data)) */ +void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, + size_t klen, u8 *data, size_t data_len) +{ + struct blkcipher_desc desc = { .tfm = tfm }; + struct scatterlist sg; + __le32 icv; + + icv = cpu_to_le32(~crc32_le(~0, data, data_len)); + put_unaligned(icv, (__le32 *)(data + data_len)); + + crypto_blkcipher_setkey(tfm, rc4key, klen); + sg_init_one(&sg, data, data_len + WEP_ICV_LEN); + crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length); +} + + +/* Perform WEP encryption on given skb. 4 bytes of extra space (IV) in the + * beginning of the buffer 4 bytes of extra space (ICV) in the end of the + * buffer will be added. Both IV and ICV will be transmitted, so the + * payload length increases with 8 bytes. + * + * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) + */ +int ieee80211_wep_encrypt(struct ieee80211_local *local, + struct sk_buff *skb, + const u8 *key, int keylen, int keyidx) +{ + u8 *iv; + size_t len; + u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; + + iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); + if (!iv) + return -1; + + len = skb->len - (iv + WEP_IV_LEN - skb->data); + + /* Prepend 24-bit IV to RC4 key */ + memcpy(rc4key, iv, 3); + + /* Copy rest of the WEP key (the secret part) */ + memcpy(rc4key + 3, key, keylen); + + /* Add room for ICV */ + skb_put(skb, WEP_ICV_LEN); + + ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, + iv + WEP_IV_LEN, len); + + return 0; +} + + +/* Perform WEP decryption using given key. data buffer includes encrypted + * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. + * Return 0 on success and -1 on ICV mismatch. 
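To summarize the layout used by ieee80211_wep_encrypt() above: a 4-byte IV (3 IV octets plus a key-index octet) is inserted after the 802.11 header, the RC4 seed is those 3 IV octets followed by the 5- or 13-byte secret key, and a 4-byte ICV (CRC-32 of the plaintext) is appended before encryption. A standalone sketch of the seed and overhead bookkeeping (no actual RC4 or CRC here, just the framing; illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WEP_IV_LEN  4	/* 3 IV octets + key index octet */
#define WEP_ICV_LEN 4	/* CRC-32 of the plaintext, appended before RC4 */

/* Build the RC4 seed: 24-bit IV followed by the secret key. */
static size_t wep_seed(uint8_t *seed, const uint8_t iv[3],
		       const uint8_t *key, size_t keylen)
{
	memcpy(seed, iv, 3);
	memcpy(seed + 3, key, keylen);
	return 3 + keylen;
}

int main(void)
{
	uint8_t key[13] = { 0 };		/* WEP-104 secret key */
	uint8_t iv[3] = { 0x12, 0x34, 0x56 };
	uint8_t seed[3 + sizeof(key)];
	size_t payload = 100;			/* plaintext bytes after the header */
	size_t slen = wep_seed(seed, iv, key, sizeof(key));

	printf("RC4 seed length: %zu bytes\n", slen);			/* 16 */
	printf("on-air expansion: +%d bytes (IV + ICV)\n",
	       WEP_IV_LEN + WEP_ICV_LEN);
	printf("encrypted region: %zu bytes (payload + ICV)\n",
	       payload + WEP_ICV_LEN);
	return 0;
}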
*/ +int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, + size_t klen, u8 *data, size_t data_len) +{ + struct blkcipher_desc desc = { .tfm = tfm }; + struct scatterlist sg; + __le32 crc; + + crypto_blkcipher_setkey(tfm, rc4key, klen); + sg_init_one(&sg, data, data_len + WEP_ICV_LEN); + crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length); + + crc = cpu_to_le32(~crc32_le(~0, data, data_len)); + if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0) + /* ICV mismatch */ + return -1; + + return 0; +} + + +/* Perform WEP decryption on given skb. Buffer includes whole WEP part of + * the frame: IV (4 bytes), encrypted payload (including SNAP header), + * ICV (4 bytes). skb->len includes both IV and ICV. + * + * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on + * failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload + * is moved to the beginning of the skb and skb length will be reduced. + */ +static int ieee80211_wep_decrypt(struct ieee80211_local *local, + struct sk_buff *skb, + struct ieee80211_key *key) +{ + u32 klen; + u8 *rc4key; + u8 keyidx; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + unsigned int hdrlen; + size_t len; + int ret = 0; + + if (!ieee80211_has_protected(hdr->frame_control)) + return -1; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN) + return -1; + + len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN; + + keyidx = skb->data[hdrlen + 3] >> 6; + + if (!key || keyidx != key->conf.keyidx || key->conf.alg != ALG_WEP) + return -1; + + klen = 3 + key->conf.keylen; + + rc4key = kmalloc(klen, GFP_ATOMIC); + if (!rc4key) + return -1; + + /* Prepend 24-bit IV to RC4 key */ + memcpy(rc4key, skb->data + hdrlen, 3); + + /* Copy rest of the WEP key (the secret part) */ + memcpy(rc4key + 3, key->conf.key, key->conf.keylen); + + if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, + skb->data + hdrlen + WEP_IV_LEN, + len)) + ret = -1; + + kfree(rc4key); + + /* Trim ICV */ + skb_trim(skb, skb->len - WEP_ICV_LEN); + + /* Remove IV */ + memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, WEP_IV_LEN); + + return ret; +} + + +bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + unsigned int hdrlen; + u8 *ivpos; + u32 iv; + + if (!ieee80211_has_protected(hdr->frame_control)) + return false; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + ivpos = skb->data + hdrlen; + iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2]; + + return ieee80211_wep_weak_iv(iv, key->conf.keylen); +} + +ieee80211_rx_result +ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (!ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_auth(hdr->frame_control)) + return RX_CONTINUE; + + if (!(status->flag & RX_FLAG_DECRYPTED)) { + if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) + return RX_DROP_UNUSABLE; + } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) { + ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); + /* remove ICV */ + skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN); + } + + return RX_CONTINUE; +} + +static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if 
(!info->control.hw_key) { + if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, + tx->key->conf.keylen, + tx->key->conf.keyidx)) + return -1; + } else if (info->control.hw_key->flags & + IEEE80211_KEY_FLAG_GENERATE_IV) { + if (!ieee80211_wep_add_iv(tx->local, skb, + tx->key->conf.keylen, + tx->key->conf.keyidx)) + return -1; + } + + return 0; +} + +ieee80211_tx_result +ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb; + + ieee80211_tx_set_protected(tx); + + skb = tx->skb; + do { + if (wep_encrypt_skb(tx, skb) < 0) { + I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); + return TX_DROP; + } + } while ((skb = skb->next)); + + return TX_CONTINUE; +} diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h new file mode 100644 index 0000000..fe29d7e --- /dev/null +++ b/net/mac80211/wep.h @@ -0,0 +1,35 @@ +/* + * Software WEP encryption implementation + * Copyright 2002, Jouni Malinen + * Copyright 2003, Instant802 Networks, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef WEP_H +#define WEP_H + +#include +#include +#include "ieee80211_i.h" +#include "key.h" + +int ieee80211_wep_init(struct ieee80211_local *local); +void ieee80211_wep_free(struct ieee80211_local *local); +void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, + size_t klen, u8 *data, size_t data_len); +int ieee80211_wep_encrypt(struct ieee80211_local *local, + struct sk_buff *skb, + const u8 *key, int keylen, int keyidx); +int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, + size_t klen, u8 *data, size_t data_len); +bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); + +ieee80211_rx_result +ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx); +ieee80211_tx_result +ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx); + +#endif /* WEP_H */ diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c new file mode 100644 index 0000000..34e6d02 --- /dev/null +++ b/net/mac80211/wme.c @@ -0,0 +1,159 @@ +/* + * Copyright 2004, Instant802 Networks, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include "ieee80211_i.h" +#include "wme.h" + +/* Default mapping in classifier to work with default + * queue setup. + */ +const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; + +static int wme_downgrade_ac(struct sk_buff *skb) +{ + switch (skb->priority) { + case 6: + case 7: + skb->priority = 5; /* VO -> VI */ + return 0; + case 4: + case 5: + skb->priority = 3; /* VI -> BE */ + return 0; + case 0: + case 3: + skb->priority = 2; /* BE -> BK */ + return 0; + default: + return -1; + } +} + + +/* Indicate which queue to use. 
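+ * Maps the frame's 802.1d priority to one of the four ACs via + * ieee802_1d_to_ac[]; peers without WME and hardware with fewer than + * four queues fall back to priority 0.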
*/ +u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct sta_info *sta = NULL; + u32 sta_flags = 0; + const u8 *ra = NULL; + bool qos = false; + + if (local->hw.queues < 4 || skb->len < 6) { + skb->priority = 0; /* required for correct WPA/11i MIC */ + return min_t(u16, local->hw.queues - 1, + ieee802_1d_to_ac[skb->priority]); + } + + rcu_read_lock(); + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: + rcu_read_lock(); + sta = rcu_dereference(sdata->u.vlan.sta); + if (sta) + sta_flags = get_sta_flags(sta); + rcu_read_unlock(); + if (sta) + break; + case NL80211_IFTYPE_AP: + ra = skb->data; + break; + case NL80211_IFTYPE_WDS: + ra = sdata->u.wds.remote_addr; + break; +#ifdef CONFIG_MAC80211_MESH + case NL80211_IFTYPE_MESH_POINT: + /* + * XXX: This is clearly broken ... but already was before, + * because ieee80211_fill_mesh_addresses() would clear A1 + * except for multicast addresses. + */ + break; +#endif + case NL80211_IFTYPE_STATION: + ra = sdata->u.mgd.bssid; + break; + case NL80211_IFTYPE_ADHOC: + ra = skb->data; + break; + default: + break; + } + + if (!sta && ra && !is_multicast_ether_addr(ra)) { + sta = sta_info_get(sdata, ra); + if (sta) + sta_flags = get_sta_flags(sta); + } + + if (sta_flags & WLAN_STA_WME) + qos = true; + + rcu_read_unlock(); + + if (!qos) { + skb->priority = 0; /* required for correct WPA/11i MIC */ + return ieee802_1d_to_ac[skb->priority]; + } + + /* use the data classifier to determine what 802.1d tag the + * data frame has */ + skb->priority = cfg80211_classify8021d(skb); + + return ieee80211_downgrade_queue(local, skb); +} + +u16 ieee80211_downgrade_queue(struct ieee80211_local *local, + struct sk_buff *skb) +{ + /* in case we are a client verify acm is not set for this ac */ + while (unlikely(local->wmm_acm & BIT(skb->priority))) { + if (wme_downgrade_ac(skb)) { + /* + * This should not really happen. The AP has marked all + * lower ACs to require admission control which is not + * a reasonable configuration. Allow the frame to be + * transmitted using AC_BK as a workaround. + */ + break; + } + } + + /* look up which queue to use for frames with this 1d tag */ + return ieee802_1d_to_ac[skb->priority]; +} + +void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + + /* Fill in the QoS header if there is one. */ + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *p = ieee80211_get_qos_ctl(hdr); + u8 ack_policy = 0, tid; + + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + + if (unlikely(local->wifi_wme_noack_test)) + ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << + QOS_CONTROL_ACK_POLICY_SHIFT; + /* qos header is 2 bytes, second reserved */ + *p++ = ack_policy | tid; + *p = 0; + } +} diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h new file mode 100644 index 0000000..6053b1c --- /dev/null +++ b/net/mac80211/wme.h @@ -0,0 +1,30 @@ +/* + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _WME_H +#define _WME_H + +#include +#include "ieee80211_i.h" + +#define QOS_CONTROL_ACK_POLICY_NORMAL 0 +#define QOS_CONTROL_ACK_POLICY_NOACK 1 + +#define QOS_CONTROL_ACK_POLICY_SHIFT 5 + +extern const int ieee802_1d_to_ac[8]; + +u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb); +u16 ieee80211_downgrade_queue(struct ieee80211_local *local, + struct sk_buff *skb); + + +#endif /* _WME_H */ diff --git a/net/mac80211/work.c b/net/mac80211/work.c new file mode 100644 index 0000000..1e1ea30 --- /dev/null +++ b/net/mac80211/work.c @@ -0,0 +1,1100 @@ +/* + * mac80211 work implementation + * + * Copyright 2003-2008, Jouni Malinen + * Copyright 2004, Instant802 Networks, Inc. + * Copyright 2005, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007, Michael Wu + * Copyright 2009, Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ieee80211_i.h" +#include "rate.h" + +#define IEEE80211_AUTH_TIMEOUT (HZ / 5) +#define IEEE80211_AUTH_MAX_TRIES 3 +#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) +#define IEEE80211_ASSOC_MAX_TRIES 3 +#define IEEE80211_MAX_PROBE_TRIES 5 + +enum work_action { + WORK_ACT_NONE, + WORK_ACT_TIMEOUT, + WORK_ACT_DONE, +}; + + +/* utils */ +static inline void ASSERT_WORK_MTX(struct ieee80211_local *local) +{ + WARN_ON(!mutex_is_locked(&local->work_mtx)); +} + +/* + * We can have multiple work items (and connection probing) + * scheduling this timer, but we need to take care to only + * reschedule it when it should fire _earlier_ than it was + * asked for before, or if it's not pending right now. This + * function ensures that. Note that it then is required to + * run this function for all timeouts after the first one + * has happened -- the work that runs from this timer will + * do that. 
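+ * (That is, the timer is only ever moved to fire earlier, never later; + * subsequent timeouts are re-armed from ieee80211_work_work() itself.)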
+ */ +static void run_again(struct ieee80211_local *local, + unsigned long timeout) +{ + ASSERT_WORK_MTX(local); + + if (!timer_pending(&local->work_timer) || + time_before(timeout, local->work_timer.expires)) + mod_timer(&local->work_timer, timeout); +} + +static void work_free_rcu(struct rcu_head *head) +{ + struct ieee80211_work *wk = + container_of(head, struct ieee80211_work, rcu_head); + + kfree(wk); +} + +void free_work(struct ieee80211_work *wk) +{ + call_rcu(&wk->rcu_head, work_free_rcu); +} + +static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len, + struct ieee80211_supported_band *sband, + u32 *rates) +{ + int i, j, count; + *rates = 0; + count = 0; + for (i = 0; i < supp_rates_len; i++) { + int rate = (supp_rates[i] & 0x7F) * 5; + + for (j = 0; j < sband->n_bitrates; j++) + if (sband->bitrates[j].bitrate == rate) { + *rates |= BIT(j); + count++; + break; + } + } + + return count; +} + +/* frame sending functions */ + +static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie, + struct ieee80211_supported_band *sband, + struct ieee80211_channel *channel, + enum ieee80211_smps_mode smps) +{ + struct ieee80211_ht_info *ht_info; + u8 *pos; + u32 flags = channel->flags; + u16 cap = sband->ht_cap.cap; + __le16 tmp; + + if (!sband->ht_cap.ht_supported) + return; + + if (!ht_info_ie) + return; + + if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info)) + return; + + ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2); + + /* determine capability flags */ + + if (ieee80211_disable_40mhz_24ghz && + sband->band == IEEE80211_BAND_2GHZ) { + cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + cap &= ~IEEE80211_HT_CAP_SGI_40; + } + + switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + if (flags & IEEE80211_CHAN_NO_HT40PLUS) { + cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + cap &= ~IEEE80211_HT_CAP_SGI_40; + } + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + if (flags & IEEE80211_CHAN_NO_HT40MINUS) { + cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + cap &= ~IEEE80211_HT_CAP_SGI_40; + } + break; + } + + /* set SM PS mode properly */ + cap &= ~IEEE80211_HT_CAP_SM_PS; + switch (smps) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_NUM_MODES: + WARN_ON(1); + case IEEE80211_SMPS_OFF: + cap |= WLAN_HT_CAP_SM_PS_DISABLED << + IEEE80211_HT_CAP_SM_PS_SHIFT; + break; + case IEEE80211_SMPS_STATIC: + cap |= WLAN_HT_CAP_SM_PS_STATIC << + IEEE80211_HT_CAP_SM_PS_SHIFT; + break; + case IEEE80211_SMPS_DYNAMIC: + cap |= WLAN_HT_CAP_SM_PS_DYNAMIC << + IEEE80211_HT_CAP_SM_PS_SHIFT; + break; + } + + /* reserve and fill IE */ + + pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); + *pos++ = WLAN_EID_HT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_ht_cap); + memset(pos, 0, sizeof(struct ieee80211_ht_cap)); + + /* capability flags */ + tmp = cpu_to_le16(cap); + memcpy(pos, &tmp, sizeof(u16)); + pos += sizeof(u16); + + /* AMPDU parameters */ + *pos++ = sband->ht_cap.ampdu_factor | + (sband->ht_cap.ampdu_density << + IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); + + /* MCS set */ + memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); + pos += sizeof(sband->ht_cap.mcs); + + /* extended capabilities */ + pos += sizeof(__le16); + + /* BF capabilities */ + pos += sizeof(__le32); + + /* antenna selection */ + pos += sizeof(u8); +} + +static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, + struct ieee80211_work *wk) +{ + struct ieee80211_local *local = sdata->local; + struct sk_buff *skb; + struct 
ieee80211_mgmt *mgmt; + u8 *pos, qos_info; + const u8 *ies; + size_t offset = 0, noffset; + int i, len, count, rates_len, supp_rates_len; + u16 capab; + struct ieee80211_supported_band *sband; + u32 rates = 0; + + sband = local->hw.wiphy->bands[wk->chan->band]; + + /* + * Get all rates supported by the device and the AP as + * some APs don't like getting a superset of their rates + * in the association request (e.g. D-Link DAP 1353 in + * b-only mode)... + */ + rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates, + wk->assoc.supp_rates_len, + sband, &rates); + + skb = alloc_skb(local->hw.extra_tx_headroom + + sizeof(*mgmt) + /* bit too much but doesn't matter */ + 2 + wk->assoc.ssid_len + /* SSID */ + 4 + rates_len + /* (extended) rates */ + 4 + /* power capability */ + 2 + 2 * sband->n_channels + /* supported channels */ + 2 + sizeof(struct ieee80211_ht_cap) + /* HT */ + wk->ie_len + /* extra IEs */ + 9, /* WMM */ + GFP_KERNEL); + if (!skb) { + printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " + "frame\n", sdata->name); + return; + } + skb_reserve(skb, local->hw.extra_tx_headroom); + + capab = WLAN_CAPABILITY_ESS; + + if (sband->band == IEEE80211_BAND_2GHZ) { + if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)) + capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME; + if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)) + capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; + } + + if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY) + capab |= WLAN_CAPABILITY_PRIVACY; + + if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) && + (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)) + capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; + + mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); + memset(mgmt, 0, 24); + memcpy(mgmt->da, wk->filter_ta, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN); + + if (!is_zero_ether_addr(wk->assoc.prev_bssid)) { + skb_put(skb, 10); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_REASSOC_REQ); + mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); + mgmt->u.reassoc_req.listen_interval = + cpu_to_le16(local->hw.conf.listen_interval); + memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid, + ETH_ALEN); + } else { + skb_put(skb, 4); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ASSOC_REQ); + mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); + mgmt->u.assoc_req.listen_interval = + cpu_to_le16(local->hw.conf.listen_interval); + } + + /* SSID */ + ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len); + *pos++ = WLAN_EID_SSID; + *pos++ = wk->assoc.ssid_len; + memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len); + + /* add all rates which were marked to be used above */ + supp_rates_len = rates_len; + if (supp_rates_len > 8) + supp_rates_len = 8; + + len = sband->n_bitrates; + pos = skb_put(skb, supp_rates_len + 2); + *pos++ = WLAN_EID_SUPP_RATES; + *pos++ = supp_rates_len; + + count = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if (BIT(i) & rates) { + int rate = sband->bitrates[i].bitrate; + *pos++ = (u8) (rate / 5); + if (++count == 8) + break; + } + } + + if (rates_len > count) { + pos = skb_put(skb, rates_len - count + 2); + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos++ = rates_len - count; + + for (i++; i < sband->n_bitrates; i++) { + if (BIT(i) & rates) { + int rate = sband->bitrates[i].bitrate; + *pos++ = (u8) (rate / 5); + } + } + } + + if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) { + /* 1. 
power capabilities */ + pos = skb_put(skb, 4); + *pos++ = WLAN_EID_PWR_CAPABILITY; + *pos++ = 2; + *pos++ = 0; /* min tx power */ + *pos++ = wk->chan->max_power; /* max tx power */ + + /* 2. supported channels */ + /* TODO: get this in reg domain format */ + pos = skb_put(skb, 2 * sband->n_channels + 2); + *pos++ = WLAN_EID_SUPPORTED_CHANNELS; + *pos++ = 2 * sband->n_channels; + for (i = 0; i < sband->n_channels; i++) { + *pos++ = ieee80211_frequency_to_channel( + sband->channels[i].center_freq); + *pos++ = 1; /* one channel in the subband*/ + } + } + + /* if present, add any custom IEs that go before HT */ + if (wk->ie_len && wk->ie) { + static const u8 before_ht[] = { + WLAN_EID_SSID, + WLAN_EID_SUPP_RATES, + WLAN_EID_EXT_SUPP_RATES, + WLAN_EID_PWR_CAPABILITY, + WLAN_EID_SUPPORTED_CHANNELS, + WLAN_EID_RSN, + WLAN_EID_QOS_CAPA, + WLAN_EID_RRM_ENABLED_CAPABILITIES, + WLAN_EID_MOBILITY_DOMAIN, + WLAN_EID_SUPPORTED_REGULATORY_CLASSES, + }; + noffset = ieee80211_ie_split(wk->ie, wk->ie_len, + before_ht, ARRAY_SIZE(before_ht), + offset); + pos = skb_put(skb, noffset - offset); + memcpy(pos, wk->ie + offset, noffset - offset); + offset = noffset; + } + + if (wk->assoc.use_11n && wk->assoc.wmm_used && + local->hw.queues >= 4) + ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie, + sband, wk->chan, wk->assoc.smps); + + /* if present, add any custom non-vendor IEs that go after HT */ + if (wk->ie_len && wk->ie) { + noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len, + offset); + pos = skb_put(skb, noffset - offset); + memcpy(pos, wk->ie + offset, noffset - offset); + offset = noffset; + } + + if (wk->assoc.wmm_used && local->hw.queues >= 4) { + if (wk->assoc.uapsd_used) { + qos_info = local->uapsd_queues; + qos_info |= (local->uapsd_max_sp_len << + IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT); + } else { + qos_info = 0; + } + + pos = skb_put(skb, 9); + *pos++ = WLAN_EID_VENDOR_SPECIFIC; + *pos++ = 7; /* len */ + *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */ + *pos++ = 0x50; + *pos++ = 0xf2; + *pos++ = 2; /* WME */ + *pos++ = 0; /* WME info */ + *pos++ = 1; /* WME ver */ + *pos++ = qos_info; + } + + /* add any remaining custom (i.e. vendor specific here) IEs */ + if (wk->ie_len && wk->ie) { + noffset = wk->ie_len; + pos = skb_put(skb, noffset - offset); + memcpy(pos, wk->ie + offset, noffset - offset); + } + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; + ieee80211_tx_skb(sdata, skb); +} + +static void ieee80211_remove_auth_bss(struct ieee80211_local *local, + struct ieee80211_work *wk) +{ + struct cfg80211_bss *cbss; + u16 capa_val = WLAN_CAPABILITY_ESS; + + if (wk->probe_auth.privacy) + capa_val |= WLAN_CAPABILITY_PRIVACY; + + cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta, + wk->probe_auth.ssid, wk->probe_auth.ssid_len, + WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY, + capa_val); + if (!cbss) + return; + + cfg80211_unlink_bss(local->hw.wiphy, cbss); + cfg80211_put_bss(cbss); +} + +static enum work_action __must_check +ieee80211_direct_probe(struct ieee80211_work *wk) +{ + struct ieee80211_sub_if_data *sdata = wk->sdata; + struct ieee80211_local *local = sdata->local; + + wk->probe_auth.tries++; + if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) { + printk(KERN_DEBUG "%s: direct probe to %pM timed out\n", + sdata->name, wk->filter_ta); + + /* + * Most likely AP is not in the range so remove the + * bss struct for that AP. 
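+ * Unlinking it from cfg80211 keeps a later connect attempt from reusing + * the stale entry; a fresh scan will re-add the AP if it is reachable.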
+ */ + ieee80211_remove_auth_bss(local, wk); + + return WORK_ACT_TIMEOUT; + } + + printk(KERN_DEBUG "%s: direct probe to %pM (try %d)\n", + sdata->name, wk->filter_ta, wk->probe_auth.tries); + + /* + * Direct probe is sent to broadcast address as some APs + * will not answer to direct packet in unassociated state. + */ + ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid, + wk->probe_auth.ssid_len, NULL, 0); + + wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; + run_again(local, wk->timeout); + + return WORK_ACT_NONE; +} + + +static enum work_action __must_check +ieee80211_authenticate(struct ieee80211_work *wk) +{ + struct ieee80211_sub_if_data *sdata = wk->sdata; + struct ieee80211_local *local = sdata->local; + + wk->probe_auth.tries++; + if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) { + printk(KERN_DEBUG "%s: authentication with %pM" + " timed out\n", sdata->name, wk->filter_ta); + + /* + * Most likely AP is not in the range so remove the + * bss struct for that AP. + */ + ieee80211_remove_auth_bss(local, wk); + + return WORK_ACT_TIMEOUT; + } + + printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n", + sdata->name, wk->filter_ta, wk->probe_auth.tries); + + ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie, + wk->ie_len, wk->filter_ta, NULL, 0, 0); + wk->probe_auth.transaction = 2; + + wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; + run_again(local, wk->timeout); + + return WORK_ACT_NONE; +} + +static enum work_action __must_check +ieee80211_associate(struct ieee80211_work *wk) +{ + struct ieee80211_sub_if_data *sdata = wk->sdata; + struct ieee80211_local *local = sdata->local; + + wk->assoc.tries++; + if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) { + printk(KERN_DEBUG "%s: association with %pM" + " timed out\n", + sdata->name, wk->filter_ta); + + /* + * Most likely AP is not in the range so remove the + * bss struct for that AP. + */ + if (wk->assoc.bss) + cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss); + + return WORK_ACT_TIMEOUT; + } + + printk(KERN_DEBUG "%s: associate with %pM (try %d)\n", + sdata->name, wk->filter_ta, wk->assoc.tries); + ieee80211_send_assoc(sdata, wk); + + wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; + run_again(local, wk->timeout); + + return WORK_ACT_NONE; +} + +static enum work_action __must_check +ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk) +{ + /* + * First time we run, do nothing -- the generic code will + * have switched to the right channel etc. 
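+ * We only arm the timeout and tell cfg80211 we are ready on channel; + * the second invocation then ends the remain-on-channel period.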
+ */ + if (!wk->started) { + wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration); + + cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk, + wk->chan, wk->chan_type, + wk->remain.duration, GFP_KERNEL); + + return WORK_ACT_NONE; + } + + return WORK_ACT_TIMEOUT; +} + +static void ieee80211_auth_challenge(struct ieee80211_work *wk, + struct ieee80211_mgmt *mgmt, + size_t len) +{ + struct ieee80211_sub_if_data *sdata = wk->sdata; + u8 *pos; + struct ieee802_11_elems elems; + + pos = mgmt->u.auth.variable; + ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); + if (!elems.challenge) + return; + ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm, + elems.challenge - 2, elems.challenge_len + 2, + wk->filter_ta, wk->probe_auth.key, + wk->probe_auth.key_len, wk->probe_auth.key_idx); + wk->probe_auth.transaction = 4; +} + +static enum work_action __must_check +ieee80211_rx_mgmt_auth(struct ieee80211_work *wk, + struct ieee80211_mgmt *mgmt, size_t len) +{ + u16 auth_alg, auth_transaction, status_code; + + if (wk->type != IEEE80211_WORK_AUTH) + return WORK_ACT_NONE; + + if (len < 24 + 6) + return WORK_ACT_NONE; + + auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); + auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); + status_code = le16_to_cpu(mgmt->u.auth.status_code); + + if (auth_alg != wk->probe_auth.algorithm || + auth_transaction != wk->probe_auth.transaction) + return WORK_ACT_NONE; + + if (status_code != WLAN_STATUS_SUCCESS) { + printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n", + wk->sdata->name, mgmt->sa, status_code); + return WORK_ACT_DONE; + } + + switch (wk->probe_auth.algorithm) { + case WLAN_AUTH_OPEN: + case WLAN_AUTH_LEAP: + case WLAN_AUTH_FT: + break; + case WLAN_AUTH_SHARED_KEY: + if (wk->probe_auth.transaction != 4) { + ieee80211_auth_challenge(wk, mgmt, len); + /* need another frame */ + return WORK_ACT_NONE; + } + break; + default: + WARN_ON(1); + return WORK_ACT_NONE; + } + + printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name); + return WORK_ACT_DONE; +} + +static enum work_action __must_check +ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk, + struct ieee80211_mgmt *mgmt, size_t len, + bool reassoc) +{ + struct ieee80211_sub_if_data *sdata = wk->sdata; + struct ieee80211_local *local = sdata->local; + u16 capab_info, status_code, aid; + struct ieee802_11_elems elems; + u8 *pos; + + /* + * AssocResp and ReassocResp have identical structure, so process both + * of them in this function. + */ + + if (len < 24 + 6) + return WORK_ACT_NONE; + + capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); + status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); + aid = le16_to_cpu(mgmt->u.assoc_resp.aid); + + printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x " + "status=%d aid=%d)\n", + sdata->name, reassoc ? 
"Rea" : "A", mgmt->sa, + capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); + + pos = mgmt->u.assoc_resp.variable; + ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); + + if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY && + elems.timeout_int && elems.timeout_int_len == 5 && + elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) { + u32 tu, ms; + tu = get_unaligned_le32(elems.timeout_int + 1); + ms = tu * 1024 / 1000; + printk(KERN_DEBUG "%s: %pM rejected association temporarily; " + "comeback duration %u TU (%u ms)\n", + sdata->name, mgmt->sa, tu, ms); + wk->timeout = jiffies + msecs_to_jiffies(ms); + if (ms > IEEE80211_ASSOC_TIMEOUT) + run_again(local, wk->timeout); + return WORK_ACT_NONE; + } + + if (status_code != WLAN_STATUS_SUCCESS) + printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n", + sdata->name, mgmt->sa, status_code); + else + printk(KERN_DEBUG "%s: associated\n", sdata->name); + + return WORK_ACT_DONE; +} + +static enum work_action __must_check +ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_sub_if_data *sdata = wk->sdata; + struct ieee80211_local *local = sdata->local; + size_t baselen; + + ASSERT_WORK_MTX(local); + + baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; + if (baselen > len) + return WORK_ACT_NONE; + + printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name); + return WORK_ACT_DONE; +} + +static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status; + struct ieee80211_mgmt *mgmt; + struct ieee80211_work *wk; + enum work_action rma = WORK_ACT_NONE; + u16 fc; + + rx_status = (struct ieee80211_rx_status *) skb->cb; + mgmt = (struct ieee80211_mgmt *) skb->data; + fc = le16_to_cpu(mgmt->frame_control); + + mutex_lock(&local->work_mtx); + + list_for_each_entry(wk, &local->work_list, list) { + const u8 *bssid = NULL; + + switch (wk->type) { + case IEEE80211_WORK_DIRECT_PROBE: + case IEEE80211_WORK_AUTH: + case IEEE80211_WORK_ASSOC: + bssid = wk->filter_ta; + break; + default: + continue; + } + + /* + * Before queuing, we already verified mgmt->sa, + * so this is needed just for matching. + */ + if (compare_ether_addr(bssid, mgmt->bssid)) + continue; + + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_PROBE_RESP: + rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len, + rx_status); + break; + case IEEE80211_STYPE_AUTH: + rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len); + break; + case IEEE80211_STYPE_ASSOC_RESP: + rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt, + skb->len, false); + break; + case IEEE80211_STYPE_REASSOC_RESP: + rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt, + skb->len, true); + break; + default: + WARN_ON(1); + } + /* + * We've processed this frame for that work, so it can't + * belong to another work struct. + * NB: this is also required for correctness for 'rma'! 
+ */ + break; + } + + switch (rma) { + case WORK_ACT_NONE: + break; + case WORK_ACT_DONE: + list_del_rcu(&wk->list); + break; + default: + WARN(1, "unexpected: %d", rma); + } + + mutex_unlock(&local->work_mtx); + + if (rma != WORK_ACT_DONE) + goto out; + + switch (wk->done(wk, skb)) { + case WORK_DONE_DESTROY: + free_work(wk); + break; + case WORK_DONE_REQUEUE: + synchronize_rcu(); + wk->started = false; /* restart */ + mutex_lock(&local->work_mtx); + list_add_tail(&wk->list, &local->work_list); + mutex_unlock(&local->work_mtx); + } + + out: + kfree_skb(skb); +} + +static void ieee80211_work_timer(unsigned long data) +{ + struct ieee80211_local *local = (void *) data; + + if (local->quiescing) + return; + + ieee80211_queue_work(&local->hw, &local->work_work); +} + +static void ieee80211_work_work(struct work_struct *work) +{ + struct ieee80211_local *local = + container_of(work, struct ieee80211_local, work_work); + struct sk_buff *skb; + struct ieee80211_work *wk, *tmp; + LIST_HEAD(free_work); + enum work_action rma; + bool remain_off_channel = false; + + if (local->scanning) + return; + + /* + * ieee80211_queue_work() should have picked up most cases, + * here we'll pick the the rest. + */ + if (WARN(local->suspended, "work scheduled while going to suspend\n")) + return; + + /* first process frames to avoid timing out while a frame is pending */ + while ((skb = skb_dequeue(&local->work_skb_queue))) + ieee80211_work_rx_queued_mgmt(local, skb); + + ieee80211_recalc_idle(local); + + mutex_lock(&local->work_mtx); + + list_for_each_entry_safe(wk, tmp, &local->work_list, list) { + bool started = wk->started; + + /* mark work as started if it's on the current off-channel */ + if (!started && local->tmp_channel && + wk->chan == local->tmp_channel && + wk->chan_type == local->tmp_channel_type) { + started = true; + wk->timeout = jiffies; + } + + if (!started && !local->tmp_channel) { + /* + * TODO: could optimize this by leaving the + * station vifs in awake mode if they + * happen to be on the same channel as + * the requested channel + */ + ieee80211_offchannel_stop_beaconing(local); + ieee80211_offchannel_stop_station(local); + + local->tmp_channel = wk->chan; + local->tmp_channel_type = wk->chan_type; + ieee80211_hw_config(local, 0); + started = true; + wk->timeout = jiffies; + } + + /* don't try to work with items that aren't started */ + if (!started) + continue; + + if (time_is_after_jiffies(wk->timeout)) { + /* + * This work item isn't supposed to be worked on + * right now, but take care to adjust the timer + * properly. 
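+ * (The timer may have fired for a different item, so re-arm it for + * this item's own timeout before skipping it.)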
+ */ + run_again(local, wk->timeout); + continue; + } + + switch (wk->type) { + default: + WARN_ON(1); + /* nothing */ + rma = WORK_ACT_NONE; + break; + case IEEE80211_WORK_ABORT: + rma = WORK_ACT_TIMEOUT; + break; + case IEEE80211_WORK_DIRECT_PROBE: + rma = ieee80211_direct_probe(wk); + break; + case IEEE80211_WORK_AUTH: + rma = ieee80211_authenticate(wk); + break; + case IEEE80211_WORK_ASSOC: + rma = ieee80211_associate(wk); + break; + case IEEE80211_WORK_REMAIN_ON_CHANNEL: + rma = ieee80211_remain_on_channel_timeout(wk); + break; + } + + wk->started = started; + + switch (rma) { + case WORK_ACT_NONE: + /* might have changed the timeout */ + run_again(local, wk->timeout); + break; + case WORK_ACT_TIMEOUT: + list_del_rcu(&wk->list); + synchronize_rcu(); + list_add(&wk->list, &free_work); + break; + default: + WARN(1, "unexpected: %d", rma); + } + } + + list_for_each_entry(wk, &local->work_list, list) { + if (!wk->started) + continue; + if (wk->chan != local->tmp_channel) + continue; + if (wk->chan_type != local->tmp_channel_type) + continue; + remain_off_channel = true; + } + + if (!remain_off_channel && local->tmp_channel) { + local->tmp_channel = NULL; + ieee80211_hw_config(local, 0); + ieee80211_offchannel_return(local, true); + /* give connection some time to breathe */ + run_again(local, jiffies + HZ/2); + } + + if (list_empty(&local->work_list) && local->scan_req) + ieee80211_queue_delayed_work(&local->hw, + &local->scan_work, + round_jiffies_relative(0)); + + mutex_unlock(&local->work_mtx); + + ieee80211_recalc_idle(local); + + list_for_each_entry_safe(wk, tmp, &free_work, list) { + wk->done(wk, NULL); + list_del(&wk->list); + kfree(wk); + } +} + +void ieee80211_add_work(struct ieee80211_work *wk) +{ + struct ieee80211_local *local; + + if (WARN_ON(!wk->chan)) + return; + + if (WARN_ON(!wk->sdata)) + return; + + if (WARN_ON(!wk->done)) + return; + + if (WARN_ON(!ieee80211_sdata_running(wk->sdata))) + return; + + wk->started = false; + + local = wk->sdata->local; + mutex_lock(&local->work_mtx); + list_add_tail(&wk->list, &local->work_list); + mutex_unlock(&local->work_mtx); + + ieee80211_queue_work(&local->hw, &local->work_work); +} + +void ieee80211_work_init(struct ieee80211_local *local) +{ + mutex_init(&local->work_mtx); + INIT_LIST_HEAD(&local->work_list); + setup_timer(&local->work_timer, ieee80211_work_timer, + (unsigned long)local); + INIT_WORK(&local->work_work, ieee80211_work_work); + skb_queue_head_init(&local->work_skb_queue); +} + +void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_work *wk; + + mutex_lock(&local->work_mtx); + list_for_each_entry(wk, &local->work_list, list) { + if (wk->sdata != sdata) + continue; + wk->type = IEEE80211_WORK_ABORT; + wk->started = true; + wk->timeout = jiffies; + } + mutex_unlock(&local->work_mtx); + + /* run cleanups etc. 
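+ * by calling the work function directly; any item flagged as ABORT + * above is timed out and freed before the list is re-checked below.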
*/ + ieee80211_work_work(&local->work_work); + + mutex_lock(&local->work_mtx); + list_for_each_entry(wk, &local->work_list, list) { + if (wk->sdata != sdata) + continue; + WARN_ON(1); + break; + } + mutex_unlock(&local->work_mtx); +} + +ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgmt *mgmt; + struct ieee80211_work *wk; + u16 fc; + + if (skb->len < 24) + return RX_DROP_MONITOR; + + mgmt = (struct ieee80211_mgmt *) skb->data; + fc = le16_to_cpu(mgmt->frame_control); + + list_for_each_entry_rcu(wk, &local->work_list, list) { + if (sdata != wk->sdata) + continue; + if (compare_ether_addr(wk->filter_ta, mgmt->sa)) + continue; + if (compare_ether_addr(wk->filter_ta, mgmt->bssid)) + continue; + + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_AUTH: + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_ASSOC_RESP: + case IEEE80211_STYPE_REASSOC_RESP: + skb_queue_tail(&local->work_skb_queue, skb); + ieee80211_queue_work(&local->hw, &local->work_work); + return RX_QUEUED; + } + } + + return RX_CONTINUE; +} + +static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk, + struct sk_buff *skb) +{ + /* + * We are done serving the remain-on-channel command. + */ + cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk, + wk->chan, wk->chan_type, + GFP_KERNEL); + + return WORK_DONE_DESTROY; +} + +int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int duration, u64 *cookie) +{ + struct ieee80211_work *wk; + + wk = kzalloc(sizeof(*wk), GFP_KERNEL); + if (!wk) + return -ENOMEM; + + wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL; + wk->chan = chan; + wk->chan_type = channel_type; + wk->sdata = sdata; + wk->done = ieee80211_remain_done; + + wk->remain.duration = duration; + + *cookie = (unsigned long) wk; + + ieee80211_add_work(wk); + + return 0; +} + +int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata, + u64 cookie) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_work *wk, *tmp; + bool found = false; + + mutex_lock(&local->work_mtx); + list_for_each_entry_safe(wk, tmp, &local->work_list, list) { + if ((unsigned long) wk == cookie) { + wk->timeout = jiffies; + found = true; + break; + } + } + mutex_unlock(&local->work_mtx); + + if (!found) + return -ENOENT; + + ieee80211_queue_work(&local->hw, &local->work_work); + + return 0; +} diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c new file mode 100644 index 0000000..f4971cd --- /dev/null +++ b/net/mac80211/wpa.c @@ -0,0 +1,602 @@ +/* + * Copyright 2002-2004, Instant802 Networks, Inc. + * Copyright 2008, Jouni Malinen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ieee80211_i.h" +#include "michael.h" +#include "tkip.h" +#include "aes_ccm.h" +#include "aes_cmac.h" +#include "wpa.h" + +ieee80211_tx_result +ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) +{ + u8 *data, *key, *mic, key_offset; + size_t data_len; + unsigned int hdrlen; + struct ieee80211_hdr *hdr; + struct sk_buff *skb = tx->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int authenticator; + int tail; + + hdr = (struct ieee80211_hdr *)skb->data; + if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || + !ieee80211_is_data_present(hdr->frame_control)) + return TX_CONTINUE; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (skb->len < hdrlen) + return TX_DROP; + + data = skb->data + hdrlen; + data_len = skb->len - hdrlen; + + if (info->control.hw_key && + !(tx->flags & IEEE80211_TX_FRAGMENTED) && + !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { + /* hwaccel - with no need for SW-generated MMIC */ + return TX_CONTINUE; + } + + tail = MICHAEL_MIC_LEN; + if (!info->control.hw_key) + tail += TKIP_ICV_LEN; + + if (WARN_ON(skb_tailroom(skb) < tail || + skb_headroom(skb) < TKIP_IV_LEN)) + return TX_DROP; + +#if 0 + authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */ +#else + authenticator = 1; +#endif + key_offset = authenticator ? + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY : + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY; + key = &tx->key->conf.key[key_offset]; + mic = skb_put(skb, MICHAEL_MIC_LEN); + michael_mic(key, hdr, data, data_len, mic); + + return TX_CONTINUE; +} + + +ieee80211_rx_result +ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) +{ + u8 *data, *key = NULL, key_offset; + size_t data_len; + unsigned int hdrlen; + u8 mic[MICHAEL_MIC_LEN]; + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int authenticator = 1, wpa_test = 0; + + /* No way to verify the MIC if the hardware stripped it */ + if (status->flag & RX_FLAG_MMIC_STRIPPED) + return RX_CONTINUE; + + if (!rx->key || rx->key->conf.alg != ALG_TKIP || + !ieee80211_has_protected(hdr->frame_control) || + !ieee80211_is_data_present(hdr->frame_control)) + return RX_CONTINUE; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (skb->len < hdrlen + MICHAEL_MIC_LEN) + return RX_DROP_UNUSABLE; + + data = skb->data + hdrlen; + data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; + +#if 0 + authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */ +#else + authenticator = 1; +#endif + key_offset = authenticator ? 
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY : + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY; + key = &rx->key->conf.key[key_offset]; + michael_mic(key, hdr, data, data_len, mic); + if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { + if (!(rx->flags & IEEE80211_RX_RA_MATCH)) + return RX_DROP_UNUSABLE; + + mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx, + (void *) skb->data, NULL, + GFP_ATOMIC); + return RX_DROP_UNUSABLE; + } + + /* remove Michael MIC from payload */ + skb_trim(skb, skb->len - MICHAEL_MIC_LEN); + + /* update IV in key information to be able to detect replays */ + rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32; + rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16; + + return RX_CONTINUE; +} + + +static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_key *key = tx->key; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + unsigned int hdrlen; + int len, tail; + u8 *pos; + + if (info->control.hw_key && + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { + /* hwaccel - with no need for software-generated IV */ + return 0; + } + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + len = skb->len - hdrlen; + + if (info->control.hw_key) + tail = 0; + else + tail = TKIP_ICV_LEN; + + if (WARN_ON(skb_tailroom(skb) < tail || + skb_headroom(skb) < TKIP_IV_LEN)) + return -1; + + pos = skb_push(skb, TKIP_IV_LEN); + memmove(pos, pos + TKIP_IV_LEN, hdrlen); + pos += hdrlen; + + /* Increase IV for the frame */ + key->u.tkip.tx.iv16++; + if (key->u.tkip.tx.iv16 == 0) + key->u.tkip.tx.iv32++; + + pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16); + + /* hwaccel - with software IV */ + if (info->control.hw_key) + return 0; + + /* Add room for ICV */ + skb_put(skb, TKIP_ICV_LEN); + + hdr = (struct ieee80211_hdr *) skb->data; + ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, + key, pos, len, hdr->addr2); + return 0; +} + + +ieee80211_tx_result +ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb = tx->skb; + + ieee80211_tx_set_protected(tx); + + do { + if (tkip_encrypt_skb(tx, skb) < 0) + return TX_DROP; + } while ((skb = skb->next)); + + return TX_CONTINUE; +} + + +ieee80211_rx_result +ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; + int hdrlen, res, hwaccel = 0, wpa_test = 0; + struct ieee80211_key *key = rx->key; + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (!ieee80211_is_data(hdr->frame_control)) + return RX_CONTINUE; + + if (!rx->sta || skb->len - hdrlen < 12) + return RX_DROP_UNUSABLE; + + if (status->flag & RX_FLAG_DECRYPTED) { + if (status->flag & RX_FLAG_IV_STRIPPED) { + /* + * Hardware took care of all processing, including + * replay protection, and stripped the ICV/IV so + * we cannot do any checks here. 
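+ * (If only RX_FLAG_DECRYPTED is set, the TKIP code below still runs in + * IV-check-only mode so replays are detected.)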
+ */ + return RX_CONTINUE; + } + + /* let TKIP code verify IV, but skip decryption */ + hwaccel = 1; + } + + res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, + key, skb->data + hdrlen, + skb->len - hdrlen, rx->sta->sta.addr, + hdr->addr1, hwaccel, rx->queue, + &rx->tkip_iv32, + &rx->tkip_iv16); + if (res != TKIP_DECRYPT_OK || wpa_test) + return RX_DROP_UNUSABLE; + + /* Trim ICV */ + skb_trim(skb, skb->len - TKIP_ICV_LEN); + + /* Remove IV */ + memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, TKIP_IV_LEN); + + return RX_CONTINUE; +} + + +static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch, + int encrypted) +{ + __le16 mask_fc; + int a4_included, mgmt; + u8 qos_tid; + u8 *b_0, *aad; + u16 data_len, len_a; + unsigned int hdrlen; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + b_0 = scratch + 3 * AES_BLOCK_LEN; + aad = scratch + 4 * AES_BLOCK_LEN; + + /* + * Mask FC: zero subtype b4 b5 b6 (if not mgmt) + * Retry, PwrMgt, MoreData; set Protected + */ + mgmt = ieee80211_is_mgmt(hdr->frame_control); + mask_fc = hdr->frame_control; + mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | + IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); + if (!mgmt) + mask_fc &= ~cpu_to_le16(0x0070); + mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + len_a = hdrlen - 2; + a4_included = ieee80211_has_a4(hdr->frame_control); + + if (ieee80211_is_data_qos(hdr->frame_control)) + qos_tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + else + qos_tid = 0; + + data_len = skb->len - hdrlen - CCMP_HDR_LEN; + if (encrypted) + data_len -= CCMP_MIC_LEN; + + /* First block, b_0 */ + b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */ + /* Nonce: Nonce Flags | A2 | PN + * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7) + */ + b_0[1] = qos_tid | (mgmt << 4); + memcpy(&b_0[2], hdr->addr2, ETH_ALEN); + memcpy(&b_0[8], pn, CCMP_PN_LEN); + /* l(m) */ + put_unaligned_be16(data_len, &b_0[14]); + + /* AAD (extra authenticate-only data) / masked 802.11 header + * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ + put_unaligned_be16(len_a, &aad[0]); + put_unaligned(mask_fc, (__le16 *)&aad[2]); + memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN); + + /* Mask Seq#, leave Frag# */ + aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; + aad[23] = 0; + + if (a4_included) { + memcpy(&aad[24], hdr->addr4, ETH_ALEN); + aad[30] = qos_tid; + aad[31] = 0; + } else { + memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN); + aad[24] = qos_tid; + } +} + + +static inline void ccmp_pn2hdr(u8 *hdr, u8 *pn, int key_id) +{ + hdr[0] = pn[5]; + hdr[1] = pn[4]; + hdr[2] = 0; + hdr[3] = 0x20 | (key_id << 6); + hdr[4] = pn[3]; + hdr[5] = pn[2]; + hdr[6] = pn[1]; + hdr[7] = pn[0]; +} + + +static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr) +{ + pn[0] = hdr[7]; + pn[1] = hdr[6]; + pn[2] = hdr[5]; + pn[3] = hdr[4]; + pn[4] = hdr[1]; + pn[5] = hdr[0]; +} + + +static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_key *key = tx->key; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int hdrlen, len, tail; + u8 *pos, *pn; + int i; + + if (info->control.hw_key && + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { + /* + * hwaccel has no need for preallocated room for CCMP + * header or MIC fields + */ + return 0; + } + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + len = skb->len - hdrlen; + + if 
(info->control.hw_key) + tail = 0; + else + tail = CCMP_MIC_LEN; + + if (WARN_ON(skb_tailroom(skb) < tail || + skb_headroom(skb) < CCMP_HDR_LEN)) + return -1; + + pos = skb_push(skb, CCMP_HDR_LEN); + memmove(pos, pos + CCMP_HDR_LEN, hdrlen); + hdr = (struct ieee80211_hdr *) pos; + pos += hdrlen; + + /* PN = PN + 1 */ + pn = key->u.ccmp.tx_pn; + + for (i = CCMP_PN_LEN - 1; i >= 0; i--) { + pn[i]++; + if (pn[i]) + break; + } + + ccmp_pn2hdr(pos, pn, key->conf.keyidx); + + /* hwaccel - with software CCMP header */ + if (info->control.hw_key) + return 0; + + pos += CCMP_HDR_LEN; + ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0); + ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, key->u.ccmp.tx_crypto_buf, pos, len, + pos, skb_put(skb, CCMP_MIC_LEN)); + + return 0; +} + + +ieee80211_tx_result +ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb = tx->skb; + + ieee80211_tx_set_protected(tx); + + do { + if (ccmp_encrypt_skb(tx, skb) < 0) + return TX_DROP; + } while ((skb = skb->next)); + + return TX_CONTINUE; +} + + +ieee80211_rx_result +ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; + int hdrlen; + struct ieee80211_key *key = rx->key; + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + u8 pn[CCMP_PN_LEN]; + int data_len; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (!ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_robust_mgmt_frame(hdr)) + return RX_CONTINUE; + + data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; + if (!rx->sta || data_len < 0) + return RX_DROP_UNUSABLE; + + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + + ccmp_hdr2pn(pn, skb->data + hdrlen); + + if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { + key->u.ccmp.replays++; + return RX_DROP_UNUSABLE; + } + + if (!(status->flag & RX_FLAG_DECRYPTED)) { + /* hardware didn't decrypt/verify MIC */ + ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1); + + if (ieee80211_aes_ccm_decrypt( + key->u.ccmp.tfm, key->u.ccmp.rx_crypto_buf, + skb->data + hdrlen + CCMP_HDR_LEN, data_len, + skb->data + skb->len - CCMP_MIC_LEN, + skb->data + hdrlen + CCMP_HDR_LEN)) + return RX_DROP_UNUSABLE; + } + + memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN); + + /* Remove CCMP header and MIC */ + skb_trim(skb, skb->len - CCMP_MIC_LEN); + memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); + skb_pull(skb, CCMP_HDR_LEN); + + return RX_CONTINUE; +} + + +static void bip_aad(struct sk_buff *skb, u8 *aad) +{ + /* BIP AAD: FC(masked) || A1 || A2 || A3 */ + + /* FC type/subtype */ + aad[0] = skb->data[0]; + /* Mask FC Retry, PwrMgt, MoreData flags to zero */ + aad[1] = skb->data[1] & ~(BIT(4) | BIT(5) | BIT(6)); + /* A1 || A2 || A3 */ + memcpy(aad + 2, skb->data + 4, 3 * ETH_ALEN); +} + + +static inline void bip_ipn_swap(u8 *d, const u8 *s) +{ + *d++ = s[5]; + *d++ = s[4]; + *d++ = s[3]; + *d++ = s[2]; + *d++ = s[1]; + *d = s[0]; +} + + +ieee80211_tx_result +ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) +{ + struct sk_buff *skb = tx->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_key *key = tx->key; + struct ieee80211_mmie *mmie; + u8 *pn, aad[20]; + int i; + + if (info->control.hw_key) + return 0; + + if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) + return TX_DROP; + + mmie = (struct ieee80211_mmie *) skb_put(skb, sizeof(*mmie)); 
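+ /* The MMIE carries the key index, the 6-byte IPN and an 8-byte + * AES-128-CMAC MIC computed over the masked frame header and body. */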
+ mmie->element_id = WLAN_EID_MMIE; + mmie->length = sizeof(*mmie) - 2; + mmie->key_id = cpu_to_le16(key->conf.keyidx); + + /* PN = PN + 1 */ + pn = key->u.aes_cmac.tx_pn; + + for (i = sizeof(key->u.aes_cmac.tx_pn) - 1; i >= 0; i--) { + pn[i]++; + if (pn[i]) + break; + } + bip_ipn_swap(mmie->sequence_number, pn); + + bip_aad(skb, aad); + + /* + * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) + */ + ieee80211_aes_cmac(key->u.aes_cmac.tfm, key->u.aes_cmac.tx_crypto_buf, + aad, skb->data + 24, skb->len - 24, mmie->mic); + + return TX_CONTINUE; +} + + +ieee80211_rx_result +ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) +{ + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_key *key = rx->key; + struct ieee80211_mmie *mmie; + u8 aad[20], mic[8], ipn[6]; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (!ieee80211_is_mgmt(hdr->frame_control)) + return RX_CONTINUE; + + if ((status->flag & RX_FLAG_DECRYPTED) && + (status->flag & RX_FLAG_IV_STRIPPED)) + return RX_CONTINUE; + + if (skb->len < 24 + sizeof(*mmie)) + return RX_DROP_UNUSABLE; + + mmie = (struct ieee80211_mmie *) + (skb->data + skb->len - sizeof(*mmie)); + if (mmie->element_id != WLAN_EID_MMIE || + mmie->length != sizeof(*mmie) - 2) + return RX_DROP_UNUSABLE; /* Invalid MMIE */ + + bip_ipn_swap(ipn, mmie->sequence_number); + + if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { + key->u.aes_cmac.replays++; + return RX_DROP_UNUSABLE; + } + + if (!(status->flag & RX_FLAG_DECRYPTED)) { + /* hardware didn't decrypt/verify MIC */ + bip_aad(skb, aad); + ieee80211_aes_cmac(key->u.aes_cmac.tfm, + key->u.aes_cmac.rx_crypto_buf, aad, + skb->data + 24, skb->len - 24, mic); + if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { + key->u.aes_cmac.icverrors++; + return RX_DROP_UNUSABLE; + } + } + + memcpy(key->u.aes_cmac.rx_pn, ipn, 6); + + /* Remove MMIE */ + skb_trim(skb, skb->len - sizeof(*mmie)); + + return RX_CONTINUE; +} diff --git a/net/mac80211/wpa.h b/net/mac80211/wpa.h new file mode 100644 index 0000000..baba060 --- /dev/null +++ b/net/mac80211/wpa.h @@ -0,0 +1,36 @@ +/* + * Copyright 2002-2004, Instant802 Networks, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef WPA_H +#define WPA_H + +#include +#include +#include "ieee80211_i.h" + +ieee80211_tx_result +ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx); +ieee80211_rx_result +ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx); + +ieee80211_tx_result +ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx); +ieee80211_rx_result +ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx); + +ieee80211_tx_result +ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx); +ieee80211_rx_result +ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx); + +ieee80211_tx_result +ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx); +ieee80211_rx_result +ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx); + +#endif /* WPA_H */ diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile new file mode 100644 index 0000000..dc32c41 --- /dev/null +++ b/net/rfkill/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the RF switch subsystem. 
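+# (linked into rfkill_backport.o, presumably to avoid clashing with an +# in-kernel rfkill build)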
+# + +rfkill_backport-y += core.o +rfkill_backport-$(CONFIG_RFKILL_BACKPORT_INPUT) += input.o +obj-$(CONFIG_RFKILL_BACKPORT) += rfkill_backport.o diff --git a/net/rfkill/core.c b/net/rfkill/core.c new file mode 100644 index 0000000..15d366a --- /dev/null +++ b/net/rfkill/core.c @@ -0,0 +1,1252 @@ +/* + * Copyright (C) 2006 - 2007 Ivo van Doorn + * Copyright (C) 2007 Dmitry Torokhov + * Copyright 2009 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rfkill.h" + +#define POLL_INTERVAL (5 * HZ) + +#define RFKILL_BLOCK_HW BIT(0) +#define RFKILL_BLOCK_SW BIT(1) +#define RFKILL_BLOCK_SW_PREV BIT(2) +#define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\ + RFKILL_BLOCK_SW |\ + RFKILL_BLOCK_SW_PREV) +#define RFKILL_BLOCK_SW_SETCALL BIT(31) + +struct rfkill { + spinlock_t lock; + + const char *name; + enum rfkill_type type; + + unsigned long state; + + u32 idx; + + bool registered; + bool persistent; + + const struct rfkill_ops *ops; + void *data; + +#ifdef CONFIG_RFKILL_BACKPORT_LEDS + struct led_trigger led_trigger; + const char *ledtrigname; +#endif + + struct device dev; + struct list_head node; + + struct delayed_work poll_work; + struct work_struct uevent_work; + struct work_struct sync_work; +}; +#define to_rfkill(d) container_of(d, struct rfkill, dev) + +struct rfkill_int_event { + struct list_head list; + struct rfkill_event ev; +}; + +struct rfkill_data { + struct list_head list; + struct list_head events; + struct mutex mtx; + wait_queue_head_t read_wait; + bool input_handler; +}; + + +MODULE_AUTHOR("Ivo van Doorn "); +MODULE_AUTHOR("Johannes Berg "); +MODULE_DESCRIPTION("RF switch support"); +MODULE_LICENSE("GPL"); + + +/* + * The locking here should be made much smarter, we currently have + * a bit of a stupid situation because drivers might want to register + * the rfkill struct under their own lock, and take this lock during + * rfkill method calls -- which will cause an AB-BA deadlock situation. + * + * To fix that, we need to rework this code here to be mostly lock-free + * and only use the mutex for list manipulations, not to protect the + * various other global variables. Then we can avoid holding the mutex + * around driver operations, and all is happy. 
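+ * (For now, callers hold rfkill_global_mutex around list walks and + * rfkill_set_block(), while the per-rfkill spinlock protects 'state'.)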
+ */ +static LIST_HEAD(rfkill_list); /* list of registered rf switches */ +static DEFINE_MUTEX(rfkill_global_mutex); +static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */ + +static unsigned int rfkill_default_state = 1; +module_param_named(default_state, rfkill_default_state, uint, 0444); +MODULE_PARM_DESC(default_state, + "Default initial state for all radio types, 0 = radio off"); + +static struct { + bool cur, sav; +} rfkill_global_states[NUM_RFKILL_TYPES]; + +static bool rfkill_epo_lock_active; + + +#ifdef CONFIG_RFKILL_BACKPORT_LEDS +static void rfkill_led_trigger_event(struct rfkill *rfkill) +{ + struct led_trigger *trigger; + + if (!rfkill->registered) + return; + + trigger = &rfkill->led_trigger; + + if (rfkill->state & RFKILL_BLOCK_ANY) + led_trigger_event(trigger, LED_OFF); + else + led_trigger_event(trigger, LED_FULL); +} + +static void rfkill_led_trigger_activate(struct led_classdev *led) +{ + struct rfkill *rfkill; + + rfkill = container_of(led->trigger, struct rfkill, led_trigger); + + rfkill_led_trigger_event(rfkill); +} + +const char *rfkill_get_led_trigger_name(struct rfkill *rfkill) +{ + return rfkill->led_trigger.name; +} +EXPORT_SYMBOL(rfkill_get_led_trigger_name); + +void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name) +{ + BUG_ON(!rfkill); + + rfkill->ledtrigname = name; +} +EXPORT_SYMBOL(rfkill_set_led_trigger_name); + +static int rfkill_led_trigger_register(struct rfkill *rfkill) +{ + rfkill->led_trigger.name = rfkill->ledtrigname + ? : dev_name(&rfkill->dev); + rfkill->led_trigger.activate = rfkill_led_trigger_activate; + return led_trigger_register(&rfkill->led_trigger); +} + +static void rfkill_led_trigger_unregister(struct rfkill *rfkill) +{ + led_trigger_unregister(&rfkill->led_trigger); +} +#else +static void rfkill_led_trigger_event(struct rfkill *rfkill) +{ +} + +static inline int rfkill_led_trigger_register(struct rfkill *rfkill) +{ + return 0; +} + +static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill) +{ +} +#endif /* CONFIG_RFKILL_LEDS */ + +static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill, + enum rfkill_operation op) +{ + unsigned long flags; + + ev->idx = rfkill->idx; + ev->type = rfkill->type; + ev->op = op; + + spin_lock_irqsave(&rfkill->lock, flags); + ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW); + ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW | + RFKILL_BLOCK_SW_PREV)); + spin_unlock_irqrestore(&rfkill->lock, flags); +} + +static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op) +{ + struct rfkill_data *data; + struct rfkill_int_event *ev; + + list_for_each_entry(data, &rfkill_fds, list) { + ev = kzalloc(sizeof(*ev), GFP_KERNEL); + if (!ev) + continue; + rfkill_fill_event(&ev->ev, rfkill, op); + mutex_lock(&data->mtx); + list_add_tail(&ev->list, &data->events); + mutex_unlock(&data->mtx); + wake_up_interruptible(&data->read_wait); + } +} + +static void rfkill_event(struct rfkill *rfkill) +{ + if (!rfkill->registered) + return; + + kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); + + /* also send event to /dev/rfkill */ + rfkill_send_events(rfkill, RFKILL_OP_CHANGE); +} + +static bool __rfkill_set_hw_state(struct rfkill *rfkill, + bool blocked, bool *change) +{ + unsigned long flags; + bool prev, any; + + BUG_ON(!rfkill); + + spin_lock_irqsave(&rfkill->lock, flags); + prev = !!(rfkill->state & RFKILL_BLOCK_HW); + if (blocked) + rfkill->state |= RFKILL_BLOCK_HW; + else + rfkill->state &= ~RFKILL_BLOCK_HW; + *change = prev != blocked; + any = 
rfkill->state & RFKILL_BLOCK_ANY; + spin_unlock_irqrestore(&rfkill->lock, flags); + + rfkill_led_trigger_event(rfkill); + + return any; +} + +/** + * rfkill_set_block - wrapper for set_block method + * + * @rfkill: the rfkill struct to use + * @blocked: the new software state + * + * Calls the set_block method (when applicable) and handles notifications + * etc. as well. + */ +static void rfkill_set_block(struct rfkill *rfkill, bool blocked) +{ + unsigned long flags; + int err; + + if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) + return; + + /* + * Some platforms (...!) generate input events which affect the + * _hard_ kill state -- whenever something tries to change the + * current software state query the hardware state too. + */ + if (rfkill->ops->query) + rfkill->ops->query(rfkill, rfkill->data); + + spin_lock_irqsave(&rfkill->lock, flags); + if (rfkill->state & RFKILL_BLOCK_SW) + rfkill->state |= RFKILL_BLOCK_SW_PREV; + else + rfkill->state &= ~RFKILL_BLOCK_SW_PREV; + + if (blocked) + rfkill->state |= RFKILL_BLOCK_SW; + else + rfkill->state &= ~RFKILL_BLOCK_SW; + + rfkill->state |= RFKILL_BLOCK_SW_SETCALL; + spin_unlock_irqrestore(&rfkill->lock, flags); + + err = rfkill->ops->set_block(rfkill->data, blocked); + + spin_lock_irqsave(&rfkill->lock, flags); + if (err) { + /* + * Failed -- reset status to _prev, this may be different + * from what set set _PREV to earlier in this function + * if rfkill_set_sw_state was invoked. + */ + if (rfkill->state & RFKILL_BLOCK_SW_PREV) + rfkill->state |= RFKILL_BLOCK_SW; + else + rfkill->state &= ~RFKILL_BLOCK_SW; + } + rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL; + rfkill->state &= ~RFKILL_BLOCK_SW_PREV; + spin_unlock_irqrestore(&rfkill->lock, flags); + + rfkill_led_trigger_event(rfkill); + rfkill_event(rfkill); +} + +#ifdef CONFIG_RFKILL_BACKPORT_INPUT +static atomic_t rfkill_input_disabled = ATOMIC_INIT(0); + +/** + * __rfkill_switch_all - Toggle state of all switches of given type + * @type: type of interfaces to be affected + * @state: the new state + * + * This function sets the state of all switches of given type, + * unless a specific switch is claimed by userspace (in which case, + * that switch is left alone) or suspended. + * + * Caller must have acquired rfkill_global_mutex. + */ +static void __rfkill_switch_all(const enum rfkill_type type, bool blocked) +{ + struct rfkill *rfkill; + + rfkill_global_states[type].cur = blocked; + list_for_each_entry(rfkill, &rfkill_list, node) { + if (rfkill->type != type) + continue; + + rfkill_set_block(rfkill, blocked); + } +} + +/** + * rfkill_switch_all - Toggle state of all switches of given type + * @type: type of interfaces to be affected + * @state: the new state + * + * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). + * Please refer to __rfkill_switch_all() for details. + * + * Does nothing if the EPO lock is active. + */ +void rfkill_switch_all(enum rfkill_type type, bool blocked) +{ + if (atomic_read(&rfkill_input_disabled)) + return; + + mutex_lock(&rfkill_global_mutex); + + if (!rfkill_epo_lock_active) + __rfkill_switch_all(type, blocked); + + mutex_unlock(&rfkill_global_mutex); +} + +/** + * rfkill_epo - emergency power off all transmitters + * + * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, + * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex. + * + * The global state before the EPO is saved and can be restored later + * using rfkill_restore_states(). 
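+ *
+ * Sketch of how the EPO/restore pair is used by the SW_RFKILL_ALL
+ * handling in input.c (conceptually, ignoring rate limiting and the
+ * master_switch_mode parameter):
+ *
+ *	switch moved to "kill"    -> rfkill_epo()
+ *	switch moved back to "on" -> rfkill_restore_states()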
+ */
+void rfkill_epo(void)
+{
+	struct rfkill *rfkill;
+	int i;
+
+	if (atomic_read(&rfkill_input_disabled))
+		return;
+
+	mutex_lock(&rfkill_global_mutex);
+
+	rfkill_epo_lock_active = true;
+	list_for_each_entry(rfkill, &rfkill_list, node)
+		rfkill_set_block(rfkill, true);
+
+	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
+		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
+		rfkill_global_states[i].cur = true;
+	}
+
+	mutex_unlock(&rfkill_global_mutex);
+}
+
+/**
+ * rfkill_restore_states - restore global states
+ *
+ * Restore (and sync switches to) the global state from the
+ * states saved before the last rfkill_epo() call. This can undo
+ * the effects of a call to rfkill_epo().
+ */
+void rfkill_restore_states(void)
+{
+	int i;
+
+	if (atomic_read(&rfkill_input_disabled))
+		return;
+
+	mutex_lock(&rfkill_global_mutex);
+
+	rfkill_epo_lock_active = false;
+	for (i = 0; i < NUM_RFKILL_TYPES; i++)
+		__rfkill_switch_all(i, rfkill_global_states[i].sav);
+	mutex_unlock(&rfkill_global_mutex);
+}
+
+/**
+ * rfkill_remove_epo_lock - unlock state changes
+ *
+ * Used by rfkill-input to manually unlock state changes, when
+ * the EPO switch is deactivated.
+ */
+void rfkill_remove_epo_lock(void)
+{
+	if (atomic_read(&rfkill_input_disabled))
+		return;
+
+	mutex_lock(&rfkill_global_mutex);
+	rfkill_epo_lock_active = false;
+	mutex_unlock(&rfkill_global_mutex);
+}
+
+/**
+ * rfkill_is_epo_lock_active - returns true if EPO is active
+ *
+ * Returns 0 (false) if there is NOT an active EPO condition,
+ * and 1 (true) if there is an active EPO condition, which
+ * locks all radios in one of the BLOCKED states.
+ *
+ * Can be called in atomic context.
+ */
+bool rfkill_is_epo_lock_active(void)
+{
+	return rfkill_epo_lock_active;
+}
+
+/**
+ * rfkill_get_global_sw_state - returns global state for a type
+ * @type: the type to get the global state of
+ *
+ * Returns the current global state for a given wireless
+ * device type.
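+ *
+ * For illustration: rfkill_get_global_sw_state(RFKILL_TYPE_WLAN) is
+ * true while WLAN is globally soft blocked; the input handler uses
+ * it (in __rfkill_handle_normal_op) to decide whether the next
+ * KEY_WLAN toggle should block or unblock.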
+ */ +bool rfkill_get_global_sw_state(const enum rfkill_type type) +{ + return rfkill_global_states[type].cur; +} +#endif + + +bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) +{ + bool ret, change; + + ret = __rfkill_set_hw_state(rfkill, blocked, &change); + + if (!rfkill->registered) + return ret; + + if (change) + schedule_work(&rfkill->uevent_work); + + return ret; +} +EXPORT_SYMBOL(rfkill_set_hw_state); + +static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) +{ + u32 bit = RFKILL_BLOCK_SW; + + /* if in a ops->set_block right now, use other bit */ + if (rfkill->state & RFKILL_BLOCK_SW_SETCALL) + bit = RFKILL_BLOCK_SW_PREV; + + if (blocked) + rfkill->state |= bit; + else + rfkill->state &= ~bit; +} + +bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) +{ + unsigned long flags; + bool prev, hwblock; + + BUG_ON(!rfkill); + + spin_lock_irqsave(&rfkill->lock, flags); + prev = !!(rfkill->state & RFKILL_BLOCK_SW); + __rfkill_set_sw_state(rfkill, blocked); + hwblock = !!(rfkill->state & RFKILL_BLOCK_HW); + blocked = blocked || hwblock; + spin_unlock_irqrestore(&rfkill->lock, flags); + + if (!rfkill->registered) + return blocked; + + if (prev != blocked && !hwblock) + schedule_work(&rfkill->uevent_work); + + rfkill_led_trigger_event(rfkill); + + return blocked; +} +EXPORT_SYMBOL(rfkill_set_sw_state); + +void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) +{ + unsigned long flags; + + BUG_ON(!rfkill); + BUG_ON(rfkill->registered); + + spin_lock_irqsave(&rfkill->lock, flags); + __rfkill_set_sw_state(rfkill, blocked); + rfkill->persistent = true; + spin_unlock_irqrestore(&rfkill->lock, flags); +} +EXPORT_SYMBOL(rfkill_init_sw_state); + +void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) +{ + unsigned long flags; + bool swprev, hwprev; + + BUG_ON(!rfkill); + + spin_lock_irqsave(&rfkill->lock, flags); + + /* + * No need to care about prev/setblock ... this is for uevent only + * and that will get triggered by rfkill_set_block anyway. 
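+ *
+ * (Typical caller, for illustration only: a driver that reads both
+ * the hard and soft block bits from a single firmware/EC register
+ * and reports them in one call rather than via separate
+ * rfkill_set_hw_state() / rfkill_set_sw_state() calls.)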
+ */ + swprev = !!(rfkill->state & RFKILL_BLOCK_SW); + hwprev = !!(rfkill->state & RFKILL_BLOCK_HW); + __rfkill_set_sw_state(rfkill, sw); + if (hw) + rfkill->state |= RFKILL_BLOCK_HW; + else + rfkill->state &= ~RFKILL_BLOCK_HW; + + spin_unlock_irqrestore(&rfkill->lock, flags); + + if (!rfkill->registered) { + rfkill->persistent = true; + } else { + if (swprev != sw || hwprev != hw) + schedule_work(&rfkill->uevent_work); + + rfkill_led_trigger_event(rfkill); + } +} +EXPORT_SYMBOL(rfkill_set_states); + +static ssize_t rfkill_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + + return sprintf(buf, "%s\n", rfkill->name); +} + +static const char *rfkill_get_type_str(enum rfkill_type type) +{ + BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1); + + switch (type) { + case RFKILL_TYPE_WLAN: + return "wlan"; + case RFKILL_TYPE_BLUETOOTH: + return "bluetooth"; + case RFKILL_TYPE_UWB: + return "ultrawideband"; + case RFKILL_TYPE_WIMAX: + return "wimax"; + case RFKILL_TYPE_WWAN: + return "wwan"; + case RFKILL_TYPE_GPS: + return "gps"; + case RFKILL_TYPE_FM: + return "fm"; + default: + BUG(); + } +} + +static ssize_t rfkill_type_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + + return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); +} + +static ssize_t rfkill_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + + return sprintf(buf, "%d\n", rfkill->idx); +} + +static ssize_t rfkill_persistent_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + + return sprintf(buf, "%d\n", rfkill->persistent); +} + +static u8 user_state_from_blocked(unsigned long state) +{ + if (state & RFKILL_BLOCK_HW) + return RFKILL_USER_STATE_HARD_BLOCKED; + if (state & RFKILL_BLOCK_SW) + return RFKILL_USER_STATE_SOFT_BLOCKED; + + return RFKILL_USER_STATE_UNBLOCKED; +} + +static ssize_t rfkill_state_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + unsigned long flags; + u32 state; + + spin_lock_irqsave(&rfkill->lock, flags); + state = rfkill->state; + spin_unlock_irqrestore(&rfkill->lock, flags); + + return sprintf(buf, "%d\n", user_state_from_blocked(state)); +} + +static ssize_t rfkill_state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rfkill *rfkill = to_rfkill(dev); + unsigned long state; + int err; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + err = strict_strtoul(buf, 0, &state); + if (err) + return err; + + if (state != RFKILL_USER_STATE_SOFT_BLOCKED && + state != RFKILL_USER_STATE_UNBLOCKED) + return -EINVAL; + + mutex_lock(&rfkill_global_mutex); + rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED); + mutex_unlock(&rfkill_global_mutex); + + return err ?: count; +} + +static ssize_t rfkill_claim_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", 0); +} + +static ssize_t rfkill_claim_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static struct device_attribute rfkill_dev_attrs[] = { + __ATTR(name, S_IRUGO, rfkill_name_show, NULL), + __ATTR(type, S_IRUGO, rfkill_type_show, NULL), + __ATTR(index, S_IRUGO, rfkill_idx_show, NULL), + __ATTR(persistent, S_IRUGO, 
rfkill_persistent_show, NULL), + __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), + __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), + __ATTR_NULL +}; + +static void rfkill_release(struct device *dev) +{ + struct rfkill *rfkill = to_rfkill(dev); + + kfree(rfkill); +} + +static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct rfkill *rfkill = to_rfkill(dev); + unsigned long flags; + u32 state; + int error; + + error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); + if (error) + return error; + error = add_uevent_var(env, "RFKILL_TYPE=%s", + rfkill_get_type_str(rfkill->type)); + if (error) + return error; + spin_lock_irqsave(&rfkill->lock, flags); + state = rfkill->state; + spin_unlock_irqrestore(&rfkill->lock, flags); + error = add_uevent_var(env, "RFKILL_STATE=%d", + user_state_from_blocked(state)); + return error; +} + +void rfkill_pause_polling(struct rfkill *rfkill) +{ + BUG_ON(!rfkill); + + if (!rfkill->ops->poll) + return; + + cancel_delayed_work_sync(&rfkill->poll_work); +} +EXPORT_SYMBOL(rfkill_pause_polling); + +void rfkill_resume_polling(struct rfkill *rfkill) +{ + BUG_ON(!rfkill); + + if (!rfkill->ops->poll) + return; + + schedule_work(&rfkill->poll_work.work); +} +EXPORT_SYMBOL(rfkill_resume_polling); + +static int rfkill_suspend(struct device *dev, pm_message_t state) +{ + struct rfkill *rfkill = to_rfkill(dev); + + rfkill_pause_polling(rfkill); + + return 0; +} + +static int rfkill_resume(struct device *dev) +{ + struct rfkill *rfkill = to_rfkill(dev); + bool cur; + + if (!rfkill->persistent) { + cur = !!(rfkill->state & RFKILL_BLOCK_SW); + rfkill_set_block(rfkill, cur); + } + + rfkill_resume_polling(rfkill); + + return 0; +} + +static struct class rfkill_class = { + .name = "rfkill_backport", + .dev_release = rfkill_release, + .dev_attrs = rfkill_dev_attrs, + .dev_uevent = rfkill_dev_uevent, + .suspend = rfkill_suspend, + .resume = rfkill_resume, +}; + +bool rfkill_blocked(struct rfkill *rfkill) +{ + unsigned long flags; + u32 state; + + spin_lock_irqsave(&rfkill->lock, flags); + state = rfkill->state; + spin_unlock_irqrestore(&rfkill->lock, flags); + + return !!(state & RFKILL_BLOCK_ANY); +} +EXPORT_SYMBOL(rfkill_blocked); + + +struct rfkill * __must_check rfkill_alloc(const char *name, + struct device *parent, + const enum rfkill_type type, + const struct rfkill_ops *ops, + void *ops_data) +{ + struct rfkill *rfkill; + struct device *dev; + + if (WARN_ON(!ops)) + return NULL; + + if (WARN_ON(!ops->set_block)) + return NULL; + + if (WARN_ON(!name)) + return NULL; + + if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) + return NULL; + + rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); + if (!rfkill) + return NULL; + + spin_lock_init(&rfkill->lock); + INIT_LIST_HEAD(&rfkill->node); + rfkill->type = type; + rfkill->name = name; + rfkill->ops = ops; + rfkill->data = ops_data; + + dev = &rfkill->dev; + dev->class = &rfkill_class; + dev->parent = parent; + device_initialize(dev); + + return rfkill; +} +EXPORT_SYMBOL(rfkill_alloc); + +static void rfkill_poll(struct work_struct *work) +{ + struct rfkill *rfkill; + + rfkill = container_of(work, struct rfkill, poll_work.work); + + /* + * Poll hardware state -- driver will use one of the + * rfkill_set{,_hw,_sw}_state functions and use its + * return value to update the current status. 
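+ *
+ * A minimal, made-up poll callback (foo_* are placeholder names,
+ * not symbols from this patch):
+ *
+ *	static void foo_poll(struct rfkill *rfkill, void *data)
+ *	{
+ *		struct foo_priv *priv = data;
+ *
+ *		rfkill_set_hw_state(rfkill, foo_killswitch_asserted(priv));
+ *	}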
+ */ + rfkill->ops->poll(rfkill, rfkill->data); + + schedule_delayed_work(&rfkill->poll_work, + round_jiffies_relative(POLL_INTERVAL)); +} + +static void rfkill_uevent_work(struct work_struct *work) +{ + struct rfkill *rfkill; + + rfkill = container_of(work, struct rfkill, uevent_work); + + mutex_lock(&rfkill_global_mutex); + rfkill_event(rfkill); + mutex_unlock(&rfkill_global_mutex); +} + +static void rfkill_sync_work(struct work_struct *work) +{ + struct rfkill *rfkill; + bool cur; + + rfkill = container_of(work, struct rfkill, sync_work); + + mutex_lock(&rfkill_global_mutex); + cur = rfkill_global_states[rfkill->type].cur; + rfkill_set_block(rfkill, cur); + mutex_unlock(&rfkill_global_mutex); +} + +int __must_check rfkill_register(struct rfkill *rfkill) +{ + static unsigned long rfkill_no; + struct device *dev = &rfkill->dev; + int error; + + BUG_ON(!rfkill); + + mutex_lock(&rfkill_global_mutex); + + if (rfkill->registered) { + error = -EALREADY; + goto unlock; + } + + rfkill->idx = rfkill_no; + dev_set_name(dev, "rfkill%lu", rfkill_no); + rfkill_no++; + + list_add_tail(&rfkill->node, &rfkill_list); + + error = device_add(dev); + if (error) + goto remove; + + error = rfkill_led_trigger_register(rfkill); + if (error) + goto devdel; + + rfkill->registered = true; + + INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll); + INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work); + INIT_WORK(&rfkill->sync_work, rfkill_sync_work); + + if (rfkill->ops->poll) + schedule_delayed_work(&rfkill->poll_work, + round_jiffies_relative(POLL_INTERVAL)); + + if (!rfkill->persistent || rfkill_epo_lock_active) { + schedule_work(&rfkill->sync_work); + } else { +#ifdef CONFIG_RFKILL_BACKPORT_INPUT + bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW); + + if (!atomic_read(&rfkill_input_disabled)) + __rfkill_switch_all(rfkill->type, soft_blocked); +#endif + } + + rfkill_send_events(rfkill, RFKILL_OP_ADD); + + mutex_unlock(&rfkill_global_mutex); + return 0; + + devdel: + device_del(&rfkill->dev); + remove: + list_del_init(&rfkill->node); + unlock: + mutex_unlock(&rfkill_global_mutex); + return error; +} +EXPORT_SYMBOL(rfkill_register); + +void rfkill_unregister(struct rfkill *rfkill) +{ + BUG_ON(!rfkill); + + if (rfkill->ops->poll) + cancel_delayed_work_sync(&rfkill->poll_work); + + cancel_work_sync(&rfkill->uevent_work); + cancel_work_sync(&rfkill->sync_work); + + rfkill->registered = false; + + device_del(&rfkill->dev); + + mutex_lock(&rfkill_global_mutex); + rfkill_send_events(rfkill, RFKILL_OP_DEL); + list_del_init(&rfkill->node); + mutex_unlock(&rfkill_global_mutex); + + rfkill_led_trigger_unregister(rfkill); +} +EXPORT_SYMBOL(rfkill_unregister); + +void rfkill_destroy(struct rfkill *rfkill) +{ + if (rfkill) + put_device(&rfkill->dev); +} +EXPORT_SYMBOL(rfkill_destroy); + +static int rfkill_fop_open(struct inode *inode, struct file *file) +{ + struct rfkill_data *data; + struct rfkill *rfkill; + struct rfkill_int_event *ev, *tmp; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + INIT_LIST_HEAD(&data->events); + mutex_init(&data->mtx); + init_waitqueue_head(&data->read_wait); + + mutex_lock(&rfkill_global_mutex); + mutex_lock(&data->mtx); + /* + * start getting events from elsewhere but hold mtx to get + * startup events added first + */ + list_add(&data->list, &rfkill_fds); + + list_for_each_entry(rfkill, &rfkill_list, node) { + ev = kzalloc(sizeof(*ev), GFP_KERNEL); + if (!ev) + goto free; + rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); + list_add_tail(&ev->list, 
&data->events); + } + mutex_unlock(&data->mtx); + mutex_unlock(&rfkill_global_mutex); + + file->private_data = data; + + return nonseekable_open(inode, file); + + free: + mutex_unlock(&data->mtx); + mutex_unlock(&rfkill_global_mutex); + mutex_destroy(&data->mtx); + list_for_each_entry_safe(ev, tmp, &data->events, list) + kfree(ev); + kfree(data); + return -ENOMEM; +} + +static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait) +{ + struct rfkill_data *data = file->private_data; + unsigned int res = POLLOUT | POLLWRNORM; + + poll_wait(file, &data->read_wait, wait); + + mutex_lock(&data->mtx); + if (!list_empty(&data->events)) + res = POLLIN | POLLRDNORM; + mutex_unlock(&data->mtx); + + return res; +} + +static bool rfkill_readable(struct rfkill_data *data) +{ + bool r; + + mutex_lock(&data->mtx); + r = !list_empty(&data->events); + mutex_unlock(&data->mtx); + + return r; +} + +static ssize_t rfkill_fop_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct rfkill_data *data = file->private_data; + struct rfkill_int_event *ev; + unsigned long sz; + int ret; + + mutex_lock(&data->mtx); + + while (list_empty(&data->events)) { + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + goto out; + } + mutex_unlock(&data->mtx); + ret = wait_event_interruptible(data->read_wait, + rfkill_readable(data)); + mutex_lock(&data->mtx); + + if (ret) + goto out; + } + + ev = list_first_entry(&data->events, struct rfkill_int_event, + list); + + sz = min_t(unsigned long, sizeof(ev->ev), count); + ret = sz; + if (copy_to_user(buf, &ev->ev, sz)) + ret = -EFAULT; + + list_del(&ev->list); + kfree(ev); + out: + mutex_unlock(&data->mtx); + return ret; +} + +static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct rfkill *rfkill; + struct rfkill_event ev; + + /* we don't need the 'hard' variable but accept it */ + if (count < RFKILL_EVENT_SIZE_V1 - 1) + return -EINVAL; + + /* + * Copy as much data as we can accept into our 'ev' buffer, + * but tell userspace how much we've copied so it can determine + * our API version even in a write() call, if it cares. 
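+ *
+ * For reference, a userspace soft-block request is just a struct
+ * rfkill_event written to /dev/rfkill, e.g. (illustrative only):
+ *
+ *	struct rfkill_event ev = {
+ *		.op   = RFKILL_OP_CHANGE_ALL,
+ *		.type = RFKILL_TYPE_WLAN,
+ *		.soft = 1,
+ *	};
+ *	write(fd, &ev, sizeof(ev));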
+ */ + count = min(count, sizeof(ev)); + if (copy_from_user(&ev, buf, count)) + return -EFAULT; + + if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL) + return -EINVAL; + + if (ev.type >= NUM_RFKILL_TYPES) + return -EINVAL; + + mutex_lock(&rfkill_global_mutex); + + if (ev.op == RFKILL_OP_CHANGE_ALL) { + if (ev.type == RFKILL_TYPE_ALL) { + enum rfkill_type i; + for (i = 0; i < NUM_RFKILL_TYPES; i++) + rfkill_global_states[i].cur = ev.soft; + } else { + rfkill_global_states[ev.type].cur = ev.soft; + } + } + + list_for_each_entry(rfkill, &rfkill_list, node) { + if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL) + continue; + + if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL) + continue; + + rfkill_set_block(rfkill, ev.soft); + } + mutex_unlock(&rfkill_global_mutex); + + return count; +} + +static int rfkill_fop_release(struct inode *inode, struct file *file) +{ + struct rfkill_data *data = file->private_data; + struct rfkill_int_event *ev, *tmp; + + mutex_lock(&rfkill_global_mutex); + list_del(&data->list); + mutex_unlock(&rfkill_global_mutex); + + mutex_destroy(&data->mtx); + list_for_each_entry_safe(ev, tmp, &data->events, list) + kfree(ev); + +#ifdef CONFIG_RFKILL_BACKPORT_INPUT + if (data->input_handler) + if (atomic_dec_return(&rfkill_input_disabled) == 0) + printk(KERN_DEBUG "rfkill: input handler enabled\n"); +#endif + + kfree(data); + + return 0; +} + +#ifdef CONFIG_RFKILL_BACKPORT_INPUT +static long rfkill_fop_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct rfkill_data *data = file->private_data; + + if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC) + return -ENOSYS; + + if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT) + return -ENOSYS; + + mutex_lock(&data->mtx); + + if (!data->input_handler) { + if (atomic_inc_return(&rfkill_input_disabled) == 1) + printk(KERN_DEBUG "rfkill: input handler disabled\n"); + data->input_handler = true; + } + + mutex_unlock(&data->mtx); + + return 0; +} +#endif + +static const struct file_operations rfkill_fops = { + .owner = THIS_MODULE, + .open = rfkill_fop_open, + .read = rfkill_fop_read, + .write = rfkill_fop_write, + .poll = rfkill_fop_poll, + .release = rfkill_fop_release, +#ifdef CONFIG_RFKILL_BACKPORT_INPUT + .unlocked_ioctl = rfkill_fop_ioctl, + .compat_ioctl = rfkill_fop_ioctl, +#endif +}; + +static struct miscdevice rfkill_miscdev = { + .name = "rfkill", + .fops = &rfkill_fops, + .minor = MISC_DYNAMIC_MINOR, +}; + +static int __init rfkill_init(void) +{ + int error; + int i; + + for (i = 0; i < NUM_RFKILL_TYPES; i++) + rfkill_global_states[i].cur = !rfkill_default_state; + + error = class_register(&rfkill_class); + if (error) + goto out; + + error = misc_register(&rfkill_miscdev); + if (error) { + class_unregister(&rfkill_class); + goto out; + } + +#ifdef CONFIG_RFKILL_BACKPORT_INPUT + error = rfkill_handler_init(); + if (error) { + misc_deregister(&rfkill_miscdev); + class_unregister(&rfkill_class); + goto out; + } +#endif + + out: + return error; +} +subsys_initcall(rfkill_init); + +static void __exit rfkill_exit(void) +{ +#ifdef CONFIG_RFKILL_BACKPORT_INPUT + rfkill_handler_exit(); +#endif + misc_deregister(&rfkill_miscdev); + class_unregister(&rfkill_class); +} +module_exit(rfkill_exit); diff --git a/net/rfkill/input.c b/net/rfkill/input.c new file mode 100644 index 0000000..5a98290 --- /dev/null +++ b/net/rfkill/input.c @@ -0,0 +1,354 @@ +/* + * Input layer to RF Kill interface connector + * + * Copyright (c) 2007 Dmitry Torokhov + * Copyright 2009 Johannes Berg + * + * This program is free 
software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * If you ever run into a situation in which you have a SW_ type rfkill + * input device, then you can revive code that was removed in the patch + * "rfkill-input: remove unused code". + */ + +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) +#include +#else +#include +#endif +#include + +#include "rfkill.h" + +enum rfkill_input_master_mode { + RFKILL_INPUT_MASTER_UNLOCK = 0, + RFKILL_INPUT_MASTER_RESTORE = 1, + RFKILL_INPUT_MASTER_UNBLOCKALL = 2, + NUM_RFKILL_INPUT_MASTER_MODES +}; + +/* Delay (in ms) between consecutive switch ops */ +#define RFKILL_OPS_DELAY 200 + +static enum rfkill_input_master_mode rfkill_master_switch_mode = + RFKILL_INPUT_MASTER_UNBLOCKALL; +module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0); +MODULE_PARM_DESC(master_switch_mode, + "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all"); + +static spinlock_t rfkill_op_lock; +static bool rfkill_op_pending; +static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; +static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; + +enum rfkill_sched_op { + RFKILL_GLOBAL_OP_EPO = 0, + RFKILL_GLOBAL_OP_RESTORE, + RFKILL_GLOBAL_OP_UNLOCK, + RFKILL_GLOBAL_OP_UNBLOCK, +}; + +static enum rfkill_sched_op rfkill_master_switch_op; +static enum rfkill_sched_op rfkill_op; + +static void __rfkill_handle_global_op(enum rfkill_sched_op op) +{ + unsigned int i; + + switch (op) { + case RFKILL_GLOBAL_OP_EPO: + rfkill_epo(); + break; + case RFKILL_GLOBAL_OP_RESTORE: + rfkill_restore_states(); + break; + case RFKILL_GLOBAL_OP_UNLOCK: + rfkill_remove_epo_lock(); + break; + case RFKILL_GLOBAL_OP_UNBLOCK: + rfkill_remove_epo_lock(); + for (i = 0; i < NUM_RFKILL_TYPES; i++) + rfkill_switch_all(i, false); + break; + default: + /* memory corruption or bug, fail safely */ + rfkill_epo(); + WARN(1, "Unknown requested operation %d! " + "rfkill Emergency Power Off activated\n", + op); + } +} + +static void __rfkill_handle_normal_op(const enum rfkill_type type, + const bool complement) +{ + bool blocked; + + blocked = rfkill_get_global_sw_state(type); + if (complement) + blocked = !blocked; + + rfkill_switch_all(type, blocked); +} + +static void rfkill_op_handler(struct work_struct *work) +{ + unsigned int i; + bool c; + + spin_lock_irq(&rfkill_op_lock); + do { + if (rfkill_op_pending) { + enum rfkill_sched_op op = rfkill_op; + rfkill_op_pending = false; + memset(rfkill_sw_pending, 0, + sizeof(rfkill_sw_pending)); + spin_unlock_irq(&rfkill_op_lock); + + __rfkill_handle_global_op(op); + + spin_lock_irq(&rfkill_op_lock); + + /* + * handle global ops first -- during unlocked period + * we might have gotten a new global op. 
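+ *
+ * (E.g. a second SW_RFKILL_ALL flip that arrived while the lock was
+ * dropped around __rfkill_handle_global_op() above -- the "continue"
+ * below re-checks rfkill_op_pending before any per-type toggles.)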
+ */ + if (rfkill_op_pending) + continue; + } + + if (rfkill_is_epo_lock_active()) + continue; + + for (i = 0; i < NUM_RFKILL_TYPES; i++) { + if (__test_and_clear_bit(i, rfkill_sw_pending)) { + c = __test_and_clear_bit(i, rfkill_sw_state); + spin_unlock_irq(&rfkill_op_lock); + + __rfkill_handle_normal_op(i, c); + + spin_lock_irq(&rfkill_op_lock); + } + } + } while (rfkill_op_pending); + spin_unlock_irq(&rfkill_op_lock); +} + +static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler); +static unsigned long rfkill_last_scheduled; + +static unsigned long rfkill_ratelimit(const unsigned long last) +{ + const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); + return (time_after(jiffies, last + delay)) ? 0 : delay; +} + +static void rfkill_schedule_ratelimited(void) +{ + if (delayed_work_pending(&rfkill_op_work)) + return; + schedule_delayed_work(&rfkill_op_work, + rfkill_ratelimit(rfkill_last_scheduled)); + rfkill_last_scheduled = jiffies; +} + +static void rfkill_schedule_global_op(enum rfkill_sched_op op) +{ + unsigned long flags; + + spin_lock_irqsave(&rfkill_op_lock, flags); + rfkill_op = op; + rfkill_op_pending = true; + if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { + /* bypass the limiter for EPO */ + cancel_delayed_work(&rfkill_op_work); + schedule_delayed_work(&rfkill_op_work, 0); + rfkill_last_scheduled = jiffies; + } else + rfkill_schedule_ratelimited(); + spin_unlock_irqrestore(&rfkill_op_lock, flags); +} + +static void rfkill_schedule_toggle(enum rfkill_type type) +{ + unsigned long flags; + + if (rfkill_is_epo_lock_active()) + return; + + spin_lock_irqsave(&rfkill_op_lock, flags); + if (!rfkill_op_pending) { + __set_bit(type, rfkill_sw_pending); + __change_bit(type, rfkill_sw_state); + rfkill_schedule_ratelimited(); + } + spin_unlock_irqrestore(&rfkill_op_lock, flags); +} + +static void rfkill_schedule_evsw_rfkillall(int state) +{ + if (state) + rfkill_schedule_global_op(rfkill_master_switch_op); + else + rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); +} + +static void rfkill_event(struct input_handle *handle, unsigned int type, + unsigned int code, int data) +{ + if (type == EV_KEY && data == 1) { + switch (code) { + case KEY_WLAN: + rfkill_schedule_toggle(RFKILL_TYPE_WLAN); + break; + case KEY_BLUETOOTH: + rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH); + break; + case KEY_UWB: + rfkill_schedule_toggle(RFKILL_TYPE_UWB); + break; + case KEY_WIMAX: + rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); + break; + case KEY_RFKILL: + rfkill_schedule_toggle(RFKILL_TYPE_ALL); + break; + } + } else if (type == EV_SW && code == SW_RFKILL_ALL) + rfkill_schedule_evsw_rfkillall(data); +} + +static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) +{ + struct input_handle *handle; + int error; + + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "rfkill_backport"; + + /* causes rfkill_start() to be called */ + error = input_register_handle(handle); + if (error) + goto err_free_handle; + + error = input_open_device(handle); + if (error) + goto err_unregister_handle; + + return 0; + + err_unregister_handle: + input_unregister_handle(handle); + err_free_handle: + kfree(handle); + return error; +} + +static void rfkill_start(struct input_handle *handle) +{ + /* + * Take event_lock to guard against configuration changes, we + * should be able to deal with concurrency with rfkill_event() + * just fine 
(which event_lock will also avoid). + */ + spin_lock_irq(&handle->dev->event_lock); + + if (test_bit(EV_SW, handle->dev->evbit) && + test_bit(SW_RFKILL_ALL, handle->dev->swbit)) + rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, + handle->dev->sw)); + + spin_unlock_irq(&handle->dev->event_lock); +} + +static void rfkill_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id rfkill_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, + .evbit = { BIT(EV_SW) }, + .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, + }, + { } +}; + +static struct input_handler rfkill_handler = { + .name = "rfkill", + .event = rfkill_event, + .connect = rfkill_connect, + .start = rfkill_start, + .disconnect = rfkill_disconnect, + .id_table = rfkill_ids, +}; + +int __init rfkill_handler_init(void) +{ + switch (rfkill_master_switch_mode) { + case RFKILL_INPUT_MASTER_UNBLOCKALL: + rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK; + break; + case RFKILL_INPUT_MASTER_RESTORE: + rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE; + break; + case RFKILL_INPUT_MASTER_UNLOCK: + rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK; + break; + default: + return -EINVAL; + } + + spin_lock_init(&rfkill_op_lock); + + /* Avoid delay at first schedule */ + rfkill_last_scheduled = + jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1; + return input_register_handler(&rfkill_handler); +} + +void __exit rfkill_handler_exit(void) +{ + input_unregister_handler(&rfkill_handler); + cancel_delayed_work_sync(&rfkill_op_work); +} diff --git a/net/rfkill/input.c.orig b/net/rfkill/input.c.orig new file mode 100644 index 0000000..3713d7e --- /dev/null +++ b/net/rfkill/input.c.orig @@ -0,0 +1,350 @@ +/* + * Input layer to RF Kill interface connector + * + * Copyright (c) 2007 Dmitry Torokhov + * Copyright 2009 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * If you ever run into a situation in which you have a SW_ type rfkill + * input device, then you can revive code that was removed in the patch + * "rfkill-input: remove unused code". 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "rfkill.h" + +enum rfkill_input_master_mode { + RFKILL_INPUT_MASTER_UNLOCK = 0, + RFKILL_INPUT_MASTER_RESTORE = 1, + RFKILL_INPUT_MASTER_UNBLOCKALL = 2, + NUM_RFKILL_INPUT_MASTER_MODES +}; + +/* Delay (in ms) between consecutive switch ops */ +#define RFKILL_OPS_DELAY 200 + +static enum rfkill_input_master_mode rfkill_master_switch_mode = + RFKILL_INPUT_MASTER_UNBLOCKALL; +module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0); +MODULE_PARM_DESC(master_switch_mode, + "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all"); + +static spinlock_t rfkill_op_lock; +static bool rfkill_op_pending; +static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; +static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; + +enum rfkill_sched_op { + RFKILL_GLOBAL_OP_EPO = 0, + RFKILL_GLOBAL_OP_RESTORE, + RFKILL_GLOBAL_OP_UNLOCK, + RFKILL_GLOBAL_OP_UNBLOCK, +}; + +static enum rfkill_sched_op rfkill_master_switch_op; +static enum rfkill_sched_op rfkill_op; + +static void __rfkill_handle_global_op(enum rfkill_sched_op op) +{ + unsigned int i; + + switch (op) { + case RFKILL_GLOBAL_OP_EPO: + rfkill_epo(); + break; + case RFKILL_GLOBAL_OP_RESTORE: + rfkill_restore_states(); + break; + case RFKILL_GLOBAL_OP_UNLOCK: + rfkill_remove_epo_lock(); + break; + case RFKILL_GLOBAL_OP_UNBLOCK: + rfkill_remove_epo_lock(); + for (i = 0; i < NUM_RFKILL_TYPES; i++) + rfkill_switch_all(i, false); + break; + default: + /* memory corruption or bug, fail safely */ + rfkill_epo(); + WARN(1, "Unknown requested operation %d! " + "rfkill Emergency Power Off activated\n", + op); + } +} + +static void __rfkill_handle_normal_op(const enum rfkill_type type, + const bool complement) +{ + bool blocked; + + blocked = rfkill_get_global_sw_state(type); + if (complement) + blocked = !blocked; + + rfkill_switch_all(type, blocked); +} + +static void rfkill_op_handler(struct work_struct *work) +{ + unsigned int i; + bool c; + + spin_lock_irq(&rfkill_op_lock); + do { + if (rfkill_op_pending) { + enum rfkill_sched_op op = rfkill_op; + rfkill_op_pending = false; + memset(rfkill_sw_pending, 0, + sizeof(rfkill_sw_pending)); + spin_unlock_irq(&rfkill_op_lock); + + __rfkill_handle_global_op(op); + + spin_lock_irq(&rfkill_op_lock); + + /* + * handle global ops first -- during unlocked period + * we might have gotten a new global op. + */ + if (rfkill_op_pending) + continue; + } + + if (rfkill_is_epo_lock_active()) + continue; + + for (i = 0; i < NUM_RFKILL_TYPES; i++) { + if (__test_and_clear_bit(i, rfkill_sw_pending)) { + c = __test_and_clear_bit(i, rfkill_sw_state); + spin_unlock_irq(&rfkill_op_lock); + + __rfkill_handle_normal_op(i, c); + + spin_lock_irq(&rfkill_op_lock); + } + } + } while (rfkill_op_pending); + spin_unlock_irq(&rfkill_op_lock); +} + +static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler); +static unsigned long rfkill_last_scheduled; + +static unsigned long rfkill_ratelimit(const unsigned long last) +{ + const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); + return (time_after(jiffies, last + delay)) ? 
0 : delay; +} + +static void rfkill_schedule_ratelimited(void) +{ + if (delayed_work_pending(&rfkill_op_work)) + return; + schedule_delayed_work(&rfkill_op_work, + rfkill_ratelimit(rfkill_last_scheduled)); + rfkill_last_scheduled = jiffies; +} + +static void rfkill_schedule_global_op(enum rfkill_sched_op op) +{ + unsigned long flags; + + spin_lock_irqsave(&rfkill_op_lock, flags); + rfkill_op = op; + rfkill_op_pending = true; + if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { + /* bypass the limiter for EPO */ + cancel_delayed_work(&rfkill_op_work); + schedule_delayed_work(&rfkill_op_work, 0); + rfkill_last_scheduled = jiffies; + } else + rfkill_schedule_ratelimited(); + spin_unlock_irqrestore(&rfkill_op_lock, flags); +} + +static void rfkill_schedule_toggle(enum rfkill_type type) +{ + unsigned long flags; + + if (rfkill_is_epo_lock_active()) + return; + + spin_lock_irqsave(&rfkill_op_lock, flags); + if (!rfkill_op_pending) { + __set_bit(type, rfkill_sw_pending); + __change_bit(type, rfkill_sw_state); + rfkill_schedule_ratelimited(); + } + spin_unlock_irqrestore(&rfkill_op_lock, flags); +} + +static void rfkill_schedule_evsw_rfkillall(int state) +{ + if (state) + rfkill_schedule_global_op(rfkill_master_switch_op); + else + rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); +} + +static void rfkill_event(struct input_handle *handle, unsigned int type, + unsigned int code, int data) +{ + if (type == EV_KEY && data == 1) { + switch (code) { + case KEY_WLAN: + rfkill_schedule_toggle(RFKILL_TYPE_WLAN); + break; + case KEY_BLUETOOTH: + rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH); + break; + case KEY_UWB: + rfkill_schedule_toggle(RFKILL_TYPE_UWB); + break; + case KEY_WIMAX: + rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); + break; + case KEY_RFKILL: + rfkill_schedule_toggle(RFKILL_TYPE_ALL); + break; + } + } else if (type == EV_SW && code == SW_RFKILL_ALL) + rfkill_schedule_evsw_rfkillall(data); +} + +static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) +{ + struct input_handle *handle; + int error; + + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "rfkill"; + + /* causes rfkill_start() to be called */ + error = input_register_handle(handle); + if (error) + goto err_free_handle; + + error = input_open_device(handle); + if (error) + goto err_unregister_handle; + + return 0; + + err_unregister_handle: + input_unregister_handle(handle); + err_free_handle: + kfree(handle); + return error; +} + +static void rfkill_start(struct input_handle *handle) +{ + /* + * Take event_lock to guard against configuration changes, we + * should be able to deal with concurrency with rfkill_event() + * just fine (which event_lock will also avoid). 
+ */ + spin_lock_irq(&handle->dev->event_lock); + + if (test_bit(EV_SW, handle->dev->evbit) && + test_bit(SW_RFKILL_ALL, handle->dev->swbit)) + rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, + handle->dev->sw)); + + spin_unlock_irq(&handle->dev->event_lock); +} + +static void rfkill_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id rfkill_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, + .evbit = { BIT(EV_SW) }, + .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, + }, + { } +}; + +static struct input_handler rfkill_handler = { + .name = "rfkill", + .event = rfkill_event, + .connect = rfkill_connect, + .start = rfkill_start, + .disconnect = rfkill_disconnect, + .id_table = rfkill_ids, +}; + +int __init rfkill_handler_init(void) +{ + switch (rfkill_master_switch_mode) { + case RFKILL_INPUT_MASTER_UNBLOCKALL: + rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK; + break; + case RFKILL_INPUT_MASTER_RESTORE: + rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE; + break; + case RFKILL_INPUT_MASTER_UNLOCK: + rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK; + break; + default: + return -EINVAL; + } + + spin_lock_init(&rfkill_op_lock); + + /* Avoid delay at first schedule */ + rfkill_last_scheduled = + jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1; + return input_register_handler(&rfkill_handler); +} + +void __exit rfkill_handler_exit(void) +{ + input_unregister_handler(&rfkill_handler); + cancel_delayed_work_sync(&rfkill_op_work); +} diff --git a/net/rfkill/rfkill.h b/net/rfkill/rfkill.h new file mode 100644 index 0000000..d1117cb --- /dev/null +++ b/net/rfkill/rfkill.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2007 Ivo van Doorn + * Copyright 2009 Johannes Berg + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#ifndef __RFKILL_INPUT_H +#define __RFKILL_INPUT_H + +/* core code */ +void rfkill_switch_all(const enum rfkill_type type, bool blocked); +void rfkill_epo(void); +void rfkill_restore_states(void); +void rfkill_remove_epo_lock(void); +bool rfkill_is_epo_lock_active(void); +bool rfkill_get_global_sw_state(const enum rfkill_type type); + +/* input handler */ +int rfkill_handler_init(void); +void rfkill_handler_exit(void); + +#endif /* __RFKILL_INPUT_H */ diff --git a/net/wireless/Makefile b/net/wireless/Makefile new file mode 100644 index 0000000..13bee1f --- /dev/null +++ b/net/wireless/Makefile @@ -0,0 +1,18 @@ +obj-$(CONFIG_CFG80211) += cfg80211.o +obj-$(CONFIG_LIB80211) += lib80211.o +obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o +obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o +obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o + +cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o +cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o +cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o +cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o +cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o + +ccflags-y += -D__CHECK_ENDIAN__ + +$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk + @$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@ + +clean-files := regdb.c diff --git a/net/wireless/chan.c b/net/wireless/chan.c new file mode 100644 index 0000000..bf1737f --- /dev/null +++ b/net/wireless/chan.c @@ -0,0 +1,102 @@ +/* + * This file contains helper code to handle channel + * settings and keeping track of what is possible at + * any point in time. + * + * Copyright 2009 Johannes Berg + */ + +#include +#include "core.h" + +struct ieee80211_channel * +rdev_fixed_channel(struct cfg80211_registered_device *rdev, + struct wireless_dev *for_wdev) +{ + struct wireless_dev *wdev; + struct ieee80211_channel *result = NULL; + + WARN_ON(!mutex_is_locked(&rdev->devlist_mtx)); + + list_for_each_entry(wdev, &rdev->netdev_list, list) { + if (wdev == for_wdev) + continue; + + /* + * Lock manually to tell lockdep about allowed + * nesting here if for_wdev->mtx is held already. + * This is ok as it's all under the rdev devlist + * mutex and as such can only be done once at any + * given time. 
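+ *
+ * (mutex_lock_nested(..., SINGLE_DEPTH_NESTING) below only adjusts
+ * the lockdep annotation; it does not change the locking behaviour.)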
+ */ + mutex_lock_nested(&wdev->mtx, SINGLE_DEPTH_NESTING); + if (wdev->current_bss) + result = wdev->current_bss->pub.channel; + wdev_unlock(wdev); + + if (result) + break; + } + + return result; +} + +struct ieee80211_channel * +rdev_freq_to_chan(struct cfg80211_registered_device *rdev, + int freq, enum nl80211_channel_type channel_type) +{ + struct ieee80211_channel *chan; + struct ieee80211_sta_ht_cap *ht_cap; + + chan = ieee80211_get_channel(&rdev->wiphy, freq); + + /* Primary channel not allowed */ + if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) + return NULL; + + if (channel_type == NL80211_CHAN_HT40MINUS && + chan->flags & IEEE80211_CHAN_NO_HT40MINUS) + return NULL; + else if (channel_type == NL80211_CHAN_HT40PLUS && + chan->flags & IEEE80211_CHAN_NO_HT40PLUS) + return NULL; + + ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap; + + if (channel_type != NL80211_CHAN_NO_HT) { + if (!ht_cap->ht_supported) + return NULL; + + if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || + ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) + return NULL; + } + + return chan; +} + +int rdev_set_freq(struct cfg80211_registered_device *rdev, + struct wireless_dev *for_wdev, + int freq, enum nl80211_channel_type channel_type) +{ + struct ieee80211_channel *chan; + int result; + + if (rdev_fixed_channel(rdev, for_wdev)) + return -EBUSY; + + if (!rdev->ops->set_channel) + return -EOPNOTSUPP; + + chan = rdev_freq_to_chan(rdev, freq, channel_type); + if (!chan) + return -EINVAL; + + result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type); + if (result) + return result; + + rdev->channel = chan; + + return 0; +} diff --git a/net/wireless/core.c b/net/wireless/core.c new file mode 100644 index 0000000..6d9a358 --- /dev/null +++ b/net/wireless/core.c @@ -0,0 +1,925 @@ +/* + * This is the linux wireless configuration interface. + * + * Copyright 2006-2010 Johannes Berg + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nl80211.h" +#include "core.h" +#include "sysfs.h" +#include "debugfs.h" +#include "wext-compat.h" +#include "ethtool.h" + +/* name for sysfs, %d is appended */ +#define PHY_NAME "phy" + +MODULE_AUTHOR("Johannes Berg"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("wireless configuration support"); + +/* RCU-protected (and cfg80211_mutex for writers) */ +LIST_HEAD(cfg80211_rdev_list); +int cfg80211_rdev_list_generation; + +DEFINE_MUTEX(cfg80211_mutex); + +/* for debugfs */ +static struct dentry *ieee80211_debugfs_dir; + +/* for the cleanup, scan and event works */ +struct workqueue_struct *cfg80211_wq; + +/* requires cfg80211_mutex to be held! */ +struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx) +{ + struct cfg80211_registered_device *result = NULL, *rdev; + + if (!wiphy_idx_valid(wiphy_idx)) + return NULL; + + assert_cfg80211_lock(); + + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + if (rdev->wiphy_idx == wiphy_idx) { + result = rdev; + break; + } + } + + return result; +} + +int get_wiphy_idx(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *rdev; + if (!wiphy) + return WIPHY_IDX_STALE; + rdev = wiphy_to_dev(wiphy); + return rdev->wiphy_idx; +} + +/* requires cfg80211_rdev_mutex to be held! 
*/ +struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) +{ + struct cfg80211_registered_device *rdev; + + if (!wiphy_idx_valid(wiphy_idx)) + return NULL; + + assert_cfg80211_lock(); + + rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx); + if (!rdev) + return NULL; + return &rdev->wiphy; +} + +/* requires cfg80211_mutex to be held! */ +struct cfg80211_registered_device * +__cfg80211_rdev_from_info(struct genl_info *info) +{ + int ifindex; + struct cfg80211_registered_device *bywiphyidx = NULL, *byifidx = NULL; + struct net_device *dev; + int err = -EINVAL; + + assert_cfg80211_lock(); + + if (info->attrs[NL80211_ATTR_WIPHY]) { + bywiphyidx = cfg80211_rdev_by_wiphy_idx( + nla_get_u32(info->attrs[NL80211_ATTR_WIPHY])); + err = -ENODEV; + } + + if (info->attrs[NL80211_ATTR_IFINDEX]) { + ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); + dev = dev_get_by_index(genl_info_net(info), ifindex); + if (dev) { + if (dev->ieee80211_ptr) + byifidx = + wiphy_to_dev(dev->ieee80211_ptr->wiphy); + dev_put(dev); + } + err = -ENODEV; + } + + if (bywiphyidx && byifidx) { + if (bywiphyidx != byifidx) + return ERR_PTR(-EINVAL); + else + return bywiphyidx; /* == byifidx */ + } + if (bywiphyidx) + return bywiphyidx; + + if (byifidx) + return byifidx; + + return ERR_PTR(err); +} + +struct cfg80211_registered_device * +cfg80211_get_dev_from_info(struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + + mutex_lock(&cfg80211_mutex); + rdev = __cfg80211_rdev_from_info(info); + + /* if it is not an error we grab the lock on + * it to assure it won't be going away while + * we operate on it */ + if (!IS_ERR(rdev)) + mutex_lock(&rdev->mtx); + + mutex_unlock(&cfg80211_mutex); + + return rdev; +} + +struct cfg80211_registered_device * +cfg80211_get_dev_from_ifindex(struct net *net, int ifindex) +{ + struct cfg80211_registered_device *rdev = ERR_PTR(-ENODEV); + struct net_device *dev; + + mutex_lock(&cfg80211_mutex); + dev = dev_get_by_index(net, ifindex); + if (!dev) + goto out; + if (dev->ieee80211_ptr) { + rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); + mutex_lock(&rdev->mtx); + } else + rdev = ERR_PTR(-ENODEV); + dev_put(dev); + out: + mutex_unlock(&cfg80211_mutex); + return rdev; +} + +/* requires cfg80211_mutex to be held */ +int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, + char *newname) +{ + struct cfg80211_registered_device *rdev2; + int wiphy_idx, taken = -1, result, digits; + + assert_cfg80211_lock(); + + /* prohibit calling the thing phy%d when %d is not its number */ + sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); + if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) { + /* count number of places needed to print wiphy_idx */ + digits = 1; + while (wiphy_idx /= 10) + digits++; + /* + * deny the name if it is phy where is printed + * without leading zeroes. taken == strlen(newname) here + */ + if (taken == strlen(PHY_NAME) + digits) + return -EINVAL; + } + + + /* Ignore nop renames */ + if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0) + return 0; + + /* Ensure another device does not already have this name. 
*/ + list_for_each_entry(rdev2, &cfg80211_rdev_list, list) + if (strcmp(newname, dev_name(&rdev2->wiphy.dev)) == 0) + return -EINVAL; + + result = device_rename(&rdev->wiphy.dev, newname); + if (result) + return result; + + if (rdev->wiphy.debugfsdir && + !debugfs_rename(rdev->wiphy.debugfsdir->d_parent, + rdev->wiphy.debugfsdir, + rdev->wiphy.debugfsdir->d_parent, + newname)) + printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n", + newname); + + nl80211_notify_dev_rename(rdev); + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, + struct net *net) +{ + struct wireless_dev *wdev; + int err = 0; + + if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK)) + return -EOPNOTSUPP; + + list_for_each_entry(wdev, &rdev->netdev_list, list) { + wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL; + err = dev_change_net_namespace(wdev->netdev, net, "wlan%d"); + if (err) + break; + wdev->netdev->features |= NETIF_F_NETNS_LOCAL; + } + + if (err) { + /* failed -- clean up to old netns */ + net = wiphy_net(&rdev->wiphy); + + list_for_each_entry_continue_reverse(wdev, &rdev->netdev_list, + list) { + wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL; + err = dev_change_net_namespace(wdev->netdev, net, + "wlan%d"); + WARN_ON(err); + wdev->netdev->features |= NETIF_F_NETNS_LOCAL; + } + } + + wiphy_net_set(&rdev->wiphy, net); + + return err; +} +#endif + +static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) +{ + struct cfg80211_registered_device *rdev = data; + + rdev->ops->rfkill_poll(&rdev->wiphy); +} + +static int cfg80211_rfkill_set_block(void *data, bool blocked) +{ + struct cfg80211_registered_device *rdev = data; + struct wireless_dev *wdev; + + if (!blocked) + return 0; + + rtnl_lock(); + mutex_lock(&rdev->devlist_mtx); + + list_for_each_entry(wdev, &rdev->netdev_list, list) + dev_close(wdev->netdev); + + mutex_unlock(&rdev->devlist_mtx); + rtnl_unlock(); + + return 0; +} + +static void cfg80211_rfkill_sync_work(struct work_struct *work) +{ + struct cfg80211_registered_device *rdev; + + rdev = container_of(work, struct cfg80211_registered_device, rfkill_sync); + cfg80211_rfkill_set_block(rdev, rfkill_blocked(rdev->rfkill)); +} + +static void cfg80211_event_work(struct work_struct *work) +{ + struct cfg80211_registered_device *rdev; + + rdev = container_of(work, struct cfg80211_registered_device, + event_work); + + rtnl_lock(); + cfg80211_lock_rdev(rdev); + + cfg80211_process_rdev_events(rdev); + cfg80211_unlock_rdev(rdev); + rtnl_unlock(); +} + +/* exported functions */ + +struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) +{ + static int wiphy_counter; + + struct cfg80211_registered_device *rdev; + int alloc_size; + + WARN_ON(ops->add_key && (!ops->del_key || !ops->set_default_key)); + WARN_ON(ops->auth && (!ops->assoc || !ops->deauth || !ops->disassoc)); + WARN_ON(ops->connect && !ops->disconnect); + WARN_ON(ops->join_ibss && !ops->leave_ibss); + WARN_ON(ops->add_virtual_intf && !ops->del_virtual_intf); + WARN_ON(ops->add_station && !ops->del_station); + WARN_ON(ops->add_mpath && !ops->del_mpath); + + alloc_size = sizeof(*rdev) + sizeof_priv; + + rdev = kzalloc(alloc_size, GFP_KERNEL); + if (!rdev) + return NULL; + + rdev->ops = ops; + + mutex_lock(&cfg80211_mutex); + + rdev->wiphy_idx = wiphy_counter++; + + if (unlikely(!wiphy_idx_valid(rdev->wiphy_idx))) { + wiphy_counter--; + mutex_unlock(&cfg80211_mutex); + /* ugh, wrapped! 
*/ + kfree(rdev); + return NULL; + } + + mutex_unlock(&cfg80211_mutex); + + /* give it a proper name */ + dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); + + mutex_init(&rdev->mtx); + mutex_init(&rdev->devlist_mtx); + INIT_LIST_HEAD(&rdev->netdev_list); + spin_lock_init(&rdev->bss_lock); + INIT_LIST_HEAD(&rdev->bss_list); + INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); + +#ifdef CONFIG_CFG80211_WEXT + rdev->wiphy.wext = &cfg80211_wext_handler; +#endif + + device_initialize(&rdev->wiphy.dev); + rdev->wiphy.dev.class = &ieee80211_class; + rdev->wiphy.dev.platform_data = rdev; + +#ifdef CONFIG_CFG80211_DEFAULT_PS + rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + wiphy_net_set(&rdev->wiphy, &init_net); +#endif + + rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block; + rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev), + &rdev->wiphy.dev, RFKILL_TYPE_WLAN, + &rdev->rfkill_ops, rdev); + + if (!rdev->rfkill) { + kfree(rdev); + return NULL; + } + + INIT_WORK(&rdev->rfkill_sync, cfg80211_rfkill_sync_work); + INIT_WORK(&rdev->conn_work, cfg80211_conn_work); + INIT_WORK(&rdev->event_work, cfg80211_event_work); + + init_waitqueue_head(&rdev->dev_wait); + + /* + * Initialize wiphy parameters to IEEE 802.11 MIB default values. + * Fragmentation and RTS threshold are disabled by default with the + * special -1 value. + */ + rdev->wiphy.retry_short = 7; + rdev->wiphy.retry_long = 4; + rdev->wiphy.frag_threshold = (u32) -1; + rdev->wiphy.rts_threshold = (u32) -1; + rdev->wiphy.coverage_class = 0; + + return &rdev->wiphy; +} +EXPORT_SYMBOL(wiphy_new); + +int wiphy_register(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + int res; + enum ieee80211_band band; + struct ieee80211_supported_band *sband; + bool have_band = false; + int i; + u16 ifmodes = wiphy->interface_modes; + + if (WARN_ON(wiphy->addresses && !wiphy->n_addresses)) + return -EINVAL; + + if (WARN_ON(wiphy->addresses && + !is_zero_ether_addr(wiphy->perm_addr) && + memcmp(wiphy->perm_addr, wiphy->addresses[0].addr, + ETH_ALEN))) + return -EINVAL; + + if (wiphy->addresses) + memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN); + + /* sanity check ifmodes */ + WARN_ON(!ifmodes); + ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1; + if (WARN_ON(ifmodes != wiphy->interface_modes)) + wiphy->interface_modes = ifmodes; + + /* sanity check supported bands/channels */ + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + sband = wiphy->bands[band]; + if (!sband) + continue; + + sband->band = band; + + if (WARN_ON(!sband->n_channels || !sband->n_bitrates)) + return -EINVAL; + + /* + * Since we use a u32 for rate bitmaps in + * ieee80211_get_response_rate, we cannot + * have more than 32 legacy rates. 
+ */ + if (WARN_ON(sband->n_bitrates > 32)) + return -EINVAL; + + for (i = 0; i < sband->n_channels; i++) { + sband->channels[i].orig_flags = + sband->channels[i].flags; + sband->channels[i].orig_mag = + sband->channels[i].max_antenna_gain; + sband->channels[i].orig_mpwr = + sband->channels[i].max_power; + sband->channels[i].band = band; + } + + have_band = true; + } + + if (!have_band) { + WARN_ON(1); + return -EINVAL; + } + + /* check and set up bitrates */ + ieee80211_set_bitrate_flags(wiphy); + + res = device_add(&rdev->wiphy.dev); + if (res) + return res; + + res = rfkill_register(rdev->rfkill); + if (res) + goto out_rm_dev; + + mutex_lock(&cfg80211_mutex); + + /* set up regulatory info */ + wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); + + list_add_rcu(&rdev->list, &cfg80211_rdev_list); + cfg80211_rdev_list_generation++; + + mutex_unlock(&cfg80211_mutex); + + /* add to debugfs */ + rdev->wiphy.debugfsdir = + debugfs_create_dir(wiphy_name(&rdev->wiphy), + ieee80211_debugfs_dir); + if (IS_ERR(rdev->wiphy.debugfsdir)) + rdev->wiphy.debugfsdir = NULL; + + if (wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) { + struct regulatory_request request; + + request.wiphy_idx = get_wiphy_idx(wiphy); + request.initiator = NL80211_REGDOM_SET_BY_DRIVER; + request.alpha2[0] = '9'; + request.alpha2[1] = '9'; + + nl80211_send_reg_change_event(&request); + } + + cfg80211_debugfs_rdev_add(rdev); + + return 0; + + out_rm_dev: + device_del(&rdev->wiphy.dev); + return res; +} +EXPORT_SYMBOL(wiphy_register); + +void wiphy_rfkill_start_polling(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + if (!rdev->ops->rfkill_poll) + return; + rdev->rfkill_ops.poll = cfg80211_rfkill_poll; + rfkill_resume_polling(rdev->rfkill); +} +EXPORT_SYMBOL(wiphy_rfkill_start_polling); + +void wiphy_rfkill_stop_polling(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + rfkill_pause_polling(rdev->rfkill); +} +EXPORT_SYMBOL(wiphy_rfkill_stop_polling); + +void wiphy_unregister(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + rfkill_unregister(rdev->rfkill); + + /* protect the device list */ + mutex_lock(&cfg80211_mutex); + + wait_event(rdev->dev_wait, ({ + int __count; + mutex_lock(&rdev->devlist_mtx); + __count = rdev->opencount; + mutex_unlock(&rdev->devlist_mtx); + __count == 0;})); + + mutex_lock(&rdev->devlist_mtx); + BUG_ON(!list_empty(&rdev->netdev_list)); + mutex_unlock(&rdev->devlist_mtx); + + /* + * First remove the hardware from everywhere, this makes + * it impossible to find from userspace. + */ + debugfs_remove_recursive(rdev->wiphy.debugfsdir); + list_del_rcu(&rdev->list); + synchronize_rcu(); + + /* + * Try to grab rdev->mtx. If a command is still in progress, + * hopefully the driver will refuse it since it's tearing + * down the device already. We wait for this command to complete + * before unlinking the item from the list. + * Note: as codified by the BUG_ON above we cannot get here if + * a virtual interface is still present. Hence, we can only get + * to lock contention here if userspace issues a command that + * identified the hardware by wiphy index. 
+ */ + cfg80211_lock_rdev(rdev); + /* nothing */ + cfg80211_unlock_rdev(rdev); + + /* If this device got a regulatory hint tell core its + * free to listen now to a new shiny device regulatory hint */ + reg_device_remove(wiphy); + + cfg80211_rdev_list_generation++; + device_del(&rdev->wiphy.dev); + + mutex_unlock(&cfg80211_mutex); + + flush_work(&rdev->scan_done_wk); + cancel_work_sync(&rdev->conn_work); + flush_work(&rdev->event_work); +} +EXPORT_SYMBOL(wiphy_unregister); + +void cfg80211_dev_free(struct cfg80211_registered_device *rdev) +{ + struct cfg80211_internal_bss *scan, *tmp; + rfkill_destroy(rdev->rfkill); + mutex_destroy(&rdev->mtx); + mutex_destroy(&rdev->devlist_mtx); + list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) + cfg80211_put_bss(&scan->pub); + kfree(rdev); +} + +void wiphy_free(struct wiphy *wiphy) +{ + put_device(&wiphy->dev); +} +EXPORT_SYMBOL(wiphy_free); + +void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + if (rfkill_set_hw_state(rdev->rfkill, blocked)) + schedule_work(&rdev->rfkill_sync); +} +EXPORT_SYMBOL(wiphy_rfkill_set_hw_state); + +static void wdev_cleanup_work(struct work_struct *work) +{ + struct wireless_dev *wdev; + struct cfg80211_registered_device *rdev; + + wdev = container_of(work, struct wireless_dev, cleanup_work); + rdev = wiphy_to_dev(wdev->wiphy); + + cfg80211_lock_rdev(rdev); + + if (WARN_ON(rdev->scan_req && rdev->scan_req->dev == wdev->netdev)) { + rdev->scan_req->aborted = true; + ___cfg80211_scan_done(rdev, true); + } + + cfg80211_unlock_rdev(rdev); + + mutex_lock(&rdev->devlist_mtx); + rdev->opencount--; + mutex_unlock(&rdev->devlist_mtx); + wake_up(&rdev->dev_wait); + + dev_put(wdev->netdev); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) +static struct device_type wiphy_type = { + .name = "wlan", +}; +#endif + +static int cfg80211_netdev_notifier_call(struct notifier_block * nb, + unsigned long state, + void *ndev) +{ + struct net_device *dev = ndev; + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev; + + if (!wdev) + return NOTIFY_DONE; + + rdev = wiphy_to_dev(wdev->wiphy); + + WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); + + switch (state) { + case NETDEV_POST_INIT: + SET_NETDEV_DEVTYPE(dev, &wiphy_type); + break; + case NETDEV_REGISTER: + /* + * NB: cannot take rdev->mtx here because this may be + * called within code protected by it when interfaces + * are added with nl80211. 
+ */ + mutex_init(&wdev->mtx); + INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work); + INIT_LIST_HEAD(&wdev->event_list); + spin_lock_init(&wdev->event_lock); + INIT_LIST_HEAD(&wdev->action_registrations); + spin_lock_init(&wdev->action_registrations_lock); + + mutex_lock(&rdev->devlist_mtx); + list_add_rcu(&wdev->list, &rdev->netdev_list); + rdev->devlist_generation++; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + /* can only change netns with wiphy */ + dev->features |= NETIF_F_NETNS_LOCAL; +#endif + + if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, + "phy80211")) { + printk(KERN_ERR "wireless: failed to add phy80211 " + "symlink to netdev!\n"); + } + wdev->netdev = dev; + wdev->sme_state = CFG80211_SME_IDLE; + mutex_unlock(&rdev->devlist_mtx); +#ifdef CONFIG_CFG80211_WEXT +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32)) + if (!dev->wireless_handlers) + dev->wireless_handlers = &cfg80211_wext_handler; +#endif + wdev->wext.default_key = -1; + wdev->wext.default_mgmt_key = -1; + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; +#endif + + if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) + wdev->ps = true; + else + wdev->ps = false; + wdev->ps_timeout = 100; + if (rdev->ops->set_power_mgmt) + if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, + wdev->ps, + wdev->ps_timeout)) { + /* assume this means it's off */ + wdev->ps = false; + } + + if (!dev->ethtool_ops) + dev->ethtool_ops = &cfg80211_ethtool_ops; + + if ((wdev->iftype == NL80211_IFTYPE_STATION || + wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) + dev->priv_flags |= IFF_DONT_BRIDGE; + break; + case NETDEV_GOING_DOWN: + switch (wdev->iftype) { + case NL80211_IFTYPE_ADHOC: + cfg80211_leave_ibss(rdev, dev, true); + break; + case NL80211_IFTYPE_STATION: + wdev_lock(wdev); +#ifdef CONFIG_CFG80211_WEXT + kfree(wdev->wext.ie); + wdev->wext.ie = NULL; + wdev->wext.ie_len = 0; + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; +#endif + __cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, true); + cfg80211_mlme_down(rdev, dev); + wdev_unlock(wdev); + break; + default: + break; + } + break; + case NETDEV_DOWN: + dev_hold(dev); + queue_work(cfg80211_wq, &wdev->cleanup_work); + break; + case NETDEV_UP: + /* + * If we have a really quick DOWN/UP succession we may + * have this work still pending ... cancel it and see + * if it was pending, in which case we need to account + * for some of the work it would have done. + */ + if (cancel_work_sync(&wdev->cleanup_work)) { + mutex_lock(&rdev->devlist_mtx); + rdev->opencount--; + mutex_unlock(&rdev->devlist_mtx); + dev_put(dev); + } + cfg80211_lock_rdev(rdev); + mutex_lock(&rdev->devlist_mtx); +#ifdef CONFIG_CFG80211_WEXT + wdev_lock(wdev); + switch (wdev->iftype) { + case NL80211_IFTYPE_ADHOC: + cfg80211_ibss_wext_join(rdev, wdev); + break; + case NL80211_IFTYPE_STATION: + cfg80211_mgd_wext_connect(rdev, wdev); + break; + default: + break; + } + wdev_unlock(wdev); +#endif + rdev->opencount++; + mutex_unlock(&rdev->devlist_mtx); + cfg80211_unlock_rdev(rdev); + break; + case NETDEV_UNREGISTER: + /* + * NB: cannot take rdev->mtx here because this may be + * called within code protected by it when interfaces + * are removed with nl80211. + */ + mutex_lock(&rdev->devlist_mtx); + /* + * It is possible to get NETDEV_UNREGISTER + * multiple times. To detect that, check + * that the interface is still on the list + * of registered interfaces, and only then + * remove and clean it up. 
+ */ + if (!list_empty(&wdev->list)) { + sysfs_remove_link(&dev->dev.kobj, "phy80211"); + list_del_rcu(&wdev->list); + rdev->devlist_generation++; + cfg80211_mlme_purge_actions(wdev); +#ifdef CONFIG_CFG80211_WEXT + kfree(wdev->wext.keys); +#endif + } + mutex_unlock(&rdev->devlist_mtx); + /* + * synchronise (so that we won't find this netdev + * from other code any more) and then clear the list + * head so that the above code can safely check for + * !list_empty() to avoid double-cleanup. + */ + synchronize_rcu(); + INIT_LIST_HEAD(&wdev->list); + break; + case NETDEV_PRE_UP: + if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) + return notifier_from_errno(-EOPNOTSUPP); + if (rfkill_blocked(rdev->rfkill)) + return notifier_from_errno(-ERFKILL); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block cfg80211_netdev_notifier = { + .notifier_call = cfg80211_netdev_notifier_call, +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +static void __net_exit cfg80211_pernet_exit(struct net *net) +{ + struct cfg80211_registered_device *rdev; + + rtnl_lock(); + mutex_lock(&cfg80211_mutex); + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + if (net_eq(wiphy_net(&rdev->wiphy), net)) + WARN_ON(cfg80211_switch_netns(rdev, &init_net)); + } + mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); +} + +static struct pernet_operations cfg80211_pernet_ops = { + .exit = cfg80211_pernet_exit, +}; +#endif + +static int __init cfg80211_init(void) +{ + int err; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + err = register_pernet_device(&cfg80211_pernet_ops); + if (err) + goto out_fail_pernet; +#endif + + err = wiphy_sysfs_init(); + if (err) + goto out_fail_sysfs; + + err = register_netdevice_notifier(&cfg80211_netdev_notifier); + if (err) + goto out_fail_notifier; + + err = nl80211_init(); + if (err) + goto out_fail_nl80211; + + ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL); + + err = regulatory_init(); + if (err) + goto out_fail_reg; + + cfg80211_wq = create_singlethread_workqueue("cfg80211"); + if (!cfg80211_wq) + goto out_fail_wq; + + return 0; + +out_fail_wq: + regulatory_exit(); +out_fail_reg: + debugfs_remove(ieee80211_debugfs_dir); +out_fail_nl80211: + unregister_netdevice_notifier(&cfg80211_netdev_notifier); +out_fail_notifier: + wiphy_sysfs_exit(); +out_fail_sysfs: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + unregister_pernet_device(&cfg80211_pernet_ops); +out_fail_pernet: +#endif + return err; +} +subsys_initcall(cfg80211_init); + +static void cfg80211_exit(void) +{ + debugfs_remove(ieee80211_debugfs_dir); + nl80211_exit(); + unregister_netdevice_notifier(&cfg80211_netdev_notifier); + wiphy_sysfs_exit(); + regulatory_exit(); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + unregister_pernet_device(&cfg80211_pernet_ops); +#endif + destroy_workqueue(cfg80211_wq); +} +module_exit(cfg80211_exit); diff --git a/net/wireless/core.h b/net/wireless/core.h new file mode 100644 index 0000000..ec99f1e --- /dev/null +++ b/net/wireless/core.h @@ -0,0 +1,412 @@ +/* + * Wireless configuration interface internals. 
+ * + * Copyright 2006-2010 Johannes Berg + */ +#ifndef __NET_WIRELESS_CORE_H +#define __NET_WIRELESS_CORE_H +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) +#include +#else +#include +#endif +#include +#include +#include +#include "reg.h" + +struct cfg80211_registered_device { + const struct cfg80211_ops *ops; + struct list_head list; + /* we hold this mutex during any call so that + * we cannot do multiple calls at once, and also + * to avoid the deregister call to proceed while + * any call is in progress */ + struct mutex mtx; + + /* rfkill support */ + struct rfkill_ops rfkill_ops; + struct rfkill *rfkill; + struct work_struct rfkill_sync; + + /* ISO / IEC 3166 alpha2 for which this device is receiving + * country IEs on, this can help disregard country IEs from APs + * on the same alpha2 quickly. The alpha2 may differ from + * cfg80211_regdomain's alpha2 when an intersection has occurred. + * If the AP is reconfigured this can also be used to tell us if + * the country on the country IE changed. */ + char country_ie_alpha2[2]; + + /* If a Country IE has been received this tells us the environment + * which its telling us its in. This defaults to ENVIRON_ANY */ + enum environment_cap env; + + /* wiphy index, internal only */ + int wiphy_idx; + + /* associate netdev list */ + struct mutex devlist_mtx; + /* protected by devlist_mtx or RCU */ + struct list_head netdev_list; + int devlist_generation; + int opencount; /* also protected by devlist_mtx */ + wait_queue_head_t dev_wait; + + /* BSSes/scanning */ + spinlock_t bss_lock; + struct list_head bss_list; + struct rb_root bss_tree; + u32 bss_generation; + struct cfg80211_scan_request *scan_req; /* protected by RTNL */ + unsigned long suspend_at; + struct work_struct scan_done_wk; + +#ifdef CONFIG_NL80211_TESTMODE + struct genl_info *testmode_info; +#endif + + struct work_struct conn_work; + struct work_struct event_work; + + /* current channel */ + struct ieee80211_channel *channel; + + /* must be last because of the way we do wiphy_priv(), + * and it should at least be aligned to NETDEV_ALIGN */ + struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); +}; + +static inline +struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy) +{ + BUG_ON(!wiphy); + return container_of(wiphy, struct cfg80211_registered_device, wiphy); +} + +/* Note 0 is valid, hence phy0 */ +static inline +bool wiphy_idx_valid(int wiphy_idx) +{ + return (wiphy_idx >= 0); +} + + +extern struct workqueue_struct *cfg80211_wq; +extern struct mutex cfg80211_mutex; +extern struct list_head cfg80211_rdev_list; +extern int cfg80211_rdev_list_generation; + +#define assert_cfg80211_lock() WARN_ON(!mutex_is_locked(&cfg80211_mutex)) + +/* + * You can use this to mark a wiphy_idx as not having an associated wiphy. 
+ * It guarantees cfg80211_rdev_by_wiphy_idx(wiphy_idx) will return NULL + */ +#define WIPHY_IDX_STALE -1 + +struct cfg80211_internal_bss { + struct list_head list; + struct rb_node rbn; + unsigned long ts; + struct kref ref; + atomic_t hold; + bool beacon_ies_allocated; + bool proberesp_ies_allocated; + + /* must be last because of priv member */ + struct cfg80211_bss pub; +}; + +static inline struct cfg80211_internal_bss *bss_from_pub(struct cfg80211_bss *pub) +{ + return container_of(pub, struct cfg80211_internal_bss, pub); +} + +static inline void cfg80211_ref_bss(struct cfg80211_internal_bss *bss) +{ + kref_get(&bss->ref); +} + +static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss) +{ + atomic_inc(&bss->hold); +} + +static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss) +{ + int r = atomic_dec_return(&bss->hold); + WARN_ON(r < 0); +} + + +struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx); +int get_wiphy_idx(struct wiphy *wiphy); + +struct cfg80211_registered_device * +__cfg80211_rdev_from_info(struct genl_info *info); + +/* + * This function returns a pointer to the driver + * that the genl_info item that is passed refers to. + * If successful, it returns non-NULL and also locks + * the driver's mutex! + * + * This means that you need to call cfg80211_unlock_rdev() + * before being allowed to acquire &cfg80211_mutex! + * + * This is necessary because we need to lock the global + * mutex to get an item off the list safely, and then + * we lock the rdev mutex so it doesn't go away under us. + * + * We don't want to keep cfg80211_mutex locked + * for all the time in order to allow requests on + * other interfaces to go through at the same time. + * + * The result of this can be a PTR_ERR and hence must + * be checked with IS_ERR() for errors. + */ +extern struct cfg80211_registered_device * +cfg80211_get_dev_from_info(struct genl_info *info); + +/* requires cfg80211_rdev_mutex to be held! 
*/ +struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); + +/* identical to cfg80211_get_dev_from_info but only operate on ifindex */ +extern struct cfg80211_registered_device * +cfg80211_get_dev_from_ifindex(struct net *net, int ifindex); + +int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, + struct net *net); + +static inline void cfg80211_lock_rdev(struct cfg80211_registered_device *rdev) +{ + mutex_lock(&rdev->mtx); +} + +static inline void cfg80211_unlock_rdev(struct cfg80211_registered_device *rdev) +{ + BUG_ON(IS_ERR(rdev) || !rdev); + mutex_unlock(&rdev->mtx); +} + +static inline void wdev_lock(struct wireless_dev *wdev) + __acquires(wdev) +{ + mutex_lock(&wdev->mtx); + __acquire(wdev->mtx); +} + +static inline void wdev_unlock(struct wireless_dev *wdev) + __releases(wdev) +{ + __release(wdev->mtx); + mutex_unlock(&wdev->mtx); +} + +#define ASSERT_RDEV_LOCK(rdev) WARN_ON(!mutex_is_locked(&(rdev)->mtx)); +#define ASSERT_WDEV_LOCK(wdev) WARN_ON(!mutex_is_locked(&(wdev)->mtx)); + +enum cfg80211_event_type { + EVENT_CONNECT_RESULT, + EVENT_ROAMED, + EVENT_DISCONNECTED, + EVENT_IBSS_JOINED, +}; + +struct cfg80211_event { + struct list_head list; + enum cfg80211_event_type type; + + union { + struct { + u8 bssid[ETH_ALEN]; + const u8 *req_ie; + const u8 *resp_ie; + size_t req_ie_len; + size_t resp_ie_len; + u16 status; + } cr; + struct { + u8 bssid[ETH_ALEN]; + const u8 *req_ie; + const u8 *resp_ie; + size_t req_ie_len; + size_t resp_ie_len; + } rm; + struct { + const u8 *ie; + size_t ie_len; + u16 reason; + } dc; + struct { + u8 bssid[ETH_ALEN]; + } ij; + }; +}; + +struct cfg80211_cached_keys { + struct key_params params[6]; + u8 data[6][WLAN_MAX_KEY_LEN]; + int def, defmgmt; +}; + + +/* free object */ +extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev); + +extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, + char *newname); + +void ieee80211_set_bitrate_flags(struct wiphy *wiphy); +void wiphy_update_regulatory(struct wiphy *wiphy, + enum nl80211_reg_initiator setby); + +void cfg80211_bss_expire(struct cfg80211_registered_device *dev); +void cfg80211_bss_age(struct cfg80211_registered_device *dev, + unsigned long age_secs); + +/* IBSS */ +int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_ibss_params *params, + struct cfg80211_cached_keys *connkeys); +int cfg80211_join_ibss(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_ibss_params *params, + struct cfg80211_cached_keys *connkeys); +void cfg80211_clear_ibss(struct net_device *dev, bool nowext); +int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, + struct net_device *dev, bool nowext); +int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, + struct net_device *dev, bool nowext); +void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid); +int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev); + +/* MLME */ +int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_auth_type auth_type, + const u8 *bssid, + const u8 *ssid, int ssid_len, + const u8 *ie, int ie_len, + const u8 *key, int key_len, int key_idx); +int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, + struct net_device *dev, struct ieee80211_channel *chan, + enum nl80211_auth_type auth_type, const u8 *bssid, + const u8 *ssid, int ssid_len, + const u8 *ie, 
int ie_len, + const u8 *key, int key_len, int key_idx); +int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ieee80211_channel *chan, + const u8 *bssid, const u8 *prev_bssid, + const u8 *ssid, int ssid_len, + const u8 *ie, int ie_len, bool use_mfp, + struct cfg80211_crypto_settings *crypt); +int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, + struct net_device *dev, struct ieee80211_channel *chan, + const u8 *bssid, const u8 *prev_bssid, + const u8 *ssid, int ssid_len, + const u8 *ie, int ie_len, bool use_mfp, + struct cfg80211_crypto_settings *crypt); +int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *bssid, + const u8 *ie, int ie_len, u16 reason); +int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *bssid, + const u8 *ie, int ie_len, u16 reason); +int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *bssid, + const u8 *ie, int ie_len, u16 reason); +void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, + struct net_device *dev); +void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, + u16 status, bool wextev, + struct cfg80211_bss *bss); +int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid, + const u8 *match_data, int match_len); +void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid); +void cfg80211_mlme_purge_actions(struct wireless_dev *wdev); +int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + const u8 *buf, size_t len, u64 *cookie); + +/* SME */ +int __cfg80211_connect(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_connect_params *connect, + struct cfg80211_cached_keys *connkeys, + const u8 *prev_bssid); +int cfg80211_connect(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_connect_params *connect, + struct cfg80211_cached_keys *connkeys); +int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, + struct net_device *dev, u16 reason, + bool wextev); +int cfg80211_disconnect(struct cfg80211_registered_device *rdev, + struct net_device *dev, u16 reason, + bool wextev); +void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len); +int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev); + +void cfg80211_conn_work(struct work_struct *work); +void cfg80211_sme_failed_assoc(struct wireless_dev *wdev); +bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev); + +/* internal helpers */ +int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, + struct key_params *params, int key_idx, + const u8 *mac_addr); +void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, + size_t ie_len, u16 reason, bool from_ap); +void cfg80211_sme_scan_done(struct net_device *dev); +void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len); +void cfg80211_sme_disassoc(struct net_device *dev, int idx); +void __cfg80211_scan_done(struct work_struct *wk); +void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak); +void 
cfg80211_upload_connect_keys(struct wireless_dev *wdev); +int cfg80211_change_iface(struct cfg80211_registered_device *rdev, + struct net_device *dev, enum nl80211_iftype ntype, + u32 *flags, struct vif_params *params); +void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); + +struct ieee80211_channel * +rdev_fixed_channel(struct cfg80211_registered_device *rdev, + struct wireless_dev *for_wdev); +struct ieee80211_channel * +rdev_freq_to_chan(struct cfg80211_registered_device *rdev, + int freq, enum nl80211_channel_type channel_type); +int rdev_set_freq(struct cfg80211_registered_device *rdev, + struct wireless_dev *for_wdev, + int freq, enum nl80211_channel_type channel_type); + +u16 cfg80211_calculate_bitrate(struct rate_info *rate); + +#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS +#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) +#else +/* + * Trick to enable using it as a condition, + * and also not give a warning when it's + * not used that way. + */ +#define CFG80211_DEV_WARN_ON(cond) ({bool __r = (cond); __r; }) +#endif + +#endif /* __NET_WIRELESS_CORE_H */ diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c new file mode 100644 index 0000000..2e48956 --- /dev/null +++ b/net/wireless/debugfs.c @@ -0,0 +1,118 @@ +/* + * cfg80211 debugfs + * + * Copyright 2009 Luis R. Rodriguez + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "core.h" +#include "debugfs.h" + +static int cfg80211_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ +static ssize_t name## _read(struct file *file, char __user *userbuf, \ + size_t count, loff_t *ppos) \ +{ \ + struct wiphy *wiphy= file->private_data; \ + char buf[buflen]; \ + int res; \ + \ + res = scnprintf(buf, buflen, fmt "\n", ##value); \ + return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ +} \ + \ +static const struct file_operations name## _ops = { \ + .read = name## _read, \ + .open = cfg80211_open_file_generic, \ +}; + +DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", + wiphy->rts_threshold) +DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", + wiphy->frag_threshold); +DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d", + wiphy->retry_short) +DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d", + wiphy->retry_long); + +static int ht_print_chan(struct ieee80211_channel *chan, + char *buf, int buf_size, int offset) +{ + if (WARN_ON(offset > buf_size)) + return 0; + + if (chan->flags & IEEE80211_CHAN_DISABLED) + return snprintf(buf + offset, + buf_size - offset, + "%d Disabled\n", + chan->center_freq); + + return snprintf(buf + offset, + buf_size - offset, + "%d HT40 %c%c\n", + chan->center_freq, + (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-', + (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? 
' ' : '+'); +} + +static ssize_t ht40allow_map_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wiphy *wiphy = file->private_data; + char *buf; + unsigned int offset = 0, buf_size = PAGE_SIZE, i, r; + enum ieee80211_band band; + struct ieee80211_supported_band *sband; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&cfg80211_mutex); + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + sband = wiphy->bands[band]; + if (!sband) + continue; + for (i = 0; i < sband->n_channels; i++) + offset += ht_print_chan(&sband->channels[i], + buf, buf_size, offset); + } + + mutex_unlock(&cfg80211_mutex); + + r = simple_read_from_buffer(user_buf, count, ppos, buf, offset); + + kfree(buf); + + return r; +} + +static const struct file_operations ht40allow_map_ops = { + .read = ht40allow_map_read, + .open = cfg80211_open_file_generic, +}; + +#define DEBUGFS_ADD(name) \ + debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy, &name## _ops); + +void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) +{ + struct dentry *phyd = rdev->wiphy.debugfsdir; + + DEBUGFS_ADD(rts_threshold); + DEBUGFS_ADD(fragmentation_threshold); + DEBUGFS_ADD(short_retry_limit); + DEBUGFS_ADD(long_retry_limit); + DEBUGFS_ADD(ht40allow_map); +} diff --git a/net/wireless/debugfs.h b/net/wireless/debugfs.h new file mode 100644 index 0000000..74fdd38 --- /dev/null +++ b/net/wireless/debugfs.h @@ -0,0 +1,11 @@ +#ifndef __CFG80211_DEBUGFS_H +#define __CFG80211_DEBUGFS_H + +#ifdef CONFIG_CFG80211_DEBUGFS +void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev); +#else +static inline +void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) {} +#endif + +#endif /* __CFG80211_DEBUGFS_H */ diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c new file mode 100644 index 0000000..ca4c825 --- /dev/null +++ b/net/wireless/ethtool.c @@ -0,0 +1,45 @@ +#include +#include +#include "ethtool.h" + +static void cfg80211_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name, + sizeof(info->driver)); + + strlcpy(info->version, init_utsname()->release, sizeof(info->version)); + + if (wdev->wiphy->fw_version[0]) + strncpy(info->fw_version, wdev->wiphy->fw_version, + sizeof(info->fw_version)); + else + strncpy(info->fw_version, "N/A", sizeof(info->fw_version)); + + strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)), + sizeof(info->bus_info)); +} + +static int cfg80211_get_regs_len(struct net_device *dev) +{ + /* For now, return 0... 
*/ + return 0; +} + +static void cfg80211_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *data) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + regs->version = wdev->wiphy->hw_version; + regs->len = 0; +} + +const struct ethtool_ops cfg80211_ethtool_ops = { + .get_drvinfo = cfg80211_get_drvinfo, + .get_regs_len = cfg80211_get_regs_len, + .get_regs = cfg80211_get_regs, + .get_link = ethtool_op_get_link, +}; diff --git a/net/wireless/ethtool.h b/net/wireless/ethtool.h new file mode 100644 index 0000000..695ecad --- /dev/null +++ b/net/wireless/ethtool.h @@ -0,0 +1,6 @@ +#ifndef __CFG80211_ETHTOOL__ +#define __CFG80211_ETHTOOL__ + +extern const struct ethtool_ops cfg80211_ethtool_ops; + +#endif /* __CFG80211_ETHTOOL__ */ diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c new file mode 100644 index 0000000..6ef5a49 --- /dev/null +++ b/net/wireless/ibss.c @@ -0,0 +1,509 @@ +/* + * Some IBSS support code for cfg80211. + * + * Copyright 2009 Johannes Berg + */ + +#include +#include +#include +#include "wext-compat.h" +#include "nl80211.h" + + +void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_bss *bss; +#ifdef CONFIG_CFG80211_WEXT + union iwreq_data wrqu; +#endif + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) + return; + + if (!wdev->ssid_len) + return; + + bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, + wdev->ssid, wdev->ssid_len, + WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS); + + if (WARN_ON(!bss)) + return; + + if (wdev->current_bss) { + cfg80211_unhold_bss(wdev->current_bss); + cfg80211_put_bss(&wdev->current_bss->pub); + } + + cfg80211_hold_bss(bss_from_pub(bss)); + wdev->current_bss = bss_from_pub(bss); + + cfg80211_upload_connect_keys(wdev); + + nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, + GFP_KERNEL); +#ifdef CONFIG_CFG80211_WEXT + memset(&wrqu, 0, sizeof(wrqu)); + memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); + wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); +#endif +} + +void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_event *ev; + unsigned long flags; + + CFG80211_DEV_WARN_ON(!wdev->ssid_len); + + ev = kzalloc(sizeof(*ev), gfp); + if (!ev) + return; + + ev->type = EVENT_IBSS_JOINED; + memcpy(ev->cr.bssid, bssid, ETH_ALEN); + + spin_lock_irqsave(&wdev->event_lock, flags); + list_add_tail(&ev->list, &wdev->event_list); + spin_unlock_irqrestore(&wdev->event_lock, flags); + queue_work(cfg80211_wq, &rdev->event_work); +} +EXPORT_SYMBOL(cfg80211_ibss_joined); + +int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_ibss_params *params, + struct cfg80211_cached_keys *connkeys) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct ieee80211_channel *chan; + int err; + + ASSERT_WDEV_LOCK(wdev); + + chan = rdev_fixed_channel(rdev, wdev); + if (chan && chan != params->channel) + return -EBUSY; + + if (wdev->ssid_len) + return -EALREADY; + + if (WARN_ON(wdev->connect_keys)) + kfree(wdev->connect_keys); + wdev->connect_keys = connkeys; + +#ifdef CONFIG_CFG80211_WEXT + wdev->wext.ibss.channel = params->channel; +#endif + err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); + if (err) { + wdev->connect_keys = NULL; + return err; + } + + memcpy(wdev->ssid, params->ssid, params->ssid_len); + wdev->ssid_len = 
params->ssid_len; + + return 0; +} + +int cfg80211_join_ibss(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_ibss_params *params, + struct cfg80211_cached_keys *connkeys) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + mutex_lock(&rdev->devlist_mtx); + wdev_lock(wdev); + err = __cfg80211_join_ibss(rdev, dev, params, connkeys); + wdev_unlock(wdev); + mutex_unlock(&rdev->devlist_mtx); + + return err; +} + +static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + int i; + + ASSERT_WDEV_LOCK(wdev); + + kfree(wdev->connect_keys); + wdev->connect_keys = NULL; + + /* + * Delete all the keys ... pairwise keys can't really + * exist any more anyway, but default keys might. + */ + if (rdev->ops->del_key) + for (i = 0; i < 6; i++) + rdev->ops->del_key(wdev->wiphy, dev, i, NULL); + + if (wdev->current_bss) { + cfg80211_unhold_bss(wdev->current_bss); + cfg80211_put_bss(&wdev->current_bss->pub); + } + + wdev->current_bss = NULL; + wdev->ssid_len = 0; +#ifdef CONFIG_CFG80211_WEXT + if (!nowext) + wdev->wext.ibss.ssid_len = 0; +#endif +} + +void cfg80211_clear_ibss(struct net_device *dev, bool nowext) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + wdev_lock(wdev); + __cfg80211_clear_ibss(dev, nowext); + wdev_unlock(wdev); +} + +int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, + struct net_device *dev, bool nowext) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + ASSERT_WDEV_LOCK(wdev); + + if (!wdev->ssid_len) + return -ENOLINK; + + err = rdev->ops->leave_ibss(&rdev->wiphy, dev); + + if (err) + return err; + + __cfg80211_clear_ibss(dev, nowext); + + return 0; +} + +int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, + struct net_device *dev, bool nowext) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + wdev_lock(wdev); + err = __cfg80211_leave_ibss(rdev, dev, nowext); + wdev_unlock(wdev); + + return err; +} + +#ifdef CONFIG_CFG80211_WEXT +int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev) +{ + struct cfg80211_cached_keys *ck = NULL; + enum ieee80211_band band; + int i, err; + + ASSERT_WDEV_LOCK(wdev); + + if (!wdev->wext.ibss.beacon_interval) + wdev->wext.ibss.beacon_interval = 100; + + /* try to find an IBSS channel if none requested ... 
*/ + if (!wdev->wext.ibss.channel) { + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + struct ieee80211_supported_band *sband; + struct ieee80211_channel *chan; + + sband = rdev->wiphy.bands[band]; + if (!sband) + continue; + + for (i = 0; i < sband->n_channels; i++) { + chan = &sband->channels[i]; + if (chan->flags & IEEE80211_CHAN_NO_IBSS) + continue; + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + wdev->wext.ibss.channel = chan; + break; + } + + if (wdev->wext.ibss.channel) + break; + } + + if (!wdev->wext.ibss.channel) + return -EINVAL; + } + + /* don't join -- SSID is not there */ + if (!wdev->wext.ibss.ssid_len) + return 0; + + if (!netif_running(wdev->netdev)) + return 0; + + if (wdev->wext.keys) + wdev->wext.keys->def = wdev->wext.default_key; + + wdev->wext.ibss.privacy = wdev->wext.default_key != -1; + + if (wdev->wext.keys) { + ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); + if (!ck) + return -ENOMEM; + for (i = 0; i < 6; i++) + ck->params[i].key = ck->data[i]; + } + err = __cfg80211_join_ibss(rdev, wdev->netdev, + &wdev->wext.ibss, ck); + if (err) + kfree(ck); + + return err; +} + +int cfg80211_ibss_wext_siwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *wextfreq, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct ieee80211_channel *chan = NULL; + int err, freq; + + /* call only for ibss! */ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) + return -EINVAL; + + if (!rdev->ops->join_ibss) + return -EOPNOTSUPP; + + freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); + if (freq < 0) + return freq; + + if (freq) { + chan = ieee80211_get_channel(wdev->wiphy, freq); + if (!chan) + return -EINVAL; + if (chan->flags & IEEE80211_CHAN_NO_IBSS || + chan->flags & IEEE80211_CHAN_DISABLED) + return -EINVAL; + } + + if (wdev->wext.ibss.channel == chan) + return 0; + + wdev_lock(wdev); + err = 0; + if (wdev->ssid_len) + err = __cfg80211_leave_ibss(rdev, dev, true); + wdev_unlock(wdev); + + if (err) + return err; + + if (chan) { + wdev->wext.ibss.channel = chan; + wdev->wext.ibss.channel_fixed = true; + } else { + /* cfg80211_ibss_wext_join will pick one if needed */ + wdev->wext.ibss.channel_fixed = false; + } + + mutex_lock(&rdev->devlist_mtx); + wdev_lock(wdev); + err = cfg80211_ibss_wext_join(rdev, wdev); + wdev_unlock(wdev); + mutex_unlock(&rdev->devlist_mtx); + + return err; +} + +int cfg80211_ibss_wext_giwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *freq, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct ieee80211_channel *chan = NULL; + + /* call only for ibss! */ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) + return -EINVAL; + + wdev_lock(wdev); + if (wdev->current_bss) + chan = wdev->current_bss->pub.channel; + else if (wdev->wext.ibss.channel) + chan = wdev->wext.ibss.channel; + wdev_unlock(wdev); + + if (chan) { + freq->m = chan->center_freq; + freq->e = 6; + return 0; + } + + /* no channel if not joining */ + return -EINVAL; +} + +int cfg80211_ibss_wext_siwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + size_t len = data->length; + int err; + + /* call only for ibss! 
*/ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) + return -EINVAL; + + if (!rdev->ops->join_ibss) + return -EOPNOTSUPP; + + wdev_lock(wdev); + err = 0; + if (wdev->ssid_len) + err = __cfg80211_leave_ibss(rdev, dev, true); + wdev_unlock(wdev); + + if (err) + return err; + + /* iwconfig uses nul termination in SSID.. */ + if (len > 0 && ssid[len - 1] == '\0') + len--; + + wdev->wext.ibss.ssid = wdev->ssid; + memcpy(wdev->wext.ibss.ssid, ssid, len); + wdev->wext.ibss.ssid_len = len; + + mutex_lock(&rdev->devlist_mtx); + wdev_lock(wdev); + err = cfg80211_ibss_wext_join(rdev, wdev); + wdev_unlock(wdev); + mutex_unlock(&rdev->devlist_mtx); + + return err; +} + +int cfg80211_ibss_wext_giwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + /* call only for ibss! */ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) + return -EINVAL; + + data->flags = 0; + + wdev_lock(wdev); + if (wdev->ssid_len) { + data->flags = 1; + data->length = wdev->ssid_len; + memcpy(ssid, wdev->ssid, data->length); + } else if (wdev->wext.ibss.ssid && wdev->wext.ibss.ssid_len) { + data->flags = 1; + data->length = wdev->wext.ibss.ssid_len; + memcpy(ssid, wdev->wext.ibss.ssid, data->length); + } + wdev_unlock(wdev); + + return 0; +} + +int cfg80211_ibss_wext_siwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + u8 *bssid = ap_addr->sa_data; + int err; + + /* call only for ibss! */ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) + return -EINVAL; + + if (!rdev->ops->join_ibss) + return -EOPNOTSUPP; + + if (ap_addr->sa_family != ARPHRD_ETHER) + return -EINVAL; + + /* automatic mode */ + if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) + bssid = NULL; + + /* both automatic */ + if (!bssid && !wdev->wext.ibss.bssid) + return 0; + + /* fixed already - and no change */ + if (wdev->wext.ibss.bssid && bssid && + compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0) + return 0; + + wdev_lock(wdev); + err = 0; + if (wdev->ssid_len) + err = __cfg80211_leave_ibss(rdev, dev, true); + wdev_unlock(wdev); + + if (err) + return err; + + if (bssid) { + memcpy(wdev->wext.bssid, bssid, ETH_ALEN); + wdev->wext.ibss.bssid = wdev->wext.bssid; + } else + wdev->wext.ibss.bssid = NULL; + + mutex_lock(&rdev->devlist_mtx); + wdev_lock(wdev); + err = cfg80211_ibss_wext_join(rdev, wdev); + wdev_unlock(wdev); + mutex_unlock(&rdev->devlist_mtx); + + return err; +} + +int cfg80211_ibss_wext_giwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + /* call only for ibss! */ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) + return -EINVAL; + + ap_addr->sa_family = ARPHRD_ETHER; + + wdev_lock(wdev); + if (wdev->current_bss) + memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); + else if (wdev->wext.ibss.bssid) + memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN); + else + memset(ap_addr->sa_data, 0, ETH_ALEN); + + wdev_unlock(wdev); + + return 0; +} +#endif diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c new file mode 100644 index 0000000..97d411f --- /dev/null +++ b/net/wireless/lib80211.c @@ -0,0 +1,284 @@ +/* + * lib80211 -- common bits for IEEE802.11 drivers + * + * Copyright(c) 2008 John W. 
Linville + * + * Portions copied from old ieee80211 component, w/ original copyright + * notices below: + * + * Host AP crypto routines + * + * Copyright (c) 2002-2003, Jouni Malinen + * Portions Copyright (C) 2004, Intel Corporation + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRV_NAME "lib80211" + +#define DRV_DESCRIPTION "common routines for IEEE802.11 drivers" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("John W. Linville "); +MODULE_LICENSE("GPL"); + +struct lib80211_crypto_alg { + struct list_head list; + struct lib80211_crypto_ops *ops; +}; + +static LIST_HEAD(lib80211_crypto_algs); +static DEFINE_SPINLOCK(lib80211_crypto_lock); + +const char *print_ssid(char *buf, const char *ssid, u8 ssid_len) +{ + const char *s = ssid; + char *d = buf; + + ssid_len = min_t(u8, ssid_len, IEEE80211_MAX_SSID_LEN); + while (ssid_len--) { + if (isprint(*s)) { + *d++ = *s++; + continue; + } + + *d++ = '\\'; + if (*s == '\0') + *d++ = '0'; + else if (*s == '\n') + *d++ = 'n'; + else if (*s == '\r') + *d++ = 'r'; + else if (*s == '\t') + *d++ = 't'; + else if (*s == '\\') + *d++ = '\\'; + else + d += snprintf(d, 3, "%03o", *s); + s++; + } + *d = '\0'; + return buf; +} +EXPORT_SYMBOL(print_ssid); + +int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name, + spinlock_t *lock) +{ + memset(info, 0, sizeof(*info)); + + info->name = name; + info->lock = lock; + + INIT_LIST_HEAD(&info->crypt_deinit_list); + setup_timer(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler, + (unsigned long)info); + + return 0; +} +EXPORT_SYMBOL(lib80211_crypt_info_init); + +void lib80211_crypt_info_free(struct lib80211_crypt_info *info) +{ + int i; + + lib80211_crypt_quiescing(info); + del_timer_sync(&info->crypt_deinit_timer); + lib80211_crypt_deinit_entries(info, 1); + + for (i = 0; i < NUM_WEP_KEYS; i++) { + struct lib80211_crypt_data *crypt = info->crypt[i]; + if (crypt) { + if (crypt->ops) { + crypt->ops->deinit(crypt->priv); + module_put(crypt->ops->owner); + } + kfree(crypt); + info->crypt[i] = NULL; + } + } +} +EXPORT_SYMBOL(lib80211_crypt_info_free); + +void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force) +{ + struct lib80211_crypt_data *entry, *next; + unsigned long flags; + + spin_lock_irqsave(info->lock, flags); + list_for_each_entry_safe(entry, next, &info->crypt_deinit_list, list) { + if (atomic_read(&entry->refcnt) != 0 && !force) + continue; + + list_del(&entry->list); + + if (entry->ops) { + entry->ops->deinit(entry->priv); + module_put(entry->ops->owner); + } + kfree(entry); + } + spin_unlock_irqrestore(info->lock, flags); +} +EXPORT_SYMBOL(lib80211_crypt_deinit_entries); + +/* After this, crypt_deinit_list won't accept new members */ +void lib80211_crypt_quiescing(struct lib80211_crypt_info *info) +{ + unsigned long flags; + + spin_lock_irqsave(info->lock, flags); + info->crypt_quiesced = 1; + spin_unlock_irqrestore(info->lock, flags); +} +EXPORT_SYMBOL(lib80211_crypt_quiescing); + +void lib80211_crypt_deinit_handler(unsigned long data) +{ + struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data; + unsigned long flags; + + lib80211_crypt_deinit_entries(info, 0); + + spin_lock_irqsave(info->lock, flags); + if (!list_empty(&info->crypt_deinit_list) && !info->crypt_quiesced) { + printk(KERN_DEBUG "%s: entries remaining in delayed crypt " + "deletion list\n", info->name); + info->crypt_deinit_timer.expires = jiffies + HZ; + add_timer(&info->crypt_deinit_timer); + } + 
spin_unlock_irqrestore(info->lock, flags); +} +EXPORT_SYMBOL(lib80211_crypt_deinit_handler); + +void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info, + struct lib80211_crypt_data **crypt) +{ + struct lib80211_crypt_data *tmp; + unsigned long flags; + + if (*crypt == NULL) + return; + + tmp = *crypt; + *crypt = NULL; + + /* must not run ops->deinit() while there may be pending encrypt or + * decrypt operations. Use a list of delayed deinits to avoid needing + * locking. */ + + spin_lock_irqsave(info->lock, flags); + if (!info->crypt_quiesced) { + list_add(&tmp->list, &info->crypt_deinit_list); + if (!timer_pending(&info->crypt_deinit_timer)) { + info->crypt_deinit_timer.expires = jiffies + HZ; + add_timer(&info->crypt_deinit_timer); + } + } + spin_unlock_irqrestore(info->lock, flags); +} +EXPORT_SYMBOL(lib80211_crypt_delayed_deinit); + +int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops) +{ + unsigned long flags; + struct lib80211_crypto_alg *alg; + + alg = kzalloc(sizeof(*alg), GFP_KERNEL); + if (alg == NULL) + return -ENOMEM; + + alg->ops = ops; + + spin_lock_irqsave(&lib80211_crypto_lock, flags); + list_add(&alg->list, &lib80211_crypto_algs); + spin_unlock_irqrestore(&lib80211_crypto_lock, flags); + + printk(KERN_DEBUG "lib80211_crypt: registered algorithm '%s'\n", + ops->name); + + return 0; +} +EXPORT_SYMBOL(lib80211_register_crypto_ops); + +int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops) +{ + struct lib80211_crypto_alg *alg; + unsigned long flags; + + spin_lock_irqsave(&lib80211_crypto_lock, flags); + list_for_each_entry(alg, &lib80211_crypto_algs, list) { + if (alg->ops == ops) + goto found; + } + spin_unlock_irqrestore(&lib80211_crypto_lock, flags); + return -EINVAL; + + found: + printk(KERN_DEBUG "lib80211_crypt: unregistered algorithm " + "'%s'\n", ops->name); + list_del(&alg->list); + spin_unlock_irqrestore(&lib80211_crypto_lock, flags); + kfree(alg); + return 0; +} +EXPORT_SYMBOL(lib80211_unregister_crypto_ops); + +struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name) +{ + struct lib80211_crypto_alg *alg; + unsigned long flags; + + spin_lock_irqsave(&lib80211_crypto_lock, flags); + list_for_each_entry(alg, &lib80211_crypto_algs, list) { + if (strcmp(alg->ops->name, name) == 0) + goto found; + } + spin_unlock_irqrestore(&lib80211_crypto_lock, flags); + return NULL; + + found: + spin_unlock_irqrestore(&lib80211_crypto_lock, flags); + return alg->ops; +} +EXPORT_SYMBOL(lib80211_get_crypto_ops); + +static void *lib80211_crypt_null_init(int keyidx) +{ + return (void *)1; +} + +static void lib80211_crypt_null_deinit(void *priv) +{ +} + +static struct lib80211_crypto_ops lib80211_crypt_null = { + .name = "NULL", + .init = lib80211_crypt_null_init, + .deinit = lib80211_crypt_null_deinit, + .owner = THIS_MODULE, +}; + +static int __init lib80211_init(void) +{ + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION "\n"); + return lib80211_register_crypto_ops(&lib80211_crypt_null); +} + +static void __exit lib80211_exit(void) +{ + lib80211_unregister_crypto_ops(&lib80211_crypt_null); + BUG_ON(!list_empty(&lib80211_crypto_algs)); +} + +module_init(lib80211_init); +module_exit(lib80211_exit); diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c new file mode 100644 index 0000000..b7fa31d --- /dev/null +++ b/net/wireless/lib80211_crypt_ccmp.c @@ -0,0 +1,494 @@ +/* + * lib80211 crypt: host-based CCMP encryption implementation for lib80211 + * + * Copyright (c) 2003-2004, Jouni Malinen + * Copyright (c) 
2008, John W. Linville + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. See README and COPYING for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include + +MODULE_AUTHOR("Jouni Malinen"); +MODULE_DESCRIPTION("Host AP crypt: CCMP"); +MODULE_LICENSE("GPL"); + +#define AES_BLOCK_LEN 16 +#define CCMP_HDR_LEN 8 +#define CCMP_MIC_LEN 8 +#define CCMP_TK_LEN 16 +#define CCMP_PN_LEN 6 + +struct lib80211_ccmp_data { + u8 key[CCMP_TK_LEN]; + int key_set; + + u8 tx_pn[CCMP_PN_LEN]; + u8 rx_pn[CCMP_PN_LEN]; + + u32 dot11RSNAStatsCCMPFormatErrors; + u32 dot11RSNAStatsCCMPReplays; + u32 dot11RSNAStatsCCMPDecryptErrors; + + int key_idx; + + struct crypto_cipher *tfm; + + /* scratch buffers for virt_to_page() (crypto API) */ + u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], + tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN]; + u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; +}; + +static inline void lib80211_ccmp_aes_encrypt(struct crypto_cipher *tfm, + const u8 pt[16], u8 ct[16]) +{ + crypto_cipher_encrypt_one(tfm, ct, pt); +} + +static void *lib80211_ccmp_init(int key_idx) +{ + struct lib80211_ccmp_data *priv; + + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); + if (priv == NULL) + goto fail; + priv->key_idx = key_idx; + + priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->tfm)) { + printk(KERN_DEBUG "lib80211_crypt_ccmp: could not allocate " + "crypto API aes\n"); + priv->tfm = NULL; + goto fail; + } + + return priv; + + fail: + if (priv) { + if (priv->tfm) + crypto_free_cipher(priv->tfm); + kfree(priv); + } + + return NULL; +} + +static void lib80211_ccmp_deinit(void *priv) +{ + struct lib80211_ccmp_data *_priv = priv; + if (_priv && _priv->tfm) + crypto_free_cipher(_priv->tfm); + kfree(priv); +} + +static inline void xor_block(u8 * b, u8 * a, size_t len) +{ + int i; + for (i = 0; i < len; i++) + b[i] ^= a[i]; +} + +static void ccmp_init_blocks(struct crypto_cipher *tfm, + struct ieee80211_hdr *hdr, + u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0) +{ + u8 *pos, qc = 0; + size_t aad_len; + int a4_included, qc_included; + u8 aad[2 * AES_BLOCK_LEN]; + + a4_included = ieee80211_has_a4(hdr->frame_control); + qc_included = ieee80211_is_data_qos(hdr->frame_control); + + aad_len = 22; + if (a4_included) + aad_len += 6; + if (qc_included) { + pos = (u8 *) & hdr->addr4; + if (a4_included) + pos += 6; + qc = *pos & 0x0f; + aad_len += 2; + } + + /* CCM Initial Block: + * Flag (Include authentication header, M=3 (8-octet MIC), + * L=1 (2-octet Dlen)) + * Nonce: 0x00 | A2 | PN + * Dlen */ + b0[0] = 0x59; + b0[1] = qc; + memcpy(b0 + 2, hdr->addr2, ETH_ALEN); + memcpy(b0 + 8, pn, CCMP_PN_LEN); + b0[14] = (dlen >> 8) & 0xff; + b0[15] = dlen & 0xff; + + /* AAD: + * FC with bits 4..6 and 11..13 masked to zero; 14 is always one + * A1 | A2 | A3 + * SC with bits 4..15 (seq#) masked to zero + * A4 (if present) + * QC (if present) + */ + pos = (u8 *) hdr; + aad[0] = 0; /* aad_len >> 8 */ + aad[1] = aad_len & 0xff; + aad[2] = pos[0] & 0x8f; + aad[3] = pos[1] & 0xc7; + memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN); + pos = (u8 *) & hdr->seq_ctrl; + aad[22] = pos[0] & 0x0f; + aad[23] = 0; /* all bits masked */ + memset(aad + 24, 0, 8); + if (a4_included) + memcpy(aad + 24, hdr->addr4, ETH_ALEN); + if (qc_included) { + 
aad[a4_included ? 30 : 24] = qc; + /* rest of QC masked */ + } + + /* Start with the first block and AAD */ + lib80211_ccmp_aes_encrypt(tfm, b0, auth); + xor_block(auth, aad, AES_BLOCK_LEN); + lib80211_ccmp_aes_encrypt(tfm, auth, auth); + xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN); + lib80211_ccmp_aes_encrypt(tfm, auth, auth); + b0[0] &= 0x07; + b0[14] = b0[15] = 0; + lib80211_ccmp_aes_encrypt(tfm, b0, s0); +} + +static int lib80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, + u8 *aeskey, int keylen, void *priv) +{ + struct lib80211_ccmp_data *key = priv; + int i; + u8 *pos; + + if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) + return -1; + + if (aeskey != NULL && keylen >= CCMP_TK_LEN) + memcpy(aeskey, key->key, CCMP_TK_LEN); + + pos = skb_push(skb, CCMP_HDR_LEN); + memmove(pos, pos + CCMP_HDR_LEN, hdr_len); + pos += hdr_len; + + i = CCMP_PN_LEN - 1; + while (i >= 0) { + key->tx_pn[i]++; + if (key->tx_pn[i] != 0) + break; + i--; + } + + *pos++ = key->tx_pn[5]; + *pos++ = key->tx_pn[4]; + *pos++ = 0; + *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */ ; + *pos++ = key->tx_pn[3]; + *pos++ = key->tx_pn[2]; + *pos++ = key->tx_pn[1]; + *pos++ = key->tx_pn[0]; + + return CCMP_HDR_LEN; +} + +static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +{ + struct lib80211_ccmp_data *key = priv; + int data_len, i, blocks, last, len; + u8 *pos, *mic; + struct ieee80211_hdr *hdr; + u8 *b0 = key->tx_b0; + u8 *b = key->tx_b; + u8 *e = key->tx_e; + u8 *s0 = key->tx_s0; + + if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) + return -1; + + data_len = skb->len - hdr_len; + len = lib80211_ccmp_hdr(skb, hdr_len, NULL, 0, priv); + if (len < 0) + return -1; + + pos = skb->data + hdr_len + CCMP_HDR_LEN; + hdr = (struct ieee80211_hdr *)skb->data; + ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); + + blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); + last = data_len % AES_BLOCK_LEN; + + for (i = 1; i <= blocks; i++) { + len = (i == blocks && last) ? last : AES_BLOCK_LEN; + /* Authentication */ + xor_block(b, pos, len); + lib80211_ccmp_aes_encrypt(key->tfm, b, b); + /* Encryption, with counter */ + b0[14] = (i >> 8) & 0xff; + b0[15] = i & 0xff; + lib80211_ccmp_aes_encrypt(key->tfm, b0, e); + xor_block(pos, e, len); + pos += len; + } + + mic = skb_put(skb, CCMP_MIC_LEN); + for (i = 0; i < CCMP_MIC_LEN; i++) + mic[i] = b[i] ^ s0[i]; + + return 0; +} + +/* + * deal with seq counter wrapping correctly. 
+ * refer to timer_after() for jiffies wrapping handling + */ +static inline int ccmp_replay_check(u8 *pn_n, u8 *pn_o) +{ + u32 iv32_n, iv16_n; + u32 iv32_o, iv16_o; + + iv32_n = (pn_n[0] << 24) | (pn_n[1] << 16) | (pn_n[2] << 8) | pn_n[3]; + iv16_n = (pn_n[4] << 8) | pn_n[5]; + + iv32_o = (pn_o[0] << 24) | (pn_o[1] << 16) | (pn_o[2] << 8) | pn_o[3]; + iv16_o = (pn_o[4] << 8) | pn_o[5]; + + if ((s32)iv32_n - (s32)iv32_o < 0 || + (iv32_n == iv32_o && iv16_n <= iv16_o)) + return 1; + return 0; +} + +static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) +{ + struct lib80211_ccmp_data *key = priv; + u8 keyidx, *pos; + struct ieee80211_hdr *hdr; + u8 *b0 = key->rx_b0; + u8 *b = key->rx_b; + u8 *a = key->rx_a; + u8 pn[6]; + int i, blocks, last, len; + size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN; + u8 *mic = skb->data + skb->len - CCMP_MIC_LEN; + + if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) { + key->dot11RSNAStatsCCMPFormatErrors++; + return -1; + } + + hdr = (struct ieee80211_hdr *)skb->data; + pos = skb->data + hdr_len; + keyidx = pos[3]; + if (!(keyidx & (1 << 5))) { + if (net_ratelimit()) { + printk(KERN_DEBUG "CCMP: received packet without ExtIV" + " flag from %pM\n", hdr->addr2); + } + key->dot11RSNAStatsCCMPFormatErrors++; + return -2; + } + keyidx >>= 6; + if (key->key_idx != keyidx) { + printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame " + "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv); + return -6; + } + if (!key->key_set) { + if (net_ratelimit()) { + printk(KERN_DEBUG "CCMP: received packet from %pM" + " with keyid=%d that does not have a configured" + " key\n", hdr->addr2, keyidx); + } + return -3; + } + + pn[0] = pos[7]; + pn[1] = pos[6]; + pn[2] = pos[5]; + pn[3] = pos[4]; + pn[4] = pos[1]; + pn[5] = pos[0]; + pos += 8; + + if (ccmp_replay_check(pn, key->rx_pn)) { +#ifdef CONFIG_LIB80211_DEBUG + if (net_ratelimit()) { + printk(KERN_DEBUG "CCMP: replay detected: STA=%pM " + "previous PN %02x%02x%02x%02x%02x%02x " + "received PN %02x%02x%02x%02x%02x%02x\n", + hdr->addr2, + key->rx_pn[0], key->rx_pn[1], key->rx_pn[2], + key->rx_pn[3], key->rx_pn[4], key->rx_pn[5], + pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]); + } +#endif + key->dot11RSNAStatsCCMPReplays++; + return -4; + } + + ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b); + xor_block(mic, b, CCMP_MIC_LEN); + + blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); + last = data_len % AES_BLOCK_LEN; + + for (i = 1; i <= blocks; i++) { + len = (i == blocks && last) ? 
last : AES_BLOCK_LEN; + /* Decrypt, with counter */ + b0[14] = (i >> 8) & 0xff; + b0[15] = i & 0xff; + lib80211_ccmp_aes_encrypt(key->tfm, b0, b); + xor_block(pos, b, len); + /* Authentication */ + xor_block(a, pos, len); + lib80211_ccmp_aes_encrypt(key->tfm, a, a); + pos += len; + } + + if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { + if (net_ratelimit()) { + printk(KERN_DEBUG "CCMP: decrypt failed: STA=" + "%pM\n", hdr->addr2); + } + key->dot11RSNAStatsCCMPDecryptErrors++; + return -5; + } + + memcpy(key->rx_pn, pn, CCMP_PN_LEN); + + /* Remove hdr and MIC */ + memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len); + skb_pull(skb, CCMP_HDR_LEN); + skb_trim(skb, skb->len - CCMP_MIC_LEN); + + return keyidx; +} + +static int lib80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv) +{ + struct lib80211_ccmp_data *data = priv; + int keyidx; + struct crypto_cipher *tfm = data->tfm; + + keyidx = data->key_idx; + memset(data, 0, sizeof(*data)); + data->key_idx = keyidx; + data->tfm = tfm; + if (len == CCMP_TK_LEN) { + memcpy(data->key, key, CCMP_TK_LEN); + data->key_set = 1; + if (seq) { + data->rx_pn[0] = seq[5]; + data->rx_pn[1] = seq[4]; + data->rx_pn[2] = seq[3]; + data->rx_pn[3] = seq[2]; + data->rx_pn[4] = seq[1]; + data->rx_pn[5] = seq[0]; + } + crypto_cipher_setkey(data->tfm, data->key, CCMP_TK_LEN); + } else if (len == 0) + data->key_set = 0; + else + return -1; + + return 0; +} + +static int lib80211_ccmp_get_key(void *key, int len, u8 * seq, void *priv) +{ + struct lib80211_ccmp_data *data = priv; + + if (len < CCMP_TK_LEN) + return -1; + + if (!data->key_set) + return 0; + memcpy(key, data->key, CCMP_TK_LEN); + + if (seq) { + seq[0] = data->tx_pn[5]; + seq[1] = data->tx_pn[4]; + seq[2] = data->tx_pn[3]; + seq[3] = data->tx_pn[2]; + seq[4] = data->tx_pn[1]; + seq[5] = data->tx_pn[0]; + } + + return CCMP_TK_LEN; +} + +static char *lib80211_ccmp_print_stats(char *p, void *priv) +{ + struct lib80211_ccmp_data *ccmp = priv; + + p += sprintf(p, "key[%d] alg=CCMP key_set=%d " + "tx_pn=%02x%02x%02x%02x%02x%02x " + "rx_pn=%02x%02x%02x%02x%02x%02x " + "format_errors=%d replays=%d decrypt_errors=%d\n", + ccmp->key_idx, ccmp->key_set, + ccmp->tx_pn[0], ccmp->tx_pn[1], ccmp->tx_pn[2], + ccmp->tx_pn[3], ccmp->tx_pn[4], ccmp->tx_pn[5], + ccmp->rx_pn[0], ccmp->rx_pn[1], ccmp->rx_pn[2], + ccmp->rx_pn[3], ccmp->rx_pn[4], ccmp->rx_pn[5], + ccmp->dot11RSNAStatsCCMPFormatErrors, + ccmp->dot11RSNAStatsCCMPReplays, + ccmp->dot11RSNAStatsCCMPDecryptErrors); + + return p; +} + +static struct lib80211_crypto_ops lib80211_crypt_ccmp = { + .name = "CCMP", + .init = lib80211_ccmp_init, + .deinit = lib80211_ccmp_deinit, + .build_iv = lib80211_ccmp_hdr, + .encrypt_mpdu = lib80211_ccmp_encrypt, + .decrypt_mpdu = lib80211_ccmp_decrypt, + .encrypt_msdu = NULL, + .decrypt_msdu = NULL, + .set_key = lib80211_ccmp_set_key, + .get_key = lib80211_ccmp_get_key, + .print_stats = lib80211_ccmp_print_stats, + .extra_mpdu_prefix_len = CCMP_HDR_LEN, + .extra_mpdu_postfix_len = CCMP_MIC_LEN, + .owner = THIS_MODULE, +}; + +static int __init lib80211_crypto_ccmp_init(void) +{ + return lib80211_register_crypto_ops(&lib80211_crypt_ccmp); +} + +static void __exit lib80211_crypto_ccmp_exit(void) +{ + lib80211_unregister_crypto_ops(&lib80211_crypt_ccmp); +} + +module_init(lib80211_crypto_ccmp_init); +module_exit(lib80211_crypto_ccmp_exit); diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c new file mode 100644 index 0000000..8cbdb32 --- /dev/null +++ b/net/wireless/lib80211_crypt_tkip.c @@ -0,0 +1,787 
@@ +/* + * lib80211 crypt: host-based TKIP encryption implementation for lib80211 + * + * Copyright (c) 2003-2004, Jouni Malinen + * Copyright (c) 2008, John W. Linville + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. See README and COPYING for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include + +MODULE_AUTHOR("Jouni Malinen"); +MODULE_DESCRIPTION("lib80211 crypt: TKIP"); +MODULE_LICENSE("GPL"); + +#define TKIP_HDR_LEN 8 + +struct lib80211_tkip_data { +#define TKIP_KEY_LEN 32 + u8 key[TKIP_KEY_LEN]; + int key_set; + + u32 tx_iv32; + u16 tx_iv16; + u16 tx_ttak[5]; + int tx_phase1_done; + + u32 rx_iv32; + u16 rx_iv16; + u16 rx_ttak[5]; + int rx_phase1_done; + u32 rx_iv32_new; + u16 rx_iv16_new; + + u32 dot11RSNAStatsTKIPReplays; + u32 dot11RSNAStatsTKIPICVErrors; + u32 dot11RSNAStatsTKIPLocalMICFailures; + + int key_idx; + + struct crypto_blkcipher *rx_tfm_arc4; + struct crypto_hash *rx_tfm_michael; + struct crypto_blkcipher *tx_tfm_arc4; + struct crypto_hash *tx_tfm_michael; + + /* scratch buffers for virt_to_page() (crypto API) */ + u8 rx_hdr[16], tx_hdr[16]; + + unsigned long flags; +}; + +static unsigned long lib80211_tkip_set_flags(unsigned long flags, void *priv) +{ + struct lib80211_tkip_data *_priv = priv; + unsigned long old_flags = _priv->flags; + _priv->flags = flags; + return old_flags; +} + +static unsigned long lib80211_tkip_get_flags(void *priv) +{ + struct lib80211_tkip_data *_priv = priv; + return _priv->flags; +} + +static void *lib80211_tkip_init(int key_idx) +{ + struct lib80211_tkip_data *priv; + + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); + if (priv == NULL) + goto fail; + + priv->key_idx = key_idx; + + priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->tx_tfm_arc4)) { + printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate " + "crypto API arc4\n"); + priv->tx_tfm_arc4 = NULL; + goto fail; + } + + priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->tx_tfm_michael)) { + printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate " + "crypto API michael_mic\n"); + priv->tx_tfm_michael = NULL; + goto fail; + } + + priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->rx_tfm_arc4)) { + printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate " + "crypto API arc4\n"); + priv->rx_tfm_arc4 = NULL; + goto fail; + } + + priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->rx_tfm_michael)) { + printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate " + "crypto API michael_mic\n"); + priv->rx_tfm_michael = NULL; + goto fail; + } + + return priv; + + fail: + if (priv) { + if (priv->tx_tfm_michael) + crypto_free_hash(priv->tx_tfm_michael); + if (priv->tx_tfm_arc4) + crypto_free_blkcipher(priv->tx_tfm_arc4); + if (priv->rx_tfm_michael) + crypto_free_hash(priv->rx_tfm_michael); + if (priv->rx_tfm_arc4) + crypto_free_blkcipher(priv->rx_tfm_arc4); + kfree(priv); + } + + return NULL; +} + +static void lib80211_tkip_deinit(void *priv) +{ + struct lib80211_tkip_data *_priv = priv; + if (_priv) { + if (_priv->tx_tfm_michael) + crypto_free_hash(_priv->tx_tfm_michael); + if (_priv->tx_tfm_arc4) + 
crypto_free_blkcipher(_priv->tx_tfm_arc4); + if (_priv->rx_tfm_michael) + crypto_free_hash(_priv->rx_tfm_michael); + if (_priv->rx_tfm_arc4) + crypto_free_blkcipher(_priv->rx_tfm_arc4); + } + kfree(priv); +} + +static inline u16 RotR1(u16 val) +{ + return (val >> 1) | (val << 15); +} + +static inline u8 Lo8(u16 val) +{ + return val & 0xff; +} + +static inline u8 Hi8(u16 val) +{ + return val >> 8; +} + +static inline u16 Lo16(u32 val) +{ + return val & 0xffff; +} + +static inline u16 Hi16(u32 val) +{ + return val >> 16; +} + +static inline u16 Mk16(u8 hi, u8 lo) +{ + return lo | (((u16) hi) << 8); +} + +static inline u16 Mk16_le(__le16 * v) +{ + return le16_to_cpu(*v); +} + +static const u16 Sbox[256] = { + 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, + 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, + 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, + 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, + 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, + 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, + 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, + 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, + 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, + 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, + 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, + 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, + 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, + 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, + 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, + 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, + 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, + 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, + 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, + 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, + 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, + 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, + 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, + 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, + 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, + 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, + 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, + 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, + 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, + 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, + 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, + 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, +}; + +static inline u16 _S_(u16 v) +{ + u16 t = Sbox[Hi8(v)]; + return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8)); +} + +#define PHASE1_LOOP_COUNT 8 + +static void tkip_mixing_phase1(u16 * TTAK, const u8 * TK, const u8 * TA, + u32 IV32) +{ + int i, j; + + /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */ + TTAK[0] = Lo16(IV32); + TTAK[1] = Hi16(IV32); + TTAK[2] = Mk16(TA[1], TA[0]); + TTAK[3] = Mk16(TA[3], TA[2]); + TTAK[4] = Mk16(TA[5], TA[4]); + + for (i = 0; i < PHASE1_LOOP_COUNT; i++) { + j = 2 * (i & 1); + TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j])); + TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j])); + TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j])); + TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 
+ j], TK[12 + j])); + TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i; + } +} + +static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK, + u16 IV16) +{ + /* Make temporary area overlap WEP seed so that the final copy can be + * avoided on little endian hosts. */ + u16 *PPK = (u16 *) & WEPSeed[4]; + + /* Step 1 - make copy of TTAK and bring in TSC */ + PPK[0] = TTAK[0]; + PPK[1] = TTAK[1]; + PPK[2] = TTAK[2]; + PPK[3] = TTAK[3]; + PPK[4] = TTAK[4]; + PPK[5] = TTAK[4] + IV16; + + /* Step 2 - 96-bit bijective mixing using S-box */ + PPK[0] += _S_(PPK[5] ^ Mk16_le((__le16 *) & TK[0])); + PPK[1] += _S_(PPK[0] ^ Mk16_le((__le16 *) & TK[2])); + PPK[2] += _S_(PPK[1] ^ Mk16_le((__le16 *) & TK[4])); + PPK[3] += _S_(PPK[2] ^ Mk16_le((__le16 *) & TK[6])); + PPK[4] += _S_(PPK[3] ^ Mk16_le((__le16 *) & TK[8])); + PPK[5] += _S_(PPK[4] ^ Mk16_le((__le16 *) & TK[10])); + + PPK[0] += RotR1(PPK[5] ^ Mk16_le((__le16 *) & TK[12])); + PPK[1] += RotR1(PPK[0] ^ Mk16_le((__le16 *) & TK[14])); + PPK[2] += RotR1(PPK[1]); + PPK[3] += RotR1(PPK[2]); + PPK[4] += RotR1(PPK[3]); + PPK[5] += RotR1(PPK[4]); + + /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value + * WEPSeed[0..2] is transmitted as WEP IV */ + WEPSeed[0] = Hi8(IV16); + WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F; + WEPSeed[2] = Lo8(IV16); + WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((__le16 *) & TK[0])) >> 1); + +#ifdef __BIG_ENDIAN + { + int i; + for (i = 0; i < 6; i++) + PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8); + } +#endif +} + +static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len, + u8 * rc4key, int keylen, void *priv) +{ + struct lib80211_tkip_data *tkey = priv; + u8 *pos; + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *)skb->data; + + if (skb_headroom(skb) < TKIP_HDR_LEN || skb->len < hdr_len) + return -1; + + if (rc4key == NULL || keylen < 16) + return -1; + + if (!tkey->tx_phase1_done) { + tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, + tkey->tx_iv32); + tkey->tx_phase1_done = 1; + } + tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); + + pos = skb_push(skb, TKIP_HDR_LEN); + memmove(pos, pos + TKIP_HDR_LEN, hdr_len); + pos += hdr_len; + + *pos++ = *rc4key; + *pos++ = *(rc4key + 1); + *pos++ = *(rc4key + 2); + *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ; + *pos++ = tkey->tx_iv32 & 0xff; + *pos++ = (tkey->tx_iv32 >> 8) & 0xff; + *pos++ = (tkey->tx_iv32 >> 16) & 0xff; + *pos++ = (tkey->tx_iv32 >> 24) & 0xff; + + tkey->tx_iv16++; + if (tkey->tx_iv16 == 0) { + tkey->tx_phase1_done = 0; + tkey->tx_iv32++; + } + + return TKIP_HDR_LEN; +} + +static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +{ + struct lib80211_tkip_data *tkey = priv; + struct blkcipher_desc desc = { .tfm = tkey->tx_tfm_arc4 }; + int len; + u8 rc4key[16], *pos, *icv; + u32 crc; + struct scatterlist sg; + + if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { + if (net_ratelimit()) { + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *)skb->data; + printk(KERN_DEBUG ": TKIP countermeasures: dropped " + "TX packet to %pM\n", hdr->addr1); + } + return -1; + } + + if (skb_tailroom(skb) < 4 || skb->len < hdr_len) + return -1; + + len = skb->len - hdr_len; + pos = skb->data + hdr_len; + + if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0) + return -1; + + crc = ~crc32_le(~0, pos, len); + icv = skb_put(skb, 4); + icv[0] = crc; + icv[1] = crc >> 8; + icv[2] = crc >> 16; + icv[3] = crc >> 24; + + crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); 
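For reference, the ICV bytes appended just above are simply the standard little-endian CRC-32 of the plaintext payload, stored least-significant byte first and then encrypted together with the payload by the RC4 keystream. The following stand-alone sketch (not part of the patch) reproduces that convention in user space; it assumes only the usual reflected 0xEDB88320 polynomial that the kernel's crc32_le() implements:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Bitwise little-endian (reflected) CRC-32, equivalent to the kernel's crc32_le() */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	int bit;

	while (len--) {
		crc ^= *p++;
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t payload[] = "example MPDU payload";	/* illustrative data */
	size_t len = sizeof(payload) - 1;
	uint32_t crc = ~crc32_le(~0u, payload, len);
	uint8_t icv[4];

	/* Same byte order as icv[0..3] in lib80211_tkip_encrypt() above */
	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;

	printf("ICV: %02x %02x %02x %02x\n", icv[0], icv[1], icv[2], icv[3]);
	return 0;
}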
+ sg_init_one(&sg, pos, len + 4); + return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); +} + +/* + * deal with seq counter wrapping correctly. + * refer to timer_after() for jiffies wrapping handling + */ +static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n, + u32 iv32_o, u16 iv16_o) +{ + if ((s32)iv32_n - (s32)iv32_o < 0 || + (iv32_n == iv32_o && iv16_n <= iv16_o)) + return 1; + return 0; +} + +static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) +{ + struct lib80211_tkip_data *tkey = priv; + struct blkcipher_desc desc = { .tfm = tkey->rx_tfm_arc4 }; + u8 rc4key[16]; + u8 keyidx, *pos; + u32 iv32; + u16 iv16; + struct ieee80211_hdr *hdr; + u8 icv[4]; + u32 crc; + struct scatterlist sg; + int plen; + + hdr = (struct ieee80211_hdr *)skb->data; + + if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { + if (net_ratelimit()) { + printk(KERN_DEBUG ": TKIP countermeasures: dropped " + "received packet from %pM\n", hdr->addr2); + } + return -1; + } + + if (skb->len < hdr_len + TKIP_HDR_LEN + 4) + return -1; + + pos = skb->data + hdr_len; + keyidx = pos[3]; + if (!(keyidx & (1 << 5))) { + if (net_ratelimit()) { + printk(KERN_DEBUG "TKIP: received packet without ExtIV" + " flag from %pM\n", hdr->addr2); + } + return -2; + } + keyidx >>= 6; + if (tkey->key_idx != keyidx) { + printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame " + "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv); + return -6; + } + if (!tkey->key_set) { + if (net_ratelimit()) { + printk(KERN_DEBUG "TKIP: received packet from %pM" + " with keyid=%d that does not have a configured" + " key\n", hdr->addr2, keyidx); + } + return -3; + } + iv16 = (pos[0] << 8) | pos[2]; + iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); + pos += TKIP_HDR_LEN; + + if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { +#ifdef CONFIG_LIB80211_DEBUG + if (net_ratelimit()) { + printk(KERN_DEBUG "TKIP: replay detected: STA=%pM" + " previous TSC %08x%04x received TSC " + "%08x%04x\n", hdr->addr2, + tkey->rx_iv32, tkey->rx_iv16, iv32, iv16); + } +#endif + tkey->dot11RSNAStatsTKIPReplays++; + return -4; + } + + if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) { + tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32); + tkey->rx_phase1_done = 1; + } + tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16); + + plen = skb->len - hdr_len - 12; + + crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); + sg_init_one(&sg, pos, plen + 4); + if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { + if (net_ratelimit()) { + printk(KERN_DEBUG ": TKIP: failed to decrypt " + "received packet from %pM\n", + hdr->addr2); + } + return -7; + } + + crc = ~crc32_le(~0, pos, plen); + icv[0] = crc; + icv[1] = crc >> 8; + icv[2] = crc >> 16; + icv[3] = crc >> 24; + if (memcmp(icv, pos + plen, 4) != 0) { + if (iv32 != tkey->rx_iv32) { + /* Previously cached Phase1 result was already lost, so + * it needs to be recalculated for the next packet. 
*/ + tkey->rx_phase1_done = 0; + } +#ifdef CONFIG_LIB80211_DEBUG + if (net_ratelimit()) { + printk(KERN_DEBUG "TKIP: ICV error detected: STA=" + "%pM\n", hdr->addr2); + } +#endif + tkey->dot11RSNAStatsTKIPICVErrors++; + return -5; + } + + /* Update real counters only after Michael MIC verification has + * completed */ + tkey->rx_iv32_new = iv32; + tkey->rx_iv16_new = iv16; + + /* Remove IV and ICV */ + memmove(skb->data + TKIP_HDR_LEN, skb->data, hdr_len); + skb_pull(skb, TKIP_HDR_LEN); + skb_trim(skb, skb->len - 4); + + return keyidx; +} + +static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr, + u8 * data, size_t data_len, u8 * mic) +{ + struct hash_desc desc; + struct scatterlist sg[2]; + + if (tfm_michael == NULL) { + printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n"); + return -1; + } + sg_init_table(sg, 2); + sg_set_buf(&sg[0], hdr, 16); + sg_set_buf(&sg[1], data, data_len); + + if (crypto_hash_setkey(tfm_michael, key, 8)) + return -1; + + desc.tfm = tfm_michael; + desc.flags = 0; + return crypto_hash_digest(&desc, sg, data_len + 16, mic); +} + +static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) +{ + struct ieee80211_hdr *hdr11; + + hdr11 = (struct ieee80211_hdr *)skb->data; + + switch (le16_to_cpu(hdr11->frame_control) & + (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { + case IEEE80211_FCTL_TODS: + memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ + memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ + break; + case IEEE80211_FCTL_FROMDS: + memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ + memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */ + break; + case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: + memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ + memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */ + break; + case 0: + memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ + memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ + break; + } + + if (ieee80211_is_data_qos(hdr11->frame_control)) { + hdr[12] = le16_to_cpu(*ieee80211_get_qos_ctl(hdr11)) + & IEEE80211_QOS_CTL_TID_MASK; + } else + hdr[12] = 0; /* priority */ + + hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ +} + +static int lib80211_michael_mic_add(struct sk_buff *skb, int hdr_len, + void *priv) +{ + struct lib80211_tkip_data *tkey = priv; + u8 *pos; + + if (skb_tailroom(skb) < 8 || skb->len < hdr_len) { + printk(KERN_DEBUG "Invalid packet for Michael MIC add " + "(tailroom=%d hdr_len=%d skb->len=%d)\n", + skb_tailroom(skb), hdr_len, skb->len); + return -1; + } + + michael_mic_hdr(skb, tkey->tx_hdr); + pos = skb_put(skb, 8); + if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr, + skb->data + hdr_len, skb->len - 8 - hdr_len, pos)) + return -1; + + return 0; +} + +static void lib80211_michael_mic_failure(struct net_device *dev, + struct ieee80211_hdr *hdr, + int keyidx) +{ + union iwreq_data wrqu; + struct iw_michaelmicfailure ev; + + /* TODO: needed parameters: count, keyid, key type, TSC */ + memset(&ev, 0, sizeof(ev)); + ev.flags = keyidx & IW_MICFAILURE_KEY_ID; + if (hdr->addr1[0] & 0x01) + ev.flags |= IW_MICFAILURE_GROUP; + else + ev.flags |= IW_MICFAILURE_PAIRWISE; + ev.src_addr.sa_family = ARPHRD_ETHER; + memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN); + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = sizeof(ev); + wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev); +} + +static int lib80211_michael_mic_verify(struct sk_buff *skb, int keyidx, + int hdr_len, void *priv) +{ + struct lib80211_tkip_data *tkey = priv; + u8 mic[8]; + + if 
(!tkey->key_set) + return -1; + + michael_mic_hdr(skb, tkey->rx_hdr); + if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr, + skb->data + hdr_len, skb->len - 8 - hdr_len, mic)) + return -1; + if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { + struct ieee80211_hdr *hdr; + hdr = (struct ieee80211_hdr *)skb->data; + printk(KERN_DEBUG "%s: Michael MIC verification failed for " + "MSDU from %pM keyidx=%d\n", + skb->dev ? skb->dev->name : "N/A", hdr->addr2, + keyidx); + if (skb->dev) + lib80211_michael_mic_failure(skb->dev, hdr, keyidx); + tkey->dot11RSNAStatsTKIPLocalMICFailures++; + return -1; + } + + /* Update TSC counters for RX now that the packet verification has + * completed. */ + tkey->rx_iv32 = tkey->rx_iv32_new; + tkey->rx_iv16 = tkey->rx_iv16_new; + + skb_trim(skb, skb->len - 8); + + return 0; +} + +static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv) +{ + struct lib80211_tkip_data *tkey = priv; + int keyidx; + struct crypto_hash *tfm = tkey->tx_tfm_michael; + struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4; + struct crypto_hash *tfm3 = tkey->rx_tfm_michael; + struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4; + + keyidx = tkey->key_idx; + memset(tkey, 0, sizeof(*tkey)); + tkey->key_idx = keyidx; + tkey->tx_tfm_michael = tfm; + tkey->tx_tfm_arc4 = tfm2; + tkey->rx_tfm_michael = tfm3; + tkey->rx_tfm_arc4 = tfm4; + if (len == TKIP_KEY_LEN) { + memcpy(tkey->key, key, TKIP_KEY_LEN); + tkey->key_set = 1; + tkey->tx_iv16 = 1; /* TSC is initialized to 1 */ + if (seq) { + tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) | + (seq[3] << 8) | seq[2]; + tkey->rx_iv16 = (seq[1] << 8) | seq[0]; + } + } else if (len == 0) + tkey->key_set = 0; + else + return -1; + + return 0; +} + +static int lib80211_tkip_get_key(void *key, int len, u8 * seq, void *priv) +{ + struct lib80211_tkip_data *tkey = priv; + + if (len < TKIP_KEY_LEN) + return -1; + + if (!tkey->key_set) + return 0; + memcpy(key, tkey->key, TKIP_KEY_LEN); + + if (seq) { + /* Return the sequence number of the last transmitted frame. 
*/ + u16 iv16 = tkey->tx_iv16; + u32 iv32 = tkey->tx_iv32; + if (iv16 == 0) + iv32--; + iv16--; + seq[0] = tkey->tx_iv16; + seq[1] = tkey->tx_iv16 >> 8; + seq[2] = tkey->tx_iv32; + seq[3] = tkey->tx_iv32 >> 8; + seq[4] = tkey->tx_iv32 >> 16; + seq[5] = tkey->tx_iv32 >> 24; + } + + return TKIP_KEY_LEN; +} + +static char *lib80211_tkip_print_stats(char *p, void *priv) +{ + struct lib80211_tkip_data *tkip = priv; + p += sprintf(p, "key[%d] alg=TKIP key_set=%d " + "tx_pn=%02x%02x%02x%02x%02x%02x " + "rx_pn=%02x%02x%02x%02x%02x%02x " + "replays=%d icv_errors=%d local_mic_failures=%d\n", + tkip->key_idx, tkip->key_set, + (tkip->tx_iv32 >> 24) & 0xff, + (tkip->tx_iv32 >> 16) & 0xff, + (tkip->tx_iv32 >> 8) & 0xff, + tkip->tx_iv32 & 0xff, + (tkip->tx_iv16 >> 8) & 0xff, + tkip->tx_iv16 & 0xff, + (tkip->rx_iv32 >> 24) & 0xff, + (tkip->rx_iv32 >> 16) & 0xff, + (tkip->rx_iv32 >> 8) & 0xff, + tkip->rx_iv32 & 0xff, + (tkip->rx_iv16 >> 8) & 0xff, + tkip->rx_iv16 & 0xff, + tkip->dot11RSNAStatsTKIPReplays, + tkip->dot11RSNAStatsTKIPICVErrors, + tkip->dot11RSNAStatsTKIPLocalMICFailures); + return p; +} + +static struct lib80211_crypto_ops lib80211_crypt_tkip = { + .name = "TKIP", + .init = lib80211_tkip_init, + .deinit = lib80211_tkip_deinit, + .build_iv = lib80211_tkip_hdr, + .encrypt_mpdu = lib80211_tkip_encrypt, + .decrypt_mpdu = lib80211_tkip_decrypt, + .encrypt_msdu = lib80211_michael_mic_add, + .decrypt_msdu = lib80211_michael_mic_verify, + .set_key = lib80211_tkip_set_key, + .get_key = lib80211_tkip_get_key, + .print_stats = lib80211_tkip_print_stats, + .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */ + .extra_mpdu_postfix_len = 4, /* ICV */ + .extra_msdu_postfix_len = 8, /* MIC */ + .get_flags = lib80211_tkip_get_flags, + .set_flags = lib80211_tkip_set_flags, + .owner = THIS_MODULE, +}; + +static int __init lib80211_crypto_tkip_init(void) +{ + return lib80211_register_crypto_ops(&lib80211_crypt_tkip); +} + +static void __exit lib80211_crypto_tkip_exit(void) +{ + lib80211_unregister_crypto_ops(&lib80211_crypt_tkip); +} + +module_init(lib80211_crypto_tkip_init); +module_exit(lib80211_crypto_tkip_exit); diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c new file mode 100644 index 0000000..6d41e05 --- /dev/null +++ b/net/wireless/lib80211_crypt_wep.c @@ -0,0 +1,296 @@ +/* + * lib80211 crypt: host-based WEP encryption implementation for lib80211 + * + * Copyright (c) 2002-2004, Jouni Malinen + * Copyright (c) 2008, John W. Linville + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. See README and COPYING for + * more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +MODULE_AUTHOR("Jouni Malinen"); +MODULE_DESCRIPTION("lib80211 crypt: WEP"); +MODULE_LICENSE("GPL"); + +struct lib80211_wep_data { + u32 iv; +#define WEP_KEY_LEN 13 + u8 key[WEP_KEY_LEN + 1]; + u8 key_len; + u8 key_idx; + struct crypto_blkcipher *tx_tfm; + struct crypto_blkcipher *rx_tfm; +}; + +static void *lib80211_wep_init(int keyidx) +{ + struct lib80211_wep_data *priv; + + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); + if (priv == NULL) + goto fail; + priv->key_idx = keyidx; + + priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->tx_tfm)) { + printk(KERN_DEBUG "lib80211_crypt_wep: could not allocate " + "crypto API arc4\n"); + priv->tx_tfm = NULL; + goto fail; + } + + priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->rx_tfm)) { + printk(KERN_DEBUG "lib80211_crypt_wep: could not allocate " + "crypto API arc4\n"); + priv->rx_tfm = NULL; + goto fail; + } + /* start WEP IV from a random value */ + get_random_bytes(&priv->iv, 4); + + return priv; + + fail: + if (priv) { + if (priv->tx_tfm) + crypto_free_blkcipher(priv->tx_tfm); + if (priv->rx_tfm) + crypto_free_blkcipher(priv->rx_tfm); + kfree(priv); + } + return NULL; +} + +static void lib80211_wep_deinit(void *priv) +{ + struct lib80211_wep_data *_priv = priv; + if (_priv) { + if (_priv->tx_tfm) + crypto_free_blkcipher(_priv->tx_tfm); + if (_priv->rx_tfm) + crypto_free_blkcipher(_priv->rx_tfm); + } + kfree(priv); +} + +/* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ +static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len, + u8 *key, int keylen, void *priv) +{ + struct lib80211_wep_data *wep = priv; + u32 klen, len; + u8 *pos; + + if (skb_headroom(skb) < 4 || skb->len < hdr_len) + return -1; + + len = skb->len - hdr_len; + pos = skb_push(skb, 4); + memmove(pos, pos + 4, hdr_len); + pos += hdr_len; + + klen = 3 + wep->key_len; + + wep->iv++; + + /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key + * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N) + * can be used to speedup attacks, so avoid using them. */ + if ((wep->iv & 0xff00) == 0xff00) { + u8 B = (wep->iv >> 16) & 0xff; + if (B >= 3 && B < klen) + wep->iv += 0x0100; + } + + /* Prepend 24-bit IV to RC4 key and TX frame */ + *pos++ = (wep->iv >> 16) & 0xff; + *pos++ = (wep->iv >> 8) & 0xff; + *pos++ = wep->iv & 0xff; + *pos++ = wep->key_idx << 6; + + return 0; +} + +/* Perform WEP encryption on given skb that has at least 4 bytes of headroom + * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted, + * so the payload length increases with 8 bytes. 
+ * + * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) + */ +static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +{ + struct lib80211_wep_data *wep = priv; + struct blkcipher_desc desc = { .tfm = wep->tx_tfm }; + u32 crc, klen, len; + u8 *pos, *icv; + struct scatterlist sg; + u8 key[WEP_KEY_LEN + 3]; + + /* other checks are in lib80211_wep_build_iv */ + if (skb_tailroom(skb) < 4) + return -1; + + /* add the IV to the frame */ + if (lib80211_wep_build_iv(skb, hdr_len, NULL, 0, priv)) + return -1; + + /* Copy the IV into the first 3 bytes of the key */ + skb_copy_from_linear_data_offset(skb, hdr_len, key, 3); + + /* Copy rest of the WEP key (the secret part) */ + memcpy(key + 3, wep->key, wep->key_len); + + len = skb->len - hdr_len - 4; + pos = skb->data + hdr_len + 4; + klen = 3 + wep->key_len; + + /* Append little-endian CRC32 over only the data and encrypt it to produce ICV */ + crc = ~crc32_le(~0, pos, len); + icv = skb_put(skb, 4); + icv[0] = crc; + icv[1] = crc >> 8; + icv[2] = crc >> 16; + icv[3] = crc >> 24; + + crypto_blkcipher_setkey(wep->tx_tfm, key, klen); + sg_init_one(&sg, pos, len + 4); + return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); +} + +/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of + * the frame: IV (4 bytes), encrypted payload (including SNAP header), + * ICV (4 bytes). len includes both IV and ICV. + * + * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on + * failure. If frame is OK, IV and ICV will be removed. + */ +static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) +{ + struct lib80211_wep_data *wep = priv; + struct blkcipher_desc desc = { .tfm = wep->rx_tfm }; + u32 crc, klen, plen; + u8 key[WEP_KEY_LEN + 3]; + u8 keyidx, *pos, icv[4]; + struct scatterlist sg; + + if (skb->len < hdr_len + 8) + return -1; + + pos = skb->data + hdr_len; + key[0] = *pos++; + key[1] = *pos++; + key[2] = *pos++; + keyidx = *pos++ >> 6; + if (keyidx != wep->key_idx) + return -1; + + klen = 3 + wep->key_len; + + /* Copy rest of the WEP key (the secret part) */ + memcpy(key + 3, wep->key, wep->key_len); + + /* Apply RC4 to data and compute CRC32 over decrypted data */ + plen = skb->len - hdr_len - 8; + + crypto_blkcipher_setkey(wep->rx_tfm, key, klen); + sg_init_one(&sg, pos, plen + 4); + if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) + return -7; + + crc = ~crc32_le(~0, pos, plen); + icv[0] = crc; + icv[1] = crc >> 8; + icv[2] = crc >> 16; + icv[3] = crc >> 24; + if (memcmp(icv, pos + plen, 4) != 0) { + /* ICV mismatch - drop frame */ + return -2; + } + + /* Remove IV and ICV */ + memmove(skb->data + 4, skb->data, hdr_len); + skb_pull(skb, 4); + skb_trim(skb, skb->len - 4); + + return 0; +} + +static int lib80211_wep_set_key(void *key, int len, u8 * seq, void *priv) +{ + struct lib80211_wep_data *wep = priv; + + if (len < 0 || len > WEP_KEY_LEN) + return -1; + + memcpy(wep->key, key, len); + wep->key_len = len; + + return 0; +} + +static int lib80211_wep_get_key(void *key, int len, u8 * seq, void *priv) +{ + struct lib80211_wep_data *wep = priv; + + if (len < wep->key_len) + return -1; + + memcpy(key, wep->key, wep->key_len); + + return wep->key_len; +} + +static char *lib80211_wep_print_stats(char *p, void *priv) +{ + struct lib80211_wep_data *wep = priv; + p += sprintf(p, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len); + return p; +} + +static struct lib80211_crypto_ops lib80211_crypt_wep = { + .name = "WEP", + .init = 
lib80211_wep_init, + .deinit = lib80211_wep_deinit, + .build_iv = lib80211_wep_build_iv, + .encrypt_mpdu = lib80211_wep_encrypt, + .decrypt_mpdu = lib80211_wep_decrypt, + .encrypt_msdu = NULL, + .decrypt_msdu = NULL, + .set_key = lib80211_wep_set_key, + .get_key = lib80211_wep_get_key, + .print_stats = lib80211_wep_print_stats, + .extra_mpdu_prefix_len = 4, /* IV */ + .extra_mpdu_postfix_len = 4, /* ICV */ + .owner = THIS_MODULE, +}; + +static int __init lib80211_crypto_wep_init(void) +{ + return lib80211_register_crypto_ops(&lib80211_crypt_wep); +} + +static void __exit lib80211_crypto_wep_exit(void) +{ + lib80211_unregister_crypto_ops(&lib80211_crypt_wep); +} + +module_init(lib80211_crypto_wep_init); +module_exit(lib80211_crypto_wep_exit); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c new file mode 100644 index 0000000..62bc885 --- /dev/null +++ b/net/wireless/mlme.c @@ -0,0 +1,896 @@ +/* + * cfg80211 MLME SAP interface + * + * Copyright (c) 2009, Jouni Malinen + */ + +#include +#include +#include +#include +#include +#include +#include +#include "core.h" +#include "nl80211.h" + +void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; + u8 *bssid = mgmt->bssid; + int i; + u16 status = le16_to_cpu(mgmt->u.auth.status_code); + bool done = false; + + wdev_lock(wdev); + + for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (wdev->authtry_bsses[i] && + memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid, + ETH_ALEN) == 0) { + if (status == WLAN_STATUS_SUCCESS) { + wdev->auth_bsses[i] = wdev->authtry_bsses[i]; + } else { + cfg80211_unhold_bss(wdev->authtry_bsses[i]); + cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); + } + wdev->authtry_bsses[i] = NULL; + done = true; + break; + } + } + + WARN_ON(!done); + + nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); + cfg80211_sme_rx_auth(dev, buf, len); + + wdev_unlock(wdev); +} +EXPORT_SYMBOL(cfg80211_send_rx_auth); + +void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) +{ + u16 status_code; + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; + u8 *ie = mgmt->u.assoc_resp.variable; + int i, ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); + struct cfg80211_internal_bss *bss = NULL; + + wdev_lock(wdev); + + status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); + + /* + * This is a bit of a hack, we don't notify userspace of + * a (re-)association reply if we tried to send a reassoc + * and got a reject -- we only try again with an assoc + * frame instead of reassoc. 
+ */ + if (status_code != WLAN_STATUS_SUCCESS && wdev->conn && + cfg80211_sme_failed_reassoc(wdev)) + goto out; + + nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); + + if (status_code == WLAN_STATUS_SUCCESS) { + for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (!wdev->auth_bsses[i]) + continue; + if (memcmp(wdev->auth_bsses[i]->pub.bssid, mgmt->bssid, + ETH_ALEN) == 0) { + bss = wdev->auth_bsses[i]; + wdev->auth_bsses[i] = NULL; + /* additional reference to drop hold */ + cfg80211_ref_bss(bss); + break; + } + } + + /* + * We might be coming here because the driver reported + * a successful association at the same time as the + * user requested a deauth. In that case, we will have + * removed the BSS from the auth_bsses list due to the + * deauth request when the assoc response makes it. If + * the two code paths acquire the lock the other way + * around, that's just the standard situation of a + * deauth being requested while connected. + */ + if (!bss) + goto out; + } else if (wdev->conn) { + cfg80211_sme_failed_assoc(wdev); + /* + * do not call connect_result() now because the + * sme will schedule work that does it later. + */ + goto out; + } + + if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) { + /* + * This is for the userspace SME, the CONNECTING + * state will be changed to CONNECTED by + * __cfg80211_connect_result() below. + */ + wdev->sme_state = CFG80211_SME_CONNECTING; + } + + /* this consumes one bss reference (unless bss is NULL) */ + __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, + status_code, + status_code == WLAN_STATUS_SUCCESS, + bss ? &bss->pub : NULL); + /* drop hold now, and also reference acquired above */ + if (bss) { + cfg80211_unhold_bss(bss); + cfg80211_put_bss(&bss->pub); + } + + out: + wdev_unlock(wdev); +} +EXPORT_SYMBOL(cfg80211_send_rx_assoc); + +void __cfg80211_send_deauth(struct net_device *dev, + const u8 *buf, size_t len) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; + const u8 *bssid = mgmt->bssid; + int i; + bool found = false; + + ASSERT_WDEV_LOCK(wdev); + + if (wdev->current_bss && + memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { + cfg80211_unhold_bss(wdev->current_bss); + cfg80211_put_bss(&wdev->current_bss->pub); + wdev->current_bss = NULL; + found = true; + } else for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (wdev->auth_bsses[i] && + memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) { + cfg80211_unhold_bss(wdev->auth_bsses[i]); + cfg80211_put_bss(&wdev->auth_bsses[i]->pub); + wdev->auth_bsses[i] = NULL; + found = true; + break; + } + if (wdev->authtry_bsses[i] && + memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) { + cfg80211_unhold_bss(wdev->authtry_bsses[i]); + cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); + wdev->authtry_bsses[i] = NULL; + found = true; + break; + } + } + + if (!found) + return; + + nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL); + + if (wdev->sme_state == CFG80211_SME_CONNECTED) { + u16 reason_code; + bool from_ap; + + reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); + + from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; + __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); + } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { + __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + false, NULL); + } +} 
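The __cfg80211_send_deauth() path above, together with the locked cfg80211_send_deauth() wrapper that follows, is the entry point a fullmac driver uses when it receives a Deauthentication frame. A hedged sketch of such a caller is shown below; the handler name and its arguments are hypothetical, and because these cfg80211 entry points take wdev_lock() (a mutex), the call must happen in process context (for example from a workqueue), not from the driver's IRQ path:

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

/* Hypothetical fullmac driver hook; "mydrv_rx_mgmt" is illustrative only,
 * not part of this patch or of any real driver. */
static void mydrv_rx_mgmt(struct net_device *dev, const u8 *frame, size_t len)
{
	const struct ieee80211_mgmt *mgmt = (const void *)frame;

	if (len < 24)	/* need at least a 3-address management header */
		return;

	if (ieee80211_is_deauth(mgmt->frame_control))
		cfg80211_send_deauth(dev, frame, len);
	else if (ieee80211_is_disassoc(mgmt->frame_control))
		cfg80211_send_disassoc(dev, frame, len);
}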
+EXPORT_SYMBOL(__cfg80211_send_deauth); + +void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + wdev_lock(wdev); + __cfg80211_send_deauth(dev, buf, len); + wdev_unlock(wdev); +} +EXPORT_SYMBOL(cfg80211_send_deauth); + +void __cfg80211_send_disassoc(struct net_device *dev, + const u8 *buf, size_t len) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; + const u8 *bssid = mgmt->bssid; + int i; + u16 reason_code; + bool from_ap; + bool done = false; + + ASSERT_WDEV_LOCK(wdev); + + nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL); + + if (wdev->sme_state != CFG80211_SME_CONNECTED) + return; + + if (wdev->current_bss && + memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { + for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (wdev->authtry_bsses[i] || wdev->auth_bsses[i]) + continue; + wdev->auth_bsses[i] = wdev->current_bss; + wdev->current_bss = NULL; + done = true; + cfg80211_sme_disassoc(dev, i); + break; + } + WARN_ON(!done); + } else + WARN_ON(1); + + + reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); + + from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; + __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); +} +EXPORT_SYMBOL(__cfg80211_send_disassoc); + +void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + wdev_lock(wdev); + __cfg80211_send_disassoc(dev, buf, len); + wdev_unlock(wdev); +} +EXPORT_SYMBOL(cfg80211_send_disassoc); + +static void __cfg80211_auth_remove(struct wireless_dev *wdev, const u8 *addr) +{ + int i; + bool done = false; + + ASSERT_WDEV_LOCK(wdev); + + for (i = 0; addr && i < MAX_AUTH_BSSES; i++) { + if (wdev->authtry_bsses[i] && + memcmp(wdev->authtry_bsses[i]->pub.bssid, + addr, ETH_ALEN) == 0) { + cfg80211_unhold_bss(wdev->authtry_bsses[i]); + cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); + wdev->authtry_bsses[i] = NULL; + done = true; + break; + } + } + + WARN_ON(!done); +} + +void __cfg80211_auth_canceled(struct net_device *dev, const u8 *addr) +{ + __cfg80211_auth_remove(dev->ieee80211_ptr, addr); +} +EXPORT_SYMBOL(__cfg80211_auth_canceled); + +void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + wdev_lock(wdev); + + nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); + if (wdev->sme_state == CFG80211_SME_CONNECTING) + __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + false, NULL); + + __cfg80211_auth_remove(wdev, addr); + + wdev_unlock(wdev); +} +EXPORT_SYMBOL(cfg80211_send_auth_timeout); + +void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + int i; + bool done = false; + + wdev_lock(wdev); + + nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); + if (wdev->sme_state == CFG80211_SME_CONNECTING) + __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + false, NULL); + + for (i = 0; addr && i < MAX_AUTH_BSSES; i++) { + if (wdev->auth_bsses[i] && + 
memcmp(wdev->auth_bsses[i]->pub.bssid, + addr, ETH_ALEN) == 0) { + cfg80211_unhold_bss(wdev->auth_bsses[i]); + cfg80211_put_bss(&wdev->auth_bsses[i]->pub); + wdev->auth_bsses[i] = NULL; + done = true; + break; + } + } + + WARN_ON(!done); + + wdev_unlock(wdev); +} +EXPORT_SYMBOL(cfg80211_send_assoc_timeout); + +void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, + enum nl80211_key_type key_type, int key_id, + const u8 *tsc, gfp_t gfp) +{ + struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); +#ifdef CONFIG_CFG80211_WEXT + union iwreq_data wrqu; + char *buf = kmalloc(128, gfp); + + if (buf) { + sprintf(buf, "MLME-MICHAELMICFAILURE.indication(" + "keyid=%d %scast addr=%pM)", key_id, + key_type == NL80211_KEYTYPE_GROUP ? "broad" : "uni", + addr); + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = strlen(buf); + wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); + kfree(buf); + } +#endif + + nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp); +} +EXPORT_SYMBOL(cfg80211_michael_mic_failure); + +/* some MLME handling for userspace SME */ +int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_auth_type auth_type, + const u8 *bssid, + const u8 *ssid, int ssid_len, + const u8 *ie, int ie_len, + const u8 *key, int key_len, int key_idx) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_auth_request req; + struct cfg80211_internal_bss *bss; + int i, err, slot = -1, nfree = 0; + + ASSERT_WDEV_LOCK(wdev); + + if (auth_type == NL80211_AUTHTYPE_SHARED_KEY) + if (!key || !key_len || key_idx < 0 || key_idx > 4) + return -EINVAL; + + if (wdev->current_bss && + memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0) + return -EALREADY; + + for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (wdev->authtry_bsses[i] && + memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid, + ETH_ALEN) == 0) + return -EALREADY; + if (wdev->auth_bsses[i] && + memcmp(bssid, wdev->auth_bsses[i]->pub.bssid, + ETH_ALEN) == 0) + return -EALREADY; + } + + memset(&req, 0, sizeof(req)); + + req.ie = ie; + req.ie_len = ie_len; + req.auth_type = auth_type; + req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, + WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); + req.key = key; + req.key_len = key_len; + req.key_idx = key_idx; + if (!req.bss) + return -ENOENT; + + bss = bss_from_pub(req.bss); + + for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (!wdev->auth_bsses[i] && !wdev->authtry_bsses[i]) { + slot = i; + nfree++; + } + } + + /* we need one free slot for disassoc and one for this auth */ + if (nfree < 2) { + err = -ENOSPC; + goto out; + } + + wdev->authtry_bsses[slot] = bss; + cfg80211_hold_bss(bss); + + err = rdev->ops->auth(&rdev->wiphy, dev, &req); + if (err) { + wdev->authtry_bsses[slot] = NULL; + cfg80211_unhold_bss(bss); + } + + out: + if (err) + cfg80211_put_bss(req.bss); + return err; +} + +int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, + struct net_device *dev, struct ieee80211_channel *chan, + enum nl80211_auth_type auth_type, const u8 *bssid, + const u8 *ssid, int ssid_len, + const u8 *ie, int ie_len, + const u8 *key, int key_len, int key_idx) +{ + int err; + + wdev_lock(dev->ieee80211_ptr); + err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, + ssid, ssid_len, ie, ie_len, + key, key_len, key_idx); + wdev_unlock(dev->ieee80211_ptr); + + return err; +} + +int __cfg80211_mlme_assoc(struct 
cfg80211_registered_device *rdev, + struct net_device *dev, + struct ieee80211_channel *chan, + const u8 *bssid, const u8 *prev_bssid, + const u8 *ssid, int ssid_len, + const u8 *ie, int ie_len, bool use_mfp, + struct cfg80211_crypto_settings *crypt) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_assoc_request req; + struct cfg80211_internal_bss *bss; + int i, err, slot = -1; + bool was_connected = false; + + ASSERT_WDEV_LOCK(wdev); + + memset(&req, 0, sizeof(req)); + + if (wdev->current_bss && prev_bssid && + memcmp(wdev->current_bss->pub.bssid, prev_bssid, ETH_ALEN) == 0) { + /* + * Trying to reassociate: Allow this to proceed and let the old + * association to be dropped when the new one is completed. + */ + if (wdev->sme_state == CFG80211_SME_CONNECTED) { + was_connected = true; + wdev->sme_state = CFG80211_SME_CONNECTING; + } + } else if (wdev->current_bss) + return -EALREADY; + + req.ie = ie; + req.ie_len = ie_len; + memcpy(&req.crypto, crypt, sizeof(req.crypto)); + req.use_mfp = use_mfp; + req.prev_bssid = prev_bssid; + req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, + WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); + if (!req.bss) { + if (was_connected) + wdev->sme_state = CFG80211_SME_CONNECTED; + return -ENOENT; + } + + bss = bss_from_pub(req.bss); + + for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (bss == wdev->auth_bsses[i]) { + slot = i; + break; + } + } + + if (slot < 0) { + err = -ENOTCONN; + goto out; + } + + err = rdev->ops->assoc(&rdev->wiphy, dev, &req); + out: + if (err && was_connected) + wdev->sme_state = CFG80211_SME_CONNECTED; + /* still a reference in wdev->auth_bsses[slot] */ + cfg80211_put_bss(req.bss); + return err; +} + +int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ieee80211_channel *chan, + const u8 *bssid, const u8 *prev_bssid, + const u8 *ssid, int ssid_len, + const u8 *ie, int ie_len, bool use_mfp, + struct cfg80211_crypto_settings *crypt) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + wdev_lock(wdev); + err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, + ssid, ssid_len, ie, ie_len, use_mfp, crypt); + wdev_unlock(wdev); + + return err; +} + +int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *bssid, + const u8 *ie, int ie_len, u16 reason) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_deauth_request req; + int i; + + ASSERT_WDEV_LOCK(wdev); + + memset(&req, 0, sizeof(req)); + req.reason_code = reason; + req.ie = ie; + req.ie_len = ie_len; + if (wdev->current_bss && + memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { + req.bss = &wdev->current_bss->pub; + } else for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (wdev->auth_bsses[i] && + memcmp(bssid, wdev->auth_bsses[i]->pub.bssid, ETH_ALEN) == 0) { + req.bss = &wdev->auth_bsses[i]->pub; + break; + } + if (wdev->authtry_bsses[i] && + memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid, ETH_ALEN) == 0) { + req.bss = &wdev->authtry_bsses[i]->pub; + break; + } + } + + if (!req.bss) + return -ENOTCONN; + + return rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); +} + +int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *bssid, + const u8 *ie, int ie_len, u16 reason) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + wdev_lock(wdev); + err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason); + wdev_unlock(wdev); + + return err; +} + 
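cfg80211_mlme_deauth() above follows the locking pattern used throughout this file: a __-prefixed helper that asserts wdev_lock() is held, plus a thin wrapper that takes the lock itself. A hedged sketch of how an nl80211 NL80211_CMD_DEAUTHENTICATE handler might drive it is given below; the function name is illustrative, and the real handler in nl80211.c additionally parses and validates the netlink attributes first:

#include <net/cfg80211.h>
#include "core.h"

static int example_deauthenticate(struct cfg80211_registered_device *rdev,
				  struct net_device *dev, const u8 *bssid,
				  u16 reason_code, const u8 *ie, int ie_len)
{
	if (!rdev->ops->deauth)
		return -EOPNOTSUPP;

	if (reason_code == 0)
		return -EINVAL;	/* Reason Code 0 is reserved by 802.11 */

	/* Takes wdev_lock() and resolves which held BSS entry matches bssid */
	return cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code);
}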
+static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *bssid, + const u8 *ie, int ie_len, u16 reason) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_disassoc_request req; + + ASSERT_WDEV_LOCK(wdev); + + if (wdev->sme_state != CFG80211_SME_CONNECTED) + return -ENOTCONN; + + if (WARN_ON(!wdev->current_bss)) + return -ENOTCONN; + + memset(&req, 0, sizeof(req)); + req.reason_code = reason; + req.ie = ie; + req.ie_len = ie_len; + if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) + req.bss = &wdev->current_bss->pub; + else + return -ENOTCONN; + + return rdev->ops->disassoc(&rdev->wiphy, dev, &req, wdev); +} + +int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *bssid, + const u8 *ie, int ie_len, u16 reason) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + wdev_lock(wdev); + err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason); + wdev_unlock(wdev); + + return err; +} + +void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, + struct net_device *dev) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_deauth_request req; + int i; + + ASSERT_WDEV_LOCK(wdev); + + if (!rdev->ops->deauth) + return; + + memset(&req, 0, sizeof(req)); + req.reason_code = WLAN_REASON_DEAUTH_LEAVING; + req.ie = NULL; + req.ie_len = 0; + + if (wdev->current_bss) { + req.bss = &wdev->current_bss->pub; + rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); + if (wdev->current_bss) { + cfg80211_unhold_bss(wdev->current_bss); + cfg80211_put_bss(&wdev->current_bss->pub); + wdev->current_bss = NULL; + } + } + + for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (wdev->auth_bsses[i]) { + req.bss = &wdev->auth_bsses[i]->pub; + rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); + if (wdev->auth_bsses[i]) { + cfg80211_unhold_bss(wdev->auth_bsses[i]); + cfg80211_put_bss(&wdev->auth_bsses[i]->pub); + wdev->auth_bsses[i] = NULL; + } + } + if (wdev->authtry_bsses[i]) { + req.bss = &wdev->authtry_bsses[i]->pub; + rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); + if (wdev->authtry_bsses[i]) { + cfg80211_unhold_bss(wdev->authtry_bsses[i]); + cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); + wdev->authtry_bsses[i] = NULL; + } + } + } +} + +void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int duration, gfp_t gfp) +{ + struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type, + duration, gfp); +} +EXPORT_SYMBOL(cfg80211_ready_on_channel); + +void cfg80211_remain_on_channel_expired(struct net_device *dev, + u64 cookie, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + gfp_t gfp) +{ + struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan, + channel_type, gfp); +} +EXPORT_SYMBOL(cfg80211_remain_on_channel_expired); + +void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, + struct station_info *sinfo, gfp_t gfp) +{ + struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp); +} +EXPORT_SYMBOL(cfg80211_new_sta); + +struct 
cfg80211_action_registration { + struct list_head list; + + u32 nlpid; + + int match_len; + + u8 match[]; +}; + +int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid, + const u8 *match_data, int match_len) +{ + struct cfg80211_action_registration *reg, *nreg; + int err = 0; + + nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL); + if (!nreg) + return -ENOMEM; + + spin_lock_bh(&wdev->action_registrations_lock); + + list_for_each_entry(reg, &wdev->action_registrations, list) { + int mlen = min(match_len, reg->match_len); + + if (memcmp(reg->match, match_data, mlen) == 0) { + err = -EALREADY; + break; + } + } + + if (err) { + kfree(nreg); + goto out; + } + + memcpy(nreg->match, match_data, match_len); + nreg->match_len = match_len; + nreg->nlpid = snd_pid; + list_add(&nreg->list, &wdev->action_registrations); + + out: + spin_unlock_bh(&wdev->action_registrations_lock); + return err; +} + +void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid) +{ + struct cfg80211_action_registration *reg, *tmp; + + spin_lock_bh(&wdev->action_registrations_lock); + + list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) { + if (reg->nlpid == nlpid) { + list_del(&reg->list); + kfree(reg); + } + } + + spin_unlock_bh(&wdev->action_registrations_lock); +} + +void cfg80211_mlme_purge_actions(struct wireless_dev *wdev) +{ + struct cfg80211_action_registration *reg, *tmp; + + spin_lock_bh(&wdev->action_registrations_lock); + + list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) { + list_del(&reg->list); + kfree(reg); + } + + spin_unlock_bh(&wdev->action_registrations_lock); +} + +int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + const u8 *buf, size_t len, u64 *cookie) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + const struct ieee80211_mgmt *mgmt; + + if (rdev->ops->action == NULL) + return -EOPNOTSUPP; + if (len < 24 + 1) + return -EINVAL; + + mgmt = (const struct ieee80211_mgmt *) buf; + if (!ieee80211_is_action(mgmt->frame_control)) + return -EINVAL; + if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) { + /* Verify that we are associated with the destination AP */ + if (!wdev->current_bss || + memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, + ETH_ALEN) != 0 || + memcmp(wdev->current_bss->pub.bssid, mgmt->da, + ETH_ALEN) != 0) + return -ENOTCONN; + } + + if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0) + return -EINVAL; + + /* Transmit the Action frame as requested by user space */ + return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type, + buf, len, cookie); +} + +bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, + size_t len, gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct cfg80211_action_registration *reg; + const u8 *action_data; + int action_data_len; + bool result = false; + + /* frame length - min size excluding category */ + action_data_len = len - (IEEE80211_MIN_ACTION_SIZE - 1); + + /* action data starts with category */ + action_data = buf + IEEE80211_MIN_ACTION_SIZE - 1; + + spin_lock_bh(&wdev->action_registrations_lock); + + list_for_each_entry(reg, &wdev->action_registrations, list) { + if (reg->match_len > action_data_len) + continue; + + if (memcmp(reg->match, action_data, reg->match_len)) + continue; + + /* found match!
*/ + + /* Indicate the received Action frame to user space */ + if (nl80211_send_action(rdev, dev, reg->nlpid, freq, + buf, len, gfp)) + continue; + + result = true; + break; + } + + spin_unlock_bh(&wdev->action_registrations_lock); + + return result; +} +EXPORT_SYMBOL(cfg80211_rx_action); + +void cfg80211_action_tx_status(struct net_device *dev, u64 cookie, + const u8 *buf, size_t len, bool ack, gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + /* Indicate TX status of the Action frame to user space */ + nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp); +} +EXPORT_SYMBOL(cfg80211_action_tx_status); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c new file mode 100644 index 0000000..9331a97 --- /dev/null +++ b/net/wireless/nl80211.c @@ -0,0 +1,5914 @@ +/* + * This is the new netlink-based wireless configuration interface. + * + * Copyright 2006-2010 Johannes Berg + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) +#include +#endif +#include +#include +#include +#include "core.h" +#include "nl80211.h" +#include "reg.h" + +/* the netlink family */ +static struct genl_family nl80211_fam = { + .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ + .name = "nl80211", /* have users key off the name instead */ + .hdrsize = 0, /* no private header */ + .version = 1, /* no particular meaning now */ + .maxattr = NL80211_ATTR_MAX, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + .netnsok = true, +#endif +}; + +/* internal helper: get rdev and dev */ +static int get_rdev_dev_by_info_ifindex(struct genl_info *info, + struct cfg80211_registered_device **rdev, + struct net_device **dev) +{ + struct nlattr **attrs = info->attrs; + int ifindex; + + if (!attrs[NL80211_ATTR_IFINDEX]) + return -EINVAL; + + ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); + *dev = dev_get_by_index(genl_info_net(info), ifindex); + if (!*dev) + return -ENODEV; + + *rdev = cfg80211_get_dev_from_ifindex(genl_info_net(info), ifindex); + if (IS_ERR(*rdev)) { + dev_put(*dev); + return PTR_ERR(*rdev); + } + + return 0; +} + +/* policy for the attributes */ +static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { + [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, + [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, + .len = 20-1 }, + [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, + [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, + [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, + [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 }, + [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, + [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, + [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, + [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 }, + + [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, + [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, + [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, + + [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN }, + [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN }, + + [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, + [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, + .len = WLAN_MAX_KEY_LEN }, + [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, + [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, + [NL80211_ATTR_KEY_DEFAULT] = { .type = 
NLA_FLAG }, + [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, + + [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, + [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 }, + [NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_BEACON_TAIL] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_STA_AID] = { .type = NLA_U16 }, + [NL80211_ATTR_STA_FLAGS] = { .type = NLA_NESTED }, + [NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 }, + [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY, + .len = NL80211_MAX_SUPP_RATES }, + [NL80211_ATTR_STA_PLINK_ACTION] = { .type = NLA_U8 }, + [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, + [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ }, + [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_MESH_ID_LEN }, + [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, + + [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 }, + [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED }, + + [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 }, + [NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 }, + [NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 }, + [NL80211_ATTR_BSS_BASIC_RATES] = { .type = NLA_BINARY, + .len = NL80211_MAX_SUPP_RATES }, + + [NL80211_ATTR_MESH_PARAMS] = { .type = NLA_NESTED }, + + [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY, + .len = NL80211_HT_CAPABILITY_LEN }, + + [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 }, + [NL80211_ATTR_IE] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED }, + [NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED }, + + [NL80211_ATTR_SSID] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_SSID_LEN }, + [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 }, + [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 }, + [NL80211_ATTR_FREQ_FIXED] = { .type = NLA_FLAG }, + [NL80211_ATTR_TIMED_OUT] = { .type = NLA_FLAG }, + [NL80211_ATTR_USE_MFP] = { .type = NLA_U32 }, + [NL80211_ATTR_STA_FLAGS2] = { + .len = sizeof(struct nl80211_sta_flag_update), + }, + [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG }, + [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG }, + [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, + [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, + [NL80211_ATTR_PID] = { .type = NLA_U32 }, + [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, + [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, + .len = WLAN_PMKID_LEN }, + [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, + [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, + [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, + [NL80211_ATTR_FRAME] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, }, + [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 }, +}; + +/* policy for the attributes */ +static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = { + [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, + [NL80211_KEY_IDX] = { .type = NLA_U8 }, + [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, + [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, + [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG }, + [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, +}; + +/* ifidx get helper */ +static int nl80211_get_ifidx(struct netlink_callback *cb) +{ + int res; + + res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, + nl80211_fam.attrbuf, nl80211_fam.maxattr, + nl80211_policy); + if (res) + return res; + + if 
(!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]) + return -EINVAL; + + res = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]); + if (!res) + return -EINVAL; + return res; +} + +/* IE validation */ +static bool is_valid_ie_attr(const struct nlattr *attr) +{ + const u8 *pos; + int len; + + if (!attr) + return true; + + pos = nla_data(attr); + len = nla_len(attr); + + while (len) { + u8 elemlen; + + if (len < 2) + return false; + len -= 2; + + elemlen = pos[1]; + if (elemlen > len) + return false; + + len -= elemlen; + pos += 2 + elemlen; + } + + return true; +} + +/* message building helper */ +static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq, + int flags, u8 cmd) +{ + /* since there is no private header just add the generic one */ + return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd); +} + +static int nl80211_msg_put_channel(struct sk_buff *msg, + struct ieee80211_channel *chan) +{ + NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ, + chan->center_freq); + + if (chan->flags & IEEE80211_CHAN_DISABLED) + NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED); + if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) + NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN); + if (chan->flags & IEEE80211_CHAN_NO_IBSS) + NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS); + if (chan->flags & IEEE80211_CHAN_RADAR) + NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR); + + NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, + DBM_TO_MBM(chan->max_power)); + + return 0; + + nla_put_failure: + return -ENOBUFS; +} + +/* netlink command implementations */ + +struct key_parse { + struct key_params p; + int idx; + bool def, defmgmt; +}; + +static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k) +{ + struct nlattr *tb[NL80211_KEY_MAX + 1]; + int err = nla_parse_nested(tb, NL80211_KEY_MAX, key, + nl80211_key_policy); + if (err) + return err; + + k->def = !!tb[NL80211_KEY_DEFAULT]; + k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT]; + + if (tb[NL80211_KEY_IDX]) + k->idx = nla_get_u8(tb[NL80211_KEY_IDX]); + + if (tb[NL80211_KEY_DATA]) { + k->p.key = nla_data(tb[NL80211_KEY_DATA]); + k->p.key_len = nla_len(tb[NL80211_KEY_DATA]); + } + + if (tb[NL80211_KEY_SEQ]) { + k->p.seq = nla_data(tb[NL80211_KEY_SEQ]); + k->p.seq_len = nla_len(tb[NL80211_KEY_SEQ]); + } + + if (tb[NL80211_KEY_CIPHER]) + k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]); + + return 0; +} + +static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k) +{ + if (info->attrs[NL80211_ATTR_KEY_DATA]) { + k->p.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]); + k->p.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]); + } + + if (info->attrs[NL80211_ATTR_KEY_SEQ]) { + k->p.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]); + k->p.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]); + } + + if (info->attrs[NL80211_ATTR_KEY_IDX]) + k->idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); + + if (info->attrs[NL80211_ATTR_KEY_CIPHER]) + k->p.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]); + + k->def = !!info->attrs[NL80211_ATTR_KEY_DEFAULT]; + k->defmgmt = !!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]; + + return 0; +} + +static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) +{ + int err; + + memset(k, 0, sizeof(*k)); + k->idx = -1; + + if (info->attrs[NL80211_ATTR_KEY]) + err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k); + else + err = nl80211_parse_key_old(info, k); + + if (err) + return err; + + if (k->def && k->defmgmt) + return -EINVAL; + + if (k->idx != 
-1) { + if (k->defmgmt) { + if (k->idx < 4 || k->idx > 5) + return -EINVAL; + } else if (k->def) { + if (k->idx < 0 || k->idx > 3) + return -EINVAL; + } else { + if (k->idx < 0 || k->idx > 5) + return -EINVAL; + } + } + + return 0; +} + +static struct cfg80211_cached_keys * +nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, + struct nlattr *keys) +{ + struct key_parse parse; + struct nlattr *key; + struct cfg80211_cached_keys *result; + int rem, err, def = 0; + + result = kzalloc(sizeof(*result), GFP_KERNEL); + if (!result) + return ERR_PTR(-ENOMEM); + + result->def = -1; + result->defmgmt = -1; + + nla_for_each_nested(key, keys, rem) { + memset(&parse, 0, sizeof(parse)); + parse.idx = -1; + + err = nl80211_parse_key_new(key, &parse); + if (err) + goto error; + err = -EINVAL; + if (!parse.p.key) + goto error; + if (parse.idx < 0 || parse.idx > 4) + goto error; + if (parse.def) { + if (def) + goto error; + def = 1; + result->def = parse.idx; + } else if (parse.defmgmt) + goto error; + err = cfg80211_validate_key_settings(rdev, &parse.p, + parse.idx, NULL); + if (err) + goto error; + result->params[parse.idx].cipher = parse.p.cipher; + result->params[parse.idx].key_len = parse.p.key_len; + result->params[parse.idx].key = result->data[parse.idx]; + memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len); + } + + return result; + error: + kfree(result); + return ERR_PTR(err); +} + +static int nl80211_key_allowed(struct wireless_dev *wdev) +{ + ASSERT_WDEV_LOCK(wdev); + + if (!netif_running(wdev->netdev)) + return -ENETDOWN; + + switch (wdev->iftype) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + break; + case NL80211_IFTYPE_ADHOC: + if (!wdev->current_bss) + return -ENOLINK; + break; + case NL80211_IFTYPE_STATION: + if (wdev->sme_state != CFG80211_SME_CONNECTED) + return -ENOLINK; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, + struct cfg80211_registered_device *dev) +{ + void *hdr; + struct nlattr *nl_bands, *nl_band; + struct nlattr *nl_freqs, *nl_freq; + struct nlattr *nl_rates, *nl_rate; + struct nlattr *nl_modes; + struct nlattr *nl_cmds; + enum ieee80211_band band; + struct ieee80211_channel *chan; + struct ieee80211_rate *rate; + int i; + u16 ifmodes = dev->wiphy.interface_modes; + + hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); + if (!hdr) + return -1; + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); + NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); + + NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, + cfg80211_rdev_list_generation); + + NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, + dev->wiphy.retry_short); + NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, + dev->wiphy.retry_long); + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, + dev->wiphy.frag_threshold); + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, + dev->wiphy.rts_threshold); + NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, + dev->wiphy.coverage_class); + + NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, + dev->wiphy.max_scan_ssids); + NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, + dev->wiphy.max_scan_ie_len); + + NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES, + sizeof(u32) * dev->wiphy.n_cipher_suites, + dev->wiphy.cipher_suites); + + NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, + dev->wiphy.max_num_pmkids); + + nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES); + if (!nl_modes) + goto nla_put_failure; + + i = 0; + while 
(ifmodes) { + if (ifmodes & 1) + NLA_PUT_FLAG(msg, i); + ifmodes >>= 1; + i++; + } + + nla_nest_end(msg, nl_modes); + + nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); + if (!nl_bands) + goto nla_put_failure; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (!dev->wiphy.bands[band]) + continue; + + nl_band = nla_nest_start(msg, band); + if (!nl_band) + goto nla_put_failure; + + /* add HT info */ + if (dev->wiphy.bands[band]->ht_cap.ht_supported) { + NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET, + sizeof(dev->wiphy.bands[band]->ht_cap.mcs), + &dev->wiphy.bands[band]->ht_cap.mcs); + NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA, + dev->wiphy.bands[band]->ht_cap.cap); + NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, + dev->wiphy.bands[band]->ht_cap.ampdu_factor); + NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, + dev->wiphy.bands[band]->ht_cap.ampdu_density); + } + + /* add frequencies */ + nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS); + if (!nl_freqs) + goto nla_put_failure; + + for (i = 0; i < dev->wiphy.bands[band]->n_channels; i++) { + nl_freq = nla_nest_start(msg, i); + if (!nl_freq) + goto nla_put_failure; + + chan = &dev->wiphy.bands[band]->channels[i]; + + if (nl80211_msg_put_channel(msg, chan)) + goto nla_put_failure; + + nla_nest_end(msg, nl_freq); + } + + nla_nest_end(msg, nl_freqs); + + /* add bitrates */ + nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES); + if (!nl_rates) + goto nla_put_failure; + + for (i = 0; i < dev->wiphy.bands[band]->n_bitrates; i++) { + nl_rate = nla_nest_start(msg, i); + if (!nl_rate) + goto nla_put_failure; + + rate = &dev->wiphy.bands[band]->bitrates[i]; + NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE, + rate->bitrate); + if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) + NLA_PUT_FLAG(msg, + NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE); + + nla_nest_end(msg, nl_rate); + } + + nla_nest_end(msg, nl_rates); + + nla_nest_end(msg, nl_band); + } + nla_nest_end(msg, nl_bands); + + nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS); + if (!nl_cmds) + goto nla_put_failure; + + i = 0; +#define CMD(op, n) \ + do { \ + if (dev->ops->op) { \ + i++; \ + NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \ + } \ + } while (0) + + CMD(add_virtual_intf, NEW_INTERFACE); + CMD(change_virtual_intf, SET_INTERFACE); + CMD(add_key, NEW_KEY); + CMD(add_beacon, NEW_BEACON); + CMD(add_station, NEW_STATION); + CMD(add_mpath, NEW_MPATH); + CMD(set_mesh_params, SET_MESH_PARAMS); + CMD(change_bss, SET_BSS); + CMD(auth, AUTHENTICATE); + CMD(assoc, ASSOCIATE); + CMD(deauth, DEAUTHENTICATE); + CMD(disassoc, DISASSOCIATE); + CMD(join_ibss, JOIN_IBSS); + CMD(set_pmksa, SET_PMKSA); + CMD(del_pmksa, DEL_PMKSA); + CMD(flush_pmksa, FLUSH_PMKSA); + CMD(remain_on_channel, REMAIN_ON_CHANNEL); + CMD(set_bitrate_mask, SET_TX_BITRATE_MASK); + CMD(action, ACTION); + if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { + i++; + NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); + } + +#undef CMD + + if (dev->ops->connect || dev->ops->auth) { + i++; + NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT); + } + + if (dev->ops->disconnect || dev->ops->deauth) { + i++; + NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT); + } + + nla_nest_end(msg, nl_cmds); + + return genlmsg_end(msg, hdr); + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) +{ + int idx = 0; + int start = cb->args[0]; + struct cfg80211_registered_device *dev; + + mutex_lock(&cfg80211_mutex); + list_for_each_entry(dev, 
&cfg80211_rdev_list, list) { + if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk))) + continue; + if (++idx <= start) + continue; + if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + dev) < 0) { + idx--; + break; + } + } + mutex_unlock(&cfg80211_mutex); + + cb->args[0] = idx; + + return skb->len; +} + +static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *msg; + struct cfg80211_registered_device *dev; + + dev = cfg80211_get_dev_from_info(info); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + goto out_err; + + if (nl80211_send_wiphy(msg, info->snd_pid, info->snd_seq, 0, dev) < 0) + goto out_free; + + cfg80211_unlock_rdev(dev); + + return genlmsg_reply(msg, info); + + out_free: + nlmsg_free(msg); + out_err: + cfg80211_unlock_rdev(dev); + return -ENOBUFS; +} + +static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = { + [NL80211_TXQ_ATTR_QUEUE] = { .type = NLA_U8 }, + [NL80211_TXQ_ATTR_TXOP] = { .type = NLA_U16 }, + [NL80211_TXQ_ATTR_CWMIN] = { .type = NLA_U16 }, + [NL80211_TXQ_ATTR_CWMAX] = { .type = NLA_U16 }, + [NL80211_TXQ_ATTR_AIFS] = { .type = NLA_U8 }, +}; + +static int parse_txq_params(struct nlattr *tb[], + struct ieee80211_txq_params *txq_params) +{ + if (!tb[NL80211_TXQ_ATTR_QUEUE] || !tb[NL80211_TXQ_ATTR_TXOP] || + !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || + !tb[NL80211_TXQ_ATTR_AIFS]) + return -EINVAL; + + txq_params->queue = nla_get_u8(tb[NL80211_TXQ_ATTR_QUEUE]); + txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); + txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); + txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); + txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); + + return 0; +} + +static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int result = 0, rem_txq_params = 0; + struct nlattr *nl_txq_params; + u32 changed; + u8 retry_short = 0, retry_long = 0; + u32 frag_threshold = 0, rts_threshold = 0; + u8 coverage_class = 0; + + rtnl_lock(); + + mutex_lock(&cfg80211_mutex); + + rdev = __cfg80211_rdev_from_info(info); + if (IS_ERR(rdev)) { + mutex_unlock(&cfg80211_mutex); + result = PTR_ERR(rdev); + goto unlock; + } + + mutex_lock(&rdev->mtx); + + if (info->attrs[NL80211_ATTR_WIPHY_NAME]) + result = cfg80211_dev_rename( + rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME])); + + mutex_unlock(&cfg80211_mutex); + + if (result) + goto bad_res; + + if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) { + struct ieee80211_txq_params txq_params; + struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1]; + + if (!rdev->ops->set_txq_params) { + result = -EOPNOTSUPP; + goto bad_res; + } + + nla_for_each_nested(nl_txq_params, + info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], + rem_txq_params) { + nla_parse(tb, NL80211_TXQ_ATTR_MAX, + nla_data(nl_txq_params), + nla_len(nl_txq_params), + txq_params_policy); + result = parse_txq_params(tb, &txq_params); + if (result) + goto bad_res; + + result = rdev->ops->set_txq_params(&rdev->wiphy, + &txq_params); + if (result) + goto bad_res; + } + } + + if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { + enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; + u32 freq; + + result = -EINVAL; + + if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { + channel_type = nla_get_u32(info->attrs[ + NL80211_ATTR_WIPHY_CHANNEL_TYPE]); + if (channel_type != NL80211_CHAN_NO_HT && + channel_type != 
NL80211_CHAN_HT20 && + channel_type != NL80211_CHAN_HT40PLUS && + channel_type != NL80211_CHAN_HT40MINUS) + goto bad_res; + } + + freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); + + mutex_lock(&rdev->devlist_mtx); + result = rdev_set_freq(rdev, NULL, freq, channel_type); + mutex_unlock(&rdev->devlist_mtx); + if (result) + goto bad_res; + } + + changed = 0; + + if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) { + retry_short = nla_get_u8( + info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]); + if (retry_short == 0) { + result = -EINVAL; + goto bad_res; + } + changed |= WIPHY_PARAM_RETRY_SHORT; + } + + if (info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]) { + retry_long = nla_get_u8( + info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]); + if (retry_long == 0) { + result = -EINVAL; + goto bad_res; + } + changed |= WIPHY_PARAM_RETRY_LONG; + } + + if (info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]) { + frag_threshold = nla_get_u32( + info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]); + if (frag_threshold < 256) { + result = -EINVAL; + goto bad_res; + } + if (frag_threshold != (u32) -1) { + /* + * Fragments (apart from the last one) are required to + * have even length. Make the fragmentation code + * simpler by stripping LSB should someone try to use + * odd threshold value. + */ + frag_threshold &= ~0x1; + } + changed |= WIPHY_PARAM_FRAG_THRESHOLD; + } + + if (info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]) { + rts_threshold = nla_get_u32( + info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]); + changed |= WIPHY_PARAM_RTS_THRESHOLD; + } + + if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) { + coverage_class = nla_get_u8( + info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]); + changed |= WIPHY_PARAM_COVERAGE_CLASS; + } + + if (changed) { + u8 old_retry_short, old_retry_long; + u32 old_frag_threshold, old_rts_threshold; + u8 old_coverage_class; + + if (!rdev->ops->set_wiphy_params) { + result = -EOPNOTSUPP; + goto bad_res; + } + + old_retry_short = rdev->wiphy.retry_short; + old_retry_long = rdev->wiphy.retry_long; + old_frag_threshold = rdev->wiphy.frag_threshold; + old_rts_threshold = rdev->wiphy.rts_threshold; + old_coverage_class = rdev->wiphy.coverage_class; + + if (changed & WIPHY_PARAM_RETRY_SHORT) + rdev->wiphy.retry_short = retry_short; + if (changed & WIPHY_PARAM_RETRY_LONG) + rdev->wiphy.retry_long = retry_long; + if (changed & WIPHY_PARAM_FRAG_THRESHOLD) + rdev->wiphy.frag_threshold = frag_threshold; + if (changed & WIPHY_PARAM_RTS_THRESHOLD) + rdev->wiphy.rts_threshold = rts_threshold; + if (changed & WIPHY_PARAM_COVERAGE_CLASS) + rdev->wiphy.coverage_class = coverage_class; + + result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); + if (result) { + rdev->wiphy.retry_short = old_retry_short; + rdev->wiphy.retry_long = old_retry_long; + rdev->wiphy.frag_threshold = old_frag_threshold; + rdev->wiphy.rts_threshold = old_rts_threshold; + rdev->wiphy.coverage_class = old_coverage_class; + } + } + + bad_res: + mutex_unlock(&rdev->mtx); + unlock: + rtnl_unlock(); + return result; +} + + +static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, + struct cfg80211_registered_device *rdev, + struct net_device *dev) +{ + void *hdr; + + hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_INTERFACE); + if (!hdr) + return -1; + + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); + NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); + + 
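[Editor's note] The nl80211_set_wiphy() handler above saves the current wiphy parameters, applies the requested ones, and restores the saved copy if the driver's set_wiphy_params() callback fails; the nl80211_send_iface() message assembly continues right after this note. Below is a minimal standalone C sketch of that save/apply/restore pattern. The names (wiphy_params, driver_set_params, apply_with_rollback) and the bit values for "changed" are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

/* Illustrative stand-in for the parameters being changed; not a kernel struct. */
struct wiphy_params {
	unsigned char retry_short, retry_long;
	unsigned int frag_threshold, rts_threshold;
};

/* Pretend driver callback: rejects fragmentation thresholds below 256. */
static int driver_set_params(const struct wiphy_params *p, unsigned int changed)
{
	(void)changed;
	return p->frag_threshold < 256 ? -1 : 0;
}

/* Apply a change set; if the "driver" rejects it, roll back to the saved copy. */
static int apply_with_rollback(struct wiphy_params *cur,
			       const struct wiphy_params *req,
			       unsigned int changed)
{
	struct wiphy_params old = *cur;	/* save */
	int err;

	if (changed & 0x1)
		cur->retry_short = req->retry_short;
	if (changed & 0x2)
		cur->frag_threshold = req->frag_threshold;

	err = driver_set_params(cur, changed);
	if (err)
		*cur = old;		/* restore on failure */
	return err;
}

int main(void)
{
	struct wiphy_params cur = { 7, 4, 2346, 2347 };
	struct wiphy_params req = { 7, 4, 100, 2347 };

	printf("apply -> %d, frag_threshold now %u\n",
	       apply_with_rollback(&cur, &req, 0x2), cur.frag_threshold);
	return 0;
}

The same ordering (validate, snapshot, mutate, call out, undo on error) is what lets the handler return an error without leaving the wiphy half-configured.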
NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
+		    rdev->devlist_generation ^
+			(cfg80211_rdev_list_generation << 2));
+
+	return genlmsg_end(msg, hdr);
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	int wp_idx = 0;
+	int if_idx = 0;
+	int wp_start = cb->args[0];
+	int if_start = cb->args[1];
+	struct cfg80211_registered_device *rdev;
+	struct wireless_dev *wdev;
+
+	mutex_lock(&cfg80211_mutex);
+	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+		if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
+			continue;
+		if (wp_idx < wp_start) {
+			wp_idx++;
+			continue;
+		}
+		if_idx = 0;
+
+		mutex_lock(&rdev->devlist_mtx);
+		list_for_each_entry(wdev, &rdev->netdev_list, list) {
+			if (if_idx < if_start) {
+				if_idx++;
+				continue;
+			}
+			if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid,
+					       cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					       rdev, wdev->netdev) < 0) {
+				mutex_unlock(&rdev->devlist_mtx);
+				goto out;
+			}
+			if_idx++;
+		}
+		mutex_unlock(&rdev->devlist_mtx);
+
+		wp_idx++;
+	}
+ out:
+	mutex_unlock(&cfg80211_mutex);
+
+	cb->args[0] = wp_idx;
+	cb->args[1] = if_idx;
+
+	return skb->len;
+}
+
+static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *msg;
+	struct cfg80211_registered_device *dev;
+	struct net_device *netdev;
+	int err;
+
+	err = get_rdev_dev_by_info_ifindex(info, &dev, &netdev);
+	if (err)
+		return err;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		goto out_err;
+
+	if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
+			       dev, netdev) < 0)
+		goto out_free;
+
+	dev_put(netdev);
+	cfg80211_unlock_rdev(dev);
+
+	return genlmsg_reply(msg, info);
+
+ out_free:
+	nlmsg_free(msg);
+ out_err:
+	dev_put(netdev);
+	cfg80211_unlock_rdev(dev);
+	return -ENOBUFS;
+}
+
+static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = {
+	[NL80211_MNTR_FLAG_FCSFAIL] = { .type = NLA_FLAG },
+	[NL80211_MNTR_FLAG_PLCPFAIL] = { .type = NLA_FLAG },
+	[NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG },
+	[NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG },
+	[NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG },
+};
+
+static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
+{
+	struct nlattr *flags[NL80211_MNTR_FLAG_MAX + 1];
+	int flag;
+
+	*mntrflags = 0;
+
+	if (!nla)
+		return -EINVAL;
+
+	if (nla_parse_nested(flags, NL80211_MNTR_FLAG_MAX,
+			     nla, mntr_flags_policy))
+		return -EINVAL;
+
+	for (flag = 1; flag <= NL80211_MNTR_FLAG_MAX; flag++)
+		if (flags[flag])
+			*mntrflags |= (1<<flag);
+
+	return 0;
+}
+
+static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
+			       struct net_device *netdev, u8 use_4addr,
+			       enum nl80211_iftype iftype)
+{
+	if (!use_4addr) {
+		if (netdev && netdev->br_port)
+			return -EBUSY;
+		return 0;
+	}
+
+	switch (iftype) {
+	case NL80211_IFTYPE_AP_VLAN:
+		if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP)
+			return 0;
+		break;
+	case NL80211_IFTYPE_STATION:
+		if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_STATION)
+			return 0;
+		break;
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev;
+	struct vif_params params;
+	int err;
+	enum nl80211_iftype otype, ntype;
+	struct net_device *dev;
+	u32 _flags, *flags = NULL;
+	bool change = false;
+
+	memset(&params, 0, sizeof(params));
+
+	rtnl_lock();
+
+	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+	if (err)
+		goto unlock_rtnl;
+
+	otype = ntype = dev->ieee80211_ptr->iftype;
+
+	if (info->attrs[NL80211_ATTR_IFTYPE]) {
+		ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
+		if (otype != ntype)
+			change = true;
+		if 
(ntype > NL80211_IFTYPE_MAX) { + err = -EINVAL; + goto unlock; + } + } + + if (info->attrs[NL80211_ATTR_MESH_ID]) { + if (ntype != NL80211_IFTYPE_MESH_POINT) { + err = -EINVAL; + goto unlock; + } + params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); + params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); + change = true; + } + + if (info->attrs[NL80211_ATTR_4ADDR]) { + params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]); + change = true; + err = nl80211_valid_4addr(rdev, dev, params.use_4addr, ntype); + if (err) + goto unlock; + } else { + params.use_4addr = -1; + } + + if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { + if (ntype != NL80211_IFTYPE_MONITOR) { + err = -EINVAL; + goto unlock; + } + err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS], + &_flags); + if (err) + goto unlock; + + flags = &_flags; + change = true; + } + + if (change) + err = cfg80211_change_iface(rdev, dev, ntype, flags, ¶ms); + else + err = 0; + + if (!err && params.use_4addr != -1) + dev->ieee80211_ptr->use_4addr = params.use_4addr; + + unlock: + dev_put(dev); + cfg80211_unlock_rdev(rdev); + unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct vif_params params; + int err; + enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; + u32 flags; + + memset(¶ms, 0, sizeof(params)); + + if (!info->attrs[NL80211_ATTR_IFNAME]) + return -EINVAL; + + if (info->attrs[NL80211_ATTR_IFTYPE]) { + type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); + if (type > NL80211_IFTYPE_MAX) + return -EINVAL; + } + + rtnl_lock(); + + rdev = cfg80211_get_dev_from_info(info); + if (IS_ERR(rdev)) { + err = PTR_ERR(rdev); + goto unlock_rtnl; + } + + if (!rdev->ops->add_virtual_intf || + !(rdev->wiphy.interface_modes & (1 << type))) { + err = -EOPNOTSUPP; + goto unlock; + } + + if (type == NL80211_IFTYPE_MESH_POINT && + info->attrs[NL80211_ATTR_MESH_ID]) { + params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); + params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); + } + + if (info->attrs[NL80211_ATTR_4ADDR]) { + params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]); + err = nl80211_valid_4addr(rdev, NULL, params.use_4addr, type); + if (err) + goto unlock; + } + + err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? + info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, + &flags); + err = rdev->ops->add_virtual_intf(&rdev->wiphy, + nla_data(info->attrs[NL80211_ATTR_IFNAME]), + type, err ? 
NULL : &flags, ¶ms); + + unlock: + cfg80211_unlock_rdev(rdev); + unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->del_virtual_intf) { + err = -EOPNOTSUPP; + goto out; + } + + err = rdev->ops->del_virtual_intf(&rdev->wiphy, dev); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + unlock_rtnl: + rtnl_unlock(); + return err; +} + +struct get_key_cookie { + struct sk_buff *msg; + int error; + int idx; +}; + +static void get_key_callback(void *c, struct key_params *params) +{ + struct nlattr *key; + struct get_key_cookie *cookie = c; + + if (params->key) + NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA, + params->key_len, params->key); + + if (params->seq) + NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ, + params->seq_len, params->seq); + + if (params->cipher) + NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER, + params->cipher); + + key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY); + if (!key) + goto nla_put_failure; + + if (params->key) + NLA_PUT(cookie->msg, NL80211_KEY_DATA, + params->key_len, params->key); + + if (params->seq) + NLA_PUT(cookie->msg, NL80211_KEY_SEQ, + params->seq_len, params->seq); + + if (params->cipher) + NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER, + params->cipher); + + NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx); + + nla_nest_end(cookie->msg, key); + + return; + nla_put_failure: + cookie->error = 1; +} + +static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + u8 key_idx = 0; + u8 *mac_addr = NULL; + struct get_key_cookie cookie = { + .error = 0, + }; + void *hdr; + struct sk_buff *msg; + + if (info->attrs[NL80211_ATTR_KEY_IDX]) + key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); + + if (key_idx > 5) + return -EINVAL; + + if (info->attrs[NL80211_ATTR_MAC]) + mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->get_key) { + err = -EOPNOTSUPP; + goto out; + } + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) { + err = -ENOMEM; + goto out; + } + + hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, + NL80211_CMD_NEW_KEY); + + if (IS_ERR(hdr)) { + err = PTR_ERR(hdr); + goto free_msg; + } + + cookie.msg = msg; + cookie.idx = key_idx; + + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx); + if (mac_addr) + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); + + err = rdev->ops->get_key(&rdev->wiphy, dev, key_idx, mac_addr, + &cookie, get_key_callback); + + if (err) + goto free_msg; + + if (cookie.error) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + err = genlmsg_reply(msg, info); + goto out; + + nla_put_failure: + err = -ENOBUFS; + free_msg: + nlmsg_free(msg); + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + unlock_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct key_parse key; + int err; + struct net_device *dev; + int (*func)(struct wiphy *wiphy, struct net_device *netdev, + u8 key_index); + + err = nl80211_parse_key(info, &key); + if 
(err) + return err; + + if (key.idx < 0) + return -EINVAL; + + /* only support setting default key */ + if (!key.def && !key.defmgmt) + return -EINVAL; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (key.def) + func = rdev->ops->set_default_key; + else + func = rdev->ops->set_default_mgmt_key; + + if (!func) { + err = -EOPNOTSUPP; + goto out; + } + + wdev_lock(dev->ieee80211_ptr); + err = nl80211_key_allowed(dev->ieee80211_ptr); + if (!err) + err = func(&rdev->wiphy, dev, key.idx); + +#ifdef CONFIG_CFG80211_WEXT + if (!err) { + if (func == rdev->ops->set_default_key) + dev->ieee80211_ptr->wext.default_key = key.idx; + else + dev->ieee80211_ptr->wext.default_mgmt_key = key.idx; + } +#endif + wdev_unlock(dev->ieee80211_ptr); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + + unlock_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + struct key_parse key; + u8 *mac_addr = NULL; + + err = nl80211_parse_key(info, &key); + if (err) + return err; + + if (!key.p.key) + return -EINVAL; + + if (info->attrs[NL80211_ATTR_MAC]) + mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->add_key) { + err = -EOPNOTSUPP; + goto out; + } + + if (cfg80211_validate_key_settings(rdev, &key.p, key.idx, mac_addr)) { + err = -EINVAL; + goto out; + } + + wdev_lock(dev->ieee80211_ptr); + err = nl80211_key_allowed(dev->ieee80211_ptr); + if (!err) + err = rdev->ops->add_key(&rdev->wiphy, dev, key.idx, + mac_addr, &key.p); + wdev_unlock(dev->ieee80211_ptr); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + unlock_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + u8 *mac_addr = NULL; + struct key_parse key; + + err = nl80211_parse_key(info, &key); + if (err) + return err; + + if (info->attrs[NL80211_ATTR_MAC]) + mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->del_key) { + err = -EOPNOTSUPP; + goto out; + } + + wdev_lock(dev->ieee80211_ptr); + err = nl80211_key_allowed(dev->ieee80211_ptr); + if (!err) + err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx, mac_addr); + +#ifdef CONFIG_CFG80211_WEXT + if (!err) { + if (key.idx == dev->ieee80211_ptr->wext.default_key) + dev->ieee80211_ptr->wext.default_key = -1; + else if (key.idx == dev->ieee80211_ptr->wext.default_mgmt_key) + dev->ieee80211_ptr->wext.default_mgmt_key = -1; + } +#endif + wdev_unlock(dev->ieee80211_ptr); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + + unlock_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) +{ + int (*call)(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info); + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + struct beacon_parameters params; + int haveinfo = 0; + + if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL])) + return -EINVAL; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if 
(dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { + err = -EOPNOTSUPP; + goto out; + } + + switch (info->genlhdr->cmd) { + case NL80211_CMD_NEW_BEACON: + /* these are required for NEW_BEACON */ + if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] || + !info->attrs[NL80211_ATTR_DTIM_PERIOD] || + !info->attrs[NL80211_ATTR_BEACON_HEAD]) { + err = -EINVAL; + goto out; + } + + call = rdev->ops->add_beacon; + break; + case NL80211_CMD_SET_BEACON: + call = rdev->ops->set_beacon; + break; + default: + WARN_ON(1); + err = -EOPNOTSUPP; + goto out; + } + + if (!call) { + err = -EOPNOTSUPP; + goto out; + } + + memset(¶ms, 0, sizeof(params)); + + if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) { + params.interval = + nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); + haveinfo = 1; + } + + if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) { + params.dtim_period = + nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]); + haveinfo = 1; + } + + if (info->attrs[NL80211_ATTR_BEACON_HEAD]) { + params.head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]); + params.head_len = + nla_len(info->attrs[NL80211_ATTR_BEACON_HEAD]); + haveinfo = 1; + } + + if (info->attrs[NL80211_ATTR_BEACON_TAIL]) { + params.tail = nla_data(info->attrs[NL80211_ATTR_BEACON_TAIL]); + params.tail_len = + nla_len(info->attrs[NL80211_ATTR_BEACON_TAIL]); + haveinfo = 1; + } + + if (!haveinfo) { + err = -EINVAL; + goto out; + } + + err = call(&rdev->wiphy, dev, ¶ms); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + unlock_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->del_beacon) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { + err = -EOPNOTSUPP; + goto out; + } + err = rdev->ops->del_beacon(&rdev->wiphy, dev); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + unlock_rtnl: + rtnl_unlock(); + + return err; +} + +static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = { + [NL80211_STA_FLAG_AUTHORIZED] = { .type = NLA_FLAG }, + [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG }, + [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG }, + [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG }, +}; + +static int parse_station_flags(struct genl_info *info, + struct station_parameters *params) +{ + struct nlattr *flags[NL80211_STA_FLAG_MAX + 1]; + struct nlattr *nla; + int flag; + + /* + * Try parsing the new attribute first so userspace + * can specify both for older kernels. 
+	 */
+	nla = info->attrs[NL80211_ATTR_STA_FLAGS2];
+	if (nla) {
+		struct nl80211_sta_flag_update *sta_flags;
+
+		sta_flags = nla_data(nla);
+		params->sta_flags_mask = sta_flags->mask;
+		params->sta_flags_set = sta_flags->set;
+		if ((params->sta_flags_mask |
+		     params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID))
+			return -EINVAL;
+		return 0;
+	}
+
+	/* if present, parse the old attribute */
+
+	nla = info->attrs[NL80211_ATTR_STA_FLAGS];
+	if (!nla)
+		return 0;
+
+	if (nla_parse_nested(flags, NL80211_STA_FLAG_MAX,
+			     nla, sta_flags_policy))
+		return -EINVAL;
+
+	params->sta_flags_mask = (1 << __NL80211_STA_FLAG_AFTER_LAST) - 1;
+	params->sta_flags_mask &= ~1;
+
+	for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++)
+		if (flags[flag])
+			params->sta_flags_set |= (1<<flag);
+
+	return 0;
+}
+
+static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
+				int flags, struct net_device *dev,
+				u8 *mac_addr, struct station_info *sinfo)
+{
+	void *hdr;
+	struct nlattr *sinfoattr, *txrate;
+	u16 bitrate;
+
+	hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
+	if (!hdr)
+		return -1;
+
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+
+	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation);
+
+	sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
+	if (!sinfoattr)
+		goto nla_put_failure;
+	if (sinfo->filled & STATION_INFO_INACTIVE_TIME)
+		NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME,
+			    sinfo->inactive_time);
+	if (sinfo->filled & STATION_INFO_RX_BYTES)
+		NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES,
+			    sinfo->rx_bytes);
+	if (sinfo->filled & STATION_INFO_TX_BYTES)
+		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES,
+			    sinfo->tx_bytes);
+	if (sinfo->filled & STATION_INFO_LLID)
+		NLA_PUT_U16(msg, NL80211_STA_INFO_LLID,
+			    sinfo->llid);
+	if (sinfo->filled & STATION_INFO_PLID)
+		NLA_PUT_U16(msg, NL80211_STA_INFO_PLID,
+			    sinfo->plid);
+	if (sinfo->filled & STATION_INFO_PLINK_STATE)
+		NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE,
+			   sinfo->plink_state);
+	if (sinfo->filled & STATION_INFO_SIGNAL)
+		NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
+			   sinfo->signal);
+	if (sinfo->filled & STATION_INFO_TX_BITRATE) {
+		txrate = nla_nest_start(msg, NL80211_STA_INFO_TX_BITRATE);
+		if (!txrate)
+			goto nla_put_failure;
+
+		/* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
+		bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
+		if (bitrate > 0)
+			NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
+
+		if (sinfo->txrate.flags & RATE_INFO_FLAGS_MCS)
+			NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS,
+				   sinfo->txrate.mcs);
+		if (sinfo->txrate.flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
+			NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
+		if (sinfo->txrate.flags & RATE_INFO_FLAGS_SHORT_GI)
+			NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
+
+		nla_nest_end(msg, txrate);
+	}
+	if (sinfo->filled & STATION_INFO_RX_PACKETS)
+		NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS,
+			    sinfo->rx_packets);
+	if (sinfo->filled & STATION_INFO_TX_PACKETS)
+		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS,
+			    sinfo->tx_packets);
+	nla_nest_end(msg, sinfoattr);
+
+	return genlmsg_end(msg, hdr);
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int nl80211_dump_station(struct sk_buff *skb,
+				struct netlink_callback *cb)
+{
+	struct station_info sinfo;
+	struct cfg80211_registered_device *dev;
+	struct net_device *netdev;
+	u8 mac_addr[ETH_ALEN];
+	int ifidx = cb->args[0];
+	int sta_idx = cb->args[1];
+	int err;
+
+	if (!ifidx)
+		ifidx = nl80211_get_ifidx(cb);
+	if (ifidx < 0)
+		return ifidx;
+
+	rtnl_lock();
+
+	netdev = __dev_get_by_index(sock_net(skb->sk), ifidx);
+	if (!netdev) {
+		err = -ENODEV;
+		goto out_rtnl;
+	}
+
+	dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
+	if (IS_ERR(dev)) {
+		err = PTR_ERR(dev);
+		goto out_rtnl;
+	}
+
+	if (!dev->ops->dump_station) {
+		err = -EOPNOTSUPP;
+		goto out_err;
+ } + + while (1) { + err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx, + mac_addr, &sinfo); + if (err == -ENOENT) + break; + if (err) + goto out_err; + + if (nl80211_send_station(skb, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + netdev, mac_addr, + &sinfo) < 0) + goto out; + + sta_idx++; + } + + + out: + cb->args[1] = sta_idx; + err = skb->len; + out_err: + cfg80211_unlock_rdev(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + struct station_info sinfo; + struct sk_buff *msg; + u8 *mac_addr = NULL; + + memset(&sinfo, 0, sizeof(sinfo)); + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (!rdev->ops->get_station) { + err = -EOPNOTSUPP; + goto out; + } + + err = rdev->ops->get_station(&rdev->wiphy, dev, mac_addr, &sinfo); + if (err) + goto out; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + goto out; + + if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0, + dev, mac_addr, &sinfo) < 0) + goto out_free; + + err = genlmsg_reply(msg, info); + goto out; + + out_free: + nlmsg_free(msg); + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +/* + * Get vlan interface making sure it is running and on the right wiphy. + */ +static int get_vlan(struct genl_info *info, + struct cfg80211_registered_device *rdev, + struct net_device **vlan) +{ + struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN]; + *vlan = NULL; + + if (vlanattr) { + *vlan = dev_get_by_index(genl_info_net(info), + nla_get_u32(vlanattr)); + if (!*vlan) + return -ENODEV; + if (!(*vlan)->ieee80211_ptr) + return -EINVAL; + if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy) + return -EINVAL; + if (!netif_running(*vlan)) + return -ENETDOWN; + } + return 0; +} + +static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + struct station_parameters params; + u8 *mac_addr = NULL; + + memset(¶ms, 0, sizeof(params)); + + params.listen_interval = -1; + + if (info->attrs[NL80211_ATTR_STA_AID]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + if (info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) { + params.supported_rates = + nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); + params.supported_rates_len = + nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); + } + + if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) + params.listen_interval = + nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); + + if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) + params.ht_capa = + nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); + + if (parse_station_flags(info, ¶ms)) + return -EINVAL; + + if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) + params.plink_action = + nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + err = get_vlan(info, rdev, ¶ms.vlan); + if (err) + goto out; + + /* validate settings */ + err = 0; + + switch (dev->ieee80211_ptr->iftype) { + case NL80211_IFTYPE_AP: + case 
NL80211_IFTYPE_AP_VLAN: + /* disallow mesh-specific things */ + if (params.plink_action) + err = -EINVAL; + break; + case NL80211_IFTYPE_STATION: + /* disallow everything but AUTHORIZED flag */ + if (params.plink_action) + err = -EINVAL; + if (params.vlan) + err = -EINVAL; + if (params.supported_rates) + err = -EINVAL; + if (params.ht_capa) + err = -EINVAL; + if (params.listen_interval >= 0) + err = -EINVAL; + if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED)) + err = -EINVAL; + break; + case NL80211_IFTYPE_MESH_POINT: + /* disallow things mesh doesn't support */ + if (params.vlan) + err = -EINVAL; + if (params.ht_capa) + err = -EINVAL; + if (params.listen_interval >= 0) + err = -EINVAL; + if (params.supported_rates) + err = -EINVAL; + if (params.sta_flags_mask) + err = -EINVAL; + break; + default: + err = -EINVAL; + } + + if (err) + goto out; + + if (!rdev->ops->change_station) { + err = -EOPNOTSUPP; + goto out; + } + + err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, ¶ms); + + out: + if (params.vlan) + dev_put(params.vlan); + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + struct station_parameters params; + u8 *mac_addr = NULL; + + memset(¶ms, 0, sizeof(params)); + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_STA_AID]) + return -EINVAL; + + mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + params.supported_rates = + nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); + params.supported_rates_len = + nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); + params.listen_interval = + nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); + + params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); + if (!params.aid || params.aid > IEEE80211_MAX_AID) + return -EINVAL; + + if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) + params.ht_capa = + nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); + + if (parse_station_flags(info, ¶ms)) + return -EINVAL; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { + err = -EINVAL; + goto out; + } + + err = get_vlan(info, rdev, ¶ms.vlan); + if (err) + goto out; + + /* validate settings */ + err = 0; + + if (!rdev->ops->add_station) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, ¶ms); + + out: + if (params.vlan) + dev_put(params.vlan); + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + u8 *mac_addr = NULL; + + if (info->attrs[NL80211_ATTR_MAC]) + mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { + err = -EINVAL; + goto 
out; + } + + if (!rdev->ops->del_station) { + err = -EOPNOTSUPP; + goto out; + } + + err = rdev->ops->del_station(&rdev->wiphy, dev, mac_addr); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq, + int flags, struct net_device *dev, + u8 *dst, u8 *next_hop, + struct mpath_info *pinfo) +{ + void *hdr; + struct nlattr *pinfoattr; + + hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); + if (!hdr) + return -1; + + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst); + NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop); + + NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation); + + pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); + if (!pinfoattr) + goto nla_put_failure; + if (pinfo->filled & MPATH_INFO_FRAME_QLEN) + NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN, + pinfo->frame_qlen); + if (pinfo->filled & MPATH_INFO_SN) + NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN, + pinfo->sn); + if (pinfo->filled & MPATH_INFO_METRIC) + NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC, + pinfo->metric); + if (pinfo->filled & MPATH_INFO_EXPTIME) + NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME, + pinfo->exptime); + if (pinfo->filled & MPATH_INFO_FLAGS) + NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS, + pinfo->flags); + if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) + NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, + pinfo->discovery_timeout); + if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) + NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, + pinfo->discovery_retries); + + nla_nest_end(msg, pinfoattr); + + return genlmsg_end(msg, hdr); + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int nl80211_dump_mpath(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct mpath_info pinfo; + struct cfg80211_registered_device *dev; + struct net_device *netdev; + u8 dst[ETH_ALEN]; + u8 next_hop[ETH_ALEN]; + int ifidx = cb->args[0]; + int path_idx = cb->args[1]; + int err; + + if (!ifidx) + ifidx = nl80211_get_ifidx(cb); + if (ifidx < 0) + return ifidx; + + rtnl_lock(); + + netdev = __dev_get_by_index(sock_net(skb->sk), ifidx); + if (!netdev) { + err = -ENODEV; + goto out_rtnl; + } + + dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); + if (IS_ERR(dev)) { + err = PTR_ERR(dev); + goto out_rtnl; + } + + if (!dev->ops->dump_mpath) { + err = -EOPNOTSUPP; + goto out_err; + } + + if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { + err = -EOPNOTSUPP; + goto out_err; + } + + while (1) { + err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx, + dst, next_hop, &pinfo); + if (err == -ENOENT) + break; + if (err) + goto out_err; + + if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + netdev, dst, next_hop, + &pinfo) < 0) + goto out; + + path_idx++; + } + + + out: + cb->args[1] = path_idx; + err = skb->len; + out_err: + cfg80211_unlock_rdev(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + struct mpath_info pinfo; + struct sk_buff *msg; + u8 *dst = NULL; + u8 next_hop[ETH_ALEN]; + + memset(&pinfo, 0, sizeof(pinfo)); + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + dst = nla_data(info->attrs[NL80211_ATTR_MAC]); + + rtnl_lock(); + + err = 
get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (!rdev->ops->get_mpath) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { + err = -EOPNOTSUPP; + goto out; + } + + err = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, &pinfo); + if (err) + goto out; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + goto out; + + if (nl80211_send_mpath(msg, info->snd_pid, info->snd_seq, 0, + dev, dst, next_hop, &pinfo) < 0) + goto out_free; + + err = genlmsg_reply(msg, info); + goto out; + + out_free: + nlmsg_free(msg); + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + u8 *dst = NULL; + u8 *next_hop = NULL; + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]) + return -EINVAL; + + dst = nla_data(info->attrs[NL80211_ATTR_MAC]); + next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (!rdev->ops->change_mpath) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + err = rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} +static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + u8 *dst = NULL; + u8 *next_hop = NULL; + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]) + return -EINVAL; + + dst = nla_data(info->attrs[NL80211_ATTR_MAC]); + next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (!rdev->ops->add_mpath) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + err = rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + u8 *dst = NULL; + + if (info->attrs[NL80211_ATTR_MAC]) + dst = nla_data(info->attrs[NL80211_ATTR_MAC]); + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (!rdev->ops->del_mpath) { + err = -EOPNOTSUPP; + goto out; + } + + err = rdev->ops->del_mpath(&rdev->wiphy, dev, dst); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + struct bss_parameters params; + + memset(¶ms, 0, sizeof(params)); + /* default to not changing parameters */ + params.use_cts_prot = -1; + 
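[Editor's note] Here, and in the two assignments that follow, nl80211_set_bss() pre-loads every field of struct bss_parameters with -1 so the driver can distinguish "not specified" from a real 0/1 value. A small standalone C sketch of that tri-state convention is below; bss_change, bss_state and apply_bss_change are hypothetical names, not part of cfg80211.

#include <stdio.h>

/* Hypothetical mirror of the "-1 means leave unchanged" convention. */
struct bss_change {
	int use_cts_prot;		/* -1 = unchanged, 0 = off, 1 = on */
	int use_short_preamble;		/* -1 = unchanged, 0 = off, 1 = on */
};

struct bss_state {
	int cts_prot;
	int short_preamble;
};

/* Apply only the fields the caller actually specified. */
static void apply_bss_change(struct bss_state *bss, const struct bss_change *req)
{
	if (req->use_cts_prot >= 0)
		bss->cts_prot = req->use_cts_prot;
	if (req->use_short_preamble >= 0)
		bss->short_preamble = req->use_short_preamble;
}

int main(void)
{
	struct bss_state bss = { 0, 1 };
	struct bss_change req = { 1, -1 };	/* change CTS protection only */

	apply_bss_change(&bss, &req);
	printf("cts_prot=%d short_preamble=%d\n", bss.cts_prot, bss.short_preamble);
	return 0;
}
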
params.use_short_preamble = -1; + params.use_short_slot_time = -1; + + if (info->attrs[NL80211_ATTR_BSS_CTS_PROT]) + params.use_cts_prot = + nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]); + if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]) + params.use_short_preamble = + nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]); + if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]) + params.use_short_slot_time = + nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]); + if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { + params.basic_rates = + nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); + params.basic_rates_len = + nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); + } + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (!rdev->ops->change_bss) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { + err = -EOPNOTSUPP; + goto out; + } + + err = rdev->ops->change_bss(&rdev->wiphy, dev, ¶ms); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = { + [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 }, + [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 }, + [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 }, + [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 }, + [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 }, + [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 }, +}; + +static int parse_reg_rule(struct nlattr *tb[], + struct ieee80211_reg_rule *reg_rule) +{ + struct ieee80211_freq_range *freq_range = ®_rule->freq_range; + struct ieee80211_power_rule *power_rule = ®_rule->power_rule; + + if (!tb[NL80211_ATTR_REG_RULE_FLAGS]) + return -EINVAL; + if (!tb[NL80211_ATTR_FREQ_RANGE_START]) + return -EINVAL; + if (!tb[NL80211_ATTR_FREQ_RANGE_END]) + return -EINVAL; + if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]) + return -EINVAL; + if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]) + return -EINVAL; + + reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]); + + freq_range->start_freq_khz = + nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]); + freq_range->end_freq_khz = + nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]); + freq_range->max_bandwidth_khz = + nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]); + + power_rule->max_eirp = + nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]); + + if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]) + power_rule->max_antenna_gain = + nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]); + + return 0; +} + +static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info) +{ + int r; + char *data = NULL; + + /* + * You should only get this when cfg80211 hasn't yet initialized + * completely when built-in to the kernel right between the time + * window between nl80211_init() and regulatory_init(), if that is + * even possible. 
+ */ + mutex_lock(&cfg80211_mutex); + if (unlikely(!cfg80211_regdomain)) { + mutex_unlock(&cfg80211_mutex); + return -EINPROGRESS; + } + mutex_unlock(&cfg80211_mutex); + + if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) + return -EINVAL; + + data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); + + r = regulatory_hint_user(data); + + return r; +} + +static int nl80211_get_mesh_params(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct mesh_config cur_params; + int err; + struct net_device *dev; + void *hdr; + struct nlattr *pinfoattr; + struct sk_buff *msg; + + rtnl_lock(); + + /* Look up our device */ + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (!rdev->ops->get_mesh_params) { + err = -EOPNOTSUPP; + goto out; + } + + /* Get the mesh params */ + err = rdev->ops->get_mesh_params(&rdev->wiphy, dev, &cur_params); + if (err) + goto out; + + /* Draw up a netlink message to send back */ + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) { + err = -ENOBUFS; + goto out; + } + hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, + NL80211_CMD_GET_MESH_PARAMS); + if (!hdr) + goto nla_put_failure; + pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_PARAMS); + if (!pinfoattr) + goto nla_put_failure; + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT, + cur_params.dot11MeshRetryTimeout); + NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT, + cur_params.dot11MeshConfirmTimeout); + NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT, + cur_params.dot11MeshHoldingTimeout); + NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS, + cur_params.dot11MeshMaxPeerLinks); + NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES, + cur_params.dot11MeshMaxRetries); + NLA_PUT_U8(msg, NL80211_MESHCONF_TTL, + cur_params.dot11MeshTTL); + NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, + cur_params.auto_open_plinks); + NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, + cur_params.dot11MeshHWMPmaxPREQretries); + NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME, + cur_params.path_refresh_time); + NLA_PUT_U16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, + cur_params.min_discovery_timeout); + NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, + cur_params.dot11MeshHWMPactivePathTimeout); + NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, + cur_params.dot11MeshHWMPpreqMinInterval); + NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, + cur_params.dot11MeshHWMPnetDiameterTraversalTime); + NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, + cur_params.dot11MeshHWMPRootMode); + nla_nest_end(msg, pinfoattr); + genlmsg_end(msg, hdr); + err = genlmsg_reply(msg, info); + goto out; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + err = -EMSGSIZE; + out: + /* Cleanup */ + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +#define FILL_IN_MESH_PARAM_IF_SET(table, cfg, param, mask, attr_num, nla_fn) \ +do {\ + if (table[attr_num]) {\ + cfg.param = nla_fn(table[attr_num]); \ + mask |= (1 << (attr_num - 1)); \ + } \ +} while (0);\ + +static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = { + [NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 }, + [NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 }, + [NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 }, + [NL80211_MESHCONF_MAX_PEER_LINKS] = { .type = NLA_U16 }, + [NL80211_MESHCONF_MAX_RETRIES] = { .type = 
NLA_U8 }, + [NL80211_MESHCONF_TTL] = { .type = NLA_U8 }, + [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 }, + + [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 }, + [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 }, + [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 }, + [NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 }, + [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 }, + [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 }, +}; + +static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info) +{ + int err; + u32 mask; + struct cfg80211_registered_device *rdev; + struct net_device *dev; + struct mesh_config cfg; + struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; + struct nlattr *parent_attr; + + parent_attr = info->attrs[NL80211_ATTR_MESH_PARAMS]; + if (!parent_attr) + return -EINVAL; + if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX, + parent_attr, nl80211_meshconf_params_policy)) + return -EINVAL; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (!rdev->ops->set_mesh_params) { + err = -EOPNOTSUPP; + goto out; + } + + /* This makes sure that there aren't more than 32 mesh config + * parameters (otherwise our bitfield scheme would not work.) */ + BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32); + + /* Fill in the params struct */ + mask = 0; + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, + mask, NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, + mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, + mask, NL80211_MESHCONF_HOLDING_TIMEOUT, nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, + mask, NL80211_MESHCONF_MAX_PEER_LINKS, nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, + mask, NL80211_MESHCONF_MAX_RETRIES, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, + mask, NL80211_MESHCONF_TTL, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, + mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, + mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, + nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, + mask, NL80211_MESHCONF_PATH_REFRESH_TIME, nla_get_u32); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, + mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, + mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, + nla_get_u32); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, + mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, + dot11MeshHWMPnetDiameterTraversalTime, + mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, + nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, + dot11MeshHWMPRootMode, mask, + NL80211_MESHCONF_HWMP_ROOTMODE, + nla_get_u8); + + /* Apply changes */ + err = rdev->ops->set_mesh_params(&rdev->wiphy, dev, &cfg, mask); + + out: + /* cleanup */ + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +#undef FILL_IN_MESH_PARAM_IF_SET + +static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *msg; + void *hdr = NULL; + struct nlattr *nl_reg_rules; + unsigned int i; + int err = -EINVAL; + + 
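/* Hold cfg80211_mutex while cfg80211_regdomain is read and copied
+	 * into the reply message below. */ +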
mutex_lock(&cfg80211_mutex);
+
+	if (!cfg80211_regdomain)
+		goto out;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg) {
+		err = -ENOBUFS;
+		goto out;
+	}
+
+	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+			     NL80211_CMD_GET_REG);
+	if (!hdr)
+		goto nla_put_failure;
+
+	NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
+		cfg80211_regdomain->alpha2);
+
+	nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
+	if (!nl_reg_rules)
+		goto nla_put_failure;
+
+	for (i = 0; i < cfg80211_regdomain->n_reg_rules; i++) {
+		struct nlattr *nl_reg_rule;
+		const struct ieee80211_reg_rule *reg_rule;
+		const struct ieee80211_freq_range *freq_range;
+		const struct ieee80211_power_rule *power_rule;
+
+		reg_rule = &cfg80211_regdomain->reg_rules[i];
+		freq_range = &reg_rule->freq_range;
+		power_rule = &reg_rule->power_rule;
+
+		nl_reg_rule = nla_nest_start(msg, i);
+		if (!nl_reg_rule)
+			goto nla_put_failure;
+
+		NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS,
+			reg_rule->flags);
+		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START,
+			freq_range->start_freq_khz);
+		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END,
+			freq_range->end_freq_khz);
+		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
+			freq_range->max_bandwidth_khz);
+		NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
+			power_rule->max_antenna_gain);
+		NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
+			power_rule->max_eirp);
+
+		nla_nest_end(msg, nl_reg_rule);
+	}
+
+	nla_nest_end(msg, nl_reg_rules);
+
+	genlmsg_end(msg, hdr);
+	err = genlmsg_reply(msg, info);
+	goto out;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	err = -EMSGSIZE;
+out:
+	mutex_unlock(&cfg80211_mutex);
+	return err;
+}
+
+static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
+	struct nlattr *nl_reg_rule;
+	char *alpha2 = NULL;
+	int rem_reg_rules = 0, r = 0;
+	u32 num_rules = 0, rule_idx = 0, size_of_regd;
+	struct ieee80211_regdomain *rd = NULL;
+
+	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
+		return -EINVAL;
+
+	if (!info->attrs[NL80211_ATTR_REG_RULES])
+		return -EINVAL;
+
+	alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
+
+	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
+			rem_reg_rules) {
+		num_rules++;
+		if (num_rules > NL80211_MAX_SUPP_REG_RULES)
+			return -EINVAL;
+	}
+
+	mutex_lock(&cfg80211_mutex);
+
+	if (!reg_is_valid_request(alpha2)) {
+		r = -EINVAL;
+		goto bad_reg;
+	}
+
+	size_of_regd = sizeof(struct ieee80211_regdomain) +
+		(num_rules * sizeof(struct ieee80211_reg_rule));
+
+	rd = kzalloc(size_of_regd, GFP_KERNEL);
+	if (!rd) {
+		r = -ENOMEM;
+		goto bad_reg;
+	}
+
+	rd->n_reg_rules = num_rules;
+	rd->alpha2[0] = alpha2[0];
+	rd->alpha2[1] = alpha2[1];
+
+	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
+			rem_reg_rules) {
+		nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
+			nla_data(nl_reg_rule), nla_len(nl_reg_rule),
+			reg_rule_policy);
+		r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]);
+		if (r)
+			goto bad_reg;
+
+		rule_idx++;
+
+		if (rule_idx > NL80211_MAX_SUPP_REG_RULES) {
+			r = -EINVAL;
+			goto bad_reg;
+		}
+	}
+
+	BUG_ON(rule_idx != num_rules);
+
+	r = set_regdom(rd);
+
+	mutex_unlock(&cfg80211_mutex);
+
+	return r;
+
+ bad_reg:
+	mutex_unlock(&cfg80211_mutex);
+	kfree(rd);
+	return r;
+}
+
+static int validate_scan_freqs(struct nlattr *freqs)
+{
+	struct nlattr *attr1, *attr2;
+	int n_channels = 0, tmp1, tmp2;
+
+	nla_for_each_nested(attr1, freqs, tmp1) {
+		n_channels++;
+		/*
+		 * Some hardware has a limited channel list for +
* scanning, and it is pretty much nonsensical + * to scan for a channel twice, so disallow that + * and don't require drivers to check that the + * channel list they get isn't longer than what + * they can scan, as long as they can scan all + * the channels they registered at once. + */ + nla_for_each_nested(attr2, freqs, tmp2) + if (attr1 != attr2 && + nla_get_u32(attr1) == nla_get_u32(attr2)) + return 0; + } + + return n_channels; +} + +static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + struct cfg80211_scan_request *request; + struct cfg80211_ssid *ssid; + struct ieee80211_channel *channel; + struct nlattr *attr; + struct wiphy *wiphy; + int err, tmp, n_ssids = 0, n_channels, i; + enum ieee80211_band band; + size_t ie_len; + + if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) + return -EINVAL; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + wiphy = &rdev->wiphy; + + if (!rdev->ops->scan) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + if (rdev->scan_req) { + err = -EBUSY; + goto out; + } + + if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { + n_channels = validate_scan_freqs( + info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); + if (!n_channels) { + err = -EINVAL; + goto out; + } + } else { + n_channels = 0; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) + if (wiphy->bands[band]) + n_channels += wiphy->bands[band]->n_channels; + } + + if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) + nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) + n_ssids++; + + if (n_ssids > wiphy->max_scan_ssids) { + err = -EINVAL; + goto out; + } + + if (info->attrs[NL80211_ATTR_IE]) + ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + else + ie_len = 0; + + if (ie_len > wiphy->max_scan_ie_len) { + err = -EINVAL; + goto out; + } + + request = kzalloc(sizeof(*request) + + sizeof(*ssid) * n_ssids + + sizeof(channel) * n_channels + + ie_len, GFP_KERNEL); + if (!request) { + err = -ENOMEM; + goto out; + } + + if (n_ssids) + request->ssids = (void *)&request->channels[n_channels]; + request->n_ssids = n_ssids; + if (ie_len) { + if (request->ssids) + request->ie = (void *)(request->ssids + n_ssids); + else + request->ie = (void *)(request->channels + n_channels); + } + + i = 0; + if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { + /* user specified, bail out if channel not found */ + nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) { + struct ieee80211_channel *chan; + + chan = ieee80211_get_channel(wiphy, nla_get_u32(attr)); + + if (!chan) { + err = -EINVAL; + goto out_free; + } + + /* ignore disabled channels */ + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + + request->channels[i] = chan; + i++; + } + } else { + /* all channels */ + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + int j; + if (!wiphy->bands[band]) + continue; + for (j = 0; j < wiphy->bands[band]->n_channels; j++) { + struct ieee80211_channel *chan; + + chan = &wiphy->bands[band]->channels[j]; + + if (chan->flags & IEEE80211_CHAN_DISABLED) + continue; + + request->channels[i] = chan; + i++; + } + } + } + + if (!i) { + err = -EINVAL; + goto out_free; + } + + request->n_channels = i; + + i = 0; + if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { + nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { + if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) { + err 
= -EINVAL; + goto out_free; + } + memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); + request->ssids[i].ssid_len = nla_len(attr); + i++; + } + } + + if (info->attrs[NL80211_ATTR_IE]) { + request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + memcpy((void *)request->ie, + nla_data(info->attrs[NL80211_ATTR_IE]), + request->ie_len); + } + + request->dev = dev; + request->wiphy = &rdev->wiphy; + + rdev->scan_req = request; + err = rdev->ops->scan(&rdev->wiphy, dev, request); + + if (!err) { + nl80211_send_scan_start(rdev, dev); + dev_hold(dev); + } + + out_free: + if (err) { + rdev->scan_req = NULL; + kfree(request); + } + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, + struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev, + struct cfg80211_internal_bss *intbss) +{ + struct cfg80211_bss *res = &intbss->pub; + void *hdr; + struct nlattr *bss; + int i; + + ASSERT_WDEV_LOCK(wdev); + + hdr = nl80211hdr_put(msg, pid, seq, flags, + NL80211_CMD_NEW_SCAN_RESULTS); + if (!hdr) + return -1; + + NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex); + + bss = nla_nest_start(msg, NL80211_ATTR_BSS); + if (!bss) + goto nla_put_failure; + if (!is_zero_ether_addr(res->bssid)) + NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid); + if (res->information_elements && res->len_information_elements) + NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS, + res->len_information_elements, + res->information_elements); + if (res->beacon_ies && res->len_beacon_ies && + res->beacon_ies != res->information_elements) + NLA_PUT(msg, NL80211_BSS_BEACON_IES, + res->len_beacon_ies, res->beacon_ies); + if (res->tsf) + NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf); + if (res->beacon_interval) + NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval); + NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability); + NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq); + NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO, + jiffies_to_msecs(jiffies - intbss->ts)); + + switch (rdev->wiphy.signal_type) { + case CFG80211_SIGNAL_TYPE_MBM: + NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal); + break; + case CFG80211_SIGNAL_TYPE_UNSPEC: + NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal); + break; + default: + break; + } + + switch (wdev->iftype) { + case NL80211_IFTYPE_STATION: + if (intbss == wdev->current_bss) + NLA_PUT_U32(msg, NL80211_BSS_STATUS, + NL80211_BSS_STATUS_ASSOCIATED); + else for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (intbss != wdev->auth_bsses[i]) + continue; + NLA_PUT_U32(msg, NL80211_BSS_STATUS, + NL80211_BSS_STATUS_AUTHENTICATED); + break; + } + break; + case NL80211_IFTYPE_ADHOC: + if (intbss == wdev->current_bss) + NLA_PUT_U32(msg, NL80211_BSS_STATUS, + NL80211_BSS_STATUS_IBSS_JOINED); + break; + default: + break; + } + + nla_nest_end(msg, bss); + + return genlmsg_end(msg, hdr); + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int nl80211_dump_scan(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + struct cfg80211_internal_bss *scan; + struct wireless_dev *wdev; + int ifidx = cb->args[0]; + int start = cb->args[1], idx = 0; + int err; + + if (!ifidx) + ifidx = nl80211_get_ifidx(cb); + if (ifidx < 0) + return ifidx; + cb->args[0] = ifidx; + + dev = 
dev_get_by_index(sock_net(skb->sk), ifidx); + if (!dev) + return -ENODEV; + + rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); + if (IS_ERR(rdev)) { + err = PTR_ERR(rdev); + goto out_put_netdev; + } + + wdev = dev->ieee80211_ptr; + + wdev_lock(wdev); + spin_lock_bh(&rdev->bss_lock); + cfg80211_bss_expire(rdev); + + list_for_each_entry(scan, &rdev->bss_list, list) { + if (++idx <= start) + continue; + if (nl80211_send_bss(skb, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + rdev, wdev, scan) < 0) { + idx--; + goto out; + } + } + + out: + spin_unlock_bh(&rdev->bss_lock); + wdev_unlock(wdev); + + cb->args[1] = idx; + err = skb->len; + cfg80211_unlock_rdev(rdev); + out_put_netdev: + dev_put(dev); + + return err; +} + +static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq, + int flags, struct net_device *dev, + struct survey_info *survey) +{ + void *hdr; + struct nlattr *infoattr; + + /* Survey without a channel doesn't make sense */ + if (!survey->channel) + return -EINVAL; + + hdr = nl80211hdr_put(msg, pid, seq, flags, + NL80211_CMD_NEW_SURVEY_RESULTS); + if (!hdr) + return -ENOMEM; + + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + + infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO); + if (!infoattr) + goto nla_put_failure; + + NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY, + survey->channel->center_freq); + if (survey->filled & SURVEY_INFO_NOISE_DBM) + NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE, + survey->noise); + + nla_nest_end(msg, infoattr); + + return genlmsg_end(msg, hdr); + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int nl80211_dump_survey(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct survey_info survey; + struct cfg80211_registered_device *dev; + struct net_device *netdev; + int ifidx = cb->args[0]; + int survey_idx = cb->args[1]; + int res; + + if (!ifidx) + ifidx = nl80211_get_ifidx(cb); + if (ifidx < 0) + return ifidx; + cb->args[0] = ifidx; + + rtnl_lock(); + + netdev = __dev_get_by_index(sock_net(skb->sk), ifidx); + if (!netdev) { + res = -ENODEV; + goto out_rtnl; + } + + dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); + if (IS_ERR(dev)) { + res = PTR_ERR(dev); + goto out_rtnl; + } + + if (!dev->ops->dump_survey) { + res = -EOPNOTSUPP; + goto out_err; + } + + while (1) { + res = dev->ops->dump_survey(&dev->wiphy, netdev, survey_idx, + &survey); + if (res == -ENOENT) + break; + if (res) + goto out_err; + + if (nl80211_send_survey(skb, + NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + netdev, + &survey) < 0) + goto out; + survey_idx++; + } + + out: + cb->args[1] = survey_idx; + res = skb->len; + out_err: + cfg80211_unlock_rdev(dev); + out_rtnl: + rtnl_unlock(); + + return res; +} + +static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type) +{ + return auth_type <= NL80211_AUTHTYPE_MAX; +} + +static bool nl80211_valid_wpa_versions(u32 wpa_versions) +{ + return !(wpa_versions & ~(NL80211_WPA_VERSION_1 | + NL80211_WPA_VERSION_2)); +} + +static bool nl80211_valid_akm_suite(u32 akm) +{ + return akm == WLAN_AKM_SUITE_8021X || + akm == WLAN_AKM_SUITE_PSK; +} + +static bool nl80211_valid_cipher_suite(u32 cipher) +{ + return cipher == WLAN_CIPHER_SUITE_WEP40 || + cipher == WLAN_CIPHER_SUITE_WEP104 || + cipher == WLAN_CIPHER_SUITE_TKIP || + cipher == WLAN_CIPHER_SUITE_CCMP || + cipher == WLAN_CIPHER_SUITE_AES_CMAC; +} + + +static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) +{ + struct 
cfg80211_registered_device *rdev; + struct net_device *dev; + struct ieee80211_channel *chan; + const u8 *bssid, *ssid, *ie = NULL; + int err, ssid_len, ie_len = 0; + enum nl80211_auth_type auth_type; + struct key_parse key; + + if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_AUTH_TYPE]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_SSID]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) + return -EINVAL; + + err = nl80211_parse_key(info, &key); + if (err) + return err; + + if (key.idx >= 0) { + if (!key.p.key || !key.p.key_len) + return -EINVAL; + if ((key.p.cipher != WLAN_CIPHER_SUITE_WEP40 || + key.p.key_len != WLAN_KEY_LEN_WEP40) && + (key.p.cipher != WLAN_CIPHER_SUITE_WEP104 || + key.p.key_len != WLAN_KEY_LEN_WEP104)) + return -EINVAL; + if (key.idx > 4) + return -EINVAL; + } else { + key.p.key_len = 0; + key.p.key = NULL; + } + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->auth) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); + chan = ieee80211_get_channel(&rdev->wiphy, + nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); + if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED)) { + err = -EINVAL; + goto out; + } + + ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); + ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); + + if (info->attrs[NL80211_ATTR_IE]) { + ie = nla_data(info->attrs[NL80211_ATTR_IE]); + ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); + if (!nl80211_valid_auth_type(auth_type)) { + err = -EINVAL; + goto out; + } + + err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, + ssid, ssid_len, ie, ie_len, + key.p.key, key.p.key_len, key.idx); + +out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_crypto_settings(struct genl_info *info, + struct cfg80211_crypto_settings *settings, + int cipher_limit) +{ + memset(settings, 0, sizeof(*settings)); + + settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT]; + + if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) { + void *data; + int len, i; + + data = nla_data(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]); + len = nla_len(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]); + settings->n_ciphers_pairwise = len / sizeof(u32); + + if (len % sizeof(u32)) + return -EINVAL; + + if (settings->n_ciphers_pairwise > cipher_limit) + return -EINVAL; + + memcpy(settings->ciphers_pairwise, data, len); + + for (i = 0; i < settings->n_ciphers_pairwise; i++) + if (!nl80211_valid_cipher_suite( + settings->ciphers_pairwise[i])) + return -EINVAL; + } + + if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) { + settings->cipher_group = + nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]); + if (!nl80211_valid_cipher_suite(settings->cipher_group)) + return -EINVAL; + } + + if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) { + settings->wpa_versions = + nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]); + if (!nl80211_valid_wpa_versions(settings->wpa_versions)) + return -EINVAL; + } + + if (info->attrs[NL80211_ATTR_AKM_SUITES]) { + void *data; + int len, i; + + data = 
nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]); + len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]); + settings->n_akm_suites = len / sizeof(u32); + + if (len % sizeof(u32)) + return -EINVAL; + + memcpy(settings->akm_suites, data, len); + + for (i = 0; i < settings->n_ciphers_pairwise; i++) + if (!nl80211_valid_akm_suite(settings->akm_suites[i])) + return -EINVAL; + } + + return 0; +} + +static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + struct wireless_dev *wdev; + struct cfg80211_crypto_settings crypto; + struct ieee80211_channel *chan, *fixedchan; + const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; + int err, ssid_len, ie_len = 0; + bool use_mfp = false; + + if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_MAC] || + !info->attrs[NL80211_ATTR_SSID] || + !info->attrs[NL80211_ATTR_WIPHY_FREQ]) + return -EINVAL; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->assoc) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); + + chan = ieee80211_get_channel(&rdev->wiphy, + nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); + if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED)) { + err = -EINVAL; + goto out; + } + + mutex_lock(&rdev->devlist_mtx); + wdev = dev->ieee80211_ptr; + fixedchan = rdev_fixed_channel(rdev, wdev); + if (fixedchan && chan != fixedchan) { + err = -EBUSY; + mutex_unlock(&rdev->devlist_mtx); + goto out; + } + mutex_unlock(&rdev->devlist_mtx); + + ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); + ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); + + if (info->attrs[NL80211_ATTR_IE]) { + ie = nla_data(info->attrs[NL80211_ATTR_IE]); + ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + if (info->attrs[NL80211_ATTR_USE_MFP]) { + enum nl80211_mfp mfp = + nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]); + if (mfp == NL80211_MFP_REQUIRED) + use_mfp = true; + else if (mfp != NL80211_MFP_NO) { + err = -EINVAL; + goto out; + } + } + + if (info->attrs[NL80211_ATTR_PREV_BSSID]) + prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); + + err = nl80211_crypto_settings(info, &crypto, 1); + if (!err) + err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, + ssid, ssid_len, ie, ie_len, use_mfp, + &crypto); + +out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + const u8 *ie = NULL, *bssid; + int err, ie_len = 0; + u16 reason_code; + + if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_REASON_CODE]) + return -EINVAL; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->deauth) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); + + reason_code = 
nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); + if (reason_code == 0) { + /* Reason Code 0 is reserved */ + err = -EINVAL; + goto out; + } + + if (info->attrs[NL80211_ATTR_IE]) { + ie = nla_data(info->attrs[NL80211_ATTR_IE]); + ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code); + +out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + const u8 *ie = NULL, *bssid; + int err, ie_len = 0; + u16 reason_code; + + if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_REASON_CODE]) + return -EINVAL; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->disassoc) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); + + reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); + if (reason_code == 0) { + /* Reason Code 0 is reserved */ + err = -EINVAL; + goto out; + } + + if (info->attrs[NL80211_ATTR_IE]) { + ie = nla_data(info->attrs[NL80211_ATTR_IE]); + ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code); + +out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + struct cfg80211_ibss_params ibss; + struct wiphy *wiphy; + struct cfg80211_cached_keys *connkeys = NULL; + int err; + + memset(&ibss, 0, sizeof(ibss)); + + if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || + !info->attrs[NL80211_ATTR_SSID] || + !nla_len(info->attrs[NL80211_ATTR_SSID])) + return -EINVAL; + + ibss.beacon_interval = 100; + + if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) { + ibss.beacon_interval = + nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); + if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000) + return -EINVAL; + } + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->join_ibss) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + wiphy = &rdev->wiphy; + + if (info->attrs[NL80211_ATTR_MAC]) + ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); + ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); + ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); + + if (info->attrs[NL80211_ATTR_IE]) { + ibss.ie = nla_data(info->attrs[NL80211_ATTR_IE]); + ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + ibss.channel = ieee80211_get_channel(wiphy, + nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); + if (!ibss.channel || + ibss.channel->flags & IEEE80211_CHAN_NO_IBSS || + ibss.channel->flags & IEEE80211_CHAN_DISABLED) { + err = -EINVAL; + goto out; + } + + 
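/* flag attributes: the mere presence of NL80211_ATTR_FREQ_FIXED and
+	 * NL80211_ATTR_PRIVACY turns the corresponding option on */ +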
ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; + ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; + + if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) { + connkeys = nl80211_parse_connkeys(rdev, + info->attrs[NL80211_ATTR_KEYS]); + if (IS_ERR(connkeys)) { + err = PTR_ERR(connkeys); + connkeys = NULL; + goto out; + } + } + + err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); + +out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); +unlock_rtnl: + if (err) + kfree(connkeys); + rtnl_unlock(); + return err; +} + +static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + int err; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (!rdev->ops->leave_ibss) { + err = -EOPNOTSUPP; + goto out; + } + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + err = cfg80211_leave_ibss(rdev, dev, false); + +out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + +#ifdef CONFIG_NL80211_TESTMODE +static struct genl_multicast_group nl80211_testmode_mcgrp = { + .name = "testmode", +}; + +static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + + if (!info->attrs[NL80211_ATTR_TESTDATA]) + return -EINVAL; + + rtnl_lock(); + + rdev = cfg80211_get_dev_from_info(info); + if (IS_ERR(rdev)) { + err = PTR_ERR(rdev); + goto unlock_rtnl; + } + + err = -EOPNOTSUPP; + if (rdev->ops->testmode_cmd) { + rdev->testmode_info = info; + err = rdev->ops->testmode_cmd(&rdev->wiphy, + nla_data(info->attrs[NL80211_ATTR_TESTDATA]), + nla_len(info->attrs[NL80211_ATTR_TESTDATA])); + rdev->testmode_info = NULL; + } + + cfg80211_unlock_rdev(rdev); + + unlock_rtnl: + rtnl_unlock(); + return err; +} + +static struct sk_buff * +__cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev, + int approxlen, u32 pid, u32 seq, gfp_t gfp) +{ + struct sk_buff *skb; + void *hdr; + struct nlattr *data; + + skb = nlmsg_new(approxlen + 100, gfp); + if (!skb) + return NULL; + + hdr = nl80211hdr_put(skb, pid, seq, 0, NL80211_CMD_TESTMODE); + if (!hdr) { + kfree_skb(skb); + return NULL; + } + + NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + data = nla_nest_start(skb, NL80211_ATTR_TESTDATA); + + ((void **)skb->cb)[0] = rdev; + ((void **)skb->cb)[1] = hdr; + ((void **)skb->cb)[2] = data; + + return skb; + + nla_put_failure: + kfree_skb(skb); + return NULL; +} + +struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, + int approxlen) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + if (WARN_ON(!rdev->testmode_info)) + return NULL; + + return __cfg80211_testmode_alloc_skb(rdev, approxlen, + rdev->testmode_info->snd_pid, + rdev->testmode_info->snd_seq, + GFP_KERNEL); +} +EXPORT_SYMBOL(cfg80211_testmode_alloc_reply_skb); + +int cfg80211_testmode_reply(struct sk_buff *skb) +{ + struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0]; + void *hdr = ((void **)skb->cb)[1]; + struct nlattr *data = ((void **)skb->cb)[2]; + + if (WARN_ON(!rdev->testmode_info)) { + kfree_skb(skb); + return -EINVAL; + } + + nla_nest_end(skb, data); + genlmsg_end(skb, hdr); + return genlmsg_reply(skb, rdev->testmode_info); +} +EXPORT_SYMBOL(cfg80211_testmode_reply); + +struct sk_buff 
*cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, + int approxlen, gfp_t gfp) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + return __cfg80211_testmode_alloc_skb(rdev, approxlen, 0, 0, gfp); +} +EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb); + +void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) +{ + void *hdr = ((void **)skb->cb)[1]; + struct nlattr *data = ((void **)skb->cb)[2]; + + nla_nest_end(skb, data); + genlmsg_end(skb, hdr); + genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp); +} +EXPORT_SYMBOL(cfg80211_testmode_event); +#endif + +static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + struct cfg80211_connect_params connect; + struct wiphy *wiphy; + struct cfg80211_cached_keys *connkeys = NULL; + int err; + + memset(&connect, 0, sizeof(connect)); + + if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_SSID] || + !nla_len(info->attrs[NL80211_ATTR_SSID])) + return -EINVAL; + + if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { + connect.auth_type = + nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); + if (!nl80211_valid_auth_type(connect.auth_type)) + return -EINVAL; + } else + connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; + + connect.privacy = info->attrs[NL80211_ATTR_PRIVACY]; + + err = nl80211_crypto_settings(info, &connect.crypto, + NL80211_MAX_NR_CIPHER_SUITES); + if (err) + return err; + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + wiphy = &rdev->wiphy; + + if (info->attrs[NL80211_ATTR_MAC]) + connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); + connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); + connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); + + if (info->attrs[NL80211_ATTR_IE]) { + connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]); + connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); + } + + if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { + connect.channel = + ieee80211_get_channel(wiphy, + nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); + if (!connect.channel || + connect.channel->flags & IEEE80211_CHAN_DISABLED) { + err = -EINVAL; + goto out; + } + } + + if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) { + connkeys = nl80211_parse_connkeys(rdev, + info->attrs[NL80211_ATTR_KEYS]); + if (IS_ERR(connkeys)) { + err = PTR_ERR(connkeys); + connkeys = NULL; + goto out; + } + } + + err = cfg80211_connect(rdev, dev, &connect, connkeys); + +out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); +unlock_rtnl: + if (err) + kfree(connkeys); + rtnl_unlock(); + return err; +} + +static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + int err; + u16 reason; + + if (!info->attrs[NL80211_ATTR_REASON_CODE]) + reason = WLAN_REASON_DEAUTH_LEAVING; + else + reason = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); + + if (reason == 0) + return -EINVAL; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + + if (!netif_running(dev)) { + err = -ENETDOWN; + goto out; + } + + err = cfg80211_disconnect(rdev, 
dev, reason, true); + +out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net *net; + int err; + u32 pid; + + if (!info->attrs[NL80211_ATTR_PID]) + return -EINVAL; + + pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]); + + rtnl_lock(); + + rdev = cfg80211_get_dev_from_info(info); + if (IS_ERR(rdev)) { + err = PTR_ERR(rdev); + goto out_rtnl; + } + + net = get_net_ns_by_pid(pid); + if (IS_ERR(net)) { + err = PTR_ERR(net); + goto out; + } + + err = 0; + + /* check if anything to do */ + if (net_eq(wiphy_net(&rdev->wiphy), net)) + goto out_put_net; + + err = cfg80211_switch_netns(rdev, net); + out_put_net: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + put_net(net); +#endif + out: + cfg80211_unlock_rdev(rdev); + out_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int (*rdev_ops)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa) = NULL; + int err; + struct net_device *dev; + struct cfg80211_pmksa pmksa; + + memset(&pmksa, 0, sizeof(struct cfg80211_pmksa)); + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_PMKID]) + return -EINVAL; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]); + pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + + switch (info->genlhdr->cmd) { + case NL80211_CMD_SET_PMKSA: + rdev_ops = rdev->ops->set_pmksa; + break; + case NL80211_CMD_DEL_PMKSA: + rdev_ops = rdev->ops->del_pmksa; + break; + default: + WARN_ON(1); + break; + } + + if (!rdev_ops) { + err = -EOPNOTSUPP; + goto out; + } + + err = rdev_ops(&rdev->wiphy, dev, &pmksa); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; +} + +static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int err; + struct net_device *dev; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto out_rtnl; + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { + err = -EOPNOTSUPP; + goto out; + } + + if (!rdev->ops->flush_pmksa) { + err = -EOPNOTSUPP; + goto out; + } + + err = rdev->ops->flush_pmksa(&rdev->wiphy, dev); + + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + out_rtnl: + rtnl_unlock(); + + return err; + +} + +static int nl80211_remain_on_channel(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + struct ieee80211_channel *chan; + struct sk_buff *msg; + void *hdr; + u64 cookie; + enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; + u32 freq, duration; + int err; + + if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || + !info->attrs[NL80211_ATTR_DURATION]) + return -EINVAL; + + duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); + + /* + * We should be on that channel for at least one jiffie, + * and more than 5 seconds seems excessive. 
+	 */
+	if (!duration || !msecs_to_jiffies(duration) || duration > 5000)
+		return -EINVAL;
+
+	rtnl_lock();
+
+	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+	if (err)
+		goto unlock_rtnl;
+
+	if (!rdev->ops->remain_on_channel) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (!netif_running(dev)) {
+		err = -ENETDOWN;
+		goto out;
+	}
+
+	if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+		channel_type = nla_get_u32(
+			info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+		if (channel_type != NL80211_CHAN_NO_HT &&
+		    channel_type != NL80211_CHAN_HT20 &&
+		    channel_type != NL80211_CHAN_HT40PLUS &&
+		    channel_type != NL80211_CHAN_HT40MINUS) {
+			err = -EINVAL;
+			goto out;
+		}
+	}
+
+	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+	chan = rdev_freq_to_chan(rdev, freq, channel_type);
+	if (chan == NULL) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+			     NL80211_CMD_REMAIN_ON_CHANNEL);
+
+	if (IS_ERR(hdr)) {
+		err = PTR_ERR(hdr);
+		goto free_msg;
+	}
+
+	err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan,
+					   channel_type, duration, &cookie);
+
+	if (err)
+		goto free_msg;
+
+	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+
+	genlmsg_end(msg, hdr);
+	err = genlmsg_reply(msg, info);
+	goto out;
+
+ nla_put_failure:
+	err = -ENOBUFS;
+ free_msg:
+	nlmsg_free(msg);
+ out:
+	cfg80211_unlock_rdev(rdev);
+	dev_put(dev);
+ unlock_rtnl:
+	rtnl_unlock();
+	return err;
+}
+
+static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
+					    struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev;
+	struct net_device *dev;
+	u64 cookie;
+	int err;
+
+	if (!info->attrs[NL80211_ATTR_COOKIE])
+		return -EINVAL;
+
+	rtnl_lock();
+
+	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+	if (err)
+		goto unlock_rtnl;
+
+	if (!rdev->ops->cancel_remain_on_channel) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (!netif_running(dev)) {
+		err = -ENETDOWN;
+		goto out;
+	}
+
+	cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
+
+	err = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
+
+ out:
+	cfg80211_unlock_rdev(rdev);
+	dev_put(dev);
+ unlock_rtnl:
+	rtnl_unlock();
+	return err;
+}
+
+static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
+			   u8 *rates, u8 rates_len)
+{
+	u8 i;
+	u32 mask = 0;
+
+	for (i = 0; i < rates_len; i++) {
+		int rate = (rates[i] & 0x7f) * 5;
+		int ridx;
+		for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
+			struct ieee80211_rate *srate =
+				&sband->bitrates[ridx];
+			if (rate == srate->bitrate) {
+				mask |= 1 << ridx;
+				break;
+			}
+		}
+		if (ridx == sband->n_bitrates)
+			return 0; /* rate not found */
+	}
+
+	return mask;
+}
+
+static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
+	[NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
+				    .len = NL80211_MAX_SUPP_RATES },
+};
+
+static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
+				       struct genl_info *info)
+{
+	struct nlattr *tb[NL80211_TXRATE_MAX + 1];
+	struct cfg80211_registered_device *rdev;
+	struct cfg80211_bitrate_mask mask;
+	int err, rem, i;
+	struct net_device *dev;
+	struct nlattr *tx_rates;
+	struct ieee80211_supported_band *sband;
+
+	if (info->attrs[NL80211_ATTR_TX_RATES] == NULL)
+		return -EINVAL;
+
+	rtnl_lock();
+
+	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+	if (err)
+		goto unlock_rtnl;
+
+	if (!rdev->ops->set_bitrate_mask) {
+		err = -EOPNOTSUPP;
+		goto unlock;
+	}
+
+	memset(&mask, 0, sizeof(mask));
+	/* Default to all rates enabled */
+	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+		sband = rdev->wiphy.bands[i];
+		mask.control[i].legacy =
+			sband ? (1 << sband->n_bitrates) - 1 : 0;
+	}
+
+	/*
+	 * The nested attribute uses enum nl80211_band as the index. This maps
+	 * directly to the enum ieee80211_band values used in cfg80211.
+	 */
+	nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
+	{
+		enum ieee80211_band band = nla_type(tx_rates);
+		if (band < 0 || band >= IEEE80211_NUM_BANDS) {
+			err = -EINVAL;
+			goto unlock;
+		}
+		sband = rdev->wiphy.bands[band];
+		if (sband == NULL) {
+			err = -EINVAL;
+			goto unlock;
+		}
+		nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
+			  nla_len(tx_rates), nl80211_txattr_policy);
+		if (tb[NL80211_TXRATE_LEGACY]) {
+			mask.control[band].legacy = rateset_to_mask(
+				sband,
+				nla_data(tb[NL80211_TXRATE_LEGACY]),
+				nla_len(tb[NL80211_TXRATE_LEGACY]));
+			if (mask.control[band].legacy == 0) {
+				err = -EINVAL;
+				goto unlock;
+			}
+		}
+	}
+
+	err = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask);
+
+ unlock:
+	dev_put(dev);
+	cfg80211_unlock_rdev(rdev);
+ unlock_rtnl:
+	rtnl_unlock();
+	return err;
+}
+
+static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev;
+	struct net_device *dev;
+	int err;
+
+	if (!info->attrs[NL80211_ATTR_FRAME_MATCH])
+		return -EINVAL;
+
+	if (nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]) < 1)
+		return -EINVAL;
+
+	rtnl_lock();
+
+	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+	if (err)
+		goto unlock_rtnl;
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	/* not much point in registering if we can't reply */
+	if (!rdev->ops->action) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	err = cfg80211_mlme_register_action(dev->ieee80211_ptr, info->snd_pid,
+			nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
+			nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
+ out:
+	cfg80211_unlock_rdev(rdev);
+	dev_put(dev);
+ unlock_rtnl:
+	rtnl_unlock();
+	return err;
+}
+
+static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev;
+	struct net_device *dev;
+	struct ieee80211_channel *chan;
+	enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+	u32 freq;
+	int err;
+	void *hdr;
+	u64 cookie;
+	struct sk_buff *msg;
+
+	if (!info->attrs[NL80211_ATTR_FRAME] ||
+	    !info->attrs[NL80211_ATTR_WIPHY_FREQ])
+		return -EINVAL;
+
+	rtnl_lock();
+
+	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+	if (err)
+		goto unlock_rtnl;
+
+	if (!rdev->ops->action) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (!netif_running(dev)) {
+		err = -ENETDOWN;
+		goto out;
+	}
+
+	if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+		channel_type = nla_get_u32(
+			info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+		if (channel_type != NL80211_CHAN_NO_HT &&
+		    channel_type != NL80211_CHAN_HT20 &&
+		    channel_type != NL80211_CHAN_HT40PLUS &&
+		    channel_type != NL80211_CHAN_HT40MINUS) {
+			err = -EINVAL;
+			goto out;
+		}
+	}
+
+	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+	chan = rdev_freq_to_chan(rdev, freq, channel_type);
+	if (chan == NULL) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, +
NL80211_CMD_ACTION); + + if (IS_ERR(hdr)) { + err = PTR_ERR(hdr); + goto free_msg; + } + err = cfg80211_mlme_action(rdev, dev, chan, channel_type, + nla_data(info->attrs[NL80211_ATTR_FRAME]), + nla_len(info->attrs[NL80211_ATTR_FRAME]), + &cookie); + if (err) + goto free_msg; + + NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + + genlmsg_end(msg, hdr); + err = genlmsg_reply(msg, info); + goto out; + + nla_put_failure: + err = -ENOBUFS; + free_msg: + nlmsg_free(msg); + out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); +unlock_rtnl: + rtnl_unlock(); + return err; +} + +static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + struct net_device *dev; + u8 ps_state; + bool state; + int err; + + if (!info->attrs[NL80211_ATTR_PS_STATE]) { + err = -EINVAL; + goto out; + } + + ps_state = nla_get_u32(info->attrs[NL80211_ATTR_PS_STATE]); + + if (ps_state != NL80211_PS_DISABLED && ps_state != NL80211_PS_ENABLED) { + err = -EINVAL; + goto out; + } + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rdev; + + wdev = dev->ieee80211_ptr; + + if (!rdev->ops->set_power_mgmt) { + err = -EOPNOTSUPP; + goto unlock_rdev; + } + + state = (ps_state == NL80211_PS_ENABLED) ? true : false; + + if (state == wdev->ps) + goto unlock_rdev; + + wdev->ps = state; + + if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, wdev->ps, + wdev->ps_timeout)) + /* assume this means it's off */ + wdev->ps = false; + +unlock_rdev: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + rtnl_unlock(); + +out: + return err; +} + +static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + enum nl80211_ps_state ps_state; + struct wireless_dev *wdev; + struct net_device *dev; + struct sk_buff *msg; + void *hdr; + int err; + + rtnl_lock(); + + err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); + if (err) + goto unlock_rtnl; + + wdev = dev->ieee80211_ptr; + + if (!rdev->ops->set_power_mgmt) { + err = -EOPNOTSUPP; + goto out; + } + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) { + err = -ENOMEM; + goto out; + } + + hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, + NL80211_CMD_GET_POWER_SAVE); + if (!hdr) { + err = -ENOMEM; + goto free_msg; + } + + if (wdev->ps) + ps_state = NL80211_PS_ENABLED; + else + ps_state = NL80211_PS_DISABLED; + + NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state); + + genlmsg_end(msg, hdr); + err = genlmsg_reply(msg, info); + goto out; + +nla_put_failure: + err = -ENOBUFS; + +free_msg: + nlmsg_free(msg); + +out: + cfg80211_unlock_rdev(rdev); + dev_put(dev); + +unlock_rtnl: + rtnl_unlock(); + + return err; +} + +static struct genl_ops nl80211_ops[] = { + { + .cmd = NL80211_CMD_GET_WIPHY, + .doit = nl80211_get_wiphy, + .dumpit = nl80211_dump_wiphy, + .policy = nl80211_policy, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = NL80211_CMD_SET_WIPHY, + .doit = nl80211_set_wiphy, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_GET_INTERFACE, + .doit = nl80211_get_interface, + .dumpit = nl80211_dump_interface, + .policy = nl80211_policy, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = NL80211_CMD_SET_INTERFACE, + .doit = nl80211_set_interface, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_NEW_INTERFACE, + .doit = nl80211_new_interface, + .policy = nl80211_policy, + .flags = 
GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_DEL_INTERFACE, + .doit = nl80211_del_interface, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_GET_KEY, + .doit = nl80211_get_key, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_SET_KEY, + .doit = nl80211_set_key, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_NEW_KEY, + .doit = nl80211_new_key, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_DEL_KEY, + .doit = nl80211_del_key, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_SET_BEACON, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .doit = nl80211_addset_beacon, + }, + { + .cmd = NL80211_CMD_NEW_BEACON, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .doit = nl80211_addset_beacon, + }, + { + .cmd = NL80211_CMD_DEL_BEACON, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .doit = nl80211_del_beacon, + }, + { + .cmd = NL80211_CMD_GET_STATION, + .doit = nl80211_get_station, + .dumpit = nl80211_dump_station, + .policy = nl80211_policy, + }, + { + .cmd = NL80211_CMD_SET_STATION, + .doit = nl80211_set_station, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_NEW_STATION, + .doit = nl80211_new_station, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_DEL_STATION, + .doit = nl80211_del_station, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_GET_MPATH, + .doit = nl80211_get_mpath, + .dumpit = nl80211_dump_mpath, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_SET_MPATH, + .doit = nl80211_set_mpath, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_NEW_MPATH, + .doit = nl80211_new_mpath, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_DEL_MPATH, + .doit = nl80211_del_mpath, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_SET_BSS, + .doit = nl80211_set_bss, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_GET_REG, + .doit = nl80211_get_reg, + .policy = nl80211_policy, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = NL80211_CMD_SET_REG, + .doit = nl80211_set_reg, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_REQ_SET_REG, + .doit = nl80211_req_set_reg, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_GET_MESH_PARAMS, + .doit = nl80211_get_mesh_params, + .policy = nl80211_policy, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = NL80211_CMD_SET_MESH_PARAMS, + .doit = nl80211_set_mesh_params, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_TRIGGER_SCAN, + .doit = nl80211_trigger_scan, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_GET_SCAN, + .policy = nl80211_policy, + .dumpit = nl80211_dump_scan, + }, + { + .cmd = NL80211_CMD_AUTHENTICATE, + .doit = nl80211_authenticate, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_ASSOCIATE, + .doit = nl80211_associate, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_DEAUTHENTICATE, + .doit = nl80211_deauthenticate, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = 
NL80211_CMD_DISASSOCIATE, + .doit = nl80211_disassociate, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_JOIN_IBSS, + .doit = nl80211_join_ibss, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_LEAVE_IBSS, + .doit = nl80211_leave_ibss, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, +#ifdef CONFIG_NL80211_TESTMODE + { + .cmd = NL80211_CMD_TESTMODE, + .doit = nl80211_testmode_do, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, +#endif + { + .cmd = NL80211_CMD_CONNECT, + .doit = nl80211_connect, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_DISCONNECT, + .doit = nl80211_disconnect, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_SET_WIPHY_NETNS, + .doit = nl80211_wiphy_netns, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_GET_SURVEY, + .policy = nl80211_policy, + .dumpit = nl80211_dump_survey, + }, + { + .cmd = NL80211_CMD_SET_PMKSA, + .doit = nl80211_setdel_pmksa, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_DEL_PMKSA, + .doit = nl80211_setdel_pmksa, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_FLUSH_PMKSA, + .doit = nl80211_flush_pmksa, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_REMAIN_ON_CHANNEL, + .doit = nl80211_remain_on_channel, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, + .doit = nl80211_cancel_remain_on_channel, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_SET_TX_BITRATE_MASK, + .doit = nl80211_set_tx_bitrate_mask, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_REGISTER_ACTION, + .doit = nl80211_register_action, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_ACTION, + .doit = nl80211_action, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_SET_POWER_SAVE, + .doit = nl80211_set_power_save, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_GET_POWER_SAVE, + .doit = nl80211_get_power_save, + .policy = nl80211_policy, + /* can be retrieved by unprivileged users */ + }, +}; + +static struct genl_multicast_group nl80211_mlme_mcgrp = { + .name = "mlme", +}; + +/* multicast groups */ +static struct genl_multicast_group nl80211_config_mcgrp = { + .name = "config", +}; +static struct genl_multicast_group nl80211_scan_mcgrp = { + .name = "scan", +}; +static struct genl_multicast_group nl80211_regulatory_mcgrp = { + .name = "regulatory", +}; + +/* notification functions */ + +void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev) +{ + struct sk_buff *msg; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + if (nl80211_send_wiphy(msg, 0, 0, 0, rdev) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_config_mcgrp.id, GFP_KERNEL); +} + +static int nl80211_add_scan_req(struct sk_buff *msg, + struct cfg80211_registered_device *rdev) +{ + struct cfg80211_scan_request *req = rdev->scan_req; + struct nlattr *nest; + int i; + + ASSERT_RDEV_LOCK(rdev); + + if (WARN_ON(!req)) + return 0; + + nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS); + if (!nest) + goto nla_put_failure; + for (i = 0; i < 
req->n_ssids; i++) + NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid); + nla_nest_end(msg, nest); + + nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); + if (!nest) + goto nla_put_failure; + for (i = 0; i < req->n_channels; i++) + NLA_PUT_U32(msg, i, req->channels[i]->center_freq); + nla_nest_end(msg, nest); + + if (req->ie) + NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie); + + return 0; + nla_put_failure: + return -ENOBUFS; +} + +static int nl80211_send_scan_msg(struct sk_buff *msg, + struct cfg80211_registered_device *rdev, + struct net_device *netdev, + u32 pid, u32 seq, int flags, + u32 cmd) +{ + void *hdr; + + hdr = nl80211hdr_put(msg, pid, seq, flags, cmd); + if (!hdr) + return -1; + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + + /* ignore errors and send incomplete event anyway */ + nl80211_add_scan_req(msg, rdev); + + return genlmsg_end(msg, hdr); + + nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, + struct net_device *netdev) +{ + struct sk_buff *msg; + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return; + + if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, + NL80211_CMD_TRIGGER_SCAN) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_scan_mcgrp.id, GFP_KERNEL); +} + +void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, + struct net_device *netdev) +{ + struct sk_buff *msg; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, + NL80211_CMD_NEW_SCAN_RESULTS) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_scan_mcgrp.id, GFP_KERNEL); +} + +void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, + struct net_device *netdev) +{ + struct sk_buff *msg; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, + NL80211_CMD_SCAN_ABORTED) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_scan_mcgrp.id, GFP_KERNEL); +} + +/* + * This can happen on global regulatory changes or device specific settings + * based on custom world regulatory domains. 
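As an aside on the scan-event code just above: nl80211_add_scan_req() wraps the SSID and frequency lists with nla_nest_start()/nla_nest_end(), i.e. it reserves an outer attribute header, appends the child attributes, and then patches the outer length once the children are written. The sketch below shows only that nesting idea in self-contained form; the struct attr_hdr layout and helper names are invented for illustration and are not the kernel's netlink API (real netlink attributes are additionally padded to 4-byte boundaries).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct attr_hdr {		/* simplified TLV header */
	uint16_t len;		/* header + payload length */
	uint16_t type;
};

/* reserve space for an outer attribute header, return its offset */
static size_t nest_start(uint8_t *buf, size_t *off, uint16_t type)
{
	size_t start = *off;
	struct attr_hdr hdr = { 0, type };

	memcpy(buf + *off, &hdr, sizeof(hdr));
	*off += sizeof(hdr);
	return start;
}

/* append a leaf attribute with its payload */
static void put_attr(uint8_t *buf, size_t *off, uint16_t type,
		     const void *data, uint16_t dlen)
{
	struct attr_hdr hdr = { (uint16_t)(sizeof(struct attr_hdr) + dlen), type };

	memcpy(buf + *off, &hdr, sizeof(hdr));
	memcpy(buf + *off + sizeof(hdr), data, dlen);
	*off += sizeof(hdr) + dlen;
}

/* patch the outer header now that the children are known */
static void nest_end(uint8_t *buf, size_t off, size_t start)
{
	struct attr_hdr hdr;

	memcpy(&hdr, buf + start, sizeof(hdr));
	hdr.len = (uint16_t)(off - start);
	memcpy(buf + start, &hdr, sizeof(hdr));
}

int main(void)
{
	uint8_t buf[256];
	size_t off = 0;
	uint32_t freq1 = 2412, freq2 = 2437;
	struct attr_hdr outer;

	size_t nest = nest_start(buf, &off, 1 /* "scan frequencies" */);
	put_attr(buf, &off, 0, &freq1, sizeof(freq1));
	put_attr(buf, &off, 1, &freq2, sizeof(freq2));
	nest_end(buf, off, nest);

	memcpy(&outer, buf, sizeof(outer));
	printf("outer attribute length: %u bytes\n", (unsigned)outer.len);
	return 0;
}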
+ */ +void nl80211_send_reg_change_event(struct regulatory_request *request) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_CHANGE); + if (!hdr) { + nlmsg_free(msg); + return; + } + + /* Userspace can always count this one always being set */ + NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator); + + if (request->alpha2[0] == '0' && request->alpha2[1] == '0') + NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_WORLD); + else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') + NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_CUSTOM_WORLD); + else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') || + request->intersect) + NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_INTERSECTION); + else { + NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, + NL80211_REGDOM_TYPE_COUNTRY); + NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2); + } + + if (wiphy_idx_valid(request->wiphy_idx)) + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + rcu_read_lock(); + genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, + GFP_ATOMIC); + rcu_read_unlock(); + + return; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + +static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *buf, size_t len, + enum nl80211_commands cmd, gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + +void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *buf, + size_t len, gfp_t gfp) +{ + nl80211_send_mlme_event(rdev, netdev, buf, len, + NL80211_CMD_AUTHENTICATE, gfp); +} + +void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *buf, + size_t len, gfp_t gfp) +{ + nl80211_send_mlme_event(rdev, netdev, buf, len, + NL80211_CMD_ASSOCIATE, gfp); +} + +void nl80211_send_deauth(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *buf, + size_t len, gfp_t gfp) +{ + nl80211_send_mlme_event(rdev, netdev, buf, len, + NL80211_CMD_DEAUTHENTICATE, gfp); +} + +void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *buf, + size_t len, gfp_t gfp) +{ + nl80211_send_mlme_event(rdev, netdev, buf, len, + NL80211_CMD_DISASSOCIATE, gfp); +} + +static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, + struct net_device *netdev, int cmd, + const u8 *addr, gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, 
NL80211_ATTR_IFINDEX, netdev->ifindex); + NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT); + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + +void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *addr, + gfp_t gfp) +{ + nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_AUTHENTICATE, + addr, gfp); +} + +void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *addr, + gfp_t gfp) +{ + nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE, + addr, gfp); +} + +void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, + u16 status, gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_GOODSIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONNECT); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (bssid) + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); + NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status); + if (req_ie) + NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); + if (resp_ie) + NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); + +} + +void nl80211_send_roamed(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_GOODSIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ROAM); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); + if (req_ie) + NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); + if (resp_ie) + NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); + +} + +void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, + struct net_device *netdev, u16 reason, + const u8 *ie, size_t ie_len, bool from_ap) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (from_ap && reason) + NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason); + if (from_ap) + NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP); + if (ie) + NLA_PUT(msg, NL80211_ATTR_IE, 
ie_len, ie); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, GFP_KERNEL); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); + +} + +void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *bssid, + gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_JOIN_IBSS); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + +void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *addr, + enum nl80211_key_type key_type, int key_id, + const u8 *tsc, gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_MICHAEL_MIC_FAILURE); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + if (addr) + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); + NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type); + NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id); + if (tsc) + NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + +void nl80211_send_beacon_hint_event(struct wiphy *wiphy, + struct ieee80211_channel *channel_before, + struct ieee80211_channel *channel_after) +{ + struct sk_buff *msg; + void *hdr; + struct nlattr *nl_freq; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_BEACON_HINT); + if (!hdr) { + nlmsg_free(msg); + return; + } + + /* + * Since we are applying the beacon hint to a wiphy we know its + * wiphy_idx is valid + */ + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)); + + /* Before */ + nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE); + if (!nl_freq) + goto nla_put_failure; + if (nl80211_msg_put_channel(msg, channel_before)) + goto nla_put_failure; + nla_nest_end(msg, nl_freq); + + /* After */ + nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER); + if (!nl_freq) + goto nla_put_failure; + if (nl80211_msg_put_channel(msg, channel_after)) + goto nla_put_failure; + nla_nest_end(msg, nl_freq); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + rcu_read_lock(); + genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, + GFP_ATOMIC); + rcu_read_unlock(); + + return; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + +static void nl80211_send_remain_on_chan_event( + int cmd, struct cfg80211_registered_device *rdev, + struct net_device *netdev, u64 cookie, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int duration, 
gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq); + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type); + NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + + if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL) + NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + +void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, + struct net_device *netdev, u64 cookie, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int duration, gfp_t gfp) +{ + nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, + rdev, netdev, cookie, chan, + channel_type, duration, gfp); +} + +void nl80211_send_remain_on_channel_cancel( + struct cfg80211_registered_device *rdev, struct net_device *netdev, + u64 cookie, struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, gfp_t gfp) +{ + nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, + rdev, netdev, cookie, chan, + channel_type, 0, gfp); +} + +void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *mac_addr, + struct station_info *sinfo, gfp_t gfp) +{ + struct sk_buff *msg; + + msg = nlmsg_new(NLMSG_GOODSIZE, gfp); + if (!msg) + return; + + if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); +} + +int nl80211_send_action(struct cfg80211_registered_device *rdev, + struct net_device *netdev, u32 nlpid, + int freq, const u8 *buf, size_t len, gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + int err; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + if (!msg) + return -ENOMEM; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION); + if (!hdr) { + nlmsg_free(msg); + return -ENOMEM; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); + NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); + + err = genlmsg_end(msg, hdr); + if (err < 0) { + nlmsg_free(msg); + return err; + } + + err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); + if (err < 0) + return err; + return 0; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); + return -ENOBUFS; +} + +void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev, + struct net_device *netdev, u64 cookie, + const u8 *buf, size_t len, bool ack, + gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION_TX_STATUS); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); + NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); + NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (ack) + NLA_PUT_FLAG(msg, 
NL80211_ATTR_ACK); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + +static int nl80211_netlink_notify(struct notifier_block * nb, + unsigned long state, + void *_notify) +{ + struct netlink_notify *notify = _notify; + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + + if (state != NETLINK_URELEASE) + return NOTIFY_DONE; + + rcu_read_lock(); + + list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) + list_for_each_entry_rcu(wdev, &rdev->netdev_list, list) + cfg80211_mlme_unregister_actions(wdev, notify->pid); + + rcu_read_unlock(); + + return NOTIFY_DONE; +} + +static struct notifier_block nl80211_netlink_notifier = { + .notifier_call = nl80211_netlink_notify, +}; + +/* initialisation/exit functions */ + +int nl80211_init(void) +{ + int err; + + err = genl_register_family_with_ops(&nl80211_fam, + nl80211_ops, ARRAY_SIZE(nl80211_ops)); + if (err) + return err; + + err = genl_register_mc_group(&nl80211_fam, &nl80211_config_mcgrp); + if (err) + goto err_out; + + err = genl_register_mc_group(&nl80211_fam, &nl80211_scan_mcgrp); + if (err) + goto err_out; + + err = genl_register_mc_group(&nl80211_fam, &nl80211_regulatory_mcgrp); + if (err) + goto err_out; + + err = genl_register_mc_group(&nl80211_fam, &nl80211_mlme_mcgrp); + if (err) + goto err_out; + +#ifdef CONFIG_NL80211_TESTMODE + err = genl_register_mc_group(&nl80211_fam, &nl80211_testmode_mcgrp); + if (err) + goto err_out; +#endif + + err = netlink_register_notifier(&nl80211_netlink_notifier); + if (err) + goto err_out; + + return 0; + err_out: + genl_unregister_family(&nl80211_fam); + return err; +} + +void nl80211_exit(void) +{ + netlink_unregister_notifier(&nl80211_netlink_notifier); + genl_unregister_family(&nl80211_fam); +} diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h new file mode 100644 index 0000000..4ca5111 --- /dev/null +++ b/net/wireless/nl80211.h @@ -0,0 +1,85 @@ +#ifndef __NET_WIRELESS_NL80211_H +#define __NET_WIRELESS_NL80211_H + +#include "core.h" + +int nl80211_init(void); +void nl80211_exit(void); +void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); +void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, + struct net_device *netdev); +void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, + struct net_device *netdev); +void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, + struct net_device *netdev); +void nl80211_send_reg_change_event(struct regulatory_request *request); +void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *buf, size_t len, gfp_t gfp); +void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *buf, size_t len, gfp_t gfp); +void nl80211_send_deauth(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *buf, size_t len, gfp_t gfp); +void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *buf, size_t len, gfp_t gfp); +void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *addr, gfp_t gfp); +void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + const u8 *addr, gfp_t gfp); +void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, + 
struct net_device *netdev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, + u16 status, gfp_t gfp); +void nl80211_send_roamed(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); +void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, + struct net_device *netdev, u16 reason, + const u8 *ie, size_t ie_len, bool from_ap); + +void +nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *addr, + enum nl80211_key_type key_type, + int key_id, const u8 *tsc, gfp_t gfp); + +void +nl80211_send_beacon_hint_event(struct wiphy *wiphy, + struct ieee80211_channel *channel_before, + struct ieee80211_channel *channel_after); + +void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, + struct net_device *netdev, const u8 *bssid, + gfp_t gfp); + +void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, + struct net_device *netdev, + u64 cookie, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int duration, gfp_t gfp); +void nl80211_send_remain_on_channel_cancel( + struct cfg80211_registered_device *rdev, struct net_device *netdev, + u64 cookie, struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, gfp_t gfp); + +void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *mac_addr, + struct station_info *sinfo, gfp_t gfp); + +int nl80211_send_action(struct cfg80211_registered_device *rdev, + struct net_device *netdev, u32 nlpid, int freq, + const u8 *buf, size_t len, gfp_t gfp); +void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev, + struct net_device *netdev, u64 cookie, + const u8 *buf, size_t len, bool ack, + gfp_t gfp); + +#endif /* __NET_WIRELESS_NL80211_H */ diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c new file mode 100644 index 0000000..1332c44 --- /dev/null +++ b/net/wireless/radiotap.c @@ -0,0 +1,350 @@ +/* + * Radiotap parser + * + * Copyright 2007 Andy Green + * Copyright 2009 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Alternatively, this software may be distributed under the terms of BSD + * license. + * + * See COPYING for more details. 
+ */ + +#include +#include +#include + +/* function prototypes and related defs are in include/net/cfg80211.h */ + +static const struct radiotap_align_size rtap_namespace_sizes[] = { + [IEEE80211_RADIOTAP_TSFT] = { .align = 8, .size = 8, }, + [IEEE80211_RADIOTAP_FLAGS] = { .align = 1, .size = 1, }, + [IEEE80211_RADIOTAP_RATE] = { .align = 1, .size = 1, }, + [IEEE80211_RADIOTAP_CHANNEL] = { .align = 2, .size = 4, }, + [IEEE80211_RADIOTAP_FHSS] = { .align = 2, .size = 2, }, + [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = { .align = 1, .size = 1, }, + [IEEE80211_RADIOTAP_DBM_ANTNOISE] = { .align = 1, .size = 1, }, + [IEEE80211_RADIOTAP_LOCK_QUALITY] = { .align = 2, .size = 2, }, + [IEEE80211_RADIOTAP_TX_ATTENUATION] = { .align = 2, .size = 2, }, + [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = { .align = 2, .size = 2, }, + [IEEE80211_RADIOTAP_DBM_TX_POWER] = { .align = 1, .size = 1, }, + [IEEE80211_RADIOTAP_ANTENNA] = { .align = 1, .size = 1, }, + [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = { .align = 1, .size = 1, }, + [IEEE80211_RADIOTAP_DB_ANTNOISE] = { .align = 1, .size = 1, }, + [IEEE80211_RADIOTAP_RX_FLAGS] = { .align = 2, .size = 2, }, + [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, }, + [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, }, + [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, }, + /* + * add more here as they are defined in radiotap.h + */ +}; + +static const struct ieee80211_radiotap_namespace radiotap_ns = { + .n_bits = sizeof(rtap_namespace_sizes) / sizeof(rtap_namespace_sizes[0]), + .align_size = rtap_namespace_sizes, +}; + +/** + * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization + * @iterator: radiotap_iterator to initialize + * @radiotap_header: radiotap header to parse + * @max_length: total length we can parse into (eg, whole packet length) + * + * Returns: 0 or a negative error code if there is a problem. + * + * This function initializes an opaque iterator struct which can then + * be passed to ieee80211_radiotap_iterator_next() to visit every radiotap + * argument which is present in the header. It knows about extended + * present headers and handles them. + * + * How to use: + * call __ieee80211_radiotap_iterator_init() to init a semi-opaque iterator + * struct ieee80211_radiotap_iterator (no need to init the struct beforehand) + * checking for a good 0 return code. Then loop calling + * __ieee80211_radiotap_iterator_next()... it returns either 0, + * -ENOENT if there are no more args to parse, or -EINVAL if there is a problem. + * The iterator's @this_arg member points to the start of the argument + * associated with the current argument index that is present, which can be + * found in the iterator's @this_arg_index member. This arg index corresponds + * to the IEEE80211_RADIOTAP_... defines. + * + * Radiotap header length: + * You can find the CPU-endian total radiotap header length in + * iterator->max_length after executing ieee80211_radiotap_iterator_init() + * successfully. + * + * Alignment Gotcha: + * You must take care when dereferencing iterator.this_arg + * for multibyte types... the pointer is not aligned. Use + * get_unaligned((type *)iterator.this_arg) to dereference + * iterator.this_arg for type "type" safely on all arches. 
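The align/size table above and the iterator code that follows read multi-byte radiotap fields with get_unaligned_le16()/get_unaligned_le32(), because nothing in a radiotap header guarantees natural alignment (the "Alignment Gotcha" mentioned in the comments). As a rough standalone illustration of what such helpers do conceptually (this is not the kernel implementation), an unaligned little-endian read can be assembled byte by byte:

#include <stdint.h>
#include <stdio.h>

/* read a little-endian u16/u32 from a possibly unaligned pointer */
static uint16_t rd_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static uint32_t rd_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* a made-up 8-byte radiotap header: version 0, pad 0,
	 * it_len = 25, it_present = 0x0000482e (values are arbitrary) */
	const uint8_t hdr[] = { 0x00, 0x00, 0x19, 0x00,
				0x2e, 0x48, 0x00, 0x00 };

	printf("it_len     = %u\n", rd_le16(hdr + 2));		/* 25 */
	printf("it_present = 0x%08x\n", rd_le32(hdr + 4));	/* 0x0000482e */
	return 0;
}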
+ *
+ * Example code:
+ * See Documentation/networking/radiotap-headers.txt
+ */
+
+int ieee80211_radiotap_iterator_init(
+	struct ieee80211_radiotap_iterator *iterator,
+	struct ieee80211_radiotap_header *radiotap_header,
+	int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
+{
+	/* Linux only supports version 0 radiotap format */
+	if (radiotap_header->it_version)
+		return -EINVAL;
+
+	/* sanity check for allowed length and radiotap length field */
+	if (max_length < get_unaligned_le16(&radiotap_header->it_len))
+		return -EINVAL;
+
+	iterator->_rtheader = radiotap_header;
+	iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len);
+	iterator->_arg_index = 0;
+	iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
+	iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header);
+	iterator->_reset_on_ext = 0;
+	iterator->_next_bitmap = &radiotap_header->it_present;
+	iterator->_next_bitmap++;
+	iterator->_vns = vns;
+	iterator->current_namespace = &radiotap_ns;
+	iterator->is_radiotap_ns = 1;
+
+	/* find payload start allowing for extended bitmap(s) */
+
+	if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
+		while (get_unaligned_le32(iterator->_arg) &
+		       (1 << IEEE80211_RADIOTAP_EXT)) {
+			iterator->_arg += sizeof(uint32_t);
+
+			/*
+			 * check for insanity where the present bitmaps
+			 * keep claiming to extend up to or even beyond the
+			 * stated radiotap header length
+			 */
+
+			if ((unsigned long)iterator->_arg -
+			    (unsigned long)iterator->_rtheader >
+			    (unsigned long)iterator->_max_length)
+				return -EINVAL;
+		}
+
+		iterator->_arg += sizeof(uint32_t);
+
+		/*
+		 * no need to check again for blowing past stated radiotap
+		 * header length, because ieee80211_radiotap_iterator_next
+		 * checks it before it is dereferenced
+		 */
+	}
+
+	iterator->this_arg = iterator->_arg;
+
+	/* we are all initialized happily */
+
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
+
+static void find_ns(struct ieee80211_radiotap_iterator *iterator,
+		    uint32_t oui, uint8_t subns)
+{
+	int i;
+
+	iterator->current_namespace = NULL;
+
+	if (!iterator->_vns)
+		return;
+
+	for (i = 0; i < iterator->_vns->n_ns; i++) {
+		if (iterator->_vns->ns[i].oui != oui)
+			continue;
+		if (iterator->_vns->ns[i].subns != subns)
+			continue;
+
+		iterator->current_namespace = &iterator->_vns->ns[i];
+		break;
+	}
+}
+
+
+
+/**
+ * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg
+ * @iterator: radiotap_iterator to move to next arg (if any)
+ *
+ * Returns: 0 if there is an argument to handle,
+ * -ENOENT if there are no more args or -EINVAL
+ * if there is something else wrong.
+ *
+ * This function provides the next radiotap arg index (IEEE80211_RADIOTAP_*)
+ * in @this_arg_index and sets @this_arg to point to the
+ * payload for the field. It takes care of alignment handling and extended
+ * present fields. @this_arg can be changed by the caller (eg,
+ * incremented to move inside a compound argument like
+ * IEEE80211_RADIOTAP_CHANNEL). The args pointed to are in
+ * little-endian format whatever the endianness of your CPU.
+ *
+ * Alignment Gotcha:
+ * You must take care when dereferencing iterator.this_arg
+ * for multibyte types... the pointer is not aligned. Use
+ * get_unaligned((type *)iterator.this_arg) to dereference
+ * iterator.this_arg for type "type" safely on all arches.
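ieee80211_radiotap_iterator_init() above skips any chained "present" words before the field payload starts: bit 31 of a present word (IEEE80211_RADIOTAP_EXT) means another 32-bit present word follows. A simplified, self-contained walk of that chain might look like the sketch below; the helper name and host-order bitmaps are assumptions for illustration only, not the kernel iterator.

#include <stdint.h>
#include <stdio.h>

#define RADIOTAP_EXT_BIT 31	/* "another present word follows" */

/*
 * Count the 32-bit present words in a radiotap header and return the
 * byte offset of the first field payload after them. The 8-byte fixed
 * header already contains the first present word.
 */
static size_t payload_offset(const uint32_t *present)
{
	size_t n = 1;

	/* follow the chain while bit 31 says one more word follows */
	while (present[n - 1] & (1u << RADIOTAP_EXT_BIT))
		n++;

	return 8 + (n - 1) * sizeof(uint32_t);
}

int main(void)
{
	/* one ordinary bitmap, then one chaining a second bitmap behind it */
	uint32_t plain[] = { 0x0000482e };
	uint32_t chained[] = { 0x0000482e | (1u << RADIOTAP_EXT_BIT),
			       0x0000000f };

	printf("plain:   payload at byte %zu\n", payload_offset(plain));   /* 8 */
	printf("chained: payload at byte %zu\n", payload_offset(chained)); /* 12 */
	return 0;
}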
+ */ + +int ieee80211_radiotap_iterator_next( + struct ieee80211_radiotap_iterator *iterator) +{ + while (1) { + int hit = 0; + int pad, align, size, subns, vnslen; + uint32_t oui; + + /* if no more EXT bits, that's it */ + if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT && + !(iterator->_bitmap_shifter & 1)) + return -ENOENT; + + if (!(iterator->_bitmap_shifter & 1)) + goto next_entry; /* arg not present */ + + /* get alignment/size of data */ + switch (iterator->_arg_index % 32) { + case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: + case IEEE80211_RADIOTAP_EXT: + align = 1; + size = 0; + break; + case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: + align = 2; + size = 6; + break; + default: + if (!iterator->current_namespace || + iterator->_arg_index >= iterator->current_namespace->n_bits) { + if (iterator->current_namespace == &radiotap_ns) + return -ENOENT; + align = 0; + } else { + align = iterator->current_namespace->align_size[iterator->_arg_index].align; + size = iterator->current_namespace->align_size[iterator->_arg_index].size; + } + if (!align) { + /* skip all subsequent data */ + iterator->_arg = iterator->_next_ns_data; + /* give up on this namespace */ + iterator->current_namespace = NULL; + goto next_entry; + } + break; + } + + /* + * arg is present, account for alignment padding + * + * Note that these alignments are relative to the start + * of the radiotap header. There is no guarantee + * that the radiotap header itself is aligned on any + * kind of boundary. + * + * The above is why get_unaligned() is used to dereference + * multibyte elements from the radiotap area. + */ + + pad = ((unsigned long)iterator->_arg - + (unsigned long)iterator->_rtheader) & (align - 1); + + if (pad) + iterator->_arg += align - pad; + + /* + * this is what we will return to user, but we need to + * move on first so next call has something fresh to test + */ + iterator->this_arg_index = iterator->_arg_index; + iterator->this_arg = iterator->_arg; + iterator->this_arg_size = size; + + /* internally move on the size of this arg */ + iterator->_arg += size; + + /* + * check for insanity where we are given a bitmap that + * claims to have more arg content than the length of the + * radiotap section. We will normally end up equalling this + * max_length on the last arg, never exceeding it. 
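The padding step in iterator_next() above, pad = (arg - rtheader) & (align - 1), keeps every field aligned relative to the start of the radiotap header (never to an absolute address). A tiny standalone check of that arithmetic, using a few alignments/sizes taken from the rtap_namespace_sizes table earlier in this file:

#include <stddef.h>
#include <stdio.h>

/* advance 'off' (offset from the radiotap header) to 'align' bytes */
static size_t align_up(size_t off, size_t align)
{
	size_t pad = off & (align - 1);		/* align is a power of two */

	return pad ? off + (align - pad) : off;
}

int main(void)
{
	size_t off = 8;			/* first field right after the header */

	off = align_up(off, 8);		/* TSFT: align 8, size 8 */
	printf("TSFT    at %zu\n", off); off += 8;

	off = align_up(off, 1);		/* FLAGS: align 1, size 1 */
	printf("FLAGS   at %zu\n", off); off += 1;

	off = align_up(off, 1);		/* RATE: align 1, size 1 */
	printf("RATE    at %zu\n", off); off += 1;

	off = align_up(off, 2);		/* CHANNEL: align 2, size 4 */
	printf("CHANNEL at %zu\n", off); off += 4;

	return 0;
}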
+ */ + + if ((unsigned long)iterator->_arg - + (unsigned long)iterator->_rtheader > + (unsigned long)iterator->_max_length) + return -EINVAL; + + /* these special ones are valid in each bitmap word */ + switch (iterator->_arg_index % 32) { + case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: + iterator->_bitmap_shifter >>= 1; + iterator->_arg_index++; + + iterator->_reset_on_ext = 1; + + vnslen = get_unaligned_le16(iterator->this_arg + 4); + iterator->_next_ns_data = iterator->_arg + vnslen; + oui = (*iterator->this_arg << 16) | + (*(iterator->this_arg + 1) << 8) | + *(iterator->this_arg + 2); + subns = *(iterator->this_arg + 3); + + find_ns(iterator, oui, subns); + + iterator->is_radiotap_ns = 0; + /* allow parsers to show this information */ + iterator->this_arg_index = + IEEE80211_RADIOTAP_VENDOR_NAMESPACE; + iterator->this_arg_size += vnslen; + if ((unsigned long)iterator->this_arg + + iterator->this_arg_size - + (unsigned long)iterator->_rtheader > + (unsigned long)(unsigned long)iterator->_max_length) + return -EINVAL; + hit = 1; + break; + case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: + iterator->_bitmap_shifter >>= 1; + iterator->_arg_index++; + + iterator->_reset_on_ext = 1; + iterator->current_namespace = &radiotap_ns; + iterator->is_radiotap_ns = 1; + break; + case IEEE80211_RADIOTAP_EXT: + /* + * bit 31 was set, there is more + * -- move to next u32 bitmap + */ + iterator->_bitmap_shifter = + get_unaligned_le32(iterator->_next_bitmap); + iterator->_next_bitmap++; + if (iterator->_reset_on_ext) + iterator->_arg_index = 0; + else + iterator->_arg_index++; + iterator->_reset_on_ext = 0; + break; + default: + /* we've got a hit! */ + hit = 1; + next_entry: + iterator->_bitmap_shifter >>= 1; + iterator->_arg_index++; + } + + /* if we found a valid arg earlier, return it now */ + if (hit) + return 0; + } +} +EXPORT_SYMBOL(ieee80211_radiotap_iterator_next); diff --git a/net/wireless/reg.c b/net/wireless/reg.c new file mode 100644 index 0000000..ed89c59 --- /dev/null +++ b/net/wireless/reg.c @@ -0,0 +1,2729 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2007 Johannes Berg + * Copyright 2008 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/** + * DOC: Wireless regulatory infrastructure + * + * The usual implementation is for a driver to read a device EEPROM to + * determine which regulatory domain it should be operating under, then + * looking up the allowable channels in a driver-local table and finally + * registering those channels in the wiphy structure. + * + * Another set of compliance enforcement is for drivers to use their + * own compliance limits which can be stored on the EEPROM. The host + * driver or firmware may ensure these are used. + * + * In addition to all this we provide an extra layer of regulatory + * conformance. For drivers which do not have any regulatory + * information CRDA provides the complete regulatory solution. + * For others it provides a community effort on further restrictions + * to enhance compliance. + * + * Note: When number of rules --> infinity we will not be able to + * index on alpha2 any more, instead we'll probably have to + * rely on some SHA1 checksum of the regdomain for example. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include "core.h" +#include "reg.h" +#include "regdb.h" +#include "nl80211.h" + +#ifdef CONFIG_CFG80211_REG_DEBUG +#define REG_DBG_PRINT(format, args...) \ + do { \ + printk(KERN_DEBUG format , ## args); \ + } while (0) +#else +#define REG_DBG_PRINT(args...) +#endif + +/* Receipt of information from last regulatory request */ +static struct regulatory_request *last_request; + +/* To trigger userspace events */ +static struct platform_device *reg_pdev; + +/* + * Central wireless core regulatory domains, we only need two, + * the current one and a world regulatory domain in case we have no + * information to give us an alpha2 + */ +const struct ieee80211_regdomain *cfg80211_regdomain; + +/* + * We use this as a place for the rd structure built from the + * last parsed country IE to rest until CRDA gets back to us with + * what it thinks should apply for the same country + */ +static const struct ieee80211_regdomain *country_ie_regdomain; + +/* + * Protects static reg.c components: + * - cfg80211_world_regdom + * - cfg80211_regdom + * - country_ie_regdomain + * - last_request + */ +DEFINE_MUTEX(reg_mutex); +#define assert_reg_lock() WARN_ON(!mutex_is_locked(®_mutex)) + +/* Used to queue up regulatory hints */ +static LIST_HEAD(reg_requests_list); +static spinlock_t reg_requests_lock; + +/* Used to queue up beacon hints for review */ +static LIST_HEAD(reg_pending_beacons); +static spinlock_t reg_pending_beacons_lock; + +/* Used to keep track of processed beacon hints */ +static LIST_HEAD(reg_beacon_list); + +struct reg_beacon { + struct list_head list; + struct ieee80211_channel chan; +}; + +/* We keep a static world regulatory domain in case of the absence of CRDA */ +static const struct ieee80211_regdomain world_regdom = { + .n_reg_rules = 5, + .alpha2 = "00", + .reg_rules = { + /* IEEE 802.11b/g, channels 1..11 */ + REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), + /* IEEE 802.11b/g, channels 12..13. No HT40 + * channel fits here. 
*/ + REG_RULE(2467-10, 2472+10, 20, 6, 20, + NL80211_RRF_PASSIVE_SCAN | + NL80211_RRF_NO_IBSS), + /* IEEE 802.11 channel 14 - Only JP enables + * this and for 802.11b only */ + REG_RULE(2484-10, 2484+10, 20, 6, 20, + NL80211_RRF_PASSIVE_SCAN | + NL80211_RRF_NO_IBSS | + NL80211_RRF_NO_OFDM), + /* IEEE 802.11a, channel 36..48 */ + REG_RULE(5180-10, 5240+10, 40, 6, 20, + NL80211_RRF_PASSIVE_SCAN | + NL80211_RRF_NO_IBSS), + + /* NB: 5260 MHz - 5700 MHz requies DFS */ + + /* IEEE 802.11a, channel 149..165 */ + REG_RULE(5745-10, 5825+10, 40, 6, 20, + NL80211_RRF_PASSIVE_SCAN | + NL80211_RRF_NO_IBSS), + } +}; + +static const struct ieee80211_regdomain *cfg80211_world_regdom = + &world_regdom; + +static char *ieee80211_regdom = "00"; +static char user_alpha2[2]; + +module_param(ieee80211_regdom, charp, 0444); +MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); + +static void reset_regdomains(void) +{ + /* avoid freeing static information or freeing something twice */ + if (cfg80211_regdomain == cfg80211_world_regdom) + cfg80211_regdomain = NULL; + if (cfg80211_world_regdom == &world_regdom) + cfg80211_world_regdom = NULL; + if (cfg80211_regdomain == &world_regdom) + cfg80211_regdomain = NULL; + + kfree(cfg80211_regdomain); + kfree(cfg80211_world_regdom); + + cfg80211_world_regdom = &world_regdom; + cfg80211_regdomain = NULL; +} + +/* + * Dynamic world regulatory domain requested by the wireless + * core upon initialization + */ +static void update_world_regdomain(const struct ieee80211_regdomain *rd) +{ + BUG_ON(!last_request); + + reset_regdomains(); + + cfg80211_world_regdom = rd; + cfg80211_regdomain = rd; +} + +bool is_world_regdom(const char *alpha2) +{ + if (!alpha2) + return false; + if (alpha2[0] == '0' && alpha2[1] == '0') + return true; + return false; +} + +static bool is_alpha2_set(const char *alpha2) +{ + if (!alpha2) + return false; + if (alpha2[0] != 0 && alpha2[1] != 0) + return true; + return false; +} + +static bool is_alpha_upper(char letter) +{ + /* ASCII A - Z */ + if (letter >= 65 && letter <= 90) + return true; + return false; +} + +static bool is_unknown_alpha2(const char *alpha2) +{ + if (!alpha2) + return false; + /* + * Special case where regulatory domain was built by driver + * but a specific alpha2 cannot be determined + */ + if (alpha2[0] == '9' && alpha2[1] == '9') + return true; + return false; +} + +static bool is_intersected_alpha2(const char *alpha2) +{ + if (!alpha2) + return false; + /* + * Special case where regulatory domain is the + * result of an intersection between two regulatory domain + * structures + */ + if (alpha2[0] == '9' && alpha2[1] == '8') + return true; + return false; +} + +static bool is_an_alpha2(const char *alpha2) +{ + if (!alpha2) + return false; + if (is_alpha_upper(alpha2[0]) && is_alpha_upper(alpha2[1])) + return true; + return false; +} + +static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y) +{ + if (!alpha2_x || !alpha2_y) + return false; + if (alpha2_x[0] == alpha2_y[0] && + alpha2_x[1] == alpha2_y[1]) + return true; + return false; +} + +static bool regdom_changes(const char *alpha2) +{ + assert_cfg80211_lock(); + + if (!cfg80211_regdomain) + return true; + if (alpha2_equal(cfg80211_regdomain->alpha2, alpha2)) + return false; + return true; +} + +/* + * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached, this lets + * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER + * has ever been issued. 
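The helpers above reserve a few magic alpha2 values: "00" is the world regulatory domain, "99" a driver-built domain whose country could not be determined, "98" the result of intersecting two domains, and (as the next helper suggests) "97" serves as a "no user hint saved yet" sentinel. A small self-contained classifier along those lines, purely illustrative and only mirroring is_world_regdom()/is_unknown_alpha2()/is_intersected_alpha2()/is_an_alpha2():

#include <ctype.h>
#include <stdio.h>

/* classify a two-character regulatory alpha2 the way reg.c treats it */
static const char *classify_alpha2(const char *a)
{
	if (a[0] == '0' && a[1] == '0')
		return "world regulatory domain";
	if (a[0] == '9' && a[1] == '9')
		return "driver-specific, country unknown";
	if (a[0] == '9' && a[1] == '8')
		return "intersection of two domains";
	if (isupper((unsigned char)a[0]) && isupper((unsigned char)a[1]))
		return "ISO 3166-1 alpha2 country code";
	return "invalid";
}

int main(void)
{
	const char *tests[] = { "00", "99", "98", "US", "LT", "x1" };

	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%s -> %s\n", tests[i], classify_alpha2(tests[i]));
	return 0;
}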
+ */
+static bool is_user_regdom_saved(void)
+{
+	if (user_alpha2[0] == '9' && user_alpha2[1] == '7')
+		return false;
+
+	/* This would indicate a mistake in the design */
+	if (WARN((!is_world_regdom(user_alpha2) &&
+		  !is_an_alpha2(user_alpha2)),
+		 "Unexpected user alpha2: %c%c\n",
+		 user_alpha2[0],
+		 user_alpha2[1]))
+		return false;
+
+	return true;
+}
+
+/**
+ * country_ie_integrity_changes - tells us if the country IE has changed
+ * @checksum: checksum of country IE of fields we are interested in
+ *
+ * If the country IE has not changed you can ignore it safely. This is
+ * useful to determine if two devices are seeing two different country IEs
+ * even on the same alpha2. Note that this will return false if no IE has
+ * been set on the wireless core yet.
+ */
+static bool country_ie_integrity_changes(u32 checksum)
+{
+	/* If no IE has been set then the checksum doesn't change */
+	if (unlikely(!last_request->country_ie_checksum))
+		return false;
+	if (unlikely(last_request->country_ie_checksum != checksum))
+		return true;
+	return false;
+}
+
+static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
+			 const struct ieee80211_regdomain *src_regd)
+{
+	struct ieee80211_regdomain *regd;
+	int size_of_regd = 0;
+	unsigned int i;
+
+	size_of_regd = sizeof(struct ieee80211_regdomain) +
+	  ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
+
+	regd = kzalloc(size_of_regd, GFP_KERNEL);
+	if (!regd)
+		return -ENOMEM;
+
+	memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
+
+	for (i = 0; i < src_regd->n_reg_rules; i++)
+		memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
+		       sizeof(struct ieee80211_reg_rule));
+
+	*dst_regd = regd;
+	return 0;
+}
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+struct reg_regdb_search_request {
+	char alpha2[2];
+	struct list_head list;
+};
+
+static LIST_HEAD(reg_regdb_search_list);
+static DEFINE_SPINLOCK(reg_regdb_search_lock);
+
+static void reg_regdb_search(struct work_struct *work)
+{
+	struct reg_regdb_search_request *request;
+	const struct ieee80211_regdomain *curdom, *regdom;
+	int i, r;
+
+	spin_lock(&reg_regdb_search_lock);
+	while (!list_empty(&reg_regdb_search_list)) {
+		request = list_first_entry(&reg_regdb_search_list,
+					   struct reg_regdb_search_request,
+					   list);
+		list_del(&request->list);
+
+		for (i=0; i<reg_regdb_size; i++) {
+			curdom = reg_regdb[i];
+
+			if (!memcmp(request->alpha2, curdom->alpha2, 2)) {
+				r = reg_copy_regd(&regdom, curdom);
+				if (r)
+					break;
+				spin_unlock(&reg_regdb_search_lock);
+				mutex_lock(&cfg80211_mutex);
+				set_regdom(regdom);
+				mutex_unlock(&cfg80211_mutex);
+				spin_lock(&reg_regdb_search_lock);
+				break;
+			}
+		}
+
+		kfree(request);
+	}
+	spin_unlock(&reg_regdb_search_lock);
+}
+
+static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
+
+static void reg_regdb_query(const char *alpha2)
+{
+	struct reg_regdb_search_request *request;
+
+	if (!alpha2)
+		return;
+
+	request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
+	if (!request)
+		return;
+
+	memcpy(request->alpha2, alpha2, 2);
+
+	spin_lock(&reg_regdb_search_lock);
+	list_add_tail(&request->list, &reg_regdb_search_list);
+	spin_unlock(&reg_regdb_search_lock);
+
+	schedule_work(&reg_regdb_work);
+}
+#else
+static inline void reg_regdb_query(const char *alpha2) {}
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+/*
+ * This lets us keep regulatory code which is updated on a regulatory
+ * basis in userspace.
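Stripped of the locking and work-queue plumbing, reg_regdb_search() above reduces to a linear scan of a table built into the module (when CONFIG_CFG80211_INTERNAL_REGDB is set), matching on the two-letter alpha2. The sketch below shows just that lookup; the table contents and struct/field names are invented for illustration, not the generated reg_regdb[] itself.

#include <stdio.h>
#include <string.h>

struct demo_regdomain {
	char alpha2[3];
	int n_reg_rules;
};

/* stand-in for the generated reg_regdb[] / reg_regdb_size pair */
static const struct demo_regdomain demo_regdb[] = {
	{ "00", 5 },	/* world domain */
	{ "US", 6 },	/* rule counts are made up */
	{ "JP", 7 },
};
static const int demo_regdb_size =
	sizeof(demo_regdb) / sizeof(demo_regdb[0]);

static const struct demo_regdomain *demo_lookup(const char *alpha2)
{
	int i;

	for (i = 0; i < demo_regdb_size; i++)
		if (!memcmp(alpha2, demo_regdb[i].alpha2, 2))
			return &demo_regdb[i];
	return NULL;
}

int main(void)
{
	const struct demo_regdomain *rd = demo_lookup("US");

	if (rd)
		printf("US: %d rules\n", rd->n_reg_rules);
	else
		printf("US: not in the built-in db\n");
	return 0;
}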
+ */ +static int call_crda(const char *alpha2) +{ + char country_env[9 + 2] = "COUNTRY="; + char *envp[] = { + country_env, + NULL + }; + + if (!is_world_regdom((char *) alpha2)) + printk(KERN_INFO "cfg80211: Calling CRDA for country: %c%c\n", + alpha2[0], alpha2[1]); + else + printk(KERN_INFO "cfg80211: Calling CRDA to update world " + "regulatory domain\n"); + + /* query internal regulatory database (if it exists) */ + reg_regdb_query(alpha2); + + country_env[8] = alpha2[0]; + country_env[9] = alpha2[1]; + + return kobject_uevent_env(®_pdev->dev.kobj, KOBJ_CHANGE, envp); +} + +/* Used by nl80211 before kmalloc'ing our regulatory domain */ +bool reg_is_valid_request(const char *alpha2) +{ + assert_cfg80211_lock(); + + if (!last_request) + return false; + + return alpha2_equal(last_request->alpha2, alpha2); +} + +/* Sanity check on a regulatory rule */ +static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule) +{ + const struct ieee80211_freq_range *freq_range = &rule->freq_range; + u32 freq_diff; + + if (freq_range->start_freq_khz <= 0 || freq_range->end_freq_khz <= 0) + return false; + + if (freq_range->start_freq_khz > freq_range->end_freq_khz) + return false; + + freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; + + if (freq_range->end_freq_khz <= freq_range->start_freq_khz || + freq_range->max_bandwidth_khz > freq_diff) + return false; + + return true; +} + +static bool is_valid_rd(const struct ieee80211_regdomain *rd) +{ + const struct ieee80211_reg_rule *reg_rule = NULL; + unsigned int i; + + if (!rd->n_reg_rules) + return false; + + if (WARN_ON(rd->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) + return false; + + for (i = 0; i < rd->n_reg_rules; i++) { + reg_rule = &rd->reg_rules[i]; + if (!is_valid_reg_rule(reg_rule)) + return false; + } + + return true; +} + +static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range, + u32 center_freq_khz, + u32 bw_khz) +{ + u32 start_freq_khz, end_freq_khz; + + start_freq_khz = center_freq_khz - (bw_khz/2); + end_freq_khz = center_freq_khz + (bw_khz/2); + + if (start_freq_khz >= freq_range->start_freq_khz && + end_freq_khz <= freq_range->end_freq_khz) + return true; + + return false; +} + +/** + * freq_in_rule_band - tells us if a frequency is in a frequency band + * @freq_range: frequency rule we want to query + * @freq_khz: frequency we are inquiring about + * + * This lets us know if a specific frequency rule is or is not relevant to + * a specific frequency's band. Bands are device specific and artificial + * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is + * safe for now to assume that a frequency rule should not be part of a + * frequency's band if the start freq or end freq are off by more than 2 GHz. + * This resolution can be lowered and should be considered as we add + * regulatory rule support for other "bands". + **/ +static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, + u32 freq_khz) +{ +#define ONE_GHZ_IN_KHZ 1000000 + if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ)) + return true; + if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ)) + return true; + return false; +#undef ONE_GHZ_IN_KHZ +} + +/* + * This is a work around for sanity checking ieee80211_channel_to_frequency()'s + * work. ieee80211_channel_to_frequency() can for example currently provide a + * 2 GHz channel when in fact a 5 GHz channel was desired. 
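reg_does_bw_fit() above is a pure interval check: a transmission of bandwidth bw centred on center_freq fits a rule if [center - bw/2, center + bw/2] lies entirely inside the rule's frequency range. A quick standalone version of the same arithmetic, in kHz as the regulatory structs use (the rule chosen matches the world domain's 2402-2472 MHz entry earlier in this file):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MHZ_TO_KHZ(f) ((f) * 1000)

static bool bw_fits(uint32_t rule_start, uint32_t rule_end,
		    uint32_t center, uint32_t bw)
{
	uint32_t start = center - bw / 2;
	uint32_t end = center + bw / 2;

	return start >= rule_start && end <= rule_end;
}

int main(void)
{
	uint32_t rs = MHZ_TO_KHZ(2402), re = MHZ_TO_KHZ(2472);

	printf("20 MHz @ 2437 MHz fits: %d\n",
	       bw_fits(rs, re, MHZ_TO_KHZ(2437), MHZ_TO_KHZ(20)));	/* 1 */
	printf("40 MHz @ 2462 MHz fits: %d\n",
	       bw_fits(rs, re, MHZ_TO_KHZ(2462), MHZ_TO_KHZ(40)));	/* 0 */
	return 0;
}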
An example would be + * an AP providing channel 8 on a country IE triplet when it sent this on the + * 5 GHz band, that channel is designed to be channel 8 on 5 GHz, not a 2 GHz + * channel. + * + * This can be removed once ieee80211_channel_to_frequency() takes in a band. + */ +static bool chan_in_band(int chan, enum ieee80211_band band) +{ + int center_freq = ieee80211_channel_to_frequency(chan); + + switch (band) { + case IEEE80211_BAND_2GHZ: + if (center_freq <= 2484) + return true; + return false; + case IEEE80211_BAND_5GHZ: + if (center_freq >= 5005) + return true; + return false; + default: + return false; + } +} + +/* + * Some APs may send a country IE triplet for each channel they + * support and while this is completely overkill and silly we still + * need to support it. We avoid making a single rule for each channel + * though and to help us with this we use this helper to find the + * actual subband end channel. These type of country IE triplet + * scenerios are handled then, all yielding two regulaotry rules from + * parsing a country IE: + * + * [1] + * [2] + * [36] + * [40] + * + * [1] + * [2-4] + * [5-12] + * [36] + * [40-44] + * + * [1-4] + * [5-7] + * [36-44] + * [48-64] + * + * [36-36] + * [40-40] + * [44-44] + * [48-48] + * [52-52] + * [56-56] + * [60-60] + * [64-64] + * [100-100] + * [104-104] + * [108-108] + * [112-112] + * [116-116] + * [120-120] + * [124-124] + * [128-128] + * [132-132] + * [136-136] + * [140-140] + * + * Returns 0 if the IE has been found to be invalid in the middle + * somewhere. + */ +static int max_subband_chan(enum ieee80211_band band, + int orig_cur_chan, + int orig_end_channel, + s8 orig_max_power, + u8 **country_ie, + u8 *country_ie_len) +{ + u8 *triplets_start = *country_ie; + u8 len_at_triplet = *country_ie_len; + int end_subband_chan = orig_end_channel; + + /* + * We'll deal with padding for the caller unless + * its not immediate and we don't process any channels + */ + if (*country_ie_len == 1) { + *country_ie += 1; + *country_ie_len -= 1; + return orig_end_channel; + } + + /* Move to the next triplet and then start search */ + *country_ie += 3; + *country_ie_len -= 3; + + if (!chan_in_band(orig_cur_chan, band)) + return 0; + + while (*country_ie_len >= 3) { + int end_channel = 0; + struct ieee80211_country_ie_triplet *triplet = + (struct ieee80211_country_ie_triplet *) *country_ie; + int cur_channel = 0, next_expected_chan; + + /* means last triplet is completely unrelated to this one */ + if (triplet->ext.reg_extension_id >= + IEEE80211_COUNTRY_EXTENSION_ID) { + *country_ie -= 3; + *country_ie_len += 3; + break; + } + + if (triplet->chans.first_channel == 0) { + *country_ie += 1; + *country_ie_len -= 1; + if (*country_ie_len != 0) + return 0; + break; + } + + if (triplet->chans.num_channels == 0) + return 0; + + /* Monitonically increasing channel order */ + if (triplet->chans.first_channel <= end_subband_chan) + return 0; + + if (!chan_in_band(triplet->chans.first_channel, band)) + return 0; + + /* 2 GHz */ + if (triplet->chans.first_channel <= 14) { + end_channel = triplet->chans.first_channel + + triplet->chans.num_channels - 1; + } + else { + end_channel = triplet->chans.first_channel + + (4 * (triplet->chans.num_channels - 1)); + } + + if (!chan_in_band(end_channel, band)) + return 0; + + if (orig_max_power != triplet->chans.max_power) { + *country_ie -= 3; + *country_ie_len += 3; + break; + } + + cur_channel = triplet->chans.first_channel; + + /* The key is finding the right next expected channel */ + if (band == 
IEEE80211_BAND_2GHZ) + next_expected_chan = end_subband_chan + 1; + else + next_expected_chan = end_subband_chan + 4; + + if (cur_channel != next_expected_chan) { + *country_ie -= 3; + *country_ie_len += 3; + break; + } + + end_subband_chan = end_channel; + + /* Move to the next one */ + *country_ie += 3; + *country_ie_len -= 3; + + /* + * Padding needs to be dealt with if we processed + * some channels. + */ + if (*country_ie_len == 1) { + *country_ie += 1; + *country_ie_len -= 1; + break; + } + + /* If seen, the IE is invalid */ + if (*country_ie_len == 2) + return 0; + } + + if (end_subband_chan == orig_end_channel) { + *country_ie = triplets_start; + *country_ie_len = len_at_triplet; + return orig_end_channel; + } + + return end_subband_chan; +} + +/* + * Converts a country IE to a regulatory domain. A regulatory domain + * structure has a lot of information which the IE doesn't yet have, + * so for the other values we use upper max values as we will intersect + * with our userspace regulatory agent to get lower bounds. + */ +static struct ieee80211_regdomain *country_ie_2_rd( + enum ieee80211_band band, + u8 *country_ie, + u8 country_ie_len, + u32 *checksum) +{ + struct ieee80211_regdomain *rd = NULL; + unsigned int i = 0; + char alpha2[2]; + u32 flags = 0; + u32 num_rules = 0, size_of_regd = 0; + u8 *triplets_start = NULL; + u8 len_at_triplet = 0; + /* the last channel we have registered in a subband (triplet) */ + int last_sub_max_channel = 0; + + *checksum = 0xDEADBEEF; + + /* Country IE requirements */ + BUG_ON(country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN || + country_ie_len & 0x01); + + alpha2[0] = country_ie[0]; + alpha2[1] = country_ie[1]; + + /* + * Third octet can be: + * 'I' - Indoor + * 'O' - Outdoor + * + * anything else we assume is no restrictions + */ + if (country_ie[2] == 'I') + flags = NL80211_RRF_NO_OUTDOOR; + else if (country_ie[2] == 'O') + flags = NL80211_RRF_NO_INDOOR; + + country_ie += 3; + country_ie_len -= 3; + + triplets_start = country_ie; + len_at_triplet = country_ie_len; + + *checksum ^= ((flags ^ alpha2[0] ^ alpha2[1]) << 8); + + /* + * We need to build a reg rule for each triplet, but first we must + * calculate the number of reg rules we will need. We will need one + * for each channel subband + */ + while (country_ie_len >= 3) { + int end_channel = 0; + struct ieee80211_country_ie_triplet *triplet = + (struct ieee80211_country_ie_triplet *) country_ie; + int cur_sub_max_channel = 0, cur_channel = 0; + + if (triplet->ext.reg_extension_id >= + IEEE80211_COUNTRY_EXTENSION_ID) { + country_ie += 3; + country_ie_len -= 3; + continue; + } + + /* + * APs can add padding to make length divisible + * by two, required by the spec. + */ + if (triplet->chans.first_channel == 0) { + country_ie++; + country_ie_len--; + /* This is expected to be at the very end only */ + if (country_ie_len != 0) + return NULL; + break; + } + + if (triplet->chans.num_channels == 0) + return NULL; + + if (!chan_in_band(triplet->chans.first_channel, band)) + return NULL; + + /* 2 GHz */ + if (band == IEEE80211_BAND_2GHZ) + end_channel = triplet->chans.first_channel + + triplet->chans.num_channels - 1; + else + /* + * 5 GHz -- For example in country IEs if the first + * channel given is 36 and the number of channels is 4 + * then the individual channel numbers defined for the + * 5 GHz PHY by these parameters are: 36, 40, 44, and 48 + * and not 36, 37, 38, 39. 
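The comment above is the reason a 5 GHz country-IE triplet with first_channel 36 and num_channels 4 covers channels 36, 40, 44 and 48 rather than four consecutive numbers, while a 2.4 GHz triplet counts channels one by one. A standalone check of the end-channel arithmetic used in max_subband_chan()/country_ie_2_rd(), with the channel-to-frequency mapping simplified to the usual 2.4/5 GHz formulas:

#include <stdio.h>

/* last channel covered by a country IE triplet */
static int triplet_end_channel(int first, int num, int is_5ghz)
{
	return is_5ghz ? first + 4 * (num - 1) : first + num - 1;
}

/* simplified channel -> centre frequency (MHz) mapping */
static int chan_to_freq(int chan)
{
	if (chan == 14)
		return 2484;
	if (chan < 14)
		return 2407 + chan * 5;
	return 5000 + chan * 5;
}

int main(void)
{
	int end;

	end = triplet_end_channel(36, 4, 1);	/* 5 GHz: 36, 40, 44, 48 */
	printf("5 GHz triplet 36/4 ends at channel %d (%d MHz)\n",
	       end, chan_to_freq(end));

	end = triplet_end_channel(1, 11, 0);	/* 2.4 GHz: 1..11 */
	printf("2.4 GHz triplet 1/11 ends at channel %d (%d MHz)\n",
	       end, chan_to_freq(end));
	return 0;
}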
+ * + * See: http://tinyurl.com/11d-clarification + */ + end_channel = triplet->chans.first_channel + + (4 * (triplet->chans.num_channels - 1)); + + cur_channel = triplet->chans.first_channel; + + /* + * Enhancement for APs that send a triplet for every channel + * or for whatever reason sends triplets with multiple channels + * separated when in fact they should be together. + */ + end_channel = max_subband_chan(band, + cur_channel, + end_channel, + triplet->chans.max_power, + &country_ie, + &country_ie_len); + if (!end_channel) + return NULL; + + if (!chan_in_band(end_channel, band)) + return NULL; + + cur_sub_max_channel = end_channel; + + /* Basic sanity check */ + if (cur_sub_max_channel < cur_channel) + return NULL; + + /* + * Do not allow overlapping channels. Also channels + * passed in each subband must be monotonically + * increasing + */ + if (last_sub_max_channel) { + if (cur_channel <= last_sub_max_channel) + return NULL; + if (cur_sub_max_channel <= last_sub_max_channel) + return NULL; + } + + /* + * When dot11RegulatoryClassesRequired is supported + * we can throw ext triplets as part of this soup, + * for now we don't care when those change as we + * don't support them + */ + *checksum ^= ((cur_channel ^ cur_sub_max_channel) << 8) | + ((cur_sub_max_channel ^ cur_sub_max_channel) << 16) | + ((triplet->chans.max_power ^ cur_sub_max_channel) << 24); + + last_sub_max_channel = cur_sub_max_channel; + + num_rules++; + + if (country_ie_len >= 3) { + country_ie += 3; + country_ie_len -= 3; + } + + /* + * Note: this is not a IEEE requirement but + * simply a memory requirement + */ + if (num_rules > NL80211_MAX_SUPP_REG_RULES) + return NULL; + } + + country_ie = triplets_start; + country_ie_len = len_at_triplet; + + size_of_regd = sizeof(struct ieee80211_regdomain) + + (num_rules * sizeof(struct ieee80211_reg_rule)); + + rd = kzalloc(size_of_regd, GFP_KERNEL); + if (!rd) + return NULL; + + rd->n_reg_rules = num_rules; + rd->alpha2[0] = alpha2[0]; + rd->alpha2[1] = alpha2[1]; + + /* This time around we fill in the rd */ + while (country_ie_len >= 3) { + int end_channel = 0; + struct ieee80211_country_ie_triplet *triplet = + (struct ieee80211_country_ie_triplet *) country_ie; + struct ieee80211_reg_rule *reg_rule = NULL; + struct ieee80211_freq_range *freq_range = NULL; + struct ieee80211_power_rule *power_rule = NULL; + + /* + * Must parse if dot11RegulatoryClassesRequired is true, + * we don't support this yet + */ + if (triplet->ext.reg_extension_id >= + IEEE80211_COUNTRY_EXTENSION_ID) { + country_ie += 3; + country_ie_len -= 3; + continue; + } + + if (triplet->chans.first_channel == 0) { + country_ie++; + country_ie_len--; + break; + } + + reg_rule = &rd->reg_rules[i]; + freq_range = ®_rule->freq_range; + power_rule = ®_rule->power_rule; + + reg_rule->flags = flags; + + /* 2 GHz */ + if (band == IEEE80211_BAND_2GHZ) + end_channel = triplet->chans.first_channel + + triplet->chans.num_channels -1; + else + end_channel = triplet->chans.first_channel + + (4 * (triplet->chans.num_channels - 1)); + + end_channel = max_subband_chan(band, + triplet->chans.first_channel, + end_channel, + triplet->chans.max_power, + &country_ie, + &country_ie_len); + + /* + * The +10 is since the regulatory domain expects + * the actual band edge, not the center of freq for + * its start and end freqs, assuming 20 MHz bandwidth on + * the channels passed + */ + freq_range->start_freq_khz = + MHZ_TO_KHZ(ieee80211_channel_to_frequency( + triplet->chans.first_channel) - 10); + freq_range->end_freq_khz = + 
MHZ_TO_KHZ(ieee80211_channel_to_frequency( + end_channel) + 10); + + /* + * These are large arbitrary values we use to intersect later. + * Increment this if we ever support >= 40 MHz channels + * in IEEE 802.11 + */ + freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40); + power_rule->max_antenna_gain = DBI_TO_MBI(100); + power_rule->max_eirp = DBM_TO_MBM(triplet->chans.max_power); + + i++; + + if (country_ie_len >= 3) { + country_ie += 3; + country_ie_len -= 3; + } + + BUG_ON(i > NL80211_MAX_SUPP_REG_RULES); + } + + return rd; +} + + +/* + * Helper for regdom_intersect(), this does the real + * mathematical intersection fun + */ +static int reg_rules_intersect( + const struct ieee80211_reg_rule *rule1, + const struct ieee80211_reg_rule *rule2, + struct ieee80211_reg_rule *intersected_rule) +{ + const struct ieee80211_freq_range *freq_range1, *freq_range2; + struct ieee80211_freq_range *freq_range; + const struct ieee80211_power_rule *power_rule1, *power_rule2; + struct ieee80211_power_rule *power_rule; + u32 freq_diff; + + freq_range1 = &rule1->freq_range; + freq_range2 = &rule2->freq_range; + freq_range = &intersected_rule->freq_range; + + power_rule1 = &rule1->power_rule; + power_rule2 = &rule2->power_rule; + power_rule = &intersected_rule->power_rule; + + freq_range->start_freq_khz = max(freq_range1->start_freq_khz, + freq_range2->start_freq_khz); + freq_range->end_freq_khz = min(freq_range1->end_freq_khz, + freq_range2->end_freq_khz); + freq_range->max_bandwidth_khz = min(freq_range1->max_bandwidth_khz, + freq_range2->max_bandwidth_khz); + + freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; + if (freq_range->max_bandwidth_khz > freq_diff) + freq_range->max_bandwidth_khz = freq_diff; + + power_rule->max_eirp = min(power_rule1->max_eirp, + power_rule2->max_eirp); + power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain, + power_rule2->max_antenna_gain); + + intersected_rule->flags = (rule1->flags | rule2->flags); + + if (!is_valid_reg_rule(intersected_rule)) + return -EINVAL; + + return 0; +} + +/** + * regdom_intersect - do the intersection between two regulatory domains + * @rd1: first regulatory domain + * @rd2: second regulatory domain + * + * Use this function to get the intersection between two regulatory domains. + * Once completed we will mark the alpha2 for the rd as intersected, "98", + * as no one single alpha2 can represent this regulatory domain. + * + * Returns a pointer to the regulatory domain structure which will hold the + * resulting intersection of rules between rd1 and rd2. We will + * kzalloc() this structure for you. + */ +static struct ieee80211_regdomain *regdom_intersect( + const struct ieee80211_regdomain *rd1, + const struct ieee80211_regdomain *rd2) +{ + int r, size_of_regd; + unsigned int x, y; + unsigned int num_rules = 0, rule_idx = 0; + const struct ieee80211_reg_rule *rule1, *rule2; + struct ieee80211_reg_rule *intersected_rule; + struct ieee80211_regdomain *rd; + /* This is just a dummy holder to help us count */ + struct ieee80211_reg_rule irule; + + /* Uses the stack temporarily for counter arithmetic */ + intersected_rule = &irule; + + memset(intersected_rule, 0, sizeof(struct ieee80211_reg_rule)); + + if (!rd1 || !rd2) + return NULL; + + /* + * First we get a count of the rules we'll need, then we actually + * build them. This is to so we can malloc() and free() a + * regdomain once. The reason we use reg_rules_intersect() here + * is it will return -EINVAL if the rule computed makes no sense. 
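/*
 * Worked example, for illustration only (the numbers are hypothetical):
 * intersecting a 2400-2483.5 MHz @ 40 MHz, 30 dBm rule with a
 * 2402-2472 MHz @ 40 MHz, 20 dBm rule using the same arithmetic as
 * reg_rules_intersect() above. Values are in kHz and mBm, as stored in
 * struct ieee80211_reg_rule.
 */
static void example_intersect_two_rules(void)
{
	u32 start1 = 2400000, end1 = 2483500, bw1 = 40000, eirp1 = 3000;
	u32 start2 = 2402000, end2 = 2472000, bw2 = 40000, eirp2 = 2000;
	u32 start, end, bw, eirp, freq_diff;

	start = max(start1, start2);	/* 2402000 kHz */
	end = min(end1, end2);		/* 2472000 kHz */
	bw = min(bw1, bw2);		/* 40000 kHz */

	freq_diff = end - start;	/* 70000 kHz */
	if (bw > freq_diff)		/* not the case here */
		bw = freq_diff;

	eirp = min(eirp1, eirp2);	/* 2000 mBm == 20 dBm */

	printk(KERN_DEBUG "example intersection: %u - %u kHz @ %u kHz, %u mBm\n",
	       start, end, bw, eirp);
}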
+ * All rules that do check out OK are valid. + */ + + for (x = 0; x < rd1->n_reg_rules; x++) { + rule1 = &rd1->reg_rules[x]; + for (y = 0; y < rd2->n_reg_rules; y++) { + rule2 = &rd2->reg_rules[y]; + if (!reg_rules_intersect(rule1, rule2, + intersected_rule)) + num_rules++; + memset(intersected_rule, 0, + sizeof(struct ieee80211_reg_rule)); + } + } + + if (!num_rules) + return NULL; + + size_of_regd = sizeof(struct ieee80211_regdomain) + + ((num_rules + 1) * sizeof(struct ieee80211_reg_rule)); + + rd = kzalloc(size_of_regd, GFP_KERNEL); + if (!rd) + return NULL; + + for (x = 0; x < rd1->n_reg_rules; x++) { + rule1 = &rd1->reg_rules[x]; + for (y = 0; y < rd2->n_reg_rules; y++) { + rule2 = &rd2->reg_rules[y]; + /* + * This time around instead of using the stack lets + * write to the target rule directly saving ourselves + * a memcpy() + */ + intersected_rule = &rd->reg_rules[rule_idx]; + r = reg_rules_intersect(rule1, rule2, + intersected_rule); + /* + * No need to memset here the intersected rule here as + * we're not using the stack anymore + */ + if (r) + continue; + rule_idx++; + } + } + + if (rule_idx != num_rules) { + kfree(rd); + return NULL; + } + + rd->n_reg_rules = num_rules; + rd->alpha2[0] = '9'; + rd->alpha2[1] = '8'; + + return rd; +} + +/* + * XXX: add support for the rest of enum nl80211_reg_rule_flags, we may + * want to just have the channel structure use these + */ +static u32 map_regdom_flags(u32 rd_flags) +{ + u32 channel_flags = 0; + if (rd_flags & NL80211_RRF_PASSIVE_SCAN) + channel_flags |= IEEE80211_CHAN_PASSIVE_SCAN; + if (rd_flags & NL80211_RRF_NO_IBSS) + channel_flags |= IEEE80211_CHAN_NO_IBSS; + if (rd_flags & NL80211_RRF_DFS) + channel_flags |= IEEE80211_CHAN_RADAR; + return channel_flags; +} + +static int freq_reg_info_regd(struct wiphy *wiphy, + u32 center_freq, + u32 desired_bw_khz, + const struct ieee80211_reg_rule **reg_rule, + const struct ieee80211_regdomain *custom_regd) +{ + int i; + bool band_rule_found = false; + const struct ieee80211_regdomain *regd; + bool bw_fits = false; + + if (!desired_bw_khz) + desired_bw_khz = MHZ_TO_KHZ(20); + + regd = custom_regd ? 
custom_regd : cfg80211_regdomain; + + /* + * Follow the driver's regulatory domain, if present, unless a country + * IE has been processed or a user wants to help complaince further + */ + if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && + last_request->initiator != NL80211_REGDOM_SET_BY_USER && + wiphy->regd) + regd = wiphy->regd; + + if (!regd) + return -EINVAL; + + for (i = 0; i < regd->n_reg_rules; i++) { + const struct ieee80211_reg_rule *rr; + const struct ieee80211_freq_range *fr = NULL; + const struct ieee80211_power_rule *pr = NULL; + + rr = ®d->reg_rules[i]; + fr = &rr->freq_range; + pr = &rr->power_rule; + + /* + * We only need to know if one frequency rule was + * was in center_freq's band, that's enough, so lets + * not overwrite it once found + */ + if (!band_rule_found) + band_rule_found = freq_in_rule_band(fr, center_freq); + + bw_fits = reg_does_bw_fit(fr, + center_freq, + desired_bw_khz); + + if (band_rule_found && bw_fits) { + *reg_rule = rr; + return 0; + } + } + + if (!band_rule_found) + return -ERANGE; + + return -EINVAL; +} +EXPORT_SYMBOL(freq_reg_info); + +int freq_reg_info(struct wiphy *wiphy, + u32 center_freq, + u32 desired_bw_khz, + const struct ieee80211_reg_rule **reg_rule) +{ + assert_cfg80211_lock(); + return freq_reg_info_regd(wiphy, + center_freq, + desired_bw_khz, + reg_rule, + NULL); +} + +/* + * Note that right now we assume the desired channel bandwidth + * is always 20 MHz for each individual channel (HT40 uses 20 MHz + * per channel, the primary and the extension channel). To support + * smaller custom bandwidths such as 5 MHz or 10 MHz we'll need a + * new ieee80211_channel.target_bw and re run the regulatory check + * on the wiphy with the target_bw specified. Then we can simply use + * that below for the desired_bw_khz below. + */ +static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, + unsigned int chan_idx) +{ + int r; + u32 flags, bw_flags = 0; + u32 desired_bw_khz = MHZ_TO_KHZ(20); + const struct ieee80211_reg_rule *reg_rule = NULL; + const struct ieee80211_power_rule *power_rule = NULL; + const struct ieee80211_freq_range *freq_range = NULL; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *chan; + struct wiphy *request_wiphy = NULL; + + assert_cfg80211_lock(); + + request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); + + sband = wiphy->bands[band]; + BUG_ON(chan_idx >= sband->n_channels); + chan = &sband->channels[chan_idx]; + + flags = chan->orig_flags; + + r = freq_reg_info(wiphy, + MHZ_TO_KHZ(chan->center_freq), + desired_bw_khz, + ®_rule); + + if (r) { + /* + * This means no regulatory rule was found in the country IE + * with a frequency range on the center_freq's band, since + * IEEE-802.11 allows for a country IE to have a subset of the + * regulatory information provided in a country we ignore + * disabling the channel unless at least one reg rule was + * found on the center_freq's band. 
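/*
 * Sketch of an assumption: reg_does_bw_fit(), used by freq_reg_info_regd()
 * above, is defined earlier in this file and is not shown in this hunk.
 * Conceptually, a channel of width desired_bw_khz centred on center_freq_khz
 * fits a rule when both of its edges fall inside the rule's frequency range,
 * roughly:
 */
static bool example_bw_fits(const struct ieee80211_freq_range *fr,
			    u32 center_freq_khz, u32 desired_bw_khz)
{
	u32 start_edge = center_freq_khz - desired_bw_khz / 2;
	u32 end_edge = center_freq_khz + desired_bw_khz / 2;

	return start_edge >= fr->start_freq_khz &&
	       end_edge <= fr->end_freq_khz;
}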
For details see this + * clarification: + * + * http://tinyurl.com/11d-clarification + */ + if (r == -ERANGE && + last_request->initiator == + NL80211_REGDOM_SET_BY_COUNTRY_IE) { + REG_DBG_PRINT("cfg80211: Leaving channel %d MHz " + "intact on %s - no rule found in band on " + "Country IE\n", + chan->center_freq, wiphy_name(wiphy)); + } else { + /* + * In this case we know the country IE has at least one reg rule + * for the band so we respect its band definitions + */ + if (last_request->initiator == + NL80211_REGDOM_SET_BY_COUNTRY_IE) + REG_DBG_PRINT("cfg80211: Disabling " + "channel %d MHz on %s due to " + "Country IE\n", + chan->center_freq, wiphy_name(wiphy)); + flags |= IEEE80211_CHAN_DISABLED; + chan->flags = flags; + } + return; + } + + power_rule = ®_rule->power_rule; + freq_range = ®_rule->freq_range; + + if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40)) + bw_flags = IEEE80211_CHAN_NO_HT40; + + if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && + request_wiphy && request_wiphy == wiphy && + request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) { + /* + * This gaurantees the driver's requested regulatory domain + * will always be used as a base for further regulatory + * settings + */ + chan->flags = chan->orig_flags = + map_regdom_flags(reg_rule->flags) | bw_flags; + chan->max_antenna_gain = chan->orig_mag = + (int) MBI_TO_DBI(power_rule->max_antenna_gain); + chan->max_power = chan->orig_mpwr = + (int) MBM_TO_DBM(power_rule->max_eirp); + return; + } + + chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); + chan->max_antenna_gain = min(chan->orig_mag, + (int) MBI_TO_DBI(power_rule->max_antenna_gain)); + if (chan->orig_mpwr) + chan->max_power = min(chan->orig_mpwr, + (int) MBM_TO_DBM(power_rule->max_eirp)); + else + chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); +} + +static void handle_band(struct wiphy *wiphy, enum ieee80211_band band) +{ + unsigned int i; + struct ieee80211_supported_band *sband; + + BUG_ON(!wiphy->bands[band]); + sband = wiphy->bands[band]; + + for (i = 0; i < sband->n_channels; i++) + handle_channel(wiphy, band, i); +} + +static bool ignore_reg_update(struct wiphy *wiphy, + enum nl80211_reg_initiator initiator) +{ + if (!last_request) + return true; + if (initiator == NL80211_REGDOM_SET_BY_CORE && + wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) + return true; + /* + * wiphy->regd will be set once the device has its own + * desired regulatory domain set + */ + if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd && + !is_world_regdom(last_request->alpha2)) + return true; + return false; +} + +static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) +{ + struct cfg80211_registered_device *rdev; + + list_for_each_entry(rdev, &cfg80211_rdev_list, list) + wiphy_update_regulatory(&rdev->wiphy, initiator); +} + +static void handle_reg_beacon(struct wiphy *wiphy, + unsigned int chan_idx, + struct reg_beacon *reg_beacon) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_channel *chan; + bool channel_changed = false; + struct ieee80211_channel chan_before; + + assert_cfg80211_lock(); + + sband = wiphy->bands[reg_beacon->chan.band]; + chan = &sband->channels[chan_idx]; + + if (likely(chan->center_freq != reg_beacon->chan.center_freq)) + return; + + if (chan->beacon_found) + return; + + chan->beacon_found = true; + + if (wiphy->flags & WIPHY_FLAG_DISABLE_BEACON_HINTS) + return; + + chan_before.center_freq = chan->center_freq; + chan_before.flags = chan->flags; + + if (chan->flags & 
IEEE80211_CHAN_PASSIVE_SCAN) { + chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; + channel_changed = true; + } + + if (chan->flags & IEEE80211_CHAN_NO_IBSS) { + chan->flags &= ~IEEE80211_CHAN_NO_IBSS; + channel_changed = true; + } + + if (channel_changed) + nl80211_send_beacon_hint_event(wiphy, &chan_before, chan); +} + +/* + * Called when a scan on a wiphy finds a beacon on + * new channel + */ +static void wiphy_update_new_beacon(struct wiphy *wiphy, + struct reg_beacon *reg_beacon) +{ + unsigned int i; + struct ieee80211_supported_band *sband; + + assert_cfg80211_lock(); + + if (!wiphy->bands[reg_beacon->chan.band]) + return; + + sband = wiphy->bands[reg_beacon->chan.band]; + + for (i = 0; i < sband->n_channels; i++) + handle_reg_beacon(wiphy, i, reg_beacon); +} + +/* + * Called upon reg changes or a new wiphy is added + */ +static void wiphy_update_beacon_reg(struct wiphy *wiphy) +{ + unsigned int i; + struct ieee80211_supported_band *sband; + struct reg_beacon *reg_beacon; + + assert_cfg80211_lock(); + + if (list_empty(®_beacon_list)) + return; + + list_for_each_entry(reg_beacon, ®_beacon_list, list) { + if (!wiphy->bands[reg_beacon->chan.band]) + continue; + sband = wiphy->bands[reg_beacon->chan.band]; + for (i = 0; i < sband->n_channels; i++) + handle_reg_beacon(wiphy, i, reg_beacon); + } +} + +static bool reg_is_world_roaming(struct wiphy *wiphy) +{ + if (is_world_regdom(cfg80211_regdomain->alpha2) || + (wiphy->regd && is_world_regdom(wiphy->regd->alpha2))) + return true; + if (last_request && + last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && + wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) + return true; + return false; +} + +/* Reap the advantages of previously found beacons */ +static void reg_process_beacons(struct wiphy *wiphy) +{ + /* + * Means we are just firing up cfg80211, so no beacons would + * have been processed yet. + */ + if (!last_request) + return; + if (!reg_is_world_roaming(wiphy)) + return; + wiphy_update_beacon_reg(wiphy); +} + +static bool is_ht40_not_allowed(struct ieee80211_channel *chan) +{ + if (!chan) + return true; + if (chan->flags & IEEE80211_CHAN_DISABLED) + return true; + /* This would happen when regulatory rules disallow HT40 completely */ + if (IEEE80211_CHAN_NO_HT40 == (chan->flags & (IEEE80211_CHAN_NO_HT40))) + return true; + return false; +} + +static void reg_process_ht_flags_channel(struct wiphy *wiphy, + enum ieee80211_band band, + unsigned int chan_idx) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_channel *channel; + struct ieee80211_channel *channel_before = NULL, *channel_after = NULL; + unsigned int i; + + assert_cfg80211_lock(); + + sband = wiphy->bands[band]; + BUG_ON(chan_idx >= sband->n_channels); + channel = &sband->channels[chan_idx]; + + if (is_ht40_not_allowed(channel)) { + channel->flags |= IEEE80211_CHAN_NO_HT40; + return; + } + + /* + * We need to ensure the extension channels exist to + * be able to use HT40- or HT40+, this finds them (or not) + */ + for (i = 0; i < sband->n_channels; i++) { + struct ieee80211_channel *c = &sband->channels[i]; + if (c->center_freq == (channel->center_freq - 20)) + channel_before = c; + if (c->center_freq == (channel->center_freq + 20)) + channel_after = c; + } + + /* + * Please note that this assumes target bandwidth is 20 MHz, + * if that ever changes we also need to change the below logic + * to include that as well. 
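/*
 * Usage sketch (the driver function below is hypothetical):
 * handle_reg_beacon() above lifts IEEE80211_CHAN_PASSIVE_SCAN and
 * IEEE80211_CHAN_NO_IBSS once a beacon is seen on a channel. A driver that
 * wants to keep those restrictions under its own control can opt out of
 * beacon hints before wiphy_register():
 */
static void example_driver_disable_beacon_hints(struct wiphy *wiphy)
{
	wiphy->flags |= WIPHY_FLAG_DISABLE_BEACON_HINTS;
}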
+ */ + if (is_ht40_not_allowed(channel_before)) + channel->flags |= IEEE80211_CHAN_NO_HT40MINUS; + else + channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; + + if (is_ht40_not_allowed(channel_after)) + channel->flags |= IEEE80211_CHAN_NO_HT40PLUS; + else + channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; +} + +static void reg_process_ht_flags_band(struct wiphy *wiphy, + enum ieee80211_band band) +{ + unsigned int i; + struct ieee80211_supported_band *sband; + + BUG_ON(!wiphy->bands[band]); + sband = wiphy->bands[band]; + + for (i = 0; i < sband->n_channels; i++) + reg_process_ht_flags_channel(wiphy, band, i); +} + +static void reg_process_ht_flags(struct wiphy *wiphy) +{ + enum ieee80211_band band; + + if (!wiphy) + return; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (wiphy->bands[band]) + reg_process_ht_flags_band(wiphy, band); + } + +} + +void wiphy_update_regulatory(struct wiphy *wiphy, + enum nl80211_reg_initiator initiator) +{ + enum ieee80211_band band; + + if (ignore_reg_update(wiphy, initiator)) + goto out; + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (wiphy->bands[band]) + handle_band(wiphy, band); + } +out: + reg_process_beacons(wiphy); + reg_process_ht_flags(wiphy); + if (wiphy->reg_notifier) + wiphy->reg_notifier(wiphy, last_request); +} + +static void handle_channel_custom(struct wiphy *wiphy, + enum ieee80211_band band, + unsigned int chan_idx, + const struct ieee80211_regdomain *regd) +{ + int r; + u32 desired_bw_khz = MHZ_TO_KHZ(20); + u32 bw_flags = 0; + const struct ieee80211_reg_rule *reg_rule = NULL; + const struct ieee80211_power_rule *power_rule = NULL; + const struct ieee80211_freq_range *freq_range = NULL; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *chan; + + assert_reg_lock(); + + sband = wiphy->bands[band]; + BUG_ON(chan_idx >= sband->n_channels); + chan = &sband->channels[chan_idx]; + + r = freq_reg_info_regd(wiphy, + MHZ_TO_KHZ(chan->center_freq), + desired_bw_khz, + ®_rule, + regd); + + if (r) { + chan->flags = IEEE80211_CHAN_DISABLED; + return; + } + + power_rule = ®_rule->power_rule; + freq_range = ®_rule->freq_range; + + if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40)) + bw_flags = IEEE80211_CHAN_NO_HT40; + + chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; + chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); + chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); +} + +static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band, + const struct ieee80211_regdomain *regd) +{ + unsigned int i; + struct ieee80211_supported_band *sband; + + BUG_ON(!wiphy->bands[band]); + sband = wiphy->bands[band]; + + for (i = 0; i < sband->n_channels; i++) + handle_channel_custom(wiphy, band, i, regd); +} + +/* Used by drivers prior to wiphy registration */ +void wiphy_apply_custom_regulatory(struct wiphy *wiphy, + const struct ieee80211_regdomain *regd) +{ + enum ieee80211_band band; + unsigned int bands_set = 0; + + mutex_lock(®_mutex); + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (!wiphy->bands[band]) + continue; + handle_band_custom(wiphy, band, regd); + bands_set++; + } + mutex_unlock(®_mutex); + + /* + * no point in calling this if it won't have any effect + * on your device's supportd bands. 
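/*
 * Usage sketch (illustration; the driver, rules and values are made up):
 * a driver with regulatory data in its EEPROM can apply its own rules with
 * wiphy_apply_custom_regulatory() above, typically paired with
 * WIPHY_FLAG_CUSTOM_REGULATORY so that core hints do not override it (see
 * ignore_reg_update() above). REG_RULE() is assumed to be the initializer
 * helper from include/net/regulatory.h taking MHz/dBi/dBm values.
 */
static const struct ieee80211_regdomain example_custom_regd = {
	.n_reg_rules = 2,
	.alpha2 = "99",
	.reg_rules = {
		/* channels 1-11 on 2.4 GHz */
		REG_RULE(2412 - 10, 2462 + 10, 40, 6, 20, 0),
		/* channels 36-48 on 5 GHz, passive scan */
		REG_RULE(5180 - 10, 5240 + 10, 40, 6, 20,
			 NL80211_RRF_PASSIVE_SCAN),
	},
};

static void example_driver_apply_custom_regd(struct wiphy *wiphy)
{
	wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
	wiphy_apply_custom_regulatory(wiphy, &example_custom_regd);
}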
+ */ + WARN_ON(!bands_set); +} +EXPORT_SYMBOL(wiphy_apply_custom_regulatory); + +/* + * Return value which can be used by ignore_request() to indicate + * it has been determined we should intersect two regulatory domains + */ +#define REG_INTERSECT 1 + +/* This has the logic which determines when a new request + * should be ignored. */ +static int ignore_request(struct wiphy *wiphy, + struct regulatory_request *pending_request) +{ + struct wiphy *last_wiphy = NULL; + + assert_cfg80211_lock(); + + /* All initial requests are respected */ + if (!last_request) + return 0; + + switch (pending_request->initiator) { + case NL80211_REGDOM_SET_BY_CORE: + return 0; + case NL80211_REGDOM_SET_BY_COUNTRY_IE: + + last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); + + if (unlikely(!is_an_alpha2(pending_request->alpha2))) + return -EINVAL; + if (last_request->initiator == + NL80211_REGDOM_SET_BY_COUNTRY_IE) { + if (last_wiphy != wiphy) { + /* + * Two cards with two APs claiming different + * Country IE alpha2s. We could + * intersect them, but that seems unlikely + * to be correct. Reject second one for now. + */ + if (regdom_changes(pending_request->alpha2)) + return -EOPNOTSUPP; + return -EALREADY; + } + /* + * Two consecutive Country IE hints on the same wiphy. + * This should be picked up early by the driver/stack + */ + if (WARN_ON(regdom_changes(pending_request->alpha2))) + return 0; + return -EALREADY; + } + return REG_INTERSECT; + case NL80211_REGDOM_SET_BY_DRIVER: + if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) { + if (regdom_changes(pending_request->alpha2)) + return 0; + return -EALREADY; + } + + /* + * This would happen if you unplug and plug your card + * back in or if you add a new device for which the previously + * loaded card also agrees on the regulatory domain. + */ + if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && + !regdom_changes(pending_request->alpha2)) + return -EALREADY; + + return REG_INTERSECT; + case NL80211_REGDOM_SET_BY_USER: + if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) + return REG_INTERSECT; + /* + * If the user knows better the user should set the regdom + * to their country before the IE is picked up + */ + if (last_request->initiator == NL80211_REGDOM_SET_BY_USER && + last_request->intersect) + return -EOPNOTSUPP; + /* + * Process user requests only after previous user/driver/core + * requests have been processed + */ + if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE || + last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER || + last_request->initiator == NL80211_REGDOM_SET_BY_USER) { + if (regdom_changes(last_request->alpha2)) + return -EAGAIN; + } + + if (!regdom_changes(pending_request->alpha2)) + return -EALREADY; + + return 0; + } + + return -EINVAL; +} + +/** + * __regulatory_hint - hint to the wireless core a regulatory domain + * @wiphy: if the hint comes from country information from an AP, this + * is required to be set to the wiphy that received the information + * @pending_request: the regulatory request currently being processed + * + * The Wireless subsystem can use this function to hint to the wireless core + * what it believes should be the current regulatory domain. + * + * Returns zero if all went fine, %-EALREADY if a regulatory domain had + * already been set or other standard error codes. 
+ * + * Caller must hold &cfg80211_mutex and ®_mutex + */ +static int __regulatory_hint(struct wiphy *wiphy, + struct regulatory_request *pending_request) +{ + bool intersect = false; + int r = 0; + + assert_cfg80211_lock(); + + r = ignore_request(wiphy, pending_request); + + if (r == REG_INTERSECT) { + if (pending_request->initiator == + NL80211_REGDOM_SET_BY_DRIVER) { + r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain); + if (r) { + kfree(pending_request); + return r; + } + } + intersect = true; + } else if (r) { + /* + * If the regulatory domain being requested by the + * driver has already been set just copy it to the + * wiphy + */ + if (r == -EALREADY && + pending_request->initiator == + NL80211_REGDOM_SET_BY_DRIVER) { + r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain); + if (r) { + kfree(pending_request); + return r; + } + r = -EALREADY; + goto new_request; + } + kfree(pending_request); + return r; + } + +new_request: + kfree(last_request); + + last_request = pending_request; + last_request->intersect = intersect; + + pending_request = NULL; + + if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) { + user_alpha2[0] = last_request->alpha2[0]; + user_alpha2[1] = last_request->alpha2[1]; + } + + /* When r == REG_INTERSECT we do need to call CRDA */ + if (r < 0) { + /* + * Since CRDA will not be called in this case as we already + * have applied the requested regulatory domain before we just + * inform userspace we have processed the request + */ + if (r == -EALREADY) + nl80211_send_reg_change_event(last_request); + return r; + } + + return call_crda(last_request->alpha2); +} + +/* This processes *all* regulatory hints */ +static void reg_process_hint(struct regulatory_request *reg_request) +{ + int r = 0; + struct wiphy *wiphy = NULL; + + BUG_ON(!reg_request->alpha2); + + mutex_lock(&cfg80211_mutex); + mutex_lock(®_mutex); + + if (wiphy_idx_valid(reg_request->wiphy_idx)) + wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); + + if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && + !wiphy) { + kfree(reg_request); + goto out; + } + + r = __regulatory_hint(wiphy, reg_request); + /* This is required so that the orig_* parameters are saved */ + if (r == -EALREADY && wiphy && + wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) + wiphy_update_regulatory(wiphy, reg_request->initiator); +out: + mutex_unlock(®_mutex); + mutex_unlock(&cfg80211_mutex); +} + +/* Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* */ +static void reg_process_pending_hints(void) + { + struct regulatory_request *reg_request; + + spin_lock(®_requests_lock); + while (!list_empty(®_requests_list)) { + reg_request = list_first_entry(®_requests_list, + struct regulatory_request, + list); + list_del_init(®_request->list); + + spin_unlock(®_requests_lock); + reg_process_hint(reg_request); + spin_lock(®_requests_lock); + } + spin_unlock(®_requests_lock); +} + +/* Processes beacon hints -- this has nothing to do with country IEs */ +static void reg_process_pending_beacon_hints(void) +{ + struct cfg80211_registered_device *rdev; + struct reg_beacon *pending_beacon, *tmp; + + /* + * No need to hold the reg_mutex here as we just touch wiphys + * and do not read or access regulatory variables. 
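/*
 * Note added for clarity (not in the original patch): reg_process_hint()
 * above takes cfg80211_mutex and reg_mutex and, through __regulatory_hint(),
 * may end up calling CRDA via call_crda(); it can therefore sleep. That is
 * why reg_process_pending_hints() drops the reg_requests_lock spinlock
 * around each call and re-acquires it before looking at the list again.
 */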
+ */ + mutex_lock(&cfg80211_mutex); + + /* This goes through the _pending_ beacon list */ + spin_lock_bh(®_pending_beacons_lock); + + if (list_empty(®_pending_beacons)) { + spin_unlock_bh(®_pending_beacons_lock); + goto out; + } + + list_for_each_entry_safe(pending_beacon, tmp, + ®_pending_beacons, list) { + + list_del_init(&pending_beacon->list); + + /* Applies the beacon hint to current wiphys */ + list_for_each_entry(rdev, &cfg80211_rdev_list, list) + wiphy_update_new_beacon(&rdev->wiphy, pending_beacon); + + /* Remembers the beacon hint for new wiphys or reg changes */ + list_add_tail(&pending_beacon->list, ®_beacon_list); + } + + spin_unlock_bh(®_pending_beacons_lock); +out: + mutex_unlock(&cfg80211_mutex); +} + +static void reg_todo(struct work_struct *work) +{ + reg_process_pending_hints(); + reg_process_pending_beacon_hints(); +} + +static DECLARE_WORK(reg_work, reg_todo); + +static void queue_regulatory_request(struct regulatory_request *request) +{ + spin_lock(®_requests_lock); + list_add_tail(&request->list, ®_requests_list); + spin_unlock(®_requests_lock); + + schedule_work(®_work); +} + +/* + * Core regulatory hint -- happens during cfg80211_init() + * and when we restore regulatory settings. + */ +static int regulatory_hint_core(const char *alpha2) +{ + struct regulatory_request *request; + + kfree(last_request); + last_request = NULL; + + request = kzalloc(sizeof(struct regulatory_request), + GFP_KERNEL); + if (!request) + return -ENOMEM; + + request->alpha2[0] = alpha2[0]; + request->alpha2[1] = alpha2[1]; + request->initiator = NL80211_REGDOM_SET_BY_CORE; + + /* + * This ensures last_request is populated once modules + * come swinging in and calling regulatory hints and + * wiphy_apply_custom_regulatory(). + */ + reg_process_hint(request); + + return 0; +} + +/* User hints */ +int regulatory_hint_user(const char *alpha2) +{ + struct regulatory_request *request; + + BUG_ON(!alpha2); + + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); + if (!request) + return -ENOMEM; + + request->wiphy_idx = WIPHY_IDX_STALE; + request->alpha2[0] = alpha2[0]; + request->alpha2[1] = alpha2[1]; + request->initiator = NL80211_REGDOM_SET_BY_USER; + + queue_regulatory_request(request); + + return 0; +} + +/* Driver hints */ +int regulatory_hint(struct wiphy *wiphy, const char *alpha2) +{ + struct regulatory_request *request; + + BUG_ON(!alpha2); + BUG_ON(!wiphy); + + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); + if (!request) + return -ENOMEM; + + request->wiphy_idx = get_wiphy_idx(wiphy); + + /* Must have registered wiphy first */ + BUG_ON(!wiphy_idx_valid(request->wiphy_idx)); + + request->alpha2[0] = alpha2[0]; + request->alpha2[1] = alpha2[1]; + request->initiator = NL80211_REGDOM_SET_BY_DRIVER; + + queue_regulatory_request(request); + + return 0; +} +EXPORT_SYMBOL(regulatory_hint); + +/* Caller must hold reg_mutex */ +static bool reg_same_country_ie_hint(struct wiphy *wiphy, + u32 country_ie_checksum) +{ + struct wiphy *request_wiphy; + + assert_reg_lock(); + + if (unlikely(last_request->initiator != + NL80211_REGDOM_SET_BY_COUNTRY_IE)) + return false; + + request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); + + if (!request_wiphy) + return false; + + if (likely(request_wiphy != wiphy)) + return !country_ie_integrity_changes(country_ie_checksum); + /* + * We should not have let these through at this point, they + * should have been picked up earlier by the first alpha2 check + * on the device + */ + if 
(WARN_ON(!country_ie_integrity_changes(country_ie_checksum))) + return true; + return false; +} + +/* + * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and + * therefore cannot iterate over the rdev list here. + */ +void regulatory_hint_11d(struct wiphy *wiphy, + enum ieee80211_band band, + u8 *country_ie, + u8 country_ie_len) +{ + struct ieee80211_regdomain *rd = NULL; + char alpha2[2]; + u32 checksum = 0; + enum environment_cap env = ENVIRON_ANY; + struct regulatory_request *request; + + mutex_lock(®_mutex); + + if (unlikely(!last_request)) + goto out; + + /* IE len must be evenly divisible by 2 */ + if (country_ie_len & 0x01) + goto out; + + if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) + goto out; + + /* + * Pending country IE processing, this can happen after we + * call CRDA and wait for a response if a beacon was received before + * we were able to process the last regulatory_hint_11d() call + */ + if (country_ie_regdomain) + goto out; + + alpha2[0] = country_ie[0]; + alpha2[1] = country_ie[1]; + + if (country_ie[2] == 'I') + env = ENVIRON_INDOOR; + else if (country_ie[2] == 'O') + env = ENVIRON_OUTDOOR; + + /* + * We will run this only upon a successful connection on cfg80211. + * We leave conflict resolution to the workqueue, where can hold + * cfg80211_mutex. + */ + if (likely(last_request->initiator == + NL80211_REGDOM_SET_BY_COUNTRY_IE && + wiphy_idx_valid(last_request->wiphy_idx))) + goto out; + + rd = country_ie_2_rd(band, country_ie, country_ie_len, &checksum); + if (!rd) { + REG_DBG_PRINT("cfg80211: Ignoring bogus country IE\n"); + goto out; + } + + /* + * This will not happen right now but we leave it here for the + * the future when we want to add suspend/resume support and having + * the user move to another country after doing so, or having the user + * move to another AP. Right now we just trust the first AP. + * + * If we hit this before we add this support we want to be informed of + * it as it would indicate a mistake in the current design + */ + if (WARN_ON(reg_same_country_ie_hint(wiphy, checksum))) + goto free_rd_out; + + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); + if (!request) + goto free_rd_out; + + /* + * We keep this around for when CRDA comes back with a response so + * we can intersect with that + */ + country_ie_regdomain = rd; + + request->wiphy_idx = get_wiphy_idx(wiphy); + request->alpha2[0] = rd->alpha2[0]; + request->alpha2[1] = rd->alpha2[1]; + request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE; + request->country_ie_checksum = checksum; + request->country_ie_env = env; + + mutex_unlock(®_mutex); + + queue_regulatory_request(request); + + return; + +free_rd_out: + kfree(rd); +out: + mutex_unlock(®_mutex); +} + +static void restore_alpha2(char *alpha2, bool reset_user) +{ + /* indicates there is no alpha2 to consider for restoration */ + alpha2[0] = '9'; + alpha2[1] = '7'; + + /* The user setting has precedence over the module parameter */ + if (is_user_regdom_saved()) { + /* Unless we're asked to ignore it and reset it */ + if (reset_user) { + REG_DBG_PRINT("cfg80211: Restoring regulatory settings " + "including user preference\n"); + user_alpha2[0] = '9'; + user_alpha2[1] = '7'; + + /* + * If we're ignoring user settings, we still need to + * check the module parameter to ensure we put things + * back as they were for a full restore. 
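/*
 * Usage sketch (the caller below is hypothetical): after a successful
 * connection the 802.11 stack can feed the AP's country IE into
 * regulatory_hint_11d() above, together with the band the BSS was found on.
 */
static void example_country_ie_hint(struct wiphy *wiphy,
				    enum ieee80211_band band,
				    u8 *country_ie, u8 country_ie_len)
{
	if (country_ie && country_ie_len >= IEEE80211_COUNTRY_IE_MIN_LEN)
		regulatory_hint_11d(wiphy, band, country_ie, country_ie_len);
}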
+ */ + if (!is_world_regdom(ieee80211_regdom)) { + REG_DBG_PRINT("cfg80211: Keeping preference on " + "module parameter ieee80211_regdom: %c%c\n", + ieee80211_regdom[0], + ieee80211_regdom[1]); + alpha2[0] = ieee80211_regdom[0]; + alpha2[1] = ieee80211_regdom[1]; + } + } else { + REG_DBG_PRINT("cfg80211: Restoring regulatory settings " + "while preserving user preference for: %c%c\n", + user_alpha2[0], + user_alpha2[1]); + alpha2[0] = user_alpha2[0]; + alpha2[1] = user_alpha2[1]; + } + } else if (!is_world_regdom(ieee80211_regdom)) { + REG_DBG_PRINT("cfg80211: Keeping preference on " + "module parameter ieee80211_regdom: %c%c\n", + ieee80211_regdom[0], + ieee80211_regdom[1]); + alpha2[0] = ieee80211_regdom[0]; + alpha2[1] = ieee80211_regdom[1]; + } else + REG_DBG_PRINT("cfg80211: Restoring regulatory settings\n"); +} + +/* + * Restoring regulatory settings involves ingoring any + * possibly stale country IE information and user regulatory + * settings if so desired, this includes any beacon hints + * learned as we could have traveled outside to another country + * after disconnection. To restore regulatory settings we do + * exactly what we did at bootup: + * + * - send a core regulatory hint + * - send a user regulatory hint if applicable + * + * Device drivers that send a regulatory hint for a specific country + * keep their own regulatory domain on wiphy->regd so that does does + * not need to be remembered. + */ +static void restore_regulatory_settings(bool reset_user) +{ + char alpha2[2]; + struct reg_beacon *reg_beacon, *btmp; + + mutex_lock(&cfg80211_mutex); + mutex_lock(®_mutex); + + reset_regdomains(); + restore_alpha2(alpha2, reset_user); + + /* Clear beacon hints */ + spin_lock_bh(®_pending_beacons_lock); + if (!list_empty(®_pending_beacons)) { + list_for_each_entry_safe(reg_beacon, btmp, + ®_pending_beacons, list) { + list_del(®_beacon->list); + kfree(reg_beacon); + } + } + spin_unlock_bh(®_pending_beacons_lock); + + if (!list_empty(®_beacon_list)) { + list_for_each_entry_safe(reg_beacon, btmp, + ®_beacon_list, list) { + list_del(®_beacon->list); + kfree(reg_beacon); + } + } + + /* First restore to the basic regulatory settings */ + cfg80211_regdomain = cfg80211_world_regdom; + + mutex_unlock(®_mutex); + mutex_unlock(&cfg80211_mutex); + + regulatory_hint_core(cfg80211_regdomain->alpha2); + + /* + * This restores the ieee80211_regdom module parameter + * preference or the last user requested regulatory + * settings, user regulatory settings takes precedence. 
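/*
 * Illustration derived from the code above (helper name made up): this file
 * uses pseudo alpha2 values internally -- "98" is set by regdom_intersect()
 * for an intersected domain and "97" by restore_alpha2() to mean "nothing to
 * restore". Real ISO 3166-1 alpha2 codes contain only letters, so "97"
 * presumably fails the is_an_alpha2() check below and no user hint is
 * replayed in that case.
 */
static bool example_is_pseudo_alpha2(const char *alpha2)
{
	return alpha2[0] == '9' &&
	       (alpha2[1] == '7' || alpha2[1] == '8');
}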
+ */ + if (is_an_alpha2(alpha2)) + regulatory_hint_user(user_alpha2); +} + + +void regulatory_hint_disconnect(void) +{ + REG_DBG_PRINT("cfg80211: All devices are disconnected, going to " + "restore regulatory settings\n"); + restore_regulatory_settings(false); +} + +static bool freq_is_chan_12_13_14(u16 freq) +{ + if (freq == ieee80211_channel_to_frequency(12) || + freq == ieee80211_channel_to_frequency(13) || + freq == ieee80211_channel_to_frequency(14)) + return true; + return false; +} + +int regulatory_hint_found_beacon(struct wiphy *wiphy, + struct ieee80211_channel *beacon_chan, + gfp_t gfp) +{ + struct reg_beacon *reg_beacon; + + if (likely((beacon_chan->beacon_found || + (beacon_chan->flags & IEEE80211_CHAN_RADAR) || + (beacon_chan->band == IEEE80211_BAND_2GHZ && + !freq_is_chan_12_13_14(beacon_chan->center_freq))))) + return 0; + + reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp); + if (!reg_beacon) + return -ENOMEM; + + REG_DBG_PRINT("cfg80211: Found new beacon on " + "frequency: %d MHz (Ch %d) on %s\n", + beacon_chan->center_freq, + ieee80211_frequency_to_channel(beacon_chan->center_freq), + wiphy_name(wiphy)); + + memcpy(®_beacon->chan, beacon_chan, + sizeof(struct ieee80211_channel)); + + + /* + * Since we can be called from BH or and non-BH context + * we must use spin_lock_bh() + */ + spin_lock_bh(®_pending_beacons_lock); + list_add_tail(®_beacon->list, ®_pending_beacons); + spin_unlock_bh(®_pending_beacons_lock); + + schedule_work(®_work); + + return 0; +} + +static void print_rd_rules(const struct ieee80211_regdomain *rd) +{ + unsigned int i; + const struct ieee80211_reg_rule *reg_rule = NULL; + const struct ieee80211_freq_range *freq_range = NULL; + const struct ieee80211_power_rule *power_rule = NULL; + + printk(KERN_INFO " (start_freq - end_freq @ bandwidth), " + "(max_antenna_gain, max_eirp)\n"); + + for (i = 0; i < rd->n_reg_rules; i++) { + reg_rule = &rd->reg_rules[i]; + freq_range = ®_rule->freq_range; + power_rule = ®_rule->power_rule; + + /* + * There may not be documentation for max antenna gain + * in certain regions + */ + if (power_rule->max_antenna_gain) + printk(KERN_INFO " (%d KHz - %d KHz @ %d KHz), " + "(%d mBi, %d mBm)\n", + freq_range->start_freq_khz, + freq_range->end_freq_khz, + freq_range->max_bandwidth_khz, + power_rule->max_antenna_gain, + power_rule->max_eirp); + else + printk(KERN_INFO " (%d KHz - %d KHz @ %d KHz), " + "(N/A, %d mBm)\n", + freq_range->start_freq_khz, + freq_range->end_freq_khz, + freq_range->max_bandwidth_khz, + power_rule->max_eirp); + } +} + +static void print_regdomain(const struct ieee80211_regdomain *rd) +{ + + if (is_intersected_alpha2(rd->alpha2)) { + + if (last_request->initiator == + NL80211_REGDOM_SET_BY_COUNTRY_IE) { + struct cfg80211_registered_device *rdev; + rdev = cfg80211_rdev_by_wiphy_idx( + last_request->wiphy_idx); + if (rdev) { + printk(KERN_INFO "cfg80211: Current regulatory " + "domain updated by AP to: %c%c\n", + rdev->country_ie_alpha2[0], + rdev->country_ie_alpha2[1]); + } else + printk(KERN_INFO "cfg80211: Current regulatory " + "domain intersected: \n"); + } else + printk(KERN_INFO "cfg80211: Current regulatory " + "domain intersected: \n"); + } else if (is_world_regdom(rd->alpha2)) + printk(KERN_INFO "cfg80211: World regulatory " + "domain updated:\n"); + else { + if (is_unknown_alpha2(rd->alpha2)) + printk(KERN_INFO "cfg80211: Regulatory domain " + "changed to driver built-in settings " + "(unknown country)\n"); + else + printk(KERN_INFO "cfg80211: Regulatory domain " + "changed to country: 
%c%c\n", + rd->alpha2[0], rd->alpha2[1]); + } + print_rd_rules(rd); +} + +static void print_regdomain_info(const struct ieee80211_regdomain *rd) +{ + printk(KERN_INFO "cfg80211: Regulatory domain: %c%c\n", + rd->alpha2[0], rd->alpha2[1]); + print_rd_rules(rd); +} + +#ifdef CONFIG_CFG80211_REG_DEBUG +static void reg_country_ie_process_debug( + const struct ieee80211_regdomain *rd, + const struct ieee80211_regdomain *country_ie_regdomain, + const struct ieee80211_regdomain *intersected_rd) +{ + printk(KERN_DEBUG "cfg80211: Received country IE:\n"); + print_regdomain_info(country_ie_regdomain); + printk(KERN_DEBUG "cfg80211: CRDA thinks this should applied:\n"); + print_regdomain_info(rd); + if (intersected_rd) { + printk(KERN_DEBUG "cfg80211: We intersect both of these " + "and get:\n"); + print_regdomain_info(intersected_rd); + return; + } + printk(KERN_DEBUG "cfg80211: Intersection between both failed\n"); +} +#else +static inline void reg_country_ie_process_debug( + const struct ieee80211_regdomain *rd, + const struct ieee80211_regdomain *country_ie_regdomain, + const struct ieee80211_regdomain *intersected_rd) +{ +} +#endif + +/* Takes ownership of rd only if it doesn't fail */ +static int __set_regdom(const struct ieee80211_regdomain *rd) +{ + const struct ieee80211_regdomain *intersected_rd = NULL; + struct cfg80211_registered_device *rdev = NULL; + struct wiphy *request_wiphy; + /* Some basic sanity checks first */ + + if (is_world_regdom(rd->alpha2)) { + if (WARN_ON(!reg_is_valid_request(rd->alpha2))) + return -EINVAL; + update_world_regdomain(rd); + return 0; + } + + if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) && + !is_unknown_alpha2(rd->alpha2)) + return -EINVAL; + + if (!last_request) + return -EINVAL; + + /* + * Lets only bother proceeding on the same alpha2 if the current + * rd is non static (it means CRDA was present and was used last) + * and the pending request came in from a country IE + */ + if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) { + /* + * If someone else asked us to change the rd lets only bother + * checking if the alpha2 changes if CRDA was already called + */ + if (!regdom_changes(rd->alpha2)) + return -EINVAL; + } + + /* + * Now lets set the regulatory domain, update all driver channels + * and finally inform them of what we have done, in case they want + * to review or adjust their own settings based on their own + * internal EEPROM data + */ + + if (WARN_ON(!reg_is_valid_request(rd->alpha2))) + return -EINVAL; + + if (!is_valid_rd(rd)) { + printk(KERN_ERR "cfg80211: Invalid " + "regulatory domain detected:\n"); + print_regdomain_info(rd); + return -EINVAL; + } + + request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); + + if (!last_request->intersect) { + int r; + + if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) { + reset_regdomains(); + cfg80211_regdomain = rd; + return 0; + } + + /* + * For a driver hint, lets copy the regulatory domain the + * driver wanted to the wiphy to deal with conflicts + */ + + /* + * Userspace could have sent two replies with only + * one kernel request. 
+ */ + if (request_wiphy->regd) + return -EALREADY; + + r = reg_copy_regd(&request_wiphy->regd, rd); + if (r) + return r; + + reset_regdomains(); + cfg80211_regdomain = rd; + return 0; + } + + /* Intersection requires a bit more work */ + + if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) { + + intersected_rd = regdom_intersect(rd, cfg80211_regdomain); + if (!intersected_rd) + return -EINVAL; + + /* + * We can trash what CRDA provided now. + * However if a driver requested this specific regulatory + * domain we keep it for its private use + */ + if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER) + request_wiphy->regd = rd; + else + kfree(rd); + + rd = NULL; + + reset_regdomains(); + cfg80211_regdomain = intersected_rd; + + return 0; + } + + /* + * Country IE requests are handled a bit differently, we intersect + * the country IE rd with what CRDA believes that country should have + */ + + /* + * Userspace could have sent two replies with only + * one kernel request. By the second reply we would have + * already processed and consumed the country_ie_regdomain. + */ + if (!country_ie_regdomain) + return -EALREADY; + BUG_ON(rd == country_ie_regdomain); + + /* + * Intersect what CRDA returned and our what we + * had built from the Country IE received + */ + + intersected_rd = regdom_intersect(rd, country_ie_regdomain); + + reg_country_ie_process_debug(rd, + country_ie_regdomain, + intersected_rd); + + kfree(country_ie_regdomain); + country_ie_regdomain = NULL; + + if (!intersected_rd) + return -EINVAL; + + rdev = wiphy_to_dev(request_wiphy); + + rdev->country_ie_alpha2[0] = rd->alpha2[0]; + rdev->country_ie_alpha2[1] = rd->alpha2[1]; + rdev->env = last_request->country_ie_env; + + BUG_ON(intersected_rd == rd); + + kfree(rd); + rd = NULL; + + reset_regdomains(); + cfg80211_regdomain = intersected_rd; + + return 0; +} + + +/* + * Use this call to set the current regulatory domain. Conflicts with + * multiple drivers can be ironed out later. Caller must've already + * kmalloc'd the rd structure. 
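/*
 * Usage sketch (simplified and hypothetical; in practice this is what the
 * nl80211 SET_REG handling is expected to do): set_regdom() below must be
 * called with cfg80211_mutex held and takes ownership of rd, freeing it
 * itself when it fails.
 */
static int example_apply_crda_reply(struct ieee80211_regdomain *rd)
{
	int r;

	mutex_lock(&cfg80211_mutex);
	r = set_regdom(rd);	/* on failure rd has already been freed */
	mutex_unlock(&cfg80211_mutex);

	return r;
}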
Caller must hold cfg80211_mutex + */ +int set_regdom(const struct ieee80211_regdomain *rd) +{ + int r; + + assert_cfg80211_lock(); + + mutex_lock(®_mutex); + + /* Note that this doesn't update the wiphys, this is done below */ + r = __set_regdom(rd); + if (r) { + kfree(rd); + mutex_unlock(®_mutex); + return r; + } + + /* This would make this whole thing pointless */ + if (!last_request->intersect) + BUG_ON(rd != cfg80211_regdomain); + + /* update all wiphys now with the new established regulatory domain */ + update_all_wiphy_regulatory(last_request->initiator); + + print_regdomain(cfg80211_regdomain); + + nl80211_send_reg_change_event(last_request); + + mutex_unlock(®_mutex); + + return r; +} + +/* Caller must hold cfg80211_mutex */ +void reg_device_remove(struct wiphy *wiphy) +{ + struct wiphy *request_wiphy = NULL; + + assert_cfg80211_lock(); + + mutex_lock(®_mutex); + + kfree(wiphy->regd); + + if (last_request) + request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); + + if (!request_wiphy || request_wiphy != wiphy) + goto out; + + last_request->wiphy_idx = WIPHY_IDX_STALE; + last_request->country_ie_env = ENVIRON_ANY; +out: + mutex_unlock(®_mutex); +} + +int regulatory_init(void) +{ + int err = 0; + + reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0); + if (IS_ERR(reg_pdev)) + return PTR_ERR(reg_pdev); + + spin_lock_init(®_requests_lock); + spin_lock_init(®_pending_beacons_lock); + + cfg80211_regdomain = cfg80211_world_regdom; + + user_alpha2[0] = '9'; + user_alpha2[1] = '7'; + + /* We always try to get an update for the static regdomain */ + err = regulatory_hint_core(cfg80211_regdomain->alpha2); + if (err) { + if (err == -ENOMEM) + return err; + /* + * N.B. kobject_uevent_env() can fail mainly for when we're out + * memory which is handled and propagated appropriately above + * but it can also fail during a netlink_broadcast() or during + * early boot for call_usermodehelper(). For now treat these + * errors as non-fatal. + */ + printk(KERN_ERR "cfg80211: kobject_uevent_env() was unable " + "to call CRDA during init"); +#ifdef CONFIG_CFG80211_REG_DEBUG + /* We want to find out exactly why when debugging */ + WARN_ON(err); +#endif + } + + /* + * Finally, if the user set the module parameter treat it + * as a user hint. 
+ */ + if (!is_world_regdom(ieee80211_regdom)) + regulatory_hint_user(ieee80211_regdom); + + return 0; +} + +void regulatory_exit(void) +{ + struct regulatory_request *reg_request, *tmp; + struct reg_beacon *reg_beacon, *btmp; + + cancel_work_sync(®_work); + + mutex_lock(&cfg80211_mutex); + mutex_lock(®_mutex); + + reset_regdomains(); + + kfree(country_ie_regdomain); + country_ie_regdomain = NULL; + + kfree(last_request); + + platform_device_unregister(reg_pdev); + + spin_lock_bh(®_pending_beacons_lock); + if (!list_empty(®_pending_beacons)) { + list_for_each_entry_safe(reg_beacon, btmp, + ®_pending_beacons, list) { + list_del(®_beacon->list); + kfree(reg_beacon); + } + } + spin_unlock_bh(®_pending_beacons_lock); + + if (!list_empty(®_beacon_list)) { + list_for_each_entry_safe(reg_beacon, btmp, + ®_beacon_list, list) { + list_del(®_beacon->list); + kfree(reg_beacon); + } + } + + spin_lock(®_requests_lock); + if (!list_empty(®_requests_list)) { + list_for_each_entry_safe(reg_request, tmp, + ®_requests_list, list) { + list_del(®_request->list); + kfree(reg_request); + } + } + spin_unlock(®_requests_lock); + + mutex_unlock(®_mutex); + mutex_unlock(&cfg80211_mutex); +} diff --git a/net/wireless/reg.h b/net/wireless/reg.h new file mode 100644 index 0000000..b26224a --- /dev/null +++ b/net/wireless/reg.h @@ -0,0 +1,84 @@ +#ifndef __NET_WIRELESS_REG_H +#define __NET_WIRELESS_REG_H + +extern const struct ieee80211_regdomain *cfg80211_regdomain; + +bool is_world_regdom(const char *alpha2); +bool reg_is_valid_request(const char *alpha2); + +int regulatory_hint_user(const char *alpha2); + +void reg_device_remove(struct wiphy *wiphy); + +int regulatory_init(void); +void regulatory_exit(void); + +int set_regdom(const struct ieee80211_regdomain *rd); + +/** + * regulatory_hint_found_beacon - hints a beacon was found on a channel + * @wiphy: the wireless device where the beacon was found on + * @beacon_chan: the channel on which the beacon was found on + * @gfp: context flags + * + * This informs the wireless core that a beacon from an AP was found on + * the channel provided. This allows the wireless core to make educated + * guesses on regulatory to help with world roaming. This is only used for + * world roaming -- when we do not know our current location. This is + * only useful on channels 12, 13 and 14 on the 2 GHz band as channels + * 1-11 are already enabled by the world regulatory domain; and on + * non-radar 5 GHz channels. + * + * Drivers do not need to call this, cfg80211 will do it for after a scan + * on a newly found BSS. If you cannot make use of this feature you can + * set the wiphy->disable_beacon_hints to true. + */ +int regulatory_hint_found_beacon(struct wiphy *wiphy, + struct ieee80211_channel *beacon_chan, + gfp_t gfp); + +/** + * regulatory_hint_11d - hints a country IE as a regulatory domain + * @wiphy: the wireless device giving the hint (used only for reporting + * conflicts) + * @band: the band on which the country IE was received on. This determines + * the band we'll process the country IE channel triplets for. + * @country_ie: pointer to the country IE + * @country_ie_len: length of the country IE + * + * We will intersect the rd with the what CRDA tells us should apply + * for the alpha2 this country IE belongs to, this prevents APs from + * sending us incorrect or outdated information against a country. + * + * The AP is expected to provide Country IE channel triplets for the + * band it is on. 
It is technically possible for APs to send channel + * country IE triplets even for channels outside of the band they are + * in but for that they would have to use the regulatory extension + * in combination with a triplet but this behaviour is currently + * not observed. For this reason if a triplet is seen with channel + * information for a band the BSS is not present in it will be ignored. + */ +void regulatory_hint_11d(struct wiphy *wiphy, + enum ieee80211_band band, + u8 *country_ie, + u8 country_ie_len); + +/** + * regulatory_hint_disconnect - informs all devices have been disconneted + * + * Regulotory rules can be enhanced further upon scanning and upon + * connection to an AP. These rules become stale if we disconnect + * and go to another country, whether or not we suspend and resume. + * If we suspend, go to another country and resume we'll automatically + * get disconnected shortly after resuming and things will be reset as well. + * This routine is a helper to restore regulatory settings to how they were + * prior to our first connect attempt. This includes ignoring country IE and + * beacon regulatory hints. The ieee80211_regdom module parameter will always + * be respected but if a user had set the regulatory domain that will take + * precedence. + * + * Must be called from process context. + */ +void regulatory_hint_disconnect(void); + +#endif /* __NET_WIRELESS_REG_H */ diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h new file mode 100644 index 0000000..818222c --- /dev/null +++ b/net/wireless/regdb.h @@ -0,0 +1,7 @@ +#ifndef __REGDB_H__ +#define __REGDB_H__ + +extern const struct ieee80211_regdomain *reg_regdb[]; +extern int reg_regdb_size; + +#endif /* __REGDB_H__ */ diff --git a/net/wireless/scan.c b/net/wireless/scan.c new file mode 100644 index 0000000..03b836d --- /dev/null +++ b/net/wireless/scan.c @@ -0,0 +1,1111 @@ +/* + * cfg80211 scan result handling + * + * Copyright 2008 Johannes Berg + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "core.h" +#include "nl80211.h" +#include "wext-compat.h" + +#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) + +void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) +{ + struct cfg80211_scan_request *request; + struct net_device *dev; +#ifdef CONFIG_CFG80211_WEXT + union iwreq_data wrqu; +#endif + + ASSERT_RDEV_LOCK(rdev); + + request = rdev->scan_req; + + if (!request) + return; + + dev = request->dev; + + /* + * This must be before sending the other events! + * Otherwise, wpa_supplicant gets completely confused with + * wext events. + */ + cfg80211_sme_scan_done(dev); + + if (request->aborted) + nl80211_send_scan_aborted(rdev, dev); + else + nl80211_send_scan_done(rdev, dev); + +#ifdef CONFIG_CFG80211_WEXT + if (!request->aborted) { + memset(&wrqu, 0, sizeof(wrqu)); + + wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); + } +#endif + + dev_put(dev); + + rdev->scan_req = NULL; + + /* + * OK. If this is invoked with "leak" then we can't + * free this ... but we've cleaned it up anyway. The + * driver failed to call the scan_done callback, so + * all bets are off, it might still be trying to use + * the scan request or not ... if it accesses the dev + * in there (it shouldn't anyway) then it may crash. 
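/*
 * Usage sketch (the wrapper below is hypothetical): a driver finishing a
 * hardware scan reports the result with cfg80211_scan_done(), defined just
 * below; the request pointer must be the one cfg80211 passed to the
 * driver's ->scan() callback.
 */
static void example_driver_scan_complete(struct cfg80211_scan_request *req,
					 bool aborted)
{
	cfg80211_scan_done(req, aborted);
}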
+ */ + if (!leak) + kfree(request); +} + +void __cfg80211_scan_done(struct work_struct *wk) +{ + struct cfg80211_registered_device *rdev; + + rdev = container_of(wk, struct cfg80211_registered_device, + scan_done_wk); + + cfg80211_lock_rdev(rdev); + ___cfg80211_scan_done(rdev, false); + cfg80211_unlock_rdev(rdev); +} + +void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) +{ + WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); + + request->aborted = aborted; + queue_work(cfg80211_wq, &wiphy_to_dev(request->wiphy)->scan_done_wk); +} +EXPORT_SYMBOL(cfg80211_scan_done); + +static void bss_release(struct kref *ref) +{ + struct cfg80211_internal_bss *bss; + + bss = container_of(ref, struct cfg80211_internal_bss, ref); + if (bss->pub.free_priv) + bss->pub.free_priv(&bss->pub); + + if (bss->beacon_ies_allocated) + kfree(bss->pub.beacon_ies); + if (bss->proberesp_ies_allocated) + kfree(bss->pub.proberesp_ies); + + BUG_ON(atomic_read(&bss->hold)); + + kfree(bss); +} + +/* must hold dev->bss_lock! */ +void cfg80211_bss_age(struct cfg80211_registered_device *dev, + unsigned long age_secs) +{ + struct cfg80211_internal_bss *bss; + unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC); + + list_for_each_entry(bss, &dev->bss_list, list) { + bss->ts -= age_jiffies; + } +} + +/* must hold dev->bss_lock! */ +void cfg80211_bss_expire(struct cfg80211_registered_device *dev) +{ + struct cfg80211_internal_bss *bss, *tmp; + bool expired = false; + + list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { + if (atomic_read(&bss->hold)) + continue; + if (!time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE)) + continue; + list_del(&bss->list); + rb_erase(&bss->rbn, &dev->bss_tree); + kref_put(&bss->ref, bss_release); + expired = true; + } + + if (expired) + dev->bss_generation++; +} + +const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) +{ + while (len > 2 && ies[0] != eid) { + len -= ies[1] + 2; + ies += ies[1] + 2; + } + if (len < 2) + return NULL; + if (len < 2 + ies[1]) + return NULL; + return ies; +} +EXPORT_SYMBOL(cfg80211_find_ie); + +static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2) +{ + const u8 *ie1 = cfg80211_find_ie(num, ies1, len1); + const u8 *ie2 = cfg80211_find_ie(num, ies2, len2); + int r; + + if (!ie1 && !ie2) + return 0; + if (!ie1 || !ie2) + return -1; + + r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1])); + if (r == 0 && ie1[1] != ie2[1]) + return ie2[1] - ie1[1]; + return r; +} + +static bool is_bss(struct cfg80211_bss *a, + const u8 *bssid, + const u8 *ssid, size_t ssid_len) +{ + const u8 *ssidie; + + if (bssid && compare_ether_addr(a->bssid, bssid)) + return false; + + if (!ssid) + return true; + + ssidie = cfg80211_find_ie(WLAN_EID_SSID, + a->information_elements, + a->len_information_elements); + if (!ssidie) + return false; + if (ssidie[1] != ssid_len) + return false; + return memcmp(ssidie + 2, ssid, ssid_len) == 0; +} + +static bool is_mesh(struct cfg80211_bss *a, + const u8 *meshid, size_t meshidlen, + const u8 *meshcfg) +{ + const u8 *ie; + + if (!is_zero_ether_addr(a->bssid)) + return false; + + ie = cfg80211_find_ie(WLAN_EID_MESH_ID, + a->information_elements, + a->len_information_elements); + if (!ie) + return false; + if (ie[1] != meshidlen) + return false; + if (memcmp(ie + 2, meshid, meshidlen)) + return false; + + ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, + a->information_elements, + a->len_information_elements); + if (!ie) + return false; + if (ie[1] != sizeof(struct 
ieee80211_meshconf_ie)) + return false; + + /* + * Ignore mesh capability (last two bytes of the IE) when + * comparing since that may differ between stations taking + * part in the same mesh. + */ + return memcmp(ie + 2, meshcfg, + sizeof(struct ieee80211_meshconf_ie) - 2) == 0; +} + +static int cmp_bss(struct cfg80211_bss *a, + struct cfg80211_bss *b) +{ + int r; + + if (a->channel != b->channel) + return b->channel->center_freq - a->channel->center_freq; + + r = memcmp(a->bssid, b->bssid, ETH_ALEN); + if (r) + return r; + + if (is_zero_ether_addr(a->bssid)) { + r = cmp_ies(WLAN_EID_MESH_ID, + a->information_elements, + a->len_information_elements, + b->information_elements, + b->len_information_elements); + if (r) + return r; + return cmp_ies(WLAN_EID_MESH_CONFIG, + a->information_elements, + a->len_information_elements, + b->information_elements, + b->len_information_elements); + } + + return cmp_ies(WLAN_EID_SSID, + a->information_elements, + a->len_information_elements, + b->information_elements, + b->len_information_elements); +} + +struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, + const u8 *ssid, size_t ssid_len, + u16 capa_mask, u16 capa_val) +{ + struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); + struct cfg80211_internal_bss *bss, *res = NULL; + + spin_lock_bh(&dev->bss_lock); + + list_for_each_entry(bss, &dev->bss_list, list) { + if ((bss->pub.capability & capa_mask) != capa_val) + continue; + if (channel && bss->pub.channel != channel) + continue; + if (is_bss(&bss->pub, bssid, ssid, ssid_len)) { + res = bss; + kref_get(&res->ref); + break; + } + } + + spin_unlock_bh(&dev->bss_lock); + if (!res) + return NULL; + return &res->pub; +} +EXPORT_SYMBOL(cfg80211_get_bss); + +struct cfg80211_bss *cfg80211_get_mesh(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *meshid, size_t meshidlen, + const u8 *meshcfg) +{ + struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); + struct cfg80211_internal_bss *bss, *res = NULL; + + spin_lock_bh(&dev->bss_lock); + + list_for_each_entry(bss, &dev->bss_list, list) { + if (channel && bss->pub.channel != channel) + continue; + if (is_mesh(&bss->pub, meshid, meshidlen, meshcfg)) { + res = bss; + kref_get(&res->ref); + break; + } + } + + spin_unlock_bh(&dev->bss_lock); + if (!res) + return NULL; + return &res->pub; +} +EXPORT_SYMBOL(cfg80211_get_mesh); + + +static void rb_insert_bss(struct cfg80211_registered_device *dev, + struct cfg80211_internal_bss *bss) +{ + struct rb_node **p = &dev->bss_tree.rb_node; + struct rb_node *parent = NULL; + struct cfg80211_internal_bss *tbss; + int cmp; + + while (*p) { + parent = *p; + tbss = rb_entry(parent, struct cfg80211_internal_bss, rbn); + + cmp = cmp_bss(&bss->pub, &tbss->pub); + + if (WARN_ON(!cmp)) { + /* will sort of leak this BSS */ + return; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&bss->rbn, parent, p); + rb_insert_color(&bss->rbn, &dev->bss_tree); +} + +static struct cfg80211_internal_bss * +rb_find_bss(struct cfg80211_registered_device *dev, + struct cfg80211_internal_bss *res) +{ + struct rb_node *n = dev->bss_tree.rb_node; + struct cfg80211_internal_bss *bss; + int r; + + while (n) { + bss = rb_entry(n, struct cfg80211_internal_bss, rbn); + r = cmp_bss(&res->pub, &bss->pub); + + if (r == 0) + return bss; + else if (r < 0) + n = n->rb_left; + else + n = n->rb_right; + } + + return NULL; +} + +static struct cfg80211_internal_bss * 
+cfg80211_bss_update(struct cfg80211_registered_device *dev, + struct cfg80211_internal_bss *res) +{ + struct cfg80211_internal_bss *found = NULL; + const u8 *meshid, *meshcfg; + + /* + * The reference to "res" is donated to this function. + */ + + if (WARN_ON(!res->pub.channel)) { + kref_put(&res->ref, bss_release); + return NULL; + } + + res->ts = jiffies; + + if (is_zero_ether_addr(res->pub.bssid)) { + /* must be mesh, verify */ + meshid = cfg80211_find_ie(WLAN_EID_MESH_ID, + res->pub.information_elements, + res->pub.len_information_elements); + meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG, + res->pub.information_elements, + res->pub.len_information_elements); + if (!meshid || !meshcfg || + meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) { + /* bogus mesh */ + kref_put(&res->ref, bss_release); + return NULL; + } + } + + spin_lock_bh(&dev->bss_lock); + + found = rb_find_bss(dev, res); + + if (found) { + found->pub.beacon_interval = res->pub.beacon_interval; + found->pub.tsf = res->pub.tsf; + found->pub.signal = res->pub.signal; + found->pub.capability = res->pub.capability; + found->ts = res->ts; + + /* Update IEs */ + if (res->pub.proberesp_ies) { + size_t used = dev->wiphy.bss_priv_size + sizeof(*res); + size_t ielen = res->pub.len_proberesp_ies; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,28) + if (0) { + used = 0; /* just to shut up the compiler */ +#else + if (found->pub.proberesp_ies && + !found->proberesp_ies_allocated && + ksize(found) >= used + ielen) { +#endif + memcpy(found->pub.proberesp_ies, + res->pub.proberesp_ies, ielen); + found->pub.len_proberesp_ies = ielen; + } else { + u8 *ies = found->pub.proberesp_ies; + + if (found->proberesp_ies_allocated) + ies = krealloc(ies, ielen, GFP_ATOMIC); + else + ies = kmalloc(ielen, GFP_ATOMIC); + + if (ies) { + memcpy(ies, res->pub.proberesp_ies, + ielen); + found->proberesp_ies_allocated = true; + found->pub.proberesp_ies = ies; + found->pub.len_proberesp_ies = ielen; + } + } + + /* Override possible earlier Beacon frame IEs */ + found->pub.information_elements = + found->pub.proberesp_ies; + found->pub.len_information_elements = + found->pub.len_proberesp_ies; + } + if (res->pub.beacon_ies) { + size_t used = dev->wiphy.bss_priv_size + sizeof(*res); + size_t ielen = res->pub.len_beacon_ies; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,28) + if (0) { + used = 0; /* just to shut up the compiler */ +#else + if (found->pub.beacon_ies && + !found->beacon_ies_allocated && + ksize(found) >= used + ielen) { +#endif + memcpy(found->pub.beacon_ies, + res->pub.beacon_ies, ielen); + found->pub.len_beacon_ies = ielen; + } else { + u8 *ies = found->pub.beacon_ies; + + if (found->beacon_ies_allocated) + ies = krealloc(ies, ielen, GFP_ATOMIC); + else + ies = kmalloc(ielen, GFP_ATOMIC); + + if (ies) { + memcpy(ies, res->pub.beacon_ies, + ielen); + found->beacon_ies_allocated = true; + found->pub.beacon_ies = ies; + found->pub.len_beacon_ies = ielen; + } + } + } + + kref_put(&res->ref, bss_release); + } else { + /* this "consumes" the reference */ + list_add_tail(&res->list, &dev->bss_list); + rb_insert_bss(dev, res); + found = res; + } + + dev->bss_generation++; + spin_unlock_bh(&dev->bss_lock); + + kref_get(&found->ref); + return found; +} + +struct cfg80211_bss* +cfg80211_inform_bss(struct wiphy *wiphy, + struct ieee80211_channel *channel, + const u8 *bssid, + u64 timestamp, u16 capability, u16 beacon_interval, + const u8 *ie, size_t ielen, + s32 signal, gfp_t gfp) +{ + struct cfg80211_internal_bss *res; + size_t privsz; + + if 
(WARN_ON(!wiphy)) + return NULL; + + privsz = wiphy->bss_priv_size; + + if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && + (signal < 0 || signal > 100))) + return NULL; + + res = kzalloc(sizeof(*res) + privsz + ielen, gfp); + if (!res) + return NULL; + + memcpy(res->pub.bssid, bssid, ETH_ALEN); + res->pub.channel = channel; + res->pub.signal = signal; + res->pub.tsf = timestamp; + res->pub.beacon_interval = beacon_interval; + res->pub.capability = capability; + /* + * Since we do not know here whether the IEs are from a Beacon or Probe + * Response frame, we need to pick one of the options and only use it + * with the driver that does not provide the full Beacon/Probe Response + * frame. Use Beacon frame pointer to avoid indicating that this should + * override the information_elements pointer should we have received an + * earlier indication of Probe Response data. + * + * The initial buffer for the IEs is allocated with the BSS entry and + * is located after the private area. + */ + res->pub.beacon_ies = (u8 *)res + sizeof(*res) + privsz; + memcpy(res->pub.beacon_ies, ie, ielen); + res->pub.len_beacon_ies = ielen; + res->pub.information_elements = res->pub.beacon_ies; + res->pub.len_information_elements = res->pub.len_beacon_ies; + + kref_init(&res->ref); + + res = cfg80211_bss_update(wiphy_to_dev(wiphy), res); + if (!res) + return NULL; + + if (res->pub.capability & WLAN_CAPABILITY_ESS) + regulatory_hint_found_beacon(wiphy, channel, gfp); + + /* cfg80211_bss_update gives us a referenced result */ + return &res->pub; +} +EXPORT_SYMBOL(cfg80211_inform_bss); + +struct cfg80211_bss * +cfg80211_inform_bss_frame(struct wiphy *wiphy, + struct ieee80211_channel *channel, + struct ieee80211_mgmt *mgmt, size_t len, + s32 signal, gfp_t gfp) +{ + struct cfg80211_internal_bss *res; + size_t ielen = len - offsetof(struct ieee80211_mgmt, + u.probe_resp.variable); + size_t privsz = wiphy->bss_priv_size; + + if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && + (signal < 0 || signal > 100))) + return NULL; + + if (WARN_ON(!mgmt || !wiphy || + len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable))) + return NULL; + + res = kzalloc(sizeof(*res) + privsz + ielen, gfp); + if (!res) + return NULL; + + memcpy(res->pub.bssid, mgmt->bssid, ETH_ALEN); + res->pub.channel = channel; + res->pub.signal = signal; + res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); + res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); + res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); + /* + * The initial buffer for the IEs is allocated with the BSS entry and + * is located after the private area. 
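Illustrative sketch, not part of the patch: a minimal example of how a driver might feed a received beacon into the cfg80211_inform_bss() API defined here. The mydrv_report_beacon() wrapper and its argument layout are hypothetical, and the example assumes the wiphy registered with CFG80211_SIGNAL_TYPE_MBM; the cfg80211 calls themselves match the definitions in this file.

#include <net/cfg80211.h>

static void mydrv_report_beacon(struct wiphy *wiphy,
				struct ieee80211_channel *chan,
				const u8 *bssid, const u8 *ies, size_t ielen,
				u64 tsf, u16 capab, u16 beacon_int,
				int rssi_dbm)
{
	struct cfg80211_bss *bss;

	/* signal is in mBm (100 * dBm) for CFG80211_SIGNAL_TYPE_MBM wiphys */
	bss = cfg80211_inform_bss(wiphy, chan, bssid, tsf, capab, beacon_int,
				  ies, ielen, rssi_dbm * 100, GFP_ATOMIC);
	if (!bss)
		return;		/* allocation failed or the entry was rejected */

	/* the result is returned referenced; release it when done */
	cfg80211_put_bss(bss);
}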
+ */ + if (ieee80211_is_probe_resp(mgmt->frame_control)) { + res->pub.proberesp_ies = (u8 *) res + sizeof(*res) + privsz; + memcpy(res->pub.proberesp_ies, mgmt->u.probe_resp.variable, + ielen); + res->pub.len_proberesp_ies = ielen; + res->pub.information_elements = res->pub.proberesp_ies; + res->pub.len_information_elements = res->pub.len_proberesp_ies; + } else { + res->pub.beacon_ies = (u8 *) res + sizeof(*res) + privsz; + memcpy(res->pub.beacon_ies, mgmt->u.beacon.variable, ielen); + res->pub.len_beacon_ies = ielen; + res->pub.information_elements = res->pub.beacon_ies; + res->pub.len_information_elements = res->pub.len_beacon_ies; + } + + kref_init(&res->ref); + + res = cfg80211_bss_update(wiphy_to_dev(wiphy), res); + if (!res) + return NULL; + + if (res->pub.capability & WLAN_CAPABILITY_ESS) + regulatory_hint_found_beacon(wiphy, channel, gfp); + + /* cfg80211_bss_update gives us a referenced result */ + return &res->pub; +} +EXPORT_SYMBOL(cfg80211_inform_bss_frame); + +void cfg80211_put_bss(struct cfg80211_bss *pub) +{ + struct cfg80211_internal_bss *bss; + + if (!pub) + return; + + bss = container_of(pub, struct cfg80211_internal_bss, pub); + kref_put(&bss->ref, bss_release); +} +EXPORT_SYMBOL(cfg80211_put_bss); + +void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) +{ + struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); + struct cfg80211_internal_bss *bss; + + if (WARN_ON(!pub)) + return; + + bss = container_of(pub, struct cfg80211_internal_bss, pub); + + spin_lock_bh(&dev->bss_lock); + + list_del(&bss->list); + dev->bss_generation++; + rb_erase(&bss->rbn, &dev->bss_tree); + + spin_unlock_bh(&dev->bss_lock); + + kref_put(&bss->ref, bss_release); +} +EXPORT_SYMBOL(cfg80211_unlink_bss); + +#ifdef CONFIG_CFG80211_WEXT +int cfg80211_wext_siwscan(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct cfg80211_registered_device *rdev; + struct wiphy *wiphy; + struct iw_scan_req *wreq = NULL; + struct cfg80211_scan_request *creq = NULL; + int i, err, n_channels = 0; + enum ieee80211_band band; + + if (!netif_running(dev)) + return -ENETDOWN; + + if (wrqu->data.length == sizeof(struct iw_scan_req)) + wreq = (struct iw_scan_req *)extra; + + rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex); + + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + + if (rdev->scan_req) { + err = -EBUSY; + goto out; + } + + wiphy = &rdev->wiphy; + + /* Determine number of channels, needed to allocate creq */ + if (wreq && wreq->num_channels) + n_channels = wreq->num_channels; + else { + for (band = 0; band < IEEE80211_NUM_BANDS; band++) + if (wiphy->bands[band]) + n_channels += wiphy->bands[band]->n_channels; + } + + creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + + n_channels * sizeof(void *), + GFP_ATOMIC); + if (!creq) { + err = -ENOMEM; + goto out; + } + + creq->wiphy = wiphy; + creq->dev = dev; + /* SSIDs come after channels */ + creq->ssids = (void *)&creq->channels[n_channels]; + creq->n_channels = n_channels; + creq->n_ssids = 1; + + /* translate "Scan on frequencies" request */ + i = 0; + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + int j; + + if (!wiphy->bands[band]) + continue; + + for (j = 0; j < wiphy->bands[band]->n_channels; j++) { + /* ignore disabled channels */ + if (wiphy->bands[band]->channels[j].flags & + IEEE80211_CHAN_DISABLED) + continue; + + /* If we have a wireless request structure and the + * wireless request specifies frequencies, then search + * for the matching 
hardware channel. + */ + if (wreq && wreq->num_channels) { + int k; + int wiphy_freq = wiphy->bands[band]->channels[j].center_freq; + for (k = 0; k < wreq->num_channels; k++) { + int wext_freq = cfg80211_wext_freq(wiphy, &wreq->channel_list[k]); + if (wext_freq == wiphy_freq) + goto wext_freq_found; + } + goto wext_freq_not_found; + } + + wext_freq_found: + creq->channels[i] = &wiphy->bands[band]->channels[j]; + i++; + wext_freq_not_found: ; + } + } + /* No channels found? */ + if (!i) { + err = -EINVAL; + goto out; + } + + /* Set real number of channels specified in creq->channels[] */ + creq->n_channels = i; + + /* translate "Scan for SSID" request */ + if (wreq) { + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) { + err = -EINVAL; + goto out; + } + memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len); + creq->ssids[0].ssid_len = wreq->essid_len; + } + if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) + creq->n_ssids = 0; + } + + rdev->scan_req = creq; + err = rdev->ops->scan(wiphy, dev, creq); + if (err) { + rdev->scan_req = NULL; + /* creq will be freed below */ + } else { + nl80211_send_scan_start(rdev, dev); + /* creq now owned by driver */ + creq = NULL; + dev_hold(dev); + } + out: + kfree(creq); + cfg80211_unlock_rdev(rdev); + return err; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan); + +static void ieee80211_scan_add_ies(struct iw_request_info *info, + struct cfg80211_bss *bss, + char **current_ev, char *end_buf) +{ + u8 *pos, *end, *next; + struct iw_event iwe; + + if (!bss->information_elements || + !bss->len_information_elements) + return; + + /* + * If needed, fragment the IEs buffer (at IE boundaries) into short + * enough fragments to fit into IW_GENERIC_IE_MAX octet messages. + */ + pos = bss->information_elements; + end = pos + bss->len_information_elements; + + while (end - pos > IW_GENERIC_IE_MAX) { + next = pos + 2 + pos[1]; + while (next + 2 + next[1] - pos < IW_GENERIC_IE_MAX) + next = next + 2 + next[1]; + + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = IWEVGENIE; + iwe.u.data.length = next - pos; + *current_ev = iwe_stream_add_point(info, *current_ev, + end_buf, &iwe, pos); + + pos = next; + } + + if (end > pos) { + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = IWEVGENIE; + iwe.u.data.length = end - pos; + *current_ev = iwe_stream_add_point(info, *current_ev, + end_buf, &iwe, pos); + } +} + +static inline unsigned int elapsed_jiffies_msecs(unsigned long start) +{ + unsigned long end = jiffies; + + if (end >= start) + return jiffies_to_msecs(end - start); + + return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1); +} + +static char * +ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info, + struct cfg80211_internal_bss *bss, char *current_ev, + char *end_buf) +{ + struct iw_event iwe; + u8 *buf, *cfg, *p; + u8 *ie = bss->pub.information_elements; + int rem = bss->pub.len_information_elements, i, sig; + bool ismesh = false; + + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, bss->pub.bssid, ETH_ALEN); + current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, + IW_EV_ADDR_LEN); + + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = SIOCGIWFREQ; + iwe.u.freq.m = ieee80211_frequency_to_channel(bss->pub.channel->center_freq); + iwe.u.freq.e = 0; + current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, + IW_EV_FREQ_LEN); + + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = SIOCGIWFREQ; + iwe.u.freq.m = bss->pub.channel->center_freq; + 
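For reference, the two SIOCGIWFREQ events being built here report the same channel twice under wireless-extensions conventions: once as a channel number (e = 0) and once as the center frequency, with the value interpreted as m * 10^e so e = 6 turns MHz into Hz. Below is a standalone illustration of the channel/frequency mapping that ieee80211_frequency_to_channel() above and its inverse in util.c implement; the function body of ieee80211_bss() resumes right after it.

/* Standalone illustration, not part of the patch. */
#include <assert.h>

static int chan_to_freq(int chan)
{
	if (chan == 14)
		return 2484;			/* channel 14 is special-cased */
	if (chan < 14)
		return 2407 + chan * 5;		/* 2.4 GHz: 1 -> 2412, 6 -> 2437 */
	return (chan + 1000) * 5;		/* 5 GHz: 36 -> 5180, 149 -> 5745 */
}

int main(void)
{
	assert(chan_to_freq(1) == 2412);
	assert(chan_to_freq(14) == 2484);
	assert(chan_to_freq(36) == 5180);
	return 0;
}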
iwe.u.freq.e = 6; + current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, + IW_EV_FREQ_LEN); + + if (wiphy->signal_type != CFG80211_SIGNAL_TYPE_NONE) { + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = IWEVQUAL; + iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED | + IW_QUAL_NOISE_INVALID | + IW_QUAL_QUAL_UPDATED; + switch (wiphy->signal_type) { + case CFG80211_SIGNAL_TYPE_MBM: + sig = bss->pub.signal / 100; + iwe.u.qual.level = sig; + iwe.u.qual.updated |= IW_QUAL_DBM; + if (sig < -110) /* rather bad */ + sig = -110; + else if (sig > -40) /* perfect */ + sig = -40; + /* will give a range of 0 .. 70 */ + iwe.u.qual.qual = sig + 110; + break; + case CFG80211_SIGNAL_TYPE_UNSPEC: + iwe.u.qual.level = bss->pub.signal; + /* will give range 0 .. 100 */ + iwe.u.qual.qual = bss->pub.signal; + break; + default: + /* not reached */ + break; + } + current_ev = iwe_stream_add_event(info, current_ev, end_buf, + &iwe, IW_EV_QUAL_LEN); + } + + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = SIOCGIWENCODE; + if (bss->pub.capability & WLAN_CAPABILITY_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + current_ev = iwe_stream_add_point(info, current_ev, end_buf, + &iwe, ""); + + while (rem >= 2) { + /* invalid data */ + if (ie[1] > rem - 2) + break; + + switch (ie[0]) { + case WLAN_EID_SSID: + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.length = ie[1]; + iwe.u.data.flags = 1; + current_ev = iwe_stream_add_point(info, current_ev, end_buf, + &iwe, ie + 2); + break; + case WLAN_EID_MESH_ID: + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.length = ie[1]; + iwe.u.data.flags = 1; + current_ev = iwe_stream_add_point(info, current_ev, end_buf, + &iwe, ie + 2); + break; + case WLAN_EID_MESH_CONFIG: + ismesh = true; + if (ie[1] != sizeof(struct ieee80211_meshconf_ie)) + break; + buf = kmalloc(50, GFP_ATOMIC); + if (!buf) + break; + cfg = ie + 2; + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = IWEVCUSTOM; + sprintf(buf, "Mesh Network Path Selection Protocol ID: " + "0x%02X", cfg[0]); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, + end_buf, + &iwe, buf); + sprintf(buf, "Path Selection Metric ID: 0x%02X", + cfg[1]); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, + end_buf, + &iwe, buf); + sprintf(buf, "Congestion Control Mode ID: 0x%02X", + cfg[2]); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, + end_buf, + &iwe, buf); + sprintf(buf, "Synchronization ID: 0x%02X", cfg[3]); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, + end_buf, + &iwe, buf); + sprintf(buf, "Authentication ID: 0x%02X", cfg[4]); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, + end_buf, + &iwe, buf); + sprintf(buf, "Formation Info: 0x%02X", cfg[5]); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, + end_buf, + &iwe, buf); + sprintf(buf, "Capabilities: 0x%02X", cfg[6]); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, + end_buf, + &iwe, buf); + kfree(buf); + break; + case WLAN_EID_SUPP_RATES: + case WLAN_EID_EXT_SUPP_RATES: + /* display all supported rates in readable format */ + p = current_ev + iwe_stream_lcp_len(info); + + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = SIOCGIWRATE; + /* Those two flags are ignored... 
*/ + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + + for (i = 0; i < ie[1]; i++) { + iwe.u.bitrate.value = + ((ie[i + 2] & 0x7f) * 500000); + p = iwe_stream_add_value(info, current_ev, p, + end_buf, &iwe, IW_EV_PARAM_LEN); + } + current_ev = p; + break; + } + rem -= ie[1] + 2; + ie += ie[1] + 2; + } + + if (bss->pub.capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) || + ismesh) { + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = SIOCGIWMODE; + if (ismesh) + iwe.u.mode = IW_MODE_MESH; + else if (bss->pub.capability & WLAN_CAPABILITY_ESS) + iwe.u.mode = IW_MODE_MASTER; + else + iwe.u.mode = IW_MODE_ADHOC; + current_ev = iwe_stream_add_event(info, current_ev, end_buf, + &iwe, IW_EV_UINT_LEN); + } + + buf = kmalloc(30, GFP_ATOMIC); + if (buf) { + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = IWEVCUSTOM; + sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->pub.tsf)); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, end_buf, + &iwe, buf); + memset(&iwe, 0, sizeof(iwe)); + iwe.cmd = IWEVCUSTOM; + sprintf(buf, " Last beacon: %ums ago", + elapsed_jiffies_msecs(bss->ts)); + iwe.u.data.length = strlen(buf); + current_ev = iwe_stream_add_point(info, current_ev, + end_buf, &iwe, buf); + kfree(buf); + } + + ieee80211_scan_add_ies(info, &bss->pub, &current_ev, end_buf); + + return current_ev; +} + + +static int ieee80211_scan_results(struct cfg80211_registered_device *dev, + struct iw_request_info *info, + char *buf, size_t len) +{ + char *current_ev = buf; + char *end_buf = buf + len; + struct cfg80211_internal_bss *bss; + + spin_lock_bh(&dev->bss_lock); + cfg80211_bss_expire(dev); + + list_for_each_entry(bss, &dev->bss_list, list) { + if (buf + len - current_ev <= IW_EV_ADDR_LEN) { + spin_unlock_bh(&dev->bss_lock); + return -E2BIG; + } + current_ev = ieee80211_bss(&dev->wiphy, info, bss, + current_ev, end_buf); + } + spin_unlock_bh(&dev->bss_lock); + return current_ev - buf; +} + + +int cfg80211_wext_giwscan(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra) +{ + struct cfg80211_registered_device *rdev; + int res; + + if (!netif_running(dev)) + return -ENETDOWN; + + rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex); + + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + + if (rdev->scan_req) { + res = -EAGAIN; + goto out; + } + + res = ieee80211_scan_results(rdev, info, extra, data->length); + data->length = 0; + if (res >= 0) { + data->length = res; + res = 0; + } + + out: + cfg80211_unlock_rdev(rdev); + return res; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan); +#endif diff --git a/net/wireless/sme.c b/net/wireless/sme.c new file mode 100644 index 0000000..17fde0d --- /dev/null +++ b/net/wireless/sme.c @@ -0,0 +1,999 @@ +/* + * SME code for cfg80211's connect emulation. + * + * Copyright 2009 Johannes Berg + * Copyright (C) 2009 Intel Corporation. All rights reserved.
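Illustrative sketch, not part of the patch: the happy-path progression of the CFG80211_CONN_* sub-states declared just below, as inferred from cfg80211_conn_do_work(), cfg80211_sme_scan_done() and cfg80211_sme_rx_auth() later in this file. The enum is copied in simplified form so the sketch stands alone.

/* Illustrative copy of the sub-states declared below, simplified names. */
enum conn_state {
	CONN_IDLE,
	CONN_SCANNING,
	CONN_SCAN_AGAIN,
	CONN_AUTHENTICATE_NEXT,
	CONN_AUTHENTICATING,
	CONN_ASSOCIATE_NEXT,
	CONN_ASSOCIATING,
	CONN_DEAUTH_ASSOC_FAIL,
};

/* Next sub-state on the successful path. */
enum conn_state next_on_success(enum conn_state s)
{
	switch (s) {
	case CONN_SCANNING:
	case CONN_SCAN_AGAIN:
		return CONN_AUTHENTICATE_NEXT;	/* matching BSS found */
	case CONN_AUTHENTICATE_NEXT:
		return CONN_AUTHENTICATING;	/* auth frame sent */
	case CONN_AUTHENTICATING:
		return CONN_ASSOCIATE_NEXT;	/* auth response accepted */
	case CONN_ASSOCIATE_NEXT:
		return CONN_ASSOCIATING;	/* assoc request sent */
	case CONN_ASSOCIATING:
		return CONN_IDLE;		/* connected; sme_state becomes CONNECTED */
	default:
		return s;			/* IDLE and failure states stay put */
	}
}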
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "nl80211.h" +#include "reg.h" + +struct cfg80211_conn { + struct cfg80211_connect_params params; + /* these are sub-states of the _CONNECTING sme_state */ + enum { + CFG80211_CONN_IDLE, + CFG80211_CONN_SCANNING, + CFG80211_CONN_SCAN_AGAIN, + CFG80211_CONN_AUTHENTICATE_NEXT, + CFG80211_CONN_AUTHENTICATING, + CFG80211_CONN_ASSOCIATE_NEXT, + CFG80211_CONN_ASSOCIATING, + CFG80211_CONN_DEAUTH_ASSOC_FAIL, + } state; + u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; + u8 *ie; + size_t ie_len; + bool auto_auth, prev_bssid_valid; +}; + +bool cfg80211_is_all_idle(void) +{ + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + bool is_all_idle = true; + + mutex_lock(&cfg80211_mutex); + + /* + * All devices must be idle as otherwise if you are actively + * scanning some new beacon hints could be learned and would + * count as new regulatory hints. + */ + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + cfg80211_lock_rdev(rdev); + list_for_each_entry(wdev, &rdev->netdev_list, list) { + wdev_lock(wdev); + if (wdev->sme_state != CFG80211_SME_IDLE) + is_all_idle = false; + wdev_unlock(wdev); + } + cfg80211_unlock_rdev(rdev); + } + + mutex_unlock(&cfg80211_mutex); + + return is_all_idle; +} + +static void disconnect_work(struct work_struct *work) +{ + if (!cfg80211_is_all_idle()) + return; + + regulatory_hint_disconnect(); +} + +static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); + +static int cfg80211_conn_scan(struct wireless_dev *wdev) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_scan_request *request; + int n_channels, err; + + ASSERT_RTNL(); + ASSERT_RDEV_LOCK(rdev); + ASSERT_WDEV_LOCK(wdev); + + if (rdev->scan_req) + return -EBUSY; + + if (wdev->conn->params.channel) { + n_channels = 1; + } else { + enum ieee80211_band band; + n_channels = 0; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (!wdev->wiphy->bands[band]) + continue; + n_channels += wdev->wiphy->bands[band]->n_channels; + } + } + request = kzalloc(sizeof(*request) + sizeof(request->ssids[0]) + + sizeof(request->channels[0]) * n_channels, + GFP_KERNEL); + if (!request) + return -ENOMEM; + + if (wdev->conn->params.channel) + request->channels[0] = wdev->conn->params.channel; + else { + int i = 0, j; + enum ieee80211_band band; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (!wdev->wiphy->bands[band]) + continue; + for (j = 0; j < wdev->wiphy->bands[band]->n_channels; + i++, j++) + request->channels[i] = + &wdev->wiphy->bands[band]->channels[j]; + } + } + request->n_channels = n_channels; + request->ssids = (void *)&request->channels[n_channels]; + request->n_ssids = 1; + + memcpy(request->ssids[0].ssid, wdev->conn->params.ssid, + wdev->conn->params.ssid_len); + request->ssids[0].ssid_len = wdev->conn->params.ssid_len; + + request->dev = wdev->netdev; + request->wiphy = &rdev->wiphy; + + rdev->scan_req = request; + + err = rdev->ops->scan(wdev->wiphy, wdev->netdev, request); + if (!err) { + wdev->conn->state = CFG80211_CONN_SCANNING; + nl80211_send_scan_start(rdev, wdev->netdev); + dev_hold(wdev->netdev); + } else { + rdev->scan_req = NULL; + kfree(request); + } + return err; +} + +static int cfg80211_conn_do_work(struct wireless_dev *wdev) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_connect_params *params; + const u8 *prev_bssid = NULL; + int err; + + ASSERT_WDEV_LOCK(wdev); + + if (!wdev->conn) + 
return 0; + + params = &wdev->conn->params; + + switch (wdev->conn->state) { + case CFG80211_CONN_SCAN_AGAIN: + return cfg80211_conn_scan(wdev); + case CFG80211_CONN_AUTHENTICATE_NEXT: + BUG_ON(!rdev->ops->auth); + wdev->conn->state = CFG80211_CONN_AUTHENTICATING; + return __cfg80211_mlme_auth(rdev, wdev->netdev, + params->channel, params->auth_type, + params->bssid, + params->ssid, params->ssid_len, + NULL, 0, + params->key, params->key_len, + params->key_idx); + case CFG80211_CONN_ASSOCIATE_NEXT: + BUG_ON(!rdev->ops->assoc); + wdev->conn->state = CFG80211_CONN_ASSOCIATING; + if (wdev->conn->prev_bssid_valid) + prev_bssid = wdev->conn->prev_bssid; + err = __cfg80211_mlme_assoc(rdev, wdev->netdev, + params->channel, params->bssid, + prev_bssid, + params->ssid, params->ssid_len, + params->ie, params->ie_len, + false, &params->crypto); + if (err) + __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, + NULL, 0, + WLAN_REASON_DEAUTH_LEAVING); + return err; + case CFG80211_CONN_DEAUTH_ASSOC_FAIL: + __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, + NULL, 0, + WLAN_REASON_DEAUTH_LEAVING); + /* return an error so that we call __cfg80211_connect_result() */ + return -EINVAL; + default: + return 0; + } +} + +void cfg80211_conn_work(struct work_struct *work) +{ + struct cfg80211_registered_device *rdev = + container_of(work, struct cfg80211_registered_device, conn_work); + struct wireless_dev *wdev; + u8 bssid_buf[ETH_ALEN], *bssid = NULL; + + rtnl_lock(); + cfg80211_lock_rdev(rdev); + mutex_lock(&rdev->devlist_mtx); + + list_for_each_entry(wdev, &rdev->netdev_list, list) { + wdev_lock(wdev); + if (!netif_running(wdev->netdev)) { + wdev_unlock(wdev); + continue; + } + if (wdev->sme_state != CFG80211_SME_CONNECTING) { + wdev_unlock(wdev); + continue; + } + if (wdev->conn->params.bssid) { + memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN); + bssid = bssid_buf; + } + if (cfg80211_conn_do_work(wdev)) + __cfg80211_connect_result( + wdev->netdev, bssid, + NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + false, NULL); + wdev_unlock(wdev); + } + + mutex_unlock(&rdev->devlist_mtx); + cfg80211_unlock_rdev(rdev); + rtnl_unlock(); +} + +static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_bss *bss; + u16 capa = WLAN_CAPABILITY_ESS; + + ASSERT_WDEV_LOCK(wdev); + + if (wdev->conn->params.privacy) + capa |= WLAN_CAPABILITY_PRIVACY; + + bss = cfg80211_get_bss(wdev->wiphy, NULL, wdev->conn->params.bssid, + wdev->conn->params.ssid, + wdev->conn->params.ssid_len, + WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY, + capa); + if (!bss) + return NULL; + + memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN); + wdev->conn->params.bssid = wdev->conn->bssid; + wdev->conn->params.channel = bss->channel; + wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; + schedule_work(&rdev->conn_work); + + return bss; +} + +static void __cfg80211_sme_scan_done(struct net_device *dev) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_bss *bss; + + ASSERT_WDEV_LOCK(wdev); + + if (wdev->sme_state != CFG80211_SME_CONNECTING) + return; + + if (!wdev->conn) + return; + + if (wdev->conn->state != CFG80211_CONN_SCANNING && + wdev->conn->state != CFG80211_CONN_SCAN_AGAIN) + return; + + bss = cfg80211_get_conn_bss(wdev); + if (bss) { + cfg80211_put_bss(bss); + } else { + /* not found */ + if (wdev->conn->state ==
CFG80211_CONN_SCAN_AGAIN) + schedule_work(&rdev->conn_work); + else + __cfg80211_connect_result( + wdev->netdev, + wdev->conn->params.bssid, + NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + false, NULL); + } +} + +void cfg80211_sme_scan_done(struct net_device *dev) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); + wdev_lock(wdev); + __cfg80211_sme_scan_done(dev); + wdev_unlock(wdev); + mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); +} + +void cfg80211_sme_rx_auth(struct net_device *dev, + const u8 *buf, size_t len) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; + u16 status_code = le16_to_cpu(mgmt->u.auth.status_code); + + ASSERT_WDEV_LOCK(wdev); + + /* should only RX auth frames when connecting */ + if (wdev->sme_state != CFG80211_SME_CONNECTING) + return; + + if (WARN_ON(!wdev->conn)) + return; + + if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && + wdev->conn->auto_auth && + wdev->conn->params.auth_type != NL80211_AUTHTYPE_NETWORK_EAP) { + /* select automatically between only open, shared, leap */ + switch (wdev->conn->params.auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + if (wdev->connect_keys) + wdev->conn->params.auth_type = + NL80211_AUTHTYPE_SHARED_KEY; + else + wdev->conn->params.auth_type = + NL80211_AUTHTYPE_NETWORK_EAP; + break; + case NL80211_AUTHTYPE_SHARED_KEY: + wdev->conn->params.auth_type = + NL80211_AUTHTYPE_NETWORK_EAP; + break; + default: + /* huh? */ + wdev->conn->params.auth_type = + NL80211_AUTHTYPE_OPEN_SYSTEM; + break; + } + wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; + schedule_work(&rdev->conn_work); + } else if (status_code != WLAN_STATUS_SUCCESS) { + __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, + status_code, false, NULL); + } else if (wdev->sme_state == CFG80211_SME_CONNECTING && + wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { + wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; + schedule_work(&rdev->conn_work); + } +} + +bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev) +{ + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + if (WARN_ON(!wdev->conn)) + return false; + + if (!wdev->conn->prev_bssid_valid) + return false; + + /* + * Some stupid APs don't accept reassoc, so we + * need to fall back to trying regular assoc. 
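A standalone restatement of the auto-auth fallback implemented in cfg80211_sme_rx_auth() above: open system falls back to shared key only when a WEP key is configured, then to LEAP (NETWORK_EAP). The local enum is an illustrative stand-in for the nl80211 auth-type constants, not part of the patch.

/* Illustrative stand-ins for the nl80211 auth-type constants. */
enum auth_alg { AUTH_OPEN, AUTH_SHARED_KEY, AUTH_NETWORK_EAP };

/* Same fallback order as the switch in cfg80211_sme_rx_auth() above. */
enum auth_alg next_auth_alg(enum auth_alg rejected, int have_wep_key)
{
	switch (rejected) {
	case AUTH_OPEN:
		/* shared key is only worth trying with a WEP key configured */
		return have_wep_key ? AUTH_SHARED_KEY : AUTH_NETWORK_EAP;
	case AUTH_SHARED_KEY:
		return AUTH_NETWORK_EAP;
	default:
		/* unexpected: start over with open system, as the code above does */
		return AUTH_OPEN;
	}
}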
+ */ + wdev->conn->prev_bssid_valid = false; + wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; + schedule_work(&rdev->conn_work); + + return true; +} + +void cfg80211_sme_failed_assoc(struct wireless_dev *wdev) +{ + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + wdev->conn->state = CFG80211_CONN_DEAUTH_ASSOC_FAIL; + schedule_work(&rdev->conn_work); +} + +void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, + u16 status, bool wextev, + struct cfg80211_bss *bss) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + u8 *country_ie; +#ifdef CONFIG_CFG80211_WEXT + union iwreq_data wrqu; +#endif + + ASSERT_WDEV_LOCK(wdev); + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) + return; + + if (wdev->sme_state != CFG80211_SME_CONNECTING) + return; + + nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev, + bssid, req_ie, req_ie_len, + resp_ie, resp_ie_len, + status, GFP_KERNEL); + +#ifdef CONFIG_CFG80211_WEXT + if (wextev) { + if (req_ie && status == WLAN_STATUS_SUCCESS) { + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = req_ie_len; + wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie); + } + + if (resp_ie && status == WLAN_STATUS_SUCCESS) { + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = resp_ie_len; + wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie); + } + + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + if (bssid && status == WLAN_STATUS_SUCCESS) { + memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); + memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN); + wdev->wext.prev_bssid_valid = true; + } + wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); + } +#endif + + if (wdev->current_bss) { + cfg80211_unhold_bss(wdev->current_bss); + cfg80211_put_bss(&wdev->current_bss->pub); + wdev->current_bss = NULL; + } + + if (wdev->conn) + wdev->conn->state = CFG80211_CONN_IDLE; + + if (status != WLAN_STATUS_SUCCESS) { + wdev->sme_state = CFG80211_SME_IDLE; + if (wdev->conn) + kfree(wdev->conn->ie); + kfree(wdev->conn); + wdev->conn = NULL; + kfree(wdev->connect_keys); + wdev->connect_keys = NULL; + wdev->ssid_len = 0; + return; + } + + if (!bss) + bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, + wdev->ssid, wdev->ssid_len, + WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); + + if (WARN_ON(!bss)) + return; + + cfg80211_hold_bss(bss_from_pub(bss)); + wdev->current_bss = bss_from_pub(bss); + + wdev->sme_state = CFG80211_SME_CONNECTED; + cfg80211_upload_connect_keys(wdev); + + country_ie = (u8 *) ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY); + + if (!country_ie) + return; + + /* + * ieee80211_bss_get_ie() ensures we can access: + * - country_ie + 2, the start of the country ie data, and + * - and country_ie[1] which is the IE length + */ + regulatory_hint_11d(wdev->wiphy, + bss->channel->band, + country_ie + 2, + country_ie[1]); +} + +void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, + u16 status, gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_event *ev; + unsigned long flags; + + CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); + + ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); + if (!ev) + return; + + ev->type = EVENT_CONNECT_RESULT; + if (bssid) + memcpy(ev->cr.bssid, 
bssid, ETH_ALEN); + ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev); + ev->cr.req_ie_len = req_ie_len; + memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len); + ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; + ev->cr.resp_ie_len = resp_ie_len; + memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len); + ev->cr.status = status; + + spin_lock_irqsave(&wdev->event_lock, flags); + list_add_tail(&ev->list, &wdev->event_list); + spin_unlock_irqrestore(&wdev->event_lock, flags); + queue_work(cfg80211_wq, &rdev->event_work); +} +EXPORT_SYMBOL(cfg80211_connect_result); + +void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len) +{ + struct cfg80211_bss *bss; +#ifdef CONFIG_CFG80211_WEXT + union iwreq_data wrqu; +#endif + + ASSERT_WDEV_LOCK(wdev); + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) + return; + + if (wdev->sme_state != CFG80211_SME_CONNECTED) + return; + + /* internal error -- how did we get to CONNECTED w/o BSS? */ + if (WARN_ON(!wdev->current_bss)) { + return; + } + + cfg80211_unhold_bss(wdev->current_bss); + cfg80211_put_bss(&wdev->current_bss->pub); + wdev->current_bss = NULL; + + bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, + wdev->ssid, wdev->ssid_len, + WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); + + if (WARN_ON(!bss)) + return; + + cfg80211_hold_bss(bss_from_pub(bss)); + wdev->current_bss = bss_from_pub(bss); + + nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bssid, + req_ie, req_ie_len, resp_ie, resp_ie_len, + GFP_KERNEL); + +#ifdef CONFIG_CFG80211_WEXT + if (req_ie) { + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = req_ie_len; + wireless_send_event(wdev->netdev, IWEVASSOCREQIE, + &wrqu, req_ie); + } + + if (resp_ie) { + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = resp_ie_len; + wireless_send_event(wdev->netdev, IWEVASSOCRESPIE, + &wrqu, resp_ie); + } + + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); + memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN); + wdev->wext.prev_bssid_valid = true; + wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL); +#endif +} + +void cfg80211_roamed(struct net_device *dev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_event *ev; + unsigned long flags; + + CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); + + ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); + if (!ev) + return; + + ev->type = EVENT_ROAMED; + memcpy(ev->rm.bssid, bssid, ETH_ALEN); + ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); + ev->rm.req_ie_len = req_ie_len; + memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len); + ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; + ev->rm.resp_ie_len = resp_ie_len; + memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len); + + spin_lock_irqsave(&wdev->event_lock, flags); + list_add_tail(&ev->list, &wdev->event_list); + spin_unlock_irqrestore(&wdev->event_lock, flags); + queue_work(cfg80211_wq, &rdev->event_work); +} +EXPORT_SYMBOL(cfg80211_roamed); + +void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, + size_t ie_len, u16 reason, bool from_ap) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + int i; +#ifdef CONFIG_CFG80211_WEXT + 
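A reduced sketch of the allocation pattern cfg80211_connect_result() uses above (and cfg80211_roamed()/cfg80211_disconnected() use below): one allocation holds the event struct plus both IE blobs, with the IE pointers aimed at the trailing bytes. The struct and function names are simplified stand-ins for struct cfg80211_event, written against the standard C library so the sketch stands alone.

#include <stdlib.h>
#include <string.h>

struct conn_event {
	const unsigned char *req_ie, *resp_ie;
	size_t req_ie_len, resp_ie_len;
	/* both IE blobs are stored in the bytes that follow the struct */
};

struct conn_event *conn_event_alloc(const unsigned char *req_ie,
				    size_t req_ie_len,
				    const unsigned char *resp_ie,
				    size_t resp_ie_len)
{
	/* one zeroed allocation: struct, then request IEs, then response IEs */
	struct conn_event *ev = calloc(1, sizeof(*ev) + req_ie_len + resp_ie_len);

	if (!ev)
		return NULL;

	ev->req_ie = (unsigned char *)ev + sizeof(*ev);
	ev->req_ie_len = req_ie_len;
	if (req_ie_len)
		memcpy((void *)ev->req_ie, req_ie, req_ie_len);

	ev->resp_ie = ev->req_ie + req_ie_len;
	ev->resp_ie_len = resp_ie_len;
	if (resp_ie_len)
		memcpy((void *)ev->resp_ie, resp_ie, resp_ie_len);

	return ev;	/* released with a single free(), like the single kzalloc above */
}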
union iwreq_data wrqu; +#endif + + ASSERT_WDEV_LOCK(wdev); + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) + return; + + if (wdev->sme_state != CFG80211_SME_CONNECTED) + return; + + if (wdev->current_bss) { + cfg80211_unhold_bss(wdev->current_bss); + cfg80211_put_bss(&wdev->current_bss->pub); + } + + wdev->current_bss = NULL; + wdev->sme_state = CFG80211_SME_IDLE; + wdev->ssid_len = 0; + + if (wdev->conn) { + const u8 *bssid; + int ret; + + kfree(wdev->conn->ie); + wdev->conn->ie = NULL; + kfree(wdev->conn); + wdev->conn = NULL; + + /* + * If this disconnect was due to a disassoc, we + * we might still have an auth BSS around. For + * the userspace SME that's currently expected, + * but for the kernel SME (nl80211 CONNECT or + * wireless extensions) we want to clear up all + * state. + */ + for (i = 0; i < MAX_AUTH_BSSES; i++) { + if (!wdev->auth_bsses[i]) + continue; + bssid = wdev->auth_bsses[i]->pub.bssid; + ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, + WLAN_REASON_DEAUTH_LEAVING); + WARN(ret, "deauth failed: %d\n", ret); + } + } + + nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); + + /* + * Delete all the keys ... pairwise keys can't really + * exist any more anyway, but default keys might. + */ + if (rdev->ops->del_key) + for (i = 0; i < 6; i++) + rdev->ops->del_key(wdev->wiphy, dev, i, NULL); + +#ifdef CONFIG_CFG80211_WEXT + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); + wdev->wext.connect.ssid_len = 0; +#endif + + schedule_work(&cfg80211_disconnect_work); +} + +void cfg80211_disconnected(struct net_device *dev, u16 reason, + u8 *ie, size_t ie_len, gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_event *ev; + unsigned long flags; + + CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); + + ev = kzalloc(sizeof(*ev) + ie_len, gfp); + if (!ev) + return; + + ev->type = EVENT_DISCONNECTED; + ev->dc.ie = ((u8 *)ev) + sizeof(*ev); + ev->dc.ie_len = ie_len; + memcpy((void *)ev->dc.ie, ie, ie_len); + ev->dc.reason = reason; + + spin_lock_irqsave(&wdev->event_lock, flags); + list_add_tail(&ev->list, &wdev->event_list); + spin_unlock_irqrestore(&wdev->event_lock, flags); + queue_work(cfg80211_wq, &rdev->event_work); +} +EXPORT_SYMBOL(cfg80211_disconnected); + +int __cfg80211_connect(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_connect_params *connect, + struct cfg80211_cached_keys *connkeys, + const u8 *prev_bssid) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct ieee80211_channel *chan; + struct cfg80211_bss *bss = NULL; + int err; + + ASSERT_WDEV_LOCK(wdev); + + if (wdev->sme_state != CFG80211_SME_IDLE) + return -EALREADY; + + chan = rdev_fixed_channel(rdev, wdev); + if (chan && chan != connect->channel) + return -EBUSY; + + if (WARN_ON(wdev->connect_keys)) { + kfree(wdev->connect_keys); + wdev->connect_keys = NULL; + } + + if (connkeys && connkeys->def >= 0) { + int idx; + u32 cipher; + + idx = connkeys->def; + cipher = connkeys->params[idx].cipher; + /* If given a WEP key we may need it for shared key auth */ + if (cipher == WLAN_CIPHER_SUITE_WEP40 || + cipher == WLAN_CIPHER_SUITE_WEP104) { + connect->key_idx = idx; + connect->key = connkeys->params[idx].key; + connect->key_len = connkeys->params[idx].key_len; + + /* + * If ciphers are not set (e.g. 
when going through + * iwconfig), we have to set them appropriately here. + */ + if (connect->crypto.cipher_group == 0) + connect->crypto.cipher_group = cipher; + + if (connect->crypto.n_ciphers_pairwise == 0) { + connect->crypto.n_ciphers_pairwise = 1; + connect->crypto.ciphers_pairwise[0] = cipher; + } + } + } + + if (!rdev->ops->connect) { + if (!rdev->ops->auth || !rdev->ops->assoc) + return -EOPNOTSUPP; + + if (WARN_ON(wdev->conn)) + return -EINPROGRESS; + + wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); + if (!wdev->conn) + return -ENOMEM; + + /* + * Copy all parameters, and treat explicitly IEs, BSSID, SSID. + */ + memcpy(&wdev->conn->params, connect, sizeof(*connect)); + if (connect->bssid) { + wdev->conn->params.bssid = wdev->conn->bssid; + memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); + } + + if (connect->ie) { + wdev->conn->ie = kmemdup(connect->ie, connect->ie_len, + GFP_KERNEL); + wdev->conn->params.ie = wdev->conn->ie; + if (!wdev->conn->ie) { + kfree(wdev->conn); + wdev->conn = NULL; + return -ENOMEM; + } + } + + if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { + wdev->conn->auto_auth = true; + /* start with open system ... should mostly work */ + wdev->conn->params.auth_type = + NL80211_AUTHTYPE_OPEN_SYSTEM; + } else { + wdev->conn->auto_auth = false; + } + + memcpy(wdev->ssid, connect->ssid, connect->ssid_len); + wdev->ssid_len = connect->ssid_len; + wdev->conn->params.ssid = wdev->ssid; + wdev->conn->params.ssid_len = connect->ssid_len; + + /* see if we have the bss already */ + bss = cfg80211_get_conn_bss(wdev); + + wdev->sme_state = CFG80211_SME_CONNECTING; + wdev->connect_keys = connkeys; + + if (prev_bssid) { + memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); + wdev->conn->prev_bssid_valid = true; + } + + /* we're good if we have a matching bss struct */ + if (bss) { + wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; + err = cfg80211_conn_do_work(wdev); + cfg80211_put_bss(bss); + } else { + /* otherwise we'll need to scan for the AP first */ + err = cfg80211_conn_scan(wdev); + /* + * If we can't scan right now, then we need to scan again + * after the current scan finished, since the parameters + * changed (unless we find a good AP anyway). 
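Stepping back for a moment, a sketch of the branch __cfg80211_connect() is inside at this point: drivers that provide a .connect op are called directly, otherwise cfg80211 emulates the connect with the .auth/.assoc ops, and with neither the call fails with -EOPNOTSUPP. The helper and enum below are illustrative, not part of the patch.

/* Illustrative only: the top-level decision __cfg80211_connect() makes. */
enum connect_path { USE_DRIVER_CONNECT, USE_SME_AUTH_ASSOC, NOT_SUPPORTED };

struct ops_caps { int has_connect, has_auth, has_assoc; };

enum connect_path pick_connect_path(const struct ops_caps *ops)
{
	if (ops->has_connect)
		return USE_DRIVER_CONNECT;	/* driver handles auth + assoc itself */
	if (ops->has_auth && ops->has_assoc)
		return USE_SME_AUTH_ASSOC;	/* cfg80211 emulates connect, as here */
	return NOT_SUPPORTED;			/* __cfg80211_connect() returns -EOPNOTSUPP */
}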
+ */ + if (err == -EBUSY) { + err = 0; + wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; + } + } + if (err) { + kfree(wdev->conn->ie); + kfree(wdev->conn); + wdev->conn = NULL; + wdev->sme_state = CFG80211_SME_IDLE; + wdev->connect_keys = NULL; + wdev->ssid_len = 0; + } + + return err; + } else { + wdev->sme_state = CFG80211_SME_CONNECTING; + wdev->connect_keys = connkeys; + err = rdev->ops->connect(&rdev->wiphy, dev, connect); + if (err) { + wdev->connect_keys = NULL; + wdev->sme_state = CFG80211_SME_IDLE; + return err; + } + + memcpy(wdev->ssid, connect->ssid, connect->ssid_len); + wdev->ssid_len = connect->ssid_len; + + return 0; + } +} + +int cfg80211_connect(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_connect_params *connect, + struct cfg80211_cached_keys *connkeys) +{ + int err; + + mutex_lock(&rdev->devlist_mtx); + wdev_lock(dev->ieee80211_ptr); + err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); + wdev_unlock(dev->ieee80211_ptr); + mutex_unlock(&rdev->devlist_mtx); + + return err; +} + +int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, + struct net_device *dev, u16 reason, bool wextev) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err; + + ASSERT_WDEV_LOCK(wdev); + + if (wdev->sme_state == CFG80211_SME_IDLE) + return -EINVAL; + + kfree(wdev->connect_keys); + wdev->connect_keys = NULL; + + if (!rdev->ops->disconnect) { + if (!rdev->ops->deauth) + return -EOPNOTSUPP; + + /* was it connected by userspace SME? */ + if (!wdev->conn) { + cfg80211_mlme_down(rdev, dev); + return 0; + } + + if (wdev->sme_state == CFG80211_SME_CONNECTING && + (wdev->conn->state == CFG80211_CONN_SCANNING || + wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) { + wdev->sme_state = CFG80211_SME_IDLE; + kfree(wdev->conn->ie); + kfree(wdev->conn); + wdev->conn = NULL; + wdev->ssid_len = 0; + return 0; + } + + /* wdev->conn->params.bssid must be set if > SCANNING */ + err = __cfg80211_mlme_deauth(rdev, dev, + wdev->conn->params.bssid, + NULL, 0, reason); + if (err) + return err; + } else { + err = rdev->ops->disconnect(&rdev->wiphy, dev, reason); + if (err) + return err; + } + + if (wdev->sme_state == CFG80211_SME_CONNECTED) + __cfg80211_disconnected(dev, NULL, 0, 0, false); + else if (wdev->sme_state == CFG80211_SME_CONNECTING) + __cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + wextev, NULL); + + return 0; +} + +int cfg80211_disconnect(struct cfg80211_registered_device *rdev, + struct net_device *dev, + u16 reason, bool wextev) +{ + int err; + + wdev_lock(dev->ieee80211_ptr); + err = __cfg80211_disconnect(rdev, dev, reason, wextev); + wdev_unlock(dev->ieee80211_ptr); + + return err; +} + +void cfg80211_sme_disassoc(struct net_device *dev, int idx) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + u8 bssid[ETH_ALEN]; + + ASSERT_WDEV_LOCK(wdev); + + if (!wdev->conn) + return; + + if (wdev->conn->state == CFG80211_CONN_IDLE) + return; + + /* + * Ok, so the association was made by this SME -- we don't + * want it any more so deauthenticate too. 
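Illustrative only: how __cfg80211_disconnect() above chooses a teardown path, condensed into a standalone helper. The enum values are descriptive labels invented for the sketch, not cfg80211 constants.

enum disconnect_action {
	DRIVER_DISCONNECT,	/* call rdev->ops->disconnect() */
	MLME_DOWN,		/* userspace SME owns the connection */
	ABORT_SCAN_ONLY,	/* kernel SME still scanning: just reset state */
	SEND_DEAUTH,		/* kernel SME: deauthenticate the target BSS */
	UNSUPPORTED,		/* neither a disconnect nor a deauth op */
};

enum disconnect_action pick_disconnect_action(int has_disconnect_op,
					      int has_deauth_op,
					      int kernel_sme_conn,
					      int still_scanning)
{
	if (has_disconnect_op)
		return DRIVER_DISCONNECT;
	if (!has_deauth_op)
		return UNSUPPORTED;
	if (!kernel_sme_conn)
		return MLME_DOWN;
	return still_scanning ? ABORT_SCAN_ONLY : SEND_DEAUTH;
}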
+ */ + + if (!wdev->auth_bsses[idx]) + return; + + memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN); + if (__cfg80211_mlme_deauth(rdev, dev, bssid, + NULL, 0, WLAN_REASON_DEAUTH_LEAVING)) { + /* whatever -- assume gone anyway */ + cfg80211_unhold_bss(wdev->auth_bsses[idx]); + cfg80211_put_bss(&wdev->auth_bsses[idx]->pub); + wdev->auth_bsses[idx] = NULL; + } +} diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c new file mode 100644 index 0000000..9f2cef3 --- /dev/null +++ b/net/wireless/sysfs.c @@ -0,0 +1,133 @@ +/* + * This file provides /sys/class/ieee80211// + * and some default attributes. + * + * Copyright 2005-2006 Jiri Benc + * Copyright 2006 Johannes Berg + * + * This file is GPLv2 as found in COPYING. + */ + +#include +#include +#include +#include +#include +#include +#include "sysfs.h" +#include "core.h" + +static inline struct cfg80211_registered_device *dev_to_rdev( + struct device *dev) +{ + return container_of(dev, struct cfg80211_registered_device, wiphy.dev); +} + +#define SHOW_FMT(name, fmt, member) \ +static ssize_t name ## _show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member); \ +} + +SHOW_FMT(index, "%d", wiphy_idx); +SHOW_FMT(macaddress, "%pM", wiphy.perm_addr); +SHOW_FMT(address_mask, "%pM", wiphy.addr_mask); + +static ssize_t addresses_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy; + char *start = buf; + int i; + + if (!wiphy->addresses) + return sprintf(buf, "%pM\n", wiphy->perm_addr); + + for (i = 0; i < wiphy->n_addresses; i++) + buf += sprintf(buf, "%pM\n", &wiphy->addresses[i].addr); + + return buf - start; +} + +static struct device_attribute ieee80211_dev_attrs[] = { + __ATTR_RO(index), + __ATTR_RO(macaddress), + __ATTR_RO(address_mask), + __ATTR_RO(addresses), + {} +}; + +static void wiphy_dev_release(struct device *dev) +{ + struct cfg80211_registered_device *rdev = dev_to_rdev(dev); + + cfg80211_dev_free(rdev); +} + +#ifdef CONFIG_HOTPLUG +static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + /* TODO, we probably need stuff here */ + return 0; +} +#endif + +static int wiphy_suspend(struct device *dev, pm_message_t state) +{ + struct cfg80211_registered_device *rdev = dev_to_rdev(dev); + int ret = 0; + + rdev->suspend_at = get_seconds(); + + if (rdev->ops->suspend) { + rtnl_lock(); + ret = rdev->ops->suspend(&rdev->wiphy); + rtnl_unlock(); + } + + return ret; +} + +static int wiphy_resume(struct device *dev) +{ + struct cfg80211_registered_device *rdev = dev_to_rdev(dev); + int ret = 0; + + /* Age scan results with time spent in suspend */ + spin_lock_bh(&rdev->bss_lock); + cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); + spin_unlock_bh(&rdev->bss_lock); + + if (rdev->ops->resume) { + rtnl_lock(); + ret = rdev->ops->resume(&rdev->wiphy); + rtnl_unlock(); + } + + return ret; +} + +struct class ieee80211_class = { + .name = "ieee80211", + .owner = THIS_MODULE, + .dev_release = wiphy_dev_release, + .dev_attrs = ieee80211_dev_attrs, +#ifdef CONFIG_HOTPLUG + .dev_uevent = wiphy_uevent, +#endif + .suspend = wiphy_suspend, + .resume = wiphy_resume, +}; + +int wiphy_sysfs_init(void) +{ + return class_register(&ieee80211_class); +} + +void wiphy_sysfs_exit(void) +{ + class_unregister(&ieee80211_class); +} diff --git a/net/wireless/sysfs.h b/net/wireless/sysfs.h new file mode 100644 index 0000000..65acbeb --- /dev/null +++ b/net/wireless/sysfs.h @@ -0,0 +1,9 
@@ +#ifndef __WIRELESS_SYSFS_H +#define __WIRELESS_SYSFS_H + +extern int wiphy_sysfs_init(void); +extern void wiphy_sysfs_exit(void); + +extern struct class ieee80211_class; + +#endif /* __WIRELESS_SYSFS_H */ diff --git a/net/wireless/util.c b/net/wireless/util.c new file mode 100644 index 0000000..be2ab8c --- /dev/null +++ b/net/wireless/util.c @@ -0,0 +1,853 @@ +/* + * Wireless utility functions + * + * Copyright 2007-2009 Johannes Berg + */ +#include +#include +#include +#include +#include "core.h" + +struct ieee80211_rate * +ieee80211_get_response_rate(struct ieee80211_supported_band *sband, + u32 basic_rates, int bitrate) +{ + struct ieee80211_rate *result = &sband->bitrates[0]; + int i; + + for (i = 0; i < sband->n_bitrates; i++) { + if (!(basic_rates & BIT(i))) + continue; + if (sband->bitrates[i].bitrate > bitrate) + continue; + result = &sband->bitrates[i]; + } + + return result; +} +EXPORT_SYMBOL(ieee80211_get_response_rate); + +int ieee80211_channel_to_frequency(int chan) +{ + if (chan < 14) + return 2407 + chan * 5; + + if (chan == 14) + return 2484; + + /* FIXME: 802.11j 17.3.8.3.2 */ + return (chan + 1000) * 5; +} +EXPORT_SYMBOL(ieee80211_channel_to_frequency); + +int ieee80211_frequency_to_channel(int freq) +{ + if (freq == 2484) + return 14; + + if (freq < 2484) + return (freq - 2407) / 5; + + /* FIXME: 802.11j 17.3.8.3.2 */ + return freq/5 - 1000; +} +EXPORT_SYMBOL(ieee80211_frequency_to_channel); + +struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, + int freq) +{ + enum ieee80211_band band; + struct ieee80211_supported_band *sband; + int i; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + sband = wiphy->bands[band]; + + if (!sband) + continue; + + for (i = 0; i < sband->n_channels; i++) { + if (sband->channels[i].center_freq == freq) + return &sband->channels[i]; + } + } + + return NULL; +} +EXPORT_SYMBOL(__ieee80211_get_channel); + +static void set_mandatory_flags_band(struct ieee80211_supported_band *sband, + enum ieee80211_band band) +{ + int i, want; + + switch (band) { + case IEEE80211_BAND_5GHZ: + want = 3; + for (i = 0; i < sband->n_bitrates; i++) { + if (sband->bitrates[i].bitrate == 60 || + sband->bitrates[i].bitrate == 120 || + sband->bitrates[i].bitrate == 240) { + sband->bitrates[i].flags |= + IEEE80211_RATE_MANDATORY_A; + want--; + } + } + WARN_ON(want); + break; + case IEEE80211_BAND_2GHZ: + want = 7; + for (i = 0; i < sband->n_bitrates; i++) { + if (sband->bitrates[i].bitrate == 10) { + sband->bitrates[i].flags |= + IEEE80211_RATE_MANDATORY_B | + IEEE80211_RATE_MANDATORY_G; + want--; + } + + if (sband->bitrates[i].bitrate == 20 || + sband->bitrates[i].bitrate == 55 || + sband->bitrates[i].bitrate == 110 || + sband->bitrates[i].bitrate == 60 || + sband->bitrates[i].bitrate == 120 || + sband->bitrates[i].bitrate == 240) { + sband->bitrates[i].flags |= + IEEE80211_RATE_MANDATORY_G; + want--; + } + + if (sband->bitrates[i].bitrate != 10 && + sband->bitrates[i].bitrate != 20 && + sband->bitrates[i].bitrate != 55 && + sband->bitrates[i].bitrate != 110) + sband->bitrates[i].flags |= + IEEE80211_RATE_ERP_G; + } + WARN_ON(want != 0 && want != 3 && want != 6); + break; + case IEEE80211_NUM_BANDS: + WARN_ON(1); + break; + } +} + +void ieee80211_set_bitrate_flags(struct wiphy *wiphy) +{ + enum ieee80211_band band; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) + if (wiphy->bands[band]) + set_mandatory_flags_band(wiphy->bands[band], band); +} + +int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, + struct 
key_params *params, int key_idx, + const u8 *mac_addr) +{ + int i; + + if (key_idx > 5) + return -EINVAL; + + /* + * Disallow pairwise keys with non-zero index unless it's WEP + * (because current deployments use pairwise WEP keys with + * non-zero indizes but 802.11i clearly specifies to use zero) + */ + if (mac_addr && key_idx && + params->cipher != WLAN_CIPHER_SUITE_WEP40 && + params->cipher != WLAN_CIPHER_SUITE_WEP104) + return -EINVAL; + + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + if (params->key_len != WLAN_KEY_LEN_WEP40) + return -EINVAL; + break; + case WLAN_CIPHER_SUITE_TKIP: + if (params->key_len != WLAN_KEY_LEN_TKIP) + return -EINVAL; + break; + case WLAN_CIPHER_SUITE_CCMP: + if (params->key_len != WLAN_KEY_LEN_CCMP) + return -EINVAL; + break; + case WLAN_CIPHER_SUITE_WEP104: + if (params->key_len != WLAN_KEY_LEN_WEP104) + return -EINVAL; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + if (params->key_len != WLAN_KEY_LEN_AES_CMAC) + return -EINVAL; + break; + default: + return -EINVAL; + } + + if (params->seq) { + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* These ciphers do not use key sequence */ + return -EINVAL; + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_AES_CMAC: + if (params->seq_len != 6) + return -EINVAL; + break; + } + } + + for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) + if (params->cipher == rdev->wiphy.cipher_suites[i]) + break; + if (i == rdev->wiphy.n_cipher_suites) + return -EINVAL; + + return 0; +} + +/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ +/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ +const unsigned char rfc1042_header[] __aligned(2) = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; +EXPORT_SYMBOL(rfc1042_header); + +/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ +const unsigned char bridge_tunnel_header[] __aligned(2) = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; +EXPORT_SYMBOL(bridge_tunnel_header); + +unsigned int ieee80211_hdrlen(__le16 fc) +{ + unsigned int hdrlen = 24; + + if (ieee80211_is_data(fc)) { + if (ieee80211_has_a4(fc)) + hdrlen = 30; + if (ieee80211_is_data_qos(fc)) { + hdrlen += IEEE80211_QOS_CTL_LEN; + if (ieee80211_has_order(fc)) + hdrlen += IEEE80211_HT_CTL_LEN; + } + goto out; + } + + if (ieee80211_is_ctl(fc)) { + /* + * ACK and CTS are 10 bytes, all others 16. 
To see how + * to get this condition consider + * subtype mask: 0b0000000011110000 (0x00F0) + * ACK subtype: 0b0000000011010000 (0x00D0) + * CTS subtype: 0b0000000011000000 (0x00C0) + * bits that matter: ^^^ (0x00E0) + * value of those: 0b0000000011000000 (0x00C0) + */ + if ((fc & cpu_to_le16(0x00E0)) == cpu_to_le16(0x00C0)) + hdrlen = 10; + else + hdrlen = 16; + } +out: + return hdrlen; +} +EXPORT_SYMBOL(ieee80211_hdrlen); + +unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) +{ + const struct ieee80211_hdr *hdr = + (const struct ieee80211_hdr *)skb->data; + unsigned int hdrlen; + + if (unlikely(skb->len < 10)) + return 0; + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (unlikely(hdrlen > skb->len)) + return 0; + return hdrlen; +} +EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); + +static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) +{ + int ae = meshhdr->flags & MESH_FLAGS_AE; + /* 7.1.3.5a.2 */ + switch (ae) { + case 0: + return 6; + case MESH_FLAGS_AE_A4: + return 12; + case MESH_FLAGS_AE_A5_A6: + return 18; + case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6): + return 24; + default: + return 6; + } +} + +int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u16 hdrlen, ethertype; + u8 *payload; + u8 dst[ETH_ALEN]; + u8 src[ETH_ALEN] __aligned(2); + + if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) + return -1; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + + /* convert IEEE 802.11 header + possible LLC headers into Ethernet + * header + * IEEE 802.11 address fields: + * ToDS FromDS Addr1 Addr2 Addr3 Addr4 + * 0 0 DA SA BSSID n/a + * 0 1 DA BSSID SA n/a + * 1 0 BSSID SA DA n/a + * 1 1 RA TA DA SA + */ + memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN); + memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN); + + switch (hdr->frame_control & + cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { + case cpu_to_le16(IEEE80211_FCTL_TODS): + if (unlikely(iftype != NL80211_IFTYPE_AP && + iftype != NL80211_IFTYPE_AP_VLAN)) + return -1; + break; + case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): + if (unlikely(iftype != NL80211_IFTYPE_WDS && + iftype != NL80211_IFTYPE_MESH_POINT && + iftype != NL80211_IFTYPE_AP_VLAN && + iftype != NL80211_IFTYPE_STATION)) + return -1; + if (iftype == NL80211_IFTYPE_MESH_POINT) { + struct ieee80211s_hdr *meshdr = + (struct ieee80211s_hdr *) (skb->data + hdrlen); + hdrlen += ieee80211_get_mesh_hdrlen(meshdr); + if (meshdr->flags & MESH_FLAGS_AE_A5_A6) { + memcpy(dst, meshdr->eaddr1, ETH_ALEN); + memcpy(src, meshdr->eaddr2, ETH_ALEN); + } + } + break; + case cpu_to_le16(IEEE80211_FCTL_FROMDS): + if ((iftype != NL80211_IFTYPE_STATION && + iftype != NL80211_IFTYPE_MESH_POINT) || + (is_multicast_ether_addr(dst) && + !compare_ether_addr(src, addr))) + return -1; + if (iftype == NL80211_IFTYPE_MESH_POINT) { + struct ieee80211s_hdr *meshdr = + (struct ieee80211s_hdr *) (skb->data + hdrlen); + hdrlen += ieee80211_get_mesh_hdrlen(meshdr); + if (meshdr->flags & MESH_FLAGS_AE_A4) + memcpy(src, meshdr->eaddr1, ETH_ALEN); + } + break; + case cpu_to_le16(0): + if (iftype != NL80211_IFTYPE_ADHOC) + return -1; + break; + } + + if (unlikely(skb->len - hdrlen < 8)) + return -1; + + payload = skb->data + hdrlen; + ethertype = (payload[6] << 8) | payload[7]; + + if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && + ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || + compare_ether_addr(payload, 
bridge_tunnel_header) == 0)) { + /* remove RFC1042 or Bridge-Tunnel encapsulation and + * replace EtherType */ + skb_pull(skb, hdrlen + 6); + memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); + memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); + } else { + struct ethhdr *ehdr; + __be16 len; + + skb_pull(skb, hdrlen); + len = htons(skb->len); + ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr)); + memcpy(ehdr->h_dest, dst, ETH_ALEN); + memcpy(ehdr->h_source, src, ETH_ALEN); + ehdr->h_proto = len; + } + return 0; +} +EXPORT_SYMBOL(ieee80211_data_to_8023); + +int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype, u8 *bssid, bool qos) +{ + struct ieee80211_hdr hdr; + u16 hdrlen, ethertype; + __le16 fc; + const u8 *encaps_data; + int encaps_len, skip_header_bytes; + int nh_pos, h_pos; + int head_need; + + if (unlikely(skb->len < ETH_HLEN)) + return -EINVAL; + + nh_pos = skb_network_header(skb) - skb->data; + h_pos = skb_transport_header(skb) - skb->data; + + /* convert Ethernet header to proper 802.11 header (based on + * operation mode) */ + ethertype = (skb->data[12] << 8) | skb->data[13]; + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); + + switch (iftype) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); + /* DA BSSID SA */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + memcpy(hdr.addr2, addr, ETH_ALEN); + memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); + hdrlen = 24; + break; + case NL80211_IFTYPE_STATION: + fc |= cpu_to_le16(IEEE80211_FCTL_TODS); + /* BSSID SA DA */ + memcpy(hdr.addr1, bssid, ETH_ALEN); + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + memcpy(hdr.addr3, skb->data, ETH_ALEN); + hdrlen = 24; + break; + case NL80211_IFTYPE_ADHOC: + /* DA SA BSSID */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + memcpy(hdr.addr3, bssid, ETH_ALEN); + hdrlen = 24; + break; + default: + return -EOPNOTSUPP; + } + + if (qos) { + fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); + hdrlen += 2; + } + + hdr.frame_control = fc; + hdr.duration_id = 0; + hdr.seq_ctrl = 0; + + skip_header_bytes = ETH_HLEN; + if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { + encaps_data = bridge_tunnel_header; + encaps_len = sizeof(bridge_tunnel_header); + skip_header_bytes -= 2; + } else if (ethertype > 0x600) { + encaps_data = rfc1042_header; + encaps_len = sizeof(rfc1042_header); + skip_header_bytes -= 2; + } else { + encaps_data = NULL; + encaps_len = 0; + } + + skb_pull(skb, skip_header_bytes); + nh_pos -= skip_header_bytes; + h_pos -= skip_header_bytes; + + head_need = hdrlen + encaps_len - skb_headroom(skb); + + if (head_need > 0 || skb_cloned(skb)) { + head_need = max(head_need, 0); + if (head_need) + skb_orphan(skb); + + if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) { + printk(KERN_ERR "failed to reallocate Tx buffer\n"); + return -ENOMEM; + } + skb->truesize += head_need; + } + + if (encaps_data) { + memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); + nh_pos += encaps_len; + h_pos += encaps_len; + } + + memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); + + nh_pos += hdrlen; + h_pos += hdrlen; + + /* Update skb pointers to various headers since this modified frame + * is going to go through Linux networking code that may potentially + * need things like pointer to IP header. 
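+ * The MAC header now starts at offset 0; the network and transport
+ * offsets were adjusted above for the stripped Ethernet header and
+ * the newly prepended encapsulation and 802.11 headers.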
*/ + skb_set_mac_header(skb, 0); + skb_set_network_header(skb, nh_pos); + skb_set_transport_header(skb, h_pos); + + return 0; +} +EXPORT_SYMBOL(ieee80211_data_from_8023); + + +void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, + const u8 *addr, enum nl80211_iftype iftype, + const unsigned int extra_headroom) +{ + struct sk_buff *frame = NULL; + u16 ethertype; + u8 *payload; + const struct ethhdr *eth; + int remaining, err; + u8 dst[ETH_ALEN], src[ETH_ALEN]; + + err = ieee80211_data_to_8023(skb, addr, iftype); + if (err) + goto out; + + /* skip the wrapping header */ + eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr)); + if (!eth) + goto out; + + while (skb != frame) { + u8 padding; + __be16 len = eth->h_proto; + unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len); + + remaining = skb->len; + memcpy(dst, eth->h_dest, ETH_ALEN); + memcpy(src, eth->h_source, ETH_ALEN); + + padding = (4 - subframe_len) & 0x3; + /* the last MSDU has no padding */ + if (subframe_len > remaining) + goto purge; + + skb_pull(skb, sizeof(struct ethhdr)); + /* reuse skb for the last subframe */ + if (remaining <= subframe_len + padding) + frame = skb; + else { + unsigned int hlen = ALIGN(extra_headroom, 4); + /* + * Allocate and reserve two bytes more for payload + * alignment since sizeof(struct ethhdr) is 14. + */ + frame = dev_alloc_skb(hlen + subframe_len + 2); + if (!frame) + goto purge; + + skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2); + memcpy(skb_put(frame, ntohs(len)), skb->data, + ntohs(len)); + + eth = (struct ethhdr *)skb_pull(skb, ntohs(len) + + padding); + if (!eth) { + dev_kfree_skb(frame); + goto purge; + } + } + + skb_reset_network_header(frame); + frame->dev = skb->dev; + frame->priority = skb->priority; + + payload = frame->data; + ethertype = (payload[6] << 8) | payload[7]; + + if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && + ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || + compare_ether_addr(payload, + bridge_tunnel_header) == 0)) { + /* remove RFC1042 or Bridge-Tunnel + * encapsulation and replace EtherType */ + skb_pull(frame, 6); + memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN); + memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); + } else { + memcpy(skb_push(frame, sizeof(__be16)), &len, + sizeof(__be16)); + memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN); + memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); + } + __skb_queue_tail(list, frame); + } + + return; + + purge: + __skb_queue_purge(list); + out: + dev_kfree_skb(skb); +} +EXPORT_SYMBOL(ieee80211_amsdu_to_8023s); + +/* Given a data frame determine the 802.1p/1d tag to use. */ +unsigned int cfg80211_classify8021d(struct sk_buff *skb) +{ + unsigned int dscp; + + /* skb->priority values from 256->263 are magic values to + * directly indicate a specific 802.1d priority. This is used + * to allow 802.1d priority to be passed directly in from VLAN + * tags, etc. 
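+ * For example, skb->priority == 260 maps directly to 802.1d tag 4,
+ * while an IPv4 frame with TOS 0xb8 is classified below as
+ * (0xb8 & 0xfc) >> 5 == 5.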
+ */ + if (skb->priority >= 256 && skb->priority <= 263) + return skb->priority - 256; + + switch (skb->protocol) { + case htons(ETH_P_IP): + dscp = ip_hdr(skb)->tos & 0xfc; + break; + default: + return 0; + } + + return dscp >> 5; +} +EXPORT_SYMBOL(cfg80211_classify8021d); + +const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie) +{ + u8 *end, *pos; + + pos = bss->information_elements; + if (pos == NULL) + return NULL; + end = pos + bss->len_information_elements; + + while (pos + 1 < end) { + if (pos + 2 + pos[1] > end) + break; + if (pos[0] == ie) + return pos; + pos += 2 + pos[1]; + } + + return NULL; +} +EXPORT_SYMBOL(ieee80211_bss_get_ie); + +void cfg80211_upload_connect_keys(struct wireless_dev *wdev) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct net_device *dev = wdev->netdev; + int i; + + if (!wdev->connect_keys) + return; + + for (i = 0; i < 6; i++) { + if (!wdev->connect_keys->params[i].cipher) + continue; + if (rdev->ops->add_key(wdev->wiphy, dev, i, NULL, + &wdev->connect_keys->params[i])) { + printk(KERN_ERR "%s: failed to set key %d\n", + dev->name, i); + continue; + } + if (wdev->connect_keys->def == i) + if (rdev->ops->set_default_key(wdev->wiphy, dev, i)) { + printk(KERN_ERR "%s: failed to set defkey %d\n", + dev->name, i); + continue; + } + if (wdev->connect_keys->defmgmt == i) + if (rdev->ops->set_default_mgmt_key(wdev->wiphy, dev, i)) + printk(KERN_ERR "%s: failed to set mgtdef %d\n", + dev->name, i); + } + + kfree(wdev->connect_keys); + wdev->connect_keys = NULL; +} + +static void cfg80211_process_wdev_events(struct wireless_dev *wdev) +{ + struct cfg80211_event *ev; + unsigned long flags; + const u8 *bssid = NULL; + + spin_lock_irqsave(&wdev->event_lock, flags); + while (!list_empty(&wdev->event_list)) { + ev = list_first_entry(&wdev->event_list, + struct cfg80211_event, list); + list_del(&ev->list); + spin_unlock_irqrestore(&wdev->event_lock, flags); + + wdev_lock(wdev); + switch (ev->type) { + case EVENT_CONNECT_RESULT: + if (!is_zero_ether_addr(ev->cr.bssid)) + bssid = ev->cr.bssid; + __cfg80211_connect_result( + wdev->netdev, bssid, + ev->cr.req_ie, ev->cr.req_ie_len, + ev->cr.resp_ie, ev->cr.resp_ie_len, + ev->cr.status, + ev->cr.status == WLAN_STATUS_SUCCESS, + NULL); + break; + case EVENT_ROAMED: + __cfg80211_roamed(wdev, ev->rm.bssid, + ev->rm.req_ie, ev->rm.req_ie_len, + ev->rm.resp_ie, ev->rm.resp_ie_len); + break; + case EVENT_DISCONNECTED: + __cfg80211_disconnected(wdev->netdev, + ev->dc.ie, ev->dc.ie_len, + ev->dc.reason, true); + break; + case EVENT_IBSS_JOINED: + __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid); + break; + } + wdev_unlock(wdev); + + kfree(ev); + + spin_lock_irqsave(&wdev->event_lock, flags); + } + spin_unlock_irqrestore(&wdev->event_lock, flags); +} + +void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev) +{ + struct wireless_dev *wdev; + + ASSERT_RTNL(); + ASSERT_RDEV_LOCK(rdev); + + mutex_lock(&rdev->devlist_mtx); + + list_for_each_entry(wdev, &rdev->netdev_list, list) + cfg80211_process_wdev_events(wdev); + + mutex_unlock(&rdev->devlist_mtx); +} + +int cfg80211_change_iface(struct cfg80211_registered_device *rdev, + struct net_device *dev, enum nl80211_iftype ntype, + u32 *flags, struct vif_params *params) +{ + int err; + enum nl80211_iftype otype = dev->ieee80211_ptr->iftype; + + ASSERT_RDEV_LOCK(rdev); + + /* don't support changing VLANs, you just re-create them */ + if (otype == NL80211_IFTYPE_AP_VLAN) + return -EOPNOTSUPP; + + if (!rdev->ops->change_virtual_intf 
|| + !(rdev->wiphy.interface_modes & (1 << ntype))) + return -EOPNOTSUPP; + + /* if it's part of a bridge, reject changing type to station/ibss */ + if (dev->br_port && (ntype == NL80211_IFTYPE_ADHOC || + ntype == NL80211_IFTYPE_STATION)) + return -EBUSY; + + if (ntype != otype) { + dev->ieee80211_ptr->use_4addr = false; + + switch (otype) { + case NL80211_IFTYPE_ADHOC: + cfg80211_leave_ibss(rdev, dev, false); + break; + case NL80211_IFTYPE_STATION: + cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, true); + break; + case NL80211_IFTYPE_MESH_POINT: + /* mesh should be handled? */ + break; + default: + break; + } + + cfg80211_process_rdev_events(rdev); + } + + err = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, + ntype, flags, params); + + WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype); + + if (!err && params && params->use_4addr != -1) + dev->ieee80211_ptr->use_4addr = params->use_4addr; + + if (!err) { + dev->priv_flags &= ~IFF_DONT_BRIDGE; + switch (ntype) { + case NL80211_IFTYPE_STATION: + if (dev->ieee80211_ptr->use_4addr) + break; + /* fall through */ + case NL80211_IFTYPE_ADHOC: + dev->priv_flags |= IFF_DONT_BRIDGE; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: + /* bridging OK */ + break; + case NL80211_IFTYPE_MONITOR: + /* monitor can't bridge anyway */ + break; + case NL80211_IFTYPE_UNSPECIFIED: + case __NL80211_IFTYPE_AFTER_LAST: + /* not happening */ + break; + } + } + + return err; +} + +u16 cfg80211_calculate_bitrate(struct rate_info *rate) +{ + int modulation, streams, bitrate; + + if (!(rate->flags & RATE_INFO_FLAGS_MCS)) + return rate->legacy; + + /* the formula below does only work for MCS values smaller than 32 */ + if (rate->mcs >= 32) + return 0; + + modulation = rate->mcs & 7; + streams = (rate->mcs >> 3) + 1; + + bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ? + 13500000 : 6500000; + + if (modulation < 4) + bitrate *= (modulation + 1); + else if (modulation == 4) + bitrate *= (modulation + 2); + else + bitrate *= (modulation + 3); + + bitrate *= streams; + + if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) + bitrate = (bitrate / 9) * 10; + + /* do NOT round down here */ + return (bitrate + 50000) / 100000; +} diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c new file mode 100644 index 0000000..9ab5183 --- /dev/null +++ b/net/wireless/wext-compat.c @@ -0,0 +1,1508 @@ +/* + * cfg80211 - wext compat code + * + * This is temporary code until all wireless functionality is migrated + * into cfg80211, when that happens all the exports here go away and + * we directly assign the wireless handlers of wireless interfaces. 
+ * + * Copyright 2008-2009 Johannes Berg + */ + +#include +#include +#include +#include +#include +#include +#include "wext-compat.h" +#include "core.h" + +int cfg80211_wext_giwname(struct net_device *dev, + struct iw_request_info *info, + char *name, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct ieee80211_supported_band *sband; + bool is_ht = false, is_a = false, is_b = false, is_g = false; + + if (!wdev) + return -EOPNOTSUPP; + + sband = wdev->wiphy->bands[IEEE80211_BAND_5GHZ]; + if (sband) { + is_a = true; + is_ht |= sband->ht_cap.ht_supported; + } + + sband = wdev->wiphy->bands[IEEE80211_BAND_2GHZ]; + if (sband) { + int i; + /* Check for mandatory rates */ + for (i = 0; i < sband->n_bitrates; i++) { + if (sband->bitrates[i].bitrate == 10) + is_b = true; + if (sband->bitrates[i].bitrate == 60) + is_g = true; + } + is_ht |= sband->ht_cap.ht_supported; + } + + strcpy(name, "IEEE 802.11"); + if (is_a) + strcat(name, "a"); + if (is_b) + strcat(name, "b"); + if (is_g) + strcat(name, "g"); + if (is_ht) + strcat(name, "n"); + + return 0; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwname); + +int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, + u32 *mode, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev; + struct vif_params vifparams; + enum nl80211_iftype type; + int ret; + + rdev = wiphy_to_dev(wdev->wiphy); + + switch (*mode) { + case IW_MODE_INFRA: + type = NL80211_IFTYPE_STATION; + break; + case IW_MODE_ADHOC: + type = NL80211_IFTYPE_ADHOC; + break; + case IW_MODE_REPEAT: + type = NL80211_IFTYPE_WDS; + break; + case IW_MODE_MONITOR: + type = NL80211_IFTYPE_MONITOR; + break; + default: + return -EINVAL; + } + + if (type == wdev->iftype) + return 0; + + memset(&vifparams, 0, sizeof(vifparams)); + + cfg80211_lock_rdev(rdev); + ret = cfg80211_change_iface(rdev, dev, type, NULL, &vifparams); + cfg80211_unlock_rdev(rdev); + + return ret; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwmode); + +int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info, + u32 *mode, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + if (!wdev) + return -EOPNOTSUPP; + + switch (wdev->iftype) { + case NL80211_IFTYPE_AP: + *mode = IW_MODE_MASTER; + break; + case NL80211_IFTYPE_STATION: + *mode = IW_MODE_INFRA; + break; + case NL80211_IFTYPE_ADHOC: + *mode = IW_MODE_ADHOC; + break; + case NL80211_IFTYPE_MONITOR: + *mode = IW_MODE_MONITOR; + break; + case NL80211_IFTYPE_WDS: + *mode = IW_MODE_REPEAT; + break; + case NL80211_IFTYPE_AP_VLAN: + *mode = IW_MODE_SECOND; /* FIXME */ + break; + default: + *mode = IW_MODE_AUTO; + break; + } + return 0; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwmode); + + +int cfg80211_wext_giwrange(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct iw_range *range = (struct iw_range *) extra; + enum ieee80211_band band; + int i, c = 0; + + if (!wdev) + return -EOPNOTSUPP; + + data->length = sizeof(struct iw_range); + memset(range, 0, sizeof(struct iw_range)); + + range->we_version_compiled = WIRELESS_EXT; + range->we_version_source = 21; + range->retry_capa = IW_RETRY_LIMIT; + range->retry_flags = IW_RETRY_LIMIT; + range->min_retry = 0; + range->max_retry = 255; + range->min_rts = 0; + range->max_rts = 2347; + range->min_frag = 256; + range->max_frag = 2346; + + range->max_encoding_tokens = 4; + + range->max_qual.updated = IW_QUAL_NOISE_INVALID; + + 
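+ /*
+ * Advertise quality ranges according to how the driver reports
+ * signal strength: mBm-capable drivers are shown in dBm (level
+ * floor -110 dBm), "unspec" drivers use a relative 0..100 scale,
+ * and drivers without signal reporting leave the range untouched.
+ */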
switch (wdev->wiphy->signal_type) { + case CFG80211_SIGNAL_TYPE_NONE: + break; + case CFG80211_SIGNAL_TYPE_MBM: + range->max_qual.level = -110; + range->max_qual.qual = 70; + range->avg_qual.qual = 35; + range->max_qual.updated |= IW_QUAL_DBM; + range->max_qual.updated |= IW_QUAL_QUAL_UPDATED; + range->max_qual.updated |= IW_QUAL_LEVEL_UPDATED; + break; + case CFG80211_SIGNAL_TYPE_UNSPEC: + range->max_qual.level = 100; + range->max_qual.qual = 100; + range->avg_qual.qual = 50; + range->max_qual.updated |= IW_QUAL_QUAL_UPDATED; + range->max_qual.updated |= IW_QUAL_LEVEL_UPDATED; + break; + } + + range->avg_qual.level = range->max_qual.level / 2; + range->avg_qual.noise = range->max_qual.noise / 2; + range->avg_qual.updated = range->max_qual.updated; + + for (i = 0; i < wdev->wiphy->n_cipher_suites; i++) { + switch (wdev->wiphy->cipher_suites[i]) { + case WLAN_CIPHER_SUITE_TKIP: + range->enc_capa |= (IW_ENC_CAPA_CIPHER_TKIP | + IW_ENC_CAPA_WPA); + break; + + case WLAN_CIPHER_SUITE_CCMP: + range->enc_capa |= (IW_ENC_CAPA_CIPHER_CCMP | + IW_ENC_CAPA_WPA2); + break; + + case WLAN_CIPHER_SUITE_WEP40: + range->encoding_size[range->num_encoding_sizes++] = + WLAN_KEY_LEN_WEP40; + break; + + case WLAN_CIPHER_SUITE_WEP104: + range->encoding_size[range->num_encoding_sizes++] = + WLAN_KEY_LEN_WEP104; + break; + } + } + + for (band = 0; band < IEEE80211_NUM_BANDS; band ++) { + struct ieee80211_supported_band *sband; + + sband = wdev->wiphy->bands[band]; + + if (!sband) + continue; + + for (i = 0; i < sband->n_channels && c < IW_MAX_FREQUENCIES; i++) { + struct ieee80211_channel *chan = &sband->channels[i]; + + if (!(chan->flags & IEEE80211_CHAN_DISABLED)) { + range->freq[c].i = + ieee80211_frequency_to_channel( + chan->center_freq); + range->freq[c].m = chan->center_freq; + range->freq[c].e = 6; + c++; + } + } + } + range->num_channels = c; + range->num_frequency = c; + + IW_EVENT_CAPA_SET_KERNEL(range->event_capa); + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); + + if (wdev->wiphy->max_scan_ssids > 0) + range->scan_capa |= IW_SCAN_CAPA_ESSID; + + return 0; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange); + + +/** + * cfg80211_wext_freq - get wext frequency for non-"auto" + * @wiphy: the wiphy + * @freq: the wext freq encoding + * + * Returns a frequency, or a negative error code, or 0 for auto. + */ +int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq) +{ + /* + * Parse frequency - return 0 for auto and + * -EINVAL for impossible things. 
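+ * Wext encodes a frequency as m * 10^e Hz; an entry with e == 0 is
+ * taken as a channel number (negative m meaning "auto"), so e.g.
+ * { m = 6, e = 0 } is channel 6 and { m = 2412, e = 6 } is 2412 MHz.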
+ */ + if (freq->e == 0) { + if (freq->m < 0) + return 0; + return ieee80211_channel_to_frequency(freq->m); + } else { + int i, div = 1000000; + for (i = 0; i < freq->e; i++) + div /= 10; + if (div <= 0) + return -EINVAL; + return freq->m / div; + } +} + +int cfg80211_wext_siwrts(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *rts, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + u32 orts = wdev->wiphy->rts_threshold; + int err; + + if (rts->disabled || !rts->fixed) + wdev->wiphy->rts_threshold = (u32) -1; + else if (rts->value < 0) + return -EINVAL; + else + wdev->wiphy->rts_threshold = rts->value; + + err = rdev->ops->set_wiphy_params(wdev->wiphy, + WIPHY_PARAM_RTS_THRESHOLD); + if (err) + wdev->wiphy->rts_threshold = orts; + + return err; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwrts); + +int cfg80211_wext_giwrts(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *rts, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + rts->value = wdev->wiphy->rts_threshold; + rts->disabled = rts->value == (u32) -1; + rts->fixed = 1; + + return 0; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwrts); + +int cfg80211_wext_siwfrag(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *frag, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + u32 ofrag = wdev->wiphy->frag_threshold; + int err; + + if (frag->disabled || !frag->fixed) + wdev->wiphy->frag_threshold = (u32) -1; + else if (frag->value < 256) + return -EINVAL; + else { + /* Fragment length must be even, so strip LSB. */ + wdev->wiphy->frag_threshold = frag->value & ~0x1; + } + + err = rdev->ops->set_wiphy_params(wdev->wiphy, + WIPHY_PARAM_FRAG_THRESHOLD); + if (err) + wdev->wiphy->frag_threshold = ofrag; + + return err; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwfrag); + +int cfg80211_wext_giwfrag(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *frag, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + frag->value = wdev->wiphy->frag_threshold; + frag->disabled = frag->value == (u32) -1; + frag->fixed = 1; + + return 0; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag); + +int cfg80211_wext_siwretry(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *retry, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + u32 changed = 0; + u8 olong = wdev->wiphy->retry_long; + u8 oshort = wdev->wiphy->retry_short; + int err; + + if (retry->disabled || + (retry->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT) + return -EINVAL; + + if (retry->flags & IW_RETRY_LONG) { + wdev->wiphy->retry_long = retry->value; + changed |= WIPHY_PARAM_RETRY_LONG; + } else if (retry->flags & IW_RETRY_SHORT) { + wdev->wiphy->retry_short = retry->value; + changed |= WIPHY_PARAM_RETRY_SHORT; + } else { + wdev->wiphy->retry_short = retry->value; + wdev->wiphy->retry_long = retry->value; + changed |= WIPHY_PARAM_RETRY_LONG; + changed |= WIPHY_PARAM_RETRY_SHORT; + } + + if (!changed) + return 0; + + err = rdev->ops->set_wiphy_params(wdev->wiphy, changed); + if (err) { + wdev->wiphy->retry_short = oshort; + wdev->wiphy->retry_long = olong; + } + + return err; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwretry); + +int cfg80211_wext_giwretry(struct net_device *dev, + struct iw_request_info *info, + struct 
iw_param *retry, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + retry->disabled = 0; + + if (retry->flags == 0 || (retry->flags & IW_RETRY_SHORT)) { + /* + * First return short value, iwconfig will ask long value + * later if needed + */ + retry->flags |= IW_RETRY_LIMIT; + retry->value = wdev->wiphy->retry_short; + if (wdev->wiphy->retry_long != wdev->wiphy->retry_short) + retry->flags |= IW_RETRY_LONG; + + return 0; + } + + if (retry->flags & IW_RETRY_LONG) { + retry->flags = IW_RETRY_LIMIT | IW_RETRY_LONG; + retry->value = wdev->wiphy->retry_long; + } + + return 0; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwretry); + +static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *addr, + bool remove, bool tx_key, int idx, + struct key_params *params) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int err, i; + bool rejoin = false; + + if (!wdev->wext.keys) { + wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys), + GFP_KERNEL); + if (!wdev->wext.keys) + return -ENOMEM; + for (i = 0; i < 6; i++) + wdev->wext.keys->params[i].key = + wdev->wext.keys->data[i]; + } + + if (wdev->iftype != NL80211_IFTYPE_ADHOC && + wdev->iftype != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + if (!wdev->current_bss) + return -ENOLINK; + + if (!rdev->ops->set_default_mgmt_key) + return -EOPNOTSUPP; + + if (idx < 4 || idx > 5) + return -EINVAL; + } else if (idx < 0 || idx > 3) + return -EINVAL; + + if (remove) { + err = 0; + if (wdev->current_bss) { + /* + * If removing the current TX key, we will need to + * join a new IBSS without the privacy bit clear. + */ + if (idx == wdev->wext.default_key && + wdev->iftype == NL80211_IFTYPE_ADHOC) { + __cfg80211_leave_ibss(rdev, wdev->netdev, true); + rejoin = true; + } + err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr); + } + wdev->wext.connect.privacy = false; + /* + * Applications using wireless extensions expect to be + * able to delete keys that don't exist, so allow that. + */ + if (err == -ENOENT) + err = 0; + if (!err) { + if (!addr) { + wdev->wext.keys->params[idx].key_len = 0; + wdev->wext.keys->params[idx].cipher = 0; + } + if (idx == wdev->wext.default_key) + wdev->wext.default_key = -1; + else if (idx == wdev->wext.default_mgmt_key) + wdev->wext.default_mgmt_key = -1; + } + + if (!err && rejoin) + err = cfg80211_ibss_wext_join(rdev, wdev); + + return err; + } + + if (addr) + tx_key = false; + + if (cfg80211_validate_key_settings(rdev, params, idx, addr)) + return -EINVAL; + + err = 0; + if (wdev->current_bss) + err = rdev->ops->add_key(&rdev->wiphy, dev, idx, addr, params); + if (err) + return err; + + if (!addr) { + wdev->wext.keys->params[idx] = *params; + memcpy(wdev->wext.keys->data[idx], + params->key, params->key_len); + wdev->wext.keys->params[idx].key = + wdev->wext.keys->data[idx]; + } + + if ((params->cipher == WLAN_CIPHER_SUITE_WEP40 || + params->cipher == WLAN_CIPHER_SUITE_WEP104) && + (tx_key || (!addr && wdev->wext.default_key == -1))) { + if (wdev->current_bss) { + /* + * If we are getting a new TX key from not having + * had one before we need to join a new IBSS with + * the privacy bit set. 
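+ * The IBSS is left here and rejoined further down, once the new
+ * default key has been installed.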
+ */ + if (wdev->iftype == NL80211_IFTYPE_ADHOC && + wdev->wext.default_key == -1) { + __cfg80211_leave_ibss(rdev, wdev->netdev, true); + rejoin = true; + } + err = rdev->ops->set_default_key(&rdev->wiphy, + dev, idx); + } + if (!err) { + wdev->wext.default_key = idx; + if (rejoin) + err = cfg80211_ibss_wext_join(rdev, wdev); + } + return err; + } + + if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC && + (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) { + if (wdev->current_bss) + err = rdev->ops->set_default_mgmt_key(&rdev->wiphy, + dev, idx); + if (!err) + wdev->wext.default_mgmt_key = idx; + return err; + } + + return 0; +} + +static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *addr, + bool remove, bool tx_key, int idx, + struct key_params *params) +{ + int err; + + /* devlist mutex needed for possible IBSS re-join */ + mutex_lock(&rdev->devlist_mtx); + wdev_lock(dev->ieee80211_ptr); + err = __cfg80211_set_encryption(rdev, dev, addr, remove, + tx_key, idx, params); + wdev_unlock(dev->ieee80211_ptr); + mutex_unlock(&rdev->devlist_mtx); + + return err; +} + +int cfg80211_wext_siwencode(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *erq, char *keybuf) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + int idx, err; + bool remove = false; + struct key_params params; + + if (wdev->iftype != NL80211_IFTYPE_STATION && + wdev->iftype != NL80211_IFTYPE_ADHOC) + return -EOPNOTSUPP; + + /* no use -- only MFP (set_default_mgmt_key) is optional */ + if (!rdev->ops->del_key || + !rdev->ops->add_key || + !rdev->ops->set_default_key) + return -EOPNOTSUPP; + + idx = erq->flags & IW_ENCODE_INDEX; + if (idx == 0) { + idx = wdev->wext.default_key; + if (idx < 0) + idx = 0; + } else if (idx < 1 || idx > 4) + return -EINVAL; + else + idx--; + + if (erq->flags & IW_ENCODE_DISABLED) + remove = true; + else if (erq->length == 0) { + /* No key data - just set the default TX key index */ + err = 0; + wdev_lock(wdev); + if (wdev->current_bss) + err = rdev->ops->set_default_key(&rdev->wiphy, + dev, idx); + if (!err) + wdev->wext.default_key = idx; + wdev_unlock(wdev); + return err; + } + + memset(¶ms, 0, sizeof(params)); + params.key = keybuf; + params.key_len = erq->length; + if (erq->length == 5) + params.cipher = WLAN_CIPHER_SUITE_WEP40; + else if (erq->length == 13) + params.cipher = WLAN_CIPHER_SUITE_WEP104; + else if (!remove) + return -EINVAL; + + return cfg80211_set_encryption(rdev, dev, NULL, remove, + wdev->wext.default_key == -1, + idx, ¶ms); +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwencode); + +int cfg80211_wext_siwencodeext(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *erq, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; + const u8 *addr; + int idx; + bool remove = false; + struct key_params params; + u32 cipher; + + if (wdev->iftype != NL80211_IFTYPE_STATION && + wdev->iftype != NL80211_IFTYPE_ADHOC) + return -EOPNOTSUPP; + + /* no use -- only MFP (set_default_mgmt_key) is optional */ + if (!rdev->ops->del_key || + !rdev->ops->add_key || + !rdev->ops->set_default_key) + return -EOPNOTSUPP; + + switch (ext->alg) { + case IW_ENCODE_ALG_NONE: + remove = true; + cipher = 0; + break; + case IW_ENCODE_ALG_WEP: + if (ext->key_len == 5) + cipher = 
WLAN_CIPHER_SUITE_WEP40; + else if (ext->key_len == 13) + cipher = WLAN_CIPHER_SUITE_WEP104; + else + return -EINVAL; + break; + case IW_ENCODE_ALG_TKIP: + cipher = WLAN_CIPHER_SUITE_TKIP; + break; + case IW_ENCODE_ALG_CCMP: + cipher = WLAN_CIPHER_SUITE_CCMP; + break; + case IW_ENCODE_ALG_AES_CMAC: + cipher = WLAN_CIPHER_SUITE_AES_CMAC; + break; + default: + return -EOPNOTSUPP; + } + + if (erq->flags & IW_ENCODE_DISABLED) + remove = true; + + idx = erq->flags & IW_ENCODE_INDEX; + if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + if (idx < 4 || idx > 5) { + idx = wdev->wext.default_mgmt_key; + if (idx < 0) + return -EINVAL; + } else + idx--; + } else { + if (idx < 1 || idx > 4) { + idx = wdev->wext.default_key; + if (idx < 0) + return -EINVAL; + } else + idx--; + } + + addr = ext->addr.sa_data; + if (is_broadcast_ether_addr(addr)) + addr = NULL; + + memset(¶ms, 0, sizeof(params)); + params.key = ext->key; + params.key_len = ext->key_len; + params.cipher = cipher; + + if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { + params.seq = ext->rx_seq; + params.seq_len = 6; + } + + return cfg80211_set_encryption( + rdev, dev, addr, remove, + ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY, + idx, ¶ms); +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwencodeext); + +int cfg80211_wext_giwencode(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *erq, char *keybuf) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + int idx; + + if (wdev->iftype != NL80211_IFTYPE_STATION && + wdev->iftype != NL80211_IFTYPE_ADHOC) + return -EOPNOTSUPP; + + idx = erq->flags & IW_ENCODE_INDEX; + if (idx == 0) { + idx = wdev->wext.default_key; + if (idx < 0) + idx = 0; + } else if (idx < 1 || idx > 4) + return -EINVAL; + else + idx--; + + erq->flags = idx + 1; + + if (!wdev->wext.keys || !wdev->wext.keys->params[idx].cipher) { + erq->flags |= IW_ENCODE_DISABLED; + erq->length = 0; + return 0; + } + + erq->length = min_t(size_t, erq->length, + wdev->wext.keys->params[idx].key_len); + memcpy(keybuf, wdev->wext.keys->params[idx].key, erq->length); + erq->flags |= IW_ENCODE_ENABLED; + + return 0; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode); + +int cfg80211_wext_siwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *wextfreq, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + int freq, err; + + switch (wdev->iftype) { + case NL80211_IFTYPE_STATION: + return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra); + case NL80211_IFTYPE_ADHOC: + return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); + default: + freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); + if (freq < 0) + return freq; + if (freq == 0) + return -EINVAL; + mutex_lock(&rdev->devlist_mtx); + err = rdev_set_freq(rdev, NULL, freq, NL80211_CHAN_NO_HT); + mutex_unlock(&rdev->devlist_mtx); + return err; + } +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq); + +int cfg80211_wext_giwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *freq, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + + switch (wdev->iftype) { + case NL80211_IFTYPE_STATION: + return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra); + case NL80211_IFTYPE_ADHOC: + return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); + default: + if (!rdev->channel) + return -EINVAL; + freq->m = rdev->channel->center_freq; + freq->e = 6; + return 0; + } +} 
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwfreq); + +int cfg80211_wext_siwtxpower(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + enum tx_power_setting type; + int dbm = 0; + + if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) + return -EINVAL; + if (data->txpower.flags & IW_TXPOW_RANGE) + return -EINVAL; + + if (!rdev->ops->set_tx_power) + return -EOPNOTSUPP; + + /* only change when not disabling */ + if (!data->txpower.disabled) { + rfkill_set_sw_state(rdev->rfkill, false); + + if (data->txpower.fixed) { + /* + * wext doesn't support negative values, see + * below where it's for automatic + */ + if (data->txpower.value < 0) + return -EINVAL; + dbm = data->txpower.value; + type = TX_POWER_FIXED; + /* TODO: do regulatory check! */ + } else { + /* + * Automatic power level setting, max being the value + * passed in from userland. + */ + if (data->txpower.value < 0) { + type = TX_POWER_AUTOMATIC; + } else { + dbm = data->txpower.value; + type = TX_POWER_LIMITED; + } + } + } else { + rfkill_set_sw_state(rdev->rfkill, true); + schedule_work(&rdev->rfkill_sync); + return 0; + } + + return rdev->ops->set_tx_power(wdev->wiphy, type, dbm); +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower); + +int cfg80211_wext_giwtxpower(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + int err, val; + + if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) + return -EINVAL; + if (data->txpower.flags & IW_TXPOW_RANGE) + return -EINVAL; + + if (!rdev->ops->get_tx_power) + return -EOPNOTSUPP; + + err = rdev->ops->get_tx_power(wdev->wiphy, &val); + if (err) + return err; + + /* well... 
oh well */ + data->txpower.fixed = 1; + data->txpower.disabled = rfkill_blocked(rdev->rfkill); + data->txpower.value = val; + data->txpower.flags = IW_TXPOW_DBM; + + return 0; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower); + +static int cfg80211_set_auth_alg(struct wireless_dev *wdev, + s32 auth_alg) +{ + int nr_alg = 0; + + if (!auth_alg) + return -EINVAL; + + if (auth_alg & ~(IW_AUTH_ALG_OPEN_SYSTEM | + IW_AUTH_ALG_SHARED_KEY | + IW_AUTH_ALG_LEAP)) + return -EINVAL; + + if (auth_alg & IW_AUTH_ALG_OPEN_SYSTEM) { + nr_alg++; + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; + } + + if (auth_alg & IW_AUTH_ALG_SHARED_KEY) { + nr_alg++; + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_SHARED_KEY; + } + + if (auth_alg & IW_AUTH_ALG_LEAP) { + nr_alg++; + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; + } + + if (nr_alg > 1) + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; + + return 0; +} + +static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions) +{ + if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA | + IW_AUTH_WPA_VERSION_WPA2| + IW_AUTH_WPA_VERSION_DISABLED)) + return -EINVAL; + + if ((wpa_versions & IW_AUTH_WPA_VERSION_DISABLED) && + (wpa_versions & (IW_AUTH_WPA_VERSION_WPA| + IW_AUTH_WPA_VERSION_WPA2))) + return -EINVAL; + + if (wpa_versions & IW_AUTH_WPA_VERSION_DISABLED) + wdev->wext.connect.crypto.wpa_versions &= + ~(NL80211_WPA_VERSION_1|NL80211_WPA_VERSION_2); + + if (wpa_versions & IW_AUTH_WPA_VERSION_WPA) + wdev->wext.connect.crypto.wpa_versions |= + NL80211_WPA_VERSION_1; + + if (wpa_versions & IW_AUTH_WPA_VERSION_WPA2) + wdev->wext.connect.crypto.wpa_versions |= + NL80211_WPA_VERSION_2; + + return 0; +} + +static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher) +{ + if (cipher & IW_AUTH_CIPHER_WEP40) + wdev->wext.connect.crypto.cipher_group = + WLAN_CIPHER_SUITE_WEP40; + else if (cipher & IW_AUTH_CIPHER_WEP104) + wdev->wext.connect.crypto.cipher_group = + WLAN_CIPHER_SUITE_WEP104; + else if (cipher & IW_AUTH_CIPHER_TKIP) + wdev->wext.connect.crypto.cipher_group = + WLAN_CIPHER_SUITE_TKIP; + else if (cipher & IW_AUTH_CIPHER_CCMP) + wdev->wext.connect.crypto.cipher_group = + WLAN_CIPHER_SUITE_CCMP; + else if (cipher & IW_AUTH_CIPHER_AES_CMAC) + wdev->wext.connect.crypto.cipher_group = + WLAN_CIPHER_SUITE_AES_CMAC; + else if (cipher & IW_AUTH_CIPHER_NONE) + wdev->wext.connect.crypto.cipher_group = 0; + else + return -EINVAL; + + return 0; +} + +static int cfg80211_set_cipher_pairwise(struct wireless_dev *wdev, u32 cipher) +{ + int nr_ciphers = 0; + u32 *ciphers_pairwise = wdev->wext.connect.crypto.ciphers_pairwise; + + if (cipher & IW_AUTH_CIPHER_WEP40) { + ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP40; + nr_ciphers++; + } + + if (cipher & IW_AUTH_CIPHER_WEP104) { + ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP104; + nr_ciphers++; + } + + if (cipher & IW_AUTH_CIPHER_TKIP) { + ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_TKIP; + nr_ciphers++; + } + + if (cipher & IW_AUTH_CIPHER_CCMP) { + ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_CCMP; + nr_ciphers++; + } + + if (cipher & IW_AUTH_CIPHER_AES_CMAC) { + ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_AES_CMAC; + nr_ciphers++; + } + + BUILD_BUG_ON(NL80211_MAX_NR_CIPHER_SUITES < 5); + + wdev->wext.connect.crypto.n_ciphers_pairwise = nr_ciphers; + + return 0; +} + + +static int cfg80211_set_key_mgt(struct wireless_dev *wdev, u32 key_mgt) +{ + int nr_akm_suites = 0; + + if (key_mgt & ~(IW_AUTH_KEY_MGMT_802_1X | + 
IW_AUTH_KEY_MGMT_PSK)) + return -EINVAL; + + if (key_mgt & IW_AUTH_KEY_MGMT_802_1X) { + wdev->wext.connect.crypto.akm_suites[nr_akm_suites] = + WLAN_AKM_SUITE_8021X; + nr_akm_suites++; + } + + if (key_mgt & IW_AUTH_KEY_MGMT_PSK) { + wdev->wext.connect.crypto.akm_suites[nr_akm_suites] = + WLAN_AKM_SUITE_PSK; + nr_akm_suites++; + } + + wdev->wext.connect.crypto.n_akm_suites = nr_akm_suites; + + return 0; +} + +int cfg80211_wext_siwauth(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *data, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + if (wdev->iftype != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + switch (data->flags & IW_AUTH_INDEX) { + case IW_AUTH_PRIVACY_INVOKED: + wdev->wext.connect.privacy = data->value; + return 0; + case IW_AUTH_WPA_VERSION: + return cfg80211_set_wpa_version(wdev, data->value); + case IW_AUTH_CIPHER_GROUP: + return cfg80211_set_cipher_group(wdev, data->value); + case IW_AUTH_KEY_MGMT: + return cfg80211_set_key_mgt(wdev, data->value); + case IW_AUTH_CIPHER_PAIRWISE: + return cfg80211_set_cipher_pairwise(wdev, data->value); + case IW_AUTH_80211_AUTH_ALG: + return cfg80211_set_auth_alg(wdev, data->value); + case IW_AUTH_WPA_ENABLED: + case IW_AUTH_RX_UNENCRYPTED_EAPOL: + case IW_AUTH_DROP_UNENCRYPTED: + case IW_AUTH_MFP: + return 0; + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwauth); + +int cfg80211_wext_giwauth(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *data, char *extra) +{ + /* XXX: what do we need? */ + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwauth); + +int cfg80211_wext_siwpower(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *wrq, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + bool ps = wdev->ps; + int timeout = wdev->ps_timeout; + int err; + + if (wdev->iftype != NL80211_IFTYPE_STATION) + return -EINVAL; + + if (!rdev->ops->set_power_mgmt) + return -EOPNOTSUPP; + + if (wrq->disabled) { + ps = false; + } else { + switch (wrq->flags & IW_POWER_MODE) { + case IW_POWER_ON: /* If not specified */ + case IW_POWER_MODE: /* If set all mask */ + case IW_POWER_ALL_R: /* If explicitely state all */ + ps = true; + break; + default: /* Otherwise we ignore */ + return -EINVAL; + } + + if (wrq->flags & ~(IW_POWER_MODE | IW_POWER_TIMEOUT)) + return -EINVAL; + + if (wrq->flags & IW_POWER_TIMEOUT) + timeout = wrq->value / 1000; + } + + err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, ps, timeout); + if (err) + return err; + + wdev->ps = ps; + wdev->ps_timeout = timeout; + + return 0; + +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwpower); + +int cfg80211_wext_giwpower(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *wrq, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + wrq->disabled = !wdev->ps; + + return 0; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwpower); + +static int cfg80211_wds_wext_siwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *addr, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + int err; + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS)) + return -EINVAL; + + if (addr->sa_family != ARPHRD_ETHER) + return -EINVAL; + + if (netif_running(dev)) + return -EBUSY; + + if (!rdev->ops->set_wds_peer) + return -EOPNOTSUPP; + + err = 
rdev->ops->set_wds_peer(wdev->wiphy, dev, (u8 *) &addr->sa_data); + if (err) + return err; + + memcpy(&wdev->wext.bssid, (u8 *) &addr->sa_data, ETH_ALEN); + + return 0; +} + +static int cfg80211_wds_wext_giwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *addr, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS)) + return -EINVAL; + + addr->sa_family = ARPHRD_ETHER; + memcpy(&addr->sa_data, wdev->wext.bssid, ETH_ALEN); + + return 0; +} + +int cfg80211_wext_siwrate(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *rate, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_bitrate_mask mask; + u32 fixed, maxrate; + struct ieee80211_supported_band *sband; + int band, ridx; + bool match = false; + + if (!rdev->ops->set_bitrate_mask) + return -EOPNOTSUPP; + + memset(&mask, 0, sizeof(mask)); + fixed = 0; + maxrate = (u32)-1; + + if (rate->value < 0) { + /* nothing */ + } else if (rate->fixed) { + fixed = rate->value / 100000; + } else { + maxrate = rate->value / 100000; + } + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + sband = wdev->wiphy->bands[band]; + if (sband == NULL) + continue; + for (ridx = 0; ridx < sband->n_bitrates; ridx++) { + struct ieee80211_rate *srate = &sband->bitrates[ridx]; + if (fixed == srate->bitrate) { + mask.control[band].legacy = 1 << ridx; + match = true; + break; + } + if (srate->bitrate <= maxrate) { + mask.control[band].legacy |= 1 << ridx; + match = true; + } + } + } + + if (!match) + return -EINVAL; + + return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask); +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate); + +int cfg80211_wext_giwrate(struct net_device *dev, + struct iw_request_info *info, + struct iw_param *rate, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + /* we are under RTNL - globally locked - so can use a static struct */ + static struct station_info sinfo; + u8 addr[ETH_ALEN]; + int err; + + if (wdev->iftype != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (!rdev->ops->get_station) + return -EOPNOTSUPP; + + err = 0; + wdev_lock(wdev); + if (wdev->current_bss) + memcpy(addr, wdev->current_bss->pub.bssid, ETH_ALEN); + else + err = -EOPNOTSUPP; + wdev_unlock(wdev); + if (err) + return err; + + err = rdev->ops->get_station(&rdev->wiphy, dev, addr, &sinfo); + if (err) + return err; + + if (!(sinfo.filled & STATION_INFO_TX_BITRATE)) + return -EOPNOTSUPP; + + rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate); + + return 0; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwrate); + +/* Get wireless statistics. 
Called by /proc/net/wireless and by SIOCGIWSTATS */ +struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + /* we are under RTNL - globally locked - so can use static structs */ + static struct iw_statistics wstats; + static struct station_info sinfo; + u8 bssid[ETH_ALEN]; + + if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) + return NULL; + + if (!rdev->ops->get_station) + return NULL; + + /* Grab BSSID of current BSS, if any */ + wdev_lock(wdev); + if (!wdev->current_bss) { + wdev_unlock(wdev); + return NULL; + } + memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); + wdev_unlock(wdev); + + if (rdev->ops->get_station(&rdev->wiphy, dev, bssid, &sinfo)) + return NULL; + + memset(&wstats, 0, sizeof(wstats)); + + switch (rdev->wiphy.signal_type) { + case CFG80211_SIGNAL_TYPE_MBM: + if (sinfo.filled & STATION_INFO_SIGNAL) { + int sig = sinfo.signal; + wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; + wstats.qual.updated |= IW_QUAL_QUAL_UPDATED; + wstats.qual.updated |= IW_QUAL_DBM; + wstats.qual.level = sig; + if (sig < -110) + sig = -110; + else if (sig > -40) + sig = -40; + wstats.qual.qual = sig + 110; + break; + } + case CFG80211_SIGNAL_TYPE_UNSPEC: + if (sinfo.filled & STATION_INFO_SIGNAL) { + wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; + wstats.qual.updated |= IW_QUAL_QUAL_UPDATED; + wstats.qual.level = sinfo.signal; + wstats.qual.qual = sinfo.signal; + break; + } + default: + wstats.qual.updated |= IW_QUAL_LEVEL_INVALID; + wstats.qual.updated |= IW_QUAL_QUAL_INVALID; + } + + wstats.qual.updated |= IW_QUAL_NOISE_INVALID; + + return &wstats; +} +EXPORT_SYMBOL_GPL(cfg80211_wireless_stats); + +int cfg80211_wext_siwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + switch (wdev->iftype) { + case NL80211_IFTYPE_ADHOC: + return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra); + case NL80211_IFTYPE_STATION: + return cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra); + case NL80211_IFTYPE_WDS: + return cfg80211_wds_wext_siwap(dev, info, ap_addr, extra); + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwap); + +int cfg80211_wext_giwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + switch (wdev->iftype) { + case NL80211_IFTYPE_ADHOC: + return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra); + case NL80211_IFTYPE_STATION: + return cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra); + case NL80211_IFTYPE_WDS: + return cfg80211_wds_wext_giwap(dev, info, ap_addr, extra); + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwap); + +int cfg80211_wext_siwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + switch (wdev->iftype) { + case NL80211_IFTYPE_ADHOC: + return cfg80211_ibss_wext_siwessid(dev, info, data, ssid); + case NL80211_IFTYPE_STATION: + return cfg80211_mgd_wext_siwessid(dev, info, data, ssid); + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwessid); + +int cfg80211_wext_giwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + switch 
(wdev->iftype) { + case NL80211_IFTYPE_ADHOC: + return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); + case NL80211_IFTYPE_STATION: + return cfg80211_mgd_wext_giwessid(dev, info, data, ssid); + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL_GPL(cfg80211_wext_giwessid); + +int cfg80211_wext_siwpmksa(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_pmksa cfg_pmksa; + struct iw_pmksa *pmksa = (struct iw_pmksa *)extra; + + memset(&cfg_pmksa, 0, sizeof(struct cfg80211_pmksa)); + + if (wdev->iftype != NL80211_IFTYPE_STATION) + return -EINVAL; + + cfg_pmksa.bssid = pmksa->bssid.sa_data; + cfg_pmksa.pmkid = pmksa->pmkid; + + switch (pmksa->cmd) { + case IW_PMKSA_ADD: + if (!rdev->ops->set_pmksa) + return -EOPNOTSUPP; + + return rdev->ops->set_pmksa(&rdev->wiphy, dev, &cfg_pmksa); + + case IW_PMKSA_REMOVE: + if (!rdev->ops->del_pmksa) + return -EOPNOTSUPP; + + return rdev->ops->del_pmksa(&rdev->wiphy, dev, &cfg_pmksa); + + case IW_PMKSA_FLUSH: + if (!rdev->ops->flush_pmksa) + return -EOPNOTSUPP; + + return rdev->ops->flush_pmksa(&rdev->wiphy, dev); + + default: + return -EOPNOTSUPP; + } +} + +static const iw_handler cfg80211_handlers[] = { + [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname, + [IW_IOCTL_IDX(SIOCSIWFREQ)] = (iw_handler) cfg80211_wext_siwfreq, + [IW_IOCTL_IDX(SIOCGIWFREQ)] = (iw_handler) cfg80211_wext_giwfreq, + [IW_IOCTL_IDX(SIOCSIWMODE)] = (iw_handler) cfg80211_wext_siwmode, + [IW_IOCTL_IDX(SIOCGIWMODE)] = (iw_handler) cfg80211_wext_giwmode, + [IW_IOCTL_IDX(SIOCGIWRANGE)] = (iw_handler) cfg80211_wext_giwrange, + [IW_IOCTL_IDX(SIOCSIWAP)] = (iw_handler) cfg80211_wext_siwap, + [IW_IOCTL_IDX(SIOCGIWAP)] = (iw_handler) cfg80211_wext_giwap, + [IW_IOCTL_IDX(SIOCSIWMLME)] = (iw_handler) cfg80211_wext_siwmlme, + [IW_IOCTL_IDX(SIOCSIWSCAN)] = (iw_handler) cfg80211_wext_siwscan, + [IW_IOCTL_IDX(SIOCGIWSCAN)] = (iw_handler) cfg80211_wext_giwscan, + [IW_IOCTL_IDX(SIOCSIWESSID)] = (iw_handler) cfg80211_wext_siwessid, + [IW_IOCTL_IDX(SIOCGIWESSID)] = (iw_handler) cfg80211_wext_giwessid, + [IW_IOCTL_IDX(SIOCSIWRATE)] = (iw_handler) cfg80211_wext_siwrate, + [IW_IOCTL_IDX(SIOCGIWRATE)] = (iw_handler) cfg80211_wext_giwrate, + [IW_IOCTL_IDX(SIOCSIWRTS)] = (iw_handler) cfg80211_wext_siwrts, + [IW_IOCTL_IDX(SIOCGIWRTS)] = (iw_handler) cfg80211_wext_giwrts, + [IW_IOCTL_IDX(SIOCSIWFRAG)] = (iw_handler) cfg80211_wext_siwfrag, + [IW_IOCTL_IDX(SIOCGIWFRAG)] = (iw_handler) cfg80211_wext_giwfrag, + [IW_IOCTL_IDX(SIOCSIWTXPOW)] = (iw_handler) cfg80211_wext_siwtxpower, + [IW_IOCTL_IDX(SIOCGIWTXPOW)] = (iw_handler) cfg80211_wext_giwtxpower, + [IW_IOCTL_IDX(SIOCSIWRETRY)] = (iw_handler) cfg80211_wext_siwretry, + [IW_IOCTL_IDX(SIOCGIWRETRY)] = (iw_handler) cfg80211_wext_giwretry, + [IW_IOCTL_IDX(SIOCSIWENCODE)] = (iw_handler) cfg80211_wext_siwencode, + [IW_IOCTL_IDX(SIOCGIWENCODE)] = (iw_handler) cfg80211_wext_giwencode, + [IW_IOCTL_IDX(SIOCSIWPOWER)] = (iw_handler) cfg80211_wext_siwpower, + [IW_IOCTL_IDX(SIOCGIWPOWER)] = (iw_handler) cfg80211_wext_giwpower, + [IW_IOCTL_IDX(SIOCSIWGENIE)] = (iw_handler) cfg80211_wext_siwgenie, + [IW_IOCTL_IDX(SIOCSIWAUTH)] = (iw_handler) cfg80211_wext_siwauth, + [IW_IOCTL_IDX(SIOCGIWAUTH)] = (iw_handler) cfg80211_wext_giwauth, + [IW_IOCTL_IDX(SIOCSIWENCODEEXT)]= (iw_handler) cfg80211_wext_siwencodeext, + [IW_IOCTL_IDX(SIOCSIWPMKSA)] = (iw_handler) cfg80211_wext_siwpmksa, 
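+ /* entries are indexed via IW_IOCTL_IDX(), i.e. relative to SIOCIWFIRST */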
+}; + +const struct iw_handler_def cfg80211_wext_handler = { + .num_standard = ARRAY_SIZE(cfg80211_handlers), + .standard = cfg80211_handlers, + .get_wireless_stats = cfg80211_wireless_stats, +}; diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h new file mode 100644 index 0000000..20b3dae --- /dev/null +++ b/net/wireless/wext-compat.h @@ -0,0 +1,49 @@ +#ifndef __WEXT_COMPAT +#define __WEXT_COMPAT + +#include +#include + +int cfg80211_ibss_wext_siwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *freq, char *extra); +int cfg80211_ibss_wext_giwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *freq, char *extra); +int cfg80211_ibss_wext_siwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra); +int cfg80211_ibss_wext_giwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra); +int cfg80211_ibss_wext_siwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid); +int cfg80211_ibss_wext_giwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid); + +int cfg80211_mgd_wext_siwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *freq, char *extra); +int cfg80211_mgd_wext_giwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *freq, char *extra); +int cfg80211_mgd_wext_siwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra); +int cfg80211_mgd_wext_giwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra); +int cfg80211_mgd_wext_siwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid); +int cfg80211_mgd_wext_giwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid); + +int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq); + + +extern const struct iw_handler_def cfg80211_wext_handler; +#endif /* __WEXT_COMPAT */ diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c new file mode 100644 index 0000000..43700eb --- /dev/null +++ b/net/wireless/wext-core.c @@ -0,0 +1,1103 @@ +/* + * This file implement the Wireless Extensions core API. + * + * Authors : Jean Tourrilhes - HPL - + * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. + * Copyright 2009 Johannes Berg + * + * (As all part of the Linux kernel, this file is GPL) + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, + unsigned int, struct iw_request_info *, + iw_handler); + + +/* + * Meta-data about all the standard Wireless Extension request we + * know about. 
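+ * Each entry describes an ioctl's argument layout (header type,
+ * token size and token count limits) so that request data can be
+ * copied to and from userspace generically.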
+ */ +static const struct iw_ioctl_description standard_ioctl[] = { + [SIOCSIWCOMMIT - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_NULL, + }, + [SIOCGIWNAME - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_CHAR, + .flags = IW_DESCR_FLAG_DUMP, + }, + [SIOCSIWNWID - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + .flags = IW_DESCR_FLAG_EVENT, + }, + [SIOCGIWNWID - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + .flags = IW_DESCR_FLAG_DUMP, + }, + [SIOCSIWFREQ - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_FREQ, + .flags = IW_DESCR_FLAG_EVENT, + }, + [SIOCGIWFREQ - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_FREQ, + .flags = IW_DESCR_FLAG_DUMP, + }, + [SIOCSIWMODE - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_UINT, + .flags = IW_DESCR_FLAG_EVENT, + }, + [SIOCGIWMODE - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_UINT, + .flags = IW_DESCR_FLAG_DUMP, + }, + [SIOCSIWSENS - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCGIWSENS - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCSIWRANGE - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_NULL, + }, + [SIOCGIWRANGE - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = sizeof(struct iw_range), + .flags = IW_DESCR_FLAG_DUMP, + }, + [SIOCSIWPRIV - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_NULL, + }, + [SIOCGIWPRIV - SIOCIWFIRST] = { /* (handled directly by us) */ + .header_type = IW_HEADER_TYPE_POINT, + .token_size = sizeof(struct iw_priv_args), + .max_tokens = 16, + .flags = IW_DESCR_FLAG_NOMAX, + }, + [SIOCSIWSTATS - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_NULL, + }, + [SIOCGIWSTATS - SIOCIWFIRST] = { /* (handled directly by us) */ + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = sizeof(struct iw_statistics), + .flags = IW_DESCR_FLAG_DUMP, + }, + [SIOCSIWSPY - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = sizeof(struct sockaddr), + .max_tokens = IW_MAX_SPY, + }, + [SIOCGIWSPY - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = sizeof(struct sockaddr) + + sizeof(struct iw_quality), + .max_tokens = IW_MAX_SPY, + }, + [SIOCSIWTHRSPY - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = sizeof(struct iw_thrspy), + .min_tokens = 1, + .max_tokens = 1, + }, + [SIOCGIWTHRSPY - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = sizeof(struct iw_thrspy), + .min_tokens = 1, + .max_tokens = 1, + }, + [SIOCSIWAP - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_ADDR, + }, + [SIOCGIWAP - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_ADDR, + .flags = IW_DESCR_FLAG_DUMP, + }, + [SIOCSIWMLME - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .min_tokens = sizeof(struct iw_mlme), + .max_tokens = sizeof(struct iw_mlme), + }, + [SIOCGIWAPLIST - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = sizeof(struct sockaddr) + + sizeof(struct iw_quality), + .max_tokens = IW_MAX_AP, + .flags = IW_DESCR_FLAG_NOMAX, + }, + [SIOCSIWSCAN - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .min_tokens = 0, + .max_tokens = sizeof(struct iw_scan_req), + }, + [SIOCGIWSCAN - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_SCAN_MAX_DATA, + .flags = IW_DESCR_FLAG_NOMAX, + }, + [SIOCSIWESSID - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_ESSID_MAX_SIZE, + .flags = 
IW_DESCR_FLAG_EVENT, + }, + [SIOCGIWESSID - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_ESSID_MAX_SIZE, + .flags = IW_DESCR_FLAG_DUMP, + }, + [SIOCSIWNICKN - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_ESSID_MAX_SIZE, + }, + [SIOCGIWNICKN - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_ESSID_MAX_SIZE, + }, + [SIOCSIWRATE - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCGIWRATE - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCSIWRTS - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCGIWRTS - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCSIWFRAG - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCGIWFRAG - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCSIWTXPOW - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCGIWTXPOW - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCSIWRETRY - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCGIWRETRY - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCSIWENCODE - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_ENCODING_TOKEN_MAX, + .flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT, + }, + [SIOCGIWENCODE - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_ENCODING_TOKEN_MAX, + .flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT, + }, + [SIOCSIWPOWER - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCGIWPOWER - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCSIWGENIE - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_GENERIC_IE_MAX, + }, + [SIOCGIWGENIE - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_GENERIC_IE_MAX, + }, + [SIOCSIWAUTH - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCGIWAUTH - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_PARAM, + }, + [SIOCSIWENCODEEXT - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .min_tokens = sizeof(struct iw_encode_ext), + .max_tokens = sizeof(struct iw_encode_ext) + + IW_ENCODING_TOKEN_MAX, + }, + [SIOCGIWENCODEEXT - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .min_tokens = sizeof(struct iw_encode_ext), + .max_tokens = sizeof(struct iw_encode_ext) + + IW_ENCODING_TOKEN_MAX, + }, + [SIOCSIWPMKSA - SIOCIWFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .min_tokens = sizeof(struct iw_pmksa), + .max_tokens = sizeof(struct iw_pmksa), + }, +}; +static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl); + +/* + * Meta-data about all the additional standard Wireless Extension events + * we know about. 
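
The standard_ioctl[] table that closes just above is pure metadata: nothing is executed from it, each entry only tells the core how to size-check and copy a request before a handler runs. A minimal sketch of that check for a SET request, with a made-up helper name (ioctl_standard_iw_point() further down does the real version, including the GET-side IW_DESCR_FLAG_NOMAX relaxation):

#include <linux/errno.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>

/* Hypothetical helper: bounds-check a SET request against its descriptor.
 * The payload later copied from user space is
 * iwp->length * descr->token_size bytes.
 */
static int example_check_set_request(const struct iw_ioctl_description *descr,
				     const struct iw_point *iwp)
{
	if (descr->header_type != IW_HEADER_TYPE_POINT)
		return 0;	/* fixed-size header, nothing variable to check */
	if (iwp->length > descr->max_tokens)
		return -E2BIG;	/* e.g. SIOCSIWESSID: more than IW_ESSID_MAX_SIZE */
	if (iwp->length < descr->min_tokens)
		return -EINVAL;	/* e.g. SIOCSIWMLME: less than sizeof(struct iw_mlme) */
	return 0;
}
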
+ */ +static const struct iw_ioctl_description standard_event[] = { + [IWEVTXDROP - IWEVFIRST] = { + .header_type = IW_HEADER_TYPE_ADDR, + }, + [IWEVQUAL - IWEVFIRST] = { + .header_type = IW_HEADER_TYPE_QUAL, + }, + [IWEVCUSTOM - IWEVFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_CUSTOM_MAX, + }, + [IWEVREGISTERED - IWEVFIRST] = { + .header_type = IW_HEADER_TYPE_ADDR, + }, + [IWEVEXPIRED - IWEVFIRST] = { + .header_type = IW_HEADER_TYPE_ADDR, + }, + [IWEVGENIE - IWEVFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_GENERIC_IE_MAX, + }, + [IWEVMICHAELMICFAILURE - IWEVFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = sizeof(struct iw_michaelmicfailure), + }, + [IWEVASSOCREQIE - IWEVFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_GENERIC_IE_MAX, + }, + [IWEVASSOCRESPIE - IWEVFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = IW_GENERIC_IE_MAX, + }, + [IWEVPMKIDCAND - IWEVFIRST] = { + .header_type = IW_HEADER_TYPE_POINT, + .token_size = 1, + .max_tokens = sizeof(struct iw_pmkid_cand), + }, +}; +static const unsigned standard_event_num = ARRAY_SIZE(standard_event); + +/* Size (in bytes) of various events */ +static const int event_type_size[] = { + IW_EV_LCP_LEN, /* IW_HEADER_TYPE_NULL */ + 0, + IW_EV_CHAR_LEN, /* IW_HEADER_TYPE_CHAR */ + 0, + IW_EV_UINT_LEN, /* IW_HEADER_TYPE_UINT */ + IW_EV_FREQ_LEN, /* IW_HEADER_TYPE_FREQ */ + IW_EV_ADDR_LEN, /* IW_HEADER_TYPE_ADDR */ + 0, + IW_EV_POINT_LEN, /* Without variable payload */ + IW_EV_PARAM_LEN, /* IW_HEADER_TYPE_PARAM */ + IW_EV_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */ +}; + +#ifdef CONFIG_COMPAT +static const int compat_event_type_size[] = { + IW_EV_COMPAT_LCP_LEN, /* IW_HEADER_TYPE_NULL */ + 0, + IW_EV_COMPAT_CHAR_LEN, /* IW_HEADER_TYPE_CHAR */ + 0, + IW_EV_COMPAT_UINT_LEN, /* IW_HEADER_TYPE_UINT */ + IW_EV_COMPAT_FREQ_LEN, /* IW_HEADER_TYPE_FREQ */ + IW_EV_COMPAT_ADDR_LEN, /* IW_HEADER_TYPE_ADDR */ + 0, + IW_EV_COMPAT_POINT_LEN, /* Without variable payload */ + IW_EV_COMPAT_PARAM_LEN, /* IW_HEADER_TYPE_PARAM */ + IW_EV_COMPAT_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */ +}; +#endif + + +/* IW event code */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) +static int __net_init wext_pernet_init(struct net *net) +{ + skb_queue_head_init(&net->wext_nlevents); + return 0; +} + +static void __net_exit wext_pernet_exit(struct net *net) +{ + skb_queue_purge(&net->wext_nlevents); +} + +static struct pernet_operations wext_pernet_ops = { + .init = wext_pernet_init, + .exit = wext_pernet_exit, +}; + +static int __init wireless_nlevent_init(void) +{ + return register_pernet_subsys(&wext_pernet_ops); +} + +subsys_initcall(wireless_nlevent_init); + +/* Process events generated by the wireless layer or the driver. 
*/ +static void wireless_nlevent_process(struct work_struct *work) +{ + struct sk_buff *skb; + struct net *net; + + rtnl_lock(); + + for_each_net(net) { + while ((skb = skb_dequeue(&net->wext_nlevents))) + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, + GFP_KERNEL); + } + + rtnl_unlock(); +} + +static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process); + +#else +/* Older kernels get the old way of doing stuff*/ +static struct sk_buff_head wireless_nlevent_queue; + +static int __init wireless_nlevent_init(void) +{ + skb_queue_head_init(&wireless_nlevent_queue); + return 0; +} + +subsys_initcall(wireless_nlevent_init); + +static void wireless_nlevent_process(unsigned long data) +{ + struct sk_buff *skb; + while ((skb = skb_dequeue(&wireless_nlevent_queue))) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); +#else + rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); +#endif +} + +static DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0); + +#endif + +static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, + struct sk_buff *skb) +{ + struct ifinfomsg *r; + struct nlmsghdr *nlh; + + nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0); + if (!nlh) + return NULL; + + r = nlmsg_data(nlh); + r->ifi_family = AF_UNSPEC; + r->__ifi_pad = 0; + r->ifi_type = dev->type; + r->ifi_index = dev->ifindex; + r->ifi_flags = dev_get_flags(dev); + r->ifi_change = 0; /* Wireless changes don't affect those flags */ + + NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); + + return nlh; + nla_put_failure: + nlmsg_cancel(skb, nlh); + return NULL; +} + + +/* + * Main event dispatcher. Called from other parts and drivers. + * Send the event on the appropriate channels. + * May be called from interrupt context. + */ +void wireless_send_event(struct net_device * dev, + unsigned int cmd, + union iwreq_data * wrqu, + const char * extra) +{ + const struct iw_ioctl_description * descr = NULL; + int extra_len = 0; + struct iw_event *event; /* Mallocated whole event */ + int event_len; /* Its size */ + int hdr_len; /* Size of the event header */ + int wrqu_off = 0; /* Offset in wrqu */ + /* Don't "optimise" the following variable, it will crash */ + unsigned cmd_index; /* *MUST* be unsigned */ + struct sk_buff *skb; + struct nlmsghdr *nlh; + struct nlattr *nla; +#ifdef CONFIG_COMPAT + struct __compat_iw_event *compat_event; + struct compat_iw_point compat_wrqu; + struct sk_buff *compskb; +#endif + + /* + * Nothing in the kernel sends scan events with data, be safe. + * This is necessary because we cannot fix up scan event data + * for compat, due to being contained in 'extra', but normally + * applications are required to retrieve the scan data anyway + * and no data is included in the event, this codifies that + * practice. + */ + if (WARN_ON(cmd == SIOCGIWSCAN && extra)) + extra = NULL; + + /* Get the description of the Event */ + if (cmd <= SIOCIWLAST) { + cmd_index = cmd - SIOCIWFIRST; + if (cmd_index < standard_ioctl_num) + descr = &(standard_ioctl[cmd_index]); + } else { + cmd_index = cmd - IWEVFIRST; + if (cmd_index < standard_event_num) + descr = &(standard_event[cmd_index]); + } + /* Don't accept unknown events */ + if (descr == NULL) { + /* Note : we don't return an error to the driver, because + * the driver would not know what to do about it. It can't + * return an error to the user, because the event is not + * initiated by a user request. + * The best the driver could do is to log an error message. 
+ * We will do it ourselves instead... + */ + printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n", + dev->name, cmd); + return; + } + + /* Check extra parameters and set extra_len */ + if (descr->header_type == IW_HEADER_TYPE_POINT) { + /* Check if number of token fits within bounds */ + if (wrqu->data.length > descr->max_tokens) { + printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length); + return; + } + if (wrqu->data.length < descr->min_tokens) { + printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length); + return; + } + /* Calculate extra_len - extra is NULL for restricted events */ + if (extra != NULL) + extra_len = wrqu->data.length * descr->token_size; + /* Always at an offset in wrqu */ + wrqu_off = IW_EV_POINT_OFF; + } + + /* Total length of the event */ + hdr_len = event_type_size[descr->header_type]; + event_len = hdr_len + extra_len; + + /* + * The problem for 64/32 bit. + * + * On 64-bit, a regular event is laid out as follows: + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | event.len | event.cmd | p a d d i n g | + * | wrqu data ... (with the correct size) | + * + * This padding exists because we manipulate event->u, + * and 'event' is not packed. + * + * An iw_point event is laid out like this instead: + * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | event.len | event.cmd | p a d d i n g | + * | iwpnt.len | iwpnt.flg | p a d d i n g | + * | extra data ... + * + * The second padding exists because struct iw_point is extended, + * but this depends on the platform... + * + * On 32-bit, all the padding shouldn't be there. + */ + + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!skb) + return; + + /* Send via the RtNetlink event channel */ + nlh = rtnetlink_ifinfo_prep(dev, skb); + if (WARN_ON(!nlh)) { + kfree_skb(skb); + return; + } + + /* Add the wireless events in the netlink packet */ + nla = nla_reserve(skb, IFLA_WIRELESS, event_len); + if (!nla) { + kfree_skb(skb); + return; + } + event = nla_data(nla); + + /* Fill event - first clear to avoid data leaking */ + memset(event, 0, hdr_len); + event->len = event_len; + event->cmd = cmd; + memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); + if (extra_len) + memcpy(((char *) event) + hdr_len, extra, extra_len); + + nlmsg_end(skb, nlh); +#ifdef CONFIG_COMPAT + hdr_len = compat_event_type_size[descr->header_type]; + event_len = hdr_len + extra_len; + + compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!compskb) { + kfree_skb(skb); + return; + } + + /* Send via the RtNetlink event channel */ + nlh = rtnetlink_ifinfo_prep(dev, compskb); + if (WARN_ON(!nlh)) { + kfree_skb(skb); + kfree_skb(compskb); + return; + } + + /* Add the wireless events in the netlink packet */ + nla = nla_reserve(compskb, IFLA_WIRELESS, event_len); + if (!nla) { + kfree_skb(skb); + kfree_skb(compskb); + return; + } + compat_event = nla_data(nla); + + compat_event->len = event_len; + compat_event->cmd = cmd; + if (descr->header_type == IW_HEADER_TYPE_POINT) { + compat_wrqu.length = wrqu->data.length; + compat_wrqu.flags = wrqu->data.flags; + memcpy(&compat_event->pointer, + ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF, + hdr_len - IW_EV_COMPAT_LCP_LEN); + if (extra_len) + memcpy(((char *) compat_event) + hdr_len, + extra, extra_len); + } else { + /* extra_len must be zero, so no if (extra) needed */ + memcpy(&compat_event->pointer, wrqu, + hdr_len - IW_EV_COMPAT_LCP_LEN); + } + + nlmsg_end(compskb, nlh); + + skb_shinfo(skb)->frag_list = 
compskb; +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + skb_queue_tail(&dev_net(dev)->wext_nlevents, skb); + schedule_work(&wireless_nlevent_work); +#else + skb_queue_tail(&wireless_nlevent_queue, skb); + tasklet_schedule(&wireless_nlevent_tasklet); +#endif +} +EXPORT_SYMBOL(wireless_send_event); + + + +/* IW handlers */ + +struct iw_statistics *get_wireless_stats(struct net_device *dev) +{ +#ifdef CONFIG_WIRELESS_EXT + if ((dev->wireless_handlers != NULL) && + (dev->wireless_handlers->get_wireless_stats != NULL)) + return dev->wireless_handlers->get_wireless_stats(dev); +#endif + +#ifdef CONFIG_CFG80211_WEXT + if (dev->ieee80211_ptr && dev->ieee80211_ptr && + dev->ieee80211_ptr->wiphy && + dev->ieee80211_ptr->wiphy->wext && + dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) + return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev); +#endif + + /* not found */ + return NULL; +} + +static int iw_handler_get_iwstats(struct net_device * dev, + struct iw_request_info * info, + union iwreq_data * wrqu, + char * extra) +{ + /* Get stats from the driver */ + struct iw_statistics *stats; + + stats = get_wireless_stats(dev); + if (stats) { + /* Copy statistics to extra */ + memcpy(extra, stats, sizeof(struct iw_statistics)); + wrqu->data.length = sizeof(struct iw_statistics); + + /* Check if we need to clear the updated flag */ + if (wrqu->data.flags != 0) + stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; + return 0; + } else + return -EOPNOTSUPP; +} + +static iw_handler get_handler(struct net_device *dev, unsigned int cmd) +{ + /* Don't "optimise" the following variable, it will crash */ + unsigned int index; /* *MUST* be unsigned */ + const struct iw_handler_def *handlers = NULL; + +#ifdef CONFIG_CFG80211_WEXT + if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) + handlers = dev->ieee80211_ptr->wiphy->wext; +#endif +#ifdef CONFIG_WIRELESS_EXT + if (dev->wireless_handlers) + handlers = dev->wireless_handlers; +#endif + + if (!handlers) + return NULL; + + /* Try as a standard command */ + index = cmd - SIOCIWFIRST; + if (index < handlers->num_standard) + return handlers->standard[index]; + +#ifdef CONFIG_WEXT_PRIV + /* Try as a private command */ + index = cmd - SIOCIWFIRSTPRIV; + if (index < handlers->num_private) + return handlers->private[index]; +#endif + + /* Not found */ + return NULL; +} + +static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, + const struct iw_ioctl_description *descr, + iw_handler handler, struct net_device *dev, + struct iw_request_info *info) +{ + int err, extra_size, user_length = 0, essid_compat = 0; + char *extra; + + /* Calculate space needed by arguments. Always allocate + * for max space. 
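
wireless_send_event(), exported a little further up, is the single entry point drivers use to feed the event machinery above. A short illustrative snippet, not taken from any driver in this patch (example_report_assoc() and the ASSOCINFO string are made up), reporting a new association the classic way with a SIOCGIWAP address event plus an IWEVCUSTOM string:

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>

static void example_report_assoc(struct net_device *dev, const u8 *bssid)
{
	union iwreq_data wrqu;
	char msg[] = "ASSOCINFO(ReqIEs=...)";

	/* SIOCGIWAP: header-only event, the AP address travels inside wrqu */
	memset(&wrqu, 0, sizeof(wrqu));
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
	wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);

	/* IWEVCUSTOM: iw_point header, the payload goes in 'extra' */
	memset(&wrqu, 0, sizeof(wrqu));
	wrqu.data.length = strlen(msg);
	wireless_send_event(dev, IWEVCUSTOM, &wrqu, msg);
}
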
+ */ + extra_size = descr->max_tokens * descr->token_size; + + /* Check need for ESSID compatibility for WE < 21 */ + switch (cmd) { + case SIOCSIWESSID: + case SIOCGIWESSID: + case SIOCSIWNICKN: + case SIOCGIWNICKN: + if (iwp->length == descr->max_tokens + 1) + essid_compat = 1; + else if (IW_IS_SET(cmd) && (iwp->length != 0)) { + char essid[IW_ESSID_MAX_SIZE + 1]; + unsigned int len; + len = iwp->length * descr->token_size; + + if (len > IW_ESSID_MAX_SIZE) + return -EFAULT; + + err = copy_from_user(essid, iwp->pointer, len); + if (err) + return -EFAULT; + + if (essid[iwp->length - 1] == '\0') + essid_compat = 1; + } + break; + default: + break; + } + + iwp->length -= essid_compat; + + /* Check what user space is giving us */ + if (IW_IS_SET(cmd)) { + /* Check NULL pointer */ + if (!iwp->pointer && iwp->length != 0) + return -EFAULT; + /* Check if number of token fits within bounds */ + if (iwp->length > descr->max_tokens) + return -E2BIG; + if (iwp->length < descr->min_tokens) + return -EINVAL; + } else { + /* Check NULL pointer */ + if (!iwp->pointer) + return -EFAULT; + /* Save user space buffer size for checking */ + user_length = iwp->length; + + /* Don't check if user_length > max to allow forward + * compatibility. The test user_length < min is + * implied by the test at the end. + */ + + /* Support for very large requests */ + if ((descr->flags & IW_DESCR_FLAG_NOMAX) && + (user_length > descr->max_tokens)) { + /* Allow userspace to GET more than max so + * we can support any size GET requests. + * There is still a limit : -ENOMEM. + */ + extra_size = user_length * descr->token_size; + + /* Note : user_length is originally a __u16, + * and token_size is controlled by us, + * so extra_size won't get negative and + * won't overflow... + */ + } + } + + /* kzalloc() ensures NULL-termination for essid_compat. */ + extra = kzalloc(extra_size, GFP_KERNEL); + if (!extra) + return -ENOMEM; + + /* If it is a SET, get all the extra data in here */ + if (IW_IS_SET(cmd) && (iwp->length != 0)) { + if (copy_from_user(extra, iwp->pointer, + iwp->length * + descr->token_size)) { + err = -EFAULT; + goto out; + } + + if (cmd == SIOCSIWENCODEEXT) { + struct iw_encode_ext *ee = (void *) extra; + + if (iwp->length < sizeof(*ee) + ee->key_len) + return -EFAULT; + } + } + + err = handler(dev, info, (union iwreq_data *) iwp, extra); + + iwp->length += essid_compat; + + /* If we have something to return to the user */ + if (!err && IW_IS_GET(cmd)) { + /* Check if there is enough buffer up there */ + if (user_length < iwp->length) { + err = -E2BIG; + goto out; + } + + if (copy_to_user(iwp->pointer, extra, + iwp->length * + descr->token_size)) { + err = -EFAULT; + goto out; + } + } + + /* Generate an event to notify listeners of the change */ + if ((descr->flags & IW_DESCR_FLAG_EVENT) && + ((err == 0) || (err == -EIWCOMMIT))) { + union iwreq_data *data = (union iwreq_data *) iwp; + + if (descr->flags & IW_DESCR_FLAG_RESTRICT) + /* If the event is restricted, don't + * export the payload. + */ + wireless_send_event(dev, cmd, data, NULL); + else + wireless_send_event(dev, cmd, data, extra); + } + +out: + kfree(extra); + return err; +} + +/* + * Call the commit handler in the driver + * (if exist and if conditions are right) + * + * Note : our current commit strategy is currently pretty dumb, + * but we will be able to improve on that... + * The goal is to try to agreagate as many changes as possible + * before doing the commit. 
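
get_handler() above resolves a request by indexing dev->wireless_handlers->standard[cmd - SIOCIWFIRST], and ioctl_standard_iw_point() then calls whichever slot it found. For orientation, this is roughly what the driver side of that contract looks like; all example_* names are invented and the handler bodies are trivial stand-ins:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>

/* slot 0 is SIOCSIWCOMMIT, which call_commit_handler() below invokes */
static int example_commit(struct net_device *dev, struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	return 0;	/* a real driver would reprogram its hardware here */
}

static int example_giwname(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	strcpy(wrqu->name, "IEEE 802.11bg");
	return 0;
}

static const iw_handler example_handlers[] = {
	[SIOCSIWCOMMIT - SIOCIWFIRST] = example_commit,
	[SIOCGIWNAME   - SIOCIWFIRST] = example_giwname,
};

static const struct iw_handler_def example_handler_def = {
	.num_standard	= ARRAY_SIZE(example_handlers),
	.standard	= example_handlers,
};

/* in the driver's probe path (assumed): dev->wireless_handlers = &example_handler_def; */
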
Drivers that will define a commit handler + * are usually those that need a reset after changing parameters, so + * we want to minimise the number of reset. + * A cool idea is to use a timer : at each "set" command, we re-set the + * timer, when the timer eventually fires, we call the driver. + * Hopefully, more on that later. + * + * Also, I'm waiting to see how many people will complain about the + * netif_running(dev) test. I'm open on that one... + * Hopefully, the driver will remember to do a commit in "open()" ;-) + */ +int call_commit_handler(struct net_device *dev) +{ +#ifdef CONFIG_WIRELESS_EXT + if ((netif_running(dev)) && + (dev->wireless_handlers->standard[0] != NULL)) + /* Call the commit handler on the driver */ + return dev->wireless_handlers->standard[0](dev, NULL, + NULL, NULL); + else + return 0; /* Command completed successfully */ +#else + /* cfg80211 has no commit */ + return 0; +#endif +} + +/* + * Main IOCTl dispatcher. + * Check the type of IOCTL and call the appropriate wrapper... + */ +static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, + unsigned int cmd, + struct iw_request_info *info, + wext_ioctl_func standard, + wext_ioctl_func private) +{ + struct iwreq *iwr = (struct iwreq *) ifr; + struct net_device *dev; + iw_handler handler; + + /* Permissions are already checked in dev_ioctl() before calling us. + * The copy_to/from_user() of ifr is also dealt with in there */ + + /* Make sure the device exist */ + if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL) + return -ENODEV; + + /* A bunch of special cases, then the generic case... + * Note that 'cmd' is already filtered in dev_ioctl() with + * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */ + if (cmd == SIOCGIWSTATS) + return standard(dev, iwr, cmd, info, + &iw_handler_get_iwstats); + +#ifdef CONFIG_WEXT_PRIV + if (cmd == SIOCGIWPRIV && dev->wireless_handlers) + return standard(dev, iwr, cmd, info, + iw_handler_get_private); +#endif + + /* Basic check */ + if (!netif_device_present(dev)) + return -ENODEV; + + /* New driver API : try to find the handler */ + handler = get_handler(dev, cmd); + if (handler) { + /* Standard and private are not the same */ + if (cmd < SIOCIWFIRSTPRIV) + return standard(dev, iwr, cmd, info, handler); + else if (private) + return private(dev, iwr, cmd, info, handler); + } + /* Old driver API : call driver ioctl handler */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + if (dev->netdev_ops->ndo_do_ioctl) + return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); +#else + if (dev->do_ioctl) + return dev->do_ioctl(dev, ifr, cmd); +#endif + return -EOPNOTSUPP; +} + +/* If command is `set a parameter', or `get the encoding parameters', + * check if the user has the right to do it. + */ +static int wext_permission_check(unsigned int cmd) +{ + if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || + cmd == SIOCGIWENCODEEXT) && + !capable(CAP_NET_ADMIN)) + return -EPERM; + + return 0; +} + +/* entry point from dev ioctl */ +static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, + unsigned int cmd, struct iw_request_info *info, + wext_ioctl_func standard, + wext_ioctl_func private) +{ + int ret = wext_permission_check(cmd); + + if (ret) + return ret; + + dev_load(net, ifr->ifr_name); + rtnl_lock(); + ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private); + rtnl_unlock(); + + return ret; +} + +/* + * Wrapper to call a standard Wireless Extension handler. 
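
The timer idea floated in the comment above is something a driver can already implement on its own side; nothing in this core code does it. A rough sketch of that approach, with made-up example_* names and an arbitrary 100 ms quiet period:

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/timer.h>

struct example_priv {
	struct net_device *dev;
	struct timer_list commit_timer;
};

/* stand-in for the driver's real "reset and reprogram the hardware" path */
static void example_hw_apply_config(struct net_device *dev)
{
}

static void example_commit_fire(unsigned long data)
{
	struct example_priv *priv = (struct example_priv *)data;

	example_hw_apply_config(priv->dev);
}

static void example_commit_init(struct example_priv *priv)
{
	setup_timer(&priv->commit_timer, example_commit_fire,
		    (unsigned long)priv);
}

/* call this from every "set" handler: only the last set in a burst commits */
static void example_schedule_commit(struct example_priv *priv)
{
	mod_timer(&priv->commit_timer, jiffies + msecs_to_jiffies(100));
}
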
+ * We do various checks and also take care of moving data between + * user space and kernel space. + */ +static int ioctl_standard_call(struct net_device * dev, + struct iwreq *iwr, + unsigned int cmd, + struct iw_request_info *info, + iw_handler handler) +{ + const struct iw_ioctl_description * descr; + int ret = -EINVAL; + + /* Get the description of the IOCTL */ + if ((cmd - SIOCIWFIRST) >= standard_ioctl_num) + return -EOPNOTSUPP; + descr = &(standard_ioctl[cmd - SIOCIWFIRST]); + + /* Check if we have a pointer to user space data or not */ + if (descr->header_type != IW_HEADER_TYPE_POINT) { + + /* No extra arguments. Trivial to handle */ + ret = handler(dev, info, &(iwr->u), NULL); + + /* Generate an event to notify listeners of the change */ + if ((descr->flags & IW_DESCR_FLAG_EVENT) && + ((ret == 0) || (ret == -EIWCOMMIT))) + wireless_send_event(dev, cmd, &(iwr->u), NULL); + } else { + ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr, + handler, dev, info); + } + + /* Call commit handler if needed and defined */ + if (ret == -EIWCOMMIT) + ret = call_commit_handler(dev); + + /* Here, we will generate the appropriate event if needed */ + + return ret; +} + + +int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, + void __user *arg) +{ + struct iw_request_info info = { .cmd = cmd, .flags = 0 }; + int ret; + + ret = wext_ioctl_dispatch(net, ifr, cmd, &info, + ioctl_standard_call, + ioctl_private_call); + if (ret >= 0 && + IW_IS_GET(cmd) && + copy_to_user(arg, ifr, sizeof(struct iwreq))) + return -EFAULT; + + return ret; +} + +#ifdef CONFIG_COMPAT +static int compat_standard_call(struct net_device *dev, + struct iwreq *iwr, + unsigned int cmd, + struct iw_request_info *info, + iw_handler handler) +{ + const struct iw_ioctl_description *descr; + struct compat_iw_point *iwp_compat; + struct iw_point iwp; + int err; + + descr = standard_ioctl + (cmd - SIOCIWFIRST); + + if (descr->header_type != IW_HEADER_TYPE_POINT) + return ioctl_standard_call(dev, iwr, cmd, info, handler); + + iwp_compat = (struct compat_iw_point *) &iwr->u.data; + iwp.pointer = compat_ptr(iwp_compat->pointer); + iwp.length = iwp_compat->length; + iwp.flags = iwp_compat->flags; + + err = ioctl_standard_iw_point(&iwp, cmd, descr, handler, dev, info); + + iwp_compat->pointer = ptr_to_compat(iwp.pointer); + iwp_compat->length = iwp.length; + iwp_compat->flags = iwp.flags; + + return err; +} + +int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct iw_request_info info; + struct iwreq iwr; + char *colon; + int ret; + + if (copy_from_user(&iwr, argp, sizeof(struct iwreq))) + return -EFAULT; + + iwr.ifr_name[IFNAMSIZ-1] = 0; + colon = strchr(iwr.ifr_name, ':'); + if (colon) + *colon = 0; + + info.cmd = cmd; + info.flags = IW_REQUEST_FLAG_COMPAT; + + ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info, + compat_standard_call, + compat_private_call); + + if (ret >= 0 && + IW_IS_GET(cmd) && + copy_to_user(argp, &iwr, sizeof(struct iwreq))) + return -EFAULT; + + return ret; +} +#endif diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c new file mode 100644 index 0000000..a3c2277 --- /dev/null +++ b/net/wireless/wext-priv.c @@ -0,0 +1,248 @@ +/* + * This file implement the Wireless Extensions priv API. + * + * Authors : Jean Tourrilhes - HPL - + * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. 
+ * Copyright 2009 Johannes Berg + * + * (As all part of the Linux kernel, this file is GPL) + */ +#include +#include +#include +#include + +int iw_handler_get_private(struct net_device * dev, + struct iw_request_info * info, + union iwreq_data * wrqu, + char * extra) +{ + /* Check if the driver has something to export */ + if ((dev->wireless_handlers->num_private_args == 0) || + (dev->wireless_handlers->private_args == NULL)) + return -EOPNOTSUPP; + + /* Check if there is enough buffer up there */ + if (wrqu->data.length < dev->wireless_handlers->num_private_args) { + /* User space can't know in advance how large the buffer + * needs to be. Give it a hint, so that we can support + * any size buffer we want somewhat efficiently... */ + wrqu->data.length = dev->wireless_handlers->num_private_args; + return -E2BIG; + } + + /* Set the number of available ioctls. */ + wrqu->data.length = dev->wireless_handlers->num_private_args; + + /* Copy structure to the user buffer. */ + memcpy(extra, dev->wireless_handlers->private_args, + sizeof(struct iw_priv_args) * wrqu->data.length); + + return 0; +} + +/* Size (in bytes) of the various private data types */ +static const char iw_priv_type_size[] = { + 0, /* IW_PRIV_TYPE_NONE */ + 1, /* IW_PRIV_TYPE_BYTE */ + 1, /* IW_PRIV_TYPE_CHAR */ + 0, /* Not defined */ + sizeof(__u32), /* IW_PRIV_TYPE_INT */ + sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */ + sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */ + 0, /* Not defined */ +}; + +static int get_priv_size(__u16 args) +{ + int num = args & IW_PRIV_SIZE_MASK; + int type = (args & IW_PRIV_TYPE_MASK) >> 12; + + return num * iw_priv_type_size[type]; +} + +static int adjust_priv_size(__u16 args, struct iw_point *iwp) +{ + int num = iwp->length; + int max = args & IW_PRIV_SIZE_MASK; + int type = (args & IW_PRIV_TYPE_MASK) >> 12; + + /* Make sure the driver doesn't goof up */ + if (max < num) + num = max; + + return num * iw_priv_type_size[type]; +} + +/* + * Wrapper to call a private Wireless Extension handler. + * We do various checks and also take care of moving data between + * user space and kernel space. + * It's not as nice and slimline as the standard wrapper. The cause + * is struct iw_priv_args, which was not really designed for the + * job we are going here. + * + * IMPORTANT : This function prevent to set and get data on the same + * IOCTL and enforce the SET/GET convention. Not doing it would be + * far too hairy... + * If you need to set and get data at the same time, please don't use + * a iw_handler but process it in your ioctl handler (i.e. use the + * old driver API). + */ +static int get_priv_descr_and_size(struct net_device *dev, unsigned int cmd, + const struct iw_priv_args **descrp) +{ + const struct iw_priv_args *descr; + int i, extra_size; + + descr = NULL; + for (i = 0; i < dev->wireless_handlers->num_private_args; i++) { + if (cmd == dev->wireless_handlers->private_args[i].cmd) { + descr = &dev->wireless_handlers->private_args[i]; + break; + } + } + + extra_size = 0; + if (descr) { + if (IW_IS_SET(cmd)) { + int offset = 0; /* For sub-ioctls */ + /* Check for sub-ioctl handler */ + if (descr->name[0] == '\0') + /* Reserve one int for sub-ioctl index */ + offset = sizeof(__u32); + + /* Size of set arguments */ + extra_size = get_priv_size(descr->set_args); + + /* Does it fits in iwr ? 
*/ + if ((descr->set_args & IW_PRIV_SIZE_FIXED) && + ((extra_size + offset) <= IFNAMSIZ)) + extra_size = 0; + } else { + /* Size of get arguments */ + extra_size = get_priv_size(descr->get_args); + + /* Does it fits in iwr ? */ + if ((descr->get_args & IW_PRIV_SIZE_FIXED) && + (extra_size <= IFNAMSIZ)) + extra_size = 0; + } + } + *descrp = descr; + return extra_size; +} + +static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd, + const struct iw_priv_args *descr, + iw_handler handler, struct net_device *dev, + struct iw_request_info *info, int extra_size) +{ + char *extra; + int err; + + /* Check what user space is giving us */ + if (IW_IS_SET(cmd)) { + if (!iwp->pointer && iwp->length != 0) + return -EFAULT; + + if (iwp->length > (descr->set_args & IW_PRIV_SIZE_MASK)) + return -E2BIG; + } else if (!iwp->pointer) + return -EFAULT; + + extra = kmalloc(extra_size, GFP_KERNEL); + if (!extra) + return -ENOMEM; + + /* If it is a SET, get all the extra data in here */ + if (IW_IS_SET(cmd) && (iwp->length != 0)) { + if (copy_from_user(extra, iwp->pointer, extra_size)) { + err = -EFAULT; + goto out; + } + } + + /* Call the handler */ + err = handler(dev, info, (union iwreq_data *) iwp, extra); + + /* If we have something to return to the user */ + if (!err && IW_IS_GET(cmd)) { + /* Adjust for the actual length if it's variable, + * avoid leaking kernel bits outside. + */ + if (!(descr->get_args & IW_PRIV_SIZE_FIXED)) + extra_size = adjust_priv_size(descr->get_args, iwp); + + if (copy_to_user(iwp->pointer, extra, extra_size)) + err = -EFAULT; + } + +out: + kfree(extra); + return err; +} + +int ioctl_private_call(struct net_device *dev, struct iwreq *iwr, + unsigned int cmd, struct iw_request_info *info, + iw_handler handler) +{ + int extra_size = 0, ret = -EINVAL; + const struct iw_priv_args *descr; + + extra_size = get_priv_descr_and_size(dev, cmd, &descr); + + /* Check if we have a pointer to user space data or not. */ + if (extra_size == 0) { + /* No extra arguments. Trivial to handle */ + ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); + } else { + ret = ioctl_private_iw_point(&iwr->u.data, cmd, descr, + handler, dev, info, extra_size); + } + + /* Call commit handler if needed and defined */ + if (ret == -EIWCOMMIT) + ret = call_commit_handler(dev); + + return ret; +} + +#ifdef CONFIG_COMPAT +int compat_private_call(struct net_device *dev, struct iwreq *iwr, + unsigned int cmd, struct iw_request_info *info, + iw_handler handler) +{ + const struct iw_priv_args *descr; + int ret, extra_size; + + extra_size = get_priv_descr_and_size(dev, cmd, &descr); + + /* Check if we have a pointer to user space data or not. */ + if (extra_size == 0) { + /* No extra arguments. 
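
get_priv_size() and adjust_priv_size() above decode the packed set_args/get_args words: the low IW_PRIV_SIZE_MASK bits hold the element count and bits 12-14 the element type. An illustrative driver-side table (the commands and names are invented) showing both a fixed SET that fits inside the iwreq itself and a variable-length GET:

#include <linux/wireless.h>

static const struct iw_priv_args example_priv_args[] = {
	{
		/* SET (even cmd): exactly one __u32, i.e. 4 bytes, which fits
		 * in IFNAMSIZ, so get_priv_descr_and_size() reports
		 * extra_size == 0 and no extra copy is needed */
		.cmd		= SIOCIWFIRSTPRIV + 0,
		.set_args	= IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
		.get_args	= 0,
		.name		= "set_debug",
	},
	{
		/* GET (odd cmd): up to 64 bytes of text copied back through
		 * 'extra'; adjust_priv_size() trims it to what the handler
		 * actually filled in */
		.cmd		= SIOCIWFIRSTPRIV + 1,
		.set_args	= 0,
		.get_args	= IW_PRIV_TYPE_CHAR | 64,
		.name		= "get_fwver",
	},
};
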
Trivial to handle */ + ret = handler(dev, info, &(iwr->u), (char *) &(iwr->u)); + } else { + struct compat_iw_point *iwp_compat; + struct iw_point iwp; + + iwp_compat = (struct compat_iw_point *) &iwr->u.data; + iwp.pointer = compat_ptr(iwp_compat->pointer); + iwp.length = iwp_compat->length; + iwp.flags = iwp_compat->flags; + + ret = ioctl_private_iw_point(&iwp, cmd, descr, + handler, dev, info, extra_size); + + iwp_compat->pointer = ptr_to_compat(iwp.pointer); + iwp_compat->length = iwp.length; + iwp_compat->flags = iwp.flags; + } + + /* Call commit handler if needed and defined */ + if (ret == -EIWCOMMIT) + ret = call_commit_handler(dev); + + return ret; +} +#endif diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c new file mode 100644 index 0000000..f2fbfa6 --- /dev/null +++ b/net/wireless/wext-proc.c @@ -0,0 +1,159 @@ +/* + * This file implement the Wireless Extensions proc API. + * + * Authors : Jean Tourrilhes - HPL - + * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. + * + * (As all part of the Linux kernel, this file is GPL) + */ + +/* + * The /proc/net/wireless file is a human readable user-space interface + * exporting various wireless specific statistics from the wireless devices. + * This is the most popular part of the Wireless Extensions ;-) + * + * This interface is a pure clone of /proc/net/dev (in net/core/dev.c). + * The content of the file is basically the content of "struct iw_statistics". + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +static void wireless_seq_printf_stats(struct seq_file *seq, + struct net_device *dev) +{ + /* Get stats from the driver */ + struct iw_statistics *stats = get_wireless_stats(dev); + static struct iw_statistics nullstats = {}; + + /* show device if it's wireless regardless of current stats */ + if (!stats) { +#ifdef CONFIG_WIRELESS_EXT + if (dev->wireless_handlers) + stats = &nullstats; +#endif +#ifdef CONFIG_CFG80211 + if (dev->ieee80211_ptr) + stats = &nullstats; +#endif + } + + if (stats) { + seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d " + "%6d %6d %6d\n", + dev->name, stats->status, stats->qual.qual, + stats->qual.updated & IW_QUAL_QUAL_UPDATED + ? '.' : ' ', + ((__s32) stats->qual.level) - + ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), + stats->qual.updated & IW_QUAL_LEVEL_UPDATED + ? '.' : ' ', + ((__s32) stats->qual.noise) - + ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), + stats->qual.updated & IW_QUAL_NOISE_UPDATED + ? '.' 
: ' ', + stats->discard.nwid, stats->discard.code, + stats->discard.fragment, stats->discard.retries, + stats->discard.misc, stats->miss.beacon); + + if (stats != &nullstats) + stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; + } +} + +/* ---------------------------------------------------------------- */ +/* + * Print info for /proc/net/wireless (print all entries) + */ +static int wireless_dev_seq_show(struct seq_file *seq, void *v) +{ + might_sleep(); + + if (v == SEQ_START_TOKEN) + seq_printf(seq, "Inter-| sta-| Quality | Discarded " + "packets | Missed | WE\n" + " face | tus | link level noise | nwid " + "crypt frag retry misc | beacon | %d\n", + WIRELESS_EXT); + else + wireless_seq_printf_stats(seq, v); + return 0; +} + +static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct net *net = seq_file_net(seq); + loff_t off; + struct net_device *dev; + + rtnl_lock(); + if (!*pos) + return SEQ_START_TOKEN; + + off = 1; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + for_each_netdev(net, dev) +#else + for_each_netdev(net) +#endif + if (off++ == *pos) + return dev; + return NULL; +} + +static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct net *net = seq_file_net(seq); + + ++*pos; + + return v == SEQ_START_TOKEN ? + first_net_device(net) : next_net_device(v); +} + +static void wireless_dev_seq_stop(struct seq_file *seq, void *v) +{ + rtnl_unlock(); +} + +static const struct seq_operations wireless_seq_ops = { + .start = wireless_dev_seq_start, + .next = wireless_dev_seq_next, + .stop = wireless_dev_seq_stop, + .show = wireless_dev_seq_show, +}; + +static int seq_open_wireless(struct inode *inode, struct file *file) +{ + return seq_open_net(inode, file, &wireless_seq_ops, + sizeof(struct seq_net_private)); +} + +static const struct file_operations wireless_seq_fops = { + .owner = THIS_MODULE, + .open = seq_open_wireless, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net, +}; + +int __net_init wext_proc_init(struct net *net) +{ + /* Create /proc/net/wireless entry */ + if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) + return -ENOMEM; + + return 0; +} + +void __net_exit wext_proc_exit(struct net *net) +{ + proc_net_remove(net, "wireless"); +} diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c new file mode 100644 index 0000000..5615a88 --- /dev/null +++ b/net/wireless/wext-sme.c @@ -0,0 +1,402 @@ +/* + * cfg80211 wext compat for managed mode. + * + * Copyright 2009 Johannes Berg + * Copyright (C) 2009 Intel Corporation. All rights reserved. 
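
The seq_printf() in the /proc/net/wireless code above subtracts 0x100 from level and noise whenever IW_QUAL_DBM is set: drivers store negative dBm readings in the unsigned 8-bit fields as value + 256, so 0xC4 (196) really means -60 dBm. A tiny illustrative decode helper (the name is made up):

#include <linux/wireless.h>

static int example_level_dbm(const struct iw_quality *q)
{
	if (q->updated & IW_QUAL_DBM)
		return (int)q->level - 0x100;	/* 196 -> -60 dBm */
	return q->level;			/* otherwise a driver-relative unit */
}
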
+ */ + +#include +#include +#include +#include "wext-compat.h" +#include "nl80211.h" + +int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev) +{ + struct cfg80211_cached_keys *ck = NULL; + const u8 *prev_bssid = NULL; + int err, i; + + ASSERT_RDEV_LOCK(rdev); + ASSERT_WDEV_LOCK(wdev); + + if (!netif_running(wdev->netdev)) + return 0; + + wdev->wext.connect.ie = wdev->wext.ie; + wdev->wext.connect.ie_len = wdev->wext.ie_len; + + if (wdev->wext.keys) { + wdev->wext.keys->def = wdev->wext.default_key; + wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key; + if (wdev->wext.default_key != -1) + wdev->wext.connect.privacy = true; + } + + if (!wdev->wext.connect.ssid_len) + return 0; + + if (wdev->wext.keys) { + ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); + if (!ck) + return -ENOMEM; + for (i = 0; i < 6; i++) + ck->params[i].key = ck->data[i]; + } + + if (wdev->wext.prev_bssid_valid) + prev_bssid = wdev->wext.prev_bssid; + + err = __cfg80211_connect(rdev, wdev->netdev, + &wdev->wext.connect, ck, prev_bssid); + if (err) + kfree(ck); + + return err; +} + +int cfg80211_mgd_wext_siwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *wextfreq, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct ieee80211_channel *chan = NULL; + int err, freq; + + /* call only for station! */ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) + return -EINVAL; + + freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); + if (freq < 0) + return freq; + + if (freq) { + chan = ieee80211_get_channel(wdev->wiphy, freq); + if (!chan) + return -EINVAL; + if (chan->flags & IEEE80211_CHAN_DISABLED) + return -EINVAL; + } + + cfg80211_lock_rdev(rdev); + mutex_lock(&rdev->devlist_mtx); + wdev_lock(wdev); + + if (wdev->sme_state != CFG80211_SME_IDLE) { + bool event = true; + + if (wdev->wext.connect.channel == chan) { + err = 0; + goto out; + } + + /* if SSID set, we'll try right again, avoid event */ + if (wdev->wext.connect.ssid_len) + event = false; + err = __cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, event); + if (err) + goto out; + } + + + wdev->wext.connect.channel = chan; + + /* SSID is not set, we just want to switch channel */ + if (chan && !wdev->wext.connect.ssid_len) { + err = rdev_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); + goto out; + } + + err = cfg80211_mgd_wext_connect(rdev, wdev); + out: + wdev_unlock(wdev); + mutex_unlock(&rdev->devlist_mtx); + cfg80211_unlock_rdev(rdev); + return err; +} + +int cfg80211_mgd_wext_giwfreq(struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *freq, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct ieee80211_channel *chan = NULL; + + /* call only for station! 
*/ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) + return -EINVAL; + + wdev_lock(wdev); + if (wdev->current_bss) + chan = wdev->current_bss->pub.channel; + else if (wdev->wext.connect.channel) + chan = wdev->wext.connect.channel; + wdev_unlock(wdev); + + if (chan) { + freq->m = chan->center_freq; + freq->e = 6; + return 0; + } + + /* no channel if not joining */ + return -EINVAL; +} + +int cfg80211_mgd_wext_siwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + size_t len = data->length; + int err; + + /* call only for station! */ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) + return -EINVAL; + + if (!data->flags) + len = 0; + + /* iwconfig uses nul termination in SSID.. */ + if (len > 0 && ssid[len - 1] == '\0') + len--; + + cfg80211_lock_rdev(rdev); + mutex_lock(&rdev->devlist_mtx); + wdev_lock(wdev); + + err = 0; + + if (wdev->sme_state != CFG80211_SME_IDLE) { + bool event = true; + + if (wdev->wext.connect.ssid && len && + len == wdev->wext.connect.ssid_len && + memcmp(wdev->wext.connect.ssid, ssid, len) == 0) + goto out; + + /* if SSID set now, we'll try to connect, avoid event */ + if (len) + event = false; + err = __cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, event); + if (err) + goto out; + } + + wdev->wext.prev_bssid_valid = false; + wdev->wext.connect.ssid = wdev->wext.ssid; + memcpy(wdev->wext.ssid, ssid, len); + wdev->wext.connect.ssid_len = len; + + wdev->wext.connect.crypto.control_port = false; + + err = cfg80211_mgd_wext_connect(rdev, wdev); + out: + wdev_unlock(wdev); + mutex_unlock(&rdev->devlist_mtx); + cfg80211_unlock_rdev(rdev); + return err; +} + +int cfg80211_mgd_wext_giwessid(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *ssid) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + /* call only for station! */ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) + return -EINVAL; + + data->flags = 0; + + wdev_lock(wdev); + if (wdev->current_bss) { + const u8 *ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, + WLAN_EID_SSID); + if (ie) { + data->flags = 1; + data->length = ie[1]; + memcpy(ssid, ie + 2, data->length); + } + } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) { + data->flags = 1; + data->length = wdev->wext.connect.ssid_len; + memcpy(ssid, wdev->wext.connect.ssid, data->length); + } + wdev_unlock(wdev); + + return 0; +} + +int cfg80211_mgd_wext_siwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + u8 *bssid = ap_addr->sa_data; + int err; + + /* call only for station! 
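
cfg80211_mgd_wext_giwfreq() above reports the channel as m = center_freq with e = 6, i.e. struct iw_freq encodes a value of m * 10^e Hz, so 2412 MHz comes back as m = 2412, e = 6. A small illustrative decoder for the non-negative-exponent case:

#include <linux/types.h>
#include <linux/wireless.h>

/* assumes freq->e >= 0, which holds for the MHz encoding used above */
static u64 example_iw_freq_to_hz(const struct iw_freq *freq)
{
	u64 hz = freq->m;
	int i;

	for (i = 0; i < freq->e; i++)
		hz *= 10;
	return hz;	/* m = 2412, e = 6  ->  2412000000 Hz */
}
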
*/ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) + return -EINVAL; + + if (ap_addr->sa_family != ARPHRD_ETHER) + return -EINVAL; + + /* automatic mode */ + if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) + bssid = NULL; + + cfg80211_lock_rdev(rdev); + mutex_lock(&rdev->devlist_mtx); + wdev_lock(wdev); + + if (wdev->sme_state != CFG80211_SME_IDLE) { + err = 0; + /* both automatic */ + if (!bssid && !wdev->wext.connect.bssid) + goto out; + + /* fixed already - and no change */ + if (wdev->wext.connect.bssid && bssid && + compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0) + goto out; + + err = __cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, false); + if (err) + goto out; + } + + if (bssid) { + memcpy(wdev->wext.bssid, bssid, ETH_ALEN); + wdev->wext.connect.bssid = wdev->wext.bssid; + } else + wdev->wext.connect.bssid = NULL; + + err = cfg80211_mgd_wext_connect(rdev, wdev); + out: + wdev_unlock(wdev); + mutex_unlock(&rdev->devlist_mtx); + cfg80211_unlock_rdev(rdev); + return err; +} + +int cfg80211_mgd_wext_giwap(struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *ap_addr, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + /* call only for station! */ + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) + return -EINVAL; + + ap_addr->sa_family = ARPHRD_ETHER; + + wdev_lock(wdev); + if (wdev->current_bss) + memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); + else + memset(ap_addr->sa_data, 0, ETH_ALEN); + wdev_unlock(wdev); + + return 0; +} + +int cfg80211_wext_siwgenie(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + u8 *ie = extra; + int ie_len = data->length, err; + + if (wdev->iftype != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (!ie_len) + ie = NULL; + + wdev_lock(wdev); + + /* no change */ + err = 0; + if (wdev->wext.ie_len == ie_len && + memcmp(wdev->wext.ie, ie, ie_len) == 0) + goto out; + + if (ie_len) { + ie = kmemdup(extra, ie_len, GFP_KERNEL); + if (!ie) { + err = -ENOMEM; + goto out; + } + } else + ie = NULL; + + kfree(wdev->wext.ie); + wdev->wext.ie = ie; + wdev->wext.ie_len = ie_len; + + if (wdev->sme_state != CFG80211_SME_IDLE) { + err = __cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, false); + if (err) + goto out; + } + + /* userspace better not think we'll reconnect */ + err = 0; + out: + wdev_unlock(wdev); + return err; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwgenie); + +int cfg80211_wext_siwmlme(struct net_device *dev, + struct iw_request_info *info, + struct iw_point *data, char *extra) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct iw_mlme *mlme = (struct iw_mlme *)extra; + struct cfg80211_registered_device *rdev; + int err; + + if (!wdev) + return -EOPNOTSUPP; + + rdev = wiphy_to_dev(wdev->wiphy); + + if (wdev->iftype != NL80211_IFTYPE_STATION) + return -EINVAL; + + if (mlme->addr.sa_family != ARPHRD_ETHER) + return -EINVAL; + + wdev_lock(wdev); + switch (mlme->cmd) { + case IW_MLME_DEAUTH: + case IW_MLME_DISASSOC: + err = __cfg80211_disconnect(rdev, dev, mlme->reason_code, + true); + break; + default: + err = -EOPNOTSUPP; + break; + } + wdev_unlock(wdev); + + return err; +} +EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme); diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c new file mode 100644 index 0000000..6dcfe65 --- /dev/null +++ 
b/net/wireless/wext-spy.c @@ -0,0 +1,231 @@ +/* + * This file implement the Wireless Extensions spy API. + * + * Authors : Jean Tourrilhes - HPL - + * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. + * + * (As all part of the Linux kernel, this file is GPL) + */ + +#include +#include +#include +#include +#include +#include + +static inline struct iw_spy_data *get_spydata(struct net_device *dev) +{ + /* This is the new way */ + if (dev->wireless_data) + return dev->wireless_data->spy_data; + return NULL; +} + +int iw_handler_set_spy(struct net_device * dev, + struct iw_request_info * info, + union iwreq_data * wrqu, + char * extra) +{ + struct iw_spy_data * spydata = get_spydata(dev); + struct sockaddr * address = (struct sockaddr *) extra; + + /* Make sure driver is not buggy or using the old API */ + if (!spydata) + return -EOPNOTSUPP; + + /* Disable spy collection while we copy the addresses. + * While we copy addresses, any call to wireless_spy_update() + * will NOP. This is OK, as anyway the addresses are changing. */ + spydata->spy_number = 0; + + /* We want to operate without locking, because wireless_spy_update() + * most likely will happen in the interrupt handler, and therefore + * have its own locking constraints and needs performance. + * The rtnl_lock() make sure we don't race with the other iw_handlers. + * This make sure wireless_spy_update() "see" that the spy list + * is temporarily disabled. */ + smp_wmb(); + + /* Are there are addresses to copy? */ + if (wrqu->data.length > 0) { + int i; + + /* Copy addresses */ + for (i = 0; i < wrqu->data.length; i++) + memcpy(spydata->spy_address[i], address[i].sa_data, + ETH_ALEN); + /* Reset stats */ + memset(spydata->spy_stat, 0, + sizeof(struct iw_quality) * IW_MAX_SPY); + } + + /* Make sure above is updated before re-enabling */ + smp_wmb(); + + /* Enable addresses */ + spydata->spy_number = wrqu->data.length; + + return 0; +} +EXPORT_SYMBOL(iw_handler_set_spy); + +int iw_handler_get_spy(struct net_device * dev, + struct iw_request_info * info, + union iwreq_data * wrqu, + char * extra) +{ + struct iw_spy_data * spydata = get_spydata(dev); + struct sockaddr * address = (struct sockaddr *) extra; + int i; + + /* Make sure driver is not buggy or using the old API */ + if (!spydata) + return -EOPNOTSUPP; + + wrqu->data.length = spydata->spy_number; + + /* Copy addresses. */ + for (i = 0; i < spydata->spy_number; i++) { + memcpy(address[i].sa_data, spydata->spy_address[i], ETH_ALEN); + address[i].sa_family = AF_UNIX; + } + /* Copy stats to the user buffer (just after). */ + if (spydata->spy_number > 0) + memcpy(extra + (sizeof(struct sockaddr) *spydata->spy_number), + spydata->spy_stat, + sizeof(struct iw_quality) * spydata->spy_number); + /* Reset updated flags. 
*/ + for (i = 0; i < spydata->spy_number; i++) + spydata->spy_stat[i].updated &= ~IW_QUAL_ALL_UPDATED; + return 0; +} +EXPORT_SYMBOL(iw_handler_get_spy); + +/*------------------------------------------------------------------*/ +/* + * Standard Wireless Handler : set spy threshold + */ +int iw_handler_set_thrspy(struct net_device * dev, + struct iw_request_info *info, + union iwreq_data * wrqu, + char * extra) +{ + struct iw_spy_data * spydata = get_spydata(dev); + struct iw_thrspy * threshold = (struct iw_thrspy *) extra; + + /* Make sure driver is not buggy or using the old API */ + if (!spydata) + return -EOPNOTSUPP; + + /* Just do it */ + memcpy(&(spydata->spy_thr_low), &(threshold->low), + 2 * sizeof(struct iw_quality)); + + /* Clear flag */ + memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under)); + + return 0; +} +EXPORT_SYMBOL(iw_handler_set_thrspy); + +/*------------------------------------------------------------------*/ +/* + * Standard Wireless Handler : get spy threshold + */ +int iw_handler_get_thrspy(struct net_device * dev, + struct iw_request_info *info, + union iwreq_data * wrqu, + char * extra) +{ + struct iw_spy_data * spydata = get_spydata(dev); + struct iw_thrspy * threshold = (struct iw_thrspy *) extra; + + /* Make sure driver is not buggy or using the old API */ + if (!spydata) + return -EOPNOTSUPP; + + /* Just do it */ + memcpy(&(threshold->low), &(spydata->spy_thr_low), + 2 * sizeof(struct iw_quality)); + + return 0; +} +EXPORT_SYMBOL(iw_handler_get_thrspy); + +/*------------------------------------------------------------------*/ +/* + * Prepare and send a Spy Threshold event + */ +static void iw_send_thrspy_event(struct net_device * dev, + struct iw_spy_data * spydata, + unsigned char * address, + struct iw_quality * wstats) +{ + union iwreq_data wrqu; + struct iw_thrspy threshold; + + /* Init */ + wrqu.data.length = 1; + wrqu.data.flags = 0; + /* Copy address */ + memcpy(threshold.addr.sa_data, address, ETH_ALEN); + threshold.addr.sa_family = ARPHRD_ETHER; + /* Copy stats */ + memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality)); + /* Copy also thresholds */ + memcpy(&(threshold.low), &(spydata->spy_thr_low), + 2 * sizeof(struct iw_quality)); + + /* Send event to user space */ + wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold); +} + +/* ---------------------------------------------------------------- */ +/* + * Call for the driver to update the spy data. + * For now, the spy data is a simple array. As the size of the array is + * small, this is good enough. If we wanted to support larger number of + * spy addresses, we should use something more efficient... + */ +void wireless_spy_update(struct net_device * dev, + unsigned char * address, + struct iw_quality * wstats) +{ + struct iw_spy_data * spydata = get_spydata(dev); + int i; + int match = -1; + + /* Make sure driver is not buggy or using the old API */ + if (!spydata) + return; + + /* Update all records that match */ + for (i = 0; i < spydata->spy_number; i++) + if (!compare_ether_addr(address, spydata->spy_address[i])) { + memcpy(&(spydata->spy_stat[i]), wstats, + sizeof(struct iw_quality)); + match = i; + } + + /* Generate an event if we cross the spy threshold. + * To avoid event storms, we have a simple hysteresis : we generate + * event only when we go under the low threshold or above the + * high threshold. 
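
For the threshold hysteresis below to ever trigger, a driver has to feed wireless_spy_update() from its receive path with per-frame quality numbers. An illustrative, made-up RX-side helper; a real driver derives the values from its own RX descriptors:

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>

static void example_rx_update_spy(struct net_device *dev, u8 *src_addr,
				  u8 level, u8 noise)
{
	struct iw_quality wstats;

	wstats.qual	= 0;
	wstats.level	= level;	/* e.g. -60 dBm stored as 196 */
	wstats.noise	= noise;
	wstats.updated	= IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED |
			  IW_QUAL_DBM;

	wireless_spy_update(dev, src_addr, &wstats);
}
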
*/ + if (match >= 0) { + if (spydata->spy_thr_under[match]) { + if (wstats->level > spydata->spy_thr_high.level) { + spydata->spy_thr_under[match] = 0; + iw_send_thrspy_event(dev, spydata, + address, wstats); + } + } else { + if (wstats->level < spydata->spy_thr_low.level) { + spydata->spy_thr_under[match] = 1; + iw_send_thrspy_event(dev, spydata, + address, wstats); + } + } + } +} +EXPORT_SYMBOL(wireless_spy_update); diff --git a/patches/01-netdev.patch b/patches/01-netdev.patch new file mode 100644 index 0000000..5ba7e41 --- /dev/null +++ b/patches/01-netdev.patch @@ -0,0 +1,802 @@ + +This patch backports the struct net_device_ops changes added +on 2.6.29. It also backports the namespace changes added +through net/wireless/wext.c. Note that there is another +patch file which also addresses netns changes, we leave +them separate as there is no easy way to split the stuff +without creating a headache on maintenance of the pathes. + +--- a/drivers/net/usb/rndis_host.c ++++ b/drivers/net/usb/rndis_host.c +@@ -274,6 +274,7 @@ response_error: + return -EDOM; + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + /* same as usbnet_netdev_ops but MTU change not allowed */ + static const struct net_device_ops rndis_netdev_ops = { + .ndo_open = usbnet_open, +@@ -283,6 +284,7 @@ static const struct net_device_ops rndis + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + }; ++#endif + + int + generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) +@@ -345,7 +347,11 @@ generic_rndis_bind(struct usbnet *dev, s + dev->rx_urb_size &= ~(dev->maxpacket - 1); + u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + net->netdev_ops = &rndis_netdev_ops; ++#else ++ net->change_mtu = NULL; ++#endif + + retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE); + if (unlikely(retval < 0)) { +--- a/drivers/net/usb/usbnet.c ++++ b/drivers/net/usb/usbnet.c +@@ -1236,6 +1236,7 @@ void usbnet_disconnect (struct usb_inter + } + EXPORT_SYMBOL_GPL(usbnet_disconnect); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops usbnet_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, +@@ -1245,6 +1246,7 @@ static const struct net_device_ops usbne + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + }; ++#endif + + /*-------------------------------------------------------------------------*/ + +@@ -1324,7 +1326,15 @@ usbnet_probe (struct usb_interface *udev + net->features |= NETIF_F_HIGHDMA; + #endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + net->netdev_ops = &usbnet_netdev_ops; ++#else ++ net->change_mtu = usbnet_change_mtu; ++ net->hard_start_xmit = usbnet_start_xmit; ++ net->open = usbnet_open; ++ net->stop = usbnet_stop; ++ net->tx_timeout = usbnet_tx_timeout; ++#endif + net->watchdog_timeo = TX_TIMEOUT_JIFFIES; + net->ethtool_ops = &usbnet_ethtool_ops; + +--- a/drivers/net/wireless/rndis_wlan.c ++++ b/drivers/net/wireless/rndis_wlan.c +@@ -2703,6 +2703,7 @@ static int bcm4320b_early_init(struct us + return 0; + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + /* same as rndis_netdev_ops but with local multicast handler */ + static const struct net_device_ops rndis_wlan_netdev_ops = { + .ndo_open = usbnet_open, +@@ -2713,6 +2714,7 @@ static const struct net_device_ops rndis + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = rndis_wlan_set_multicast_list, + }; ++#endif + + 
static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) + { +@@ -2760,7 +2762,11 @@ static int rndis_wlan_bind(struct usbnet + * rndis_host wants to avoid all OID as much as possible + * so do promisc/multicast handling in rndis_wlan. + */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + usbdev->net->netdev_ops = &rndis_wlan_netdev_ops; ++#else ++ usbdev->net->set_multicast_list = rndis_wlan_set_multicast_list; ++#endif + + tmp = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST; + retval = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &tmp, +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -666,6 +666,7 @@ static u16 ieee80211_netdev_select_queue + return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops ieee80211_dataif_ops = { + .ndo_open = ieee80211_open, + .ndo_stop = ieee80211_stop, +@@ -676,6 +677,7 @@ static const struct net_device_ops ieee8 + .ndo_set_mac_address = ieee80211_change_mac, + .ndo_select_queue = ieee80211_netdev_select_queue, + }; ++#endif + + static u16 ieee80211_monitor_select_queue(struct net_device *dev, + struct sk_buff *skb) +@@ -710,6 +712,7 @@ static u16 ieee80211_monitor_select_queu + return ieee80211_downgrade_queue(local, skb); + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops ieee80211_monitorif_ops = { + .ndo_open = ieee80211_open, + .ndo_stop = ieee80211_stop, +@@ -720,14 +723,28 @@ static const struct net_device_ops ieee8 + .ndo_set_mac_address = eth_mac_addr, + .ndo_select_queue = ieee80211_monitor_select_queue, + }; ++#endif + + static void ieee80211_if_setup(struct net_device *dev) + { + ether_setup(dev); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + dev->netdev_ops = &ieee80211_dataif_ops; ++#else ++ dev->hard_start_xmit = ieee80211_subif_start_xmit; ++ dev->set_multicast_list = ieee80211_set_multicast_list; ++ dev->change_mtu = ieee80211_change_mtu; ++ dev->set_mac_address = ieee80211_change_mac; ++ dev->select_queue = ieee80211_netdev_select_queue; ++ dev->open = ieee80211_open; ++ dev->stop = ieee80211_stop; ++ /* we will validate the address ourselves in ->open */ ++ dev->validate_addr = NULL; ++#endif + dev->destructor = free_netdev; + } + ++ + /* + * Helper function to initialise an interface to a specific type. 
+ */ +@@ -739,7 +756,11 @@ static void ieee80211_setup_sdata(struct + + /* and set some type-dependent values */ + sdata->vif.type = type; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + sdata->dev->netdev_ops = &ieee80211_dataif_ops; ++#else ++ sdata->dev->hard_start_xmit = ieee80211_subif_start_xmit; ++#endif + sdata->wdev.iftype = type; + + /* only monitor differs */ +@@ -762,7 +783,13 @@ static void ieee80211_setup_sdata(struct + break; + case NL80211_IFTYPE_MONITOR: + sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + sdata->dev->netdev_ops = &ieee80211_monitorif_ops; ++#else ++ sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit; ++ sdata->dev->select_queue = ieee80211_monitor_select_queue; ++ sdata->dev->set_mac_address = eth_mac_addr; ++#endif + sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | + MONITOR_FLAG_OTHER_BSS; + break; +@@ -831,6 +858,8 @@ int ieee80211_if_add(struct ieee80211_lo + return -ENOMEM; + dev_net_set(ndev, wiphy_net(local->hw.wiphy)); + ++/* This is an optimization, just ignore for older kernels */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) + ndev->needed_headroom = local->tx_headroom + + 4*6 /* four MAC addresses */ + + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ +@@ -839,6 +868,7 @@ int ieee80211_if_add(struct ieee80211_lo + - ETH_HLEN /* ethernet hard_header_len */ + + IEEE80211_ENCRYPT_HEADROOM; + ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; ++#endif + + ret = dev_alloc_name(ndev, ndev->name); + if (ret < 0) +@@ -884,6 +914,10 @@ int ieee80211_if_add(struct ieee80211_lo + if (ret) + goto fail; + ++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,28)) ++ ndev->uninit = ieee80211_teardown_sdata; ++#endif ++ + if (ieee80211_vif_is_mesh(&sdata->vif) && + params && params->mesh_id_len) + ieee80211_sdata_set_mesh_id(sdata, +--- a/drivers/net/b44.c ++++ b/drivers/net/b44.c +@@ -2109,6 +2109,7 @@ static int __devinit b44_get_invariants( + return err; + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops b44_netdev_ops = { + .ndo_open = b44_open, + .ndo_stop = b44_close, +@@ -2124,6 +2125,7 @@ static const struct net_device_ops b44_n + .ndo_poll_controller = b44_poll_controller, + #endif + }; ++#endif + + static int __devinit b44_init_one(struct ssb_device *sdev, + const struct ssb_device_id *ent) +@@ -2163,9 +2165,26 @@ static int __devinit b44_init_one(struct + bp->rx_pending = B44_DEF_RX_RING_PENDING; + bp->tx_pending = B44_DEF_TX_RING_PENDING; + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) ++ dev->open = b44_open; ++ dev->stop = b44_close; ++ dev->hard_start_xmit = b44_start_xmit; ++ dev->get_stats = b44_get_stats; ++ dev->set_multicast_list = b44_set_rx_mode; ++ dev->set_mac_address = b44_set_mac_addr; ++ dev->do_ioctl = b44_ioctl; ++ dev->tx_timeout = b44_tx_timeout; ++ netif_napi_add(dev, &bp->napi, b44_poll, 64); ++ dev->watchdog_timeo = B44_TX_TIMEOUT; ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ dev->poll_controller = b44_poll_controller; ++#endif ++ dev->change_mtu = b44_change_mtu; ++#else + dev->netdev_ops = &b44_netdev_ops; + netif_napi_add(dev, &bp->napi, b44_poll, 64); + dev->watchdog_timeo = B44_TX_TIMEOUT; ++#endif + dev->irq = sdev->irq; + SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); + +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -340,6 +340,7 @@ static const int compat_event_type_size[ + + /* IW event code */ + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + static int __net_init wext_pernet_init(struct net *net) 
+ { + skb_queue_head_init(&net->wext_nlevents); +@@ -382,6 +383,29 @@ static void wireless_nlevent_process(str + + static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process); + ++#else ++/* Older kernels get the old way of doing stuff*/ ++static struct sk_buff_head wireless_nlevent_queue; ++ ++static int __init wireless_nlevent_init(void) ++{ ++ skb_queue_head_init(&wireless_nlevent_queue); ++ return 0; ++} ++ ++subsys_initcall(wireless_nlevent_init); ++ ++static void wireless_nlevent_process(unsigned long data) ++{ ++ struct sk_buff *skb; ++ while ((skb = skb_dequeue(&wireless_nlevent_queue))) ++ rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); ++} ++ ++static DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0); ++ ++#endif ++ + static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, + struct sk_buff *skb) + { +@@ -592,8 +616,13 @@ void wireless_send_event(struct net_devi + + skb_shinfo(skb)->frag_list = compskb; + #endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + skb_queue_tail(&dev_net(dev)->wext_nlevents, skb); + schedule_work(&wireless_nlevent_work); ++#else ++ skb_queue_tail(&wireless_nlevent_queue, skb); ++ tasklet_schedule(&wireless_nlevent_tasklet); ++#endif + } + EXPORT_SYMBOL(wireless_send_event); + +@@ -902,8 +931,13 @@ static int wireless_process_ioctl(struct + return private(dev, iwr, cmd, info, handler); + } + /* Old driver API : call driver ioctl handler */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + if (dev->netdev_ops->ndo_do_ioctl) + return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); ++#else ++ if (dev->do_ioctl) ++ return dev->do_ioctl(dev, ifr, cmd); ++#endif + return -EOPNOTSUPP; + } + +--- a/drivers/net/wireless/ipw2x00/ipw2100.c ++++ b/drivers/net/wireless/ipw2x00/ipw2100.c +@@ -6091,6 +6091,7 @@ static void ipw2100_rf_kill(struct work_ + + static void ipw2100_irq_tasklet(struct ipw2100_priv *priv); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops ipw2100_netdev_ops = { + .ndo_open = ipw2100_open, + .ndo_stop = ipw2100_close, +@@ -6101,6 +6102,7 @@ static const struct net_device_ops ipw21 + .ndo_set_mac_address = ipw2100_set_address, + .ndo_validate_addr = eth_validate_addr, + }; ++#endif + + /* Look into using netdev destructor to shutdown ieee80211? 
*/ + +@@ -6126,7 +6128,16 @@ static struct net_device *ipw2100_alloc_ + priv->ieee->perfect_rssi = -20; + priv->ieee->worst_rssi = -85; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + dev->netdev_ops = &ipw2100_netdev_ops; ++#else ++ dev->open = ipw2100_open; ++ dev->stop = ipw2100_close; ++ dev->init = ipw2100_net_init; ++ dev->tx_timeout = ipw2100_tx_timeout; ++ dev->set_mac_address = ipw2100_set_address; ++#endif ++ + dev->ethtool_ops = &ipw2100_ethtool_ops; + dev->wireless_handlers = &ipw2100_wx_handler_def; + priv->wireless_data.libipw = priv->ieee; +--- a/drivers/net/wireless/ipw2x00/ipw2200.c ++++ b/drivers/net/wireless/ipw2x00/ipw2200.c +@@ -11635,6 +11635,7 @@ static netdev_tx_t ipw_prom_hard_start_x + return NETDEV_TX_OK; + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops ipw_prom_netdev_ops = { + .ndo_open = ipw_prom_open, + .ndo_stop = ipw_prom_stop, +@@ -11643,6 +11644,7 @@ static const struct net_device_ops ipw_p + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + }; ++#endif + + static int ipw_prom_alloc(struct ipw_priv *priv) + { +@@ -11663,7 +11665,13 @@ static int ipw_prom_alloc(struct ipw_pri + memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN); + + priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops; ++#else ++ priv->prom_net_dev->open = ipw_prom_open; ++ priv->prom_net_dev->stop = ipw_prom_stop; ++ priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit; ++#endif + + priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR; + SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev); +@@ -11691,6 +11699,7 @@ static void ipw_prom_free(struct ipw_pri + + #endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops ipw_netdev_ops = { + .ndo_init = ipw_net_init, + .ndo_open = ipw_net_open, +@@ -11701,6 +11710,7 @@ static const struct net_device_ops ipw_n + .ndo_change_mtu = libipw_change_mtu, + .ndo_validate_addr = eth_validate_addr, + }; ++#endif + + static int __devinit ipw_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +@@ -11802,7 +11812,15 @@ static int __devinit ipw_pci_probe(struc + priv->ieee->perfect_rssi = -20; + priv->ieee->worst_rssi = -85; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + net_dev->netdev_ops = &ipw_netdev_ops; ++#else ++ net_dev->open = ipw_net_open; ++ net_dev->stop = ipw_net_stop; ++ net_dev->init = ipw_net_init; ++ net_dev->set_multicast_list = ipw_net_set_multicast_list; ++ net_dev->set_mac_address = ipw_net_set_mac_address; ++#endif + priv->wireless_data.spy_data = &priv->ieee->spy_data; + net_dev->wireless_data = &priv->wireless_data; + net_dev->wireless_handlers = &ipw_wx_handler_def; +--- a/drivers/net/wireless/ipw2x00/libipw_module.c ++++ b/drivers/net/wireless/ipw2x00/libipw_module.c +@@ -157,6 +157,10 @@ struct net_device *alloc_ieee80211(int s + goto failed; + } + ieee = netdev_priv(dev); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) ++ dev->hard_start_xmit = libipw_xmit; ++ dev->change_mtu = libipw_change_mtu; ++#endif + + ieee->dev = dev; + +--- a/drivers/net/wireless/libertas/main.c ++++ b/drivers/net/wireless/libertas/main.c +@@ -881,6 +881,7 @@ static void lbs_free_adapter(struct lbs_ + lbs_deb_leave(LBS_DEB_MAIN); + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops lbs_netdev_ops = { + .ndo_open = lbs_dev_open, 
+ .ndo_stop = lbs_eth_stop, +@@ -891,6 +892,7 @@ static const struct net_device_ops lbs_n + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + }; ++#endif + + /** + * @brief This function adds the card. it will probe the +@@ -936,7 +938,16 @@ struct lbs_private *lbs_add_card(void *c + wdev->netdev = dev; + priv->dev = dev; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + dev->netdev_ops = &lbs_netdev_ops; ++#else ++ dev->open = lbs_dev_open; ++ dev->hard_start_xmit = lbs_hard_start_xmit; ++ dev->stop = lbs_eth_stop; ++ dev->set_mac_address = lbs_set_mac_address; ++ dev->tx_timeout = lbs_tx_timeout; ++ dev->set_multicast_list = lbs_set_multicast_list; ++#endif + dev->watchdog_timeo = 5 * HZ; + dev->ethtool_ops = &lbs_ethtool_ops; + #ifdef WIRELESS_EXT +@@ -1242,11 +1253,13 @@ out: + lbs_deb_leave(LBS_DEB_MAIN); + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops rtap_netdev_ops = { + .ndo_open = lbs_rtap_open, + .ndo_stop = lbs_rtap_stop, + .ndo_start_xmit = lbs_rtap_hard_start_xmit, + }; ++#endif + + static int lbs_add_rtap(struct lbs_private *priv) + { +@@ -1267,7 +1280,13 @@ static int lbs_add_rtap(struct lbs_priva + + memcpy(rtap_dev->dev_addr, priv->current_addr, ETH_ALEN); + rtap_dev->type = ARPHRD_IEEE80211_RADIOTAP; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + rtap_dev->netdev_ops = &rtap_netdev_ops; ++#else ++ rtap_dev->open = lbs_rtap_open; ++ rtap_dev->stop = lbs_rtap_stop; ++ rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit; ++#endif + rtap_dev->ml_priv = priv; + SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent); + +--- a/drivers/net/wireless/libertas/mesh.c ++++ b/drivers/net/wireless/libertas/mesh.c +@@ -332,6 +332,7 @@ static int lbs_mesh_dev_open(struct net_ + return ret; + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops mesh_netdev_ops = { + .ndo_open = lbs_mesh_dev_open, + .ndo_stop = lbs_mesh_stop, +@@ -339,6 +340,7 @@ static const struct net_device_ops mesh_ + .ndo_set_mac_address = lbs_set_mac_address, + .ndo_set_multicast_list = lbs_set_multicast_list, + }; ++#endif + + /** + * @brief This function adds mshX interface +@@ -363,7 +365,15 @@ int lbs_add_mesh(struct lbs_private *pri + mesh_dev->ml_priv = priv; + priv->mesh_dev = mesh_dev; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + mesh_dev->netdev_ops = &mesh_netdev_ops; ++#else ++ mesh_dev->open = lbs_mesh_dev_open; ++ mesh_dev->hard_start_xmit = lbs_hard_start_xmit; ++ mesh_dev->stop = lbs_mesh_stop; ++ mesh_dev->set_mac_address = lbs_set_mac_address; ++ mesh_dev->set_multicast_list = lbs_set_multicast_list; ++#endif + mesh_dev->ethtool_ops = &lbs_ethtool_ops; + memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN); + +--- a/drivers/net/wireless/libertas/defs.h ++++ b/drivers/net/wireless/libertas/defs.h +@@ -16,6 +16,14 @@ + #define DRV_NAME "libertas" + #endif + ++/* ++ * Really nasty hack to avoid stuffing compat.diff with tons of ifdefs, ++ * we could add this to a compat header file but too lazy to check ml_priv ++ * is not used anywhere else ++ */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) ++#define ml_priv priv ++#endif + + #define LBS_DEB_ENTER 0x00000001 + #define LBS_DEB_LEAVE 0x00000002 +--- a/drivers/net/wireless/mac80211_hwsim.c ++++ b/drivers/net/wireless/mac80211_hwsim.c +@@ -1015,16 +1015,22 @@ static struct device_driver mac80211_hws + .name = "mac80211_hwsim" + }; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct 
net_device_ops hwsim_netdev_ops = { + .ndo_start_xmit = hwsim_mon_xmit, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + }; ++#endif + + static void hwsim_mon_setup(struct net_device *dev) + { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + dev->netdev_ops = &hwsim_netdev_ops; ++#else ++ dev->hard_start_xmit = hwsim_mon_xmit; ++#endif + dev->destructor = free_netdev; + ether_setup(dev); + dev->tx_queue_len = 0; +--- a/net/bluetooth/bnep/netdev.c ++++ b/net/bluetooth/bnep/netdev.c +@@ -167,8 +167,12 @@ static inline int bnep_net_proto_filter( + } + #endif + +-static netdev_tx_t bnep_net_xmit(struct sk_buff *skb, +- struct net_device *dev) ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) ++static netdev_tx_t bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) ++#else ++static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) ++#endif ++ + { + struct bnep_session *s = netdev_priv(dev); + struct sock *sk = s->sock->sk; +@@ -209,6 +213,7 @@ static netdev_tx_t bnep_net_xmit(struct + return NETDEV_TX_OK; + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,28)) + static const struct net_device_ops bnep_netdev_ops = { + .ndo_open = bnep_net_open, + .ndo_stop = bnep_net_close, +@@ -220,6 +225,16 @@ static const struct net_device_ops bnep_ + .ndo_change_mtu = eth_change_mtu, + + }; ++#else ++static struct net_device_stats *bnep_net_get_stats(struct net_device *dev) ++{ ++ return &dev->stats; ++} ++static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ++{ ++ return -EINVAL; ++} ++#endif + + void bnep_net_setup(struct net_device *dev) + { +@@ -228,7 +243,18 @@ void bnep_net_setup(struct net_device *d + dev->addr_len = ETH_ALEN; + + ether_setup(dev); ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,28)) + dev->netdev_ops = &bnep_netdev_ops; ++#else ++ dev->open = bnep_net_open; ++ dev->stop = bnep_net_close; ++ dev->hard_start_xmit = bnep_net_xmit; ++ dev->get_stats = bnep_net_get_stats; ++ dev->do_ioctl = bnep_net_ioctl; ++ dev->set_mac_address = bnep_net_set_mac_addr; ++ dev->set_multicast_list = bnep_net_set_mc_list; ++ dev->tx_timeout = bnep_net_timeout; ++#endif + + dev->watchdog_timeo = HZ * 2; + } +--- a/drivers/net/atl1e/atl1e_main.c ++++ b/drivers/net/atl1e/atl1e_main.c +@@ -2202,6 +2202,7 @@ static void atl1e_shutdown(struct pci_de + atl1e_suspend(pdev, PMSG_SUSPEND); + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops atl1e_netdev_ops = { + .ndo_open = atl1e_open, + .ndo_stop = atl1e_close, +@@ -2219,6 +2220,7 @@ static const struct net_device_ops atl1e + #endif + + }; ++#endif + + static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) + { +@@ -2226,7 +2228,23 @@ static int atl1e_init_netdev(struct net_ + pci_set_drvdata(pdev, netdev); + + netdev->irq = pdev->irq; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + netdev->netdev_ops = &atl1e_netdev_ops; ++#else ++ netdev->change_mtu = atl1e_change_mtu; ++ netdev->hard_start_xmit = atl1e_xmit_frame; ++ netdev->open = atl1e_open; ++ netdev->stop = atl1e_close; ++ netdev->tx_timeout = atl1e_tx_timeout; ++ netdev->set_mac_address = atl1e_set_mac_addr; ++ netdev->do_ioctl = atl1e_ioctl; ++ netdev->get_stats = atl1e_get_stats; ++ netdev->set_multicast_list = atl1e_set_multi; ++ netdev->vlan_rx_register = atl1e_vlan_rx_register; ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ netdev->poll_controller = atl1e_netpoll; ++#endif ++#endif + + netdev->watchdog_timeo = 
AT_TX_WATCHDOG; + atl1e_set_ethtool_ops(netdev); +--- a/drivers/net/atl1c/atl1c_main.c ++++ b/drivers/net/atl1c/atl1c_main.c +@@ -2534,6 +2534,7 @@ static void atl1c_shutdown(struct pci_de + atl1c_suspend(pdev, PMSG_SUSPEND); + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops atl1c_netdev_ops = { + .ndo_open = atl1c_open, + .ndo_stop = atl1c_close, +@@ -2550,6 +2551,7 @@ static const struct net_device_ops atl1c + .ndo_poll_controller = atl1c_netpoll, + #endif + }; ++#endif + + static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev) + { +@@ -2557,7 +2559,23 @@ static int atl1c_init_netdev(struct net_ + pci_set_drvdata(pdev, netdev); + + netdev->irq = pdev->irq; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + netdev->netdev_ops = &atl1c_netdev_ops; ++#else ++ netdev->change_mtu = atl1c_change_mtu; ++ netdev->hard_start_xmit = atl1c_xmit_frame; ++ netdev->open = atl1c_open; ++ netdev->stop = atl1c_close; ++ netdev->tx_timeout = atl1c_tx_timeout; ++ netdev->set_mac_address = atl1c_set_mac_addr; ++ netdev->do_ioctl = atl1c_ioctl; ++ netdev->get_stats = atl1c_get_stats; ++ netdev->set_multicast_list = atl1c_set_multi; ++ netdev->vlan_rx_register = atl1c_vlan_rx_register; ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ netdev->poll_controller = atl1c_netpoll; ++#endif ++#endif + netdev->watchdog_timeo = AT_TX_WATCHDOG; + atl1c_set_ethtool_ops(netdev); + +--- a/drivers/net/atlx/atl1.c ++++ b/drivers/net/atlx/atl1.c +@@ -2878,6 +2878,7 @@ static void atl1_poll_controller(struct + } + #endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops atl1_netdev_ops = { + .ndo_open = atl1_open, + .ndo_stop = atl1_close, +@@ -2893,6 +2894,7 @@ static const struct net_device_ops atl1_ + .ndo_poll_controller = atl1_poll_controller, + #endif + }; ++#endif + + /* + * atl1_probe - Device Initialization Routine +@@ -2981,7 +2983,22 @@ static int __devinit atl1_probe(struct p + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = 0x1f; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + netdev->netdev_ops = &atl1_netdev_ops; ++#else ++ netdev->change_mtu = atl1_change_mtu; ++ netdev->hard_start_xmit = atl1_xmit_frame; ++ netdev->open = atl1_open; ++ netdev->stop = atl1_close; ++ netdev->tx_timeout = atlx_tx_timeout; ++ netdev->set_mac_address = atl1_set_mac; ++ netdev->do_ioctl = atlx_ioctl; ++ netdev->set_multicast_list = atlx_set_multi; ++ netdev->vlan_rx_register = atlx_vlan_rx_register; ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ netdev->poll_controller = atl1_poll_controller; ++#endif ++#endif + netdev->watchdog_timeo = 5 * HZ; + + netdev->ethtool_ops = &atl1_ethtool_ops; +--- a/drivers/net/atlx/atl2.c ++++ b/drivers/net/atlx/atl2.c +@@ -1306,6 +1306,7 @@ static void atl2_poll_controller(struct + #endif + + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + static const struct net_device_ops atl2_netdev_ops = { + .ndo_open = atl2_open, + .ndo_stop = atl2_close, +@@ -1321,6 +1322,7 @@ static const struct net_device_ops atl2_ + .ndo_poll_controller = atl2_poll_controller, + #endif + }; ++#endif + + /* + * atl2_probe - Device Initialization Routine +@@ -1395,7 +1397,22 @@ static int __devinit atl2_probe(struct p + + atl2_setup_pcicmd(pdev); + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + netdev->netdev_ops = &atl2_netdev_ops; ++#else ++ netdev->change_mtu = atl2_change_mtu; ++ netdev->hard_start_xmit = atl2_xmit_frame; ++ netdev->open = atl2_open; ++ netdev->stop = atl2_close; ++ 
netdev->tx_timeout = atl2_tx_timeout; ++ netdev->set_mac_address = atl2_set_mac; ++ netdev->do_ioctl = atl2_ioctl; ++ netdev->set_multicast_list = atl2_set_multi; ++ netdev->vlan_rx_register = atl2_vlan_rx_register; ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ netdev->poll_controller = atl2_poll_controller; ++#endif ++#endif + atl2_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); diff --git a/patches/02-ksize.patch b/patches/02-ksize.patch new file mode 100644 index 0000000..28bacba --- /dev/null +++ b/patches/02-ksize.patch @@ -0,0 +1,44 @@ +ksize() was added as of 2.6.29; it gives you the actual +size of the allocated data. Since we have no support for +this we simply do not optimize for it and deal with +large allocations for the IEs. + +We technically could backport this as + +#define ksize(bleh) SOME_LARGE_NUMBER + +but doing it this way emphasizes careful review +of the situation. + +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -426,9 +426,14 @@ cfg80211_bss_update(struct cfg80211_regi + size_t used = dev->wiphy.bss_priv_size + sizeof(*res); + size_t ielen = res->pub.len_proberesp_ies; + ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,28) ++ if (0) { ++ used = 0; /* just to shut up the compiler */ ++#else + if (found->pub.proberesp_ies && + !found->proberesp_ies_allocated && + ksize(found) >= used + ielen) { ++#endif + memcpy(found->pub.proberesp_ies, + res->pub.proberesp_ies, ielen); + found->pub.len_proberesp_ies = ielen; +@@ -459,9 +464,14 @@ cfg80211_bss_update(struct cfg80211_regi + size_t used = dev->wiphy.bss_priv_size + sizeof(*res); + size_t ielen = res->pub.len_beacon_ies; + ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,28) ++ if (0) { ++ used = 0; /* just to shut up the compiler */ ++#else + if (found->pub.beacon_ies && + !found->beacon_ies_allocated && + ksize(found) >= used + ielen) { ++#endif + memcpy(found->pub.beacon_ies, + res->pub.beacon_ies, ielen); + found->pub.len_beacon_ies = ielen; diff --git a/patches/03-rfkill.patch b/patches/03-rfkill.patch new file mode 100644 index 0000000..cb5fac3 --- /dev/null +++ b/patches/03-rfkill.patch @@ -0,0 +1,233 @@ +rfkill was re-implemented in 2.6.31. We port it to +older kernels with a simple hack: we just rename the +module to a new one, rfkill_backport, and every +exported symbol gets redefined with a _backport +postfix through compat-2.6.31.h. The changes below +are the ones we could not do through compat-2.6.31.h. + +Do older kernels have /dev/rfkill? If not then we can +just keep /dev/rfkill and not /dev/rfkill_backport. + +Note that 2.6.31 added a netdevice notifier upon interface +dev_open() which on cfg80211 will check to see +if rfkill is enabled (or if the mode of operation is not +supported) in cfg80211_netdev_notifier_call() and if +so deny bringing the interface up. This was added via +commit: + +3b8bcfd5d31ea0fec58681d035544ace707d2536 + +Since older kernels will not have the notifier call +on dev_open(), if we *really want* to port this we could have +mac80211's subif_open() call: + + ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); + ret = notifier_to_errno(ret); + if (ret) + return ret; + +This would do the policing from within mac80211. + +--- a/net/rfkill/Makefile ++++ b/net/rfkill/Makefile +@@ -2,6 +2,6 @@ + # Makefile for the RF switch subsystem. 
+ # + +-rfkill-y += core.o +-rfkill-$(CONFIG_RFKILL_INPUT) += input.o +-obj-$(CONFIG_RFKILL) += rfkill.o ++rfkill_backport-y += core.o ++rfkill_backport-$(CONFIG_RFKILL_BACKPORT_INPUT) += input.o ++obj-$(CONFIG_RFKILL_BACKPORT) += rfkill_backport.o +--- a/net/rfkill/input.c ++++ b/net/rfkill/input.c +@@ -17,7 +17,11 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) + #include ++#else ++#include ++#endif + #include + + #include "rfkill.h" +@@ -229,7 +233,7 @@ static int rfkill_connect(struct input_h + + handle->dev = dev; + handle->handler = handler; +- handle->name = "rfkill"; ++ handle->name = "rfkill_backport"; + + /* causes rfkill_start() to be called */ + error = input_register_handle(handle); +--- a/net/rfkill/core.c ++++ b/net/rfkill/core.c +@@ -26,7 +26,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include +@@ -62,7 +62,7 @@ struct rfkill { + const struct rfkill_ops *ops; + void *data; + +-#ifdef CONFIG_RFKILL_LEDS ++#ifdef CONFIG_RFKILL_BACKPORT_LEDS + struct led_trigger led_trigger; + const char *ledtrigname; + #endif +@@ -123,7 +123,7 @@ static struct { + static bool rfkill_epo_lock_active; + + +-#ifdef CONFIG_RFKILL_LEDS ++#ifdef CONFIG_RFKILL_BACKPORT_LEDS + static void rfkill_led_trigger_event(struct rfkill *rfkill) + { + struct led_trigger *trigger; +@@ -317,7 +317,7 @@ static void rfkill_set_block(struct rfki + rfkill_event(rfkill); + } + +-#ifdef CONFIG_RFKILL_INPUT ++#ifdef CONFIG_RFKILL_BACKPORT_INPUT + static atomic_t rfkill_input_disabled = ATOMIC_INIT(0); + + /** +@@ -779,7 +779,7 @@ static int rfkill_resume(struct device * + } + + static struct class rfkill_class = { +- .name = "rfkill", ++ .name = "rfkill_backport", + .dev_release = rfkill_release, + .dev_attrs = rfkill_dev_attrs, + .dev_uevent = rfkill_dev_uevent, +@@ -925,7 +925,7 @@ int __must_check rfkill_register(struct + if (!rfkill->persistent || rfkill_epo_lock_active) { + schedule_work(&rfkill->sync_work); + } else { +-#ifdef CONFIG_RFKILL_INPUT ++#ifdef CONFIG_RFKILL_BACKPORT_INPUT + bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW); + + if (!atomic_read(&rfkill_input_disabled)) +@@ -1153,7 +1153,7 @@ static int rfkill_fop_release(struct ino + list_for_each_entry_safe(ev, tmp, &data->events, list) + kfree(ev); + +-#ifdef CONFIG_RFKILL_INPUT ++#ifdef CONFIG_RFKILL_BACKPORT_INPUT + if (data->input_handler) + if (atomic_dec_return(&rfkill_input_disabled) == 0) + printk(KERN_DEBUG "rfkill: input handler enabled\n"); +@@ -1164,7 +1164,7 @@ static int rfkill_fop_release(struct ino + return 0; + } + +-#ifdef CONFIG_RFKILL_INPUT ++#ifdef CONFIG_RFKILL_BACKPORT_INPUT + static long rfkill_fop_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) + { +@@ -1197,7 +1197,7 @@ static const struct file_operations rfki + .write = rfkill_fop_write, + .poll = rfkill_fop_poll, + .release = rfkill_fop_release, +-#ifdef CONFIG_RFKILL_INPUT ++#ifdef CONFIG_RFKILL_BACKPORT_INPUT + .unlocked_ioctl = rfkill_fop_ioctl, + .compat_ioctl = rfkill_fop_ioctl, + #endif +@@ -1227,7 +1227,7 @@ static int __init rfkill_init(void) + goto out; + } + +-#ifdef CONFIG_RFKILL_INPUT ++#ifdef CONFIG_RFKILL_BACKPORT_INPUT + error = rfkill_handler_init(); + if (error) { + misc_deregister(&rfkill_miscdev); +@@ -1243,7 +1243,7 @@ subsys_initcall(rfkill_init); + + static void __exit rfkill_exit(void) + { +-#ifdef CONFIG_RFKILL_INPUT ++#ifdef CONFIG_RFKILL_BACKPORT_INPUT + rfkill_handler_exit(); + #endif + misc_deregister(&rfkill_miscdev); +--- 
a/include/linux/rfkill_backport.h ++++ b/include/linux/rfkill_backport.h +@@ -20,6 +20,7 @@ + */ + + #include ++#include + + /* define userspace visible states */ + #define RFKILL_STATE_SOFT_BLOCKED 0 +@@ -148,7 +149,7 @@ struct rfkill_ops { + int (*set_block)(void *data, bool blocked); + }; + +-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) ++#if defined(CONFIG_RFKILL_BACKPORT) || defined(CONFIG_RFKILL_BACKPORT_MODULE) + /** + * rfkill_alloc - allocate rfkill structure + * @name: name of the struct -- the string is not copied internally +@@ -355,7 +356,7 @@ static inline bool rfkill_blocked(struct + #endif /* RFKILL || RFKILL_MODULE */ + + +-#ifdef CONFIG_RFKILL_LEDS ++#ifdef CONFIG_RFKILL_BACKPORT_LEDS + /** + * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED. + * This function might return a NULL pointer if registering of the +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -11,7 +11,11 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) + #include ++#else ++#include ++#endif + #include + #include + #include +--- a/drivers/net/wireless/ath/ath9k/hw.c ++++ b/drivers/net/wireless/ath/ath9k/hw.c +@@ -3224,7 +3224,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw + + pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM; + +-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) ++#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) && defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) && defined(CONFIG_RFKILL_BACKPORT) || defined(CONFIG_RFKILL_BACKPORT_MODULE)) + ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT); + if (ah->rfsilent & EEP_RFSILENT_ENABLED) { + ah->rfkill_gpio = +--- a/drivers/net/wireless/ath/ath5k/base.h ++++ b/drivers/net/wireless/ath/ath5k/base.h +@@ -46,7 +46,11 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) + #include ++#else ++#include ++#endif + + #include "ath5k.h" + #include "debug.h" diff --git a/patches/04-netns.patch b/patches/04-netns.patch new file mode 100644 index 0000000..58dc92b --- /dev/null +++ b/patches/04-netns.patch @@ -0,0 +1,142 @@ +The only other namespace change. Note that patch 01-netdev.patch +also has some other namespace changes; look at that file +for the other changes. It'd be nice to figure out a way to +bring those here cleanly and separately, but they touch the same +files...
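+
+For readers new to the compat tree, the hunks below mostly boil down to
+fencing off namespace-aware calls on kernels that predate network
+namespaces (2.6.24; the genl netnsok flag additionally needs 2.6.32).
+A minimal sketch of the idiom, with a made-up helper name used purely
+for illustration (it is not part of this patch):
+
+	#include <linux/version.h>
+	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
+	#include <net/net_namespace.h>	/* struct net, put_net() */
+	#else
+	struct net;			/* opaque here; old kernels have no netns */
+	#endif
+
+	/* Drop a netns reference only where the kernel actually tracks one. */
+	static inline void compat_example_put_net(struct net *net)
+	{
+	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
+		put_net(net);
+	#endif
+	}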
+ +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -29,7 +29,9 @@ static struct genl_family nl80211_fam = + .hdrsize = 0, /* no private header */ + .version = 1, /* no particular meaning now */ + .maxattr = NL80211_ATTR_MAX, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + .netnsok = true, ++#endif + }; + + /* internal helper: get rdev and dev */ +@@ -4187,7 +4189,9 @@ static int nl80211_wiphy_netns(struct sk + + err = cfg80211_switch_netns(rdev, net); + out_put_net: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + put_net(net); ++#endif + out: + cfg80211_unlock_rdev(rdev); + out_rtnl: +--- a/net/wireless/core.c ++++ b/net/wireless/core.c +@@ -223,6 +223,7 @@ int cfg80211_dev_rename(struct cfg80211_ + return 0; + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, + struct net *net) + { +@@ -258,6 +259,7 @@ int cfg80211_switch_netns(struct cfg8021 + + return err; + } ++#endif + + static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) + { +@@ -370,7 +372,9 @@ struct wiphy *wiphy_new(const struct cfg + rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + #endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + wiphy_net_set(&rdev->wiphy, &init_net); ++#endif + + rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block; + rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev), +@@ -683,8 +687,10 @@ static int cfg80211_netdev_notifier_call + mutex_lock(&rdev->devlist_mtx); + list_add_rcu(&wdev->list, &rdev->netdev_list); + rdev->devlist_generation++; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + /* can only change netns with wiphy */ + dev->features |= NETIF_F_NETNS_LOCAL; ++#endif + + if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, + "phy80211")) { +@@ -827,6 +833,7 @@ static struct notifier_block cfg80211_ne + .notifier_call = cfg80211_netdev_notifier_call, + }; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + static void __net_exit cfg80211_pernet_exit(struct net *net) + { + struct cfg80211_registered_device *rdev; +@@ -844,14 +851,17 @@ static void __net_exit cfg80211_pernet_e + static struct pernet_operations cfg80211_pernet_ops = { + .exit = cfg80211_pernet_exit, + }; ++#endif + + static int __init cfg80211_init(void) + { + int err; + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + err = register_pernet_device(&cfg80211_pernet_ops); + if (err) + goto out_fail_pernet; ++#endif + + err = wiphy_sysfs_init(); + if (err) +@@ -886,8 +896,10 @@ out_fail_nl80211: + out_fail_notifier: + wiphy_sysfs_exit(); + out_fail_sysfs: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + unregister_pernet_device(&cfg80211_pernet_ops); + out_fail_pernet: ++#endif + return err; + } + subsys_initcall(cfg80211_init); +@@ -899,7 +911,9 @@ static void cfg80211_exit(void) + unregister_netdevice_notifier(&cfg80211_netdev_notifier); + wiphy_sysfs_exit(); + regulatory_exit(); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + unregister_pernet_device(&cfg80211_pernet_ops); ++#endif + destroy_workqueue(cfg80211_wq); + } + module_exit(cfg80211_exit); +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -399,7 +399,11 @@ static void wireless_nlevent_process(uns + { + struct sk_buff *skb; + while ((skb = skb_dequeue(&wireless_nlevent_queue))) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); ++#else ++ rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); ++#endif + } + + static 
DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0); +--- a/net/wireless/wext-proc.c ++++ b/net/wireless/wext-proc.c +@@ -98,7 +98,11 @@ static void *wireless_dev_seq_start(stru + return SEQ_START_TOKEN; + + off = 1; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + for_each_netdev(net, dev) ++#else ++ for_each_netdev(net) ++#endif + if (off++ == *pos) + return dev; + return NULL; diff --git a/patches/05-usb.patch b/patches/05-usb.patch new file mode 100644 index 0000000..9ff2e1e --- /dev/null +++ b/patches/05-usb.patch @@ -0,0 +1,14 @@ +The USB option soft_unbind was added as of 2.6.27. + +--- a/drivers/net/wireless/p54/p54usb.c ++++ b/drivers/net/wireless/p54/p54usb.c +@@ -1054,7 +1054,9 @@ static struct usb_driver p54u_driver = { + .resume = p54u_resume, + .reset_resume = p54u_resume, + #endif /* CONFIG_PM */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + .soft_unbind = 1, ++#endif + }; + + static int __init p54u_init(void) diff --git a/patches/06-header-changes.patch b/patches/06-header-changes.patch new file mode 100644 index 0000000..65f5be0 --- /dev/null +++ b/patches/06-header-changes.patch @@ -0,0 +1,126 @@ + +Every kernel release there are a few changes made to +headers. Some code gets shifted around between headers or +new headers are defined. This patch deals with such +cases. + +--- a/drivers/net/wireless/b43/phy_common.h ++++ b/drivers/net/wireless/b43/phy_common.h +@@ -2,6 +2,9 @@ + #define LINUX_B43_PHY_COMMON_H_ + + #include ++#if (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,28)) ++#include ++#endif + + struct b43_wldev; + +--- a/drivers/net/wireless/libertas/assoc.c ++++ b/drivers/net/wireless/libertas/assoc.c +@@ -5,6 +5,9 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) ++#include ++#endif + + #include "assoc.h" + #include "decl.h" +--- a/drivers/net/wireless/wl12xx/wl1251_main.c ++++ b/drivers/net/wireless/wl12xx/wl1251_main.c +@@ -26,6 +26,9 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,28)) ++#include ++#endif + #include + #include + #include +--- a/drivers/net/wireless/wl12xx/wl1251_spi.c ++++ b/drivers/net/wireless/wl12xx/wl1251_spi.c +@@ -24,6 +24,9 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,28)) ++#include ++#endif + #include + #include + +--- a/net/mac80211/key.c ++++ b/net/mac80211/key.c +@@ -21,6 +21,9 @@ + #include "aes_ccm.h" + #include "aes_cmac.h" + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) ++#include ++#endif + + /** + * DOC: Key handling basics +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -20,7 +20,9 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + #include ++#endif + #include + + #include "ieee80211_i.h" +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -9,7 +9,9 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + #include ++#endif + #include + #include + #include "ieee80211_i.h" +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -18,7 +18,9 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + #include ++#endif + #include + #include + #include +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -20,7 +20,9 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + #include ++#endif + #include + #include + +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -14,7 +14,9 @@ + #include + #include + #include ++#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + #include ++#endif + #include + #include + #include diff --git a/patches/07-change-default-rate-alg.patch b/patches/07-change-default-rate-alg.patch new file mode 100644 index 0000000..9c21825 --- /dev/null +++ b/patches/07-change-default-rate-alg.patch @@ -0,0 +1,34 @@ + +Your current kernel's configuration (.config and linux/autoconf.h) +is always respected when compiling external modules. Because +of this, if you are using an old kernel which preferred the +PID rate control algorithm, we cannot force it to use minstrel +instead. Minstrel is now the default rate control algorithm +and we want you to use it. To let you use it we redefine +CONFIG_MAC80211_RC_DEFAULT here to CONFIG_COMPAT_MAC80211_RC_DEFAULT +and define CONFIG_COMPAT_MAC80211_RC_DEFAULT in config.mk. +Through the compat autoconf we then also get it defined there +at compilation time. + +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -22,7 +22,7 @@ struct rate_control_alg { + static LIST_HEAD(rate_ctrl_algs); + static DEFINE_MUTEX(rate_ctrl_mutex); + +-static char *ieee80211_default_rc_algo = CONFIG_MAC80211_RC_DEFAULT; ++static char *ieee80211_default_rc_algo = CONFIG_COMPAT_MAC80211_RC_DEFAULT; + module_param(ieee80211_default_rc_algo, charp, 0644); + MODULE_PARM_DESC(ieee80211_default_rc_algo, + "Default rate control algorithm for mac80211 to use"); +@@ -117,8 +117,8 @@ ieee80211_rate_control_ops_get(const cha + ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo); + + /* try built-in one if specific alg requested but not found */ +- if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT)) +- ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT); ++ if (!ops && strlen(CONFIG_COMPAT_MAC80211_RC_DEFAULT)) ++ ops = ieee80211_try_rate_control_ops_get(CONFIG_COMPAT_MAC80211_RC_DEFAULT); + + return ops; + } diff --git a/patches/08-rename-iwl4965-config.patch b/patches/08-rename-iwl4965-config.patch new file mode 100644 index 0000000..83b7c2a --- /dev/null +++ b/patches/08-rename-iwl4965-config.patch @@ -0,0 +1,31 @@ +In kernel 2.6.26 and older CONFIG_IWL4965 was built as an extra +module, but now it is directly included in iwlagn. + +CONFIG_IWL4965 has to be set to y to build correctly.
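+
+As with the rate control change above, the renamed symbol is expected to
+come from the compat build system rather than the kernel's own Kconfig.
+Purely as an illustration of what the generated compat autoconf header
+ends up carrying (it is generated from config.mk at build time, so the
+exact names and values here are assumptions, not part of this patch):
+
+	/* illustrative only -- the real values come from config.mk */
+	#define CONFIG_COMPAT_IWL4965 1
+	#define CONFIG_COMPAT_MAC80211_RC_DEFAULT "minstrel"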
+ +--- a/drivers/net/wireless/iwlwifi/Makefile ++++ b/drivers/net/wireless/iwlwifi/Makefile +@@ -11,7 +11,7 @@ CFLAGS_iwl-devtrace.o := -I$(src) + obj-$(CONFIG_IWLAGN) += iwlagn.o + iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o + +-iwlagn-$(CONFIG_IWL4965) += iwl-4965.o ++iwlagn-$(CONFIG_COMPAT_IWL4965) += iwl-4965.o + iwlagn-$(CONFIG_IWL5000) += iwl-5000.o + iwlagn-$(CONFIG_IWL5000) += iwl-6000.o + iwlagn-$(CONFIG_IWL5000) += iwl-1000.o +--- a/drivers/net/wireless/iwlwifi/iwl-agn.c ++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c +@@ -3796,10 +3796,10 @@ static void __devexit iwl_pci_remove(str + + /* Hardware specific file defines the PCI IDs table for that hardware module */ + static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { +-#ifdef CONFIG_IWL4965 ++#ifdef CONFIG_COMPAT_IWL4965 + {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, + {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, +-#endif /* CONFIG_IWL4965 */ ++#endif /* CONFIG_COMPAT_IWL4965 */ + #ifdef CONFIG_IWL5000 + /* 5100 Series WiFi */ + {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ diff --git a/patches/09-threaded-irq.patch b/patches/09-threaded-irq.patch new file mode 100644 index 0000000..0f36707 --- /dev/null +++ b/patches/09-threaded-irq.patch @@ -0,0 +1,63 @@ +The 2.6.31 kernel has threaded IRQ support and b43 is the first +wireless driver that makes use of it. To support threaded IRQs +on older kernels we built our own struct compat_threaded_irq +and queue_work() onto it, so the handler runs in process +context just as the kernel's IRQ thread would. + +--- a/drivers/net/wireless/b43/main.c ++++ b/drivers/net/wireless/b43/main.c +@@ -3915,8 +3915,13 @@ redo: + if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + b43_sdio_free_irq(dev); + } else { ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ++ compat_synchronize_threaded_irq(&dev->irq_compat); ++ compat_free_threaded_irq(&dev->irq_compat); ++#else + synchronize_irq(dev->dev->irq); + free_irq(dev->dev->irq, dev); ++#endif + } + mutex_lock(&wl->mutex); + dev = wl->current_dev; +@@ -3956,9 +3961,17 @@ static int b43_wireless_core_start(struc + goto out; + } + } else { ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ++ err = compat_request_threaded_irq(&dev->irq_compat, ++ dev->dev->irq, ++ b43_interrupt_handler, ++ b43_interrupt_thread_handler, ++ IRQF_SHARED, KBUILD_MODNAME, dev); ++#else + err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler, + b43_interrupt_thread_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); ++#endif + if (err) { + b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq); + goto out; +@@ -4663,6 +4676,10 @@ static int b43_setup_bands(struct b43_wl + + static void b43_wireless_core_detach(struct b43_wldev *dev) + { ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ++ if (dev->dev->bus->bustype != SSB_BUSTYPE_SDIO) ++ compat_destroy_threaded_irq(&dev->irq_compat); ++#endif + /* We release firmware that late to not be required to re-request + * is all the time when we reinit the core. */ + b43_release_firmware(dev); +--- a/drivers/net/wireless/b43/b43.h ++++ b/drivers/net/wireless/b43/b43.h +@@ -754,6 +754,9 @@ struct b43_wldev { + unsigned int tx_count; + unsigned int rx_count; + #endif ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ++ struct compat_threaded_irq irq_compat; ++#endif + }; + + /* Data structure for the WLAN parts (802.11 cores) of the b43 chip. 
*/ diff --git a/patches/10-add-wext-handlers-to-netdev.patch b/patches/10-add-wext-handlers-to-netdev.patch new file mode 100644 index 0000000..5c4ee95 --- /dev/null +++ b/patches/10-add-wext-handlers-to-netdev.patch @@ -0,0 +1,26 @@ +The patch "wext: refactor" by Johannes Berg refactored +wext code so that new kernels no longer get the wext +handlers through struct net_device; instead they get +them through the struct wiphy, which is cfg80211 specific. + +For old kernels this means you get no wext handlers +anymore when backporting code, so this adds the wext handlers +back to the netdevice wireless_handlers to let compat +users use wext again. + +We only do this for kernels <= 2.6.32 as 2.6.33 will use +the struct wiphy wireless handler. + +--- a/net/wireless/core.c ++++ b/net/wireless/core.c +@@ -701,6 +701,10 @@ static int cfg80211_netdev_notifier_call + wdev->sme_state = CFG80211_SME_IDLE; + mutex_unlock(&rdev->devlist_mtx); + #ifdef CONFIG_CFG80211_WEXT ++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32)) ++ if (!dev->wireless_handlers) ++ dev->wireless_handlers = &cfg80211_wext_handler; ++#endif + wdev->wext.default_key = -1; + wdev->wext.default_mgmt_key = -1; + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; diff --git a/patches/11-dev-pm-ops.patch b/patches/11-dev-pm-ops.patch new file mode 100644 index 0000000..7b3500a --- /dev/null +++ b/patches/11-dev-pm-ops.patch @@ -0,0 +1,58 @@ +The 2.6.29 kernel has the new struct dev_pm_ops [1] which is used +on the pci device to distinguish power management hooks for suspend +to RAM and hibernation. Older kernels don't have these so we need +to resort back to the good ol' suspend/resume. Fortunately the calls +are not so different, so it should be possible to reuse the same +calls in compat code with only slight modifications.
+ +[1] http://lxr.linux.no/#linux+v2.6.29/include/linux/pm.h#L170 + +--- a/drivers/net/wireless/ath/ath5k/base.c ++++ b/drivers/net/wireless/ath/ath5k/base.c +@@ -197,6 +197,33 @@ static void __devexit ath5k_pci_remove(s + #ifdef CONFIG_PM + static int ath5k_pci_suspend(struct device *dev); + static int ath5k_pci_resume(struct device *dev); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) ++static int ath5k_pci_suspend_compat(struct pci_dev *pdev, pm_message_t state) ++{ ++ int r; ++ ++ r = ath5k_pci_suspend(&pdev->dev); ++ if (r) ++ return r; ++ ++ pci_save_state(pdev); ++ pci_disable_device(pdev); ++ pci_set_power_state(pdev, PCI_D3hot); ++ return 0; ++} ++ ++static int ath5k_pci_resume_compat(struct pci_dev *pdev) ++{ ++ int r; ++ ++ pci_restore_state(pdev); ++ r = pci_enable_device(pdev); ++ if (r) ++ return r; ++ ++ return ath5k_pci_resume(&pdev->dev); ++} ++#endif + + SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume); + #define ATH5K_PM_OPS (&ath5k_pm_ops) +@@ -209,7 +236,12 @@ static struct pci_driver ath5k_pci_drive + .id_table = ath5k_pci_id_table, + .probe = ath5k_pci_probe, + .remove = __devexit_p(ath5k_pci_remove), ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + .driver.pm = ATH5K_PM_OPS, ++#elif defined(CONFIG_PM) ++ .suspend = ath5k_pci_suspend_compat, ++ .resume = ath5k_pci_resume_compat, ++#endif + }; + + diff --git a/patches/12-iw_handler-changes.patch b/patches/12-iw_handler-changes.patch new file mode 100644 index 0000000..ffbc2cb --- /dev/null +++ b/patches/12-iw_handler-changes.patch @@ -0,0 +1,14 @@ +--- a/drivers/net/wireless/ipw2x00/ipw2100.c ++++ b/drivers/net/wireless/ipw2x00/ipw2100.c +@@ -6140,7 +6140,11 @@ static struct net_device *ipw2100_alloc_ + + dev->ethtool_ops = &ipw2100_ethtool_ops; + dev->wireless_handlers = &ipw2100_wx_handler_def; ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) + priv->wireless_data.libipw = priv->ieee; ++#else ++ priv->wireless_data.ieee80211 = (struct ieee80211_device *) priv->ieee; ++#endif + dev->wireless_data = &priv->wireless_data; + dev->watchdog_timeo = 3 * HZ; + dev->irq = 0; diff --git a/patches/13-trace.patch b/patches/13-trace.patch new file mode 100644 index 0000000..8c000f3 --- /dev/null +++ b/patches/13-trace.patch @@ -0,0 +1,44 @@ +Older kernels do not have trace/define_trace.h [1], and if there +is something there that we can backport we will define it +in our compat headers. + +[1] http://lxr.linux.no/#linux+v2.6.31/include/trace/define_trace.h + +--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h ++++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h +@@ -27,7 +27,9 @@ + #if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) + #define __IWLWIFI_DEVICE_TRACE + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + #include ++#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) */ + #include "iwl-dev.h" + + #if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) +@@ -264,4 +266,6 @@ TRACE_EVENT(iwlwifi_dev_ucode_event, + #define TRACE_INCLUDE_PATH . 
+ + #undef TRACE_INCLUDE_FILE + #define TRACE_INCLUDE_FILE iwl-devtrace ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) + #include ++#endif +--- a/net/mac80211/driver-trace.h ++++ b/net/mac80211/driver-trace.h +@@ -1,7 +1,9 @@ + #if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) + #define __MAC80211_DRIVER_TRACE + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + #include ++#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) */ + #include + #include "ieee80211_i.h" + +@@ -772,4 +774,6 @@ TRACE_EVENT(drv_flush, + #define TRACE_INCLUDE_PATH . + #undef TRACE_INCLUDE_FILE + #define TRACE_INCLUDE_FILE driver-trace ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) + #include ++#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) */ diff --git a/patches/14-device-type.patch b/patches/14-device-type.patch new file mode 100644 index 0000000..5b6c126 --- /dev/null +++ b/patches/14-device-type.patch @@ -0,0 +1,56 @@ +Kernels >= 2.6.32 can identify the type of a netdevice +so that sysfs can be used to get this. We never really had a +systematic way of doing this -- now we do through the +SET_NETDEV_DEVTYPE() macro. For older kernels we make +SET_NETDEV_DEVTYPE() a no-op; this means the wireless type +we define is unused, so we ifdef it out simply to avoid a +compile warning. + +--- a/net/wireless/core.c ++++ b/net/wireless/core.c +@@ -648,9 +648,11 @@ static void wdev_cleanup_work(struct wor + dev_put(wdev->netdev); + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + static struct device_type wiphy_type = { + .name = "wlan", + }; ++#endif + + static int cfg80211_netdev_notifier_call(struct notifier_block * nb, + unsigned long state, +--- a/net/bluetooth/bnep/core.c ++++ b/net/bluetooth/bnep/core.c +@@ -536,9 +536,11 @@ static struct device *bnep_get_device(st + return conn ? &conn->dev : NULL; + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + static struct device_type bnep_type = { + .name = "bluetooth", + }; ++#endif + + int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) + { +--- a/drivers/net/usb/usbnet.c ++++ b/drivers/net/usb/usbnet.c +@@ -1252,13 +1252,17 @@ static const struct net_device_ops usbne + + // precondition: never called in_interrupt + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + static struct device_type wlan_type = { + .name = "wlan", + }; ++#endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + static struct device_type wwan_type = { + .name = "wwan", + }; ++#endif + + int + usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) diff --git a/patches/15-symbol-export-conflicts.patch b/patches/15-symbol-export-conflicts.patch new file mode 100644 index 0000000..798a089 --- /dev/null +++ b/patches/15-symbol-export-conflicts.patch @@ -0,0 +1,18 @@ +In kernels < 2.6.32 libipw also exports ieee80211_rx. +To avoid conflicts with the other export we rename ours. + +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -2565,7 +2565,12 @@ void ieee80211_rx(struct ieee80211_hw *h + drop: + kfree_skb(skb); + } ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) + EXPORT_SYMBOL(ieee80211_rx); ++#else ++EXPORT_SYMBOL(mac80211_ieee80211_rx); ++#endif ++ + + /* This is a version of the rx handler that can be called from hard irq + * context. 
Post the skb on the queue and schedule the tasklet */ diff --git a/patches/16-bluetooth.patch b/patches/16-bluetooth.patch new file mode 100644 index 0000000..c0353e9 --- /dev/null +++ b/patches/16-bluetooth.patch @@ -0,0 +1,609 @@ +These changes are required to backport bluetooth. A lot can still be optimized +here, but for now we keep this as is. + +--- a/drivers/bluetooth/hci_ldisc.c ++++ b/drivers/bluetooth/hci_ldisc.c +@@ -277,8 +277,13 @@ static int hci_uart_tty_open(struct tty_ + /* FIXME: why is this needed. Note don't use ldisc_ref here as the + open path is before the ldisc is referencable */ + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) + if (tty->ldisc->ops->flush_buffer) + tty->ldisc->ops->flush_buffer(tty); ++#else ++ if (tty->ldisc.ops->flush_buffer) ++ tty->ldisc.ops->flush_buffer(tty); ++#endif + tty_driver_flush_buffer(tty); + + return 0; +@@ -478,7 +483,11 @@ static int hci_uart_tty_ioctl(struct tty + return -EUNATCH; + + default: ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + err = n_tty_ioctl_helper(tty, file, cmd, arg); ++#else ++ err = n_tty_ioctl(tty, file, cmd, arg); ++#endif + break; + }; + +--- a/net/bluetooth/af_bluetooth.c ++++ b/net/bluetooth/af_bluetooth.c +@@ -126,8 +126,12 @@ int bt_sock_unregister(int proto) + } + EXPORT_SYMBOL(bt_sock_unregister); + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + static int bt_sock_create(struct net *net, struct socket *sock, int proto, + int kern) ++#else ++static int bt_sock_create(struct net *net, struct socket *sock, int proto) ++#endif + { + int err; + +@@ -145,7 +149,11 @@ static int bt_sock_create(struct net *ne + read_lock(&bt_proto_lock); + + if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + err = bt_proto[proto]->create(net, sock, proto, kern); ++#else ++ err = bt_proto[proto]->create(net, sock, proto); ++#endif + bt_sock_reclassify_lock(sock, proto); + module_put(bt_proto[proto]->owner); + } +@@ -258,7 +266,11 @@ int bt_sock_recvmsg(struct kiocb *iocb, + skb_reset_transport_header(skb); + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + if (err == 0) ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + sock_recv_ts_and_drops(msg, sk, skb); ++#else ++ sock_recv_timestamp(msg, sk, skb); ++#endif + + skb_free_datagram(sk, skb); + +@@ -338,7 +350,11 @@ int bt_sock_ioctl(struct socket *sock, u + if (sk->sk_state == BT_LISTEN) + return -EINVAL; + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) + amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); ++#else ++ amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); ++#endif + if (amount < 0) + amount = 0; + err = put_user(amount, (int __user *) arg); +--- a/net/bluetooth/cmtp/capi.c ++++ b/net/bluetooth/cmtp/capi.c +@@ -383,7 +383,11 @@ static void cmtp_reset_ctr(struct capi_c + + BT_DBG("ctrl %p", ctrl); + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)) + capi_ctr_down(ctrl); ++#else ++ capi_ctr_reseted(ctrl); ++#endif + + atomic_inc(&session->terminate); + cmtp_schedule(session); +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -39,7 +39,12 @@ + #include + #include + #include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) + #include ++#else ++#include ++#endif ++ + #include + + #include +--- a/net/bluetooth/hci_sock.c ++++ b/net/bluetooth/hci_sock.c +@@ -471,7 +471,11 @@ drop: + goto done; + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) + static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user 
*optval, unsigned int len) ++#else ++static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int len) ++#endif + { + struct hci_ufilter uf = { .opcode = 0 }; + struct sock *sk = sock->sk; +@@ -626,8 +630,12 @@ static struct proto hci_sk_proto = { + .obj_size = sizeof(struct hci_pinfo) + }; + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + static int hci_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) ++#else ++static int hci_sock_create(struct net *net, struct socket *sock, int protocol) ++#endif + { + struct sock *sk; + +--- a/net/bluetooth/hci_sysfs.c ++++ b/net/bluetooth/hci_sysfs.c +@@ -68,7 +68,11 @@ static struct attribute_group bt_link_gr + .attrs = bt_link_attrs, + }; + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) + static const struct attribute_group *bt_link_groups[] = { ++#else ++static struct attribute_group *bt_link_groups[] = { ++#endif + &bt_link_group, + NULL + }; +@@ -126,7 +130,11 @@ static void del_conn(struct work_struct + dev = device_find_child(&conn->dev, NULL, __match_tty); + if (!dev) + break; ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)) + device_move(dev, NULL, DPM_ORDER_DEV_LAST); ++#else ++ device_move(dev, NULL); ++#endif + put_device(dev); + } + +@@ -392,7 +400,11 @@ static struct attribute_group bt_host_gr + .attrs = bt_host_attrs, + }; + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) + static const struct attribute_group *bt_host_groups[] = { ++#else ++static struct attribute_group *bt_host_groups[] = { ++#endif + &bt_host_group, + NULL + }; +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -313,6 +313,7 @@ static int hidp_send_report(struct hidp_ + return hidp_queue_report(session, buf, rsize); + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, + unsigned char report_type) + { +@@ -332,6 +333,7 @@ static int hidp_output_raw_report(struct + return -ENOMEM; + return count; + } ++#endif + + static void hidp_idle_timeout(unsigned long arg) + { +@@ -596,10 +598,16 @@ static int hidp_session(void *arg) + session->input = NULL; + } + +- if (session->hid) { +- hid_destroy_device(session->hid); +- session->hid = NULL; +- } ++ if (session->hid) { ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) ++ hid_destroy_device(session->hid); ++ session->hid = NULL; ++#else ++ if (session->hid->claimed & HID_CLAIMED_INPUT) ++ hidinput_disconnect(session->hid); ++ hid_free_device(session->hid); ++#endif ++ } + + /* Wakeup user-space polling for socket errors */ + session->intr_sock->sk->sk_err = EUNATCH; +@@ -711,6 +719,70 @@ static void hidp_close(struct hid_device + { + } + ++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) ++static const struct { ++ __u16 idVendor; ++ __u16 idProduct; ++ unsigned quirks; ++} hidp_blacklist[] = { ++ /* Apple wireless Mighty Mouse */ ++ { 0x05ac, 0x030c, HID_QUIRK_MIGHTYMOUSE | HID_QUIRK_INVERT_HWHEEL }, ++ ++ { } /* Terminating entry */ ++}; ++static void hidp_setup_quirks(struct hid_device *hid) ++{ ++ unsigned int n; ++ ++ for (n = 0; hidp_blacklist[n].idVendor; n++) ++ if (hidp_blacklist[n].idVendor == le16_to_cpu(hid->vendor) && ++ hidp_blacklist[n].idProduct == le16_to_cpu(hid->product)) ++ hid->quirks = hidp_blacklist[n].quirks; ++} ++ ++static void hidp_setup_hid(struct hidp_session *session, ++ struct hidp_connadd_req *req) ++{ ++ struct hid_device *hid = session->hid; ++ struct hid_report *report; ++ bdaddr_t src, dst; 
++ ++ session->hid = hid; ++ ++ hid->driver_data = session; ++ ++ baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); ++ baswap(&dst, &bt_sk(session->ctrl_sock->sk)->dst); ++ ++ hid->bus = BUS_BLUETOOTH; ++ hid->vendor = req->vendor; ++ hid->product = req->product; ++ hid->version = req->version; ++ hid->country = req->country; ++ ++ strncpy(hid->name, req->name, 128); ++ strncpy(hid->phys, batostr(&src), 64); ++ strncpy(hid->uniq, batostr(&dst), 64); ++ ++ hid->dev = hidp_get_device(session); ++ hid->hid_open = hidp_open; ++ hid->hid_close = hidp_close; ++ ++ hid->hidinput_input_event = hidp_hidinput_event; ++ ++ hidp_setup_quirks(hid); ++ ++ list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].report_list, list) ++ hidp_send_report(session, report); ++ ++ list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].report_list, list) ++ hidp_send_report(session, report); ++ ++ if (hidinput_connect(hid) == 0) ++ hid->claimed |= HID_CLAIMED_INPUT; ++} ++#else ++ + static int hidp_parse(struct hid_device *hid) + { + struct hidp_session *session = hid->driver_data; +@@ -815,6 +887,7 @@ fault: + + return err; + } ++#endif + + int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) + { +@@ -833,6 +906,39 @@ int hidp_add_connection(struct hidp_conn + + BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size); + ++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) ++ if (req->rd_size > 0) { ++ unsigned char *buf = kmalloc(req->rd_size, GFP_KERNEL); ++ ++ if (!buf) { ++ kfree(session); ++ return -ENOMEM; ++ } ++ ++ if (copy_from_user(buf, req->rd_data, req->rd_size)) { ++ kfree(buf); ++ kfree(session); ++ return -EFAULT; ++ } ++ ++ session->hid = hid_parse_report(buf, req->rd_size); ++ ++ kfree(buf); ++ ++ if (!session->hid) { ++ kfree(session); ++ return -EINVAL; ++ } ++ } ++ ++ if (!session->hid) { ++ session->input = input_allocate_device(); ++ if (!session->input) { ++ kfree(session); ++ return -ENOMEM; ++ } ++ } ++#endif + down_write(&hidp_session_sem); + + s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst); +@@ -860,6 +966,7 @@ int hidp_add_connection(struct hidp_conn + session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); + session->idle_to = req->idle_to; + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + if (req->rd_size > 0) { + err = hidp_setup_hid(session, req); + if (err && err != -ENODEV) +@@ -871,6 +978,16 @@ int hidp_add_connection(struct hidp_conn + if (err < 0) + goto purge; + } ++#else ++ if (session->input) { ++ err = hidp_setup_input(session, req); ++ if (err < 0) ++ goto failed; ++ } ++ ++ if (session->hid) ++ hidp_setup_hid(session, req); ++#endif + + __hidp_link_session(session); + +@@ -902,6 +1019,7 @@ unlink: + session->input = NULL; + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + if (session->hid) { + hid_destroy_device(session->hid); + session->hid = NULL; +@@ -913,10 +1031,15 @@ unlink: + purge: + skb_queue_purge(&session->ctrl_transmit); + skb_queue_purge(&session->intr_transmit); ++#endif + + failed: + up_write(&hidp_session_sem); + ++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) ++ if (session->hid) ++ hid_free_device(session->hid); ++#endif + input_free_device(session->input); + kfree(session); + return err; +@@ -1006,6 +1129,7 @@ int hidp_get_conninfo(struct hidp_connin + return err; + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + static const struct hid_device_id hidp_table[] = { + { HID_BLUETOOTH_DEVICE(HID_ANY_ID, HID_ANY_ID) }, + { } +@@ -1015,6 
+1139,7 @@ static struct hid_driver hidp_driver = { + .name = "generic-bluetooth", + .id_table = hidp_table, + }; ++#endif + + static int __init hidp_init(void) + { +@@ -1024,11 +1149,14 @@ static int __init hidp_init(void) + + BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + ret = hid_register_driver(&hidp_driver); + if (ret) + goto err; ++#endif + + ret = hidp_init_sockets(); ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + if (ret) + goto err_drv; + +@@ -1036,13 +1164,16 @@ static int __init hidp_init(void) + err_drv: + hid_unregister_driver(&hidp_driver); + err: ++#endif + return ret; + } + + static void __exit hidp_exit(void) + { + hidp_cleanup_sockets(); ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) + hid_unregister_driver(&hidp_driver); ++#endif + } + + module_init(hidp_init); +--- a/net/bluetooth/rfcomm/sock.c ++++ b/net/bluetooth/rfcomm/sock.c +@@ -323,8 +323,13 @@ static struct sock *rfcomm_sock_alloc(st + return sk; + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + static int rfcomm_sock_create(struct net *net, struct socket *sock, + int protocol, int kern) ++#else ++static int rfcomm_sock_create(struct net *net, struct socket *sock, ++ int protocol) ++#endif + { + struct sock *sk; + +@@ -704,7 +709,11 @@ static int rfcomm_sock_recvmsg(struct ki + copied += chunk; + size -= chunk; + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + sock_recv_ts_and_drops(msg, sk, skb); ++#else ++ sock_recv_timestamp(msg, sk, skb); ++#endif + + if (!(flags & MSG_PEEK)) { + atomic_sub(chunk, &sk->sk_rmem_alloc); +@@ -767,7 +776,11 @@ static int rfcomm_sock_setsockopt_old(st + return err; + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) + static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) ++#else ++static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) ++#endif + { + struct sock *sk = sock->sk; + struct bt_security sec; +--- a/net/bluetooth/rfcomm/tty.c ++++ b/net/bluetooth/rfcomm/tty.c +@@ -731,8 +731,12 @@ static int rfcomm_tty_open(struct tty_st + remove_wait_queue(&dev->wait, &wait); + + if (err == 0) ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)) + device_move(dev->tty_dev, rfcomm_get_device(dev), + DPM_ORDER_DEV_AFTER_PARENT); ++#else ++ device_move(dev->tty_dev, rfcomm_get_device(dev)); ++#endif + + rfcomm_tty_copy_pending(dev); + +@@ -752,7 +756,11 @@ static void rfcomm_tty_close(struct tty_ + + if (atomic_dec_and_test(&dev->opened)) { + if (dev->tty_dev->parent) ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)) + device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); ++#else ++ device_move(dev->tty_dev, NULL); ++#endif + + /* Close DLC and dettach TTY */ + rfcomm_dlc_close(dev->dlc, 0); +--- a/net/bluetooth/sco.c ++++ b/net/bluetooth/sco.c +@@ -430,8 +430,12 @@ static struct sock *sco_sock_alloc(struc + return sk; + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + static int sco_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) ++#else ++static int sco_sock_create(struct net *net, struct socket *sock, int protocol) ++#endif + { + struct sock *sk; + +@@ -645,7 +649,11 @@ static int sco_sock_sendmsg(struct kiocb + return err; + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) + static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) ++#else ++static int 
sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) ++#endif + { + struct sock *sk = sock->sk; + int err = 0; +--- a/net/bluetooth/bnep/sock.c ++++ b/net/bluetooth/bnep/sock.c +@@ -195,8 +195,12 @@ static struct proto bnep_proto = { + .obj_size = sizeof(struct bt_sock) + }; + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + static int bnep_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) ++#else ++static int bnep_sock_create(struct net *net, struct socket *sock, int protocol) ++#endif + { + struct sock *sk; + +--- a/net/bluetooth/cmtp/sock.c ++++ b/net/bluetooth/cmtp/sock.c +@@ -190,8 +190,12 @@ static struct proto cmtp_proto = { + .obj_size = sizeof(struct bt_sock) + }; + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) ++#else ++static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol) ++#endif + { + struct sock *sk; + +--- a/net/bluetooth/hidp/sock.c ++++ b/net/bluetooth/hidp/sock.c +@@ -241,8 +241,12 @@ static struct proto hidp_proto = { + .obj_size = sizeof(struct bt_sock) + }; + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + static int hidp_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) ++#else ++static int hidp_sock_create(struct net *net, struct socket *sock, int protocol) ++#endif + { + struct sock *sk; + +--- a/net/bluetooth/l2cap.c ++++ b/net/bluetooth/l2cap.c +@@ -822,8 +822,12 @@ static struct sock *l2cap_sock_alloc(str + return sk; + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) ++#else ++static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol) ++#endif + { + struct sock *sk; + +@@ -835,7 +839,11 @@ static int l2cap_sock_create(struct net + sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)) + if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) ++#else ++ if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW)) ++#endif + return -EPERM; + + sock->ops = &l2cap_sock_ops; +@@ -1764,7 +1772,11 @@ static int l2cap_sock_setsockopt_old(str + return err; + } + ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)) + static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) ++#else ++static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) ++#endif + { + struct sock *sk = sock->sk; + struct bt_security sec; diff --git a/patches/17-netdev-queue.patch b/patches/17-netdev-queue.patch new file mode 100644 index 0000000..34224e5 --- /dev/null +++ b/patches/17-netdev-queue.patch @@ -0,0 +1,47 @@ +This patch addresses changes made by usage of new symbols +like unregister_netdevice_queue() which are not possible to backport +due to their reliance on internal symbols on net/core/dev.c + +The patch that introduced this on mac80211 was: + + mac80211: Speedup ieee80211_remove_interfaces() + + Speedup ieee80211_remove_interfaces() by factorizing synchronize_rcu() calls + + Signed-off-by: Eric Dumazet + Reviewed-by: Johannes Berg + Signed-off-by: John W. 
Linville + +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -954,6 +954,7 @@ void ieee80211_if_remove(struct ieee8021 + * Remove all interfaces, may only be called at hardware unregistration + * time because it doesn't do RCU-safe list removals. + */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + void ieee80211_remove_interfaces(struct ieee80211_local *local) + { + struct ieee80211_sub_if_data *sdata, *tmp; +@@ -970,6 +971,22 @@ void ieee80211_remove_interfaces(struct + mutex_unlock(&local->iflist_mtx); + unregister_netdevice_many(&unreg_list); + } ++#else ++void ieee80211_remove_interfaces(struct ieee80211_local *local) ++{ ++ struct ieee80211_sub_if_data *sdata, *tmp; ++ ++ ASSERT_RTNL(); ++ ++ list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { ++ mutex_lock(&local->iflist_mtx); ++ list_del(&sdata->list); ++ mutex_unlock(&local->iflist_mtx); ++ ++ unregister_netdevice(sdata->dev); ++ } ++} ++#endif + + static u32 ieee80211_idle_off(struct ieee80211_local *local, + const char *reason) diff --git a/patches/18-rename-usb-net-symbols.patch b/patches/18-rename-usb-net-symbols.patch new file mode 100644 index 0000000..5ac90e2 --- /dev/null +++ b/patches/18-rename-usb-net-symbols.patch @@ -0,0 +1,52 @@ +Rename config names for usbnet to deactivate them also if activated in +the main kernel configuration. This is needed because +usb_autopm_put_interface_async and usb_autopm_get_interface_async are +not backported to kernel 2.6.28 and earlier. +Remove this patch if these symbols are backported. + +--- a/drivers/net/usb/Makefile ++++ b/drivers/net/usb/Makefile +@@ -8,18 +8,18 @@ obj-$(CONFIG_USB_PEGASUS) += pegasus.o + obj-$(CONFIG_USB_RTL8150) += rtl8150.o + obj-$(CONFIG_USB_HSO) += hso.o + obj-$(CONFIG_USB_NET_AX8817X) += asix.o +-obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o ++obj-$(CONFIG_USB_NET_COMPAT_CDCETHER) += cdc_ether.o + obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o + obj-$(CONFIG_USB_NET_DM9601) += dm9601.o + obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o + obj-$(CONFIG_USB_NET_GL620A) += gl620a.o + obj-$(CONFIG_USB_NET_NET1080) += net1080.o + obj-$(CONFIG_USB_NET_PLUSB) += plusb.o +-obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o ++obj-$(CONFIG_USB_NET_COMPAT_RNDIS_HOST) += rndis_host.o + obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o + obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o + obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o +-obj-$(CONFIG_USB_USBNET) += usbnet.o ++obj-$(CONFIG_USB_COMPAT_USBNET) += usbnet.o + obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o + obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o + +--- a/drivers/net/usb/cdc_ether.c ++++ b/drivers/net/usb/cdc_ether.c +@@ -33,7 +33,7 @@ + #include + + +-#if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE) ++#if defined(CONFIG_USB_NET_COMPAT_RNDIS_HOST) || defined(CONFIG_USB_NET_COMPAT_RNDIS_HOST_MODULE) + + static int is_rndis(struct usb_interface_descriptor *desc) + { +--- a/drivers/net/wireless/Makefile ++++ b/drivers/net/wireless/Makefile +@@ -29,7 +29,7 @@ obj-$(CONFIG_RTL8187) += rtl818x/ + obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o + obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o + +-obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o ++obj-$(CONFIG_USB_NET_COMPAT_RNDIS_WLAN) += rndis_wlan.o + + obj-$(CONFIG_USB_ZD1201) += zd1201.o + obj-$(CONFIG_LIBERTAS) += libertas/ diff --git a/patches/19-kfifo.patch b/patches/19-kfifo.patch new file mode 100644 index 0000000..ea3821c --- /dev/null +++ b/patches/19-kfifo.patch @@ -0,0 +1,47 @@ +These parts of the new generic kernel FIFO 
implementation (kfifo) can +not be backported easily with defines in the compat module. + +--- a/drivers/net/wireless/libertas/dev.h ++++ b/drivers/net/wireless/libertas/dev.h +@@ -118,7 +118,11 @@ struct lbs_private { + u32 resp_len[2]; + + /* Events sent from hardware to driver */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + struct kfifo event_fifo; ++#else ++ struct kfifo *event_fifo; ++#endif + + /** thread to service interrupts */ + struct task_struct *main_thread; +--- a/drivers/net/wireless/libertas/main.c ++++ b/drivers/net/wireless/libertas/main.c +@@ -855,8 +855,14 @@ static int lbs_init_adapter(struct lbs_p + priv->resp_len[0] = priv->resp_len[1] = 0; + + /* Create the event FIFO */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL); + if (ret) { ++#else ++ priv->event_fifo = kfifo_alloc(sizeof(u32) * 16, GFP_KERNEL, NULL); ++ if (IS_ERR(priv->event_fifo)) { ++ ret = -ENOMEM; ++#endif + lbs_pr_err("Out of memory allocating event FIFO buffer\n"); + goto out; + } +@@ -872,7 +878,12 @@ static void lbs_free_adapter(struct lbs_ + lbs_deb_enter(LBS_DEB_MAIN); + + lbs_free_cmd_buffer(priv); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + kfifo_free(&priv->event_fifo); ++#else ++ if (priv->event_fifo) ++ kfifo_free(priv->event_fifo); ++#endif + del_timer(&priv->command_timer); + del_timer(&priv->auto_deepsleep_timer); + kfree(priv->networks); diff --git a/patches/20-pcidev.patch b/patches/20-pcidev.patch new file mode 100644 index 0000000..cb7f1ca --- /dev/null +++ b/patches/20-pcidev.patch @@ -0,0 +1,18 @@ +Older kernels than 2.6.24 do not have the is_pcie member as part of +the struct pci_dev, to help with that we use the compat-2.6.24.c supplied +compat_is_pcie() when needed. 
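For reference, the fallback can be pictured as a small helper along these lines -- an illustrative sketch only, based on the standard PCI Express capability lookup, and not necessarily the literal compat-2.6.24.c code:

    #include <linux/pci.h>

    /* Sketch of a compat_is_pcie() fallback for kernels without pdev->is_pcie. */
    static inline int compat_is_pcie(struct pci_dev *pdev)
    {
            /* A device is PCIe if it exposes the PCI Express capability. */
            return pci_find_capability(pdev, PCI_CAP_ID_EXP) != 0;
    }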
+
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -79,7 +79,11 @@ static void ath_pci_bt_coex_prep(struct
+ struct pci_dev *pdev = to_pci_dev(sc->dev);
+ u8 aspm;
+
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
+ if (!pdev->is_pcie)
++#else
++ if (!compat_is_pcie(pdev))
++#endif
+ return;
+
+ pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm);
diff --git a/patches/21-capi-proc_fops.patch b/patches/21-capi-proc_fops.patch
new file mode 100644
index 0000000..2f2ca7d
--- /dev/null
+++ b/patches/21-capi-proc_fops.patch
@@ -0,0 +1,73 @@
+Backport kernel patch 9a58a80a701bdb2d220cdab4914218df5b48d781
+proc_fops: convert drivers/isdn/ to seq_file
+
+--- a/net/bluetooth/cmtp/capi.c
++++ b/net/bluetooth/cmtp/capi.c
+@@ -21,8 +21,10 @@
+ */
+
+ #include
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+ #include
+ #include
++#endif
+ #include
+ #include
+ #include
+@@ -521,6 +523,7 @@ static char *cmtp_procinfo(struct capi_c
+ return "CAPI Message Transport Protocol";
+ }
+
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+ static int cmtp_proc_show(struct seq_file *m, void *v)
+ {
+ struct capi_ctr *ctrl = m->private;
+@@ -553,6 +556,36 @@ static const struct file_operations cmtp
+ .release = single_release,
+ };
+
++#else
++
++static int cmtp_ctr_read_proc(char *page, char **start, off_t off, int count, int *eof, struct capi_ctr *ctrl)
++{
++ struct cmtp_session *session = ctrl->driverdata;
++ struct cmtp_application *app;
++ struct list_head *p, *n;
++ int len = 0;
++
++ len += sprintf(page + len, "%s\n\n", cmtp_procinfo(ctrl));
++ len += sprintf(page + len, "addr %s\n", session->name);
++ len += sprintf(page + len, "ctrl %d\n", session->num);
++
++ list_for_each_safe(p, n, &session->applications) {
++ app = list_entry(p, struct cmtp_application, list);
++ len += sprintf(page + len, "appl %d -> %d\n", app->appl, app->mapping);
++ }
++
++ if (off + count >= len)
++ *eof = 1;
++
++ if (len < off)
++ return 0;
++
++ *start = page + off;
++
++ return ((count < len - off) ? count : len - off);
++}
++#endif
++
+ int cmtp_attach_device(struct cmtp_session *session)
+ {
+ unsigned char buf[4];
+@@ -591,7 +624,11 @@ int cmtp_attach_device(struct cmtp_sessi
+ session->ctrl.send_message = cmtp_send_message;
+
+ session->ctrl.procinfo = cmtp_procinfo;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
+ session->ctrl.proc_fops = &cmtp_proc_fops;
++#else
++ session->ctrl.ctr_read_proc = cmtp_ctr_read_proc;
++#endif
+
+ if (attach_capi_ctr(&session->ctrl) < 0) {
+ BT_ERR("Can't attach new controller");
diff --git a/patches/22-multiqueue.patch b/patches/22-multiqueue.patch
new file mode 100644
index 0000000..421c32f
--- /dev/null
+++ b/patches/22-multiqueue.patch
@@ -0,0 +1,161 @@
+Backport multiqueue support for kernels < 2.6.27
+
+The 2.6.23 kernel added some initial multiqueue support.
+That release relied on the notion of struct
+net_device_subqueue attached to the netdevice struct
+as an array. The 2.6.27 release renamed these to struct netdev_queue,
+and enhanced MQ support by providing separate locks for
+each queue. MQ support on 2.6.27 also extended each netdev
+to be able to assign a select_queue callback to be used by
+core networking prior to pushing the skb out to the device
+driver so that queue selection can be dealt with and
+customized internally by the driver.
+
+For kernels 2.6.23..2.6.26 we backport MQ support by
+using the equivalent struct net_device_subqueue calls in
+place of the struct netdev_queue ones. 
The performance penalty +here is just that all these queues share a common lock +so stateful operations on one queue would imply a delay +on other queues. + +For older kernels than 2.6.23 we can only stop all the +queues then and wake them up only if no other queue had +been stopped previously. This means for kernels older +than 2.6.23 there is a performance penalty and congestion +on one queue would imply propagating the same congestion +impact on all the other queues. + +The select_queue callback was only added as of 2.6.27 via +commit eae792b7 so for kernels older than 2.6.23 and up +to 2.6.27 we must ensure we do the selection of the queue +once the core networking calls mac80211's dev_hard_start_xmit() +(ndo_start_xmit() callback on newer kernels). + +This patch then consists of three parts: + + 1) Addresses the lack of select_queue on older kernels than 2.6.27 + 2) Extends the backport of net_device_ops for select_queue for kernels >= 2.6.27 + 3) Backporting wake/stop queue for older kernels: + - Handle with net_device_subqueue for >= 2.6.23 + - Treat each queue operation as an aggregate for all queues + +Monitor interfaces have their own select_queue -- monitor interfaces +are used for injecting frames so they have their own respective queue +handling, but mac80211 just always sends management frames on VO +queue by using skb_set_queue_mapping(skb, 0) through ieee80211_tx_skb() + +--- a/net/mac80211/util.c ++++ b/net/mac80211/util.c +@@ -266,6 +266,18 @@ __le16 ieee80211_ctstoself_duration(stru + } + EXPORT_SYMBOL(ieee80211_ctstoself_duration); + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) ++static bool ieee80211_all_queues_started(struct ieee80211_hw *hw) ++{ ++ unsigned int queue; ++ ++ for (queue = 0; queue < hw->queues; queue++) ++ if (ieee80211_queue_stopped(hw, queue)) ++ return false; ++ return true; ++} ++#endif ++ + static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, + enum queue_stop_reason reason) + { +@@ -286,7 +298,14 @@ static void __ieee80211_wake_queue(struc + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) ++ netif_start_subqueue(sdata->dev, queue); ++#else ++ if (ieee80211_all_queues_started(hw)) ++ netif_wake_queue(sdata->dev); ++#endif + rcu_read_unlock(); + } + +@@ -321,7 +340,13 @@ static void __ieee80211_stop_queue(struc + + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue)); ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) ++ netif_stop_subqueue(sdata->dev, queue); ++#else ++ netif_stop_queue(sdata->dev); ++#endif + rcu_read_unlock(); + } + +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -1564,6 +1564,10 @@ static void ieee80211_xmit(struct ieee80 + return; + } + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) ++ /* Older kernels do not have the select_queue callback */ ++ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb)); ++#endif + ieee80211_set_qos_hdr(local, skb); + ieee80211_tx(sdata, skb, false); + rcu_read_unlock(); +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -660,11 +660,13 @@ static void ieee80211_teardown_sdata(str + WARN_ON(flushed); + } + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + static u16 ieee80211_netdev_select_queue(struct 
net_device *dev,
+ struct sk_buff *skb)
+ {
+ return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
+ }
++#endif
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+ static const struct net_device_ops ieee80211_dataif_ops = {
+@@ -679,6 +681,7 @@ static const struct net_device_ops ieee8
+ };
+ #endif
+
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
+ struct sk_buff *skb)
+ {
+@@ -711,6 +714,7 @@ static u16 ieee80211_monitor_select_queu
+
+ return ieee80211_downgrade_queue(local, skb);
+ }
++#endif
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+ static const struct net_device_ops ieee80211_monitorif_ops = {
+@@ -735,7 +739,9 @@ static void ieee80211_if_setup(struct ne
+ dev->set_multicast_list = ieee80211_set_multicast_list;
+ dev->change_mtu = ieee80211_change_mtu;
+ dev->set_mac_address = ieee80211_change_mac;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+ dev->select_queue = ieee80211_netdev_select_queue;
++#endif
+ dev->open = ieee80211_open;
+ dev->stop = ieee80211_stop;
+ /* we will validate the address ourselves in ->open */
+@@ -787,7 +793,9 @@ static void ieee80211_setup_sdata(struct
+ sdata->dev->netdev_ops = &ieee80211_monitorif_ops;
+ #else
+ sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+ sdata->dev->select_queue = ieee80211_monitor_select_queue;
++#endif
+ sdata->dev->set_mac_address = eth_mac_addr;
+ #endif
+ sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
diff --git a/patches/98-add-compat-wireless.patch b/patches/98-add-compat-wireless.patch
new file mode 100644
index 0000000..5e637ad
--- /dev/null
+++ b/patches/98-add-compat-wireless.patch
@@ -0,0 +1,30 @@
+
+This patch file includes the changes required to add
+compat-wireless support. Besides all the patches in this
+directory we have our own compat-2.6.*.[ch] files which
+help backport more functionality. The respective compat-2.6.2x.c
+files get compiled and enabled if you are on an older kernel
+to bring in support for new features when possible.
+
+Lastly, the cfg80211.h change is what allows us to
+get all compat-2.6.*.h files included. It is our only
+point of entry into the compat code. It works because
+all drivers we are supporting there one way
+or another depend on cfg80211. New wireless drivers
+are either mac80211 drivers or cfg80211 drivers, and
+all mac80211 drivers are cfg80211 drivers.
+If your driver is not a cfg80211 driver you must
+ensure you include compat.h yourself. I think we
+added compat.h also for ssb, I forget.
+
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -23,6 +23,7 @@
+ /* remove once we remove the wext stuff */
+ #include
+ #include
++#include
+
+
+ /*
diff --git a/patches/99-change-makefiles.patch b/patches/99-change-makefiles.patch
new file mode 100644
index 0000000..2fe68e5
--- /dev/null
+++ b/patches/99-change-makefiles.patch
@@ -0,0 +1,90 @@
+
+This patch removes all drivers we do not support
+or do not want to support. This lets us build 
+ +--- a/drivers/misc/eeprom/Makefile ++++ b/drivers/misc/eeprom/Makefile +@@ -1,5 +1 @@ +-obj-$(CONFIG_EEPROM_AT24) += at24.o +-obj-$(CONFIG_EEPROM_AT25) += at25.o +-obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o +-obj-$(CONFIG_EEPROM_MAX6875) += max6875.o + obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o +--- a/drivers/net/usb/Makefile ++++ b/drivers/net/usb/Makefile +@@ -2,24 +2,7 @@ + # Makefile for USB Network drivers + # + +-obj-$(CONFIG_USB_CATC) += catc.o +-obj-$(CONFIG_USB_KAWETH) += kaweth.o +-obj-$(CONFIG_USB_PEGASUS) += pegasus.o +-obj-$(CONFIG_USB_RTL8150) += rtl8150.o +-obj-$(CONFIG_USB_HSO) += hso.o +-obj-$(CONFIG_USB_NET_AX8817X) += asix.o + obj-$(CONFIG_USB_NET_COMPAT_CDCETHER) += cdc_ether.o +-obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o +-obj-$(CONFIG_USB_NET_DM9601) += dm9601.o +-obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o +-obj-$(CONFIG_USB_NET_GL620A) += gl620a.o +-obj-$(CONFIG_USB_NET_NET1080) += net1080.o +-obj-$(CONFIG_USB_NET_PLUSB) += plusb.o + obj-$(CONFIG_USB_NET_COMPAT_RNDIS_HOST) += rndis_host.o +-obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o +-obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o +-obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o + obj-$(CONFIG_USB_COMPAT_USBNET) += usbnet.o +-obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o +-obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o + +--- a/drivers/net/wireless/Makefile ++++ b/drivers/net/wireless/Makefile +@@ -5,33 +5,16 @@ + obj-$(CONFIG_IPW2100) += ipw2x00/ + obj-$(CONFIG_IPW2200) += ipw2x00/ + +-obj-$(CONFIG_HERMES) += orinoco/ +- +-obj-$(CONFIG_AIRO) += airo.o +-obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o +- +-obj-$(CONFIG_ATMEL) += atmel.o +-obj-$(CONFIG_PCI_ATMEL) += atmel_pci.o +-obj-$(CONFIG_PCMCIA_ATMEL) += atmel_cs.o +- + obj-$(CONFIG_AT76C50X_USB) += at76c50x-usb.o + +-obj-$(CONFIG_PRISM54) += prism54/ +- +-obj-$(CONFIG_HOSTAP) += hostap/ + obj-$(CONFIG_B43) += b43/ + obj-$(CONFIG_B43LEGACY) += b43legacy/ + obj-$(CONFIG_ZD1211RW) += zd1211rw/ + obj-$(CONFIG_RTL8180) += rtl818x/ + obj-$(CONFIG_RTL8187) += rtl818x/ + +-# 16-bit wireless PCMCIA client drivers +-obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o +-obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o +- + obj-$(CONFIG_USB_NET_COMPAT_RNDIS_WLAN) += rndis_wlan.o + +-obj-$(CONFIG_USB_ZD1201) += zd1201.o + obj-$(CONFIG_LIBERTAS) += libertas/ + + obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/ +--- a/net/wireless/Makefile ++++ b/net/wireless/Makefile +@@ -4,11 +4,6 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib8 + obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o + obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o + +-obj-$(CONFIG_WEXT_CORE) += wext-core.o +-obj-$(CONFIG_WEXT_PROC) += wext-proc.o +-obj-$(CONFIG_WEXT_SPY) += wext-spy.o +-obj-$(CONFIG_WEXT_PRIV) += wext-priv.o +- + cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o + cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o + cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o diff --git a/patches/README b/patches/README new file mode 100644 index 0000000..e048cb4 --- /dev/null +++ b/patches/README @@ -0,0 +1,16 @@ + +compat-wireless patches +======================= + +You must have a really good reason to be adding files +in this directory. Your reasoning should either match the +explanation already present on the top of each patch file +or you should add your own. + +We try to avoid having patch files because: + + * Its a pain in the ass to maintain them. + + * Most backport changes can be pulled off through + some macro magic or new files which implement + the new functionality on the old kernels. 
diff --git a/scripts/admin-clean.sh b/scripts/admin-clean.sh new file mode 100755 index 0000000..ae5f188 --- /dev/null +++ b/scripts/admin-clean.sh @@ -0,0 +1,13 @@ +#!/bin/bash +if [ -d net ] ; then + make clean +fi +rm -rf net drivers include +rm -rf drivers +rm -rf include +rm -f Module.symvers +rm -f git-describe +rm -f master-tag +rm -f compat-git-release +rm -f compat-release +echo "Cleaned wireless-bt-compat-2.6" diff --git a/scripts/admin-refresh.sh b/scripts/admin-refresh.sh new file mode 100755 index 0000000..5848b1e --- /dev/null +++ b/scripts/admin-refresh.sh @@ -0,0 +1,3 @@ +#!/bin/bash +./scripts/admin-clean.sh $@ +./scripts/admin-update.sh $@ diff --git a/scripts/admin-update.sh b/scripts/admin-update.sh new file mode 100755 index 0000000..c75af30 --- /dev/null +++ b/scripts/admin-update.sh @@ -0,0 +1,338 @@ +#!/bin/bash +# +# Copyright 2007, 2008 Luis R. Rodriguez +# +# Use this to update compat-wireless-2.6 to the latest +# wireless-testing.git tree you have. +# +# Usage: you should have the latest pull of wireless-2.6.git +# git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git +# We assume you have it on your ~/devel/wireless-testing/ directory. If you do, +# just run this script from the compat-wireless-2.6 directory. +# You can specify where your GIT_TREE is by doing: +# +# export GIT_TREE=/home/mcgrof/wireless-testing/ +# +# for example +# +GIT_URL="git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git" +GIT_COMPAT_URL="git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/compat.git" + +INCLUDE_NET_BT="hci_core.h l2cap.h bluetooth.h rfcomm.h hci.h" +NET_BT_DIRS="bluetooth bluetooth/bnep bluetooth/cmtp bluetooth/rfcomm bluetooth/hidp" + +INCLUDE_LINUX="ieee80211.h nl80211.h wireless.h" +INCLUDE_LINUX="$INCLUDE_LINUX pci_ids.h eeprom_93cx6.h" +INCLUDE_LINUX="$INCLUDE_LINUX ath9k_platform.h" + +# For rndis_wext +INCLUDE_LINUX_USB="usbnet.h rndis_host.h" + +INCLUDE_LINUX_SPI="wl12xx.h libertas_spi.h" + +# The good new yummy stuff +INCLUDE_NET="cfg80211.h ieee80211_radiotap.h" +INCLUDE_NET="$INCLUDE_NET mac80211.h wext.h lib80211.h regulatory.h" + +# Pretty colors +GREEN="\033[01;32m" +YELLOW="\033[01;33m" +NORMAL="\033[00m" +BLUE="\033[34m" +RED="\033[31m" +PURPLE="\033[35m" +CYAN="\033[36m" +UNDERLINE="\033[02m" + +NET_DIRS="wireless mac80211 rfkill" +# User exported this variable +if [ -z $GIT_TREE ]; then + GIT_TREE="/home/$USER/linux-next/" + if [ ! -d $GIT_TREE ]; then + echo "Please tell me where your linux-next git tree is." + echo "You can do this by exporting its location as follows:" + echo + echo " export GIT_TREE=/home/$USER/linux-next/" + echo + echo "If you do not have one you can clone the repository:" + echo " git-clone $GIT_URL" + exit 1 + fi +else + echo "You said to use git tree at: $GIT_TREE for linux-next" +fi + +if [ -z $GIT_COMPAT_TREE ]; then + GIT_COMPAT_TREE="/home/$USER/compat/" + if [ ! -d $GIT_COMPAT_TREE ]; then + echo "Please tell me where your compat git tree is." 
+ echo "You can do this by exporting its location as follows:" + echo + echo " export GIT_COMPAT_TREE=/home/$USER/compat/" + echo + echo "If you do not have one you can clone the repository:" + echo " git-clone $GIT_COMPAT_URL" + exit 1 + fi +else + echo "You said to use git tree at: $GIT_COMPAT_TREE for compat" +fi + +# Drivers that have their own directory +DRIVERS="drivers/net/wireless/ath" +DRIVERS="$DRIVERS drivers/net/wireless/ath/ar9170" +DRIVERS="$DRIVERS drivers/net/wireless/ath/ath5k" +DRIVERS="$DRIVERS drivers/net/wireless/ath/ath9k" +DRIVERS="$DRIVERS drivers/ssb" +DRIVERS="$DRIVERS drivers/net/wireless/b43" +DRIVERS="$DRIVERS drivers/net/wireless/b43legacy" +DRIVERS="$DRIVERS drivers/net/wireless/iwlwifi" +DRIVERS="$DRIVERS drivers/net/wireless/rt2x00" +DRIVERS="$DRIVERS drivers/net/wireless/zd1211rw" +DRIVERS="$DRIVERS drivers/net/wireless/libertas" +DRIVERS="$DRIVERS drivers/net/wireless/p54" +DRIVERS="$DRIVERS drivers/net/wireless/rtl818x" +DRIVERS="$DRIVERS drivers/net/wireless/libertas_tf" +DRIVERS="$DRIVERS drivers/net/wireless/ipw2x00" +DRIVERS="$DRIVERS drivers/net/wireless/wl12xx" +DRIVERS="$DRIVERS drivers/net/wireless/iwmc3200wifi" + +# Ethernet drivers +DRIVERS="$DRIVERS drivers/net/atl1c" +DRIVERS="$DRIVERS drivers/net/atl1e" +DRIVERS="$DRIVERS drivers/net/atlx" + +# Bluetooth drivers +DRIVERS_BT="drivers/bluetooth" + +# Drivers that belong the the wireless directory +DRIVER_FILES="adm8211.c adm8211.h" +DRIVER_FILES="$DRIVER_FILES rndis_wlan.c" +DRIVER_FILES="$DRIVER_FILES mac80211_hwsim.c" +DRIVER_FILES="$DRIVER_FILES at76c50x-usb.c at76c50x-usb.h" +DRIVER_FILES="$DRIVER_FILES mwl8k.c" + +mkdir -p include/linux/ include/net/ include/linux/usb \ + include/linux/unaligned \ + include/linux/spi \ + net/mac80211/ net/wireless/ \ + net/rfkill/ \ + drivers/ssb/ \ + drivers/net/usb/ \ + drivers/net/wireless/ +mkdir -p include/net/bluetooth/ + +# include/linux +DIR="include/linux" +for i in $INCLUDE_LINUX; do + echo "Copying $GIT_TREE/$DIR/$i" + cp "$GIT_TREE/$DIR/$i" $DIR/ +done + +cp -a $GIT_TREE/include/linux/ssb include/linux/ +cp -a $GIT_TREE/include/linux/rfkill.h include/linux/rfkill_backport.h + +# include/net +DIR="include/net" +for i in $INCLUDE_NET; do + echo "Copying $GIT_TREE/$DIR/$i" + cp "$GIT_TREE/$DIR/$i" $DIR/ +done + +DIR="include/net/bluetooth" +for i in $INCLUDE_NET_BT; do + echo "Copying $GIT_TREE/$DIR/$i" + cp $GIT_TREE/$DIR/$i $DIR/ +done + +DIR="include/linux/usb" +for i in $INCLUDE_LINUX_USB; do + echo "Copying $GIT_TREE/$DIR/$i" + cp $GIT_TREE/$DIR/$i $DIR/ +done + +DIR="include/linux/spi" +for i in $INCLUDE_LINUX_SPI; do + echo "Copying $GIT_TREE/$DIR/$i" + cp $GIT_TREE/$DIR/$i $DIR/ +done + +# net/wireless and net/mac80211 +for i in $NET_DIRS; do + echo "Copying $GIT_TREE/net/$i/*.[ch]" + cp $GIT_TREE/net/$i/*.[ch] net/$i/ + cp $GIT_TREE/net/$i/Makefile net/$i/ + rm -f net/$i/*.mod.c +done + +# net/bluetooth +for i in $NET_BT_DIRS; do + mkdir -p net/$i + echo "Copying $GIT_TREE/net/$i/*.[ch]" + cp $GIT_TREE/net/$i/*.[ch] net/$i/ + cp $GIT_TREE/net/$i/Makefile net/$i/ + rm -f net/$i/*.mod.c +done + +# Drivers in their own directory +for i in $DRIVERS; do + mkdir -p $i + echo "Copying $GIT_TREE/$i/*.[ch]" + cp $GIT_TREE/$i/*.[ch] $i/ + cp $GIT_TREE/$i/Makefile $i/ + rm -f $i/*.mod.c +done + +for i in $DRIVERS_BT; do + mkdir -p $i + echo "Copying $GIT_TREE/$i/*.[ch]" + cp $GIT_TREE/$i/*.[ch] $i/ + cp $GIT_TREE/$i/Makefile $i/ + rm -f $i/*.mod.c +done + +# For rndis_wlan, we need a new rndis_host.ko, cdc_ether.ko and usbnet.ko 
+RNDIS_REQS="Makefile rndis_host.c cdc_ether.c usbnet.c" +DIR="drivers/net/usb" +for i in $RNDIS_REQS; do + echo "Copying $GIT_TREE/$DIR/$i" + cp $GIT_TREE/$DIR/$i $DIR/ +done + +DIR="drivers/net" +echo > $DIR/Makefile +cp $GIT_TREE/$DIR/b44.[ch] $DIR +# Not yet +echo "obj-\$(CONFIG_B44) += b44.o" >> $DIR/Makefile +echo "obj-\$(CONFIG_ATL1) += atlx/" >> $DIR/Makefile +echo "obj-\$(CONFIG_ATL2) += atlx/" >> $DIR/Makefile +echo "obj-\$(CONFIG_ATL1E) += atl1e/" >> $DIR/Makefile +echo "obj-\$(CONFIG_ATL1C) += atl1c/" >> $DIR/Makefile + +# Misc +mkdir -p drivers/misc/eeprom/ +cp $GIT_TREE/drivers/misc/eeprom/eeprom_93cx6.c drivers/misc/eeprom/ +cp $GIT_TREE/drivers/misc/eeprom/Makefile drivers/misc/eeprom/ + +DIR="drivers/net/wireless" +# Drivers part of the wireless directory +for i in $DRIVER_FILES; do + cp $GIT_TREE/$DIR/$i $DIR/ +done + +# Top level wireless driver Makefile +cp $GIT_TREE/$DIR/Makefile $DIR + +# Compat stuff +COMPAT="compat" +mkdir -p $COMPAT +echo "Copying $GIT_COMPAT_TREE/ files..." +cp $GIT_COMPAT_TREE/compat/*.c $COMPAT/ +cp $GIT_COMPAT_TREE/compat/Makefile $COMPAT/ +cp -a $GIT_COMPAT_TREE/udev/ . +cp -a $GIT_COMPAT_TREE/scripts/ $COMPAT/ +cp -a $GIT_COMPAT_TREE/include/linux/* include/linux/ +rm -f $COMPAT/*.mod.c + +# Refresh patches using quilt +patchRefresh() { + if [ -d patches.orig ] ; then + rm -rf .pc patches/series + else + mkdir patches.orig + fi + + export QUILT_PATCHES=$1 + + mv -u $1/* patches.orig/ + + for i in patches.orig/*.patch; do + if [ ! -f "$i" ]; then + echo -e "${RED}No patches found in $1${NORMAL}" + break; + fi + echo -e "${GREEN}Refresh backport patch${NORMAL}: ${BLUE}$i${NORMAL}" + quilt import $i + quilt push -f + RET=$? + if [[ $RET -ne 0 ]]; then + echo -e "${RED}Refreshing $i failed${NORMAL}, update it" + echo -e "use ${CYAN}quilt edit [filename]${NORMAL} to apply the failed part manually" + echo -e "use ${CYAN}quilt refresh${NORMAL} after the files are corrected and rerun this script" + cp patches.orig/README $1/README + exit $RET + fi + QUILT_DIFF_OPTS="-p" quilt refresh -p ab --no-index --no-timestamp + done + quilt pop -a + + cp patches.orig/README $1/README + rm -rf patches.orig .pc $1/series +} + +if [[ "$1" = "refresh" ]]; then + patchRefresh patches + patchRefresh linux-next-cherry-picks +fi + +for dir in patches linux-next-cherry-picks; do + FOUND=$(find $dir/ -name \*.patch | wc -l) + if [ $FOUND -eq 0 ]; then + continue + fi + for i in $dir/*.patch; do + echo -e "${GREEN}Applying backport patch${NORMAL}: ${BLUE}$i${NORMAL}" + patch -p1 -N -t < $i + RET=$? 
+ if [[ $RET -ne 0 ]]; then + echo -e "${RED}Patching $i failed${NORMAL}, update it" + exit $RET + fi + done +done + +DIR="$PWD" +cd $GIT_TREE +GIT_DESCRIBE=$(git describe) +echo -e "${GREEN}Updated${NORMAL} from local tree: ${BLUE}${GIT_TREE}${NORMAL}" +echo -e "Origin remote URL: ${CYAN}$(git config remote.origin.url)${NORMAL}" +cd $DIR +if [ -d ./.git ]; then + git describe > compat-release + + cd $GIT_TREE + TREE_NAME=$(git config remote.origin.url) + TREE_NAME=${TREE_NAME##*/} + + echo $TREE_NAME > $DIR/git-describe + echo $GIT_DESCRIBE >> $DIR/git-describe + + echo -e "git-describe for $TREE_NAME says: ${PURPLE}$GIT_DESCRIBE${NORMAL}" + + rm -f $DIR/master-tag + case $TREE_NAME in + "wireless-testing.git") # John's wireless-testing + MASTER_TAG=$(git tag -l| grep master | tail -1) + echo $MASTER_TAG > $DIR/master-tag + echo -e "This is a ${RED}bleeding edge${NORMAL} compat-wireless release based on: ${PURPLE}$MASTER_TAG${NORMAL}" + ;; + "linux-next.git") # The linux-next integration testing tree + MASTER_TAG=$(git tag -l| grep next | tail -1) + echo $MASTER_TAG > $DIR/master-tag + echo -e "This is a ${RED}bleeding edge${NORMAL} compat-wireless release based on: ${PURPLE}$MASTER_TAG${NORMAL}" + ;; + "linux-2.6-allstable.git") # HPA's all stable tree + echo -e "This is a ${GREEN}stable${NORMAL} compat-wireless release based on: ${PURPLE}$(git describe --abbrev=0)${NORMAL}" + ;; + "linux-2.6.git") # Linus' 2.6 tree + ;; + *) + ;; + esac + + cd $DIR +fi + +./scripts/driver-select restore + +echo -e "This is compat-release: ${YELLOW}$(cat compat-release)${NORMAL}" diff --git a/scripts/athenable b/scripts/athenable new file mode 100755 index 0000000..c9b67a0 --- /dev/null +++ b/scripts/athenable @@ -0,0 +1,46 @@ +#!/bin/bash +# +# Copyright 2007 Luis R. Rodriguez +# +# Makes sure either ath5k or MadWifi are ready to be used. This allows +# us to choose any driver without blacklisting each other. + +. /usr/lib/compat-wireless/modlib.sh + +if [[ $UID -ne 0 ]]; then + echo "Run with root privileges" + exit +fi + +ATH5K="ath5k" +ATH9K="ath9k" +MADWIFI="ath_pci" +# Appended to module file at the end when we want to ignore one +IGNORE_SUFFIX=".ignore" +USAGE="Usage: $0 [ ath5k | madwifi ]" + +# Default behavior: disables any MadWifi driver present and makes sure +# ath5k is enabled +if [ $# -eq 0 ]; then + module_disable $MADWIFI + module_enable $ATH5K + module_enable $ATH9K + exit +elif [ $# -ne 1 ]; then + echo "$USAGE" + exit +fi + +MODULE=$1 +if [ "$MODULE" == "ath5k" ]; then + module_disable $MADWIFI + module_enable $ATH5K + module_enable $ATH9K +elif [ "$MODULE" == "madwifi" ]; then + module_disable $ATH5K + module_disable $ATH9K + module_enable $MADWIFI +else + echo "$USAGE" + exit +fi diff --git a/scripts/athload b/scripts/athload new file mode 100755 index 0000000..01b3072 --- /dev/null +++ b/scripts/athload @@ -0,0 +1,57 @@ +#!/bin/bash +# Copyright 2007 Luis R. Rodriguez +# +# Loads ath5k or madwifi + +. /usr/lib/compat-wireless/modlib.sh + +if [[ $UID -ne 0 ]]; then + echo "Run with root privileges" + exit +fi + + +USAGE="Usage: $0 [ ath5k | madwifi ]" + +# Default behavior: unload MadWifi and load ath5k +if [ $# -eq 0 ]; then + athenable ath5k + exit +elif [ $# -ne 1 ]; then + echo "$USAGE" + exit +fi + +MODULE=$1 +if [ "$MODULE" == "ath5k" ]; then + madwifi-unload + athenable ath5k + modprobe ath5k + CHECK=`modprobe -l ath5k` + if [ ! -z $CHECK ]; then + echo "ath5k loaded successfully" + fi + modprobe ath9k + CHECK=`modprobe -l ath9k` + if [ ! 
-z $CHECK ]; then
+ echo "ath9k loaded successfully"
+ fi
+elif [ "$MODULE" == "madwifi" ]; then
+ CHECK=`modprobe -l ath5k`
+ if [ ! -z $CHECK ]; then
+ echo "ath5k currently loaded, going to try to unload the module..."
+ modprobe -r --ignore-remove ath5k
+ fi
+ athenable madwifi
+ # MadWifi may be loaded, but it doesn't mean devices
+ # currently available were picked up
+ madwifi-unload 2>&1 > /dev/null
+ modprobe ath_pci
+ CHECK=`modprobe -l ath_pci`
+ if [ ! -z $CHECK ]; then
+ echo "MadWifi loaded successfully!"
+ fi
+else
+ echo "$USAGE"
+ exit
+fi
diff --git a/scripts/b43enable b/scripts/b43enable
new file mode 100755
index 0000000..b0b9851
--- /dev/null
+++ b/scripts/b43enable
@@ -0,0 +1,59 @@
+#!/bin/bash
+#
+# Copyright 2007 Luis R. Rodriguez
+#
+# Makes sure either b43, b43legacy (new mac80211 drivers) or bcm43xx
+# is enabled to be used. This allows us to choose any driver without
+# blacklisting each other.
+
+. /usr/lib/compat-wireless/modlib.sh
+
+if [[ $UID -ne 0 ]]; then
+ echo "Run with root privileges"
+ exit
+fi
+
+B43S="b43 b43legacy"
+B43_OLD="bcm43xx"
+B43_PROP="wl"
+
+# Appended to module file at the end when we want to ignore one
+USAGE="Usage: $0 [ b43 | bcm43xx | wl ]"
+
+function enable_b43 {
+ module_disable $B43_OLD
+ module_disable $B43_PROP
+ for i in $B43S; do
+ module_enable $i
+ done
+}
+
+# Default behavior: disables the old bcm43xx driver and enables b43
+# and b43legacy
+if [ $# -eq 0 ]; then
+ enable_b43
+ exit
+elif [ $# -ne 1 ]; then
+ echo "$USAGE"
+ exit
+fi
+
+MODULE=$1
+if [ "$MODULE" == "bcm43xx" ]; then
+ for i in $B43S; do
+ module_disable $i
+ done
+ module_disable $B43_PROP
+ module_enable $B43_OLD
+elif [ "$MODULE" == "wl" ]; then
+ for i in $B43S; do
+ module_disable $i
+ done
+ module_disable $B43_OLD
+ module_enable $B43_PROP
+elif [ "$MODULE" == "b43" ]; then
+ enable_b43
+else
+ echo "$USAGE"
+ exit
+fi
diff --git a/scripts/b43load b/scripts/b43load
new file mode 100755
index 0000000..5114ffa
--- /dev/null
+++ b/scripts/b43load
@@ -0,0 +1,65 @@
+#!/bin/bash
+# Copyright 2007 Luis R. Rodriguez
+#
+# Loads new broadcom drivers (b43 and b43legacy) or the old ones (bcm43xx)
+
+. /usr/lib/compat-wireless/modlib.sh
+
+if [[ $UID -ne 0 ]]; then
+ echo "Run with root privileges"
+ exit
+fi
+
+
+USAGE="Usage: $0 [ b43 | bcm43xx ]"
+
+# Default behavior: unload bcm43xx and load b43 and b43legacy
+if [ $# -eq 0 ]; then
+ set -- b43
+elif [ $# -ne 1 ]; then
+ echo "$USAGE"
+ exit
+fi
+
+MODULE=$1
+if [ "$MODULE" == "b43" ]; then
+ grep bcm43xx /proc/modules 2>&1 > /dev/null
+ if [ $? -eq 0 ]; then
+ echo Unloading bcm43xx...
+ modprobe -r --ignore-remove bcm43xx
+ fi
+ # Enables both b43 and b43legacy
+ b43enable b43
+ modprobe b43
+ modprobe b43legacy
+ CHECK=`modprobe -l b43`
+ if [ ! -z $CHECK ]; then
+ echo "b43 loaded successfully"
+ fi
+ CHECK=`modprobe -l b43legacy`
+ if [ ! -z $CHECK ]; then
+ echo "b43legacy loaded successfully"
+ fi
+elif [ "$MODULE" == "bcm43xx" ]; then
+ CHECK=`modprobe -l b43`
+ if [ ! -z $CHECK ]; then
+ echo "b43 currently loaded, going to try to unload the module..."
+ modprobe -r --ignore-remove b43
+ fi
+ CHECK=`modprobe -l b43legacy`
+ if [ ! -z $CHECK ]; then
+ echo "b43legacy currently loaded, going to try to unload the module..."
+ modprobe -r --ignore-remove b43legacy
+ fi
+ b43enable bcm43xx
+ # bcm43xx may be loaded already, let's remove it first
+ modprobe -r --ignore-remove bcm43xx 2>&1 > /dev/null
+ modprobe bcm43xx
+ CHECK=`modprobe -l bcm43xx`
+ if [ ! -z $CHECK ]; then
+ echo "bcm43xx loaded successfully!" 
+ fi +else + echo "$USAGE" + exit +fi diff --git a/scripts/btload.sh b/scripts/btload.sh new file mode 100755 index 0000000..50f37b6 --- /dev/null +++ b/scripts/btload.sh @@ -0,0 +1,10 @@ +#!/bin/bash +MODULES="bluetooth btusb l2cap sco hidp rfcomm bnep" +for i in $MODULES; do + echo Loading $i... + modprobe $i +done +echo Starting bluetooth service.. +sudo service bluetooth start +sudo service bluetooth status + diff --git a/scripts/btunload.sh b/scripts/btunload.sh new file mode 100755 index 0000000..c14f8c6 --- /dev/null +++ b/scripts/btunload.sh @@ -0,0 +1,13 @@ +#!/bin/bash +MODULES="hidp rfcomm bnep l2cap sco btusb bluetooth" +echo Stoping bluetooth service.. +sudo service bluetooth stop +sudo service bluetooth status + +for i in $MODULES; do + grep ^$i /proc/modules 2>&1 > /dev/null + if [ $? -eq 0 ]; then + echo Unloading $i... + modprobe -r --ignore-remove $i + fi +done diff --git a/scripts/check_config.sh b/scripts/check_config.sh new file mode 100755 index 0000000..1ac9620 --- /dev/null +++ b/scripts/check_config.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# This script checks the compat-wireless configuration file and if changes were made +# regenerates the compat_autoconf header. + +# These variables are expected to be exported: +#COMPAT_CONFIG="config" +#CONFIG_CHECK=".${COMPAT_CONFIG}.md5" +#COMPAT_AUTOCONF="include/linux/compat_autoconf.h" + +function gen_compat_autoconf { + echo "./scripts/gen-compat-autoconf.sh $COMPAT_CONFIG > $COMPAT_AUTOCONF" + ./scripts/gen-compat-autoconf.sh $COMPAT_CONFIG > $COMPAT_AUTOCONF + md5sum $COMPAT_CONFIG > $CONFIG_CHECK +} + +which md5sum 2>&1 > /dev/null +if [ $? -ne 0 ]; then + echo "md5sum required to detect changes on config file" + exit -1 +fi + +if [ ! -f $CONFIG_CHECK ]; then + gen_compat_autoconf + exit +fi + +md5sum -c $CONFIG_CHECK 2> /dev/null 1>/dev/null + +if [ $? -ne 0 ]; then + echo "Changes to compat-wireless's configuration was detected, regenerating autoconf..." + gen_compat_autoconf +fi + diff --git a/scripts/check_depmod b/scripts/check_depmod new file mode 100755 index 0000000..f127a6a --- /dev/null +++ b/scripts/check_depmod @@ -0,0 +1,83 @@ +#!/bin/bash +# Copyright 2009 Luis R. Rodriguez +# +# Ensures your distribution likes to prefer updates/ over the kernel/ +# search updates built-in + +# Seems Mandriva has an $DEPMOD_DIR but it doesn't have any files, +# so lets deal with those distributions. +DEPMOD_CONF="/etc/depmod.conf" +DEPMOD_CONF_TMP="$DEPMOD_CONF.compat-wireless.old" +DEPMOD_DIR="/etc/depmod.d/" +COMPAT_DEPMOD_FILE=compat-wireless.conf +GREP_REGEX_UPDATES="^[[:space:]]*search.*[[:space:]]updates\([[:space:]]\|$\)" +GREP_REGEX_SEARCH="^[[:space:]]*search[[:space:]].\+$" +DEPMOD_CMD="depmod" + +function add_compat_depmod_conf { + echo "NOTE: Your distribution lacks an $DEPMOD_DIR directory with " + echo "updates/ directory being prioritized for modules, we're adding " + echo "one for you." + mkdir -p $DEPMOD_DIR + FIRST_FILE=$(ls $DEPMOD_DIR|head -1) + [ -n "$FIRST_FILE" ] && while [[ $FIRST_FILE < $COMPAT_DEPMOD_FILE ]]; do + COMPAT_DEPMOD_FILE="0$COMPAT_DEPMOD_FILE" + done + echo "search updates" > $DEPMOD_DIR/$COMPAT_DEPMOD_FILE +} + +function add_global_depmod_conf { + echo "NOTE: Your distribution lacks updates/ directory being" + echo "prioritized for modules, we're adding it to $DEPMOD_CONF." 
+ rm -f $DEPMOD_CONF_TMP + [ -f $DEPMOD_CONF ] && cp -f $DEPMOD_CONF $DEPMOD_CONF_TMP + echo "search updates" > $DEPMOD_CONF + [ -f $DEPMOD_CONF_TMP ] && cat $DEPMOD_CONF_TMP >> $DEPMOD_CONF +} + +function depmod_updates_ok { + echo "depmod will prefer updates/ over kernel/ -- OK!" +} + +function add_depmod_conf { + if [ -f "$DEPMOD_CONF" ]; then + add_global_depmod_conf + else + DEPMOD_VERSION=$($DEPMOD_CMD --version | cut -d" " -f2 | sed "s/\.//") + if [[ $DEPMOD_VERSION -gt 36 ]]; then + add_compat_depmod_conf + else + add_global_depmod_conf + fi + fi +} + +# ============================================================================= +# === MAIN ==================================================================== +# ============================================================================= + +GREP_FILES="" +[ -f $DEPMOD_CONF ] && GREP_FILES="$DEPMOD_CONF" +if [ -d $DEPMOD_DIR ]; then + DEPMOD_FILE_COUNT=$(ls $DEPMOD_DIR | wc -l) + [[ $DEPMOD_FILE_COUNT -gt 0 ]] && GREP_FILES="$GREP_FILES $DEPMOD_DIR/*" +fi + +if [ -n "$GREP_FILES" ]; then + grep -q "$GREP_REGEX_SEARCH" $GREP_FILES + if [[ $? -eq 0 ]]; then + grep -q "$GREP_REGEX_UPDATES" $GREP_FILES + if [[ $? -eq 0 ]]; then + depmod_updates_ok + else + add_depmod_conf + fi + else + depmod_updates_ok + fi +else + depmod_updates_ok +fi + +exit 0 + diff --git a/scripts/compress_modules b/scripts/compress_modules new file mode 100755 index 0000000..cf2a76c --- /dev/null +++ b/scripts/compress_modules @@ -0,0 +1,40 @@ +#!/bin/bash +# To be used by distributions using compressed modules + +COMPRESSION_FOUND="n" +COUNT=0; + +for i in $(modprobe -l mac80211); do + let COUNT=$COUNT+1 + i=${i##*/} + if [ "$i" = "mac80211.ko.gz" ]; then + COMPRESSION_FOUND="y" + continue + fi +done + +if [ $COUNT -gt 2 ]; then + echo "More than two mac80211 modules are detected, please report this." + exit +fi + +if [ $COMPRESSION_FOUND = "n" ]; then + exit +fi + +DIRS="$KLIB/$KMODDIR/net/mac80211/" +# This handles both drivers/net/ and drivers/net/wireless/ +DIRS="$DIRS $KLIB/$KMODDIR/net/" +DIRS="$DIRS $KLIB/$KMODDIR/drivers/ssb/" +DIRS="$DIRS $KLIB/$KMODDIR/drivers/net/usb/" +DIRS="$DIRS $KLIB/$KMODDIR/drivers/net/wireless/" +DIRS="$DIRS $KLIB/$KMODDIR/drivers/misc/eeprom/" + +for i in $DIRS; do + if [ ! -d $i ]; then + continue; + fi + for driver in $(find $i -type f -name *.ko); do + gzip -9 $driver + done +done diff --git a/scripts/driver-select b/scripts/driver-select new file mode 100755 index 0000000..229861e --- /dev/null +++ b/scripts/driver-select @@ -0,0 +1,289 @@ +#!/bin/bash +# Copyright 2009 Luis R. Rodriguez +# +# This this to select your compat-wireless driver and +# reduce compilation time. + +DRIVERS_MAKEFILE="drivers/net/wireless/Makefile" +ATH_MAKEFILE="drivers/net/wireless/ath/Makefile" +ATH9K_MAKEFILE="drivers/net/wireless/ath/ath9k/Makefile" +NET_WIRELESS_MAKEFILE="net/wireless/Makefile" +EEPROM_MAKEFILE="drivers/misc/eeprom/Makefile" +DRIVERS_NET="drivers/net/Makefile" +DRIVERS_NET_USB_MAKEFILE="drivers/net/usb/Makefile" +SSB_MAKEFILE="drivers/ssb/Makefile" + +# used to backup files from foo to foo.${BACKUP_EXT} +# If you change this also modify restore_compat() and +# restore_file() below I couldn't find a way to use +# the $BACKUP_EXT there. 
+BACKUP_EXT="bk" + +# Pretty colors +GREEN="\033[01;32m" +YELLOW="\033[01;33m" +NORMAL="\033[00m" +BLUE="\033[34m" +RED="\033[31m" +PURPLE="\033[35m" +CYAN="\033[36m" +UNDERLINE="\033[02m" + +SUPPORTED_DRIVERS="ath5k ath9k ar9170 zd1211rw" + +function usage { + echo -e "${GREEN}Usage${NORMAL}: ${CYAN}$0${NORMAL} [ ${PURPLE}${NORMAL} | ${PURPLE}${NORMAL} | ${GREEN}restore${NORMAL} ]" + + # These should match the switch below. + echo -e "Supported drivers:" + for i in $SUPPORTED_DRIVERS; do + echo -e "\t${PURPLE}${i}${NORMAL}" + done + + # These should match the switch below. + echo -e "Supported group drivers:" + echo -e "\t${CYAN}atheros${NORMAL} < ${PURPLE} ath5k ath9k ar9170 zd1211rw ${NORMAL}>" + echo -e "\t${CYAN}ath${NORMAL} < ${PURPLE} ath5k ath9k ar9170 ${NORMAL}>" + echo -e "\t${CYAN}intel${NORMAL} < ${PURPLE} iwl3945 iwlagn ipw2100 ipw2200 ${NORMAL}>" + echo -e "\t${CYAN}iwlwifi${NORMAL} < ${PURPLE} iwl3945 iwlagn ${NORMAL}>" + echo -e "\t${CYAN}rtl818x${NORMAL} < ${PURPLE} rtl8180 rtl8187 ${NORMAL}>" + echo -e "\t${CYAN}wl12xx${NORMAL} < ${PURPLE} wl1251 (SPI and SDIO) wl1271 ${NORMAL}>" + + echo -e "Restoring compat-wireless:" + echo -e "\t${GREEN}restore${NORMAL}: you can use this option to restore compat-wireless to the original state" +} + +function backup_file { + if [ -f $1.${BACKUP_EXT} ]; then + echo -e "Backup exists: ${CYAN}${1}.${BACKUP_EXT}${NORMAL}" + return + fi + echo -e "Backing up makefile: ${CYAN}${1}.${BACKUP_EXT}${NORMAL}" + cp $1 $1.bk +} + +function disable_makefile +{ + backup_file $1 + echo > $1 +} + +function select_driver +{ + backup_file $DRIVERS_MAKEFILE + perl -i -ne 'print if /'$1'/ ' $DRIVERS_MAKEFILE +} + +function select_drivers +{ + backup_file $DRIVERS_MAKEFILE + CONFIGS="" + COUNT=0 + for i in $@; do + if [[ "$CONFIGS" = "" ]]; then + CONFIGS="$i" + else + CONFIGS="${CONFIGS}|$i" + fi + done + egrep "$CONFIGS" $DRIVERS_MAKEFILE > ${DRIVERS_MAKEFILE}.tmp + mv ${DRIVERS_MAKEFILE}.tmp ${DRIVERS_MAKEFILE} +} + +function disable_lib80211 +{ + backup_file $NET_WIRELESS_MAKEFILE + perl -i -ne 'print if ! /LIB80211/ ' $NET_WIRELESS_MAKEFILE +} + +function disable_b44 { + backup_file $DRIVERS_NET + perl -i -ne 'print if ! /CONFIG_B44/ ' $DRIVERS_NET +} + +function disable_ssb +{ + disable_b44 + disable_makefile ${SSB_MAKEFILE} + perl -i -ne 'print if ! /drivers\/ssb\/ \\/ ' Makefile +} + +function disable_eeprom +{ + disable_makefile ${EEPROM_MAKEFILE} + perl -i -ne 'print if ! /drivers\/misc\/eeprom\/ \\/' Makefile +} + +function disable_usbnet +{ + disable_makefile ${DRIVERS_NET_USB_MAKEFILE} + perl -i -ne 'print if ! /drivers\/net\/usb\/ \\/' Makefile +} + +function disable_usbnet { + perl -i -ne 'print if ! /CONFIG_COMPAT_NET_USB_MODULES/' Makefile +} + +function disable_ethernet { + perl -i -ne 'print if ! /CONFIG_COMPAT_NETWORK_MODULES/' Makefile +} + +function disable_var_03 { + perl -i -ne 'print if ! /CONFIG_COMPAT_BLUETOOTH/' Makefile +} + +function disable_bt { + perl -i -ne 'print if ! 
/CONFIG_COMPAT_VAR_MODULES/' Makefile +} + +function disable_bt_usb_ethernet_var { + backup_file Makefile + disable_usbnet + disable_ethernet + disable_var_03 + disable_bt +} + +function disable_var { + disable_ssb + disable_usbnet + disable_eeprom +} + +function disable_var_01 { + disable_lib80211 + disable_var +} + +function disable_var_02 { + #var_01 with eeprom not disabled + disable_lib80211 + disable_ssb + disable_usbnet +} + +function select_ath_driver +{ + backup_file $ATH_MAKEFILE + perl -i -ne 'print if /'$1'/ || /CONFIG_ATH_/ || /ath-objs/ || /regd.o/ || /hw.o/ ' $ATH_MAKEFILE + disable_var_01 +} + +function select_ath9k_driver +{ + select_ath_driver CONFIG_ATH9K_HW + # In the future here we'll add stuff to disable ath9k_htc +} + +# iwlwifi stuff needs more work +function select_iwl_driver +{ + perl -i -ne 'print if /'$1'/ || /CONFIG_BLEH/ ' drivers/net/wireless/iwlwifi/Makefile + disable_var_01 +} + +function restore_file { + #ORIG=$(shell ${1%%.${BACKUP_EXT}}) + ORIG=${1%%.bk} + cp $1 $ORIG + rm -f $1 + echo -e "Restored makefile: ${CYAN}${ORIG}${NORMAL} (and removed backup)" +} + +function restore_compat { + #FILES=$(find ./ -type f -name *.$BACKUP_EXT) + FILES=$(find ./ -type f -name *.bk) + for i in $FILES; do + restore_file $i + done +} + +if [ $# -ne 1 ]; then + usage + exit +fi + +if [ ! -f compat-release ]; then + echo "Must run $0 from the compat-wireless top level directory" + exit +fi + +if [[ ! -f built-in.o ]]; then + if [[ "$1" != "restore" ]]; then + echo -e "${YELLOW}Processing new driver-select request...${NORMAL}" + fi +fi + +# Always backup the top level Makefile, unless restoring +if [[ "$1" != "restore" ]]; then + backup_file Makefile +fi + +# If a user selects a new driver make sure we clean up for them +# first and also restore the backup makefiles then. Otherwise +# we'll be trying to leave drivers on Makefiles which are not +# already there from a previous run. +if [ -f built-in.o ]; then + echo -e "${YELLOW}Old build found, going to clean this up first...${NORMAL}" + make clean + echo -e "${YELLOW}Restoring Makefiles...${NORMAL}" + ./$0 restore +fi + +case $1 in + restore) + restore_compat + ;; + atheros) + select_drivers CONFIG_ATH_COMMON \ + CONFIG_ZD1211RW + disable_var_01 + ;; + ath) + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_COMMON + disable_var_01 + ;; + intel) + select_drivers CONFIG_IWLWIFI \ + CONFIG_IPW + disable_var + ;; + iwlwifi) + select_driver CONFIG_IWLWIFI + disable_var_01 + ;; + wl12xx) + select_drivers CONFIG_WL12XX + disable_var_01 + ;; + ath5k) + disable_bt_usb_ethernet_var + select_driver CONFIG_ATH_COMMON + select_ath_driver CONFIG_ATH5K + #patch -p1 < enable-older-kernels/enable-2.6.23.patch + ;; + ath9k) + disable_bt_usb_ethernet_var + select_driver CONFIG_ATH_COMMON + select_ath9k_driver + #patch -p1 < enable-older-kernels/enable-2.6.23.patch + ;; + ar9170) + disable_bt_usb_ethernet_var + select_driver CONFIG_ATH_COMMON + select_ath_driver CONFIG_AR9170_USB + ;; + rtl818x) + select_drivers CONFIG_RTL8180 CONFIG_RTL8187 + disable_var_02 + ;; + zd1211rw) + select_driver CONFIG_ZD1211RW + disable_var_01 + ;; + *) + echo "Unsupported driver" + exit + ;; +esac diff --git a/scripts/gen-compat-autoconf.sh b/scripts/gen-compat-autoconf.sh new file mode 100755 index 0000000..6c7cae7 --- /dev/null +++ b/scripts/gen-compat-autoconf.sh @@ -0,0 +1,199 @@ +#!/bin/bash +# +# Copyright 2007 Luis R. Rodriguez +# +# Use this to parse a small .config equivalent looking file to generate +# our own autoconf.h. 
This file has defines for each config option +# just like the kernels include/linux/autoconf.h +# +# XXX: consider using scripts/kconfig/confdata.c instead. +# On the downside this would require the user to have libc though. + +# This indicates which is the oldest kernel we support +# Update this if you are adding support for older kernels. +OLDEST_KERNEL_SUPPORTED="2.6.25" +COMPAT_RELEASE="compat-release" +KERNEL_RELEASE="git-describe" +MULT_DEP_FILE=".compat_pivot_dep" + +if [ $# -ne 1 ]; then + echo "Usage $0 config-file" + exit +fi + +COMPAT_CONFIG="$1" + +if [ ! -f $COMPAT_CONFIG ]; then + echo "File $1 is not a file" + exit +fi + +if [ ! -f $COMPAT_RELEASE -o ! -f $KERNEL_RELEASE ]; then + echo "Error: $COMPAT_RELEASE or $KERNEL_RELEASE file is missing" + exit +fi + +CREL=$(cat $COMPAT_RELEASE | tail -1) +KREL=$(cat $KERNEL_RELEASE | tail -1) +DATE=$(date) + +# Defines a CONFIG_ option if not defined yet, this helps respect +# linux/autoconf.h +function define_config { + VAR=$1 + VALUE=$2 + case $VALUE in + n) # Try to undefine it + echo "#undef $VAR" + ;; + y) + echo "#ifndef $VAR" + echo "#define $VAR 1" + echo "#endif /* $VAR */ " + ;; + m) + echo "#ifndef $VAR" + echo "#define $VAR 1" + echo "#endif /* $VAR */ " + ;; + *) # Assume string + # XXX: add better checks to make sure what was on + # the right was indeed a string + echo "#ifndef $VAR" + echo "#define $VAR \"$VALUE\"" + echo "#endif /* $VAR */ " + ;; + esac +} + +# This deals with core compat-wireless kernel requirements. +function define_config_req { + VAR=$1 + echo "#ifndef $VAR" + echo -n "#error Compat-wireless requirement: $VAR must be enabled " + echo "in your kernel" + echo "#endif /* $VAR */" +} + +# This handles modules which have dependencies from the kernel +# which compat-wireless isn't providing yet either because +# the dependency is not available as kernel module or +# the module simply isn't provided by compat-wireless. +function define_config_dep { + VAR=$1 + VALUE=$2 + DEP=$3 + WARN_VAR="COMPAT_WARN_$VAR" + echo "#ifdef $DEP" + define_config $VAR $VALUE + echo "#else" + # XXX: figure out a way to warn only once + # define only once in case user tried to enable config option + # twice in config.mk + echo "#ifndef $WARN_VAR" + # Lets skip these for now.. they might be too annoying + #echo "#warning Skipping $VAR as $DEP was needed... " + #echo "#warning This just means $VAR won't be built and is not fatal." 
+	echo "#define $WARN_VAR"
+	echo "#endif /* $VAR */"
+	echo "#endif /* $WARN_VAR */"
+}
+
+# This handles options which have *multiple* dependencies from the kernel
+function define_config_multiple_deps {
+	VAR=$1
+	VALUE=$2
+	DEP_ARRAY=$3
+
+	# First, put all ifdefs
+	for i in $(cat $MULT_DEP_FILE); do
+		echo "#ifdef $i"
+	done
+
+	# Now put our option in the middle
+	define_config $VAR $VALUE
+
+	# Now close all ifdefs
+	# (one #endif for each dependency opened above)
+	for i in $(cat $MULT_DEP_FILE); do
+		echo "#endif"
+	done
+
+}
+
+function kernel_version_req {
+	VERSION=$(echo $1 | sed -e 's/\./,/g')
+	echo "#if (LINUX_VERSION_CODE < KERNEL_VERSION($VERSION))"
+	echo "#error Compat-wireless requirement: Linux >= $VERSION"
+	echo "#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION($VERSION) */ "
+}
+
+cat <= 2.6.23
+	# CONFIG_MAC80211_QOS on these kernels requires
+	# CONFIG_NET_SCHED and CONFIG_NETDEVICES_MULTIQUEUE
+	rm -f $MULT_DEP_FILE
+	echo CONFIG_NET_SCHED >> $MULT_DEP_FILE
+	echo CONFIG_NETDEVICES_MULTIQUEUE >> $MULT_DEP_FILE
+	define_config_multiple_deps CONFIG_MAC80211_QOS y $ALL_DEPS
+	rm -f $MULT_DEP_FILE
+	fi
+fi
+echo "#endif /* COMPAT_AUTOCONF_INCLUDED */"
diff --git a/scripts/gen-stable-release.sh b/scripts/gen-stable-release.sh
new file mode 100755
index 0000000..a6a10f9
--- /dev/null
+++ b/scripts/gen-stable-release.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+# Copyright 2009 Luis R. Rodriguez
+#
+# You can use this to make stable compat-wireless releases
+#
+# The assumption is you have the linux-2.6-allstable git tree on your $HOME
+# git://git.kernel.org/pub/scm/linux/kernel/git/hpa/linux-2.6-allstable.git
+#
+# Local branches will be created based on the remote linux-2.6.X.y branches.
+# If your branch already exists we will nuke it for you to avoid rebasing.
+#
+# If no kernel is specified we use the latest rc-release, which will be on the
+# remote master branch. Your master branch should be clean.
+
+ALL_STABLE_TREE="linux-2.6-allstable"
+STAGING=/tmp/staging/compat-wireless/
+
+if [[ $# -gt 1 ]]; then
+	echo "Usage: $0 [ linux-2.6.X.y ]"
+	echo
+	echo Example usages:
+	echo
+	echo $0
+	echo $0 linux-2.6.29.y
+	echo $0 linux-2.6.30.y
+	echo
+	echo "If no kernel is specified we try to make a release based on the latest RC kernel."
+	echo "If a kernel release is specified X is the next stable release as 31 in 2.6.31.y."
+	exit
+fi
+
+# branch we want to use from hpa's tree
+BRANCH="$1"
+
+export GIT_TREE=$HOME/$ALL_STABLE_TREE
+COMPAT_WIRELESS_DIR=$(pwd)
+
+cd $GIT_TREE
+# --abbrev=0 on branch should work but I guess it doesn't on some releases
+LOCAL_BRANCH=$(git branch | grep \* | awk '{print $2}')
+
+case $LOCAL_BRANCH in
+"master") # Preparing a new stable compat-wireless release based on an RC kernel
+	git checkout -f
+	git pull
+	# Rebase will be done automatically if our tree is clean
+	echo "On master branch on $ALL_STABLE_TREE"
+	;;
+*) # Based on a stable 2.6.x.y release, let's just move to the master branch,
+	# git pull, nuke the old branch and start a fresh new branch.
+	echo "On non-master branch on $ALL_STABLE_TREE: $LOCAL_BRANCH"
+	git checkout -f
+	git checkout master
+	git pull
+	git branch -D $LOCAL_BRANCH
+	git checkout -b $LOCAL_BRANCH origin/$LOCAL_BRANCH
+	;;
+esac
+
+# We should now be on the branch we want
+KERNEL_RELEASE=$(git describe --abbrev=0 | sed -e 's/v//g')
+RELEASE="compat-wireless-$KERNEL_RELEASE"
+RELEASE_TAR="$RELEASE.tar.bz2"
+
+rm -rf $STAGING
+mkdir -p $STAGING
+cp -a $COMPAT_WIRELESS_DIR $STAGING/$RELEASE
+cd $STAGING/$RELEASE
+
+./scripts/admin-update.sh
+rm -rf $STAGING/$RELEASE/.git
+
+# Remove any gunk
+echo
+echo "Cleaning up the release ..."
+make clean 2>&1 > /dev/null
+find ./ -type f -name *.orig | xargs rm -f
+find ./ -type f -name *.rej | xargs rm -f
+
+cd $STAGING/
+
+echo "Creating $RELEASE_TAR ..."
+tar -jcf $RELEASE_TAR $RELEASE/
+
+echo
+echo "Compat-wireless release: $RELEASE"
+echo "Size: $(du -h $RELEASE_TAR)"
+echo "sha1sum: $(sha1sum $RELEASE_TAR)"
+echo
+echo "Release: ${STAGING}$RELEASE_TAR"
diff --git a/scripts/iwl-enable b/scripts/iwl-enable
new file mode 100755
index 0000000..765195b
--- /dev/null
+++ b/scripts/iwl-enable
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# Copyright 2007 Luis R. Rodriguez
+#
+# Makes sure either iwlagn (new) or iwl4965 (old)
+# is enabled to be used. This allows us to choose either driver without
+# having to blacklist the other.
+
+. /usr/lib/compat-wireless/modlib.sh
+
+if [[ $UID -ne 0 ]]; then
+	echo "Run with root privileges"
+	exit
+fi
+
+IWL_NEW="iwlagn"
+IWL_OLD="iwl4965"
+
+# Only one of the two drivers can be requested at a time
+USAGE="Usage: $0 [ $IWL_NEW | $IWL_OLD ]"
+
+function enable_iwlagn {
+	module_disable $IWL_OLD
+	for i in $IWL_NEW; do
+		module_enable $i
+	done
+}
+
+# Default behavior: disables the old iwl4965 driver and enables iwlagn
+if [ $# -eq 0 ]; then
+	enable_iwlagn
+	exit
+elif [ $# -ne 1 ]; then
+	echo "$USAGE"
+	exit
+fi
+
+MODULE=$1
+if [ "$MODULE" == "iwl4965" ]; then
+	module_disable $IWL_NEW
+	module_enable $IWL_OLD
+elif [ "$MODULE" == "iwlagn" ]; then
+	enable_iwlagn
+else
+	echo "$USAGE"
+	exit
+fi
diff --git a/scripts/iwl-load b/scripts/iwl-load
new file mode 100755
index 0000000..96b6d89
--- /dev/null
+++ b/scripts/iwl-load
@@ -0,0 +1,58 @@
+#!/bin/bash
+# Copyright 2008 Luis R. Rodriguez
+#
+# Loads the new Intel iwl driver (iwlagn) or the old one (iwl4965)
+
+. /usr/lib/compat-wireless/modlib.sh
+
+IWL_OLD="iwl4965"
+IWL_NEW="iwlagn"
+
+if [[ $UID -ne 0 ]]; then
+	echo "Run with root privileges"
+	exit
+fi
+
+
+USAGE="Usage: $0 [ iwlagn | iwl4965 ]"
+
+# Default behavior: unload iwl4965 and load iwlagn
+if [ $# -eq 0 ]; then
+	set -- iwlagn
+elif [ $# -ne 1 ]; then
+	echo "$USAGE"
+	exit
+fi
+
+MODULE=$1
+if [ "$MODULE" == "iwlagn" ]; then
+	grep iwl4965 /proc/modules 2>&1 > /dev/null
+	if [ $? -eq 0 ]; then
+		echo Unloading iwl4965...
+		modprobe -r --ignore-remove iwl4965
+	fi
+	# Make sure iwlagn is enabled and iwl4965 is ignored
+	iwl-enable iwlagn
+	modprobe iwlagn
+	CHECK=`modprobe -l iwlagn`
+	if [ ! -z $CHECK ]; then
+		echo "iwlagn loaded successfully"
+	fi
+elif [ "$MODULE" == "iwl4965" ]; then
+	CHECK=`modprobe -l iwlagn`
+	if [ ! -z $CHECK ]; then
+		echo "iwlagn currently loaded, going to try to unload the module..."
+		modprobe -r --ignore-remove iwlagn
+	fi
+	iwl-enable iwl4965
+	# iwl4965 may be loaded already, let's remove it first
+	modprobe -r --ignore-remove iwl4965 2>&1 > /dev/null
+	modprobe iwl4965
+	CHECK=`modprobe -l iwl4965`
+	if [ ! -z $CHECK ]; then
+		echo "iwl4965 loaded successfully!"
+ fi +else + echo "$USAGE" + exit +fi diff --git a/scripts/load.sh b/scripts/load.sh new file mode 100755 index 0000000..9f82e2c --- /dev/null +++ b/scripts/load.sh @@ -0,0 +1,26 @@ +#!/bin/bash +MODULES="ipw2100 ipw2200 libertas_cs usb8xxx" +MODULES="$MODULES p54pci p54usb" +MODULES="$MODULES adm8211 zd1211rw" +MODULES="$MODULES rtl8180 rtl8187" +MODULES="$MODULES p54pci p54usb" +MODULES="$MODULES iwl3945 iwlagn" +MODULES="$MODULES ath ar9170usb" +MODULES="$MODULES rtl8180 rtl8187" +MODULES="$MODULES rt2400pci rt2500pci rt61pci" +MODULES="$MODULES rt2500usb rt73usb" +MODULES="$MODULES rndis_wlan at76_usb" +MODULES="$MODULES mwl8k mac80211_hwsim" +MODULES="$MODULES at76c50x_usb" +MODULES="$MODULES bluetooth btusb l2cap sco hidp rfcomm bnep" +for i in $MODULES; do + echo Loading $i... + modprobe $i +done +# For ath5k we must be sure to unload MadWifi first +athload ath5k +# For b43 we must make sure to unload bcm43xx first +b43load b43 +echo Starting bluetooth service.. +sudo service bluetooth start +sudo service bluetooth status diff --git a/scripts/madwifi-unload b/scripts/madwifi-unload new file mode 100755 index 0000000..0cde286 --- /dev/null +++ b/scripts/madwifi-unload @@ -0,0 +1,58 @@ +#!/bin/sh +# Copyright 2006 Kel Modderman +# +# Taken from madwifi scripts. This unloads madwifi + +: ${PATTERN='\(ath_.*\|wlan_.*\|wlan\)$'} +: ${MAX_TRIES=10} + +test "`id -u`" = 0 || { + echo "ERROR: You must be root to run this script" >&2 + exit 1 +} + +test -r /proc/modules || { + echo "ERROR: Cannot read /proc/modules" >&2 + exit 1 +} + +tries="$MAX_TRIES" +while test "$tries" != "0"; do + skipped=0 + IFS=' +' + for line in `cat /proc/modules`; do + IFS=' ' + set x $line + name="$2" + size="$3" + use_count="$4" + use_name="$5" + state="$6" + expr "$name" : "$PATTERN" >/dev/null || continue + + # Compatibility for Linux 2.4.x + test -z "$state" && { use_name="-"; state="Live"; } + + if test "$state" != "Live" || test "$use_count" != "0" || \ + test "$use_name" != "-"; then + # Don't skip unload in the last run + if test "$tries" != "1"; then + skipped=1 + continue + fi + fi + + echo "Unloading \"$name\"" + sync # to be safe + /sbin/rmmod "$name" || { + echo "ERROR: cannot unload module \"$name\"" >&2 + exit 1 + } + sync # to be even safer + done + test "$skipped" = "0" && break + tries=$(($tries - 1)) +done + +exit 0 diff --git a/scripts/modlib.sh b/scripts/modlib.sh new file mode 100755 index 0000000..91e53f1 --- /dev/null +++ b/scripts/modlib.sh @@ -0,0 +1,87 @@ +#!/bin/bash +# +# Copyright 2007 Luis R. Rodriguez +# +# You can use these to enable/disable modules without blacklisting them + +# Make sure our imporant paths are included +PATH=$PATH:/usr/sbin:/sbin + +# Appended to module file at the end when we want to ignore one +IGNORE_SUFFIX=".ignore" +VER=`uname -r` + +# If 'module' is found, its renamed to 'module.ignore' +function module_disable { + # Basic check to see if this is a module available for loading + MODULE_CHECK=`modprobe -l $1` + if [ -z $MODULE_CHECK ]; then + echo "Module $1 not detected -- this is fine" + return + fi + MODULE=$1 + MODULE_KO=${MODULE}.ko + # In case there are more than one of these modules. This can + # happen, for example if your distribution provides one and you have + # compiled one in yourself later. 
+ MODULE_COUNT=`find /lib/modules/$VER/ -name $MODULE_KO | wc -l` + ALL_MODULES=`find /lib/modules/$VER/ -name $MODULE_KO` + COUNT=1 + CHECK=`modprobe -l $MODULE` + for i in $ALL_MODULES; do + if [[ $MODULE_COUNT -gt 1 ]]; then + if [[ $COUNT -eq 1 ]]; then + echo -en "$MODULE_COUNT $MODULE modules found " + echo -e "we'll disable all of them" + fi + echo -en "Disabling $MODULE ($COUNT) ..." + else + echo -en "Disabling $MODULE ..." + fi + mv -f $i ${i}${IGNORE_SUFFIX} + depmod -ae + CHECK_AGAIN=`modprobe -l $MODULE` + if [ "$CHECK" != "$CHECK_AGAIN" ]; then + echo -e "\t[OK]\tModule disabled:" + echo "$CHECK" + else + echo -e "[ERROR]\tModule is still being detected:" + echo "$CHECK" + fi + let COUNT=$COUNT+1 + done +} + +# If 'module.ignore' is found, rename it back to 'module' +function module_enable { + MODULE=$1 + MODULE_KO=${MODULE}.ko + IGNORED_MODULE=${MODULE_KO}${IGNORE_SUFFIX} + # In case there are more than one of these modules. This can + # happen, for example if your distribution provides one and you have + # compiled one in yourself later. + ALL_MODULES=`find /lib/modules/$VER/ -name $IGNORED_MODULE` + for i in $ALL_MODULES; do + echo -en "Enabling $MODULE ..." + DIR=`dirname $i` + mv $i $DIR/$MODULE_KO + depmod -ae + CHECK=`modprobe -l $MODULE` + if [ "$DIR/$MODULE_KO" != $CHECK ]; then + if [ -z $CHECK ]; then + echo -e "\t[ERROR]\tModule could not be enabled" + else + echo -en "\t[OK]\tModule renamed but another " + echo "module file is being preferred" + echo -e "Renamed module:\t\t$DIR/$MODULE_KO" + echo -e "Preferred module:\t$CHECK" + fi + else + echo -e "\t[OK]\tModule enabled: " + echo "$DIR/$MODULE_KO" + fi + # Lets only do this for the first module found + break + done +} + diff --git a/scripts/unload.sh b/scripts/unload.sh new file mode 100755 index 0000000..23d1ae5 --- /dev/null +++ b/scripts/unload.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +# The old stack drivers and the mac80211 rc80211_simple modules +# which is no longer on recent kernels (its internal) +OLD_MODULES="iwlwifi_mac80211 rc80211_simple zd1211rw-mac80211" +OLD_MODULES="$OLD_MODULES ieee80211_crypt_tkip ieee80211_crypt_ccmp" +OLD_MODULES="$OLD_MODULES ieee80211softmac ieee80211_crypt ieee80211" +OLD_MODULES="$OLD_MODULES bcm43xx rndis_wext iwl4965" +MODULES="$OLD_MODULES" +MODULES="$MODULES ipw2100 ipw2200 libipw" +MODULES="$MODULES libertas_cs usb8xxx libertas libertas_sdio libertas_spi" +MODULES="$MODULES libertas_tf libertas_tf_usb" +MODULES="$MODULES adm8211 zd1211rw" +MODULES="$MODULES b43 b43legacy ssb" +MODULES="$MODULES iwl3945 iwlagn iwlcore" +MODULES="$MODULES ath9k ath5k ar9170usb ath" +MODULES="$MODULES p54pci p54usb p54spi p54common" +MODULES="$MODULES rt2400pci rt2500pci rt61pci" +MODULES="$MODULES rt2500usb rt73usb" +MODULES="$MODULES rt2x00usb rt2x00lib" +MODULES="$MODULES rtl8180 rtl8187" +MODULES="$MODULES mwl8k mac80211_hwsim" +MODULES="$MODULES at76c50x_usb at76_usb" +MODULES="$MODULES rndis_wlan rndis_host cdc_ether usbnet" +# eeprom_93cx6 is used by rt2x00 (rt61pci, rt2500pci, rt2400pci) +# and Realtek drivers ( rtl8187, rtl8180) +MODULES="$MODULES eeprom_93cx6" +MODULES="$MODULES lib80211_crypt_ccmp lib80211_crypt_tkip lib80211_crypt_wep" +MODULES="$MODULES mac80211 cfg80211 lib80211" +MODULES="$MODULES hidp rfcomm bnep l2cap sco btusb bluetooth" +echo Stoping bluetooth service.. 
+sudo service bluetooth stop
+sudo service bluetooth status
+MADWIFI_MODULES="ath_pci ath_rate_sample wlan_scan_sta wlan ath_hal"
+IPW3945D="/sbin/ipw3945d-`uname -r`"
+
+if [ -f $IPW3945D ]; then
+	$IPW3945D --isrunning
+	if [ ! $? ]; then
+		echo -n "Detected ipw3945 daemon loaded we're going to "
+		echo "shut the daemon down now and remove the module."
+		modprobe -r --ignore-remove ipw3945
+	fi
+fi
+
+grep ath_pci /proc/modules 2>&1 > /dev/null
+if [ $? -eq 0 ]; then
+	echo "MadWifi driver is loaded, going to try to unload it..."
+	./scripts/madwifi-unload
+fi
+
+for i in $MODULES; do
+	grep ^$i /proc/modules 2>&1 > /dev/null
+	if [ $? -eq 0 ]; then
+		echo Unloading $i...
+		modprobe -r --ignore-remove $i
+	fi
+done
diff --git a/scripts/update-initramfs b/scripts/update-initramfs
new file mode 100755
index 0000000..412d885
--- /dev/null
+++ b/scripts/update-initramfs
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 2009 Luis R. Rodriguez
+#
+# Since we provide ssb and the Ethernet module b44, some people may
+# rely on them to netboot, so update the initramfs for each
+# distribution.
+#
+# Note that in the future people may want to boot over wireless
+# so this will help with that as well.
+
+LSB_RED_ID=$(/usr/bin/lsb_release -i -s)
+
+KLIB=/lib/modules/2.6.31-wl/build
+ver=$(echo $KLIB | awk -F "/lib/modules/" '{print $2}' | awk -F"/" '{print $1}')
+dir=/boot/
+
+case $LSB_RED_ID in
+"Ubuntu")
+	echo "Updating Ubuntu's initramfs for $ver under $dir ..."
+	mkinitramfs -o $dir/initrd.img-$ver $ver
+	echo "Will now run update-grub to ensure grub will find the new initramfs ..."
+	update-grub
+	;;
+*)
+	echo "Warning:"
+	echo "You may or may not need to update your initramfs, you should if"
+	echo "any of the modules installed are part of your initramfs. To add"
+	echo "support for your distribution to do this automatically send a"
+	echo "patch against $0. If your distribution does not require this"
+	echo "send a patch against the '/usr/bin/lsb_release -i -s': $LSB_RED_ID"
+	echo "tag for your distribution to avoid this warning."
+	;;
+esac
diff --git a/scripts/wlload.sh b/scripts/wlload.sh
new file mode 100755
index 0000000..7bc8fae
--- /dev/null
+++ b/scripts/wlload.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+MODULES="ipw2100 ipw2200 libertas_cs usb8xxx"
+MODULES="$MODULES p54pci p54usb"
+MODULES="$MODULES adm8211 zd1211rw"
+MODULES="$MODULES rtl8180 rtl8187"
+MODULES="$MODULES p54pci p54usb"
+MODULES="$MODULES iwl3945 iwlagn"
+MODULES="$MODULES ath ar9170usb"
+MODULES="$MODULES rtl8180 rtl8187"
+MODULES="$MODULES rt2400pci rt2500pci rt61pci"
+MODULES="$MODULES rt2500usb rt73usb"
+MODULES="$MODULES rndis_wlan at76_usb"
+MODULES="$MODULES mwl8k mac80211_hwsim"
+MODULES="$MODULES at76c50x_usb"
+for i in $MODULES; do
+	echo Loading $i...
+ modprobe $i +done +# For ath5k we must be sure to unload MadWifi first +athload ath5k +# For b43 we must make sure to unload bcm43xx first +b43load b43 diff --git a/scripts/wlunload.sh b/scripts/wlunload.sh new file mode 100755 index 0000000..810ce00 --- /dev/null +++ b/scripts/wlunload.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# The old stack drivers and the mac80211 rc80211_simple modules +# which is no longer on recent kernels (its internal) +OLD_MODULES="iwlwifi_mac80211 rc80211_simple zd1211rw-mac80211" +OLD_MODULES="$OLD_MODULES ieee80211_crypt_tkip ieee80211_crypt_ccmp" +OLD_MODULES="$OLD_MODULES ieee80211softmac ieee80211_crypt ieee80211" +OLD_MODULES="$OLD_MODULES bcm43xx rndis_wext iwl4965" +MODULES="$OLD_MODULES" +MODULES="$MODULES ipw2100 ipw2200 libipw" +MODULES="$MODULES libertas_cs usb8xxx libertas libertas_sdio libertas_spi" +MODULES="$MODULES libertas_tf libertas_tf_usb" +MODULES="$MODULES adm8211 zd1211rw" +MODULES="$MODULES b43 b43legacy ssb" +MODULES="$MODULES iwl3945 iwlagn iwlcore" +MODULES="$MODULES ath9k ath5k ar9170usb ath" +MODULES="$MODULES p54pci p54usb p54spi p54common" +MODULES="$MODULES rt2400pci rt2500pci rt61pci" +MODULES="$MODULES rt2500usb rt73usb" +MODULES="$MODULES rt2x00usb rt2x00lib" +MODULES="$MODULES rtl8180 rtl8187" +MODULES="$MODULES mwl8k mac80211_hwsim" +MODULES="$MODULES at76c50x_usb at76_usb" +MODULES="$MODULES rndis_wlan rndis_host cdc_ether usbnet" +# eeprom_93cx6 is used by rt2x00 (rt61pci, rt2500pci, rt2400pci) +# and Realtek drivers ( rtl8187, rtl8180) +MODULES="$MODULES eeprom_93cx6" +MODULES="$MODULES lib80211_crypt_ccmp lib80211_crypt_tkip lib80211_crypt_wep" +MODULES="$MODULES mac80211 cfg80211 lib80211" +MADWIFI_MODULES="ath_pci ath_rate_sample wlan_scan_sta wlan ath_hal" +IPW3945D="/sbin/ipw3945d-`uname -r`" + +if [ -f $IPW3945D ]; then + $IPW3945D --isrunning + if [ ! $? ]; then + echo -n "Detected ipw3945 daemon loaded we're going to " + echo "shut the daemon down now and remove the module." + modprobe -r --ignore-remove ipw3945 + fi +fi + +grep ath_pci /proc/modules 2>&1 > /dev/null +if [ $? -eq 0 ]; then + echo "MadWifi driver is loaded, going to try to unload it..." + ./scripts/madwifi-unload +fi + +for i in $MODULES; do + grep ^$i /proc/modules 2>&1 > /dev/null + if [ $? -eq 0 ]; then + echo Unloading $i... + modprobe -r --ignore-remove $i + fi +done diff --git a/udev/50-compat_firmware.rules b/udev/50-compat_firmware.rules new file mode 100644 index 0000000..6473788 --- /dev/null +++ b/udev/50-compat_firmware.rules @@ -0,0 +1,4 @@ +# do not edit this file, it will be overwritten on update + +# compat_firmware-class requests, copies files into the kernel +SUBSYSTEM=="compat_firmware", ACTION=="add", RUN+="compat_firmware.sh" diff --git a/udev/compat_firmware.sh b/udev/compat_firmware.sh new file mode 100755 index 0000000..ef609e7 --- /dev/null +++ b/udev/compat_firmware.sh @@ -0,0 +1,35 @@ +#!/bin/sh -e + +# This is ported from Ubuntu but ubuntu uses these directories which +# other distributions don't care about: +# FIRMWARE_DIRS="/lib/firmware/updates/$(uname -r) /lib/firmware/updates \ +# /lib/firmware/$(uname -r) /lib/firmware" +# If your distribution looks for firmware in other directories +# feel free to extend this and add your own directory here. +# +FIRMWARE_DIRS="/lib/firmware" + +err() { + echo "$@" >&2 + logger -t "${0##*/}[$$]" "$@" 2>/dev/null || true +} + +if [ ! 
-e /sys$DEVPATH/loading ]; then + err "udev firmware loader misses sysfs directory" + exit 1 +fi + +for DIR in $FIRMWARE_DIRS; do + [ -e "$DIR/$FIRMWARE" ] || continue + echo 1 > /sys$DEVPATH/loading + cat "$DIR/$FIRMWARE" > /sys$DEVPATH/data + echo 0 > /sys$DEVPATH/loading + exit 0 +done + +echo -1 > /sys$DEVPATH/loading +err "Cannot find firmware file '$FIRMWARE'" +mkdir -p /dev/.udev/firmware-missing +file=$(echo "$FIRMWARE" | sed 's:/:\\x2f:g') +ln -s -f "$DEVPATH" /dev/.udev/firmware-missing/$file +exit 1 diff --git a/udev/ubuntu/50-compat_firmware.rules b/udev/ubuntu/50-compat_firmware.rules new file mode 100644 index 0000000..6473788 --- /dev/null +++ b/udev/ubuntu/50-compat_firmware.rules @@ -0,0 +1,4 @@ +# do not edit this file, it will be overwritten on update + +# compat_firmware-class requests, copies files into the kernel +SUBSYSTEM=="compat_firmware", ACTION=="add", RUN+="compat_firmware.sh" diff --git a/udev/ubuntu/compat_firmware.sh b/udev/ubuntu/compat_firmware.sh new file mode 100755 index 0000000..9d4659a --- /dev/null +++ b/udev/ubuntu/compat_firmware.sh @@ -0,0 +1,29 @@ +#!/bin/sh -e + +FIRMWARE_DIRS="/lib/firmware/updates/$(uname -r) /lib/firmware/updates \ + /lib/firmware/$(uname -r) /lib/firmware" + +err() { + echo "$@" >&2 + logger -t "${0##*/}[$$]" "$@" 2>/dev/null || true +} + +if [ ! -e /sys$DEVPATH/loading ]; then + err "udev firmware loader misses sysfs directory" + exit 1 +fi + +for DIR in $FIRMWARE_DIRS; do + [ -e "$DIR/$FIRMWARE" ] || continue + echo 1 > /sys$DEVPATH/loading + cat "$DIR/$FIRMWARE" > /sys$DEVPATH/data + echo 0 > /sys$DEVPATH/loading + exit 0 +done + +echo -1 > /sys$DEVPATH/loading +err "Cannot find firmware file '$FIRMWARE'" +mkdir -p /dev/.udev/firmware-missing +file=$(echo "$FIRMWARE" | sed 's:/:\\x2f:g') +ln -s -f "$DEVPATH" /dev/.udev/firmware-missing/$file +exit 1 -- 2.39.5
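
For reference, a typical driver-select session looks like the sketch below. This is only an illustration: the make and make install targets come from the usual compat-wireless build flow rather than from this patch, and the unpack directory name is an assumption.

    # Hypothetical session, run from the compat-wireless top level directory
    cd compat-wireless-2010-03-10        # assumed unpack directory
    ./scripts/driver-select ath5k        # prune the Makefiles down to ath5k and its dependencies
    make                                 # assumed target from the usual compat-wireless flow
    sudo make install                    # assumed target
    ./scripts/driver-select restore      # restore the backed-up .bk Makefiles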
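
Similarly, a minimal sketch of how udev ends up driving the compat_firmware.sh helper above; DEVPATH and FIRMWARE are normally supplied by the compat_firmware uevent, so the example values and the helper's install path here are assumptions.

    # Hypothetical invocation, emulating the RUN key in 50-compat_firmware.rules
    export DEVPATH=/devices/example-wifi/compat_firmware/example    # assumed sysfs path
    export FIRMWARE=example-firmware.bin                            # assumed firmware file name
    /lib/udev/compat_firmware.sh                                    # install path is an assumption
    # The helper writes 1 to /sys$DEVPATH/loading, copies the firmware file into
    # /sys$DEVPATH/data, then writes 0 to /sys$DEVPATH/loading, as implemented above.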